{"signature": "def exists():", "body": "try:_call('')return Trueexcept:return False", "docstring": "Returns True if there's a valid git installation, otherwise False", "id": "f1:m2"} {"signature": "def is_committed():", "body": "return '' in _call('')", "docstring": "Returns True if repository is committed, otherwise False", "id": "f1:m3"} {"signature": "def list_tags():", "body": "_fetch_tags()return [t for t in _call('').strip().split('') if t != '']", "docstring": "Returns a list of tags", "id": "f1:m4"} {"signature": "def create_tag(tag):", "body": "_call('' + str(tag))", "docstring": "Creates a tag", "id": "f1:m5"} {"signature": "def delete_tag(tag):", "body": "_call('' + str(tag))", "docstring": "Deletes a tag", "id": "f1:m6"} {"signature": "def push_tag(tag):", "body": "_call('' + str(tag))", "docstring": "Pushes a tag into the upstream", "id": "f1:m7"} {"signature": "def check_environment(target, label):", "body": "if not git.exists():click.secho('', fg='')sys.exit()if not os.path.isdir(''):click.secho('', fg='')sys.exit()if not git.is_committed():click.secho('',fg='')sys.exit()if target is None and label is None:click.secho('', fg='')sys.exit()", "docstring": "Performs some environment checks prior to the program's execution", "id": "f2:m1"} {"signature": "def print_information(handler, label):", "body": "click.echo(''.format(tag=click.style(str(handler.latest_stable or ''), fg='' ifhandler.latest_stable else '')))if label is not None:latest_revision = handler.latest_revision(label)click.echo(''.format(label=click.style(label, fg=''),tag=click.style(str(latest_revision or ''),fg='' if latest_revision else '')))", "docstring": "Prints latest tag's information", "id": "f2:m2"} {"signature": "def confirm(tag):", "body": "click.echo()if click.confirm(''.format(tag=click.style(str(tag), fg='')),default=True, abort=True):git.create_tag(tag)if click.confirm(''.format(tag=click.style(str(tag), fg='')),default=True):git.push_tag(tag)click.echo('')else:git.delete_tag(tag)click.echo('')", "docstring": "Prompts user before proceeding", "id": "f2:m3"} {"signature": "def clone(self):", "body": "return Version(self.major, self.minor, self.patch)", "docstring": "Returns a copy of this object", "id": "f3:c0:m4"} {"signature": "def bump(self, target):", "body": "if target == '':return Version(self.major, self.minor, self.patch + )if target == '':return Version(self.major, self.minor + , )if target == '':return Version(self.major + , , )return self.clone()", "docstring": "Bumps the Version given a target\n\nThe target can be either MAJOR, MINOR or PATCH", "id": "f3:c0:m5"} {"signature": "def clone(self):", "body": "return Revision(self.label, self.number)", "docstring": "Returns a copy of this object", "id": "f3:c1:m4"} {"signature": "def bump(self):", "body": "return Revision(self.label, self.number + )", "docstring": "Bumps the Revision's number", "id": "f3:c1:m5"} {"signature": "def clone(self):", "body": "t = Tag(self.version.major, self.version.minor, self.version.patch)if self.revision is not None:t.revision = self.revision.clone()return t", "docstring": "Returns a copy of this object", "id": "f3:c2:m4"} {"signature": "def with_revision(self, label, number):", "body": "t = self.clone()t.revision = Revision(label, number)return t", "docstring": "Returns a Tag with a given revision", "id": "f3:c2:m5"} {"signature": "@staticmethoddef from_version(version):", "body": "return Tag(version.major, version.minor, version.patch)", "docstring": "Creates a Tag, given a Version", "id": "f3:c2:m6"} 
{"signature": "@staticmethoddef default():", "body": "return Tag(, , )", "docstring": "Returns the default Tag (v0.0.0)", "id": "f3:c2:m7"} {"signature": "@staticmethoddef parse(s):", "body": "try:m = _regex.match(s)t = Tag(int(m.group('')),int(m.group('')),int(m.group('')))return tif m.group('') is Noneelse t.with_revision(m.group(''), int(m.group('')))except AttributeError:return None", "docstring": "Parses a string into a Tag", "id": "f3:c2:m8"} {"signature": "def yield_tag(self, target=None, label=None):", "body": "if target is None and label is None:raise ValueError('')if label is None:return self._yield_from_target(target)if target is None:return self._yield_from_label(label)return self._yield_from_target_and_label(target, label)", "docstring": "Returns a new Tag containing the bumped target and/or the bumped label", "id": "f3:c3:m6"} {"signature": "@invoke.task()def build(ctx):", "body": "ctx.run(f'')", "docstring": "Build the package into distributables.\n\n This will create two distributables: source and wheel.", "id": "f5:m0"} {"signature": "@invoke.task()def clean(ctx):", "body": "ctx.run(f'')dist = ROOT.joinpath('')print(f'')shutil.rmtree(str(dist))", "docstring": "Clean previously built package artifacts.", "id": "f5:m1"} {"signature": "@invoke.task(pre=[clean, build])def upload(ctx, repo):", "body": "artifacts = ''.join(shlex.quote(str(n))for n in ROOT.joinpath('').glob(''))ctx.run(f'')", "docstring": "Upload the package to an index server.\n\n This implies cleaning and re-building the package.\n\n :param repo: Required. Name of the index server to upload to, as specifies\n in your .pypirc configuration file.", "id": "f5:m2"} {"signature": "def walk_up(bottom):", "body": "bottom = os.path.realpath(bottom)try:names = os.listdir(bottom)except Exception:returndirs, nondirs = [], []for name in names:if os.path.isdir(os.path.join(bottom, name)):dirs.append(name)else:nondirs.append(name)yield bottom, dirs, nondirsnew_path = os.path.realpath(os.path.join(bottom, ''))if new_path == bottom:returnfor x in walk_up(new_path):yield x", "docstring": "mimic os.walk, but walk 'up' instead of down the directory tree.\n From: https://gist.github.com/zdavkeos/1098474", "id": "f10:m1"} {"signature": "def load(pipfile_path=None, inject_env=True):", "body": "if pipfile_path is None:pipfile_path = Pipfile.find()return Pipfile.load(filename=pipfile_path, inject_env=inject_env)", "docstring": "Loads a pipfile from a given path.\n If none is provided, one will try to be found.", "id": "f10:m2"} {"signature": "def inject_environment_variables(self, d):", "body": "if not d:return dif isinstance(d, six.string_types):return os.path.expandvars(d)for k, v in d.items():if isinstance(v, six.string_types):d[k] = os.path.expandvars(v)elif isinstance(v, dict):d[k] = self.inject_environment_variables(v)elif isinstance(v, list):d[k] = [self.inject_environment_variables(e) for e in v]return d", "docstring": "Recursively injects environment variables into TOML values", "id": "f10:c0:m2"} {"signature": "@staticmethoddef find(max_depth=):", "body": "i = for c, d, f in walk_up(os.getcwd()):i += if i < max_depth:if '':p = os.path.join(c, '')if os.path.isfile(p):return praise RuntimeError('')", "docstring": "Returns the path of a Pipfile in parent directories.", "id": "f10:c1:m1"} {"signature": "@classmethoddef load(klass, filename, inject_env=True):", "body": "p = PipfileParser(filename=filename)pipfile = klass(filename=filename)pipfile.data = p.parse(inject_env=inject_env)return pipfile", "docstring": "Load a Pipfile 
from a given filename.", "id": "f10:c1:m2"} {"signature": "@propertydef hash(self):", "body": "content = json.dumps(self.data, sort_keys=True, separators=(\"\", \"\"))return hashlib.sha256(content.encode(\"\")).hexdigest()", "docstring": "Returns the SHA256 of the pipfile's data.", "id": "f10:c1:m3"} {"signature": "@propertydef contents(self):", "body": "with codecs.open(self.filename, '', '') as f:return f.read()", "docstring": "Returns the contents of the pipfile.", "id": "f10:c1:m4"} {"signature": "def lock(self):", "body": "data = self.datadata[''][''] = {\"\": self.hash}data[''][''] = return json.dumps(data, indent=, separators=('', ''))", "docstring": "Returns a JSON representation of the Pipfile.", "id": "f10:c1:m5"} {"signature": "def assert_requirements(self):", "body": "if hasattr(sys, ''):implementation_version = format_full_version(sys.implementation.version)else:implementation_version = \"\"if hasattr(sys, ''):implementation_name = sys.implementation.nameelse:implementation_name = ''lookup = {'': os.name,'': sys.platform,'': platform.machine(),'': platform.python_implementation(),'': platform.release(),'': platform.system(),'': platform.version(),'': platform.python_version()[:],'': platform.python_version(),'': implementation_name,'': implementation_version}for marker, specifier in self.data[''][''].items():if marker in lookup:try:assert lookup[marker] == specifierexcept AssertionError:raise AssertionError(''.format(marker, specifier))", "docstring": "Asserts PEP 508 specifiers.", "id": "f10:c1:m6"} {"signature": "def add_templates_to_message(self):", "body": "self.message.subject_template = self.subject_templateself.message.body_template = self.body_template", "docstring": "Adds templates to the fixture message, ensuring it can be rendered.", "id": "f20:c1:m1"} {"signature": "def add_templates_to_message(self):", "body": "super(TemplatedHTMLEmailMessageViewTestCase, self).add_templates_to_message()self.message.html_body_template = self.html_body_template", "docstring": "Adds templates to the fixture message, ensuring it can be rendered.", "id": "f20:c2:m1"} {"signature": "@propertydef headers(self):", "body": "if not hasattr(self, ''):self._headers = {}return self._headers", "docstring": "A dictionary containing the headers for this message.", "id": "f23:c0:m0"} {"signature": "def get_context_data(self, **kwargs):", "body": "return Context(kwargs)", "docstring": "Returns the context that will be used for rendering this message.\n\n:rtype: :class:`django.template.Context`", "id": "f23:c0:m3"} {"signature": "def render_to_message(self, extra_context=None, **kwargs):", "body": "if extra_context is None:extra_context = {}kwargs.setdefault('', {}).update(self.headers)context = self.get_context_data(**extra_context)return self.message_class(subject=self.render_subject(context),body=self.render_body(context),**kwargs)", "docstring": "Renders and returns an unsent message with the provided context.\n\nAny extra keyword arguments passed will be passed through as keyword\narguments to the message constructor.\n\n:param extra_context: Any additional context to use when rendering the\n templated content.\n:type extra_context: :class:`dict`\n:returns: A message instance.\n:rtype: :attr:`.message_class`", "id": "f23:c0:m4"} {"signature": "def send(self, extra_context=None, **kwargs):", "body": "message = self.render_to_message(extra_context=extra_context, **kwargs)return message.send()", "docstring": "Renders and sends an email message.\n\nAll keyword arguments other than 
``extra_context`` are passed through\nas keyword arguments when constructing a new :attr:`message_class`\ninstance for this message.\n\nThis method exists primarily for convenience, and the proper\nrendering of your message should not depend on the behavior of this\nmethod. To alter how a message is created, override\n:meth:``render_to_message`` instead, since that should always be\ncalled, even if a message is not sent.\n\n:param extra_context: Any additional context data that will be used\n when rendering this message.\n:type extra_context: :class:`dict`", "id": "f23:c0:m5"} {"signature": "def render_subject(self, context):", "body": "rendered = self.subject_template.render(unescape(context))return rendered.strip()", "docstring": "Renders the message subject for the given context.\n\nThe context data is automatically unescaped to avoid rendering HTML\nentities in ``text/plain`` content.\n\n:param context: The context to use when rendering the subject template.\n:type context: :class:`~django.template.Context`\n:returns: A rendered subject.\n:rtype: :class:`str`", "id": "f23:c1:m5"} {"signature": "def render_body(self, context):", "body": "return self.body_template.render(unescape(context))", "docstring": "Renders the message body for the given context.\n\nThe context data is automatically unescaped to avoid rendering HTML\nentities in ``text/plain`` content.\n\n:param context: The context to use when rendering the body template.\n:type context: :class:`~django.template.Context`\n:returns: A rendered body.\n:rtype: :class:`str`", "id": "f23:c1:m6"} {"signature": "def render_html_body(self, context):", "body": "return self.html_body_template.render(context)", "docstring": "Renders the message body for the given context.\n\n:param context: The context to use when rendering the body template.\n:type context: :class:`~django.template.Context`\n:returns: A rendered HTML body.\n:rtype: :class:`str`", "id": "f23:c2:m2"} {"signature": "def render_to_message(self, extra_context=None, *args, **kwargs):", "body": "message = super(TemplatedHTMLEmailMessageView, self).render_to_message(extra_context, *args, **kwargs)if extra_context is None:extra_context = {}context = self.get_context_data(**extra_context)content = self.render_html_body(context)message.attach_alternative(content, mimetype='')return message", "docstring": "Renders and returns an unsent message with the given context.\n\nAny extra keyword arguments passed will be passed through as keyword\narguments to the message constructor.\n\n:param extra_context: Any additional context to use when rendering\n templated content.\n:type extra_context: :class:`dict`\n:returns: A message instance.\n:rtype: :attr:`.message_class`", "id": "f23:c2:m3"} {"signature": "def maybe_decode_header(header):", "body": "value, encoding = decode_header(header)[]if encoding:return value.decode(encoding)else:return value", "docstring": "Decodes an encoded 7-bit ASCII header value into its actual value.", "id": "f25:m0"} {"signature": "def autodiscover():", "body": "from django.conf import settingsfor application in settings.INSTALLED_APPS:module = import_module(application)if module_has_submodule(module, ''):emails = import_module('' % application)try:import_module('' % application)except ImportError:if module_has_submodule(emails, ''):raise", "docstring": "Imports all available previews classes.", "id": "f25:m1"} {"signature": "def __iter__(self):", "body": "for module in sorted(self.__previews.keys()):previews = ModulePreviews(module, 
sorted(self.__previews[module].values(), key=str))yield previews", "docstring": "Returns an iterator of :class:`ModulePreviews` tuples, sorted by module name.", "id": "f25:c0:m1"} {"signature": "def register(self, cls):", "body": "preview = cls(site=self)logger.debug('', preview, self)index = self.__previews.setdefault(preview.module, {})index[cls.__name__] = preview", "docstring": "Adds a preview to the index.", "id": "f25:c0:m2"} {"signature": "def list_view(self, request):", "body": "return render(request, '', {'': self,})", "docstring": "Returns a list view response containing all of the registered previews.", "id": "f25:c0:m4"} {"signature": "def detail_view(self, request, module, preview):", "body": "try:preview = self.__previews[module][preview]except KeyError:raise Http404 return preview.detail_view(request)", "docstring": "Looks up a preview in the index, returning a detail view response.", "id": "f25:c0:m5"} {"signature": "@propertydef description(self):", "body": "return getattr(split_docstring(self.message_view), '', None)", "docstring": "A longer description of this preview that is used in the preview index.\n\nIf not provided, this defaults to the first paragraph of the underlying\nmessage view class' docstring.", "id": "f25:c1:m3"} {"signature": "@propertydef url(self):", "body": "return reverse('' % URL_NAMESPACE, kwargs={'': self.module,'': type(self).__name__,})", "docstring": "The URL to access this preview.", "id": "f25:c1:m4"} {"signature": "def detail_view(self, request):", "body": "context = {'': self,}kwargs = {}if self.form_class:if request.GET:form = self.form_class(data=request.GET)else:form = self.form_class()context[''] = formif not form.is_bound or not form.is_valid():return render(request, '', context)kwargs.update(form.get_message_view_kwargs())message_view = self.get_message_view(request, **kwargs)message = message_view.render_to_message()raw = message.message()headers = OrderedDict((header, maybe_decode_header(raw[header])) for header in self.headers)context.update({'': message,'': message.subject,'': message.body,'': headers,'': raw.as_string(),})alternatives = getattr(message, '', [])try:html = next(alternative[] for alternative in alternativesif alternative[] == '')context.update({'': html,'': b64encode(html.encode('')),})except StopIteration:passreturn render(request, self.template_name, context)", "docstring": "Renders the message view to a response.", "id": "f25:c1:m6"} {"signature": "def split_docstring(value):", "body": "docstring = textwrap.dedent(getattr(value, '', ''))if not docstring:return Nonepieces = docstring.strip().split('', )try:body = pieces[]except IndexError:body = Nonereturn Docstring(pieces[], body)", "docstring": "Splits the docstring of the given value into its summary and body.\n\n:returns: a 2-tuple of the format ``(summary, body)``", "id": "f26:m0"} {"signature": "def unescape(context):", "body": "return Context(context, autoescape=False)", "docstring": "Accepts a context object, returning a new context with autoescape off.\n\nUseful for rendering plain-text templates without having to wrap the entire\ntemplate in an `{% autoescape off %}` tag.", "id": "f26:m2"} {"signature": "def login(self, oauth_filename=\"\", uploader_id=None):", "body": "cls_name = type(self).__name__oauth_cred = os.path.join(os.path.dirname(OAUTH_FILEPATH), oauth_filename + '')try:if not self.api.login(oauth_credentials=oauth_cred, uploader_id=uploader_id):try:self.api.perform_oauth(storage_filepath=oauth_cred)except 
OSError:logger.exception(\"\")self.api.login(oauth_credentials=oauth_cred, uploader_id=uploader_id)except (OSError, ValueError):logger.exception(\"\".format(cls_name))return Falseif not self.is_authenticated:logger.warning(\"\".format(cls_name))return Falselogger.info(\"\".format(cls_name))return True", "docstring": "Authenticate the gmusicapi Musicmanager instance.\n\n Parameters:\n oauth_filename (str): The filename of the oauth credentials file to use/create for login.\n Default: ``oauth``\n\n uploader_id (str): A unique id as a MAC address (e.g. ``'00:11:22:33:AA:BB'``).\n This should only be provided in cases where the default (host MAC address incremented by 1) won't work.\n\n Returns:\n ``True`` on successful login, ``False`` on unsuccessful login.", "id": "f33:c0:m1"} {"signature": "def logout(self, revoke_oauth=False):", "body": "return self.api.logout(revoke_oauth=revoke_oauth)", "docstring": "Log out the gmusicapi Musicmanager instance.\n\n Parameters:\n revoke_oauth (bool): If ``True``, oauth credentials will be revoked and the corresponding oauth file will be deleted.\n\n Returns:\n ``True`` on success.", "id": "f33:c0:m2"} {"signature": "def get_google_songs(self, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False,uploaded=True, purchased=True):", "body": "if not uploaded and not purchased:raise ValueError(\"\")logger.info(\"\")google_songs = []if uploaded:google_songs += self.api.get_uploaded_songs()if purchased:for song in self.api.get_purchased_songs():if song not in google_songs:google_songs.append(song)matched_songs, filtered_songs = filter_google_songs(google_songs, include_filters=include_filters, exclude_filters=exclude_filters,all_includes=all_includes, all_excludes=all_excludes)logger.info(\"\".format(len(filtered_songs)))logger.info(\"\".format(len(matched_songs)))return matched_songs, filtered_songs", "docstring": "Create song list from user's Google Music library.\n\n Parameters:\n include_filters (list): A list of ``(field, pattern)`` tuples.\n Fields are any valid Google Music metadata field available to the Musicmanager client.\n Patterns are Python regex patterns.\n Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.\n\n exclude_filters (list): A list of ``(field, pattern)`` tuples.\n Fields are any valid Google Music metadata field available to the Musicmanager client.\n Patterns are Python regex patterns.\n Google Music songs are filtered out if the given metadata field values match any of the given patterns.\n\n all_includes (bool): If ``True``, all include_filters criteria must match to include a song.\n\n all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.\n\n uploaded (bool): Include uploaded songs. Default: ``True``.\n\n purchased (bool): Include purchased songs. 
Default: ``True``.\n\n Returns:\n A list of Google Music song dicts matching criteria and\n a list of Google Music song dicts filtered out using filter criteria.", "id": "f33:c0:m3"} {"signature": "@cast_to_list()def download(self, songs, template=None):", "body": "if not template:template = os.getcwd()songnum = total = len(songs)results = []errors = {}pad = len(str(total))for result in self._download(songs, template):song_id = songs[songnum]['']songnum += downloaded, error = resultif downloaded:logger.info(\"\".format(num=songnum, pad=pad, total=total, file=downloaded[song_id], song_id=song_id))results.append({'': '', '': song_id, '': downloaded[song_id]})elif error:title = songs[songnum].get('', \"\")artist = songs[songnum].get('', \"\")album = songs[songnum].get('', \"\")logger.info(\"\".format(num=songnum, pad=pad, total=total, title=title, artist=artist, album=album, song_id=song_id))results.append({'': '', '': song_id, '': error[song_id]})if errors:logger.info(\"\")for filepath, e in errors.items():logger.info(\"\".format(file=filepath, error=e))logger.info(\"\")return results", "docstring": "Download Google Music songs.\n\n Parameters:\n songs (list or dict): Google Music song dict(s).\n\n template (str): A filepath which can include template patterns.\n\n Returns:\n A list of result dictionaries.\n ::\n\n [\n {'result': 'downloaded', 'id': song_id, 'filepath': downloaded[song_id]}, # downloaded\n {'result': 'error', 'id': song_id, 'message': error[song_id]} # error\n ]", "id": "f33:c0:m5"} {"signature": "@cast_to_list()def upload(self, filepaths, enable_matching=False, transcode_quality='', delete_on_success=False):", "body": "filenum = total = len(filepaths)results = []errors = {}pad = len(str(total))exist_strings = [\"\", \"\"]for result in self._upload(filepaths, enable_matching=enable_matching, transcode_quality=transcode_quality):filepath = filepaths[filenum]filenum += uploaded, matched, not_uploaded, error = resultif uploaded:logger.info(\"\".format(num=filenum, pad=pad, total=total, file=filepath, song_id=uploaded[filepath]))results.append({'': '', '': filepath, '': uploaded[filepath]})elif matched:logger.info(\"\".format(num=filenum, pad=pad, total=total, file=filepath, song_id=matched[filepath]))results.append({'': '', '': filepath, '': matched[filepath]})elif error:logger.warning(\"\".format(num=filenum, pad=pad, total=total, file=filepath))results.append({'': '', '': filepath, '': error[filepath]})errors.update(error)else:if any(exist_string in not_uploaded[filepath] for exist_string in exist_strings):response = \"\"song_id = GM_ID_RE.search(not_uploaded[filepath]).group()logger.info(\"\".format(num=filenum, pad=pad, total=total, file=filepath, response=response, song_id=song_id))results.append({'': '', '': filepath, '': song_id, '': not_uploaded[filepath]})else:response = not_uploaded[filepath]logger.info(\"\".format(num=filenum, pad=pad, total=total, file=filepath, response=response))results.append({'': '', '': filepath, '': not_uploaded[filepath]})success = (uploaded or matched) or (not_uploaded and '' in not_uploaded[filepath])if success and delete_on_success:try:os.remove(filepath)except (OSError, PermissionError):logger.warning(\"\".format(filepath))if errors:logger.info(\"\")for filepath, e in errors.items():logger.info(\"\".format(file=filepath, error=e))logger.info(\"\")return results", "docstring": "Upload local songs to Google Music.\n\n Parameters:\n filepaths (list or str): Filepath(s) to upload.\n\n enable_matching (bool): If ``True`` attempt to use `scan 
and match\n `__.\n This requires ffmpeg or avconv.\n\n transcode_quality (str or int): If int, pass to ffmpeg/avconv ``-q:a`` for libmp3lame `VBR quality\n '__.\n If string, pass to ffmpeg/avconv ``-b:a`` for libmp3lame `CBR quality\n '__.\n Default: ``320k``\n\n delete_on_success (bool): Delete successfully uploaded local files. Default: ``False``\n\n Returns:\n A list of result dictionaries.\n ::\n\n [\n {'result': 'uploaded', 'filepath': , 'id': }, # uploaded\n {'result': 'matched', 'filepath': , 'id': }, # matched\n {'result': 'error', 'filepath': , 'message': }, # error\n {'result': 'not_uploaded', 'filepath': , 'id': , 'message': }, # not_uploaded ALREADY_EXISTS\n {'result': 'not_uploaded', 'filepath': , 'message': } # not_uploaded\n ]", "id": "f33:c0:m7"} {"signature": "def login(self, username=None, password=None, android_id=None):", "body": "cls_name = type(self).__name__if username is None:username = input(\"\")if password is None:password = getpass.getpass(\"\")if android_id is None:android_id = Mobileclient.FROM_MAC_ADDRESStry:self.api.login(username, password, android_id)except OSError:logger.exception(\"\".format(cls_name))if not self.is_authenticated:logger.warning(\"\".format(cls_name))return Falselogger.info(\"\".format(cls_name))return True", "docstring": "Authenticate the gmusicapi Mobileclient instance.\n\n Parameters:\n username (Optional[str]): Your Google Music username. Will be prompted if not given.\n\n password (Optional[str]): Your Google Music password. Will be prompted if not given.\n\n android_id (Optional[str]): The 16 hex digits from an Android device ID.\n Default: Use gmusicapi.Mobileclient.FROM_MAC_ADDRESS to create ID from computer's MAC address.\n\n Returns:\n ``True`` on successful login or ``False`` on unsuccessful login.", "id": "f35:c0:m1"} {"signature": "def logout(self):", "body": "return self.api.logout()", "docstring": "Log out the gmusicapi Mobileclient instance.\n\n Returns:\n ``True`` on success.", "id": "f35:c0:m2"} {"signature": "@propertydef is_subscribed(self):", "body": "return self.api.is_subscribed", "docstring": "Check the subscription status of the gmusicapi client instance.\n\n Returns:\n ``True`` if subscribed, ``False`` if not.", "id": "f35:c0:m3"} {"signature": "def get_google_songs(self, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):", "body": "logger.info(\"\")google_songs = self.api.get_all_songs()matched_songs, filtered_songs = filter_google_songs(google_songs, include_filters=include_filters, exclude_filters=exclude_filters,all_includes=all_includes, all_excludes=all_excludes)logger.info(\"\".format(len(filtered_songs)))logger.info(\"\".format(len(matched_songs)))return matched_songs, filtered_songs", "docstring": "Create song list from user's Google Music library.\n\n Parameters:\n include_filters (list): A list of ``(field, pattern)`` tuples.\n Fields are any valid Google Music metadata field available to the Mobileclient client.\n Patterns are Python regex patterns.\n Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.\n\n exclude_filters (list): A list of ``(field, pattern)`` tuples.\n Fields are any valid Google Music metadata field available to the Mobileclient client.\n Patterns are Python regex patterns.\n Google Music songs are filtered out if the given metadata field values match any of the given patterns.\n\n all_includes (bool): If ``True``, all include_filters criteria must match to include a song.\n\n all_excludes 
(bool): If ``True``, all exclude_filters criteria must match to exclude a song.\n\n Returns:\n A list of Google Music song dicts matching criteria and\n a list of Google Music song dicts filtered out using filter criteria.", "id": "f35:c0:m4"} {"signature": "def get_google_playlist(self, playlist):", "body": "logger.info(\"\".format(playlist))for google_playlist in self.api.get_all_user_playlist_contents():if google_playlist[''] == playlist or google_playlist[''] == playlist:return google_playlistelse:logger.warning(\"\".format(playlist))return {}", "docstring": "Get playlist information of a user-generated Google Music playlist.\n\n Parameters:\n playlist (str): Name or ID of Google Music playlist. Names are case-sensitive.\n Google allows multiple playlists with the same name.\n If multiple playlists have the same name, the first one encountered is used.\n\n Returns:\n dict: The playlist dict as returned by Mobileclient.get_all_user_playlist_contents.", "id": "f35:c0:m5"} {"signature": "def get_google_playlist_songs(self, playlist, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):", "body": "logger.info(\"\")google_playlist = self.get_google_playlist(playlist)if not google_playlist:return [], []playlist_song_ids = [track[''] for track in google_playlist['']]playlist_songs = [song for song in self.api.get_all_songs() if song[''] in playlist_song_ids]matched_songs, filtered_songs = filter_google_songs(playlist_songs, include_filters=include_filters, exclude_filters=exclude_filters,all_includes=all_includes, all_excludes=all_excludes)logger.info(\"\".format(len(filtered_songs)))logger.info(\"\".format(len(matched_songs)))return matched_songs, filtered_songs", "docstring": "Create song list from a user-generated Google Music playlist.\n\n Parameters:\n playlist (str): Name or ID of Google Music playlist. 
Names are case-sensitive.\n Google allows multiple playlists with the same name.\n If multiple playlists have the same name, the first one encountered is used.\n\n include_filters (list): A list of ``(field, pattern)`` tuples.\n Fields are any valid Google Music metadata field available to the Musicmanager client.\n Patterns are Python regex patterns.\n Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.\n\n exclude_filters (list): A list of ``(field, pattern)`` tuples.\n Fields are any valid Google Music metadata field available to the Musicmanager client.\n Patterns are Python regex patterns.\n Google Music songs are filtered out if the given metadata field values match any of the given patterns.\n\n all_includes (bool): If ``True``, all include_filters criteria must match to include a song.\n\n all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.\n\n Returns:\n A list of Google Music song dicts in the playlist matching criteria and\n a list of Google Music song dicts in the playlist filtered out using filter criteria.", "id": "f35:c0:m6"} {"signature": "@propertydef is_authenticated(self):", "body": "return self.api.is_authenticated()", "docstring": "Check the authentication status of the gmusicapi client instance.\n\n Returns:\n ``True`` if authenticated, ``False`` if not.", "id": "f36:c0:m1"} {"signature": "@staticmethod@cast_to_list()def get_local_songs(filepaths, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False,exclude_patterns=None, max_depth=float('')):", "body": "logger.info(\"\")supported_filepaths = get_supported_filepaths(filepaths, SUPPORTED_SONG_FORMATS, max_depth=max_depth)included_songs, excluded_songs = exclude_filepaths(supported_filepaths, exclude_patterns=exclude_patterns)matched_songs, filtered_songs = filter_local_songs(included_songs, include_filters=include_filters, exclude_filters=exclude_filters,all_includes=all_includes, all_excludes=all_excludes)logger.info(\"\".format(len(excluded_songs)))logger.info(\"\".format(len(filtered_songs)))logger.info(\"\".format(len(matched_songs)))return matched_songs, filtered_songs, excluded_songs", "docstring": "Load songs from local filepaths.\n\n Parameters:\n filepaths (list or str): Filepath(s) to search for music files.\n\n include_filters (list): A list of ``(field, pattern)`` tuples.\n Fields are any valid mutagen metadata fields. Patterns are Python regex patterns.\n Local songs are filtered out if the given metadata field values don't match any of the given patterns.\n\n exclude_filters (list): A list of ``(field, pattern)`` tuples.\n Fields are any valid mutagen metadata fields. 
Patterns are Python regex patterns.\n Local songs are filtered out if the given metadata field values match any of the given patterns.\n\n all_includes (bool): If ``True``, all include_filters criteria must match to include a song.\n\n all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.\n\n exclude_patterns (list or str): Pattern(s) to exclude.\n Patterns are Python regex patterns.\n Filepaths are excluded if they match any of the exclude patterns.\n\n max_depth (int): The depth in the directory tree to walk.\n A depth of '0' limits the walk to the top directory.\n Default: No limit.\n\n Returns:\n A list of local song filepaths matching criteria,\n a list of local song filepaths filtered out using filter criteria,\n and a list of local song filepaths excluded using exclusion criteria.", "id": "f36:c0:m2"} {"signature": "@staticmethod@cast_to_list()def get_local_playlists(filepaths, exclude_patterns=None, max_depth=float('')):", "body": "logger.info(\"\")included_playlists = []excluded_playlists = []supported_filepaths = get_supported_filepaths(filepaths, SUPPORTED_PLAYLIST_FORMATS, max_depth=max_depth)included_playlists, excluded_playlists = exclude_filepaths(supported_filepaths, exclude_patterns=exclude_patterns)logger.info(\"\".format(len(excluded_playlists)))logger.info(\"\".format(len(included_playlists)))return included_playlists, excluded_playlists", "docstring": "Load playlists from local filepaths.\n\n Parameters:\n filepaths (list or str): Filepath(s) to search for music files.\n\n exclude_patterns (list or str): Pattern(s) to exclude.\n Patterns are Python regex patterns.\n Filepaths are excluded if they match any of the exclude patterns.\n\n max_depth (int): The depth in the directory tree to walk.\n A depth of '0' limits the walk to the top directory.\n Default: No limit.\n\n Returns:\n A list of local playlist filepaths matching criteria\n and a list of local playlist filepaths excluded using exclusion criteria.", "id": "f36:c0:m3"} {"signature": "@staticmethoddef get_local_playlist_songs(playlist, include_filters=None, exclude_filters=None,all_includes=False, all_excludes=False, exclude_patterns=None):", "body": "logger.info(\"\")if os.name == '' and CYGPATH_RE.match(playlist):playlist = convert_cygwin_path(playlist)filepaths = []base_filepath = os.path.dirname(os.path.abspath(playlist))with open(playlist) as local_playlist:for line in local_playlist.readlines():line = line.strip()if line.lower().endswith(SUPPORTED_SONG_FORMATS):path = lineif not os.path.isabs(path):path = os.path.join(base_filepath, path)if os.path.isfile(path):filepaths.append(path)supported_filepaths = get_supported_filepaths(filepaths, SUPPORTED_SONG_FORMATS)included_songs, excluded_songs = exclude_filepaths(supported_filepaths, exclude_patterns=exclude_patterns)matched_songs, filtered_songs = filter_local_songs(included_songs, include_filters=include_filters, exclude_filters=exclude_filters,all_includes=all_includes, all_excludes=all_excludes)logger.info(\"\".format(len(excluded_songs)))logger.info(\"\".format(len(filtered_songs)))logger.info(\"\".format(len(matched_songs)))return matched_songs, filtered_songs, excluded_songs", "docstring": "Load songs from local playlist.\n\n Parameters:\n playlist (str): An M3U(8) playlist filepath.\n\n include_filters (list): A list of ``(field, pattern)`` tuples.\n Fields are any valid mutagen metadata fields. 
Patterns are Python regex patterns.\n Local songs are filtered out if the given metadata field values don't match any of the given patterns.\n\n exclude_filters (list): A list of ``(field, pattern)`` tuples.\n Fields are any valid mutagen metadata fields. Patterns are Python regex patterns.\n Local songs are filtered out if the given metadata field values match any of the given patterns.\n\n all_includes (bool): If ``True``, all include_filters criteria must match to include a song.\n\n all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.\n\n exclude_patterns (list or str): Pattern(s) to exclude.\n Patterns are Python regex patterns.\n Filepaths are excluded if they match any of the exclude patterns.\n\n Returns:\n A list of local playlist song filepaths matching criteria,\n a list of local playlist song filepaths filtered out using filter criteria,\n and a list of local playlist song filepaths excluded using exclusion criteria.", "id": "f36:c0:m4"} {"signature": "def convert_cygwin_path(path):", "body": "try:win_path = subprocess.check_output([\"\", \"\", path], universal_newlines=True).strip()except (FileNotFoundError, subprocess.CalledProcessError):logger.exception(\"\")raisereturn win_path", "docstring": "Convert Unix path from Cygwin to Windows path.", "id": "f38:m0"} {"signature": "def _get_mutagen_metadata(filepath):", "body": "try:metadata = mutagen.File(filepath, easy=True)except mutagen.MutagenError:logger.warning(\"\".format(filepath))raisereturn metadata", "docstring": "Get mutagen metadata dict from a file.", "id": "f38:m1"} {"signature": "def _mutagen_fields_to_single_value(metadata):", "body": "return dict((k, v[]) for k, v in metadata.items() if v)", "docstring": "Replace mutagen metadata field list values in mutagen tags with the first list value.", "id": "f38:m2"} {"signature": "def _split_field_to_single_value(field):", "body": "split_field = re.match(r'', field)return split_field.group() or field", "docstring": "Convert number field values split by a '/' to a single number value.", "id": "f38:m3"} {"signature": "def _filter_comparison_fields(song):", "body": "return [field for field in ['', '', '', '', ''] if field in song and song[field]]", "docstring": "Filter missing artist, album, title, or track fields to improve match accuracy.", "id": "f38:m4"} {"signature": "def _normalize_metadata(metadata):", "body": "metadata = str(metadata)metadata = metadata.lower()metadata = re.sub(r'', '', metadata) metadata = re.sub(r'', r'', metadata) metadata = re.sub(r'', '', metadata) metadata = re.sub(r'', '', metadata) metadata = re.sub(r'', '', metadata) metadata = re.sub(r'', '', metadata) metadata = re.sub(r'', '', metadata) metadata = re.sub(r'', '', metadata, re.I) return metadata", "docstring": "Normalize metadata to improve match accuracy.", "id": "f38:m5"} {"signature": "def _normalize_song(song):", "body": "return song if isinstance(song, dict) else _mutagen_fields_to_single_value(_get_mutagen_metadata(song))", "docstring": "Convert filepath to song dict while leaving song dicts untouched.", "id": "f38:m6"} {"signature": "def compare_song_collections(src_songs, dst_songs):", "body": "def gather_field_values(song):return tuple((_normalize_metadata(song[field]) for field in _filter_comparison_fields(song)))dst_songs_criteria = {gather_field_values(_normalize_song(dst_song)) for dst_song in dst_songs}return [src_song for src_song in src_songs if gather_field_values(_normalize_song(src_song)) not in dst_songs_criteria]", "docstring": 
"Compare two song collections to find missing songs.\n\n Parameters:\n src_songs (list): Google Music song dicts or filepaths of local songs.\n\n dest_songs (list): Google Music song dicts or filepaths of local songs.\n\n Returns:\n A list of Google Music song dicts or local song filepaths from source missing in destination.", "id": "f38:m7"} {"signature": "@cast_to_list()def get_supported_filepaths(filepaths, supported_extensions, max_depth=float('')):", "body": "supported_filepaths = []for path in filepaths:if os.name == '' and CYGPATH_RE.match(path):path = convert_cygwin_path(path)if os.path.isdir(path):for root, __, files in walk_depth(path, max_depth):for f in files:if f.lower().endswith(supported_extensions):supported_filepaths.append(os.path.join(root, f))elif os.path.isfile(path) and path.lower().endswith(supported_extensions):supported_filepaths.append(path)return supported_filepaths", "docstring": "Get filepaths with supported extensions from given filepaths.\n\n Parameters:\n filepaths (list or str): Filepath(s) to check.\n\n supported_extensions (tuple or str): Supported file extensions or a single file extension.\n\n max_depth (int): The depth in the directory tree to walk.\n A depth of '0' limits the walk to the top directory.\n Default: No limit.\n\n Returns:\n A list of supported filepaths.", "id": "f38:m8"} {"signature": "@cast_to_list()def exclude_filepaths(filepaths, exclude_patterns=None):", "body": "if not exclude_patterns:return filepaths, []exclude_re = re.compile(\"\".join(pattern for pattern in exclude_patterns))included_songs = []excluded_songs = []for filepath in filepaths:if exclude_patterns and exclude_re.search(filepath):excluded_songs.append(filepath)else:included_songs.append(filepath)return included_songs, excluded_songs", "docstring": "Exclude file paths based on regex patterns.\n\n Parameters:\n filepaths (list or str): Filepath(s) to check.\n\n exclude_patterns (list): Python regex patterns to check filepaths against.\n\n Returns:\n A list of filepaths to include and a list of filepaths to exclude.", "id": "f38:m9"} {"signature": "def _check_field_value(field_value, pattern):", "body": "if isinstance(field_value, list):return any(re.search(pattern, str(value), re.I) for value in field_value)else:return re.search(pattern, str(field_value), re.I)", "docstring": "Check a song metadata field value for a pattern.", "id": "f38:m10"} {"signature": "def _check_filters(song, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):", "body": "include = Trueif include_filters:if all_includes:if not all(field in song and _check_field_value(song[field], pattern) for field, pattern in include_filters):include = Falseelse:if not any(field in song and _check_field_value(song[field], pattern) for field, pattern in include_filters):include = Falseif exclude_filters:if all_excludes:if all(field in song and _check_field_value(song[field], pattern) for field, pattern in exclude_filters):include = Falseelse:if any(field in song and _check_field_value(song[field], pattern) for field, pattern in exclude_filters):include = Falsereturn include", "docstring": "Check a song metadata dict against a set of metadata filters.", "id": "f38:m11"} {"signature": "def filter_google_songs(songs, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):", "body": "matched_songs = []filtered_songs = []if include_filters or exclude_filters:for song in songs:if _check_filters(song, include_filters=include_filters, 
exclude_filters=exclude_filters,all_includes=all_includes, all_excludes=all_excludes):matched_songs.append(song)else:filtered_songs.append(song)else:matched_songs += songsreturn matched_songs, filtered_songs", "docstring": "Match a Google Music song dict against a set of metadata filters.\n\n Parameters:\n songs (list): Google Music song dicts to filter.\n\n include_filters (list): A list of ``(field, pattern)`` tuples.\n Fields are any valid Google Music metadata field available to the Musicmanager client.\n Patterns are Python regex patterns.\n Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.\n\n exclude_filters (list): A list of ``(field, pattern)`` tuples.\n Fields are any valid Google Music metadata field available to the Musicmanager client.\n Patterns are Python regex patterns.\n Google Music songs are filtered out if the given metadata field values match any of the given patterns.\n\n all_includes (bool): If ``True``, all include_filters criteria must match to include a song.\n\n all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.\n\n Returns:\n A list of Google Music song dicts matching criteria and\n a list of Google Music song dicts filtered out using filter criteria.\n ::\n\n (matched, filtered)", "id": "f38:m12"} {"signature": "def filter_local_songs(filepaths, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):", "body": "matched_songs = []filtered_songs = []for filepath in filepaths:try:song = _get_mutagen_metadata(filepath)except mutagen.MutagenError:filtered_songs.append(filepath)else:if include_filters or exclude_filters:if _check_filters(song, include_filters=include_filters, exclude_filters=exclude_filters,all_includes=all_includes, all_excludes=all_excludes):matched_songs.append(filepath)else:filtered_songs.append(filepath)else:matched_songs.append(filepath)return matched_songs, filtered_songs", "docstring": "Match a local file against a set of metadata filters.\n\n Parameters:\n filepaths (list): Filepaths to filter.\n\n include_filters (list): A list of ``(field, pattern)`` tuples.\n Fields are any valid mutagen metadata fields.\n Patterns are Python regex patterns.\n Local songs are filtered out if the given metadata field values don't match any of the given patterns.\n\n exclude_filters (list): A list of ``(field, pattern)`` tuples.\n Fields are any valid mutagen metadata fields.\n Patterns are Python regex patterns.\n Local songs are filtered out if the given metadata field values match any of the given patterns.\n\n all_includes (bool): If ``True``, all include_filters criteria must match to include a song.\n\n all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.\n\n Returns:\n A list of local song filepaths matching criteria and\n a list of local song filepaths filtered out using filter criteria.\n Invalid music files are also filtered out.\n ::\n\n (matched, filtered)", "id": "f38:m13"} {"signature": "def get_suggested_filename(metadata):", "body": "if metadata.get('') and metadata.get(''):suggested_filename = ''.format(**metadata)elif metadata.get('') and metadata.get(''):suggested_filename = ''.format(**metadata)elif metadata.get('') and metadata.get(''):suggested_filename = ''.format(**metadata)else:suggested_filename = ''.format(metadata.get('', ''))return suggested_filename", "docstring": "Generate a filename for a song based on metadata.\n\n Parameters:\n metadata (dict): A metadata 
dict.\n\n Returns:\n A filename.", "id": "f38:m14"} {"signature": "def template_to_filepath(template, metadata, template_patterns=None):", "body": "if template_patterns is None:template_patterns = TEMPLATE_PATTERNSmetadata = metadata if isinstance(metadata, dict) else _mutagen_fields_to_single_value(metadata)assert isinstance(metadata, dict)suggested_filename = get_suggested_filename(metadata).replace('', '')if template == os.getcwd() or template == '':filepath = suggested_filenameelse:t = template.replace('', suggested_filename)filepath = _replace_template_patterns(t, metadata, template_patterns)return filepath", "docstring": "Create directory structure and file name based on metadata template.\n\n Parameters:\n template (str): A filepath which can include template patterns as defined by :param template_patterns:.\n\n metadata (dict): A metadata dict.\n\n template_patterns (dict): A dict of ``pattern: field`` pairs used to replace patterns with metadata field values.\n Default: :const TEMPLATE_PATTERNS:\n\n Returns:\n A filepath.", "id": "f38:m16"} {"signature": "def walk_depth(path, max_depth=float('')):", "body": "start_level = os.path.abspath(path).count(os.path.sep)for dir_entry in os.walk(path):root, dirs, _ = dir_entrylevel = root.count(os.path.sep) - start_levelyield dir_entryif level >= max_depth:dirs[:] = []", "docstring": "Walk a directory tree with configurable depth.\n\n Parameters:\n path (str): A directory path to walk.\n\n max_depth (int): The depth in the directory tree to walk.\n A depth of '0' limits the walk to the top directory.\n Default: No limit.", "id": "f38:m17"} {"signature": "def cast_to_list(position):", "body": "@wrapt.decoratordef wrapper(function, instance, args, kwargs):if not isinstance(args[position], list):args = list(args)args[position] = [args[position]]args = tuple(args)return function(*args, **kwargs)return wrapper", "docstring": "Cast the positional argument at given position into a list if not already a list.", "id": "f39:m0"} {"signature": "def dict_cursor(func):", "body": "@wraps(func)def wrapper(cls, *args, **kwargs):with (yield from cls.get_cursor(_CursorType.DICT)) as c:return (yield from func(cls, c, *args, **kwargs))return wrapper", "docstring": "Decorator that provides a dictionary cursor to the calling function\n\nAdds the cursor as the second argument to the calling functions\n\nRequires that the function being decorated is an instance of a class or object\nthat yields a cursor from a get_cursor(cursor_type=CursorType.DICT) coroutine or provides such an object\nas the first argument in its signature\n\nYields:\n A client-side dictionary cursor", "id": "f41:m0"} {"signature": "def cursor(func):", "body": "@wraps(func)def wrapper(cls, *args, **kwargs):with (yield from cls.get_cursor()) as c:return (yield from func(cls, c, *args, **kwargs))return wrapper", "docstring": "Decorator that provides a cursor to the calling function\n\nAdds the cursor as the second argument to the calling functions\n\nRequires that the function being decorated is an instance of a class or object\nthat yields a cursor from a get_cursor() coroutine or provides such an object\nas the first argument in its signature\n\nYields:\n A client-side cursor", "id": "f41:m1"} {"signature": "def nt_cursor(func):", "body": "@wraps(func)def wrapper(cls, *args, **kwargs):with (yield from cls.get_cursor(_CursorType.NAMEDTUPLE)) as c:return (yield from func(cls, c, *args, **kwargs))return wrapper", "docstring": "Decorator that provides a namedtuple cursor to the calling 
function\n\nAdds the cursor as the second argument to the calling functions\n\nRequires that the function being decorated is an instance of a class or object\nthat yields a cursor from a get_cursor(cursor_type=CursorType.NAMEDTUPLE) coroutine or provides such an object\nas the first argument in its signature\n\nYields:\n A client-side namedtuple cursor", "id": "f41:m2"} {"signature": "def transaction(func):", "body": "@wraps(func)def wrapper(cls, *args, **kwargs):with (yield from cls.get_cursor(_CursorType.NAMEDTUPLE)) as c:try:yield from c.execute('')result = (yield from func(cls, c, *args, **kwargs))except Exception:yield from c.execute('')else:yield from c.execute('')return resultreturn wrapper", "docstring": "Provides a transacted cursor which will run in autocommit=false mode\n\nFor any exception the transaction will be rolled back.\nRequires that the function being decorated is an instance of a class or object\nthat yields a cursor from a get_cursor(cursor_type=CursorType.NAMEDTUPLE) coroutine or provides such an object\nas the first argument in its signature\n\nYields:\n A client-side transacted named cursor", "id": "f41:m3"} {"signature": "@classmethoddef connect(cls, database: str, user: str, password: str, host: str, port: int, *, use_pool: bool=True,enable_ssl: bool=False, minsize=, maxsize=, keepalives_idle=, keepalives_interval=, echo=False,**kwargs):", "body": "cls._connection_params[''] = databasecls._connection_params[''] = usercls._connection_params[''] = passwordcls._connection_params[''] = hostcls._connection_params[''] = portcls._connection_params[''] = '' if enable_ssl else ''cls._connection_params[''] = minsizecls._connection_params[''] = maxsizecls._connection_params[''] = keepalives_idlecls._connection_params[''] = keepalives_intervalcls._connection_params[''] = echocls._connection_params.update(kwargs)cls._use_pool = use_pool", "docstring": "Sets connection parameters\nFor more information on the parameters that it accepts,\nsee: http://www.postgresql.org/docs/9.2/static/libpq-connect.html", "id": "f41:c0:m0"} {"signature": "@classmethoddef use_pool(cls, pool: Pool):", "body": "cls._pool = pool", "docstring": "Sets an existing connection pool instead of using connect() to make one", "id": "f41:c0:m1"} {"signature": "@classmethod@coroutinedef get_pool(cls) -> Pool:", "body": "if len(cls._connection_params) < :raise ConnectionError('')if not cls._pool:cls._pool = yield from create_pool(**cls._connection_params)return cls._pool", "docstring": "Yields:\n existing db connection pool", "id": "f41:c0:m2"} {"signature": "@classmethod@coroutinedef get_cursor(cls, cursor_type=_CursorType.PLAIN) -> Cursor:", "body": "_cur = Noneif cls._use_pool:_connection_source = yield from cls.get_pool()else:_connection_source = yield from aiopg.connect(echo=False, **cls._connection_params)if cursor_type == _CursorType.PLAIN:_cur = yield from _connection_source.cursor()if cursor_type == _CursorType.NAMEDTUPLE:_cur = yield from _connection_source.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)if cursor_type == _CursorType.DICT:_cur = yield from _connection_source.cursor(cursor_factory=psycopg2.extras.DictCursor)if not cls._use_pool:_cur = cursor_context_manager(_connection_source, _cur)return _cur", "docstring": "Yields:\n new client-side cursor from existing db connection pool", "id": "f41:c0:m3"} {"signature": "@classmethod@coroutine@cursordef count(cls, cur, table:str, where_keys: list=None):", "body": "if where_keys:where_clause, values = 
cls._get_where_clause_with_values(where_keys)query = cls._count_query_where.format(table, where_clause)q, t = query, valueselse:query = cls._count_query.format(table)q, t = query, ()yield from cur.execute(q, t)result = yield from cur.fetchone()return int(result[])", "docstring": "gives the number of records in the table\n\nArgs:\n table: a string indicating the name of the table\n\nReturns:\n an integer indicating the number of records in the table", "id": "f41:c0:m4"} {"signature": "@classmethod@coroutine@nt_cursordef insert(cls, cur, table: str, values: dict):", "body": "keys = cls._COMMA.join(values.keys())value_place_holder = cls._PLACEHOLDER * len(values)query = cls._insert_string.format(table, keys, value_place_holder[:-])yield from cur.execute(query, tuple(values.values()))return (yield from cur.fetchone())", "docstring": "Creates an insert statement with only chosen fields\n\nArgs:\n table: a string indicating the name of the table\n values: a dict of fields and values to be inserted\n\nReturns:\n A 'Record' object with table columns as properties", "id": "f41:c0:m5"} {"signature": "@classmethod@coroutine@nt_cursordef update(cls, cur, table: str, values: dict, where_keys: list) -> tuple:", "body": "keys = cls._COMMA.join(values.keys())value_place_holder = cls._PLACEHOLDER * len(values)where_clause, where_values = cls._get_where_clause_with_values(where_keys)query = cls._update_string.format(table, keys, value_place_holder[:-], where_clause)yield from cur.execute(query, (tuple(values.values()) + where_values))return (yield from cur.fetchall())", "docstring": "Creates an update query with only chosen fields\nSupports only a single field where clause\n\nArgs:\n table: a string indicating the name of the table\n values: a dict of fields and values to be inserted\n where_keys: list of dictionary\n example of where keys: [{'name':('>', 'cip'),'url':('=', 'cip.com'},{'type':{'<=', 'manufacturer'}}]\n where_clause will look like ((name>%s and url=%s) or (type <= %s))\n items within each dictionary get 'AND'-ed and dictionaries themselves get 'OR'-ed\n\nReturns:\n an integer indicating count of rows deleted", "id": "f41:c0:m6"} {"signature": "@classmethod@coroutine@cursordef delete(cls, cur, table: str, where_keys: list):", "body": "where_clause, values = cls._get_where_clause_with_values(where_keys)query = cls._delete_query.format(table, where_clause)yield from cur.execute(query, values)return cur.rowcount", "docstring": "Creates a delete query with where keys\nSupports multiple where clause with and or or both\n\nArgs:\n table: a string indicating the name of the table\n where_keys: list of dictionary\n example of where keys: [{'name':('>', 'cip'),'url':('=', 'cip.com'},{'type':{'<=', 'manufacturer'}}]\n where_clause will look like ((name>%s and url=%s) or (type <= %s))\n items within each dictionary get 'AND'-ed and dictionaries themselves get 'OR'-ed\n\nReturns:\n an integer indicating count of rows deleted", "id": "f41:c0:m8"} {"signature": "@classmethod@coroutine@nt_cursordef select(cls, cur, table: str, order_by: str, columns: list=None, where_keys: list=None, limit=,offset=):", "body": "if columns:columns_string = cls._COMMA.join(columns)if where_keys:where_clause, values = cls._get_where_clause_with_values(where_keys)query = cls._select_selective_column_with_condition.format(columns_string, table, where_clause,order_by, limit, offset)q, t = query, valueselse:query = cls._select_selective_column.format(columns_string, table, order_by, limit, offset)q, t = query, ()else:if 
where_keys:where_clause, values = cls._get_where_clause_with_values(where_keys)query = cls._select_all_string_with_condition.format(table, where_clause, order_by, limit, offset)q, t = query, valueselse:query = cls._select_all_string.format(table, order_by, limit, offset)q, t = query, ()yield from cur.execute(q, t)return (yield from cur.fetchall())", "docstring": "Creates a select query for selective columns with where keys\nSupports multiple where clauses with AND, OR, or both\n\nArgs:\n table: a string indicating the name of the table\n order_by: a string indicating column name to order the results on\n columns: list of columns to select from\n where_keys: list of dictionary\n limit: the limit on the number of results\n offset: offset on the results\n\n example of where keys: [{'name':('>', 'cip'),'url':('=', 'cip.com'},{'type':{'<=', 'manufacturer'}}]\n where_clause will look like ((name>%s and url=%s) or (type <= %s))\n items within each dictionary get 'AND'-ed and across dictionaries get 'OR'-ed\n\nReturns:\n A list of 'Record' objects with table columns as properties", "id": "f41:c0:m9"} {"signature": "@classmethod@coroutine@nt_cursordef raw_sql(cls, cur, query: str, values: tuple):", "body": "yield from cur.execute(query, values)return (yield from cur.fetchall())", "docstring": "Run a raw sql query\n\nArgs:\n query : query string to execute\n values : tuple of values to be used with the query\n\nReturns:\n result of query as list of named tuple", "id": "f41:c0:m10"} {"signature": "def connect(self, host, port, minsize=, maxsize=, loop=asyncio.get_event_loop()):", "body": "self._pool = yield from aioredis.create_pool((host, port), minsize=minsize, maxsize=maxsize, loop=loop)", "docstring": "Set up a connection pool\n:param host: Redis host\n:param port: Redis port\n:param loop: Event loop", "id": "f42:c0:m1"} {"signature": "def set_key(self, key, value, namespace=None, expire=):", "body": "with (yield from self._pool) as redis:if namespace is not None:key = self._get_key(namespace, key)yield from redis.set(key, value, expire=expire)", "docstring": "Set a key in a cache.\n:param key: Key name\n:param value: Value\n:param namespace : Namespace to associate the key with\n:param expire: expiration\n:return:", "id": "f42:c0:m2"} {"signature": "def indexesOptional(f):", "body": "stack = inspect.stack()_NO_INDEX_CHECK_NEEDED.add('' % (f.__module__, stack[][], f.__name__))del stackreturn f", "docstring": "Decorate test methods with this if you don't require strict index checking", "id": "f44:m1"} {"signature": "def handle_change(self, change):", "body": "op = change['']if op in '':self.add(len(change['']), LatLng(*change['']))elif op == '':self.add(change[''], LatLng(*change['']))elif op == '':points = [LatLng(*p) for p in change['']]self.addAll([bridge.encode(c) for c in points])elif op == '':self.set(change[''], LatLng(*change['']))elif op == '':self.remove(change[''])else:raise NotImplementedError(\"\".format(op))", "docstring": "Handle changes from atom ContainerLists", "id": "f70:c1:m1"} {"signature": "def create_widget(self):", "body": "self.init_options()MapFragment.newInstance(self.options).then(self.on_map_fragment_created)self.widget = FrameLayout(self.get_context())self.map = GoogleMap(__id__=bridge.generate_id())", "docstring": "Create the underlying widget.", "id": "f70:c24:m0"} {"signature": "def init_options(self):", "body": "self.options = GoogleMapOptions()d = self.declarationself.set_map_type(d.map_type)if d.ambient_mode:self.set_ambient_mode(d.ambient_mode)if (d.camera_position 
or d.camera_zoom ord.camera_tilt or d.camera_bearing):self.update_camera()if d.map_bounds:self.set_map_bounds(d.map_bounds)if not d.show_compass:self.set_show_compass(d.show_compass)if not d.show_zoom_controls:self.set_show_zoom_controls(d.show_zoom_controls)if not d.show_toolbar:self.set_show_toolbar(d.show_toolbar)if d.lite_mode:self.set_lite_mode(d.lite_mode)if not d.rotate_gestures:self.set_rotate_gestures(d.rotate_gestures)if not d.scroll_gestures:self.set_scroll_gestures(d.scroll_gestures)if not d.tilt_gestures:self.set_tilt_gestures(d.tilt_gestures)if not d.zoom_gestures:self.set_zoom_gestures(d.zoom_gestures)if d.min_zoom:self.set_min_zoom(d.min_zoom)if d.max_zoom:self.set_max_zoom(d.max_zoom)", "docstring": "Initialize the underlying map options.", "id": "f70:c24:m1"} {"signature": "def init_map(self):", "body": "d = self.declarationif d.show_location:self.set_show_location(d.show_location)if d.show_traffic:self.set_show_traffic(d.show_traffic)if d.show_indoors:self.set_show_indoors(d.show_indoors)if d.show_buildings:self.set_show_buildings(d.show_buildings)mapview = self.mapmid = mapview.getId()mapview.onCameraChange.connect(self.on_camera_changed)mapview.onCameraMoveStarted.connect(self.on_camera_move_started)mapview.onCameraMoveCanceled.connect(self.on_camera_move_stopped)mapview.onCameraIdle.connect(self.on_camera_move_stopped)mapview.setOnCameraChangeListener(mid)mapview.setOnCameraMoveStartedListener(mid)mapview.setOnCameraMoveCanceledListener(mid)mapview.setOnCameraIdleListener(mid)mapview.onMapClick.connect(self.on_map_clicked)mapview.setOnMapClickListener(mid)mapview.onMapLongClick.connect(self.on_map_long_clicked)mapview.setOnMapLongClickListener(mid)mapview.onMarkerClick.connect(self.on_marker_clicked)mapview.setOnMarkerClickListener(self.map.getId())mapview.onMarkerDragStart.connect(self.on_marker_drag_start)mapview.onMarkerDrag.connect(self.on_marker_drag)mapview.onMarkerDragEnd.connect(self.on_marker_drag_end)mapview.setOnMarkerDragListener(mid)mapview.onInfoWindowClick.connect(self.on_info_window_clicked)mapview.onInfoWindowLongClick.connect(self.on_info_window_long_clicked)mapview.onInfoWindowClose.connect(self.on_info_window_closed)mapview.setOnInfoWindowClickListener(mid)mapview.setOnInfoWindowCloseListener(mid)mapview.setOnInfoWindowLongClickListener(mid)mapview.onPolygonClick.connect(self.on_poly_clicked)mapview.onPolylineClick.connect(self.on_poly_clicked)mapview.setOnPolygonClickListener(mid)mapview.setOnPolylineClickListener(mid)mapview.onCircleClick.connect(self.on_circle_clicked)mapview.setOnCircleClickListener(mid)", "docstring": "Add markers, polys, callouts, etc..", "id": "f70:c24:m2"} {"signature": "def init_info_window_adapter(self):", "body": "adapter = self.adapterif adapter:return adapter = GoogleMap.InfoWindowAdapter()adapter.getInfoContents.connect(self.on_info_window_contents_requested)adapter.getInfoWindow.connect(self.on_info_window_requested)self.map.setInfoWindowAdapter(adapter)", "docstring": "Initialize the info window adapter. 
Should only be done if one of \n the markers defines a custom view.", "id": "f70:c24:m3"} {"signature": "def on_map_fragment_created(self, obj_id):", "body": "self.fragment = MapFragment(__id__=obj_id)self.map.onMapReady.connect(self.on_map_ready)self.fragment.getMapAsync(self.map.getId())context = self.get_context()def on_transaction(id):trans = FragmentTransaction(__id__=id)trans.add(self.widget.getId(), self.fragment)trans.commit()def on_fragment_manager(id):fm = FragmentManager(__id__=id)fm.beginTransaction().then(on_transaction)context.widget.getSupportFragmentManager().then(on_fragment_manager)", "docstring": "Create the fragment and pull the map reference when it's loaded.", "id": "f70:c24:m4"} {"signature": "def on_map_clicked(self, pos):", "body": "d = self.declarationd.clicked({'': '','': tuple(pos)})", "docstring": "Called when the map is clicked", "id": "f70:c24:m8"} {"signature": "def on_map_long_clicked(self, pos):", "body": "d = self.declarationd.clicked({'': '','': tuple(pos)})", "docstring": "Called when the map is clicked", "id": "f70:c24:m9"} {"signature": "def add_to_map(self):", "body": "raise NotImplementedError", "docstring": "Add this item to the map", "id": "f70:c25:m1"} {"signature": "def destroy(self):", "body": "marker = self.markerparent = self.parent()if marker:if parent:del parent.markers[marker.__id__]marker.remove()super(AndroidMapItemBase, self).destroy()", "docstring": "Remove the marker if it was added to the map when destroying", "id": "f70:c25:m2"} {"signature": "def create_widget(self):", "body": "self.options = MarkerOptions()", "docstring": "Create the MarkerOptions for this map marker\n this later gets converted into a \"Marker\" instance when addMarker \n is called", "id": "f70:c26:m0"} {"signature": "def child_added(self, child):", "body": "if child.widget:self.parent().init_info_window_adapter()super(AndroidMapMarker, self).child_added(child)", "docstring": "If a child is added we have to make sure the map adapter exists", "id": "f70:c26:m3"} {"signature": "def on_marker(self, marker):", "body": "mid, pos = markerself.marker = Marker(__id__=mid)mapview = self.parent()mapview.markers[mid] = selfself.marker.setTag(mid)for w in self.child_widgets():mapview.init_info_window_adapter()breakd = self.declarationif d.show_info:self.set_show_info(d.show_info)del self.options", "docstring": "Convert our options into the actual marker object", "id": "f70:c26:m4"} {"signature": "def create_widget(self):", "body": "self.options = CircleOptions()", "docstring": "Create the CircleOptions for this map item\n this later gets converted into a \"Circle\" instance when addCircle \n is called", "id": "f70:c27:m0"} {"signature": "def on_marker(self, mid):", "body": "self.marker = Circle(__id__=mid)self.parent().markers[mid] = selfself.marker.setTag(mid)d = self.declarationif d.clickable:self.set_clickable(d.clickable)del self.options", "docstring": "Convert our options into the actual circle object", "id": "f70:c27:m3"} {"signature": "def create_widget(self):", "body": "self.options = PolylineOptions()self.points = LatLngList()", "docstring": "Create the MarkerOptions for this map marker\n this later gets converted into a \"Marker\" instance when addMarker \n is called", "id": "f70:c28:m0"} {"signature": "def on_marker(self, mid):", "body": "self.marker = Polyline(__id__=mid)self.parent().markers[mid] = selfself.marker.setTag(mid)d = self.declarationif d.clickable:self.set_clickable(d.clickable)del self.options", "docstring": "Convert our options into the actual 
marker object", "id": "f70:c28:m3"} {"signature": "def update_points(self, change):", "body": "self.points.handle_change(change)self.marker.setPoints(self.points)", "docstring": "Update the points in a smart way without passing them over the \n bridge with every change.", "id": "f70:c28:m6"} {"signature": "def create_widget(self):", "body": "self.options = PolygonOptions()self.points = LatLngList()", "docstring": "Create the MarkerOptions for this map marker\n this later gets converted into a \"Marker\" instance when addMarker \n is called", "id": "f70:c29:m0"} {"signature": "def on_marker(self, mid):", "body": "self.marker = Polygon(__id__=mid)self.parent().markers[mid] = selfself.marker.setTag(mid)d = self.declarationif d.clickable:self.set_clickable(d.clickable)del self.options", "docstring": "Convert our options into the actual marker object", "id": "f70:c29:m3"} {"signature": "@observe('', '', '','', '', '', '','', '', '','', '', '', '','','', '','', '','', '')def _update_proxy(self, change):", "body": "super(MapView, self)._update_proxy(change)", "docstring": "An observer which sends the state change to the proxy.", "id": "f72:c7:m0"} {"signature": "@observe('', '', '', '', '', '','', '', '', '', '', '')def _update_proxy(self, change):", "body": "super(MapMarker, self)._update_proxy(change)", "docstring": "An observer which sends the state change to the proxy.", "id": "f72:c8:m0"} {"signature": "@observe('', '', '', '', '', '','', '')def _update_proxy(self, change):", "body": "super(MapCircle, self)._update_proxy(change)", "docstring": "An observer which sends the state change to the proxy.", "id": "f72:c9:m0"} {"signature": "@observe('', '', '', '', '','', '', '', '', '')def _update_proxy(self, change):", "body": "if change[''] == '':self.proxy.update_points(change)else:super(MapPolyline, self)._update_proxy(change)", "docstring": "An observer which sends the state change to the proxy.", "id": "f72:c10:m0"} {"signature": "@observe('', '', '', '', '','', '', '', '')def _update_proxy(self, change):", "body": "if change[''] == '':self.proxy.update_points(change)else:super(MapPolygon, self)._update_proxy(change)", "docstring": "An observer which sends the state change to the proxy.", "id": "f72:c11:m0"} {"signature": "def run_command(cmd_to_run):", "body": "with tempfile.TemporaryFile() as stdout_file, tempfile.TemporaryFile() as stderr_file:popen = subprocess.Popen(cmd_to_run, stdout=stdout_file, stderr=stderr_file)popen.wait()stderr_file.seek()stdout_file.seek()stderr = stderr_file.read()stdout = stdout_file.read()if six.PY3:stderr = stderr.decode()stdout = stdout.decode()return stderr, stdout", "docstring": "Wrapper around subprocess that pipes the stderr and stdout from `cmd_to_run`\nto temporary files. 
Using the temporary files gets around subprocess.PIPE's\nissues with handling large buffers.\n\nNote: this command will block the python process until `cmd_to_run` has completed.\n\nReturns a tuple, containing the stderr and stdout as strings.", "id": "f94:m0"} {"signature": "def decode_html_entities(html):", "body": "if not html:return htmlfor entity, char in six.iteritems(html_entity_map):html = html.replace(entity, char)return html", "docstring": "Decodes a limited set of HTML entities.", "id": "f94:m9"} {"signature": "def normalize_housecode(house_code):", "body": "if house_code is None:raise X10InvalidHouseCode('' % house_code)if not isinstance(house_code, basestring):raise X10InvalidHouseCode('' % house_code)if len(house_code) != :raise X10InvalidHouseCode('' % house_code)house_code = house_code.upper()if not ('' <= house_code <= ''):raise X10InvalidHouseCode('' % house_code)return house_code", "docstring": "Returns a normalized house code, i.e. upper case.\n Raises exception X10InvalidHouseCode if house code appears to be invalid", "id": "f101:m0"} {"signature": "def normalize_unitnumber(unit_number):", "body": "try:try:unit_number = int(unit_number)except ValueError:raise X10InvalidUnitNumber('' % unit_number)except TypeError:raise X10InvalidUnitNumber('' % unit_number)if not ( <= unit_number <= ):raise X10InvalidUnitNumber('' % unit_number)return unit_number", "docstring": "Returns a normalized unit number, i.e. integers\n Raises exception X10InvalidUnitNumber if unit number appears to be invalid", "id": "f101:m1"} {"signature": "def x10_command(self, house_code, unit_number, state):", "body": "house_code = normalize_housecode(house_code)if unit_number is not None:unit_number = normalize_unitnumber(unit_number)return self._x10_command(house_code, unit_number, state)", "docstring": "Send X10 command to ??? unit.\n\n @param house_code (A-P) - example='A'\n @param unit_number (1-16)- example=1 (or None to impact entire house code)\n @param state - Mochad command/state, See\n https://sourceforge.net/p/mochad/code/ci/master/tree/README\n examples=OFF, 'OFF', 'ON', ALL_OFF, 'all_units_off', 'xdim 128', etc.\n\n Examples:\n x10_command('A', '1', ON)\n x10_command('A', '1', OFF)\n x10_command('A', '1', 'ON')\n x10_command('A', '1', 'OFF')\n x10_command('A', None, ON)\n x10_command('A', None, OFF)\n x10_command('A', None, 'all_lights_off')\n x10_command('A', None, 'all_units_off')\n x10_command('A', None, ALL_OFF)\n x10_command('A', None, 'all_lights_on')\n x10_command('A', 1, 'xdim 128')", "id": "f101:c3:m3"} {"signature": "def _x10_command(self, house_code, unit_number, state):", "body": "print('' % ((house_code, unit_number, state), ))raise NotImplementedError()", "docstring": "Real implementation", "id": "f101:c3:m4"} {"signature": "def __init__(self, device_address=None, default_type=None):", "body": "self.device_address = device_address or ('', )self.default_type = default_type or ''self.default_type = to_bytes(self.default_type)", "docstring": "@param device_address - Optional tuple of (host_address, host_port).\n Defaults to localhost:1099\n@param default_type - Option type of device to send command,\n 'rf' or 'pl'. 
Defaults to 'rf'", "id": "f101:c4:m0"} {"signature": "def _x10_command(self, house_code, unit_number, state):", "body": "log = default_loggerif state.startswith('') or state.startswith('') or state.startswith(''):raise NotImplementedError('' % ((house_code, unit_number, state), ))if unit_number is not None:house_and_unit = '' % (house_code, unit_number)else:raise NotImplementedError('' % ((house_code, unit_number, state), ))house_and_unit = house_codehouse_and_unit = to_bytes(house_and_unit)state = to_bytes(state)mochad_cmd = self.default_type + b'' + house_and_unit + b'' + state + b'' log.debug('', mochad_cmd)mochad_host, mochad_port = self.device_addressresult = netcat(mochad_host, mochad_port, mochad_cmd)log.debug('', result)", "docstring": "Real implementation", "id": "f101:c4:m1"} {"signature": "def __init__(self, device_address=None):", "body": "log = default_loggerlog.debug('', firecracker, x10)if firecracker is None and x10 is None:raise X10BaseException('') if device_address is None:log.info('')possible_serial_ports = list(serial.tools.list_ports.comports())log.debug('', possible_serial_ports)device_address = possible_serial_ports[][]log.debug('')self.device_address = device_addresslog.debug('', self.device_address)", "docstring": "@param device_address - Optional name of serial port\n Defaults to first found serial port", "id": "f101:c5:m0"} {"signature": "def _x10_command(self, house_code, unit_number, state):", "body": "log = default_loggerdef scale_255_to_8(x):\"\"\"\"\"\"factor = x / return - int(abs(round( * factor)))def scale_31_to_8(x):\"\"\"\"\"\"factor = x / return - int(abs(round( * factor)))serial_port_name = self.device_addresshouse_code = normalize_housecode(house_code)if unit_number is not None:unit_number = normalize_unitnumber(unit_number)else:if firecracker:log.error('')if firecracker:log.debug('', (serial_port_name, house_code, unit_number, state))firecracker.send_command(serial_port_name, house_code, unit_number, state)else:if unit_number is not None:if state.startswith('') or state.startswith('') or state.startswith(''):dim_count = int(state.split()[-])if state.startswith(''):dim_count = scale_255_to_8(dim_count)else:dim_count = scale_31_to_8(dim_count)dim_str = '' % (house_code, )dim_list = []for _ in range(dim_count):dim_list.append(dim_str)dim_str = ''.join(dim_list)if dim_count == :x10_command_str = '' % (house_code, unit_number, '')else:x10_command_str = '' % (house_code, unit_number, '', house_code, unit_number, '', dim_str)else:x10_command_str = '' % (house_code, unit_number, state)else:state = x10_mapping[state]x10_command_str = '' % (house_code, state)log.debug('', x10_command_str)x10.sendCommands(serial_port_name, x10_command_str)", "docstring": "Real implementation", "id": "f101:c5:m1"} {"signature": "def _translateCommands(commands):", "body": "for command in commands.split(''):result = [, ]device, command = command.strip().upper().split(None, )result[] = houseCodes[device[]]if len(device) > :deviceNumber = deviceNumbers[device[:]]result[] |= deviceNumber[]result[] = deviceNumber[]result[] |= commandCodes[command]yield ''.join(map(_strBinary, result))", "docstring": "Generate the binary strings for a comma separated list of commands.", "id": "f102:m0"} {"signature": "def _strBinary(n):", "body": "results = []for i in range():n, r = divmod(n, )results.append(''[r])results.reverse()return ''.join(results)", "docstring": "Convert an integer to binary (i.e., a string of 1s and 0s).", "id": "f102:m1"} {"signature": "def _sendBinaryData(port, data):", 
"body": "_reset(port)time.sleep(leadInOutDelay)for digit in data:_sendBit(port, digit)time.sleep(leadInOutDelay)", "docstring": "Send a string of binary data to the FireCracker with proper timing.\n\n See the diagram in the spec referenced above for timing information.\n The module level variables leadInOutDelay and bitDelay represent how\n long each type of delay should be in seconds. They may require tweaking\n on some setups.", "id": "f102:m2"} {"signature": "def _reset(port):", "body": "_setRTSDTR(port, , )_setRTSDTR(port, , )", "docstring": "Perform a rest of the FireCracker module.", "id": "f102:m3"} {"signature": "def _sendBit(port, bit):", "body": "if bit == '':_setRTSDTR(port, , )elif bit == '':_setRTSDTR(port, , )else:returntime.sleep(bitDelay)_setRTSDTR(port, , )time.sleep(bitDelay)", "docstring": "Send an individual bit to the FireCracker module usr RTS/DTR.", "id": "f102:m4"} {"signature": "def _setRTSDTR(port, RTS, DTR):", "body": "port.setRTS(RTS)port.setDTR(DTR)", "docstring": "Set RTS and DTR to the requested state.", "id": "f102:m5"} {"signature": "def sendCommands(comPort, commands):", "body": "mutex.acquire()try:try:port = serial.Serial(port=comPort)header = ''footer = ''for command in _translateCommands(commands):_sendBinaryData(port, header + command + footer)except serial.SerialException:print('' % comPort)print('')raisefinally:mutex.release()", "docstring": "Send X10 commands using the FireCracker on comPort\n\n comPort should be the name of a serial port on the host platform. On\n Windows, for example, 'com1'.\n\n commands should be a string consisting of X10 commands separated by\n commas. For example. 'A1 On, A Dim, A Dim, A Dim, A Lamps Off'. The\n letter is a house code (A-P) and the number is the device number (1-16).\n Possible commands for a house code / device number combination are\n 'On' and 'Off'. The commands 'Bright' and 'Dim' should be used with a\n house code alone after sending an On command to a specific device. 
The\n 'All On', 'All Off', 'Lamps On', and 'Lamps Off' commands should also\n be used with a house code alone.\n\n # Turn on module A1\n >>> sendCommands('com1', 'A1 On')\n\n # Turn all modules with house code A off\n >>> sendCommands('com1', 'A All Off')\n\n # Turn all lamp modules with house code B on\n >>> sendCommands('com1', 'B Lamps On')\n\n # Turn on module A1 and dim it 3 steps, then brighten it 1 step\n >>> sendCommands('com1', 'A1 On, A Dim, A Dim, A Dim, A Bright')", "id": "f102:m6"} {"signature": "def main(argv=None):", "body": "if len(argv):commands = ''.join(argv)comPort, commands = commands.split(None, )sendCommands(comPort, commands)return ", "docstring": "Send X10 commands when module is used from the command line.\n\n This uses syntax similar to sendCommands, for example:\n\n x10.py com2 A1 On, A2 Off, B All Off", "id": "f102:m7"} {"signature": "def connect(self, host):", "body": "return False", "docstring": "this is a no-op for all but wc3270", "id": "f108:c7:m2"} {"signature": "def __init__(self, visible=False, timeout=, app=None, args=None):", "body": "self.app = app or self.create_app(visible, args)self.is_terminated = Falseself.status = Status(None)self.timeout = timeoutself.last_host = None", "docstring": "Create an emulator instance\n\n`visible` controls which executable will be used.\n`timeout` controls the timeout paramater to any Wait() command sent\n to x3270.\n`args` allows sending parameters to the emulator executable", "id": "f108:c14:m0"} {"signature": "def __del__(self):", "body": "self.terminate()", "docstring": "Since an emulator creates a process (and sometimes a socket handle), it is good practice\nto clean these up when done. Note, not terminating at this point will usually have no\nill effect - only Python 3+ on Windows had problems in this regard.", "id": "f108:c14:m1"} {"signature": "def exec_command(self, cmdstr):", "body": "if self.is_terminated:raise TerminatedError(\"\")log.debug(\"\", cmdstr)c = Command(self.app, cmdstr)start = time.time()c.execute()elapsed = time.time() - startlog.debug(\"\".format(elapsed))self.status = Status(c.status_line)return c", "docstring": "Execute an x3270 command\n\n`cmdstr` gets sent directly to the x3270 subprocess on it's stdin.", "id": "f108:c14:m3"} {"signature": "def terminate(self):", "body": "if not self.is_terminated:log.debug(\"\")try:self.exec_command(b\"\")except BrokenPipeError: passexcept socket.error as e:if e.errno != errno.ECONNRESET:raiseself.app.close()self.is_terminated = True", "docstring": "terminates the underlying x3270 subprocess. 
Once called, this\nEmulator instance must no longer be used.", "id": "f108:c14:m4"} {"signature": "def is_connected(self):", "body": "try:self.exec_command(b\"\")return self.status.connection_state.startswith(b\"\")except NotConnectedException:return False", "docstring": "Return bool indicating connection state", "id": "f108:c14:m5"} {"signature": "def connect(self, host):", "body": "if not self.app.connect(host):command = \"\".format(host).encode(\"\")self.exec_command(command)self.last_host = host", "docstring": "Connect to a host", "id": "f108:c14:m6"} {"signature": "def reconnect(self):", "body": "self.exec_command(b\"\")self.connect(self.last_host)", "docstring": "Disconnect from the host and re-connect to the same host", "id": "f108:c14:m7"} {"signature": "def wait_for_field(self):", "body": "self.exec_command(\"\".format(self.timeout).encode(\"\"))if self.status.keyboard != b\"\":raise KeyboardStateError(\"\".format(self.status.keyboard.decode(\"\")))", "docstring": "Wait until the screen is ready, the cursor has been positioned\non a modifiable field, and the keyboard is unlocked.\n\nSometimes the server will \"unlock\" the keyboard but the screen will\nnot yet be ready. In that case, an attempt to read or write to the\nscreen will result in a 'E' keyboard status because we tried to\nread from a screen that is not yet ready.\n\nUsing this method tells the client to wait until a field is\ndetected and the cursor has been positioned on it.", "id": "f108:c14:m8"} {"signature": "def move_to(self, ypos, xpos):", "body": "xpos -= ypos -= self.exec_command(\"\".format(ypos, xpos).encode(\"\"))", "docstring": "move the cursor to the given co-ordinates. Co-ordinates are 1\nbased, as listed in the status area of the terminal.", "id": "f108:c14:m9"} {"signature": "def send_string(self, tosend, ypos=None, xpos=None):", "body": "if xpos is not None and ypos is not None:self.move_to(ypos, xpos)tosend = tosend.replace('', '')self.exec_command(''.format(tosend).encode(\"\"))", "docstring": "Send a string to the screen at the current cursor location or at\nscreen co-ordinates `ypos`/`xpos` if they are both given.\n\nCo-ordinates are 1 based, as listed in the status area of the\nterminal.", "id": "f108:c14:m10"} {"signature": "def string_get(self, ypos, xpos, length):", "body": "xpos -= ypos -= cmd = self.exec_command(\"\".format(ypos, xpos, length).encode(\"\"))assert len(cmd.data) == , cmd.datareturn cmd.data[].decode(\"\")", "docstring": "Get a string of `length` at screen co-ordinates `ypos`/`xpos`\n\nCo-ordinates are 1 based, as listed in the status area of the\nterminal.", "id": "f108:c14:m19"} {"signature": "def string_found(self, ypos, xpos, string):", "body": "found = self.string_get(ypos, xpos, len(string))log.debug(''.format(found))return found == string", "docstring": "Return True if `string` is found at screen co-ordinates\n`ypos`/`xpos`, False otherwise.\n\nCo-ordinates are 1 based, as listed in the status area of the\nterminal.", "id": "f108:c14:m20"} {"signature": "def delete_field(self):", "body": "self.exec_command(b\"\")", "docstring": "Delete contents in field at current cursor location and positions\ncursor at beginning of field.", "id": "f108:c14:m21"} {"signature": "def fill_field(self, ypos, xpos, tosend, length):", "body": "if length < len(tosend):raise FieldTruncateError('' % (length, tosend))if xpos is not None and ypos is not None:self.move_to(ypos, xpos)self.delete_field()self.send_string(tosend)", "docstring": "clears the field at the position given and inserts the 
string\n`tosend`\n\ntosend: the string to insert\nlength: the length of the field\n\nCo-ordinates are 1 based, as listed in the status area of the\nterminal.\n\nraises: FieldTruncateError if `tosend` is longer than\n `length`.", "id": "f108:c14:m22"} {"signature": "def get_dataframe(self):", "body": "assert self.dataframe is not None, (\"\"\"\"% self.__class__.__name__)dataframe = self.dataframereturn dataframe", "docstring": "Get the DataFrame for this view.\nDefaults to using `self.dataframe`.\n\nThis method should always be used rather than accessing `self.dataframe`\ndirectly, as `self.dataframe` gets evaluated only once, and those results\nare cached for all subsequent requests.\n\nYou may want to override this if you need to provide different\ndataframes depending on the incoming request.", "id": "f112:c0:m0"} {"signature": "def update_dataframe(self, dataframe):", "body": "return dataframe", "docstring": "Indicates that the dataframe needs to be updated. The default implementation\njust returns the argument. This method has to be ovewritten to make changing\noperations stick.", "id": "f112:c0:m1"} {"signature": "def index_row(self, dataframe):", "body": "return dataframe.loc[self.kwargs[self.lookup_url_kwarg]].to_frame().T", "docstring": "Indexes the row based on the request parameters.", "id": "f112:c0:m2"} {"signature": "def get_object(self):", "body": "dataframe = self.filter_dataframe(self.get_dataframe())assert self.lookup_url_kwarg in self.kwargs, ('''''' %(self.__class__.__name__, self.lookup_url_kwarg))try:obj = self.index_row(dataframe)except (IndexError, KeyError, ValueError):raise Http404self.check_object_permissions(self.request, obj)return obj", "docstring": "Returns the row the view is displaying.\n\nYou may want to override this if you need to provide non-standard\nqueryset lookups. Eg if objects are referenced using multiple\nkeyword arguments in the url conf.", "id": "f112:c0:m3"} {"signature": "def get_serializer(self, *args, **kwargs):", "body": "serializer_class = self.get_serializer_class()kwargs[''] = self.get_serializer_context()return serializer_class(*args, **kwargs)", "docstring": "Return the serializer instance that should be used for validating and\ndeserializing input, and for serializing output.", "id": "f112:c0:m4"} {"signature": "def get_serializer_class(self):", "body": "assert self.serializer_class is not None, (\"\"\"\"% self.__class__.__name__)return self.serializer_class", "docstring": "Return the class to use for the serializer.\nDefaults to using `self.serializer_class`.\n\nYou may want to override this if you need to provide different\nserializations depending on the incoming request.\n\n(Eg. 
admins get full serialization, others get basic serialization)", "id": "f112:c0:m5"} {"signature": "def get_serializer_context(self):", "body": "return {'': self.request,'': self.format_kwarg,'': self}", "docstring": "Extra context provided to the serializer class.", "id": "f112:c0:m6"} {"signature": "def filter_dataframe(self, dataframe):", "body": "return dataframe", "docstring": "Given a dataframe, filter it.", "id": "f112:c0:m7"} {"signature": "@propertydef paginator(self):", "body": "if not hasattr(self, ''):if self.pagination_class is None:self._paginator = Noneelse:self._paginator = self.pagination_class()return self._paginator", "docstring": "The paginator instance associated with the view, or `None`.", "id": "f112:c0:m8"} {"signature": "def paginate_dataframe(self, dataframe):", "body": "if self.paginator is None:return Nonereturn self.paginator.paginate_dataframe(dataframe, self.request, view=self)", "docstring": "Return a single page of results, or `None` if pagination is disabled.", "id": "f112:c0:m9"} {"signature": "def get_paginated_response(self, data):", "body": "assert self.paginator is not Nonereturn self.paginator.get_paginated_response(data)", "docstring": "Return a paginated style `Response` object for the given output data.", "id": "f112:c0:m10"} {"signature": "def __init__(self):", "body": "try:p = subprocess.Popen(['', ''],stdout=subprocess.PIPE,stderr=subprocess.PIPE)num_colors = int(p.stdout.read())except (OSError, ValueError):num_colors = self.has_colors = Falseif num_colors > :self.has_colors = Trueself.enable_colors()self.COLORS = self.enumerate_colors()", "docstring": "Checks if the shell supports colors", "id": "f119:c4:m0"} {"signature": "def enable_colors(self):", "body": "self.colors_enabled = True", "docstring": "Method to enable colors", "id": "f119:c4:m2"} {"signature": "def disable_colors(self):", "body": "self.colors_enabled = False", "docstring": "Method to disable colors", "id": "f119:c4:m3"} {"signature": "def _wrap_color(self, code, text, format=None, style=None):", "body": "color = Noneif code[:] == self.bg.PREFIX:color = self.bg.COLORS.get(code, None)if not color:color = self.fg.COLORS.get(code, None)if not color:raise Exception('')if format and format not in self.formats:raise Exception('')fmt = \"\"if format == '':fmt = \"\"elif format == '':fmt = \"\"parts = color.split('')color = ''.format(parts[], fmt, parts[])if self.has_colors and self.colors_enabled:st = ''if style:st = self.st.COLORS.get(style, '')return \"\".format(st, color, text, self.st.COLORS[''])else:return text", "docstring": "Colors text with code and given format", "id": "f119:c4:m4"} {"signature": "def __call__(self, schema: Schema) -> typing.Dict[str, typing.Any]:", "body": "result = {}for key, item in schema.items():if callable(item):result[key] = self.get(key=key, type_=item)continueresult[key] = self.get(key=item.get(\"\", key),default=item.get(\"\", UNSET),type_=item.get(\"\", str),subtype=item.get(\"\", str),mapper=item.get(\"\", None),)return result", "docstring": "Parse the environment according to a schema.\n\n:param schema: the schema to parse\n:return: a dictionary of config values", "id": "f127:c1:m1"} {"signature": "def parse(self,value: str,type_: typing.Type[typing.Any] = str,subtype: typing.Type[typing.Any] = str,) -> typing.Any:", "body": "if type_ is bool:return type_(value.lower() in self.TRUE_STRINGS)try:if isinstance(type_, type) and issubclass(type_, (list, tuple, set, frozenset)):return type_(self.parse(v.strip(\"\"), subtype)for v in value.split(\"\")if 
value.strip(\"\"))return type_(value)except ValueError as e:raise ConfigError(*e.args)", "docstring": "Parse value from string.\n\nConvert :code:`value` to\n\n.. code-block:: python\n\n >>> parser = Config()\n >>> parser.parse('12345', type_=int)\n <<< 12345\n >>>\n >>> parser.parse('1,2,3,4', type_=list, subtype=int)\n <<< [1, 2, 3, 4]\n\n:param value: string\n:param type\\\\_: the type to return\n:param subtype: subtype for iterator types\n:return: the parsed config value", "id": "f127:c1:m2"} {"signature": "def get(self,key: str,default: typing.Any = UNSET,type_: typing.Type[typing.Any] = str,subtype: typing.Type[typing.Any] = str,mapper: typing.Optional[typing.Callable[[object], object]] = None,) -> typing.Any:", "body": "value = self.environ.get(key, UNSET)if value is UNSET and default is UNSET:raise ConfigError(\"\".format(key))if value is UNSET:value = defaultelse:value = self.parse(typing.cast(str, value), type_, subtype)if mapper:value = mapper(value)return value", "docstring": "Parse a value from an environment variable.\n\n.. code-block:: python\n\n >>> os.environ['FOO']\n <<< '12345'\n >>>\n >>> os.environ['BAR']\n <<< '1,2,3,4'\n >>>\n >>> 'BAZ' in os.environ\n <<< False\n >>>\n >>> parser = Config()\n >>> parser.get('FOO', type_=int)\n <<< 12345\n >>>\n >>> parser.get('BAR', type_=list, subtype=int)\n <<< [1, 2, 3, 4]\n >>>\n >>> parser.get('BAZ', default='abc123')\n <<< 'abc123'\n >>>\n >>> parser.get('FOO', type_=int, mapper=lambda x: x*10)\n <<< 123450\n\n:param key: the key to look up the value under\n:param default: default value to return when when no value is present\n:param type\\\\_: the type to return\n:param subtype: subtype for iterator types\n:param mapper: a function to post-process the value with\n:return: the parsed config value", "id": "f127:c1:m3"} {"signature": "def dumps(obj,key=None,salt='',serializer=JSONSerializer,compress=False):", "body": "data = serializer().dumps(obj)is_compressed = Falseif compress:compressed = zlib.compress(data)if len(compressed) < (len(data) - ):data = compressedis_compressed = Truebase64d = b64_encode(data)if is_compressed:base64d = b'' + base64dreturn TimestampSigner(key, salt=salt).sign(base64d)", "docstring": "Returns URL-safe, sha1 signed base64 compressed JSON string. If key is\nNone, settings.SECRET_KEY is used instead.\n\nIf compress is True (not the default) checks if compressing using zlib can\nsave some space. Prepends a '.' to signify compression. This is included\nin the signature, to protect against zip bombs.\n\nSalt can be used to namespace the hash, so that a signed string is\nonly valid for a given namespace. 
Leaving this at the default\nvalue or re-using a salt value across different parts of your\napplication without good cause is a security risk.\n\nThe serializer is expected to return a bytestring.", "id": "f144:m1"} {"signature": "def loads(s,key=None,salt='',serializer=JSONSerializer,max_age=None):", "body": "base64d = force_bytes(TimestampSigner(key, salt=salt).unsign(s, max_age=max_age))decompress = Falseif base64d[:] == b'':base64d = base64d[:]decompress = Truedata = b64_decode(base64d)if decompress:data = zlib.decompress(data)return serializer().loads(data)", "docstring": "Reverse of dumps(), raises BadSignature if signature fails.\n\nThe serializer is expected to accept a bytestring.", "id": "f144:m2"} {"signature": "def unsign(self, value, max_age=None):", "body": "result = super(TimestampSigner, self).unsign(value)value, timestamp = result.rsplit(self.sep, )timestamp = baseconv.base62.decode(timestamp)if max_age is not None:if isinstance(max_age, datetime.timedelta):max_age = max_age.total_seconds()age = time.time() - timestampif age > max_age:raise SignatureExpired('' %(age, max_age))return value", "docstring": "Retrieve original value and check it wasn't signed more\nthan max_age seconds ago.", "id": "f144:c1:m2"} {"signature": "def __init__(self, key=None):", "body": "self.digest = hashes.SHA256()self.key = force_bytes(key or settings.SECRET_KEY)", "docstring": ":type key: any\n:rtype: None", "id": "f144:c3:m0"} {"signature": "def signature(self, value):", "body": "h = HMAC(self.key, self.digest, backend=settings.CRYPTOGRAPHY_BACKEND)h.update(force_bytes(value))return h", "docstring": ":type value: any\n:rtype: HMAC", "id": "f144:c3:m1"} {"signature": "def sign(self, value):", "body": "payload = struct.pack('', self.version, int(time.time()))payload += force_bytes(value)return payload + self.signature(payload).finalize()", "docstring": ":type value: any\n:rtype: bytes", "id": "f144:c3:m2"} {"signature": "def unsign(self, signed_value, ttl=None):", "body": "h_size, d_size = struct.calcsize(''), self.digest.digest_sizefmt = '' % (len(signed_value) - h_size - d_size, d_size)try:version, timestamp, value, sig = struct.unpack(fmt, signed_value)except struct.error:raise BadSignature('')if version != self.version:raise BadSignature('')if ttl is not None:if isinstance(ttl, datetime.timedelta):ttl = ttl.total_seconds()age = abs(time.time() - timestamp)if age > ttl + _MAX_CLOCK_SKEW:raise SignatureExpired('' % (age,ttl))try:self.signature(signed_value[:-d_size]).verify(sig)except InvalidSignature:raise BadSignature('' % binascii.b2a_base64(sig))return value", "docstring": "Retrieve original value and check it wasn't signed more\nthan max_age seconds ago.\n\n:type signed_value: bytes\n:type ttl: int | datetime.timedelta", "id": "f144:c3:m3"} {"signature": "def get_version(version=None):", "body": "version = get_complete_version(version)main = get_main_version(version)sub = ''if version[] == '' and version[] == :git_changeset = get_git_changeset()if git_changeset:sub = '' % git_changesetelif version[] != '':mapping = {'': '', '': '', '': ''}sub = mapping[version[]] + str(version[])return str(main + sub)", "docstring": "Returns a PEP 386-compliant version number from VERSION.", "id": "f145:m0"} {"signature": "def get_main_version(version=None):", "body": "version = get_complete_version(version)parts = if version[] == else return ''.join(str(x) for x in version[:parts])", "docstring": "Returns main version (X.Y[.Z]) from VERSION.", "id": "f145:m1"} {"signature": "def 
get_complete_version(version=None):", "body": "if version is None:from django_cryptography import VERSION as versionelse:assert len(version) == assert version[] in ('', '', '', '')return version", "docstring": "Returns a tuple of the django_cryptography version. If version\nargument is non-empty, then checks for correctness of the tuple\nprovided.", "id": "f145:m2"} {"signature": "def get_git_changeset():", "body": "repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))git_log = subprocess.Popen('',stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True,cwd=repo_dir,universal_newlines=True)timestamp = git_log.communicate()[]try:timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))except ValueError:return Nonereturn timestamp.strftime('')", "docstring": "Returns a numeric identifier of the latest git changeset.\n\nThe result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.\nThis value isn't guaranteed to be unique, but collisions are very unlikely,\nso it's sufficient for generating the development version numbers.", "id": "f145:m4"} {"signature": "def salted_hmac(key_salt, value, secret=None):", "body": "if secret is None:secret = settings.SECRET_KEYkey_salt = force_bytes(key_salt)secret = force_bytes(secret)digest = hashes.Hash(settings.CRYPTOGRAPHY_DIGEST, backend=settings.CRYPTOGRAPHY_BACKEND)digest.update(key_salt + secret)key = digest.finalize()h = HMAC(key,settings.CRYPTOGRAPHY_DIGEST,backend=settings.CRYPTOGRAPHY_BACKEND)h.update(force_bytes(value))return h", "docstring": "Returns the HMAC-HASH of 'value', using a key generated from key_salt and a\nsecret (which defaults to settings.SECRET_KEY).\n\nA different key_salt should be passed in for every application of HMAC.\n\n:type key_salt: any\n:type value: any\n:type secret: any\n:rtype: HMAC", "id": "f146:m0"} {"signature": "def constant_time_compare(val1, val2):", "body": "return constant_time.bytes_eq(force_bytes(val1), force_bytes(val2))", "docstring": ":type val1: any\n:type val2: any\n:rtype: bool", "id": "f146:m1"} {"signature": "def pbkdf2(password, salt, iterations, dklen=, digest=None):", "body": "if digest is None:digest = settings.CRYPTOGRAPHY_DIGESTif not dklen:dklen = digest.digest_sizepassword = force_bytes(password)salt = force_bytes(salt)kdf = PBKDF2HMAC(algorithm=digest,length=dklen,salt=salt,iterations=iterations,backend=settings.CRYPTOGRAPHY_BACKEND)return kdf.derive(password)", "docstring": "Implements PBKDF2 with the same API as Django's existing\nimplementation, using cryptography.\n\n:type password: any\n:type salt: any\n:type iterations: int\n:type dklen: int\n:type digest: cryptography.hazmat.primitives.hashes.HashAlgorithm", "id": "f146:m2"} {"signature": "def encrypt(self, data):", "body": "data = force_bytes(data)iv = os.urandom()return self._encrypt_from_parts(data, iv)", "docstring": ":type data: any\n:rtype: any", "id": "f146:c1:m1"} {"signature": "def _encrypt_from_parts(self, data, iv):", "body": "padder = padding.PKCS7(algorithms.AES.block_size).padder()padded_data = padder.update(data) + padder.finalize()encryptor = Cipher(algorithms.AES(self._encryption_key), modes.CBC(iv),self._backend).encryptor()ciphertext = encryptor.update(padded_data) + encryptor.finalize()return self._signer.sign(iv + ciphertext)", "docstring": ":type data: bytes\n:type iv: bytes\n:rtype: any", "id": "f146:c1:m2"} {"signature": "def decrypt(self, data, ttl=None):", "body": "data = self._signer.unsign(data, ttl)iv = data[:]ciphertext = data[:]decryptor = 
Cipher(algorithms.AES(self._encryption_key), modes.CBC(iv),self._backend).decryptor()plaintext_padded = decryptor.update(ciphertext)try:plaintext_padded += decryptor.finalize()except ValueError:raise InvalidTokenunpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()unpadded = unpadder.update(plaintext_padded)try:unpadded += unpadder.finalize()except ValueError:raise InvalidTokenreturn unpadded", "docstring": ":type data: bytes\n:type ttl: int\n:rtype: bytes", "id": "f146:c1:m3"} {"signature": "def get_encrypted_field(base_class):", "body": "assert not isinstance(base_class, models.Field)field_name = '' + base_class.__name__if base_class not in FIELD_CACHE:FIELD_CACHE[base_class] = type(field_name,(EncryptedMixin, base_class), {'': base_class,})return FIELD_CACHE[base_class]", "docstring": "A get or create method for encrypted fields, we cache the field in\nthe module to avoid recreation. This also allows us to always return\nthe same class reference for a field.\n\n:type base_class: models.Field[T]\n:rtype: models.Field[EncryptedMixin, T]", "id": "f147:m0"} {"signature": "def encrypt(base_field, key=None, ttl=None):", "body": "if not isinstance(base_field, models.Field):assert key is Noneassert ttl is Nonereturn get_encrypted_field(base_field)name, path, args, kwargs = base_field.deconstruct()kwargs.update({'': key, '': ttl})return get_encrypted_field(base_field.__class__)(*args, **kwargs)", "docstring": "A decorator for creating encrypted model fields.\n\n:type base_field: models.Field[T]\n:param bytes key: This is an optional argument.\n\n Allows for specifying an instance specific encryption key.\n:param int ttl: This is an optional argument.\n\n The amount of time in seconds that a value can be stored for. If the\n time to live of the data has passed, it will become unreadable.\n The expired value will return an :class:`Expired` object.\n:rtype: models.Field[EncryptedMixin, T]", "id": "f147:m1"} {"signature": "def value_to_string(self, obj):", "body": "value = self.value_from_object(obj)return b64encode(self._dump(value)).decode('')", "docstring": "Pickled data is serialized as base64", "id": "f147:c0:m10"} {"signature": "def __init__(self, store, url=\"\", credentials={}, do_verify_certificate=True):", "body": "self.store = storeself.url = urlself.credentials = credentialsself.do_verify_certificate = do_verify_certificate", "docstring": "Initialize the reporter.\n store store the data store", "id": "f155:c0:m0"} {"signature": "def log(self, url=None, credentials=None, do_verify_certificate=True):", "body": "if url is None:url = self.urlif re.match(\"\", url):self.log_file(url)elif re.match(\"\", url) or re.match(\"\", url):self.log_post(url, credentials, do_verify_certificate)else:self.log_stdout()", "docstring": "Wrapper for the other log methods, decide which one based on the\nURL parameter.", "id": "f155:c0:m1"} {"signature": "def log_stdout(self):", "body": "print(self.store.get_text())", "docstring": "Write to standard output", "id": "f155:c0:m2"} {"signature": "def log_file(self, url=None):", "body": "if url is None:url = self.urlf = re.sub(\"\", \"\", url)try:with open(f, \"\") as of:of.write(str(self.store.get_json_tuples(True)))except IOError as e:print(e)print(\"\")", "docstring": "Write to a local log file", "id": "f155:c0:m3"} {"signature": "def log_post(self, url=None, credentials=None, do_verify_certificate=True):", "body": "if url is None:url = self.urlif credentials is None:credentials = self.credentialsif do_verify_certificate is None:do_verify_certificate = 
self.do_verify_certificateif credentials and \"\" in credentials:headers = {\"\": \"\",'': '' % credentials[\"\"]}else:headers = {\"\": \"\"}try:request = requests.post(url, headers=headers,data=self.store.get_json(), verify=do_verify_certificate)except httplib.IncompleteRead as e:request = e.partial", "docstring": "Write to a remote host via HTTP POST", "id": "f155:c0:m4"} {"signature": "def log_ssh(self):", "body": "pass", "docstring": "Write to a remote file via ssh", "id": "f155:c0:m5"} {"signature": "def register_credentials(self, credentials=None, user=None, user_file=None, password=None, password_file=None):", "body": "if credentials is not None:self.credentials = credentialselse:self.credentials = {}if user:self.credentials[\"\"] = userelif user_file:with open(user_file, \"\") as of:pattern = re.compile(\"\")for l in of:if re.match(pattern, l):l = l[:-]self.credentials[\"\"] = re.sub(pattern, \"\", l)if self.credentials[\"\"][:] == '' andself.credentials[\"\"][-:] == '':self.credentials[\"\"] = self.credentials[\"\"][:-]if password:self.credentials[\"\"] = passwordelif password_file:with open(password_file, \"\") as of:pattern = re.compile(\"\")for l in of:if re.match(pattern, l):l = l[:-]self.credentials[\"\"] =re.sub(pattern, \"\", l)if self.credentials[\"\"][:] == '' andself.credentials[\"\"][-:] == '':self.credentials[\"\"] =self.credentials[\"\"][:-]if \"\" in self.credentials and \"\" in self.credentials:c = self.credentials[\"\"] + \"\" + self.credentials[\"\"]self.credentials[\"\"] = b64encode(c.encode()).decode(\"\")", "docstring": "Helper method to store username and password", "id": "f155:c0:m6"} {"signature": "def register_json(self, data):", "body": "j = json.loads(data)self.last_data_timestamp =datetime.datetime.utcnow().replace(microsecond=).isoformat()try:for v in j:self.data[v[self.id_key]] = {}self.data[v[self.id_key]][self.id_key] =v[self.id_key]self.data[v[self.id_key]][self.value_key] =v[self.value_key]if self.unit_key in v:self.data[v[self.id_key]][self.unit_key] =v[self.unit_key]if self.threshold_key in v:self.data[v[self.id_key]][self.threshold_key] =v[self.threshold_key]for k in self.other_keys:if k in v:self.data[v[self.id_key]][k] = v[k]if self.sensor_time_key in v:self.data[v[self.sensor_time_key]][self.sensor_time_key] =v[self.sensor_time_key]self.data[v[self.id_key]][self.time_key] =self.last_data_timestampexcept KeyError as e:print(\"\" +str(e))except ValueError as e:print(\"\")print(\"\" + str(e))", "docstring": "Register the contents as JSON", "id": "f156:c0:m2"} {"signature": "def get_text(self):", "body": "t = \"\" + str(self.last_data_timestamp) + \"\"for k in self.data:t += k + \"\" + str(self.data[k][self.value_key])u = \"\"if self.unit_key in self.data[k]:u = self.data[k][self.unit_key]t += uif self.threshold_key in self.data[k]:if (self.data[k][self.threshold_key] t += \"\" +str(self.data[k][self.threshold_key]) + \"\"else:t += \"\" + str(self.data[k][self.threshold_key]) + u + \"\"for l in self.other_keys:if l in self.data[k]:t += \"\" + self.data[k][l]t += \"\"return t", "docstring": "Get the data in text form (i.e. 
human readable)", "id": "f156:c0:m3"} {"signature": "def get_translated_data(self):", "body": "j = {}for k in self.data:d = {}for l in self.data[k]:d[self.translation_keys[l]] = self.data[k][l]j[k] = dreturn j", "docstring": "Translate the data with the translation table", "id": "f156:c0:m4"} {"signature": "def get_json(self, prettyprint=False, translate=True):", "body": "j = []if translate:d = self.get_translated_data()else:d = self.datafor k in d:j.append(d[k])if prettyprint:j = json.dumps(j, indent=, separators=('',''))else:j = json.dumps(j)return j", "docstring": "Get the data in JSON form", "id": "f156:c0:m5"} {"signature": "def get_json_tuples(self, prettyprint=False, translate=True):", "body": "j = self.get_json(prettyprint, translate)if len(j) > :if prettyprint:j = j[:-] + \"\"else:j = j[:-] + \"\"else:j = \"\"return j", "docstring": "Get the data as JSON tuples", "id": "f156:c0:m6"} {"signature": "def __init__(self, device, baudrate, store, rounds=, timeout=):", "body": "threading.Thread.__init__(self)self.baudrate = baudrateself.store = storeself.rounds = roundsself.do_run = Trueself.device_name = devicetry:if device:self.device = serial.Serial(device, self.baudrate, timeout=timeout);except serial.serialutil.SerialException:print(\"\" + self.device_name)", "docstring": "Initialize the serial reader class\n device device name to connect to\n baudrate the baud rate for the serial line\n store the data store object to send the data to\n rounds number of rounds to run / listen for input", "id": "f157:c0:m0"} {"signature": "def age(self):", "body": "if self.rounds == :self.do_run = Falseelif self.rounds > :self.rounds -= ", "docstring": "Get closer to your EOL", "id": "f157:c0:m1"} {"signature": "def run(self):", "body": "if not self.device:returntry:data = \"\"while (self.do_run):try:if (self.device.inWaiting() > ):l = self.device.readline()[:-]l = l.decode(\"\")if (l == \"\"):data = \"\"elif (l == \"\") and (len(data) > ) and (data[] == \"\"):data = data + \"\"self.store.register_json(data)self.age()elif (l[:] == \"\"):data = data + \"\" + lelse:sleep()self.age()except (UnicodeDecodeError, ValueError):data = \"\"self.age()except serial.serialutil.SerialException:print(\"\" + self.device_name)", "docstring": "Open a connection over the serial line and receive data lines", "id": "f157:c0:m2"} {"signature": "def halt(self):", "body": "self.do_run = False", "docstring": "Tell the this object to stop working after the next round", "id": "f157:c0:m3"} {"signature": "def bdp_bds_cache(func, tickers, flds, **kwargs) -> ToQuery:", "body": "cache_data = []log_level = kwargs.get('', logs.LOG_LEVEL)logger = logs.get_logger(bdp_bds_cache, level=log_level)kwargs[''] = kwargs.pop('', func == '')kwargs[''] = kwargs.get('', True)tickers = utils.flatten(tickers)flds = utils.flatten(flds)loaded = pd.DataFrame(data=, index=tickers, columns=flds)for ticker, fld in product(tickers, flds):data_file = storage.ref_file(ticker=ticker, fld=fld, ext='', **{k: v for k, v in kwargs.items() if k not in EXC_COLS})if not files.exists(data_file): continuelogger.debug(f'')cache_data.append(pd.read_pickle(data_file))loaded.loc[ticker, fld] = to_qry = loaded.where(loaded == ).dropna(how='', axis=).dropna(how='', axis=)return ToQuery(tickers=to_qry.index.tolist(), flds=to_qry.columns.tolist(),cached_data=cache_data)", "docstring": "Find cached `BDP` / `BDS` queries\n\nArgs:\n func: function name - bdp or bds\n tickers: tickers\n flds: fields\n **kwargs: other kwargs\n\nReturns:\n ToQuery(ticker, flds, kwargs)", "id": 
"f160:m0"} {"signature": "def exists(path) -> bool:", "body": "return os.path.exists(path=path)", "docstring": "Check path or file exists (use os.path.exists)\n\nArgs:\n path: path or file", "id": "f161:m0"} {"signature": "def abspath(cur_file, parent=) -> str:", "body": "file_path = os.path.abspath(cur_file).replace('', '')if os.path.isdir(file_path) and parent == : return file_pathadj = - os.path.isdir(file_path)return ''.join(file_path.split('')[:-(parent + adj)])", "docstring": "Absolute path\n\nArgs:\n cur_file: __file__ or file or path str\n parent: level of parent to look for\n\nReturns:\n str", "id": "f161:m1"} {"signature": "def create_folder(path_name: str, is_file=False):", "body": "path_sep = path_name.replace('', '').split('')for i in range(, len(path_sep) + ( if is_file else )):cur_path = ''.join(path_sep[:i])if not os.path.exists(cur_path): os.mkdir(cur_path)", "docstring": "Make folder as well as all parent folders if not exists\n\nArgs:\n path_name: full path name\n is_file: whether input is name of file", "id": "f161:m2"} {"signature": "def all_files(path_name, keyword='', ext='', full_path=True,has_date=False, date_fmt=DATE_FMT) -> list:", "body": "if not os.path.exists(path=path_name): return []path_name = path_name.replace('', '')if keyword or ext:keyword = f'' if keyword else ''if not ext: ext = ''files = sort_by_modified([f.replace('', '') for f in glob.iglob(f'')if os.path.isfile(f) and (f.replace('', '').split('')[-][] != '')])else:files = sort_by_modified([f'' for f in os.listdir(path=path_name)if os.path.isfile(f'') and (f[] != '')])if has_date:files = filter_by_dates(files, date_fmt=date_fmt)return files if full_path else [f.split('')[-] for f in files]", "docstring": "Search all files with criteria\nReturned list will be sorted by last modified\n\nArgs:\n path_name: full path name\n keyword: keyword to search\n ext: file extensions, split by ','\n full_path: whether return full path (default True)\n has_date: whether has date in file name (default False)\n date_fmt: date format to check for has_date parameter\n\nReturns:\n list: all file names with criteria fulfilled", "id": "f161:m3"} {"signature": "def all_folders(path_name, keyword='', has_date=False, date_fmt=DATE_FMT) -> list:", "body": "if not os.path.exists(path=path_name): return []path_name = path_name.replace('', '')if keyword:folders = sort_by_modified([f.replace('', '') for f in glob.iglob(f'')if os.path.isdir(f) and (f.replace('', '').split('')[-][] != '')])else:folders = sort_by_modified([f'' for f in os.listdir(path=path_name)if os.path.isdir(f'') and (f[] != '')])if has_date:folders = filter_by_dates(folders, date_fmt=date_fmt)return folders", "docstring": "Search all folders with criteria\nReturned list will be sorted by last modified\n\nArgs:\n path_name: full path name\n keyword: keyword to search\n has_date: whether has date in file name (default False)\n date_fmt: date format to check for has_date parameter\n\nReturns:\n list: all folder names fulfilled criteria", "id": "f161:m4"} {"signature": "def sort_by_modified(files_or_folders: list) -> list:", "body": "return sorted(files_or_folders, key=os.path.getmtime, reverse=True)", "docstring": "Sort files or folders by modified time\n\nArgs:\n files_or_folders: list of files or folders\n\nReturns:\n list", "id": "f161:m5"} {"signature": "def filter_by_dates(files_or_folders: list, date_fmt=DATE_FMT) -> list:", "body": "r = re.compile(f'')return list(filter(lambda vv: r.match(vv.replace('', '').split('')[-]) is not None,files_or_folders,))", 
"docstring": "Filter files or dates by date patterns\n\nArgs:\n files_or_folders: list of files or folders\n date_fmt: date format\n\nReturns:\n list", "id": "f161:m6"} {"signature": "def file_modified_time(file_name) -> pd.Timestamp:", "body": "return pd.to_datetime(time.ctime(os.path.getmtime(filename=file_name)))", "docstring": "File modified time in python\n\nArgs:\n file_name: file name\n\nReturns:\n pd.Timestamp", "id": "f161:m8"} {"signature": "def load_info(cat):", "body": "res = _load_yaml_(f'')root = os.environ.get('', '').replace('', '')if not root: return resfor cat, ovrd in _load_yaml_(f'').items():if isinstance(ovrd, dict):if cat in res: res[cat].update(ovrd)else: res[cat] = ovrdif isinstance(ovrd, list) and isinstance(res[cat], list): res[cat] += ovrdreturn res", "docstring": "Load parameters for assets\n\nArgs:\n cat: category\n\nReturns:\n dict\n\nExamples:\n >>> import pandas as pd\n >>>\n >>> assets = load_info(cat='assets')\n >>> all(cat in assets for cat in ['Equity', 'Index', 'Curncy', 'Corp'])\n True\n >>> os.environ['BBG_PATH'] = ''\n >>> exch = load_info(cat='exch')\n >>> pd.Series(exch['EquityUS']).allday\n [400, 2000]\n >>> test_root = f'{PKG_PATH}/tests'\n >>> os.environ['BBG_PATH'] = test_root\n >>> ovrd_exch = load_info(cat='exch')\n >>> # Somehow os.environ is not set properly in doctest environment\n >>> ovrd_exch.update(_load_yaml_(f'{test_root}/markets/exch.yml'))\n >>> pd.Series(ovrd_exch['EquityUS']).allday\n [300, 2100]", "id": "f162:m0"} {"signature": "def _load_yaml_(file_name):", "body": "if not os.path.exists(file_name): return dict()with open(file_name, '', encoding='') as fp:return YAML().load(stream=fp)", "docstring": "Load assets infomation from file\n\nArgs:\n file_name: file name\n\nReturns:\n dict", "id": "f162:m1"} {"signature": "def to_hour(num) -> str:", "body": "to_str = str(int(num))return pd.Timestamp(f'').strftime('')", "docstring": "Convert YAML input to hours\n\nArgs:\n num: number in YMAL file, e.g., 900, 1700, etc.\n\nReturns:\n str\n\nExamples:\n >>> to_hour(900)\n '09:00'\n >>> to_hour(1700)\n '17:00'", "id": "f162:m2"} {"signature": "def hist_file(ticker: str, dt, typ='') -> str:", "body": "data_path = os.environ.get(assist.BBG_ROOT, '').replace('', '')if not data_path: return ''asset = ticker.split()[-]proper_ticker = ticker.replace('', '')cur_dt = pd.Timestamp(dt).strftime('')return f''", "docstring": "Data file location for Bloomberg historical data\n\nArgs:\n ticker: ticker name\n dt: date\n typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]\n\nReturns:\n file location\n\nExamples:\n >>> os.environ['BBG_ROOT'] = ''\n >>> hist_file(ticker='ES1 Index', dt='2018-08-01') == ''\n True\n >>> os.environ['BBG_ROOT'] = '/data/bbg'\n >>> hist_file(ticker='ES1 Index', dt='2018-08-01')\n '/data/bbg/Index/ES1 Index/TRADE/2018-08-01.parq'", "id": "f163:m0"} {"signature": "def ref_file(ticker: str, fld: str, has_date=False, cache=False, ext='', **kwargs) -> str:", "body": "data_path = os.environ.get(assist.BBG_ROOT, '').replace('', '')if (not data_path) or (not cache): return ''proper_ticker = ticker.replace('', '')cache_days = kwargs.pop('', )root = f''if len(kwargs) > : info = utils.to_str(kwargs)[:-].replace('', '')else: info = ''if has_date:cur_dt = utils.cur_time()missing = f''to_find = re.compile(rf'')cur_files = list(filter(to_find.match, sorted(files.all_files(path_name=root, keyword=info, ext=ext))))if len(cur_files) > :upd_dt = to_find.match(cur_files[-]).group()diff = pd.Timestamp('') - pd.Timestamp(upd_dt)if diff >= 
pd.Timedelta(days=cache_days): return missingreturn sorted(cur_files)[-]else: return missingelse: return f''", "docstring": "Data file location for Bloomberg reference data\n\nArgs:\n ticker: ticker name\n fld: field\n has_date: whether add current date to data file\n cache: if has_date is True, whether to load file from latest cached\n ext: file extension\n **kwargs: other overrides passed to ref function\n\nReturns:\n file location\n\nExamples:\n >>> import shutil\n >>>\n >>> os.environ['BBG_ROOT'] = ''\n >>> ref_file('BLT LN Equity', fld='Crncy') == ''\n True\n >>> os.environ['BBG_ROOT'] = '/data/bbg'\n >>> ref_file('BLT LN Equity', fld='Crncy', cache=True)\n '/data/bbg/Equity/BLT LN Equity/Crncy/ovrd=None.parq'\n >>> ref_file('BLT LN Equity', fld='Crncy')\n ''\n >>> cur_dt = utils.cur_time(tz=utils.DEFAULT_TZ)\n >>> ref_file(\n ... 'BLT LN Equity', fld='DVD_Hist_All', has_date=True, cache=True,\n ... ).replace(cur_dt, '[cur_date]')\n '/data/bbg/Equity/BLT LN Equity/DVD_Hist_All/asof=[cur_date], ovrd=None.parq'\n >>> ref_file(\n ... 'BLT LN Equity', fld='DVD_Hist_All', has_date=True,\n ... cache=True, DVD_Start_Dt='20180101',\n ... ).replace(cur_dt, '[cur_date]')[:-5]\n '/data/bbg/Equity/BLT LN Equity/DVD_Hist_All/asof=[cur_date], DVD_Start_Dt=20180101'\n >>> sample = 'asof=2018-11-02, DVD_Start_Dt=20180101, DVD_End_Dt=20180501.pkl'\n >>> root_path = 'xbbg/tests/data'\n >>> sub_path = f'{root_path}/Equity/AAPL US Equity/DVD_Hist_All'\n >>> os.environ['BBG_ROOT'] = root_path\n >>> for tmp_file in files.all_files(sub_path): os.remove(tmp_file)\n >>> files.create_folder(sub_path)\n >>> sample in shutil.copy(f'{root_path}/{sample}', sub_path)\n True\n >>> new_file = ref_file(\n ... 'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',\n ... has_date=True, cache=True, ext='pkl'\n ... )\n >>> new_file.split('/')[-1] == f'asof={cur_dt}, DVD_Start_Dt=20180101.pkl'\n True\n >>> old_file = 'asof=2018-11-02, DVD_Start_Dt=20180101, DVD_End_Dt=20180501.pkl'\n >>> old_full = '/'.join(new_file.split('/')[:-1] + [old_file])\n >>> updated_file = old_full.replace('2018-11-02', cur_dt)\n >>> updated_file in shutil.copy(old_full, updated_file)\n True\n >>> exist_file = ref_file(\n ... 'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',\n ... has_date=True, cache=True, ext='pkl'\n ... )\n >>> exist_file == updated_file\n False\n >>> exist_file = ref_file(\n ... 'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',\n ... DVD_End_Dt='20180501', has_date=True, cache=True, ext='pkl'\n ... 
)\n >>> exist_file == updated_file\n True", "id": "f163:m1"} {"signature": "def save_intraday(data: pd.DataFrame, ticker: str, dt, typ=''):", "body": "cur_dt = pd.Timestamp(dt).strftime('')logger = logs.get_logger(save_intraday, level='')info = f''data_file = hist_file(ticker=ticker, dt=dt, typ=typ)if not data_file: returnif data.empty:logger.warning(f'')returnexch = const.exch_info(ticker=ticker)if exch.empty: returnend_time = pd.Timestamp(const.market_timing(ticker=ticker, dt=dt, timing='')).tz_localize(exch.tz)now = pd.Timestamp('', tz=exch.tz) - pd.Timedelta('')if end_time > now:logger.debug(f'')returnlogger.info(f'')files.create_folder(data_file, is_file=True)data.to_parquet(data_file)", "docstring": "Check whether data is done for the day and save\n\nArgs:\n data: data\n ticker: ticker\n dt: date\n typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]\n\nExamples:\n >>> os.environ['BBG_ROOT'] = 'xbbg/tests/data'\n >>> sample = pd.read_parquet('xbbg/tests/data/aapl.parq')\n >>> save_intraday(sample, 'AAPL US Equity', '2018-11-02')\n >>> # Invalid exchange\n >>> save_intraday(sample, 'AAPL XX Equity', '2018-11-02')\n >>> # Invalid empty data\n >>> save_intraday(pd.DataFrame(), 'AAPL US Equity', '2018-11-02')\n >>> # Invalid date - too close\n >>> cur_dt = utils.cur_time()\n >>> save_intraday(sample, 'AAPL US Equity', cur_dt)", "id": "f163:m2"} {"signature": "def get_logger(name_or_func, level=LOG_LEVEL, types='', **kwargs):", "body": "if isinstance(level, str): level = getattr(logging, level.upper())log_name = utils.func_scope(name_or_func) if callable(name_or_func) else name_or_funclogger = logging.getLogger(name=log_name)logger.setLevel(level=level)if not len(logger.handlers):formatter = logging.Formatter(fmt=kwargs.get('', LOG_FMT))if '' in types and '' in kwargs:file_handler = logging.FileHandler(kwargs[''])file_handler.setFormatter(fmt=formatter)logger.addHandler(file_handler)if '' in types:stream_handler = logging.StreamHandler()stream_handler.setFormatter(fmt=formatter)logger.addHandler(stream_handler)return logger", "docstring": "Generate logger\n\nArgs:\n name_or_func: logger name or current running function\n level: level of logs - debug, info, error\n types: file or stream, or both\n\nReturns:\n logger\n\nExamples:\n >>> get_logger(name_or_func='download_data', level='debug', types='stream')\n \n >>> get_logger(name_or_func='preprocess', log_file='pre.log', types='file|stream')\n ", "id": "f164:m0"} {"signature": "def get_tz(tz) -> str:", "body": "from xbbg.const import exch_infoif tz is None: return DEFAULT_TZto_tz = tzif isinstance(tz, str):if hasattr(TimeZone, tz):to_tz = getattr(TimeZone, tz)else:exch = exch_info(ticker=tz)if '' in exch.index:to_tz = exch.tzreturn to_tz", "docstring": "Convert tz from ticker / shorthands to timezone\n\nArgs:\n tz: ticker or timezone shorthands\n\nReturns:\n str: Python timezone\n\nExamples:\n >>> get_tz('NY')\n 'America/New_York'\n >>> get_tz(TimeZone.NY)\n 'America/New_York'\n >>> get_tz('BHP AU Equity')\n 'Australia/Sydney'", "id": "f165:m0"} {"signature": "def tz_convert(dt, to_tz, from_tz=None) -> str:", "body": "logger = logs.get_logger(tz_convert, level='')f_tz, t_tz = get_tz(from_tz), get_tz(to_tz)from_dt = pd.Timestamp(str(dt), tz=f_tz)logger.debug(f'')return str(pd.Timestamp(str(from_dt), tz=t_tz))", "docstring": "Convert to tz\n\nArgs:\n dt: date time\n to_tz: to tz\n from_tz: from tz - will be ignored if tz from dt is given\n\nReturns:\n str: date & time\n\nExamples:\n >>> dt_1 = pd.Timestamp('2018-09-10 16:00', 
tz='Asia/Hong_Kong')\n >>> tz_convert(dt_1, to_tz='NY')\n '2018-09-10 04:00:00-04:00'\n >>> dt_2 = pd.Timestamp('2018-01-10 16:00')\n >>> tz_convert(dt_2, to_tz='HK', from_tz='NY')\n '2018-01-11 05:00:00+08:00'\n >>> dt_3 = '2018-09-10 15:00'\n >>> tz_convert(dt_3, to_tz='NY', from_tz='JP')\n '2018-09-10 02:00:00-04:00'", "id": "f165:m1"} {"signature": "def proc_ovrds(**kwargs):", "body": "return [(k, v) for k, v in kwargs.items()if k not in list(ELEM_KEYS.keys()) + list(ELEM_KEYS.values()) + PRSV_COLS]", "docstring": "Bloomberg overrides\n\nArgs:\n **kwargs: overrides\n\nReturns:\n list of tuples\n\nExamples:\n >>> proc_ovrds(DVD_Start_Dt='20180101')\n [('DVD_Start_Dt', '20180101')]\n >>> proc_ovrds(DVD_Start_Dt='20180101', cache=True, has_date=True)\n [('DVD_Start_Dt', '20180101')]", "id": "f166:m0"} {"signature": "def proc_elms(**kwargs) -> list:", "body": "return [(ELEM_KEYS.get(k, k), ELEM_VALS.get(ELEM_KEYS.get(k, k), dict()).get(v, v))for k, v in kwargs.items()if (k in list(ELEM_KEYS.keys()) + list(ELEM_KEYS.values()))and (k not in PRSV_COLS)]", "docstring": "Bloomberg overrides for elements\n\nArgs:\n **kwargs: overrides\n\nReturns:\n list of tuples\n\nExamples:\n >>> proc_elms(PerAdj='A', Per='W')\n [('periodicityAdjustment', 'ACTUAL'), ('periodicitySelection', 'WEEKLY')]\n >>> proc_elms(Days='A', Fill='B')\n [('nonTradingDayFillOption', 'ALL_CALENDAR_DAYS'), ('nonTradingDayFillMethod', 'NIL_VALUE')]\n >>> proc_elms(CshAdjNormal=False, CshAdjAbnormal=True)\n [('adjustmentNormal', False), ('adjustmentAbnormal', True)]\n >>> proc_elms(Per='W', Quote='Average', start_date='2018-01-10')\n [('periodicitySelection', 'WEEKLY'), ('overrideOption', 'OVERRIDE_OPTION_GPA')]\n >>> proc_elms(QuoteType='Y')\n [('pricingOption', 'PRICING_OPTION_YIELD')]\n >>> proc_elms(QuoteType='Y', cache=True)\n [('pricingOption', 'PRICING_OPTION_YIELD')]", "id": "f166:m1"} {"signature": "def format_earning(data: pd.DataFrame, header: pd.DataFrame) -> pd.DataFrame:", "body": "if data.dropna(subset=['']).empty: return pd.DataFrame()res = pd.concat([grp.loc[:, ['']].set_index(header.value)for _, grp in data.groupby(data.position)], axis=)res.index.name = Noneres.columns = res.iloc[]res = res.iloc[:].transpose().reset_index().apply(pd.to_numeric, downcast='', errors='')res.rename(columns=lambda vv: ''.join(vv.lower().split()).replace('', ''),inplace=True,)years = res.columns[res.columns.str.startswith('')]lvl_1 = res.level == for yr in years:res.loc[:, yr] = res.loc[:, yr].round()pct = f''res.loc[:, pct] = res.loc[lvl_1, pct] = res.loc[lvl_1, pct].astype(float).round()res.loc[lvl_1, pct] = res.loc[lvl_1, yr] / res.loc[lvl_1, yr].sum() * sub_pct = []for _, snap in res[::-].iterrows():if snap.level > : continueif snap.level == :if len(sub_pct) == : continuesub = pd.concat(sub_pct, axis=).transpose()res.loc[sub.index, pct] =res.loc[sub.index, yr] / res.loc[sub.index, yr].sum() * sub_pct = []if snap.level == : sub_pct.append(snap)res.set_index('', inplace=True)res.index.name = Nonereturn res", "docstring": "Standardized earning outputs and add percentage by each blocks\n\nArgs:\n data: earning data block\n header: earning headers\n\nReturns:\n pd.DataFrame\n\nExamples:\n >>> format_earning(\n ... data=pd.read_pickle('xbbg/tests/data/sample_earning.pkl'),\n ... header=pd.read_pickle('xbbg/tests/data/sample_earning_header.pkl')\n ... 
).round(2)\n level fy2017 fy2017_pct\n Asia-Pacific 1.0 3540.0 66.43\n \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0China 2.0 1747.0 49.35\n \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0Japan 2.0 1242.0 35.08\n \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0Singapore 2.0 551.0 15.56\n United States 1.0 1364.0 25.60\n Europe 1.0 263.0 4.94\n Other Countries 1.0 162.0 3.04", "id": "f166:m2"} {"signature": "def format_output(data: pd.DataFrame, source, col_maps=None) -> pd.DataFrame:", "body": "if data.empty: return pd.DataFrame()if source == '': req_cols = ['', '', '']else: req_cols = ['', '', '', '', '']if any(col not in data for col in req_cols): return pd.DataFrame()if data.dropna(subset=['']).empty: return pd.DataFrame()if source == '':res = pd.DataFrame(pd.concat([pd.Series({**{'': t}, **grp.set_index('').value.to_dict()})for t, grp in data.groupby('')], axis=, sort=False)).transpose().set_index('')else:res = pd.DataFrame(pd.concat([grp.loc[:, ['', '']].set_index('').transpose().reset_index(drop=True).assign(ticker=t)for (t, _), grp in data.groupby(['', ''])], sort=False)).reset_index(drop=True).set_index('')res.columns.name = Noneif col_maps is None: col_maps = dict()return res.rename(columns=lambda vv: col_maps.get(vv, vv.lower().replace('', '').replace('', ''))).apply(pd.to_numeric, errors='', downcast='')", "docstring": "Format `pdblp` outputs to column-based results\n\nArgs:\n data: `pdblp` result\n source: `bdp` or `bds`\n col_maps: rename columns with these mappings\n\nReturns:\n pd.DataFrame\n\nExamples:\n >>> format_output(\n ... data=pd.read_pickle('xbbg/tests/data/sample_bdp.pkl'),\n ... source='bdp'\n ... ).reset_index()\n ticker name\n 0 QQQ US Equity INVESCO QQQ TRUST SERIES 1\n 1 SPY US Equity SPDR S&P 500 ETF TRUST\n >>> format_output(\n ... data=pd.read_pickle('xbbg/tests/data/sample_dvd.pkl'),\n ... source='bds', col_maps={'Dividend Frequency': 'dvd_freq'}\n ... ).loc[:, ['ex_date', 'dividend_amount', 'dvd_freq']].reset_index()\n ticker ex_date dividend_amount dvd_freq\n 0 C US Equity 2018-02-02 0.32 Quarter", "id": "f166:m3"} {"signature": "def format_intraday(data: pd.DataFrame, ticker, **kwargs) -> pd.DataFrame:", "body": "if data.empty: return pd.DataFrame()data.columns = pd.MultiIndex.from_product([[ticker], data.rename(columns=dict(numEvents='')).columns], names=['', ''])data.index.name = Noneif kwargs.get('', False):kw_xs = dict(axis=, level=)close = data.xs('', **kw_xs)volume = data.xs('', **kw_xs).iloc[:, ]return close.loc[volume > ] if volume.min() > else closeelse: return data", "docstring": "Format intraday data\n\nArgs:\n data: pd.DataFrame from bdib\n ticker: ticker\n\nReturns:\n pd.DataFrame\n\nExamples:\n >>> format_intraday(\n ... data=pd.read_parquet('xbbg/tests/data/sample_bdib.parq'),\n ... ticker='SPY US Equity',\n ... ).xs('close', axis=1, level=1, drop_level=False)\n ticker SPY US Equity\n field close\n 2018-12-28 09:30:00-05:00 249.67\n 2018-12-28 09:31:00-05:00 249.54\n 2018-12-28 09:32:00-05:00 249.22\n 2018-12-28 09:33:00-05:00 249.01\n 2018-12-28 09:34:00-05:00 248.86\n >>> format_intraday(\n ... data=pd.read_parquet('xbbg/tests/data/sample_bdib.parq'),\n ... ticker='SPY US Equity', price_only=True\n ... 
)\n ticker SPY US Equity\n 2018-12-28 09:30:00-05:00 249.67\n 2018-12-28 09:31:00-05:00 249.54\n 2018-12-28 09:32:00-05:00 249.22\n 2018-12-28 09:33:00-05:00 249.01\n 2018-12-28 09:34:00-05:00 248.86", "id": "f166:m4"} {"signature": "def info_qry(tickers, flds) -> str:", "body": "full_list = ''.join([f''] + [f'' for n in range(, len(tickers), )])return f''", "docstring": "Logging info for given tickers and fields\n\nArgs:\n tickers: tickers\n flds: fields\n\nReturns:\n str\n\nExamples:\n >>> print(info_qry(\n ... tickers=['NVDA US Equity'], flds=['Name', 'Security_Name']\n ... ))\n tickers: ['NVDA US Equity']\n fields: ['Name', 'Security_Name']", "id": "f166:m5"} {"signature": "def missing_info(**kwargs) -> str:", "body": "func = kwargs.pop('', '')if '' in kwargs: kwargs[''] = kwargs[''].replace('', '')info = utils.to_str(kwargs, fmt='', sep='')[:-]return f''", "docstring": "Full information for missing query", "id": "f167:m0"} {"signature": "def current_missing(**kwargs) -> int:", "body": "data_path = os.environ.get(BBG_ROOT, '').replace('', '')if not data_path: return return len(files.all_files(f''))", "docstring": "Check number of trials for missing values\n\nReturns:\n int: number of trials already tried", "id": "f167:m1"} {"signature": "def update_missing(**kwargs):", "body": "data_path = os.environ.get(BBG_ROOT, '').replace('', '')if not data_path: returnif len(kwargs) == : returnlog_path = f''cnt = len(files.all_files(log_path)) + files.create_folder(log_path)open(f'', '').close()", "docstring": "Update number of trials for missing values", "id": "f167:m2"} {"signature": "def with_bloomberg(func):", "body": "@wraps(func)def wrapper(*args, **kwargs):scope = utils.func_scope(func=func)param = inspect.signature(func).parametersport = kwargs.pop('', _PORT_)timeout = kwargs.pop('', _TIMEOUT_)restart = kwargs.pop('', False)all_kw = {k: args[n] if n < len(args) else v.defaultfor n, (k, v) in enumerate(param.items()) if k != ''}all_kw.update(kwargs)log_level = kwargs.get('', logs.LOG_LEVEL)for to_list in ['', '']:conv = all_kw.get(to_list, None)if hasattr(conv, ''):all_kw[to_list] = getattr(conv, '')()if isinstance(conv, str):all_kw[to_list] = [conv]cached_data = []if scope in ['', '']:to_qry = cached.bdp_bds_cache(func=func.__name__, **all_kw)cached_data += to_qry.cached_dataif not (to_qry.tickers and to_qry.flds):if not cached_data: return pd.DataFrame()res = pd.concat(cached_data, sort=False).reset_index(drop=True)if not all_kw.get('', False):res = assist.format_output(data=res, source=func.__name__,col_maps=all_kw.get('', dict()))return resall_kw[''] = to_qry.tickersall_kw[''] = to_qry.fldsif scope in ['']:data_file = storage.hist_file(ticker=all_kw[''], dt=all_kw[''], typ=all_kw[''],)if files.exists(data_file):logger = logs.get_logger(func, level=log_level)if all_kw.get('', False): returnlogger.debug(f'')return assist.format_intraday(data=pd.read_parquet(data_file), **all_kw)_, new = create_connection(port=port, timeout=timeout, restart=restart)res = func(**{k: v for k, v in all_kw.items() if k not in ['', '']})if new: delete_connection()if scope.startswith('') and isinstance(res, list):final = cached_data + resif not final: return pd.DataFrame()res = pd.DataFrame(pd.concat(final, sort=False))if (scope in ['', ''])and (not all_kw.get('', False)):res = assist.format_output(data=res.reset_index(drop=True), source=func.__name__,col_maps=all_kw.get('', dict()),)return resreturn wrapper", "docstring": "Wrapper function for Bloomberg connection\n\nArgs:\n func: function to wrap", "id": 
"f169:m0"} {"signature": "def create_connection(port=_PORT_, timeout=_TIMEOUT_, restart=False):", "body": "if _CON_SYM_ in globals():if not isinstance(globals()[_CON_SYM_], pdblp.BCon):del globals()[_CON_SYM_]if (_CON_SYM_ in globals()) and (not restart):con = globals()[_CON_SYM_]if getattr(con, '').start(): con.start()return con, Falseelse:con = pdblp.BCon(port=port, timeout=timeout)globals()[_CON_SYM_] = concon.start()return con, True", "docstring": "Create Bloomberg connection\n\nReturns:\n (Bloomberg connection, if connection is new)", "id": "f169:m1"} {"signature": "def delete_connection():", "body": "if _CON_SYM_ in globals():con = globals().pop(_CON_SYM_)if not getattr(con, '').start(): con.stop()", "docstring": "Stop and destroy Bloomberg connection", "id": "f169:m2"} {"signature": "def flatten(iterable, maps=None, unique=False) -> list:", "body": "if iterable is None: return []if maps is None: maps = dict()if isinstance(iterable, (str, int, float)):return [maps.get(iterable, iterable)]else:x = [maps.get(item, item) for item in _to_gen_(iterable)]return list(set(x)) if unique else x", "docstring": "Flatten any array of items to list\n\nArgs:\n iterable: any array or value\n maps: map items to values\n unique: drop duplicates\n\nReturns:\n list: flattened list\n\nReferences:\n https://stackoverflow.com/a/40857703/1332656\n\nExamples:\n >>> flatten('abc')\n ['abc']\n >>> flatten(1)\n [1]\n >>> flatten(1.)\n [1.0]\n >>> flatten(['ab', 'cd', ['xy', 'zz']])\n ['ab', 'cd', 'xy', 'zz']\n >>> flatten(['ab', ['xy', 'zz']], maps={'xy': '0x'})\n ['ab', '0x', 'zz']", "id": "f170:m0"} {"signature": "def _to_gen_(iterable):", "body": "from collections import Iterablefor elm in iterable:if isinstance(elm, Iterable) and not isinstance(elm, (str, bytes)):yield from flatten(elm)else: yield elm", "docstring": "Recursively iterate lists and tuples", "id": "f170:m1"} {"signature": "def fmt_dt(dt, fmt='') -> str:", "body": "return pd.Timestamp(dt).strftime(fmt)", "docstring": "Format date string\n\nArgs:\n dt: any date format\n fmt: output date format\n\nReturns:\n str: date format\n\nExamples:\n >>> fmt_dt(dt='2018-12')\n '2018-12-01'\n >>> fmt_dt(dt='2018-12-31', fmt='%Y%m%d')\n '20181231'", "id": "f170:m2"} {"signature": "def cur_time(typ='', tz=DEFAULT_TZ) -> (datetime.date, str):", "body": "dt = pd.Timestamp('', tz=tz)if typ == '': return dt.strftime('')if typ == '': return dt.strftime('')if typ == '': return dt.strftime('')if typ == '': return dtreturn dt.date()", "docstring": "Current time\n\nArgs:\n typ: one of ['date', 'time', 'time_path', 'raw', '']\n tz: timezone\n\nReturns:\n relevant current time or date\n\nExamples:\n >>> cur_dt = pd.Timestamp('now')\n >>> cur_time(typ='date') == cur_dt.strftime('%Y-%m-%d')\n True\n >>> cur_time(typ='time') == cur_dt.strftime('%Y-%m-%d %H:%M:%S')\n True\n >>> cur_time(typ='time_path') == cur_dt.strftime('%Y-%m-%d/%H-%M-%S')\n True\n >>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp)\n True\n >>> cur_time(typ='') == cur_dt.date()\n True", "id": "f170:m3"} {"signature": "def fstr(fmt, **kwargs) -> str:", "body": "locals().update(kwargs)return f''", "docstring": "Delayed evaluation of f-strings\n\nArgs:\n fmt: f-string but in terms of normal string, i.e., '{path}/{file}.parq'\n **kwargs: variables for f-strings, i.e., path, file = '/data', 'daily'\n\nReturns:\n FString object\n\nReferences:\n https://stackoverflow.com/a/42497694/1332656\n https://stackoverflow.com/a/4014070/1332656\n\nExamples:\n >>> fmt = '{data_path}/{data_file}.parq'\n 
>>> fstr(fmt, data_path='your/data/path', data_file='sample')\n 'your/data/path/sample.parq'", "id": "f170:m4"} {"signature": "def to_str(data: dict, fmt='', sep='', public_only=True) -> str:", "body": "if public_only: keys = list(filter(lambda vv: vv[] != '', data.keys()))else: keys = list(data.keys())return '' + sep.join([to_str(data=v, fmt=fmt, sep=sep)if isinstance(v, dict) else fstr(fmt=fmt, key=k, value=v)for k, v in data.items() if k in keys]) + ''", "docstring": "Convert dict to string\n\nArgs:\n data: dict\n fmt: how key and value being represented\n sep: how pairs of key and value are seperated\n public_only: if display public members only\n\nReturns:\n str: string representation of dict\n\nExamples:\n >>> test_dict = dict(b=1, a=0, c=2, _d=3)\n >>> to_str(test_dict)\n '{b=1, a=0, c=2}'\n >>> to_str(test_dict, sep='|')\n '{b=1|a=0|c=2}'\n >>> to_str(test_dict, public_only=False)\n '{b=1, a=0, c=2, _d=3}'", "id": "f170:m5"} {"signature": "def func_scope(func) -> str:", "body": "cur_mod = sys.modules[func.__module__]return f''", "docstring": "Function scope name\n\nArgs:\n func: python function\n\nReturns:\n str: module_name.func_name\n\nExamples:\n >>> func_scope(flatten)\n 'xbbg.core.utils.flatten'\n >>> func_scope(time.strftime)\n 'time.strftime'", "id": "f170:m6"} {"signature": "def load_module(full_path):", "body": "from importlib import utilfile_name = full_path.replace('', '').split('')[-]if file_name[-:] != '':raise ImportError(f'')module_name = file_name[:-]spec = util.spec_from_file_location(name=module_name, location=full_path)module = util.module_from_spec(spec=spec)spec.loader.exec_module(module=module)return module", "docstring": "Load module from full path\nArgs:\n full_path: module full path name\nReturns:\n python module\nReferences:\n https://stackoverflow.com/a/67692/1332656\nExamples:\n >>> import os\n >>>\n >>> cur_file = os.path.abspath(__file__).replace('\\\\\\\\', '/')\n >>> cur_path = '/'.join(cur_file.split('/')[:-1])\n >>> load_module(f'{cur_path}/timezone.py').__name__\n 'timezone'\n >>> load_module(f'{cur_path}/timezone.pyc')\n Traceback (most recent call last):\n ImportError: not a python file: timezone.pyc", "id": "f170:m7"} {"signature": "def get_interval(ticker, session) -> Session:", "body": "if '' not in session:session = f''interval = Intervals(ticker=ticker)ss_info = session.split('')return getattr(interval, f'')(*ss_info)", "docstring": "Get interval from defined session\n\nArgs:\n ticker: ticker\n session: session\n\nReturns:\n Session of start_time and end_time\n\nExamples:\n >>> get_interval('005490 KS Equity', 'day_open_30')\n Session(start_time='09:00', end_time='09:30')\n >>> get_interval('005490 KS Equity', 'day_normal_30_20')\n Session(start_time='09:31', end_time='15:00')\n >>> get_interval('005490 KS Equity', 'day_close_20')\n Session(start_time='15:01', end_time='15:20')\n >>> get_interval('700 HK Equity', 'am_open_30')\n Session(start_time='09:30', end_time='10:00')\n >>> get_interval('700 HK Equity', 'am_normal_30_30')\n Session(start_time='10:01', end_time='11:30')\n >>> get_interval('700 HK Equity', 'am_close_30')\n Session(start_time='11:31', end_time='12:00')\n >>> get_interval('ES1 Index', 'day_exact_2130_2230')\n Session(start_time=None, end_time=None)\n >>> get_interval('ES1 Index', 'allday_exact_2130_2230')\n Session(start_time='21:30', end_time='22:30')\n >>> get_interval('ES1 Index', 'allday_exact_2130_0230')\n Session(start_time='21:30', end_time='02:30')\n >>> get_interval('AMLP US', 'day_open_30')\n 
Session(start_time=None, end_time=None)\n >>> get_interval('7974 JP Equity', 'day_normal_180_300') is SessNA\n True\n >>> get_interval('Z 1 Index', 'allday_normal_30_30')\n Session(start_time='01:31', end_time='20:30')\n >>> get_interval('GBP Curncy', 'day')\n Session(start_time='17:02', end_time='17:00')", "id": "f171:m0"} {"signature": "def shift_time(start_time, mins) -> str:", "body": "s_time = pd.Timestamp(start_time)e_time = s_time + np.sign(mins) * pd.Timedelta(f'')return e_time.strftime('')", "docstring": "Shift start time by mins\n\nArgs:\n start_time: start time in terms of HH:MM string\n mins: number of minutes (+ / -)\n\nReturns:\n end time in terms of HH:MM string", "id": "f171:m1"} {"signature": "def __init__(self, ticker):", "body": "self.ticker = tickerself.exch = const.exch_info(ticker=ticker)", "docstring": "Args:\n ticker: ticker", "id": "f171:c0:m0"} {"signature": "def market_open(self, session, mins) -> Session:", "body": "if session not in self.exch: return SessNAstart_time = self.exch[session][]return Session(start_time, shift_time(start_time, int(mins)))", "docstring": "Time intervals for market open\n\nArgs:\n session: [allday, day, am, pm, night]\n mins: minutes after open\n\nReturns:\n Session of start_time and end_time", "id": "f171:c0:m1"} {"signature": "def market_close(self, session, mins) -> Session:", "body": "if session not in self.exch: return SessNAend_time = self.exch[session][-]return Session(shift_time(end_time, -int(mins) + ), end_time)", "docstring": "Time intervals for market close\n\nArgs:\n session: [allday, day, am, pm, night]\n mins: minutes before close\n\nReturns:\n Session of start_time and end_time", "id": "f171:c0:m2"} {"signature": "def market_normal(self, session, after_open, before_close) -> Session:", "body": "logger = logs.get_logger(self.market_normal)if session not in self.exch: return SessNAss = self.exch[session]s_time = shift_time(ss[], int(after_open) + )e_time = shift_time(ss[-], -int(before_close))request_cross = pd.Timestamp(s_time) >= pd.Timestamp(e_time)session_cross = pd.Timestamp(ss[]) >= pd.Timestamp(ss[])if request_cross and (not session_cross):logger.warning(f'')return SessNAreturn Session(s_time, e_time)", "docstring": "Time intervals between market\n\nArgs:\n session: [allday, day, am, pm, night]\n after_open: mins after open\n before_close: mins before close\n\nReturns:\n Session of start_time and end_time", "id": "f171:c0:m3"} {"signature": "def market_exact(self, session, start_time: str, end_time: str) -> Session:", "body": "if session not in self.exch: return SessNAss = self.exch[session]same_day = ss[] < ss[-]if not start_time: s_time = ss[]else:s_time = param.to_hour(start_time)if same_day: s_time = max(s_time, ss[])if not end_time: e_time = ss[-]else:e_time = param.to_hour(end_time)if same_day: e_time = min(e_time, ss[-])if same_day and (s_time > e_time): return SessNAreturn Session(start_time=s_time, end_time=e_time)", "docstring": "Explicitly specify start time and end time\n\nArgs:\n session: predefined session\n start_time: start time in terms of HHMM string\n end_time: end time in terms of HHMM string\n\nReturns:\n Session of start_time and end_time", "id": "f171:c0:m4"} {"signature": "def exch_info(ticker: str) -> pd.Series:", "body": "logger = logs.get_logger(exch_info, level='')if '' not in ticker.strip():ticker = f''info = param.load_info(cat='').get(market_info(ticker=ticker).get('', ''), dict())if ('' in info) and ('' not in info):info[''] = info['']if any(req not in info for req in ['', '', 
'']):logger.error(f'')return pd.Series()for ss in ValidSessions:if ss not in info: continueinfo[ss] = [param.to_hour(num=s) for s in info[ss]]return pd.Series(info)", "docstring": "Exchange info for given ticker\n\nArgs:\n ticker: ticker or exchange\n\nReturns:\n pd.Series\n\nExamples:\n >>> exch_info('SPY US Equity')\n tz America/New_York\n allday [04:00, 20:00]\n day [09:30, 16:00]\n pre [04:00, 09:30]\n post [16:01, 20:00]\n dtype: object\n >>> exch_info('ES1 Index')\n tz America/New_York\n allday [18:00, 17:00]\n day [08:00, 17:00]\n dtype: object\n >>> exch_info('Z 1 Index')\n tz Europe/London\n allday [01:00, 21:00]\n day [01:00, 21:00]\n dtype: object\n >>> exch_info('TESTTICKER Corp').empty\n True\n >>> exch_info('US')\n tz America/New_York\n allday [04:00, 20:00]\n day [09:30, 16:00]\n pre [04:00, 09:30]\n post [16:01, 20:00]\n dtype: object", "id": "f172:m0"} {"signature": "def market_info(ticker: str) -> dict:", "body": "t_info = ticker.split()assets = param.load_info('')if (t_info[-] == '') and ('' not in t_info[]):exch = t_info[-]for info in assets.get('', [dict()]):if '' not in info: continueif exch in info['']: return inforeturn dict()if t_info[-] == '':for info in assets.get('', [dict()]):if '' not in info: continueif (t_info[].split('')[] in info['']) or(t_info[][-].isdigit() and (t_info[][:-] in info[''])):return inforeturn dict()if t_info[-] == '':for info in assets.get('', [dict()]):if '' not in info: continueif t_info[][:-] in info['']: return inforeturn dict()if (t_info[-] == '') or ((t_info[-] == '') and ('' in t_info[])):if t_info[-] == '':tck = t_info[].split('')[]else:tck = ''.join(t_info[:-])for info in assets.get('', [dict()]):if '' not in info: continueif (tck[:] == '') and ('' in info['']): return infoif tck in info['']:if t_info[-] == '': return infoif not info.get('', False): return infoif tck[:-].rstrip() in info['']:if info.get('', False): return inforeturn dict()if t_info[-] == '':for info in assets.get('', [dict()]):if '' not in info: continuereturn dict()", "docstring": "Get info for given market\n\nArgs:\n ticker: Bloomberg full ticker\n\nReturns:\n dict\n\nExamples:\n >>> info = market_info('SHCOMP Index')\n >>> info['exch']\n 'EquityChina'\n >>> info = market_info('ICICIC=1 IS Equity')\n >>> info['freq'], info['is_fut']\n ('M', True)\n >>> info = market_info('INT1 Curncy')\n >>> info['freq'], info['is_fut']\n ('M', True)\n >>> info = market_info('CL1 Comdty')\n >>> info['freq'], info['is_fut']\n ('M', True)\n >>> # Wrong tickers\n >>> market_info('C XX Equity')\n {}\n >>> market_info('XXX Comdty')\n {}\n >>> market_info('Bond_ISIN Corp')\n {}\n >>> market_info('XYZ Index')\n {}\n >>> market_info('XYZ Curncy')\n {}", "id": "f172:m1"} {"signature": "def ccy_pair(local, base='') -> CurrencyPair:", "body": "ccy_param = param.load_info(cat='')if f'' in ccy_param:info = ccy_param[f'']elif f'' in ccy_param:info = ccy_param[f'']info[''] = / info.get('', )info[''] = -info.get('', )elif base.lower() == local.lower():info = dict(ticker='')info[''] = if base[-].lower() == base[-]:info[''] /= if local[-].lower() == local[-]:info[''] *= else:logger = logs.get_logger(ccy_pair)logger.error(f'')return CurrencyPair(ticker='', factor=, power=)if '' not in info: info[''] = if '' not in info: info[''] = return CurrencyPair(**info)", "docstring": "Currency pair info\n\nArgs:\n local: local currency\n base: base currency\n\nReturns:\n CurrencyPair\n\nExamples:\n >>> ccy_pair(local='HKD', base='USD')\n CurrencyPair(ticker='HKD Curncy', factor=1.0, power=1)\n >>> 
ccy_pair(local='GBp')\n CurrencyPair(ticker='GBP Curncy', factor=100, power=-1)\n >>> ccy_pair(local='USD', base='GBp')\n CurrencyPair(ticker='GBP Curncy', factor=0.01, power=1)\n >>> ccy_pair(local='XYZ', base='USD')\n CurrencyPair(ticker='', factor=1.0, power=1)\n >>> ccy_pair(local='GBP', base='GBp')\n CurrencyPair(ticker='', factor=0.01, power=1)\n >>> ccy_pair(local='GBp', base='GBP')\n CurrencyPair(ticker='', factor=100.0, power=1)", "id": "f172:m2"} {"signature": "def market_timing(ticker, dt, timing='', tz='') -> str:", "body": "logger = logs.get_logger(market_timing)exch = pd.Series(exch_info(ticker=ticker))if any(req not in exch.index for req in ['', '', '']):logger.error(f'')return ''mkt_time = {'': exch.day[], '': exch.allday[-]}.get(timing, exch.day[-])cur_dt = pd.Timestamp(str(dt)).strftime('')if tz == '':return f''return timezone.tz_convert(f'', to_tz=tz, from_tz=exch.tz)", "docstring": "Market close time for ticker\n\nArgs:\n ticker: ticker name\n dt: date\n timing: [EOD (default), BOD]\n tz: conversion to timezone\n\nReturns:\n str: date & time\n\nExamples:\n >>> market_timing('7267 JT Equity', dt='2018-09-10')\n '2018-09-10 14:58'\n >>> market_timing('7267 JT Equity', dt='2018-09-10', tz=timezone.TimeZone.NY)\n '2018-09-10 01:58:00-04:00'\n >>> market_timing('7267 JT Equity', dt='2018-01-10', tz='NY')\n '2018-01-10 00:58:00-05:00'\n >>> market_timing('7267 JT Equity', dt='2018-09-10', tz='SPX Index')\n '2018-09-10 01:58:00-04:00'\n >>> market_timing('8035 JT Equity', dt='2018-09-10', timing='BOD')\n '2018-09-10 09:01'\n >>> market_timing('Z 1 Index', dt='2018-09-10', timing='FINISHED')\n '2018-09-10 21:00'\n >>> market_timing('TESTTICKER Corp', dt='2018-09-10')\n ''", "id": "f172:m3"} {"signature": "@with_bloombergdef bdp(tickers, flds, **kwargs):", "body": "logger = logs.get_logger(bdp, level=kwargs.pop('', logs.LOG_LEVEL))con, _ = create_connection()ovrds = assist.proc_ovrds(**kwargs)logger.info(f''f'')data = con.ref(tickers=tickers, flds=flds, ovrds=ovrds)if not kwargs.get('', False): return [data]qry_data = []for r, snap in data.iterrows():subset = [r]data_file = storage.ref_file(ticker=snap.ticker, fld=snap.field, ext='', **kwargs)if data_file:if not files.exists(data_file): qry_data.append(data.iloc[subset])files.create_folder(data_file, is_file=True)data.iloc[subset].to_pickle(data_file)return qry_data", "docstring": "Bloomberg reference data\n\nArgs:\n tickers: tickers\n flds: fields to query\n **kwargs: bbg overrides\n\nReturns:\n pd.DataFrame\n\nExamples:\n >>> bdp('IQ US Equity', 'Crncy', raw=True)\n ticker field value\n 0 IQ US Equity Crncy USD\n >>> bdp('IQ US Equity', 'Crncy').reset_index()\n ticker crncy\n 0 IQ US Equity USD", "id": "f174:m0"} {"signature": "@with_bloombergdef bds(tickers, flds, **kwargs):", "body": "logger = logs.get_logger(bds, level=kwargs.pop('', logs.LOG_LEVEL))con, _ = create_connection()ovrds = assist.proc_ovrds(**kwargs)logger.info(f''f'')data = con.bulkref(tickers=tickers, flds=flds, ovrds=ovrds)if not kwargs.get('', False): return [data]qry_data = []for (ticker, fld), grp in data.groupby(['', '']):data_file = storage.ref_file(ticker=ticker, fld=fld, ext='',has_date=kwargs.get('', True), **kwargs)if data_file:if not files.exists(data_file): qry_data.append(grp)files.create_folder(data_file, is_file=True)grp.reset_index(drop=True).to_pickle(data_file)return qry_data", "docstring": "Bloomberg block data\n\nArgs:\n tickers: ticker(s)\n flds: field(s)\n **kwargs: other overrides for query\n -> raw: raw output from `pdbdp` library, 
default False\n\nReturns:\n pd.DataFrame: block data\n\nExamples:\n >>> import os\n >>>\n >>> pd.options.display.width = 120\n >>> s_dt, e_dt = '20180301', '20181031'\n >>> dvd = bds(\n ... 'NVDA US Equity', 'DVD_Hist_All',\n ... DVD_Start_Dt=s_dt, DVD_End_Dt=e_dt, raw=True,\n ... )\n >>> dvd.loc[:, ['ticker', 'name', 'value']].head(8)\n ticker name value\n 0 NVDA US Equity Declared Date 2018-08-16\n 1 NVDA US Equity Ex-Date 2018-08-29\n 2 NVDA US Equity Record Date 2018-08-30\n 3 NVDA US Equity Payable Date 2018-09-21\n 4 NVDA US Equity Dividend Amount 0.15\n 5 NVDA US Equity Dividend Frequency Quarter\n 6 NVDA US Equity Dividend Type Regular Cash\n 7 NVDA US Equity Declared Date 2018-05-10\n >>> dvd = bds(\n ... 'NVDA US Equity', 'DVD_Hist_All',\n ... DVD_Start_Dt=s_dt, DVD_End_Dt=e_dt,\n ... )\n >>> dvd.reset_index().loc[:, ['ticker', 'ex_date', 'dividend_amount']]\n ticker ex_date dividend_amount\n 0 NVDA US Equity 2018-08-29 0.15\n 1 NVDA US Equity 2018-05-23 0.15\n >>> if not os.environ.get('BBG_ROOT', ''):\n ... os.environ['BBG_ROOT'] = f'{files.abspath(__file__, 1)}/tests/data'\n >>> idx_kw = dict(End_Dt='20181220', cache=True)\n >>> idx_wt = bds('DJI Index', 'Indx_MWeight_Hist', **idx_kw)\n >>> idx_wt.round(2).tail().reset_index(drop=True)\n index_member percent_weight\n 0 V UN 3.82\n 1 VZ UN 1.63\n 2 WBA UW 2.06\n 3 WMT UN 2.59\n 4 XOM UN 2.04\n >>> idx_wt = bds('DJI Index', 'Indx_MWeight_Hist', **idx_kw)\n >>> idx_wt.round(2).head().reset_index(drop=True)\n index_member percent_weight\n 0 AAPL UW 4.65\n 1 AXP UN 2.84\n 2 BA UN 9.29\n 3 CAT UN 3.61\n 4 CSCO UW 1.26", "id": "f174:m1"} {"signature": "@with_bloombergdef bdh(tickers, flds=None, start_date=None, end_date='', adjust=None, **kwargs) -> pd.DataFrame:", "body": "logger = logs.get_logger(bdh, level=kwargs.pop('', logs.LOG_LEVEL))if isinstance(adjust, str) and adjust:if adjust == '':kwargs[''] = Truekwargs[''] = Truekwargs[''] = Trueelse:kwargs[''] = '' in adjust or '' in adjustkwargs[''] = '' in adjust or '' in adjustkwargs[''] = '' in adjustcon, _ = create_connection()elms = assist.proc_elms(**kwargs)ovrds = assist.proc_ovrds(**kwargs)if isinstance(tickers, str): tickers = [tickers]if flds is None: flds = ['']if isinstance(flds, str): flds = [flds]e_dt = utils.fmt_dt(end_date, fmt='')if start_date is None:start_date = pd.Timestamp(e_dt) - relativedelta(months=)s_dt = utils.fmt_dt(start_date, fmt='')logger.info(f''f'')logger.debug(f'')res = con.bdh(tickers=tickers, flds=flds, elms=elms, ovrds=ovrds, start_date=s_dt, end_date=e_dt)res.index.name = Noneif (len(flds) == ) and kwargs.get('', False):return res.xs(flds[], axis=, level=)return res", "docstring": "Bloomberg historical data\n\nArgs:\n tickers: ticker(s)\n flds: field(s)\n start_date: start date\n end_date: end date - default today\n adjust: `all`, `dvd`, `normal`, `abn` (=abnormal), `split`, `-` or None\n exact match of above words will adjust for corresponding events\n Case 0: `-` no adjustment for dividend or split\n Case 1: `dvd` or `normal|abn` will adjust for all dividends except splits\n Case 2: `adjust` will adjust for splits and ignore all dividends\n Case 3: `all` == `dvd|split` == adjust for all\n Case 4: None == Bloomberg default OR use kwargs\n **kwargs: overrides\n\nReturns:\n pd.DataFrame\n\nExamples:\n >>> res = bdh(\n ... tickers='VIX Index', flds=['High', 'Low', 'Last_Price'],\n ... start_date='2018-02-05', end_date='2018-02-07',\n ... 
).round(2).transpose()\n >>> res.index.name = None\n >>> res.columns.name = None\n >>> res\n 2018-02-05 2018-02-06 2018-02-07\n VIX Index High 38.80 50.30 31.64\n Low 16.80 22.42 21.17\n Last_Price 37.32 29.98 27.73\n >>> bdh(\n ... tickers='AAPL US Equity', flds='Px_Last',\n ... start_date='20140605', end_date='20140610', adjust='-'\n ... ).round(2)\n ticker AAPL US Equity\n field Px_Last\n 2014-06-05 647.35\n 2014-06-06 645.57\n 2014-06-09 93.70\n 2014-06-10 94.25\n >>> bdh(\n ... tickers='AAPL US Equity', flds='Px_Last',\n ... start_date='20140606', end_date='20140609',\n ... CshAdjNormal=False, CshAdjAbnormal=False, CapChg=False,\n ... ).round(2)\n ticker AAPL US Equity\n field Px_Last\n 2014-06-06 645.57\n 2014-06-09 93.70", "id": "f174:m2"} {"signature": "@with_bloombergdef bdib(ticker, dt, typ='', **kwargs) -> pd.DataFrame:", "body": "from xbbg.core import missinglogger = logs.get_logger(bdib, level=kwargs.pop('', logs.LOG_LEVEL))t_1 = pd.Timestamp('').date() - pd.Timedelta('')whole_day = pd.Timestamp(dt).date() < t_1batch = kwargs.pop('', False)if (not whole_day) and batch:logger.warning(f'')return pd.DataFrame()cur_dt = pd.Timestamp(dt).strftime('')asset = ticker.split()[-]info_log = f''if asset in ['', '', '', '']:exch = const.exch_info(ticker=ticker)if exch.empty: return pd.DataFrame()else:logger.error(f'')return pd.DataFrame()time_fmt = ''time_idx = pd.DatetimeIndex([f'', f'']).tz_localize(exch.tz).tz_convert(DEFAULT_TZ).tz_convert('')if time_idx[] > time_idx[]: time_idx -= pd.TimedeltaIndex(['', ''])q_tckr = tickerif exch.get('', False):if '' not in exch:logger.error(f'')is_sprd = exch.get('', False) and (len(ticker[:-]) != exch[''][])if not is_sprd:q_tckr = fut_ticker(gen_ticker=ticker, dt=dt, freq=exch[''])if q_tckr == '':logger.error(f'')return pd.DataFrame()info_log = f''miss_kw = dict(ticker=ticker, dt=dt, typ=typ, func='')cur_miss = missing.current_missing(**miss_kw)if cur_miss >= :if batch: return pd.DataFrame()logger.info(f'')return pd.DataFrame()logger.info(f'')con, _ = create_connection()try:data = con.bdib(ticker=q_tckr, event_type=typ, interval=,start_datetime=time_idx[].strftime(time_fmt),end_datetime=time_idx[].strftime(time_fmt),)except KeyError:data = pd.DataFrame()if not isinstance(data, pd.DataFrame):raise ValueError(f'')if data.empty:logger.warning(f'')missing.update_missing(**miss_kw)return pd.DataFrame()data = data.tz_localize('').tz_convert(exch.tz)storage.save_intraday(data=data, ticker=ticker, dt=dt, typ=typ)return pd.DataFrame() if batch else assist.format_intraday(data=data, ticker=ticker)", "docstring": "Bloomberg intraday bar data\n\nArgs:\n ticker: ticker name\n dt: date to download\n typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]\n **kwargs:\n batch: whether is batch process to download data\n log: level of logs\n\nReturns:\n pd.DataFrame", "id": "f174:m3"} {"signature": "def intraday(ticker, dt, session='', **kwargs) -> pd.DataFrame:", "body": "from xbbg.core import intervalscur_data = bdib(ticker=ticker, dt=dt, typ=kwargs.get('', ''))if cur_data.empty: return pd.DataFrame()fmt = ''ss = intervals.SessNAref = kwargs.get('', None)exch = pd.Series() if ref is None else const.exch_info(ticker=ref)if session: ss = intervals.get_interval(ticker=kwargs.get('', ticker), session=session)start_time = kwargs.get('', None)end_time = kwargs.get('', None)if ss != intervals.SessNA:start_time = pd.Timestamp(ss.start_time).strftime(fmt)end_time = pd.Timestamp(ss.end_time).strftime(fmt)if start_time and end_time:kw = dict(start_time=start_time, 
end_time=end_time)if not exch.empty:cur_tz = cur_data.index.tzres = cur_data.tz_convert(exch.tz).between_time(**kw)if kwargs.get('', False):res = res.tz_convert(cur_tz)return pd.DataFrame(res)return pd.DataFrame(cur_data.between_time(**kw))return cur_data", "docstring": "Bloomberg intraday bar data within market session\n\nArgs:\n ticker: ticker\n dt: date\n session: examples include\n day_open_30, am_normal_30_30, day_close_30, allday_exact_0930_1000\n **kwargs:\n ref: reference ticker or exchange for timezone\n keep_tz: if keep tz if reference ticker / exchange is given\n start_time: start time\n end_time: end time\n typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]\n\nReturns:\n pd.DataFrame", "id": "f174:m4"} {"signature": "@with_bloombergdef earning(ticker, by='', typ='', ccy=None, level=None, **kwargs) -> pd.DataFrame:", "body": "ovrd = '' if by[].upper() == '' else ''new_kw = dict(raw=True, Product_Geo_Override=ovrd)header = bds(tickers=ticker, flds='', **new_kw, **kwargs)if ccy: kwargs[''] = ccyif level: kwargs[''] = leveldata = bds(tickers=ticker, flds=f'', **new_kw, **kwargs)return assist.format_earning(data=data, header=header)", "docstring": "Earning exposures by Geo or Products\n\nArgs:\n ticker: ticker name\n by: [G(eo), P(roduct)]\n typ: type of earning, start with `PG_` in Bloomberg FLDS - default `Revenue`\n ccy: currency of earnings\n level: hierarchy level of earnings\n\nReturns:\n pd.DataFrame\n\nExamples:\n >>> data = earning('AMD US Equity', Eqy_Fund_Year=2017, Number_Of_Periods=1)\n >>> data.round(2)\n level fy2017 fy2017_pct\n Asia-Pacific 1.0 3540.0 66.43\n \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0China 2.0 1747.0 49.35\n \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0Japan 2.0 1242.0 35.08\n \u00a0\u00a0\u00a0\u00a0\u00a0\u00a0Singapore 2.0 551.0 15.56\n United States 1.0 1364.0 25.60\n Europe 1.0 263.0 4.94\n Other Countries 1.0 162.0 3.04", "id": "f174:m5"} {"signature": "def dividend(tickers, typ='', start_date=None, end_date=None, **kwargs) -> pd.DataFrame:", "body": "if isinstance(tickers, str): tickers = [tickers]tickers = [t for t in tickers if ('' in t) and ('' not in t)]fld = {'': '', '': '','': '', '': '','': '', '': '','': '','': '','': '','': '',}.get(typ, typ)if (fld == '') and ('' not in kwargs):kwargs[''] = ''if fld in ['', '', '','', '',]:if start_date: kwargs[''] = utils.fmt_dt(start_date, fmt='')if end_date: kwargs[''] = utils.fmt_dt(end_date, fmt='')kwargs[''] = {'': '', '': '','': '', '': '','': '', '': '','': '', '': '','': '', '': '','': '','': '','': '', '': '',}return bds(tickers=tickers, flds=fld, raw=False, **kwargs)", "docstring": "Bloomberg dividend / split history\n\nArgs:\n tickers: list of tickers\n typ: dividend adjustment type\n `all`: `DVD_Hist_All`\n `dvd`: `DVD_Hist`\n `split`: `Eqy_DVD_Hist_Splits`\n `gross`: `Eqy_DVD_Hist_Gross`\n `adjust`: `Eqy_DVD_Adjust_Fact`\n `adj_fund`: `Eqy_DVD_Adj_Fund`\n `with_amt`: `DVD_Hist_All_with_Amt_Status`\n `dvd_amt`: `DVD_Hist_with_Amt_Status`\n `gross_amt`: `DVD_Hist_Gross_with_Amt_Stat`\n `projected`: `BDVD_Pr_Ex_Dts_DVD_Amts_w_Ann`\n start_date: start date\n end_date: end date\n **kwargs: overrides\n\nReturns:\n pd.DataFrame\n\nExamples:\n >>> res = dividend(\n ... tickers=['C US Equity', 'NVDA US Equity', 'MS US Equity'],\n ... start_date='2018-01-01', end_date='2018-05-01'\n ... 
)\n >>> res.index.name = None\n >>> res.loc[:, ['ex_date', 'rec_date', 'dvd_amt']].round(2)\n ex_date rec_date dvd_amt\n C US Equity 2018-02-02 2018-02-05 0.32\n MS US Equity 2018-04-27 2018-04-30 0.25\n MS US Equity 2018-01-30 2018-01-31 0.25\n NVDA US Equity 2018-02-22 2018-02-23 0.15", "id": "f174:m6"} {"signature": "@with_bloombergdef active_futures(ticker: str, dt) -> str:", "body": "t_info = ticker.split()prefix, asset = ''.join(t_info[:-]), t_info[-]info = const.market_info(f'')f1, f2 = f'', f''fut_2 = fut_ticker(gen_ticker=f2, dt=dt, freq=info[''])fut_1 = fut_ticker(gen_ticker=f1, dt=dt, freq=info[''])fut_tk = bdp(tickers=[fut_1, fut_2], flds='', cache=True)if pd.Timestamp(dt).month < pd.Timestamp(fut_tk.last_tradeable_dt[]).month: return fut_1d1 = bdib(ticker=f1, dt=dt)d2 = bdib(ticker=f2, dt=dt)return fut_1 if d1[f1].volume.sum() > d2[f2].volume.sum() else fut_2", "docstring": "Active futures contract\n\nArgs:\n ticker: futures ticker, i.e., ESA Index, Z A Index, CLA Comdty, etc.\n dt: date\n\nReturns:\n str: ticker name", "id": "f174:m7"} {"signature": "@with_bloombergdef fut_ticker(gen_ticker: str, dt, freq: str, log=logs.LOG_LEVEL) -> str:", "body": "logger = logs.get_logger(fut_ticker, level=log)dt = pd.Timestamp(dt)t_info = gen_ticker.split()asset = t_info[-]if asset in ['', '', '']:ticker = ''.join(t_info[:-])prefix, idx, postfix = ticker[:-], int(ticker[-]) - , assetelif asset == '':ticker = t_info[]prefix, idx, postfix = ticker[:-], int(ticker[-]) - , ''.join(t_info[:])else:logger.error(f'')return ''month_ext = if asset == '' else months = pd.date_range(start=dt, periods=max(idx + month_ext, ), freq=freq)logger.debug(f'')def to_fut(month):return prefix + const.Futures[month.strftime('')] +month.strftime('')[-] + '' + postfixfut = [to_fut(m) for m in months]logger.debug(f'')try:fut_matu = bdp(tickers=fut, flds='', cache=True)except Exception as e1:logger.error(f'')try:fut = fut[:-]logger.debug(f'')fut_matu = bdp(tickers=fut, flds='', cache=True)except Exception as e2:logger.error(f'')return ''sub_fut = fut_matu[pd.DatetimeIndex(fut_matu.last_tradeable_dt) > dt]logger.debug(f'')logger.debug(f'')return sub_fut.index.values[idx]", "docstring": "Get proper ticker from generic ticker\n\nArgs:\n gen_ticker: generic ticker\n dt: date\n freq: futures contract frequency\n log: level of logs\n\nReturns:\n str: exact futures ticker", "id": "f174:m8"} {"signature": "@with_bloombergdef check_hours(tickers, tz_exch, tz_loc=DEFAULT_TZ) -> pd.DataFrame:", "body": "cols = ['', '']con, _ = create_connection()hours = con.ref(tickers=tickers, flds=cols)cur_dt = pd.Timestamp('').strftime('')hours.loc[:, ''] = hours.value.astype(str).str[:-]hours.loc[:, ''] = pd.DatetimeIndex(cur_dt + hours.value.astype(str)).tz_localize(tz_loc).tz_convert(tz_exch).strftime('')hours = pd.concat([hours.set_index(['', '']).exch.unstack().loc[:, cols],hours.set_index(['', '']).local.unstack().loc[:, cols],], axis=)hours.columns = ['', '', '', '']return hours", "docstring": "Check exchange hours vs local hours\n\nArgs:\n tickers: list of tickers\n tz_exch: exchange timezone\n tz_loc: local timezone\n\nReturns:\n Local and exchange hours", "id": "f174:m9"} {"signature": "def parse_version(package):", "body": "init_file = f''with open(init_file, '', encoding='') as f:for line in f.readlines():if '' in line:return line.split('')[].strip()[:-]return ''", "docstring": "Parse versions", "id": "f175:m0"} {"signature": "def parse_markdown():", "body": "readme_file = f''if path.exists(readme_file):with open(readme_file, '', 
encoding='') as f:long_description = f.read()return long_description", "docstring": "Parse markdown as description", "id": "f175:m1"} {"signature": "def parse_description(markdown=True):", "body": "if markdown: return parse_markdown()try:from pypandoc import convertreadme_file = f''if not path.exists(readme_file):raise ImportErrorreturn convert(readme_file, '')except ImportError:return parse_markdown()", "docstring": "Parse the description in the README file", "id": "f175:m2"} {"signature": "def put(self, metrics):", "body": "if type(metrics) == list:for metric in metrics:self.c.put_metric_data(**metric)else:self.c.put_metric_data(**metrics)", "docstring": "Put metrics to cloudwatch. Metric should be instance or list of\ninstances of CloudWatchMetric", "id": "f177:c0:m1"} {"signature": "async def fitness(self) -> float:", "body": "return ", "docstring": "Returns a number that indicates, on an arbitrary scale, how capable the executing node\n is of satisfying this resource. A larger value indicates more capability, while a falsy\n value indicates the resource is unavailable or unusable. Truthy values returned here will\n only be compared against other truthy values returned by resources of the same type.", "id": "f182:c1:m3"} {"signature": "def import_submodules(package, recursive=True):", "body": "if isinstance(package, str):package = importlib.import_module(package)results = {}for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):full_name = package.__name__ + '' + nameresults[full_name] = importlib.import_module(full_name)if recursive and is_pkg:results.update(import_submodules(full_name))return results", "docstring": "Import all submodules of a module, recursively, including subpackages\n\n :param package: package (name or actual module)\n :type package: str | module\n :rtype: dict[str, types.ModuleType]", "id": "f183:m1"} {"signature": "def block_resource_fitnesses(self, block: block.Block):", "body": "if not block.resources:return {n: for n in self.config.nodes.keys()}node_fitnesses = {}for resource in block.resources:resource_fitnesses = self.resource_fitnesses(resource)if not resource_fitnesses:raise UnassignableBlock(block.name)max_fit = max(resource_fitnesses.values())min_fit = min(resource_fitnesses.values())for node, fitness in resource_fitnesses.items():if node not in node_fitnesses:node_fitnesses[node] = {}if not fitness:node_fitnesses[node][resource.describe()] = Falseelse:if max_fit - min_fit:node_fitnesses[node][resource.describe()] = (fitness - min_fit) / (max_fit - min_fit)else:node_fitnesses[node][resource.describe()] = res = {}for node, res_fits in node_fitnesses.items():fit_sum = for res_desc, fit in res_fits.items():if fit is False:fit_sum = Falsebreakfit_sum += fitif fit_sum is False:res[node] = Falsecontinueres[node] = fit_sumreturn res", "docstring": "Returns a map of nodename to average fitness value for this block.\n Assumes that required resources have been checked on all nodes.", "id": "f207:c4:m12"} {"signature": "def parse_generator_doubling(config):", "body": "start = if '' in config:start = int(config[''])def generator():val = startwhile(True):yield valval = val * return generator()", "docstring": "Returns generators that double with each value returned\n Config includes optional start value", "id": "f213:m0"} {"signature": "@staticmethoddef parse(config):", "body": "if not isinstance(config, basestring):raise TypeError(\"\")validator = ContainsValidator()validator.contains_string = configreturn validator", "docstring": "Parse a contains 
validator, which takes as the config a simple string to find", "id": "f213:c0:m1"} {"signature": "def bind_variable(self, variable_name, variable_value):", "body": "str_name = str(variable_name)prev = self.variables.get(str_name)if prev != variable_value:self.variables[str(variable_name)] = variable_valueself.mod_count = self.mod_count + ", "docstring": "Bind a named variable to a value within the context\n This allows for passing in variables in testing", "id": "f215:c0:m0"} {"signature": "def add_generator(self, generator_name, generator):", "body": "if not isinstance(generator, types.GeneratorType):raise ValueError(''.format(generator_name))self.generators[str(generator_name)] = generator", "docstring": "Adds a generator to the context, this can be used to set values for a variable\n Once created, you can set values with the generator via bind_generator_next", "id": "f215:c0:m2"} {"signature": "def bind_generator_next(self, variable_name, generator_name):", "body": "str_gen_name = str(generator_name)str_name = str(variable_name)val = next(self.generators[str_gen_name])prev = self.variables.get(str_name)if prev != val:self.variables[str_name] = valself.mod_count = self.mod_count + return val", "docstring": "Binds the next value for generator_name to variable_name and return value used", "id": "f215:c0:m3"} {"signature": "def get_value(self, variable_name):", "body": "return self.variables.get(str(variable_name))", "docstring": "Get bound variable value, or return none if not set", "id": "f215:c0:m5"} {"signature": "def setUp(self):", "body": "config_args = ('', os.path.join(djangopath, ''))proc = Process(target=call_command, args=config_args)proc.start()self.server_process = proctime.sleep()", "docstring": "Start a mini Django-tastypie REST webapp with test data for testing REST tests", "id": "f219:c0:m0"} {"signature": "def tearDown(self):", "body": "self.server_process.terminate()self.server_process = None", "docstring": "Stop the server process", "id": "f219:c0:m1"} {"signature": "def median(array):", "body": "mysorted = [x for x in array]mysorted.sort()middle = int(len(mysorted) / ) if len(mysorted) % == : return float((mysorted[middle] + mysorted[middle - ])) / else:return mysorted[middle]", "docstring": "Get the median of an array", "id": "f223:m0"} {"signature": "def std_deviation(array):", "body": "if not array or len(array) == :return average = AGGREGATES[''](array)variance = map(lambda x: (x - average)**, array)try:len(variance)except TypeError: variance = list(variance)stdev = AGGREGATES[''](variance)return math.sqrt(stdev)", "docstring": "Compute the standard deviation of an array of numbers", "id": "f223:m1"} {"signature": "def realize_partial(self, context=None):", "body": "if not self.is_dynamic():return selfif self.is_context_modifier():return selfelse:copyout = copy.coppass", "docstring": "Attempt to template out what is possible for this benchmark", "id": "f223:m2"} {"signature": "def parse_benchmark(base_url, node):", "body": "node = lowercase_keys(flatten_dictionaries(node)) benchmark = Benchmark()benchmark = Test.parse_test(base_url, node, benchmark)for key, value in node.items():if key == u'':benchmark.warmup_runs = int(value)elif key == u'':benchmark.benchmark_runs = int(value)elif key == u'':format = value.lower()if format in OUTPUT_FORMATS:benchmark.output_format = formatelse:raise ValueError('' + format)elif key == u'':if not isinstance(value, basestring):raise ValueError(\"\")benchmark.output_file = valueelif key == u'':if isinstance(value, 
basestring):benchmark.add_metric(tests.coerce_to_string(value))elif isinstance(value, list) or isinstance(value, set):for metric in value:if isinstance(metric, dict):for metricname, aggregate in metric.items():if not isinstance(metricname, basestring):raise TypeError(\"\")if not isinstance(aggregate, basestring):raise TypeError(\"\")benchmark.add_metric(tests.coerce_to_string(metricname),tests.coerce_to_string(aggregate))elif isinstance(metric, basestring):benchmark.add_metric(tests.coerce_to_string(metric))elif isinstance(value, dict):for metricname, aggregate in value.items():if not isinstance(metricname, basestring):raise TypeError(\"\")if not isinstance(aggregate, basestring):raise TypeError(\"\")benchmark.add_metric(tests.coerce_to_string(metricname),tests.coerce_to_string(aggregate))else:raise TypeError(\"\" + str(value))return benchmark", "docstring": "Try building a benchmark configuration from deserialized configuration root node", "id": "f223:m4"} {"signature": "def ninja_copy(self):", "body": "output = Benchmark()myvars = vars(self)output.__dict__ = myvars.copy()return output", "docstring": "Optimization: limited, fast copy of benchmark, overrides Test parent method", "id": "f223:c0:m0"} {"signature": "def add_metric(self, metric_name, aggregate=None):", "body": "clean_metric = metric_name.lower().strip()if clean_metric.lower() not in METRICS:raise Exception(\"\" + metric_name +\"\")self.metrics.add(clean_metric)if not aggregate:self.raw_metrics.add(clean_metric)elif aggregate.lower().strip() in AGGREGATES:clean_aggregate = aggregate.lower().strip()current_aggregates = self.aggregated_metrics.get(clean_metric, list())current_aggregates.append(clean_aggregate)self.aggregated_metrics[clean_metric] = current_aggregateselse:raise Exception(\"\" + aggregate +\"\")return self", "docstring": "Add a metric-aggregate pair to the benchmark, where metric is a number to measure from curl, and aggregate is an aggregation function\n (See METRICS and AGGREGATES)\n If aggregate is not defined (False, empty, or None), then the raw number is reported\n Returns self, for fluent-style construction of config", "id": "f223:c0:m1"} {"signature": "def parse_headers(header_string):", "body": "if not header_string:return list()request, headers = header_string.split('', )if not headers:return list()if sys.version_info < (,):header_msg = message_from_string(headers.encode(HEADER_ENCODING))return [(text_type(k.lower(), HEADER_ENCODING), text_type(v, HEADER_ENCODING))for k, v in header_msg.items()]else:header_msg = message_from_string(headers)return [(k.lower(), v) for k, v in header_msg.items()]", "docstring": "Parse a header-string into individual headers\n Implementation based on: http://stackoverflow.com/a/5955949/95122\n Note that headers are a list of (key, value) since duplicate headers are allowed\n\n NEW NOTE: keys & values are unicode strings, but can only contain ISO-8859-1 characters", "id": "f231:m1"} {"signature": "def parse_configuration(node, base_config=None):", "body": "test_config = base_configif not test_config:test_config = TestConfig()node = lowercase_keys(flatten_dictionaries(node)) for key, value in node.items():if key == u'':test_config.timeout = int(value)elif key == u'':test_config.print_bodies = safe_to_bool(value)elif key == u'':test_config.retries = int(value)elif key == u'':if not test_config.variable_binds:test_config.variable_binds = dict()test_config.variable_binds.update(flatten_dictionaries(value))elif key == u'':flat = flatten_dictionaries(value)gen_map = dict()for
generator_name, generator_config in flat.items():gen = parse_generator(generator_config)gen_map[str(generator_name)] = gentest_config.generators = gen_mapreturn test_config", "docstring": "Parse input config to configuration information", "id": "f231:m3"} {"signature": "def read_file(path):", "body": "with open(path, \"\") as f:string = f.read()f.close()return string", "docstring": "Read an input into a file, doing necessary conversions around relative path handling", "id": "f231:m4"} {"signature": "def analyze_benchmark_results(benchmark_result, benchmark):", "body": "output = BenchmarkResult()output.name = benchmark_result.nameoutput.group = benchmark_result.groupoutput.failures = benchmark_result.failuresraw_results = benchmark_result.resultstemp = dict()for metric in benchmark.raw_metrics:temp[metric] = raw_results[metric]output.results = tempaggregate_results = list()for metricname, aggregate_list in benchmark.aggregated_metrics.items():numbers = raw_results[metricname]for aggregate_name in aggregate_list:if numbers: aggregate_function = AGGREGATES[aggregate_name]aggregate_results.append((metricname, aggregate_name, aggregate_function(numbers)))else:aggregate_results.append((metricname, aggregate_name, None))output.aggregates = aggregate_resultsreturn output", "docstring": "Take a benchmark result containing raw benchmark results, and do aggregation by\n applying functions\n\n Aggregates come out in format of metricname, aggregate_name, result", "id": "f231:m7"} {"signature": "def metrics_to_tuples(raw_metrics):", "body": "if not isinstance(raw_metrics, dict):raise TypeError(\"\")metrics = sorted(raw_metrics.keys())arrays = [raw_metrics[metric] for metric in metrics]num_rows = len(arrays[]) output = list()output.append(tuple(metrics)) for row in xrange(, num_rows):new_row = tuple([arrays[col][row] for col in xrange(, len(arrays))])output.append(new_row)return output", "docstring": "Converts metric dictionary of name:values_array into list of tuples\n Use case: writing out benchmark to CSV, etc\n\n Input:\n {'metric':[value1,value2...], 'metric2':[value1,value2,...]...}\n\n Output: list, with tuple header row, then list of tuples of values\n [('metric','metric',...), (metric1_value1,metric2_value1, ...) ... 
]", "id": "f231:m8"} {"signature": "def register_extensions(modules):", "body": "if isinstance(modules, basestring): modules = [modules]for ext in modules:segments = ext.split('')module = segments.pop()package = ''.join(segments)module = __import__(ext, globals(), locals(), package)extension_applies = {'': validators.register_validator,'': validators.register_comparator,'': validators.register_test,'': validators.register_extractor,'': generators.register_generator}has_registry = Falsefor registry_name, register_function in extension_applies.items():if hasattr(module, registry_name):registry = getattr(module, registry_name)for key, val in registry.items():register_function(key, val)if registry:has_registry = Trueif not has_registry:raise ImportError(\"\".format(ext))", "docstring": "Import the modules and register their respective extensions", "id": "f231:m13"} {"signature": "def main(args):", "body": "if '' in args and args[''] is not None:logger.setLevel(LOGGING_LEVELS.get(args[''].lower(), logging.NOTSET))if '' in args and args['']:extensions = args[''].split('')working_folder = args['']if working_folder not in sys.path:sys.path.insert(, working_folder)register_extensions(extensions)test_file = args['']test_structure = read_test_file(test_file)my_vars = Noneif '' in args and args[''] is not None:my_vars = yaml.safe_load(args[''])if my_vars and not isinstance(my_vars, dict):raise Exception(\"\")base_url = args['']if '' in args and args['']:base_url = ''tests = parse_testsets(base_url, test_structure,working_directory=os.path.dirname(test_file), vars=my_vars)for t in tests:if '' in args and args[''] is not None and bool(args['']):t.config.print_bodies = safe_to_bool(args[''])if '' in args and args[''] is not None and bool(args['']):t.config.print_headers = safe_to_bool(args[''])if '' in args and args[''] is not None:t.config.interactive = safe_to_bool(args[''])if '' in args and args[''] is not None:t.config.verbose = safe_to_bool(args[''])if '' in args and args[''] is not None:t.config.ssl_insecure = safe_to_bool(args[''])if '' in args and args[''] is not None:t.config.skip_term_colors = safe_to_bool(args[''])failures = run_testsets(tests)sys.exit(failures)", "docstring": "Execute a test against the given base url.\n\nKeys allowed for args:\n url - REQUIRED - Base URL\n test - REQUIRED - Test file (yaml)\n print_bodies - OPTIONAL - print response body\n print_headers - OPTIONAL - print response headers\n log - OPTIONAL - set logging level {debug,info,warning,error,critical} (default=warning)\n interactive - OPTIONAL - mode that prints info before and after test exectuion and pauses for user input for each test\n absolute_urls - OPTIONAL - mode that treats URLs in tests as absolute/full URLs instead of relative URLs\n skip_term_colors - OPTIONAL - mode that turn off the output term colors", "id": "f231:m14"} {"signature": "def parse_command_line_args(args_in):", "body": "parser = OptionParser(usage=\"\")parser.add_option(u\"\", help=\"\",action=\"\", type=\"\", dest=\"\")parser.add_option(u\"\", help=\"\",action=\"\", type=\"\", dest=\"\")parser.add_option(u\"\", help=\"\",action=\"\", type=\"\")parser.add_option(u\"\", help=\"\",action=\"\", type=\"\")parser.add_option(u\"\", help=\"\", action=\"\", type=\"\")parser.add_option(u\"\", help=\"\",action=\"\", type=\"\")parser.add_option(u'',help='', action=\"\", type=\"\")parser.add_option(u'', help='', action=\"\", type=\"\")parser.add_option(u'', help='',action='', default=False, dest=\"\")parser.add_option(u'', help='',action='', 
default=False, dest=\"\")parser.add_option(u'', help='',action=\"\", dest=\"\")parser.add_option(u'', help='',action='', default=False, dest=\"\")(args, unparsed_args) = parser.parse_args(args_in)args = vars(args)if not args[''] or not args['']:if len(unparsed_args) == :args[u''] = unparsed_args[]args[u''] = unparsed_args[]elif len(unparsed_args) == and args['']:args[''] = unparsed_args[]elif len(unparsed_args) == and args['']:args[''] = unparsed_args[]else:parser.print_help()parser.error(\"\")args[''] = os.path.realpath(os.path.abspath(os.getcwd()))return args", "docstring": "Runs everything needed to execute from the command line, so main method is callable without arg parsing", "id": "f231:m15"} {"signature": "def safe_length(var):", "body": "output = -try:output = len(var)except:passreturn output", "docstring": "Exception-safe length check, returns -1 if no length on type or error", "id": "f232:m1"} {"signature": "def _get_extractor(config_dict):", "body": "extractor = Noneextract_config = Nonefor key, value in config_dict.items():if key in EXTRACTORS:return parse_extractor(key, value)else: raise Exception(''.format(config_dict))", "docstring": "Utility function, get an extract function for a single valid extractor name in config\n and error if more than one or none", "id": "f232:m3"} {"signature": "def parse_extractor(extractor_type, config):", "body": "parse = EXTRACTORS.get(extractor_type.lower())if not parse:raise ValueError(\"\".format(extractor_type))parsed = parse(config)if isinstance(parsed, AbstractExtractor): return parseditems = AbstractExtractor().__dict__if set(parsed.__dict__.keys()).issuperset(set(items.keys())):return parsedelse:raise TypeError(\"\")", "docstring": "Convert extractor type and config to an extractor instance\n Uses registered parse function for that extractor type\n Parse functions may return either:\n - An extraction function (wrapped in an Extractor instance with configs and returned)\n - OR a a full Extractor instance (configured)", "id": "f232:m4"} {"signature": "def parse_validator(name, config_node):", "body": "name = name.lower()if name not in VALIDATORS:raise ValueError(\"\".format(name))valid = VALIDATORS[name](config_node)if valid.name is None: valid.name = nameif valid.config is None: valid.config = config_nodereturn valid", "docstring": "Parse a validator from configuration and use it", "id": "f232:m5"} {"signature": "def register_validator(name, parse_function):", "body": "name = name.lower()if name in VALIDATORS:raise Exception(\"\".format(name))VALIDATORS[name] = parse_function", "docstring": "Registers a validator for use by this library\n Name is the string name for validator\n\n Parse function does parse(config_node) and returns a Validator object\n Validator functions have signature:\n validate(response_body, context=None) - context is a bindings.Context object\n\n Validators return true or false and optionally can return a Failure instead of false\n This allows for passing more details", "id": "f232:m6"} {"signature": "def register_extractor(extractor_name, parse_function):", "body": "if not isinstance(extractor_name, basestring):raise TypeError(\"\")if extractor_name.lower() == '':raise ValueError(\"\")elif extractor_name.lower() == '':raise ValueError(\"\")elif extractor_name.lower() == '':raise ValueError(\"\")elif extractor_name in EXTRACTORS:raise ValueError(\"\".format(extractor_name))EXTRACTORS[extractor_name] = parse_function", "docstring": "Register a new body extraction function", "id": "f232:m7"} {"signature": "def 
register_comparator(comparator_name, comparator_function):", "body": "if not isinstance(comparator_name, basestring):raise TypeError(\"\")elif comparator_name in COMPARATORS:raise ValueError(\"\".format(comparator_name))COMPARATORS[comparator_name] = comparator_function", "docstring": "Register a new two-argument comparator function returning true or false", "id": "f232:m9"} {"signature": "def __nonzero__(self):", "body": "return False", "docstring": "Failure objects test as False, simplifies coding with them", "id": "f232:c0:m0"} {"signature": "def __bool__(self):", "body": "return False", "docstring": "Failure objects test as False, simplifies coding with them", "id": "f232:c0:m1"} {"signature": "def extract_internal(self, query=None, body=None, headers=None, args=None):", "body": "pass", "docstring": "Do extraction, query should be pre-templated", "id": "f232:c1:m1"} {"signature": "def extract(self, body=None, headers=None, context=None):", "body": "query = self.templated_query(context=context)args = self.argsreturn self.extract_internal(query=query, body=body, headers=headers, args=self.args)", "docstring": "Extract data", "id": "f232:c1:m2"} {"signature": "def get_readable_config(self, context=None):", "body": "query = self.templated_query(context=context)output = ''.format(self.extractor_type, query, self.is_templated)args_string = Noneif self.args:args_string = \"\" + str(self.args)output = output + args_stringreturn output", "docstring": "Print a human-readable version of the configuration", "id": "f232:c1:m4"} {"signature": "@classmethoddef configure_base(cls, config, extractor_base):", "body": "if isinstance(config, dict):try:config = config['']extractor_base.is_templated = Trueextractor_base.query = configexcept KeyError:raise ValueError(\"\")elif isinstance(config, basestring):extractor_base.query = configextractor_base.is_templated = Falseelse:raise TypeError(\"\")return extractor_base", "docstring": "Parse config object and do basic config on an Extractor", "id": "f232:c1:m5"} {"signature": "@staticmethoddef query_dictionary(query, dictionary, delimiter=''):", "body": "try:stripped_query = query.strip(delimiter)if stripped_query:for x in stripped_query.split(delimiter):try:x = int(x)dictionary = dictionary[x]except ValueError:dictionary = dictionary[x]except:return Nonereturn dictionary", "docstring": "Do an xpath-like query with dictionary, using a template if relevant", "id": "f232:c2:m1"} {"signature": "def validate(self, body=None, headers=None, context=None):", "body": "pass", "docstring": "Run the validation function, return true or a Failure", "id": "f232:c5:m0"} {"signature": "def get_readable_config(self, context=None):", "body": "string_frags = list()string_frags.append(\"\" + self.extractor.get_readable_config(context=context))if isinstance(self.expected, AbstractExtractor):string_frags.append(\"\" +self.expected.get_readable_config(context=context))elif self.isTemplateExpected:string_frags.append(''.format(self.expected))return os.linesep.join(string_frags)", "docstring": "Get a human-readable config string", "id": "f232:c6:m0"} {"signature": "@staticmethoddef parse(config):", "body": "output = ComparatorValidator()config = parsing.lowercase_keys(parsing.flatten_dictionaries(config))output.config = configoutput.extractor = _get_extractor(config)if output.extractor is None:raise ValueError(\"\")if '' not in config: output.comparator_name = ''else:output.comparator_name = config[''].lower()output.comparator = COMPARATORS[output.comparator_name]if not
output.comparator:raise ValueError(\"\")try:expected = config['']except KeyError:raise ValueError(\"\")if isinstance(expected, basestring) or isinstance(expected, (int, long, float, complex)):output.expected = expectedelif isinstance(expected, dict):expected = parsing.lowercase_keys(expected)template = expected.get('')if template: if not isinstance(template, basestring):raise ValueError(\"\")output.isTemplateExpected = Trueoutput.expected = templateelse: output.expected = _get_extractor(expected)if not output.expected:raise ValueError(\"\")return output", "docstring": "Create a validator that does an extract from body and applies a comparator,\n Then does comparison vs expected value\n Syntax sample:\n { jsonpath_mini: 'node.child',\n operator: 'eq',\n expected: 'myValue'\n }", "id": "f232:c6:m2"} {"signature": "def get_readable_config(self, context=None):", "body": "return \"\" + self.extractor.get_readable_config(context=context)", "docstring": "Get a human-readable config string", "id": "f232:c7:m0"} {"signature": "def _add_doc(func, doc):", "body": "func.__doc__ = doc", "docstring": "Add documentation to a function.", "id": "f235:m0"} {"signature": "def _import_module(name):", "body": "__import__(name)return sys.modules[name]", "docstring": "Import module, returning the module after the last dot.", "id": "f235:m1"} {"signature": "def add_move(move):", "body": "setattr(_MovedItems, move.name, move)", "docstring": "Add an item to six.moves.", "id": "f235:m2"} {"signature": "def remove_move(name):", "body": "try:delattr(_MovedItems, name)except AttributeError:try:del moves.__dict__[name]except KeyError:raise AttributeError(\"\" % (name,))", "docstring": "Remove item from six.moves.", "id": "f235:m3"} {"signature": "def with_metaclass(meta, *bases):", "body": "class metaclass(meta):def __new__(cls, name, this_bases, d):return meta(name, bases, d)return type.__new__(metaclass, '', (), {})", "docstring": "Create a base class with a metaclass.", "id": "f235:m7"} {"signature": "def add_metaclass(metaclass):", "body": "def wrapper(cls):orig_vars = cls.__dict__.copy()slots = orig_vars.get('')if slots is not None:if isinstance(slots, str):slots = [slots]for slots_var in slots:orig_vars.pop(slots_var)orig_vars.pop('', None)orig_vars.pop('', None)return metaclass(cls.__name__, cls.__bases__, orig_vars)return wrapper", "docstring": "Class decorator for creating a class with a metaclass.", "id": "f235:m8"} {"signature": "def python_2_unicode_compatible(klass):", "body": "if PY2:if '' not in klass.__dict__:raise ValueError(\"\"\"\" %klass.__name__)klass.__unicode__ = klass.__str__klass.__str__ = lambda self: self.__unicode__().encode('')return klass", "docstring": "A decorator that defines __unicode__ and __str__ methods under Python 2.\nUnder Python 3 it does nothing.\n\nTo support Python 2 and 3 with a single code base, define a __str__ method\nreturning text and apply this decorator to the class.", "id": "f235:m9"} {"signature": "def is_package(self, fullname):", "body": "return hasattr(self.__get_module(fullname), \"\")", "docstring": "Return true, if the named module is a package.\n\nWe need this method to get correct spec objects with\nPython 3.4 (see PEP451)", "id": "f235:c4:m6"} {"signature": "def get_code(self, fullname):", "body": "self.__get_module(fullname) return None", "docstring": "Return None\n\n Required, if is_package is implemented", "id": "f235:c4:m7"} {"signature": "def is_dynamic(self):", "body": "return self.is_template_path or self.is_template_content", "docstring": "Is 
templating used?", "id": "f239:c0:m0"} {"signature": "def get_content(self, context=None):", "body": "if self.is_file:path = self.contentif self.is_template_path and context:path = string.Template(path).safe_substitute(context.get_values())data = Nonewith open(path, '') as f:data = f.read()if self.is_template_content and context:return string.Template(data).safe_substitute(context.get_values())else:return dataelse:if self.is_template_content and context:return safe_substitute_unicode_template(self.content, context.get_values())else:return self.content", "docstring": "Does all context binding and pathing to get content, templated out", "id": "f239:c0:m1"} {"signature": "def create_noread_version(self):", "body": "if not self.is_file or self.is_template_path:return selfoutput = ContentHandler()output.is_template_content = self.is_template_contentwith open(self.content, '') as f:output.content = f.read()return output", "docstring": "Read file content if it is static and return content handler with no I/O", "id": "f239:c0:m2"} {"signature": "def setup(self, input, is_file=False, is_template_path=False, is_template_content=False):", "body": "if not isinstance(input, basestring):raise TypeError(\"\")if is_file:input = os.path.abspath(input)self.content = inputself.is_file = is_fileself.is_template_path = is_template_pathself.is_template_content = is_template_content", "docstring": "Self explanatory, input is inline content or file path.", "id": "f239:c0:m3"} {"signature": "@staticmethoddef parse_content(node):", "body": "output = ContentHandler()is_template_path = Falseis_template_content = Falseis_file = Falseis_done = Falsewhile (node and not is_done): if isinstance(node, basestring):output.content = nodeoutput.setup(node, is_file=is_file, is_template_path=is_template_path,is_template_content=is_template_content)return outputelif not isinstance(node, dict) and not isinstance(node, list):raise TypeError(\"\")is_done = Trueflat = lowercase_keys(flatten_dictionaries(node))for key, value in flat.items():if key == u'':if isinstance(value, basestring):if is_file:value = os.path.abspath(value)output.content = valueis_template_content = is_template_content or not is_fileoutput.is_template_content = is_template_contentoutput.is_template_path = is_fileoutput.is_file = is_filereturn outputelse:is_template_content = Truenode = valueis_done = Falsebreakelif key == '':if isinstance(value, basestring):output.content = os.path.abspath(value)output.is_file = Trueoutput.is_template_content = is_template_contentreturn outputelse:is_file = Truenode = valueis_done = Falsebreakraise Exception(\"\")", "docstring": "Parse content from input node and returns ContentHandler object\n it'll look like:\n\n - template:\n - file:\n - temple: path\n\n or something", "id": "f239:c0:m4"} {"signature": "def factory_generate_ids(starting_id=, increment=):", "body": "def generate_started_ids():val = starting_idlocal_increment = incrementwhile(True):yield valval += local_incrementreturn generate_started_ids", "docstring": "Return function generator for ids starting at starting_id\n Note: needs to be called with () to make generator", "id": "f240:m0"} {"signature": "def generator_basic_ids():", "body": "return factory_generate_ids()()", "docstring": "Return ids generator starting at 1", "id": "f240:m1"} {"signature": "def generator_random_int32():", "body": "rand = random.Random()while (True):yield random.randint(, INT32_MAX_VALUE)", "docstring": "Random integer generator for up to 32-bit signed ints", "id": "f240:m2"} 
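The generator records above (f240:m0 through f240:m2) share one closure-based pattern: a factory_* function captures its configuration and returns a zero-argument callable which, when invoked, yields values forever. The extracted bodies elide the numeric literals, so the sketch below is only a hedged reconstruction of that pattern: defaults of 1 for starting_id and increment are assumed from the generator_basic_ids docstring ("starting at 1"), and INT32_MAX_VALUE is assumed to be the usual 2**31 - 1 bound implied by "up to 32-bit signed ints".

import random

# Assumed bound; the docstring only says "up to 32-bit signed ints".
INT32_MAX_VALUE = 2 ** 31 - 1

def factory_generate_ids(starting_id=1, increment=1):
    # Defaults are assumptions; the extracted signature elides them.
    def generate_started_ids():
        val = starting_id
        while True:
            yield val
            val += increment
    # Note from the record above: this is a factory, call it with () to get the generator.
    return generate_started_ids

def generator_basic_ids():
    # "Return ids generator starting at 1": the factory call returns a function,
    # which is then called to obtain the actual generator.
    return factory_generate_ids()()

def generator_random_int32():
    # Yields random non-negative 32-bit signed integers forever.
    while True:
        yield random.randint(0, INT32_MAX_VALUE)

# Usage sketch:
ids = generator_basic_ids()
print([next(ids) for _ in range(3)])   # [1, 2, 3]
print(next(generator_random_int32()))  # some value in [0, 2**31 - 1]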
{"signature": "def factory_generate_text(legal_characters=string.ascii_letters, min_length=, max_length=):", "body": "def generate_text():local_min_len = min_lengthlocal_max_len = max_lengthrand = random.Random()while(True):length = random.randint(local_min_len, local_max_len)array = [random.choice(legal_characters)for x in xrange(, length)]yield ''.join(array)return generate_text", "docstring": "Returns a generator function for text with given legal_characters string and length\n Default is ascii letters, length 8\n\n For hex digits, combine with string.hexstring, etc", "id": "f240:m3"} {"signature": "def factory_fixed_sequence(values):", "body": "def seq_generator():my_list = list(values)i = while(True):yield my_list[i]if i == len(my_list):i = return seq_generator", "docstring": "Return a generator that runs through a list of values in order, looping after end", "id": "f240:m4"} {"signature": "def parse_fixed_sequence(config):", "body": "vals = config['']if not vals:raise ValueError('')if not isinstance(vals, list):raise ValueError('')return factory_fixed_sequence(vals)()", "docstring": "Parse fixed sequence string", "id": "f240:m5"} {"signature": "def factory_choice_generator(values):", "body": "def choice_generator():my_list = list(values)rand = random.Random()while(True):yield random.choice(my_list)return choice_generator", "docstring": "Return a generator that picks values from a list randomly", "id": "f240:m6"} {"signature": "def parse_choice_generator(config):", "body": "vals = config['']if not vals:raise ValueError('')if not isinstance(vals, list):raise ValueError('')return factory_choice_generator(vals)()", "docstring": "Parse choice generator", "id": "f240:m7"} {"signature": "def factory_env_variable(env_variable):", "body": "def return_variable():variable_name = env_variablewhile(True):yield os.environ.get(variable_name)return return_variable", "docstring": "Return a generator function that reads from an environment variable", "id": "f240:m8"} {"signature": "def factory_env_string(env_string):", "body": "def return_variable():my_input = env_stringwhile(True):yield os.path.expandvars(my_input)return return_variable", "docstring": "Return a generator function that uses OS expand path to expand environment variables in string", "id": "f240:m9"} {"signature": "def parse_random_text_generator(configuration):", "body": "character_set = configuration.get(u'')characters = Noneif character_set:character_set = character_set.lower()if character_set not in CHARACTER_SETS:raise ValueError(\"\".format(character_set))characters = CHARACTER_SETS[character_set]else: characters = str(configuration.get(u''))min_length = max_length = if configuration.get(u''):min_length = int(configuration.get(u''))if configuration.get(u''):max_length = int(configuration.get(u''))if configuration.get(u''):length = int(configuration.get(u''))min_length = lengthmax_length = lengthif characters:return factory_generate_text(legal_characters=characters, min_length=min_length, max_length=max_length)()else:return factory_generate_text(min_length=min_length, max_length=max_length)()", "docstring": "Parses configuration options for a random text generator", "id": "f240:m10"} {"signature": "def register_generator(typename, parse_function):", "body": "if not isinstance(typename, basestring):raise TypeError(''.format(typename))if typename in GENERATOR_TYPES:raise ValueError(''.format(typename))GENERATOR_TYPES.add(typename)GENERATOR_PARSING[typename] = parse_function", "docstring": "Register a new generator for use in 
testing\n typename is the new generator type name (must not already exist)\n parse_function will parse a configuration object (dict)", "id": "f240:m11"} {"signature": "def parse_generator(configuration):", "body": "configuration = lowercase_keys(flatten_dictionaries(configuration))gen_type = str(configuration.get(u'')).lower()if gen_type not in GENERATOR_TYPES:raise ValueError(''.format(gen_type))if gen_type == u'':return factory_env_variable(configuration[u''])()elif gen_type == u'':return factory_env_string(configuration[u''])()elif gen_type == u'':start = configuration.get('')increment = configuration.get('')if not start:start = else:start = int(start)if not increment:increment = else:increment = int(increment)return factory_generate_ids(start, increment)()elif gen_type == u'':return generator_random_int32()elif gen_type == u'':return parse_random_text_generator(configuration)elif gen_type in GENERATOR_TYPES:return GENERATOR_PARSING[gen_type](configuration)else:raise Exception(\"\".format(''))", "docstring": "Parses a configuration built from yaml and returns a generator\n Configuration should be a map", "id": "f240:m12"} {"signature": "def encode_unicode_bytes(my_string):", "body": "if not isinstance(my_string, basestring):my_string = repr(my_string)if PYTHON_MAJOR_VERSION == :if isinstance(my_string, str):return my_stringelif isinstance(my_string, unicode):return my_string.encode('')else:if isinstance(my_string, str):return my_string.encode('')elif isinstance(my_string, bytes):return my_string", "docstring": "Shim function, converts Unicode to UTF-8 encoded bytes regardless of the source format\n Intended for python 3 compatibility mode, and b/c PyCurl only takes raw bytes", "id": "f241:m0"} {"signature": "def safe_substitute_unicode_template(templated_string, variable_map):", "body": "if PYTHON_MAJOR_VERSION > : return string.Template(templated_string).safe_substitute(variable_map)my_template = string.Template(encode_unicode_bytes(templated_string))my_escaped_dict = dict(map(lambda x: (x[], encode_unicode_bytes(x[])), variable_map.items()))templated = my_template.safe_substitute(my_escaped_dict)return text_type(templated, '')", "docstring": "Perform string.Template safe_substitute on unicode input with unicode variable values by using escapes\n Catch: cannot accept unicode variable names, just values\n Returns a Unicode type output, if you want UTF-8 bytes, do encode_unicode_bytes on it", "id": "f241:m1"} {"signature": "def safe_to_json(in_obj):", "body": "if isinstance(in_obj, bytearray):return str(in_obj)if hasattr(in_obj, ''):return in_obj.__dict__try:return str(in_obj)except:return repr(in_obj)", "docstring": "Safely get dict from object if present for json dumping", "id": "f241:m2"} {"signature": "def flatten_dictionaries(input):", "body": "output = dict()if isinstance(input, list):for map in input:output.update(map)else: output = inputreturn output", "docstring": "Flatten a list of dictionaries into a single dictionary, to allow flexible YAML use\n Dictionary comprehensions can do this, but would like to allow for pre-Python 2.7 use\n If input isn't a list, just return it....", "id": "f241:m3"} {"signature": "def lowercase_keys(input_dict):", "body": "if not isinstance(input_dict, dict):return input_dictsafe = dict()for key, value in input_dict.items():safe[str(key).lower()] = valuereturn safe", "docstring": "Take input and if a dictionary, return version with keys all lowercase and cast to str", "id": "f241:m4"} {"signature": "def safe_to_bool(input):", "body": "if 
isinstance(input, bool):return inputelif isinstance(input, basestring) and input.lower() == u'':return Falseelif isinstance(input, basestring) and input.lower() == u'':return Trueelse:raise TypeError('')", "docstring": "Safely convert user input to a boolean, throwing exception if not boolean or boolean-appropriate string\n For flexibility, we allow case insensitive string matching to false/true values\n If it's not a boolean or string that matches 'false' or 'true' when ignoring case, throws an exception", "id": "f241:m5"} {"signature": "def run_configure(self, key, value, configurable, validator_func=None, converter_func=None, store_func=None, *args, **kwargs):", "body": "if validator_func and not validator(value):raise TypeError(\"\".format(value))storeable = valueif converter_func:storeable = converter_func(value)if store_func:store_func(configurable, key, storeable)else:configurable.setattr(configurable, key, value)", "docstring": "Run a single configuration element\n Run a validator on the value, if supplied\n Run a converter_funct to turn the value into something to storeable:\n converter_func takes params (value) at least and throws exception if failed\n If a store_func is supplied, use that to store the option\n store_func needs to take params (object, key, value, args, kwargs)\n If store_func NOT supplied we do a setattr on object", "id": "f241:c0:m0"} {"signature": "def configure(self, configs, configurable, handler, *args, **kwargs):", "body": "for key, value in configs.items():handler[key] = config_optionsself.run_configure(value, configurable)", "docstring": "Use the configs and configurable to parse", "id": "f241:c0:m1"} {"signature": "def coerce_list_of_ints(val):", "body": "if isinstance(val, list):return [int(x) for x in val]else:return [int(val)]", "docstring": "If single value, try to parse as integer, else try to parse as list of integer", "id": "f242:m3"} {"signature": "def ninja_copy(self):", "body": "output = Test()myvars = vars(self)output.__dict__ = myvars.copy()return output", "docstring": "Optimization: limited copy of test object, for realize() methods\n This only copies fields changed vs. 
class, and keeps methods the same", "id": "f242:c0:m1"} {"signature": "def set_template(self, variable_name, template_string):", "body": "if self.templates is None:self.templates = dict()self.templates[variable_name] = string.Template(template_string)", "docstring": "Add a templating instance for variable given", "id": "f242:c0:m2"} {"signature": "def del_template(self, variable_name):", "body": "if self.templates is not None and variable_name in self.templates:del self.templates[variable_name]", "docstring": "Remove template instance, so we no longer use one for this test", "id": "f242:c0:m3"} {"signature": "def realize_template(self, variable_name, context):", "body": "val = Noneif context is None or self.templates is None or variable_name not in self.templates:return Nonereturn self.templates[variable_name].safe_substitute(context.get_values())", "docstring": "Realize a templated value, using variables from context\n Returns None if no template is set for that variable", "id": "f242:c0:m4"} {"signature": "def set_body(self, value):", "body": "self._body = value", "docstring": "Set body, directly", "id": "f242:c0:m5"} {"signature": "def get_body(self, context=None):", "body": "if self._body is None:return Noneelif isinstance(self._body, basestring):return self._bodyelse:return self._body.get_content(context=context)", "docstring": "Read body from file, applying template if pertinent", "id": "f242:c0:m6"} {"signature": "def set_url(self, value, isTemplate=False):", "body": "if isTemplate:self.set_template(self.NAME_URL, value)else:self.del_template(self.NAME_URL)self._url = value", "docstring": "Set URL, passing flag if using a template", "id": "f242:c0:m7"} {"signature": "def get_url(self, context=None):", "body": "val = self.realize_template(self.NAME_URL, context)if val is None:val = self._urlreturn val", "docstring": "Get URL, applying template if pertinent", "id": "f242:c0:m8"} {"signature": "def set_headers(self, value, isTemplate=False):", "body": "if isTemplate:self.set_template(self.NAME_HEADERS, '')else:self.del_template(self.NAME_HEADERS)self._headers = value", "docstring": "Set headers, passing flag if using a template", "id": "f242:c0:m9"} {"signature": "def get_headers(self, context=None):", "body": "if not context or not self.templates or self.NAME_HEADERS not in self.templates:return self._headersvals = context.get_values()def template_tuple(tuple_input):return (string.Template(str(tuple_item)).safe_substitute(vals) for tuple_item in tuple_input)return dict(map(template_tuple, self._headers.items()))", "docstring": "Get headers, applying template if pertinent", "id": "f242:c0:m10"} {"signature": "def update_context_before(self, context):", "body": "if self.variable_binds:context.bind_variables(self.variable_binds)if self.generator_binds:for key, value in self.generator_binds.items():context.bind_generator_next(key, value)", "docstring": "Make pre-test context updates, by applying variable and generator updates", "id": "f242:c0:m11"} {"signature": "def update_context_after(self, response_body, headers, context):", "body": "if self.extract_binds:for key, value in self.extract_binds.items():result = value.extract(body=response_body, headers=headers, context=context)context.bind_variable(key, result)", "docstring": "Run the extraction routines to update variables based on HTTP response body", "id": "f242:c0:m12"} {"signature": "def is_context_modifier(self):", "body": "return self.variable_binds or self.generator_binds or self.extract_binds", "docstring": "Returns true if 
context can be modified by this test\n (disallows caching of templated test bodies)", "id": "f242:c0:m13"} {"signature": "def is_dynamic(self):", "body": "if self.templates:return Trueelif isinstance(self._body, ContentHandler) and self._body.is_dynamic():return Truereturn False", "docstring": "Returns true if this test does templating", "id": "f242:c0:m14"} {"signature": "def realize(self, context=None):", "body": "if not self.is_dynamic() or context is None:return selfelse:selfcopy = self.ninja_copy()selfcopy.templates = Noneif isinstance(self._body, ContentHandler):selfcopy._body = self._body.get_content(context)selfcopy._url = self.get_url(context=context)selfcopy._headers = self.get_headers(context=context)return selfcopy", "docstring": "Return a fully-templated test object, for configuring curl\n Warning: this is a SHALLOW copy, mutation of fields will cause problems!\n Can accept a None context", "id": "f242:c0:m15"} {"signature": "def realize_partial(self, context=None):", "body": "if self.is_context_modifier():return selfelif self.is_dynamic(): return self.realize(context=context)bod = self._bodynewbod = Noneif bod and isinstance(bod, ContentHandler) and bod.is_file and not bod.is_template_path:newbod = bod.create_noread_version()output = selfif newbod: output = copy.copy(self)output._body = newbodreturn output", "docstring": "Attempt to template out what is static if possible, and load files.\n Used for performance optimization, in cases where a test is re-run repeatedly\n WITH THE SAME Context.", "id": "f242:c0:m16"} {"signature": "def configure_curl(self, timeout=DEFAULT_TIMEOUT, context=None, curl_handle=None):", "body": "if curl_handle:curl = curl_handletry: curl.getinfo(curl.HTTP_CODE) curl.reset()curl.setopt(curl.COOKIELIST, \"\")except pycurl.error:curl = pycurl.Curl()else:curl = pycurl.Curl()curl.setopt(curl.URL, str(self.url))curl.setopt(curl.TIMEOUT, timeout)is_unicoded = Falsebod = self.bodyif isinstance(bod, text_type): bod = bod.encode('')is_unicoded = Trueif bod and len(bod) > :curl.setopt(curl.READFUNCTION, MyIO(bod).read)if self.auth_username and self.auth_password:curl.setopt(pycurl.USERPWD, parsing.encode_unicode_bytes(self.auth_username) + b'' + parsing.encode_unicode_bytes(self.auth_password))if self.auth_type:curl.setopt(pycurl.HTTPAUTH, self.auth_type)if self.method == u'':curl.setopt(HTTP_METHODS[u''], )if bod is not None:curl.setopt(pycurl.POSTFIELDSIZE, len(bod))else:curl.setopt(pycurl.POSTFIELDSIZE, )elif self.method == u'':curl.setopt(HTTP_METHODS[u''], )if bod is not None:curl.setopt(pycurl.INFILESIZE, len(bod))else:curl.setopt(pycurl.INFILESIZE, )elif self.method == u'':curl.setopt(curl.POSTFIELDS, bod)curl.setopt(curl.CUSTOMREQUEST, '')if bod is not None:curl.setopt(pycurl.INFILESIZE, len(bod))else:curl.setopt(pycurl.INFILESIZE, )elif self.method == u'':curl.setopt(curl.CUSTOMREQUEST, '')if bod is not None:curl.setopt(pycurl.POSTFIELDS, bod)curl.setopt(pycurl.POSTFIELDSIZE, len(bod))elif self.method == u'':curl.setopt(curl.NOBODY, )curl.setopt(curl.CUSTOMREQUEST, '')elif self.method and self.method.upper() != '': curl.setopt(curl.CUSTOMREQUEST, self.method.upper())if bod is not None:curl.setopt(pycurl.POSTFIELDS, bod)curl.setopt(pycurl.POSTFIELDSIZE, len(bod))head = self.get_headers(context=context)head = copy.copy(head) if is_unicoded and u'' in head.keys():content = head[u'']if u'' not in content:head[u''] = content + u''if head:headers = [str(headername) + '' + str(headervalue)for headername, headervalue in head.items()]else:headers = 
list()headers.append(\"\")headers.append(\"\")curl.setopt(curl.HTTPHEADER, headers)if self.curl_options:filterfunc = lambda x: x[] is not None and x[] is not None for (key, value) in ifilter(filterfunc, self.curl_options.items()):curl.setopt(getattr(curl, key), value)return curl", "docstring": "Create and mostly configure a curl object for test, reusing existing if possible", "id": "f242:c0:m19"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(MailChimp, self).__init__(*args, **kwargs)self.root = self.api_root = Root(self)self.authorized_apps = AuthorizedApps(self)self.automations = Automations(self)self.automations.actions = AutomationActions(self)self.automations.emails = AutomationEmails(self)self.automations.emails.actions = AutomationEmailActions(self)self.automations.emails.queues = AutomationEmailQueues(self)self.automations.removed_subscribers = AutomationRemovedSubscribers(self)self.batches = self.batch_operations = BatchOperations(self)self.batch_webhooks = BatchWebhooks(self)self.campaign_folders = CampaignFolders(self)self.campaigns = Campaigns(self)self.campaigns.actions = CampaignActions(self)self.campaigns.content = CampaignContent(self)self.campaigns.feedback = CampaignFeedback(self)self.campaigns.send_checklist = CampaignSendChecklist(self)self.conversations = Conversations(self)self.conversations.messages = ConversationMessages(self)self.stores = self.ecommerce = Stores(self)self.stores.carts = StoreCarts(self)self.stores.carts.lines = StoreCartLines(self)self.stores.customers = StoreCustomers(self)self.stores.orders = StoreOrders(self)self.stores.orders.lines = StoreOrderLines(self)self.stores.products = StoreProducts(self)self.stores.products.images = StoreProductImages(self)self.stores.products.variants = StoreProductVariants(self)self.stores.promo_rules = StorePromoRules(self)self.stores.promo_codes = StorePromoCodes(self)self.files = FileManagerFiles(self)self.folders = FileManagerFolders(self)self.lists = Lists(self)self.lists.abuse_reports = ListAbuseReports(self)self.lists.activity = ListActivity(self)self.lists.clients = ListClients(self)self.lists.growth_history = ListGrowthHistory(self)self.lists.interest_categories = ListInterestCategories(self)self.lists.interest_categories.interests = ListInterestCategoryInterest(self)self.lists.members = ListMembers(self)self.lists.members.activity = ListMemberActivity(self)self.lists.members.goals = ListMemberGoals(self)self.lists.members.notes = ListMemberNotes(self)self.lists.members.tags = ListMemberTags(self)self.lists.merge_fields = ListMergeFields(self)self.lists.segments = ListSegments(self)self.lists.segments.members = ListSegmentMembers(self)self.lists.signup_forms = ListSignupForms(self)self.lists.webhooks = ListWebhooks(self)self.ping = Ping(self)self.reports = Reports(self)self.reports.abuse_reports = ReportCampaignAbuseReports(self)self.reports.advice = ReportCampaignAdvice(self)self.reports.click_details = ReportClickDetailReports(self)self.reports.click_details.members = ReportClickDetailMembers(self)self.reports.domain_performance = ReportDomainPerformance(self)self.reports.eepurl = ReportEepURL(self)self.reports.email_activity = ReportEmailActivity(self)self.reports.locations = ReportLocations(self)self.reports.sent_to = ReportSentTo(self)self.reports.subreports = ReportSubReports(self)self.reports.unsubscribes = ReportUnsubscribes(self)self.reports.open_details = ReportOpenDetails(self)self.reports.google_analytics = ReportGoogleAnalytics(self)self.search_campaigns = 
SearchCampaigns(self)self.search_members = SearchMembers(self)self.template_folders = TemplateFolders(self)self.templates = Templates(self)self.templates.default_content = TemplateDefaultContent(self)", "docstring": "Initialize the class with your api_key and user_id and attach all of\nthe endpoints", "id": "f245:c0:m0"} {"signature": "def get_subscriber_hash(member_email):", "body": "check_email(member_email)member_email = member_email.lower().encode()m = hashlib.md5(member_email)return m.hexdigest()", "docstring": "The MD5 hash of the lowercase version of the list member's email.\nUsed as subscriber_hash\n\n:param member_email: The member's email address\n:type member_email: :py:class:`str`\n:returns: The MD5 hash in hex\n:rtype: :py:class:`str`", "id": "f246:m0"} {"signature": "def check_subscriber_hash(potential_hash):", "body": "if re.match(r\"\", potential_hash):return potential_hashelse:return get_subscriber_hash(potential_hash)", "docstring": "Check the passed value to see if it matches a 32 character hex number that\nMD5 generates as output, or compute that value assuming that the input is\nan email address.\n\n:param potential_hash: A value to be passed to any of the endpoints that\nexpect an MD5 of an email address\n:type potential_hash: :py:class:`str`\n:returns: A valid MD5 hash in hex\n:rtype: :py:class:`str`", "id": "f246:m1"} {"signature": "def check_email(email):", "body": "if not re.match(r\"\", email):raise ValueError('')return", "docstring": "Function that verifies that the string passed is a valid email address.\n\nRegex for email validation based on MailChimp limits:\nhttp://kb.mailchimp.com/accounts/management/international-characters-in-mailchimp\n\n:param email: The potential email address\n:type email: :py:class:`str`\n:return: Nothing", "id": "f246:m2"} {"signature": "def check_url(url):", "body": "URL_REGEX = re.compile(u\"\"u\"\"u\"\"u\"\"u\"\"u\"\"u\"\"u\"\"u\"\"u\"\"u\"\"u\"\"u\"\"u\"\"u\"\"u\"\"u\"\"u\"\", re.UNICODE)if not re.match(URL_REGEX, url):raise ValueError('')return", "docstring": "Function that verifies that the string passed is a valid url.\n\nOriginal regex author Diego Perini (http://www.iport.it)\nregex ported to Python by adamrofer (https://github.com/adamrofer)\nUsed under MIT license.\n\n:param url:\n:return: Nothing", "id": "f246:m3"} {"signature": "def merge_results(x, y):", "body": "z = x.copy()for key, value in y.items():if isinstance(value, list) and isinstance(z.get(key), list):z[key] += valueelse:z[key] = valuereturn z", "docstring": "Given two dicts, x and y, merge them into a new dict as a shallow copy.\n\nThe result only differs from `x.update(y)` in the way that it handles list\nvalues when both x and y have list values for the same key. 
In which case\nthe returned dictionary, z, has a value according to:\n z[key] = x[key] + z[key]\n\n:param x: The first dictionary\n:type x: :py:class:`dict`\n:param y: The second dictionary\n:type y: :py:class:`dict`\n:returns: The merged dictionary\n:rtype: :py:class:`dict`", "id": "f246:m4"} {"signature": "def __init__(self, mc_client):", "body": "super(BaseApi, self).__init__()self._mc_client = mc_clientself.endpoint = ''", "docstring": "Initialize the class with you user_id and secret_key\n\n:param mc_client: The mailchimp client connection\n:type mc_client: :mod:`mailchimp3.mailchimpclient.MailChimpClient`", "id": "f247:c0:m0"} {"signature": "def _build_path(self, *args):", "body": "return ''.join(chain((self.endpoint,), map(str, args)))", "docstring": "Build path with endpoint and args\n\n:param args: Tokens in the endpoint URL\n:type args: :py:class:`unicode`", "id": "f247:c0:m1"} {"signature": "def _iterate(self, url, **queryparams):", "body": "if '' in queryparams:if '' not in queryparams[''].split(''):queryparams[''] += ''queryparams.pop(\"\", None)queryparams.pop(\"\", None)result = self._mc_client._get(url=url, offset=, count=, **queryparams)total = result['']if total > :for offset in range(, int(total / ) + ):result = merge_results(result, self._mc_client._get(url=url,offset=int(offset * ),count=,**queryparams))return resultelse: return result", "docstring": "Iterate over all pages for the given url. Feed in the result of self._build_path as the url.\n\n:param url: The url of the endpoint\n:type url: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f247:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(TemplateFolders, self).__init__(*args, **kwargs)self.endpoint = ''self.folder_id = None", "docstring": "Initialize the endpoint", "id": "f248:c0:m0"} {"signature": "def create(self, data):", "body": "if '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(), data=data)if response is not None:self.folder_id = response['']else:self.folder_id = Nonereturn response", "docstring": "Create a new template folder.\n\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": string*\n}", "id": "f248:c0:m1"} {"signature": "def all(self, get_all=False, **queryparams):", "body": "self.folder_id = Noneif get_all:return self._iterate(url=self._build_path(), **queryparams)else:return self._mc_client._get(url=self._build_path(), **queryparams)", "docstring": "Get all folders used to organize templates.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f248:c0:m2"} {"signature": "def get(self, folder_id, **queryparams):", "body": "self.folder_id = folder_idreturn self._mc_client._get(url=self._build_path(folder_id), **queryparams)", "docstring": "Get information about a specific folder used to organize templates.\n\n:param folder_id: The unique id for the File Manager folder.\n:type folder_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f248:c0:m3"} {"signature": "def update(self, folder_id, data):", "body": "if '' not in data:raise 
KeyError('')self.folder_id = folder_idreturn self._mc_client._patch(url=self._build_path(folder_id), data=data)", "docstring": "Update a specific folder used to organize templates.\n\n:param folder_id: The unique id for the File Manager folder.\n:type folder_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": string*\n}", "id": "f248:c0:m4"} {"signature": "def delete(self, folder_id):", "body": "self.folder_id = folder_idreturn self._mc_client._delete(url=self._build_path(folder_id))", "docstring": "Delete a specific template folder, and mark all the templates in the\nfolder as \u2018unfiled\u2019.\n\n:param folder_id: The unique id for the File Manager folder.\n:type folder_id: :py:class:`str`", "id": "f248:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(Automations, self).__init__(*args, **kwargs)self.endpoint = ''self.workflow_id = Noneself.actions = AutomationActions(self)self.emails = AutomationEmails(self)self.removed_subscribers = AutomationRemovedSubscribers(self)", "docstring": "Initialize the endpoint", "id": "f249:c0:m0"} {"signature": "def all(self, get_all=False, **queryparams):", "body": "self.workflow_id = Noneif get_all:return self._iterate(url=self._build_path(), **queryparams)else:return self._mc_client._get(url=self._build_path(), **queryparams)", "docstring": "Get a summary of an account\u2019s Automations.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: the query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f249:c0:m1"} {"signature": "def get(self, workflow_id, **queryparams):", "body": "self.workflow_id = workflow_idreturn self._mc_client._get(url=self._build_path(workflow_id), **queryparams)", "docstring": "Get a summary of an individual Automation workflow\u2019s settings and\ncontent. 
The trigger_settings object returns information for the first\nemail in the workflow.\n\n:param workflow_id: The unique id for the Automation workflow\n:type workflow_id: :py:class:`str`\n:param queryparams: the query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f249:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(AutomationEmails, self).__init__(*args, **kwargs)self.endpoint = ''self.workflow_id = Noneself.email_id = Noneself.actions = AutomationEmailActions(self)self.queues = AutomationEmailQueues(self)", "docstring": "Initialize the endpoint", "id": "f250:c0:m0"} {"signature": "def all(self, workflow_id, get_all=False, **queryparams):", "body": "self.workflow_id = workflow_idself.email_id = Noneif get_all:return self._iterate(url=self._build_path(workflow_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(workflow_id, ''), **queryparams)", "docstring": "Get a summary of the emails in an Automation workflow.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: the query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f250:c0:m1"} {"signature": "def get(self, workflow_id, email_id):", "body": "self.workflow_id = workflow_idself.email_id = email_idreturn self._mc_client._get(url=self._build_path(workflow_id, '', email_id))", "docstring": "Get information about an individual Automation workflow email.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`\n:param email_id: The unique id for the Automation workflow email.\n:type email_id: :py:class:`str`", "id": "f250:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(AutomationEmailQueues, self).__init__(*args, **kwargs)self.endpoint = ''self.workflow_id = Noneself.email_id = Noneself.subscriber_hash = None", "docstring": "Initialize the endpoint", "id": "f251:c0:m0"} {"signature": "def create(self, workflow_id, email_id, data):", "body": "self.workflow_id = workflow_idself.email_id = email_idif '' not in data:raise KeyError('')check_email(data[''])response = self._mc_client._post(url=self._build_path(workflow_id, '', email_id, ''),data=data)if response is not None:self.subscriber_hash = response['']else:self.subscriber_hash = Nonereturn response", "docstring": "Manually add a subscriber to a workflow, bypassing the default trigger\nsettings. 
You can also use this endpoint to trigger a series of\nautomated emails in an API 3.0 workflow type or add subscribers to an\nautomated email queue that uses the API request delay type.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`\n:param email_id: The unique id for the Automation workflow email.\n:type email_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"email_address\": string*\n}", "id": "f251:c0:m1"} {"signature": "def all(self, workflow_id, email_id):", "body": "self.workflow_id = workflow_idself.email_id = email_idself.subscriber_hash = Nonereturn self._mc_client._get(url=self._build_path(workflow_id, '', email_id, ''))", "docstring": "Get information about an Automation email queue.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`\n:param email_id: The unique id for the Automation workflow email.\n:type email_id: :py:class:`str`", "id": "f251:c0:m2"} {"signature": "def get(self, workflow_id, email_id, subscriber_hash):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.workflow_id = workflow_idself.email_id = email_idself.subscriber_hash = subscriber_hashreturn self._mc_client._get(url=self._build_path(workflow_id, '', email_id, '', subscriber_hash))", "docstring": "Get information about a specific subscriber in an Automation email\nqueue.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`\n:param email_id: The unique id for the Automation workflow email.\n:type email_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`", "id": "f251:c0:m3"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(SearchCampaigns, self).__init__(*args, **kwargs)self.endpoint = ''", "docstring": "Initialize the endpoint", "id": "f252:c0:m0"} {"signature": "def get(self, **queryparams):", "body": "return self._mc_client._get(url=self._build_path(), **queryparams)", "docstring": "Search all campaigns for the specified query terms.\n\n:param queryparams: The query string parameters\nqueryparams['fields'] = array\nqueryparams['exclude_fields'] = array\nqueryparams['query'] = string\nqueryparams['snip_start'] = string\nqueryparams['snip_end'] = string\nqueryparams['offset'] = integer", "id": "f252:c0:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(TemplateDefaultContent, self).__init__(*args, **kwargs)self.endpoint = ''self.template_id = None", "docstring": "Initialize the endpoint", "id": "f253:c0:m0"} {"signature": "def all(self, template_id, **queryparams):", "body": "self.template_id = template_idreturn self._mc_client._get(url=self._build_path(template_id, ''), **queryparams)", "docstring": "Get the sections that you can edit in a template, including each\nsection\u2019s default content.\n\n:param template_id: The unique id for the template.\n:type template_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f253:c0:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(Templates, self).__init__(*args, **kwargs)self.endpoint = ''self.template_id = Noneself.default_content = TemplateDefaultContent(self)", "docstring": "Initialize the endpoint", "id": "f254:c0:m0"} {"signature": "def create(self, data):", 
"body": "if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(), data=data)if response is not None:self.template_id = response['']else:self.template_id = Nonereturn response", "docstring": "Create a new template for the account. Only Classic templates are\nsupported.\n\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": string*,\n \"html\": string*\n}", "id": "f254:c0:m1"} {"signature": "def all(self, get_all=False, **queryparams):", "body": "self.template_id = Noneif get_all:return self._iterate(url=self._build_path(), **queryparams)else:return self._mc_client._get(url=self._build_path(), **queryparams)", "docstring": "Get a list of an account\u2019s available templates.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['created_by'] = string\nqueryparams['before_created_at'] = string\nqueryparams['since_created_at'] = string\nqueryparams['type'] = string\nqueryparams['folder_id'] = string", "id": "f254:c0:m2"} {"signature": "def get(self, template_id, **queryparams):", "body": "self.template_id = template_idreturn self._mc_client._get(url=self._build_path(template_id), **queryparams)", "docstring": "Get information about a specific template.\n\n:param template_id: The unique id for the template.\n:type template_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f254:c0:m3"} {"signature": "def update(self, template_id, data):", "body": "if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')self.template_id = template_idreturn self._mc_client._patch(url=self._build_path(template_id), data=data)", "docstring": "Update the name, HTML, or folder_id of an existing template.\n\n:param template_id: The unique id for the template.\n:type template_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": string*,\n \"html\": string*\n}", "id": "f254:c0:m4"} {"signature": "def delete(self, template_id):", "body": "self.template_id = template_idreturn self._mc_client._delete(url=self._build_path(template_id))", "docstring": "Delete a specific template.\n\n:param template_id: The unique id for the template.\n:type template_id: :py:class:`str`", "id": "f254:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ReportEepURL, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = None", "docstring": "Initialize the endpoint", "id": "f255:c0:m0"} {"signature": "def all(self, campaign_id, **queryparams):", "body": "self.campaign_id = campaign_idreturn self._mc_client._get(url=self._build_path(campaign_id, ''), **queryparams)", "docstring": "Get a summary of social activity for the campaign, tracked by EepURL.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f255:c0:m1"} {"signature": "def all(self, list_id, **queryparams):", "body": "return self._mc_client._get(url=self._build_path(list_id, ''), **queryparams)", "docstring": "returns the first 10 segments for a specific list.", "id": "f256:c0:m1"} {"signature": 
"def get(self, list_id, segment_id):", "body": "return self._mc_client._get(url=self._build_path(list_id, '', segment_id))", "docstring": "returns the specified list segment.", "id": "f256:c0:m2"} {"signature": "def update(self, list_id, segment_id, data):", "body": "return self._mc_client._patch(url=self._build_path(list_id, '', segment_id), data=data)", "docstring": "updates an existing list segment.", "id": "f256:c0:m3"} {"signature": "def delete(self, list_id, segment_id):", "body": "return self._mc_client._delete(url=self._build_path(list_id, '', segment_id))", "docstring": "removes an existing list segment from the list. This cannot be undone.", "id": "f256:c0:m4"} {"signature": "def create(self, list_id, data):", "body": "return self._mc_client._post(url=self._build_path(list_id, ''), data=data)", "docstring": "adds a new segment to the list.", "id": "f256:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(FileManagerFiles, self).__init__(*args, **kwargs)self.endpoint = ''self.file_id = None", "docstring": "Initialize the endpoint", "id": "f257:c0:m0"} {"signature": "def create(self, data):", "body": "if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(), data=data)if response is not None:self.file_id = response['']else:self.file_id = Nonereturn response", "docstring": "Upload a new image or file to the File Manager.\n\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": string*,\n \"file_data\": string*\n}", "id": "f257:c0:m1"} {"signature": "def all(self, get_all=False, **queryparams):", "body": "self.file_id = Noneif get_all:return self._iterate(url=self._build_path(), **queryparams)else:return self._mc_client._get(url=self._build_path(), **queryparams)", "docstring": "Get a list of available images and files stored in the File Manager for the account.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['type'] = string\nqueryparams['created_by'] = string\nqueryparams['before_created_at'] = string\nqueryparams['since_created_at'] = string\nqueryparams['sort_field'] = string\nqueryparams['sort_dir'] = string", "id": "f257:c0:m2"} {"signature": "def get(self, file_id, **queryparams):", "body": "self.file_id = file_idreturn self._mc_client._get(url=self._build_path(file_id), **queryparams)", "docstring": "Get information about a specific file in the File Manager.\n\n:param file_id: The unique id for the File Manager file.\n:type file_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f257:c0:m3"} {"signature": "def update(self, file_id, data):", "body": "self.file_id = file_idif '' not in data:raise KeyError('')if '' not in data:raise KeyError('')return self._mc_client._patch(url=self._build_path(file_id), data=data)", "docstring": "Update a file in the File Manager.\n\n:param file_id: The unique id for the File Manager file.\n:type file_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": string*,\n \"file_data\": string*\n}", "id": "f257:c0:m4"} {"signature": "def delete(self, file_id):", "body": "self.file_id = file_idreturn 
self._mc_client._delete(url=self._build_path(file_id))", "docstring": "Remove a specific file from the File Manager.\n\n:param file_id: The unique id for the File Manager file.\n:type file_id: :py:class:`str`", "id": "f257:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListSegmentMembers, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = Noneself.segment_id = Noneself.subscriber_hash = None", "docstring": "Initialize the endpoint", "id": "f258:c0:m0"} {"signature": "def create(self, list_id, segment_id, data):", "body": "self.list_id = list_idself.segment_id = segment_idif '' not in data:raise KeyError('')check_email(data[''])if '' not in data:raise KeyError('')if data[''] not in ['', '', '', '']:raise ValueError('''')response = self._mc_client._post(url=self._build_path(list_id, '', segment_id, ''), data=data)if response is not None:self.subscriber_hash = response['']else:self.subscriber_hash = Nonereturn response", "docstring": "Add a member to a static segment.\n\nThe documentation does not currently elaborate on the path or request\nbody parameters. Looking at the example provided, it will be assumed\nthat email_address and status are required request body parameters and\nthey are documented and error-checked as such.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param segment_id: The unique id for the segment.\n:type segment_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"email_address\": string*,\n \"status\": string* (Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'pending')\n}", "id": "f258:c0:m1"} {"signature": "def all(self, list_id, segment_id, get_all=False, **queryparams):", "body": "self.list_id = list_idself.segment_id = segment_idself.subscriber_hash = Noneif get_all:return self._iterate(url=self._build_path(list_id, '', segment_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(list_id, '', segment_id, ''), **queryparams)", "docstring": "Get information about members in a saved segment.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param segment_id: The unique id for the segment.\n:type segment_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f258:c0:m2"} {"signature": "def delete(self, list_id, segment_id, subscriber_hash):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.list_id = list_idself.segment_id = segment_idself.subscriber_hash = subscriber_hashreturn self._mc_client._delete(url=self._build_path(list_id, '', segment_id, '', subscriber_hash))", "docstring": "Remove a member from the specified static segment.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param segment_id: The unique id for the segment.\n:type segment_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`", "id": "f258:c0:m3"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ReportEmailActivity, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = Noneself.subscriber_hash = None", "docstring": "Initialize the endpoint", "id": "f259:c0:m0"} {"signature": 
"def all(self, campaign_id, get_all=False, **queryparams):", "body": "self.campaign_id = campaign_idself.subscriber_hash = Noneif get_all:return self._iterate(url=self._build_path(campaign_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(campaign_id, ''), **queryparams)", "docstring": "Get a list of member\u2019s subscriber activity in a specific campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f259:c0:m1"} {"signature": "def get(self, campaign_id, subscriber_hash, **queryparams):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.campaign_id = campaign_idself.subscriber_hash = subscriber_hashreturn self._mc_client._get(url=self._build_path(campaign_id, '', subscriber_hash),**queryparams)", "docstring": "Get a specific list member\u2019s activity in a campaign including opens,\nclicks, and bounces.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f259:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(BatchWebhooks, self).__init__(*args, **kwargs)self.endpoint = ''self.batch_webhook_id = None", "docstring": "Initialize the endpoint", "id": "f260:c0:m0"} {"signature": "def create(self, data):", "body": "if '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(), data=data)if response is not None:self.batch_webhook_id = response['']else:self.batch_webhook_id = Nonereturn response", "docstring": "Configure a webhook that will fire whenever any batch request\ncompletes processing.\n\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"url\": string*\n}", "id": "f260:c0:m1"} {"signature": "def all(self, get_all=False, **queryparams):", "body": "self.batch_webhook_id = Noneif get_all:return self._iterate(url=self._build_path(), **queryparams)else:return self._mc_client._get(url=self._build_path(), **queryparams)", "docstring": "Get all webhooks that have been configured for batches.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f260:c0:m2"} {"signature": "def get(self, batch_webhook_id, **queryparams):", "body": "self.batch_webhook_id = batch_webhook_idreturn self._mc_client._get(url=self._build_path(batch_webhook_id), **queryparams)", "docstring": "Get information about a specific batch webhook.\n\n:param batch_webhook_id: The unique id for the batch webhook.\n:type batch_webhook_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f260:c0:m3"} {"signature": "def update(self, batch_webhook_id, data):", "body": "self.batch_webhook_id = batch_webhook_idif '' not in data:raise KeyError('')return 
self._mc_client._patch(url=self._build_path(batch_webhook_id), data=data)", "docstring": "Update a webhook that will fire whenever any batch request completes\nprocessing.\n\n:param batch_webhook_id: The unique id for the batch webhook.\n:type batch_webhook_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"url\": string*\n}", "id": "f260:c0:m4"} {"signature": "def delete(self, batch_webhook_id):", "body": "self.batch_webhook_id = batch_webhook_idreturn self._mc_client._delete(url=self._build_path(batch_webhook_id))", "docstring": "Remove a batch webhook. Webhooks will no longer be sent to the given\nURL.\n\n:param batch_webhook_id: The unique id for the batch webhook.\n:type batch_webhook_id: :py:class:`str`", "id": "f260:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ReportSubReports, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = None", "docstring": "Initialize the endpoint", "id": "f261:c0:m0"} {"signature": "def all(self, campaign_id, **queryparams):", "body": "self.campaign_id = campaign_idreturn self._mc_client._get(url=self._build_path(campaign_id, ''), **queryparams)", "docstring": "Get a list of reports with child campaigns for a specific parent\ncampaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f261:c0:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListSignupForms, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = None", "docstring": "Initialize the endpoint", "id": "f262:c0:m0"} {"signature": "def create(self, list_id, data):", "body": "self.list_id = list_idresponse = self._mc_client._post(url=self._build_path(list_id, ''), data=data)return response", "docstring": "Create a customized list signup form.\n\nNo fields are listed as required in the documentation and the\ndescription of the method does not indicate any required fields\neither.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f262:c0:m1"} {"signature": "def all(self, list_id):", "body": "self.list_id = list_idreturn self._mc_client._get(url=self._build_path(list_id, ''))", "docstring": "Get signup forms for a specific list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`", "id": "f262:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ReportCampaignAbuseReports, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = Noneself.report_id = None", "docstring": "Initialize the endpoint", "id": "f263:c0:m0"} {"signature": "def all(self, campaign_id, **queryparams):", "body": "self.campaign_id = campaign_idself.report_id = Nonereturn self._mc_client._get(url=self._build_path(campaign_id, ''), **queryparams)", "docstring": "Get a list of abuse complaints for a specific campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f263:c0:m1"} {"signature": "def get(self, campaign_id, report_id, **queryparams):", "body": "self.campaign_id = campaign_idself.report_id = report_idreturn self._mc_client._get(url=self._build_path(campaign_id, '', report_id), **queryparams)", 
"docstring": "Get information about a specific abuse report for a campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param report_id: The id for the abuse report.\n:type report_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f263:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(AuthorizedApps, self).__init__(*args, **kwargs)self.endpoint = ''self.app_id = None", "docstring": "Initialize the endpoint", "id": "f264:c0:m0"} {"signature": "def create(self, data):", "body": "self.app_id = Noneif '' not in data:raise KeyError('')if '' not in data:raise KeyError('')return self._mc_client._post(url=self._build_path(), data=data)", "docstring": "Retrieve OAuth2-based credentials to associate API calls with your\napplication.\n\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"client_id\": string*,\n \"client_secret\": string*\n}", "id": "f264:c0:m1"} {"signature": "def all(self, get_all=False, **queryparams):", "body": "self.app_id = Noneif get_all:return self._iterate(url=self._build_path(), **queryparams)else:return self._mc_client._get(url=self._build_path(), **queryparams)", "docstring": "Get a list of an account\u2019s registered, connected applications.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f264:c0:m2"} {"signature": "def get(self, app_id, **queryparams):", "body": "self.app_id = app_idreturn self._mc_client._get(url=self._build_path(app_id), **queryparams)", "docstring": "Get information about a specific authorized application\n\n:param app_id: The unique id for the connected authorized application\n:type app_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f264:c0:m3"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListAbuseReports, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = Noneself.report_id = None", "docstring": "Initialize the endpoint", "id": "f265:c0:m0"} {"signature": "def all(self, list_id, get_all=False, **queryparams):", "body": "self.list_id = list_idself.report_id = Noneif get_all:return self._iterate(url=self._build_path(list_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(list_id, ''), **queryparams)", "docstring": "Get all abuse reports for a specific list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f265:c0:m1"} {"signature": "def get(self, list_id, report_id, **queryparams):", "body": "self.list_id = list_idself.report_id = report_idreturn self._mc_client._get(url=self._build_path(list_id, '', report_id), **queryparams)", "docstring": "Get details about a specific abuse report.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param report_id: The id for the abuse report.\n:type report_id: :py:class:`str`\n:param queryparams: The query string 
parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f265:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(Stores, self).__init__(*args, **kwargs)self.endpoint = ''self.store_id = Noneself.carts = StoreCarts(self)self.customers = StoreCustomers(self)self.orders = StoreOrders(self)self.products = StoreProducts(self)", "docstring": "Initialize the endpoint", "id": "f266:c0:m0"} {"signature": "def create(self, data):", "body": "if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if not re.match(r\"\", data['']):raise ValueError('')response = self._mc_client._post(url=self._build_path(), data=data)if response is not None:self.store_id = response['']else:self.store_id = Nonereturn response", "docstring": "Add a new store to your MailChimp account.\n\nError checking on the currency code verifies that it is in the correct\nthree-letter, all-caps format as specified by ISO 4217 but does not\ncheck that it is a valid code as the list of valid codes changes over\ntime.\n\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"id\": string*,\n \"list_id\": string*,\n \"name\": string*,\n \"currency_code\": string*\n}", "id": "f266:c0:m1"} {"signature": "def all(self, get_all=False, **queryparams):", "body": "self.store_id = Noneif get_all:return self._iterate(url=self._build_path(), **queryparams)else:return self._mc_client._get(url=self._build_path(), **queryparams)", "docstring": "Get information about all stores in the account.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f266:c0:m2"} {"signature": "def get(self, store_id, **queryparams):", "body": "self.store_id = store_idreturn self._mc_client._get(url=self._build_path(store_id), **queryparams)", "docstring": "Get information about a specific store.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f266:c0:m3"} {"signature": "def update(self, store_id, data):", "body": "self.store_id = store_idreturn self._mc_client._patch(url=self._build_path(store_id), data=data)", "docstring": "Update a store.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f266:c0:m4"} {"signature": "def delete(self, store_id):", "body": "self.store_id = store_idreturn self._mc_client._delete(url=self._build_path(store_id))", "docstring": "Delete a store. 
Deleting a store will also delete any associated\nsubresources, including Customers, Orders, Products, and Carts.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`", "id": "f266:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ReportClickDetailMembers, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = Noneself.link_id = Noneself.subscriber_hash = None", "docstring": "Initialize the endpoint", "id": "f267:c0:m0"} {"signature": "def all(self, campaign_id, link_id, get_all=False, **queryparams):", "body": "self.campaign_id = campaign_idself.link_id = link_idself.subscriber_hash = Noneif get_all:return self._iterate(url=self._build_path(campaign_id, '', link_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(campaign_id, '', link_id, ''),**queryparams)", "docstring": "Get information about list members who clicked on a specific link in a\ncampaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param link_id: The id for the link.\n:type link_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f267:c0:m1"} {"signature": "def get(self, campaign_id, link_id, subscriber_hash, **queryparams):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.campaign_id = campaign_idself.link_id = link_idself.subscriber_hash = subscriber_hashreturn self._mc_client._get(url=self._build_path(campaign_id, '', link_id, '', subscriber_hash),**queryparams)", "docstring": "Get information about a specific subscriber who clicked a link in a\nspecific campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param link_id: The id for the link.\n:type link_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f267:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(StoreCarts, self).__init__(*args, **kwargs)self.endpoint = ''self.store_id = Noneself.cart_id = Noneself.lines = StoreCartLines(self)", "docstring": "Initialize the endpoint", "id": "f268:c0:m0"} {"signature": "def create(self, store_id, data):", "body": "self.store_id = store_idif '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data:raise KeyError('')if not re.match(r\"\", data['']):raise ValueError('')if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')for line in data['']:if '' not in line:raise KeyError('')if '' not in line:raise KeyError('')if '' not in line:raise KeyError('')if '' not in line:raise KeyError('')if '' not in line:raise KeyError('')response = self._mc_client._post(url=self._build_path(store_id, ''), data=data)if response is not None:self.cart_id = response['']else:self.cart_id = Nonereturn response", "docstring": "Add a new cart to a store.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"id\": string*,\n \"customer\": object*\n {\n \"id\": string*\n },\n 
\"currency_code\": string*,\n \"order_total\": number*,\n \"lines\": array*\n [\n {\n \"id\": string*,\n \"product_id\": string*,\n \"product_variant_id\": string*,\n \"quantity\": integer*,\n \"price\": number*\n }\n ]\n}", "id": "f268:c0:m1"} {"signature": "def all(self, store_id, get_all=False, **queryparams):", "body": "self.store_id = store_idself.cart_id = Noneif get_all:return self._iterate(url=self._build_path(store_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(store_id, ''), **queryparams)", "docstring": "Get information about a store\u2019s carts.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f268:c0:m2"} {"signature": "def get(self, store_id, cart_id, **queryparams):", "body": "self.store_id = store_idself.cart_id = cart_idreturn self._mc_client._get(url=self._build_path(store_id, '', cart_id), **queryparams)", "docstring": "Get information about a specific cart.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param cart_id: The id for the cart.\n:type cart_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f268:c0:m3"} {"signature": "def update(self, store_id, cart_id, data):", "body": "self.store_id = store_idself.cart_id = cart_idreturn self._mc_client._patch(url=self._build_path(store_id, '', cart_id), data=data)", "docstring": "Update a specific cart.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param cart_id: The id for the cart.\n:type cart_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f268:c0:m4"} {"signature": "def delete(self, store_id, cart_id):", "body": "self.store_id = store_idself.cart_id = cart_idreturn self._mc_client._delete(url=self._build_path(store_id, '', cart_id))", "docstring": "Delete a cart.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param cart_id: The id for the cart.\n:type cart_id: :py:class:`str`", "id": "f268:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(Reports, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = Noneself.abuse_reports = ReportCampaignAbuseReports(self)self.advice = ReportCampaignAdvice(self)self.click_details = ReportClickDetailReports(self)self.domain_performance = ReportDomainPerformance(self)self.eepurl = ReportEepURL(self)self.email_activity = ReportEmailActivity(self)self.locations = ReportLocations(self)self.sent_to = ReportSentTo(self)self.subreports = ReportSubReports(self)self.unsubscribes = ReportUnsubscribes(self)", "docstring": "Initialize the endpoint", "id": "f269:c0:m0"} {"signature": "def all(self, get_all=False, **queryparams):", "body": "self.campaign_id = Noneif get_all:return self._iterate(url=self._build_path(), **queryparams)else:return self._mc_client._get(url=self._build_path(), **queryparams)", "docstring": "Get campaign reports.\n\n.. 
note::\n The before_send_time and since_send_time queryparams expect times\n to be listed in the ISO 8601 format in UTC (ex.\n 2015-10-21T15:41:36+00:00).\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['type'] = []\nqueryparams['before_send_time'] = string\nqueryparams['since_send_time'] = string", "id": "f269:c0:m1"} {"signature": "def get(self, campaign_id, **queryparams):", "body": "self.campaign_id = campaign_idreturn self._mc_client._get(url=self._build_path(campaign_id), **queryparams)", "docstring": "Get report details for a specific sent campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f269:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(StoreProducts, self).__init__(*args, **kwargs)self.endpoint = ''self.store_id = Noneself.product_id = Noneself.images = StoreProductImages(self)self.variants = StoreProductVariants(self)", "docstring": "Initialize the endpoint", "id": "f270:c0:m0"} {"signature": "def create(self, store_id, data):", "body": "self.store_id = store_idif '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')for variant in data['']:if '' not in variant:raise KeyError('')if '' not in variant:raise KeyError('')response = self._mc_client._post(url=self._build_path(store_id, ''), data=data)if response is not None:self.product_id = response['']else:self.product_id = Nonereturn response", "docstring": "Add a new product to a store.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"id\": string*,\n \"title\": string*,\n \"variants\": array*\n [\n {\n \"id\": string*,\n \"title\": string*\n }\n ]\n}", "id": "f270:c0:m1"} {"signature": "def all(self, store_id, get_all=False, **queryparams):", "body": "self.store_id = store_idself.product_id = Noneif get_all:return self._iterate(url=self._build_path(store_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(store_id, ''), **queryparams)", "docstring": "Get information about a store\u2019s products.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f270:c0:m2"} {"signature": "def get(self, store_id, product_id, **queryparams):", "body": "self.store_id = store_idself.product_id = product_idreturn self._mc_client._get(url=self._build_path(store_id, '', product_id), **queryparams)", "docstring": "Get information about a specific product.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f270:c0:m3"} {"signature": "def update(self, store_id, product_id, data):", "body": "self.store_id = store_idself.product_id = product_idreturn 
self._mc_client._patch(url=self._build_path(store_id, '', product_id),data=data)", "docstring": "Update a product.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f270:c0:m4"} {"signature": "def delete(self, store_id, product_id):", "body": "self.store_id = store_idself.product_id = product_idreturn self._mc_client._delete(url=self._build_path(store_id, '', product_id))", "docstring": "Delete a product.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`", "id": "f270:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListGrowthHistory, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = Noneself.month = None", "docstring": "Initialize the endpoint", "id": "f271:c0:m0"} {"signature": "def all(self, list_id, get_all=False, **queryparams):", "body": "self.list_id = list_idself.month = Noneif get_all:return self._iterate(url=self._build_path(list_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(list_id, ''), **queryparams)", "docstring": "Get a month-by-month summary of a specific list\u2019s growth activity.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f271:c0:m1"} {"signature": "def get(self, list_id, month, **queryparams):", "body": "self.list_id = list_idself.month = monthreturn self._mc_client._get(url=self._build_path(list_id, '', month), **queryparams)", "docstring": "Get a summary of a specific list\u2019s growth activity for a specific month and year.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param month: A specific month of list growth history.\n:type month: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f271:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(Conversations, self).__init__(*args, **kwargs)self.endpoint = ''self.conversation_id = Noneself.messages = ConversationMessages(self)", "docstring": "Initialize the endpoint", "id": "f272:c0:m0"} {"signature": "def all(self, get_all=False, **queryparams):", "body": "self.conversation_id = Noneif get_all:return self._iterate(url=self._build_path(), **queryparams)else:return self._mc_client._get(url=self._build_path(), **queryparams)", "docstring": "Get a list of conversations for the account.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['has_unread_messages'] = string\nqueryparams['list_id'] = string\nqueryparams['campaign_id'] = string", "id": "f272:c0:m1"} {"signature": "def get(self, conversation_id, **queryparams):", "body": "self.conversation_id = conversation_idreturn self._mc_client._get(url=self._build_path(conversation_id), **queryparams)", "docstring": "Get details about an individual 
conversation.\n\n:param conversation_id: The unique id for the conversation.\n:type conversation_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f272:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListMemberTags, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = Noneself.subscriber_hash = None", "docstring": "Initialize the endpoint", "id": "f273:c0:m0"} {"signature": "def update(self, list_id, subscriber_hash, data):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.list_id = list_idself.subscriber_hash = subscriber_hashif '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(list_id, '', subscriber_hash, ''), data=data)return response", "docstring": "Update tags for a specific subscriber.\n\nThe documentation lists only the tags request body parameter so it is\nbeing documented and error-checked as if it were required based on the\ndescription of the method.\n\nThe data list needs to include a \"status\" key. This determines if the\ntag should be added or removed from the user:\n\ndata = {\n 'tags': [\n {'name': 'foo', 'status': 'active'},\n {'name': 'bar', 'status': 'inactive'}\n ]\n}\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"tags\": list*\n}", "id": "f273:c0:m1"} {"signature": "def all(self, list_id, subscriber_hash, **queryparams):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.list_id = list_idself.subscriber_hash = subscriber_hashreturn self._mc_client._get(url=self._build_path(list_id, '', subscriber_hash, ''), **queryparams)", "docstring": "Get all tags for a specific list member.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`", "id": "f273:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(AutomationActions, self).__init__(*args, **kwargs)self.endpoint = ''self.workflow_id = None", "docstring": "Initialize the endpoint", "id": "f274:c0:m0"} {"signature": "def pause(self, workflow_id):", "body": "self.workflow_id = workflow_idreturn self._mc_client._post(url=self._build_path(workflow_id, ''))", "docstring": "Pause all emails in a specific Automation workflow.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`", "id": "f274:c0:m1"} {"signature": "def start(self, workflow_id):", "body": "self.workflow_id = workflow_idreturn self._mc_client._post(url=self._build_path(workflow_id, ''))", "docstring": "Start all emails in an Automation workflow.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`", "id": "f274:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListWebhooks, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = Noneself.webhook_id = None", "docstring": "Initialize the endpoint", "id": "f275:c0:m0"} {"signature": "def create(self, list_id, data):", "body": "self.list_id = list_idif '' not in data:raise KeyError('')check_url(data[''])response = 
self._mc_client._post(url=self._build_path(list_id, ''), data=data)if response is not None:self.webhook_id = response['']else:self.webhook_id = Nonereturn response", "docstring": "Create a new webhook for a specific list.\n\nThe documentation does not include any required request body\nparameters but the url parameter is being listed here as a required\nparameter in documentation and error-checking based on the description\nof the method\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"url\": string*\n}", "id": "f275:c0:m1"} {"signature": "def all(self, list_id):", "body": "self.list_id = list_idself.webhook_id = Nonereturn self._mc_client._get(url=self._build_path(list_id, ''))", "docstring": "Get information about all webhooks for a specific list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`", "id": "f275:c0:m2"} {"signature": "def get(self, list_id, webhook_id):", "body": "self.list_id = list_idself.webhook_id = webhook_idreturn self._mc_client._get(url=self._build_path(list_id, '', webhook_id))", "docstring": "Get information about a specific webhook.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param webhook_id: The unique id for the webhook.\n:type webhook_id: :py:class:`str`", "id": "f275:c0:m3"} {"signature": "def update(self, list_id, webhook_id, data):", "body": "self.list_id = list_idself.webhook_id = webhook_idreturn self._mc_client._patch(url=self._build_path(list_id, '', webhook_id), data=data)", "docstring": "Update the settings for an existing webhook.\n\n:param list_id: The unique id for the list\n:type list_id: :py:class:`str`\n:param webhook_id: The unique id for the webhook\n:type webhook_id: :py:class:`str`", "id": "f275:c0:m4"} {"signature": "def delete(self, list_id, webhook_id):", "body": "self.list_id = list_idself.webhook_id = webhook_idreturn self._mc_client._delete(url=self._build_path(list_id, '', webhook_id))", "docstring": "Delete a specific webhook in a list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param webhook_id: The unique id for the webhook.\n:type webhook_id: :py:class:`str`", "id": "f275:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListClients, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = None", "docstring": "Initialize the endpoint", "id": "f276:c0:m0"} {"signature": "def all(self, list_id, **queryparams):", "body": "self.list_id = list_idreturn self._mc_client._get(url=self._build_path(list_id, ''), **queryparams)", "docstring": "Get a list of the top email clients based on user-agent strings.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f276:c0:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ReportLocations, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = None", "docstring": "Initialize the endpoint", "id": "f277:c0:m0"} {"signature": "def all(self, campaign_id, get_all=False, **queryparams):", "body": "self.campaign_id = campaign_idif get_all:return self._iterate(url=self._build_path(campaign_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(campaign_id, ''), **queryparams)", "docstring": "Get top open locations for a specific campaign.\n\n:param 
campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f277:c0:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ReportDomainPerformance, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = None", "docstring": "Initialize the endpoint", "id": "f278:c0:m0"} {"signature": "def all(self, campaign_id, **queryparams):", "body": "self.campaign_id = campaign_idreturn self._mc_client._get(url=self._build_path(campaign_id, ''), **queryparams)", "docstring": "Get statistics for the top-performing email domains in a campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f278:c0:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(StoreOrderLines, self).__init__(*args, **kwargs)self.endpoint = ''self.store_id = Noneself.order_id = Noneself.line_id = None", "docstring": "Initialize the endpoint", "id": "f279:c0:m0"} {"signature": "def create(self, store_id, order_id, data):", "body": "self.store_id = store_idself.order_id = order_idif '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(store_id, '', order_id, ''), data=data)if response is not None:self.line_id = response['']else:self.line_id = Nonereturn response", "docstring": "Add a new line item to an existing order.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param order_id: The id for the order in a store.\n:type order_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"id\": string*,\n \"product_id\": string*,\n \"product_variant_id\": string*,\n \"quantity\": integer*,\n \"price\": number*\n}", "id": "f279:c0:m1"} {"signature": "def all(self, store_id, order_id, get_all=False, **queryparams):", "body": "self.store_id = store_idself.order_id = order_idself.line_id = Noneif get_all:return self._iterate(url=self._build_path(store_id, '', order_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(store_id, '', order_id, ''), **queryparams)", "docstring": "Get information about an order\u2019s line items.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param order_id: The id for the order in a store.\n:type order_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f279:c0:m2"} {"signature": "def get(self, store_id, order_id, line_id, **queryparams):", "body": "self.store_id = store_idself.order_id = order_idself.line_id = line_idreturn self._mc_client._get(url=self._build_path(store_id, '', order_id, '', line_id), **queryparams)", "docstring": "Get information about a specific order line item.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param order_id: The id for the order in a store.\n:type 
order_id: :py:class:`str`\n:param line_id: The id for the line item of a cart.\n:type line_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f279:c0:m3"} {"signature": "def update(self, store_id, order_id, line_id, data):", "body": "self.store_id = store_idself.order_id = order_idself.line_id = line_idreturn self._mc_client._patch(url=self._build_path(store_id, '', order_id, '', line_id), data=data)", "docstring": "Update a specific order line item.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param order_id: The id for the order in a store.\n:type order_id: :py:class:`str`\n:param line_id: The id for the line item of a cart.\n:type line_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f279:c0:m4"} {"signature": "def delete(self, store_id, order_id, line_id):", "body": "self.store_id = store_idself.order_id = order_idself.line_id = line_idreturn self._mc_client._delete(url=self._build_path(store_id, '', order_id, '', line_id))", "docstring": "Delete a specific order line item.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param order_id: The id for the order in a store.\n:type order_id: :py:class:`str`\n:param line_id: The id for the line item of a cart.\n:type line_id: :py:class:`str`", "id": "f279:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(CampaignFeedback, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = Noneself.feedback_id = None", "docstring": "Initialize the endpoint", "id": "f280:c0:m0"} {"signature": "def create(self, campaign_id, data, **queryparams):", "body": "self.campaign_id = campaign_idif '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(campaign_id, ''), data=data, **queryparams)if response is not None:self.feedback_id = response['']else:self.feedback_id = Nonereturn response", "docstring": "Add feedback on a specific campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"message\": string*\n}\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f280:c0:m1"} {"signature": "def all(self, campaign_id, get_all=False, **queryparams):", "body": "self.campaign_id = campaign_idself.feedback_id = Noneif get_all:return self._iterate(url=self._build_path(campaign_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(campaign_id, ''), **queryparams)", "docstring": "Get team feedback while you\u2019re working together on a MailChimp\ncampaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f280:c0:m2"} {"signature": "def get(self, campaign_id, feedback_id, **queryparams):", "body": "self.campaign_id = campaign_idself.feedback_id = feedback_idreturn self._mc_client._get(url=self._build_path(campaign_id, '', feedback_id), **queryparams)", "docstring": "Get a specific feedback message from a campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param feedback_id: The unique id for the feedback 
message.\n:type feedback_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f280:c0:m3"} {"signature": "def update(self, campaign_id, feedback_id, data):", "body": "self.campaign_id = campaign_idself.feedback_id = feedback_idif '' not in data:raise KeyError('')return self._mc_client._patch(url=self._build_path(campaign_id, '', feedback_id), data=data)", "docstring": "Update a specific feedback message for a campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param feedback_id: The unique id for the feedback message.\n:type feedback_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"message\": string*\n}", "id": "f280:c0:m4"} {"signature": "def delete(self, campaign_id, feedback_id):", "body": "self.campaign_id = campaign_idself.feedback_id = feedback_idreturn self._mc_client._delete(url=self._build_path(campaign_id, '', feedback_id))", "docstring": "Remove a specific feedback message for a campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param feedback_id: The unique id for the feedback message.\n:type feedback_id: :py:class:`str`", "id": "f280:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(OpenDetails, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = None", "docstring": "Initialize the endpoint", "id": "f281:c0:m0"} {"signature": "def all(self, campaign_id, get_all=False, **queryparams):", "body": "self.campaign_id = campaign_idif get_all:return self._iterate(url=self._build_path(campaign_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(campaign_id, ''), **queryparams)", "docstring": "Get detailed information about any campaign emails that were opened by a list member.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['since'] = str", "id": "f281:c0:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ConversationMessages, self).__init__(*args, **kwargs)self.endpoint = ''self.conversation_id = Noneself.message_id = None", "docstring": "Initialize the endpoint", "id": "f282:c0:m0"} {"signature": "def create(self, conversation_id, data):", "body": "self.conversation_id = conversation_idif '' not in data:raise KeyError('')check_email(data[''])if '' not in data:raise KeyError('')if data[''] not in [True, False]:raise TypeError('')response = self._mc_client._post(url=self._build_path(conversation_id, ''), data=data)if response is not None:self.message_id = response['']else:self.message_id = Nonereturn response", "docstring": "Post a new message to a conversation.\n\n:param conversation_id: The unique id for the conversation.\n:type conversation_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"from_email\": string*,\n \"read\": boolean*\n}", "id": "f282:c0:m1"} {"signature": "def all(self, conversation_id, **queryparams):", "body": "self.conversation_id = conversation_idself.message_id = Nonereturn self._mc_client._get(url=self._build_path(conversation_id, ''), **queryparams)", 
"docstring": "Get messages from a specific conversation.\n\nThis endpoint does not currently support count and offset, preventing\nit from having the get_all parameter that most all() methods have\n\n:param conversation_id: The unique id for the conversation.\n:type conversation_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = p[\nqueryparams['is_read'] = string\nqueryparams['before_timestamp'] = string\nqueryparams['since_timestamp'] = string", "id": "f282:c0:m2"} {"signature": "def get(self, conversation_id, message_id, **queryparams):", "body": "self.conversation_id = conversation_idself.message_id = message_idreturn self._mc_client._get(url=self._build_path(conversation_id, '', message_id), **queryparams)", "docstring": "Get an individual message in a conversation.\n\n:param conversation_id: The unique id for the conversation.\n:type conversation_id: :py:class:`str`\n:param message_id: The unique id for the conversation message.\n:type message_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f282:c0:m3"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(StoreCustomers, self).__init__(*args, **kwargs)self.endpoint = ''self.store_id = Noneself.customer_id = None", "docstring": "Initialize the endpoint", "id": "f283:c0:m0"} {"signature": "def create(self, store_id, data):", "body": "self.store_id = store_idif '' not in data:raise KeyError('')if '' not in data:raise KeyError('')check_email(data[''])if '' not in data:raise KeyError('')if data[''] not in [True, False]:raise TypeError('')response = self._mc_client._post(url=self._build_path(store_id, ''), data=data)if response is not None:self.customer_id = response['']else:self.customer_id = Nonereturn response", "docstring": "Add a new customer to a store.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"id\": string*,\n \"email_address\": string*,\n \"opt_in_status\": boolean*\n}", "id": "f283:c0:m1"} {"signature": "def all(self, store_id, get_all=False, **queryparams):", "body": "self.store_id = store_idself.customer_id = Noneif get_all:return self._iterate(url=self._build_path(store_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(store_id, ''), **queryparams)", "docstring": "Get information about a store\u2019s customers.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['email_address'] = string", "id": "f283:c0:m2"} {"signature": "def get(self, store_id, customer_id, **queryparams):", "body": "self.store_id = store_idself.customer_id = customer_idreturn self._mc_client._get(url=self._build_path(store_id, '', customer_id), **queryparams)", "docstring": "Get information about a specific customer.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param customer_id: The id for the customer of a store.\n:type customer_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f283:c0:m3"} {"signature": "def update(self, store_id, 
customer_id, data):", "body": "self.store_id = store_idself.customer_id = customer_idreturn self._mc_client._patch(url=self._build_path(store_id, '', customer_id), data=data)", "docstring": "Update a customer.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param customer_id: The id for the customer of a store.\n:type customer_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f283:c0:m4"} {"signature": "def create_or_update(self, store_id, customer_id, data):", "body": "self.store_id = store_idself.customer_id = customer_idif '' not in data:raise KeyError('')if '' not in data:raise KeyError('')check_email(data[''])if '' not in data:raise KeyError('')if data[''] not in [True, False]:raise TypeError('')return self._mc_client._put(url=self._build_path(store_id, '', customer_id), data=data)", "docstring": "Add or update a customer.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param customer_id: The id for the customer of a store.\n:type customer_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"id\": string*,\n \"email_address\": string*,\n \"opt_in_status\": boolean\n}", "id": "f283:c0:m5"} {"signature": "def delete(self, store_id, customer_id):", "body": "self.store_id = store_idself.customer_id = customer_idreturn self._mc_client._delete(url=self._build_path(store_id, '', customer_id))", "docstring": "Delete a customer from a store.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param customer_id: The id for the customer of a store.\n:type customer_id: :py:class:`str`", "id": "f283:c0:m6"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListInterestCategoryInterest, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = Noneself.category_id = Noneself.interest_id = None", "docstring": "Initialize the endpoint", "id": "f284:c0:m0"} {"signature": "def create(self, list_id, category_id, data):", "body": "self.list_id = list_idself.category_id = category_idif '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(list_id, '', category_id, ''),data=data)if response is not None:self.interest_id = response['']else:self.interest_id = Nonereturn response", "docstring": "Create a new interest or \u2018group name\u2019 for a specific category.\n\nThe documentation lists only the name request body parameter so it is\nbeing documented and error-checked as if it were required based on the\ndescription of the method.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param category_id: The unique id for the interest category.\n:type category_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": string*\n}", "id": "f284:c0:m1"} {"signature": "def all(self, list_id, category_id, get_all=False, **queryparams):", "body": "self.list_id = list_idself.category_id = category_idself.interest_id = Noneif get_all:return self._iterate(url=self._build_path(list_id, '', category_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(list_id, '', category_id, ''),**queryparams)", "docstring": "Get a list of this category\u2019s interests.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param category_id: The unique id for the interest category.\n:type category_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: 
:py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f284:c0:m2"} {"signature": "def get(self, list_id, category_id, interest_id, **queryparams):", "body": "self.list_id = list_idself.category_id = category_idself.interest_id = interest_idreturn self._mc_client._get(url=self._build_path(list_id, '', category_id, '', interest_id),**queryparams)", "docstring": "Get interests or \u2018group names\u2019 for a specific category.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param category_id: The unique id for the interest category.\n:type category_id: :py:class:`str`\n:param interest_id: The specific interest or \u2018group name\u2019.\n:type interest_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f284:c0:m3"} {"signature": "def update(self, list_id, category_id, interest_id, data):", "body": "self.list_id = list_idself.category_id = category_idself.interest_id = interest_idif '' not in data:raise KeyError('')return self._mc_client._patch(url=self._build_path(list_id, '', category_id, '', interest_id),data=data)", "docstring": "Update interests or \u2018group names\u2019 for a specific category.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param category_id: The unique id for the interest category.\n:type category_id: :py:class:`str`\n:param interest_id: The specific interest or \u2018group name\u2019.\n:type interest_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": string*\n}", "id": "f284:c0:m4"} {"signature": "def delete(self, list_id, category_id, interest_id):", "body": "self.list_id = list_idself.category_id = category_idself.interest_id = interest_idreturn self._mc_client._delete(url=self._build_path(list_id, '', category_id, '', interest_id))", "docstring": "Delete interests or group names in a specific category.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param category_id: The unique id for the interest category.\n:type category_id: :py:class:`str`\n:param interest_id: The specific interest or \u2018group name\u2019.\n:type interest_id: :py:class:`str`", "id": "f284:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(CampaignFolders, self).__init__(*args, **kwargs)self.endpoint = ''self.folder_id = None", "docstring": "Initialize the endpoint", "id": "f285:c0:m0"} {"signature": "def create(self, data):", "body": "if '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(), data=data)if response is not None:self.folder_id = response['']else:self.folder_id = Nonereturn response", "docstring": "Create a new campaign folder.\n\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": string*\n}", "id": "f285:c0:m1"} {"signature": "def all(self, get_all=False, **queryparams):", "body": "self.folder_id = Noneif get_all:return self._iterate(url=self._build_path(), **queryparams)else:return self._mc_client._get(url=self._build_path(), **queryparams)", "docstring": "Get all folders used to organize campaigns.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = 
[]\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f285:c0:m2"} {"signature": "def get(self, folder_id, **queryparams):", "body": "self.folder_id = folder_idreturn self._mc_client._get(url=self._build_path(folder_id), **queryparams)", "docstring": "Get information about a specific folder used to organize campaigns.\n\n:param folder_id: The unique id for the campaign folder.\n:type folder_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f285:c0:m3"} {"signature": "def update(self, folder_id, data):", "body": "self.folder_id = folder_idif '' not in data:raise KeyError('')return self._mc_client._patch(url=self._build_path(folder_id), data=data)", "docstring": "Update a specific folder used to organize campaigns.\n\n:param folder_id: The unique id for the campaign folder.\n:type folder_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": string*\n}", "id": "f285:c0:m4"} {"signature": "def delete(self, folder_id):", "body": "self.folder_id = folder_idreturn self._mc_client._delete(url=self._build_path(folder_id))", "docstring": "Delete a specific campaign folder, and mark all the campaigns in the\nfolder as \u2018unfiled\u2019.\n\n:param folder_id: The unique id for the campaign folder.\n:type folder_id: :py:class:`str`", "id": "f285:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListActivity, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = None", "docstring": "Initialize the endpoint", "id": "f286:c0:m0"} {"signature": "def all(self, list_id, **queryparams):", "body": "self.list_id = list_idreturn self._mc_client._get(url=self._build_path(list_id, ''), **queryparams)", "docstring": "Get up to the previous 180 days of daily detailed aggregated activity\nstats for a list, not including Automation activity.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f286:c0:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListMemberActivity, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = Noneself.subscriber_hash = None", "docstring": "Initialize the endpoint", "id": "f287:c0:m0"} {"signature": "def all(self, list_id, subscriber_hash, **queryparams):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.list_id = list_idself.subscriber_hash = subscriber_hashreturn self._mc_client._get(url=self._build_path(list_id, '', subscriber_hash, ''), **queryparams)", "docstring": "Get the last 50 events of a member\u2019s activity on a specific list,\nincluding opens, clicks, and unsubscribes.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f287:c0:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ReportCampaignAdvice, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = None", "docstring": "Initialize the endpoint", "id": "f288:c0:m0"} {"signature": "def all(self, campaign_id, **queryparams):", "body": "self.campaign_id = 
campaign_idreturn self._mc_client._get(url=self._build_path(campaign_id, ''), **queryparams)", "docstring": "Get feedback based on a campaign\u2019s statistics. Advice feedback is\nbased on campaign stats like opens, clicks, unsubscribes, bounces, and\nmore.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f288:c0:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(CampaignActions, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = None", "docstring": "Initialize the endpoint", "id": "f289:c0:m0"} {"signature": "def cancel(self, campaign_id):", "body": "self.campaign_id = campaign_idreturn self._mc_client._post(url=self._build_path(campaign_id, ''))", "docstring": "Cancel a Regular or Plain-Text Campaign after you send, before all of\nyour recipients receive it. This feature is included with MailChimp\nPro.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`", "id": "f289:c0:m1"} {"signature": "def pause(self, campaign_id):", "body": "self.campaign_id = campaign_idreturn self._mc_client._post(url=self._build_path(campaign_id, ''))", "docstring": "Pause an RSS-Driven campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`", "id": "f289:c0:m2"} {"signature": "def replicate(self, campaign_id):", "body": "self.campaign_id = campaign_idreturn self._mc_client._post(url=self._build_path(campaign_id, ''))", "docstring": "Replicate a campaign in saved or send status.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`", "id": "f289:c0:m3"} {"signature": "def resume(self, campaign_id):", "body": "self.campaign_id = campaign_idreturn self._mc_client._post(url=self._build_path(campaign_id, ''))", "docstring": "Resume an RSS-Driven campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`", "id": "f289:c0:m4"} {"signature": "def schedule(self, campaign_id, data):", "body": "if not data['']:raise ValueError('')else:if data[''].tzinfo is None:raise ValueError('')else:if data[''].tzinfo.utcoffset(None) != timedelta():raise ValueError('')if data[''].minute not in [, , , ]:raise ValueError('')data[''] = data[''].strftime('')self.campaign_id = campaign_idreturn self._mc_client._post(url=self._build_path(campaign_id, ''), data=data)", "docstring": "Schedule a campaign for delivery. If you\u2019re using Multivariate\nCampaigns to test send times or sending RSS Campaigns, use the send\naction instead.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"schedule_time\": datetime* (A UTC timezone datetime that ends on the quarter hour [:00, :15, :30, or :45])\n}", "id": "f289:c0:m5"} {"signature": "def send(self, campaign_id):", "body": "self.campaign_id = campaign_idreturn self._mc_client._post(url=self._build_path(campaign_id, ''))", "docstring": "Send a MailChimp campaign. For RSS Campaigns, the campaign will send\naccording to its schedule. 
All other campaigns will send immediately.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`", "id": "f289:c0:m6"} {"signature": "def unschedule(self, campaign_id):", "body": "self.campaign_id = campaign_idreturn self._mc_client._post(url=self._build_path(campaign_id, ''))", "docstring": "Unschedule a scheduled campaign that hasn\u2019t started sending.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`", "id": "f289:c0:m8"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(CampaignContent, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = None", "docstring": "Initialize the endpoint", "id": "f290:c0:m0"} {"signature": "def get(self, campaign_id, **queryparams):", "body": "self.campaign_id = campaign_idreturn self._mc_client._get(url=self._build_path(campaign_id, ''), **queryparams)", "docstring": "Get the HTML and plain-text content for a campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f290:c0:m1"} {"signature": "def update(self, campaign_id, data):", "body": "self.campaign_id = campaign_idreturn self._mc_client._put(url=self._build_path(campaign_id, ''), data=data)", "docstring": "Set the content for a campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f290:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ReportClickDetailReports, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = Noneself.link_id = Noneself.members = ReportClickDetailMembers(self)", "docstring": "Initialize the endpoint", "id": "f291:c0:m0"} {"signature": "def all(self, campaign_id, get_all=False, **queryparams):", "body": "self.campaign_id = campaign_idself.link_id = Noneif get_all:return self._iterate(url=self._build_path(campaign_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(campaign_id, ''), **queryparams)", "docstring": "Get information about clicks on specific links in your MailChimp\ncampaigns.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f291:c0:m1"} {"signature": "def get(self, campaign_id, link_id, **queryparams):", "body": "self.campaign_id = campaign_idself.link_id = link_idreturn self._mc_client._get(url=self._build_path(campaign_id, '', link_id), **queryparams)", "docstring": "Get click details for a specific link in a campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param link_id: The id for the link.\n:type link_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f291:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(StoreCartLines, self).__init__(*args, **kwargs)self.endpoint = ''self.store_id = Noneself.cart_id = Noneself.line_id = None", "docstring": "Initialize the endpoint", "id": "f292:c0:m0"} {"signature": "def create(self, 
store_id, cart_id, data):", "body": "self.store_id = store_idself.cart_id = cart_idif '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(store_id, '', cart_id, ''), data=data)if response is not None:self.line_id = response['']else:self.line_id = Nonereturn response", "docstring": "Add a new line item to an existing cart.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param cart_id: The id for the cart.\n:type cart_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"id\": string*,\n \"product_id\": string*,\n \"product_variant_id\": string*,\n \"quantity\": integer*,\n \"price\": number*\n}", "id": "f292:c0:m1"} {"signature": "def all(self, store_id, cart_id, get_all=False, **queryparams):", "body": "self.store_id = store_idself.cart_id = cart_idself.line_id = Noneif get_all:return self._iterate(url=self._build_path(store_id, '', cart_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(store_id, '', cart_id, ''), **queryparams)", "docstring": "Get information about a cart\u2019s line items.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param cart_id: The id for the cart.\n:type cart_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f292:c0:m2"} {"signature": "def get(self, store_id, cart_id, line_id, **queryparams):", "body": "self.store_id = store_idself.cart_id = cart_idself.line_id = line_idreturn self._mc_client._get(url=self._build_path(store_id, '', cart_id, '', line_id), **queryparams)", "docstring": "Get information about a specific cart line item.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param cart_id: The id for the cart.\n:type cart_id: :py:class:`str`\n:param line_id: The id for the line item of a cart.\n:type line_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f292:c0:m3"} {"signature": "def update(self, store_id, cart_id, line_id, data):", "body": "self.store_id = store_idself.cart_id = cart_idself.line_id = line_idreturn self._mc_client._patch(url=self._build_path(store_id, '', cart_id, '', line_id), data=data)", "docstring": "Update a specific cart line item.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param cart_id: The id for the cart.\n:type cart_id: :py:class:`str`\n:param line_id: The id for the line item of a cart.\n:type line_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f292:c0:m4"} {"signature": "def delete(self, store_id, cart_id, line_id):", "body": "self.store_id = store_idself.cart_id = cart_idself.line_id = line_idreturn self._mc_client._delete(url=self._build_path(store_id, '', cart_id, '', line_id))", "docstring": "Delete a cart.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param cart_id: The id for the cart.\n:type cart_id: :py:class:`str`\n:param line_id: The id for the line item of a cart.\n:type line_id: :py:class:`str`", "id": "f292:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", 
"body": "super(SearchMembers, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = None", "docstring": "Initialize the endpoint", "id": "f293:c0:m0"} {"signature": "def get(self, **queryparams):", "body": "if '' in queryparams:self.list_id = queryparams['']else:self.list_id = Nonereturn self._mc_client._get(url=self._build_path(), **queryparams)", "docstring": "Search for list members. This search can be restricted to a specific\nlist, or can be used to search across all lists in an account.\n\n:param queryparams: The query string parameters\nqueryparams['fields'] = array\nqueryparams['exclude_fields'] = array\nqueryparams['query'] = string\nqueryparams['list_id'] = string\nqueryparams['offset'] = integer", "id": "f293:c0:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListSegments, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = Noneself.segment_id = Noneself.members = ListSegmentMembers(self)", "docstring": "Initialize the endpoint", "id": "f294:c0:m0"} {"signature": "def create(self, list_id, data):", "body": "self.list_id = list_idif '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(list_id, ''), data=data)if response is not None:self.segment_id = response['']else:self.segment_id = Nonereturn response", "docstring": "Create a new segment in a specific list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": string*\n}", "id": "f294:c0:m1"} {"signature": "def all(self, list_id, get_all=False, **queryparams):", "body": "self.list_id = list_idself.segment_id = Noneif get_all:return self._iterate(url=self._build_path(list_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(list_id, ''), **queryparams)", "docstring": "Get information about all available segments for a specific list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['type'] = string\nqueryparams['before_created_at'] = string\nqueryparams['since_created_at'] = string\nqueryparams['before_updated_at'] = string\nqueryparams['since_updated_at'] = string", "id": "f294:c0:m2"} {"signature": "def get(self, list_id, segment_id, **queryparams):", "body": "self.list_id = list_idself.segment_id = segment_idreturn self._mc_client._get(url=self._build_path(list_id, '', segment_id), **queryparams)", "docstring": "Get information about a specific segment.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param segment_id: The unique id for the segment.\n:type segment_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f294:c0:m3"} {"signature": "def update(self, list_id, segment_id, data):", "body": "self.list_id = list_idself.segment_id = segment_idif '' not in data:raise KeyError('')return self._mc_client._patch(url=self._build_path(list_id, '', segment_id), data=data)", "docstring": "Update a specific segment in a list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param segment_id: The unique id for the segment.\n:type segment_id: 
:py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": string*\n}", "id": "f294:c0:m4"} {"signature": "def update_members(self, list_id, segment_id, data):", "body": "self.list_id = list_idself.segment_id = segment_idreturn self._mc_client._post(url=self._build_path(list_id, '', segment_id), data=data)", "docstring": "Batch add/remove list members to static segment.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param segment_id: The unique id for the segment.\n:type segment_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"members_to_add\": array,\n \"members_to_remove\": array\n}", "id": "f294:c0:m5"} {"signature": "def delete(self, list_id, segment_id):", "body": "self.list_id = list_idself.segment_id = segment_idreturn self._mc_client._delete(url=self._build_path(list_id, '', segment_id))", "docstring": "Delete a specific segment in a list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param segment_id: The unique id for the segment.\n:type segment_id: :py:class:`str`", "id": "f294:c0:m6"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(StoreOrders, self).__init__(*args, **kwargs)self.endpoint = ''self.store_id = Noneself.order_id = Noneself.lines = StoreOrderLines(self)", "docstring": "Initialize the endpoint", "id": "f295:c0:m0"} {"signature": "def create(self, store_id, data):", "body": "self.store_id = store_idif '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data:raise KeyError('')if not re.match(r\"\", data['']):raise ValueError('')if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')for line in data['']:if '' not in line:raise KeyError('')if '' not in line:raise KeyError('')if '' not in line:raise KeyError('')if '' not in line:raise KeyError('')if '' not in line:raise KeyError('')response = self._mc_client._post(url=self._build_path(store_id, ''), data=data)if response is not None:self.order_id = response['']else:self.order_id = Nonereturn response", "docstring": "Add a new order to a store.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"id\": string*,\n \"customer\": object*\n {\n \"id\": string*\n },\n \"currency_code\": string*,\n \"order_total\": number*,\n \"lines\": array*\n [\n {\n \"id\": string*,\n \"product_id\": string*,\n \"product_variant_id\": string*,\n \"quantity\": integer*,\n \"price\": number*\n }\n ]\n}", "id": "f295:c0:m1"} {"signature": "def all(self, store_id, get_all=False, **queryparams):", "body": "self.store_id = store_idself.order_id = Noneif get_all:return self._iterate(url=self._build_path(store_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(store_id, ''), **queryparams)", "docstring": "Get information about a store\u2019s orders.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['customer_id'] = string", "id": "f295:c0:m2"} {"signature": "def get(self, store_id, order_id, **queryparams):", "body": "self.store_id = store_idself.order_id = 
order_idreturn self._mc_client._get(url=self._build_path(store_id, '', order_id), **queryparams)", "docstring": "Get information about a specific order.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param order_id: The id for the order in a store.\n:type order_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f295:c0:m3"} {"signature": "def update(self, store_id, order_id, data):", "body": "self.store_id = store_idself.order_id = order_idreturn self._mc_client._patch(url=self._build_path(store_id, '', order_id), data=data)", "docstring": "Update a specific order.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param order_id: The id for the order in a store.\n:type order_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f295:c0:m4"} {"signature": "def delete(self, store_id, order_id):", "body": "self.store_id = store_idself.order_id = order_idreturn self._mc_client._delete(url=self._build_path(store_id, '', order_id))", "docstring": "Delete an order.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param order_id: The id for the order in a store.\n:type order_id: :py:class:`str`", "id": "f295:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(AutomationEmailActions, self).__init__(*args, **kwargs)self.endpoint = ''self.workflow_id = Noneself.email_id = None", "docstring": "Initialize the endpoint", "id": "f296:c0:m0"} {"signature": "def pause(self, workflow_id, email_id):", "body": "self.workflow_id = workflow_idself.email_id = email_idreturn self._mc_client._post(url=self._build_path(workflow_id, '', email_id, ''))", "docstring": "Pause an automated email.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`\n:param email_id: The unique id for the Automation workflow email.\n:type email_id: :py:class:`str`", "id": "f296:c0:m1"} {"signature": "def start(self, workflow_id, email_id):", "body": "self.workflow_id = workflow_idself.email_id = email_idreturn self._mc_client._post(url=self._build_path(workflow_id, '', email_id, ''))", "docstring": "Start an automated email.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`\n:param email_id: The unique id for the Automation workflow email.\n:type email_id: :py:class:`str`", "id": "f296:c0:m2"} {"signature": "def delete(self, workflow_id, email_id):", "body": "self.workflow_id = workflow_idself.email_id = email_idreturn self._mc_client._delete(url=self._build_path(workflow_id, '', email_id))", "docstring": "Removes an individual Automation workflow email.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`\n:param email_id: The unique id for the Automation workflow email.\n:type email_id: :py:class:`str`", "id": "f296:c0:m3"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(StorePromoCodes, self).__init__(*args, **kwargs)self.endpoint = ''self.store_id = None", "docstring": "Initialize the Endpoint\n:param args:\n:param kwargs:", "id": "f297:c0:m0"} {"signature": "def create(self, store_id, promo_rule_id, data):", "body": "self.store_id = store_idif '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(store_id, '', promo_rule_id, ''), data=data)if 
response is not None:return response", "docstring": "Add a new promo code to a store.\n\n:param store_id: The store id\n:type store_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict'\ndata = {\n \"id\": string*,\n \"code\": string*,\n \"redemption_url\": string*,\n \"usage_count\": string,\n \"enabled\": boolean,\n \"created_at_foreign\": string,\n \"updated_at_foreign\": string,\n}", "id": "f297:c0:m1"} {"signature": "def all(self, store_id, promo_rule_id, get_all=False, **queryparams):", "body": "self.store_id=store_idself.promo_rule_id=promo_rule_idif get_all:return self._iterate(url=self._build_path(store_id, '', promo_rule_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(store_id, '', promo_rule_id), **queryparams)", "docstring": "Get information about a store\u2019s promo codes.\n\n:param store_id: The store's id\n:type store_id: `str`\n:param promo_rule_id: The store promo rule id\n:type store_id: `str`\n:param get_all:\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f297:c0:m2"} {"signature": "def get(self, store_id, promo_rule_id, promo_code_id, **queryparams):", "body": "self.store_id = store_idself.promo_rule_id = promo_rule_idself.promo_code_id = promo_code_idreturn self._mc_client._get(url=self._build_path(store_id, '', promo_rule_id, '', promo_code_id), **queryparams)", "docstring": "Get information about a specific promo code.\n\n:param store_id: The store's id\n:type store_id: `string`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f297:c0:m3"} {"signature": "def update(self, store_id, promo_rule_id, promo_code_id, data):", "body": "self.store_id = store_idself.promo_rule_id = promo_rule_idself.promo_code_id = promo_code_idreturn self._mc_client._patch(url=self._build_path(store_id, '', promo_rule_id, '', promo_code_id), data=data)", "docstring": "Update a promo code\n\n:param store_id: The store id\n:type :py:class:`str`\n:param promo_rule_id: The id for the promo rule of a store.\n:type :py:class:`str`\n:param promo_code_id: The id for the promo code of a store.\n:type :py:class:`str`\n:param data:\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"id\": string,\n \"title\": string,\n \"description\": string,\n \"starts_at\": string,\n \"ends_at\": string,\n \"amount\": number,\n \"type\": string,\n \"target\": string,\n \"enabled\": boolean,\n \"created_at_foreign\": string,\n \"updated_at_foreign\": string,\n}", "id": "f297:c0:m4"} {"signature": "def delete(self, store_id, promo_rule_id, promo_code_id):", "body": "self.store_id=store_idself.promo_rule_id=promo_rule_idreturn self._mc_client._delete(url=self._build_path(store_id, '', promo_rule_id, '', promo_code_id))", "docstring": "Delete a promo code\n:param store_id: The store id\n:type :py:class:`str`\n:param promo_rule_id: The id for the promo rule of a store.\n:type :py:class:`str`\n:param promo_code_id: The id for the promo code of a store.\n:type :py:class:`str`", "id": "f297:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(Ping, self).__init__(*args, **kwargs)self.endpoint = ''", "docstring": "Initialize the endpoint", "id": "f298:c0:m0"} {"signature": "def get(self):", 
"body": "return self._mc_client._get(url=self._build_path())", "docstring": "A health check for the API that won\u2019t return any account-specific information.", "id": "f298:c0:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListInterestCategories, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = Noneself.category_id = Noneself.interests = ListInterestCategoryInterest(self)", "docstring": "Initialize the endpoint", "id": "f299:c0:m0"} {"signature": "def create(self, list_id, data):", "body": "self.list_id = list_idif '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if data[''] not in ['', '', '', '']:raise ValueError('''')response = self._mc_client._post(url=self._build_path(list_id, ''), data=data)if response is not None:self.category_id = response['']else:self.category_id = Nonereturn response", "docstring": "Create a new interest category.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"title\": string*,\n \"type\": string* (Must be one of 'checkboxes', 'dropdown', 'radio', or 'hidden')\n}", "id": "f299:c0:m1"} {"signature": "def all(self, list_id, get_all=False, **queryparams):", "body": "self.list_id = list_idself.category_id = Noneif get_all:return self._iterate(url=self._build_path(list_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(list_id, ''), **queryparams)", "docstring": "Get information about a list\u2019s interest categories.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['type'] = string", "id": "f299:c0:m2"} {"signature": "def get(self, list_id, category_id, **queryparams):", "body": "self.list_id = list_idself.category_id = category_idreturn self._mc_client._get(url=self._build_path(list_id, '', category_id), **queryparams)", "docstring": "Get information about a specific interest category.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param category_id: The unique id for the interest category.\n:type category_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f299:c0:m3"} {"signature": "def update(self, list_id, category_id, data):", "body": "self.list_id = list_idself.category_id = category_idif '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if data[''] not in ['', '', '', '']:raise ValueError('''')return self._mc_client._patch(url=self._build_path(list_id, '', category_id), data=data)", "docstring": "Update a specific interest category.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param category_id: The unique id for the interest category.\n:type category_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"title\": string*,\n \"type\": string* (Must be one of 'checkboxes', 'dropdown', 'radio', or 'hidden')\n}", "id": "f299:c0:m4"} {"signature": "def delete(self, list_id, category_id):", "body": "self.list_id = list_idself.category_id = category_idreturn self._mc_client._delete(url=self._build_path(list_id, '', category_id))", 
"docstring": "Delete a specific interest category.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param category_id: The unique id for the interest category.\n:type category_id: :py:class:`str`", "id": "f299:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ReportGoogleAnalytics, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = Noneself.profile_id = None", "docstring": "Initialize the endpoint", "id": "f300:c0:m0"} {"signature": "def all(self, campaign_id, get_all=False, **queryparams):", "body": "self.campaign_id = campaign_idif get_all:return self._iterate(url=self._build_path(campaign_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(campaign_id, ''), **queryparams)", "docstring": "Get a summary of Google Analytics reports for a specific campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f300:c0:m1"} {"signature": "def get(self, campaign_id, profile_id, **queryparams):", "body": "self.campaign_id = campaign_idself.profile_id = profile_idreturn self._mc_client._get(url=self._build_path(campaign_id, '', profile_id), **queryparams)", "docstring": "Get information about a specific Google Analytics report for a campaign.\n\n:param campaign_id: The unique id for the campaign\n:type campaign_id: :py:class:`str`\n:param profile_id: The Google Analytics View ID\n:type campaign_id: :py:class:`str`\n:param queryparams:\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f300:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListMergeFields, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = Noneself.merge_id = None", "docstring": "Initialize the endpoint", "id": "f301:c0:m0"} {"signature": "def create(self, list_id, data):", "body": "self.list_id = list_idif '' not in data:raise KeyError('')if '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(list_id, ''), data=data)if response is not None:self.merge_id = response['']else:self.merge_id = Nonereturn response", "docstring": "Add a new merge field for a specific list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": string*,\n \"type\": string*\n}", "id": "f301:c0:m1"} {"signature": "def all(self, list_id, get_all=False, **queryparams):", "body": "self.list_id = list_idself.merge_id = Noneif get_all:return self._iterate(url=self._build_path(list_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(list_id, ''), **queryparams)", "docstring": "Get a list of all merge fields (formerly merge vars) for a list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['type'] = string\nqueryparams['required'] = boolean", "id": "f301:c0:m2"} {"signature": "def get(self, list_id, merge_id):", "body": "self.list_id = 
list_idself.merge_id = merge_idreturn self._mc_client._get(url=self._build_path(list_id, '', merge_id))", "docstring": "Get information about a specific merge field in a list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param merge_id: The id for the merge field.\n:type merge_id: :py:class:`str`", "id": "f301:c0:m3"} {"signature": "def update(self, list_id, merge_id, data):", "body": "self.list_id = list_idself.merge_id = merge_idif '' not in data:raise KeyError('')return self._mc_client._patch(url=self._build_path(list_id, '', merge_id), data=data)", "docstring": "Update a specific merge field in a list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param merge_id: The id for the merge field.\n:type merge_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": string*\n}", "id": "f301:c0:m4"} {"signature": "def delete(self, list_id, merge_id):", "body": "self.list_id = list_idself.merge_id = merge_idreturn self._mc_client._delete(url=self._build_path(list_id, '', merge_id))", "docstring": "Delete a specific merge field in a list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param merge_id: The id for the merge field.\n:type merge_id: :py:class:`str`", "id": "f301:c0:m5"} {"signature": "def all(self, campaign_id, get_all=False, **queryparams):", "body": "self.campaign_id = campaign_idself.subscriber_hash = Noneif get_all:return self._iterate(url=self._build_path(campaign_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(campaign_id, ''), **queryparams)", "docstring": "Get information about members who have unsubscribed from a specific\ncampaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f302:c0:m1"} {"signature": "def get(self, campaign_id, subscriber_hash, **queryparams):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.campaign_id = campaign_idself.subscriber_hash = subscriber_hashreturn self._mc_client._get(url=self._build_path(campaign_id, '', subscriber_hash), **queryparams)", "docstring": "Get information about a specific list member who unsubscribed from a\ncampaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f302:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListMembers, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = Noneself.subscriber_hash = Noneself.activity = ListMemberActivity(self)self.goals = ListMemberGoals(self)self.notes = ListMemberNotes(self)", "docstring": "Initialize the endpoint", "id": "f303:c0:m0"} {"signature": "def create(self, list_id, data):", "body": "self.list_id = list_idif '' not in data:raise KeyError('')if data[''] not in ['', '', '', '', '']:raise ValueError('''')if '' not in data:raise KeyError('')check_email(data[''])response = 
self._mc_client._post(url=self._build_path(list_id, ''), data=data)if response is not None:self.subscriber_hash = response['']else:self.subscriber_hash = Nonereturn response", "docstring": "Add a new member to the list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"status\": string*, (Must be one of 'subscribed', 'unsubscribed', 'cleaned',\n 'pending', or 'transactional')\n \"email_address\": string*\n}", "id": "f303:c0:m1"} {"signature": "def all(self, list_id, get_all=False, **queryparams):", "body": "self.list_id = list_idself.subscriber_hash = Noneif get_all:return self._iterate(url=self._build_path(list_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(list_id, ''), **queryparams)", "docstring": "Get information about members in a specific MailChimp list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['email_type'] = string\nqueryparams['status'] = string\nqueryparams['before_timestamp_opt'] = string\nqueryparams['since_timestamp_opt'] = string\nqueryparams['before_last_changed'] = string\nqueryparams['since_last_changed'] = string\nqueryparams['unique_email_id'] = string\nqueryparams['vip_only'] = boolean\nqueryparams['interest_category_id'] = string\nqueryparams['interest_ids'] = string\nqueryparams['interest_match'] = string", "id": "f303:c0:m2"} {"signature": "def get(self, list_id, subscriber_hash, **queryparams):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.list_id = list_idself.subscriber_hash = subscriber_hashreturn self._mc_client._get(url=self._build_path(list_id, '', subscriber_hash), **queryparams)", "docstring": "Get information about a specific list member, including a currently\nsubscribed, unsubscribed, or bounced member.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f303:c0:m3"} {"signature": "def update(self, list_id, subscriber_hash, data):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.list_id = list_idself.subscriber_hash = subscriber_hashreturn self._mc_client._patch(url=self._build_path(list_id, '', subscriber_hash), data=data)", "docstring": "Update information for a specific list member.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f303:c0:m4"} {"signature": "def create_or_update(self, list_id, subscriber_hash, data):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.list_id = list_idself.subscriber_hash = subscriber_hashif '' not in data:raise KeyError('')check_email(data[''])if '' not in data:raise KeyError('')if data[''] not in ['', '', '', '', '']:raise ValueError('''')return 
self._mc_client._put(url=self._build_path(list_id, '', subscriber_hash), data=data)", "docstring": "Add or update a list member.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"email_address\": string*,\n \"status_if_new\": string* (Must be one of 'subscribed',\n 'unsubscribed', 'cleaned', 'pending', or 'transactional')\n}", "id": "f303:c0:m5"} {"signature": "def delete(self, list_id, subscriber_hash):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.list_id = list_idself.subscriber_hash = subscriber_hashreturn self._mc_client._delete(url=self._build_path(list_id, '', subscriber_hash))", "docstring": "Delete a member from a list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`", "id": "f303:c0:m6"} {"signature": "def delete_permanent(self, list_id, subscriber_hash):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.list_id = list_idself.subscriber_hash = subscriber_hashreturn self._mc_client._post(url=self._build_path(list_id, '', subscriber_hash, '', ''))", "docstring": "Delete permanently a member from a list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`", "id": "f303:c0:m7"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(BatchOperations, self).__init__(*args, **kwargs)self.endpoint = ''self.batch_id = Noneself.operation_status = None", "docstring": "Initialize the endpoint", "id": "f304:c0:m0"} {"signature": "def create(self, data):", "body": "if '' not in data:raise KeyError('')for op in data['']:if '' not in op:raise KeyError('')if op[''] not in ['', '', '', '', '']:raise ValueError(''''.format(op['']))if '' not in op:raise KeyError('')return self._mc_client._post(url=self._build_path(), data=data)", "docstring": "Begin processing a batch operations request.\n\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"operations\": array*\n [\n {\n \"method\": string* (Must be one of \"GET\", \"POST\", \"PUT\", \"PATCH\", or \"DELETE\")\n \"path\": string*,\n }\n ]\n}", "id": "f304:c0:m1"} {"signature": "def all(self, get_all=False, **queryparams):", "body": "self.batch_id = Noneself.operation_status = Noneif get_all:return self._iterate(url=self._build_path(), **queryparams)else:return self._mc_client._get(url=self._build_path(), **queryparams)", "docstring": "Get a summary of batch requests that have been made.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f304:c0:m2"} {"signature": "def get(self, batch_id, **queryparams):", "body": "self.batch_id = batch_idself.operation_status = Nonereturn self._mc_client._get(url=self._build_path(batch_id), **queryparams)", "docstring": "Get the status of a batch request.\n\n:param batch_id: The unique id for the batch 
operation.\n:type batch_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f304:c0:m3"} {"signature": "def delete(self, batch_id):", "body": "self.batch_id = batch_idself.operation_status = Nonereturn self._mc_client._delete(url=self._build_path(batch_id))", "docstring": "Stops a batch request from running. Since only one batch request is\nrun at a time, this can be used to cancel a long running request. The\nresults of any completed operations will not be available after this\ncall.\n\n:param batch_id: The unique id for the batch operation.\n:type batch_id: :py:class:`str`", "id": "f304:c0:m4"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(FileManagerFolders, self).__init__(*args, **kwargs)self.endpoint = ''self.folder_id = None", "docstring": "Initialize the endpoint", "id": "f305:c0:m0"} {"signature": "def create(self, data):", "body": "if '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(), data=data)if response is not None:self.folder_id = response['']else:self.folder_id = Nonereturn response", "docstring": "Create a new folder in the File Manager.\n\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": string*\n}", "id": "f305:c0:m1"} {"signature": "def all(self, get_all=False, **queryparams):", "body": "self.folder_id = Noneif get_all:return self._iterate(url=self._build_path(), **queryparams)else:return self._mc_client._get(url=self._build_path(), **queryparams)", "docstring": "Get a list of all folders in the File Manager.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['created_by'] = string\nqueryparams['before_created_at'] = string\nqueryparams['since_created_at'] = string", "id": "f305:c0:m2"} {"signature": "def get(self, folder_id, **queryparams):", "body": "self.folder_id = folder_idreturn self._mc_client._get(url=self._build_path(folder_id), **queryparams)", "docstring": "Get information about a specific folder in the File Manager.\n\n:param folder_id: The unique id for the File Manager folder.\n:type folder_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f305:c0:m3"} {"signature": "def update(self, folder_id, data):", "body": "self.folder_id = folder_idif '' not in data:raise KeyError('')return self._mc_client._patch(url=self._build_path(folder_id), data=data)", "docstring": "Update a specific File Manager folder.\n\n:param folder_id: The unique id for the File Manager folder.\n:type folder_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": string*\n}", "id": "f305:c0:m4"} {"signature": "def delete(self, folder_id):", "body": "self.folder_id = folder_idreturn self._mc_client._delete(url=self._build_path(folder_id))", "docstring": "Delete a specific folder in the File Manager.\n\n:param folder_id: The unique id for the File Manager folder.\n:type folder_id: :py:class:`str`", "id": "f305:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListMemberNotes, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = Noneself.subscriber_hash = Noneself.note_id = None", "docstring": 
"Initialize the endpoint", "id": "f306:c0:m0"} {"signature": "def create(self, list_id, subscriber_hash, data):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.list_id = list_idself.subscriber_hash = subscriber_hashif '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(list_id, '', subscriber_hash, ''), data=data)if response is not None:self.note_id = response['']else:self.note_id = Nonereturn response", "docstring": "Add a new note for a specific subscriber.\n\nThe documentation lists only the note request body parameter so it is\nbeing documented and error-checked as if it were required based on the\ndescription of the method.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"note\": string*\n}", "id": "f306:c0:m1"} {"signature": "def all(self, list_id, subscriber_hash, get_all=False, **queryparams):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.list_id = list_idself.subscriber_hash = subscriber_hashself.note_id = Noneif get_all:return self._iterate(url=self._build_path(list_id, '', subscriber_hash, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(list_id, '', subscriber_hash, ''), **queryparams)", "docstring": "Get recent notes for a specific list member.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f306:c0:m2"} {"signature": "def get(self, list_id, subscriber_hash, note_id, **queryparams):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.list_id = list_idself.subscriber_hash = subscriber_hashself.note_id = note_idreturn self._mc_client._get(url=self._build_path(list_id, '', subscriber_hash, '', note_id),**queryparams)", "docstring": "Get a specific note for a specific list member.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param note_id: The id for the note.\n:type note_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f306:c0:m3"} {"signature": "def update(self, list_id, subscriber_hash, note_id, data):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.list_id = list_idself.subscriber_hash = subscriber_hashself.note_id = note_idif '' not in data:raise KeyError('')return self._mc_client._patch(url=self._build_path(list_id, '', subscriber_hash, '', note_id),data=data)", "docstring": "Update a specific note for a specific list member.\n\nThe documentation lists only the note request body parameter so it is\nbeing documented and error-checked as if it were required based on the\ndescription of the method.\n\n:param list_id: The unique id for the list.\n:type list_id: 
:py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param note_id: The id for the note.\n:type note_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"note\": string*\n}", "id": "f306:c0:m4"} {"signature": "def delete(self, list_id, subscriber_hash, note_id):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.list_id = list_idself.subscriber_hash = subscriber_hashself.note_id = note_idreturn self._mc_client._delete(url=self._build_path(list_id, '', subscriber_hash, '', note_id))", "docstring": "Delete a specific note for a specific list member.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param note_id: The id for the note.\n:type note_id: :py:class:`str`", "id": "f306:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(StorePromoRules, self).__init__(*args, **kwargs)self.endpoint = ''self.store_id = None", "docstring": "Initialize the Endpoint\n:param args:\n:param kwargs:", "id": "f307:c0:m0"} {"signature": "def create(self, store_id, data):", "body": "self.store_id = store_idif '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(store_id, ''), data=data)if response is not None:return response", "docstring": "Add new promo rule to a store\n\n:param store_id: The store id\n:type store_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict'\ndata = {\n \"id\": string*,\n \"title\": string,\n \"description\": string*,\n \"starts_at\": string,\n \"ends_at\": string,\n \"amount\": number*,\n \"type\": string*,\n \"target\": string*,\n \"enabled\": boolean,\n \"created_at_foreign\": string,\n \"updated_at_foreign\": string,\n}", "id": "f307:c0:m1"} {"signature": "def all(self, store_id, get_all=False, **queryparams):", "body": "self.store_id = store_idif get_all:return self._iterate(url=self._build_path(store_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(store_id, ''), **queryparams)", "docstring": "Get information about a store\u2019s promo rules.\n\n:param store_id: The store's id\n:type store_id: `str`\n:param get_all:\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f307:c0:m2"} {"signature": "def get(self, store_id, promo_rule_id, **queryparams):", "body": "self.store_id = store_idself.promo_rule_id = promo_rule_idreturn self._mc_client._get(url=self._build_path(store_id, '', promo_rule_id), **queryparams)", "docstring": "Get information about a specific promo rule.\n\n:param store_id: The store's id\n:type store_id: `string`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f307:c0:m3"} {"signature": "def update(self, store_id, promo_rule_id, data):", "body": "self.store_id = store_idself.promo_rule_id = promo_rule_idreturn self._mc_client._patch(url=self._build_path(store_id, '', 
promo_rule_id), data=data)", "docstring": "Update a promo rule\n\n:param store_id: The store id\n:type store_id: :py:class:`str`\n:param promo_rule_id: The id for the promo rule of a store.\n:type promo_rule_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"id\": string,\n    \"title\": string,\n    \"description\": string,\n    \"starts_at\": string,\n    \"ends_at\": string,\n    \"amount\": number,\n    \"type\": string,\n    \"target\": string,\n    \"enabled\": boolean,\n    \"created_at_foreign\": string,\n    \"updated_at_foreign\": string,\n}", "id": "f307:c0:m4"} {"signature": "def delete(self, store_id, promo_rule_id):", "body": "self.store_id=store_idself.promo_rule_id=promo_rule_idreturn self._mc_client._delete(url=self._build_path(store_id, '', promo_rule_id))", "docstring": "Delete a promo rule\n:param store_id: The store id\n:type store_id: :py:class:`str`\n:param promo_rule_id: The id for the promo rule of a store.\n:type promo_rule_id: :py:class:`str`", "id": "f307:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(StoreProductImages, self).__init__(*args, **kwargs)self.endpoint = ''self.store_id = Noneself.product_id = Noneself.image_id = None", "docstring": "Initialize the endpoint", "id": "f308:c0:m0"} {"signature": "def create(self, store_id, product_id, data):", "body": "self.store_id = store_idself.product_id = product_idif '' not in data:raise KeyError('')if '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(store_id, '', product_id, ''), data=data)if response is not None:self.image_id = response['']else:self.image_id = Nonereturn response", "docstring": "Add a new image to the product.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"id\": string*,\n    \"url\": string*\n}", "id": "f308:c0:m1"} {"signature": "def all(self, store_id, product_id, get_all=False, **queryparams):", "body": "self.store_id = store_idself.product_id = product_idself.image_id = Noneif get_all:return self._iterate(url=self._build_path(store_id, '', product_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(store_id, '', product_id, ''), **queryparams)", "docstring": "Get information about a product\u2019s images.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f308:c0:m2"} {"signature": "def get(self, store_id, product_id, image_id, **queryparams):", "body": "self.store_id = store_idself.product_id = product_idself.image_id = image_idreturn self._mc_client._get(url=self._build_path(store_id, '', product_id, '', image_id),**queryparams)", "docstring": "Get information about a specific product image.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param image_id: The id for the product image.\n:type image_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id":
"f308:c0:m3"} {"signature": "def update(self, store_id, product_id, image_id, data):", "body": "self.store_id = store_idself.product_id = product_idself.image_id = image_idreturn self._mc_client._patch(url=self._build_path(store_id, '', product_id, '', image_id),data=data)", "docstring": "Update a product image.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param image_id: The id for the product image.\n:type image_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f308:c0:m4"} {"signature": "def delete(self, store_id, product_id, image_id):", "body": "self.store_id = store_idself.product_id = product_idself.image_id = image_idreturn self._mc_client._delete(url=self._build_path(store_id, '', product_id, '', image_id))", "docstring": "Delete a product image.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param image_id: The id for the product image.\n:type image_id: :py:class:`str`", "id": "f308:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListMemberGoals, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = Noneself.subscriber_hash = None", "docstring": "Initialize the endpoint", "id": "f309:c0:m0"} {"signature": "def all(self, list_id, subscriber_hash, **queryparams):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.list_id = list_idself.subscriber_hash = subscriber_hashreturn self._mc_client._get(url=self._build_path(list_id, '', subscriber_hash, ''), **queryparams)", "docstring": "Get the last 50 Goal events for a member on a specific list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f309:c0:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(StoreProductVariants, self).__init__(*args, **kwargs)self.endpoint = ''self.store_id = Noneself.product_id = Noneself.variant_id = None", "docstring": "Initialize the endpoint", "id": "f310:c0:m0"} {"signature": "def create(self, store_id, product_id, data):", "body": "self.store_id = store_idself.product_id = product_idif '' not in data:raise KeyError('')if '' not in data:raise KeyError('')response = self._mc_client._post(url=self._build_path(store_id, '', product_id, ''), data=data)if response is not None:self.variant_id = response['']else:self.variant_id = Nonereturn response", "docstring": "Add a new variant to the product.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"id\": string*,\n \"title\": string*\n}", "id": "f310:c0:m1"} {"signature": "def all(self, store_id, product_id, get_all=False, **queryparams):", "body": "self.store_id = store_idself.product_id = product_idself.variant_id = Noneif get_all:return self._iterate(url=self._build_path(store_id, '', product_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(store_id, '', product_id, ''),**queryparams)", 
"docstring": "Get information about a product\u2019s variants.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f310:c0:m2"} {"signature": "def get(self, store_id, product_id, variant_id, **queryparams):", "body": "self.store_id = store_idself.product_id = product_idself.variant_id = variant_idreturn self._mc_client._get(url=self._build_path(store_id, '', product_id, '', variant_id),**queryparams)", "docstring": "Get information about a specific product variant.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param variant_id: The id for the product variant.\n:type variant_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f310:c0:m3"} {"signature": "def update(self, store_id, product_id, variant_id, data):", "body": "self.store_id = store_idself.product_id = product_idself.variant_id = variant_idreturn self._mc_client._patch(url=self._build_path(store_id, '', product_id, '', variant_id),data=data)", "docstring": "Update a product variant.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param variant_id: The id for the product variant.\n:type variant_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f310:c0:m4"} {"signature": "def create_or_update(self, store_id, product_id, variant_id, data):", "body": "self.store_id = store_idself.product_id = product_idself.variant_id = variant_idif '' not in data:raise KeyError('')if '' not in data:raise KeyError('')return self._mc_client._put(url=self._build_path(store_id, '', product_id, '', variant_id),data=data)", "docstring": "Add or update a product variant.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param variant_id: The id for the product variant.\n:type variant_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"id\": string*,\n \"title\": string*\n}", "id": "f310:c0:m5"} {"signature": "def delete(self, store_id, product_id, variant_id):", "body": "self.store_id = store_idself.product_id = product_idself.variant_id = variant_idreturn self._mc_client._delete(url=self._build_path(store_id, '', product_id, '', variant_id))", "docstring": "Delete a product variant.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param variant_id: The id for the product variant.\n:type variant_id: :py:class:`str`", "id": "f310:c0:m6"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(Root, self).__init__(*args, **kwargs)self.endpoint = ''", "docstring": "Initialize the endpoint", "id": "f311:c0:m0"} {"signature": "def get(self, **queryparams):", "body": "return self._mc_client._get(url=self._build_path(), **queryparams)", "docstring": "Get 
links to all other resources available in the API.\n\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f311:c0:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(ReportSentTo, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = Noneself.subscriber_hash = None", "docstring": "Initialize the endpoint", "id": "f312:c0:m0"} {"signature": "def all(self, campaign_id, get_all=False, **queryparams):", "body": "self.campaign_id = campaign_idself.subscriber_hash = Noneif get_all:return self._iterate(url=self._build_path(campaign_id, ''), **queryparams)else:return self._mc_client._get(url=self._build_path(campaign_id, ''), **queryparams)", "docstring": "Get information about campaign recipients.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f312:c0:m1"} {"signature": "def get(self, campaign_id, subscriber_hash, **queryparams):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)self.campaign_id = campaign_idself.subscriber_hash = subscriber_hashreturn self._mc_client._get(url=self._build_path(campaign_id, '', subscriber_hash), **queryparams)", "docstring": "Get information about a specific campaign recipient.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f312:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(Lists, self).__init__(*args, **kwargs)self.endpoint = ''self.list_id = Noneself.abuse_reports = ListAbuseReports(self)self.activity = ListActivity(self)self.clients = ListClients(self)self.growth_history = ListGrowthHistory(self)self.interest_categories = ListInterestCategories(self)self.members = ListMembers(self)self.merge_fields = ListMergeFields(self)self.segments = ListSegments(self)self.signup_forms = ListSignupForms(self)self.webhooks = ListWebhooks(self)", "docstring": "Initialize the endpoint", "id": "f313:c0:m0"} {"signature": "def create(self, data):", "body": "if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')check_email(data[''][''])if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data:raise KeyError('')if data[''] not in [True, False]:raise TypeError('')response = self._mc_client._post(url=self._build_path(), data=data)if response is not None:self.list_id = response['']else:self.list_id = Nonereturn response", "docstring": "Create a new list in your MailChimp account.\n\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": 
string*,\n \"contact\": object*\n {\n \"company\": string*,\n \"address1\": string*,\n \"city\": string*,\n \"state\": string*,\n \"zip\": string*,\n \"country\": string*\n },\n \"permission_reminder\": string*,\n \"campaign_defaults\": object*\n {\n \"from_name\": string*,\n \"from_email\": string*,\n \"subject\": string*,\n \"language\": string*\n },\n \"email_type_option\": boolean\n}", "id": "f313:c0:m1"} {"signature": "def update_members(self, list_id, data):", "body": "self.list_id = list_idif '' not in data:raise KeyError('')else:if not len(data['']) <= :raise ValueError('')for member in data['']:if '' not in member:raise KeyError('')check_email(member[''])if '' not in member and '' not in member:raise KeyError('')valid_statuses = ['', '', '', '']if '' in member and member[''] not in valid_statuses:raise ValueError('''')if '' in member and member[''] not in valid_statuses:raise ValueError('''')if '' not in data:data[''] = Falsereturn self._mc_client._post(url=self._build_path(list_id), data=data)", "docstring": "Batch subscribe or unsubscribe list members.\n\nOnly the members array is required in the request body parameters.\nWithin the members array, each member requires an email_address\nand either a status or status_if_new. The update_existing parameter\nwill also be considered required to help prevent accidental updates\nto existing members and will default to false if not present.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"members\": array*\n [\n {\n \"email_address\": string*,\n \"status\": string* (Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'pending'),\n \"status_if_new\": string* (Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'pending')\n }\n ],\n \"update_existing\": boolean*\n}", "id": "f313:c0:m2"} {"signature": "def all(self, get_all=False, **queryparams):", "body": "self.list_id = Noneif get_all:return self._iterate(url=self._build_path(), **queryparams)else:return self._mc_client._get(url=self._build_path(), **queryparams)", "docstring": "Get information about all lists in the account.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['before_date_created'] = string\nqueryparams['since_date_created'] = string\nqueryparams['before_campaign_last_sent'] = string\nqueryparams['since_campaign_last_sent'] = string\nqueryparams['email'] = string\nqueryparams['sort_field'] = string (Must be 'date_created')\nqueryparams['sort_dir'] = string (Must be one of 'ASC' or 'DESC')", "id": "f313:c0:m3"} {"signature": "def get(self, list_id, **queryparams):", "body": "self.list_id = list_idreturn self._mc_client._get(url=self._build_path(list_id), **queryparams)", "docstring": "Get information about a specific list in your MailChimp account.\nResults include list members who have signed up but haven\u2019t confirmed\ntheir subscription yet and unsubscribed or cleaned.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f313:c0:m4"} {"signature": "def update(self, list_id, data):", "body": "self.list_id = list_idif '' not in data:raise KeyError('')if '' not in data:raise 
KeyError('')if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')check_email(data[''][''])if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data:raise KeyError('')if data[''] not in [True, False]:raise TypeError('')return self._mc_client._patch(url=self._build_path(list_id), data=data)", "docstring": "Update the settings for a specific list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"name\": string*,\n \"contact\": object*\n {\n \"company\": string*,\n \"address1\": string*,\n \"city\": string*,\n \"state\": string*,\n \"zip\": string*,\n \"country\": string*\n },\n \"permission_reminder\": string*,\n \"campaign_defaults\": object*\n {\n \"from_name\": string*,\n \"from_email\": string*,\n \"subject\": string*,\n \"language\": string*\n },\n \"email_type_option\": boolean\n}", "id": "f313:c0:m5"} {"signature": "def delete(self, list_id):", "body": "self.list_id = list_idreturn self._mc_client._delete(url=self._build_path(list_id))", "docstring": "Delete a list from your MailChimp account. If you delete a list,\nyou\u2019ll lose the list history\u2014including subscriber activity,\nunsubscribes, complaints, and bounces. You\u2019ll also lose subscribers\u2019\nemail addresses, unless you exported and backed up your list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`", "id": "f313:c0:m6"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(CampaignSendChecklist, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = None", "docstring": "Initialize the endpoint", "id": "f314:c0:m0"} {"signature": "def get(self, campaign_id, **queryparams):", "body": "self.campaign_id = campaign_idreturn self._mc_client._get(url=self._build_path(campaign_id, ''), **queryparams)", "docstring": "Review the send checklist for a campaign, and resolve any issues\nbefore sending.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f314:c0:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(Campaigns, self).__init__(*args, **kwargs)self.endpoint = ''self.campaign_id = Noneself.actions = CampaignActions(self)self.content = CampaignContent(self)self.feedback = CampaignFeedback(self)self.send_checklist = CampaignSendChecklist(self)", "docstring": "Initialize the endpoint", "id": "f315:c0:m0"} {"signature": "def create(self, data):", "body": "if '' not in data:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')check_email(data[''][''])if '' not in data:raise KeyError('')if not data[''] in ['', '', '', '', '']:raise ValueError('')if data[''] == '':if '' not in data:raise KeyError('')if '' not in data['']:raise KeyError('')if data[''][''] not in ['', '', '', '']:raise ValueError('''')if data[''] == '':if '' not in data:raise 
KeyError('')if '' not in data['']:raise KeyError('')if not data[''][''] in ['', '', '']:raise ValueError('')response = self._mc_client._post(url=self._build_path(), data=data)if response is not None:self.campaign_id = response['']else:self.campaign_id = Nonereturn response", "docstring": "Create a new MailChimp campaign.\n\nThe ValueError raised by an invalid type in data does not mention\n'absplit' as a potential value because the documentation indicates\nthat the absplit type has been deprecated.\n\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"recipients\": object*\n {\n \"list_id\": string*\n },\n \"settings\": object*\n {\n \"subject_line\": string*,\n \"from_name\": string*,\n \"reply_to\": string*\n },\n \"variate_settings\": object* (Required if type is \"variate\")\n {\n \"winner_criteria\": string* (Must be one of \"opens\", \"clicks\", \"total_revenue\", or \"manual\")\n },\n \"rss_opts\": object* (Required if type is \"rss\")\n {\n \"feed_url\": string*,\n \"frequency\": string* (Must be one of \"daily\", \"weekly\", or \"monthly\")\n },\n \"type\": string* (Must be one of \"regular\", \"plaintext\", \"rss\", \"variate\", or \"absplit\")\n}", "id": "f315:c0:m1"} {"signature": "def all(self, get_all=False, **queryparams):", "body": "self.campaign_id = Noneif get_all:return self._iterate(url=self._build_path(), **queryparams)else:return self._mc_client._get(url=self._build_path(), **queryparams)", "docstring": "Get all campaigns in an account.\n\n.. note::\n The before_create_time, since_create_time, before_send_time, and\n since_send_time queryparams expect times to be listed in the ISO\n 8601 format in UTC (ex. 2015-10-21T15:41:36+00:00).\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['type'] = []\nqueryparams['status'] = []\nqueryparams['before_send_time'] = string\nqueryparams['since_send_time'] = string\nqueryparams['before_create_time'] = string\nqueryparams['since_create_time'] = string\nqueryparams['list_id'] = string\nqueryparams['folder_id'] = string\nqueryparams['sort_field'] = string\nqueryparams['sort_dir'] = string", "id": "f315:c0:m2"} {"signature": "def get(self, campaign_id, **queryparams):", "body": "self.campaign_id = campaign_idreturn self._mc_client._get(url=self._build_path(campaign_id), **queryparams)", "docstring": "Get information about a specific campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['sort_field'] = string\nqueryparams['create_time'] = string", "id": "f315:c0:m3"} {"signature": "def update(self, campaign_id, data):", "body": "self.campaign_id = campaign_idif '' not in data:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')if '' not in data['']:raise KeyError('')check_email(data[''][''])return self._mc_client._patch(url=self._build_path(campaign_id), data=data)", "docstring": "Update some or all of the settings for a specific campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"settings\": object*\n {\n \"subject_line\": 
string*,\n \"from_name\": string*,\n \"reply_to\": string*\n },\n}", "id": "f315:c0:m4"} {"signature": "def delete(self, campaign_id):", "body": "self.campaign_id = campaign_idreturn self._mc_client._delete(url=self._build_path(campaign_id))", "docstring": "Remove a campaign from your MailChimp account.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`", "id": "f315:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(AutomationRemovedSubscribers, self).__init__(*args, **kwargs)self.endpoint = ''self.workflow_id = None", "docstring": "Initialize the endpoint", "id": "f316:c0:m0"} {"signature": "def create(self, workflow_id, data):", "body": "self.workflow_id = workflow_idif '' not in data:raise KeyError('')check_email(data[''])return self._mc_client._post(url=self._build_path(workflow_id, ''), data=data)", "docstring": "Remove a subscriber from a specific Automation workflow. You can\nremove a subscriber at any point in an Automation workflow, regardless\nof how many emails they\u2019ve been sent from that workflow. Once they\u2019re\nremoved, they can never be added back to the same workflow.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n \"email_address\": string*\n}", "id": "f316:c0:m1"} {"signature": "def all(self, workflow_id):", "body": "self.workflow_id = workflow_idreturn self._mc_client._get(url=self._build_path(workflow_id, ''))", "docstring": "Get information about subscribers who were removed from an Automation\nworkflow.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`", "id": "f316:c0:m2"} {"signature": "def __init__(self, mc_api=None, mc_user='', access_token=None, enabled=True, timeout=None,request_hooks=None, request_headers=None):", "body": "super(MailChimpClient, self).__init__()self.enabled = enabledself.timeout = timeoutif access_token:self.auth = MailChimpOAuth(access_token)self.base_url = self.auth.get_base_url() + ''elif mc_api:if not re.match(r\"\", mc_api.split('')[]):raise ValueError('''')self.auth = HTTPBasicAuth(mc_user, mc_api)datacenter = mc_api.split('').pop()self.base_url = ''.format(datacenter)else:raise Exception('')self.request_headers = request_headers or requests.utils.default_headers()self.request_hooks = request_hooks or requests.hooks.default_hooks()", "docstring": "Initialize the class with your optional user_id and required api_key.\n\nIf `enabled` is not True, these methods become no-ops. 
This is\nparticularly useful for testing or disabling with configuration.\n\n:param mc_api: Mailchimp API key\n:type mc_api: :py:class:`str`\n:param mc_user: Mailchimp user id\n:type mc_user: :py:class:`str`\n:param access_token: The OAuth access token\n:type access_token: :py:class:`str`\n:param enabled: Whether the API should execute any requests\n:type enabled: :py:class:`bool`\n:param timeout: (optional) How long to wait for the server to send\n data before giving up, as a float, or a :ref:`(connect timeout,\n read timeout) ` tuple.\n:type timeout: float or tuple\n:param request_hooks: (optional) Hooks for :py:func:`requests.requests`.\n:type request_hooks: :py:class:`dict`\n:param request_headers: (optional) Headers for\n :py:func:`requests.requests`.\n:type request_headers: :py:class:`dict`", "id": "f317:c1:m0"} {"signature": "@_enabled_or_noopdef _post(self, url, data=None):", "body": "url = urljoin(self.base_url, url)try:r = self._make_request(**dict(method='',url=url,json=data,auth=self.auth,timeout=self.timeout,hooks=self.request_hooks,headers=self.request_headers))except requests.exceptions.RequestException as e:raise eelse:if r.status_code >= :try:error_data = r.json()except ValueError:error_data = { \"\": r }raise MailChimpError(error_data)if r.status_code == :return Nonereturn r.json()", "docstring": "Handle authenticated POST requests\n\n:param url: The url for the endpoint including path parameters\n:type url: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:data:`none` or :py:class:`dict`\n:returns: The JSON output from the API or an error message", "id": "f317:c1:m2"} {"signature": "@_enabled_or_noopdef _get(self, url, **queryparams):", "body": "url = urljoin(self.base_url, url)if len(queryparams):url += '' + urlencode(queryparams)try:r = self._make_request(**dict(method='',url=url,auth=self.auth,timeout=self.timeout,hooks=self.request_hooks,headers=self.request_headers))except requests.exceptions.RequestException as e:raise eelse:if r.status_code >= :raise MailChimpError(r.json())return r.json()", "docstring": "Handle authenticated GET requests\n\n:param url: The url for the endpoint including path parameters\n:type url: :py:class:`str`\n:param queryparams: The query string parameters\n:returns: The JSON output from the API", "id": "f317:c1:m3"} {"signature": "@_enabled_or_noopdef _delete(self, url):", "body": "url = urljoin(self.base_url, url)try:r = self._make_request(**dict(method='',url=url,auth=self.auth,timeout=self.timeout,hooks=self.request_hooks,headers=self.request_headers))except requests.exceptions.RequestException as e:raise eelse:if r.status_code >= :raise MailChimpError(r.json())if r.status_code == :returnreturn r.json()", "docstring": "Handle authenticated DELETE requests\n\n:param url: The url for the endpoint including path parameters\n:type url: :py:class:`str`\n:returns: The JSON output from the API", "id": "f317:c1:m4"} {"signature": "@_enabled_or_noopdef _patch(self, url, data=None):", "body": "url = urljoin(self.base_url, url)try:r = self._make_request(**dict(method='',url=url,json=data,auth=self.auth,timeout=self.timeout,hooks=self.request_hooks,headers=self.request_headers))except requests.exceptions.RequestException as e:raise eelse:if r.status_code >= :raise MailChimpError(r.json())return r.json()", "docstring": "Handle authenticated PATCH requests\n\n:param url: The url for the endpoint including path parameters\n:type url: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:data:`none` or 
:py:class:`dict`\n:returns: The JSON output from the API", "id": "f317:c1:m5"} {"signature": "@_enabled_or_noopdef _put(self, url, data=None):", "body": "url = urljoin(self.base_url, url)try:r = self._make_request(**dict(method='',url=url,json=data,auth=self.auth,timeout=self.timeout,hooks=self.request_hooks,headers=self.request_headers))except requests.exceptions.RequestException as e:raise eelse:if r.status_code >= :try:error_data = r.json()except ValueError:error_data = { \"\": r }raise MailChimpError(error_data)return r.json()", "docstring": "Handle authenticated PUT requests\n\n:param url: The url for the endpoint including path parameters\n:type url: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:data:`none` or :py:class:`dict`\n:returns: The JSON output from the API", "id": "f317:c1:m6"} {"signature": "def __init__(self, access_token):", "body": "self._access_token = access_token", "docstring": "Initialize the OAuth and save the access token\n\n:param access_token: The access token provided by OAuth authentication\n:type access_token: :py:class:`str`", "id": "f317:c2:m0"} {"signature": "def __call__(self, r):", "body": "r.headers[''] = '' + self._access_tokenreturn r", "docstring": "Authorize with the access token provided in __init__", "id": "f317:c2:m1"} {"signature": "def get_metadata(self):", "body": "try:r = requests.get('', auth=self)except requests.exceptions.RequestException as e:raise eelse:r.raise_for_status()output = r.json()if '' in output:raise requests.exceptions.RequestException(output[''])return output", "docstring": "Get the metadata returned after authentication", "id": "f317:c2:m2"} {"signature": "def get_base_url(self):", "body": "try:return self.get_metadata()['']except requests.exceptions.RequestException:raise", "docstring": "Get the base_url from the authentication metadata", "id": "f317:c2:m3"} {"signature": "def create_local_copy(cookie_file):", "body": "if isinstance(cookie_file, list):cookie_file = cookie_file[]if os.path.exists(cookie_file):tmp_cookie_file = tempfile.NamedTemporaryFile(suffix='').nameopen(tmp_cookie_file, '').write(open(cookie_file, '').read())return tmp_cookie_fileelse:raise BrowserCookieError('' + cookie_file)", "docstring": "Make a local copy of the sqlite cookie database and return the new filename.\n This is necessary in case this database is still being written to while the user browses\n to avoid sqlite locking errors.", "id": "f320:m0"} {"signature": "def create_cookie(host, path, secure, expires, name, value):", "body": "return http.cookiejar.Cookie(, name, value, None, False, host, host.startswith(''), host.startswith(''), path,True, secure, expires, False, None, None, {})", "docstring": "Shortcut function to create a cookie", "id": "f320:m3"} {"signature": "def chrome(cookie_file=None, domain_name=\"\"):", "body": "return Chrome(cookie_file, domain_name).load()", "docstring": "Returns a cookiejar of the cookies used by Chrome. Optionally pass in a\n domain name to only load cookies from the specified domain", "id": "f320:m4"} {"signature": "def firefox(cookie_file=None, domain_name=\"\"):", "body": "return Firefox(cookie_file, domain_name).load()", "docstring": "Returns a cookiejar of the cookies and sessions used by Firefox. 
Optionally\n pass in a domain name to only load cookies from the specified domain", "id": "f320:m5"} {"signature": "def load(domain_name=\"\"):", "body": "cj = http.cookiejar.CookieJar()for cookie_fn in [chrome, firefox]:try:for cookie in cookie_fn(domain_name=domain_name):cj.set_cookie(cookie)except BrowserCookieError:passreturn cj", "docstring": "Try to load cookies from all supported browsers and return combined cookiejar\n Optionally pass in a domain name to only load cookies from the specified domain", "id": "f320:m6"} {"signature": "def load(self):", "body": "con = sqlite3.connect(self.tmp_cookie_file)cur = con.cursor()try:cur.execute(''''.format(self.domain_name))except sqlite3.OperationalError:cur.execute(''''.format(self.domain_name))cj = http.cookiejar.CookieJar()for item in cur.fetchall():host, path, secure, expires, name = item[:]value = self._decrypt(item[], item[])c = create_cookie(host, path, secure, expires, name, value)cj.set_cookie(c)con.close()return cj", "docstring": "Load sqlite cookies into a cookiejar", "id": "f320:c1:m3"} {"signature": "def _decrypt(self, value, encrypted_value):", "body": "if sys.platform == '':return self._decrypt_windows_chrome(value, encrypted_value)if value or (encrypted_value[:] != b''):return valueencrypted_value = encrypted_value[:]encrypted_value_half_len = int(len(encrypted_value) / )cipher = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(self.key, self.iv))decrypted = cipher.feed(encrypted_value[:encrypted_value_half_len])decrypted += cipher.feed(encrypted_value[encrypted_value_half_len:])decrypted += cipher.feed()return decrypted.decode(\"\")", "docstring": "Decrypt encoded cookies", "id": "f320:c1:m5"} {"signature": "def _is_root():", "body": "import osimport ctypestry:return os.geteuid() == except AttributeError:return ctypes.windll.shell32.IsUserAnAdmin() != return False", "docstring": "Checks if the user is rooted.", "id": "f322:m0"} {"signature": "def cmdclass(path, enable=None, user=None):", "body": "import warningsfrom setuptools.command.install import installfrom setuptools.command.develop import developfrom os.path import dirname, join, exists, realpathfrom traceback import extract_stacktry:from notebook.nbextensions import install_nbextensionfrom notebook.services.config import ConfigManagerexcept ImportError:try:from IPython.html.nbextensions import install_nbextensionfrom IPython.html.services.config import ConfigManagerexcept ImportError:warnings.warn(\"\"\"\"\"\"\"\")return {}if user is None:user = not _is_root()calling_file = extract_stack()[-][]fullpath = realpath(calling_file)if not exists(fullpath):raise Exception('')extension_dir = join(dirname(fullpath), path)def run_nbextension_install(develop):import syssysprefix = hasattr(sys, '')if sysprefix:install_nbextension(extension_dir, symlink=develop, sys_prefix=sysprefix)else:install_nbextension(extension_dir, symlink=develop, user=user)if enable is not None:print(\"\")cm = ConfigManager()cm.update('', {\"\": {enable: True}})class InstallCommand(install):def run(self):print(\"\")install.run(self)print(\"\")run_nbextension_install(False)class DevelopCommand(develop):def run(self):print(\"\")develop.run(self)print(\"\")run_nbextension_install(True)return {'': InstallCommand,'': DevelopCommand,}", "docstring": "Build nbextension cmdclass dict for the setuptools.setup method.\n\n Parameters\n ----------\n path: str\n Directory relative to the setup file that the nbextension code\n lives in.\n enable: [str=None]\n Extension to \"enable\". 
Enabling an extension causes it to be loaded\n automatically by the IPython notebook.\n user: [bool=None]\n Whether or not the nbextension should be installed in user mode.\n If this is undefined, the script will install as user mode IF the\n installer is not sudo.\n\n Usage\n -----\n For automatic loading:\n # Assuming `./extension` is the relative path to the JS files and\n # `./extension/main.js` is the file that you want automatically loaded.\n setup(\n name='extension',\n ...\n cmdclass=cmdclass('extension', 'extension/main'),\n )\n\n For manual loading:\n # Assuming `./extension` is the relative path to the JS files.\n setup(\n name='extension',\n ...\n cmdclass=cmdclass('extension'),\n )", "id": "f322:m1"} {"signature": "def dependency_sorted(containers):", "body": "if not isinstance(containers, collections.Mapping):containers = dict((c.name, c) for c in containers)container_links = dict((name, set(c.links.keys()))for name, c in containers.items())sorted_names = _resolve(container_links)return [containers[name] for name in sorted_names]", "docstring": "Sort a dictionary or list of containers into dependency order\n\n Returns a sequence", "id": "f325:m2"} {"signature": "@staticmethoddef from_dict(name, values):", "body": "count = count_value = values.get('', )if isinstance(count_value, int):count = max(count_value, )def with_index(name, idx):if name and idx:return '' % (name, idx)return namedef get_instance(n, idx=None):return BlockadeContainerConfig(with_index(n, idx),values[''],command=values.get(''),links=values.get(''),volumes=values.get(''),publish_ports=values.get(''),expose_ports=values.get(''),environment=values.get(''),hostname=values.get(''),dns=values.get(''),start_delay=values.get('', ),neutral=values.get('', False),holy=values.get('', False),container_name=with_index(values.get(''), idx),cap_add=values.get(''))if count == :yield get_instance(name)else:for idx in range(, count+):yield get_instance(name, idx)", "docstring": "Convert a dictionary of configuration values\ninto a sequence of BlockadeContainerConfig instances", "id": "f325:c0:m0"} {"signature": "@staticmethoddef from_dict(values):", "body": "try:containers = values['']parsed_containers = {}for name, container_dict in containers.items():try:for cnt in BlockadeContainerConfig.from_dict(name, container_dict):if cnt.container_name:cname = cnt.container_nameexisting = [c for c in parsed_containers.values() if c.container_name == cname]if existing:raise BlockadeConfigError(\"\" % (cname))parsed_containers[cnt.name] = cntexcept Exception as err:raise BlockadeConfigError(\"\" % (name, err))network = values.get('')if network:defaults = _DEFAULT_NETWORK_CONFIG.copy()defaults.update(network)network = defaultselse:network = _DEFAULT_NETWORK_CONFIG.copy()return BlockadeConfig(parsed_containers, network=network)except KeyError as err:raise BlockadeConfigError(\"\" + str(err))except Exception as err:raise BlockadeConfigError(\"\" + str(err))", "docstring": "Instantiate a BlockadeConfig instance based on\na given dictionary of configuration values", "id": "f325:c1:m0"} {"signature": "def wait_for_children():", "body": "wait(lambda: len(multiprocessing.active_children()) == )", "docstring": "Wait for child processes to exit\n\n The testing system launches and terminates child processes, but\n doesn't wait for them to actually die. 
So in a few places we need\n this extra call", "id": "f338:m0"} {"signature": "def cmd_up(opts):", "body": "config = load_config(opts.config)b = get_blockade(config, opts)containers = b.create(verbose=opts.verbose, force=opts.force)print_containers(containers, opts.json)", "docstring": "Start the containers and link them together", "id": "f342:m7"} {"signature": "def cmd_destroy(opts):", "body": "config = load_config(opts.config)b = get_blockade(config, opts)b.destroy()", "docstring": "Destroy all containers and restore networks", "id": "f342:m8"} {"signature": "def cmd_status(opts):", "body": "config = load_config(opts.config)b = get_blockade(config, opts)containers = b.status()print_containers(containers, opts.json)", "docstring": "Print status of containers and networks", "id": "f342:m9"} {"signature": "def cmd_start(opts):", "body": "__with_containers(opts, Blockade.start)", "docstring": "Start some or all containers", "id": "f342:m11"} {"signature": "def cmd_kill(opts):", "body": "kill_signal = opts.signal if hasattr(opts, '') else \"\"__with_containers(opts, Blockade.kill, signal=kill_signal)", "docstring": "Kill some or all containers", "id": "f342:m12"} {"signature": "def cmd_stop(opts):", "body": "__with_containers(opts, Blockade.stop)", "docstring": "Stop some or all containers", "id": "f342:m13"} {"signature": "def cmd_restart(opts):", "body": "__with_containers(opts, Blockade.restart)", "docstring": "Restart some or all containers", "id": "f342:m14"} {"signature": "def cmd_flaky(opts):", "body": "__with_containers(opts, Blockade.flaky)", "docstring": "Make the network flaky for some or all containers", "id": "f342:m15"} {"signature": "def cmd_slow(opts):", "body": "__with_containers(opts, Blockade.slow)", "docstring": "Make the network slow for some or all containers", "id": "f342:m16"} {"signature": "def cmd_fast(opts):", "body": "__with_containers(opts, Blockade.fast)", "docstring": "Restore network speed and reliability for some or all containers", "id": "f342:m17"} {"signature": "def cmd_duplicate(opts):", "body": "__with_containers(opts, Blockade.duplicate)", "docstring": "Introduce packet duplication into the network of some or all containers", "id": "f342:m18"} {"signature": "def cmd_partition(opts):", "body": "config = load_config(opts.config)b = get_blockade(config, opts)if opts.random:if opts.partitions:raise BlockadeError(\"\"\"\")b.random_partition()else:partitions = []for partition in opts.partitions:names = []for name in partition.split(\"\"):name = name.strip()if name:names.append(name)partitions.append(names)if not partitions:raise BlockadeError(\"\"\"\")b.partition(partitions)", "docstring": "Partition the network between containers\n\n Replaces any existing partitions outright. Any containers NOT specified\n in arguments will be globbed into a single implicit partition. 
For\n example if you have three containers: c1, c2, and c3 and you run:\n\n blockade partition c1\n\n The result will be a partition with just c1 and another partition with\n c2 and c3.\n\n Alternatively, --random may be specified, and zero or more random\n partitions will be generated by blockade.", "id": "f342:m20"} {"signature": "def cmd_join(opts):", "body": "config = load_config(opts.config)b = get_blockade(config, opts)b.join()", "docstring": "Restore full networking between containers", "id": "f342:m21"} {"signature": "def cmd_logs(opts):", "body": "config = load_config(opts.config)b = get_blockade(config, opts)puts(b.logs(opts.container).decode(encoding=''))", "docstring": "Fetch the logs of a container", "id": "f342:m22"} {"signature": "def cmd_daemon(opts):", "body": "if opts.data_dir is None:raise BlockadeError(\"\")rest.start(data_dir=opts.data_dir, port=opts.port, debug=opts.debug,host_exec=get_host_exec())", "docstring": "Start the Blockade REST API", "id": "f342:m23"} {"signature": "def cmd_add(opts):", "body": "config = load_config(opts.config)b = get_blockade(config, opts)b.add_container(opts.containers)", "docstring": "Add one or more existing Docker containers to a Blockade group", "id": "f342:m24"} {"signature": "def cmd_version(opts):", "body": "import blockade.versionputs(\"\" + blockade.version.__version__)", "docstring": "Show the Blockade version information", "id": "f342:m25"} {"signature": "def cmd_events(opts):", "body": "config = load_config(opts.config)b = get_blockade(config, opts)if opts.json:outf = None_write = putsif opts.output is not None:outf = open(opts.output, \"\")_write = outf.writetry:delim = \"\"logs = b.get_audit().read_logs(as_json=False)_write('')_write(os.linesep)for l in logs:_write(delim + l)delim = \"\" + os.linesep_write(os.linesep)_write('')finally:if opts.output is not None:outf.close()else:puts(colored.blue(columns([\"\", ],[\"\", ],[\"\", ],[\"\", ],[\"\", ])))logs = b.get_audit().read_logs(as_json=True)for l in logs:puts(columns([l[''], ],[str([str(t) for t in l['']]), ],[l[''], ],[str(l['']), ],[l[''], ]))", "docstring": "Get the event log for a given blockade", "id": "f342:m26"} {"signature": "def get_source_chains(self, blockade_id):", "body": "result = {}if not blockade_id:raise ValueError(\"\")lines = self.get_chain_rules(\"\")for line in lines:parts = line.split()if len(parts) < :continuetry:partition_index = parse_partition_index(blockade_id, parts[])except ValueError:continue source = parts[]if source:result[source] = partition_indexreturn result", "docstring": "Get a map of blockade chains IDs -> list of IPs targeted at them\n\n For figuring out which container is in which partition", "id": "f343:c2:m4"} {"signature": "def insert_rule(self, chain, src=None, dest=None, target=None):", "body": "if not chain:raise ValueError(\"\")if not target:raise ValueError(\"\")if not (src or dest):raise ValueError(\"\")args = [\"\", chain]if src:args += [\"\", src]if dest:args += [\"\", dest]args += [\"\", target]self.call(*args)", "docstring": "Insert a new rule in the chain", "id": "f343:c2:m8"} {"signature": "def create_chain(self, chain):", "body": "if not chain:raise ValueError(\"\")self.call(\"\", chain)", "docstring": "Create a new chain", "id": "f343:c2:m9"} {"signature": "def clear(self, blockade_id):", "body": "self.delete_blockade_rules(blockade_id)self.delete_blockade_chains(blockade_id)", "docstring": "Remove all iptables rules and chains related to this blockade", "id": "f343:c2:m10"} {"signature": "def _sm_start(self, *args, 
**kwargs):", "body": "millisec = random.randint(self._start_min_delay, self._start_max_delay)self._timer = threading.Timer(millisec / , self.event_timeout)self._timer.start()", "docstring": "Start the timer waiting for pain", "id": "f345:c2:m11"} {"signature": "def _sm_to_pain(self, *args, **kwargs):", "body": "_logger.info(\"\" % self._blockade_name)self._do_blockade_event()millisec = random.randint(self._run_min_time, self._run_max_time)self._timer = threading.Timer(millisec / , self.event_timeout)self._timer.start()", "docstring": "Start the blockade event", "id": "f345:c2:m12"} {"signature": "def _sm_stop_from_no_pain(self, *args, **kwargs):", "body": "_logger.info(\"\" % self._blockade_name)self._timer.cancel()", "docstring": "Stop chaos when there is no current blockade operation", "id": "f345:c2:m13"} {"signature": "def _sm_relieve_pain(self, *args, **kwargs):", "body": "_logger.info(\"\" % self._blockade_name)self._do_reset_all()millisec = random.randint(self._start_min_delay, self._start_max_delay)self._timer = threading.Timer(millisec/, self.event_timeout)self._timer.start()", "docstring": "End the blockade event and return to a steady state", "id": "f345:c2:m14"} {"signature": "def _sm_stop_from_pain(self, *args, **kwargs):", "body": "_logger.info(\"\" % self._blockade_name)self._do_reset_all()", "docstring": "Stop chaos while there is a blockade event in progress", "id": "f345:c2:m15"} {"signature": "def _sm_cleanup(self, *args, **kwargs):", "body": "if self._done_notification_func is not None:self._done_notification_func()self._timer.cancel()", "docstring": "Delete all state associated with the chaos session", "id": "f345:c2:m16"} {"signature": "def _sm_stale_timer(self, *args, **kwargs):", "body": "_logger.debug(\"\")", "docstring": "This is used when a cancel was called right before the timer fired but\nafter it was too late to cancel the timer.", "id": "f345:c2:m17"} {"signature": "def expand_partitions(containers, partitions):", "body": "all_names = frozenset(c.name for c in containers if not c.holy)holy_names = frozenset(c.name for c in containers if c.holy)neutral_names = frozenset(c.name for c in containers if c.neutral)partitions = [frozenset(p) for p in partitions]unknown = set()holy = set()union = set()for partition in partitions:unknown.update(partition - all_names - holy_names)holy.update(partition - all_names)union.update(partition)if unknown:raise BlockadeError('' %list(unknown))if holy:raise BlockadeError('' %list(holy))leftover = all_names.difference(union)if leftover:partitions.append(leftover)if not neutral_names.issubset(leftover):partitions.append(neutral_names)return partitions", "docstring": "Validate the partitions of containers. 
If there are any containers\nnot in any partition, place them in an new partition.", "id": "f350:m0"} {"signature": "@propertydef blockade_net_name(self):", "body": "return \"\" % self._blockade_id", "docstring": "Generate blockade nework name based on the blockade_id", "id": "f351:c0:m2"} {"signature": "@propertydef containers(self):", "body": "return deepcopy(self._containers)", "docstring": "Dictionary of container information", "id": "f351:c0:m3"} {"signature": "def container_id(self, name):", "body": "container = self._containers.get(name, None)if not container is None:return container.get('', None)return None", "docstring": "Try to find the container ID with the specified name", "id": "f351:c0:m4"} {"signature": "def initialize(self, containers):", "body": "self._containers = deepcopy(containers)self.__write(containers, initialize=True)", "docstring": "Initialize a new state file with the given contents.\nThis function fails in case the state file already exists.", "id": "f351:c0:m5"} {"signature": "def exists(self):", "body": "return os.path.isfile(self._state_file)", "docstring": "Checks whether a blockade state file already exists", "id": "f351:c0:m6"} {"signature": "def update(self, containers):", "body": "self._containers = deepcopy(containers)self.__write(containers, initialize=False)", "docstring": "Update the current state file with the specified contents", "id": "f351:c0:m7"} {"signature": "def load(self):", "body": "try:with open(self._state_file) as f:state = yaml.safe_load(f)self._containers = state['']except (IOError, OSError) as err:if err.errno == errno.ENOENT:raise NotInitializedError(\"\")raise InconsistentStateError(\"\"+ str(err))except Exception as err:raise InconsistentStateError(\"\"+ str(err))", "docstring": "Try to load a blockade state file in the current directory", "id": "f351:c0:m8"} {"signature": "def destroy(self):", "body": "self._state_delete()", "docstring": "Try to remove the current state file and directory", "id": "f351:c0:m9"} {"signature": "def _get_blockade_id_from_cwd(self, cwd=None):", "body": "if not cwd:cwd = os.getcwd()parent_dir = os.path.abspath(cwd)basename = os.path.basename(parent_dir).lower()blockade_id = re.sub(r\"\", \"\", basename)if not blockade_id: blockade_id = \"\"return blockade_id", "docstring": "Generate a new blockade ID based on the CWD", "id": "f351:c0:m10"} {"signature": "def _assure_dir(self):", "body": "try:os.makedirs(self._state_dir)except OSError as err:if err.errno != errno.EEXIST:raise", "docstring": "Make sure the state directory exists", "id": "f351:c0:m11"} {"signature": "def _state_delete(self):", "body": "try:os.remove(self._state_file)except OSError as err:if err.errno not in (errno.EPERM, errno.ENOENT):raisetry:os.rmdir(self._state_dir)except OSError as err:if err.errno not in (errno.ENOTEMPTY, errno.ENOENT):raise", "docstring": "Try to delete the state.yml file and the folder .blockade", "id": "f351:c0:m12"} {"signature": "def __base_state(self, containers):", "body": "return dict(blockade_id=self._blockade_id,containers=containers,version=self._state_version)", "docstring": "Convert blockade ID and container information into\na state dictionary object.", "id": "f351:c0:m13"} {"signature": "def __write(self, containers, initialize=True):", "body": "path = self._state_fileself._assure_dir()try:flags = os.O_WRONLY | os.O_CREATif initialize:flags |= os.O_EXCLwith os.fdopen(os.open(path, flags), \"\") as f:yaml.safe_dump(self.__base_state(containers), f)except OSError as err:if err.errno == errno.EEXIST:raise 
AlreadyInitializedError(\"\"\"\" % path)raiseexcept Exception:self._state_delete()raise", "docstring": "Write the given state information into a file", "id": "f351:c0:m14"} {"signature": "def select(options=None):", "body": "if not options:return Nonewidth = len(str(len(options)))for x,option in enumerate(options):sys.stdout.write(''.format(x+,option, width=width))sys.stdout.write(''.format('', width=width+))sys.stdout.flush()if sys.stdin.isatty():try:response = input().strip()except (EOFError, KeyboardInterrupt):response = ''else:sys.stdin = open(\"\")try:response = ''while True:response += sys.stdin.read()if response.endswith(''):breakexcept (EOFError, KeyboardInterrupt):sys.stdout.flush()passtry:response = int(response) - except ValueError:return Noneif response < or response >= len(options):return Nonereturn options[response]", "docstring": "pass in a list of options, promt the user to select one, and return the selected option or None", "id": "f356:m0"} {"signature": "def file_reader(fname, read_quals=False):", "body": "f = utils.open_file_read(fname)line = f.readline()phylip_regex = re.compile('')gbk_regex = re.compile('')if line.startswith('>'):seq = Fasta()previous_lines[f] = lineelif line.startswith(''):seq = Fasta()while not line.startswith('>'):line = f.readline()if not line:utils.close(f)raise Error('' + fname + '')seq = Fasta()previous_lines[f] = lineelif line.startswith('') and line[] != '':seq = Embl()previous_lines[f] = lineelif gbk_regex.search(line):seq = Embl()previous_lines[f] = lineelif line.startswith(''):seq = Fastq()previous_lines[f] = lineelif phylip_regex.search(line):number_of_seqs, bases_per_seq = line.strip().split()number_of_seqs = int(number_of_seqs)bases_per_seq = int(bases_per_seq)got_blank_line = Falsefirst_line = lineseq_lines = []while :line = f.readline()if line == '':breakelif line == '':got_blank_line = Trueelse:seq_lines.append(line.rstrip())utils.close(f)if len(seq_lines) == or len(seq_lines) == number_of_seqs:sequential = Trueelif seq_lines[][] != '' and seq_lines[][] == '':sequential = Trueelse:sequential = Falseif sequential:current_id = Nonecurrent_seq = ''for line in seq_lines:if len(current_seq) == bases_per_seq or len(current_seq) == :if current_id is not None:yield Fasta(current_id, current_seq.replace('', ''))current_seq = ''current_id, new_bases = line[:].rstrip(), line.rstrip()[:]else:new_bases = line.rstrip()current_seq += new_bases.replace('','')yield Fasta(current_id, current_seq.replace('', ''))else:if seq_lines[number_of_seqs + ][] == '':first_gap_pos = seq_lines[].find('')end_of_gap = first_gap_poswhile seq_lines[][end_of_gap] == '':end_of_gap += first_seq_base = end_of_gapelse:first_seq_base = seqs = []for i in range(number_of_seqs):name, bases = seq_lines[i][:first_seq_base].rstrip(), seq_lines[i][first_seq_base:]seqs.append(Fasta(name, bases))for i in range(number_of_seqs, len(seq_lines)):seqs[i%number_of_seqs].seq += seq_lines[i]for fa in seqs:fa.seq = fa.seq.replace('','').replace('','')yield fareturnelif line == '':utils.close(f)returnelse:utils.close(f)raise Error('' + fname + '' + line.rstrip())try:while seq.get_next_from_file(f, read_quals):yield seqfinally:utils.close(f)", "docstring": "Iterates over a FASTA or FASTQ file, yielding the next sequence in the file until there are no more sequences", "id": "f364:m0"} {"signature": "def subseq(self, start, end):", "body": "return Fasta(self.id, self.seq[start:end])", "docstring": "Returns Fasta object with the same name, of the bases from start to end, but not including end", 
"id": "f364:c1:m5"} {"signature": "def split_capillary_id(self):", "body": "try:a = self.id.rsplit('', )if a[].startswith(''):dir = ''elif a[].startswith(''):dir = ''else:dir = ''return {'': a[], '': dir, '':a[]}except:raise Error('', self.id)", "docstring": "Gets the prefix and suffix of an name of a capillary read, e.g. xxxxx.p1k or xxxx.q1k. Returns a tuple (prefix, suffx)", "id": "f364:c1:m6"} {"signature": "def expand_nucleotides(self):", "body": "s = list(self.seq)for i in range(len(s)):if s[i] in redundant_nts:s[i] = ''.join(redundant_nts[s[i]])seqs = []for x in itertools.product(*s):seqs.append(Fasta(self.id + '' + str(len(seqs) + ), ''.join(x)))return seqs", "docstring": "Assumes sequence is nucleotides. Returns list of all combinations of redundant nucleotides. e.g. R is A or G, so CRT would have combinations CAT and CGT", "id": "f364:c1:m7"} {"signature": "def strip_after_first_whitespace(self):", "body": "self.id = self.id.split()[]", "docstring": "Removes everything in the name after the first whitespace character", "id": "f364:c1:m8"} {"signature": "def strip_illumina_suffix(self):", "body": "if self.id.endswith('') or self.id.endswith(''):self.id = self.id[:-]", "docstring": "Removes any trailing /1 or /2 off the end of the name", "id": "f364:c1:m9"} {"signature": "def revcomp(self):", "body": "self.seq = self.seq.translate(str.maketrans(\"\", \"\"))[::-]", "docstring": "Reverse complements the sequence", "id": "f364:c1:m10"} {"signature": "def is_all_Ns(self, start=, end=None):", "body": "if end is not None:if start > end:raise Error('')end += else:end = len(self)if len(self) == :return Falseelse:return re.search('', self.seq[start:end]) is None", "docstring": "Returns true if the sequence is all Ns (upper or lower case)", "id": "f364:c1:m11"} {"signature": "def trim_Ns(self):", "body": "self.seq = self.seq.strip('')", "docstring": "Removes any leading or trailing N or n characters from the sequence", "id": "f364:c1:m12"} {"signature": "def replace_bases(self, old, new):", "body": "self.seq = self.seq.replace(old, new)", "docstring": "Replaces all occurrences of 'old' with 'new", "id": "f364:c1:m14"} {"signature": "def replace_non_acgt(self):", "body": "self.seq = re.sub(r'''''', '', self.seq)", "docstring": "Replace all non acgt characters with an N (case insensitive)", "id": "f364:c1:m15"} {"signature": "def replace_interval(self, start, end, new):", "body": "if start > end or start > len(self) - or end > len(self) - :raise Error('' + str(start) + '' + str(end) + '' + self.id)self.seq = self.seq[:start] + new + self.seq[end + :]", "docstring": "Replaces the sequence from start to end with the sequence \"new", "id": "f364:c1:m16"} {"signature": "def gaps(self, min_length = ):", "body": "gaps = []regex = re.compile('', re.IGNORECASE)for m in regex.finditer(self.seq):if m.span()[] - m.span()[] + >= min_length:gaps.append(intervals.Interval(m.span()[], m.span()[] - ))return gaps", "docstring": "Finds the positions of all gaps in the sequence that are at least min_length long. Returns a list of Intervals. Coords are zero-based", "id": "f364:c1:m17"} {"signature": "def contig_coords(self):", "body": "gaps = self.gaps()if len(gaps) == :return [intervals.Interval(, len(self) - )]coords = []for g in gaps:if g.start == :coords = [g.end + ]else:coords += [g.start - , g.end + ]if coords[-] < len(self):coords.append(len(self) - )return [intervals.Interval(coords[i], coords[i+]) for i in range(, len(coords)-,)]", "docstring": "Finds coords of contigs, i.e. 
everything that's not a gap (N or n). Returns a list of Intervals. Coords are zero-based", "id": "f364:c1:m18"} {"signature": "def orfs(self, frame=, revcomp=False):", "body": "assert frame in [,,]if revcomp:self.revcomp()aa_seq = self.translate(frame=frame).seq.rstrip('')if revcomp:self.revcomp()orfs = _orfs_from_aa_seq(aa_seq)for i in range(len(orfs)):if revcomp:start = len(self) - (orfs[i].end * + ) - frameend = len(self) - (orfs[i].start * ) - - frameelse:start = orfs[i].start * + frameend = orfs[i].end * + + frameorfs[i] = intervals.Interval(start, end)return orfs", "docstring": "Returns a list of ORFs that the sequence has, starting on the given\n frame. Each returned ORF is an interval.Interval object.\n If revomp=True, then finds the ORFs of the reverse complement\n of the sequence.", "id": "f364:c1:m19"} {"signature": "def all_orfs(self, min_length=):", "body": "orfs = []for frame in [,,]:for revcomp in [False, True]:orfs.extend([(t, revcomp) for t in self.orfs(frame=frame, revcomp=revcomp) if len(t)>=min_length])return sorted(orfs, key=lambda t:t[])", "docstring": "Finds all open reading frames in the sequence, that are at least as\n long as min_length. Includes ORFs on the reverse strand.\n Returns a list of ORFs, where each element is a tuple:\n (interval.Interval, bool)\n where bool=True means on the reverse strand", "id": "f364:c1:m20"} {"signature": "def is_complete_orf(self):", "body": "if len(self) % != or len(self) < :return Falseorfs = self.orfs()complete_orf = intervals.Interval(, len(self) - )for orf in orfs:if orf == complete_orf:return Truereturn False", "docstring": "Returns true iff length is >= 6, is a multiple of 3, and there is exactly one stop codon in the sequence and it is at the end", "id": "f364:c1:m21"} {"signature": "def looks_like_gene(self):", "body": "return self.is_complete_orf()and len(self) >= and len(self) % == and self.seq[:].upper() in genetic_codes.starts[genetic_code]", "docstring": "Returns true iff: length >=6, length is a multiple of 3, first codon is start, last codon is a stop and has no other stop codons", "id": "f364:c1:m22"} {"signature": "def make_into_gene(self):", "body": "for reverse in [True, False]:for frame in range():new_seq = copy.copy(self)if reverse:new_seq.revcomp()new_seq.seq = new_seq[frame:]if len(new_seq) % :new_seq.seq = new_seq.seq[:-(len(new_seq) % )]new_aa_seq = new_seq.translate()if len(new_aa_seq) >= and new_seq[:] in genetic_codes.starts[genetic_code] and new_aa_seq[-] == '' and '' not in new_aa_seq[:-]:strand = '' if reverse else ''return new_seq, strand, framereturn None", "docstring": "Tries to make into a gene sequence. Tries all three reading frames and both strands. Returns a tuple (new sequence, strand, frame) if it was successful. Otherwise returns None.", "id": "f364:c1:m23"} {"signature": "def trim(self, start, end):", "body": "self.seq = self.seq[start:len(self.seq) - end]", "docstring": "Removes first 'start'/'end' bases off the start/end of the sequence", "id": "f364:c1:m27"} {"signature": "def to_Fastq(self, qual_scores):", "body": "if len(self) != len(qual_scores):raise Error('', self.id)return Fastq(self.id, self.seq, ''.join([chr(max(, min(x, )) + ) for x in qual_scores]))", "docstring": "Returns a Fastq object. 
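The to_Fastq conversion just described maps numeric quality scores onto the FASTQ quality string, one character per base. The clamping bounds and offset are elided in the extracted body, so the sketch below assumes standard Phred+33 encoding with scores clamped to the printable range 0-93; the function name is illustrative, not part of the library.

def quals_to_fastq_string(qual_scores):
    # Assumed: clamp each score to 0..93, then offset by 33 (a score of 0 becomes '!')
    return ''.join(chr(max(0, min(q, 93)) + 33) for q in qual_scores)

# Example: scores [0, 20, 40] encode to '!5I'
assert quals_to_fastq_string([0, 20, 40]) == '!5I'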
qual_scores expected to be a list of numbers, like you would get in a .qual file", "id": "f364:c1:m28"} {"signature": "def search(self, search_string):", "body": "seq = self.seq.upper()search_string = search_string.upper()pos = found = seq.find(search_string, pos)hits = []while found != -:hits.append((found, ''))pos = found + found = seq.find(search_string, pos)pos = search_string = Fasta('', search_string)search_string.revcomp()search_string = search_string.seqfound = seq.find(search_string, pos)while found != -:hits.append((found, ''))pos = found + found = seq.find(search_string, pos)return hits", "docstring": "Finds every occurrence (including overlapping ones) of the search_string, including on the reverse strand. Returns a list where each element is a tuple (position, strand) where strand is in ['-', '+']. Positions are zero-based", "id": "f364:c1:m29"} {"signature": "def translate(self, frame=):", "body": "return Fasta(self.id, ''.join([genetic_codes.codes[genetic_code].get(self.seq[x:x+].upper(), '') for x in range(frame, len(self)--frame, )]))", "docstring": "Returns a Fasta sequence, translated into amino acids. Starts translating from 'frame', where frame expected to be 0,1 or 2", "id": "f364:c1:m30"} {"signature": "def gc_content(self, as_decimal=True):", "body": "gc_total = num_bases = n_tuple = tuple('')accepted_bases = tuple('')for base, count in Counter(self.seq).items():if base not in n_tuple:num_bases += countif base in accepted_bases: gc_total += countgc_content = gc_total / num_basesif not as_decimal: gc_content *= return gc_content", "docstring": "Returns the GC content for the sequence.\n Notes:\n This method ignores N when calculating the length of the sequence.\n It does not, however ignore other ambiguous bases. It also only\n includes the ambiguous base S (G or C). In this sense the method is\n conservative with its calculation.\n\n Args:\n as_decimal (bool): Return the result as a decimal. Setting to False\n will return as a percentage. 
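The gc_content docstring above (continuing below) spells out the counting rule: G, C and the ambiguity code S contribute to the numerator, while N is excluded from the length. A minimal standalone sketch of that rule, assuming case-insensitive input, might be:

from collections import Counter

def gc_content(seq, as_decimal=True):
    counts = Counter(seq.upper())
    num_bases = sum(n for base, n in counts.items() if base != 'N')
    gc_total = sum(n for base, n in counts.items() if base in ('G', 'C', 'S'))
    gc = gc_total / num_bases
    return gc if as_decimal else gc * 100

# 'GCAT' -> 0.5, or 50.0 with as_decimal=False, matching the example given below
assert gc_content('GCAT') == 0.5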
i.e for the sequence GCAT it will\n return 0.5 by default and 50.00 if set to False.\n\n Returns:\n float: GC content calculated as the number of G, C, and S divided\n by the number of (non-N) bases (length).", "id": "f364:c1:m31"} {"signature": "def subseq(self, start, end):", "body": "return Fastq(self.id, self.seq[start:end], self.qual[start:end])", "docstring": "Returns Fastq object with the same name, of the bases from start to end, but not including end", "id": "f364:c3:m3"} {"signature": "def revcomp(self):", "body": "super().revcomp()self.qual = self.qual[::-]", "docstring": "Reverse complements the sequence", "id": "f364:c3:m5"} {"signature": "def trim(self, start, end):", "body": "super().trim(start, end)self.qual = self.qual[start:len(self.qual) - end]", "docstring": "Removes first 'start'/'end' bases off the start/end of the sequence", "id": "f364:c3:m6"} {"signature": "def trim_Ns(self):", "body": "i = while i < len(self) and self.seq[i] in '':i += self.seq = self.seq[i:]self.qual = self.qual[i:]self.seq = self.seq.rstrip('')self.qual = self.qual[:len(self.seq)]", "docstring": "Removes any leading or trailing N or n characters from the sequence", "id": "f364:c3:m9"} {"signature": "def replace_interval(self, start, end, new, qual_string):", "body": "if len(new) != len(qual_string):raise Error('')super().replace_interval(start, end, new)self.qual = self.qual[:start] + qual_string + self.qual[end + :]", "docstring": "Replaces the sequence from start to end with the sequence \"new", "id": "f364:c3:m10"} {"signature": "def translate(self):", "body": "fa = super().translate()return Fastq(fa.id, fa.seq, ''*len(fa.seq))", "docstring": "Returns a Fasta sequence, translated into amino acids. Starts translating from 'frame', where frame expected to be 0,1 or 2", "id": "f364:c3:m11"} {"signature": "def acgtn_only(infile, outfile):", "body": "f = utils.open_file_write(outfile)for seq in sequences.file_reader(infile):seq.replace_non_acgt()print(seq, file=f)utils.close(f)", "docstring": "Replace every non-acgtn (case insensitve) character with an N", "id": "f405:m0"} {"signature": "def caf_to_fastq(infile, outfile, min_length=, trim=False):", "body": "caf_reader = caf.file_reader(infile)fout = utils.open_file_write(outfile)for c in caf_reader:if trim:if c.clip_start is not None and c.clip_end is not None:c.seq.seq = c.seq.seq[c.clip_start:c.clip_end + ]c.seq.qual = c.seq.qual[c.clip_start:c.clip_end + ]else:print('', c.id, file=sys.stderr)if len(c.seq) >= min_length:print(c.seq, file=fout)utils.close(fout)", "docstring": "Convert a CAF file to fastq. Reads shorter than min_length are not output. If clipping information is in the CAF file (with a line Clipping QUAL ...) 
and trim=True, then trim the reads", "id": "f405:m1"} {"signature": "def count_sequences(infile):", "body": "seq_reader = sequences.file_reader(infile)n = for seq in seq_reader:n += return n", "docstring": "Returns the number of sequences in a file", "id": "f405:m3"} {"signature": "def interleave(infile_1, infile_2, outfile, suffix1=None, suffix2=None):", "body": "seq_reader_1 = sequences.file_reader(infile_1)seq_reader_2 = sequences.file_reader(infile_2)f_out = utils.open_file_write(outfile)for seq_1 in seq_reader_1:try:seq_2 = next(seq_reader_2)except:utils.close(f_out)raise Error('', seq_1.id, '')if suffix1 is not None and not seq_1.id.endswith(suffix1):seq_1.id += suffix1if suffix2 is not None and not seq_2.id.endswith(suffix2):seq_2.id += suffix2print(seq_1, file=f_out)print(seq_2, file=f_out)try:seq_2 = next(seq_reader_2)except:seq_2 = Noneif seq_2 is not None:utils.close(f_out)raise Error('', seq_2.id, '')utils.close(f_out)", "docstring": "Makes interleaved file from two sequence files. If used, will append suffix1 onto end\n of every sequence name in infile_1, unless it already ends with suffix1. Similar for sufffix2.", "id": "f405:m16"} {"signature": "def make_random_contigs(contigs, length, outfile, name_by_letters=False, prefix='', seed=None, first_number=):", "body": "random.seed(a=seed)fout = utils.open_file_write(outfile)letters = list('')letters_index = for i in range(contigs):if name_by_letters:name = letters[letters_index]letters_index += if letters_index == len(letters):letters_index = else:name = str(i + first_number)fa = sequences.Fasta(prefix + name, ''.join([random.choice('') for x in range(length)]))print(fa, file=fout)utils.close(fout)", "docstring": "Makes a multi fasta file of random sequences, all the same length", "id": "f405:m17"} {"signature": "def mean_length(infile, limit=None):", "body": "total = count = seq_reader = sequences.file_reader(infile)for seq in seq_reader:total += len(seq)count += if limit is not None and count >= limit:breakassert count > return total / count", "docstring": "Returns the mean length of the sequences in the input file. By default uses all sequences. 
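As a quick illustration of the limit behaviour described in the mean_length docstring completed below, the same idea can be written over any iterable of sequences. This generic sketch is not the library function itself; the elided initial counters are assumed to be zero.

from itertools import islice

def mean_length(seqs, limit=None):
    # islice(seqs, None) iterates everything; islice(seqs, N) stops after the first N records
    lengths = [len(s) for s in islice(seqs, limit)]
    if not lengths:
        raise ValueError('no sequences given')
    return sum(lengths) / len(lengths)

# mean_length(['ACGT', 'AC']) -> 3.0; mean_length(['ACGT', 'AC'], limit=1) -> 4.0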
To limit to the first N sequences, use limit=N", "id": "f405:m18"} {"signature": "def merge_to_one_seq(infile, outfile, seqname=''):", "body": "seq_reader = sequences.file_reader(infile)seqs = []for seq in seq_reader:seqs.append(copy.copy(seq))new_seq = ''.join([seq.seq for seq in seqs])if type(seqs[]) == sequences.Fastq:new_qual = ''.join([seq.qual for seq in seqs])seqs[:] = []merged = sequences.Fastq(seqname, new_seq, new_qual)else:merged = sequences.Fasta(seqname, new_seq)seqs[:] = []f = utils.open_file_write(outfile)print(merged, file=f)utils.close(f)", "docstring": "Takes a multi fasta or fastq file and writes a new file that contains just one sequence, with the original sequences catted together, preserving their order", "id": "f405:m19"} {"signature": "def scaffolds_to_contigs(infile, outfile, number_contigs=False):", "body": "seq_reader = sequences.file_reader(infile)fout = utils.open_file_write(outfile)for seq in seq_reader:contigs = seq.contig_coords()counter = for contig in contigs:if number_contigs:name = seq.id + '' + str(counter)counter += else:name = ''.join([seq.id, str(contig.start + ), str(contig.end + )])print(sequences.Fasta(name, seq[contig.start:contig.end+]), file=fout)utils.close(fout)", "docstring": "Makes a file of contigs from scaffolds by splitting at every N.\n Use number_contigs=True to add .1, .2, etc onto end of each\n contig, instead of default to append coordinates.", "id": "f405:m21"} {"signature": "def sort_by_size(infile, outfile, smallest_first=False):", "body": "seqs = {}file_to_dict(infile, seqs)seqs = list(seqs.values())seqs.sort(key=lambda x: len(x), reverse=not smallest_first)fout = utils.open_file_write(outfile)for seq in seqs:print(seq, file=fout)utils.close(fout)", "docstring": "Sorts input sequence file by biggest sequence first, writes sorted output file. Set smallest_first=True to have smallest first", "id": "f405:m24"} {"signature": "def sort_by_name(infile, outfile):", "body": "seqs = {}file_to_dict(infile, seqs)fout = utils.open_file_write(outfile)for name in sorted(seqs):print(seqs[name], file=fout)utils.close(fout)", "docstring": "Sorts input sequence file by sort -d -k1,1, writes sorted output file.", "id": "f405:m25"} {"signature": "def to_fastg(infile, outfile, circular=None):", "body": "if circular is None:to_circularise = set()elif type(circular) is not set:f = utils.open_file_read(circular)to_circularise = set([x.rstrip() for x in f.readlines()])utils.close(f)else:to_circularise = circularseq_reader = sequences.file_reader(infile)fout = utils.open_file_write(outfile)nodes = for seq in seq_reader:new_id = ''.join(['', str(nodes),'', str(len(seq)),'', '','', seq.id])if seq.id in to_circularise:seq.id = new_id + '' + new_id + ''print(seq, file=fout)seq.revcomp()seq.id = new_id + \"\" + new_id + \"\"print(seq, file=fout)else:seq.id = new_id + ''print(seq, file=fout)seq.revcomp()seq.id = new_id + \"\"print(seq, file=fout)nodes += utils.close(fout)", "docstring": "Writes a FASTG file in SPAdes format from input file. Currently only whether or not a sequence is circular is supported. Put circular=set of ids, or circular=filename to make those sequences circular in the output. 
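The scaffolds_to_contigs operation above reduces to breaking each scaffold on runs of N, as the contig_coords docstring earlier also describes. A minimal sketch of that split using the standard re module (the function name here is illustrative only):

import re

def contig_strings(scaffold):
    # Split on runs of N/n and drop empty pieces left by leading or trailing gaps
    return [piece for piece in re.split(r'[Nn]+', scaffold) if piece]

# 'ACGTNNNTTAGnnGG' -> ['ACGT', 'TTAG', 'GG']
assert contig_strings('ACGTNNNTTAGnnGG') == ['ACGT', 'TTAG', 'GG']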
Puts coverage=1 on all contigs", "id": "f405:m26"} {"signature": "def length_offsets_from_fai(fai_file):", "body": "positions = {}total_length = f = utils.open_file_read(fai_file)for line in f:try:(name, length) = line.rstrip().split()[:]length = int(length)except:raise Error('' + fai_file + '' + line)positions[name] = total_lengthtotal_length += lengthutils.close(f)return positions", "docstring": "Returns a dictionary of positions of the start of each sequence, as\n if all the sequences were catted into one sequence.\n eg if file has three sequences, seq1 10bp, seq2 30bp, seq3 20bp, then\n the output would be: {'seq1': 0, 'seq2': 10, 'seq3': 40}", "id": "f405:m31"} {"signature": "def split_by_base_count(infile, outfiles_prefix, max_bases, max_seqs=None):", "body": "seq_reader = sequences.file_reader(infile)base_count = file_count = seq_count = fout = Noneif max_seqs is None:max_seqs = float('')for seq in seq_reader:if base_count == :fout = utils.open_file_write(outfiles_prefix + '' + str(file_count))file_count += if base_count + len(seq) > max_bases or seq_count >= max_seqs:if base_count == :print(seq, file=fout)utils.close(fout)else:utils.close(fout)fout = utils.open_file_write(outfiles_prefix + '' + str(file_count))print(seq, file=fout)base_count = len(seq)file_count += seq_count = else:base_count += len(seq)seq_count += print(seq, file=fout)utils.close(fout)", "docstring": "Splits a fasta/q file into separate files, file size determined by number of bases.\n\n Puts <= max_bases in each split file The exception is a single sequence >=max_bases\n is put in its own file. This does not split sequences.", "id": "f405:m32"} {"signature": "def split_by_fixed_size(infile, outfiles_prefix, chunk_size, tolerance, skip_if_all_Ns=False):", "body": "file_count = coords = []small_sequences = [] seq_reader = sequences.file_reader(infile)f_coords = utils.open_file_write(outfiles_prefix + '')for seq in seq_reader:if skip_if_all_Ns and seq.is_all_Ns():continueif len(seq) < chunk_size:small_sequences.append(copy.copy(seq))elif len(seq) <= chunk_size + tolerance:f = utils.open_file_write(outfiles_prefix + '' + str(file_count))print(seq, file=f)utils.close(f)file_count += else:chunks = [(x,x+chunk_size) for x in range(, len(seq), chunk_size)]if chunks[-][] - > len(seq):chunks[-] = (chunks[-][], len(seq))if len(chunks) > and (chunks[-][] - chunks[-][]) <= tolerance:chunks[-] = (chunks[-][], chunks[-][])chunks.pop()offset = for chunk in chunks:if not(skip_if_all_Ns and seq.is_all_Ns(start=chunk[], end=chunk[]-)):f = utils.open_file_write(outfiles_prefix + '' + str(file_count))chunk_id = seq.id + '' + str(chunk[]+) + '' + str(chunk[])print(sequences.Fasta(chunk_id, seq[chunk[]:chunk[]]), file=f)print(chunk_id, seq.id, offset, sep='', file=f_coords)utils.close(f)file_count += offset += chunk[] - chunk[]if len(small_sequences):f = utils.open_file_write(outfiles_prefix + '' + str(file_count))file_count += base_count = for seq in small_sequences:if base_count > and base_count + len(seq) > chunk_size + tolerance:utils.close(f)f = utils.open_file_write(outfiles_prefix + '' + str(file_count))file_count += base_count = print(seq, file=f)base_count += len(seq)utils.close(f)", "docstring": "Splits fasta/q file into separate files, with up to (chunk_size + tolerance) bases in each file", "id": "f405:m33"} {"signature": "def split_by_fixed_size_onefile(infile, outfile, chunk_size, tolerance, skip_if_all_Ns=False):", "body": "seq_reader = sequences.file_reader(infile)f_out = utils.open_file_write(outfile)for seq in 
seq_reader:for i in range(, len(seq), chunk_size):if i + chunk_size + tolerance >= len(seq):end = len(seq)else:end = i + chunk_sizesubseq = seq.subseq(i, end)if not (skip_if_all_Ns and subseq.is_all_Ns()):subseq.id += '' + str(i+) + '' + str(end)print(subseq, file=f_out)if end == len(seq):breakutils.close(f_out)", "docstring": "Splits each sequence in infile into chunks of fixed size, last chunk can be up to\n (chunk_size + tolerance) in length", "id": "f405:m34"} {"signature": "def stats_from_fai(infile):", "body": "f = utils.open_file_read(infile)try:lengths = sorted([int(line.split('')[]) for line in f], reverse=True)except:raise Error('' + infile)utils.close(f)stats = {}if len(lengths) > :stats[''] = max(lengths)stats[''] = min(lengths)stats[''] = sum(lengths)stats[''] = stats[''] / len(lengths)stats[''] = len(lengths)cumulative_length = for length in lengths:cumulative_length += lengthif cumulative_length >= * stats['']:stats[''] = lengthbreakelse:stats = {x: for x in ('', '', '', '', '', '')}return stats", "docstring": "Returns dictionary of length stats from an fai file. Keys are: longest, shortest, mean, total_length, N50, number", "id": "f405:m37"} {"signature": "def to_boulderio(infile, outfile):", "body": "seq_reader = sequences.file_reader(infile)f_out = utils.open_file_write(outfile)for sequence in seq_reader:print(\"\" + sequence.id, file=f_out)print(\"\" + sequence.seq, file=f_out)print(\"\", file=f_out)utils.close(f_out)", "docstring": "Converts input sequence file into a \"Boulder-IO format\", as used by primer3", "id": "f405:m38"} {"signature": "def intersection(l1, l2):", "body": "if len(l1) == or len(l2) == :return []out = []l2_pos = for l in l1:while l2_pos < len(l2) and l2[l2_pos].end < l.start:l2_pos += if l2_pos == len(l2):breakwhile l2_pos < len(l2) and l.intersects(l2[l2_pos]):out.append(l.intersection(l2[l2_pos]))l2_pos += l2_pos = max(, l2_pos - )return out", "docstring": "Returns intersection of two lists. Assumes the lists are sorted by start positions", "id": "f408:m0"} {"signature": "def merge_overlapping_in_list(l):", "body": "i = l.sort()while i < len(l) - :u = l[i].union(l[i+])if u is not None:l[i] = ul.pop(i+)else:i += ", "docstring": "Sorts list, merges any overlapping intervals, and also adjacent intervals. e.g.\n [0,1], [1,2] would be merge to [0,.2].", "id": "f408:m1"} {"signature": "def remove_contained_in_list(l):", "body": "i = l.sort()while i < len(l) - :if l[i+].contains(l[i]):l.pop(i)elif l[i].contains(l[i+]):l.pop(i+)else:i += ", "docstring": "Sorts list in place, then removes any intervals that are completely\n contained inside another interval", "id": "f408:m2"} {"signature": "def length_sum_from_list(l):", "body": "return sum([len(x) for x in l])", "docstring": "Returns total length of intervals from a list", "id": "f408:m3"} {"signature": "def distance_to_point(self, p):", "body": "if self.start <= p <= self.end:return else:return min(abs(self.start - p), abs(self.end - p))", "docstring": "Returns the distance from the point to the interval. 
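The Interval helpers whose docstrings appear around this point use inclusive start/end coordinates: intersects holds when start <= other.end and other.start <= self.end, union also merges directly adjacent intervals, and distance_to_point is zero inside the interval. A compact sketch of those semantics follows; the elided adjacency offset is assumed to be 1.

from collections import namedtuple

class Interval(namedtuple('Interval', 'start end')):
    # Both coordinates are inclusive, as in the docstrings above and below.
    def intersects(self, other):
        return self.start <= other.end and other.start <= self.end

    def union(self, other):
        # Merge when the intervals overlap or are directly adjacent, otherwise None
        if self.intersects(other) or self.end + 1 == other.start or other.end + 1 == self.start:
            return Interval(min(self.start, other.start), max(self.end, other.end))
        return None

    def distance_to_point(self, p):
        return 0 if self.start <= p <= self.end else min(abs(self.start - p), abs(self.end - p))

# Interval(0, 5).union(Interval(6, 9)) -> Interval(0, 9); Interval(0, 9).distance_to_point(12) -> 3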
Zero if the point lies inside the interval.", "id": "f408:c1:m7"} {"signature": "def intersects(self, i):", "body": "return self.start <= i.end and i.start <= self.end", "docstring": "Returns true iff this interval intersects the interval i", "id": "f408:c1:m8"} {"signature": "def contains(self, i):", "body": "return self.start <= i.start and i.end <= self.end", "docstring": "Returns true iff this interval contains the interval i", "id": "f408:c1:m9"} {"signature": "def union(self, i):", "body": "if self.intersects(i) or self.end + == i.start or i.end + == self.start:return Interval(min(self.start, i.start), max(self.end, i.end))else:return None", "docstring": "If intervals intersect, returns their union, otherwise returns None", "id": "f408:c1:m10"} {"signature": "def union_fill_gap(self, i):", "body": "return Interval(min(self.start, i.start), max(self.end, i.end))", "docstring": "Like union, but ignores whether the two intervals intersect or not", "id": "f408:c1:m11"} {"signature": "def intersection(self, i):", "body": "if self.intersects(i):return Interval(max(self.start, i.start), min(self.end, i.end))else:return None", "docstring": "If intervals intersect, returns their intersection, otherwise returns None", "id": "f408:c1:m12"} {"signature": "def _make_user_class(session, name):", "body": "attrs = session.eval('' % name, nout=).ravel().tolist()methods = session.eval('' % name, nout=).ravel().tolist()ref = weakref.ref(session)doc = _DocDescriptor(ref, name)values = dict(__doc__=doc, _name=name, _ref=ref, _attrs=attrs,__module__='')for method in methods:doc = _MethodDocDescriptor(ref, name, method)cls_name = '' % (name, method)method_values = dict(__doc__=doc)method_cls = type(str(cls_name),(OctaveUserClassMethod,), method_values)values[method] = method_cls(ref, method, name)for attr in attrs:values[attr] = OctaveUserClassAttr(ref, attr, attr)return type(str(name), (OctaveUserClass,), values)", "docstring": "Make an Octave class for a given class name", "id": "f410:m0"} {"signature": "def _make_variable_ptr_instance(session, name):", "body": "return OctaveVariablePtr(weakref.ref(session), name, name)", "docstring": "Make a pointer instance for a given variable by name.", "id": "f410:m2"} {"signature": "def __init__(self, *inputs, **kwargs):", "body": "addr = self._address = '' % (self._name, id(self))self._ref().feval(self._name, *inputs, store_as=addr, **kwargs)", "docstring": "Create a new instance with the user class constructor.", "id": "f410:c7:m0"} {"signature": "@classmethoddef from_value(cls, value):", "body": "instance = OctaveUserClass.__new__(cls)instance._address = '' % (instance._name, id(instance))instance._ref().push(instance._address, value)return instance", "docstring": "This is how an instance is created when we read a\n MatlabObject from a MAT file.", "id": "f410:c7:m1"} {"signature": "@classmethoddef to_value(cls, instance):", "body": "if not isinstance(instance, OctaveUserClass) or not instance._attrs:return dict()dtype = []values = []for attr in instance._attrs:dtype.append((str(attr), object))values.append(getattr(instance, attr))struct = np.array([tuple(values)], dtype)return MatlabObject(struct, instance._name)", "docstring": "Convert to a value to send to Octave.", "id": "f410:c7:m2"} {"signature": "@classmethoddef to_pointer(cls, instance):", "body": "return OctavePtr(instance._ref, instance._name, instance._address)", "docstring": "Get a pointer to the private object.", "id": "f410:c7:m3"} {"signature": "def demo(delay=, interactive=True):", "body": "script 
= \"\"\"\"\"\"if not PY2:script = script.replace('', '')for line in script.strip().split(''):line = line.strip()if not '' in line:time.sleep(delay)print(\"\".format(line))time.sleep(delay)if not interactive:if '' in line or '' in line or '' in line:line = ''exec(line)", "docstring": "Play a demo script showing most of the oct2py api features.\n\nParameters\n==========\ndelay : float\n Time between each command in seconds.", "id": "f411:m0"} {"signature": "def helper(self, base, keys, types):", "body": "for key, type_ in zip(keys, types):if not type(base[key]) == type_:try:assert type_(base[key]) == base[key], keyexcept ValueError:assert np.allclose(type_(base[key]), base[key])", "docstring": "Perform type checking of the values\n\nParameters\n==========\nbase : dict\n Sub-dictionary we are accessing.\nkeys : array-like\n List of keys to test in base.\ntypes : array-like\n List of expected return types for the keys.", "id": "f412:c0:m2"} {"signature": "def nested_equal(self, val1, val2):", "body": "if isinstance(val1, list):for (subval1, subval2) in zip(val1, val2):if isinstance(subval1, list):self.nested_equal(subval1, subval2)elif isinstance(subval1, np.ndarray):np.allclose(subval1, subval2)else:assert subval1 == subval2elif isinstance(val1, np.ndarray):np.allclose(val1, np.array(val2))elif isinstance(val1, (str, unicode)):assert val1 == val2else:try:assert (np.alltrue(np.isnan(val1)) andnp.alltrue(np.isnan(val2)))except (AssertionError, NotImplementedError):assert val1 == val2", "docstring": "Test for equality in a nested list or ndarray", "id": "f413:c0:m2"} {"signature": "def helper(self, outgoing, expected_type=None):", "body": "incoming = self.oc.roundtrip(outgoing)if expected_type is None:expected_type = type(outgoing)self.nested_equal(incoming, outgoing)try:assert type(incoming) == expected_typeexcept AssertionError:if type(incoming) == np.float32 and expected_type == np.float64:pass", "docstring": "Use roundtrip.m to make sure the data goes out and back intact.\n\nParameters\n==========\noutgoing : object\n Object to send to Octave.", "id": "f413:c0:m3"} {"signature": "def helper(self, outgoing, incoming=None, expected_type=None):", "body": "if incoming is None:incoming = self.oc.roundtrip(outgoing)if not expected_type:for out_type, _, in_type in TYPE_CONVERSIONS:if out_type == type(outgoing):expected_type = in_typebreakif not expected_type:expected_type = np.ndarraytry:assert incoming == outgoingexcept ValueError:assert np.allclose(np.array(incoming), np.array(outgoing))if type(incoming) != expected_type:incoming = self.oc.roundtrip(outgoing)assert expected_type(incoming) == incoming", "docstring": "Uses roundtrip.m to make sure the data goes out and back intact.\n\nParameters\n==========\noutgoing : object\n Object to send to Octave\nincoming : object, optional\n Object already retreived from Octave", "id": "f413:c1:m2"} {"signature": "@classmethoddef setUpClass(cls):", "body": "if not sys.stdin.encoding:sys.stdin = codecs.getreader('')(sys.stdin)cls.ip = get_ipython()cls.ip.magic('')cls.ip.ex('')cls.svgs_generated = ", "docstring": "Set up an IPython session just once.\n It'd be safer to set it up for each test, but for now,\n I'm mimicking the IPython team's logic.", "id": "f417:c0:m0"} {"signature": "def load_ipython_extension(ip):", "body": "ip.register_magics(OctaveMagics)", "docstring": "Load the extension in IPython.", "id": "f418:m0"} {"signature": "def __init__(self, shell):", "body": "super(OctaveMagics, self).__init__(shell)self._oct = oct2py.octaveself._display = 
display", "docstring": "Parameters\n----------\nshell : IPython shell", "id": "f418:c0:m0"} {"signature": "def kill_octave():", "body": "import osif os.name == '':os.system('')else:os.system('')os.system('')octave.restart()", "docstring": "Kill all octave instances (cross-platform).\n\n This will restart the \"octave\" instance. If you have instantiated\n Any other Oct2Py objects, you must restart them.", "id": "f421:m0"} {"signature": "def thread_check(nthreads=):", "body": "print(\"\".format(nthreads,datetime.datetime.now()))threads = []for i in range(nthreads):thread = ThreadClass()thread.setDaemon(True)thread.start()threads.append(thread)for thread in threads:thread.join()print(''.format(datetime.datetime.now()))", "docstring": "Start a number of threads and verify each has a unique Octave session.\n\nParameters\n==========\nnthreads : int\n Number of threads to use.\n\nRaises\n======\nOct2PyError\n If the thread does not sucessfully demonstrate independence.", "id": "f422:m0"} {"signature": "def run(self):", "body": "octave = Oct2Py()octave.push('', self.getName())name = octave.pull('')now = datetime.datetime.now()print(\"\".format(self.getName(), name, now))octave.exit()try:assert self.getName() == nameexcept AssertionError: raise Oct2PyError('')return", "docstring": "Create a unique instance of Octave and verify namespace uniqueness.\n\nRaises\n======\nOct2PyError\n If the thread does not sucessfully demonstrate independence", "id": "f422:c0:m0"} {"signature": "def __init__(self, executable=None, logger=None, timeout=None,oned_as='', temp_dir=None, convert_to_float=True,backend=None):", "body": "self._oned_as = oned_asself._executable = executableself._engine = Noneself._logger = Noneself.logger = loggerself.timeout = timeoutself.backend = backendself.temp_dir = temp_dir or tempfile.mkdtemp()self.convert_to_float = convert_to_floatself._user_classes = dict()self._function_ptrs = dict()self.restart()atexit.register(self._cleanup)", "docstring": "Start Octave and set up the session.", "id": "f423:c0:m0"} {"signature": "@propertydef logger(self):", "body": "return self._logger", "docstring": "The logging instance used by the session.", "id": "f423:c0:m1"} {"signature": "def __enter__(self):", "body": "if not self._engine:self.restart()return self", "docstring": "Return octave object, restart session if necessary", "id": "f423:c0:m3"} {"signature": "def __exit__(self, type, value, traceback):", "body": "self.exit()", "docstring": "Close session", "id": "f423:c0:m4"} {"signature": "def exit(self):", "body": "if self._engine:self._engine.repl.terminate()self._engine = None", "docstring": "Quits this octave session and cleans up.", "id": "f423:c0:m5"} {"signature": "def push(self, name, var, timeout=None, verbose=True):", "body": "if isinstance(name, (str, unicode)):name = [name]var = [var]for (n, v) in zip(name, var):self.feval('', '', n, v, nout=, timeout=timeout,verbose=verbose)", "docstring": "Put a variable or variables into the Octave session.\n\nParameters\n----------\nname : str or list\n Name of the variable(s).\nvar : object or list\n The value(s) to pass.\ntimeout : float\n Time to wait for response from Octave (per line).\n**kwargs: Deprecated kwargs, ignored.\n\nExamples\n--------\n>>> from oct2py import octave\n>>> y = [1, 2]\n>>> octave.push('y', y)\n>>> octave.pull('y')\narray([[ 1., 2.]])\n>>> octave.push(['x', 'y'], ['spam', [1, 2, 3, 4]])\n>>> octave.pull(['x', 'y']) # doctest: +SKIP\n[u'spam', array([[1, 2, 3, 4]])]\n\nNotes\n-----\nInteger type arguments will be 
converted to floating point\nunless `convert_to_float=False`.", "id": "f423:c0:m6"} {"signature": "def pull(self, var, timeout=None, verbose=True):", "body": "if isinstance(var, (str, unicode)):var = [var]outputs = []for name in var:exist = self._exist(name)if exist == :outputs.append(self.feval('', '', name,timeout=timeout, verbose=verbose))else:outputs.append(self.get_pointer(name, timeout=timeout))if len(outputs) == :return outputs[]return outputs", "docstring": "Retrieve a value or values from the Octave session.\n\nParameters\n----------\nvar : str or list\n Name of the variable(s) to retrieve.\ntimeout : float, optional.\n Time to wait for response from Octave (per line).\n**kwargs: Deprecated kwargs, ignored.\n\nReturns\n-------\nout : object\n Object returned by Octave.\n\nRaises\n------\nOct2PyError\n If the variable does not exist in the Octave session.\n\nExamples\n--------\n >>> from oct2py import octave\n >>> y = [1, 2]\n >>> octave.push('y', y)\n >>> octave.pull('y')\n array([[ 1., 2.]])\n >>> octave.push(['x', 'y'], ['spam', [1, 2, 3, 4]])\n >>> octave.pull(['x', 'y']) # doctest: +SKIP\n [u'spam', array([[1, 2, 3, 4]])]", "id": "f423:c0:m7"} {"signature": "def get_pointer(self, name, timeout=None):", "body": "exist = self._exist(name)isobject = self._isobject(name, exist)if exist == :raise Oct2PyError('' % name)elif exist == :return _make_variable_ptr_instance(self, name)elif isobject:return self._get_user_class(name)elif exist in [, , ]:return self._get_function_ptr(name)raise Oct2PyError('' % name)", "docstring": "Get a pointer to a named object in the Octave workspace.\n\n Parameters\n ----------\n name: str\n The name of the object in the Octave workspace.\n timemout: float, optional.\n Time to wait for response from Octave (per line).\n\n Examples\n --------\n >>> from oct2py import octave\n >>> octave.eval('foo = [1, 2];')\n >>> ptr = octave.get_pointer('foo')\n >>> ptr.value\n array([[ 1., 2.]])\n >>> ptr.address\n 'foo'\n >>> # Can be passed as an argument\n >>> octave.disp(ptr) # doctest: +SKIP\n 1 2\n\n >>> from oct2py import octave\n >>> sin = octave.get_pointer('sin') # equivalent to `octave.sin`\n >>> sin.address\n '@sin'\n >>> x = octave.quad(sin, 0, octave.pi())\n >>> x\n 2.0\n\n Notes\n -----\n Pointers can be passed to `feval` or dynamic functions as function arguments. 
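Collecting the push, pull and get_pointer examples from these docstrings into one place, a typical exchange with the module-level session looks like this (all calls are taken from the examples shown here):

from oct2py import octave

octave.push('y', [1, 2])                   # send a Python list into the Octave workspace
y = octave.pull('y')                       # returned as a 1x2 floating point array
sin = octave.get_pointer('sin')            # function pointer, address '@sin'
area = octave.quad(sin, 0, octave.pi())    # the pointer is passed as an argument; gives 2.0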
A pointer passed as a nested value will be passed by value instead.\n\n Raises\n ------\n Oct2PyError\n If the variable does not exist in the Octave session or is of\n unknown type.\n\n Returns\n -------\n A variable, object, user class, or function pointer as appropriate.", "id": "f423:c0:m8"} {"signature": "def extract_figures(self, plot_dir, remove=False):", "body": "figures = self._engine.extract_figures(plot_dir, remove)return figures", "docstring": "Extract the figures in the directory to IPython display objects.\n\n Parameters\n ----------\n plot_dir: str\n The plot dir where the figures were created.\n remove: bool, optional.\n Whether to remove the plot directory after saving.", "id": "f423:c0:m9"} {"signature": "def feval(self, func_path, *func_args, **kwargs):", "body": "if not self._engine:raise Oct2PyError('')nout = kwargs.get('', None)if nout is None:nout = plot_dir = kwargs.get('')settings = dict(backend='' if plot_dir else self.backend,format=kwargs.get(''),name=kwargs.get(''),width=kwargs.get(''),height=kwargs.get(''),resolution=kwargs.get(''))self._engine.plot_settings = settingsdname = osp.dirname(func_path)fname = osp.basename(func_path)func_name, ext = osp.splitext(fname)if ext and not ext == '':raise TypeError('')if func_name == '':raise Oct2PyError('' +'')stream_handler = kwargs.get('')verbose = kwargs.get('', True)store_as = kwargs.get('', '')timeout = kwargs.get('', self.timeout)if not stream_handler:stream_handler = self.logger.info if verbose else self.logger.debugreturn self._feval(func_name, func_args, dname=dname, nout=nout,timeout=timeout, stream_handler=stream_handler,store_as=store_as, plot_dir=plot_dir)", "docstring": "Run a function in Octave and return the result.\n\n Parameters\n ----------\n func_path: str\n Name of function to run or a path to an m-file.\n func_args: object, optional\n Args to send to the function.\n nout: int, optional\n Desired number of return arguments, defaults to 1.\n store_as: str, optional\n If given, saves the result to the given Octave variable name\n instead of returning it.\n verbose : bool, optional\n Log Octave output at INFO level. If False, log at DEBUG level.\n stream_handler: callable, optional\n A function that is called for each line of output from the\n evaluation.\n timeout: float, optional\n The timeout in seconds for the call.\n plot_dir: str, optional\n If specificed, save the session's plot figures to the plot\n directory instead of displaying the plot window.\n plot_name : str, optional\n Saved plots will start with `plot_name` and\n end with \"_%%.xxx' where %% is the plot number and\n xxx is the `plot_format`.\n plot_format: str, optional\n The format in which to save the plot.\n plot_width: int, optional\n The plot with in pixels.\n plot_height: int, optional\n The plot height in pixels.\n\n Notes\n -----\n The function arguments passed follow Octave calling convention, not\n Python. 
That is, all values must be passed as a comma separated list,\n not using `x=foo` assignment.\n\n Examples\n --------\n >>> from oct2py import octave\n >>> cell = octave.feval('cell', 10, 10, 10)\n >>> cell.shape\n (10, 10, 10)\n\n >>> from oct2py import octave\n >>> x = octave.feval('linspace', 0, octave.pi() / 2)\n >>> x.shape\n (1, 100)\n\n >>> from oct2py import octave\n >>> x = octave.feval('svd', octave.hilb(3))\n >>> x\n array([[ 1.40831893],\n [ 0.12232707],\n [ 0.00268734]])\n >>> # specify three return values\n >>> (u, v, d) = octave.feval('svd', octave.hilb(3), nout=3)\n >>> u.shape\n (3, 3)\n\n Returns\n -------\n The Python value(s) returned by the Octave function call.", "id": "f423:c0:m10"} {"signature": "def eval(self, cmds, verbose=True, timeout=None, stream_handler=None,temp_dir=None, plot_dir=None, plot_name='', plot_format='',plot_width=None, plot_height=None, plot_res=None,nout=, **kwargs):", "body": "if isinstance(cmds, (str, unicode)):cmds = [cmds]prev_temp_dir = self.temp_dirself.temp_dir = temp_dir or self.temp_dirprev_log_level = self.logger.levelif kwargs.get('') is False:self.logger.setLevel(logging.WARN)for name in ['', '']:if name not in kwargs:continuemsg = ''warnings.warn(msg % name, stacklevel=)return_both = kwargs.pop('', False)lines = []if return_both and not stream_handler:stream_handler = lines.appendans = Nonefor cmd in cmds:resp = self.feval('', '', cmd,nout=nout, timeout=timeout,stream_handler=stream_handler,verbose=verbose, plot_dir=plot_dir,plot_name=plot_name, plot_format=plot_format,plot_width=plot_width, plot_height=plot_height,plot_res=plot_res)if resp is not None:ans = respself.temp_dir = prev_temp_dirself.logger.setLevel(prev_log_level)if return_both:return ''.join(lines), ansreturn ans", "docstring": "Evaluate an Octave command or commands.\n\nParameters\n----------\ncmds : str or list\n Commands(s) to pass to Octave.\nverbose : bool, optional\n Log Octave output at INFO level. If False, log at DEBUG level.\nstream_handler: callable, optional\n A function that is called for each line of output from the\n evaluation.\ntimeout : float, optional\n Time to wait for response from Octave (per line). If not given,\n the instance `timeout` is used.\nnout : int, optional.\n The desired number of returned values, defaults to 0. 
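The nout argument mentioned here controls how many Octave return values are unpacked; restating the svd example from the feval docstring above:

from oct2py import octave

x = octave.feval('svd', octave.hilb(3))                   # default nout=1, singular values only
(u, v, d) = octave.feval('svd', octave.hilb(3), nout=3)   # request all three outputs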
If nout\n is 0, the `ans` will be returned as the return value.\ntemp_dir: str, optional\n If specified, the session's MAT files will be created in the\n directory, otherwise a the instance `temp_dir` is used.\n a shared memory (tmpfs) path.\nplot_dir: str, optional\n If specificed, save the session's plot figures to the plot\n directory instead of displaying the plot window.\nplot_name : str, optional\n Saved plots will start with `plot_name` and\n end with \"_%%.xxx' where %% is the plot number and\n xxx is the `plot_format`.\nplot_format: str, optional\n The format in which to save the plot (PNG by default).\nplot_width: int, optional\n The plot with in pixels.\nplot_height: int, optional\n The plot height in pixels.\nplot_res: int, optional\n The plot resolution in pixels per inch.\n**kwargs Deprectated kwargs.\n\nExamples\n--------\n>>> from oct2py import octave\n>>> octave.eval('disp(\"hello\")') # doctest: +SKIP\nhello\n>>> x = octave.eval('round(quad(@sin, 0, pi/2));')\n>>> x\n1.0\n\n>>> a = octave.eval('disp(\"hello\");1;') # doctest: +SKIP\nhello\n>>> a = octave.eval('disp(\"hello\");1;', verbose=False)\n>>> a\n1.0\n\n>>> from oct2py import octave\n>>> lines = []\n>>> octave.eval('for i = 1:3; disp(i);end', \\\n stream_handler=lines.append)\n>>> lines # doctest: +SKIP\n[' 1', ' 2', ' 3']\n\nReturns\n-------\nout : object\n Octave \"ans\" variable, or None.\n\nNotes\n-----\nThe deprecated `log` kwarg will temporarily set the `logger` level to\n`WARN`. Using the `logger` settings directly is preferred.\nThe deprecated `return_both` kwarg will still work, but the preferred\nmethod is to use the `stream_handler`. If `stream_handler` is given,\nthe `return_both` kwarg will be honored but will give an empty string\nas the reponse.\n\nRaises\n------\nOct2PyError\n If the command(s) fail.", "id": "f423:c0:m11"} {"signature": "def restart(self):", "body": "if self._engine:self._engine.repl.terminate()executable = self._executableif executable:os.environ[''] = executableif '' not in os.environ and '' in os.environ:os.environ[''] = os.environ['']self._engine = OctaveEngine(stdin_handler=self._handle_stdin,logger=self.logger)self._engine.eval('' % HERE.replace(osp.sep, ''))", "docstring": "Restart an Octave session in a clean state", "id": "f423:c0:m12"} {"signature": "def _feval(self, func_name, func_args=(), dname='', nout=,timeout=None, stream_handler=None, store_as='', plot_dir=None):", "body": "engine = self._engineif engine is None:raise Oct2PyError('')out_file = osp.join(self.temp_dir, '')out_file = out_file.replace(osp.sep, '')in_file = osp.join(self.temp_dir, '')in_file = in_file.replace(osp.sep, '')func_args = list(func_args)ref_indices = []for (i, value) in enumerate(func_args):if isinstance(value, OctavePtr):ref_indices.append(i + )func_args[i] = value.addressref_indices = np.array(ref_indices)req = dict(func_name=func_name, func_args=tuple(func_args),dname=dname or '', nout=nout,store_as=store_as or '',ref_indices=ref_indices)write_file(req, out_file, oned_as=self._oned_as,convert_to_float=self.convert_to_float)engine.stream_handler = stream_handler or self.logger.infoif timeout is None:timeout = self.timeouttry:engine.eval('' % (out_file, in_file),timeout=timeout)except KeyboardInterrupt as e:stream_handler(engine.repl.interrupt())raiseexcept TIMEOUT:stream_handler(engine.repl.interrupt())raise Oct2PyError('')except EOF:stream_handler(engine.repl.child.before)self.restart()raise Oct2PyError('')resp = read_file(in_file, self)if resp['']:msg = self._parse_error(resp[''])raise 
Oct2PyError(msg)result = resp[''].ravel().tolist()if isinstance(result, list) and len(result) == :result = result[]if (isinstance(result, Cell) andresult.size == andisinstance(result[], string_types) andresult[] == ''):result = Noneif plot_dir:self._engine.make_figures(plot_dir)return result", "docstring": "Run the given function with the given args.", "id": "f423:c0:m13"} {"signature": "def _parse_error(self, err):", "body": "self.logger.debug(err)stack = err.get('', [])if not err[''].startswith(''):err[''] = '' + err['']errmsg = '' % err['']if not isinstance(stack, StructArray):return errmsgerrmsg += ''for item in stack[:-]:errmsg += '' % itemtry:errmsg += '' % itemexcept Exception:passreturn errmsg", "docstring": "Create a traceback for an Octave evaluation error.", "id": "f423:c0:m14"} {"signature": "def _handle_stdin(self, line):", "body": "return input(line.replace(STDIN_PROMPT, ''))", "docstring": "Handle a stdin request from the session.", "id": "f423:c0:m15"} {"signature": "def _get_doc(self, name):", "body": "doc = '' % nameengine = self._enginedoc = engine.eval('' % name, silent=True)if '' in doc.lower():raise Oct2PyError(doc)if '' in doc.lower():doc = engine.eval('' % name, silent=True)doc = ''.join(doc.splitlines()[:])default = self.feval.__doc__default = '' + default[default.find(''):]default = ''.join([line[:] for line in default.splitlines()])doc = ''.join(doc.splitlines())doc = '' + doc + '' + defaultdoc += ''doc += ''doc += ''doc += ''doc += ''doc += ''doc += ''return doc", "docstring": "Get the documentation of an Octave procedure or object.\n\nParameters\n----------\nname : str\n Function name to search for.\n\nReturns\n-------\nout : str\n Documentation string.\n\nRaises\n------\nOct2PyError\n If the procedure or object function has a syntax error.", "id": "f423:c0:m16"} {"signature": "def _exist(self, name):", "body": "cmd = '' % nameresp = self._engine.eval(cmd, silent=True).strip()exist = int(resp.split()[-])if exist == :msg = ''raise Oct2PyError(msg % name)return exist", "docstring": "Test whether a name exists and return the name code.\n\n Raises an error when the name does not exist.", "id": "f423:c0:m17"} {"signature": "def _isobject(self, name, exist):", "body": "if exist in [, ]:return Falsecmd = '' % nameresp = self._engine.eval(cmd, silent=True).strip()return resp == ''", "docstring": "Test whether the name is an object.", "id": "f423:c0:m18"} {"signature": "def _get_function_ptr(self, name):", "body": "func = _make_function_ptr_instanceself._function_ptrs.setdefault(name, func(self, name))return self._function_ptrs[name]", "docstring": "Get or create a function pointer of the given name.", "id": "f423:c0:m19"} {"signature": "def _get_user_class(self, name):", "body": "self._user_classes.setdefault(name, _make_user_class(self, name))return self._user_classes[name]", "docstring": "Get or create a user class of the given type.", "id": "f423:c0:m20"} {"signature": "def _cleanup(self):", "body": "self.exit()workspace = osp.join(os.getcwd(), '')if osp.exists(workspace):os.remove(workspace)", "docstring": "Clean up resources used by the session.", "id": "f423:c0:m21"} {"signature": "def __getattr__(self, attr):", "body": "if attr.startswith(''):return super(Oct2Py, self).__getattr__(attr)if attr[-] == \"\":name = attr[:-]else:name = attrif self._engine is None:raise Oct2PyError('')exist = self._exist(name)if exist not in [, , , ]:msg = ''raise Oct2PyError(msg % name)if name == '':raise Oct2PyError('' +'')if self._isobject(name, exist):obj = 
self._get_user_class(name)else:obj = self._get_function_ptr(name)setattr(self, attr, obj)return obj", "docstring": "Automatically creates a wapper to an Octave function or object.\n\n Adapted from the mlabwrap project.", "id": "f423:c0:m22"} {"signature": "def get_log(name=None):", "body": "if name is None:name = ''else:name = '' + namelog = logging.getLogger(name)log.setLevel(logging.INFO)return log", "docstring": "Return a console logger.\n\n Output may be sent to the logger using the `debug`, `info`, `warning`,\n `error` and `critical` methods.\n\n Parameters\n ----------\n name : str\n Name of the log.\n\n References\n ----------\n .. [1] Logging facility for Python,\n http://docs.python.org/library/logging.html", "id": "f424:m0"} {"signature": "def _setup_log():", "body": "try:handler = logging.StreamHandler(stream=sys.stdout)except TypeError: handler = logging.StreamHandler(strm=sys.stdout)log = get_log()log.addHandler(handler)log.setLevel(logging.INFO)log.propagate = False", "docstring": "Configure root logger.", "id": "f424:m1"} {"signature": "def speed_check():", "body": "test = SpeedCheck()test.run()", "docstring": "Checks the speed penalty of the Python to Octave bridge.\n\n Uses timeit to test the raw execution of a Octave command,\n Then tests progressively larger array passing.", "id": "f425:m0"} {"signature": "def __init__(self):", "body": "self.octave = Oct2Py()self.array = []", "docstring": "Create our Octave instance and initialize the data array", "id": "f425:c0:m0"} {"signature": "def raw_speed(self):", "body": "self.octave.eval(\"\")", "docstring": "Run a fast Octave command and see how long it takes.", "id": "f425:c0:m1"} {"signature": "def large_array_put(self):", "body": "self.octave.push('', self.array)", "docstring": "Create a large matrix and load it into the octave session.", "id": "f425:c0:m2"} {"signature": "def large_array_get(self):", "body": "self.octave.pull('')", "docstring": "Retrieve the large matrix from the octave session", "id": "f425:c0:m3"} {"signature": "def run(self):", "body": "print('')print('' * )time.sleep()print('')avg = timeit.timeit(self.raw_speed, number=) / print(''.format(avg * ))sides = [, , , ]runs = [, , , ]for (side, nruns) in zip(sides, runs):self.array = np.reshape(np.arange(side ** ), (-))print(''.format(side, side))avg = timeit.timeit(self.large_array_put, number=nruns) / nrunsprint(''.format(avg * ))print(''.format(side, side))avg = timeit.timeit(self.large_array_get, number=nruns) / nrunsprint(''.format(avg * ))self.octave.exit()print('' * )print('')", "docstring": "Perform the Oct2Py speed analysis.\n\n Uses timeit to test the raw execution of an Octave command,\n Then tests progressively larger array passing.", "id": "f425:c0:m4"} {"signature": "def read_file(path, session=None):", "body": "try:data = loadmat(path, struct_as_record=True)except UnicodeDecodeError as e:raise Oct2PyError(str(e))out = dict()for (key, value) in data.items():out[key] = _extract(value, session)return out", "docstring": "Read the data from the given file path.", "id": "f426:m0"} {"signature": "def write_file(obj, path, oned_as='', convert_to_float=True):", "body": "data = _encode(obj, convert_to_float)try:with _WRITE_LOCK:savemat(path, data, appendmat=False, oned_as=oned_as,long_field_names=True)except KeyError: raise Exception('')", "docstring": "Save a Python object to an Octave file on the given path.", "id": "f426:m1"} {"signature": "def _extract(data, session=None):", "body": "if isinstance(data, list):return [_extract(d, session) for d in 
data]if not isinstance(data, np.ndarray):return dataif isinstance(data, MatlabObject):cls = session._get_user_class(data.classname)return cls.from_value(data)if data.dtype.names:if data.size == :return _create_struct(data, session)return StructArray(data, session)if data.dtype.kind == '':return Cell(data, session)if data.size == :return data.item()if data.size == :if data.dtype.kind in '':return ''return []return data", "docstring": "Convert the Octave values to values suitable for Python.", "id": "f426:m2"} {"signature": "def _create_struct(data, session):", "body": "out = Struct()for name in data.dtype.names:item = data[name]if isinstance(item, np.ndarray) and item.dtype.kind == '':item = item.squeeze().tolist()out[name] = _extract(item, session)return out", "docstring": "Create a struct from session data.", "id": "f426:m3"} {"signature": "def _encode(data, convert_to_float):", "body": "ctf = convert_to_floatif isinstance(data, (OctaveVariablePtr)):return _encode(data.value, ctf)if isinstance(data, OctaveUserClass):return _encode(OctaveUserClass.to_value(data), ctf)if isinstance(data, (OctaveFunctionPtr, MatlabFunction)):raise Oct2PyError('')if isinstance(data, MatlabObject):view = data.view(np.ndarray)out = MatlabObject(data, data.classname)for name in out.dtype.names:out[name] = _encode(view[name], ctf)return outif isinstance(data, (DataFrame, Series)):return _encode(data.values, ctf)if isinstance(data, dict):out = dict()for (key, value) in data.items():out[key] = _encode(value, ctf)return outif data is None:return np.NaNif isinstance(data, set):return _encode(list(data), ctf)if isinstance(data, list):if _is_simple_numeric(data):return _encode(np.array(data), ctf)return _encode(tuple(data), ctf)if isinstance(data, tuple):obj = np.empty(len(data), dtype=object)for (i, item) in enumerate(data):obj[i] = _encode(item, ctf)return objif isinstance(data, spmatrix):return data.astype(np.float64)if not isinstance(data, np.ndarray):return dataif data.dtype.kind in '':out = np.empty(data.size, dtype=data.dtype)for (i, item) in enumerate(data.ravel()):if data.dtype.names:for name in data.dtype.names:out[i][name] = _encode(item[name], ctf)else:out[i] = _encode(item, ctf)return out.reshape(data.shape)if data.dtype.name == '':return data.astype(np.complex128)if ctf and data.dtype.kind in '':return data.astype(np.float64)return data", "docstring": "Convert the Python values to values suitable to send to Octave.", "id": "f426:m4"} {"signature": "def _is_simple_numeric(data):", "body": "for item in data:if isinstance(item, set):item = list(item)if isinstance(item, list):if not _is_simple_numeric(item):return Falseelif not isinstance(item, (int, float, complex)):return Falsereturn True", "docstring": "Test if a list contains simple numeric data.", "id": "f426:m5"} {"signature": "def __new__(cls, value, session=None):", "body": "value = np.asarray(value)if (value.shape[value.ndim - ] == ):value = value.squeeze(axis=value.ndim - )value = np.atleast_1d(value)if not session:return value.view(cls)obj = np.empty(value.size, dtype=value.dtype).view(cls)for (i, item) in enumerate(value.ravel()):for name in value.dtype.names:obj[i][name] = _extract(item[name], session)return obj.reshape(value.shape)", "docstring": "Create a struct array from a value and optional Octave session.", "id": "f426:c1:m0"} {"signature": "@propertydef fieldnames(self):", "body": "return self.dtype.names", "docstring": "The field names of the struct array.", "id": "f426:c1:m1"} {"signature": "def __getattribute__(self, attr):", "body": 
"attr = np.recarray.__getattribute__(self, attr)if isinstance(attr, np.ndarray) and attr.dtype.kind == '':return Cell(attr)return attr", "docstring": "Return object arrays as cells and all other values unchanged.", "id": "f426:c1:m2"} {"signature": "def __getitem__(self, item):", "body": "item = np.recarray.__getitem__(self, item)if isinstance(item, np.ndarray) and item.dtype.kind == '':return Cell(item)return item", "docstring": "Return object arrays as cells and all other values unchanged.", "id": "f426:c1:m3"} {"signature": "def __new__(cls, value, session=None):", "body": "value = np.asarray(value, dtype=object)if (value.shape[value.ndim - ] == ):value = value.squeeze(axis=value.ndim - )value = np.atleast_1d(value)if not session:return value.view(cls)obj = np.empty(value.size, dtype=object).view(cls)for (i, item) in enumerate(value.ravel()):obj[i] = _extract(item, session)return obj.reshape(value.shape)", "docstring": "Create a cell array from a value and optional Octave session.", "id": "f426:c2:m0"} {"signature": "@register.inclusion_tag(\"\", takes_context=True)def stored_messages_list(context, num_elements=):", "body": "if \"\" in context:user = context[\"\"]if user.is_authenticated():qs = Inbox.objects.select_related(\"\").filter(user=user)return {\"\": qs[:num_elements],\"\": qs.count(),}", "docstring": "Renders a list of unread stored messages for the current user", "id": "f444:m0"} {"signature": "@register.assignment_tag(takes_context=True)def stored_messages_count(context):", "body": "if \"\" in context:user = context[\"\"]if user.is_authenticated():return Inbox.objects.select_related(\"\").filter(user=user).count()", "docstring": "Renders a list of unread stored messages for the current user", "id": "f444:m1"} {"signature": "@register.inclusion_tag(\"\", takes_context=True)def stored_messages_archive(context, num_elements=):", "body": "if \"\" in context:user = context[\"\"]if user.is_authenticated():qs = MessageArchive.objects.select_related(\"\").filter(user=user)return {\"\": qs[:num_elements],\"\": qs.count(),}", "docstring": "Renders a list of archived messages for the current user", "id": "f444:m2"} {"signature": "@login_required@api_view([''])def mark_all_read(request):", "body": "from .settings import stored_messages_settingsbackend = stored_messages_settings.STORAGE_BACKEND()backend.inbox_purge(request.user)return Response({\"\": \"\"})", "docstring": "Mark all messages as read (i.e. delete from inbox) for current logged in user", "id": "f447:m0"} {"signature": "@detail_route(methods=[''])def read(self, request, pk=None):", "body": "from .settings import stored_messages_settingsbackend = stored_messages_settings.STORAGE_BACKEND()try:backend.inbox_delete(request.user, pk)except MessageDoesNotExist as e:return Response(e.message, status='')return Response({'': ''})", "docstring": "Mark the message as read (i.e. 
delete from inbox)", "id": "f447:c0:m2"} {"signature": "def _get(self, *args, **kwargs):", "body": "messages, all_retrieved = super(StorageMixin, self)._get(*args, **kwargs)if self.user.is_authenticated():inbox_messages = self.backend.inbox_list(self.user)else:inbox_messages = []return messages + inbox_messages, all_retrieved", "docstring": "Retrieve unread messages for current user, both from the inbox and\nfrom other storages", "id": "f450:c0:m1"} {"signature": "def add(self, level, message, extra_tags=''):", "body": "if not message:returnlevel = int(level)if level < self.level:returnif level not in stored_messages_settings.STORE_LEVELS or self.user.is_anonymous():return super(StorageMixin, self).add(level, message, extra_tags)self.added_new = Truem = self.backend.create_message(level, message, extra_tags)self.backend.archive_store([self.user], m)self._queued_messages.append(m)", "docstring": "If the message level was configured for being stored and request.user\nis not anonymous, save it to the database. Otherwise, let some other\nclass handle the message.\n\nNotice: controls like checking the message is not empty and the level\nis above the filter need to be performed here, but it could happen\nthey'll be performed again later if the message does not need to be\nstored.", "id": "f450:c0:m2"} {"signature": "def _store(self, messages, response, *args, **kwargs):", "body": "contrib_messages = []if self.user.is_authenticated():if not messages:self.backend.inbox_purge(self.user)else:for m in messages:try:self.backend.inbox_store([self.user], m)except MessageTypeNotSupported:contrib_messages.append(m)super(StorageMixin, self)._store(contrib_messages, response, *args, **kwargs)", "docstring": "persistent messages are already in the database inside the 'archive',\nso we can say they're already \"stored\".\nHere we put them in the inbox, or remove from the inbox in case the\nmessages were iterated.\n\nmessages contains only new msgs if self.used==True\nelse contains both new and unread messages", "id": "f450:c0:m3"} {"signature": "def _prepare_messages(self, messages):", "body": "for message in messages:if not self.backend.can_handle(message):message._prepare()", "docstring": "Like the base class method, prepares a list of messages for storage\nbut avoid to do this for `models.Message` instances.", "id": "f450:c0:m4"} {"signature": "def perform_import(val, setting_name):", "body": "if isinstance(val, six.string_types):return import_from_string(val, setting_name)elif isinstance(val, (list, tuple)):return [import_from_string(item, setting_name) for item in val]return val", "docstring": "If the given setting is a string import notation,\nthen perform the necessary import or imports.", "id": "f453:m0"} {"signature": "def import_from_string(val, setting_name):", "body": "try:parts = val.split('')module_path, class_name = ''.join(parts[:-]), parts[-]module = importlib.import_module(module_path)return getattr(module, class_name)except ImportError as e:msg = \"\" % (val, setting_name,e.__class__.__name__, e)raise ImportError(msg)", "docstring": "Attempt to import a class from a string representation.", "id": "f453:m1"} {"signature": "def _toJSON(self, msg_instance):", "body": "return json.dumps(msg_instance._asdict(), cls=DjangoJSONEncoder)", "docstring": "Dump a Message instance into a JSON string", "id": "f456:c0:m2"} {"signature": "def _fromJSON(self, json_msg):", "body": "return Message(**json.loads(force_text(json_msg)))", "docstring": "Return a Message instance built from data contained in a JSON 
string", "id": "f456:c0:m3"} {"signature": "def _list_key(self, key):", "body": "ret = []for msg_json in self.client.lrange(key, , -):ret.append(self._fromJSON(msg_json))return ret", "docstring": "boilerplate", "id": "f456:c0:m4"} {"signature": "def create_message(self, level, msg_text, extra_tags='', date=None, url=None):", "body": "if not date:now = timezone.now()else:now = dater = now.isoformat()if now.microsecond:r = r[:] + r[:]if r.endswith(''):r = r[:-] + ''fingerprint = r + msg_textmsg_id = hashlib.sha256(fingerprint.encode('', '')).hexdigest()return Message(id=msg_id, message=msg_text, level=level, tags=extra_tags, date=r, url=url)", "docstring": "Message instances are namedtuples of type `Message`.\nThe date field is already serialized in datetime.isoformat ECMA-262 format", "id": "f456:c0:m6"} {"signature": "def create_message(self, level, msg_text, extra_tags, date=None):", "body": "raise NotImplementedError()", "docstring": "Create and return a `Message` instance.\nInstance types depend on backends implementation.\n\nParams:\n `level`: message level (see django.contrib.messages)\n `msg_text`: what you think it is\n `extra_tags`: see django.contrib.messages\n `date`: a DateTime (optional)\n\nReturn:\n `Message` instance", "id": "f457:c0:m0"} {"signature": "def inbox_list(self, user):", "body": "raise NotImplementedError()", "docstring": "Retrieve all the messages in `user`'s Inbox.\n\nParams:\n `user`: Django User instance\n\nReturn:\n An iterable containing `Message` instances", "id": "f457:c0:m1"} {"signature": "def inbox_purge(self, user):", "body": "raise NotImplementedError()", "docstring": "Delete all the messages in `user`'s Inbox.\n\nParams:\n user: Django User instance\n\nReturn:\n None", "id": "f457:c0:m2"} {"signature": "def inbox_store(self, users, msg_instance):", "body": "raise NotImplementedError()", "docstring": "Store a `Message` instance in the inbox for a list\nof users.\n\nParams:\n users: a list or iterable containing Django User instances\n msg_instance: Message instance to persist in inbox\n\nReturn:\n None\n\nRaise:\n MessageTypeNotSupported if `msg_instance` cannot be managed by current backend", "id": "f457:c0:m3"} {"signature": "def inbox_delete(self, user, msg_id):", "body": "raise NotImplementedError()", "docstring": "Remove a `Message` instance from `user`'s inbox.\n\nParams:\n user: Django User instance\n msg_id: Message identifier\n\nReturn:\n None\n\nRaise:\n MessageDoesNotExist if msg_id was not found", "id": "f457:c0:m4"} {"signature": "def inbox_get(self, user, msg_id):", "body": "", "docstring": "Retrieve a `Message` instance from `user`'s inbox.\n\nParams:\n user: Django User instance\n msg_id: Message identifier\n\nReturn:\n A `Message` instance\n\nRaise:\n MessageDoesNotExist if msg_id was not found", "id": "f457:c0:m5"} {"signature": "def archive_store(self, users, msg_instance):", "body": "raise NotImplementedError()", "docstring": "Store a `Message` instance in the archive for a list\nof users.\n\nParams:\n users: a list or iterable containing Django User instances\n msg_instance: Message instance to persist in archive\n\nReturn:\n None\n\nRaise:\n MessageTypeNotSupported if `msg_instance` cannot be managed by current backend", "id": "f457:c0:m6"} {"signature": "def archive_list(self, user):", "body": "raise NotImplementedError()", "docstring": "Retrieve all the messages in `user`'s archive.\n\nParams:\n user: Django User instance\n\nReturn:\n An iterable containing `Message` instances", "id": "f457:c0:m7"} {"signature": "def 
can_handle(self, msg_instance):", "body": "raise NotImplementedError()", "docstring": "Determine if this backend can handle messages\nof the same type of `msg_instance`.\n\nParams:\n `msg_instance`: `Message` instance\n\nReturn:\n True if type is correct, False otherwise", "id": "f457:c0:m8"} {"signature": "def expired_messages_cleanup(self):", "body": "raise NotImplementedError()", "docstring": "Remove messages that have been expired.\n\nParams:\n None\n\nReturn:\n None", "id": "f457:c0:m9"} {"signature": "def _flush(self):", "body": "raise NotImplementedError()", "docstring": "Clear all backend data.\nWarning: heavily destructive! Here for convenience, not used by the API anyway.\n\nParams:\n None\n\nReturn:\n None", "id": "f457:c0:m10"} {"signature": "def add_message_for(users, level, message_text, extra_tags='', date=None, url=None, fail_silently=False):", "body": "BackendClass = stored_messages_settings.STORAGE_BACKENDbackend = BackendClass()m = backend.create_message(level, message_text, extra_tags, date, url)backend.archive_store(users, m)backend.inbox_store(users, m)", "docstring": "Send a message to a list of users without passing through `django.contrib.messages`\n\n:param users: an iterable containing the recipients of the messages\n:param level: message level\n:param message_text: the string containing the message\n:param extra_tags: like the Django api, a string containing extra tags for the message\n:param date: a date, different than the default timezone.now\n:param url: an optional url\n:param fail_silently: not used at the moment", "id": "f463:m0"} {"signature": "def broadcast_message(level, message_text, extra_tags='', date=None, url=None, fail_silently=False):", "body": "from django.contrib.auth import get_user_modelusers = get_user_model().objects.all()add_message_for(users, level, message_text, extra_tags=extra_tags, date=date, url=url, fail_silently=fail_silently)", "docstring": "Send a message to all users aka broadcast.\n\n:param level: message level\n:param message_text: the string containing the message\n:param extra_tags: like the Django api, a string containing extra tags for the message\n:param date: a date, different than the default timezone.now\n:param url: an optional url\n:param fail_silently: not used at the moment", "id": "f463:m1"} {"signature": "def mark_read(user, message):", "body": "BackendClass = stored_messages_settings.STORAGE_BACKENDbackend = BackendClass()backend.inbox_delete(user, message)", "docstring": "Mark message instance as read for user.\nReturns True if the message was `unread` and thus actually marked as `read` or False in case\nit is already `read` or it does not exist at all.\n\n:param user: user instance for the recipient\n:param message: a Message instance to mark as read", "id": "f463:m2"} {"signature": "def mark_all_read(user):", "body": "BackendClass = stored_messages_settings.STORAGE_BACKENDbackend = BackendClass()backend.inbox_purge(user)", "docstring": "Mark all message instances for a user as read.\n\n:param user: user instance for the recipient", "id": "f463:m3"} {"signature": "def get_version(package):", "body": "init_py = open(os.path.join(package, '')).read()return re.search(\"\", init_py).group()", "docstring": "Return package version as listed in `__version__` in `init.py`.", "id": "f465:m0"} {"signature": "def get_version(package):", "body": "init_py = open(os.path.join(package, '')).read()return re.search(\"\", init_py).group()", "docstring": "Return package version as listed in `__version__` in `init.py`.", "id": 
"f466:m0"} {"signature": "@login_requireddef notice_settings(request):", "body": "notice_types = NoticeType.objects.all()settings_table = []for notice_type in notice_types:settings_row = []for medium_id, medium_display in NOTICE_MEDIA:form_label = \"\" % (notice_type.label, medium_id)setting = NoticeSetting.for_user(request.user, notice_type, medium_id)if request.method == \"\":if request.POST.get(form_label) == \"\":if not setting.send:setting.send = Truesetting.save()else:if setting.send:setting.send = Falsesetting.save()settings_row.append((form_label, setting.send))settings_table.append({\"\": notice_type, \"\": settings_row})if request.method == \"\":next_page = request.POST.get(\"\", \"\")return HttpResponseRedirect(next_page)settings = {\"\": [medium_display for medium_id, medium_display in NOTICE_MEDIA],\"\": settings_table,}return render_to_response(\"\", {\"\": notice_types,\"\": settings,}, context_instance=RequestContext(request))", "docstring": "The notice settings view.\n\nTemplate: :template:`notification/notice_settings.html`\n\nContext:\n\n notice_types\n A list of all :model:`notification.NoticeType` objects.\n\n notice_settings\n A dictionary containing ``column_headers`` for each ``NOTICE_MEDIA``\n and ``rows`` containing a list of dictionaries: ``notice_type``, a\n :model:`notification.NoticeType` object and ``cells``, a list of\n tuples whose first value is suitable for use in forms and the second\n value is ``True`` or ``False`` depending on a ``request.POST``\n variable called ``form_label``, whose valid value is ``on``.", "id": "f476:m0"} {"signature": "def can_send(self, user, notice_type):", "body": "from notification.models import NoticeSettingreturn NoticeSetting.for_user(user, notice_type, self.medium_id).send", "docstring": "Determines whether this backend is allowed to send a notification to\nthe given user and notice_type.", "id": "f480:c0:m1"} {"signature": "def deliver(self, recipient, sender, notice_type, extra_context):", "body": "raise NotImplementedError()", "docstring": "Deliver a notification to the given recipient.", "id": "f480:c0:m2"} {"signature": "def get_formatted_messages(self, formats, label, context):", "body": "format_templates = {}for fmt in formats:if fmt.endswith(\"\"):context.autoescape = Falseformat_templates[fmt] = render_to_string((\"\" % (label, fmt),\"\" % fmt), context_instance=context)return format_templates", "docstring": "Returns a dictionary with the format identifier as the key. The values are\nare fully rendered templates with the given context.", "id": "f480:c0:m3"} {"signature": "def __init__(self, path, threaded=True):", "body": "self.path = pathself.lock_file = os.path.abspath(path) + \"\"self.hostname = socket.gethostname()self.pid = os.getpid()if threaded:name = threading.current_thread().get_name()tname = \"\" % quote(name, safe=\"\")else:tname = \"\"dirname = os.path.dirname(self.lock_file)self.unique_name = os.path.join(dirname,\"\" % (self.hostname,tname,self.pid))", "docstring": ">>> lock = LockBase(\"somefile\")\n>>> lock = LockBase(\"somefile\", threaded=False)", "id": "f484:c8:m0"} {"signature": "def acquire(self, timeout=None):", "body": "raise NotImplementedError(\"\")", "docstring": "Acquire the lock.\n\n* If timeout is omitted (or None), wait forever trying to lock the\n file.\n\n* If timeout > 0, try to acquire the lock for that many seconds. 
If\n the lock period expires and the file is still locked, raise\n LockTimeout.\n\n* If timeout <= 0, raise AlreadyLocked immediately if the file is\n already locked.", "id": "f484:c8:m1"} {"signature": "def release(self):", "body": "raise NotImplementedError(\"\")", "docstring": "Release the lock.\n\nIf the file is not locked, raise NotLocked.", "id": "f484:c8:m2"} {"signature": "def is_locked(self):", "body": "raise NotImplementedError(\"\")", "docstring": "Tell whether or not the file is locked.", "id": "f484:c8:m3"} {"signature": "def i_am_locking(self):", "body": "raise NotImplementedError(\"\")", "docstring": "Return True if this object is locking the file.", "id": "f484:c8:m4"} {"signature": "def break_lock(self):", "body": "raise NotImplementedError(\"\")", "docstring": "Remove a lock. Useful if a locking thread failed to unlock.", "id": "f484:c8:m5"} {"signature": "def __enter__(self):", "body": "self.acquire()return self", "docstring": "Context manager support.", "id": "f484:c8:m6"} {"signature": "def __exit__(self, *_exc):", "body": "self.release()", "docstring": "Context manager support.", "id": "f484:c8:m7"} {"signature": "def __init__(self, path, threaded=True):", "body": "LockBase.__init__(self, path, threaded)if threaded:tname = \"\" % get_ident()else:tname = \"\"self.unique_name = os.path.join(self.lock_file,\"\".format(self.hostname, tname, self.pid))", "docstring": ">>> lock = MkdirFileLock(\"somefile\")\n>>> lock = MkdirFileLock(\"somefile\", threaded=False)", "id": "f484:c10:m0"} {"signature": "def get_notification_language(user):", "body": "if getattr(settings, \"\", False):try:app_label, model_name = settings.NOTIFICATION_LANGUAGE_MODULE.split(\"\")model = models.get_model(app_label, model_name)language_model = model._default_manager.get(user__id__exact=user.id)if hasattr(language_model, \"\"):return language_model.languageexcept (ImportError, ImproperlyConfigured, model.DoesNotExist):raise LanguageStoreNotAvailableraise LanguageStoreNotAvailable", "docstring": "Returns site-specific notification language for this user. Raises\nLanguageStoreNotAvailable if this site does not use translated\nnotifications.", "id": "f485:m1"} {"signature": "def send_now(users, label, extra_context=None, sender=None):", "body": "sent = Falseif extra_context is None:extra_context = {}notice_type = NoticeType.objects.get(label=label)current_language = get_language()for user in users:try:language = get_notification_language(user)except LanguageStoreNotAvailable:language = Noneif language is not None:activate(language)for backend in NOTIFICATION_BACKENDS.values():if backend.can_send(user, notice_type):backend.deliver(user, sender, notice_type, extra_context)sent = Trueactivate(current_language)return sent", "docstring": "Creates a new notice.\n\nThis is intended to be how other apps create new notices.\n\nnotification.send(user, \"friends_invite_sent\", {\n \"spam\": \"eggs\",\n \"foo\": \"bar\",\n)", "id": "f485:m2"} {"signature": "def send(*args, **kwargs):", "body": "queue_flag = kwargs.pop(\"\", False)now_flag = kwargs.pop(\"\", False)assert not (queue_flag and now_flag), \"\"if queue_flag:return queue(*args, **kwargs)elif now_flag:return send_now(*args, **kwargs)else:if QUEUE_ALL:return queue(*args, **kwargs)else:return send_now(*args, **kwargs)", "docstring": "A basic interface around both queue and send_now. This honors a global\nflag NOTIFICATION_QUEUE_ALL that helps determine whether all calls should\nbe queued or not. 
A per call ``queue`` or ``now`` keyword argument can be\nused to always override the default global behavior.", "id": "f485:m3"} {"signature": "def queue(users, label, extra_context=None, sender=None):", "body": "if extra_context is None:extra_context = {}if isinstance(users, QuerySet):users = [row[\"\"] for row in users.values(\"\")]else:users = [user.pk for user in users]notices = []for user in users:notices.append((user, label, extra_context, sender))NoticeQueueBatch(pickled_data=base64.b64encode(pickle.dumps(notices))).save()", "docstring": "Queue the notification in NoticeQueueBatch. This allows for large amounts\nof user notifications to be deferred to a separate process running outside\nthe webserver.", "id": "f485:m4"} {"signature": "@classmethoddef create(cls, label, display, description, default=, verbosity=):", "body": "try:notice_type = cls._default_manager.get(label=label)updated = Falseif display != notice_type.display:notice_type.display = displayupdated = Trueif description != notice_type.description:notice_type.description = descriptionupdated = Trueif default != notice_type.default:notice_type.default = defaultupdated = Trueif updated:notice_type.save()if verbosity > :print(\"\" % label)except cls.DoesNotExist:cls(label=label, display=display, description=description, default=default).save()if verbosity > :print(\"\" % label)", "docstring": "Creates a new NoticeType.\n\nThis is intended to be used by other apps as a post_syncdb management step.", "id": "f485:c1:m1"} {"signature": "@not_implemented_for('')def divrank(G, alpha=, d=, personalization=None,max_iter=, tol=, nstart=None, weight='',dangling=None):", "body": "if len(G) == :return {}if not G.is_directed():D = G.to_directed()else:D = GW = nx.stochastic_graph(D, weight=weight)N = W.number_of_nodes()for n in W.nodes_iter():for n_ in W.nodes_iter():if n != n_ :if n_ in W[n]:W[n][n_][weight] *= alphaelse:if n_ not in W[n]:W.add_edge(n, n_)W[n][n_][weight] = - alphaif nstart is None:x = dict.fromkeys(W, / N)else:s = float(sum(nstart.values()))x = dict((k, v / s) for k, v in list(nstart.items()))if personalization is None:p = dict.fromkeys(W, / N)else:missing = set(G) - set(personalization)if missing:raise NetworkXError('''''' % missing)s = float(sum(personalization.values()))p = dict((k, v / s) for k, v in list(personalization.items()))if dangling is None:dangling_weights = pelse:missing = set(G) - set(dangling)if missing:raise NetworkXError('''''' % missing)s = float(sum(dangling.values()))dangling_weights = dict((k, v/s) for k, v in list(dangling.items()))dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == ]for _ in range(max_iter):xlast = xx = dict.fromkeys(list(xlast.keys()), )danglesum = d * sum(xlast[n] for n in dangling_nodes)for n in x:D_t = sum(W[n][nbr][weight] * xlast[nbr] for nbr in W[n])for nbr in W[n]:x[nbr] += (d * (W[n][nbr][weight] * xlast[nbr] / D_t) * xlast[n])x[n] += danglesum * dangling_weights[n] + ( - d) * p[n]err = sum([abs(x[n] - xlast[n]) for n in x])if err < N*tol:return xraise NetworkXError('''' % max_iter)", "docstring": "Returns the DivRank (Diverse Rank) of the nodes in the graph.\nThis code is based on networkx.pagerank.\n\nArgs: (diff from pagerank)\n alpha: controls strength of self-link [0.0-1.0]\n d: the damping factor\n\nReference:\n Qiaozhu Mei and Jian Guo and Dragomir Radev,\n DivRank: the Interplay of Prestige and Diversity in Information Networks,\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.174.7982", "id": "f489:m0"} {"signature": "def 
divrank_scipy(G, alpha=, d=, personalization=None,max_iter=, tol=, nstart=None, weight='',dangling=None):", "body": "import scipy.sparseN = len(G)if N == :return {}nodelist = G.nodes()M = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,dtype=float)S = scipy.array(M.sum(axis=)).flatten()S[S != ] = / S[S != ]Q = scipy.sparse.spdiags(S.T, , *M.shape, format='')M = Q * MM = scipy.sparse.lil_matrix(M)M.setdiag()M = alpha * MM.setdiag( - alpha)x = scipy.repeat( / N, N)if personalization is None:p = scipy.repeat( / N, N)else:missing = set(nodelist) - set(personalization)if missing:raise NetworkXError('''''' % missing)p = scipy.array([personalization[n] for n in nodelist],dtype=float)p = p / p.sum()if dangling is None:dangling_weights = pelse:missing = set(nodelist) - set(dangling)if missing:raise NetworkXError('''''' % missing)dangling_weights = scipy.array([dangling[n] for n in nodelist],dtype=float)dangling_weights /= dangling_weights.sum()is_dangling = scipy.where(S == )[]for _ in range(max_iter):xlast = xD_t = M * xx = (d * (x / D_t * M * x + sum(x[is_dangling]) * dangling_weights)+ ( - d) * p)err = scipy.absolute(x - xlast).sum()if err < N * tol:return dict(list(zip(nodelist, list(map(float, x)))))raise NetworkXError('''' % max_iter)", "docstring": "Returns the DivRank (Diverse Rank) of the nodes in the graph.\nThis code is based on networkx.pagerank_scipy", "id": "f489:m1"} {"signature": "def lexrank(sentences, continuous=False, sim_threshold=, alpha=,use_divrank=False, divrank_alpha=):", "body": "ranker_params = {'': }if use_divrank:ranker = divrank_scipyranker_params[''] = divrank_alpharanker_params[''] = alphaelse:ranker = networkx.pagerank_scipyranker_params[''] = alphagraph = networkx.DiGraph()sent_tf_list = []for sent in sentences:words = tools.word_segmenter_ja(sent)tf = collections.Counter(words)sent_tf_list.append(tf)sent_vectorizer = DictVectorizer(sparse=True)sent_vecs = sent_vectorizer.fit_transform(sent_tf_list)sim_mat = - pairwise_distances(sent_vecs, sent_vecs, metric='')if continuous:linked_rows, linked_cols = numpy.where(sim_mat > )else:linked_rows, linked_cols = numpy.where(sim_mat >= sim_threshold)graph.add_nodes_from(list(range(sent_vecs.shape[])))for i, j in zip(linked_rows, linked_cols):if i == j:continueweight = sim_mat[i,j] if continuous else graph.add_edge(i, j, {'': weight})scores = ranker(graph, **ranker_params)return scores, sim_mat", "docstring": "compute centrality score of sentences.\n\nArgs:\n sentences: [u'\u3053\u3093\u306b\u3061\u306f\uff0e', u'\u79c1\u306e\u540d\u524d\u306f\u98ef\u6cbc\u3067\u3059\uff0e', ... ]\n continuous: if True, apply continuous LexRank. (see reference)\n sim_threshold: if continuous is False and similarity is greater or\n equal to sim_threshold, link the sentences.\n alpha: the damping factor of PageRank and DivRank\n divrank: if True, apply DivRank instead of PageRank\n divrank_alpha: strength of self-link [0.0-1.0]\n (it's not the damping factor, see divrank.py)\n\nReturns: tuple\n (\n {\n # sentence index -> score\n 0: 0.003,\n 1: 0.002,\n ...\n },\n similarity_matrix\n )\n\nReference:\n G\u00fcnes Erkan and Dragomir R. Radev.\n LexRank: graph-based lexical centrality as salience in text\n summarization. 
(section 3)\n http://www.cs.cmu.edu/afs/cs/project/jair/pub/volume22/erkan04a-html/erkan04a.html", "id": "f492:m0"} {"signature": "def summarize(text, sent_limit=None, char_limit=None, imp_require=None,debug=False, **lexrank_params):", "body": "debug_info = {}sentences = list(tools.sent_splitter_ja(text))scores, sim_mat = lexrank(sentences, **lexrank_params)sum_scores = sum(scores.values())acc_scores = indexes = set()num_sent, num_char = , for i in sorted(scores, key=lambda i: scores[i], reverse=True):num_sent += num_char += len(sentences[i])if sent_limit is not None and num_sent > sent_limit:breakif char_limit is not None and num_char > char_limit:breakif imp_require is not None and acc_scores / sum_scores >= imp_require:breakindexes.add(i)acc_scores += scores[i]if len(indexes) > :summary_sents = [sentences[i] for i in sorted(indexes)]else:summary_sents = sentencesif debug:debug_info.update({'': sentences, '': scores})return summary_sents, debug_info", "docstring": "Args:\n text: text to be summarized (unicode string)\n sent_limit: summary length (the number of sentences)\n char_limit: summary length (the number of characters)\n imp_require: cumulative LexRank score [0.0-1.0]\n\nReturns:\n list of extracted sentences", "id": "f492:m1"} {"signature": "def get_summarizer(self, name):", "body": "if name in self.summarizers:passelif name == '':from . import lexrankself.summarizers[name] = lexrank.summarizeelif name == '':from . import mcp_summself.summarizers[name] = mcp_summ.summarizereturn self.summarizers[name]", "docstring": "import summarizers on-demand", "id": "f493:c0:m1"} {"signature": "@cherrypy.exposedef summarize(self, text=None, algo='', **summarizer_params):", "body": "try: for param, value in list(summarizer_params.items()):if value == '':del summarizer_params[param]continueelif re.match(r'', value):value = float(value)elif re.match(r'', value):value = int(value)elif value == '':value = Trueelif value == '':value = Falsesummarizer_params[param] = valueif algo in ('', '', ''):summarizer = self.get_summarizer('')if algo == '':summarizer_params[''] = Trueif algo == '':summarizer_params[''] = Trueelif algo == '':summarizer = self.get_summarizer('')summary, debug_info = summarizer(text, **summarizer_params)except Exception as e:return json.dumps({'': str(e)}, ensure_ascii=False, indent=)else:res = json.dumps(tools.tree_encode({'': summary, '': debug_info}),ensure_ascii=False, indent=)return res", "docstring": "Args:\n text: text to be summarized\n algo: summarization algorithm\n - 'lexrank' (default) graph-based\n - 'clexrank' Continuous LexRank\n - 'divrank' DivRank (Diverse Rank)\n - 'mcp' select sentences in terms of maximum coverage problem\n\n summarizer_params examples:\n char_limit: summary length (the number of characters)\n sent_limit: (not supported with mcp)\n summary length (the number of sentences)\n imp_require: (lexrank only)\n cumulative LexRank score [0.0-1.0]", "id": "f493:c0:m2"} {"signature": "def sent_splitter_ja(text, delimiters=set(u''),parenthesis=u''):", "body": "paren_chars = set(parenthesis)close2open = dict(zip(parenthesis[::], parenthesis[::]))pstack = []buff = []for i, c in enumerate(text):c_next = text[i+] if i+ < len(text) else Noneif c in paren_chars:if c in close2open: if len(pstack) > and pstack[-] == close2open[c]:pstack.pop()else: pstack.append(c)buff.append(c)if c in delimiters:if len(pstack) == and c_next not in delimiters:yield ''.join(buff)buff = []if len(buff) > :yield ''.join(buff)", "docstring": "Args:\n text: unicode string that contains 
multiple Japanese sentences.\n delimiters: set() of sentence delimiter characters.\n parenthesis: to be checked its correspondence.\nReturns:\n generator that yields sentences.", "id": "f494:m1"} {"signature": "def summarize(text, char_limit, sentence_filter=None, debug=False):", "body": "debug_info = {}sents = list(tools.sent_splitter_ja(text))words_list = [w.encode('') for s in sents for w in tools.word_segmenter_ja(s)]tf = collections.Counter()for words in words_list:for w in words:tf[w] += if sentence_filter is not None:valid_indices = [i for i, s in enumerate(sents) if sentence_filter(s)]sents = [sents[i] for i in valid_indices]words_list = [words_list[i] for i in valid_indices]sent_ids = [str(i) for i in range(len(sents))] sent_id2len = dict((id_, len(s)) for id_, s in zip(sent_ids, sents)) word_contain = dict() for id_, words in zip(sent_ids, words_list):word_contain[id_] = collections.defaultdict(lambda: )for w in words:word_contain[id_][w] = prob = pulp.LpProblem('', pulp.LpMaximize)sent_vars = pulp.LpVariable.dicts('', sent_ids, , , pulp.LpBinary)word_vars = pulp.LpVariable.dicts('', list(tf.keys()), , , pulp.LpBinary)prob += pulp.lpSum([tf[w] * word_vars[w] for w in tf])prob += pulp.lpSum([sent_id2len[id_] * sent_vars[id_] for id_ in sent_ids]) <= char_limit, ''for w in tf:prob += pulp.lpSum([word_contain[id_][w] * sent_vars[id_] for id_ in sent_ids]) >= word_vars[w], ''.format(w)prob.solve()sent_indices = []for v in prob.variables():if v.name.startswith('') and v.varValue == :sent_indices.append(int(v.name.split('')[-]))return [sents[i] for i in sent_indices], debug_info", "docstring": "select sentences in terms of maximum coverage problem\n\nArgs:\n text: text to be summarized (unicode string)\n char_limit: summary length (the number of characters)\n\nReturns:\n list of extracted sentences\n\nReference:\n Hiroya Takamura, Manabu Okumura.\n Text summarization model based on maximum coverage problem and its\n variant. (section 3)\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.222.6945", "id": "f495:m0"} {"signature": "def __init__(self, i2c, device_address, *, debug=False):", "body": "while not i2c.try_lock():passtry:i2c.writeto(device_address, b'')except OSError:try:result = bytearray()i2c.readfrom_into(device_address, result)except OSError:raise ValueError(\"\" % device_address)finally:i2c.unlock()self.i2c = i2cself.device_address = device_addressself._debug = debug", "docstring": "Try to read a byte from an address,\nif you get an OSError it means the device is not there", "id": "f501:c0:m0"} {"signature": "def readinto(self, buf, **kwargs):", "body": "self.i2c.readfrom_into(self.device_address, buf, **kwargs)if self._debug:print(\"\", [hex(i) for i in buf])", "docstring": "Read into ``buf`` from the device. The number of bytes read will be the\nlength of ``buf``.\n\nIf ``start`` or ``end`` is provided, then the buffer will be sliced\nas if ``buf[start:end]``. This will not cause an allocation like\n``buf[start:end]`` will so it saves memory.\n\n:param bytearray buffer: buffer to write into\n:param int start: Index to start writing at\n:param int end: Index to write up to but not include", "id": "f501:c0:m1"} {"signature": "def write(self, buf, **kwargs):", "body": "self.i2c.writeto(self.device_address, buf, **kwargs)if self._debug:print(\"\", [hex(i) for i in buf])", "docstring": "Write the bytes from ``buffer`` to the device. 
Transmits a stop bit if\n``stop`` is set.\n\nIf ``start`` or ``end`` is provided, then the buffer will be sliced\nas if ``buffer[start:end]``. This will not cause an allocation like\n``buffer[start:end]`` will so it saves memory.\n\n:param bytearray buffer: buffer containing the bytes to write\n:param int start: Index to start writing from\n:param int end: Index to read up to but not include\n:param bool stop: If true, output an I2C stop condition after the buffer is written", "id": "f501:c0:m2"} {"signature": "def write_then_readinto(self, out_buffer, in_buffer, *,out_start=, out_end=None, in_start=, in_end=None, stop=True):", "body": "if out_end is None:out_end = len(out_buffer)if in_end is None:in_end = len(in_buffer)if hasattr(self.i2c, ''):if self._debug:print(\"\",[hex(i) for i in out_buffer[out_start:out_end]])self.i2c.writeto_then_readfrom(self.device_address, out_buffer, in_buffer,out_start=out_start, out_end=out_end,in_start=in_start, in_end=in_end, stop=stop)if self._debug:print(\"\",[hex(i) for i in in_buffer[in_start:in_end]])else:self.write(out_buffer, start=out_start, end=out_end, stop=stop)if self._debug:print(\"\",[hex(i) for i in out_buffer[out_start:out_end]])self.readinto(in_buffer, start=in_start, end=in_end)if self._debug:print(\"\",[hex(i) for i in in_buffer[in_start:in_end]])", "docstring": "Write the bytes from ``out_buffer`` to the device, then immediately\nreads into ``in_buffer`` from the device. The number of bytes read\nwill be the length of ``in_buffer``.\nTransmits a stop bit after the write, if ``stop`` is set.\n\nIf ``out_start`` or ``out_end`` is provided, then the output buffer\nwill be sliced as if ``out_buffer[out_start:out_end]``. This will\nnot cause an allocation like ``buffer[out_start:out_end]`` will so\nit saves memory.\n\nIf ``in_start`` or ``in_end`` is provided, then the input buffer\nwill be sliced as if ``in_buffer[in_start:in_end]``. 
This will not\ncause an allocation like ``in_buffer[in_start:in_end]`` will so\nit saves memory.\n\n:param bytearray out_buffer: buffer containing the bytes to write\n:param bytearray in_buffer: buffer containing the bytes to read into\n:param int out_start: Index to start writing from\n:param int out_end: Index to read up to but not include\n:param int in_start: Index to start writing at\n:param int in_end: Index to write up to but not include\n:param bool stop: If true, output an I2C stop condition after the buffer is written", "id": "f501:c0:m3"} {"signature": "def walk(knitting_pattern):", "body": "rows_before = {} free_rows = []walk = []for row in knitting_pattern.rows:rows_before_ = row.rows_before[:]if rows_before_:rows_before[row] = rows_before_else:free_rows.append(row)assert free_rowswhile free_rows:row = free_rows.pop()walk.append(row)assert row not in rows_beforefor freed_row in reversed(row.rows_after):todo = rows_before[freed_row]todo.remove(row)if not todo:del rows_before[freed_row]free_rows.insert(, freed_row)assert not rows_before, \"\"return walk", "docstring": "Walk the knitting pattern in a right-to-left fashion.\n\n :return: an iterable to walk the rows\n :rtype: list\n :param knittingpattern.KnittingPattern.KnittingPattern knitting_pattern: a\n knitting pattern to take the rows from", "id": "f505:m0"} {"signature": "def __init__(self):", "body": "self._items = OrderedDict()", "docstring": "Create a new :class:`IdCollection` with no arguments.\n\n You can add objects later using the method :meth:`append`.", "id": "f506:c0:m0"} {"signature": "def append(self, item):", "body": "self._items[item.id] = item", "docstring": "Add an object to the end of the :class:`IdCollection`.\n\n :param item: an object that has an id", "id": "f506:c0:m1"} {"signature": "def at(self, index):", "body": "keys = list(self._items.keys())key = keys[index]return self[key]", "docstring": "Get the object at an :paramref:`index`.\n\n :param int index: the index of the object\n :return: the object at :paramref:`index`", "id": "f506:c0:m2"} {"signature": "def __getitem__(self, id_):", "body": "return self._items[id_]", "docstring": "Get the object with the :paramref:`id`\n\n .. 
code:: python\n\n ic = IdCollection()\n ic.append(object_1)\n ic.append(object_2)\n assert ic[object_1.id] == object_1\n assert ic[object_2.id] == object_1\n\n :param id_: the id of an object\n :return: the object with the :paramref:`id`\n :raises KeyError: if no object with :paramref:`id` was found", "id": "f506:c0:m3"} {"signature": "def __bool__(self):", "body": "return bool(self._items)", "docstring": ":return: whether there is anything in the collection.\n :rtype: bool", "id": "f506:c0:m4"} {"signature": "def __iter__(self):", "body": "for id_ in self._items:yield self[id_]", "docstring": "allows you to iterate and use for-loops\n\n The objects in the iterator have the order in which they were appended.", "id": "f506:c0:m5"} {"signature": "def __len__(self):", "body": "return len(self._items)", "docstring": ":return: the number of objects in this collection", "id": "f506:c0:m6"} {"signature": "@propertydef first(self):", "body": "return self.at()", "docstring": "The first element in this collection.\n\n :return: the first element in this collection\n :raises IndexError: if this collection is empty", "id": "f506:c0:m7"} {"signature": "def identity(object_):", "body": "return object_", "docstring": ":return: the argument\n :param object_: the object to be returned", "id": "f507:m0"} {"signature": "def true(_):", "body": "return True", "docstring": ":return: :obj:`True`\n :param _: can be ignored", "id": "f507:m1"} {"signature": "def __init__(self, process=identity, chooses_path=true):", "body": "self._process = processself._chooses_path = chooses_path", "docstring": "Create a PathLoader object.\n\n :param process: ``process(path)`` is called with the `path` to load.\n The result of :paramref:`process` is returned to the caller. The\n default value is :func:`identity`, so the paths are returned when\n loaded.\n :param chooses_path: ``chooses_path(path)`` is called before\n :paramref:`process` and returns :obj:`True` or :obj:`False`\n depending on whether a specific path should be loaded and passed to\n :paramref:`process`.", "id": "f507:c0:m0"} {"signature": "def folder(self, folder):", "body": "result = []for root, _, files in os.walk(folder):for file in files:path = os.path.join(root, file)if self._chooses_path(path):result.append(self.path(path))return result", "docstring": "Load all files from a folder recursively.\n\n Depending on :meth:`chooses_path` some paths may not be loaded.\n Every loaded path is processed and returned part of the returned list.\n\n :param str folder: the folder to load the files from\n :rtype: list\n :return: a list of the results of the processing steps of the loaded\n files", "id": "f507:c0:m1"} {"signature": "def chooses_path(self, path):", "body": "return self._chooses_path(path)", "docstring": ":return: whether the path should be loaded\n :rtype: bool\n\n :param str path: the path to the file to be tested", "id": "f507:c0:m2"} {"signature": "def path(self, path):", "body": "return self._process(path)", "docstring": "load a :paramref:`path` and return the processed result\n\n :param str path: the path to the file to be processed\n :return: the result of processing step", "id": "f507:c0:m3"} {"signature": "def _relative_to_absolute(self, module_location, folder):", "body": "if os.path.isfile(module_location):path = os.path.dirname(module_location)elif os.path.isdir(module_location):path = module_locationelse:module_folder = os.path.dirname(module_location)if module_folder:path = module_folderelse:__import__(module_location)module = 
sys.modules[module_location]path = os.path.dirname(module.__file__)absolute_path = os.path.join(path, folder)return absolute_path", "docstring": ":return: the absolute path for the `folder` relative to\n the module_location.\n :rtype: str", "id": "f507:c0:m4"} {"signature": "def relative_folder(self, module, folder):", "body": "folder = self._relative_to_absolute(module, folder)return self.folder(folder)", "docstring": "Load a folder located relative to a module and return the processed\n result.\n\n :param str module: can be\n\n - a path to a folder\n - a path to a file\n - a module name\n\n :param str folder: the path of a folder relative to :paramref:`module`\n :return: a list of the results of the processing\n :rtype: list\n\n Depending on :meth:`chooses_path` some paths may not be loaded.\n Every loaded path is processed and returned part of the returned list.\n You can use :meth:`choose_paths` to find out which paths are chosen to\n load.", "id": "f507:c0:m5"} {"signature": "def relative_file(self, module, file):", "body": "path = self._relative_to_absolute(module, file)return self.path(path)", "docstring": "Load a file relative to a module.\n\n :param str module: can be\n\n - a path to a folder\n - a path to a file\n - a module name\n\n :param str folder: the path of a folder relative to :paramref:`module`\n :return: the result of the processing", "id": "f507:c0:m6"} {"signature": "def choose_paths(self, paths):", "body": "return [path for path in paths if self._chooses_path(path)]", "docstring": ":return: the paths that are chosen by :meth:`chooses_path`\n :rtype: list", "id": "f507:c0:m7"} {"signature": "def example(self, relative_path):", "body": "example_path = os.path.join(\"\", relative_path)return self.relative_file(__file__, example_path)", "docstring": "Load an example from the knitting pattern examples.\n\n :param str relative_path: the path to load\n :return: the result of the processing\n\n You can use :meth:`knittingpattern.Loader.PathLoader.examples`\n to find out the paths of all examples.", "id": "f507:c0:m8"} {"signature": "def examples(self):", "body": "return self.relative_folder(__file__, \"\")", "docstring": "Load all examples from the examples folder of this package.\n\n :return: a list of processed examples\n :rtype: list\n\n Depending on :meth:`chooses_path` some paths may not be loaded.\n Every loaded path is processed and returned part of the returned list.", "id": "f507:c0:m9"} {"signature": "def string(self, string):", "body": "return self._process(string)", "docstring": ":return: the processed result of a string\n :param str string: the string to load the content from", "id": "f507:c1:m0"} {"signature": "def file(self, file):", "body": "string = file.read()return self.string(string)", "docstring": ":return: the processed result of the content of a file-like object.\n\n :param file: the file-like object to load the content from.\n It should support the ``read`` method.", "id": "f507:c1:m1"} {"signature": "def path(self, path):", "body": "with open(path) as file:return self.file(file)", "docstring": ":return: the processed result of a :paramref:`path's ` content.\n :param str path: the path where to load the content from.\n It should exist on the local file system.", "id": "f507:c1:m2"} {"signature": "def url(self, url, encoding=\"\"):", "body": "import urllib.requestwith urllib.request.urlopen(url) as file:webpage_content = file.read()webpage_content = webpage_content.decode(encoding)return self.string(webpage_content)", "docstring": "load and process the 
content behind a url\n\n :return: the processed result of the :paramref:`url's ` content\n :param str url: the url to retrieve the content from\n :param str encoding: the encoding of the retrieved content.\n The default encoding is UTF-8.", "id": "f507:c1:m3"} {"signature": "def object(self, object_):", "body": "return self._process(object_)", "docstring": "Processes an already loaded object.\n\n :return: the result of the processing step\n :param object: the object to be loaded", "id": "f507:c2:m0"} {"signature": "def string(self, string):", "body": "object_ = json.loads(string)return self.object(object_)", "docstring": "Load an object from a string and return the processed JSON content\n\n :return: the result of the processing step\n :param str string: the string to load the JSON from", "id": "f507:c2:m1"} {"signature": "def __init__(self, id_, name, rows, parser):", "body": "self._id = id_self._name = nameself._rows = rowsself._parser = parser", "docstring": "Create a new instance.\n\n :param id_: the id of this pattern\n :param name: the human readable name of this pattern\n :param rows: a collection of rows of instructions\n :param knittingpattern.Parser.Parser parser: the parser to use to new\n content\n\n .. seealso:: :func:`knittingpattern.new_knitting_pattern`", "id": "f508:c0:m0"} {"signature": "@propertydef id(self):", "body": "return self._id", "docstring": "the identifier within a :class:`set of knitting patterns\n `", "id": "f508:c0:m1"} {"signature": "@propertydef name(self):", "body": "return self._name", "docstring": "a human readable name", "id": "f508:c0:m2"} {"signature": "@propertydef rows(self):", "body": "return self._rows", "docstring": "a collection of rows that this pattern is made of\n\n Usually this should be a\n :class:`knittingpattern.IdCollection.IdCollection` of\n :class:`knittingpattern.Row.Row`.", "id": "f508:c0:m3"} {"signature": "def add_row(self, id_):", "body": "row = self._parser.new_row(id_)self._rows.append(row)return row", "docstring": "Add a new row to the pattern.\n\n :param id_: the id of the row", "id": "f508:c0:m4"} {"signature": "def rows_in_knit_order(self):", "body": "return walk(self)", "docstring": "Return the rows in the order that they should be knit.\n\n :rtype: list\n :return: the :attr:`rows` in the order that they should be knit\n\n .. seealso:: :mod:`knittingpattern.walk`", "id": "f508:c0:m5"} {"signature": "@propertydef instruction_colors(self):", "body": "return unique([row.instruction_colorsfor row in self.rows_in_knit_order()])", "docstring": "The colors of the instructions.\n\n :return: the colors of the instructions listed in first appearance in\n knit order\n :rtype: list", "id": "f508:c0:m6"} {"signature": "def default_instructions():", "body": "global _default_instructionsif _default_instructions is None:_default_instructions = DefaultInstructions()return _default_instructions", "docstring": ":return: a default instruction library\n :rtype: DefaultInstructions\n\n .. warning:: The return value is mutable and you should not add new\n instructions to it. 
If you would like to add instructions to it,\n create a new\n :class:`~knittingpattern.InstructionLibrary.DefaultInstructions`\n instance.", "id": "f509:m0"} {"signature": "@propertydef _loader_class(self):", "body": "return JSONLoader", "docstring": ":return: the class for loading the specifications with\n :attr:`load`", "id": "f509:c0:m0"} {"signature": "@propertydef _instruction_class(self):", "body": "return Instruction", "docstring": ":return: the class for the specifications", "id": "f509:c0:m1"} {"signature": "def __init__(self):", "body": "self._type_to_instruction = {}", "docstring": "Create a new :class:`InstructionLibrary\n ` without\n arguments.\n\n Use :attr:`load` to load specifications.", "id": "f509:c0:m2"} {"signature": "@propertydef load(self):", "body": "return self._loader_class(self._process_loaded_object)", "docstring": ":return: a loader that can be used to load specifications\n :rtype: knittingpattern.Loader.JSONLoader\n\n A file to load is a list of instructions in JSON format.\n\n .. code:: json\n\n [\n {\n \"type\" : \"knit\",\n \"another\" : \"attribute\"\n },\n {\n \"type\" : \"purl\"\n }\n ]", "id": "f509:c0:m3"} {"signature": "def _process_loaded_object(self, obj):", "body": "for instruction in obj:self.add_instruction(instruction)return self", "docstring": "add the loaded instructions from :attr:`load`", "id": "f509:c0:m4"} {"signature": "def add_instruction(self, specification):", "body": "instruction = self.as_instruction(specification)self._type_to_instruction[instruction.type] = instruction", "docstring": "Add an instruction specification\n\n :param specification: a specification with a key\n :data:`knittingpattern.Instruction.TYPE`\n\n .. seealso:: :meth:`as_instruction`", "id": "f509:c0:m5"} {"signature": "def as_instruction(self, specification):", "body": "instruction = self._instruction_class(specification)type_ = instruction.typeif type_ in self._type_to_instruction:instruction.inherit_from(self._type_to_instruction[type_])return instruction", "docstring": "Convert the specification into an instruction\n\n :param specification: a specification with a key\n :data:`knittingpattern.Instruction.TYPE`\n\n The instruction is not added.\n\n .. seealso:: :meth:`add_instruction`", "id": "f509:c0:m6"} {"signature": "def __getitem__(self, instruction_type):", "body": "return self.as_instruction({TYPE: instruction_type})", "docstring": ":return: the specification for :paramref:`instruction_type`\n\n .. 
seealso:: :meth:`as_instruction`", "id": "f509:c0:m7"} {"signature": "@propertydef loaded_types(self):", "body": "return list(self._type_to_instruction)", "docstring": "The types loaded in this library.\n\n :return: a list of types, preferably as :class:`string `\n :rtype: list", "id": "f509:c0:m8"} {"signature": "def __init__(self):", "body": "super().__init__()self.load.relative_folder(__file__, self.INSTRUCTIONS_FOLDER)", "docstring": "Create the default instruction library without arguments.\n\n The default specifications are loaded automatically from this package.", "id": "f509:c1:m0"} {"signature": "@fixturedef a1(charlotte):", "body": "return charlotte.patterns[\"\"]", "docstring": ":return: the pattern ``\"A.1\"`` in charlotte", "id": "f512:m1"} {"signature": "@fixturedef a2(charlotte):", "body": "return charlotte.patterns[\"\"]", "docstring": ":return: the pattern ``\"A.2\"`` in charlotte", "id": "f512:m2"} {"signature": "@fixture(scope=\"\")def coloring_pattern():", "body": "patterns = load_from_relative_file(__name__, \"\")return patterns.patterns[\"\"]", "docstring": "The pattern with one colored line and an uncolored line.", "id": "f513:m0"} {"signature": "@fixturedef a1():", "body": "return _charlotte().patterns[\"\"]", "docstring": ":return: the pattern ``\"A.1\"`` in charlotte", "id": "f524:m0"} {"signature": "@fixturedef single_instruction_pattern_set():", "body": "return load_from_relative_file(HERE, \"\")", "docstring": "Load the pattern set with only one instruction.", "id": "f527:m0"} {"signature": "@fixturedef pattern(single_instruction_pattern_set):", "body": "return single_instruction_pattern_set.patterns[\"\"]", "docstring": "The pattern which has only one instruction.", "id": "f527:m1"} {"signature": "@fixturedef row(pattern):", "body": "return pattern.rows[]", "docstring": "The row with one instruction.", "id": "f527:m2"} {"signature": "@fixturedef row2(pattern):", "body": "return pattern.rows[]", "docstring": "The row with one instruction.", "id": "f527:m3"} {"signature": "@fixturedef instruction(row):", "body": "return row.instructions[]", "docstring": "The instruction.", "id": "f527:m4"} {"signature": "@fixturedef instruction2(row2):", "body": "return row2.instructions[]", "docstring": "The instruction.", "id": "f527:m5"} {"signature": "@fixturedef empty_row(row, instruction):", "body": "assert instructionrow.instructions.pop()return row", "docstring": "Now, there is no instruction any more.", "id": "f527:m6"} {"signature": "def new_knitting_pattern_set_loader(specification=DefaultSpecification()):", "body": "parser = specification.new_parser(specification)loader = specification.new_loader(parser.knitting_pattern_set)return loader", "docstring": "Create a loader for a knitting pattern set.\n\n :param specification: a :class:`specification\n `\n for the knitting pattern set, default\n :class:`DefaultSpecification`", "id": "f532:m0"} {"signature": "def __init__(self,new_loader=JSONLoader,new_parser=Parser,new_parsing_error=ParsingError,new_pattern_set=KnittingPatternSet,new_pattern_collection=IdCollection,new_row_collection=IdCollection,new_pattern=KnittingPattern,new_row=Row,new_default_instructions=DefaultInstructions,new_instruction_in_row=InstructionInRow):", "body": "self.new_loader = new_loaderself.new_parser = new_parserself.new_parsing_error = new_parsing_errorself.new_pattern_set = new_pattern_setself.new_pattern_collection = new_pattern_collectionself.new_row_collection = new_row_collectionself.new_pattern = new_patternself.new_row = 
new_rowself.new_default_instructions = new_default_instructionsself.new_instruction_in_row = new_instruction_in_row", "docstring": "Create a new parsing specification.", "id": "f532:c0:m0"} {"signature": "def __init__(self):", "body": "super().__init__()", "docstring": "Initialize the default specification with no arguments.", "id": "f532:c1:m0"} {"signature": "@classmethoddef __repr__(cls):", "body": "return \"\".format(cls.__module__, cls.__qualname__)", "docstring": "The string representation of the object.\n\n :return: the string representation\n :rtype: str", "id": "f532:c1:m1"} {"signature": "def load_from():", "body": "from .ParsingSpecification import new_knitting_pattern_set_loaderreturn new_knitting_pattern_set_loader()", "docstring": "Create a loader to load knitting patterns with.\n\n :return: the loader to load objects with\n :rtype: knittingpattern.Loader.JSONLoader\n\n Example:\n\n .. code:: python\n\n import knittingpattern, webbrowser\n k = knittingpattern.load_from().example(\"Cafe.json\")\n webbrowser.open(k.to_svg(25).temporary_path(\".svg\"))", "id": "f534:m0"} {"signature": "def load_from_object(object_):", "body": "return load_from().object(object_)", "docstring": "Load a knitting pattern from an object.\n\n :rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet", "id": "f534:m1"} {"signature": "def load_from_string(string):", "body": "return load_from().string(string)", "docstring": "Load a knitting pattern from a string.\n\n :rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet", "id": "f534:m2"} {"signature": "def load_from_file(file):", "body": "return load_from().file(file)", "docstring": "Load a knitting pattern from a file-like object.\n\n :rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet", "id": "f534:m3"} {"signature": "def load_from_path(path):", "body": "return load_from().path(path)", "docstring": "Load a knitting pattern from a file behind located at `path`.\n\n :rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet", "id": "f534:m4"} {"signature": "def load_from_url(url):", "body": "return load_from().url(url)", "docstring": "Load a knitting pattern from a url.\n\n :rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet", "id": "f534:m5"} {"signature": "def load_from_relative_file(module, path_relative_to):", "body": "return load_from().relative_file(module, path_relative_to)", "docstring": "Load a knitting pattern from a path relative to a module.\n\n :param str module: can be a module's file, a module's name or\n a module's path.\n :param str path_relative_to: is the path relative to the modules location.\n The result is loaded from this.\n\n :rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet", "id": "f534:m6"} {"signature": "def convert_from_image(colors=(\"\", \"\")):", "body": "from .convert.image_to_knittingpattern importconvert_image_to_knitting_patternreturn convert_image_to_knitting_pattern(colors=colors)", "docstring": "Convert and image to a knitting pattern.\n\n :return: a loader\n :rtype: knittingpattern.Loader.PathLoader\n :param tuple colors: the colors to convert to\n\n .. code:: python\n\n convert_from_image().path(\"pattern.png\").path(\"pattern.json\")\n convert_from_image().path(\"pattern.png\").knitting_pattern()\n\n .. 
seealso:: :mod:`knittingoattern.convert.image_to_knitting_pattern`", "id": "f534:m7"} {"signature": "def new_knitting_pattern(id_, name=None):", "body": "knitting_pattern_set = new_knitting_pattern_set()return knitting_pattern_set.add_new_pattern(id_, name)", "docstring": "Create a new knitting pattern.\n\n :return: a new empty knitting pattern.\n :param id_: the id of the knitting pattern\n :param name: the name of the knitting pattern or :obj:`None` if the\n :paramref:`id_` should be used\n :rtype: knittingpattern.KnittingPattern.KnittingPattern\n\n .. seealso:: :meth:`KnittingPatternSet.add_new_pattern()\n `", "id": "f534:m8"} {"signature": "def new_knitting_pattern_set():", "body": "return load_from_object(EMPTY_KNITTING_PATTERN_SET)", "docstring": "Create a new, empty knitting pattern set.\n\n :rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet\n :return: a new, empty knitting pattern set", "id": "f534:m9"} {"signature": "def __init__(self, row_id, values, parser):", "body": "super().__init__(values)self._id = row_idself._instructions = ObservableList()self._instructions.register_observer(self._instructions_changed)self._parser = parser", "docstring": "Create a new row.\n\n :param row_id: an identifier for the row\n :param values: the values from the specification\n :param list inheriting_from: a list of specifications to inherit values\n from, see :class:`knittingpattern.Prototype.Prototype`\n\n .. note:: Seldomly, you need to create this row on your own. You can\n load it with the :mod:`knittingpattern` or the\n :class:`knittingpattern.Parser.Parser`.", "id": "f535:c0:m0"} {"signature": "def _instructions_changed(self, change):", "body": "if change.adds():for index, instruction in change.items():if isinstance(instruction, dict):in_row = self._parser.instruction_in_row(self, instruction)self.instructions[index] = in_rowelse:instruction.transfer_to_row(self)", "docstring": "Call when there is a change in the instructions.", "id": "f535:c0:m1"} {"signature": "@propertydef id(self):", "body": "return self._id", "docstring": "The id of the row.\n\n :return: the id of the row", "id": "f535:c0:m2"} {"signature": "@propertydef instructions(self):", "body": "return self._instructions", "docstring": "The instructions in this row.\n\n :return: a collection of :class:`instructions inside the row\n `\n :rtype: ObservableList.ObservableList", "id": "f535:c0:m3"} {"signature": "@propertydef number_of_produced_meshes(self):", "body": "return sum(instruction.number_of_produced_meshesfor instruction in self.instructions)", "docstring": "The number of meshes that this row produces.\n\n :return: the number of meshes that this row produces\n :rtype: int\n\n .. seealso::\n :meth:`Instruction.number_of_produced_meshes()\n `,\n :meth:`number_of_consumed_meshes`", "id": "f535:c0:m4"} {"signature": "@propertydef number_of_consumed_meshes(self):", "body": "return sum(instruction.number_of_consumed_meshesfor instruction in self.instructions)", "docstring": "The number of meshes that this row consumes.\n\n :return: the number of meshes that this row consumes\n :rtype: int\n\n .. 
seealso::\n :meth:`Instruction.number_of_consumed_meshes()\n `,\n :meth:`number_of_produced_meshes`", "id": "f535:c0:m5"} {"signature": "@propertydef produced_meshes(self):", "body": "return list(chain(*(instruction.produced_meshesfor instruction in self.instructions)))", "docstring": "The meshes that this row produces with its instructions.\n\n :return: a collection of :class:`meshes `\n that this instruction produces", "id": "f535:c0:m6"} {"signature": "@propertydef consumed_meshes(self):", "body": "return list(chain(*(instruction.consumed_meshesfor instruction in self.instructions)))", "docstring": "Same as :attr:`produced_meshes` but for consumed meshes.", "id": "f535:c0:m7"} {"signature": "def __repr__(self):", "body": "return \"\".format(self.__class__.__qualname__, self.id)", "docstring": "The string representation of this row.\n\n :return: a string representation of this row\n :rtype: str", "id": "f535:c0:m8"} {"signature": "@propertydef color(self):", "body": "return self.get(COLOR)", "docstring": "The color of the row.\n\n :return: the color of the row as specified or :obj:`None`", "id": "f535:c0:m9"} {"signature": "@propertydef instruction_colors(self):", "body": "return unique(instruction.colors for instruction in self.instructions)", "docstring": "The colors of the instructions in the row in the order tehy appear.\n\n :return: a list of colors of the knitting pattern in the order that\n they appear in\n :rtype: list", "id": "f535:c0:m10"} {"signature": "@propertydef last_produced_mesh(self):", "body": "for instruction in reversed(self.instructions):if instruction.produces_meshes():return instruction.last_produced_meshraise IndexError(\"\".format(self))", "docstring": "The last produced mesh.\n\n :return: the last produced mesh\n :rtype: knittingpattern.Mesh.Mesh\n :raises IndexError: if no mesh is produced\n\n .. seealso:: :attr:`number_of_produced_meshes`", "id": "f535:c0:m11"} {"signature": "@propertydef last_consumed_mesh(self):", "body": "for instruction in reversed(self.instructions):if instruction.consumes_meshes():return instruction.last_consumed_meshraise IndexError(\"\".format(self))", "docstring": "The last consumed mesh.\n\n :return: the last consumed mesh\n :rtype: knittingpattern.Mesh.Mesh\n :raises IndexError: if no mesh is consumed\n\n .. seealso:: :attr:`number_of_consumed_meshes`", "id": "f535:c0:m12"} {"signature": "@propertydef first_produced_mesh(self):", "body": "for instruction in self.instructions:if instruction.produces_meshes():return instruction.first_produced_meshraise IndexError(\"\".format(self))", "docstring": "The first produced mesh.\n\n :return: the first produced mesh\n :rtype: knittingpattern.Mesh.Mesh\n :raises IndexError: if no mesh is produced\n\n .. seealso:: :attr:`number_of_produced_meshes`", "id": "f535:c0:m13"} {"signature": "@propertydef first_consumed_mesh(self):", "body": "for instruction in self.instructions:if instruction.consumes_meshes():return instruction.first_consumed_meshraise IndexError(\"\".format(self))", "docstring": "The first consumed mesh.\n\n :return: the first consumed mesh\n :rtype: knittingpattern.Mesh.Mesh\n :raises IndexError: if no mesh is consumed\n\n .. 
seealso:: :attr:`number_of_consumed_meshes`", "id": "f535:c0:m14"} {"signature": "@propertydef rows_before(self):", "body": "rows_before = []for mesh in self.consumed_meshes:if mesh.is_produced():row = mesh.producing_rowif row not in rows_before:rows_before.append(row)return rows_before", "docstring": "The rows that produce meshes for this row.\n\n :rtype: list\n :return: a list of rows that produce meshes for this row. Each row\n occurs only once. They are sorted by the first occurrence in the\n instructions.", "id": "f535:c0:m15"} {"signature": "@propertydef rows_after(self):", "body": "rows_after = []for mesh in self.produced_meshes:if mesh.is_consumed():row = mesh.consuming_rowif row not in rows_after:rows_after.append(row)return rows_after", "docstring": "The rows that consume meshes from this row.\n\n :rtype: list\n :return: a list of rows that consume meshes from this row. Each row\n occurs only once. They are sorted by the first occurrence in the\n instructions.", "id": "f535:c0:m16"} {"signature": "@propertydef first_instruction(self):", "body": "return self.instructions[]", "docstring": "The first instruction of the row's instructions.\n\n :rtype: knittingpattern.Instruction.InstructionInRow\n :return: the first instruction in this row's :attr:`instructions`", "id": "f535:c0:m17"} {"signature": "@propertydef last_instruction(self):", "body": "return self.instructions[-]", "docstring": "The last instruction of the row's instructions.\n\n :rtype: knittingpattern.Instruction.InstructionInRow\n :return: the last instruction in this row's :attr:`instructions`", "id": "f535:c0:m18"} {"signature": "def __init__(self, knittingpattern, layout, instruction_to_svg, builder,zoom):", "body": "self._knittingpattern = knittingpatternself._layout = layoutself._instruction_to_svg = instruction_to_svgself._builder = builderself._zoom = zoomself._instruction_type_color_to_symbol = OrderedDict()self._symbol_id_to_scale = {}", "docstring": ":param knittingpattern.KnittingPattern.KnittingPattern knittingpattern:\n a knitting pattern\n:param knittingpattern.convert.Layout.GridLayout layout:\n:param instruction_to_svg: an\n :class:`~knittingpattern.convert.InstructionToSVG.InstructionToSVG`\n :class:`\n ~knittingpattern.convert.InstructionToSVGCache.InstructionSVGCache`,\n both with instructions already loaded.\n:param knittingpattern.convert.SVGBuilder.SVGBuilder builder:\n:param float zoom: the height and width of a knit instruction", "id": "f536:c0:m0"} {"signature": "def build_SVG_dict(self):", "body": "zoom = self._zoomlayout = self._layoutbuilder = self._builderbbox = list(map(lambda f: f * zoom, layout.bounding_box))builder.bounding_box = bboxflip_x = bbox[] + bbox[] * flip_y = bbox[] + bbox[] * instructions = list(layout.walk_instructions(lambda i: (flip_x - (i.x + i.width) * zoom,flip_y - (i.y + i.height) * zoom,i.instruction)))instructions.sort(key=lambda x_y_i: x_y_i[].render_z)for x, y, instruction in instructions:render_z = instruction.render_zz_id = (\"\" if not render_z else \"\".format(render_z))layer_id = \"\".format(instruction.row.id, z_id)def_id = self._register_instruction_in_defs(instruction)scale = self._symbol_id_to_scale[def_id]group = {\"\": \"\",\"\": \"\".format(instruction.id),\"\": \"\".format(x, y, scale)}builder.place_svg_use(def_id, layer_id, group)builder.insert_defs(self._instruction_type_color_to_symbol.values())return builder.get_svg_dict()", "docstring": "Go through the layout and build the SVG.\n\n :return: an xml dict that can be exported using a\n
:class:`~knittingpattern.Dumper.XMLDumper`\n :rtype: dict", "id": "f536:c0:m1"} {"signature": "def _register_instruction_in_defs(self, instruction):", "body": "type_ = instruction.typecolor_ = instruction.colorinstruction_to_svg_dict =self._instruction_to_svg.instruction_to_svg_dictinstruction_id = \"\".format(type_, color_)defs_id = instruction_id + \"\"if instruction_id not in self._instruction_type_color_to_symbol:svg_dict = instruction_to_svg_dict(instruction)self._compute_scale(instruction_id, svg_dict)symbol = self._make_definition(svg_dict, instruction_id)self._instruction_type_color_to_symbol[defs_id] =symbol[DEFINITION_HOLDER].pop(\"\", {})self._instruction_type_color_to_symbol[instruction_id] = symbolreturn instruction_id", "docstring": "Create a definition for the instruction.\n\n :return: the id of a symbol in the defs for the specified\n :paramref:`instruction`\n :rtype: str\n\n If no symbol yet exists in the defs for the :paramref:`instruction` a\n symbol is created and saved using :meth:`_make_symbol`.", "id": "f536:c0:m2"} {"signature": "def _make_definition(self, svg_dict, instruction_id):", "body": "instruction_def = svg_dict[\"\"]blacklisted_elements = [\"\", \"\"]whitelisted_attributes = [\"\"]symbol = OrderedDict({\"\": instruction_id})for content, value in instruction_def.items():if content.startswith(''):if content in whitelisted_attributes:symbol[content] = valueelif content not in blacklisted_elements:symbol[content] = valuereturn {DEFINITION_HOLDER: symbol}", "docstring": "Create a symbol out of the supplied :paramref:`svg_dict`.\n\n :param dict svg_dict: dictionary containing the SVG for the\n instruction currently processed\n :param str instruction_id: id that will be assigned to the symbol", "id": "f536:c0:m3"} {"signature": "def _compute_scale(self, instruction_id, svg_dict):", "body": "bbox = list(map(float, svg_dict[\"\"][\"\"].split()))scale = self._zoom / (bbox[] - bbox[])self._symbol_id_to_scale[instruction_id] = scale", "docstring": "Compute the scale of an instruction svg.\n\n Compute the scale using the bounding box stored in the\n :paramref:`svg_dict`. The scale is saved in a dictionary using\n :paramref:`instruction_id` as key.\n\n :param str instruction_id: id identifying a symbol in the defs\n :param dict svg_dict: dictionary containing the SVG for the\n instruction currently processed", "id": "f536:c0:m4"} {"signature": "def __init__(self):", "body": "self._structure = xmltodict.parse(SVG_FILE)self._layer_id_to_layer = {}self._svg = self._structure[\"\"]self._min_x = Noneself._min_y = Noneself._max_x = Noneself._max_y = None", "docstring": "Initialize this object without arguments.", "id": "f537:c0:m0"} {"signature": "@propertydef bounding_box(self):", "body": "return (self._min_x, self._min_y, self._max_x, self._max_y)", "docstring": "the bounding box of this SVG\n ``(min_x, min_y, max_x, max_y)``.\n\n .. 
code:: python\n\n svg_builder10x10.bounding_box = (0, 0, 10, 10)\n assert svg_builder10x10.bounding_box == (0, 0, 10, 10)\n\n ``viewBox``, ``width`` and ``height`` are computed from this.\n\n If the bounding box was never set, the result is a tuple of four\n :obj:`None`.", "id": "f537:c0:m1"} {"signature": "def place(self, x, y, svg, layer_id):", "body": "content = xmltodict.parse(svg)self.place_svg_dict(x, y, content, layer_id)", "docstring": "Place the :paramref:`svg` content at ``(x, y)`` position\n in the SVG, in a layer with the id :paramref:`layer_id`.\n\n :param float x: the x position of the svg\n :param float y: the y position of the svg\n :param str svg: the SVG to place at ``(x, y)``\n :param str layer_id: the id of the layer that this\n :paramref:`svg` should be placed inside", "id": "f537:c0:m3"} {"signature": "def place_svg_dict(self, x, y, svg_dict, layer_id, group=None):", "body": "if group is None:group = {}group_ = {\"\": \"\".format(x, y),\"\": list(svg_dict.values())}group_.update(group)layer = self._get_layer(layer_id)layer[\"\"].append(group_)", "docstring": "Same as :meth:`place` but with a dictionary as :paramref:`svg_dict`.\n\n :param dict svg_dict: a dictionary returned by `xmltodict.parse()\n `__\n :param dict group: a dictionary of values to add to the group the\n :paramref:`svg_dict` will be added to or :obj:`None` if nothing\n should be added", "id": "f537:c0:m4"} {"signature": "def place_svg_use_coords(self, x, y, symbol_id, layer_id, group=None):", "body": "if group is None:group = {}use = {\"\": x, \"\": y, \"\": \"\".format(symbol_id)}group_ = {\"\": use}group_.update(group)layer = self._get_layer(layer_id)layer[\"\"].append(group_)", "docstring": "Similar to :meth:`place` but with an id as :paramref:`symbol_id`.\n\n :param str symbol_id: an id which identifies an svg object defined in\n the defs\n :param dict group: a dictionary of values to add to the group the\n use statement will be added to or :obj:`None` if nothing\n should be added", "id": "f537:c0:m5"} {"signature": "def place_svg_use(self, symbol_id, layer_id, group=None):", "body": "self.place_svg_use_coords(, , symbol_id, layer_id, group)", "docstring": "Same as :meth:`place_svg_use_coords`.\n\n With implicit `x` and `y` which are set to `0` in this method and then\n :meth:`place_svg_use_coords` is called.", "id": "f537:c0:m6"} {"signature": "def _get_layer(self, layer_id):", "body": "if layer_id not in self._layer_id_to_layer:self._svg.setdefault(\"\", [])layer = {\"\": [],\"\": layer_id,\"\": layer_id,\"\": \"\",\"\": \"\"}self._layer_id_to_layer[layer_id] = layerself._svg[\"\"].append(layer)return self._layer_id_to_layer[layer_id]", "docstring": ":return: the layer with the :paramref:`layer_id`. 
If the layer\n does not exist, it is created.\n:param str layer_id: the id of the layer", "id": "f537:c0:m7"} {"signature": "def insert_defs(self, defs):", "body": "if self._svg[\"\"] is None:self._svg[\"\"] = {}for def_ in defs:for key, value in def_.items():if key.startswith(\"\"):continueif key not in self._svg[\"\"]:self._svg[\"\"][key] = []if not isinstance(value, list):value = [value]self._svg[\"\"][key].extend(value)", "docstring": "Adds the defs to the SVG structure.\n\n :param defs: a list of SVG dictionaries, which contain the defs,\n which should be added to the SVG structure.", "id": "f537:c0:m8"} {"signature": "def get_svg_dict(self):", "body": "return self._structure", "docstring": "Return the SVG structure generated.", "id": "f537:c0:m9"} {"signature": "def write_to_file(self, file):", "body": "xmltodict.unparse(self._structure, file, pretty=True)", "docstring": "Writes the current SVG to the :paramref:`file`.\n\n :param file: a file-like object", "id": "f537:c0:m10"} {"signature": "def load_and_dump(create_loader, create_dumper, load_and_dump_):", "body": "@wraps(load_and_dump_)def load_and_dump__(*args1, **kw):\"\"\"\"\"\"def load(*args2):\"\"\"\"\"\"def dump(*args3):\"\"\"\"\"\"return load_and_dump_(*(args2 + args3 + args1), **kw)return create_dumper(dump)return create_loader(load)return load_and_dump__", "docstring": ":return: a function that has the doc string of\n :paramref:`load_and_dump_`\n additional arguments to this function are passed on to\n :paramref:`load_and_dump_`.\n\n :param create_loader: a loader, e.g.\n :class:`knittingpattern.Loader.PathLoader`\n :param create_dumper: a dumper, e.g.\n :class:`knittingpattern.Dumper.ContentDumper`\n :param load_and_dump_: a function to call with the loaded content.\n The arguments to both, :paramref:`create_dumper` and,\n :paramref:`create_loader`\n will be passed to :paramref:`load_and_dump_`.\n Any additional arguments to the return value are also passed to\n :paramref:`load_and_dump_`.\n The return value of :paramref:`load_and_dump_` is passed back to the\n :paramref:`Dumper`.\n\n .. seealso:: :func:`decorate_load_and_dump`", "id": "f538:m0"} {"signature": "def decorate_load_and_dump(create_loader, create_dumper):", "body": "return lambda func: load_and_dump(create_loader, create_dumper, func)", "docstring": "Same as :func:`load_and_dump` but returns a function to enable decorator\n syntax.\n\n Examples:\n\n .. 
code:: Python\n\n @decorate_load_and_dump(ContentLoader, JSONDumper)\n def convert_from_loader_to_dumper(loaded_stuff, other=\"arguments\"):\n # convert\n return converted_stuff\n\n @decorate_load_and_dump(PathLoader, lambda dump: ContentDumper(dump,\n encoding=None))\n def convert_from_loader_to_dumper(loaded_stuff, to_file):\n # convert\n to_file.write(converted_stuff)", "id": "f538:m1"} {"signature": "def title(content):", "body": "if isinstance(content, str):return re.findall(\"\", content)[-]return content.title.cdata", "docstring": "returns the title of the svg", "id": "f545:m0"} {"signature": "def coordinates(layout):", "body": "return list(layout.walk_instructions(lambda point: (point.x, point.y)))", "docstring": "The coordinates of the layout.", "id": "f549:m0"} {"signature": "def sizes(layout):", "body": "return list(layout.walk_instructions(lambda p: (p.width, p.height)))", "docstring": "The sizes of the instructions of the layout.", "id": "f549:m1"} {"signature": "def instructions(layout):", "body": "return list(layout.walk_instructions(lambda point: point.instruction))", "docstring": "The instructions of the layout.", "id": "f549:m2"} {"signature": "def row_ids(layout):", "body": "return list(layout.walk_rows(lambda row: row.id))", "docstring": "The ids of the rows of the layout.", "id": "f549:m3"} {"signature": "def connections(layout):", "body": "return list(layout.walk_connections(lambda c: (c.start.xy, c.stop.xy)))", "docstring": "The connections between the rows of the layout.", "id": "f549:m4"} {"signature": "@fixture(scope=\"\")def pattern(self):", "body": "path = os.path.join(\"\", self.FILE)pattern_set = load_from_relative_file(__name__, path)return pattern_set.patterns[self.PATTERN]", "docstring": "The pattern to test.", "id": "f549:c0:m0"} {"signature": "@fixture(scope=\"\")def grid(self, pattern):", "body": "return GridLayout(pattern)", "docstring": "The computed grid for the pattern.", "id": "f549:c0:m1"} {"signature": "def __init__(self, min_x, min_y, max_x, max_y,default_color=\"\"):", "body": "self._min_x = min_xself._min_y = min_yself._max_x = max_xself._max_y = max_yself._default_color = default_colorself._image = PIL.Image.new(\"\", (max_x - min_x, max_y - min_y),self._convert_to_image_color(default_color))", "docstring": "Initialize the builder with the bounding box and a default color.\n\n ..
_png-builder-bounds:\n\n ``min_x <= x < max_x`` and ``min_y <= y < max_y`` are the bounds of the\n instructions.\n Instructions outside the bounds are not rendered.\n Any Pixel that is not set has the :paramref:`default_color`.\n\n :param int min_x: the lower bound of the x coordinates\n :param int max_x: the upper bound of the x coordinates\n :param int min_y: the lower bound of the y coordinates\n :param int max_y: the upper bound of the y coordinates\n :param default_color: a valid :ref:`color `", "id": "f550:c0:m0"} {"signature": "def write_to_file(self, file):", "body": "self._image.save(file, format=\"\")", "docstring": "write the png to the file\n\n :param file: a file-like object", "id": "f550:c0:m1"} {"signature": "@staticmethoddef _convert_color_to_rrggbb(color):", "body": "return convert_color_to_rrggbb(color)", "docstring": "takes a :ref:`color ` and converts it into a 24 bit\n color \"#RRGGBB\"", "id": "f550:c0:m2"} {"signature": "def _convert_rrggbb_to_image_color(self, rrggbb):", "body": "return webcolors.hex_to_rgb(rrggbb)", "docstring": ":return: the color that is used by the image", "id": "f550:c0:m3"} {"signature": "def _convert_to_image_color(self, color):", "body": "rgb = self._convert_color_to_rrggbb(color)return self._convert_rrggbb_to_image_color(rgb)", "docstring": ":return: a color that can be used by the image", "id": "f550:c0:m4"} {"signature": "def _set_pixel_and_convert_color(self, x, y, color):", "body": "if color is None:returncolor = self._convert_color_to_rrggbb(color)self._set_pixel(x, y, color)", "docstring": "set the pixel but convert the color before.", "id": "f550:c0:m5"} {"signature": "def _set_pixel(self, x, y, color):", "body": "if not self.is_in_bounds(x, y):returnrgb = self._convert_rrggbb_to_image_color(color)x -= self._min_xy -= self._min_yself._image.putpixel((x, y), rgb)", "docstring": "set the color of the pixel.\n\n :param color: must be a valid color in the form of \"#RRGGBB\".\n If you need to convert color, use `_set_pixel_and_convert_color()`.", "id": "f550:c0:m6"} {"signature": "def set_pixel(self, x, y, color):", "body": "self._set_pixel_and_convert_color(x, y, color)", "docstring": "set the pixel at ``(x, y)`` position to :paramref:`color`\n\n If ``(x, y)`` is out of the :ref:`bounds `\n this does not change the image.\n\n .. seealso:: :meth:`set_color_in_grid`", "id": "f550:c0:m7"} {"signature": "def is_in_bounds(self, x, y):", "body": "lower = self._min_x <= x and self._min_y <= yupper = self._max_x > x and self._max_y > yreturn lower and upper", "docstring": ":return: whether ``(x, y)`` is inside the :ref:`bounds\n `\n:rtype: bool", "id": "f550:c0:m8"} {"signature": "def set_color_in_grid(self, color_in_grid):", "body": "self._set_pixel_and_convert_color(color_in_grid.x, color_in_grid.y, color_in_grid.color)", "docstring": "Set the pixel at the position of the :paramref:`color_in_grid`\n to its color.\n\n :param color_in_grid: must have the following attributes:\n\n - ``color`` is the :ref:`color ` to set the pixel to\n - ``x`` is the x position of the pixel\n - ``y`` is the y position of the pixel\n\n .. 
seealso:: :meth:`set_pixel`, :meth:`set_colors_in_grid`", "id": "f550:c0:m9"} {"signature": "def set_colors_in_grid(self, some_colors_in_grid):", "body": "for color_in_grid in some_colors_in_grid:self._set_pixel_and_convert_color(color_in_grid.x, color_in_grid.y, color_in_grid.color)", "docstring": "Same as :meth:`set_color_in_grid` but with a collection of\n colors in grid.\n\n :param iterable some_colors_in_grid: a collection of colors in grid for\n :meth:`set_color_in_grid`", "id": "f550:c0:m10"} {"signature": "@propertydef default_color(self):", "body": "return self._default_color", "docstring": ":return: the :ref:`color ` of the pixels that are not set\n\n You can set this color by passing it to the :meth:`constructor\n <__init__>`.", "id": "f550:c0:m11"} {"signature": "def __init__(self, function_that_returns_a_knitting_pattern_set):", "body": "super().__init__(self._dump_knitting_pattern,text_is_expected=False, encoding=None)self.__on_dump = function_that_returns_a_knitting_pattern_set", "docstring": "Initialize the Dumper with a\n :paramref:`function_that_returns_a_knitting_pattern_set`.\n\n :param function_that_returns_a_knitting_pattern_set: a function that\n takes no arguments but returns a\n :class:`knittinpattern.KnittingPatternSet.KnittingPatternSet`\n\n When a dump is requested, the\n :paramref:`function_that_returns_a_knitting_pattern_set`\n is called and the knitting pattern set is converted and saved to the\n specified location.", "id": "f552:c0:m0"} {"signature": "def _dump_knitting_pattern(self, file):", "body": "knitting_pattern_set = self.__on_dump()knitting_pattern = knitting_pattern_set.patterns.at()layout = GridLayout(knitting_pattern)builder = AYABPNGBuilder(*layout.bounding_box)builder.set_colors_in_grid(layout.walk_instructions())builder.write_to_file(file)", "docstring": "dump a knitting pattern to a file.", "id": "f552:c0:m1"} {"signature": "def identity(object_):", "body": "return object_", "docstring": ":return: the argument", "id": "f553:m0"} {"signature": "def __init__(self, position):", "body": "self._position = position", "docstring": "Create a new InGrid object.", "id": "f553:c0:m0"} {"signature": "@propertydef x(self):", "body": "return self._position.x", "docstring": ":return: x coordinate in the grid\n :rtype: float", "id": "f553:c0:m1"} {"signature": "@propertydef y(self):", "body": "return self._position.y", "docstring": ":return: y coordinate in the grid\n :rtype: float", "id": "f553:c0:m2"} {"signature": "@propertydef xy(self):", "body": "return self._position", "docstring": ":return: ``(x, y)`` coordinate in the grid\n :rtype: tuple", "id": "f553:c0:m3"} {"signature": "@propertydef yx(self):", "body": "return self._position.y, self._position.x", "docstring": ":return: ``(y, x)`` coordinate in the grid\n :rtype: tuple", "id": "f553:c0:m4"} {"signature": "@propertydef width(self):", "body": "return self._width", "docstring": ":return: width of the object on the grid\n :rtype: float", "id": "f553:c0:m5"} {"signature": "@propertydef height(self):", "body": "return INSTRUCTION_HEIGHT", "docstring": ":return: height of the object on the grid\n :rtype: float", "id": "f553:c0:m6"} {"signature": "@propertydef row(self):", "body": "return self._row", "docstring": ":return: row of the object on the grid\n :rtype: knittingpattern.Row.Row", "id": "f553:c0:m7"} {"signature": "@propertydef bounding_box(self):", "body": "return self._bounding_box", "docstring": "The bounding box of this object.\n\n :return: (min x, min y, max x, max y)\n :rtype: tuple", "id": 
"f553:c0:m8"} {"signature": "@propertydef id(self):", "body": "return self._id", "docstring": "The id of this object.", "id": "f553:c0:m9"} {"signature": "def __init__(self, instruction, position):", "body": "self._instruction = instructionsuper().__init__(position)", "docstring": ":param instruction: an :class:`instruction\n `\n:param Point position: the position of the :paramref:`instruction`", "id": "f553:c1:m0"} {"signature": "@propertydef _width(self):", "body": "layout = self._instruction.get(GRID_LAYOUT)if layout is not None:width = layout.get(WIDTH)if width is not None:return widthreturn self._instruction.number_of_consumed_meshes", "docstring": "For ``self.width``.", "id": "f553:c1:m1"} {"signature": "@propertydef instruction(self):", "body": "return self._instruction", "docstring": "The instruction.\n\n :return: instruction that is placed on the grid\n :rtype: knittingpattern.Instruction.InstructionInRow", "id": "f553:c1:m2"} {"signature": "@propertydef color(self):", "body": "return self._instruction.color", "docstring": "The color of the instruction.\n\n :return: the color of the :attr:`instruction`", "id": "f553:c1:m3"} {"signature": "def _row(self):", "body": "return self._instruction.row", "docstring": "For ``self.row``.", "id": "f553:c1:m4"} {"signature": "def __init__(self, row, position):", "body": "super().__init__(position)self._row = row", "docstring": "Create a new row in the grid.", "id": "f553:c2:m0"} {"signature": "@propertydef _width(self):", "body": "return sum(map(lambda i: i.width, self.instructions))", "docstring": ":return: the number of consumed meshes", "id": "f553:c2:m1"} {"signature": "@propertydef instructions(self):", "body": "x = self.xy = self.yresult = []for instruction in self._row.instructions:instruction_in_grid = InstructionInGrid(instruction, Point(x, y))x += instruction_in_grid.widthresult.append(instruction_in_grid)return result", "docstring": "The instructions in a grid.\n\n :return: the :class:`instructions in a grid ` of\n this row\n :rtype: list", "id": "f553:c2:m2"} {"signature": "def __init__(self, first_instruction):", "body": "self._rows_in_grid = {}self._todo = []self._expand(first_instruction.row, Point(, ), [])self._walk()", "docstring": "Start walking the knitting pattern starting from first_instruction.", "id": "f553:c3:m0"} {"signature": "def _expand(self, row, consumed_position, passed):", "body": "self._todo.append((row, consumed_position, passed))", "docstring": "Add the arguments `(args, kw)` to `_walk` to the todo list.", "id": "f553:c3:m1"} {"signature": "def _step(self, row, position, passed):", "body": "if row in passed or not self._row_should_be_placed(row, position):returnself._place_row(row, position)passed = [row] + passedfor i, produced_mesh in enumerate(row.produced_meshes):self._expand_produced_mesh(produced_mesh, i, position, passed)for i, consumed_mesh in enumerate(row.consumed_meshes):self._expand_consumed_mesh(consumed_mesh, i, position, passed)", "docstring": "Walk through the knitting pattern by expanding an row.", "id": "f553:c3:m2"} {"signature": "def _expand_consumed_mesh(self, mesh, mesh_index, row_position, passed):", "body": "if not mesh.is_produced():returnrow = mesh.producing_rowposition = Point(row_position.x + mesh.index_in_producing_row - mesh_index,row_position.y - INSTRUCTION_HEIGHT)self._expand(row, position, passed)", "docstring": "expand the consumed meshes", "id": "f553:c3:m3"} {"signature": "def _expand_produced_mesh(self, mesh, mesh_index, row_position, passed):", "body": "if not 
mesh.is_consumed():returnrow = mesh.consuming_rowposition = Point(row_position.x - mesh.index_in_consuming_row + mesh_index,row_position.y + INSTRUCTION_HEIGHT)self._expand(row, position, passed)", "docstring": "expand the produced meshes", "id": "f553:c3:m4"} {"signature": "def _row_should_be_placed(self, row, position):", "body": "placed_row = self._rows_in_grid.get(row)return placed_row is None or placed_row.y < position.y", "docstring": ":return: whether to place this row", "id": "f553:c3:m5"} {"signature": "def _place_row(self, row, position):", "body": "self._rows_in_grid[row] = RowInGrid(row, position)", "docstring": "place the row on a grid", "id": "f553:c3:m6"} {"signature": "def _walk(self):", "body": "while self._todo:args = self._todo.pop()self._step(*args)", "docstring": "Loop through all the instructions that are `_todo`.", "id": "f553:c3:m7"} {"signature": "def instruction_in_grid(self, instruction):", "body": "row_position = self._rows_in_grid[instruction.row].xyx = instruction.index_of_first_consumed_mesh_in_rowposition = Point(row_position.x + x, row_position.y)return InstructionInGrid(instruction, position)", "docstring": "Returns an `InstructionInGrid` object for the `instruction`", "id": "f553:c3:m8"} {"signature": "def row_in_grid(self, row):", "body": "return self._rows_in_grid[row]", "docstring": "Returns a `RowInGrid` object for the `row`", "id": "f553:c3:m9"} {"signature": "def __init__(self, start, stop):", "body": "self._start = startself._stop = stop", "docstring": ":param InstructionInGrid start: the start of the connection\n:param InstructionInGrid stop: the end of the connection", "id": "f553:c4:m0"} {"signature": "@propertydef start(self):", "body": "return self._start", "docstring": ":return: the start of the connection\n :rtype: InstructionInGrid", "id": "f553:c4:m1"} {"signature": "@propertydef stop(self):", "body": "return self._stop", "docstring": ":return: the end of the connection\n :rtype: InstructionInGrid", "id": "f553:c4:m2"} {"signature": "def is_visible(self):", "body": "if self._start.y + < self._stop.y:return Truereturn False", "docstring": ":return: whether this connection is visible\n :rtype: bool\n\n A connection is visible if it is longer than 0.", "id": "f553:c4:m3"} {"signature": "def __init__(self, pattern):", "body": "self._pattern = patternself._rows = list(pattern.rows)self._walk = _RecursiveWalk(self._rows[].instructions[])self._rows.sort(key=lambda row: self._walk.row_in_grid(row).yx)", "docstring": ":param knittingpattern.KnittingPattern.KnittingPattern pattern: the\n pattern to layout", "id": "f553:c5:m0"} {"signature": "def walk_instructions(self, mapping=identity):", "body": "instructions = chain(*self.walk_rows(lambda row: row.instructions))return map(mapping, instructions)", "docstring": "Iterate over instructions.\n\n :return: an iterator over :class:`instructions in grid\n `\n :param mapping: function to map the result\n\n ..
code:: python\n\n for pos, c in layout.walk_instructions(lambda i: (i.xy, i.color)):\n print(\"color {} at {}\".format(c, pos))", "id": "f553:c5:m1"} {"signature": "def walk_rows(self, mapping=identity):", "body": "row_in_grid = self._walk.row_in_gridreturn map(lambda row: mapping(row_in_grid(row)), self._rows)", "docstring": "Iterate over rows.\n\n :return: an iterator over :class:`rows `\n :param mapping: funcion to map the result, see\n :meth:`walk_instructions` for an example usage", "id": "f553:c5:m2"} {"signature": "def walk_connections(self, mapping=identity):", "body": "for start in self.walk_instructions():for stop_instruction in start.instruction.consuming_instructions:if stop_instruction is None:continuestop = self._walk.instruction_in_grid(stop_instruction)connection = Connection(start, stop)if connection.is_visible():yield mapping(connection)", "docstring": "Iterate over connections between instructions.\n\n :return: an iterator over :class:`connections ` between\n :class:`instructions in grid `\n :param mapping: funcion to map the result, see\n :meth:`walk_instructions` for an example usage", "id": "f553:c5:m3"} {"signature": "@propertydef bounding_box(self):", "body": "min_x, min_y, max_x, max_y = zip(*list(self.walk_rows(lambda row: row.bounding_box)))return min(min_x), min(min_y), max(max_x), max(max_y)", "docstring": "The minimum and maximum bounds of this layout.\n\n :return: ``(min_x, min_y, max_x, max_y)`` the bounding box\n of this layout\n :rtype: tuple", "id": "f553:c5:m4"} {"signature": "def row_in_grid(self, row):", "body": "return self._walk.row_in_grid(row)", "docstring": "The a RowInGrid for the row with position information.\n\n :return: a row in the grid\n :rtype: RowInGrid", "id": "f553:c5:m5"} {"signature": "def default_instruction_svg_cache():", "body": "global _default_instruction_svg_cacheif _default_instruction_svg_cache is None:_default_instruction_svg_cache = InstructionSVGCache()return _default_instruction_svg_cache", "docstring": "Return the default InstructionSVGCache.\n\n :rtype: knittingpattern.convert.InstructionSVGCache.InstructionSVGCache", "id": "f554:m0"} {"signature": "def __init__(self, instruction_to_svg=None):", "body": "if instruction_to_svg is None:instruction_to_svg = default_instructions_to_svg()self._instruction_to_svg_dict =instruction_to_svg.instruction_to_svg_dictself._cache = {}", "docstring": "Create the InstructionSVGCache.\n\n :param instruction_to_svg: an\n :class:`~knittingpattern.convert.InstructionToSVG.InstructionToSVG`\n object. 
If :obj:`None` is given, the\n :func:`default_instructions_to_svg\n `\n is used.", "id": "f554:c0:m0"} {"signature": "def get_instruction_id(self, instruction_or_id):", "body": "if isinstance(instruction_or_id, tuple):return _InstructionId(instruction_or_id)return _InstructionId(instruction_or_id.type,instruction_or_id.hex_color)", "docstring": "The id that identifies the instruction in this cache.\n\n :param instruction_or_id: an :class:`instruction\n ` or an instruction id\n :return: a :func:`hashable ` object\n :rtype: tuple", "id": "f554:c0:m1"} {"signature": "def _new_svg_dumper(self, on_dump):", "body": "return SVGDumper(on_dump)", "docstring": "Create a new SVGDumper with the function ``on_dump``.\n\n :rtype: knittingpattern.Dumper.SVGDumper", "id": "f554:c0:m2"} {"signature": "def to_svg(self, instruction_or_id,i_promise_not_to_change_the_result=False):", "body": "return self._new_svg_dumper(lambda: self.instruction_to_svg_dict(instruction_or_id, not i_promise_not_to_change_the_result))", "docstring": "Return the SVG for an instruction.\n\n :param instruction_or_id: either an\n :class:`~knittingpattern.Instruction.Instruction` or an id\n returned by :meth:`get_instruction_id`\n :param bool i_promise_not_to_change_the_result:\n\n - :obj:`False`: the result is copied, you can alter it.\n - :obj:`True`: the result is directly from the cache. If you change\n the result, other calls of this function get the changed result.\n\n :return: an SVGDumper\n :rtype: knittingpattern.Dumper.SVGDumper", "id": "f554:c0:m3"} {"signature": "def instruction_to_svg_dict(self, instruction_or_id, copy_result=True):", "body": "instruction_id = self.get_instruction_id(instruction_or_id)if instruction_id in self._cache:result = self._cache[instruction_id]else:result = self._instruction_to_svg_dict(instruction_id)self._cache[instruction_id] = resultif copy_result:result = deepcopy(result)return result", "docstring": "Return the SVG dict for the SVGBuilder.\n\n :param instruction_or_id: the instruction or id, see\n :meth:`get_instruction_id`\n :param bool copy_result: whether to copy the result\n :rtype: dict\n\n The result is cached.", "id": "f554:c0:m4"} {"signature": "def default_instructions_to_svg():", "body": "instruction_to_svg = InstructionToSVG()instruction_to_svg.load.relative_folder(__name__, DEFAULT_SVG_FOLDER)return instruction_to_svg", "docstring": "load the default set of svg files for instructions\n\n :return: the default svg files for the instructions in this package\n :rtype: knittingpattern.InstructionToSVG.InstructionToSVG", "id": "f555:m0"} {"signature": "@propertydef _loader_class(self):", "body": "return PathLoader", "docstring": ":return: the loader to load svgs from different locations\n :rtype: knittingpattern.Loader.PathLoader .", "id": "f555:c0:m0"} {"signature": "def __init__(self):", "body": "self._instruction_type_to_file_content = {}", "docstring": "create a InstructionToSVG object without arguments.", "id": "f555:c0:m1"} {"signature": "@propertydef load(self):", "body": "return self._loader_class(self._process_loaded_object)", "docstring": ":return: a loader object that allows loading SVG files from\n various sources such as files and folders.\n :rtype: knittingpattern.Loader.PathLoader\n\n Examples:\n\n - ``instruction_to_svg.load.path(path)`` loads an SVG from a file named\n path\n - ``instruction_to_svg.load.folder(path)`` loads all SVG files for\n instructions in the folder recursively.\n If multiple files have the same name, the last occurrence is used.", "id": 
"f555:c0:m2"} {"signature": "def _process_loaded_object(self, path):", "body": "file_name = os.path.basename(path)name = os.path.splitext(file_name)[]with open(path) as file:string = file.read()self._instruction_type_to_file_content[name] = string", "docstring": "process the :paramref:`path`.\n\n :param str path: the path to load an svg from", "id": "f555:c0:m3"} {"signature": "def instruction_to_svg_dict(self, instruction):", "body": "instruction_type = instruction.typeif instruction_type in self._instruction_type_to_file_content:svg = self._instruction_type_to_file_content[instruction_type]return self._set_fills_in_color_layer(svg, instruction.hex_color)return self.default_instruction_to_svg_dict(instruction)", "docstring": ":return: an xml-dictionary with the same content as\n :meth:`instruction_to_svg`.", "id": "f555:c0:m4"} {"signature": "def instruction_to_svg(self, instruction):", "body": "return xmltodict.unparse(self.instruction_to_svg_dict(instruction))", "docstring": ":return: an SVG representing the instruction.\n\n The SVG file is determined by the type attribute of the instruction.\n An instruction of type ``\"knit\"`` is looked for in a file named\n ``\"knit.svg\"``.\n\n Every element inside a group labeled ``\"color\"`` of mode ``\"layer\"``\n that has a ``\"fill\"`` style gets this fill replaced by the color of\n the instruction.\n Example of a recangle that gets filled like the instruction:\n\n .. code:: xml\n\n \n \n \n\n If nothing was loaded to display this instruction, a default image is\n be generated by :meth:`default_instruction_to_svg`.", "id": "f555:c0:m5"} {"signature": "def _set_fills_in_color_layer(self, svg_string, color):", "body": "structure = xmltodict.parse(svg_string)if color is None:return structurelayers = structure[\"\"][\"\"]if not isinstance(layers, list):layers = [layers]for layer in layers:if not isinstance(layer, dict):continueif layer.get(\"\") == \"\" andlayer.get(\"\") == \"\":for key, elements in layer.items():if key.startswith(\"\") or key.startswith(\"\"):continueif not isinstance(elements, list):elements = [elements]for element in elements:style = element.get(\"\", None)if style:style = style.split(\"\")processed_style = []for style_element in style:if style_element.startswith(\"\"):style_element = \"\" + colorprocessed_style.append(style_element)style = \"\".join(processed_style)element[\"\"] = stylereturn structure", "docstring": "replaces fill colors in ```` with :paramref:`color`\n\n :param color: a color fill the objects in the layer with", "id": "f555:c0:m6"} {"signature": "def has_svg_for_instruction(self, instruction):", "body": "instruction_type = instruction.typereturn instruction_type in self._instruction_type_to_file_content", "docstring": ":return: whether there is an image for the instruction\n :rtype: bool\n\n This can be used before :meth:`instruction_to_svg` as it determines\n whether\n\n - the default value is used (:obj:`False`)\n - or there is a dedicated svg representation (:obj:`True`).", "id": "f555:c0:m7"} {"signature": "def default_instruction_to_svg(self, instruction):", "body": "svg_dict = self.default_instruction_to_svg_dict(instruction)return xmltodict.unparse(svg_dict)", "docstring": "As :meth:`instruction_to_svg` but it only takes the ``default.svg``\n file into account.\n\n In case no file is found for an instruction in\n :meth:`instruction_to_svg`,\n this method is used to determine the default svg for it.\n\n The content is created by replacing the text ``{instruction.type}`` in\n the whole svg file named 
``default.svg``.\n\n If no file ``default.svg`` was loaded, an empty string is returned.", "id": "f555:c0:m8"} {"signature": "def default_instruction_to_svg_dict(self, instruction):", "body": "instruction_type = instruction.typedefault_type = \"\"rep_str = \"\"if default_type not in self._instruction_type_to_file_content:return {\"\": \"\"}default_svg = self._instruction_type_to_file_content[default_type]default_svg = default_svg.replace(rep_str, instruction_type)colored_svg = self._set_fills_in_color_layer(default_svg,instruction.hex_color)return colored_svg", "docstring": "Returns an xml-dictionary with the same content as\n :meth:`default_instruction_to_svg`\n\n If no file ``default.svg`` was loaded, an empty svg-dict is returned.", "id": "f555:c0:m9"} {"signature": "@decorate_load_and_dump(PathLoader, JSONDumper)def convert_image_to_knitting_pattern(path, colors=(\"\", \"\")):", "body": "image = PIL.Image.open(path)pattern_id = os.path.splitext(os.path.basename(path))[]rows = []connections = []pattern_set = {\"\": \"\",\"\": \"\",\"\": {\"\": path},\"\": [{\"\": pattern_id,\"\": pattern_id,\"\": rows,\"\": connections}]}bbox = image.getbbox()if not bbox:return pattern_setwhite = image.getpixel((, ))min_x, min_y, max_x, max_y = bboxlast_row_y = Nonefor y in reversed(range(min_y, max_y)):instructions = []row = {\"\": y, \"\": instructions}rows.append(row)for x in range(min_x, max_x):if image.getpixel((x, y)) == white:color = colors[]else:color = colors[]instruction = {\"\": color}instructions.append(instruction)if last_row_y is not None:connections.append({\"\": {\"\": last_row_y}, \"\": {\"\": y}})last_row_y = yreturn pattern_set", "docstring": "Load a image file such as a png bitmap of jpeg file and convert it\n to a :ref:`knitting pattern file `.\n\n :param list colors: a list of strings that should be used as\n :ref:`colors `.\n :param str path: ignore this. It is fulfilled by the loeder.\n\n Example:\n\n .. code:: python\n\n convert_image_to_knitting_pattern().path(\"image.png\").path(\"image.json\")", "id": "f556:m0"} {"signature": "def convert_color_to_rrggbb(color):", "body": "if not color.startswith(\"\"):rgb = webcolors.html5_parse_legacy_color(color)hex_color = webcolors.html5_serialize_simple_color(rgb)else:hex_color = colorreturn webcolors.normalize_hex(hex_color)", "docstring": "The color in \"#RRGGBB\" format.\n\n :return: the :attr:`color` in \"#RRGGBB\" format", "id": "f557:m0"} {"signature": "@propertydef id(self):", "body": "return self.get(ID)", "docstring": "The id of the instruction.\n\n :return: the :data:`id ` of the instruction or\n :obj:`None` if none is specified.", "id": "f558:c0:m0"} {"signature": "@propertydef type(self):", "body": "return self.get(TYPE, DEFAULT_TYPE)", "docstring": "The type of the instruction.\n\n :return: the :data:`type ` of the instruction or\n :data:`DEFAULT_TYPE` if none is specified.\n :rtype: str\n\n The type should be a string.\n Depending on the type, the instruction can receive additional\n attributes.\n\n .. seealso:: :mod:`knittingpattern.InstructionLibrary`", "id": "f558:c0:m1"} {"signature": "@propertydef color(self):", "body": "return self.get(COLOR)", "docstring": "The color of the instruction.\n\n :return: the :data:`color ` of the instruction or\n :obj:`None` if none is specified.", "id": "f558:c0:m2"} {"signature": "@propertydef colors(self):", "body": "return [self.color]", "docstring": "All the colors that an instruction has.\n\n :return: a list of colors of the instruction. 
If the instruction has\n no color, this is ``[None]``.\n :rtype: list", "id": "f558:c0:m3"} {"signature": "@propertydef description(self):", "body": "return self.get(DESCRIPTION)", "docstring": "The description of the instruction.\n\n :return: the :data:`description ` of the instruction or\n :obj:`None` if none is specified.", "id": "f558:c0:m4"} {"signature": "@propertydef number_of_consumed_meshes(self):", "body": "return self.get(NUMBER_OF_CONSUMED_MESHES,DEFAULT_NUMBER_OF_CONSUMED_MESHES)", "docstring": "The number of meshes that this instruction consumes.\n\n :return: the :data:`number of consumed meshes\n ` of the instruction or\n :data:`DEFAULT_NUMBER_OF_CONSUMED_MESHES` if none is specified.", "id": "f558:c0:m5"} {"signature": "@propertydef number_of_produced_meshes(self):", "body": "return self.get(NUMBER_OF_PRODUCED_MESHES,DEFAULT_NUMBER_OF_PRODUCED_MESHES)", "docstring": "The number of meshes that this instruction produces.\n\n :return: the :data:`number of produced meshes\n ` of the instruction or\n :data:`DEFAULT_NUMBER_OF_PRODUCED_MESHES` if none is specified.", "id": "f558:c0:m6"} {"signature": "def has_color(self):", "body": "return self.color is not None", "docstring": "Whether this instruction has a color.\n\n :return: whether a :data:`color ` is specified\n :rtype: bool", "id": "f558:c0:m7"} {"signature": "def does_knit(self):", "body": "return self.type == KNIT_TYPE", "docstring": "Whether this instruction is a knit instruction.\n\n :return: whether this instruction is a knit instruction\n :rtype: bool", "id": "f558:c0:m8"} {"signature": "def does_purl(self):", "body": "return self.type == PURL_TYPE", "docstring": "Whether this instruction is a purl instruction.\n\n :return: whether this instruction is a purl instruction\n :rtype: bool", "id": "f558:c0:m9"} {"signature": "def produces_meshes(self):", "body": "return self.number_of_produced_meshes != ", "docstring": "Whether this institution produces meshes.\n\n :return: whether this instruction produces any meshes\n :rtype: bool\n\n .. seealso:: :attr:`number_of_produced_meshes`", "id": "f558:c0:m10"} {"signature": "def consumes_meshes(self):", "body": "return self.number_of_consumed_meshes != ", "docstring": "Whether this instruction consumes meshes.\n\n :return: whether this instruction consumes any meshes\n :rtype: bool\n\n .. seealso:: :attr:`number_of_consumed_meshes`", "id": "f558:c0:m11"} {"signature": "@propertydef render_z(self):", "body": "return self.get(RENDER, {}).get(RENDER_Z, DEFAULT_Z)", "docstring": "The z-index of the instruction when rendered.\n\n :return: the z-index of the instruction. Instructions with a higher\n z-index are displayed in front of instructions with lower z-index.\n :rtype: float", "id": "f558:c0:m12"} {"signature": "@propertydef hex_color(self):", "body": "if self.has_color():return convert_color_to_rrggbb(self.color)return None", "docstring": "The color in \"#RRGGBB\" format.\n\n :return: the :attr:`color` in \"#RRGGBB\" format or none if no color is\n given", "id": "f558:c0:m13"} {"signature": "def to_svg(self, converter=None):", "body": "if converter is None:from knittingpattern.convert.InstructionSVGCache importdefault_svg_cacheconverter = default_svg_cache()return converter.to_svg(self)", "docstring": "Return a SVGDumper for this instruction.\n\n :param converter: a :class:`\n knittingpattern.convert.InstructionSVGCache.InstructionSVGCache` or\n :obj:`None`. 
If :obj:`None` is given, the :func:`\n knittingpattern.convert.InstructionSVGCache.default_svg_cache` is\n used.\n :rtype: knittingpattern.Dumper.SVGDumper", "id": "f558:c0:m14"} {"signature": "def __init__(self, row, spec):", "body": "super().__init__(spec)self._row = rowself._produced_meshes = [self._new_produced_mesh(self, index)for index in range(self.number_of_produced_meshes)]self._consumed_meshes = [self._new_consumed_mesh(self, index)for index in range(self.number_of_consumed_meshes)]self._cached_index_in_row = None", "docstring": "Create a new instruction in a row with a specification.\n\n :param knittingpattern.Row.Row row: the row the instruction is placed\n in\n :param spec: specification of the instruction", "id": "f558:c1:m0"} {"signature": "def transfer_to_row(self, new_row):", "body": "if new_row != self._row:index = self.get_index_in_row()if index is not None:self._row.instructions.pop(index)self._row = new_row", "docstring": "Transfer this instruction to a new row.\n\n :param knittingpattern.Row.Row new_row: the new row the instruction is\n in.", "id": "f558:c1:m1"} {"signature": "@propertydef _new_produced_mesh(self):", "body": "return ProducedMesh", "docstring": ":return: the class of the produced meshes.", "id": "f558:c1:m2"} {"signature": "@propertydef _new_consumed_mesh(self):", "body": "return ConsumedMesh", "docstring": ":return: the class of the consumed meshes.", "id": "f558:c1:m3"} {"signature": "@propertydef row(self):", "body": "return self._row", "docstring": "The row this instruction is in.\n\n :return: the row the instruction is placed in\n :rtype: knittingpattern.Row.Row", "id": "f558:c1:m4"} {"signature": "def is_in_row(self):", "body": "return self.get_index_in_row() is not None", "docstring": "Whether the instruction can be found in its row.\n\n :return: whether the instruction is in its row\n :rtype: bool\n\n Use this to avoid raising and :class:`InstructionNotFoundInRow`.", "id": "f558:c1:m5"} {"signature": "def get_index_in_row(self):", "body": "expected_index = self._cached_index_in_rowinstructions = self._row.instructionsif expected_index is not None and <= expected_index < len(instructions) andinstructions[expected_index] is self:return expected_indexfor index, instruction_in_row in enumerate(instructions):if instruction_in_row is self:self._cached_index_in_row = indexreturn indexreturn None", "docstring": "Index of the instruction in the instructions of the row or None.\n\n :return: index in the :attr:`row`'s instructions or None, if the\n instruction is not in the row\n :rtype: int\n\n .. seealso:: :attr:`row_instructions`, :attr:`index_in_row`,\n :meth:`is_in_row`", "id": "f558:c1:m6"} {"signature": "@propertydef index_in_row(self):", "body": "index = self.get_index_in_row()if index is None:self._raise_not_found_error()return index", "docstring": "Index of the instruction in the instructions of the row.\n\n :return: index in the :attr:`row`'s instructions\n :rtype: int\n :raises knittingpattern.Instruction.InstructionNotFoundInRow:\n if the instruction is not found at the index\n\n .. code:: python\n\n index = instruction.index_in_row\n assert instruction.row.instructions[index] == instruction\n\n .. seealso:: :attr:`row_instructions`, :meth:`get_index_in_row`,\n :meth:`is_in_row`", "id": "f558:c1:m7"} {"signature": "@propertydef row_instructions(self):", "body": "return self.row.instructions", "docstring": "Shortcut for ``instruction.row.instructions``.\n\n :return: the instructions of the :attr:`row` the instruction is in\n\n .. 
seealso:: :attr:`index_in_row`", "id": "f558:c1:m8"} {"signature": "@propertydef next_instruction_in_row(self):", "body": "index = self.index_in_row + if index >= len(self.row_instructions):return Nonereturn self.row_instructions[index]", "docstring": "The instruction after this one or None.\n\n :return: the instruction in :attr:`row_instructions` after this or\n :obj:`None` if this is the last\n :rtype: knittingpattern.Instruction.InstructionInRow\n\n This can be used to traverse the instructions.\n\n .. seealso:: :attr:`previous_instruction_in_row`", "id": "f558:c1:m9"} {"signature": "@propertydef previous_instruction_in_row(self):", "body": "index = self.index_in_row - if index < :return Nonereturn self.row_instructions[index]", "docstring": "The instruction before this one or None.\n\n :return: the instruction in :attr:`row_instructions` before this or\n :obj:`None` if this is the first\n :rtype: knittingpattern.Instruction.InstructionInRow\n\n This can be used to traverse the instructions.\n\n .. seealso:: :attr:`next_instruction_in_row`", "id": "f558:c1:m10"} {"signature": "@propertydef _instruction_not_found_message(self):", "body": "return INSTRUCTION_NOT_FOUND_MESSAGE.format(instruction=self, row=self.row)", "docstring": "The message for the error.\n\n :return: an error message\n :rtype: str\n\n .. warning: private, do not use", "id": "f558:c1:m11"} {"signature": "def _raise_not_found_error(self):", "body": "raise InstructionNotFoundInRow(self._instruction_not_found_message)", "docstring": "Raise an error that this instruction is in its row no longer.\n\n :raises knittingpattern.Instruction.InstructionNotFoundInRow:\n the instruction was not found\n\n .. warning: private, do not use", "id": "f558:c1:m12"} {"signature": "@propertydef index_of_first_produced_mesh_in_row(self):", "body": "index = for instruction in self.row_instructions:if instruction is self:breakindex += instruction.number_of_produced_mesheselse:self._raise_not_found_error()return index", "docstring": "Index of the first produced mesh in the row that consumes it.\n\n :return: an index of the first produced mesh of rows produced meshes\n :rtype: int\n\n .. note:: If the instruction :meth:`produces meshes\n `, this is the index of the first\n mesh the instruction produces in all the meshes of the row.\n If the instruction does not produce meshes, the index of the mesh is\n returned as if the instruction had produced a mesh.\n\n .. code::\n\n if instruction.produces_meshes():\n index = instruction.index_of_first_produced_mesh_in_row", "id": "f558:c1:m13"} {"signature": "@propertydef index_of_last_produced_mesh_in_row(self):", "body": "index = self.index_of_first_produced_mesh_in_rowreturn index + self.number_of_produced_meshes - ", "docstring": "Index of the last mesh produced by this instruction in its row.\n\n :return: an index of the last produced mesh of rows produced meshes\n :rtype: int\n\n .. note:: If this instruction :meth:`produces meshes\n `, this is the index of\n its last produces mesh in the row. However, if this instruction does\n not produce meshes, this is the index **before** the first mesh of\n the instruction if it produced meshes.\n\n .. 
seealso:: :attr:`index_of_first_produced_mesh_in_row`", "id": "f558:c1:m14"} {"signature": "@propertydef index_of_first_consumed_mesh_in_row(self):", "body": "index = for instruction in self.row_instructions:if instruction is self:breakindex += instruction.number_of_consumed_mesheselse:self._raise_not_found_error()return index", "docstring": "The index of the first consumed mesh of this instruction in its row.\n\n Same as :attr:`index_of_first_produced_mesh_in_row`\n but for consumed meshes.", "id": "f558:c1:m15"} {"signature": "@propertydef index_of_last_consumed_mesh_in_row(self):", "body": "index = self.index_of_first_consumed_mesh_in_rowreturn index + self.number_of_consumed_meshes - ", "docstring": "The index of the last consumed mesh of this instruction in its row.\n\n Same as :attr:`index_of_last_produced_mesh_in_row`\n but for the last consumed mesh.", "id": "f558:c1:m16"} {"signature": "@propertydef produced_meshes(self):", "body": "return self._produced_meshes", "docstring": "The meshes produced by this instruction\n\n :return: a :class:`list` of :class:`meshes\n ` that this instruction produces\n :rtype: list\n\n .. code:: python\n\n assert len(inst.produced_meshes) == inst.number_of_produced_meshes\n assert all(mesh.is_produced() for mesh in inst.produced_meshes)\n\n .. seealso:: :attr:`consumed_meshes`, :attr:`consuming_instructions`", "id": "f558:c1:m17"} {"signature": "@propertydef consumed_meshes(self):", "body": "return self._consumed_meshes", "docstring": "The meshes consumed by this instruction\n\n :return: a :class:`list` of :class:`meshes\n ` that this instruction consumes\n :rtype: list\n\n .. code:: python\n\n assert len(inst.consumed_meshes) == inst.number_of_consumed_meshes\n assert all(mesh.is_consumed() for mesh in inst.consumed_meshes)\n\n .. seealso:: :attr:`produced_meshes`, :attr:`producing_instructions`", "id": "f558:c1:m18"} {"signature": "def __repr__(self):", "body": "index = self.get_index_in_row()if index is None:position = \"\".format(self.row)else:position = \"\".format(self.row, index)return \"\".format(self.__class__.__name__,(\"\".format(self.id) if self.id is not None else \"\"),self.type,position)", "docstring": ":obj:`repr(instruction) ` used for :func:`print`.\n\n :return: the string representation of this object\n :rtype: str", "id": "f558:c1:m19"} {"signature": "@propertydef producing_instructions(self):", "body": "return [(mesh.producing_instruction if mesh.is_produced() else None)for mesh in self.consumed_meshes]", "docstring": "Instructions that produce the meshes that this instruction consumes.\n\n :return: a list of :class:`instructions\n `\n :rtype: list\n\n .. seealso:: :attr:`consuming_instructions`, :attr:`consumed_meshes`", "id": "f558:c1:m20"} {"signature": "@propertydef consuming_instructions(self):", "body": "return [(mesh.consuming_instruction if mesh.is_consumed() else None)for mesh in self.produced_meshes]", "docstring": "Instructions that consume the meshes that this instruction produces.\n\n :return: a list of :class:`instructions\n `\n :rtype: list\n\n .. 
seealso:: :attr:`producing_instructions`, :attr:`produced_meshes`", "id": "f558:c1:m21"} {"signature": "@propertydef color(self):", "body": "return self.get(COLOR, self.row.color)", "docstring": "The color of the instruction.\n\n :return: the :data:`color ` of the instruction or\n :obj:`None` if none is specified.\n\n If no color is specified in the instruction, it is inherited form the\n row.", "id": "f558:c1:m22"} {"signature": "@propertydef last_produced_mesh(self):", "body": "return self._produced_meshes[-]", "docstring": "The last produced mesh.\n\n :return: the last produced mesh\n :rtype: knittingpattern.Mesh.Mesh\n :raises IndexError: if no mesh is produced\n\n .. seealso:: :attr:`Instruction.number_of_produced_meshes`", "id": "f558:c1:m23"} {"signature": "@propertydef last_consumed_mesh(self):", "body": "return self._consumed_meshes[-]", "docstring": "The last consumed mesh.\n\n :return: the last consumed mesh\n :rtype: knittingpattern.Mesh.Mesh\n :raises IndexError: if no mesh is consumed\n\n .. seealso:: :attr:`Instruction.number_of_consumed_meshes`", "id": "f558:c1:m24"} {"signature": "@propertydef first_produced_mesh(self):", "body": "return self._produced_meshes[]", "docstring": "The first produced mesh.\n\n :return: the first produced mesh\n :rtype: knittingpattern.Mesh.Mesh\n :raises IndexError: if no mesh is produced\n\n .. seealso:: :attr:`Instruction.number_of_produced_meshes`", "id": "f558:c1:m25"} {"signature": "@propertydef first_consumed_mesh(self):", "body": "return self._consumed_meshes[]", "docstring": "The first consumed mesh.\n\n :return: the first consumed mesh\n :rtype: knittingpattern.Mesh.Mesh\n :raises IndexError: if no mesh is consumed\n\n .. seealso:: :attr:`Instruction.number_of_consumed_meshes`", "id": "f558:c1:m26"} {"signature": "def unique(iterables):", "body": "included_elements = set()def included(element):result = element in included_elementsincluded_elements.add(element)return resultreturn [element for elements in iterables for element in elementsif not included(element)]", "docstring": "Create an iterable from the iterables that contains each element once.\n\n :return: an iterable over the iterables. Each element of the result\n appeared only once in the result. 
They are ordered by the first\n occurrence in the iterables.", "id": "f559:m0"} {"signature": "def default_parser():", "body": "from .ParsingSpecification import DefaultSpecificationspecification = DefaultSpecification()return Parser(specification)", "docstring": "The parser with a default specification.\n\n :return: a parser using a\n :class:`knittingpattern.ParsingSpecification.DefaultSpecification`\n :rtype: knittingpattern.Parser.Parser", "id": "f560:m0"} {"signature": "def __init__(self, specification):", "body": "self._spec = specificationself._start()", "docstring": "Create a parser with a specification.\n\n :param specification: the types and classes to use for the resulting\n object structure, preferably a\n :class:`knittingpattern.ParsingSpecification.ParsingSpecification`", "id": "f560:c1:m0"} {"signature": "def _start(self):", "body": "self._instruction_library = self._spec.new_default_instructions()self._as_instruction = self._instruction_library.as_instructionself._id_cache = {}self._pattern_set = Noneself._inheritance_todos = []self._instruction_todos = []", "docstring": "Initialize the parsing process.", "id": "f560:c1:m1"} {"signature": "@staticmethoddef _to_id(id_):", "body": "return tuple(id_) if isinstance(id_, list) else id_", "docstring": "Converts the argument to a object suitable as an identifier.\n\n :return: a hashable object", "id": "f560:c1:m2"} {"signature": "def _error(self, text):", "body": "raise self._spec.new_parsing_error(text)", "docstring": "Raise an error.\n\n :raises: a specified ParsingError\n :param str text: the text to include in the error message", "id": "f560:c1:m3"} {"signature": "def knitting_pattern_set(self, values):", "body": "self._start()pattern_collection = self._new_pattern_collection()self._fill_pattern_collection(pattern_collection, values)self._create_pattern_set(pattern_collection, values)return self._pattern_set", "docstring": "Parse a knitting pattern set.\n\n :param dict value: the specification of the knitting pattern set\n :rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet\n :raises knittingpattern.KnittingPatternSet.ParsingError: if\n :paramref:`value` does not fulfill the :ref:`specification\n `.", "id": "f560:c1:m4"} {"signature": "def _finish_inheritance(self):", "body": "while self._inheritance_todos:prototype, parent_id = self._inheritance_todos.pop()parent = self._id_cache[parent_id]prototype.inherit_from(parent)", "docstring": "Finish those who still need to inherit.", "id": "f560:c1:m5"} {"signature": "def _delay_inheritance(self, prototype, parent_id):", "body": "self._inheritance_todos.append((prototype, parent_id))", "docstring": "Add a deleyed inheritance that is ti be resolved later.\n\n When calling :meth:`_finish_inheritance` this inheritance chain shall\n be resolved.", "id": "f560:c1:m6"} {"signature": "def _finish_instructions(self):", "body": "while self._instruction_todos:row = self._instruction_todos.pop()instructions = row.get(INSTRUCTIONS, [])row.instructions.extend(instructions)", "docstring": "Finish those who still need to inherit.", "id": "f560:c1:m7"} {"signature": "def _delay_instructions(self, row):", "body": "self._instruction_todos.append(row)", "docstring": "Add a deleyed inheritance that is ti be resolved later.\n\n When calling :meth:`_finish_instructions` this inheritance chain shall\n be resolved.", "id": "f560:c1:m8"} {"signature": "def _new_pattern_collection(self):", "body": "return self._spec.new_pattern_collection()", "docstring": "Create a new pattern collection.\n\n 
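Editor's note: the ``unique`` helper (record f559:m0 just above) is almost fully readable despite the stripped literals. A runnable sketch with a small usage example, written as an illustration rather than the library's exact code:

.. code:: python

    def unique(iterables):
        """Chain the iterables, keeping each element once, in first-seen order."""
        included_elements = set()

        def included(element):
            result = element in included_elements
            included_elements.add(element)
            return result

        return [element for elements in iterables for element in elements
                if not included(element)]

    # elements keep the order of their first occurrence
    assert unique([[1, 2], [2, 3], [1, 4]]) == [1, 2, 3, 4]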
:return: a new specified pattern collection for\n :meth:`knitting_pattern_set`", "id": "f560:c1:m9"} {"signature": "def new_row_collection(self):", "body": "return self._spec.new_row_collection()", "docstring": "Create a new row collection.\n\n :return: a new specified row collection for the\n :meth:`knitting pattern `", "id": "f560:c1:m10"} {"signature": "def _fill_pattern_collection(self, pattern_collection, values):", "body": "pattern = values.get(PATTERNS, [])for pattern_to_parse in pattern:parsed_pattern = self._pattern(pattern_to_parse)pattern_collection.append(parsed_pattern)", "docstring": "Fill a pattern collection.", "id": "f560:c1:m11"} {"signature": "def _row(self, values):", "body": "row_id = self._to_id(values[ID])row = self._spec.new_row(row_id, values, self)if SAME_AS in values:self._delay_inheritance(row, self._to_id(values[SAME_AS]))self._delay_instructions(row)self._id_cache[row_id] = rowreturn row", "docstring": "Parse a row.", "id": "f560:c1:m12"} {"signature": "def new_row(self, id_):", "body": "return self._spec.new_row(id_, {}, self)", "docstring": "Create a new row with an id.\n\n :param id_: the id of the row\n :return: a row\n :rtype: knittingpattern.Row.Row", "id": "f560:c1:m13"} {"signature": "def instruction_in_row(self, row, specification):", "body": "whole_instruction_ = self._as_instruction(specification)return self._spec.new_instruction_in_row(row, whole_instruction_)", "docstring": "Parse an instruction.\n\n :param row: the row of the instruction\n :param specification: the specification of the instruction\n :return: the instruction in the row", "id": "f560:c1:m14"} {"signature": "def _pattern(self, base):", "body": "rows = self._rows(base.get(ROWS, []))self._finish_inheritance()self._finish_instructions()self._connect_rows(base.get(CONNECTIONS, []))id_ = self._to_id(base[ID])name = base[NAME]return self.new_pattern(id_, name, rows)", "docstring": "Parse a pattern.", "id": "f560:c1:m15"} {"signature": "def new_pattern(self, id_, name, rows=None):", "body": "if rows is None:rows = self.new_row_collection()return self._spec.new_pattern(id_, name, rows, self)", "docstring": "Create a new knitting pattern.\n\n If rows is :obj:`None` it is replaced with the\n :meth:`new_row_collection`.", "id": "f560:c1:m16"} {"signature": "def _rows(self, spec):", "body": "rows = self.new_row_collection()for row in spec:rows.append(self._row(row))return rows", "docstring": "Parse a collection of rows.", "id": "f560:c1:m17"} {"signature": "def _connect_rows(self, connections):", "body": "for connection in connections:from_row_id = self._to_id(connection[FROM][ID])from_row = self._id_cache[from_row_id]from_row_start_index = connection[FROM].get(START, DEFAULT_START)from_row_number_of_possible_meshes =from_row.number_of_produced_meshes - from_row_start_indexto_row_id = self._to_id(connection[TO][ID])to_row = self._id_cache[to_row_id]to_row_start_index = connection[TO].get(START, DEFAULT_START)to_row_number_of_possible_meshes =to_row.number_of_consumed_meshes - to_row_start_indexmeshes = min(from_row_number_of_possible_meshes,to_row_number_of_possible_meshes)number_of_meshes = connection.get(MESHES, meshes)from_row_stop_index = from_row_start_index + number_of_meshesto_row_stop_index = to_row_start_index + number_of_meshesassert <= from_row_start_index <= from_row_stop_indexproduced_meshes = from_row.produced_meshes[from_row_start_index:from_row_stop_index]assert <= to_row_start_index <= to_row_stop_indexconsumed_meshes = 
to_row.consumed_meshes[to_row_start_index:to_row_stop_index]assert len(produced_meshes) == len(consumed_meshes)mesh_pairs = zip(produced_meshes, consumed_meshes)for produced_mesh, consumed_mesh in mesh_pairs:produced_mesh.connect_to(consumed_mesh)", "docstring": "Connect the parsed rows.", "id": "f560:c1:m18"} {"signature": "def _get_type(self, values):", "body": "if TYPE not in values:self._error(\"\"\"\".format(KNITTING_PATTERN_TYPE))type_ = values[TYPE]if type_ != KNITTING_PATTERN_TYPE:self._error(\"\"\"\"\"\".format(type_, KNITTING_PATTERN_TYPE))return type_", "docstring": ":return: the type of a knitting pattern set.", "id": "f560:c1:m19"} {"signature": "def _get_version(self, values):", "body": "return values[VERSION]", "docstring": ":return: the version of :paramref:`values`.", "id": "f560:c1:m20"} {"signature": "def _create_pattern_set(self, pattern, values):", "body": "type_ = self._get_type(values)version = self._get_version(values)comment = values.get(COMMENT)self._pattern_set = self._spec.new_pattern_set(type_, version, pattern, self, comment)", "docstring": "Create a new pattern set.", "id": "f560:c1:m21"} {"signature": "@abstractmethoddef _producing_instruction_and_index(self):", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m0"} {"signature": "@abstractmethoddef _producing_row_and_index(self):", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m1"} {"signature": "@abstractmethoddef _consuming_instruction_and_index(self):", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m2"} {"signature": "@abstractmethoddef _consuming_row_and_index(self):", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m3"} {"signature": "@abstractmethoddef _is_produced(self):", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m4"} {"signature": "@abstractmethoddef _is_consumed(self):", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m5"} {"signature": "@abstractmethoddef _is_consumed_mesh(self):", "body": "", "docstring": "Replace this method.\n\n :return: whether this mesh is an instance of a ConsumedMesh.", "id": "f561:c0:m6"} {"signature": "@abstractmethoddef _disconnect(self):", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m7"} {"signature": "@abstractmethoddef _connect_to(self, other_mesh):", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m8"} {"signature": "@abstractmethoddef _as_produced_mesh(self):", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m9"} {"signature": "@abstractmethoddef _as_consumed_mesh(self):", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m10"} {"signature": "@abstractmethoddef _is_connected_to(self, other_mesh):", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m11"} {"signature": "def is_produced(self):", "body": "return self._is_produced()", "docstring": "Whether the mesh has an instruction that produces it.\n\n :return: whether the mesh is produced by an instruction\n :rtype: bool\n\n If you get this mesh from\n :attr:`knittingpattern.Instruction.InstructionInRow.produced_meshes` or\n :attr:`knittingpattern.Row.Row.produced_meshes`,\n this should be :obj:`True`.\n\n .. 
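Editor's note: the ``_connect_rows`` body above (f560:c1:m18) is readable apart from the stripped key constants and the default start index. A hedged sketch of the same logic, assuming the connection dictionaries use the keys "from", "to", "id", "start" and "meshes" and a default start of 0:

.. code:: python

    def _connect_rows(self, connections):
        """Connect produced meshes of one row to consumed meshes of another."""
        for connection in connections:
            from_row = self._id_cache[self._to_id(connection["from"]["id"])]
            to_row = self._id_cache[self._to_id(connection["to"]["id"])]
            from_start = connection["from"].get("start", 0)  # key and default assumed
            to_start = connection["to"].get("start", 0)
            possible_meshes = min(
                from_row.number_of_produced_meshes - from_start,
                to_row.number_of_consumed_meshes - to_start)
            number_of_meshes = connection.get("meshes", possible_meshes)
            produced = from_row.produced_meshes[
                from_start:from_start + number_of_meshes]
            consumed = to_row.consumed_meshes[
                to_start:to_start + number_of_meshes]
            for produced_mesh, consumed_mesh in zip(produced, consumed):
                produced_mesh.connect_to(consumed_mesh)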
warning:: Before you use any methods on how the mesh is produced,\n you should check with ``mesh.is_produced()``.", "id": "f561:c0:m14"} {"signature": "def is_consumed(self):", "body": "return self._is_consumed()", "docstring": "Whether the mesh has an instruction that consumed it.\n\n :return: whether the mesh is consumed by an instruction\n :rtype: bool\n\n If you get this mesh from\n :attr:`knittingpattern.Instruction.InstructionInRow.consumed_meshes` or\n :attr:`knittingpattern.Row.Row.consumed_meshes`,\n this should be :obj:`True`.\n\n .. warning:: Before you use any methods on how the mesh is consumed,\n you should check with ``mesh.is_consumed()``.", "id": "f561:c0:m15"} {"signature": "@propertydef index_in_producing_instruction(self):", "body": "self._assert_is_produced()return self._producing_instruction_and_index()[]", "docstring": "Index in instruction as a produced mesh.\n\n :return: the index of the mesh in the list of meshes that\n :attr:`producing_instruction` produces\n :rtype: int\n\n .. code:: python\n\n instruction = mesh.producing_instruction\n index = mesh.index_in_producing_instruction\n assert instruction.produced_meshes[index] == mesh\n\n .. seealso:: :attr:`producing_instruction`,\n :attr:`index_in_consuming_instruction`\n\n .. warning:: Check with :meth:`is_produced` before!", "id": "f561:c0:m16"} {"signature": "@propertydef producing_instruction(self):", "body": "self._assert_is_produced()return self._producing_instruction_and_index()[]", "docstring": "Instruction which produces this mesh.\n\n :return: the instruction that produces this mesh\n :rtype: knittingpattern.Instruction.InstructionInRow\n\n .. seealso:: :attr:`index_in_producing_instruction`,\n :attr:`producing_row`, :attr:`consuming_row`\n\n .. warning:: Check with :meth:`is_produced` before!", "id": "f561:c0:m17"} {"signature": "@propertydef producing_row(self):", "body": "self._assert_is_produced()return self._producing_row_and_index()[]", "docstring": "Row which produces this mesh.\n\n :return: the row of the instruction that produces this mesh\n :rtype: knittingpattern.Row.Row\n\n .. seealso:: :attr:`index_in_producing_row`,\n :attr:`producing_instruction`, :attr:`consuming_row`\n\n .. warning:: Check with :meth:`is_produced` before!", "id": "f561:c0:m18"} {"signature": "@propertydef index_in_producing_row(self):", "body": "self._assert_is_produced()return self._producing_row_and_index()[]", "docstring": "Index in row as produced mesh.\n\n :return: the index of the mesh in the :attr:`producing_row`\n :rtype: int\n\n .. code:: python\n\n row = mesh.producing_row\n index = mesh.index_in_producing_row\n assert row[index] == mesh\n\n .. seealso:: :attr:`producing_row`, :attr:`index_in_consuming_row`\n\n .. warning:: Check with :meth:`is_produced` before!", "id": "f561:c0:m19"} {"signature": "@propertydef index_in_consuming_row(self):", "body": "self._assert_is_consumed()return self._consuming_row_and_index()[]", "docstring": "Index in row as consumed mesh.\n\n :return: the index of the mesh in the list of meshes that\n :attr:`consuming_row` consumes\n :rtype: int\n\n .. code:: python\n\n row = mesh.consuming_row\n index = mesh.index_in_consuming_row\n assert row.consumed_meshes[index] == mesh\n\n .. seealso:: :attr:`consuming_row`, :attr:`index_in_producing_row`\n\n .. 
warning:: Check with :meth:`is_consumed` before!", "id": "f561:c0:m20"} {"signature": "@propertydef consuming_row(self):", "body": "self._assert_is_consumed()return self._consuming_row_and_index()[]", "docstring": "Row which consumes this mesh.\n\n :return: the row that consumes this mesh\n :rtype: knittingpattern.Row.Row\n\n .. seealso:: :attr:`index_in_consuming_row`,\n :attr:`consuming_instruction`, :attr:`producing_row`\n\n .. warning:: Check with :meth:`is_consumed` before!", "id": "f561:c0:m21"} {"signature": "@propertydef consuming_instruction(self):", "body": "self._assert_is_consumed()return self._consuming_instruction_and_index()[]", "docstring": "Instruction which consumes this mesh.\n\n :return: the instruction that consumes this mesh\n :rtype: knittingpattern.Instruction.InstructionInRow\n\n .. seealso:: :attr:`index_in_consuming_instruction`,\n :attr:`consuming_row`, :attr:`producing_instruction`\n\n .. warning:: Check with :meth:`is_consumed` before!", "id": "f561:c0:m22"} {"signature": "@propertydef index_in_consuming_instruction(self):", "body": "self._assert_is_consumed()return self._consuming_instruction_and_index()[]", "docstring": "Index in instruction as consumed mesh.\n\n :return: the index of the mesh in the list of meshes that\n :attr:`consuming_instruction` consumes\n :rtype: int\n\n .. code:: python\n\n instruction = mesh.consuming_instruction\n index = mesh.index_in_consuming_instruction\n assert instruction.consumed_meshes[index] == mesh\n\n .. seealso:: :attr:`consuming_instruction`,\n :attr:`index_in_consuming_instruction`\n\n .. warning:: Check with :meth:`is_consumed` before!", "id": "f561:c0:m23"} {"signature": "def is_knit(self):", "body": "self._assert_is_produced()return self._producing_instruction_and_index()[].does_knit()", "docstring": "Whether the mesh is produced by a knit instruction.\n\n :return: whether the mesh is knit by an instruction\n :rtype: bool\n\n .. seealso:: :attr:`producing_instruction`", "id": "f561:c0:m24"} {"signature": "def __repr__(self):", "body": "if self._is_consumed():instruction, _ = self._consuming_instruction_and_index()row, row_index = self._consuming_row_and_index()consume_string = \"\".format(instruction,row,row_index)else:consume_string = \"\"if self._is_produced():instruction, _ = self._producing_instruction_and_index()row, row_index = self._producing_row_and_index()produce_string = \"\".format(instruction,row,row_index)else:produce_string = \"\"return \"\".format(self.__class__.__name__, produce_string, consume_string)", "docstring": "This mesh as string.\n\n :return: the string representation of this mesh.\n :rtype: str\n\n This is useful for :func:`print` and class:`str`", "id": "f561:c0:m25"} {"signature": "def disconnect(self):", "body": "if self.is_connected():self._disconnect()", "docstring": "Remove the connection between two rows through this mesh.\n\n After disconnecting this mesh, it can be connected anew.", "id": "f561:c0:m26"} {"signature": "def connect_to(self, other_mesh):", "body": "other_mesh.disconnect()self.disconnect()self._connect_to(other_mesh)", "docstring": "Create a connection to an other mesh.\n\n .. warning:: Both meshes need to be disconnected and one needs to be\n a consumed and the other a produced mesh. You can check if a\n connection is possible using :meth:`can_connect_to`.\n\n .. 
seealso:: :meth:`is_consumed`, :meth:`is_produced`,\n :meth:`can_connect_to`", "id": "f561:c0:m27"} {"signature": "def is_connected(self):", "body": "return self._is_consumed() and self._is_produced()", "docstring": "Returns whether this mesh is already connected.\n\n :return: whether this mesh is connected to an other.\n :rtype: bool", "id": "f561:c0:m28"} {"signature": "def as_produced_mesh(self):", "body": "self._assert_is_produced()return self._as_produced_mesh()", "docstring": "The produced part to this mesh.\n\n If meshes are split up, it may be important which row the mesh is\n connected to afterwards. This method returns the mesh that is\n connected to the :attr:`producing row `.\n\n If you got this mesh from :attr:`InstructionInRow.produced_meshes\n ` or\n :attr:`Row.produced_meshes `,\n this returns the same object.\n\n .. seealso:: :meth:`as_consumed_mesh`,\n :attr:`knittinpattern.Instruction.InstructionInRow.produced_meshes`,\n :attr:`knittinpattern.Row.Row.produced_meshes`", "id": "f561:c0:m29"} {"signature": "def as_consumed_mesh(self):", "body": "self._assert_is_consumed()return self._as_consumed_mesh()", "docstring": "The consumed part to this mesh.", "id": "f561:c0:m30"} {"signature": "def is_mesh(self):", "body": "return True", "docstring": "Whether this object is a mesh.\n\n :return: :obj:`True`\n :rtype: bool", "id": "f561:c0:m31"} {"signature": "def is_connected_to(self, other_mesh):", "body": "assert other_mesh.is_mesh()return self._is_connected_to(other_mesh)", "docstring": "Whether the one mesh is conencted to the other.", "id": "f561:c0:m32"} {"signature": "def can_connect_to(self, other):", "body": "assert other.is_mesh()disconnected = not other.is_connected() and not self.is_connected()types_differ = self._is_consumed_mesh() != other._is_consumed_mesh()return disconnected and types_differ", "docstring": "Whether a connection can be established between those two meshes.", "id": "f561:c0:m33"} {"signature": "def __init__(self, producing_instruction,index_in_producing_instruction):", "body": "self.__producing_instruction_and_index = (producing_instruction,index_in_producing_instruction)self._consumed_part = None", "docstring": ":param producing_instruction: the\n :class:`instruction `\n that produces the mesh\n:param int index_in_producing_instruction: the index of the mesh\n in the list of meshes that :attr:`producing_instruction`\n produces\n\n.. note:: There should be no necessity to create instances of this\n directly. You should be able to use\n ``instruction.produced_meshes`` or ``instruction.consumed_meshes``\n to access the :class:`meshes `.", "id": "f561:c1:m0"} {"signature": "def __init__(self, consuming_instruction,index_in_consuming_instruction):", "body": "self.__consuming_instruction_and_index = (consuming_instruction,index_in_consuming_instruction)self._produced_part = None", "docstring": ":param consuming_instruction: the\n :class:`instruction `\n that consumes the mesh\n:param int index_in_consuming_instruction: the index of the mesh\n in the list of meshes that :attr:`consuming_instruction`\n consumes\n\n.. note:: There should be no necessity to create instances of this\n directly. 
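Editor's note: the connection protocol between meshes is spelled out by the records above: ``can_connect_to`` checks that both meshes are free and of opposite kinds, and ``connect_to`` disconnects both sides before linking them. The bodies are readable, so the following is only a cleaned-up restatement, not new behaviour:

.. code:: python

    def can_connect_to(self, other):
        """A consumed mesh can connect to a free produced mesh and vice versa."""
        assert other.is_mesh()
        disconnected = not other.is_connected() and not self.is_connected()
        types_differ = self._is_consumed_mesh() != other._is_consumed_mesh()
        return disconnected and types_differ

    def connect_to(self, other_mesh):
        """Break any existing connections on both sides, then connect."""
        other_mesh.disconnect()
        self.disconnect()
        self._connect_to(other_mesh)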
You should be able to use\n ``instruction.produced_meshes`` or ``instruction.consumed_meshes``\n to access the :class:`meshes `.", "id": "f561:c2:m0"} {"signature": "def _connect_to_produced_mesh(self, produced_mesh):", "body": "self._produced_part = produced_mesh", "docstring": "This is called after a connection has been established by the\n produced mesh.", "id": "f561:c2:m11"} {"signature": "def __init__(self, on_dump):", "body": "super().__init__(self._dump_to_file)self.__dump_object = on_dump", "docstring": "Create a new XMLDumper object with the callable `on_dump`.\n\n `on_dump` takes no aguments and returns the object that should be\n serialized to XML.", "id": "f562:c0:m0"} {"signature": "def object(self):", "body": "return self.__dump_object()", "docstring": "Return the object that should be dumped.", "id": "f562:c0:m1"} {"signature": "def _dump_to_file(self, file):", "body": "xmltodict.unparse(self.object(), file, pretty=True)", "docstring": "dump to the file", "id": "f562:c0:m2"} {"signature": "def __init__(self, on_dump, text_is_expected=True, encoding=\"\"):", "body": "self.__dump_to_file = on_dumpself.__text_is_expected = text_is_expectedself.__encoding = encoding", "docstring": "Create a new dumper object with a function :paramref:`on_dump`\n\n :param on_dump: a function that takes a file-like object as argument\n and writes content to it.\n :param bool text_is_expected: whether to use text mode\n (:obj:`True`, default) or binary mode (:obj:`False`)\n for :paramref:`on_dump`.\n\n The dumper calls :paramref:`on_dump` with a file-like object every time\n one of its save methods, e.g. :meth:`string` or :meth:`file` is called.\n The file-like object in the :paramref:`file` argument supports the\n method ``write()`` to which the content should be written.\n\n :paramref:`text_is_expected` should be\n\n - :obj:`True` to pass a file to :paramref:`on_dump` that you can write\n strings to\n\n - :obj:`False` to pass a file to :paramref:`on_dump` that you can write\n bytes to", "id": "f564:c0:m0"} {"signature": "@propertydef encoding(self):", "body": "return self.__encoding", "docstring": ":return: the encoding for byte to string conversion\n :rtype: str", "id": "f564:c0:m1"} {"signature": "def string(self):", "body": "if self.__text_is_expected:return self._string()else:return self._bytes().decode(self.__encoding)", "docstring": ":return: the dump as a string", "id": "f564:c0:m2"} {"signature": "def _string(self):", "body": "file = StringIO()self.__dump_to_file(file)file.seek()return file.read()", "docstring": ":return: the string from a :class:`io.StringIO`", "id": "f564:c0:m3"} {"signature": "def bytes(self):", "body": "if self.__text_is_expected:return self.string().encode(self.__encoding)else:return self._bytes()", "docstring": ":return: the dump as bytes.", "id": "f564:c0:m4"} {"signature": "def _bytes(self):", "body": "file = BytesIO()self.__dump_to_file(file)file.seek()return file.read()", "docstring": ":return: bytes from a :class:`io.BytesIO`", "id": "f564:c0:m5"} {"signature": "def file(self, file=None):", "body": "if file is None:file = StringIO()self._file(file)return file", "docstring": "Saves the dump in a file-like object in text mode.\n\n :param file: :obj:`None` or a file-like object.\n :return: a file-like object\n\n If :paramref:`file` is :obj:`None`, a new :class:`io.StringIO`\n is returned.\n If :paramref:`file` is not :obj:`None` it should be a file-like object.\n\n The content is written to the file. 
After writing, the file's\n read/write position points behind the dumped content.", "id": "f564:c0:m6"} {"signature": "def _file(self, file):", "body": "if not self.__text_is_expected:file = BytesWrapper(file, self.__encoding)self.__dump_to_file(file)", "docstring": "Dump the content to a `file`.", "id": "f564:c0:m7"} {"signature": "def binary_file(self, file=None):", "body": "if file is None:file = BytesIO()self._binary_file(file)return file", "docstring": "Same as :meth:`file` but for binary content.", "id": "f564:c0:m8"} {"signature": "def _binary_file(self, file):", "body": "if self.__text_is_expected:file = TextWrapper(file, self.__encoding)self.__dump_to_file(file)", "docstring": "Dump the ocntent into the `file` in binary mode.", "id": "f564:c0:m9"} {"signature": "def _mode_and_encoding_for_open(self):", "body": "if self.__text_is_expected:return \"\", self.__encodingreturn \"\", None", "docstring": ":return: the file mode and encoding for :obj:`open`.", "id": "f564:c0:m10"} {"signature": "def path(self, path):", "body": "self._path(path)", "docstring": "Saves the dump in a file named :paramref:`path`.\n\n :param str path: a valid path to a file location. The file can exist.", "id": "f564:c0:m11"} {"signature": "def _path(self, path):", "body": "mode, encoding = self._mode_and_encoding_for_open()with open(path, mode, encoding=encoding) as file:self.__dump_to_file(file)", "docstring": "Saves the dump in a file named `path`.", "id": "f564:c0:m12"} {"signature": "def _temporary_file(self, delete):", "body": "file = NamedTemporaryFile(\"\", delete=delete,encoding=self.__encoding)self._file(file)return file", "docstring": ":return: a temporary file where the content is dumped to.", "id": "f564:c0:m13"} {"signature": "def temporary_path(self, extension=\"\"):", "body": "path = NamedTemporaryFile(delete=False, suffix=extension).nameself.path(path)return path", "docstring": "Saves the dump in a temporary file and returns its path.\n\n .. warning:: The user of this method is responsible for deleting this\n file to save space on the hard drive.\n If you only need a file object for a short period of time\n you can use the method :meth:`temporary_file`.\n\n :param str extension: the ending ot the file name e.g. 
``\".png\"``\n :return: a path to the temporary file\n :rtype: str", "id": "f564:c0:m14"} {"signature": "def temporary_file(self, delete_when_closed=True):", "body": "return self._temporary_file(delete_when_closed)", "docstring": "Saves the dump in a temporary file and returns the open file object.\n\n :param bool delete_when_closed: whether to delete the temporary file\n when it is closed.\n :return: a file-like object\n\n If :paramref:`delete_when_closed` is :obj:`True` (default) the file\n on the hard drive will be deleted if it is closed or not referenced\n any more.\n\n If :paramref:`delete_when_closed` is :obj:`False` the returned\n temporary file is not deleted when closed or unreferenced.\n The user of this method has then the responsibility to free the\n space on the host system.\n\n The returned file-like object has an attribute ``name`` that holds\n the location of the file.", "id": "f564:c0:m15"} {"signature": "def binary_temporary_file(self, delete_when_closed=True):", "body": "return self._binary_temporary_file(delete_when_closed)", "docstring": "Same as :meth:`temporary_file` but for binary mode.", "id": "f564:c0:m16"} {"signature": "def _binary_temporary_file(self, delete):", "body": "file = NamedTemporaryFile(\"\", delete=delete)self._binary_file(file)return file", "docstring": ":return: a binary temporary file where the content is dumped to.", "id": "f564:c0:m17"} {"signature": "def __repr__(self):", "body": "return \"\".format(self.__class__.__name__,self.__encoding)", "docstring": "the string representation for people to read\n\n :return: the string representation of this object\n :rtype: str", "id": "f564:c0:m18"} {"signature": "def __init__(self, on_dump):", "body": "super().__init__(self._dump_to_file)self.__dump_object = on_dump", "docstring": "Create a new JSONDumper object with the callable `on_dump`.\n\n `on_dump` takes no arguments and returns the object that should be\n serialized to JSON.", "id": "f565:c0:m0"} {"signature": "def object(self):", "body": "return self.__dump_object()", "docstring": "Return the object that should be dumped.", "id": "f565:c0:m1"} {"signature": "def _dump_to_file(self, file):", "body": "json.dump(self.object(), file)", "docstring": "dump to the file", "id": "f565:c0:m2"} {"signature": "def knitting_pattern(self, specification=None):", "body": "from ..ParsingSpecification import new_knitting_pattern_set_loaderif specification is None:loader = new_knitting_pattern_set_loader()else:loader = new_knitting_pattern_set_loader(specification)return loader.object(self.object())", "docstring": "loads a :class:`knitting pattern\n ` from the dumped\n content\n\n :param specification: a\n :class:`~knittingpattern.ParsingSpecification.ParsingSpecification`\n or :obj:`None` to use the default specification", "id": "f565:c0:m3"} {"signature": "def kivy_svg(self):", "body": "from kivy.graphics.svg import Svgpath = self.temporary_path(\"\")try:return Svg(path)finally:remove_file(path)", "docstring": "An SVG object.\n\n :return: an SVG object\n :rtype: kivy.graphics.svg.Svg\n :raises ImportError: if the module was not found", "id": "f566:c0:m0"} {"signature": "def __init__(self, text_file, encoding):", "body": "self._file = text_fileself._encoding = encoding", "docstring": "Create a wrapper around :paramref:`text_file` that decodes\n bytes to string using :paramref:`encoding` and writes them\n to :paramref:`text_file`.\n\n :param str encoding: The encoding to use to transfer the written bytes\n to string so they can be written to 
:paramref:`text_file`\n :param text_file: a file-like object open in text mode", "id": "f567:c0:m0"} {"signature": "def write(self, bytes_):", "body": "string = bytes_.decode(self._encoding)self._file.write(string)", "docstring": "Write bytes to the file.", "id": "f567:c0:m1"} {"signature": "def __init__(self, binary_file, encoding):", "body": "self._file = binary_fileself._encoding = encoding", "docstring": "Create a wrapper around :paramref:`binary_file` that encodes\n strings to bytes using :paramref:`encoding` and writes them\n to :paramref:`binary_file`.\n\n :param str encoding: The encoding to use to transfer the written string\n to bytes so they can be written to :paramref:`binary_file`\n :param binary_file: a file-like object open in binary mode", "id": "f567:c1:m0"} {"signature": "def write(self, string):", "body": "bytes_ = string.encode(self._encoding)self._file.write(bytes_)", "docstring": "Write a string to the file.", "id": "f567:c1:m1"} {"signature": "def __init__(self, specification, inherited_values=()):", "body": "self.__specification = [specification] + list(inherited_values)", "docstring": "create a new prototype\n\n :param specification: the specification of the prototype.\n This specification can be inherited by other prototypes.\n It can be a :class:`dict` or an other\n :class:`knittingpattern.Prototype.Prototype` or anything else that\n supports :meth:`__contains__` and :meth:`__getitem__`\n\n To look up a key in the specification it will be walked through\n\n 1. :paramref:`specification`\n 2. :paramref:`inherited_values` in order\n\n However, new lookups can be inserted at before\n :paramref:`inherited_values`, by calling :meth:`inherit_from`.", "id": "f568:c0:m0"} {"signature": "def get(self, key, default=None):", "body": "for base in self.__specification:if key in base:return base[key]return default", "docstring": ":return: the value behind :paramref:`key` in the specification.\n If no value was found, :paramref:`default` is returned.\n:param key: a :ref:`specification key `", "id": "f568:c0:m1"} {"signature": "def __getitem__(self, key):", "body": "default = []value = self.get(key, default)if value is default:raise KeyError(key)return value", "docstring": "``prototype[key]``\n\n :param key: a :ref:`specification key `\n :return: the value behind :paramref:`key` in the specification\n :raises KeyError: if no value was found", "id": "f568:c0:m2"} {"signature": "def __contains__(self, key):", "body": "default = []value = self.get(key, default)return value is not default", "docstring": "``key in prototype``\n\n :param key: a :ref:`specification key `\n :return: whether the key was found in the specification\n :rtype: bool", "id": "f568:c0:m3"} {"signature": "def inherit_from(self, new_specification):", "body": "self.__specification.insert(, new_specification)", "docstring": "Inherit from a :paramref:`new_specification`\n\n :param new_specification: a specification as passed to :meth:`__init__`\n\n The :paramref:`new_specification` is inserted before the first\n :paramref:`inherited value <__init__.inherited_values>`.\n\n If the order is\n\n 1. :paramref:`~__init__.specification`\n 2. :paramref:`~__init__.inherited_values`\n\n after calling ``prototype.inherit_from(new_specification)`` the lookup\n order is\n\n 1. :paramref:`~__init__.specification`\n 2. :paramref:`new_specification`\n 3. 
:paramref:`~__init__.inherited_values`", "id": "f568:c0:m4"} {"signature": "def __init__(self, type_, version, patterns, parser, comment=None):", "body": "self._version = versionself._type = type_self._patterns = patternsself._comment = commentself._parser = parser", "docstring": "Create a new knitting pattern set.\n\n This is the class for a set of :class:`knitting patterns\n `.\n\n :param str type: the type of the knitting pattern set, see the\n :ref:`specification `.\n :param str version: the version of the knitting pattern set.\n This is not the version of the library but the version of the\n :ref:`specification `.\n :param patterns: a collection of patterns. This should be a\n :class:`~knittingpattern.IdCollection.IdCollection` of\n :class:`KnittingPatterns\n `.\n :param comment: a comment about the knitting pattern", "id": "f569:c0:m0"} {"signature": "@propertydef version(self):", "body": "return self._version", "docstring": "The version of the knitting pattern specification.\n\n :return: the version of the knitting pattern, see :meth:`__init__`\n :rtype: str\n\n .. seealso:: :ref:`FileFormatSpecification`", "id": "f569:c0:m1"} {"signature": "@propertydef type(self):", "body": "return self._type", "docstring": "The type of the knitting pattern.\n\n :return: the type of the knitting pattern, see :meth:`__init__`\n :rtype: str\n\n .. seealso:: :ref:`FileFormatSpecification`", "id": "f569:c0:m2"} {"signature": "@propertydef patterns(self):", "body": "return self._patterns", "docstring": "The pattern contained in this set.\n\n :return: the patterns of the knitting pattern, see :meth:`__init__`\n :rtype: knittingpattern.IdCollection.IdCollection\n\n The patterns can be accessed by their id.", "id": "f569:c0:m3"} {"signature": "@propertydef comment(self):", "body": "return self._comment", "docstring": "The comment about the knitting pattern.\n\n :return: the comment for the knitting pattern set or None,\n see :meth:`__init__`.", "id": "f569:c0:m4"} {"signature": "def to_ayabpng(self):", "body": "return AYABPNGDumper(lambda: self)", "docstring": "Convert the knitting pattern to a png.\n\n :return: a dumper to save this pattern set as png for the AYAB\n software\n :rtype: knittingpattern.convert.AYABPNGDumper.AYABPNGDumper\n\n Example:\n\n .. code:: python\n\n >>> knitting_pattern_set.to_ayabpng().temporary_path()\n \"/the/path/to/the/file.png\"", "id": "f569:c0:m5"} {"signature": "def to_svg(self, zoom):", "body": "def on_dump():\"\"\"\"\"\"knitting_pattern = self.patterns.at()layout = GridLayout(knitting_pattern)instruction_to_svg = default_instruction_svg_cache()builder = SVGBuilder()kp_to_svg = KnittingPatternToSVG(knitting_pattern, layout,instruction_to_svg, builder, zoom)return kp_to_svg.build_SVG_dict()return XMLDumper(on_dump)", "docstring": "Create an SVG from the knitting pattern set.\n\n :param float zoom: the height and width of a knit instruction\n :return: a dumper to save the svg to\n :rtype: knittingpattern.Dumper.XMLDumper\n\n Example:\n\n .. 
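Editor's note: the ``Prototype`` records a little further up (f568:c0) describe a key lookup that walks the own specification first and then the inherited ones, with ``inherit_from`` inserting new specifications right after the own one. A small runnable re-creation, with the stripped insert position taken from the docstring (index 1, i.e. before the previously inherited values):

.. code:: python

    class Prototype:
        """Chain of dict-like specifications with prototype-style inheritance."""

        def __init__(self, specification, inherited_values=()):
            self._specification = [specification] + list(inherited_values)

        def get(self, key, default=None):
            for base in self._specification:
                if key in base:
                    return base[key]
            return default

        def __getitem__(self, key):
            default = []
            value = self.get(key, default)
            if value is default:
                raise KeyError(key)
            return value

        def __contains__(self, key):
            default = []
            return self.get(key, default) is not default

        def inherit_from(self, new_specification):
            # index 1 places it behind the own specification but before
            # everything inherited earlier (per the docstring above)
            self._specification.insert(1, new_specification)

    row = Prototype({"color": "blue"}, [{"color": "white", "width": 10}])
    assert row["color"] == "blue" and row["width"] == 10 and "height" not in row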
code:: python\n\n >>> knitting_pattern_set.to_svg(25).temporary_path(\".svg\")\n \"/the/path/to/the/file.svg\"", "id": "f569:c0:m6"} {"signature": "def add_new_pattern(self, id_, name=None):", "body": "if name is None:name = id_pattern = self._parser.new_pattern(id_, name)self._patterns.append(pattern)return pattern", "docstring": "Add a new, empty knitting pattern to the set.\n\n :param id_: the id of the pattern\n :param name: the name of the pattern to add or if :obj:`None`, the\n :paramref:`id_` is used\n :return: a new, empty knitting pattern\n :rtype: knittingpattern.KnittingPattern.KnittingPattern", "id": "f569:c0:m7"} {"signature": "@propertydef first(self):", "body": "return self._patterns.first", "docstring": "The first element in this set.\n\n :rtype: knittingpattern.KnittingPattern.KnittingPattern", "id": "f569:c0:m8"} {"signature": "def absjoin(*args):", "body": "return os.path.abspath(os.path.join(*args))", "docstring": ":return: an absolute path to the joined arguments\n:param args: the parts of the path to join", "id": "f571:m0"} {"signature": "def create_new_module_documentation():", "body": "for module in MODULES:if not os.path.isfile(module.doc_file):directory = os.path.dirname(module.doc_file)os.makedirs(directory, exist_ok=True)with open(module.doc_file, \"\") as file:write = file.writewrite(\"\") write(\"\" + module.name + \"\")write(\"\")write(module.title + \"\")write(\"\" * len(module.title) + \"\")write(\"\")write(\"\" + module.name + \"\")write(\"\")write(\"\")write(\"\")write(\"\")", "docstring": "Create documentation so it fits the tests.", "id": "f572:m6"} {"signature": "def print_bytes(bytes_):", "body": "try:print(bytes_.decode())except UnicodeDecodeError:print(bytes_)", "docstring": "Print bytes safely as string.", "id": "f573:m0"} {"signature": "@fixture(scope=\"\")def sphinx_build():", "body": "if os.path.exists(BUILD_DIRECTORY):shutil.rmtree(BUILD_DIRECTORY)output = subprocess.check_output(\"\", shell=True, cwd=DOCS_DIRECTORY,stderr=subprocess.STDOUT)output += subprocess.check_output(\"\", shell=True, cwd=DOCS_DIRECTORY,stderr=subprocess.STDOUT)print(output.decode())return output", "docstring": "Build the documentation with sphinx and return the output.", "id": "f573:m1"} {"signature": "@fixture(scope=\"\")def coverage(sphinx_build):", "body": "assert sphinx_build, \"\"with open(PYTHON_COVERAGE_FILE) as file:return file.read()", "docstring": ":return: the documentation coverage outpupt.", "id": "f573:m2"} {"signature": "@fixturedef warnings(sphinx_build):", "body": "return re.findall(WARNING_PATTERN, sphinx_build)", "docstring": ":return: the warnings during the build process.", "id": "f573:m3"} {"signature": "def get_passphrase(passphrase=None):", "body": "for passphrase_file_path in POTENTIAL_PASSPHRASE_LOCATIONS:if os.path.isfile(passphrase_file_path):with open(passphrase_file_path) as passphrase_file:return passphrase_file.read()return passphrase", "docstring": "Return a passphrase as found in a passphrase.ghost file\n\n Lookup is done in three locations on non-Windows systems and two on Windows\n All:\n `cwd/passphrase.ghost`\n `~/.ghost/passphrase.ghost`\n Only non-Windows:\n `/etc/ghost/passphrase.ghost`", "id": "f575:m2"} {"signature": "def migrate(src_path,src_passphrase,src_backend,dst_path,dst_passphrase,dst_backend):", "body": "src_storage = STORAGE_MAPPING[src_backend](**_parse_path_string(src_path))dst_storage = STORAGE_MAPPING[dst_backend](**_parse_path_string(dst_path))src_stash = Stash(src_storage, src_passphrase)dst_stash = 
Stash(dst_storage, dst_passphrase)keys = src_stash.export()dst_stash.load(src_passphrase, keys=keys)", "docstring": "Migrate all keys in a source stash to a destination stash\n\n The migration process will decrypt all keys using the source\n stash's passphrase and then encrypt them based on the destination\n stash's passphrase.\n\n re-encryption will take place only if the passphrases are differing", "id": "f575:m3"} {"signature": "def _get_current_time():", "body": "return datetime.fromtimestamp(time.time()).strftime('')", "docstring": "Return a human readable unix timestamp formatted string\n\n e.g. 2015-06-11 10:10:01", "id": "f575:m4"} {"signature": "def generate_passphrase(size=):", "body": "chars = string.ascii_lowercase + string.ascii_uppercase + string.digitsreturn str(''.join(random.choice(chars) for _ in range(size)))", "docstring": "Return a generate string `size` long based on lowercase, uppercase,\n and digit chars", "id": "f575:m5"} {"signature": "def _build_dict_from_key_value(keys_and_values):", "body": "key_dict = {}for key_value in keys_and_values:if '' not in key_value:raise GhostError(''.format(key_value))key, value = key_value.split('', )key_dict.update({str(key): str(value)})return key_dict", "docstring": "Return a dict from a list of key=value pairs", "id": "f575:m6"} {"signature": "def _prettify_dict(key):", "body": "assert isinstance(key, dict)pretty_key = ''for key, value in key.items():if isinstance(value, dict):pretty_value = ''for k, v in value.items():pretty_value += ''.format(k, v)value = pretty_valuepretty_key += ''.format(key.title() + '', value)return pretty_key", "docstring": "Return a human readable format of a key (dict).\n\n Example:\n\n Description: My Wonderful Key\n Uid: a54d6de1-922a-4998-ad34-cb838646daaa\n Created_At: 2016-09-15T12:42:32\n Metadata: owner=me;\n Modified_At: 2016-09-15T12:42:32\n Value: secret_key=my_secret_key;access_key=my_access_key\n Name: aws", "id": "f575:m7"} {"signature": "def _prettify_list(items):", "body": "assert isinstance(items, list)keys_list = ''for item in items:keys_list += ''.format(item)return keys_list", "docstring": "Return a human readable format of a list.\n\n Example:\n\n Available Keys:\n - my_first_key\n - my_second_key", "id": "f575:m8"} {"signature": "@click.group(context_settings=CLICK_CONTEXT_SETTINGS)def main():", "body": "", "docstring": "Ghost generates a secret-store in which you can\n keep your secrets encrypted. Ghost isn't real. 
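Editor's note: the ``migrate`` body that ends just above (f575:m3) is readable once the collapsed line breaks are restored; only the literals are missing. Reflowed below as a reading of the record rather than standalone code, since it reuses the module's own ``STORAGE_MAPPING``, ``_parse_path_string`` and ``Stash``:

.. code:: python

    def migrate(src_path, src_passphrase, src_backend,
                dst_path, dst_passphrase, dst_backend):
        """Export every key from the source stash and load it into the destination."""
        src_storage = STORAGE_MAPPING[src_backend](**_parse_path_string(src_path))
        dst_storage = STORAGE_MAPPING[dst_backend](**_parse_path_string(dst_path))
        src_stash = Stash(src_storage, src_passphrase)
        dst_stash = Stash(dst_storage, dst_passphrase)
        keys = src_stash.export()
        # `load` re-encrypts only if the two passphrases differ
        dst_stash.load(src_passphrase, keys=keys)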
It's just in your head.", "id": "f575:m12"} {"signature": "@main.command(name='', short_help='')@click.argument('', required=False, type=click.STRING)@click.option('','',default=None,type=click.STRING,help='')@click.option('', default=)@click.option('','',default='',type=click.Choice(STORAGE_MAPPING.keys()),help='')def init_stash(stash_path, passphrase, passphrase_size, backend):", "body": "stash_path = stash_path or STORAGE_DEFAULT_PATH_MAPPING[backend]click.echo(''.format(backend, stash_path))storage = STORAGE_MAPPING[backend](**_parse_path_string(stash_path))try:click.echo('')if os.path.isfile(PASSPHRASE_FILENAME):raise GhostError(''''''''.format(PASSPHRASE_FILENAME))stash = Stash(storage,passphrase=passphrase,passphrase_size=passphrase_size)passphrase = stash.init()if not passphrase:click.echo('')sys.exit()_write_passphrase_file(passphrase)except GhostError as ex:sys.exit(ex)except (OSError, IOError) as ex:click.echo(\"\")file_path = _parse_path_string(stash_path)['']click.echo(''''''.format(file_path))if os.path.isfile(file_path):os.remove(file_path)sys.exit(ex)click.echo(''.format(stash_path))click.echo(''''.format(PASSPHRASE_FILENAME))click.echo('''')", "docstring": "r\"\"\"Init a stash\n\n `STASH_PATH` is the path to the storage endpoint. If this isn't supplied,\n a default path will be used. In the path, you can specify a name\n for the stash (which, if omitted, will default to `ghost`) like so:\n `ghost init http://10.10.1.1:8500;stash1`.\n\n After initializing a stash, don't forget you can set environment\n variables for both your stash's path and its passphrase.\n On Linux/OSx you can run:\n\n export GHOST_STASH_PATH='http://10.10.1.1:8500;stash1'\n\n export GHOST_PASSPHRASE=$(cat passphrase.ghost)\n\n export GHOST_BACKEND='tinydb'", "id": "f575:m13"} {"signature": "@main.command(name='', short_help='')@click.argument('')@click.argument('', nargs=-, required=True)@click.option('','',help=\"\")@click.option('',metavar='',multiple=True,help='''')@click.option('','',is_flag=True,help='')@click.option('','',is_flag=True,help='')@click.option('',is_flag=True,help='''')@click.option('','',type=click.Choice(['', '']),default='',help='')@stash_option@passphrase_option@backend_optiondef put_key(key_name,value,description,meta,modify,add,lock,key_type,stash,passphrase,backend):", "body": "stash = _get_stash(backend, stash, passphrase)try:click.echo(''.format(key_type))stash.put(name=key_name,value=_build_dict_from_key_value(value),modify=modify,metadata=_build_dict_from_key_value(meta),description=description,lock=lock,key_type=key_type,add=add)click.echo('')except GhostError as ex:sys.exit(ex)", "docstring": "Insert a key to the stash\n\n `KEY_NAME` is the name of the key to insert\n\n `VALUE` is a key=value argument which can be provided multiple times.\n it is the encrypted value of your key", "id": "f575:m14"} {"signature": "@main.command(name='', short_help='')@click.argument('')@stash_option@passphrase_option@backend_optiondef lock_key(key_name,stash,passphrase,backend):", "body": "stash = _get_stash(backend, stash, passphrase)try:click.echo('')stash.lock(key_name=key_name)click.echo('')except GhostError as ex:sys.exit(ex)", "docstring": "Lock a key to prevent it from being deleted, purged or modified\n\n `KEY_NAME` is the name of the key to lock", "id": "f575:m15"} {"signature": "@main.command(name='', short_help='')@click.argument('')@stash_option@passphrase_option@backend_optiondef unlock_key(key_name,stash,passphrase,backend):", "body": "stash = _get_stash(backend, stash, 
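Editor's note: the ``put_key`` command above passes its repeated ``key=value`` arguments through ``_build_dict_from_key_value`` (f575:m6). That helper and ``generate_passphrase`` (f575:m5) are small enough to restate as runnable sketches; the ``=`` separator comes from the docstring, and the default passphrase size of 12 is an assumption because the literal is stripped:

.. code:: python

    import random
    import string

    def generate_passphrase(size=12):
        """Random string of lowercase, uppercase and digit characters."""
        chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
        return ''.join(random.choice(chars) for _ in range(size))

    def build_dict_from_key_value(keys_and_values):
        """Turn ["a=1", "b=2"] into {"a": "1", "b": "2"}."""
        key_dict = {}
        for key_value in keys_and_values:
            if '=' not in key_value:
                raise ValueError('{0} does not contain "="'.format(key_value))
            key, value = key_value.split('=', 1)
            key_dict[str(key)] = str(value)
        return key_dict

    assert build_dict_from_key_value(["user=me", "token=a=b"]) == {
        "user": "me", "token": "a=b"}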
passphrase)try:click.echo('')stash.unlock(key_name=key_name)click.echo('')except GhostError as ex:sys.exit(ex)", "docstring": "Unlock a key to allow it to be modified, deleted or purged\n\n `KEY_NAME` is the name of the key to unlock", "id": "f575:m16"} {"signature": "@main.command(name='', short_help='')@click.argument('')@click.argument('', required=False)@click.option('','',is_flag=True,default=False,help='')@click.option('',is_flag=True,default=False,help='')@stash_option@passphrase_option@backend_optiondef get_key(key_name,value_name,jsonify,no_decrypt,stash,passphrase,backend):", "body": "if value_name and no_decrypt:sys.exit('')stash = _get_stash(backend, stash, passphrase, quiet=jsonify or value_name)try:key = stash.get(key_name=key_name, decrypt=not no_decrypt)except GhostError as ex:sys.exit(ex)if not key:sys.exit(''.format(key_name))if value_name:key = key[''].get(value_name)if not key:sys.exit(''.format(value_name, key_name))if jsonify or value_name:click.echo(json.dumps(key, indent=, sort_keys=False).strip(''),nl=True)else:click.echo('')click.echo('' + _prettify_dict(key))", "docstring": "Retrieve a key from the stash\n\n \\b\n `KEY_NAME` is the name of the key to retrieve\n `VALUE_NAME` is a single value to retrieve e.g. if the value\n of the key `test` is `a=b,b=c`, `ghost get test a`a will return\n `b`", "id": "f575:m17"} {"signature": "@main.command(name='', short_help='')@click.argument('', nargs=-)@stash_option@passphrase_option@backend_optiondef delete_key(key_name, stash, passphrase, backend):", "body": "stash = _get_stash(backend, stash, passphrase)for key in key_name:try:click.echo(''.format(key))stash.delete(key_name=key)except GhostError as ex:sys.exit(ex)click.echo('')", "docstring": "Delete a key from the stash\n\n `KEY_NAME` is the name of the key to delete\n You can provide that multiple times to delete multiple keys at once", "id": "f575:m18"} {"signature": "@main.command(name='', short_help='')@click.argument('', required=False)@click.option('','',default=,help='')@click.option('','',default=,help='')@click.option('','',is_flag=True,help='')@click.option('','',is_flag=True,help='')@click.option('','',type=click.Choice(['', '']),default=None,help='')@stash_option@passphrase_option@backend_optiondef list_keys(key_name,max_suggestions,cutoff,jsonify,locked,key_type,stash,passphrase,backend):", "body": "stash = _get_stash(backend, stash, passphrase, quiet=jsonify)try:keys = stash.list(key_name=key_name,max_suggestions=max_suggestions,cutoff=cutoff,locked_only=locked,key_type=key_type)except GhostError as ex:sys.exit(ex)if jsonify:click.echo(json.dumps(keys, indent=, sort_keys=True))elif not keys:click.echo('')else:click.echo('')click.echo(_prettify_list(keys))", "docstring": "List all keys in the stash\n\n If `KEY_NAME` is provided, will look for keys containing `KEY_NAME`.\n If `KEY_NAME` starts with `~`, close matches will be provided according\n to `max_suggestions` and `cutoff`.", "id": "f575:m19"} {"signature": "@main.command(name='', short_help='')@click.option('','',required=True,is_flag=True,help='')@stash_option@passphrase_option@backend_optiondef purge_stash(force, stash, passphrase, backend):", "body": "stash = _get_stash(backend, stash, passphrase)try:click.echo('')stash.purge(force)click.echo('')except GhostError as ex:sys.exit(ex)", "docstring": "Purge the stash from all of its keys", "id": "f575:m20"} {"signature": "@main.command(name='')@click.option('','',default='',help='')@stash_option@passphrase_option@backend_optiondef 
export_keys(output_path, stash, passphrase, backend):", "body": "stash = _get_stash(backend, stash, passphrase)try:click.echo(''.format(output_path))stash.export(output_path=output_path)click.echo('')except GhostError as ex:sys.exit(ex)", "docstring": "Export all keys to a file", "id": "f575:m21"} {"signature": "@main.command(name='', short_help='')@click.argument('')@click.option('',help='')@stash_option@passphrase_option@backend_optiondef load_keys(key_file, origin_passphrase, stash, passphrase, backend):", "body": "stash = _get_stash(backend, stash, passphrase)click.echo(''.format(key_file))stash.load(origin_passphrase, key_file=key_file)click.echo('')", "docstring": "Load all keys from an exported key file to the stash\n\n `KEY_FILE` is the exported stash file to load keys from", "id": "f575:m22"} {"signature": "@main.command(name='',short_help='')@click.argument('', type=click.STRING)@click.argument('', type=click.STRING)@click.option('','',default=None,type=click.STRING,help='')@click.option('','',type=click.Choice(STORAGE_MAPPING.keys()),help='')@click.option('','',default=None,type=click.STRING,help='')@click.option('','',type=click.Choice(STORAGE_MAPPING.keys()),help='')def migrate_stash(source_stash_path,source_passphrase,source_backend,destination_stash_path,destination_passphrase,destination_backend):", "body": "click.echo(''.format(source_stash_path, destination_stash_path))try:migrate(src_path=source_stash_path,src_passphrase=source_passphrase,src_backend=source_backend,dst_path=destination_stash_path,dst_passphrase=destination_passphrase,dst_backend=destination_backend)except GhostError as ex:sys.exit(ex)click.echo('')", "docstring": "Migrate all keys from a source stash to a destination stash.\n\n `SOURCE_STASH_PATH` and `DESTINATION_STASH_PATH` are the paths\n to the stashs you wish to perform the migration on.", "id": "f575:m23"} {"signature": "@main.command(name='', short_help='')@click.argument('')@click.option('',is_flag=True,help=\"\")@stash_option@passphrase_option@backend_optiondef ssh(key_name, no_tunnel, stash, passphrase, backend):", "body": "def execute(command):try:click.echo(''.format(''.join(command)))subprocess.check_call(''.join(command), shell=True)except subprocess.CalledProcessError:sys.exit()stash = _get_stash(backend, stash, passphrase)key = stash.get(key_name)if key:_assert_is_ssh_type_key(key)else:sys.exit(''.format(key_name))conn_info = key['']ssh_key_path = conn_info.get('')ssh_key = conn_info.get('')proxy_key_path = conn_info.get('')proxy_key = conn_info.get('')id_file = _write_tmp(ssh_key) if ssh_key else ssh_key_pathconn_info[''] = id_fileif conn_info.get(''):proxy_id_file = _write_tmp(proxy_key) if proxy_key else proxy_key_pathconn_info[''] = proxy_id_filessh_command = _build_ssh_command(conn_info, no_tunnel)try:execute(ssh_command)finally:if id_file != ssh_key_path:click.echo(''.format(id_file))os.remove(id_file)if conn_info.get('') and proxy_id_file != proxy_key_path:click.echo(''.format(proxy_id_file))os.remove(proxy_id_file)", "docstring": "Use an ssh type key to connect to a machine via ssh\n\n Note that trying to use a key of the wrong type (e.g. 
`secret`)\n will result in an error.\n\n `KEY_NAME` is the key to use.\n\n For additional information on the different configuration options\n for an ssh type key, see the repo's readme.", "id": "f575:m24"} {"signature": "def _build_ssh_command(conn_info, no_tunnel=False):", "body": "command = ['', '', conn_info[''], conn_info['']]if conn_info.get('') and not no_tunnel:command.insert(, conn_info.get(''))command.insert(, '')command.insert(, '')if conn_info.get(''):command.extend(_build_proxy_command(conn_info))if conn_info.get(''):command.append(conn_info.get(''))return command", "docstring": "# TODO: Document clearly\nIndetityFile=\"~/.ssh/id_rsa\"\nProxyCommand=\"ssh -i ~/.ssh/id_rsa proxy_IP nc HOST_IP HOST_PORT\"", "id": "f575:m26"} {"signature": "def put(self,name,value=None,modify=False,metadata=None,description='',encrypt=True,lock=False,key_type='',add=False):", "body": "def assert_key_is_unlocked(existing_key):if existing_key and existing_key.get(''):raise GhostError(''''.format(name))def assert_value_provided_for_new_key(value, existing_key):if not value and not existing_key.get(''):raise GhostError('')self._assert_valid_stash()self._validate_key_schema(value, key_type)if value and encrypt and not isinstance(value, dict):raise GhostError('')key = self._handle_existing_key(name, modify or add)assert_key_is_unlocked(key)assert_value_provided_for_new_key(value, key)new_key = dict(name=name, lock=lock)if value:if add:value = self._update_existing_key(key, value)new_key[''] = self._encrypt(value) if encrypt else valueelse:new_key[''] = key.get('')new_key[''] = description or key.get('')new_key[''] = key.get('') or _get_current_time()new_key[''] = _get_current_time()new_key[''] = metadata or key.get('')new_key[''] = key.get('') or str(uuid.uuid4())new_key[''] = key.get('') or key_typekey_id = self._storage.put(new_key)audit(storage=self._storage.db_path,action='' if (modify or add) else '',message=json.dumps(dict(key_name=new_key[''],value='',description=new_key[''],uid=new_key[''],metadata=json.dumps(new_key['']),lock=new_key[''],type=new_key[''])))return key_id", "docstring": "Put a key inside the stash\n\n if key exists and modify true: delete and create\n if key exists and modify false: fail\n if key doesn't exist and modify true: fail\n if key doesn't exist and modify false: create\n\n `name` is unique and cannot be changed.\n\n `value` must be provided if the key didn't already exist, otherwise,\n the previous value will be retained.\n\n `created_at` will be left unmodified if the key\n already existed. Otherwise, the current time will be used.\n\n `modified_at` will be changed to the current time\n if the field is being modified.\n\n `metadata` will be updated if provided. 
If it wasn't\n provided the field from the existing key will be used and the\n same goes for the `uid` which will be generated if it didn't\n previously exist.\n\n `lock` will lock the key to prevent it from being modified or deleted\n\n `add` allows to add values to an existing key instead of overwriting.\n\n Returns the id of the key in the database", "id": "f575:c0:m4"} {"signature": "def get(self, key_name, decrypt=True):", "body": "self._assert_valid_stash()key = self._storage.get(key_name).copy()if not key.get(''):return Noneif decrypt:key[''] = self._decrypt(key[''])audit(storage=self._storage.db_path,action='',message=json.dumps(dict(key_name=key_name)))return key", "docstring": "Return a key with its parameters if it was found.", "id": "f575:c0:m7"} {"signature": "def list(self,key_name=None,max_suggestions=,cutoff=,locked_only=False,key_type=None):", "body": "self._assert_valid_stash()key_list = [k for k in self._storage.list()if k[''] != '' and(k.get('') if locked_only else True)]if key_type:types = ('', None) if key_type == '' else [key_type]key_list = [k for k in key_list if k.get('') in types]key_list = [k[''] for k in key_list]if key_name:if key_name.startswith(''):key_list = difflib.get_close_matches(key_name.lstrip(''), key_list, max_suggestions, cutoff)else:key_list = [k for k in key_list if key_name in k]audit(storage=self._storage.db_path,action='' + ('' if locked_only else ''),message=json.dumps(dict()))return key_list", "docstring": "Return a list of all keys.", "id": "f575:c0:m8"} {"signature": "def delete(self, key_name):", "body": "self._assert_valid_stash()if key_name == '':raise GhostError('''')if not self.get(key_name):raise GhostError(''.format(key_name))key = self._storage.get(key_name)if key.get(''):raise GhostError(''''.format(key_name))deleted = self._storage.delete(key_name)audit(storage=self._storage.db_path,action='',message=json.dumps(dict(key_name=key_name)))if not deleted:raise GhostError(''.format(key_name))", "docstring": "Delete a key if it exists.", "id": "f575:c0:m9"} {"signature": "def lock(self, key_name):", "body": "self._change_lock_state(key_name, lock=True)", "docstring": "Lock a key to prevent it from being deleted, purged and modified", "id": "f575:c0:m11"} {"signature": "def unlock(self, key_name):", "body": "self._change_lock_state(key_name, lock=False)", "docstring": "Unlock a locked key", "id": "f575:c0:m12"} {"signature": "def purge(self, force=False, key_type=None):", "body": "self._assert_valid_stash()if not force:raise GhostError(\"\"\"\"\"\")audit(storage=self._storage.db_path,action='',message=json.dumps(dict()))for key_name in self.list(key_type=key_type):self.delete(key_name)", "docstring": "Purge the stash from all keys", "id": "f575:c0:m14"} {"signature": "def export(self, output_path=None, decrypt=False):", "body": "self._assert_valid_stash()all_keys = []for key in self.list():all_keys.append(dict(self.get(key, decrypt=decrypt)))if all_keys:if output_path:with open(output_path, '') as output_file:output_file.write(json.dumps(all_keys, indent=))return all_keyselse:raise GhostError('')", "docstring": "Export all keys in the stash to a list or a file", "id": "f575:c0:m15"} {"signature": "def load(self, origin_passphrase, keys=None, key_file=None):", "body": "self._assert_valid_stash()if not (bool(keys) ^ bool(key_file)):raise GhostError('''')if key_file:with open(key_file) as stash_file:keys = json.loads(stash_file.read())decrypt = origin_passphrase != self.passphraseif decrypt:stub = Stash(TinyDBStorage(''), 
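The `put` docstring (f575:c0:m4) above spells out a four-way decision table over "key exists" and "modify". To make that rule explicit, here is a tiny standalone sketch of just that gate; the names (`existing`, `modify`) are illustrative and none of the stripped ghost internals are reproduced.

```python
class GhostError(Exception):
    """Stand-in for ghost's own error type."""

def check_put_allowed(existing, modify):
    """Apply the decision table from the `put` docstring:

    exists & modify      -> overwrite (delete and create)
    exists & not modify  -> fail
    missing & modify     -> fail (nothing to modify)
    missing & not modify -> create
    """
    if existing and not modify:
        raise GhostError('Key already exists; pass modify to overwrite it')
    if not existing and modify:
        raise GhostError('Key does not exist; cannot modify it')
    return 'overwrite' if existing else 'create'

assert check_put_allowed(existing=False, modify=False) == 'create'
assert check_put_allowed(existing=True, modify=True) == 'overwrite'
```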
origin_passphrase)for key in keys:self.put(name=key[''],value=stub._decrypt(key['']) if decrypt else key[''],metadata=key[''],description=key[''],lock=key.get(''),key_type=key.get(''),encrypt=decrypt)", "docstring": "Import keys to the stash from either a list of keys or a file\n\n `keys` is a list of dictionaries created by `self.export`\n `stash_path` is a path to a file created by `self.export`", "id": "f575:c0:m16"} {"signature": "def _encrypt(self, value):", "body": "value = json.dumps(value)with warnings.catch_warnings():warnings.simplefilter(\"\")encrypted_value = self.cipher.encrypt(value.encode(''))hexified_value = binascii.hexlify(encrypted_value).decode('')return hexified_value", "docstring": "Turn a json serializable value into an jsonified, encrypted,\n hexa string.", "id": "f575:c0:m19"} {"signature": "def _decrypt(self, hexified_value):", "body": "encrypted_value = binascii.unhexlify(hexified_value)with warnings.catch_warnings():warnings.simplefilter(\"\")jsonified_value = self.cipher.decrypt(encrypted_value).decode('')value = json.loads(jsonified_value)return value", "docstring": "The exact opposite of _encrypt", "id": "f575:c0:m20"} {"signature": "def put(self, key):", "body": "return self.db.insert(key)", "docstring": "Insert the key and return its database id", "id": "f575:c1:m3"} {"signature": "def get(self, key_name):", "body": "result = self.db.search(Query().name == key_name)if not result:return {}return result[]", "docstring": "Return a dictionary consisting of the key itself\n\n e.g.\n {u'created_at': u'2016-10-10 08:31:53',\n u'description': None,\n u'metadata': None,\n u'modified_at': u'2016-10-10 08:31:53',\n u'name': u'aws',\n u'uid': u'459f12c0-f341-413e-9d7e-7410f912fb74',\n u'value': u'the_value'}", "id": "f575:c1:m4"} {"signature": "def list(self):", "body": "return self.db.search(Query().name.matches(''))", "docstring": "Return a list of all keys (not just key names, but rather the keys\n themselves).\n\n e.g.\n {u'created_at': u'2016-10-10 08:31:53',\n u'description': None,\n u'metadata': None,\n u'modified_at': u'2016-10-10 08:31:53',\n u'name': u'aws',\n u'uid': u'459f12c0-f341-413e-9d7e-7410f912fb74',\n u'value': u'the_value'},\n {u'created_at': u'2016-10-10 08:32:29',\n u'description': u'my gcp token',\n u'metadata': {u'owner': u'nir'},\n u'modified_at': u'2016-10-10 08:32:29',\n u'name': u'gcp',\n u'uid': u'a51a0043-f241-4d52-93c1-266a3c5de15e',\n u'value': u'the_value'}]", "id": "f575:c1:m5"} {"signature": "def delete(self, key_name):", "body": "self.db.remove(Query().name == key_name)return self.get(key_name) == {}", "docstring": "Delete the key and return true if the key was deleted, else false", "id": "f575:c1:m6"} {"signature": "def _construct_key(self, values):", "body": "key = {}for column, value in zip(self.keys.columns, values):key.update({column.name: value})return key", "docstring": "Return a dictionary representing a key from a list of columns\n and a tuple of values", "id": "f575:c2:m8"} {"signature": "def init(self):", "body": "", "docstring": "Consul creates directories on the fly, so no init is required.", "id": "f575:c3:m1"} {"signature": "@propertydef is_initialized(self):", "body": "return True", "docstring": "...and therefore, this should always return true", "id": "f575:c3:m2"} {"signature": "def put(self, key):", "body": "self._consul_request('', self._key_url(key['']), json=key)return key['']", "docstring": "Put and return the only unique identifier possible, its url", "id": "f575:c3:m3"} {"signature": "def _decode(self, 
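The `_encrypt`/`_decrypt` pair (f575:c0:m19, m20) above JSON-serializes a value, encrypts it, and hex-encodes the ciphertext, then reverses those steps. The actual cipher object is not visible in this dump, so the sketch below substitutes `cryptography`'s Fernet purely to show the jsonify → encrypt → hexlify round trip; it is not the stash's real cipher setup.

```python
import binascii
import json

from cryptography.fernet import Fernet  # assumption: stand-in for the cipher not shown here

cipher = Fernet(Fernet.generate_key())

def encrypt(value):
    # jsonify -> encrypt -> hex-encode, mirroring the _encrypt docstring
    encrypted = cipher.encrypt(json.dumps(value).encode('utf-8'))
    return binascii.hexlify(encrypted).decode('ascii')

def decrypt(hexified_value):
    # the exact opposite: unhexlify -> decrypt -> json-load
    encrypted = binascii.unhexlify(hexified_value)
    return json.loads(cipher.decrypt(encrypted).decode('utf-8'))

secret = {'aws_access_key_id': 'AKIA...', 'aws_secret_access_key': '...'}
assert decrypt(encrypt(secret)) == secret
```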
data):", "body": "return json.loads(base64.b64decode(data['']).decode(''))", "docstring": "Decode one key as returned by consul.\n\n The format of the data returned is [{'Value': base-64-encoded-json,\n 'Key': keyname}]. We need to decode and return just the values.", "id": "f575:c3:m7"} {"signature": "def put(self, key):", "body": "self.client.write(self._key_path(key['']), **key)return self._key_path(key[''])", "docstring": "Put and return the only unique identifier possible, its path", "id": "f575:c4:m3"} {"signature": "def _key_path(self, key_name):", "body": "return '' + self._stash_name + '' + key_name", "docstring": "Return a valid vault path\n\n Note that we don't use os.path.join as the path is read by vault using\n slashes even on Windows.", "id": "f575:c4:m7"} {"signature": "def init(self):", "body": "self.es.indices.create(index=self.params[''], ignore=)", "docstring": "Create an Elasticsearch index if necessary", "id": "f575:c5:m1"} {"signature": "def __init__(self,db_path,bucket_location=None or os.environ.get(''),aws_access_key_id=None or os.environ.get(''),aws_secret_access_key=None or os.environ.get(''),aws_session_token=None or os.environ.get(''),profile_name=None or os.environ.get(''),region_name=None or os.environ.get('')):", "body": "if not S3_EXISTS:raise ImportError('')self.db_path = db_pathif not bucket_location:raise GhostError('')self.bucket_configuration = {'': bucket_location}session = boto3.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,aws_session_token=aws_session_token,profile_name=profile_name,region_name=region_name)self.client = session.client('')", "docstring": "Initializes the storage client\n :param db_path: The bucket name\n\n Keyword arguments:\n bucket_configuration", "id": "f575:c6:m0"} {"signature": "def init(self):", "body": "try:self.client.create_bucket(Bucket=self.db_path,CreateBucketConfiguration=self.bucket_configuration)except botocore.exceptions.ClientError as e:if '' not in str(e.response['']['']):raise e", "docstring": "Create a bucket.", "id": "f575:c6:m1"} {"signature": "def put(self, key):", "body": "self.client.put_object(Body=json.dumps(key),Bucket=self.db_path,Key=key[''])return key['']", "docstring": "Insert the key\n :return: Key name", "id": "f575:c6:m2"} {"signature": "def list(self):", "body": "response = self.client.list_objects_v2(Bucket=self.db_path)if u'' in response:keys = [key[u''] for key in response[u'']]keys_list = []for key_name in keys:key = self.get(key_name)keys_list.append(key)return keys_listreturn []", "docstring": "Lists the keys\n :return: Returns a list of all keys (not just key names, but rather\n the keys themselves).", "id": "f575:c6:m3"} {"signature": "def get(self, key_name):", "body": "try:obj = self.client.get_object(Bucket=self.db_path,Key=key_name)[''].read().decode(\"\")return json.loads(obj)except botocore.exceptions.ClientError as e:if '' in str(e.response['']['']):return {}raise e", "docstring": "Gets the key.\n :return: The key itself in a dictionary", "id": "f575:c6:m4"} {"signature": "def delete(self, key_name):", "body": "self.client.delete_object(Bucket=self.db_path,Key=key_name)return self.get(key_name) == {}", "docstring": "Delete the key.\n :return: True if it was deleted, False otherwise", "id": "f575:c6:m5"} {"signature": "@propertydef is_initialized(self):", "body": "try:return self.client.head_bucket(Bucket=self.db_path)['']['']== except botocore.exceptions.ClientError as e:if '' in str(e.response['']['']):return Falseraise e", "docstring": 
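The Consul storage's `_decode` docstring (f575:c3:m7) above states that the KV API returns items of the form `[{'Value': base-64-encoded-json, 'Key': keyname}]` and that only the value needs decoding. A minimal, self-contained sketch of that decode step, with a fabricated item for the round-trip check:

```python
import base64
import json

def decode_consul_value(item):
    """Decode one KV item shaped like {'Key': ..., 'Value': base64(json)}."""
    return json.loads(base64.b64decode(item['Value']).decode('utf-8'))

# Fabricated example item in the shape the docstring describes
key = {'name': 'aws', 'value': 'deadbeef', 'uid': '459f12c0'}
item = {
    'Key': 'ghost/aws',
    'Value': base64.b64encode(json.dumps(key).encode('utf-8')).decode('ascii'),
}
assert decode_consul_value(item) == key
```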
"Check if bucket exists.\n :return: True if initialized, False otherwise", "id": "f575:c6:m6"} {"signature": "def list(self, path=''):", "body": "return {'': {'': self.store.keys()}} if self.store.keys() else None", "docstring": "{\n 'lease_id': '',\n 'warnings': None,\n 'wrap_info': None,\n 'auth': None,\n 'lease_duration': 0,\n 'request_id': 'a0d5c74c-fe92-90ba-f73a-95b08cd4ec61',\n 'data': {\n 'keys': ['aws', 'aws2', 'awss', 'gcp', 'stored_passphrase']\n },\n 'renewable': False\n}", "id": "f576:c5:m1"} {"signature": "def read(self, path):", "body": "return self.store.get(os.path.basename(path))", "docstring": "{\n 'lease_id': '',\n 'warnings': None,\n 'wrap_info': None,\n 'auth': None,\n 'lease_duration': 2592000,\n 'request_id': 'accdb21c-5b70-06e7-c38a-4a589e9dd5d3',\n 'data': {\n 'uid': '0c5ce284-1300-4892-9e00-15e27e3db0d2',\n 'created_at': '2016-10-06 08:29:53',\n 'modified_at': '2016-10-06 08:29:53',\n 'value': 'encrypted_value',\n 'name': 'aws',\n 'metadata': None,\n 'description': None\n },\n 'renewable': False\n}", "id": "f576:c5:m2"} {"signature": "def create(self, index, ignore):", "body": "self.index_exists = True", "docstring": "{\n u'status': 400,\n u'error': {\n u'index': u'ghost',\n u'root_cause': [\n {\n u'index': u'ghost',\n u'reason': u'already exists',\n u'type': u'index_already_exists_exception'\n }\n ],\n u'type': u'index_already_exists_exception',\n u'reason': u'already exists'\n }\n}", "id": "f576:c7:m2"} {"signature": "def search(self, body, filter_path, **kwargs):", "body": "if '' in body['']:items = list(self.store.items())for name, key in items:return self.store[name]else:return {'': {'': []}}else:return self.store.get(body[''][''][''])", "docstring": "{\n u'hits': {\n u'hits': [\n {\n u'_id': u'AVewADAWUnUKEMeMQ4QB',\n u'_source': {\n u'description': None,\n u'created_at':\n u'2016-10-10 22:09:44',\n u'modified_at':\n u'2016-10-10 22:09:44',\n u'value': u'the_value',\n u'name': u'aws',\n u'uid': u'7a1caa7d-14d4-4045-842c-66adf22190b5',\n u'metadata': None\n }\n },\n ]\n }\n}", "id": "f576:c8:m1"} {"signature": "@click.group()@click.option('',type=click.Path(dir_okay=False, exists=True, resolve_path=True),help='''')@click.option('', default='', show_default=True,type=click.Choice(LOGLEVEL.keys()),help='')@click.option('', multiple=True,help='')@click.option('', '', '', count=True,help='')@click.pass_contextdef marv(ctx, config, loglevel, logfilter, verbosity):", "body": "if config is None:cwd = os.path.abspath(os.path.curdir)while cwd != os.path.sep:config = os.path.join(cwd, '')if os.path.exists(config):breakcwd = os.path.dirname(cwd)else:config = ''if not os.path.exists(config):config = Nonectx.obj = configsetup_logging(loglevel, verbosity, logfilter)", "docstring": "Manage a Marv site", "id": "f579:m3"} {"signature": "def cli():", "body": "for ep in iter_entry_points(group=''):ep.load()marv(auto_envvar_prefix='')", "docstring": "setuptools entry_point", "id": "f579:m4"} {"signature": "def __init__(self, username, password, token=None, cookies=None, appid=None, plugin_token=None, ifencodepwd=False,login=True, checkssl=False):", "body": "if not checkssl:disable_urllib3_warning() self.__username = usernameif ifencodepwd:self.__password = passwordelse:self.__password = hashlib.md5(password).hexdigest()self.__cookies = cookiesself.__lastmsgid = self.__token = tokenself.__ticket = Noneself.__ticket_id = Noneself.__fakeid = Noneself.__appid = appidself.__plugin_token = plugin_tokenif not self.__token or not self.__cookies:self.__token = ''self.__cookies = 
''self.__appid = ''self.__plugin_token = ''if login:self.login()", "docstring": ":param username: \u4f60\u7684\u5fae\u4fe1\u516c\u4f17\u5e73\u53f0\u8d26\u6237\u7528\u6237\u540d\n:param password: \u4f60\u7684\u5fae\u4fe1\u516c\u4f17\u5e73\u53f0\u8d26\u6237\u5bc6\u7801\n:param token: \u76f4\u63a5\u5bfc\u5165\u7684 ``token`` \u503c, \u8be5\u503c\u9700\u8981\u5728\u4e0a\u4e00\u6b21\u8be5\u7c7b\u5b9e\u4f8b\u5316\u4e4b\u540e\u624b\u52a8\u8fdb\u884c\u7f13\u5b58\u5e76\u5728\u6b64\u4f20\u5165, \u5982\u679c\u4e0d\u4f20\u5165, \u5c06\u4f1a\u5728\u5b9e\u4f8b\u5316\u7684\u65f6\u5019\u81ea\u52a8\u83b7\u53d6\n:param cookies: \u76f4\u63a5\u5bfc\u5165\u7684 ``cookies`` \u503c, \u8be5\u503c\u9700\u8981\u5728\u4e0a\u4e00\u6b21\u8be5\u7c7b\u5b9e\u4f8b\u5316\u4e4b\u540e\u624b\u52a8\u8fdb\u884c\u7f13\u5b58\u5e76\u5728\u6b64\u4f20\u5165, \u5982\u679c\u4e0d\u4f20\u5165, \u5c06\u4f1a\u5728\u5b9e\u4f8b\u5316\u7684\u65f6\u5019\u81ea\u52a8\u83b7\u53d6\n:param appid: \u76f4\u63a5\u5bfc\u5165\u7684 ``appid`` \u503c, \u8be5\u503c\u9700\u8981\u5728\u4e0a\u4e00\u6b21\u8be5\u7c7b\u5b9e\u4f8b\u5316\u4e4b\u540e\u624b\u52a8\u8fdb\u884c\u7f13\u5b58\u5e76\u5728\u6b64\u4f20\u5165, \u5982\u679c\u4e0d\u4f20\u5165, \u5c06\u4f1a\u5728\u8c03\u7528 stat_ \u5f00\u5934\u7684\u65b9\u6cd5(\u7edf\u8ba1\u5206\u6790\u7c7b)\u65f6\u81ea\u52a8\u83b7\u53d6\n:param plugin_token: \u76f4\u63a5\u5bfc\u5165\u7684 ``plugin_token`` \u503c, \u8be5\u503c\u9700\u8981\u5728\u4e0a\u4e00\u6b21\u8be5\u7c7b\u5b9e\u4f8b\u5316\u4e4b\u540e\u624b\u52a8\u8fdb\u884c\u7f13\u5b58\u5e76\u5728\u6b64\u4f20\u5165, \u5982\u679c\u4e0d\u4f20\u5165, \u5c06\u4f1a\u5728\u8c03\u7528 stat_ \u5f00\u5934\u7684\u65b9\u6cd5(\u7edf\u8ba1\u5206\u6790\u7c7b)\u65f6\u81ea\u52a8\u83b7\u53d6\n:param ifencodepwd: \u5bc6\u7801\u662f\u5426\u5df2\u7ecf\u7ecf\u8fc7\u7f16\u7801, \u5982\u679c\u5bc6\u7801\u5df2\u7ecf\u7ecf\u8fc7\u52a0\u5bc6, \u6b64\u5904\u4e3a ``True`` , \u5982\u679c\u4f20\u5165\u7684\u5bc6\u7801\u4e3a\u660e\u6587, \u6b64\u5904\u4e3a ``False``\n:param login: \u662f\u5426\u5728\u521d\u59cb\u5316\u8fc7\u7a0b\u4e2d\u5c1d\u8bd5\u767b\u5f55 (\u63a8\u8350\u6b64\u5904\u8bbe\u7f6e\u4e3a ``False``, \u7136\u540e\u624b\u52a8\u6267\u884c\u767b\u5f55\u4ee5\u65b9\u4fbf\u8fdb\u884c\u8bc6\u522b\u9a8c\u8bc1\u7801\u7b49\u64cd\u4f5c, \u6b64\u5904\u9ed8\u8ba4\u503c\u4e3a ``True`` \u4e3a\u517c\u5bb9\u5386\u53f2\u7248\u672c\n:param checkssl: \u662f\u5426\u68c0\u67e5 SSL, \u9ed8\u8ba4\u4e3a False, \u53ef\u907f\u514d urllib3 \u7684 InsecurePlatformWarning \u8b66\u544a", "id": "f584:c0:m0"} {"signature": "def login(self, verify_code=''):", "body": "url = ''payload = {'': self.__username,'': self.__password,'': verify_code,'': '',}headers = {'': '','': '','': self.__cookies,}r = requests.post(url, data=payload, headers=headers)s = re.search(r'', r.text)if not s:try:error_code = json.loads(r.text)['']['']except (KeyError, ValueError):raise LoginError(r.text)if error_code in [-, -]:raise LoginVerifyCodeError(r.text)elif re.search(r'', r.text):raise LoginError('')else:raise LoginError(r.text)self.__token = int(s.group())self.__cookies = ''for cookie in r.cookies:self.__cookies += cookie.name + '' + cookie.value + ''", "docstring": "\u767b\u5f55\u5fae\u4fe1\u516c\u4f17\u5e73\u53f0\n\u6ce8\u610f\u5728\u5b9e\u4f8b\u5316 ``WechatExt`` \u7684\u65f6\u5019\uff0c\u5982\u679c\u6ca1\u6709\u4f20\u5165 ``token`` \u53ca ``cookies`` \uff0c\u5c06\u4f1a\u81ea\u52a8\u8c03\u7528\u8be5\u65b9\u6cd5\uff0c\u65e0\u9700\u624b\u52a8\u8c03\u7528\n\u5f53\u4e14\u4ec5\u5f53\u6355\u83b7\u5230 ``NeedLoginError`` 
\u5f02\u5e38\u65f6\u624d\u9700\u8981\u8c03\u7528\u6b64\u65b9\u6cd5\u8fdb\u884c\u767b\u5f55\u91cd\u8bd5\n:param verify_code: \u9a8c\u8bc1\u7801, \u4e0d\u4f20\u5165\u5219\u4e3a\u65e0\u9a8c\u8bc1\u7801\n:raises LoginVerifyCodeError: \u9700\u8981\u9a8c\u8bc1\u7801\u6216\u9a8c\u8bc1\u7801\u51fa\u9519\uff0c\u8be5\u5f02\u5e38\u4e3a ``LoginError`` \u7684\u5b50\u7c7b\n:raises LoginError: \u767b\u5f55\u51fa\u9519\u5f02\u5e38\uff0c\u5f02\u5e38\u5185\u5bb9\u4e3a\u5fae\u4fe1\u670d\u52a1\u5668\u54cd\u5e94\u7684\u5185\u5bb9\uff0c\u53ef\u4f5c\u4e3a\u65e5\u5fd7\u8bb0\u5f55\u4e0b\u6765", "id": "f584:c0:m1"} {"signature": "def get_verify_code(self, file_path):", "body": "url = ''payload = {'': self.__username,'': int(random.random() * ),}headers = {'': '',}r = requests.get(url, data=payload, headers=headers, stream=True)self.__cookies = ''for cookie in r.cookies:self.__cookies += cookie.name + '' + cookie.value + ''with open(file_path, '') as fd:for chunk in r.iter_content():fd.write(chunk)", "docstring": "\u83b7\u53d6\u767b\u5f55\u9a8c\u8bc1\u7801\u5e76\u5b58\u50a8\n:param file_path: \u5c06\u9a8c\u8bc1\u7801\u56fe\u7247\u4fdd\u5b58\u7684\u6587\u4ef6\u8def\u5f84", "id": "f584:c0:m2"} {"signature": "def get_token_cookies(self):", "body": "return {'': self.__token,'': self.__cookies,}", "docstring": "\u83b7\u53d6\u5f53\u524d token \u53ca cookies, \u4f9b\u624b\u52a8\u7f13\u5b58\u4f7f\u7528\n\n\u8fd4\u56de dict \u793a\u4f8b::\n\n {\n 'cookies': 'bizuin=3086177907;data_bizuin=3086177907;data_ticket=AgWTXTpLL+FV+bnc9yLbb3V8;slave_sid=TERlMEJ1bWFCbTlmVnRLX0lLdUpRV0pyN2k1eVkzbWhiY0NfTHVjNFRZQk1DRDRfal82UzZKWTczR3I5TFpUYjRXUDBtN1h1cmJMRTkzS3hianBHOGpHaFM0eXJiNGp6cDFWUGpqbFNyMFlyQ05GWGpseVg2T2s2Sk5DRWpnRlE=;slave_user=gh_1b2959761a7d;',\n 'token': 373179898\n }\n\n:return: \u4e00\u4e2a dict \u5bf9\u8c61, key \u4e3a ``token`` \u548c ``cookies``", "id": "f584:c0:m3"} {"signature": "def get_plugin_token_appid(self):", "body": "self._init_plugin_token_appid()return {'': self.__plugin_token,'': self.__appid,}", "docstring": "\u83b7\u53d6\u5f53\u524d plugin_token \u53ca appid, \u4f9b\u624b\u52a8\u7f13\u5b58\u4f7f\u7528\n\n\u8fd4\u56de dict \u793a\u4f8b::\n\n {\n 'plugin_token': 'll1D85fGDCTr4AAxC_RrFIsfaM1eajMksOjZN_eXodroIeT77QkrMfckyYdG0qj8CnvWGUPp7-mpBOs07dbuG-iwULOcyjoEvlTsghm1K34C0oj3AI8egAxGqixxhRs8',\n 'appid': 'wxd0c09648a48b3798'\n }\n\n:return: \u4e00\u4e2a dict \u5bf9\u8c61, key \u4e3a ``plugin_token`` \u548c ``appid``", "id": "f584:c0:m4"} {"signature": "def send_message(self, fakeid, content):", "body": "url = ''payload = {'': fakeid,'': ,'': self.__token,'': content,'': ,}headers = {'': '','': ''.format(fakeid=fakeid,token=self.__token,),'': self.__cookies,}r = requests.post(url, data=payload, headers=headers)try:message = json.loads(r.text)except ValueError:raise NeedLoginError(r.text)try:if message[''][''] == -:raise ValueError('')if message[''][''] != :raise NeedLoginError(r.text)except KeyError:raise NeedLoginError(r.text)", "docstring": "\u4e3b\u52a8\u53d1\u9001\u6587\u672c\u6d88\u606f\n:param fakeid: \u7528\u6237\u7684 UID (\u5373 fakeid )\n:param content: \u53d1\u9001\u7684\u5185\u5bb9\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e\n:raises ValueError: \u53c2\u6570\u51fa\u9519, \u5177\u4f53\u5185\u5bb9\u6709 ``fake id not exist``", "id": "f584:c0:m5"} {"signature": "def get_user_list(self, page=, pagesize=, groupid=):", "body": "url = 
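The `get_token_cookies` entry (f584:c0:m3) above exists so that the session's `token` and `cookies` can be cached and fed back into the constructor on the next run (as the `__init__` parameters describe), skipping a fresh password login and any verify-code prompt. A hedged sketch of that cache cycle follows; the `wechat_sdk.ext` import path and the cache file name are assumptions, so adjust them to your installation.

```python
import json
import os

# Assumption: WechatExt is importable like this in wechat-python-sdk; adjust if needed.
from wechat_sdk.ext import WechatExt

CACHE_FILE = 'wechat_session.json'  # arbitrary cache location for this sketch

def make_client(username, password):
    """Reuse cached token/cookies when available, otherwise log in and cache them."""
    if os.path.exists(CACHE_FILE):
        with open(CACHE_FILE) as f:
            cached = json.load(f)
        return WechatExt(username, password, login=False,
                         token=cached['token'], cookies=cached['cookies'])

    client = WechatExt(username, password)        # performs the password login
    with open(CACHE_FILE, 'w') as f:
        json.dump(client.get_token_cookies(), f)  # {'token': ..., 'cookies': ...}
    return client
```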
''.format(pagesize=pagesize,page=page,groupid=groupid,token=self.__token,)headers = {'': '','': ''.format(pagesize=pagesize,page=page,token=self.__token,),'': self.__cookies,}r = requests.get(url, headers=headers)try:message = json.loads(r.text)['']except (KeyError, ValueError):raise NeedLoginError(r.text)return message", "docstring": "\u83b7\u53d6\u7528\u6237\u5217\u8868\n\n\u8fd4\u56deJSON\u793a\u4f8b ::\n\n {\n \"contacts\": [\n {\n \"id\": 2431798261,\n \"nick_name\": \"Doraemonext\",\n \"remark_name\": \"\",\n \"group_id\": 0\n },\n {\n \"id\": 896229760,\n \"nick_name\": \"\u5fae\u4fe1\u6635\u79f0\",\n \"remark_name\": \"\",\n \"group_id\": 0\n }\n ]\n }\n\n:param page: \u9875\u7801 (\u4ece 0 \u5f00\u59cb)\n:param pagesize: \u6bcf\u9875\u5927\u5c0f\n:param groupid: \u5206\u7ec4 ID\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m6"} {"signature": "def stat_article_detail_list(self, page=, start_date=str(date.today()+timedelta(days=-)), end_date=str(date.today())):", "body": "self._init_plugin_token_appid()url = ''.format(page=page,appid=self.__appid,token=self.__plugin_token,rnd=int(time.time()),start_date=start_date,end_date=end_date,)headers = {'': '','': ''.format(page=page,appid=self.__appid,token=self.__plugin_token,rnd=int(time.time()),start_date=start_date,end_date=end_date,),'': self.__cookies,}r = requests.get(url, headers=headers)if not re.search(r'', self.__cookies):for cookie in r.cookies:self.__cookies += cookie.name + '' + cookie.value + ''try:data = json.loads(r.text)if data.get(''):raise NeedLoginError(r.text)message = json.dumps(data, ensure_ascii=False)except (KeyError, ValueError):raise NeedLoginError(r.text)return message", "docstring": "\u83b7\u53d6\u56fe\u6587\u5206\u6790\u6570\u636e\n\n\u8fd4\u56deJSON\u793a\u4f8b ::\n\n {\n \"hasMore\": true, // \u8bf4\u660e\u662f\u5426\u53ef\u4ee5\u589e\u52a0 page \u9875\u7801\u6765\u83b7\u53d6\u6570\u636e\n \"data\": [\n {\n \"index\": [\n \"20,816\", // \u9001\u8fbe\u4eba\u6570\n \"1,944\", // \u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570\n \"2,554\", // \u56fe\u6587\u9875\u9605\u8bfb\u6b21\u6570\n \"9.34%\", // (\u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570 / \u9001\u8fbe\u4eba\u6570)\n \"0\", // \u539f\u6587\u9875\u9605\u8bfb\u4eba\u6570\n \"0\", // \u539f\u6587\u9875\u9605\u8bfb\u6b21\u6570\n \"0%\", // \uff08\u539f\u6587\u9875\u9605\u8bfb\u4eba\u6570 / \u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570)\n \"47\", // \u5206\u4eab\u8f6c\u53d1\u4eba\u6570\n \"61\", // \u5206\u4eab\u8f6c\u53d1\u6b21\u6570\n \"1\" // \u5fae\u4fe1\u6536\u85cf\u4eba\u6570\n ],\n \"time\": \"2015-01-21\",\n \"table_data\": 
\"{\\\"fields\\\":{\\\"TargetUser\\\":{\\\"thText\\\":\\\"\\\\u9001\\\\u8fbe\\\\u4eba\\\\u6570\\\",\\\"number\\\":false,\\\"colAlign\\\":\\\"center\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"IntPageReadUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"IntPageReadCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"PageConversion\\\":{\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":\\\"2\\\"},\\\"OriPageReadUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"OriPageReadCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"Conversion\\\":{\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":\\\"2\\\"},\\\"ShareUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"ShareCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"AddToFavUser\\\":{\\\"thText\\\":\\\"\\\\u5fae\\\\u4fe1\\\\u6536\\\\u85cf\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0}},\\\"data\\\":[{\\\"MsgId\\\":\\\"205104027_1\\\",\\\"Title\\\":\\\"\\\\u56de\\\\u5bb6\\\\u5927\\\\u4f5c\\\\u6218 | 
\\\\u5feb\\\\u6765\\\\u5e26\\\\u6211\\\\u56de\\\\u5bb6\\\",\\\"RefDate\\\":\\\"20150121\\\",\\\"TargetUser\\\":\\\"20,816\\\",\\\"IntPageReadUser\\\":\\\"1,944\\\",\\\"IntPageReadCount\\\":\\\"2,554\\\",\\\"OriPageReadUser\\\":\\\"0\\\",\\\"OriPageReadCount\\\":\\\"0\\\",\\\"ShareUser\\\":\\\"47\\\",\\\"ShareCount\\\":\\\"61\\\",\\\"AddToFavUser\\\":\\\"1\\\",\\\"Conversion\\\":\\\"0%\\\",\\\"PageConversion\\\":\\\"9.34%\\\"}],\\\"fixedRow\\\":false,\\\"cssSetting\\\":{\\\"\\\":\\\"\\\"},\\\"complexHeader\\\":[[{\\\"field\\\":\\\"TargetUser\\\",\\\"thText\\\":\\\"\\\\u9001\\\\u8fbe\\\\u4eba\\\\u6570\\\",\\\"rowSpan\\\":2,\\\"colSpan\\\":1},{\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u9875\\\\u9605\\\\u8bfb\\\",\\\"colSpan\\\":3},{\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u9875\\\\u9605\\\\u8bfb\\\",\\\"colSpan\\\":3},{\\\"thText\\\":\\\"\\\\u5206\\\\u4eab\\\\u8f6c\\\\u53d1\\\",\\\"colSpan\\\":2},{\\\"field\\\":\\\"AddToFavUser\\\",\\\"thText\\\":\\\"\\\\u5fae\\\\u4fe1\\\\u6536\\\\u85cf\\\\u4eba\\\\u6570\\\",\\\"rowSpan\\\":2,\\\"enable\\\":true}],[{\\\"field\\\":\\\"IntPageReadUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"IntPageReadCount\\\",\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\"},{\\\"field\\\":\\\"PageConversion\\\",\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\"},{\\\"field\\\":\\\"OriPageReadUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"OriPageReadCount\\\",\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\"},{\\\"field\\\":\\\"Conversion\\\",\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\"},{\\\"field\\\":\\\"ShareUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"ShareCount\\\",\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\"}]]}\",\n \"id\": \"205104027_1\",\n \"title\": \"\u56de\u5bb6\u5927\u4f5c\u6218 | \u5feb\u6765\u5e26\u6211\u56de\u5bb6\"\n },\n {\n \"index\": [\n \"20,786\", // \u9001\u8fbe\u4eba\u6570\n \"2,598\", // \u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570\n \"3,368\", // \u56fe\u6587\u9875\u9605\u8bfb\u6b21\u6570\n \"12.5%\", // (\u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570 / \u9001\u8fbe\u4eba\u6570)\n \"0\", // \u539f\u6587\u9875\u9605\u8bfb\u4eba\u6570\n \"0\", // \u539f\u6587\u9875\u9605\u8bfb\u6b21\u6570\n \"0%\", // \uff08\u539f\u6587\u9875\u9605\u8bfb\u4eba\u6570 / \u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570)\n \"73\", // \u5206\u4eab\u8f6c\u53d1\u4eba\u6570\n \"98\", // \u5206\u4eab\u8f6c\u53d1\u6b21\u6570\n \"1\" // \u5fae\u4fe1\u6536\u85cf\u4eba\u6570\n ],\n \"time\": \"2015-01-20\",\n \"table_data\": 
\"{\\\"fields\\\":{\\\"TargetUser\\\":{\\\"thText\\\":\\\"\\\\u9001\\\\u8fbe\\\\u4eba\\\\u6570\\\",\\\"number\\\":false,\\\"colAlign\\\":\\\"center\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"IntPageReadUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"IntPageReadCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"PageConversion\\\":{\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":\\\"2\\\"},\\\"OriPageReadUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"OriPageReadCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"Conversion\\\":{\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":\\\"2\\\"},\\\"ShareUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"ShareCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"AddToFavUser\\\":{\\\"thText\\\":\\\"\\\\u5fae\\\\u4fe1\\\\u6536\\\\u85cf\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0}},\\\"data\\\":[{\\\"MsgId\\\":\\\"205066833_1\\\",\\\"Title\\\":\\\"\\\\u56de\\\\u5bb6\\\\u5927\\\\u4f5c\\\\u6218 | 
\\\\u5982\\\\u4f55\\\\u4f18\\\\u96c5\\\\u5730\\\\u53bb\\\\u5f80\\\\u8f66\\\\u7ad9\\\\u548c\\\\u673a\\\\u573a\\\",\\\"RefDate\\\":\\\"20150120\\\",\\\"TargetUser\\\":\\\"20,786\\\",\\\"IntPageReadUser\\\":\\\"2,598\\\",\\\"IntPageReadCount\\\":\\\"3,368\\\",\\\"OriPageReadUser\\\":\\\"0\\\",\\\"OriPageReadCount\\\":\\\"0\\\",\\\"ShareUser\\\":\\\"73\\\",\\\"ShareCount\\\":\\\"98\\\",\\\"AddToFavUser\\\":\\\"1\\\",\\\"Conversion\\\":\\\"0%\\\",\\\"PageConversion\\\":\\\"12.5%\\\"}],\\\"fixedRow\\\":false,\\\"cssSetting\\\":{\\\"\\\":\\\"\\\"},\\\"complexHeader\\\":[[{\\\"field\\\":\\\"TargetUser\\\",\\\"thText\\\":\\\"\\\\u9001\\\\u8fbe\\\\u4eba\\\\u6570\\\",\\\"rowSpan\\\":2,\\\"colSpan\\\":1},{\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u9875\\\\u9605\\\\u8bfb\\\",\\\"colSpan\\\":3},{\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u9875\\\\u9605\\\\u8bfb\\\",\\\"colSpan\\\":3},{\\\"thText\\\":\\\"\\\\u5206\\\\u4eab\\\\u8f6c\\\\u53d1\\\",\\\"colSpan\\\":2},{\\\"field\\\":\\\"AddToFavUser\\\",\\\"thText\\\":\\\"\\\\u5fae\\\\u4fe1\\\\u6536\\\\u85cf\\\\u4eba\\\\u6570\\\",\\\"rowSpan\\\":2,\\\"enable\\\":true}],[{\\\"field\\\":\\\"IntPageReadUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"IntPageReadCount\\\",\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\"},{\\\"field\\\":\\\"PageConversion\\\",\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\"},{\\\"field\\\":\\\"OriPageReadUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"OriPageReadCount\\\",\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\"},{\\\"field\\\":\\\"Conversion\\\",\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\"},{\\\"field\\\":\\\"ShareUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"ShareCount\\\",\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\"}]]}\",\n \"id\": \"205066833_1\",\n \"title\": \"\u56de\u5bb6\u5927\u4f5c\u6218 | \u5982\u4f55\u4f18\u96c5\u5730\u53bb\u5f80\u8f66\u7ad9\u548c\u673a\u573a\"\n },\n {\n \"index\": [\n \"20,745\", // \u9001\u8fbe\u4eba\u6570\n \"1,355\", // \u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570\n \"1,839\", // \u56fe\u6587\u9875\u9605\u8bfb\u6b21\u6570\n \"6.53%\", // (\u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570 / \u9001\u8fbe\u4eba\u6570)\n \"145\", // \u539f\u6587\u9875\u9605\u8bfb\u4eba\u6570\n \"184\", // \u539f\u6587\u9875\u9605\u8bfb\u6b21\u6570\n \"10.7%\", // \uff08\u539f\u6587\u9875\u9605\u8bfb\u4eba\u6570 / \u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570)\n \"48\", // \u5206\u4eab\u8f6c\u53d1\u4eba\u6570\n \"64\", // \u5206\u4eab\u8f6c\u53d1\u6b21\u6570\n \"5\" // \u5fae\u4fe1\u6536\u85cf\u4eba\u6570\n ],\n \"time\": \"2015-01-19\",\n \"table_data\": 
\"{\\\"fields\\\":{\\\"TargetUser\\\":{\\\"thText\\\":\\\"\\\\u9001\\\\u8fbe\\\\u4eba\\\\u6570\\\",\\\"number\\\":false,\\\"colAlign\\\":\\\"center\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"IntPageReadUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"IntPageReadCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"PageConversion\\\":{\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":\\\"2\\\"},\\\"OriPageReadUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"OriPageReadCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"Conversion\\\":{\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":\\\"2\\\"},\\\"ShareUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"ShareCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"AddToFavUser\\\":{\\\"thText\\\":\\\"\\\\u5fae\\\\u4fe1\\\\u6536\\\\u85cf\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0}},\\\"data\\\":[{\\\"MsgId\\\":\\\"205028693_1\\\",\\\"Title\\\":\\\"\\\\u5145\\\\u7535\\\\u65f6\\\\u95f4 | 
\\\\u542c\\\\u542c\\\\u7535\\\\u53f0\\\\uff0c\\\\u4f18\\\\u96c5\\\\u5730\\\\u63d0\\\\u5347\\\\u5b66\\\\u4e60\\\\u6548\\\\u7387\\\",\\\"RefDate\\\":\\\"20150119\\\",\\\"TargetUser\\\":\\\"20,745\\\",\\\"IntPageReadUser\\\":\\\"1,355\\\",\\\"IntPageReadCount\\\":\\\"1,839\\\",\\\"OriPageReadUser\\\":\\\"145\\\",\\\"OriPageReadCount\\\":\\\"184\\\",\\\"ShareUser\\\":\\\"48\\\",\\\"ShareCount\\\":\\\"64\\\",\\\"AddToFavUser\\\":\\\"5\\\",\\\"Conversion\\\":\\\"10.7%\\\",\\\"PageConversion\\\":\\\"6.53%\\\"}],\\\"fixedRow\\\":false,\\\"cssSetting\\\":{\\\"\\\":\\\"\\\"},\\\"complexHeader\\\":[[{\\\"field\\\":\\\"TargetUser\\\",\\\"thText\\\":\\\"\\\\u9001\\\\u8fbe\\\\u4eba\\\\u6570\\\",\\\"rowSpan\\\":2,\\\"colSpan\\\":1},{\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u9875\\\\u9605\\\\u8bfb\\\",\\\"colSpan\\\":3},{\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u9875\\\\u9605\\\\u8bfb\\\",\\\"colSpan\\\":3},{\\\"thText\\\":\\\"\\\\u5206\\\\u4eab\\\\u8f6c\\\\u53d1\\\",\\\"colSpan\\\":2},{\\\"field\\\":\\\"AddToFavUser\\\",\\\"thText\\\":\\\"\\\\u5fae\\\\u4fe1\\\\u6536\\\\u85cf\\\\u4eba\\\\u6570\\\",\\\"rowSpan\\\":2,\\\"enable\\\":true}],[{\\\"field\\\":\\\"IntPageReadUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"IntPageReadCount\\\",\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\"},{\\\"field\\\":\\\"PageConversion\\\",\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\"},{\\\"field\\\":\\\"OriPageReadUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"OriPageReadCount\\\",\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\"},{\\\"field\\\":\\\"Conversion\\\",\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\"},{\\\"field\\\":\\\"ShareUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"ShareCount\\\",\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\"}]]}\",\n \"id\": \"205028693_1\",\n \"title\": \"\u5145\u7535\u65f6\u95f4 | \u542c\u542c\u7535\u53f0\uff0c\u4f18\u96c5\u5730\u63d0\u5347\u5b66\u4e60\u6548\u7387\"\n }\n ]\n }\n\n:param page: \u9875\u7801 (\u7531\u4e8e\u817e\u8baf\u63a5\u53e3\u9650\u5236\uff0cpage \u4ece 1 \u5f00\u59cb\uff0c3 \u6761\u6570\u636e\u4e3a 1 \u9875)\n:param start_date: \u5f00\u59cb\u65f6\u95f4\uff0c\u9ed8\u8ba4\u662f\u4eca\u5929-30\u5929 (\u7c7b\u578b: str \u683c\u5f0f\u793a\u4f8b: \"2015-01-15\")\n:param end_date: \u7ed3\u675f\u65f6\u95f4\uff0c\u9ed8\u8ba4\u662f\u4eca\u5929 (\u7c7b\u578b: str \u683c\u5f0f\u793a\u4f8b: \"2015-02-01\")\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\uff0c\u5177\u4f53\u7684\u5404\u9879\u5185\u5bb9\u89e3\u91ca\u53c2\u89c1\u4e0a\u9762\u7684 JSON \u8fd4\u56de\u793a\u4f8b\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m7"} {"signature": "def get_group_list(self):", "body": "url = ''.format(token=self.__token,)headers = {'': '','': ''.format(token=self.__token,),'': self.__cookies,}r = requests.get(url, headers=headers)try:message = json.loads(r.text)['']except (KeyError, ValueError):raise NeedLoginError(r.text)return message", "docstring": "\u83b7\u53d6\u5206\u7ec4\u5217\u8868\n\n\u8fd4\u56deJSON\u793a\u4f8b::\n\n {\n \"groups\": [\n {\n \"cnt\": 8,\n \"id\": 0,\n \"name\": \"\u672a\u5206\u7ec4\"\n },\n {\n \"cnt\": 0,\n \"id\": 1,\n \"name\": \"\u9ed1\u540d\u5355\"\n },\n {\n \"cnt\": 0,\n \"id\": 2,\n \"name\": \"\u661f\u6807\u7ec4\"\n }\n ]\n }\n\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\n:raises 
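The `stat_article_detail_list` entry (f584:c0:m7) above defaults its reporting window to the last 30 days (its docstring: start defaults to today minus 30 days, end to today) and pages from 1 with 3 records per page. A small sketch of preparing those arguments the same way the signature does:

```python
from datetime import date, timedelta

def default_stat_window(days_back=30):
    """Build the (start_date, end_date) strings the API expects, e.g. '2015-01-15'."""
    today = date.today()
    return str(today + timedelta(days=-days_back)), str(today)

start_date, end_date = default_stat_window()
print(start_date, end_date)
# Paging note from the docstring: page starts at 1 and each page holds 3 records,
# so keep requesting page += 1 while the response reports "hasMore": true.
```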
NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m8"} {"signature": "def get_news_list(self, page, pagesize=):", "body": "begin = page * pagesizeurl = \"\".format(token=self.__token,begin=begin,pagesize=pagesize,random=round(random.random(), ),)headers = {'': '','': ''.format(token=self.__token,),'': self.__cookies,}r = requests.get(url, headers=headers)try:message = json.dumps(json.loads(r.text)[''][''], ensure_ascii=False)except (KeyError, ValueError):raise NeedLoginError(r.text)return message", "docstring": "\u83b7\u53d6\u56fe\u6587\u4fe1\u606f\u5217\u8868\n\n\u8fd4\u56deJSON\u793a\u4f8b::\n\n [\n {\n \"multi_item\": [\n {\n \"seq\": 0,\n \"title\": \"98\u8def\u516c\u4ea4\u7ebf\u8def\",\n \"show_cover_pic\": 1,\n \"author\": \"\",\n \"cover\": \"https://mmbiz.qlogo.cn/mmbiz/D2pflbZwStFibz2Sb1kWOuHrxtDMPKJic3GQgcgkDSoEm668gClFVDt3BR8GGQ5eB8HoL4vDezzKtSblIjckOf7A/0\",\n \"content_url\": \"http://mp.weixin.qq.com/s?__biz=MjM5MTA2ODcwOA==&mid=204884970&idx=1&sn=bf25c51f07260d4ed38305a1cbc0ce0f#rd\",\n \"source_url\": \"\",\n \"file_id\": 204884939,\n \"digest\": \"98\u8def\u7ebf\u8def1.\u519c\u5927- 2.\u91d1\u9633\u5c0f\u533a- 3.\u5e02\u5ba2\u8fd0\u53f8- 4.\u5e02\u5236\u836f\u5382- 5.\u65b0\u519c\u5927- 6.\u72ec\u5c71\u5b50\u9152\u5e97- 7.\u4e09\"\n }\n ],\n \"seq\": 0,\n \"title\": \"98\u8def\u516c\u4ea4\u7ebf\u8def\",\n \"show_cover_pic\": 1,\n \"author\": \"\",\n \"app_id\": 204884970,\n \"content_url\": \"http://mp.weixin.qq.com/s?__biz=MjM5MTA2ODcwOA==&mid=204884970&idx=1&sn=bf25c51f07260d4ed38305a1cbc0ce0f#rd\",\n \"create_time\": \"1405237966\",\n \"file_id\": 204884939,\n \"img_url\": \"https://mmbiz.qlogo.cn/mmbiz/D2pflbZwStFibz2Sb1kWOuHrxtDMPKJic3GQgcgkDSoEm668gClFVDt3BR8GGQ5eB8HoL4vDezzKtSblIjckOf7A/0\",\n \"digest\": \"98\u8def\u7ebf\u8def1.\u519c\u5927- 2.\u91d1\u9633\u5c0f\u533a- 3.\u5e02\u5ba2\u8fd0\u53f8- 4.\u5e02\u5236\u836f\u5382- 5.\u65b0\u519c\u5927- 6.\u72ec\u5c71\u5b50\u9152\u5e97- 7.\u4e09\"\n },\n {\n \"multi_item\": [\n {\n \"seq\": 0,\n \"title\": \"2013\u5e74\u65b0\u7586\u8f6f\u4ef6\u56ed\u5927\u4e8b\u8bb0\",\n \"show_cover_pic\": 0,\n \"author\": \"\",\n \"cover\": \"https://mmbiz.qlogo.cn/mmbiz/D2pflbZwStFibz2Sb1kWOuHrxtDMPKJic3icvFgkxZRyIrkLbic9I5ZKLa3XB8UqNlkT8CYibByHuraSvVoeSzdTRLQ/0\",\n \"content_url\": \"http://mp.weixin.qq.com/s?__biz=MjM5MTA2ODcwOA==&mid=204883415&idx=1&sn=68d62215052d29ece3f2664e9c4e8cab#rd\",\n \"source_url\": \"\",\n \"file_id\": 204883412,\n \"digest\": \"1\u67081\uff0e\u65b0\u7586\u8f6f\u4ef6\u56ed\u5c55\u5385\u8bbe\u8ba1\u65b9\u6848\u6c47\u62a5\u4f1a2013\u5e741\u670815\u65e5\u5728\u7ef4\u6cf0\u5927\u53a64\u697c9\u53f7\u4f1a\u8bae\u5ba4\u53ec\u5f00\u65b0\u7586\u8f6f\u4ef6\u56ed\u5c55\u5385\u8bbe\u8ba1\u5de5\u4f5c\u5b8c\"\n },\n {\n \"seq\": 1,\n \"title\": \"2012\u5e74\u65b0\u7586\u8f6f\u4ef6\u56ed\u5927\u4e8b\u8bb0\",\n \"show_cover_pic\": 0,\n \"author\": \"\",\n \"cover\": \"https://mmbiz.qlogo.cn/mmbiz/D2pflbZwStFibz2Sb1kWOuHrxtDMPKJic3oErGEhSicRQc82icibxZOZ2YAGNgiaGYfOFYppmPzOOS0v1xfZ1nvyT58g/0\",\n \"content_url\": \"http://mp.weixin.qq.com/s?__biz=MjM5MTA2ODcwOA==&mid=204883415&idx=2&sn=e7db9b30d770c85c61008d2f523b8610#rd\",\n \"source_url\": \"\",\n \"file_id\": 204883398,\n \"digest\": 
\"1\u67081\uff0e\u65b0\u7586\u8f6f\u4ef6\u56ed\u73af\u8bc4\u987a\u5229\u901a\u8fc7\u4e13\u5bb6\u4f1a\u8bc4\u5ba12012\u5e741\u670830\u65e5\uff0c\u65b0\u7586\u8f6f\u4ef6\u56ed\u73af\u5883\u5f71\u54cd\u8bc4\u4ef7\u987a\u5229\u901a\u8fc7\u4e13\u5bb6\u4f1a\u8bc4\u5ba1\uff0c\u4e0e\u4f1a\"\n },\n {\n \"seq\": 2,\n \"title\": \"2011\u5e74\u65b0\u7586\u8f6f\u4ef6\u56ed\u5927\u4e8b\u8bb0\",\n \"show_cover_pic\": 0,\n \"author\": \"\",\n \"cover\": \"https://mmbiz.qlogo.cn/mmbiz/D2pflbZwStFibz2Sb1kWOuHrxtDMPKJic3qA7tEN8GvkgDwnOfKsGsicJeQ6PxQSgWuJXfQaXkpM4VNlQicOWJM4Tg/0\",\n \"content_url\": \"http://mp.weixin.qq.com/s?__biz=MjM5MTA2ODcwOA==&mid=204883415&idx=3&sn=4cb1c6d25cbe6dfeff37f52a62532bd0#rd\",\n \"source_url\": \"\",\n \"file_id\": 204883393,\n \"digest\": \"6\u67081\uff0e\u8f6f\u4ef6\u56ed\u53ec\u5f00\u7b2c\u4e00\u6b21\u5efa\u8bbe\u9886\u5bfc\u5c0f\u7ec4\u4f1a\u8bae2011\u5e746\u67087\u65e5\uff0c\u7b2c\u4e00\u6b21\u8f6f\u4ef6\u56ed\u5efa\u8bbe\u9886\u5bfc\u5c0f\u7ec4\u4f1a\u8bae\u53ec\u5f00\uff0c\u4f1a\u8bae\u8ba4\u4e3a\uff0c\u65b0\u7586\"\n },\n {\n \"seq\": 3,\n \"title\": \"2010\u5e74\u65b0\u7586\u8f6f\u4ef6\u56ed\u5927\u4e8b\u8bb0\",\n \"show_cover_pic\": 0,\n \"author\": \"\",\n \"cover\": \"https://mmbiz.qlogo.cn/mmbiz/D2pflbZwStFibz2Sb1kWOuHrxtDMPKJic3YG4sSuf9X9ecMPjDRju842IbIvpFWK7tuZs0Po4kZCz4URzOBj5rnQ/0\",\n \"content_url\": \"http://mp.weixin.qq.com/s?__biz=MjM5MTA2ODcwOA==&mid=204883415&idx=4&sn=4319f7f051f36ed972e2f05a221738ec#rd\",\n \"source_url\": \"\",\n \"file_id\": 204884043,\n \"digest\": \"5\u67081\uff0e\u65b0\u7586\u8f6f\u4ef6\u56ed\u4e0e\u5f00\u53d1\u533a\uff08\u5934\u5c6f\u6cb3\u533a\uff09\u7ba1\u59d4\u4f1a\u3001\u7ecf\u4fe1\u59d4\u7b7e\u7f72\u300a\u65b0\u7586\u8f6f\u4ef6\u56ed\u5efa\u8bbe\u6218\u7565\u5408\u4f5c\u534f\u8bae\u300b2010\u5e745\u670812\u65e5\uff0c\"\n }\n ],\n \"seq\": 1,\n \"title\": \"2013\u5e74\u65b0\u7586\u8f6f\u4ef6\u56ed\u5927\u4e8b\u8bb0\",\n \"show_cover_pic\": 0,\n \"author\": \"\",\n \"app_id\": 204883415,\n \"content_url\": \"http://mp.weixin.qq.com/s?__biz=MjM5MTA2ODcwOA==&mid=204883415&idx=1&sn=68d62215052d29ece3f2664e9c4e8cab#rd\",\n \"create_time\": \"1405232974\",\n \"file_id\": 204883412,\n \"img_url\": \"https://mmbiz.qlogo.cn/mmbiz/D2pflbZwStFibz2Sb1kWOuHrxtDMPKJic3icvFgkxZRyIrkLbic9I5ZKLa3XB8UqNlkT8CYibByHuraSvVoeSzdTRLQ/0\",\n \"digest\": \"1\u67081\uff0e\u65b0\u7586\u8f6f\u4ef6\u56ed\u5c55\u5385\u8bbe\u8ba1\u65b9\u6848\u6c47\u62a5\u4f1a2013\u5e741\u670815\u65e5\u5728\u7ef4\u6cf0\u5927\u53a64\u697c9\u53f7\u4f1a\u8bae\u5ba4\u53ec\u5f00\u65b0\u7586\u8f6f\u4ef6\u56ed\u5c55\u5385\u8bbe\u8ba1\u5de5\u4f5c\u5b8c\"\n }\n ]\n\n:param page: \u9875\u7801 (\u4ece 0 \u5f00\u59cb)\n:param pagesize: \u6bcf\u9875\u6570\u76ee\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m9"} {"signature": "def get_dialog_message(self, fakeid, last_msgid=, create_time=):", "body": "self._init_fakeid()url = ''.format(fakeid=fakeid,fromfakeid=self.__fakeid,last_msgid=last_msgid,create_time=create_time,token=self.__token,)headers = {'': '','': ''.format(token=self.__token),'': self.__cookies,}r = requests.get(url, headers=headers)try:message = json.dumps(json.loads(r.text)[''], ensure_ascii=False)except (KeyError, ValueError):raise NeedLoginError(r.text)return message", "docstring": 
"\u83b7\u53d6\u4e0e\u6307\u5b9a\u7528\u6237\u7684\u5bf9\u8bdd\u5185\u5bb9, \u83b7\u53d6\u7684\u5185\u5bb9\u7531 ``last_msgid`` (\u9700\u8981\u83b7\u53d6\u7684\u5bf9\u8bdd\u4e2d\u65f6\u95f4\u6700\u65e9\u7684 **\u516c\u4f17\u53f7\u53d1\u9001\u7ed9\u7528\u6237** \u7684\u6d88\u606fID) \u548c ``create_time`` (\u9700\u8981\u83b7\u53d6\u7684\u5bf9\u8bdd\u4e2d\u65f6\u95f4\u6700\u65e9\u7684\u6d88\u606f\u65f6\u95f4\u6233) \u8fdb\u884c\u8fc7\u6ee4\n\n\u6d88\u606f\u8fc7\u6ee4\u89c4\u5219:\n\n1. \u9996\u5148\u6309\u7167 ``last_msgid`` \u8fc7\u6ee4 (\u4e0d\u9700\u8981\u6309\u7167 ``last_msgid`` \u8fc7\u6ee4\u5219\u4e0d\u9700\u8981\u4f20\u5165\u6b64\u53c2\u6570)\n\n a. ``fakeid`` \u4e3a\u7528\u6237 UID\n b. \u901a\u8fc7 ``last_msgid`` \u53bb\u5339\u914d\u516c\u4f17\u53f7\u8fc7\u53bb\u53d1\u9001\u7ed9\u7528\u6237\u7684\u67d0\u4e00\u6761\u6d88\u606f\n c. \u5982\u679c\u5339\u914d\u6210\u529f, \u5219\u8fd4\u56de\u8fd9\u6761\u6d88\u606f\u4e4b\u540e\u4e0e\u8fd9\u4e2a\u7528\u6237\u76f8\u5173\u7684\u6240\u6709\u6d88\u606f\u5185\u5bb9 (\u5305\u62ec\u53d1\u9001\u7684\u6d88\u606f\u548c\u63a5\u6536\u7684)\n d. \u5982\u679c\u5339\u914d\u5931\u8d25 (\u6ca1\u6709\u627e\u5230), \u5219\u8fd4\u56de\u4e0e\u8fd9\u4e2a\u7528\u6237\u76f8\u5173\u7684\u6240\u6709\u6d88\u606f (\u5305\u62ec\u53d1\u9001\u7684\u6d88\u606f\u548c\u63a5\u6536\u7684)\n\n2. \u7b2c\u4e00\u6761\u89c4\u5219\u8fd4\u56de\u7684\u6d88\u606f\u5185\u5bb9\u63a5\u7740\u6309\u7167 ``create_time`` \u8fdb\u884c\u8fc7\u6ee4, \u8fd4\u56de ``create_time`` \u65f6\u95f4\u6233\u4e4b\u65f6\u53ca\u4e4b\u540e\u7684\u6240\u6709\u6d88\u606f (\u4e0d\u9700\u8981\u6309\u7167 ``create_time`` \u8fc7\u6ee4\u5219\u4e0d\u9700\u8981\u4f20\u5165\u6b64\u53c2\u6570)\n\n\u8fd4\u56deJSON\u793a\u4f8b::\n\n {\n \"to_nick_name\": \"Doraemonext\",\n \"msg_items\": {\n \"msg_item\": [\n {\n \"date_time\": 1408671873,\n \"has_reply\": 0,\n \"multi_item\": [ ],\n \"msg_status\": 4,\n \"nick_name\": \"Doraemonext\",\n \"to_uin\": 2391068708,\n \"content\": \"\u4f60\u5462\",\n \"source\": \"\",\n \"fakeid\": \"844735403\",\n \"send_stat\": {\n \"fail\": 0,\n \"succ\": 0,\n \"total\": 0\n },\n \"refuse_reason\": \"\",\n \"type\": 1,\n \"id\": 206439567\n },\n {\n \"date_time\": 1408529750,\n \"send_stat\": {\n \"fail\": 0,\n \"succ\": 0,\n \"total\": 0\n },\n \"app_sub_type\": 3,\n \"multi_item\": [\n {\n \"seq\": 0,\n \"title\": \"\u8f6f\u4ef6\u4f01\u4e1a\u6709\u671b\u62ce\u5305\u5165\u4f4f\u65b0\u7586\u8f6f\u4ef6\u56ed\",\n \"show_cover_pic\": 1,\n \"author\": \"\",\n \"cover\": \"https://mmbiz.qlogo.cn/mmbiz/D2pflbZwStFibz2Sb1kWOuHrxtDMPKJic3oErGEhSicRQc82icibxZOZ2YAGNgiaGYfOFYppmPzOOS0v1xfZ1nvyT58g/0\",\n \"content_url\": \"http://mp.weixin.qq.com/s?__biz=MjM5MTA2ODcwOA==&mid=204885255&idx=1&sn=40e07d236a497e36d2d3e9711dfe090a#rd\",\n \"source_url\": \"\",\n \"content\": \"\",\n \"file_id\": 204885252,\n \"vote_id\": [ ],\n \"digest\": \"12\u67088\u65e5\uff0c\u56fd\u5bb6\u8f6f\u4ef6\u516c\u5171\u670d\u52a1\u5e73\u53f0\u65b0\u7586\u5206\u5e73\u53f0\u5728\u4e4c\u9c81\u6728\u9f50\u7ecf\u6d4e\u6280\u672f\u5f00\u53d1\u533a\uff08\u5934\u5c6f\u6cb3\u533a\uff09\u63ed\u724c\u3002\u8fd9\u610f\u5473\u7740\uff0c\u8f6f\u4ef6\u4f01\u4e1a\u6709\"\n }\n ],\n \"msg_status\": 2,\n \"title\": \"\u8f6f\u4ef6\u4f01\u4e1a\u6709\u671b\u62ce\u5305\u5165\u4f4f\u65b0\u7586\u8f6f\u4ef6\u56ed\",\n \"nick_name\": \"Doraemonext\",\n \"to_uin\": 844735403,\n \"content_url\": \"http://mp.weixin.qq.com/s?__biz=MjM5MTA2ODcwOA==&mid=204885255&idx=1&sn=40e07d236a497e36d2d3e9711dfe090a#rd\",\n \"show_type\": 1,\n \"content\": \"\",\n 
\"source\": \"biz\",\n \"fakeid\": \"2391068708\",\n \"file_id\": 204885252,\n \"has_reply\": 0,\n \"refuse_reason\": \"\",\n \"type\": 6,\n \"id\": 206379033,\n \"desc\": \"12\u67088\u65e5\uff0c\u56fd\u5bb6\u8f6f\u4ef6\u516c\u5171\u670d\u52a1\u5e73\u53f0\u65b0\u7586\u5206\u5e73\u53f0\u5728\u4e4c\u9c81\u6728\u9f50\u7ecf\u6d4e\u6280\u672f\u5f00\u53d1\u533a\uff08\u5934\u5c6f\u6cb3\u533a\uff09\u63ed\u724c\u3002\u8fd9\u610f\u5473\u7740\uff0c\u8f6f\u4ef6\u4f01\u4e1a\u6709\"\n }\n ]\n }\n }\n\n:param fakeid: \u7528\u6237 UID (\u5373 fakeid )\n:param last_msgid: \u516c\u4f17\u53f7\u4e4b\u524d\u53d1\u9001\u7ed9\u7528\u6237(fakeid)\u7684\u6d88\u606f ID, \u4e3a 0 \u5219\u8868\u793a\u5168\u90e8\u6d88\u606f\n:param create_time: \u83b7\u53d6\u8fd9\u4e2a\u65f6\u95f4\u6233\u4e4b\u65f6\u53ca\u4e4b\u540e\u7684\u6d88\u606f\uff0c\u4e3a 0 \u5219\u8868\u793a\u5168\u90e8\u6d88\u606f\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m10"} {"signature": "def send_news(self, fakeid, msgid):", "body": "url = ''payload = {'': '','': '','': fakeid,'': ,'': self.__token,'': msgid,'': msgid,'': '','': ,'': random.random(),}headers = {'': '','': ''.format(fakeid=fakeid,),'': self.__cookies,}r = requests.post(url, data=payload, headers=headers)try:message = json.loads(r.text)except ValueError:raise NeedLoginError(r.text)try:if message[''][''] == or message[''][''] == -:raise ValueError('')if message[''][''] == :raise ValueError('')if message[''][''] != :raise NeedLoginError(r.text)except KeyError:raise NeedLoginError(r.text)", "docstring": "\u5411\u6307\u5b9a\u7528\u6237\u53d1\u9001\u56fe\u6587\u6d88\u606f \uff08\u5fc5\u987b\u4ece\u56fe\u6587\u5e93\u91cc\u9009\u53d6\u6d88\u606fID\u4f20\u5165)\n:param fakeid: \u7528\u6237\u7684 UID (\u5373 fakeid)\n:param msgid: \u56fe\u6587\u6d88\u606f ID\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e\n:raises ValueError: \u53c2\u6570\u51fa\u9519, \u5177\u4f53\u5185\u5bb9\u6709 ``fake id not exist`` \u53ca ``message id not exist``", "id": "f584:c0:m11"} {"signature": "def add_news(self, news):", "body": "if not news:raise ValueError('')for item in news:if '' not in item or '' not in item:raise ValueError('')url = ''.format(token=self.__token,)payload = {'': self.__token,'': ,'': '','': '','': ,'': '','': '',}headers = {'': ''.format(token=self.__token),'': self.__cookies,}i = for item in news:payload[''+str(i)] = item.get('')payload[''+str(i)] = item.get('')payload[''+str(i)] = item.get('')payload[''+str(i)] = item.get('')payload[''+str(i)] = item.get('')payload[''+str(i)] = item.get('')i += payload[''] = ir = requests.post(url, data=payload, headers=headers)try:message = json.loads(r.text)except ValueError:raise NeedLoginError(r.text)try:if message[''] != '':raise ValueError(r.text)except KeyError:raise NeedLoginError(r.text)", "docstring": "\u5728\u7d20\u6750\u5e93\u4e2d\u521b\u5efa\u56fe\u6587\u6d88\u606f\n\n:param news: list \u5bf9\u8c61, \u5176\u4e2d\u7684\u6bcf\u4e2a\u5143\u7d20\u4e3a\u4e00\u4e2a dict \u5bf9\u8c61, \u4ee3\u8868\u4e00\u6761\u56fe\u6587, key \u503c\u5206\u522b\u4e3a ``title``, ``author``, ``summary``,\n ``content``, ``picture_id``, ``from_url``, 
\u5bf9\u5e94\u5185\u5bb9\u4e3a\u6807\u9898, \u4f5c\u8005, \u6458\u8981, \u5185\u5bb9, \u7d20\u6750\u5e93\u91cc\u7684\n \u56fe\u7247ID(\u53ef\u901a\u8fc7 ``upload_file`` \u51fd\u6570\u4e0a\u4f20\u83b7\u53d6), \u6765\u6e90\u94fe\u63a5\u3002\n\n \u5176\u4e2d\u5fc5\u987b\u63d0\u4f9b\u7684 key \u503c\u4e3a ``title`` \u548c ``content``\n\n \u793a\u4f8b::\n\n [\n {\n 'title': '\u56fe\u6587\u6807\u9898',\n 'author': '\u56fe\u6587\u4f5c\u8005',\n 'summary': '\u56fe\u6587\u6458\u8981',\n 'content': '\u56fe\u6587\u5185\u5bb9',\n 'picture_id': '23412341',\n 'from_url': 'http://www.baidu.com',\n },\n {\n 'title': '\u6700\u5c11\u56fe\u6587\u6807\u9898',\n 'content': '\u56fe\u6587\u5185\u5bb9',\n }\n ]\n:raises ValueError: \u53c2\u6570\u63d0\u4f9b\u9519\u8bef\u65f6\u629b\u51fa\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m12"} {"signature": "def upload_file(self, filepath):", "body": "self._init_ticket()url = ''.format(ticket_id=self.__ticket_id,ticket=self.__ticket,token=self.__token,)try:files = {'': open(filepath, '')}except IOError:raise ValueError('')payloads = {'': filepath,'': '','': '',}headers = {'': '','': self.__cookies,}r = requests.post(url, files=files, data=payloads, headers=headers)try:message = json.loads(r.text)except ValueError:raise NeedLoginError(r.text)try:if message[''][''] != :raise ValueError(message[''][''])except KeyError:raise NeedLoginError(r.text)return message['']", "docstring": "\u4e0a\u4f20\u7d20\u6750 (\u56fe\u7247/\u97f3\u9891/\u89c6\u9891)\n:param filepath: \u672c\u5730\u6587\u4ef6\u8def\u5f84\n:return: \u76f4\u63a5\u8fd4\u56de\u4e0a\u4f20\u540e\u7684\u6587\u4ef6 ID (fid)\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e\n:raises ValueError: \u53c2\u6570\u51fa\u9519, \u9519\u8bef\u539f\u56e0\u76f4\u63a5\u6253\u5370\u5f02\u5e38\u5373\u53ef (\u5e38\u89c1\u9519\u8bef\u5185\u5bb9: ``file not exist``: \u627e\u4e0d\u5230\u672c\u5730\u6587\u4ef6, ``audio too long``: \u97f3\u9891\u6587\u4ef6\u8fc7\u957f, ``file invalid type``: \u6587\u4ef6\u683c\u5f0f\u4e0d\u6b63\u786e, \u8fd8\u6709\u5176\u4ed6\u9519\u8bef\u8bf7\u81ea\u884c\u68c0\u67e5)", "id": "f584:c0:m13"} {"signature": "def send_file(self, fakeid, fid, type):", "body": "if type == : type = url = ''.format(token=self.__token,)payloads = {}if type == or type == : payloads = {'': self.__token,'': '','': '','': ,'': random.random(),'': type,'': fid,'': fakeid,'': fid,'': '',}elif type == : payloads = {'': self.__token,'': '','': '','': ,'': random.random(),'': type,'': fid,'': fakeid,'': fid,'': '',}headers = {'': ''.format(fakeid=fakeid,token=self.__token,),'': self.__cookies,'': '',}r = requests.post(url, data=payloads, headers=headers)try:message = json.loads(r.text)except ValueError:raise NeedLoginError(r.text)try:if message[''][''] != :raise ValueError(message[''][''])except KeyError:raise NeedLoginError(r.text)", "docstring": "\u5411\u7279\u5b9a\u7528\u6237\u53d1\u9001\u5a92\u4f53\u6587\u4ef6\n:param fakeid: \u7528\u6237 UID (\u5373 fakeid)\n:param fid: \u6587\u4ef6 ID\n:param type: \u6587\u4ef6\u7c7b\u578b (2: \u56fe\u7247, 3: \u97f3\u9891, 15: \u89c6\u9891)\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, 
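The `add_news` entry (f584:c0:m12) above takes a list of dicts in which only `title` and `content` are mandatory, with optional `author`, `summary`, `picture_id` (a file id obtained from `upload_file`) and `from_url`, as its docstring example shows. A small sketch that validates and assembles such a payload before handing it to `add_news`:

```python
def build_news_items(articles):
    """Enforce the add_news contract: every item needs 'title' and 'content'."""
    allowed = {'title', 'author', 'summary', 'content', 'picture_id', 'from_url'}
    items = []
    for article in articles:
        if 'title' not in article or 'content' not in article:
            raise ValueError('every news item needs at least a title and content')
        items.append({k: v for k, v in article.items() if k in allowed})
    return items

news = build_news_items([
    {'title': 'Release notes', 'content': '<p>...</p>', 'picture_id': '23412341'},
    {'title': 'Short item', 'content': 'text only'},
])
# ext.add_news(news)   # ext being a logged-in WechatExt instance
```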
\u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e\n:raises ValueError: \u53c2\u6570\u51fa\u9519, \u9519\u8bef\u539f\u56e0\u76f4\u63a5\u6253\u5370\u5f02\u5e38\u5373\u53ef (\u5e38\u89c1\u9519\u8bef\u5185\u5bb9: ``system error`` \u6216 ``can not send this type of msg``: \u6587\u4ef6\u7c7b\u578b\u4e0d\u5339\u914d, ``user not exist``: \u7528\u6237 fakeid \u4e0d\u5b58\u5728, ``file not exist``: \u6587\u4ef6 fid \u4e0d\u5b58\u5728, \u8fd8\u6709\u5176\u4ed6\u9519\u8bef\u8bf7\u81ea\u884c\u68c0\u67e5)", "id": "f584:c0:m14"} {"signature": "def get_file_list(self, type, page, count=):", "body": "url = ''.format(token=self.__token,type=type,random=round(random.random(), ),begin=page*count,count=count,)headers = {'': '','': ''.format(token=self.__token,),'': self.__cookies,}r = requests.get(url, headers=headers)try:message = json.dumps(json.loads(r.text)[''], ensure_ascii=False)except (KeyError, ValueError):raise NeedLoginError(r.text)return message", "docstring": "\u83b7\u53d6\u7d20\u6750\u5e93\u6587\u4ef6\u5217\u8868\n\n\u8fd4\u56deJSON\u793a\u4f8b::\n\n {\n \"type\": 2,\n \"file_item\": [\n {\n \"update_time\": 1408723089,\n \"name\": \"Doraemonext.png\",\n \"play_length\": 0,\n \"file_id\": 206471048,\n \"type\": 2,\n \"size\": \"53.7 K\"\n },\n {\n \"update_time\": 1408722328,\n \"name\": \"Doraemonext.png\",\n \"play_length\": 0,\n \"file_id\": 206470809,\n \"type\": 2,\n \"size\": \"53.7 K\"\n }\n ],\n \"file_cnt\": {\n \"voice_cnt\": 1,\n \"app_msg_cnt\": 10,\n \"commondity_msg_cnt\": 0,\n \"video_cnt\": 0,\n \"img_cnt\": 29,\n \"video_msg_cnt\": 0,\n \"total\": 40\n }\n }\n\n:param type: \u6587\u4ef6\u7c7b\u578b (2: \u56fe\u7247, 3: \u97f3\u9891, 4: \u89c6\u9891)\n:param page: \u9875\u7801 (\u4ece 0 \u5f00\u59cb)\n:param count: \u6bcf\u9875\u5927\u5c0f\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m15"} {"signature": "def send_image(self, fakeid, fid):", "body": "return self.send_file(fakeid, fid, )", "docstring": "\u7ed9\u6307\u5b9a\u7528\u6237 fakeid \u53d1\u9001\u56fe\u7247\u4fe1\u606f\n:param fakeid: \u7528\u6237\u7684 UID (\u5373 fakeid)\n:param fid: \u6587\u4ef6 ID\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e\n:raises ValueError: \u53c2\u6570\u51fa\u9519, \u9519\u8bef\u539f\u56e0\u76f4\u63a5\u6253\u5370\u5f02\u5e38\u5373\u53ef (\u5e38\u89c1\u9519\u8bef\u5185\u5bb9: ``system error`` \u6216 ``can not send this type of msg``: \u6587\u4ef6\u7c7b\u578b\u4e0d\u5339\u914d, ``user not exist``: \u7528\u6237 fakeid \u4e0d\u5b58\u5728, ``file not exist``: \u6587\u4ef6 fid \u4e0d\u5b58\u5728, \u8fd8\u6709\u5176\u4ed6\u9519\u8bef\u8bf7\u81ea\u884c\u68c0\u67e5)", "id": "f584:c0:m16"} {"signature": "def send_audio(self, fakeid, fid):", "body": "return self.send_file(fakeid, fid, )", "docstring": "\u7ed9\u6307\u5b9a\u7528\u6237 fakeid \u53d1\u9001\u8bed\u97f3\u4fe1\u606f\n:param fakeid: \u7528\u6237\u7684 UID (\u5373 fakeid)\n:param fid: \u6587\u4ef6 ID\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e\n:raises 
ValueError: \u53c2\u6570\u51fa\u9519, \u9519\u8bef\u539f\u56e0\u76f4\u63a5\u6253\u5370\u5f02\u5e38\u5373\u53ef (\u5e38\u89c1\u9519\u8bef\u5185\u5bb9: ``system error`` \u6216 ``can not send this type of msg``: \u6587\u4ef6\u7c7b\u578b\u4e0d\u5339\u914d, ``user not exist``: \u7528\u6237 fakeid \u4e0d\u5b58\u5728, ``file not exist``: \u6587\u4ef6 fid \u4e0d\u5b58\u5728, \u8fd8\u6709\u5176\u4ed6\u9519\u8bef\u8bf7\u81ea\u884c\u68c0\u67e5)", "id": "f584:c0:m17"} {"signature": "def send_video(self, fakeid, fid):", "body": "return self.send_file(fakeid, fid, )", "docstring": "\u7ed9\u6307\u5b9a\u7528\u6237 fakeid \u53d1\u9001\u89c6\u9891\u6d88\u606f\n:param fakeid: \u7528\u6237\u7684 UID (\u5373 fakeid)\n:param fid: \u6587\u4ef6 ID\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e\n:raises ValueError: \u53c2\u6570\u51fa\u9519, \u9519\u8bef\u539f\u56e0\u76f4\u63a5\u6253\u5370\u5f02\u5e38\u5373\u53ef (\u5e38\u89c1\u9519\u8bef\u5185\u5bb9: ``system error`` \u6216 ``can not send this type of msg``: \u6587\u4ef6\u7c7b\u578b\u4e0d\u5339\u914d, ``user not exist``: \u7528\u6237 fakeid \u4e0d\u5b58\u5728, ``file not exist``: \u6587\u4ef6 fid \u4e0d\u5b58\u5728, \u8fd8\u6709\u5176\u4ed6\u9519\u8bef\u8bf7\u81ea\u884c\u68c0\u67e5)", "id": "f584:c0:m18"} {"signature": "def get_user_info(self, fakeid):", "body": "url = ''payloads = {'': ,'': '','': round(random.random(), ),'': self.__token,'': '','': fakeid,}headers = {'': '','': ''.format(token=self.__token,),'': self.__cookies,}r = requests.post(url, data=payloads, headers=headers)try:message = json.dumps(json.loads(r.text)[''], ensure_ascii=False)except (KeyError, ValueError):raise NeedLoginError(r.text)return message", "docstring": "\u83b7\u53d6\u6307\u5b9a\u7528\u6237\u7684\u4e2a\u4eba\u4fe1\u606f\n\n\u8fd4\u56deJSON\u793a\u4f8b::\n\n {\n \"province\": \"\u6e56\u5317\",\n \"city\": \"\u6b66\u6c49\",\n \"gender\": 1,\n \"nick_name\": \"Doraemonext\",\n \"country\": \"\u4e2d\u56fd\",\n \"remark_name\": \"\",\n \"fake_id\": 844735403,\n \"signature\": \"\",\n \"group_id\": 0,\n \"user_name\": \"\"\n }\n\n:param fakeid: \u7528\u6237\u7684 UID (\u5373 fakeid)\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m19"} {"signature": "def get_avatar(self, fakeid):", "body": "url = ''.format(fakeid=fakeid,token=self.__token,)headers = {'': '','': ''.format(token=self.__token,),'': self.__cookies,}r = requests.get(url, headers=headers, stream=True)return r.raw.data", "docstring": "\u83b7\u53d6\u7528\u6237\u5934\u50cf\u4fe1\u606f\n:param fakeid: \u7528\u6237\u7684 UID (\u5373 fakeid)\n:return: \u4e8c\u8fdb\u5236 JPG \u6570\u636e\u5b57\u7b26\u4e32, \u53ef\u76f4\u63a5\u4f5c\u4e3a File Object \u4e2d write \u7684\u53c2\u6570\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m20"} {"signature": "def get_new_message_num(self, lastid=):", "body": "url = ''.format(lastid=lastid,token=self.__token,)payloads = {'': ,'': '','': random.random(),'': '','': self.__token,}headers = {'': '','': ''.format(token=self.__token,),'': 
self.__cookies,}r = requests.get(url, data=payloads, headers=headers)try:return int(json.loads(r.text)[''])except (KeyError, ValueError):raise NeedLoginError(r.text)", "docstring": "\u83b7\u53d6\u65b0\u6d88\u606f\u7684\u6570\u76ee\n:param lastid: \u6700\u8fd1\u83b7\u53d6\u7684\u6d88\u606f ID, \u4e3a 0 \u65f6\u83b7\u53d6\u603b\u6d88\u606f\u6570\u76ee\n:return: \u6d88\u606f\u6570\u76ee", "id": "f584:c0:m21"} {"signature": "def get_top_message(self):", "body": "return self.get_message_list(count=)", "docstring": "\u83b7\u53d6\u6700\u65b0\u4e00\u6761\u6d88\u606f\n\n\u8fd4\u56deJSON\u793a\u4f8b::\n\n {\n \"msg_item\": [\n {\n \"id\": 206448489,\n \"type\": 2,\n \"fakeid\": \"844735403\",\n \"nick_name\": \"Doraemonext\",\n \"date_time\": 1408696938,\n \"source\": \"\",\n \"msg_status\": 4,\n \"has_reply\": 0,\n \"refuse_reason\": \"\",\n \"multi_item\": [ ],\n \"to_uin\": 2391068708,\n \"send_stat\": {\n \"total\": 0,\n \"succ\": 0,\n \"fail\": 0\n }\n }\n ]\n }\n\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m22"} {"signature": "def get_message_list(self, lastid=, offset=, count=, day=, star=False):", "body": "if star:star_param = ''else:star_param = ''if lastid == :lastid = ''url = ''.format(star=star_param,count=count,day=day,lastid=lastid,offset=offset,token=self.__token,)headers = {'': '','': ''.format(token=self.__token),'': self.__cookies,}r = requests.get(url, headers=headers)try:message = json.loads(r.text)['']except (KeyError, ValueError):raise NeedLoginError(r.text)return message", "docstring": "\u83b7\u53d6\u6d88\u606f\u5217\u8868\n\n\u8fd4\u56deJSON\u793a\u4f8b::\n {\n \"msg_item\": [\n {\n \"id\": 206439583,\n \"type\": 1,\n \"fakeid\": \"844735403\",\n \"nick_name\": \"Doraemonext\",\n \"date_time\": 1408671892,\n \"content\": \"\u6d4b\u8bd5\u6d88\u606f\",\n \"source\": \"\",\n \"msg_status\": 4,\n \"has_reply\": 0,\n \"refuse_reason\": \"\",\n \"multi_item\": [ ],\n \"to_uin\": 2391068708,\n \"send_stat\": {\n \"total\": 0,\n \"succ\": 0,\n \"fail\": 0\n }\n },\n {\n \"id\": 206439579,\n \"type\": 1,\n \"fakeid\": \"844735403\",\n \"nick_name\": \"Doraemonext\",\n \"date_time\": 1408671889,\n \"content\": \"wechat-python-sdk\",\n \"source\": \"\",\n \"msg_status\": 4,\n \"has_reply\": 0,\n \"refuse_reason\": \"\",\n \"multi_item\": [ ],\n \"to_uin\": 2391068708,\n \"send_stat\": {\n \"total\": 0,\n \"succ\": 0,\n \"fail\": 0\n }\n }\n ]\n }\n\n:param lastid: \u4f20\u5165\u6700\u540e\u7684\u6d88\u606f id \u7f16\u53f7, \u4e3a 0 \u5219\u4ece\u6700\u65b0\u4e00\u6761\u8d77\u5012\u5e8f\u83b7\u53d6\n:param offset: lastid \u8d77\u7b97\u7b2c\u4e00\u6761\u7684\u504f\u79fb\u91cf\n:param count: \u83b7\u53d6\u6570\u76ee\n:param day: \u6700\u8fd1\u51e0\u5929\u6d88\u606f (0: \u4eca\u5929, 1: \u6628\u5929, 2: \u524d\u5929, 3: \u66f4\u65e9, 7: \u5168\u90e8), \u8fd9\u91cc\u7684\u5168\u90e8\u4ec5\u67095\u5929\n:param star: \u662f\u5426\u53ea\u83b7\u53d6\u661f\u6807\u6d88\u606f\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m23"} {"signature": "def get_message_image(self, msgid, mode=''):", "body": "if mode != '' and mode != '':raise ValueError('')url 
= ''.format(msgid=msgid,token=self.__token,mode=mode,)headers = {'': '','': ''.format(token=self.__token,),'': self.__cookies,}r = requests.get(url, headers=headers, stream=True)if r.headers.get('', None) == '':raise NeedLoginError(r.text)if not r.raw.data:raise ValueError('')return r.raw.data", "docstring": "\u6839\u636e\u6d88\u606f ID \u83b7\u53d6\u56fe\u7247\u6d88\u606f\u5185\u5bb9\n:param msgid: \u6d88\u606f ID\n:param mode: \u56fe\u7247\u5c3a\u5bf8 ('large'\u6216'small')\n:return: \u4e8c\u8fdb\u5236 JPG \u56fe\u7247\u5b57\u7b26\u4e32, \u53ef\u76f4\u63a5\u4f5c\u4e3a File Object \u4e2d write \u7684\u53c2\u6570\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e\n:raises ValueError: \u53c2\u6570\u51fa\u9519, \u9519\u8bef\u539f\u56e0\u76f4\u63a5\u6253\u5370\u5f02\u5e38\u5373\u53ef, \u9519\u8bef\u5185\u5bb9: ``image message not exist``: msg\u53c2\u6570\u65e0\u6548, ``mode error``: mode\u53c2\u6570\u65e0\u6548", "id": "f584:c0:m24"} {"signature": "def get_message_voice(self, msgid):", "body": "url = ''.format(msgid=msgid,token=self.__token,)headers = {'': '','': ''.format(token=self.__token,),'': self.__cookies,}r = requests.get(url, headers=headers, stream=True)if r.headers.get('', None) == '':raise NeedLoginError(r.text)if not r.raw.data:raise ValueError('')return r.raw.data", "docstring": "\u6839\u636e\u6d88\u606f ID \u83b7\u53d6\u8bed\u97f3\u6d88\u606f\u5185\u5bb9\n:param msgid: \u6d88\u606f ID\n:return: \u4e8c\u8fdb\u5236 MP3 \u97f3\u9891\u5b57\u7b26\u4e32, \u53ef\u76f4\u63a5\u4f5c\u4e3a File Object \u4e2d write \u7684\u53c2\u6570\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e\n:raises ValueError: \u53c2\u6570\u51fa\u9519, \u9519\u8bef\u539f\u56e0\u76f4\u63a5\u6253\u5370\u5f02\u5e38\u5373\u53ef, \u9519\u8bef\u5185\u5bb9: ``voice message not exist``: msg\u53c2\u6570\u65e0\u6548", "id": "f584:c0:m25"} {"signature": "def get_message_video(self, msgid):", "body": "url = ''.format(msgid=msgid,token=self.__token,)headers = {'': '','': ''.format(token=self.__token,),'': self.__cookies,}r = requests.get(url, headers=headers, stream=True)if r.headers.get('', None) == '':raise NeedLoginError(r.text)if not r.raw.data:raise ValueError('')return r.raw.data", "docstring": "\u6839\u636e\u6d88\u606f ID \u83b7\u53d6\u89c6\u9891\u6d88\u606f\u5185\u5bb9\n:param msgid: \u6d88\u606f ID\n:return: \u4e8c\u8fdb\u5236 MP4 \u89c6\u9891\u5b57\u7b26\u4e32, \u53ef\u76f4\u63a5\u4f5c\u4e3a File Object \u4e2d write \u7684\u53c2\u6570\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e\n:raises ValueError: \u53c2\u6570\u51fa\u9519, \u9519\u8bef\u539f\u56e0\u76f4\u63a5\u6253\u5370\u5f02\u5e38\u5373\u53ef, \u9519\u8bef\u5185\u5bb9: ``video message not exist``: msg\u53c2\u6570\u65e0\u6548", "id": "f584:c0:m26"} {"signature": "def _init_fakeid(self):", "body": "if not self.__fakeid:self._init_self_information()", "docstring": "\u521d\u59cb\u5316\u516c\u4f17\u53f7\u81ea\u8eab\u7684 ``fakeid`` \u503c\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, 
\u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m27"} {"signature": "def _init_ticket(self):", "body": "if not self.__ticket:self._init_self_information()", "docstring": "\u521d\u59cb\u5316\u516c\u4f17\u53f7\u81ea\u8eab\u7684 ``ticket`` \u53ca ``ticket_id`` \u503c\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m28"} {"signature": "def _init_self_information(self):", "body": "url = ''.format(token=self.__token)headers = {'': '','': '','': self.__cookies,}r = requests.get(url, headers=headers)ticket_id = re.search(r'', r.text)if not ticket_id:raise NeedLoginError(r.text)self.__ticket_id = ticket_id.group()ticket = re.search(r'', r.text)if not ticket:raise NeedLoginError(r.text)self.__ticket = ticket.group()fakeid = re.search(r'', r.text)if not fakeid:raise NeedLoginError(r.text)self.__fakeid = fakeid.group()", "docstring": "\u521d\u59cb\u5316\u516c\u4f17\u53f7\u81ea\u8eab\u7684\u5c5e\u6027\u503c (\u76ee\u524d\u5305\u62ec ``Ticket`` \u503c \u53ca \u516c\u4f17\u53f7\u81ea\u8eab\u7684 ``fakeid`` \u503c)\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m29"} {"signature": "def _init_appid(self):", "body": "if not self.__appid:self._init_plugin_token_appid()", "docstring": "\u521d\u59cb\u5316\u516c\u4f17\u53f7\u81ea\u8eab\u7684 ``appid`` \u503c\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m30"} {"signature": "def _init_plugin_token(self):", "body": "if not self.__plugin_token:self._init_plugin_token_appid()", "docstring": "\u521d\u59cb\u5316\u516c\u4f17\u53f7\u81ea\u8eab\u7684 ``PluginToken`` \u503c\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m31"} {"signature": "def _init_plugin_token_appid(self):", "body": "if not self.__plugin_token or not self.__appid:url = ''.format(token=self.__token,)headers = {'': '','': ''.format(token=self.__token,),'': self.__cookies,}r = requests.get(url, headers=headers)plugin_token = re.search(r\"\", r.text)if not plugin_token:raise NeedLoginError(r.text)self.__plugin_token = plugin_token.group()appid = re.search(r\"\", r.text)if not appid:raise NeedLoginError(r.text)self.__appid = appid.group()", "docstring": "\u521d\u59cb\u5316\u516c\u4f17\u53f7\u7684 ``PluginToken`` \u503c\u53ca\u516c\u4f17\u53f7\u81ea\u8eab\u7684 ``appid`` \u503c\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m32"} {"signature": "@classmethoddef _transcoding(cls, data):", "body": "if not data:return dataresult = Noneif isinstance(data, str) and hasattr(data, ''):result = data.decode('')else:result = datareturn result", "docstring": "\u7f16\u7801\u8f6c\u6362\n :param data: \u9700\u8981\u8f6c\u6362\u7684\u6570\u636e\n :return: 
\u8f6c\u6362\u597d\u7684\u6570\u636e", "id": "f585:c0:m0"} {"signature": "@classmethoddef _transcoding_list(cls, data):", "body": "if not isinstance(data, list):raise ValueError('')result = []for item in data:if isinstance(item, dict):result.append(cls._transcoding_dict(item))elif isinstance(item, list):result.append(cls._transcoding_list(item))else:result.append(item)return result", "docstring": "\u7f16\u7801\u8f6c\u6362 for list\n :param data: \u9700\u8981\u8f6c\u6362\u7684 list \u6570\u636e\n :return: \u8f6c\u6362\u597d\u7684 list", "id": "f585:c0:m1"} {"signature": "@classmethoddef _transcoding_dict(cls, data):", "body": "if not isinstance(data, dict):raise ValueError('')result = {}for k, v in data.items():k = cls._transcoding(k)if isinstance(v, dict):v = cls._transcoding_dict(v)elif isinstance(v, list):v = cls._transcoding_list(v)else:v = cls._transcoding(v)result.update({k: v})return result", "docstring": "\u7f16\u7801\u8f6c\u6362 for dict\n:param data: \u9700\u8981\u8f6c\u6362\u7684 dict \u6570\u636e\n:return: \u8f6c\u6362\u597d\u7684 dict", "id": "f585:c0:m2"} {"signature": "def __init__(self, **kwargs):", "body": "self.__request = WechatRequest()if kwargs.get('') is not True:disable_urllib3_warning() self.__token = kwargs.get('')self.__appid = kwargs.get('')self.__appsecret = kwargs.get('')self.__encrypt_mode = kwargs.get('', '')self.__encoding_aes_key = kwargs.get('')self.__crypto = Noneself._update_crypto()self.__access_token_getfunc = kwargs.get('')self.__access_token_setfunc = kwargs.get('')self.__access_token_refreshfunc = kwargs.get('')self.__access_token = kwargs.get('')self.__access_token_expires_at = kwargs.get('')self.__jsapi_ticket_getfunc = kwargs.get('')self.__jsapi_ticket_setfunc = kwargs.get('')self.__jsapi_ticket_refreshfunc = kwargs.get('')self.__jsapi_ticket = kwargs.get('')self.__jsapi_ticket_expires_at = kwargs.get('')self.__partnerid = kwargs.get('')self.__partnerkey = kwargs.get('')self.__paysignkey = kwargs.get('')", "docstring": ":param kwargs: \u914d\u7f6e\u4fe1\u606f\u5b57\u5178, \u53ef\u7528\u5b57\u5178 key \u503c\u53ca\u5bf9\u5e94\u89e3\u91ca\u5982\u4e0b:\n 'token': \u5fae\u4fe1 Token\n\n 'appid': App ID\n 'appsecret': App Secret\n\n 'encrypt_mode': \u52a0\u89e3\u5bc6\u6a21\u5f0f ('normal': \u660e\u6587\u6a21\u5f0f, 'compatible': \u517c\u5bb9\u6a21\u5f0f, 'safe': \u5b89\u5168\u6a21\u5f0f(\u9ed8\u8ba4))\n 'encoding_aes_key': EncodingAESKey \u503c (\u4f20\u5165\u6b64\u503c\u5fc5\u987b\u4fdd\u8bc1\u540c\u65f6\u4f20\u5165 token, appid, \u5426\u5219\u629b\u51fa\u5f02\u5e38)\n\n 'access_token_getfunc': access token \u83b7\u53d6\u51fd\u6570 (\u7528\u4e8e\u5355\u673a\u53ca\u5206\u5e03\u5f0f\u73af\u5883\u4e0b, \u5177\u4f53\u683c\u5f0f\u53c2\u89c1\u6587\u6863)\n 'access_token_setfunc': access token \u5199\u5165\u51fd\u6570 (\u7528\u4e8e\u5355\u673a\u53ca\u5206\u5e03\u5f0f\u73af\u5883\u4e0b, \u5177\u4f53\u683c\u5f0f\u53c2\u89c1\u6587\u6863)\n 'access_token_refreshfunc': access token \u5237\u65b0\u51fd\u6570 (\u7528\u4e8e\u5355\u673a\u53ca\u5206\u5e03\u5f0f\u73af\u5883\u4e0b, \u5177\u4f53\u683c\u5f0f\u53c2\u89c1\u6587\u6863)\n 'access_token': \u76f4\u63a5\u5bfc\u5165\u7684 access token \u503c, \u8be5\u503c\u9700\u8981\u5728\u4e0a\u4e00\u6b21\u8be5\u7c7b\u5b9e\u4f8b\u5316\u4e4b\u540e\u624b\u52a8\u8fdb\u884c\u7f13\u5b58\u5e76\u5728\u6b64\u5904\u4f20\u5165, \u5982\u679c\u4e0d\n \u4f20\u5165, \u5c06\u4f1a\u5728\u9700\u8981\u65f6\u81ea\u52a8\u91cd\u65b0\u83b7\u53d6 (\u4f20\u5165 access_token_getfunc \u548c access_token_setfunc \u51fd\u6570\n 
\u540e\u5c06\u4f1a\u81ea\u52a8\u5ffd\u7565\u6b64\u5904\u7684\u4f20\u5165\u503c)\n 'access_token_expires_at': \u76f4\u63a5\u5bfc\u5165\u7684 access token \u7684\u8fc7\u671f\u65e5\u671f, \u8be5\u503c\u9700\u8981\u5728\u4e0a\u4e00\u6b21\u8be5\u7c7b\u5b9e\u4f8b\u5316\u4e4b\u540e\u624b\u52a8\u8fdb\u884c\u7f13\u5b58\n \u5e76\u5728\u6b64\u5904\u4f20\u5165, \u5982\u679c\u4e0d\u4f20\u5165, \u5c06\u4f1a\u5728\u9700\u8981\u65f6\u81ea\u52a8\u91cd\u65b0\u83b7\u53d6 (\u4f20\u5165 access_token_getfunc\n \u548c access_token_setfunc \u51fd\u6570\u540e\u5c06\u4f1a\u81ea\u52a8\u5ffd\u7565\u6b64\u5904\u7684\u4f20\u5165\u503c)\n\n 'jsapi_ticket_getfunc': jsapi ticket \u83b7\u53d6\u51fd\u6570 (\u7528\u4e8e\u5355\u673a\u53ca\u5206\u5e03\u5f0f\u73af\u5883\u4e0b, \u5177\u4f53\u683c\u5f0f\u53c2\u89c1\u6587\u6863)\n 'jsapi_ticket_setfunc': jsapi ticket \u5199\u5165\u51fd\u6570 (\u7528\u4e8e\u5355\u673a\u53ca\u5206\u5e03\u5f0f\u73af\u5883\u4e0b, \u5177\u4f53\u683c\u5f0f\u53c2\u89c1\u6587\u6863)\n 'jsapi_ticket_refreshfunc': jsapi ticket \u5237\u65b0\u51fd\u6570 (\u7528\u4e8e\u5355\u673a\u53ca\u5206\u5e03\u5f0f\u73af\u5883\u4e0b, \u5177\u4f53\u683c\u5f0f\u53c2\u89c1\u6587\u6863)\n 'jsapi_ticket': \u76f4\u63a5\u5bfc\u5165\u7684 jsapi ticket \u503c, \u8be5\u503c\u9700\u8981\u5728\u4e0a\u4e00\u6b21\u8be5\u7c7b\u5b9e\u4f8b\u5316\u4e4b\u540e\u624b\u52a8\u8fdb\u884c\u7f13\u5b58\u5e76\u5728\u6b64\u5904\u4f20\u5165, \u5982\u679c\u4e0d\n \u4f20\u5165, \u5c06\u4f1a\u5728\u9700\u8981\u65f6\u81ea\u52a8\u91cd\u65b0\u83b7\u53d6 (\u4f20\u5165 jsapi_ticket_getfunc \u548c jsapi_ticket_setfunc \u51fd\u6570\n \u540e\u5c06\u4f1a\u81ea\u52a8\u5ffd\u7565\u6b64\u5904\u7684\u4f20\u5165\u503c)\n 'jsapi_ticket_expires_at': \u76f4\u63a5\u5bfc\u5165\u7684 jsapi ticket \u7684\u8fc7\u671f\u65e5\u671f, \u8be5\u503c\u9700\u8981\u5728\u4e0a\u4e00\u6b21\u8be5\u7c7b\u5b9e\u4f8b\u5316\u4e4b\u540e\u624b\u52a8\u8fdb\u884c\u7f13\u5b58\n \u5e76\u5728\u6b64\u5904\u4f20\u5165, \u5982\u679c\u4e0d\u4f20\u5165, \u5c06\u4f1a\u5728\u9700\u8981\u65f6\u81ea\u52a8\u91cd\u65b0\u83b7\u53d6 (\u4f20\u5165 jsapi_ticket_getfunc\n \u548c jsapi_ticket_setfunc \u51fd\u6570\u540e\u5c06\u4f1a\u81ea\u52a8\u5ffd\u7565\u6b64\u5904\u7684\u4f20\u5165\u503c)\n\n 'partnerid': \u8d22\u4ed8\u901a\u5546\u6237\u8eab\u4efd\u6807\u8bc6, \u652f\u4ed8\u6743\u9650\u4e13\u7528\n 'partnerkey': \u8d22\u4ed8\u901a\u5546\u6237\u6743\u9650\u5bc6\u94a5 Key, \u652f\u4ed8\u6743\u9650\u4e13\u7528\n 'paysignkey': \u5546\u6237\u7b7e\u540d\u5bc6\u94a5 Key, \u652f\u4ed8\u6743\u9650\u4e13\u7528\n\n 'checkssl': \u662f\u5426\u68c0\u67e5 SSL, \u9ed8\u8ba4\u4e0d\u68c0\u67e5 (False), \u53ef\u907f\u514d urllib3 \u7684 InsecurePlatformWarning \u8b66\u544a\n:return:", "id": "f587:c0:m0"} {"signature": "@propertydef token(self):", "body": "self._check_token()return self.__token", "docstring": "\u83b7\u53d6\u5f53\u524d Token", "id": "f587:c0:m1"} {"signature": "@token.setterdef token(self, token):", "body": "self.__token = tokenself._update_crypto()", "docstring": "\u8bbe\u7f6e\u5f53\u524d Token", "id": "f587:c0:m2"} {"signature": "@propertydef appid(self):", "body": "return self.__appid", "docstring": "\u83b7\u53d6\u5f53\u524d App ID", "id": "f587:c0:m3"} {"signature": "@propertydef appsecret(self):", "body": "return self.__appsecret", "docstring": "\u83b7\u53d6\u5f53\u524d App Secret", "id": "f587:c0:m4"} {"signature": "def set_appid_appsecret(self, appid, appsecret):", "body": "self.__appid = appidself.__appsecret = appsecretself._update_crypto()", "docstring": "\u8bbe\u7f6e\u5f53\u524d App ID \u53ca App Secret", 
"id": "f587:c0:m5"} {"signature": "@propertydef encoding_aes_key(self):", "body": "return self.__encoding_aes_key", "docstring": "\u83b7\u53d6\u5f53\u524d EncodingAESKey", "id": "f587:c0:m6"} {"signature": "@encoding_aes_key.setterdef encoding_aes_key(self, encoding_aes_key):", "body": "self.__encoding_aes_key = encoding_aes_keyself._update_crypto()", "docstring": "\u8bbe\u7f6e\u5f53\u524d EncodingAESKey", "id": "f587:c0:m7"} {"signature": "@encrypt_mode.setterdef encrypt_mode(self, encrypt_mode):", "body": "self.__encrypt_mode = encrypt_modeself._update_crypto()", "docstring": "\u8bbe\u7f6e\u5f53\u524d\u52a0\u5bc6\u6a21\u5f0f", "id": "f587:c0:m9"} {"signature": "@propertydef crypto(self):", "body": "return self.__crypto", "docstring": "\u83b7\u53d6\u5f53\u524d Crypto \u5b9e\u4f8b", "id": "f587:c0:m10"} {"signature": "@propertydef access_token(self):", "body": "self._check_appid_appsecret()if callable(self.__access_token_getfunc):self.__access_token, self.__access_token_expires_at = self.__access_token_getfunc()if self.__access_token:now = time.time()if self.__access_token_expires_at - now > :return self.__access_tokenself.grant_access_token() return self.__access_token", "docstring": "\u83b7\u53d6\u5f53\u524d access token \u503c, \u672c\u65b9\u6cd5\u4f1a\u81ea\u884c\u7ef4\u62a4 access token \u6709\u6548\u6027", "id": "f587:c0:m11"} {"signature": "@propertydef jsapi_ticket(self):", "body": "self._check_appid_appsecret()if callable(self.__jsapi_ticket_getfunc):self.__jsapi_ticket, self.__jsapi_ticket_expires_at = self.__jsapi_ticket_getfunc()if self.__jsapi_ticket:now = time.time()if self.__jsapi_ticket_expires_at - now > :return self.__jsapi_ticketself.grant_jsapi_ticket() return self.__jsapi_ticket", "docstring": "\u83b7\u53d6\u5f53\u524d jsapi ticket \u503c, \u672c\u65b9\u6cd5\u4f1a\u81ea\u884c\u7ef4\u62a4 jsapi ticket \u6709\u6548\u6027", "id": "f587:c0:m12"} {"signature": "@propertydef partnerid(self):", "body": "return self.__partnerid", "docstring": "\u83b7\u53d6\u5f53\u524d\u8d22\u4ed8\u901a\u5546\u6237\u8eab\u4efd\u6807\u8bc6", "id": "f587:c0:m13"} {"signature": "@propertydef partnerkey(self):", "body": "return self.__partnerkey", "docstring": "\u83b7\u53d6\u5f53\u524d\u8d22\u4ed8\u901a\u5546\u6237\u6743\u9650\u5bc6\u94a5 Key", "id": "f587:c0:m14"} {"signature": "@propertydef paysignkey(self):", "body": "return self.__paysignkey", "docstring": "\u83b7\u53d6\u5546\u6237\u7b7e\u540d\u5bc6\u94a5 Key", "id": "f587:c0:m15"} {"signature": "def grant_access_token(self):", "body": "self._check_appid_appsecret()if callable(self.__access_token_refreshfunc):self.__access_token, self.__access_token_expires_at = self.__access_token_refreshfunc()returnresponse_json = self.__request.get(url=\"\",params={\"\": \"\",\"\": self.__appid,\"\": self.__appsecret,},access_token=self.__access_token)self.__access_token = response_json['']self.__access_token_expires_at = int(time.time()) + response_json['']if callable(self.__access_token_setfunc):self.__access_token_setfunc(self.__access_token, self.__access_token_expires_at)return response_json", "docstring": "\u83b7\u53d6 access token \u5e76\u66f4\u65b0\u5f53\u524d\u914d\u7f6e\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305 (\u4f20\u5165 access_token_refreshfunc \u53c2\u6570\u540e\u8fd4\u56de None)", "id": "f587:c0:m16"} {"signature": "def grant_jsapi_ticket(self):", "body": "self._check_appid_appsecret()if callable(self.__jsapi_ticket_refreshfunc):self.__jsapi_ticket, self.__jsapi_ticket_expires_at = 
self.__jsapi_ticket_refreshfunc()returnresponse_json = self.__request.get(url=\"\",params={\"\": \"\",},access_token=self.access_token,)self.__jsapi_ticket = response_json['']self.__jsapi_ticket_expires_at = int(time.time()) + response_json['']if callable(self.__jsapi_ticket_setfunc):self.__jsapi_ticket_setfunc(self.__jsapi_ticket, self.__jsapi_ticket_expires_at)return response_json", "docstring": "\u83b7\u53d6 jsapi ticket \u5e76\u66f4\u65b0\u5f53\u524d\u914d\u7f6e\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305 (\u4f20\u5165 jsapi_ticket_refreshfunc \u53c2\u6570\u540e\u8fd4\u56de None)", "id": "f587:c0:m17"} {"signature": "def get_access_token(self):", "body": "self._check_appid_appsecret()return {'': self.access_token,'': self.__access_token_expires_at,}", "docstring": "\u83b7\u53d6 Access Token \u53ca Access Token \u8fc7\u671f\u65e5\u671f, \u4ec5\u4f9b\u7f13\u5b58\u4f7f\u7528, \u5982\u679c\u5e0c\u671b\u5f97\u5230\u539f\u751f\u7684 Access Token \u8bf7\u6c42\u6570\u636e\u8bf7\u4f7f\u7528 :func:`grant_token`\n**\u4ec5\u4e3a\u517c\u5bb9 v0.6.0 \u4ee5\u524d\u7248\u672c\u4f7f\u7528, \u81ea\u884c\u7ef4\u62a4 access_token \u8bf7\u4f7f\u7528 access_token_setfunc \u548c access_token_getfunc \u8fdb\u884c\u64cd\u4f5c**\n:return: dict \u5bf9\u8c61, key \u5305\u62ec `access_token` \u53ca `access_token_expires_at`", "id": "f587:c0:m18"} {"signature": "def get_jsapi_ticket(self):", "body": "self._check_appid_appsecret()return {'': self.jsapi_ticket,'': self.__jsapi_ticket_expires_at,}", "docstring": "\u83b7\u53d6 Jsapi Ticket \u53ca Jsapi Ticket \u8fc7\u671f\u65e5\u671f, \u4ec5\u4f9b\u7f13\u5b58\u4f7f\u7528, \u5982\u679c\u5e0c\u671b\u5f97\u5230\u539f\u751f\u7684 Jsapi Ticket \u8bf7\u6c42\u6570\u636e\u8bf7\u4f7f\u7528 :func:`grant_jsapi_ticket`\n**\u4ec5\u4e3a\u517c\u5bb9 v0.6.0 \u4ee5\u524d\u7248\u672c\u4f7f\u7528, \u81ea\u884c\u7ef4\u62a4 jsapi_ticket \u8bf7\u4f7f\u7528 jsapi_ticket_setfunc \u548c jsapi_ticket_getfunc \u8fdb\u884c\u64cd\u4f5c**\n:return: dict \u5bf9\u8c61, key \u5305\u62ec `jsapi_ticket` \u53ca `jsapi_ticket_expires_at`", "id": "f587:c0:m19"} {"signature": "def _check_token(self):", "body": "if not self.__token:raise NeedParamError('')", "docstring": "\u68c0\u67e5 Token \u662f\u5426\u5b58\u5728\n:raises NeedParamError: Token \u53c2\u6570\u6ca1\u6709\u5728\u521d\u59cb\u5316\u7684\u65f6\u5019\u63d0\u4f9b", "id": "f587:c0:m20"} {"signature": "def _check_appid_appsecret(self):", "body": "if not self.__appid or not self.__appsecret:raise NeedParamError('')", "docstring": "\u68c0\u67e5 AppID \u548c AppSecret \u662f\u5426\u5b58\u5728\n:raises NeedParamError: AppID \u6216 AppSecret \u53c2\u6570\u6ca1\u6709\u5728\u521d\u59cb\u5316\u7684\u65f6\u5019\u5b8c\u6574\u63d0\u4f9b", "id": "f587:c0:m21"} {"signature": "def _update_crypto(self):", "body": "if self.__encrypt_mode in ['', ''] and self.__encoding_aes_key is not None:if self.__token is None or self.__appid is None:raise NeedParamError('')self.__crypto = BasicCrypto(self.__token, self.__encoding_aes_key, self.__appid)else:self.__crypto = None", "docstring": "\u6839\u636e\u5f53\u524d\u914d\u7f6e\u5185\u5bb9\u66f4\u65b0 Crypto \u7c7b", "id": "f587:c0:m22"} {"signature": "def __init__(self, token=None, appid=None, appsecret=None, partnerid=None,partnerkey=None, paysignkey=None, access_token=None, access_token_expires_at=None,jsapi_ticket=None, jsapi_ticket_expires_at=None, checkssl=False, conf=None):", "body": "if conf is not None:self.__conf = confelif isinstance(token, WechatConf): self.__conf = tokenelse: self.__conf = 
WechatConf(token=token,appid=appid,appsecret=appsecret,access_token=access_token,access_token_expires_at=access_token_expires_at,jsapi_ticket=jsapi_ticket,jsapi_ticket_expires_at=jsapi_ticket_expires_at,encrypt_mode='',partnerid=partnerid,partnerkey=partnerkey,paysignkey=paysignkey,checkssl=checkssl,)self.__request = WechatRequest(conf=self.__conf)self.__is_parse = Falseself.__message = None", "docstring": ":param token: \u5fae\u4fe1 Token\n:param appid: App ID\n:param appsecret: App Secret\n:param partnerid: \u8d22\u4ed8\u901a\u5546\u6237\u8eab\u4efd\u6807\u8bc6, \u652f\u4ed8\u6743\u9650\u4e13\u7528\n:param partnerkey: \u8d22\u4ed8\u901a\u5546\u6237\u6743\u9650\u5bc6\u94a5 Key, \u652f\u4ed8\u6743\u9650\u4e13\u7528\n:param paysignkey: \u5546\u6237\u7b7e\u540d\u5bc6\u94a5 Key, \u652f\u4ed8\u6743\u9650\u4e13\u7528\n:param access_token: \u76f4\u63a5\u5bfc\u5165\u7684 access_token \u503c, \u8be5\u503c\u9700\u8981\u5728\u4e0a\u4e00\u6b21\u8be5\u7c7b\u5b9e\u4f8b\u5316\u4e4b\u540e\u624b\u52a8\u8fdb\u884c\u7f13\u5b58\u5e76\u5728\u6b64\u5904\u4f20\u5165, \u5982\u679c\u4e0d\u4f20\u5165, \u5c06\u4f1a\u5728\u9700\u8981\u65f6\u81ea\u52a8\u91cd\u65b0\u83b7\u53d6\n:param access_token_expires_at: \u76f4\u63a5\u5bfc\u5165\u7684 access_token \u7684\u8fc7\u671f\u65e5\u671f\uff0c\u8be5\u503c\u9700\u8981\u5728\u4e0a\u4e00\u6b21\u8be5\u7c7b\u5b9e\u4f8b\u5316\u4e4b\u540e\u624b\u52a8\u8fdb\u884c\u7f13\u5b58\u5e76\u5728\u6b64\u5904\u4f20\u5165, \u5982\u679c\u4e0d\u4f20\u5165, \u5c06\u4f1a\u5728\u9700\u8981\u65f6\u81ea\u52a8\u91cd\u65b0\u83b7\u53d6\n:param jsapi_ticket: \u76f4\u63a5\u5bfc\u5165\u7684 jsapi_ticket \u503c, \u8be5\u503c\u9700\u8981\u5728\u4e0a\u4e00\u6b21\u8be5\u7c7b\u5b9e\u4f8b\u5316\u4e4b\u540e\u624b\u52a8\u8fdb\u884c\u7f13\u5b58\u5e76\u5728\u6b64\u5904\u4f20\u5165, \u5982\u679c\u4e0d\u4f20\u5165, \u5c06\u4f1a\u5728\u9700\u8981\u65f6\u81ea\u52a8\u91cd\u65b0\u83b7\u53d6\n:param jsapi_ticket_expires_at: \u76f4\u63a5\u5bfc\u5165\u7684 jsapi_ticket \u7684\u8fc7\u671f\u65e5\u671f\uff0c\u8be5\u503c\u9700\u8981\u5728\u4e0a\u4e00\u6b21\u8be5\u7c7b\u5b9e\u4f8b\u5316\u4e4b\u540e\u624b\u52a8\u8fdb\u884c\u7f13\u5b58\u5e76\u5728\u6b64\u5904\u4f20\u5165, \u5982\u679c\u4e0d\u4f20\u5165, \u5c06\u4f1a\u5728\u9700\u8981\u65f6\u81ea\u52a8\u91cd\u65b0\u83b7\u53d6\n:param checkssl: \u662f\u5426\u68c0\u67e5 SSL, \u9ed8\u8ba4\u4e3a False, \u53ef\u907f\u514d urllib3 \u7684 InsecurePlatformWarning \u8b66\u544a\n:param conf: WechatConf \u914d\u7f6e\u7c7b, \u63d0\u4f9b\u6b64\u53c2\u6570\u5c06\u9ed8\u8ba4\u5ffd\u7565\u5176\u4ed6\u6240\u6709\u53c2\u6570, \u6240\u6709\u6570\u636e\u5747\u4ece\u6b64\u914d\u7f6e\u7c7b\u4e2d\u83b7\u53d6", "id": "f588:c0:m0"} {"signature": "@propertydef conf(self):", "body": "return self.__conf", "docstring": "\u83b7\u53d6\u5f53\u524d WechatConf \u914d\u7f6e\u5b9e\u4f8b", "id": "f588:c0:m1"} {"signature": "@conf.setterdef conf(self, conf):", "body": "self.__conf = confself.__request = WechatRequest(conf=self.__conf)", "docstring": "\u8bbe\u7f6e\u5f53\u524d WechatConf \u5b9e\u4f8b", "id": "f588:c0:m2"} {"signature": "@propertydef request(self):", "body": "return self.__request", "docstring": "\u83b7\u53d6\u5f53\u524d WechatConf \u914d\u7f6e\u5b9e\u4f8b", "id": "f588:c0:m3"} {"signature": "@request.setterdef request(self, request):", "body": "self.__request = request", "docstring": "\u8bbe\u7f6e\u5f53\u524d WechatConf \u5b9e\u4f8b", "id": "f588:c0:m4"} {"signature": "def check_signature(self, signature, timestamp, nonce):", "body": "if not signature or not timestamp or not nonce:return Falsetmp_list = 
[self.conf.token, timestamp, nonce]tmp_list.sort()tmp_str = ''.join(tmp_list)if signature != hashlib.sha1(tmp_str.encode('')).hexdigest():return Falsereturn True", "docstring": "\u9a8c\u8bc1\u5fae\u4fe1\u6d88\u606f\u771f\u5b9e\u6027\n:param signature: \u5fae\u4fe1\u52a0\u5bc6\u7b7e\u540d\n:param timestamp: \u65f6\u95f4\u6233\n:param nonce: \u968f\u673a\u6570\n:return: \u901a\u8fc7\u9a8c\u8bc1\u8fd4\u56de True, \u672a\u901a\u8fc7\u9a8c\u8bc1\u8fd4\u56de False", "id": "f588:c0:m5"} {"signature": "def generate_jsapi_signature(self, timestamp, noncestr, url, jsapi_ticket=None):", "body": "if not jsapi_ticket:jsapi_ticket = self.conf.jsapi_ticketdata = {'': jsapi_ticket,'': noncestr,'': timestamp,'': url,}keys = list(data.keys())keys.sort()data_str = ''.join(['' % (key, data[key]) for key in keys])signature = hashlib.sha1(data_str.encode('')).hexdigest()return signature", "docstring": "\u4f7f\u7528 jsapi_ticket \u5bf9 url \u8fdb\u884c\u7b7e\u540d\n:param timestamp: \u65f6\u95f4\u6233\n:param noncestr: \u968f\u673a\u6570\n:param url: \u8981\u7b7e\u540d\u7684 url\uff0c\u4e0d\u5305\u542b # \u53ca\u5176\u540e\u9762\u90e8\u5206\n:param jsapi_ticket: (\u53ef\u9009\u53c2\u6570) jsapi_ticket \u503c (\u5982\u4e0d\u63d0\u4f9b\u5c06\u81ea\u52a8\u901a\u8fc7 appid \u548c appsecret \u83b7\u53d6)\n:return: \u8fd4\u56desha1\u7b7e\u540d\u7684hexdigest\u503c", "id": "f588:c0:m6"} {"signature": "def parse_data(self, data, msg_signature=None, timestamp=None, nonce=None):", "body": "result = {}if isinstance(data, six.text_type): data = data.encode('')if self.conf.encrypt_mode == '':if not (msg_signature and timestamp and nonce):raise ParseError('')data = self.conf.crypto.decrypt_message(msg=data,msg_signature=msg_signature,timestamp=timestamp,nonce=nonce,)try:xml = XMLStore(xmlstring=data)except Exception:raise ParseError()result = xml.xml2dictresult[''] = dataresult[''] = result.pop('').lower()message_type = MESSAGE_TYPES.get(result[''], UnknownMessage)self.__message = message_type(result)self.__is_parse = True", "docstring": "\u89e3\u6790\u5fae\u4fe1\u670d\u52a1\u5668\u53d1\u9001\u8fc7\u6765\u7684\u6570\u636e\u5e76\u4fdd\u5b58\u7c7b\u4e2d\n:param data: HTTP Request \u7684 Body \u6570\u636e\n:param msg_signature: EncodingAESKey \u7684 msg_signature\n:param timestamp: EncodingAESKey \u7528\u65f6\u95f4\u6233\n:param nonce: EncodingAESKey \u7528\u968f\u673a\u6570\n:raises ParseError: \u89e3\u6790\u5fae\u4fe1\u670d\u52a1\u5668\u6570\u636e\u9519\u8bef, \u6570\u636e\u4e0d\u5408\u6cd5", "id": "f588:c0:m7"} {"signature": "def get_message(self):", "body": "self._check_parse()return self.__message", "docstring": "\u83b7\u53d6\u89e3\u6790\u597d\u7684 WechatMessage \u5bf9\u8c61\n:return: \u89e3\u6790\u597d\u7684 WechatMessage \u5bf9\u8c61", "id": "f588:c0:m9"} {"signature": "def get_access_token(self):", "body": "return self.conf.get_access_token()", "docstring": "\u83b7\u53d6 Access Token \u53ca Access Token \u8fc7\u671f\u65e5\u671f, \u4ec5\u4f9b\u7f13\u5b58\u4f7f\u7528, \u5982\u679c\u5e0c\u671b\u5f97\u5230\u539f\u751f\u7684 Access Token \u8bf7\u6c42\u6570\u636e\u8bf7\u4f7f\u7528 :func:`grant_token`\n**\u4ec5\u4e3a\u517c\u5bb9 v0.6.0 \u4ee5\u524d\u7248\u672c\u4f7f\u7528, \u81ea\u884c\u7ef4\u62a4 access_token \u8bf7\u4f7f\u7528 access_token_setfunc \u548c access_token_getfunc \u8fdb\u884c\u64cd\u4f5c**\n:return: dict \u5bf9\u8c61, key \u5305\u62ec `access_token` \u53ca `access_token_expires_at`", "id": "f588:c0:m10"} {"signature": "def get_jsapi_ticket(self):", "body": "return self.conf.get_jsapi_ticket()", "docstring": 
"\u83b7\u53d6 Jsapi Ticket \u53ca Jsapi Ticket \u8fc7\u671f\u65e5\u671f, \u4ec5\u4f9b\u7f13\u5b58\u4f7f\u7528, \u5982\u679c\u5e0c\u671b\u5f97\u5230\u539f\u751f\u7684 Jsapi Ticket \u8bf7\u6c42\u6570\u636e\u8bf7\u4f7f\u7528 :func:`grant_jsapi_ticket`\n**\u4ec5\u4e3a\u517c\u5bb9 v0.6.0 \u4ee5\u524d\u7248\u672c\u4f7f\u7528, \u81ea\u884c\u7ef4\u62a4 jsapi_ticket \u8bf7\u4f7f\u7528 jsapi_ticket_setfunc \u548c jsapi_ticket_getfunc \u8fdb\u884c\u64cd\u4f5c**\n:return: dict \u5bf9\u8c61, key \u5305\u62ec `jsapi_ticket` \u53ca `jsapi_ticket_expires_at`", "id": "f588:c0:m11"} {"signature": "def response_none(self):", "body": "self._check_parse()return self._encrypt_response('')", "docstring": "\u56de\u590d\u7a7a\u6d88\u606f\n:return: \u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684\u7a7a\u6d88\u606f", "id": "f588:c0:m12"} {"signature": "def response_text(self, content, escape=False):", "body": "self._check_parse()content = self._transcoding(content)if escape:if six.PY2:content = cgi.escape(content)else:import htmlcontent = html.escape(content)response = TextReply(message=self.__message, content=content).render()return self._encrypt_response(response)", "docstring": "\u5c06\u6587\u5b57\u4fe1\u606f content \u7ec4\u88c5\u4e3a\u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684\u54cd\u5e94\u6570\u636e\n:param content: \u56de\u590d\u6587\u5b57\n:param escape: \u662f\u5426\u8f6c\u4e49\u8be5\u6587\u672c\u5185\u5bb9 (\u9ed8\u8ba4\u4e0d\u8f6c\u4e49)\n:return: \u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684 XML \u54cd\u5e94\u6570\u636e", "id": "f588:c0:m13"} {"signature": "def response_image(self, media_id):", "body": "self._check_parse()response = ImageReply(message=self.__message, media_id=media_id).render()return self._encrypt_response(response)", "docstring": "\u5c06 media_id \u6240\u4ee3\u8868\u7684\u56fe\u7247\u7ec4\u88c5\u4e3a\u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684\u54cd\u5e94\u6570\u636e\n:param media_id: \u56fe\u7247\u7684 MediaID\n:return: \u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684 XML \u54cd\u5e94\u6570\u636e", "id": "f588:c0:m14"} {"signature": "def response_voice(self, media_id):", "body": "self._check_parse()response = VoiceReply(message=self.__message, media_id=media_id).render()return self._encrypt_response(response)", "docstring": "\u5c06 media_id \u6240\u4ee3\u8868\u7684\u8bed\u97f3\u7ec4\u88c5\u4e3a\u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684\u54cd\u5e94\u6570\u636e\n:param media_id: \u8bed\u97f3\u7684 MediaID\n:return: \u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684 XML \u54cd\u5e94\u6570\u636e", "id": "f588:c0:m15"} {"signature": "def response_video(self, media_id, title=None, description=None):", "body": "self._check_parse()title = self._transcoding(title)description = self._transcoding(description)response = VideoReply(message=self.__message, media_id=media_id, title=title, description=description).render()return self._encrypt_response(response)", "docstring": "\u5c06 media_id \u6240\u4ee3\u8868\u7684\u89c6\u9891\u7ec4\u88c5\u4e3a\u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684\u54cd\u5e94\u6570\u636e\n:param media_id: \u89c6\u9891\u7684 MediaID\n:param title: \u89c6\u9891\u6d88\u606f\u7684\u6807\u9898\n:param description: \u89c6\u9891\u6d88\u606f\u7684\u63cf\u8ff0\n:return: \u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684 XML \u54cd\u5e94\u6570\u636e", "id": "f588:c0:m16"} {"signature": "def response_music(self, music_url, title=None, description=None, 
hq_music_url=None, thumb_media_id=None):", "body": "self._check_parse()music_url = self._transcoding(music_url)title = self._transcoding(title)description = self._transcoding(description)hq_music_url = self._transcoding(hq_music_url)response = MusicReply(message=self.__message, title=title, description=description, music_url=music_url,hq_music_url=hq_music_url, thumb_media_id=thumb_media_id).render()return self._encrypt_response(response)", "docstring": "\u5c06\u97f3\u4e50\u4fe1\u606f\u7ec4\u88c5\u4e3a\u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684\u54cd\u5e94\u6570\u636e\n:param music_url: \u97f3\u4e50\u94fe\u63a5\n:param title: \u97f3\u4e50\u6807\u9898\n:param description: \u97f3\u4e50\u63cf\u8ff0\n:param hq_music_url: \u9ad8\u8d28\u91cf\u97f3\u4e50\u94fe\u63a5, WIFI\u73af\u5883\u4f18\u5148\u4f7f\u7528\u8be5\u94fe\u63a5\u64ad\u653e\u97f3\u4e50\n:param thumb_media_id: \u7f29\u7565\u56fe\u7684 MediaID\n:return: \u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684 XML \u54cd\u5e94\u6570\u636e", "id": "f588:c0:m17"} {"signature": "def response_news(self, articles):", "body": "self._check_parse()for article in articles:if article.get(''):article[''] = self._transcoding(article[''])if article.get(''):article[''] = self._transcoding(article[''])if article.get(''):article[''] = self._transcoding(article[''])if article.get(''):article[''] = self._transcoding(article[''])news = ArticleReply(message=self.__message)for article in articles:article = Article(**article)news.add_article(article)response = news.render()return self._encrypt_response(response)", "docstring": "\u5c06\u65b0\u95fb\u4fe1\u606f\u7ec4\u88c5\u4e3a\u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684\u54cd\u5e94\u6570\u636e\n:param articles: list \u5bf9\u8c61, \u6bcf\u4e2a\u5143\u7d20\u4e3a\u4e00\u4e2a dict \u5bf9\u8c61, key \u5305\u542b `title`, `description`, `picurl`, `url`\n:return: \u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684 XML \u54cd\u5e94\u6570\u636e", "id": "f588:c0:m18"} {"signature": "def group_transfer_message(self):", "body": "self._check_parse()response = GroupTransferReply(message=self.__message).render()return self._encrypt_response(response)", "docstring": "\u5c06 message \u7fa4\u53d1\u5230\u591a\u5ba2\u670d\u7cfb\u7edf\n:return: \u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684 XML \u54cd\u5e94\u6570\u636e", "id": "f588:c0:m19"} {"signature": "def grant_token(self, **kwargs):", "body": "return self.conf.grant_access_token()", "docstring": "\u83b7\u53d6 Access Token\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/11/0e4b294685f817b95cbed85ba5e82b8f.html\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m20"} {"signature": "def grant_jsapi_ticket(self, **kwargs):", "body": "return self.conf.grant_jsapi_ticket()", "docstring": "\u83b7\u53d6 Jsapi Ticket\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/7/aaa137b55fb2e0456bf8dd9148dd613f.html#.E9.99.84.E5.BD.951-JS-SDK.E4.BD.BF.E7.94.A8.E6.9D.83.E9.99.90.E7.AD.BE.E5.90.8D.E7.AE.97.E6.B3.95\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m21"} {"signature": "def create_menu(self, menu_data):", "body": "menu_data = self._transcoding_dict(menu_data)return self.request.post(url='',data=menu_data)", "docstring": "\u521b\u5efa\u81ea\u5b9a\u4e49\u83dc\u5355 ::\n\n # -*- coding: utf-8 -*-\n wechat = WechatBasic(appid='appid', appsecret='appsecret')\n wechat.create_menu({\n 'button':[\n {\n 'type': 'click',\n 'name': '\u4eca\u65e5\u6b4c\u66f2',\n 'key': 
'V1001_TODAY_MUSIC'\n },\n {\n 'type': 'click',\n 'name': '\u6b4c\u624b\u7b80\u4ecb',\n 'key': 'V1001_TODAY_SINGER'\n },\n {\n 'name': '\u83dc\u5355',\n 'sub_button': [\n {\n 'type': 'view',\n 'name': '\u641c\u7d22',\n 'url': 'http://www.soso.com/'\n },\n {\n 'type': 'view',\n 'name': '\u89c6\u9891',\n 'url': 'http://v.qq.com/'\n },\n {\n 'type': 'click',\n 'name': '\u8d5e\u4e00\u4e0b\u6211\u4eec',\n 'key': 'V1001_GOOD'\n }\n ]\n }\n ]})\n\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/13/43de8269be54a0a6f64413e4dfa94f39.html\n:param menu_data: Python \u5b57\u5178\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m22"} {"signature": "def get_menu(self):", "body": "return self.request.get('')", "docstring": "\u67e5\u8be2\u81ea\u5b9a\u4e49\u83dc\u5355\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/16/ff9b7b85220e1396ffa16794a9d95adc.html\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m23"} {"signature": "def delete_menu(self):", "body": "return self.request.get('')", "docstring": "\u5220\u9664\u81ea\u5b9a\u4e49\u83dc\u5355\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/16/8ed41ba931e4845844ad6d1eeb8060c8.html\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m24"} {"signature": "def upload_media(self, media_type, media_file, extension=''):", "body": "if six.PY2:return self._upload_media_py2(media_type, media_file, extension)else:return self._upload_media_py3(media_type, media_file, extension)", "docstring": "\u4e0a\u4f20\u591a\u5a92\u4f53\u6587\u4ef6\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/10/78b15308b053286e2a66b33f0f0f5fb6.html\n:param media_type: \u5a92\u4f53\u6587\u4ef6\u7c7b\u578b\uff0c\u5206\u522b\u6709\u56fe\u7247\uff08image\uff09\u3001\u8bed\u97f3\uff08voice\uff09\u3001\u89c6\u9891\uff08video\uff09\u548c\u7f29\u7565\u56fe\uff08thumb\uff09\n:param media_file: \u8981\u4e0a\u4f20\u7684\u6587\u4ef6\uff0c\u4e00\u4e2a File object \u6216 StringIO object\n:param extension: \u5982\u679c media_file \u4f20\u5165\u7684\u4e3a StringIO object\uff0c\u90a3\u4e48\u5fc5\u987b\u4f20\u5165 extension \u663e\u793a\u6307\u660e\u8be5\u5a92\u4f53\u6587\u4ef6\u6269\u5c55\u540d\uff0c\u5982 ``mp3``, ``amr``\uff1b\u5982\u679c media_file \u4f20\u5165\u7684\u4e3a File object\uff0c\u90a3\u4e48\u8be5\u53c2\u6570\u8bf7\u7559\u7a7a\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m25"} {"signature": "def download_media(self, media_id):", "body": "return self.request.get('',params={'': media_id,},stream=True,)", "docstring": "\u4e0b\u8f7d\u591a\u5a92\u4f53\u6587\u4ef6\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/10/78b15308b053286e2a66b33f0f0f5fb6.html\n:param media_id: \u5a92\u4f53\u6587\u4ef6 ID\n:return: requests \u7684 Response \u5b9e\u4f8b", "id": "f588:c0:m28"} {"signature": "def create_group(self, name):", "body": "return self.request.post(url='',data={'': {'': name,},})", "docstring": "\u521b\u5efa\u5206\u7ec4\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/13/be5272dc4930300ba561d927aead2569.html\n:param name: \u5206\u7ec4\u540d\u5b57\uff0830\u4e2a\u5b57\u7b26\u4ee5\u5185\uff09\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305\n:raise HTTPError: \u5fae\u4fe1api http \u8bf7\u6c42\u5931\u8d25", "id": "f588:c0:m29"} {"signature": "def get_groups(self):", "body": "return self.request.get('')", "docstring": "\u67e5\u8be2\u6240\u6709\u5206\u7ec4\n\u8be6\u60c5\u8bf7\u53c2\u8003 
http://mp.weixin.qq.com/wiki/13/be5272dc4930300ba561d927aead2569.html\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m30"} {"signature": "def get_group_by_id(self, openid):", "body": "return self.request.post(url='',data={'': openid,})", "docstring": "\u67e5\u8be2\u7528\u6237\u6240\u5728\u5206\u7ec4\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/13/be5272dc4930300ba561d927aead2569.html\n:param openid: \u7528\u6237\u7684OpenID\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m31"} {"signature": "def update_group(self, group_id, name):", "body": "return self.request.post(url='',data={'': {'': int(group_id),'': name,}})", "docstring": "\u4fee\u6539\u5206\u7ec4\u540d\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/13/be5272dc4930300ba561d927aead2569.html\n:param group_id: \u5206\u7ec4id\uff0c\u7531\u5fae\u4fe1\u5206\u914d\n:param name: \u5206\u7ec4\u540d\u5b57\uff0830\u4e2a\u5b57\u7b26\u4ee5\u5185\uff09\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m32"} {"signature": "def move_user(self, user_id, group_id):", "body": "return self.request.post(url='',data={'': user_id,'': group_id,})", "docstring": "\u79fb\u52a8\u7528\u6237\u5206\u7ec4\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/13/be5272dc4930300ba561d927aead2569.html\n:param user_id: \u7528\u6237 ID \u3002 \u5c31\u662f\u4f60\u6536\u5230\u7684 WechatMessage \u7684 source\n:param group_id: \u5206\u7ec4 ID\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m33"} {"signature": "def get_user_info(self, user_id, lang=''):", "body": "return self.request.get(url='',params={'': user_id,'': lang,})", "docstring": "\u83b7\u53d6\u7528\u6237\u57fa\u672c\u4fe1\u606f\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/14/bb5031008f1494a59c6f71fa0f319c66.html\n:param user_id: \u7528\u6237 ID, \u5c31\u662f\u4f60\u6536\u5230\u7684 WechatMessage \u7684 source\n:param lang: \u8fd4\u56de\u56fd\u5bb6\u5730\u533a\u8bed\u8a00\u7248\u672c\uff0czh_CN \u7b80\u4f53\uff0czh_TW \u7e41\u4f53\uff0cen \u82f1\u8bed\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m34"} {"signature": "def get_followers(self, first_user_id=None):", "body": "params = dict()if first_user_id:params[''] = first_user_idreturn self.request.get('', params=params)", "docstring": "\u83b7\u53d6\u5173\u6ce8\u8005\u5217\u8868\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/3/17e6919a39c1c53555185907acf70093.html\n:param first_user_id: \u53ef\u9009\u3002\u7b2c\u4e00\u4e2a\u62c9\u53d6\u7684OPENID\uff0c\u4e0d\u586b\u9ed8\u8ba4\u4ece\u5934\u5f00\u59cb\u62c9\u53d6\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m35"} {"signature": "def send_text_message(self, user_id, content):", "body": "return self.request.post(url='',data={'': user_id,'': '','': {'': content,},})", "docstring": "\u53d1\u9001\u6587\u672c\u6d88\u606f\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html\n:param user_id: \u7528\u6237 ID, \u5c31\u662f\u4f60\u6536\u5230\u7684 WechatMessage \u7684 source\n:param content: \u6d88\u606f\u6b63\u6587\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m36"} {"signature": "def send_image_message(self, user_id, media_id):", "body": "return self.request.post(url='',data={'': user_id,'': '','': {'': media_id,},})", "docstring": "\u53d1\u9001\u56fe\u7247\u6d88\u606f\n\u8be6\u60c5\u8bf7\u53c2\u8003 
http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html\n:param user_id: \u7528\u6237 ID, \u5c31\u662f\u4f60\u6536\u5230\u7684 WechatMessage \u7684 source\n:param media_id: \u56fe\u7247\u7684\u5a92\u4f53ID\u3002 \u53ef\u4ee5\u901a\u8fc7 :func:`upload_media` \u4e0a\u4f20\u3002\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m37"} {"signature": "def send_voice_message(self, user_id, media_id):", "body": "return self.request.post(url='',data={'': user_id,'': '','': {'': media_id,},})", "docstring": "\u53d1\u9001\u8bed\u97f3\u6d88\u606f\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html\n:param user_id: \u7528\u6237 ID, \u5c31\u662f\u4f60\u6536\u5230\u7684 WechatMessage \u7684 source\n:param media_id: \u53d1\u9001\u7684\u8bed\u97f3\u7684\u5a92\u4f53ID\u3002 \u53ef\u4ee5\u901a\u8fc7 :func:`upload_media` \u4e0a\u4f20\u3002\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m38"} {"signature": "def send_video_message(self, user_id, media_id, title=None, description=None):", "body": "video_data = {'': media_id,}if title:video_data[''] = titleif description:video_data[''] = descriptionreturn self.request.post(url='',data={'': user_id,'': '','': video_data,})", "docstring": "\u53d1\u9001\u89c6\u9891\u6d88\u606f\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html\n:param user_id: \u7528\u6237 ID, \u5c31\u662f\u4f60\u6536\u5230\u7684 WechatMessage \u7684 source\n:param media_id: \u53d1\u9001\u7684\u89c6\u9891\u7684\u5a92\u4f53ID\u3002 \u53ef\u4ee5\u901a\u8fc7 :func:`upload_media` \u4e0a\u4f20\u3002\n:param title: \u89c6\u9891\u6d88\u606f\u7684\u6807\u9898\n:param description: \u89c6\u9891\u6d88\u606f\u7684\u63cf\u8ff0\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m39"} {"signature": "def send_music_message(self, user_id, url, hq_url, thumb_media_id, title=None, description=None):", "body": "music_data = {'': url,'': hq_url,'': thumb_media_id,}if title:music_data[''] = titleif description:music_data[''] = descriptionreturn self.request.post(url='',data={'': user_id,'': '','': music_data,})", "docstring": "\u53d1\u9001\u97f3\u4e50\u6d88\u606f\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html\n:param user_id: \u7528\u6237 ID, \u5c31\u662f\u4f60\u6536\u5230\u7684 WechatMessage \u7684 source\n:param url: \u97f3\u4e50\u94fe\u63a5\n:param hq_url: \u9ad8\u54c1\u8d28\u97f3\u4e50\u94fe\u63a5\uff0cwifi\u73af\u5883\u4f18\u5148\u4f7f\u7528\u8be5\u94fe\u63a5\u64ad\u653e\u97f3\u4e50\n:param thumb_media_id: \u7f29\u7565\u56fe\u7684\u5a92\u4f53ID\u3002 \u53ef\u4ee5\u901a\u8fc7 :func:`upload_media` \u4e0a\u4f20\u3002\n:param title: \u97f3\u4e50\u6807\u9898\n:param description: \u97f3\u4e50\u63cf\u8ff0\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m40"} {"signature": "def send_article_message(self, user_id, articles=None, media_id=None):", "body": "if articles is None and media_id is None:raise TypeError('')if articles:articles_data = []for article in articles:article = Article(**article)articles_data.append({'': article.title,'': article.description,'': article.url,'': article.picurl,})return self.request.post(url='',data={'': user_id,'': '','': {'': articles_data,},})return self.request.post(url='',data={'': user_id,'': '','': {'': media_id,},})", "docstring": "\u53d1\u9001\u56fe\u6587\u6d88\u606f\n\u8be6\u60c5\u8bf7\u53c2\u8003 
http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html\n:param user_id: user ID, i.e. the source of the WechatMessage you received\n:param articles: a list object; each element is a dict object whose keys include `title`, `description`, `picurl`, `url`\n:param media_id: Media ID of the news (rich media) message to be sent\n:return: the returned JSON data packet", "id": "f588:c0:m41"}
{"signature": "def create_qrcode(self, data):", "body": "data = self._transcoding_dict(data)return self.request.post(url='',data=data)", "docstring": "Create a QR code\nFor details see http://mp.weixin.qq.com/wiki/18/28fc21e7ed87bec960651f0ce873ef8a.html\n:param data: the parameter dict you want to send\n:return: the returned JSON data packet", "id": "f588:c0:m42"}
{"signature": "def show_qrcode(self, ticket):", "body": "return requests.get(url='',params={'': ticket})", "docstring": "Exchange a ticket for a QR code\nFor details see http://mp.weixin.qq.com/wiki/18/28fc21e7ed87bec960651f0ce873ef8a.html\n:param ticket: QR code ticket. It can be obtained via :func:`create_qrcode`\n:return: the returned Request object", "id": "f588:c0:m43"}
{"signature": "def set_template_industry(self, industry_id1, industry_id2):", "body": "return self.request.post(url='',data={'': str(industry_id1),'': str(industry_id2),})", "docstring": "Set the industry the account belongs to\nFor details see http://mp.weixin.qq.com/wiki/17/304c1885ea66dbedf7dc170d84999a9d.html\n:param industry_id1: primary industry code\n:param industry_id2: secondary industry code\n:return: the returned JSON data packet", "id": "f588:c0:m44"}
{"signature": "def get_template_id(self, template_id_short):", "body": "return self.request.post(url='',data={'': str(template_id_short),})", "docstring": "Get a template ID\nFor details see http://mp.weixin.qq.com/wiki/17/304c1885ea66dbedf7dc170d84999a9d.html\n:param template_id_short: number of the template in the template library, in forms such as \u201cTM**\u201d and \u201cOPENTMTM**\u201d\n:return: the returned JSON data packet", "id": "f588:c0:m45"}
{"signature": "def send_template_message(self, user_id, template_id, data, url='', topcolor=''):", "body": "unicode_data = {}if data:unicode_data = self._transcoding_dict(data)return self.request.post(url='',data={'': user_id,\"\": template_id,\"\": url,\"\": topcolor,\"\": unicode_data})", "docstring": "Send a template message\nFor details see http://mp.weixin.qq.com/wiki/17/304c1885ea66dbedf7dc170d84999a9d.html\n:param user_id: user ID, i.e. the source of the WechatMessage you received (OpenID)\n:param template_id: template ID\n:param data: template message data (as a dict), for example:\n{\n \"first\": {\n \"value\": \"Congratulations on your purchase!\",\n \"color\": \"#173177\"\n },\n \"keynote1\":{\n \"value\": \"Chocolate\",\n \"color\": \"#173177\"\n },\n \"keynote2\": {\n \"value\": \"39.8 yuan\",\n \"color\": \"#173177\"\n },\n \"keynote3\": {\n \"value\": \"16 September 2014\",\n \"color\": \"#173177\"\n },\n \"remark\":{\n \"value\": \"Welcome back for your next purchase!\",\n \"color\": \"#173177\"\n }\n}\n:param url: redirect URL (empty by default)\n:param topcolor: top bar colour as an RGB value (default '#FF0000' )\n:return: the returned JSON data packet", "id": "f588:c0:m46"}
{"signature": "def _check_parse(self):", "body": "if not self.__is_parse:raise NeedParseError()", "docstring": "Check whether the data sent by the WeChat server has been parsed successfully\n:raises NeedParseError: the data sent by the WeChat server still needs to be parsed", "id": "f588:c0:m49"}
{"signature": "def _check_official_error(self, json_data):", "body": "if \"\" in json_data and json_data[\"\"] != :raise OfficialAPIError(errcode=json_data.get(''), errmsg=json_data.get('', ''))", "docstring": "Check whether the response from the WeChat Official Account Platform contains an error return code\n:raises OfficialAPIError: raised if the return code indicates an error; otherwise True is returned", "id": "f588:c0:m50"}
{"signature": "@propertydef xml2dict(self):", "body": "self._remove_whitespace_nodes(self._doc.childNodes[])return self._element2dict(self._doc.childNodes[])", "docstring": "Convert the XML to a dict", "id": "f590:c0:m1"}
{"signature": "def _element2dict(self, parent):", "body": "d = {}for node in parent.childNodes:if not isinstance(node, minidom.Element):continueif not node.hasChildNodes():continueif node.childNodes[].nodeType == minidom.Node.ELEMENT_NODE:try:d[node.tagName]except KeyError:d[node.tagName] = []d[node.tagName].append(self._element2dict(node))elif len(node.childNodes) == and node.childNodes[].nodeType in [minidom.Node.CDATA_SECTION_NODE, minidom.Node.TEXT_NODE]:d[node.tagName] = node.childNodes[].datareturn d", "docstring": "Convert a single node to a dict", "id": "f590:c0:m2"}
{"signature": "def _remove_whitespace_nodes(self, node, unlink=True):", "body": "remove_list = []for child in node.childNodes:if child.nodeType == Node.TEXT_NODE and not child.data.strip():remove_list.append(child)elif child.hasChildNodes():self._remove_whitespace_nodes(child, unlink)for node in remove_list:node.parentNode.removeChild(node)if unlink:node.unlink()", "docstring": "Remove useless whitespace nodes", "id": "f590:c0:m3"}
{"signature": "def __init__(self, conf=None):", "body": "self.__conf = conf", "docstring": ":param conf: a WechatConf configuration instance", "id": "f591:c0:m0"}
{"signature": "def request(self, method, url, access_token=None, **kwargs):", "body": "access_token = self.__conf.access_token if self.__conf is not None else access_tokenif \"\" not in kwargs:kwargs[\"\"] = {\"\": access_token}else:kwargs[\"\"][\"\"] = access_tokenif isinstance(kwargs.get(\"\", \"\"), dict):body = json.dumps(kwargs[\"\"], ensure_ascii=False)if isinstance(body, six.text_type):body = body.encode('')kwargs[\"\"] = bodyr = requests.request(method=method,url=url,**kwargs)r.raise_for_status()try:response_json = r.json()except ValueError: return rheadimgurl = response_json.get('')if headimgurl:response_json[''] = headimgurl.replace('', '')self._check_official_error(response_json)return response_json", "docstring": "Send a request to the WeChat server\n:param method: request method\n:param url: request URL\n:param access_token: access token value; if a conf was passed in at initialization it is fetched automatically, otherwise please provide this value\n:param kwargs: additional data\n:return: the JSON data returned by the WeChat server", "id": "f591:c0:m1"}
{"signature": "def get(self, url, access_token=None, **kwargs):", "body": "return self.request(method=\"\",url=url,access_token=access_token,**kwargs)", "docstring": "Send a GET request to the WeChat server\n:param url: request URL\n:param access_token: access token value; if a conf was passed in at initialization it is fetched automatically, otherwise please provide this value\n:param kwargs: additional data\n:return: the JSON data returned by the WeChat server", "id": "f591:c0:m2"}
{"signature": "def post(self, url, access_token=None, **kwargs):", "body": "return self.request(method=\"\",url=url,access_token=access_token,**kwargs)", "docstring": "Send a POST request to the WeChat server\n:param url: request URL\n:param access_token: access token value; if a conf was passed in at initialization it is fetched automatically, otherwise please provide this value\n:param kwargs: additional data\n:return: the JSON data returned by the WeChat server", "id": "f591:c0:m3"}
{"signature": "def _check_official_error(self, json_data):", "body": "if '' in json_data and json_data[''] != :raise OfficialAPIError(errcode=json_data.get(''), errmsg=json_data.get('', ''))", "docstring": "Check whether the response from the WeChat Official Account Platform contains an error return code\n:raises OfficialAPIError: raised if the return code indicates an error; otherwise True is returned", "id": "f591:c0:m4"}
{"signature": "def __init__(self, token, encoding_aes_key, _id):", "body": "self.__key = base64.b64decode(to_binary(encoding_aes_key) + to_binary(''))if len(self.__key) != :raise ValidateAESKeyError(encoding_aes_key)self.__token = to_binary(token)self.__id = to_binary(_id)self.__pc = BaseCrypto(self.__key)", "docstring": "Constructor\n\n :param token: the Token set by the developer on the official account platform\n :param encoding_aes_key: the EncodingAESKey set by the developer on the official account platform\n :param _id: the appid of the official account, or the corpid of the enterprise account", "id": "f592:c0:m0"}
{"signature": "def _check_signature(self, msg_signature, timestamp, nonce, echostr):", "body": "signature = get_sha1_signature(self.__token, timestamp, nonce, echostr)if not signature == msg_signature:raise ValidateSignatureError()try:return self.__pc.decrypt(echostr, self.__id)except DecryptAESError as e:raise ValidateSignatureError(e)", "docstring": "Verify that the signature is valid\n\n :param msg_signature: signature string, corresponding to the msg_signature URL parameter\n :param timestamp: timestamp, corresponding to the timestamp URL parameter\n :param nonce: random string, corresponding to the nonce URL parameter\n :param echostr: random string, corresponding to the echostr URL parameter\n :return: the decrypted echostr\n :raise ValidateSignatureError: raised when the signature is invalid", "id": "f592:c0:m1"}
{"signature": "def _encrypt_message(self, msg, nonce, timestamp=None):", "body": "xml =", "docstring": "Encrypt and pack the message that the official account replies to the user\n\n :param msg: the message to reply to the user, an xml-format string\n :param nonce: random string; you may generate it yourself or reuse the nonce URL parameter\n :param timestamp: timestamp; you may generate it yourself or reuse the timestamp URL parameter, if None the current time is used automatically\n :return: the encrypted ciphertext that can be returned to the user directly, an xml-format string containing msg_signature, timestamp, nonce and encrypt", "id": "f592:c0:m2"}
{"signature": "def _decrypt_message(self, msg, msg_signature, timestamp, nonce):", "body": "timestamp = to_binary(timestamp)nonce = to_binary(nonce)if isinstance(msg, six.string_types):try:msg = xmltodict.parse(to_text(msg))['']except Exception as e:raise ParseError(e)encrypt = msg['']signature = get_sha1_signature(self.__token, timestamp, nonce, encrypt)if signature != msg_signature:raise ValidateSignatureError()return self.__pc.decrypt(encrypt, self.__id)", "docstring": "Verify the authenticity of the message and obtain the decrypted plain text\n\n :param msg: ciphertext, corresponding to the POST request data\n :param msg_signature: signature string, corresponding to the msg_signature URL parameter\n :param timestamp: timestamp, corresponding to the timestamp URL parameter\n :param nonce: random string, corresponding to the nonce URL parameter\n :return: the decrypted original text", "id": "f592:c0:m3"}
{"signature": "def encrypt(self, text, appid):", "body": "text = self.get_random_str() + struct.pack(\"\", socket.htonl(len(text))) + to_binary(text) + appidpkcs7 = PKCS7Encoder()text = pkcs7.encode(text)cryptor = AES.new(self.key, self.mode, self.key[:])try:ciphertext = cryptor.encrypt(text)return base64.b64encode(ciphertext)except Exception as e:raise EncryptAESError(e)", "docstring": "Encrypt the plain text\n\n @param text: the plain text to encrypt\n @return: the encrypted string", "id": "f593:c0:m1"}
{"signature": "def decrypt(self, text, appid):", "body": "try:cryptor = AES.new(self.key, self.mode, self.key[:])plain_text = cryptor.decrypt(base64.b64decode(text))except Exception as e:raise DecryptAESError(e)try:if six.PY2:pad = ord(plain_text[-])else:pad = plain_text[-]content = plain_text[:-pad]xml_len = socket.ntohl(struct.unpack(\"\", content[: ])[])xml_content = content[: xml_len + ]from_appid = content[xml_len + :]except Exception as e:raise IllegalBuffer(e)if from_appid != appid:raise ValidateAppIDError()return xml_content", "docstring": "Decrypt the ciphertext and strip the padding from the resulting plain text\n\n @param text: ciphertext\n @return: the plain text with the padding removed", "id": "f593:c0:m2"}
{"signature": "def get_random_str(self):", "body": "rule = string.ascii_letters + string.digitsreturn \"\".join(random.sample(rule, ))", "docstring": "Randomly generate a 16-character string\n\n @return: a 16-character string", "id": "f593:c0:m3"}
{"signature": "@classmethoddef encode(cls, text):", "body": "text_length = len(text)amount_to_pad = cls.block_size - (text_length % cls.block_size)if amount_to_pad == :amount_to_pad = cls.block_sizepad = to_binary(chr(amount_to_pad))return text + pad * amount_to_pad", "docstring": "Pad the plain text that is about to be encrypted\n@param text: the plain text to be padded\n@return: the padded plain text string", "id": "f594:c0:m0"}
{"signature": "@classmethoddef decode(cls, decrypted):", "body": "pad = ord(decrypted[-])if pad < or pad > :pad = return decrypted[:-pad]", "docstring": "Remove the padding characters from the decrypted plain text\n@param decrypted: the decrypted plain text\n@return: the plain text with the padding characters removed", "id": "f594:c0:m1"}
{"signature": "def get_sha1_signature(token, timestamp, nonce, encrypt):", "body": "try:sortlist = [token, timestamp, nonce, to_binary(encrypt)]sortlist.sort()sha = hashlib.sha1()sha.update(to_binary(\"\").join(sortlist))return sha.hexdigest()except Exception as e:raise CryptoComputeSignatureError(e)", "docstring": "Generate a secure signature with the SHA1 algorithm\n@param token: token\n@param timestamp: timestamp\n@param encrypt: ciphertext\n@param nonce: random string\n@return: the secure signature", "id": "f596:m0"}
{"signature": "def __init__(self, message, content):", "body": "super(TextReply, self).__init__(message=message, content=content)", "docstring": ":param message: a WechatMessage object\n:param content: the text reply content", "id": "f598:c1:m0"}
{"signature": "def __init__(self, message, media_id):", "body": "super(ImageReply, self).__init__(message=message, media_id=media_id)", "docstring": ":param message: a WechatMessage object\n:param media_id: MediaID of the image", "id": "f598:c2:m0"}
{"signature": "def __init__(self, message, media_id):", "body": "super(VoiceReply, self).__init__(message=message, media_id=media_id)", "docstring": ":param message: a WechatMessage object\n:param media_id: MediaID of the voice message", "id": "f598:c3:m0"}
{"signature": "def __init__(self, message, media_id, title=None, description=None):", "body": "title = title or ''description = description or ''super(VideoReply, self).__init__(message=message, media_id=media_id, title=title, description=description)", "docstring": ":param message: a WechatMessage object\n:param media_id: MediaID of the video\n:param title: title of the video message\n:param description: description of the video message", "id": "f598:c4:m0"}
{"signature": "def __init__(self, message):", "body": "super(GroupTransferReply, self).__init__(message=message)", "docstring": ":param message: a WechatMessage object", "id": "f598:c8:m0"}
{"signature": "def to_text(value, encoding=''):", "body": "if not value:return ''if isinstance(value, six.text_type):return valueif isinstance(value, six.binary_type):return value.decode(encoding)return six.text_type(value)", "docstring": "Convert value to unicode, default encoding utf-8\n\n :param value: the value to convert\n :param encoding: encoding", "id": "f599:m0"}
{"signature": "def to_binary(value, encoding=''):", "body": "if not value:return b''if isinstance(value, six.binary_type):return valueif isinstance(value, six.text_type):return value.encode(encoding)if six.PY3:return six.binary_type(str(value), encoding) return six.binary_type(value)", "docstring": "Convert value to bytes, default encoding utf-8\n\n :param value: the value to convert\n :param encoding: encoding", "id": "f599:m1"}
{"signature": "def disable_urllib3_warning():", "body": "try:import requests.packages.urllib3requests.packages.urllib3.disable_warnings()except Exception:pass", "docstring": "https://urllib3.readthedocs.org/en/latest/security.html#insecurerequestwarning\nTemporary workaround for the InsecurePlatformWarning warning", "id": "f599:m2"}
{"signature": "def generate_timestamp():", "body": "return int(time.time())", "docstring": "Generate a timestamp\n :return: timestamp string", "id": "f599:m3"}
{"signature": "def generate_nonce():", "body": "return random.randrange(, )", "docstring": "Generate a nonce\n :return: nonce string", "id": "f599:m4"}
{"signature": "def convert_ext_to_mime(extension):", "body": "table = {'': '','': '','': '','': '','': '',}if extension in table:return table[extension]raise ValueError(\"\")", "docstring": "Convert a file extension to its MIME type\n :return: mime string", "id": "f599:m5"}
{"signature": "def is_allowed_extension(extension, type=''):", "body": "table = ('', '', '', '', '')if extension in table:return Truereturn False", "docstring": "Check whether the extension is allowed to be uploaded to the server\n :return: True if ok", "id": "f599:m6"}
{"signature": "def exists(self, openid):", "body": "raise NotImplementedError('')", "docstring": "Return True if the openid exists", "id": "f604:c1:m26"}
{"signature": "def create(self, openid):", "body": "raise NotImplementedError('')", "docstring": "Create a new context/conversation storage for the given openid", "id": "f604:c1:m27"}
{"signature": "def __init__(self, errcode, errmsg):", "body": "self.errcode = errcodeself.errmsg = errmsg", "docstring": ":param errcode: error code\n:param errmsg: error message", "id": "f609:c1:m0"}
{"signature": "def __init__(self, message=''):", "body": "self.message = message", "docstring": ":param message: description of the error, optional", "id": "f609:c2:m0"}
{"signature": "def load(self, source, pause=False):", "body": "self._source = sourceself._load_source(source)if pause:time.sleep() self.pause()", "docstring": "Loads a new source (as a file) from ``source`` (a file path or URL)\nby killing the current ``omxplayer`` process and forking a new one.\n\nArgs:\n source (string): Path to the file to play or URL", "id": "f624:c2:m5"}
{"signature": "@_check_player_is_active@_from_dbus_typedef can_quit(self):", "body": "return self._root_interface_property('')", "docstring": "Returns:\n bool: whether the player can quit or not", "id": "f624:c2:m6"}
{"signature": "@_check_player_is_active@_from_dbus_typedef fullscreen(self):", "body": "return self._root_interface_property('')", "docstring": "Returns:\n bool: whether the player is fullscreen or not", "id": "f624:c2:m7"}
{"signature": "@_check_player_is_active@_from_dbus_typedef can_set_fullscreen(self):", "body": "return self._root_interface_property('')", "docstring": "Returns:\n bool: whether the player can go fullscreen", "id": "f624:c2:m8"}
{"signature": "@_check_player_is_active@_from_dbus_typedef can_raise(self):", "body": "return self._root_interface_property('')", "docstring": "Returns:\n 
bool: whether the player can raise the display window atop of all other windows", "id": "f624:c2:m9"} {"signature": "@_check_player_is_active@_from_dbus_typedef has_track_list(self):", "body": "return self._root_interface_property('')", "docstring": "Returns:\n bool: whether the player has a track list or not", "id": "f624:c2:m10"} {"signature": "@_check_player_is_active@_from_dbus_typedef identity(self):", "body": "return self._root_interface_property('')", "docstring": "Returns:\n str: Returns `omxplayer`, the name of the player", "id": "f624:c2:m11"} {"signature": "@_check_player_is_active@_from_dbus_typedef supported_uri_schemes(self):", "body": "return self._root_interface_property('')", "docstring": "Returns:\n str: list of supported URI schemes\nExamples:\n >>> player.supported_uri_schemes()\n [\"file\", \"http\", \"rtsp\", \"rtmp\"]", "id": "f624:c2:m12"} {"signature": "@_check_player_is_active@_from_dbus_typedef can_go_next(self):", "body": "return self._player_interface_property('')", "docstring": "Returns:\n bool: whether the player can move to the next item in the playlist", "id": "f624:c2:m13"} {"signature": "@_check_player_is_active@_from_dbus_typedef can_go_previous(self):", "body": "return self._player_interface_property('')", "docstring": "Returns:\n bool: whether the player can move to the previous item in the\n playlist", "id": "f624:c2:m14"} {"signature": "@_check_player_is_active@_from_dbus_typedef can_seek(self):", "body": "return self._player_interface_property('')", "docstring": "Returns:\n bool: whether the player can seek", "id": "f624:c2:m15"} {"signature": "@_check_player_is_active@_from_dbus_typedef can_control(self):", "body": "return self._player_interface_property('')", "docstring": "Returns:\n bool: whether the player can be controlled", "id": "f624:c2:m16"} {"signature": "@_check_player_is_active@_from_dbus_typedef can_play(self):", "body": "return self._player_interface_property('')", "docstring": "Returns:\n bool: whether the player can play", "id": "f624:c2:m17"} {"signature": "@_check_player_is_active@_from_dbus_typedef can_pause(self):", "body": "return self._player_interface_property('')", "docstring": "Returns:\n bool: whether the player can pause", "id": "f624:c2:m18"} {"signature": "@_check_player_is_active@_from_dbus_typedef playback_status(self):", "body": "return self._player_interface_property('')", "docstring": "Returns:\n str: one of (\"Playing\" | \"Paused\" | \"Stopped\")", "id": "f624:c2:m19"} {"signature": "@_check_player_is_active@_from_dbus_typedef volume(self):", "body": "if self._is_muted:return return self._player_interface_property('')", "docstring": "Returns:\n float: current player volume", "id": "f624:c2:m20"} {"signature": "@_check_player_is_active@_from_dbus_typedef set_volume(self, volume):", "body": "if volume == :volume = return self._player_interface_property('', dbus.Double(volume))", "docstring": "Args:\n float: volume in the interval [0, 10]", "id": "f624:c2:m21"} {"signature": "@_check_player_is_active@_from_dbus_typedef _position_us(self):", "body": "return self._player_interface_property('')", "docstring": "Returns:\n int: position in microseconds", "id": "f624:c2:m22"} {"signature": "def position(self):", "body": "return self._position_us() / ( * )", "docstring": "Returns:\n int: position in seconds", "id": "f624:c2:m23"} {"signature": "@_check_player_is_active@_from_dbus_typedef minimum_rate(self):", "body": "return self._player_interface_property('')", "docstring": "Returns:\n float: minimum playback rate (as 
proportion of normal rate)", "id": "f624:c2:m24"} {"signature": "@_check_player_is_active@_from_dbus_typedef maximum_rate(self):", "body": "return self._player_interface_property('')", "docstring": "Returns:\n float: maximum playback rate (as proportion of normal rate)", "id": "f624:c2:m25"} {"signature": "@_check_player_is_active@_from_dbus_typedef rate(self):", "body": "return self._rate", "docstring": "Returns:\n float: playback rate, 1 is the normal rate, 2 would be double speed.", "id": "f624:c2:m26"} {"signature": "@_check_player_is_active@_from_dbus_typedef set_rate(self, rate):", "body": "self._rate = self._player_interface_property('', dbus.Double(rate))return self._rate", "docstring": "Set the playback rate of the video as a multiple of the default playback speed\n\nExamples:\n >>> player.set_rate(2)\n # Will play twice as fast as normal speed\n >>> player.set_rate(0.5)\n # Will play half speed", "id": "f624:c2:m27"} {"signature": "@_check_player_is_active@_from_dbus_typedef metadata(self):", "body": "return self._player_interface_property('')", "docstring": "Returns:\n dict: containing track information ('URI', 'length')\nExamples:\n >>> player.metadata()\n {\n 'mpris:length': 19691000,\n 'xesam:url': 'file:///home/name/path/to/media/file.mp4'\n }", "id": "f624:c2:m28"} {"signature": "@_check_player_is_active@_from_dbus_typedef aspect_ratio(self):", "body": "return self._player_interface_property('')", "docstring": "Returns:\n float: aspect ratio", "id": "f624:c2:m29"} {"signature": "@_check_player_is_active@_from_dbus_typedef video_stream_count(self):", "body": "return self._player_interface_property('')", "docstring": "Returns:\n int: number of video streams", "id": "f624:c2:m30"} {"signature": "@_check_player_is_active@_from_dbus_typedef width(self):", "body": "return self._player_interface_property('')", "docstring": "Returns:\n int: video width in px", "id": "f624:c2:m31"} {"signature": "@_check_player_is_active@_from_dbus_typedef height(self):", "body": "return self._player_interface_property('')", "docstring": "Returns:\n int: video height in px", "id": "f624:c2:m32"} {"signature": "@_check_player_is_active@_from_dbus_typedef _duration_us(self):", "body": "return self._player_interface_property('')", "docstring": "Returns:\n int: total length in microseconds", "id": "f624:c2:m33"} {"signature": "@_check_player_is_activedef duration(self):", "body": "return self._duration_us() / ( * )", "docstring": "Returns:\n float: duration in seconds", "id": "f624:c2:m34"} {"signature": "@_check_player_is_activedef pause(self):", "body": "self._player_interface.Pause()self._is_playing = Falseself.pauseEvent(self)", "docstring": "Pause playback", "id": "f624:c2:m35"} {"signature": "@_check_player_is_activedef play_pause(self):", "body": "self._player_interface.PlayPause()self._is_playing = not self._is_playingif self._is_playing:self.playEvent(self)else:self.pauseEvent(self)", "docstring": "Pause playback if currently playing, otherwise start playing if currently paused.", "id": "f624:c2:m36"} {"signature": "@_check_player_is_active@_from_dbus_typedef stop(self):", "body": "self._player_interface.Stop()self.stopEvent(self)", "docstring": "Stop the player, causing it to quit", "id": "f624:c2:m37"} {"signature": "@_check_player_is_active@_from_dbus_typedef seek(self, relative_position):", "body": "self._player_interface.Seek(Int64( * * relative_position))self.seekEvent(self, relative_position)", "docstring": "Seek the video by `relative_position` seconds\n\nArgs:\n relative_position 
(float): The position in seconds to seek to.", "id": "f624:c2:m38"} {"signature": "@_check_player_is_active@_from_dbus_typedef set_position(self, position):", "body": "self._player_interface.SetPosition(ObjectPath(\"\"), Int64(position * * ))self.positionEvent(self, position)", "docstring": "Set the video to playback position to `position` seconds from the start of the video\n\nArgs:\n position (float): The position in seconds.", "id": "f624:c2:m39"} {"signature": "@_check_player_is_active@_from_dbus_typedef set_alpha(self, alpha):", "body": "self._player_interface.SetAlpha(ObjectPath(''), Int64(alpha))", "docstring": "Set the transparency of the video overlay\n\nArgs:\n alpha (float): The transparency (0..255)", "id": "f624:c2:m40"} {"signature": "@_check_player_is_activedef mute(self):", "body": "self._is_muted = Trueself._player_interface.Mute()", "docstring": "Mute audio. If already muted, then this does not do anything", "id": "f624:c2:m41"} {"signature": "@_check_player_is_activedef unmute(self):", "body": "self._is_muted = Falseself._player_interface.Unmute()", "docstring": "Unmutes the video. If already unmuted, then this does not do anything", "id": "f624:c2:m42"} {"signature": "@_check_player_is_active@_from_dbus_typedef set_aspect_mode(self, mode):", "body": "self._player_interface.SetAspectMode(ObjectPath(''), String(mode))", "docstring": "Set the aspect mode of the video\n\nArgs:\n mode (str): One of (\"letterbox\" | \"fill\" | \"stretch\")", "id": "f624:c2:m43"} {"signature": "@_check_player_is_active@_from_dbus_typedef set_video_pos(self, x1, y1, x2, y2):", "body": "position = \"\" % (str(x1),str(y1),str(x2),str(y2))self._player_interface.VideoPos(ObjectPath(''), String(position))", "docstring": "Set the video position on the screen\n\nArgs:\n x1 (int): Top left x coordinate (px)\n y1 (int): Top left y coordinate (px)\n x2 (int): Bottom right x coordinate (px)\n y2 (int): Bottom right y coordinate (px)", "id": "f624:c2:m44"} {"signature": "@_check_player_is_activedef video_pos(self):", "body": "position_string = self._player_interface.VideoPos(ObjectPath(''))return list(map(int, position_string.split(\"\")))", "docstring": "Returns:\n (int, int, int, int): Video spatial position (x1, y1, x2, y2) where (x1, y1) is top left,\n and (x2, y2) is bottom right. 
All values in px.", "id": "f624:c2:m45"} {"signature": "@_check_player_is_active@_from_dbus_typedef set_video_crop(self, x1, y1, x2, y2):", "body": "crop = \"\" % (str(x1),str(y1),str(x2),str(y2))self._player_interface.SetVideoCropPos(ObjectPath(''), String(crop))", "docstring": "Args:\n x1 (int): Top left x coordinate (px)\n y1 (int): Top left y coordinate (px)\n x2 (int): Bottom right x coordinate (px)\n y2 (int): Bottom right y coordinate (px)", "id": "f624:c2:m46"} {"signature": "@_check_player_is_activedef hide_video(self):", "body": "self._player_interface.HideVideo()", "docstring": "Hides the video overlays", "id": "f624:c2:m47"} {"signature": "@_check_player_is_activedef show_video(self):", "body": "self._player_interface.UnHideVideo()", "docstring": "Shows the video (to undo a `hide_video`)", "id": "f624:c2:m48"} {"signature": "@_check_player_is_active@_from_dbus_typedef list_audio(self):", "body": "return self._player_interface.ListAudio()", "docstring": "Returns:\n [str]: A list of all known audio streams, each item is in the\n format: ``::::``", "id": "f624:c2:m49"} {"signature": "@_check_player_is_active@_from_dbus_typedef list_video(self):", "body": "return self._player_interface.ListVideo()", "docstring": "Returns:\n [str]: A list of all known video streams, each item is in the\n format: ``::::``", "id": "f624:c2:m50"} {"signature": "@_check_player_is_active@_from_dbus_typedef list_subtitles(self):", "body": "return self._player_interface.ListSubtitles()", "docstring": "Returns:\n [str]: A list of all known subtitles, each item is in the\n format: ``::::``", "id": "f624:c2:m51"} {"signature": "@_check_player_is_activedef select_subtitle(self, index):", "body": "return self._player_interface.SelectSubtitle(dbus.Int32(index))", "docstring": "Enable a subtitle specified by the index it is listed in :class:`list_subtitles`\n\nArgs:\n index (int): index of subtitle listing returned by :class:`list_subtitles`", "id": "f624:c2:m52"} {"signature": "@_check_player_is_activedef select_audio(self, index):", "body": "return self._player_interface.SelectAudio(dbus.Int32(index))", "docstring": "Select audio stream specified by the index of the stream in :class:`list_audio`\n\nArgs:\n index (int): index of audio stream returned by :class:`list_audio`", "id": "f624:c2:m53"} {"signature": "@_check_player_is_activedef show_subtitles(self):", "body": "return self._player_interface.ShowSubtitles()", "docstring": "Shows subtitles after :class:`hide_subtitles`", "id": "f624:c2:m54"} {"signature": "@_check_player_is_activedef hide_subtitles(self):", "body": "return self._player_interface.HideSubtitles()", "docstring": "Hide subtitles", "id": "f624:c2:m55"} {"signature": "@_check_player_is_active@_from_dbus_typedef action(self, code):", "body": "self._player_interface.Action(code)", "docstring": "Executes a keyboard command via a code\n\nArgs:\n code (int): The key code you wish to emulate\n refer to ``keys.py`` for the possible keys", "id": "f624:c2:m56"} {"signature": "@_check_player_is_active@_from_dbus_typedef is_playing(self):", "body": "self._is_playing = (self.playback_status() == \"\")logger.info(\"\" % self._is_playing)return self._is_playing", "docstring": "Returns:\n bool: Whether the player is playing", "id": "f624:c2:m57"} {"signature": "@_check_player_is_active@_from_dbus_typedef play_sync(self):", "body": "self.play()logger.info(\"\")try:time.sleep()logger.debug(\"\")while self.is_playing():time.sleep()except DBusException:logger.error(\"\")", "docstring": "Play the video and block 
whilst the video is playing", "id": "f624:c2:m58"} {"signature": "@_check_player_is_active@_from_dbus_typedef play(self):", "body": "if not self.is_playing():self.play_pause()self._is_playing = Trueself.playEvent(self)", "docstring": "Play the video asynchronously returning control immediately to the calling code", "id": "f624:c2:m59"} {"signature": "@_check_player_is_active@_from_dbus_typedef next(self):", "body": "return self._player_interface.Next()", "docstring": "Skip to the next chapter\n\nReturns:\n bool: Whether the player skipped to the next chapter", "id": "f624:c2:m60"} {"signature": "@_check_player_is_active@_from_dbus_typedef previous(self):", "body": "return self._player_interface.Previous()", "docstring": "Skip to the previous chapter\n\nReturns:\n bool: Whether the player skipped to the previous chapter", "id": "f624:c2:m61"} {"signature": "def quit(self):", "body": "if self._process is None:logger.debug('')returntry:logger.debug('')process_group_id = os.getpgid(self._process.pid)os.killpg(process_group_id, signal.SIGTERM)logger.debug('' % process_group_id)self._process_monitor.join()except OSError:logger.error('')self._process = None", "docstring": "Quit the player, blocking until the process has died", "id": "f624:c2:m68"} {"signature": "@_check_player_is_active@_from_dbus_typedef get_source(self):", "body": "return self._source", "docstring": "Get the source URI of the currently playing media\n\nReturns:\n str: source currently playing", "id": "f624:c2:m69"} {"signature": "@_check_player_is_active@_from_dbus_typedef get_filename(self):", "body": "return self.get_source()", "docstring": "Returns:\n str: source currently playing\n\n.. deprecated:: 0.2.0\n Use: :func:`get_source` instead.", "id": "f624:c2:m70"} {"signature": "def find_address_file(self):", "body": "possible_address_files = []while not possible_address_files:isnt_pid_file = lambda path: not path.endswith('')possible_address_files = list(filter(isnt_pid_file,glob('')))possible_address_files.sort(key=lambda path: os.path.getmtime(path))time.sleep()self.path = possible_address_files[-]", "docstring": "Finds the OMXPlayer DBus connection\nAssumes there is an alive OMXPlayer process.\n:return:", "id": "f628:c0:m2"} {"signature": "def exit_on_keyboard_interrupt(f):", "body": "@wraps(f)def wrapper(*args, **kwargs):raise_exception = kwargs.pop('', False)try:return f(*args, **kwargs)except KeyboardInterrupt:if not raise_exception:sys.exit()raise KeyboardInterruptreturn wrapper", "docstring": "Decorator that allows user to exit script by sending a keyboard interrupt\n (ctrl + c) without raising an exception.", "id": "f635:m0"} {"signature": "def get_operator(self, op):", "body": "if op in self.OPERATORS:return self.OPERATORS.get(op)try:n_args = len(inspect.getargspec(op)[])if n_args != :raise TypeErrorexcept:eprint('')raiseelse:return op", "docstring": "Assigns function to the operators property of the instance.", "id": "f635:c0:m1"} {"signature": "def assign_prompter(self, prompter):", "body": "if is_string(prompter):if prompter not in prompters:eprint(\"\".format(prompter))sys.exit()self.prompter = prompters[prompter]else:self.prompter = prompter", "docstring": "If you want to change the core prompters registry, you can\n override this method in a Question subclass.", "id": "f635:c1:m1"} {"signature": "def add(self, *args, **kwargs):", "body": "if '' in kwargs and isinstance(kwargs[''], Question):question = kwargs['']else:question = Question(*args, **kwargs)self.questions.setdefault(question.key, 
[]).append(question)return question", "docstring": "Add a Question instance to the questions dict. Each key points\n to a list of Question instances with that key. Use the `question`\n kwarg to pass a Question instance if you want, or pass in the same\n args you would pass to instantiate a question.", "id": "f635:c2:m1"} {"signature": "def remove(self, key):", "body": "return self.questions.pop(key)", "docstring": "Remove all questions associated with `key`. Raises exception if `key`\n doesn't exist.", "id": "f635:c2:m5"} {"signature": "def run(self):", "body": "while not self.done:self.ask()return self.answers", "docstring": "Asks all remaining questions in the questionnaire, returns the answers.", "id": "f635:c2:m6"} {"signature": "@exit_on_keyboard_interruptdef ask(self, error=None):", "body": "q = self.next_questionif q is None:returntry:answer = q.prompter(self.get_prompt(q, error), *q.prompter_args, **q.prompter_kwargs)except QuestionnaireGoBack as e:steps = e.args[] if e.args else if steps == :self.ask() returnself.go_back(steps)else:if q._validate:error = q._validate(answer)if error:self.ask(error)returnif q._transform:answer = q._transform(answer)self.answers[q.key] = answerreturn answer", "docstring": "Asks the next question in the questionnaire and returns the answer,\n unless user goes back.", "id": "f635:c2:m7"} {"signature": "@propertydef next_question(self):", "body": "for key, questions in self.questions.items():if key in self.answers:continuefor question in questions:if self.check_condition(question._condition):return questionreturn None", "docstring": "Returns the next `Question` in the questionnaire, or `None` if there\n are no questions left. Returns first question for whose key there is no\n answer and for which condition is satisfied, or for which there is no\n condition.", "id": "f635:c2:m9"} {"signature": "def check_condition(self, condition):", "body": "if not condition:return Truefor c in condition.conditions:key, value, operator = cif not operator(self.answers[key], value):return Falsereturn True", "docstring": "Helper that returns True if condition is satisfied/doesn't exist.", "id": "f635:c2:m10"} {"signature": "def go_back(self, n=):", "body": "if not self.can_go_back:returnN = max(len(self.answers)-abs(n), )self.answers = OrderedDict(islice(self.answers.items(), N))", "docstring": "Move `n` questions back in the questionnaire by removing the last `n`\n answers.", "id": "f635:c2:m11"} {"signature": "def format_answers(self, fmt=''):", "body": "fmts = ('', '', '')if fmt not in fmts:eprint(\"\".format(fmt, fmts))returndef stringify(val):if type(val) in (list, tuple):return ''.join(str(e) for e in val)return valif fmt == '':return json.dumps(self.answers)elif fmt == '':answers = [[k, v] for k, v in self.answers.items()]return json.dumps(answers)elif fmt == '':answers = ''.join(''.format(k, stringify(v)) for k, v in self.answers.items())return answers", "docstring": "Formats answers depending on `fmt`.", "id": "f635:c2:m14"} {"signature": "def answer_display(self, s=''):", "body": "padding = len(max(self.questions.keys(), key=len)) + for key in list(self.answers.keys()):s += ''.format(key, padding, self.answers[key])return s", "docstring": "Helper method for displaying the answers so far.", "id": "f635:c2:m15"} {"signature": "def register(key=''):", "body": "def decorate(func):prompters[key] = funcreturn funcreturn decorate", "docstring": "Add decorated functions to prompters dict.", "id": "f636:m2"} {"signature": "@register(key='')def one(prompt, *args, 
**kwargs):", "body": "indicator = ''if sys.version_info < (, ):indicator = '>'def go_back(picker):return None, -options, verbose_options = prepare_options(args)idx = kwargs.get('', )picker = Picker(verbose_options, title=prompt, indicator=indicator, default_index=idx)picker.register_custom_handler(ord(''), go_back)picker.register_custom_handler(curses.KEY_LEFT, go_back)with stdout_redirected(sys.stderr):option, index = picker.start()if index == -:raise QuestionnaireGoBackif kwargs.get('', False):return indexreturn options[index]", "docstring": "Instantiates a picker, registers custom handlers for going back,\n and starts the picker.", "id": "f636:m3"} {"signature": "@register(key='')def many(prompt, *args, **kwargs):", "body": "def get_options(options, chosen):return [options[i] for i, c in enumerate(chosen) if c]def get_verbose_options(verbose_options, chosen):no, yes = '', ''if sys.version_info < (, ):no, yes = '', ''opts = [''.format(yes if c else no, verbose_options[i]) for i, c in enumerate(chosen)]return opts + [''.format('', kwargs.get('', ''))]options, verbose_options = prepare_options(args)chosen = [False] * len(options)index = kwargs.get('', )default = kwargs.get('', None)if isinstance(default, list):for idx in default:chosen[idx] = Trueif isinstance(default, int):chosen[default] = Truewhile True:try:index = one(prompt, *get_verbose_options(verbose_options, chosen), return_index=True, idx=index)except QuestionnaireGoBack:if any(chosen):raise QuestionnaireGoBack()else:raise QuestionnaireGoBackif index == len(options):return get_options(options, chosen)chosen[index] = not chosen[index]", "docstring": "Calls `pick` in a while loop to allow user to pick many\n options. Returns a list of chosen options.", "id": "f636:m4"} {"signature": "def prepare_options(options):", "body": "options_, verbose_options = [], []for option in options:if is_string(option):options_.append(option)verbose_options.append(option)else:options_.append(option[])verbose_options.append(option[])return options_, verbose_options", "docstring": "Create options and verbose options from strings and non-string iterables in\n `options` array.", "id": "f636:m5"} {"signature": "@register(key='')def raw(prompt, *args, **kwargs):", "body": "go_back = kwargs.get('', '')type_ = kwargs.get('', str)default = kwargs.get('', '')with stdout_redirected(sys.stderr):while True:try:if kwargs.get('', False):answer = getpass.getpass(prompt)elif sys.version_info < (, ):answer = raw_input(prompt)else:answer = input(prompt)if not answer:answer = defaultif answer == go_back:raise QuestionnaireGoBackreturn type_(answer)except ValueError:eprint(''.format(answer, type_))", "docstring": "Calls input to allow user to input an arbitrary string. User can go\n back by entering the `go_back` string. Works in both Python 2 and 3.", "id": "f636:m6"} {"signature": "@contextmanagerdef stdout_redirected(to):", "body": "stdout = sys.stdoutstdout_fd = fileno(stdout)with os.fdopen(os.dup(stdout_fd), '') as copied:stdout.flush() try:os.dup2(fileno(to), stdout_fd) except ValueError: with open(to, '') as to_file:os.dup2(to_file.fileno(), stdout_fd) try:yield stdout finally:stdout.flush()os.dup2(copied.fileno(), stdout_fd)", "docstring": "Lifted from: https://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python\n\n This is the only way I've found to redirect stdout with curses. 
This way the\n output from questionnaire can be piped to another program, without piping\n what's written to the terminal by the prompters.", "id": "f636:m7"} {"signature": "def cwd_at(path):", "body": "def decorator(func):@functools.wraps(func)def wrapper(*args, **kwds):try:oldcwd = os.getcwd()repo_root = os.path.dirname(test_dir)os.chdir(os.path.join(repo_root, path))return func(*args, **kwds)finally:os.chdir(oldcwd)return wrapperreturn decorator", "docstring": "Decorator to run function at `path`.\n\n:type path: str\n:arg path: relative path from repository root (e.g., 'pyqode' or 'test').", "id": "f656:m0"} {"signature": "def delete_file_on_return(path):", "body": "def decorator(func):@functools.wraps(func)def wrapper(*args, **kwds):try:return func(*args, **kwds)finally:try:os.remove(path)except (IOError, OSError):passreturn wrapperreturn decorator", "docstring": "Decorator to run function at `path`.\n\n:type path: str\n:arg path: relative path from repository root (e.g., 'pyqode' or 'test').", "id": "f656:m1"} {"signature": "def foo():", "body": "pass", "docstring": "Foo", "id": "f660:m0"} {"signature": "def spam(self):", "body": "pass", "docstring": "Spam", "id": "f660:c0:m1"} {"signature": "def setup_actions(self):", "body": "self.actionOpen.triggered.connect(self.on_open)self.actionNew.triggered.connect(self.on_new)self.actionSave.triggered.connect(self.on_save)self.actionSave_as.triggered.connect(self.on_save_as)self.actionQuit.triggered.connect(QtWidgets.QApplication.instance().quit)self.tabWidget.current_changed.connect(self.on_current_tab_changed)self.tabWidget.last_tab_closed.connect(self.on_last_tab_closed)self.actionAbout.triggered.connect(self.on_about)self.actionRun.triggered.connect(self.on_run)self.interactiveConsole.process_finished.connect(self.on_process_finished)self.actionConfigure_run.triggered.connect(self.on_configure_run)", "docstring": "Connects slots to signals", "id": "f666:c0:m3"} {"signature": "def setup_recent_files_menu(self):", "body": "self.recent_files_manager = widgets.RecentFilesManager('', '')self.menu_recents = widgets.MenuRecentFiles(self.menuFile, title='',recent_files_manager=self.recent_files_manager)self.menu_recents.open_requested.connect(self.open_file)self.menuFile.insertMenu(self.actionSave, self.menu_recents)self.menuFile.insertSeparator(self.actionSave)", "docstring": "Setup the recent files menu and manager", "id": "f666:c0:m5"} {"signature": "def closeEvent(self, QCloseEvent):", "body": "self.tabWidget.closeEvent(QCloseEvent)", "docstring": "Delegates the close event to the tabWidget to be sure we do not quit\nthe application while there are some still some unsaved tabs.", "id": "f666:c0:m6"} {"signature": "def setup_editor(self, editor):", "body": "editor.cursorPositionChanged.connect(self.on_cursor_pos_changed)try:m = editor.modes.get(modes.GoToAssignmentsMode)except KeyError:passelse:assert isinstance(m, modes.GoToAssignmentsMode)m.out_of_doc.connect(self.on_goto_out_of_doc)", "docstring": "Setup the python editor, run the server and connect a few signals.\n\n:param editor: editor to setup.", "id": "f666:c0:m7"} {"signature": "def open_file(self, path, line=None):", "body": "editor = Noneif path:interpreter, pyserver, args = self._get_backend_parameters()editor = self.tabWidget.open_document(path, None, interpreter=interpreter, server_script=pyserver,args=args)if editor:self.setup_editor(editor)self.recent_files_manager.open_file(path)self.menu_recents.update_actions()if line is not 
None:TextHelper(self.tabWidget.current_widget()).goto_line(line)return editor", "docstring": "Creates a new GenericCodeEdit, opens the requested file and adds it\nto the tab widget.\n\n:param path: Path of the file to open\n\n:return The opened editor if open succeeded.", "id": "f666:c0:m8"} {"signature": "def _get_backend_parameters(self):", "body": "frozen = hasattr(sys, '')interpreter = Settings().interpreterif frozen:interpreter = Nonepyserver = server.__file__ if interpreter is not None else ''args = []return interpreter, pyserver, args", "docstring": "Gets the pyqode backend parameters (interpreter and script).", "id": "f666:c0:m9"} {"signature": "def on_new(self):", "body": "interpreter, pyserver, args = self._get_backend_parameters()self.setup_editor(self.tabWidget.create_new_document(extension='', interpreter=interpreter, server_script=pyserver,args=args))self.actionRun.setDisabled(True)self.actionConfigure_run.setDisabled(True)", "docstring": "Add a new empty code editor to the tab widget", "id": "f666:c0:m10"} {"signature": "def on_open(self):", "body": "filename, filter = QtWidgets.QFileDialog.getOpenFileName(self, '')if filename:self.open_file(filename)self.actionRun.setEnabled(True)self.actionConfigure_run.setEnabled(True)", "docstring": "Shows an open file dialog and open the file if the dialog was\naccepted.", "id": "f666:c0:m11"} {"signature": "def on_save_as(self):", "body": "path = self.tabWidget.current_widget().file.pathpath = os.path.dirname(path) if path else ''filename, filter = QtWidgets.QFileDialog.getSaveFileName(self, '', path)if filename:self.tabWidget.save_current(filename)self.recent_files_manager.open_file(filename)self.menu_recents.update_actions()self.actionRun.setEnabled(True)self.actionConfigure_run.setEnabled(True)self._update_status_bar(self.tabWidget.current_widget())", "docstring": "Save the current editor document as.", "id": "f666:c0:m13"} {"signature": "def setup_mnu_edit(self, editor):", "body": "self.menuEdit.addActions(editor.actions())self.menuEdit.addSeparator()self.setup_mnu_style(editor)", "docstring": "Setup the edit menu for the current editor. 
We show the current editor\ncontext menu and a menu to change the python interpreter.\n\n:param editor: new editor", "id": "f666:c0:m14"} {"signature": "def setup_mnu_style(self, editor):", "body": "menu = QtWidgets.QMenu('', self.menuEdit)group = QtWidgets.QActionGroup(self)self.styles_group = groupcurrent_style = editor.syntax_highlighter.color_scheme.namegroup.triggered.connect(self.on_style_changed)for s in sorted(PYGMENTS_STYLES):a = QtWidgets.QAction(menu)a.setText(s)a.setCheckable(True)if s == current_style:a.setChecked(True)group.addAction(a)menu.addAction(a)self.menuEdit.addMenu(menu)", "docstring": "setup the style menu for an editor tab", "id": "f666:c0:m15"} {"signature": "def setup_mnu_panels(self, editor):", "body": "for panel in editor.panels:if panel.dynamic:continuea = QtWidgets.QAction(self.menuModes)a.setText(panel.name)a.setCheckable(True)a.setChecked(panel.enabled)a.changed.connect(self.on_panel_state_changed)a.panel = weakref.proxy(panel)self.menuPanels.addAction(a)", "docstring": "Setup the panels menu for the current editor.\n:param editor:", "id": "f666:c0:m17"} {"signature": "def on_current_tab_changed(self):", "body": "self.menuEdit.clear()self.menuModes.clear()self.menuPanels.clear()editor = self.tabWidget.current_widget()self.menuEdit.setEnabled(editor is not None)self.menuModes.setEnabled(editor is not None)self.menuPanels.setEnabled(editor is not None)self.actionSave.setEnabled(editor is not None)self.actionSave_as.setEnabled(editor is not None)self.actionConfigure_run.setEnabled(editor is not None)self.actionRun.setEnabled(editor is not None)if editor is not None:self.setup_mnu_edit(editor)self.setup_mnu_modes(editor)self.setup_mnu_panels(editor)self.widgetOutline.set_editor(editor)self._update_status_bar(editor)", "docstring": "Update action states when the current tab changed.", "id": "f666:c0:m19"} {"signature": "def on_panel_state_changed(self):", "body": "action = self.sender()action.panel.enabled = action.isChecked()action.panel.setVisible(action.isChecked())", "docstring": "Enable disable the selected panel.", "id": "f666:c0:m23"} {"signature": "def on_mode_state_changed(self):", "body": "action = self.sender()action.mode.enabled = action.isChecked()", "docstring": "Enable/Disable the selected mode", "id": "f666:c0:m24"} {"signature": "def on_about(self):", "body": "QtWidgets.QMessageBox.about(self, '','''')", "docstring": "Show about dialog", "id": "f666:c0:m25"} {"signature": "def on_run(self):", "body": "filename = self.tabWidget.current_widget().file.pathwd = os.path.dirname(filename)args = Settings().get_run_config_for_file(filename)self.interactiveConsole.start_process(Settings().interpreter, args=[filename] + args, cwd=wd)self.dockWidget.show()self.actionRun.setEnabled(False)self.actionConfigure_run.setEnabled(False)", "docstring": "Run the current current script", "id": "f666:c0:m26"} {"signature": "def on_goto_out_of_doc(self, assignment):", "body": "editor = self.open_file(assignment.module_path)if editor:TextHelper(editor).goto_line(assignment.line, assignment.column)", "docstring": "Open the a new tab when goto goes out of the current document.\n\n:param assignment: Destination", "id": "f666:c0:m27"} {"signature": "@propertydef run_configs(self):", "body": "string = self.settings.value('', '')return json.loads(string)", "docstring": "Returns the dictionary of run configurations. 
A run configuration is\njust a list of arguments to append to the run command.\n\nThis is internally stored as a json object", "id": "f671:c0:m3"} {"signature": "def read_version():", "body": "with open(\"\") as f:lines = f.read().splitlines()for l in lines:if \"\" in l:return l.split(\"\")[].strip().replace('', \"\")", "docstring": "Reads the version without self importing", "id": "f674:m0"} {"signature": "def detect_fold_level(self, prev_block, block):", "body": "lvl = super(PythonFoldDetector, self).detect_fold_level(prev_block, block)prev_lvl = TextBlockHelper.get_fold_lvl(prev_block)if prev_block and lvl > prev_lvl and not (self._strip_comments(prev_block).endswith('')):lvl = prev_lvllvl = self._handle_docstrings(block, lvl, prev_block)lvl = self._handle_imports(block, lvl, prev_block)return lvl", "docstring": "Perfoms fold level detection for current block (take previous block\ninto account).\n\n:param prev_block: previous block, None if `block` is the first block.\n:param block: block to analyse.\n:return: block fold level", "id": "f694:c0:m3"} {"signature": "def any(name, alternates):", "body": "return \"\" % name + \"\".join(alternates) + \"\"", "docstring": "Return a named group pattern matching list of alternates.", "id": "f695:m0"} {"signature": "def make_python_patterns(additional_keywords=[], additional_builtins=[]):", "body": "kw = r\"\" + any(\"\", kwlist + additional_keywords) + r\"\"kw_namespace = r\"\" + any(\"\", kw_namespace_list) + r\"\"word_operators = r\"\" + any(\"\", wordop_list) + r\"\"builtinlist = [str(name) for name in dir(builtins)if not name.startswith('')] + additional_builtinsfor v in ['', '', '']:builtinlist.remove(v)builtin = r\"\" + any(\"\", builtinlist) + r\"\"builtin_fct = any(\"\", [r''])comment = any(\"\", [r\"\"])instance = any(\"\", [r\"\", r\"\"])decorator = any('', [r'', r''])number = any(\"\",[r\"\",r\"\",r\"\",r\"\",r\"\"])sqstring = r\"\"dqstring = r''uf_sqstring = r\"\"uf_dqstring = r''sq3string = r\"\"dq3string = r''uf_sq3string = r\"\"uf_dq3string = r''string = any(\"\", [sq3string, dq3string, sqstring, dqstring])ufstring1 = any(\"\", [uf_sqstring])ufstring2 = any(\"\", [uf_dqstring])ufstring3 = any(\"\", [uf_sq3string])ufstring4 = any(\"\", [uf_dq3string])return \"\".join([instance, decorator, kw, kw_namespace, builtin,word_operators, builtin_fct, comment,ufstring1, ufstring2, ufstring3, ufstring4, string,number, any(\"\", [r\"\"])])", "docstring": "Strongly inspired from idlelib.ColorDelegator.make_pat", "id": "f695:m1"} {"signature": "def _handle_indent_between_paren(self, column, line, parent_impl, tc):", "body": "pre, post = parent_implnext_char = self._get_next_char(tc)prev_char = self._get_prev_char(tc)prev_open = prev_char in ['', '', '']next_close = next_char in ['', '', ''](open_line, open_symbol_col), (close_line, close_col) =self._get_paren_pos(tc, column)open_line_txt = self._helper.line_text(open_line)open_line_indent = len(open_line_txt) - len(open_line_txt.lstrip())if prev_open:post = (open_line_indent + self.editor.tab_length) * ''elif next_close and prev_char != '':post = open_line_indent * ''elif tc.block().blockNumber() == open_line:post = open_symbol_col * ''if close_line and close_col:txt = self._helper.line_text(close_line)bn = tc.block().blockNumber()flg = bn == close_linenext_indent = self._helper.line_indent(bn + ) * ''if flg and txt.strip().endswith('') and next_indent == post:post += self.editor.tab_length * ''if next_char in ['', \"\"]:tc.movePosition(tc.Left)is_string = self._helper.is_comment_or_string(tc, 
formats=[''])if next_char in ['', \"\"]:tc.movePosition(tc.Right)if is_string:trav = QTextCursor(tc)while self._helper.is_comment_or_string(trav, formats=['']):trav.movePosition(trav.Left)trav.movePosition(trav.Right)symbol = '' % self._get_next_char(trav)pre += symbolpost += symbolreturn pre, post", "docstring": "Handle indent between symbols such as parenthesis, braces,...", "id": "f697:c0:m16"} {"signature": "@staticmethoddef _at_block_start(tc, line):", "body": "if tc.atBlockStart():return Truecolumn = tc.columnNumber()indentation = len(line) - len(line.lstrip())return column <= indentation", "docstring": "Improve QTextCursor.atBlockStart to ignore spaces", "id": "f697:c0:m17"} {"signature": "@propertydef tab_always_indent(self):", "body": "return self._tab_always_indent", "docstring": "When this flag is set to True, any call to indent will indent the whole\nline instead of inserting a tab at the cursor position.", "id": "f698:c0:m0"} {"signature": "def indent(self):", "body": "if not self.tab_always_indent:super(PyIndenterMode, self).indent()else:cursor = self.editor.textCursor()assert isinstance(cursor, QtGui.QTextCursor)if cursor.hasSelection():self.indent_selection(cursor)else:tab_len = self.editor.tab_lengthcursor.beginEditBlock()if self.editor.use_spaces_instead_of_tabs:cursor.insertText(tab_len * \"\")else:cursor.insertText('')cursor.endEditBlock()self.editor.setTextCursor(cursor)", "docstring": "Performs an indentation", "id": "f698:c0:m3"} {"signature": "def unindent(self):", "body": "if self.tab_always_indent:cursor = self.editor.textCursor()if not cursor.hasSelection():cursor.select(cursor.LineUnderCursor)self.unindent_selection(cursor)else:super(PyIndenterMode, self).unindent()", "docstring": "Performs an un-indentation", "id": "f698:c0:m4"} {"signature": "def on_state_changed(self, state):", "body": "if state:self.action.triggered.connect(self.comment)self.editor.add_action(self.action, sub_menu='')if '' in os.environ[''].lower():self.editor.key_pressed.connect(self.on_key_pressed)else:self.editor.remove_action(self.action, sub_menu='')self.action.triggered.disconnect(self.comment)if '' in os.environ[''].lower():self.editor.key_pressed.disconnect(self.on_key_pressed)", "docstring": "Called when the mode is activated/deactivated", "id": "f700:c0:m1"} {"signature": "def comment(self):", "body": "cursor = self.editor.textCursor()indent, comment, nb_lines = self.get_operation()has_selection = cursor.hasSelection()if nb_lines > :self._move_cursor_to_selection_start(cursor)cursor.beginEditBlock()for i in range(nb_lines):self.comment_line(indent, cursor, comment)cursor.movePosition(cursor.NextBlock)cursor.endEditBlock()else:cursor.beginEditBlock()self.comment_line(indent, cursor, comment)if not has_selection:cursor.movePosition(cursor.NextBlock)text = cursor.block().text()indent = len(text) - len(text.lstrip())cursor.movePosition(cursor.Right, cursor.MoveAnchor, indent)cursor.endEditBlock()self.editor.setTextCursor(cursor)else:cursor.endEditBlock()", "docstring": "Comments/Uncomments the selected lines or the current lines if there\nis no selection.", "id": "f700:c0:m6"} {"signature": "def request_goto(self):", "body": "self._goto_requested = Trueself._check_word_cursor()", "docstring": "Request a goto action for the word under the text cursor.", "id": "f704:c1:m2"} {"signature": "def _check_word_cursor(self, tc=None):", "body": "if not tc:tc = TextHelper(self.editor).word_under_cursor()request_data = {'': self.editor.toPlainText(),'': tc.blockNumber(),'': tc.columnNumber(),'': 
self.editor.file.path,'': self.editor.file.encoding}try:self.editor.backend.send_request(workers.goto_assignments, request_data,on_receive=self._on_results_available)except NotRunning:pass", "docstring": "Request a go to assignment.\n\n:param tc: Text cursor which contains the text that we must look for\n its assignment. Can be None to go to the text that is under\n the text cursor.\n:type tc: QtGui.QTextCursor", "id": "f704:c1:m3"} {"signature": "def _unique(self, seq):", "body": "checked = []for e in seq:present = Falsefor c in checked:if str(c) == str(e):present = Truebreakif not present:checked.append(e)return checked", "docstring": "Not performant but works.", "id": "f704:c1:m5"} {"signature": "def detect_encoding(self, path):", "body": "with open(path, '') as file:source = file.read()source = source.replace(b'', b'')source_str = str(source).replace('', '')byte_mark = ast.literal_eval(r\"\")if source.startswith(byte_mark):return ''first_two_lines = re.match(r'', source_str).group()possible_encoding = re.search(r\"\",first_two_lines)if possible_encoding:return possible_encoding.group()return ''", "docstring": "For the implementation of encoding definitions in Python, look at:\n- http://www.python.org/dev/peps/pep-0263/\n\n.. note:: code taken and adapted from\n ```jedi.common.source_to_unicode.detect_encoding```", "id": "f706:c0:m0"} {"signature": "def update_terminal_colors(self):", "body": "self.color_scheme = self.create_color_scheme(background=self.syntax_highlighter.color_scheme.background,foreground=self.syntax_highlighter.color_scheme.formats[''].foreground().color())", "docstring": "Update terminal color scheme based on the pygments color scheme colors", "id": "f708:c2:m3"} {"signature": "def mouseMoveEvent(self, e):", "body": "super(PyInteractiveConsole, self).mouseMoveEvent(e)cursor = self.cursorForPosition(e.pos())assert isinstance(cursor, QtGui.QTextCursor)p = cursor.positionInBlock()usd = cursor.block().userData()if usd and usd.start_pos_in_block <= p <= usd.end_pos_in_block:if QtWidgets.QApplication.overrideCursor() is None:QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))else:if QtWidgets.QApplication.overrideCursor() is not None:QtWidgets.QApplication.restoreOverrideCursor()", "docstring": "Extends mouseMoveEvent to display a pointing hand cursor when the\nmouse cursor is over a file location", "id": "f709:c0:m3"} {"signature": "def mousePressEvent(self, e):", "body": "super(PyInteractiveConsole, self).mousePressEvent(e)cursor = self.cursorForPosition(e.pos())p = cursor.positionInBlock()usd = cursor.block().userData()if usd and usd.start_pos_in_block <= p <= usd.end_pos_in_block:if e.button() == QtCore.Qt.LeftButton:self.open_file_requested.emit(usd.filename, usd.line)", "docstring": "Emits open_file_requested if the press event occured over\na file location string.", "id": "f709:c0:m4"} {"signature": "def setPlainText(self, txt, mimetype='', encoding=''):", "body": "try:self.syntax_highlighter.docstrings[:] = []self.syntax_highlighter.import_statements[:] = []except AttributeError:passsuper(PyCodeEditBase, self).setPlainText(txt, mimetype, encoding)", "docstring": "Extends QCodeEdit.setPlainText to allow user to setPlainText without\nmimetype (since the python syntax highlighter does not use it).", "id": "f711:c0:m1"} {"signature": "def _logger():", "body": "return logging.getLogger(__name__)", "docstring": "Returns the module's logger", "id": "f714:m0"} {"signature": "def calltips(request_data):", "body": "code = 
request_data['']line = request_data[''] + column = request_data['']path = request_data['']encoding = ''script = jedi.Script(code, line, column, path, encoding)signatures = script.call_signatures()for sig in signatures:results = (str(sig.module_name), str(sig.name),[p.description for p in sig.params], sig.index,sig.bracket_start, column)return resultsreturn []", "docstring": "Worker that returns a list of calltips.\n\nA calltips is a tuple made of the following parts:\n - module_name: name of the module of the function invoked\n - call_name: name of the function that is being called\n - params: the list of parameter names.\n - index: index of the current parameter\n - bracket_start\n\n:returns tuple(module_name, call_name, params)", "id": "f714:m1"} {"signature": "def goto_assignments(request_data):", "body": "code = request_data['']line = request_data[''] + column = request_data['']path = request_data['']encoding = ''script = jedi.Script(code, line, column, path, encoding)try:definitions = script.goto_assignments()except jedi.NotFoundError:passelse:ret_val = [(d.module_path, d.line - if d.line else None,d.column, d.full_name)for d in definitions]return ret_val", "docstring": "Go to assignements worker.", "id": "f714:m2"} {"signature": "def defined_names(request_data):", "body": "global _old_definitionsret_val = []path = request_data['']toplvl_definitions = jedi.names(request_data[''], path, '')for d in toplvl_definitions:definition = _extract_def(d, path)if d.type != '':ret_val.append(definition)ret_val = [d.to_dict() for d in ret_val]return ret_val", "docstring": "Returns the list of defined names for the document.", "id": "f714:m4"} {"signature": "def quick_doc(request_data):", "body": "code = request_data['']line = request_data[''] + column = request_data['']path = request_data['']encoding = ''script = jedi.Script(code, line, column, path, encoding)try:definitions = script.goto_definitions()except jedi.NotFoundError:return []else:ret_val = [d.docstring() for d in definitions]return ret_val", "docstring": "Worker that returns the documentation of the symbol under cursor.", "id": "f714:m5"} {"signature": "def run_pep8(request_data):", "body": "import pycodestylefrom pyqode.python.backend.pep8utils import CustomCheckerWARNING = code = request_data['']path = request_data['']max_line_length = request_data['']ignore_rules = request_data['']ignore_rules += ['', '', '', '']pycodestyle.MAX_LINE_LENGTH = max_line_lengthpep8style = pycodestyle.StyleGuide(parse_argv=False, config_file='',checker_class=CustomChecker)try:results = pep8style.input_file(path, lines=code.splitlines(True))except Exception:_logger().exception(''% request_data)return []else:messages = []for line_number, offset, code, text, doc in results:if code in ignore_rules:continuemessages.append(('' % (code, text), WARNING,line_number - ))return messages", "docstring": "Worker that run the pep8 tool on the current editor text.\n\n:returns a list of tuples (msg, msg_type, line_number)", "id": "f714:m6"} {"signature": "def run_pyflakes(request_data):", "body": "global prev_resultsfrom pyflakes import checkerimport _astWARNING = ERROR = ret_val = []code = request_data['']path = request_data['']encoding = request_data['']if not encoding:encoding = ''if not path:path = os.path.join(tempfile.gettempdir(), '')if not code:return []else:try:tree = compile(code.encode(encoding), path, \"\",_ast.PyCF_ONLY_AST)except SyntaxError as value:msg = '' % value.args[](lineno, offset, text) = value.lineno - , value.offset, value.textif text is 
None:_logger().warning(\"\",path)else:ret_val.append((msg, ERROR, lineno))else:w = checker.Checker(tree, os.path.split(path)[])w.messages.sort(key=lambda m: m.lineno)for message in w.messages:msg = \"\" % str(message).split('')[-].strip()line = message.lineno - status = WARNINGif message.__class__ not in PYFLAKES_ERROR_MESSAGESelse ERRORret_val.append((msg, status, line))prev_results = ret_valreturn ret_val", "docstring": "Worker that run a frosted (the fork of pyflakes) code analysis on the\ncurrent editor text.", "id": "f714:m7"} {"signature": "def icon_from_typename(name, icon_type):", "body": "ICONS = {'': ICON_CLASS,'': ICON_NAMESPACE,'': ICON_VAR,'': ICON_VAR,'': ICON_VAR,'': ICON_VAR,'': ICON_VAR,'': ICON_NAMESPACE,'': ICON_KEYWORD,'': ICON_VAR,'': ICON_VAR,'': ICON_VAR,'': ICON_VAR,'': ICON_VAR,'': ICON_VAR,'': ICON_FUNC,'': ICON_FUNC,'': ICON_FUNC_PRIVATE,'': ICON_FUNC_PROTECTED}ret_val = Noneicon_type = icon_type.upper()if hasattr(name, \"\"):name = name.stringif icon_type == \"\" or icon_type == \"\":icon_type = \"\"if icon_type == \"\" or icon_type == \"\":if name.startswith(\"\"):icon_type += \"\"elif name.startswith(\"\"):icon_type += \"\"if icon_type in ICONS:ret_val = ICONS[icon_type]elif icon_type:_logger().warning(\"\", icon_type)return ret_val", "docstring": "Returns the icon resource filename that corresponds to the given typename.\n\n:param name: name of the completion. Use to make the distinction between\n public and private completions (using the count of starting '_')\n:pram typename: the typename reported by jedi\n\n:returns: The associate icon resource filename or None.", "id": "f714:m8"} {"signature": "@staticmethoddef complete(code, line, column, path, encoding, prefix):", "body": "ret_val = []try:script = jedi.Script(code, line + , column, path, encoding)completions = script.completions()print('' % completions)except jedi.NotFoundError:completions = []for completion in completions:ret_val.append({'': completion.name,'': icon_from_typename(completion.name, completion.type),'': completion.description})return ret_val", "docstring": "Completes python code using `jedi`_.\n\n:returns: a list of completion.", "id": "f714:c0:m0"} {"signature": "def __init__(self, pidfile):", "body": "self._pidfile = pidfileself.pidfile = None", "docstring": "Create a new instance", "id": "f731:c0:m0"} {"signature": "def acquire(self):", "body": "try:pidfile = open(self._pidfile, \"\")except IOError as err:raise SystemExit(err)try:fcntl.flock(pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)except IOError:raise SystemExit('' + self._pidfile)pidfile.seek()pidfile.truncate()pidfile.write(str(os.getpid()) + '')pidfile.flush()self.pidfile = pidfileatexit.register(self.release)", "docstring": "Acquire the pidfile.\n\n Create the pidfile, lock it, write the pid into it\n and register the release with atexit.\n\n\n :return: None\n :raise: SystemExit", "id": "f731:c0:m3"} {"signature": "def release(self):", "body": "try:self.pidfile.close()os.remove(self._pidfile)except OSError as err:if err.errno != :raise", "docstring": "Release the pidfile.\n\n Close and delete the Pidfile.\n\n\n :return: None", "id": "f731:c0:m4"} {"signature": "def close_filenos(preserve):", "body": "maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[]if maxfd == resource.RLIM_INFINITY:maxfd = for fileno in range(maxfd):if fileno not in preserve:try:os.close(fileno)except OSError as err:if not err.errno == errno.EBADF:raise DaemonError(''.format(fileno, err))", "docstring": "Close unprotected file descriptors\n\n Close all 
open file descriptors that are not in preserve.\n\n If ulimit -nofile is \"unlimited\", all is defined as filenos <= 4096,\n else all is <= the output of resource.getrlimit().\n\n :param preserve: set with protected files\n :type preserve: set\n\n :return: None", "id": "f733:m0"} {"signature": "def default_signal_map():", "body": "name_map = {'': None,'': None,'': None,'': ''}signal_map = {}for name, target in name_map.items():if hasattr(signal, name):signal_map[getattr(signal, name)] = targetreturn signal_map", "docstring": "Create the default signal map for this system.\n\n :return: dict", "id": "f733:m1"} {"signature": "def parent_is_init():", "body": "if os.getppid() == :return Truereturn False", "docstring": "Check if parent is Init\n\n Check if the parent process is init, or something else that\n owns PID 1.\n\n :return: bool", "id": "f733:m2"} {"signature": "def parent_is_inet():", "body": "result = Falsesock = socket.fromfd(sys.__stdin__.fileno(),socket.AF_INET,socket.SOCK_RAW)try:sock.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)result = Trueexcept (OSError, socket.error) as err:if not err.args[] == errno.ENOTSOCK:result = Truereturn result", "docstring": "Check if parent is inet\n\n Check if our parent seems to be a superserver, aka inetd/xinetd.\n\n This is done by checking if sys.__stdin__ is a network socket.\n\n :return: bool", "id": "f733:m3"} {"signature": "def detach_required():", "body": "if parent_is_inet() or parent_is_init():return Falsereturn True", "docstring": "Check if detaching is required\n\n This is done by collecting the results of parent_is_inet and\n parent_is_init. If one of them is True, detaching, aka the daemonizing,\n aka the double fork magic, is not required, and can be skipped.\n\n :return: bool", "id": "f733:m4"} {"signature": "def redirect_stream(system, target):", "body": "if target is None:target_fd = os.open(os.devnull, os.O_RDWR)else:target_fd = target.fileno()try:os.dup2(target_fd, system.fileno())except OSError as err:raise DaemonError(''.format(system, target, err))", "docstring": "Redirect Unix streams\n\n If None, redirect Stream to /dev/null, else redirect to target.\n\n :param system: either sys.stdin, sys.stdout, or sys.stderr\n :type system: file object\n\n :param target: File like object, or None\n :type target: None, File Object\n\n :return: None\n :raise: DaemonError", "id": "f733:m5"} {"signature": "def __init__(self, chroot_directory=None, working_directory='',umask=, uid=None, gid=None, prevent_core=True,detach_process=None, files_preserve=None, pidfile=None,stdin=None, stdout=None, stderr=None, signal_map=None):", "body": "self._is_open = Falseself._working_directory = Noneself.chroot_directory = chroot_directoryself.umask = umaskself.uid = uid if uid else os.getuid()self.gid = gid if gid else os.getgid()if detach_process is None:self.detach_process = detach_required()else:self.detach_process = detach_processself.signal_map = signal_map if signal_map else default_signal_map()self.files_preserve = files_preserveself.pidfile = pidfileself.prevent_core = prevent_coreself.stdin = stdinself.stdout = stdoutself.stderr = stderrself.working_directory = working_directory", "docstring": "Initialize a new Instance", "id": "f733:c1:m0"} {"signature": "def __enter__(self):", "body": "self.open()return self", "docstring": "Context Handler, wrapping self.open()\n\n :return: self", "id": "f733:c1:m1"} {"signature": "def __exit__(self, exc_type, exc_val, exc_tb):", "body": "self.close()", "docstring": "Context Handler, wrapping self.close()\n\n 
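Taken together, the daemon helpers above (parent_is_init/parent_is_inet, detach_required, redirect_stream, the context-manager __init__/__enter__/__exit__ and the pidfile acquire/release entries) describe a classic double-fork daemonization flow. A minimal usage sketch follows, assuming the classes are importable as DaemonContext and Pidfile from a module called mydaemonlib here (both names are placeholders); everything else mirrors the documented signatures:

import time
from mydaemonlib import DaemonContext, Pidfile   # hypothetical import path / class names

log = open('/tmp/mydaemon.log', 'w+')            # must stay open across the fork

context = DaemonContext(
    working_directory='/',                        # chdir() target applied in open()
    pidfile=Pidfile('/tmp/mydaemon.pid'),         # locked and written after detaching
    files_preserve=[log],                         # descriptors excluded from close_filenos()
    stdout=log,
    stderr=log,                                   # remaining streams go to /dev/null
)

with context:                                     # __enter__ -> open(): fork, setsid, fork
    while True:
        log.write('still alive\n')
        log.flush()
        time.sleep(60)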
:return: None", "id": "f733:c1:m2"} {"signature": "def _get_signal_handler(self, handler):", "body": "if not handler:result = signal.SIG_IGNelif isinstance(handler, string_types):result = getattr(self, handler)else:result = handlerreturn result", "docstring": "get the callback function for handler\n\n If the handler is None, returns signal.SIG_IGN.\n If the handler is a string, return the matching attribute of this\n instance if possible.\n Else return the handler itself.\n\n :param handler:\n :type handler: str, None, function\n :return: function", "id": "f733:c1:m3"} {"signature": "@propertydef _files_preserve(self):", "body": "result = set()files = [] if not self.files_preserve else self.files_preservefiles.extend([self.stdin, self.stdout, self.stderr])for item in files:if hasattr(item, ''):result.add(item.fileno())if isinstance(item, int):result.add(item)return result", "docstring": "create a set of protected files\n\n create a set of files, based on self.files_preserve and\n self.stdin, self,stdout and self.stderr, that should not get\n closed while daemonizing.\n\n :return: set", "id": "f733:c1:m4"} {"signature": "@propertydef _signal_handler_map(self):", "body": "result = {}for signum, handler in self.signal_map.items():result[signum] = self._get_signal_handler(handler)return result", "docstring": "Create the signal handler map\n\n create a dictionary with signal:handler mapping based on\n self.signal_map\n\n :return: dict", "id": "f733:c1:m5"} {"signature": "@propertydef working_directory(self):", "body": "if self.chroot_directory and notself._working_directory.startswith(self.chroot_directory):return self.chroot_directory + self._working_directoryelse:return self._working_directory", "docstring": "The working_directory property\n\n :return: str", "id": "f733:c1:m6"} {"signature": "@working_directory.setterdef working_directory(self, value):", "body": "self._working_directory = value", "docstring": "Set working directory\n\n New value is ignored if already daemonized.\n\n :param value: str\n :return:", "id": "f733:c1:m7"} {"signature": "@propertydef is_open(self):", "body": "return self._is_open", "docstring": "True when this instances open method was called\n\n :return: bool", "id": "f733:c1:m8"} {"signature": "def close(self):", "body": "pass", "docstring": "Dummy function", "id": "f733:c1:m9"} {"signature": "def open(self):", "body": "if self.is_open:returntry:os.chdir(self.working_directory)if self.chroot_directory:os.chroot(self.chroot_directory)os.setgid(self.gid)os.setuid(self.uid)os.umask(self.umask)except OSError as err:raise DaemonError(''.format(err))if self.prevent_core:try:resource.setrlimit(resource.RLIMIT_CORE, (, ))except Exception as err:raise DaemonError(''.format(err))if self.detach_process:try:if os.fork() > :os._exit()except OSError as err:raise DaemonError(''.format(err))os.setsid()try:if os.fork() > :os._exit()except OSError as err:raise DaemonError(''.format(err))for (signal_number, handler) in self._signal_handler_map.items():signal.signal(signal_number, handler)close_filenos(self._files_preserve)redirect_stream(sys.stdin, self.stdin)redirect_stream(sys.stdout, self.stdout)redirect_stream(sys.stderr, self.stderr)if self.pidfile:self.pidfile.acquire()self._is_open = True", "docstring": "Daemonize this process\n\n Do everything that is needed to become a Unix daemon.\n\n :return: None\n :raise: DaemonError", "id": "f733:c1:m10"} {"signature": "def terminate(self, signal_number, stack_frame):", "body": "raise SystemExit(''.format(signal_number))", "docstring": 
"Terminate this process\n\n Simply terminate this process by raising SystemExit.\n This method is called if signal.SIGTERM was received.\n\n Check carefully if this really is what you want!\n\n Most likely it is not!\n\n You should implement a function/method that is able to cleanly\n shutdown you daemon. Like gracefully terminating child processes,\n threads. or closing files.\n\n You can create a custom handler by overriding this method, ot\n setting a custom handler via the signal_map. It is also possible\n to set the signal handlers directly via signal.signal().\n\n :return: None\n :raise: SystemExit", "id": "f733:c1:m11"} {"signature": "def create_and_load(self):", "body": "create_db_and_user()initialize_database()populate_database(self.daily_files, self.quarterly_files)", "docstring": "Use this to create a user, a database, and load the database with files.\nIt will take a while to run and will only work if your network allows FTP\nfile transfer. It also requires you to have a postgres server running locally.", "id": "f737:c0:m1"} {"signature": "def retrieve_document(file_path, directory=''):", "body": "ftp = FTP('', timeout=None)ftp.login()name = file_path.replace('', '')if not os.path.exists(directory):os.makedirs(directory)with tempfile.TemporaryFile() as temp:ftp.retrbinary('' % file_path, temp.write)temp.seek()with open(''.format(directory, name), '') as f:f.write(temp.read().decode(\"\"))f.closedrecords = tempretry = Falseftp.close()", "docstring": "This function takes a file path beginning with edgar and stores the form in a directory.\nThe default directory is sec_filings but can be changed through a keyword argument.", "id": "f740:m14"} {"signature": "def generate(grammar=None, num=, output=sys.stdout, max_recursion=, seed=None):", "body": "if seed is not None:gramfuzz.rand.seed(seed)fuzzer = gramfuzz.GramFuzzer()fuzzer.load_grammar(grammar)cat_group = os.path.basename(grammar).replace(\"\", \"\")results = fuzzer.gen(cat_group=cat_group, num=num, max_recursion=max_recursion)for res in results:output.write(res)", "docstring": "Load and generate ``num`` number of top-level rules from the specified grammar.\n\n :param list grammar: The grammar file to load and generate data from\n :param int num: The number of times to generate data\n :param output: The output destination (an open, writable stream-type object. default=``sys.stdout``)\n :param int max_recursion: The maximum reference-recursion when generating data (default=``10``)\n :param int seed: The seed to initialize the PRNG with. 
If None, will not initialize it.", "id": "f748:m0"} {"signature": "def make_present_participles(verbs):", "body": "res = []for verb in verbs:parts = verb.split()if parts[].endswith(\"\"):parts[] = parts[][:-] + \"\"else:parts[] = parts[] + \"\"res.append(\"\".join(parts))return res", "docstring": "Make the list of verbs into present participles\n\n E.g.:\n\n empower -> empowering\n drive -> driving", "id": "f751:m1"} {"signature": "def __and__(self, other):", "body": "if isinstance(other, And) and other.rolling:other.values.append(self)return otherelse:return And(self, other, rolling=True)", "docstring": "Wraps this field and the other field in an ``And``", "id": "f756:c0:m0"} {"signature": "def __or__(self, other):", "body": "if isinstance(other, Or) and other.rolling:other.values.append(self)return otherelse:return Or(self, other, rolling=True)", "docstring": "Wraps this field and the other field in an ``Or``", "id": "f756:c0:m1"} {"signature": "def __and__(self, other):", "body": "if isinstance(self, And) and self.rolling:self.values.append(other)return selfelif isinstance(other, And) and other.rolling:other.values.append(self)return otherelse:return And(self, other, rolling=True)", "docstring": "Wrap this field and the other field in an ``And``\n\n :param other: Another ``Field`` class, instance, or python object to ``Or`` with", "id": "f756:c1:m0"} {"signature": "def __or__(self, other):", "body": "if isinstance(self, Or) and self.rolling:self.values.append(other)return selfelif isinstance(other, Or) and other.rolling:other.values.append(self)return otherelse:return Or(self, other, rolling=True)", "docstring": "Wrap this field and the other field in an ``Or``\n\n :param other: Another ``Field`` class, instance, or python object to ``Or`` with", "id": "f756:c1:m1"} {"signature": "def _odds_val(self):", "body": "if len(self.odds) == :self.odds = [(, [self.min, self.max])]rand_val = rand.random()total = for percent,v in self.odds:if total <= rand_val < total+percent:found_v = vbreaktotal += percentres = Noneif isinstance(v, (tuple,list)):rand_func = rand.randfloat if type(v[]) is float else rand.randintif len(v) == :res = rand_func(v[], v[])elif len(v) == :res = v[]else:res = vreturn res", "docstring": "Determine a new random value derived from the\n defined :any:`gramfuzz.fields.Field.odds` value.\n\n :returns: The derived value", "id": "f756:c1:m2"} {"signature": "def __init__(self, value=None, **kwargs):", "body": "self.value = valueif \"\" in kwargs or \"\" in kwargs:self.odds = []self.min = kwargs.setdefault(\"\", self.min)self.max = kwargs.setdefault(\"\", self.max)self.odds = kwargs.setdefault(\"\", self.odds)", "docstring": "Create a new Int object, optionally specifying a hard-coded value\n\n :param int value: The value of the new int object\n :param int min: The minimum value (if value is not specified)\n :param int max: The maximum value (if value is not specified)\n :param list odds: The probability list. 
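The field entries in this stretch compose into grammar rules via the `&`/`|` operators (which wrap fields in And/Or) and the odds list accepted by Int. A hedged sketch of a grammar-file fragment follows; the rule names and values are invented, and Def/Ref are the rule-definition and rule-reference fields documented a few entries further down:

from gramfuzz.fields import Def, Ref, Int, Join, Opt

# 75% of the time a value in [0, 10), 25% of the time a value in [90, 100)
Def("number", Int(odds=[
    (0.75, [0, 10]),
    (0.25, [90, 100]),
]))

# `&` concatenates (And), `|` picks one alternative at random (Or)
Def("expr",
    (Ref("number") & "+" & Ref("number")) | Ref("number"))

# Join repeats and joins its first value; Opt emits its content only sometimes
Def("call", "f(", Join(Ref("number"), max=3, sep=","), Opt("!"), ")")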
See ``Field.odds`` for more information.", "id": "f756:c2:m0"} {"signature": "def __init__(self, value=None, **kwargs):", "body": "super(String, self).__init__(value, **kwargs)self.charset = kwargs.setdefault(\"\", self.charset)", "docstring": "Create a new instance of the ``String`` field.\n\n :param value: The hard-coded value of the String field\n :param int min: The minimum size of the String when built\n :param int max: The maximum size of the String when built\n :param str charset: The character-set to be used when building the string", "id": "f756:c6:m0"} {"signature": "def __init__(self, *values, **kwargs):", "body": "self.values = list(values)self.sep = kwargs.setdefault(\"\", self.sep)self.max = kwargs.setdefault(\"\", None)", "docstring": "Create a new instance of the ``Join`` class.\n\n :param list values: The values to join\n :param str sep: The string with which to separate each of the values (default=``\",\"``)\n :param int max: The maximum number of times (inclusive) to build the first item in ``values``.\n This can be useful when a variable number of items in a list is needed. E.g.:\n\n .. code-block:: python\n\n Join(Int, max=5, sep=\",\")", "id": "f756:c7:m0"} {"signature": "def __init__(self, *values, **kwargs):", "body": "self.sep = kwargs.setdefault(\"\", self.sep)self.values = list(values)self.rolling = kwargs.setdefault(\"\", False)self.fuzzer = GramFuzzer.instance()", "docstring": "Create a new ``And`` field instance.\n\n :param list values: The list of values to be concatenated", "id": "f756:c8:m0"} {"signature": "def __init__(self, *values, **kwargs):", "body": "super(Q, self).__init__(*values, **kwargs)self.escape = kwargs.setdefault(\"\", self.escape)self.html_js_escape = kwargs.setdefault(\"\", self.html_js_escape)self.quote = kwargs.setdefault(\"\", self.quote)", "docstring": "Create the new ``Quote`` instance\n\n :param bool escape: Whether or not quoted data should be escaped (default=``False``)\n :param bool html_js_escape: Whether or not quoted data should be html-javascript escaped (default=``False``)\n :param str quote: The quote character to be used if ``escape`` and ``html_js_escape`` are ``False``", "id": "f756:c9:m0"} {"signature": "def __init__(self, *values, **kwargs):", "body": "self.shortest_vals = Noneself.values = list(values)if \"\" in kwargs and len(values) == :self.values = kwargs[\"\"]self.rolling = kwargs.setdefault(\"\", False)", "docstring": "Create a new ``Or`` instance with the provide values\n\n :param list values: The list of values to choose randomly from", "id": "f756:c10:m0"} {"signature": "def __init__(self, *values, **kwargs):", "body": "super(Opt, self).__init__(*values, **kwargs)self.prob = kwargs.setdefault(\"\", self.prob)", "docstring": "Create a new ``Opt`` instance\n\n :param list values: The list of values to build (or not)\n :param float prob: A float value between 0 and 1 that defines the probability\n of cancelling the current build.", "id": "f756:c11:m0"} {"signature": "def __init__(self, name, *values, **options):", "body": "self.name = nameself.options = optionsself.values = list(values)self.sep = self.options.setdefault(\"\", self.sep)self.cat = self.options.setdefault(\"\", self.cat)self.no_prune = self.options.setdefault(\"\", self.no_prune)self.fuzzer = GramFuzzer.instance()frame,mod_path,_,_,_,_ = inspect.stack()[]module_name = os.path.basename(mod_path).replace(\"\", \"\").replace(\"\", \"\")if \"\" in frame.f_locals:self.fuzzer.cat_group_defaults[module_name] = 
frame.f_locals[\"\"]self.fuzzer.add_definition(self.cat, self.name, self, no_prune=self.no_prune, gram_file=module_name)", "docstring": "Create a new rule definition. Simply instantiating a new rule definition\n will add it to the current ``GramFuzzer`` instance.\n\n :param str name: The name of the rule being defined\n :param list values: The list of values that define the value of the rule\n (will be concatenated when built)\n :param str cat: The category to create the rule in (default=``\"default\"``).\n :param bool no_prune: If this rule should not be pruned *EVEN IF* it is found to be\n unreachable (default=``False``)", "id": "f756:c12:m0"} {"signature": "def __init__(self, refname, **kwargs):", "body": "self.refname = refnameself.cat = kwargs.setdefault(\"\", self.cat)self.failsafe = kwargs.setdefault(\"\", self.failsafe)self.fuzzer = GramFuzzer.instance()", "docstring": "Create a new ``Ref`` instance\n\n :param str refname: The name of the rule to reference\n :param str cat: The name of the category the rule is defined in", "id": "f756:c13:m0"} {"signature": "def seed(val):", "body": "RANDOM.seed(val)", "docstring": "Set the seed for any subsequent random values/choices\n\n :param val: The random seed value", "id": "f757:m0"} {"signature": "def randint(a, b=None):", "body": "if b is None:return _randint(, a-)else:return _randint(a, b-)", "docstring": "Return a random integer\n\n :param int a: Either the minimum value (inclusive) if ``b`` is set, or\n the maximum value if ``b`` is not set (non-inclusive, in which case the minimum\n is implicitly 0)\n :param int b: The maximum value to generate (non-inclusive)\n :returns: int", "id": "f757:m1"} {"signature": "def randfloat(a, b=None):", "body": "if b is None:max_ = amin_ = else:min_ = amax_ = bdiff = max_ - min_res = _random()res *= diffres += min_return res", "docstring": "Return a random float\n\n :param float a: Either the minimum value (inclusive) if ``b`` is set, or\n the maximum value if ``b`` is not set (non-inclusive, in which case the minimum\n is implicitly 0.0)\n :param float b: The maximum value to generate (non-inclusive)\n :returns: float", "id": "f757:m2"} {"signature": "def maybe(prob=):", "body": "return _random() < prob", "docstring": "Return ``True`` with ``prob`` probability.\n\n :param float prob: The probability ``True`` will be returned\n :returns: bool", "id": "f757:m3"} {"signature": "def data(length, charset):", "body": "return \"\".join(_choice(charset) for x in xrange(length))", "docstring": "Generate ``length`` random characters from charset ``charset``\n\n :param int length: The number of characters to randomly generate\n :param str charset: The charset of characters to choose from\n :returns: str", "id": "f757:m4"} {"signature": "@classmethoddef instance(cls):", "body": "if cls.__instance__ is None:cls()return cls.__instance__", "docstring": "Return the singleton instance of the ``GramFuzzer``", "id": "f758:c0:m0"} {"signature": "def __init__(self, debug=False):", "body": "GramFuzzer.__instance__ = selfself.debug = debugself.defs = {}self.no_prunes = {}self.cat_groups = {}self.cat_group_defaults = {}self._staged_defs = Noneself._last_prefs = Noneself._last_pref_keys = Noneself._rules_processed = False", "docstring": "Create a new ``GramFuzzer`` instance", "id": "f758:c0:m1"} {"signature": "def load_grammar(self, path):", "body": "if not os.path.exists(path):raise Exception(\"\".format(path))grammar_path = os.path.dirname(path)if grammar_path not in sys.path:sys.path.append(grammar_path)with open(path, 
\"\") as f:data = f.read()code = compile(data, path, \"\")locals_ = {\"\": self, \"\": path}exec(code, locals_)if \"\" in locals_:cat_group = os.path.basename(path).replace(\"\", \"\")self.set_cat_group_top_level_cat(cat_group, locals_[\"\"])", "docstring": "Load a grammar file (python file containing grammar definitions) by\n file path. When loaded, the global variable ``GRAMFUZZER`` will be set\n within the module. This is not always needed, but can be useful.\n\n :param str path: The path to the grammar file", "id": "f758:c0:m2"} {"signature": "def set_max_recursion(self, level):", "body": "import gramfuzz.fieldsgramfuzz.fields.Ref.max_recursion = level", "docstring": "Set the maximum reference-recursion depth (not the Python system maximum stack\n recursion level). This controls how many levels deep of nested references are allowed\n before gramfuzz attempts to generate the shortest (reference-wise) rules possible.\n\n :param int level: The new maximum reference level", "id": "f758:c0:m3"} {"signature": "def preprocess_rules(self):", "body": "to_prune = self._find_shortest_paths()self._prune_rules(to_prune)self._rules_processed = True", "docstring": "Calculate shortest reference-paths of each rule (and Or field),\n and prune all unreachable rules.", "id": "f758:c0:m4"} {"signature": "def add_definition(self, cat, def_name, def_val, no_prune=False, gram_file=\"\"):", "body": "self._rules_processed = Falseself.add_to_cat_group(cat, gram_file, def_name)if no_prune:self.no_prunes.setdefault(cat, {}).setdefault(def_name, True)if self._staged_defs is not None:self._staged_defs.append((cat, def_name, def_val))else:self.defs.setdefault(cat, {}).setdefault(def_name, deque()).append(def_val)", "docstring": "Add a new rule definition named ``def_name`` having value ``def_value`` to\n the category ``cat``.\n\n :param str cat: The category to add the rule to\n :param str def_name: The name of the rule definition\n :param def_val: The value of the rule definition\n :param bool no_prune: If the rule should explicitly *NOT*\n be pruned even if it has been determined to be unreachable (default=``False``)\n :param str gram_file: The file the rule was defined in (default=``\"default\"``).", "id": "f758:c0:m10"} {"signature": "def set_cat_group_top_level_cat(self, cat_group, top_level_cat):", "body": "self.cat_group_defaults[cat_group] = top_level_cat", "docstring": "Set the default category when generating data from the grammars defined\n in cat group. 
*Note* a cat group is usually just the basename of the grammar\n file, minus the ``.py``.\n\n :param str cat_group: The category group to set the default top-level cat for\n :param str top_level_cat: The top-level (default) category of the cat group", "id": "f758:c0:m11"} {"signature": "def add_to_cat_group(self, cat, cat_group, def_name):", "body": "self.cat_groups.setdefault(cat, {}).setdefault(cat_group, deque()).append(def_name)", "docstring": "Associate the provided rule definition name ``def_name`` with the\n category group ``cat_group`` in the category ``cat``.\n\n :param str cat: The category the rule definition was declared in\n :param str cat_group: The group within the category the rule belongs to\n :param str def_name: The name of the rule definition", "id": "f758:c0:m12"} {"signature": "def get_ref(self, cat, refname):", "body": "if cat not in self.defs:raise errors.GramFuzzError(\"\".format(cat))if refname == \"\":refname = rand.choice(list(self.defs[cat].keys()))if refname not in self.defs[cat]:raise errors.GramFuzzError(\"\".format(refname))return rand.choice(self.defs[cat][refname])", "docstring": "Return one of the rules in the category ``cat`` with the name\n ``refname``. If multiple rule defintions exist for the defintion name\n ``refname``, use :any:`gramfuzz.rand` to choose a rule at random.\n\n :param str cat: The category to look for the rule in.\n :param str refname: The name of the rule definition. If the rule definition's name is\n ``\"*\"``, then a rule name will be chosen at random from within the category ``cat``.\n :returns: gramfuzz.fields.Def", "id": "f758:c0:m13"} {"signature": "def gen(self, num, cat=None, cat_group=None, preferred=None, preferred_ratio=, max_recursion=None, auto_process=True):", "body": "import gramfuzz.fieldsgramfuzz.fields.REF_LEVEL = if cat is None and cat_group is None:raise gramfuzz.errors.GramFuzzError(\"\")if cat is None and cat_group is not None:if cat_group not in self.cat_group_defaults:raise gramfuzz.errors.GramFuzzError(\"\")cat = self.cat_group_defaults[cat_group]if not isinstance(cat, str):raise gramfuzz.errors.GramFuzzError(\"\")if auto_process and self._rules_processed == False:self.preprocess_rules()if max_recursion is not None:self.set_max_recursion(max_recursion)if preferred is None:preferred = []res = deque()cat_defs = self.defs[cat]_res_append = res.append_res_extend = res.extend_choice = rand.choice_maybe = rand.maybe_val = utils.valkeys = list(self.defs[cat].keys())self._last_pref_keys = self._get_pref_keys(cat, preferred)self._last_prefs = preferredtotal_errors = deque()total_gend = while total_gend < num:if len(self._last_pref_keys) > and _maybe(preferred_ratio):rand_key = _choice(self._last_pref_keys)if rand_key not in cat_defs:rand_key = _choice(keys)else:rand_key = _choice(keys)if rand_key not in cat_defs:continuev = _choice(cat_defs[rand_key])info = {}pre = deque()self.pre_revert(info)val_res = Nonetry:val_res = _val(v, pre)except errors.GramFuzzError as e:raiseexcept RuntimeError as e:print(\"\")self.revert(info)continueif val_res is not None:_res_extend(pre)_res_append(val_res)total_gend += self.post_revert(cat, res, total_gend, num, info)return res", "docstring": "Generate ``num`` rules from category ``cat``, optionally specifying\n preferred category groups ``preferred`` that should be preferred at\n probability ``preferred_ratio`` over other randomly-chosen rule definitions.\n\n :param int num: The number of rules to generate\n :param str cat: The name of the category to generate ``num`` rules from\n 
:param str cat_group: The category group (ie python file) to generate rules from. This\n was added specifically to make it easier to generate data based on the name\n of the file the grammar was defined in, and is intended to work with the\n ``TOP_CAT`` values that may be defined in a loaded grammar file.\n :param list preferred: A list of preferred category groups to generate rules from\n :param float preferred_ratio: The percent probability that the preferred\n groups will be chosen over randomly choosen rule definitions from category ``cat``.\n :param int max_recursion: The maximum amount to allow references to recurse\n :param bool auto_process: Whether rules should be automatically pruned and\n shortest reference paths determined. See :any:`gramfuzz.GramFuzzer.preprocess_rules`\n for what would automatically be done.", "id": "f758:c0:m14"} {"signature": "def pre_revert(self, info=None):", "body": "self._staged_defs = deque()", "docstring": "Signal to begin saving any changes that might need to be reverted", "id": "f758:c0:m15"} {"signature": "def post_revert(self, cat, res, total_num, num, info):", "body": "if self._staged_defs is None:returnfor cat,def_name,def_value in self._staged_defs:self.defs.setdefault(cat, {}).setdefault(def_name, deque()).append(def_value)self._staged_defs = None", "docstring": "Commit any staged rule definition changes (rule generation went\n smoothly).", "id": "f758:c0:m16"} {"signature": "def revert(self, info=None):", "body": "self._staged_defs = None", "docstring": "Revert after a single def errored during generate (throw away all\n staged rule definition changes)", "id": "f758:c0:m17"} {"signature": "@propertydef safe(self):", "body": "return self._dumper_class is yaml.SafeDumper", "docstring": "Returns ``True`` if the safe mode is being used with (de)serialization.", "id": "f767:c0:m4"} {"signature": "def default_decoder(self, obj):", "body": "typename, marshalled_state = self.unwrap_callback(obj)if typename is None:return objtry:cls, unmarshaller = self.serializer.unmarshallers[typename]except KeyError:raise LookupError(''.format(typename)) from Noneif cls is not None:instance = cls.__new__(cls)unmarshaller(instance, marshalled_state)return instanceelse:return unmarshaller(marshalled_state)", "docstring": "Handle a dict that might contain a wrapped state for a custom type.", "id": "f771:c0:m2"} {"signature": "def wrap_state_dict(self, typename: str, state) -> Dict[str, Any]:", "body": "return {self.type_key: typename, self.state_key: state}", "docstring": "Wrap the marshalled state in a dictionary.\n\nThe returned dictionary has two keys, corresponding to the ``type_key`` and ``state_key``\noptions. 
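To make the custom-type round trip concrete, here is a hedged sketch of registering a class and the wrapped state it produces. JSONSerializer and its import path are assumptions (any serializer exposing register_custom_type() per the entries in this listing would do), and the "__type__"/"state" key names only illustrate the configurable type_key/state_key:

from my_serialization_lib import JSONSerializer   # hypothetical concrete serializer

class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

serializer = JSONSerializer()
serializer.register_custom_type(Point)        # default __dict__-based (un)marshalling
payload = serializer.serialize(Point(1, 2))

# the payload contains a wrapped state of roughly this shape,
# assuming type_key="__type__" and state_key="state":
#   {"__type__": "mymodule:Point", "state": {"x": 1, "y": 2}}
# unwrap_state_dict() recognises that two-key dict, hands (typename, state) to
# default_decoder(), which rebuilds the object via cls.__new__(cls) plus the
# registered unmarshaller.
restored = serializer.deserialize(payload)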
The former holds the type name and the latter holds the marshalled state.\n\n:param typename: registered name of the custom type\n:param state: the marshalled state of the object\n:return: an object serializable by the serializer", "id": "f771:c0:m3"} {"signature": "def unwrap_state_dict(self, obj: Dict[str, Any]) -> Union[Tuple[str, Any], Tuple[None, None]]:", "body": "if len(obj) == :typename = obj.get(self.type_key)state = obj.get(self.state_key)if typename is not None:return typename, statereturn None, None", "docstring": "Unwraps a marshalled state previously wrapped using :meth:`wrap_state_dict`.", "id": "f771:c0:m4"} {"signature": "def default_marshaller(obj):", "body": "if hasattr(obj, ''):return obj.__getstate__()try:return obj.__dict__except AttributeError:raise TypeError(''.format(qualified_name(obj.__class__))) from None", "docstring": "Retrieve the state of the given object.\n\nCalls the ``__getstate__()`` method of the object if available, otherwise returns the\n``__dict__`` of the object.\n\n:param obj: the object to marshal\n:return: the marshalled object state", "id": "f772:m0"} {"signature": "def default_unmarshaller(instance, state) -> None:", "body": "if hasattr(instance, ''):instance.__setstate__(state)else:try:instance.__dict__.update(state)except AttributeError:raise TypeError(''.format(qualified_name(instance.__class__))) from None", "docstring": "Restore the state of an object.\n\nIf the ``__setstate__()`` method exists on the instance, it is called with the state object\nas the argument. Otherwise, the instance's ``__dict__`` is replaced with ``state``.\n\n:param instance: an uninitialized instance\n:param state: the state object, as returned by :func:`default_marshaller`", "id": "f772:m1"} {"signature": "@abstractmethoddef serialize(self, obj) -> bytes:", "body": "", "docstring": "Serialize a Python object into bytes.", "id": "f773:c0:m0"} {"signature": "@abstractmethoddef deserialize(self, payload: bytes):", "body": "", "docstring": "Deserialize bytes into a Python object.", "id": "f773:c0:m1"} {"signature": "@property@abstractmethoddef mimetype(self) -> str:", "body": "", "docstring": "Return the MIME type for this serialization format.", "id": "f773:c0:m2"} {"signature": "def register_custom_type(self, cls: type, marshaller: Optional[Callable[[Any], Any]] = default_marshaller,unmarshaller: Union[Callable[[Any, Any], None],Callable[[Any], Any], None] = default_unmarshaller, *,typename: str = None, wrap_state: bool = True) -> None:", "body": "assert check_argument_types()typename = typename or qualified_name(cls)if marshaller:self.marshallers[cls] = typename, marshaller, wrap_stateself.custom_type_codec.register_object_encoder_hook(self)if unmarshaller and self.custom_type_codec is not None:target_cls = cls if len(signature(unmarshaller).parameters) == :target_cls = Noneself.unmarshallers[typename] = target_cls, unmarshallerself.custom_type_codec.register_object_decoder_hook(self)", "docstring": "Register a marshaller and/or unmarshaller for the given class.\n\nThe state object returned by the marshaller and passed to the unmarshaller can be any\nserializable type. Usually a dictionary mapping of attribute names to values is used.\n\n.. 
warning:: Registering marshallers/unmarshallers for any custom type will override any\n serializer specific encoding/decoding hooks (respectively) already in place!\n\n:param cls: the class to register\n:param marshaller: a callable that takes the object to be marshalled as the argument and\n returns a state object\n:param unmarshaller: a callable that either:\n\n * takes an uninitialized instance of ``cls`` and its state object as arguments and\n restores the state of the object\n * takes a state object and returns a new instance of ``cls``\n:param typename: a unique identifier for the type (defaults to the ``module:varname``\n reference to the class)\n:param wrap_state: ``True`` to wrap the marshalled state before serialization so that it\n can be recognized later for unmarshalling, ``False`` to serialize it as is", "id": "f773:c1:m1"} {"signature": "@abstractmethoddef register_object_encoder_hook(self, serializer: CustomizableSerializer) -> None:", "body": "", "docstring": "Register a custom encoder callback on the serializer.\n\nThis callback would be called when the serializer encounters an object it cannot natively\nserialize. What the callback returns is specific to each serializer type.\n\n:param serializer: the serializer instance to use", "id": "f773:c2:m0"} {"signature": "@abstractmethoddef register_object_decoder_hook(self, serializer: CustomizableSerializer) -> None:", "body": "", "docstring": "Register a callback on the serializer for unmarshalling previously marshalled objects.\n\n:param serializer: the serializer instance to use", "id": "f773:c2:m1"} {"signature": "def get_response(word):", "body": "url = URL + \"\" + API_KEY + \"\" + word + \"\"return requests.get(url)", "docstring": "Fetch translate result from baidu api\n Args:\n word(str): query word\n Returns:\n (requests.models.Response): response object", "id": "f778:m0"} {"signature": "def print_res(data):", "body": "print('')main_part = data['']print(main_part[''])symbols = main_part[''][]print(\"\" + symbols[''] + \"\")print(\"\" + symbols[''] + \"\")print('')parts = symbols['']for part in parts:print(part[''])for mean in part['']:print(\"\", mean)print('')", "docstring": "Print translate result in a better format\n Args:\n data(str): result", "id": "f778:m1"} {"signature": "@taskdef release():", "body": "run(\"\")", "docstring": "Publishes the project on PyPI.\n\nWe have automatic publishing enabled on TRAVIS build, so this is not\nnecessary... but I'll keep here for reference.", "id": "f782:m0"} {"signature": "@taskdef travis_setpass():", "body": "print(\"\")", "docstring": "Stores the PyPI password (encrypted) in the .travis.yml file.", "id": "f782:m2"} {"signature": "def get_data_dir(self):", "body": "return self._data_dir", "docstring": ":rtype: unicode\n:returns:\n Returns the absolute path to data-directory name to use, standardized by StandardizePath.\n\n@remarks:\n This method triggers the data-directory creation.", "id": "f785:c0:m3"} {"signature": "def get_filename(self, *parts):", "body": "from zerotk.easyfs import StandardizePathresult = [self._data_dir] + list(parts)result = ''.join(result)return StandardizePath(result)", "docstring": "Returns an absolute filename in the data-directory (standardized by StandardizePath).\n\n@params parts: list(unicode)\n Path parts. 
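The data-directory entries around here describe a test-support object that maps filenames into a per-test data directory and compares generated files against expected ones. A hedged usage sketch, assuming it is exposed to tests as a pytest fixture named embed_data (the fixture name is an assumption):

def test_report_output(embed_data):                      # fixture name assumed
    output_fn = embed_data.get_filename('report.txt')    # absolute path inside the data dir
    with open(output_fn, 'w') as f:
        f.write('line 1\nline 2\n')
    # searches for both filenames inside and outside the data directory and,
    # on mismatch, writes an HTML diff next to the obtained file
    embed_data.assert_equal_files('report.txt', 'report.expected.txt')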
Each part is joined to form a path.\n\n:rtype: unicode\n:returns:\n The full path prefixed with the data-directory.\n\n@remarks:\n This method triggers the data-directory creation.", "id": "f785:c0:m4"} {"signature": "def assert_equal_files(self, obtained_fn, expected_fn, fix_callback=lambda x:x, binary=False, encoding=None):", "body": "import osfrom zerotk.easyfs import GetFileContents, GetFileLines__tracebackhide__ = Trueimport iodef FindFile(filename):data_filename = self.get_filename(filename)if os.path.isfile(data_filename):return data_filenameif os.path.isfile(filename):return filenamefrom ._exceptions import MultipleFilesNotFoundraise MultipleFilesNotFound([filename, data_filename])obtained_fn = FindFile(obtained_fn)expected_fn = FindFile(expected_fn)if binary:obtained_lines = GetFileContents(obtained_fn, binary=True)expected_lines = GetFileContents(expected_fn, binary=True)assert obtained_lines == expected_lineselse:obtained_lines = fix_callback(GetFileLines(obtained_fn, encoding=encoding))expected_lines = GetFileLines(expected_fn, encoding=encoding)if obtained_lines != expected_lines:html_fn = os.path.splitext(obtained_fn)[] + ''html_diff = self._generate_html_diff(expected_fn, expected_lines, obtained_fn, obtained_lines)with io.open(html_fn, '') as f:f.write(html_diff)import difflibdiff = ['', obtained_fn, expected_fn]diff += ['' % html_fn]diff += difflib.context_diff(obtained_lines, expected_lines)raise AssertionError(''.join(diff) + '')", "docstring": "Compare two files contents. If the files differ, show the diff and write a nice HTML\ndiff file into the data directory.\n\nSearches for the filenames both inside and outside the data directory (in that order).\n\n:param unicode obtained_fn: basename to obtained file into the data directory, or full path.\n\n:param unicode expected_fn: basename to expected file into the data directory, or full path.\n\n:param bool binary:\n Thread both files as binary files.\n\n:param unicode encoding:\n File's encoding. If not None, contents obtained from file will be decoded using this\n `encoding`.\n\n:param callable fix_callback:\n A callback to \"fix\" the contents of the obtained (first) file.\n This callback receives a list of strings (lines) and must also return a list of lines,\n changed as needed.\n The resulting lines will be used to compare with the contents of expected_fn.\n\n:param bool binary:\n .. 
seealso:: zerotk.easyfs.GetFileContents", "id": "f785:c0:m6"} {"signature": "def _generate_html_diff(self, expected_fn, expected_lines, obtained_fn, obtained_lines):", "body": "import difflibdiffer = difflib.HtmlDiff()return differ.make_file(fromlines=expected_lines,fromdesc=expected_fn,tolines=obtained_lines,todesc=obtained_fn,)", "docstring": "Returns a nice side-by-side diff of the given files, as a string.", "id": "f785:c0:m7"} {"signature": "def _GetNativeEolStyle(platform=sys.platform):", "body": "_NATIVE_EOL_STYLE_MAP = {'' : EOL_STYLE_WINDOWS,'' : EOL_STYLE_UNIX,'' : EOL_STYLE_UNIX,'' : EOL_STYLE_MAC,}result = _NATIVE_EOL_STYLE_MAP.get(platform)if result is None:from ._exceptions import UnknownPlatformErrorraise UnknownPlatformError(platform)return result", "docstring": "Internal function that determines EOL_STYLE_NATIVE constant with the proper value for the\ncurrent platform.", "id": "f788:m0"} {"signature": "@contextlib.contextmanagerdef Cwd(directory):", "body": "old_directory = six.moves.getcwd()if directory is not None:os.chdir(directory)try:yield directoryfinally:os.chdir(old_directory)", "docstring": "Context manager for current directory (uses with_statement)\n\ne.g.:\n # working on some directory\n with Cwd('/home/new_dir'):\n # working on new_dir\n\n # working on some directory again\n\n:param unicode directory:\n Target directory to enter", "id": "f788:m1"} {"signature": "def NormalizePath(path):", "body": "if path.endswith('') or path.endswith(''):slash = os.path.sepelse:slash = ''return os.path.normpath(path) + slash", "docstring": "Normalizes a path maintaining the final slashes.\n\nSome environment variables need the final slash in order to work.\n\nEx. The SOURCES_DIR set by subversion must end with a slash because of the way it is used\nin the Visual Studio projects.\n\n:param unicode path:\n The path to normalize.\n\n:rtype: unicode\n:returns:\n Normalized path", "id": "f788:m2"} {"signature": "def CanonicalPath(path):", "body": "path = os.path.normpath(path)path = os.path.abspath(path)path = os.path.normcase(path)return path", "docstring": "Returns a version of a path that is unique.\n\nGiven two paths path1 and path2:\n CanonicalPath(path1) == CanonicalPath(path2) if and only if they represent the same file on\n the host OS. 
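The path helpers in this part of the listing live in zerotk.easyfs (the module the StandardizePath and GetFileContents/GetFileLines imports above point at). A short sketch of their contracts, with placeholder paths:

from zerotk.easyfs import NormalizePath, CanonicalPath, StandardizePath, Cwd

NormalizePath('a/b/../c/')      # normalized, and the trailing slash is kept
CanonicalPath('./A//b')         # absolute + normalized + case-normalized (unique per host OS)
StandardizePath('a\\b\\c')      # backslashes replaced with forward slashes -> 'a/b/c'

with Cwd('/tmp'):               # temporarily change the working directory
    pass                        # ... do work relative to /tmp here
# the previous working directory is restored on exit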
Takes account of case, slashes and relative paths.\n\n:param unicode path:\n The original path.\n\n:rtype: unicode\n:returns:\n The unique path.", "id": "f788:m3"} {"signature": "def StandardizePath(path, strip=False):", "body": "path = path.replace(SEPARATOR_WINDOWS, SEPARATOR_UNIX)if strip:path = path.rstrip(SEPARATOR_UNIX)return path", "docstring": "Replaces all slashes and backslashes with the target separator\n\nStandardPath:\n We are defining that the standard-path is the one with only back-slashes in it, either\n on Windows or any other platform.\n\n:param bool strip:\n If True, removes additional slashes from the end of the path.", "id": "f788:m4"} {"signature": "def NormStandardPath(path):", "body": "import posixpathif path.endswith(''):slash = ''else:slash = ''return posixpath.normpath(path) + slash", "docstring": "Normalizes a standard path (posixpath.normpath) maintaining any slashes at the end of the path.\n\nNormalize:\n Removes any local references in the path \"/../\"\n\nStandardPath:\n We are defining that the standard-path is the one with only back-slashes in it, either\n on Windows or any other platform.", "id": "f788:m5"} {"signature": "def CreateMD5(source_filename, target_filename=None):", "body": "if target_filename is None:target_filename = source_filename + ''from six.moves.urllib.parse import urlparsesource_url = urlparse(source_filename)if _UrlIsLocal(source_url):md5_contents = Md5Hex(filename=source_filename)else:md5_contents = Md5Hex(contents=GetFileContents(source_filename, binary=True))CreateFile(target_filename, md5_contents)", "docstring": "Creates a md5 file from a source file (contents are the md5 hash of source file)\n\n:param unicode source_filename:\n Path to source file\n\n:type target_filename: unicode or None\n:param target_filename:\n Name of the target file with the md5 contents\n\n If None, defaults to source_filename + '.md5'", "id": "f788:m6"} {"signature": "def CopyFile(source_filename, target_filename, override=True, md5_check=False, copy_symlink=True):", "body": "from ._exceptions import FileNotFoundErrorif not override and Exists(target_filename):from ._exceptions import FileAlreadyExistsErrorraise FileAlreadyExistsError(target_filename)md5_check = md5_check and not target_filename.endswith('')if md5_check:source_md5_filename = source_filename + ''target_md5_filename = target_filename + ''try:source_md5_contents = GetFileContents(source_md5_filename)except FileNotFoundError:source_md5_contents = Nonetry:target_md5_contents = GetFileContents(target_md5_filename)except FileNotFoundError:target_md5_contents = Noneif source_md5_contents is not None andsource_md5_contents == target_md5_contents andExists(target_filename):return MD5_SKIP_DoCopyFile(source_filename, target_filename, copy_symlink=copy_symlink)if md5_check and source_md5_contents is not None and source_md5_contents != target_md5_contents:CreateFile(target_md5_filename, source_md5_contents)", "docstring": "Copy a file from source to target.\n\n:param source_filename:\n @see _DoCopyFile\n\n:param target_filename:\n @see _DoCopyFile\n\n:param bool md5_check:\n If True, checks md5 files (of both source and target files), if they match, skip this copy\n and return MD5_SKIP\n\n Md5 files are assumed to be {source, target} + '.md5'\n\n If any file is missing (source, target or md5), the copy will always be made.\n\n:param copy_symlink:\n @see _DoCopyFile\n\n:raises FileAlreadyExistsError:\n If target_filename already exists, and override is False\n\n:raises NotImplementedProtocol:\n If 
file protocol is not accepted\n\n Protocols allowed are:\n source_filename: local, ftp, http\n target_filename: local, ftp\n\n:rtype: None | MD5_SKIP\n:returns:\n MD5_SKIP if the file was not copied because there was a matching .md5 file\n\n.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m7"} {"signature": "def _DoCopyFile(source_filename, target_filename, copy_symlink=True):", "body": "from six.moves.urllib.parse import urlparsesource_url = urlparse(source_filename)target_url = urlparse(target_filename)if _UrlIsLocal(source_url):if not Exists(source_filename):from ._exceptions import FileNotFoundErrorraise FileNotFoundError(source_filename)if _UrlIsLocal(target_url):_CopyFileLocal(source_filename, target_filename, copy_symlink=copy_symlink)elif target_url.scheme in ['']:from ._exceptions import NotImplementedProtocolraise NotImplementedProtocol(target_url.scheme)else:from ._exceptions import NotImplementedProtocolraise NotImplementedProtocol(target_url.scheme)elif source_url.scheme in ['', '', '']:if _UrlIsLocal(target_url):from ._exceptions import NotImplementedProtocolraise NotImplementedProtocol(target_url.scheme)else:from ._exceptions import NotImplementedProtocolraise NotImplementedProtocol(target_url.scheme)else:from ._exceptions import NotImplementedProtocol raise NotImplementedProtocol(source_url.scheme)", "docstring": ":param unicode source_filename:\n The source filename.\n Schemas: local, ftp, http\n\n:param unicode target_filename:\n Target filename.\n Schemas: local, ftp\n\n:param copy_symlink:\n @see _CopyFileLocal\n\n:raises FileNotFoundError:\n If source_filename does not exist", "id": "f788:m8"} {"signature": "def _CopyFileLocal(source_filename, target_filename, copy_symlink=True):", "body": "import shutiltry:dir_name = os.path.dirname(target_filename)if dir_name and not os.path.isdir(dir_name):os.makedirs(dir_name)if copy_symlink and IsLink(source_filename):if os.path.isfile(target_filename) or IsLink(target_filename):DeleteFile(target_filename)source_filename = ReadLink(source_filename)CreateLink(source_filename, target_filename)else:if sys.platform == '':while IsLink(source_filename):link = ReadLink(source_filename)if os.path.isabs(link):source_filename = linkelse:source_filename = os.path.join(os.path.dirname(source_filename), link)shutil.copyfile(source_filename, target_filename)shutil.copymode(source_filename, target_filename)except Exception as e:reraise(e, '' % (source_filename, target_filename))", "docstring": "Copy a file locally to a directory.\n\n:param unicode source_filename:\n The filename to copy from.\n\n:param unicode target_filename:\n The filename to copy to.\n\n:param bool copy_symlink:\n If True and source_filename is a symlink, target_filename will also be created as\n a symlink.\n\n If False, the file being linked will be copied instead.", "id": "f788:m9"} {"signature": "def CopyFiles(source_dir, target_dir, create_target_dir=False, md5_check=False):", "body": "import fnmatchif IsDir(source_dir):source_mask = ''else:source_dir, source_mask = os.path.split(source_dir)if not IsDir(target_dir):if create_target_dir:CreateDirectory(target_dir)else:from ._exceptions import DirectoryNotFoundErrorraise DirectoryNotFoundError(target_dir)filenames = ListFiles(source_dir)if filenames is None:returnfor i_filename in filenames:if md5_check and i_filename.endswith(''):continue if fnmatch.fnmatch(i_filename, source_mask):source_path = source_dir + '' + i_filenametarget_path = target_dir + '' + i_filenameif 
IsDir(source_path):CopyFiles(source_path, target_path, create_target_dir=True, md5_check=md5_check)else:CopyFile(source_path, target_path, md5_check=md5_check)", "docstring": "Copy files from the given source to the target.\n\n:param unicode source_dir:\n A filename, URL or a file mask.\n Ex.\n x:\\coilib50\n x:\\coilib50\\*\n http://server/directory/file\n ftp://server/directory/file\n\n\n:param unicode target_dir:\n A directory or an URL\n Ex.\n d:\\Temp\n ftp://server/directory\n\n:param bool create_target_dir:\n If True, creates the target path if it doesn't exists.\n\n:param bool md5_check:\n .. seealso:: CopyFile\n\n:raises DirectoryNotFoundError:\n If target_dir does not exist, and create_target_dir is False\n\n.. seealso:: CopyFile for documentation on accepted protocols\n\n.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m10"} {"signature": "def CopyFilesX(file_mapping):", "body": "files = []for i_target_path, i_source_path_mask in file_mapping:tree_recurse, flat_recurse, dirname, in_filters, out_filters = ExtendedPathMask.Split(i_source_path_mask)_AssertIsLocal(dirname)filenames = FindFiles(dirname, in_filters, out_filters, tree_recurse)for i_source_filename in filenames:if os.path.isdir(i_source_filename):continue i_target_filename = i_source_filename[len(dirname) + :]if flat_recurse:i_target_filename = os.path.basename(i_target_filename)i_target_filename = os.path.join(i_target_path, i_target_filename)files.append((StandardizePath(i_source_filename),StandardizePath(i_target_filename)))for i_source_filename, i_target_filename in files:target_dir = os.path.dirname(i_target_filename)CreateDirectory(target_dir)CopyFile(i_source_filename, i_target_filename)return files", "docstring": "Copies files into directories, according to a file mapping\n\n:param list(tuple(unicode,unicode)) file_mapping:\n A list of mappings between the directory in the target and the source.\n For syntax, @see: ExtendedPathMask\n\n:rtype: list(tuple(unicode,unicode))\n:returns:\n List of files copied. (source_filename, target_filename)\n\n.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m11"} {"signature": "def IsFile(path):", "body": "from six.moves.urllib.parse import urlparseurl = urlparse(path)if _UrlIsLocal(url):if IsLink(path):return IsFile(ReadLink(path))return os.path.isfile(path)elif url.scheme == '':from ._exceptions import NotImplementedProtocolraise NotImplementedProtocol(url.scheme)else:from ._exceptions import NotImplementedProtocolraise NotImplementedProtocol(url.scheme)", "docstring": ":param unicode path:\n Path to a file (local or ftp)\n\n:raises NotImplementedProtocol:\n If checking for a non-local, non-ftp file\n\n:rtype: bool\n:returns:\n True if the file exists\n\n.. 
seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m12"} {"signature": "def GetDriveType(path):", "body": "if sys.platform == '':import ctypeskdll = ctypes.windll.LoadLibrary(\"\")return kdll.GetDriveType(path + '')import win32fileif IsFile(path):path = os.path.dirname(path)return win32file.GetDriveType(path + '')else:return DRIVE_UNKNOWN", "docstring": "Determine the type of drive, which can be one of the following values:\n DRIVE_UNKNOWN = 0\n The drive type cannot be determined.\n\n DRIVE_NO_ROOT_DIR = 1\n The root path is invalid; for example, there is no volume mounted at the specified path.\n\n DRIVE_REMOVABLE = 2\n The drive has removable media; for example, a floppy drive, thumb drive, or flash card reader.\n\n DRIVE_FIXED = 3\n The drive has fixed media; for example, a hard disk drive or flash drive.\n\n DRIVE_REMOTE = 4\n The drive is a remote (network) drive.\n\n DRIVE_CDROM = 5\n The drive is a CD-ROM drive.\n\n DRIVE_RAMDISK = 6\n The drive is a RAM disk\n\n:note:\n The implementation is valid only for Windows OS\n Linux will always return DRIVE_UNKNOWN\n\n:param path:\n Path to a file or directory", "id": "f788:m13"} {"signature": "def IsDir(directory):", "body": "from six.moves.urllib.parse import urlparsedirectory_url = urlparse(directory)if _UrlIsLocal(directory_url):return os.path.isdir(directory)elif directory_url.scheme == '':from ._exceptions import NotImplementedProtocolraise NotImplementedProtocol(target_url.scheme)else:from ._exceptions import NotImplementedProtocolraise NotImplementedProtocol(directory_url.scheme)", "docstring": ":param unicode directory:\n A path\n\n:rtype: bool\n:returns:\n Returns whether the given path points to an existent directory.\n\n:raises NotImplementedProtocol:\n If the path protocol is not local or ftp\n\n.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m14"} {"signature": "def Exists(path):", "body": "from six.moves.urllib.parse import urlparsepath_url = urlparse(path)if _UrlIsLocal(path_url):return IsFile(path) or IsDir(path) or IsLink(path)return IsFile(path) or IsDir(path)", "docstring": ":rtype: bool\n:returns:\n True if the path already exists (either a file or a directory)\n\n.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m15"} {"signature": "def CopyDirectory(source_dir, target_dir, override=False):", "body": "_AssertIsLocal(source_dir)_AssertIsLocal(target_dir)if override and IsDir(target_dir):DeleteDirectory(target_dir, skip_on_error=False)import shutilshutil.copytree(source_dir, target_dir)", "docstring": "Recursively copy a directory tree.\n\n:param unicode source_dir:\n Where files will come from\n\n:param unicode target_dir:\n Where files will go to\n\n:param bool override:\n If True and target_dir already exists, it will be deleted before copying.\n\n:raises NotImplementedForRemotePathError:\n If trying to copy to/from remote directories", "id": "f788:m16"} {"signature": "def DeleteFile(target_filename):", "body": "_AssertIsLocal(target_filename)try:if IsLink(target_filename):DeleteLink(target_filename)elif IsFile(target_filename):os.remove(target_filename)elif IsDir(target_filename):from ._exceptions import FileOnlyActionErrorraise FileOnlyActionError(target_filename)except Exception as e:reraise(e, '' % (target_filename))", "docstring": "Deletes the given local filename.\n\n.. 
note:: If file doesn't exist this method has no effect.\n\n:param unicode target_filename:\n A local filename\n\n:raises NotImplementedForRemotePathError:\n If trying to delete a non-local path\n\n:raises FileOnlyActionError:\n Raised when filename refers to a directory.", "id": "f788:m17"} {"signature": "def AppendToFile(filename, contents, eol_style=EOL_STYLE_NATIVE, encoding=None, binary=False):", "body": "_AssertIsLocal(filename)assert isinstance(contents, six.text_type) ^ binary, ''if not binary:contents = _HandleContentsEol(contents, eol_style)contents = contents.encode(encoding or sys.getfilesystemencoding())oss = open(filename, '')try:oss.write(contents)finally:oss.close()", "docstring": "Appends content to a local file.\n\n:param unicode filename:\n\n:param unicode contents:\n\n:type eol_style: EOL_STYLE_XXX constant\n:param eol_style:\n Replaces the EOL by the appropriate EOL depending on the eol_style value.\n Considers that all content is using only \"\\n\" as EOL.\n\n:param unicode encoding:\n Target file's content encoding.\n Defaults to sys.getfilesystemencoding()\n\n:param bool binary:\n If True, content is appended in binary mode. In this case, `contents` must be `bytes` and not\n `unicode`\n\n:raises NotImplementedForRemotePathError:\n If trying to modify a non-local path\n\n:raises ValueError:\n If trying to mix unicode `contents` without `encoding`, or `encoding` without\n unicode `contents`", "id": "f788:m18"} {"signature": "def MoveFile(source_filename, target_filename):", "body": "_AssertIsLocal(source_filename)_AssertIsLocal(target_filename)import shutilshutil.move(source_filename, target_filename)", "docstring": "Moves a file.\n\n:param unicode source_filename:\n\n:param unicode target_filename:\n\n:raises NotImplementedForRemotePathError:\n If trying to operate with non-local files.", "id": "f788:m19"} {"signature": "def MoveDirectory(source_dir, target_dir):", "body": "if not IsDir(source_dir):from ._exceptions import DirectoryNotFoundErrorraise DirectoryNotFoundError(source_dir)if Exists(target_dir):from ._exceptions import DirectoryAlreadyExistsErrorraise DirectoryAlreadyExistsError(target_dir)from six.moves.urllib.parse import urlparsesource_url = urlparse(source_dir)target_url = urlparse(target_dir)if _UrlIsLocal(source_url) and _UrlIsLocal(target_url):import shutilshutil.move(source_dir, target_dir)elif source_url.scheme == '' and target_url.scheme == '':from ._exceptions import NotImplementedProtocolraise NotImplementedProtocol(target_url.scheme)else:raise NotImplementedError('')", "docstring": "Moves a directory.\n\n:param unicode source_dir:\n\n:param unicode target_dir:\n\n:raises NotImplementedError:\n If trying to move anything other than:\n Local dir -> local dir\n FTP dir -> FTP dir (same host)", "id": "f788:m20"} {"signature": "def GetFileContents(filename, binary=False, encoding=None, newline=None):", "body": "source_file = OpenFile(filename, binary=binary, encoding=encoding, newline=newline)try:contents = source_file.read()finally:source_file.close()return contents", "docstring": "Reads a file and returns its contents. Works for both local and remote files.\n\n:param unicode filename:\n\n:param bool binary:\n If True returns the file as is, ignore any EOL conversion.\n\n:param unicode encoding:\n File's encoding. 
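The records completed above (`DeleteFile`, `AppendToFile`, `MoveFile`, `MoveDirectory`) are local-only file operations. Below is a hedged usage sketch: the import path and all paths are hypothetical, and the default EOL/encoding constants are assumed from the docstrings because their literal values were stripped from this dump.

```python
# Hypothetical import path and paths; behavior follows the docstrings above.
from myproject.filesystem import AppendToFile, MoveFile, MoveDirectory, DeleteFile

log_file = "/tmp/example/build.log"
AppendToFile(log_file, u"build finished\n", encoding="utf-8")   # text contents, non-binary

MoveFile(log_file, "/tmp/example/build.old.log")                # local files only
MoveDirectory("/tmp/example", "/tmp/example-archive")           # target must not exist yet
DeleteFile("/tmp/example-archive/build.old.log")
```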
If not None, contents obtained from file will be decoded using this\n `encoding`.\n\n:param None|''|'\\n'|'\\r'|'\\r\\n' newline:\n Controls universal newlines.\n See 'io.open' newline parameter documentation for more details.\n\n:returns str|unicode:\n The file's contents.\n Returns unicode string when `encoding` is not None.\n\n.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m21"} {"signature": "def GetFileLines(filename, newline=None, encoding=None):", "body": "return GetFileContents(filename,binary=False,encoding=encoding,newline=newline,).split('')", "docstring": "Reads a file and returns its contents as a list of lines. Works for both local and remote files.\n\n:param unicode filename:\n\n:param None|''|'\\n'|'\\r'|'\\r\\n' newline:\n Controls universal newlines.\n See 'io.open' newline parameter documentation for more details.\n\n:param unicode encoding:\n File's encoding. If not None, contents obtained from file will be decoded using this\n `encoding`.\n\n:returns list(unicode):\n The file's lines\n\n.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m22"} {"signature": "def OpenFile(filename, binary=False, newline=None, encoding=None):", "body": "from six.moves.urllib.parse import urlparsefilename_url = urlparse(filename)if _UrlIsLocal(filename_url):if not os.path.isfile(filename):from ._exceptions import FileNotFoundErrorraise FileNotFoundError(filename)mode = '' if binary else ''return io.open(filename, mode, encoding=encoding, newline=newline)from ._exceptions import NotImplementedProtocolraise NotImplementedProtocol(target_url.scheme)", "docstring": "Open a file and returns it.\nConsider the possibility of a remote file (HTTP, HTTPS, FTP)\n\n:param unicode filename:\n Local or remote filename.\n\n:param bool binary:\n If True returns the file as is, ignore any EOL conversion.\n If set ignores univeral_newlines parameter.\n\n:param None|''|'\\n'|'\\r'|'\\r\\n' newline:\n Controls universal newlines.\n See 'io.open' newline parameter documentation for more details.\n\n:param unicode encoding:\n File's encoding. If not None, contents obtained from file will be decoded using this\n `encoding`.\n\n:returns file:\n The open file, it must be closed by the caller\n\n@raise: FileNotFoundError\n When the given filename cannot be found\n\n.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m23"} {"signature": "def ListFiles(directory):", "body": "from six.moves.urllib.parse import urlparsedirectory_url = urlparse(directory)if _UrlIsLocal(directory_url):if not os.path.isdir(directory):return Nonereturn os.listdir(directory)elif directory_url.scheme == '':from ._exceptions import NotImplementedProtocolraise NotImplementedProtocol(directory_url.scheme)else:from ._exceptions import NotImplementedProtocolraise NotImplementedProtocol(directory_url.scheme)", "docstring": "Lists the files in the given directory\n\n:type directory: unicode | unicode\n:param directory:\n A directory or URL\n\n:rtype: list(unicode) | list(unicode)\n:returns:\n List of filenames/directories found in the given directory.\n Returns None if the given directory does not exists.\n\n If `directory` is a unicode string, all files returned will also be unicode\n\n:raises NotImplementedProtocol:\n If file protocol is not local or FTP\n\n.. 
seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m24"} {"signature": "def CheckIsFile(filename):", "body": "if not IsFile(filename):from ._exceptions import FileNotFoundErrorraise FileNotFoundError(filename)", "docstring": "Check if the given file exists.\n\n@filename: unicode\n The filename to check for existence.\n\n@raise: FileNotFoundError\n Raises if the file does not exist.", "id": "f788:m25"} {"signature": "def CheckIsDir(directory):", "body": "if not IsDir(directory):from ._exceptions import DirectoryNotFoundErrorraise DirectoryNotFoundError(directory)", "docstring": "Check if the given directory exists.\n\n@filename: unicode\n Path to a directory being checked for existence.\n\n@raise: DirectoryNotFoundError\n Raises if the directory does not exist.", "id": "f788:m26"} {"signature": "def CreateFile(filename, contents, eol_style=EOL_STYLE_NATIVE, create_dir=True, encoding=None, binary=False):", "body": "if binary:if isinstance(contents, six.text_type):raise TypeError('')else:if not isinstance(contents, six.text_type):raise TypeError('')contents = _HandleContentsEol(contents, eol_style)encoding = encoding or sys.getfilesystemencoding()contents = contents.encode(encoding)binary = Trueif create_dir:dirname = os.path.dirname(filename)if dirname:CreateDirectory(dirname)from six.moves.urllib.parse import urlparsefilename_url = urlparse(filename)if _UrlIsLocal(filename_url):with open(filename, '') as oss:oss.write(contents)elif filename_url.scheme == '':from ._exceptions import NotImplementedProtocolraise NotImplementedProtocol(directory_url.scheme)else:from ._exceptions import NotImplementedProtocolraise NotImplementedProtocol(filename_url.scheme)return filename", "docstring": "Create a file with the given contents.\n\n:param unicode filename:\n Filename and path to be created.\n\n:param unicode contents:\n The file contents as a string.\n\n:type eol_style: EOL_STYLE_XXX constant\n:param eol_style:\n Replaces the EOL by the appropriate EOL depending on the eol_style value.\n Considers that all content is using only \"\\n\" as EOL.\n\n:param bool create_dir:\n If True, also creates directories needed in filename's path\n\n:param unicode encoding:\n Target file's content encoding. Defaults to sys.getfilesystemencoding()\n Ignored if `binary` = True\n\n:param bool binary:\n If True, file is created in binary mode. In this case, `contents` must be `bytes` and not\n `unicode`\n\n:return unicode:\n Returns the name of the file created.\n\n:raises NotImplementedProtocol:\n If file protocol is not local or FTP\n\n:raises ValueError:\n If trying to mix unicode `contents` without `encoding`, or `encoding` without\n unicode `contents`\n\n.. 
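A short usage sketch for the listing/validation helpers documented above (`ListFiles`, `CheckIsFile`, `CheckIsDir`, `GetFileLines`). The import path and the directory are hypothetical assumptions, not part of the dump.

```python
# Hypothetical import path and directory.
from myproject.filesystem import CheckIsDir, ListFiles, GetFileLines

project_dir = "/home/user/project"
CheckIsDir(project_dir)                       # raises DirectoryNotFoundError if missing

for name in ListFiles(project_dir) or []:     # ListFiles returns None for a missing dir
    print(name)

lines = GetFileLines("/home/user/project/README.rst", encoding="utf-8")
print(len(lines))
```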
seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m27"} {"signature": "def ReplaceInFile(filename, old, new, encoding=None):", "body": "contents = GetFileContents(filename, encoding=encoding)contents = contents.replace(old, new)CreateFile(filename, contents, encoding=encoding)return contents", "docstring": "Replaces all occurrences of \"old\" by \"new\" in the given file.\n\n:param unicode filename:\n The name of the file.\n\n:param unicode old:\n The string to search for.\n\n:param unicode new:\n Replacement string.\n\n:return unicode:\n The new contents of the file.", "id": "f788:m28"} {"signature": "def CreateDirectory(directory):", "body": "from six.moves.urllib.parse import urlparsedirectory_url = urlparse(directory)if _UrlIsLocal(directory_url):if not os.path.exists(directory):os.makedirs(directory)return directoryelif directory_url.scheme == '':from ._exceptions import NotImplementedProtocolraise NotImplementedProtocol(directory_url.scheme)else:from ._exceptions import NotImplementedProtocolraise NotImplementedProtocol(directory_url.scheme)", "docstring": "Create directory including any missing intermediate directory.\n\n:param unicode directory:\n\n:return unicode|urlparse.ParseResult:\n Returns the created directory or url (see urlparse).\n\n:raises NotImplementedProtocol:\n If protocol is not local or FTP.\n\n.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m29"} {"signature": "def DeleteDirectory(directory, skip_on_error=False):", "body": "_AssertIsLocal(directory)import shutildef OnError(fn, path, excinfo):''''''if IsLink(path):returnif fn is os.remove and os.access(path, os.W_OK):raiseimport statos.chmod(path, stat.S_IWRITE)fn(path)try:if not os.path.isdir(directory):if skip_on_error:returnfrom ._exceptions import DirectoryNotFoundErrorraise DirectoryNotFoundError(directory)shutil.rmtree(directory, onerror=OnError)except:if not skip_on_error:raise", "docstring": "Deletes a directory.\n\n:param unicode directory:\n\n:param bool skip_on_error:\n If True, ignore any errors when trying to delete directory (for example, directory not\n found)\n\n:raises NotImplementedForRemotePathError:\n If trying to delete a remote directory.", "id": "f788:m30"} {"signature": "def GetMTime(path):", "body": "_AssertIsLocal(path)if os.path.isdir(path):files = FindFiles(path)if len(files) > :return max(map(os.path.getmtime, files))return os.path.getmtime(path)", "docstring": ":param unicode path:\n Path to file or directory\n\n:rtype: float\n:returns:\n Modification time for path.\n\n If this is a directory, the highest mtime from files inside it will be returned.\n\n@note:\n In some Linux distros (such as CentOs, or anything with ext3), mtime will not return a value\n with resolutions higher than a second.\n\n http://stackoverflow.com/questions/2428556/os-path-getmtime-doesnt-return-fraction-of-a-second", "id": "f788:m31"} {"signature": "def ListMappedNetworkDrives():", "body": "if sys.platform != '':raise NotImplementedErrordrives_list = []netuse = _CallWindowsNetCommand([''])for line in netuse.split(EOL_STYLE_WINDOWS):match = re.match(\"\", line.rstrip())if match:drives_list.append((match.group(), match.group(), match.group() == ''))return drives_list", "docstring": "On Windows, returns a list of mapped network drives\n\n:return: tuple(string, string, bool)\n For each mapped netword drive, return 3 values tuple:\n - the local drive\n - the remote path-\n - True if the mapping is enabled (warning: not 
reliable)", "id": "f788:m32"} {"signature": "def CreateLink(target_path, link_path, override=True):", "body": "_AssertIsLocal(target_path)_AssertIsLocal(link_path)if override and IsLink(link_path):DeleteLink(link_path)dirname = os.path.dirname(link_path)if dirname:CreateDirectory(dirname)if sys.platform != '':return os.symlink(target_path, link_path) else:import jaraco.windows.filesystemreturn jaraco.windows.filesystem.symlink(target_path, link_path)from ._easyfs_win32 import CreateSymbolicLinktry:dw_flags = if target_path and os.path.isdir(target_path):dw_flags = return CreateSymbolicLink(target_path, link_path, dw_flags)except Exception as e:reraise(e, '' % locals())", "docstring": "Create a symbolic link at `link_path` pointing to `target_path`.\n\n:param unicode target_path:\n Link target\n\n:param unicode link_path:\n Fullpath to link name\n\n:param bool override:\n If True and `link_path` already exists as a link, that link is overridden.", "id": "f788:m34"} {"signature": "def IsLink(path):", "body": "_AssertIsLocal(path)if sys.platform != '':return os.path.islink(path)import jaraco.windows.filesystemreturn jaraco.windows.filesystem.islink(path)", "docstring": ":param unicode path:\n Path being tested\n\n:returns bool:\n True if `path` is a link", "id": "f788:m35"} {"signature": "def ReadLink(path):", "body": "_AssertIsLocal(path)if sys.platform != '':return os.readlink(path) if not IsLink(path):from ._exceptions import FileNotFoundErrorraise FileNotFoundError(path)import jaraco.windows.filesystemresult = jaraco.windows.filesystem.readlink(path)if '' in result:result = result.split('')[]return result", "docstring": "Read the target of the symbolic link at `path`.\n\n:param unicode path:\n Path to a symbolic link\n\n:returns unicode:\n Target of a symbolic link", "id": "f788:m36"} {"signature": "def _UrlIsLocal(directory_url):", "body": "return len(directory_url.scheme) < ", "docstring": ":param ParseResult directory_url:\n A parsed url as returned by urlparse.urlparse.\n\n:rtype: bool\n:returns:\n Returns whether the given url refers to a local path.\n\n.. note:: The \"directory_url.scheme\" is the drive letter for a local path on Windows and an empty string\nfor a local path on Linux. The other possible values are \"http\", \"ftp\", etc. 
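The symbolic-link helpers just documented (`CreateLink`, `IsLink`, `ReadLink`) dispatch to `os.symlink`/`os.readlink` on POSIX and to `jaraco.windows.filesystem` on Windows. A minimal sketch, assuming a hypothetical import path and placeholder paths:

```python
# Hypothetical import path and paths.
from myproject.filesystem import CreateLink, IsLink, ReadLink

target = "/opt/tool/releases/1.2.3"
link = "/opt/tool/current"

CreateLink(target, link, override=True)       # replaces an existing link when override=True
if IsLink(link):
    print(ReadLink(link))                     # -> "/opt/tool/releases/1.2.3"
```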
So, checking if\nthe length is less than 2 characters long checks that the url is local.", "id": "f788:m37"} {"signature": "def _AssertIsLocal(path):", "body": "from six.moves.urllib.parse import urlparseif not _UrlIsLocal(urlparse(path)):from ._exceptions import NotImplementedForRemotePathErrorraise NotImplementedForRemotePathError", "docstring": "Checks if a given path is local, raise an exception if not.\n\nThis is used in filesystem functions that do not support remote operations yet.\n\n:param unicode path:\n\n:raises NotImplementedForRemotePathError:\n If the given path is not local", "id": "f788:m38"} {"signature": "def _HandleContentsEol(contents, eol_style):", "body": "if eol_style == EOL_STYLE_NONE:return contentsif eol_style == EOL_STYLE_UNIX:return contents.replace('', eol_style).replace('', eol_style)if eol_style == EOL_STYLE_MAC:return contents.replace('', eol_style).replace('', eol_style)if eol_style == EOL_STYLE_WINDOWS:return contents.replace('', '').replace('', '').replace('', EOL_STYLE_WINDOWS)raise ValueError('' % (eol_style,))", "docstring": "Replaces eol on each line by the given eol_style.\n\n:param unicode contents:\n:type eol_style: EOL_STYLE_XXX constant\n:param eol_style:", "id": "f788:m39"} {"signature": "def _CallWindowsNetCommand(parameters):", "body": "import subprocesspopen = subprocess.Popen([\"\"] + parameters, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)stdoutdata, stderrdata = popen.communicate()if stderrdata:raise OSError(\"\" % stderrdata)return stdoutdata", "docstring": "Call Windows NET command, used to acquire/configure network services settings.\n\n:param parameters: list of command line parameters\n\n:return: command output", "id": "f788:m40"} {"signature": "def CheckForUpdate(source, target):", "body": "returnnot os.path.isfile(target) oros.path.getmtime(source) > os.path.getmtime(target)", "docstring": "Checks if the given target filename should be re-generated because the source has changed.\n:param source: the source filename.\n:param target: the target filename.\n:return bool:\n True if the target is out-dated, False otherwise.", "id": "f788:m41"} {"signature": "def MatchMasks(filename, masks):", "body": "import fnmatchif not isinstance(masks, (list, tuple)):masks = [masks]for i_mask in masks:if fnmatch.fnmatch(filename, i_mask):return Truereturn False", "docstring": "Verifies if a filename match with given patterns.\n\n:param str filename: The filename to match.\n:param list(str) masks: The patterns to search in the filename.\n:return bool:\n True if the filename has matched with one pattern, False otherwise.", "id": "f788:m42"} {"signature": "def FindFiles(dir_, in_filters=None, out_filters=None, recursive=True, include_root_dir=True, standard_paths=False):", "body": "if in_filters is None:in_filters = ['']if out_filters is None:out_filters = []result = []for dir_root, directories, filenames in os.walk(dir_):for i_directory in directories[:]:if MatchMasks(i_directory, out_filters):directories.remove(i_directory)for filename in directories + filenames:if MatchMasks(filename, in_filters) and not MatchMasks(filename, out_filters):result.append(os.path.join(dir_root, filename))if not recursive:breakif not include_root_dir:dir_prefix = len(dir_) + result = [file[dir_prefix:] for file in result]if standard_paths:result = map(StandardizePath, result)return result", "docstring": "Searches for files in a given directory that match with the given patterns.\n\n:param str dir_: the directory root, to search the files.\n:param list(str) in_filters: 
a list with patterns to match (default = all). E.g.: ['*.py']\n:param list(str) out_filters: a list with patterns to ignore (default = none). E.g.: ['*.py']\n:param bool recursive: if True search in subdirectories, otherwise, just in the root.\n:param bool include_root_dir: if True, includes the directory being searched in the returned paths\n:param bool standard_paths: if True, always uses unix path separators \"/\"\n:return list(str):\n A list of strings with the files that matched (with the full path in the filesystem).", "id": "f788:m43"} {"signature": "def ExpandUser(path):", "body": "if six.PY2:encoding = sys.getfilesystemencoding()path = path.encode(encoding)result = os.path.expanduser(path)if six.PY2:result = result.decode(encoding)return result", "docstring": "os.path.expanduser wrapper, necessary because it cannot handle unicode strings properly.\n\nThis is not necessary in Python 3.\n\n:param path:\n .. seealso:: os.path.expanduser", "id": "f788:m44"} {"signature": "def DumpDirHashToStringIO(directory, stringio, base='', exclude=None, include=None):", "body": "import fnmatchimport osfiles = [(os.path.join(directory, i), i) for i in os.listdir(directory)]files = [i for i in files if os.path.isfile(i[])]for fullname, filename in files:if include is not None:if not fnmatch.fnmatch(fullname, include):continueif exclude is not None:if fnmatch.fnmatch(fullname, exclude):continuemd5 = Md5Hex(fullname)if base:stringio.write('' % (base, filename, md5))else:stringio.write('' % (filename, md5))", "docstring": "Helper to iterate over the files in a directory putting those in the passed StringIO in ini\nformat.\n\n:param unicode directory:\n The directory for which the hash should be done.\n\n:param StringIO stringio:\n The string to which the dump should be put.\n\n:param unicode base:\n If provided should be added (along with a '/') before the name=hash of file.\n\n:param unicode exclude:\n Pattern to match files to exclude from the hashing. E.g.: *.gz\n\n:param unicode include:\n Pattern to match files to include in the hashing. E.g.: *.zip", "id": "f788:m45"} {"signature": "def Md5Hex(filename=None, contents=None):", "body": "import ioimport hashlibmd5 = hashlib.md5()if filename:stream = io.open(filename, '')try:while True:data = stream.read(md5.block_size * )if not data:breakmd5.update(data)finally:stream.close()else:md5.update(contents)return six.text_type(md5.hexdigest())", "docstring": ":param unicode filename:\n The file from which the md5 should be calculated. If the filename is given, the contents\n should NOT be given.\n\n:param unicode contents:\n The contents for which the md5 should be calculated. 
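The `FindFiles` and `CheckForUpdate` records above describe a filtered directory walk and an mtime-based staleness check. The sketch below combines them; the import path, directories, and the source-to-target mapping are hypothetical.

```python
# Hypothetical import path and directories.
from myproject.filesystem import FindFiles, CheckForUpdate

sources = FindFiles(
    "/home/user/project/src",
    in_filters=["*.py"],
    out_filters=["*_test.py"],
    recursive=True,
    standard_paths=True,                      # always use unix "/" separators
)

for source in sources:
    target = source.replace("/src/", "/build/")
    if CheckForUpdate(source, target):        # True when target is missing or older
        print("needs rebuild:", source)
```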
If the contents are given, the filename\n should NOT be given.\n\n:rtype: unicode\n:returns:\n Returns a string with the hex digest of the stream.", "id": "f788:m46"} {"signature": "def GetRandomHash(length=):", "body": "import randomreturn ('' + six.text_type(length) + '') % random.randrange( ** length)", "docstring": ":param length:\n Length of hash returned.\n\n:return unicode:\n A random hexadecimal hash of the given length", "id": "f788:m47"} {"signature": "def IterHashes(iterator_size, hash_length=):", "body": "if not isinstance(iterator_size, int):raise TypeError('')count = while count != iterator_size:count += yield GetRandomHash(hash_length)", "docstring": "Iterator for random hexadecimal hashes\n\n:param iterator_size:\n Amount of hashes return before this iterator stops.\n Goes on forever if `iterator_size` is negative.\n\n:param int hash_length:\n Size of each hash returned.\n\n:return generator(unicode):", "id": "f788:m48"} {"signature": "@contextlib.contextmanagerdef PushPopItem(obj, key, value):", "body": "if key in obj:old_value = obj[key]obj[key] = valueyield valueobj[key] = old_valueelse:obj[key] = valueyield valuedel obj[key]", "docstring": "A context manager to replace and restore a value using a getter and setter.\n\n:param object obj: The object to replace/restore.\n:param object key: The key to replace/restore in the object.\n:param object value: The value to replace.\n\nExample::\n\n with PushPop2(sys.modules, 'alpha', None):\n pytest.raises(ImportError):\n import alpha", "id": "f788:m49"} {"signature": "def __init__(self, suffix='', prefix='', base_dir=None, maximum_attempts=):", "body": "self.suffix = suffixself.prefix = prefixself.base_dir = base_dirself.maximum_attempts = maximum_attemptsself.dirname = None", "docstring": ":param unicode suffix:\n A suffix to add in the name of the created directory\n\n:param unicode prefix:\n A prefix to add in the name of the created directory\n\n:param unicode base_dir:\n A path to use as base in the created directory (if any). The temp directory will be a\n child of the given base dir\n\n:param int maximum_attemps:\n The maximum number of attempts to obtain the temp dir name.", "id": "f788:c0:m0"} {"signature": "def __enter__(self):", "body": "if self.base_dir is None:import tempfileself.dirname = tempfile.mkdtemp(self.suffix, self.prefix)return self.dirnameexisting_files = set(ListFiles(self.base_dir))for random_component in IterHashes(iterator_size=self.maximum_attempts):candidate_name = '' % (self.prefix, random_component, self.suffix)candidate_path = os.path.join(self.base_dir, candidate_name)if candidate_path not in existing_files:CreateDirectory(candidate_path)self.dirname = candidate_pathreturn self.dirnameraise RuntimeError('' % self.base_dir)", "docstring": ":return unicode:\n The path to the created temp file.", "id": "f788:c0:m1"} {"signature": "def __init__(self,contents,eol_style=EOL_STYLE_NATIVE,encoding=None,suffix='',prefix='',base_dir=None,maximum_attempts=):", "body": "import tempfileself.contents = contentsself.eol_style = eol_styleself.encoding = encodingself.suffix = suffixself.prefix = prefixself.base_dir = base_dir or tempfile.gettempdir()self.maximum_attempts = maximum_attemptsself.filename = None", "docstring": ":param contents: .. seealso:: CreateFile\n:param eol_style: .. seealso:: CreateFile\n:param encoding: .. 
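`TempDirectory` (documented above) is a context manager whose `__enter__` returns the created directory path; `Md5Hex` hashes either a file or in-memory contents. A minimal sketch, assuming a hypothetical import path; note that the matching `__exit__` (cleanup) is not shown in this dump.

```python
import os
# Hypothetical import path.
from myproject.filesystem import TempDirectory, CreateFile, Md5Hex

with TempDirectory(prefix="demo-") as temp_dir:
    filename = CreateFile(os.path.join(temp_dir, "data.txt"), u"hello\n", encoding="utf-8")
    print(Md5Hex(filename=filename))          # hex digest of the file contents
```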
seealso:: CreateFile\n\n:param unicode suffix:\n A suffix to add in the name of the created file\n\n:param unicode prefix:\n A prefix to add in the name of the created file\n\n:param unicode base_dir:\n A path to use as base in the created file. Uses temp dir if not given.\n\n:param int maximum_attemps:\n The maximum number of attempts to obtain the temp file name.", "id": "f788:c1:m0"} {"signature": "def __enter__(self):", "body": "from ._exceptions import FileAlreadyExistsErrorfor random_component in IterHashes(iterator_size=self.maximum_attempts):filename = os.path.join(self.base_dir, self.prefix + random_component + self.suffix)try:CreateFile(filename=filename,contents=self.contents,eol_style=self.eol_style,encoding=self.encoding,)self.filename = filenamereturn filenameexcept FileAlreadyExistsError:passraise RuntimeError('' % self.base_dir)", "docstring": ":return unicode:\n The path to the created temp file.", "id": "f788:c1:m1"} {"signature": "@classmethoddef Split(cls, extended_path_mask):", "body": "import os.pathr_tree_recurse = extended_path_mask[] in ''r_flat_recurse = extended_path_mask[] in ''r_dirname, r_filters = os.path.split(extended_path_mask)if r_tree_recurse:r_dirname = r_dirname[:]filters = r_filters.split('')r_in_filters = [i for i in filters if not i.startswith('')]r_out_filters = [i[:] for i in filters if i.startswith('')]return r_tree_recurse, r_flat_recurse, r_dirname, r_in_filters, r_out_filters", "docstring": "Splits the given path into their components: recursive, dirname, in_filters and out_filters\n\n:param str: extended_path_mask:\n The \"extended path mask\" to split\n\n:rtype: tuple(bool,bool,str,list(str),list(str))\n:returns:\n Returns the extended path 5 components:\n - The tree-recurse flag\n - The flat-recurse flag\n - The actual path\n - A list of masks to include\n - A list of masks to exclude", "id": "f788:c2:m0"} {"signature": "def find_image(conn, name):", "body": "for item in conn.list_images()['']:if (item[''][''] == configuration.LOCATION anditem[''][''] == '' andname in item['']['']):return itemreturn None", "docstring": "Find image by partial name and location.", "id": "f792:m1"} {"signature": "def check_detached_cdrom_gone(parent):", "body": "parent.client.get_attached_cdrom(datacenter_id=parent.datacenter[''],server_id=parent.server[''],cdrom_id=parent.test_image1[''])", "docstring": "Check if an attached cdrom is not attached anymore and it throws a PBNotFoundError", "id": "f792:m2"} {"signature": "def get_source_files():", "body": "scripts = []modules = [\"\", \"\", \"\"]py_files = [\"\"]files = []for code_file in scripts + modules + py_files:is_script = code_file in scriptsif not os.path.exists(code_file): alternative = os.path.join(os.environ.get(\"\", \"\"), code_file)code_file = alternative if os.path.exists(alternative) else code_fileif is_script:with open(code_file, \"\") as script_file:shebang = script_file.readline().decode(\"\")if ((sys.version_info[] == and \"\" in shebang)or (\"\" in shebang and \"\" not in shebang)):files.append(code_file)else:files.append(code_file)return files", "docstring": "Return a list of sources files/directories (to check with flake8/pylint)", "id": "f802:m0"} {"signature": "def _read_config(self, filename=None):", "body": "if filename:self._config_filename = filenameelse:try:import appdirsexcept ImportError:raise Exception(\"\"\"\")self._config_filename = appdirs.user_config_dir(_LIBRARY_NAME, \"\") + \"\"if not self._config:self._config = configparser.ConfigParser()self._config.optionxform = 
strself._config.read(self._config_filename)", "docstring": "Read the user configuration", "id": "f811:c0:m1"} {"signature": "def _save_config(self, filename=None):", "body": "if filename is None:filename = self._config_filenameparent_path = os.path.dirname(filename)if not os.path.isdir(parent_path):os.makedirs(parent_path)with open(filename, \"\") as configfile:self._config.write(configfile)", "docstring": "Save the given user configuration.", "id": "f811:c0:m2"} {"signature": "def _get_username(self, username=None, use_config=True, config_filename=None):", "body": "if not username and use_config:if self._config is None:self._read_config(config_filename)username = self._config.get(\"\", \"\", fallback=None)if not username:username = input(\"\").strip()while not username:username = input(\"\").strip()if '' not in self._config:self._config.add_section('')self._config.set(\"\", \"\", username)self._save_config()return username", "docstring": "Determine the username\n\n If a username is given, this name is used. Otherwise the configuration\n file will be consulted if `use_config` is set to True. The user is asked\n for the username if the username is not available. Then the username is\n stored in the configuration file.\n\n :param username: Username (used directly if given)\n :type username: ``str``\n\n :param use_config: Whether to read username from configuration file\n :type use_config: ``bool``\n\n :param config_filename: Path to the configuration file\n :type config_filename: ``str``", "id": "f811:c0:m3"} {"signature": "def _get_password(self, password, use_config=True, config_filename=None,use_keyring=HAS_KEYRING):", "body": "if not password and use_config:if self._config is None:self._read_config(config_filename)password = self._config.get(\"\", \"\", fallback=None)if not password and use_keyring:logger = logging.getLogger(__name__)question = (\"\".format(self.username, self.host_base))if HAS_KEYRING:password = keyring.get_password(self.keyring_identificator, self.username)if password is None:password = getpass.getpass(question)try:keyring.set_password(self.keyring_identificator, self.username, password)except keyring.errors.PasswordSetError as error:logger.warning(\"\",self.keyring_identificator, error)else:logger.warning(\"\"\"\")password = self._config.get(\"\", \"\", fallback=None)if password is None:password = getpass.getpass(question)store_plaintext_passwords = self._config.get(\"\", \"\", fallback=None)if store_plaintext_passwords != \"\":question = (\"\" +self._config_filename())answer = ask(question, [\"\", \"\", \"\"], \"\")if answer == \"\":self._config.set(\"\", \"\", password)self._save_config()elif answer == \"\":if \"\" not in self._config:self._config.add_section(\"\")self._config.set(\"\", \"\", \"\")self._save_config()return password", "docstring": "Determine the user password\n\nIf the password is given, this password is used. 
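The configuration helpers above (`_read_config`, `_save_config`, `_get_username`, and `_get_password`, continued below) implement a documented lookup order: an explicitly passed value wins, then the configuration file is consulted, then the user is prompted, and the username (and optionally the password) is written back to the configuration file; the password helper can also use the system keyring. The sketch below only illustrates that order; the class name and constructor are hypothetical (the dump identifies the class only as "f811:c0"), and these are internal helpers rather than a public API.

```python
# Hypothetical class name and constructor; not shown in this dump.
client = CloudApiClient()

# Explicit value -> configuration file -> interactive prompt (stored afterwards);
# the password lookup may additionally consult the system keyring.
username = client._get_username(username=None, use_config=True)
password = client._get_password(None, use_config=True, use_keyring=True)
```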
Otherwise\nthis function will try to get the password from the user's keyring\nif `use_keyring` is set to True.\n\n:param username: Username (used directly if given)\n:type username: ``str``\n\n:param use_config: Whether to read username from configuration file\n:type use_config: ``bool``\n\n:param config_filename: Path to the configuration file\n:type config_filename: ``str``", "id": "f811:c0:m4"} {"signature": "def list_contracts(self, depth=):", "body": "response = self._perform_request('' + str(depth))return response", "docstring": "Retrieves information about the resource limits\nfor a particular contract and the current resource usage.", "id": "f811:c0:m5"} {"signature": "def get_datacenter(self, datacenter_id, depth=):", "body": "response = self._perform_request('' % (datacenter_id, str(depth)))return response", "docstring": "Retrieves a data center by its ID.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m6"} {"signature": "def get_datacenter_by_name(self, name, depth=):", "body": "all_data_centers = self.list_datacenters(depth=depth)['']data_center = find_item_by_name(all_data_centers, lambda i: i[''][''], name)if not data_center:raise NameError(\"\"\"\".format(name=name))if len(data_center) > :raise NameError(\"\".format(n=len(data_center),name=name,names=\"\".join(d[''][''] for d in data_center)))return data_center[]", "docstring": "Retrieves a data center by its name.\n\nEither returns the data center response or raises an Exception\nif no or more than one data center was found with the name.\nThe search for the name is done in this relaxing way:\n\n- exact name match\n- case-insentive name match\n- data center starts with the name\n- data center starts with the name (case insensitive)\n- name appears in the data center name\n- name appears in the data center name (case insensitive)\n\n:param name: The name of the data center.\n:type name: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m7"} {"signature": "def list_datacenters(self, depth=):", "body": "response = self._perform_request('' + str(depth))return response", "docstring": "Retrieves a list of all data centers.", "id": "f811:c0:m8"} {"signature": "def delete_datacenter(self, datacenter_id):", "body": "response = self._perform_request(url='' % (datacenter_id),method='')return response", "docstring": "Removes the data center and all its components such as servers, NICs,\nload balancers, volumes.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``", "id": "f811:c0:m9"} {"signature": "def create_datacenter(self, datacenter):", "body": "server_items = []volume_items = []lan_items = []loadbalancer_items = []entities = dict()properties = {\"\": datacenter.name}if datacenter.location:properties[''] = datacenter.locationif datacenter.description:properties[''] = datacenter.descriptionif datacenter.servers:for server in datacenter.servers:server_items.append(self._create_server_dict(server))servers = {\"\": server_items}server_entities = {\"\": servers}entities.update(server_entities)if datacenter.volumes:for volume in datacenter.volumes:volume_items.append(self._create_volume_dict(volume))volumes = {\"\": volume_items}volume_entities = {\"\": volumes}entities.update(volume_entities)if datacenter.loadbalancers:for loadbalancer in 
datacenter.loadbalancers:loadbalancer_items.append(self._create_loadbalancer_dict(loadbalancer))loadbalancers = {\"\": loadbalancer_items}loadbalancer_entities = {\"\": loadbalancers}entities.update(loadbalancer_entities)if datacenter.lans:for lan in datacenter.lans:lan_items.append(self._create_lan_dict(lan))lans = {\"\": lan_items}lan_entities = {\"\": lans}entities.update(lan_entities)if not entities:raw = {\"\": properties,}else:raw = {\"\": properties,\"\": entities}data = json.dumps(raw)response = self._perform_request(url='',method='',data=data)return response", "docstring": "Creates a data center -- both simple and complex are supported.", "id": "f811:c0:m10"} {"signature": "def update_datacenter(self, datacenter_id, **kwargs):", "body": "data = {}for attr, value in kwargs.items():data[self._underscore_to_camelcase(attr)] = valueresponse = self._perform_request(url='' % (datacenter_id),method='',data=json.dumps(data))return response", "docstring": "Updates a data center with the parameters provided.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``", "id": "f811:c0:m11"} {"signature": "def get_firewall_rule(self, datacenter_id,server_id, nic_id, firewall_rule_id):", "body": "response = self._perform_request('' % (datacenter_id,server_id,nic_id,firewall_rule_id))return response", "docstring": "Retrieves a single firewall rule by ID.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param nic_id: The unique ID of the NIC.\n:type nic_id: ``str``\n\n:param firewall_rule_id: The unique ID of the firewall rule.\n:type firewall_rule_id: ``str``", "id": "f811:c0:m12"} {"signature": "def get_firewall_rules(self, datacenter_id, server_id, nic_id, depth=):", "body": "response = self._perform_request('' % (datacenter_id,server_id,nic_id,str(depth)))return response", "docstring": "Retrieves a list of firewall rules available in the account.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param nic_id: The unique ID of the NIC.\n:type nic_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m13"} {"signature": "def delete_firewall_rule(self, datacenter_id, server_id,nic_id, firewall_rule_id):", "body": "response = self._perform_request(url='' % (datacenter_id,server_id,nic_id,firewall_rule_id),method='')return response", "docstring": "Removes a firewall rule from the NIC.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param nic_id: The unique ID of the NIC.\n:type nic_id: ``str``\n\n:param firewall_rule_id: The unique ID of the firewall rule.\n:type firewall_rule_id: ``str``", "id": "f811:c0:m14"} {"signature": "def create_firewall_rule(self, datacenter_id, server_id,nic_id, firewall_rule):", "body": "properties = {\"\": firewall_rule.name}if firewall_rule.protocol:properties[''] = firewall_rule.protocolif firewall_rule.source_mac:properties[''] = firewall_rule.source_macif firewall_rule.source_ip:properties[''] = firewall_rule.source_ipif firewall_rule.target_ip:properties[''] = firewall_rule.target_ipif firewall_rule.port_range_start:properties[''] = firewall_rule.port_range_startif firewall_rule.port_range_end:properties[''] = 
firewall_rule.port_range_endif firewall_rule.icmp_type:properties[''] = firewall_rule.icmp_typeif firewall_rule.icmp_code:properties[''] = firewall_rule.icmp_codedata = {\"\": properties}response = self._perform_request(url='' % (datacenter_id,server_id,nic_id),method='',data=json.dumps(data))return response", "docstring": "Creates a firewall rule on the specified NIC and server.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param nic_id: The unique ID of the NIC.\n:type nic_id: ``str``\n\n:param firewall_rule: A firewall rule dict.\n:type firewall_rule: ``dict``", "id": "f811:c0:m15"} {"signature": "def update_firewall_rule(self, datacenter_id, server_id,nic_id, firewall_rule_id, **kwargs):", "body": "data = {}for attr, value in kwargs.items():data[self._underscore_to_camelcase(attr)] = valueif attr == '':data[''] = valueelif attr == '':data[''] = valueelif attr == '':data[''] = valueelif attr == '':data[''] = valueelif attr == '':data[''] = valueelif attr == '':data[''] = valueelif attr == '':data[''] = valueelse:data[self._underscore_to_camelcase(attr)] = valueresponse = self._perform_request(url='' % (datacenter_id,server_id,nic_id,firewall_rule_id),method='',data=json.dumps(data))return response", "docstring": "Updates a firewall rule.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param nic_id: The unique ID of the NIC.\n:type nic_id: ``str``\n\n:param firewall_rule_id: The unique ID of the firewall rule.\n:type firewall_rule_id: ``str``", "id": "f811:c0:m16"} {"signature": "def get_image(self, image_id):", "body": "response = self._perform_request('' % image_id)return response", "docstring": "Retrieves a single image by ID.\n\n:param image_id: The unique ID of the image.\n:type image_id: ``str``", "id": "f811:c0:m17"} {"signature": "def list_images(self, depth=):", "body": "response = self._perform_request('' + str(depth))return response", "docstring": "Retrieves a list of images available in the data center.\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m18"} {"signature": "def delete_image(self, image_id):", "body": "response = self._perform_request(url='' + image_id,method='')return response", "docstring": "Removes only user created images.\n\n:param image_id: The unique ID of the image.\n:type image_id: ``str``", "id": "f811:c0:m19"} {"signature": "def update_image(self, image_id, **kwargs):", "body": "data = {}for attr, value in kwargs.items():data[self._underscore_to_camelcase(attr)] = valueresponse = self._perform_request(url='' + image_id,method='',data=json.dumps(data))return response", "docstring": "Replace all properties of an image.", "id": "f811:c0:m20"} {"signature": "def get_ipblock(self, ipblock_id):", "body": "response = self._perform_request('' % ipblock_id)return response", "docstring": "Retrieves a single IP block by ID.\n\n:param ipblock_id: The unique ID of the IP block.\n:type ipblock_id: ``str``", "id": "f811:c0:m21"} {"signature": "def list_ipblocks(self, depth=):", "body": "response = self._perform_request('' % str(depth))return response", "docstring": "Retrieves a list of IP blocks available in the account.", "id": "f811:c0:m22"} {"signature": "def delete_ipblock(self, ipblock_id):", "body": "response = self._perform_request(url='' + ipblock_id, method='')return response", 
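The image and firewall-rule methods documented above are thin wrappers over `_perform_request`. The sketch below shows the call shapes only: `client` is a hypothetical instance of this API client, the UUIDs are placeholders, and the response key names are assumptions, since the literal strings were stripped from this dump.

```python
# "client" is a hypothetical instance of the API client documented here (f811:c0).
images = client.list_images(depth=1)
for item in images["items"]:                  # assumed response shape
    print(item["id"], item["properties"]["name"])

# Keyword arguments are converted to the API's camelCase property names.
client.update_firewall_rule(
    datacenter_id="dc-uuid",                  # placeholder UUIDs
    server_id="srv-uuid",
    nic_id="nic-uuid",
    firewall_rule_id="fw-uuid",
    port_range_start=80,
    port_range_end=80,
)
```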
"docstring": "Removes a single IP block from your account.\n\n:param ipblock_id: The unique ID of the IP block.\n:type ipblock_id: ``str``", "id": "f811:c0:m23"} {"signature": "def reserve_ipblock(self, ipblock):", "body": "properties = {\"\": ipblock.name}if ipblock.location:properties[''] = ipblock.locationif ipblock.size:properties[''] = str(ipblock.size)raw = {\"\": properties,}response = self._perform_request(url='', method='', data=json.dumps(raw))return response", "docstring": "Reserves an IP block within your account.", "id": "f811:c0:m24"} {"signature": "def get_lan(self, datacenter_id, lan_id, depth=):", "body": "response = self._perform_request('' % (datacenter_id,lan_id,str(depth)))return response", "docstring": "Retrieves a single LAN by ID.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param lan_id: The unique ID of the LAN.\n:type lan_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m25"} {"signature": "def list_lans(self, datacenter_id, depth=):", "body": "response = self._perform_request('' % (datacenter_id,str(depth)))return response", "docstring": "Retrieves a list of LANs available in the account.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m26"} {"signature": "def delete_lan(self, datacenter_id, lan_id):", "body": "response = self._perform_request(url='' % (datacenter_id, lan_id), method='')return response", "docstring": "Removes a LAN from the data center.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param lan_id: The unique ID of the LAN.\n:type lan_id: ``str``", "id": "f811:c0:m27"} {"signature": "def create_lan(self, datacenter_id, lan):", "body": "data = json.dumps(self._create_lan_dict(lan))response = self._perform_request(url='' % datacenter_id,method='',data=data)return response", "docstring": "Creates a LAN in the data center.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param lan: The LAN object to be created.\n:type lan: ``dict``", "id": "f811:c0:m28"} {"signature": "def update_lan(self, datacenter_id, lan_id, name=None,public=None, ip_failover=None):", "body": "data = {}if name:data[''] = nameif public is not None:data[''] = publicif ip_failover:data[''] = ip_failoverresponse = self._perform_request(url='' % (datacenter_id, lan_id),method='',data=json.dumps(data))return response", "docstring": "Updates a LAN\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param lan_id: The unique ID of the LAN.\n:type lan_id: ``str``\n\n:param name: The new name of the LAN.\n:type name: ``str``\n\n:param public: Indicates if the LAN is public.\n:type public: ``bool``\n\n:param ip_failover: A list of IP fail-over dicts.\n:type ip_failover: ``list``", "id": "f811:c0:m29"} {"signature": "def get_lan_members(self, datacenter_id, lan_id, depth=):", "body": "response = self._perform_request('' % (datacenter_id,lan_id,str(depth)))return response", "docstring": "Retrieves the list of NICs that are part of the LAN.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param lan_id: The unique ID of the LAN.\n:type lan_id: ``str``", "id": "f811:c0:m30"} {"signature": "def get_loadbalancer(self, datacenter_id, loadbalancer_id):", "body": "response = self._perform_request('' % 
(datacenter_id, loadbalancer_id))return response", "docstring": "Retrieves a single load balancer by ID.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param loadbalancer_id: The unique ID of the load balancer.\n:type loadbalancer_id: ``str``", "id": "f811:c0:m31"} {"signature": "def list_loadbalancers(self, datacenter_id, depth=):", "body": "response = self._perform_request('' % (datacenter_id, str(depth)))return response", "docstring": "Retrieves a list of load balancers in the data center.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m32"} {"signature": "def delete_loadbalancer(self, datacenter_id, loadbalancer_id):", "body": "response = self._perform_request(url='' % (datacenter_id, loadbalancer_id), method='')return response", "docstring": "Removes the load balancer from the data center.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param loadbalancer_id: The unique ID of the load balancer.\n:type loadbalancer_id: ``str``", "id": "f811:c0:m33"} {"signature": "def create_loadbalancer(self, datacenter_id, loadbalancer):", "body": "data = json.dumps(self._create_loadbalancer_dict(loadbalancer))response = self._perform_request(url='' % datacenter_id,method='',data=data)return response", "docstring": "Creates a load balancer within the specified data center.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param loadbalancer: The load balancer object to be created.\n:type loadbalancer: ``dict``", "id": "f811:c0:m34"} {"signature": "def update_loadbalancer(self, datacenter_id,loadbalancer_id, **kwargs):", "body": "data = {}for attr, value in kwargs.items():data[self._underscore_to_camelcase(attr)] = valueresponse = self._perform_request(url='' % (datacenter_id,loadbalancer_id),method='',data=json.dumps(data))return response", "docstring": "Updates a load balancer\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param loadbalancer_id: The unique ID of the load balancer.\n:type loadbalancer_id: ``str``", "id": "f811:c0:m35"} {"signature": "def get_loadbalancer_members(self, datacenter_id, loadbalancer_id,depth=):", "body": "response = self._perform_request('' % (datacenter_id, loadbalancer_id, str(depth)))return response", "docstring": "Retrieves the list of NICs that are associated with a load balancer.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param loadbalancer_id: The unique ID of the load balancer.\n:type loadbalancer_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m36"} {"signature": "def add_loadbalanced_nics(self, datacenter_id,loadbalancer_id, nic_id):", "body": "data = '' + nic_id + ''response = self._perform_request(url='' % (datacenter_id,loadbalancer_id),method='',data=data)return response", "docstring": "Associates a NIC with the given load balancer.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param loadbalancer_id: The unique ID of the load balancer.\n:type loadbalancer_id: ``str``\n\n:param nic_id: The ID of the NIC.\n:type nic_id: ``str``", "id": "f811:c0:m37"} {"signature": "def get_loadbalanced_nic(self, datacenter_id,loadbalancer_id, nic_id, depth=):", "body": "response = self._perform_request('' % 
(datacenter_id,loadbalancer_id,nic_id,str(depth)))return response", "docstring": "Gets the properties of a load balanced NIC.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param loadbalancer_id: The unique ID of the load balancer.\n:type loadbalancer_id: ``str``\n\n:param nic_id: The unique ID of the NIC.\n:type nic_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m38"} {"signature": "def remove_loadbalanced_nic(self, datacenter_id,loadbalancer_id, nic_id):", "body": "response = self._perform_request(url='' % (datacenter_id,loadbalancer_id,nic_id),method='')return response", "docstring": "Removes a NIC from the load balancer.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param loadbalancer_id: The unique ID of the load balancer.\n:type loadbalancer_id: ``str``\n\n:param nic_id: The unique ID of the NIC.\n:type nic_id: ``str``", "id": "f811:c0:m39"} {"signature": "def get_location(self, location_id, depth=):", "body": "response = self._perform_request('' % (location_id, depth))return response", "docstring": "Retrieves a single location by ID.\n\n:param location_id: The unique ID of the location.\n:type location_id: ``str``", "id": "f811:c0:m40"} {"signature": "def list_locations(self, depth=):", "body": "response = self._perform_request('' % (depth))return response", "docstring": "Retrieves a list of locations available in the account.", "id": "f811:c0:m41"} {"signature": "def get_nic(self, datacenter_id, server_id, nic_id, depth=):", "body": "response = self._perform_request('' % (datacenter_id,server_id,nic_id,str(depth)))return response", "docstring": "Retrieves a NIC by its ID.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param nic_id: The unique ID of the NIC.\n:type nic_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m42"} {"signature": "def list_nics(self, datacenter_id, server_id, depth=):", "body": "response = self._perform_request('' % (datacenter_id,server_id,str(depth)))return response", "docstring": "Retrieves a list of all NICs bound to the specified server.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m43"} {"signature": "def delete_nic(self, datacenter_id, server_id, nic_id):", "body": "response = self._perform_request(url='' % (datacenter_id,server_id,nic_id),method='')return response", "docstring": "Removes a NIC from the server.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param nic_id: The unique ID of the NIC.\n:type nic_id: ``str``", "id": "f811:c0:m44"} {"signature": "def create_nic(self, datacenter_id, server_id, nic):", "body": "data = json.dumps(self._create_nic_dict(nic))response = self._perform_request(url='' % (datacenter_id,server_id),method='',data=data)return response", "docstring": "Creates a NIC on the specified server.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param nic: A NIC dict.\n:type 
nic: ``dict``", "id": "f811:c0:m45"} {"signature": "def update_nic(self, datacenter_id, server_id,nic_id, **kwargs):", "body": "data = {}for attr, value in kwargs.items():data[self._underscore_to_camelcase(attr)] = valueresponse = self._perform_request(url='' % (datacenter_id,server_id,nic_id),method='',data=json.dumps(data))return response", "docstring": "Updates a NIC with the parameters provided.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param nic_id: The unique ID of the NIC.\n:type nic_id: ``str``", "id": "f811:c0:m46"} {"signature": "def get_request(self, request_id, status=False):", "body": "if status:response = self._perform_request('' + request_id + '')else:response = self._perform_request('' % request_id)return response", "docstring": "Retrieves a single request by ID.\n\n:param request_id: The unique ID of the request.\n:type request_id: ``str``\n\n:param status: Retreive the full status of the request.\n:type status: ``bool``", "id": "f811:c0:m47"} {"signature": "def list_requests(self, depth=):", "body": "response = self._perform_request('' % str(depth))return response", "docstring": "Retrieves a list of requests available in the account.", "id": "f811:c0:m48"} {"signature": "def get_server(self, datacenter_id, server_id, depth=):", "body": "response = self._perform_request('' % (datacenter_id,server_id,str(depth)))return response", "docstring": "Retrieves a server by its ID.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m49"} {"signature": "def list_servers(self, datacenter_id, depth=):", "body": "response = self._perform_request('' % (datacenter_id, str(depth)))return response", "docstring": "Retrieves a list of all servers bound to the specified data center.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m50"} {"signature": "def delete_server(self, datacenter_id, server_id):", "body": "response = self._perform_request(url='' % (datacenter_id,server_id),method='')return response", "docstring": "Removes the server from your data center.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``", "id": "f811:c0:m51"} {"signature": "def create_server(self, datacenter_id, server):", "body": "data = json.dumps(self._create_server_dict(server))response = self._perform_request(url='' % (datacenter_id),method='',data=data)return response", "docstring": "Creates a server within the data center.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server: A dict of the server to be created.\n:type server: ``dict``", "id": "f811:c0:m52"} {"signature": "def update_server(self, datacenter_id, server_id, **kwargs):", "body": "data = {}for attr, value in kwargs.items():if attr == '':boot_volume_properties = {\"\": value}boot_volume_entities = {\"\": boot_volume_properties}data.update(boot_volume_entities)else:data[self._underscore_to_camelcase(attr)] = valueresponse = self._perform_request(url='' % (datacenter_id,server_id),method='',data=json.dumps(data))return response", 
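A usage sketch for the server and request methods completed above (`list_servers`, `get_server`, `get_request`): `client` is a hypothetical client instance and all UUIDs are placeholders.

```python
# "client" is a hypothetical API client instance; UUIDs are placeholders.
servers = client.list_servers(datacenter_id="dc-uuid", depth=2)

server = client.get_server(datacenter_id="dc-uuid", server_id="srv-uuid", depth=1)

# Provisioning is asynchronous; status=True fetches the request's status sub-resource.
status = client.get_request(request_id="req-uuid", status=True)
```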
"docstring": "Updates a server with the parameters provided.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``", "id": "f811:c0:m53"} {"signature": "def get_attached_volumes(self, datacenter_id, server_id, depth=):", "body": "response = self._perform_request('' % (datacenter_id,server_id,str(depth)))return response", "docstring": "Retrieves a list of volumes attached to the server.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m54"} {"signature": "def get_attached_volume(self, datacenter_id, server_id, volume_id):", "body": "response = self._perform_request('' % (datacenter_id,server_id,volume_id))return response", "docstring": "Retrieves volume information.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param volume_id: The unique ID of the volume.\n:type volume_id: ``str``", "id": "f811:c0:m55"} {"signature": "def attach_volume(self, datacenter_id, server_id, volume_id):", "body": "data = '' + volume_id + ''response = self._perform_request(url='' % (datacenter_id,server_id),method='',data=data)return response", "docstring": "Attaches a volume to a server.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param volume_id: The unique ID of the volume.\n:type volume_id: ``str``", "id": "f811:c0:m56"} {"signature": "def detach_volume(self, datacenter_id, server_id, volume_id):", "body": "response = self._perform_request(url='' % (datacenter_id,server_id,volume_id),method='')return response", "docstring": "Detaches a volume from a server.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param volume_id: The unique ID of the volume.\n:type volume_id: ``str``", "id": "f811:c0:m57"} {"signature": "def get_attached_cdroms(self, datacenter_id, server_id, depth=):", "body": "response = self._perform_request('' % (datacenter_id,server_id,str(depth)))return response", "docstring": "Retrieves a list of CDROMs attached to the server.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m58"} {"signature": "def get_attached_cdrom(self, datacenter_id, server_id, cdrom_id):", "body": "response = self._perform_request('' % (datacenter_id,server_id,cdrom_id))return response", "docstring": "Retrieves an attached CDROM.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param cdrom_id: The unique ID of the CDROM.\n:type cdrom_id: ``str``", "id": "f811:c0:m59"} {"signature": "def attach_cdrom(self, datacenter_id, server_id, cdrom_id):", "body": "data = '' + cdrom_id + ''response = self._perform_request(url='' % (datacenter_id,server_id),method='',data=data)return response", "docstring": "Attaches a CDROM to a server.\n\n:param 
datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param cdrom_id: The unique ID of the CDROM.\n:type cdrom_id: ``str``", "id": "f811:c0:m60"} {"signature": "def detach_cdrom(self, datacenter_id, server_id, cdrom_id):", "body": "response = self._perform_request(url='' % (datacenter_id,server_id,cdrom_id),method='')return response", "docstring": "Detaches a volume from a server.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``\n\n:param cdrom_id: The unique ID of the CDROM.\n:type cdrom_id: ``str``", "id": "f811:c0:m61"} {"signature": "def start_server(self, datacenter_id, server_id):", "body": "response = self._perform_request(url='' % (datacenter_id,server_id),method='')return response", "docstring": "Starts the server.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``", "id": "f811:c0:m62"} {"signature": "def stop_server(self, datacenter_id, server_id):", "body": "response = self._perform_request(url='' % (datacenter_id,server_id),method='')return response", "docstring": "Stops the server.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``", "id": "f811:c0:m63"} {"signature": "def reboot_server(self, datacenter_id, server_id):", "body": "response = self._perform_request(url='' % (datacenter_id,server_id),method='')return response", "docstring": "Reboots the server.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param server_id: The unique ID of the server.\n:type server_id: ``str``", "id": "f811:c0:m64"} {"signature": "def get_snapshot(self, snapshot_id):", "body": "response = self._perform_request('' % snapshot_id)return response", "docstring": "Retrieves a single snapshot by ID.\n\n:param snapshot_id: The unique ID of the snapshot.\n:type snapshot_id: ``str``", "id": "f811:c0:m65"} {"signature": "def list_snapshots(self, depth=):", "body": "response = self._perform_request('' % str(depth))return response", "docstring": "Retrieves a list of snapshots available in the account.", "id": "f811:c0:m66"} {"signature": "def delete_snapshot(self, snapshot_id):", "body": "response = self._perform_request(url='' + snapshot_id, method='')return response", "docstring": "Removes a snapshot from your account.\n\n:param snapshot_id: The unique ID of the snapshot.\n:type snapshot_id: ``str``", "id": "f811:c0:m67"} {"signature": "def update_snapshot(self, snapshot_id, **kwargs):", "body": "data = {}for attr, value in kwargs.items():data[self._underscore_to_camelcase(attr)] = valueresponse = self._perform_request(url='' + snapshot_id, method='', data=json.dumps(data))return response", "docstring": "Removes a snapshot from your account.\n\n:param snapshot_id: The unique ID of the snapshot.\n:type snapshot_id: ``str``", "id": "f811:c0:m68"} {"signature": "def create_snapshot(self, datacenter_id, volume_id,name=None, description=None):", "body": "data = {'': name, '': description}response = self._perform_request('' % (datacenter_id, volume_id),method='',data=urlencode(data))return response", "docstring": "Creates a snapshot of the specified volume.\n\n:param datacenter_id: The unique ID of the data center.\n:type 
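create_snapshot and restore_snapshot (f811:c0:m69/m70) post url-encoded form data rather than JSON bodies. A sketch of the snapshot round trip, reusing the `client` from the previous example and treating every ID as a placeholder:

```python
# `client` is the service instance from the previous sketch; IDs are placeholders.
dc_id, volume_id = 'dc-uuid', 'vol-uuid'

snapshot = client.create_snapshot(dc_id, volume_id,
                                  name='pre-upgrade',
                                  description='state before the kernel update')

# wait_for_completion (f811:c0:m97) polls the request behind this response
# until it finishes, raising on failure or timeout.
client.wait_for_completion(snapshot)

# Roll the volume back later; assumes the response dict carries the new
# snapshot's 'id' at the top level.
client.restore_snapshot(dc_id, volume_id, snapshot['id'])
```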
datacenter_id: ``str``\n\n:param volume_id: The unique ID of the volume.\n:type volume_id: ``str``\n\n:param name: The name given to the volume.\n:type name: ``str``\n\n:param description: The description given to the volume.\n:type description: ``str``", "id": "f811:c0:m69"} {"signature": "def restore_snapshot(self, datacenter_id, volume_id, snapshot_id):", "body": "data = {'': snapshot_id}response = self._perform_request(url='' % (datacenter_id,volume_id),method='',data=urlencode(data))return response", "docstring": "Restores a snapshot to the specified volume.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param volume_id: The unique ID of the volume.\n:type volume_id: ``str``\n\n:param snapshot_id: The unique ID of the snapshot.\n:type snapshot_id: ``str``", "id": "f811:c0:m70"} {"signature": "def remove_snapshot(self, snapshot_id):", "body": "response = self._perform_request(url='' + snapshot_id, method='')return response", "docstring": "Removes a snapshot.\n\n:param snapshot_id: The ID of the snapshot\n you wish to remove.\n:type snapshot_id: ``str``", "id": "f811:c0:m71"} {"signature": "def list_groups(self, depth=):", "body": "response = self._perform_request('' + str(depth))return response", "docstring": "Retrieves a list of all groups.\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m72"} {"signature": "def get_group(self, group_id, depth=):", "body": "response = self._perform_request('' % (group_id, str(depth)))return response", "docstring": "Retrieves a single group by ID.\n\n:param group_id: The unique ID of the group.\n:type group_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m73"} {"signature": "def create_group(self, group):", "body": "data = json.dumps(self._create_group_dict(group))response = self._perform_request(url='',method='',data=data)return response", "docstring": "Creates a new group and set group privileges.\n\n:param group: The group object to be created.\n:type group: ``dict``", "id": "f811:c0:m74"} {"signature": "def update_group(self, group_id, **kwargs):", "body": "properties = {}if '' in kwargs:kwargs[''] = kwargs.pop('')for attr, value in kwargs.items():properties[self._underscore_to_camelcase(attr)] = valuedata = {\"\": properties}response = self._perform_request(url='' % group_id,method='',data=json.dumps(data))return response", "docstring": "Updates a group.\n\n:param group_id: The unique ID of the group.\n:type group_id: ``str``", "id": "f811:c0:m75"} {"signature": "def delete_group(self, group_id):", "body": "response = self._perform_request(url='' % group_id,method='')return response", "docstring": "Removes a group.\n\n:param group_id: The unique ID of the group.\n:type group_id: ``str``", "id": "f811:c0:m76"} {"signature": "def list_shares(self, group_id, depth=):", "body": "response = self._perform_request('' % (group_id, str(depth)))return response", "docstring": "Retrieves a list of all shares though a group.\n\n:param group_id: The unique ID of the group.\n:type group_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m77"} {"signature": "def get_share(self, group_id, resource_id, depth=):", "body": "response = self._perform_request(''% (group_id, resource_id, str(depth)))return response", "docstring": "Retrieves a specific resource share available to a group.\n\n:param group_id: The unique ID of the group.\n:type group_id: ``str``\n\n:param resource_id: The 
unique ID of the resource.\n:type resource_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m78"} {"signature": "def add_share(self, group_id, resource_id, **kwargs):", "body": "properties = {}for attr, value in kwargs.items():properties[self._underscore_to_camelcase(attr)] = valuedata = {\"\": properties}response = self._perform_request(url='' % (group_id, resource_id),method='',data=json.dumps(data))return response", "docstring": "Shares a resource through a group.\n\n:param group_id: The unique ID of the group.\n:type group_id: ``str``\n\n:param resource_id: The unique ID of the resource.\n:type resource_id: ``str``", "id": "f811:c0:m79"} {"signature": "def update_share(self, group_id, resource_id, **kwargs):", "body": "properties = {}for attr, value in kwargs.items():properties[self._underscore_to_camelcase(attr)] = valuedata = {\"\": properties}response = self._perform_request(url='' % (group_id, resource_id),method='',data=json.dumps(data))return response", "docstring": "Updates the permissions of a group for a resource share.\n\n:param group_id: The unique ID of the group.\n:type group_id: ``str``\n\n:param resource_id: The unique ID of the resource.\n:type resource_id: ``str``", "id": "f811:c0:m80"} {"signature": "def delete_share(self, group_id, resource_id):", "body": "response = self._perform_request(url='' % (group_id, resource_id),method='')return response", "docstring": "Removes a resource share from a group.\n\n:param group_id: The unique ID of the group.\n:type group_id: ``str``\n\n:param resource_id: The unique ID of the resource.\n:type resource_id: ``str``", "id": "f811:c0:m81"} {"signature": "def list_users(self, depth=):", "body": "response = self._perform_request('' + str(depth))return response", "docstring": "Retrieves a list of all users.\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m82"} {"signature": "def get_user(self, user_id, depth=):", "body": "response = self._perform_request('' % (user_id, str(depth)))return response", "docstring": "Retrieves a single user by ID.\n\n:param user_id: The unique ID of the user.\n:type user_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m83"} {"signature": "def create_user(self, user):", "body": "data = self._create_user_dict(user=user)response = self._perform_request(url='',method='',data=json.dumps(data))return response", "docstring": "Creates a new user.\n\n:param user: The user object to be created.\n:type user: ``dict``", "id": "f811:c0:m84"} {"signature": "def update_user(self, user_id, **kwargs):", "body": "properties = {}for attr, value in kwargs.items():properties[self._underscore_to_camelcase(attr)] = valuedata = {\"\": properties}response = self._perform_request(url='' % user_id,method='',data=json.dumps(data))return response", "docstring": "Updates a user.\n\n:param user_id: The unique ID of the user.\n:type user_id: ``str``", "id": "f811:c0:m85"} {"signature": "def delete_user(self, user_id):", "body": "response = self._perform_request(url='' % user_id,method='')return response", "docstring": "Removes a user.\n\n:param user_id: The unique ID of the user.\n:type user_id: ``str``", "id": "f811:c0:m86"} {"signature": "def list_group_users(self, group_id, depth=):", "body": "response = self._perform_request('' % (group_id, str(depth)))return response", "docstring": "Retrieves a list of all users that are members of a particular group.\n\n:param group_id: The unique ID of the 
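update_group, add_share, update_share and update_user all build the same payload: snake_case keyword arguments are camelCased and wrapped in a single properties object before being serialized. A standalone sketch of that pattern; the helper name is hypothetical and the literal "properties" key is an assumption, since the original string is elided in this dump:

```python
import json


def to_camelcase(value):
    # First fragment stays lower-case, later fragments are capitalized:
    # force_sec_auth -> forceSecAuth
    parts = value.split('_')
    return parts[0] + ''.join(p.capitalize() for p in parts[1:])


def build_properties_payload(**kwargs):
    """Hypothetical helper mirroring the body built by the update_* methods above."""
    properties = {to_camelcase(k): v for k, v in kwargs.items()}
    return json.dumps({'properties': properties})


print(build_properties_payload(create_snapshot=True, reserve_ip=False))
# -> {"properties": {"createSnapshot": true, "reserveIp": false}}
```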
group.\n:type group_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m87"} {"signature": "def add_group_user(self, group_id, user_id):", "body": "data = {\"\": user_id}response = self._perform_request(url='' % group_id,method='',data=json.dumps(data))return response", "docstring": "Adds an existing user to a group.\n\n:param group_id: The unique ID of the group.\n:type group_id: ``str``\n\n:param user_id: The unique ID of the user.\n:type user_id: ``str``", "id": "f811:c0:m88"} {"signature": "def remove_group_user(self, group_id, user_id):", "body": "response = self._perform_request(url='' % (group_id, user_id),method='')return response", "docstring": "Removes a user from a group.\n\n:param group_id: The unique ID of the group.\n:type group_id: ``str``\n\n:param user_id: The unique ID of the user.\n:type user_id: ``str``", "id": "f811:c0:m89"} {"signature": "def list_resources(self, resource_type=None, depth=):", "body": "if resource_type is not None:response = self._perform_request('' % (resource_type, str(depth)))else:response = self._perform_request('' + str(depth))return response", "docstring": "Retrieves a list of all resources.\n\n:param resource_type: The resource type: datacenter, image,\n snapshot or ipblock. Default is None,\n i.e., all resources are listed.\n:type resource_type: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m90"} {"signature": "def get_resource(self, resource_type, resource_id, depth=):", "body": "response = self._perform_request('' % (resource_type, resource_id, str(depth)))return response", "docstring": "Retrieves a single resource of a particular type.\n\n:param resource_type: The resource type: datacenter, image,\n snapshot or ipblock.\n:type resource_type: ``str``\n\n:param resource_id: The unique ID of the resource.\n:type resource_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m91"} {"signature": "def get_volume(self, datacenter_id, volume_id):", "body": "response = self._perform_request('' % (datacenter_id, volume_id))return response", "docstring": "Retrieves a single volume by ID.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param volume_id: The unique ID of the volume.\n:type volume_id: ``str``", "id": "f811:c0:m92"} {"signature": "def list_volumes(self, datacenter_id, depth=):", "body": "response = self._perform_request('' % (datacenter_id, str(depth)))return response", "docstring": "Retrieves a list of volumes in the data center.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param depth: The depth of the response data.\n:type depth: ``int``", "id": "f811:c0:m93"} {"signature": "def delete_volume(self, datacenter_id, volume_id):", "body": "response = self._perform_request(url='' % (datacenter_id, volume_id), method='')return response", "docstring": "Removes a volume from the data center.\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param volume_id: The unique ID of the volume.\n:type volume_id: ``str``", "id": "f811:c0:m94"} {"signature": "def create_volume(self, datacenter_id, volume):", "body": "data = (json.dumps(self._create_volume_dict(volume)))response = self._perform_request(url='' % datacenter_id,method='',data=data)return response", "docstring": "Creates a volume within the specified data center.\n\n:param datacenter_id: The unique ID of the data 
center.\n:type datacenter_id: ``str``\n\n:param volume: A volume dict.\n:type volume: ``dict``", "id": "f811:c0:m95"} {"signature": "def update_volume(self, datacenter_id, volume_id, **kwargs):", "body": "data = {}for attr, value in kwargs.items():data[self._underscore_to_camelcase(attr)] = valueresponse = self._perform_request(url='' % (datacenter_id,volume_id),method='',data=json.dumps(data))return response", "docstring": "Updates a volume\n\n:param datacenter_id: The unique ID of the data center.\n:type datacenter_id: ``str``\n\n:param volume_id: The unique ID of the volume.\n:type volume_id: ``str``", "id": "f811:c0:m96"} {"signature": "def wait_for_completion(self, response, timeout=, initial_wait=, scaleup=):", "body": "if not response:returnlogger = logging.getLogger(__name__)wait_period = initial_waitnext_increase = time.time() + wait_period * scaleupif timeout:timeout = time.time() + timeoutwhile True:request = self.get_request(request_id=response[''], status=True)if request[''][''] == '':breakelif request[''][''] == '':raise PBFailedRequest(''.format(response[''], request['']['']),response[''])current_time = time.time()if timeout and current_time > timeout:raise PBTimeoutError(''.format(response['']), response[''])if current_time > next_increase:wait_period *= next_increase = time.time() + wait_period * scaleupscaleup *= logger.info(\"\",response[''], request[''][''], wait_period)time.sleep(wait_period)", "docstring": "Poll resource request status until resource is provisioned.\n\n:param response: A response dict, which needs to have a 'requestId' item.\n:type response: ``dict``\n\n:param timeout: Maximum waiting time in seconds. None means infinite waiting time.\n:type timeout: ``int``\n\n:param initial_wait: Initial polling interval in seconds.\n:type initial_wait: ``int``\n\n:param scaleup: Double polling interval every scaleup steps, which will be doubled.\n:type scaleup: ``int``", "id": "f811:c0:m97"} {"signature": "@staticmethoddef _b(s, encoding=''):", "body": "if six.PY2:if isinstance(s, str):return selif isinstance(s, unicode): return s.encode(encoding)else:if isinstance(s, bytes):return selif isinstance(s, str):return s.encode(encoding)raise TypeError(\"\" % (s,))", "docstring": "Returns the given string as a string of bytes. 
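wait_for_completion (f811:c0:m97) polls get_request until the request is done or failed, doubling its wait period after every `scaleup` steps and honouring an optional timeout. The literal status strings and numeric defaults are elided in this dump, so the sketch below is a generic reconstruction of that polling strategy, not the method itself:

```python
import time


def poll_until(check, timeout=3600, initial_wait=5, scaleup=10):
    """Call check() until it returns truthy, with a stepped-up wait period.

    Reconstruction of the strategy described for wait_for_completion: wait
    `initial_wait` seconds between polls, double the period once `scaleup`
    polls' worth of waiting has passed, and give up after `timeout` seconds
    (a falsy timeout means wait forever).  The numeric defaults are assumptions.
    """
    wait_period = initial_wait
    next_increase = time.time() + wait_period * scaleup
    deadline = time.time() + timeout if timeout else None

    while not check():
        now = time.time()
        if deadline is not None and now > deadline:
            raise TimeoutError('polled resource did not finish in time')
        if now > next_increase:
            wait_period *= 2
            next_increase = now + wait_period * scaleup
        time.sleep(wait_period)
```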
That means in\nPython2 as a str object, and in Python3 as a bytes object.\nRaises a TypeError, if it cannot be converted.", "id": "f811:c0:m102"} {"signature": "@staticmethoddef _underscore_to_camelcase(value):", "body": "def camelcase():yield str.lowerwhile True:yield str.capitalizec = camelcase()return \"\".join(next(c)(x) if x else '' for x in value.split(\"\"))", "docstring": "Convert Python snake case back to mixed case.", "id": "f811:c0:m103"} {"signature": "def __init__(self, name=None, location=None, description=None, volumes=None, servers=None, lans=None, loadbalancers=None,**kwargs):", "body": "if volumes is None:volumes = []if servers is None:servers = []if lans is None:lans = []if loadbalancers is None:loadbalancers = []self.name = nameself.description = descriptionself.location = locationself.servers = serversself.volumes = volumesself.lans = lansself.loadbalancers = loadbalancers", "docstring": "The Datacenter class initializer.\n\n:param name: The data center name..\n:type name: ``str``\n\n:param location: The data center geographical location.\n:type location: ``str``\n\n:param description: Optional description.\n:type description: ``str``\n\n:param volumes: List of volume dicts.\n:type volumes: ``list``\n\n:param servers: List of server dicts.\n:type servers: ``list``\n\n:param lans: List of LAN dicts.\n:type lans: ``list``\n\n:param loadbalancers: List of load balancer dicts.\n:type loadbalancers: ``list``", "id": "f811:c1:m0"} {"signature": "def __init__(self, name=None, protocol=None, source_mac=None, source_ip=None,target_ip=None, port_range_start=None,port_range_end=None, icmp_type=None,icmp_code=None, **kwargs):", "body": "self.name = nameself.protocol = protocolself.source_mac = source_macself.source_ip = source_ipself.target_ip = target_ipself.port_range_start = port_range_startself.port_range_end = port_range_endif icmp_type is not None:icmp_type = str(icmp_type)self.icmp_type = icmp_typeif icmp_code is not None:icmp_code = str(icmp_code)self.icmp_code = icmp_code", "docstring": "FirewallRule class initializer.\n\n:param name: The name of the firewall rule.\n:type name: ``str``\n\n:param protocol: Either TCP or UDP\n:type protocol: ``str``\n\n:param source_mac: Source MAC you want to restrict.\n:type source_mac: ``str``\n\n:param source_ip: Source IP you want to restrict.\n:type source_ip: ``str``\n\n:param target_ip: Target IP you want to restrict.\n:type target_ip: ``str``\n\n:param port_range_start: Optional port range.\n:type port_range_start: ``str``\n\n:param port_range_end: Optional port range.\n:type port_range_end: ``str``\n\n:param icmp_type: Defines the allowed type.\n:type icmp_type: ``str``\n\n:param icmp_code: Defines the allowed code.\n:type icmp_code: ``str``", "id": "f811:c2:m0"} {"signature": "def __init__(self, name=None, location=None, size=None):", "body": "self.name = nameself.location = locationself.size = size", "docstring": "IPBlock class initializer.\n\n:param name: The name of the IP block.\n:type name: ``str``\n\n:param location: The location for the IP block.\n:type location: ``str``\n\n:param size: The number of IPs in the block.\n:type size: ``str``", "id": "f811:c3:m0"} {"signature": "def __init__(self, name=None, public=None, nics=None):", "body": "if nics is None:nics = []self.name = nameself.public = publicself.nics = nics", "docstring": "LAN class initializer.\n\n:param name: The name of the LAN.\n:type name: ``str``\n\n:param public: Indicates if the LAN is public.\n:type public: ``bool``\n\n:param nics: A list of NICs\n:type 
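_underscore_to_camelcase (f811:c0:m103) uses a small generator that yields str.lower once and str.capitalize forever, so only the first fragment stays lower-case. The split and join literals are emptied in this dump; assuming the split token is '_' and the join string is empty, a faithful reconstruction looks like this:

```python
def underscore_to_camelcase(value):
    """Convert snake_case back to mixed case, e.g. 'firewall_active' -> 'firewallActive'."""
    def transforms():
        yield str.lower           # first fragment stays lower-case
        while True:
            yield str.capitalize  # every later fragment is capitalized

    t = transforms()
    return ''.join(next(t)(part) if part else '' for part in value.split('_'))


assert underscore_to_camelcase('boot_volume_id') == 'bootVolumeId'
```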
nics: ``list``", "id": "f811:c4:m0"} {"signature": "def __init__(self, name=None, ip=None, dhcp=None, balancednics=None, **kwargs):", "body": "if balancednics is None:balancednics = []self.name = nameself.ip = ipself.dhcp = dhcpself.balancednics = balancednics", "docstring": "LoadBalancer class initializer.\n\n:param name: The name of the load balancer.\n:type name: ``str``\n\n:param ip: The IP for the load balancer.\n:type ip: ``str``\n\n:param dhcp: Indicates if the load balancer\n uses DHCP or not.\n:type dhcp: ``bool``\n\n:param balancednics: A list of NICs associated\n with the load balancer.\n:type balancednics: ``list``", "id": "f811:c5:m0"} {"signature": "def __init__(self, name=None, ips=None, dhcp=None, lan=None, firewall_active=None,firewall_rules=None, nat=None, **kwargs):", "body": "if firewall_rules is None:firewall_rules = []self.name = nameself.nat = natself.ips = ipsself.dhcp = dhcpself.lan = lanself.firewall_active = firewall_activeself.firewall_rules = firewall_rules", "docstring": "NIC class initializer.\n\n:param name: The name of the NIC.\n:type name: ``str``\n\n:param ips: A list of IPs.\n:type ips: ``list``\n\n:param dhcp: Enable or disable DHCP. Default is enabled.\n:type dhcp: ``bool``\n\n:param lan: ID of the LAN in which the NIC should reside.\n:type lan: ``str``\n\n:param nat: Enable or disable NAT. Default is disabled.\n:type nat: ``bool``\n\n:param firewall_active: Turns the firewall on or off;\n default is disabled.\n:type firewall_active: ``bool``\n\n:param firewall_rules: List of firewall rule dicts.\n:type firewall_rules: ``list``", "id": "f811:c6:m0"} {"signature": "def __init__(self, name=None, cores=None, ram=None, availability_zone=None,boot_volume_id=None, boot_cdrom=None, cpu_family=None,create_volumes=None, attach_volumes=None, nics=None):", "body": "if create_volumes is None:create_volumes = []if attach_volumes is None:attach_volumes = []if nics is None:nics = []self.name = nameself.cores = coresself.ram = ramself.availability_zone = availability_zoneself.boot_volume_id = boot_volume_idself.boot_cdrom = boot_cdromself.cpu_family = cpu_familyself.create_volumes = create_volumesself.attach_volumes = attach_volumesself.nics = nics", "docstring": "Server class initializer.\n\n:param name: The name of your server..\n:type name: ``str``\n\n:param cores: The number of cores for the server.\n:type cores: ``str``\n\n:param ram: The amount of memory for the server.\n:type ram: ``str``\n\n:param availability_zone: The availability zone for the server.\n:type availability_zone: ``str``\n\n:param boot_volume_id: The ID of the boot volume.\n:type boot_volume_id: ``str``\n\n:param boot_cdrom: Attach a CDROM.\n:type boot_cdrom: ``str``\n\n:param cpu_family: Set the desired CPU type.\n:type cpu_family: ``str``\n\n:param create_volumes: List of volume dicts to create.\n:type create_volumes: ``list``\n\n:param attach_volumes: List of volume IDs to attach.\n:type attach_volumes: ``list``\n\n:param nics: List of NIC dicts to create.\n:type nics: ``list``", "id": "f811:c7:m0"} {"signature": "def __init__(self, name=None, size=None, bus='', image=None, image_alias=None, disk_type='', licence_type='',image_password=None, ssh_keys=None, availability_zone='',**kwargs):", "body": "if ssh_keys is None:ssh_keys = []self.name = nameself.availability_zone = availability_zoneself.size = sizeself.image = imageself.image_alias = image_aliasself.bus = busself.disk_type = disk_typeself.licence_type = licence_typeself.image_password = image_passwordself.ssh_keys = ssh_keys", 
"docstring": "Volume class initializer.\n\n:param name: The name of the volume.\n:type name: ``str``\n\n:param size: The size of the volume.\n:type size: ``str``\n\n:param bus: The bus type. Def. VIRTIO.\n:type bus: ``str``\n\n:param image: The image ID to use.\n:type image: ``str``\n\n:param image_alias: An alias of the image to use.\n:type image_alias: ``str``\n\n:param disk_type: The type of storage. Def. HDD\n:type disk_type: ``str``\n\n:param licence_type: The licence type.\n:type licence_type: ``str``\n\n:param ssh_keys: A list of public SSH keys.\n:type ssh_keys: ``list``\n\n:param availability_zone: The availability zone for the server.\n:type availability_zone: ``str``", "id": "f811:c8:m0"} {"signature": "def __init__(self, name=None, description=None, licence_type='', size=None, location=None, **kwargs):", "body": "self.name = nameself.description = descriptionself.size = int(size)self.licence_type = licence_typeself.location = location", "docstring": "Snapshot class initializer.\n\n:param name: The name of the snapshot.\n:type name: ``str``\n\n:param name: The description of the snapshot.\n:type name: ``str``\n\n:param size: The size of the snapshot.\n:type size: ``str``\n\n:param licence_type: The licence type.\n:type licence_type: ``str``", "id": "f811:c9:m0"} {"signature": "def __init__(self, name=None, create_datacenter=None,create_snapshot=None, reserve_ip=None,access_activity_log=None):", "body": "self.name = nameself.create_datacenter = create_datacenterself.create_snapshot = create_snapshotself.reserve_ip = reserve_ipself.access_activity_log = access_activity_log", "docstring": "Group class initializer.\n\n:param name: The name of the group.\n:type name: ``str``\n\n:param create_datacenter: Indicates if the group is allowed\n to create virtual data centers.\n:type create_datacenter: ``bool``\n\n:param create_snapshot: Indicates if the group is allowed\n to create snapshots.\n:type create_snapshot: ``bool``\n\n:param reserve_ip: Indicates if the group is allowed\n to reserve IP addresses.\n:type reserve_ip: ``bool``\n\n:param access_activity_log: Indicates if the group is allowed\n to access activity log.\n:type access_activity_log: ``bool``", "id": "f811:c10:m0"} {"signature": "def __init__(self, firstname=None, lastname=None,email=None, password=None,administrator=None,force_sec_auth=None):", "body": "self.firstname = firstnameself.lastname = lastnameself.email = emailself.password = passwordself.administrator = administratorself.force_sec_auth = force_sec_auth", "docstring": "User class initializer.\n\n:param firstname: The user's first name.\n:type firstname: ``str``\n\n:param lastname: The user's last name.\n:type lastname: ``str``\n\n:param email: The user's email.\n:type email: ``str``\n\n:param password: A password for the user.\n:type password: ``str``\n\n:param administrator: Indicates if the user have\n administrative rights.\n:type administrator: ``bool``\n\n:param force_sec_auth: Indicates if secure (two-factor)\n authentication should be forced\n for the user.\n:type force_sec_auth: ``bool``", "id": "f811:c11:m0"} {"signature": "def ask(question, options, default):", "body": "assert default in optionsquestion += \"\".format(\"\".join(o.upper() if o == default else o for o in options))selected = Nonewhile selected not in options:selected = input(question).strip().lower()if selected == \"\":selected = defaultelse:if selected not in options:question = \"\".format(\"\".join(options[:-]), options[-],comma='' if len(options) > else '',)return selected", 
"docstring": "Ask the user a question with a list of allowed answers (like yes or no).\n\nThe user is presented with a question and asked to select an answer from\nthe given options list. The default will be returned if the user enters\nnothing. The user is asked to repeat his answer if his answer does not\nmatch any of the allowed anwsers.\n\n:param question: Question to present to the user (without question mark)\n:type question: ``str``\n\n:param options: List of allowed anwsers\n:type options: ``list``\n\n:param default: Default answer (if the user enters no text)\n:type default: ``str``", "id": "f813:m0"} {"signature": "def find_item_by_name(list_, namegetter, name):", "body": "matching_items = [i for i in list_ if namegetter(i) == name]if not matching_items:prog = re.compile(re.escape(name) + '', re.IGNORECASE)matching_items = [i for i in list_ if prog.match(namegetter(i))]if not matching_items:prog = re.compile(re.escape(name))matching_items = [i for i in list_ if prog.match(namegetter(i))]if not matching_items:prog = re.compile(re.escape(name), re.IGNORECASE)matching_items = [i for i in list_ if prog.match(namegetter(i))]if not matching_items:prog = re.compile(re.escape(name))matching_items = [i for i in list_ if prog.search(namegetter(i))]if not matching_items:prog = re.compile(re.escape(name), re.IGNORECASE)matching_items = [i for i in list_ if prog.search(namegetter(i))]return matching_items", "docstring": "Find a item a given list by a matching name.\n\nThe search for the name is done in this relaxing way:\n\n- exact name match\n- case-insentive name match\n- attribute starts with the name\n- attribute starts with the name (case insensitive)\n- name appears in the attribute\n- name appears in the attribute (case insensitive)\n\n:param list_: A list of elements\n:type list_: ``list``\n\n:param namegetter: Function that returns the name for a given\n element in the list\n:type namegetter: ``function``\n\n:param name: Name to search for\n:type name: ``str``", "id": "f813:m1"} {"signature": "def getLogin(filename, user, passwd):", "body": "if filename is None:return (user, passwd)isPy2 = sys.version_info[] == if os.path.exists(filename):print(\"\".format(filename))with open(filename, \"\") as loginfile:encoded_cred = loginfile.read()print(\"\".format(encoded_cred))if isPy2:decoded_cred = b64decode(encoded_cred)else:decoded_cred = b64decode(encoded_cred).decode('')login = decoded_cred.split('', )return (login[], login[])else:if user is None or passwd is None:raise ValueError(\"\")print(\"\".format(filename))with open(filename, \"\") as loginfile:creds = user+\"\"+passwdif isPy2:encoded_cred = b64encode(creds)else:encoded_cred = b64encode(creds.encode(''))print(\"\".format(encoded_cred))loginfile.write(encoded_cred)return (user, passwd)", "docstring": "write user/passwd to login file or get them from file.\nThis method is not Py3 safe (byte vs. 
str)", "id": "f814:m0"} {"signature": "def wait_for_request(pbclient, request_id,timeout=, initial_wait=, scaleup=):", "body": "total_wait = wait_period = initial_waitnext_scaleup = scaleup * wait_periodwait = Truewhile wait:request_status = pbclient.get_request(request_id, status=True)state = request_status['']['']if state == \"\":return(, state, request_status[''][''])if state == '':return(, state, request_status[''][''])print(\"\".format(request_id, state, wait_period))sleep(wait_period)total_wait += wait_periodif timeout != and total_wait > timeout:wait = Falsenext_scaleup -= wait_periodif next_scaleup == :wait_period += initial_waitnext_scaleup = scaleup * wait_periodprint(\"\".format(wait_period, next_scaleup))return (-, state, \"\")", "docstring": "Waits for a request to finish until timeout.\ntimeout==0 is interpreted as infinite wait time.\nReturns a tuple (return code, request status, message) where return code\n0 : request successful\n1 : request failed\n-1 : timeout exceeded\nThe wait_period is increased every scaleup steps to adjust for long\nrunning requests.", "id": "f814:m1"} {"signature": "def wait_for_requests(pbclient, request_ids=None,timeout=, initial_wait=, scaleup=):", "body": "done = dict()if not request_ids:print(\"\")return donetotal_wait = wait_period = initial_waitnext_scaleup = scaleup * wait_periodwait = Truewhile wait:for request_id in request_ids:if request_id in done:continuerequest_status = pbclient.get_request(request_id, status=True)state = request_status['']['']if state == \"\":done[request_id] = (, state, request_status[''][''])print(\"\".format(request_id, state))if state == '':done[request_id] = (, state, request_status[''][''])print(\"\".format(request_id, state))if len(done) == len(request_ids):wait = Falseelse:print(\"\".format(len(done), len(request_ids), wait_period))sleep(wait_period)total_wait += wait_periodif timeout != and total_wait > timeout:wait = Falsenext_scaleup -= wait_periodif next_scaleup == :wait_period += initial_waitnext_scaleup = scaleup * wait_periodprint(\"\".format(wait_period, next_scaleup))if len(done) != len(request_ids):for request_id in request_ids:if request_id in done:continuedone[request_id] = (-, state, \"\")return done", "docstring": "Waits for a list of requests to finish until timeout.\ntimeout==0 is interpreted as infinite wait time.\nReturns a dict of request_id -> result.\nresult is a tuple (return code, request status, message) where return code\n0 : request successful\n1 : request failed\n-1 : timeout exceeded\nThe wait_period is increased every scaleup steps to adjust for long\nrunning requests.", "id": "f814:m2"} {"signature": "def create_datacenter_dict(pbclient, datacenter):", "body": "server_items = []volume_items = []lan_items = []loadbalancer_items = []entities = dict()properties = {\"\": datacenter.name,\"\": datacenter.location,}if datacenter.description:properties[''] = datacenter.descriptionif datacenter.servers:for server in datacenter.servers:server_items.append(pbclient._create_server_dict(server))servers = {\"\": server_items}server_entities = {\"\": servers}entities.update(server_entities)if datacenter.volumes:for volume in datacenter.volumes:volume_items.append(pbclient._create_volume_dict(volume))volumes = {\"\": volume_items}volume_entities = {\"\": volumes}entities.update(volume_entities)if datacenter.loadbalancers:for loadbalancer in datacenter.loadbalancers:loadbalancer_items.append(pbclient._create_loadbalancer_dict(loadbalancer))loadbalancers = {\"\": 
loadbalancer_items}loadbalancer_entities = {\"\": loadbalancers}entities.update(loadbalancer_entities)if datacenter.lans:for lan in datacenter.lans:lan_items.append(pbclient._create_lan_dict(lan))lans = {\"\": lan_items}lan_entities = {\"\": lans}entities.update(lan_entities)if not entities:raw = {\"\": properties,}else:raw = {\"\": properties,\"\": entities}return raw", "docstring": "Creates a Datacenter dict -- both simple and complex are supported.\nThis is copied from createDatacenter() and uses private methods.", "id": "f814:m3"} {"signature": "def main(argv=None):", "body": "if argv is None:argv = sys.argvelse:sys.argv.extend(argv)program_name = os.path.basename(sys.argv[])program_version = \"\" % __version__program_build_date = str(__updated__)program_version_message = '' % (program_version,program_build_date)program_shortdesc = __import__('').__doc__.split(\"\")[]program_license =", "docstring": "Parse command line options and create a server/volume composite.", "id": "f814:m12"} {"signature": "def getLogin(filename, user, passwd):", "body": "if filename is None:return (user, passwd)isPy2 = sys.version_info[] == if os.path.exists(filename):print(\"\".format(filename))with open(filename, \"\") as loginfile:encoded_cred = loginfile.read()print(\"\".format(encoded_cred))if isPy2:decoded_cred = b64decode(encoded_cred)else:decoded_cred = b64decode(encoded_cred).decode('')login = decoded_cred.split('', )return (login[], login[])else:if user is None or passwd is None:raise ValueError(\"\")print(\"\".format(filename))with open(filename, \"\") as loginfile:creds = user+\"\"+passwdif isPy2:encoded_cred = b64encode(creds)else:encoded_cred = b64encode(creds.encode(''))print(\"\".format(encoded_cred))loginfile.write(encoded_cred)return (user, passwd)", "docstring": "write user/passwd to login file or get them from file.\nThis method is not Py3 safe (byte vs. 
str)", "id": "f816:m0"} {"signature": "def wait_for_request(pbclient, request_id,timeout=, initial_wait=, scaleup=):", "body": "total_wait = wait_period = initial_waitnext_scaleup = scaleup * wait_periodwait = Truewhile wait:request_status = pbclient.get_request(request_id, status=True)state = request_status['']['']if state == \"\":return(, state, request_status[''][''])if state == '':return(, state, request_status[''][''])print(\"\".format(request_id, state, wait_period))sleep(wait_period)total_wait += wait_periodif timeout != and total_wait > timeout:wait = Falsenext_scaleup -= wait_periodif next_scaleup == :wait_period += initial_waitnext_scaleup = scaleup * wait_periodprint(\"\".format(wait_period, next_scaleup))return(-, state, \"\")", "docstring": "Waits for a request to finish until timeout.\ntimeout==0 is interpreted as infinite wait time.\nReturns a tuple (return code, request status, message) where return code\n0 : request successful\n1 : request failed\n-1 : timeout exceeded\nThe wait_period is increased every scaleup steps to adjust for long\nrunning requests.", "id": "f816:m1"} {"signature": "def wait_for_requests(pbclient, request_ids=None,timeout=, initial_wait=, scaleup=):", "body": "done = dict()if not request_ids:print(\"\")return donetotal_wait = wait_period = initial_waitnext_scaleup = scaleup * wait_periodwait = Truewhile wait:for request_id in request_ids:if request_id in done:continuerequest_status = pbclient.get_request(request_id, status=True)state = request_status['']['']if state == \"\":done[request_id] = (, state, request_status[''][''])print(\"\".format(request_id, state))if state == '':done[request_id] = (, state, request_status[''][''])print(\"\".format(request_id, state))if len(done) == len(request_ids):wait = Falseelse:print(\"\".format(len(done), len(request_ids), wait_period))sleep(wait_period)total_wait += wait_periodif timeout != and total_wait > timeout:wait = Falsenext_scaleup -= wait_periodif next_scaleup == :wait_period += initial_waitnext_scaleup = scaleup * wait_periodprint(\"\".format(wait_period, next_scaleup))if len(done) != len(request_ids):for request_id in request_ids:if request_id in done:continuedone[request_id] = (-, state, \"\")return done", "docstring": "Waits for a list of requests to finish until timeout.\ntimeout==0 is interpreted as infinite wait time.\nReturns a dict of request_id -> result.\nresult is a tuple (return code, request status, message) where return code\n0 : request successful\n1 : request failed\n-1 : timeout exceeded\nThe wait_period is increased every scaleup steps to adjust for long\nrunning requests.", "id": "f816:m2"} {"signature": "def get_disk_image_by_name(pbclient, location, image_name):", "body": "all_images = pbclient.list_images()matching = [i for i in all_images[''] ifi[''][''] == image_name andi[''][''] == \"\" andi[''][''] == location]return matching", "docstring": "Returns all disk images within a location with a given image name.\nThe name must match exactly.\nThe list may be empty.", "id": "f816:m3"} {"signature": "def main(argv=None):", "body": "if argv is None:argv = sys.argvelse:sys.argv.extend(argv)program_name = os.path.basename(sys.argv[])program_version = \"\" % __version__program_build_date = str(__updated__)program_version_message = '' % (program_version,program_build_date)program_shortdesc = __import__('').__doc__.split(\"\")[]program_license =", "docstring": "Command line options.", "id": "f816:m4"} {"signature": "def __init__(self, file=None):", "body": "self._ns = {'': \"\",'': \"\",'': 
\"\"\"\",'': \"\",'': \"\"\"\"}self.file = fileself.root = Noneself.name = Noneself.osid = Noneself.licenseType = \"\"self.cpus = Noneself.ram = Noneself.disks = []self.lans = dict()self.nics = []self.resourceTypes = {'': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': ''} self.osTypeOther = {'': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '',} self.osTypeLinux = {'': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '',} self.osTypeWindows = {'': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': ''}", "docstring": "OVF meta data initializer", "id": "f816:c1:m0"} {"signature": "def _nsattr(self, attr, ns=None):", "body": "if ns is None:return attrreturn '' + self._ns[ns] + '' + attr", "docstring": "returns an attribute name w/ namespace prefix", "id": "f816:c1:m2"} {"signature": "def pp(value):", "body": "pretty_printer = pprint.PrettyPrinter(indent=)return pretty_printer.pformat(value)", "docstring": "Returns a pretty print string of the given value.\n\n@return: pretty print string\n@rtype: str", "id": "f819:m0"} {"signature": "def get_dc_inventory(pbclient, dc=None):", "body": "if pbclient is None:raise ValueError(\"\")if dc is None:raise ValueError(\"\")dc_inv = [] dcid = dc['']dc_data = [dcid, dc[''][''], dc['']['']]servers = pbclient.list_servers(dcid, )print(\"\" % (len(servers['']), dc['']['']))if verbose > :print(str(servers))bound_vols = dict() for server in servers['']:if verbose > :print(\"\" % str(server))serverid = server['']server_data = [server[''], serverid, server[''][''],server['']['']]bootOS = \"\"bootdev = server['']['']if bootdev is None:bootdev = server['']['']print(\"\" % (serverid, \"\"))if bootdev is None:print(\"\" % (serverid))else:bootOS = bootdev['']['']server_data += [bootOS, server[''][''], server['']['']]server_vols = server['']['']['']n_volumes = len(server_vols)total_disk = licence_type = \"\"for vol in server_vols:total_disk += vol['']['']licence_type = str(vol[''][''])bound_vols[vol['']] = serveridif verbose:print(\"\" % (vol[''], bound_vols[vol['']], licence_type))server_nics = server['']['']['']n_nics = len(server_nics)server_data += [n_nics, n_volumes, total_disk, \"\",server[''][''], server['']['']]dc_inv.append(dc_data + server_data)volumes = pbclient.list_volumes(dcid, ) for volume in volumes['']:if verbose > :print(\"\" % str(volume))volid = volume['']vol_data = [volume[''], volid, volume[''][''], volume[''][''],volume[''][''], \"\", \"\", \"\", \"\", volume['']['']]connect = ''if volid in bound_vols:connect = bound_vols[volid]vol_data += [connect, volume[''][''], volume['']['']]dc_inv.append(dc_data + vol_data)return dc_inv", "docstring": "gets inventory of one data center", "id": "f819:m1"} {"signature": "def 
get_dc_network(pbclient, dc=None):", "body": "if pbclient is None:raise ValueError(\"\")if dc is None:raise ValueError(\"\")print(\"\")dcid = dc['']dc_data = [dcid, dc[''][''], dc['']['']]lbs = pbclient.list_loadbalancers(dcid, )lbnames = dict([(lb[''], lb['']['']) for lb in lbs['']])if verbose > :print(\"\" % (str(lbs)))lans = pbclient.list_lans(dcid, )lan_inv = []servernames = dict()for lan in lans['']:if verbose > :print(\"\" % str(lan))lan_data = dc_data + [\"\"+lan[''], lan[''][''], lan[''][''],lan['']['']]nics = lan['']['']['']lan_data.append(len(nics))if nics:for nic in nics:nic_props = nic['']serverid = re.sub(r'', r'', nic[''])if serverid in lbnames:servertype = \"\"servername = lbnames[serverid]print(\"\" % (serverid, servername))else:servertype = \"\"if serverid not in servernames:if verbose:print(\"\" % serverid)server = pbclient.get_server(dcid, serverid, )servernames[serverid] = server['']['']servername = servernames[serverid]ips = [str(ip) for ip in nic_props['']]nic_data = [nic[''], nic_props[''], nic_props[''], ips, nic_props[''],nic_props[''], servertype, serverid, servername]lan_inv.append(lan_data+nic_data)else:lan_inv.append(lan_data)return lan_inv", "docstring": "gets inventory of one data center", "id": "f819:m5"} {"signature": "def main(argv=None): ", "body": "if argv is None:argv = sys.argvelse:sys.argv.extend(argv)program_name = os.path.basename(sys.argv[])program_version = \"\" % __version__program_build_date = str(__updated__)program_version_message = '' % (program_version, program_build_date)program_shortdesc = __import__('').__doc__.split(\"\")[]program_license =", "docstring": "Command line options.", "id": "f819:m7"} {"signature": "def getLogin(filename, user, passwd):", "body": "if filename is None:return (user, passwd)if os.path.exists(filename):print(\"\".format(filename))with open(filename, \"\") as loginfile:encoded_cred = loginfile.read()print(\"\".format(encoded_cred))decoded_cred = b64decode(encoded_cred)login = decoded_cred.split('', )return (login[], login[])else:if user is None or passwd is None:raise ValueError(\"\")print(\"\".format(filename))with open(filename, \"\") as loginfile:encoded_cred = b64encode(user+\"\"+passwd)print(\"\".format(encoded_cred))loginfile.write(encoded_cred)return (user, passwd)", "docstring": "write user/passwd to login file or get them from file.\nThis method is not Py3 safe (byte vs. 
str)", "id": "f820:m0"} {"signature": "def getServerInfo(pbclient=None, dc_id=None):", "body": "if pbclient is None:raise ValueError(\"\")if dc_id is None:raise ValueError(\"\")server_info = []servers = pbclient.list_servers(dc_id, )for server in servers['']:props = server['']info = dict(id=server[''], name=props[''],state=server[''][''],vmstate=props[''])server_info.append(info)return server_info", "docstring": "gets info of servers of a data center", "id": "f820:m1"} {"signature": "def getServerStates(pbclient=None, dc_id=None, serverid=None, servername=None):", "body": "if pbclient is None:raise ValueError(\"\")if dc_id is None:raise ValueError(\"\")server = Noneif serverid is None:if servername is None:raise ValueError(\"\")server_info = select_where(getServerInfo(pbclient, dc_id),['', '', '', ''],name=servername)if len(server_info) > :raise NameError(\"\".format(servername))if len(server_info) == :server = server_info[]else:try:server_info = pbclient.get_server(dc_id, serverid, )server = dict(id=server_info[''],name=server_info[''][''],state=server_info[''][''],vmstate=server_info[''][''])except Exception:ex = sys.exc_info()[]if ex.args[] is not None and ex.args[] == :print(\"\".format(serverid))server = Noneelse:raise exreturn server", "docstring": "gets states of a server", "id": "f820:m3"} {"signature": "def wait_for_server(pbclient=None, dc_id=None, serverid=None,indicator='', state='', timeout=):", "body": "if pbclient is None:raise ValueError(\"\")if dc_id is None:raise ValueError(\"\")if serverid is None:raise ValueError(\"\")total_sleep_time = seconds = while total_sleep_time < timeout:time.sleep(seconds)total_sleep_time += secondsif total_sleep_time == :seconds = elif total_sleep_time == :seconds = server = getServerStates(pbclient, dc_id, serverid)if server[indicator] == state:breakreturn server", "docstring": "wait for a server/VM to reach a defined state for a specified time\nindicator := {state|vmstate} specifies if server or VM stat is tested\nstate specifies the status the indicator should have", "id": "f820:m4"} {"signature": "def wait_for_datacenter(client, data_center_id):", "body": "total_sleep_time = seconds = while True:state = client.get_datacenter(data_center_id)['']['']if verbose:print(\"\".format(state))if state == \"\":breaktime.sleep(seconds)total_sleep_time += secondsif total_sleep_time == :seconds = elif total_sleep_time == :seconds = ", "docstring": "Poll the data center to become available (for the next provisionig job)", "id": "f820:m5"} {"signature": "def main(argv=None):", "body": "if argv is None:argv = sys.argvelse:sys.argv.extend(argv)program_name = os.path.basename(sys.argv[])program_version = \"\" % __version__program_build_date = str(__updated__)program_version_message = '' % (program_version,program_build_date)program_shortdesc = __import__('').__doc__.split(\"\")[]program_license =", "docstring": "Command line options.", "id": "f820:m6"} {"signature": "def getLogin(filename, user, passwd):", "body": "if filename is None:return (user, passwd)isPy2 = sys.version_info[] == if os.path.exists(filename):print(\"\".format(filename))with open(filename, \"\") as loginfile:encoded_cred = loginfile.read()print(\"\".format(encoded_cred))if isPy2:decoded_cred = b64decode(encoded_cred)else:decoded_cred = b64decode(encoded_cred).decode('')login = decoded_cred.split('', )return (login[], login[])else:if user is None or passwd is None:raise ValueError(\"\")print(\"\".format(filename))with open(filename, \"\") as loginfile:creds = user+\"\"+passwdif 
isPy2:encoded_cred = b64encode(creds)else:encoded_cred = b64encode(creds.encode(''))print(\"\".format(encoded_cred))loginfile.write(encoded_cred)return (user, passwd)", "docstring": "write user/passwd to login file or get them from file.\nThis method is not Py3 safe (byte vs. str)", "id": "f822:m0"} {"signature": "def wait_for_request(pbclient, request_id,timeout=, initial_wait=, scaleup=):", "body": "total_wait = wait_period = initial_waitnext_scaleup = scaleup * wait_periodwait = Truewhile wait:request_status = pbclient.get_request(request_id, status=True)state = request_status['']['']if state == \"\":return(, state, request_status[''][''])if state == '':return(, state, request_status[''][''])print(\"\".format(request_id, state, wait_period))sleep(wait_period)total_wait += wait_periodif timeout != and total_wait > timeout:wait = Falsenext_scaleup -= wait_periodif next_scaleup == :wait_period += initial_waitnext_scaleup = scaleup * wait_periodprint(\"\".format(wait_period, next_scaleup))return(-, state, \"\")", "docstring": "Waits for a request to finish until timeout.\ntimeout==0 is interpreted as infinite wait time.\nReturns a tuple (return code, request status, message) where return code\n0 : request successful\n1 : request failed\n-1 : timeout exceeded\nThe wait_period is increased every scaleup steps to adjust for long\nrunning requests.", "id": "f822:m3"} {"signature": "def wait_for_requests(pbclient, request_ids=None,timeout=, initial_wait=, scaleup=):", "body": "done = dict()if not request_ids:print(\"\")return donetotal_wait = wait_period = initial_waitnext_scaleup = scaleup * wait_periodwait = Truewhile wait:for request_id in request_ids:if request_id in done:continuerequest_status = pbclient.get_request(request_id, status=True)state = request_status['']['']if state == \"\":done[request_id] = (, state, request_status[''][''])print(\"\".format(request_id, state))if state == '':done[request_id] = (, state, request_status[''][''])print(\"\".format(request_id, state))if len(done) == len(request_ids):wait = Falseelse:print(\"\".format(len(done), len(request_ids), wait_period))sleep(wait_period)total_wait += wait_periodif timeout != and total_wait > timeout:wait = Falsenext_scaleup -= wait_periodif next_scaleup == :wait_period += initial_waitnext_scaleup = scaleup * wait_periodprint(\"\".format(wait_period, next_scaleup))if len(done) != len(request_ids):for request_id in request_ids:if request_id in done:continuedone[request_id] = (-, state, \"\")return done", "docstring": "Waits for a list of requests to finish until timeout.\ntimeout==0 is interpreted as infinite wait time.\nReturns a dict of request_id -> result.\nresult is a tuple (return code, request status, message) where return code\n0 : request successful\n1 : request failed\n-1 : timeout exceeded\nThe wait_period is increased every scaleup steps to adjust for long\nrunning requests.", "id": "f822:m4"} {"signature": "def getServerInfo(pbclient=None, dc_id=None):", "body": "if pbclient is None:raise ValueError(\"\")if dc_id is None:raise ValueError(\"\")server_info = []servers = pbclient.list_servers(dc_id, )for server in servers['']:props = server['']info = dict(id=server[''], name=props[''],state=server[''][''],vmstate=props[''])server_info.append(info)return server_info", "docstring": "gets info of servers of a data center", "id": "f822:m5"} {"signature": "def getServerStates(pbclient=None, dc_id=None, serverid=None, servername=None):", "body": "if pbclient is None:raise ValueError(\"\")if dc_id is None:raise 
ValueError(\"\")server = Noneif serverid is None:if servername is None:raise ValueError(\"\")server_info = select_where(getServerInfo(pbclient, dc_id),['', '', '', ''],name=servername)if len(server_info) > :raise NameError(\"\".format(servername))if len(server_info) == :server = server_info[]else:try:server_info = pbclient.get_server(dc_id, serverid, )server = dict(id=server_info[''],name=server_info[''][''],state=server_info[''][''],vmstate=server_info[''][''])except Exception:ex = sys.exc_info()[]if ex.args[] is not None and ex.args[] == :print(\"\".format(serverid))server = Noneelse:raise exreturn server", "docstring": "gets states of a server", "id": "f822:m7"} {"signature": "def wait_for_server(pbclient=None, dc_id=None, serverid=None,indicator='', state='', timeout=):", "body": "if pbclient is None:raise ValueError(\"\")if dc_id is None:raise ValueError(\"\")if serverid is None:raise ValueError(\"\")total_sleep_time = seconds = while total_sleep_time < timeout:sleep(seconds)total_sleep_time += secondsif total_sleep_time == :seconds = elif total_sleep_time == :seconds = server = getServerStates(pbclient, dc_id, serverid)if server[indicator] == state:breakreturn server", "docstring": "wait for a server/VM to reach a defined state for a specified time\nindicator := {state|vmstate} specifies if server or VM stat is tested\nstate specifies the status the indicator should have", "id": "f822:m8"} {"signature": "def main(argv=None):", "body": "if argv is None:argv = sys.argvelse:sys.argv.extend(argv)program_name = os.path.basename(sys.argv[])program_version = \"\" % __version__program_build_date = str(__updated__)program_version_message = '' % (program_version, program_build_date)program_shortdesc = __import__('').__doc__.split(\"\")[]program_license =", "docstring": "Parse command line options and dump a datacenter to snapshots and file.", "id": "f822:m10"} {"signature": "def getLogin(filename, user, passwd):", "body": "if filename is None:return (user, passwd)if os.path.exists(filename):print(\"\".format(filename))with open(filename, \"\") as loginfile:encoded_cred = loginfile.read()print(\"\".format(encoded_cred))decoded_cred = b64decode(encoded_cred)login = decoded_cred.split('', )return (login[], login[])else:if user is None or passwd is None:raise ValueError(\"\")print(\"\".format(filename))with open(filename, \"\") as loginfile:encoded_cred = b64encode(user+\"\"+passwd)print(\"\".format(encoded_cred))loginfile.write(encoded_cred)return (user, passwd)", "docstring": "write user/passwd to login file or get them from file.\nThis method is not Py3 safe (byte vs. 
str)", "id": "f827:m0"} {"signature": "def getServerInfo(pbclient=None, dc_id=None):", "body": "if pbclient is None:raise ValueError(\"\")if dc_id is None:raise ValueError(\"\")server_info = []servers = pbclient.list_servers(dc_id, )for server in servers['']:props = server['']info = dict(id=server[''], name=props[''],state=server[''][''],vmstate=props[''])server_info.append(info)return server_info", "docstring": "gets info of servers of a data center", "id": "f827:m1"} {"signature": "def getServerStates(pbclient=None, dc_id=None, serverid=None, servername=None):", "body": "if pbclient is None:raise ValueError(\"\")if dc_id is None:raise ValueError(\"\")server = Noneif serverid is None:if servername is None:raise ValueError(\"\")server_info = select_where(getServerInfo(pbclient, dc_id),['', '', '', ''],name=servername)if len(server_info) > :raise NameError(\"\".format(servername))if len(server_info) == :server = server_info[]else:try:server_info = pbclient.get_server(dc_id, serverid, )server = dict(id=server_info[''],name=server_info[''][''],state=server_info[''][''],vmstate=server_info[''][''])except Exception:ex = sys.exc_info()[]if ex.args[] is not None and ex.args[] == :print(\"\".format(serverid))server = Noneelse:raise exreturn server", "docstring": "gets states of a server", "id": "f827:m3"} {"signature": "def wait_for_server(pbclient=None, dc_id=None, serverid=None,indicator='', state='', timeout=):", "body": "if pbclient is None:raise ValueError(\"\")if dc_id is None:raise ValueError(\"\")if serverid is None:raise ValueError(\"\")total_sleep_time = seconds = while total_sleep_time < timeout:time.sleep(seconds)total_sleep_time += secondsif total_sleep_time == :seconds = elif total_sleep_time == :seconds = server = getServerStates(pbclient, dc_id, serverid)if server[indicator] == state:breakreturn server", "docstring": "wait for a server/VM to reach a defined state for a specified time\nindicator := {state|vmstate} specifies if server or VM stat is tested\nstate specifies the status the indicator should have", "id": "f827:m4"} {"signature": "def main(argv=None):", "body": "if argv is None:argv = sys.argvelse:sys.argv.extend(argv)program_name = os.path.basename(sys.argv[])program_version = \"\" % __version__program_build_date = str(__updated__)program_version_message = '' % (program_version,program_build_date)program_shortdesc = __import__('').__doc__.split(\"\")[]program_license =", "docstring": "Command line options.", "id": "f827:m5"} {"signature": "def getLogin(filename, user, passwd):", "body": "if filename is None:return (user, passwd)if os.path.exists(filename):print(\"\".format(filename))with open(filename, \"\") as loginfile:encoded_cred = loginfile.read()decoded_cred = b64decode(encoded_cred)login = decoded_cred.split('', )return (login[], login[])else:if user is None or passwd is None:raise ValueError(\"\")print(\"\".format(filename))with open(filename, \"\") as loginfile:encoded_cred = b64encode(user+\"\"+passwd)loginfile.write(encoded_cred)return (user, passwd)", "docstring": "write user/passwd to login file or get them from file.\nThis method is not Py3 safe (byte vs. 
str)", "id": "f828:m0"} {"signature": "def wait_for_request(pbclient, request_id,timeout=, initial_wait=, scaleup=):", "body": "total_wait = wait_period = initial_waitnext_scaleup = scaleup * wait_periodwait = Truewhile wait:request_status = pbclient.get_request(request_id, status=True)state = request_status['']['']if state == \"\":return(, state, request_status[''][''])if state == '':return(, state, request_status[''][''])if verbose > :print(\"\".format(request_id, state, wait_period))sleep(wait_period)total_wait += wait_periodif timeout != and total_wait > timeout:wait = Falsenext_scaleup -= wait_periodif next_scaleup == :wait_period += initial_waitnext_scaleup = scaleup * wait_periodif verbose > :print(\"\".format(wait_period, next_scaleup))return(-, state, \"\")", "docstring": "Waits for a request to finish until timeout.\ntimeout==0 is interpreted as infinite wait time.\nReturns a tuple (return code, request status, message) where return code\n0 : request successful\n1 : request failed\n-1 : timeout exceeded\nThe wait_period is increased every scaleup steps to adjust for long\nrunning requests.", "id": "f828:m1"} {"signature": "def main(argv=None):", "body": "if argv is None:argv = sys.argvelse:sys.argv.extend(argv)program_name = os.path.basename(sys.argv[])program_version = \"\" % __version__program_build_date = str(__updated__)program_version_message = '' % (program_version,program_build_date)program_shortdesc = __import__('').__doc__.split(\"\")[]program_license =", "docstring": "Parse command line options and create a server/volume composite.", "id": "f828:m2"} {"signature": "def __init__(self, ip='', port=):", "body": "self._port = portself._ip = ipself._socket = Noneself._bulbs = []self._lock = threading.Lock()self._last_updated = datetime.datetime.now() - datetime.timedelta(seconds=)self.connect()", "docstring": "Create a hub with given IP and port, establishing socket.", "id": "f831:c0:m0"} {"signature": "def connect(self):", "body": "try:self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)self._socket.settimeout(TIMEOUT_SECONDS)self._socket.connect((self._ip, self._port))_LOGGER.debug(\"\", self._ip,self._port)except socket.error as error:_LOGGER.error(\"\", error)self._socket.close()", "docstring": "Create and connect to socket for TCP communication with hub.", "id": "f831:c0:m1"} {"signature": "@propertydef available(self):", "body": "response = self.send_command(\"\")return \"\" in response", "docstring": "Check if hub is responsive.", "id": "f831:c0:m2"} {"signature": "def send_command(self, command):", "body": "with self._lock:try:self._socket.send(command.encode(\"\"))result = self.receive()while result.startswith(\"\") or result.startswith(\"\"):_LOGGER.debug(\"\", result)result = self.receive()_LOGGER.debug(\"\", result)return resultexcept socket.error as error:_LOGGER.error(\"\", error)self.connect()return \"\"", "docstring": "Send TCP command to hub and return response.", "id": "f831:c0:m3"} {"signature": "def receive(self):", "body": "try:buffer = self._socket.recv(BUFFER_SIZE)except socket.timeout as error:_LOGGER.error(\"\", error)return \"\"buffering = Trueresponse = ''while buffering:if '' in buffer.decode(\"\"):response = buffer.decode(\"\").split('')[]buffering = Falseelse:try:more = self._socket.recv(BUFFER_SIZE)except socket.timeout:more = Noneif not more:buffering = Falseresponse = buffer.decode(\"\")else:buffer += morereturn response", "docstring": "Receive TCP response, looping to get whole thing or timeout.", "id": "f831:c0:m4"} 
{"signature": "def get_data(self):", "body": "response = self.send_command(GET_LIGHTS_COMMAND)_LOGGER.debug(\"\", repr(response))if not response:_LOGGER.debug(\"\", response)return {}response = response.strip()if not (response.startswith(\"\") and response.endswith(\"\")):_LOGGER.debug(\"\", repr(response))return {}response = response[:-] light_strings = response.split('')light_data_by_id = {}for light_string in light_strings:values = light_string.split('')try:light_data_by_id[values[]] = [int(values[]), int(values[]),int(values[]), int(values[]),int(values[])]except ValueError as error:_LOGGER.error(\"\", error, values, response)except IndexError as error:_LOGGER.error(\"\", error, values, response)return light_data_by_id", "docstring": "Get current light data as dictionary with light zids as keys.", "id": "f831:c0:m5"} {"signature": "def get_lights(self):", "body": "now = datetime.datetime.now()if (now - self._last_updated) < datetime.timedelta(seconds=UPDATE_INTERVAL_SECONDS):return self._bulbselse:self._last_updated = nowlight_data = self.get_data()_LOGGER.debug(\"\", light_data)if not light_data:return []if self._bulbs:for bulb in self._bulbs:try:values = light_data[bulb.zid]bulb._online, bulb._red, bulb._green, bulb._blue,bulb._level = valuesexcept KeyError:passelse:for light_id in light_data:self._bulbs.append(Bulb(self, light_id, *light_data[light_id]))return self._bulbs", "docstring": "Get current light data, set and return as list of Bulb objects.", "id": "f831:c0:m6"} {"signature": "def __init__(self, hub, zid, online, red, green, blue, level):", "body": "self._hub = hubself._zid = zidself._online = online == self._red = int(red)self._green = int(green)self._blue = int(blue)self._level = int(level)", "docstring": "Construct a Bulb (light) based on current values.", "id": "f831:c1:m0"} {"signature": "@propertydef brightness(self):", "body": "self.update()return self._level", "docstring": "Return the brightness level.", "id": "f831:c1:m1"} {"signature": "@propertydef rgb_color(self):", "body": "self.update()return [self._red, self._green, self._blue]", "docstring": "Return the color property as list of [R, G, B], each 0-255.", "id": "f831:c1:m2"} {"signature": "@propertydef zid(self):", "body": "return self._zid", "docstring": "Return the bulb ID.", "id": "f831:c1:m3"} {"signature": "@propertydef available(self):", "body": "self.update()return self._online", "docstring": "Return True if this bulb is online in the current list of bulbs.", "id": "f831:c1:m4"} {"signature": "@propertydef is_on(self):", "body": "self.update()return self._level > ", "docstring": "Determine if bulb is on (brightness not zero).", "id": "f831:c1:m5"} {"signature": "def turn_on(self):", "body": "command = \"\".format(self._zid)response = self._hub.send_command(command)_LOGGER.debug(\"\", repr(command), response)return response", "docstring": "Turn bulb on (full brightness).", "id": "f831:c1:m6"} {"signature": "def turn_off(self):", "body": "command = \"\".format(self._zid)response = self._hub.send_command(command)_LOGGER.debug(\"\", repr(command), response)return response", "docstring": "Turn bulb off (zero brightness).", "id": "f831:c1:m7"} {"signature": "def set_rgb_color(self, red, green, blue):", "body": "command = \"\".format(self._zid, red, green, blue)response = self._hub.send_command(command)_LOGGER.debug(\"\", repr(command), response)return response", "docstring": "Set color of bulb.", "id": "f831:c1:m8"} {"signature": "def set_brightness(self, brightness):", "body": "command = 
\"\".format(self._zid, brightness)response = self._hub.send_command(command)_LOGGER.debug(\"\", repr(command), response)return response", "docstring": "Set brightness of bulb.", "id": "f831:c1:m9"} {"signature": "def set_all(self, red, green, blue, brightness):", "body": "command = \"\".format(self._zid, red, green, blue,brightness)response = self._hub.send_command(command)_LOGGER.debug(\"\", repr(command), response)return response", "docstring": "Set color and brightness of bulb.", "id": "f831:c1:m10"} {"signature": "def update(self):", "body": "bulbs = self._hub.get_lights()if not bulbs:_LOGGER.debug(\"\", self._zid)self._online = False", "docstring": "Update light objects to their current values.", "id": "f831:c1:m11"} {"signature": "def demo():", "body": "hub = Hub()if hub.available:LOGGER.info(\"\")else:LOGGER.info(\"\")bulbs = hub.get_lights()light = get_bulb(ZID_TO_TEST, bulbs)if light is not None:if light.available:LOGGER.info(\"\")assert light.availablelight.turn_on()time.sleep(SECONDS_TO_WAIT)assert light.is_onlight.update()light.update()light.set_rgb_color(, , )time.sleep(SECONDS_TO_WAIT)light.update()assert light.rgb_color == [, , ]light.set_brightness()time.sleep(SECONDS_TO_WAIT)assert light.brightness == assert light.is_onlight.turn_off()time.sleep(SECONDS_TO_WAIT)assert not light.is_onlight.set_all(, , , )time.sleep(SECONDS_TO_WAIT)LOGGER.info(\"\", light.rgb_color,light.brightness)assert light.brightness == assert light.rgb_color == [, , ]light.turn_off()else:LOGGER.info(\"\")else:LOGGER.error(\"\")", "docstring": "Demo some specific functionality. Needs to be customised.", "id": "f833:m1"} {"signature": "def get_bulb(zid, bulbs):", "body": "for bulb in bulbs:if bulb.zid == zid:return bulbreturn None", "docstring": "Retrieve a bulb by its zid from a list of Bulb objects.", "id": "f833:m2"} {"signature": "def register_app( self, name, redirect_uri ):", "body": "client_id = self._generate_token()client_secret = self._generate_token( )self.data_store.store( '', client_id=client_id,client_secret=client_secret, name=name,redirect_uri=redirect_uri )return { '':client_id, '':client_secret }", "docstring": "register_app takes an application name and redirect_uri\nIt generates client_id (client_key) and client_secret,\nthen stores all of the above in the data_store,\nand returns a dictionary containing the client_id and client_secret.", "id": "f835:c1:m1"} {"signature": "def request_authorization( self, client_id, user_id, response_type,redirect_uri=None, scope=None, state=None,expires= ):", "body": "if response_type != '':raise Proauth2Error( '','', state=state )client = self.data_store.fetch( '', client_id=client_id )if not client: raise Proauth2Error( '' )if redirect_uri and client[''] != redirect_uri:raise Proauth2Error( '', \"\" )nonce_code = self._generate_token()expires = time() + expirestry:self.data_store.store( '', code=nonce_code,client_id=client_id, expires=expires,user_id=user_id, scope=scope )except Proauth2Error as e:e.state = stateraise ereturn { '':nonce_code, '':state }", "docstring": "request_authorization generates a nonce, and stores it in the data_store along with the\nclient_id, user_id, and expiration timestamp.\nIt then returns a dictionary containing the nonce as \"code,\" and the passed\nstate.\n---\nresponse_type MUST be \"code.\" this is directly from the OAuth2 spec.\nthis probably doesn't need to be checked here, but if it's in the spec I\nguess it should be verified somewhere.\nscope has not been implemented here. 
it will be stored, but there is no\nscope-checking built in here at this time.\nif a redirect_uri is passed, it must match the registered redirect_uri.\nagain, this is per spec.", "id": "f835:c1:m2"} {"signature": "def request_access_token( self, client_id, key, code, grant_type,redirect_uri=None, method='' ):", "body": "if grant_type != '':raise Proauth2Error( '','' )self._auth( client_id, key, method )user_id = self._validate_request_code( code, client_id )access_token = self._generate_token( )self.data_store.store( '', token=access_token, user_id=user_id,client_id=client_id )return { '':access_token, '':'' }", "docstring": "request_access_token validates the client_id and client_secret, using the\nprovided method, then generates an access_token, stores it with the user_id\nfrom the nonce, and returns a dictionary containing an access_token and\nbearer token.\n---\nfrom the spec, it looks like there are different types of\ntokens, but i don't understand the disctintions, so someone else can fix\nthis if need be.\nregarding the method: it appears that it is intended for there to be\nmultiple ways to verify the client_id. my assumption is that you use the\nsecret as the salt and pass the hashed of the client_id or something, and\nthen compare hashes on the server end. currently the only implemented method\nis direct comparison of the client_ids and client_secrets.\nadditional methods can be added to proauth2.auth_methods", "id": "f835:c1:m3"} {"signature": "def authenticate_token( self, token ):", "body": "token_data = self.data_store.fetch( '', token=token )if not token_data:raise Proauth2Error( '','' )return token_data['']", "docstring": "authenticate_token checks the passed token and returns the user_id it is\nassociated with. it is assumed that this method won't be directly exposed to\nthe oauth client, but some kind of framework or wrapper. 
this allows the\nframework to have the user_id without doing additional DB calls.", "id": "f835:c1:m4"} {"signature": "def revoke_token( self, token ):", "body": "self.data_store.remove( '', token=token )", "docstring": "revoke_token removes the access token from the data_store", "id": "f835:c1:m5"} {"signature": "def _generate_token( self, length= ):", "body": "return ''.join( choice( ascii_letters + digits ) for x in range( length ) )", "docstring": "_generate_token - internal function for generating randomized alphanumberic\nstrings of a given length", "id": "f835:c1:m6"} {"signature": "def _auth( self, client_id, key, method ):", "body": "available = auth_methods.keys()if method not in available:raise Proauth2Error( '',''''% ( method, ''.join( available ) ) )client = self.data_store.fetch( '', client_id=client_id )if not client: raise Proauth2Error( '' )if not auth_methods[method]( key, client[''] ):raise Proauth2Error( '' )", "docstring": "_auth - internal method to ensure the client_id and client_secret passed with\nthe nonce match", "id": "f835:c1:m7"} {"signature": "def _validate_request_code( self, code, client_id):", "body": "nonce = self.data_store.fetch( '', code=code )if not nonce:raise Proauth2Error( '', '' % code )if client_id != nonce['']: raise Proauth2Error( '', '' % code )user_id = nonce['']expires = nonce['']self.data_store.remove( '', code=code, client_id=client_id,user_id=user_id )if time() > expires:raise Proauth2Error( '', '' % code )return user_id", "docstring": "_validate_request_code - internal method for verifying the the given nonce.\nalso removes the nonce from the data_store, as they are intended for\none-time use.", "id": "f835:c1:m8"} {"signature": "@enginedef register_app(self, name, redirect_uri, callback):", "body": "client_id = self._generate_token()client_secret = self._generate_token()yield Task(self.data_store.store, '', client_id=client_id,client_secret=client_secret, name=name,redirect_uri=redirect_uri)callback({'':client_id, '':client_secret})", "docstring": "register_app takes an application name and redirect_uri\nIt generates client_id (client_key) and client_secret,\nthen stores all of the above in the data_store,\nand returns a dictionary containing the client_id and client_secret.", "id": "f836:c0:m1"} {"signature": "@enginedef request_authorization(self, client_id, user_id, response_type,redirect_uri=None, scope=None, state=None,expires=, callback=None):", "body": "if response_type != '':raise Proauth2Error('','', state=state)client = yield Task(self.data_store.fetch, '',client_id=client_id)if not client: raise Proauth2Error('')if redirect_uri and client[''] != redirect_uri:raise Proauth2Error('', \"\")nonce_code = self._generate_token()expires = time() + expirestry:yield Task(self.data_store.store, '', code=nonce_code,client_id=client_id, expires=expires, user_id=user_id,scope=scope)except Proauth2Error as e:e.state = stateraise ecallback({'':nonce_code, '':state})", "docstring": "request_authorization generates a nonce, and stores it in the data_store along with the\nclient_id, user_id, and expiration timestamp.\nIt then returns a dictionary containing the nonce as \"code,\" and the passed\nstate.\n---\nresponse_type MUST be \"code.\" this is directly from the OAuth2 spec.\nthis probably doesn't need to be checked here, but if it's in the spec I\nguess it should be verified somewhere.\nscope has not been implemented here. 
it will be stored, but there is no\nscope-checking built in here at this time.\nif a redirect_uri is passed, it must match the registered redirect_uri.\nagain, this is per spec.", "id": "f836:c0:m2"} {"signature": "@enginedef request_access_token(self, client_id, key, code, grant_type,redirect_uri=None, method='',callback=None):", "body": "if grant_type != '':raise Proauth2Error('','')yield Task(self._auth, client_id, key, method)user_id = yield Task(self._validate_request_code, code, client_id)access_token = self._generate_token()yield Task(self.data_store.store, '', token=access_token,user_id=user_id, client_id=client_id)callback({'':access_token, '':''})", "docstring": "request_access_token validates the client_id and client_secret, using the\nprovided method, then generates an access_token, stores it with the user_id\nfrom the nonce, and returns a dictionary containing an access_token and\nbearer token.\n---\nfrom the spec, it looks like there are different types of\ntokens, but i don't understand the disctintions, so someone else can fix\nthis if need be.\nregarding the method: it appears that it is intended for there to be\nmultiple ways to verify the client_id. my assumption is that you use the\nsecret as the salt and pass the hashed of the client_id or something, and\nthen compare hashes on the server end. currently the only implemented method\nis direct comparison of the client_ids and client_secrets.\nadditional methods can be added to proauth2.auth_methods", "id": "f836:c0:m3"} {"signature": "@enginedef authenticate_token(self, token, callback):", "body": "token_data = yield Task(self.data_store.fetch, '', token=token)if not token_data:raise Proauth2Error('','')callback(token_data[''])", "docstring": "authenticate_token checks the passed token and returns the user_id it is\nassociated with. it is assumed that this method won't be directly exposed to\nthe oauth client, but some kind of framework or wrapper. 
this allows the\nframework to have the user_id without doing additional DB calls.", "id": "f836:c0:m4"} {"signature": "@enginedef revoke_token(self, token, callback):", "body": "yield Task(self.data_store.remove, '', token=token)callback()", "docstring": "revoke_token removes the access token from the data_store", "id": "f836:c0:m5"} {"signature": "@enginedef _auth(self, client_id, key, method, callback):", "body": "available = auth_methods.keys()if method not in available:raise Proauth2Error('','''' %(method, ''.join(available)))client = yield Task(self.data_store.fetch, '',client_id=client_id)if not client: raise Proauth2Error('')if not auth_methods[method](key, client['']):raise Proauth2Error('')callback()", "docstring": "_auth - internal method to ensure the client_id and client_secret passed with\nthe nonce match", "id": "f836:c0:m6"} {"signature": "@enginedef _validate_request_code(self, code, client_id, callback):", "body": "nonce = yield Task(self.data_store.fetch, '', code=code)if not nonce:raise Proauth2Error('', '' % code)if client_id != nonce['']: raise Proauth2Error('', '' % code)user_id = nonce['']expires = nonce['']yield Task(self.data_store.remove, '', code=code,client_id=client_id, user_id=user_id)if time() > expires:raise Proauth2Error('', '' % code)callback(user_id)", "docstring": "_validate_request_code - internal method for verifying the the given nonce.\nalso removes the nonce from the data_store, as they are intended for\none-time use.", "id": "f836:c0:m7"} {"signature": "def _generate_token(self, length=):", "body": "return ''.join(choice(ascii_letters + digits) for x in range(length))", "docstring": "_generate_token - internal function for generating randomized alphanumberic\nstrings of a given length", "id": "f836:c0:m8"} {"signature": "def __init__(self, database='', host='', port=,user=None, pwd=None):", "body": "if user and pwd:connection_string = '' %(user, pwd, host, port)else:connection_string = '' %(host, port)self.db = MotorClient(connection_string).open_sync()[database]", "docstring": "initialize a mongodb connection to mongodb://user:pass@host:port\nuse database", "id": "f838:c0:m0"} {"signature": "@enginedef fetch(self, collection, **kwargs):", "body": "callback = kwargs.pop('')data = yield Op(self.db[collection].find_one, kwargs)callback(data)", "docstring": "return one record from the collection whose parameters match kwargs\n---\nkwargs should be a dictionary whose keys match column names (in\ntraditional SQL / fields in NoSQL) and whose values are the values of\nthose fields.\ne.g. 
kwargs={name='my application name',client_id=12345}", "id": "f838:c0:m1"} {"signature": "@enginedef remove(self, collection, **kwargs):", "body": "callback = kwargs.pop('')yield Op(self.db[collection].remove, kwargs)callback()", "docstring": "remove records from collection whose parameters match kwargs", "id": "f838:c0:m2"} {"signature": "@enginedef store(self, collection, **kwargs):", "body": "callback = kwargs.pop('')key = validate(collection, **kwargs)data = yield Task(self.fetch, collection, **{key: kwargs[key]})if data is not None:raise Proauth2Error('')yield Op(self.db[collection].insert, kwargs)callback()", "docstring": "validate the passed values in kwargs based on the collection,\nstore them in the mongodb collection", "id": "f838:c0:m3"} {"signature": "def __init__( self, database='', host='', port=,user=None, pwd=None ):", "body": "if user and pwd:connection_string = ''% ( user, pwd, host, port )self.db = MongoClient( connection_string )[database]else:self.db = MongoClient( host, port )[database]", "docstring": "initialize a mongodb connection to mongodb://user:pass@host:port\nuse database", "id": "f839:c0:m0"} {"signature": "def fetch( self, collection, **kwargs ):", "body": "return self.db[collection].find_one( kwargs )", "docstring": "return one record from the collection whose parameters match kwargs\n---\nkwargs should be a dictionary whose keys match column names (in\ntraditional SQL / fields in NoSQL) and whose values are the values of\nthose fields.\ne.g. kwargs={name='my application name',client_id=12345}", "id": "f839:c0:m1"} {"signature": "def remove( self, collection, **kwargs ):", "body": "self.db[collection].remove( kwargs )", "docstring": "remove records from collection whose parameters match kwargs", "id": "f839:c0:m2"} {"signature": "def store( self, collection, **kwargs ):", "body": "key = validate( collection, **kwargs )if self.fetch( collection, **{ key : kwargs[key] } ):raise Proauth2Error( '' )self.db[collection].insert( kwargs )", "docstring": "validate the passed values in kwargs based on the collection,\nstore them in the mongodb collection", "id": "f839:c0:m3"} {"signature": "def validate( table, **data ):", "body": "if table not in good.keys():raise Proauth2Error( '', '' % table )for req in good[table]['']:if not data.get( req, None ):raise Proauth2Error( '','' % req )for key in data.keys():if key not in good[table][''] andkey not in good[table]['']:raise Proauth2Error( '', '' % key )return good[table]['']", "docstring": "theoretically, any data store can be implemented to work with this package,\nwhich means basic data validation must be done in-package, so that weird\nstuff can't be stored in the data store.\nthis function raises an exception if an invalid table name is passed, not\nall of the required fields are in the data kwargs, or if a field that was\npassed is not expected.\nit also returns the key field name, for ensuring uniqueness (again, that may\nnot be built into whatever data store is impelemented.)", "id": "f840:m0"} {"signature": "def direct_auth( key, secret ):", "body": "if key == secret: return Truereturn False", "docstring": "directly compare the stored secret and the passed secret.", "id": "f841:m0"} {"signature": "def __init__(self, api_root=None, access_token=None, secret=None):", "body": "super().__init__(api_root=api_root, access_token=access_token, secret=secret)", "docstring": "\u521b\u5efa CoolQ HTTP API \u5bf9\u8c61\n\n------------\n\n:param str | None api_root: \u9177 Q HTTP API 
\u63d2\u4ef6\u7684\u76d1\u542c\u5730\u5740\u7684 URL \uff0c\u4e0e HTTP API \u7684\u914d\u7f6e\u6587\u4ef6\u8bbe\u5b9a\u548c\u5b9e\u9645\u4f7f\u7528\u73af\u5883\u76f8\u5173\u3002\u5982\u679c\u4f60\u4e0d\u9700\u8981\u8c03\u7528 API\uff0c\u4e5f\u53ef\u4ee5\u4e0d\u4f20\u5165\u3002\n:param str | None access_token: \u63d2\u4ef6\u914d\u7f6e\u6587\u4ef6\u4e2d\u6240\u6307\u5b9a\u7684 `access_token` \u3002\u5982\u679c\u672a\u8bbe\u5b9a\u53ef\u4e0d\u4f20\u6b64\u53c2\u6570\u3002\n:param str | None secret: \u63d2\u4ef6\u914d\u7f6e\u6587\u4ef6\u4e2d\u6240\u6307\u5b9a\u7684 `secret` \u3002\u5982\u679c\u672a\u8bbe\u5b9a\u53ef\u4e0d\u4f20\u6b64\u53c2\u6570\u3002", "id": "f847:c0:m0"} {"signature": "def send_private_msg(self, *, user_id, message, auto_escape=False):", "body": "return super().__getattr__('')(user_id=user_id, message=message, auto_escape=auto_escape)", "docstring": "\u53d1\u9001\u79c1\u804a\u6d88\u606f\n\n------------\n\n:param int user_id: \u5bf9\u65b9 QQ \u53f7\n:param str | list[ dict[ str, unknown ] ] message: \u8981\u53d1\u9001\u7684\u5185\u5bb9\n:param bool auto_escape: \u6d88\u606f\u5185\u5bb9\u662f\u5426\u4f5c\u4e3a\u7eaf\u6587\u672c\u53d1\u9001\uff08\u5373\u4e0d\u89e3\u6790 CQ \u7801\uff09\uff0c`message` \u6570\u636e\u7c7b\u578b\u4e3a `list` \u65f6\u65e0\u6548\n:return: {\"message_id\": int \u6d88\u606fID}\n:rtype: dict[string, int]", "id": "f847:c0:m1"} {"signature": "def send_private_msg_async(self, *, user_id, message, auto_escape=False):", "body": "return super().__getattr__('')(user_id=user_id, message=message, auto_escape=auto_escape)", "docstring": "\u53d1\u9001\u79c1\u804a\u6d88\u606f (\u5f02\u6b65\u7248\u672c)\n\n------------\n\n:param int user_id: \u5bf9\u65b9 QQ \u53f7\n:param str | list[ dict[ str, unknown ] ] message: \u8981\u53d1\u9001\u7684\u5185\u5bb9\n:param bool auto_escape: \u6d88\u606f\u5185\u5bb9\u662f\u5426\u4f5c\u4e3a\u7eaf\u6587\u672c\u53d1\u9001\uff08\u5373\u4e0d\u89e3\u6790 CQ \u7801\uff09\uff0c`message` \u6570\u636e\u7c7b\u578b\u4e3a `list` \u65f6\u65e0\u6548\n:return: None\n:rtype: None", "id": "f847:c0:m2"} {"signature": "def send_group_msg(self, *, group_id, message, auto_escape=False):", "body": "return super().__getattr__('')(group_id=group_id, message=message, auto_escape=auto_escape)", "docstring": "\u53d1\u9001\u7fa4\u6d88\u606f\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param str | list[ dict[ str, unknown ] ] message: \u8981\u53d1\u9001\u7684\u5185\u5bb9\n:param bool auto_escape: \u6d88\u606f\u5185\u5bb9\u662f\u5426\u4f5c\u4e3a\u7eaf\u6587\u672c\u53d1\u9001\uff08\u5373\u4e0d\u89e3\u6790 CQ \u7801\uff09\uff0c`message` \u6570\u636e\u7c7b\u578b\u4e3a `list` \u65f6\u65e0\u6548\n:return: {\"message_id\": int \u6d88\u606fID}\n:rtype: dict[string, int]", "id": "f847:c0:m3"} {"signature": "def send_group_msg_async(self, *, group_id, message, auto_escape=False):", "body": "return super().__getattr__('')(group_id=group_id, message=message, auto_escape=auto_escape)", "docstring": "\u53d1\u9001\u7fa4\u6d88\u606f (\u5f02\u6b65\u7248\u672c)\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param str | list[ dict[ str, unknown ] ] message: \u8981\u53d1\u9001\u7684\u5185\u5bb9\n:param bool auto_escape: \u6d88\u606f\u5185\u5bb9\u662f\u5426\u4f5c\u4e3a\u7eaf\u6587\u672c\u53d1\u9001\uff08\u5373\u4e0d\u89e3\u6790 CQ \u7801\uff09\uff0c`message` \u6570\u636e\u7c7b\u578b\u4e3a `list` \u65f6\u65e0\u6548\n:return: None\n:rtype: None", "id": "f847:c0:m4"} {"signature": "def send_discuss_msg(self, *, discuss_id, message, auto_escape=False):", "body": 
"return super().__getattr__('')(discuss_id=discuss_id, message=message, auto_escape=auto_escape)", "docstring": "\u53d1\u9001\u8ba8\u8bba\u7ec4\u6d88\u606f\n\n------------\n\n:param int discuss_id: \u8ba8\u8bba\u7ec4 ID\uff08\u6b63\u5e38\u60c5\u51b5\u4e0b\u770b\u4e0d\u5230\uff0c\u9700\u8981\u4ece\u8ba8\u8bba\u7ec4\u6d88\u606f\u4e0a\u62a5\u7684\u6570\u636e\u4e2d\u83b7\u5f97\uff09\n:param str | list[ dict[ str, unknown ] ] message: \u8981\u53d1\u9001\u7684\u5185\u5bb9\n:param bool auto_escape: \u6d88\u606f\u5185\u5bb9\u662f\u5426\u4f5c\u4e3a\u7eaf\u6587\u672c\u53d1\u9001\uff08\u5373\u4e0d\u89e3\u6790 CQ \u7801\uff09\uff0c`message` \u6570\u636e\u7c7b\u578b\u4e3a `list` \u65f6\u65e0\u6548\n:return: {\"message_id\": int \u6d88\u606fID}\n:rtype: dict[string, int]", "id": "f847:c0:m5"} {"signature": "def send_discuss_msg_async(self, *, discuss_id, message, auto_escape=False):", "body": "return super().__getattr__('')(discuss_id=discuss_id, message=message, auto_escape=auto_escape)", "docstring": "\u53d1\u9001\u8ba8\u8bba\u7ec4\u6d88\u606f (\u5f02\u6b65\u7248\u672c)\n\n------------\n\n:param int discuss_id: \u8ba8\u8bba\u7ec4 ID\uff08\u6b63\u5e38\u60c5\u51b5\u4e0b\u770b\u4e0d\u5230\uff0c\u9700\u8981\u4ece\u8ba8\u8bba\u7ec4\u6d88\u606f\u4e0a\u62a5\u7684\u6570\u636e\u4e2d\u83b7\u5f97\uff09\n:param str | list[ dict[ str, unknown ] ] message: \u8981\u53d1\u9001\u7684\u5185\u5bb9\n:param bool auto_escape: \u6d88\u606f\u5185\u5bb9\u662f\u5426\u4f5c\u4e3a\u7eaf\u6587\u672c\u53d1\u9001\uff08\u5373\u4e0d\u89e3\u6790 CQ \u7801\uff09\uff0c`message` \u6570\u636e\u7c7b\u578b\u4e3a `list` \u65f6\u65e0\u6548\n:return: None\n:rtype: None", "id": "f847:c0:m6"} {"signature": "def send_msg(self, *, message_type, user_id=None, group_id=None, discuss_id=None, message, auto_escape=False):", "body": "return super().__getattr__('')(message_type=message_type, user_id=user_id, group_id=group_id,discuss_id=discuss_id, message=message, auto_escape=auto_escape)", "docstring": "\u53d1\u9001\u6d88\u606f\n\n------------\n\n:param str message_type: \u6d88\u606f\u7c7b\u578b\uff0c\u652f\u6301 `private`\u3001`group`\u3001`discuss`\uff0c\u5206\u522b\u5bf9\u5e94\u79c1\u804a\u3001\u7fa4\u7ec4\u3001\u8ba8\u8bba\u7ec4\n:param int user_id: \u5bf9\u65b9 QQ \u53f7\uff08\u6d88\u606f\u7c7b\u578b\u4e3a `private` \u65f6\u9700\u8981\uff09\n:param int group_id: \u7fa4\u53f7\uff08\u6d88\u606f\u7c7b\u578b\u4e3a `group` \u65f6\u9700\u8981\uff09\n:param int discuss_id: \u8ba8\u8bba\u7ec4 ID\uff08\u9700\u8981\u4ece\u4e0a\u62a5\u6d88\u606f\u4e2d\u83b7\u53d6\uff0c\u6d88\u606f\u7c7b\u578b\u4e3a `discuss` \u65f6\u9700\u8981\uff09\n:param str | list[ dict[ str, unknown ] ] message: \u8981\u53d1\u9001\u7684\u5185\u5bb9\n:param bool auto_escape: \u6d88\u606f\u5185\u5bb9\u662f\u5426\u4f5c\u4e3a\u7eaf\u6587\u672c\u53d1\u9001\uff08\u5373\u4e0d\u89e3\u6790 CQ \u7801\uff09\uff0c`message` \u6570\u636e\u7c7b\u578b\u4e3a `list` \u65f6\u65e0\u6548\n:return: {\"message_id\": int \u6d88\u606fID}\n:rtype: dict[string, int]", "id": "f847:c0:m7"} {"signature": "def send_msg_async(self, *, message_type, user_id=None, group_id=None, discuss_id=None, message, auto_escape=False):", "body": "return super().__getattr__('')(message_type=message_type, user_id=user_id, group_id=group_id,discuss_id=discuss_id, message=message, auto_escape=auto_escape)", "docstring": "\u53d1\u9001\u6d88\u606f (\u5f02\u6b65\u7248\u672c)\n\n------------\n\n:param str message_type: \u6d88\u606f\u7c7b\u578b\uff0c\u652f\u6301 
`private`\u3001`group`\u3001`discuss`\uff0c\u5206\u522b\u5bf9\u5e94\u79c1\u804a\u3001\u7fa4\u7ec4\u3001\u8ba8\u8bba\u7ec4\n:param int user_id: \u5bf9\u65b9 QQ \u53f7\uff08\u6d88\u606f\u7c7b\u578b\u4e3a `private` \u65f6\u9700\u8981\uff09\n:param int group_id: \u7fa4\u53f7\uff08\u6d88\u606f\u7c7b\u578b\u4e3a `group` \u65f6\u9700\u8981\uff09\n:param int discuss_id: \u8ba8\u8bba\u7ec4 ID\uff08\u9700\u8981\u4ece\u4e0a\u62a5\u6d88\u606f\u4e2d\u83b7\u53d6\uff0c\u6d88\u606f\u7c7b\u578b\u4e3a `discuss` \u65f6\u9700\u8981\uff09\n:param str | list[ dict[ str, unknown ] ] message: \u8981\u53d1\u9001\u7684\u5185\u5bb9\n:param bool auto_escape: \u6d88\u606f\u5185\u5bb9\u662f\u5426\u4f5c\u4e3a\u7eaf\u6587\u672c\u53d1\u9001\uff08\u5373\u4e0d\u89e3\u6790 CQ \u7801\uff09\uff0c`message` \u6570\u636e\u7c7b\u578b\u4e3a `list` \u65f6\u65e0\u6548\n:return: None\n:rtype: None", "id": "f847:c0:m8"} {"signature": "def delete_msg(self, *, message_id):", "body": "return super().__getattr__('')(message_id=message_id)", "docstring": "\u64a4\u56de\u6d88\u606f\n\n------------\n\n:param int message_id: \u6d88\u606f ID\n:return: None\n:rtype: None", "id": "f847:c0:m9"} {"signature": "def send_like(self, *, user_id, times=):", "body": "return super().__getattr__('')(user_id=user_id, times=times)", "docstring": "\u53d1\u9001\u597d\u53cb\u8d5e\n\n------------\n\n:param int user_id: \u5bf9\u65b9 QQ \u53f7\n:param int times: \u8d5e\u7684\u6b21\u6570\uff0c\u6bcf\u4e2a\u597d\u53cb\u6bcf\u5929\u6700\u591a 10 \u6b21\n:return: None\n:rtype: None", "id": "f847:c0:m10"} {"signature": "def set_group_kick(self, *, group_id, user_id, reject_add_request=False):", "body": "return super().__getattr__('')(group_id=group_id, user_id=user_id, reject_add_request=reject_add_request)", "docstring": "\u7fa4\u7ec4\u8e22\u4eba\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param int user_id: \u8981\u8e22\u7684 QQ \u53f7\n:param bool reject_add_request: \u62d2\u7edd\u6b64\u4eba\u7684\u52a0\u7fa4\u8bf7\u6c42\n:return: None\n:rtype: None", "id": "f847:c0:m11"} {"signature": "def set_group_ban(self, *, group_id, user_id, duration= * ):", "body": "return super().__getattr__('')(group_id=group_id, user_id=user_id, duration=duration)", "docstring": "\u7fa4\u7ec4\u5355\u4eba\u7981\u8a00\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param int user_id: \u8981\u7981\u8a00\u7684 QQ \u53f7\n:param int duration: \u7981\u8a00\u65f6\u957f\uff0c\u5355\u4f4d\u79d2\uff0c0 \u8868\u793a\u53d6\u6d88\u7981\u8a00\n:return: None\n:rtype: None", "id": "f847:c0:m12"} {"signature": "def set_group_anonymous_ban(self, *, group_id, flag, duration= * ):", "body": "return super().__getattr__('')(group_id=group_id, flag=flag, duration=duration)", "docstring": "\u7fa4\u7ec4\u533f\u540d\u7528\u6237\u7981\u8a00\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param str flag: \u8981\u7981\u8a00\u7684\u533f\u540d\u7528\u6237\u7684 flag\uff08\u9700\u4ece\u7fa4\u6d88\u606f\u4e0a\u62a5\u7684\u6570\u636e\u4e2d\u83b7\u5f97\uff09\n:param int duration: \u7981\u8a00\u65f6\u957f\uff0c\u5355\u4f4d\u79d2\uff0c**\u65e0\u6cd5\u53d6\u6d88\u533f\u540d\u7528\u6237\u7981\u8a00**\n:return: None\n:rtype: None", "id": "f847:c0:m13"} {"signature": "def set_group_whole_ban(self, *, group_id, enable=True):", "body": "return super().__getattr__('')(group_id=group_id, enable=enable)", "docstring": "\u7fa4\u7ec4\u5168\u5458\u7981\u8a00\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param bool enable: \u662f\u5426\u7981\u8a00\n:return: None\n:rtype: None", "id": "f847:c0:m14"} 
{"signature": "def set_group_admin(self, *, group_id, user_id, enable=True):", "body": "return super().__getattr__('')(group_id=group_id, user_id=user_id, enable=enable)", "docstring": "\u7fa4\u7ec4\u8bbe\u7f6e\u7ba1\u7406\u5458\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param user_id: \u8981\u8bbe\u7f6e\u7ba1\u7406\u5458\u7684 QQ \u53f7\n:param enable: True \u4e3a\u8bbe\u7f6e\uff0cFalse \u4e3a\u53d6\u6d88\n:return: None\n:rtype: None", "id": "f847:c0:m15"} {"signature": "def set_group_anonymous(self, *, group_id, enable=True):", "body": "return super().__getattr__('')(group_id=group_id, enable=enable)", "docstring": "\u7fa4\u7ec4\u533f\u540d\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param bool enable: \u662f\u5426\u5141\u8bb8\u533f\u540d\u804a\u5929\n:return: None\n:rtype: None", "id": "f847:c0:m16"} {"signature": "def set_group_card(self, *, group_id, user_id, card=None):", "body": "return super().__getattr__('')(group_id=group_id, user_id=user_id, card=card)", "docstring": "\u8bbe\u7f6e\u7fa4\u540d\u7247\uff08\u7fa4\u5907\u6ce8\uff09\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param int user_id: \u8981\u8bbe\u7f6e\u7684 QQ \u53f7\n:param str | None card: \u7fa4\u540d\u7247\u5185\u5bb9\uff0c\u4e0d\u586b\u6216\u7a7a\u5b57\u7b26\u4e32\u8868\u793a\u5220\u9664\u7fa4\u540d\u7247\n:return: None\n:rtype: None", "id": "f847:c0:m17"} {"signature": "def set_group_leave(self, *, group_id, is_dismiss=False):", "body": "return super().__getattr__('')(group_id=group_id, is_dismiss=is_dismiss)", "docstring": "\u9000\u51fa\u7fa4\u7ec4\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param bool is_dismiss: \u662f\u5426\u89e3\u6563\uff0c\u5982\u679c\u767b\u5f55\u53f7\u662f\u7fa4\u4e3b\uff0c\u5219\u4ec5\u5728\u6b64\u9879\u4e3a true \u65f6\u80fd\u591f\u89e3\u6563\n:return: None\n:rtype: None", "id": "f847:c0:m18"} {"signature": "def set_group_special_title(self, *, group_id, user_id, special_title, duration=-):", "body": "return super().__getattr__('')(group_id=group_id, user_id=user_id, special_title=special_title, duration=duration)", "docstring": "\u8bbe\u7f6e\u7fa4\u7ec4\u4e13\u5c5e\u5934\u8854\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param int user_id: \u8981\u8bbe\u7f6e\u7684 QQ \u53f7\n:param str special_title: \u4e13\u5c5e\u5934\u8854\uff0c\u4e0d\u586b\u6216\u7a7a\u5b57\u7b26\u4e32\u8868\u793a\u5220\u9664\u4e13\u5c5e\u5934\u8854\uff0c\u53ea\u80fd\u4fdd\u7559\u524d6\u4e2a\u82f1\u6587\u4e0e\u6c49\u5b57\uff0cEmoji \u6839\u636e\u5b57\u7b26\u5b9e\u9645\u5b57\u7b26\u957f\u5ea6\u5360\u7528\u53ea\u80fd\u653e\u6700\u591a3\u4e2a\u751a\u81f3\u66f4\u5c11\uff0c\u8d85\u51fa\u957f\u5ea6\u90e8\u5206\u4f1a\u88ab\u622a\u65ad\n:param int duration: \u4e13\u5c5e\u5934\u8854\u6709\u6548\u671f\uff0c\u5355\u4f4d\u79d2\uff0c-1 \u8868\u793a\u6c38\u4e45\uff0c\u4e0d\u8fc7\u6b64\u9879\u4f3c\u4e4e\u6ca1\u6709\u6548\u679c\uff0c\u53ef\u80fd\u662f\u53ea\u6709\u67d0\u4e9b\u7279\u6b8a\u7684\u65f6\u95f4\u957f\u5ea6\u6709\u6548\uff0c\u6709\u5f85\u6d4b\u8bd5\n:return: None\n:rtype: None", "id": "f847:c0:m19"} {"signature": "def set_discuss_leave(self, *, discuss_id):", "body": "return super().__getattr__('')(discuss_id=discuss_id)", "docstring": "\u9000\u51fa\u8ba8\u8bba\u7ec4\n\n------------\n\n:param int discuss_id: \u8ba8\u8bba\u7ec4 ID\uff08\u6b63\u5e38\u60c5\u51b5\u4e0b\u770b\u4e0d\u5230\uff0c\u9700\u8981\u4ece\u8ba8\u8bba\u7ec4\u6d88\u606f\u4e0a\u62a5\u7684\u6570\u636e\u4e2d\u83b7\u5f97\uff09\n:return: None\n:rtype: None", "id": "f847:c0:m20"} {"signature": "def 
set_friend_add_request(self, *, flag, approve=True, remark=None):", "body": "return super().__getattr__('')(flag=flag, approve=approve, remark=remark)", "docstring": "\u5904\u7406\u52a0\u597d\u53cb\u8bf7\u6c42\n\n------------\n\n:param str flag: \u52a0\u597d\u53cb\u8bf7\u6c42\u7684 flag\uff08\u9700\u4ece\u4e0a\u62a5\u7684\u6570\u636e\u4e2d\u83b7\u5f97\uff09\n:param bool approve: \u662f\u5426\u540c\u610f\u8bf7\u6c42\n:param str remark: \u6dfb\u52a0\u540e\u7684\u597d\u53cb\u5907\u6ce8\uff08\u4ec5\u5728\u540c\u610f\u65f6\u6709\u6548\uff09\n:return: None\n:rtype: None", "id": "f847:c0:m21"} {"signature": "def set_group_add_request(self, *, flag, type, approve=True, reason=None):", "body": "return super().__getattr__('')(flag=flag, type=type, approve=approve, reason=reason)", "docstring": "\u5904\u7406\u52a0\u7fa4\u8bf7\u6c42\u3001\u7fa4\u7ec4\u6210\u5458\u9080\u8bf7\n\n------------\n\n:param str flag: \u52a0\u7fa4\u8bf7\u6c42\u7684 flag\uff08\u9700\u4ece\u4e0a\u62a5\u7684\u6570\u636e\u4e2d\u83b7\u5f97\uff09\n:param str type: `add` \u6216 `invite`\uff0c\u8bf7\u6c42\u7c7b\u578b\uff08\u9700\u8981\u548c\u4e0a\u62a5\u6d88\u606f\u4e2d\u7684 `sub_type` \u5b57\u6bb5\u76f8\u7b26\uff09\n:param bool approve: \u662f\u5426\u540c\u610f\u8bf7\u6c42/\u9080\u8bf7\n:param str reason: \u62d2\u7edd\u7406\u7531\uff08\u4ec5\u5728\u62d2\u7edd\u65f6\u6709\u6548\uff09\n:return: None\n:rtype: None", "id": "f847:c0:m22"} {"signature": "def get_login_info(self):", "body": "return super().__getattr__('')()", "docstring": "\u83b7\u53d6\u767b\u5f55\u53f7\u4fe1\u606f\n\n------------\n\n:return: { \"user_id\": (QQ \u53f7: int), \"nickname\": (QQ \u6635\u79f0: str) }\n:rtype: dict[ str, int | str ]\n\n------------\n\n========= ========= =========\n\u54cd\u5e94\u6570\u636e\n-------------------------------\n\u6570\u636e\u7c7b\u578b \u5b57\u6bb5\u540d \u8bf4\u660e\n========= ========= =========\nint user_id QQ \u53f7\nstr nickname QQ \u6635\u79f0\n========= ========= =========", "id": "f847:c0:m23"} {"signature": "def get_stranger_info(self, *, user_id, no_cache=False):", "body": "return super().__getattr__('')(user_id=user_id, no_cache=no_cache)", "docstring": "\u83b7\u53d6\u964c\u751f\u4eba\u4fe1\u606f\n\n------------\n\n:param int user_id: QQ \u53f7\uff08\u4e0d\u53ef\u4ee5\u662f\u767b\u5f55\u53f7\uff09\n:param bool no_cache: \u662f\u5426\u4e0d\u4f7f\u7528\u7f13\u5b58\uff08\u4f7f\u7528\u7f13\u5b58\u53ef\u80fd\u66f4\u65b0\u4e0d\u53ca\u65f6\uff0c\u4f46\u54cd\u5e94\u66f4\u5feb\uff09\n:return: { \"user_id\": (QQ \u53f7: int), \"nickname\": (\u6635\u79f0: str), \"sex\": (\u6027\u522b: str in ['male', 'female', 'unknown']), \"age\": (\u5e74\u9f84: int) }\n:rtype: dict[ str, int | str ]\n\n------------\n\n======== ========= ======================================\n\u54cd\u5e94\u6570\u636e\n-----------------------------------------------------------\n\u6570\u636e\u7c7b\u578b \u5b57\u6bb5\u540d \u8bf4\u660e\n======== ========= ======================================\nint user_id QQ \u53f7\nstr nickname \u6635\u79f0\nstr sex \u6027\u522b\uff0c`male` \u6216 `female` \u6216 `unknown`\nint age \u5e74\u9f84\n======== ========= ======================================", "id": "f847:c0:m24"} {"signature": "def get_group_list(self):", "body": "return super().__getattr__('')()", "docstring": "\u83b7\u53d6\u7fa4\u5217\u8868\n\n------------\n\n:return: [{ \"group_id\": (\u7fa4\u53f7: int), \"group_name\": (\u7fa4\u540d\u79f0: str) }, ...]\n:rtype: list[ dict[ str, int | str ] ]\n\n------------\n\n======== =========== 
=========\n\u54cd\u5e94\u6570\u636e\n--------------------------------\n\u6570\u636e\u7c7b\u578b \u5b57\u6bb5\u540d \u8bf4\u660e\n======== =========== =========\nint group_id \u7fa4\u53f7\nstr group_name \u7fa4\u540d\u79f0\n======== =========== =========", "id": "f847:c0:m25"} {"signature": "def get_group_member_info(self, *, group_id, user_id, no_cache=False):", "body": "return super().__getattr__('')(group_id=group_id, user_id=user_id, no_cache=no_cache)", "docstring": "\u83b7\u53d6\u7fa4\u6210\u5458\u4fe1\u606f\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param int user_id: QQ \u53f7\uff08\u4e0d\u53ef\u4ee5\u662f\u767b\u5f55\u53f7\uff09\n:param bool no_cache: \u662f\u5426\u4e0d\u4f7f\u7528\u7f13\u5b58\uff08\u4f7f\u7528\u7f13\u5b58\u53ef\u80fd\u66f4\u65b0\u4e0d\u53ca\u65f6\uff0c\u4f46\u54cd\u5e94\u66f4\u5feb\uff09\n:return: { \"group_id\": (\u7fa4\u53f7: int), \"user_id\": (QQ \u53f7: int), \"nickname\": (\u6635\u79f0: str), \"card\": (\u7fa4\u540d\u7247/\u5907\u6ce8: str), \"sex\": (\u6027\u522b: str in ['male', 'female', 'unknown']), \"age\": (\u5e74\u9f84: int), \"area\": (\u5730\u533a: str), \"join_time\": (\u52a0\u7fa4\u65f6\u95f4\u6233: int), \"last_sent_time\": (\u6700\u540e\u53d1\u8a00\u65f6\u95f4\u6233: int), \"level\": (\u6210\u5458\u7b49\u7ea7: str), \"role\": (\u89d2\u8272: str in ['owner', 'admin', 'member']), \"unfriendly\": (\u662f\u5426\u4e0d\u826f\u8bb0\u5f55\u6210\u5458: bool), \"title\": (\u4e13\u5c5e\u5934\u8854: str), \"title_expire_time\": (\u4e13\u5c5e\u5934\u8854\u8fc7\u671f\u65f6\u95f4\u6233: int), \"card_changeable\": (\u662f\u5426\u5141\u8bb8\u4fee\u6539\u7fa4\u540d\u7247: bool) }\n:rtype: dict[ str, int | str | bool ]\n\n------------\n\n======== =================== ======================================\n \u54cd\u5e94\u6570\u636e\n---------------------------------------------------------------------\n\u6570\u636e\u7c7b\u578b \u5b57\u6bb5\u540d \u8bf4\u660e\n======== =================== ======================================\nint group_id \u7fa4\u53f7\nint user_id QQ \u53f7\nstr nickname \u6635\u79f0\nstr card \u7fa4\u540d\u7247/\u5907\u6ce8\nstr sex \u6027\u522b\uff0c`male` \u6216 `female` \u6216 `unknown`\nint age \u5e74\u9f84\nstr area \u5730\u533a\nint join_time \u52a0\u7fa4\u65f6\u95f4\u6233\nint last_sent_time \u6700\u540e\u53d1\u8a00\u65f6\u95f4\u6233\nstr level \u6210\u5458\u7b49\u7ea7\nstr role \u89d2\u8272\uff0c`owner` \u6216 `admin` \u6216 `member`\nbool unfriendly \u662f\u5426\u4e0d\u826f\u8bb0\u5f55\u6210\u5458\nstr title \u4e13\u5c5e\u5934\u8854\nint title_expire_time \u4e13\u5c5e\u5934\u8854\u8fc7\u671f\u65f6\u95f4\u6233\nbool card_changeable \u662f\u5426\u5141\u8bb8\u4fee\u6539\u7fa4\u540d\u7247\n======== =================== ======================================", "id": "f847:c0:m26"} {"signature": "def get_group_member_list(self, *, group_id):", "body": "return super().__getattr__('')(group_id=group_id)", "docstring": "\u83b7\u53d6\u7fa4\u6210\u5458\u5217\u8868\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:return: [{ \"group_id\": (\u7fa4\u53f7: int), \"user_id\": (QQ \u53f7: int), \"nickname\": (\u6635\u79f0: str), \"card\": (\u7fa4\u540d\u7247/\u5907\u6ce8: str), \"sex\": (\u6027\u522b: str in ['male', 'female', 'unknown']), \"age\": (\u5e74\u9f84: int), \"area\": (\u5730\u533a: str), \"join_time\": (\u52a0\u7fa4\u65f6\u95f4\u6233: int), \"last_sent_time\": (\u6700\u540e\u53d1\u8a00\u65f6\u95f4\u6233: int), \"level\": (\u6210\u5458\u7b49\u7ea7: str), \"role\": (\u89d2\u8272: str in ['owner', 'admin', 'member']), 
\"unfriendly\": (\u662f\u5426\u4e0d\u826f\u8bb0\u5f55\u6210\u5458: bool), \"title\": (\u4e13\u5c5e\u5934\u8854: str), \"title_expire_time\": (\u4e13\u5c5e\u5934\u8854\u8fc7\u671f\u65f6\u95f4\u6233: int), \"card_changeable\": (\u662f\u5426\u5141\u8bb8\u4fee\u6539\u7fa4\u540d\u7247: bool) }, ...]\n:rtype: list[ dict[ str, int | str | bool ] ]\n\n------------\n\n\u54cd\u5e94\u6570\u636e\u4ee5 **\u5217\u8868** \u5305\u88c5\u7684\u5b57\u5178\u7684\u5f62\u5f0f\u63d0\u4f9b\u3002`( List[ Dict[ ...] ] )`\n\n======== =================== ======================================\n \u54cd\u5e94\u6570\u636e\n---------------------------------------------------------------------\n\u6570\u636e\u7c7b\u578b \u5b57\u6bb5\u540d \u8bf4\u660e\n======== =================== ======================================\nint group_id \u7fa4\u53f7\nint user_id QQ \u53f7\nstr nickname \u6635\u79f0\nstr card \u7fa4\u540d\u7247/\u5907\u6ce8\nstr sex \u6027\u522b\uff0c`male` \u6216 `female` \u6216 `unknown`\nint age \u5e74\u9f84\nstr area \u5730\u533a\nint join_time \u52a0\u7fa4\u65f6\u95f4\u6233\nint last_sent_time \u6700\u540e\u53d1\u8a00\u65f6\u95f4\u6233\nstr level \u6210\u5458\u7b49\u7ea7\nstr role \u89d2\u8272\uff0c`owner` \u6216 `admin` \u6216 `member`\nbool unfriendly \u662f\u5426\u4e0d\u826f\u8bb0\u5f55\u6210\u5458\nstr title \u4e13\u5c5e\u5934\u8854\nint title_expire_time \u4e13\u5c5e\u5934\u8854\u8fc7\u671f\u65f6\u95f4\u6233\nbool card_changeable \u662f\u5426\u5141\u8bb8\u4fee\u6539\u7fa4\u540d\u7247\n======== =================== ======================================\n\n**\u5907\u6ce8:** \u54cd\u5e94\u5185\u5bb9\u4e3a\u5305\u542b\u5b57\u5178\u7684\u5217\u8868 *( List[ Dict[] ] )* \uff0c\u6bcf\u4e2a\u5143\u7d20\u7684\u5185\u5bb9\u548c `get_group_member_info` \u63a5\u53e3\u76f8\u540c\uff0c\u4f46\u5bf9\u4e8e\u540c\u4e00\u4e2a\u7fa4\u7ec4\u7684\u540c\u4e00\u4e2a\u6210\u5458\uff0c\u83b7\u53d6\u5217\u8868\u65f6\u548c\u83b7\u53d6\u5355\u72ec\u7684\u6210\u5458\u4fe1\u606f\u65f6\uff0c\u67d0\u4e9b\u5b57\u6bb5\u53ef\u80fd\u6709\u6240\u4e0d\u540c\uff0c\u4f8b\u5982 `area`\u3001`title` \u7b49\u5b57\u6bb5\u5728\u83b7\u53d6\u5217\u8868\u65f6\u65e0\u6cd5\u83b7\u5f97\uff0c\u5177\u4f53\u5e94\u4ee5\u5355\u72ec\u7684\u6210\u5458\u4fe1\u606f\u4e3a\u51c6\u3002", "id": "f847:c0:m27"} {"signature": "def get_cookies(self):", "body": "return super().__getattr__('')()", "docstring": "\u83b7\u53d6 Cookies\n\n------------\n\n:return: { \"cookies\": (Cookies: str)}\n:rtype: dict[ str, str ]\n\n------------\n\n======== =========== =========\n\u54cd\u5e94\u6570\u636e\n--------------------------------\n\u6570\u636e\u7c7b\u578b \u5b57\u6bb5\u540d \u8bf4\u660e\n======== =========== =========\nstr cookies Cookies\n======== =========== =========", "id": "f847:c0:m28"} {"signature": "def get_csrf_token(self):", "body": "return super().__getattr__('')()", "docstring": "\u83b7\u53d6 CSRF Token\n\n------------\n\n:return: { \"token\": (CSRF Token: int)}\n:rtype: dict[ str, int ]\n\n------------\n\n======== =========== ==========\n\u54cd\u5e94\u6570\u636e\n---------------------------------\n\u6570\u636e\u7c7b\u578b \u5b57\u6bb5\u540d \u8bf4\u660e\n======== =========== ==========\nint token CSRF Token\n======== =========== ==========", "id": "f847:c0:m29"} {"signature": "def get_record(self, *, file, out_format):", "body": "return super().__getattr__('')(file=file, out_format=out_format)", "docstring": "\u83b7\u53d6\u8bed\u97f3\n\n------------\n\n:param str file: \u6536\u5230\u7684\u8bed\u97f3\u6587\u4ef6\u540d\uff0c\u5982 
`0B38145AA44505000B38145AA4450500.silk`\n:param str out_format: \u8981\u8f6c\u6362\u5230\u7684\u683c\u5f0f\uff0c\u76ee\u524d\u652f\u6301 `mp3`\u3001`amr`\u3001`wma`\u3001`m4a`\u3001`spx`\u3001`ogg`\u3001`wav`\u3001`flac`\n:return: { \"file\": (\u8f6c\u6362\u540e\u7684\u8bed\u97f3\u6587\u4ef6\u540d: str)}\n:rtype: dict[ str, str ]\n\n\n------------\n\n\u5176\u5b9e\u5e76\u4e0d\u662f\u771f\u7684\u83b7\u53d6\u8bed\u97f3\uff0c\u800c\u662f\u8f6c\u6362\u8bed\u97f3\u5230\u6307\u5b9a\u7684\u683c\u5f0f\uff0c\u7136\u540e\u8fd4\u56de\u8bed\u97f3\u6587\u4ef6\u540d\uff08`data/record` \u76ee\u5f55\u4e0b\uff09\u3002\n\n======== =========== =============================================================\n\u54cd\u5e94\u6570\u636e\n------------------------------------------------------------------------------------\n\u6570\u636e\u7c7b\u578b \u5b57\u6bb5\u540d \u8bf4\u660e\n======== =========== =============================================================\nstr file \u8f6c\u6362\u540e\u7684\u8bed\u97f3\u6587\u4ef6\u540d\uff0c\u5982 `0B38145AA44505000B38145AA4450500.mp3`\n======== =========== =============================================================", "id": "f847:c0:m30"} {"signature": "def get_status(self):", "body": "return super().__getattr__('')()", "docstring": "\u83b7\u53d6\u63d2\u4ef6\u8fd0\u884c\u72b6\u6001\n\n------------\n\n:return: { \"good\": (\u6b63\u5e38\u8fd0\u884c: bool), \"app_initialized\": (\u63d2\u4ef6\u5df2\u521d\u59cb\u5316: bool), \"app_enabled\": (\u63d2\u4ef6\u5df2\u542f\u7528: bool), \"online\": (\u5f53\u524dQQ\u5728\u7ebf: bool), \"http_service_good\": (HTTP\u670d\u52a1\u6b63\u5e38\u8fd0\u884c: bool), \"ws_service_good\": (WebSocket\u670d\u52a1\u6b63\u5e38\u8fd0\u884c: bool), \"ws_reverse_service_good\": (\u53cd\u5411WebSocket\u670d\u52a1\u6b63\u5e38\u8fd0\u884c: bool) }\n:rtype: dict[ str, bool ]\n\n------------\n\n======== ======================== ====================================\n\u54cd\u5e94\u6570\u636e\n------------------------------------------------------------------------\n\u6570\u636e\u7c7b\u578b \u5b57\u6bb5\u540d \u8bf4\u660e\n======== ======================== ====================================\nbool good \u63d2\u4ef6\u72b6\u6001\u7b26\u5408\u9884\u671f\uff0c\u610f\u5473\u7740\u63d2\u4ef6\u5df2\u521d\u59cb\u5316\uff0c\u9700\u8981\u542f\u52a8\u7684\u670d\u52a1\u90fd\u5728\u6b63\u5e38\u8fd0\u884c\uff0c\u4e14 QQ \u5728\u7ebf\nbool app_initialized \u63d2\u4ef6\u5df2\u521d\u59cb\u5316\nbool app_enabled \u63d2\u4ef6\u5df2\u542f\u7528\nbool online \u5f53\u524d QQ \u5728\u7ebf\nbool http_service_good `use_http` \u914d\u7f6e\u9879\u4e3a `yes` \u65f6\u6709\u6b64\u5b57\u6bb5\uff0c\u8868\u793a HTTP \u670d\u52a1\u6b63\u5e38\u8fd0\u884c\nbool ws_service_good `use_ws` \u914d\u7f6e\u9879\u4e3a `yes` \u65f6\u6709\u6b64\u5b57\u6bb5\uff0c\u8868\u793a WebSocket \u670d\u52a1\u6b63\u5e38\u8fd0\u884c\nbool ws_reverse_service_good `use_ws_reverse` \u914d\u7f6e\u9879\u4e3a `yes` \u65f6\u6709\u6b64\u5b57\u6bb5\uff0c\u8868\u793a\u53cd\u5411 WebSocket \u670d\u52a1\u6b63\u5e38\u8fd0\u884c\n======== ======================== ====================================", "id": "f847:c0:m31"} {"signature": "def get_version_info(self):", "body": "return super().__getattr__('')()", "docstring": "\u83b7\u53d6\u9177 Q \u53ca HTTP API \u63d2\u4ef6\u7684\u7248\u672c\u4fe1\u606f\n\n------------\n\n:return: { \"coolq_directory\": (\u9177Q\u6839\u76ee\u5f55\u8def\u5f84: str), \"coolq_edition\": (\u9177Q\u7248\u672c: str in ['air', 'pro']), \"plugin_version\": (API\u63d2\u4ef6\u7248\u672c: str), 
\"plugin_build_number\": (API\u63d2\u4ef6build\u53f7: int), \"plugin_build_configuration\": (API\u63d2\u4ef6\u7f16\u8bd1\u914d\u7f6e: str in ['debug', 'release']) }\n:rtype: dict[ str, int | str ]\n\n\n------------\n\n======== ========================== ===============================\n\u54cd\u5e94\u6570\u636e\n---------------------------------------------------------------------\n\u6570\u636e\u7c7b\u578b \u5b57\u6bb5\u540d \u8bf4\u660e\n======== ========================== ===============================\nstr coolq_directory \u9177 Q \u6839\u76ee\u5f55\u8def\u5f84\nstr coolq_edition \u9177 Q \u7248\u672c\uff0c`air` \u6216 `pro`\nstr plugin_version HTTP API \u63d2\u4ef6\u7248\u672c\uff0c\u4f8b\u5982 2.1.3\nint plugin_build_number HTTP API \u63d2\u4ef6 build \u53f7\nstr plugin_build_configuration HTTP API \u63d2\u4ef6\u7f16\u8bd1\u914d\u7f6e\uff0c`debug` \u6216 `release`\n======== ========================== ===============================", "id": "f847:c0:m32"} {"signature": "def set_restart(self):", "body": "return super().__getattr__('')()", "docstring": "\u91cd\u542f\u9177 Q\uff0c\u5e76\u4ee5\u5f53\u524d\u767b\u5f55\u53f7\u81ea\u52a8\u767b\u5f55\uff08\u9700\u52fe\u9009\u5feb\u901f\u767b\u5f55\uff09\n\n------------\n\n:return: None\n:rtype: None", "id": "f847:c0:m33"} {"signature": "def set_restart_plugin(self):", "body": "return super().__getattr__('')()", "docstring": "\u91cd\u542f HTTP API \u63d2\u4ef6\n\n------------\n\n:return: None\n:rtype: None\n\n------------\n\n\u7531\u4e8e\u91cd\u542f\u63d2\u4ef6\u540c\u65f6\u9700\u8981\u91cd\u542f API \u670d\u52a1\uff0c\u8fd9\u610f\u5473\u7740\u5f53\u524d\u7684 API \u8bf7\u6c42\u4f1a\u88ab\u4e2d\u65ad\uff0c\u56e0\u6b64\u8fd9\u4e2a\u63a5\u53e3\u4f1a\u5ef6\u8fdf 2 \u79d2\u91cd\u542f\u63d2\u4ef6\uff0c\u63a5\u53e3\u8fd4\u56de\u7684 status \u662f async\u3002\n\n**\u5728Python SDK\u4e2d\u8fd4\u56de None \u3002**", "id": "f847:c0:m34"} {"signature": "def clean_data_dir(self, *, data_dir):", "body": "return super().__getattr__('')(data_dir=data_dir)", "docstring": "\u6e05\u7406\u6570\u636e\u76ee\u5f55\n\n------------\n\n:param str data_dir: \u6536\u5230\u6e05\u7406\u7684\u76ee\u5f55\u540d\uff0c\u652f\u6301 `image`\u3001`record`\u3001`show`\u3001`bface`\n:return: None\n:rtype: None\n\n------------\n\n\u7528\u4e8e\u6e05\u7406\u79ef\u6512\u4e86\u592a\u591a\u65e7\u6587\u4ef6\u7684\u6570\u636e\u76ee\u5f55\uff0c\u5982 `image`\u3002\n\nHTTP API v3.3.4 \u65b0\u589e", "id": "f847:c0:m35"} {"signature": "def clean_data_dir_async(self, *, data_dir):", "body": "return super().__getattr__('')(data_dir=data_dir)", "docstring": "\u6e05\u7406\u6570\u636e\u76ee\u5f55 (\u5f02\u6b65\u7248\u672c)\n\n------------\n\n:param str data_dir: \u6536\u5230\u6e05\u7406\u7684\u76ee\u5f55\u540d\uff0c\u652f\u6301 `image`\u3001`record`\u3001`show`\u3001`bface`\n:return: None\n:rtype: None\n\n------------\n\n\u7528\u4e8e\u6e05\u7406\u79ef\u6512\u4e86\u592a\u591a\u65e7\u6587\u4ef6\u7684\u6570\u636e\u76ee\u5f55\uff0c\u5982 `image`\u3002\n\nHTTP API v3.3.4 \u65b0\u589e", "id": "f847:c0:m36"} {"signature": "def _get_friend_list(self):", "body": "return super().__getattr__('')()", "docstring": "\u83b7\u53d6\u597d\u53cb\u5217\u8868 (\u5b9e\u9a8c\u6027\u529f\u80fd)\n\n------------\n\n:return: [{ \"friend_group_id\": (\u597d\u53cb\u5206\u7ec4 ID: int), \"friend_group_name\": (\u597d\u53cb\u5206\u7ec4\u540d\u79f0: str), \"friends\": (\u5206\u7ec4\u4e2d\u7684\u597d\u53cb: [{ \"nickname\": (\u597d\u53cb\u6635\u79f0: str), \"remark\": (\u597d\u53cb\u5907\u6ce8: str), \"user_id\": (\u597d\u53cb QQ 
\u53f7: int) }, ...]) }, ...]\n:rtype: list[ dict[ str, int | str | list[ dict[ str, int | str ] ] ] ]\n\n------------\n\n\u54cd\u5e94\u6570\u636e\u4ee5 **\u5217\u8868** \u5305\u88c5\u7684\u5b57\u5178\u7684\u5f62\u5f0f\u63d0\u4f9b\u3002`( List[ Dict[ ...] ] )`\n\n======== ================== ===============================\n\u54cd\u5e94\u6570\u636e\n-------------------------------------------------------------\n\u6570\u636e\u7c7b\u578b \u5b57\u6bb5\u540d \u8bf4\u660e\n======== ================== ===============================\nint friend_group_id \u597d\u53cb\u5206\u7ec4 ID\nstr friend_group_name \u597d\u53cb\u5206\u7ec4\u540d\u79f0\nlist friends \u5206\u7ec4\u4e2d\u7684\u597d\u53cb\n======== ================== ===============================\n\n\u5176\u4e2d\uff0c\u597d\u53cb\u4fe1\u606f\u7ed3\u6784\u4ee5 **\u5b57\u5178** \u7684\u5f62\u5f0f\u5b58\u50a8\u5728\u54cd\u5e94\u6570\u636e\u4e2d\u7684\u5206\u7ec4\u4e2d\u7684\u597d\u53cb `friends` \u7684 **\u5217\u8868** \u4e2d\u3002`( List[ Dict[ ...] ] )`\n\n======== ================== ===============================\n\u597d\u53cb\u4fe1\u606f\u7ed3\u6784\n-------------------------------------------------------------\n\u6570\u636e\u7c7b\u578b \u5b57\u6bb5\u540d \u8bf4\u660e\n======== ================== ===============================\nstr nickname \u597d\u53cb\u6635\u79f0\nstr remark \u597d\u53cb\u5907\u6ce8\nint user_id \u597d\u53cb QQ \u53f7\n======== ================== ===============================", "id": "f847:c0:m37"} {"signature": "def send(self, context, message, **kwargs):", "body": "context = context.copy()context[''] = messagecontext.update(kwargs)if '' not in context:if '' in context:context[''] = ''elif '' in context:context[''] = ''elif '' in context:context[''] = ''return super().__getattr__('')(**context)", "docstring": "\u4fbf\u6377\u56de\u590d\u3002\u4f1a\u6839\u636e\u4f20\u5165\u7684context\u81ea\u52a8\u5224\u65ad\u56de\u590d\u5bf9\u8c61\n------------\n:param dict context: \u4e8b\u4ef6\u6536\u5230\u7684content\n:return: None\n:rtype: None\n------------", "id": "f847:c0:m38"} {"signature": "def _add_doc(func, doc):", "body": "func.__doc__ = doc", "docstring": "Add documentation to a function.", "id": "f851:m0"} {"signature": "def _import_module(name):", "body": "__import__(name)return sys.modules[name]", "docstring": "Import module, returning the module after the last dot.", "id": "f851:m1"} {"signature": "def add_move(move):", "body": "setattr(_MovedItems, move.name, move)", "docstring": "Add an item to six.moves.", "id": "f851:m2"} {"signature": "def remove_move(name):", "body": "try:delattr(_MovedItems, name)except AttributeError:try:del moves.__dict__[name]except KeyError:raise AttributeError(\"\" % (name,))", "docstring": "Remove item from six.moves.", "id": "f851:m3"} {"signature": "def iterkeys(d, **kw):", "body": "return iter(getattr(d, _iterkeys)(**kw))", "docstring": "Return an iterator over the keys of a dictionary.", "id": "f851:m4"} {"signature": "def itervalues(d, **kw):", "body": "return iter(getattr(d, _itervalues)(**kw))", "docstring": "Return an iterator over the values of a dictionary.", "id": "f851:m5"} {"signature": "def iteritems(d, **kw):", "body": "return iter(getattr(d, _iteritems)(**kw))", "docstring": "Return an iterator over the (key, value) pairs of a dictionary.", "id": "f851:m6"} {"signature": "def iterlists(d, **kw):", "body": "return iter(getattr(d, _iterlists)(**kw))", "docstring": "Return an iterator over the (key, [values]) pairs of a dictionary.", "id": "f851:m7"} {"signature": "def 
with_metaclass(meta, *bases):", "body": "return meta(\"\", bases, {})", "docstring": "Create a base class with a metaclass.", "id": "f851:m8"} {"signature": "def add_metaclass(metaclass):", "body": "def wrapper(cls):orig_vars = cls.__dict__.copy()orig_vars.pop('', None)orig_vars.pop('', None)for slots_var in orig_vars.get('', ()):orig_vars.pop(slots_var)return metaclass(cls.__name__, cls.__bases__, orig_vars)return wrapper", "docstring": "Class decorator for creating a class with a metaclass.", "id": "f851:m9"} {"signature": "def pypi_render(source):", "body": "ALLOWED_SCHEMES = ''''''.split()settings_overrides = {\"\": , \"\": , \"\": , \"\": , }old_stderr = sys.stderrsys.stderr = s = io.StringIO()parts = Nonetry:document = publish_doctree(source=source,settings_overrides=settings_overrides)for node in document.traverse():if node.tagname == '':continueif node.hasattr(''):uri = node['']elif node.hasattr(''):uri = node['']else:continueo = urllib.parse.urlparse(uri)if o.scheme not in ALLOWED_SCHEMES:raise TransformError('')reader = readers.doctree.Reader(parser_name='')pub = Publisher(reader, source=io.DocTreeInput(document),destination_class=io.StringOutput)pub.set_writer('')pub.process_programmatic_settings(None, settings_overrides, None)pub.set_destination(None, None)pub.publish()parts = pub.writer.partsexcept:passsys.stderr = old_stderrif parts is None or len(s.getvalue()) > :return Noneelse:return parts['']", "docstring": "Copied (and slightly adapted) from pypi.description_tools", "id": "f855:m0"} {"signature": "def getProcessOwner(pid):", "body": "try:ownerUid = os.stat('' + str(pid)).st_uidexcept:return Nonetry:ownerName = pwd.getpwuid(ownerUid).pw_nameexcept:ownerName = Nonereturn {'' : ownerUid,'' : ownerName}", "docstring": "getProcessOwner - Get the process owner of a pid\n\n@param pid - process id\n\n@return - None if process not found or can't be determined. Otherwise, a dict: \n {\n uid - Owner UID\n name - Owner name, or None if one cannot be determined\n }", "id": "f857:m0"} {"signature": "def getProcessOwnerStr(pid):", "body": "ownerInfo = getProcessOwner(pid)if ownerInfo:if ownerInfo['']:owner = ownerInfo['']else:owner = str(ownerInfo[''])else:owner = ''return owner", "docstring": "getProcessOwner - Get Process owner of a pid as a string instead of components (#getProcessOwner)\n\n@return - Returns username if it can be determined, otherwise uid, otherwise \"unknown\"", "id": "f857:m1"} {"signature": "def getProcessCommandLineStr(pid):", "body": "try:with open('' %(int(pid),), '') as f:cmdline = f.read()return cmdline.replace('', '')except:return None", "docstring": "getProcessCommandLineStr - Gets a the commandline (program + arguments) of a given pid\n\n@param pid - Process ID\n\n@return - None if process not found or can't be determined. Otherwise a string of commandline.\n\n@note Caution, args may have spaces in them, and you cannot surmise from this method. If you care (like trying to replay a command), use getProcessCommandLineList instead", "id": "f857:m2"} {"signature": "def getProcessCommandLineList(pid):", "body": "try:with open('' %(int(pid),), '') as f:cmdline = f.read()return cmdline.split('')except:return None", "docstring": "getProcessCommandLineList - Gets the commandline (program + argumentS) of a given pid as a list.\n\n@param pid - Process ID\n\n@return - None if process not found or can't be determined. Otherwise a list representing argv. 
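with_metaclass and add_metaclass exist so a metaclass can be applied identically on Python 2 and 3. A small usage sketch with a hypothetical Registry metaclass; six.add_metaclass is the real, public entry point:

    import six

    class Registry(type):
        """Metaclass that records every class it creates."""
        classes = []

        def __new__(mcs, name, bases, namespace):
            cls = super(Registry, mcs).__new__(mcs, name, bases, namespace)
            mcs.classes.append(cls)
            return cls

    @six.add_metaclass(Registry)
    class Plugin(object):
        pass

    print(Registry.classes)  # [<class '__main__.Plugin'>]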
First argument is process name, remainder are arguments.\n\n@note - Use this if you care about whether a process had a space in the commands", "id": "f857:m3"} {"signature": "def getProcessCwd(pid):", "body": "try:cwd = os.readlink('' %(int(pid), ))return cwdexcept:return None", "docstring": "getProcessCwd - Gets the cwd (current working directory) of a given pid\n\n@param pid - Process ID\n\n@return - None if process not found or can't be determined. Otherwise, a string of the CWD", "id": "f857:m4"} {"signature": "def getAllRunningPids():", "body": "return [int(x) for x in os.listdir('') if x.isdigit()]", "docstring": "getAllRunningPids - Gets list of all pids that are running on a given system\n\n@return > - A list of pids (process IDs).", "id": "f857:m5"} {"signature": "def scanProcessForCwd(pid, searchPortion, isExactMatch=False):", "body": "try: try:pid = int(pid)except ValueError as e:sys.stderr.write('' %(str(type(pid)),))raise ecwd = getProcessCwd(pid)if not cwd:return NoneisMatch = Falseif isExactMatch is True:if searchPortion == cwd:isMatch = Trueelse:if searchPortion.endswith('') and searchPortion[:-] == cwd:isMatch = Trueelse:if searchPortion in cwd:isMatch = Trueelse:if searchPortion.endswith('') and searchPortion[:-] in cwd:isMatch = Trueif not isMatch:return Nonecmdline = getProcessCommandLineStr(pid)owner = getProcessOwnerStr(pid)return {'' : searchPortion,'' : pid,'' : owner,'' : cmdline,'' : cwd,}except OSError:return Noneexcept IOError:return Noneexcept FileNotFoundError:return Noneexcept PermissionError:return None", "docstring": "scanProcessForCwd - Searches a given pid's cwd for a given pattern\n\n @param pid - A running process ID on this system\n @param searchPortion - Any portion of directory to search\n @param isExactMatch Default False - If match should be exact, otherwise a partial match is performed.\n\n @return - If result is found, the following dict is returned. If no match found on the given pid, or pid is not found running, None is returned.\n {\n 'searchPortion' : The passed search pattern\n 'pid' : The passed pid (as an integer)\n 'owner' : String of process owner, or uid if no mapping can be found, or \"unknown\" if neither could be determined.\n 'cmdline' : Commandline string\n 'cwd' : The exact cwd of matched process\n }", "id": "f857:m6"} {"signature": "def scanAllProcessesForCwd(searchPortion, isExactMatch=False):", "body": "pids = getAllRunningPids()cwdResults = [scanProcessForCwd(pid, searchPortion, isExactMatch) for pid in pids]ret = {}for i in range(len(pids)):if cwdResults[i] is not None:ret[pids[i]] = cwdResults[i]return ret", "docstring": "scanAllProcessesForCwd - Scans all processes on the system for a given search pattern.\n\n @param searchPortion - Any portion of directory to search\n @param isExactMatch Default False - If match should be exact, otherwise a partial match is performed.\n\n@return - - A dictionary of pid -> cwdResults for each pid that matched the search pattern. 
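The process helpers above all read the Linux /proc filesystem: numeric entries of /proc are pids, /proc/<pid>/cwd is a symlink to the working directory, and the owner's uid maps to a username via pwd. A compressed usage sketch (Linux-only, error handling trimmed):

    import os
    import pwd

    def running_pids():
        # Numeric entries under /proc correspond to running process IDs.
        return [int(x) for x in os.listdir("/proc") if x.isdigit()]

    def cwd_and_owner(pid):
        try:
            cwd = os.readlink("/proc/%d/cwd" % pid)
            owner = pwd.getpwuid(os.stat("/proc/%d" % pid).st_uid).pw_name
            return cwd, owner
        except (OSError, KeyError):  # process exited, permission denied, or unknown uid
            return None, None

    for pid in running_pids():
        cwd, owner = cwd_and_owner(pid)
        if cwd and cwd.startswith("/home"):
            print(pid, owner, cwd)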
For format of \"cwdResults\", @see scanProcessForCwd", "id": "f857:m7"} {"signature": "def scanProcessForMapping(pid, searchPortion, isExactMatch=False, ignoreCase=False):", "body": "try: try:pid = int(pid)except ValueError as e:sys.stderr.write('' %(str(type(pid)),))raise ewith open('' %(pid,), '') as f:contents = f.read()lines = contents.split('')matchedMappings = []if isExactMatch is True:if ignoreCase is False:isMatch = lambda searchFor, searchIn : bool(searchFor == searchIn)else:isMatch = lambda searchFor, searchIn : bool(searchFor.lower() == searchIn.lower())else:if ignoreCase is False:isMatch = lambda searchFor, searchIn : bool(searchFor in searchIn)else:isMatch = lambda searchFor, searchIn : bool(searchFor.lower() in searchIn.lower())for line in lines:portion = ''.join(line.split('')[:]).lstrip()if isMatch(searchPortion, portion):matchedMappings.append('' + line)if len(matchedMappings) == :return Nonecmdline = getProcessCommandLineStr(pid)owner = getProcessOwnerStr(pid)return {'' : searchPortion,'' : pid,'' : owner,'' : cmdline,'' : matchedMappings,}except OSError:return Noneexcept IOError:return Noneexcept FileNotFoundError:return Noneexcept PermissionError:return None", "docstring": "scanProcessForMapping - Searches a given pid's mappings for a certain pattern.\n\n @param pid - A running process ID on this system\n @param searchPortion - A mapping for which to search, example: libc or python or libz.so.1. Give empty string to return all mappings.\n @param isExactMatch Default False - If match should be exact, otherwise a partial match is performed.\n @param ignoreCase Default False - If True, search will be performed case-insensitively\n\n @return - If result is found, the following dict is returned. If no match found on the given pid, or pid is not found running, None is returned.\n {\n 'searchPortion' : The passed search pattern\n 'pid' : The passed pid (as an integer)\n 'owner' : String of process owner, or uid if no mapping can be found, or \"unknown\" if neither could be determined.\n 'cmdline' : Commandline string\n 'matchedMappings' : All mappings likes that matched the given search pattern\n }", "id": "f857:m8"} {"signature": "def scanAllProcessesForMapping(searchPortion, isExactMatch=False, ignoreCase=False):", "body": "pids = getAllRunningPids()mappingResults = [scanProcessForMapping(pid, searchPortion, isExactMatch, ignoreCase) for pid in pids]ret = {}for i in range(len(pids)):if mappingResults[i] is not None:ret[pids[i]] = mappingResults[i]return ret", "docstring": "scanAllProcessesForMapping - Scans all processes on the system for a given search pattern.\n\n @param searchPortion - A mapping for which to search, example: libc or python or libz.so.1. Give empty string to return all mappings.\n @param isExactMatch Default False - If match should be exact, otherwise a partial match is performed.\n @param ignoreCase Default False - If True, search will be performed case-insensitively\n\n@return - - A dictionary of pid -> mappingResults for each pid that matched the search pattern. 
For format of \"mappingResults\", @see scanProcessForMapping", "id": "f857:m9"} {"signature": "def scanProcessForOpenFile(pid, searchPortion, isExactMatch=True, ignoreCase=False):", "body": "try:try:pid = int(pid)except ValueError as e:sys.stderr.write('' %(str(type(pid)),))raise eprefixDir = \"\" % (pid,)processFDs = os.listdir(prefixDir)matchedFDs = []matchedFilenames = []if isExactMatch is True:if ignoreCase is False:isMatch = lambda searchFor, totalPath : bool(searchFor == totalPath)else:isMatch = lambda searchFor, totalPath : bool(searchFor.lower() == totalPath.lower())else:if ignoreCase is False:isMatch = lambda searchFor, totalPath : bool(searchFor in totalPath)else:isMatch = lambda searchFor, totalPath : bool(searchFor.lower() in totalPath.lower())for fd in processFDs:fdPath = os.readlink(prefixDir + '' + fd)if isMatch(searchPortion, fdPath):matchedFDs.append(fd)matchedFilenames.append(fdPath)if len(matchedFDs) == :return Nonecmdline = getProcessCommandLineStr(pid)owner = getProcessOwnerStr(pid)return {'' : searchPortion,'' : pid,'' : owner,'' : cmdline,'' : matchedFDs,'' : matchedFilenames, }except OSError:return Noneexcept IOError:return Noneexcept FileNotFoundError:return Noneexcept PermissionError:return None", "docstring": "scanProcessForOpenFile - Scans open FDs for a given pid to see if any are the provided searchPortion\n\n @param searchPortion - Filename to check\n @param isExactMatch Default True - If match should be exact, otherwise a partial match is performed.\n @param ignoreCase Default False - If True, search will be performed case-insensitively\n\n@return - If result is found, the following dict is returned. If no match found on the given pid, or the pid is not found running, None is returned.\n {\n 'searchPortion' : The search portion provided\n 'pid' : The passed pid (as an integer)\n 'owner' : String of process owner, or \"unknown\" if one could not be determined\n 'cmdline' : Commandline string\n 'fds' : List of file descriptors assigned to this file (could be mapped several times)\n 'filenames' : List of the filenames matched\n }", "id": "f857:m10"} {"signature": "def scanAllProcessesForOpenFile(searchPortion, isExactMatch=True, ignoreCase=False):", "body": "pids = getAllRunningPids()mappingResults = [scanProcessForOpenFile(pid, searchPortion, isExactMatch, ignoreCase) for pid in pids]ret = {}for i in range(len(pids)):if mappingResults[i] is not None:ret[pids[i]] = mappingResults[i]return ret", "docstring": "scanAllProcessessForOpenFile - Scans all processes on the system for a given filename\n\n @param searchPortion - Filename to check\n @param isExactMatch Default True - If match should be exact, otherwise a partial match is performed.\n @param ignoreCase Default False - If True, search will be performed case-insensitively\n\n@return - - A dictionary of pid -> mappingResults for each pid that matched the search pattern. 
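scanProcessForOpenFile works by listing /proc/<pid>/fd and resolving each descriptor with readlink. A self-contained sketch that reports which pids currently have a given path open:

    import os

    def pids_with_file_open(path):
        matches = {}
        for entry in os.listdir("/proc"):
            if not entry.isdigit():
                continue
            fd_dir = "/proc/%s/fd" % entry
            try:
                fds = [fd for fd in os.listdir(fd_dir)
                       if os.readlink(os.path.join(fd_dir, fd)) == path]
            except OSError:  # process exited or we lack permission
                continue
            if fds:
                matches[int(entry)] = fds
        return matches

    print(pids_with_file_open("/var/log/syslog"))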
For format of \"mappingResults\", @see scanProcessForOpenFile", "id": "f857:m11"} {"signature": "def database_creator():", "body": "global databaseif database is None:database = Database()return database", "docstring": "make sure you have only one instance of Database\n\n :return: DataBase's instance", "id": "f863:m0"} {"signature": "def get_entry_urls(self, urls=list()):", "body": "try:assert type(urls) in VALIDATE_URLSexcept AssertionError:error_message('')if len(urls):for i in range(self.main_thread_number):self.append_main_thread()for url in urls:self.main_queue.put(url)else:error_message('')", "docstring": ":param urls: url you want to fetch\n:return: None", "id": "f866:c0:m1"} {"signature": "def append_main_thread(self):", "body": "thread = MainThread(main_queue=self.main_queue,main_spider=self.main_spider,branch_spider=self.branch_spider)thread.daemon = Truethread.start()", "docstring": "create & start main thread\n\n :return: None", "id": "f866:c0:m2"} {"signature": "def run(self):", "body": "config = config_creator()debug = config.debugbranch_thread_sleep = config.branch_thread_sleepwhile :url = self.branch_queue.get()if debug:print(''.format(url))branch_spider = self.branch_spider(url)sleep(random.randrange(*branch_thread_sleep))branch_spider.request_page()if debug:print(''.format(url))self.branch_queue.task_done()", "docstring": "run your main spider here\n as for branch spider result data, you can return everything or do whatever with it\n in your own code\n\n :return: None", "id": "f867:c0:m1"} {"signature": "def run(self):", "body": "global existed_urls_listconfig = config_creator()debug = config.debugmain_thread_sleep = config.main_thread_sleepbranch_thread_num = config.branch_thread_numwhile :url = self.main_queue.get()if debug:print(''.format(url))main_spider = self.main_spider(url)sleep(random.randrange(*main_thread_sleep))links = main_spider.request_urls()try:assert type(links) in VALIDATE_URLSexcept AssertionError:error_message('')links = list()branch_queue = queue.Queue(branch_thread_num)for i in range(branch_thread_num):branch_thread = BranchThread(branch_queue=branch_queue,branch_spider=self.branch_spider)branch_thread.daemon = Truebranch_thread.start()for link in links:if link not in existed_urls_list:existed_urls_list.append(link)branch_queue.put(link)branch_queue.join()if debug:print(''.format(url))self.main_queue.task_done()", "docstring": "run your main spider here, and get a list/tuple of url as result\n then make the instance of branch thread\n\n :return: None", "id": "f869:c0:m1"} {"signature": "def colorful_text(text, color=Fore.RESET):", "body": "return color + text + Fore.RESET", "docstring": "make target text colorful\n\n :param text: target text\n :param color\n :return: colored text", "id": "f872:m0"} {"signature": "def error_message(message=''):", "body": "print(colorful_text(message, Fore.RED))", "docstring": "print the error message in red color\n\n :param message: error message\n :return: None", "id": "f872:m1"} {"signature": "def set_authenticated(self, is_authenticated):", "body": "user = self.request.useruser.is_authenticated.return_value = is_authenticateduser.is_authenticated.__bool__ = lambda self: is_authenticateduser.is_authenticated.__nonzero__ = lambda self: is_authenticated", "docstring": "Set whether user is authenticated in the request.", "id": "f876:c1:m1"} {"signature": "def is_view_func_public(func):", "body": "return getattr(func, '', False)", "docstring": "Returns whether a view is public or not (ie/ has the 
STRONGHOLD_IS_PUBLIC\nattribute set)", "id": "f882:m0"} {"signature": "def set_view_func_public(func):", "body": "setattr(func, '', True)", "docstring": "Set the STRONGHOLD_IS_PUBLIC attribute on a given function to True", "id": "f882:m1"} {"signature": "def is_authenticated(user):", "body": "try:return user.is_authenticated()except TypeError:return user.is_authenticated", "docstring": "make compatible with django 1 and 2", "id": "f883:m0"} {"signature": "def public(function):", "body": "orig_func = functionwhile isinstance(orig_func, partial):orig_func = orig_func.funcset_view_func_public(orig_func)return function", "docstring": "Decorator for public views that do not require authentication\nSets an attribute in the fuction STRONGHOLD_IS_PUBLIC to True", "id": "f884:m0"} {"signature": "def get_sanitizer(self):", "body": "sanitizer = self.sanitizerif not sanitizer:default_sanitizer = settings.CONFIG.get(self.SANITIZER_KEY)field_settings = getattr(self, '', None)if isinstance(field_settings, six.string_types):profiles = settings.CONFIG.get(self.SANITIZER_PROFILES_KEY, {})sanitizer = profiles.get(field_settings, default_sanitizer)else:sanitizer = default_sanitizerif isinstance(sanitizer, six.string_types):sanitizer = import_string(sanitizer)return sanitizer or noop", "docstring": "Get the field sanitizer.\n\nThe priority is the first defined in the following order:\n- A sanitizer provided to the widget.\n- Profile (field settings) specific sanitizer, if defined in settings.\n- Global sanitizer defined in settings.\n- Simple no-op sanitizer which just returns the provided value.", "id": "f903:c0:m1"} {"signature": "def get_field_settings(self):", "body": "field_settings = Noneif self.field_settings:if isinstance(self.field_settings, six.string_types):profiles = settings.CONFIG.get(self.PROFILE_KEY, {})field_settings = profiles.get(self.field_settings)else:field_settings = self.field_settingsreturn field_settings", "docstring": "Get the field settings, if the configured setting is a string try\nto get a 'profile' from the global config.", "id": "f907:c0:m2"} {"signature": "def value_from_datadict(self, *args, **kwargs):", "body": "value = super(RichTextWidget, self).value_from_datadict(*args, **kwargs)if value is not None:value = self.get_sanitizer()(value)return value", "docstring": "Pass the submitted value through the sanitizer before returning it.", "id": "f907:c0:m4"} {"signature": "def clean(self, value, model_instance):", "body": "value = self.to_python(value)if value is not None:value = self.get_sanitizer()(value)self.validate(value, model_instance)self.run_validators(value)return value", "docstring": "Convert the value's type, sanitize it, and run validation. Validation\nerrors from to_python() and validate() are propagated. 
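The public decorator, set_view_func_public and is_view_func_public cooperate through a single attribute (STRONGHOLD_IS_PUBLIC, per the docstrings), unwrapping functools.partial objects so the flag lands on the real view function. A minimal standalone sketch:

    from functools import partial

    def set_view_func_public(func):
        setattr(func, "STRONGHOLD_IS_PUBLIC", True)

    def is_view_func_public(func):
        return getattr(func, "STRONGHOLD_IS_PUBLIC", False)

    def public(function):
        # Unwrap functools.partial layers so the marker sits on the underlying view.
        orig_func = function
        while isinstance(orig_func, partial):
            orig_func = orig_func.func
        set_view_func_public(orig_func)
        return function

    @public
    def landing_page(request):
        return "ok"

    assert is_view_func_public(landing_page)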
Return the\ncorrect value if no error is raised.", "id": "f908:c0:m2"} {"signature": "@task(name='',help={'': \"\",},)def fresh_cookies(ctx, mold=''):", "body": "mold = mold or \"\" tmpdir = os.path.join(tempfile.gettempdir(), \"\")if os.path.isdir(''):passif os.path.isdir(tmpdir):shutil.rmtree(tmpdir)if os.path.exists(mold):shutil.copytree(mold, tmpdir, ignore=shutil.ignore_patterns(\"\", \"\", \"\",))else:ctx.run(\"\".format(mold, tmpdir))shutil.copy2(\"\", tmpdir)with pushd(''):ctx.run(\"\".format(tmpdir))if os.path.exists(''):ctx.run(\"\")", "docstring": "Refresh the project from the original cookiecutter template.", "id": "f913:m0"} {"signature": "@task(help={'': \"\",}) def ci(ctx):", "body": "opts = ['']if os.environ.get('', '').lower() == '':opts += ['']else:opts += ['']ctx.run(\"\".format(''.join(opts)))", "docstring": "Perform continuous integration tasks.", "id": "f913:m1"} {"signature": "def setup(app):", "body": "lexer = MarkdownLexer()for alias in lexer.aliases:app.add_lexer(alias, lexer)return dict(version=__version__)", "docstring": "Initializer for Sphinx extension API.\n\n See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions.", "id": "f916:m0"} {"signature": "def srcfile(*args):", "body": "return os.path.join(*((project_root,) + args))", "docstring": "Helper for path building.", "id": "f921:m0"} {"signature": "def _build_metadata(): ", "body": "expected_keys = ('', '', '', '', '', '', '')metadata = {}with io.open(srcfile('', package_name, ''), encoding='') as handle:pkg_init = handle.read()metadata[''] = re.search(r'', pkg_init, re.DOTALL|re.MULTILINE).group()for line in pkg_init.splitlines():match = re.match(r\"\"\"\"\"\".format(''.join(expected_keys)), line)if match:metadata[match.group()] = match.group()if not all(i in metadata for i in expected_keys):raise RuntimeError(\"\".format(name, ''.join(sorted(set(expected_keys) - set(metadata.keys()))),))text = metadata[''].strip()if text:metadata[''], text = text.split('', )metadata[''] = ''.join(metadata[''].split()).strip() + '' metadata[''] = textwrap.dedent(text).strip()metadata[''] = metadata[''].replace('', '').strip().split()requirements_files = dict(install = '',setup = '',test = '',)requires = {}for key, filename in requirements_files.items():requires[key] = []if os.path.exists(srcfile(filename)):with io.open(srcfile(filename), encoding='') as handle:for line in handle:line = line.strip()if line and not line.startswith(''):if any(line.startswith(i) for i in ('', '', '')):line = line.split('')[]requires[key].append(line)if not any('' == re.split('', i.lower())[] for i in requires['']):requires[''].append('') console_scripts = []for path, dirs, files in os.walk(srcfile('', package_name)):dirs = [i for i in dirs if not i.startswith('')]if '' in files:path = path[len(srcfile('') + os.sep):]appname = path.split(os.sep)[-]with io.open(srcfile('', path, ''), encoding='') as handle:for line in handle.readlines():match = re.match(r\"\"\"\"\"\", line)if match:appname = match.group()console_scripts.append(''.format(appname, path.replace(os.sep, '')))candidate_files = ['', '','', '', '', '','', '', '',]data_files = defaultdict(list)for filename in candidate_files:if os.path.exists(srcfile(filename)):data_files[''].append(filename)classifiers = []for classifiers_txt in ('', ''):classifiers_txt = srcfile(classifiers_txt)if os.path.exists(classifiers_txt):with io.open(classifiers_txt, encoding='') as handle:classifiers = [i.strip() for i in handle if i.strip() and not 
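The invoke tasks above follow the usual pattern of building a flag list and handing one command line to ctx.run. A hedged re-sketch of the ci task; the environment variable and pytest flags here are placeholders, since the original literals are not preserved in this record:

    import os
    import invoke

    @invoke.task()
    def ci(ctx):
        """Perform continuous integration tasks."""
        opts = ["--color=yes"]
        if os.environ.get("CI", "").lower() == "true":   # placeholder env check
            opts += ["--quiet"]
        else:
            opts += ["--verbose"]
        ctx.run("pytest {} tests".format(" ".join(opts)))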
i.startswith('')]breakentry_points.setdefault('', []).extend(console_scripts)metadata.update(dict(name = name,package_dir = {'': ''},packages = find_packages(srcfile(''), exclude=['']),data_files = data_files.items(),zip_safe = False,include_package_data = True,install_requires = requires[''],setup_requires = requires[''],tests_require = requires[''],classifiers = classifiers,cmdclass = dict(test = PyTest,),entry_points = entry_points,))return metadata", "docstring": "Return project's metadata as a dict.", "id": "f921:m1"} {"signature": "def _prepare_drb_allele_name(self, parsed_beta_allele):", "body": "if \"\" not in parsed_beta_allele.gene:raise ValueError(\"\" % parsed_beta_allele)return \"\" % (parsed_beta_allele.gene,parsed_beta_allele.allele_family,parsed_beta_allele.allele_code)", "docstring": "Assume that we're dealing with a human DRB allele\nwhich NetMHCIIpan treats differently because there is\nlittle population diversity in the DR-alpha gene", "id": "f924:c0:m1"} {"signature": "def prepare_allele_name(self, allele_name):", "body": "parsed_alleles = parse_classi_or_classii_allele_name(allele_name)if len(parsed_alleles) == :allele = parsed_alleles[]if allele.species == \"\":return \"\" % (allele.species,allele.gene,allele.allele_code)return self._prepare_drb_allele_name(allele)else:alpha, beta = parsed_allelesif \"\" in alpha.gene:return self._prepare_drb_allele_name(beta)return \"\" % (alpha.gene,alpha.allele_family,alpha.allele_code,beta.gene,beta.allele_family,beta.allele_code)", "docstring": "netMHCIIpan has some unique requirements for allele formats,\nexpecting the following forms:\n - DRB1_0101 (for non-alpha/beta pairs)\n - HLA-DQA10501-DQB10636 (for alpha and beta pairs)\n\nOther than human class II alleles, the only other alleles that\nnetMHCIIpan accepts are the following mouse alleles:\n - H-2-IAb\n - H-2-IAd", "id": "f924:c0:m2"} {"signature": "def __init__(self,alleles,default_peptide_lengths=[],predictor=None,models_path=None):", "body": "from mhcflurry import Class1AffinityPredictorBasePredictor.__init__(self,alleles=alleles,default_peptide_lengths=default_peptide_lengths,min_peptide_length=,max_peptide_length=)if predictor:self.predictor = predictorelif models_path:logging.info(\"\" % models_path)self.predictor = Class1AffinityPredictor.load(models_path)else:self.predictor = Class1AffinityPredictor.load()for allele in self.alleles:if allele not in self.predictor.supported_alleles:raise UnsupportedAllele(allele)", "docstring": "Parameters\n-----------\npredictor : mhcflurry.Class1AffinityPredictor (optional)\n MHCflurry predictor to use\n\nmodels_path : string\n Models dir to use if predictor argument is None", "id": "f925:c0:m0"} {"signature": "def predict_peptides(self, peptides):", "body": "from mhcflurry.encodable_sequences import EncodableSequencesbinding_predictions = []encodable_sequences = EncodableSequences.create(peptides)for allele in self.alleles:predictions_df = self.predictor.predict_to_dataframe(encodable_sequences, allele=allele)for (_, row) in predictions_df.iterrows():binding_prediction = BindingPrediction(allele=allele,peptide=row.peptide,affinity=row.prediction,percentile_rank=(row.prediction_percentileif '' in row else nan),prediction_method_name=\"\")binding_predictions.append(binding_prediction)return BindingPredictionCollection(binding_predictions)", "docstring": "Predict MHC affinity for peptides.", "id": "f925:c0:m1"} {"signature": "def NetMHC(alleles,default_peptide_lengths=[],program_name=\"\"):", "body": "with open(os.devnull, '') as 
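Putting the MHCflurry wrapper to work only needs alleles and peptide lengths; predictions come back as BindingPrediction objects. This usage sketch assumes the class is exported at package level as mhctools.MHCflurry and that mhcflurry's trained models have already been downloaded:

    from mhctools import MHCflurry  # assumed package-level export of the wrapper above

    predictor = MHCflurry(alleles=["HLA-A*02:01"], default_peptide_lengths=[9])
    binding_predictions = predictor.predict_subsequences(
        {"protein1": "SIINFEKLQYFPEITHII"})
    for bp in binding_predictions:
        print(bp.source_sequence_name, bp.offset, bp.peptide,
              bp.affinity, bp.percentile_rank)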
devnull:help_output = check_output([program_name, \"\"], stderr=devnull)help_output_str = help_output.decode(\"\", \"\")substring_to_netmhc_class = {\"\": NetMHC4,\"\": NetMHC3,}successes = []for substring, netmhc_class in substring_to_netmhc_class.items():if substring in help_output_str:successes.append(netmhc_class)if len(successes) > :raise SystemError(\"\"\"\" % program_name)if len(successes) == :raise SystemError(\"\"% program_name)netmhc_class = successes[]return netmhc_class(alleles=alleles,default_peptide_lengths=default_peptide_lengths,program_name=program_name)", "docstring": "This function wraps NetMHC3 and NetMHC4 to automatically detect which class\nto use. Currently based on running the '-h' command and looking for\ndiscriminating substrings between the versions.", "id": "f927:m0"} {"signature": "def predict(self, sequences):", "body": "with tempfile.NamedTemporaryFile(suffix=\"\", mode=\"\") as input_fd:for (i, sequence) in enumerate(sequences):input_fd.write(\"\" % i)input_fd.write(sequence)input_fd.write(\"\")input_fd.flush()try:output = subprocess.check_output([\"\", input_fd.name])except subprocess.CalledProcessError as e:logging.error(\"\" % (e, e.output))raiseparsed = self.parse_netchop(output)assert len(parsed) == len(sequences),\"\" % (len(sequences), len(parsed))assert [len(x) for x in parsed] == [len(x) for x in sequences]return parsed", "docstring": "Return netChop predictions for each position in each sequence.\n\nParameters\n-----------\nsequences : list of string\n Amino acid sequences to predict cleavage for\n\nReturns\n-----------\nlist of list of float\n\nThe i'th list corresponds to the i'th sequence. Each list gives\nthe cleavage probability for each position in the sequence.", "id": "f928:c0:m0"} {"signature": "@staticmethoddef parse_netchop(netchop_output):", "body": "line_iterator = iter(netchop_output.decode().split(\"\"))scores = []for line in line_iterator:if \"\" in line and '' in line and '' in line:scores.append([])if \"\" not in next(line_iterator):raise ValueError(\"\")line = next(line_iterator)while '' not in line:score = float(line.split()[])scores[-].append(score)line = next(line_iterator)return scores", "docstring": "Parse netChop stdout.", "id": "f928:c0:m1"} {"signature": "def create_input_peptides_files(peptides,max_peptides_per_file=None,group_by_length=False):", "body": "if group_by_length:peptide_lengths = {len(p) for p in peptides}peptide_groups = {l: [] for l in peptide_lengths}for p in peptides:peptide_groups[len(p)].append(p)else:peptide_groups = {\"\": peptides}file_names = []for key, group in peptide_groups.items():n_peptides = len(group)if not max_peptides_per_file:max_peptides_per_file = n_peptidesinput_file = Nonefor i, p in enumerate(group):if i % max_peptides_per_file == :if input_file is not None:file_names.append(input_file.name)input_file.close()input_file = make_writable_tempfile(prefix_number=i // max_peptides_per_file,prefix_name=key,suffix=\"\")input_file.write(\"\" % p)if input_file is not None:file_names.append(input_file.name)input_file.close()return file_names", "docstring": "Creates one or more files containing one peptide per line,\nreturns names of files.", "id": "f929:m1"} {"signature": "def __init__(self,peptide,allele,affinity,percentile_rank,source_sequence_name=None,offset=,log_affinity=None,prediction_method_name=\"\"):", "body": "if invalid_affinity(affinity) and np.isfinite(log_affinity):affinity = ** (-log_affinity + )if invalid_affinity(affinity):raise ValueError(\"\" % (affinity,peptide,allele))if 
invalid_percentile_rank(percentile_rank):raise ValueError(\"\" % (percentile_rank, peptide, allele))self.source_sequence_name = source_sequence_nameself.offset = offsetself.allele = alleleself.peptide = peptideself.affinity = affinityself.percentile_rank = percentile_rankself.prediction_method_name = prediction_method_name", "docstring": "Parameters\n----------\npeptide : str\n Short amino acid sequence\n\nallele : str\n HLA allele, e.g. HLA-A*02:01\n\naffinity : float\n Predicted binding affinity\n\npercentile_rank : float\n Percentile rank of the binding affinity for that allele\n\nsource_sequence_name : str\n Name of sequence from which peptide was extracted\n\noffset : int\n Base0 starting position in source sequence that all epitopes were\n extracted from\n\nlog_affinity : float, optional\n NetMHC sometimes gives invalid IC50 values but we can still\n reconstruct the value from its (1.0 - log_50000(IC50)) score.\n\nprediction_method_name : str, optional\n Name of predictor used to generate this prediction.", "id": "f930:c0:m0"} {"signature": "def clone_with_updates(self, **kwargs):", "body": "fields_dict = self.to_dict()fields_dict.update(kwargs)return BindingPrediction(**fields_dict)", "docstring": "Returns new BindingPrediction with updated fields", "id": "f930:c0:m2"} {"signature": "@propertydef length(self):", "body": "return len(self.peptide)", "docstring": "Length of peptide, preserved for backwards compatibility", "id": "f930:c0:m4"} {"signature": "@propertydef value(self):", "body": "return self.affinity", "docstring": "Alias for affinity preserved for backwards compatibility", "id": "f930:c0:m5"} {"signature": "def run_command(args, **kwargs):", "body": "assert len(args) > start_time = time.time()process = AsyncProcess(args, **kwargs)process.wait()elapsed_time = time.time() - start_timelogger.info(\"\", args[], elapsed_time)", "docstring": "Given a list whose first element is a command name, followed by arguments,\nexecute it and show timing info.", "id": "f931:m0"} {"signature": "def run_multiple_commands_redirect_stdout(multiple_args_dict,print_commands=True,process_limit=-,polling_freq=,**kwargs):", "body": "assert len(multiple_args_dict) > assert all(len(args) > for args in multiple_args_dict.values())assert all(hasattr(f, '') for f in multiple_args_dict.keys())if process_limit < :logger.debug(\"\" % cpu_count())process_limit = cpu_count()start_time = time.time()processes = Queue(maxsize=process_limit)def add_to_queue(process):process.start()if print_commands:handler = logging.FileHandler(process.redirect_stdout_file.name)handler.setLevel(logging.DEBUG)logger.addHandler(handler)logger.debug(\"\".join(process.args))logger.removeHandler(handler)processes.put(process)for f, args in multiple_args_dict.items():p = AsyncProcess(args,redirect_stdout_file=f,**kwargs)if not processes.full():add_to_queue(p)else:while processes.full():to_remove = []for possibly_done in processes.queue:if possibly_done.poll() is not None:possibly_done.wait()to_remove.append(possibly_done)if to_remove:for process_to_remove in to_remove:processes.queue.remove(process_to_remove)breaktime.sleep(polling_freq)add_to_queue(p)while not processes.empty():processes.get().wait()elapsed_time = time.time() - start_timelogger.info(\"\",len(multiple_args_dict),elapsed_time)", "docstring": "Run multiple shell commands in parallel, write each of their\nstdout output to files associated with each command.\n\nParameters\n----------\nmultiple_args_dict : dict\n A dictionary whose keys are files and values are args 
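BindingPrediction falls back to reconstructing the affinity from log_affinity when the reported IC50 is unusable. NetMHC-family tools define that score as 1 - log_50000(IC50), so inverting it recovers the nanomolar value; the numbers below line up with the NetMHC 3.x sample row (score 0.437, affinity 441 nM):

    import math

    def ic50_from_score(score, max_ic50=50000.0):
        # Invert score = 1 - log_{max_ic50}(IC50)
        return max_ic50 ** (1.0 - score)

    def score_from_ic50(ic50, max_ic50=50000.0):
        return 1.0 - math.log(ic50, max_ic50)

    score = score_from_ic50(441.0)              # ~0.437
    assert abs(ic50_from_score(score) - 441.0) < 1e-6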
list.\n Run each args list as a subprocess and write stdout to the\n corresponding file.\n\nprint_commands : bool\n Print shell commands before running them.\n\nprocess_limit : int\n Limit the number of concurrent processes to this number. 0\n if there is no limit, -1 to use max number of processors\n\npolling_freq : int\n Number of seconds between checking for done processes, if\n we have a process limit", "id": "f931:m1"} {"signature": "def poll(self):", "body": "if self.process is None:self.start()return self.process.poll()", "docstring": "Peeks at whether the process is done or not, without\nwaiting for it. Leaves exception handling and such to wait().", "id": "f931:c0:m2"} {"signature": "def _parse_iedb_response(response):", "body": "if len(response) == :raise ValueError(\"\")df = pd.read_csv(io.BytesIO(response), delim_whitespace=True, header=)assert type(df) == pd.DataFramedf = pd.DataFrame(df)if len(df) == :raise ValueError(\"\" % (response,))required_columns = [\"\",\"\",\"\",\"\",\"\",]for column in required_columns:if column not in df.columns:raise ValueError(\"\"\"\" % (column,df.ix[],response))df = df.rename(columns={\"\": \"\",\"\": \"\"})return df", "docstring": "Take the binding predictions returned by IEDB's web API\n and parse them into a DataFrame\n\n Expect response to look like:\n allele seq_num start end length peptide ic50 percentile_rank\n HLA-A*01:01 1 2 10 9 LYNTVATLY 2145.70 3.7\n HLA-A*01:01 1 5 13 9 TVATLYCVH 2216.49 3.9\n HLA-A*01:01 1 7 15 9 ATLYCVHQR 2635.42 5.1\n HLA-A*01:01 1 4 12 9 NTVATLYCV 6829.04 20\n HLA-A*01:01 1 1 9 9 SLYNTVATL 8032.38 24\n HLA-A*01:01 1 8 16 9 TLYCVHQRI 8853.90 26\n HLA-A*01:01 1 3 11 9 YNTVATLYC 9865.62 29\n HLA-A*01:01 1 6 14 9 VATLYCVHQ 27575.71 58\n HLA-A*01:01 1 10 18 9 YCVHQRIDV 48929.64 74\n HLA-A*01:01 1 9 17 9 LYCVHQRID 50000.00 75", "id": "f933:m0"} {"signature": "def _query_iedb(request_values, url):", "body": "data = urlencode(request_values)req = Request(url, data.encode(\"\"))response = urlopen(req).read()return _parse_iedb_response(response)", "docstring": "Call into IEDB's web API for MHC binding prediction using request dictionary\nwith fields:\n - \"method\"\n - \"length\"\n - \"sequence_text\"\n - \"allele\"\n\nParse the response into a DataFrame.", "id": "f933:m1"} {"signature": "def predict_subsequences(self, sequence_dict, peptide_lengths=None):", "body": "sequence_dict = check_sequence_dictionary(sequence_dict)peptide_lengths = self._check_peptide_lengths(peptide_lengths)binding_predictions = []expected_peptides = set([])normalized_alleles = []for key, amino_acid_sequence in sequence_dict.items():for l in peptide_lengths:for i in range(len(amino_acid_sequence) - l + ):expected_peptides.add(amino_acid_sequence[i:i + l])self._check_peptide_inputs(expected_peptides)for allele in self.alleles:allele = normalize_allele_name(allele, omit_dra1=True)normalized_alleles.append(allele)request = self._get_iedb_request_params(amino_acid_sequence, allele)logger.info(\"\",self.url,request)response_df = _query_iedb(request, self.url)for _, row in response_df.iterrows():binding_predictions.append(BindingPrediction(source_sequence_name=key,offset=row[''] - ,allele=row[''],peptide=row[''],affinity=row[''],percentile_rank=row[''],prediction_method_name=\"\" + self.prediction_method))self._check_results(binding_predictions,alleles=normalized_alleles,peptides=expected_peptides)return BindingPredictionCollection(binding_predictions)", "docstring": "Given a dictionary mapping unique keys to amino acid sequences,\n run MHC binding 
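run_multiple_commands_redirect_stdout is a bounded-parallelism loop: keep at most process_limit subprocesses alive, poll for finished ones, and point each command's stdout at its own file. A stripped-down illustration of that pattern (not the library function itself; assumes a POSIX echo):

    import subprocess
    import time

    def run_all(files_to_args, process_limit=4, polling_freq=0.5):
        pending = list(files_to_args.items())
        active = []
        while pending or active:
            # Drop finished processes, then top the pool back up to the limit.
            active = [p for p in active if p.poll() is None]
            while pending and len(active) < process_limit:
                outfile, args = pending.pop()
                active.append(subprocess.Popen(args, stdout=outfile))
            time.sleep(polling_freq)

    with open("one.txt", "w") as f1, open("two.txt", "w") as f2:
        run_all({f1: ("echo", "hello"), f2: ("echo", "world")}, process_limit=2)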
predictions on all candidate epitopes extracted from\n sequences and return a EpitopeCollection.\n\n Parameters\n ----------\n fasta_dictionary : dict or string\n Mapping of protein identifiers to protein amino acid sequences.\n If string then converted to dictionary.", "id": "f933:c0:m4"} {"signature": "def __init__(self,program_name,alleles,parse_output_fn,supported_alleles_flag,input_file_flag,length_flag,allele_flag,peptide_mode_flags=[\"\"],tempdir_flag=None,extra_flags=[],max_peptides_per_file= ** ,process_limit=-,default_peptide_lengths=[],group_peptides_by_length=False,min_peptide_length=,max_peptide_length=None,):", "body": "require_string(program_name, \"\")self.program_name = program_nameif supported_alleles_flag is not None:require_string(supported_alleles_flag, \"\")self.supported_alleles_flag = supported_alleles_flagrequire_string(input_file_flag, \"\")self.input_file_flag = input_file_flagrequire_string(length_flag, \"\")self.length_flag = length_flagrequire_string(allele_flag, \"\")self.allele_flag = allele_flagrequire_iterable_of(peptide_mode_flags, string_types)self.peptide_mode_flags = peptide_mode_flagsif tempdir_flag is not None:require_string(tempdir_flag, \"\")self.tempdir_flag = tempdir_flagrequire_iterable_of(extra_flags, string_types)self.extra_flags = extra_flagsrequire_integer(max_peptides_per_file,\"\")self.max_peptides_per_file = max_peptides_per_filerequire_integer(process_limit, \"\")self.process_limit = process_limitself.parse_output_fn = parse_output_fnif isinstance(default_peptide_lengths, int):default_peptide_lengths = [default_peptide_lengths]self.group_peptides_by_length = group_peptides_by_lengthif self.supported_alleles_flag:valid_alleles = self._determine_supported_alleles(self.program_name,self.supported_alleles_flag)else:try:run_command([self.program_name])except:raise SystemError(\"\" % self.program_name)valid_alleles = Nonetry:BasePredictor.__init__(self,alleles=alleles,valid_alleles=valid_alleles,default_peptide_lengths=default_peptide_lengths,min_peptide_length=min_peptide_length,max_peptide_length=max_peptide_length)except UnsupportedAllele as e:if self.supported_alleles_flag:additional_message = (\"\" % (self.program_name,self.supported_alleles_flag))else:additional_message = \"\"raise UnsupportedAllele(str(e) + additional_message)", "docstring": "Parameters\n----------\nprogram_name : str\n Name of prediction program to run\n (e.g. \"netMHCcons\" or \"netMHCIIpan\")\n\nalleles : list of str\n MHC alleles\n\nsupported_alleles_flag : str\n Flag to pass to the predictor to get a list of supported alleles\n (e.g. \"-A\", \"-list\", \"-listMHC\")\n\nparse_output_fn : fn\n Takes the stdout string from the predictor and returns a collection\n of BindingPrediction objects\n\ninput_file_flag : str\n How to specify the input FASTA file of source sequences (e.g. \"-f\")\n\nlength_flag : str\n How to specify the desired predicted peptide length (e.g. \"-length\")\n\nallele_flag : str\n How to specify the allele we want predictions for (e.g. \"-a\")\n\npeptide_mode_flags : list of str\n How to switch from the default FASTA subsequences input mode to\n where peptides are explicitly given one per line of a text file.\n\ntempdir_flag : str, optional\n How to specify the predictor's temporary directory (e.g. 
\"-tdir\")\n\nextra_flags : list of str\n Extra flags to pass to the predictor\n\nmax_peptides_per_file : int, optional\n Maximum number of lines per file when predicting peptides directly.\n\nprocess_limit : int, optional\n Maximum number of parallel processes to start\n (0 for no limit, -1 for use all available processors)\n\ndefault_peptide_lengths : list of int, optional\n When making predictions across subsequences of protein sequences,\n what peptide lengths to predict for.\n\ngroup_peptides_by_length : bool\n Run commandline predictor on groups of peptides of equal length\n\nmin_peptide_length : int\n Shortest peptide this predictor can handle\n\nmax_peptide_length : int\n Longest peptide this predictor can handle", "id": "f934:c0:m0"} {"signature": "@staticmethoddef _determine_supported_alleles(command, supported_allele_flag):", "body": "try:supported_alleles_output = check_output([command, supported_allele_flag])supported_alleles_str = supported_alleles_output.decode(\"\", \"\")assert len(supported_alleles_str) > ,'' % commandsupported_alleles = set([])for line in supported_alleles_str.split(\"\"):line = line.strip()if not line.startswith('') and len(line) > :try:supported_alleles.add(normalize_allele_name(line))except AlleleParseError as error:logger.info(\"\", line, error)continueif len(supported_alleles) == :raise ValueError(\"\")return supported_allelesexcept Exception as e:logger.exception(e)raise SystemError(\"\" % (command,supported_allele_flag))", "docstring": "Try asking the commandline predictor (e.g. netMHCpan)\nwhich alleles it supports.", "id": "f934:c0:m1"} {"signature": "def prepare_allele_name(self, allele_name):", "body": "return allele_name.replace(\"\", \"\")", "docstring": "How does the predictor expect to see allele names?", "id": "f934:c0:m2"} {"signature": "def __init__(self,alleles,valid_alleles=None,default_peptide_lengths=None,min_peptide_length=,max_peptide_length=None,allow_X_in_peptides=False,allow_lowercase_in_peptides=False):", "body": "if isinstance(alleles, string_types):alleles = alleles.split('')self.alleles = self._check_hla_alleles(alleles, valid_alleles)if isinstance(default_peptide_lengths, int):default_peptide_lengths = [default_peptide_lengths]require_iterable_of(default_peptide_lengths, int)self.default_peptide_lengths = default_peptide_lengthsself.min_peptide_length = min_peptide_lengthself.max_peptide_length = max_peptide_lengthself.allow_X_in_peptides = allow_X_in_peptidesself.allow_lowercase_in_peptides = allow_lowercase_in_peptides", "docstring": "Parameters\n----------\nalleles : list\n List of strings containing names of HLA alleles we're\n making predictions for. 
Example:\n [\"HLA-A*02:01\", \"HLA-B*07:02\"]\n\nvalid_alleles : list, optional\n If given, constrain HLA alleles to be contained within\n this set.\n\ndefault_peptide_lengths : list of int, optional\n When making predictions across subsequences of protein sequences,\n what peptide lengths to predict for.\n\nmin_peptide_length : int\n Shortest peptide this predictor can handle\n\nmax_peptide_length : int\n Longest peptide this predictor can handle\n\nallow_X_in_peptides : bool\n Allow unknown amino acids in peptide sequences\n\nallow_lowercase_in_peptides : bool\n Allow lowercase letters in peptide sequences", "id": "f935:c0:m0"} {"signature": "def predict_peptides(self, peptides):", "body": "raise NotImplementedError(\"\" % (self.__class__.__name__))", "docstring": "Given a list of peptide sequences, returns a BindingPredictionCollection", "id": "f935:c0:m3"} {"signature": "def _check_peptide_lengths(self, peptide_lengths=None):", "body": "if not peptide_lengths:peptide_lengths = self.default_peptide_lengthsif not peptide_lengths:raise ValueError((\"\"\"\"))if isinstance(peptide_lengths, int):peptide_lengths = [peptide_lengths]require_iterable_of(peptide_lengths, int)for peptide_length in peptide_lengths:if (self.min_peptide_length is not None andpeptide_length < self.min_peptide_length):raise ValueError(\"\" % (peptide_length,self.min_peptide_length))elif (self.max_peptide_length is not None andpeptide_length > self.max_peptide_length):raise ValueError(\"\" % (peptide_length,self.max_peptide_length))return peptide_lengths", "docstring": "If peptide lengths not specified, then try using the default\nlengths associated with this predictor object. If those aren't\na valid non-empty sequence of integers, then raise an exception.\nOtherwise return the peptide lengths.", "id": "f935:c0:m5"} {"signature": "def _check_peptide_inputs(self, peptides):", "body": "require_iterable_of(peptides, string_types)check_X = not self.allow_X_in_peptidescheck_lower = not self.allow_lowercase_in_peptidescheck_min_length = self.min_peptide_length is not Nonemin_length = self.min_peptide_lengthcheck_max_length = self.max_peptide_length is not Nonemax_length = self.max_peptide_lengthfor p in peptides:if not p.isalpha():raise ValueError(\"\" % p)elif check_X and \"\" in p:raise ValueError(\"\" % p)elif check_lower and not p.isupper():raise ValueError(\"\" % p)elif check_min_length and len(p) < min_length:raise ValueError(\"\" % (p, len(p), min_length))elif check_max_length and len(p) > max_length:raise ValueError(\"\" % (p, len(p), max_length))", "docstring": "Check peptide sequences to make sure they are valid for this predictor.", "id": "f935:c0:m7"} {"signature": "def predict_subsequences(self,sequence_dict,peptide_lengths=None):", "body": "if isinstance(sequence_dict, string_types):sequence_dict = {\"\": sequence_dict}elif isinstance(sequence_dict, (list, tuple)):sequence_dict = {seq: seq for seq in sequence_dict}peptide_lengths = self._check_peptide_lengths(peptide_lengths)peptide_set = set([])peptide_to_name_offset_pairs = defaultdict(list)for name, sequence in sequence_dict.items():for peptide_length in peptide_lengths:for i in range(len(sequence) - peptide_length + ):peptide = sequence[i:i + peptide_length]peptide_set.add(peptide)peptide_to_name_offset_pairs[peptide].append((name, i))peptide_list = sorted(peptide_set)binding_predictions = self.predict_peptides(peptide_list)results = []for binding_prediction in binding_predictions:for name, offset in 
peptide_to_name_offset_pairs[binding_prediction.peptide]:results.append(binding_prediction.clone_with_updates(source_sequence_name=name,offset=offset))self._check_results(results,peptides=peptide_set,alleles=self.alleles)return BindingPredictionCollection(results)", "docstring": "Given a dictionary mapping sequence names to amino acid strings,\nand an optional list of peptide lengths, returns a\nBindingPredictionCollection.", "id": "f935:c0:m8"} {"signature": "@staticmethoddef _check_hla_alleles(alleles,valid_alleles=None):", "body": "require_iterable_of(alleles, string_types, \"\")alleles = {normalize_allele_name(allele.strip().upper())for allele in alleles}if valid_alleles:missing_alleles = [allelefor allele in allelesif allele not in valid_alleles]if len(missing_alleles) > :raise UnsupportedAllele(\"\" % missing_alleles)return list(alleles)", "docstring": "Given a list of HLA alleles and an optional list of valid\nHLA alleles, return a set of alleles that we will pass into\nthe MHC binding predictor.", "id": "f935:c0:m11"} {"signature": "def seq_to_str(obj, sep=\"\"):", "body": "if isinstance(obj, string_classes):return objelif isinstance(obj, (list, tuple)):return sep.join([str(x) for x in obj])else:return str(obj)", "docstring": "Given a sequence convert it to a comma separated string.\nIf, however, the argument is a single object, return its string\nrepresentation.", "id": "f937:m0"} {"signature": "def main(args_list=None):", "body": "args = parse_args(args_list)binding_predictions = run_predictor(args)df = binding_predictions.to_dataframe()logger.info('', df)if args.output_csv:df.to_csv(args.output_csv, index=False)print(\"\" % args.output_csv)", "docstring": "Script to make pMHC binding predictions from amino acid sequences.\n\nUsage example:\n mhctools\n --sequence SFFPIQQQQQAAALLLI \\\n --sequence SILQQQAQAQQAQAASSSC \\\n --extract-subsequences \\\n --mhc-predictor netmhc \\\n --mhc-alleles HLA-A0201 H2-Db \\\n --mhc-predictor netmhc \\\n --output-csv epitope.csv", "id": "f939:m4"} {"signature": "def parse_int_list(string):", "body": "integers = []for comma_part in string.split(\"\"):for substring in comma_part.split(\"\"):if len(substring) == :continueif \"\" in substring:left, right = substring.split(\"\")left_val = int(left.strip())right_val = int(right.strip())integers.extend(range(left_val, right_val + ))else:integers.append(int(substring.strip()))return integers", "docstring": "Parses a string of numbers and ranges into a list of integers. 
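predict_subsequences enumerates every k-mer of each input protein and remembers where it came from, so a single prediction per unique peptide can be fanned back out to every (sequence name, offset) location. The core bookkeeping, reduced to a sketch:

    from collections import defaultdict

    def extract_subsequences(sequence_dict, peptide_lengths):
        peptide_to_locations = defaultdict(list)
        for name, sequence in sequence_dict.items():
            for length in peptide_lengths:
                for i in range(len(sequence) - length + 1):
                    peptide_to_locations[sequence[i:i + length]].append((name, i))
        return peptide_to_locations

    locations = extract_subsequences({"p1": "SIINFEKLL", "p2": "QSIINFEKL"}, [9])
    print(sorted(locations.items()))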
Ranges\nare separated by dashes and inclusive of both the start and end number.\n\nExample:\n parse_int_list(\"8 9 10,11-13\") == [8,9,10,11,12,13]", "id": "f941:m0"} {"signature": "def split_stdout_lines(stdout):", "body": "seen_dash = Falsefor l in stdout.split(\"\"):l = l.strip()if l.startswith(\"\"):seen_dash = Truecontinueif not seen_dash:continueif not l or l.startswith(\"\"):continueif any(l.startswith(word) for word in NETMHC_TOKENS):continueyield l.split()", "docstring": "Given the standard output from NetMHC/NetMHCpan/NetMHCcons tools,\ndrop all {comments, lines of hyphens, empty lines} and split the\nremaining lines by whitespace.", "id": "f943:m1"} {"signature": "def clean_fields(fields, ignored_value_indices, transforms):", "body": "cleaned_fields = []for i, field in enumerate(fields):if field in ignored_value_indices:ignored_index = ignored_value_indices[field]if ignored_index == i:continuecleaned_field = transforms[i](field) if i in transforms else fieldcleaned_fields.append(cleaned_field)return cleaned_fields", "docstring": "Sometimes, NetMHC* has fields that are only populated sometimes, which results\nin different count/indexing of the fields when that happens.\n\nWe handle this by looking for particular strings at particular indices, and\ndeleting them.\n\nWarning: this may result in unexpected behavior sometimes. For example, we\nignore \"SB\" and \"WB\" for NetMHC 3.x output; which also means that any line\nwith a key called SB or WB will be ignored.\n\nAlso, sometimes NetMHC* will have fields that we want to modify in some\nconsistent way, e.g. NetMHCpan3 has 1-based offsets and all other predictors\nhave 0-based offsets (and we rely on 0-based offsets). We handle this using\na map from field index to transform function.", "id": "f943:m2"} {"signature": "def parse_stdout(stdout,prediction_method_name,sequence_key_mapping,key_index,offset_index,peptide_index,allele_index,ic50_index,rank_index,log_ic50_index,ignored_value_indices={},transforms={}):", "body": "binding_predictions = []for fields in split_stdout_lines(stdout):fields = clean_fields(fields, ignored_value_indices, transforms)offset = int(fields[offset_index])peptide = str(fields[peptide_index])allele = str(fields[allele_index])ic50 = float(fields[ic50_index])rank = float(fields[rank_index]) if rank_index else log_ic50 = float(fields[log_ic50_index])key = str(fields[key_index])if sequence_key_mapping:original_key = sequence_key_mapping[key]else:original_key = keybinding_predictions.append(BindingPrediction(source_sequence_name=original_key,offset=offset,peptide=peptide,allele=normalize_allele_name(allele),affinity=ic50,percentile_rank=rank,log_affinity=log_ic50,prediction_method_name=prediction_method_name))return binding_predictions", "docstring": "Generic function for parsing any NetMHC* output, given expected indices\nof values of interest.\n\nParameters\n----------\nignored_value_indices : dict\n Map from values to the positions we'll ignore them at. See clean_fields.\n\ntransforms : dict\n Map from field index to a transform function to be applied to values in\n that field. 
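The parse_int_list docstring pins down the accepted syntax: integers separated by spaces or commas, with dash-separated inclusive ranges. A direct sketch matching that specification (the delimiters come from the docstring, not from the stripped body):

    def parse_int_list(string):
        integers = []
        for comma_part in string.split(","):
            for substring in comma_part.split(" "):
                if not substring:
                    continue
                if "-" in substring:
                    left, right = substring.split("-")
                    integers.extend(range(int(left), int(right) + 1))
                else:
                    integers.append(int(substring))
        return integers

    assert parse_int_list("8 9 10,11-13") == [8, 9, 10, 11, 12, 13]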
See clean_fields.\n\nReturns BindingPredictionCollection", "id": "f943:m3"} {"signature": "def parse_netmhc3_stdout(stdout,prediction_method_name=\"\",sequence_key_mapping=None):", "body": "return parse_stdout(stdout=stdout,prediction_method_name=prediction_method_name,sequence_key_mapping=sequence_key_mapping,key_index=,offset_index=,peptide_index=,allele_index=,ic50_index=,rank_index=None,log_ic50_index=,ignored_value_indices={\"\": , \"\": })", "docstring": "Parse the output format for NetMHC 3.x, which looks like:\n\n----------------------------------------------------------------------------------------------------\npos peptide logscore affinity(nM) Bind Level Protein Name Allele\n----------------------------------------------------------------------------------------------------\n0 SIINKFELL 0.437 441 WB A1 HLA-A02:01\n--------------------------------------------------------------------------------------------------\n0 SIINKFFFQ 0.206 5411 A2 HLA-A02:01\n1 IINKFFFQQ 0.128 12544 A2 HLA-A02:01\n2 INKFFFQQQ 0.046 30406 A2 HLA-A02:01\n3 NKFFFQQQQ 0.050 29197 A2 HLA-A02:01\n--------------------------------------------------------------------------------------------------", "id": "f943:m4"} {"signature": "def parse_netmhc4_stdout(stdout,prediction_method_name=\"\",sequence_key_mapping=None):", "body": "return parse_stdout(stdout=stdout,prediction_method_name=prediction_method_name,sequence_key_mapping=sequence_key_mapping,key_index=,offset_index=,peptide_index=,allele_index=,ic50_index=,rank_index=,log_ic50_index=)", "docstring": "# Peptide length 9\n# Rank Threshold for Strong binding peptides 0.500\n# Rank Threshold for Weak binding peptides 2.000\n-----------------------------------------------------------------------------------\n pos HLA peptide Core Offset I_pos I_len D_pos D_len iCore Identity 1-log50k(aff) Affinity(nM) %Rank BindLevel\n-----------------------------------------------------------------------------------\n 0 HLA-A0201 TMDKSELVQ TMDKSELVQ 0 0 0 0 0 TMDKSELVQ 143B_BOVIN_P293 0.051 28676.59 43.00\n 1 HLA-A0201 MDKSELVQK MDKSELVQK 0 0 0 0 0 MDKSELVQK 143B_BOVIN_P293 0.030 36155.15 70.00\n 2 HLA-A0201 DKSELVQKA DKSELVQKA 0 0 0 0 0 DKSELVQKA 143B_BOVIN_P293 0.030 36188.42 70.00\n 3 HLA-A0201 KSELVQKAK KSELVQKAK 0 0 0 0 0 KSELVQKAK 143B_BOVIN_P293 0.032 35203.22 65.00\n 4 HLA-A0201 SELVQKAKL SELVQKAKL 0 0 0 0 0 SELVQKAKL 143B_BOVIN_P293 0.031 35670.99 65.00\n 5 HLA-A0201 ELVQKAKLA ELVQKAKLA 0 0 0 0 0 ELVQKAKLA 143B_BOVIN_P293 0.080 21113.07 29.00\n 6 HLA-A0201 LVQKAKLAE LVQKAKLAE 0 0 0 0 0 LVQKAKLAE 143B_BOVIN_P293 0.027 37257.56 75.00\n 7 HLA-A0201 VQKAKLAEQ VQKAKLAEQ 0 0 0 0 0 VQKAKLAEQ 143B_BOVIN_P293 0.040 32404.62 55.00\n 219 HLA-A0201 QLLRDNLTL QLLRDNLTL 0 0 0 0 0 QLLRDNLTL 143B_BOVIN_P293 0.527 167.10 1.50 <= WB\n-----------------------------------------------------------------------------------", "id": "f943:m5"} {"signature": "def parse_netmhcpan28_stdout(stdout,prediction_method_name=\"\",sequence_key_mapping=None):", "body": "check_stdout_error(stdout, \"\")return parse_stdout(stdout=stdout,prediction_method_name=prediction_method_name,sequence_key_mapping=sequence_key_mapping,key_index=,offset_index=,peptide_index=,allele_index=,ic50_index=,rank_index=,log_ic50_index=)", "docstring": "# Affinity Threshold for Strong binding peptides 50.000',\n# Affinity Threshold for Weak binding peptides 500.000',\n# Rank Threshold for Strong binding peptides 0.500',\n# Rank Threshold for Weak binding peptides 
2.000',\n----------------------------------------------------------------------------\npos HLA peptide Identity 1-log50k(aff) Affinity(nM) %Rank BindLevel\n----------------------------------------------------------------------------\n 0 HLA-A*02:03 QQQQQYFPE id0 0.024 38534.25 50.00\n 1 HLA-A*02:03 QQQQYFPEI id0 0.278 2461.53 15.00\n 2 HLA-A*02:03 QQQYFPEIT id0 0.078 21511.53 50.00\n 3 HLA-A*02:03 QQYFPEITH id0 0.041 32176.84 50.00\n 4 HLA-A*02:03 QYFPEITHI id0 0.085 19847.09 32.00\n 5 HLA-A*02:03 YFPEITHII id0 0.231 4123.85 15.00\n 6 HLA-A*02:03 FPEITHIII id0 0.060 26134.28 50.00\n 7 HLA-A*02:03 PEITHIIIA id0 0.034 34524.63 50.00\n 8 HLA-A*02:03 EITHIIIAS id0 0.076 21974.48 50.00\n 9 HLA-A*02:03 ITHIIIASS id0 0.170 7934.26 32.00\n 10 HLA-A*02:03 THIIIASSS id0 0.040 32361.18 50.00\n 11 HLA-A*02:03 HIIIASSSL id0 0.515 189.74 4.00 <= WB", "id": "f943:m6"} {"signature": "def parse_netmhcpan3_stdout(stdout,prediction_method_name=\"\",sequence_key_mapping=None):", "body": "transforms = {: lambda x: int(x) - ,}return parse_stdout(stdout=stdout,prediction_method_name=prediction_method_name,sequence_key_mapping=sequence_key_mapping,key_index=,offset_index=,peptide_index=,allele_index=,ic50_index=,rank_index=,log_ic50_index=,transforms=transforms)", "docstring": "# Rank Threshold for Strong binding peptides 0.500\n# Rank Threshold for Weak binding peptides 2.000\n-----------------------------------------------------------------------------------\nPos HLA Peptide Core Of Gp Gl Ip Il Icore Identity Score Aff(nM) %Rank BindLevel\n-----------------------------------------------------------------------------------\n1 HLA-B*18:01 MFCQLAKT MFCQLAKT- 0 0 0 8 1 MFCQLAKT sequence0_0 0.02864 36676.0 45.00\n2 HLA-B*18:01 FCQLAKTY F-CQLAKTY 0 0 0 1 1 FCQLAKTY sequence0_0 0.07993 21056.5 13.00", "id": "f943:m7"} {"signature": "def parse_netmhcpan4_stdout(stdout,prediction_method_name=\"\",sequence_key_mapping=None):", "body": "return parse_netmhcpan3_stdout(stdout=stdout,prediction_method_name=prediction_method_name,sequence_key_mapping=sequence_key_mapping)", "docstring": "# NetMHCpan version 4.0\n\n# Tmpdir made /var/folders/jc/fyrvcrcs3sb8g4mkdg6nl_t80000gp/T//netMHCpanuH3SvY\n# Input is in PEPTIDE format\n\n# Make binding affinity predictions\n\nHLA-A02:01 : Distance to training data 0.000 (using nearest neighbor HLA-A02:01)\n\n# Rank Threshold for Strong binding peptides 0.500\n# Rank Threshold for Weak binding peptides 2.000\n-----------------------------------------------------------------------------------\n Pos HLA Peptide Core Of Gp Gl Ip Il Icore Identity Score Aff(nM) %Rank BindLevel\n-----------------------------------------------------------------------------------\n 1 HLA-A*02:01 SIINFEKL SIINF-EKL 0 0 0 5 1 SIINFEKL PEPLIST 0.1141340 14543.1 18.9860\n-----------------------------------------------------------------------------------\n\nProtein PEPLIST. Allele HLA-A*02:01. Number of high binders 0. Number of weak binders 0. 
Number of peptides 1", "id": "f943:m8"} {"signature": "def parse_netmhccons_stdout(stdout,prediction_method_name=\"\",sequence_key_mapping=None):", "body": "return parse_stdout(stdout=stdout,prediction_method_name=prediction_method_name,sequence_key_mapping=sequence_key_mapping,key_index=,offset_index=,peptide_index=,allele_index=,ic50_index=,rank_index=,log_ic50_index=)", "docstring": "# Affinity Threshold for Strong binding peptides 50.000',\n# Affinity Threshold for Weak binding peptides 500.000',\n# Rank Threshold for Strong binding peptides 0.500',\n# Rank Threshold for Weak binding peptides 2.000',\n----------------------------------------------------------------------------\npos HLA peptide Identity 1-log50k(aff) Affinity(nM) %Rank BindLevel\n----------------------------------------------------------------------------\n 0 HLA-A*02:03 QQQQQYFPE id0 0.024 38534.25 50.00\n 1 HLA-A*02:03 QQQQYFPEI id0 0.278 2461.53 15.00\n 2 HLA-A*02:03 QQQYFPEIT id0 0.078 21511.53 50.00\n 3 HLA-A*02:03 QQYFPEITH id0 0.041 32176.84 50.00\n 4 HLA-A*02:03 QYFPEITHI id0 0.085 19847.09 32.00\n 5 HLA-A*02:03 YFPEITHII id0 0.231 4123.85 15.00\n 6 HLA-A*02:03 FPEITHIII id0 0.060 26134.28 50.00\n 7 HLA-A*02:03 PEITHIIIA id0 0.034 34524.63 50.00\n 8 HLA-A*02:03 EITHIIIAS id0 0.076 21974.48 50.00\n 9 HLA-A*02:03 ITHIIIASS id0 0.170 7934.26 32.00\n 10 HLA-A*02:03 THIIIASSS id0 0.040 32361.18 50.00\n 11 HLA-A*02:03 HIIIASSSL id0 0.515 189.74 4.00 <= WB", "id": "f943:m9"} {"signature": "def parse_netmhciipan_stdout(stdout,prediction_method_name=\"\",sequence_key_mapping=None):", "body": "check_stdout_error(stdout, \"\")return parse_stdout(stdout=stdout,prediction_method_name=prediction_method_name,sequence_key_mapping=sequence_key_mapping,key_index=,offset_index=,peptide_index=,allele_index=,ic50_index=,rank_index=,log_ic50_index=)", "docstring": "# Threshold for Strong binding peptides (IC50)\t50.000 nM\n# Threshold for Weak binding peptides (IC50)\t500.000 nM\n\n# Threshold for Strong binding peptides (%Rank)\t0.5%\n# Threshold for Weak binding peptides (%Rank)\t2%\n\n# Allele: DRB1_0301\n--------------------------------------------------------------------------------------------------------------------------------------------\n Seq Allele Peptide Identity Pos Core Core_Rel 1-log50k(aff) Affinity(nM) %Rank Exp_Bind BindingLevel\n--------------------------------------------------------------------------------------------------------------------------------------------\n 0 DRB1_0301 AGFKGEQGPKGEPG Sequence 2 FKGEQGPKG 0.810 0.080 21036.68 50.00 9.999\n 1 DRB1_0301 GELIGTLNAAKVPAD Sequence 2 LIGTLNAAK 0.650 0.340 1268.50 32.00 9.999\n 2 DRB1_0301 PEVIPMFSALSEGATP Sequence 5 MFSALSEGA 0.385 0.180 7161.16 50.00 9.999\n 3 DRB1_0301 PKYVKQNTLKLAT Sequence 2 YVKQNTLKL 0.575 0.442 418.70 6.00 9.999 <=WB\n 4 DRB1_0301 VGSDWRFLRGYHQYA Sequence 0 VGSDWRFLR 0.575 0.466 322.07 10.00 9.999 <=WB\n 5 DRB1_0301 XFVKQNAAALX Sequence 2 VKQNAAALX 0.500 0.262 2939.20 15.00 9.999\n 6 DRB1_0301 AAYSDQATPLLLSPR Sequence 1 AYSDQATPL 0.395 0.291 2152.21 50.00 9.999\n 7 DRB1_0301 PVSKMRMATPLLMQA Sequence 4 MRMATPLLM 0.890 0.770 12.00 0.01 9.999 <=SB\n 8 DRB1_0301 AYMRADAAAGGA Sequence 2 MRADAAAGG 0.835 0.303 1887.87 15.00 9.999\n 9 DRB1_0301 PKYVKQNTLKLAT Sequence 2 YVKQNTLKL 0.575 0.442 418.70 6.00 9.999 <=WB\n 10 DRB1_0301 ENPVVHFFKNIVTPR Sequence 6 FFKNIVTPR 0.425 0.357 1049.04 32.00 9.999", "id": "f943:m10"} {"signature": "def to_dataframe(self,columns=BindingPrediction.fields + (\"\",)):", "body": "return 
pd.DataFrame.from_records([tuple([getattr(x, name) for name in columns]) for x in self],columns=columns)", "docstring": "Converts collection of BindingPrediction objects to DataFrame", "id": "f948:c0:m0"} {"signature": "def NetMHCpan(alleles,program_name=\"\",process_limit=-,default_peptide_lengths=[],extra_flags=[]):", "body": "with open(os.devnull, '') as devnull:output = check_output([program_name, \"\", \"\"],stderr=devnull)output_str = output.decode(\"\", \"\")common_kwargs = {\"\": alleles,\"\": default_peptide_lengths,\"\": program_name,\"\": process_limit,\"\": extra_flags,}if \"\" in output_str:return NetMHCpan28(**common_kwargs)elif \"\" in output_str:return NetMHCpan3(**common_kwargs)elif \"\" in output_str:return NetMHCpan4(**common_kwargs)else:raise RuntimeError(\"\")", "docstring": "This function wraps NetMHCpan28 and NetMHCpan3 to automatically detect which class\nto use, with the help of the miraculous and strange '--version' netmhcpan argument.", "id": "f950:m0"} {"signature": "def utcnow_no_ms():", "body": "return datetime.datetime.utcnow().replace(microsecond=)", "docstring": "Returns utcnow without microseconds", "id": "f967:m0"} {"signature": "def create_tables(self):", "body": "for cls in self:cls.create_table(fail_silently=True)", "docstring": "Create database tables", "id": "f967:c0:m1"} {"signature": "def destroy_tables(self):", "body": "for cls in self:cls.drop_table(fail_silently=True)", "docstring": "Destroy database tables", "id": "f967:c0:m2"} {"signature": "def register(self, model_cls):", "body": "assert issubclass(model_cls, peewee.Model)assert not hasattr(model_cls._meta, '')if model_cls in self:raise RuntimeError(\"\")self.append(model_cls)model_cls._meta.database = self.dbmreturn model_cls", "docstring": "Register model(s) with app", "id": "f967:c0:m3"} {"signature": "def connect(self):", "body": "for name, connection in self.items():connection.connect()", "docstring": "Create connection for all databases", "id": "f967:c1:m1"} {"signature": "def disconnect(self):", "body": "for name, connection in self.items():if not connection.is_closed():connection.close()", "docstring": "Disconnect from all databases", "id": "f967:c1:m2"} {"signature": "def get_database(self, model):", "body": "for router in self.routers:r = router.get_database(model)if r is not None:return rreturn self.get('')", "docstring": "Find matching database router", "id": "f967:c1:m3"} {"signature": "@classmethoddef get_or_none(cls, **kwargs):", "body": "try:return cls.get(**kwargs)except cls.DoesNotExist:return None", "docstring": "XXX: needs unit test", "id": "f967:c4:m2"} {"signature": "@classmethoddef atomic(self):", "body": "return self._meta.database.atomic()", "docstring": "Shortcut method for creating atomic context", "id": "f967:c4:m3"} {"signature": "def to_cursor_ref(self):", "body": "fields = self._meta.get_primary_keys()assert fieldsvalues = {field.name:self.__data__[field.name] for field in fields}return values", "docstring": "Returns dict of values to uniquely reference this item", "id": "f967:c4:m4"} {"signature": "@classmethoddef from_cursor_ref(self, cursor):", "body": "return self.get(**cursor)", "docstring": "Returns model instance from unique cursor reference", "id": "f967:c4:m5"} {"signature": "def refetch(self):", "body": "ref = self.to_cursor_ref()return self.from_cursor_ref(ref)", "docstring": "Return new model instance with fresh data from database\nOnly works on models which have a primary or compound key\nSee https://github.com/coleifer/peewee/issues/638\n\nXXX: 
Add support for models without PK", "id": "f967:c4:m6"} {"signature": "@classmethoddef paginate_query(self, query, count, offset=None, sort=None):", "body": "assert isinstance(query, peewee.Query)assert isinstance(count, int)assert isinstance(offset, (str, int, type(None)))assert isinstance(sort, (list, set, tuple, type(None)))fields = query.model._meta.get_primary_keys()if len(fields) == :raise peewee.ProgrammingError('')if len(fields) > :raise peewee.ProgrammingError('')if offset is not None:query = query.where(fields[] >= offset)order_bys = []if sort:for field, direction in sort:if not isinstance(direction, str):raise ValueError(\"\".format(field))direction = direction.lower().strip()if direction not in ['', '']:raise ValueError(\"\".format(field))order_by = peewee.SQL(field)order_by = getattr(order_by, direction)()order_bys += [order_by]order_bys += [fields[].asc()]query = query.order_by(*order_bys)query = query.limit(count)return query", "docstring": "Apply pagination to query\n\n:attr query: Instance of `peewee.Query`\n:attr count: Max rows to return\n:attr offset: Pagination offset, str/int\n:attr sort: List of tuples, e.g. [('id', 'asc')]\n\n:returns: Instance of `peewee.Query`", "id": "f967:c7:m0"} {"signature": "def get_query(self):", "body": "return self.query", "docstring": "Return query for our model", "id": "f967:c8:m0"} {"signature": "def get_paginator(self):", "body": "return self.paginator", "docstring": "Return pagination for our model", "id": "f967:c8:m1"} {"signature": "def apply_filters(self, query, filters):", "body": "assert isinstance(query, peewee.Query)assert isinstance(filters, dict)", "docstring": "Apply user specified filters to query", "id": "f967:c8:m2"} {"signature": "def list(self, filters, cursor, count):", "body": "assert isinstance(filters, dict), \"\"assert isinstance(cursor, dict), \"\"query = self.get_query()assert isinstance(query, peewee.Query)paginator = self.get_paginator()assert isinstance(paginator, Pagination)count += pquery = paginator.filter_query(query, cursor, count)items = [ item for item in pquery ]next_item = items.pop()next_cursor = next_item.to_cursor_ref()''''''return items, next_cursor", "docstring": "List items from query", "id": "f967:c8:m3"} {"signature": "def retrieve(self, cursor):", "body": "assert isinstance(cursor, dict), \"\"query = self.get_query()assert isinstance(query, peewee.Query)queryreturn query.get(**cursor)", "docstring": "Retrieve items from query", "id": "f967:c8:m4"} {"signature": "def populate_models(self):", "body": "fake = Faker()fake.seed()cities = ['', '', '', '']items = []for x in range():city = cities[x % len(cities)]items += [dict(name=fake.name(), city=city)]Person.insert_many(items).execute()assert Person.select().count() == ", "docstring": "Populate test models with (predictable) fake data", "id": "f970:c1:m0"} {"signature": "def db_value(self, value):", "body": "if not isinstance(value, UUID):value = UUID(value)parts = str(value).split(\"\")reordered = ''.join([parts[], parts[], parts[], parts[], parts[]])value = binascii.unhexlify(reordered)return super(OrderedUUIDField, self).db_value(value)", "docstring": "Convert UUID to binary blob", "id": "f972:c0:m0"} {"signature": "def python_value(self, value):", "body": "value = super(OrderedUUIDField, self).python_value(value)u = binascii.b2a_hex(value)value = u[:] + u[:] + u[:] + u[:] + u[:]return UUID(value.decode())", "docstring": "Convert binary blob to UUID instance", "id": "f972:c0:m1"} {"signature": "def db_value(self, value):", "body": "value = 
self.transform_value(value)return self.hhash.encrypt(value,salt_size=self.salt_size, rounds=self.rounds)", "docstring": "Convert the python value for storage in the database.", "id": "f972:c6:m3"} {"signature": "def python_value(self, value):", "body": "value = coerce_to_bytes(value)obj = HashValue(value)obj.field = selfreturn obj", "docstring": "Convert the database value to a pythonic value.", "id": "f972:c6:m4"} {"signature": "def check(self):", "body": "if not self.is_valid:raise PolyaxonDeploymentConfigError(''.format(self.deployment_type))check = Falseif self.is_kubernetes:check = self.check_for_kubernetes()elif self.is_docker_compose:check = self.check_for_docker_compose()elif self.is_docker:check = self.check_for_docker()elif self.is_heroku:check = self.check_for_heroku()if not check:raise PolyaxonDeploymentConfigError(''.format(self.deployment_type))", "docstring": "Add platform specific checks", "id": "f1019:c0:m13"} {"signature": "def install(self):", "body": "if not self.is_valid:raise PolyaxonDeploymentConfigError(''.format(self.deployment_type))if self.is_kubernetes:self.install_on_kubernetes()elif self.is_docker_compose:self.install_on_docker_compose()elif self.is_docker:self.install_on_docker()elif self.is_heroku:self.install_on_heroku()", "docstring": "Install polyaxon using the current config to the correct platform.", "id": "f1019:c0:m18"} {"signature": "def upgrade(self):", "body": "if not self.is_valid:raise PolyaxonDeploymentConfigError(''.format(self.deployment_type))if self.is_kubernetes:self.upgrade_on_kubernetes()elif self.is_docker_compose:self.upgrade_on_docker_compose()elif self.is_docker:self.upgrade_on_docker()elif self.is_heroku:self.upgrade_on_heroku()", "docstring": "Upgrade deployment.", "id": "f1019:c0:m23"} {"signature": "def teardown(self, hooks=True):", "body": "if not self.is_valid:raise PolyaxonDeploymentConfigError(''.format(self.deployment_type))if self.is_kubernetes:self.teardown_on_kubernetes(hooks=hooks)elif self.is_docker_compose:self.teardown_on_docker_compose()elif self.is_docker:self.teardown_on_docker(hooks=hooks)elif self.is_heroku:self.teardown_on_heroku(hooks=hooks)", "docstring": "Teardown Polyaxon.", "id": "f1019:c0:m28"} {"signature": "@staticmethoddef _remove_trailing_spaces(line):", "body": "while line.endswith('') and not line.endswith(''):line = line[:-]return line.replace('', '')", "docstring": "Remove trailing spaces unless they are quoted with a backslash.", "id": "f1025:c1:m1"} {"signature": "@classmethoddef find_matching(cls, path, patterns):", "body": "for pattern in patterns:if pattern.match(path):yield pattern", "docstring": "Yield all matching patterns for path.", "id": "f1025:c1:m3"} {"signature": "@classmethoddef is_ignored(cls, path, patterns):", "body": "status = Nonefor pattern in cls.find_matching(path, patterns):status = pattern.is_excludereturn status", "docstring": "Check whether a path is ignored. 
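The `find_matching`/`is_ignored` pair recorded above (f1025:c1:m3, f1025:c1:m4) implements gitignore-style semantics in which the *last* matching pattern decides whether a path is excluded. A minimal sketch of that rule, assuming a hypothetical `Pattern` object with a `match(path)` method and an `is_exclude` flag (the real pattern class and its defaults are hidden by the stripped literals in this dump):

```python
from pathlib import PurePath


class Pattern:
    """Hypothetical stand-in for the ignore-pattern objects used by find_matching/is_ignored."""

    def __init__(self, glob, is_exclude=True):
        self.glob = glob
        self.is_exclude = is_exclude

    def match(self, path):
        return PurePath(path).match(self.glob)


def is_ignored(path, patterns):
    # Last matching pattern wins: None means no pattern matched,
    # True means excluded, False means re-included by a negated pattern.
    status = None
    for pattern in patterns:
        if pattern.match(path):
            status = pattern.is_exclude
    return status


patterns = [Pattern("*.pyc"), Pattern("keep.pyc", is_exclude=False)]
assert is_ignored("module.pyc", patterns) is True
assert is_ignored("keep.pyc", patterns) is False
assert is_ignored("module.py", patterns) is None
```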
For directories, include a trailing slash.", "id": "f1025:c1:m4"} {"signature": "@staticmethoddef _matches_patterns(path, patterns):", "body": "for glob in patterns:try:if PurePath(path).match(glob):return Trueexcept TypeError:passreturn False", "docstring": "Given a list of patterns, returns True if a path matches any pattern.", "id": "f1025:c1:m9"} {"signature": "@classmethoddef _ignore_path(cls, path, ignore_list=None, white_list=None):", "body": "ignore_list = ignore_list or []white_list = white_list or []return (cls._matches_patterns(path, ignore_list) andnot cls._matches_patterns(path, white_list))", "docstring": "Returns whether a path should be ignored or not.", "id": "f1025:c1:m10"} {"signature": "@click.group()@click.option('', '', is_flag=True, default=False, help='')@click.pass_context@clean_outputsdef cli(context, verbose):", "body": "configure_logger(verbose or GlobalConfigManager.get_value(''))non_check_cmds = ['', '', '', '', '', '', '']if context.invoked_subcommand not in non_check_cmds:check_cli_version()", "docstring": "Polyaxon CLI tool to:\n\n * Parse, Validate, and Check Polyaxonfiles.\n\n * Interact with Polyaxon server.\n\n * Run and Monitor experiments.\n\n Check the help available for each command listed below.", "id": "f1033:m0"} {"signature": "@contextmanagerdef create_tarfile(files, project_name):", "body": "fd, filename = tempfile.mkstemp(prefix=\"\".format(project_name), suffix='')with tarfile.open(filename, \"\") as tar:for f in files:tar.add(f)yield filenameos.close(fd)os.remove(filename)", "docstring": "Create a tar file based on the list of files passed", "id": "f1035:m3"} {"signature": "def pprint(value):", "body": "click.echo(json.dumps(value,sort_keys=True,indent=,separators=('', '')))", "docstring": "Prints as formatted JSON", "id": "f1039:m3"} {"signature": "@click.group(invoke_without_command=True)@click.option('', '', is_flag=True, help='')@clean_outputsdef config(list): ", "body": "if list:_config = GlobalConfigManager.get_config_or_default()Printer.print_header('')dict_tabulate(_config.to_dict())", "docstring": "Set and get the global configurations.", "id": "f1040:m1"} {"signature": "@config.command()@click.argument('', type=str, nargs=-)@clean_outputsdef get(keys):", "body": "_config = GlobalConfigManager.get_config_or_default()if not keys:returnprint_values = {}for key in keys:if hasattr(_config, key):print_values[key] = getattr(_config, key)else:click.echo(''.format(key))dict_tabulate(print_values, )", "docstring": "Get the global config values by keys.\n\n Example:\n\n \\b\n ```bash\n $ polyaxon config get host http_port\n ```", "id": "f1040:m2"} {"signature": "@config.command()@click.option('', type=bool, help='')@click.option('', type=str, help='')@click.option('', type=int, help='')@click.option('', type=int, help='')@click.option('', type=bool, help='')@click.option('', type=bool,help='')@clean_outputsdef set(verbose, host,http_port,ws_port,use_https,verify_ssl):", "body": "_config = GlobalConfigManager.get_config_or_default()if verbose is not None:_config.verbose = verboseif host is not None:_config.host = hostif http_port is not None:_config.http_port = http_portif ws_port is not None:_config.ws_port = ws_portif use_https is not None:_config.use_https = use_httpsif verify_ssl is False:_config.verify_ssl = verify_sslGlobalConfigManager.set_config(_config)Printer.print_success('')CliConfigManager.purge()", "docstring": "Set the global config values.\n\n Example:\n\n \\b\n ```bash\n $ polyaxon config set --host=localhost --http_port=80\n 
```", "id": "f1040:m3"} {"signature": "@click.command()@clean_outputsdef upload(sync=True): ", "body": "project = ProjectManager.get_config_or_raise()files = IgnoreManager.get_unignored_file_paths()try:with create_tarfile(files, project.name) as file_path:with get_files_in_current_directory('', [file_path]) as (files, files_size):try:PolyaxonClient().project.upload_repo(project.user,project.name,files,files_size,sync=sync)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(project.name))Printer.print_error(''.format(e))Printer.print_error('''''''''')sys.exit()Printer.print_success('')except Exception as e:Printer.print_error(\"\")Printer.print_error(''.format(e))sys.exit()", "docstring": "Upload code of the current directory while respecting the .polyaxonignore file.", "id": "f1041:m0"} {"signature": "def check_cli_version():", "body": "if not CliConfigManager.should_check():returnserver_version = get_server_version()current_version = get_current_version()CliConfigManager.reset(current_version=current_version,min_version=server_version.min_version)if LooseVersion(current_version) < LooseVersion(server_version.min_version):click.echo(\"\"\"\"\"\".format(current_version))if click.confirm(\"\"\"\".format(server_version.latest_version)):pip_upgrade()sys.exit()else:clint.textui.puts(\"\")with clint.textui.indent():clint.textui.puts(\"\")clint.textui.puts(\"\".format(server_version.latest_version))sys.exit()elif LooseVersion(current_version) < LooseVersion(server_version.latest_version):clint.textui.puts(\"\".format(server_version.latest_version))with clint.textui.indent():clint.textui.puts(\"\")elif LooseVersion(current_version) > LooseVersion(server_version.latest_version):clint.textui.puts(\"\"\"\"\"\".format(current_version,server_version.latest_version))", "docstring": "Check if the current cli version satisfies the server requirements", "id": "f1042:m6"} {"signature": "@click.command()@click.option('', is_flag=True, default=False, help='')@click.option('', is_flag=True, default=False, help='')@clean_outputsdef version(cli, platform):", "body": "version_client = PolyaxonClient().versioncli = cli or not any([cli, platform])if cli:try:server_version = version_client.get_cli_version()except AuthorizationError:session_expired()sys.exit()except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error('')Printer.print_error(''.format(e))sys.exit()cli_version = get_version(PROJECT_CLI_NAME)Printer.print_header(''.format(cli_version))Printer.print_header('')dict_tabulate(server_version.to_dict())if platform:try:platform_version = version_client.get_platform_version()except AuthorizationError:session_expired()sys.exit()except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error('')Printer.print_error(''.format(e))sys.exit()chart_version = version_client.get_chart_version()Printer.print_header(''.format(chart_version.version))Printer.print_header('')dict_tabulate(platform_version.to_dict())", "docstring": "Print the current version of the cli and platform.", "id": "f1042:m7"} {"signature": "@click.command()@clean_outputsdef upgrade():", "body": "try:pip_upgrade(PROJECT_CLI_NAME)except Exception as e:logger.error(e)", "docstring": "Install/Upgrade polyaxon-cli.", "id": "f1042:m8"} {"signature": "@click.group()@clean_outputsdef admin():", "body": "", "docstring": "Commands for admin management.", "id": "f1043:m1"} {"signature": "@admin.command()@click.option('', 
'', type=click.Path(exists=True),help='')@click.option('', type=click.Path(exists=True),help='')@click.option('', is_flag=True, default=False,help='')@click.option('', is_flag=True, default=False,help='')@clean_outputsdef deploy(file, manager_path, check, dry_run): ", "body": "config = read_deployment_config(file)manager = DeployManager(config=config,filepath=file,manager_path=manager_path,dry_run=dry_run)exception = Noneif check:manager.check()Printer.print_success('')else:try:manager.install()except Exception as e:Printer.print_error('')exception = eif exception:Printer.print_error(''.format(exception))", "docstring": "Deploy polyaxon.", "id": "f1043:m2"} {"signature": "@admin.command()@click.option('', '', type=click.Path(exists=True),help='')@click.option('', type=click.Path(exists=True),help='')@click.option('', is_flag=True, default=False,help='')@click.option('', is_flag=True, default=False,help='')@clean_outputsdef upgrade(file, manager_path, check, dry_run): ", "body": "config = read_deployment_config(file)manager = DeployManager(config=config,filepath=file,manager_path=manager_path,dry_run=dry_run)exception = Noneif check:manager.check()Printer.print_success('')else:try:manager.upgrade()except Exception as e:Printer.print_error('')exception = eif exception:Printer.print_error(''.format(exception))", "docstring": "Upgrade a Polyaxon deployment.", "id": "f1043:m3"} {"signature": "@admin.command()@click.option('', '', type=click.Path(exists=True),help='')@clean_outputsdef teardown(file): ", "body": "config = read_deployment_config(file)manager = DeployManager(config=config, filepath=file)exception = Nonetry:if click.confirm('', default=True):manager.teardown(hooks=True)else:manager.teardown(hooks=False)except Exception as e:Printer.print_error('')exception = eif exception:Printer.print_error(''.format(exception))", "docstring": "Teardown a polyaxon deployment given a config file.", "id": "f1043:m4"} {"signature": "@click.group()@click.option('', '', type=str, help=\"\")@click.option('', '', type=int, help=\"\")@click.pass_context@clean_outputsdef build(ctx, project, build): ", "body": "ctx.obj = ctx.obj or {}ctx.obj[''] = projectctx.obj[''] = build", "docstring": "Commands for build jobs.", "id": "f1045:m1"} {"signature": "@build.command()@click.pass_context@clean_outputsdef get(ctx):", "body": "user, project_name, _build = get_build_or_local(ctx.obj.get(''), ctx.obj.get(''))try:response = PolyaxonClient().build_job.get_build(user, project_name, _build)cache.cache(config_manager=BuildJobManager, response=response)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_build))Printer.print_error(''.format(e))sys.exit()get_build_details(response)", "docstring": "Get build job.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon build -b 1 get\n ```\n\n \\b\n ```bash\n $ polyaxon build --build=1 --project=project_name get\n ```", "id": "f1045:m2"} {"signature": "@build.command()@click.pass_context@clean_outputsdef delete(ctx):", "body": "user, project_name, _build = get_build_or_local(ctx.obj.get(''), ctx.obj.get(''))if not click.confirm(\"\".format(_build)):click.echo('')sys.exit()try:response = PolyaxonClient().build_job.delete_build(user, project_name, _build)BuildJobManager.purge()except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_build))Printer.print_error(''.format(e))sys.exit()if response.status_code == 
:Printer.print_success(\"\".format(_build))", "docstring": "Delete build job.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Example:\n\n \\b\n ```bash\n $ polyaxon build delete\n ```\n\n \\b\n ```bash\n $ polyaxon build -b 2 delete\n ```", "id": "f1045:m3"} {"signature": "@build.command()@click.option('', type=str,help='')@click.option('', type=str, help='')@click.option('', type=str, help='')@click.pass_context@clean_outputsdef update(ctx, name, description, tags):", "body": "user, project_name, _build = get_build_or_local(ctx.obj.get(''), ctx.obj.get(''))update_dict = {}if name:update_dict[''] = nameif description:update_dict[''] = descriptiontags = validate_tags(tags)if tags:update_dict[''] = tagsif not update_dict:Printer.print_warning('')sys.exit()try:response = PolyaxonClient().build_job.update_build(user, project_name, _build, update_dict)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_build))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\")get_build_details(response)", "docstring": "Update build.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Example:\n\n \\b\n ```bash\n $ polyaxon build -b 2 update --description=\"new description for my build\"\n ```", "id": "f1045:m4"} {"signature": "@build.command()@click.option('', '', is_flag=True, default=False,help=\"\"\"\")@click.pass_context@clean_outputsdef stop(ctx, yes):", "body": "user, project_name, _build = get_build_or_local(ctx.obj.get(''), ctx.obj.get(''))if not yes and not click.confirm(\"\"\"\".format(_build)):click.echo('')sys.exit()try:PolyaxonClient().build_job.stop(user, project_name, _build)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_build))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\")", "docstring": "Stop build job.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon build stop\n ```\n\n \\b\n ```bash\n $ polyaxon build -b 2 stop\n ```", "id": "f1045:m5"} {"signature": "@build.command()@click.pass_context@clean_outputsdef bookmark(ctx):", "body": "user, project_name, _build = get_build_or_local(ctx.obj.get(''), ctx.obj.get(''))try:PolyaxonClient().build_job.bookmark(user, project_name, _build)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_build))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\")", "docstring": "Bookmark build job.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon build bookmark\n ```\n\n \\b\n ```bash\n $ polyaxon build -b 2 bookmark\n ```", "id": "f1045:m6"} {"signature": "@build.command()@click.pass_context@clean_outputsdef unbookmark(ctx):", "body": "user, project_name, _build = get_build_or_local(ctx.obj.get(''), ctx.obj.get(''))try:PolyaxonClient().build_job.unbookmark(user, project_name, _build)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_build))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\")", "docstring": "Unbookmark build job.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon build unbookmark\n ```\n\n \\b\n ```bash\n $ polyaxon build -b 2 unbookmark\n ```", "id": "f1045:m7"} {"signature": "@build.command()@click.option('', type=int, help=\"\")@click.pass_context@clean_outputsdef 
statuses(ctx, page):", "body": "user, project_name, _build = get_build_or_local(ctx.obj.get(''), ctx.obj.get(''))page = page or try:response = PolyaxonClient().build_job.get_statuses(user, project_name, _build, page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_build))Printer.print_error(''.format(e))sys.exit()meta = get_meta_response(response)if meta:Printer.print_header(''.format(_build))Printer.print_header('')dict_tabulate(meta)else:Printer.print_header(''.format(_build))objects = list_dicts_to_tabulate([Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='')for o in response['']])if objects:Printer.print_header(\"\")objects.pop('', None)dict_tabulate(objects, is_list_dict=True)", "docstring": "Get build job statuses.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon build -b 2 statuses\n ```", "id": "f1045:m8"} {"signature": "@build.command()@click.option('', '', is_flag=True, help='')@click.pass_context@clean_outputsdef resources(ctx, gpu):", "body": "user, project_name, _build = get_build_or_local(ctx.obj.get(''), ctx.obj.get(''))try:message_handler = Printer.gpu_resources if gpu else Printer.resourcesPolyaxonClient().build_job.resources(user,project_name,_build,message_handler=message_handler)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_build))Printer.print_error(''.format(e))sys.exit()", "docstring": "Get build job resources.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon build -b 2 resources\n ```\n\n For GPU resources\n\n \\b\n ```bash\n $ polyaxon build -b 2 resources --gpu\n ```", "id": "f1045:m9"} {"signature": "@build.command()@click.option('', '', is_flag=True, help=\"\")@click.option('', '', is_flag=True, default=False,help=\"\")@click.option('', is_flag=True, default=False,help=\"\")@click.pass_context@clean_outputsdef logs(ctx, past, follow, hide_time):", "body": "user, project_name, _build = get_build_or_local(ctx.obj.get(''), ctx.obj.get(''))if past:try:response = PolyaxonClient().build_job.logs(user, project_name, _build, stream=False)get_logs_handler(handle_job_info=False,show_timestamp=not hide_time,stream=False)(response.content.decode().split(''))print()if not follow:returnexcept (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:if not follow:Printer.print_error(''.format(_build))Printer.print_error(''.format(e))sys.exit()try:PolyaxonClient().build_job.logs(user,project_name,_build,message_handler=get_logs_handler(handle_job_info=False, show_timestamp=not hide_time))except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_build))Printer.print_error(''.format(e))sys.exit()", "docstring": "Get build logs.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon build -b 2 logs\n ```\n\n \\b\n ```bash\n $ polyaxon build logs\n ```", "id": "f1045:m10"} {"signature": "@click.group()@click.option('', '', type=str, help=\"\")@click.option('', '', type=int, help=\"\")@click.pass_context@clean_outputsdef job(ctx, project, job): ", "body": "ctx.obj = ctx.obj or {}ctx.obj[''] = projectctx.obj[''] = job", "docstring": "Commands for jobs.", "id": "f1046:m1"} {"signature": "@job.command()@click.pass_context@clean_outputsdef get(ctx):", "body": "user, project_name, _job = 
get_job_or_local(ctx.obj.get(''), ctx.obj.get(''))try:response = PolyaxonClient().job.get_job(user, project_name, _job)cache.cache(config_manager=JobManager, response=response)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_job))Printer.print_error(''.format(e))sys.exit()get_job_details(response)", "docstring": "Get job.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon job --job=1 get\n ```\n\n \\b\n ```bash\n $ polyaxon job --job=1 --project=project_name get\n ```", "id": "f1046:m2"} {"signature": "@job.command()@click.pass_context@clean_outputsdef delete(ctx):", "body": "user, project_name, _job = get_job_or_local(ctx.obj.get(''), ctx.obj.get(''))if not click.confirm(\"\".format(_job)):click.echo('')sys.exit()try:response = PolyaxonClient().job.delete_job(user, project_name, _job)JobManager.purge()except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_job))Printer.print_error(''.format(e))sys.exit()if response.status_code == :Printer.print_success(\"\".format(_job))", "docstring": "Delete job.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Example:\n\n \\b\n ```bash\n $ polyaxon job delete\n ```", "id": "f1046:m3"} {"signature": "@job.command()@click.option('', type=str,help='')@click.option('', type=str, help='')@click.option('', type=str, help='')@click.pass_context@clean_outputsdef update(ctx, name, description, tags):", "body": "user, project_name, _job = get_job_or_local(ctx.obj.get(''), ctx.obj.get(''))update_dict = {}if name:update_dict[''] = nameif description:update_dict[''] = descriptiontags = validate_tags(tags)if tags:update_dict[''] = tagsif not update_dict:Printer.print_warning('')sys.exit()try:response = PolyaxonClient().job.update_job(user, project_name, _job, update_dict)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_job))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\")get_job_details(response)", "docstring": "Update job.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Example:\n\n \\b\n ```bash\n $ polyaxon job -j 2 update --description=\"new description for my job\"\n ```", "id": "f1046:m4"} {"signature": "@job.command()@click.option('', '', is_flag=True, default=False,help=\"\"\"\")@click.pass_context@clean_outputsdef stop(ctx, yes):", "body": "user, project_name, _job = get_job_or_local(ctx.obj.get(''), ctx.obj.get(''))if not yes and not click.confirm(\"\"\"\".format(_job)):click.echo('')sys.exit()try:PolyaxonClient().job.stop(user, project_name, _job)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_job))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\")", "docstring": "Stop job.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon job stop\n ```\n\n \\b\n ```bash\n $ polyaxon job -xp 2 stop\n ```", "id": "f1046:m5"} {"signature": "@job.command()@click.option('', '', is_flag=True, default=False,help=\"\")@click.option('', '', multiple=True, type=click.Path(exists=True),help=\"\")@click.option('', is_flag=True, default=False,help=\"\")@click.pass_context@clean_outputsdef restart(ctx, copy, file, u): ", "body": "config = Noneupdate_code = Noneif file:config = rhea.read(file)if u:ctx.invoke(upload, sync=False)update_code = Trueuser, project_name, _job = 
get_job_or_local(ctx.obj.get(''), ctx.obj.get(''))try:if copy:response = PolyaxonClient().job.copy(user, project_name, _job, config=config, update_code=update_code)else:response = PolyaxonClient().job.restart(user, project_name, _job, config=config, update_code=update_code)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_job))Printer.print_error(''.format(e))sys.exit()get_job_details(response)", "docstring": "Restart job.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon job --job=1 restart\n ```", "id": "f1046:m6"} {"signature": "@job.command()@click.option('', '', multiple=True, type=click.Path(exists=True),help=\"\")@click.option('', is_flag=True, default=False,help=\"\")@click.pass_context@clean_outputsdef resume(ctx, file, u): ", "body": "config = Noneupdate_code = Noneif file:config = rhea.read(file)if u:ctx.invoke(upload, sync=False)update_code = Trueuser, project_name, _job = get_job_or_local(ctx.obj.get(''),ctx.obj.get(''))try:response = PolyaxonClient().job.resume(user, project_name, _job, config=config, update_code=update_code)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_job))Printer.print_error(''.format(e))sys.exit()get_job_details(response)", "docstring": "Resume job.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon job --job=1 resume\n ```", "id": "f1046:m7"} {"signature": "@job.command()@click.option('', type=int, help=\"\")@click.pass_context@clean_outputsdef statuses(ctx, page):", "body": "user, project_name, _job = get_job_or_local(ctx.obj.get(''), ctx.obj.get(''))page = page or try:response = PolyaxonClient().job.get_statuses(user, project_name, _job, page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_job))Printer.print_error(''.format(e))sys.exit()meta = get_meta_response(response)if meta:Printer.print_header(''.format(_job))Printer.print_header('')dict_tabulate(meta)else:Printer.print_header(''.format(_job))objects = list_dicts_to_tabulate([Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='')for o in response['']])if objects:Printer.print_header(\"\")objects.pop('', None)dict_tabulate(objects, is_list_dict=True)", "docstring": "Get job statuses.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon job -j 2 statuses\n ```", "id": "f1046:m8"} {"signature": "@job.command()@click.option('', '', is_flag=True, help='')@click.pass_context@clean_outputsdef resources(ctx, gpu):", "body": "user, project_name, _job = get_job_or_local(ctx.obj.get(''), ctx.obj.get(''))try:message_handler = Printer.gpu_resources if gpu else Printer.resourcesPolyaxonClient().job.resources(user,project_name,_job,message_handler=message_handler)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_job))Printer.print_error(''.format(e))sys.exit()", "docstring": "Get job resources.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon job -j 2 resources\n ```\n\n For GPU resources\n\n \\b\n ```bash\n $ polyaxon job -j 2 resources --gpu\n ```", "id": "f1046:m9"} {"signature": "@job.command()@click.option('', '', is_flag=True, help=\"\")@click.option('', '', is_flag=True, default=False,help=\"\")@click.option('', is_flag=True, 
default=False,help=\"\")@click.pass_context@clean_outputsdef logs(ctx, past, follow, hide_time):", "body": "user, project_name, _job = get_job_or_local(ctx.obj.get(''), ctx.obj.get(''))if past:try:response = PolyaxonClient().job.logs(user, project_name, _job, stream=False)get_logs_handler(handle_job_info=False,show_timestamp=not hide_time,stream=False)(response.content.decode().split(''))print()if not follow:returnexcept (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:if not follow:Printer.print_error(''.format(_job))Printer.print_error(''.format(e))sys.exit()try:PolyaxonClient().job.logs(user,project_name,_job,message_handler=get_logs_handler(handle_job_info=False, show_timestamp=not hide_time))except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_job))Printer.print_error(''.format(e))sys.exit()", "docstring": "Get job logs.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon job -j 2 logs\n ```\n\n \\b\n ```bash\n $ polyaxon job logs\n ```", "id": "f1046:m10"} {"signature": "@job.command()@click.pass_context@clean_outputsdef outputs(ctx):", "body": "user, project_name, _job = get_job_or_local(ctx.obj.get(''), ctx.obj.get(''))try:PolyaxonClient().job.download_outputs(user, project_name, _job)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_job))Printer.print_error(''.format(e))sys.exit()Printer.print_success('')", "docstring": "Download outputs for job.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon job -j 1 outputs\n ```", "id": "f1046:m11"} {"signature": "@job.command()@click.pass_context@clean_outputsdef bookmark(ctx):", "body": "user, project_name, _job = get_job_or_local(ctx.obj.get(''), ctx.obj.get(''))try:PolyaxonClient().job.bookmark(user, project_name, _job)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_job))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\")", "docstring": "Bookmark job.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon job bookmark\n ```\n\n \\b\n ```bash\n $ polyaxon job -xp 2 bookmark\n ```", "id": "f1046:m12"} {"signature": "@job.command()@click.pass_context@clean_outputsdef unbookmark(ctx):", "body": "user, project_name, _job = get_job_or_local(ctx.obj.get(''), ctx.obj.get(''))try:PolyaxonClient().job.unbookmark(user, project_name, _job)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_job))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\")", "docstring": "Unbookmark job.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon job unbookmark\n ```\n\n \\b\n ```bash\n $ polyaxon job -xp 2 unbookmark\n ```", "id": "f1046:m13"} {"signature": "@click.command()@click.option('', '', help='')@click.option('', '', help='')@click.option('', '', help='')@clean_outputsdef login(token, username, password):", "body": "auth_client = PolyaxonClient().authif username:if not password:password = click.prompt('', type=str, hide_input=True)password = password.strip()if not password:logger.info('''')sys.exit()credentials = CredentialsConfig(username=username, password=password)try:access_code = auth_client.login(credentials=credentials)except (PolyaxonHTTPError, 
PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error('')Printer.print_error(''.format(e))sys.exit()if not access_code:Printer.print_error(\"\")returnelse:if not token:token_url = \"\".format(auth_client.config.http_host)click.confirm('',abort=True, default=True)click.launch(token_url)logger.info(\"\")token = click.prompt('',type=str, hide_input=True)if not token:logger.info(\"\"\"\")logger.info(\"\")returnaccess_code = token.strip(\"\")try:AuthConfigManager.purge()user = PolyaxonClient().auth.get_user(token=access_code)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error('')Printer.print_error(''.format(e))sys.exit()access_token = AccessTokenConfig(username=user.username, token=access_code)AuthConfigManager.set_config(access_token)Printer.print_success(\"\")server_version = get_server_version()current_version = get_current_version()log_handler = get_log_handler()CliConfigManager.reset(check_count=,current_version=current_version,min_version=server_version.min_version,log_handler=log_handler)", "docstring": "Login to Polyaxon.", "id": "f1053:m0"} {"signature": "@click.command()@clean_outputsdef logout():", "body": "AuthConfigManager.purge()CliConfigManager.purge()Printer.print_success(\"\")", "docstring": "Logout of Polyaxon.", "id": "f1053:m1"} {"signature": "@click.command()@clean_outputsdef whoami():", "body": "try:user = PolyaxonClient().auth.get_user()except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error('')Printer.print_error(''.format(e))sys.exit()click.echo(\"\".format(**user.to_dict()))", "docstring": "Show current logged Polyaxon user.", "id": "f1053:m2"} {"signature": "@click.command()@click.argument('', type=str)@click.option('', is_flag=True, default=False, show_default=False,help='')@clean_outputsdef init(project, polyaxonfile):", "body": "user, project_name = get_project_or_local(project)try:project_config = PolyaxonClient().project.get_project(user, project_name)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(project))Printer.print_error(''''''.format(project_name))Printer.print_error(''.format(e))sys.exit()init_project = Falseif ProjectManager.is_initialized():local_project = ProjectManager.get_config()click.echo('')with clint.textui.indent():clint.textui.puts(''.format(local_project.user))clint.textui.puts(''.format(local_project.name))if click.confirm('', default=False):init_project = Trueelse:init_project = Trueif init_project:ProjectManager.purge()ProjectManager.set_config(project_config, init=True)Printer.print_success('')else:Printer.print_header('')init_ignore = Falseif IgnoreManager.is_initialized():click.echo('')if click.confirm('', default=False):init_ignore = Trueelse:init_ignore = Trueif init_ignore:IgnoreManager.init_config()Printer.print_success('')else:Printer.print_header('')if polyaxonfile:create_polyaxonfile()", "docstring": "Initialize a new polyaxonfile specification.", "id": "f1054:m1"} {"signature": "@tensorboard.command()@click.pass_context@clean_outputsdef url(ctx):", "body": "user, project_name = get_project_or_local(ctx.obj.get(''))group = ctx.obj.get('')experiment = ctx.obj.get('')if experiment:try:response = PolyaxonClient().experiment.get_experiment(username=user,project_name=project_name,experiment_id=experiment)obj = ''.format(experiment)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as 
e:Printer.print_error(''.format(experiment))Printer.print_error(''.format(e))sys.exit()elif group:try:response = PolyaxonClient().experiment_group.get_experiment_group(username=user,project_name=project_name,group_id=group)obj = ''.format(group)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(group))Printer.print_error(''.format(e))sys.exit()else:try:response = PolyaxonClient().project.get_project(username=user,project_name=project_name)obj = ''.format(project_name)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(project_name))Printer.print_error(''.format(e))sys.exit()if response.has_tensorboard:click.echo(get_tensorboard_url(user=user,project_name=project_name,experiment=experiment,group=group))else:Printer.print_warning(''.format(obj))click.echo('')", "docstring": "Prints the tensorboard url for project/experiment/experiment group.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples for project tensorboards:\n\n \\b\n ```bash\n $ polyaxon tensorboard url\n ```\n\n \\b\n ```bash\n $ polyaxon tensorboard -p mnist url\n ```\n\n Examples for experiment tensorboards:\n\n \\b\n ```bash\n $ polyaxon tensorboard -xp 1 url\n ```\n\n Examples for experiment group tensorboards:\n\n \\b\n ```bash\n $ polyaxon tensorboard -g 1 url\n ```", "id": "f1055:m2"} {"signature": "@tensorboard.command()@click.option('', '', multiple=True, type=click.Path(exists=True),help='')@click.pass_context@clean_outputsdef start(ctx, file): ", "body": "specification = Nonejob_config = Noneif file:specification = check_polyaxonfile(file, log=False).specificationif specification:check_polyaxonfile_kind(specification=specification, kind=specification._TENSORBOARD)job_config = specification.parsed_datauser, project_name = get_project_or_local(ctx.obj.get(''))group = ctx.obj.get('')experiment = ctx.obj.get('')if experiment:try:response = PolyaxonClient().experiment.start_tensorboard(username=user,project_name=project_name,experiment_id=experiment,job_config=job_config)obj = ''.format(experiment)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(experiment))Printer.print_error(''.format(e))sys.exit()elif group:try:response = PolyaxonClient().experiment_group.start_tensorboard(username=user,project_name=project_name,group_id=group,job_config=job_config)obj = ''.format(group)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(group))Printer.print_error(''.format(e))sys.exit()else:try:response = PolyaxonClient().project.start_tensorboard(username=user,project_name=project_name,job_config=job_config)obj = ''.format(project_name)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(project_name))Printer.print_error(''.format(e))sys.exit()if response.status_code == :Printer.print_header(\"\".format(obj))click.echo(get_tensorboard_url(user=user,project_name=project_name,experiment=experiment,group=group))sys.exit()if response.status_code != :Printer.print_error('')sys.exit()Printer.print_success(''.format(obj))clint.textui.puts(\"\")clint.textui.puts(\"\")with clint.textui.indent():clint.textui.puts(get_tensorboard_url(user, project_name, experiment, group))", "docstring": "Start a tensorboard deployment for project/experiment/experiment group.\n\n Project tensorboard will aggregate all experiments under the 
project.\n\n Experiment group tensorboard will aggregate all experiments under the group.\n\n Experiment tensorboard will show all metrics for an experiment.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Example: using the default tensorflow image 1.4.1.\n\n \\b\n ```bash\n $ polyaxon tensorboard start\n ```\n\n Example: with custom image and resources\n\n \\b\n ```bash\n $ polyaxon tensorboard start -f file -f file_override ...\n ```\n\n Example: starting a tensorboard for an experiment group\n\n \\b\n ```bash\n $ polyaxon tensorboard -g 1 start -f file\n ```\n\n Example: starting a tensorboard for an experiment\n\n \\b\n ```bash\n $ polyaxon tensorboard -xp 112 start -f file\n ```", "id": "f1055:m3"} {"signature": "@tensorboard.command()@click.option('', '', is_flag=True, default=False,help='''')@click.pass_context@clean_outputsdef stop(ctx, yes):", "body": "user, project_name = get_project_or_local(ctx.obj.get(''))group = ctx.obj.get('')experiment = ctx.obj.get('')if experiment:obj = ''.format(experiment)elif group:obj = ''.format(group)else:obj = ''.format(user, project_name)if not yes and not click.confirm(\"\"\"\".format(obj)):click.echo('')sys.exit()if experiment:try:PolyaxonClient().experiment.stop_tensorboard(username=user,project_name=project_name,experiment_id=experiment)Printer.print_success('')except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(obj))Printer.print_error(''.format(e))sys.exit()elif group:try:PolyaxonClient().experiment_group.stop_tensorboard(username=user,project_name=project_name,group_id=group)Printer.print_success('')except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(obj))Printer.print_error(''.format(e))sys.exit()else:try:PolyaxonClient().project.stop_tensorboard(username=user,project_name=project_name)Printer.print_success('')except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(obj))Printer.print_error(''.format(e))sys.exit()", "docstring": "Stops the tensorboard deployment for project/experiment/experiment group if it exists.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples: stopping project tensorboard\n\n \\b\n ```bash\n $ polyaxon tensorboard stop\n ```\n\n Examples: stopping experiment group tensorboard\n\n \\b\n ```bash\n $ polyaxon tensorboard -g 1 stop\n ```\n\n Examples: stopping experiment tensorboard\n\n \\b\n ```bash\n $ polyaxon tensorboard -xp 112 stop\n ```", "id": "f1055:m4"} {"signature": "@click.command()@click.option('', '', multiple=True, type=click.Path(exists=True),help='')@click.option('', '', is_flag=True, default=False, help='')@click.option('', '', is_flag=True, default=False,help='')@clean_outputsdef check(file, version,definition):", "body": "file = file or ''specification = check_polyaxonfile(file).specificationif version:Printer.decorate_format_value('',specification.version,'')if definition:job_condition = (specification.is_job orspecification.is_build orspecification.is_notebook orspecification.is_tensorboard)if specification.is_experiment:Printer.decorate_format_value('','','')if job_condition:Printer.decorate_format_value('',specification.kind,'')if specification.is_group:experiments_def = specification.experiments_defclick.echo('')get_group_experiments_info(**experiments_def)return specification", "docstring": "Check a polyaxonfile.", "id": "f1056:m3"} {"signature": "@click.command()@click.option('', '', 
type=str)@click.option('', '', multiple=True, type=click.Path(exists=True),help='')@click.option('', type=str,help='')@click.option('', type=str, help='')@click.option('', type=str,help='')@click.option('', type=int,help=\"\")@click.option('', is_flag=True, default=False,help='')@click.option('', is_flag=True, default=False,help='')@click.pass_context@clean_outputsdef run(ctx, project, file, name, tags, description, ttl, u, l): ", "body": "if not file:file = PolyaxonFile.check_default_path(path='')if not file:file = ''specification = check_polyaxonfile(file, log=False).specificationspec_cond = (specification.is_experiment orspecification.is_group orspecification.is_job orspecification.is_build)if not spec_cond:Printer.print_error(''''.format(specification.kind))if specification.is_notebook:click.echo('')elif specification.is_tensorboard:click.echo('')sys.exit()if u:if project:Printer.print_error('')click.echo('')sys.exit()ctx.invoke(upload, sync=False)user, project_name = get_project_or_local(project)project_client = PolyaxonClient().projecttags = validate_tags(tags)def run_experiment():click.echo('')experiment = ExperimentConfig(name=name,description=description,tags=tags,config=specification.parsed_data,ttl=ttl)try:response = PolyaxonClient().project.create_experiment(user,project_name,experiment)cache.cache(config_manager=ExperimentManager, response=response)Printer.print_success(''.format(response.id))except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error('')Printer.print_error(''.format(e))sys.exit()def run_group():click.echo('')experiments_def = specification.experiments_defget_group_experiments_info(**experiments_def)experiment_group = ExperimentGroupConfig(name=name,description=description,tags=tags,content=specification._data) try:response = project_client.create_experiment_group(user,project_name,experiment_group)cache.cache(config_manager=GroupManager, response=response)Printer.print_success(''.format(response.id))except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error('')Printer.print_error(''.format(e))sys.exit()def run_job():click.echo('')job = JobConfig(name=name,description=description,tags=tags,config=specification.parsed_data,ttl=ttl)try:response = project_client.create_job(user,project_name,job)cache.cache(config_manager=JobManager, response=response)Printer.print_success(''.format(response.id))except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error('')Printer.print_error(''.format(e))sys.exit()def run_build():click.echo('')job = JobConfig(name=name,description=description,tags=tags,config=specification.parsed_data,ttl=ttl)try:response = project_client.create_build(user,project_name,job)cache.cache(config_manager=BuildJobManager, response=response)Printer.print_success(''.format(response.id))except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error('')Printer.print_error(''.format(e))sys.exit()logs = Noneif specification.is_experiment:run_experiment()logs = experiment_logselif specification.is_group:run_group()elif specification.is_job:run_job()logs = job_logselif specification.is_build:run_build()logs = build_logsif l and logs:ctx.obj = {'': ''.format(user, project_name)}ctx.invoke(logs)", "docstring": "Run polyaxonfile specification.\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon run -f file -f file_override ...\n ```\n\n Upload before running\n\n \\b\n ```bash\n $ polyaxon run -f file -u\n 
```\n\n Run and set description and tags for this run\n\n \\b\n ```bash\n $ polyaxon run -f file -u --description=\"Description of the current run\" --tags=\"foo, bar, moo\"\n ```\n Run and set a unique name for this run\n\n \\b\n ```bash\n polyaxon run --name=foo\n ```\n\n Run for a specific project\n\n \\b\n ```bash\n $ polyaxon run -p project1 -f file.yaml\n ```", "id": "f1057:m0"} {"signature": "@click.group()@clean_outputsdef user():", "body": "", "docstring": "Commands for user management.", "id": "f1058:m0"} {"signature": "@user.command()@click.argument('', type=str)@clean_outputsdef activate(username):", "body": "try:PolyaxonClient().user.activate_user(username)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(username))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\".format(username))", "docstring": "Activate a user.\n\n Example:\n\n \\b\n ```bash\n $ polyaxon user activate david\n ```", "id": "f1058:m1"} {"signature": "@user.command()@click.argument('', type=str)@clean_outputsdef delete(username):", "body": "try:PolyaxonClient().user.delete_user(username)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(username))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\".format(username))", "docstring": "Delete a user.\n\n Example:\n\n \\b\n ```bash\n $ polyaxon user delete david\n ```", "id": "f1058:m2"} {"signature": "@click.group()@click.option('', '', type=str, help=\"\")@click.option('', '', type=int, help=\"\")@click.pass_context@clean_outputsdef experiment(ctx, project, experiment): ", "body": "ctx.obj = ctx.obj or {}ctx.obj[''] = projectctx.obj[''] = experiment", "docstring": "Commands for experiments.", "id": "f1059:m1"} {"signature": "@experiment.command()@click.option('', '', type=int, help=\"\")@click.pass_context@clean_outputsdef get(ctx, job):", "body": "def get_experiment():try:response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment)cache.cache(config_manager=ExperimentManager, response=response)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_experiment))Printer.print_error(''.format(e))sys.exit()get_experiment_details(response)def get_experiment_job():try:response = PolyaxonClient().experiment_job.get_job(user,project_name,_experiment,_job)cache.cache(config_manager=ExperimentJobManager, response=response)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_job))Printer.print_error(''.format(e))sys.exit()if response.resources:get_resources(response.resources.to_dict(), header=\"\")response = Printer.add_status_color(response.to_light_dict(humanize_values=True,exclude_attrs=['', '', '', '', '']))Printer.print_header(\"\")dict_tabulate(response)user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get(''),ctx.obj.get(''))if job:_job = get_experiment_job_or_local(job)get_experiment_job()else:get_experiment()", "docstring": "Get experiment or experiment job.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples for getting an experiment:\n\n \\b\n ```bash\n $ polyaxon experiment get # if experiment is cached\n ```\n\n \\b\n ```bash\n $ polyaxon experiment --experiment=1 get\n ```\n\n \\b\n ```bash\n $ polyaxon experiment -xp 1 --project=cats-vs-dogs get\n ```\n\n \\b\n ```bash\n $ polyaxon experiment -xp 1 -p alain/cats-vs-dogs 
get\n ```\n\n Examples for getting an experiment job:\n\n \\b\n ```bash\n $ polyaxon experiment get -j 1 # if experiment is cached\n ```\n\n \\b\n ```bash\n $ polyaxon experiment --experiment=1 get --job=10\n ```\n\n \\b\n ```bash\n $ polyaxon experiment -xp 1 --project=cats-vs-dogs get -j 2\n ```\n\n \\b\n ```bash\n $ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get -j 2\n ```", "id": "f1059:m2"} {"signature": "@experiment.command()@click.pass_context@clean_outputsdef delete(ctx):", "body": "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get(''),ctx.obj.get(''))if not click.confirm(\"\".format(_experiment)):click.echo('')sys.exit()try:response = PolyaxonClient().experiment.delete_experiment(user, project_name, _experiment)ExperimentManager.purge()except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_experiment))Printer.print_error(''.format(e))sys.exit()if response.status_code == :Printer.print_success(\"\".format(_experiment))", "docstring": "Delete experiment.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Example:\n\n \\b\n ```bash\n $ polyaxon experiment delete\n ```", "id": "f1059:m3"} {"signature": "@experiment.command()@click.option('', type=str,help='')@click.option('', type=str, help='')@click.option('', type=str, help='')@click.pass_context@clean_outputsdef update(ctx, name, description, tags):", "body": "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get(''),ctx.obj.get(''))update_dict = {}if name:update_dict[''] = nameif description:update_dict[''] = descriptiontags = validate_tags(tags)if tags:update_dict[''] = tagsif not update_dict:Printer.print_warning('')sys.exit()try:response = PolyaxonClient().experiment.update_experiment(user, project_name, _experiment, update_dict)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_experiment))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\")get_experiment_details(response)", "docstring": "Update experiment.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon experiment -xp 2 update --description=\"new description for my experiments\"\n ```\n\n \\b\n ```bash\n $ polyaxon experiment -xp 2 update --tags=\"foo, bar\" --name=\"unique-name\"\n ```", "id": "f1059:m4"} {"signature": "@experiment.command()@click.option('', '', is_flag=True, default=False,help=\"\"\"\")@click.pass_context@clean_outputsdef stop(ctx, yes):", "body": "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get(''),ctx.obj.get(''))if not yes and not click.confirm(\"\"\"\".format(_experiment)):click.echo('')sys.exit()try:PolyaxonClient().experiment.stop(user, project_name, _experiment)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_experiment))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\")", "docstring": "Stop experiment.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon experiment stop\n ```\n\n \\b\n ```bash\n $ polyaxon experiment -xp 2 stop\n ```", "id": "f1059:m5"} {"signature": "@experiment.command()@click.option('', '', is_flag=True, default=False,help=\"\")@click.option('', '', multiple=True, type=click.Path(exists=True),help=\"\")@click.option('', is_flag=True, default=False,help=\"\")@click.pass_context@clean_outputsdef restart(ctx, copy, file, u): ", 
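The `get` command above (and `statuses`, `resources` and `logs` later on) all branch on whether a `--job` option was passed, using small nested closures for the experiment-level and job-level paths. A dependency-free sketch of that branching; the fetch functions are hypothetical stand-ins for the client calls:

```python
# Sketch of the experiment-vs-job branching used by get/statuses/resources/logs.
def show(experiment_id, job_id=None):
    def get_experiment():
        print('fetching experiment %s' % experiment_id)

    def get_experiment_job():
        print('fetching job %s of experiment %s' % (job_id, experiment_id))

    if job_id is not None:
        get_experiment_job()
    else:
        get_experiment()


show(1)             # experiment-level view
show(1, job_id=3)   # job-level view
```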
"body": "config = Noneupdate_code = Noneif file:config = rhea.read(file)if u:ctx.invoke(upload, sync=False)update_code = Trueuser, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get(''),ctx.obj.get(''))try:if copy:response = PolyaxonClient().experiment.copy(user, project_name, _experiment, config=config, update_code=update_code)Printer.print_success(''.format(response.id))else:response = PolyaxonClient().experiment.restart(user, project_name, _experiment, config=config, update_code=update_code)Printer.print_success(''.format(response.id))except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_experiment))Printer.print_error(''.format(e))sys.exit()", "docstring": "Restart experiment.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon experiment --experiment=1 restart\n ```", "id": "f1059:m6"} {"signature": "@experiment.command()@click.option('', '', multiple=True, type=click.Path(exists=True),help=\"\")@click.option('', is_flag=True, default=False,help=\"\")@click.pass_context@clean_outputsdef resume(ctx, file, u): ", "body": "config = Noneupdate_code = Noneif file:config = rhea.read(file)if u:ctx.invoke(upload, sync=False)update_code = Trueuser, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get(''),ctx.obj.get(''))try:response = PolyaxonClient().experiment.resume(user, project_name, _experiment, config=config, update_code=update_code)Printer.print_success(''.format(response.id))except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_experiment))Printer.print_error(''.format(e))sys.exit()", "docstring": "Resume experiment.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon experiment --experiment=1 resume\n ```", "id": "f1059:m7"} {"signature": "@experiment.command()@click.option('', type=int, help=\"\")@click.pass_context@clean_outputsdef jobs(ctx, page):", "body": "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get(''),ctx.obj.get(''))page = page or try:response = PolyaxonClient().experiment.list_jobs(user, project_name, _experiment, page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_experiment))Printer.print_error(''.format(e))sys.exit()meta = get_meta_response(response)if meta:Printer.print_header(''.format(_experiment))Printer.print_header('')dict_tabulate(meta)else:Printer.print_header(''.format(_experiment))objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))for o in response['']]objects = list_dicts_to_tabulate(objects)if objects:Printer.print_header(\"\")objects.pop('', None)dict_tabulate(objects, is_list_dict=True)", "docstring": "List jobs for experiment.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon experiment --experiment=1 jobs\n ```", "id": "f1059:m8"} {"signature": "@experiment.command()@click.option('', '', type=int, help=\"\")@click.option('', type=int, help=\"\")@click.pass_context@clean_outputsdef statuses(ctx, job, page):", "body": "def get_experiment_statuses():try:response = PolyaxonClient().experiment.get_statuses(user, project_name, _experiment, page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_experiment))Printer.print_error(''.format(e))sys.exit()meta = 
get_meta_response(response)if meta:Printer.print_header(''.format(_experiment))Printer.print_header('')dict_tabulate(meta)else:Printer.print_header(''.format(_experiment))objects = list_dicts_to_tabulate([Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='')for o in response['']])if objects:Printer.print_header(\"\")objects.pop('', None)dict_tabulate(objects, is_list_dict=True)def get_experiment_job_statuses():try:response = PolyaxonClient().experiment_job.get_statuses(user,project_name,_experiment,_job,page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(job))Printer.print_error(''.format(e))sys.exit()meta = get_meta_response(response)if meta:Printer.print_header(''.format(_job))Printer.print_header('')dict_tabulate(meta)else:Printer.print_header(''.format(_job))objects = list_dicts_to_tabulate([Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='')for o in response['']])if objects:Printer.print_header(\"\")objects.pop('', None)dict_tabulate(objects, is_list_dict=True)page = page or user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get(''),ctx.obj.get(''))if job:_job = get_experiment_job_or_local(job)get_experiment_job_statuses()else:get_experiment_statuses()", "docstring": "Get experiment or experiment job statuses.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples getting experiment statuses:\n\n \\b\n ```bash\n $ polyaxon experiment statuses\n ```\n\n \\b\n ```bash\n $ polyaxon experiment -xp 1 statuses\n ```\n\n Examples getting experiment job statuses:\n\n \\b\n ```bash\n $ polyaxon experiment statuses -j 3\n ```\n\n \\b\n ```bash\n $ polyaxon experiment -xp 1 statuses --job 1\n ```", "id": "f1059:m9"} {"signature": "@experiment.command()@click.option('', '', type=int, help=\"\")@click.option('', '', is_flag=True, help=\"\")@click.pass_context@clean_outputsdef resources(ctx, job, gpu):", "body": "def get_experiment_resources():try:message_handler = Printer.gpu_resources if gpu else Printer.resourcesPolyaxonClient().experiment.resources(user, project_name, _experiment, message_handler=message_handler)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_experiment))Printer.print_error(''.format(e))sys.exit()def get_experiment_job_resources():try:message_handler = Printer.gpu_resources if gpu else Printer.resourcesPolyaxonClient().experiment_job.resources(user,project_name,_experiment,_job,message_handler=message_handler)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_job))Printer.print_error(''.format(e))sys.exit()user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get(''),ctx.obj.get(''))if job:_job = get_experiment_job_or_local(job)get_experiment_job_resources()else:get_experiment_resources()", "docstring": "Get experiment or experiment job resources.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples for getting experiment resources:\n\n \\b\n ```bash\n $ polyaxon experiment -xp 19 resources\n ```\n\n For GPU resources\n\n \\b\n ```bash\n $ polyaxon experiment -xp 19 resources --gpu\n ```\n\n Examples for getting experiment job resources:\n\n \\b\n ```bash\n $ polyaxon experiment -xp 19 resources -j 1\n ```\n\n For GPU resources\n\n \\b\n ```bash\n $ polyaxon experiment -xp 19 resources -j 1 --gpu\n ```", "id": "f1059:m10"} {"signature": 
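The listing commands (`jobs`, `statuses` and the project/bookmark listings further on) share one rendering loop: print the pagination meta when the response is paginated, then tabulate the returned objects. A small sketch of that loop; `response` is a hypothetical stand-in for the API payload, not the client's actual return type:

```python
# Sketch of the shared list rendering: pagination meta first, then a table.
def render_list(response, page=1):
    meta = {k: response[k] for k in ('count', 'next', 'previous') if k in response}
    if meta:
        print('Navigation (page %d):' % page)
        for key, value in meta.items():
            print('  %-10s %s' % (key, value))
    results = response.get('results', [])
    if not results:
        print('No results found.')
        return
    headers = sorted({k for obj in results for k in obj})
    print(' | '.join(headers))
    for obj in results:
        print(' | '.join(str(obj.get(h, '')) for h in headers))


render_list({'count': 2, 'next': None, 'previous': None,
             'results': [{'id': 1, 'status': 'succeeded'},
                         {'id': 2, 'status': 'running'}]})
```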
"@experiment.command()@click.option('', '', type=int, help=\"\")@click.option('', '', is_flag=True, help=\"\")@click.option('', '', is_flag=True, default=False,help=\"\")@click.option('', is_flag=True, default=False,help=\"\")@click.pass_context@clean_outputsdef logs(ctx, job, past, follow, hide_time):", "body": "def get_experiment_logs():if past:try:response = PolyaxonClient().experiment.logs(user, project_name, _experiment, stream=False)get_logs_handler(handle_job_info=True,show_timestamp=not hide_time,stream=False)(response.content.decode().split(''))print()if not follow:returnexcept (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:if not follow:Printer.print_error(''.format(_experiment))Printer.print_error(''.format(e))sys.exit()try:PolyaxonClient().experiment.logs(user,project_name,_experiment,message_handler=get_logs_handler(handle_job_info=True,show_timestamp=not hide_time))except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_experiment))Printer.print_error(''.format(e))sys.exit()def get_experiment_job_logs():if past:try:response = PolyaxonClient().experiment_job.logs(user,project_name,_experiment,_job,stream=False)get_logs_handler(handle_job_info=True,show_timestamp=not hide_time,stream=False)(response.content.decode().split(''))print()if not follow:returnexcept (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:if not follow:Printer.print_error(''.format(_experiment))Printer.print_error(''.format(e))sys.exit()try:PolyaxonClient().experiment_job.logs(user,project_name,_experiment,_job,message_handler=get_logs_handler(handle_job_info=True,show_timestamp=not hide_time))except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_job))Printer.print_error(''.format(e))sys.exit()user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get(''),ctx.obj.get(''))if job:_job = get_experiment_job_or_local(job)get_experiment_job_logs()else:get_experiment_logs()", "docstring": "Get experiment or experiment job logs.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples for getting experiment logs:\n\n \\b\n ```bash\n $ polyaxon experiment logs\n ```\n\n \\b\n ```bash\n $ polyaxon experiment -xp 10 -p mnist logs\n ```\n\n Examples for getting experiment job logs:\n\n \\b\n ```bash\n $ polyaxon experiment -xp 1 -j 1 logs\n ```", "id": "f1059:m11"} {"signature": "@experiment.command()@click.pass_context@clean_outputsdef outputs(ctx):", "body": "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get(''),ctx.obj.get(''))try:PolyaxonClient().experiment.download_outputs(user, project_name, _experiment)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_experiment))Printer.print_error(''.format(e))sys.exit()Printer.print_success('')", "docstring": "Download outputs for experiment.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon experiment -xp 1 outputs\n ```", "id": "f1059:m12"} {"signature": "@experiment.command()@click.pass_context@clean_outputsdef bookmark(ctx):", "body": "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get(''),ctx.obj.get(''))try:PolyaxonClient().experiment.bookmark(user, project_name, _experiment)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as 
e:Printer.print_error(''.format(_experiment))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\")", "docstring": "Bookmark experiment.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon experiment bookmark\n ```\n\n \\b\n ```bash\n $ polyaxon experiment -xp 2 bookmark\n ```", "id": "f1059:m13"} {"signature": "@experiment.command()@click.pass_context@clean_outputsdef unbookmark(ctx):", "body": "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get(''),ctx.obj.get(''))try:PolyaxonClient().experiment.unbookmark(user, project_name, _experiment)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_experiment))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\")", "docstring": "Unbookmark experiment.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon experiment unbookmark\n ```\n\n \\b\n ```bash\n $ polyaxon experiment -xp 2 unbookmark\n ```", "id": "f1059:m14"} {"signature": "@notebook.command()@click.pass_context@clean_outputsdef url(ctx):", "body": "user, project_name = get_project_or_local(ctx.obj.get(''))try:response = PolyaxonClient().project.get_project(user, project_name)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(project_name))Printer.print_error(''.format(e))sys.exit()if response.has_notebook:click.echo(get_notebook_url(user, project_name))else:Printer.print_warning(''.format(project_name))click.echo('')", "docstring": "Prints the notebook url for this project.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Example:\n\n \\b\n ```bash\n $ polyaxon notebook url\n ```", "id": "f1060:m2"} {"signature": "@notebook.command()@click.option('', '', multiple=True, type=click.Path(exists=True),help='')@click.option('', is_flag=True, default=False,help='')@click.pass_context@clean_outputsdef start(ctx, file, u): ", "body": "specification = Nonejob_config = Noneif file:specification = check_polyaxonfile(file, log=False).specificationif u:ctx.invoke(upload, sync=False)if specification:check_polyaxonfile_kind(specification=specification, kind=specification._NOTEBOOK)job_config = specification.parsed_datauser, project_name = get_project_or_local(ctx.obj.get(''))try:response = PolyaxonClient().project.start_notebook(user, project_name, job_config)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(project_name))Printer.print_error(''.format(e))sys.exit()if response.status_code == :Printer.print_header(\"\")click.echo(get_notebook_url(user, project_name))sys.exit()if response.status_code != :Printer.print_error('')sys.exit()Printer.print_success(''.format(project_name))clint.textui.puts(\"\")clint.textui.puts(\"\")with clint.textui.indent():clint.textui.puts(get_notebook_url(user, project_name))", "docstring": "Start a notebook deployment for this project.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Example:\n\n \\b\n ```bash\n $ polyaxon notebook start -f file -f file_override ...\n ```\n\n Example: upload before running\n\n \\b\n ```bash\n $ polyaxon -p user12/mnist notebook start -f file -u\n ```", "id": "f1060:m3"} {"signature": "@notebook.command()@click.option('', type=bool,help='')@click.option('', '', is_flag=True, default=False,help='''')@click.pass_context@clean_outputsdef stop(ctx, commit, yes):", "body": "user, project_name = 
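The `logs` command shown above works in two phases: with `--past` it first dumps any historical logs in one non-streaming request, then (unless following is disabled) switches to a streaming handler. A hedged sketch of that flow, with `fetch_past_logs` and `stream_logs` as hypothetical stand-ins for the client calls:

```python
# Sketch of the two-phase log flow: print past logs once, then keep following.
def fetch_past_logs():
    return 'line 1\nline 2'


def stream_logs():
    for line in ('line 3', 'line 4'):
        yield line


def show_logs(past=True, follow=True):
    if past:
        for line in fetch_past_logs().split('\n'):
            print(line)
        if not follow:
            return
    for line in stream_logs():
        print(line)


show_logs()
```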
get_project_or_local(ctx.obj.get(''))if not yes and not click.confirm(\"\"\"\".format(user, project_name)):click.echo('')sys.exit()if commit is None:commit = Truetry:PolyaxonClient().project.stop_notebook(user, project_name, commit)Printer.print_success('')except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(project_name))Printer.print_error(''.format(e))sys.exit()", "docstring": "Stops the notebook deployment for this project if it exists.\n\n Uses [Caching](/references/polyaxon-cli/#caching)", "id": "f1060:m4"} {"signature": "@click.group()@click.option('', '', type=str)@click.pass_context@clean_outputsdef project(ctx, project): ", "body": "if ctx.invoked_subcommand not in ['', '']:ctx.obj = ctx.obj or {}ctx.obj[''] = project", "docstring": "Commands for projects.", "id": "f1061:m1"} {"signature": "@project.command()@click.option('', required=True, type=str,help='')@click.option('', type=str, help='')@click.option('', type=str, help='')@click.option('', is_flag=True, help='')@click.option('', is_flag=True, help='')@click.pass_context@clean_outputsdef create(ctx, name, description, tags, private, init):", "body": "try:tags = tags.split('') if tags else Noneproject_dict = dict(name=name, description=description, is_public=not private, tags=tags)project_config = ProjectConfig.from_dict(project_dict)except ValidationError:Printer.print_error('')sys.exit()try:_project = PolyaxonClient().project.create_project(project_config)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(name))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\".format(_project.name))if init:ctx.obj = {}ctx.invoke(init_project, project=name)", "docstring": "Create a new project.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Example:\n\n \\b\n ```bash\n $ polyaxon project create --name=cats-vs-dogs --description=\"Image Classification with DL\"\n ```", "id": "f1061:m2"} {"signature": "@project.command()@click.option('', type=int, help='')@clean_outputsdef list(page): ", "body": "user = AuthConfigManager.get_value('')if not user:Printer.print_error('')page = page or try:response = PolyaxonClient().project.list_projects(user, page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error('')Printer.print_error(''.format(e))sys.exit()meta = get_meta_response(response)if meta:Printer.print_header('')Printer.print_header('')dict_tabulate(meta)else:Printer.print_header('')objects = list_dicts_to_tabulate([o.to_light_dict(humanize_values=True,exclude_attrs=['', '', '', '','', '','', '', '', ''])for o in response['']])if objects:Printer.print_header(\"\")dict_tabulate(objects, is_list_dict=True)", "docstring": "List projects.\n\n Uses [Caching](/references/polyaxon-cli/#caching)", "id": "f1061:m3"} {"signature": "@project.command()@click.pass_context@clean_outputsdef get(ctx):", "body": "user, project_name = get_project_or_local(ctx.obj.get(''))try:response = PolyaxonClient().project.get_project(user, project_name)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(project_name))Printer.print_error(''.format(e))sys.exit()get_project_details(response)", "docstring": "Get info for current project, by project_name, or user/project_name.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n To get current project:\n\n \\b\n ```bash\n $ polyaxon project get\n 
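The project `create` command assembles a payload from its options, normalizes the comma-separated tags and validates the result before calling the API. The sketch below only illustrates that assembly; the real CLI builds a `ProjectConfig` and catches a schema `ValidationError` rather than raising `ValueError`:

```python
# Illustrative payload assembly for project creation.
def validate_tags(tags):
    if not tags:
        return None
    return [t.strip() for t in tags.split(',') if t.strip()]


def build_project_payload(name, description=None, tags=None, private=False):
    if not name:
        raise ValueError('The project config is not valid: a name is required.')
    return dict(name=name,
                description=description,
                is_public=not private,
                tags=validate_tags(tags))


print(build_project_payload('cats-vs-dogs',
                            description='Image Classification with DL',
                            tags='vision, classification'))
```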
```\n\n To get a project by name\n\n \\b\n ```bash\n $ polyaxon project get user/project\n ```", "id": "f1061:m4"} {"signature": "@project.command()@click.pass_context@clean_outputsdef delete(ctx):", "body": "user, project_name = get_project_or_local(ctx.obj.get(''))if not click.confirm(\"\".format(user, project_name)):click.echo('')sys.exit()try:response = PolyaxonClient().project.delete_project(user, project_name)local_project = ProjectManager.get_config()if local_project and (user, project_name) == (local_project.user, local_project.name):ProjectManager.purge()except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(user, project_name))Printer.print_error(''.format(e))sys.exit()if response.status_code == :Printer.print_success(\"\".format(user, project_name))", "docstring": "Delete project.\n\n Uses [Caching](/references/polyaxon-cli/#caching)", "id": "f1061:m5"} {"signature": "@project.command()@click.option('', type=str,help='')@click.option('', type=str, help='')@click.option('', type=str, help='')@click.option('', type=bool, help='')@click.pass_context@clean_outputsdef update(ctx, name, description, tags, private):", "body": "user, project_name = get_project_or_local(ctx.obj.get(''))update_dict = {}if name:update_dict[''] = nameif description:update_dict[''] = descriptionif private is not None:update_dict[''] = not privatetags = validate_tags(tags)if tags:update_dict[''] = tagsif not update_dict:Printer.print_warning('')sys.exit()try:response = PolyaxonClient().project.update_project(user, project_name, update_dict)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(project_name))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\")get_project_details(response)", "docstring": "Update project.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Example:\n\n \\b\n ```bash\n $ polyaxon update foobar --description=\"Image Classification with DL using TensorFlow\"\n ```\n\n \\b\n ```bash\n $ polyaxon update mike1/foobar --description=\"Image Classification with DL using TensorFlow\"\n ```\n\n \\b\n ```bash\n $ polyaxon update --tags=\"foo, bar\"\n ```", "id": "f1061:m6"} {"signature": "@project.command()@click.option('', '', type=str,help='')@click.option('', '', type=str, help='')@click.option('', type=int, help='')@click.pass_context@clean_outputsdef groups(ctx, query, sort, page):", "body": "user, project_name = get_project_or_local(ctx.obj.get(''))page = page or try:response = PolyaxonClient().project.list_experiment_groups(username=user,project_name=project_name,query=query,sort=sort,page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(project_name))Printer.print_error(''.format(e))sys.exit()meta = get_meta_response(response)if meta:Printer.print_header(''.format(user, project_name))Printer.print_header('')dict_tabulate(meta)else:Printer.print_header(''.format(user, project_name))objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))for o in response['']]objects = list_dicts_to_tabulate(objects)if objects:Printer.print_header(\"\")objects.pop('', None)objects.pop('', None)dict_tabulate(objects, is_list_dict=True)", "docstring": "List experiment groups for this project.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n Get all groups:\n\n \\b\n ```bash\n $ polyaxon project groups\n ```\n\n Get all groups with with status {created or 
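The `update` commands (project, experiment, group) all follow an "only send what changed" pattern: build a dict from the options that were actually provided and warn when there is nothing to patch. A small dependency-free sketch:

```python
# Sketch of the update_dict pattern shared by the update commands.
def build_update(name=None, description=None, tags=None, private=None):
    update_dict = {}
    if name:
        update_dict['name'] = name
    if description:
        update_dict['description'] = description
    if private is not None:
        update_dict['is_public'] = not private
    if tags:
        update_dict['tags'] = [t.strip() for t in tags.split(',')]
    return update_dict


patch = build_update(description='Image Classification with DL using TensorFlow')
if not patch:
    print('No argument was provided to update the project.')
else:
    print(patch)
```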
running}, and\n creation date between 2018-01-01 and 2018-01-02,\n and search algorithm not in {grid or random search}\n\n \\b\n ```bash\n $ polyaxon project groups \\\n -q \"status:created|running, started_at:2018-01-01..2018-01-02, search_algorithm:~grid|random\"\n ```\n\n Get all groups sorted by update date\n\n \\b\n ```bash\n $ polyaxon project groups -s \"-updated_at\"\n ```", "id": "f1061:m7"} {"signature": "@project.command()@click.option('', '', type=str,help='')@click.option('', '', type=str, help='')@click.option('', type=int, help='')@click.pass_context@clean_outputs@clean_outputsdef jobs(ctx, query, sort, page):", "body": "user, project_name = get_project_or_local(ctx.obj.get(''))page = page or try:response = PolyaxonClient().project.list_jobs(username=user,project_name=project_name,query=query,sort=sort,page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(project_name))Printer.print_error(''.format(e))sys.exit()meta = get_meta_response(response)if meta:Printer.print_header(''.format(user, project_name))Printer.print_header('')dict_tabulate(meta)else:Printer.print_header(''.format(user, project_name))objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))for o in response['']]objects = list_dicts_to_tabulate(objects)if objects:Printer.print_header(\"\")objects.pop('', None)dict_tabulate(objects, is_list_dict=True)", "docstring": "List jobs for this project.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n Get all jobs:\n\n \\b\n ```bash\n $ polyaxon project jobs\n ```\n\n Get all jobs with with status not in {created or running}\n\n \\b\n ```bash\n $ polyaxon project jobs -q \"status:~created|running\"\n ```\n\n Get all jobs with with status failed\n\n \\b\n ```bash\n $ polyaxon project jobs -q \"status:failed\"\n ```\n\n Get all jobs sorted by update date\n\n \\b\n ```bash\n $ polyaxon project jobs -s \"-updated_at\"\n ```", "id": "f1061:m8"} {"signature": "@project.command()@click.option('', '', is_flag=True, help='')@click.option('', '', is_flag=True,help='')@click.option('', '', is_flag=True, help='')@click.option('', '', type=int, help='')@click.option('', '', type=str,help='')@click.option('', '', type=str, help='')@click.option('', type=int, help='')@click.pass_context@clean_outputs@clean_outputsdef experiments(ctx, metrics, declarations, independent, group, query, sort, page):", "body": "user, project_name = get_project_or_local(ctx.obj.get(''))page = page or try:response = PolyaxonClient().project.list_experiments(username=user,project_name=project_name,independent=independent,group=group,metrics=metrics,declarations=declarations,query=query,sort=sort,page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(project_name))Printer.print_error(''.format(e))sys.exit()meta = get_meta_response(response)if meta:Printer.print_header(''.format(user, project_name))Printer.print_header('')dict_tabulate(meta)else:Printer.print_header(''.format(user, project_name))if metrics:objects = get_experiments_with_metrics(response)elif declarations:objects = get_experiments_with_declarations(response)else:objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))for o in response['']]objects = list_dicts_to_tabulate(objects)if objects:Printer.print_header(\"\")objects.pop('', None)dict_tabulate(objects, is_list_dict=True)", "docstring": "List experiments for this project.\n\n Uses 
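The `-q` option in these listing commands takes comma-separated `field:condition` pairs, with `|` for alternatives, `..` for date ranges and `~` for negation, as the docstring examples show. The tiny parser below only illustrates that surface format; the actual filtering is done server-side, not by the CLI:

```python
# Illustrative parser for the `-q` filter strings shown in the docstrings.
def parse_query(query):
    conditions = {}
    for part in query.split(','):
        field, _, condition = part.strip().partition(':')
        conditions[field] = condition
    return conditions


print(parse_query('status:created|running, '
                  'started_at:2018-01-01..2018-01-02, '
                  'search_algorithm:~grid|random'))
# {'status': 'created|running',
#  'started_at': '2018-01-01..2018-01-02',
#  'search_algorithm': '~grid|random'}
```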
[Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n Get all experiments:\n\n \\b\n ```bash\n $ polyaxon project experiments\n ```\n\n Get all experiments with with status {created or running}, and\n creation date between 2018-01-01 and 2018-01-02, and declarations activation equal to sigmoid\n and metric loss less or equal to 0.2\n\n \\b\n ```bash\n $ polyaxon project experiments \\\n -q \"status:created|running, started_at:2018-01-01..2018-01-02, \\\n declarations.activation:sigmoid, metric.loss:<=0.2\"\n ```\n\n Get all experiments sorted by update date\n\n \\b\n ```bash\n $ polyaxon project experiments -s \"-updated_at\"\n ```", "id": "f1061:m9"} {"signature": "@project.command()@click.option('', type=int, help='')@click.option('', '', type=str,help='')@click.option('', '', type=str, help='')@click.pass_context@clean_outputsdef builds(ctx, query, sort, page):", "body": "user, project_name = get_project_or_local(ctx.obj.get(''))page = page or try:response = PolyaxonClient().project.list_builds(username=user,project_name=project_name,query=query,sort=sort,page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(project_name))Printer.print_error(''.format(e))sys.exit()meta = get_meta_response(response)if meta:Printer.print_header(''.format(user, project_name))Printer.print_header('')dict_tabulate(meta)else:Printer.print_header(''.format(user, project_name))objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))for o in response['']]objects = list_dicts_to_tabulate(objects)if objects:Printer.print_header(\"\")objects.pop('', None)dict_tabulate(objects, is_list_dict=True)", "docstring": "List build jobs for this project.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n Get all builds:\n\n \\b\n ```bash\n $ polyaxon project builds\n ```\n\n Get all builds with with status not in {created or running}\n\n \\b\n ```bash\n $ polyaxon project builds -q \"status:~created\"\n ```\n\n Get all builds with with status failed\n\n \\b\n ```bash\n $ polyaxon project builds -q \"status:failed\"\n ```\n\n Get all builds sorted by update date\n\n \\b\n ```bash\n $ polyaxon project builds -s \"-updated_at\"\n ```", "id": "f1061:m10"} {"signature": "@project.command()@click.option('', type=int, help='')@click.option('', '', type=str,help='')@click.option('', '', type=str, help='')@click.pass_context@clean_outputsdef tensorboards(ctx, query, sort, page):", "body": "user, project_name = get_project_or_local(ctx.obj.get(''))page = page or try:response = PolyaxonClient().project.list_tensorboards(username=user,project_name=project_name,query=query,sort=sort,page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(project_name))Printer.print_error(''.format(e))sys.exit()meta = get_meta_response(response)if meta:Printer.print_header(''.format(user, project_name))Printer.print_header('')dict_tabulate(meta)else:Printer.print_header(''.format(user,project_name))objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))for o in response['']]objects = list_dicts_to_tabulate(objects)if objects:Printer.print_header(\"\")objects.pop('', None)dict_tabulate(objects, is_list_dict=True)", "docstring": "List tensorboard jobs for this project.\n\n Uses [Caching](/references/polyaxon-cli/#caching)", "id": "f1061:m11"} {"signature": "@project.command()@click.option('', is_flag=True, help='')@click.option('', is_flag=True, 
help='')@click.pass_context@clean_outputsdef ci(ctx, enable, disable): ", "body": "user, project_name = get_project_or_local(ctx.obj.get(''))def enable_ci():try:PolyaxonClient().project.enable_ci(user, project_name)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(project_name))Printer.print_error(''.format(e))sys.exit()Printer.print_success(''.format(project_name))def disable_ci():try:PolyaxonClient().project.disable_ci(user, project_name)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(project_name))Printer.print_error(''.format(e))sys.exit()Printer.print_success(''.format(project_name))if enable:enable_ci()if disable:disable_ci()", "docstring": "Enable/Disable CI on this project.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Example:\n\n \\b\n ```bash\n $ polyaxon project ci --enable\n ```\n\n \\b\n ```bash\n $ polyaxon project ci --disable\n ```", "id": "f1061:m13"} {"signature": "@project.command()@click.pass_context@clean_outputsdef download(ctx):", "body": "user, project_name = get_project_or_local(ctx.obj.get(''))try:PolyaxonClient().project.download_repo(user, project_name)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(project_name))Printer.print_error(''.format(e))sys.exit()Printer.print_success('')", "docstring": "Download code of the current project.", "id": "f1061:m14"} {"signature": "@project.command()@click.pass_context@clean_outputsdef bookmark(ctx):", "body": "user, project_name = get_project_or_local(ctx.obj.get(''))try:PolyaxonClient().project.bookmark(user, project_name)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(user, project_name))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\".format(user, project_name))", "docstring": "Bookmark project.\n\n Uses [Caching](/references/polyaxon-cli/#caching)", "id": "f1061:m15"} {"signature": "@project.command()@click.pass_context@clean_outputsdef unbookmark(ctx):", "body": "user, project_name = get_project_or_local(ctx.obj.get(''))try:PolyaxonClient().project.unbookmark(user, project_name)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(user, project_name))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\".format(user, project_name))", "docstring": "Unbookmark project.\n\n Uses [Caching](/references/polyaxon-cli/#caching)", "id": "f1061:m16"} {"signature": "@click.group()@click.option('', '', type=str)@click.pass_context@clean_outputsdef bookmark(ctx, username): ", "body": "ctx.obj = ctx.obj or {}ctx.obj[''] = username", "docstring": "Commands for bookmarks.", "id": "f1062:m0"} {"signature": "@bookmark.command()@click.option('', type=int, help='')@click.pass_context@clean_outputsdef projects(ctx, page):", "body": "user = get_username_or_local(ctx.obj.get(''))page = page or try:response = PolyaxonClient().bookmark.projects(username=user, page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(user))Printer.print_error(''.format(e))sys.exit()meta = get_meta_response(response)if meta:Printer.print_header(''.format(user))Printer.print_header('')dict_tabulate(meta)else:Printer.print_header(''.format(user))objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))for o in response['']]objects 
= list_dicts_to_tabulate(objects)if objects:Printer.print_header(\"\")dict_tabulate(objects, is_list_dict=True)", "docstring": "List bookmarked projects for user.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon bookmark projects\n ```\n\n \\b\n ```bash\n $ polyaxon bookmark -u adam projects\n ```", "id": "f1062:m1"} {"signature": "@bookmark.command()@click.option('', type=int, help='')@click.pass_context@clean_outputsdef groups(ctx, page):", "body": "user = get_username_or_local(ctx.obj.get(''))page = page or try:response = PolyaxonClient().bookmark.groups(username=user, page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(user))Printer.print_error(''.format(e))sys.exit()meta = get_meta_response(response)if meta:Printer.print_header(''.format(user))Printer.print_header('')dict_tabulate(meta)else:Printer.print_header(''.format(user))objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))for o in response['']]objects = list_dicts_to_tabulate(objects)if objects:Printer.print_header(\"\")dict_tabulate(objects, is_list_dict=True)", "docstring": "List bookmarked experiment groups for user.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon bookmark groups\n ```\n\n \\b\n ```bash\n $ polyaxon bookmark -u adam groups\n ```", "id": "f1062:m2"} {"signature": "@bookmark.command()@click.option('', type=int, help='')@click.pass_context@clean_outputsdef experiments(ctx, page):", "body": "user = get_username_or_local(ctx.obj.get(''))page = page or try:response = PolyaxonClient().bookmark.experiments(username=user, page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(user))Printer.print_error(''.format(e))sys.exit()meta = get_meta_response(response)if meta:Printer.print_header(''.format(user))Printer.print_header('')dict_tabulate(meta)else:Printer.print_header(''.format(user))objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))for o in response['']]objects = list_dicts_to_tabulate(objects)if objects:Printer.print_header(\"\")dict_tabulate(objects, is_list_dict=True)", "docstring": "List bookmarked experiments for user.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon bookmark experiments\n ```\n\n \\b\n ```bash\n $ polyaxon bookmark -u adam experiments\n ```", "id": "f1062:m3"} {"signature": "@bookmark.command()@click.option('', type=int, help='')@click.pass_context@clean_outputsdef jobs(ctx, page):", "body": "user = get_username_or_local(ctx.obj.get(''))page = page or try:response = PolyaxonClient().bookmark.jobs(username=user, page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(user))Printer.print_error(''.format(e))sys.exit()meta = get_meta_response(response)if meta:Printer.print_header(''.format(user))Printer.print_header('')dict_tabulate(meta)else:Printer.print_header(''.format(user))objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))for o in response['']]objects = list_dicts_to_tabulate(objects)if objects:Printer.print_header(\"\")dict_tabulate(objects, is_list_dict=True)", "docstring": "List bookmarked jobs for user.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon bookmark jobs\n ```\n\n \\b\n ```bash\n $ polyaxon bookmark -u adam 
jobs\n ```", "id": "f1062:m4"} {"signature": "@bookmark.command()@click.option('', type=int, help='')@click.pass_context@clean_outputsdef builds(ctx, page):", "body": "user = get_username_or_local(ctx.obj.get(''))page = page or try:response = PolyaxonClient().bookmark.builds(username=user, page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(user))Printer.print_error(''.format(e))sys.exit()meta = get_meta_response(response)if meta:Printer.print_header(''.format(user))Printer.print_header('')dict_tabulate(meta)else:Printer.print_header(''.format(user))objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))for o in response['']]objects = list_dicts_to_tabulate(objects)if objects:Printer.print_header(\"\")dict_tabulate(objects, is_list_dict=True)", "docstring": "List bookmarked builds for user.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon bookmark builds\n ```\n\n \\b\n ```bash\n $ polyaxon bookmark -u adam builds\n ```", "id": "f1062:m5"} {"signature": "@click.command()@click.option('', '', is_flag=True, default=False,help='''')@click.option('', is_flag=True, default=False,help='')@clean_outputsdef dashboard(yes, url):", "body": "dashboard_url = \"\".format(PolyaxonClient().api_config.http_host)if url:click.echo(dashboard_url)sys.exit()if not yes:click.confirm('',abort=True, default=True)click.launch(dashboard_url)", "docstring": "Open dashboard in browser.", "id": "f1063:m0"} {"signature": "@click.group()@click.option('', '', type=str, help=\"\")@click.option('', '', type=int, help=\"\")@click.pass_context@clean_outputsdef group(ctx, project, group): ", "body": "ctx.obj = ctx.obj or {}ctx.obj[''] = projectctx.obj[''] = group", "docstring": "Commands for experiment groups.", "id": "f1064:m1"} {"signature": "@group.command()@click.pass_context@clean_outputsdef get(ctx):", "body": "user, project_name, _group = get_project_group_or_local(ctx.obj.get(''),ctx.obj.get(''))try:response = PolyaxonClient().experiment_group.get_experiment_group(user, project_name, _group)cache.cache(config_manager=GroupManager, response=response)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_group))Printer.print_error(''.format(e))sys.exit()get_group_details(response)", "docstring": "Get experiment group by uuid.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon group -g 13 get\n ```", "id": "f1064:m2"} {"signature": "@group.command()@click.pass_context@clean_outputsdef delete(ctx):", "body": "user, project_name, _group = get_project_group_or_local(ctx.obj.get(''),ctx.obj.get(''))if not click.confirm(\"\".format(_group)):click.echo('')sys.exit()try:response = PolyaxonClient().experiment_group.delete_experiment_group(user, project_name, _group)GroupManager.purge()except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_group))Printer.print_error(''.format(e))sys.exit()if response.status_code == :Printer.print_success(\"\".format(_group))", "docstring": "Delete experiment group.\n\n Uses [Caching](/references/polyaxon-cli/#caching)", "id": "f1064:m3"} {"signature": "@group.command()@click.option('', type=str,help='')@click.option('', type=str, help='')@click.option('', type=str, help='')@click.pass_context@clean_outputsdef update(ctx, name, description, tags):", "body": "user, project_name, _group = 
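The `dashboard` command defined a little further below either prints the dashboard URL or asks for confirmation and opens the browser. A runnable sketch using the same click primitives (`confirm` with `abort=True`, `launch`); the host value is a placeholder, not the client's configured API host:

```python
# Sketch of the dashboard command: print the URL with --url, otherwise confirm
# and open the browser.
import sys
import click


@click.command()
@click.option('--yes', '-y', is_flag=True, default=False,
              help='Skip the confirmation prompt.')
@click.option('--url', is_flag=True, default=False,
              help='Only print the dashboard URL.')
def dashboard(yes, url):
    """Open the dashboard in a browser."""
    dashboard_url = '{}/app'.format('http://localhost:8000')  # placeholder host
    if url:
        click.echo(dashboard_url)
        sys.exit()
    if not yes:
        click.confirm('Open the dashboard in your browser?',
                      abort=True, default=True)
    click.launch(dashboard_url)


if __name__ == '__main__':
    dashboard()
```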
get_project_group_or_local(ctx.obj.get(''),ctx.obj.get(''))update_dict = {}if name:update_dict[''] = nameif description:update_dict[''] = descriptiontags = validate_tags(tags)if tags:update_dict[''] = tagsif not update_dict:Printer.print_warning('')sys.exit()try:response = PolyaxonClient().experiment_group.update_experiment_group(user, project_name, _group, update_dict)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_group))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\")get_group_details(response)", "docstring": "Update experiment group.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Example:\n\n \\b\n ```bash\n $ polyaxon group -g 2 update --description=\"new description for this group\"\n ```\n\n \\b\n ```bash\n $ polyaxon update --tags=\"foo, bar\"\n ```", "id": "f1064:m4"} {"signature": "@group.command()@click.option('', '', is_flag=True, help='')@click.option('', '', is_flag=True,help='')@click.option('', '', type=str,help='')@click.option('', '', type=str, help='')@click.option('', type=int, help='')@click.pass_context@clean_outputsdef experiments(ctx, metrics, declarations, query, sort, page):", "body": "user, project_name, _group = get_project_group_or_local(ctx.obj.get(''),ctx.obj.get(''))page = page or try:response = PolyaxonClient().experiment_group.list_experiments(username=user,project_name=project_name,group_id=_group,metrics=metrics,declarations=declarations,query=query,sort=sort,page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_group))Printer.print_error(''.format(e))sys.exit()meta = get_meta_response(response)if meta:Printer.print_header(''.format(_group))Printer.print_header('')dict_tabulate(meta)else:Printer.print_header(''.format(_group))if metrics:objects = get_experiments_with_metrics(response)elif declarations:objects = get_experiments_with_declarations(response)else:objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))for o in response['']]objects = list_dicts_to_tabulate(objects)if objects:Printer.print_header(\"\")objects.pop('', None)objects.pop('', None)objects.pop('', None)dict_tabulate(objects, is_list_dict=True)", "docstring": "List experiments for this experiment group\n\n Uses [Caching](/references/polyaxon-cli/#caching)", "id": "f1064:m5"} {"signature": "@group.command()@click.option('', '', is_flag=True, default=False,help='''')@click.option('', is_flag=True, default=False,help='')@click.pass_context@clean_outputsdef stop(ctx, yes, pending):", "body": "user, project_name, _group = get_project_group_or_local(ctx.obj.get(''),ctx.obj.get(''))if not yes and not click.confirm(\"\"\"\".format(_group)):click.echo('')sys.exit()try:PolyaxonClient().experiment_group.stop(user, project_name, _group, pending=pending)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_group))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\")", "docstring": "Stop experiments in the group.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples: stop only pending experiments\n\n \\b\n ```bash\n $ polyaxon group stop --pending\n ```\n\n Examples: stop all unfinished\n\n \\b\n ```bash\n $ polyaxon group stop\n ```\n\n \\b\n ```bash\n $ polyaxon group -g 2 stop\n ```", "id": "f1064:m6"} {"signature": "@group.command()@click.option('', type=int, help=\"\")@click.pass_context@clean_outputsdef statuses(ctx, 
page):", "body": "user, project_name, _group = get_project_group_or_local(ctx.obj.get(''),ctx.obj.get(''))page = page or try:response = PolyaxonClient().experiment_group.get_statuses(user,project_name,_group,page=page)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_group))Printer.print_error(''.format(e))sys.exit()meta = get_meta_response(response)if meta:Printer.print_header(''.format(_group))Printer.print_header('')dict_tabulate(meta)else:Printer.print_header(''.format(_group))objects = list_dicts_to_tabulate([Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='')for o in response['']])if objects:Printer.print_header(\"\")objects.pop('', None)dict_tabulate(objects, is_list_dict=True)", "docstring": "Get experiment group statuses.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon group -g 2 statuses\n ```", "id": "f1064:m7"} {"signature": "@group.command()@click.pass_context@clean_outputsdef bookmark(ctx):", "body": "user, project_name, _group = get_project_group_or_local(ctx.obj.get(''),ctx.obj.get(''))try:PolyaxonClient().experiment_group.bookmark(user, project_name, _group)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_group))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\")", "docstring": "Bookmark group.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon group bookmark\n ```\n\n \\b\n ```bash\n $ polyaxon group -g 2 bookmark\n ```", "id": "f1064:m8"} {"signature": "@group.command()@click.pass_context@clean_outputsdef unbookmark(ctx):", "body": "user, project_name, _group = get_project_group_or_local(ctx.obj.get(''),ctx.obj.get(''))try:PolyaxonClient().experiment_group.unbookmark(user, project_name, _group)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(_group))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\")", "docstring": "Unbookmark group.\n\n Uses [Caching](/references/polyaxon-cli/#caching)\n\n Examples:\n\n \\b\n ```bash\n $ polyaxon group unbookmark\n ```\n\n \\b\n ```bash\n $ polyaxon group -g 2 unbookmark\n ```", "id": "f1064:m9"} {"signature": "@click.group()@clean_outputsdef superuser():", "body": "", "docstring": "Commands for superuser role management.", "id": "f1065:m0"} {"signature": "@superuser.command()@click.argument('', type=str)@clean_outputsdef grant(username):", "body": "try:PolyaxonClient().user.grant_superuser(username)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(username))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\".format(username))", "docstring": "Grant superuser role to a user.\n\n Example:\n\n \\b\n ```bash\n $ polyaxon superuser grant david\n ```", "id": "f1065:m1"} {"signature": "@superuser.command()@click.argument('', type=str)@clean_outputsdef revoke(username):", "body": "try:PolyaxonClient().user.revoke_superuser(username)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(username))Printer.print_error(''.format(e))sys.exit()Printer.print_success(\"\".format(username))", "docstring": "Revoke superuser role to a user.\n\n Example:\n\n \\b\n ```bash\n $ polyaxon superuser revoke david\n ```", "id": "f1065:m2"} {"signature": 
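Every command in these records is wrapped in the `clean_outputs` decorator (defined just below): on `SystemExit` or any other exception it redirects stdout to an in-memory buffer before the error propagates, so an error reporter wrapping the CLI is not polluted by command output. A runnable sketch that mirrors the decorator's readable parts:

```python
# Sketch of clean_outputs: swallow stdout when a command fails.
import sys
from functools import wraps
from io import StringIO


def clean_outputs(fn):
    @wraps(fn)
    def clean_outputs_wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except SystemExit as e:
            sys.stdout = StringIO()
            sys.exit(e.code)
        except Exception:
            sys.stdout = StringIO()
            raise
    return clean_outputs_wrapper


@clean_outputs
def hello():
    print('hello')


hello()
```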
"@click.command()@click.option('', '', type=int, help='')@clean_outputsdef cluster(node):", "body": "cluster_client = PolyaxonClient().clusterif node:try:node_config = cluster_client.get_node(node)except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error(''.format(node))Printer.print_error(''.format(e))sys.exit()get_node_info(node_config)else:try:cluster_config = cluster_client.get_cluster()except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:Printer.print_error('')Printer.print_error(''.format(e))sys.exit()get_cluster_info(cluster_config)", "docstring": "Get cluster and nodes info.", "id": "f1066:m2"} {"signature": "def clean_outputs(fn):", "body": "@wraps(fn)def clean_outputs_wrapper(*args, **kwargs):try:return fn(*args, **kwargs)except SystemExit as e:sys.stdout = StringIO()sys.exit(e.code) except Exception as e:sys.stdout = StringIO()raise ereturn clean_outputs_wrapper", "docstring": "Decorator for CLI with Sentry client handling.\n\n see https://github.com/getsentry/raven-python/issues/904 for more details.", "id": "f1068:m1"} {"signature": "@exportdef fuzzyfinder(input, collection, accessor=lambda x: x, sort_results=True):", "body": "suggestions = []input = str(input) if not isinstance(input, str) else inputpat = ''.join(map(re.escape, input))pat = ''.format(pat) regex = re.compile(pat, re.IGNORECASE)for item in collection:r = list(regex.finditer(accessor(item)))if r:best = min(r, key=lambda x: len(x.group())) suggestions.append((len(best.group()), best.start(), accessor(item), item))if sort_results:return (z[-] for z in sorted(suggestions))else:return (z[-] for z in sorted(suggestions, key=lambda x: x[:]))", "docstring": "Args:\n input (str): A partial string which is typically entered by a user.\n collection (iterable): A collection of strings which will be filtered\n based on the `input`.\n accessor (function): If the `collection` is not an iterable of strings,\n then use the accessor to fetch the string that\n will be used for fuzzy matching.\n sort_results(bool): The suggestions are sorted by considering the\n smallest contiguous match, followed by where the\n match is found in the full string. If two suggestions\n have the same rank, they are then sorted\n alpha-numerically. This parameter controls the\n *last tie-breaker-alpha-numeric sorting*. 
The sorting\n based on match length and position will be intact.\n\nReturns:\n suggestions (generator): A generator object that produces a list of\n suggestions narrowed down from `collection` using the `input`.", "id": "f1071:m0"} {"signature": "def export(defn):", "body": "globals()[defn.__name__] = defn__all__.append(defn.__name__)return defn", "docstring": "Decorator to explicitly mark functions that are exposed in a lib.", "id": "f1072:m0"} {"signature": "def __get_ll_type__(ll_type):", "body": "res = [llt for llt in __LL_TYPES__if llt[] == ll_type]assert len(res) < , ''if res:return res[]else:return None", "docstring": "Given an lltype value, retrieve its definition.", "id": "f1075:m0"} {"signature": "def lookup(ll_type):", "body": "res = __get_ll_type__(ll_type)if res:return res[]else:return res", "docstring": "Given an ll_type, retrieve the appropriate LL_TYPE.", "id": "f1075:m1"} {"signature": "def slookup(ll_type):", "body": "res = __get_ll_type__(ll_type)if res:return res[]else:return res", "docstring": "Given an ll_type, retrieve the short name for the link layer.", "id": "f1075:m2"} {"signature": "def clookup(ll_type):", "body": "res = __get_ll_type__(ll_type)if res:return res[]else:return res", "docstring": "Given an ll_type, retrieve the linklayer constructor to decode\nthe packets.", "id": "f1075:m3"} {"signature": "def __load_linktype__(link_type):", "body": "try:filep, pathname, description = imp.find_module(link_type, sys.path)link_type_module = imp.load_module(link_type, filep, pathname,description)except ImportError:return Nonefinally:if filep:filep.close()return link_type_module", "docstring": "Given a string for a given module, attempt to load it.", "id": "f1075:m4"} {"signature": "def _load_savefile_header(file_h):", "body": "try:raw_savefile_header = file_h.read()except UnicodeDecodeError:print(\"\")raise InvalidEncoding(\"\")if raw_savefile_header[:] in [struct.pack(\"\", _MAGIC_NUMBER),struct.pack(\"\", _MAGIC_NUMBER_NS)]:byte_order = b''unpacked = struct.unpack('', raw_savefile_header)elif raw_savefile_header[:] in [struct.pack(\"\", _MAGIC_NUMBER),struct.pack(\"\", _MAGIC_NUMBER_NS)]:byte_order = b''unpacked = struct.unpack('', raw_savefile_header)else:raise UnknownMagicNumber(\"\")(magic, major, minor, tz_off, ts_acc, snaplen, ll_type) = unpackedheader = __pcap_header__(magic, major, minor, tz_off, ts_acc, snaplen,ll_type, ctypes.c_char_p(byte_order),magic == _MAGIC_NUMBER_NS)if not __validate_header__(header):raise InvalidHeader(\"\")else:return header", "docstring": "Load and validate the header of a pcap file.", "id": "f1076:m2"} {"signature": "def load_savefile(input_file, layers=, verbose=False, lazy=False):", "body": "global VERBOSEold_verbose = VERBOSEVERBOSE = verbose__TRACE__('', (input_file.name,))header = _load_savefile_header(input_file)if __validate_header__(header):__TRACE__('')if lazy:packets = _generate_packets(input_file, header, layers)__TRACE__('')else:packets = _load_packets(input_file, header, layers)__TRACE__('', (len(packets),))sfile = pcap_savefile(header, packets)__TRACE__('')else:__TRACE__('')sfile = NoneVERBOSE = old_verbosereturn sfile", "docstring": "Parse a savefile as a pcap_savefile instance. Returns the savefile\non success and None on failure. Verbose mode prints additional information\nabout the file's processing. layers defines how many layers to descend and\ndecode the packet. 
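The `fuzzyfinder` record above has its string literals stripped; a hedged reconstruction is sketched below, assuming the stripped pieces are `'.*?'` (to join the escaped input characters) and a lookahead group (so overlapping candidate matches are found), with ranking by shortest match, then match position, then the string itself:

```python
# Hedged reconstruction of the fuzzy matcher: shortest, earliest match wins.
import re


def fuzzyfinder(text, collection):
    suggestions = []
    pat = '.*?'.join(map(re.escape, text))
    pat = '(?=({0}))'.format(pat)          # lookahead -> overlapping matches
    regex = re.compile(pat, re.IGNORECASE)
    for item in collection:
        matches = list(regex.finditer(item))
        if matches:
            best = min(matches, key=lambda m: len(m.group(1)))
            suggestions.append((len(best.group(1)), best.start(), item))
    return [s[-1] for s in sorted(suggestions)]


print(fuzzyfinder('mig', ['django_migrations.py', 'django_admin_log.py',
                          'main_generator.py', 'migrations.py']))
# ['migrations.py', 'django_migrations.py', 'main_generator.py',
#  'django_admin_log.py']
```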
input_file should be a Python file object.", "id": "f1076:m3"} {"signature": "def _load_packets(file_h, header, layers=):", "body": "pkts = []hdrp = ctypes.pointer(header)while True:pkt = _read_a_packet(file_h, hdrp, layers)if pkt:pkts.append(pkt)else:breakreturn pkts", "docstring": "Read packets from the capture file. Expects the file handle to point to\nthe location immediately after the header (24 bytes).", "id": "f1076:m5"} {"signature": "def _generate_packets(file_h, header, layers=):", "body": "hdrp = ctypes.pointer(header)while True:pkt = _read_a_packet(file_h, hdrp, layers)if pkt:yield pktelse:break", "docstring": "Read packets one by one from the capture file. Expects the file\nhandle to point to the location immediately after the header (24\nbytes).", "id": "f1076:m6"} {"signature": "def _read_a_packet(file_h, hdrp, layers=):", "body": "raw_packet_header = file_h.read()if not raw_packet_header or len(raw_packet_header) != :return Noneif hdrp[].byteorder == '':packet_header = struct.unpack('', raw_packet_header)else:packet_header = struct.unpack('', raw_packet_header)(timestamp, timestamp_us, capture_len, packet_len) = packet_headerraw_packet_data = file_h.read(capture_len)if not raw_packet_data or len(raw_packet_data) != capture_len:return Noneif layers > :layers -= raw_packet = linklayer.clookup(hdrp[].ll_type)(raw_packet_data,layers=layers)else:raw_packet = raw_packet_datapacket = pcap_packet(hdrp, timestamp, timestamp_us, capture_len,packet_len, raw_packet)return packet", "docstring": "Reads the next individual packet from the capture file. Expects\nthe file handle to be somewhere after the header, on the next\nper-packet header.", "id": "f1076:m7"} {"signature": "def init_capfile(self, layers=):", "body": "self.capfile = savefile.load_savefile(open('', ''),layers=layers)", "docstring": "Initialise capture file.", "id": "f1078:c0:m0"} {"signature": "@classmethoddef setUpClass(cls):", "body": "print('')", "docstring": "Print an intro to identify this test suite when running multiple tests.", "id": "f1078:c0:m1"} {"signature": "@classmethoddef setUpClass(cls):", "body": "print('')", "docstring": "Print a start message when loading the test suite.", "id": "f1080:c0:m0"} {"signature": "@classmethoddef setUpClass(cls):", "body": "print('')", "docstring": "Print a start message when loading the test suite.", "id": "f1081:c0:m0"} {"signature": "@classmethoddef setUpClass(cls):", "body": "print('')", "docstring": "Print a start message when loading the test suite.", "id": "f1082:c0:m0"} {"signature": "def create_pcap():", "body": "tfile = tempfile.NamedTemporaryFile()if sys.version_info[] >= : capture = pickle.loads(base64.b64decode(fixture.TESTPCAP3))else: capture = pickle.loads(fixture.TESTPCAP2.decode(''))with open(tfile.name, '') as f:f.write(capture)return tfile", "docstring": "Create a capture file from the test fixtures.", "id": "f1083:m0"} {"signature": "def init_capfile(self, layers=):", "body": "tfile = create_pcap()self.capfile = savefile.load_savefile(tfile, layers=layers)tfile.close()if os.path.exists(tfile.name):os.unlink(tfile.name)", "docstring": "Initialise the capture file.", "id": "f1083:c0:m0"} {"signature": "@classmethoddef setUpClass(cls):", "body": "print('')", "docstring": "Print an intro to identify this test suite when running multiple tests.", "id": "f1083:c0:m1"} {"signature": "def setUp(self):", "body": "if not self.capfile:self.init_capfile()", "docstring": "Set up a default capture file.", "id": "f1083:c0:m2"} {"signature": "@classmethoddef 
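The savefile records above read a 24-byte global header (whose magic number reveals the byte order) followed by 16-byte per-packet record headers. A hedged sketch of that parsing, using the standard libpcap field layout rather than the module's own ctypes structures:

```python
# Sketch of pcap parsing: global header, then (timestamp, lengths, data) records.
import io
import struct

MAGIC = 0xa1b2c3d4      # microsecond-resolution captures
MAGIC_NS = 0xa1b23c4d   # nanosecond-resolution captures


def read_global_header(f):
    raw = f.read(24)
    for order in ('<', '>'):
        magic = struct.unpack(order + 'I', raw[:4])[0]
        if magic in (MAGIC, MAGIC_NS):
            fields = struct.unpack(order + 'IHHIIII', raw)
            keys = ('magic', 'major', 'minor', 'tz_off',
                    'ts_acc', 'snaplen', 'll_type')
            return order, dict(zip(keys, fields))
    raise ValueError('unknown magic number')


def read_packets(f, order):
    while True:
        raw = f.read(16)
        if len(raw) != 16:
            return
        ts_sec, ts_usec, capture_len, packet_len = struct.unpack(order + 'IIII', raw)
        data = f.read(capture_len)
        if len(data) != capture_len:
            return
        yield ts_sec, ts_usec, packet_len, data


# tiny in-memory capture: global header plus one empty packet record
blob = (struct.pack('<IHHIIII', MAGIC, 2, 4, 0, 0, 65535, 1) +
        struct.pack('<IIII', 0, 0, 0, 0))
f = io.BytesIO(blob)
order, header = read_global_header(f)
print(header['ll_type'], list(read_packets(f, order)))
```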
setUpClass(cls):", "body": "print('')", "docstring": "Print a start message when loading the test suite.", "id": "f1084:c0:m0"} {"signature": "def raw(self):", "body": "return self.packet", "docstring": "Return the raw binary data from the packet.", "id": "f1086:c1:m1"} {"signature": "def parse_ipv4(address):", "body": "raw = struct.pack('', address)octets = struct.unpack('', raw)[::-]ipv4 = b''.join([('' % o).encode('') for o in bytearray(octets)])return ipv4", "docstring": "Given a raw IPv4 address (i.e. as an unsigned integer), return it in\ndotted quad notation.", "id": "f1087:m0"} {"signature": "def strip_ip(packet):", "body": "if not isinstance(packet, IP):packet = IP(packet)payload = packet.payloadreturn payload", "docstring": "Remove the IP packet layer, yielding the transport layer.", "id": "f1087:m1"} {"signature": "def strip_ethernet(packet):", "body": "if not isinstance(packet, Ethernet):packet = Ethernet(packet)payload = packet.payloadreturn payload", "docstring": "Strip the Ethernet frame from a packet.", "id": "f1088:m0"} {"signature": "def payload_type(ethertype):", "body": "if ethertype == :from pcapfile.protocols.network.ip import IPreturn (IP, '')", "docstring": "Returns the appropriate payload constructor based on the supplied\nEtherType.", "id": "f1088:m1"} {"signature": "def load_network(self, layers=):", "body": "if layers:ctor = payload_type(self.type)[]if ctor:ctor = ctorpayload = self.payloadself.payload = ctor(payload, layers - )else:pass", "docstring": "Given an Ethernet frame, determine the appropriate sub-protocol;\nIf layers is greater than zero, determine the type of the payload\nand load the appropriate type of network packet. It is expected\nthat the payload be a hexified string. The layers argument determines\nhow many layers to descend while parsing the packet.", "id": "f1088:c0:m1"} {"signature": "def WIFI(frame, no_rtap=False):", "body": "pack = Nonetry:pack = WiHelper.get_wifi_packet(frame, no_rtap)except Exception as e:logging.exception(e)return pack", "docstring": "calls wifi packet discriminator and constructor.\n :frame: ctypes.Structure\n :no_rtap: Bool\n :return: packet object in success\n :return: int\n -1 on known error\n :return: int\n -2 on unknown error", "id": "f1089:m0"} {"signature": "@staticmethoddef get_wifi_packet(frame, no_rtap=False):", "body": "_, packet = WiHelper._strip_rtap(frame)frame_control = struct.unpack('', packet[:])cat = (frame_control[] >> ) & s_type = frame_control[] >> if cat not in _CATEGORIES_.keys():logging.warning(\"\" % (cat))return Unknown(frame, no_rtap)if s_type not in _SUBTYPES_[cat].keys():logging.warning(\"\" % (s_type, _CATEGORIES_[cat]))return Unknown(frame, no_rtap)if cat == :if s_type == :return ProbeReq(frame, no_rtap)elif s_type == :return ProbeResp(frame, no_rtap)elif s_type == :return Beacon(frame, no_rtap)else:return Management(frame, no_rtap)elif cat == :if s_type == :return RTS(frame, no_rtap)elif s_type == :return CTS(frame, no_rtap)elif s_type == :return BACK(frame, no_rtap)else:return Control(frame, no_rtap)elif cat == :if s_type == :return QosData(frame, no_rtap, parse_amsdu=True)else:return Data(frame, no_rtap)", "docstring": "Discriminates Wi-Fi packet and creates\n packet object.\n :frame: ctypes.Structure\n :no_rtap: Bool\n :return: obj\n Wi-Fi packet", "id": "f1089:c0:m0"} {"signature": "@staticmethoddef _strip_rtap(frame):", "body": "rtap_len = WiHelper.__get_rtap_len(frame)rtap = frame[:rtap_len]packet = frame[rtap_len:]return rtap, packet", "docstring": "strip injected radiotap 
header.\n :return: ctypes.Structure\n radiotap header\n :return: ctypes.Structure\n actual layer 2 Wi-Fi payload", "id": "f1089:c0:m1"} {"signature": "@staticmethoddef __get_rtap_len(frame):", "body": "r_len = struct.unpack('', frame[:])return r_len[]", "docstring": "parse length of radiotap header.\n :packet: ctypes.structure\n :return: int", "id": "f1089:c0:m2"} {"signature": "def __init__(self, rtap_bytes):", "body": "super(Radiotap, self).__init__()self._raw = {} self._bits = {} idx = self._rtap = rtap_bytesself.vers = Radiotap.strip_vers(self._rtap[idx:idx + ])idx += self.pad = Radiotap.strip_pad(self._rtap[idx:idx + ])idx += self.len = Radiotap.strip_len(self._rtap[idx:idx + ])idx += self.present, self.present_bits = Radiotap.strip_present(self._rtap[idx:idx + ])idx += if self.present.tsft: idx, self.mactime = self.strip_tsft(idx)if self.present.flags: idx, self.flags = self.strip_flags(idx)if self.present.rate: idx, self.rate = self.strip_rate(idx)if self.present.channel: idx, self.chan = self.strip_chan(idx)if self.present.fhss: idx, self.fhss = self.strip_fhss(idx)if self.present.dbm_antsignal: idx, self.dbm_antsignal = self.strip_dbm_antsignal(idx)if self.present.dbm_antnoise: idx, self.dbm_antnoise = self.strip_dbm_antnoise(idx)if self.present.lock_quality: idx, self.lock_quality = self.strip_lock_quality(idx)if self.present.tx_attenuation: idx, self.tx_attenuation = self.strip_tx_attenuation(idx)if self.present.db_tx_attenuation: idx, self.db_tx_attenuation = self.strip_db_tx_attenuation(idx)if self.present.dbm_tx_power: idx, self.dbm_tx_power = self.strip_dbm_tx_power(idx)if self.present.antenna: idx, self.antenna = self.strip_antenna(idx)if self.present.db_antsignal: idx, self.db_antsignal = self.strip_db_antsignal(idx)if self.present.db_antnoise: idx, self.db_antnoise = self.strip_db_antnoise(idx)if self.present.rxflags: idx, self.rxflags = self.strip_rx_flags(idx)if self.present.txflags: idx, self.txflags = self.strip_tx_flags(idx)if self.present.rts_retries: idx, self.rts_retries = self.strip_rts_retries(idx)if self.present.data_retries: idx, self.data_retries = self.strip_data_retries(idx)if self.present.xchannel: idx, self.xchannel = self.strip_xchannel(idx)if self.present.mcs: idx, self.mcs = self.strip_mcs(idx)if self.present.ampdu: idx, self.ampdu = self.strip_ampdu(idx)if self.present.vht: idx, self.vht = self.strip_vht(idx)self.prot_type = self.extract_protocol()", "docstring": "Constructor method.\n :rtap_bytes: ctypes.Structure", "id": "f1089:c1:m0"} {"signature": "@staticmethoddef strip_vers(payload):", "body": "return struct.unpack('', payload)[]", "docstring": "strip(1 byte) radiotap.version\n :payload: ctypes.Structure\n :return: int", "id": "f1089:c1:m1"} {"signature": "@staticmethoddef strip_pad(payload):", "body": "return struct.unpack('', payload)[]", "docstring": "strip(1 byte) radiotap.pad\n :payload: ctypes.Structure\n :return: int", "id": "f1089:c1:m2"} {"signature": "@staticmethoddef strip_len(payload):", "body": "return struct.unpack('', payload)[]", "docstring": "strip(2 byte) radiotap.length\n :payload: ctypes.Structure\n :return: int", "id": "f1089:c1:m3"} {"signature": "@staticmethoddef strip_present(payload):", "body": "present = collections.namedtuple('', ['', '', '', '', '','', '', '','', '', '','', '', '', '','', '', '', '','', '', '', '', '', ''])val = struct.unpack('', payload)[]bits = format(val, '')[::-]present.tsft = int(bits[]) present.flags = int(bits[]) present.rate = int(bits[]) present.channel = int(bits[]) present.fhss = 
int(bits[]) present.dbm_antsignal = int(bits[]) present.dbm_antnoise = int(bits[]) present.lock_quality = int(bits[]) present.tx_attenuation = int(bits[]) present.db_tx_attenuation = int(bits[]) present.dbm_tx_power = int(bits[]) present.antenna = int(bits[]) present.db_antsignal = int(bits[]) present.db_antnoise = int(bits[]) present.rxflags = int(bits[]) present.txflags = int(bits[]) present.rts_retries = int(bits[]) present.data_retries = int(bits[]) present.xchannel = int(bits[]) present.mcs = int(bits[]) present.ampdu = int(bits[]) present.vht = int(bits[]) present.rtap_ns = int(bits[]) present.ven_ns = int(bits[]) present.ext = int(bits[]) return present, bits", "docstring": "strip(4 byte) radiotap.present. Those are flags that\n identify existence of incoming radiotap meta-data.\n :idx: int\n :return: str\n :return: namedtuple", "id": "f1089:c1:m4"} {"signature": "def strip_tsft(self, idx):", "body": "idx = Radiotap.align(idx, )mactime, = struct.unpack_from('', self._rtap, idx)return idx + , mactime", "docstring": "strip(8 byte) radiotap.mactime\n :idx: int\n :return: int\n idx\n :return: int\n mactime", "id": "f1089:c1:m5"} {"signature": "def strip_flags(self, idx):", "body": "flags = collections.namedtuple('', ['', '', '', '', '','', '', ''])val, = struct.unpack_from('', self._rtap, idx)bits = format(val, '')[::-]flags.cfp = int(bits[])flags.preamble = int(bits[])flags.wep = int(bits[])flags.fragmentation = int(bits[])flags.fcs = int(bits[])flags.datapad = int(bits[])flags.badfcs = int(bits[])flags.shortgi = int(bits[])return idx + , flags", "docstring": "strip(1 byte) radiotap.flags\n :idx: int\n :return: int\n idx\n :return: collections.namedtuple", "id": "f1089:c1:m6"} {"signature": "def strip_rate(self, idx):", "body": "val, = struct.unpack_from('', self._rtap, idx)rate_unit = float() / return idx + , rate_unit * val", "docstring": "strip(1 byte) radiotap.datarate\n note that, unit of this field is originally 0.5 Mbps\n :idx: int\n :return: int\n idx\n :return: double\n rate in terms of Mbps", "id": "f1089:c1:m7"} {"signature": "def strip_chan(self, idx):", "body": "chan = collections.namedtuple('', ['', '', '', '', '', '','', '', '', '', '','', ''])idx = Radiotap.align(idx, )freq, flags, = struct.unpack_from('', self._rtap, idx)chan.freq = freqbits = format(flags, '')[::-]chan.turbo = int(bits[])chan.cck = int(bits[])chan.ofdm = int(bits[])chan.two_g = int(bits[])chan.five_g = int(bits[])chan.passive = int(bits[])chan.dynamic = int(bits[])chan.gfsk = int(bits[])chan.gsm = int(bits[])chan.static_turbo = int(bits[])chan.half_rate = int(bits[])chan.quarter_rate = int(bits[])return idx + , chan", "docstring": "strip(2 byte) radiotap.channel.flags\n :idx: int\n :return: int\n idx\n :return: collections.namedtuple", "id": "f1089:c1:m8"} {"signature": "def strip_fhss(self, idx):", "body": "fhss = collections.namedtuple('', ['', ''])fhss.hopset, fhss.pattern, = struct.unpack_from('', self._rtap, idx)return idx + , fhss", "docstring": "strip (2 byte) radiotap.fhss.hopset(1 byte) and\n radiotap.fhss.pattern(1 byte)\n :idx: int\n :return: int\n idx\n :return: collections.namedtuple", "id": "f1089:c1:m9"} {"signature": "def strip_dbm_antsignal(self, idx):", "body": "dbm_antsignal, = struct.unpack_from('', self._rtap, idx)return idx + , dbm_antsignal", "docstring": "strip(1 byte) radiotap.dbm.ant_signal\n :idx: int\n :return: int\n idx\n :return: int", "id": "f1089:c1:m10"} {"signature": "def strip_dbm_antnoise(self, idx):", "body": "dbm_antnoise, = struct.unpack_from('', self._rtap, 
idx)return idx + , dbm_antnoise", "docstring": "strip(1 byte) radiotap.dbm_antnoise\n :idx: int\n :return: int\n idx\n :return: int", "id": "f1089:c1:m11"} {"signature": "def strip_lock_quality(self, idx):", "body": "idx = Radiotap.align(idx, )lock_quality, = struct.unpack_from('', self._rtap, idx)return idx + , lock_quality", "docstring": "strip(2 byte) lock quality\n :idx: int\n :return: int\n idx\n :return: int", "id": "f1089:c1:m12"} {"signature": "def strip_tx_attenuation(self, idx):", "body": "idx = Radiotap.align(idx, )tx_attenuation, = struct.unpack_from('', self._rtap, idx)return idx + , tx_attenuation", "docstring": "strip(1 byte) tx_attenuation\n :idx: int\n :return: int\n idx\n :return: int", "id": "f1089:c1:m13"} {"signature": "def strip_db_tx_attenuation(self, idx):", "body": "idx = Radiotap.align(idx, )db_tx_attenuation, = struct.unpack_from('', self._rtap, idx)return idx + , db_tx_attenuation", "docstring": "strip(1 byte) db_tx_attenuation\n :return: int\n idx\n :return: int", "id": "f1089:c1:m14"} {"signature": "def strip_dbm_tx_power(self, idx):", "body": "idx = Radiotap.align(idx, )dbm_tx_power, = struct.unpack_from('', self._rtap, idx)return idx + , dbm_tx_power", "docstring": "strip(1 byte) dbm_tx_power\n :return: int\n idx\n :return: int", "id": "f1089:c1:m15"} {"signature": "def strip_antenna(self, idx):", "body": "antenna, = struct.unpack_from('', self._rtap, idx)return idx + , antenna", "docstring": "strip(1 byte) radiotap.antenna\n :return: int\n idx\n :return: int", "id": "f1089:c1:m16"} {"signature": "def strip_db_antsignal(self, idx):", "body": "db_antsignal, = struct.unpack_from('', self._rtap, idx)return idx + , db_antsignal", "docstring": "strip(1 byte) radiotap.db_antsignal\n :return: int\n idx\n :return: int", "id": "f1089:c1:m17"} {"signature": "def strip_db_antnoise(self, idx):", "body": "db_antnoise, = struct.unpack_from('', self._rtap, idx)return idx + , db_antnoise", "docstring": "strip(1 byte) radiotap.db_antnoise\n :return: int\n idx\n :return: int", "id": "f1089:c1:m18"} {"signature": "def strip_rx_flags(self, idx):", "body": "rx_flags = collections.namedtuple('', ['', ''])idx = Radiotap.align(idx, )flags, = struct.unpack_from('', self._rtap, idx)flag_bits = format(flags, '')[::-]rx_flags.reserved = int(flag_bits[])rx_flags.badplcp = int(flag_bits[])return idx + , rx_flags", "docstring": "strip(2 byte) radiotap.rxflags\n :idx: int\n :return: int\n idx\n :return: collections.namedtuple", "id": "f1089:c1:m19"} {"signature": "def strip_tx_flags(self, idx):", "body": "idx = Radiotap.align(idx, )tx_flags, = struct.unpack_from('', self._rtap, idx)return idx + , tx_flags", "docstring": "strip(1 byte) tx_flags\n :idx: int\n :return: int\n idx\n :return: int", "id": "f1089:c1:m20"} {"signature": "def strip_rts_retries(self, idx):", "body": "rts_retries, = struct.unpack_from('', self._rtap, idx)return idx + , rts_retries", "docstring": "strip(1 byte) rts_retries\n :idx: int\n :return: int\n idx\n :return: int", "id": "f1089:c1:m21"} {"signature": "def strip_data_retries(self, idx):", "body": "data_retries, = struct.unpack_from('', self._rtap, idx)return idx + , data_retries", "docstring": "strip(1 byte) data_retries\n :idx: int\n :return: int\n idx\n :return: int", "id": "f1089:c1:m22"} {"signature": "def strip_xchannel(self, idx):", "body": "xchannel = collections.namedtuple('', ['', '', '', ''])flags = collections.namedtuple('', ['', '', '', '', '', '','', '', '', '', '', '','', '', ''])idx = Radiotap.align(idx, )flag_val, freq, channel, max_power = 
struct.unpack_from('', self._rtap, idx)xchannel.freq = freqxchannel.channel = channelxchannel.max_power = max_powerbits = format(flag_val, '')[::-]flags.turbo = int(bits[])flags.cck = int(bits[])flags.ofdm = int(bits[])flags.two_g = int(bits[])flags.five_g = int(bits[])flags.passive = int(bits[])flags.dynamic = int(bits[])flags.gfsk = int(bits[])flags.gsm = int(bits[])flags.sturbo = int(bits[])flags.half = int(bits[])flags.quarter = int(bits[])flags.ht_20 = int(bits[])flags.ht_40u = int(bits[])flags.ht_40d = int(bits[])xchannel.flags = flagsreturn idx + , xchannel", "docstring": "strip(7 bytes) radiotap.xchannel.channel(1 byte),\n radiotap.xchannel.freq(2 bytes) and radiotap.xchannel.flags(4 bytes)\n :idx: int\n :return: int\n idx\n :return: collections.namedtuple", "id": "f1089:c1:m23"} {"signature": "def strip_mcs(self, idx):", "body": "mcs = collections.namedtuple('', ['', '', '', '', '','', '', '', '',''])idx = Radiotap.align(idx, )known, flags, index = struct.unpack_from('', self._rtap, idx)bits = format(flags, '')[::-]mcs.known = known mcs.index = index mcs.have_bw = int(bits[]) mcs.have_mcs = int(bits[]) mcs.have_gi = int(bits[]) mcs.have_format = int(bits[]) mcs.have_fec = int(bits[]) mcs.have_stbc = int(bits[]) mcs.have_ness = int(bits[]) mcs.ness_bit1 = int(bits[]) return idx + , mcs", "docstring": "strip(3 byte) radiotap.mcs which contains 802.11n bandwidth,\n mcs(modulation and coding scheme) and stbc(space time block coding)\n information.\n :idx: int\n :return: int\n idx\n :return: collections.namedtuple", "id": "f1089:c1:m24"} {"signature": "def strip_ampdu(self, idx):", "body": "ampdu = collections.namedtuple('', ['', '', '', ''])flags = collections.namedtuple('', ['', '', '', '',''])idx = Radiotap.align(idx, )refnum, flag_vals, crc_val, reserved = struct.unpack_from('', self._rtap, idx)ampdu.flags = flagsampdu.reference = refnumampdu.crc_val = crc_valampdu.reserved = reservedbits = format(flag_vals, '')[::-]ampdu.flags.report_zerolen = int(bits[])ampdu.flags.is_zerolen = int(bits[])ampdu.flags.lastknown = int(bits[])ampdu.flags.last = int(bits[])ampdu.flags.delim_crc_error = int(bits[])return idx + , ampdu", "docstring": "strip(8 byte) radiotap.ampdu\n :idx: int\n :return: int\n idx\n :return: collections.namedtuple", "id": "f1089:c1:m25"} {"signature": "def strip_vht(self, idx):", "body": "vht = collections.namedtuple('', ['', '', '', '','', '', '','', '', '', '', '', '','', '', '', '','', '', '', '', ''])user = collections.namedtuple('', ['', '', ''])idx = Radiotap.align(idx, )known, flags, bw = struct.unpack_from('', self._rtap, idx)mcs_nss_0, mcs_nss_1, mcs_nss_2, mcs_nss_3 = struct.unpack_from('', self._rtap, idx + )coding, group_id, partial_id = struct.unpack_from('', self._rtap, idx + )known_bits = format(known, '')[::-]vht.known_bits = known_bitsvht.have_stbc = int(known_bits[]) vht.have_txop_ps = int(known_bits[]) vht.have_gi = int(known_bits[]) vht.have_sgi_nsym_da = int(known_bits[]) vht.have_ldpc_extra = int(known_bits[]) vht.have_beamformed = int(known_bits[]) vht.have_bw = int(known_bits[]) vht.have_gid = int(known_bits[]) vht.have_paid = int(known_bits[]) flag_bits = format(flags, '')[::-]vht.flag_bits = flag_bitsvht.stbc = int(flag_bits[])vht.txop_ps = int(flag_bits[])vht.gi = int(flag_bits[])vht.sgi_nysm_da = int(flag_bits[])vht.ldpc_extra = int(flag_bits[])vht.beamformed = int(flag_bits[])vht.group_id = group_idvht.partial_id = partial_idvht.bw = bwvht.user_0 = user(None, None, None)vht.user_1 = user(None, None, None)vht.user_2 = user(None, None, 
None)vht.user_3 = user(None, None, None)for (i, mcs_nss) in enumerate([mcs_nss_0, mcs_nss_1, mcs_nss_2, mcs_nss_3]):if mcs_nss:nss = mcs_nss & >> mcs = (mcs_nss & ) >> coding = (coding & **i) >> iif i == :vht.user_0 = user(nss, mcs, coding)elif i == :vht.user_1 = user(nss, mcs, coding)elif i == :vht.user_2 = user(nss, mcs, coding)elif i == :vht.user_3 = user(nss, mcs, coding)return idx + , vht", "docstring": "strip(12 byte) radiotap.vht\n :idx: int\n :return: int\n idx\n :return: collections.namedtuple", "id": "f1089:c1:m26"} {"signature": "def extract_protocol(self):", "body": "if self.present.mcs:return ''if self.present.vht:return ''if self.present.channel and hasattr(self, ''):if self.chan.five_g:if self.chan.ofdm:return ''elif self.chan.two_g:if self.chan.cck:return ''elif self.chan.ofdm or self.chan.dynamic:return ''return ''", "docstring": "extract 802.11 protocol from radiotap.channel.flags\n :return: str\n protocol name\n one of below in success\n [.11a, .11b, .11g, .11n, .11ac]\n None in fail", "id": "f1089:c1:m27"} {"signature": "@staticmethoddef align(val, align):", "body": "return (val + align - ) & ~(align - )", "docstring": ":val: int\n:align: int\n:return: int", "id": "f1089:c1:m28"} {"signature": "def __init__(self, frame, no_rtap=False):", "body": "super(Wifi, self).__init__()self._raw = {}if not no_rtap:rtap_bytes, self._packet = WiHelper._strip_rtap(frame)self.radiotap = Radiotap(rtap_bytes)else:self._packet = frameself.radiotap = Nonef_cntrl = struct.unpack('', self._packet[:]) flags = f_cntrl[]self.vers = f_cntrl[] & self.category = (f_cntrl[] >> ) & self.subtype = f_cntrl[] >> flag_bits = format(flags, '')[::-]self.to_ds = int(flag_bits[])self.from_ds = int(flag_bits[])self.ds = b''.join([(flag_bits[]).encode(''),(flag_bits[]).encode('')])self.frag = int(flag_bits[])self.retry = int(flag_bits[])self.power_mgmt = int(flag_bits[])self.more_data = int(flag_bits[])self.wep = int(flag_bits[])self.order = int(flag_bits[])self.duration = struct.unpack('', self._packet[:])[] self.name = Noneif self.category == :if self.subtype in _SUBTYPES_[].keys():self.name = _SUBTYPES_[][self.subtype].encode('')elif self.category == :if self.subtype in _SUBTYPES_[].keys():self.name = _SUBTYPES_[][self.subtype].encode('')elif self.category == :if self.subtype in _SUBTYPES_[].keys():self.name = _SUBTYPES_[][self.subtype].encode('')", "docstring": "Constructor method.\n Parse common headers of all Wi-Fi frames.\n :frame: ctypes.Structure", "id": "f1089:c2:m0"} {"signature": "def get_shark_field(self, fields):", "body": "keys, exist, out = None, {}, Noneif isinstance(fields, str):fields = [fields]elif not isinstance(fields, list):logging.error('')return Noneout = dict.fromkeys(fields)if hasattr(self, ''):exist.update(self._shark_)if hasattr(self, ''):exist.update(self._s_shark_)if hasattr(self.radiotap, ''):exist.update(self.radiotap._r_shark_)keys = exist.keys()for elem in fields:if elem in keys:obj_field, tmp = exist[elem], Nonetry:tmp = operator.attrgetter(obj_field)(self)except AttributeError:tmp = Noneif not tmp:try:tmp = operator.attrgetter(obj_field)(self.radiotap)except AttributeError:tmp = Noneout[elem] = tmpreturn out", "docstring": "get parameters via wireshark syntax.\n out = x.get_shark_field('wlan.fc.type')\n out = x.get_shark_field(['wlan.fc.type', 'wlan.seq'])\n :fields: str or str[]\n :return: dict\n out[fields[0]] = val[0] or None\n out[fields[1]] = val[1] or None ...", "id": "f1089:c2:m1"} {"signature": "@staticmethoddef get_mac_addr(mac_addr):", "body": "mac_addr = 
bytearray(mac_addr)mac = b''.join([('' % o).encode('') for o in mac_addr])return mac", "docstring": "converts bytes to mac addr format\n :mac_addr: ctypes.structure\n :return: str\n mac addr in format\n 11:22:33:aa:bb:cc", "id": "f1089:c2:m2"} {"signature": "def get_hex_repr(self):", "body": "return hex(self.category * + self.subtype)", "docstring": "wlan.fc.type_subtype hex representation\n :return: str", "id": "f1089:c2:m3"} {"signature": "def strip_mac_addrs(self):", "body": "qos_idx, seq_idx = , sa, ta, ra, da, bssid = None, None, None, None, Noneif self.to_ds == and self.from_ds == :(ra, ta, da) = struct.unpack('', self._packet[:])sa = struct.unpack('', self._packet[:])[]qos_idx = seq_idx = elif self.to_ds == and self.from_ds == :(ra, ta, sa) = struct.unpack('', self._packet[:])qos_idx = seq_idx = elif self.to_ds == and self.from_ds == :(ra, ta, da) = struct.unpack('', self._packet[:])qos_idx = seq_idx = elif self.to_ds == and self.from_ds == :(ra, ta, bssid) = struct.unpack('', self._packet[:])qos_idx = seq_idx = if ta is not None:ta = Wifi.get_mac_addr(ta)if ra is not None:ra = Wifi.get_mac_addr(ra)if sa is not None:sa = Wifi.get_mac_addr(sa)if da is not None:da = Wifi.get_mac_addr(da)if bssid is not None:bssid = Wifi.get_mac_addr(bssid)return seq_idx, qos_idx, sa, ta, ra, da, bssid", "docstring": "strip mac address(each 6 byte) information.\n (wlan.ta, wlan.ra, wlan.sa, wlan.da)\n (transmitter, receiver, source, destination)\n :return: int\n index of sequence control\n :return: int\n index after mac addresses\n :return: str\n source address (sa)\n :return: str\n transmitter address (ta)\n :return: str\n receiver address (ra)\n :return: str\n destination address (da)\n :return: str\n basic service sed identifier (bssid)", "id": "f1089:c2:m4"} {"signature": "def strip_seq_cntrl(self, idx):", "body": "seq_cntrl = struct.unpack('', self._packet[idx:idx + ])[]seq_num = seq_cntrl >> frag_num = seq_cntrl & return seq_num, frag_num", "docstring": "strip(2 byte) wlan.seq(12 bit) and wlan.fram(4 bit)\n number information.\n :seq_cntrl: ctypes.Structure\n :return: int\n sequence number\n :return: int\n fragment number", "id": "f1089:c2:m5"} {"signature": "def __repr__(self, show_rfields=True):", "body": "out_str = ''all_fields = []if hasattr(self, ''):all_fields += self._fields_if hasattr(self, ''):all_fields += self._sfields_if all_fields:for elem in all_fields:key = elem[]try:val = operator.attrgetter(key)(self)except Exception:val = Noneif isinstance(val, list):if val:out_str += \"\".format(key, type(val[]))else:out_str += \"\".format(str(key))else:out_str += \"\".format(str(key), str(val))else:logging.error('')return Noneif show_rfields and hasattr(self.radiotap, ''):for elem in self.radiotap._rfields_:key = elem[]try:val = operator.attrgetter(key)(self.radiotap)except Exception:val = Noneif val is not None:out_str += \"\".format(key, val)return out_str", "docstring": ":show_rfields: bool\n whether to show radiotap fields too.", "id": "f1089:c2:m6"} {"signature": "def __init__(self, frame, no_rtap=False):", "body": "Wifi.__init__(self, frame, no_rtap)", "docstring": "Constructor method.\n :packet: ctypes.Structure\n :no_rtap: Bool\n shall parse radiotap headers", "id": "f1089:c3:m0"} {"signature": "def __init__(self, frame, no_rtap=False, parse_amsdu=True):", "body": "Data.__init__(self, frame, no_rtap)idx = self.sa = self.ta = self.ra = self.da = Noneself.seq_num = self.frag_num = Noneself.qos_pri = self.qos_bit = self.qos_ack = Noneself.ccmp_extiv = Noneself.payload = []seq_idx, 
qos_idx, self.sa, self.ta, self.ra, self.da, _ = self.strip_mac_addrs()self.seq_num, self.frag_num = self.strip_seq_cntrl(seq_idx)idx = qos_idxincr, self.qos_pri, self.qos_bit, self.qos_ack, self.amsdupresent =self.strip_qos_cntrl(idx, self.radiotap.prot_type)idx += incrif self.wep == :incr, self.ccmp_extiv = self.strip_ccmp(idx)idx += incrif parse_amsdu:if self.amsdupresent != and self.wep == :while idx < len(self._packet):msdu, offset = self.strip_msdu(idx)self.payload.append(msdu)idx += offsetelse:if self.wep == :msdu = {}offset, llc = self.strip_llc(idx)msdu[''] = llcmsdu[''] = self._packet[idx + offset:]self.payload.append(msdu)else:self.payload.append({'': self._packet[idx:]})", "docstring": "Constructor method.\n :frame: ctypes.Structure\n :parse_amsdu: Bool\n shall parse aggregated mac service data unit", "id": "f1089:c4:m0"} {"signature": "def strip_qos_cntrl(self, idx, prot_type):", "body": "qos_cntrl, = struct.unpack('', self._packet[idx:idx + ])qos_cntrl_bits = format(qos_cntrl, '')[::-]qos_pri = qos_cntrl & qos_bit = int(qos_cntrl_bits[])qos_ack = int(qos_cntrl_bits[:], )amsdupresent = if prot_type == '':amsdupresent = int(qos_cntrl_bits[])return , qos_pri, qos_bit, qos_ack, amsdupresent", "docstring": "strip(2 byte) wlan.qos\n :idx: int\n :prot_type: string\n 802.11 protocol type(.11ac, .11a, .11n, etc)\n :return: int\n number of processed bytes\n :return: int\n qos priority\n :return: int\n qos bit\n :return: int\n qos acknowledgement\n :return: int\n amsdupresent(aggregated mac service data unit)", "id": "f1089:c4:m1"} {"signature": "def strip_ccmp(self, idx):", "body": "ccmp_extiv = Noneif len(self._packet[idx:]) >= :raw_bytes = self._packet[idx:idx + ]ccmp_extiv, = struct.unpack_from('', raw_bytes, )return , ccmp_extiv", "docstring": "strip(8 byte) wlan.ccmp.extiv\n CCMP Extended Initialization Vector\n :return: int\n number of processed bytes\n :return: ctypes.raw\n ccmp vector", "id": "f1089:c4:m2"} {"signature": "def strip_msdu(self, idx):", "body": "padding = len_payload = msdu = {'': {},'': None,'': None,'': None,'': }(da_mac, sa_mac) = struct.unpack('', self._packet[idx:idx + ])msdu[''] = Wifi.get_mac_addr(da_mac)msdu[''] = Wifi.get_mac_addr(sa_mac)idx += msdu[''] = struct.unpack('', self._packet[idx:idx + ])[]idx += offset, msdu[''] = self.strip_llc(idx)idx += offsetlen_payload = msdu[''] - offsetmsdu[''] = self._packet[idx:idx + len_payload]padding = - (len_payload % )return msdu, msdu[''] + padding + ", "docstring": "strip single mac servis data unit(msdu)\n see -> https://mrncciew.com/2014/11/01/cwap-802-11-data-frame-aggregation/\n :idx: int\n :return: dict\n msdu\n :return: int\n number of processed bytes", "id": "f1089:c4:m3"} {"signature": "def strip_llc(self, idx):", "body": "llc = {}snap = llc_dsap = struct.unpack('', self._packet[idx:idx + ])[]llc[''] = llc_dsap >> llc[''] = llc_dsap & idx += llc_ssap = struct.unpack('', self._packet[idx:idx + ])[]llc[''] = llc_ssap >> llc[''] = llc_ssap & idx += if llc_dsap == snap and llc_ssap == snap:llc_control = struct.unpack('', self._packet[idx:idx + ])[]llc[''] = llc_control >> llc[''] = llc_control & idx += llc[''] = self._packet[idx:idx + ]idx += llc[''] = self._packet[idx:idx + ]return , llcelse:return , llc", "docstring": "strip(4 or 8 byte) logical link control headers\n :return: int\n number of processed bytes\n :return: dict\n llc information\n see -> http://www.wildpackets.com/resources/compendium/ethernet/frame_snap_iee8023\n ABBRVS.\n ssap: source service access point\n dsap: destination service access 
point\n SNAP(Subnetwork Access Protocol)", "id": "f1089:c4:m4"} {"signature": "def __init__(self, frame, no_rtap=False):", "body": "Wifi.__init__(self, frame, no_rtap)self.tagged_params = []self._raw_tagged_params = Noneself.timestamp = Noneself.interval = Noneself.fixed_capabils = None", "docstring": "Constructor Method\n :frame: ctypes.Structure\n :subtype: int", "id": "f1089:c5:m0"} {"signature": "@staticmethoddef parse_tagged_params(raw_tagged_params):", "body": "fcs_len = idx = tagged_params = []while idx < len(raw_tagged_params) - fcs_len:tag_num, tag_len = struct.unpack('', raw_tagged_params[idx:idx + ])idx += if len(raw_tagged_params) >= idx + tag_len:param = {}param[''], param[''] = tag_num, tag_lenpayload = raw_tagged_params[idx:idx + tag_len]if tag_num in MNGMT_TAGS:param[''] = MNGMT_TAGS[tag_num]if MNGMT_TAGS[tag_num] == '':param[''] = Management.parse_vendor_ie(payload)else:param[''] = payloadelse:param[''] = Nonetagged_params.append(param)idx += tag_lenelse:logging.warning('')log_msg = ''log_msg = log_msg.format(p_idx=idx + tag_len,p_len=len(raw_tagged_params))logging.warning(log_msg)return , tagged_paramsreturn , tagged_params", "docstring": "strip tagged information elements wlan_mgt.tag\n which has generic type-length-value structure\n [type, length, value]\n type(1 byte), length(1 byte), value(varies)\n [wlan_mgt.tag.number, wlan_mgt.tag.length, payload]\n structured fields.\n :return: dict[]\n list of tagged params\n :return: int\n 0 in succ, 1 for", "id": "f1089:c5:m2"} {"signature": "@staticmethoddef get_fixed_capabils(payload):", "body": "if len(payload) != :return Nonecapabils = {}fix_cap = struct.unpack('', payload)[]cap_bits = format(fix_cap, '')[::-]capabils[''] = int(cap_bits[]) capabils[''] = int(cap_bits[]) capabils[''] = int(cap_bits[]) capabils[''] = int(cap_bits[]) capabils[''] = int(cap_bits[]) capabils[''] = int(cap_bits[]) capabils[''] = int(cap_bits[]) capabils[''] = int(cap_bits[]) capabils[''] = int(cap_bits[]) capabils[''] = int(cap_bits[]) capabils[''] = int(cap_bits[]) capabils[''] = int(cap_bits[]) capabils[''] = int(cap_bits[]) return capabils", "docstring": "strip(2 byte) wlan_mgt.fixed.capabilities\n :payload: ctypes.structure\n 2 byte\n :return: dict\n None in error", "id": "f1089:c5:m3"} {"signature": "@staticmethoddef parse_vendor_ie(payload):", "body": "output = {}oui = struct.unpack('', payload[:])oui = b''.join([('' % o).encode('') for o in oui])oui_type = struct.unpack('', payload[:])[]oui_data = payload[:]output[''] = oui.upper()output[''] = oui_typeoutput[''] = oui_datareturn output", "docstring": "parse vendor specific information element\n oui -> organizationally unique identifier\n first 3 bytes of mac addresses\n see:https://www.wireshark.org/tools/oui-lookup.html\n strip wlan_mgt.tag.oui(3 bytes),\n wlan_mgt.tag.vendor.oui.type(1 byte)\n wlan_mgt.tag.vendor.data (varies)\n :payload: ctypes.structure\n :return: dict\n {'oui':00-11-22, 'oui_type':1, 'oui_data':ctypes.structure}", "id": "f1089:c5:m4"} {"signature": "@staticmethoddef get_timestamp(payload):", "body": "if len(payload) != :return Nonetimestamp = struct.unpack('', payload)[]return timestamp", "docstring": "strip wlan_mgt.fixed.timestamp(8 bytes)\n :payload: ctypes.structure\n :return: int\n None on error", "id": "f1089:c5:m5"} {"signature": "@staticmethoddef get_interval(payload):", "body": "if len(payload) != :return Noneinterval = struct.unpack('', payload)[]return interval", "docstring": "strip wlan_mgt.fixed.beacon(2 bytes)\n beacon interval\n :payload: 
ctypes.structure\n :return: int\n None on error", "id": "f1089:c5:m6"} {"signature": "@staticmethoddef strip_fixed_params(payload):", "body": "if len(payload) != :return None, None, Noneidx = timestamp = Management.get_timestamp(payload[idx:idx + ])idx += interval = Management.get_interval(payload[idx:idx + ])idx += capabils = Management.get_fixed_capabils(payload[idx:idx + ])return timestamp, interval, capabils", "docstring": "strip(12 byte) wlan_mgt.fixed.all\n :payload: ctypes.structure\n :return: int\n timestamp\n :return: int\n beacon interval\n :return: dict\n capabilities", "id": "f1089:c5:m7"} {"signature": "@staticmethoddef is_valid_mac_oui(mac_block):", "body": "if len(mac_block) != :return if '' in mac_block:if len(mac_block.split('')) != :return elif '' in mac_block:if len(mac_block.split('')) != :return return ", "docstring": "checks whether mac block is in format of\n 00-11-22 or 00:11:22.\n :return: int", "id": "f1089:c5:m8"} {"signature": "def set_fixed_capabils(self, capabils):", "body": "self.ess = capabils['']self.ibss = capabils['']self.priv = capabils['']self.short_preamble = capabils['']self.pbcc = capabils['']self.chan_agility = capabils['']self.spec_man = capabils['']self.short_slot = capabils['']self.apsd = capabils['']self.radio_meas = capabils['']self.dss_ofdm = capabils['']self.del_back = capabils['']self.imm_back = capabils['']", "docstring": "set keys of capabils into fields of object\n :capabils: dict", "id": "f1089:c5:m9"} {"signature": "def get_vendor_ies(self, mac_block=None, oui_type=None):", "body": "vendor_ies = []if mac_block is not None:if Management.is_valid_mac_oui(mac_block):mac_block = mac_block.upper()if '' in mac_block:mac_block.replace('', '')else:logging.warning(\"\")return Nonefor elem in self.tagged_params:tag_num = elem['']if MNGMT_TAGS[tag_num] == '':if mac_block is None:vendor_ies.append(elem)elif elem[''][''] == mac_block.encode(''):if oui_type is None:vendor_ies.append(elem)elif elem[''][''] == oui_type:vendor_ies.append(elem)return vendor_ies", "docstring": "vendor information element querying\n :mac_block: str\n first 3 bytes of mac addresses in format of\n 00-11-22 or 00:11:22 or 001122\n :oui_type: int\n vendors ie type\n :return: int\n is valid mac_block format\n -1 is unknown\n :return: dict[]\n list of oui information elements\n -1 on error (invalid v", "id": "f1089:c5:m10"} {"signature": "def __init__(self, frame, no_rtap=False):", "body": "Management.__init__(self, frame, no_rtap)idx = self.timestamp = self.interval = Noneself.ta = self.ra = self.bssid = Noneself.seq_num = self.frag_num = Noneself.ess = self.ibss = Noneself.privacy = Noneself.priv = self.short_preamble = self.pbcc = self.chan_agility = Noneself.spec_man = self.short_slot = self.apsd = self.radio_meas = Noneself.dss_ofdm = self.del_back = self.imm_back = Noneseq_idx, _, _, self.ta, self.ra, _, self.bssid = self.strip_mac_addrs()idx = seq_idxself.seq_num, self.frag_num = self.strip_seq_cntrl(idx)idx += payload = self._packet[idx:idx + ]timestamp, interval, fixed_capabils = self.strip_fixed_params(payload)if all([timestamp, interval, fixed_capabils]):self.timestamp = timestampself.interval = intervalself.set_fixed_capabils(fixed_capabils)idx += else:logging.warning(\"\")returnif idx < len(self._packet):self._raw_tagged_params = self._packet[idx:]is_out_bound, tagged_params = self.parse_tagged_params(self._raw_tagged_params)if len(tagged_params):self.tagged_params = tagged_paramsif is_out_bound:logging.warning(\"\")", "docstring": "Constructor method.\n :frame: 
ctypes.Structure", "id": "f1089:c8:m0"} {"signature": "def __init__(self, frame, no_rtap=False):", "body": "Wifi.__init__(self, frame, no_rtap)", "docstring": "Constructor method.\n :frame: ctypes.Structure", "id": "f1089:c9:m0"} {"signature": "def __init__(self, frame, no_rtap=False):", "body": "Control.__init__(self, frame, no_rtap)(ra_mac, ta_mac) = struct.unpack('', self._packet[:])self.ra = Wifi.get_mac_addr(ra_mac)self.ta = Wifi.get_mac_addr(ta_mac)", "docstring": "Constructor method.\n :frame: ctypes.Structure", "id": "f1089:c10:m0"} {"signature": "def __init__(self, frame, no_rtap=False):", "body": "Control.__init__(self, frame, no_rtap)ra_mac = struct.unpack('', self._packet[:])[]self.ra = Wifi.get_mac_addr(ra_mac)", "docstring": "Constructor method.\n :frame: ctypes.Structure", "id": "f1089:c11:m0"} {"signature": "def __init__(self, frame, no_rtap=False):", "body": "Control.__init__(self, frame, no_rtap)(ra_mac, ta_mac) = struct.unpack('', self._packet[:])self.ra = self.ta = Noneself.ackpolicy = self.multitid = Noneself.ssc_frag = self.ssc_seq = Noneself.bitmap_str = Noneself.acked_seqs = []self.ra = Wifi.get_mac_addr(ra_mac)self.ta = Wifi.get_mac_addr(ta_mac)idx = payload = self._packet[idx:idx + ]self.ackpolicy, self.multitid = BACK.strip_cntrl(payload)idx += payload = self._packet[idx:idx + ]self.ssc_seq, self.ssc_frag = BACK.strip_ssc(payload)idx += payload = self._packet[idx:idx + ]self.bitmap_str = BACK.strip_bitmap_str(payload)idx += self.acked_seqs = BACK.extract_acked_seqs(self.bitmap_str, self.ssc_seq)", "docstring": "Constructor method.\n :frame: ctypes.Structure", "id": "f1089:c12:m0"} {"signature": "def get_shark_field(self, fields):", "body": "out = super(BACK, self).get_shark_field(fields)out.update({'': self.acked_seqs,'': self.bitmap_str})return out", "docstring": ":fields: str[]", "id": "f1089:c12:m1"} {"signature": "@staticmethoddef strip_cntrl(payload):", "body": "cntrl = struct.unpack('', payload)[]cntrl_bits = format(cntrl, '')[::-]ackpolicy = int(cntrl_bits[])multitid = int(cntrl_bits[])return ackpolicy, multitid", "docstring": "strip(2 byte) wlan.ba.control\n :payload: ctypes.structure\n :return: int\n multitid (tid: traffic indicator)\n :return: int\n ackpolicy", "id": "f1089:c12:m2"} {"signature": "@staticmethoddef strip_ssc(payload):", "body": "ssc = struct.unpack('', payload)[]ssc_seq = ssc >> ssc_frag = ssc & return ssc_seq, ssc_frag", "docstring": "strip(2 byte) wlan_mgt.fixed.ssc\n :payload: ctypes.structure\n :return: int\n ssc_seq (starting sequence control sequence)\n :return: int\n ssc_frag (starting sequence control fragment number)", "id": "f1089:c12:m3"} {"signature": "@staticmethoddef strip_bitmap_str(payload):", "body": "bitmap = struct.unpack('', payload)bitmap_str = ''for elem in bitmap:bitmap_str += format(elem, '')[::-]return bitmap_str", "docstring": "strip(8 byte) wlan.ba.bm\n :payload: ctypes.structure\n :return: str\n bitmap", "id": "f1089:c12:m4"} {"signature": "@staticmethoddef extract_acked_seqs(bitmap, ssc_seq):", "body": "acked_seqs = []for idx, val in enumerate(bitmap):if int(val) == :seq = (ssc_seq + idx) % acked_seqs.append(seq)return acked_seqs", "docstring": "extracts acknowledged sequences from bitmap and\n starting sequence number.\n :bitmap: str\n :ssc_seq: int\n :return: int[]\n acknowledged sequence numbers", "id": "f1089:c12:m5"} {"signature": "def parse_require(self, env, keys, defaults={}):", "body": "for k in keys:env[k] = getattr(self.options, k) or env.get(k, None)if env[k] is None:self.error(\"\"\"\" % (k, 
env))passfor k, v in defaults.items():env.setdefault(k, v)return env", "docstring": "check and get require config\n:param dict env: user config node\n:param list keys: check keys\n .. note::\n\n option and key name must be same.\n\n:param dict defaults: default value for keys\n:return: dict.env with verified.\n\n\n.. exception::\n\n will raise `ValueError` when some key missed.", "id": "f1101:c1:m0"} {"signature": "def command_list():", "body": "from cliez.conf import COMPONENT_ROOTroot = COMPONENT_ROOTif root is None:sys.stderr.write(\"\")sys.exit()passif not os.path.exists(root):sys.stderr.write(\"\")sys.exit()passtry:path = os.listdir(os.path.join(root, ''))return [f[:-] for f in path iff.endswith('') and f != '']except FileNotFoundError:return []", "docstring": "Get sub-command list\n\n.. note::\n\n Don't use logger handle this function errors.\n\n Because the error should be a code error,not runtime error.\n\n\n:return: `list` matched sub-parser", "id": "f1102:m0"} {"signature": "def append_arguments(klass, sub_parsers, default_epilog, general_arguments):", "body": "entry_name = hump_to_underscore(klass.__name__).replace('','')epilog = default_epilog if default_epilogelse ''''sub_parser = sub_parsers.add_parser(entry_name, help=klass.__doc__,epilog=epilog)sub_parser.description = klass.add_arguments.__doc__if hasattr(klass, ''):slot_args = klass.add_slot_args() or []for v in slot_args:sub_parser.add_argument(*v[], **v[])sub_parser.description = klass.add_slot_args.__doc__passuser_arguments = klass.add_arguments() or []for v in user_arguments:sub_parser.add_argument(*v[], **v[])if not klass.exclude_global_option:for v in general_arguments:sub_parser.add_argument(*v[], **v[])return sub_parser", "docstring": "Add class options to argparser options.\n\n:param cliez.component.Component klass: subclass of Component\n:param Namespace sub_parsers:\n:param str default_epilog: default_epilog\n:param list general_arguments: global options, defined by user\n:return: Namespace subparser", "id": "f1102:m1"} {"signature": "def parse(parser, argv=None, settings_key='', no_args_func=None):", "body": "argv = argv or sys.argvcommands = command_list()if type(argv) not in [list, tuple]:raise TypeError(\"\")if len(argv) >= and argv[] in commands:sub_parsers = parser.add_subparsers()class_name = argv[].capitalize() + ''from cliez.conf import (COMPONENT_ROOT,LOGGING_CONFIG,EPILOG,GENERAL_ARGUMENTS)sys.path.insert(, os.path.dirname(COMPONENT_ROOT))mod = importlib.import_module(''.format(os.path.basename(COMPONENT_ROOT),argv[]))klass = getattr(mod, class_name)sub_parser = append_arguments(klass, sub_parsers, EPILOG,GENERAL_ARGUMENTS)options = parser.parse_args(argv[:])settings = Settings.bind(getattr(options, settings_key)) if settings_key and hasattr(options, settings_key) else Noneobj = klass(parser, sub_parser, options, settings)logger_level = logging.CRITICALif hasattr(options, ''):if options.verbose == :logger_level = logging.ERRORelif options.verbose == :logger_level = logging.WARNINGelif options.verbose == :logger_level = logging.INFOobj.logger.setLevel(logging.INFO)passif hasattr(options, '') and options.debug:logger_level = logging.DEBUGtry:import http.client as http_clienthttp_client.HTTPConnection.debuglevel = except Exception:passpassloggers = LOGGING_CONFIG['']for k, v in loggers.items():v.setdefault('', logger_level)if logger_level in [logging.INFO, logging.DEBUG]:v[''] = ['']passlogging_config.dictConfig(LOGGING_CONFIG)obj.run(options)return objif not parser.description and 
len(commands):sub_parsers = parser.add_subparsers()[sub_parsers.add_parser(v) for v in commands]passpassoptions = parser.parse_args(argv[:])if no_args_func and callable(no_args_func):return no_args_func(options)else:parser._print_message(\"\")pass", "docstring": "parser cliez app\n\n:param argparse.ArgumentParser parser: an instance\n of argparse.ArgumentParser\n:param argv: argument list,default is `sys.argv`\n:type argv: list or tuple\n\n:param str settings: settings option name,\n default is settings.\n\n:param object no_args_func: a callable object.if no sub-parser matched,\n parser will call it.\n\n:return: an instance of `cliez.component.Component` or its subclass", "id": "f1102:m2"} {"signature": "def __init__(self, parser=None, sub_parser=None, options=None,settings=None,*args, **kwargs):", "body": "self.parser = parserself.sub_parser = sub_parserself.options = optionsself.settings = settingsif not self.logger_name:self.logger_name = ''.format(self.__class__.__name__.lower().replace('', ''))self.logger = logging.getLogger(self.logger_name)pass", "docstring": "Don't overwrite this method.\nIn most case,you should custom `run()` method", "id": "f1105:c0:m0"} {"signature": "def print_message(self, message, fh=None):", "body": "return self.parser._print_message(message + \"\", fh)", "docstring": "print message on screen\n\n:param str message:\n:param file fh: file handle,default is None\n:return: None", "id": "f1105:c0:m1"} {"signature": "def print_loading(self, wait, message):", "body": "tags = ['', '', '', '']for i in range(wait):time.sleep()sys.stdout.write(\"\" % {'': message,'': tags[i % ]})sys.stdout.flush()passsys.stdout.write(\"\" % message)sys.stdout.flush()pass", "docstring": "print loading message on screen\n\n.. note::\n\n loading message only write to `sys.stdout`\n\n\n:param int wait: seconds to wait\n:param str message: message to print\n:return: None", "id": "f1105:c0:m2"} {"signature": "def warn_message(self, message, fh=None, prefix=\"\", suffix=\"\"):", "body": "msg = prefix + message + suffixfh = fh or sys.stdoutif fh is sys.stdout:termcolor.cprint(msg, color=\"\")else:fh.write(msg)pass", "docstring": "print warn type message,\nif file handle is `sys.stdout`, print color message\n\n\n:param str message: message to print\n:param file fh: file handle,default is `sys.stdout`\n:param str prefix: message prefix,default is `[warn]`\n:param str suffix: message suffix ,default is `...`\n:return: None", "id": "f1105:c0:m3"} {"signature": "def warn(self, *args, **kwargs):", "body": "return self.warn_message(*args, **kwargs)", "docstring": "alias for `warn_message`", "id": "f1105:c0:m4"} {"signature": "def error_message(self, message, fh=None, prefix=\"\",suffix=\"\"):", "body": "msg = prefix + message + suffixfh = fh or sys.stderrif fh is sys.stderr:termcolor.cprint(msg, color=\"\")else:fh.write(msg)pass", "docstring": "print error type message\nif file handle is `sys.stderr`, print color message\n\n:param str message: message to print\n:param file fh: file handle, default is `sys.stdout`\n:param str prefix: message prefix,default is `[error]`\n:param str suffix: message suffix ,default is '...'\n:return: None", "id": "f1105:c0:m5"} {"signature": "def error(self, message=None, exit_code=):", "body": "return self.parser.exit(exit_code, message)", "docstring": "print error message and exit.\nthis method call `argparser.exit` to exit.\n\n:param str message: message to print\n:param int exit_code: exit code,default is 2\n\n:return: None", "id": "f1105:c0:m6"} {"signature": "def 
system(self, cmd, fake_code=False):", "body": "try:if self.options.dry_run:def fake_system(cmd):self.print_message(cmd)return fake_codereturn fake_system(cmd)except AttributeError:self.logger.warnning(\"\"\"\"\"\")passreturn os.system(cmd)", "docstring": "a built-in wrapper make dry-run easier.\nyou should use this instead use `os.system`\n\n.. note::\n\n to use it,you need add '--dry-run' option in\n your argparser options\n\n\n:param str cmd: command to execute\n:param bool fake_code: only display command\n when is True,default is False\n:return:", "id": "f1105:c0:m7"} {"signature": "@staticmethoddef load_resource(path, root=''):", "body": "if root:full_path = root + '' + path.strip('')else:full_path = pathbuf = ''try:buf = open(full_path).read()except IOError:pkg, path = full_path.split('', )try:import pkg_resourcesbuf = pkg_resources.resource_string(pkg, path)if type(buf) != str:buf = buf.decode('')passexcept AttributeError:passreturn buf", "docstring": ".. warning::\n\n Experiment feature.\n\n BE CAREFUL! WE MAY REMOVE THIS FEATURE!\n\n\nload resource file which in package.\nthis method is used to load file easier in different environment.\n\ne.g:\n\nconsume we have a file named `resource.io` in package `cliez.conf`,\nand we want to load it.\nthe easiest way may like this:\n\n.. code-block:: python\n\n open('../conf/resource.io').read()\n\n\nAn obvious problem is `..` is relative path.\nit will cause an error.\n\n`load_resource` is designed for solve this problem.\n\n\nThe following code are equivalent:\n\n.. code-block:: python\n\n a = Component()\n a.load_resource('resource.io', root='cliez/base')\n a.load_resource('base/resource.io', root='cliez')\n a.load_resource('/base/resource.io', root='cliez')\n a.load_resource('cliez/base/resource.io')\n a.load_resource(__file__.rsplit('/', 2)[0] +\n '/cliez/base/resource.io')\n\n\n.. note::\n\n The document charset *MUST BE* utf-8\n\n\n:param str path: file path\n:param str root: root path\n\n:return: str", "id": "f1105:c0:m8"} {"signature": "@staticmethoddef load_description(name, root=''):", "body": "desc = ''try:desc = Component.load_resource(name, root=root)except (IOError, ImportError):passreturn desc", "docstring": ".. warning::\n\n Experiment feature.\n\n BE CAREFUL! WE MAY REMOVE THIS FEATURE!\n\n\nLoad resource file as description,\nif resource file not exist,will return empty string.\n\n:param str path: name resource path\n:param str root: same as `load_resource` root\n:return: `str`", "id": "f1105:c0:m9"} {"signature": "@classmethoddef add_arguments(cls):", "body": "pass", "docstring": "Sub-Command Document Write At Here.", "id": "f1105:c0:m10"} {"signature": "@classmethoddef add_arguments(cls):", "body": "return [(('',), dict(help='')),(('',), dict(nargs='', default='', help='')),(('',),dict(action='', help='')),(('',),dict(action='', help='')),]pass", "docstring": "Create project.\n\nBy default cliez find github first,\nif not found,then try to search bitbucket\n\nif user define `--local` option. 
search local path first.\n\nif user define `--bitbucket`, search bitbucket first,\nthen search github.", "id": "f1106:c0:m1"} {"signature": "def load_gitconfig(self):", "body": "gitconfig_path = os.path.expanduser('')if os.path.exists(gitconfig_path):parser = Parser()parser.read(gitconfig_path)parser.sections()return parserpass", "docstring": "try to use gitconfig info.\nauthor, email, etc.", "id": "f1107:c0:m0"} {"signature": "def render(self, match_string, new_string):", "body": "current_dir = self.options.dirif os.path.expanduser(current_dir) in ['', os.path.expanduser(\"\")]:self.error(\"\", -)passdef match_directory(path):\"\"\"\"\"\"skip = Falsefor include_dir in ['' % s for s inself.exclude_directories]:if path.find(include_dir) > -:skip = Truebreakpassreturn skipfor v in os.walk(current_dir):if os.path.basename(v[]) in self.exclude_directories:continueif match_directory(v[]):continuefor base_name in v[]:file_name = os.path.join(v[], base_name)try:with open(file_name, '') as fh:buffer = fh.read()buffer = buffer.replace(match_string, new_string)passwith open(file_name, '') as fh:fh.write(buffer)passexcept UnicodeDecodeError:continuepasspassredo_directories = []redo_files = []for v in os.walk(current_dir):if os.path.basename(v[]) in self.exclude_directories:continueif match_directory(v[]):continuefor sub_dir in v[]:if match_string in sub_dir:redo_directories.append(os.path.join(v[], sub_dir))passfor f in v[]:if match_string in f:redo_files.append(os.path.join(v[], f))passpassredo_directories.reverse()redo_files.reverse()for v in redo_files:dir_name = os.path.dirname(v)file_name = os.path.basename(v)shutil.move(v, os.path.join(dir_name,file_name.replace(match_string, new_string)))passfor v in redo_directories:shutil.move(v, v.replace(match_string, new_string))passpass", "docstring": "render template string to user string\n:param str match_string: template string, syntax: '___VAR___'\n:param str new_string: user string\n:return:", "id": "f1107:c0:m1"} {"signature": "@classmethoddef add_arguments(cls):", "body": "return [(('',), dict(action='', help='')),(('', ''),dict(nargs='', help='')),(('',),dict(action='', help='')),]", "docstring": "Init project.", "id": "f1107:c0:m9"} {"signature": "def run(self, options):", "body": "self.logger.debug(\"\")depends = ['']nil_tools = []self.logger.info(\"\", depends)for v in depends:real_path = shutil.which(v)if real_path:self.print_message(\"\"\"\".format(v,real_path,termcolor.colored('',color='')))else:nil_tools.append(v)self.error_message(''.format(v, ''), prefix='',suffix='')passif nil_tools:self.print_message('')self.error(\"\")else:self.print_message(\"\"\"\")self.logger.debug(\"\")passpass", "docstring": ".. todo::\n\n check network connection\n\n:param Namespace options: parse result from argparse\n:return:", "id": "f1108:c0:m0"} {"signature": "@classmethoddef add_arguments(cls):", "body": "return [(('',), dict(action='', help='')),(('', ''), dict(action='')),]", "docstring": "check the environment cliez depends on.", "id": "f1108:c0:m1"} {"signature": "def include_file(filename, global_vars=None, local_vars=None):", "body": "if global_vars is None:global_vars = sys._getframe().f_globalsif local_vars is None:local_vars = sys._getframe().f_localswith open(filename, '') as f:code = compile(f.read(), os.path.basename(filename), '')exec(code, global_vars, local_vars)pass", "docstring": ".. 
deprecated:: 2.1\n Don't use this any more.\n\n It's not pythonic.\n\n\ninclude a file, like PHP's include.\n\ninclude is very useful when we need to split a large config file", "id": "f1110:m0"} {"signature": "def hump_to_underscore(name):", "body": "new_name = ''pos = for c in name:if pos == :new_name = c.lower()elif <= ord(c) <= :new_name += '' + c.lower()passelse:new_name += cpos += passreturn new_name", "docstring": "Convert Hump style to underscore\n\n:param name: Hump Character\n:return: str", "id": "f1110:m1"} {"signature": "def settings(path=None, with_path=None):", "body": "if path:Settings.bind(path, with_path=with_path)return Settings._wrapped", "docstring": "Get or set `Settings._wrapped`\n\n:param str path: a python module file,\n if user sets it, write config to `Settings._wrapped`\n:param str with_path: search path\n:return: An instance of `Settings`", "id": "f1111:m0"} {"signature": "@staticmethoddef bind(mod_path, with_path=None):", "body": "if with_path:if os.path.isdir(with_path):sys.path.insert(, with_path)else:sys.path.insert(, with_path.rsplit('', )[])passmod = importlib.import_module(mod_path)settings = Settings()for v in dir(mod):if v[] == '' or type(getattr(mod, v)).__name__ == '':continuesetattr(settings, v, getattr(mod, v))passSettings._path = mod_pathSettings._wrapped = settingsreturn settings", "docstring": "bind user variable to `_wrapped`\n\n.. note::\n\n you don't need to call this method yourself.\n\n the program will call it in `cliez.parser.parse`\n\n\n.. exception::\n\n if path is not correct, it will cause an `ImportError`\n\n\n:param str mod_path: module path, *use dot style,'mod.mod1'*\n:param str with_path: add path to `sys.path`,\n if path is a file, use its parent.\n:return: An instance of `Settings`", "id": "f1111:c0:m0"} {"signature": "def set_signal(self):", "body": "def signal_handle(signum, frame):self.error(\"\")sys.exit(-)passsignal.signal(signal.SIGINT, signal_handle)pass", "docstring": "Set up signal handling\n\nThe default is to interrupt immediately. In scenarios where users need to wait for threads to finish,\nthe behavior can be customized here\n\n:return:", "id": "f1112:c0:m0"} {"signature": "def check_exclusive_mode(self):", "body": "if self.options.exclusive_mode:import psutilcurrent_pid = os.getpid()current = psutil.Process(current_pid).cmdline()for pid in psutil.pids():p = psutil.Process(pid)try:if current_pid != pid and current == p.cmdline():self.error_message(\"\".format(p.pid))sys.exit(-)passexcept psutil.ZombieProcess:passexcept psutil.AccessDenied:passpasspasspass", "docstring": "Check whether the program runs in exclusive mode\n\nThe argument order must be identical; if the argument order differs, the two are treated as different processes\nThis design takes into account that:\n\n- In general, services running in exclusive mode are started by scripts such as crontab, so the argument order never changes\n- During debugging, this lets us keep debugging without terminating the existing process\n\n:return:", "id": "f1112:c0:m1"} {"signature": "def run(self, options):", "body": "self.set_signal()self.check_exclusive_mode()slot = self.Handle(self)i = while i < options.threads:t = threading.Thread(target=self.worker, 
args=[slot])if options.once is True or options.no_daemon is True:t.daemon = Falseelse:t.daemon = Truet.start()i += if options.once is False:while True:if threading.active_count() > :sleep()else:if threading.current_thread().name == \"\":sys.exit()pass", "docstring": "In general, you don't need to overwrite this method.\n\n:param options:\n:return:", "id": "f1112:c0:m2"} {"signature": "async def join_rtm(self, filters=None):", "body": "if filters is None:filters = [cls(self) for cls in self.MESSAGE_FILTERS]url = await self._get_socket_url()logger.debug('', url)async with ws_connect(url) as socket:first_msg = await socket.receive()self._validate_first_message(first_msg)self.socket = socketasync for message in socket:if message.tp == MsgType.text:await self.handle_message(message, filters)elif message.tp in (MsgType.closed, MsgType.error):if not socket.closed:await socket.close()self.socket = Nonebreaklogger.info('')", "docstring": "Join the real-time messaging service.\n\n Arguments:\n filters (:py:class:`dict`, optional): Dictionary mapping\n message filters to the functions they should dispatch to.\n Use a :py:class:`collections.OrderedDict` if precedence is\n important; only one filter, the first match, will be\n applied to each message.", "id": "f1114:c0:m1"} {"signature": "async def handle_message(self, message, filters):", "body": "data = self._unpack_message(message)logger.debug(data)if data.get('') == '':raise SlackApiError(data.get('', {}).get('', str(data)))elif self.message_is_to_me(data):text = data[''][len(self.address_as):].strip()if text == '':return self._respond(channel=data[''],text=self._instruction_list(filters),)elif text == '':return self._respond(channel=data[''],text=self.VERSION,)for _filter in filters:if _filter.matches(data):logger.debug('')async for response in _filter:self._respond(channel=data[''], text=response)", "docstring": "Handle an incoming message appropriately.\n\n Arguments:\n message (:py:class:`aiohttp.websocket.Message`): The incoming\n message to handle.\n filters (:py:class:`list`): The filters to apply to incoming\n messages.", "id": "f1114:c0:m2"} {"signature": "def message_mentions_me(self, data):", "body": "return (data.get('') == '' andself.full_name in data.get('', ''))", "docstring": "If you send a message that mentions me", "id": "f1114:c0:m3"} {"signature": "def message_is_to_me(self, data):", "body": "return (data.get('') == '' anddata.get('', '').startswith(self.address_as))", "docstring": "If you send a message directly to me", "id": "f1114:c0:m4"} {"signature": "@classmethodasync def from_api_token(cls, token=None, api_cls=SlackBotApi):", "body": "api = api_cls.from_env() if token is None else api_cls(api_token=token)data = await api.execute_method(cls.API_AUTH_ENDPOINT)return cls(data[''], data[''], api)", "docstring": "Create a new instance from the API token.\n\n Arguments:\n token (:py:class:`str`, optional): The bot's API token\n (defaults to ``None``, which means looking in the\n environment).\n api_cls (:py:class:`type`, optional): The class to create\n as the ``api`` argument for API access (defaults to\n :py:class:`aslack.slack_api.SlackBotApi`).\n\n Returns:\n :py:class:`SlackBot`: The new instance.", "id": "f1114:c0:m5"} {"signature": "def _format_message(self, channel, text):", "body": "payload = {'': '', '': next(self._msg_ids)}payload.update(channel=channel, text=text)return json.dumps(payload)", "docstring": "Format an outgoing message for transmission.\n\n Note:\n Adds the message type (``'message'``) and incremental 
ID.\n\n Arguments:\n channel (:py:class:`str`): The channel to send to.\n text (:py:class:`str`): The message text to send.\n\n Returns:\n :py:class:`str`: The JSON string of the message.", "id": "f1114:c0:m6"} {"signature": "async def _get_socket_url(self):", "body": "data = await self.api.execute_method(self.RTM_START_ENDPOINT,simple_latest=True,no_unreads=True,)return data['']", "docstring": "Get the WebSocket URL for the RTM session.\n\n Warning:\n The URL expires if the session is not joined within 30\n seconds of the API call to the start endpoint.\n\n Returns:\n :py:class:`str`: The socket URL.", "id": "f1114:c0:m7"} {"signature": "def _instruction_list(self, filters):", "body": "return ''.join([self.INSTRUCTIONS.strip(),'',''''.format(self.user),''''.format(self.user),] + [filter.description() for filter in filters])", "docstring": "Generates the instructions for a bot and its filters.\n\n Note:\n The guidance for each filter is generated by combining the\n docstrings of the predicate filter and resulting dispatch\n function with a single space between. The class's\n :py:attr:`INSTRUCTIONS` and the default help command are\n added.\n\n Arguments:\n filters (:py:class:`list`): The filters to apply to incoming\n messages.\n\n Returns:\n :py:class:`str`: The bot's instructions.", "id": "f1114:c0:m8"} {"signature": "def _respond(self, channel, text):", "body": "result = self._format_message(channel, text)if result is not None:logger.info('',truncate(result, max_len=),)self.socket.send_str(result)", "docstring": "Respond to a message on the current socket.\n\n Args:\n channel (:py:class:`str`): The channel to send to.\n text (:py:class:`str`): The message text to send.", "id": "f1114:c0:m9"} {"signature": "@staticmethoddef _unpack_message(msg):", "body": "return json.loads(msg.data)", "docstring": "Unpack the data from the message.\n\n Arguments:\n msg (:py:class:`aiohttp.websocket.Message`): The message to\n unpack.\n\n Returns:\n :py:class:`dict`: The loaded data.\n\n Raises:\n :py:class:`AttributeError`: If there is no data attribute.\n :py:class:`json.JSONDecodeError`: If the data isn't valid\n JSON.", "id": "f1114:c0:m10"} {"signature": "@classmethoddef _validate_first_message(cls, msg):", "body": "data = cls._unpack_message(msg)logger.debug(data)if data != cls.RTM_HANDSHAKE:raise SlackApiError(''.format(data))logger.info('')", "docstring": "Check the first message matches the expected handshake.\n\n Note:\n The handshake is provided as :py:attr:`RTM_HANDSHAKE`.\n\n Arguments:\n msg (:py:class:`aiohttp.Message`): The message to validate.\n\n Raises:\n :py:class:`SlackApiError`: If the data doesn't match the\n expected handshake.", "id": "f1114:c0:m11"} {"signature": "def description(self):", "body": "if self._description is None:text = ''.join(self.__doc__.splitlines()[:]).strip()lines = []for line in map(str.strip, text.splitlines()):if line and lines:lines[-] = ''.join((lines[-], line))elif line:lines.append(line)else:lines.append('')self._description = ''.join(lines)return self._description", "docstring": "A user-friendly description of the handler.\n\n Returns:\n :py:class:`str`: The handler's description.", "id": "f1116:c0:m3"} {"signature": "def matches(self, data):", "body": "self.text = data.get('')return True", "docstring": "Whether the handler should handle the current message.\n\n Args:\n data: The data representing the current message.\n\n Returns:\n :py:class:`bool`: Whether it should be handled.", "id": "f1116:c0:m4"} {"signature": "def api_subclass_factory(name, 
docstring, remove_methods, base=SlackApi):", "body": "methods = deepcopy(base.API_METHODS)for parent, to_remove in remove_methods.items():if to_remove is ALL:del methods[parent]else:for method in to_remove:del methods[parent][method]return type(name, (base,), dict(API_METHODS=methods, __doc__=docstring))", "docstring": "Create an API subclass with fewer methods than its base class.\n\n Arguments:\n name (:py:class:`str`): The name of the new class.\n docstring (:py:class:`str`): The docstring for the new class.\n remove_methods (:py:class:`dict`): The methods to remove from\n the base class's :py:attr:`API_METHODS` for the subclass. The\n key is the name of the root method (e.g. ``'auth'`` for\n ``'auth.test'``, the value is either a tuple of child method\n names (e.g. ``('test',)``) or, if all children should be\n removed, the special value :py:const:`ALL`.\n base (:py:class:`type`, optional): The base class (defaults to\n :py:class:`SlackApi`).\n\n Returns:\n :py:class:`type`: The new subclass.\n\n Raises:\n :py:class:`KeyError`: If the method wasn't in the superclass.", "id": "f1117:m0"} {"signature": "async def execute_method(self, method, **params):", "body": "url = self.url_builder(method, url_params=params)logger.info('', method)response = await aiohttp.get(url)logger.info('', response.status)if response.status == :json = await response.json()logger.debug('', json)if json.get(''):return jsonraise SlackApiError(json[''])else:raise_for_status(response)", "docstring": "Execute a specified Slack Web API method.\n\n Arguments:\n method (:py:class:`str`): The name of the method.\n **params (:py:class:`dict`): Any additional parameters\n required.\n\n Returns:\n :py:class:`dict`: The JSON data from the response.\n\n Raises:\n :py:class:`aiohttp.web_exceptions.HTTPException`: If the HTTP\n request returns a code other than 200 (OK).\n SlackApiError: If the Slack API is reached but the response\n contains an error message.", "id": "f1117:c1:m0"} {"signature": "@classmethoddef method_exists(cls, method):", "body": "methods = cls.API_METHODSfor key in method.split(''):methods = methods.get(key)if methods is None:breakif isinstance(methods, str):logger.debug('', method, methods)return Truereturn False", "docstring": "Whether a given method exists in the known API.\n\n Arguments:\n method (:py:class:`str`): The name of the method.\n\n Returns:\n :py:class:`bool`: Whether the method is in the known API.", "id": "f1117:c1:m1"} {"signature": "@propertydef headers(self):", "body": "return {}", "docstring": "Get the headers for the service requests.\n\n Returns:\n :py:class:`dict`: The header mapping.", "id": "f1119:c0:m1"} {"signature": "def url_builder(self, endpoint, *, root=None, params=None, url_params=None):", "body": "if root is None:root = self.ROOTscheme, netloc, path, _, _ = urlsplit(root)return urlunsplit((scheme,netloc,urljoin(path, endpoint),urlencode(url_params or {}),'',)).format(**params or {})", "docstring": "Create a URL for the specified endpoint.\n\n Arguments:\n endpoint (:py:class:`str`): The API endpoint to access.\n root: (:py:class:`str`, optional): The root URL for the\n service API.\n params: (:py:class:`dict`, optional): The values for format\n into the created URL (defaults to ``None``).\n url_params: (:py:class:`dict`, optional): Parameters to add\n to the end of the URL (defaults to ``None``).\n\n Returns:\n :py:class:`str`: The resulting URL.", "id": "f1119:c0:m2"} {"signature": "@classmethoddef from_env(cls):", "body": "token = getenv(cls.TOKEN_ENV_VAR)if token is 
None:msg = ''.format(cls.TOKEN_ENV_VAR)raise ValueError(msg)return cls(api_token=token)", "docstring": "Create a service instance from an environment variable.", "id": "f1119:c1:m1"} {"signature": "def url_builder(self, endpoint, params=None, url_params=None):", "body": "if url_params is None:url_params = OrderedDict()url_params[self.AUTH_PARAM] = self.api_tokenreturn super().url_builder(endpoint,params=params,url_params=url_params,)", "docstring": "Add authentication URL parameter.", "id": "f1119:c2:m0"} {"signature": "def raise_for_status(response):", "body": "for err_name in web_exceptions.__all__:err = getattr(web_exceptions, err_name)if err.status_code == response.status:payload = dict(headers=response.headers,reason=response.reason,)if issubclass(err, web_exceptions._HTTPMove): raise err(response.headers[''], **payload)raise err(**payload)", "docstring": "Raise an appropriate error for a given response.\n\n Arguments:\n response (:py:class:`aiohttp.ClientResponse`): The API response.\n\n Raises:\n :py:class:`aiohttp.web_exceptions.HTTPException`: The appropriate\n error for the response's status.", "id": "f1120:m0"} {"signature": "def truncate(text, max_len=, end=''):", "body": "if len(text) <= max_len:return textreturn text[:max_len].rsplit('', maxsplit=)[] + end", "docstring": "Truncate the supplied text for display.\n\n Arguments:\n text (:py:class:`str`): The text to truncate.\n max_len (:py:class:`int`, optional): The maximum length of the\n text before truncation (defaults to 350 characters).\n end (:py:class:`str`, optional): The ending to use to show that\n the text was truncated (defaults to ``'...'``).\n\n Returns:\n :py:class:`str`: The truncated text.", "id": "f1120:m1"} {"signature": "def connect(self, axis0, n0_index, source_angle, axis1, n1_index, target_angle, **kwargs):", "body": "n0 = axis0.nodes[n0_index]n1 = axis1.nodes[n1_index]pth = self.dwg.path(d=\"\" % (n0.x, n0.y), fill='', **kwargs) alfa = axis0.angle() + radians(source_angle)length = sqrt( ((n0.x - axis0.start[])**) + ((n0.y-axis0.start[])**)) x = axis0.start[] + length * cos(alfa);y = axis0.start[] + length * sin(alfa);pth.push(\"\" % (x, y)) alfa = axis1.angle() + radians(target_angle)length = sqrt( ((n1.x - axis1.start[])**) + ((n1.y-axis1.start[])**)) x = axis1.start[] + length * cos(alfa);y = axis1.start[] + length * sin(alfa);pth.push(\"\" % (x, y)) pth.push(\"\" % (n1.x, n1.y)) self.dwg.add(pth)", "docstring": "Draw edges as B\u00e9zier curves.\n\n Start and end points map to the coordinates of the given nodes\n which in turn are set when adding nodes to an axis with the\n Axis.add_node() method, by using the placement information of\n the axis and a specified offset from its start point.\n\n Control points are set at the same distance from the start (or end)\n point of an axis as their corresponding nodes, but along an invisible\n axis that shares its origin but diverges by a given angle.\n\n\n Parameters\n ----------\n axis0 : source Axis object\n n0_index : key of source node in nodes dictionary of axis0\n source_angle : angle of departure for invisible axis that diverges from axis0 and holds first control points\n axis1 : target Axis object\n n1_index : key of target node in nodes dictionary of axis1\n target_angle : angle of departure for invisible axis that diverges from axis1 and holds second control points\n kwargs : extra SVG attributes for path element, optional\n Set or change attributes using key=value", "id": "f1134:c0:m2"} {"signature": "def __init__( self, start=(,), end=(,), 
**kwargs):", "body": "self.start = startself.end = endself.nodes = {}self.dwg = svgwrite.Drawing()self.attrs = kwargs", "docstring": "Initialize Axis object with start, end positions and optional SVG attributes\n\n Parameters\n ----------\n start : ( x, y ) start point of the axis\n end : (x1, y1) end point of the axis\n kwargs : extra SVG attributes for line element, optional\n Set or change attributes using key=value\n\n Example\n -------\n >>> axis0 = Axis( (150, 200), # start\n (150, 0), # end\n stroke=\"black\", stroke_width=1.5) # pass SVG attributes of axes", "id": "f1134:c1:m0"} {"signature": "def add_node(self, node, offset):", "body": "width = self.end[] - self.start[]height = self.end[] - self.start[] node.x = self.start[] + (width * offset)node.y = self.start[] + (height * offset)self.nodes[node.ID] = node", "docstring": "Add a Node object to nodes dictionary, calculating its coordinates using offset\n\n Parameters\n ----------\n node : a Node object\n offset : float \n number between 0 and 1 that sets the distance\n from the start point at which the node will be placed", "id": "f1134:c1:m1"} {"signature": "def __init__(self, ID):", "body": "self.ID = IDself.x = self.y = self.dwg = svgwrite.Drawing()", "docstring": "Parameters\n----------\nID: a unique key for the nodes dict of an axis.", "id": "f1134:c2:m0"} {"signature": "def notify(self, message, title=\"\", **kwargs):", "body": "logger.info(\"\".format(title, message))try:data = {'': self._token,'': self._user,'': title,'': message,}data.update(kwargs)payload = []for (k, v) in data.items():if isinstance(v, str_type):payload.append((k, v.encode(\"\")))else:payload.append((k, v))headers = {\"\": \"\",}conn = HTTPSConnection(\"\")params = urlencode(payload)conn.request(\"\", \"\", params, headers)rsp = conn.getresponse()if rsp.status != :raise PushoverException(\"\".format(rsp.status))conn.close()except Exception as e:raise PushoverException(\"\".format(e))", "docstring": "priority (-2 -1 0 1 2)\nsound (bike,bugle,cashregister,classical,cosmic,falling,gamelan,\n incoming,intermission,magic,mechanical,pianobar,siren,spacealarm,\n tugboat,alien,climb,persistent,echo,updown,none)", "id": "f1136:c1:m1"} {"signature": "def get_secrets(prefixes, relative_paths):", "body": "try:return os.path.join(sys._MEIPASS, relative_paths[-])except Exception:for prefix in prefixes:for relative_path in relative_paths:path = os.path.join(prefix, relative_path)if os.path.exists(path):return pathelse:return None", "docstring": "Taken from https://github.com/tokland/youtube-upload/blob/master/youtube_upload/main.py\nGet the first existing filename of relative_path seeking on prefixes directories.", "id": "f1140:m6"} {"signature": "def __button_action(self, data=None):", "body": "if any(not x for x in (self._ename.value, self._p1.value, self._p2.value, self._file.value)):print(\"\")returnself.__p1chars = []self.__p2chars = []options = Namespace()self.__history.append(self.__save_form())options.ename = self._ename.valueif self._ename_min.value:options.ename_min = self._ename_min.valueelse:options.ename_min = options.enameoptions.pID = self._pID.valueoptions.mtype = self._mtype.valueoptions.mmid = options.mtypeoptions.p1 = self._p1.valueoptions.p2 = self._p2.valueoptions.p1char = self._p1char.valueoptions.p2char = self._p2char.valueoptions.bracket = self._bracket.valueisadir = os.path.isdir(self._file.value)if isadir:options.file = max([os.path.join(self._file.value, f) for f in os.listdir(self._file.value) if 
os.path.isfile(os.path.join(self._file.value, f))], key=os.path.getmtime)else:options.file = self._file.valueoptions.tags = self._tags.valueoptions.msuffix = self._msuffix.valueoptions.mprefix = self._mprefix.valueoptions.privacy = self._privacy.valueoptions.descrip = self._description.valueoptions.titleformat = self._titleformat.valueif self._p1sponsor.value:options.p1 = \"\".join((self._p1sponsor.value, options.p1))if self._p2sponsor.value:options.p2 = \"\".join((self._p2sponsor.value, options.p2))options.ignore = Falseself.__reset_match(False, isadir)self.__add_to_qview(options)self._queueref.append(options)if consts.firstrun:thr = threading.Thread(target=self.__worker)thr.daemon = Truethr.start()consts.firstrun = False", "docstring": "Button action event", "id": "f1142:c3:m1"} {"signature": "def resorted(values):", "body": "if not values:return valuesvalues = sorted(values)first_word = next((cnt for cnt, val in enumerate(values)if val and not val[].isdigit()),None)if first_word is None:return valueswords = values[first_word:]numbers = values[:first_word]return words + numbers", "docstring": "Sort values, but put numbers after alphabetically sorted words.\n\nThis function is here to make outputs diff-compatible with Aleph.\n\nExample::\n >>> sorted([\"b\", \"1\", \"a\"])\n ['1', 'a', 'b']\n >>> resorted([\"b\", \"1\", \"a\"])\n ['a', 'b', '1']\n\nArgs:\n values (iterable): any iterable object/list/tuple/whatever.\n\nReturns:\n list of sorted values, but with numbers after words", "id": "f1159:m0"} {"signature": "def __init__(self, xml=None, resort=True):", "body": "self.leader = Noneself.oai_marc = Falseself.controlfields = OrderedDict()self.datafields = OrderedDict()self.valid_i_chars = set(list(\"\"))self.resorted = tools.resorted if resort else lambda x: xif hasattr(xml, \"\"):xml = xml.read()if xml is not None:self._original_xml = xmlself._parse_string(xml)", "docstring": "Constructor.\n\nArgs:\n xml (str/file, default None): XML to be parsed. 
May be file-like\n object.\n resort (bool, default True): Sort the output alphabetically?", "id": "f1161:c0:m0"} {"signature": "def _parse_string(self, xml):", "body": "if not isinstance(xml, HTMLElement):xml = dhtmlparser.parseString(str(xml))record = xml.find(\"\")if not record:raise ValueError(\"\")record = record[]self.oai_marc = len(record.find(\"\")) > if not self.oai_marc:leader = record.find(\"\")if len(leader) >= :self.leader = leader[].getContent()if self.oai_marc:self._parse_control_fields(record.find(\"\"), \"\")self._parse_data_fields(record.find(\"\"), \"\", \"\")else:self._parse_control_fields(record.find(\"\"), \"\")self._parse_data_fields(record.find(\"\"), \"\", \"\")if self.oai_marc and \"\" in self.controlfields:self.leader = self.controlfields[\"\"]", "docstring": "Parse MARC XML document to dicts, which are contained in\nself.controlfields and self.datafields.\n\nArgs:\n xml (str or HTMLElement): input data\n\nAlso detect if this is oai marc format or not (see elf.oai_marc).", "id": "f1161:c0:m1"} {"signature": "def _parse_control_fields(self, fields, tag_id=\"\"):", "body": "for field in fields:params = field.paramsif tag_id not in params:continueself.controlfields[params[tag_id]] = field.getContent().strip()", "docstring": "Parse control fields.\n\nArgs:\n fields (list): list of HTMLElements\n tag_id (str): parameter name, which holds the information, about\n field name this is normally \"tag\", but in case of\n oai_marc \"id\".", "id": "f1161:c0:m2"} {"signature": "def _parse_data_fields(self, fields, tag_id=\"\", sub_id=\"\"):", "body": "for field in fields:params = field.paramsif tag_id not in params:continuefield_repr = OrderedDict([[self.i1_name, params.get(self.i1_name, \"\")],[self.i2_name, params.get(self.i2_name, \"\")],])for subfield in field.find(\"\"):if sub_id not in subfield.params:continuecontent = MARCSubrecord(val=subfield.getContent().strip(),i1=field_repr[self.i1_name],i2=field_repr[self.i2_name],other_subfields=field_repr)code = subfield.params[sub_id]if code in field_repr:field_repr[code].append(content)else:field_repr[code] = [content]tag = params[tag_id]if tag in self.datafields:self.datafields[tag].append(field_repr)else:self.datafields[tag] = [field_repr]", "docstring": "Parse data fields.\n\nArgs:\n fields (list): of HTMLElements\n tag_id (str): parameter name, which holds the information, about\n field name this is normally \"tag\", but in case of\n oai_marc \"id\"\n sub_id (str): id of parameter, which holds informations about\n subfield name this is normally \"code\" but in case of\n oai_marc \"label\"", "id": "f1161:c0:m3"} {"signature": "def add_ctl_field(self, name, value):", "body": "if len(name) != :raise ValueError(\"\")self.controlfields[name] = value", "docstring": "Add new control field `value` with under `name` into control field\ndictionary :attr:`controlfields`.", "id": "f1161:c0:m4"} {"signature": "def add_data_field(self, name, i1, i2, subfields_dict):", "body": "if i1 not in self.valid_i_chars:raise ValueError(\"\" + i1 + \"\")if i2 not in self.valid_i_chars:raise ValueError(\"\" + i2 + \"\")if len(name) != :raise ValueError(\"\")if not subfields_dict:raise ValueError(\"\")if not isinstance(subfields_dict, dict):raise ValueError(\"\")subrecords = []for key, val in subfields_dict.items():if len(key) > :raise KeyError(\"\")if not isinstance(val, list):val = [val]subfields = map(lambda x: MARCSubrecord(x, i1, i2, None),val)subfields_dict[key] = subfieldssubrecords.extend(subfields)subfields_dict[self.i1_name] = 
i1subfields_dict[self.i2_name] = i2if name in self.datafields:self.datafields[name].append(subfields_dict)else:self.datafields[name] = [subfields_dict]other_subfields = self.datafields[name]for record in subrecords:record.other_subfields = other_subfields", "docstring": "Add new datafield into :attr:`datafields` and take care of OAI MARC\ndifferencies.\n\nArgs:\n name (str): Name of datafield.\n i1 (char): Value of i1/ind1 parameter.\n i2 (char): Value of i2/ind2 parameter.\n subfields_dict (dict): Dictionary containing subfields (as list).\n\n`subfields_dict` is expected to be in this format::\n\n {\n \"field_id\": [\"subfield data\",],\n ...\n \"z\": [\"X0456b\"]\n }\n\nWarning:\n For your own good, use OrderedDict for `subfields_dict`, or\n constructor's `resort` parameter set to ``True`` (it is by\n default).\n\nWarning:\n ``field_id`` can be only one character long!", "id": "f1161:c0:m5"} {"signature": "def get_i_name(self, num, is_oai=None):", "body": "if num not in (, ):raise ValueError(\"\")if is_oai is None:is_oai = self.oai_marci_name = \"\" if not is_oai else \"\"return i_name + str(num)", "docstring": "This method is used mainly internally, but it can be handy if you work\nwith with raw MARC XML object and not using getters.\n\nArgs:\n num (int): Which indicator you need (1/2).\n is_oai (bool/None): If None, :attr:`.oai_marc` is\n used.\n\nReturns:\n str: current name of ``i1``/``ind1`` parameter based on \\\n :attr:`oai_marc` property.", "id": "f1161:c0:m6"} {"signature": "@propertydef i1_name(self):", "body": "return self.get_i_name()", "docstring": "Property getter / alias for ``self.get_i_name(1)``.", "id": "f1161:c0:m7"} {"signature": "@propertydef i2_name(self):", "body": "return self.get_i_name()", "docstring": "Property getter / alias for ``self.get_i_name(2)``.", "id": "f1161:c0:m8"} {"signature": "def get_ctl_field(self, controlfield, alt=None):", "body": "if not alt:return self.controlfields[controlfield]return self.controlfields.get(controlfield, alt)", "docstring": "Method wrapper over :attr:`.controlfields` dictionary.\n\nArgs:\n controlfield (str): Name of the controlfield.\n alt (object, default None): Alternative value of the `controlfield`\n when `controlfield` couldn't be found.\n\nReturns:\n str: record from given `controlfield`", "id": "f1161:c0:m9"} {"signature": "def getDataRecords(self, datafield, subfield, throw_exceptions=True):", "body": "return self.get_subfields(datafield=datafield,subfield=subfield,exception=throw_exceptions)", "docstring": ".. 
deprecated::\n Use :func:`get_subfields` instead.", "id": "f1161:c0:m10"} {"signature": "def get_subfields(self, datafield, subfield, i1=None, i2=None,exception=False):", "body": "if len(datafield) != :raise ValueError(\"\")if len(subfield) != :raise ValueError(\"\")if datafield not in self.datafields:if exception:raise KeyError(datafield + \"\")return []output = []for datafield in self.datafields[datafield]:if subfield not in datafield:continuefor sfield in datafield[subfield]:if i1 and sfield.i1 != i1:continueif i2 and sfield.i2 != i2:continueoutput.append(sfield)if not output and exception:raise KeyError(subfield + \"\")return output", "docstring": "Return content of given `subfield` in `datafield`.\n\nArgs:\n datafield (str): Section name (for example \"001\", \"100\", \"700\").\n subfield (str): Subfield name (for example \"a\", \"1\", etc..).\n i1 (str, default None): Optional i1/ind1 parameter value, which\n will be used for search.\n i2 (str, default None): Optional i2/ind2 parameter value, which\n will be used for search.\n exception (bool): If ``True``, :exc:`~exceptions.KeyError` is\n raised when method couldn't found given `datafield` /\n `subfield`. If ``False``, blank array ``[]`` is returned.\n\nReturns:\n list: of :class:`.MARCSubrecord`.\n\nRaises:\n KeyError: If the subfield or datafield couldn't be found.\n\nNote:\n MARCSubrecord is practically same thing as string, but has defined\n :meth:`.MARCSubrecord.i1` and :attr:`.MARCSubrecord.i2`\n methods.\n\n You may need to be able to get this, because MARC XML depends on\n i/ind parameters from time to time (names of authors for example).", "id": "f1161:c0:m11"} {"signature": "def record_iterator(xml):", "body": "if hasattr(xml, \"\"):xml = xml.read()dom = Nonetry:dom = dhtmlparser.parseString(xml)except UnicodeError:dom = dhtmlparser.parseString(xml.encode(\"\"))for record_xml in dom.findB(\"\"):yield MARCXMLRecord(record_xml)", "docstring": "Iterate over all ```` tags in `xml`.\n\nArgs:\n xml (str/file): Input string with XML. UTF-8 is prefered encoding,\n unicode should be ok.\n\nYields:\n MARCXMLRecord: For each corresponding ````.", "id": "f1162:m0"} {"signature": "def _undefined_pattern(value, fn, undefined):", "body": "if fn(value):return undefinedreturn value", "docstring": "If ``fn(value) == True``, return `undefined`, else `value`.", "id": "f1163:m0"} {"signature": "def _parse_corporations(self, datafield, subfield, roles=[\"\"]):", "body": "if len(datafield) != :raise ValueError(\"\")if len(subfield) != :raise ValueError(\"\")parsed_corporations = []for corporation in self.get_subfields(datafield, subfield):other_subfields = corporation.other_subfieldsif \"\" in other_subfields and roles != [\"\"]:corp_roles = other_subfields[\"\"] relevant = any(map(lambda role: role in roles, corp_roles))if not relevant:continuename = \"\"place = \"\"date = \"\"name = corporationif \"\" in other_subfields:place = \"\".join(other_subfields[\"\"])if \"\" in other_subfields:date = \"\".join(other_subfields[\"\"])parsed_corporations.append(Corporation(name, place, date))return parsed_corporations", "docstring": "Parse informations about corporations from given field identified\nby `datafield` parameter.\n\nArgs:\n datafield (str): MARC field ID (\"``110``\", \"``610``\", etc..)\n subfield (str): MARC subfield ID with name, which is typically\n stored in \"``a``\" subfield.\n roles (str): specify which roles you need. Set to ``[\"any\"]`` for\n any role, ``[\"dst\"]`` for distributors, etc.. 
For\n details, see\n http://www.loc.gov/marc/relators/relaterm.html\n\nReturns:\n list: :class:`Corporation` objects.", "id": "f1163:c0:m1"} {"signature": "def _parse_persons(self, datafield, subfield, roles=[\"\"]):", "body": "parsed_persons = []raw_persons = self.get_subfields(datafield, subfield)for person in raw_persons:other_subfields = person.other_subfieldsif \"\" in other_subfields and roles != [\"\"]:person_roles = other_subfields[\"\"] relevant = any(map(lambda role: role in roles, person_roles))if not relevant:continueind1 = person.i1ind2 = person.i2person = person.strip()name = \"\"second_name = \"\"surname = \"\"title = \"\"if ind1 == \"\" and ind2 == \"\":if \"\" in person:surname, name = person.split(\"\", )elif \"\" in person:surname, name = person.split(\"\", )else:surname = personif \"\" in other_subfields:title = \"\".join(other_subfields[\"\"])elif ind1 == \"\" and ind2 == \"\":name = person.strip()if \"\" in other_subfields:second_name = \"\".join(other_subfields[\"\"])if \"\" in other_subfields:surname = \"\".join(other_subfields[\"\"])elif ind1 == \"\" and ind2 == \"\" or ind1 == \"\" and ind2 == \"\":name = person.strip()if \"\" in other_subfields:title = \"\".join(other_subfields[\"\"])parsed_persons.append(Person(name.strip(),second_name.strip(),surname.strip(),title.strip()))return parsed_persons", "docstring": "Parse persons from given datafield.\n\nArgs:\n datafield (str): code of datafield (\"010\", \"730\", etc..)\n subfield (char): code of subfield (\"a\", \"z\", \"4\", etc..)\n role (list of str): set to [\"any\"] for any role, [\"aut\"] for\n authors, etc.. For details see\n http://www.loc.gov/marc/relators/relaterm.html\n\nMain records for persons are: \"100\", \"600\" and \"700\", subrecords \"c\".\n\nReturns:\n list: Person objects.", "id": "f1163:c0:m2"} {"signature": "@remove_hairs_decoratordef get_name(self):", "body": "return \"\".join(self.get_subfields(\"\", \"\"))", "docstring": "Returns:\n str: Name of the book.\n\nRaises:\n KeyError: When name is not specified.", "id": "f1163:c0:m3"} {"signature": "@remove_hairs_decoratordef get_subname(self, undefined=\"\"):", "body": "return _undefined_pattern(\"\".join(self.get_subfields(\"\", \"\")),lambda x: x.strip() == \"\",undefined)", "docstring": "Args:\n undefined (optional): Argument, which will be returned if the\n `subname` record is not found.\n\nReturns:\n str: Subname of the book or `undefined` if `subname` is not \\\n found.", "id": "f1163:c0:m4"} {"signature": "@remove_hairs_decoratordef get_price(self, undefined=\"\"):", "body": "return _undefined_pattern(\"\".join(self.get_subfields(\"\", \"\")),lambda x: x.strip() == \"\",undefined)", "docstring": "Args:\n undefined (optional): Argument, which will be returned if the\n `price` record is not found.\n\nReturns:\n str: Price of the book (with currency) or `undefined` if `price` \\\n is not found.", "id": "f1163:c0:m5"} {"signature": "@remove_hairs_decoratordef get_part(self, undefined=\"\"):", "body": "return _undefined_pattern(\"\".join(self.get_subfields(\"\", \"\")),lambda x: x.strip() == \"\",undefined)", "docstring": "Args:\n undefined (optional): Argument, which will be returned if the\n `part` record is not found.\n\nReturns:\n str: Which part of the book series is this record or `undefined` \\\n if `part` is not found.", "id": "f1163:c0:m6"} {"signature": "@remove_hairs_decoratordef get_part_name(self, undefined=\"\"):", "body": "return _undefined_pattern(\"\".join(self.get_subfields(\"\", \"\")),lambda x: x.strip() == 
\"\",undefined)", "docstring": "Args:\n undefined (optional): Argument, which will be returned if the\n `part_name` record is not found.\n\nReturns:\n str: Name of the part of the series. or `undefined` if `part_name`\\\n is not found.", "id": "f1163:c0:m7"} {"signature": "@remove_hairs_decoratordef get_publisher(self, undefined=\"\"):", "body": "publishers = set([remove_hairs_fn(publisher)for publisher in self[\"\"] + self[\"\"]])return _undefined_pattern(\"\".join(publishers),lambda x: x.strip() == \"\",undefined)", "docstring": "Args:\n undefined (optional): Argument, which will be returned if the\n `publisher` record is not found.\n\nReturns:\n str: Name of the publisher (\"``Grada``\" for example) or \\\n `undefined` if `publisher` is not found.", "id": "f1163:c0:m8"} {"signature": "def get_pub_date(self, undefined=\"\"):", "body": "dates = self[\"\"] + self[\"\"]def clean_date(date):\"\"\"\"\"\"out = \"\"was_digit = Falsefor c in date:if c.isdigit() or (c == \"\" and was_digit) or c == \"\":out += cwas_digit = c.isdigit()return outdates = set([clean_date(date)for date in self[\"\"] + self[\"\"]])return _undefined_pattern(\"\".join(dates),lambda x: x.strip() == \"\",undefined)", "docstring": "Args:\n undefined (optional): Argument, which will be returned if the\n `pub_date` record is not found.\n\nReturns:\n str: Date of publication (month and year usually) or `undefined` \\\n if `pub_date` is not found.", "id": "f1163:c0:m9"} {"signature": "@remove_hairs_decoratordef get_pub_order(self, undefined=\"\"):", "body": "return _undefined_pattern(\"\".join(self.get_subfields(\"\", \"\")),lambda x: x.strip() == \"\",undefined)", "docstring": "Args:\n undefined (optional): Argument, which will be returned if the\n `pub_order` record is not found.\n\nReturns:\n str: Information about order in which was the book published or \\\n `undefined` if `pub_order` is not found.", "id": "f1163:c0:m10"} {"signature": "@remove_hairs_decoratordef get_pub_place(self, undefined=\"\"):", "body": "places = set([remove_hairs_fn(place)for place in self[\"\"] + self[\"\"]])return _undefined_pattern(\"\".join(places),lambda x: x.strip() == \"\",undefined)", "docstring": "Args:\n undefined (optional): Argument, which will be returned if the\n `pub_place` record is not found.\n\nReturns:\n str: Name of city/country where the book was published or \\\n `undefined` if `pub_place` is not found.", "id": "f1163:c0:m11"} {"signature": "@remove_hairs_decoratordef get_format(self, undefined=\"\"):", "body": "return _undefined_pattern(\"\".join(self.get_subfields(\"\", \"\")),lambda x: x.strip() == \"\",undefined)", "docstring": "Args:\n undefined (optional): Argument, which will be returned if the\n `format` record is not found.\n\nReturns:\n str: Dimensions of the book ('``23 cm``' for example) or \n `undefined` if `format` is not found.", "id": "f1163:c0:m12"} {"signature": "def get_authors(self):", "body": "authors = self._parse_persons(\"\", \"\")authors += self._parse_persons(\"\", \"\")authors += self._parse_persons(\"\", \"\")authors += self._parse_persons(\"\", \"\")return authors", "docstring": "Returns:\n list: Authors represented as :class:`.Person` objects.", "id": "f1163:c0:m13"} {"signature": "def get_corporations(self, roles=[\"\"]):", "body": "corporations = self._parse_corporations(\"\", \"\", roles)corporations += self._parse_corporations(\"\", \"\", roles)corporations += self._parse_corporations(\"\", \"\", roles)corporations += self._parse_corporations(\"\", \"\", roles)return corporations", "docstring": 
"Args:\n roles (list, optional): Specify which types of corporations you\n need. Set to ``[\"any\"]`` for any role, ``[\"dst\"]`` for\n distributors, etc..\n\nNote:\n See http://www.loc.gov/marc/relators/relaterm.html for details.\n\nReturns:\n list: :class:`.Corporation` objects specified by roles parameter.", "id": "f1163:c0:m14"} {"signature": "def get_distributors(self):", "body": "return self.get_corporations(roles=[\"\"])", "docstring": "Returns:\n list: Distributors represented as :class:`.Corporation` object.", "id": "f1163:c0:m15"} {"signature": "def _clean_isbn(self, isbn):", "body": "return isbn.strip().split(\"\", )[]", "docstring": "Clean ISBN from other information (binding).", "id": "f1163:c0:m16"} {"signature": "def get_invalid_ISBNs(self):", "body": "return [self._clean_isbn(isbn)for isbn in self[\"\"]]", "docstring": "Get list of invalid ISBN (``020z``).\n\nReturns:\n list: List with INVALID ISBN strings.", "id": "f1163:c0:m17"} {"signature": "def get_ISBNs(self):", "body": "invalid_isbns = set(self.get_invalid_ISBNs())valid_isbns = [self._clean_isbn(isbn)for isbn in self[\"\"]if self._clean_isbn(isbn) not in invalid_isbns]if valid_isbns:return valid_isbnsreturn [self._clean_isbn(isbn)for isbn in self[\"\"]]", "docstring": "Get list of VALID ISBN.\n\nReturns:\n list: List with *valid* ISBN strings.", "id": "f1163:c0:m18"} {"signature": "def get_invalid_ISSNs(self):", "body": "return [self._clean_isbn(issn)for issn in self[\"\"] + self[\"\"]]", "docstring": "Get list of invalid ISSNs (``022z`` + ``022y``).\n\nReturns:\n list: List with INVALID ISSN strings.", "id": "f1163:c0:m19"} {"signature": "def get_ISSNs(self):", "body": "invalid_issns = set(self.get_invalid_ISSNs())return [self._clean_isbn(issn)for issn in self[\"\"]if self._clean_isbn(issn) not in invalid_issns]", "docstring": "Get list of VALID ISSNs (``022a``).\n\nReturns:\n list: List with *valid* ISSN strings.", "id": "f1163:c0:m20"} {"signature": "def get_linking_ISSNs(self):", "body": "return [self._clean_isbn(issn)for issn in self[\"\"]]", "docstring": "Get list of linking ISSNs (``022l``).\n\nReturns:\n list: List with linking ISSN strings.", "id": "f1163:c0:m21"} {"signature": "def _filter_binding(self, binding):", "body": "binding = binding.strip().split(\"\", )[-] binding = remove_hairs_fn(binding) return binding.split(\"\")[-].strip()", "docstring": "Filter binding from ISBN record. In MARC XML / OAI, the binding\ninformation is stored in same subrecord as ISBN.\n\nExample:\n ``80-251-0225-4 (bro\u017e.) :`` ->\n ``bro\u017e.``.", "id": "f1163:c0:m22"} {"signature": "def get_binding(self):", "body": "return [self._filter_binding(binding)for binding in self[\"\"]if \"\" in binding and \"\" in binding]", "docstring": "Returns:\n list: Array of strings with bindings (``[\"bro\u017e.\"]``) or blank list.", "id": "f1163:c0:m23"} {"signature": "def get_originals(self):", "body": "return self.get_subfields(\"\", \"\")", "docstring": "Returns:\n list: List of strings with names of original books (names of books\\\n in original language, before translation).", "id": "f1163:c0:m24"} {"signature": "def get_urls(self):", "body": "urls = self.get_subfields(\"\", \"\", i1=\"\", i2=\"\")return map(lambda x: x.replace(\"\", \"\"), urls)", "docstring": "Content of field ``856u42``. 
Typically URL pointing to producers\nhomepage.\n\nReturns:\n list: List of URLs defined by producer.", "id": "f1163:c0:m25"} {"signature": "def get_internal_urls(self):", "body": "internal_urls = self.get_subfields(\"\", \"\", i1=\"\", i2=\"\")internal_urls.extend(self.get_subfields(\"\", \"\"))internal_urls.extend(self.get_subfields(\"\", \"\"))return map(lambda x: x.replace(\"\", \"\"), internal_urls)", "docstring": "URL's, which may point to edeposit, aleph, kramerius and so on.\n\nFields ``856u40``, ``998a`` and ``URLu``.\n\nReturns:\n list: List of internal URLs.", "id": "f1163:c0:m26"} {"signature": "def get_pub_type(self):", "body": "INFO_CHAR_INDEX = SECOND_INFO_CHAR_I = if not len(self.leader) >= INFO_CHAR_INDEX + :return PublicationType.monographicif self.controlfields.get(\"\") == \"\":return PublicationType.continuinginfo_char = self.leader[INFO_CHAR_INDEX]multipart_n = self.get_subfields(\"\", \"\", exception=False)multipart_p = self.get_subfields(\"\", \"\", exception=False)if info_char in \"\":return PublicationType.monographicelif info_char in \"\":return PublicationType.continuingelif info_char == \"\" and (multipart_n or multipart_p):return PublicationType.multipart_monographelif info_char == \"\" and len(self.leader) >= SECOND_INFO_CHAR_I + :if self.leader[SECOND_INFO_CHAR_I] == \"\":return PublicationType.multipart_monographelif self.leader[SECOND_INFO_CHAR_I] == \"\":return PublicationType.single_unitreturn PublicationType.monographic", "docstring": "Returns:\n PublicationType: :class:`.PublicationType` enum **value**.", "id": "f1163:c0:m27"} {"signature": "def is_monographic(self):", "body": "return self.get_pub_type() == PublicationType.monographic", "docstring": "Returns:\n bool: True if the record is monographic.", "id": "f1163:c0:m28"} {"signature": "def is_multi_mono(self):", "body": "return self.get_pub_type() == PublicationType.multipart_monograph", "docstring": "Returns:\n bool: True if the record is multi_mono.", "id": "f1163:c0:m29"} {"signature": "def is_continuing(self):", "body": "return self.get_pub_type() == PublicationType.continuing", "docstring": "Returns:\n bool: True if the record is continuing.", "id": "f1163:c0:m30"} {"signature": "def is_single_unit(self):", "body": "return self.get_pub_type() == PublicationType.single_unit", "docstring": "Returns:\n bool: True if the record is single unit.", "id": "f1163:c0:m31"} {"signature": "def __getitem__(self, item):", "body": "if not isinstance(item, basestring):raise ValueError(\"\")if len(item) == :val = self.controlfields.get(item, None)if val:return valreturn self.datafields.get(item, None)if len(item) < :raise ValueError(\"\")if len(item) > :raise ValueError(\"\")datafield = item[:]subfield = item[]i1 = Nonei2 = Noneif len(item) >= :i1 = item[]if len(item) >= :i2 = item[]return self.get_subfields(datafield=datafield,subfield=subfield,i1=i1,i2=i2,exception=False)", "docstring": "Query inteface shortcut for :meth:`.MARCXMLParser.get_ctl_fields` and\n:meth:`.MARCXMLParser.get_subfields`.\n\nFirst three characters are considered as `datafield`, next character\nas `subfield` and optionaly, two others as `i1` / `i2` parameters.\n\nReturned value is str/None in case of ``len(item)`` == 3 (ctl_fields)\nor list (or blank list) in case of ``len(item) >= 4``.\n\nReturns:\n list/str: See :meth:`.MARCXMLParser.get_subfields` for details, or\\\n None in case that nothing was found.", "id": "f1163:c0:m32"} {"signature": "def get(self, item, alt=None):", "body": "try:val = self[item]except ValueError:return 
altreturn val if val is not None else alt", "docstring": "Standard dict-like .get() method.\n\nArgs:\n item (str): See :meth:`.__getitem__` for details.\n alt (default None): Alternative value, if item is not found.\n\nReturns:\n obj: `item` or `alt`, if item is not found.", "id": "f1163:c0:m33"} {"signature": "def to_XML(self):", "body": "marcxml_template = \"\"\"\"\"\"a>c>CONTROL_FIELDSELDSrc>ta>>leader = self.leader if self.leader is not None else \"\"if leader: leader = \"\" + leader + \"\"if self.oai_marc:leader = \"\"xml_template = oai_template if self.oai_marc else marcxml_templatexml_output = Template(xml_template).substitute(LEADER=leader.strip(),CONTROL_FIELDS=self._serialize_ctl_fields().strip(),DATA_FIELDS=self._serialize_data_fields().strip())return xml_output", "docstring": "Serialize object back to XML string.\n\nReturns:\n str: String which should be same as original input, if everything\\\n works as expected.", "id": "f1170:c0:m4"} {"signature": "def __str__(self):", "body": "return self.to_XML()", "docstring": "Alias for :meth:`to_XML`.", "id": "f1170:c0:m5"} {"signature": "@propertydef name(self):", "body": "return self._name", "docstring": "Getter for 'name' property\n\nReturns:\n string: Issue's name", "id": "f1181:c0:m1"} {"signature": "@name.setterdef name(self, value):", "body": "self._name = value", "docstring": "Setter for 'name' property\n\nArgs:\n value (str): Issue's name", "id": "f1181:c0:m2"} {"signature": "@propertydef file(self):", "body": "return self._file", "docstring": "Getter for 'file' property\n\nReturns:\n string: Issue's file", "id": "f1181:c0:m3"} {"signature": "@file.setterdef file(self, value):", "body": "self._file = value", "docstring": "Setter for 'path' property\n\nArgs:\n value (str): Issue's file", "id": "f1181:c0:m4"} {"signature": "@propertydef severity(self):", "body": "return self._severity", "docstring": "Getter for 'severity' property\n\nReturns:\n string: Issue's severity", "id": "f1181:c0:m5"} {"signature": "@severity.setterdef severity(self, value):", "body": "self._severity = value", "docstring": "Setter for 'path' property\n\nArgs:\n value (str): Issue's severity", "id": "f1181:c0:m6"} {"signature": "@propertydef potential(self):", "body": "if self._potential is not None and self._potential:return Trueelse:return False", "docstring": "Getter for 'potential' property\n\nReturns:\n bool: potential is required?", "id": "f1181:c0:m7"} {"signature": "@potential.setterdef potential(self, value):", "body": "if value:self._potential = Trueelse:self._potential = False", "docstring": "Setter for 'potential' property\n\nArgs:\n value (bool): True if a potential is required. 
False else", "id": "f1181:c0:m8"} {"signature": "@propertydef details(self):", "body": "return self._details", "docstring": "Getter for 'details' property\n\nReturns:\n string: Issue's details", "id": "f1181:c0:m9"} {"signature": "@details.setterdef details(self, value):", "body": "self._details = value", "docstring": "Setter for 'details' property\n\nArgs:\n value (str): Issue's details", "id": "f1181:c0:m10"} {"signature": "@propertydef checker(self):", "body": "return self._checker_name", "docstring": "Getter for 'checker' property\n\nReturns:\n string: Issue's checker", "id": "f1181:c0:m11"} {"signature": "@checker.setterdef checker(self, value):", "body": "self._checker_name = value", "docstring": "Setter for 'checker' property\n\nArgs:\n value (str): Issue's checker", "id": "f1181:c0:m12"} {"signature": "def __todict__(self):", "body": "return {\"\": self.name,\"\": self.file,\"\": self.details,\"\": self.severity,\"\": self.potential,\"\": self.checker}", "docstring": "Returns a dictionary with the class representation\n\nReturns:\n dict: class representarion", "id": "f1181:c0:m13"} {"signature": "def __str__(self):", "body": "return json.dumps(self.__todict__())", "docstring": "Return a JSON class representation\n\nReturns:\n\n str: JSON class representation", "id": "f1181:c0:m14"} {"signature": "def __unicode__(self):", "body": "return unicode(self.__str__())", "docstring": "Return a JSON class representation (unicode)\n\nReturns:\n\n unicode: JSON class representation", "id": "f1181:c0:m15"} {"signature": "def __init__(self, command = None):", "body": "self._output = Noneself._errors = Noneself._command = Noneself.command = command", "docstring": "Class constructor. \n\nArgs:\n command (str): Command to execute", "id": "f1184:c0:m0"} {"signature": "@propertydef command(self):", "body": "return self._command", "docstring": "Getter for 'command' property\n\nReturns:\n str: Command to execute", "id": "f1184:c0:m1"} {"signature": "@command.setterdef command(self, value):", "body": "self._command = value", "docstring": "Setter for 'command' property\n\nArgs:\n value (str): Command to execute", "id": "f1184:c0:m2"} {"signature": "@propertydef output(self):", "body": "return self._output", "docstring": "Getter for 'output' property\n\nReturns:\n str: Stdout content", "id": "f1184:c0:m3"} {"signature": "@output.setterdef output(self, value):", "body": "self._output = value", "docstring": "Setter for 'output' property\n\nArgs:\n value (str): Stdout content", "id": "f1184:c0:m4"} {"signature": "@propertydef errors(self):", "body": "return self._errors", "docstring": "Getter for 'errors' property\n\nReturns:\n str: Stderr content", "id": "f1184:c0:m5"} {"signature": "@errors.setterdef errors(self, value):", "body": "self._errors = value", "docstring": "Setter for 'errors' property\n\nArgs:\n value (str): Stderr content", "id": "f1184:c0:m6"} {"signature": "def getOSName(self):", "body": "_system = platform.system()if _system in [self.__class__.OS_WINDOWS, self.__class__.OS_MAC, self.__class__.OS_LINUX]:if _system == self.__class__.OS_LINUX:_dist = platform.linux_distribution()[]if _dist.lower() == self.__class__.OS_UBUNTU.lower():return self.__class__.OS_UBUNTUelif _dist.lower() == self.__class__.OS_DEBIAN.lower():return self.__class__.OS_DEBIANelif _dist.lower() == self.__class__.OS_CENTOS.lower():return self.__class__.OS_CENTOSelif _dist.lower() == self.__class__.OS_REDHAT.lower():return self.__class__.OS_REDHATelif _dist.lower() == self.__class__.OS_KALI.lower():return 
self.__class__.OS_KALIreturn _systemelse:return None", "docstring": "Get the OS name. If OS is linux, returns the Linux distribution name\n\nReturns:\n str: OS name", "id": "f1184:c0:m7"} {"signature": "def execute(self, shell = True):", "body": "process = Popen(self.command, stdout=PIPE, stderr=PIPE, shell=shell)self.output, self.errors = process.communicate()", "docstring": "Executes the command setted into class\n\nArgs:\n shell (boolean): Set True if command is a shell command. Default: True", "id": "f1184:c0:m9"} {"signature": "@staticmethoddef install():", "body": "cmd = CommandHelper()cmd.install(\"\")cmd = CommandHelper()cmd.install(\"\")cmd = CommandHelper()cmd.command = \"\"cmd.execute()if cmd.errors:from termcolor import coloredprint(colored(cmd.errors, \"\"))else:print(cmd.output)", "docstring": "Install all the dependences", "id": "f1185:c0:m6"} {"signature": "def checker(func):", "body": "def execute(self, *args, **kwargs):try:print(\"\".format(n=self.__class__.NAME))if hasattr(self, ''):if self.test():func(self, *args, **kwargs)return self.issueselse:print(colored(\"\".format(c=self.__class__.__name__), \"\"))else:func(self, *args, **kwargs)return self.issuesexcept Exception as e:print(colored(\"\".format(n=self.__class__.NAME, e = e), \"\"))return execute", "docstring": "Decorator for method run. This method will be execute before the execution\nfrom the method with this decorator.", "id": "f1187:m0"} {"signature": "def __init__(self):", "body": "self._dao = Noneself._path = Noneself._config = Noneself._issues = []", "docstring": "Creates a new class instance for Generic Checkers", "id": "f1187:c0:m0"} {"signature": "@propertydef dao(self):", "body": "return self._dao", "docstring": "Getter for 'dao' property\n\nReturns:\n atomshield.helpers.DAO: Instance of DAO class", "id": "f1187:c0:m1"} {"signature": "@dao.setterdef dao(self, value):", "body": "self._dao = value", "docstring": "Setter for 'dao' property\n\nArgs:\n value (atomshield.helpers.DAO): Instance of DAO class", "id": "f1187:c0:m2"} {"signature": "@propertydef path(self):", "body": "return self._path", "docstring": "Getter for 'path' property\n\nReturns:\n str: Absolute path to scan", "id": "f1187:c0:m3"} {"signature": "@path.setterdef path(self, value):", "body": "if not value.endswith(''):self._path = ''.format(v=value)else:self._path = value", "docstring": "Setter for 'path' property\n\nArgs:\n value (str): Absolute path to scan", "id": "f1187:c0:m4"} {"signature": "@propertydef project(self):", "body": "return self._project", "docstring": "Getter for 'project' property\n\nReturns:\n str: Project's name", "id": "f1187:c0:m5"} {"signature": "@project.setterdef project(self, value):", "body": "self._project = value", "docstring": "Setter for 'project' property\n\nArgs:\n value (str): Project's name", "id": "f1187:c0:m6"} {"signature": "@propertydef issues(self):", "body": "return self._issues", "docstring": "Getter for 'issues' property\n\nReturns:\n list: List of instances of Issue class", "id": "f1187:c0:m7"} {"signature": "@issues.setterdef issues(self, value):", "body": "self._issues = value", "docstring": "Setter for 'issues' property\n\nArgs:\n value (list): List of Issue objects", "id": "f1187:c0:m8"} {"signature": "@propertydef config(self):", "body": "return self._config", "docstring": "Getter for 'config' property\n\nReturns:\n dict: Dictionary which contains the current values for this report config", "id": "f1187:c0:m9"} {"signature": "@config.setterdef config(self, value):", "body": "self._config = 
self.parseConfig(value)", "docstring": "Setter for 'config' property\n\nArgs:\n value (dict): Dictionary which contains the current values for this report config", "id": "f1187:c0:m10"} {"signature": "def run(self):", "body": "pass", "docstring": "Abstract method. This method will be executed for subclass which not implemented his own method", "id": "f1187:c0:m12"} {"signature": "def saveIssue(self, issue):", "body": "self.issues.append(issue)", "docstring": "Stores an issue in 'issues' property\n\nArgs:\n issue (atomshields.helpers.Issue): Issue instance", "id": "f1187:c0:m13"} {"signature": "@classmethoddef parseConfig(cls, value):", "body": "if '' in value:value[''] = bool(value[''])if '' in value:value[''] = [n.strip() for n in ast.literal_eval(value[''])]return value", "docstring": "Parse the config values\n\nArgs:\n value (dict): Dictionary which contains the checker config\n\nReturns:\n dict: The checker config with parsed values", "id": "f1187:c0:m14"} {"signature": "@staticmethoddef isInstalled(value):", "body": "function = \"\"\"\"\"\"bash -c ''", "docstring": "Check if a software is installed into machine.\n\nArgs:\n value (str): Software's name\n\nReturns:\n bool: True if the software is installed. False else", "id": "f1187:c0:m15"} {"signature": "@checkerdef run(self):", "body": "filename = \"\"command = \"\".format(path = self.path, filename = filename)cmd = CommandHelper(command)cmd.execute()files = cmd.output.split(\"\")for f in files:if not f.endswith(filename):continuerel_path = f.replace(self.path, \"\")if rel_path.startswith(tuple(self.CONFIG[''])):continueissue = Issue()issue.name = \"\"issue.potential = Falseissue.severity = Issue.SEVERITY_LOWissue.file = rel_pathself.saveIssue(issue)", "docstring": "Finds .DS_Store files into path", "id": "f1189:c0:m1"} {"signature": "def report(func):", "body": "def execute(self, *args, **kwargs):try:print(\"\".format(n=self.__class__.NAME))if hasattr(self, ''):if self.test():return func(self, *args, **kwargs)else:print(colored(\"\".format(c=self.__class__.__name__), \"\"))else:return func(self, *args, **kwargs)except Exception as e:print(colored(\"\".format(n=self.__class__.NAME, e = e), \"\"))return execute", "docstring": "Decorator for method run. 
This method will be executed before the execution\nof the method with this decorator.", "id": "f1190:m0"} {"signature": "def __init__(self, issues = None):", "body": "self._issues = []self._config = {}self._project = Noneself.issues = issues", "docstring": "Class constructor.\n\nArgs:\n issues (list): List of `Issue` instances", "id": "f1190:c0:m0"} {"signature": "@propertydef issues(self):", "body": "if self._issues is None:return []return self._issues", "docstring": "Getter for 'issues' property\n\nReturns:\n list: List of `Issue` instances", "id": "f1190:c0:m1"} {"signature": "@issues.setterdef issues(self, value):", "body": "self._issues = value", "docstring": "Setter for 'issues' property\n\nArgs:\n value (list): List of `Issue` instances", "id": "f1190:c0:m2"} {"signature": "@propertydef config(self):", "body": "return self._config", "docstring": "Getter for 'config' property\n\nReturns:\n dict: Dictionary which contains the current values for this report config", "id": "f1190:c0:m3"} {"signature": "@config.setterdef config(self, value):", "body": "self._config = value", "docstring": "Setter for 'config' property\n\nArgs:\n value (dict): Dictionary which contains the current values for this report config", "id": "f1190:c0:m4"} {"signature": "@propertydef project(self):", "body": "return self._project", "docstring": "Getter for 'project' property\n\nReturns:\n str: Project's name", "id": "f1190:c0:m5"} {"signature": "@project.setterdef project(self, value):", "body": "self._project = value", "docstring": "Setter for 'project' property\n\nArgs:\n value (str): Project's name", "id": "f1190:c0:m6"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(HttpReport, self).__init__(*args, **kwargs)", "docstring": "Class constructor. Must call parent constructor", "id": "f1193:c0:m0"} {"signature": "@reportdef run(self):", "body": "options = {}if bool(self.config['']):options[''] = {\"\": self.config[''], \"\": self.config['']}options[\"\"] = self.config['']options[\"\"] = {\"\": json.dumps(map(lambda x: x.__todict__(), self.issues))}if '' == self.config[''].lower():requests.get(**options)else:requests.post(**options)", "docstring": "Method executed dynamically by the framework. 
This method will do a http request to\nendpoint setted into config file with the issues and other data.", "id": "f1193:c0:m1"} {"signature": "@staticmethoddef _debug(message, color=None, attrs=None):", "body": "if attrs is None:attrs = []if color is not None:print(colored(message, color, attrs=attrs))else:if len(attrs) > :print(colored(message, \"\", attrs=attrs))else:print(message)", "docstring": "Print a message if the class attribute 'verbose' is enabled\n\nArgs:\n message (str): Message to print", "id": "f1194:c0:m1"} {"signature": "@propertydef path(self):", "body": "return self._path", "docstring": "Getter for 'path' property\n\nReturns:\n string: Absolute path to target directory", "id": "f1194:c0:m3"} {"signature": "@path.setterdef path(self, value):", "body": "self._path = os.path.abspath(value)", "docstring": "Setter for 'path' property\n\nArgs:\n value (str): Path to target directory", "id": "f1194:c0:m4"} {"signature": "@propertydef project(self):", "body": "return self._project", "docstring": "Getter for 'project' property\n\nReturns:\n string: Projects's name", "id": "f1194:c0:m5"} {"signature": "@project.setterdef project(self, value):", "body": "self._project = value", "docstring": "Setter for 'project' property\n\nArgs:\n value (str): Project's name", "id": "f1194:c0:m6"} {"signature": "@propertydef configFile(self):", "body": "return self._config_file", "docstring": "Getter for 'configFile' property\n\nReturns:\n str: Path to config file", "id": "f1194:c0:m7"} {"signature": "@configFile.setterdef configFile(self, value):", "body": "self._config_file = os.path.abspath(value)", "docstring": "Setter for 'configFile' property\n\nArgs:\n value (str): Path to config file", "id": "f1194:c0:m8"} {"signature": "@propertydef config(self):", "body": "return self._config", "docstring": "Getter for 'config' property\n\nReturns:\n str: Path to config file", "id": "f1194:c0:m9"} {"signature": "@config.setterdef config(self, value):", "body": "self._config = value", "docstring": "Setter for 'config' property\n\nArgs:\n value (dict): Dictionary which contains the config", "id": "f1194:c0:m10"} {"signature": "@propertydef issues(self):", "body": "return self._issues", "docstring": "Getter for 'issues' property\n\nReturns:\n list: List of Issue instances", "id": "f1194:c0:m11"} {"signature": "@issues.setterdef issues(self, value):", "body": "self._issues = value", "docstring": "Setter for 'issues' property\n\nArgs:\n value (list): List of Issue instances", "id": "f1194:c0:m12"} {"signature": "@staticmethoddef setup():", "body": "if not os.path.isdir(AtomShieldsScanner.CHECKERS_DIR):os.makedirs(AtomShieldsScanner.CHECKERS_DIR)if not os.path.isdir(AtomShieldsScanner.REPORTS_DIR):os.makedirs(AtomShieldsScanner.REPORTS_DIR)for f in AtomShieldsScanner._getFiles(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"\"), \"\"):AtomShieldsScanner.installChecker(f)for f in AtomShieldsScanner._getFiles(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"\"), \"\"):AtomShieldsScanner.installReport(f)AtomShieldsScanner._executeMassiveMethod(path=AtomShieldsScanner.CHECKERS_DIR, method=\"\", args={})config_dir = os.path.dirname(AtomShieldsScanner.CONFIG_PATH)if not os.path.isdir(config_dir):os.makedirs(config_dir)", "docstring": "Creates required directories and copy checkers and reports.", "id": "f1194:c0:m13"} {"signature": "@staticmethoddef _addConfig(instance, config, parent_section):", "body": "try:section_name = \"\".format(p = parent_section, 
n=instance.NAME.lower())config.add_section(section_name)for k in list(instance.CONFIG.keys()):config.set(section_name, k, instance.CONFIG[k])except Exception as e:print(\"\" % e)", "docstring": "Writes a section for a plugin.\n\nArgs:\n instance (object): Class instance for plugin\n config (object): Object (ConfigParser) which contains the current config\n parent_section (str): Parent section for plugin. Usually 'checkers' or 'reports'", "id": "f1194:c0:m21"} {"signature": "def getConfig(self, section = None):", "body": "data = {}if section is None:for s in self.config.sections():if '' in s:parent, _s = s.split('')data[parent][_s] = dict(self.config.items(s))else:data[s] = dict(self.config.items(s))else:data = dict(self.config.items(section))return data", "docstring": "Returns a dictionary which contains the current config. If a section is set,\nonly that section's config will be returned\n\nArgs:\n section (str): (Optional) Section name.\n\nReturns:\n dict: Representation of current config", "id": "f1194:c0:m25"} {"signature": "@staticmethoddef _getClassInstance(path, args=None):", "body": "if not path.endswith(\"\"):return Noneif args is None:args = {}classname = AtomShieldsScanner._getClassName(path)basename = os.path.basename(path).replace(\"\", \"\")sys.path.append(os.path.dirname(path))try:mod = __import__(basename, globals(), locals(), [classname], -)class_ = getattr(mod, classname)instance = class_(**args)except Exception as e:AtomShieldsScanner._debug(\"\" % e)return Nonefinally:sys.path.remove(os.path.dirname(path))return instance", "docstring": "Returns a class instance from a .py file.\n\nArgs:\n path (str): Absolute path to .py file\n args (dict): Arguments passed via class constructor\n\nReturns:\n object: Class instance or None", "id": "f1194:c0:m30"} {"signature": "@staticmethoddef _executeMassiveMethod(path, method, args=None, classArgs = None):", "body": "response = {}if args is None:args = {}if classArgs is None:classArgs = {}sys.path.append(path)exclude = [\"\", \"\"]for f in AtomShieldsScanner._getFiles(path, \"\", exclude=exclude):try:instance = AtomShieldsScanner._getClassInstance(path = f, args = classArgs)if instance is not None:if callable(method):args[\"\"] = instanceoutput = method(**args)response[instance.__class__.NAME] = outputelse:if hasattr(instance, method):output = getattr(instance, method)(**args)response[instance.__class__.NAME] = outputelse:continueexcept Exception as e:AtomShieldsScanner._debug(\"\" % e)sys.path.remove(path)return response", "docstring": "Execute a specific method for each class instance located in path\n\nArgs:\n path (str): Absolute path which contains the .py files\n method (str): Method to execute on each class instance\n\nReturns:\n dict: Dictionary which contains the response for every class instance.\n The dictionary keys are the value of 'NAME' class variable.", "id": "f1194:c0:m31"} {"signature": "def run(self):", "body": "self.checkProperties()self.debug(\"\")self.showScanProperties()self.loadConfig()init_ts = datetime.now()cwd = os.getcwd()os.chdir(self.path)issues = self.executeCheckers()os.chdir(cwd)end_ts = datetime.now()duration = ''.format(end_ts - init_ts)for plugin in list(issues.keys()):value = issues[plugin]if isinstance(value, list):list(map(self.saveIssue, value))else:self.saveIssue(value)print(\"\")self.executeReports()self.debug(\"\")self.debug(\"\".format(t=duration))self.showSummary()return self.issues", "docstring": "Run a scan in the configured path.", "id": "f1194:c0:m37"} {"signature": "def authenticate(username, password, 
service='', encoding='',resetcred=True):", "body": "if sys.version_info >= (,):if isinstance(username, str):username = username.encode(encoding)if isinstance(password, str):password = password.encode(encoding)if isinstance(service, str):service = service.encode(encoding)@conv_funcdef my_conv(n_messages, messages, p_response, app_data):\"\"\"\"\"\"addr = calloc(n_messages, sizeof(PamResponse))p_response[] = cast(addr, POINTER(PamResponse))for i in range(n_messages):if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:pw_copy = strdup(password)p_response.contents[i].resp = cast(pw_copy, c_char_p)p_response.contents[i].resp_retcode = return handle = PamHandle()conv = PamConv(my_conv, )retval = pam_start(service, username, byref(conv), byref(handle))if retval != :return Falseretval = pam_authenticate(handle, )auth_success = (retval == )if auth_success and resetcred:retval = pam_setcred(handle, PAM_REINITIALIZE_CRED)pam_end(handle, retval)return auth_success", "docstring": "Returns True if the given username and password authenticate for the\n given service. Returns False otherwise.\n\n ``username``: the username to authenticate\n\n ``password``: the password in plain text\n\n ``service``: the PAM service to authenticate against.\n Defaults to 'login'\n\n The above parameters can be strings or bytes. If they are strings,\n they will be encoded using the encoding given by:\n\n ``encoding``: the encoding to use for the above parameters if they\n are given as strings. Defaults to 'utf-8'\n\n ``resetcred``: Use the pam_setcred() function to\n reinitialize the credentials.\n Defaults to 'True'.", "id": "f1197:m0"} {"signature": "def parse(self):", "body": "if exists(self.filepath):content = open(self.filepath).read().decode(charset)else:content = \"\"try:config = toml.loads(content)except toml.TomlSyntaxError:raise ConfigSyntaxErrorreturn config", "docstring": "parse config, return a dict", "id": "f1199:c0:m0"} {"signature": "def initialize(self, templates_path, global_data):", "body": "self.env = Environment(loader=FileSystemLoader(templates_path))self.env.trim_blocks = Trueself.global_data = global_data", "docstring": "initialize with templates' path\n parameters\n templates_path str the path of the templates directory\n global_data dict global data that can be accessed in any templates", "id": "f1200:c0:m0"} {"signature": "def render(self, template, **data):", "body": "dct = self.global_data.copy()dct.update(data)try:html = self.env.get_template(template).render(**dct)except TemplateNotFound:raise JinjaTemplateNotFoundreturn html", "docstring": "Render data with template, return html unicodes.\n parameters\n template str the template's filename\n data dict the data to render", "id": "f1200:c0:m1"} {"signature": "def render_to(self, path, template, **data):", "body": "html = self.render(template, **data)with open(path, '') as f:f.write(html.encode(charset))", "docstring": "Render data with template and then write to path", "id": "f1200:c0:m2"} {"signature": "def block_code(self, text, lang):", "body": "if not lang:return self._code_no_lexer(text)try:lexer = get_lexer_by_name(lang, stripall=True)except ClassNotFound: return self._code_no_lexer(text)formatter = HtmlFormatter()return highlight(text, lexer, formatter)", "docstring": "text: unicode text to render", "id": "f1201:c0:m1"} {"signature": "def __init__(self):", "body": "render = RuxHtmlRenderer() extensions = (misaka.EXT_FENCED_CODE |misaka.EXT_NO_INTRA_EMPHASIS |misaka.EXT_AUTOLINK)self.markdown = misaka.Markdown(render, extensions=extensions)", 
"docstring": "Initialize the parser, set markdown render handler as\n an attribute `markdown` of the parser", "id": "f1201:c1:m0"} {"signature": "def parse(self, source):", "body": "rt, title, title_pic, markdown = libparser.parse(source)if rt == -:raise SeparatorNotFoundelif rt == -:raise PostTitleNotFoundtitle, title_pic, markdown = map(to_unicode, (title, title_pic,markdown))html = self.markdown.render(markdown)summary = self.markdown.render(markdown[:])return {'': title,'': markdown,'': html,'': summary,'': title_pic}", "docstring": "Parse ascii post source, return dict", "id": "f1201:c1:m2"} {"signature": "def parse_filename(self, filepath):", "body": "name = os.path.basename(filepath)[:-src_ext_len]try:dt = datetime.strptime(name, \"\")except ValueError:raise PostNameInvalidreturn {'': name, '': dt, '': filepath}", "docstring": "parse post source files name to datetime object", "id": "f1201:c1:m3"} {"signature": "def deploy_blog():", "body": "logger.info(deploy_blog.__doc__)call('' + join(dirname(__file__), '', '') + '',shell=True)logger.success('')logger.info('')", "docstring": "Deploy new blog to current directory", "id": "f1202:m0"} {"signature": "def new_post():", "body": "logger.info(new_post.__doc__)now = datetime.datetime.now()now_s = now.strftime('')filepath = join(Post.src_dir, now_s + src_ext)if not exists(Post.src_dir):logger.error(SourceDirectoryNotFound.__doc__)sys.exit(SourceDirectoryNotFound.exit_code)content = ('''''''')f = open(filepath, '')f.write(content)f.close()logger.success('' % filepath)", "docstring": "Touch a new post in src/", "id": "f1202:m1"} {"signature": "def clean():", "body": "logger.info(clean.__doc__)paths = ['', '', '']call(['', ''] + paths)logger.success('')", "docstring": "Clean htmls rux built: `rm -rf post page index.html`", "id": "f1202:m2"} {"signature": "def run_server(self, port):", "body": "try:self.server = MultiThreadedHTTPServer(('', port), Handler)except socket.error as e: logger.error(str(e))sys.exit()logger.info(\"\"% port)try:self.server.serve_forever()except KeyboardInterrupt:logger.info(\"\")self.shutdown_server()", "docstring": "run a server binding to port", "id": "f1203:c2:m1"} {"signature": "def get_files_stat(self):", "body": "if not exists(Post.src_dir):logger.error(SourceDirectoryNotFound.__doc__)sys.exit(SourceDirectoryNotFound.exit_code)paths = []for fn in ls(Post.src_dir):if fn.endswith(src_ext):paths.append(join(Post.src_dir, fn))if exists(config.filepath):paths.append(config.filepath)files = dict((p, stat(p).st_mtime) for p in paths)return files", "docstring": "get source files' update time", "id": "f1203:c2:m2"} {"signature": "def watch_files(self):", "body": "try:while :sleep() try:files_stat = self.get_files_stat()except SystemExit:logger.error(\"\")self.shutdown_server()if self.files_stat != files_stat:logger.info(\"\")try:generator.re_generate()global _root_root = generator.rootexcept SystemExit: logger.error(\"\")self.shutdown_server()self.files_stat = files_stat except KeyboardInterrupt:logger.info(\"\")self.shutdown_watcher()", "docstring": "watch files for changes, if changed, rebuild blog. 
this thread\n will quit if the main process ends", "id": "f1203:c2:m3"} {"signature": "def run(self, port):", "body": "self.watcher.start()self.run_server(port)", "docstring": "start web server and watcher", "id": "f1203:c2:m4"} {"signature": "def shutdown_server(self):", "body": "self.server.shutdown()self.server.socket.close()", "docstring": "shut down the web server", "id": "f1203:c2:m5"} {"signature": "def shutdown_watcher(self):", "body": "self.watcher.join()", "docstring": "shut down the watcher thread", "id": "f1203:c2:m6"} {"signature": "def render_to(path, template, **data):", "body": "try:renderer.render_to(path, template, **data)except JinjaTemplateNotFound as e:logger.error(e.__doc__ + '' % template)sys.exit(e.exit_code)", "docstring": "shortcut to render data with `template` and then write to `path`.\n Just add exception catch to `renderer.render_to`", "id": "f1206:m0"} {"signature": "def initialize(self):", "body": "try:conf = config.parse()except ConfigSyntaxError as e:logger.error(e.__doc__)sys.exit(e.exit_code)update_nested_dict(self.config, conf)self.blog.__dict__.update(self.config[''])self.author.__dict__.update(self.config[''])self.root = self.config['']templates = join(self.blog.theme, '') jinja2_global_data = {'': self.blog,'': self.author,'': self.config,'': self.root}renderer.initialize(templates, jinja2_global_data)logger.success('')", "docstring": "Initialize configuration and renderer environment", "id": "f1206:c0:m2"} {"signature": "def join(*p):", "body": "return os.path.normpath(os.path.join(*p))", "docstring": "return normpath version of path.join", "id": "f1207:m0"} {"signature": "def chunks(lst, number):", "body": "lst_len = len(lst)for i in xrange(, lst_len, number):yield lst[i: i+number]", "docstring": "A generator, split list `lst` into `number` equal size parts.\nusage::\n\n >>> parts = chunks(range(8),3)\n >>> parts\n \n >>> list(parts)\n [[0, 1, 2], [3, 4, 5], [6, 7]]", "id": "f1207:m1"} {"signature": "def update_nested_dict(a, b):", "body": "for k, v in b.iteritems():if isinstance(v, dict):d = a.setdefault(k, {})update_nested_dict(d, v)else:a[k] = vreturn a", "docstring": "update nested dict `a` with another dict b.\nusage::\n\n >>> a = {'x' : { 'y': 1}}\n >>> b = {'x' : {'z':2, 'y':3}, 'w': 4}\n >>> update_nested_dict(a,b)\n {'x': {'y': 3, 'z': 2}, 'w': 4}", "id": "f1207:m2"} {"signature": "def mkdir_p(path):", "body": "try:os.makedirs(path)except OSError as exc: if exc.errno == errno.EEXIST and os.path.isdir(path):passelse:raise", "docstring": "mkdir -p\n Note: comes from stackoverflow", "id": "f1207:m3"} {"signature": "def parse(src):", "body": "rt = libparser.parse(byref(post), src)return (rt,string_at(post.title, post.tsz),string_at(post.tpic, post.tpsz),post.body)", "docstring": "Note: src should be ascii string", "id": "f1208:m0"} {"signature": "@propertydef gravatar_id(self):", "body": "return md5(self.email).hexdigest()", "docstring": "it's md5(author.email), author's gravatar_id", "id": "f1209:c1:m1"} {"signature": "def render(template, **data):", "body": "try:return renderer.render(template, **data)except JinjaTemplateNotFound as e:logger.error(e.__doc__ + '' % template)sys.exit(e.exit_code)", "docstring": "shortcut to render data with `template`. 
Just add exception\n catch to `renderer.render`", "id": "f1210:m0"} {"signature": "def replace_relative_url_to_absolute(self, content):", "body": "p = os.path.join(os.getcwd(), '', '')return content.replace('', p)", "docstring": "Replace '../' leaded url with absolute uri.", "id": "f1210:c0:m3"} {"signature": "def load_fixture(filename):", "body": "path = os.path.join(os.path.dirname(__file__), \"\", filename)with open(path) as json_data:return json.load(json_data)", "docstring": "Load some fixture JSON", "id": "f1216:m0"} {"signature": "def round_half(number):", "body": "return Decimal(number).quantize(Decimal(\"\"), rounding=ROUND_HALF_UP)", "docstring": "Python's round() function behaves differently in Python 2 and 3\nThis method makes it consistent.\n\n:param number: The number to round", "id": "f1219:m0"} {"signature": "def fahrenheit_to_celsius(fahrenheit):", "body": "return int(round_half((fahrenheit - ) / ))", "docstring": "Convert Fahrenheit to Celsius\n\n:param fahrenheit: The temperature to convert to Celsius", "id": "f1219:m1"} {"signature": "def fahrenheit_to_nuheat(fahrenheit):", "body": "return int(round_half(((fahrenheit - ) * ) + ))", "docstring": "Convert Fahrenheit to a temperature value that NuHeat understands\nFormula f(x) = ((x - 33) * 56) + 33\n\n:param fahrenheit: The temperature to convert to NuHeat", "id": "f1219:m2"} {"signature": "def celsius_to_fahrenheit(celsius):", "body": "return int(round_half(celsius * + ))", "docstring": "Convert Celsius to Fahrenheit\n\n:param celsius: The temperature to convert to Fahrenheit", "id": "f1219:m3"} {"signature": "def celsius_to_nuheat(celsius):", "body": "fahrenheit = celsius_to_fahrenheit(celsius)return int(round_half(((fahrenheit - ) * ) + ))", "docstring": "Convert Celsius to a temperature value that NuHeat understands\nFormula f(x) = ((x - 33) * 56) + 33\n\n:param celsius: The temperature to convert to NuHeat", "id": "f1219:m4"} {"signature": "def nuheat_to_fahrenheit(nuheat_temperature):", "body": "return int(round_half(((nuheat_temperature - ) / ) + ))", "docstring": "Convert the NuHeat temp value to Fahrenheit\nFormula f(x) = ((x - 33) / 56) + 33\n\n:param nuheat_temperature: The temperature to convert to Fahrenheit", "id": "f1219:m5"} {"signature": "def nuheat_to_celsius(nuheat_temperature):", "body": "fahrenheit = nuheat_to_fahrenheit(nuheat_temperature)return fahrenheit_to_celsius(fahrenheit)", "docstring": "Convert the NuHeat temp value to Celsius\n\n:param nuheat_temperature: The temperature to convert to Celsius", "id": "f1219:m6"} {"signature": "def __init__(self, nuheat_session, serial_number):", "body": "self._session = nuheat_sessionself.serial_number = serial_numberself.get_data()", "docstring": "Initialize a local Thermostat object with the data returned from NuHeat", "id": "f1220:c0:m0"} {"signature": "@propertydef fahrenheit(self):", "body": "if not self.temperature:return Nonereturn nuheat_to_fahrenheit(self.temperature)", "docstring": "Return the current temperature in Fahrenheit", "id": "f1220:c0:m2"} {"signature": "@propertydef celsius(self):", "body": "if not self.temperature:return Nonereturn nuheat_to_celsius(self.temperature)", "docstring": "Return the current temperature in Celsius", "id": "f1220:c0:m3"} {"signature": "@propertydef min_fahrenheit(self):", "body": "if not self.min_temperature:return Nonereturn nuheat_to_fahrenheit(self.min_temperature)", "docstring": "Return the thermostat's minimum temperature in Fahrenheit", "id": "f1220:c0:m4"} {"signature": "@propertydef min_celsius(self):", 
"body": "if not self.min_temperature:return Nonereturn nuheat_to_celsius(self.min_temperature)", "docstring": "Return the thermostat's minimum temperature in Celsius", "id": "f1220:c0:m5"} {"signature": "@propertydef max_fahrenheit(self):", "body": "if not self.max_temperature:return Nonereturn nuheat_to_fahrenheit(self.max_temperature)", "docstring": "Return the thermostat's maximum temperature in Fahrenheit", "id": "f1220:c0:m6"} {"signature": "@propertydef max_celsius(self):", "body": "if not self.max_temperature:return Nonereturn nuheat_to_celsius(self.max_temperature)", "docstring": "Return the thermostat's maximum temperature in Celsius", "id": "f1220:c0:m7"} {"signature": "@propertydef target_fahrenheit(self):", "body": "if not self.target_temperature:return Nonereturn nuheat_to_fahrenheit(self.target_temperature)", "docstring": "Return the current target temperature in Fahrenheit", "id": "f1220:c0:m8"} {"signature": "@propertydef target_celsius(self):", "body": "if not self.target_temperature:return Nonereturn nuheat_to_celsius(self.target_temperature)", "docstring": "Return the current target temperature in Celsius", "id": "f1220:c0:m9"} {"signature": "@target_fahrenheit.setterdef target_fahrenheit(self, fahrenheit):", "body": "self.set_target_fahrenheit(fahrenheit)", "docstring": "Helper to set and HOLD the target temperature to the desired fahrenheit\n\n:param fahrenheit: The desired temperature in F", "id": "f1220:c0:m10"} {"signature": "@target_celsius.setterdef target_celsius(self, celsius):", "body": "self.set_target_celsius(celsius)", "docstring": "Helper to set and HOLD the target temperature to the desired fahrenheit\n\n:param celsius: The desired temperature in C", "id": "f1220:c0:m11"} {"signature": "def get_data(self):", "body": "params = {\"\": self.serial_number}data = self._session.request(config.THERMOSTAT_URL, params=params)self._data = dataself.heating = data.get(\"\")self.online = data.get(\"\")self.room = data.get(\"\")self.serial_number = data.get(\"\")self.temperature = data.get(\"\")self.min_temperature = data.get(\"\")self.max_temperature = data.get(\"\")self.target_temperature = data.get(\"\")self._schedule_mode = data.get(\"\")", "docstring": "Fetch/refresh the current instance's data from the NuHeat API", "id": "f1220:c0:m12"} {"signature": "@propertydef schedule_mode(self):", "body": "return self._schedule_mode", "docstring": "Return the mode that the thermostat is currently using", "id": "f1220:c0:m13"} {"signature": "@schedule_mode.setterdef schedule_mode(self, mode):", "body": "modes = [config.SCHEDULE_RUN, config.SCHEDULE_TEMPORARY_HOLD, config.SCHEDULE_HOLD]if mode not in modes:raise Exception(\"\".format(modes))self.set_data({\"\": mode})", "docstring": "Set the thermostat mode\n\n:param mode: The desired mode integer value.\n Auto = 1\n Temporary hold = 2\n Permanent hold = 3", "id": "f1220:c0:m14"} {"signature": "def resume_schedule(self):", "body": "self.schedule_mode = config.SCHEDULE_RUN", "docstring": "A convenience method to tell NuHeat to resume its programmed schedule", "id": "f1220:c0:m15"} {"signature": "def set_target_fahrenheit(self, fahrenheit, mode=config.SCHEDULE_HOLD):", "body": "temperature = fahrenheit_to_nuheat(fahrenheit)self.set_target_temperature(temperature, mode)", "docstring": "Set the target temperature to the desired fahrenheit, with more granular control of the\nhold mode\n\n:param fahrenheit: The desired temperature in F\n:param mode: The desired mode to operate in", "id": "f1220:c0:m16"} {"signature": "def 
set_target_celsius(self, celsius, mode=config.SCHEDULE_HOLD):", "body": "temperature = celsius_to_nuheat(celsius)self.set_target_temperature(temperature, mode)", "docstring": "Set the target temperature to the desired celsius, with more granular control of the hold\nmode\n\n:param celsius: The desired temperature in C\n:param mode: The desired mode to operate in", "id": "f1220:c0:m17"} {"signature": "def set_target_temperature(self, temperature, mode=config.SCHEDULE_HOLD):", "body": "if temperature < self.min_temperature:temperature = self.min_temperatureif temperature > self.max_temperature:temperature = self.max_temperaturemodes = [config.SCHEDULE_TEMPORARY_HOLD, config.SCHEDULE_HOLD]if mode not in modes:raise Exception(\"\".format(modes))self.set_data({\"\": temperature,\"\": mode})", "docstring": "Updates the target temperature on the NuHeat API\n\n:param temperature: The desired temperature in NuHeat format\n:param permanent: Permanently hold the temperature. If set to False, the schedule will\n resume at the next programmed event", "id": "f1220:c0:m18"} {"signature": "def set_data(self, post_data):", "body": "params = {\"\": self.serial_number}self._session.request(config.THERMOSTAT_URL, method=\"\", data=post_data, params=params)", "docstring": "Update (patch) the current instance's data on the NuHeat API", "id": "f1220:c0:m19"} {"signature": "def __init__(self, username, password, session_id=None):", "body": "self.username = usernameself.password = passwordself._session_id = session_id", "docstring": "Initialize a NuHeat API session\n\n:param username: NuHeat username\n:param username: NuHeat password\n:param session_id: A Session ID token to re-use to avoid re-authenticating", "id": "f1222:c0:m0"} {"signature": "def authenticate(self):", "body": "if self._session_id:_LOGGER.debug(\"\")return_LOGGER.debug(\"\")post_data = {\"\": self.username,\"\": self.password,\"\": \"\"}data = self.request(config.AUTH_URL, method=\"\", data=post_data)session_id = data.get(\"\")if not session_id:raise Exception(\"\")self._session_id = session_id", "docstring": "Authenticate against the NuHeat API", "id": "f1222:c0:m2"} {"signature": "def get_thermostat(self, serial_number):", "body": "return NuHeatThermostat(self, serial_number)", "docstring": "Get a thermostat object by serial number\n\n:param serial_number: The serial number / ID of the desired thermostat", "id": "f1222:c0:m3"} {"signature": "def request(self, url, method=\"\", data=None, params=None, retry=True):", "body": "headers = config.REQUEST_HEADERSif params and self._session_id:params[''] = self._session_idif method == \"\":response = requests.get(url, headers=headers, params=params)elif method == \"\":response = requests.post(url, headers=headers, params=params, data=data)if response.status_code == and retry:_LOGGER.warn(\"\")self._session_id = Noneself.authenticate()return self.request(url, method=method, data=data, params=params, retry=False)response.raise_for_status()try:return response.json()except ValueError:return response", "docstring": "Make a request to the NuHeat API\n\n:param url: The URL to request\n:param method: The type of request to make (GET, POST)\n:param data: Data to be sent along with POST requests\n:param params: Querystring parameters\n:param retry: Attempt to re-authenticate and retry request if necessary", "id": "f1222:c0:m4"} {"signature": "def a_view(request):", "body": "return HttpResponse('')", "docstring": "Regular unlocked view.", "id": "f1225:m0"} {"signature": "@lockdown()def locked_view(request):", 
"body": "return HttpResponse('')", "docstring": "View, locked by the default lockdown decorator.", "id": "f1225:m1"} {"signature": "@lockdown(passwords=('',))def overridden_locked_view(request):", "body": "return HttpResponse('')", "docstring": "View, locked by the decorator with a custom password.", "id": "f1225:m2"} {"signature": "@lockdown(url_exceptions=(r'',))def locked_view_with_exception(request):", "body": "return HttpResponse('')", "docstring": "View, locked by the decorator with url exceptions.", "id": "f1225:m3"} {"signature": "@lockdown(remote_addr_exceptions=[''])def locked_view_with_ip_exception(request):", "body": "return HttpResponse('')", "docstring": "View, locked except for the configured IP-address.", "id": "f1225:m4"} {"signature": "@lockdown(extra_context={'': ''})def locked_view_with_extra_context(request):", "body": "return HttpResponse('')", "docstring": "View, locked by the decorator with extra context.", "id": "f1225:m5"} {"signature": "@lockdown(until_date=YESTERDAY)def locked_view_until_yesterday(request):", "body": "return HttpResponse('')", "docstring": "View, locked till yesterday.", "id": "f1225:m6"} {"signature": "@lockdown(until_date=TOMORROW)def locked_view_until_tomorrow(request):", "body": "return HttpResponse('')", "docstring": "View, locked till tomorrow.", "id": "f1225:m7"} {"signature": "@lockdown(after_date=YESTERDAY)def locked_view_after_yesterday(request):", "body": "return HttpResponse('')", "docstring": "View, locked since yesterday.", "id": "f1225:m8"} {"signature": "@lockdown(after_date=TOMORROW)def locked_view_after_tomorrow(request):", "body": "return HttpResponse('')", "docstring": "View, locked starting from tomorrow.", "id": "f1225:m9"} {"signature": "@lockdown(until_date=YESTERDAY, after_date=TOMORROW)def locked_view_until_and_after(request):", "body": "return HttpResponse('')", "docstring": "View, only not looked between yesterday and tomorrow.", "id": "f1225:m10"} {"signature": "@lockdown(form=AuthForm, staff_only=False)def user_locked_view(request):", "body": "return HttpResponse('')", "docstring": "View, locked by the decorator with access for known users only.", "id": "f1225:m11"} {"signature": "@lockdown(form=AuthForm)def staff_locked_view(request):", "body": "return HttpResponse('')", "docstring": "View, locked by the decorator with access for staff users only.", "id": "f1225:m12"} {"signature": "@lockdown(form=AuthForm, superusers_only=True)def superuser_locked_view(request):", "body": "return HttpResponse('')", "docstring": "View, locked by the decorator with access for superusers only.", "id": "f1225:m13"} {"signature": "def clean_answer(self):", "body": "if self.cleaned_data[''] == :return raise forms.ValidationError('')", "docstring": "Clean the answer field, by checking its value.", "id": "f1226:c0:m0"} {"signature": "def setUp(self):", "body": "super(MiddlewareTests, self).setUp()self._old_middleware_classes = django_settings.MIDDLEWAREdjango_settings.MIDDLEWARE.append('',)", "docstring": "Additional setup for middleware tests.", "id": "f1229:c2:m0"} {"signature": "def tearDown(self):", "body": "django_settings.MIDDLEWARE = self._old_middleware_classessuper(MiddlewareTests, self).tearDown()", "docstring": "Additional tear down for middleware tests.", "id": "f1229:c2:m2"} {"signature": "def __init__(self, passwords=None, *args, **kwargs):", "body": "super(LockdownForm, self).__init__(*args, **kwargs)if passwords is None:passwords = settings.PASSWORDSself.valid_passwords = passwords", "docstring": "Initialize the form by 
setting the valid passwords.", "id": "f1230:c0:m0"} {"signature": "def clean_password(self):", "body": "value = self.cleaned_data.get('')if value not in self.valid_passwords:raise forms.ValidationError('')return value", "docstring": "Check that the password is valid.", "id": "f1230:c0:m1"} {"signature": "def generate_token(self):", "body": "return self.cleaned_data['']", "docstring": "Save the password as the authentication token.\n\n It's acceptable to store the password raw, as it is stored server-side\n in the user's session.", "id": "f1230:c0:m2"} {"signature": "def authenticate(self, token_value):", "body": "return token_value in self.valid_passwords", "docstring": "Check that the password is valid.\n\n This allows for revoking of a user's preview rights by changing the\n valid passwords.", "id": "f1230:c0:m3"} {"signature": "def show_form(self):", "body": "return bool(self.valid_passwords)", "docstring": "Show the form if there are any valid passwords.", "id": "f1230:c0:m4"} {"signature": "def __init__(self, staff_only=None, superusers_only=None, *args,**kwargs):", "body": "super(AuthForm, self).__init__(*args, **kwargs)if staff_only is None:staff_only = getattr(django_settings,'', True)if superusers_only is None:superusers_only = getattr(django_settings,'',False)self.staff_only = staff_onlyself.superusers_only = superusers_only", "docstring": "Initialize the form by setting permissions needed for access.", "id": "f1230:c1:m0"} {"signature": "def clean(self):", "body": "cleaned_data = super(AuthForm, self).clean()user = self.get_user()if self.staff_only and (not user or not user.is_staff):raise forms.ValidationError('')if self.superusers_only and (not user or not user.is_superuser):raise forms.ValidationError('')return cleaned_data", "docstring": "When receiving the filled out form, check for valid access.", "id": "f1230:c1:m1"} {"signature": "def generate_token(self):", "body": "user = self.get_user()return '' % (user.backend, user.pk)", "docstring": "Save the password as the authentication token.\n\n It's acceptable to store the password raw, as it is stored server-side\n in the user's session.", "id": "f1230:c1:m2"} {"signature": "def authenticate(self, token_value):", "body": "try:backend_path, user_id = token_value.split('', )except (ValueError, AttributeError):return Falsebackend = auth.load_backend(backend_path)return bool(backend.get_user(user_id))", "docstring": "Check that the password is valid.\n\n This allows for revoking of a user's preview rights by changing the\n valid passwords.", "id": "f1230:c1:m3"} {"signature": "def show_form(self):", "body": "return True", "docstring": "Determine if the form should be shown on locked pages.", "id": "f1230:c1:m4"} {"signature": "def compile_url_exceptions(url_exceptions):", "body": "return [re.compile(p) for p in url_exceptions]", "docstring": "Return a list of compiled regex objects, containing the url exceptions.\n\n All URLs in that list returned won't be considered as locked.", "id": "f1231:m0"} {"signature": "def get_lockdown_form(form_path):", "body": "if not form_path:raise ImproperlyConfigured('')form_path_list = form_path.split(\"\")new_module = \"\".join(form_path_list[:-])attr = form_path_list[-]try:mod = import_module(new_module)except (ImportError, ValueError):raise ImproperlyConfigured('''''' % new_module)try:form = getattr(mod, attr)except AttributeError:raise ImproperlyConfigured(''''% (new_module, attr))return form", "docstring": "Return a form class for a given string pointing to a lockdown form.", "id": 
"f1231:m1"} {"signature": "def __init__(self, get_response=None, form=None, until_date=None,after_date=None, logout_key=None, session_key=None,url_exceptions=None, view_exceptions=None,remote_addr_exceptions=None, trusted_proxies=None,extra_context=None, **form_kwargs):", "body": "if logout_key is None:logout_key = settings.LOGOUT_KEYif session_key is None:session_key = settings.SESSION_KEYself.get_response = get_responseself.form = formself.form_kwargs = form_kwargsself.until_date = until_dateself.after_date = after_dateself.logout_key = logout_keyself.session_key = session_keyself.url_exceptions = url_exceptionsself.remote_addr_exceptions = remote_addr_exceptionsself.trusted_proxies = trusted_proxiesself.extra_context = extra_context", "docstring": "Initialize the middleware, by setting the configuration values.", "id": "f1231:c0:m0"} {"signature": "def __call__(self, request):", "body": "response = self.process_request(request)if not response:response = self.get_response(request)return response", "docstring": "Handle calls to the class instance.", "id": "f1231:c0:m1"} {"signature": "def process_request(self, request):", "body": "try:session = request.sessionexcept AttributeError:raise ImproperlyConfigured('''')if settings.ENABLED is False:return Noneif self.remote_addr_exceptions:remote_addr_exceptions = self.remote_addr_exceptionselse:remote_addr_exceptions = settings.REMOTE_ADDR_EXCEPTIONSif remote_addr_exceptions:trusted_proxies = self.trusted_proxies or settings.TRUSTED_PROXIESremote_addr = request.META.get('')if remote_addr in remote_addr_exceptions:return Noneif remote_addr in trusted_proxies:x_forwarded_for = request.META.get('')if x_forwarded_for:remote_addr = x_forwarded_for.split('')[-].strip()if remote_addr in remote_addr_exceptions:return Noneif self.url_exceptions:url_exceptions = compile_url_exceptions(self.url_exceptions)else:url_exceptions = compile_url_exceptions(settings.URL_EXCEPTIONS)for pattern in url_exceptions:if pattern.search(request.path):return Nonetry:resolved_path = resolve(request.path)except Resolver404:passelse:if resolved_path.func in settings.VIEW_EXCEPTIONS:return Noneif self.until_date:until_date = self.until_dateelse:until_date = settings.UNTIL_DATEif self.after_date:after_date = self.after_dateelse:after_date = settings.AFTER_DATEif until_date or after_date:locked_date = Falseif until_date and datetime.datetime.now() < until_date:locked_date = Trueif after_date and datetime.datetime.now() > after_date:locked_date = Trueif not locked_date:return Noneform_data = request.POST if request.method == '' else Noneif self.form:form_class = self.formelse:form_class = get_lockdown_form(settings.FORM)form = form_class(data=form_data, **self.form_kwargs)authorized = Falsetoken = session.get(self.session_key)if hasattr(form, ''):if form.authenticate(token):authorized = Trueelif token is True:authorized = Trueif authorized and self.logout_key and self.logout_key in request.GET:if self.session_key in session:del session[self.session_key]querystring = request.GET.copy()del querystring[self.logout_key]return self.redirect(request)if authorized:return Noneif form.is_valid():if hasattr(form, ''):token = form.generate_token()else:token = Truesession[self.session_key] = tokenreturn self.redirect(request)page_data = {'': until_date, '': after_date}if not hasattr(form, '') or form.show_form():page_data[''] = formif self.extra_context:page_data.update(self.extra_context)return render(request, '', page_data)", "docstring": "Check if each request is allowed to access the 
current resource.", "id": "f1231:c0:m2"} {"signature": "def redirect(self, request):", "body": "url = request.pathquerystring = request.GET.copy()if self.logout_key and self.logout_key in request.GET:del querystring[self.logout_key]if querystring:url = '' % (url, querystring.urlencode())return HttpResponseRedirect(url)", "docstring": "Handle redirects properly.", "id": "f1231:c0:m3"} {"signature": "def lockdown(*args, **kwargs):", "body": "return decorator_from_middleware_with_args(LockdownMiddleware)(*args,**kwargs)", "docstring": "Define a decorator based on the LockdownMiddleware.\n\n This decorator takes the same arguments as the middleware, but allows a\n more granular locking than the middleware.", "id": "f1233:m0"} {"signature": "def _prompt_for_values(d):", "body": "for key, value in d.items():if isinstance(value, CommentedMap):_prompt_for_values(value)elif isinstance(value, list):for item in value:_prompt_for_values(item)else:typ = type(value)if isinstance(value, ScalarFloat): typ = floatnew_value = click.prompt(key, type=typ, default=value)d[key] = new_valuereturn d", "docstring": "Update the descriptive metadata interactively.\n\n Uses values entered by the user. Note that the function keeps recursing\n whenever a value is another ``CommentedMap`` or a ``list``. The\n function works as passing dictionaries and lists into a function edits\n the values in place.", "id": "f1243:m1"} {"signature": "@click.command()@click.option(\"\", \"\", is_flag=True, help=\"\")@click.argument(\"\")@click.argument(\"\", default=\"\")@click.option(\"\", \"\", type=click.Path(exists=True))def create(quiet, name, base_uri, symlink_path):", "body": "_validate_name(name)admin_metadata = dtoolcore.generate_admin_metadata(name)parsed_base_uri = dtoolcore.utils.generous_parse_uri(base_uri)if parsed_base_uri.scheme == \"\":if symlink_path is None:raise click.UsageError(\"\") if symlink_path:base_uri = dtoolcore.utils.sanitise_uri(\"\" + parsed_base_uri.path)parsed_base_uri = dtoolcore.utils.generous_parse_uri(base_uri)proto_dataset = dtoolcore.generate_proto_dataset(admin_metadata=admin_metadata,base_uri=dtoolcore.utils.urlunparse(parsed_base_uri),config_path=CONFIG_PATH)if symlink_path:symlink_abspath = os.path.abspath(symlink_path)proto_dataset._storage_broker.symlink_path = symlink_abspathtry:proto_dataset.create()except dtoolcore.storagebroker.StorageBrokerOSError as err:raise click.UsageError(str(err))proto_dataset.put_readme(\"\")if quiet:click.secho(proto_dataset.uri)else:click.secho(\"\", nl=False, fg=\"\")click.secho(proto_dataset.uri)click.secho(\"\")step = if parsed_base_uri.scheme != \"\":click.secho(\"\".format(step))click.secho(\"\".format(proto_dataset.uri),fg=\"\")if parsed_base_uri.scheme == \"\":data_path = proto_dataset._storage_broker._data_abspathclick.secho(\"\")click.secho(\"\".format(data_path),fg=\"\")step = step + click.secho(\"\".format(step))click.secho(\"\".format(proto_dataset.uri),fg=\"\")step = step + click.secho(\"\".format(step))click.secho(\"\".format(proto_dataset.uri), fg=\"\")", "docstring": "Create a proto dataset.", "id": "f1243:m3"} {"signature": "@click.command()@base_dataset_uri_argument@click.argument(\"\", default=\"\")def name(dataset_uri, new_name):", "body": "if new_name != \"\":_validate_name(new_name)try:dataset = dtoolcore.ProtoDataSet.from_uri(uri=dataset_uri,config_path=CONFIG_PATH)except dtoolcore.DtoolCoreTypeError:dataset = dtoolcore.DataSet.from_uri(uri=dataset_uri,config_path=CONFIG_PATH)dataset.update_name(new_name)admin_metadata = 
dtoolcore._admin_metadata_from_uri(uri=dataset_uri,config_path=CONFIG_PATH)click.secho(admin_metadata[\"\"])", "docstring": "Report / update the name of the dataset.\n\nIt is only possible to update the name of a proto dataset,\ni.e. a dataset that has not yet been frozen.", "id": "f1243:m4"} {"signature": "@click.group()def readme():", "body": "", "docstring": "Edit / show readme content.\n\n The readme content is descriptive metadata describing the dataset.", "id": "f1243:m5"} {"signature": "@readme.command()@proto_dataset_uri_argumentdef interactive(proto_dataset_uri):", "body": "proto_dataset = dtoolcore.ProtoDataSet.from_uri(uri=proto_dataset_uri,config_path=CONFIG_PATH)readme_template = _get_readme_template()yaml = YAML()yaml.explicit_start = Trueyaml.indent(mapping=, sequence=, offset=)descriptive_metadata = yaml.load(readme_template)descriptive_metadata = _prompt_for_values(descriptive_metadata)stream = StringIO()yaml.dump(descriptive_metadata, stream)proto_dataset.put_readme(stream.getvalue())click.secho(\"\", fg=\"\")click.secho(\"\")click.secho(\"\".format(proto_dataset_uri),fg=\"\")", "docstring": "Interactive prompting to populate the readme.", "id": "f1243:m6"} {"signature": "@readme.command()@base_dataset_uri_argumentdef edit(dataset_uri):", "body": "try:dataset = dtoolcore.ProtoDataSet.from_uri(uri=dataset_uri,config_path=CONFIG_PATH)except dtoolcore.DtoolCoreTypeError:dataset = dtoolcore.DataSet.from_uri(uri=dataset_uri,config_path=CONFIG_PATH)readme_content = dataset.get_readme_content()try:readme_content = unicode(readme_content, \"\")except NameError:passedited_content = click.edit(readme_content)if edited_content is not None:_validate_and_put_readme(dataset, edited_content)click.secho(\"\", nl=False, fg=\"\")else:click.secho(\"\", nl=False, fg=\"\")click.secho(dataset_uri)", "docstring": "Default editor updating of readme content.", "id": "f1243:m8"} {"signature": "@readme.command()@base_dataset_uri_argumentdef show(dataset_uri):", "body": "try:dataset = dtoolcore.ProtoDataSet.from_uri(uri=dataset_uri,config_path=CONFIG_PATH)except dtoolcore.DtoolCoreTypeError:dataset = dtoolcore.DataSet.from_uri(uri=dataset_uri,config_path=CONFIG_PATH)readme_content = dataset.get_readme_content()click.secho(readme_content)", "docstring": "Show the descriptive metadata in the readme.", "id": "f1243:m9"} {"signature": "@readme.command()@proto_dataset_uri_argument@click.argument('', type=click.File(''))def write(proto_dataset_uri, input):", "body": "proto_dataset = dtoolcore.ProtoDataSet.from_uri(uri=proto_dataset_uri)_validate_and_put_readme(proto_dataset, input.read())", "docstring": "Use YAML from a file or stdin to populate the readme.\n\n To stream content from stdin use \"-\", e.g.\n\n echo \"desc: my data\" | dtool readme write -", "id": "f1243:m10"} {"signature": "@click.group()def add():", "body": "", "docstring": "Add items and item metadata to a proto dataset.", "id": "f1243:m11"} {"signature": "@add.command()@click.argument(\"\", type=click.Path(exists=True))@proto_dataset_uri_argument@click.argument(\"\", default=\"\")def item(proto_dataset_uri, input_file, relpath_in_dataset):", "body": "proto_dataset = dtoolcore.ProtoDataSet.from_uri(proto_dataset_uri,config_path=CONFIG_PATH)if relpath_in_dataset == \"\":relpath_in_dataset = os.path.basename(input_file)proto_dataset.put_item(input_file, relpath_in_dataset)", "docstring": "Add a file to the proto dataset.", "id": "f1243:m12"} {"signature": 
"@add.command()@proto_dataset_uri_argument@click.argument(\"\")@click.argument(\"\")@click.argument(\"\")def metadata(proto_dataset_uri, relpath_in_dataset, key, value):", "body": "proto_dataset = dtoolcore.ProtoDataSet.from_uri(uri=proto_dataset_uri,config_path=CONFIG_PATH)proto_dataset.add_item_metadata(handle=relpath_in_dataset,key=key,value=value)", "docstring": "Add metadata to a file in the proto dataset.", "id": "f1243:m13"} {"signature": "@click.command()@proto_dataset_uri_argumentdef freeze(proto_dataset_uri):", "body": "proto_dataset = dtoolcore.ProtoDataSet.from_uri(uri=proto_dataset_uri,config_path=CONFIG_PATH)num_items = len(list(proto_dataset._identifiers()))max_files_limit = int(dtoolcore.utils.get_config_value(\"\",CONFIG_PATH,))assert isinstance(max_files_limit, int)if num_items > max_files_limit:click.secho(\"\".format(num_items,max_files_limit),fg=\"\")click.secho(\"\")click.secho(\"\")click.secho(\"\")click.secho(\"\")sys.exit()handles = [h for h in proto_dataset._storage_broker.iter_item_handles()]for h in handles:if not valid_handle(h):click.secho(\"\".format(h),fg=\"\")click.secho(\"\")click.secho(\"\")sys.exit()with click.progressbar(length=len(list(proto_dataset._identifiers())),label=\"\") as progressbar:try:proto_dataset.freeze(progressbar=progressbar)except dtoolcore.storagebroker.DiskStorageBrokerValidationWarning as e:click.secho(\"\")click.secho(str(e), fg=\"\", nl=False)sys.exit()click.secho(\"\", nl=False, fg=\"\")click.secho(proto_dataset_uri)", "docstring": "Convert a proto dataset into a dataset.\n\n This step is carried out after all files have been added to the dataset.\n Freezing a dataset finalizes it with a stamp marking it as frozen.", "id": "f1243:m14"} {"signature": "@click.command()@click.option(\"\", is_flag=True, help=\"\")@click.option(\"\", \"\", is_flag=True, help=\"\")@dataset_uri_argument@click.argument(\"\")def copy(resume, quiet, dataset_uri, dest_base_uri):", "body": "click.secho(\"\",fg=\"\",err=True)click.secho(\"\",fg=\"\",err=True)_copy(resume, quiet, dataset_uri, dest_base_uri)", "docstring": "DEPRECATED: Copy a dataset to a different location.", "id": "f1243:m16"} {"signature": "@click.command()@click.option(\"\", is_flag=True, help=\"\")@click.option(\"\", \"\", is_flag=True, help=\"\")@dataset_uri_argument@click.argument(\"\")def cp(resume, quiet, dataset_uri, dest_base_uri):", "body": "_copy(resume, quiet, dataset_uri, dest_base_uri)", "docstring": "Copy a dataset to a different location.", "id": "f1243:m17"} {"signature": "@click.command()@click.option(\"\", \"\", is_flag=True, help=\"\")@dataset_uri_argumentdef publish(quiet, dataset_uri):", "body": "access_uri = http_publish(dataset_uri)if not quiet:click.secho(\"\", nl=False, fg=\"\")click.secho(access_uri)", "docstring": "Enable HTTP access to a dataset.\n\n This only works on datasets in some systems. For example, datasets stored\n in AWS S3 object storage and Microsoft Azure Storage can be published as\n datasets accessible over HTTP. 
A published dataset is world readable.", "id": "f1245:m0"} {"signature": "def valid_handle(handle):", "body": "if handle.find(\"\") != -:return Falsereturn True", "docstring": "Return false if the handle is invalid.\n\n For example if the handle contains a newline.", "id": "f1246:m0"} {"signature": "def logsigmoid(a):", "body": "return -tf.nn.softplus(-a)", "docstring": "Equivalent to tf.log(tf.sigmoid(a))", "id": "f1258:m0"} {"signature": "def apply_stats(self, statsUpdates):", "body": "def updateAccumStats():if self._full_stats_init:return tf.cond(tf.greater(self.sgd_step, self._cold_iter), lambda: tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff= / self._stats_accum_iter)), tf.no_op)else:return tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff= / self._stats_accum_iter))def updateRunningAvgStats(statsUpdates, fac_iter=):return tf.group(*self._apply_stats(statsUpdates))if self._async_stats:update_stats = self._apply_stats(statsUpdates)queue = tf.FIFOQueue(, [item.dtype for item in update_stats], shapes=[item.get_shape() for item in update_stats])enqueue_op = queue.enqueue(update_stats)def dequeue_stats_op():return queue.dequeue()self.qr_stats = tf.train.QueueRunner(queue, [enqueue_op])update_stats_op = tf.cond(tf.equal(queue.size(), tf.convert_to_tensor()), tf.no_op, lambda: tf.group(*[dequeue_stats_op(), ]))else:update_stats_op = tf.cond(tf.greater_equal(self.stats_step, self._stats_accum_iter), lambda: updateRunningAvgStats(statsUpdates), updateAccumStats)self._update_stats_op = update_stats_opreturn update_stats_op", "docstring": "compute stats and update/apply the new stats to the running average", "id": "f1267:c0:m5"} {"signature": "def computeStatsEigen(self):", "body": "with tf.device(''):def removeNone(tensor_list):local_list = []for item in tensor_list:if item is not None:local_list.append(item)return local_listdef copyStats(var_list):print(\"\")redundant_stats = {}copied_list = []for item in var_list:if item is not None:if item not in redundant_stats:if self._use_float64:redundant_stats[item] = tf.cast(tf.identity(item), tf.float64)else:redundant_stats[item] = tf.identity(item)copied_list.append(redundant_stats[item])else:copied_list.append(None)return copied_liststats_eigen = self.stats_eigencomputedEigen = {}eigen_reverse_lookup = {}updateOps = []with tf.control_dependencies([]):for stats_var in stats_eigen:if stats_var not in computedEigen:eigens = tf.self_adjoint_eig(stats_var)e = eigens[]Q = eigens[]if self._use_float64:e = tf.cast(e, tf.float32)Q = tf.cast(Q, tf.float32)updateOps.append(e)updateOps.append(Q)computedEigen[stats_var] = {'': e, '': Q}eigen_reverse_lookup[e] = stats_eigen[stats_var]['']eigen_reverse_lookup[Q] = stats_eigen[stats_var]['']self.eigen_reverse_lookup = eigen_reverse_lookupself.eigen_update_list = updateOpsif KFAC_DEBUG:self.eigen_update_list = [item for item in updateOps]with tf.control_dependencies(updateOps):updateOps.append(tf.Print(tf.constant(), [tf.convert_to_tensor('')]))return updateOps", "docstring": "compute the eigen decomp using copied var stats to avoid concurrent read/write from other queue", "id": "f1267:c0:m8"} {"signature": "def store_args(method):", "body": "argspec = inspect.getfullargspec(method)defaults = {}if argspec.defaults is not None:defaults = dict(zip(argspec.args[-len(argspec.defaults):], argspec.defaults))if argspec.kwonlydefaults is not None:defaults.update(argspec.kwonlydefaults)arg_names = argspec.args[:]@functools.wraps(method)def wrapper(*positional_args, 
**keyword_args):self = positional_args[]args = defaults.copy()for name, value in zip(arg_names, positional_args[:]):args[name] = valueargs.update(keyword_args)self.__dict__.update(args)return method(*positional_args, **keyword_args)return wrapper", "docstring": "Stores provided method args as instance attributes.", "id": "f1268:m0"} {"signature": "def import_function(spec):", "body": "mod_name, fn_name = spec.split('')module = importlib.import_module(mod_name)fn = getattr(module, fn_name)return fn", "docstring": "Import a function identified by a string like \"pkg.module:fn_name\".", "id": "f1268:m1"} {"signature": "def flatten_grads(var_list, grads):", "body": "return tf.concat([tf.reshape(grad, [U.numel(v)])for (v, grad) in zip(var_list, grads)], )", "docstring": "Flattens a variables and their gradients.", "id": "f1268:m2"} {"signature": "def nn(input, layers_sizes, reuse=None, flatten=False, name=\"\"):", "body": "for i, size in enumerate(layers_sizes):activation = tf.nn.relu if i < len(layers_sizes) - else Noneinput = tf.layers.dense(inputs=input,units=size,kernel_initializer=tf.contrib.layers.xavier_initializer(),reuse=reuse,name=name + '' + str(i))if activation:input = activation(input)if flatten:assert layers_sizes[-] == input = tf.reshape(input, [-])return input", "docstring": "Creates a simple neural network", "id": "f1268:m3"} {"signature": "def mpi_fork(n, extra_mpi_args=[]):", "body": "if n <= :return \"\"if os.getenv(\"\") is None:env = os.environ.copy()env.update(MKL_NUM_THREADS=\"\",OMP_NUM_THREADS=\"\",IN_MPI=\"\")args = [\"\", \"\", str(n)] +extra_mpi_args +[sys.executable]args += sys.argvsubprocess.check_call(args, env=env)return \"\"else:install_mpi_excepthook()return \"\"", "docstring": "Re-launches the current script with workers\n Returns \"parent\" for original parent, \"child\" for MPI children", "id": "f1268:m5"} {"signature": "def convert_episode_to_batch_major(episode):", "body": "episode_batch = {}for key in episode.keys():val = np.array(episode[key]).copy()episode_batch[key] = val.swapaxes(, )return episode_batch", "docstring": "Converts an episode to have the batch dimension in the major (first)\n dimension.", "id": "f1268:m6"} {"signature": "def transitions_in_episode_batch(episode_batch):", "body": "shape = episode_batch[''].shapereturn shape[] * shape[]", "docstring": "Number of transitions in a given episode batch.", "id": "f1268:m7"} {"signature": "def reshape_for_broadcasting(source, target):", "body": "dim = len(target.get_shape())shape = ([] * (dim - )) + [-]return tf.reshape(tf.cast(source, target.dtype), shape)", "docstring": "Reshapes a tensor (source) to have the correct shape and dtype of the target\n before broadcasting it with MPI.", "id": "f1268:m8"} {"signature": "def cached_make_env(make_env):", "body": "if make_env not in CACHED_ENVS:env = make_env()CACHED_ENVS[make_env] = envreturn CACHED_ENVS[make_env]", "docstring": "Only creates a new environment from the provided function if one has not yet already been\ncreated. 
This is useful here because we need to infer certain properties of the env, e.g.\nits observation and action spaces, without any intent of actually using it.", "id": "f1269:m0"} {"signature": "def __init__(self, buffer_shapes, size_in_transitions, T, sample_transitions):", "body": "self.buffer_shapes = buffer_shapesself.size = size_in_transitions // Tself.T = Tself.sample_transitions = sample_transitionsself.buffers = {key: np.empty([self.size, *shape])for key, shape in buffer_shapes.items()}self.current_size = self.n_transitions_stored = self.lock = threading.Lock()", "docstring": "Creates a replay buffer.\n\n Args:\n buffer_shapes (dict of ints): the shape for all buffers that are used in the replay\n buffer\n size_in_transitions (int): the size of the buffer, measured in transitions\n T (int): the time horizon for episodes\n sample_transitions (function): a function that samples from the replay buffer", "id": "f1273:c0:m0"} {"signature": "def sample(self, batch_size):", "body": "buffers = {}with self.lock:assert self.current_size > for key in self.buffers.keys():buffers[key] = self.buffers[key][:self.current_size]buffers[''] = buffers[''][:, :, :]buffers[''] = buffers[''][:, :, :]transitions = self.sample_transitions(buffers, batch_size)for key in (['', '', ''] + list(self.buffers.keys())):assert key in transitions, \"\" % keyreturn transitions", "docstring": "Returns a dict {key: array(batch_size x shapes[key])}", "id": "f1273:c0:m2"} {"signature": "def store_episode(self, episode_batch):", "body": "batch_sizes = [len(episode_batch[key]) for key in episode_batch.keys()]assert np.all(np.array(batch_sizes) == batch_sizes[])batch_size = batch_sizes[]with self.lock:idxs = self._get_storage_idx(batch_size)for key in self.buffers.keys():self.buffers[key][idxs] = episode_batch[key]self.n_transitions_stored += batch_size * self.T", "docstring": "episode_batch: array(batch_size x (T or T+1) x dim_key)", "id": "f1273:c0:m3"} {"signature": "def __init__(self, size, eps=, default_clip_range=np.inf, sess=None):", "body": "self.size = sizeself.eps = epsself.default_clip_range = default_clip_rangeself.sess = sess if sess is not None else tf.get_default_session()self.local_sum = np.zeros(self.size, np.float32)self.local_sumsq = np.zeros(self.size, np.float32)self.local_count = np.zeros(, np.float32)self.sum_tf = tf.get_variable(initializer=tf.zeros_initializer(), shape=self.local_sum.shape, name='',trainable=False, dtype=tf.float32)self.sumsq_tf = tf.get_variable(initializer=tf.zeros_initializer(), shape=self.local_sumsq.shape, name='',trainable=False, dtype=tf.float32)self.count_tf = tf.get_variable(initializer=tf.ones_initializer(), shape=self.local_count.shape, name='',trainable=False, dtype=tf.float32)self.mean = tf.get_variable(initializer=tf.zeros_initializer(), shape=(self.size,), name='',trainable=False, dtype=tf.float32)self.std = tf.get_variable(initializer=tf.ones_initializer(), shape=(self.size,), name='',trainable=False, dtype=tf.float32)self.count_pl = tf.placeholder(name='', shape=(,), dtype=tf.float32)self.sum_pl = tf.placeholder(name='', shape=(self.size,), dtype=tf.float32)self.sumsq_pl = tf.placeholder(name='', shape=(self.size,), dtype=tf.float32)self.update_op = tf.group(self.count_tf.assign_add(self.count_pl),self.sum_tf.assign_add(self.sum_pl),self.sumsq_tf.assign_add(self.sumsq_pl))self.recompute_op = tf.group(tf.assign(self.mean, self.sum_tf / self.count_tf),tf.assign(self.std, tf.sqrt(tf.maximum(tf.square(self.eps),self.sumsq_tf / self.count_tf - tf.square(self.sum_tf / 
self.count_tf)))),)self.lock = threading.Lock()", "docstring": "A normalizer that ensures that observations are approximately distributed according to\n a standard Normal distribution (i.e. have mean zero and variance one).\n\n Args:\n size (int): the size of the observation to be normalized\n eps (float): a small constant that avoids underflows\n default_clip_range (float): normalized observations are clipped to be in\n [-default_clip_range, default_clip_range]\n sess (object): the TensorFlow session to be used", "id": "f1274:c0:m0"} {"signature": "@store_argsdef __init__(self, input_dims, buffer_size, hidden, layers, network_class, polyak, batch_size,Q_lr, pi_lr, norm_eps, norm_clip, max_u, action_l2, clip_obs, scope, T,rollout_batch_size, subtract_goals, relative_goals, clip_pos_returns, clip_return,bc_loss, q_filter, num_demo, demo_batch_size, prm_loss_weight, aux_loss_weight,sample_transitions, gamma, reuse=False, **kwargs):", "body": "if self.clip_return is None:self.clip_return = np.infself.create_actor_critic = import_function(self.network_class)input_shapes = dims_to_shapes(self.input_dims)self.dimo = self.input_dims['']self.dimg = self.input_dims['']self.dimu = self.input_dims['']stage_shapes = OrderedDict()for key in sorted(self.input_dims.keys()):if key.startswith(''):continuestage_shapes[key] = (None, *input_shapes[key])for key in ['', '']:stage_shapes[key + ''] = stage_shapes[key]stage_shapes[''] = (None,)self.stage_shapes = stage_shapeswith tf.variable_scope(self.scope):self.staging_tf = StagingArea(dtypes=[tf.float32 for _ in self.stage_shapes.keys()],shapes=list(self.stage_shapes.values()))self.buffer_ph_tf = [tf.placeholder(tf.float32, shape=shape) for shape in self.stage_shapes.values()]self.stage_op = self.staging_tf.put(self.buffer_ph_tf)self._create_network(reuse=reuse)buffer_shapes = {key: (self.T- if key != '' else self.T, *input_shapes[key])for key, val in input_shapes.items()}buffer_shapes[''] = (buffer_shapes[''][], self.dimg)buffer_shapes[''] = (self.T, self.dimg)buffer_size = (self.buffer_size // self.rollout_batch_size) * self.rollout_batch_sizeself.buffer = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions)global DEMO_BUFFERDEMO_BUFFER = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions)", "docstring": "Implementation of DDPG that is used in combination with Hindsight Experience Replay (HER).\n Added functionality to use demonstrations for training to Overcome exploration problem.\n\n Args:\n input_dims (dict of ints): dimensions for the observation (o), the goal (g), and the\n actions (u)\n buffer_size (int): number of transitions that are stored in the replay buffer\n hidden (int): number of units in the hidden layers\n layers (int): number of hidden layers\n network_class (str): the network class that should be used (e.g. 'baselines.her.ActorCritic')\n polyak (float): coefficient for Polyak-averaging of the target network\n batch_size (int): batch size for training\n Q_lr (float): learning rate for the Q (critic) network\n pi_lr (float): learning rate for the pi (actor) network\n norm_eps (float): a small value used in the normalizer to avoid numerical instabilities\n norm_clip (float): normalized inputs are clipped to be in [-norm_clip, norm_clip]\n max_u (float): maximum action magnitude, i.e. 
actions are in [-max_u, max_u]\n action_l2 (float): coefficient for L2 penalty on the actions\n clip_obs (float): clip observations before normalization to be in [-clip_obs, clip_obs]\n scope (str): the scope used for the TensorFlow graph\n T (int): the time horizon for rollouts\n rollout_batch_size (int): number of parallel rollouts per DDPG agent\n subtract_goals (function): function that subtracts goals from each other\n relative_goals (boolean): whether or not relative goals should be fed into the network\n clip_pos_returns (boolean): whether or not positive returns should be clipped\n clip_return (float): clip returns to be in [-clip_return, clip_return]\n sample_transitions (function): function that samples from the replay buffer\n gamma (float): gamma used for Q learning updates\n reuse (boolean): whether or not the networks should be reused\n bc_loss: whether or not the behavior cloning loss should be used as an auxiliary loss\n q_filter: whether or not a filter on the q value update should be used when training with demonstrations\n num_demo: Number of episodes to be used in the demonstration buffer\n demo_batch_size: number of samples to be used from the demonstrations buffer, per mpi thread\n prm_loss_weight: Weight corresponding to the primary loss\n aux_loss_weight: Weight corresponding to the auxiliary loss, also called the cloning loss", "id": "f1275:c0:m0"} {"signature": "def store_episode(self, episode_batch, update_stats=True):", "body": "self.buffer.store_episode(episode_batch)if update_stats:episode_batch[''] = episode_batch[''][:, :, :]episode_batch[''] = episode_batch[''][:, :, :]num_normalizing_transitions = transitions_in_episode_batch(episode_batch)transitions = self.sample_transitions(episode_batch, num_normalizing_transitions)o, g, ag = transitions[''], transitions[''], transitions['']transitions[''], transitions[''] = self._preprocess_og(o, ag, g)self.o_stats.update(transitions[''])self.g_stats.update(transitions[''])self.o_stats.recompute_stats()self.g_stats.recompute_stats()", "docstring": "episode_batch: array of batch_size x (T or T+1) x dim_key\n 'o' is of size T+1, others are of size T", "id": "f1275:c0:m6"} {"signature": "def __getstate__(self):", "body": "excluded_subnames = ['', '', '', '', '', '', '','', '', '', '', '','', '']state = {k: v for k, v in self.__dict__.items() if all([not subname in k for subname in excluded_subnames])}state[''] = self.buffer_sizestate[''] = self.sess.run([x for x in self._global_vars('') if '' not in x.name])return state", "docstring": "Our policies can be loaded from pkl, but after unpickling you cannot continue training.", "id": "f1275:c0:m21"} {"signature": "@store_argsdef __init__(self, inputs_tf, dimo, dimg, dimu, max_u, o_stats, g_stats, hidden, layers,**kwargs):", "body": "self.o_tf = inputs_tf['']self.g_tf = inputs_tf['']self.u_tf = inputs_tf['']o = self.o_stats.normalize(self.o_tf)g = self.g_stats.normalize(self.g_tf)input_pi = tf.concat(axis=, values=[o, g]) with tf.variable_scope(''):self.pi_tf = self.max_u * tf.tanh(nn(input_pi, [self.hidden] * self.layers + [self.dimu]))with tf.variable_scope(''):input_Q = tf.concat(axis=, values=[o, g, self.pi_tf / self.max_u])self.Q_pi_tf = nn(input_Q, [self.hidden] * self.layers + [])input_Q = tf.concat(axis=, values=[o, g, self.u_tf / self.max_u])self._input_Q = input_Q self.Q_tf = nn(input_Q, [self.hidden] * self.layers + [], reuse=True)", "docstring": "The actor-critic network and related training code.\n\n Args:\n inputs_tf (dict of tensors): all necessary inputs 
for the network: the\n observation (o), the goal (g), and the action (u)\n dimo (int): the dimension of the observations\n dimg (int): the dimension of the goals\n dimu (int): the dimension of the actions\n max_u (float): the maximum magnitude of actions; action outputs will be scaled\n accordingly\n o_stats (baselines.her.Normalizer): normalizer for observations\n g_stats (baselines.her.Normalizer): normalizer for goals\n hidden (int): number of hidden units that should be used in hidden layers\n layers (int): number of hidden layers", "id": "f1276:c0:m0"} {"signature": "@store_argsdef __init__(self, venv, policy, dims, logger, T, rollout_batch_size=,exploit=False, use_target_net=False, compute_Q=False, noise_eps=,random_eps=, history_len=, render=False, monitor=False, **kwargs):", "body": "assert self.T > self.info_keys = [key.replace('', '') for key in dims.keys() if key.startswith('')]self.success_history = deque(maxlen=history_len)self.Q_history = deque(maxlen=history_len)self.n_episodes = self.reset_all_rollouts()self.clear_history()", "docstring": "Rollout worker generates experience by interacting with one or many environments.\n\n Args:\n make_env (function): a factory function that creates a new instance of the environment\n when called\n policy (object): the policy that is used to act\n dims (dict of ints): the dimensions for observations (o), goals (g), and actions (u)\n logger (object): the logger that is used by the rollout worker\n rollout_batch_size (int): the number of parallel rollouts that should be used\n exploit (boolean): whether or not to exploit, i.e. to act optimally according to the\n current policy without any exploration\n use_target_net (boolean): whether or not to use the target net for rollouts\n compute_Q (boolean): whether or not to compute the Q values alongside the actions\n noise_eps (float): scale of the additive Gaussian noise\n random_eps (float): probability of selecting a completely random action\n history_len (int): length of history for statistics smoothing\n render (boolean): whether or not to render the rollouts", "id": "f1277:c0:m0"} {"signature": "def generate_rollouts(self):", "body": "self.reset_all_rollouts()o = np.empty((self.rollout_batch_size, self.dims['']), np.float32) ag = np.empty((self.rollout_batch_size, self.dims['']), np.float32) o[:] = self.initial_oag[:] = self.initial_agobs, achieved_goals, acts, goals, successes = [], [], [], [], []dones = []info_values = [np.empty((self.T - , self.rollout_batch_size, self.dims['' + key]), np.float32) for key in self.info_keys]Qs = []for t in range(self.T):policy_output = self.policy.get_actions(o, ag, self.g,compute_Q=self.compute_Q,noise_eps=self.noise_eps if not self.exploit else ,random_eps=self.random_eps if not self.exploit else ,use_target_net=self.use_target_net)if self.compute_Q:u, Q = policy_outputQs.append(Q)else:u = policy_outputif u.ndim == :u = u.reshape(, -)o_new = np.empty((self.rollout_batch_size, self.dims['']))ag_new = np.empty((self.rollout_batch_size, self.dims['']))success = np.zeros(self.rollout_batch_size)obs_dict_new, _, done, info = self.venv.step(u)o_new = obs_dict_new['']ag_new = obs_dict_new['']success = np.array([i.get('', ) for i in info])if any(done):breakfor i, info_dict in enumerate(info):for idx, key in enumerate(self.info_keys):info_values[idx][t, i] = info[i][key]if np.isnan(o_new).any():self.logger.warn('')self.reset_all_rollouts()return 
self.generate_rollouts()dones.append(done)obs.append(o.copy())achieved_goals.append(ag.copy())successes.append(success.copy())acts.append(u.copy())goals.append(self.g.copy())o[...] = o_newag[...] = ag_newobs.append(o.copy())achieved_goals.append(ag.copy())episode = dict(o=obs,u=acts,g=goals,ag=achieved_goals)for key, value in zip(self.info_keys, info_values):episode[''.format(key)] = valuesuccessful = np.array(successes)[-, :]assert successful.shape == (self.rollout_batch_size,)success_rate = np.mean(successful)self.success_history.append(success_rate)if self.compute_Q:self.Q_history.append(np.mean(Qs))self.n_episodes += self.rollout_batch_sizereturn convert_episode_to_batch_major(episode)", "docstring": "Performs `rollout_batch_size` rollouts in parallel for time horizon `T` with the current\n policy acting on it accordingly.", "id": "f1277:c0:m2"} {"signature": "def clear_history(self):", "body": "self.success_history.clear()self.Q_history.clear()", "docstring": "Clears all histories that are used for statistics", "id": "f1277:c0:m3"} {"signature": "def save_policy(self, path):", "body": "with open(path, '') as f:pickle.dump(self.policy, f)", "docstring": "Pickles the current policy for later inspection.", "id": "f1277:c0:m6"} {"signature": "def logs(self, prefix=''):", "body": "logs = []logs += [('', np.mean(self.success_history))]if self.compute_Q:logs += [('', np.mean(self.Q_history))]logs += [('', self.n_episodes)]if prefix != '' and not prefix.endswith(''):return [(prefix + '' + key, val) for key, val in logs]else:return logs", "docstring": "Generates a dictionary that contains all collected statistics.", "id": "f1277:c0:m7"} {"signature": "def make_sample_her_transitions(replay_strategy, replay_k, reward_fun):", "body": "if replay_strategy == '':future_p = - ( / ( + replay_k))else: future_p = def _sample_her_transitions(episode_batch, batch_size_in_transitions):\"\"\"\"\"\"T = episode_batch[''].shape[]rollout_batch_size = episode_batch[''].shape[]batch_size = batch_size_in_transitionsepisode_idxs = np.random.randint(, rollout_batch_size, batch_size)t_samples = np.random.randint(T, size=batch_size)transitions = {key: episode_batch[key][episode_idxs, t_samples].copy()for key in episode_batch.keys()}her_indexes = np.where(np.random.uniform(size=batch_size) < future_p)future_offset = np.random.uniform(size=batch_size) * (T - t_samples)future_offset = future_offset.astype(int)future_t = (t_samples + + future_offset)[her_indexes]future_ag = episode_batch[''][episode_idxs[her_indexes], future_t]transitions[''][her_indexes] = future_aginfo = {}for key, value in transitions.items():if key.startswith(''):info[key.replace('', '')] = valuereward_params = {k: transitions[k] for k in ['', '']}reward_params[''] = infotransitions[''] = reward_fun(**reward_params)transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[:])for k in transitions.keys()}assert(transitions[''].shape[] == batch_size_in_transitions)return transitionsreturn _sample_her_transitions", "docstring": "Creates a sample function that can be used for HER experience replay.\n\n Args:\n replay_strategy (in ['future', 'none']): the HER replay strategy; if set to 'none',\n regular DDPG experience replay is used\n replay_k (int): the ratio between HER replays and regular replays (e.g. 
k = 4 -> 4 times\n as many HER replays as regular replays are used)\n reward_fun (function): function to re-compute the reward with substituted goals", "id": "f1278:m0"} {"signature": "def __init__(self, size):", "body": "self._storage = []self._maxsize = sizeself._next_idx = ", "docstring": "Create Replay buffer.\n\n Parameters\n ----------\n size: int\n Max number of transitions to store in the buffer. When the buffer\n overflows the old memories are dropped.", "id": "f1281:c0:m0"} {"signature": "def sample(self, batch_size):", "body": "idxes = [random.randint(, len(self._storage) - ) for _ in range(batch_size)]return self._encode_sample(idxes)", "docstring": "Sample a batch of experiences.\n\n Parameters\n ----------\n batch_size: int\n How many transitions to sample.\n\n Returns\n -------\n obs_batch: np.array\n batch of observations\n act_batch: np.array\n batch of actions executed given obs_batch\n rew_batch: np.array\n rewards received as results of executing act_batch\n next_obs_batch: np.array\n next set of observations seen after executing act_batch\n done_mask: np.array\n done_mask[i] = 1 if executing act_batch[i] resulted in\n the end of an episode and 0 otherwise.", "id": "f1281:c0:m4"} {"signature": "def __init__(self, size, alpha):", "body": "super(PrioritizedReplayBuffer, self).__init__(size)assert alpha >= self._alpha = alphait_capacity = while it_capacity < size:it_capacity *= self._it_sum = SumSegmentTree(it_capacity)self._it_min = MinSegmentTree(it_capacity)self._max_priority = ", "docstring": "Create Prioritized Replay buffer.\n\n Parameters\n ----------\n size: int\n Max number of transitions to store in the buffer. When the buffer\n overflows the old memories are dropped.\n alpha: float\n how much prioritization is used\n (0 - no prioritization, 1 - full prioritization)\n\n See Also\n --------\n ReplayBuffer.__init__", "id": "f1281:c1:m0"} {"signature": "def add(self, *args, **kwargs):", "body": "idx = self._next_idxsuper().add(*args, **kwargs)self._it_sum[idx] = self._max_priority ** self._alphaself._it_min[idx] = self._max_priority ** self._alpha", "docstring": "See ReplayBuffer.store_effect", "id": "f1281:c1:m1"} {"signature": "def sample(self, batch_size, beta):", "body": "assert beta > idxes = self._sample_proportional(batch_size)weights = []p_min = self._it_min.min() / self._it_sum.sum()max_weight = (p_min * len(self._storage)) ** (-beta)for idx in idxes:p_sample = self._it_sum[idx] / self._it_sum.sum()weight = (p_sample * len(self._storage)) ** (-beta)weights.append(weight / max_weight)weights = np.array(weights)encoded_sample = self._encode_sample(idxes)return tuple(list(encoded_sample) + [weights, idxes])", "docstring": "Sample a batch of experiences.\n\n compared to ReplayBuffer.sample\n it also returns importance weights and idxes\n of sampled experiences.\n\n\n Parameters\n ----------\n batch_size: int\n How many transitions to sample.\n beta: float\n To what degree to use importance weights\n (0 - no corrections, 1 - full correction)\n\n Returns\n -------\n obs_batch: np.array\n batch of observations\n act_batch: np.array\n batch of actions executed given obs_batch\n rew_batch: np.array\n rewards received as results of executing act_batch\n next_obs_batch: np.array\n next set of observations seen after executing act_batch\n done_mask: np.array\n done_mask[i] = 1 if executing act_batch[i] resulted in\n the end of an episode and 0 otherwise.\n weights: np.array\n Array of shape (batch_size,) and dtype np.float32\n denoting importance weight of each sampled 
transition\n idxes: np.array\n Array of shape (batch_size,) and dtype np.int32\n idexes in buffer of sampled experiences", "id": "f1281:c1:m3"} {"signature": "def update_priorities(self, idxes, priorities):", "body": "assert len(idxes) == len(priorities)for idx, priority in zip(idxes, priorities):assert priority > assert <= idx < len(self._storage)self._it_sum[idx] = priority ** self._alphaself._it_min[idx] = priority ** self._alphaself._max_priority = max(self._max_priority, priority)", "docstring": "Update priorities of sampled transitions.\n\n sets priority of transition at index idxes[i] in buffer\n to priorities[i].\n\n Parameters\n ----------\n idxes: [int]\n List of idxes of sampled transitions\n priorities: [float]\n List of updated priorities corresponding to\n transitions at the sampled idxes denoted by\n variable `idxes`.", "id": "f1281:c1:m4"} {"signature": "def load_act(path):", "body": "return ActWrapper.load_act(path)", "docstring": "Load act function that was returned by learn function.\n\n Parameters\n ----------\n path: str\n path to the act function pickle\n\n Returns\n -------\n act: ActWrapper\n function that takes a batch of observations\n and returns actions.", "id": "f1283:m0"} {"signature": "def learn(env,network,seed=None,lr=,total_timesteps=,buffer_size=,exploration_fraction=,exploration_final_eps=,train_freq=,batch_size=,print_freq=,checkpoint_freq=,checkpoint_path=None,learning_starts=,gamma=,target_network_update_freq=,prioritized_replay=False,prioritized_replay_alpha=,prioritized_replay_beta0=,prioritized_replay_beta_iters=None,prioritized_replay_eps=,param_noise=False,callback=None,load_path=None,**network_kwargs):", "body": "sess = get_session()set_global_seeds(seed)q_func = build_q_func(network, **network_kwargs)observation_space = env.observation_spacedef make_obs_ph(name):return ObservationInput(observation_space, name=name)act, train, update_target, debug = deepq.build_train(make_obs_ph=make_obs_ph,q_func=q_func,num_actions=env.action_space.n,optimizer=tf.train.AdamOptimizer(learning_rate=lr),gamma=gamma,grad_norm_clipping=,param_noise=param_noise)act_params = {'': make_obs_ph,'': q_func,'': env.action_space.n,}act = ActWrapper(act, act_params)if prioritized_replay:replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)if prioritized_replay_beta_iters is None:prioritized_replay_beta_iters = total_timestepsbeta_schedule = LinearSchedule(prioritized_replay_beta_iters,initial_p=prioritized_replay_beta0,final_p=)else:replay_buffer = ReplayBuffer(buffer_size)beta_schedule = Noneexploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),initial_p=,final_p=exploration_final_eps)U.initialize()update_target()episode_rewards = []saved_mean_reward = Noneobs = env.reset()reset = Truewith tempfile.TemporaryDirectory() as td:td = checkpoint_path or tdmodel_file = os.path.join(td, \"\")model_saved = Falseif tf.train.latest_checkpoint(td) is not None:load_variables(model_file)logger.log(''.format(model_file))model_saved = Trueelif load_path is not None:load_variables(load_path)logger.log(''.format(load_path))for t in range(total_timesteps):if callback is not None:if callback(locals(), globals()):breakkwargs = {}if not param_noise:update_eps = exploration.value(t)update_param_noise_threshold = else:update_eps = update_param_noise_threshold = -np.log( - exploration.value(t) + exploration.value(t) / float(env.action_space.n))kwargs[''] = resetkwargs[''] = update_param_noise_thresholdkwargs[''] = Trueaction = 
act(np.array(obs)[None], update_eps=update_eps, **kwargs)[]env_action = actionreset = Falsenew_obs, rew, done, _ = env.step(env_action)replay_buffer.add(obs, action, rew, new_obs, float(done))obs = new_obsepisode_rewards[-] += rewif done:obs = env.reset()episode_rewards.append()reset = Trueif t > learning_starts and t % train_freq == :if prioritized_replay:experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experienceelse:obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)weights, batch_idxes = np.ones_like(rewards), Nonetd_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)if prioritized_replay:new_priorities = np.abs(td_errors) + prioritized_replay_epsreplay_buffer.update_priorities(batch_idxes, new_priorities)if t > learning_starts and t % target_network_update_freq == :update_target()mean_100ep_reward = round(np.mean(episode_rewards[-:-]), )num_episodes = len(episode_rewards)if done and print_freq is not None and len(episode_rewards) % print_freq == :logger.record_tabular(\"\", t)logger.record_tabular(\"\", num_episodes)logger.record_tabular(\"\", mean_100ep_reward)logger.record_tabular(\"\", int( * exploration.value(t)))logger.dump_tabular()if (checkpoint_freq is not None and t > learning_starts andnum_episodes > and t % checkpoint_freq == ):if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:if print_freq is not None:logger.log(\"\".format(saved_mean_reward, mean_100ep_reward))save_variables(model_file)model_saved = Truesaved_mean_reward = mean_100ep_rewardif model_saved:if print_freq is not None:logger.log(\"\".format(saved_mean_reward))load_variables(model_file)return act", "docstring": "Train a deepq model.\n\n Parameters\n -------\n env: gym.Env\n environment to train on\n network: string or a function\n neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models\n (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which\n will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)\n seed: int or None\n prng seed. The runs with the same seed \"should\" give the same results. If None, no seeding is used.\n lr: float\n learning rate for adam optimizer\n total_timesteps: int\n number of env steps to optimizer for\n buffer_size: int\n size of the replay buffer\n exploration_fraction: float\n fraction of entire training period over which the exploration rate is annealed\n exploration_final_eps: float\n final value of random action probability\n train_freq: int\n update the model every `train_freq` steps.\n set to None to disable printing\n batch_size: int\n size of a batched sampled from replay buffer for training\n print_freq: int\n how often to print out training progress\n set to None to disable printing\n checkpoint_freq: int\n how often to save the model. This is so that the best version is restored\n at the end of the training. 
If you do not wish to restore the best version at\n the end of the training set this variable to None.\n learning_starts: int\n how many steps of the model to collect transitions for before learning starts\n gamma: float\n discount factor\n target_network_update_freq: int\n update the target network every `target_network_update_freq` steps.\n prioritized_replay: True\n if True prioritized replay buffer will be used.\n prioritized_replay_alpha: float\n alpha parameter for prioritized replay buffer\n prioritized_replay_beta0: float\n initial value of beta for prioritized replay buffer\n prioritized_replay_beta_iters: int\n number of iterations over which beta will be annealed from initial value\n to 1.0. If set to None equals to total_timesteps.\n prioritized_replay_eps: float\n epsilon to add to the TD errors when updating priorities.\n param_noise: bool\n whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)\n callback: (locals, globals) -> None\n function called at every steps with state of the algorithm.\n If callback returns true training stops.\n load_path: str\n path to load the model from. (default: None)\n **network_kwargs\n additional keyword arguments to pass to the network builder.\n\n Returns\n -------\n act: ActWrapper\n Wrapper over act function. Adds ability to save it and load it.\n See header of baselines/deepq/categorical.py for details on the act function.", "id": "f1283:m1"} {"signature": "def save_act(self, path=None):", "body": "if path is None:path = os.path.join(logger.get_dir(), \"\")with tempfile.TemporaryDirectory() as td:save_variables(os.path.join(td, \"\"))arc_name = os.path.join(td, \"\")with zipfile.ZipFile(arc_name, '') as zipf:for root, dirs, files in os.walk(td):for fname in files:file_path = os.path.join(root, fname)if file_path != arc_name:zipf.write(file_path, os.path.relpath(file_path, td))with open(arc_name, \"\") as f:model_data = f.read()with open(path, \"\") as f:cloudpickle.dump((model_data, self._act_params), f)", "docstring": "Save model to a pickle located at `path`", "id": "f1283:c0:m4"} {"signature": "def __init__(self, name=\"\"):", "body": "self.name = name", "docstring": "Generalized Tensorflow placeholder. The main differences are:\n - possibly uses multiple placeholders internally and returns multiple values\n - can apply light postprocessing to the value feed to placeholder.", "id": "f1284:c0:m0"} {"signature": "def get(self):", "body": "raise NotImplementedError", "docstring": "Return the tf variable(s) representing the possibly postprocessed value\n of placeholder(s).", "id": "f1284:c0:m1"} {"signature": "def make_feed_dict(self, data):", "body": "raise NotImplementedError", "docstring": "Given data input it to the placeholder(s).", "id": "f1284:c0:m2"} {"signature": "def __init__(self, placeholder):", "body": "super().__init__(placeholder.name)self._placeholder = placeholder", "docstring": "Wrapper for regular tensorflow placeholder.", "id": "f1284:c1:m0"} {"signature": "def __init__(self, observation_space, name=None):", "body": "inpt, self.processed_inpt = observation_input(observation_space, name=name)super().__init__(inpt)", "docstring": "Creates an input placeholder tailored to a specific observation space\n\n Parameters\n ----------\n\n observation_space:\n observation space of the environment. 
Should be one of the gym.spaces types\n name: str\n tensorflow name of the underlying placeholder", "id": "f1284:c2:m0"} {"signature": "def scope_vars(scope, trainable_only=False):", "body": "return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.GraphKeys.GLOBAL_VARIABLES,scope=scope if isinstance(scope, str) else scope.name)", "docstring": "Get variables inside a scope\nThe scope can be specified as a string\nParameters\n----------\nscope: str or VariableScope\n scope in which the variables reside.\ntrainable_only: bool\n whether or not to return only the variables that were marked as trainable.\nReturns\n-------\nvars: [tf.Variable]\n list of variables in `scope`.", "id": "f1285:m0"} {"signature": "def scope_name():", "body": "return tf.get_variable_scope().name", "docstring": "Returns the name of current scope as a string, e.g. deepq/q_func", "id": "f1285:m1"} {"signature": "def absolute_scope_name(relative_scope_name):", "body": "return scope_name() + \"\" + relative_scope_name", "docstring": "Appends parent scope name to `relative_scope_name`", "id": "f1285:m2"} {"signature": "def build_act(make_obs_ph, q_func, num_actions, scope=\"\", reuse=None):", "body": "with tf.variable_scope(scope, reuse=reuse):observations_ph = make_obs_ph(\"\")stochastic_ph = tf.placeholder(tf.bool, (), name=\"\")update_eps_ph = tf.placeholder(tf.float32, (), name=\"\")eps = tf.get_variable(\"\", (), initializer=tf.constant_initializer())q_values = q_func(observations_ph.get(), num_actions, scope=\"\")deterministic_actions = tf.argmax(q_values, axis=)batch_size = tf.shape(observations_ph.get())[]random_actions = tf.random_uniform(tf.stack([batch_size]), minval=, maxval=num_actions, dtype=tf.int64)chose_random = tf.random_uniform(tf.stack([batch_size]), minval=, maxval=, dtype=tf.float32) < epsstochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)update_eps_expr = eps.assign(tf.cond(update_eps_ph >= , lambda: update_eps_ph, lambda: eps))_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],outputs=output_actions,givens={update_eps_ph: -, stochastic_ph: True},updates=[update_eps_expr])def act(ob, stochastic=True, update_eps=-):return _act(ob, stochastic, update_eps)return act", "docstring": "Creates the act function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.placeholder or TfInput\n a function that take a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. 
To be able to reuse the scope must be given.\n\n Returns\n -------\n act: (tf.Variable, bool, float) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.", "id": "f1285:m4"} {"signature": "def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=\"\", reuse=None, param_noise_filter_func=None):", "body": "if param_noise_filter_func is None:param_noise_filter_func = default_param_noise_filterwith tf.variable_scope(scope, reuse=reuse):observations_ph = make_obs_ph(\"\")stochastic_ph = tf.placeholder(tf.bool, (), name=\"\")update_eps_ph = tf.placeholder(tf.float32, (), name=\"\")update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name=\"\")update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name=\"\")reset_ph = tf.placeholder(tf.bool, (), name=\"\")eps = tf.get_variable(\"\", (), initializer=tf.constant_initializer())param_noise_scale = tf.get_variable(\"\", (), initializer=tf.constant_initializer(), trainable=False)param_noise_threshold = tf.get_variable(\"\", (), initializer=tf.constant_initializer(), trainable=False)q_values = q_func(observations_ph.get(), num_actions, scope=\"\")q_values_perturbed = q_func(observations_ph.get(), num_actions, scope=\"\")def perturb_vars(original_scope, perturbed_scope):all_vars = scope_vars(absolute_scope_name(original_scope))all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))assert len(all_vars) == len(all_perturbed_vars)perturb_ops = []for var, perturbed_var in zip(all_vars, all_perturbed_vars):if param_noise_filter_func(perturbed_var):op = tf.assign(perturbed_var, var + tf.random_normal(shape=tf.shape(var), mean=, stddev=param_noise_scale))else:op = tf.assign(perturbed_var, var)perturb_ops.append(op)assert len(perturb_ops) == len(all_vars)return tf.group(*perturb_ops)q_values_adaptive = q_func(observations_ph.get(), num_actions, scope=\"\")perturb_for_adaption = perturb_vars(original_scope=\"\", perturbed_scope=\"\")kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-)mean_kl = tf.reduce_mean(kl)def update_scale():with tf.control_dependencies([perturb_for_adaption]):update_scale_expr = tf.cond(mean_kl < param_noise_threshold,lambda: param_noise_scale.assign(param_noise_scale * ),lambda: param_noise_scale.assign(param_noise_scale / ),)return update_scale_exprupdate_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= ,lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold))deterministic_actions = tf.argmax(q_values_perturbed, axis=)batch_size = tf.shape(observations_ph.get())[]random_actions = tf.random_uniform(tf.stack([batch_size]), minval=, maxval=num_actions, dtype=tf.int64)chose_random = tf.random_uniform(tf.stack([batch_size]), minval=, maxval=, dtype=tf.float32) < epsstochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)update_eps_expr = eps.assign(tf.cond(update_eps_ph >= , lambda: update_eps_ph, lambda: eps))updates = [update_eps_expr,tf.cond(reset_ph, lambda: perturb_vars(original_scope=\"\", perturbed_scope=\"\"), lambda: tf.group(*[])),tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(, trainable=False)),update_param_noise_threshold_expr,]_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, 
update_param_noise_threshold_ph, update_param_noise_scale_ph],outputs=output_actions,givens={update_eps_ph: -, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False},updates=updates)def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=-):return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)return act", "docstring": "Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):\n\n Parameters\n ----------\n make_obs_ph: str -> tf.placeholder or TfInput\n a function that take a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. To be able to reuse the scope must be given.\n param_noise_filter_func: tf.Variable -> bool\n function that decides whether or not a variable should be perturbed. Only applicable\n if param_noise is True. If set to None, default_param_noise_filter is used by default.\n\n Returns\n -------\n act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.", "id": "f1285:m5"} {"signature": "def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=,double_q=True, scope=\"\", reuse=None, param_noise=False, param_noise_filter_func=None):", "body": "if param_noise:act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse,param_noise_filter_func=param_noise_filter_func)else:act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)with tf.variable_scope(scope, reuse=reuse):obs_t_input = make_obs_ph(\"\")act_t_ph = tf.placeholder(tf.int32, [None], name=\"\")rew_t_ph = tf.placeholder(tf.float32, [None], name=\"\")obs_tp1_input = make_obs_ph(\"\")done_mask_ph = tf.placeholder(tf.float32, [None], name=\"\")importance_weights_ph = tf.placeholder(tf.float32, [None], name=\"\")q_t = q_func(obs_t_input.get(), num_actions, scope=\"\", reuse=True) q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + \"\")q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope=\"\")target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + \"\")q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), )if double_q:q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope=\"\", reuse=True)q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, )q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), )else:q_tp1_best = tf.reduce_max(q_tp1, )q_tp1_best_masked = ( - done_mask_ph) * q_tp1_bestq_t_selected_target = rew_t_ph + gamma * q_tp1_best_maskedtd_error = q_t_selected - tf.stop_gradient(q_t_selected_target)errors = U.huber_loss(td_error)weighted_error = tf.reduce_mean(importance_weights_ph * errors)if 
grad_norm_clipping is not None:gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)for i, (grad, var) in enumerate(gradients):if grad is not None:gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)optimize_expr = optimizer.apply_gradients(gradients)else:optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)update_target_expr = []for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),sorted(target_q_func_vars, key=lambda v: v.name)):update_target_expr.append(var_target.assign(var))update_target_expr = tf.group(*update_target_expr)train = U.function(inputs=[obs_t_input,act_t_ph,rew_t_ph,obs_tp1_input,done_mask_ph,importance_weights_ph],outputs=td_error,updates=[optimize_expr])update_target = U.function([], [], updates=[update_target_expr])q_values = U.function([obs_t_input], q_t)return act_f, train, update_target, {'': q_values}", "docstring": "Creates the train function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.placeholder or TfInput\n a function that takes a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions\n reuse: bool\n whether or not to reuse the graph variables\n optimizer: tf.train.Optimizer\n optimizer to use for the Q-learning objective.\n grad_norm_clipping: float or None\n clip gradient norms to this value. If None no clipping is performed.\n gamma: float\n discount rate.\n double_q: bool\n if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).\n In general it is a good idea to keep it enabled.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. To be able to reuse the scope must be given.\n param_noise: bool\n whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)\n param_noise_filter_func: tf.Variable -> bool\n function that decides whether or not a variable should be perturbed. Only applicable\n if param_noise is True. 
If set to None, default_param_noise_filter is used by default.\n\n Returns\n -------\n act: (tf.Variable, bool, float) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n train: (object, np.array, np.array, object, np.array, np.array) -> np.array\n optimize the error in Bellman's equation.\n` See the top of the file for details.\n update_target: () -> ()\n copy the parameters from optimized Q function to the target Q function.\n` See the top of the file for details.\n debug: {str: function}\n a bunch of functions to print debug data like q_values.", "id": "f1285:m6"} {"signature": "def mlp(hiddens=[], layer_norm=False):", "body": "return lambda *args, **kwargs: _mlp(hiddens, layer_norm=layer_norm, *args, **kwargs)", "docstring": "This model takes as input an observation and returns values of all actions.\n\n Parameters\n ----------\n hiddens: [int]\n list of sizes of hidden layers\n layer_norm: bool\n if true applies layer normalization for every layer\n as described in https://arxiv.org/abs/1607.06450\n\n Returns\n -------\n q_func: function\n q_function for DQN algorithm.", "id": "f1286:m1"} {"signature": "def cnn_to_mlp(convs, hiddens, dueling=False, layer_norm=False):", "body": "return lambda *args, **kwargs: _cnn_to_mlp(convs, hiddens, dueling, layer_norm=layer_norm, *args, **kwargs)", "docstring": "This model takes as input an observation and returns values of all actions.\n\n Parameters\n ----------\n convs: [(int, int, int)]\n list of convolutional layers in form of\n (num_outputs, kernel_size, stride)\n hiddens: [int]\n list of sizes of hidden layers\n dueling: bool\n if true double the output MLP to compute a baseline\n for action scores\n layer_norm: bool\n if true applies layer normalization for every layer\n as described in https://arxiv.org/abs/1607.06450\n\n Returns\n -------\n q_func: function\n q_function for DQN algorithm.", "id": "f1286:m3"} {"signature": "def model(inpt, num_actions, scope, reuse=False):", "body": "with tf.variable_scope(scope, reuse=reuse):out = inptout = layers.fully_connected(out, num_outputs=, activation_fn=tf.nn.tanh)out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)return out", "docstring": "This model takes as input an observation and returns values of all actions.", "id": "f1292:m0"} {"signature": "def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=, ent_coef=, lr=,vf_coef=, max_grad_norm=, gamma=, lam=,log_interval=, nminibatches=, noptepochs=, cliprange=,save_interval=, load_path=None, model_fn=None, **network_kwargs):", "body": "set_global_seeds(seed)if isinstance(lr, float): lr = constfn(lr)else: assert callable(lr)if isinstance(cliprange, float): cliprange = constfn(cliprange)else: assert callable(cliprange)total_timesteps = int(total_timesteps)policy = build_policy(env, network, **network_kwargs)nenvs = env.num_envsob_space = env.observation_spaceac_space = env.action_spacenbatch = nenvs * nstepsnbatch_train = nbatch // nminibatchesif model_fn is None:from baselines.ppo2.model import Modelmodel_fn = Modelmodel = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,max_grad_norm=max_grad_norm)if load_path is not None:model.load(load_path)runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)if eval_env is not None:eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= 
lam)epinfobuf = deque(maxlen=)if eval_env is not None:eval_epinfobuf = deque(maxlen=)tfirststart = time.perf_counter()nupdates = total_timesteps//nbatchfor update in range(, nupdates+):assert nbatch % nminibatches == tstart = time.perf_counter()frac = - (update - ) / nupdateslrnow = lr(frac)cliprangenow = cliprange(frac)obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() if eval_env is not None:eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() epinfobuf.extend(epinfos)if eval_env is not None:eval_epinfobuf.extend(eval_epinfos)mblossvals = []if states is None: inds = np.arange(nbatch)for _ in range(noptepochs):np.random.shuffle(inds)for start in range(, nbatch, nbatch_train):end = start + nbatch_trainmbinds = inds[start:end]slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))mblossvals.append(model.train(lrnow, cliprangenow, *slices))else: assert nenvs % nminibatches == envsperbatch = nenvs // nminibatchesenvinds = np.arange(nenvs)flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)for _ in range(noptepochs):np.random.shuffle(envinds)for start in range(, nenvs, envsperbatch):end = start + envsperbatchmbenvinds = envinds[start:end]mbflatinds = flatinds[mbenvinds].ravel()slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))mbstates = states[mbenvinds]mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))lossvals = np.mean(mblossvals, axis=)tnow = time.perf_counter()fps = int(nbatch / (tnow - tstart))if update % log_interval == or update == :ev = explained_variance(values, returns)logger.logkv(\"\", update*nsteps)logger.logkv(\"\", update)logger.logkv(\"\", update*nbatch)logger.logkv(\"\", fps)logger.logkv(\"\", float(ev))logger.logkv('', safemean([epinfo[''] for epinfo in epinfobuf]))logger.logkv('', safemean([epinfo[''] for epinfo in epinfobuf]))if eval_env is not None:logger.logkv('', safemean([epinfo[''] for epinfo in eval_epinfobuf]) )logger.logkv('', safemean([epinfo[''] for epinfo in eval_epinfobuf]) )logger.logkv('', tnow - tfirststart)for (lossval, lossname) in zip(lossvals, model.loss_names):logger.logkv(lossname, lossval)if MPI is None or MPI.COMM_WORLD.Get_rank() == :logger.dumpkvs()if save_interval and (update % save_interval == or update == ) and logger.get_dir() and (MPI is None or MPI.COMM_WORLD.Get_rank() == ):checkdir = osp.join(logger.get_dir(), '')os.makedirs(checkdir, exist_ok=True)savepath = osp.join(checkdir, ''%update)print('', savepath)model.save(savepath)return model", "docstring": "Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)\n\nParameters:\n----------\n\nnetwork: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)\n specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns\n tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward\n neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.\n See common/models.py/lstm for more details on using recurrent nets in policies\n\nenv: baselines.common.vec_env.VecEnv environment. 
Needs to be vectorized for parallel environment simulation.\n The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.\n\n\nnsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where\n nenv is number of environment copies simulated in parallel)\n\ntotal_timesteps: int number of timesteps (i.e. number of actions taken in the environment)\n\nent_coef: float policy entropy coefficient in the optimization objective\n\nlr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the\n training and 0 is the end of the training.\n\nvf_coef: float value function loss coefficient in the optimization objective\n\nmax_grad_norm: float or None gradient norm clipping coefficient\n\ngamma: float discounting factor\n\nlam: float advantage estimation discounting factor (lambda in the paper)\n\nlog_interval: int number of timesteps between logging events\n\nnminibatches: int number of training minibatches per update. For recurrent policies,\n should be smaller or equal than number of environments run in parallel.\n\nnoptepochs: int number of training epochs per update\n\ncliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training\n and 0 is the end of the training\n\nsave_interval: int number of timesteps between saving events\n\nload_path: str path to load the model from\n\n**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network\n For instance, 'mlp' network architecture has arguments num_hidden and num_layers.", "id": "f1297:m1"} {"signature": "def sf01(arr):", "body": "s = arr.shapereturn arr.swapaxes(, ).reshape(s[] * s[], *s[:])", "docstring": "swap and then flatten axes 0 and 1", "id": "f1298:m0"} {"signature": "def get_task(benchmark, env_id):", "body": "return next(filter(lambda task: task[''] == env_id, benchmark['']), None)", "docstring": "Get a task by env_id. 
Return None if the benchmark doesn't have the env", "id": "f1300:m3"} {"signature": "def learn(*,network,env,total_timesteps,timesteps_per_batch=, max_kl=,cg_iters=,gamma=,lam=, seed=None,ent_coef=,cg_damping=,vf_stepsize=,vf_iters =,max_episodes=, max_iters=, callback=None,load_path=None,**network_kwargs):", "body": "if MPI is not None:nworkers = MPI.COMM_WORLD.Get_size()rank = MPI.COMM_WORLD.Get_rank()else:nworkers = rank = cpus_per_worker = U.get_session(config=tf.ConfigProto(allow_soft_placement=True,inter_op_parallelism_threads=cpus_per_worker,intra_op_parallelism_threads=cpus_per_worker))policy = build_policy(env, network, value_network='', **network_kwargs)set_global_seeds(seed)np.set_printoptions(precision=)ob_space = env.observation_spaceac_space = env.action_spaceob = observation_placeholder(ob_space)with tf.variable_scope(\"\"):pi = policy(observ_placeholder=ob)with tf.variable_scope(\"\"):oldpi = policy(observ_placeholder=ob)atarg = tf.placeholder(dtype=tf.float32, shape=[None]) ret = tf.placeholder(dtype=tf.float32, shape=[None]) ac = pi.pdtype.sample_placeholder([None])kloldnew = oldpi.pd.kl(pi.pd)ent = pi.pd.entropy()meankl = tf.reduce_mean(kloldnew)meanent = tf.reduce_mean(ent)entbonus = ent_coef * meanentvferr = tf.reduce_mean(tf.square(pi.vf - ret))ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) surrgain = tf.reduce_mean(ratio * atarg)optimgain = surrgain + entbonuslosses = [optimgain, meankl, entbonus, surrgain, meanent]loss_names = [\"\", \"\", \"\", \"\", \"\"]dist = meanklall_var_list = get_trainable_variables(\"\")var_list = get_pi_trainable_variables(\"\")vf_var_list = get_vf_trainable_variables(\"\")vfadam = MpiAdam(vf_var_list)get_flat = U.GetFlat(var_list)set_from_flat = U.SetFromFlat(var_list)klgrads = tf.gradients(dist, var_list)flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name=\"\")shapes = [var.get_shape().as_list() for var in var_list]start = tangents = []for shape in shapes:sz = U.intprod(shape)tangents.append(tf.reshape(flat_tangent[start:start+sz], shape))start += szgvp = tf.add_n([tf.reduce_sum(g*tangent) for (g, tangent) in zipsame(klgrads, tangents)]) fvp = U.flatgrad(gvp, var_list)assign_old_eq_new = U.function([],[], updates=[tf.assign(oldv, newv)for (oldv, newv) in zipsame(get_variables(\"\"), get_variables(\"\"))])compute_losses = U.function([ob, ac, atarg], losses)compute_lossandgrad = U.function([ob, ac, atarg], losses + [U.flatgrad(optimgain, var_list)])compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp)compute_vflossandgrad = U.function([ob, ret], U.flatgrad(vferr, vf_var_list))@contextmanagerdef timed(msg):if rank == :print(colorize(msg, color=''))tstart = time.time()yieldprint(colorize(\"\"%(time.time() - tstart), color=''))else:yielddef allmean(x):assert isinstance(x, np.ndarray)if MPI is not None:out = np.empty_like(x)MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)out /= nworkerselse:out = np.copy(x)return outU.initialize()if load_path is not None:pi.load(load_path)th_init = get_flat()if MPI is not None:MPI.COMM_WORLD.Bcast(th_init, root=)set_from_flat(th_init)vfadam.sync()print(\"\", th_init.sum(), flush=True)seg_gen = traj_segment_generator(pi, env, timesteps_per_batch, stochastic=True)episodes_so_far = timesteps_so_far = iters_so_far = tstart = time.time()lenbuffer = deque(maxlen=) rewbuffer = deque(maxlen=) if sum([max_iters>, total_timesteps>, max_episodes>])==:return piassert sum([max_iters>, total_timesteps>, max_episodes>]) < ,''while True:if callback: callback(locals(), globals())if total_timesteps and 
timesteps_so_far >= total_timesteps:breakelif max_episodes and episodes_so_far >= max_episodes:breakelif max_iters and iters_so_far >= max_iters:breaklogger.log(\"\"%iters_so_far)with timed(\"\"):seg = seg_gen.__next__()add_vtarg_and_adv(seg, gamma, lam)ob, ac, atarg, tdlamret = seg[\"\"], seg[\"\"], seg[\"\"], seg[\"\"]vpredbefore = seg[\"\"] atarg = (atarg - atarg.mean()) / atarg.std() if hasattr(pi, \"\"): pi.ret_rms.update(tdlamret)if hasattr(pi, \"\"): pi.ob_rms.update(ob) args = seg[\"\"], seg[\"\"], atargfvpargs = [arr[::] for arr in args]def fisher_vector_product(p):return allmean(compute_fvp(p, *fvpargs)) + cg_damping * passign_old_eq_new() with timed(\"\"):*lossbefore, g = compute_lossandgrad(*args)lossbefore = allmean(np.array(lossbefore))g = allmean(g)if np.allclose(g, ):logger.log(\"\")else:with timed(\"\"):stepdir = cg(fisher_vector_product, g, cg_iters=cg_iters, verbose=rank==)assert np.isfinite(stepdir).all()shs = *stepdir.dot(fisher_vector_product(stepdir))lm = np.sqrt(shs / max_kl)fullstep = stepdir / lmexpectedimprove = g.dot(fullstep)surrbefore = lossbefore[]stepsize = thbefore = get_flat()for _ in range():thnew = thbefore + fullstep * stepsizeset_from_flat(thnew)meanlosses = surr, kl, *_ = allmean(np.array(compute_losses(*args)))improve = surr - surrbeforelogger.log(\"\"%(expectedimprove, improve))if not np.isfinite(meanlosses).all():logger.log(\"\")elif kl > max_kl * :logger.log(\"\")elif improve < :logger.log(\"\")else:logger.log(\"\")breakstepsize *= else:logger.log(\"\")set_from_flat(thbefore)if nworkers > and iters_so_far % == :paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), vfadam.getflat().sum())) assert all(np.allclose(ps, paramsums[]) for ps in paramsums[:])for (lossname, lossval) in zip(loss_names, meanlosses):logger.record_tabular(lossname, lossval)with timed(\"\"):for _ in range(vf_iters):for (mbob, mbret) in dataset.iterbatches((seg[\"\"], seg[\"\"]),include_final_partial_batch=False, batch_size=):g = allmean(compute_vflossandgrad(mbob, mbret))vfadam.update(g, vf_stepsize)logger.record_tabular(\"\", explained_variance(vpredbefore, tdlamret))lrlocal = (seg[\"\"], seg[\"\"]) if MPI is not None:listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) else:listoflrpairs = [lrlocal]lens, rews = map(flatten_lists, zip(*listoflrpairs))lenbuffer.extend(lens)rewbuffer.extend(rews)logger.record_tabular(\"\", np.mean(lenbuffer))logger.record_tabular(\"\", np.mean(rewbuffer))logger.record_tabular(\"\", len(lens))episodes_so_far += len(lens)timesteps_so_far += sum(lens)iters_so_far += logger.record_tabular(\"\", episodes_so_far)logger.record_tabular(\"\", timesteps_so_far)logger.record_tabular(\"\", time.time() - tstart)if rank==:logger.dump_tabular()return pi", "docstring": "learn a policy function with TRPO algorithm\n\nParameters:\n----------\n\nnetwork neural network to learn. 
Can be either string ('mlp', 'cnn', 'lstm', 'lnlstm' for basic types)\n or function that takes input placeholder and returns tuple (output, None) for feedforward nets\n or (output, (state_placeholder, state_output, mask_placeholder)) for recurrent nets\n\nenv environment (one of the gym environments or wrapped via baselines.common.vec_env.VecEnv-type class\n\ntimesteps_per_batch timesteps per gradient estimation batch\n\nmax_kl max KL divergence between old policy and new policy ( KL(pi_old || pi) )\n\nent_coef coefficient of policy entropy term in the optimization objective\n\ncg_iters number of iterations of conjugate gradient algorithm\n\ncg_damping conjugate gradient damping\n\nvf_stepsize learning rate for adam optimizer used to optimie value function loss\n\nvf_iters number of iterations of value function optimization iterations per each policy optimization step\n\ntotal_timesteps max number of timesteps\n\nmax_episodes max number of episodes\n\nmax_iters maximum number of policy optimization iterations\n\ncallback function to be called with (locals(), globals()) each policy optimization step\n\nload_path str, path to load the model from (default: None, i.e. no model is loaded)\n\n**network_kwargs keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network\n\nReturns:\n-------\n\nlearnt model", "id": "f1305:m2"} {"signature": "def add_vtarg_and_adv(seg, gamma, lam):", "body": "new = np.append(seg[\"\"], ) vpred = np.append(seg[\"\"], seg[\"\"])T = len(seg[\"\"])seg[\"\"] = gaelam = np.empty(T, '')rew = seg[\"\"]lastgaelam = for t in reversed(range(T)):nonterminal = -new[t+]delta = rew[t] + gamma * vpred[t+] * nonterminal - vpred[t]gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelamseg[\"\"] = seg[\"\"] + seg[\"\"]", "docstring": "Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)", "id": "f1306:m1"} {"signature": "def parse_cmdline_kwargs(args):", "body": "def parse(v):assert isinstance(v, str)try:return eval(v)except (NameError, SyntaxError):return vreturn {k: parse(v) for k,v in parse_unknown_args(args).items()}", "docstring": "convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible", "id": "f1313:m7"} {"signature": "def q_retrace(R, D, q_i, v, rho_i, nenvs, nsteps, gamma):", "body": "rho_bar = batch_to_seq(tf.minimum(, rho_i), nenvs, nsteps, True) rs = batch_to_seq(R, nenvs, nsteps, True) ds = batch_to_seq(D, nenvs, nsteps, True) q_is = batch_to_seq(q_i, nenvs, nsteps, True)vs = batch_to_seq(v, nenvs, nsteps + , True)v_final = vs[-]qret = v_finalqrets = []for i in range(nsteps - , -, -):check_shape([qret, ds[i], rs[i], rho_bar[i], q_is[i], vs[i]], [[nenvs]] * )qret = rs[i] + gamma * qret * ( - ds[i])qrets.append(qret)qret = (rho_bar[i] * (qret - q_is[i])) + vs[i]qrets = qrets[::-]qret = seq_to_batch(qrets, flat=True)return qret", "docstring": "Calculates q_retrace targets\n\n:param R: Rewards\n:param D: Dones\n:param q_i: Q values for actions taken\n:param v: V values\n:param rho_i: Importance weight for each action\n:return: Q_retrace values", "id": "f1315:m1"} {"signature": "def learn(network, env, seed=None, nsteps=, total_timesteps=int(), q_coef=, ent_coef=,max_grad_norm=, lr=, lrschedule='', rprop_epsilon=, rprop_alpha=, gamma=,log_interval=, buffer_size=, replay_ratio=, replay_start=, c=,trust_region=True, alpha=, delta=, load_path=None, **network_kwargs):", "body": 
"print(\"\")print(locals())set_global_seeds(seed)if not isinstance(env, VecFrameStack):env = VecFrameStack(env, )policy = build_policy(env, network, estimate_q=True, **network_kwargs)nenvs = env.num_envsob_space = env.observation_spaceac_space = env.action_spacenstack = env.nstackmodel = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps,ent_coef=ent_coef, q_coef=q_coef, gamma=gamma,max_grad_norm=max_grad_norm, lr=lr, rprop_alpha=rprop_alpha, rprop_epsilon=rprop_epsilon,total_timesteps=total_timesteps, lrschedule=lrschedule, c=c,trust_region=trust_region, alpha=alpha, delta=delta)runner = Runner(env=env, model=model, nsteps=nsteps)if replay_ratio > :buffer = Buffer(env=env, nsteps=nsteps, size=buffer_size)else:buffer = Nonenbatch = nenvs*nstepsacer = Acer(runner, model, buffer, log_interval)acer.tstart = time.time()for acer.steps in range(, total_timesteps, nbatch): acer.call(on_policy=True)if replay_ratio > and buffer.has_atleast(replay_start):n = np.random.poisson(replay_ratio)for _ in range(n):acer.call(on_policy=False) return model", "docstring": "Main entrypoint for ACER (Actor-Critic with Experience Replay) algorithm (https://arxiv.org/pdf/1611.01224.pdf)\nTrain an agent with given network architecture on a given environment using ACER.\n\nParameters:\n----------\n\nnetwork: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)\n specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns\n tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward\n neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.\n See baselines.common/policies.py/lstm for more details on using recurrent nets in policies\n\nenv: environment. Needs to be vectorized for parallel environment simulation.\n The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.\n\nnsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where\n nenv is number of environment copies simulated in parallel) (default: 20)\n\nnstack: int, size of the frame stack, i.e. number of the frames passed to the step model. Frames are stacked along channel dimension\n (last image dimension) (default: 4)\n\ntotal_timesteps: int, number of timesteps (i.e. number of actions taken in the environment) (default: 80M)\n\nq_coef: float, value function loss coefficient in the optimization objective (analog of vf_coef for other actor-critic methods)\n\nent_coef: float, policy entropy coefficient in the optimization objective (default: 0.01)\n\nmax_grad_norm: float, gradient norm clipping coefficient. If set to None, no clipping. (default: 10),\n\nlr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4)\n\nlrschedule: schedule of learning rate. 
Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and\n returns fraction of the learning rate (specified as lr) as output\n\nrprop_epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5)\n\nrprop_alpha: float, RMSProp decay parameter (default: 0.99)\n\ngamma: float, reward discounting factor (default: 0.99)\n\nlog_interval: int, number of updates between logging events (default: 100)\n\nbuffer_size: int, size of the replay buffer (default: 50k)\n\nreplay_ratio: int, how many (on average) batches of data to sample from the replay buffer per batch collected from the environment (default: 4)\n\nreplay_start: int, the sampling from the replay buffer does not start until replay buffer has at least that many samples (default: 10k)\n\nc: float, importance weight clipping factor (default: 10)\n\ntrust_region: bool, whether or not the algorithm estimates the gradient KL divergence between the old and updated policy and uses it to determine step size (default: True)\n\ndelta: float, max KL divergence between the old policy and updated policy (default: 1)\n\nalpha: float, momentum factor in the Polyak (exponential moving average) averaging of the model parameters (default: 0.99)\n\nload_path: str, path to load the model from (default: None)\n\n**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network\n For instance, 'mlp' network architecture has arguments num_hidden and num_layers.", "id": "f1315:m2"} {"signature": "def learn(network,env,seed=None,nsteps=,total_timesteps=int(),vf_coef=,ent_coef=,max_grad_norm=,lr=,lrschedule='',epsilon=,alpha=,gamma=,log_interval=,load_path=None,**network_kwargs):", "body": "set_global_seeds(seed)nenvs = env.num_envspolicy = build_policy(env, network, **network_kwargs)model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule)if load_path is not None:model.load(load_path)runner = Runner(env, model, nsteps=nsteps, gamma=gamma)epinfobuf = deque(maxlen=)nbatch = nenvs*nstepststart = time.time()for update in range(, total_timesteps//nbatch+):obs, states, rewards, masks, actions, values, epinfos = runner.run()epinfobuf.extend(epinfos)policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)nseconds = time.time()-tstartfps = int((update*nbatch)/nseconds)if update % log_interval == or update == :ev = explained_variance(values, rewards)logger.record_tabular(\"\", update)logger.record_tabular(\"\", update*nbatch)logger.record_tabular(\"\", fps)logger.record_tabular(\"\", float(policy_entropy))logger.record_tabular(\"\", float(value_loss))logger.record_tabular(\"\", float(ev))logger.record_tabular(\"\", safemean([epinfo[''] for epinfo in epinfobuf]))logger.record_tabular(\"\", safemean([epinfo[''] for epinfo in epinfobuf]))logger.dump_tabular()return model", "docstring": "Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm.\n\nParameters:\n-----------\n\nnetwork: policy network architecture. 
Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)\n specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns\n tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward\n neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.\n See baselines.common/policies.py/lstm for more details on using recurrent nets in policies\n\n\nenv: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py)\n\n\nseed: seed to make random number sequence in the algorithm reproducible. By default is None which means seed from system noise generator (not reproducible)\n\nnsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where\n nenv is number of environment copies simulated in parallel)\n\ntotal_timesteps: int, total number of timesteps to train on (default: 80M)\n\nvf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5)\n\nent_coef: float, coefficient in front of the policy entropy in the total loss function (default: 0.01)\n\nmax_gradient_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5)\n\nlr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4)\n\nlrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and\n returns fraction of the learning rate (specified as lr) as output\n\nepsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5)\n\nalpha: float, RMSProp decay parameter (default: 0.99)\n\ngamma: float, reward discounting parameter (default: 0.99)\n\nlog_interval: int, specifies how frequently the logs are printed out (default: 100)\n\n**network_kwargs: keyword arguments to the policy / network builder. 
See baselines.common/policies.py/build_policy and arguments to a particular type of network\n For instance, 'mlp' network architecture has arguments num_hidden and num_layers.", "id": "f1319:m0"} {"signature": "def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):", "body": "if episode_life:env = EpisodicLifeEnv(env)if '' in env.unwrapped.get_action_meanings():env = FireResetEnv(env)env = WarpFrame(env)if scale:env = ScaledFloatFrame(env)if clip_rewards:env = ClipRewardEnv(env)if frame_stack:env = FrameStack(env, )return env", "docstring": "Configure environment for DeepMind-style Atari.", "id": "f1340:m1"} {"signature": "def __init__(self, env, noop_max=):", "body": "gym.Wrapper.__init__(self, env)self.noop_max = noop_maxself.override_num_noops = Noneself.noop_action = assert env.unwrapped.get_action_meanings()[] == ''", "docstring": "Sample initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.", "id": "f1340:c0:m0"} {"signature": "def reset(self, **kwargs):", "body": "self.env.reset(**kwargs)if self.override_num_noops is not None:noops = self.override_num_noopselse:noops = self.unwrapped.np_random.randint(, self.noop_max + ) assert noops > obs = Nonefor _ in range(noops):obs, _, done, _ = self.env.step(self.noop_action)if done:obs = self.env.reset(**kwargs)return obs", "docstring": "Do no-op action for a number of steps in [1, noop_max].", "id": "f1340:c0:m1"} {"signature": "def __init__(self, env):", "body": "gym.Wrapper.__init__(self, env)assert env.unwrapped.get_action_meanings()[] == ''assert len(env.unwrapped.get_action_meanings()) >= ", "docstring": "Take action on reset for environments that are fixed until firing.", "id": "f1340:c1:m0"} {"signature": "def __init__(self, env):", "body": "gym.Wrapper.__init__(self, env)self.lives = self.was_real_done = True", "docstring": "Make end-of-life == end-of-episode, but only reset on true game over.\n Done by DeepMind for the DQN and co. 
since it helps value estimation.", "id": "f1340:c2:m0"} {"signature": "def reset(self, **kwargs):", "body": "if self.was_real_done:obs = self.env.reset(**kwargs)else:obs, _, _, _ = self.env.step()self.lives = self.env.unwrapped.ale.lives()return obs", "docstring": "Reset only when lives are exhausted.\n This way all states are still reachable even though lives are episodic,\n and the learner need not know about any of this behind-the-scenes.", "id": "f1340:c2:m2"} {"signature": "def __init__(self, env, skip=):", "body": "gym.Wrapper.__init__(self, env)self._obs_buffer = np.zeros((,)+env.observation_space.shape, dtype=np.uint8)self._skip = skip", "docstring": "Return only every `skip`-th frame", "id": "f1340:c3:m0"} {"signature": "def step(self, action):", "body": "total_reward = done = Nonefor i in range(self._skip):obs, reward, done, info = self.env.step(action)if i == self._skip - : self._obs_buffer[] = obsif i == self._skip - : self._obs_buffer[] = obstotal_reward += rewardif done:breakmax_frame = self._obs_buffer.max(axis=)return max_frame, total_reward, done, info", "docstring": "Repeat action, sum reward, and max over last observations.", "id": "f1340:c3:m1"} {"signature": "def reward(self, reward):", "body": "return np.sign(reward)", "docstring": "Bin reward to {+1, 0, -1} by its sign.", "id": "f1340:c4:m1"} {"signature": "def __init__(self, env, width=, height=, grayscale=True):", "body": "gym.ObservationWrapper.__init__(self, env)self.width = widthself.height = heightself.grayscale = grayscaleif self.grayscale:self.observation_space = spaces.Box(low=, high=,shape=(self.height, self.width, ), dtype=np.uint8)else:self.observation_space = spaces.Box(low=, high=,shape=(self.height, self.width, ), dtype=np.uint8)", "docstring": "Warp frames to 84x84 as done in the Nature paper and later work.", "id": "f1340:c5:m0"} {"signature": "def __init__(self, env, k):", "body": "gym.Wrapper.__init__(self, env)self.k = kself.frames = deque([], maxlen=k)shp = env.observation_space.shapeself.observation_space = spaces.Box(low=, high=, shape=(shp[:-] + (shp[-] * k,)), dtype=env.observation_space.dtype)", "docstring": "Stack k last frames.\n\n Returns lazy array, which is much more memory efficient.\n\n See Also\n --------\n baselines.common.atari_wrappers.LazyFrames", "id": "f1340:c6:m0"} {"signature": "def __init__(self, frames):", "body": "self._frames = framesself._out = None", "docstring": "This object ensures that common frames between the observations are only stored once.\n It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay\n buffers.\n\n This object should only be converted to numpy array before being passed to the model.\n\n You'd not believe how complex the previous solution was.", "id": "f1340:c8:m0"} {"signature": "def discount(x, gamma):", "body": "assert x.ndim >= return scipy.signal.lfilter([],[,-gamma],x[::-], axis=)[::-]", "docstring": "computes discounted sums along 0th dimension of x.\n\ninputs\n------\nx: ndarray\ngamma: float\n\noutputs\n-------\ny: ndarray with same shape as x, satisfying\n\n y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... 
+ gamma^k x[t+k],\n where k = len(x) - t - 1", "id": "f1341:m0"} {"signature": "def explained_variance(ypred,y):", "body": "assert y.ndim == and ypred.ndim == vary = np.var(y)return np.nan if vary== else - np.var(y-ypred)/vary", "docstring": "Computes fraction of variance that ypred explains about y.\nReturns 1 - Var[y-ypred] / Var[y]\n\ninterpretation:\n ev=0 => might as well have predicted zero\n ev=1 => perfect prediction\n ev<0 => worse than just predicting zero", "id": "f1341:m1"} {"signature": "def discount_with_boundaries(X, New, gamma):", "body": "Y = np.zeros_like(X)T = X.shape[]Y[T-] = X[T-]for t in range(T-, -, -):Y[t] = X[t] + gamma * Y[t+] * ( - New[t+])return Y", "docstring": "X: 2d array of floats, time x features\nNew: 2d array of bools, indicating when a new episode has started", "id": "f1341:m6"} {"signature": "def make_vec_env(env_id, env_type, num_env, seed,wrapper_kwargs=None,start_index=,reward_scale=,flatten_dict_observations=True,gamestate=None):", "body": "wrapper_kwargs = wrapper_kwargs or {}mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else seed = seed + * mpi_rank if seed is not None else Nonelogger_dir = logger.get_dir()def make_thunk(rank):return lambda: make_env(env_id=env_id,env_type=env_type,mpi_rank=mpi_rank,subrank=rank,seed=seed,reward_scale=reward_scale,gamestate=gamestate,flatten_dict_observations=flatten_dict_observations,wrapper_kwargs=wrapper_kwargs,logger_dir=logger_dir)set_global_seeds(seed)if num_env > :return SubprocVecEnv([make_thunk(i + start_index) for i in range(num_env)])else:return DummyVecEnv([make_thunk(start_index)])", "docstring": "Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.", "id": "f1342:m0"} {"signature": "def make_mujoco_env(env_id, seed, reward_scale=):", "body": "rank = MPI.COMM_WORLD.Get_rank()myseed = seed + * rank if seed is not None else Noneset_global_seeds(myseed)env = gym.make(env_id)logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank))env = Monitor(env, logger_path, allow_early_resets=True)env.seed(seed)if reward_scale != :from baselines.common.retro_wrappers import RewardScalerenv = RewardScaler(env, reward_scale)return env", "docstring": "Create a wrapped, monitored gym.Env for MuJoCo.", "id": "f1342:m2"} {"signature": "def make_robotics_env(env_id, seed, rank=):", "body": "set_global_seeds(seed)env = gym.make(env_id)env = FlattenDictWrapper(env, ['', ''])env = Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)),info_keywords=('',))env.seed(seed)return env", "docstring": "Create a wrapped, monitored gym.Env for MuJoCo.", "id": "f1342:m3"} {"signature": "def arg_parser():", "body": "import argparsereturn argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)", "docstring": "Create an empty argparse.ArgumentParser.", "id": "f1342:m4"} {"signature": "def atari_arg_parser():", "body": "print('')return common_arg_parser()", "docstring": "Create an argparse.ArgumentParser for run_atari.py.", "id": "f1342:m5"} {"signature": "def common_arg_parser():", "body": "parser = arg_parser()parser.add_argument('', help='', type=str, default='')parser.add_argument('', help='', type=str)parser.add_argument('', help='', type=int, default=None)parser.add_argument('', help='', type=str, default='')parser.add_argument('', type=float, default=),parser.add_argument('', help='', default=None)parser.add_argument('', help='', default=None)parser.add_argument('', help='', default=None, type=int)parser.add_argument('', help='', default=, 
type=float)parser.add_argument('', help='', default=None, type=str)parser.add_argument('', help='', default=, type=int)parser.add_argument('', help='', default=, type=int)parser.add_argument('', default=False, action='')return parser", "docstring": "Create an argparse.ArgumentParser for run_mujoco.py.", "id": "f1342:m7"} {"signature": "def robotics_arg_parser():", "body": "parser = arg_parser()parser.add_argument('', help='', type=str, default='')parser.add_argument('', help='', type=int, default=None)parser.add_argument('', type=int, default=int())return parser", "docstring": "Create an argparse.ArgumentParser for run_mujoco.py.", "id": "f1342:m8"} {"signature": "def parse_unknown_args(args):", "body": "retval = {}preceded_by_key = Falsefor arg in args:if arg.startswith(''):if '' in arg:key = arg.split('')[][:]value = arg.split('')[]retval[key] = valueelse:key = arg[:]preceded_by_key = Trueelif preceded_by_key:retval[key] = argpreceded_by_key = Falsereturn retval", "docstring": "Parse arguments not consumed by arg parser into a dicitonary", "id": "f1342:m9"} {"signature": "def copy_obs_dict(obs):", "body": "return {k: np.copy(v) for k, v in obs.items()}", "docstring": "Deep-copy an observation dict.", "id": "f1343:m0"} {"signature": "def dict_to_obs(obs_dict):", "body": "if set(obs_dict.keys()) == {None}:return obs_dict[None]return obs_dict", "docstring": "Convert an observation dict into a raw array if the\noriginal observation space was not a Dict space.", "id": "f1343:m1"} {"signature": "def obs_space_info(obs_space):", "body": "if isinstance(obs_space, gym.spaces.Dict):assert isinstance(obs_space.spaces, OrderedDict)subspaces = obs_space.spaceselse:subspaces = {None: obs_space}keys = []shapes = {}dtypes = {}for key, box in subspaces.items():keys.append(key)shapes[key] = box.shapedtypes[key] = box.dtypereturn keys, shapes, dtypes", "docstring": "Get dict-structured information about a gym.Space.\n\nReturns:\n A tuple (keys, shapes, dtypes):\n keys: a list of dict keys.\n shapes: a dict mapping keys to shapes.\n dtypes: a dict mapping keys to dtypes.", "id": "f1343:m2"} {"signature": "def obs_to_dict(obs):", "body": "if isinstance(obs, dict):return obsreturn {None: obs}", "docstring": "Convert an observation into a dict.", "id": "f1343:m3"} {"signature": "def __init__(self, env_fns, spaces=None, context=''):", "body": "self.waiting = Falseself.closed = Falsenenvs = len(env_fns)ctx = mp.get_context(context)self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(nenvs)])self.ps = [ctx.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]for p in self.ps:p.daemon = True with clear_mpi_env_vars():p.start()for remote in self.work_remotes:remote.close()self.remotes[].send(('', None))observation_space, action_space, self.spec = self.remotes[].recv()self.viewer = NoneVecEnv.__init__(self, len(env_fns), observation_space, action_space)", "docstring": "Arguments:\n\nenv_fns: iterable of callables - functions that create environments to run in subprocesses. 
Need to be cloud-pickleable", "id": "f1344:c0:m0"} {"signature": "def _subproc_worker(pipe, parent_pipe, env_fn_wrapper, obs_bufs, obs_shapes, obs_dtypes, keys):", "body": "def _write_obs(maybe_dict_obs):flatdict = obs_to_dict(maybe_dict_obs)for k in keys:dst = obs_bufs[k].get_obj()dst_np = np.frombuffer(dst, dtype=obs_dtypes[k]).reshape(obs_shapes[k]) np.copyto(dst_np, flatdict[k])env = env_fn_wrapper.x()parent_pipe.close()try:while True:cmd, data = pipe.recv()if cmd == '':pipe.send(_write_obs(env.reset()))elif cmd == '':obs, reward, done, info = env.step(data)if done:obs = env.reset()pipe.send((_write_obs(obs), reward, done, info))elif cmd == '':pipe.send(env.render(mode=''))elif cmd == '':pipe.send(None)breakelse:raise RuntimeError('' % cmd)except KeyboardInterrupt:print('')finally:env.close()", "docstring": "Control a single environment instance using IPC and\nshared memory.", "id": "f1347:m0"} {"signature": "def __init__(self, env_fns, spaces=None, context=''):", "body": "ctx = mp.get_context(context)if spaces:observation_space, action_space = spaceselse:logger.log('')with logger.scoped_configure(format_strs=[]):dummy = env_fns[]()observation_space, action_space = dummy.observation_space, dummy.action_spacedummy.close()del dummyVecEnv.__init__(self, len(env_fns), observation_space, action_space)self.obs_keys, self.obs_shapes, self.obs_dtypes = obs_space_info(observation_space)self.obs_bufs = [{k: ctx.Array(_NP_TO_CT[self.obs_dtypes[k].type], int(np.prod(self.obs_shapes[k]))) for k in self.obs_keys}for _ in env_fns]self.parent_pipes = []self.procs = []with clear_mpi_env_vars():for env_fn, obs_buf in zip(env_fns, self.obs_bufs):wrapped_fn = CloudpickleWrapper(env_fn)parent_pipe, child_pipe = ctx.Pipe()proc = ctx.Process(target=_subproc_worker,args=(child_pipe, parent_pipe, wrapped_fn, obs_buf, self.obs_shapes, self.obs_dtypes, self.obs_keys))proc.daemon = Trueself.procs.append(proc)self.parent_pipes.append(parent_pipe)proc.start()child_pipe.close()self.waiting_step = Falseself.viewer = None", "docstring": "If you don't specify observation_space, we'll have to create a dummy\nenvironment to get it.", "id": "f1347:c0:m0"} {"signature": "def assert_venvs_equal(venv1, venv2, num_steps):", "body": "assert venv1.num_envs == venv2.num_envsassert venv1.observation_space.shape == venv2.observation_space.shapeassert venv1.observation_space.dtype == venv2.observation_space.dtypeassert venv1.action_space.shape == venv2.action_space.shapeassert venv1.action_space.dtype == venv2.action_space.dtypetry:obs1, obs2 = venv1.reset(), venv2.reset()assert np.array(obs1).shape == np.array(obs2).shapeassert np.array(obs1).shape == (venv1.num_envs,) + venv1.observation_space.shapeassert np.allclose(obs1, obs2)venv1.action_space.seed()for _ in range(num_steps):actions = np.array([venv1.action_space.sample() for _ in range(venv1.num_envs)])for venv in [venv1, venv2]:venv.step_async(actions)outs1 = venv1.step_wait()outs2 = venv2.step_wait()for out1, out2 in zip(outs1[:], outs2[:]):assert np.array(out1).shape == np.array(out2).shapeassert np.allclose(out1, out2)assert list(outs1[]) == list(outs2[])finally:venv1.close()venv2.close()", "docstring": "Compare two environments over num_steps steps and make sure\nthat the observations produced by each are the same when given\nthe same actions.", "id": "f1348:m0"} {"signature": "def __init__(self, env_fns):", "body": "self.envs = [fn() for fn in env_fns]env = self.envs[]VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)obs_space = 
env.observation_spaceself.keys, shapes, dtypes = obs_space_info(obs_space)self.buf_obs = { k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys }self.buf_dones = np.zeros((self.num_envs,), dtype=np.bool)self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)self.buf_infos = [{} for _ in range(self.num_envs)]self.actions = Noneself.spec = self.envs[].spec", "docstring": "Arguments:\n\nenv_fns: iterable of callables functions that build environments", "id": "f1350:c0:m0"} {"signature": "def __init__(self, venv, directory, record_video_trigger, video_length=):", "body": "VecEnvWrapper.__init__(self, venv)self.record_video_trigger = record_video_triggerself.video_recorder = Noneself.directory = os.path.abspath(directory)if not os.path.exists(self.directory): os.mkdir(self.directory)self.file_prefix = \"\"self.file_infix = ''.format(os.getpid())self.step_id = self.video_length = video_lengthself.recording = Falseself.recorded_frames = ", "docstring": "# Arguments\n venv: VecEnv to wrap\n directory: Where to save videos\n record_video_trigger:\n Function that defines when to start recording.\n The function takes the current number of step,\n and returns whether we should start recording or not.\n video_length: Length of recorded video", "id": "f1351:c0:m0"} {"signature": "@contextlib.contextmanagerdef clear_mpi_env_vars():", "body": "removed_environment = {}for k, v in list(os.environ.items()):for prefix in ['', '']:if k.startswith(prefix):removed_environment[k] = vdel os.environ[k]try:yieldfinally:os.environ.update(removed_environment)", "docstring": "from mpi4py import MPI will call MPI_Init by default. If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang.\nThis context manager is a hacky way to clear those environment variables temporarily such as when we are starting multiprocessing\nProcesses.", "id": "f1352:m0"} {"signature": "@abstractmethoddef reset(self):", "body": "pass", "docstring": "Reset all the environments and return an array of\nobservations, or a dict of observation arrays.\n\nIf step_async is still doing work, that work will\nbe cancelled and step_wait() should not be called\nuntil step_async() is invoked again.", "id": "f1352:c2:m1"} {"signature": "@abstractmethoddef step_async(self, actions):", "body": "pass", "docstring": "Tell all the environments to start taking a step\nwith the given actions.\nCall step_wait() to get the results of the step.\n\nYou should not call this if a step_async run is\nalready pending.", "id": "f1352:c2:m2"} {"signature": "@abstractmethoddef step_wait(self):", "body": "pass", "docstring": "Wait for the step taken with step_async().\n\nReturns (obs, rews, dones, infos):\n - obs: an array of observations, or a dict of\n arrays of observations.\n - rews: an array of rewards\n - dones: an array of \"episode done\" booleans\n - infos: a sequence of info objects", "id": "f1352:c2:m3"} {"signature": "def close_extras(self):", "body": "pass", "docstring": "Clean up the extra resources, beyond what's in this base class.\nOnly runs when not self.closed.", "id": "f1352:c2:m4"} {"signature": "def step(self, actions):", "body": "self.step_async(actions)return self.step_wait()", "docstring": "Step the environments synchronously.\n\nThis is available for backwards compatibility.", "id": "f1352:c2:m6"} {"signature": "def get_images(self):", "body": "raise NotImplementedError", "docstring": "Return RGB images from each 
environment", "id": "f1352:c2:m8"} {"signature": "def observation_placeholder(ob_space, batch_size=None, name=''):", "body": "assert isinstance(ob_space, Discrete) or isinstance(ob_space, Box) or isinstance(ob_space, MultiDiscrete),''dtype = ob_space.dtypeif dtype == np.int8:dtype = np.uint8return tf.placeholder(shape=(batch_size,) + ob_space.shape, dtype=dtype, name=name)", "docstring": "Create placeholder to feed observations into of the size appropriate to the observation space\n\nParameters:\n----------\n\nob_space: gym.Space observation space\n\nbatch_size: int size of the batch to be fed into input. Can be left None in most cases.\n\nname: str name of the placeholder\n\nReturns:\n-------\n\ntensorflow placeholder tensor", "id": "f1356:m0"} {"signature": "def observation_input(ob_space, batch_size=None, name=''):", "body": "placeholder = observation_placeholder(ob_space, batch_size, name)return placeholder, encode_observation(ob_space, placeholder)", "docstring": "Create placeholder to feed observations into of the size appropriate to the observation space, and add input\nencoder of the appropriate type.", "id": "f1356:m1"} {"signature": "def encode_observation(ob_space, placeholder):", "body": "if isinstance(ob_space, Discrete):return tf.to_float(tf.one_hot(placeholder, ob_space.n))elif isinstance(ob_space, Box):return tf.to_float(placeholder)elif isinstance(ob_space, MultiDiscrete):placeholder = tf.cast(placeholder, tf.int32)one_hots = [tf.to_float(tf.one_hot(placeholder[..., i], ob_space.nvec[i])) for i in range(placeholder.shape[-])]return tf.concat(one_hots, axis=-)else:raise NotImplementedError", "docstring": "Encode input in the way that is appropriate to the observation space\n\nParameters:\n----------\n\nob_space: gym.Space observation space\n\nplaceholder: tf.placeholder observation input placeholder", "id": "f1356:m2"} {"signature": "def __init__(self, capacity, operation, neutral_element):", "body": "assert capacity > and capacity & (capacity - ) == , \"\"self._capacity = capacityself._value = [neutral_element for _ in range( * capacity)]self._operation = operation", "docstring": "Build a Segment Tree data structure.\n\n https://en.wikipedia.org/wiki/Segment_tree\n\n Can be used as regular array, but with two\n important differences:\n\n a) setting item's value is slightly slower.\n It is O(lg capacity) instead of O(1).\n b) user has access to an efficient ( O(log segment size) )\n `reduce` operation which reduces `operation` over\n a contiguous subsequence of items in the array.\n\n Paramters\n ---------\n capacity: int\n Total size of the array - must be a power of two.\n operation: lambda obj, obj -> obj\n and operation for combining elements (eg. sum, max)\n must form a mathematical group together with the set of\n possible values for array elements (i.e. be associative)\n neutral_element: obj\n neutral element for the operation above. eg. float('-inf')\n for max and 0 for sum.", "id": "f1357:c0:m0"} {"signature": "def reduce(self, start=, end=None):", "body": "if end is None:end = self._capacityif end < :end += self._capacityend -= return self._reduce_helper(start, end, , , self._capacity - )", "docstring": "Returns result of applying `self.operation`\n to a contiguous subsequence of the array.\n\n self.operation(arr[start], operation(arr[start+1], operation(... 
arr[end])))\n\n Parameters\n ----------\n start: int\n beginning of the subsequence\n end: int\n end of the subsequence\n\n Returns\n -------\n reduced: obj\n result of reducing self.operation over the specified range of array elements.", "id": "f1357:c0:m2"} {"signature": "def sum(self, start=, end=None):", "body": "return super(SumSegmentTree, self).reduce(start, end)", "docstring": "Returns arr[start] + ... + arr[end]", "id": "f1357:c1:m1"} {"signature": "def find_prefixsum_idx(self, prefixsum):", "body": "assert <= prefixsum <= self.sum() + idx = while idx < self._capacity: if self._value[ * idx] > prefixsum:idx = * idxelse:prefixsum -= self._value[ * idx]idx = * idx + return idx - self._capacity", "docstring": "Find the highest index `i` in the array such that\n sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum\n\n if array values are probabilities, this function\n allows sampling indexes according to the discrete\n probability efficiently.\n\n Parameters\n ----------\n prefixsum: float\n upper bound on the sum of array prefix\n\n Returns\n -------\n idx: int\n highest index satisfying the prefixsum constraint", "id": "f1357:c1:m2"} {"signature": "def min(self, start=, end=None):", "body": "return super(MinSegmentTree, self).reduce(start, end)", "docstring": "Returns min(arr[start], ..., arr[end])", "id": "f1357:c2:m1"} {"signature": "def mpi_fork(n, bind_to_core=False):", "body": "if n<=:return \"\"if os.getenv(\"\") is None:env = os.environ.copy()env.update(MKL_NUM_THREADS=\"\",OMP_NUM_THREADS=\"\",IN_MPI=\"\")args = [\"\", \"\", str(n)]if bind_to_core:args += [\"\", \"\"]args += [sys.executable] + sys.argvsubprocess.check_call(args, env=env)return \"\"else:return \"\"", "docstring": "Re-launches the current script with workers\n Returns \"parent\" for original parent, \"child\" for MPI children", "id": "f1358:m0"} {"signature": "def check_synced(localval, comm=None):", "body": "comm = comm or MPI.COMM_WORLDvals = comm.gather(localval)if comm.rank == :assert all(val==vals[] for val in vals[:])", "docstring": "It's common to forget to initialize your variables to the same values, or\n(less commonly) if you update them in some other way than adam, to get them out of sync.\nThis function checks that variables on all MPI workers are the same, and raises\nan AssertionError otherwise\n\nArguments:\n comm: MPI communicator\n localval: list of local variables (list of variables on current worker to be compared with the other workers)", "id": "f1362:m0"} {"signature": "def switch(condition, then_expression, else_expression):", "body": "x_shape = copy.copy(then_expression.get_shape())x = tf.cond(tf.cast(condition, ''),lambda: then_expression,lambda: else_expression)x.set_shape(x_shape)return x", "docstring": "Switches between two operations depending on a scalar value (int or bool).\n Note that both `then_expression` and `else_expression`\n should be symbolic tensors of the *same shape*.\n\n # Arguments\n condition: scalar tensor.\n then_expression: TensorFlow operation.\n else_expression: TensorFlow operation.", "id": "f1363:m0"} {"signature": "def huber_loss(x, delta=):", "body": "return tf.where(tf.abs(x) < delta,tf.square(x) * ,delta * (tf.abs(x) - * delta))", "docstring": "Reference: https://en.wikipedia.org/wiki/Huber_loss", "id": "f1363:m2"} {"signature": "def get_session(config=None):", "body": "sess = tf.get_default_session()if sess is None:sess = make_session(config=config, make_default=True)return sess", "docstring": "Get default session or create one with a given config", 
"id": "f1363:m3"} {"signature": "def make_session(config=None, num_cpu=None, make_default=False, graph=None):", "body": "if num_cpu is None:num_cpu = int(os.getenv('', multiprocessing.cpu_count()))if config is None:config = tf.ConfigProto(allow_soft_placement=True,inter_op_parallelism_threads=num_cpu,intra_op_parallelism_threads=num_cpu)config.gpu_options.allow_growth = Trueif make_default:return tf.InteractiveSession(config=config, graph=graph)else:return tf.Session(config=config, graph=graph)", "docstring": "Returns a session that will use CPU's only", "id": "f1363:m4"} {"signature": "def single_threaded_session():", "body": "return make_session(num_cpu=)", "docstring": "Returns a session which will only use a single CPU", "id": "f1363:m5"} {"signature": "def initialize():", "body": "new_variables = set(tf.global_variables()) - ALREADY_INITIALIZEDget_session().run(tf.variables_initializer(new_variables))ALREADY_INITIALIZED.update(new_variables)", "docstring": "Initialize all the uninitialized variables in the global scope.", "id": "f1363:m7"} {"signature": "def function(inputs, outputs, updates=None, givens=None):", "body": "if isinstance(outputs, list):return _Function(inputs, outputs, updates, givens=givens)elif isinstance(outputs, (dict, collections.OrderedDict)):f = _Function(inputs, outputs.values(), updates, givens=givens)return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))else:f = _Function(inputs, [outputs], updates, givens=givens)return lambda *args, **kwargs: f(*args, **kwargs)[]", "docstring": "Just like Theano function. Take a bunch of tensorflow placeholders and expressions\n computed based on those placeholders and produces f(inputs) -> outputs. Function f takes\n values to be fed to the input's placeholders and produces the values of the expressions\n in outputs.\n\n Input values can be passed in the same order as inputs or can be provided as kwargs based\n on placeholder name (passed to constructor or accessible via placeholder.op.name).\n\n Example:\n x = tf.placeholder(tf.int32, (), name=\"x\")\n y = tf.placeholder(tf.int32, (), name=\"y\")\n z = 3 * x + 2 * y\n lin = function([x, y], z, givens={y: 0})\n\n with single_threaded_session():\n initialize()\n\n assert lin(2) == 6\n assert lin(x=3) == 9\n assert lin(2, 2) == 10\n assert lin(x=2, y=3) == 12\n\n Parameters\n ----------\n inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]\n list of input arguments\n outputs: [tf.Variable] or tf.Variable\n list of outputs or a single output to be returned from function. Returned\n value will also have the same shape.\n updates: [tf.Operation] or tf.Operation\n list of update functions or single update function that will be run whenever\n the function is called. 
The return is ignored.", "id": "f1363:m10"} {"signature": "def adjust_shape(placeholder, data):", "body": "if not isinstance(data, np.ndarray) and not isinstance(data, list):return dataif isinstance(data, list):data = np.array(data)placeholder_shape = [x or - for x in placeholder.shape.as_list()]assert _check_shape(placeholder_shape, data.shape),''.format(data.shape, placeholder_shape)return np.reshape(data, placeholder_shape)", "docstring": "adjust shape of the data to the shape of the placeholder if possible.\nIf shape is incompatible, AssertionError is thrown\n\nParameters:\n placeholder tensorflow input placeholder\n\n data input data to be (potentially) reshaped to be fed into placeholder\n\nReturns:\n reshaped data", "id": "f1363:m24"} {"signature": "def _check_shape(placeholder_shape, data_shape):", "body": "return Truesqueezed_placeholder_shape = _squeeze_shape(placeholder_shape)squeezed_data_shape = _squeeze_shape(data_shape)for i, s_data in enumerate(squeezed_data_shape):s_placeholder = squeezed_placeholder_shape[i]if s_placeholder != - and s_data != s_placeholder:return Falsereturn True", "docstring": "check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)", "id": "f1363:m25"} {"signature": "def launch_tensorboard_in_background(log_dir):", "body": "import subprocesssubprocess.Popen(['', '', log_dir])", "docstring": "To log the Tensorflow graph when using rl-algs\nalgorithms, you can run the following code\nin your main script:\n import threading, time\n def start_tensorboard(session):\n time.sleep(10) # Wait until graph is setup\n tb_path = osp.join(logger.get_dir(), 'tb')\n summary_writer = tf.summary.FileWriter(tb_path, graph=session.graph)\n summary_op = tf.summary.merge_all()\n launch_tensorboard_in_background(tb_path)\n session = tf.get_default_session()\n t = threading.Thread(target=start_tensorboard, args=([session]))\n t.start()", "id": "f1363:m27"} {"signature": "def smooth(y, radius, mode='', valid_only=False):", "body": "assert mode in ('', '')if len(y) < *radius+:return np.ones_like(y) * y.mean()elif mode == '':convkernel = np.ones( * radius+)out = np.convolve(y, convkernel,mode='') / np.convolve(np.ones_like(y), convkernel, mode='')if valid_only:out[:radius] = out[-radius:] = np.nanelif mode == '':convkernel = np.ones(radius)out = np.convolve(y, convkernel,mode='') / np.convolve(np.ones_like(y), convkernel, mode='')out = out[:-radius+]if valid_only:out[:radius] = np.nanreturn out", "docstring": "Smooth signal y, where radius is determines the size of the window\n\nmode='twosided':\n average over the window [max(index - radius, 0), min(index + radius, len(y)-1)]\nmode='causal':\n average over the window [max(index - radius, 0), index]\n\nvalid_only: put nan in entries where the full-sized window is not available", "id": "f1364:m0"} {"signature": "def one_sided_ema(xolds, yolds, low=None, high=None, n=, decay_steps=, low_counts_threshold=):", "body": "low = xolds[] if low is None else lowhigh = xolds[-] if high is None else highassert xolds[] <= low, ''.format(low, xolds[])assert xolds[-] >= high, ''.format(high, xolds[-])assert len(xolds) == len(yolds), ''.format(len(xolds), len(yolds))xolds = xolds.astype('')yolds = yolds.astype('')luoi = sum_y = count_y = xnews = np.linspace(low, high, n)decay_period = (high - low) / (n - ) * decay_stepsinterstep_decay = np.exp(- / decay_steps)sum_ys = np.zeros_like(xnews)count_ys = np.zeros_like(xnews)for i in range(n):xnew = xnews[i]sum_y *= interstep_decaycount_y *= 
interstep_decaywhile True:xold = xolds[luoi]if xold <= xnew:decay = np.exp(- (xnew - xold) / decay_period)sum_y += decay * yolds[luoi]count_y += decayluoi += else:breakif luoi >= len(xolds):breaksum_ys[i] = sum_ycount_ys[i] = count_yys = sum_ys / count_ysys[count_ys < low_counts_threshold] = np.nanreturn xnews, ys, count_ys", "docstring": "perform one-sided (causal) EMA (exponential moving average)\nsmoothing and resampling to an even grid with n points.\nDoes not do extrapolation, so we assume\nxolds[0] <= low && high <= xolds[-1]\n\nArguments:\n\nxolds: array or list - x values of data. Needs to be sorted in ascending order\nyolds: array of list - y values of data. Has to have the same length as xolds\n\nlow: float - min value of the new x grid. By default equals to xolds[0]\nhigh: float - max value of the new x grid. By default equals to xolds[-1]\n\nn: int - number of points in new x grid\n\ndecay_steps: float - EMA decay factor, expressed in new x grid steps.\n\nlow_counts_threshold: float or int\n - y values with counts less than this value will be set to NaN\n\nReturns:\n tuple sum_ys, count_ys where\n xs - array with new x grid\n ys - array of EMA of y at each point of the new x grid\n count_ys - array of EMA of y counts at each point of the new x grid", "id": "f1364:m1"} {"signature": "def symmetric_ema(xolds, yolds, low=None, high=None, n=, decay_steps=, low_counts_threshold=):", "body": "xs, ys1, count_ys1 = one_sided_ema(xolds, yolds, low, high, n, decay_steps, low_counts_threshold=)_, ys2, count_ys2 = one_sided_ema(-xolds[::-], yolds[::-], -high, -low, n, decay_steps, low_counts_threshold=)ys2 = ys2[::-]count_ys2 = count_ys2[::-]count_ys = count_ys1 + count_ys2ys = (ys1 * count_ys1 + ys2 * count_ys2) / count_ysys[count_ys < low_counts_threshold] = np.nanreturn xs, ys, count_ys", "docstring": "perform symmetric EMA (exponential moving average)\nsmoothing and resampling to an even grid with n points.\nDoes not do extrapolation, so we assume\nxolds[0] <= low && high <= xolds[-1]\n\nArguments:\n\nxolds: array or list - x values of data. Needs to be sorted in ascending order\nyolds: array of list - y values of data. Has to have the same length as xolds\n\nlow: float - min value of the new x grid. By default equals to xolds[0]\nhigh: float - max value of the new x grid. 
By default equals to xolds[-1]\n\nn: int - number of points in new x grid\n\ndecay_steps: float - EMA decay factor, expressed in new x grid steps.\n\nlow_counts_threshold: float or int\n - y values with counts less than this value will be set to NaN\n\nReturns:\n tuple sum_ys, count_ys where\n xs - array with new x grid\n ys - array of EMA of y at each point of the new x grid\n count_ys - array of EMA of y counts at each point of the new x grid", "id": "f1364:m2"} {"signature": "def load_results(root_dir_or_dirs, enable_progress=True, enable_monitor=True, verbose=False):", "body": "import reif isinstance(root_dir_or_dirs, str):rootdirs = [osp.expanduser(root_dir_or_dirs)]else:rootdirs = [osp.expanduser(d) for d in root_dir_or_dirs]allresults = []for rootdir in rootdirs:assert osp.exists(rootdir), \"\"%rootdirfor dirname, dirs, files in os.walk(rootdir):if '' in dirname:files[:] = []continuemonitor_re = re.compile(r'')if set(['', '', '', '']).intersection(files) orany([f for f in files if monitor_re.match(f)]): result = {'' : dirname}if \"\" in files:with open(osp.join(dirname, \"\"), \"\") as fh:result[''] = json.load(fh)progjson = osp.join(dirname, \"\")progcsv = osp.join(dirname, \"\")if enable_progress:if osp.exists(progjson):result[''] = pandas.DataFrame(read_json(progjson))elif osp.exists(progcsv):try:result[''] = read_csv(progcsv)except pandas.errors.EmptyDataError:print('', dirname, '')else:if verbose: print(''%dirname)if enable_monitor:try:result[''] = pandas.DataFrame(monitor.load_results(dirname))except monitor.LoadMonitorResultsError:print(''%dirname)except Exception as e:print(''%(dirname, e))if result.get('') is not None or result.get('') is not None:allresults.append(Result(**result))if verbose:print(''%dirname)if verbose: print(''%len(allresults))return allresults", "docstring": "load summaries of runs from a list of directories (including subdirectories)\nArguments:\n\nenable_progress: bool - if True, will attempt to load data from progress.csv files (data saved by logger). Default: True\n\nenable_monitor: bool - if True, will attempt to load data from monitor.csv files (data saved by Monitor environment wrapper). Default: True\n\nverbose: bool - if True, will print out list of directories from which the data is loaded. 
Default: False\n\n\nReturns:\nList of Result objects with the following fields:\n - dirname - path to the directory data was loaded from\n - metadata - run metadata (such as command-line arguments and anything else in metadata.json file\n - monitor - if enable_monitor is True, this field contains pandas dataframe with loaded monitor.csv file (or aggregate of all *.monitor.csv files in the directory)\n - progress - if enable_progress is True, this field contains pandas dataframe with loaded progress.csv file", "id": "f1364:m3"} {"signature": "def plot_results(allresults, *,xy_fn=default_xy_fn,split_fn=default_split_fn,group_fn=default_split_fn,average_group=False,shaded_std=True,shaded_err=True,figsize=None,legend_outside=False,resample=,smooth_step=):", "body": "if split_fn is None: split_fn = lambda _ : ''if group_fn is None: group_fn = lambda _ : ''sk2r = defaultdict(list) for result in allresults:splitkey = split_fn(result)sk2r[splitkey].append(result)assert len(sk2r) > assert isinstance(resample, int), \"\"nrows = len(sk2r)ncols = figsize = figsize or (, * nrows)f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False, figsize=figsize)groups = list(set(group_fn(result) for result in allresults))default_samples = if average_group:resample = resample or default_samplesfor (isplit, sk) in enumerate(sorted(sk2r.keys())):g2l = {}g2c = defaultdict(int)sresults = sk2r[sk]gresults = defaultdict(list)ax = axarr[isplit][]for result in sresults:group = group_fn(result)g2c[group] += x, y = xy_fn(result)if x is None: x = np.arange(len(y))x, y = map(np.asarray, (x, y))if average_group:gresults[group].append((x,y))else:if resample:x, y, counts = symmetric_ema(x, y, x[], x[-], resample, decay_steps=smooth_step)l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])g2l[group] = lif average_group:for group in sorted(groups):xys = gresults[group]if not any(xys):continuecolor = COLORS[groups.index(group) % len(COLORS)]origxs = [xy[] for xy in xys]minxlen = min(map(len, origxs))def allequal(qs):return all((q==qs[]).all() for q in qs[:])if resample:low = max(x[] for x in origxs)high = min(x[-] for x in origxs)usex = np.linspace(low, high, resample)ys = []for (x, y) in xys:ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[])else:assert allequal([x[:minxlen] for x in origxs]),''usex = origxs[]ys = [xy[][:minxlen] for xy in xys]ymean = np.mean(ys, axis=)ystd = np.std(ys, axis=)ystderr = ystd / np.sqrt(len(ys))l, = axarr[isplit][].plot(usex, ymean, color=color)g2l[group] = lif shaded_err:ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=)if shaded_std:ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=)plt.tight_layout()if any(g2l.keys()):ax.legend(g2l.values(),[''%(g, g2c[g]) for g in g2l] if average_group else g2l.keys(),loc= if legend_outside else None,bbox_to_anchor=(,) if legend_outside else None)ax.set_title(sk)return f, axarr", "docstring": "Plot multiple Results objects\n\nxy_fn: function Result -> x,y - function that converts results objects into tuple of x and y values.\n By default, x is cumsum of episode lengths, and y is episode rewards\n\nsplit_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by.\n That is, the results r for which split_fn(r) is different will be put on different sub-panels.\n By default, the portion of r.dirname between last / and - is returned. 
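Since `plot_results` above resamples curves with the EMA smoothing described in the `one_sided_ema` / `symmetric_ema` entries, a brief standalone usage sketch may help; the data here is synthetic and the import path and parameter values are assumptions.

```python
import numpy as np

from baselines.common.plot_util import symmetric_ema  # assumed import path

# noisy learning curve sampled at irregular, strictly increasing timesteps
xs = np.cumsum(np.random.randint(1, 10, size=200)).astype(float)
ys = np.sin(xs / 50.0) + 0.3 * np.random.randn(200)

# resample onto an even 512-point grid, smoothing with EMA in both directions
grid_x, smoothed_y, counts = symmetric_ema(xs, ys, low=xs[0], high=xs[-1],
                                            n=512, decay_steps=20.0)
```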
The sub-panels are\n stacked vertically in the figure.\n\ngroup_fn: function Result -> hashable - function that converts results objects into keys to group curves by.\n That is, the results r for which group_fn(r) is the same will be put into the same group.\n Curves in the same group have the same color (if average_group is False), or averaged over\n (if average_group is True). The default value is the same as default value for split_fn\n\naverage_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling\n (if resample = 0, will use 512 steps)\n\nshaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be\n shown (only applicable if average_group = True)\n\nshaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves\n (that is, standard deviation divided by square root of number of curves) will be\n shown (only applicable if average_group = True)\n\nfigsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of\n sub-panels.\n\n\nlegend_outside: bool - if True, will place the legend outside of the sub-panels.\n\nresample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric\n EMA smoothing (see the docstring for symmetric_ema).\n Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default\n value is 512.\n\nsmooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step).\n See docstrings for decay_steps in symmetric_ema or one_sided_ema functions.", "id": "f1364:m6"} {"signature": "def cg(f_Ax, b, cg_iters=, callback=None, verbose=False, residual_tol=):", "body": "p = b.copy()r = b.copy()x = np.zeros_like(b)rdotr = r.dot(r)fmtstr = \"\"titlestr = \"\"if verbose: print(titlestr % (\"\", \"\", \"\"))for i in range(cg_iters):if callback is not None:callback(x)if verbose: print(fmtstr % (i, rdotr, np.linalg.norm(x)))z = f_Ax(p)v = rdotr / p.dot(z)x += v*pr -= v*znewrdotr = r.dot(r)mu = newrdotr/rdotrp = r + mu*prdotr = newrdotrif rdotr < residual_tol:breakif callback is not None:callback(x)if verbose: print(fmtstr % (i+, rdotr, np.linalg.norm(x))) return x", "docstring": "Demmel p 312", "id": "f1365:m0"} {"signature": "def sync_from_root(sess, variables, comm=None):", "body": "if comm is None: comm = MPI.COMM_WORLDimport tensorflow as tfvalues = comm.bcast(sess.run(variables))sess.run([tf.assign(var, val)for (var, val) in zip(variables, values)])", "docstring": "Send the root node's parameters to every worker.\nArguments:\n sess: the TensorFlow session.\n variables: all parameter variables including optimizer's", "id": "f1366:m0"} {"signature": "def gpu_count():", "body": "if shutil.which('') is None:return output = subprocess.check_output(['', '', ''])return max(, len(output.split(b'')) - )", "docstring": "Count the GPUs on this machine.", "id": "f1366:m1"} {"signature": "def setup_mpi_gpus():", "body": "if '' not in os.environ:if sys.platform == '': ids = [] else:lrank, _lsize = get_local_rank_size(MPI.COMM_WORLD)ids = [lrank]os.environ[\"\"] = \"\".join(map(str, ids))", "docstring": "Set CUDA_VISIBLE_DEVICES to MPI rank if not already set", "id": "f1366:m2"} {"signature": "def get_local_rank_size(comm):", "body": "this_node = platform.node()ranks_nodes = 
comm.allgather((comm.Get_rank(), this_node))node2rankssofar = defaultdict(int)local_rank = Nonefor (rank, node) in ranks_nodes:if rank == comm.Get_rank():local_rank = node2rankssofar[node]node2rankssofar[node] += assert local_rank is not Nonereturn local_rank, node2rankssofar[this_node]", "docstring": "Returns the rank of each process on its machine\nThe processes on a given machine will be assigned ranks\n 0, 1, 2, ..., N-1,\nwhere N is the number of processes on this machine.\n\nUseful if you want to assign one gpu per machine", "id": "f1366:m3"} {"signature": "def share_file(comm, path):", "body": "localrank, _ = get_local_rank_size(comm)if comm.Get_rank() == :with open(path, '') as fh:data = fh.read()comm.bcast(data)else:data = comm.bcast(None)if localrank == :os.makedirs(os.path.dirname(path), exist_ok=True)with open(path, '') as fh:fh.write(data)comm.Barrier()", "docstring": "Copies the file from rank 0 to all other ranks\nPuts it in the same place on all machines", "id": "f1366:m4"} {"signature": "def dict_gather(comm, d, op='', assert_all_have_data=True):", "body": "if comm is None: return dalldicts = comm.allgather(d)size = comm.sizek2li = defaultdict(list)for d in alldicts:for (k,v) in d.items():k2li[k].append(v)result = {}for (k,li) in k2li.items():if assert_all_have_data:assert len(li)==size, \"\" % (len(li), size, k)if op=='':result[k] = np.mean(li, axis=)elif op=='':result[k] = np.sum(li, axis=)else:assert , opreturn result", "docstring": "Perform a reduction operation over dicts", "id": "f1366:m5"} {"signature": "def mpi_weighted_mean(comm, local_name2valcount):", "body": "all_name2valcount = comm.gather(local_name2valcount)if comm.rank == :name2sum = defaultdict(float)name2count = defaultdict(float)for n2vc in all_name2valcount:for (name, (val, count)) in n2vc.items():try:val = float(val)except ValueError:if comm.rank == :warnings.warn(''.format(name, val))else:name2sum[name] += val * countname2count[name] += countreturn {name : name2sum[name] / name2count[name] for name in name2sum}else:return {}", "docstring": "Perform a weighted average over dicts that are each on a different node\nInput: local_name2valcount: dict mapping key -> (value, count)\nReturns: key -> mean", "id": "f1366:m6"} {"signature": "def __init__(self, env, observations, latent, estimate_q=False, vf_latent=None, sess=None, **tensors):", "body": "self.X = observationsself.state = tf.constant([])self.initial_state = Noneself.__dict__.update(tensors)vf_latent = vf_latent if vf_latent is not None else latentvf_latent = tf.layers.flatten(vf_latent)latent = tf.layers.flatten(latent)self.pdtype = make_pdtype(env.action_space)self.pd, self.pi = self.pdtype.pdfromlatent(latent, init_scale=)self.action = self.pd.sample()self.neglogp = self.pd.neglogp(self.action)self.sess = sess or tf.get_default_session()if estimate_q:assert isinstance(env.action_space, gym.spaces.Discrete)self.q = fc(vf_latent, '', env.action_space.n)self.vf = self.qelse:self.vf = fc(vf_latent, '', )self.vf = self.vf[:,]", "docstring": "Parameters:\n----------\nenv RL environment\n\nobservations tensorflow placeholder in which the observations will be fed\n\nlatent latent state from which policy distribution parameters should be inferred\n\nvf_latent latent state from which value function should be inferred (if None, then latent is used)\n\nsess tensorflow session to run calculations in (if None, default session is used)\n\n**tensors tensorflow tensors for additional attributes such as state or mask", "id": "f1367:c0:m0"} {"signature": "def 
step(self, observation, **extra_feed):", "body": "a, v, state, neglogp = self._evaluate([self.action, self.vf, self.state, self.neglogp], observation, **extra_feed)if state.size == :state = Nonereturn a, v, state, neglogp", "docstring": "Compute next action(s) given the observation(s)\n\nParameters:\n----------\n\nobservation observation data (either single or a batch)\n\n**extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)\n\nReturns:\n-------\n(action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple", "id": "f1367:c0:m2"} {"signature": "def value(self, ob, *args, **kwargs):", "body": "return self._evaluate(self.vf, ob, *args, **kwargs)", "docstring": "Compute value estimate(s) given the observation(s)\n\nParameters:\n----------\n\nobservation observation data (either single or a batch)\n\n**extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)\n\nReturns:\n-------\nvalue estimate", "id": "f1367:c0:m3"} {"signature": "def value(self, t):", "body": "raise NotImplementedError()", "docstring": "Value of the schedule at time t", "id": "f1370:c0:m0"} {"signature": "def __init__(self, value):", "body": "self._v = value", "docstring": "Value remains constant over time.\n\n Parameters\n ----------\n value: float\n Constant value of the schedule", "id": "f1370:c1:m0"} {"signature": "def value(self, t):", "body": "return self._v", "docstring": "See Schedule.value", "id": "f1370:c1:m1"} {"signature": "def __init__(self, endpoints, interpolation=linear_interpolation, outside_value=None):", "body": "idxes = [e[] for e in endpoints]assert idxes == sorted(idxes)self._interpolation = interpolationself._outside_value = outside_valueself._endpoints = endpoints", "docstring": "Piecewise schedule.\n\n endpoints: [(int, int)]\n list of pairs `(time, value)` meaning that the schedule should output\n `value` when `t==time`. All the values for time must be sorted in\n an increasing order. When t is between two times, e.g. `(time_a, value_a)`\n and `(time_b, value_b)`, such that `time_a <= t < time_b` then value outputs\n `interpolation(value_a, value_b, alpha)` where alpha is a fraction of\n time passed between `time_a` and `time_b` for time `t`.\n interpolation: lambda float, float, float: float\n a function that takes value to the left and to the right of t according\n to the `endpoints`. Alpha is the fraction of distance from left endpoint to\n right endpoint that t has covered. See linear_interpolation for example.\n outside_value: float\n if the value is requested outside of all the intervals specified in\n `endpoints` this value is returned. If None then AssertionError is\n raised when outside value is requested.", "id": "f1370:c2:m0"} {"signature": "def value(self, t):", "body": "for (l_t, l), (r_t, r) in zip(self._endpoints[:-], self._endpoints[:]):if l_t <= t and t < r_t:alpha = float(t - l_t) / (r_t - l_t)return self._interpolation(l, r, alpha)assert self._outside_value is not Nonereturn self._outside_value", "docstring": "See Schedule.value", "id": "f1370:c2:m1"} {"signature": "def __init__(self, schedule_timesteps, final_p, initial_p=):", "body": "self.schedule_timesteps = schedule_timestepsself.final_p = final_pself.initial_p = initial_p", "docstring": "Linear interpolation between initial_p and final_p over\n schedule_timesteps. 
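A short usage sketch for the schedule classes above (`ConstantSchedule`, `PiecewiseSchedule`, `LinearSchedule`); the numeric values and the import path are assumptions chosen for illustration.

```python
from baselines.common.schedules import LinearSchedule, PiecewiseSchedule  # assumed import path

# anneal exploration epsilon from 1.0 to 0.02 over the first 10k steps
eps = LinearSchedule(schedule_timesteps=10000, final_p=0.02, initial_p=1.0)
assert eps.value(0) == 1.0
assert abs(eps.value(5000) - 0.51) < 1e-6   # halfway between 1.0 and 0.02
assert eps.value(20000) == 0.02             # clamped once schedule_timesteps have passed

# piecewise-linear learning-rate decay with a constant tail
lr = PiecewiseSchedule([(0, 1e-3), (10000, 1e-4)], outside_value=1e-4)
print(lr.value(5000))   # linearly interpolated between the two endpoints -> 5.5e-4
```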
After this many timesteps pass final_p is\n returned.\n\n Parameters\n ----------\n schedule_timesteps: int\n Number of timesteps for which to linearly anneal initial_p\n to final_p\n initial_p: float\n initial output value\n final_p: float\n final output value", "id": "f1370:c3:m0"} {"signature": "def value(self, t):", "body": "fraction = min(float(t) / self.schedule_timesteps, )return self.initial_p + fraction * (self.final_p - self.initial_p)", "docstring": "See Schedule.value", "id": "f1370:c3:m1"} {"signature": "def tile_images(img_nhwc):", "body": "img_nhwc = np.asarray(img_nhwc)N, h, w, c = img_nhwc.shapeH = int(np.ceil(np.sqrt(N)))W = int(np.ceil(float(N)/H))img_nhwc = np.array(list(img_nhwc) + [img_nhwc[]* for _ in range(N, H*W)])img_HWhwc = img_nhwc.reshape(H, W, h, w, c)img_HhWwc = img_HWhwc.transpose(, , , , )img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c)return img_Hh_Ww_c", "docstring": "Tile N images into one big PxQ image\n(P,Q) are chosen to be as close as possible, and if N\nis square, then P=Q.\n\ninput: img_nhwc, list or array of images, ndim=4 once turned into array\n n = batch index, h = height, w = width, c = channel\nreturns:\n bigim_HWc, ndarray with ndim=3", "id": "f1371:m0"} {"signature": "def nature_cnn(unscaled_images, **conv_kwargs):", "body": "scaled_images = tf.cast(unscaled_images, tf.float32) / activ = tf.nn.reluh = activ(conv(scaled_images, '', nf=, rf=, stride=, init_scale=np.sqrt(),**conv_kwargs))h2 = activ(conv(h, '', nf=, rf=, stride=, init_scale=np.sqrt(), **conv_kwargs))h3 = activ(conv(h2, '', nf=, rf=, stride=, init_scale=np.sqrt(), **conv_kwargs))h3 = conv_to_fc(h3)return activ(fc(h3, '', nh=, init_scale=np.sqrt()))", "docstring": "CNN from Nature paper.", "id": "f1373:m1"} {"signature": "@register(\"\")def mlp(num_layers=, num_hidden=, activation=tf.tanh, layer_norm=False):", "body": "def network_fn(X):h = tf.layers.flatten(X)for i in range(num_layers):h = fc(h, ''.format(i), nh=num_hidden, init_scale=np.sqrt())if layer_norm:h = tf.contrib.layers.layer_norm(h, center=True, scale=True)h = activation(h)return hreturn network_fn", "docstring": "Stack of fully-connected layers to be used in a policy / q-function approximator\n\nParameters:\n----------\n\nnum_layers: int number of fully-connected layers (default: 2)\n\nnum_hidden: int size of fully-connected layers (default: 64)\n\nactivation: activation function (default: tf.tanh)\n\nReturns:\n-------\n\nfunction that builds fully connected network with a given input tensor / placeholder", "id": "f1373:m2"} {"signature": "@register(\"\")def lstm(nlstm=, layer_norm=False):", "body": "def network_fn(X, nenv=):nbatch = X.shape[]nsteps = nbatch // nenvh = tf.layers.flatten(X)M = tf.placeholder(tf.float32, [nbatch]) S = tf.placeholder(tf.float32, [nenv, *nlstm]) xs = batch_to_seq(h, nenv, nsteps)ms = batch_to_seq(M, nenv, nsteps)if layer_norm:h5, snew = utils.lnlstm(xs, ms, S, scope='', nh=nlstm)else:h5, snew = utils.lstm(xs, ms, S, scope='', nh=nlstm)h = seq_to_batch(h5)initial_state = np.zeros(S.shape.as_list(), dtype=float)return h, {'':S, '':M, '':snew, '':initial_state}return network_fn", "docstring": "Builds LSTM (Long-Short Term Memory) network to be used in a policy.\nNote that the resulting function returns not only the output of the LSTM\n(i.e. 
hidden state of lstm for each step in the sequence), but also a dictionary\nwith auxiliary tensors to be set as policy attributes.\n\nSpecifically,\n S is a placeholder to feed current state (LSTM state has to be managed outside policy)\n M is a placeholder for the mask (used to mask out observations after the end of the episode, but can be used for other purposes too)\n initial_state is a numpy array containing initial lstm state (usually zeros)\n state is the output LSTM state (to be fed into S at the next call)\n\n\nAn example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example\n\nParameters:\n----------\n\nnlstm: int LSTM hidden state size\n\nlayer_norm: bool if True, layer-normalized version of LSTM is used\n\nReturns:\n-------\n\nfunction that builds LSTM with a given input tensor / placeholder", "id": "f1373:m5"} {"signature": "@register(\"\")def conv_only(convs=[(, , ), (, , ), (, , )], **conv_kwargs):", "body": "def network_fn(X):out = tf.cast(X, tf.float32) / with tf.variable_scope(\"\"):for num_outputs, kernel_size, stride in convs:out = layers.convolution2d(out,num_outputs=num_outputs,kernel_size=kernel_size,stride=stride,activation_fn=tf.nn.relu,**conv_kwargs)return outreturn network_fn", "docstring": "convolutions-only net\n\nParameters:\n----------\n\nconv: list of triples (filter_number, filter_size, stride) specifying parameters for each layer.\n\nReturns:\n\nfunction that takes tensorflow tensor as input and returns the output of the last convolutional layer", "id": "f1373:m8"} {"signature": "def get_network_builder(name):", "body": "if callable(name):return nameelif name in mapping:return mapping[name]else:raise ValueError(''.format(name))", "docstring": "If you want to register your own network outside models.py, you just need:\n\nUsage Example:\n-------------\nfrom baselines.common.models import register\n@register(\"your_network_name\")\ndef your_network_define(**net_kwargs):\n ...\n return network_fn", "id": "f1373:m10"} {"signature": "def pretty_eta(seconds_left):", "body": "minutes_left = seconds_left // seconds_left %= hours_left = minutes_left // minutes_left %= days_left = hours_left // hours_left %= def helper(cnt, name):return \"\".format(str(cnt), name, ('' if cnt > else ''))if days_left > :msg = helper(days_left, '')if hours_left > :msg += '' + helper(hours_left, '')return msgif hours_left > :msg = helper(hours_left, '')if minutes_left > :msg += '' + helper(minutes_left, '')return msgif minutes_left > :return helper(minutes_left, '')return ''", "docstring": "Print the number of seconds in human readable format.\n\n Examples:\n 2 days\n 2 hours and 37 minutes\n less than a minute\n\n Paramters\n ---------\n seconds_left: int\n Number of seconds to be converted to the ETA\n Returns\n -------\n eta: str\n String representing the pretty ETA.", "id": "f1375:m2"} {"signature": "def boolean_flag(parser, name, default=False, help=None):", "body": "dest = name.replace('', '')parser.add_argument(\"\" + name, action=\"\", default=default, dest=dest, help=help)parser.add_argument(\"\" + name, action=\"\", dest=dest)", "docstring": "Add a boolean flag to argparse parser.\n\n Parameters\n ----------\n parser: argparse.Parser\n parser to add the flag to\n name: str\n -- will enable the flag, while --no- will disable it\n default: bool or None\n default value of the flag\n help: str\n help string for the flag", "id": "f1375:m3"} {"signature": "def get_wrapper_by_name(env, classname):", "body": "currentenv = envwhile True:if 
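The `boolean_flag` record above registers a paired on/off command-line switch. A hedged reconstruction follows; the corpus strips the string literals, so the exact option prefixes ("--" and "--no-") are assumptions based on the docstring.

```python
import argparse

def boolean_flag(parser, name, default=False, help=None):
    # Registers a --name / --no-name pair writing to the same destination.
    dest = name.replace('-', '_')
    parser.add_argument("--" + name, action="store_true", default=default,
                        dest=dest, help=help)
    parser.add_argument("--no-" + name, action="store_false", dest=dest)

parser = argparse.ArgumentParser()
boolean_flag(parser, "render", default=False, help="render the environment")
print(parser.parse_args(["--render"]).render)     # True
print(parser.parse_args(["--no-render"]).render)  # False
print(parser.parse_args([]).render)               # False (the default)
```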
classname == currentenv.class_name():return currentenvelif isinstance(currentenv, gym.Wrapper):currentenv = currentenv.envelse:raise ValueError(\"\" % classname)", "docstring": "Given an a gym environment possibly wrapped multiple times, returns a wrapper\n of class named classname or raises ValueError if no such wrapper was applied\n\n Parameters\n ----------\n env: gym.Env of gym.Wrapper\n gym environment\n classname: str\n name of the wrapper\n\n Returns\n -------\n wrapper: gym.Wrapper\n wrapper named classname", "id": "f1375:m4"} {"signature": "def relatively_safe_pickle_dump(obj, path, compression=False):", "body": "temp_storage = path + \"\"if compression:with tempfile.NamedTemporaryFile() as uncompressed_file:pickle.dump(obj, uncompressed_file)uncompressed_file.file.flush()with zipfile.ZipFile(temp_storage, \"\", compression=zipfile.ZIP_DEFLATED) as myzip:myzip.write(uncompressed_file.name, \"\")else:with open(temp_storage, \"\") as f:pickle.dump(obj, f)os.rename(temp_storage, path)", "docstring": "This is just like regular pickle dump, except from the fact that failure cases are\n different:\n\n - It's never possible that we end up with a pickle in corrupted state.\n - If a there was a different file at the path, that file will remain unchanged in the\n even of failure (provided that filesystem rename is atomic).\n - it is sometimes possible that we end up with useless temp file which needs to be\n deleted manually (it will be removed automatically on the next function call)\n\n The indended use case is periodic checkpoints of experiment state, such that we never\n corrupt previous checkpoints if the current one fails.\n\n Parameters\n ----------\n obj: object\n object to pickle\n path: str\n path to the output file\n compression: bool\n if true pickle will be compressed", "id": "f1375:m5"} {"signature": "def pickle_load(path, compression=False):", "body": "if compression:with zipfile.ZipFile(path, \"\", compression=zipfile.ZIP_DEFLATED) as myzip:with myzip.open(\"\") as f:return pickle.load(f)else:with open(path, \"\") as f:return pickle.load(f)", "docstring": "Unpickle a possible compressed pickle.\n\n Parameters\n ----------\n path: str\n path to the output file\n compression: bool\n if true assumes that pickle was compressed when created and attempts decompression.\n\n Returns\n -------\n obj: object\n the unpickled object", "id": "f1375:m6"} {"signature": "def __init__(self, gamma, init_value=None):", "body": "self._value = init_valueself._gamma = gamma", "docstring": "Keep a running estimate of a quantity. This is a bit like mean\n but more sensitive to recent changes.\n\n Parameters\n ----------\n gamma: float\n Must be between 0 and 1, where 0 is the most sensitive to recent\n changes.\n init_value: float or None\n Initial value of the estimate. 
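The `relatively_safe_pickle_dump` record above relies on the write-to-a-temp-file-then-rename pattern to avoid corrupt checkpoints. Here is a small sketch of the uncompressed path; the temp-file suffix and the use of `os.replace` (instead of the record's `os.rename`, for portability) are assumptions.

```python
import os
import pickle

def safe_pickle_dump(obj, path):
    # Write to a sibling temp file first, then atomically swap it into place,
    # so `path` is never left in a half-written, corrupted state.
    temp_storage = path + ".relatively_safe"
    with open(temp_storage, "wb") as f:
        pickle.dump(obj, f)
    os.replace(temp_storage, path)

safe_pickle_dump({"step": 42}, "/tmp/checkpoint.pkl")
with open("/tmp/checkpoint.pkl", "rb") as f:
    print(pickle.load(f))  # {'step': 42}
```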
If None, it will be set on the first update.", "id": "f1375:c1:m0"} {"signature": "def update(self, new_val):", "body": "if self._value is None:self._value = new_valelse:self._value = self._gamma * self._value + ( - self._gamma) * new_val", "docstring": "Update the estimate.\n\n Parameters\n ----------\n new_val: float\n new observated value of estimated quantity.", "id": "f1375:c1:m1"} {"signature": "def __float__(self):", "body": "return self._value", "docstring": "Get the current estimate", "id": "f1375:c1:m2"} {"signature": "def wrap_deepmind_retro(env, scale=True, frame_stack=):", "body": "env = WarpFrame(env)env = ClipRewardEnv(env)if frame_stack > :env = FrameStack(env, frame_stack)if scale:env = ScaledFloatFrame(env)return env", "docstring": "Configure environment for retro games, using config similar to DeepMind-style Atari in wrap_deepmind", "id": "f1376:m1"} {"signature": "def __init__(self, env, k, channel=):", "body": "gym.Wrapper.__init__(self, env)shp = env.observation_space.shapeself.channel = channelself.observation_space = gym.spaces.Box(low=, high=,shape=(shp[], shp[], shp[] + k - ),dtype=env.observation_space.dtype)self.k = kself.frames = deque([], maxlen=k)shp = env.observation_space.shape", "docstring": "Stack one channel (channel keyword) from previous frames", "id": "f1376:c1:m0"} {"signature": "def __init__(self, env, ratio):", "body": "gym.ObservationWrapper.__init__(self, env)(oldh, oldw, oldc) = env.observation_space.shapenewshape = (oldh//ratio, oldw//ratio, oldc)self.observation_space = gym.spaces.Box(low=, high=,shape=newshape, dtype=np.uint8)", "docstring": "Downsample images by a factor of ratio", "id": "f1376:c2:m0"} {"signature": "def __init__(self, env):", "body": "gym.ObservationWrapper.__init__(self, env)(oldh, oldw, _oldc) = env.observation_space.shapeself.observation_space = gym.spaces.Box(low=, high=,shape=(oldh, oldw, ), dtype=np.uint8)", "docstring": "Downsample images by a factor of ratio", "id": "f1376:c3:m0"} {"signature": "def logkv(key, val):", "body": "get_current().logkv(key, val)", "docstring": "Log a value of some diagnostic\nCall this once for each diagnostic quantity, each iteration\nIf called many times, last value will be used.", "id": "f1379:m1"} {"signature": "def logkv_mean(key, val):", "body": "get_current().logkv_mean(key, val)", "docstring": "The same as logkv(), but if called many times, values averaged.", "id": "f1379:m2"} {"signature": "def logkvs(d):", "body": "for (k, v) in d.items():logkv(k, v)", "docstring": "Log a dictionary of key-value pairs", "id": "f1379:m3"} {"signature": "def dumpkvs():", "body": "return get_current().dumpkvs()", "docstring": "Write all of the diagnostics from the current iteration", "id": "f1379:m4"} {"signature": "def log(*args, level=INFO):", "body": "get_current().log(*args, level=level)", "docstring": "Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).", "id": "f1379:m6"} {"signature": "def set_level(level):", "body": "get_current().set_level(level)", "docstring": "Set logging threshold on current logger.", "id": "f1379:m11"} {"signature": "def get_dir():", "body": "return get_current().get_dir()", "docstring": "Get directory that log files are being written to.\nwill be None if there is no output directory (i.e., if you didn't call start)", "id": "f1379:m13"} {"signature": "def profile(n):", "body": "def decorator_with_name(func):def func_wrapper(*args, **kwargs):with profile_kv(n):return func(*args, **kwargs)return 
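The `RunningAvg` records above describe an exponential moving average, new_estimate = gamma * old_estimate + (1 - gamma) * observation. A short worked sketch:

```python
# Self-contained sketch of the running-average estimate described above.
class RunningAvg:
    def __init__(self, gamma, init_value=None):
        self._value = init_value
        self._gamma = gamma

    def update(self, new_val):
        # The first observation seeds the estimate; later ones blend via gamma.
        if self._value is None:
            self._value = new_val
        else:
            self._value = self._gamma * self._value + (1.0 - self._gamma) * new_val

    def __float__(self):
        return self._value

avg = RunningAvg(gamma=0.9)
avg.update(10.0)
avg.update(20.0)
print(float(avg))  # 0.9 * 10 + 0.1 * 20 = 11.0
```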
func_wrapperreturn decorator_with_name", "docstring": "Usage:\n@profile(\"my_func\")\ndef my_func(): code", "id": "f1379:m15"} {"signature": "def configure(dir=None, format_strs=None, comm=None):", "body": "if dir is None:dir = os.getenv('')if dir is None:dir = osp.join(tempfile.gettempdir(),datetime.datetime.now().strftime(\"\"))assert isinstance(dir, str)os.makedirs(dir, exist_ok=True)log_suffix = ''rank = for varname in ['', '']:if varname in os.environ:rank = int(os.environ[varname])if rank > :log_suffix = \"\" % rankif format_strs is None:if rank == :format_strs = os.getenv('', '').split('')else:format_strs = os.getenv('', '').split('')format_strs = filter(None, format_strs)output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)log(''%dir)", "docstring": "If comm is provided, average all numerical stats across that comm", "id": "f1379:m17"} {"signature": "def read_tb(path):", "body": "import pandasimport numpy as npfrom glob import globimport tensorflow as tfif osp.isdir(path):fnames = glob(osp.join(path, \"\"))elif osp.basename(path).startswith(\"\"):fnames = [path]else:raise NotImplementedError(\"\"%path)tag2pairs = defaultdict(list)maxstep = for fname in fnames:for summary in tf.train.summary_iterator(fname):if summary.step > :for v in summary.summary.value:pair = (summary.step, v.simple_value)tag2pairs[v.tag].append(pair)maxstep = max(summary.step, maxstep)data = np.empty((maxstep, len(tag2pairs)))data[:] = np.nantags = sorted(tag2pairs.keys())for (colidx,tag) in enumerate(tags):pairs = tag2pairs[tag]for (step, value) in pairs:data[step-, colidx] = valuereturn pandas.DataFrame(data, columns=tags)", "docstring": "path : a tensorboard file OR a directory, where we will find all TB files\n of the form events.*", "id": "f1379:m24"} {"signature": "@staticmethoddef create_datapoint(name, columns, points):", "body": "return {\"\": \"\",\"\": name,\"\": columns,\"\": points,}", "docstring": "Create datastructure in InfluxDB 0.8 data format\n:param name:\n:param columns:\n:param points:\n:return:", "id": "f1381:c0:m1"} {"signature": "def create_series(self, num_series, batch_size=):", "body": "datapoints = []for _ in range(num_series):name = self.dummy_seriesname()datapoints.append(self.create_datapoint(name, [\"\"], [[]]))for data in tqdm(self.batch(datapoints, batch_size)):self.client.write_points(data)", "docstring": "Write one data point for each series name to initialize the series\n:param num_series: Number of different series names to create\n:param batch_size: Number of series to create at the same time\n:return:", "id": "f1381:c0:m3"} {"signature": "def write_points(self, series_name, start_date, end_date, resolution=, batch_size=):", "body": "start_ts = int(start_date.strftime(\"\"))end_ts = int(end_date.strftime(\"\"))range_seconds = end_ts - start_tsnum_datapoints = range_seconds / resolutiontimestamps = [start_ts + i * resolution for i in range(num_datapoints)]columns = [\"\", \"\"]for batch in tqdm(self.batch(timestamps, batch_size)):points = []for timestamp in batch:point = random.randint(, )points.append([timestamp, point])datapoint = self.create_datapoint(series_name, columns, points)self.client.write_points([datapoint])", "docstring": "Create sample datapoints between two dates with the given resolution (in seconds)\n:param series_name:\n:param start_date:\n:param end_date:\n:param resolution:\n:param batch_size:", "id": "f1381:c0:m5"} {"signature": "def batches(iterable, 
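A hypothetical usage sketch of the key/value logging helpers documented above (`configure`, `logkv`, `logkv_mean`, `dumpkvs`). It assumes the module in these records is importable as `baselines.logger`; only the call pattern, not the import path or directory, is taken from the records.

```python
from baselines import logger  # assumed import path for the module above

logger.configure(dir="/tmp/experiment")           # where progress files are written
for iteration in range(3):
    logger.logkv("iteration", iteration)          # last value wins if logged twice
    logger.logkv_mean("reward", 1.5 * iteration)  # averaged across repeated calls
    logger.dumpkvs()                              # flush this iteration's diagnostics
```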
n=):", "body": "l = len(iterable)for ndx in range(, l, n):yield iterable[ndx:min(ndx + n, l)]", "docstring": "From http://stackoverflow.com/a/8290508/270334\n:param n:\n:param iterable:", "id": "f1382:m0"} {"signature": "def __str__(self):", "body": "return ''.join([Keyword.LIST.upper(), Keyword.SERIES.upper(), self.series_stmt])", "docstring": "Standard string representation of drop query", "id": "f1388:c0:m1"} {"signature": "def __str__(self):", "body": "return ''.join([Keyword.DROP.upper(), Keyword.SERIES.upper(), self.series_stmt])", "docstring": "Standard string representation of drop query", "id": "f1389:c0:m1"} {"signature": "def get_earliest_date(self):", "body": "if not self.time_overlap:return TimeExpression.INFLUXDB_EPOCHreturn self.time_overlap.start", "docstring": "Get the smallest date in the query\nE.g. in simple queries like select * from foo where time > now() - 24h\nthis would be the date of yesterday\nThis can be useful for checking if very old data is queried for example.\n:return:", "id": "f1390:c0:m1"} {"signature": "def __str__(self):", "body": "statements = [(Keyword.SELECT, self.select_stmt),(Keyword.FROM, self.from_stmt)]if self.where_stmt:statements.append((Keyword.WHERE, self.where_stmt))if self.limit_stmt:statements.append((Keyword.LIMIT, self.limit_stmt))if self.group_by_stmt:statements.append((Keyword.GROUP_BY, ['', self.group_by_stmt]))return self._format_statements(statements)", "docstring": "Standard string representation of select query", "id": "f1390:c0:m6"} {"signature": "def get_resolution(self):", "body": "return None", "docstring": "The resolution is the interval given in the the group_by field.\n(e.g. in the expression \"group by time(10s)\" it would be 10s)\nOnly Select queries can have a resolution.\nAll others don't, so we set this to None by default.", "id": "f1391:c0:m3"} {"signature": "def get_duration(self):", "body": "return None", "docstring": "The duration is the timespan to query data for.\nIt can be set in the \"where\" clause.\nFor instance, in the query:\n\"select * from myseries where time > now() - 24h\"\nthe duration would be 24h.\nDurations can only be given in Select and Delete queries\nso we set this to None by default.", "id": "f1391:c0:m4"} {"signature": "def get_datapoints(self):", "body": "return None", "docstring": "Returns an estimate for the number datapoints that this query\nwill return, where datapoints = duration * resolution\nDatapoints can only occur in Select and Delete queries\nso we set this to None by default.", "id": "f1391:c0:m5"} {"signature": "def __str__(self):", "body": "statements = [Keyword.DELETE.upper(), Keyword.FROM.upper(), self.from_stmt]if self.where_stmt:statements.extend([Keyword.WHERE.upper(), self.where_stmt])return ''.join(statements)", "docstring": "Standard string representation of delete query", "id": "f1392:c0:m1"} {"signature": "@staticmethoddef sanitize(query):", "body": "query = urllib.unquote(query).decode('')return query.lower()", "docstring": "Prepared raw query for parsing\n:param query: The query to sanitize", "id": "f1393:c0:m0"} {"signature": "def handle_error(self, request, client_address):", "body": "cls, e = sys.exc_info()[:]if cls is socket.error or cls is ssl.SSLError:passelse:return HTTPServer.handle_error(self, request, client_address)", "docstring": "Overwrite error handling to suppress socket/ssl related errors\n:param client_address: Address of client\n:param request: Request causing an error", "id": "f1396:c0:m1"} {"signature": "def _check_query(self, query_string):", 
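The `batches` helper at the start of this record group simply yields fixed-size slices of an iterable; a runnable version with a concrete example:

```python
def batches(iterable, n=1):
    # Yield consecutive slices of length n (the last one may be shorter).
    l = len(iterable)
    for ndx in range(0, l, n):
        yield iterable[ndx:min(ndx + n, l)]

print(list(batches(list(range(7)), n=3)))
# [[0, 1, 2], [3, 4, 5], [6]]
```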
"body": "return self.protector.check(query_string)", "docstring": "Check if the query_string is allowed by the Protector rule set", "id": "f1397:c0:m5"} {"signature": "@staticmethoddef get_queries(parameters):", "body": "parsed_params = urlparse.parse_qs(parameters)if '' not in parsed_params:return []queries = parsed_params['']if not isinstance(queries, list):queries = [queries]return queries", "docstring": "Get a list of all queries (q=... parameters) from an URL parameter string\n:param parameters: The url parameter list", "id": "f1397:c0:m6"} {"signature": "def _handle_request(self, scheme, netloc, path, headers, body=None, method=\"\"):", "body": "backend_url = \"\".format(scheme, netloc, path)try:response = self.http_request.request(backend_url, method=method, body=body, headers=dict(headers))self._return_response(response)except Exception as e:body = \"\".format(e.message)logging.debug(body)self.send_error(httplib.SERVICE_UNAVAILABLE, body)", "docstring": "Run the actual request", "id": "f1397:c0:m9"} {"signature": "def send_error(self, code, message=None):", "body": "message = message.strip()self.log_error(\"\", code, message)self.send_response(code)self.send_header(\"\", \"\")self.send_header('', '')self.end_headers()if message:self.wfile.write(message)", "docstring": "Send and log plain text error reply.\n:param code:\n:param message:", "id": "f1397:c0:m11"} {"signature": "def _return_response(self, response):", "body": "self.filter_headers(response.msg)if \"\" in response.msg:del response.msg[\"\"]self.send_response(response.status, response.reason)for header_key, header_value in response.msg.items():self.send_header(header_key, header_value)body = response.read()self.send_header('', str(len(body)))self.end_headers()self.wfile.write(body)", "docstring": ":type result: HTTPResponse", "id": "f1397:c0:m12"} {"signature": "def naughty_strings(filepath=FILEPATH):", "body": "strings = []with open(filepath, '') as f:strings = f.readlines()strings = [x.strip(u'') for x in strings]strings = [x for x in strings if x and not x.startswith(u'')]strings.insert(, u\"\")return strings", "docstring": "Get the list of naughty_strings.\n\n By default this will get the strings from the blns.txt file\n\n Code is a simple port of what is already in the /scripts directory\n\n :param filepath: Optional filepath the the blns.txt file\n :returns: The list of naughty strings", "id": "f1421:m0"} {"signature": "def load_config():", "body": "config = flatten(default_config.DEFAULT_CONFIG)cli_config = flatten(parse_args())if \"\" in cli_config:logging.info(\"\".format(cli_config['']))configfile = parse_configfile(cli_config[''])config = overwrite_config(config, configfile)config = overwrite_config(config, cli_config)if '' in config:if config[''] == :logging.getLogger().setLevel(logging.INFO)elif config[''] > :logging.getLogger().setLevel(logging.DEBUG)return ObjectView(config)", "docstring": "Load settings from default config and optionally\noverwrite with config file and commandline parameters\n(in that order).", "id": "f1424:m0"} {"signature": "def parse_configfile(configfile):", "body": "with open(configfile) as f:try:return yaml.safe_load(f)except Exception as e:logging.fatal(\"\", e)exit(-)", "docstring": "Read settings from file\n:param configfile:", "id": "f1424:m2"} {"signature": "def flatten(d, parent_key='', sep=''):", "body": "items = []for k, v in d.items():new_key = parent_key + sep + k if parent_key else kif isinstance(v, collections.MutableMapping):items.extend(flatten(v, new_key, 
sep=sep).items())else:items.append((new_key, v))return dict(items)", "docstring": "Flatten keys in a dictionary\nExample:\nflatten({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]})\n=> {'a': 1, 'c_a': 2, 'c_b_x': 5, 'd': [1, 2, 3], 'c_b_y': 10}\n:param d: Dictionary to flatten\n:param sep: Separator between keys\n:param parent_key: Key to merge with", "id": "f1424:m3"} {"signature": "def main():", "body": "config = loader.load_config()if config.version:show_version()if config.show_rules:show_rules()if not config.configfile and not (hasattr(config, \"\") or hasattr(config, \"\")):show_configfile_warning()check_write_permissions(config.logfile)start_proxy(config)", "docstring": "Setup consumer", "id": "f1428:m0"} {"signature": "def check_write_permissions(file):", "body": "try:open(file, '')except IOError:print(\"\"\"\".format(file))sys.exit()", "docstring": "Check if we can write to the given file\n\nOtherwise since we might detach the process to run in the background\nwe might never find out that writing failed and get an ugly\nexit message on startup. For example:\nERROR: Child exited immediately with non-zero exit code 127\n\nSo we catch this error upfront and print a nicer error message\nwith a hint on how to fix it.", "id": "f1428:m1"} {"signature": "def show_rules():", "body": "from rules.loader import import_rulesfrom rules.rule_list import all_rulesrules = import_rules(all_rules)print(\"\")for name, rule in rules.iteritems():heading = \"\".format(rule.description(), name)print(\"\".format(heading))for line in rule.reason():print(line)print(\"\")sys.exit()", "docstring": "Show the list of available rules and quit\n:return:", "id": "f1428:m2"} {"signature": "def show_version():", "body": "from version import __version__print(\"\".format(__package__, __version__))sys.exit()", "docstring": "Show program version an quit\n:return:", "id": "f1428:m3"} {"signature": "def start_proxy(config):", "body": "protector = Protector(config.rules, config.whitelist)protector_daemon = ProtectorDaemon(config=config, protector=protector)daemon = daemonocle.Daemon(pidfile=config.pidfile,detach=(not config.foreground),shutdown_callback=shutdown,worker=protector_daemon.run)daemon.do_action(config.command)", "docstring": "Start the http proxy\n:param config:\n:return:", "id": "f1428:m6"} {"signature": "def __init__(self, rules, whitelist=[], safe_mode=True):", "body": "self.parser = QueryParser()self.guard = Guard(rules)self.sanitizer = Sanitizer()self.whitelist = whitelistself.safe_mode = safe_mode", "docstring": ":param rules: A list of rules to evaluate\n:param safe_mode: If set to True, allow the query in case it can not be parsed\n:return:", "id": "f1429:c0:m0"} {"signature": "def check(self, query):", "body": "if query.get_type() in {Keyword.LIST, Keyword.DROP}:series = query.series_stmtelse:series = query.from_stmtif len(series) >= self.min_series_name_length:return Ok(True)return Err(\"\")", "docstring": ":param query:", "id": "f1432:c0:m3"} {"signature": "def check(self, query):", "body": "if query.get_type() not in {Keyword.SELECT}:return Ok(True)earliest_date = query.get_earliest_date()if earliest_date >= self.min_start_date:return Ok(True)if query.limit_stmt:return Ok(True)return Err((\"\"\"\").format(self.min_start_date.strftime(\"\"),earliest_date))", "docstring": ":param query:", "id": "f1433:c0:m3"} {"signature": "def check(self, query):", "body": "if query.get_type() not in {Keyword.SELECT, Keyword.DELETE}:return Ok(True)datapoints = query.get_datapoints()if datapoints <= 
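A runnable version of the dictionary-flattening helper documented above, reproducing its docstring example; `collections.abc` is used here because the plain `collections.MutableMapping` alias in the record was removed in Python 3.10.

```python
import collections.abc

def flatten(d, parent_key='', sep='_'):
    # Recursively merge nested keys: {'c': {'a': 2}} -> {'c_a': 2}.
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, collections.abc.MutableMapping):
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)

print(flatten({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}, 'd': [1, 2, 3]}))
# {'a': 1, 'c_a': 2, 'c_b_x': 5, 'c_b_y': 10, 'd': [1, 2, 3]}
```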
self.max_datapoints:return Ok(True)return Err((\"\"\"\"\"\"\"\").format(datapoints))", "docstring": ":param query:", "id": "f1434:c0:m3"} {"signature": "@staticmethoddef description():", "body": "pass", "docstring": ":return: A short description of the rule", "id": "f1436:c0:m0"} {"signature": "@staticmethoddef reason():", "body": "pass", "docstring": "When and why the rule is useful.\n\nThis should return a list of lines instead of a long string.\nIt's easier to format line breaks this way.\n\n:return: The reason for the rule", "id": "f1436:c0:m1"} {"signature": "def check(self, query):", "body": "pass", "docstring": "Check if a given query is permitted\n:param query:\n:return: result.Ok() if permitted, result.Err() if not.", "id": "f1436:c0:m2"} {"signature": "def check(self, query):", "body": "if query.get_type() != Keyword.DROP:return Ok(True)return Err(\"\")", "docstring": ":param query:", "id": "f1438:c0:m2"} {"signature": "def import_rule(path):", "body": "rule = importlib.import_module(path)return rule", "docstring": "Load the given rule\n:param path: Import path to rule", "id": "f1439:m1"} {"signature": "def check(self, query):", "body": "if query.get_type() != Keyword.DELETE:return Ok(True)return Err(\"\")", "docstring": ":param query:", "id": "f1440:c0:m2"} {"signature": "def check(self, query):", "body": "if query.get_type() not in {Keyword.SELECT}:return Ok(True)if query.get_resolution() > :return Ok(True)return Err(\"\")", "docstring": ":param query:", "id": "f1441:c0:m2"} {"signature": "def __init__(self):", "body": "self.keywords = {Keyword.SELECT, Keyword.FROM, Keyword.WHERE,Keyword.LIMIT, Keyword.ORDER,Keyword.GROUP_BY, Keyword.DROP, Keyword.DELETE,Keyword.LIST, Keyword.SERIES}self.time_parser = TimeRangeParser()self.duration_parser = DurationParser()self.resolution_parser = ResolutionParser()self.datapoints_parser = DatapointsParser()self.parsed_time = Noneself.parsed_resolution = Noneself.parsed_datapoints = Noneself.parsed_time_overlap = None", "docstring": "Initialize query parsers and valid InfluxDB keywords", "id": "f1443:c0:m0"} {"signature": "def parse(self, raw_query_string):", "body": "self._reset()if not isinstance(raw_query_string, basestring):return Nonequery_string = self._cleanup(raw_query_string)parts = self._split(query_string)parts = self._sanitize_keywords(parts)tokens = self._tokenize(parts)if tokens:self.parsed_resolution = self._parse_resolution(tokens)self.parsed_time = self._parse_time(tokens)self.parsed_time_overlap = self._parse_duration(self.parsed_time)self.parsed_datapoints = self._parse_datapoints(self.parsed_time_overlap.timespan_seconds(),self.parsed_resolution,self.parse_keyword(Keyword.LIMIT, tokens))return self.create_query_object(tokens)", "docstring": "Parse a raw query string into fields\n:param raw_query_string: Raw InfluxDB query string", "id": "f1443:c0:m1"} {"signature": "@staticmethoddef _split(query):", "body": "return [word.strip() for word in query.split('')]", "docstring": "Split query strings into tokens\n:param query: A sanitized query string", "id": "f1443:c0:m3"} {"signature": "def create_query_object(self, tokens):", "body": "try:query_type = tokens['']return getattr(self, '' % query_type)(tokens)except (KeyError, TypeError):return self.invalid_query(tokens)", "docstring": "Analyze query tokens and create an InfluxDBStatement from them\nReturn None on error\n:param tokens: A list of InfluxDB query tokens", "id": "f1443:c0:m8"} {"signature": "def create_select_query(self, tokens):", "body": "if not 
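The rule records above all follow the same contract: `check(query)` returns `Ok(True)` when the query is allowed and `Err(message)` when it is not. The sketch below illustrates that pattern with a drop-query rule; the `Ok`/`Err` classes here are simplified stand-ins for the real result types, and `is_ok` is an invented helper for the example only.

```python
# Stand-in result types, for illustration only.
class Ok:
    def __init__(self, value):
        self.value = value
    def is_ok(self):
        return True

class Err:
    def __init__(self, message):
        self.message = message
    def is_ok(self):
        return False

class PreventDropRule:
    # Mirrors the documented behaviour: only DROP queries are rejected.
    def check(self, query_type):
        if query_type != "drop":
            return Ok(True)
        return Err("Drop queries are not allowed")

rule = PreventDropRule()
print(rule.check("select").is_ok())  # True
print(rule.check("drop").is_ok())    # False
```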
tokens[Keyword.SELECT]:return Noneif not tokens[Keyword.FROM]:return Nonereturn SelectQuery(self.parse_keyword(Keyword.SELECT, tokens),self.parse_keyword(Keyword.FROM, tokens),where_stmt=self.parse_keyword(Keyword.WHERE, tokens),limit_stmt=self.parse_keyword(Keyword.LIMIT, tokens),group_by_stmt=self.parse_group(tokens),duration=self.parsed_time_overlap.timespan_seconds(),resolution=self.parsed_resolution,time_ranges=self.parsed_time,time_overlap=self.parsed_time_overlap,datapoints=self.parsed_datapoints)", "docstring": "Parse tokens of select query\n:param tokens: A list of InfluxDB query tokens", "id": "f1443:c0:m9"} {"signature": "def create_list_query(self, tokens):", "body": "if not tokens[Keyword.SERIES]:tokens[Keyword.SERIES] = ''return ListQuery(self.parse_keyword(Keyword.SERIES, tokens))", "docstring": "Parse tokens of list query\n:param tokens: A list of InfluxDB query tokens", "id": "f1443:c0:m10"} {"signature": "def create_drop_query(self, tokens):", "body": "if not tokens[Keyword.SERIES]:return Nonereturn DropQuery(self.parse_keyword(Keyword.SERIES, tokens))", "docstring": "Parse tokens of drop query\n:param tokens: A list of InfluxDB query tokens", "id": "f1443:c0:m11"} {"signature": "def create_delete_query(self, tokens):", "body": "if not tokens[Keyword.FROM]:return Nonewhere_stmt = self.parse_keyword(Keyword.WHERE, tokens)if where_stmt:if not where_stmt.startswith(''):return Nonereturn DeleteQuery(self.parse_keyword(Keyword.FROM, tokens),self.parse_keyword(Keyword.WHERE, tokens))", "docstring": "Parse tokens of delete query\n:param tokens: A list of InfluxDB query tokens", "id": "f1443:c0:m12"} {"signature": "@staticmethoddef invalid_query(tokens):", "body": "logging.warning(\"\")try:logging.warning(\"\", ''.join(tokens))except TypeError:passreturn None", "docstring": "Handler for invalid queries\n:param tokens: A list of InfluxDB query tokens", "id": "f1443:c0:m13"} {"signature": "def _parse_time(self, tokens):", "body": "return self.time_parser.parse(self.parse_keyword(Keyword.WHERE, tokens))", "docstring": "Parse the date range for the query\n\nE.g. WHERE time > now() - 48h AND time < now() - 24h\nwould result in DateRange(datetime_start, datetime_end)\nwhere\ndatetime_start would be parsed from now() - 48h\nand\ndatetime_end would be parsed from now() - 24h\n\n:param tokens:\n:return:", "id": "f1443:c0:m14"} {"signature": "def _parse_resolution(self, tokens):", "body": "return self.resolution_parser.parse(self.parse_keyword(Keyword.GROUP_BY, tokens))", "docstring": "Parse resolution from the GROUP BY statement.\nE.g. GROUP BY time(10s) would mean a 10 second resolution\n:param tokens:\n:return:", "id": "f1443:c0:m15"} {"signature": "def _parse_duration(self, parsed_time):", "body": "return self.duration_parser.parse(parsed_time)", "docstring": "Parse duration in seconds for the given query.\nThe duration is parsed from the DateRange of the query.\nFor example, consider the following query:\nselect * from bla where time > now() - 2h\nThe DateRange would be [now() - 2h, now()]\nExpressing this as seconds would be 2h => 2*60*60 seconds\nThis would be the duration of the query.\n\n:param parsed_time:\n:return:", "id": "f1443:c0:m16"} {"signature": "def _parse_datapoints(self, parsed_duration, parsed_resolution, limit):", "body": "return self.datapoints_parser.parse(parsed_duration, parsed_resolution, limit)", "docstring": "Parse the number of datapoints of a query.\nThis can be calculated from the given duration and resolution of the query.\nE.g. 
if the query has a duation of 2*60*60 = 7200 seconds and a resolution of 10 seconds\nthen the number of datapoints would be 7200/10 => 7200 datapoints.\n\n:param parsed_duration:\n:param parsed_resolution:\n:param limit:\n:return:", "id": "f1443:c0:m17"} {"signature": "def parse(self, group_by_stmt):", "body": "if not group_by_stmt:return Resolution.MAX_RESOLUTIONm = self.GROUP_BY_TIME_PATTERN.match(group_by_stmt)if not m:return Nonevalue = int(m.group())unit = m.group()resolution = self.convert_to_seconds(value, unit)return max(resolution, Resolution.MAX_RESOLUTION)", "docstring": "Extract the data resolution of a query in seconds\nE.g. \"group by time(99s)\" => 99\n\n:param group_by_stmt: A raw InfluxDB group by statement", "id": "f1445:c0:m1"} {"signature": "@staticmethoddef parse(duration_seconds, resolution_seconds=Resolution.MAX_RESOLUTION, limit=None):", "body": "if not duration_seconds or duration_seconds < :return if not resolution_seconds or resolution_seconds <= :return Nonenum_datapoints = duration_seconds / resolution_secondsif limit:num_datapoints = min(int(limit), num_datapoints)return int(math.ceil(num_datapoints))", "docstring": "num_datapoints = min(duration/resolution, limit)\n\n:param duration_seconds: Time duration (in seconds) for which datapoints should be returned\n:param resolution_seconds: Time interval (in seconds) between data points\n:param limit: Maximum number of datapoints to return", "id": "f1447:c0:m1"} {"signature": "@classmethoddef get_object(cls, api_token, cert_id):", "body": "certificate = cls(token=api_token, id=cert_id)certificate.load()return certificate", "docstring": "Class method that will return a Certificate object by its ID.", "id": "f1448:c0:m1"} {"signature": "def load(self):", "body": "data = self.get_data(\"\" % self.id)certificate = data[\"\"]for attr in certificate.keys():setattr(self, attr, certificate[attr])return self", "docstring": "Load the Certificate object from DigitalOcean.\n\nRequires self.id to be set.", "id": "f1448:c0:m2"} {"signature": "def create(self):", "body": "params = {\"\": self.name,\"\": self.type,\"\": self.dns_names,\"\": self.private_key,\"\": self.leaf_certificate,\"\": self.certificate_chain}data = self.get_data(\"\", type=POST, params=params)if data:self.id = data['']['']self.not_after = data['']['']self.sha1_fingerprint = data['']['']self.created_at = data['']['']self.type = data['']['']self.dns_names = data['']['']self.state = data['']['']return self", "docstring": "Create the Certificate", "id": "f1448:c0:m3"} {"signature": "def destroy(self):", "body": "return self.get_data(\"\" % self.id, type=DELETE)", "docstring": "Delete the Certificate", "id": "f1448:c0:m4"} {"signature": "def assert_url_query_equal(self, url1, url2):", "body": "base1, qlist1 = self.split_url(url1)base2, qlist2 = self.split_url(url2)self.assertEqual(base1, base2)self.assertEqual(qlist1, qlist2)", "docstring": "Test if two URL queries are equal\n\n The key=value pairs after the ? 
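A worked example of the datapoint estimate described in the records above: the number of datapoints is min(duration / resolution, limit), rounded up. The function below is a standalone sketch rather than the parser class itself.

```python
import math

def estimate_datapoints(duration_seconds, resolution_seconds, limit=None):
    # Mirrors the documented rule: num_datapoints = min(duration/resolution, limit).
    if not duration_seconds or duration_seconds < 0:
        return 0
    if not resolution_seconds or resolution_seconds <= 0:
        return None
    num_datapoints = duration_seconds / resolution_seconds
    if limit:
        num_datapoints = min(int(limit), num_datapoints)
    return int(math.ceil(num_datapoints))

# A 2 hour (7200 s) range grouped into 10 s buckets:
print(estimate_datapoints(2 * 60 * 60, 10))             # 720
print(estimate_datapoints(2 * 60 * 60, 10, limit=500))  # 500, capped by LIMIT
```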
in a URL can occur in any order\n (especially since dicts in python 3 are not deterministic across runs).\n The method sorts the key=value pairs and then compares the URLs.", "id": "f1460:c0:m3"} {"signature": "@classmethoddef get_object(cls, api_token, domain_name):", "body": "domain = cls(token=api_token, name=domain_name)domain.load()return domain", "docstring": "Class method that will return a Domain object by ID.", "id": "f1464:c0:m1"} {"signature": "def destroy(self):", "body": "return self.get_data(\"\" % self.name, type=DELETE)", "docstring": "Destroy the domain by name", "id": "f1464:c0:m3"} {"signature": "def create_new_domain_record(self, *args, **kwargs):", "body": "data = {\"\": kwargs.get(\"\", None),\"\": kwargs.get(\"\", None),\"\": kwargs.get(\"\", None)}if kwargs.get(\"\", None):data[''] = kwargs.get(\"\", None)if kwargs.get(\"\", None):data[''] = kwargs.get(\"\", None)if kwargs.get(\"\", None):data[''] = kwargs.get(\"\", None)return self.get_data(\"\" % self.name,type=POST,params=data)", "docstring": "Create new domain record.\nhttps://developers.digitalocean.com/#create-a-new-domain-record\n\nArgs:\n type: The record type (A, MX, CNAME, etc).\n name: The host name, alias, or service being defined by the record\n data: Variable data depending on record type.\n\nOptional Args:\n priority: The priority of the host\n port: The port that the service is accessible on\n weight: The weight of records with the same priority", "id": "f1464:c0:m4"} {"signature": "def create(self):", "body": "data = {\"\": self.name,\"\": self.ip_address,}domain = self.get_data(\"\", type=POST, params=data)return domain", "docstring": "Create new doamin", "id": "f1464:c0:m5"} {"signature": "def get_records(self, params=None):", "body": "if params is None:params = {}records = []data = self.get_data(\"\" % self.name, type=GET, params=params)for record_data in data['']:record = Record(domain_name=self.name, **record_data)record.token = self.tokenrecords.append(record)return records", "docstring": "Returns a list of Record objects", "id": "f1464:c0:m6"} {"signature": "@classmethoddef get_object(cls, api_token, ip):", "body": "floating_ip = cls(token=api_token, ip=ip)floating_ip.load()return floating_ip", "docstring": "Class method that will return a FloatingIP object by its IP.\n\nArgs:\n api_token: str - token\n ip: str - floating ip address", "id": "f1465:c0:m1"} {"signature": "def load(self):", "body": "data = self.get_data('' % self.ip, type=GET)floating_ip = data['']for attr in floating_ip.keys():setattr(self, attr, floating_ip[attr])return self", "docstring": "Load the FloatingIP object from DigitalOcean.\n\nRequires self.ip to be set.", "id": "f1465:c0:m2"} {"signature": "def create(self, *args, **kwargs):", "body": "data = self.get_data('',type=POST,params={'': self.droplet_id})if data:self.ip = data['']['']self.region = data['']['']return self", "docstring": "Creates a FloatingIP and assigns it to a Droplet.\n\nNote: Every argument and parameter given to this method will be\nassigned to the object.\n\nArgs:\n droplet_id: int - droplet id", "id": "f1465:c0:m3"} {"signature": "def reserve(self, *args, **kwargs):", "body": "data = self.get_data('',type=POST,params={'': self.region_slug})if data:self.ip = data['']['']self.region = data['']['']return self", "docstring": "Creates a FloatingIP in a region without assigning\nit to a specific Droplet.\n\nNote: Every argument and parameter given to this method will be\nassigned to the object.\n\nArgs:\n region_slug: str - region's slug (e.g. 
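A hypothetical usage sketch of the Domain helpers documented above (`create`, `create_new_domain_record`, `get_records`). The token, domain name, IP address, and the record attributes printed at the end are placeholders/assumptions; a valid DigitalOcean API token would be required to actually run this.

```python
import digitalocean

domain = digitalocean.Domain(token="my-token",            # placeholder token
                             name="example.com",
                             ip_address="203.0.113.10")
domain.create()

# Add an A record, using the type/name/data arguments described above.
domain.create_new_domain_record(type="A", name="www", data="203.0.113.10")

for record in domain.get_records():
    print(record.type, record.name, record.data)
```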
'nyc3')", "id": "f1465:c0:m4"} {"signature": "def destroy(self):", "body": "return self.get_data('' % self.ip, type=DELETE)", "docstring": "Destroy the FloatingIP", "id": "f1465:c0:m5"} {"signature": "def assign(self, droplet_id):", "body": "return self.get_data(\"\" % self.ip,type=POST,params={\"\": \"\", \"\": droplet_id})", "docstring": "Assign a FloatingIP to a Droplet.\n\nArgs:\n droplet_id: int - droplet id", "id": "f1465:c0:m6"} {"signature": "def unassign(self):", "body": "return self.get_data(\"\" % self.ip,type=POST,params={\"\": \"\"})", "docstring": "Unassign a FloatingIP.", "id": "f1465:c0:m7"} {"signature": "@classmethoddef get_object(cls, api_token, droplet_id):", "body": "droplet = cls(token=api_token, id=droplet_id)droplet.load()return droplet", "docstring": "Class method that will return a Droplet object by ID.\n\n Args:\n api_token (str): token\n droplet_id (int): droplet id", "id": "f1466:c3:m1"} {"signature": "def get_data(self, *args, **kwargs):", "body": "data = super(Droplet, self).get_data(*args, **kwargs)if \"\" in kwargs:if kwargs[\"\"] == POST:self.__check_actions_in_data(data)return data", "docstring": "Customized version of get_data to perform __check_actions_in_data", "id": "f1466:c3:m4"} {"signature": "def load(self):", "body": "droplets = self.get_data(\"\" % self.id)droplet = droplets['']for attr in droplet.keys():setattr(self, attr, droplet[attr])for net in self.networks['']:if net[''] == '':self.private_ip_address = net['']if net[''] == '':self.ip_address = net['']if self.networks['']:self.ip_v6_address = self.networks[''][]['']if \"\" in self.features:self.backups = Trueelse:self.backups = Falseif \"\" in self.features:self.ipv6 = Trueelse:self.ipv6 = Falseif \"\" in self.features:self.private_networking = Trueelse:self.private_networking = Falseif \"\" in droplets:self.tags = droplets[\"\"]return self", "docstring": "Fetch data about droplet - use this instead of get_data()", "id": "f1466:c3:m5"} {"signature": "def _perform_action(self, params, return_dict=True):", "body": "action = self.get_data(\"\" % self.id,type=POST,params=params)if return_dict:return actionelse:action = action[u'']return_action = Action(token=self.token)for attr in action.keys():setattr(return_action, attr, action[attr])return return_action", "docstring": "Perform a droplet action.\n\nArgs:\n params (dict): parameters of the action\n\nOptional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n\nReturns dict or Action", "id": "f1466:c3:m6"} {"signature": "def power_on(self, return_dict=True):", "body": "return self._perform_action({'': ''}, return_dict)", "docstring": "Boot up the droplet\n\nOptional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n\nReturns dict or Action", "id": "f1466:c3:m7"} {"signature": "def shutdown(self, return_dict=True):", "body": "return self._perform_action({'': ''}, return_dict)", "docstring": "shutdown the droplet\n\nOptional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n\nReturns dict or Action", "id": "f1466:c3:m8"} {"signature": "def reboot(self, return_dict=True):", "body": "return self._perform_action({'': ''}, return_dict)", "docstring": "restart the droplet\n\nOptional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n\nReturns dict or Action", "id": "f1466:c3:m9"} {"signature": "def power_cycle(self, return_dict=True):", "body": "return self._perform_action({'': 
''}, return_dict)", "docstring": "restart the droplet\n\nOptional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n\nReturns dict or Action", "id": "f1466:c3:m10"} {"signature": "def power_off(self, return_dict=True):", "body": "return self._perform_action({'': ''}, return_dict)", "docstring": "restart the droplet\n\nOptional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n\nReturns dict or Action", "id": "f1466:c3:m11"} {"signature": "def reset_root_password(self, return_dict=True):", "body": "return self._perform_action({'': ''}, return_dict)", "docstring": "reset the root password\n\nOptional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n\nReturns dict or Action", "id": "f1466:c3:m12"} {"signature": "def resize(self, new_size_slug, return_dict=True, disk=True):", "body": "options = {\"\": \"\", \"\": new_size_slug}if disk: options[\"\"] = \"\"return self._perform_action(options, return_dict)", "docstring": "Resize the droplet to a new size slug.\n https://developers.digitalocean.com/documentation/v2/#resize-a-droplet\n\n Args:\n new_size_slug (str): name of new size\n\n Optional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n disk (bool): If a permanent resize, with disk changes included.\n\n Returns dict or Action", "id": "f1466:c3:m13"} {"signature": "def take_snapshot(self, snapshot_name, return_dict=True, power_off=False):", "body": "if power_off is True and self.status != \"\":action = self.power_off(return_dict=False)action.wait()self.load()return self._perform_action({\"\": \"\", \"\": snapshot_name},return_dict)", "docstring": "Take a snapshot!\n\n Args:\n snapshot_name (str): name of snapshot\n\n Optional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n power_off (bool): Before taking the snapshot the droplet will be\n turned off with another API call. 
It will wait until the\n droplet will be powered off.\n\n Returns dict or Action", "id": "f1466:c3:m14"} {"signature": "def restore(self, image_id, return_dict=True):", "body": "return self._perform_action({\"\": \"\", \"\": image_id},return_dict)", "docstring": "Restore the droplet to an image ( snapshot or backup )\n\n Args:\n image_id (int): id of image\n\n Optional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n\n Returns dict or Action", "id": "f1466:c3:m15"} {"signature": "def rebuild(self, image_id=None, return_dict=True):", "body": "if not image_id:image_id = self.image['']return self._perform_action({\"\": \"\", \"\": image_id},return_dict)", "docstring": "Restore the droplet to an image ( snapshot or backup )\n\n Args:\n image_id (int): id of image\n\n Optional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n\n Returns dict or Action", "id": "f1466:c3:m16"} {"signature": "def enable_backups(self, return_dict=True):", "body": "return self._perform_action({'': ''}, return_dict)", "docstring": "Enable automatic backups\n\nOptional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n\nReturns dict or Action", "id": "f1466:c3:m17"} {"signature": "def disable_backups(self, return_dict=True):", "body": "return self._perform_action({'': ''}, return_dict)", "docstring": "Disable automatic backups\n\nOptional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n\nReturns dict or Action", "id": "f1466:c3:m18"} {"signature": "def destroy(self):", "body": "return self.get_data(\"\" % self.id, type=DELETE)", "docstring": "Destroy the droplet\n\nReturns dict", "id": "f1466:c3:m19"} {"signature": "def rename(self, name, return_dict=True):", "body": "return self._perform_action({'': '', '': name},return_dict)", "docstring": "Rename the droplet\n\n Args:\n name (str): new name\n\n Optional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n\n Returns dict or Action", "id": "f1466:c3:m20"} {"signature": "def enable_private_networking(self, return_dict=True):", "body": "return self._perform_action({'': ''},return_dict)", "docstring": "Enable private networking on an existing Droplet where available.\n\nOptional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n\nReturns dict or Action", "id": "f1466:c3:m21"} {"signature": "def enable_ipv6(self, return_dict=True):", "body": "return self._perform_action({'': ''}, return_dict)", "docstring": "Enable IPv6 on an existing Droplet where available.\n\nOptional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n\nReturns dict or Action", "id": "f1466:c3:m22"} {"signature": "def change_kernel(self, kernel, return_dict=True):", "body": "if type(kernel) != Kernel:raise BadKernelObject(\"\")return self._perform_action({'': '', '': kernel.id},return_dict)", "docstring": "Change the kernel to a new one\n\n Args:\n kernel : instance of digitalocean.Kernel.Kernel\n\n Optional Args:\n return_dict (bool): Return a dict when True (default),\n otherwise return an Action.\n\n Returns dict or Action", "id": "f1466:c3:m23"} {"signature": "@staticmethoddef __get_ssh_keys_id_or_fingerprint(ssh_keys, token, name):", "body": "ssh_keys_id = list()for ssh_key in ssh_keys:if type(ssh_key) in [int, type( ** )]:ssh_keys_id.append(int(ssh_key))elif type(ssh_key) == 
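A hypothetical sketch tying together the Droplet action helpers documented above: `shutdown(return_dict=False)` yields an Action that can be waited on, and `take_snapshot` can power the droplet off first. The token, droplet id, and snapshot name are placeholders.

```python
import digitalocean

droplet = digitalocean.Droplet.get_object(api_token="my-token", droplet_id=12345)

action = droplet.shutdown(return_dict=False)  # an Action object instead of a dict
action.wait()                                 # poll until the action completes

# Snapshot after powering off, as the take_snapshot docstring describes.
droplet.take_snapshot("nightly-backup", power_off=True)
```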
SSHKey:ssh_keys_id.append(ssh_key.id)elif type(ssh_key) in [type(u''), type('')]:regexp_of_fingerprint = ''match = re.match(regexp_of_fingerprint, ssh_key)if match is not None and match.end() == len(ssh_key) - :ssh_keys_id.append(ssh_key)else:key = SSHKey()key.token = tokenresults = key.load_by_pub_key(ssh_key)if results is None:key.public_key = ssh_keykey.name = \"\" % namekey.create()else:key = resultsssh_keys_id.append(key.id)else:raise BadSSHKeyFormat(\"\"+ \"\")return ssh_keys_id", "docstring": "Check and return a list of SSH key IDs or fingerprints according\nto DigitalOcean's API. This method is used to check and create a\ndroplet with the correct SSH keys.", "id": "f1466:c3:m24"} {"signature": "def create(self, *args, **kwargs):", "body": "for attr in kwargs.keys():setattr(self, attr, kwargs[attr])if not self.size_slug and self.size:self.size_slug = self.sizessh_keys_id = Droplet.__get_ssh_keys_id_or_fingerprint(self.ssh_keys,self.token,self.name)data = {\"\": self.name,\"\": self.size_slug,\"\": self.image,\"\": self.region,\"\": ssh_keys_id,\"\": bool(self.backups),\"\": bool(self.ipv6),\"\": bool(self.private_networking),\"\": self.volumes,\"\": self.tags,\"\": bool(self.monitoring),}if self.user_data:data[\"\"] = self.user_datadata = self.get_data(\"\", type=POST, params=data)if data:self.id = data['']['']action_id = data[''][''][]['']self.action_ids = []self.action_ids.append(action_id)", "docstring": "Create the droplet with object properties.\n\nNote: Every argument and parameter given to this method will be\nassigned to the object.", "id": "f1466:c3:m25"} {"signature": "def get_events(self):", "body": "return self.get_actions()", "docstring": "A helper function for backwards compatibility.\nCalls get_actions()", "id": "f1466:c3:m26"} {"signature": "def get_actions(self):", "body": "answer = self.get_data(\"\" % self.id, type=GET)actions = []for action_dict in answer['']:action = Action(**action_dict)action.token = self.tokenaction.droplet_id = self.idaction.load()actions.append(action)return actions", "docstring": "Returns a list of Action objects\nThis actions can be used to check the droplet's status", "id": "f1466:c3:m27"} {"signature": "def get_action(self, action_id):", "body": "return Action.get_object(api_token=self.token,action_id=action_id)", "docstring": "Returns a specific Action by its ID.\n\n Args:\n action_id (int): id of action", "id": "f1466:c3:m28"} {"signature": "def get_snapshots(self):", "body": "snapshots = list()for id in self.snapshot_ids:snapshot = Image()snapshot.id = idsnapshot.token = self.tokensnapshots.append(snapshot)return snapshots", "docstring": "This method will return the snapshots/images connected to that\nspecific droplet.", "id": "f1466:c3:m29"} {"signature": "def get_kernel_available(self):", "body": "kernels = list()data = self.get_data(\"\" % self.id)while True:for jsond in data[u'']:kernel = Kernel(**jsond)kernel.token = self.tokenkernels.append(kernel)try:url = data[u''][u''].get(u'')if not url:breakdata = self.get_data(url)except KeyError: breakreturn kernels", "docstring": "Get a list of kernels available", "id": "f1466:c3:m30"} {"signature": "@classmethoddef get_object(cls, api_token, firewall_id):", "body": "firewall = cls(token=api_token, id=firewall_id)firewall.load()return firewall", "docstring": "Class method that will return a Firewall object by ID.", "id": "f1468:c5:m1"} {"signature": "def add_droplets(self, droplet_ids):", "body": "return self.get_data(\"\" % self.id,type=POST,params={\"\": droplet_ids})", "docstring": 
"Add droplets to this Firewall.", "id": "f1468:c5:m5"} {"signature": "def remove_droplets(self, droplet_ids):", "body": "return self.get_data(\"\" % self.id,type=DELETE,params={\"\": droplet_ids})", "docstring": "Remove droplets from this Firewall.", "id": "f1468:c5:m6"} {"signature": "def add_tags(self, tags):", "body": "return self.get_data(\"\" % self.id,type=POST,params={\"\": tags})", "docstring": "Add tags to this Firewall.", "id": "f1468:c5:m7"} {"signature": "def remove_tags(self, tags):", "body": "return self.get_data(\"\" % self.id,type=DELETE,params={\"\": tags})", "docstring": "Remove tags from this Firewall.", "id": "f1468:c5:m8"} {"signature": "def destroy(self):", "body": "return self.get_data(\"\" % self.id, type=DELETE)", "docstring": "Destroy the Firewall", "id": "f1468:c5:m9"} {"signature": "@classmethoddef get_object(cls, api_token, ssh_key_id):", "body": "ssh_key = cls(token=api_token, id=ssh_key_id)ssh_key.load()return ssh_key", "docstring": "Class method that will return a SSHKey object by ID.", "id": "f1469:c0:m1"} {"signature": "def load(self):", "body": "identifier = Noneif self.id:identifier = self.idelif self.fingerprint is not None:identifier = self.fingerprintdata = self.get_data(\"\" % identifier, type=GET)ssh_key = data['']for attr in ssh_key.keys():setattr(self, attr, ssh_key[attr])self.id = ssh_key['']", "docstring": "Load the SSHKey object from DigitalOcean.\n\nRequires either self.id or self.fingerprint to be set.", "id": "f1469:c0:m2"} {"signature": "def load_by_pub_key(self, public_key):", "body": "data = self.get_data(\"\")for jsoned in data['']:if jsoned.get('', \"\") == public_key:self.id = jsoned['']self.load()return selfreturn None", "docstring": "This method will load a SSHKey object from DigitalOcean\nfrom a public_key. 
This method will avoid problems like\nuploading the same public_key twice.", "id": "f1469:c0:m3"} {"signature": "def create(self):", "body": "input_params = {\"\": self.name,\"\": self.public_key,}data = self.get_data(\"\", type=POST, params=input_params)if data:self.id = data['']['']", "docstring": "Create the SSH Key", "id": "f1469:c0:m4"} {"signature": "def edit(self):", "body": "input_params = {\"\": self.name,\"\": self.public_key,}data = self.get_data(\"\" % self.id,type=PUT,params=input_params)if data:self.id = data['']['']", "docstring": "Edit the SSH Key", "id": "f1469:c0:m5"} {"signature": "def destroy(self):", "body": "return self.get_data(\"\" % self.id, type=DELETE)", "docstring": "Destroy the SSH Key", "id": "f1469:c0:m6"} {"signature": "@classmethoddef get_object(cls, api_token, action_id):", "body": "action = cls(token=api_token, id=action_id)action.load_directly()return action", "docstring": "Class method that will return a Action object by ID.", "id": "f1470:c0:m1"} {"signature": "def wait(self, update_every_seconds=):", "body": "while self.status == u'':sleep(update_every_seconds)self.load()return self.status == u''", "docstring": "Wait until the action is marked as completed or with an error.\nIt will return True in case of success, otherwise False.\n\nOptional Args:\n update_every_seconds - int : number of seconds to wait before\n checking if the action is completed.", "id": "f1470:c0:m4"} {"signature": "def get_account(self):", "body": "return Account.get_object(api_token=self.token)", "docstring": "Returns an Account object.", "id": "f1473:c0:m1"} {"signature": "def get_all_regions(self):", "body": "data = self.get_data(\"\")regions = list()for jsoned in data['']:region = Region(**jsoned)region.token = self.tokenregions.append(region)return regions", "docstring": "This function returns a list of Region object.", "id": "f1473:c0:m2"} {"signature": "def get_all_droplets(self, tag_name=None):", "body": "params = dict()if tag_name:params[\"\"] = tag_namedata = self.get_data(\"\", params=params)droplets = list()for jsoned in data['']:droplet = Droplet(**jsoned)droplet.token = self.tokenfor net in droplet.networks['']:if net[''] == '':droplet.private_ip_address = net['']if net[''] == '':droplet.ip_address = net['']if droplet.networks['']:droplet.ip_v6_address = droplet.networks[''][]['']if \"\" in droplet.features:droplet.backups = Trueelse:droplet.backups = Falseif \"\" in droplet.features:droplet.ipv6 = Trueelse:droplet.ipv6 = Falseif \"\" in droplet.features:droplet.private_networking = Trueelse:droplet.private_networking = Falsedroplets.append(droplet)return droplets", "docstring": "This function returns a list of Droplet object.", "id": "f1473:c0:m3"} {"signature": "def get_droplet(self, droplet_id):", "body": "return Droplet.get_object(api_token=self.token, droplet_id=droplet_id)", "docstring": "Return a Droplet by its ID.", "id": "f1473:c0:m4"} {"signature": "def get_all_sizes(self):", "body": "data = self.get_data(\"\")sizes = list()for jsoned in data['']:size = Size(**jsoned)size.token = self.tokensizes.append(size)return sizes", "docstring": "This function returns a list of Size object.", "id": "f1473:c0:m5"} {"signature": "def get_images(self, private=False, type=None):", "body": "params = {}if private:params[''] = ''if type:params[''] = typedata = self.get_data(\"\", params=params)images = list()for jsoned in data['']:image = Image(**jsoned)image.token = self.tokenimages.append(image)return images", "docstring": "This function returns a list of Image object.", 
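A hypothetical sketch of the `load_by_pub_key`/`create` flow documented above, which avoids uploading the same public key twice; the key material, key name, and token are placeholders.

```python
import digitalocean

pub_key = "ssh-rsa AAAA... user@host"  # placeholder public key
key = digitalocean.SSHKey(token="my-token")

existing = key.load_by_pub_key(pub_key)
if existing is None:
    # Not registered yet: create it (the API fills in key.id on success).
    key.name = "my-laptop"
    key.public_key = pub_key
    key.create()
    print("created key", key.id)
else:
    print("key already registered as", existing.id)
```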
"id": "f1473:c0:m6"} {"signature": "def get_all_images(self):", "body": "images = self.get_images()return images", "docstring": "This function returns a list of Image objects containing all\navailable DigitalOcean images, both public and private.", "id": "f1473:c0:m7"} {"signature": "def get_image(self, image_id_or_slug):", "body": "return Image.get_object(api_token=self.token,image_id_or_slug=image_id_or_slug,)", "docstring": "Return a Image by its ID/Slug.", "id": "f1473:c0:m8"} {"signature": "def get_my_images(self):", "body": "images = self.get_images(private=True)return images", "docstring": "This function returns a list of Image objects representing\nprivate DigitalOcean images (e.g. snapshots and backups).", "id": "f1473:c0:m9"} {"signature": "def get_global_images(self):", "body": "data = self.get_images()images = list()for i in data:if i.public:i.token = self.tokenimages.append(i)return images", "docstring": "This function returns a list of Image objects representing\npublic DigitalOcean images (e.g. base distribution images\nand 'One-Click' applications).", "id": "f1473:c0:m10"} {"signature": "def get_distro_images(self):", "body": "images = self.get_images(type='')return images", "docstring": "This function returns a list of Image objects representing\npublic base distribution images.", "id": "f1473:c0:m11"} {"signature": "def get_app_images(self):", "body": "images = self.get_images(type='')return images", "docstring": "This function returns a list of Image objectobjects representing\npublic DigitalOcean 'One-Click' application images.", "id": "f1473:c0:m12"} {"signature": "def get_all_domains(self):", "body": "data = self.get_data(\"\")domains = list()for jsoned in data['']:domain = Domain(**jsoned)domain.token = self.tokendomains.append(domain)return domains", "docstring": "This function returns a list of Domain object.", "id": "f1473:c0:m13"} {"signature": "def get_domain(self, domain_name):", "body": "return Domain.get_object(api_token=self.token, domain_name=domain_name)", "docstring": "Return a Domain by its domain_name", "id": "f1473:c0:m14"} {"signature": "def get_all_sshkeys(self):", "body": "data = self.get_data(\"\")ssh_keys = list()for jsoned in data['']:ssh_key = SSHKey(**jsoned)ssh_key.token = self.tokenssh_keys.append(ssh_key)return ssh_keys", "docstring": "This function returns a list of SSHKey object.", "id": "f1473:c0:m15"} {"signature": "def get_ssh_key(self, ssh_key_id):", "body": "return SSHKey.get_object(api_token=self.token, ssh_key_id=ssh_key_id)", "docstring": "Return a SSHKey object by its ID.", "id": "f1473:c0:m16"} {"signature": "def get_all_tags(self):", "body": "data = self.get_data(\"\")return [Tag(token=self.token, **tag) for tag in data['']]", "docstring": "This method returns a list of all tags.", "id": "f1473:c0:m17"} {"signature": "def get_action(self, action_id):", "body": "return Action.get_object(api_token=self.token, action_id=action_id)", "docstring": "Return an Action object by a specific ID.", "id": "f1473:c0:m18"} {"signature": "def get_all_floating_ips(self):", "body": "data = self.get_data(\"\")floating_ips = list()for jsoned in data['']:floating_ip = FloatingIP(**jsoned)floating_ip.token = self.tokenfloating_ips.append(floating_ip)return floating_ips", "docstring": "This function returns a list of FloatingIP objects.", "id": "f1473:c0:m19"} {"signature": "def get_floating_ip(self, ip):", "body": "return FloatingIP.get_object(api_token=self.token, ip=ip)", "docstring": "Returns a of FloatingIP object by its IP address.", "id": 
"f1473:c0:m20"} {"signature": "def get_all_load_balancers(self):", "body": "data = self.get_data(\"\")load_balancers = list()for jsoned in data['']:load_balancer = LoadBalancer(**jsoned)load_balancer.token = self.tokenload_balancer.health_check = HealthCheck(**jsoned[''])load_balancer.sticky_sessions = StickySesions(**jsoned[''])forwarding_rules = list()for rule in jsoned['']:forwarding_rules.append(ForwardingRule(**rule))load_balancer.forwarding_rules = forwarding_rulesload_balancers.append(load_balancer)return load_balancers", "docstring": "Returns a list of Load Balancer objects.", "id": "f1473:c0:m21"} {"signature": "def get_load_balancer(self, id):", "body": "return LoadBalancer.get_object(api_token=self.token, id=id)", "docstring": "Returns a Load Balancer object by its ID.\n\nArgs:\n id (str): Load Balancer ID", "id": "f1473:c0:m22"} {"signature": "def get_certificate(self, id):", "body": "return Certificate.get_object(api_token=self.token, cert_id=id)", "docstring": "Returns a Certificate object by its ID.\n\nArgs:\n id (str): Certificate ID", "id": "f1473:c0:m23"} {"signature": "def get_all_certificates(self):", "body": "data = self.get_data(\"\")certificates = list()for jsoned in data['']:cert = Certificate(**jsoned)cert.token = self.tokencertificates.append(cert)return certificates", "docstring": "This function returns a list of Certificate objects.", "id": "f1473:c0:m24"} {"signature": "def get_snapshot(self, snapshot_id):", "body": "return Snapshot.get_object(api_token=self.token, snapshot_id=snapshot_id)", "docstring": "Return a Snapshot by its ID.", "id": "f1473:c0:m25"} {"signature": "def get_all_snapshots(self):", "body": "data = self.get_data(\"\")return [Snapshot(token=self.token, **snapshot)for snapshot in data['']]", "docstring": "This method returns a list of all Snapshots.", "id": "f1473:c0:m26"} {"signature": "def get_droplet_snapshots(self):", "body": "data = self.get_data(\"\")return [Snapshot(token=self.token, **snapshot)for snapshot in data['']]", "docstring": "This method returns a list of all Snapshots based on Droplets.", "id": "f1473:c0:m27"} {"signature": "def get_volume_snapshots(self):", "body": "data = self.get_data(\"\")return [Snapshot(token=self.token, **snapshot)for snapshot in data['']]", "docstring": "This method returns a list of all Snapshots based on volumes.", "id": "f1473:c0:m28"} {"signature": "def get_all_volumes(self, region=None):", "body": "if region:url = \"\".format(region)else:url = \"\"data = self.get_data(url)volumes = list()for jsoned in data['']:volume = Volume(**jsoned)volume.token = self.tokenvolumes.append(volume)return volumes", "docstring": "This function returns a list of Volume objects.", "id": "f1473:c0:m29"} {"signature": "def get_volume(self, volume_id):", "body": "return Volume.get_object(api_token=self.token, volume_id=volume_id)", "docstring": "Returns a Volume object by its ID.", "id": "f1473:c0:m30"} {"signature": "def get_all_firewalls(self):", "body": "data = self.get_data(\"\")firewalls = list()for jsoned in data['']:firewall = Firewall(**jsoned)firewall.token = self.tokenin_rules = list()for rule in jsoned['']:in_rules.append(InboundRule(**rule))firewall.inbound_rules = in_rulesout_rules = list()for rule in jsoned['']:out_rules.append(OutboundRule(**rule))firewall.outbound_rules = out_rulesfirewalls.append(firewall)return firewalls", "docstring": "This function returns a list of Firewall objects.", "id": "f1473:c0:m31"} {"signature": "def get_firewall(self, firewall_id):", "body": "return 
Firewall.get_object(api_token=self.token,firewall_id=firewall_id,)", "docstring": "Return a Firewall by its ID.", "id": "f1473:c0:m32"} {"signature": "@classmethoddef get_object(cls, api_token, snapshot_id):", "body": "snapshot = cls(token=api_token, id=snapshot_id)snapshot.load()return snapshot", "docstring": "Class method that will return a Snapshot object by ID.", "id": "f1474:c0:m1"} {"signature": "def destroy(self):", "body": "return self.get_data(\"\" % self.id, type=DELETE)", "docstring": "Destroy the image", "id": "f1474:c0:m3"} {"signature": "def __perform_request(self, url, type=GET, params=None):", "body": "if params is None:params = {}if not self.token:raise TokenError(\"\")url = urlparse.urljoin(self.end_point, url)identity = lambda x: xjson_dumps = lambda x: json.dumps(x)lookup = {GET: (self._session.get, {}, '', identity),POST: (self._session.post, {'': ''}, '',json_dumps),PUT: (self._session.put, {'': ''}, '',json_dumps),DELETE: (self._session.delete,{'': ''},'', json_dumps),}requests_method, headers, payload, transform = lookup[type]agent = \"\".format('',__version__,requests.__name__,requests.__version__)headers.update({'': '' + self.token,'': agent})kwargs = {'': headers, payload: transform(params)}timeout = self.get_timeout()if timeout:kwargs[''] = timeoutheaders_str = str(headers).replace(self.token.strip(), '')self._log.debug('' %(type, url, payload, params, headers_str, timeout))return requests_method(url, **kwargs)", "docstring": "This method will perform the real request,\nin this way we can customize only the \"output\" of the API call by\nusing self.__call_api method.\nThis method will return the request object.", "id": "f1475:c5:m3"} {"signature": "def __deal_with_pagination(self, url, method, params, data):", "body": "all_data = datawhile data.get(\"\", {}).get(\"\", {}).get(\"\"):url, query = data[\"\"][\"\"][\"\"].split(\"\", )for key, value in urlparse.parse_qs(query).items():params[key] = valuedata = self.__perform_request(url, method, params).json()for key, value in data.items():if isinstance(value, list) and key in all_data:all_data[key] += valueelse:all_data[key] = valuereturn all_data", "docstring": "Perform multiple calls in order to have a full list of elements\nwhen the API are \"paginated\". (content list is divided in more\nthan one page)", "id": "f1475:c5:m4"} {"signature": "def get_timeout(self):", "body": "timeout_str = os.environ.get(REQUEST_TIMEOUT_ENV_VAR)if timeout_str:try:return float(timeout_str)except:self._log.error('''' %timeout_str)return None", "docstring": "Checks if any timeout for the requests to DigitalOcean is required.\nTo set a timeout, use the REQUEST_TIMEOUT_ENV_VAR environment\nvariable.", "id": "f1475:c5:m6"} {"signature": "def get_data(self, url, type=GET, params=None):", "body": "if params is None:params = dict()if type is GET:params.setdefault(\"\", )req = self.__perform_request(url, type, params)if req.status_code == :return Trueif req.status_code == :raise NotFoundError()try:data = req.json()except ValueError as e:raise JSONReadError('' % str(e))if not req.ok:msg = [data[m] for m in (\"\", \"\") if m in data][]raise DataReadError(msg)self.__init_ratelimit(req.headers)pages = data.get(\"\", {}).get(\"\", {})if pages.get(\"\") and \"\" not in params:return self.__deal_with_pagination(url, type, params, data)else:return data", "docstring": "This method is a basic implementation of __call_api that checks\nerrors too. 
In case of success the method will return True or the\ncontent of the response to the request.\n\nPagination is automatically detected and handled accordingly", "id": "f1475:c5:m7"} {"signature": "@classmethoddef get_object(cls, api_token, image_id_or_slug):", "body": "if cls._is_string(image_id_or_slug):image = cls(token=api_token, slug=image_id_or_slug)image.load(use_slug=True)else:image = cls(token=api_token, id=image_id_or_slug)image.load()return image", "docstring": "Class method that will return an Image object by ID or slug.\n\nThis method is used to validate the type of the image. If it is a\nnumber, it will be considered as an Image ID, instead if it is a\nstring, it will considered as slug.", "id": "f1476:c0:m1"} {"signature": "@staticmethoddef _is_string(value):", "body": "if type(value) in [type(u''), type('')]:return Trueelif type(value) in [int, type( ** )]:return Falseelse:return None", "docstring": "Checks if the value provided is a string (True) or not integer\n(False) or something else (None).", "id": "f1476:c0:m2"} {"signature": "def create(self):", "body": "params = {'': self.name,'': self.region,'': self.url,'': self.distribution,'': self.description,'': self.tags}data = self.get_data('', type=POST, params=params)if data:for attr in data[''].keys():setattr(self, attr, data[''][attr])return self", "docstring": "Creates a new custom DigitalOcean Image from the Linux virtual machine\nimage located at the provided `url`.", "id": "f1476:c0:m3"} {"signature": "def load(self, use_slug=False):", "body": "identifier = Noneif use_slug or not self.id:identifier = self.slugelse:identifier = self.idif not identifier:raise NotFoundError(\"\")data = self.get_data(\"\" % identifier)image_dict = data['']for attr in image_dict.keys():setattr(self, attr, image_dict[attr])return self", "docstring": "Load slug.\n\nLoads by id, or by slug if id is not present or use slug is True.", "id": "f1476:c0:m4"} {"signature": "def destroy(self):", "body": "return self.get_data(\"\" % self.id, type=DELETE)", "docstring": "Destroy the image", "id": "f1476:c0:m5"} {"signature": "def transfer(self, new_region_slug):", "body": "return self.get_data(\"\" % self.id,type=POST,params={\"\": \"\", \"\": new_region_slug})", "docstring": "Transfer the image", "id": "f1476:c0:m6"} {"signature": "def rename(self, new_name):", "body": "return self.get_data(\"\" % self.id,type=PUT,params={\"\": new_name})", "docstring": "Rename an image", "id": "f1476:c0:m7"} {"signature": "def get_data(self, url, headers=dict(), params=dict(), render_json=True):", "body": "url = urljoin(self.end_point, url)response = requests.get(url, headers=headers, params=params,timeout=self.get_timeout())if render_json:return response.json()return response.content", "docstring": "Customized version of get_data to directly get the data without\nusing the authentication method.", "id": "f1477:c0:m1"} {"signature": "@classmethoddef get_object(cls, api_token, volume_id):", "body": "volume = cls(token=api_token, id=volume_id)volume.load()return volume", "docstring": "Class method that will return an Volume object by ID.", "id": "f1478:c0:m1"} {"signature": "def create(self, *args, **kwargs):", "body": "data = self.get_data('',type=POST,params={'': self.name,'': self.region,'': self.size_gigabytes,'': self.description,'': self.filesystem_type,'': self.filesystem_label})if data:self.id = data['']['']self.created_at = data['']['']return self", "docstring": "Creates a Block Storage volume\n\nNote: Every argument and parameter given to this method will 
be\nassigned to the object.\n\nArgs:\n name: string - a name for the volume\n region: string - slug identifier for the region\n size_gigabytes: int - size of the Block Storage volume in GiB\n filesystem_type: string, optional - name of the filesystem type the\n volume will be formatted with ('ext4' or 'xfs')\n filesystem_label: string, optional - the label to be applied to the\n filesystem, only used in conjunction with filesystem_type\n\nOptional Args:\n description: string - text field to describe a volume", "id": "f1478:c0:m3"} {"signature": "def create_from_snapshot(self, *args, **kwargs):", "body": "data = self.get_data('',type=POST,params={'': self.name,'': self.snapshot_id,'': self.region,'': self.size_gigabytes,'': self.description,'': self.filesystem_type,'': self.filesystem_label})if data:self.id = data['']['']self.created_at = data['']['']return self", "docstring": "Creates a Block Storage volume\n\nNote: Every argument and parameter given to this method will be\nassigned to the object.\n\nArgs:\n name: string - a name for the volume\n snapshot_id: string - unique identifier for the volume snapshot\n size_gigabytes: int - size of the Block Storage volume in GiB\n filesystem_type: string, optional - name of the filesystem type the\n volume will be formatted with ('ext4' or 'xfs')\n filesystem_label: string, optional - the label to be applied to the\n filesystem, only used in conjunction with filesystem_type\n\nOptional Args:\n description: string - text field to describe a volume", "id": "f1478:c0:m4"} {"signature": "def destroy(self):", "body": "return self.get_data(\"\" % self.id, type=DELETE)", "docstring": "Destroy a volume", "id": "f1478:c0:m5"} {"signature": "def attach(self, droplet_id, region):", "body": "return self.get_data(\"\" % self.id,type=POST,params={\"\": \"\",\"\": droplet_id,\"\": region})", "docstring": "Attach a Volume to a Droplet.\n\nArgs:\n droplet_id: int - droplet id\n region: string - slug identifier for the region", "id": "f1478:c0:m6"} {"signature": "def detach(self, droplet_id, region):", "body": "return self.get_data(\"\" % self.id,type=POST,params={\"\": \"\",\"\": droplet_id,\"\": region})", "docstring": "Detach a Volume from a Droplet.\n\nArgs:\n droplet_id: int - droplet id\n region: string - slug identifier for the region", "id": "f1478:c0:m7"} {"signature": "def resize(self, size_gigabytes, region):", "body": "return self.get_data(\"\" % self.id,type=POST,params={\"\": \"\",\"\": size_gigabytes,\"\": region})", "docstring": "Resize a Volume.\n\nArgs:\n size_gigabytes: int - size of the Block Storage volume in GiB\n region: string - slug identifier for the region", "id": "f1478:c0:m8"} {"signature": "def snapshot(self, name):", "body": "return self.get_data(\"\" % self.id,type=POST,params={\"\": name})", "docstring": "Create a snapshot of the volume.\n\nArgs:\n name: string - a human-readable name for the snapshot", "id": "f1478:c0:m9"} {"signature": "def get_snapshots(self):", "body": "data = self.get_data(\"\" % self.id)snapshots = list()for jsond in data[u'']:snapshot = Snapshot(**jsond)snapshot.token = self.tokensnapshots.append(snapshot)return snapshots", "docstring": "Retrieve the list of snapshots that have been created from a volume.\n\nArgs:", "id": "f1478:c0:m10"} {"signature": "@classmethoddef get_object(cls, api_token, domain, record_id):", "body": "record = cls(token=api_token, domain=domain, id=record_id)record.load()return record", "docstring": "Class method that will return a Record object by ID and the domain.", "id": 
"f1479:c0:m1"} {"signature": "def create(self):", "body": "input_params = {\"\": self.type,\"\": self.data,\"\": self.name,\"\": self.priority,\"\": self.port,\"\": self.ttl,\"\": self.weight,\"\": self.flags,\"\": self.tags}data = self.get_data(\"\" % (self.domain),type=POST,params=input_params,)if data:self.id = data['']['']", "docstring": "Creates a new record for a domain.\n\nArgs:\n type (str): The type of the DNS record (e.g. A, CNAME, TXT).\n name (str): The host name, alias, or service being defined by the\n record.\n data (int): Variable data depending on record type.\n priority (int): The priority for SRV and MX records.\n port (int): The port for SRV records.\n ttl (int): The time to live for the record, in seconds.\n weight (int): The weight for SRV records.\n flags (int): An unsigned integer between 0-255 used for CAA records.\n tags (string): The parameter tag for CAA records. Valid values are\n \"issue\", \"wildissue\", or \"iodef\"", "id": "f1479:c0:m2"} {"signature": "def destroy(self):", "body": "return self.get_data(\"\" % (self.domain, self.id),type=DELETE,)", "docstring": "Destroy the record", "id": "f1479:c0:m3"} {"signature": "def save(self):", "body": "data = {\"\": self.type,\"\": self.data,\"\": self.name,\"\": self.priority,\"\": self.port,\"\": self.ttl,\"\": self.weight,\"\": self.flags,\"\": self.tags}return self.get_data(\"\" % (self.domain, self.id),type=PUT,params=data)", "docstring": "Save existing record", "id": "f1479:c0:m4"} {"signature": "@classmethoddef get_object(cls, api_token):", "body": "acct = cls(token=api_token)acct.load()return acct", "docstring": "Class method that will return an Account object.", "id": "f1480:c0:m1"} {"signature": "def load(self):", "body": "tags = self.get_data(\"\" % self.name)tag = tags['']for attr in tag.keys():setattr(self, attr, tag[attr])return self", "docstring": "Fetch data about tag", "id": "f1481:c0:m2"} {"signature": "def create(self, **kwargs):", "body": "for attr in kwargs.keys():setattr(self, attr, kwargs[attr])params = {\"\": self.name}output = self.get_data(\"\", type=\"\", params=params)if output:self.name = output['']['']self.resources = output['']['']", "docstring": "Create the tag.", "id": "f1481:c0:m3"} {"signature": "def __get_resources(self, resources, method):", "body": "tagged = self.get_data('' % self.name, params={\"\": resources},type=method,)return tagged", "docstring": "Method used to talk directly to the API (TAGs' Resources)", "id": "f1481:c0:m5"} {"signature": "def __add_resources(self, resources):", "body": "return self.__get_resources(resources, method='')", "docstring": "Add to the resources to this tag.\n\nAttributes accepted at creation time:\n resources: array - See API.", "id": "f1481:c0:m6"} {"signature": "def __remove_resources(self, resources):", "body": "return self.__get_resources(resources, method='')", "docstring": "Remove resources from this tag.\n\nAttributes accepted at creation time:\n resources: array - See API.", "id": "f1481:c0:m7"} {"signature": "def __extract_resources_from_droplets(self, data):", "body": "resources = []if not isinstance(data, list): return datafor a_droplet in data:res = {}try:if isinstance(a_droplet, unicode):res = {\"\": a_droplet, \"\": \"\"}except NameError:passif isinstance(a_droplet, str) or isinstance(a_droplet, int):res = {\"\": str(a_droplet), \"\": \"\"}elif isinstance(a_droplet, Droplet):res = {\"\": str(a_droplet.id), \"\": \"\"}if len(res) > :resources.append(res)return resources", "docstring": "Private method to extract from a value, the 
resources.\nIt will check the type of object in the array provided and build\nthe right structure for the API.", "id": "f1481:c0:m8"} {"signature": "def add_droplets(self, droplet):", "body": "droplets = dropletif not isinstance(droplets, list):droplets = [droplet]resources = self.__extract_resources_from_droplets(droplets)if len(resources) > :return self.__add_resources(resources)return False", "docstring": "Add the Tag to a Droplet.\n\nAttributes accepted at creation time:\n droplet: array of string or array of int, or array of Droplets.", "id": "f1481:c0:m9"} {"signature": "def remove_droplets(self, droplet):", "body": "droplets = dropletif not isinstance(droplets, list):droplets = [droplet]resources = self.__extract_resources_from_droplets(droplets)if len(resources) > :return self.__remove_resources(resources)return False", "docstring": "Remove the Tag from the Droplet.\n\nAttributes accepted at creation time:\n droplet: array of string or array of int, or array of Droplets.", "id": "f1481:c0:m10"} {"signature": "@classmethoddef get_object(cls, api_token, id):", "body": "load_balancer = cls(token=api_token, id=id)load_balancer.load()return load_balancer", "docstring": "Class method that will return a LoadBalancer object by its ID.\n\nArgs:\n api_token (str): DigitalOcean API token\n id (str): Load Balancer ID", "id": "f1482:c3:m1"} {"signature": "def load(self):", "body": "data = self.get_data('' % self.id, type=GET)load_balancer = data['']for attr in load_balancer.keys():if attr == '':health_check = HealthCheck(**load_balancer[''])setattr(self, attr, health_check)elif attr == '':sticky_ses = StickySesions(**load_balancer[''])setattr(self, attr, sticky_ses)elif attr == '':rules = list()for rule in load_balancer['']:rules.append(ForwardingRule(**rule))setattr(self, attr, rules)else:setattr(self, attr, load_balancer[attr])return self", "docstring": "Loads updated attributes for a LoadBalancer object.\n\nRequires self.id to be set.", "id": "f1482:c3:m2"} {"signature": "def create(self, *args, **kwargs):", "body": "rules_dict = [rule.__dict__ for rule in self.forwarding_rules]params = {'': self.name, '': self.region,'': rules_dict,'': self.redirect_http_to_https}if self.droplet_ids and self.tag:raise ValueError('')elif self.tag:params[''] = self.tagelse:params[''] = self.droplet_idsif self.algorithm:params[''] = self.algorithmif self.health_check:params[''] = self.health_check.__dict__if self.sticky_sessions:params[''] = self.sticky_sessions.__dict__data = self.get_data('', type=POST, params=params)if data:self.id = data['']['']self.ip = data['']['']self.algorithm = data['']['']self.health_check = HealthCheck(**data[''][''])self.sticky_sessions = StickySesions(**data[''][''])self.droplet_ids = data['']['']self.status = data['']['']self.created_at = data['']['']return self", "docstring": "Creates a new LoadBalancer.\n\nNote: Every argument and parameter given to this method will be\nassigned to the object.\n\nArgs:\n name (str): The Load Balancer's name\n region (str): The slug identifier for a DigitalOcean region\n algorithm (str, optional): The load balancing algorithm to be\n used. 
Currently, it must be either \"round_robin\" or\n \"least_connections\"\n forwarding_rules (obj:`list`): A list of `ForwardingRules` objects\n health_check (obj, optional): A `HealthCheck` object\n sticky_sessions (obj, optional): A `StickySessions` object\n redirect_http_to_https (bool, optional): A boolean indicating\n whether HTTP requests to the Load Balancer should be\n redirected to HTTPS\n droplet_ids (obj:`list` of `int`): A list of IDs representing\n Droplets to be added to the Load Balancer (mutually\n exclusive with 'tag')\n tag (str): A string representing a DigitalOcean Droplet tag\n (mutually exclusive with 'droplet_ids')", "id": "f1482:c3:m3"} {"signature": "def save(self):", "body": "forwarding_rules = [rule.__dict__ for rule in self.forwarding_rules]data = {'': self.name,'': self.region[''],'': forwarding_rules,'': self.redirect_http_to_https}if self.tag:data[''] = self.tagelse:data[''] = self.droplet_idsif self.algorithm:data[\"\"] = self.algorithmif self.health_check:data[''] = self.health_check.__dict__if self.sticky_sessions:data[''] = self.sticky_sessions.__dict__return self.get_data(\"\" % self.id,type=PUT,params=data)", "docstring": "Save the LoadBalancer", "id": "f1482:c3:m4"} {"signature": "def destroy(self):", "body": "return self.get_data('' % self.id, type=DELETE)", "docstring": "Destroy the LoadBalancer", "id": "f1482:c3:m5"} {"signature": "def add_droplets(self, droplet_ids):", "body": "return self.get_data(\"\" % self.id,type=POST,params={\"\": droplet_ids})", "docstring": "Assign a LoadBalancer to a Droplet.\n\nArgs:\n droplet_ids (obj:`list` of `int`): A list of Droplet IDs", "id": "f1482:c3:m6"} {"signature": "def remove_droplets(self, droplet_ids):", "body": "return self.get_data(\"\" % self.id,type=DELETE,params={\"\": droplet_ids})", "docstring": "Unassign a LoadBalancer.\n\nArgs:\n droplet_ids (obj:`list` of `int`): A list of Droplet IDs", "id": "f1482:c3:m7"} {"signature": "def add_forwarding_rules(self, forwarding_rules):", "body": "rules_dict = [rule.__dict__ for rule in forwarding_rules]return self.get_data(\"\" % self.id,type=POST,params={\"\": rules_dict})", "docstring": "Adds new forwarding rules to a LoadBalancer.\n\nArgs:\n forwarding_rules (obj:`list`): A list of `ForwardingRules` objects", "id": "f1482:c3:m8"} {"signature": "def remove_forwarding_rules(self, forwarding_rules):", "body": "rules_dict = [rule.__dict__ for rule in forwarding_rules]return self.get_data(\"\" % self.id,type=DELETE,params={\"\": rules_dict})", "docstring": "Removes existing forwarding rules from a LoadBalancer.\n\nArgs:\n forwarding_rules (obj:`list`): A list of `ForwardingRules` objects", "id": "f1482:c3:m9"} {"signature": "@click.command()@click.option('','',type=Directory(),default='',help='',show_default=True,)@click.option('','',type=Directory(exists=False, writable=True),default='',help='',show_default=True,)@click.option('','',is_flag=True,help=\"\",)@click.option('','',is_flag=True,help=\"\",)@click.option('','',is_flag=True,help=\"\",)@click.option('',is_flag=True,help=\"\",)@click.option('',is_flag=True,help=\"\",)@click.argument('',nargs=-,type=Directory(file_okay=True),)@click.version_option(version=__version__)def main(src, pyi_dir, target_dir, incremental, quiet, replace_any, hg, traceback):", "body": "Config.incremental = incrementalConfig.replace_any = replace_anyreturncode = for src_entry in src:for file, error, exc_type, tb in 
retype_path(Path(src_entry),pyi_dir=Path(pyi_dir),targets=Path(target_dir),src_explicitly_given=True,quiet=quiet,hg=hg,):print(f'', file=sys.stderr)if traceback:print('', file=sys.stderr)for line in tb:print(line, file=sys.stderr, end='')print(f'', file=sys.stderr)returncode += if not src and not quiet:print('', file=sys.stderr)sys.exit(min(returncode, ))", "docstring": "Re-apply type annotations from .pyi stubs to your codebase.", "id": "f1486:m0"} {"signature": "def retype_path(src, pyi_dir, targets, *, src_explicitly_given=False, quiet=False, hg=False):", "body": "if src.is_dir():for child in src.iterdir():if child == pyi_dir or child == targets:continueyield from retype_path(child, pyi_dir / src.name, targets / src.name, quiet=quiet, hg=hg,)elif src.suffix == '' or src_explicitly_given:try:retype_file(src, pyi_dir, targets, quiet=quiet, hg=hg)except Exception as e:yield (src,str(e),type(e),traceback.format_tb(e.__traceback__),)", "docstring": "Recursively retype files or directories given. Generate errors.", "id": "f1486:m1"} {"signature": "def retype_file(src, pyi_dir, targets, *, quiet=False, hg=False):", "body": "with tokenize.open(src) as src_buffer:src_encoding = src_buffer.encodingsrc_node = lib2to3_parse(src_buffer.read())try:with open((pyi_dir / src.name).with_suffix('')) as pyi_file:pyi_txt = pyi_file.read()except FileNotFoundError:if not quiet:print(f'',file=sys.stderr,)else:pyi_ast = ast3.parse(pyi_txt)assert isinstance(pyi_ast, ast3.Module)reapply_all(pyi_ast.body, src_node)fix_remaining_type_comments(src_node)targets.mkdir(parents=True, exist_ok=True)with open(targets / src.name, '', encoding=src_encoding) as target_file:target_file.write(lib2to3_unparse(src_node, hg=hg))return targets / src.name", "docstring": "Retype `src`, finding types in `pyi_dir`. Save in `targets`.\n\n The file should remain formatted exactly as it was before, save for:\n - annotations\n - additional imports needed to satisfy annotations\n - additional module-level names needed to satisfy annotations\n\n Type comments in sources are normalized to type annotations.", "id": "f1486:m2"} {"signature": "def lib2to3_parse(src_txt):", "body": "grammar = pygram.python_grammar_no_print_statementdrv = driver.Driver(grammar, pytree.convert)if src_txt[-] != '':nl = '' if '' in src_txt[:] else ''src_txt += nltry:result = drv.parse_string(src_txt, True)except ParseError as pe:lineno, column = pe.context[]lines = src_txt.splitlines()try:faulty_line = lines[lineno - ]except IndexError:faulty_line = \"\"raise ValueError(f\"\") from Noneif isinstance(result, Leaf):result = Node(syms.file_input, [result])return result", "docstring": "Given a string with source, return the lib2to3 Node.", "id": "f1486:m3"} {"signature": "def lib2to3_unparse(node, *, hg=False):", "body": "code = str(node)if hg:from retype_hgext import apply_job_securitycode = apply_job_security(code)return code", "docstring": "Given a lib2to3 node, return its string representation.", "id": "f1486:m4"} {"signature": "def reapply_all(ast_node, lib2to3_node):", "body": "late_processing = reapply(ast_node, lib2to3_node)for lazy_func in reversed(late_processing):lazy_func()", "docstring": "Reapplies the typed_ast node into the lib2to3 tree.\n\n Also does post-processing. 
This is done in reverse order to enable placing\n TypeVars and aliases that depend on one another.", "id": "f1486:m5"} {"signature": "@singledispatchdef reapply(ast_node, lib2to3_node):", "body": "return []", "docstring": "Reapplies the typed_ast node into the lib2to3 tree.\n\n By default does nothing.", "id": "f1486:m6"} {"signature": "@singledispatchdef serialize_attribute(attr):", "body": "return \"\"", "docstring": "serialize_attribute(Attribute()) -> \"self.f1.f2.f3\"\n\n Change an AST object into its string representation.", "id": "f1486:m14"} {"signature": "@singledispatchdef convert_annotation(ann):", "body": "raise NotImplementedError(f\"\")", "docstring": "Converts an AST object into its lib2to3 equivalent.", "id": "f1486:m18"} {"signature": "@singledispatchdef names_already_imported(names, node):", "body": "return False", "docstring": "Returns True if `node` represents `names`.", "id": "f1486:m30"} {"signature": "def fix_remaining_type_comments(node):", "body": "assert node.type == syms.file_inputlast_n = Nonefor n in node.post_order():if last_n is not None:if n.type == token.NEWLINE and is_assignment(last_n):fix_variable_annotation_type_comment(n, last_n)elif n.type == syms.funcdef and last_n.type == syms.suite:fix_signature_annotation_type_comment(n, last_n, offset=)elif n.type == syms.async_funcdef and last_n.type == syms.suite:fix_signature_annotation_type_comment(n, last_n, offset=)last_n = n", "docstring": "Converts type comments in `node` to proper annotated assignments.", "id": "f1486:m39"} {"signature": "def get_function_signature(fun, *, is_method=False):", "body": "args = fun.argsreturns = fun.returnsif fun.type_comment:try:args_tc, returns_tc = parse_signature_type_comment(fun.type_comment)if returns and returns_tc:raise ValueError(\"\")returns = returns_tccopy_arguments_to_annotations(args, args_tc, is_method=is_method)except (SyntaxError, ValueError) as exc:raise ValueError(f\"\" +f\"\")copy_type_comments_to_annotations(args)return args, returns", "docstring": "Returns (args, returns).\n\n `args` is ast3.arguments, `returns` is the return type AST node. 
The kicker\n about this function is that it pushes type comments into proper annotation\n fields, standardizing type handling.", "id": "f1486:m48"} {"signature": "def parse_signature_type_comment(type_comment):", "body": "try:result = ast3.parse(type_comment, '', '')except SyntaxError:raise ValueError(f\"\")assert isinstance(result, ast3.FunctionType)if len(result.argtypes) == :argtypes = result.argtypes[]else:argtypes = result.argtypesreturn argtypes, result.returns", "docstring": "Parse the fugly signature type comment into AST nodes.\n\n Caveats: ASTifying **kwargs is impossible with the current grammar so we\n hack it into unary subtraction (to differentiate from Starred in vararg).\n\n For example from:\n \"(str, int, *int, **Any) -> 'SomeReturnType'\"\n\n To:\n ([ast3.Name, ast.Name, ast3.Name, ast.Name], ast3.Str)", "id": "f1486:m49"} {"signature": "def parse_type_comment(type_comment):", "body": "try:result = ast3.parse(type_comment, '', '')except SyntaxError:raise ValueError(f\"\") from Noneassert isinstance(result, ast3.Expression)return result.body", "docstring": "Parse a type comment string into AST nodes.", "id": "f1486:m50"} {"signature": "def parse_arguments(arguments):", "body": "arguments = f\"\"try:result = ast3.parse(arguments, '', '')except SyntaxError:raise ValueError(f\"\") from Noneassert isinstance(result, ast3.Module)assert len(result.body) == assert isinstance(result.body[], ast3.FunctionDef)args = result.body[].argscopy_type_comments_to_annotations(args)return args", "docstring": "parse_arguments('(a, b, *, c=False, **d)') -> ast3.arguments\n\n Parse a string with function arguments into an AST node.", "id": "f1486:m51"} {"signature": "def copy_arguments_to_annotations(args, type_comment, *, is_method=False):", "body": "if isinstance(type_comment, ast3.Ellipsis):returnexpected = len(args.args)if args.vararg:expected += expected += len(args.kwonlyargs)if args.kwarg:expected += actual = len(type_comment) if isinstance(type_comment, list) else if expected != actual:if is_method and expected - actual == :pass else:raise ValueError(f\"\" +f\"\")if isinstance(type_comment, list):next_value = type_comment.popelse:_tc = type_commentdef next_value(index: int = ) -> ast3.expr:return _tcfor arg in args.args[expected - actual:]:ensure_no_annotation(arg.annotation)arg.annotation = next_value()if args.vararg:ensure_no_annotation(args.vararg.annotation)args.vararg.annotation = next_value()for arg in args.kwonlyargs:ensure_no_annotation(arg.annotation)arg.annotation = next_value()if args.kwarg:ensure_no_annotation(args.kwarg.annotation)args.kwarg.annotation = next_value()", "docstring": "Copies AST nodes from `type_comment` into the ast3.arguments in `args`.\n\n Does validation of argument count (allowing for untyped self/cls)\n and type (vararg and kwarg).", "id": "f1486:m52"} {"signature": "def copy_type_comments_to_annotations(args):", "body": "for arg in args.args:copy_type_comment_to_annotation(arg)if args.vararg:copy_type_comment_to_annotation(args.vararg)for arg in args.kwonlyargs:copy_type_comment_to_annotation(arg)if args.kwarg:copy_type_comment_to_annotation(args.kwarg)", "docstring": "Copies argument type comments from the legacy long form to annotations\n in the entire function signature.", "id": "f1486:m53"} {"signature": "def maybe_replace_any_if_equal(name, expected, actual):", "body": "is_equal = expected == actualif not is_equal and Config.replace_any:actual_str = minimize_whitespace(str(actual))if actual_str and actual_str[] in {'', \"\"}:actual_str = 
actual_str[:-]is_equal = actual_str in {'', '', ''}if not is_equal:expected_annotation = minimize_whitespace(str(expected))actual_annotation = minimize_whitespace(str(actual))raise ValueError(f\"\" +f\"\")return expected or actual", "docstring": "Return the type given in `expected`.\n\n Raise ValueError if `expected` isn't equal to `actual`. If --replace-any is\n used, the Any type in `actual` is considered equal.\n\n The implementation is naively checking if the string representation of\n `actual` is one of \"Any\", \"typing.Any\", or \"t.Any\". This is done for two\n reasons:\n\n 1. I'm lazy.\n 2. We want people to be able to explicitly state that they want Any without it\n being replaced. This way they can use an alias.", "id": "f1486:m55"} {"signature": "def ensure_annotations_equal(name, expected, actual):", "body": "maybe_replace_any_if_equal(name, expected, actual)", "docstring": "Raise ValueError if `expected` isn't equal to `actual`.\n\n If --replace-any is used, the Any type in `actual` is considered equal.", "id": "f1486:m57"} {"signature": "def remove_function_signature_type_comment(body):", "body": "for node in body.children:if node.type == token.INDENT:prefix = node.prefix.lstrip()if prefix.startswith(''):node.prefix = ''.join(prefix.split('')[:])break", "docstring": "Removes the legacy signature type comment, leaving other comments if any.", "id": "f1486:m58"} {"signature": "def flatten_some(children):", "body": "for node in children:if node.type in (syms.try_stmt, syms.suite):yield from flatten_some(node.children)else:yield node", "docstring": "Generates nodes or leaves, unpacking bodies of try:except:finally: statements.", "id": "f1486:m61"} {"signature": "def pop_param(params):", "body": "default = Nonename = params.pop()if name in (_star, _dstar):default = params.pop()if default == _comma:return name, defaulttry:remainder = params.pop()if remainder == _eq:default = params.pop()remainder = params.pop()if remainder != _comma:raise ValueError(f\"\")except IndexError:passreturn name, default", "docstring": "Pops the parameter and the \"remainder\" (comma, default value).\n\n Returns a tuple of ('name', default) or (_star, 'name') or (_dstar, 'name').", "id": "f1486:m62"} {"signature": "def get_offset_and_prefix(body, skip_assignments=False):", "body": "assert body.type in (syms.file_input, syms.suite)_offset = prefix = ''for _offset, child in enumerate(body.children):if child.type == syms.simple_stmt:stmt = child.children[]if stmt.type == syms.expr_stmt:expr = stmt.childrenif not skip_assignments:breakif (len(expr) != orexpr[].type != token.NAME orexpr[].type != syms.annassign or_eq in expr[].children):breakelif stmt.type not in (syms.import_name, syms.import_from, token.STRING):breakelif child.type == token.INDENT:assert isinstance(child, Leaf)prefix = child.valueelif child.type != token.NEWLINE:breakprefix, child.prefix = child.prefix, prefixreturn _offset, prefix", "docstring": "Returns the offset after which a statement can be inserted to the `body`.\n\n This offset is calculated to come after all imports, and maybe existing\n (possibly annotated) assignments if `skip_assignments` is True.\n\n Also returns the indentation prefix that should be applied to the inserted\n node.", "id": "f1486:m65"} {"signature": "@singledispatchdef name_used_in_node(node, name):", "body": "", "docstring": "Returns True if `name` appears in `node`. 
False otherwise.", "id": "f1486:m66"} {"signature": "def fix_line_numbers(body):", "body": "maxline = for node in body.pre_order():maxline += node.prefix.count('')if isinstance(node, Leaf):node.lineno = maxlinemaxline += str(node.value).count('')", "docstring": "r\"\"\"Recomputes all line numbers based on the number of \\n characters.", "id": "f1486:m69"} {"signature": "def new(n, prefix=None):", "body": "if isinstance(n, Leaf):return Leaf(n.type, n.value, prefix=n.prefix if prefix is None else prefix)n.parent = Noneif prefix is not None:n.prefix = prefixreturn n", "docstring": "lib2to3's AST requires unique objects as children.", "id": "f1486:m70"} {"signature": "def apply_job_security(code):", "body": "buf = io.BytesIO(code.encode(''))tokens = tokenize.tokenize(buf.readline)data = tokenize.untokenize(replacetokens(list(tokens), ''))return cast(str, data.decode(''))", "docstring": "Treat input `code` like Python 2 (implicit strings are byte literals).\n\n The implementation is horribly inefficient but the goal is to be compatible\n with what Mercurial does at runtime.", "id": "f1487:m0"} {"signature": "def create_index(file_path, index_path, index_ratio, index_width):", "body": "i = with file_path.open() as f:with index_path.open(\"\") as idx:idx.write(index_ratio.to_bytes(, byteorder=\"\"))idx.write(index_width.to_bytes(, byteorder=\"\"))idx.write(().to_bytes(, byteorder=\"\")) idx.write(().to_bytes(index_width, byteorder=\"\"))while f.readline():i += if (i % index_ratio) == :pointer = f.tell()b = pointer.to_bytes(index_width, byteorder=\"\")idx.write(b)idx.seek()idx.write(i.to_bytes(, byteorder=\"\"))t = file_path.stat().st_mtimeos.utime(str(index_path), (t, t))", "docstring": "Index format:\n 1st byte: index_ratio\n 2nd byte: index_width\n 3rd byte: line_count", "id": "f1492:m0"} {"signature": "def __init__(self, filepath, encoding=None, errors=None, newline=None, index_ratio=, index_width=):", "body": "self.filepath = Path(filepath)self.file = self.filepath.open(encoding=encoding, errors=errors, newline=newline)self.index = self.get_or_create_index(index_ratio, index_width)", "docstring": "Open a text file which has it's line numbers indexed.\n\n:param filepath:\n:param encoding:\n:param errors:\n:param newline:\n:param index_ratio: how close together should indexed lines be? 
(1=index all lines, 2=index every 2nd line, etc)\n:param index_width: how many bytes are needed to store the location of lines in the file", "id": "f1492:c0:m0"} {"signature": "def __getitem__(self, slice_obj):", "body": "start, stop, step = normalize_slice(slice_obj, self.index.line_count)if isinstance(slice_obj, slice):if step == :return self._get_lines(start, stop)return [self._get_lines(i)[] for i in range(start, stop, step)]if isinstance(slice_obj, int):return self._get_lines(start)[]return None", "docstring": "Supports slice operations on the file\n\nFor example:\n\n with IndexedOpen(filename) as f:\n print f[6:-2]", "id": "f1492:c0:m4"} {"signature": "def get_or_create_index(self, index_ratio, index_width):", "body": "if not self.index_path.exists() or not self.filepath.stat().st_mtime == self.index_path.stat().st_mtime:create_index(self.filepath, self.index_path, index_ratio=index_ratio, index_width=index_width)return IndexFile(str(self.index_path))", "docstring": "Return an open file-object to the index file", "id": "f1492:c0:m6"} {"signature": "@propertydef index_path(self):", "body": "return Path(str(self.filepath) + \"\")", "docstring": "the path to the index file", "id": "f1492:c0:m7"} {"signature": "def __init__(self, index_file_path):", "body": "self.index_path = Path(index_file_path)self.header_length = self.index_file = self._open_index()", "docstring": "Open a line-number index for a text file.", "id": "f1492:c1:m0"} {"signature": "def normalize_slice(slice_obj, length):", "body": "if isinstance(slice_obj, slice):start, stop, step = slice_obj.start, slice_obj.stop, slice_obj.stepif start is None:start = if stop is None:stop = lengthif step is None:step = if start < :start += lengthif stop < :stop += lengthelif isinstance(slice_obj, int):start = slice_objif start < :start += lengthstop = start + step = else:raise TypeErrorif ( <= start <= length) and ( <= stop <= length):return start, stop, stepraise IndexError", "docstring": "Given a slice object, return appropriate values for use in the range function\n\n:param slice_obj: The slice object or integer provided in the `[]` notation\n:param length: For negative indexing we need to know the max length of the object.", "id": "f1494:m0"} {"signature": "def setUp(self):", "body": "global engineengine = create_engine('', echo=False)global SessionSession = sessionmaker(bind=engine)global sessionsession = Session()session._model_changes = {}Base.metadata.create_all(bind=engine)session.add_all([User(name='', lastname='', uid='', city_id=),User(name='', lastname='', uid='', city_id=),User(name='', lastname='', uid='', city_id=),City(name=''),City(name='')])session.commit()", "docstring": "Initial setup for the test", "id": "f1495:c2:m0"} {"signature": "def elastic_query(model, query, session=None, enabled_fields=None):", "body": "instance = ElasticQuery(model, query, session, enabled_fields)return instance.search()", "docstring": "Public method for init the class ElasticQuery\n :model: SQLAlchemy model\n :query: valid string like a ElasticSearch\n :session: SQLAlchemy session *optional\n :enabled_fields: Fields allowed for make a query *optional", "id": "f1497:m0"} {"signature": "def __init__(self, model, query, session=None, enabled_fields=None):", "body": "self.model = modelself.query = queryif hasattr(model, ''):self.model_query = model.queryelse:self.model_query = session.query(self.model)self.enabled_fields = enabled_fields", "docstring": "Initializator of the class 'ElasticQuery", "id": "f1497:c0:m0"} {"signature": "def 
search(self):", "body": "try:filters = json.loads(self.query)except ValueError:return Falseresult = self.model_queryif ''in filters.keys():result = self.parse_filter(filters[''])if ''in filters.keys():result = result.order_by(*self.sort(filters['']))return result", "docstring": "This is the most important method", "id": "f1497:c0:m1"} {"signature": "def parse_filter(self, filters):", "body": "for filter_type in filters:if filter_type == '' or filter_type == '':conditions = []for field in filters[filter_type]:if self.is_field_allowed(field):conditions.append(self.create_query(self.parse_field(field, filters[filter_type][field])))if filter_type == '':self.model_query = self.model_query.filter(or_(*conditions))elif filter_type == '':self.model_query = self.model_query.filter(and_(*conditions))else:if self.is_field_allowed(filter_type):conditions = self.create_query(self.parse_field(filter_type, filters[filter_type]))self.model_query = self.model_query.filter(conditions)return self.model_query", "docstring": "This method process the filters", "id": "f1497:c0:m2"} {"signature": "def parse_field(self, field, field_value):", "body": "if type(field_value) is dict:operator = list(field_value)[]if self.verify_operator(operator) is False:return \"\", operatorvalue = field_value[operator]elif type(field_value) is unicode:operator = u''value = field_valuereturn field, operator, value", "docstring": "Parse the operators and traduce: ES to SQLAlchemy operators", "id": "f1497:c0:m3"} {"signature": "@staticmethoddef verify_operator(operator):", "body": "try:if hasattr(OPERATORS[operator], ''):return Trueelse:return Falseexcept ValueError:return False", "docstring": "Verify if the operator is valid", "id": "f1497:c0:m4"} {"signature": "def create_query(self, attr):", "body": "field = attr[]operator = attr[]value = attr[]model = self.modelif '' in field:field_items = field.split('')field_name = getattr(model, field_items[], None)class_name = field_name.property.mapper.class_new_model = getattr(class_name, field_items[])return field_name.has(OPERATORS[operator](new_model, value))return OPERATORS[operator](getattr(model, field, None), value)", "docstring": "Mix all values and make the query", "id": "f1497:c0:m6"} {"signature": "def sort(self, sort_list):", "body": "order = []for sort in sort_list:if sort_list[sort] == \"\":order.append(asc(getattr(self.model, sort, None)))elif sort_list[sort] == \"\":order.append(desc(getattr(self.model, sort, None)))return order", "docstring": "Sort", "id": "f1497:c0:m7"} {"signature": "def read(*paths):", "body": "with open(os.path.join(*paths), '') as f:return f.read()", "docstring": "Build a file path from *paths* and return the contents.", "id": "f1498:m0"} {"signature": "def build_string(iterable):", "body": "return u''.join(iterable)", "docstring": "A utility function to wrap up the converting a list of characters back into a string.", "id": "f1511:m0"} {"signature": "def caseless_string(s):", "body": "return string(zip(s.lower(), s.upper()))", "docstring": "Attempts to match input to the letters in the string, without regard for case.", "id": "f1511:m1"} {"signature": "def lexeme(parser):", "body": "whitespace()v = parser()whitespace()return v", "docstring": "Ignores any whitespace surrounding parser.", "id": "f1511:m2"} {"signature": "def quoted(parser=any_token):", "body": "quote_char = quote()value, _ = many_until(parser, partial(one_of, quote_char))return build_string(value)", "docstring": "Parses as much as possible until it encounters a matching closing 
quote.\n\n By default matches any_token, but can be provided with a more specific parser if required.\n Returns a string", "id": "f1511:m3"} {"signature": "def make_literal(s):", "body": "return partial(s, tri(string), s)", "docstring": "returns a literal parser", "id": "f1511:m4"} {"signature": "def literal(s):", "body": "return make_literal(s)()", "docstring": "A literal string.", "id": "f1511:m5"} {"signature": "def make_caseless_literal(s):", "body": "return partial(s, tri(caseless_string), s)", "docstring": "returns a literal string, case independent parser.", "id": "f1511:m6"} {"signature": "def caseless_literal(s):", "body": "return make_caseless_literal(s)()", "docstring": "A literal string, case independent.", "id": "f1511:m7"} {"signature": "def compose(f, g):", "body": "return lambda *args, **kwargs: f(g(*args, **kwargs))", "docstring": "Compose returns the two functions composed as a new function.\n\n The first is called with the result of the second as its argument. Any arguments \n are passed to the second.", "id": "f1512:m4"} {"signature": "def chain(*args):", "body": "def chain_block(*args, **kwargs):v = args[](*args, **kwargs)for p in args[:]:v = p(v)return vreturn chain_block", "docstring": "Runs a series of parsers in sequence passing the result of each parser to the next.\n The result of the last parser is returned.", "id": "f1512:m5"} {"signature": "def any_token():", "body": "ch = peek()if ch is EndOfFile:fail([\"\"])next()return ch", "docstring": "Will accept any token in the input stream and step forward.\n\n Fails if there are no more tokens", "id": "f1512:m6"} {"signature": "def one_of(these):", "body": "ch = peek()try:if (ch is EndOfFile) or (ch not in these):fail(list(these))except TypeError:if ch != these:fail([these])next()return ch", "docstring": "Returns the current token if it is found in the collection provided.\n\n Fails otherwise.", "id": "f1512:m7"} {"signature": "def not_one_of(these):", "body": "ch = peek()desc = \"\" + repr(these)try:if (ch is EndOfFile) or (ch in these):fail([desc])except TypeError:if ch != these:fail([desc])next()return ch", "docstring": "Returns the current token if it is not found in the collection provided.\n\n The negative of one_of.", "id": "f1512:m8"} {"signature": "def satisfies(guard):", "body": "i = peek()if (i is EndOfFile) or (not guard(i)):fail([\"\" + _fun_to_str(guard) + \">\"])next()return i", "docstring": "Returns the current token if it satisfies the guard function provided.\n\n Fails otherwise.\n This is a generalisation of one_of.", "id": "f1512:m10"} {"signature": "def optional(parser, default=None):", "body": "return choice(parser, lambda: default)", "docstring": "Tries to apply the provided parser, returning default if the parser fails.", "id": "f1512:m11"} {"signature": "def not_followed_by(parser):", "body": "@tridef not_followed_by_block():failed = object()result = optional(tri(parser), failed)if result != failed:fail([\"\" + _fun_to_str(parser)])choice(not_followed_by_block)", "docstring": "Succeeds if the given parser cannot consume input", "id": "f1512:m12"} {"signature": "def many(parser):", "body": "results = []terminate = object()while local_ps.value:result = optional(parser, terminate)if result == terminate:breakresults.append(result)return results", "docstring": "Applies the parser to input zero or more times.\n\n Returns a list of parser results.", "id": "f1512:m13"} {"signature": "def many1(parser):", "body": "return [parser()] + many(parser)", "docstring": "Like many, but must consume at least one 
of parser", "id": "f1512:m14"} {"signature": "def many_until(these, term):", "body": "results = []while True:stop, result = choice(_tag(True, term),_tag(False, these))if stop:return results, resultelse:results.append(result)", "docstring": "Consumes as many of these as it can until it term is encountered.\n\n Returns a tuple of the list of these results and the term result", "id": "f1512:m16"} {"signature": "def many_until1(these, term):", "body": "first = [these()]these_results, term_result = many_until(these, term)return (first + these_results, term_result)", "docstring": "Like many_until but must consume at least one of these.", "id": "f1512:m17"} {"signature": "def sep1(parser, separator):", "body": "first = [parser()]def inner():separator()return parser()return first + many(tri(inner))", "docstring": "Like sep but must consume at least one of parser.", "id": "f1512:m18"} {"signature": "def sep(parser, separator):", "body": "return optional(partial(sep1, parser, separator), [])", "docstring": "Consumes zero or more of parser, separated by separator.\n\n Returns a list of parser's results", "id": "f1512:m19"} {"signature": "def n_of(parser, n):", "body": "return [parser() for i in range(n)]", "docstring": "Consumes n of parser, returning a list of the results.", "id": "f1512:m20"} {"signature": "def string(string):", "body": "found = []for c in string:found.append(one_of(c))return found", "docstring": "Iterates over string, matching input to the items provided.\n\n The most obvious usage of this is to accept an entire string of characters,\n However this is function is more general than that. It takes an iterable\n and for each item, it tries one_of for that set. For example, \n string(['aA','bB','cC'])\n will accept 'abc' in either case. \n\n note, If you wish to match caseless strings as in the example, use \n picoparse.text.caseless_string.", "id": "f1512:m21"} {"signature": "def cue(*parsers):", "body": "if not parsers:return Nonefor p in parsers[:-]:p()return parsers[-]()", "docstring": "Runs multiple parsers and returns the result of the last.", "id": "f1512:m22"} {"signature": "def follow(*parsers):", "body": "if not parsers:return Nonev = parsers[]()for p in parsers[:]:p()return v", "docstring": "Runs multiple parsers and returns the result of the first.", "id": "f1512:m23"} {"signature": "def remaining():", "body": "tokens = []while peek() is not EndOfFile:tokens.append(peek())next()return tokens", "docstring": "Returns the remaining input that has not been parsed.", "id": "f1512:m24"} {"signature": "def seq(*sequence):", "body": "results = {}for p in sequence:if callable(p): p()continuek, v = presults[k] = v()return results", "docstring": "Runs a series of parsers in sequence optionally storing results in a returned dictionary.\n\n For example:\n seq(whitespace, ('phone', digits), whitespace, ('name', remaining))", "id": "f1512:m25"} {"signature": "def _fill(self, size):", "body": "try:for i in range(size):self.buffer.append(self.source.__next__())except StopIteration:self.buffer.append((EndOfFile, EndOfFile))self.len = len(self.buffer)", "docstring": "fills the internal buffer from the source iterator", "id": "f1512:c3:m2"} {"signature": "def __next__(self):", "body": "self.index += t = self.peek()if not self.depth:self._cut()return t", "docstring": "Advances to and returns the next token or returns EndOfFile", "id": "f1512:c3:m3"} {"signature": "def current(self):", "body": "if self.index >= self.len:self._fill((self.index - self.len) + )return self.index < self.len and 
self.buffer[self.index] or (EndOfFile, EndOfFile)", "docstring": "Returns the current (token, position) or (EndOfFile, EndOfFile)", "id": "f1512:c3:m4"} {"signature": "def peek(self):", "body": "return self.current()[]", "docstring": "Returns the current token or EndOfFile", "id": "f1512:c3:m5"} {"signature": "def pos(self):", "body": "return self.current()[]", "docstring": "Returns the current position or EndOfFile", "id": "f1512:c3:m6"} {"signature": "def allele_frequency(expec):", "body": "expec = asarray(expec, float)if expec.ndim != :raise ValueError(\"\")ploidy = expec.shape[-]return expec.sum(-) / ploidy", "docstring": "r\"\"\" Compute allele frequency from its expectation.\n\n Parameters\n ----------\n expec : array_like\n Allele expectations encoded as a samples-by-alleles matrix.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Allele frequencies encoded as a variants-by-alleles matrix.\n\n Examples\n --------\n .. doctest::\n\n >>> from bgen_reader import read_bgen, example_files\n >>> from bgen_reader import allele_expectation, allele_frequency\n >>>\n >>> # Download an example\n >>> example = example_files(\"example.32bits.bgen\")\n >>> filepath = example.filepath\n >>>\n >>> bgen = read_bgen(filepath, verbose=False)\n >>>\n >>> variants = bgen[\"variants\"]\n >>> samples = bgen[\"samples\"]\n >>> genotype = bgen[\"genotype\"]\n >>>\n >>> variant = variants[variants[\"rsid\"] == \"RSID_6\"].compute()\n >>> variant_idx = variant.index.item()\n >>>\n >>> p = genotype[variant_idx].compute()[\"probs\"]\n >>> # For unphased genotypes only.\n >>> e = allele_expectation(bgen, variant_idx)\n >>> f = allele_frequency(e)\n >>>\n >>> alleles = variant[\"allele_ids\"].item().split(\",\")\n >>> print(alleles[0] + \": {}\".format(f[0]))\n A: 229.23103218810434\n >>> print(alleles[1] + \": {}\".format(f[1]))\n G: 270.7689678118956\n >>> print(variant)\n id rsid chrom pos nalleles allele_ids vaddr\n 4 SNPID_6 RSID_6 01 6000 2 A,G 19377\n >>>\n >>> # Clean-up the example\n >>> example.close()", "id": "f1519:m0"} {"signature": "def compute_dosage(expec, alt=None):", "body": "if alt is None:return expec[..., -]try:return expec[:, alt]except NotImplementedError:alt = asarray(alt, int)return asarray(expec, float)[:, alt]", "docstring": "r\"\"\" Compute dosage from allele expectation.\n\n Parameters\n ----------\n expec : array_like\n Allele expectations encoded as a samples-by-alleles matrix.\n alt : array_like, optional\n Alternative allele index. If ``None``, the allele having the minor\n allele frequency for the provided ``expec`` is used as the alternative.\n Defaults to ``None``.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Dosage encoded as an array of size equal to the number of samples.\n\n Examples\n --------\n .. 
code-block:: python\n :caption: First a quick-start example.\n\n >>> from bgen_reader import allele_expectation, compute_dosage\n >>> from bgen_reader import example_files, read_bgen\n >>>\n >>> # Download an example.\n >>> example = example_files(\"example.32bits.bgen\")\n >>> filepath = example.filepath\n >>>\n >>> # Read the example.\n >>> bgen = read_bgen(filepath, verbose=False)\n >>>\n >>> # Extract the allele expectations of the fourth variant.\n >>> variant_idx = 3\n >>> e = allele_expectation(bgen, variant_idx)\n >>>\n >>> # Compute the dosage when considering the first allele\n >>> # as the reference/alternative one.\n >>> alt_allele_idx = 1\n >>> d = compute_dosage(e, alt=alt_allele_idx)\n >>>\n >>> # Print the dosage of the first five samples only.\n >>> print(d[:5])\n [1.96185308 0.00982666 0.01745552 1.00347899 1.01153563]\n >>>\n >>> # Clean-up the example\n >>> example.close()\n\n .. code-block:: python\n :caption: Genotype probabilities, allele expectations and frequencies.\n\n >>> from bgen_reader import (\n ... allele_expectation,\n ... allele_frequency,\n ... compute_dosage,\n ... example_files,\n ... read_bgen,\n ... )\n >>> from pandas import DataFrame\n >>> from xarray import DataArray\n >>>\n >>> # Download an example\n >>> example = example_files(\"example.32bits.bgen\")\n >>> filepath = example.filepath\n >>>\n >>> # Open the bgen file.\n >>> bgen = read_bgen(filepath, verbose=False)\n >>> variants = bgen[\"variants\"]\n >>> genotype = bgen[\"genotype\"]\n >>> samples = bgen[\"samples\"]\n >>>\n >>> variant_idx = 3\n >>> variant = variants.loc[variant_idx].compute()\n >>> # Print the metadata of the fourth variant.\n >>> print(variant)\n id rsid chrom pos nalleles allele_ids vaddr\n 3 SNPID_5 RSID_5 01 5000 2 A,G 16034\n\n >>> geno = bgen[\"genotype\"][variant_idx].compute()\n >>> metageno = DataFrame({k: geno[k] for k in [\"ploidy\", \"missing\"]},\n ... index=samples)\n >>> metageno.index.name = \"sample\"\n >>> print(metageno) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE\n ploidy missing\n sample\n sample_001 2 False\n sample_002 2 False\n sample_003 2 False\n sample_004 2 False\n ... ... ...\n sample_497 2 False\n sample_498 2 False\n sample_499 2 False\n sample_500 2 False\n \n [500 rows x 2 columns]\n >>> p = DataArray(\n ... geno[\"probs\"],\n ... name=\"probability\",\n ... coords={\"sample\": samples},\n ... dims=[\"sample\", \"genotype\"],\n ... )\n >>> # Print the genotype probabilities.\n >>> print(p.to_series().unstack(level=-1)) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE\n genotype 0 1 2\n sample\n sample_001 0.00488 0.02838 0.96674\n sample_002 0.99045 0.00928 0.00027\n sample_003 0.98932 0.00391 0.00677\n sample_004 0.00662 0.98328 0.01010\n ... ... ... ...\n sample_497 0.00137 0.01312 0.98550\n sample_498 0.00552 0.99423 0.00024\n sample_499 0.01266 0.01154 0.97580\n sample_500 0.00021 0.98431 0.01547\n \n [500 rows x 3 columns]\n >>> alleles = variant[\"allele_ids\"].item().split(\",\")\n >>> e = DataArray(\n ... allele_expectation(bgen, variant_idx),\n ... name=\"expectation\",\n ... coords={\"sample\": samples, \"allele\": alleles},\n ... dims=[\"sample\", \"allele\"],\n ... )\n >>> # Print the allele expectations.\n >>> print(e.to_series().unstack(level=-1)) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE\n allele A G\n sample\n sample_001 0.03815 1.96185\n sample_002 1.99017 0.00983\n sample_003 1.98254 0.01746\n sample_004 0.99652 1.00348\n ... ... 
...\n sample_497 0.01587 1.98413\n sample_498 1.00528 0.99472\n sample_499 0.03687 1.96313\n sample_500 0.98474 1.01526\n \n [500 rows x 2 columns]\n >>> rsid = variant[\"rsid\"].item()\n >>> chrom = variant[\"chrom\"].item()\n >>> variant_name = f\"{chrom}:{rsid}\"\n >>> f = DataFrame(allele_frequency(e), columns=[variant_name], index=alleles)\n >>> f.index.name = \"allele\"\n >>> # Allele frequencies.\n >>> print(f) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE\n 01:RSID_5\n allele\n A 305.97218\n G 194.02782\n >>> alt = f.idxmin().item()\n >>> alt_idx = alleles.index(alt)\n >>> d = compute_dosage(e, alt=alt_idx).to_series()\n >>> d = DataFrame(d.values, columns=[f\"alt={alt}\"], index=d.index)\n >>> # Dosages when considering G as the alternative allele.\n >>> print(d) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE\n alt=G\n sample\n sample_001 1.96185\n sample_002 0.00983\n sample_003 0.01746\n sample_004 1.00348\n ... ...\n sample_497 1.98413\n sample_498 0.99472\n sample_499 1.96313\n sample_500 1.01526\n \n [500 rows x 1 columns]\n >>>\n >>> # Clean-up the example\n >>> example.close()", "id": "f1519:m1"} {"signature": "def allele_expectation(bgen, variant_idx):", "body": "geno = bgen[\"\"][variant_idx].compute()if geno[\"\"]:raise ValueError(\"\")nalleles = bgen[\"\"].loc[variant_idx, \"\"].compute().item()genotypes = get_genotypes(geno[\"\"], nalleles)expec = []for i in range(len(genotypes)):count = asarray(genotypes_to_allele_counts(genotypes[i]), float)n = count.shape[]expec.append((count.T * geno[\"\"][i, :n]).sum())return stack(expec, axis=)", "docstring": "r\"\"\" Allele expectation.\n\n Compute the expectation of each allele from the genotype probabilities.\n\n Parameters\n ----------\n bgen : bgen_file\n Bgen file handler.\n variant_idx : int\n Variant index.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Samples-by-alleles matrix of allele expectations.\n\n Note\n ----\n This function supports unphased genotypes only.\n\n Examples\n --------\n .. 
doctest::\n\n >>> from bgen_reader import allele_expectation, example_files, read_bgen\n >>>\n >>> from texttable import Texttable\n >>>\n >>> # Download an example.\n >>> example = example_files(\"example.32bits.bgen\")\n >>> filepath = example.filepath\n >>>\n >>> # Read the example.\n >>> bgen = read_bgen(filepath, verbose=False)\n >>>\n >>> variants = bgen[\"variants\"]\n >>> samples = bgen[\"samples\"]\n >>> genotype = bgen[\"genotype\"]\n >>>\n >>> genotype = bgen[\"genotype\"]\n >>> # This `compute` call will return a pandas data frame,\n >>> variant = variants[variants[\"rsid\"] == \"RSID_6\"].compute()\n >>> # from which we retrieve the variant index.\n >>> variant_idx = variant.index.item()\n >>> print(variant)\n id rsid chrom pos nalleles allele_ids vaddr\n 4 SNPID_6 RSID_6 01 6000 2 A,G 19377\n >>> genotype = bgen[\"genotype\"]\n >>> # Samples is a pandas series, and we retrieve the\n >>> # sample index from the sample name.\n >>> sample_idx = samples[samples == \"sample_005\"].index.item()\n >>>\n >>> genotype = bgen[\"genotype\"]\n >>> # This `compute` call will return a dictionary from which\n >>> # we can get the probability matrix the corresponding\n >>> # variant.\n >>> p = genotype[variant_idx].compute()[\"probs\"][sample_idx]\n >>>\n >>> genotype = bgen[\"genotype\"]\n >>> # Allele expectation makes sense for unphased genotypes only,\n >>> # which is the case here.\n >>> e = allele_expectation(bgen, variant_idx)[sample_idx]\n >>>\n >>> genotype = bgen[\"genotype\"]\n >>> alleles = variant[\"allele_ids\"].item().split(\",\")\n >>>\n >>> genotype = bgen[\"genotype\"]\n >>>\n >>> # Print what we have got in a nice format.\n >>> table = Texttable()\n >>> table = table.add_rows(\n ... [\n ... [\"\", \"AA\", \"AG\", \"GG\", \"E[.]\"],\n ... [\"p\"] + list(p) + [\"na\"],\n ... [\"#\" + alleles[0], 2, 1, 0, e[0]],\n ... [\"#\" + alleles[1], 0, 1, 2, e[1]],\n ... ]\n ... )\n >>> print(table.draw())\n +----+-------+-------+-------+-------+\n | | AA | AG | GG | E[.] |\n +====+=======+=======+=======+=======+\n | p | 0.012 | 0.987 | 0.001 | na |\n +----+-------+-------+-------+-------+\n | #A | 2 | 1 | 0 | 1.011 |\n +----+-------+-------+-------+-------+\n | #G | 0 | 1 | 2 | 0.989 |\n +----+-------+-------+-------+-------+\n >>>\n >>> # Clean-up.\n >>> example.close()", "id": "f1519:m2"} {"signature": "def read_bgen(filepath, metafile_filepath=None, samples_filepath=None, verbose=True):", "body": "assert_file_exist(filepath)assert_file_readable(filepath)metafile_filepath = _get_valid_metafile_filepath(filepath, metafile_filepath)if not os.path.exists(metafile_filepath):if verbose:print(f\"\"\"\"\"\")create_metafile(filepath, metafile_filepath, verbose)samples = get_samples(filepath, samples_filepath, verbose)variants = map_metadata(filepath, metafile_filepath)genotype = map_genotype(filepath, metafile_filepath, verbose)return dict(variants=variants, samples=samples, genotype=genotype)", "docstring": "r\"\"\" Read a given BGEN file.\n\n Parameters\n ----------\n filepath : str\n A bgen file path.\n metafile_filepath : str, optional\n If ``None``, it will try to read the ``filepath + \".metadata\"`` file. If this is\n not possible, it will create one. It tries to create one at\n ``filepath + \".metadata\"``. 
If that is also not possible, it tries to create one\n at a temporary folder.\n samples_filepath : str, optional\n A sample file in `gen format `_.\n If ``samples_filepath`` is provided, sample ids are read from this file.\n Otherwise, it reads from the bgen file itself if possible. Defaults to ``None``.\n verbose : bool, optional\n ``True`` to show progress; ``False`` otherwise. Defaults to ``True``.\n\n Returns\n -------\n variants : :class:`dask.dataFrame.DataFrame`\n Variant position, chromosomes, rsids, etc.\n samples : :class:`pandas.Series`\n Sample identifications.\n genotype : list\n List of genotypes.\n\n Examples\n --------\n .. doctest::\n\n >>> from bgen_reader import example_files, read_bgen\n >>>\n >>> with example_files(\"haplotypes.bgen\") as filepath:\n ... bgen = read_bgen(filepath, verbose=False)\n ... variants = bgen[\"variants\"]\n ... samples = bgen[\"samples\"]\n ...\n ... v = variants.loc[0].compute()\n ... g = bgen[\"genotype\"][0].compute()\n ... print(v)\n ... print(samples)\n ... print(g[\"probs\"][0])\n id rsid chrom pos nalleles allele_ids vaddr\n 0 SNP1 RS1 1 1 2 A,G 102\n 0 sample_0\n 1 sample_1\n 2 sample_2\n 3 sample_3\n Name: id, dtype: object\n [1. 0. 1. 0.]", "id": "f1520:m0"} {"signature": "def _touch(fname, mode=, dir_fd=None, **kwargs):", "body": "flags = os.O_CREAT | os.O_APPENDwith os.fdopen(os.open(fname, flags=flags, mode=mode, dir_fd=dir_fd)) as f:os.utime(f.fileno() if os.utime in os.supports_fd else fname,dir_fd=None if os.supports_fd else dir_fd,**kwargs,)", "docstring": "Touch a file.\n\n Credits to .", "id": "f1521:m4"} {"signature": "def create_metafile(bgen_filepath, metafile_filepath, verbose=True):", "body": "if verbose:verbose = else:verbose = bgen_filepath = make_sure_bytes(bgen_filepath)metafile_filepath = make_sure_bytes(metafile_filepath)assert_file_exist(bgen_filepath)assert_file_readable(bgen_filepath)if exists(metafile_filepath):raise ValueError(f\"\")with bgen_file(bgen_filepath) as bgen:nparts = _estimate_best_npartitions(lib.bgen_nvariants(bgen))metafile = lib.bgen_create_metafile(bgen, metafile_filepath, nparts, verbose)if metafile == ffi.NULL:raise RuntimeError(f\"\")if lib.bgen_close_metafile(metafile) != :raise RuntimeError(f\"\")", "docstring": "r\"\"\"Create variants metadata file.\n\n Variants metadata file helps speed up subsequent reads of the associated\n bgen file.\n\n Parameters\n ----------\n bgen_filepath : str\n Bgen file path.\n metafile_filepath : str\n Metafile file path.\n verbose : bool\n ``True`` to show progress; ``False`` otherwise.\n\n Examples\n --------\n .. doctest::\n\n >>> import os\n >>> from bgen_reader import create_metafile, example_files\n >>>\n >>> with example_files(\"example.32bits.bgen\") as filepath:\n ... folder = os.path.dirname(filepath)\n ... metafile_filepath = os.path.join(folder, filepath + \".metadata\")\n ...\n ... try:\n ... create_metafile(filepath, metafile_filepath, verbose=False)\n ... finally:\n ... if os.path.exists(metafile_filepath):\n ... 
os.remove(metafile_filepath)", "id": "f1529:m0"} {"signature": "def bits_arch():", "body": "return struct.calcsize(\"\") * ", "docstring": "Determines the number of bits of the Python process.\n\n Return ``32`` or ``64``.", "id": "f1538:m0"} {"signature": "def find_libname(self, name):", "body": "names = [\"\", \"\", \"\"]names = [n.format(name) for n in names]dirs = self.get_library_dirs()for d in dirs:for n in names:if exists(join(d, n)):return n[:-]msg = \"\".format(name)raise ValueError(msg)", "docstring": "Try to infer the correct library name.", "id": "f1538:c1:m5"} {"signature": "def miles_to_feet(miles):", "body": "return miles * float()", "docstring": "Converts a number of miles to feet.\n\nArgs:\n miles: Number of miles we want to convert.\n\nReturns:\n Floating point number as the number of\n feet in the given miles.", "id": "f1541:m0"} {"signature": "def total_seconds(hours, minutes, seconds):", "body": "return (hours * + minutes) * + seconds", "docstring": "Returns the number of seconds in the given number of hours,\nminutes, and seconds.\n\nArgs:\n hours:\n Integer, number of hours.\n\n minutes:\n Integer, number of minutes.\n\n seconds:\n Integer, number of seconds.\n\nReturns:\n Integer, time in seconds.", "id": "f1541:m1"} {"signature": "def rectangle_perimeter(width, height):", "body": "return width * + height * ", "docstring": "Returns the perimeter of a rectangle with the given width and height.\n\nArgs:\n width:\n Integer or float, width of the rectangle.\n\n height:\n Integer or float, height of the rectangle.\n\nReturns:\n Integer or float, perimeter of a rectangle.", "id": "f1541:m2"} {"signature": "def rectangle_area(width, height):", "body": "return width * height", "docstring": "Returns the area of a rectangle with the given width and height.\n\n Args:\n width:\n Integer or float, width of the rectangle.\n\n height: Integer or float, height of the rectangle.\n\n Returns:\n The area of a rectangle as an integer or float.", "id": "f1541:m3"} {"signature": "def circle_circumference(radius):", "body": "return radius * * math.pi", "docstring": "Returns the circumference of a circle.\n\nArgs:\n radius: The radius of a circle.\n\nReturns:\n Integer > circumference of a circle.\n\nRequires:\n The math module.", "id": "f1541:m4"} {"signature": "def circle_area(radius):", "body": "return math.pi * radius ** ", "docstring": "Returns the area of a circle.\n\nArgs:\n radius: The radius of a circle.\n\nReturns:\n The area of a circle as an integer.\n\nRequires:\n The math module.", "id": "f1541:m5"} {"signature": "def compound_interest(principal, annual_rate, years):", "body": "return principal * ( + * annual_rate) ** years", "docstring": "Returns the future value of money invested at an annual\ninterest rate, compounded annually for a given number of years.\n\nArgs:\n principal: The beginning amount of money invested\n\n annual_rate: The interest rate paid out\n\n years: The number of years invested\n\nReturns:\n A basic calculation of compound interest.", "id": "f1541:m6"} {"signature": "def future_value(present_value, annual_rate, periods_per_year, years):", "body": "rate_per_period = annual_rate / float(periods_per_year)periods = periods_per_year * yearsreturn present_value * ( + rate_per_period) ** periods", "docstring": "Calculates the future value of money invested at an annual interest rate,\nx times per year, for a given number of years.\n\nArgs:\n present_value: int or float, the current value of the money (principal).\n\n annual_rate: float 0 to 1 (e.g., .5 = 50%), 
the interest rate paid out.\n\n periods_per_year: int, the number of times money is invested per year.\n\n years: int, the number of years invested.\n\nReturns:\n Float, the future value of the money invested with compound interest.", "id": "f1541:m7"} {"signature": "def point_distance(point1, point2):", "body": "return ((point1[] - point2[]) ** + (point1[] - point2[]) ** ) ** ", "docstring": "Computes the distance between two points on a plane.\n\nArgs:\n point1: Tuple or list, the x and y coordinate of the first point.\n\n point2: Tuple or list, the x and y coordinate of the second point.\n\nReturns:\n The distance between the two points as a floating point number.", "id": "f1541:m8"} {"signature": "def triangle_area(point1, point2, point3):", "body": "\"\"\"\"\"\"a = point_distance(point1, point2)b = point_distance(point1, point3)c = point_distance(point2, point3)\"\"\"\"\"\"s = (a + b + c) / \"\"\"\"\"\"return math.sqrt(s * (s - a) * (s - b) * (s - c))", "docstring": "Uses Heron's formula to find the area of a triangle\nbased on the coordinates of three points.\n\nArgs:\n point1: list or tuple, the x y coordinate of point one.\n\n point2: list or tuple, the x y coordinate of point two.\n\n point3: list or tuple, the x y coordinate of point three.\n\nReturns:\n The area of a triangle as a floating point number.\n\nRequires:\n The math module, point_distance().", "id": "f1541:m9"} {"signature": "def is_leap_year(year):", "body": "if (year % ) == :return Trueelif (year % ) == :return Falseelif (year % ) == :return Trueelse:return False", "docstring": "Checks to see if a given year is a leap year.\n\nArgs:\n Integer, the year to test.\n\nReturns:\n Boolean", "id": "f1541:m10"} {"signature": "def regular_polygon_area(number_of_sides, length_of_sides):", "body": "return ( * number_of_sides * length_of_sides ** ) / math.tan(math.pi / number_of_sides)", "docstring": "Calculates the area of a regular polygon (with sides of equal length).\n\nArgs:\n number_of_sides: Integer, the number of sides of the polygon\n\n length_of_sides: Integer or floating point number, the length of the sides\n\nReturns:\n The area of a regular polygon as an integer or floating point number\n\nRequires:\n The math module", "id": "f1541:m11"} {"signature": "def median(data):", "body": "ordered = sorted(data)length = len(ordered)if length % == :return (ordered[math.floor(length / ) - ] + ordered[math.floor(length / )]) / elif length % != :return ordered[math.floor(length / )]", "docstring": "Calculates the median of a list of integers or floating point numbers.\n\nArgs:\n data: A list of integers or floating point numbers\n\nReturns:\n Sorts the list numerically and returns the middle number if the list has an odd number\n of items. 
If the list contains an even number of items the mean of the two middle numbers\n is returned.", "id": "f1541:m12"} {"signature": "def average(numbers, numtype=''):", "body": "if type == '':return Decimal(sum(numbers)) / len(numbers)else:return float(sum(numbers)) / len(numbers)", "docstring": "Calculates the average or mean of a list of numbers\n\nArgs:\n numbers: a list of integers or floating point numbers.\n\n numtype: string, 'decimal' or 'float'; the type of number to return.\n\nReturns:\n The average (mean) of the numbers as a floating point number\n or a Decimal object.\n\nRequires:\n The math module", "id": "f1541:m13"} {"signature": "def variance(numbers, type=''):", "body": "mean = average(numbers)variance = for number in numbers:variance += (mean - number) ** if type == '':return variance / len(numbers)else:return variance / (len(numbers) - )", "docstring": "Calculates the population or sample variance of a list of numbers.\nA large number means the results are all over the place, while a\nsmall number means the results are comparatively close to the average.\n\nArgs:\n numbers: a list of integers or floating point numbers to compare.\n\n type: string, 'population' or 'sample', the kind of variance to be computed.\n\nReturns:\n The computed population or sample variance.\n Defaults to population variance.\n\nRequires:\n The math module, average()", "id": "f1541:m14"} {"signature": "def standard_deviation(variance):", "body": "return variance ** ", "docstring": "Calculates the standard deviation.\n\nArgs:\n variance: The variance of a group of numbers.\n\nReturns:\n The standard deviation as a floating point number.", "id": "f1541:m15"} {"signature": "def get_percentage(a, b, i=False, r=False):", "body": "if i is False and r is True:percentage = round( * (float(a) / b), )elif (i is True and r is True) or (i is True and r is False):percentage = int(round( * (float(a) / b)))if r is False:warnings.warn(\"\")else:percentage = * (float(a) / b)return percentage", "docstring": "Finds the percentage of one number over another.\n\nArgs:\n a: The number that is a percent, int or float.\n\n b: The base number that a is a percent of, int or float.\n\n i: Optional boolean integer. True if the user wants the result returned as\n a whole number. Assumes False.\n\n r: Optional boolean round. True if the user wants the result rounded.\n Rounds to the second decimal point on floating point numbers. Assumes False.\n\nReturns:\n The argument a as a percentage of b. Throws a warning if integer is set to True\n and round is set to False.", "id": "f1541:m16"} {"signature": "def get_slope(point1, point2):", "body": "return (float(point2[]) - point1[]) / (float(point2[]) - point1[])", "docstring": "Calculate the slope of the line connecting two points on a grid.\n\nArgs:\n point1: Tuple or list, the x and y coordinate of the first point.\n\n point2: Tuple or list, the x and y coordinate of the second point\n\nReturns:\n the slope of a line connecting two points on a grid.", "id": "f1541:m17"} {"signature": "def get_full_binary_tree_leaves(height):", "body": "return ** height", "docstring": "Calculate the number of leaves in a complete binary tree in which each internal\nnode has exactly two children. A full binary tree is complete if every leaf\nin the tree has the same depth. A leaf is a node without children\n\nArgs:\n height: integer, the height of the tree. Height is defined by the number\n of edges from the furthest child to the root. 
An edge is the line segment\n that runs between and connects nodes.", "id": "f1541:m18"} {"signature": "def get_full_binary_tree_nodes(height):", "body": "return ** (height + ) - ", "docstring": "Calculate the number of internal nodes in a complete binary tree in which each\ninternal node has exactly two children. A full binary tree is complete if every\nleaf in the tree has the same depth. Internal nodes include both leaves and\ninternal nodes. The root node is also included in this calculation.\n\nArgs:\n height: integer, the height of the tree. Height is defined by the number\n of edges from the furthest child to the root. An edge is the line segment\n that runs between and connects nodes.", "id": "f1541:m19"} {"signature": "def take_home_pay(gross_pay, employer_match, taxes_and_fees, numtype=''):", "body": "if numtype == '':return (Decimal(gross_pay) + Decimal(employer_match)) - Decimal(sum(taxes_and_fees))else:return (float(gross_pay) + float(employer_match)) - sum(taxes_and_fees)", "docstring": "Calculate net take-home pay including employer retirement savings match\nusing the formula laid out by Mr. Money Mustache:\nhttp://www.mrmoneymustache.com/2015/01/26/calculating-net-worth/\n\nArgs:\n gross_pay: float or int, gross monthly pay.\n\n employer_match: float or int, the 401(k) match from your employer.\n\n taxes_and_fees: list, taxes and fees that are deducted from your paycheck.\n\n numtype: string, 'decimal' or 'float'; the type of number to return.\n\nReturns:\n your monthly take-home pay.", "id": "f1541:m20"} {"signature": "def savings_rate(take_home_pay, spending, numtype=''):", "body": "if numtype == '':try:return ((Decimal(take_home_pay) - Decimal(spending)) / (Decimal(take_home_pay))) * Decimal()except (InvalidOperation, DivisionByZero):return Decimal()else:try:return ((float(take_home_pay) - float(spending)) / (float(take_home_pay))) * except (ZeroDivisionError):return ", "docstring": "Calculate net take-home pay including employer retirement savings match\nusing the formula laid out by Mr. 
Money Mustache:\nhttp://www.mrmoneymustache.com/2015/01/26/calculating-net-worth/\n\nArgs:\n take_home_pay: float or int, monthly take-home pay\n\n spending: float or int, monthly spending\n\n numtype: string, 'decimal' or 'float'; the type of number to return.\n\nReturns:\n your monthly savings rate expressed as a percentage.", "id": "f1541:m21"} {"signature": "def foo(a):", "body": "assert a < ", "docstring": "Meaningless...", "id": "f1544:m0"} {"signature": "def __init__(self):", "body": "self.data_structure = ''self.built_in = set(['', ''])", "docstring": "Initialize the object.", "id": "f1547:c0:m0"} {"signature": "def __str__(self):", "body": "return self.data_structure", "docstring": "String representation of the class.", "id": "f1547:c0:m1"} {"signature": "def is_built_in(self, language):", "body": "return language in self.built_in", "docstring": "Tests to see if a language is a built-in type\nsupported by python.\n\nArgs:\n language: string, language to test for.\n\nReturns: \n boolean", "id": "f1547:c0:m2"} {"signature": "def get_built_in(self, language, level, data):", "body": "pp = pprint.PrettyPrinter(indent=level)lookup = {'' : pp.pformat(data),'' : str(json.dumps(data, sort_keys=True, indent=level, separators=('', '')))}self.data_structure = lookup[language]", "docstring": "Gets the return string for a language that's supported by python.\nUsed in cases when python provides support for the conversion.\n\nArgs:\n language: string, the language to return for.\n\n level: integer, the indentation level.\n\n data: python data structure being converted (list of tuples)\n\nReturns:\n None, updates self.data_structure", "id": "f1547:c0:m3"} {"signature": "def get_inner_template(self, language, template_type, indentation, key, val):", "body": "inner_templates = {'' : {'' : '' % (indentation, key, indentation, val, indentation),'' : '' % (indentation, key, val) },'' : {'' : '' % (indentation, key, val, indentation),'' : '' % (indentation, key, val)},'' : { '' : '' % (indentation, key, val, indentation),'' : '' % (indentation, key, val)}}return inner_templates[language][template_type]", "docstring": "Gets the requested template for the given language.\n\nArgs:\n language: string, the language of the template to look for.\n\n template_type: string, 'iterable' or 'singular'. \n An iterable template is needed when the value is an iterable\n and needs more unpacking, e.g. list, tuple. A singular template \n is needed when unpacking is complete and the value is singular, \n e.g. string, int, float.\n\n indentation: int, the indentation level.\n\n key: multiple types, the array key.\n\n val: multiple types, the array values\n\nReturns:\n string, template formatting for arrays by language.", "id": "f1547:c0:m4"} {"signature": "def translate_val(self, language, value):", "body": "return self.lang_specific_values[language][value]", "docstring": "Translates string representations of language specific \nvalues that vary between languages. 
Used to translate\npython values to their counterparts in other languages.\n\nArgs:\n language: string, the language for which to\n return values.\n\n value: string, the value to translate.\n\nReturns:\n string representation of a value in a given language.", "id": "f1547:c0:m5"} {"signature": "def is_iterable(self, data):", "body": "try:iterate = iter(data)return Trueexcept:return False", "docstring": "Checks to see if an object is an iterable.\n\nArgs:\n data: a data object.\n\nReturns:\n boolean", "id": "f1547:c0:m6"} {"signature": "def translate_array(self, string, language, level=, retdata=False):", "body": "language = language.lower()assert self.is_built_in(language) or language in self.outer_templates,\"\" + language + \"\"data = phpserialize.loads(bytes(string, ''), array_hook=list, decode_strings=True)if self.is_built_in(language):self.get_built_in(language, level, data) print(self)return self.data_structure if retdata else Nonedef loop_print(iterable, level=):\"\"\"\"\"\"retval = ''indentation = '' * levelif not self.is_iterable(iterable) or isinstance(iterable, str):non_iterable = str(iterable)return str(non_iterable)for item in iterable:if isinstance(item, tuple) and len(item) == :key = item[]val = loop_print(item[], level=level+)val = self.translate_val(language, val) if language in self.lang_specific_valuesand val in self.lang_specific_values[language] else valkey = str(key) if isinstance(key, int) else '' + str(key) + ''needs_unpacking = hasattr(item[],'') == Falseand hasattr(item[],'') == True if needs_unpacking:retval += self.get_inner_template(language, '', indentation, key, val)else:val = str(val) if val.isdigit() or val in self.lang_specific_values[language].values() else '' + str(val) + ''retval += self.get_inner_template(language, '', indentation, key, val) return retvalself.data_structure = self.outer_templates[language] % (loop_print(data))print(self)return self.data_structure if retdata else None", "docstring": "Unserializes a serialized php array and prints it to\n the console as a data structure in the specified language.\n Used to translate or convert a php array into a data structure \n in another language. Currently supports, PHP, Python, Javascript,\n and JSON. \n\n Args:\n string: a string of serialized php\n\n language: a string representing the desired output \n format for the array.\n\n level: integer, indentation level in spaces. \n Defaults to 3.\n\n retdata: boolean, the method will return the string\n in addition to printing it if set to True. Defaults \n to false.\n\n Returns:\n None but prints a string to the console if retdata is \n False, otherwise returns a string.", "id": "f1547:c0:m7"} {"signature": "def run_get_percentage():", "body": "description = run_get_percentage.__doc__parser = argparse.ArgumentParser(prog='',description=description,epilog=\"\",)parser.add_argument('', help='')parser.add_argument('',help='',)args = parser.parse_args()print(sm.get_percentage(float(args.a), float(args.b)))", "docstring": "Calculate what percentage a given number is of another,\ne.g. 
50 is 50% of 100.", "id": "f1548:m0"} {"signature": "def run_excel_to_html():", "body": "parser = argparse.ArgumentParser(prog='')parser.add_argument('', nargs='', help='')parser.add_argument('',nargs='',help='',)parser.add_argument('', nargs='', help='')parser.add_argument('', action='', help='')parser.add_argument('', nargs='', help='')parser.add_argument('',nargs='',help='',)parser.add_argument('', action='', help='')args = parser.parse_args()inputs = {'': args.p,'': args.s,'': args.css,'': args.m,'': args.c,'': args.d,'': args.r,}p = inputs['']s = inputs[''] if inputs[''] else ''css = inputs[''] if inputs[''] else ''m = inputs[''] if inputs[''] else Falsec = inputs[''] if inputs[''] else ''d = inputs[''].split('') if inputs[''] else []r = inputs[''] if inputs[''] else Falsehtml = fp.excel_to_html(p, sheetname=s, css_classes=css, caption=c, details=d, row_headers=r, merge=m)print(html)", "docstring": "Run the excel_to_html function from the\ncommand-line.\n\nArgs:\n -p path to file\n -s name of the sheet to convert\n -css classes to apply\n -m attempt to combine merged cells\n -c caption for accessibility\n -su summary for accessibility\n -d details for accessibility\n\nExample use:\n\n excel_to_html -p myfile.xlsx -s SheetName -css diablo-python -m true", "id": "f1548:m1"} {"signature": "def get_web_file(path):", "body": "response = urllib.request.urlopen(path)return response.read()", "docstring": "Gets a file over http.\n\n Args:\n path: string url of the desired file.\n\n Returns:\n The desired file as a string.", "id": "f1549:m0"} {"signature": "def copy_web_file_to_local(file_path, target_path):", "body": "response = urllib.request.urlopen(file_path)f = open(target_path, '')f.write(response.read()) f.close()", "docstring": "Copies a file from its location on the web to a designated \n place on the local machine.\n\n Args:\n file_path: Complete url of the file to copy, string (e.g. http://fool.com/input.css).\n\n target_path: Path and name of file on the local machine, string. (e.g. /directory/output.css)\n\n Returns:\n None.", "id": "f1549:m1"} {"signature": "def get_line_count(fname):", "body": "i = with open(fname) as f:for i, l in enumerate(f):passreturn i + ", "docstring": "Counts the number of lines in a file.\n\n Args:\n fname: string, name of the file.\n\n Returns:\n integer, the number of lines in the file.", "id": "f1549:m2"} {"signature": "def indent_css(f, output):", "body": "line_count = get_line_count(f)f = open(f, '')output = open(output, '')for line in range(line_count):string = f.readline().rstrip()if len(string) > :if string[-] == \"\":output.write(\"\" + string + \"\")else:output.write(string + \"\")output.close()f.close()", "docstring": "Indents css that has not been indented and saves it to a new file.\n A new file is created if the output destination does not already exist.\n\n Args:\n f: string, path to file.\n\n output: string, path/name of the output file (e.g. 
/directory/output.css).\n\n Returns:\n None.", "id": "f1549:m3"} {"signature": "def add_newlines(f, output, char):", "body": "line_count = get_line_count(f)f = open(f, '')output = open(output, '')for line in range(line_count):string = f.readline()string = re.sub(char, char + '', string)output.write(string)", "docstring": "Adds line breaks after every occurrence of a given character in a file.\n\n Args:\n f: string, path to input file.\n\n output: string, path to output file.\n\n Returns:\n None.", "id": "f1549:m4"} {"signature": "def add_whitespace_before(char, input_file, output_file):", "body": "line_count = get_line_count(input_file)input_file = open(input_file, '')output_file = open(output_file, '')for line in range(line_count):string = input_file.readline()if re.search(r'' + char, string) != None:string = re.sub(char, '' + char, string)output_file.write(string)input_file.close()", "docstring": "Adds a space before a character if there isn't one already.\n\n Args:\n char: string, character that needs a space before it.\n\n input_file: string, path to file to parse.\n\n output_file: string, path to destination file.\n\n Returns:\n None.", "id": "f1549:m5"} {"signature": "def reformat_css(input_file, output_file):", "body": "line_count = get_line_count(input_file)f = open(input_file, '')output = open(output_file, '')for line in range(line_count):string = f.readline().strip()string = re.sub('', '', string)string = re.sub('', '', string)string = re.sub('', '', string)string = re.sub('', '', string)string = re.sub('', '', string)string = re.sub('', '', string)output.write(string)output.close()f.close()indent_css(output_file, output_file)add_whitespace_before(\"\", output_file, output_file)", "docstring": "Reformats poorly written css. This function does not validate or fix errors in the code.\n It only gives code the proper indentation. \n\n Args:\n input_file: string, path to the input file.\n\n output_file: string, path to where the reformatted css should be saved. If the target file\n doesn't exist, a new file is created.\n\n Returns:\n None.", "id": "f1549:m6"} {"signature": "def is_numeric(string):", "body": "try:float(string)return Trueexcept ValueError:return False", "docstring": "Checks if a string is numeric. If the string value is an integer\nor a float, return True, otherwise False. Can be used to test \nsolely for floats as well. \n\nArgs:\n string: a string to test.\n\nReturns: \n boolean", "id": "f1549:m7"} {"signature": "def is_number_of_some_sort(num):", "body": "if is_numeric(num):try:num / return Trueexcept:return Falsereturn False", "docstring": "Test to see if an argument is an actual number.\nReturns True if passed an int, float, or decimal,\notherwise False.\n\nArgs:\n num: int, float, decimal, or string.\n\nReturns:\n boolean", "id": "f1549:m8"} {"signature": "def are_numeric(string_list):", "body": "for string in string_list:if not is_numeric(string):return stringreturn True", "docstring": "Checks a list of strings to see that all values in the list are\nnumeric. Returns the name of the offending string if it is \nnot numeric.\n\nArgs:\n string_list: a list of strings to test.\n\nReturns:\n boolean or string", "id": "f1549:m9"} {"signature": "def is_int(string):", "body": "try:a = float(string)b = int(a)except ValueError:return Falseelse:return a == b", "docstring": "Checks if a string is an integer. If the string value is an integer\nreturn True, otherwise return False. 
\n\nArgs:\n string: a string to test.\n\nReturns: \n boolean", "id": "f1549:m10"} {"signature": "def total_hours(input_files):", "body": "hours = allow = set(['', '', '', ''])for input_file in input_files:doc = open(input_file, '')for line in doc:line = line.rstrip()data = line.split('')if (len(data) == ) and (is_numeric(data[])) and (data[].lower() in allow):hours += float(data[])doc.close()return hours", "docstring": "Totals the hours for a given projct. Takes a list of input files for \nwhich to total the hours. Each input file represents a project.\nThere are only multiple files for the same project when the duration \nwas more than a year. A typical entry in an input file might look \nlike this: \n\n8/24/14\n9:30-12:00 wrote foobar code for x, wrote a unit test for foobar code, tested. \n2.5 hours\n\nArgs:\n input_files: a list of files to parse.\n\nReturns:\n float: the total number of hours spent on the project.", "id": "f1549:m11"} {"signature": "def clean_strings(iterable):", "body": "retval = []for val in iterable:try:retval.append(val.strip())except(AttributeError):retval.append(val)return retval", "docstring": "Take a list of strings and clear whitespace \non each one. If a value in the list is not a \nstring pass it through untouched.\n\nArgs:\n iterable: mixed list\n\nReturns: \n mixed list", "id": "f1549:m12"} {"signature": "def is_json(data):", "body": "try:json_object = json.loads(data)except(ValueError):return Falsereturn True", "docstring": "Test if a string is valid json.\n\nArgs:\n data: string\n\nReturns:\n boolean", "id": "f1549:m13"} {"signature": "def excel_to_html(path, sheetname='', css_classes='',caption='', details=[], row_headers=False, merge=False):", "body": "def get_data_on_merged_cells():\"\"\"\"\"\"merged_cells = xls.book.sheet_by_name(sheetname).merged_cellsds = {}for crange in merged_cells:rlo, rhi, clo, chi = crangefor rowx in range(rlo, rhi):for colx in range(clo, chi):parent_cell = (rlo,clo)child_cell = (rowx,colx)if not parent_cell in ds:ds[parent_cell] = [[,], set([])]else:if parent_cell != child_cell and child_cell[] == parent_cell[]:ds[parent_cell][][] += ds[parent_cell][].add('')elif parent_cell != child_cell and child_cell[] > parent_cell[]:if child_cell[] == parent_cell[]:ds[parent_cell][][] += ds[parent_cell][].add('')else:raise RuntimeError('')return dsdef mark_cells_going_right(cell, curr_cell, merged_cells):\"\"\"\"\"\"try:xcount = merged_cells[curr_cell][][]if xcount > : cell[''] = xcountcol_count = xcount - while col_count > :cell = cell.find_next_sibling()cell[''] = ''col_count -= except:passdef mark_cells_going_down(cell, curr_cell, merged_cells):\"\"\"\"\"\"if curr_cell in merged_cells and merged_cells[curr_cell][] == set(['']):ycount = merged_cells[curr_cell][][]cell[''] = ycountrow_count = ycountfor child_row in cell.parent.find_next_siblings(limit=row_count - ):i = for child in child_row.find_all(''):if i == curr_cell[]:child[''] = ''i += def mark_cells_going_down_and_right(cell, curr_cell, merged_cells):\"\"\"\"\"\"if curr_cell in merged_cells and('' in merged_cells[curr_cell][] and'' in merged_cells[curr_cell][]):xcount = merged_cells[curr_cell][][]ycount = merged_cells[curr_cell][][]row_count = ycountcol_count = xcountmark_cells_going_right(cell, curr_cell, merged_cells)flag = Falsefor child_row in [cell.parent] + cell.parent.find_all_next('', limit=row_count - ):i = for child in child_row.find_all(''):if i == curr_cell[]:mark_cells_going_right(child, curr_cell, merged_cells)if not flag:child[''] = col_countchild[''] = 
row_countflag = Trueelse:child[''] = ''i += def is_empty_th(string):\"\"\"\"\"\"if string[:] == '':data = string.split('')if is_numeric(data[]):return Truereturn Falsedef mark_header_cells(html):\"\"\"\"\"\"th = html.find_all('')for header in th:txt = header.stringif not is_empty_th(txt):header[''] = ''count = for sibling in header.find_next_siblings():if is_empty_th(sibling.string):count += sibling[''] = ''else:breakif count > :header[''] = countheader[''] = ''def create_caption(html, caption):\"\"\"\"\"\"ctag = html.new_tag('')ctag.insert(, caption)html.table.insert(, ctag)def create_summary_and_details(html, details):\"\"\"\"\"\"if len(details) != :msg = ''+ ''+ ''+ ''raise RuntimeError(msg)summary = details[]details = details[]if not caption:create_caption(html, caption)dtag = html.new_tag('')stag = html.new_tag('')ptag = html.new_tag('')stag.insert(, summary)ptag.insert(, details)dtag.insert(, stag)dtag.append(ptag) html.table.caption.insert(, dtag) def format_properly(html):\"\"\"\"\"\"return html.replace('', '').replace('','').replace('', '').replace('','').replace('', '')def add_row_headers(html):\"\"\"\"\"\"for row in html.tbody.find_all(''):spans_rows = '' in row.td.attrsspans_columns = '' in row.td.attrsnew_tag = html.new_tag('')new_tag[''] = ''new_tag.string = row.td.stringif spans_rows:new_tag[''] = row.td.attrs['']new_tag[''] = ''if spans_columns:new_tag[''] = row.td.attrs['']row.td.replace_with(new_tag)def beautify(html):\"\"\"\"\"\"table = html.find('')first_tr = table.find('')del table['']del first_tr['']return format_properly(html.prettify(formatter=''))def parse_html(html, caption, details):\"\"\"\"\"\"new_html = BeautifulSoup(html, '')if merge:row_num = merged_cells = get_data_on_merged_cells()rows = new_html.find('').find('').find_all('')for row in rows:cell_num = cells = row.find_all('')for cell in cells:curr_cell = (row_num, cell_num)mark_cells_going_right(cell, curr_cell, merged_cells) mark_cells_going_down(cell, curr_cell, merged_cells)mark_cells_going_down_and_right(cell, curr_cell, merged_cells)cell_num += row_num += mark_header_cells(new_html)destroy = new_html.find_all(attrs={'' : '' })for item in destroy:item.extract()if row_headers:add_row_headers(new_html)if caption:create_caption(new_html, caption)if details:create_summary_and_details(new_html, details)return beautify(new_html)pd.options.display.max_colwidth = -xls = pd.ExcelFile(path)df = xls.parse(sheetname)panda_html = df.to_html(classes=css_classes, index=False, na_rep='')return parse_html(panda_html, caption, details)", "docstring": "Convert an excel spreadsheet to an html table.\nThis function supports the conversion of merged \ncells. It can be used in code or run from the \ncommand-line. If passed the correct arguments\nit can generate fully accessible html.\n\nArgs:\n path: string, path to the spreadsheet.\n\n sheetname: string, name of the sheet\n to convert. \n\n css_classes: string, space separated\n classnames to append to the table.\n\n caption: string, a short heading-like \n description of the table.\n\n details: list of strings, where the first\n item in the list is a string for the html \n summary element and the second item is\n a string for the details element. The \n summary should be very short, e.g. \"Help\",\n where as the details element should be a \n long description regarding the purpose or \n how to navigate the table.\n\n row_headers: boolean, defaults to False.\n Does the table have row headers? 
If set\n to True, the first element in each row\n will be a element \n instead of a element.\n\n merge: boolean, whether or not to \n combine cells that were merged in the \n spreadsheet.\n\nReturns:\n string, html table", "id": "f1549:m14"} {"signature": "def assert_assertion_error(f, *args):", "body": "try:f(*args)except AssertionError:return Trueexcept Exception:raise AssertionError(\"\")else:raise AssertionError(\"\")", "docstring": "Test if an AssertionError is thrown. This\nfunction expects to trigger an Assertion\nError. If an AssertionError is triggered\nnothing happens. If a different kind of\nexception is triggered, an AssertionError\nis raised. If no exception is triggered\nan AssertionError is raised.\n\nArgs:\n f: function to test.\n\n *args: unamed arguments.\n\nReturns:\n True or raises an AssertionError.", "id": "f1552:m0"} {"signature": "def camelize_classname(base, tablename, table):", "body": "\"\"return str(tablename[].upper() +re.sub(r'', lambda m: m.group().upper(),tablename[:]))", "docstring": "Produce a 'camelized' class name, e.g.", "id": "f1564:m0"} {"signature": "def pluralize_collection(base, local_cls, referred_cls, constraint):", "body": "\"\"referred_name = referred_cls.__name__uncamelized = re.sub(r'',lambda m: \"\" % m.group().lower(),referred_name)[:]pluralized = _pluralizer.plural(uncamelized)return pluralized", "docstring": "Produce an 'uncamelized', 'pluralized' class name, e.g.", "id": "f1564:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super().__init__(*args, **kwargs)self.last_populate_args = []self.last_populate_kwargs = {}", "docstring": "Instantiate the manager.", "id": "f1569:c1:m0"} {"signature": "def count_model(self) -> int:", "body": "return self._count_model(Model)", "docstring": "Count the test model.", "id": "f1569:c1:m3"} {"signature": "def list_model(self) -> List[Model]:", "body": "return self._list_model(Model)", "docstring": "Get all models.", "id": "f1569:c1:m4"} {"signature": "def is_populated(self) -> bool:", "body": "return < self.count_model()", "docstring": "Check if the database is already populated.", "id": "f1569:c1:m5"} {"signature": "def populate(self, *args, **kwargs) -> None:", "body": "models = [Model.from_id(model_id)for model_id in range(NUMBER_TEST_MODELS)]self.session.add_all(models)self.session.commit()if args:self.last_populate_args = argslog.critical('', args)if kwargs:self.last_populate_kwargs = kwargslog.critical('', kwargs)", "docstring": "Add five models to the store.", "id": "f1569:c1:m6"} {"signature": "def summarize(self) -> Mapping[str, int]:", "body": "return dict(models=self.count_model(),)", "docstring": "Summarize the database.", "id": "f1569:c1:m7"} {"signature": "@classmethoddef populate(cls):", "body": "cls.manager.populate()", "docstring": "Populate the database.", "id": "f1571:c3:m0"} {"signature": "@classmethoddef populate(cls):", "body": "cls.manager.populate(return_true=True)", "docstring": "Populate the database.", "id": "f1571:c4:m0"} {"signature": "def setUp(self):", "body": "self.runner = CliRunner()self.main = Manager.get_cli()", "docstring": "Set up a CliRunner and an accompanying CLI for each test.", "id": "f1574:c0:m0"} {"signature": "def populate(self):", "body": "self.manager.populate()", "docstring": "Populate the manager.", "id": "f1575:c2:m0"} {"signature": "def setUp(self):", "body": "self.runner = CliRunner()self.main = NamespaceManager.get_cli()self.manager = Manager(connection=self.connection)self.manager.populate()", "docstring": "Set up a CliRunner and 
an accompanying CLI for each test.", "id": "f1575:c3:m0"} {"signature": "def make_downloader(url: str, path: str) -> Callable[[bool], str]: ", "body": "def download_data(force_download: bool = False) -> str:\"\"\"\"\"\"if os.path.exists(path) and not force_download:log.info('', path)else:log.info('', url, path)urlretrieve(url, path)return pathreturn download_data", "docstring": "Make a function that downloads the data for you, or uses a cached version at the given path.\n\n :param url: The URL of some data\n :param path: The path of the cached data, or where data is cached if it does not already exist\n :return: A function that downloads the data and returns the path of the data", "id": "f1576:m0"} {"signature": "def make_df_getter(data_url: str, data_path: str, **kwargs) -> Callable[[Optional[str], bool, bool], pd.DataFrame]:", "body": "download_function = make_downloader(data_url, data_path)def get_df(url: Optional[str] = None, cache: bool = True, force_download: bool = False) -> pd.DataFrame:\"\"\"\"\"\"if url is None and cache:url = download_function(force_download=force_download)return pd.read_csv(url or data_url,**kwargs)return get_df", "docstring": "Build a function that handles downloading tabular data and parsing it into a pandas DataFrame.\n\n :param data_url: The URL of the data\n :param data_path: The path where the data should get stored\n :param kwargs: Any other arguments to pass to :func:`pandas.read_csv`", "id": "f1576:m1"} {"signature": "def make_zipped_df_getter(data_url: str, data_path: str, zip_path: str, **kwargs):", "body": "download_function = make_downloader(data_url, data_path)def get_df(url: Optional[str] = None, cache: bool = True, force_download: bool = False) -> pd.DataFrame:\"\"\"\"\"\"if url is not None:return pd.read_csv(url, **kwargs)if url is None and cache:url = download_function(force_download=force_download)with ZipFile(url) as zip_file:with zip_file.open(zip_path) as file:return pd.read_csv(file, **kwargs)return get_df", "docstring": ":param data_url:\n:param data_path:\n:param zip_path:\n:return:", "id": "f1576:m2"} {"signature": "@classmethoddef get_cli(cls) -> click.Group:", "body": "group_help = ''.format(cls._get_connection(), get_version())@click.group(help=group_help)@click.option('', '', default=cls._get_connection(),help=''.format(cls._get_connection()))@click.pass_contextdef main(ctx, connection):\"\"\"\"\"\"logging.basicConfig(level=logging.INFO, format=\"\")logging.getLogger('').setLevel(logging.WARNING)ctx.obj = cls(connection=connection)return main", "docstring": "Build a :mod:`click` CLI main function.\n\n :param Type[AbstractManager] cls: A Manager class\n :return: The main function for click", "id": "f1577:c0:m0"} {"signature": "def build_engine_session(connection, echo=False, autoflush=None, autocommit=None, expire_on_commit=None,scopefunc=None):", "body": "if connection is None:raise ValueError('')engine = create_engine(connection, echo=echo)autoflush = autoflush if autoflush is not None else Falseautocommit = autocommit if autocommit is not None else Falseexpire_on_commit = expire_on_commit if expire_on_commit is not None else Truelog.debug('', autoflush, autocommit, expire_on_commit)session_maker = sessionmaker(bind=engine,autoflush=autoflush,autocommit=autocommit,expire_on_commit=expire_on_commit,)session = scoped_session(session_maker,scopefunc=scopefunc)return engine, session", "docstring": "Build an engine and a session.\n\n :param str connection: An RFC-1738 database connection string\n :param bool echo: Turn on echoing SQL\n 
:param Optional[bool] autoflush: Defaults to True if not specified in kwargs or configuration.\n :param Optional[bool] autocommit: Defaults to False if not specified in kwargs or configuration.\n :param Optional[bool] expire_on_commit: Defaults to False if not specified in kwargs or configuration.\n :param scopefunc: Scoped function to pass to :func:`sqlalchemy.orm.scoped_session`\n :rtype: tuple[Engine,Session]\n\n From the Flask-SQLAlchemy documentation:\n\n An extra key ``'scopefunc'`` can be set on the ``options`` dict to\n specify a custom scope function. If it's not provided, Flask's app\n context stack identity is used. This will ensure that sessions are\n created and removed with the request/response cycle, and should be fine\n in most cases.", "id": "f1578:m0"} {"signature": "def __init__(self, connection: Optional[str] = None, engine=None, session=None, **kwargs):", "body": "self._assert_module_name()if connection and (engine or session):raise ValueError('')if engine is None and session is None:if connection is None:connection = self._get_connection()engine, session = build_engine_session(connection=connection, **kwargs)self.engine = engineself.session = sessioncreate_all(self.engine)", "docstring": "Build an abstract manager from either a connection or an engine/session.\n\n The remaining keyword arguments are passed to :func:`build_engine_session`.\n\n :param Optional[str] connection:\n :param engine:\n :param session:", "id": "f1578:c0:m0"} {"signature": "@propertydef connection(self) -> str:", "body": "return str(self.engine.url)", "docstring": "Return this manager's connection string.", "id": "f1578:c0:m1"} {"signature": "@classmethoddef _get_connection(cls, connection: Optional[str] = None) -> str:", "body": "return get_connection(cls.module_name, connection=connection)", "docstring": "Get a default connection string.\n\n Wraps :func:`bio2bel.utils.get_connection` and passing this class's :data:`module_name` to it.", "id": "f1578:c0:m3"} {"signature": "def add_cli_to_bel_namespace(main: click.Group) -> click.Group: ", "body": "@main.command()@click.option('', '', is_flag=True)@click.pass_objdef upload(manager: BELNamespaceManagerMixin, update):\"\"\"\"\"\"namespace = manager.upload_bel_namespace(update=update)click.echo(f'')return main", "docstring": "Add a ``upload_bel_namespace`` command to main :mod:`click` function.", "id": "f1579:m0"} {"signature": "def add_cli_clear_bel_namespace(main: click.Group) -> click.Group: ", "body": "@main.command()@click.pass_objdef drop(manager: BELNamespaceManagerMixin):\"\"\"\"\"\"namespace = manager.drop_bel_namespace()if namespace:click.echo(f'')return main", "docstring": "Add a ``clear_bel_namespace`` command to main :mod:`click` function.", "id": "f1579:m1"} {"signature": "def add_cli_write_bel_namespace(main: click.Group) -> click.Group: ", "body": "@main.command()@click.option('', '', type=click.Path(file_okay=False, dir_okay=True), default=os.getcwd(),help='')@click.pass_objdef write(manager: BELNamespaceManagerMixin, directory: str):\"\"\"\"\"\"manager.write_directory(directory)return main", "docstring": "Add a ``write_bel_namespace`` command to main :mod:`click` function.", "id": "f1579:m2"} {"signature": "def add_cli_write_bel_annotation(main: click.Group) -> click.Group: ", "body": "@main.command()@click.option('', '', type=click.Path(file_okay=False, dir_okay=True), default=os.getcwd(),help='')@click.pass_objdef write(manager: BELNamespaceManagerMixin, directory: str):\"\"\"\"\"\"with open(os.path.join(directory, 
manager.identifiers_namespace), '') as file:manager.write_bel_annotation(file)return main", "docstring": "Add a ``write_bel_annotation`` command to main :mod:`click` function.", "id": "f1579:m3"} {"signature": "@abstractmethoddef _create_namespace_entry_from_model(self, model, namespace: Namespace) -> NamespaceEntry:", "body": "", "docstring": "Create a PyBEL NamespaceEntry model from a Bio2BEL model.\n\n :param model: The model to convert\n :param namespace: The PyBEL namespace to add to", "id": "f1579:c1:m1"} {"signature": "@classmethoddef _get_identifier(cls, model) -> str:", "body": "return getattr(model, f'')", "docstring": "Extract the identifier from an instance of namespace_model.\n\n :param model: The model to convert", "id": "f1579:c1:m2"} {"signature": "@staticmethoddef _get_encoding(model) -> str:", "body": "return model.bel_encoding", "docstring": "Extract the BEL encoding from an instance of a namespace_model.\n\n :param model: The model to convert", "id": "f1579:c1:m3"} {"signature": "@staticmethoddef _get_name(model) -> str:", "body": "return model.name", "docstring": "Extract the name from an instance of namespace_model.\n\n :param model: The model to convert", "id": "f1579:c1:m4"} {"signature": "def _iterate_namespace_models(self, **kwargs) -> Iterable:", "body": "return tqdm(self._get_query(self.namespace_model),total=self._count_model(self.namespace_model),**kwargs)", "docstring": "Return an iterator over the models to be converted to the namespace.", "id": "f1579:c1:m5"} {"signature": "@classmethoddef _get_namespace_name(cls) -> str:", "body": "return cls.identifiers_recommended or cls.module_name", "docstring": "Get the nicely formatted name of this namespace.", "id": "f1579:c1:m6"} {"signature": "@classmethoddef _get_namespace_keyword(cls) -> str:", "body": "return cls.identifiers_namespace or cls.module_name.upper()", "docstring": "Get the keyword to use as the reference BEL namespace.", "id": "f1579:c1:m7"} {"signature": "@classmethoddef _get_namespace_url(cls) -> str:", "body": "return cls.identifiers_url or f''", "docstring": "Get the URL to use as the reference BEL namespace.", "id": "f1579:c1:m8"} {"signature": "def _get_default_namespace(self) -> Optional[Namespace]:", "body": "return self._get_query(Namespace).filter(Namespace.url == self._get_namespace_url()).one_or_none()", "docstring": "Get the reference BEL namespace if it exists.", "id": "f1579:c1:m9"} {"signature": "def _make_namespace(self) -> Namespace:", "body": "namespace = Namespace(name=self._get_namespace_name(),keyword=self._get_namespace_keyword(),url=self._get_namespace_url(),version=str(time.asctime()),)self.session.add(namespace)entries = self._get_namespace_entries(namespace)self.session.add_all(entries)t = time.time()log.info('')self.session.commit()log.info('', time.time() - t)return namespace", "docstring": "Make a namespace.", "id": "f1579:c1:m11"} {"signature": "@staticmethoddef _get_old_entry_identifiers(namespace: Namespace) -> Set[NamespaceEntry]:", "body": "return {term.identifier for term in namespace.entries}", "docstring": "Convert a PyBEL generalized namespace entries to a set.\n\n Default to using the identifier, but can be overridden to use the name instead.\n\n >>> {term.identifier for term in namespace.entries}", "id": "f1579:c1:m12"} {"signature": "def _update_namespace(self, namespace: Namespace) -> None:", "body": "old_entry_identifiers = self._get_old_entry_identifiers(namespace)new_count = skip_count = for model in self._iterate_namespace_models():if 
self._get_identifier(model) in old_entry_identifiers:continueentry = self._create_namespace_entry_from_model(model, namespace=namespace)if entry is None or entry.name is None:skip_count += continuenew_count += self.session.add(entry)t = time.time()log.info('', new_count, skip_count)self.session.commit()log.info('', time.time() - t)", "docstring": "Update an already-created namespace.\n\n Note: Only call this if namespace won't be none!", "id": "f1579:c1:m13"} {"signature": "def add_namespace_to_graph(self, graph: BELGraph) -> Namespace:", "body": "namespace = self.upload_bel_namespace()graph.namespace_url[namespace.keyword] = namespace.urlself._add_annotation_to_graph(graph)return namespace", "docstring": "Add this manager's namespace to the graph.", "id": "f1579:c1:m14"} {"signature": "def _add_annotation_to_graph(self, graph: BELGraph) -> None:", "body": "if '' not in graph.annotation_list:graph.annotation_list[''] = set()graph.annotation_list[''].add(self.module_name)", "docstring": "Add this manager as an annotation to the graph.", "id": "f1579:c1:m15"} {"signature": "def upload_bel_namespace(self, update: bool = False) -> Namespace:", "body": "if not self.is_populated():self.populate()namespace = self._get_default_namespace()if namespace is None:log.info('', self._get_namespace_name())return self._make_namespace()if update:self._update_namespace(namespace)return namespace", "docstring": "Upload the namespace to the PyBEL database.\n\n :param update: Should the namespace be updated first?", "id": "f1579:c1:m16"} {"signature": "def drop_bel_namespace(self) -> Optional[Namespace]:", "body": "namespace = self._get_default_namespace()if namespace is not None:for entry in tqdm(namespace.entries, desc=f''):self.session.delete(entry)self.session.delete(namespace)log.info('')self.session.commit()return namespace", "docstring": "Remove the default namespace if it exists.", "id": "f1579:c1:m17"} {"signature": "def write_bel_namespace(self, file: TextIO, use_names: bool = False) -> None:", "body": "if not self.is_populated():self.populate()if use_names and not self.has_names:raise ValueErrorvalues = (self._get_namespace_name_to_encoding(desc='')if use_names elseself._get_namespace_identifier_to_encoding(desc=''))write_namespace(namespace_name=self._get_namespace_name(),namespace_keyword=self._get_namespace_keyword(),namespace_query_url=self.identifiers_url,values=values,file=file,)", "docstring": "Write as a BEL namespace file.", "id": "f1579:c1:m18"} {"signature": "def write_bel_annotation(self, file: TextIO) -> None:", "body": "if not self.is_populated():self.populate()values = self._get_namespace_name_to_encoding(desc='')write_annotation(keyword=self._get_namespace_keyword(),citation_name=self._get_namespace_name(),description='',values=values,file=file,)", "docstring": "Write as a BEL annotation file.", "id": "f1579:c1:m19"} {"signature": "def write_bel_namespace_mappings(self, file: TextIO, **kwargs) -> None:", "body": "json.dump(self._get_namespace_identifier_to_name(**kwargs), file, indent=, sort_keys=True)", "docstring": "Write a BEL namespace mapping file.", "id": "f1579:c1:m20"} {"signature": "def write_directory(self, directory: str) -> bool:", "body": "current_md5_hash = self.get_namespace_hash()md5_hash_path = os.path.join(directory, f'')if not os.path.exists(md5_hash_path):old_md5_hash = Noneelse:with open(md5_hash_path) as file:old_md5_hash = file.read().strip()if old_md5_hash == current_md5_hash:return Falsewith open(os.path.join(directory, f''), '') as 
file:self.write_bel_namespace(file, use_names=False)with open(md5_hash_path, '') as file:print(current_md5_hash, file=file)if self.has_names:with open(os.path.join(directory, f''), '') as file:self.write_bel_namespace(file, use_names=True)with open(os.path.join(directory, f''), '') as file:self.write_bel_namespace_mappings(file, desc='')return True", "docstring": "Write a BEL namespace for identifiers, names, name hash, and mappings to the given directory.", "id": "f1579:c1:m21"} {"signature": "def get_namespace_hash(self, hash_fn=hashlib.md5) -> str:", "body": "m = hash_fn()if self.has_names:items = self._get_namespace_name_to_encoding(desc='').items()else:items = self._get_namespace_identifier_to_encoding(desc='').items()for name, encoding in items:m.update(f''.encode(''))return m.hexdigest()", "docstring": "Get the namespace hash.\n\n Defaults to MD5.", "id": "f1579:c1:m25"} {"signature": "@staticmethoddef _cli_add_to_bel_namespace(main: click.Group) -> click.Group:", "body": "return add_cli_to_bel_namespace(main)", "docstring": "Add the export BEL namespace command.", "id": "f1579:c1:m26"} {"signature": "@staticmethoddef _cli_add_clear_bel_namespace(main: click.Group) -> click.Group:", "body": "return add_cli_clear_bel_namespace(main)", "docstring": "Add the clear BEL namespace command.", "id": "f1579:c1:m27"} {"signature": "@staticmethoddef _cli_add_write_bel_namespace(main: click.Group) -> click.Group:", "body": "return add_cli_write_bel_namespace(main)", "docstring": "Add the write BEL namespace command.", "id": "f1579:c1:m28"} {"signature": "@staticmethoddef _cli_add_write_bel_annotation(main: click.Group) -> click.Group:", "body": "return add_cli_write_bel_annotation(main)", "docstring": "Add the write BEL namespace command.", "id": "f1579:c1:m29"} {"signature": "@classmethoddef get_cli(cls) -> click.Group:", "body": "main = super().get_cli()if cls.is_namespace:@main.group()def belns():\"\"\"\"\"\"cls._cli_add_to_bel_namespace(belns)cls._cli_add_clear_bel_namespace(belns)cls._cli_add_write_bel_namespace(belns)if cls.is_annotation:@main.group()def belanno():\"\"\"\"\"\"cls._cli_add_write_bel_annotation(belanno)return main", "docstring": "Get a :mod:`click` main function with added BEL namespace commands.", "id": "f1579:c1:m30"} {"signature": "def add_cli_to_bel(main: click.Group) -> click.Group: ", "body": "@main.command()@click.option('', '', type=click.File(''), default=sys.stdout)@click.option('', '', default='', show_default=True, help='')@click.pass_objdef write(manager: BELManagerMixin, output: TextIO, fmt: str):\"\"\"\"\"\"graph = manager.to_bel()graph.serialize(file=output, fmt=fmt)click.echo(graph.summary_str())return main", "docstring": "Add several command to main :mod:`click` function related to export to BEL.", "id": "f1580:m0"} {"signature": "def add_cli_upload_bel(main: click.Group) -> click.Group: ", "body": "@main.command()@host_option@click.pass_objdef upload(manager: BELManagerMixin, host: str):\"\"\"\"\"\"graph = manager.to_bel()pybel.to_web(graph, host=host, public=True)return main", "docstring": "Add several command to main :mod:`click` function related to export to BEL.", "id": "f1580:m1"} {"signature": "def count_relations(self) -> int:", "body": "if self.edge_model is ...:raise Bio2BELMissingEdgeModelError('')elif isinstance(self.edge_model, list):return sum(self._count_model(m) for m in self.edge_model)else:return self._count_model(self.edge_model)", "docstring": "Count the number of BEL relations generated.", "id": "f1580:c1:m0"} {"signature": 
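The get_namespace_hash record above digests the namespace's name-to-encoding (or identifier-to-encoding) pairs with MD5 so write_directory can skip unchanged exports. A standalone sketch of that hashing pattern, assuming the items are plain string pairs; the function name, separator, and sample entries are assumptions, not from the source:

import hashlib
from typing import Iterable, Tuple

def hash_items(items: Iterable[Tuple[str, str]], hash_fn=hashlib.md5) -> str:
    """Digest an iterable of (name, encoding) pairs into a stable hex string."""
    m = hash_fn()
    for name, encoding in items:
        # One UTF-8 chunk per entry, mirroring the loop in get_namespace_hash.
        m.update(f'{name}:{encoding}'.encode('utf-8'))
    return m.hexdigest()

print(hash_items([('TP53', 'GRP'), ('MAPK1', 'GRP')]))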
"@abstractmethoddef to_bel(self, *args, **kwargs) -> pybel.BELGraph:", "body": "", "docstring": "Convert the database to BEL.\n\n Example implementation outline:\n\n .. code-block:: python\n\n from bio2bel import AbstractManager\n from bio2bel.manager.bel_manager import BELManagerMixin\n from pybel import BELGraph\n from .models import Interaction\n\n class MyManager(AbstractManager, BELManagerMixin):\n module_name = 'mirtarbase'\n def to_bel(self):\n rv = BELGraph(\n name='miRTarBase',\n version='1.0.0',\n )\n\n for interaction in self.session.query(Interaction):\n mirna = mirna_dsl('mirtarbase', interaction.mirna.mirtarbase_id)\n rna = rna_dsl('hgnc', interaction.target.hgnc_id)\n\n rv.add_qualified_edge(\n mirna,\n rna,\n DECREASES,\n ...\n )\n\n return rv", "id": "f1580:c1:m1"} {"signature": "def to_indra_statements(self, *args, **kwargs):", "body": "graph = self.to_bel(*args, **kwargs)return to_indra_statements(graph)", "docstring": "Dump as a list of INDRA statements.\n\n :rtype: List[indra.Statement]", "id": "f1580:c1:m2"} {"signature": "@staticmethoddef _cli_add_to_bel(main: click.Group) -> click.Group:", "body": "return add_cli_to_bel(main)", "docstring": "Add the export BEL command.", "id": "f1580:c1:m3"} {"signature": "@staticmethoddef _cli_add_upload_bel(main: click.Group) -> click.Group:", "body": "return add_cli_upload_bel(main)", "docstring": "Add the upload BEL command.", "id": "f1580:c1:m4"} {"signature": "@classmethoddef get_cli(cls) -> click.Group:", "body": "main = super().get_cli()@main.group()def bel():\"\"\"\"\"\"cls._cli_add_to_bel(bel)cls._cli_add_upload_bel(bel)return main", "docstring": "Get a :mod:`click` main function with added BEL commands.", "id": "f1580:c1:m5"} {"signature": "def add_cli_flask(main: click.Group) -> click.Group: ", "body": "@main.command()@click.option('', '', is_flag=True)@click.option('', '')@click.option('', '')@click.option('', '', default=os.urandom())@click.pass_objdef web(manager, debug, port, host, secret_key):\"\"\"\"\"\"if not manager.is_populated():click.echo(''.format(manager.module_name))sys.exit()app = manager.get_flask_admin_app(url='', secret_key=secret_key)app.run(debug=debug, host=host, port=port)return main", "docstring": "Add a ``web`` comand main :mod:`click` function.", "id": "f1582:m0"} {"signature": "def _add_admin(self, app, **kwargs):", "body": "from flask_admin import Adminfrom flask_admin.contrib.sqla import ModelViewadmin = Admin(app, **kwargs)for flask_admin_model in self.flask_admin_models:if isinstance(flask_admin_model, tuple): if len(flask_admin_model) != :raise TypeErrormodel, view = flask_admin_modeladmin.add_view(view(model, self.session))else:admin.add_view(ModelView(flask_admin_model, self.session))return admin", "docstring": "Add a Flask Admin interface to an application.\n\n :param flask.Flask app: A Flask application\n :param kwargs: Keyword arguments are passed through to :class:`flask_admin.Admin`\n :rtype: flask_admin.Admin", "id": "f1582:c0:m1"} {"signature": "def get_flask_admin_app(self, url: Optional[str] = None, secret_key: Optional[str] = None):", "body": "from flask import Flaskapp = Flask(__name__)if secret_key:app.secret_key = secret_keyself._add_admin(app, url=(url or ''))return app", "docstring": "Create a Flask application.\n\n :param url: Optional mount point of the admin application. 
Defaults to ``'/'``.\n :rtype: flask.Flask", "id": "f1582:c0:m2"} {"signature": "@staticmethoddef _cli_add_flask(main: click.Group) -> click.Group:", "body": "return add_cli_flask(main)", "docstring": "Add the web command.", "id": "f1582:c0:m3"} {"signature": "@classmethoddef get_cli(cls) -> click.Group:", "body": "main = super().get_cli()cls._cli_add_flask(main)return main", "docstring": "Add a :mod:`click` main function to use as a command line interface.", "id": "f1582:c0:m4"} {"signature": "def add_cli_populate(main: click.Group) -> click.Group: ", "body": "@main.command()@click.option('', is_flag=True, help='')@click.option('', is_flag=True, help='')@click.pass_objdef populate(manager: AbstractManager, reset, force):\"\"\"\"\"\"if reset:click.echo('')manager.drop_all()click.echo('')manager.create_all()if manager.is_populated() and not force:click.echo('')sys.exit()manager.populate()return main", "docstring": "Add a ``populate`` command to main :mod:`click` function.", "id": "f1583:m0"} {"signature": "def add_cli_drop(main: click.Group) -> click.Group: ", "body": "@main.command()@click.confirmation_option(prompt='')@click.pass_objdef drop(manager):\"\"\"\"\"\"manager.drop_all()return main", "docstring": "Add a ``drop`` command to main :mod:`click` function.", "id": "f1583:m1"} {"signature": "def add_cli_cache(main: click.Group) -> click.Group: ", "body": "@main.group()def cache():\"\"\"\"\"\"@cache.command()@click.pass_objdef locate(manager):\"\"\"\"\"\"data_dir = get_data_dir(manager.module_name)click.echo(data_dir)@cache.command()@click.pass_objdef ls(manager):\"\"\"\"\"\"data_dir = get_data_dir(manager.module_name)for path in os.listdir(data_dir):click.echo(path)@cache.command()@click.pass_objdef clear(manager):\"\"\"\"\"\"clear_cache(manager.module_name)return main", "docstring": "Add several commands to main :mod:`click` function for handling the cache.", "id": "f1583:m2"} {"signature": "def add_cli_summarize(main: click.Group) -> click.Group: ", "body": "@main.command()@click.pass_objdef summarize(manager: AbstractManager):\"\"\"\"\"\"if not manager.is_populated():click.secho(f'', fg='')sys.exit()for name, count in sorted(manager.summarize().items()):click.echo(f'')return main", "docstring": "Add a ``summarize`` command to main :mod:`click` function.", "id": "f1583:m3"} {"signature": "@property@abstractmethoddef _base(self) -> DeclarativeMeta:", "body": "", "docstring": "Return the declarative base.\n\n It is usually sufficient to return an instance that is module-level.\n\n How to build an instance of :class:`sqlalchemy.ext.declarative.api.DeclarativeMeta` by using\n :func:`sqlalchemy.ext.declarative.declarative_base`:\n\n >>> from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base\n >>> Base: DeclarativeMeta = declarative_base()\n\n Then just override this abstract property like:\n\n >>> @property\n >>> def _base(self) -> DeclarativeMeta:\n >>> return Base\n\n Note that this property could effectively also be a static method.", "id": "f1583:c1:m0"} {"signature": "@abstractmethoddef is_populated(self) -> bool:", "body": "", "docstring": "Check if the database is already populated.", "id": "f1583:c1:m2"} {"signature": "@abstractmethoddef populate(self, *args, **kwargs) -> None:", "body": "", "docstring": "Populate the database.", "id": "f1583:c1:m3"} {"signature": "@abstractmethoddef summarize(self) -> Mapping[str, int]:", "body": "", "docstring": "Summarize the database.", "id": "f1583:c1:m4"} {"signature": "@propertydef _metadata(self):", "body": "return 
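The _add_admin and get_flask_admin_app records above build a Flask application and register one Flask-Admin ModelView per SQLAlchemy model against the manager's session. A minimal sketch of that wiring, assuming the caller already has the models and a session; the function name and parameters are illustrative:

from flask import Flask
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView

def get_flask_admin_app(models, session, url='/', secret_key=None):
    """Create a Flask app with a ModelView for every model, as the mixin does."""
    app = Flask(__name__)
    if secret_key:
        app.secret_key = secret_key
    admin = Admin(app, url=url)
    for model in models:
        admin.add_view(ModelView(model, session))
    return app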
self._base.metadata", "docstring": "Return the metadata object associated with this manager's declarative base.", "id": "f1583:c1:m5"} {"signature": "def create_all(self, check_first: bool = True):", "body": "self._metadata.create_all(self.engine, checkfirst=check_first)", "docstring": "Create the empty database (tables).\n\n :param bool check_first: Defaults to True, don't issue CREATEs for tables already present\n in the target database. Defers to :meth:`sqlalchemy.sql.schema.MetaData.create_all`", "id": "f1583:c1:m6"} {"signature": "def drop_all(self, check_first: bool = True):", "body": "self._metadata.drop_all(self.engine, checkfirst=check_first)self._store_drop()", "docstring": "Drop all tables from the database.\n\n :param bool check_first: Defaults to True, only issue DROPs for tables confirmed to be\n present in the target database. Defers to :meth:`sqlalchemy.sql.schema.MetaData.drop_all`", "id": "f1583:c1:m7"} {"signature": "def _get_query(self, model):", "body": "return self.session.query(model)", "docstring": "Get a query for the given model using this manager's session.\n\n :param model: A SQLAlchemy model class\n :return: a SQLAlchemy query", "id": "f1583:c1:m8"} {"signature": "def _count_model(self, model) -> int:", "body": "return self._get_query(model).count()", "docstring": "Count the number of the given model in the database.\n\n :param model: A SQLAlchemy model class", "id": "f1583:c1:m9"} {"signature": "def _list_model(self, model) -> List:", "body": "return self._get_query(model).all()", "docstring": "Get all instances of the given model in the database.\n\n :param model: A SQLAlchemy model class", "id": "f1583:c1:m10"} {"signature": "@staticmethoddef _cli_add_populate(main: click.Group) -> click.Group:", "body": "return add_cli_populate(main)", "docstring": "Add the populate command.", "id": "f1583:c1:m11"} {"signature": "@staticmethoddef _cli_add_drop(main: click.Group) -> click.Group:", "body": "return add_cli_drop(main)", "docstring": "Add the drop command.", "id": "f1583:c1:m12"} {"signature": "@staticmethoddef _cli_add_cache(main: click.Group) -> click.Group:", "body": "return add_cli_cache(main)", "docstring": "Add the cache command.", "id": "f1583:c1:m13"} {"signature": "@staticmethoddef _cli_add_summarize(main: click.Group) -> click.Group:", "body": "return add_cli_summarize(main)", "docstring": "Add the summarize command.", "id": "f1583:c1:m14"} {"signature": "@classmethoddef get_cli(cls) -> click.Group:", "body": "main = super().get_cli()cls._cli_add_populate(main)cls._cli_add_drop(main)cls._cli_add_cache(main)cls._cli_add_summarize(main)return main", "docstring": "Get the :mod:`click` main function to use as a command line interface.", "id": "f1583:c1:m15"} {"signature": "def _iterate_managers(connection, skip):", "body": "for idx, name, manager_cls in _iterate_manage_classes(skip):if name in skip:continuetry:manager = manager_cls(connection=connection)except TypeError as e:click.secho(f'', fg='')else:yield idx, name, manager", "docstring": "Iterate over instantiated managers.", "id": "f1584:m0"} {"signature": "@main.command()@connection_option@click.option('', is_flag=True, help='')@click.option('', is_flag=True, help='')@click.option('', '', multiple=True, help='')def populate(connection, reset, force, skip):", "body": "for idx, name, manager in _iterate_managers(connection, skip):click.echo(click.style(f'', fg='', bold=True) +click.style(f'', fg='', bold=True))if reset:click.echo('')manager.drop_all()click.echo('')manager.create_all()elif 
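The add_cli_populate and add_cli_drop records above attach subcommands to a shared :mod:`click` group and retrieve the manager from the click context object. A compact sketch of that pattern with a stand-in manager; the command names mirror the records, but the DummyManager class is purely illustrative:

import click

class DummyManager:
    """Stand-in for an AbstractManager subclass."""
    def populate(self):
        click.echo('populating')
    def drop_all(self):
        click.echo('dropping')

@click.group()
@click.pass_context
def main(ctx):
    """Bio2BEL-style CLI skeleton."""
    ctx.obj = DummyManager()

@main.command()
@click.pass_obj
def populate(manager):
    """Populate the database."""
    manager.populate()

@main.command()
@click.pass_obj
def drop(manager):
    """Drop the database."""
    manager.drop_all()

if __name__ == '__main__':
    main()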
manager.is_populated() and not force:click.echo(f'', color='')continuetry:manager.populate()except Exception:logger.exception('', name)click.secho(f'', fg='', bold=True)", "docstring": "Populate all.", "id": "f1584:m2"} {"signature": "@main.command(help='')@click.confirmation_option('')@connection_option@click.option('', '', multiple=True, help='')def drop(connection, skip):", "body": "for idx, name, manager in _iterate_managers(connection, skip):click.secho(f'', fg='', bold=True)manager.drop_all()", "docstring": "Drop all.", "id": "f1584:m3"} {"signature": "@main.group()def cache():", "body": "", "docstring": "Manage caches.", "id": "f1584:m4"} {"signature": "@cache.command()@click.option('', '', multiple=True, help='')def clear(skip):", "body": "for name in sorted(MODULES):if name in skip:continueclick.secho(f'', fg='', bold=True)clear_cache(name)", "docstring": "Clear all caches.", "id": "f1584:m5"} {"signature": "@main.command()@connection_option@click.option('', '', multiple=True, help='')def summarize(connection, skip):", "body": "for idx, name, manager in _iterate_managers(connection, skip):click.secho(name, fg='', bold=True)if not manager.is_populated():click.echo('')continueif isinstance(manager, BELNamespaceManagerMixin):click.secho(f'', fg='')if isinstance(manager, BELManagerMixin):try:click.secho(f'', fg='')except TypeError as e:click.secho(str(e), fg='')for field_name, count in sorted(manager.summarize().items()):click.echo(click.style('', fg='', bold=True) + f\"\")", "docstring": "Summarize all.", "id": "f1584:m6"} {"signature": "@main.command()@connection_option@click.option('', '', multiple=True, help='')@click.option('', '', type=click.File(''), default=sys.stdout)def sheet(connection, skip, file: TextIO):", "body": "from tabulate import tabulateheader = ['', '', '', '', '']rows = []for i, (idx, name, manager) in enumerate(_iterate_managers(connection, skip), start=):try:if not manager.is_populated():continueexcept AttributeError:click.secho(f'', fg='')continueterms, relations = None, Noneif isinstance(manager, BELNamespaceManagerMixin):terms = manager._count_model(manager.namespace_model)if isinstance(manager, BELManagerMixin):try:relations = manager.count_relations()except TypeError as e:relations = str(e)rows.append((i, name, manager.__doc__.split('')[].strip().strip(''), terms, relations))print(tabulate(rows,headers=header,))", "docstring": "Generate a summary sheet.", "id": "f1584:m7"} {"signature": "@main.group()def belns():", "body": "", "docstring": "Manage BEL namespaces.", "id": "f1584:m8"} {"signature": "@belns.command()@connection_option@click.option('', '', multiple=True, help='')@click.option('', '', type=click.Path(file_okay=False, dir_okay=True), default=os.getcwd(),help='')@click.option('', '', is_flag=True, help='')def write(connection, skip, directory, force):", "body": "os.makedirs(directory, exist_ok=True)from .manager.namespace_manager import BELNamespaceManagerMixinfor idx, name, manager in _iterate_managers(connection, skip):if not (isinstance(manager, AbstractManager) and isinstance(manager, BELNamespaceManagerMixin)):continueclick.secho(name, fg='', bold=True)if force:try:click.echo(f'')manager.drop_all()click.echo('')clear_cache(name)click.echo('')manager.populate()except Exception:click.secho(f'', fg='')continuetry:r = manager.write_directory(directory)except TypeError as e:click.secho(f''.rstrip(), fg='')else:if not r:click.echo('')", "docstring": "Write a BEL namespace names/identifiers to terminology store.", "id": "f1584:m9"} {"signature": 
"@main.group()def bel():", "body": "", "docstring": "Manage BEL.", "id": "f1584:m10"} {"signature": "@bel.command() @connection_option@click.option('', '', multiple=True, help='')@click.option('', '', type=click.Path(file_okay=False, dir_okay=True), default=os.getcwd(),help='')@click.option('', is_flag=True, help='')def write(connection, skip, directory, force):", "body": "os.makedirs(directory, exist_ok=True)from .manager.bel_manager import BELManagerMixinimport pybelfor idx, name, manager in _iterate_managers(connection, skip):if not isinstance(manager, BELManagerMixin):continueclick.secho(name, fg='', bold=True)path = os.path.join(directory, f'')if os.path.exists(path) and not force:click.echo('')continueif not manager.is_populated():click.echo('')else:graph = manager.to_bel()pybel.to_pickle(graph, path)pybel.to_json_path(graph, os.path.join(directory, f''))", "docstring": "Write all as BEL.", "id": "f1584:m11"} {"signature": "@main.command()@connection_option@click.option('', default='')@click.option('', type=int, default=)def web(connection, host, port):", "body": "from bio2bel.web.application import create_applicationapp = create_application(connection=connection)app.run(host=host, port=port)", "docstring": "Run a combine web interface.", "id": "f1584:m12"} {"signature": "@main.command()@connection_optiondef actions(connection):", "body": "session = _make_session(connection=connection)for action in Action.ls(session=session):click.echo(f'')", "docstring": "List all actions.", "id": "f1584:m13"} {"signature": "def get_global_connection() -> str:", "body": "return config.connection", "docstring": "Return the global connection string.", "id": "f1585:m0"} {"signature": "def __init__(self, group):", "body": "self.group = group", "docstring": "Initialize the importer with the group name.\n\n :param str group: a string representing the package resources entry_points group that will be used", "id": "f1586:c0:m0"} {"signature": "def install(self):", "body": "sys.meta_path.append(self)", "docstring": "Call this method to install the new importer to :code:`sys.meta_path`. 
Should probably only be done once.", "id": "f1586:c0:m2"} {"signature": "def find_module(self, fullname, path=None):", "body": "if not fullname.startswith(self._group_with_dot):returnend_name = fullname[len(self._group_with_dot):]for entry_point in iter_entry_points(group=self.group, name=None):if entry_point.name == end_name:return self", "docstring": "Find a module if its name starts with :code:`self.group` and is registered.", "id": "f1586:c0:m3"} {"signature": "def load_module(self, fullname):", "body": "if fullname in sys.modules:return sys.modules[fullname]end_name = fullname[len(self._group_with_dot):]for entry_point in iter_entry_points(group=self.group, name=end_name):mod = entry_point.load()sys.modules[fullname] = modreturn mod", "docstring": "Load a module if its name starts with :code:`self.group` and is registered.", "id": "f1586:c0:m4"} {"signature": "def make_obo_getter(data_url: str,data_path: str,*,preparsed_path: Optional[str] = None,) -> Callable[[Optional[str], bool, bool], MultiDiGraph]:", "body": "download_function = make_downloader(data_url, data_path)def get_obo(url: Optional[str] = None, cache: bool = True, force_download: bool = False) -> MultiDiGraph:\"\"\"\"\"\"if preparsed_path is not None and os.path.exists(preparsed_path):return read_gpickle(preparsed_path)if url is None and cache:url = download_function(force_download=force_download)result = obonet.read_obo(url)if preparsed_path is not None:write_gpickle(result, preparsed_path)return resultreturn get_obo", "docstring": "Build a function that handles downloading OBO data and parsing it into a NetworkX object.\n\n :param data_url: The URL of the data\n :param data_path: The path where the data should get stored\n :param preparsed_path: The optional path to cache a pre-parsed json version", "id": "f1589:m0"} {"signature": "@click.group()def main():", "body": "", "docstring": "OBO Utilities.", "id": "f1589:m1"} {"signature": "@main.command()@click.argument('')@click.option('', '', type=click.File(''))@click.option('', '')@click.option('', '', is_flag=True)def belns(keyword: str, file: TextIO, encoding: Optional[str], use_names: bool):", "body": "directory = get_data_dir(keyword)obo_url = f''obo_path = os.path.join(directory, f'')obo_cache_path = os.path.join(directory, f'')obo_getter = make_obo_getter(obo_url, obo_path, preparsed_path=obo_cache_path)graph = obo_getter()convert_obo_graph_to_belns(graph,file=file,encoding=encoding,use_names=use_names,)", "docstring": "Write as a BEL namespace.", "id": "f1589:m2"} {"signature": "@main.command()@click.argument('')@click.option('', '', type=click.File(''))def belanno(keyword: str, file: TextIO):", "body": "directory = get_data_dir(keyword)obo_url = f''obo_path = os.path.join(directory, f'')obo_cache_path = os.path.join(directory, f'')obo_getter = make_obo_getter(obo_url, obo_path, preparsed_path=obo_cache_path)graph = obo_getter()convert_obo_graph_to_belanno(graph,file=file,)", "docstring": "Write as a BEL annotation.", "id": "f1589:m3"} {"signature": "def make_temporary_cache_class_mixin(manager_cls: Type[AbstractManager]) -> Type[AbstractTemporaryCacheClassMixin]: ", "body": "class TemporaryCacheClassMixin(AbstractTemporaryCacheClassMixin):Manager = manager_clsreturn TemporaryCacheClassMixin", "docstring": "Build a testing class that has a Bio2BEL manager instance ready to go.", "id": "f1590:m0"} {"signature": "def setUp(self):", "body": "super().setUp()self.fd, self.path = tempfile.mkstemp()self.connection = '' + self.pathlog.info('', self.connection)", 
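The make_obo_getter record above returns a closure that downloads a resource once, caches it on disk, and parses the local copy on later calls. A sketch of that download-then-cache pattern using only the standard library; the parse callable stands in for obonet.read_obo, and the example URL and path are hypothetical:

import os
from urllib.request import urlretrieve

def make_getter(data_url, data_path, parse):
    """Return a getter that downloads data_url to data_path once, then parses the cached copy."""
    def get(force_download=False):
        if force_download or not os.path.exists(data_path):
            urlretrieve(data_url, data_path)  # fetch only when missing or explicitly forced
        return parse(data_path)
    return get

# Hypothetical usage; obonet.read_obo plays the role of parse() in the original:
# get_obo = make_getter('https://example.com/go.obo', '/tmp/go.obo', parse=obonet.read_obo)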
"docstring": "Create a temporary file to use as a persistent database throughout tests in this class.", "id": "f1590:c0:m0"} {"signature": "def tearDown(self):", "body": "os.close(self.fd)os.remove(self.path)", "docstring": "Close the connection to the database and removes the files created for it.", "id": "f1590:c0:m1"} {"signature": "@classmethoddef setUpClass(cls):", "body": "super().setUpClass()cls.fd, cls.path = tempfile.mkstemp()cls.connection = '' + cls.pathlog.info('', cls.connection)", "docstring": "Create a temporary file to use as a persistent database throughout tests in this class.\n\n Subclasses of :class:`TemporaryCacheClsMixin` can extend :func:`TemporaryCacheClsMixin.setUpClass` to populate\n the database.", "id": "f1590:c1:m0"} {"signature": "@classmethoddef tearDownClass(cls):", "body": "os.close(cls.fd)os.remove(cls.path)", "docstring": "Close the connection to the database and removes the files created for it.", "id": "f1590:c1:m1"} {"signature": "def setUp(self):", "body": "super().setUp()def mock_connection() -> str:\"\"\"\"\"\"return self.connectionself.mock_global_connection = mock.patch('', mock_connection)self.mock_module_connection = mock.patch('', mock_connection)", "docstring": "Set up the test with a mock connection string.\n\n Add two class-level variables: ``mock_global_connection`` and ``mock_module_connection`` that can be\n used as context managers to mock the bio2bel connection getter functions.", "id": "f1590:c2:m0"} {"signature": "def setUp(self):", "body": "if self.Manager is ...:raise Bio2BELTestMissingManagerError('''')if not issubclass(self.Manager, AbstractManager):raise Bio2BELManagerTypeError('')super().setUp()self.manager = self.Manager(connection=self.connection)self.populate()", "docstring": "Set up the class with the given manager and allows an optional populate hook to be overridden.", "id": "f1590:c3:m0"} {"signature": "def tearDown(self):", "body": "self.manager.session.close()super().tearDown()", "docstring": "Close the connection in the manager and deletes the temporary database.", "id": "f1590:c3:m1"} {"signature": "def populate(self) -> None:", "body": "", "docstring": "Populate the database.\n\n This stub should be overridden.", "id": "f1590:c3:m2"} {"signature": "@classmethoddef setUpClass(cls):", "body": "if cls.Manager is ...:raise Bio2BELTestMissingManagerError('''')if not issubclass(cls.Manager, AbstractManager):raise Bio2BELManagerTypeError('')super().setUpClass()cls.manager = cls.Manager(connection=cls.connection)cls.populate()", "docstring": "Set up the class with the given manager and allows an optional populate hook to be overridden.", "id": "f1590:c4:m0"} {"signature": "@classmethoddef tearDownClass(cls):", "body": "cls.manager.session.close()super().tearDownClass()", "docstring": "Close the connection in the manager and deletes the temporary database.", "id": "f1590:c4:m1"} {"signature": "@classmethoddef populate(cls):", "body": "", "docstring": "Populate the database.\n\n This stub should be overridden.", "id": "f1590:c4:m2"} {"signature": "def get_data_dir(module_name: str) -> str:", "body": "module_name = module_name.lower()data_dir = os.path.join(BIO2BEL_DIR, module_name)os.makedirs(data_dir, exist_ok=True)return data_dir", "docstring": "Ensure the appropriate Bio2BEL data directory exists for the given module, then returns the file path.\n\n :param module_name: The name of the module. 
Ex: 'chembl'\n :return: The module's data directory", "id": "f1592:m0"} {"signature": "def get_module_config_cls(module_name: str) -> Type[_AbstractModuleConfig]: ", "body": "class ModuleConfig(_AbstractModuleConfig):NAME = f''FILES = DEFAULT_CONFIG_PATHS + [os.path.join(DEFAULT_CONFIG_DIRECTORY, module_name, '')]return ModuleConfig", "docstring": "Build a module configuration class.", "id": "f1592:m1"} {"signature": "def get_connection(module_name: str, connection: Optional[str] = None) -> str:", "body": "if connection is not None:return connectionmodule_name = module_name.lower()module_config_cls = get_module_config_cls(module_name)module_config = module_config_cls.load()return module_config.connection or config.connection", "docstring": "Return the SQLAlchemy connection string if it is set.\n\n Order of operations:\n\n 1. Return the connection if given as a parameter\n 2. Check the environment for BIO2BEL_{module_name}_CONNECTION\n 3. Look in the bio2bel config file for module-specific connection. Create if doesn't exist. Check the\n module-specific section for ``connection``\n 4. Look in the bio2bel module folder for a config file. Don't create if doesn't exist. Check the default section\n for ``connection``\n 5. Check the environment for BIO2BEL_CONNECTION\n 6. Check the bio2bel config file for default\n 7. Fall back to standard default cache connection\n\n :param module_name: The name of the module to get the configuration for\n :param connection: get the SQLAlchemy connection string\n :return: The SQLAlchemy connection string based on the configuration", "id": "f1592:m2"} {"signature": "def get_version() -> str:", "body": "return VERSION", "docstring": "Get the software version of Bio2BEL.", "id": "f1592:m3"} {"signature": "def get_modules() -> Mapping:", "body": "modules = {}for entry_point in iter_entry_points(group='', name=None):entry = entry_point.nametry:modules[entry] = entry_point.load()except VersionConflict as exc:log.warning('', entry, exc)continueexcept UnknownExtra as exc:log.warning('', entry, exc)continueexcept ImportError as exc:log.exception('', entry, exc)continuereturn modules", "docstring": "Get all Bio2BEL modules.", "id": "f1592:m4"} {"signature": "def clear_cache(module_name: str, keep_database: bool = True) -> None:", "body": "data_dir = get_data_dir(module_name)if not os.path.exists(data_dir):returnfor name in os.listdir(data_dir):if name in {'', ''}:continueif name == '' and keep_database:continuepath = os.path.join(data_dir, name)if os.path.isdir(path):shutil.rmtree(path)else:os.remove(path)os.rmdir(data_dir)", "docstring": "Clear all downloaded files.", "id": "f1592:m5"} {"signature": "def _store_helper(model: Action, session: Optional[Session] = None) -> None:", "body": "if session is None:session = _make_session()session.add(model)session.commit()session.close()", "docstring": "Help store an action.", "id": "f1593:m0"} {"signature": "def _make_session(connection: Optional[str] = None) -> Session:", "body": "if connection is None:connection = get_global_connection()engine = create_engine(connection)create_all(engine)session_cls = sessionmaker(bind=engine)session = session_cls()return session", "docstring": "Make a session.", "id": "f1593:m1"} {"signature": "def create_all(engine, checkfirst=True):", "body": "Base.metadata.create_all(bind=engine, checkfirst=checkfirst)", "docstring": "Create the tables for Bio2BEL.", "id": "f1593:m2"} {"signature": "@staticmethoddef make_populate(resource: str) -> '':", "body": "return Action(resource=resource.lower(), 
action='')", "docstring": "Make a ``populate`` instance of :class:`Action`.", "id": "f1593:c0:m1"} {"signature": "@staticmethoddef make_populate_failed(resource: str) -> '':", "body": "return Action(resource=resource.lower(), action='')", "docstring": "Make a ``populate_failed`` instance of :class:`Action`.", "id": "f1593:c0:m2"} {"signature": "@staticmethoddef make_drop(resource: str) -> '':", "body": "return Action(resource=resource.lower(), action='')", "docstring": "Make a ``drop`` instance of :class:`Action`.", "id": "f1593:c0:m3"} {"signature": "@classmethoddef store_populate(cls, resource: str, session: Optional[Session] = None) -> '':", "body": "action = cls.make_populate(resource)_store_helper(action, session=session)return action", "docstring": "Store a \"populate\" event.\n\n :param resource: The normalized name of the resource to store\n\n Example:\n\n >>> from bio2bel.models import Action\n >>> Action.store_populate('hgnc')", "id": "f1593:c0:m4"} {"signature": "@classmethoddef store_populate_failed(cls, resource: str, session: Optional[Session] = None) -> '':", "body": "action = cls.make_populate_failed(resource)_store_helper(action, session=session)return action", "docstring": "Store a \"populate failed\" event.\n\n :param resource: The normalized name of the resource to store\n\n Example:\n\n >>> from bio2bel.models import Action\n >>> Action.store_populate_failed('hgnc')", "id": "f1593:c0:m5"} {"signature": "@classmethoddef store_drop(cls, resource: str, session: Optional[Session] = None) -> '':", "body": "action = cls.make_drop(resource)_store_helper(action, session=session)return action", "docstring": "Store a \"drop\" event.\n\n :param resource: The normalized name of the resource to store\n\n Example:\n\n >>> from bio2bel.models import Action\n >>> Action.store_drop('hgnc')", "id": "f1593:c0:m6"} {"signature": "@classmethoddef ls(cls, session: Optional[Session] = None) -> List['']:", "body": "if session is None:session = _make_session()actions = session.query(cls).order_by(cls.created.desc()).all()session.close()return actions", "docstring": "Get all actions.", "id": "f1593:c0:m7"} {"signature": "@classmethoddef count(cls, session: Optional[Session] = None) -> int:", "body": "if session is None:session = _make_session()count = session.query(cls).count()session.close()return count", "docstring": "Count all actions.", "id": "f1593:c0:m8"} {"signature": "@ui.route('')def home():", "body": "return render_template('', entries=sorted(add_admins))", "docstring": "Show the home page.", "id": "f1594:m0"} {"signature": "def create_application(connection: Optional[str] = None) -> Flask:", "body": "app = Flask(__name__)flask_bootstrap.Bootstrap(app)Admin(app)connection = connection or DEFAULT_CACHE_CONNECTIONengine, session = build_engine_session(connection)for name, add_admin in add_admins.items():url = ''.format(name)add_admin(app, session, url=url, endpoint=name, name=name)log.debug('', name, add_admin, url)app.register_blueprint(ui)return app", "docstring": "Create a Flask application.", "id": "f1594:m1"} {"signature": "def read(*parts):", "body": "with codecs.open(os.path.join(HERE, *parts), '', '') as f:return f.read()", "docstring": "Build an absolute path from *parts* and return the contents of the resulting file. 
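The Action records above log populate and drop events through a short-lived SQLAlchemy session that is created, committed, and closed per call. A minimal sketch of that store-and-commit helper, assuming an in-memory SQLite connection; the column names and defaults are assumptions, not the source schema:

import datetime
from sqlalchemy import Column, DateTime, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Action(Base):
    __tablename__ = 'action'
    id = Column(Integer, primary_key=True)
    resource = Column(String)
    action = Column(String)
    created = Column(DateTime, default=datetime.datetime.utcnow)

def store_action(resource, action, connection='sqlite://'):
    """Open a session, add one Action row, commit, and close - the _store_helper pattern."""
    engine = create_engine(connection)
    Base.metadata.create_all(engine, checkfirst=True)
    session = sessionmaker(bind=engine)()
    session.add(Action(resource=resource.lower(), action=action))
    session.commit()
    session.close()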
Assume UTF-8 encoding.", "id": "f1597:m0"} {"signature": "def find_meta(meta):", "body": "meta_match = re.search(r''.format(meta=meta),META_FILE, re.M)if meta_match:return meta_match.group()raise RuntimeError(''.format(meta=meta))", "docstring": "Extract __*meta*__ from META_FILE.", "id": "f1597:m1"} {"signature": "def get_long_description():", "body": "with codecs.open(os.path.join(HERE, ''), encoding='') as f:long_description = f.read()return long_description", "docstring": "Get the long_description from the README.rst file. Assume UTF-8 encoding.", "id": "f1597:m2"} {"signature": "def mapfivo(ol,*args,**kwargs):", "body": "args = list(args)lngth = args.__len__()if(lngth==):diff_funcs_arr = kwargs['']diff_args_arr = kwargs['']elif(lngth==):if('' in kwargs):diff_funcs_arr = args[]diff_args_arr = kwargs['']else:diff_funcs_arr = kwargs['']diff_args_arr = args[]else:diff_funcs_arr = args[]diff_args_arr = args[]lngth = ol.__len__()rslt = []for i in range(,lngth):index = ivalue = ol[i]func = diff_funcs_arr[i]args = diff_args_arr[i]ele = func(index,value,*args)rslt.append(ele)return(rslt)", "docstring": "#mapfivo f,i,v,o\u56db\u5143\u51b3\u5b9a fivo-4-tuple-engine\n#map_func diff_func(index,value,*diff_args)", "id": "f1599:m0"} {"signature": "def mapfiv(ol,map_func_args,**kwargs):", "body": "lngth = ol.__len__()diff_funcs_arr = kwargs['']common_args_arr = init(lngth,map_func_args)rslt = mapfivo(ol,map_funcs=diff_funcs_arr,map_func_args_array=common_args_arr)return(rslt)", "docstring": "#mapfiv \u5171\u4eab\u76f8\u540c\u7684o share common other_args\n#map_func diff_func(index,value,*common_args)", "id": "f1599:m1"} {"signature": "def mapfio(ol,**kwargs):", "body": "diff_funcs_arr = kwargs['']diff_args_arr = kwargs['']lngth = ol.__len__()rslt = []for i in range(,lngth):index = ivalue = ol[i]func = diff_funcs_arr[i]args = diff_args_arr[i]ele = func(index,*args)rslt.append(ele)return(rslt)", "docstring": "#mapfio v\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570 NOT take value as a param for map_func\n#map_func diff_func(index,*diff_args)", "id": "f1599:m2"} {"signature": "def mapfvo(ol,**kwargs):", "body": "diff_funcs_arr = kwargs['']diff_args_arr = kwargs['']lngth = ol.__len__()rslt = []for i in range(,lngth):index = ivalue = ol[i]func = diff_funcs_arr[i]args = diff_args_arr[i]ele = func(value,*args)rslt.append(ele)return(rslt)", "docstring": "#mapfvo i\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570 NOT take index as a param for map_func\n#map_func diff_func(value,*diff_args)", "id": "f1599:m3"} {"signature": "def mapivo(ol,map_func,**kwargs):", "body": "lngth = ol.__len__()common_funcs_arr = init(lngth,map_func)diff_args_arr = kwargs['']rslt = mapfivo(ol,map_funcs=common_funcs_arr,map_func_args_array=diff_args_arr)return(rslt)", "docstring": "#mapivo \u5171\u4eab\u76f8\u540c\u7684f share common map_func\n#map_func common_func(index,value,*diff_args)", "id": "f1599:m4"} {"signature": "def array_dualmap(ol,value_map_func,**kwargs):", "body": "def get_self(obj):return(obj)if('' in kwargs):index_map_func_args = kwargs['']else:index_map_func_args = []if('' in kwargs):value_map_func_args = kwargs['']else:value_map_func_args = []if('' in kwargs):index_map_func = kwargs['']else:index_map_func = get_selflength = ol.__len__()il = list(range(,length))nil = list(map(lambda ele:index_map_func(ele,*index_map_func_args),il))nvl = []for i in range(,length):ele = ol[i]v = value_map_func(nil[i],ele,*value_map_func_args)nvl.append(v)return(nvl)", "docstring": "from elist.elist import *\nol = ['a','b','c','d']\ndef 
index_map_func(index,prefix,suffix):\n s = prefix +str(index+97)+ suffix\n return(s)\n\ndef value_map_func(mapped_index,ele,prefix,suffix):\n s = prefix+mapped_index+': ' + str(ele) + suffix\n return(s)\n\n####\nrslt = array_dualmap2(ol,index_map_func=index_map_func,index_map_func_args=[': ',' is '],value_map_func=value_map_func,value_map_func_args=['ord',' yes?'])\npobj(rslt)", "id": "f1599:m5"} {"signature": "def array_dualmap2(*refls,**kwargs):", "body": "def get_self(obj,*args):return(obj)if('' in kwargs):value_map_func_args = kwargs['']else:value_map_func_args = []if('' in kwargs):index_map_func = kwargs['']else:index_map_func = get_selfif('' in kwargs):index_map_func_args = kwargs['']else:index_map_func_args = []length = ol.__len__()il = list(range(,length))nil = list(map(lambda ele:index_map_func(ele,*index_map_func_args),il))refls = list(refls)refls = prepend(refls,nil)nvl = array_map2(*refls,map_func = value_map_func,map_func_args=value_map_func_args)return(nvl)", "docstring": "from elist.elist import *\nol = [1,2,3,4]\nrefl1 = ['+','+','+','+']\nrefl2 = [7,7,7,7]\nrefl3 = ['=','=','=','=']\ndef index_map_func(index):\n s =\"<\"+str(index)+\">\"\n return(s)\n\ndef value_map_func(mapped_index,ele,ref_ele1,ref_ele2,ref_ele3,prefix,suffix):\n s = prefix+mapped_index+': ' + str(ele) + str(ref_ele1) + str(ref_ele2) + str(ref_ele3) + suffix\n return(s)\n\n####\nrslt = array_dualmap2(ol,refl1,refl2,refl3,index_map_func=index_map_func,value_map_func=value_map_func,value_map_func_args=['Q','?'])\npobj(rslt)", "id": "f1599:m6"} {"signature": "def mapfi(ol,map_func_args,**kwargs):", "body": "diff_funcs_arr = kwargs['']lngth = ol.__len__()rslt = []for i in range(,lngth):index = ivalue = ol[i]func = diff_funcs_arr[i]args = map_func_argsele = func(index,*args)rslt.append(ele)return(rslt)", "docstring": "#mapfi \u5171\u4eab\u76f8\u540c\u7684o,v\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570\n# share common other_args,NOT take value as a param for map_func\n#map_func diff_func(index,*common_args)", "id": "f1599:m7"} {"signature": "def mapfv(ol,map_func_args,*args,**kwargs):", "body": "args = list(args)lngth = args.__len__()if(lngth == ):diff_funcs_arr = kwargs['']else:diff_funcs_arr = args[]lngth = ol.__len__()rslt = []for i in range(,lngth):index = ivalue = ol[i]func = diff_funcs_arr[i]args = map_func_argsele = func(value,*args)rslt.append(ele)return(rslt)", "docstring": "#mapfv \u5171\u4eab\u76f8\u540c\u7684o,i\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570\n# share common other_args,NOT take value as a param for map_func\n#map_func diff_func(value,*common_args)", "id": "f1599:m8"} {"signature": "def mapfo(ol,**kwargs):", "body": "diff_args_arr = kwargs['']diff_funcs_arr = kwargs['']lngth = ol.__len__()rslt = []for i in range(,lngth):index = ivalue = ol[i]func = diff_funcs_arr[i]args = diff_args_arr[i]ele = func(value,*args)rslt.append(ele)return(rslt)", "docstring": "#mapfo i\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570,v\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570\n# NOT take value as a param for map_func,NOT take index as a param for map_func\n#map_func diff_func(*diff_args)", "id": "f1599:m10"} {"signature": "def mapiv(ol,map_func,map_func_args=[]):", "body": "lngth = ol.__len__()rslt = []for i in range(,lngth):index = ivalue = ol[i]func = map_funcargs = map_func_argsele = func(index,value,*args)rslt.append(ele) return(rslt)", "docstring": "#mapiv \u5171\u4eab\u76f8\u540c\u7684o,\u5171\u4eab\u76f8\u540c\u7684f share common map_func,share common other_args\n#map_func common_func(index,value,*common_args)", "id": 
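The mapfivo record above is the most general of the elist mapping engines: every index gets its own function and its own argument tuple, and each call receives the index, the value, and those arguments. A compact sketch of that f/i/v/o dispatch; the restored literals (such as starting the range at 0) are assumptions, since the corpus strips them:

def mapfivo(ol, map_funcs, map_func_args_array):
    """Apply map_funcs[i](i, ol[i], *map_func_args_array[i]) for every index i."""
    return [
        func(i, value, *args)
        for i, (value, func, args) in enumerate(zip(ol, map_funcs, map_func_args_array))
    ]

print(mapfivo(
    ['a', 'b'],
    [lambda i, v, s: f'{i}{v}{s}', lambda i, v, s: s + v * i],
    [('!',), ('?',)],
))  # ['0a!', '?b']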
"f1599:m11"} {"signature": "def mapiv2(ol,map_func,*args,**kwargs):", "body": "args = list(args)if(args.__len__() > ):map_func_args = argselse:if('' in kwargs):map_func_args = kwargs['']else:map_func_args = []lngth = ol.__len__()rslt = []for i in range(,lngth):ele = map_func(i,ol[i],*map_func_args)rslt.append(ele)return(rslt)", "docstring": "from elist.elist import *\nol = ['a','b','c','d']\n#1\ndef map_func(index,value,*others):\n return(value * index + others[0] +others[-1])\nmapiv(ol,map_func,'tailA-','tailB')\n#2\nmapiv2(ol,lambda index,value,other:(value*index+other),['-'])\nmapiv2(ol,lambda index,value,other:(value*index+other),'-')\nmapiv2(ol,lambda index,value:(value*index))", "id": "f1599:m12"} {"signature": "def mapio(ol,map_func,**kwargs):", "body": "lngth = ol.__len__()diff_args_arr = kwargs['']rslt = []for i in range(,lngth):index = ivalue = ol[i]func = map_funcargs = diff_args_arr[i]ele = func(index,*args)rslt.append(ele)return(rslt)", "docstring": "#mapvo \u5171\u4eab\u76f8\u540c\u7684f,i\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570\n# share common map_func,NOT take index as a param for map_func\n# common_func(value,*priv_args)", "id": "f1599:m13"} {"signature": "def mapvo(ol,map_func,*args,**kwargs):", "body": "lngth = ol.__len__()args = list(args)if(args.__len__()==):diff_args_arr = kwargs['']else:diff_args_arr = args[]rslt = []for i in range(,lngth):index = ivalue = ol[i]func = map_funcargs = diff_args_arr[i]ele = func(value,*args)rslt.append(ele)return(rslt)", "docstring": "#mapvo \u5171\u4eab\u76f8\u540c\u7684f,i\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570\n# share common map_func,NOT take index as a param for map_func\n# common_func(value,*priv_args)", "id": "f1599:m14"} {"signature": "def array_map2(*referls,**kwargs):", "body": "map_func = kwargs['']if('' in kwargs):map_func_args = kwargs['']else:map_func_args = []length = referls.__len__()rslt = []anum = list(referls)[].__len__()for j in range(,anum):args = []for i in range(,length):refl = referls[i]args.append(refl[j])args.extend(map_func_args)v = map_func(*args)rslt.append(v)return(rslt)", "docstring": "obseleted just for compatible\nfrom elist.elist import *\nol = [1,2,3,4]\nrefl1 = ['+','+','+','+']\nrefl2 = [7,7,7,7]\nrefl3 = ['=','=','=','=']\ndef map_func(ele,ref_ele1,ref_ele2,ref_ele3,prefix,suffix):\n s = prefix+': ' + str(ele) + str(ref_ele1) + str(ref_ele2) + str(ref_ele3) + suffix\n return(s)\n\n####\nrslt = array_map2(ol,refl1,refl2,refl3,map_func=map_func,map_func_args=['Q','?'])\npobj(rslt)", "id": "f1599:m15"} {"signature": "def mapf(ol,map_func_args,**kwargs):", "body": "diff_funcs_arr = kwargs['']lngth = ol.__len__()rslt = []for i in range(,lngth):index = ivalue = ol[i]func = diff_funcs_arr[i]args = map_func_argsele = func(*args)rslt.append(ele)return(rslt)", "docstring": "#mapf i\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570,v\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570,\u5171\u4eab\u76f8\u540c\u7684o\n# NOT take value as a param for map_func\n# NOT take index as a param for map_func\n# share common other_args\n# diff_func(*common_args)", "id": "f1599:m16"} {"signature": "def mapi(ol,map_func,map_func_args=[]):", "body": "lngth = ol.__len__()rslt = []for i in range(,lngth):index = ivalue = ol[i]func = map_funcargs = map_func_argsele = func(index,*args)rslt.append(ele)return(rslt)", "docstring": "#mapi v\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570,\u5171\u4eab\u76f8\u540c\u7684f,\u5171\u4eab\u76f8\u540c\u7684o\n# NOT take value as a param for map_func\n# share common other_args\n# share common map_func\n# 
common_func(index,*common_args)", "id": "f1599:m17"} {"signature": "def mapv(ol,map_func,map_func_args=[]):", "body": "rslt = list(map(lambda ele:map_func(ele,*map_func_args),ol))return(rslt)", "docstring": "#mapv i\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570,\u5171\u4eab\u76f8\u540c\u7684f,\u5171\u4eab\u76f8\u540c\u7684o\n# NOT take index as a param for map_func\n# share common other_args\n# share common map_func\n# common_func(value,*common_args)", "id": "f1599:m18"} {"signature": "def array_map(ol,map_func,*args):", "body": "rslt = list(map(lambda ele:map_func(ele,*args),ol))return(rslt)", "docstring": "obseleted,just for compatible\nfrom elist.elist import *\nol = [1,2,3,4]\ndef map_func(ele,mul,plus):\n return(ele*mul+plus)\n\narray_map(ol,map_func,2,100)", "id": "f1599:m19"} {"signature": "def mapo(ol,map_func,*params,**kwargs):", "body": "params = list(params)if(params.__len__()==):diff_args_arr = kwargs['']elif(isinstance(params[],list)):diff_args_arr = params[]else:diff_args_arr = paramslngth = ol.__len__()rslt = []for i in range(,lngth):index = ivalue = ol[i]func = map_funcargs = diff_args_arr[i]ele = func(*args)rslt.append(ele)return(rslt)", "docstring": "#mapo i\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570,v\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570,\u5171\u4eab\u76f8\u540c\u7684f\n# NOT take index as a param for map_func\n# NOT take value as a param for map_func\n# share common map_func\n# common_func(*priv_args)", "id": "f1599:m20"} {"signature": "def findfivo(ol,*args,**kwargs):", "body": "args = list(args)lngth = args.__len__()if(lngth==):diff_funcs_arr = kwargs['']diff_args_arr = kwargs['']elif(lngth==):if('' in kwargs):diff_funcs_arr = args[]diff_args_arr = kwargs['']else:diff_funcs_arr = kwargs['']diff_args_arr = args[]else:diff_funcs_arr = args[]diff_args_arr = args[]lngth = ol.__len__()rslt = []for i in range(,lngth):index = ivalue = ol[i]func = diff_funcs_arr[i]args = diff_args_arr[i]cond = func(index,value,*args)if(cond):rslt.append((index,value))else:passreturn(rslt)", "docstring": "#findfivo f,i,v,o\u56db\u5143\u51b3\u5b9a fivo-4-tuple-engine\n#cond_func diff_func(index,value,*diff_args)", "id": "f1599:m21"} {"signature": "def findfiv(ol,cond_func_args,**kwargs):", "body": "lngth = ol.__len__()diff_funcs_arr = kwargs['']common_args_arr = init(lngth,map_func_args)rslt = findfivo(ol,cond_funcs=diff_funcs_arr,cond_func_args_array=common_args_arr)return(rslt)", "docstring": "#findfiv \u5171\u4eab\u76f8\u540c\u7684o share common other_args\n#cond_func diff_func(index,value,*common_args)", "id": "f1599:m24"} {"signature": "def findv(ol,cond_func,cond_func_args=[]):", "body": "rslt = []for i in range(ol.__len__()):cond = cond_func(ol[i],*cond_func_args)if(cond):rslt.append((i,ol[i]))else:passreturn(rslt)", "docstring": "#mapv i\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570,\u5171\u4eab\u76f8\u540c\u7684f,\u5171\u4eab\u76f8\u540c\u7684o\n# NOT take index as a param for map_func\n# share common other_args\n# share common cond_func\n# common_func(value,*common_args)", "id": "f1599:m25"} {"signature": "def cond_select_all(ol,**kwargs):", "body": "cond_func = kwargs['']if('' in kwargs):cond_func_args = kwargs['']else:cond_func_args = []founded = find_all(ol,cond_func,*cond_func_args)rslt = array_map(founded,lambda ele:ele[''])return(rslt)", "docstring": "from elist.elist import *\nfrom elist.jprint import pobj\ndef test_func(ele,x):\n cond = (ele > x)\n return(cond)\n\nol = [1,2,3,4,5,6,7]\nrslt = cond_select_all(ol,cond_func = test_func,cond_func_args = [3])\npobj(rslt)", "id": "f1599:m28"} 
{"signature": "def cond_select_all2(ol,**kwargs):", "body": "cond_func = kwargs['']if('' in kwargs):cond_func_args = kwargs['']else:cond_func_args = []founded = find_all2(ol,cond_func,*cond_func_args)rslt = array_map(founded,lambda ele:ele[''])return(rslt)", "docstring": "from elist.elist import *\nfrom xdict.jprint import pobj\ndef test_func(ele,index,x):\n cond1 = (ele > x)\n cond2 = (index %2 == 0)\n cond =(cond1 & cond2)\n return(cond)\n\nol = [1,2,3,4,5,6,7]\nrslt = cond_select_all2(ol,cond_func = test_func,cond_func_args = [3])\npobj(rslt)", "id": "f1599:m29"} {"signature": "def cond_select_indexes_all(ol,**kwargs):", "body": "cond_func = kwargs['']if('' in kwargs):cond_func_args = kwargs['']else:cond_func_args = []founded = find_all(ol,cond_func,*cond_func_args)rslt = array_map(founded,lambda ele:ele[''])return(rslt)", "docstring": "from elist.elist import *\nfrom elist.jprint import pobj\ndef test_func(ele,x):\n cond = (ele > x)\n return(cond)\n\nol = [1,2,3,4,5,6,7]\nrslt = cond_select_indexes_all(ol,cond_func = test_func, cond_func_args = [3])\npobj(rslt)", "id": "f1599:m35"} {"signature": "def cond_select_indexes_all2(ol,**kwargs):", "body": "cond_func = kwargs['']if('' in kwargs):cond_func_args = kwargs['']else:cond_func_args = []founded = find_all2(ol,cond_func,*cond_func_args)rslt = array_map(founded,lambda ele:ele[''])return(rslt)", "docstring": "from elist.elist import *\nfrom xdict.jprint import pobj\ndef test_func(ele,index,x):\n cond1 = (ele > x)\n cond2 = (index %2 == 0)\n cond =(cond1 & cond2)\n return(cond)\n\nol = [1,2,3,4,5,6,7]\nrslt = cond_select_indexes_all2(ol,cond_func = test_func,cond_func_args = [3])\npobj(rslt)", "id": "f1599:m36"} {"signature": "def select_seqs(ol,seqs):", "body": "rslt =copy.deepcopy(ol)rslt = itemgetter(*seqs)(ol)if(seqs.__len__()==):rslt = []elif(seqs.__len__()==):rslt = [rslt]else:rslt = list(rslt)return(rslt)", "docstring": "from elist.elist import *\nol = ['a','b','c','d']\nselect_seqs(ol,[1,2])", "id": "f1599:m51"} {"signature": "def select_some(ol,*seqs):", "body": "seqs = list(seqs)return(select_seqs(ol,seqs))", "docstring": "from elist.elist import *\nol = ['a','b','c','d']\nselect_some(ol,1,2)", "id": "f1599:m53"} {"signature": "def append(ol,ele,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"if(mode == \"\"):new = copy.deepcopy(ol)new.append(ele)return(new)else:ol.append(ele)return(ol)", "docstring": "from elist.elist import *\nol = [1,2,3,4]\nele = 5\nid(ol)\nappend(ol,ele,mode=\"original\")\nol\nid(ol)\n####\nol = [1,2,3,4]\nele = 5\nid(ol)\nnew = append(ol,ele)\nnew\nid(new)", "id": "f1599:m54"} {"signature": "def append_some(ol,*eles,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"return(extend(ol,list(eles),mode=mode))", "docstring": "from elist.elist import *\nol = [1,2,3,4]\nid(ol)\nappend_some(ol,5,6,7,8,mode=\"original\")\nol\nid(ol)\n####\nol = [1,2,3,4]\nid(ol)\nnew = append_some(ol,5,6,7,8)\nnew\nid(new)", "id": "f1599:m55"} {"signature": "def prepend(ol,ele,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"if(mode == \"\"):new = [ele]cpol = copy.deepcopy(ol)new.extend(cpol)return(new)else:length = ol.__len__()ol.append(None)for i in range(length-,-,-):ol[i+] = ol[i]ol[] = elereturn(ol)", "docstring": "from elist.elist import *\nol = [1,2,3,4]\nele = 5\nid(ol)\nprepend(ol,ele,mode=\"original\")\nol\nid(ol)\n####\nol = [1,2,3,4]\nele = 5\nid(ol)\nnew = prepend(ol,ele)\nnew\nid(new)", "id": "f1599:m56"} {"signature": "def 
prepend_some(ol,*eles,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"return(prextend(ol,list(eles),mode=mode))", "docstring": "from elist.elist import *\nol = [1,2,3,4]\nid(ol)\nprepend_some(ol,5,6,7,8,mode=\"original\")\nol\nid(ol)\n####\nol = [1,2,3,4]\nid(ol)\nnew = prepend_some(ol,5,6,7,8)\nnew\nid(new)\n#####unshift is the same as prepend_some\n>>> unshift(ol,9,10,11,12)\n[9, 10, 11, 12, 1, 2, 3, 4]", "id": "f1599:m57"} {"signature": "def extend(ol,nl,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"if(mode == \"\"):new = copy.deepcopy(ol)cpnl = copy.deepcopy(nl)new.extend(cpnl)return(new)else:ol.extend(nl)return(ol)", "docstring": "from elist.elist import *\nol = [1,2,3,4]\nnl = [5,6,7,8]\nid(ol)\nextend(ol,nl,mode=\"original\")\nol\nid(ol)\n####\nol = [1,2,3,4]\nnl = [5,6,7,8]\nid(ol)\nnew = extend(ol,nl)\nnew\nid(new)", "id": "f1599:m58"} {"signature": "def push(ol,*eles,**kwargs):", "body": "if('' in kwargs):mode = kwargs['']else:mode = \"\"eles = list(eles)return(extend(ol,eles,mode=mode))", "docstring": "from elist.elist import *\nol=[1,2,3,4]\nid(ol)\nnew = push(ol,5,6,7)\nnew\nid(new)\n####\nol=[1,2,3,4]\nid(ol)\nrslt = push(ol,5,6,7,mode=\"original\")\nrslt\nid(rslt)", "id": "f1599:m59"} {"signature": "def prextend(ol,nl,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"if(mode == \"\"):new = copy.deepcopy(nl)cpol = copy.deepcopy(ol)new.extend(cpol)return(new)else:length = ol.__len__()nl_len = nl.__len__()for i in range(,nl_len):ol.append(None)for i in range(length-,-,-):ol[i+nl_len] = ol[i]for i in range(,nl_len):ol[i] = nl[i]return(ol)", "docstring": "from elist.elist import *\nol = [1,2,3,4]\nnl = [5,6,7,8]\nid(ol)\nid(nl)\nprextend(ol,nl,mode=\"original\")\nol\nid(ol)\n####\nol = [1,2,3,4]\nnl = [5,6,7,8]\nid(ol)\nid(nl)\nnew = prextend(ol,nl)\nnew\nid(new)", "id": "f1599:m60"} {"signature": "def concat(*arrays):", "body": "new = []length = arrays.__len__()for i in range(,length):array = copy.deepcopy(arrays[i])new.extend(array)return(new)", "docstring": "from elist.elist import *\nl1 = [1,2,3]\nl2 = [\"a\",\"b\",\"c\"]\nl3 = [100,200]\nid(l1)\nid(l2)\nid(l3)\narrays = [l1,l2,l3]\nnew = concat(arrays)\nnew\nid(new)", "id": "f1599:m61"} {"signature": "def concat_seqs(arrays):", "body": "return(concat(*tuple(arrays)))", "docstring": "from elist.elist import *\nl1 = [1,2,3]\nl2 = [\"a\",\"b\",\"c\"]\nl3 = [100,200]\nid(l1)\nid(l2)\nid(l3)\narrays = [l1,l2,l3]\nnew = concat_seqs(arrays)\nnew\nid(new)", "id": "f1599:m62"} {"signature": "def car(ol):", "body": "return(ol[])", "docstring": "from elist.elist import *\nol=[1,2,3,4]\ncar(ol)", "id": "f1599:m63"} {"signature": "def cdr(ol,**kwargs):", "body": "if('' in kwargs):mode = kwargs['']else:mode = \"\"if(mode == \"\"):cpol = copy.deepcopy(ol)return(cpol[:])else:ol.pop()return(ol)", "docstring": "from elist.elist import *\nol=[1,2,3,4]\nid(ol)\nnew = cdr(ol)\nnew\nid(new)\n####\nol=[1,2,3,4]\nid(ol)\nrslt = cdr(ol,mode=\"original\")\nrslt\nid(rslt)", "id": "f1599:m64"} {"signature": "def cons(head_ele,l,**kwargs):", "body": "if('' in kwargs):mode = kwargs['']else:mode = \"\"return(prepend(l,head_ele,mode=mode))", "docstring": "from elist.elist import *\nol=[1,2,3,4]\nid(ol)\nnew = cons(5,ol)\nnew\nid(new)\n####\nol=[1,2,3,4]\nid(ol)\nrslt = cons(5,ol,mode=\"original\")\nrslt\nid(rslt)", "id": "f1599:m65"} {"signature": "def uniform_index(index,length):", "body": "if(index<):rl = length+indexif(rl<):index = else:index = rlelif(index>=length):index = 
lengthelse:index = indexreturn(index)", "docstring": "uniform_index(0,3)\nuniform_index(-1,3)\nuniform_index(-4,3)\nuniform_index(-3,3)\nuniform_index(5,3)", "id": "f1599:m66"} {"signature": "def insert(ol,start_index,ele,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"if(mode == \"\"):length = ol.__len__()cpol = copy.deepcopy(ol)si = uniform_index(start_index,length)new = copy.deepcopy(cpol[:si])new.append(ele)new.extend(cpol[si:])return(new)else:ol.insert(start_index,ele)return(ol)", "docstring": "from elist.elist import *\nol = [1,2,3,4]\nele = 5\nid(ol)\ninsert(ol,2,ele,mode=\"original\")\nol\nid(ol)\n####\nol = [1,2,3,4]\nele = 5\nid(ol)\nnew = insert(ol,2,ele)\nnew\nid(new)", "id": "f1599:m67"} {"signature": "def insert_some(ol,*eles,**kwargs):", "body": "start_index = kwargs['']if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"length = ol.__len__()cpol = copy.deepcopy(ol)if(mode == \"\"):si = uniform_index(start_index,length)new = copy.deepcopy(cpol[:si])new.extend(list(eles))new.extend(cpol[si:])return(new)else:si = uniform_index(start_index,length)new = cpol[:si]new.extend(list(eles))new.extend(cpol[si:])ol.clear()for i in range(,new.__len__()):ol.append(new[i])return(ol)", "docstring": "from elist.elist import *\nol = [1,2,3,4]\nid(ol)\ninsert_some(ol,5,6,7,8,index=2,mode=\"original\")\nol\nid(ol)\n####\nol = [1,2,3,4]\nid(ol)\nnew = insert_some(ol,5,6,7,8,index=2)\nnew\nid(new)", "id": "f1599:m68"} {"signature": "def insert_many(ol,eles,locs,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"eles = copy.deepcopy(eles)locs = copy.deepcopy(locs)new = []length = ol.__len__()cpol = copy.deepcopy(ol)for i in range(,locs.__len__()):if(locs[i]>=length):passelse:locs[i] = uniform_index(locs[i],length)tmp = sorted_refer_to(eles,locs)eles = tmp['']locs = tmp['']label = eles.__len__()si = ei = for i in range(,locs.__len__()):if(locs[i]>=length):label = ibreakelse:ei = locs[i]new.extend(cpol[si:ei])new.append(eles[i])si = eifor i in range(label,locs.__len__()):new.append(eles[i])new.extend(cpol[ei:])if(mode == \"\"):return(new)else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [1,2,3,4,5]\neles = [7,77,777]\nlocs = [0,2,4]\nid(ol)\nnew = insert_many(ol,eles,locs)\nol\nnew\nid(new)\n####\nol = [1,2,3,4,5]\neles = [7,77,777]\nlocs = [0,2,4]\nid(ol)\nrslt = insert_many(ol,eles,locs,mode=\"original\")\nol\nrslt\nid(rslt)", "id": "f1599:m69"} {"signature": "def insert_sections_some(ol,*secs,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"loc = kwargs['']secs = list(secs)secs = [concat(*secs)]locs = [loc]return(insert_sections_many(ol,secs,locs,mode=mode))", "docstring": "ol = initRange(0,20,1)\nol\nloc = 6\nrslt = insert_sections_some(ol,['a','a','a'],['c','c','c','c'],index=loc)\nrslt\n####", "id": "f1599:m70"} {"signature": "def insert_section(ol,sec,loc,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"secs = [sec]locs = [loc]return(insert_sections_many(ol,secs,locs,mode=mode))", "docstring": "ol = initRange(0,20,1)\nol\nloc = 6\nsec = ['a','b','c','d']\nrslt = insert_section(ol,sec,loc)\nrslt\n####", "id": "f1599:m71"} {"signature": "def insert_sections_many(ol,secs,locs,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"secs = copy.deepcopy(secs)locs = copy.deepcopy(locs)brked = broken_seqs(ol,locs)seclen = secs.__len__()brklen = brked.__len__()if(locs[]==):new = secs[]length = seclen -if(length < brklen):for i in 
range(,length):new.extend(brked[i])new.extend(secs[i+])for i in range(length,brklen):new.extend(brked[i])elif(length == brklen):for i in range(,length):new.extend(brked[i])new.extend(secs[i+])else:for i in range(,brklen):new.extend(brked[i])new.extend(secs[i+])for i in range(brklen,length):new.extend(secs[i])else:new = brked[]length = brklen -if(length < seclen):for i in range(,length):new.extend(secs[i])new.extend(brked[i+])for i in range(length,seclen):new.extend(secs[i])elif(length == seclen):for i in range(,length):new.extend(secs[i])new.extend(brked[i+])else:for i in range(,seclen):new.extend(secs[i])new.extend(brked[i+])for i in range(seclen,length):new.extend(brked[i])if(mode == \"\"):return(new)else:ol.clear()ol.extend(new)return(ol)", "docstring": "ol = initRange(0,20,1)\nol\nlocs = [1,6,14,9]\nsecs = [\n ['a','a','a'],\n ['b','b'],\n ['c','c','c','c'],\n ['d','d']\n]\nrslt = insert_sections_many(ol,secs,locs)\nrslt\n####\nol\nlocs = [0,3,6,9,12,15,16]\nsecs = [\n ['a','a','a'],\n ['b','b'],\n ['c','c','c','c'],\n ['d','d']\n]\nrslt = insert_sections_many(ol,secs,locs)\nrslt\n####\nol\nlocs = [1,6,14,9]\nsecs = [\n ['a','a','a'],\n ['b','b'],\n ['c','c','c','c'],\n ['d','d'],\n ['e'],\n ['f','f','f','f'],\n [777,777,777,777]\n]\nrslt = insert_sections_many(ol,secs,locs)\nrslt", "id": "f1599:m72"} {"signature": "def reorder_sub(ol,sub):", "body": "def cond_func(ele,ol):index = ol.index(ele)return(index)indexes = array_map(sub,cond_func,ol)nsub = sorted_refer_to(sub,indexes)['']return(nsub)", "docstring": "sub = ['query', 'params', 'fragment', 'path']\nol = ['scheme', 'username', 'password', 'hostname', 'port', 'path', 'params', 'query', 'fragment']\nreorder_sub(ol,sub)", "id": "f1599:m73"} {"signature": "def sort(ol,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"if(mode == \"\"):new = copy.deepcopy(ol)new.sort()return(new) else:ol.sort()return(ol)", "docstring": "from elist.elist import *\nol = [1,3,4,2]\nid(ol)\nnew = sort(ol)\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,3,4,2]\nid(ol)\nrslt = sort(ol,mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m74"} {"signature": "def sorted_refer_to(l,referer,reverse=False,**kwargs):", "body": "if(\"\" in kwargs):mode = kwargs[\"\"]else:mode = \"\"tl =[]length = l.__len__()for i in range(,length):ele = (l[i],referer[i])tl.append(ele)tl = sorted(tl,key=itemgetter(),reverse=reverse)sorted_l =[]sorted_r = []for i in range(,length):sorted_l.append(tl[i][])sorted_r.append(tl[i][])if(mode == \"\"):return(sorted_l)elif(mode == \"\"):return(referer)else:return({\"\":sorted_l,\"\":sorted_r})", "docstring": "from elist.elist import *\nl = [\"a\",\"b\",\"c\"]\nreferer = [7,8,6]\nsorted_refer_to(l,referer)\n{'list': ['c', 'a', 'b'], 'referer': [6, 7, 8]}\nl\nreferer\n>>>", "id": "f1599:m75"} {"signature": "def batsorted(referer,*lists,**kwargs):", "body": "if('' in kwargs):reverse = kwargs['']else:reverse = Falselength = referer.__len__()indexes = list(range(,length))rslt = sorted_refer_to(indexes,referer,reverse=reverse)referer = rslt['']indexes = rslt['']rslt = []lists = copy.deepcopy(list(lists))for i in range(,lists.__len__()):l = lists[i]nl = []for j in range(,length):loc = indexes[j]nl.append(l[loc])rslt.append(nl)return(tuple(rslt))", "docstring": "from elist.elist import *\nreferer = [4,2,3,1]\nl1 = ['a','b','c','d']\nl2 = [100,200,300,400]\nl3 = ['A','B','A','B']\nnl1,nl2,nl3 = batsorted(referer,l1,l2,l3)\nnl1\nnl2\nnl3\nnl1,nl2,nl3 = batsorted(referer,l1,l2,l3,reverse=True)\nnl1\nnl2\nnl3\n####the batsorted 
will not modify the original lists\nl1\nl2\nl3", "id": "f1599:m76"} {"signature": "def sortDictList(dictList,**kwargs):", "body": "def default_eq_func(value1,value2):cond = (value1 == value2)return(cond)def default_gt_func(value1,value2):cond = (value1 > value2)return(cond)def default_lt_func(value1,value2):cond = (value1 < value2)return(cond)if('' in kwargs):eq_func = kwargs['']else:eq_func = default_eq_funcif('' in kwargs):gt_func = kwargs['']else:gt_func = default_gt_funcif('' in kwargs):lt_func = kwargs['']else:lt_func = default_lt_funcif('' in kwargs):reverse = kwargs['']else:reverse = Falsekeys = kwargs['']def cmp_dict(d1,d2):''''''length = keys.__len__()for i in range(,length):key = keys[i]cond = eq_func(d1[key],d2[key])if(cond):passelse:cond = gt_func(d1[key],d2[key])if(cond):return()else:return(-)return()ndl = dictListndl = sorted(ndl,key=functools.cmp_to_key(cmp_dict),reverse=reverse)return(ndl)", "docstring": "students = [\n {'name':'john','class':'A', 'year':15},\n {'name':'jane','class':'B', 'year':12},\n {'name':'dave','class':'B', 'year':10}\n]\n\nrslt = sortDictList(students,cond_keys=['name','class','year'])\npobj(rslt)\nrslt = sortDictList(students,cond_keys=['name','year','class'])\npobj(rslt)\nrslt = sortDictList(students,cond_keys=['class','name','year'])\npobj(rslt)\nrslt = sortDictList(students,cond_keys=['class','year','name'])\npobj(rslt)\nrslt = sortDictList(students,cond_keys=['year','name','class'])\npobj(rslt)\nrslt = sortDictList(students,cond_keys=['year','name','class'])\npobj(rslt)", "id": "f1599:m79"} {"signature": "def index_first(ol,value):", "body": "return(ol.index(''))", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindex_first(ol,'a')\n####index_first, array_index, indexOf are the same\narray_index(ol,'a')\nindexOf(ol,'a')", "id": "f1599:m81"} {"signature": "def index_firstnot(ol,value):", "body": "length = ol.__len__()for i in range(,length):if(value == ol[i]):passelse:return(i)return(None)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindex_firstnot(ol,'a')\n####index_firstnot, array_indexnot, indexOfnot are the same\narray_indexnot(ol,'a')\nindexOfnot(ol,'a')", "id": "f1599:m82"} {"signature": "def index_last(ol,value):", "body": "length = ol.__len__()for i in range(length-,-,-):if(value == ol[i]):return(i)else:passreturn(None)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindex_last(ol,'a')\n####lastIndexOf is the same as index_last\nlastIndexOf(ol,'a')", "id": "f1599:m83"} {"signature": "def index_lastnot(ol,value):", "body": "length = ol.__len__()for i in range(length-,-,-):if(value == ol[i]):passelse:return(i)return(None)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindex_lastnot(ol,'a')\n####lastIndexOfnot is the same as index_lastnot\nlastIndexOfnot(ol,'a')", "id": "f1599:m84"} {"signature": "def index_which(ol,value,which):", "body": "length = ol.__len__()seq = -for i in range(,length):if(value == ol[i]):seq = seq + if(seq == which):return(i)else:passelse:passreturn(None)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindex_which(ol,'a',0)\nindex_which(ol,'a',1)\nindex_which(ol,'a',2)\nindex_which(ol,'a',3) == None", "id": "f1599:m85"} {"signature": "def index_whichnot(ol,value,which):", "body": "length = ol.__len__()seq = -for i in range(,length):if(value == ol[i]):passelse:seq = seq + if(seq == which):return(i)else:passreturn(None)", "docstring": "from elist.elist import *\nol = 
[1,'a',3,'a',4,'a',5]\nindex_whichnot(ol,'a',0)\nindex_whichnot(ol,'a',1)\nindex_whichnot(ol,'a',2)", "id": "f1599:m86"} {"signature": "def indexes_all(ol,value):", "body": "length = ol.__len__()indexes =[]for i in range(,length):if(value == ol[i]):indexes.append(i)else:passreturn(indexes)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindexes_all(ol,'a')", "id": "f1599:m87"} {"signature": "def indexes_allnot(ol,value):", "body": "length = ol.__len__()indexes =[]for i in range(,length):if(value == ol[i]):passelse:indexes.append(i)return(indexes)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindexes_allnot(ol,'a')", "id": "f1599:m88"} {"signature": "def indexes_some(ol,value,*seqs):", "body": "seqs = list(seqs)length = ol.__len__()indexes =[]seq = -for i in range(,length):if(value == ol[i]):seq = seq + if(seq in seqs):indexes.append(i)else:passelse:passreturn(indexes)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindexes_some(ol,'a',0,2)\nindexes_some(ol,'a',0,1)\nindexes_some(ol,'a',1,2)\nindexes_some(ol,'a',3,4)", "id": "f1599:m89"} {"signature": "def indexes_somenot(ol,value,*seqs):", "body": "seqs = list(seqs)length = ol.__len__()indexes =[]seq = -for i in range(,length):if(value == ol[i]):passelse:seq = seq + if(seq in seqs):indexes.append(i)else:passreturn(indexes)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindexes_somenot(ol,'a',0,2)\nindexes_somenot(ol,'a',0,1)\nindexes_somenot(ol,'a',1,2)\nindexes_somenot(ol,'a',3,4)", "id": "f1599:m90"} {"signature": "def indexes_seqs(ol,value,seqs):", "body": "seqs = list(seqs)length = ol.__len__()indexes =[]seq = -for i in range(,length):if(value == ol[i]):seq = seq + if(seq in seqs):indexes.append(i)else:passelse:passreturn(indexes)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindexes_seqs(ol,'a',{0,2})\nindexes_seqs(ol,'a',{0,1})\nindexes_seqs(ol,'a',{1,2})\nindexes_seqs(ol,'a',{3,4})", "id": "f1599:m91"} {"signature": "def indexes_seqsnot(ol,value,seqs):", "body": "seqs = list(seqs)length = ol.__len__()indexes =[]seq = -for i in range(,length):if(value == ol[i]):passelse:seq = seq + if(seq in seqs):indexes.append(i)else:passreturn(indexes)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindexes_seqsnot(ol,'a',{0,2})\nindexes_seqsnot(ol,'a',{0,1})\nindexes_seqsnot(ol,'a',{1,2})\nindexes_seqsnot(ol,'a',{3,4})", "id": "f1599:m92"} {"signature": "def first_continuous_indexes_slice(ol,value):", "body": "length = ol.__len__()begin = Noneslice = []for i in range(,length):if(ol[i]==value):begin = ibreakelse:passif(begin == None):return(None)else:slice.append(begin)for i in range(begin+,length):if(ol[i]==value):slice.append(i)else:breakreturn(slice)", "docstring": "from elist.elist import *\nol = [1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nfirst_continuous_indexes_slice(ol,\"a\")", "id": "f1599:m93"} {"signature": "def first_continuous_indexesnot_slice(ol,value):", "body": "length = ol.__len__()begin = Noneslice = []for i in range(,length):if(not(ol[i]==value)):begin = ibreakelse:passif(begin == None):return(None)else:slice.append(begin)for i in range(begin+,length):if(not(ol[i]==value)):slice.append(i)else:breakreturn(slice)", "docstring": "from elist.elist import *\nol = [\"a\",0,1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nfirst_continuous_indexesnot_slice(ol,\"a\")", "id": "f1599:m94"} {"signature": "def last_continuous_indexes_slice(ol,value):", "body": "length = ol.__len__()end = Noneslice 
= []for i in range(length-,-,-):if(ol[i]==value):end = ibreakelse:passif(end == None):return(None)else:slice.append(end)for i in range(end-,-,-):if(ol[i]==value):slice.append(i)else:breakslice.reverse()return(slice)", "docstring": "from elist.elist import *\nol = [1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nlast_continuous_indexes_slice(ol,\"a\")", "id": "f1599:m95"} {"signature": "def last_continuous_indexesnot_slice(ol,value):", "body": "length = ol.__len__()end = Noneslice = []for i in range(length-,-,-):if(not(ol[i]==value)):end = ibreakelse:passif(end == None):return(None)else:slice.append(end)for i in range(end-,-,-):if(not(ol[i]==value)):slice.append(i)else:breakslice.reverse()return(slice)", "docstring": "from elist.elist import *\nol = [1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nlast_continuous_indexesnot_slice(ol,\"a\")", "id": "f1599:m96"} {"signature": "def which_continuous_indexes_slice(ol,value,which):", "body": "length = ol.__len__()seq = -cursor = begin = Noneslice = []while(cursor < length):cond1 = (ol[cursor] == value)cond2 = (begin == None)if(cond1 & cond2):begin = cursorslice.append(cursor)cursor = cursor + elif(cond1 & (not(cond2))):slice.append(cursor)cursor = cursor + elif((not(cond1)) & (not(cond2))):seq = seq + if(seq == which):return(slice)else:cursor = cursor + begin = Noneslice = []else:cursor = cursor + if(slice):seq = seq + else:passif(seq == which):return(slice)else:return([])", "docstring": "from elist.elist import *\nol = [1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nwhich_continuous_indexes_slice(ol,\"a\",0)\nwhich_continuous_indexes_slice(ol,\"a\",1)\nwhich_continuous_indexes_slice(ol,\"a\",2)\nwhich_continuous_indexes_slice(ol,\"a\",3)\nwhich_continuous_indexes_slice(ol,\"b\",0)", "id": "f1599:m97"} {"signature": "def which_continuous_indexesnot_slice(ol,value,which):", "body": "length = ol.__len__()seq = -cursor = begin = Noneslice = []while(cursor < length):cond1 = not(ol[cursor] == value)cond2 = (begin == None)if(cond1 & cond2):begin = cursorslice.append(cursor)cursor = cursor + elif(cond1 & (not(cond2))):slice.append(cursor)cursor = cursor + elif((not(cond1)) & (not(cond2))):seq = seq + if(seq == which):return(slice)else:cursor = cursor + begin = Noneslice = []else:cursor = cursor + if(slice):seq = seq + else:passif(seq == which):return(slice)else:return([])", "docstring": "from elist.elist import *\nol = [1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nwhich_continuous_indexesnot_slice(ol,\"a\",0)\nwhich_continuous_indexesnot_slice(ol,\"a\",1)\nwhich_continuous_indexesnot_slice(ol,\"a\",2)\nwhich_continuous_indexesnot_slice(ol,\"a\",3)\nwhich_continuous_indexesnot_slice(ol,\"b\",0)", "id": "f1599:m98"} {"signature": "def some_continuous_indexes_slices(ol,value,*seqs):", "body": "seqs = list(seqs)rslt = []length = ol.__len__()seq = -cursor = begin = Noneslice = []while(cursor < length):cond1 = (ol[cursor] == value)cond2 = (begin == None)if(cond1 & cond2):begin = cursorslice.append(cursor)elif(cond1 & (not(cond2))):slice.append(cursor)elif((not(cond1)) & (not(cond2))):seq = seq + if(seq in seqs):rslt.append(slice)else:passbegin = Noneslice = []else:passcursor = cursor + if(slice):seq = seq + if(seq in seqs):rslt.append(slice)else:passelse:passreturn(rslt)", "docstring": "from elist.elist import *\nol = [1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nsome_continuous_indexes_slices(ol,\"a\",0,2)", "id": "f1599:m99"} {"signature": "def some_continuous_indexesnot_slices(ol,value,*seqs):", "body": "seqs = list(seqs)rslt = []length = 
ol.__len__()seq = -cursor = begin = Noneslice = []while(cursor < length):cond1 = not(ol[cursor] == value)cond2 = (begin == None)if(cond1 & cond2):begin = cursorslice.append(cursor)elif(cond1 & (not(cond2))):slice.append(cursor)elif((not(cond1)) & (not(cond2))):seq = seq + if(seq in seqs):rslt.append(slice)else:passbegin = Noneslice = []else:passcursor = cursor + if(slice):seq = seq + if(seq in seqs):rslt.append(slice)else:passelse:passreturn(rslt)", "docstring": "from elist.elist import *\nol = [1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nsome_continuous_indexesnot_slices(ol,\"a\",0,2)", "id": "f1599:m100"} {"signature": "def seqs_continuous_indexes_slices(ol,value,seqs):", "body": "rslt = []length = ol.__len__()seq = -cursor = begin = Noneslice = []while(cursor < length):cond1 = (ol[cursor] == value)cond2 = (begin == None)if(cond1 & cond2):begin = cursorslice.append(cursor)elif(cond1 & (not(cond2))):slice.append(cursor)elif((not(cond1)) & (not(cond2))):seq = seq + if(seq in seqs):rslt.append(slice)else:passbegin = Noneslice = []else:passcursor = cursor + if(slice):seq = seq + if(seq in seqs):rslt.append(slice)else:passelse:passreturn(rslt)", "docstring": "from elist.elist import *\nol = [1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nseqs_continuous_indexes_slices(ol,\"a\",{0,2})", "id": "f1599:m101"} {"signature": "def seqs_continuous_indexesnot_slices(ol,value,seqs):", "body": "rslt = []length = ol.__len__()seq = -cursor = begin = Noneslice = []while(cursor < length):cond1 = not(ol[cursor] == value)cond2 = (begin == None)if(cond1 & cond2):begin = cursorslice.append(cursor)elif(cond1 & (not(cond2))):slice.append(cursor)elif((not(cond1)) & (not(cond2))):seq = seq + if(seq in seqs):rslt.append(slice)else:passbegin = Noneslice = []else:passcursor = cursor + if(slice):seq = seq + if(seq in seqs):rslt.append(slice)else:passelse:passreturn(rslt)", "docstring": "from elist.elist import *\nol = [1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nseqs_continuous_indexesnot_slices(ol,\"a\",{0,2})", "id": "f1599:m102"} {"signature": "def all_continuous_indexes_slices(ol,value):", "body": "rslt = []length = ol.__len__()cursor = begin = Noneslice = []while(cursor < length):cond1 = (ol[cursor] == value)cond2 = (begin == None)if(cond1 & cond2):begin = cursorslice.append(cursor)elif(cond1 & (not(cond2))):slice.append(cursor)elif((not(cond1)) & (not(cond2))):rslt.append(slice)begin = Noneslice = []else:passcursor = cursor + if(slice):rslt.append(slice)else:passreturn(rslt)", "docstring": "from elist.elist import *\nol = [1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nall_continuous_indexes_slices(ol,\"a\")", "id": "f1599:m103"} {"signature": "def all_continuous_indexesnot_slices(ol,value):", "body": "rslt = []length = ol.__len__()cursor = begin = Noneslice = []while(cursor < length):cond1 = not(ol[cursor] == value)cond2 = (begin == None)if(cond1 & cond2):begin = cursorslice.append(cursor)elif(cond1 & (not(cond2))):slice.append(cursor)elif((not(cond1)) & (not(cond2))):rslt.append(slice)begin = Noneslice = []else:passcursor = cursor + if(slice):rslt.append(slice)else:passreturn(rslt)", "docstring": "from elist.elist import *\nol = [1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nall_continuous_indexesnot_slices(ol,\"a\")", "id": "f1599:m104"} {"signature": "def shift(ol,**kwargs):", "body": "if('' in kwargs):mode = kwargs['']else:mode = \"\"length = ol.__len__()rslt = pop(ol,,mode=mode)return(rslt)", "docstring": "from elist.jprint import pobj\nfrom elist.elist import *\nol = [1,2,3,4]\nid(ol)\nrslt = 
shift(ol)\npobj(rslt)\nol\nid(ol)\nid(rslt['list'])\n####\nol = [1,2,3,4]\nid(ol)\nrslt = shift(ol,mode=\"original\")\nrslt\nol\nid(ol)", "id": "f1599:m105"} {"signature": "def pop(ol,index,**kwargs):", "body": "index = uniform_index(index,ol.__len__())if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"if(mode == \"\"):new = copy.deepcopy(ol)popped = new.pop(index)return({'':popped,'':new})else:popped = ol.pop(index)return(popped)", "docstring": "from elist.jprint import pobj\nfrom elist.elist import *\nol = [1,2,3,4]\nid(ol)\nrslt = pop(ol,2)\npobj(rslt)\nol\nid(ol)\nid(rslt['list'])\n####\nol = [1,2,3,4]\nid(ol)\nrslt = pop(ol,2,mode=\"original\")\nrslt\nol\nid(ol)", "id": "f1599:m106"} {"signature": "def cond_pop(ol,index,**kwargs):", "body": "cond_func = kwargs['']cond_func_args = kwargs['']index = uniform_index(index,ol.__len__())if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"value = ol[index]cond = cond_func(index,value,*cond_func_args)if(mode == \"\"):new = copy.deepcopy(ol)if(cond):popped = new.pop(index)else:popped = newreturn({'':popped,'':new})else:if(cond):popped = ol.pop(index)else:popped = olreturn(popped)", "docstring": "from elist.jprint import pobj\nfrom elist.elist import *\nol = [{'data':0;'type':'number'},{'data':'x';'type':'str'},{'data':'y';'type':'str'},4]\n#cond_func_args is a array\ndef cond_func(index,value,cond_func_args):", "id": "f1599:m107"} {"signature": "def pop_range(ol,start_index,end_index,**kwargs):", "body": "length = ol.__len__()start_index = uniform_index(start_index,length)end_index = uniform_index(end_index,length)if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"if(mode == \"\"):cpol = copy.deepcopy(ol)new = []popped = []for i in range(,start_index):new.append(cpol[i])for i in range(start_index,end_index):popped.append(cpol[i])for i in range(end_index,length):new.append(cpol[i])return({'':popped,'':new})else:tmp = []popped = []for i in range(,start_index):tmp.append(ol[i])for i in range(start_index,end_index):popped.append(ol[i])for i in range(end_index,length):tmp.append(ol[i])ol.clear()for i in range(,tmp.__len__()):ol.append(tmp[i])return(popped)", "docstring": "from elist.jprint import pobj\nfrom elist.elist import *\nol = [1,2,3,4,5,6]\nid(ol)\nrslt = pop_range(ol,2,4)\nol\nid(ol)\nid(rslt['list'])\n####\nol = [1,2,3,4,5,6]\nid(ol)\nrslt = pop_range(ol,2,4,mode=\"original\")\nrslt\nol\nid(ol)", "id": "f1599:m108"} {"signature": "def pop_some(ol,*indexes,**kwargs):", "body": "length = ol.__len__()indexes = list(map(lambda index:uniform_index(index,length),list(indexes)))if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"if(mode == \"\"):cpol = copy.deepcopy(ol)new = []popped = []for i in range(,length):if(i in indexes):popped.append(cpol[i])else:new.append(cpol[i])return({'':popped,'':new})else:tmp = []popped = []for i in range(,length):if(i in indexes):popped.append(ol[i])else:tmp.append(ol[i])ol.clear()for i in range(,tmp.__len__()):ol.append(tmp[i])return(popped)", "docstring": "from elist.jprint import pobj\nfrom elist.elist import *\nol = [1,2,3,4,5,6]\nid(ol)\nrslt = pop_some(ol,0,2,5)\nol\nid(ol)\nid(rslt['list'])\n####\nol = [1,2,3,4,5,6]\nid(ol)\nrslt = pop_some(ol,0,2,5,mode=\"original\")\nrslt\nol\nid(ol)", "id": "f1599:m109"} {"signature": "def pop_indexes(ol,indexes,**kwargs):", "body": "length = ol.__len__()indexes = list(map(lambda index:uniform_index(index,length),list(indexes)))if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"if(mode == \"\"):cpol = copy.deepcopy(ol)new = []popped = []for i in range(,length):if(i in 
indexes):popped.append(cpol[i])else:new.append(cpol[i])return({'':popped,'':new})else:tmp = []popped = []for i in range(,length):if(i in indexes):popped.append(ol[i])else:tmp.append(ol[i])ol.clear()for i in range(,tmp.__len__()):ol.append(tmp[i])return(popped)", "docstring": "from elist.jprint import pobj\nfrom elist.elist import *\nol = [1,2,3,4,5,6]\nid(ol)\nrslt = pop_indexes(ol,{0,-3,5})\nol\nid(ol)\nid(rslt['list'])\n####\nol = [1,2,3,4,5,6]\nid(ol)\nrslt = pop_indexes(ol,{0,-3,5},mode=\"original\")\nrslt\nol\nid(ol)", "id": "f1599:m110"} {"signature": "def remove_first(ol,value,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"if(mode == \"\"):new = copy.deepcopy(ol)new.remove(value)return(new)else:ol.remove(value)return(ol)", "docstring": "from elist.jprint import pobj\nfrom elist.elist import *\nol = [1,'a',3,'a',5,'a']\nid(ol)\nnew = remove_first(ol,'a')\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a']\nid(ol)\nrslt = remove_first(ol,'a',mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)\n####array_remove is the same as remove_first", "id": "f1599:m112"} {"signature": "def remove_firstnot(ol,value,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"length = ol.__len__()if(mode == \"\"):new = copy.deepcopy(ol)for i in range(,length):if(new[i] == value):passelse:new.pop(i)return(new)return(new)else:for i in range(,length):if(ol[i] == value):passelse:ol.pop(i)return(ol)return(ol)", "docstring": "from elist.jprint import pobj\nfrom elist.elist import *\nol = [1,'a',3,'a',5,'a']\nid(ol)\nnew = remove_firstnot(ol,'a')\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a']\nid(ol)\nrslt = remove_firstnot(ol,'a',mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)\n####array_removenot is the same as remove_firstnot", "id": "f1599:m113"} {"signature": "def remove_last(ol,value,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"new = copy.deepcopy(ol)new.reverse()new.remove(value)new.reverse()if(mode == \"\"):return(new)else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a']\nid(ol)\nnew = remove_last(ol,'a')\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a']\nid(ol)\nrslt = remove_last(ol,'a',mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m114"} {"signature": "def remove_lastnot(ol,value,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"length = ol.__len__()if(mode == \"\"):new = copy.deepcopy(ol)for i in range(length-,-,-):if(new[i] == value):passelse:new.pop(i)return(new)return(new)else:for i in range(length-,-,-):if(ol[i] == value):passelse:ol.pop(i)return(ol)return(ol)", "docstring": "from elist.jprint import pobj\nfrom elist.elist import *\nol = [1,'a',3,'a',5,'a']\nid(ol)\nnew = remove_lastnot(ol,'a')\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a']\nid(ol)\nrslt = remove_lastnot(ol,'a',mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m115"} {"signature": "def remove_which(ol,value,which,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"new = copy.deepcopy(ol)length = ol.__len__()if(mode == \"\"):l = new else:l = olseq = -for i in range(,length):if(ol[i]==value):seq = seq + if(seq == which):l.pop(i)breakelse:passelse:passreturn(l)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a']\nid(ol)\nnew = remove_which(ol,'a',1)\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a']\nid(ol)\nrslt = 
remove_which(ol,'a',1,mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m116"} {"signature": "def remove_whichnot(ol,value,which,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"new = copy.deepcopy(ol)length = ol.__len__()if(mode == \"\"):l = new else:l = olseq = -for i in range(,length):if(not(ol[i]==value)):seq = seq + if(seq == which):l.pop(i)breakelse:passelse:passreturn(l)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a']\nid(ol)\nnew = remove_whichnot(ol,'a',1)\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a']\nid(ol)\nrslt = remove_whichnot(ol,'a',1,mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m117"} {"signature": "def remove_some(ol,value,*seqs,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"seqs = list(seqs)new = []length = ol.__len__()seq = -cpol = copy.deepcopy(ol)for i in range(,length):if(cpol[i]==value):seq = seq + if(seq in seqs):passelse:new.append(cpol[i])else:new.append(cpol[i])if(mode == \"\"):return(new) else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nnew = remove_some(ol,'a',1,3)\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nrslt = remove_some(ol,'a',1,3,mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m118"} {"signature": "def remove_somenot(ol,value,*seqs,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"seqs = list(seqs)new = []length = ol.__len__()seq = -cpol = copy.deepcopy(ol)for i in range(,length):if(not(cpol[i]==value)):seq = seq + if(seq in seqs):passelse:new.append(cpol[i])else:new.append(cpol[i])if(mode == \"\"):return(new) else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nnew = remove_somenot(ol,'a',1,3)\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nrslt = remove_somenot(ol,'a',1,3,mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m119"} {"signature": "def remove_seqs(ol,value,seqs,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"seqs = list(seqs)new = []length = ol.__len__()cpol = copy.deepcopy(ol)seq = -for i in range(,length):if(cpol[i]==value):seq = seq + if(seq in seqs):passelse:new.append(cpol[i])else:new.append(cpol[i])if(mode == \"\"):return(new) else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nnew = remove_seqs(ol,'a',{1,3})\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nrslt = remove_seqs(ol,'a',{1,3},mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m120"} {"signature": "def remove_seqsnot(ol,value,seqs,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"seqs = list(seqs)new = []length = ol.__len__()cpol = copy.deepcopy(ol)seq = -for i in range(,length):if(not(cpol[i]==value)):seq = seq + if(seq in seqs):passelse:new.append(cpol[i])else:new.append(cpol[i])if(mode == \"\"):return(new) else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nnew = remove_seqsnot(ol,'a',{1,3})\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nrslt = remove_seqsnot(ol,'a',{1,3},mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m121"} {"signature": "def remove_all(ol,value,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"new = []length = 
ol.__len__()cpol = copy.deepcopy(ol)for i in range(,length):if(cpol[i]==value):passelse:new.append(cpol[i])if(mode == \"\"):return(new) else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nnew = remove_all(ol,'a')\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nrslt = remove_all(ol,'a',mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m122"} {"signature": "def remove_allnot(ol,value,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"new = []length = ol.__len__()cpol = copy.deepcopy(ol)for i in range(,length):if(cpol[i]==value):new.append(cpol[i])else:passif(mode == \"\"):return(new) else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nnew = remove_allnot(ol,'a')\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nrslt = remove_allnot(ol,'a',mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m123"} {"signature": "def remove_many(ol,values,seqs,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"values = copy.deepcopy(values)seqs = copy.deepcopy(seqs)cursors = [-] * values.__len__()new = []length = ol.__len__()cpol = copy.deepcopy(ol)for i in range(,length):label = Truefor j in range(,cursors.__len__()):which = seqs[j]value = values[j]if(cpol[i] == value):cursors[j] = cursors[j] + if(cursors[j] == which):label = Falsebreakelse:passelse:passif(label):new.append(cpol[i])else:passif(mode == \"\"):return(new) else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [1,'a',3,'b',5,'a',6,'a',7,'b',8,'b',9]\nid(ol)\nnew = remove_many(ol,['a','b'],[1,2])\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'b',5,'a',6,'a',7,'b',8,'b',9]\nid(ol)\nrslt = remove_many(ol,['a','b'],[1,2],mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m124"} {"signature": "def remove_manynot(ol,values,seqs,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"values = copy.deepcopy(values)seqs = copy.deepcopy(seqs)cursors = [-] * values.__len__()new = []length = ol.__len__()cpol = copy.deepcopy(ol)for i in range(,length):label = Truefor j in range(,cursors.__len__()):which = seqs[j]value = values[j]if(not(cpol[i] == value)):cursors[j] = cursors[j] + if(cursors[j] == which):label = Falsebreakelse:passelse:passif(label):new.append(cpol[i])else:passif(mode == \"\"):return(new) else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [1,'a',3,'b',5,'a',6,'a',7,'b',8,'b',9]\nid(ol)\nnew = remove_manynot(ol,['a','b'],[1,2])\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'b',5,'a',6,'a',7,'b',8,'b',9]\nid(ol)\nrslt = remove_manynot(ol,['a','b'],[1,2],mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m125"} {"signature": "def cond_remove_all(ol,**kwargs):", "body": "cond_func = kwargs['']if('' in kwargs):cond_func_args = kwargs['']else:cond_func_args = []if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"new = copy.deepcopy(ol)selected = find_all(new,cond_func,*cond_func_args)selected_indexes = array_map(selected,lambda ele:ele[''])new = pop_indexes(new,selected_indexes)['']if(mode == \"\"):return(new)else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\ndef afterCH(ele,ch):\n cond = (ord(str(ele)) > ord(ch))\n return(cond)\n\nnew = 
cond_remove_all(ol,cond_func=afterCH,cond_func_args=['B'])\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\nrslt = cond_remove_all(ol,cond_func=afterCH,cond_func_args=['B'],mode='original')\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m126"} {"signature": "def cond_remove_seqs(ol,seqs,**kwargs):", "body": "cond_func = kwargs['']if('' in kwargs):cond_func_args = kwargs['']else:cond_func_args = []if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"new = copy.deepcopy(ol)selected = find_all(new,cond_func,*cond_func_args)selected_indexes = array_map(selected,lambda ele:ele[''])selected_indexes = pop_indexes(selected_indexes,seqs)['']new = pop_indexes(new,selected_indexes)['']if(mode == \"\"):return(new)else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\ndef afterCH(ele,ch):\n cond = (ord(str(ele)) > ord(ch))\n return(cond)\n\nnew = cond_remove_seqs(ol,[0,2],cond_func=afterCH,cond_func_args=['B'])\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\nrslt = cond_remove_seqs(ol,[0,2],cond_func=afterCH,cond_func_args=['B'],mode='original')\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m127"} {"signature": "def cond_remove_some(ol,*some,**kwargs):", "body": "cond_func = kwargs['']if('' in kwargs):cond_func_args = kwargs['']else:cond_func_args = []if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"seqs = list(some)rslt = cond_remove_seqs(ol,seqs,cond_func=cond_func,cond_func_args=cond_func_args)return(rslt)", "docstring": "from elist.elist import *\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\n\ndef afterCH(ele,ch):\n cond = (ord(str(ele)) > ord(ch))\n return(cond)\n\nnew = cond_remove_some(ol,0,2,cond_func=afterCH,cond_func_args=['B'])\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\nrslt = cond_remove_some(ol,0,2,cond_func=afterCH,cond_func_args=['B'],mode='original')\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m128"} {"signature": "def init(len,default_element=None):", "body": "rslt = []for i in range(,len):rslt.append(copy.deepcopy(default_element))return(rslt)", "docstring": "from elist.elist import *\ninit(5)\ninit(5,\"x\")", "id": "f1599:m129"} {"signature": "def init_range(start,end,step):", "body": "return(list(range(start,end,step)))", "docstring": "init_range(1,20,2)", "id": "f1599:m130"} {"signature": "def intlize(l):", "body": "return(list(map(lambda ele:int(ele),l)))", "docstring": "from elist.elist import *\nl = [\"1\",\"3\",\"4\",\"5\"]\nintlize(l)", "id": "f1599:m131"} {"signature": "def strlize(l):", "body": "return(list(map(lambda ele:str(ele),l)))", "docstring": "from elist.elist import *\nl = [1,3,4,5]\nstrlize(l)", "id": "f1599:m132"} {"signature": "def array_from(obj,func,*args):", "body": "if(func):l = list(obj)rslt = list(map(lambda ele:func(ele,*args),l))return(rslt)else:return(list(obj))", "docstring": "from elist.elist import *\narray_from(\"abcd\",None)\n#####\ndef map_func(ele,x,y):\n return(int(ele)+x+y)\n\narray_from(\"1234\",map_func,1000,100)\n\ndef map_func(ele):\n return(int(ele)*2)\n\narray_from(\"1234\",map_func)\n\narray_from(\"1234\",None)", "id": "f1599:m133"} {"signature": "def array_of(*eles):", "body": "return(list(eles))", "docstring": "from elist.elist import *\narray_of(1,2,4,5,6)", "id": "f1599:m134"} {"signature": "def deepcopy(ol):", "body": "return(copy.deepcopy(ol))", "docstring": "from elist.elist import *\nol = [1,2,3,4]\nid(ol)\nnew = 
deepcopy(ol)\nnew\nid(new)", "id": "f1599:m135"} {"signature": "def copy_within(ol,target, start=None, end=None):", "body": "length = ol.__len__()if(start==None):start = else:passif(end==None):end = lengthelse:passtarget = uniform_index(target,length)start = uniform_index(start,length)end = uniform_index(end,length)cplen = end - startcpend = target+cplenif(target+cplen > length):cpend = lengthelse:passshift = start - targetif(shift>=):for i in range(target,cpend):ol[i] = ol[i+shift]else:for i in range(cpend-,target-,-):ol[i] = ol[i+shift]return(ol)", "docstring": "from elist.elist import *\nol = [1, 2, 3, 4, 5]\nid(ol)\nrslt = copyWithin(ol,0,3,4)\nrslt\nid(rslt)\n####\nol = [1, 2, 3, 4, 5]\nid(ol)\nrslt = copyWithin(ol,0,3)\nrslt\nid(rslt)\n####\nol = [1, 2, 3, 4, 5]\nid(ol)\nrslt = copyWithin(ol,-2)\nrslt\nid(rslt)\n####copyWithin is the same as copy_within", "id": "f1599:m136"} {"signature": "def reverse(ol,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"if(mode == \"\"):new = copy.deepcopy(ol)new.reverse()return(new) else:ol.reverse()return(ol)'',''", "docstring": "from elist.elist import *\nol = [1,2,3,4]\nid(ol)\nnew = reverse(ol)\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,2,3,4]\nid(ol)\nrslt = reverse(ol,mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m137"} {"signature": "def comprise(list1,list2,**kwargs):", "body": "if('' in kwargs):mode = kwargs['']else:mode = \"\"len_1 = list1.__len__()len_2 = list2.__len__()if(len_2>len_1):return(False)else:if(mode==\"\"):if(list2 == list1[:len_2]):return(True)else:return(False)else:end = len_1 - len_2for i in range(,end+):if(list2 == list1[i:(i+len_2)]):return(True)else:passreturn(False)", "docstring": "from elist.elist import *\ncomprise([1,2,3,4,5],[2,3,4],mode=\"loose\")\ncomprise([1,2,3,4,5],[2,3,4])\ncomprise([1,2,3,4,5],[2,3,4],mode=\"strict\")\ncomprise([1,2,3,4,5],[1,2,3,4],mode=\"strict\")\n#not recursive ,only one level\n#please refer to ListTree.search for recursive support", "id": "f1599:m138"} {"signature": "def entries(ol):", "body": "rslt = []length = ol.__len__()for i in range(,length):entry = [i,ol[i]]rslt.append(entry)return(rslt)", "docstring": "from elist.elist import *\nol = ['a','b','c']\nrslt = entries(ol)\nrslt", "id": "f1599:m139"} {"signature": "def includes(ol,value):", "body": "return((value in ol))", "docstring": "from elist.elist import *\nol = [1,2,3,4]\nincludes(ol,3)\nincludes(ol,5)", "id": "f1599:m140"} {"signature": "def toString(ol):", "body": "return(ol.__str__())", "docstring": "from elist.elist import *\nol = [1,2,3,4]\ntoString(ol)", "id": "f1599:m141"} {"signature": "def toSource(ol):", "body": "return(ol.__repr__())", "docstring": "from elist.elist import *\nol = [1,2,3,4]\ntoSource(ol)", "id": "f1599:m142"} {"signature": "def splice(ol,start,deleteCount,*eles,**kwargs):", "body": "if('' in kwargs):mode = kwargs['']else:mode = \"\"length = ol.__len__()new = copy.deepcopy(ol)if(start >= length):eles = list(eles)new.extend(eles)else:start = uniform_index(start,length)end = start + deleteCounttmp = pop_range(new,start,end,mode=\"\")['']new = insert_some(tmp,*eles,index=start,mode=\"\")if(mode == \"\"):return(new)else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [\"angel\", \"clown\", \"mandarin\", \"surgeon\"]\nid(ol)\nnew = splice(ol,2,0,\"drum\")\nnew\nid(new)\n####\nol = [\"angel\", \"clown\", \"mandarin\", \"surgeon\"]\nid(ol)\nnew = splice(ol,2,1,\"drum\",mode=\"original\")\nnew\nid(new)\n####\nol = 
[1,2,3,4,5,6]\nid(ol)\nnew = splice(ol,2,2,77,777)\nnew\nid(new)", "id": "f1599:m143"} {"signature": "def slice(ol,start,end=None,**kwargs):", "body": "if('' in kwargs):mode = kwargs['']else:mode = \"\"length = ol.__len__()new = copy.deepcopy(ol)if(end == None):end = lengthelse:end = uniform_index(end,length)start = uniform_index(start,length)if(mode == \"\"):return(new[start:end])else:ol.clear()ol.extend(new[start:end])return(ol)", "docstring": "from elist.elist import *\nol = [1,2,3,4,5]\nid(ol)\nnew = slice(ol,2,4)\nnew\nid(new)\n####\nid(ol)\nrslt = slice(ol,1,-2,mode=\"original\")\nrslt\nid(rslt)", "id": "f1599:m144"} {"signature": "def join(ol,separator=\"\"):", "body": "if(ol.__len__() == ):return(\"\")else:passcond = (type(ol[])==type(b''))if(cond):rslt = b''else:rslt =\"\"length = ol.__len__()for i in range(,length-):ele = ol[i]if(cond):passelse:ele = str(ele)rslt = rslt + ele + separatorif(cond):rslt = rslt + ol[length - ]else:rslt = rslt + str(ol[length - ])return(rslt)", "docstring": "from elist.elist import *\nol = [1,2,3,4]\njoin(ol,separator=\"-\")", "id": "f1599:m145"} {"signature": "def join2(ol,*sps):", "body": "rslt =\"\"length = ol.__len__()for i in range(,length-):rslt = rslt + str(ol[i]) + sps[i]rslt = rslt + str(ol[length - ])return(rslt)", "docstring": "from elist.elist import *\nol = [1,2,3,4]\njoin2(ol,\"-\",\"+\",\"*\")", "id": "f1599:m146"} {"signature": "def htmljoin(ol,sp,**kwargs):", "body": "if('' in kwargs):outer = kwargs['']else:outer = \"\"if(outer):head = \"\" + outer + \">\"tail = \"\" + outer + \">\"else:head = \"\"tail = \"\"rslt = headlength = ol.__len__()begin = \"\" + sp + \">\"end = \"\" + sp + \">\"for i in range(,length):rslt = rslt + begin + str(ol[i]) + endrslt = rslt + tailreturn(rslt)", "docstring": "ol = [1,2,3,4]\nhtmljoin(ol,\"option\",outer=\"select\")", "id": "f1599:m147"} {"signature": "def uniqualize(l,**kwargs):", "body": "if('' in kwargs):mode = kwargs['']else:mode = ''pt = copy.deepcopy(l)seqs =[]freq = {}for i in range(,pt.__len__()):v = pt[i]if(v in freq):freq[v] = freq[v] + else:freq[v] = seqs.append(i)npt = select_seqs(pt,seqs)pt = nptif(mode == ''):return(npt)else:l.clear()l.extend(npt)return(l)", "docstring": "from elist.elist import *\nl = [1, 2, 2]\nnew = uniqualize(l)\nnew\nid(l)\nid(new)\n####\nl = [1, 2, 2]\nrslt = uniqualize(l,mode=\"original\")\nrslt\nid(l)\nid(rslt)", "id": "f1599:m148"} {"signature": "def cond_uniqualize(l,**kwargs):", "body": "cond_func = kwargs['']if('' in kwargs):cond_func_args = kwargs['']else:cond_func_args = []if('' in kwargs):reserved_mapping = kwargs['']else:reserved_mapping = Noneif('' in kwargs):mode = kwargs['']else:mode = ''desc = cond_value_indexes_mapping(l,cond_func=cond_func,cond_func_args=cond_func_args,with_none=True)keys = list(desc.keys())if(None in keys):keys.remove(None)else:passrmapping = {}for key in keys:rmapping[key] = if(reserved_mapping == None):passelse:for key in reserved_mapping:rmapping[key] = reserved_mapping[key]reserved_indexes = []for key in keys:indexes = desc[key]index = indexes[rmapping[key]]reserved_indexes.append(index)newcopy = copy.deepcopy(l)new = select_seqs(newcopy,reserved_indexes)if(None in desc):for index in desc[None]:new.append(newcopy[index])else:passif(mode == \"\"):return(new)else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nl = [('BIGipServer', 'rd100'), ('TS013d8ed5', '00A0'), ('BIGipServer', 'rd200'), ('TS013d8ed5', '00B0'), ('SID', '1'), ('SID', '2')]\n\ndef cond_func(ele,*args):\n cond = ele[0]\n 
return(cond)\n\nuniqualized = cond_uniqualize(l,cond_func=cond_func)\npobj(uniqualized)\n\nl = [('BIGipServer', 'rd100'), ('TS013d8ed5', '00A0'), ('BIGipServer', 'rd200'), ('TS013d8ed5', '00B0'), ('SID', '1'), ('SID', '2')]\n\nreserved_mapping = {'BIGipServer':0,'TS013d8ed5':1,'SID':1}\nuniqualized = cond_uniqualize(l,cond_func=cond_func,reserved_mapping=reserved_mapping)\npobj(uniqualized)", "id": "f1599:m149"} {"signature": "def interleave(*arrays,**kwargs):", "body": "anum = arrays.__len__()rslt = []length = arrays[].__len__()for j in range(,length):for i in range(,anum):array = arrays[i]rslt.append(array[j])return(rslt)", "docstring": "arr1 = [1,2,3,4]\narr2 = ['a','b','c','d']\narr3 = ['@','#','%','*']\ninterleave(arr1,arr2,arr3)", "id": "f1599:m150"} {"signature": "def deinterleave(ol,gnum):", "body": "def test_func(ele,index,interval,which):cond= (index % interval == which)return(cond)rslt = []for i in range(,gnum):arr = cond_select_all2(ol,cond_func = test_func,cond_func_args = [gnum,i])rslt.append(arr)return(rslt)", "docstring": "ol = [1, 2, 3, 4, 5, 6, 7, 8, 9]\ndeinterleave(ol,3)", "id": "f1599:m151"} {"signature": "def loose_in(pl,k):", "body": "cond = some(pl,lambda ele:(k in ele))['']return(cond)", "docstring": "pl = ['bcd','xabcxx','x','y']\nloose_in(pl,'abc')", "id": "f1599:m155"} {"signature": "def select_loose_in(pl,k):", "body": "def cond_func(ele,index,k):if(type(ele) == type([])):cond = loose_in(ele,k)else:cond = (k in ele)return(cond)arr = cond_select_values_all2(pl,cond_func=cond_func, cond_func_args =[k])return(arr)", "docstring": "pl = ['bcd','xabcxx','x','y']\nselect_loose_in(pl,'abc')", "id": "f1599:m156"} {"signature": "def select_strict_in(pl,k):", "body": "def cond_func(ele,index,k):if(type(ele) == type([])):cond = (k in ele)else:cond = (k == ele)return(cond)arr = cond_select_values_all2(pl,cond_func=cond_func, cond_func_args =[k])return(arr)", "docstring": "pl = ['bcd','xabcxx','x','y']\nselect_strict_in(pl,'abc')", "id": "f1599:m157"} {"signature": "def regex_in(pl,regex):", "body": "def cond_func(ele,regex):m = regex.search(ele)if(m == None):return(False)else:return(True)cond = some(pl,cond_func,regex)['']return(cond)", "docstring": "regex = re.compile(\"^[a-z]+$\")\npl = ['b1c3d','xab15cxx','1x','y2']\nregex_in(pl,regex)\n\nregex = re.compile(\"^[0-9a-z]+$\")\npl = ['b1c3d','xab15cxx','1x','y2']\nregex_in(pl,regex)", "id": "f1599:m158"} {"signature": "def select_regex_in(pl,regex):", "body": "def cond_func(ele,index,regex):if(type(ele)==type([])):cond = regex_in(ele,regex)else:m = regex.search(ele)if(m == None):cond = Falseelse:cond = Truereturn(cond)arr = cond_select_values_all2(pl,cond_func=cond_func, cond_func_args =[regex])return(arr)", "docstring": "regex = re.compile(\"^x.*x$\")\npl = ['bcd','xabcxx','xx','y']\nselect_regex_in(pl,'abc')", "id": "f1599:m159"} {"signature": "def fill(ol,value,start=None, end=None,**kwargs):", "body": "if('' in kwargs):mode = kwargs['']else:mode = \"\"length = ol.__len__()if(start==None):start = else:passif(end==None):end = lengthelse:passstart = uniform_index(start,length)end = uniform_index(end,length)new = copy.deepcopy(ol)for i in range(start,end):new[i] = valueif(mode == \"\"):return(new)else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [1, 2, 3,4,5]\nid(ol)\nrslt = fill(ol,4)\nrslt\nid(rslt)\n####\nol = [1, 2, 3,4,5]\nid(ol)\nrslt = fill(ol,4,1)\nrslt\nid(rslt)\n####\nol = [1, 2, 3,4,5]\nid(ol)\nrslt = fill(ol,6,1,3,mode=\"original\")\nrslt\nid(rslt)", "id": "f1599:m161"} 
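All of the list-mutating helpers above (prepend, extend, push, pop, remove_*, replace_*, fill, ...) share one convention: by default they leave the input list untouched and return a rebuilt copy, while mode="original" rewrites the input in place and returns the same object, as the id() checks in their docstrings show. Below is a minimal sketch of that dispatch using fill as the example; the keyword name 'mode' and the default "new" are assumptions, since the stored bodies elide their string literals.

import copy

def uniform_index(index, length):
    # negative indexes count back from the end; out-of-range values are clamped to [0, length]
    if index < 0:
        return max(length + index, 0)
    return min(index, length)

def fill(ol, value, start=None, end=None, **kwargs):
    mode = kwargs.get("mode", "new")          # assumed keyword and default (elided in the corpus body)
    length = len(ol)
    start = 0 if start is None else uniform_index(start, length)
    end = length if end is None else uniform_index(end, length)
    new = copy.deepcopy(ol)
    for i in range(start, end):
        new[i] = value
    if mode == "new":                         # default: hand back a fresh list, ol is untouched
        return new
    ol.clear()                                # mode="original": mutate ol itself and return it
    ol.extend(new)
    return ol

ol = [1, 2, 3, 4, 5]
print(fill(ol, 6, 1, 3), ol)                      # [1, 6, 6, 4, 5] [1, 2, 3, 4, 5]
print(fill(ol, 6, 1, 3, mode="original") is ol)   # True: ol is modified in place
print(ol)                                         # [1, 6, 6, 4, 5]

Returning the rebuilt list even in "original" mode keeps call sites uniform: rslt = fill(ol, ...) reads the same whether or not ol was mutated.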
{"signature": "def reduce_left(ol,callback,initialValue):", "body": "length = ol.__len__()accumulator = initialValuefor i in range(,length):accumulator = callback(accumulator,ol[i])return(accumulator)", "docstring": "from elist.elist import *\ndef callback(accumulator,currentValue):\n accumulator.append(currentValue[0])\n accumulator.append(currentValue[1])\n return(accumulator)\n\nol = [(1,2),(\"a\",\"b\"),(\"x\",\"y\")]\nreduce_left(ol,callback,[])\n#array_reduce, reduceLeft ,reduce_left are the same", "id": "f1599:m175"} {"signature": "def reduce_right(ol,callback,initialValue):", "body": "length = ol.__len__()accumulator = initialValuefor i in range(length-,-,-):accumulator = callback(accumulator,ol[i])return(accumulator)", "docstring": "from elist.elist import *\ndef callback(accumulator,currentValue):\n accumulator.append(currentValue[0])\n accumulator.append(currentValue[1])\n return(accumulator)\n\nol = [(1,2),(\"a\",\"b\"),(\"x\",\"y\")]\nreduce_right(ol,callback,[])\n#reduceRight,reduce_right are the same", "id": "f1599:m176"} {"signature": "def diff_indexes(l1,l2):", "body": "rslt = []for i in range(,l1.__len__()):if(l1[i]!=l2[i]):rslt.append(i)return(rslt)", "docstring": "from elist.elist import *\nl1 = [1,2,3,5]\nl2 = [0,2,3,4]\ndiff_indexes(l1,l2)", "id": "f1599:m177"} {"signature": "def diff_values(l1,l2):", "body": "rslt = []for i in range(,l1.__len__()):if(l1[i]!=l2[i]):rslt.append(l1[i])return(rslt)", "docstring": "from elist.elist import *\nl1 = [1,2,3,5]\nl2 = [0,2,3,4]\ndiff_values(l1,l2)", "id": "f1599:m178"} {"signature": "def same_indexes(l1,l2):", "body": "rslt = []for i in range(,l1.__len__()):if(l1[i]==l2[i]):rslt.append(i)return(rslt)", "docstring": "from elist.elist import *\nl1 = [1,2,3,5]\nl2 = [0,2,3,4]\nsame_indexes(l1,l2)", "id": "f1599:m179"} {"signature": "def same_values(l1,l2):", "body": "rslt = []for i in range(,l1.__len__()):if(l1[i]==l2[i]):rslt.append(l1[i])return(rslt)", "docstring": "from elist.elist import *\nl1 = [1,2,3,5]\nl2 = [0,2,3,4]\nsame_values(l1,l2)", "id": "f1599:m180"} {"signature": "def value_indexes_mapping(l):", "body": "pt = copy.deepcopy(l)desc = {}vset = set({})for v in pt:vset.add(v)for v in vset:desc[v] = []for i in range(,l.__len__()):desc[l[i]].append(i)return(desc)", "docstring": "from elist.elist import *\nfrom elist.jprint import pobj\nl = ['a','b','b','a','c','b']\ndesc = value_indexes_mapping(l)\npobj(desc)", "id": "f1599:m181"} {"signature": "def cond_value_indexes_mapping(l,**kwargs):", "body": "cond_func = kwargs['']if('' in kwargs):cond_func_args = kwargs['']else:cond_func_args = []if('' in kwargs):with_none = kwargs['']else:with_none = Falsedesc = {}for i in range(,l.__len__()):ele = l[i]cond = cond_func(ele,*cond_func_args)if((cond == None)&(not(with_none))):passelse:if(cond in desc):desc[cond].append(i)else:desc[cond] = [i]return(desc)", "docstring": "from elist.elist import *\nl = [('BIGipServer', 'rd19'), ('TS013d8ed5', '0105b6b0'), ('BIGipServer', 'rd19'), ('TS013d8ed5', '0105b6b0'), ('SID', '1'), ('SID', '2')]\n\ndef cond_func(ele,*args):\n cond = ele[0]\n return(cond)\n\ndesc = cond_value_indexes_mapping(l,cond_func=cond_func)\npobj(desc)", "id": "f1599:m182"} {"signature": "def getitem_via_pathlist(ol,pathlist):", "body": "this = olfor i in range(,pathlist.__len__()):key = pathlist[i]this = this.__getitem__(key)return(this)", "docstring": "from elist.elist import *\ny = ['a',['b',[\"bb\"]],'c']\ny[1][1]\ngetitem_via_pathlist(y,[1,1])", "id": "f1599:m183"} {"signature": "def 
getitem_via_pathlist2(pathlist,ol):", "body": "this = olfor i in range(,pathlist.__len__()):key = pathlist[i]this = this.__getitem__(key)return(this)", "docstring": "from elist.elist import *\ny = ['a',['b',[\"bb\"]],'c']\ny[1][1]\ngetitem_via_pathlist2([1,1],y)", "id": "f1599:m184"} {"signature": "def getitem_via_sibseqs(ol,*sibseqs):", "body": "pathlist = list(sibseqs)this = olfor i in range(,pathlist.__len__()):key = pathlist[i]this = this.__getitem__(key)return(this)", "docstring": "from elist.elist import *\ny = ['a',['b',[\"bb\"]],'c']\ny[1][1]\ngetitem_via_sibseqs(y,1,1)", "id": "f1599:m185"} {"signature": "def setitem_via_pathlist(ol,value,pathlist):", "body": "this = olfor i in range(,pathlist.__len__()-):key = pathlist[i]this = this.__getitem__(key)this.__setitem__(pathlist[-],value)return(ol)", "docstring": "from elist.elist import *\ny = ['a',['b',[\"bb\"]],'c']\ny[1][1]\nsetitem_via_pathlist(y,\"500\",[1,1])\ny", "id": "f1599:m188"} {"signature": "def setitem_via_sibseqs(ol,value,*sibseqs):", "body": "this = olpathlist = list(sibseqs)for i in range(,pathlist.__len__()-):key = pathlist[i]this = this.__getitem__(key)this.__setitem__(pathlist[-],value)return(ol)", "docstring": "from elist.elist import *\ny = ['a',['b',[\"bb\"]],'c']\ny[1][1]\nsetitem_via_sibseqs(y,\"500\",1,1)\ny", "id": "f1599:m189"} {"signature": "def delitem_via_pathlist(ol,pathlist):", "body": "this = olfor i in range(,pathlist.__len__()-):key = pathlist[i]this = this.__getitem__(key)this.__delitem__(pathlist[-])return(ol)", "docstring": "from elist.elist import *\ny = ['a',['b',[\"bb\"]],'c']\ny[1][1]\ndelitem_via_pathlist(y,[1,1])\ny", "id": "f1599:m190"} {"signature": "def delitem_via_sibseqs(ol,*sibseqs):", "body": "pathlist = list(sibseqs)this = olfor i in range(,pathlist.__len__()-):key = pathlist[i]this = this.__getitem__(key)this.__delitem__(pathlist[-])return(ol)", "docstring": "from elist.elist import *\ny = ['a',['b',[\"bb\"]],'c']\ny[1][1]\ndelitem_via_sibseqs(y,1,1)\ny", "id": "f1599:m191"} {"signature": "def replace_seqs(ol,value,indexes,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"indexes = list(indexes)new = []length = ol.__len__()cpol = copy.deepcopy(ol)for i in range(,length):if(i in indexes):new.append(value)else:new.append(cpol[i])if(mode == \"\"):return(new)else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nnew = replace_seqs(ol,'AAA',[1,3,7])\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nrslt = replace_seqs(ol,'AAA',[1,3,7],mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)\n#replace_indexes = replace_seqs", "id": "f1599:m192"} {"signature": "def replace_some(ol,value,*indexes,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"indexes = list(indexes)return(replace_seqs(ol,value,indexes,mode=mode))", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nnew = replace_some(ol,'AAA',1,3,7)\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nrslt = replace_some(ol,'AAA',1,3,7,mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m193"} {"signature": "def replace_value_seqs(ol,src_value,dst_value,seqs,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"indexes = indexes_seqs(ol,src_value,seqs)return(replace_indexes(ol,dst_value,indexes,mode=mode))", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nnew = 
replace_value_seqs(ol,'a','AAA',[0,1])\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nrslt = replace_value_seqs(ol,'a','AAA',[0,1],mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m194"} {"signature": "def replace_value_some(ol,src_value,dst_value,*seqs,**kwargs):", "body": "if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"return(replace_value_seqs(ol,src_value,dst_value,list(seqs),mode=mode))", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nnew = replace_value_some(ol,'a','AAA',0,1)\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nrslt = replace_value_some(ol,'a','AAA',0,1,mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m195"} {"signature": "def cond_replace_value_all(ol,dst_value,**kwargs):", "body": "cond_func = kwargs['']if('' in kwargs):cond_func_args = kwargs['']else:cond_func_args = []if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"new = copy.deepcopy(ol)selected = find_all(new,cond_func,*cond_func_args)selected_indexes = array_map(selected,lambda ele:ele[''])new = replace_seqs(new,dst_value,selected_indexes)if(mode == \"\"):return(new)else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\ndef afterCH(ele,ch):\n cond = (ord(str(ele)) > ord(ch))\n return(cond)\n\nnew = cond_replace_value_all(ol,\"REPLACED\",cond_func=afterCH,cond_func_args=['B'])\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\nrslt = cond_replace_value_all(ol,\"REPLACED\",cond_func=afterCH,cond_func_args=['B'],mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m196"} {"signature": "def cond_replace_value_seqs(ol,dst_value,seqs,**kwargs):", "body": "cond_func = kwargs['']if('' in kwargs):cond_func_args = kwargs['']else:cond_func_args = []if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"new = copy.deepcopy(ol)selected = find_all(new,cond_func,*cond_func_args)selected_indexes = array_map(selected,lambda ele:ele[''])selected_indexes = select_seqs(selected_indexes,seqs)new = replace_seqs(new,dst_value,selected_indexes)if(mode == \"\"):return(new)else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\ndef afterCH(ele,ch):\n cond = (ord(str(ele)) > ord(ch))\n return(cond)\n\nnew = cond_replace_value_seqs(ol,\"REPLACED\",[0,2],cond_func=afterCH,cond_func_args=['B'])\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\nrslt = cond_replace_value_seqs(ol,\"REPLACED\",[0,2],cond_func=afterCH,cond_func_args=['B'],mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m197"} {"signature": "def cond_replace_value_some(ol,dst_value,*some,**kwargs):", "body": "cond_func = kwargs['']if('' in kwargs):cond_func_args = kwargs['']else:cond_func_args = []if('' in kwargs):mode = kwargs[\"\"]else:mode = \"\"seqs = list(some)new = copy.deepcopy(ol)selected = find_all(new,cond_func,*cond_func_args)selected_indexes = array_map(selected,lambda ele:ele[''])selected_indexes = select_seqs(selected_indexes,seqs)new = replace_seqs(new,dst_value,selected_indexes)if(mode == \"\"):return(new)else:ol.clear()ol.extend(new)return(ol)", "docstring": "from elist.elist import *\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\ndef afterCH(ele,ch):\n cond = (ord(str(ele)) > ord(ch))\n return(cond)\n\nnew = 
cond_replace_value_some(ol,\"REPLACED\",0,2,cond_func=afterCH,cond_func_args=['B'])\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\nrslt = cond_replace_value_some(ol,\"REPLACED\",0,2,cond_func=afterCH,cond_func_args=['B'],mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m198"} {"signature": "def rangize(break_points,length):", "body": "bps = array_map(break_points,uniform_index,length)bps.sort()bps = prepend(bps,)bps = append(bps,length)bps = uniqualize(bps)bpslen = bps.__len__()secs=[(,bps[])]for i in range(,bpslen-):r = (bps[i],bps[i+])secs.append(r)secs.append((bps[bpslen-],length))if(secs[][] == secs[][]):secs.pop()else:passif(secs[-][] == secs[-][]):secs.pop(-)else:passreturn(secs)", "docstring": "break_points = [1,3,9,12,-2]\nlength = 15\nsecs = rangize(break_points,length)\nforEach(secs,print)", "id": "f1599:m199"} {"signature": "def rangize_supplement(spans,lngth):", "body": "rslt = []si = ei = spans[][]if(si == ei):passelse:rslt.append((si,ei))prev_ei = spans[][]for i in range(,spans.__len__()):si = prev_eiei = spans[i][]rslt.append((si,ei))prev_ei = spans[i][]if(prev_ei < lngth):rslt.append((prev_ei,lngth))else:rslt.append((prev_ei,lngth+))return(rslt)", "docstring": "spans = [(0, 3), (4, 7), (8, 10), (11, 12), (13, 16), (17, 20)]\nrangize_supplement(spans,24)", "id": "f1599:m201"} {"signature": "def rangize_supp(spans,lngth):", "body": "rslt = []si = ei = spans[][]if(si == ei):passelse:rslt.append((si,ei))prev_ei = spans[][]for i in range(,spans.__len__()):si = prev_eiei = spans[i][]rslt.append((si,ei))prev_ei = spans[i][]if(prev_ei < lngth):rslt.append((prev_ei,lngth))else:passreturn(rslt)", "docstring": "spans = [(0, 3), (4, 7), (8, 10), (11, 12), (13, 16), (17, 20)]\nrangize_supplement(spans,24)", "id": "f1599:m202"} {"signature": "def range_compress(ol):", "body": "T = (type(ol[]) == type())if(T):l = olelse:l = array_map(ol,ord)length = l.__len__()secs = []si = ei = prev = l[]for i in range(,length):curr = l[i]cond = (curr == (prev+))if(cond):ei = iprev = currelse:if(T):sec = (l[si],l[ei])else:sec = (ol[si],ol[ei])if(si == ei):sec = sec[]else:passsecs.append(sec)si = iei = i prev = currif(T):sec = (l[si],l[ei])else:sec = (ol[si],ol[ei])if(si == ei):sec = sec[]else:passsecs.append(sec)return(secs)", "docstring": "#only support sorted-ints or sorted-ascii\nl = [1,5,6,7,8,13,14,18,30,31,32,33,34]\nrange_compress(l)\nl = [1,5,6,7,8,13,14,18,30,31,32,33,34,40]\nrange_compress(l)\nl = ['a','b','c','d','j','k','l','m','n','u','y','z']\nrange_compress(l)", "id": "f1599:m203"} {"signature": "def range_decompress(cl):", "body": "def cond_func(ele):length = ele.__len__()cond = (length == )if(cond):return(ord(ele))else:x = ord(ele[])y = ord(ele[])return((x,y))if(type(cl[])==type()):T = Trueelif(cl[].__len__() == ):T = (type(cl[]) == type())else:T = (type(cl[][]) == type())if(T):l = cl else:l = array_map(cl,cond_func)rslt = []for i in range(,l.__len__()):ele = l[i]if(type(ele) == type()):arr = [ele]elif(ele.__len__() == ):arr = [ele]else:sv = ele[]ev = ele[]arr = init_range(sv,ev+,)if(T):passelse:arr = array_map(arr,chr)rslt.extend(arr)return(rslt)", "docstring": "#only support sorted-ints or sorted-ascii\ncl = [1, (5, 8), (13, 14), 18, (30, 34)]\nrange_decompress(cl)\ncl = [1, (5, 8), (13, 14), 18, (30, 34), 40]\nrange_decompress(cl)\ncl = [('a', 'd'), ('j', 'n'), 'u', ('y', 'z')]\nrange_decompress(cl)", "id": "f1599:m204"} {"signature": "def is_list(obj):", "body": "if(type(obj)==type([])):return(True)else:return(False)", "docstring": 
"from elist.elist import *\nis_list([1,2,3])\nis_list(200)", "id": "f1599:m205"} {"signature": "def broken_seqs(ol,break_points):", "body": "bps = list(break_points)length = ol.__len__()rgs = rangize(bps,length)rslt = []for i in range(,rgs.__len__()):si,ei = rgs[i]sec = ol[si:ei]rslt.append(sec)return(rslt)", "docstring": "ol = initRange(0,20,1)\nol\nbreak_points = [1,6,14,9]\nsecs = broken_seqs(ol,break_points)\nforEach(secs,print)", "id": "f1599:m206"} {"signature": "def broken_some(ol,*break_points):", "body": "bps = list(break_points)return(broken_seqs(ol,bps))", "docstring": "ol = initRange(0,20,1)\nol\nsecs = broken_some(ol,1,6,14,9)\nforEach(secs,print)", "id": "f1599:m207"} {"signature": "def brkl2kvlist(arr,interval,sub_pos=,**kwargs):", "body": "lngth = arr.__len__()brkseqs1 = init_range(,lngth,interval)brkseqs2 = init_range(sub_pos,lngth,interval)brkseqs = interleave(brkseqs1,brkseqs2)l = broken_seqs(arr,brkseqs)kl = select_evens(l)vl = select_odds(l)if(\"\" in kwargs):single_key = kwargs['']else:single_key = Trueif(sub_pos == ):if(single_key):kl = mapv(kl,lambda ele:ele[])else:passelse:passreturn((kl,vl))", "docstring": "arr = [\"color1\",\"r1\",\"g1\",\"b1\",\"a1\",\"color2\",\"r2\",\"g2\",\"b2\",\"a2\"]\nbrkl2kvlist(arr,5)\n(['color1', 'color2'], [['r1', 'g1', 'b1', 'a1'], ['r2', 'g2', 'b2', 'a2']])\nbrkl2kvlist(arr,5,2)\n([['color1', 'r1'], ['color2', 'r2']], [['g1', 'b1', 'a1'], ['g2', 'b2', 'a2']])", "id": "f1599:m208"} {"signature": "def divide(ol,interval):", "body": "length = ol.__len__()seqs = initRange(,length,interval)rslt = broken_seqs(ol,seqs)return(rslt)", "docstring": "ol = elel.initRange(0,20,1)\ninterval = 3\nrslt = elel.divide(ol,interval)\nrslt\nrslt = elel.divide(ol,4)\nrslt", "id": "f1599:m209"} {"signature": "def split(ol,value,**kwargs):", "body": "if('' in kwargs):whiches = kwargs[''] else:whiches = Noneindexes = indexes_all(ol,value)if(whiches == None):passelse:indexes = select_indexes(indexes,whiches)rslt = []rslt.append(ol[:indexes[]])si = indexes[]+for i in range(,indexes.__len__()):ei = indexes[i]ele = ol[si:ei]rslt.append(ele)si = ei + ele = ol[si:ol.__len__()]rslt.append(ele)return(rslt)", "docstring": "ol = ['a',1,'a',2,'a',3,'a',4,'a']\nsplit(ol,'a')\nsplit(ol,'a',whiches=[0])\nsplit(ol,'a',whiches=[1])\nsplit(ol,'a',whiches=[2])\nsplit(ol,'a',whiches=[0,2])\nol = [1,'a',2,'a',3,'a',4]\nsplit(ol,'a')\nsplit('x=bcdsef=g','=',whiches=[0])", "id": "f1599:m214"} {"signature": "def where(ol,value):", "body": "si = Noneei = Nonefor i in range(,ol.__len__()):ele = ol[i]if(value >ele):si = i elif(value == ele):return((i,i))else:ei = i return((si,ei))return((si,ei))", "docstring": "ol = [0, 4, 6, 8, 10, 14]\nwhere(ol,-1)\nwhere(ol,1)\nwhere(ol,2)\nwhere(ol,3)\nwhere(ol,4)\nwhere(ol,9)\nwhere(ol,14)\nwhere(ol,17)", "id": "f1599:m216"} {"signature": "def value_interval(ol,value):", "body": "si,ei = where(ol,value)if(si == None):sv = Noneelse:sv = ol[si]if(ei == None):ev = Noneelse:ev = ol[ei]return((sv,ev))", "docstring": "ol = [0, 4, 6, 8, 10, 14]\nvalue_interval(ol,-1)\nvalue_interval(ol,1)\nvalue_interval(ol,2)\nvalue_interval(ol,3)\nvalue_interval(ol,4)\nvalue_interval(ol,9)\nvalue_interval(ol,14)\nvalue_interval(ol,17)", "id": "f1599:m217"} {"signature": "def upper_bound(ol,value):", "body": "return(value_interval(ol,value)[])", "docstring": "ol = [0, 4, 6, 8, 10, 14]\nupper_bound(ol,-1)\nupper_bound(ol,1)\nupper_bound(ol,2)\nupper_bound(ol,3)\nupper_bound(ol,4)\nupper_bound(ol,9)\nupper_bound(ol,14)\nupper_bound(ol,17)", "id": "f1599:m218"} 
{"signature": "def lower_bound(ol,value):", "body": "return(value_interval(ol,value)[])", "docstring": "ol = [0, 4, 6, 8, 10, 14]\nlower_bound(ol,-1)\nlower_bound(ol,1)\nlower_bound(ol,2)\nlower_bound(ol,3)\nlower_bound(ol,4)\nlower_bound(ol,9)\nlower_bound(ol,14)\nlower_bound(ol,17)", "id": "f1599:m219"} {"signature": "def rand_sub(arr,*args,**kwargs):", "body": "arr = copy.deepcopy(arr)lngth = arr.__len__()args = list(args)if(args.__len__() == ):n = random.randrange(,lngth)else:n = args[]if(n>lngth):n = lngthelse:passindexes = rand_some_indexes(,lngth,n,**kwargs)narr = select_seqs_keep_order(arr,indexes)return(narr)", "docstring": "arr = ['scheme', 'username', 'password', 'hostname', 'port', 'path', 'params', 'query', 'fragment']\nrand_sub(arr,3)\nrand_sub(arr,3)\nrand_sub(arr,3)\nrand_sub(arr)\nrand_sub(arr)\nrand_sub(arr)", "id": "f1599:m221"} {"signature": "def is_leaf(obj):", "body": "if(is_list(obj)):length = obj.__len__()if(length == ):return(True)else:return(False)else:return(True)", "docstring": "the below is for nested-list\nany type is not list will be treated as a leaf\nempty list will be treated as a leaf\nfrom elist.elist import *\nis_leaf(1)\nis_leaf([1,2,3])\nis_leaf([])", "id": "f1599:m226"} {"signature": "def new_ele_description(**kwargs):", "body": "desc = {'':None,'':None,'':None,'':None,'':None,'':None,'':None,'':None,'':None,'':None,'':None,'':None,'':None,'':None,'':None,'':None,'':None,'':None,'':None}for key in kwargs:desc[key.lower()] = kwargs[key]return(desc)", "docstring": "from elist.elist import *\nfrom elist.jprint import pobj\nroot_desc = new_ele_description(leaf=False,depth=0,breadth_path=[],path=[],parent_path=[],parent_breadth_path=[])\npobj(root_desc)\n#None means not handled", "id": "f1599:m230"} {"signature": "def root_list(*args):", "body": "return(list(args))", "docstring": "from elist.elist import *\nfrom elist.jprint import pobj\nroot_list([1],2,[1,2,3])", "id": "f1599:m231"} {"signature": "def init_desc_matrix(l):", "body": "leaf = is_leaf(l)root_desc = new_ele_description(leaf=leaf,depth=,breadth_path=[],path=[],parent_path=[],parent_breadth_path=[])if(leaf):root_desc[''] = else:passdesc_matrix = [[root_desc]]return(desc_matrix)", "docstring": "from elist.elist import *\nfrom elist.jprint import pobj\nl = [1,[4],2,[3,[5,6]]]\ndesc_matrix = init_desc_matrix(l)\npobj(desc_matrix)", "id": "f1599:m232"} {"signature": "def reset_parent_desc_template(desc):", "body": "tem = new_ele_description()tem[''] = desc['']tem[''] = desc['']return(tem)", "docstring": "from elist.elist import *\nfrom elist.jprint import pobj\npobj(desc)\ntem = reset_parent_desc_template(desc)\npobj(tem)\n#only inherit path and breadth_path", "id": "f1599:m233"} {"signature": "def _init_unhandled(l,inited_matrix):", "body": "root_desc = inited_matrix[][]unhandled = {'':[],'':[]}length = l.__len__()root_desc[''] = lengthroot_desc[''] = []root_desc[''] = [] if(length == ):passelse:inited_matrix.append([])level = inited_matrix[]for i in range(,length):child = l[i]desc = copy.deepcopy(root_desc)desc = reset_parent_desc_template(desc)desc[''] = desc[''] = idesc[''] = copy.deepcopy(desc[''])desc[''].append(i)desc[''] = idesc[''] = copy.deepcopy(desc[''])desc[''].append(i)if(i==):passelse:desc[''] = [i-]if(i == (length - )):passelse:desc[''] = [i+]if(is_leaf(child)):desc[''] = Truedesc[''] = root_desc[''].append(copy.deepcopy(desc['']))else:desc[''] = 
Falseroot_desc[''].append(copy.deepcopy(desc['']))unhandled[''].append(child)unhandled[''].append(desc)level.append(desc)return(unhandled)", "docstring": "from elist.elist import *\nfrom elist.jprint import pobj\nl = [1,[4],2,[3,[5,6]]]\ndesc_matrix = init_desc_matrix(l)\nunhandled = _init_unhandled(l,desc_matrix)\nunhandled_data = unhandled['data']\nunhandled_desc = unhandled['desc']\nunhandled_data[0]\nunhandled_desc[0]\nunhandled_data[1]\nunhandled_desc[1]", "id": "f1599:m234"} {"signature": "def update_desc_lsib_path(desc):", "body": "if(desc['']>):lsib_path = copy.deepcopy(desc[''])lsib_path[-] = desc['']-desc[''] = lsib_pathelse:passreturn(desc)", "docstring": "leftSibling\npreviousSibling\nleftSib\nprevSib\nlsib\npsib\n\nhave the same parent,and on the left", "id": "f1599:m235"} {"signature": "def update_desc_rsib_path(desc,sibs_len):", "body": "if(desc['']<(sibs_len-)):rsib_path = copy.deepcopy(desc[''])rsib_path[-] = desc['']+desc[''] = rsib_pathelse:passreturn(desc)", "docstring": "rightSibling\nnextSibling\nrightSib\nnextSib\nrsib\nnsib\n\nhave the same parent,and on the right", "id": "f1599:m236"} {"signature": "def update_desc_lcin_path(desc,pdesc_level):", "body": "parent_breadth = desc[''][-]if(desc['']==):if(parent_breadth==):passelse:parent_lsib_breadth = parent_breadth - plsib_desc = pdesc_level[parent_lsib_breadth]if(plsib_desc['']):passelse:lcin_path = copy.deepcopy(plsib_desc[''])lcin_path.append(plsib_desc[''] - )desc[''] = lcin_pathelse:passreturn(desc)", "docstring": "leftCousin\npreviousCousin\nleftCin\nprevCin\nlcin\npcin\n\nparents are neighbors,and on the left", "id": "f1599:m237"} {"signature": "def update_desc_rcin_path(desc,sibs_len,pdesc_level):", "body": "psibs_len = pdesc_level.__len__()parent_breadth = desc[''][-]if(desc['']==(sibs_len - )):if(parent_breadth==(psibs_len -)):passelse:parent_rsib_breadth = parent_breadth + prsib_desc = pdesc_level[parent_rsib_breadth]if(prsib_desc['']):passelse:rcin_path = copy.deepcopy(prsib_desc[''])rcin_path.append()desc[''] = rcin_pathelse:passreturn(desc)", "docstring": "rightCousin\nnextCousin\nrightCin\nnextCin\nrcin\nncin\n\nparents are neighbors,and on the right", "id": "f1599:m238"} {"signature": "def scan(l,**kwargs):", "body": "if('' in kwargs):itermode = Trueelse:itermode = Falsedesc_matrix = init_desc_matrix(l)if(desc_matrix[][][''] == True):return(desc_matrix)else:passlcache=LevelCache(datas=l,descs=desc_matrix[][])scache=StateCache(desc_matrix)pcache = init_pcache_handler_inline(kwargs)while(lcache.data.__len__() > ):scache.update()for unhandled_seq in range(,lcache.data.__len__()):pcache.update_pdesc(lcache,unhandled_seq)for sib_seq in range(,pcache.sibs_len):pcache.update_desc(lcache,scache,sib_seq)lcache.update()return(desc_matrix)", "docstring": "from elist.elist import *\nfrom elist.jprint import pobj\nl = [1,[4],2,[3,[5,6]]]\ndesc = description(l)\nl = [1,2,[4],[3,[5,6]]]\ndesc = description(l)", "id": "f1599:m239"} {"signature": "def fullfill_descendants_info(desc_matrix):", "body": "pathloc_mapping = {}locpath_mapping = {}def leaf_handler(desc,pdesc):desc[''] = []desc[''] = []desc[''] = []desc[''] = []desc[''] = if(pdesc['']):pdesc[''] = pdesc[''] + else:pdesc[''] = def non_leaf_handler(desc,pdesc):pdesc[''].extend(copy.deepcopy(desc['']))pdesc[''].extend(copy.deepcopy(desc['']))if(pdesc['']):pdesc[''] = pdesc[''] + desc['']else:pdesc[''] = desc['']def fill_path_mapping(desc):pmk = tuple(desc[''])pmv = tuple(DescMatrix.loc(desc))pathloc_mapping[pmk] = pmvlocpath_mapping[pmv] = pmkdm = 
DescMatrix(desc_matrix)depth = desc_matrix.__len__()desc_level = desc_matrix[depth - ]length = desc_level.__len__()for j in range(length - ,-,-):desc = desc_level[j]fill_path_mapping(desc)pdesc = dm.pdesc(desc)leaf_handler(desc,pdesc)for i in range(depth-,,-):desc_level = desc_matrix[i]length = desc_level.__len__()for j in range(length-,-,-):desc = desc_level[j]fill_path_mapping(desc)pdesc = dm.pdesc(desc)if(desc['']):leaf_handler(desc,pdesc)else:non_leaf_handler(desc,pdesc)desc_matrix[][][''] = (,desc_matrix[][][''])for i in range(,depth-):pdesc_level = desc_matrix[i]length = pdesc_level.__len__()for j in range(,length):pdesc = pdesc_level[j]si = pdesc[''][]for i in range(,pdesc['']):spl = append(pdesc[''],i,mode='')pk = tuple(spl)locx,locy = pathloc_mapping[pk]son = desc_matrix[locx][locy]ei = si + son['']son[''] = (si,ei)si = eireturn(desc_matrix,pathloc_mapping,locpath_mapping)", "docstring": "flat_offset", "id": "f1599:m240"} {"signature": "def pathlist_to_getStr(path_list):", "body": "t1 = path_list.__repr__()t1 = t1.lstrip('')t1 = t1.rstrip('')t2 = t1.split(\"\")s = ''for i in range(,t2.__len__()):s = ''.join((s,'',t2[i],''))return(s)", "docstring": ">>> pathlist_to_getStr([1, '1', 2])\n \"[1]['1'][2]\"\n>>>", "id": "f1599:m241"} {"signature": "def getStr_to_pathlist(gs):", "body": "def numize(w):try:int(w)except:try:float(w)except:return(w)else:return(float(w))else:return(int(w))def strip_quote(w):if(type(w) == type('')):if(w[]==w[-]):if((w[]==\"\") |(w[]=='')):return(w[:-])else:return(w)else:return(w)else:return(w)gs = gs[:-]pl = gs.split(\"\")pl = array_map(pl,numize)pl = array_map(pl,strip_quote)return(pl)", "docstring": "gs = \"[1]['1'][2]\"\ngetStr_to_pathlist(gs)\ngs = \"['u']['u1']\"\ngetStr_to_pathlist(gs)", "id": "f1599:m242"} {"signature": "def get_block_op_pairs(pairs_str):", "body": "pairs_str_len = pairs_str.__len__()pairs_len = pairs_str_len // pairs_dict = {}for i in range(,pairs_len +):pairs_dict[i] = pairs_str[i*-],pairs_str[i*-]return(pairs_dict)", "docstring": "# >>> get_block_op_pairs(\"{}[]\") \n# {1: ('{', '}'), 2: ('[', ']')}\n# >>> get_block_op_pairs(\"{}[]()\")\n# {1: ('{', '}'), 2: ('[', ']'), 3: ('(', ')')}\n# >>> get_block_op_pairs(\"{}[]()<>\")\n# {1: ('{', '}'), 2: ('[', ']'), 3: ('(', ')'), 4: ('<', '>')}", "id": "f1599:m243"} {"signature": "def is_lop(ch,block_op_pairs_dict=get_block_op_pairs('')):", "body": "for i in range(,block_op_pairs_dict.__len__()+):if(ch == block_op_pairs_dict[i][]):return(True)else:passreturn(False)", "docstring": "# is_lop('{',block_op_pairs_dict)\n# is_lop('[',block_op_pairs_dict)\n# is_lop('}',block_op_pairs_dict)\n# is_lop(']',block_op_pairs_dict)\n# is_lop('a',block_op_pairs_dict)", "id": "f1599:m244"} {"signature": "def is_rop(ch,block_op_pairs_dict=get_block_op_pairs('')):", "body": "for i in range(,block_op_pairs_dict.__len__()+):if(ch == block_op_pairs_dict[i][]):return(True)else:passreturn(False)", "docstring": "# is_rop('{',block_op_pairs_dict)\n# is_rop('[',block_op_pairs_dict)\n# is_rop('}',block_op_pairs_dict)\n# is_rop(']',block_op_pairs_dict)\n# is_rop('a',block_op_pairs_dict)", "id": "f1599:m245"} {"signature": "def get_next_char_level_in_j_str(curr_lv,curr_seq,j_str,block_op_pairs_dict=get_block_op_pairs(\"\")):", "body": "curr_ch = j_str[curr_seq]next_ch = j_str[curr_seq + ]cond = for i in range(,block_op_pairs_dict.__len__()+):if(curr_ch == block_op_pairs_dict[i][]):if(next_ch == block_op_pairs_dict[i][]):next_lv = curr_lv else:next_lv = curr_lv + cond = breakelif(curr_ch == 
block_op_pairs_dict[i][]):if(is_rop(next_ch,block_op_pairs_dict)):next_lv = curr_lv - else:next_lv = curr_lvcond = breakelse:passif(cond == ):passelif(is_rop(next_ch,block_op_pairs_dict)):next_lv = curr_lv - else: next_lv = curr_lvcurr_lv = next_lvcurr_seq = curr_seq + return(curr_lv,curr_lv,curr_seq)", "docstring": "the first-char is level-1\n when current is non-op, next-char-level = curr-level\n when current is lop, non-paired-rop-next-char-level = lop-level+1;\n when current is lop, paired-rop-next-char-level = lop-level\n when current is rop, next-char-level = rop-level - 1\n # {\"key_4_UF0aJJ6v\": \"value_1\", \"key_2_Hd0t\": [\"value_16\", \"value_8\", \"value_8\", \"value_15\", \"value_14\", \"value_19\", {......\n # 122222222222222222222222222222222222222222222333333333333333333333333333333333333333333333333333333333333333333333334......\n # {\\n\"key_4_UF0aJJ6v\": \"value_1\", \\n\"key_2_Hd0t\": [\\n\"value_16\", \\n\"value_8\", \\n\"value_8\", \\n\"value_15\", \\n\"value_14\", \\n\"value_19\",...... \n # 1 222222222222222222222222222222 2222222222222222 3333333333333 333333333333 333333333333 3333333333333 3333333333333 3333333333333......", "id": "f1599:m246"} {"signature": "def str_display_width(s):", "body": "s= str(s)width = len = s.__len__()for i in range(,len):sublen = s[i].encode().__len__()sublen = int(sublen/ + /)width = width + sublenreturn(width)", "docstring": "from elist.utils import *\nstr_display_width('a')\nstr_display_width('\u53bb')", "id": "f1599:m248"} {"signature": "def matrix_map(mat,map_func,map_func_args=[]):", "body": "mmat = []for i in range(,mat.__len__()):level = mat[i]mmat.append([])for j in range(,level.__len__()):value = level[j]indexr = iindexc = jele = map_func(value,indexr,indexc,*map_func_args)mmat[i].append(ele)return(mmat)", "docstring": "mat = [\n [1,2,3],\n [4,5,6],\n [7,8,9]\n]\n\ndef map_func(value,indexr,indexc,prefix,suffix):\n msg = prefix + str((indexr,indexc)) + \" : \"+str(value) + suffix\n return(msg)\n\nmmat = matrix_map(mat,map_func,map_func_args=[\"<\",\">\"])\n\nmmat", "id": "f1599:m252"} {"signature": "def get_dfs(l):", "body": "ltree = ListTree(l)dfs = ltree.tree()return(dfs)", "docstring": "l = ['v_7', 'v_3', 'v_1', 'v_4', ['v_4', 'v_2'], 'v_5', 'v_6', 'v_1', 'v_6', 'v_7', 'v_5', ['v_4', ['v_1', 'v_8', 'v_3', 'v_4', 'v_2', 'v_7', [['v_3', 'v_2'], 'v_4', 'v_5', 'v_1', 'v_3', 'v_1', 'v_2', 'v_5', 'v_8', 'v_8', 'v_7'], 'v_5', 'v_8', 'v_7', 'v_1', 'v_5'], 'v_6'], 'v_4', 'v_5', 'v_8', 'v_5']\ndfs = get_dfs(l)", "id": "f1599:m255"} {"signature": "def get_wfsmat(l):", "body": "ltree = ListTree(l)vdescmat = ltree.descwfsmat = matrix_map(vdescmat,lambda v,ix,iy:v[''])wfsmat.pop()return(wfsmat)", "docstring": "l = ['v_7', 'v_3', 'v_1', 'v_4', ['v_4', 'v_2'], 'v_5', 'v_6', 'v_1', 'v_6', 'v_7', 'v_5', ['v_4', ['v_1', 'v_8', 'v_3', 'v_4', 'v_2', 'v_7', [['v_3', 'v_2'], 'v_4', 'v_5', 'v_1', 'v_3', 'v_1', 'v_2', 'v_5', 'v_8', 'v_8', 'v_7'], 'v_5', 'v_8', 'v_7', 'v_1', 'v_5'], 'v_6'], 'v_4', 'v_5', 'v_8', 'v_5']\nget_wfs(l)", "id": "f1599:m256"} {"signature": "def wfs2mat(wfs):", "body": "wfsmat = []depth = level = filter(wfs,lambda ele:ele.__len__()==)while(level.__len__()>):wfsmat.append([])wfsmat[depth] = leveldepth = depth+level = filter(wfs,lambda ele:ele.__len__()==depth+)return(wfsmat)", "docstring": "wfs = [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13], [14], [15], [4, 0], [4, 1], [11, 0], [11, 1], [11, 2], [11, 1, 0], [11, 1, 1], [11, 1, 2], [11, 1, 3], [11, 1, 4], [11, 1, 5], [11, 1, 6], [11, 1, 7], [11, 
1, 8], [11, 1, 9], [11, 1, 10], [11, 1, 11], [11, 1, 6, 0], [11, 1, 6, 1], [11, 1, 6, 2], [11, 1, 6, 3], [11, 1, 6, 4], [11, 1, 6, 5], [11, 1, 6, 6], [11, 1, 6, 7], [11, 1, 6, 8], [11, 1, 6, 9], [11, 1, 6, 10], [11, 1, 6, 0, 0], [11, 1, 6, 0, 1]]", "id": "f1599:m258"} {"signature": "def dfs2wfsmat(dfs):", "body": "wfsmat = []depth = level = filter(dfs,lambda ele:ele.__len__()==)while(level.__len__()>):wfsmat.append([])wfsmat[depth] = leveldepth = depth+level = filter(dfs,lambda ele:ele.__len__()==depth+)return(wfsmat)", "docstring": "dfs = [[0], [1], [2], [3], [4], [4, 0], [4, 1], [5], [6], [7], [8], [9], [10], [11], [11, 0], [11, 1], [11, 1, 0], [11, 1, 1], [11, 1, 2], [11, 1, 3], [11, 1, 4], [11, 1, 5], [11, 1, 6], [11, 1, 6, 0], [11, 1, 6, 0, 0], [11, 1, 6, 0, 1], [11, 1, 6, 1], [11, 1, 6, 2], [11, 1, 6, 3], [11, 1, 6, 4], [11, 1, 6, 5], [11, 1, 6, 6], [11, 1, 6, 7], [11, 1, 6, 8], [11, 1, 6, 9], [11, 1, 6, 10], [11, 1, 7], [11, 1, 8], [11, 1, 9], [11, 1, 10], [11, 1, 11], [11, 2], [12], [13], [14], [15]]\n\ndfs2wfs(dfs)", "id": "f1599:m260"} {"signature": "def wfsmat2dfs(wfsmat):", "body": "dfs = mat2wfs(wfsmat)dfs.sort()return(dfs)", "docstring": "wfs = [[[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13], [14], [15]], [[4, 0], [4, 1], [11, 0], [11, 1], [11, 2]], [[11, 1, 0], [11, 1, 1], [11, 1, 2], [11, 1, 3], [11, 1, 4], [11, 1, 5], [11, 1, 6], [11, 1, 7], [11, 1, 8], [11, 1, 9], [11, 1, 10], [11, 1, 11]], [[11, 1, 6, 0], [11, 1, 6, 1], [11, 1, 6, 2], [11, 1, 6, 3], [11, 1, 6, 4], [11, 1, 6, 5], [11, 1, 6, 6], [11, 1, 6, 7], [11, 1, 6, 8], [11, 1, 6, 9], [11, 1, 6, 10]], [[11, 1, 6, 0, 0], [11, 1, 6, 0, 1]]]\nwfs2dfs(wfs)", "id": "f1599:m261"} {"signature": "def get_children_handler(self,*args):", "body": "return(self.pdata)", "docstring": "list's children list is self", "id": "f1599:c2:m0"} {"signature": "def parent_handler(self,lcache,i,*args):", "body": "pdesc = lcache.desc[i]pdesc[''] = self.sibs_lenpdesc[''] = []pdesc[''] = []pdesc[''] = []pdesc[''] = []return(pdesc)", "docstring": "_update_pdesc_sons_info", "id": "f1599:c2:m1"} {"signature": "def child_begin_handler(self,scache,*args):", "body": "pdesc = self.pdescdepth = scache.depthsib_seq = self.sib_seqsibs_len = self.sibs_lenpdesc_level = scache.pdesc_leveldesc = copy.deepcopy(pdesc)desc = reset_parent_desc_template(desc)desc[''] = depthdesc[''] = copy.deepcopy(desc[''])desc[''] = sib_seqdesc[''] = copy.deepcopy(desc[''])desc[''].append(sib_seq)update_desc_lsib_path(desc)update_desc_rsib_path(desc,sibs_len)if(depth == ):passelse:update_desc_lcin_path(desc,pdesc_level)update_desc_rcin_path(desc,sibs_len,pdesc_level)return(desc)", "docstring": "_creat_child_desc\nupdate depth,parent_breadth_path,parent_path,sib_seq,path,lsib_path,rsib_path,lcin_path,rcin_path", "id": "f1599:c2:m2"} {"signature": "def leaf_handler(self,*args):", "body": "desc = self.descpdesc = self.pdescdesc[''] = Truedesc[''] = pdesc[''].append(copy.deepcopy(desc['']))pdesc[''].append(copy.deepcopy(desc['']))", "docstring": "leaf child handler", "id": "f1599:c2:m3"} {"signature": "def non_leaf_handler(self,lcache):", "body": "desc = self.descpdesc = self.pdescdesc[''] = Falsepdesc[''].append(copy.deepcopy(desc['']))pdesc[''].append(copy.deepcopy(desc['']))lcache.ndata.append(self.data)lcache.ndesc.append(desc)", "docstring": "nonleaf child handler", "id": "f1599:c2:m4"} {"signature": "def child_end_handler(self,scache):", "body": "desc = self.descdesc_level = scache.desc_levelbreadth = desc_level.__len__()desc[''] = 
breadthdesc[''].append(breadth)desc_level.append(desc)", "docstring": "_upgrade_breadth_info\nupdate breadth, breadth_path, and add desc to desc_level", "id": "f1599:c2:m5"} {"signature": "def pipe_shell_cmds(shell_CMDs):", "body": "len = shell_CMDs.__len__()p = {}p[] = subprocess.Popen(shlex.split(shell_CMDs[]), stdout=subprocess.PIPE,stderr=subprocess.PIPE)for i in range(,len):p[i] = subprocess.Popen(shlex.split(shell_CMDs[i]), stdin=p[i-].stdout, stdout=subprocess.PIPE,stderr=subprocess.PIPE)if(len > ):p[len] = subprocess.Popen(shlex.split(shell_CMDs[len]), stdin=p[len-].stdout, stdout=subprocess.PIPE,stderr=subprocess.PIPE)result = p[len].communicate()if(len > ):for i in range(,len+):returncode = p[i].wait()else:returncode = p[len].wait()return(result)", "docstring": "shell_CMDs = {}\nshell_CMDs[1] = 'netstat -n'\nshell_CMDs[2] = \"awk {'print $6'}\"", "id": "f1601:m0"} {"signature": "def get(self, path):", "body": "return self.request(path)", "docstring": "Performs a HTTP GET request to the Go server\n\n Args:\n path (str): The full path on the Go server to request.\n This includes any query string attributes.\n\n Raises:\n HTTPError: when the HTTP request fails.\n\n Returns:\n file like object: The response from a\n :func:`urllib2.urlopen` call", "id": "f1613:c1:m1"} {"signature": "def post(self, path, **post_args):", "body": "return self.request(path, data=post_args or {})", "docstring": "Performs a HTTP POST request to the Go server\n\n Args:\n path (str): The full path on the Go server to request.\n This includes any query string attributes.\n **post_args: Any POST arguments that should be sent to the server\n\n Raises:\n HTTPError: when the HTTP request fails.\n\n Returns:\n file like object: The response from a\n :func:`urllib2.urlopen` call", "id": "f1613:c1:m2"} {"signature": "def request(self, path, data=None, headers=None, method=None):", "body": "if isinstance(data, str):data = data.encode('')response = urlopen(self._request(path, data=data, headers=headers, method=method))self._set_session_cookie(response)return response", "docstring": "Performs a HTTP request to the Go server\n\n Args:\n path (str): The full path on the Go server to request.\n This includes any query string attributes.\n data (str, dict, bool, optional): If any data is present this\n request will become a POST request.\n headers (dict, optional): Headers to set for this particular\n request\n\n Raises:\n HTTPError: when the HTTP request fails.\n\n Returns:\n file like object: The response from a\n :func:`urllib2.urlopen` call", "id": "f1613:c1:m3"} {"signature": "def add_logged_in_session(self, response=None):", "body": "if not response:response = self.get('')self._set_session_cookie(response)if not self._session_id:raise AuthenticationFailed('')response = self.get('')match = re.search(r'',response.read().decode(''))if match:self._authenticity_token = match.group()else:raise AuthenticationFailed('')", "docstring": "Make the request appear to be coming from a browser\n\n This is to interact with older parts of Go that doesn't have a\n proper API call to be made. What will be done:\n\n 1. If no response passed in a call to `go/api/pipelines.xml` is\n made to get a valid session\n 2. `JSESSIONID` will be populated from this request\n 3. A request to `go/pipelines` will be so the\n `authenticity_token` (CSRF) can be extracted. 
It will then\n silently be injected into `post_args` on any POST calls that\n doesn't start with `go/api` from this point.\n\n Args:\n response: a :class:`Response` object from a previously successful\n API call. So we won't have to query `go/api/pipelines.xml`\n unnecessarily.\n\n Raises:\n HTTPError: when the HTTP request fails.\n AuthenticationFailed: when failing to get the `session_id`\n or the `authenticity_token`.", "id": "f1613:c1:m4"} {"signature": "def pipeline(self, name):", "body": "return Pipeline(self, name)", "docstring": "Instantiates a :class:`Pipeline` with the given name.\n\n Args:\n name: The name of the pipeline you want to interact with\n\n Returns:\n Pipeline: an instantiated :class:`Pipeline`.", "id": "f1613:c1:m6"} {"signature": "def pipeline_groups(self):", "body": "return PipelineGroups(self)", "docstring": "Returns an instance of :class:`PipelineGroups`\n\n Returns:\n PipelineGroups: an instantiated :class:`PipelineGroups`.", "id": "f1613:c1:m7"} {"signature": "def stage(self, pipeline_name, stage_name, pipeline_counter=None):", "body": "return Stage(self, pipeline_name, stage_name, pipeline_counter=pipeline_counter)", "docstring": "Returns an instance of :class:`Stage`\n\n Args:\n pipeline_name (str): Name of the pipeline the stage belongs to\n stage_name (str): Name of the stage to act on\n pipeline_counter (int): The pipeline instance the stage is for.\n\n Returns:\n Stage: an instantiated :class:`Stage`.", "id": "f1613:c1:m8"} {"signature": "def flatten(d):", "body": "if not isinstance(d, dict):return [[d]]returned = []for key, value in d.items():nested = flatten(value)for nest in nested:current_row = [key]current_row.extend(nest)returned.append(current_row)return returned", "docstring": "Return a dict as a list of lists.\n\n >>> flatten({\"a\": \"b\"})\n [['a', 'b']]\n >>> flatten({\"a\": [1, 2, 3]})\n [['a', [1, 2, 3]]]\n >>> flatten({\"a\": {\"b\": \"c\"}})\n [['a', 'b', 'c']]\n >>> flatten({\"a\": {\"b\": {\"c\": \"e\"}}})\n [['a', 'b', 'c', 'e']]\n >>> flatten({\"a\": {\"b\": \"c\", \"d\": \"e\"}})\n [['a', 'b', 'c'], ['a', 'd', 'e']]\n >>> flatten({\"a\": {\"b\": \"c\", \"d\": \"e\"}, \"b\": {\"c\": \"d\"}})\n [['a', 'b', 'c'], ['a', 'd', 'e'], ['b', 'c', 'd']]", "id": "f1615:m0"} {"signature": "def parametrize(params):", "body": "returned = str(params[])returned += \"\".join(\"\" + str(p) + \"\" for p in params[:])return returned", "docstring": "Return list of params as params.\n\n >>> parametrize(['a'])\n 'a'\n >>> parametrize(['a', 'b'])\n 'a[b]'\n >>> parametrize(['a', 'b', 'c'])\n 'a[b][c]'", "id": "f1615:m1"} {"signature": "def urlencode(params):", "body": "if not isinstance(params, dict):raise TypeError(\"\")params = flatten(params)url_params = {}for param in params:value = param.pop()name = parametrize(param)if isinstance(value, (list, tuple)):name += \"\"url_params[name] = valuereturn _urlencode(url_params, doseq=True)", "docstring": "Urlencode a multidimensional dict.", "id": "f1615:m2"} {"signature": "def __init__(self, server, pipeline, counter, stage, job, stage_counter=):", "body": "self.server = serverself.pipeline = pipelineself.counter = counterself.stage = stageself.job = jobself.stage_counter = stage_counterself._base_path = self.base_path.format(pipeline=self.pipeline,counter=self.counter,stage=self.stage,stage_counter=self.stage_counter,job=self.job)", "docstring": "A wrapper for the `Go artifact API`__\n\n .. 
__: http://api.go.cd/current/#artifacts\n\n Args:\n server (Server): A configured instance of\n :class:gocd.server.Server\n pipeline (str): The name of the pipeline to work with\n counter (int): The counter of the pipeline to work with\n stage (str): The name of the stage to work with\n job (str): The name of the job to work with\n stage_counter (int): The counter of the stage to work with, defaults to 1", "id": "f1618:c0:m0"} {"signature": "def list(self):", "body": "return self._get('')", "docstring": "Lists all available artifacts in this job.\n\n See the `Go artifact list documentation`__ for example responses.\n\n .. __: http://api.go.cd/current/#get-all-artifacts\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1618:c0:m1"} {"signature": "def get(self, path_to_file):", "body": "return self._get(path_to_file)", "docstring": "Gets an artifact directory by its path.\n\n See the `Go artifact file documentation`__ for example responses.\n\n .. __: http://api.go.cd/current/#get-artifact-file\n\n Args:\n path_to_file (str): The path to file to get. It can be nested eg\n ``dist/foobar-widgets-1.2.0.jar``\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1618:c0:m2"} {"signature": "def get_directory(self, path_to_directory, timeout=, backoff=, max_wait=):", "body": "response = Nonestarted_at = Nonetime_elapsed = i = while time_elapsed < timeout:response = self._get(''.format(path_to_directory))if response:breakelse:if started_at is None:started_at = time.time()time.sleep(min(backoff * ( ** i), max_wait))i += time_elapsed = time.time() - started_atreturn response", "docstring": "Gets an artifact directory by its path.\n\n See the `Go artifact directory documentation`__ for example responses.\n\n .. __: http://api.go.cd/current/#get-artifact-directory\n\n .. note::\n Getting a directory relies on Go creating a zip file of the\n directory in question. Because of this Go will zip the file in\n the background and return a 202 Accepted response. It's then up\n to the client to check again later and get the final file.\n\n To work with normal assumptions this :meth:`get_directory` will\n retry itself up to ``timeout`` seconds to get a 200 response to\n return. At that point it will then return the response as is, no\n matter whether it's still 202 or 200. The retry is done with an\n exponential backoff with a max value between retries. See the\n ``backoff`` and ``max_wait`` variables.\n\n If you want to handle the retry logic yourself then use :meth:`get`\n and add '.zip' as a suffix on the directory.\n\n Args:\n path_to_directory (str): The path to the directory to get.\n It can be nested eg ``target/dist.zip``\n timeout (int): How many seconds we will wait in total for a\n successful response from Go when we're receiving 202\n backoff (float): The initial value used for backoff, raises\n exponentially until it reaches ``max_wait``\n max_wait (int): The max time between retries\n\n Returns:\n Response: :class:`gocd.api.response.Response` object\n A successful response is a zip-file.", "id": "f1618:c0:m3"} {"signature": "def __init__(self, server, name):", "body": "self.server = serverself.name = name", "docstring": "A wrapper for the `Go pipeline API`__\n\n .. 
__: http://api.go.cd/current/#pipelines\n\n Args:\n server (Server): A configured instance of\n :class:gocd.server.Server\n name (str): The name of the pipeline we're working on", "id": "f1619:c0:m0"} {"signature": "def history(self, offset=):", "body": "return self._get(''.format(offset=offset or ))", "docstring": "Lists previous instances/runs of the pipeline\n\n See the `Go pipeline history documentation`__ for example responses.\n\n .. __: http://api.go.cd/current/#get-pipeline-history\n\n Args:\n offset (int, optional): How many instances to skip for this response.\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1619:c0:m1"} {"signature": "def release(self):", "body": "return self._post('', headers={\"\": True})", "docstring": "Releases a previously locked pipeline\n\n See the `Go pipeline release lock documentation`__ for example\n responses.\n\n .. __: http://api.go.cd/current/#releasing-a-pipeline-lock\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1619:c0:m2"} {"signature": "def pause(self, reason=''):", "body": "return self._post('', headers={\"\": True}, pauseCause=reason)", "docstring": "Pauses the current pipeline\n\n See the `Go pipeline pause documentation`__ for example responses.\n\n .. __: http://api.go.cd/current/#pause-a-pipeline\n\n Args:\n reason (str, optional): The reason the pipeline is being paused.\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1619:c0:m3"} {"signature": "def unpause(self):", "body": "return self._post('', headers={\"\": True})", "docstring": "Unpauses the pipeline\n\n See the `Go pipeline unpause documentation`__ for example responses.\n\n .. __: http://api.go.cd/current/#unpause-a-pipeline\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1619:c0:m4"} {"signature": "def status(self):", "body": "return self._get('')", "docstring": "Returns the current status of this pipeline\n\n See the `Go pipeline status documentation`__ for example responses.\n\n .. __: http://api.go.cd/current/#get-pipeline-status\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1619:c0:m5"} {"signature": "def instance(self, counter=None):", "body": "if not counter:history = self.history()if not history:return historyelse:return Response._from_json(history[''][])return self._get(''.format(counter=counter))", "docstring": "Returns all the information regarding a specific pipeline run\n\n See the `Go pipeline instance documentation`__ for examples.\n\n .. 
__: http://api.go.cd/current/#get-pipeline-instance\n\n Args:\n counter (int): The pipeline instance to fetch.\n If falsey returns the latest pipeline instance from :meth:`history`.\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1619:c0:m6"} {"signature": "def schedule(self, variables=None, secure_variables=None, materials=None,return_new_instance=False, backoff_time=):", "body": "scheduling_args = dict(variables=variables,secure_variables=secure_variables,material_fingerprint=materials,headers={\"\": True},)scheduling_args = dict((k, v) for k, v in scheduling_args.items() if v is not None)if return_new_instance:pipelines = self.history()['']if len(pipelines) == :last_run = Noneelse:last_run = pipelines[]['']response = self._post('', ok_status=, **scheduling_args)if not response:return responsemax_tries = while max_tries > :current = self.instance()if not last_run and current:return currentelif last_run and current[''] > last_run:return currentelse:time.sleep(backoff_time)max_tries -= return responseelse:return self._post('', ok_status=, **scheduling_args)", "docstring": "Schedule a pipeline run\n\n Aliased as :meth:`run`, :meth:`schedule`, and :meth:`trigger`.\n\n Args:\n variables (dict, optional): Variables to set/override\n secure_variables (dict, optional): Secure variables to set/override\n materials (dict, optional): Material revisions to be used for\n this pipeline run. The exact format for this is a bit iffy,\n have a look at the official\n `Go pipeline scheduling documentation`__ or inspect a call\n from triggering manually in the UI.\n return_new_instance (bool): Returns a :meth:`history` compatible\n response for the newly scheduled instance. This is primarily so\n users easily can get the new instance number. **Note:** This is done\n in a very naive way, it just checks that the instance number is\n higher than before the pipeline was triggered.\n backoff_time (float): How long between each check for\n :arg:`return_new_instance`.\n\n .. 
__: http://api.go.cd/current/#scheduling-pipelines\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1619:c0:m7"} {"signature": "def artifact(self, counter, stage, job, stage_counter=):", "body": "return Artifact(self.server, self.name, counter, stage, job, stage_counter)", "docstring": "Helper to instantiate an :class:`gocd.api.artifact.Artifact` object\n\n Args:\n counter (int): The pipeline counter to get the artifact for\n stage: Stage name\n job: Job name\n stage_counter: Defaults to 1\n\n Returns:\n Artifact: :class:`gocd.api.artifact.Artifact` object", "id": "f1619:c0:m8"} {"signature": "def console_output(self, instance=None):", "body": "if instance is None:instance = self.instance()for stage in instance['']:for job in stage['']:if job[''] not in self.final_results:continueartifact = self.artifact(instance[''],stage[''],job[''],stage[''])output = artifact.get('')yield ({'': self.name,'': instance[''],'': stage[''],'': stage[''],'': job[''],'': job[''],},output.body)", "docstring": "Yields the output and metadata from all jobs in the pipeline\n\n Args:\n instance: The result of a :meth:`instance` call, if not supplied\n the latest of the pipeline will be used.\n\n Yields:\n tuple: (metadata (dict), output (str)).\n\n metadata contains:\n - pipeline\n - pipeline_counter\n - stage\n - stage_counter\n - job\n - job_result", "id": "f1619:c0:m9"} {"signature": "def stage(self, name, pipeline_counter=None):", "body": "return Stage(self.server,pipeline_name=self.name,stage_name=name,pipeline_counter=pipeline_counter,)", "docstring": "Helper to instantiate a :class:`gocd.api.stage.Stage` object\n\n Args:\n name: The name of the stage\n pipeline_counter:\n\n Returns:", "id": "f1619:c0:m10"} {"signature": "def __init__(self, server, name, api_version=):", "body": "self.server = serverself.name = nameself.api_version = api_version", "docstring": "A wrapper for the `Go template config API`__\n\n .. __: https://api.go.cd/current/#template-config\n\n Args:\n server (Server): A configured instance of\n :class:gocd.server.Server\n name (str): The name of the template we're working on", "id": "f1620:c0:m0"} {"signature": "def get(self):", "body": "return self._get(self.name, headers={\"\": self._accept_header_value})", "docstring": "Get template config for specified template name.\n\n See `The template config object`__ for example responses.\n\n .. __: https://api.go.cd/current/#the-template-config-object\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1620:c0:m1"} {"signature": "def edit(self, config, etag):", "body": "data = self._json_encode(config)headers = self._default_headers()if etag is not None:headers[\"\"] = etagreturn self._request(self.name,ok_status=None,data=data,headers=headers,method=\"\")", "docstring": "Update template config for specified template name.\n\n .. __: https://api.go.cd/current/#edit-template-config\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1620:c0:m2"} {"signature": "def create(self, config):", "body": "assert config[\"\"] == self.name, \"\"data = self._json_encode(config)headers = self._default_headers()return self._request(\"\",ok_status=None,data=data,headers=headers)", "docstring": "Create template config for specified template name.\n\n .. 
__: https://api.go.cd/current/#create-template-config\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1620:c0:m3"} {"signature": "def delete(self):", "body": "headers = self._default_headers()return self._request(self.name,ok_status=None,data=None,headers=headers,method=\"\")", "docstring": "Delete template config for specified template name.\n\n .. __: https://api.go.cd/current/#delete-a-template\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1620:c0:m4"} {"signature": "def get_pipeline_groups(self):", "body": "self._response = self._get('')return self._response", "docstring": "Makes a call to the Go server to fetch the pipeline groups.\n\n Saves the response to :attr:`response`.\n\n Returns:\n Response: an instance of :class:`gocd.api.Response`", "id": "f1622:c0:m1"} {"signature": "@propertydef response(self):", "body": "if self._response is None:self.get_pipeline_groups()return self._response", "docstring": "Returns the last response from fetching the pipeline groups from Go\n\n If there is no response then one will be fetched and returned.\n\n Returns:\n Response: an instance of :class:`gocd.api.Response`", "id": "f1622:c0:m2"} {"signature": "@propertydef pipelines(self):", "body": "if not self.response:return set()elif self._pipelines is None and self.response:self._pipelines = set()for group in self.response.payload:for pipeline in group['']:self._pipelines.add(pipeline[''])return self._pipelines", "docstring": "Returns a set of all pipelines from the last response\n\n Returns:\n set: Response success: all the pipelines available in the response\n Response failure: an empty set", "id": "f1622:c0:m3"} {"signature": "def __init__(self, server, name=\"\"):", "body": "self.server = serverself.name = name", "docstring": "A wrapper for the `Go pluggable SCM API`__\n\n .. __: https://api.go.cd/current/#scms\n\n Args:\n server (Server): A configured instance of\n :class:gocd.server.Server\n name (str): The name of the SCM material", "id": "f1623:c0:m0"} {"signature": "def list(self):", "body": "return self._get(\"\", headers={\"\": self._accept_header_value})", "docstring": "Lists all available pluggable scm materials,\n these are materials that are present in the in cruise-config.xml.\n\n See the `Go pluggable SCM documentation`__ for example responses.\n\n .. __: https://api.go.cd/current/#get-all-pluggable-scm-materials\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1623:c0:m1"} {"signature": "def get(self):", "body": "return self._get(self.name, headers={\"\": self._accept_header_value})", "docstring": "Gets SCM material for specified material name\n\n See `The global scm config object`__ for example responses.\n\n .. __: https://api.go.cd/current/#the-global-scm-config-object\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1623:c0:m2"} {"signature": "def edit(self, config, etag):", "body": "data = self._json_encode(config)headers = self._default_headers()if etag is not None:headers[\"\"] = etagreturn self._request(self.name,ok_status=None,data=data,headers=headers,method=\"\")", "docstring": "Update SCM material for specified material name.\n\n .. 
__: https://api.go.cd/current/#update-pluggable-scm-object\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1623:c0:m3"} {"signature": "def create(self, config):", "body": "assert config[\"\"] == self.name, \"\"assert \"\" in config, \"\"data = self._json_encode(config)headers = self._default_headers()return self._request(\"\",ok_status=None,data=data,headers=headers,method=\"\")", "docstring": "Create a global SCM object\n\n .. __: https://api.go.cd/current/#create-a-scm-object\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1623:c0:m4"} {"signature": "def __init__(self, server, name, api_version=):", "body": "self.server = serverself.name = nameself.api_version = api_version", "docstring": "A wrapper for the `Go pipeline config API`__\n\n .. __: https://api.go.cd/current/#pipeline-config\n\n Args:\n server (Server): A configured instance of\n :class:gocd.server.Server\n name (str): The name of the pipeline we're working on", "id": "f1624:c0:m0"} {"signature": "def get(self):", "body": "return self._get(self.name, headers={\"\": self._accept_header_value})", "docstring": "Gets pipeline config for specified pipeline name.\n\n See `The pipeline config object`__ for example responses.\n\n .. __: https://api.go.cd/current/#the-pipeline-config-object\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1624:c0:m1"} {"signature": "def edit(self, config, etag):", "body": "data = self._json_encode(config)headers = self._default_headers()if etag is not None:headers[\"\"] = etagreturn self._request(self.name,ok_status=None,data=data,headers=headers,method=\"\")", "docstring": "Update pipeline config for specified pipeline name.\n\n .. __: https://api.go.cd/current/#edit-pipeline-config\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1624:c0:m2"} {"signature": "def create(self, config):", "body": "assert config[\"\"] == self.name, \"\"assert \"\" in config, \"\"data = self._json_encode(config)headers = self._default_headers()return self._request(\"\",ok_status=None,data=data,headers=headers)", "docstring": "Update pipeline config for specified pipeline name.\n\n .. __: https://api.go.cd/current/#edit-pipeline-config\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1624:c0:m3"} {"signature": "def __init__(self, server, pipeline_name, stage_name, pipeline_counter=None):", "body": "self.server = serverself.pipeline_name = pipeline_nameself.pipeline_counter = pipeline_counterself.stage_name = stage_name", "docstring": "A wrapper for the `Go stage API`__\n\n .. __: http://api.go.cd/current/#stages\n\n Args:\n server (Server): A configured instance of\n :class:gocd.server.Server\n pipeline_name (str): The name of the pipeline we're working on\n stage_name (str): The name of the stage we're working on", "id": "f1625:c0:m0"} {"signature": "def cancel(self):", "body": "return self._post('', headers={\"\": True})", "docstring": "Cancels a currently running stage\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1625:c0:m2"} {"signature": "def history(self, offset=):", "body": "return self._get(''.format(offset=offset or ))", "docstring": "Lists previous instances/runs of the stage\n\n See the `Go stage history documentation`__ for example responses.\n\n .. 
__: http://api.go.cd/current/#get-stage-history\n\n Args:\n offset (int, optional): How many instances to skip for this response.\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1625:c0:m3"} {"signature": "def instance(self, counter=None, pipeline_counter=None):", "body": "pipeline_counter = pipeline_counter or self.pipeline_counterpipeline_instance = Noneif not pipeline_counter:pipeline_instance = self.server.pipeline(self.pipeline_name).instance()self.pipeline_counter = int(pipeline_instance[''])if not counter:if pipeline_instance is None:pipeline_instance = (self.server.pipeline(self.pipeline_name).instance(pipeline_counter))for stages in pipeline_instance['']:if stages[''] == self.stage_name:return self.instance(counter=int(stages['']),pipeline_counter=pipeline_counter)return self._get(''.format(pipeline_counter=pipeline_counter, counter=counter))", "docstring": "Returns all the information regarding a specific stage run\n\n See the `Go stage instance documentation`__ for examples.\n\n .. __: http://api.go.cd/current/#get-stage-instance\n\n Args:\n counter (int): The stage instance to fetch.\n If falsey returns the latest stage instance from :meth:`history`.\n pipeline_counter (int): The pipeline instance for which to fetch\n the stage. If falsey returns the latest pipeline instance.\n\n Returns:\n Response: :class:`gocd.api.response.Response` object", "id": "f1625:c0:m4"} {"signature": "@propertydef is_ok(self):", "body": "return self.status_code == self.ok_status", "docstring": "Whether this response is considered successful\n\n Returns\n bool: True if `status_code` is `ok_status`", "id": "f1626:c0:m1"} {"signature": "@propertydef is_json(self):", "body": "return (self.content_type.startswith('') orre.match(r'', self.content_type))", "docstring": "Returns:\n bool: True if `content_type` is `application/json`", "id": "f1626:c0:m2"} {"signature": "@propertydef payload(self):", "body": "if self.is_json:if not self._body_parsed:if hasattr(self._body, ''):body = self._body.decode('')else:body = self._bodyself._body_parsed = json.loads(body)return self._body_parsedelse:return self._body", "docstring": "Returns:\n `str` when not json.\n `dict` when json.", "id": "f1626:c0:m6"} {"signature": "@propertydef fp(self):", "body": "if hasattr(self.__body, ''):return self.__bodyreturn None", "docstring": "Returns a file-like object if the class was instantiated with one\n\n Returns:\n None, file-like object: If :attribute:`_body` responds to read else None", "id": "f1626:c0:m9"} {"signature": "def make_formatter(format_name):", "body": "if \"\" in format_name:from json import dumpsimport datetimedef jsonhandler(obj): obj.isoformat() if isinstance(obj, (datetime.datetime, datetime.date)) else objif format_name == \"\":def jsondumps(data): return dumps(data, default=jsonhandler, indent=, separators=('', ''))else:def jsondumps(data): return dumps(data, default=jsonhandler)def jsonify(data):if isinstance(data, dict):print(jsondumps(data))elif isinstance(data, list):print(jsondumps([device._asdict() for device in data]))else:print(dumps({'': data}))return jsonifyelse:def printer(data):if isinstance(data, dict):print(data)else:for row in data:print(row)return printer", "docstring": "Returns a callable that outputs the data. 
Defaults to print.", "id": "f1630:m0"} {"signature": "def argparser():", "body": "parser = ArgumentParser(prog='')parser.add_argument(\"\", choices=['', '', ''], default='')router_args = parser.add_argument_group(\"\")router_args.add_argument(\"\", help=\"\")router_args.add_argument(\"\", help=\"\")router_args.add_argument(\"\", help=\"\")router_args.add_argument(\"\", help=\"\",dest=\"\", default=False, action=\"\")router_args.add_argument(\"\",help=\"\" +\"\")router_args.add_argument(\"\", help=\"\")router_args.add_argument(\"\",dest=\"\", default=True,action=\"\",help=\"\")subparsers = parser.add_subparsers(description=\"\",dest=\"\")block_parser = subparsers.add_parser(\"\",help=\"\")block_parser.add_argument(\"\")allow_parser = subparsers.add_parser(\"\",help=\"\")allow_parser.add_argument(\"\")subparsers.add_parser(\"\", help=\"\")attached_devices = subparsers.add_parser(\"\", help=\"\")attached_devices.add_argument(\"\", \"\",action=\"\",default=False,help=\"\")subparsers.add_parser(\"\", help=\"\")return parser", "docstring": "Constructs the ArgumentParser for the CLI", "id": "f1630:m1"} {"signature": "def run_subcommand(netgear, args):", "body": "subcommand = args.subcommandif subcommand == \"\" or subcommand == \"\":return netgear.allow_block_device(args.mac_addr, BLOCK if subcommand == \"\" else ALLOW)if subcommand == \"\":if args.verbose:return netgear.get_attached_devices_2()else:return netgear.get_attached_devices()if subcommand == '':return netgear.get_traffic_meter()if subcommand == '':return netgear.login()print(\"\")", "docstring": "Runs the subcommand configured in args on the netgear session", "id": "f1630:m2"} {"signature": "def main():", "body": "args = argparser().parse_args(sys.argv[:])password = os.environ.get('') or args.passwordnetgear = Netgear(password, args.host, args.user, args.port, args.ssl, args.url, args.force_login_v2)results = run_subcommand(netgear, args)formatter = make_formatter(args.format)if results is None:print(\"\")else:formatter(results)", "docstring": "Scan for devices and print results.", "id": "f1630:m3"} {"signature": "def autodetect_url():", "body": "for url in [\"\", \"\",\"\"]:try:r = requests.get(url + \"\",headers=_get_soap_headers(\"\", \"\"),verify=False)if r.status_code == :return urlexcept requests.exceptions.RequestException:passreturn None", "docstring": "Try to autodetect the base URL of the router SOAP service.\n\nReturns None if it can't be found.", "id": "f1631:m0"} {"signature": "def _xml_get(e, name):", "body": "r = e.find(name)if r is not None:return r.textreturn None", "docstring": "Returns the value of the subnode \"name\" of element e.\n\nReturns None if the subnode doesn't exist", "id": "f1631:m2"} {"signature": "def _convert(value, to_type, default=None):", "body": "try:return default if value is None else to_type(value)except ValueError:return default", "docstring": "Convert value to to_type, returns default if fails.", "id": "f1631:m6"} {"signature": "def __init__(self, password=None, host=None, user=None, port=None,ssl=False, url=None, force_login_v2=False):", "body": "if not url and not host and not port:url = autodetect_url()if url:self.soap_url = url + \"\"else:if not host:host = DEFAULT_HOSTif not port:port = DEFAULT_PORTscheme = \"\" if ssl else \"\"self.soap_url = \"\".format(scheme,host, port)if not user:user = DEFAULT_USERself.username = userself.password = passwordself.port = portself.force_login_v2 = force_login_v2self.cookie = Noneself.config_started = False", "docstring": "Initialize a Netgear 
session.", "id": "f1631:c0:m0"} {"signature": "def login(self):", "body": "if not self.force_login_v2:v1_result = self.login_v1()if v1_result:return v1_resultreturn self.login_v2()", "docstring": "Login to the router.\n\nWill be called automatically by other actions.", "id": "f1631:c0:m1"} {"signature": "def get_attached_devices(self):", "body": "_LOGGER.info(\"\")success, response = self._make_request(SERVICE_DEVICE_INFO,\"\")if not success:_LOGGER.error(\"\")return Nonesuccess, node = _find_node(response.text,\"\")if not success:return Nonedevices = []decoded = node.text.strip().replace(UNKNOWN_DEVICE_ENCODED,UNKNOWN_DEVICE_DECODED)if not decoded or decoded == \"\":_LOGGER.error(\"\")_LOGGER.debug(node.text.strip())return devicesentries = decoded.split(\"\")entry_count = Noneif len(entries) > :entry_count = _convert(entries.pop(), int)if entry_count is not None and entry_count != len(entries):_LOGGER.info(\"\"\"\"\"\", entry_count, len(entries))for entry in entries:info = entry.split(\"\")if len(info) == :continuesignal = Nonelink_type = Nonelink_rate = Noneallow_or_block = Noneif len(info) >= :allow_or_block = info[]if len(info) >= :link_type = info[]link_rate = _convert(info[], int)signal = _convert(info[], int)if len(info) < :_LOGGER.warning(\"\", info)continueipv4, name, mac = info[:]devices.append(Device(name, ipv4, mac,link_type, signal, link_rate, allow_or_block,None, None, None, None))return devices", "docstring": "Return list of connected devices to the router.\n\nReturns None if error occurred.", "id": "f1631:c0:m4"} {"signature": "def get_attached_devices_2(self):", "body": "_LOGGER.info(\"\")success, response = self._make_request(SERVICE_DEVICE_INFO,\"\")if not success:return Nonesuccess, devices_node = _find_node(response.text,\"\")if not success:return Nonexml_devices = devices_node.findall(\"\")devices = []for d in xml_devices:ip = _xml_get(d, '')name = _xml_get(d, '')mac = _xml_get(d, '')signal = _convert(_xml_get(d, ''), int)link_type = _xml_get(d, '')link_rate = _xml_get(d, '')allow_or_block = _xml_get(d, '')device_type = _convert(_xml_get(d, ''), int)device_model = _xml_get(d, '')ssid = _xml_get(d, '')conn_ap_mac = _xml_get(d, '')devices.append(Device(name, ip, mac, link_type, signal, link_rate,allow_or_block, device_type, device_model,ssid, conn_ap_mac))return devices", "docstring": "Return list of connected devices to the router with details.\n\nThis call is slower and probably heavier on the router load.\n\nReturns None if error occurred.", "id": "f1631:c0:m5"} {"signature": "def get_traffic_meter(self):", "body": "_LOGGER.info(\"\")def parse_text(text):\"\"\"\"\"\"def tofloats(lst): return (float(t) for t in lst)try:if \"\" in text: return tuple(tofloats(text.split('')))elif \"\" in text: hour, mins = tofloats(text.split(''))return timedelta(hours=hour, minutes=mins)else:return float(text)except ValueError:return Nonesuccess, response = self._make_request(SERVICE_DEVICE_CONFIG,\"\")if not success:return Nonesuccess, node = _find_node(response.text,\"\")if not success:return Nonereturn {t.tag: parse_text(t.text) for t in node}", "docstring": "Return dict of traffic meter stats.\n\nReturns None if error occurred.", "id": "f1631:c0:m6"} {"signature": "def config_start(self):", "body": "_LOGGER.info(\"\")success, _ = self._make_request(SERVICE_DEVICE_CONFIG, \"\", {\"\": SESSION_ID})self.config_started = successreturn success", "docstring": "Start a configuration session.\nFor managing router admin functionality (ie allowing/blocking devices)", "id": "f1631:c0:m7"} 
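The Netgear records above and below (f1631) describe a router session object with login, device listing, traffic metering, and allow/block calls. Below is a hedged usage sketch that strings those documented methods together; the pynetgear import path, the placeholder password and MAC address, and the Device field names are assumptions not present in these records.

from pynetgear import Netgear, BLOCK  # assumed package layout and constant export

netgear = Netgear(password="router-admin-password")  # host/URL autodetected per __init__
if netgear.login():
    for device in netgear.get_attached_devices() or []:
        print(device.name, device.ip, device.mac)  # assumed Device field names
    # Block a device by MAC address; per its docstring, allow_block_device
    # opens and closes the config session (config_start/config_finish) itself.
    netgear.allow_block_device("AA:BB:CC:DD:EE:FF", device_status=BLOCK)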
{"signature": "def config_finish(self):", "body": "_LOGGER.info(\"\")if not self.config_started:return Truesuccess, _ = self._make_request(SERVICE_DEVICE_CONFIG, \"\", {\"\": \"\"})self.config_started = not successreturn success", "docstring": "End of a configuration session.\nTells the router we're done managing admin functionality.", "id": "f1631:c0:m8"} {"signature": "def allow_block_device(self, mac_addr, device_status=BLOCK):", "body": "_LOGGER.info(\"\")if self.config_started:_LOGGER.error(\"\")return Falseif not self.config_start():_LOGGER.error(\"\")return Falsesuccess, _ = self._make_request(SERVICE_DEVICE_CONFIG, \"\",{\"\": device_status, \"\": mac_addr})if not success:_LOGGER.error(\"\")return Falseif not self.config_finish():_LOGGER.error(\"\")return Falsereturn True", "docstring": "Allow or Block a device via its Mac Address.\nPass in the mac address for the device that you want to set. Pass in the\ndevice_status you wish to set the device to: Allow (allow device to access the\nnetwork) or Block (block the device from accessing the network).", "id": "f1631:c0:m9"} {"signature": "def _make_request(self, service, method, params=None, body=\"\",need_auth=True):", "body": "if need_auth and not self.cookie:if not self.login():return False, Noneheaders = self._get_headers(service, method, need_auth)if not body:if not params:params = \"\"if isinstance(params, dict):_map = paramsparams = \"\"for k in _map:params += \"\" + k + \">\" + _map[k] + \"\" + k + \"\"body = CALL_BODY.format(service=SERVICE_PREFIX + service,method=method, params=params)message = SOAP_REQUEST.format(session_id=SESSION_ID, body=body)try:response = requests.post(self.soap_url, headers=headers,data=message, timeout=, verify=False)if need_auth and _is_unauthorized_response(response):self.cookie = None_LOGGER.warning(\"\")if self.login():headers = self._get_headers(service, method, need_auth)response = requests.post(self.soap_url, headers=headers,data=message, timeout=, verify=False)success = _is_valid_response(response)if not success:_LOGGER.error(\"\")_LOGGER.debug(\"\", response.status_code, str(response.headers), response.text)return success, responseexcept requests.exceptions.RequestException:_LOGGER.exception(\"\")return False, None", "docstring": "Make an API request to the router.", "id": "f1631:c0:m11"} {"signature": "def __init__(self, auth_client):", "body": "self.client = auth_client", "docstring": "Initializes the instance\n:param auth_client: Client to make (non)authorized requests\n:return:", "id": "f1650:c0:m0"} {"signature": "@classmethoddef get_resource_endpoint(cls, resource_id):", "body": "return urljoin(cls.get_collection_endpoint(), str(resource_id)) if resource_id is not None else None", "docstring": "Get the relative path to a specific API resource\n:param cls: Resource class\n:param resource_id: Resource id\n:return: Relative path to the resource", "id": "f1650:c0:m1"} {"signature": "@classmethoddef get_collection_endpoint(cls):", "body": "return cls.Meta.collection_endpoint if cls.Meta.collection_endpoint is not None else cls.__name__.lower() + \"\"", "docstring": "Get the relative path to the API resource collection\n\nIf self.collection_endpoint is not set, it will default to the lowercase name of the resource class plus an \"s\" and the terminating \"/\"\n:param cls: Resource class\n:return: Relative path to the resource collection", "id": "f1650:c0:m2"} {"signature": "def send(self, url, http_method, **client_args):", "body": "return self.client.send(url, http_method, **client_args)", 
"docstring": "Make the actual request to the API\n:param url: URL\n:param http_method: The method used to make the request to the API\n:param client_args: Arguments to be sent to the auth client\n:return: requests' response object", "id": "f1650:c0:m3"} {"signature": "def __init__(cls, name, bases, nmspc):", "body": "super(ResourceMetaclass, cls).__init__(name, bases, nmspc)for klass in bases:if hasattr(klass, \"\"):for attribute_name, attribute in iteritems(klass.Meta.__dict__):if not (attribute_name.startswith(\"\") or hasattr(cls.Meta, attribute_name)):setattr(cls.Meta, attribute_name, attribute)cls.fields = []for attribute_name, attribute in iteritems(cls.__dict__):if isinstance(attribute, Field):attribute.name = attribute_namecls.fields.append(attribute_name)", "docstring": "Manage Meta inheritance and create the self.fields list of field attributes\n:param cls: Class object\n:param name: Class name\n:param bases: Class inheritance\n:param nmspc: Class namespace\n:return:", "id": "f1650:c1:m0"} {"signature": "def __init__(self, auth_client, **kwargs):", "body": "for name, value in iteritems(kwargs):setattr(self, name, value)super(Resource, self).__init__(auth_client)", "docstring": "Initializes the resource\n:param auth_client: Client to make (non)authorized requests\n:param kwargs: Initial value for attributes\n:return:", "id": "f1650:c2:m0"} {"signature": "def __str__(self):", "body": "return getattr(self, self.Meta.name_field, super(Resource, self).__str__())", "docstring": "Give a nice representation for the resource\n:param return: Resource friendly representation based on the self.Meta.name_field attribute", "id": "f1650:c2:m1"} {"signature": "def get_resource_endpoint(self):", "body": "return super(Resource, self).get_resource_endpoint(self.get_id())", "docstring": "Get the relative path to the specific API resource\n:return: Relative path to the resource", "id": "f1650:c2:m3"} {"signature": "def update_from_dict(self, attribute_dict):", "body": "for field_name, field_value in iteritems(attribute_dict):if self.fields is None or field_name in self.fields:setattr(self, field_name, field_value)", "docstring": "Update the fields of the resource out of a data dictionary taken out of an API response\n:param attribute_dict: Dictionary to be mapped into object attributes\n:return:", "id": "f1650:c2:m4"} {"signature": "def send(self, url, http_method, **client_args):", "body": "response = super(Resource, self).send(url, http_method, **client_args)if response.status_code in (requests.codes.ok, requests.codes.created):try:self.update_from_dict(self.client.get_response_data(response, self.Meta.parse_json))except ValueError:passreturn response if response is not None else None", "docstring": "Make the actual request to the API, updating the resource if necessary\n:param url: Endpoint URL\n:param http_method: The method used to make the request to the API\n:param client_args: Arguments to be sent to the auth client\n:return:", "id": "f1650:c2:m5"} {"signature": "def save(self, force_create=False, fields=None):", "body": "values = {}fields = fields or self.fieldsfor field_name in fields:value = getattr(self, field_name)if isinstance(value, Resource):value = value.get_id()if isinstance(value, list):if len(value) > and isinstance(value[], Resource):value = Noneelse:final_value_list = []for item in value:final_value_list.append(item.isoformat() if isinstance(item, datetime) else item)value = final_value_listif isinstance(value, datetime):value = value.isoformat()if value is not 
None:values[field_name] = valuehttp_headers = {'': ''} if self.Meta.json_data is True else Nonejson = values if self.Meta.json_data is True else Nonedata = values if self.Meta.json_data is False else Noneif self.get_resource_endpoint() is not None and force_create is False:return self.send(self.get_resource_endpoint(), \"\", headers=http_headers, json=json, data=data)else:return self.send(self.get_collection_endpoint(), \"\", headers=http_headers, json=json, data=data)", "docstring": "Saves (creates or updates) resource on the server\n:param force_create: If True, forces resource creation even if it already has an Id.\n:param fields: List of fields to be saved. If None, all fields will be saved.\n:return:", "id": "f1650:c2:m6"} {"signature": "def refresh(self):", "body": "if self.get_resource_endpoint() is not None:return self.send(self.get_resource_endpoint(), \"\")", "docstring": "Refreshes a resource by checking against the API\n:return:", "id": "f1650:c2:m7"} {"signature": "def delete(self):", "body": "if self.get_resource_endpoint() is not None:return self.send(self.get_resource_endpoint(), http_method=\"\")", "docstring": "Deletes the resource from the server; Python object remains untouched\n:return:", "id": "f1650:c2:m8"} {"signature": "def __init__(self, auth_client):", "body": "self.paginator = self.paginator_class(auth_client.base_url)super(Manager, self).__init__(auth_client)", "docstring": ":param auth_client: Client to make (non)authorized requests\n:return:", "id": "f1650:c3:m0"} {"signature": "@classmethoddef get_collection_endpoint(cls):", "body": "return cls.resource_class.get_collection_endpoint()", "docstring": "Get the relative path to the API resource collection, using the corresponding resource class\n\n:param cls: Manager class\n:return: Relative path to the resource collection", "id": "f1650:c3:m1"} {"signature": "def get(self, resource_id):", "body": "response = self.send(self.get_resource_endpoint(resource_id), \"\")try:resource = self.resource_class(self.client)except (ValueError, TypeError):return Noneelse:resource.update_from_dict(self.client.get_response_data(response, self.Meta.parse_json))return resource", "docstring": "Get one single resource from the API\n:param resource_id: Id of the resource to be retrieved\n:return: Retrieved resource", "id": "f1650:c3:m2"} {"signature": "def filter(self, **search_args):", "body": "search_args = search_args or {}raw_resources = []for url, paginator_params in self.paginator.get_urls(self.get_collection_endpoint()):search_args.update(paginator_params)response = self.paginator.process_response(self.send(url, \"\", params=search_args))raw_resources += self.client.get_response_data(response, self.Meta.parse_json)[self.json_collection_attribute] if self.json_collection_attribute is not None else self.client.get_response_data(response, self.Meta.parse_json)resources = []for raw_resource in raw_resources:try:resource = self.resource_class(self.client)except (ValueError, TypeError):continueelse:resource.update_from_dict(raw_resource)resources.append(resource)return resources", "docstring": "Get a filtered list of resources\n:param search_args: To be translated into ?arg1=value1&arg2=value2...\n:return: A list of resources", "id": "f1650:c3:m3"} {"signature": "def all(self):", "body": "return self.filter()", "docstring": "Get a list of all the resources\n:return: A list of resources", "id": "f1650:c3:m4"} {"signature": "def create(self, **kwargs):", "body": "resource = 
self.resource_class(self.client)resource.update_from_dict(kwargs)resource.save(force_create=True)return resource", "docstring": "Create a resource on the server\n:param kwargs: Attributes (field names and values) of the new resource", "id": "f1650:c3:m5"} {"signature": "def __init__(self, base_url, session=None):", "body": "self.base_url = base_urlif session is None:self.session = requests.Session()else:self.session = session", "docstring": ":param base_url: Base URL. API endpoint paths will always be relative to this URL\n:param session: requests' session\n:return:", "id": "f1651:c0:m0"} {"signature": "def send(self, relative_path, http_method, **requests_args):", "body": "url = urljoin(self.base_url, relative_path)return self.session.request(http_method, url, **requests_args)", "docstring": "Subclasses must implement this method, which will be used to send API requests with proper auth\n:param relative_path: URL path relative to self.base_url\n:param http_method: HTTP method\n:param requests_args: kwargs to be sent to requests\n:return:", "id": "f1651:c0:m1"} {"signature": "def get_response_data(self, response, parse_json=True):", "body": "if response.status_code in (requests.codes.ok, requests.codes.created):if parse_json:return response.json()return response.contentelif response.status_code == requests.codes.bad_request:response_json = response.json()raise BadRequestException(response_json.get(\"\", False) or response_json.get(\"\",_(\"\").format(text=response.text)))elif response.status_code == requests.codes.not_found:raise NotFoundException(_(\"\").format(url=response.url))elif response.status_code == requests.codes.internal_server_error:raise ServerErrorException(_(\"\"))elif response.status_code in (requests.codes.unauthorized, requests.codes.forbidden):raise AuthErrorException(_(\"\"))elif response.status_code == requests.codes.too_many_requests:raise RateLimitException(_(response.text))else:raise ServerErrorException(_(\"\"))", "docstring": "Get response data or raise an appropriate exception\n:param response: requests response object\n:param parse_json: if True, response will be parsed as JSON\n:return: response data, either as JSON or as a regular response.content object", "id": "f1651:c0:m2"} {"signature": "def send(self, relative_path, http_method, **requests_args):", "body": "if http_method != \"\":warnings.warn(_(\"\"))return super(NoAuthClient, self).send(relative_path, http_method, **requests_args)", "docstring": "Make an unauthorized request\n:param relative_path: URL path relative to self.base_url\n:param http_method: HTTP method\n:param requests_args: kwargs to be sent to requests\n:return: requests' response object", "id": "f1651:c1:m0"} {"signature": "def __init__(self, token, *args, **kwargs):", "body": "if '' in kwargs:header_keyword = kwargs['']del kwargs['']else:header_keyword = \"\"if not self.base_url.startswith(''):warnings.warn(_(\"\"))super(TokenAuthClient, self).__init__(*args, **kwargs)self.session.headers.update({\"\": \"\".format(keyword=header_keyword, token=token)})", "docstring": ":param token: Authentication token\n:param header_keyword: Authorization HTTP header prefix\n:return:", "id": "f1651:c2:m0"} {"signature": "def __init__(self, user_name, password, *args, **kwargs):", "body": "super(BasicAuthClient, self).__init__(*args, **kwargs)self.session.auth = (user_name, password)", "docstring": ":param user_name: User name for basic authentication\n:param password: Password for basic authentication\n:return:", "id": "f1651:c3:m0"} {"signature": "def 
__init__(self, many=False):", "body": "self.many = manyself.name = None", "docstring": "Initialize the field\n:param many: Set to True if this field will host a list of items", "id": "f1652:c0:m0"} {"signature": "def __get__(self, instance, owner):", "body": "if instance is not None and self.name is not None:return instance.__dict__.get(self.name)else:return self", "docstring": "Normal descriptor get method\n:param instance: Resource instance where the field lives\n:param instance: Resource class where the field lives\n:return: Value stored in instance.name (TODO: maybe change this in the future to instance.Cache.name)", "id": "f1652:c0:m1"} {"signature": "def __set__(self, instance, value):", "body": "if instance is not None and self.name is not None:instance.__dict__[self.name] = value", "docstring": "Normal descriptor set method\n:param instance: Resource instance where the field lives\n:param value: Value to store in instance.name (TODO: maybe change this in the future to instance.Cache.name)", "id": "f1652:c0:m2"} {"signature": "def __set__(self, instance, value):", "body": "if self.many is False:if isinstance(value, str):value = parse(value)else:datetime_list = []for datetime_value in value:if isinstance(datetime_value, str):datetime_value = parse(datetime_value)datetime_list.append(datetime_value)value = datetime_listsuper(DateTimeField, self).__set__(instance, value)", "docstring": "Normal descriptor set method\n:param instance: Resource instance where the field lives\n:param value: Might be a datetime object or a string to be parsed", "id": "f1652:c5:m0"} {"signature": "def set_real_value_class(self):", "body": "if self.value_class is not None and isinstance(self.value_class, str):module_name, dot, class_name = self.value_class.rpartition(\"\")module = __import__(module_name, fromlist=[class_name])self.value_class = getattr(module, class_name)self._initialized = True", "docstring": "value_class is initially a string with the import path to the resource class, but we need to get the actual class before doing any work\n\nWe do not expect the actual clas to be in value_class since the beginning to avoid nasty import egg-before-chicken errors", "id": "f1652:c7:m0"} {"signature": "def read_file(fpath):", "body": "with io.open(os.path.join(PATH_BASE, fpath)) as f:return f.read()", "docstring": "Reads a file within package directories.", "id": "f1655:m0"} {"signature": "def get_version():", "body": "contents = read_file(os.path.join('', ''))version = re.search('', contents)version = version.group().replace('', '').strip()return version", "docstring": "Returns version number, without module import (which can lead to ImportError\n if some dependencies are unavailable before install.", "id": "f1655:m1"} {"signature": "def get_field_for_proxy(pref_proxy):", "body": "field = {bool: models.BooleanField,int: models.IntegerField,float: models.FloatField,datetime: models.DateTimeField,}.get(type(pref_proxy.default), models.TextField)()update_field_from_proxy(field, pref_proxy)return field", "docstring": "Returns a field object instance for a given PrefProxy object.\n\n :param PrefProxy pref_proxy:\n\n :rtype: models.Field", "id": "f1668:m0"} {"signature": "def update_field_from_proxy(field_obj, pref_proxy):", "body": "attr_names = ('', '', '')for attr_name in attr_names:setattr(field_obj, attr_name, getattr(pref_proxy, attr_name))", "docstring": "Updates field object with data from a PrefProxy object.\n\n :param models.Field field_obj:\n\n :param PrefProxy pref_proxy:", "id": "f1668:m1"} 
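To make the field-mapping records above (f1668:m0 and f1668:m1) concrete, here is a small self-contained sketch of how a preference's default value type can be turned into a Django model field. The SimpleProxy class is a hypothetical stand-in for siteprefs' PrefProxy, and passing verbose_name/help_text through the field constructor is a simplification of what update_field_from_proxy does with setattr; only the bool/int/float/datetime-to-field mapping, with TextField as the fallback, is taken from the records.

# Sketch of the type-to-field mapping used by get_field_for_proxy (f1668:m0).
# SimpleProxy is a hypothetical stand-in for PrefProxy; Django must be installed.
from datetime import datetime

from django.db import models


class SimpleProxy:
    def __init__(self, name, default, help_text=''):
        self.name = name
        self.default = default
        self.verbose_name = name.replace('_', ' ').capitalize()
        self.help_text = help_text


def field_for(proxy):
    # Pick a field class from the type of the default value; fall back to TextField.
    field_cls = {
        bool: models.BooleanField,
        int: models.IntegerField,
        float: models.FloatField,
        datetime: models.DateTimeField,
    }.get(type(proxy.default), models.TextField)
    # The real code instantiates the field first and then copies verbose_name,
    # help_text and default onto it via update_field_from_proxy().
    return field_cls(verbose_name=proxy.verbose_name, help_text=proxy.help_text)


# field_for(SimpleProxy('ENABLE_FEATURE', True))  -> BooleanField instance
# field_for(SimpleProxy('GREETING', 'hello'))     -> TextField instance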
{"signature": "def get_pref_model_class(app, prefs, get_prefs_func):", "body": "module = '' % (app, PREFS_MODULE_NAME)model_dict = {'': app,'': staticmethod(get_prefs_func),'': module,'': type('', (models.options.Options,), {'': _(''),'': _(''),'': app,'': False,})}for field_name, val_proxy in prefs.items():model_dict[field_name] = val_proxy.fieldmodel = type('', (models.Model,), model_dict)def fake_save_base(self, *args, **kwargs):updated_prefs = {f.name: getattr(self, f.name) for f in self._meta.fields if not isinstance(f, models.fields.AutoField)}app_prefs = self._get_prefs(self._prefs_app)for pref in app_prefs.keys():if pref in updated_prefs:app_prefs[pref].db_value = updated_prefs[pref]self.pk = self._prefs_app prefs_save.send(sender=self, app=self._prefs_app, updated_prefs=updated_prefs)return Truemodel.save_base = fake_save_basereturn model", "docstring": "Returns preferences model class dynamically crated for a given app or None on conflict.", "id": "f1668:m2"} {"signature": "def get_frame_locals(stepback=):", "body": "with Frame(stepback=stepback) as frame:locals_dict = frame.f_localsreturn locals_dict", "docstring": "Returns locals dictionary from a given frame.\n\n :param int stepback:\n\n :rtype: dict", "id": "f1668:m4"} {"signature": "def traverse_local_prefs(stepback=):", "body": "locals_dict = get_frame_locals(stepback+)for k in locals_dict:if not k.startswith('') and k.upper() == k:yield k, locals_dict", "docstring": "Generator to walk through variables considered as preferences\n in locals dict of a given frame.\n\n :param int stepback:\n\n :rtype: tuple", "id": "f1668:m5"} {"signature": "def import_module(package, module_name):", "body": "import_app_module(package, module_name)", "docstring": "Imports a module from a given package.\n\n :param str|unicode package:\n :param str|unicode module_name:", "id": "f1668:m6"} {"signature": "def import_prefs():", "body": "settings_locals = get_frame_locals()if '' not in settings_locals: project_package = settings_locals[''] if not project_package:project_package = os.path.split(os.path.dirname(settings_locals['']))[-]import_module(project_package, PREFS_MODULE_NAME)import_project_modules(PREFS_MODULE_NAME)", "docstring": "Imports preferences modules from packages (apps) and project root.", "id": "f1668:m7"} {"signature": "def __init__(self, name, default, category=None, field=None, verbose_name=None, help_text='', static=True,readonly=False):", "body": "self.name = nameself.category = categoryself.default = defaultself.static = staticself.help_text = help_textif static:readonly = Trueself.readonly = readonlyif verbose_name is None:verbose_name = name.replace('', '').capitalize()self.verbose_name = verbose_nameif field is None:self.field = get_field_for_proxy(self)else:self.field = fieldupdate_field_from_proxy(self.field, self)", "docstring": ":param str|unicode name: Preference name.\n\n:param default: Default (initial) value.\n\n:param str|unicode category: Category name the preference belongs to.\n\n:param Field field: Django model field to represent this preference.\n\n:param str|unicode verbose_name: Field verbose name.\n\n:param str|unicode help_text: Field help text.\n\n:param bool static: Leave this preference static (do not store in DB).\n\n:param bool readonly: Make this field read only.", "id": "f1668:c3:m0"} {"signature": "@classmethoddef read_prefs(cls, mem_prefs):", "body": "db_prefs = {'' % (pref[''], pref['']): pref for pref incls.objects.values().order_by('', '')}new_prefs = []for app, prefs in mem_prefs.items():for 
pref_name, pref_proxy in prefs.items():if not pref_proxy.static: key = '' % (app, pref_name)if key in db_prefs:pref_proxy.db_value = db_prefs[key]['']else:new_prefs.append(cls(app=app, name=pref_name, text=pref_proxy.default))if new_prefs:try:cls.objects.bulk_create(new_prefs)except IntegrityError: pass", "docstring": "Initializes preferences entries in DB according to currently discovered prefs.\n\n :param dict mem_prefs:", "id": "f1669:c0:m1"} {"signature": "def on_pref_update(*args, **kwargs):", "body": "Preference.update_prefs(*args, **kwargs)Preference.read_prefs(get_prefs())", "docstring": "Triggered on dynamic preferences model save.\n Issues DB save and reread.", "id": "f1670:m0"} {"signature": "def get_prefs():", "body": "global __PREFS_REGISTRYif __PREFS_REGISTRY is None:__PREFS_REGISTRY = __PREFS_DEFAULT_REGISTRYreturn __PREFS_REGISTRY", "docstring": "Returns a dictionary with all preferences discovered by siteprefs.", "id": "f1670:m1"} {"signature": "def get_app_prefs(app=None):", "body": "if app is None:with Frame(stepback=) as frame:app = frame.f_globals[''].split('')[]prefs = get_prefs()if app not in prefs:return {}return prefs[app]", "docstring": "Returns a dictionary with preferences for a certain app/module.\n\n :param str|unicode app:\n\n :rtype: dict", "id": "f1670:m2"} {"signature": "def get_prefs_models():", "body": "return __MODELS_REGISTRY", "docstring": "Returns registered preferences models indexed by application names.\n\n :rtype: dict", "id": "f1670:m3"} {"signature": "def bind_proxy(values, category=None, field=None, verbose_name=None, help_text='', static=True, readonly=False):", "body": "addrs = OrderedDict()depth = for local_name, locals_dict in traverse_local_prefs(depth):addrs[id(locals_dict[local_name])] = local_nameproxies = []locals_dict = get_frame_locals(depth)for value in values: id_val = id(value)if id_val in addrs:local_name = addrs[id_val]local_val = locals_dict[local_name]if isinstance(local_val, PatchedLocal) and not isinstance(local_val, PrefProxy):proxy = PrefProxy(local_name, value.val,category=category,field=field,verbose_name=verbose_name,help_text=help_text,static=static,readonly=readonly,)app_name = locals_dict[''].split('')[-] prefs = get_prefs()if app_name not in prefs:prefs[app_name] = OrderedDict()prefs[app_name][local_name.lower()] = proxylocals_dict[local_name] = proxyproxies.append(proxy)return proxies", "docstring": "Binds PrefProxy objects to module variables used by apps as preferences.\n\n :param list|tuple values: Preference values.\n\n :param str|unicode category: Category name the preference belongs to.\n\n :param Field field: Django model field to represent this preference.\n\n :param str|unicode verbose_name: Field verbose name.\n\n :param str|unicode help_text: Field help text.\n\n :param bool static: Leave this preference static (do not store in DB).\n\n :param bool readonly: Make this field read only.\n\n :rtype: list", "id": "f1670:m4"} {"signature": "def register_admin_models(admin_site):", "body": "global __MODELS_REGISTRYprefs = get_prefs()for app_label, prefs_items in prefs.items():model_class = get_pref_model_class(app_label, prefs_items, get_app_prefs)if model_class is not None:__MODELS_REGISTRY[app_label] = model_classadmin_site.register(model_class, get_pref_model_admin_class(prefs_items))", "docstring": "Registers dynamically created preferences models for Admin interface.\n\n :param admin.AdminSite admin_site: AdminSite object.", "id": "f1670:m5"} {"signature": "def autodiscover_siteprefs(admin_site=None):", 
"body": "if admin_site is None:admin_site = admin.siteif '' not in sys.argv[] or (len(sys.argv) > and sys.argv[] in MANAGE_SAFE_COMMANDS):import_prefs()Preference.read_prefs(get_prefs())register_admin_models(admin_site)", "docstring": "Automatically discovers and registers all preferences available in all apps.\n\n :param admin.AdminSite admin_site: Custom AdminSite object.", "id": "f1670:m6"} {"signature": "def patch_locals(depth=):", "body": "for name, locals_dict in traverse_local_prefs(depth):locals_dict[name] = PatchedLocal(name, locals_dict[name])get_frame_locals(depth)[__PATCHED_LOCALS_SENTINEL] = True", "docstring": "Temporarily (see unpatch_locals()) replaces all module variables\n considered preferences with PatchedLocal objects, so that every\n variable has different hash returned by id().", "id": "f1670:m7"} {"signature": "def unpatch_locals(depth=):", "body": "for name, locals_dict in traverse_local_prefs(depth):if isinstance(locals_dict[name], PatchedLocal):locals_dict[name] = locals_dict[name].valdel get_frame_locals(depth)[__PATCHED_LOCALS_SENTINEL]", "docstring": "Restores the original values of module variables\n considered preferences if they are still PatchedLocal\n and not PrefProxy.", "id": "f1670:m8"} {"signature": "def proxy_settings_module(depth=):", "body": "proxies = []modules = sys.modulesmodule_name = get_frame_locals(depth)['']module_real = modules[module_name]for name, locals_dict in traverse_local_prefs(depth):value = locals_dict[name]if isinstance(value, PrefProxy):proxies.append(name)new_module = type(module_name, (ModuleType, ModuleProxy), {})(module_name) new_module.bind(module_real, proxies)modules[module_name] = new_module", "docstring": "Replaces a settings module with a Module proxy to intercept\n an access to settings.\n\n :param int depth: Frame count to go backward.", "id": "f1670:m9"} {"signature": "def register_prefs(*args, **kwargs):", "body": "swap_settings_module = bool(kwargs.get('', True))if __PATCHED_LOCALS_SENTINEL not in get_frame_locals():raise SitePrefsException('')bind_proxy(args, **kwargs)unpatch_locals()swap_settings_module and proxy_settings_module()", "docstring": "Registers preferences that should be handled by siteprefs.\n\n Expects preferences as *args.\n\n Use keyword arguments to batch apply params supported by\n ``PrefProxy`` to all preferences not constructed by ``pref`` and ``pref_group``.\n\n Batch kwargs:\n\n :param str|unicode help_text: Field help text.\n\n :param bool static: Leave this preference static (do not store in DB).\n\n :param bool readonly: Make this field read only.\n\n :param bool swap_settings_module: Whether to automatically replace settings module\n with a special ``ProxyModule`` object to access dynamic values of settings\n transparently (so not to bother with calling ``.value`` of ``PrefProxy`` object).", "id": "f1670:m10"} {"signature": "def pref_group(title, prefs, help_text='', static=True, readonly=False):", "body": "bind_proxy(prefs, title, help_text=help_text, static=static, readonly=readonly)for proxy in prefs: if isinstance(proxy, PrefProxy):proxy.category = title", "docstring": "Marks preferences group.\n\n :param str|unicode title: Group title\n\n :param list|tuple prefs: Preferences to group.\n\n :param str|unicode help_text: Field help text.\n\n :param bool static: Leave this preference static (do not store in DB).\n\n :param bool readonly: Make this field read only.", "id": "f1670:m11"} {"signature": "def pref(preference, field=None, verbose_name=None, help_text='', static=True, 
readonly=False):", "body": "try:bound = bind_proxy((preference,),field=field,verbose_name=verbose_name,help_text=help_text,static=static,readonly=readonly,)return bound[]except IndexError:return", "docstring": "Marks a preference.\n\n :param preference: Preference variable.\n\n :param Field field: Django model field to represent this preference.\n\n :param str|unicode verbose_name: Field verbose name.\n\n :param str|unicode help_text: Field help text.\n\n :param bool static: Leave this preference static (do not store in DB).\n\n :param bool readonly: Make this field read only.\n\n :rtype: PrefProxy|None", "id": "f1670:m12"} {"signature": "def bind(self, module, prefs):", "body": "self._module = moduleself._prefs = set(prefs)", "docstring": ":param ModuleType module:\n:param list prefs: Preference names. Just to speed up __getattr__.", "id": "f1670:c0:m1"} {"signature": "def tree_search(problem, frontier):", "body": "frontier.append(Node(problem.initial))while frontier:node = frontier.pop()if problem.goal_test(node.state):return nodefrontier.extend(node.expand(problem))return None", "docstring": "Search through the successors of a problem to find a goal.\n The argument frontier should be an empty queue.\n Don't worry about repeated paths to a state. [Fig. 3.7]", "id": "f1675:m0"} {"signature": "def graph_search(problem, frontier):", "body": "frontier.append(Node(problem.initial))explored = set()while frontier:node = frontier.pop()if problem.goal_test(node.state):return nodeexplored.add(node.state)frontier.extend(child for child in node.expand(problem)if child.state not in exploredand child not in frontier)return None", "docstring": "Search through the successors of a problem to find a goal.\n The argument frontier should be an empty queue.\n If two paths reach a state, only use the first one. [Fig. 3.7]", "id": "f1675:m1"} {"signature": "def breadth_first_tree_search(problem):", "body": "return tree_search(problem, FIFOQueue())", "docstring": "Search the shallowest nodes in the search tree first.", "id": "f1675:m2"} {"signature": "def depth_first_tree_search(problem):", "body": "return tree_search(problem, Stack())", "docstring": "Search the deepest nodes in the search tree first.", "id": "f1675:m3"} {"signature": "def depth_first_graph_search(problem):", "body": "return graph_search(problem, Stack())", "docstring": "Search the deepest nodes in the search tree first.", "id": "f1675:m4"} {"signature": "def breadth_first_search(problem):", "body": "node = Node(problem.initial)if problem.goal_test(node.state):return nodefrontier = FIFOQueue()frontier.append(node)explored = set()while frontier:node = frontier.pop()explored.add(node.state)for child in node.expand(problem):if child.state not in explored and child not in frontier:if problem.goal_test(child.state):return childfrontier.append(child)return None", "docstring": "[Fig. 
3.11]", "id": "f1675:m5"} {"signature": "def best_first_graph_search(problem, f):", "body": "f = memoize(f, '')node = Node(problem.initial)if problem.goal_test(node.state):return nodefrontier = PriorityQueue(min, f)frontier.append(node)explored = set()while frontier:node = frontier.pop()if problem.goal_test(node.state):return nodeexplored.add(node.state)for child in node.expand(problem):if child.state not in explored and child not in frontier:frontier.append(child)elif child in frontier:incumbent = frontier[child]if f(child) < f(incumbent):del frontier[incumbent]frontier.append(child)return None", "docstring": "Search the nodes with the lowest f scores first.\n You specify the function f(node) that you want to minimize; for example,\n if f is a heuristic estimate to the goal, then we have greedy best\n first search; if f is node.depth then we have breadth-first search.\n There is a subtlety: the line \"f = memoize(f, 'f')\" means that the f\n values will be cached on the nodes as they are computed. So after doing\n a best first search you can examine the f values of the path returned.", "id": "f1675:m6"} {"signature": "def uniform_cost_search(problem):", "body": "return best_first_graph_search(problem, lambda node: node.path_cost)", "docstring": "[Fig. 3.14]", "id": "f1675:m7"} {"signature": "def depth_limited_search(problem, limit=):", "body": "def recursive_dls(node, problem, limit):if problem.goal_test(node.state):return nodeelif node.depth == limit:return ''else:cutoff_occurred = Falsefor child in node.expand(problem):result = recursive_dls(child, problem, limit)if result == '':cutoff_occurred = Trueelif result is not None:return resultreturn if_(cutoff_occurred, '', None)return recursive_dls(Node(problem.initial), problem, limit)", "docstring": "[Fig. 3.17]", "id": "f1675:m8"} {"signature": "def iterative_deepening_search(problem):", "body": "for depth in range(sys.maxsize):result = depth_limited_search(problem, depth)if result != '':return result", "docstring": "[Fig. 3.18]", "id": "f1675:m9"} {"signature": "def astar_search(problem, h=None):", "body": "h = memoize(h or problem.h, '')return best_first_graph_search(problem, lambda n: n.path_cost + h(n))", "docstring": "A* search is best-first graph search with f(n) = g(n)+h(n).\n You need to specify the h function when you call astar_search, or\n else in your Problem subclass.", "id": "f1675:m10"} {"signature": "def recursive_best_first_search(problem, h=None):", "body": "h = memoize(h or problem.h, '')def RBFS(problem, node, flimit):if problem.goal_test(node.state):return node, successors = node.expand(problem)if len(successors) == :return None, infinityfor s in successors:s.f = max(s.path_cost + h(s), node.f)while True:successors.sort(lambda x,y: cmp(x.f, y.f)) best = successors[]if best.f > flimit:return None, best.fif len(successors) > :alternative = successors[].felse:alternative = infinityresult, best.f = RBFS(problem, best, min(flimit, alternative))if result is not None:return result, best.fnode = Node(problem.initial)node.f = h(node)result, bestf = RBFS(problem, node, infinity)return result", "docstring": "[Fig. 
3.26]", "id": "f1675:m11"} {"signature": "def hill_climbing(problem):", "body": "current = Node(problem.initial)while True:neighbors = current.expand(problem)if not neighbors:breakneighbor = argmax_random_tie(neighbors,lambda node: problem.value(node.state))if problem.value(neighbor.state) <= problem.value(current.state):breakcurrent = neighborreturn current.state", "docstring": "From the initial node, keep choosing the neighbor with highest value,\n stopping when no neighbor is better. [Fig. 4.2]", "id": "f1675:m12"} {"signature": "def exp_schedule(k=, lam=, limit=):", "body": "return lambda t: if_(t < limit, k * math.exp(-lam * t), )", "docstring": "One possible schedule function for simulated annealing", "id": "f1675:m13"} {"signature": "def simulated_annealing(problem, schedule=exp_schedule()):", "body": "current = Node(problem.initial)for t in range(sys.maxsize):T = schedule(t)if T == :return currentneighbors = current.expand(problem)if not neighbors:return currentnext = random.choice(neighbors)delta_e = problem.value(next.state) - problem.value(current.state)if delta_e > or probability(math.exp(delta_e/T)):current = next", "docstring": "[Fig. 4.5]", "id": "f1675:m14"} {"signature": "def and_or_graph_search(problem):", "body": "unimplemented()", "docstring": "[Fig. 4.11]", "id": "f1675:m15"} {"signature": "def online_dfs_agent(s1):", "body": "unimplemented()", "docstring": "[Fig. 4.21]", "id": "f1675:m16"} {"signature": "def lrta_star_agent(s1):", "body": "unimplemented()", "docstring": "[Fig. 4.24]", "id": "f1675:m17"} {"signature": "def genetic_search(problem, fitness_fn, ngen=, pmut=, n=):", "body": "s = problem.initial_statestates = [problem.result(s, a) for a in problem.actions(s)]random.shuffle(states)return genetic_algorithm(states[:n], problem.value, ngen, pmut)", "docstring": "Call genetic_algorithm on the appropriate parts of a problem.\n This requires the problem to have states that can mate and mutate,\n plus a value method that scores states.", "id": "f1675:m18"} {"signature": "def genetic_algorithm(population, fitness_fn, ngen=, pmut=):", "body": "for i in range(ngen):new_population = []for i in len(population):fitnesses = list(map(fitness_fn, population))p1, p2 = weighted_sample_with_replacement(population, fitnesses, )child = p1.mate(p2)if random.uniform(, ) < pmut:child.mutate()new_population.append(child)population = new_populationreturn argmax(population, fitness_fn)", "docstring": "[Fig. 
4.8]", "id": "f1675:m19"} {"signature": "def UndirectedGraph(dict=None):", "body": "return Graph(dict=dict, directed=False)", "docstring": "Build a Graph where every edge (including future ones) goes both ways.", "id": "f1675:m20"} {"signature": "def RandomGraph(nodes=list(range()), min_links=, width=, height=,curvature=lambda: random.uniform(, )):", "body": "g = UndirectedGraph()g.locations = {}for node in nodes:g.locations[node] = (random.randrange(width), random.randrange(height))for i in range(min_links):for node in nodes:if len(g.get(node)) < min_links:here = g.locations[node]def distance_to_node(n):if n is node or g.get(node,n): return infinityreturn distance(g.locations[n], here)neighbor = argmin(nodes, distance_to_node)d = distance(g.locations[neighbor], here) * curvature()g.connect(node, neighbor, int(d))return g", "docstring": "Construct a random graph, with the specified nodes, and random links.\n The nodes are laid out randomly on a (width x height) rectangle.\n Then each node is connected to the min_links nearest neighbors.\n Because inverse links are added, some nodes will have more connections.\n The distance between nodes is the hypotenuse times curvature(),\n where curvature() defaults to a random number between 1.1 and 1.5.", "id": "f1675:m21"} {"signature": "def random_boggle(n=):", "body": "cubes = [cubes16[i % ] for i in range(n*n)]random.shuffle(cubes)return list(map(random.choice, cubes))", "docstring": "Return a random Boggle board of size n x n.\n We represent a board as a linear list of letters.", "id": "f1675:m22"} {"signature": "def print_boggle(board):", "body": "n2 = len(board); n = exact_sqrt(n2)for i in range(n2):if i % n == and i > : print()if board[i] == '': print('', end='')else: print(str(board[i]) + '', end='')print()", "docstring": "Print the board in a 2-d array.", "id": "f1675:m23"} {"signature": "def boggle_neighbors(n2, cache={}):", "body": "if cache.get(n2):return cache.get(n2)n = exact_sqrt(n2)neighbors = [None] * n2for i in range(n2):neighbors[i] = []on_top = i < non_bottom = i >= n2 - non_left = i % n == on_right = (i+) % n == if not on_top:neighbors[i].append(i - n)if not on_left: neighbors[i].append(i - n - )if not on_right: neighbors[i].append(i - n + )if not on_bottom:neighbors[i].append(i + n)if not on_left: neighbors[i].append(i + n - )if not on_right: neighbors[i].append(i + n + )if not on_left: neighbors[i].append(i - )if not on_right: neighbors[i].append(i + )cache[n2] = neighborsreturn neighbors", "docstring": "Return a list of lists, where the i-th element is the list of indexes\n for the neighbors of square i.", "id": "f1675:m24"} {"signature": "def exact_sqrt(n2):", "body": "n = int(math.sqrt(n2))assert n * n == n2return n", "docstring": "If n2 is a perfect square, return its square root, else raise error.", "id": "f1675:m25"} {"signature": "def boggle_hill_climbing(board=None, ntimes=, verbose=True):", "body": "finder = BoggleFinder()if board is None:board = random_boggle()best = len(finder.set_board(board))for _ in range(ntimes):i, oldc = mutate_boggle(board)new = len(finder.set_board(board))if new > best:best = newif verbose: print(best, _, board)else:board[i] = oldc if verbose:print_boggle(board)return board, best", "docstring": "Solve inverse Boggle by hill-climbing: find a high-scoring board by\n starting with a random one and changing it.", "id": "f1675:m26"} {"signature": "def compare_graph_searchers():", "body": "compare_searchers(problems=[GraphProblem('', '', romania),GraphProblem('', '', romania),GraphProblem('', '', 
australia)],header=['', '', '', ''])", "docstring": "Prints a table of results like this:\n >>> compare_graph_searchers()\n Searcher Romania(A, B) Romania(O, N) Australia \n breadth_first_tree_search < 21/ 22/ 59/B> <1158/1159/3288/N> < 7/ 8/ 22/WA>\n breadth_first_search < 7/ 11/ 18/B> < 19/ 20/ 45/N> < 2/ 6/ 8/WA>\n depth_first_graph_search < 8/ 9/ 20/B> < 16/ 17/ 38/N> < 4/ 5/ 11/WA>\n iterative_deepening_search < 11/ 33/ 31/B> < 656/1815/1812/N> < 3/ 11/ 11/WA>\n depth_limited_search < 54/ 65/ 185/B> < 387/1012/1125/N> < 50/ 54/ 200/WA>\n recursive_best_first_search < 5/ 6/ 15/B> <5887/5888/16532/N> < 11/ 12/ 43/WA>", "id": "f1675:m30"} {"signature": "def __init__(self, initial, goal=None):", "body": "self.initial = initial; self.goal = goal", "docstring": "The constructor specifies the initial state, and possibly a goal\n state, if there is a unique goal. Your subclass's constructor can add\n other arguments.", "id": "f1675:c0:m0"} {"signature": "def actions(self, state):", "body": "abstract", "docstring": "Return the actions that can be executed in the given\n state. The result would typically be a list, but if there are\n many actions, consider yielding them one at a time in an\n iterator, rather than building them all at once.", "id": "f1675:c0:m1"} {"signature": "def result(self, state, action):", "body": "abstract", "docstring": "Return the state that results from executing the given\n action in the given state. The action must be one of\n self.actions(state).", "id": "f1675:c0:m2"} {"signature": "def path_cost(self, c, state1, action, state2):", "body": "return c + ", "docstring": "Return the cost of a solution path that arrives at state2 from\n state1 via action, assuming cost c to get up to state1. If the problem\n is such that the path doesn't matter, this function will only look at\n state2. If the path does matter, it will consider c and maybe state1\n and action. The default method costs 1 for every step in the path.", "id": "f1675:c0:m4"} {"signature": "def value(self, state):", "body": "abstract", "docstring": "For optimization problems, each state has a value. Hill-climbing\n and related algorithms try to maximize this value.", "id": "f1675:c0:m5"} {"signature": "def __init__(self, state, parent=None, action=None, path_cost=):", "body": "update(self, state=state, parent=parent, action=action,path_cost=path_cost, depth=)if parent:self.depth = parent.depth + ", "docstring": "Create a search tree Node, derived from a parent by an action.", "id": "f1675:c1:m0"} {"signature": "def expand(self, problem):", "body": "return [self.child_node(problem, action)for action in problem.actions(self.state)]", "docstring": "List the nodes reachable in one step from this node.", "id": "f1675:c1:m2"} {"signature": "def child_node(self, problem, action):", "body": "next = problem.result(self.state, action)return Node(next, self, action,problem.path_cost(self.path_cost, self.state, action, next))", "docstring": "Fig. 
3.10", "id": "f1675:c1:m3"} {"signature": "def solution(self):", "body": "return [node.action for node in self.path()[:]]", "docstring": "Return the sequence of actions to go from the root to this node.", "id": "f1675:c1:m4"} {"signature": "def path(self):", "body": "node, path_back = self, []while node:path_back.append(node)node = node.parentreturn list(reversed(path_back))", "docstring": "Return a list of nodes forming the path from the root to this node.", "id": "f1675:c1:m5"} {"signature": "def mate(self, other):", "body": "c = random.randrange(len(self.genes))return self.__class__(self.genes[:c] + other.genes[c:])", "docstring": "Return a new individual crossing self and other.", "id": "f1675:c3:m1"} {"signature": "def mutate(self):", "body": "abstract", "docstring": "Change a few of my genes.", "id": "f1675:c3:m2"} {"signature": "def make_undirected(self):", "body": "for a in list(self.dict.keys()):for (b, distance) in list(self.dict[a].items()):self.connect1(b, a, distance)", "docstring": "Make a digraph into an undirected graph by adding symmetric edges.", "id": "f1675:c4:m1"} {"signature": "def connect(self, A, B, distance=):", "body": "self.connect1(A, B, distance)if not self.directed: self.connect1(B, A, distance)", "docstring": "Add a link from A and B of given distance, and also add the inverse\n link if the graph is undirected.", "id": "f1675:c4:m2"} {"signature": "def connect1(self, A, B, distance):", "body": "self.dict.setdefault(A,{})[B] = distance", "docstring": "Add a link from A to B of given distance, in one direction only.", "id": "f1675:c4:m3"} {"signature": "def get(self, a, b=None):", "body": "links = self.dict.setdefault(a, {})if b is None: return linkselse: return links.get(b)", "docstring": "Return a link distance or a dict of {node: distance} entries.\n .get(a,b) returns the distance or None;\n .get(a) returns a dict of {node: distance} entries, possibly {}.", "id": "f1675:c4:m4"} {"signature": "def nodes(self):", "body": "return list(self.dict.keys())", "docstring": "Return a list of nodes in the graph.", "id": "f1675:c4:m5"} {"signature": "def actions(self, A):", "body": "return list(self.graph.get(A).keys())", "docstring": "The actions at a graph node are just its neighbors.", "id": "f1675:c5:m1"} {"signature": "def result(self, state, action):", "body": "return action", "docstring": "The result of going to a neighbor is just that neighbor.", "id": "f1675:c5:m2"} {"signature": "def h(self, node):", "body": "locs = getattr(self.graph, '', None)if locs:return int(distance(locs[node.state], locs[self.goal]))else:return infinity", "docstring": "h function is straight-line distance from a node's state to goal.", "id": "f1675:c5:m4"} {"signature": "def actions(self, state):", "body": "if state[-] is not None:return [] else:col = state.index(None)return [row for row in range(self.N)if not self.conflicted(state, row, col)]", "docstring": "In the leftmost empty column, try all non-conflicting rows.", "id": "f1675:c6:m1"} {"signature": "def result(self, state, row):", "body": "col = state.index(None)new = state[:]new[col] = rowreturn new", "docstring": "Place the next queen at the given row.", "id": "f1675:c6:m2"} {"signature": "def conflicted(self, state, row, col):", "body": "return any(self.conflict(row, col, state[c], c)for c in range(col))", "docstring": "Would placing a queen at (row, col) conflict with anything?", "id": "f1675:c6:m3"} {"signature": "def conflict(self, row1, col1, row2, col2):", "body": "return (row1 == row2 or col1 == col2 or row1-col1 == 
row2-col2 or row1+col1 == row2+col2)", "docstring": "Would putting two queens in (row1, col1) and (row2, col2) conflict?", "id": "f1675:c6:m4"} {"signature": "def lookup(self, prefix, lo=, hi=None):", "body": "words = self.wordsif hi is None: hi = len(words)i = bisect.bisect_left(words, prefix, lo, hi)if i < len(words) and words[i].startswith(prefix):return i, (words[i] == prefix)else:return None, False", "docstring": "See if prefix is in dictionary, as a full word or as a prefix.\n Return two values: the first is the lowest i such that\n words[i].startswith(prefix), or is None; the second is\n True iff prefix itself is in the Wordlist.", "id": "f1675:c7:m1"} {"signature": "def set_board(self, board=None):", "body": "if board is None:board = random_boggle()self.board = boardself.neighbors = boggle_neighbors(len(board))self.found = {}for i in range(len(board)):lo, hi = self.wordlist.bounds[board[i]]self.find(lo, hi, i, [], '')return self", "docstring": "Set the board, and find all the words in it.", "id": "f1675:c8:m1"} {"signature": "def find(self, lo, hi, i, visited, prefix):", "body": "if i in visited:returnwordpos, is_word = self.wordlist.lookup(prefix, lo, hi)if wordpos is not None:if is_word:self.found[prefix] = Truevisited.append(i)c = self.board[i]if c == '': c = ''prefix += cfor j in self.neighbors[i]:self.find(wordpos, hi, j, visited, prefix)visited.pop()", "docstring": "Looking in square i, find the words that continue the prefix,\n considering the entries in self.wordlist.words[lo:hi], and not\n revisiting the squares in visited.", "id": "f1675:c8:m2"} {"signature": "def words(self):", "body": "return list(self.found.keys())", "docstring": "The words found.", "id": "f1675:c8:m3"} {"signature": "def score(self):", "body": "return sum([self.scores[len(w)] for w in self.words()])", "docstring": "The total score for the words found, according to the rules.", "id": "f1675:c8:m4"} {"signature": "def __len__(self):", "body": "return len(self.found)", "docstring": "The number of words found.", "id": "f1675:c8:m5"} {"signature": "def TraceAgent(agent):", "body": "old_program = agent.programdef new_program(percept):action = old_program(percept)print('' % (agent, percept, action))return actionagent.program = new_programreturn agent", "docstring": "Wrap the agent's program to print its input and output. This will let\n you see what the agent is doing in the environment.", "id": "f1676:m0"} {"signature": "def TableDrivenAgentProgram(table):", "body": "percepts = []def program(percept):percepts.append(percept)action = table.get(tuple(percepts))return actionreturn program", "docstring": "This agent selects an action based on the percept sequence.\n It is practical only for tiny domains.\n To customize it, provide as table a dictionary of all\n {percept_sequence:action} pairs. [Fig. 2.7]", "id": "f1676:m1"} {"signature": "def RandomAgentProgram(actions):", "body": "return lambda percept: random.choice(actions)", "docstring": "An agent that chooses an action at random, ignoring all percepts.", "id": "f1676:m2"} {"signature": "def SimpleReflexAgentProgram(rules, interpret_input):", "body": "def program(percept):state = interpret_input(percept)rule = rule_match(state, rules)action = rule.actionreturn actionreturn program", "docstring": "This agent takes action based solely on the percept. [Fig. 
2.10]", "id": "f1676:m3"} {"signature": "def ModelBasedReflexAgentProgram(rules, update_state):", "body": "def program(percept):program.state = update_state(program.state, program.action, percept)rule = rule_match(program.state, rules)action = rule.actionreturn actionprogram.state = program.action = Nonereturn program", "docstring": "This agent takes action based on the percept and state. [Fig. 2.12]", "id": "f1676:m4"} {"signature": "def rule_match(state, rules):", "body": "for rule in rules:if rule.matches(state):return rule", "docstring": "Find the first rule that matches state.", "id": "f1676:m5"} {"signature": "def RandomVacuumAgent():", "body": "return Agent(RandomAgentProgram(['', '', '', '']))", "docstring": "Randomly choose one of the actions from the vacuum environment.", "id": "f1676:m6"} {"signature": "def TableDrivenVacuumAgent():", "body": "table = {((loc_A, ''),): '',((loc_A, ''),): '',((loc_B, ''),): '',((loc_B, ''),): '',((loc_A, ''), (loc_A, '')): '',((loc_A, ''), (loc_A, '')): '',((loc_A, ''), (loc_A, ''), (loc_A, '')): '',((loc_A, ''), (loc_A, ''), (loc_A, '')): '',}return Agent(TableDrivenAgentProgram(table))", "docstring": "[Fig. 2.3]", "id": "f1676:m7"} {"signature": "def ReflexVacuumAgent():", "body": "def program(xxx_todo_changeme):(location, status) = xxx_todo_changemeif status == '': return ''elif location == loc_A: return ''elif location == loc_B: return ''return Agent(program)", "docstring": "A reflex agent for the two-state vacuum environment. [Fig. 2.8]", "id": "f1676:m8"} {"signature": "def ModelBasedVacuumAgent():", "body": "model = {loc_A: None, loc_B: None}def program(xxx_todo_changeme1):\"\"(location, status) = xxx_todo_changeme1model[location] = status if model[loc_A] == model[loc_B] == '': return ''elif status == '': return ''elif location == loc_A: return ''elif location == loc_B: return ''return Agent(program)", "docstring": "An agent that keeps track of what locations are clean or dirty.", "id": "f1676:m9"} {"signature": "def compare_agents(EnvFactory, AgentFactories, n=, steps=):", "body": "envs = [EnvFactory() for i in range(n)]return [(A, test_agent(A, steps, copy.deepcopy(envs)))for A in AgentFactories]", "docstring": "See how well each of several agents do in n instances of an environment.\n Pass in a factory (constructor) for environments, and several for agents.\n Create n instances of the environment, and run each agent in copies of\n each one for steps. Return a list of (agent, average-score) tuples.", "id": "f1676:m10"} {"signature": "def is_alive(self):", "body": "return hasattr(self, '') and self.alive", "docstring": "Things that are 'alive' should return true.", "id": "f1676:c0:m1"} {"signature": "def show_state(self):", "body": "print(\"\")", "docstring": "Display the agent's internal state. Subclasses should override.", "id": "f1676:c0:m2"} {"signature": "def display(self, canvas, x, y, width, height):", "body": "pass", "docstring": "Display an image of this Thing on the canvas.", "id": "f1676:c0:m3"} {"signature": "def can_grab(self, thing):", "body": "return False", "docstring": "Returns True if this agent can grab this thing.\n Override for appropriate subclasses of Agent and Thing.", "id": "f1676:c1:m1"} {"signature": "def percept(self, agent):", "body": "abstract", "docstring": "Return the percept that the agent sees at this point. (Implement this.)", "id": "f1676:c2:m2"} {"signature": "def execute_action(self, agent, action):", "body": "abstract", "docstring": "Change the world to reflect this action. 
(Implement this.)", "id": "f1676:c2:m3"} {"signature": "def default_location(self, thing):", "body": "return None", "docstring": "Default location to place a new thing with unspecified location.", "id": "f1676:c2:m4"} {"signature": "def exogenous_change(self):", "body": "pass", "docstring": "If there is spontaneous change in the world, override this.", "id": "f1676:c2:m5"} {"signature": "def is_done(self):", "body": "return not any(agent.is_alive() for agent in self.agents)", "docstring": "By default, we're done when we can't find a live agent.", "id": "f1676:c2:m6"} {"signature": "def step(self):", "body": "if not self.is_done():actions = [agent.program(self.percept(agent))for agent in self.agents]for (agent, action) in zip(self.agents, actions):self.execute_action(agent, action)self.exogenous_change()", "docstring": "Run the environment for one time step. If the\n actions and exogenous changes are independent, this method will\n do. If there are interactions between them, you'll need to\n override this method.", "id": "f1676:c2:m7"} {"signature": "def run(self, steps=):", "body": "for step in range(steps):if self.is_done(): returnself.step()", "docstring": "Run the Environment for given number of time steps.", "id": "f1676:c2:m8"} {"signature": "def list_things_at(self, location, tclass=Thing):", "body": "return [thing for thing in self.thingsif thing.location == location and isinstance(thing, tclass)]", "docstring": "Return all things exactly at a given location.", "id": "f1676:c2:m9"} {"signature": "def some_things_at(self, location, tclass=Thing):", "body": "return self.list_things_at(location, tclass) != []", "docstring": "Return true if at least one of the things at location\n is an instance of class tclass (or a subclass).", "id": "f1676:c2:m10"} {"signature": "def add_thing(self, thing, location=None):", "body": "if not isinstance(thing, Thing):thing = Agent(thing)assert thing not in self.things, \"\"thing.location = location or self.default_location(thing)self.things.append(thing)if isinstance(thing, Agent):thing.performance = self.agents.append(thing)", "docstring": "Add a thing to the environment, setting its location. For\n convenience, if thing is an agent program we make a new agent\n for it. 
(Shouldn't need to override this.", "id": "f1676:c2:m11"} {"signature": "def delete_thing(self, thing):", "body": "try:self.things.remove(thing)except ValueError as e:print(e)print(\"\")print(\"\" % (thing, thing.location))print(\"\" % [(thing, thing.location)for thing in self.things])if thing in self.agents:self.agents.remove(thing)", "docstring": "Remove a thing from the environment.", "id": "f1676:c2:m12"} {"signature": "def things_near(self, location, radius=None):", "body": "if radius is None: radius = self.perceptible_distanceradius2 = radius * radiusreturn [thing for thing in self.thingsif distance2(location, thing.location) <= radius2]", "docstring": "Return all things within radius of location.", "id": "f1676:c3:m1"} {"signature": "def percept(self, agent):", "body": "return [self.thing_percept(thing, agent)for thing in self.things_near(agent.location)]", "docstring": "By default, agent perceives things within a default radius.", "id": "f1676:c3:m2"} {"signature": "def thing_percept(self, thing, agent): ", "body": "return thing.__class__.__name__", "docstring": "Return the percept for this thing.", "id": "f1676:c3:m4"} {"signature": "def move_to(self, thing, destination):", "body": "thing.bump = self.some_things_at(destination, Obstacle)if not thing.bump:thing.location = destinationfor o in self.observers:o.thing_moved(thing)", "docstring": "Move a thing to a new location.", "id": "f1676:c3:m6"} {"signature": "def add_walls(self):", "body": "for x in range(self.width):self.add_thing(Wall(), (x, ))self.add_thing(Wall(), (x, self.height-))for y in range(self.height):self.add_thing(Wall(), (, y))self.add_thing(Wall(), (self.width-, y))", "docstring": "Put walls around the entire perimeter of the grid.", "id": "f1676:c3:m9"} {"signature": "def add_observer(self, observer):", "body": "self.observers.append(observer)", "docstring": "Adds an observer to the list of observers.\n An observer is typically an EnvGUI.\n\n Each observer is notified of changes in move_to and add_thing,\n by calling the observer's methods thing_moved(thing)\n and thing_added(thing, loc).", "id": "f1676:c3:m10"} {"signature": "def turn_heading(self, heading, inc):", "body": "return turn_heading(heading, inc)", "docstring": "Return the heading to the left (inc=+1) or right (inc=-1) of heading.", "id": "f1676:c3:m11"} {"signature": "def percept(self, agent):", "body": "status = if_(self.some_things_at(agent.location, Dirt),'', '')bump = if_(agent.bump, '', '')return (status, bump)", "docstring": "The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').\n Unlike the TrivialVacuumEnvironment, location is NOT perceived.", "id": "f1676:c7:m2"} {"signature": "def percept(self, agent):", "body": "return (agent.location, self.status[agent.location])", "docstring": "Returns the agent's location, and the location status (Dirty/Clean).", "id": "f1676:c8:m2"} {"signature": "def execute_action(self, agent, action):", "body": "if action == '':agent.location = loc_Bagent.performance -= elif action == '':agent.location = loc_Aagent.performance -= elif action == '':if self.status[agent.location] == '':agent.performance += self.status[agent.location] = ''", "docstring": "Change agent's location and/or location's status; track performance.\n Score 10 for each dirt cleaned; -1 for each move.", "id": "f1676:c8:m3"} {"signature": "def default_location(self, thing):", "body": "return random.choice([loc_A, loc_B])", "docstring": "Agents start in either location at random.", "id": "f1676:c8:m4"} {"signature": "def 
viterbi_segment(text, P):", "body": "n = len(text)words = [''] + list(text)best = [] + [] * nfor i in range(n+):for j in range(, i):w = text[j:i]if P[w] * best[i - len(w)] >= best[i]:best[i] = P[w] * best[i - len(w)]words[i] = wsequence = []; i = len(words)-while i > :sequence[:] = [words[i]]i = i - len(words[i])return sequence, best[-]", "docstring": "Find the best segmentation of the string of characters, given the\n UnigramTextModel P.", "id": "f1677:m0"} {"signature": "def words(text, reg=re.compile('')):", "body": "return reg.findall(text.lower())", "docstring": "Return a list of the words in text, ignoring punctuation and\n converting everything to lowercase (to canonicalize).\n >>> words(\"``EGAD!'' Edgar cried.\")\n ['egad', 'edgar', 'cried']", "id": "f1677:m1"} {"signature": "def canonicalize(text):", "body": "return ''.join(words(text))", "docstring": "Return a canonical text: only lowercase letters and blanks.\n >>> canonicalize(\"``EGAD!'' Edgar cried.\")\n 'egad edgar cried'", "id": "f1677:m2"} {"signature": "def shift_encode(plaintext, n):", "body": "return encode(plaintext, alphabet[n:] + alphabet[:n])", "docstring": "Encode text with a shift cipher that moves each letter up by n letters.\n >>> shift_encode('abc z', 1)\n 'bcd a'", "id": "f1677:m3"} {"signature": "def rot13(plaintext):", "body": "return shift_encode(plaintext, )", "docstring": "Encode text by rotating letters by 13 spaces in the alphabet.\n >>> rot13('hello')\n 'uryyb'\n >>> rot13(rot13('hello'))\n 'hello'", "id": "f1677:m4"} {"signature": "def encode(plaintext, code):", "body": "from string import maketranstrans = maketrans(alphabet + alphabet.upper(), code + code.upper())return plaintext.translate(trans)", "docstring": "Encodes text, using a code which is a permutation of the alphabet.", "id": "f1677:m5"} {"signature": "def bigrams(text):", "body": "return [text[i:i+] for i in range(len(text) - )]", "docstring": "Return a list of pairs in text (a sequence of letters or words).\n >>> bigrams('this')\n ['th', 'hi', 'is']\n >>> bigrams(['this', 'is', 'a', 'test'])\n [['this', 'is'], ['is', 'a'], ['a', 'test']]", "id": "f1677:m6"} {"signature": "def all_shifts(text):", "body": "return [shift_encode(text, n) for n in range(len(alphabet))]", "docstring": "Return a list of all 26 possible encodings of text by a shift cipher.", "id": "f1677:m7"} {"signature": "def samples(self, n):", "body": "return ''.join([self.sample() for i in range(n)])", "docstring": "Return a string of n words, random according to the model.", "id": "f1677:c0:m0"} {"signature": "def add(self, ngram):", "body": "CountingProbDist.add(self, ngram)self.cond_prob[ngram[:-]].add(ngram[-])", "docstring": "Count 1 for P[(w1, ..., wn)] and for P(wn | (w1, ..., wn-1)", "id": "f1677:c1:m1"} {"signature": "def add_sequence(self, words):", "body": "n = self.nwords = ['',] * (n-) + wordsfor i in range(len(words)-n):self.add(tuple(words[i:i+n]))", "docstring": "Add each of the tuple words[i:i+n], using a sliding window.\n Prefix some copies of the empty word, '', to make the start work.", "id": "f1677:c1:m2"} {"signature": "def samples(self, nwords):", "body": "n = self.nnminus1gram = ('',) * (n-)output = []for i in range(nwords):if nminus1gram not in self.cond_prob:nminus1gram = ('',) * (n-) wn = self.cond_prob[nminus1gram].sample()output.append(wn)nminus1gram = nminus1gram[:] + (wn,)return ''.join(output)", "docstring": "Build up a random sample of text nwords words long, using\n the conditional probability given the n-1 preceding words.", "id": 
"f1677:c1:m3"} {"signature": "def __init__(self, stopwords=''):", "body": "update(self, index=DefaultDict(DefaultDict()),stopwords=set(words(stopwords)), documents=[])", "docstring": "Create an IR System. Optionally specify stopwords.", "id": "f1677:c2:m0"} {"signature": "def index_collection(self, filenames):", "body": "for filename in filenames:self.index_document(open(filename).read(), filename)", "docstring": "Index a whole collection of files.", "id": "f1677:c2:m1"} {"signature": "def index_document(self, text, url):", "body": "title = text[:text.index('')].strip()docwords = words(text)docid = len(self.documents)self.documents.append(Document(title, url, len(docwords)))for word in docwords:if word not in self.stopwords:self.index[word][docid] += ", "docstring": "Index the text of a document.", "id": "f1677:c2:m2"} {"signature": "def query(self, query_text, n=):", "body": "if query_text.startswith(\"\"):doctext = os.popen(query_text[len(\"\"):], '').read()self.index_document(doctext, query_text)return []qwords = [w for w in words(query_text) if w not in self.stopwords]shortest = argmin(qwords, lambda w: len(self.index[w]))docs = self.index[shortest]results = [(sum([self.score(w, d) for w in qwords]), d) for d in docs]results.sort(); results.reverse()return results[:n]", "docstring": "Return a list of n (score, docid) pairs for the best matches.\n Also handle the special syntax for 'learn: command'.", "id": "f1677:c2:m3"} {"signature": "def score(self, word, docid):", "body": "return (math.log( + self.index[word][docid])/ math.log( + self.documents[docid].nwords))", "docstring": "Compute a score for this word on this docid.", "id": "f1677:c2:m4"} {"signature": "def present(self, results):", "body": "for (score, d) in results:doc = self.documents[d]print (\"\"% ( * score, doc.url, doc.title[:].expandtabs()))", "docstring": "Present the results as a list.", "id": "f1677:c2:m5"} {"signature": "def present_results(self, query_text, n=):", "body": "self.present(self.query(query_text, n))", "docstring": "Get results for the query and present them.", "id": "f1677:c2:m6"} {"signature": "def score(self, plaintext):", "body": "s = for bi in bigrams(plaintext):s = s * self.P2[bi]return s", "docstring": "Return a score for text based on how common letters pairs are.", "id": "f1677:c5:m1"} {"signature": "def decode(self, ciphertext):", "body": "return argmax(all_shifts(ciphertext), self.score)", "docstring": "Return the shift decoding of text with the best score.", "id": "f1677:c5:m2"} {"signature": "def decode(self, ciphertext):", "body": "self.ciphertext = ciphertextproblem = PermutationDecoderProblem(decoder=self)return search.best_first_tree_search(problem, lambda node: self.score(node.state))", "docstring": "Search for a decoding of the ciphertext.", "id": "f1677:c6:m1"} {"signature": "def score(self, code):", "body": "text = permutation_decode(self.ciphertext, code)logP = (sum([log(self.Pwords[word]) for word in words(text)]) +sum([log(self.P1[c]) for c in text]) +sum([log(self.P2[b]) for b in bigrams(text)]))return exp(logP)", "docstring": "Score is product of word scores, unigram scores, and bigram scores.\n This can get very small, so we use logs and exp.", "id": "f1677:c6:m2"} {"signature": "def value_iteration(mdp, epsilon=):", "body": "U1 = dict([(s, ) for s in mdp.states])R, T, gamma = mdp.R, mdp.T, mdp.gammawhile True:U = U1.copy()delta = for s in mdp.states:U1[s] = R(s) + gamma * max([sum([p * U[s1] for (p, s1) in T(s, a)])for a in mdp.actions(s)])delta = max(delta, abs(U1[s] - 
U[s]))if delta < epsilon * ( - gamma) / gamma:return U", "docstring": "Solving an MDP by value iteration. [Fig. 17.4]", "id": "f1678:m0"} {"signature": "def best_policy(mdp, U):", "body": "pi = {}for s in mdp.states:pi[s] = argmax(mdp.actions(s), lambda a:expected_utility(a, s, U, mdp))return pi", "docstring": "Given an MDP and a utility function U, determine the best policy,\n as a mapping from state to action. (Equation 17.4)", "id": "f1678:m1"} {"signature": "def expected_utility(a, s, U, mdp):", "body": "return sum([p * U[s1] for (p, s1) in mdp.T(s, a)])", "docstring": "The expected utility of doing a in state s, according to the MDP and U.", "id": "f1678:m2"} {"signature": "def policy_iteration(mdp):", "body": "U = dict([(s, ) for s in mdp.states])pi = dict([(s, random.choice(mdp.actions(s))) for s in mdp.states])while True:U = policy_evaluation(pi, U, mdp)unchanged = Truefor s in mdp.states:a = argmax(mdp.actions(s), lambda a: expected_utility(a,s,U,mdp))if a != pi[s]:pi[s] = aunchanged = Falseif unchanged:return pi", "docstring": "Solve an MDP by policy iteration [Fig. 17.7]", "id": "f1678:m3"} {"signature": "def policy_evaluation(pi, U, mdp, k=):", "body": "R, T, gamma = mdp.R, mdp.T, mdp.gammafor i in range(k):for s in mdp.states:U[s] = R(s) + gamma * sum([p * U[s1] for (p, s1) in T(s, pi[s])])return U", "docstring": "Return an updated utility mapping U from each state in the MDP to its\n utility, using an approximation (modified policy iteration).", "id": "f1678:m4"} {"signature": "def R(self, state):", "body": "return self.reward[state]", "docstring": "Return a numeric reward for this state.", "id": "f1678:c0:m1"} {"signature": "def T(self, state, action):", "body": "abstract", "docstring": "Transition model. From a state and an action, return a list\n of (probability, result-state) pairs.", "id": "f1678:c0:m2"} {"signature": "def actions(self, state):", "body": "if state in self.terminals:return [None]else:return self.actlist", "docstring": "Set of actions that can be performed in this state. By default, a\n fixed list of actions, except for terminal states. Override this\n method if you need to specialize by state.", "id": "f1678:c0:m3"} {"signature": "def go(self, state, direction):", "body": "state1 = vector_add(state, direction)return if_(state1 in self.states, state1, state)", "docstring": "Return the state that results from going in this direction.", "id": "f1678:c1:m2"} {"signature": "def to_grid(self, mapping):", "body": "return list(reversed([[mapping.get((x,y), None)for x in range(self.cols)]for y in range(self.rows)]))", "docstring": "Convert a mapping from (x, y) to v into a [[..., v, ...]] grid.", "id": "f1678:c1:m3"} {"signature": "def parse_csv(input, delim=''):", "body": "lines = [line for line in input.splitlines() if line.strip()]return [list(map(num_or_str, line.split(delim))) for line in lines]", "docstring": "r\"\"\"Input is a string consisting of lines, each line has comma-delimited\n fields. Convert this into a list of lists. Blank lines are skipped.\n Fields that look like numbers are converted to numbers.\n The delim defaults to ',' but '\\t' and None are also reasonable values.\n >>> parse_csv('1, 2, 3 \\n 0, 2, na')\n [[1, 2, 3], [0, 2, 'na']]", "id": "f1680:m4"} {"signature": "def PluralityLearner(dataset):", "body": "most_popular = mode([e[dataset.target] for e in dataset.examples])def predict(example):\"\"return most_popularreturn predict", "docstring": "A very dumb algorithm: always pick the result that was most popular\n in the training data. 
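The value_iteration, expected_utility and best_policy records above implement the standard Bellman-update approach to solving an MDP. Below is a minimal, self-contained sketch of the same idea; the TinyMDP class, its two states, rewards, transition table and the 0.9 discount are all invented for illustration and are not part of the records above.

# Standalone sketch of value iteration on a tiny, made-up MDP.
class TinyMDP:
    states = ['A', 'B']
    gamma = 0.9                          # assumed discount factor

    def R(self, s):                      # reward for being in state s
        return {'A': 0.0, 'B': 1.0}[s]

    def actions(self, s):
        return ['stay', 'switch']

    def T(self, s, a):                   # list of (probability, next_state) pairs
        other = 'B' if s == 'A' else 'A'
        return [(1.0, s)] if a == 'stay' else [(1.0, other)]

def value_iteration_sketch(mdp, epsilon=0.001):
    U = {s: 0.0 for s in mdp.states}
    while True:
        U_next, delta = {}, 0.0
        for s in mdp.states:
            U_next[s] = mdp.R(s) + mdp.gamma * max(
                sum(p * U[s1] for p, s1 in mdp.T(s, a))
                for a in mdp.actions(s))
            delta = max(delta, abs(U_next[s] - U[s]))
        U = U_next
        if delta < epsilon * (1 - mdp.gamma) / mdp.gamma:
            return U

mdp = TinyMDP()
U = value_iteration_sketch(mdp)          # roughly {'A': 9.0, 'B': 10.0}
policy = {s: max(mdp.actions(s),
                 key=lambda a: sum(p * U[s1] for p, s1 in mdp.T(s, a)))
          for s in mdp.states}
print(U, policy)                         # expect 'switch' in A, 'stay' in B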
Makes a baseline for comparison.", "id": "f1680:m5"} {"signature": "def NaiveBayesLearner(dataset):", "body": "targetvals = dataset.values[dataset.target]target_dist = CountingProbDist(targetvals)attr_dists = dict(((gv, attr), CountingProbDist(dataset.values[attr]))for gv in targetvalsfor attr in dataset.inputs)for example in dataset.examples:targetval = example[dataset.target]target_dist.add(targetval)for attr in dataset.inputs:attr_dists[targetval, attr].add(example[attr])def predict(example):\"\"\"\"\"\"def class_probability(targetval):return (target_dist[targetval]* product(attr_dists[targetval, attr][example[attr]]for attr in dataset.inputs))return argmax(targetvals, class_probability)return predict", "docstring": "Just count how many times each value of each input attribute\n occurs, conditional on the target value. Count the different\n target values too.", "id": "f1680:m6"} {"signature": "def NearestNeighborLearner(dataset, k=):", "body": "def predict(example):\"\"best = heapq.nsmallest(k, ((dataset.distance(e, example), e)for e in dataset.examples))return mode(e[dataset.target] for (d, e) in best)return predict", "docstring": "k-NearestNeighbor: the k nearest neighbors vote.", "id": "f1680:m7"} {"signature": "def DecisionTreeLearner(dataset):", "body": "target, values = dataset.target, dataset.valuesdef decision_tree_learning(examples, attrs, parent_examples=()):if len(examples) == :return plurality_value(parent_examples)elif all_same_class(examples):return DecisionLeaf(examples[][target])elif len(attrs) == :return plurality_value(examples)else:A = choose_attribute(attrs, examples)tree = DecisionFork(A, dataset.attrnames[A])for (v_k, exs) in split_by(A, examples):subtree = decision_tree_learning(exs, removeall(A, attrs), examples)tree.add(v_k, subtree)return treedef plurality_value(examples):\"\"\"\"\"\"popular = argmax_random_tie(values[target],lambda v: count(target, v, examples))return DecisionLeaf(popular)def count(attr, val, examples):return count_if(lambda e: e[attr] == val, examples)def all_same_class(examples):\"\"class0 = examples[][target]return all(e[target] == class0 for e in examples)def choose_attribute(attrs, examples):\"\"return argmax_random_tie(attrs,lambda a: information_gain(a, examples))def information_gain(attr, examples):\"\"def I(examples):return information_content([count(target, v, examples)for v in values[target]])N = float(len(examples))remainder = sum((len(examples_i) / N) * I(examples_i)for (v, examples_i) in split_by(attr, examples))return I(examples) - remainderdef split_by(attr, examples):\"\"return [(v, [e for e in examples if e[attr] == v])for v in values[attr]]return decision_tree_learning(dataset.examples, dataset.inputs)", "docstring": "[Fig. 
18.5]", "id": "f1680:m8"} {"signature": "def information_content(values):", "body": "probabilities = normalize(removeall(, values))return sum(-p * log2(p) for p in probabilities)", "docstring": "Number of bits to represent the probability distribution in values.", "id": "f1680:m9"} {"signature": "def DecisionListLearner(dataset):", "body": "def decision_list_learning(examples):if not examples:return [(True, False)]t, o, examples_t = find_examples(examples)if not t:raise Failurereturn [(t, o)] + decision_list_learning(examples - examples_t)def find_examples(examples):\"\"\"\"\"\"unimplemented()def passes(example, test):\"\"unimplemented()def predict(example):\"\"for test, outcome in predict.decision_list:if passes(example, test):return outcomepredict.decision_list = decision_list_learning(set(dataset.examples))return predict", "docstring": "[Fig. 18.11]", "id": "f1680:m10"} {"signature": "def NeuralNetLearner(dataset, sizes):", "body": "activations = [[ for i in range(n)] for n in sizes]weights = []def predict(example):unimplemented()return predict", "docstring": "Layered feed-forward network.", "id": "f1680:m11"} {"signature": "def Linearlearner(dataset):", "body": "unimplemented()", "docstring": "Fit a linear model to the data.", "id": "f1680:m13"} {"signature": "def EnsembleLearner(learners):", "body": "def train(dataset):predictors = [learner(dataset) for learner in learners]def predict(example):return mode(predictor(example) for predictor in predictors)return predictreturn train", "docstring": "Given a list of learning algorithms, have them vote.", "id": "f1680:m14"} {"signature": "def AdaBoost(L, K):", "body": "def train(dataset):examples, target = dataset.examples, dataset.targetN = len(examples)epsilon = /(*N)w = [/N] * Nh, z = [], []for k in range(K):h_k = L(dataset, w)h.append(h_k)error = sum(weight for example, weight in zip(examples, w)if example[target] != h_k(example))error = clip(error, epsilon, -epsilon)for j, example in enumerate(examples):if example[target] == h_k(example):w[j] *= error / ( - error)w = normalize(w)z.append(math.log(( - error) / error))return WeightedMajority(h, z)return train", "docstring": "[Fig. 18.34]", "id": "f1680:m15"} {"signature": "def WeightedMajority(predictors, weights):", "body": "def predict(example):return weighted_mode((predictor(example) for predictor in predictors),weights)return predict", "docstring": "Return a predictor that takes a weighted vote.", "id": "f1680:m16"} {"signature": "def weighted_mode(values, weights):", "body": "totals = defaultdict(int)for v, w in zip(values, weights):totals[v] += wreturn max(list(totals.keys()), key=totals.get)", "docstring": "Return the value with the greatest total weight.\n >>> weighted_mode('abbaa', [1,2,3,1,2])\n 'b", "id": "f1680:m17"} {"signature": "def WeightedLearner(unweighted_learner):", "body": "def train(dataset, weights):return unweighted_learner(replicated_dataset(dataset, weights))return train", "docstring": "Given a learner that takes just an unweighted dataset, return\n one that takes also a weight for each example. [p. 
749 footnote 14]", "id": "f1680:m18"} {"signature": "def replicated_dataset(dataset, weights, n=None):", "body": "n = n or len(dataset.examples)result = copy.copy(dataset)result.examples = weighted_replicate(dataset.examples, weights, n)return result", "docstring": "Copy dataset, replicating each example in proportion to its weight.", "id": "f1680:m19"} {"signature": "def weighted_replicate(seq, weights, n):", "body": "assert len(seq) == len(weights)weights = normalize(weights)wholes = [int(w*n) for w in weights]fractions = [(w*n) % for w in weights]return (flatten([x] * nx for x, nx in zip(seq, wholes))+ weighted_sample_with_replacement(seq, fractions, n - sum(wholes)))", "docstring": "Return n selections from seq, with the count of each element of\n seq proportional to the corresponding weight (filling in fractions\n randomly).\n >>> weighted_replicate('ABC', [1,2,1], 4)\n ['A', 'B', 'B', 'C']", "id": "f1680:m20"} {"signature": "def cross_validation(learner, dataset, k=, trials=):", "body": "if k is None:k = len(dataset.examples)if trials > :return mean([cross_validation(learner, dataset, k, trials=)for t in range(trials)])else:n = len(dataset.examples)random.shuffle(dataset.examples)return mean([train_and_test(learner, dataset, i*(n/k), (i+)*(n/k))for i in range(k)])", "docstring": "Do k-fold cross_validate and return their mean.\n That is, keep out 1/k of the examples for testing on each of k runs.\n Shuffle the examples first; If trials>1, average over several shuffles.", "id": "f1680:m24"} {"signature": "def leave1out(learner, dataset):", "body": "return cross_validation(learner, dataset, k=len(dataset.examples))", "docstring": "Leave one out cross-validation over the dataset.", "id": "f1680:m25"} {"signature": "def RestaurantDataSet(examples=None):", "body": "return DataSet(name='', target='', examples=examples,attrnames=''+ '')", "docstring": "Build a DataSet of Restaurant waiting examples. [Fig. 18.3]", "id": "f1680:m27"} {"signature": "def SyntheticRestaurant(n=):", "body": "def gen():example = list(map(random.choice, restaurant.values))example[restaurant.target] = Fig[,](example)return examplereturn RestaurantDataSet([gen() for i in range(n)])", "docstring": "Generate a DataSet with n examples.", "id": "f1680:m29"} {"signature": "def Majority(k, n):", "body": "examples = []for i in range(n):bits = [random.choice([, ]) for i in range(k)]bits.append(int(sum(bits) > k/))examples.append(bits)return DataSet(name=\"\", examples=examples)", "docstring": "Return a DataSet with n k-bit examples of the majority problem:\n k random bits followed by a 1 if more than half the bits are 1, else 0.", "id": "f1680:m30"} {"signature": "def Parity(k, n, name=\"\"):", "body": "examples = []for i in range(n):bits = [random.choice([, ]) for i in range(k)]bits.append(sum(bits) % )examples.append(bits)return DataSet(name=name, examples=examples)", "docstring": "Return a DataSet with n k-bit examples of the parity problem:\n k random bits followed by a 1 if an odd number of bits are 1, else 0.", "id": "f1680:m31"} {"signature": "def Xor(n):", "body": "return Parity(, n, name=\"\")", "docstring": "Return a DataSet with n examples of 2-input xor.", "id": "f1680:m32"} {"signature": "def ContinuousXor(n):", "body": "examples = []for i in range(n):x, y = [random.uniform(, ) for i in '']examples.append([x, y, int(x) != int(y)])return DataSet(name=\"\", examples=examples)", "docstring": "2 inputs are chosen uniformly from (0.0 .. 
2.0]; output is xor of ints.", "id": "f1680:m33"} {"signature": "def compare(algorithms=[PluralityLearner, NaiveBayesLearner,NearestNeighborLearner, DecisionTreeLearner],datasets=[iris, orings, zoo, restaurant, SyntheticRestaurant(),Majority(, ), Parity(, ), Xor()],k=, trials=):", "body": "print_table([[a.__name__.replace('','')] +[cross_validation(a, d, k, trials) for d in datasets]for a in algorithms],header=[''] + [d.name[:] for d in datasets], numfmt='')", "docstring": "Compare various learners on various datasets using cross-validation.\n Print results as a table.", "id": "f1680:m34"} {"signature": "def __init__(self, examples=None, attrs=None, attrnames=None, target=-,inputs=None, values=None, distance=mean_boolean_error,name='', source='', exclude=()):", "body": "update(self, name=name, source=source, values=values, distance=distance)if isinstance(examples, str):self.examples = parse_csv(examples)elif examples is None:self.examples = parse_csv(DataFile(name+'').read())else:self.examples = examplesif not attrs and self.examples:attrs = list(range(len(self.examples[])))self.attrs = attrsif isinstance(attrnames, str):self.attrnames = attrnames.split()else:self.attrnames = attrnames or attrsself.setproblem(target, inputs=inputs, exclude=exclude)", "docstring": "Accepts any of DataSet's fields. Examples can also be a\n string or file from which to parse examples using parse_csv.\n Optional parameter: exclude, as documented in .setproblem().\n >>> DataSet(examples='1, 2, 3')\n ", "id": "f1680:c0:m0"} {"signature": "def setproblem(self, target, inputs=None, exclude=()):", "body": "self.target = self.attrnum(target)exclude = list(map(self.attrnum, exclude))if inputs:self.inputs = removeall(self.target, inputs)else:self.inputs = [a for a in self.attrsif a != self.target and a not in exclude]if not self.values:self.values = list(map(unique, list(zip(*self.examples))))self.check_me()", "docstring": "Set (or change) the target and/or inputs.\n This way, one DataSet can be used multiple ways. inputs, if specified,\n is a list of attributes, or specify exclude as a list of attributes\n to not use in inputs. Attributes can be -n .. n, or an attrname.\n Also computes the list of possible values, if that wasn't done yet.", "id": "f1680:c0:m1"} {"signature": "def check_me(self):", "body": "assert len(self.attrnames) == len(self.attrs)assert self.target in self.attrsassert self.target not in self.inputsassert set(self.inputs).issubset(set(self.attrs))list(map(self.check_example, self.examples))", "docstring": "Check that my fields make sense.", "id": "f1680:c0:m2"} {"signature": "def add_example(self, example):", "body": "self.check_example(example)self.examples.append(example)", "docstring": "Add an example to the list of examples, checking it first.", "id": "f1680:c0:m3"} {"signature": "def check_example(self, example):", "body": "if self.values:for a in self.attrs:if example[a] not in self.values[a]:raise ValueError('' %(example[a], self.attrnames[a], example))", "docstring": "Raise ValueError if example has any invalid values.", "id": "f1680:c0:m4"} {"signature": "def attrnum(self, attr):", "body": "if attr < :return len(self.attrs) + attrelif isinstance(attr, str):return self.attrnames.index(attr)else:return attr", "docstring": "Returns the number used for attr, which can be a name, or -n .. 
n-1.", "id": "f1680:c0:m5"} {"signature": "def sanitize(self, example):", "body": "return [attr_i if i in self.inputs else Nonefor i, attr_i in enumerate(example)]", "docstring": "Return a copy of example, with non-input attributes replaced by None.", "id": "f1680:c0:m6"} {"signature": "def __init__(self, observations=[], default=):", "body": "update(self, dictionary={}, n_obs=, default=default, sampler=None)for o in observations:self.add(o)", "docstring": "Create a distribution, and optionally add in some observations.\n By default this is an unsmoothed distribution, but saying default=1,\n for example, gives you add-one smoothing.", "id": "f1680:c1:m0"} {"signature": "def add(self, o):", "body": "self.smooth_for(o)self.dictionary[o] += self.n_obs += self.sampler = None", "docstring": "Add an observation o to the distribution.", "id": "f1680:c1:m1"} {"signature": "def smooth_for(self, o):", "body": "if o not in self.dictionary:self.dictionary[o] = self.defaultself.n_obs += self.defaultself.sampler = None", "docstring": "Include o among the possible observations, whether or not\n it's been observed yet.", "id": "f1680:c1:m2"} {"signature": "def __getitem__(self, item):", "body": "self.smooth_for(item)return self.dictionary[item] / self.n_obs", "docstring": "Return an estimate of the probability of item.", "id": "f1680:c1:m3"} {"signature": "def top(self, n):", "body": "return heapq.nlargest(n, [(v, k) for (k, v) in list(self.dictionary.items())])", "docstring": "Return (count, obs) tuples for the n most frequent observations.", "id": "f1680:c1:m4"} {"signature": "def sample(self):", "body": "if self.sampler is None:self.sampler = weighted_sampler(list(self.dictionary.keys()),list(self.dictionary.values()))return self.sampler()", "docstring": "Return a random sample from the distribution.", "id": "f1680:c1:m5"} {"signature": "def __init__(self, attr, attrname=None, branches=None):", "body": "update(self, attr=attr, attrname=attrname or attr,branches=branches or {})", "docstring": "Initialize by saying what attribute this node tests.", "id": "f1680:c2:m0"} {"signature": "def __call__(self, example):", "body": "attrvalue = example[self.attr]return self.branches[attrvalue](example)", "docstring": "Given an example, classify it using the attribute and the branches.", "id": "f1680:c2:m1"} {"signature": "def add(self, val, subtree):", "body": "self.branches[val] = subtree", "docstring": "Add a branch. If self.attr = val, go to the given subtree.", "id": "f1680:c2:m2"} {"signature": "def AC3(csp, queue=None, removals=None):", "body": "if queue is None:queue = [(Xi, Xk) for Xi in csp.vars for Xk in csp.neighbors[Xi]]csp.support_pruning()while queue:(Xi, Xj) = queue.pop()if revise(csp, Xi, Xj, removals):if not csp.curr_domains[Xi]:return Falsefor Xk in csp.neighbors[Xi]:if Xk != Xi:queue.append((Xk, Xi))return True", "docstring": "[Fig. 
6.3]", "id": "f1681:m0"} {"signature": "def revise(csp, Xi, Xj, removals):", "body": "revised = Falsefor x in csp.curr_domains[Xi][:]:if every(lambda y: not csp.constraints(Xi, x, Xj, y),csp.curr_domains[Xj]):csp.prune(Xi, x, removals)revised = Truereturn revised", "docstring": "Return true if we remove a value.", "id": "f1681:m1"} {"signature": "def first_unassigned_variable(assignment, csp):", "body": "return find_if(lambda var: var not in assignment, csp.vars)", "docstring": "The default variable order.", "id": "f1681:m2"} {"signature": "def mrv(assignment, csp):", "body": "return argmin_random_tie([v for v in csp.vars if v not in assignment],lambda var: num_legal_values(csp, var, assignment))", "docstring": "Minimum-remaining-values heuristic.", "id": "f1681:m3"} {"signature": "def unordered_domain_values(var, assignment, csp):", "body": "return csp.choices(var)", "docstring": "The default value order.", "id": "f1681:m5"} {"signature": "def lcv(var, assignment, csp):", "body": "return sorted(csp.choices(var),key=lambda val: csp.nconflicts(var, val, assignment))", "docstring": "Least-constraining-values heuristic.", "id": "f1681:m6"} {"signature": "def forward_checking(csp, var, value, assignment, removals):", "body": "for B in csp.neighbors[var]:if B not in assignment:for b in csp.curr_domains[B][:]:if not csp.constraints(var, value, B, b):csp.prune(B, b, removals)if not csp.curr_domains[B]:return Falsereturn True", "docstring": "Prune neighbor values inconsistent with var=value.", "id": "f1681:m8"} {"signature": "def mac(csp, var, value, assignment, removals):", "body": "return AC3(csp, [(X, var) for X in csp.neighbors[var]], removals)", "docstring": "Maintain arc consistency.", "id": "f1681:m9"} {"signature": "def backtracking_search(csp,select_unassigned_variable = first_unassigned_variable,order_domain_values = unordered_domain_values,inference = no_inference):", "body": "def backtrack(assignment):if len(assignment) == len(csp.vars):return assignmentvar = select_unassigned_variable(assignment, csp)for value in order_domain_values(var, assignment, csp):if == csp.nconflicts(var, value, assignment):csp.assign(var, value, assignment)removals = csp.suppose(var, value)if inference(csp, var, value, assignment, removals):result = backtrack(assignment)if result is not None:return resultcsp.restore(removals)csp.unassign(var, assignment)return Noneresult = backtrack({})assert result is None or csp.goal_test(result)return result", "docstring": "[Fig. 
6.5]\n >>> backtracking_search(australia) is not None\n True\n >>> backtracking_search(australia, select_unassigned_variable=mrv) is not None\n True\n >>> backtracking_search(australia, order_domain_values=lcv) is not None\n True\n >>> backtracking_search(australia, select_unassigned_variable=mrv, order_domain_values=lcv) is not None\n True\n >>> backtracking_search(australia, inference=forward_checking) is not None\n True\n >>> backtracking_search(australia, inference=mac) is not None\n True\n >>> backtracking_search(usa, select_unassigned_variable=mrv, order_domain_values=lcv, inference=mac) is not None\n True", "id": "f1681:m10"} {"signature": "def min_conflicts(csp, max_steps=):", "body": "csp.current = current = {}for var in csp.vars:val = min_conflicts_value(csp, var, current)csp.assign(var, val, current)for i in range(max_steps):conflicted = csp.conflicted_vars(current)if not conflicted:return currentvar = random.choice(conflicted)val = min_conflicts_value(csp, var, current)csp.assign(var, val, current)return None", "docstring": "Solve a CSP by stochastic hillclimbing on the number of conflicts.", "id": "f1681:m11"} {"signature": "def min_conflicts_value(csp, var, current):", "body": "return argmin_random_tie(csp.domains[var],lambda val: csp.nconflicts(var, val, current))", "docstring": "Return the value that will give var the least number of conflicts.\n If there is a tie, choose at random.", "id": "f1681:m12"} {"signature": "def tree_csp_solver(csp):", "body": "n = len(csp.vars)assignment = {}root = csp.vars[]X, parent = topological_sort(csp.vars, root)for Xj in reversed(X):if not make_arc_consistent(parent[Xj], Xj, csp):return Nonefor Xi in X:if not csp.curr_domains[Xi]:return Noneassignment[Xi] = csp.curr_domains[Xi][]return assignment", "docstring": "[Fig. 6.11]", "id": "f1681:m13"} {"signature": "def different_values_constraint(A, a, B, b):", "body": "return a != b", "docstring": "A constraint saying two neighboring variables must differ in value.", "id": "f1681:m16"} {"signature": "def MapColoringCSP(colors, neighbors):", "body": "if isinstance(neighbors, str):neighbors = parse_neighbors(neighbors)return CSP(list(neighbors.keys()), UniversalDict(colors), neighbors,different_values_constraint)", "docstring": "Make a CSP for the problem of coloring a map with different colors\n for any two adjacent regions. Arguments are a list of colors, and a\n dict of {region: [neighbor,...]} entries. This dict may also be\n specified as a string of the form defined by parse_neighbors.", "id": "f1681:m17"} {"signature": "def parse_neighbors(neighbors, vars=[]):", "body": "dict = DefaultDict([])for var in vars:dict[var] = []specs = [spec.split('') for spec in neighbors.split('')]for (A, Aneighbors) in specs:A = A.strip()dict.setdefault(A, [])for B in Aneighbors.split():dict[A].append(B)dict[B].append(A)return dict", "docstring": "Convert a string of the form 'X: Y Z; Y: Z' into a dict mapping\n regions to neighbors. The syntax is a region name followed by a ':'\n followed by zero or more region names, followed by ';', repeated for\n each region name. 
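The backtracking_search and MapColoringCSP records above describe depth-first assignment of values with consistency checks. A standalone sketch of plain chronological backtracking on a made-up three-region map follows; it does not use the CSP class from these records, and every name in it is illustrative.

# Self-contained sketch of chronological backtracking for map colouring.
def backtrack_coloring(neighbors, colors, assignment=None):
    assignment = assignment or {}
    if len(assignment) == len(neighbors):
        return assignment
    var = next(v for v in neighbors if v not in assignment)
    for color in colors:
        # only try colours consistent with already-coloured neighbours
        if all(assignment.get(n) != color for n in neighbors[var]):
            assignment[var] = color
            result = backtrack_coloring(neighbors, colors, assignment)
            if result is not None:
                return result
            del assignment[var]          # undo and try the next colour
    return None

tiny_map = {'X': ['Y', 'Z'], 'Y': ['X', 'Z'], 'Z': ['X', 'Y']}
print(backtrack_coloring(tiny_map, ['red', 'green', 'blue']))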
If you say 'X: Y' you don't need 'Y: X'.\n >>> parse_neighbors('X: Y Z; Y: Z')\n {'Y': ['X', 'Z'], 'X': ['Y', 'Z'], 'Z': ['X', 'Y']}", "id": "f1681:m18"} {"signature": "def queen_constraint(A, a, B, b):", "body": "return A == B or (a != b and A + a != B + b and A - a != B - b)", "docstring": "Constraint is satisfied (true) if A, B are really the same variable,\n or if they are not in the same row, down diagonal, or up diagonal.", "id": "f1681:m19"} {"signature": "def Zebra():", "body": "Colors = ''.split()Pets = ''.split()Drinks = ''.split()Countries = ''.split()Smokes = ''.split()vars = Colors + Pets + Drinks + Countries + Smokesdomains = {}for var in vars:domains[var] = list(range(, ))domains[''] = []domains[''] = []neighbors = parse_neighbors(\"\"\"\"\"\", vars)for type in [Colors, Pets, Drinks, Countries, Smokes]:for A in type:for B in type:if A != B:if B not in neighbors[A]: neighbors[A].append(B)if A not in neighbors[B]: neighbors[B].append(A)def zebra_constraint(A, a, B, b, recurse=):same = (a == b)next_to = abs(a - b) == if A == '' and B == '': return sameif A == '' and B == '': return sameif A == '' and B == '': return next_toif A == '' and B == '': return next_toif A == '' and B == '': return sameif A == '' and B == '': return sameif A == '' and B == '': return sameif A == '' and B == '': return sameif A == '' and B == '': return sameif A == '' and B == '': return next_toif A == '' and B == '': return sameif A == '' and B == '': return (a - ) == bif recurse == : return zebra_constraint(B, b, A, a, )if ((A in Colors and B in Colors) or(A in Pets and B in Pets) or(A in Drinks and B in Drinks) or(A in Countries and B in Countries) or(A in Smokes and B in Smokes)): return not sameraise ''return CSP(vars, domains, neighbors, zebra_constraint)", "docstring": "Return an instance of the Zebra Puzzle.", "id": "f1681:m21"} {"signature": "def __init__(self, vars, domains, neighbors, constraints):", "body": "vars = vars or list(domains.keys())update(self, vars=vars, domains=domains,neighbors=neighbors, constraints=constraints,initial=(), curr_domains=None, nassigns=)", "docstring": "Construct a CSP problem. 
If vars is empty, it becomes domains.keys().", "id": "f1681:c0:m0"} {"signature": "def assign(self, var, val, assignment):", "body": "assignment[var] = valself.nassigns += ", "docstring": "Add {var: val} to assignment; Discard the old value if any.", "id": "f1681:c0:m1"} {"signature": "def unassign(self, var, assignment):", "body": "if var in assignment:del assignment[var]", "docstring": "Remove {var: val} from assignment.\n DO NOT call this if you are changing a variable to a new value;\n just call assign for that.", "id": "f1681:c0:m2"} {"signature": "def nconflicts(self, var, val, assignment):", "body": "def conflict(var2):return (var2 in assignmentand not self.constraints(var, val, var2, assignment[var2]))return count_if(conflict, self.neighbors[var])", "docstring": "Return the number of conflicts var=val has with other variables.", "id": "f1681:c0:m3"} {"signature": "def display(self, assignment):", "body": "print('', self, '', assignment)", "docstring": "Show a human-readable representation of the CSP.", "id": "f1681:c0:m4"} {"signature": "def actions(self, state):", "body": "if len(state) == len(self.vars):return []else:assignment = dict(state)var = find_if(lambda v: v not in assignment, self.vars)return [(var, val) for val in self.domains[var]if self.nconflicts(var, val, assignment) == ]", "docstring": "Return a list of applicable actions: nonconflicting\n assignments to an unassigned variable.", "id": "f1681:c0:m5"} {"signature": "def result(self, state, xxx_todo_changeme):", "body": "(var, val) = xxx_todo_changemereturn state + ((var, val),)", "docstring": "Perform an action and return the new state.", "id": "f1681:c0:m6"} {"signature": "def support_pruning(self):", "body": "if self.curr_domains is None:self.curr_domains = dict((v, list(self.domains[v]))for v in self.vars)", "docstring": "Make sure we can prune values from domains. 
(We want to pay\n for this only if we use it.)", "id": "f1681:c0:m8"} {"signature": "def suppose(self, var, value):", "body": "self.support_pruning()removals = [(var, a) for a in self.curr_domains[var] if a != value]self.curr_domains[var] = [value]return removals", "docstring": "Start accumulating inferences from assuming var=value.", "id": "f1681:c0:m9"} {"signature": "def prune(self, var, value, removals):", "body": "self.curr_domains[var].remove(value)if removals is not None: removals.append((var, value))", "docstring": "Rule out var=value.", "id": "f1681:c0:m10"} {"signature": "def choices(self, var):", "body": "return (self.curr_domains or self.domains)[var]", "docstring": "Return all values for var that aren't currently ruled out.", "id": "f1681:c0:m11"} {"signature": "def infer_assignment(self):", "body": "self.support_pruning()return dict((v, self.curr_domains[v][])for v in self.vars if == len(self.curr_domains[v]))", "docstring": "Return the partial assignment implied by the current inferences.", "id": "f1681:c0:m12"} {"signature": "def restore(self, removals):", "body": "for B, b in removals:self.curr_domains[B].append(b)", "docstring": "Undo a supposition and all inferences from it.", "id": "f1681:c0:m13"} {"signature": "def conflicted_vars(self, current):", "body": "return [var for var in self.varsif self.nconflicts(var, current[var], current) > ]", "docstring": "Return a list of variables in current assignment that are in conflict", "id": "f1681:c0:m14"} {"signature": "def __init__(self, n):", "body": "CSP.__init__(self, list(range(n)), UniversalDict(list(range(n))),UniversalDict(list(range(n))), queen_constraint)update(self, rows=[]*n, ups=[]*(*n - ), downs=[]*(*n - ))", "docstring": "Initialize data structures for n Queens.", "id": "f1681:c2:m0"} {"signature": "def nconflicts(self, var, val, assignment):", "body": "n = len(self.vars)c = self.rows[val] + self.downs[var+val] + self.ups[var-val+n-]if assignment.get(var, None) == val:c -= return c", "docstring": "The number of conflicts, as recorded with each assignment.\n Count conflicts in row and in up, down diagonals. 
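The NQueensCSP records above keep incremental rows/ups/downs counters so that nconflicts is cheap to query. The sketch below takes the simpler route of recomputing conflicts directly while following the same min-conflicts hill-climbing idea; all names are invented, and it may return None if it fails to converge within max_steps.

# Standalone min-conflicts sketch for N-Queens (one queen per column).
import random

def conflicts(board, col, row):
    # queens in other columns that share this row or a diagonal
    return sum(1 for c, r in enumerate(board)
               if c != col and (r == row or abs(r - row) == abs(c - col)))

def min_conflicts_queens(n=8, max_steps=10000):
    board = [random.randrange(n) for _ in range(n)]     # board[col] = row
    for _ in range(max_steps):
        conflicted = [c for c in range(n) if conflicts(board, c, board[c])]
        if not conflicted:
            return board
        col = random.choice(conflicted)
        best = min(conflicts(board, col, r) for r in range(n))
        board[col] = random.choice(                      # random tie-breaking
            [r for r in range(n) if conflicts(board, col, r) == best])
    return None

print(min_conflicts_queens(8))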
If there\n is a queen there, it can't conflict with itself, so subtract 3.", "id": "f1681:c2:m1"} {"signature": "def assign(self, var, val, assignment):", "body": "oldval = assignment.get(var, None)if val != oldval:if oldval is not None: self.record_conflict(assignment, var, oldval, -)self.record_conflict(assignment, var, val, +)CSP.assign(self, var, val, assignment)", "docstring": "Assign var, and keep track of conflicts.", "id": "f1681:c2:m2"} {"signature": "def unassign(self, var, assignment):", "body": "if var in assignment:self.record_conflict(assignment, var, assignment[var], -)CSP.unassign(self, var, assignment)", "docstring": "Remove var from assignment (if it is there) and track conflicts.", "id": "f1681:c2:m3"} {"signature": "def record_conflict(self, assignment, var, val, delta):", "body": "n = len(self.vars)self.rows[val] += deltaself.downs[var + val] += deltaself.ups[var - val + n - ] += delta", "docstring": "Record conflicts caused by addition or deletion of a Queen.", "id": "f1681:c2:m4"} {"signature": "def display(self, assignment):", "body": "n = len(self.vars)for val in range(n):for var in range(n):if assignment.get(var,'') == val: ch = ''elif (var+val) % == : ch = ''else: ch = ''print(ch, end='')print('', end='')for var in range(n):if assignment.get(var,'') == val: ch = ''else: ch = ''print(str(self.nconflicts(var, val, assignment))+ch, end='')print()", "docstring": "Print the queens and the nconflicts values (for debugging).", "id": "f1681:c2:m5"} {"signature": "def __init__(self, grid):", "body": "squares = iter(re.findall(r'', grid))domains = dict((var, if_(ch in '', [ch], ''))for var, ch in zip(flatten(self.rows), squares))for _ in squares:raise ValueError(\"\", grid) CSP.__init__(self, None, domains, self.neighbors,different_values_constraint)", "docstring": "Build a Sudoku problem from a string representing the grid:\n the digits 1-9 denote a filled cell, '.' or '0' an empty one;\n other characters are ignored.", "id": "f1681:c3:m0"} {"signature": "def KB_AgentProgram(KB):", "body": "steps = itertools.count()def program(percept):t = next(steps)KB.tell(make_percept_sentence(percept, t))action = KB.ask(make_action_query(t))KB.tell(make_action_sentence(action, t))return actiondef make_percept_sentence(self, percept, t):return Expr(\"\")(percept, t)def make_action_query(self, t):return expr(\"\" % t)def make_action_sentence(self, action, t):return Expr(\"\")(action[expr('')], t)return program", "docstring": "A generic logical knowledge-based agent program. [Fig. 7.1]", "id": "f1683:m0"} {"signature": "def expr(s):", "body": "if isinstance(s, Expr): return sif isnumber(s): return Expr(s)s = s.replace('', '').replace('', '')s = s.replace('', '').replace('', '')s = re.sub(r'', r'', s)return eval(s, {'':Expr})", "docstring": "Create an Expr representing a logic expression by parsing the input\n string. Symbols and numbers are automatically converted to Exprs.\n In addition you can use alternative spellings of these operators:\n 'x ==> y' parses as (x >> y) # Implication\n 'x <== y' parses as (x << y) # Reverse implication\n 'x <=> y' parses as (x % y) # Logical equivalence\n 'x =/= y' parses as (x ^ y) # Logical disequality (xor)\n But BE CAREFUL; precedence of implication is wrong. 
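The expr record above relies on Python operator overloading so that logical sentences can be written as ordinary expressions. A tiny standalone sketch of that idea, using an invented Node class rather than the Expr class from these records:

# Python's &, |, ~ and >> build an expression tree instead of computing a value.
class Node:
    def __init__(self, op, *args):
        self.op, self.args = op, args
    def __and__(self, other):    return Node('&', self, other)
    def __or__(self, other):     return Node('|', self, other)
    def __invert__(self):        return Node('~', self)
    def __rshift__(self, other): return Node('==>', self, other)
    def __repr__(self):
        if not self.args:
            return self.op
        if len(self.args) == 1:
            return self.op + repr(self.args[0])
        return '(%s %s %s)' % (self.args[0], self.op, self.args[1])

P, Q, R = Node('P'), Node('Q'), Node('R')
print(~P | (Q & R))      # (~P | (Q & R))
print((P & Q) >> R)      # ((P & Q) ==> R)

Because Python gives >> higher precedence than & and |, implications need explicit parentheses, which is exactly the precedence caveat noted in the expr docstring above.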
expr('P & Q ==> R & S')\n is ((P & (Q >> R)) & S); so you must use expr('(P & Q) ==> (R & S)').\n >>> expr('P <=> Q(1)')\n (P <=> Q(1))\n >>> expr('P & Q | ~R(x, F(x))')\n ((P & Q) | ~R(x, F(x)))", "id": "f1683:m1"} {"signature": "def is_symbol(s):", "body": "return isinstance(s, str) and s[:].isalpha()", "docstring": "A string s is a symbol if it starts with an alphabetic char.", "id": "f1683:m2"} {"signature": "def is_var_symbol(s):", "body": "return is_symbol(s) and s[].islower()", "docstring": "A logic variable symbol is an initial-lowercase string.", "id": "f1683:m3"} {"signature": "def is_prop_symbol(s):", "body": "return is_symbol(s) and s[].isupper() and s != '' and s != ''", "docstring": "A proposition logic symbol is an initial-uppercase string other than\n TRUE or FALSE.", "id": "f1683:m4"} {"signature": "def variables(s):", "body": "result = set([])def walk(s):if is_variable(s):result.add(s)else:for arg in s.args:walk(arg)walk(s)return result", "docstring": "Return a set of the variables in expression s.\n >>> ppset(variables(F(x, A, y)))\n set([x, y])\n >>> ppset(variables(F(G(x), z)))\n set([x, z])\n >>> ppset(variables(expr('F(x, x) & G(x, y) & H(y, z) & R(A, z, z)')))\n set([x, y, z])", "id": "f1683:m5"} {"signature": "def is_definite_clause(s):", "body": "if is_symbol(s.op):return Trueelif s.op == '':antecedent, consequent = s.argsreturn (is_symbol(consequent.op)and every(lambda arg: is_symbol(arg.op), conjuncts(antecedent)))else:return False", "docstring": "returns True for exprs s of the form A & B & ... & C ==> D,\n where all literals are positive. In clause form, this is\n ~A | ~B | ... | ~C | D, where exactly one clause is positive.\n >>> is_definite_clause(expr('Farmer(Mac)'))\n True\n >>> is_definite_clause(expr('~Farmer(Mac)'))\n False\n >>> is_definite_clause(expr('(Farmer(f) & Rabbit(r)) ==> Hates(f, r)'))\n True\n >>> is_definite_clause(expr('(Farmer(f) & ~Rabbit(r)) ==> Hates(f, r)'))\n False\n >>> is_definite_clause(expr('(Farmer(f) | Rabbit(r)) ==> Hates(f, r)'))\n False", "id": "f1683:m6"} {"signature": "def parse_definite_clause(s):", "body": "assert is_definite_clause(s)if is_symbol(s.op):return [], selse:antecedent, consequent = s.argsreturn conjuncts(antecedent), consequent", "docstring": "Return the antecedents and the consequent of a definite clause.", "id": "f1683:m7"} {"signature": "def tt_entails(kb, alpha):", "body": "assert not variables(alpha)return tt_check_all(kb, alpha, prop_symbols(kb & alpha), {})", "docstring": "Does kb entail the sentence alpha? Use truth tables. For propositional\n kb's and sentences. [Fig. 7.10]\n >>> tt_entails(expr('P & Q'), expr('Q'))\n True", "id": "f1683:m8"} {"signature": "def tt_check_all(kb, alpha, symbols, model):", "body": "if not symbols:if pl_true(kb, model):result = pl_true(alpha, model)assert result in (True, False)return resultelse:return Trueelse:P, rest = symbols[], symbols[:]return (tt_check_all(kb, alpha, rest, extend(model, P, True)) andtt_check_all(kb, alpha, rest, extend(model, P, False)))", "docstring": "Auxiliary routine to implement tt_entails.", "id": "f1683:m9"} {"signature": "def prop_symbols(x):", "body": "if not isinstance(x, Expr):return []elif is_prop_symbol(x.op):return [x]else:return list(set(symbol for arg in x.argsfor symbol in prop_symbols(arg)))", "docstring": "Return a list of all propositional symbols in x.", "id": "f1683:m10"} {"signature": "def tt_true(alpha):", "body": "return tt_entails(TRUE, expr(alpha))", "docstring": "Is the propositional sentence alpha a tautology? 
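The tt_entails and tt_true records above check entailment by enumerating every model over the proposition symbols. Here is a self-contained sketch of the same enumeration, with sentences represented as plain Python predicates over a model dict (an invented representation, not the Expr class used in these records):

# KB entails alpha iff alpha holds in every model where KB holds.
from itertools import product

def tt_entails_sketch(kb, alpha, symbols):
    for values in product([True, False], repeat=len(symbols)):
        model = dict(zip(symbols, values))
        if kb(model) and not alpha(model):
            return False                 # found a model of KB where alpha fails
    return True

kb    = lambda m: m['P'] and m['Q']      # P & Q
alpha = lambda m: m['Q']                 # Q
print(tt_entails_sketch(kb, alpha, ['P', 'Q']))   # True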
(alpha will be\n coerced to an expr.)\n >>> tt_true(expr(\"(P >> Q) <=> (~P | Q)\"))\n True", "id": "f1683:m11"} {"signature": "def pl_true(exp, model={}):", "body": "op, args = exp.op, exp.argsif exp == TRUE:return Trueelif exp == FALSE:return Falseelif is_prop_symbol(op):return model.get(exp)elif op == '':p = pl_true(args[], model)if p is None: return Noneelse: return not pelif op == '':result = Falsefor arg in args:p = pl_true(arg, model)if p is True: return Trueif p is None: result = Nonereturn resultelif op == '':result = Truefor arg in args:p = pl_true(arg, model)if p is False: return Falseif p is None: result = Nonereturn resultp, q = argsif op == '':return pl_true(~p | q, model)elif op == '':return pl_true(p | ~q, model)pt = pl_true(p, model)if pt is None: return Noneqt = pl_true(q, model)if qt is None: return Noneif op == '':return pt == qtelif op == '':return pt != qtelse:raise ValueError(\"\" + str(exp))", "docstring": "Return True if the propositional logic expression is true in the model,\n and False if it is false. If the model does not specify the value for\n every proposition, this may return None to indicate 'not obvious';\n this may happen even when the expression is tautological.", "id": "f1683:m12"} {"signature": "def to_cnf(s):", "body": "if isinstance(s, str): s = expr(s)s = eliminate_implications(s) s = move_not_inwards(s) return distribute_and_over_or(s)", "docstring": "Convert a propositional logical sentence s to conjunctive normal form.\n That is, to the form ((A | ~B | ...) & (B | C | ...) & ...) [p. 253]\n >>> to_cnf(\"~(B|C)\")\n (~B & ~C)\n >>> to_cnf(\"B <=> (P1|P2)\")\n ((~P1 | B) & (~P2 | B) & (P1 | P2 | ~B))\n >>> to_cnf(\"a | (b & c) | d\")\n ((b | a | d) & (c | a | d))\n >>> to_cnf(\"A & (B | (D & E))\")\n (A & (D | B) & (E | B))\n >>> to_cnf(\"A | (B | (C | (D & E)))\")\n ((D | A | B | C) & (E | A | B | C))", "id": "f1683:m13"} {"signature": "def eliminate_implications(s):", "body": "if not s.args or is_symbol(s.op): return s args = list(map(eliminate_implications, s.args))a, b = args[], args[-]if s.op == '':return (b | ~a)elif s.op == '':return (a | ~b)elif s.op == '':return (a | ~b) & (b | ~a)elif s.op == '':assert len(args) == return (a & ~b) | (~a & b)else:assert s.op in ('', '', '')return Expr(s.op, *args)", "docstring": "Change >>, <<, and <=> into &, |, and ~. 
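The pl_true record above evaluates a sentence recursively against a model, returning None when the model is partial. The simplified sketch below evaluates only over complete models and uses an invented nested-tuple encoding of sentences instead of Expr:

# Sentences are nested tuples: ('~', x), ('&', x, y), ('|', x, y), ('==>', x, y).
def eval_sentence(s, model):
    if isinstance(s, str):               # proposition symbol
        return model[s]
    op, args = s[0], s[1:]
    if op == '~':
        return not eval_sentence(args[0], model)
    if op == '&':
        return all(eval_sentence(a, model) for a in args)
    if op == '|':
        return any(eval_sentence(a, model) for a in args)
    if op == '==>':
        return (not eval_sentence(args[0], model)) or eval_sentence(args[1], model)
    raise ValueError('unknown operator: %r' % (op,))

s = ('==>', ('&', 'P', 'Q'), ('~', 'R'))
print(eval_sentence(s, {'P': True, 'Q': True, 'R': False}))   # True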
That is, return an Expr\n that is equivalent to s, but has only &, |, and ~ as logical operators.\n >>> eliminate_implications(A >> (~B << C))\n ((~B | ~C) | ~A)\n >>> eliminate_implications(A ^ B)\n ((A & ~B) | (~A & B))", "id": "f1683:m14"} {"signature": "def move_not_inwards(s):", "body": "if s.op == '':NOT = lambda b: move_not_inwards(~b)a = s.args[]if a.op == '': return move_not_inwards(a.args[]) if a.op =='': return associate('', list(map(NOT, a.args)))if a.op =='': return associate('', list(map(NOT, a.args)))return selif is_symbol(s.op) or not s.args:return selse:return Expr(s.op, *list(map(move_not_inwards, s.args)))", "docstring": "Rewrite sentence s by moving negation sign inward.\n >>> move_not_inwards(~(A | B))\n (~A & ~B)\n >>> move_not_inwards(~(A & B))\n (~A | ~B)\n >>> move_not_inwards(~(~(A | ~B) | ~~C))\n ((A | ~B) & ~C)", "id": "f1683:m15"} {"signature": "def distribute_and_over_or(s):", "body": "if s.op == '':s = associate('', s.args)if s.op != '':return distribute_and_over_or(s)if len(s.args) == :return FALSEif len(s.args) == :return distribute_and_over_or(s.args[])conj = find_if((lambda d: d.op == ''), s.args)if not conj:return sothers = [a for a in s.args if a is not conj]rest = associate('', others)return associate('', [distribute_and_over_or(c|rest)for c in conj.args])elif s.op == '':return associate('', list(map(distribute_and_over_or, s.args)))else:return s", "docstring": "Given a sentence s consisting of conjunctions and disjunctions\n of literals, return an equivalent sentence in CNF.\n >>> distribute_and_over_or((A & B) | C)\n ((A | C) & (B | C))", "id": "f1683:m16"} {"signature": "def associate(op, args):", "body": "args = dissociate(op, args)if len(args) == :return _op_identity[op]elif len(args) == :return args[]else:return Expr(op, *args)", "docstring": "Given an associative op, return an expression with the same\n meaning as Expr(op, *args), but flattened -- that is, with nested\n instances of the same op promoted to the top level.\n >>> associate('&', [(A&B),(B|C),(B&C)])\n (A & B & (B | C) & B & C)\n >>> associate('|', [A|(B|(C|(A&B)))])\n (A | B | C | (A & B))", "id": "f1683:m17"} {"signature": "def dissociate(op, args):", "body": "result = []def collect(subargs):for arg in subargs:if arg.op == op: collect(arg.args)else: result.append(arg)collect(args)return result", "docstring": "Given an associative op, return a flattened list result such\n that Expr(op, *result) means the same as Expr(op, *args).", "id": "f1683:m18"} {"signature": "def conjuncts(s):", "body": "return dissociate('', [s])", "docstring": "Return a list of the conjuncts in the sentence s.\n >>> conjuncts(A & B)\n [A, B]\n >>> conjuncts(A | B)\n [(A | B)]", "id": "f1683:m19"} {"signature": "def disjuncts(s):", "body": "return dissociate('', [s])", "docstring": "Return a list of the disjuncts in the sentence s.\n >>> disjuncts(A | B)\n [A, B]\n >>> disjuncts(A & B)\n [(A & B)]", "id": "f1683:m20"} {"signature": "def pl_resolution(KB, alpha):", "body": "clauses = KB.clauses + conjuncts(to_cnf(~alpha))new = set()while True:n = len(clauses)pairs = [(clauses[i], clauses[j])for i in range(n) for j in range(i+, n)]for (ci, cj) in pairs:resolvents = pl_resolve(ci, cj)if FALSE in resolvents: return Truenew = new.union(set(resolvents))if new.issubset(set(clauses)): return Falsefor c in new:if c not in clauses: clauses.append(c)", "docstring": "Propositional-logic resolution: say if alpha follows from KB. [Fig. 
7.12]", "id": "f1683:m21"} {"signature": "def pl_resolve(ci, cj):", "body": "clauses = []for di in disjuncts(ci):for dj in disjuncts(cj):if di == ~dj or ~di == dj:dnew = unique(removeall(di, disjuncts(ci)) +removeall(dj, disjuncts(cj)))clauses.append(associate('', dnew))return clauses", "docstring": "Return all clauses that can be obtained by resolving clauses ci and cj.\n >>> for res in pl_resolve(to_cnf(A|B|C), to_cnf(~B|~C|F)):\n ... ppset(disjuncts(res))\n set([A, C, F, ~C])\n set([A, B, F, ~B])", "id": "f1683:m22"} {"signature": "def pl_fc_entails(KB, q):", "body": "count = dict([(c, len(conjuncts(c.args[]))) for c in KB.clausesif c.op == ''])inferred = DefaultDict(False)agenda = [s for s in KB.clauses if is_prop_symbol(s.op)]while agenda:p = agenda.pop()if p == q: return Trueif not inferred[p]:inferred[p] = Truefor c in KB.clauses_with_premise(p):count[c] -= if count[c] == :agenda.append(c.args[])return False", "docstring": "Use forward chaining to see if a PropDefiniteKB entails symbol q.\n [Fig. 7.15]\n >>> pl_fc_entails(Fig[7,15], expr('Q'))\n True", "id": "f1683:m23"} {"signature": "def dpll_satisfiable(s):", "body": "clauses = conjuncts(to_cnf(s))symbols = prop_symbols(s)return dpll(clauses, symbols, {})", "docstring": "Check satisfiability of a propositional sentence.\n This differs from the book code in two ways: (1) it returns a model\n rather than True when it succeeds; this is more useful. (2) The\n function find_pure_symbol is passed a list of unknown clauses, rather\n than a list of all clauses and the model; this is more efficient.\n >>> ppsubst(dpll_satisfiable(A&~B))\n {A: True, B: False}\n >>> dpll_satisfiable(P&~P)\n False", "id": "f1683:m24"} {"signature": "def dpll(clauses, symbols, model):", "body": "unknown_clauses = [] for c in clauses:val = pl_true(c, model)if val == False:return Falseif val != True:unknown_clauses.append(c)if not unknown_clauses:return modelP, value = find_pure_symbol(symbols, unknown_clauses)if P:return dpll(clauses, removeall(P, symbols), extend(model, P, value))P, value = find_unit_clause(clauses, model)if P:return dpll(clauses, removeall(P, symbols), extend(model, P, value))P, symbols = symbols[], symbols[:]return (dpll(clauses, symbols, extend(model, P, True)) ordpll(clauses, symbols, extend(model, P, False)))", "docstring": "See if the clauses are true in a partial model.", "id": "f1683:m25"} {"signature": "def find_pure_symbol(symbols, clauses):", "body": "for s in symbols:found_pos, found_neg = False, Falsefor c in clauses:if not found_pos and s in disjuncts(c): found_pos = Trueif not found_neg and ~s in disjuncts(c): found_neg = Trueif found_pos != found_neg: return s, found_posreturn None, None", "docstring": "Find a symbol and its value if it appears only as a positive literal\n (or only as a negative) in clauses.\n >>> find_pure_symbol([A, B, C], [A|~B,~B|~C,C|A])\n (A, True)", "id": "f1683:m26"} {"signature": "def find_unit_clause(clauses, model):", "body": "for clause in clauses:P, value = unit_clause_assign(clause, model)if P: return P, valuereturn None, None", "docstring": "Find a forced assignment if possible from a clause with only 1\n variable not bound in the model.\n >>> find_unit_clause([A|B|C, B|~C, ~A|~B], {A:True})\n (B, False)", "id": "f1683:m27"} {"signature": "def unit_clause_assign(clause, model):", "body": "P, value = None, Nonefor literal in disjuncts(clause):sym, positive = inspect_literal(literal)if sym in model:if model[sym] == positive:return None, None elif P:return None, None else:P, value = sym, 
positivereturn P, value", "docstring": "Return a single variable/value pair that makes clause true in\n the model, if possible.\n >>> unit_clause_assign(A|B|C, {A:True})\n (None, None)\n >>> unit_clause_assign(B|~C, {A:True})\n (None, None)\n >>> unit_clause_assign(~A|~B, {A:True})\n (B, False)", "id": "f1683:m28"} {"signature": "def inspect_literal(literal):", "body": "if literal.op == '':return literal.args[], Falseelse:return literal, True", "docstring": "The symbol in this literal, and the value it should take to\n make the literal true.\n >>> inspect_literal(P)\n (P, True)\n >>> inspect_literal(~P)\n (P, False)", "id": "f1683:m29"} {"signature": "def SAT_plan(init, transition, goal, t_max, SAT_solver=dpll_satisfiable):", "body": "for t in range(t_max):cnf = translate_to_SAT(init, transition, goal, t)model = SAT_solver(cnf)if model is not False:return extract_solution(model)return None", "docstring": "[Fig. 7.22]", "id": "f1683:m32"} {"signature": "def unify(x, y, s):", "body": "if s is None:return Noneelif x == y:return selif is_variable(x):return unify_var(x, y, s)elif is_variable(y):return unify_var(y, x, s)elif isinstance(x, Expr) and isinstance(y, Expr):return unify(x.args, y.args, unify(x.op, y.op, s))elif isinstance(x, str) or isinstance(y, str):return Noneelif issequence(x) and issequence(y) and len(x) == len(y):if not x: return sreturn unify(x[:], y[:], unify(x[], y[], s))else:return None", "docstring": "Unify expressions x,y with substitution s; return a substitution that\n would make x,y equal, or None if x,y can not unify. x and y can be\n variables (e.g. Expr('x')), constants, lists, or Exprs. [Fig. 9.1]\n >>> ppsubst(unify(x + y, y + C, {}))\n {x: y, y: C}", "id": "f1683:m35"} {"signature": "def is_variable(x):", "body": "return isinstance(x, Expr) and not x.args and is_var_symbol(x.op)", "docstring": "A variable is an Expr with no args and a lowercase symbol as the op.", "id": "f1683:m36"} {"signature": "def occur_check(var, x, s):", "body": "if var == x:return Trueelif is_variable(x) and x in s:return occur_check(var, s[x], s)elif isinstance(x, Expr):return (occur_check(var, x.op, s) oroccur_check(var, x.args, s))elif isinstance(x, (list, tuple)):return some(lambda element: occur_check(var, element, s), x)else:return False", "docstring": "Return true if variable var occurs anywhere in x\n (or in subst(s, x), if s has a binding for x).", "id": "f1683:m38"} {"signature": "def extend(s, var, val):", "body": "s2 = s.copy()s2[var] = valreturn s2", "docstring": "Copy the substitution s and extend it by setting var to val;\n return copy.\n >>> ppsubst(extend({x: 1}, y, 2))\n {x: 1, y: 2}", "id": "f1683:m39"} {"signature": "def subst(s, x):", "body": "if isinstance(x, list):return [subst(s, xi) for xi in x]elif isinstance(x, tuple):return tuple([subst(s, xi) for xi in x])elif not isinstance(x, Expr):return xelif is_var_symbol(x.op):return s.get(x, x)else:return Expr(x.op, *[subst(s, arg) for arg in x.args])", "docstring": "Substitute the substitution s into the expression x.\n >>> subst({x: 42, y:0}, F(x) + y)\n (F(42) + 0)", "id": "f1683:m40"} {"signature": "def fol_fc_ask(KB, alpha):", "body": "while True:new = {}for r in KB.clauses:ps, q = parse_definite_clause(standardize_variables(r))raise NotImplementedError", "docstring": "Inefficient forward chaining for first-order logic. [Fig. 
9.3]\n KB is a FolKB and alpha must be an atomic sentence.", "id": "f1683:m41"} {"signature": "def standardize_variables(sentence, dic=None):", "body": "if dic is None: dic = {}if not isinstance(sentence, Expr):return sentenceelif is_var_symbol(sentence.op):if sentence in dic:return dic[sentence]else:v = Expr('' % next(standardize_variables.counter))dic[sentence] = vreturn velse:return Expr(sentence.op,*[standardize_variables(a, dic) for a in sentence.args])", "docstring": "Replace all the variables in sentence with new variables.\n >>> e = expr('F(a, b, c) & G(c, A, 23)')\n >>> len(variables(standardize_variables(e)))\n 3\n >>> variables(e).intersection(variables(standardize_variables(e)))\n set([])\n >>> is_variable(standardize_variables(expr('x')))\n True", "id": "f1683:m42"} {"signature": "def fol_bc_ask(KB, query):", "body": "return fol_bc_or(KB, query, {})", "docstring": "A simple backward-chaining algorithm for first-order logic. [Fig. 9.6]\n KB should be an instance of FolKB, and goals a list of literals.\n >>> test_ask('Farmer(x)')\n ['{x: Mac}']\n >>> test_ask('Human(x)')\n ['{x: Mac}', '{x: MrsMac}']\n >>> test_ask('Hates(x, y)')\n ['{x: Mac, y: MrsRabbit}', '{x: Mac, y: Pete}']\n >>> test_ask('Loves(x, y)')\n ['{x: MrsMac, y: Mac}', '{x: MrsRabbit, y: Pete}']\n >>> test_ask('Rabbit(x)')\n ['{x: MrsRabbit}', '{x: Pete}']\n >>> test_ask('Criminal(x)', crime_kb)\n ['{x: West}']", "id": "f1683:m44"} {"signature": "def diff(y, x):", "body": "if y == x: return ONEelif not y.args: return ZEROelse:u, op, v = y.args[], y.op, y.args[-]if op == '': return diff(u, x) + diff(v, x)elif op == '' and len(args) == : return -diff(u, x)elif op == '': return diff(u, x) - diff(v, x)elif op == '': return u * diff(v, x) + v * diff(u, x)elif op == '': return (v*diff(u, x) - u*diff(v, x)) / (v * v)elif op == '' and isnumber(x.op):return (v * u ** (v - ) * diff(u, x))elif op == '': return (v * u ** (v - ) * diff(u, x)+ u ** v * Expr('')(u) * diff(v, x))elif op == '': return diff(u, x) / uelse: raise ValueError(\"\" % (op, y, x))", "docstring": "Return the symbolic derivative, dy/dx, as an Expr.\n However, you probably want to simplify the results with simp.\n >>> diff(x * x, x)\n ((x * 1) + (x * 1))\n >>> simp(diff(x * x, x))\n (2 * x)", "id": "f1683:m47"} {"signature": "def d(y, x):", "body": "return simp(diff(y, x))", "docstring": "Differentiate and then simplify.", "id": "f1683:m49"} {"signature": "def pretty_dict(d):", "body": "return '' % ''.join('' % (k, v)for k, v in sorted(list(d.items()), key=repr))", "docstring": "Return dictionary d's repr but with the items sorted.\n >>> pretty_dict({'m': 'M', 'a': 'A', 'r': 'R', 'k': 'K'})\n \"{'a': 'A', 'k': 'K', 'm': 'M', 'r': 'R'}\"\n >>> pretty_dict({z: C, y: B, x: A})\n '{x: A, y: B, z: C}'", "id": "f1683:m51"} {"signature": "def pretty_set(s):", "body": "return '' % sorted(s, key=repr)", "docstring": "Return set s's repr but with the items sorted.\n >>> pretty_set(set(['A', 'Q', 'F', 'K', 'Y', 'B']))\n \"set(['A', 'B', 'F', 'K', 'Q', 'Y'])\"\n >>> pretty_set(set([z, y, x]))\n 'set([x, y, z])'", "id": "f1683:m52"} {"signature": "def ppsubst(s):", "body": "ppdict(s)", "docstring": "Pretty-print substitution s", "id": "f1683:m54"} {"signature": "def tell(self, sentence):", "body": "abstract", "docstring": "Add the sentence to the KB.", "id": "f1683:c0:m1"} {"signature": "def ask(self, query):", "body": "for result in self.ask_generator(query):return resultreturn False", "docstring": "Return a substitution that makes the query true, or,\n failing that, 
return False.", "id": "f1683:c0:m2"} {"signature": "def ask_generator(self, query):", "body": "abstract", "docstring": "Yield all the substitutions that make query true.", "id": "f1683:c0:m3"} {"signature": "def retract(self, sentence):", "body": "abstract", "docstring": "Remove sentence from the KB.", "id": "f1683:c0:m4"} {"signature": "def tell(self, sentence):", "body": "self.clauses.extend(conjuncts(to_cnf(sentence)))", "docstring": "Add the sentence's clauses to the KB.", "id": "f1683:c1:m1"} {"signature": "def ask_generator(self, query):", "body": "if tt_entails(Expr('', *self.clauses), query):yield {}", "docstring": "Yield the empty substitution if KB implies query; else nothing.", "id": "f1683:c1:m2"} {"signature": "def retract(self, sentence):", "body": "for c in conjuncts(to_cnf(sentence)):if c in self.clauses:self.clauses.remove(c)", "docstring": "Remove the sentence's clauses from the KB.", "id": "f1683:c1:m3"} {"signature": "def __init__(self, op, *args):", "body": "assert isinstance(op, str) or (isnumber(op) and not args)self.op = num_or_str(op)self.args = list(map(expr, args))", "docstring": "Op is a string or number; args are Exprs (or are coerced to Exprs).", "id": "f1683:c2:m0"} {"signature": "def __call__(self, *args):", "body": "assert is_symbol(self.op) and not self.argsreturn Expr(self.op, *args)", "docstring": "Self must be a symbol with no args, such as Expr('F'). Create a new\n Expr with 'F' as op and the args as arguments.", "id": "f1683:c2:m1"} {"signature": "def __repr__(self):", "body": "if not self.args: return str(self.op)elif is_symbol(self.op): return '' % (self.op, ''.join(map(repr, self.args)))elif len(self.args) == : return self.op + repr(self.args[])else: return '' % (''+self.op+'').join(map(repr, self.args))", "docstring": "Show something like 'P' or 'P(x, y)', or '~P' or '(P | Q | R)", "id": "f1683:c2:m2"} {"signature": "def __eq__(self, other):", "body": "return (other is self) or (isinstance(other, Expr)and self.op == other.op and self.args == other.args)", "docstring": "x and y are equal iff their ops and args are equal.", "id": "f1683:c2:m3"} {"signature": "def __hash__(self):", "body": "return hash(self.op) ^ hash(tuple(self.args))", "docstring": "Need a hash method so Exprs can live in dicts.", "id": "f1683:c2:m5"} {"signature": "def tell(self, sentence):", "body": "assert is_definite_clause(sentence), \"\"self.clauses.append(sentence)", "docstring": "Add a definite clause to this KB.", "id": "f1683:c3:m0"} {"signature": "def ask_generator(self, query):", "body": "if pl_fc_entails(self.clauses, query):yield {}", "docstring": "Yield the empty substitution if KB implies query; else nothing.", "id": "f1683:c3:m1"} {"signature": "def clauses_with_premise(self, p):", "body": "return [c for c in self.clausesif c.op == '' and p in conjuncts(c.args[])]", "docstring": "Return a list of the clauses in KB that have p in their premise.\n This could be cached away for O(1) speed, but we'll recompute it.", "id": "f1683:c3:m3"} {"signature": "def Rules(**rules):", "body": "for (lhs, rhs) in list(rules.items()):rules[lhs] = [alt.strip().split() for alt in rhs.split('')]return rules", "docstring": "Create a dictionary mapping symbols to alternative sequences.\n >>> Rules(A = \"B C | D E\")\n {'A': [['B', 'C'], ['D', 'E']]}", "id": "f1684:m0"} {"signature": "def Lexicon(**rules):", "body": "for (lhs, rhs) in list(rules.items()):rules[lhs] = [word.strip() for word in rhs.split('')]return rules", "docstring": "Create a dictionary mapping symbols to alternative 
words.\n >>> Lexicon(Art = \"the | a | an\")\n {'Art': ['the', 'a', 'an']}", "id": "f1684:m1"} {"signature": "def generate_random(grammar=E_, s=''):", "body": "import randomdef rewrite(tokens, into):for token in tokens:if token in grammar.rules:rewrite(random.choice(grammar.rules[token]), into)elif token in grammar.lexicon:into.append(random.choice(grammar.lexicon[token]))else:into.append(token)return intoreturn ''.join(rewrite(s.split(), []))", "docstring": "Replace each token in s by a random entry in grammar (recursively).\n This is useful for testing a grammar, e.g. generate_random(E_)", "id": "f1684:m2"} {"signature": "def __init__(self, name, rules, lexicon):", "body": "update(self, name=name, rules=rules, lexicon=lexicon)self.categories = DefaultDict([])for lhs in lexicon:for word in lexicon[lhs]:self.categories[word].append(lhs)", "docstring": "A grammar has a set of rules and a lexicon.", "id": "f1684:c0:m0"} {"signature": "def rewrites_for(self, cat):", "body": "return self.rules.get(cat, ())", "docstring": "Return a sequence of possible rhs's that cat can be rewritten as.", "id": "f1684:c0:m1"} {"signature": "def isa(self, word, cat):", "body": "return cat in self.categories[word]", "docstring": "Return True iff word is of category cat", "id": "f1684:c0:m2"} {"signature": "def __init__(self, grammar, trace=False):", "body": "update(self, grammar=grammar, trace=trace)", "docstring": "A datastructure for parsing a string; and methods to do the parse.\n self.chart[i] holds the edges that end just before the i'th word.\n Edges are 5-element lists of [start, end, lhs, [found], [expects]].", "id": "f1684:c1:m0"} {"signature": "def parses(self, words, S=''):", "body": "if isinstance(words, str):words = words.split()self.parse(words, S)return [[i, j, S, found, []]for (i, j, lhs, found, expects) in self.chart[len(words)]if i == and lhs == S and expects == []]", "docstring": "Return a list of parses; words can be a list or string.\n >>> chart = Chart(E_NP_)\n >>> chart.parses('happy man', 'NP')\n [[0, 2, 'NP', [('Adj', 'happy'), [1, 2, 'NP', [('N', 'man')], []]], []]]", "id": "f1684:c1:m1"} {"signature": "def parse(self, words, S=''):", "body": "self.chart = [[] for i in range(len(words)+)]self.add_edge([, , '', [], [S]])for i in range(len(words)):self.scanner(i, words[i])return self.chart", "docstring": "Parse a list of words; according to the grammar.\n Leave results in the chart.", "id": "f1684:c1:m2"} {"signature": "def add_edge(self, edge):", "body": "start, end, lhs, found, expects = edgeif edge not in self.chart[end]:self.chart[end].append(edge)if self.trace:print('' % (caller(), edge))if not expects:self.extender(edge)else:self.predictor(edge)", "docstring": "Add edge to chart, and see if it extends or predicts another edge.", "id": "f1684:c1:m3"} {"signature": "def scanner(self, j, word):", "body": "for (i, j, A, alpha, Bb) in self.chart[j]:if Bb and self.grammar.isa(word, Bb[]):self.add_edge([i, j+, A, alpha + [(Bb[], word)], Bb[:]])", "docstring": "For each edge expecting a word of this category here, extend the edge.", "id": "f1684:c1:m4"} {"signature": "def predictor(self, xxx_todo_changeme):", "body": "(i, j, A, alpha, Bb) = xxx_todo_changemeB = Bb[]if B in self.grammar.rules:for rhs in self.grammar.rewrites_for(B):self.add_edge([j, j, B, [], rhs])", "docstring": "Add to chart any rules for B that could help extend this edge.", "id": "f1684:c1:m5"} {"signature": "def extender(self, edge):", "body": "(j, k, B, _, _) = edgefor (i, j, A, alpha, B1b) in self.chart[j]:if B1b and 
B == B1b[]:self.add_edge([i, k, A, alpha + [edge], B1b[:]])", "docstring": "See what edges can be extended by this edge.", "id": "f1684:c1:m6"} {"signature": "def minimax_decision(state, game):", "body": "player = game.to_move(state)def max_value(state):if game.terminal_test(state):return game.utility(state, player)v = -infinityfor a in game.actions(state):v = max(v, min_value(game.result(state, a)))return vdef min_value(state):if game.terminal_test(state):return game.utility(state, player)v = infinityfor a in game.actions(state):v = min(v, max_value(game.result(state, a)))return vreturn argmax(game.actions(state),lambda a: min_value(game.result(state, a)))", "docstring": "Given a state in a game, calculate the best move by searching\n forward all the way to the terminal states. [Fig. 5.3]", "id": "f1685:m0"} {"signature": "def alphabeta_full_search(state, game):", "body": "player = game.to_move(state)def max_value(state, alpha, beta):if game.terminal_test(state):return game.utility(state, player)v = -infinityfor a in game.actions(state):v = max(v, min_value(game.result(state, a), alpha, beta))if v >= beta:return valpha = max(alpha, v)return vdef min_value(state, alpha, beta):if game.terminal_test(state):return game.utility(state, player)v = infinityfor a in game.actions(state):v = min(v, max_value(game.result(state, a), alpha, beta))if v <= alpha:return vbeta = min(beta, v)return vreturn argmax(game.actions(state),lambda a: min_value(game.result(state, a),-infinity, infinity))", "docstring": "Search game to determine best action; use alpha-beta pruning.\n As in [Fig. 5.7], this version searches all the way to the leaves.", "id": "f1685:m1"} {"signature": "def query_player(game, state):", "body": "game.display(state)return num_or_str(input(''))", "docstring": "Make a move by querying standard input.", "id": "f1685:m3"} {"signature": "def random_player(game, state):", "body": "return random.choice(game.actions(state))", "docstring": "A player that chooses a legal move at random.", "id": "f1685:m4"} {"signature": "def play_game(game, *players):", "body": "state = game.initialwhile True:for player in players:move = player(game, state)state = game.result(state, move)if game.terminal_test(state):return game.utility(state, game.to_move(game.initial))", "docstring": "Play an n-person, move-alternating game.\n >>> play_game(Fig52Game(), alphabeta_player, alphabeta_player)\n 3", "id": "f1685:m6"} {"signature": "def actions(self, state):", "body": "abstract", "docstring": "Return a list of the allowable moves at this point.", "id": "f1685:c0:m0"} {"signature": "def result(self, state, move):", "body": "abstract", "docstring": "Return the state that results from making a move from a state.", "id": "f1685:c0:m1"} {"signature": "def utility(self, state, player):", "body": "abstract", "docstring": "Return the value of this final state to player.", "id": "f1685:c0:m2"} {"signature": "def to_move(self, state):", "body": "return state.to_move", "docstring": "Return the player whose move it is in this state.", "id": "f1685:c0:m4"} {"signature": "def display(self, state):", "body": "print(state)", "docstring": "Print or otherwise display the state.", "id": "f1685:c0:m5"} {"signature": "def actions(self, state):", "body": "return state.moves", "docstring": "Legal moves are any square not yet taken.", "id": "f1685:c2:m1"} {"signature": "def utility(self, state, player):", "body": "return if_(player == '', state.utility, -state.utility)", "docstring": "Return the value to player; 1 for win, -1 for loss, 0 
otherwise.", "id": "f1685:c2:m3"} {"signature": "def compute_utility(self, board, move, player):", "body": "if (self.k_in_row(board, move, player, (, )) orself.k_in_row(board, move, player, (, )) orself.k_in_row(board, move, player, (, -)) orself.k_in_row(board, move, player, (, ))):return if_(player == '', +, -)else:return ", "docstring": "If X wins with this move, return 1; if O return -1; else return 0.", "id": "f1685:c2:m6"} {"signature": "def k_in_row(self, board, move, player, xxx_todo_changeme):", "body": "(delta_x, delta_y) = xxx_todo_changemex, y = moven = while board.get((x, y)) == player:n += x, y = x + delta_x, y + delta_yx, y = movewhile board.get((x, y)) == player:n += x, y = x - delta_x, y - delta_yn -= return n >= self.k", "docstring": "Return true if there is a line through move on board for player.", "id": "f1685:c2:m7"} {"signature": "def DTAgentProgram(belief_state):", "body": "def program(percept):belief_state.observe(program.action, percept)program.action = argmax(belief_state.actions(),belief_state.expected_outcome_utility)return program.actionprogram.action = Nonereturn program", "docstring": "A decision-theoretic agent. [Fig. 13.1]", "id": "f1686:m0"} {"signature": "def event_values(event, vars):", "body": "if isinstance(event, tuple) and len(event) == len(vars):return eventelse:return tuple([event[var] for var in vars])", "docstring": "Return a tuple of the values of variables vars in event.\n >>> event_values ({'A': 10, 'B': 9, 'C': 8}, ['C', 'A'])\n (8, 10)\n >>> event_values ((1, 2), ['C', 'A'])\n (1, 2)", "id": "f1686:m1"} {"signature": "def enumerate_joint_ask(X, e, P):", "body": "assert X not in e, \"\"Q = ProbDist(X) Y = [v for v in P.variables if v != X and v not in e] for xi in P.values(X):Q[xi] = enumerate_joint(Y, extend(e, X, xi), P)return Q.normalize()", "docstring": "Return a probability distribution over the values of the variable X,\n given the {var:val} observations e, in the JointProbDist P. [Section 13.3]\n >>> P = JointProbDist(['X', 'Y'])\n >>> P[0,0] = 0.25; P[0,1] = 0.5; P[1,1] = P[2,1] = 0.125\n >>> enumerate_joint_ask('X', dict(Y=1), P).show_approx()\n '0: 0.667, 1: 0.167, 2: 0.167'", "id": "f1686:m2"} {"signature": "def enumerate_joint(vars, e, P):", "body": "if not vars:return P[e]Y, rest = vars[], vars[:]return sum([enumerate_joint(rest, extend(e, Y, y), P)for y in P.values(Y)])", "docstring": "Return the sum of those entries in P consistent with e,\n provided vars is P's remaining variables (the ones not in e).", "id": "f1686:m3"} {"signature": "def enumeration_ask(X, e, bn):", "body": "assert X not in e, \"\"Q = ProbDist(X)for xi in bn.variable_values(X):Q[xi] = enumerate_all(bn.vars, extend(e, X, xi), bn)return Q.normalize()", "docstring": "Return the conditional probability distribution of variable X\n given evidence e, from BayesNet bn. [Fig. 14.9]\n >>> enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary\n ... ).show_approx()\n 'False: 0.716, True: 0.284", "id": "f1686:m4"} {"signature": "def enumerate_all(vars, e, bn):", "body": "if not vars:return Y, rest = vars[], vars[:]Ynode = bn.variable_node(Y)if Y in e:return Ynode.p(e[Y], e) * enumerate_all(rest, e, bn)else:return sum(Ynode.p(y, e) * enumerate_all(rest, extend(e, Y, y), bn)for y in bn.variable_values(Y))", "docstring": "Return the sum of those entries in P(vars | e{others})\n consistent with e, where P is the joint distribution represented\n by bn, and e{others} means e restricted to bn's other variables\n (the ones other than vars). 
Parents must precede children in vars.", "id": "f1686:m5"} {"signature": "def elimination_ask(X, e, bn):", "body": "assert X not in e, \"\"factors = []for var in reversed(bn.vars):factors.append(make_factor(var, e, bn))if is_hidden(var, X, e):factors = sum_out(var, factors, bn)return pointwise_product(factors, bn).normalize()", "docstring": "Compute bn's P(X|e) by variable elimination. [Fig. 14.11]\n >>> elimination_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary\n ... ).show_approx()\n 'False: 0.716, True: 0.284", "id": "f1686:m6"} {"signature": "def is_hidden(var, X, e):", "body": "return var != X and var not in e", "docstring": "Is var a hidden variable when querying P(X|e)?", "id": "f1686:m7"} {"signature": "def make_factor(var, e, bn):", "body": "node = bn.variable_node(var)vars = [X for X in [var] + node.parents if X not in e]cpt = dict((event_values(e1, vars), node.p(e1[var], e1))for e1 in all_events(vars, bn, e))return Factor(vars, cpt)", "docstring": "Return the factor for var in bn's joint distribution given e.\n That is, bn's full joint distribution, projected to accord with e,\n is the pointwise product of these factors for bn's variables.", "id": "f1686:m8"} {"signature": "def sum_out(var, factors, bn):", "body": "result, var_factors = [], []for f in factors:(var_factors if var in f.vars else result).append(f)result.append(pointwise_product(var_factors, bn).sum_out(var, bn))return result", "docstring": "Eliminate var from all factors by summing over its values.", "id": "f1686:m10"} {"signature": "def all_events(vars, bn, e):", "body": "if not vars:yield eelse:X, rest = vars[], vars[:]for e1 in all_events(rest, bn, e):for x in bn.variable_values(X):yield extend(e1, X, x)", "docstring": "Yield every way of extending e with values for all vars.", "id": "f1686:m11"} {"signature": "def prior_sample(bn):", "body": "event = {}for node in bn.nodes:event[node.variable] = node.sample(event)return event", "docstring": "Randomly sample from bn's full joint distribution. The result\n is a {variable: value} dict. [Fig. 14.13]", "id": "f1686:m12"} {"signature": "def rejection_sampling(X, e, bn, N):", "body": "counts = dict((x, ) for x in bn.variable_values(X)) for j in range(N):sample = prior_sample(bn) if consistent_with(sample, e):counts[sample[X]] += return ProbDist(X, counts)", "docstring": "Estimate the probability distribution of variable X given\n evidence e in BayesNet bn, using N samples. [Fig. 14.14]\n Raises a ZeroDivisionError if all the N samples are rejected,\n i.e., inconsistent with e.\n >>> seed(47)\n >>> rejection_sampling('Burglary', dict(JohnCalls=T, MaryCalls=T),\n ... burglary, 10000).show_approx()\n 'False: 0.7, True: 0.3'", "id": "f1686:m13"} {"signature": "def consistent_with(event, evidence):", "body": "return every(lambda k_v: evidence.get(k_v[], k_v[]) == k_v[],list(event.items()))", "docstring": "Is event consistent with the given evidence?", "id": "f1686:m14"} {"signature": "def likelihood_weighting(X, e, bn, N):", "body": "W = dict((x, ) for x in bn.variable_values(X))for j in range(N):sample, weight = weighted_sample(bn, e) W[sample[X]] += weightreturn ProbDist(X, W)", "docstring": "Estimate the probability distribution of variable X given\n evidence e in BayesNet bn. [Fig. 14.15]\n >>> seed(1017)\n >>> likelihood_weighting('Burglary', dict(JohnCalls=T, MaryCalls=T),\n ... 
burglary, 10000).show_approx()\n 'False: 0.702, True: 0.298'", "id": "f1686:m15"} {"signature": "def weighted_sample(bn, e):", "body": "w = event = dict(e) for node in bn.nodes:Xi = node.variableif Xi in e:w *= node.p(e[Xi], event)else:event[Xi] = node.sample(event)return event, w", "docstring": "Sample an event from bn that's consistent with the evidence e;\n return the event and its weight, the likelihood that the event\n accords to the evidence.", "id": "f1686:m16"} {"signature": "def gibbs_ask(X, e, bn, N):", "body": "assert X not in e, \"\"counts = dict((x, ) for x in bn.variable_values(X)) Z = [var for var in bn.vars if var not in e]state = dict(e) for Zi in Z:state[Zi] = choice(bn.variable_values(Zi))for j in range(N):for Zi in Z:state[Zi] = markov_blanket_sample(Zi, state, bn)counts[state[X]] += return ProbDist(X, counts)", "docstring": "[Fig. 14.16]\n >>> seed(1017)\n >>> gibbs_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary, 1000\n ... ).show_approx()\n 'False: 0.738, True: 0.262'", "id": "f1686:m17"} {"signature": "def markov_blanket_sample(X, e, bn):", "body": "Xnode = bn.variable_node(X)Q = ProbDist(X)for xi in bn.variable_values(X):ei = extend(e, X, xi)Q[xi] = Xnode.p(xi, e) * product(Yj.p(ei[Yj.variable], ei)for Yj in Xnode.children)return probability(Q.normalize()[True])", "docstring": "Return a sample from P(X | mb) where mb denotes that the\n variables in the Markov blanket of X take their values from event\n e (which must assign a value to each). The Markov blanket of X is\n X's parents, children, and children's parents.", "id": "f1686:m18"} {"signature": "def forward_backward(ev, prior):", "body": "unimplemented()", "docstring": "[Fig. 15.4]", "id": "f1686:m19"} {"signature": "def fixed_lag_smoothing(e_t, hmm, d):", "body": "unimplemented()", "docstring": "[Fig. 15.6]", "id": "f1686:m20"} {"signature": "def particle_filtering(e, N, dbn):", "body": "unimplemented()", "docstring": "[Fig. 
15.17]", "id": "f1686:m21"} {"signature": "def __init__(self, varname='', freqs=None):", "body": "update(self, prob={}, varname=varname, values=[])if freqs:for (v, p) in list(freqs.items()):self[v] = pself.normalize()", "docstring": "If freqs is given, it is a dictionary of value: frequency pairs,\n and the ProbDist then is normalized.", "id": "f1686:c0:m0"} {"signature": "def __getitem__(self, val):", "body": "try: return self.prob[val]except KeyError: return ", "docstring": "Given a value, return P(value).", "id": "f1686:c0:m1"} {"signature": "def __setitem__(self, val, p):", "body": "if val not in self.values:self.values.append(val)self.prob[val] = p", "docstring": "Set P(val) = p.", "id": "f1686:c0:m2"} {"signature": "def normalize(self):", "body": "total = float(sum(self.prob.values()))if not (-epsilon < total < +epsilon):for val in self.prob:self.prob[val] /= totalreturn self", "docstring": "Make sure the probabilities of all values sum to 1.\n Returns the normalized distribution.\n Raises a ZeroDivisionError if the sum of the values is 0.\n >>> P = ProbDist('Flip'); P['H'], P['T'] = 35, 65\n >>> P = P.normalize()\n >>> print '%5.3f %5.3f' % (P.prob['H'], P.prob['T'])\n 0.350 0.650", "id": "f1686:c0:m3"} {"signature": "def show_approx(self, numfmt=''):", "body": "return ''.join([('' + numfmt) % (v, p)for (v, p) in sorted(self.prob.items())])", "docstring": "Show the probabilities rounded and sorted by key, for the\n sake of portable doctests.", "id": "f1686:c0:m4"} {"signature": "def __getitem__(self, values):", "body": "values = event_values(values, self.variables)return ProbDist.__getitem__(self, values)", "docstring": "Given a tuple or dict of values, return P(values).", "id": "f1686:c1:m1"} {"signature": "def __setitem__(self, values, p):", "body": "values = event_values(values, self.variables)self.prob[values] = pfor var, val in zip(self.variables, values):if val not in self.vals[var]:self.vals[var].append(val)", "docstring": "Set P(values) = p. Values can be a tuple or a dict; it must\n have a value for each of the variables in the joint. Also keep track\n of the values we have seen so far for each variable.", "id": "f1686:c1:m2"} {"signature": "def values(self, var):", "body": "return self.vals[var]", "docstring": "Return the set of possible values for a variable.", "id": "f1686:c1:m3"} {"signature": "def __init__(self, node_specs=[]):", "body": "update(self, nodes=[], vars=[])for node_spec in node_specs:self.add(node_spec)", "docstring": "nodes must be ordered with parents before children.", "id": "f1686:c2:m0"} {"signature": "def add(self, node_spec):", "body": "node = BayesNode(*node_spec)assert node.variable not in self.varsassert every(lambda parent: parent in self.vars, node.parents)self.nodes.append(node)self.vars.append(node.variable)for parent in node.parents:self.variable_node(parent).children.append(node)", "docstring": "Add a node to the net. 
Its parents must already be in the\n net, and its variable must not.", "id": "f1686:c2:m1"} {"signature": "def variable_node(self, var):", "body": "for n in self.nodes:if n.variable == var:return nraise Exception(\"\" % var)", "docstring": "Return the node for the variable named var.\n >>> burglary.variable_node('Burglary').variable\n 'Burglary", "id": "f1686:c2:m2"} {"signature": "def variable_values(self, var):", "body": "return [True, False]", "docstring": "Return the domain of var.", "id": "f1686:c2:m3"} {"signature": "def __init__(self, X, parents, cpt):", "body": "if isinstance(parents, str): parents = parents.split()if isinstance(cpt, (float, int)): cpt = {(): cpt}elif isinstance(cpt, dict):if cpt and isinstance(list(cpt.keys())[], bool): cpt = dict(((v,), p) for v, p in list(cpt.items()))assert isinstance(cpt, dict)for vs, p in list(cpt.items()):assert isinstance(vs, tuple) and len(vs) == len(parents)assert every(lambda v: isinstance(v, bool), vs)assert <= p <= update(self, variable=X, parents=parents, cpt=cpt, children=[])", "docstring": "X is a variable name, and parents a sequence of variable\n names or a space-separated string. cpt, the conditional\n probability table, takes one of these forms:\n\n * A number, the unconditional probability P(X=true). You can\n use this form when there are no parents.\n\n * A dict {v: p, ...}, the conditional probability distribution\n P(X=true | parent=v) = p. When there's just one parent.\n\n * A dict {(v1, v2, ...): p, ...}, the distribution P(X=true |\n parent1=v1, parent2=v2, ...) = p. Each key must have as many\n values as there are parents. You can use this form always;\n the first two are just conveniences.\n\n In all cases the probability of X being false is left implicit,\n since it follows from P(X=true).\n\n >>> X = BayesNode('X', '', 0.2)\n >>> Y = BayesNode('Y', 'P', {T: 0.2, F: 0.7})\n >>> Z = BayesNode('Z', 'P Q',\n ... {(T, T): 0.2, (T, F): 0.3, (F, T): 0.5, (F, F): 0.7})", "id": "f1686:c3:m0"} {"signature": "def p(self, value, event):", "body": "assert isinstance(value, bool)ptrue = self.cpt[event_values(event, self.parents)]return if_(value, ptrue, - ptrue)", "docstring": "Return the conditional probability\n P(X=value | parents=parent_values), where parent_values\n are the values of parents in event. (event must assign each\n parent a value.)\n >>> bn = BayesNode('X', 'Burglary', {T: 0.2, F: 0.625})\n >>> bn.p(False, {'Burglary': False, 'Earthquake': True})\n 0.375", "id": "f1686:c3:m1"} {"signature": "def sample(self, event):", "body": "return probability(self.p(True, event))", "docstring": "Sample from the distribution for this variable conditioned\n on event's values for parent_vars. 
That is, return True/False\n at random according with the conditional probability given the\n parents.", "id": "f1686:c3:m2"} {"signature": "def pointwise_product(self, other, bn):", "body": "vars = list(set(self.vars) | set(other.vars))cpt = dict((event_values(e, vars), self.p(e) * other.p(e))for e in all_events(vars, bn, {}))return Factor(vars, cpt)", "docstring": "Multiply two factors, combining their variables.", "id": "f1686:c4:m1"} {"signature": "def sum_out(self, var, bn):", "body": "vars = [X for X in self.vars if X != var]cpt = dict((event_values(e, vars),sum(self.p(extend(e, var, val))for val in bn.variable_values(var)))for e in all_events(vars, bn, {}))return Factor(vars, cpt)", "docstring": "Make a factor eliminating var by summing over its values.", "id": "f1686:c4:m2"} {"signature": "def normalize(self):", "body": "assert len(self.vars) == return ProbDist(self.vars[],dict((k, v) for ((k,), v) in list(self.cpt.items())))", "docstring": "Return my probabilities; must be down to one variable.", "id": "f1686:c4:m3"} {"signature": "def p(self, e):", "body": "return self.cpt[event_values(e, self.vars)]", "docstring": "Look up my value tabulated for e.", "id": "f1686:c4:m4"} {"signature": "def assert_amnesty(self, input_code, errors, expected):", "body": "input_code = textwrap.dedent(input_code)expected = textwrap.dedent(expected)errors_by_line = defaultdict(list)for error in errors:errors_by_line[error.linenum].append(error)output_lines = itertools.chain.from_iterable(fix_pylint(line, errors_by_line[lineno])for lineno, linein enumerate(StringIO(input_code), start=))self.assertEqual(expected.split(u''), \"\".join(output_lines).split(u''))", "docstring": "Assert that fix_pylint produces ``expected`` when fed ``input_code`` and the\nlist of errors ``errors``.\n\nArguments:\n input_code: A string of python code. Will be textwrap.dedented.\n errors: A list of PylintErrors\n expected: A string of python code. Will be textwrap.dedented.", "id": "f1688:c0:m0"} {"signature": "def assert_pylint_exception_match(self, expected, line):", "body": "self.assertEqual(expected,PYLINT_EXCEPTION_REGEX.search(line).group())", "docstring": "Assert that PYLINT_EXCEPTION_REGEX mateches ``expected`` in ``line``.", "id": "f1688:c0:m1"} {"signature": "def find_line_markers(source):", "body": "markers = {}for lineno, line in enumerate(source.splitlines(), start=):m = re.search(r\"\", line)if m:markers[lineno] = m.group()return markers", "docstring": "Find line markers in program source.\n\n Returns a dict mapping line numbers to the marker on that line.", "id": "f1700:m0"} {"signature": "def run_pylint(source, msg_ids):", "body": "with open(\"\", \"\") as f:f.write(textwrap.dedent(source))reporter = SimpleReporter()pylint_args = [\"\", \"\", \"\".format(msg_ids)]if pylint_numversion >= (, ):kwargs = dict(do_exit=False)else:kwargs = dict(exit=False)Run(pylint_args, reporter=reporter, **kwargs) markers = find_line_markers(source)messages = {\"\".format(m=m, line=markers.get(m.line, m.line))for m in reporter.messages}return messages", "docstring": "Run pylint on some source, collecting specific messages.\n\n `source` is the literal text of the program to check. It is\n dedented and written to a temp file for pylint to read.\n\n `msg_ids` is a comma-separated string of msgids we are interested\n in. Use \"all\" to enable all messages.\n\n Returns a set of messages. Each message is a string, formatted\n as \"line:msg-id:message\". 
\"line\" will be the line number of the\n message, or if the source line has a comment like \"#=Slug\", then\n it will be \"Slug\" instead. This makes it easier to write, read,\n and maintain the tests.", "id": "f1700:m2"} {"signature": "def temp_filename(self):", "body": "fdesc, filename = tempfile.mkstemp(suffix=\"\", prefix=\"\")os.close(fdesc)self.addCleanup(os.remove, filename)return filename", "docstring": "Make a temporary filename that will be deleted after the test.\n\nReturns:\n The name of the temp file.", "id": "f1701:c0:m0"} {"signature": "def write_tamper_evident(self, text, **kwargs):", "body": "filename = self.temp_filename()TamperEvidentFile(filename).write(text, **kwargs)return filename", "docstring": "Helper to write a tamper-evident temp file.\n\nArgs:\n text (byte string): the content of the file.\n\n kwargs: any other arguments to `TamperEvidentFile.write`.\n\nReturns:\n The name of the temp file.", "id": "f1701:c0:m1"} {"signature": "def call_command(self, argv=None):", "body": "return main.main(argv)", "docstring": "Call an edx_lint script command.\n\n Arguments:\n argv (list) -- arguments to pass to the edx_lint script", "id": "f1702:c0:m1"} {"signature": "def assert_file(self, filename, contains=None, not_contains=None):", "body": "self.assertTrue(os.path.isfile(filename))if contains is not None or not_contains is not None:with open(filename) as f:text = f.read()if contains is not None: self.assertIn(contains, text)if not_contains is not None: self.assertNotIn(not_contains, text)", "docstring": "Assert that a file exists, and optionally, contains some text.", "id": "f1702:c0:m2"} {"signature": "def assert_not_file(self, filename):", "body": "self.assertFalse(os.path.isfile(filename))", "docstring": "Assert that a file doesn't exist.", "id": "f1702:c0:m3"} {"signature": "def merge_configs(main, tweaks):", "body": "for section in tweaks.sections():for option in tweaks.options(section):value = tweaks.get(section, option)if option.endswith(\"\"):option = option[:-]value = main.get(section, option) + valuemain.set(section, option, value)", "docstring": "Merge tweaks into a main config file.", "id": "f1703:m0"} {"signature": "def list_main(argv_unused): ", "body": "print(\"\")for filename in pkg_resources.resource_listdir(\"\", \"\"):print(filename)return ", "docstring": "list\n List the FILENAMEs that edx_lint can provide.", "id": "f1704:m0"} {"signature": "def parse_pylint_output(pylint_output):", "body": "for line in pylint_output:if not line.strip():continueif line[:] in (\"\"*, \"\"*):continueparsed = PYLINT_PARSEABLE_REGEX.search(line)if parsed is None:LOG.warning(u\"\"u\"\",line)continueparsed_dict = parsed.groupdict()parsed_dict[''] = int(parsed_dict[''])yield PylintError(**parsed_dict)", "docstring": "Parse the pylint output-format=parseable lines into PylintError tuples.", "id": "f1705:m0"} {"signature": "def format_pylint_disables(error_names, tag=True):", "body": "tag_str = \"\" if tag else \"\"if error_names:return u\"\".format(disabled=\"\".join(sorted(error_names)),tag=tag_str,)else:return \"\"", "docstring": "Format a list of error_names into a 'pylint: disable=' line.", "id": "f1705:m1"} {"signature": "def fix_pylint(line, errors):", "body": "if not errors:yield linereturncurrent = PYLINT_EXCEPTION_REGEX.search(line)if current:original_errors = {disable.strip() for disable in current.group('').split('')}else:original_errors = set()disabled_errors = set(original_errors)for error in errors:if error.error_name == '':parsed = re.search(\"\"\"\"\"\", 
error.error_msg)disabled_errors.discard(parsed.group(''))elif error.error_name == '' and error.error_msg == '':yield format_pylint_disables({error.error_name}).strip() + ''else:disabled_errors.add(error.error_name)disable_string = format_pylint_disables(disabled_errors, not disabled_errors <= original_errors)if current:yield PYLINT_EXCEPTION_REGEX.sub(disable_string, line)else:yield re.sub(r'', disable_string + r'', line, count=)", "docstring": "Yield any modified versions of ``line`` needed to address the errors in ``errors``.", "id": "f1705:m2"} {"signature": "@click.command()@click.option('', default=sys.stdin, type=click.File(),help=\"\")@click_log.simple_verbosity_option(default=u'')def pylint_amnesty(pylint_output):", "body": "errors = defaultdict(lambda: defaultdict(set))for pylint_error in parse_pylint_output(pylint_output):errors[pylint_error.filename][pylint_error.linenum].add(pylint_error)for file_with_errors in sorted(errors):try:opened_file = open(file_with_errors)except IOError:LOG.warning(u\"\", file_with_errors, exc_info=True)else:with opened_file as input_file:output_lines = []for line_num, line in enumerate(input_file, start=):output_lines.extend(fix_pylint(line,errors[file_with_errors][line_num]))with open(file_with_errors, '') as output_file:output_file.writelines(output_lines)", "docstring": "Add ``# pylint: disable`` clauses to add exceptions to all existing pylint errors in a codebase.", "id": "f1705:m3"} {"signature": "def main(argv=None):", "body": "if argv is None:argv = sys.argv[:]if not argv or argv[] == \"\":show_help()return elif argv[] == \"\":return check_main(argv[:])elif argv[] == \"\":return list_main(argv[:])elif argv[] == \"\":return write_main(argv[:])else:print(u\"\".format(\"\".join(argv)))show_help()return ", "docstring": "The edx_lint command entry point.", "id": "f1706:m0"} {"signature": "def show_help():", "body": "print(", "docstring": "Print the help string for the edx_lint command.", "id": "f1706:m1"} {"signature": "def check_main(argv):", "body": "if len(argv) != :print(\"\")return filename = argv[]if os.path.exists(filename):print(u\"\" % filename)tef = TamperEvidentFile(filename)if tef.validate():print(u\"\" % filename)else:print(u\"\" % filename)else:print(u\"\" % filename)return ", "docstring": "check FILENAME\n Check that FILENAME has not been edited since writing.", "id": "f1707:m0"} {"signature": "def write_main(argv):", "body": "if len(argv) != :print(\"\")return filename = argv[]resource_name = \"\" + filenametweaks_name = amend_filename(filename, \"\")if not pkg_resources.resource_exists(\"\", resource_name):print(u\"\" % filename)return if os.path.exists(filename):print(u\"\" % filename)tef = TamperEvidentFile(filename)if not tef.validate():bak_name = amend_filename(filename, \"\")print(u\"\" % (filename, bak_name))if os.path.exists(bak_name):print(u\"\" % bak_name)os.remove(bak_name)os.rename(filename, bak_name)print(u\"\" % filename)cfg = configparser.RawConfigParser()resource_string = pkg_resources.resource_string(\"\", resource_name).decode(\"\")if six.PY2:cfg.readfp(cStringIO(resource_string), resource_name)else:cfg.read_string(resource_string, resource_name) if os.path.exists(tweaks_name):print(u\"\" % tweaks_name)cfg_tweaks = configparser.RawConfigParser()cfg_tweaks.read([tweaks_name])merge_configs(cfg, cfg_tweaks)print(u\"\" % filename)output_text = cStringIO()output_text.write(WARNING_HEADER.format(filename=filename, tweaks_name=tweaks_name))cfg.write(output_text)out_tef = TamperEvidentFile(filename)if six.PY2:output_bytes 
= output_text.getvalue()else:output_bytes = output_text.getvalue().encode(\"\")out_tef.write(output_bytes)return ", "docstring": "write FILENAME\n Write a local copy of FILENAME using FILENAME_tweaks for local tweaks.", "id": "f1708:m0"} {"signature": "def amend_filename(filename, amend):", "body": "base, ext = os.path.splitext(filename)amended_name = base + amend + extreturn amended_name", "docstring": "Amend a filename with a suffix.\n\n amend_filename(\"foo.txt\", \"_tweak\") --> \"foo_tweak.txt\"", "id": "f1708:m1"} {"signature": "def register_checkers(linter):", "body": "linter.register_checker(YamlLoadChecker(linter))", "docstring": "Register checkers.", "id": "f1709:m0"} {"signature": "def register_checkers(linter):", "body": "if FILENAME:linter.register_checker(ModuleTracingChecker(linter))", "docstring": "Register checkers.", "id": "f1710:m0"} {"signature": "def visit_module(self, node):", "body": "with open(FILENAME, \"\") as f:f.write(node.file)f.write(\"\")", "docstring": "Called for each module being examined.", "id": "f1710:c0:m0"} {"signature": "def register_checkers(linter):", "body": "linter.register_checker(TranslationStringConstantsChecker(linter))", "docstring": "Register checkers.", "id": "f1711:m0"} {"signature": "@utils.check_messages(MESSAGE_ID)def visit_call(self, node):", "body": "if not isinstance(node.func, astroid.Name):returnif node.func.name not in self.TRANSLATION_FUNCTIONS:returnfirst = node.args[]if isinstance(first, astroid.Const):if isinstance(first.value, six.string_types):returnself.add_message(self.MESSAGE_ID, args=node.func.name, node=node)", "docstring": "Called for every function call in the source code.", "id": "f1711:c0:m0"} {"signature": "def register_checkers(linter):", "body": "linter.register_checker(GetSetAttrLiteralChecker(linter))", "docstring": "Register checkers.", "id": "f1712:m0"} {"signature": "@utils.check_messages(MESSAGE_ID)def visit_call(self, node):", "body": "if not isinstance(node.func, astroid.Name):returnif node.func.name == \"\":if len(node.args) != :returnelif node.func.name in [\"\", \"\"]:passelse:returnsecond = node.args[]if isinstance(second, astroid.Const):if isinstance(second.value, six.string_types):if re.search(r\"\", second.value):self.add_message(self.MESSAGE_ID, args=node.func.name, node=node)", "docstring": "Called for every function call in the source code.", "id": "f1712:c0:m0"} {"signature": "def register_checkers(linter):", "body": "linter.register_checker(AssertChecker(linter))", "docstring": "Register checkers.", "id": "f1713:m0"} {"signature": "@utils.check_messages(MESSAGE_ID)def visit_call(self, node):", "body": "if not isinstance(node.func, astroid.Attribute):returnif node.func.attrname not in self.AFFECTED_ASSERTS:returnfirst_arg = node.args[]existing_code = \"\" % (node.func.attrname, first_arg.as_string())if isinstance(first_arg, astroid.Compare):if len(first_arg.ops) > :returncompare = first_arg.ops[][]right = first_arg.ops[][]if isinstance(right, astroid.Const) and right.value is None:better = self.BETTER_NONE_COMPARES[compare]else:better = self.BETTER_COMPARES[compare]if node.func.attrname == \"\":better = self.INVERTED[better]self.add_message(self.MESSAGE_ID,args=u\"\" % (existing_code, better),node=node,)", "docstring": "Check that various assertTrue/False functions are not misused.", "id": "f1713:c0:m0"} {"signature": "def register_checkers(linter):", "body": "linter.register_checker(RangeChecker(linter))", "docstring": "Register checkers.", "id": "f1714:m0"} {"signature": 
"@utils.check_messages(MESSAGE_ID)def visit_call(self, node):", "body": "if not isinstance(node.func, astroid.Name):returnif node.func.name not in self.RANGE_FUNCTIONS:returnfirst = node.args[]if not isinstance(first, astroid.Const):returnif not isinstance(first.value, int):returnthree1 = Falseif len(node.args) == :third = node.args[]if isinstance(third, astroid.Const):if isinstance(third.value, int) and third.value == :three1 = Trueif first.value == :if len(node.args) == :self.add_message(self.MESSAGE_ID, args=(node.func.name, \"\"), node=node)elif three1:self.add_message(self.MESSAGE_ID, args=(node.func.name, \"\"), node=node)elif three1:self.add_message(self.MESSAGE_ID, args=(node.func.name, \"\"), node=node)", "docstring": "Called for every function call in the source code.", "id": "f1714:c0:m0"} {"signature": "def register_checkers(linter):", "body": "linter.register_checker(UnitTestSetupSuperChecker(linter))", "docstring": "Register checkers.", "id": "f1715:m0"} {"signature": "@utils.check_messages(NOT_CALLED_MESSAGE_ID, NON_PARENT_MESSAGE_ID)def visit_functiondef(self, node):", "body": "if not node.is_method():returnmethod_name = node.nameif method_name not in self.METHOD_NAMES:returnklass_node = node.parent.frame()to_call = _ancestors_to_call(klass_node, method_name)not_called_yet = dict(to_call)for stmt in node.nodes_of_class(astroid.Call):expr = stmt.funcif not isinstance(expr, astroid.Attribute):continueif expr.attrname != method_name:continueif (isinstance(expr.expr, astroid.Call) andisinstance(expr.expr.func, astroid.Name) andexpr.expr.func.name == ''):returntry:klass = next(expr.expr.infer())if klass is astroid.Uninferable:continueif (isinstance(klass, astroid.Instance) andisinstance(klass._proxied, astroid.ClassDef) andutils.is_builtin_object(klass._proxied) andklass._proxied.name == ''):returnif isinstance(klass, astroid.objects.Super):returntry:del not_called_yet[klass]except KeyError:if klass not in to_call:self.add_message(self.NON_PARENT_MESSAGE_ID,node=expr,args=(method_name, usable_class_name(klass)),)except astroid.InferenceError:continuefor klass, method in six.iteritems(not_called_yet):if klass.name == '' or method.parent.name == '':continueself.add_message(self.NOT_CALLED_MESSAGE_ID,args=(method_name, usable_class_name(klass)),node=node,)", "docstring": "Called for every function definition in the source code.", "id": "f1715:c0:m0"} {"signature": "def check_visitors(cls):", "body": "for name in dir(cls):if name.startswith(\"\"):if name[:] not in CLASS_NAMES:raise Exception(u\"\".format(name))return cls", "docstring": "Check that a checker's visitors are correctly named.\n\n A checker has methods named visit_NODETYPE, but it's easy to mis-name\n a visit method, and it will never be called. 
This decorator checks\n the class to see that all of its visitors are named after an existing\n node class.", "id": "f1716:m0"} {"signature": "def usable_class_name(node):", "body": "name = node.qname()for prefix in [\"\", \"\", \"\"]:if name.startswith(prefix):name = name[len(prefix):]return name", "docstring": "Make a reasonable class name for a class node.", "id": "f1716:m1"} {"signature": "def register_checkers(linter):", "body": "linter.register_checker(UnicodeFormatStringChecker(linter))", "docstring": "Register checkers.", "id": "f1718:m0"} {"signature": "def process_module(self, node):", "body": "self._unicode_literals = \"\" in node.future_imports", "docstring": "Called for each module being examined.", "id": "f1718:c0:m1"} {"signature": "def register(linter):", "body": "for mod in MODS:mod.register_checkers(linter)", "docstring": "Registering additional checkers.\n However, we will also use it to amend existing checker config.", "id": "f1719:m0"} {"signature": "def register_checkers(linter):", "body": "linter.register_checker(LayeredTestClassChecker(linter))", "docstring": "Register checkers.", "id": "f1720:m0"} {"signature": "@utils.check_messages(MESSAGE_ID)def visit_classdef(self, node):", "body": "if not is_test_case_class(node):returnfor anc in node.ancestors():if not is_test_case_class(anc):continuefor meth in anc.mymethods():if meth.name.startswith(\"\"):self.add_message(self.MESSAGE_ID, args=(node.name, anc.name), node=node)return", "docstring": "Check each class.", "id": "f1720:c0:m0"} {"signature": "def write(self, text, hashline=b\"\"):", "body": "if not text.endswith(b\"\"):text += b\"\"actual_hash = hashlib.sha1(text).hexdigest()with open(self.filename, \"\") as f:f.write(text)f.write(hashline.decode(\"\").format(actual_hash).encode(\"\"))f.write(b\"\")", "docstring": "u\"\"\"\n Write `text` to the file.\n\n Writes the text to the file, with a final line checksumming the\n contents. 
The entire file must be written with one `.write()` call.\n\n The last line is written with the `hashline` format string, which can\n be changed to accommodate different file syntaxes.\n\n Both arguments are UTF8 byte strings.\n\n Arguments:\n text (UTF8 byte string): the contents of the file to write.\n\n hashline (UTF8 byte string): the format of the last line to append\n to the file, with \"{}\" replaced with the hash.", "id": "f1722:c0:m1"} {"signature": "def validate(self):", "body": "with open(self.filename, \"\") as f:text = f.read()start_last_line = text.rfind(b\"\", , -)if start_last_line == -:return Falseoriginal_text = text[:start_last_line+]last_line = text[start_last_line+:]expected_hash = hashlib.sha1(original_text).hexdigest().encode('')match = re.search(b\"\", last_line)if not match:return Falseactual_hash = match.group()return actual_hash == expected_hash", "docstring": "Check if the file still has its original contents.\n\nReturns True if the file is unchanged, False if it has been tampered\nwith.", "id": "f1722:c0:m2"} {"signature": "def is_integer( value ):", "body": "return isinstance( value, INTEGER_TYPES )", "docstring": "Check if value is a valid unsigned int, int, or long", "id": "f1726:m0"} {"signature": "def guess_array_memory_usage( bam_readers, dtype, use_strand=False ):", "body": "ARRAY_COUNT = if not isinstance( bam_readers, list ):bam_readers = [ bam_readers ]if isinstance( dtype, basestring ):dtype = NUMPY_DTYPES.get( dtype, None )use_strand = use_strand + dtypes = guess_numpy_dtypes_from_idxstats( bam_readers, default=None, force_dtype=False )if not [ dt for dt in dtypes if dt is not None ]:dtypes = guess_numpy_dtypes_from_idxstats( bam_readers, default=dtype or numpy.uint64, force_dtype=True )elif dtype:dtypes = [ dtype if dt else None for dt in dtypes ]read_groups = []no_read_group = Falsefor bam in bam_readers:rgs = bam.get_read_groups()if rgs:for rg in rgs:if rg not in read_groups:read_groups.append( rg )else:no_read_group = Trueread_groups = len( read_groups ) + no_read_groupmax_ref_size = array_byte_overhead = sys.getsizeof( numpy.zeros( ( ), dtype=numpy.uint64 ) )array_count = ARRAY_COUNT * use_strand * read_groupsfor bam in bam_readers:for i, ( name, length ) in enumerate( bam.get_references() ):if dtypes[i] is not None:max_ref_size = max( max_ref_size, ( length + length * dtypes[i]().nbytes * array_count + ( array_byte_overhead * ( array_count + ) ) ) )return max_ref_size", "docstring": "Returns an estimate for the maximum amount of memory to be consumed by numpy arrays.", "id": "f1726:m4"} {"signature": "def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,to_dir=os.curdir, delay=):", "body": "to_dir = os.path.abspath(to_dir)try:from urllib.request import urlopenexcept ImportError:from urllib2 import urlopentgz_name = \"\" % versionurl = download_base + tgz_namesaveto = os.path.join(to_dir, tgz_name)src = dst = Noneif not os.path.exists(saveto): try:log.warn(\"\", url)src = urlopen(url)data = src.read()dst = open(saveto, \"\")dst.write(data)finally:if src:src.close()if dst:dst.close()return os.path.realpath(saveto)", "docstring": "Download distribute from a specified location and return its filename\n\n `version` should be a valid distribute version number that is available\n as an egg for download under the `download_base` URL (which should end\n with a '/'). 
`to_dir` is the directory where the egg will be downloaded.\n `delay` is the number of seconds to pause before an actual download\n attempt.", "id": "f1732:m4"} {"signature": "def _patch_file(path, content):", "body": "f = open(path)existing_content = f.read()f.close()if existing_content == content:log.warn('')return Falselog.warn('')_rename_path(path)f = open(path, '')try:f.write(content)finally:f.close()return True", "docstring": "Will backup the file then patch it", "id": "f1732:m6"} {"signature": "def _extractall(self, path=\"\", members=None):", "body": "import copyimport operatorfrom tarfile import ExtractErrordirectories = []if members is None:members = selffor tarinfo in members:if tarinfo.isdir():directories.append(tarinfo)tarinfo = copy.copy(tarinfo)tarinfo.mode = self.extract(tarinfo, path)if sys.version_info < (, ):def sorter(dir1, dir2):return cmp(dir1.name, dir2.name)directories.sort(sorter)directories.reverse()else:directories.sort(key=operator.attrgetter(''), reverse=True)for tarinfo in directories:dirpath = os.path.join(path, tarinfo.name)try:self.chown(tarinfo, dirpath)self.utime(tarinfo, dirpath)self.chmod(tarinfo, dirpath)except ExtractError:e = sys.exc_info()[]if self.errorlevel > :raiseelse:self._dbg(, \"\" % e)", "docstring": "Extract all members from the archive to the current working\n directory and set owner, modification time and permissions on\n directories afterwards. `path' specifies a different directory\n to extract to. `members' is optional and must be a subset of the\n list returned by getmembers().", "id": "f1732:m17"} {"signature": "def _build_install_args(options):", "body": "install_args = []if options.user_install:if sys.version_info < (, ):log.warn(\"\")raise SystemExit()install_args.append('')return install_args", "docstring": "Build the arguments to 'python setup.py install' on the distribute package", "id": "f1732:m18"} {"signature": "def _parse_args():", "body": "parser = optparse.OptionParser()parser.add_option('', dest='', action='', default=False,help='')parser.add_option('', dest='', metavar=\"\",default=DEFAULT_URL,help='')options, args = parser.parse_args()return options", "docstring": "Parse the command line for options", "id": "f1732:m19"} {"signature": "def main(version=DEFAULT_VERSION):", "body": "options = _parse_args()tarball = download_setuptools(download_base=options.download_base)return _install(tarball, _build_install_args(options))", "docstring": "Install or upgrade setuptools and EasyInstall", "id": "f1732:m20"} {"signature": "def show():", "body": "with colorful.with_style('') as c:sys.stdout.write(c.bold('') + '')sys.stdout.write(c.dimmed('') + '')sys.stdout.write(c.italic('') + '')sys.stdout.write(c.underlined('') + '')sys.stdout.write(c.inversed('') + '')sys.stdout.write(c.concealed('') + '')sys.stdout.write(c.struckthrough('') + '')sys.stdout.write(c.orange('') + '')sys.stdout.write(c.magenta('') + '')sys.stdout.write(c.purple('') + '')sys.stdout.write(c.blue('') + '')sys.stdout.write(c.seaGreen('') + '')sys.stdout.write(c.green('') + '')sys.stdout.write(c.yellow('') + '')sys.stdout.write(c.on_orange('') + '')sys.stdout.write(c.on_magenta('') + '')sys.stdout.write(c.on_purple('') + '')sys.stdout.write(c.on_blue('') + '')sys.stdout.write(c.on_seaGreen('') + '')sys.stdout.write(c.gray_on_green('') + '')sys.stdout.write(c.gray_on_yellow('') + '')", "docstring": "Show the modifiers and colors", "id": "f1739:m0"} {"signature": "def show():", "body": "with colorful.with_style('') as c:sys.stdout.write(c.bold('') + 
'')sys.stdout.write(c.dimmed('') + '')sys.stdout.write(c.italic('') + '')sys.stdout.write(c.underlined('') + '')sys.stdout.write(c.inversed('') + '')sys.stdout.write(c.concealed('') + '')sys.stdout.write(c.struckthrough('') + '')sys.stdout.write(c.yellow('') + '')sys.stdout.write(c.red('') + '')sys.stdout.write(c.red('') + '')sys.stdout.write(c.magenta('') + '')sys.stdout.write(c.magenta('') + '')sys.stdout.write(c.blue('') + '')sys.stdout.write(c.cyan('') + '')sys.stdout.write(c.green('') + '')sys.stdout.write(c.on_yellow('') + '')sys.stdout.write(c.on_red('') + '')sys.stdout.write(c.on_red('') + '')sys.stdout.write(c.on_magenta('') + '')sys.stdout.write(c.on_magenta('') + '')sys.stdout.write(c.on_blue('') + '')sys.stdout.write(c.on_cyan('') + '')sys.stdout.write(c.on_green('') + '')", "docstring": "Show the modifiers and colors", "id": "f1740:m0"} {"signature": "def show():", "body": "sys.stdout.write(colorful.bold('') + '')sys.stdout.write(colorful.dimmed('') + '')sys.stdout.write(colorful.italic('') + '')sys.stdout.write(colorful.underlined('') + '')sys.stdout.write(colorful.inversed('') + '')sys.stdout.write(colorful.concealed('') + '')sys.stdout.write(colorful.struckthrough('') + '')sys.stdout.write(colorful.red('') + '')sys.stdout.write(colorful.green('') + '')sys.stdout.write(colorful.yellow('') + '')sys.stdout.write(colorful.blue('') + '')sys.stdout.write(colorful.magenta('') + '')sys.stdout.write(colorful.cyan('') + '')sys.stdout.write(colorful.white('') + '')sys.stdout.write(colorful.on_red('') + '')sys.stdout.write(colorful.on_green('') + '')sys.stdout.write(colorful.on_yellow('') + '')sys.stdout.write(colorful.on_blue('') + '')sys.stdout.write(colorful.on_magenta('') + '')sys.stdout.write(colorful.on_cyan('') + '')sys.stdout.write(colorful.on_white('') + '')", "docstring": "Show the modifiers and colors", "id": "f1741:m0"} {"signature": "def read_version():", "body": "finder = VersionFinder()path = os.path.join(PROJECT_ROOT, '', '')with codecs.open(path, '', encoding='') as fp:file_data = fp.read().encode('')finder.visit(ast.parse(file_data))return finder.version", "docstring": "Read version from __init__.py without loading any files", "id": "f1742:m0"} {"signature": "def detect_color_support(env): ", "body": "if env.get('', '') == '':return NO_COLORSif env.get('', '') == '':return ANSI_8_COLORSif env.get('', '') == '':return ANSI_16_COLORSif env.get('', '') == '':return ANSI_256_COLORSif env.get('', '') == '':return TRUE_COLORSif not sys.stdout.isatty():return NO_COLORScolorterm_env = env.get('')if colorterm_env:if colorterm_env in {'', ''}:return TRUE_COLORSif colorterm_env in {''}:return ANSI_256_COLORStermprog_env = env.get('')if termprog_env:if termprog_env in {'', ''}:return TRUE_COLORSif termprog_env in {''}:return ANSI_256_COLORSterm_env = env.get('')if term_env:if term_env in {'', '', '', ''}:return ANSI_256_COLORSif term_env in {'', '', '', '', '', '', ''}:return ANSI_16_COLORSif colorterm_env:return ANSI_16_COLORSreturn ANSI_8_COLORS", "docstring": "Detect what color palettes are supported.\nIt'll return a valid color mode to use\nwith colorful.\n\n:param dict env: the environment dict like returned by ``os.envion``", "id": "f1743:m0"} {"signature": "def rgb_to_ansi256(r, g, b):", "body": "if r == g and g == b:if r < :return if r > :return return round(((r - ) / ) * ) + ansi_r = * round(r / * )ansi_g = * round(g / * )ansi_b = round(b / * )ansi = + ansi_r + ansi_g + ansi_breturn ansi", "docstring": "Convert RGB to ANSI 256 color", "id": "f1745:m1"} {"signature": "def 
rgb_to_ansi16(r, g, b, use_bright=False):", "body": "ansi_b = round(b / ) << ansi_g = round(g / ) << ansi_r = round(r / )ansi = ( if use_bright else ) + (ansi_b | ansi_g | ansi_r)return ansi", "docstring": "Convert RGB to ANSI 16 color", "id": "f1745:m2"} {"signature": "@contextmanagerdef with_setup(self, colormode=None, colorpalette=None, extend_colors=False):", "body": "colorful = Colorful(colormode=self.colorful.colormode,colorpalette=copy.copy(self.colorful.colorpalette))colorful.setup(colormode=colormode, colorpalette=colorpalette, extend_colors=extend_colors)yield colorful", "docstring": "Return a new Colorful object with the given color config.", "id": "f1746:c0:m1"} {"signature": "def __getattr__(self, name):", "body": "orig_module = __name__ + ''if orig_module in sys.modules:try:return getattr(sys.modules[orig_module], name)except AttributeError:pass return getattr(self.colorful, name)", "docstring": "Dynamically get methods from Colorful object.", "id": "f1746:c0:m9"} {"signature": "def translate_rgb_to_ansi_code(red, green, blue, offset, colormode):", "body": "if colormode == terminal.NO_COLORS: return '', ''if colormode == terminal.ANSI_8_COLORS or colormode == terminal.ANSI_16_COLORS:color_code = ansi.rgb_to_ansi16(red, green, blue)start_code = ansi.ANSI_ESCAPE_CODE.format(code=color_code + offset - ansi.FOREGROUND_COLOR_OFFSET)end_code = ansi.ANSI_ESCAPE_CODE.format(code=offset + ansi.COLOR_CLOSE_OFFSET)return start_code, end_codeif colormode == terminal.ANSI_256_COLORS:color_code = ansi.rgb_to_ansi256(red, green, blue)start_code = ansi.ANSI_ESCAPE_CODE.format(code=''.format(base= + offset, code=color_code))end_code = ansi.ANSI_ESCAPE_CODE.format(code=offset + ansi.COLOR_CLOSE_OFFSET)return start_code, end_codeif colormode == terminal.TRUE_COLORS:start_code = ansi.ANSI_ESCAPE_CODE.format(code=''.format(base= + offset, red=red, green=green, blue=blue))end_code = ansi.ANSI_ESCAPE_CODE.format(code=offset + ansi.COLOR_CLOSE_OFFSET)return start_code, end_coderaise ColorfulError(''.format(colormode))", "docstring": "Translate the given RGB color into the appropriate ANSI escape code\nfor the given color mode.\nThe offset is used for the base color which is used.\n\nThe ``colormode`` has to be one of:\n * 0: no colors / disabled\n * 8: use ANSI 8 colors\n * 16: use ANSI 16 colors (same as 8 but with brightness)\n * 256: use ANSI 256 colors\n * 0xFFFFFF / 16777215: use 16 Million true colors\n\n:param int red: the red channel value\n:param int green: the green channel value\n:param int blue: the blue channel value\n:param int offset: the offset to use for the base color\n:param int colormode: the color mode to use. See explanation above", "id": "f1747:m0"} {"signature": "def translate_colorname_to_ansi_code(colorname, offset, colormode, colorpalette):", "body": "try:red, green, blue = colorpalette[colorname]except KeyError:raise ColorfulError(''.format( colorname))else:return translate_rgb_to_ansi_code(red, green, blue, offset, colormode)", "docstring": "Translate the given color name to a valid\nANSI escape code.\n\n:parma str colorname: the name of the color to resolve\n:parma str offset: the offset for the color code\n:param int colormode: the color mode to use. 
See ``translate_rgb_to_ansi_code``\n:parma dict colorpalette: the color palette to use for the color name mapping\n\n:returns str: the color as ANSI escape code\n\n:raises ColorfulError: if the given color name is invalid", "id": "f1747:m1"} {"signature": "def resolve_modifier_to_ansi_code(modifiername, colormode):", "body": "if colormode == terminal.NO_COLORS: return '', ''try:start_code, end_code = ansi.MODIFIERS[modifiername]except KeyError:raise ColorfulError(''.format(modifiername, ansi.MODIFIERS.keys()))else:return ansi.ANSI_ESCAPE_CODE.format(code=start_code), ansi.ANSI_ESCAPE_CODE.format(code=end_code)", "docstring": "Resolve the given modifier name to a valid\nANSI escape code.\n\n:param str modifiername: the name of the modifier to resolve\n:param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``\n\n:returns str: the ANSI escape code for the modifier\n\n:raises ColorfulError: if the given modifier name is invalid", "id": "f1747:m2"} {"signature": "def translate_style(style, colormode, colorpalette):", "body": "style_parts = iter(style.split(''))ansi_start_sequence = []ansi_end_sequence = []try:part = Nonefor mod_part in style_parts:part = mod_partif part not in ansi.MODIFIERS:break mod_start_code, mod_end_code = resolve_modifier_to_ansi_code(part, colormode)ansi_start_sequence.append(mod_start_code)ansi_end_sequence.append(mod_end_code)else: raise StopIteration()if part != '':ansi_start_code, ansi_end_code = translate_colorname_to_ansi_code(part, ansi.FOREGROUND_COLOR_OFFSET, colormode, colorpalette)ansi_start_sequence.append(ansi_start_code)ansi_end_sequence.append(ansi_end_code)next(style_parts)part = next(style_parts)ansi_start_code, ansi_end_code = translate_colorname_to_ansi_code(part, ansi.BACKGROUND_COLOR_OFFSET, colormode, colorpalette)ansi_start_sequence.append(ansi_start_code)ansi_end_sequence.append(ansi_end_code)except StopIteration: passreturn ''.join(ansi_start_sequence), ''.join(ansi_end_sequence)", "docstring": "Translate the given style to an ANSI escape code\nsequence.\n\n``style`` examples are:\n\n* green\n* bold\n* red_on_black\n* bold_green\n* italic_yellow_on_cyan\n\n:param str style: the style to translate\n:param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``\n:parma dict colorpalette: the color palette to use for the color name mapping", "id": "f1747:m3"} {"signature": "def style_string(string, ansi_style, colormode, nested=False):", "body": "ansi_start_code, ansi_end_code = ansi_styleif PY2:if isinstance(string, str):string = string.decode(DEFAULT_ENCODING)string = UNICODE(string).replace(ansi.NEST_PLACEHOLDER, ansi_start_code)return ''.format(start_code=ansi_start_code,string=string,end_code=ansi_end_code,nest_ph=ansi.NEST_PLACEHOLDER if nested else '')", "docstring": "Style the given string according to the given\nANSI style string.\n\n:param str string: the string to style\n:param tuple ansi_style: the styling string returned by ``translate_style``\n:param int colormode: the color mode to use. 
See ``translate_rgb_to_ansi_code``\n\n:returns: a string containing proper ANSI sequence", "id": "f1747:m4"} {"signature": "@propertydef colorpalette(self):", "body": "return self._colorpalette", "docstring": "Get the current used color palette", "id": "f1747:c2:m1"} {"signature": "@colorpalette.setterdef colorpalette(self, colorpalette):", "body": "if isinstance(colorpalette, str): colorpalette = colors.parse_colors(colorpalette)self._colorpalette = colors.sanitize_color_palette(colorpalette)", "docstring": "Set the colorpalette which should be used", "id": "f1747:c2:m2"} {"signature": "def setup(self, colormode=None, colorpalette=None, extend_colors=False):", "body": "if colormode:self.colormode = colormodeif colorpalette:if extend_colors:self.update_palette(colorpalette)else:self.colorpalette = colorpalette", "docstring": "Setup this colorful object by setting a ``colormode`` and\nthe ``colorpalette`. The ``extend_colors`` flag is used\nto extend the currently active color palette instead of\nreplacing it.\n\n:param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``\n:parma dict colorpalette: the colorpalette to use. This ``dict`` should map\n color names to it's corresponding RGB value\n:param bool extend_colors: extend the active color palette instead of replacing it", "id": "f1747:c2:m3"} {"signature": "def disable(self):", "body": "self.colormode = terminal.NO_COLORS", "docstring": "Disable all colors and styles", "id": "f1747:c2:m4"} {"signature": "def use_8_ansi_colors(self):", "body": "self.colormode = terminal.ANSI_8_COLORS", "docstring": "Use 8 ANSI colors for this colorful object", "id": "f1747:c2:m5"} {"signature": "def use_16_ansi_colors(self):", "body": "self.colormode = terminal.ANSI_16_COLORS", "docstring": "Use 16 ANSI colors for this colorful object", "id": "f1747:c2:m6"} {"signature": "def use_256_ansi_colors(self):", "body": "self.colormode = terminal.ANSI_256_COLORS", "docstring": "Use 256 ANSI colors for this colorful object", "id": "f1747:c2:m7"} {"signature": "def use_true_colors(self):", "body": "self.colormode = terminal.TRUE_COLORS", "docstring": "Use true colors for this colorful object", "id": "f1747:c2:m8"} {"signature": "def use_palette(self, colorpalette):", "body": "self.colorpalette = colorpalette", "docstring": "Use the given color palette", "id": "f1747:c2:m9"} {"signature": "def update_palette(self, colorpalette):", "body": "self.colorpalette.update(colors.sanitize_color_palette(colorpalette))", "docstring": "Update the currently active color palette\nwith the given color palette", "id": "f1747:c2:m10"} {"signature": "def use_style(self, style_name):", "body": "try:style = getattr(styles, style_name.upper())except AttributeError:raise ColorfulError(''.format(style_name))else:self.colorpalette = style", "docstring": "Use a predefined style as color palette\n\n:param str style_name: the name of the style", "id": "f1747:c2:m11"} {"signature": "def format(self, string, *args, **kwargs):", "body": "return string.format(c=self, *args, **kwargs)", "docstring": "Format the given string with the given ``args`` and ``kwargs``.\nThe string can contain references to ``c`` which is provided by\nthis colorful object.\n\n:param str string: the string to format", "id": "f1747:c2:m12"} {"signature": "def str(self, string):", "body": "return ColorfulString(string, string)", "docstring": "Create a new ColorfulString instance of the given\nunstyled string.\n\nThis method should be used to create a ColorfulString\nwhich is actually not styled yet but 
can safely be concatinated\nwith other ColorfulStrings like:\n\n>>> s = colorful.str('Hello ')\n>>> s =+ colorful.black('World')\n>>> str(s)\n'Hello \\033[30mWorld\\033[39m'\n\n:param str string: the string to use for the ColorfulString", "id": "f1747:c2:m13"} {"signature": "def print(self, *objects, **options):", "body": "allowed_options = {'', '', '', ''}given_options = set(options.keys())if not given_options.issubset(allowed_options):raise TypeError(''.format(''.join(given_options.difference(allowed_options))))sep = options.get('', '')end = options.get('', '')file = options.get('', sys.stdout)flush = options.get('', False)styled_objects = [self.format(o) for o in objects]print(*styled_objects, sep=sep, end=end, file=file)if flush:file.flush()", "docstring": "Print the given objects to the given file stream.\nSee https://docs.python.org/3/library/functions.html#print\n\nThe only difference to the ``print()`` built-in is that\n``Colorful.print()`` formats the string with ``c=self``.\nWith that stylings are possible\n\n:param str sep: the seperater between the objects\n:param str end: the ending delimiter after all objects\n:param file: the file stream to write to\n:param bool flush: if the stream should be flushed", "id": "f1747:c2:m14"} {"signature": "def hex_to_rgb(value):", "body": "value = value.lstrip('')check_hex(value)length = len(value)step = int(length / )return tuple(int(value[i:i+step], ) for i in range(, length, step))", "docstring": "Convert the given hex string to a\nvalid RGB channel triplet.", "id": "f1748:m0"} {"signature": "def check_hex(value):", "body": "length = len(value)if length not in (, ):raise ValueError(''.format(value))regex = r''.format(length=length)if not re.search(regex, value, re.I):raise ValueError(''.format(value))", "docstring": "Check if the given hex value is a valid RGB color\n\nIt should match the format: [0-9a-fA-F]\nand be of length 3 or 6.", "id": "f1748:m1"} {"signature": "def parse_colors(path):", "body": "if path.endswith(\"\"):return parse_rgb_txt_file(path)elif path.endswith(\"\"):return parse_json_color_file(path)raise TypeError(\"\")", "docstring": "Parse the given color files.\n\n Supported are:\n * .txt for X11 colors\n * .json for colornames", "id": "f1749:m0"} {"signature": "def parse_rgb_txt_file(path):", "body": "color_dict = {}with open(path, '') as rgb_txt:for line in rgb_txt:line = line.strip()if not line or line.startswith(''):continue parts = line.split()color_dict[\"\".join(parts[:])] = (int(parts[]), int(parts[]), int(parts[]))return color_dict", "docstring": "Parse the given rgb.txt file into a Python dict.\n\nSee https://en.wikipedia.org/wiki/X11_color_names for more information\n\n:param str path: the path to the X11 rgb.txt file", "id": "f1749:m1"} {"signature": "def parse_json_color_file(path):", "body": "with open(path, \"\") as color_file:color_list = json.load(color_file)color_dict = {c[\"\"]: c[\"\"] for c in color_list}return color_dict", "docstring": "Parse a JSON color file.\n\n The JSON has to be in the following format:\n\n .. 
code:: json\n\n [{\"name\": \"COLOR_NAME\", \"hex\": \"#HEX\"}, ...]\n\n :param str path: the path to the JSON color file", "id": "f1749:m2"} {"signature": "def sanitize_color_palette(colorpalette):", "body": "new_palette = {}def __make_valid_color_name(name):\"\"\"\"\"\"if len(name) == :name = name[]return name[:].lower() + name[:]return name[].lower() + ''.join(word.capitalize() for word in name[:])for key, value in colorpalette.items():if isinstance(value, str):value = utils.hex_to_rgb(value)new_palette[__make_valid_color_name(key.split())] = valuereturn new_palette", "docstring": "Sanitze the given color palette so it can\nbe safely used by Colorful.\n\nIt will convert colors specified in hex RGB to\na RGB channel triplet.", "id": "f1749:m3"} {"signature": "def join_phonemes(*args):", "body": "if len(args) == :args = args[]if len(args) == :args += (CODAS[],)try:onset, nucleus, coda = argsexcept ValueError:raise TypeError('')offset = ((ONSETS.index(onset) * NUM_NUCLEUSES + NUCLEUSES.index(nucleus)) *NUM_CODAS + CODAS.index(coda))return unichr(FIRST_HANGUL_OFFSET + offset)", "docstring": "Joins a Hangul letter from Korean phonemes.", "id": "f1751:m2"} {"signature": "def split_phonemes(letter, onset=True, nucleus=True, coda=True):", "body": "if len(letter) != or not is_hangul(letter):raise ValueError('' % letter)offset = ord(letter) - FIRST_HANGUL_OFFSETphonemes = [None] * if onset:phonemes[] = ONSETS[offset // (NUM_NUCLEUSES * NUM_CODAS)]if nucleus:phonemes[] = NUCLEUSES[(offset // NUM_CODAS) % NUM_NUCLEUSES]if coda:phonemes[] = CODAS[offset % NUM_CODAS]return tuple(phonemes)", "docstring": "Splits Korean phonemes as known as \"\uc790\uc18c\" from a Hangul letter.\n\n :returns: (onset, nucleus, coda)\n :raises ValueError: `letter` is not a Hangul single letter.", "id": "f1751:m3"} {"signature": "def combine_words(word1, word2):", "body": "if word1 and word2 and is_consonant(word2[]):onset, nucleus, coda = split_phonemes(word1[-])if not coda:glue = join_phonemes(onset, nucleus, word2[])return word1[:-] + glue + word2[:]return word1 + word2", "docstring": "Combines two words. If the first word ends with a vowel and the initial\n letter of the second word is only consonant, it merges them into one\n letter::\n\n >>> combine_words(u'\ub2e4', u'\u313a')\n \ub2ed\n >>> combine_words(u'\uac00\uc624', u'\u3134\ub204\ub9ac')\n \uac00\uc628\ub204\ub9ac", "id": "f1751:m4"} {"signature": "def index_particles(particles):", "body": "patterns, indices = [], {}for x, p in enumerate(particles):group = u'' % xindices[group] = xpatterns.append(u'' % (group, p.regex_pattern()))pattern = re.compile(u''.join(patterns))return pattern, indices", "docstring": "Indexes :class:`Particle` objects. 
It returns a regex pattern which\n matches to any particle morphs and a dictionary indexes the given particles\n by regex groups.", "id": "f1753:m0"} {"signature": "def parse(morph):", "body": "return registry.parse(morph)", "docstring": "Shortcut for :class:`ParticleRegistry.parse` of the default registry.", "id": "f1753:m1"} {"signature": "def pick(word, morph, **kwargs):", "body": "return registry.pick(word, morph, **kwargs)", "docstring": "Shortcut for :class:`ParticleRegistry.pick` of the default registry.", "id": "f1753:m2"} {"signature": "def postfix(word, morph, **kwargs):", "body": "return registry.postfix(word, morph, **kwargs)", "docstring": "Shortcut for :class:`ParticleRegistry.postfix` of the default registry.", "id": "f1753:m3"} {"signature": "def format(message, *args, **kwargs):", "body": "return formatter.vformat(message, args, kwargs)", "docstring": "Shortcut for :class:`tossi.Formatter.format` of the default registry.", "id": "f1753:m6"} {"signature": "def generate_tolerances(morph1, morph2):", "body": "if morph1 == morph2:returnif not (morph1 and morph2):yield u'' % (morph1 or morph2)returnlen1, len2 = len(morph1), len(morph2)if len1 != len2:longer, shorter = (morph1, morph2) if len1 > len2 else (morph2, morph1)if longer.endswith(shorter):yield u'' % (longer[:-len(shorter)], shorter)returnfor x, (let1, let2) in enumerate(zip(reversed(morph1), reversed(morph2))):if let1 != let2:breakif x:x1, x2 = len(morph1) - x, len(morph2) - xcommon_suffix = morph1[x1:]morph1, morph2 = morph1[:x1], morph2[:x2]else:common_suffix = ''for morph1, morph2 in [(morph1, morph2), (morph2, morph1)]:yield u'' % (morph1, morph2, common_suffix)yield u'' % (morph1, morph2, common_suffix)", "docstring": "Generates all reasonable tolerant particle morphs::\n\n >>> set(generate_tolerances(u'\uc774', u'\uac00'))\n set([u'\uc774(\uac00)', u'(\uc774)\uac00', u'\uac00(\uc774)', u'(\uac00)\uc774'])\n >>> set(generate_tolerances(u'\uc774\uba74', u'\uba74'))\n set([u'(\uc774)\uba74'])", "id": "f1754:m0"} {"signature": "def parse_tolerance_style(style, registry=None):", "body": "if isinstance(style, integer_types):return styleif registry is None:from . import registryparticle = registry.parse(style)if len(particle.tolerances) != :raise ValueError('')return particle.tolerances.index(style)", "docstring": "Resolves a tolerance style of the given tolerant particle morph::\n\n >>> parse_tolerance_style(u'\uc740(\ub294)')\n 0\n >>> parse_tolerance_style(u'(\uc740)\ub294')\n 1\n >>> parse_tolerance_style(OPTIONAL_MORPH2_AND_MORPH1)\n 3", "id": "f1754:m1"} {"signature": "def guess_coda(word):", "body": "word = filter_only_significant(word)return guess_coda_from_significant_word(word)", "docstring": "Guesses the coda of the given word as correct as possible. 
If it fails\n to guess the coda, returns ``None``.", "id": "f1755:m0"} {"signature": "def filter_only_significant(word):", "body": "if not word:return wordif word.startswith(u'') and word.endswith(u''):return filter_only_significant(word[:-])x = len(word)while x > :x -= c = word[x]if c == u'':m = INSIGNIFICANT_PARENTHESIS_PATTERN.search(word[:x + ])if m is not None:x = m.start()continueunicode_category = unicodedata.category(c)if not SIGNIFICANT_UNICODE_CATEGORY_PATTERN.match(unicode_category):continuebreakreturn word[:x + ]", "docstring": "Gets a word which removes insignificant letters at the end of the given\n word::\n\n >>> pick_significant(u'\ub125\uc2a8(\ucf54\ub9ac\uc544)')\n \ub125\uc2a8\n >>> pick_significant(u'\uba54\uc774\ud50c\uc2a4\ud1a0\ub9ac...')\n \uba54\uc774\ud50c\uc2a4\ud1a0\ub9ac", "id": "f1755:m2"} {"signature": "def pick_coda_from_letter(letter):", "body": "try:__, __, coda =split_phonemes(letter, onset=False, nucleus=False, coda=True)except ValueError:return Noneelse:return coda", "docstring": "Picks only a coda from a Hangul letter. It returns ``None`` if the\n given letter is not Hangul.", "id": "f1755:m3"} {"signature": "def pick_coda_from_decimal(decimal):", "body": "decimal = Decimal(decimal)__, digits, exp = decimal.as_tuple()if exp < :return DIGIT_CODAS[digits[-]]__, digits, exp = decimal.normalize().as_tuple()index = bisect_right(EXP_INDICES, exp) - if index < :return DIGIT_CODAS[digits[-]]else:return EXP_CODAS[EXP_INDICES[index]]", "docstring": "Picks only a coda from a decimal.", "id": "f1755:m4"} {"signature": "def cached_property(f):", "body": "@property@functools.wraps(f)def wrapped(self, name=f.__name__):try:cache = self.__cache__except AttributeError:self.__cache__ = cache = {}try:return cache[name]except KeyError:cache[name] = rv = f(self)return rvreturn wrapped", "docstring": "Similar to `@property` but it calls the function just once and caches\n the result. The object has to can have ``__cache__`` attribute.\n\n If you define `__slots__` for optimization, the metaclass should be a\n :class:`CacheMeta`.", "id": "f1756:m0"} {"signature": "def singleton_particle(*bases):", "body": "return with_metaclass(SingletonParticleMeta, SingletonParticle, *bases)", "docstring": "Defines a singleton instance immediately when defining the class. 
The\n name of the class will refer the instance instead.", "id": "f1757:m0"} {"signature": "@cached_propertydef tolerances(self):", "body": "return tuple(generate_tolerances(self.morph1, self.morph2))", "docstring": "The tuple containing all the possible tolerant morphs.", "id": "f1757:c0:m1"} {"signature": "def tolerance(self, style=DEFAULT_TOLERANCE_STYLE):", "body": "return get_tolerance(self.tolerances, style)", "docstring": "Gets a tolerant morph.", "id": "f1757:c0:m2"} {"signature": "def rule(self, coda):", "body": "if coda:return self.morph1else:return self.morph2", "docstring": "Determines one of allomorphic morphs based on a coda.", "id": "f1757:c0:m3"} {"signature": "def allomorph(self, word, morph, tolerance_style=DEFAULT_TOLERANCE_STYLE,guess_coda=DEFAULT_GUESS_CODA):", "body": "suffix = self.match(morph)if suffix is None:return Nonecoda = guess_coda(word)if coda is not None:morph = self.rule(coda)elif isinstance(tolerance_style, text_type):morph = tolerance_styleelif not suffix or not is_consonant(suffix[]):morph = self.tolerance(tolerance_style)else:morph1 = (combine_words(self.morph1, suffix)if self.morph1 else suffix[:])morph2 = (combine_words(self.morph2, suffix)if self.morph2 else suffix[:])tolerances = generate_tolerances(morph1, morph2)return get_tolerance_from_iterator(tolerances, tolerance_style)return combine_words(morph, suffix)", "docstring": "Determines one of allomorphic morphs based on a word.\n\n .. see also:: :meth:`allomorph`.", "id": "f1757:c0:m4"} {"signature": "def __getitem__(self, key):", "body": "if isinstance(key, slice):word = key.startmorph = key.stop or self.morph1tolerance_style = key.step or DEFAULT_TOLERANCE_STYLEelse:word, morph = key, self.morph1tolerance_style = DEFAULT_TOLERANCE_STYLEreturn self.allomorph(word, morph, tolerance_style)", "docstring": "The syntax sugar to determine one of allomorphic morphs based on a\n word::\n\n eun = Particle(u'\uc740', u'\ub294')\n assert eun[u'\ub098\uc624'] == u'\ub294'\n assert eun[u'\ubaa8\ub9ac\uc548'] == u'\uc740'", "id": "f1757:c0:m5"} {"signature": "@cached_propertydef morphs(self):", "body": "seen = set()saw = seen.addmorphs = chain([self.morph1, self.morph2], self.tolerances)unique_morphs = (x for x in morphs if x and not (x in seen or saw(x)))return tuple(sorted(unique_morphs, key=len, reverse=True))", "docstring": "The tuple containing the given morphs and all the possible tolerant\n morphs. 
Longer is first.", "id": "f1757:c0:m7"} {"signature": "def set_signature_passphrases(self, signature_passphrases):", "body": "self.signature_passphrases = self._update_dict(signature_passphrases,{}, replace_data=True)", "docstring": "Set signature passphrases", "id": "f1760:c0:m1"} {"signature": "def get_signature_passphrases(self):", "body": "return self.signature_passphrases", "docstring": "Get signature passphrases", "id": "f1760:c0:m2"} {"signature": "def set_encryption_passphrases(self, encryption_passphrases):", "body": "self.encryption_passphrases = self._update_dict(encryption_passphrases,{}, replace_data=True)", "docstring": "Set encryption passphrases", "id": "f1760:c0:m3"} {"signature": "def get_encryption_passphrases(self):", "body": "return self.encryption_passphrases", "docstring": "Get encryption passphrases", "id": "f1760:c0:m4"} {"signature": "def set_algorithms(self, signature=None, encryption=None,serialization=None, compression=None):", "body": "self.signature_algorithms =self._update_dict(signature, self.DEFAULT_SIGNATURE)self.encryption_algorithms =self._update_dict(encryption, self.DEFAULT_ENCRYPTION)self.serialization_algorithms =self._update_dict(serialization, self.DEFAULT_SERIALIZATION)self.compression_algorithms =self._update_dict(compression, self.DEFAULT_COMPRESSION)", "docstring": "Set algorithms used for sealing. Defaults can not be overridden.", "id": "f1760:c0:m5"} {"signature": "def get_algorithms(self):", "body": "return {'': self.signature_algorithms,'': self.encryption_algorithms,'': self.serialization_algorithms,'': self.compression_algorithms,}", "docstring": "Get algorithms used for sealing", "id": "f1760:c0:m6"} {"signature": "def set_options(self, options):", "body": "self.options = self._set_options(options)", "docstring": "Set options used for sealing", "id": "f1760:c0:m7"} {"signature": "def _set_options(self, options):", "body": "if not options:return self.options.copy()options = options.copy()if '' in options:self.set_magic(options[''])del(options[''])if '' in options:flags = options['']del(options[''])for key, value in flags.iteritems():if not isinstance(value, bool):raise TypeError('' % key)else:flags = self.options['']if '' in options:del(options[''])for key, value in options.iteritems():if not isinstance(value, int):raise TypeError('' % key)if value < or value > :raise ValueError('' % key)new_options = self.options.copy()new_options.update(options)new_options[''].update(flags)return new_options", "docstring": "Private function for setting options used for sealing", "id": "f1760:c0:m8"} {"signature": "def get_options(self):", "body": "return self.options", "docstring": "Get options used for sealing", "id": "f1760:c0:m9"} {"signature": "def set_magic(self, magic):", "body": "if magic is None or isinstance(magic, str):self.magic = magicelse:raise TypeError('')", "docstring": "Set magic (prefix)", "id": "f1760:c0:m10"} {"signature": "def get_magic(self):", "body": "return self.magic", "docstring": "Get magic (prefix)", "id": "f1760:c0:m11"} {"signature": "def seal(self, data, options=None):", "body": "options = self._set_options(options)data = self._serialize_data(data, options)data = self._compress_data(data, options)data = self._encrypt_data(data, options)data = self._add_header(data, options)data = self._add_magic(data)data = self._sign_data(data, options)data = self._remove_magic(data)data = urlsafe_nopadding_b64encode(data)data = self._add_magic(data)return data", "docstring": "Seal data", "id": "f1760:c0:m12"} {"signature": "def 
unseal(self, data, return_options=False):", "body": "data = self._remove_magic(data)data = urlsafe_nopadding_b64decode(data)options = self._read_header(data)data = self._add_magic(data)data = self._unsign_data(data, options)data = self._remove_magic(data)data = self._remove_header(data, options)data = self._decrypt_data(data, options)data = self._decompress_data(data, options)data = self._unserialize_data(data, options)if return_options:return data, optionselse:return data", "docstring": "Unseal data", "id": "f1760:c0:m13"} {"signature": "def verify_signature(self, data):", "body": "data = self._remove_magic(data)data = urlsafe_nopadding_b64decode(data)options = self._read_header(data)data = self._add_magic(data)self._unsign_data(data, options)", "docstring": "Verify sealed data signature", "id": "f1760:c0:m14"} {"signature": "def get_data_options(self, data, verify_signature=True):", "body": "data = self._remove_magic(data)data = urlsafe_nopadding_b64decode(data)options = self._read_header(data)data = self._add_magic(data)if verify_signature:data = self._unsign_data(data, options)return options", "docstring": "Get sealed data options", "id": "f1760:c0:m15"} {"signature": "def _encode(self, data, algorithm, key=None):", "body": "if algorithm[''] == '':return data + self._hmac_generate(data, algorithm, key)elif algorithm[''] == '':return self._aes_encrypt(data, algorithm, key)elif algorithm[''] == '':return dataelif algorithm[''] == '':return json.dumps(data)elif algorithm[''] == '':return dataelif algorithm[''] == '':return self._zlib_compress(data, algorithm)else:raise Exception('' % algorithm[''])", "docstring": "Encode data with specific algorithm", "id": "f1760:c0:m16"} {"signature": "def _decode(self, data, algorithm, key=None):", "body": "if algorithm[''] == '':verify_signature = data[-algorithm['']:]data = data[:-algorithm['']]signature = self._hmac_generate(data, algorithm, key)if not const_equal(verify_signature, signature):raise Exception('')return dataelif algorithm[''] == '':return self._aes_decrypt(data, algorithm, key)elif algorithm[''] == '':return dataelif algorithm[''] == '':return json.loads(data)elif algorithm[''] == '':return dataelif algorithm[''] == '':return self._zlib_decompress(data, algorithm)else:raise Exception('' % algorithm[''])", "docstring": "Decode data with specific algorithm", "id": "f1760:c0:m17"} {"signature": "def _sign_data(self, data, options):", "body": "if options[''] not in self.signature_algorithms:raise Exception(''% options[''])signature_algorithm =self.signature_algorithms[options['']]algorithm = self._get_algorithm_info(signature_algorithm)key_salt = get_random_bytes(algorithm[''])key = self._generate_key(options[''],self.signature_passphrases, key_salt, algorithm)data = self._encode(data, algorithm, key)return data + key_salt", "docstring": "Add signature to data", "id": "f1760:c0:m18"} {"signature": "def _unsign_data(self, data, options):", "body": "if options[''] not in self.signature_algorithms:raise Exception(''% options[''])signature_algorithm =self.signature_algorithms[options['']]algorithm = self._get_algorithm_info(signature_algorithm)key_salt = ''if algorithm['']:key_salt = data[-algorithm['']:]data = data[:-algorithm['']]key = self._generate_key(options[''],self.signature_passphrases, key_salt, algorithm)data = self._decode(data, algorithm, key)return data", "docstring": "Verify and remove signature", "id": "f1760:c0:m19"} {"signature": "def _encrypt_data(self, data, options):", "body": "if options[''] not in 
self.encryption_algorithms:raise Exception(''% options[''])encryption_algorithm =self.encryption_algorithms[options['']]algorithm = self._get_algorithm_info(encryption_algorithm)key_salt = get_random_bytes(algorithm[''])key = self._generate_key(options[''],self.encryption_passphrases, key_salt, algorithm)data = self._encode(data, algorithm, key)return data + key_salt", "docstring": "Encrypt data", "id": "f1760:c0:m20"} {"signature": "def _decrypt_data(self, data, options):", "body": "if options[''] not in self.encryption_algorithms:raise Exception(''% options[''])encryption_algorithm =self.encryption_algorithms[options['']]algorithm = self._get_algorithm_info(encryption_algorithm)key_salt = ''if algorithm['']:key_salt = data[-algorithm['']:]data = data[:-algorithm['']]key = self._generate_key(options[''],self.encryption_passphrases, key_salt, algorithm)data = self._decode(data, algorithm, key)return data", "docstring": "Decrypt data", "id": "f1760:c0:m21"} {"signature": "def _serialize_data(self, data, options):", "body": "serialization_algorithm_id = options['']if serialization_algorithm_id not in self.serialization_algorithms:raise Exception(''% serialization_algorithm_id)serialization_algorithm =self.serialization_algorithms[serialization_algorithm_id]algorithm = self._get_algorithm_info(serialization_algorithm)data = self._encode(data, algorithm)return data", "docstring": "Serialize data", "id": "f1760:c0:m22"} {"signature": "def _unserialize_data(self, data, options):", "body": "serialization_algorithm_id = options['']if serialization_algorithm_id not in self.serialization_algorithms:raise Exception(''% serialization_algorithm_id)serialization_algorithm =self.serialization_algorithms[serialization_algorithm_id]algorithm = self._get_algorithm_info(serialization_algorithm)data = self._decode(data, algorithm)return data", "docstring": "Unserialize data", "id": "f1760:c0:m23"} {"signature": "def _compress_data(self, data, options):", "body": "compression_algorithm_id = options['']if compression_algorithm_id not in self.compression_algorithms:raise Exception(''% compression_algorithm_id)compression_algorithm =self.compression_algorithms[compression_algorithm_id]algorithm = self._get_algorithm_info(compression_algorithm)compressed = self._encode(data, algorithm)if len(compressed) < len(data):data = compressedelse:options[''] = return data", "docstring": "Compress data", "id": "f1760:c0:m24"} {"signature": "def _decompress_data(self, data, options):", "body": "compression_algorithm_id = options['']if compression_algorithm_id not in self.compression_algorithms:raise Exception(''% compression_algorithm_id)compression_algorithm =self.compression_algorithms[compression_algorithm_id]algorithm = self._get_algorithm_info(compression_algorithm)data = self._decode(data, algorithm)return data", "docstring": "Decompress data", "id": "f1760:c0:m25"} {"signature": "def _remove_magic(self, data):", "body": "if not self.magic:return datamagic_size = len(self.magic)magic = data[:magic_size]if magic != self.magic:raise Exception('')data = data[magic_size:]return data", "docstring": "Verify and remove magic", "id": "f1760:c0:m26"} {"signature": "def _add_magic(self, data):", "body": "if self.magic:return self.magic + datareturn data", "docstring": "Add magic", "id": "f1760:c0:m27"} {"signature": "def _add_header(self, data, options):", "body": "version_info = self._get_version_info(options[''])flags = options['']header_flags = dict((i, str(int(j))) for i, j in options[''].iteritems())header_flags = 
''.join(version_info[''](**header_flags))header_flags = int(header_flags, )options[''] = header_flagsheader = version_info['']header = header(**options)header = pack(version_info[''], *header)if '' in flags and flags['']:timestamp = long(time())timestamp = pack(version_info[''], timestamp)header = header + timestampreturn header + data", "docstring": "Add header to data", "id": "f1760:c0:m28"} {"signature": "def _read_header(self, data):", "body": "version = self._read_version(data)version_info = self._get_version_info(version)header_data = data[:version_info['']]header = version_info['']header = header._make(unpack(version_info[''], header_data))header = dict(header._asdict())flags = list(\"\".format(header['']))flags = dict(version_info['']._make(flags)._asdict())flags = dict((i, bool(int(j))) for i, j in flags.iteritems())header[''] = flagstimestamp = Noneif flags['']:ts_start = version_info['']ts_end = ts_start + version_info['']timestamp_data = data[ts_start:ts_end]timestamp = unpack(version_info[''], timestamp_data)[]header[''] = {'': timestamp}return header", "docstring": "Read header from data", "id": "f1760:c0:m29"} {"signature": "def _remove_header(self, data, options):", "body": "version_info = self._get_version_info(options[''])header_size = version_info['']if options['']['']:header_size += version_info['']data = data[header_size:]return data", "docstring": "Remove header from data", "id": "f1760:c0:m30"} {"signature": "def _read_version(self, data):", "body": "version = ord(data[])if version not in self.VERSIONS:raise Exception('' % version)return version", "docstring": "Read header version from data", "id": "f1760:c0:m31"} {"signature": "def _get_version_info(self, version):", "body": "return self.VERSIONS[version]", "docstring": "Get version info", "id": "f1760:c0:m32"} {"signature": "def _get_algorithm_info(self, algorithm_info):", "body": "if algorithm_info[''] not in self.ALGORITHMS:raise Exception(''% algorithm_info[''])algorithm = self.ALGORITHMS[algorithm_info['']]algorithm_info.update(algorithm)return algorithm_info", "docstring": "Get algorithm info", "id": "f1760:c0:m33"} {"signature": "@staticmethoddef _generate_key(pass_id, passphrases, salt, algorithm):", "body": "if pass_id not in passphrases:raise Exception('' % pass_id)passphrase = passphrases[pass_id]if len(passphrase) < :raise Exception('')digestmod = EncryptedPickle._get_hashlib(algorithm[''])encoder = PBKDF2(passphrase, salt,iterations=algorithm[''],digestmodule=digestmod)return encoder.read(algorithm[''])", "docstring": "Generate and return PBKDF2 key", "id": "f1760:c0:m34"} {"signature": "@staticmethoddef _update_dict(data, default_data, replace_data=False):", "body": "if not data:data = default_data.copy()return dataif not isinstance(data, dict):raise TypeError('')if len(data) > :raise ValueError('')for i in data.keys():if not isinstance(i, int):raise TypeError('')if i < or i > :raise ValueError('')if not replace_data:data.update(default_data)return data", "docstring": "Update algorithm definition type dictionaries", "id": "f1760:c0:m35"} {"signature": "@staticmethoddef _get_hashlib(digestmode):", "body": "if digestmode == '':return SHAif digestmode == '':return SHA256elif digestmode == '':return SHA384elif digestmode == '':return SHA512else:raise Exception(''% digestmode)", "docstring": "Generate HMAC hash", "id": "f1760:c0:m36"} {"signature": "@staticmethoddef _hmac_generate(data, algorithm, key):", "body": "digestmod = EncryptedPickle._get_hashlib(algorithm[''])return HMAC.new(key, data, 
digestmod).digest()", "docstring": "Generate HMAC hash", "id": "f1760:c0:m37"} {"signature": "@staticmethoddef _aes_encrypt(data, algorithm, key):", "body": "if algorithm[''] == '':mode = AES.MODE_CBCelse:raise Exception(''% algorithm[''])iv_size = algorithm['']block_size = iv_sizeinclude_iv = Trueif ''in algorithm and algorithm['']:if len(algorithm['']) != algorithm['']:raise Exception('')iv_value = algorithm['']include_iv = Falseelse:iv_value = get_random_bytes(iv_size)numpad = block_size - (len(data) % block_size)data = data + numpad * chr(numpad)enc = AES.new(key, mode, iv_value).encrypt(data)if include_iv:enc = iv_value + encreturn enc", "docstring": "AES encrypt", "id": "f1760:c0:m38"} {"signature": "@staticmethoddef _aes_decrypt(data, algorithm, key):", "body": "if algorithm[''] == '':mode = AES.MODE_CBCelse:raise Exception(''% algorithm[''])iv_size = algorithm['']if '' in algorithm and algorithm['']:if len(algorithm['']) != algorithm['']:raise Exception('')iv_value = algorithm['']enc = dataelse:iv_value = data[:iv_size]enc = data[iv_size:]dec = AES.new(key, mode, iv_value).decrypt(enc)numpad = ord(dec[-])dec = dec[:-numpad]return dec", "docstring": "AES decrypt", "id": "f1760:c0:m39"} {"signature": "@staticmethoddef _zlib_compress(data, algorithm):", "body": "if algorithm[''] == '':encoder = zlib.compressobj(algorithm[''], zlib.DEFLATED, -)compressed = encoder.compress(data)compressed += encoder.flush()return compressedelse:raise Exception(''% algorithm[''])", "docstring": "GZIP compress", "id": "f1760:c0:m40"} {"signature": "@staticmethoddef _zlib_decompress(data, algorithm):", "body": "if algorithm[''] == '':return zlib.decompress(data, -)else:raise Exception(''% algorithm[''])", "docstring": "GZIP decompress", "id": "f1760:c0:m41"} {"signature": "def urlsafe_nopadding_b64encode(data):", "body": "return urlsafe_b64encode(data).rstrip('')", "docstring": "URL safe Base64 encode without padding (=)", "id": "f1762:m0"} {"signature": "def urlsafe_nopadding_b64decode(data):", "body": "padding = len(data) % if padding != :padding = - paddingpadding = '' * paddingdata = data + paddingreturn urlsafe_b64decode(data)", "docstring": "URL safe Base64 decode without padding (=)", "id": "f1762:m1"} {"signature": "def const_equal(str_a, str_b):", "body": "if len(str_a) != len(str_b):return Falseresult = Truefor i in range(len(str_a)):result &= (str_a[i] == str_b[i])return result", "docstring": "Constant time string comparison", "id": "f1762:m2"} {"signature": "def DemoCN():", "body": "thisWindow = auto.GetConsoleWindow()auto.Logger.ColorfullyWrite('')time.sleep()auto.SendKeys('')while not isinstance(auto.GetFocusedControl(), auto.EditControl):time.sleep()auto.SendKeys('')cmdWindow = auto.WindowControl(RegexName = '')cmdWindow.TitleBarControl().RightClick()auto.SendKey(auto.Keys.VK_P)optionWindow = cmdWindow.WindowControl(SubName = '')optionWindow.TabItemControl(SubName = '').Click()optionTab = optionWindow.PaneControl(SubName = '')checkBox = optionTab.CheckBoxControl(AutomationId = '')if checkBox.GetTogglePattern().ToggleState != auto.ToggleState.On:checkBox.Click()checkBox = optionTab.CheckBoxControl(AutomationId = '')if checkBox.GetTogglePattern().ToggleState != auto.ToggleState.On:checkBox.Click()optionWindow.TabItemControl(SubName = '').Click()layoutTab = optionWindow.PaneControl(SubName = 
'')layoutTab.EditControl(AutomationId='').GetValuePattern().SetValue('')layoutTab.EditControl(AutomationId='').GetValuePattern().SetValue('')layoutTab.EditControl(AutomationId='').GetValuePattern().SetValue('')layoutTab.EditControl(AutomationId='').GetValuePattern().SetValue('')optionWindow.ButtonControl(AutomationId = '').Click()cmdWindow.SetActive()rect = cmdWindow.BoundingRectangleauto.DragDrop(rect.left + , rect.top + , , )thisWindow.SetActive()auto.Logger.ColorfullyWrite('')time.sleep()subprocess.Popen('')notepadWindow = auto.WindowControl(searchDepth = , ClassName = '')cx, cy = auto.GetScreenSize()notepadWindow.MoveWindow(cx // , , cx // , cy // )time.sleep()notepadWindow.EditControl().SendKeys('', )time.sleep()dir = os.path.dirname(__file__)scriptPath = os.path.abspath(os.path.join(dir, ''))thisWindow.SetActive()auto.Logger.ColorfullyWrite('')time.sleep()cmdWindow.SendKeys(''.format(scriptPath) + '', )time.sleep()thisWindow.SetActive()auto.Logger.ColorfullyWrite('')time.sleep()cmdWindow.SendKeys(''.format(scriptPath) + '', )time.sleep()thisWindow.SetActive()auto.Logger.ColorfullyWrite('')time.sleep()cmdWindow.SendKeys(''.format(scriptPath) + '', )notepadWindow.SetActive()notepadWindow.MoveCursorToMyCenter()time.sleep()cmdWindow.SetActive(waitTime = )thisWindow.SetActive()auto.Logger.ColorfullyWrite('')time.sleep()cmdWindow.SendKeys(''.format(scriptPath) + '', )notepadWindow.SetActive()notepadWindow.MoveCursorToMyCenter()time.sleep()cmdWindow.SetActive(waitTime = )thisWindow.SetActive()auto.Logger.ColorfullyWrite('')time.sleep()cmdWindow.SendKeys(''.format(scriptPath) + '', )notepadWindow.SetActive()notepadWindow.EditControl().Click()time.sleep()cmdWindow.SetActive(waitTime = )time.sleep()thisWindow.SetActive()auto.Logger.WriteLine('', auto.ConsoleColor.Green)input()", "docstring": "for Chinese language", "id": "f1769:m0"} {"signature": "def DemoEN():", "body": "thisWindow = auto.GetConsoleWindow()auto.Logger.ColorfullyWrite('')time.sleep()auto.SendKeys('')while not isinstance(auto.GetFocusedControl(), auto.EditControl):time.sleep()auto.SendKeys('')cmdWindow = auto.WindowControl(SubName = '')rect = cmdWindow.BoundingRectangleauto.DragDrop(rect.left + , rect.top + , , )thisWindow.SetActive()auto.Logger.ColorfullyWrite('')time.sleep()subprocess.Popen('')notepadWindow = auto.WindowControl(searchDepth = , ClassName = '')cx, cy = auto.GetScreenSize()notepadWindow.MoveWindow(cx // , , cx // , cy // )time.sleep()notepadWindow.EditControl().SendKeys('', )time.sleep()dir = os.path.dirname(__file__)scriptPath = os.path.abspath(os.path.join(dir, ''))thisWindow.SetActive()auto.Logger.ColorfullyWrite('')time.sleep()cmdWindow.SendKeys(''.format(scriptPath) + '', )time.sleep()thisWindow.SetActive()auto.Logger.ColorfullyWrite('')time.sleep()cmdWindow.SendKeys(''.format(scriptPath) + '', )time.sleep()thisWindow.SetActive()auto.Logger.ColorfullyWrite('')time.sleep()cmdWindow.SendKeys(''.format(scriptPath) + '', )notepadWindow.SetActive()notepadWindow.MoveCursorToMyCenter()time.sleep()cmdWindow.SetActive(waitTime = )thisWindow.SetActive()auto.Logger.ColorfullyWrite('')time.sleep()cmdWindow.SendKeys(''.format(scriptPath) + '', )notepadWindow.SetActive()notepadWindow.MoveCursorToMyCenter()time.sleep()cmdWindow.SetActive(waitTime = )thisWindow.SetActive()auto.Logger.ColorfullyWrite('')time.sleep()cmdWindow.SendKeys(''.format(scriptPath) + '', )notepadWindow.SetActive()notepadWindow.EditControl().Click()time.sleep()cmdWindow.SetActive(waitTime = )time.sleep()thisWindow.SetActive()auto.Logger.WriteLine('', 
auto.ConsoleColor.Green)input()", "docstring": "for other language", "id": "f1769:m1"} {"signature": "def threadFunc(root):", "body": "th = threading.currentThread()auto.Logger.WriteLine(''.format(th.ident, th.name), auto.ConsoleColor.Cyan)time.sleep()auto.InitializeUIAutomationInCurrentThread()auto.GetConsoleWindow().CaptureToImage('')newRoot = auto.GetRootControl() auto.EnumAndLogControl(newRoot, )auto.UninitializeUIAutomationInCurrentThread()auto.Logger.WriteLine(''.format(th.ident, th.name), auto.ConsoleColor.Cyan)", "docstring": "If you want to use functionalities related to Controls and Patterns in a new thread.\nYou must call InitializeUIAutomationInCurrentThread first in the thread\n and call UninitializeUIAutomationInCurrentThread when the thread exits.\nBut you can't use use a Control or a Pattern created in a different thread.\nSo you can't create a Control or a Pattern in main thread and then pass it to a new thread and use it.", "id": "f1777:m0"} {"signature": "def SetClipboardText(text: str) -> bool:", "body": "if ctypes.windll.user32.OpenClipboard():ctypes.windll.user32.EmptyClipboard()textByteLen = (len(text) + ) * hClipboardData = ctypes.windll.kernel32.GlobalAlloc(, textByteLen) hDestText = ctypes.windll.kernel32.GlobalLock(hClipboardData)ctypes.cdll.msvcrt.wcsncpy(ctypes.c_wchar_p(hDestText), ctypes.c_wchar_p(text), textByteLen // )ctypes.windll.kernel32.GlobalUnlock(hClipboardData)ctypes.windll.user32.SetClipboardData(, hClipboardData) ctypes.windll.user32.CloseClipboard()return Truereturn False", "docstring": "Return bool, True if succeed otherwise False.", "id": "f1782:m1"} {"signature": "def SetConsoleColor(color: int) -> bool:", "body": "global _ConsoleOutputHandleglobal _DefaultConsoleColorif not _DefaultConsoleColor:if not _ConsoleOutputHandle:_ConsoleOutputHandle = ctypes.windll.kernel32.GetStdHandle(_StdOutputHandle)bufferInfo = ConsoleScreenBufferInfo()ctypes.windll.kernel32.GetConsoleScreenBufferInfo(_ConsoleOutputHandle, ctypes.byref(bufferInfo))_DefaultConsoleColor = int(bufferInfo.wAttributes & )if sys.stdout:sys.stdout.flush()bool(ctypes.windll.kernel32.SetConsoleTextAttribute(_ConsoleOutputHandle, color))", "docstring": "Change the text color on console window.\ncolor: int, a value in class `ConsoleColor`.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m2"} {"signature": "def ResetConsoleColor() -> bool:", "body": "if sys.stdout:sys.stdout.flush()bool(ctypes.windll.kernel32.SetConsoleTextAttribute(_ConsoleOutputHandle, _DefaultConsoleColor))", "docstring": "Reset to the default text color on console window.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m3"} {"signature": "def WindowFromPoint(x: int, y: int) -> int:", "body": "return ctypes.windll.user32.WindowFromPoint(ctypes.wintypes.POINT(x, y))", "docstring": "WindowFromPoint from Win32.\nReturn int, a native window handle.", "id": "f1782:m4"} {"signature": "def GetCursorPos() -> tuple:", "body": "point = ctypes.wintypes.POINT(, )ctypes.windll.user32.GetCursorPos(ctypes.byref(point))return point.x, point.y", "docstring": "GetCursorPos from Win32.\nGet current mouse cursor positon.\nReturn tuple, two ints tuple (x, y).", "id": "f1782:m5"} {"signature": "def SetCursorPos(x: int, y: int) -> bool:", "body": "return bool(ctypes.windll.user32.SetCursorPos(x, y))", "docstring": "SetCursorPos from Win32.\nSet mouse cursor to point x, y.\nx: int.\ny: int.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m6"} {"signature": "def GetDoubleClickTime() -> int:", "body": 
"return ctypes.windll.user32.GetDoubleClickTime()", "docstring": "GetDoubleClickTime from Win32.\nReturn int, in milliseconds.", "id": "f1782:m7"} {"signature": "def mouse_event(dwFlags: int, dx: int, dy: int, dwData: int, dwExtraInfo: int) -> None:", "body": "ctypes.windll.user32.mouse_event(dwFlags, dx, dy, dwData, dwExtraInfo)", "docstring": "mouse_event from Win32.", "id": "f1782:m8"} {"signature": "def keybd_event(bVk: int, bScan: int, dwFlags: int, dwExtraInfo: int) -> None:", "body": "ctypes.windll.user32.keybd_event(bVk, bScan, dwFlags, dwExtraInfo)", "docstring": "keybd_event from Win32.", "id": "f1782:m9"} {"signature": "def PostMessage(handle: int, msg: int, wParam: int, lParam: int) -> bool:", "body": "return bool(ctypes.windll.user32.PostMessageW(ctypes.c_void_p(handle), msg, wParam, lParam))", "docstring": "PostMessage from Win32.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m10"} {"signature": "def SendMessage(handle: int, msg: int, wParam: int, lParam: int) -> int:", "body": "return ctypes.windll.user32.SendMessageW(ctypes.c_void_p(handle), msg, wParam, lParam)", "docstring": "SendMessage from Win32.\nReturn int, the return value specifies the result of the message processing;\n it depends on the message sent.", "id": "f1782:m11"} {"signature": "def Click(x: int, y: int, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "SetCursorPos(x, y)screenWidth, screenHeight = GetScreenSize()mouse_event(MouseEventFlag.LeftDown | MouseEventFlag.Absolute, x * // screenWidth, y * // screenHeight, , )time.sleep()mouse_event(MouseEventFlag.LeftUp | MouseEventFlag.Absolute, x * // screenWidth, y * // screenHeight, , )time.sleep(waitTime)", "docstring": "Simulate mouse click at point x, y.\nx: int.\ny: int.\nwaitTime: float.", "id": "f1782:m12"} {"signature": "def MiddleClick(x: int, y: int, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "SetCursorPos(x, y)screenWidth, screenHeight = GetScreenSize()mouse_event(MouseEventFlag.MiddleDown | MouseEventFlag.Absolute, x * // screenWidth, y * // screenHeight, , )time.sleep()mouse_event(MouseEventFlag.MiddleUp | MouseEventFlag.Absolute, x * // screenWidth, y * // screenHeight, , )time.sleep(waitTime)", "docstring": "Simulate mouse middle click at point x, y.\nx: int.\ny: int.\nwaitTime: float.", "id": "f1782:m13"} {"signature": "def RightClick(x: int, y: int, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "SetCursorPos(x, y)screenWidth, screenHeight = GetScreenSize()mouse_event(MouseEventFlag.RightDown | MouseEventFlag.Absolute, x * // screenWidth, y * // screenHeight, , )time.sleep()mouse_event(MouseEventFlag.RightUp | MouseEventFlag.Absolute, x * // screenWidth, y * // screenHeight, , )time.sleep(waitTime)", "docstring": "Simulate mouse right click at point x, y.\nx: int.\ny: int.\nwaitTime: float.", "id": "f1782:m14"} {"signature": "def PressMouse(x: int, y: int, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "SetCursorPos(x, y)screenWidth, screenHeight = GetScreenSize()mouse_event(MouseEventFlag.LeftDown | MouseEventFlag.Absolute, x * // screenWidth, y * // screenHeight, , )time.sleep(waitTime)", "docstring": "Press left mouse.\nx: int.\ny: int.\nwaitTime: float.", "id": "f1782:m15"} {"signature": "def ReleaseMouse(waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "x, y = GetCursorPos()screenWidth, screenHeight = GetScreenSize()mouse_event(MouseEventFlag.LeftUp | MouseEventFlag.Absolute, x * // screenWidth, y * // screenHeight, , )time.sleep(waitTime)", "docstring": "Release left 
mouse.\nwaitTime: float.", "id": "f1782:m16"} {"signature": "def RightPressMouse(x: int, y: int, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "SetCursorPos(x, y)screenWidth, screenHeight = GetScreenSize()mouse_event(MouseEventFlag.RightDown | MouseEventFlag.Absolute, x * // screenWidth, y * // screenHeight, , )time.sleep(waitTime)", "docstring": "Press right mouse.\nx: int.\ny: int.\nwaitTime: float.", "id": "f1782:m17"} {"signature": "def RightReleaseMouse(waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "x, y = GetCursorPos()screenWidth, screenHeight = GetScreenSize()mouse_event(MouseEventFlag.RightUp | MouseEventFlag.Absolute, x * // screenWidth, y * // screenHeight, , )time.sleep(waitTime)", "docstring": "Release right mouse.\nwaitTime: float.", "id": "f1782:m18"} {"signature": "def MoveTo(x: int, y: int, moveSpeed: float = , waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "if moveSpeed <= :moveTime = else:moveTime = MAX_MOVE_SECOND / moveSpeedcurX, curY = GetCursorPos()xCount = abs(x - curX)yCount = abs(y - curY)maxPoint = max(xCount, yCount)screenWidth, screenHeight = GetScreenSize()maxSide = max(screenWidth, screenHeight)minSide = min(screenWidth, screenHeight)if maxPoint > minSide:maxPoint = minSideif maxPoint < maxSide:maxPoint = + int((maxSide - ) / maxSide * maxPoint)moveTime = moveTime * maxPoint * / maxSidestepCount = maxPoint // if stepCount > :xStep = (x - curX) * / stepCountyStep = (y - curY) * / stepCountinterval = moveTime / stepCountfor i in range(stepCount):cx = curX + int(xStep * i)cy = curY + int(yStep * i)SetCursorPos(cx, cy)time.sleep(interval)SetCursorPos(x, y)time.sleep(waitTime)", "docstring": "Simulate mouse move to point x, y from current cursor.\nx: int.\ny: int.\nmoveSpeed: float, 1 normal speed, < 1 move slower, > 1 move faster.\nwaitTime: float.", "id": "f1782:m19"} {"signature": "def DragDrop(x1: int, y1: int, x2: int, y2: int, moveSpeed: float = , waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "PressMouse(x1, y1, )MoveTo(x2, y2, moveSpeed, )ReleaseMouse(waitTime)", "docstring": "Simulate mouse left button drag from point x1, y1 drop to point x2, y2.\nx1: int.\ny1: int.\nx2: int.\ny2: int.\nmoveSpeed: float, 1 normal speed, < 1 move slower, > 1 move faster.\nwaitTime: float.", "id": "f1782:m20"} {"signature": "def RightDragDrop(x1: int, y1: int, x2: int, y2: int, moveSpeed: float = , waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "RightPressMouse(x1, y1, )MoveTo(x2, y2, moveSpeed, )RightReleaseMouse(waitTime)", "docstring": "Simulate mouse right button drag from point x1, y1 drop to point x2, y2.\nx1: int.\ny1: int.\nx2: int.\ny2: int.\nmoveSpeed: float, 1 normal speed, < 1 move slower, > 1 move faster.\nwaitTime: float.", "id": "f1782:m21"} {"signature": "def WheelDown(wheelTimes: int = , interval: float = , waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "for i in range(wheelTimes):mouse_event(MouseEventFlag.Wheel, , , -, ) time.sleep(interval)time.sleep(waitTime)", "docstring": "Simulate mouse wheel down.\nwheelTimes: int.\ninterval: float.\nwaitTime: float.", "id": "f1782:m22"} {"signature": "def WheelUp(wheelTimes: int = , interval: float = , waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "for i in range(wheelTimes):mouse_event(MouseEventFlag.Wheel, , , , ) time.sleep(interval)time.sleep(waitTime)", "docstring": "Simulate mouse wheel up.\nwheelTimes: int.\ninterval: float.\nwaitTime: float.", "id": "f1782:m23"} {"signature": "def GetScreenSize() -> tuple:", "body": "SM_CXSCREEN 
= SM_CYSCREEN = w = ctypes.windll.user32.GetSystemMetrics(SM_CXSCREEN)h = ctypes.windll.user32.GetSystemMetrics(SM_CYSCREEN)return w, h", "docstring": "Return tuple, two ints tuple (width, height).", "id": "f1782:m24"} {"signature": "def GetPixelColor(x: int, y: int, handle: int = ) -> int:", "body": "hdc = ctypes.windll.user32.GetWindowDC(ctypes.c_void_p(handle))bgr = ctypes.windll.gdi32.GetPixel(hdc, x, y)ctypes.windll.user32.ReleaseDC(ctypes.c_void_p(handle), hdc)return bgr", "docstring": "Get pixel color of a native window.\nx: int.\ny: int.\nhandle: int, the handle of a native window.\nReturn int, the bgr value of point (x,y).\nr = bgr & 0x0000FF\ng = (bgr & 0x00FF00) >> 8\nb = (bgr & 0xFF0000) >> 16\nIf handle is 0, get pixel from Desktop window(root control).\nNote:\nNot all devices support GetPixel.\nAn application should call GetDeviceCaps to determine whether a specified device supports this function.\nFor example, console window doesn't support.", "id": "f1782:m25"} {"signature": "def MessageBox(content: str, title: str, flags: int = MB.Ok) -> int:", "body": "return ctypes.windll.user32.MessageBoxW(ctypes.c_void_p(), ctypes.c_wchar_p(content), ctypes.c_wchar_p(title), flags)", "docstring": "MessageBox from Win32.\ncontent: str.\ntitle: str.\nflags: int, a value or some combined values in class `MB`.\nReturn int, a value in MB whose name starts with Id, such as MB.IdOk", "id": "f1782:m26"} {"signature": "def SetForegroundWindow(handle: int) -> bool:", "body": "return bool(ctypes.windll.user32.SetForegroundWindow(ctypes.c_void_p(handle)))", "docstring": "SetForegroundWindow from Win32.\nhandle: int, the handle of a native window.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m27"} {"signature": "def BringWindowToTop(handle: int) -> bool:", "body": "return bool(ctypes.windll.user32.BringWindowToTop(ctypes.c_void_p(handle)))", "docstring": "BringWindowToTop from Win32.\nhandle: int, the handle of a native window.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m28"} {"signature": "def SwitchToThisWindow(handle: int) -> None:", "body": "ctypes.windll.user32.SwitchToThisWindow(ctypes.c_void_p(handle), )", "docstring": "SwitchToThisWindow from Win32.\nhandle: int, the handle of a native window.", "id": "f1782:m29"} {"signature": "def GetAncestor(handle: int, flag: int) -> int:", "body": "return ctypes.windll.user32.GetAncestor(ctypes.c_void_p(handle), flag)", "docstring": "GetAncestor from Win32.\nhandle: int, the handle of a native window.\nindex: int, a value in class `GAFlag`.\nReturn int, a native window handle.", "id": "f1782:m30"} {"signature": "def IsTopLevelWindow(handle: int) -> bool:", "body": "return bool(ctypes.windll.user32.IsTopLevelWindow(ctypes.c_void_p(handle)))", "docstring": "IsTopLevelWindow from Win32.\nhandle: int, the handle of a native window.\nReturn bool.\nOnly available on Windows 7 or Higher.", "id": "f1782:m31"} {"signature": "def GetWindowLong(handle: int, index: int) -> int:", "body": "return ctypes.windll.user32.GetWindowLongW(ctypes.c_void_p(handle), index)", "docstring": "GetWindowLong from Win32.\nhandle: int, the handle of a native window.\nindex: int.", "id": "f1782:m32"} {"signature": "def SetWindowLong(handle: int, index: int, value: int) -> int:", "body": "return ctypes.windll.user32.SetWindowLongW(ctypes.c_void_p(handle), index, value)", "docstring": "SetWindowLong from Win32.\nhandle: int, the handle of a native window.\nindex: int.\nvalue: int.\nReturn int, the previous value before set.", "id": "f1782:m33"} 
{"signature": "def IsIconic(handle: int) -> bool:", "body": "return bool(ctypes.windll.user32.IsIconic(ctypes.c_void_p(handle)))", "docstring": "IsIconic from Win32.\nDetermine whether a native window is minimized.\nhandle: int, the handle of a native window.\nReturn bool.", "id": "f1782:m34"} {"signature": "def IsZoomed(handle: int) -> bool:", "body": "return bool(ctypes.windll.user32.IsZoomed(ctypes.c_void_p(handle)))", "docstring": "IsZoomed from Win32.\nDetermine whether a native window is maximized.\nhandle: int, the handle of a native window.\nReturn bool.", "id": "f1782:m35"} {"signature": "def IsWindowVisible(handle: int) -> bool:", "body": "return bool(ctypes.windll.user32.IsWindowVisible(ctypes.c_void_p(handle)))", "docstring": "IsWindowVisible from Win32.\nhandle: int, the handle of a native window.\nReturn bool.", "id": "f1782:m36"} {"signature": "def ShowWindow(handle: int, cmdShow: int) -> bool:", "body": "return ctypes.windll.user32.ShowWindow(ctypes.c_void_p(handle), cmdShow)", "docstring": "ShowWindow from Win32.\nhandle: int, the handle of a native window.\ncmdShow: int, a value in clas `SW`.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m37"} {"signature": "def MoveWindow(handle: int, x: int, y: int, width: int, height: int, repaint: int = ) -> bool:", "body": "return bool(ctypes.windll.user32.MoveWindow(ctypes.c_void_p(handle), x, y, width, height, repaint))", "docstring": "MoveWindow from Win32.\nhandle: int, the handle of a native window.\nx: int.\ny: int.\nwidth: int.\nheight: int.\nrepaint: int, use 1 or 0.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m38"} {"signature": "def SetWindowPos(handle: int, hWndInsertAfter: int, x: int, y: int, width: int, height: int, flags: int) -> bool:", "body": "return ctypes.windll.user32.SetWindowPos(ctypes.c_void_p(handle), ctypes.c_void_p(hWndInsertAfter), x, y, width, height, flags)", "docstring": "SetWindowPos from Win32.\nhandle: int, the handle of a native window.\nhWndInsertAfter: int, a value whose name starts with 'HWND' in class SWP.\nx: int.\ny: int.\nwidth: int.\nheight: int.\nflags: int, values whose name starts with 'SWP' in class `SWP`.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m39"} {"signature": "def SetWindowTopmost(handle: int, isTopmost: bool) -> bool:", "body": "topValue = SWP.HWND_Topmost if isTopmost else SWP.HWND_NoTopmostreturn bool(SetWindowPos(handle, topValue, , , , , SWP.SWP_NoSize | SWP.SWP_NoMove))", "docstring": "handle: int, the handle of a native window.\nisTopmost: bool\nReturn bool, True if succeed otherwise False.", "id": "f1782:m40"} {"signature": "def GetWindowText(handle: int) -> str:", "body": "arrayType = ctypes.c_wchar * MAX_PATHvalues = arrayType()ctypes.windll.user32.GetWindowTextW(ctypes.c_void_p(handle), values, MAX_PATH)return values.value", "docstring": "GetWindowText from Win32.\nhandle: int, the handle of a native window.\nReturn str.", "id": "f1782:m41"} {"signature": "def SetWindowText(handle: int, text: str) -> bool:", "body": "return bool(ctypes.windll.user32.SetWindowTextW(ctypes.c_void_p(handle), ctypes.c_wchar_p(text)))", "docstring": "SetWindowText from Win32.\nhandle: int, the handle of a native window.\ntext: str.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m42"} {"signature": "def GetEditText(handle: int) -> str:", "body": "textLen = SendMessage(handle, , , ) + arrayType = ctypes.c_wchar * textLenvalues = arrayType()SendMessage(handle, , textLen, values) return values.value", "docstring": "Get text of 
a native Win32 Edit.\nhandle: int, the handle of a native window.\nReturn str.", "id": "f1782:m43"} {"signature": "def GetConsoleOriginalTitle() -> str:", "body": "if IsNT6orHigher:arrayType = ctypes.c_wchar * MAX_PATHvalues = arrayType()ctypes.windll.kernel32.GetConsoleOriginalTitleW(values, MAX_PATH)return values.valueelse:raise RuntimeError('')", "docstring": "GetConsoleOriginalTitle from Win32.\nReturn str.\nOnly available on Windows Vista or higher.", "id": "f1782:m44"} {"signature": "def GetConsoleTitle() -> str:", "body": "arrayType = ctypes.c_wchar * MAX_PATHvalues = arrayType()ctypes.windll.kernel32.GetConsoleTitleW(values, MAX_PATH)return values.value", "docstring": "GetConsoleTitle from Win32.\nReturn str.", "id": "f1782:m45"} {"signature": "def SetConsoleTitle(text: str) -> bool:", "body": "return bool(ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(text)))", "docstring": "SetConsoleTitle from Win32.\ntext: str.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m46"} {"signature": "def GetForegroundWindow() -> int:", "body": "return ctypes.windll.user32.GetForegroundWindow()", "docstring": "GetForegroundWindow from Win32.\nReturn int, the native handle of the foreground window.", "id": "f1782:m47"} {"signature": "def IsDesktopLocked() -> bool:", "body": "isLocked = Falsedesk = ctypes.windll.user32.OpenDesktopW(ctypes.c_wchar_p(''), , , ) if desk:isLocked = not ctypes.windll.user32.SwitchDesktop(desk)ctypes.windll.user32.CloseDesktop(desk)return isLocked", "docstring": "Check if desktop is locked.\nReturn bool.\nDesktop is locked if press Win+L, Ctrl+Alt+Del or in remote desktop mode.", "id": "f1782:m48"} {"signature": "def PlayWaveFile(filePath: str = r'', isAsync: bool = False, isLoop: bool = False) -> bool:", "body": "if filePath:SND_ASYNC = SND_NODEFAULT = SND_LOOP = SND_FILENAME = flags = SND_NODEFAULT | SND_FILENAMEif isAsync:flags |= SND_ASYNCif isLoop:flags |= SND_LOOPflags |= SND_ASYNCreturn bool(ctypes.windll.winmm.PlaySoundW(ctypes.c_wchar_p(filePath), ctypes.c_void_p(), flags))else:return bool(ctypes.windll.winmm.PlaySoundW(ctypes.c_wchar_p(), ctypes.c_void_p(), ))", "docstring": "Call PlaySound from Win32.\nfilePath: str, if emtpy, stop playing the current sound.\nisAsync: bool, if True, the sound is played asynchronously and returns immediately.\nisLoop: bool, if True, the sound plays repeatedly until PlayWaveFile(None) is called again, must also set isAsync to True.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m49"} {"signature": "def IsProcess64Bit(processId: int) -> bool:", "body": "try:func = ctypes.windll.ntdll.ZwWow64ReadVirtualMemory64 except Exception as ex:return Falsetry:IsWow64Process = ctypes.windll.kernel32.IsWow64ProcessIsWow64Process.argtypes = (ctypes.c_void_p, ctypes.POINTER(ctypes.c_int))except Exception as ex:return FalsehProcess = ctypes.windll.kernel32.OpenProcess(, , processId) if hProcess:is64Bit = ctypes.c_int32()if IsWow64Process(hProcess, ctypes.byref(is64Bit)):ctypes.windll.kernel32.CloseHandle(ctypes.c_void_p(hProcess))return False if is64Bit.value else Trueelse:ctypes.windll.kernel32.CloseHandle(ctypes.c_void_p(hProcess))", "docstring": "Return True if process is 64 bit.\nReturn False if process is 32 bit.\nReturn None if unknown, maybe caused by having no acess right to the process.", "id": "f1782:m50"} {"signature": "def IsUserAnAdmin() -> bool:", "body": "return bool(ctypes.windll.shell32.IsUserAnAdmin())", "docstring": "IsUserAnAdmin from Win32.\nReturn bool.\nMinimum supported OS: Windows XP, 
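A short sketch of the console and process helpers in this group (GetConsoleTitle, SetConsoleTitle, IsDesktopLocked, PlayWaveFile, IsProcess64Bit, IsUserAnAdmin). The .wav path and the console title are placeholder values; the import alias follows the convention noted above.

import os
from uiautomation import uiautomation as auto

print(auto.IsUserAnAdmin())                          # True when the script runs elevated
print(auto.IsDesktopLocked())                        # True on the lock screen / secure desktop
print(auto.GetConsoleTitle())
auto.SetConsoleTitle('automation console')           # placeholder title
print(auto.IsProcess64Bit(os.getpid()))              # True, False, or None (no access right)
# Placeholder path; isAsync=True returns immediately, an empty filePath stops playback.
auto.PlayWaveFile(r'C:\Windows\Media\notify.wav', isAsync=True)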
Windows Server 2003", "id": "f1782:m51"} {"signature": "def RunScriptAsAdmin(argv: list, workingDirectory: str = None, showFlag: int = SW.ShowNormal) -> bool:", "body": "args = ''.join(''.format(arg) for arg in argv)return ctypes.windll.shell32.ShellExecuteW(None, \"\", sys.executable, args, workingDirectory, showFlag) > ", "docstring": "Run a python script as administrator.\nSystem will show a popup dialog askes you whether to elevate as administrator if UAC is enabled.\nargv: list, a str list like sys.argv, argv[0] is the script file, argv[1:] are other arguments.\nworkingDirectory: str, the working directory for the script file.\nshowFlag: int, a value in class `SW`.\nReturn bool, True if succeed.", "id": "f1782:m52"} {"signature": "def SendKey(key: int, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "keybd_event(key, , KeyboardEventFlag.KeyDown | KeyboardEventFlag.ExtendedKey, )keybd_event(key, , KeyboardEventFlag.KeyUp | KeyboardEventFlag.ExtendedKey, )time.sleep(waitTime)", "docstring": "Simulate typing a key.\nkey: int, a value in class `Keys`.", "id": "f1782:m53"} {"signature": "def PressKey(key: int, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "keybd_event(key, , KeyboardEventFlag.KeyDown | KeyboardEventFlag.ExtendedKey, )time.sleep(waitTime)", "docstring": "Simulate a key down for key.\nkey: int, a value in class `Keys`.\nwaitTime: float.", "id": "f1782:m54"} {"signature": "def ReleaseKey(key: int, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "keybd_event(key, , KeyboardEventFlag.KeyUp | KeyboardEventFlag.ExtendedKey, )time.sleep(waitTime)", "docstring": "Simulate a key up for key.\nkey: int, a value in class `Keys`.\nwaitTime: float.", "id": "f1782:m55"} {"signature": "def IsKeyPressed(key: int) -> bool:", "body": "state = ctypes.windll.user32.GetAsyncKeyState(key)return bool(state & )", "docstring": "key: int, a value in class `Keys`.\nReturn bool.", "id": "f1782:m56"} {"signature": "def _CreateInput(structure) -> INPUT:", "body": "if isinstance(structure, MOUSEINPUT):return INPUT(InputType.Mouse, _INPUTUnion(mi=structure))if isinstance(structure, KEYBDINPUT):return INPUT(InputType.Keyboard, _INPUTUnion(ki=structure))if isinstance(structure, HARDWAREINPUT):return INPUT(InputType.Hardware, _INPUTUnion(hi=structure))raise TypeError('')", "docstring": "Create Win32 struct `INPUT` for `SendInput`.\nReturn `INPUT`.", "id": "f1782:m57"} {"signature": "def MouseInput(dx: int, dy: int, mouseData: int = , dwFlags: int = MouseEventFlag.LeftDown, time_: int = ) -> INPUT:", "body": "return _CreateInput(MOUSEINPUT(dx, dy, mouseData, dwFlags, time_, None))", "docstring": "Create Win32 struct `MOUSEINPUT` for `SendInput`.\nReturn `INPUT`.", "id": "f1782:m58"} {"signature": "def KeyboardInput(wVk: int, wScan: int, dwFlags: int = KeyboardEventFlag.KeyDown, time_: int = ) -> INPUT:", "body": "return _CreateInput(KEYBDINPUT(wVk, wScan, dwFlags, time_, None))", "docstring": "Create Win32 struct `KEYBDINPUT` for `SendInput`.", "id": "f1782:m59"} {"signature": "def HardwareInput(uMsg: int, param: int = ) -> INPUT:", "body": "return _CreateInput(HARDWAREINPUT(uMsg, param & , param >> & ))", "docstring": "Create Win32 struct `HARDWAREINPUT` for `SendInput`.", "id": "f1782:m60"} {"signature": "def SendInput(*inputs) -> int:", "body": "nInputs = len(inputs)LPINPUT = INPUT * nInputspInputs = LPINPUT(*inputs)cbSize = ctypes.c_int(ctypes.sizeof(INPUT))return ctypes.windll.user32.SendInput(nInputs, pInputs, cbSize)", "docstring": "SendInput from Win32.\ninput: 
`INPUT`.\nReturn int, the number of events that it successfully inserted into the keyboard or mouse input stream.\n If the function returns zero, the input was already blocked by another thread.", "id": "f1782:m61"} {"signature": "def SendUnicodeChar(char: str) -> int:", "body": "return SendInput(KeyboardInput(, ord(char), KeyboardEventFlag.KeyUnicode | KeyboardEventFlag.KeyDown),KeyboardInput(, ord(char), KeyboardEventFlag.KeyUnicode | KeyboardEventFlag.KeyUp))", "docstring": "Type a single unicode char.\nchar: str, len(char) must equal to 1.\nReturn int, the number of events that it successfully inserted into the keyboard or mouse input stream.\n If the function returns zero, the input was already blocked by another thread.", "id": "f1782:m62"} {"signature": "def _VKtoSC(key: int) -> int:", "body": "if key in _SCKeys:return _SCKeys[key]scanCode = ctypes.windll.user32.MapVirtualKeyA(key, )if not scanCode:return keyList = [Keys.VK_APPS, Keys.VK_CANCEL, Keys.VK_SNAPSHOT, Keys.VK_DIVIDE, Keys.VK_NUMLOCK]if key in keyList:scanCode |= return scanCode", "docstring": "This function is only for internal use in SendKeys.\nkey: int, a value in class `Keys`.\nReturn int.", "id": "f1782:m63"} {"signature": "def SendKeys(text: str, interval: float = , waitTime: float = OPERATION_WAIT_TIME, debug: bool = False) -> None:", "body": "holdKeys = ('', '', '', '', '', '', '', '', '', '', '', '', '', '', '')keys = []printKeys = []i = insertIndex = length = len(text)hold = Falseinclude = FalselastKeyValue = Nonewhile True:if text[i] == '':rindex = text.find('', i)if rindex == i + :rindex = text.find('', i + )if rindex == -:raise ValueError('')key = text[i + :rindex]key = [it for it in key.split('') if it]if not key:raise ValueError(''.format(text[i:rindex + ]))if (len(key) == and not key[].isdigit()) or len(key) > :raise ValueError(''.format(text[i:rindex + ]))upperKey = key[].upper()count = if len(key) > :count = int(key[])for j in range(count):if hold:if upperKey in SpecialKeyNames:keyValue = SpecialKeyNames[upperKey]if type(lastKeyValue) == type(keyValue) and lastKeyValue == keyValue:insertIndex += printKeys.insert(insertIndex, (key[], ''))printKeys.insert(insertIndex + , (key[], ''))keys.insert(insertIndex, (keyValue, KeyboardEventFlag.KeyDown | KeyboardEventFlag.ExtendedKey))keys.insert(insertIndex + , (keyValue, KeyboardEventFlag.KeyUp | KeyboardEventFlag.ExtendedKey))lastKeyValue = keyValueelif key[] in CharacterCodes:keyValue = CharacterCodes[key[]]if type(lastKeyValue) == type(keyValue) and lastKeyValue == keyValue:insertIndex += printKeys.insert(insertIndex, (key[], ''))printKeys.insert(insertIndex + , (key[], ''))keys.insert(insertIndex, (keyValue, KeyboardEventFlag.KeyDown | KeyboardEventFlag.ExtendedKey))keys.insert(insertIndex + , (keyValue, KeyboardEventFlag.KeyUp | KeyboardEventFlag.ExtendedKey))lastKeyValue = keyValueelse:printKeys.insert(insertIndex, (key[], ''))keys.insert(insertIndex, (key[], ''))lastKeyValue = key[]if include:insertIndex += else:if upperKey in holdKeys:insertIndex += else:hold = Falseelse:if upperKey in SpecialKeyNames:keyValue = SpecialKeyNames[upperKey]printKeys.append((key[], ''))printKeys.append((key[], ''))keys.append((keyValue, KeyboardEventFlag.KeyDown | KeyboardEventFlag.ExtendedKey))keys.append((keyValue, KeyboardEventFlag.KeyUp | KeyboardEventFlag.ExtendedKey))lastKeyValue = keyValueif upperKey in holdKeys:hold = TrueinsertIndex = len(keys) - else:hold = Falseelse:printKeys.append((key[], ''))keys.append((key[], ''))lastKeyValue = key[]i = rindex + elif text[i] 
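A lower-level sketch using the SendInput helpers above (KeyboardInput, SendInput, SendUnicodeChar). Passing 0 as the scan code and relying only on the virtual-key value is an assumption made for brevity; SendUnicodeChar expects exactly one character.

from uiautomation import uiautomation as auto

print(auto.SendUnicodeChar('\u00e9'))    # types one unicode char; returns the inserted event count
down = auto.KeyboardInput(auto.Keys.VK_CONTROL, 0, auto.KeyboardEventFlag.KeyDown)  # scan code 0 assumed
up = auto.KeyboardInput(auto.Keys.VK_CONTROL, 0, auto.KeyboardEventFlag.KeyUp)
print(auto.SendInput(down, up))          # 0 means another thread blocked the input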
== '':if hold:include = Trueelse:printKeys.append((text[i], ''))keys.append((text[i], ''))lastKeyValue = text[i]i += elif text[i] == '':if hold:include = Falsehold = Falseelse:printKeys.append((text[i], ''))keys.append((text[i], ''))lastKeyValue = text[i]i += else:if hold:if text[i] in CharacterCodes:keyValue = CharacterCodes[text[i]]if include and type(lastKeyValue) == type(keyValue) and lastKeyValue == keyValue:insertIndex += printKeys.insert(insertIndex, (text[i], ''))printKeys.insert(insertIndex + , (text[i], ''))keys.insert(insertIndex, (keyValue, KeyboardEventFlag.KeyDown | KeyboardEventFlag.ExtendedKey))keys.insert(insertIndex + , (keyValue, KeyboardEventFlag.KeyUp | KeyboardEventFlag.ExtendedKey))lastKeyValue = keyValueelse:printKeys.append((text[i], ''))keys.append((text[i], ''))lastKeyValue = text[i]if include:insertIndex += else:hold = Falseelse:printKeys.append((text[i], ''))keys.append((text[i], ''))lastKeyValue = text[i]i += if i >= length:breakhotkeyInterval = for i, key in enumerate(keys):if key[] == '':SendUnicodeChar(key[])time.sleep(interval)if debug:Logger.ColorfullyWrite(''.format(printKeys[i], interval), writeToFile=False)else:scanCode = _VKtoSC(key[])keybd_event(key[], scanCode, key[], )if debug:Logger.Write(printKeys[i], ConsoleColor.DarkGreen, writeToFile=False)if i + == len(keys):time.sleep(interval)if debug:Logger.Write(''.format(interval), writeToFile=False)else:if key[] & KeyboardEventFlag.KeyUp:if keys[i + ][] == '' or keys[i + ][] & KeyboardEventFlag.KeyUp == :time.sleep(interval)if debug:Logger.Write(''.format(interval), writeToFile=False)else:time.sleep(hotkeyInterval) if debug:Logger.Write(''.format(hotkeyInterval), writeToFile=False)else: time.sleep(hotkeyInterval)if debug:Logger.Write(''.format(hotkeyInterval), writeToFile=False)time.sleep(waitTime)", "docstring": "Simulate typing keys on keyboard.\ntext: str, keys to type.\ninterval: float, seconds between keys.\nwaitTime: float.\ndebug: bool, if True, print the keys.\nExamples:\n{Ctrl}, {Delete} ... 
are special keys' name in SpecialKeyNames.\nSendKeys('{Ctrl}a{Delete}{Ctrl}v{Ctrl}s{Ctrl}{Shift}s{Win}e{PageDown}') #press Ctrl+a, Delete, Ctrl+v, Ctrl+s, Ctrl+Shift+s, Win+e, PageDown\nSendKeys('{Ctrl}(AB)({Shift}(123))') #press Ctrl+A+B, type (, press Shift+1+2+3, type ), if () follows a hold key, hold key won't release util )\nSendKeys('{Ctrl}{a 3}') #press Ctrl+a at the same time, release Ctrl+a, then type a 2 times\nSendKeys('{a 3}{B 5}') #type a 3 times, type B 5 times\nSendKeys('{{}Hello{}}abc {a}{b}{c} test{} 3}{!}{a} (){(}{)}') #type: {Hello}abc abc test}}}!a ()()\nSendKeys('0123456789{Enter}')\nSendKeys('ABCDEFGHIJKLMNOPQRSTUVWXYZ{Enter}')\nSendKeys('abcdefghijklmnopqrstuvwxyz{Enter}')\nSendKeys('`~!@#$%^&*()-_=+{Enter}')\nSendKeys('[]{{}{}}\\\\|;:\\'\\\",<.>/?{Enter}')", "id": "f1782:m64"} {"signature": "def GetPatternIdInterface(patternId: int):", "body": "global _PatternIdInterfacesif not _PatternIdInterfaces:_PatternIdInterfaces = {PatternId.DockPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationDockPattern,PatternId.ExpandCollapsePattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationExpandCollapsePattern,PatternId.GridItemPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationGridItemPattern,PatternId.GridPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationGridPattern,PatternId.InvokePattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationInvokePattern,PatternId.ItemContainerPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationItemContainerPattern,PatternId.LegacyIAccessiblePattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationLegacyIAccessiblePattern,PatternId.MultipleViewPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationMultipleViewPattern,PatternId.RangeValuePattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationRangeValuePattern,PatternId.ScrollItemPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationScrollItemPattern,PatternId.ScrollPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationScrollPattern,PatternId.SelectionItemPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationSelectionItemPattern,PatternId.SelectionPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationSelectionPattern,PatternId.SynchronizedInputPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationSynchronizedInputPattern,PatternId.TableItemPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationTableItemPattern,PatternId.TablePattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationTablePattern,PatternId.TextPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationTextPattern,PatternId.TogglePattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationTogglePattern,PatternId.TransformPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationTransformPattern,PatternId.ValuePattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationValuePattern,PatternId.VirtualizedItemPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationVirtualizedItemPattern,PatternId.WindowPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationWindowPattern,}debug = Falsetry:_PatternIdInterfaces[PatternId.AnnotationPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationAnnotationPatternexcept:if debug: Logger.WriteLine('', ConsoleColor.Yellow)try:_PatternIdInterfaces[PatternId.CustomNavigationPattern] = 
_AutomationClient.instance().UIAutomationCore.IUIAutomationCustomNavigationPatternexcept:if debug: Logger.WriteLine('', ConsoleColor.Yellow)try:_PatternIdInterfaces[PatternId.DragPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationDragPatternexcept:if debug: Logger.WriteLine('', ConsoleColor.Yellow)try:_PatternIdInterfaces[PatternId.DropTargetPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationDropTargetPatternexcept:if debug: Logger.WriteLine('', ConsoleColor.Yellow)try:_PatternIdInterfaces[PatternId.ObjectModelPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationObjectModelPatternexcept:if debug: Logger.WriteLine('', ConsoleColor.Yellow)try:_PatternIdInterfaces[PatternId.SpreadsheetItemPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationSpreadsheetItemPatternexcept:if debug: Logger.WriteLine('', ConsoleColor.Yellow)try:_PatternIdInterfaces[PatternId.SpreadsheetPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationSpreadsheetPatternexcept:if debug: Logger.WriteLine('', ConsoleColor.Yellow)try:_PatternIdInterfaces[PatternId.StylesPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationStylesPatternexcept:if debug: Logger.WriteLine('', ConsoleColor.Yellow)try:_PatternIdInterfaces[PatternId.TextChildPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationTextChildPatternexcept:if debug: Logger.WriteLine('', ConsoleColor.Yellow)try:_PatternIdInterfaces[PatternId.TextEditPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationTextEditPatternexcept:if debug: Logger.WriteLine('', ConsoleColor.Yellow)try:_PatternIdInterfaces[PatternId.TextPattern2] = _AutomationClient.instance().UIAutomationCore.IUIAutomationTextPattern2except:if debug: Logger.WriteLine('', ConsoleColor.Yellow)try:_PatternIdInterfaces[PatternId.TransformPattern2] = _AutomationClient.instance().UIAutomationCore.IUIAutomationTransformPattern2except:if debug: Logger.WriteLine('', ConsoleColor.Yellow)return _PatternIdInterfaces[patternId]", "docstring": "Get pattern COM interface by pattern id.\npatternId: int, a value in class `PatternId`.\nReturn comtypes._cominterface_meta.", "id": "f1782:m65"} {"signature": "def CreatePattern(patternId: int, pattern: ctypes.POINTER(comtypes.IUnknown)):", "body": "subPattern = pattern.QueryInterface(GetPatternIdInterface(patternId))if subPattern:return PatternConstructors[patternId](pattern=subPattern)", "docstring": "Create a concreate pattern by pattern id and pattern(POINTER(IUnknown)).", "id": "f1782:m66"} {"signature": "def SetGlobalSearchTimeout(seconds: float) -> None:", "body": "global TIME_OUT_SECONDTIME_OUT_SECOND = seconds", "docstring": "seconds: float.\nTo make this available, you need explicitly import uiautomation:\n from uiautomation import uiautomation as auto\n auto.SetGlobalSearchTimeout(10)", "id": "f1782:m67"} {"signature": "def WaitForExist(control: Control, timeout: float) -> bool:", "body": "return control.Exists(timeout, )", "docstring": "Check if control exists in timeout seconds.\ncontrol: `Control` or its subclass.\ntimeout: float.\nReturn bool.", "id": "f1782:m68"} {"signature": "def WaitForDisappear(control: Control, timeout: float) -> bool:", "body": "return control.Disappears(timeout, )", "docstring": "Check if control disappears in timeout seconds.\ncontrol: `Control` or its subclass.\ntimeout: float.\nReturn bool.", "id": "f1782:m69"} {"signature": "def WalkTree(top, getChildren: Callable = None, getFirstChild: Callable = None, 
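A sketch combining SetGlobalSearchTimeout, WaitForExist/WaitForDisappear and the pattern lookup that GetPatternIdInterface backs, via Control.GetPattern (the same call LogControl uses below). PatternId.WindowPattern is one of the ids registered above; GetPattern may return None when the control does not support the pattern.

from uiautomation import uiautomation as auto

auto.SetGlobalSearchTimeout(10)                       # as shown in the docstring above
control = auto.GetFocusedControl()
if auto.WaitForExist(control, 3):                     # control.Exists(3, ...) under the hood
    wp = control.GetPattern(auto.PatternId.WindowPattern)
    print(wp)                                         # None if WindowPattern is unsupported
else:
    print(auto.WaitForDisappear(control, 3))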
getNextSibling: Callable = None, yieldCondition: Callable = None, includeTop: bool = False, maxDepth: int = ):", "body": "if maxDepth <= :returndepth = if getChildren:if includeTop:if not yieldCondition or yieldCondition(top, ):yield top, , children = getChildren(top)childList = [children]while depth >= : lastItems = childList[-]if lastItems:if not yieldCondition or yieldCondition(lastItems[], depth + ):yield lastItems[], depth + , len(lastItems) - if depth + < maxDepth:children = getChildren(lastItems[])if children:depth += childList.append(children)del lastItems[]else:del childList[depth]depth -= elif getFirstChild and getNextSibling:if includeTop:if not yieldCondition or yieldCondition(top, ):yield top, child = getFirstChild(top)childList = [child]while depth >= : lastItem = childList[-]if lastItem:if not yieldCondition or yieldCondition(lastItem, depth + ):yield lastItem, depth + child = getNextSibling(lastItem)childList[depth] = childif depth + < maxDepth:child = getFirstChild(lastItem)if child:depth += childList.append(child)else:del childList[depth]depth -= ", "docstring": "Walk a tree not using recursive algorithm.\ntop: a tree node.\ngetChildren: function(treeNode) -> list.\ngetNextSibling: function(treeNode) -> treeNode.\ngetNextSibling: function(treeNode) -> treeNode.\nyieldCondition: function(treeNode, depth) -> bool.\nincludeTop: bool, if True yield top first.\nmaxDepth: int, enum depth.\n\nIf getChildren is valid, ignore getFirstChild and getNextSibling,\n yield 3 items tuple: (treeNode, depth, remain children count in current depth).\nIf getChildren is not valid, using getFirstChild and getNextSibling,\n yield 2 items tuple: (treeNode, depth).\nIf yieldCondition is not None, only yield tree nodes that yieldCondition(treeNode, depth)->bool returns True.\n\nFor example:\ndef GetDirChildren(dir_):\n if os.path.isdir(dir_):\n return [os.path.join(dir_, it) for it in os.listdir(dir_)]\nfor it, depth, leftCount in WalkTree('D:\\\\', getChildren= GetDirChildren):\n print(it, depth, leftCount)", "id": "f1782:m70"} {"signature": "def GetRootControl() -> PaneControl:", "body": "return Control.CreateControlFromElement(_AutomationClient.instance().IUIAutomation.GetRootElement())", "docstring": "Get root control, the Desktop window.\nReturn `PaneControl`.", "id": "f1782:m71"} {"signature": "def GetFocusedControl() -> Control:", "body": "return Control.CreateControlFromElement(_AutomationClient.instance().IUIAutomation.GetFocusedElement())", "docstring": "Return `Control` subclass.", "id": "f1782:m72"} {"signature": "def GetForegroundControl() -> Control:", "body": "return ControlFromHandle(GetForegroundWindow())", "docstring": "Return `Control` subclass.", "id": "f1782:m73"} {"signature": "def GetConsoleWindow() -> WindowControl:", "body": "return ControlFromHandle(ctypes.windll.kernel32.GetConsoleWindow())", "docstring": "Return `WindowControl`, a console window that runs python.", "id": "f1782:m74"} {"signature": "def ControlFromPoint(x: int, y: int) -> Control:", "body": "element = _AutomationClient.instance().IUIAutomation.ElementFromPoint(ctypes.wintypes.POINT(x, y))return Control.CreateControlFromElement(element)", "docstring": "Call IUIAutomation ElementFromPoint x,y. 
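A sketch of the root/focused control helpers together with the sibling-walking methods that WalkControl relies on (GetFirstChildControl, GetNextSiblingControl). The printed properties (ControlTypeName, ClassName, Name) are the same ones LogControl reports.

from uiautomation import uiautomation as auto

root = auto.GetRootControl()                          # the Desktop PaneControl
print(auto.GetFocusedControl().ControlTypeName)
# First-level children of the desktop are the top-level windows.
child = root.GetFirstChildControl()
while child:
    print(child.ControlTypeName, child.ClassName, child.Name)
    child = child.GetNextSiblingControl()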
May return None if mouse is over cmd's title bar icon.\nReturn `Control` subclass or None.", "id": "f1782:m75"} {"signature": "def ControlFromPoint2(x: int, y: int) -> Control:", "body": "return Control.CreateControlFromElement(_AutomationClient.instance().IUIAutomation.ElementFromHandle(WindowFromPoint(x, y)))", "docstring": "Get a native handle from point x,y and call IUIAutomation.ElementFromHandle.\nReturn `Control` subclass.", "id": "f1782:m76"} {"signature": "def ControlFromCursor() -> Control:", "body": "x, y = GetCursorPos()return ControlFromPoint(x, y)", "docstring": "Call ControlFromPoint with current cursor point.\nReturn `Control` subclass.", "id": "f1782:m77"} {"signature": "def ControlFromCursor2() -> Control:", "body": "x, y = GetCursorPos()return ControlFromPoint2(x, y)", "docstring": "Call ControlFromPoint2 with current cursor point.\nReturn `Control` subclass.", "id": "f1782:m78"} {"signature": "def ControlFromHandle(handle: int) -> Control:", "body": "return Control.CreateControlFromElement(_AutomationClient.instance().IUIAutomation.ElementFromHandle(handle))", "docstring": "Call IUIAutomation.ElementFromHandle with a native handle.\nhandle: int, a native window handle.\nReturn `Control` subclass.", "id": "f1782:m79"} {"signature": "def ControlsAreSame(control1: Control, control2: Control) -> bool:", "body": "return bool(_AutomationClient.instance().IUIAutomation.CompareElements(control1.Element, control2.Element))", "docstring": "control1: `Control` or its subclass.\ncontrol2: `Control` or its subclass.\nReturn bool, True if control1 and control2 represent the same control otherwise False.", "id": "f1782:m80"} {"signature": "def WalkControl(control: Control, includeTop: bool = False, maxDepth: int = ):", "body": "if includeTop:yield control, if maxDepth <= :returndepth = child = control.GetFirstChildControl()controlList = [child]while depth >= :lastControl = controlList[-]if lastControl:yield lastControl, depth + child = lastControl.GetNextSiblingControl()controlList[depth] = childif depth + < maxDepth:child = lastControl.GetFirstChildControl()if child:depth += controlList.append(child)else:del controlList[depth]depth -= ", "docstring": "control: `Control` or its subclass.\nincludeTop: bool, if True, yield (control, 0) first.\nmaxDepth: int, enum depth.\nYield 2 items tuple(control: Control, depth: int).", "id": "f1782:m81"} {"signature": "def LogControl(control: Control, depth: int = , showAllName: bool = True) -> None:", "body": "def getKeyName(theDict, theValue):for key in theDict:if theValue == theDict[key]:return keyindent = '' * depth * Logger.Write(''.format(indent))Logger.Write(control.ControlTypeName, ConsoleColor.DarkGreen)Logger.Write('')Logger.Write(control.ClassName, ConsoleColor.DarkGreen)Logger.Write('')Logger.Write(control.AutomationId, ConsoleColor.DarkGreen)Logger.Write('')Logger.Write(control.BoundingRectangle, ConsoleColor.DarkGreen)Logger.Write('')Logger.Write(control.Name, ConsoleColor.DarkGreen, printTruncateLen= if showAllName else )Logger.Write('')Logger.Write(''.format(control.NativeWindowHandle), ConsoleColor.DarkGreen)Logger.Write('')Logger.Write(depth, ConsoleColor.DarkGreen)supportedPatterns = list(filter(lambda t: t[], ((control.GetPattern(id_), name) for id_, name in PatternIdNames.items())))for pt, name in supportedPatterns:if isinstance(pt, ValuePattern):Logger.Write('')Logger.Write(pt.Value, ConsoleColor.DarkGreen, printTruncateLen= if showAllName else )elif isinstance(pt, RangeValuePattern):Logger.Write('')Logger.Write(pt.Value, 
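A sketch of the point/cursor lookups and ControlsAreSame, plus a shallow WalkControl pass; the two cursor variants can resolve to different elements, which is exactly what ControlsAreSame is useful for checking.

from uiautomation import uiautomation as auto

c1 = auto.ControlFromCursor()         # element-from-point; may be None in rare cases
c2 = auto.ControlFromCursor2()        # handle-based variant
if c1 and c2:
    print(auto.ControlsAreSame(c1, c2))
for control, depth in auto.WalkControl(auto.GetForegroundControl(), includeTop=True, maxDepth=2):
    print(' ' * 4 * depth, control.ControlTypeName, control.Name)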
ConsoleColor.DarkGreen)elif isinstance(pt, TogglePattern):Logger.Write('')Logger.Write('' + getKeyName(ToggleState.__dict__, pt.ToggleState), ConsoleColor.DarkGreen)elif isinstance(pt, SelectionItemPattern):Logger.Write('')Logger.Write(pt.IsSelected, ConsoleColor.DarkGreen)elif isinstance(pt, ExpandCollapsePattern):Logger.Write('')Logger.Write('' + getKeyName(ExpandCollapseState.__dict__, pt.ExpandCollapseState), ConsoleColor.DarkGreen)elif isinstance(pt, ScrollPattern):Logger.Write('')Logger.Write(pt.HorizontalScrollPercent, ConsoleColor.DarkGreen)Logger.Write('')Logger.Write(pt.VerticalScrollPercent, ConsoleColor.DarkGreen)elif isinstance(pt, GridPattern):Logger.Write('')Logger.Write(pt.RowCount, ConsoleColor.DarkGreen)Logger.Write('')Logger.Write(pt.ColumnCount, ConsoleColor.DarkGreen)elif isinstance(pt, GridItemPattern):Logger.Write('')Logger.Write(pt.Column, ConsoleColor.DarkGreen)Logger.Write('')Logger.Write(pt.Column, ConsoleColor.DarkGreen)elif isinstance(pt, TextPattern):Logger.Write('')Logger.Write(pt.DocumentRange.GetText(), ConsoleColor.DarkGreen)Logger.Write('')for pt, name in supportedPatterns:Logger.Write('' + name, ConsoleColor.DarkGreen)Logger.Write('')", "docstring": "Print and log control's properties.\ncontrol: `Control` or its subclass.\ndepth: int, current depth.\nshowAllName: bool, if False, print the first 30 characters of control.Name.", "id": "f1782:m82"} {"signature": "def EnumAndLogControl(control: Control, maxDepth: int = , showAllName: bool = True, startDepth: int = ) -> None:", "body": "for c, d in WalkControl(control, True, maxDepth):LogControl(c, d + startDepth, showAllName)", "docstring": "Print and log control and its descendants' propertyies.\ncontrol: `Control` or its subclass.\nmaxDepth: int, enum depth.\nshowAllName: bool, if False, print the first 30 characters of control.Name.\nstartDepth: int, control's current depth.", "id": "f1782:m83"} {"signature": "def EnumAndLogControlAncestors(control: Control, showAllName: bool = True) -> None:", "body": "lists = []while control:lists.insert(, control)control = control.GetParentControl()for i, control in enumerate(lists):LogControl(control, i, showAllName)", "docstring": "Print and log control and its ancestors' propertyies.\ncontrol: `Control` or its subclass.\nshowAllName: bool, if False, print the first 30 characters of control.Name.", "id": "f1782:m84"} {"signature": "def FindControl(control: Control, compare: Callable, maxDepth: int = , findFromSelf: bool = False, foundIndex: int = ) -> Control:", "body": "foundCount = if not control:control = GetRootControl()traverseCount = for child, depth in WalkControl(control, findFromSelf, maxDepth):traverseCount += if compare(child, depth):foundCount += if foundCount == foundIndex:child.traverseCount = traverseCountreturn child", "docstring": "control: `Control` or its subclass.\ncompare: compare function with parameters (control: Control, depth: int) which returns bool.\nmaxDepth: int, enum depth.\nfindFromSelf: bool, if False, do not compare self.\nfoundIndex: int, starts with 1, >= 1.\nReturn `Control` subclass or None if not find.", "id": "f1782:m85"} {"signature": "def ShowDesktop(waitTime: float = ) -> None:", "body": "SendKeys('')time.sleep(waitTime)", "docstring": "Show Desktop by pressing win + d", "id": "f1782:m86"} {"signature": "def InitializeUIAutomationInCurrentThread() -> None:", "body": "comtypes.CoInitializeEx()", "docstring": "Initialize UIAutomation in a new thread.\nIf you want to use functionalities related to Controls and Patterns in a new 
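A sketch of the logging and search helpers above (EnumAndLogControl, EnumAndLogControlAncestors, FindControl, LogControl). The 'EditControl' string used in the compare lambda is an assumed ControlTypeName value, not something stated in these records.

from uiautomation import uiautomation as auto

top = auto.GetForegroundControl()
auto.EnumAndLogControl(top, maxDepth=2)               # log the subtree, two levels deep
auto.EnumAndLogControlAncestors(top)                  # log the chain up to the desktop

# compare(control, depth) -> bool; 'EditControl' is an assumed ControlTypeName value.
edit = auto.FindControl(top, lambda c, d: c.ControlTypeName == 'EditControl', maxDepth=10)
if edit:
    auto.LogControl(edit)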
thread.\nYou must call this function first in the new thread.\nBut you can't use use a Control or a Pattern created in a different thread.\nSo you can't create a Control or a Pattern in main thread and then pass it to a new thread and use it.", "id": "f1782:m87"} {"signature": "def UninitializeUIAutomationInCurrentThread() -> None:", "body": "comtypes.CoUninitialize()", "docstring": "Uninitialize UIAutomation in a new thread after calling InitializeUIAutomationInCurrentThread.\nYou must call this function when the new thread exits if you have called InitializeUIAutomationInThisThread in the same thread.", "id": "f1782:m88"} {"signature": "def WaitHotKeyReleased(hotkey: tuple) -> None:", "body": "mod = {ModifierKey.Alt: Keys.VK_MENU,ModifierKey.Control: Keys.VK_CONTROL,ModifierKey.Shift: Keys.VK_SHIFT,ModifierKey.Win: Keys.VK_LWIN}while True:time.sleep()if IsKeyPressed(hotkey[]):continuefor k, v in mod.items():if k & hotkey[]:if IsKeyPressed(v):breakelse:break", "docstring": "hotkey: tuple, two ints tuple(modifierKey, key)", "id": "f1782:m89"} {"signature": "def RunByHotKey(keyFunctions: dict, stopHotKey: tuple = None, exitHotKey: tuple = (ModifierKey.Control, Keys.VK_D), waitHotKeyReleased: bool = True) -> None:", "body": "from threading import Thread, Event, currentThreadimport tracebackdef getModName(theDict, theValue):name = ''for key in theDict:if isinstance(theDict[key], int) and theValue & theDict[key]:if name:name += ''name += keyreturn namedef getKeyName(theDict, theValue):for key in theDict:if theValue == theDict[key]:return keydef releaseAllKey():for key, value in Keys.__dict__.items():if isinstance(value, int) and key.startswith(''):if IsKeyPressed(value):ReleaseKey(value)def threadFunc(function, stopEvent, hotkey, hotkeyName):if waitHotKeyReleased:WaitHotKeyReleased(hotkey)try:function(stopEvent)except Exception as ex:Logger.ColorfullyWrite(''.format(ex.__class__.__name__, hotkeyName), writeToFile=False)print(traceback.format_exc())finally:releaseAllKey() Logger.ColorfullyWrite(''.format(currentThread(), function.__name__, hotkeyName), ConsoleColor.DarkYellow, writeToFile=False)stopHotKeyId = exitHotKeyId = hotKeyId = registed = Trueid2HotKey = {}id2Function = {}id2Thread = {}id2Name = {}for hotkey in keyFunctions:id2HotKey[hotKeyId] = hotkeyid2Function[hotKeyId] = keyFunctions[hotkey]id2Thread[hotKeyId] = NonemodName = getModName(ModifierKey.__dict__, hotkey[])keyName = getKeyName(Keys.__dict__, hotkey[])id2Name[hotKeyId] = str((modName, keyName))if ctypes.windll.user32.RegisterHotKey(, hotKeyId, hotkey[], hotkey[]):Logger.ColorfullyWrite(''.format((modName, keyName)), writeToFile=False)else:registed = FalseLogger.ColorfullyWrite(''.format((modName, keyName)), writeToFile=False)hotKeyId += if stopHotKey and len(stopHotKey) == :modName = getModName(ModifierKey.__dict__, stopHotKey[])keyName = getKeyName(Keys.__dict__, stopHotKey[])if ctypes.windll.user32.RegisterHotKey(, stopHotKeyId, stopHotKey[], stopHotKey[]):Logger.ColorfullyWrite(''.format((modName, keyName)), writeToFile=False)else:registed = FalseLogger.ColorfullyWrite(''.format((modName, keyName)), writeToFile=False)if not registed:returnif exitHotKey and len(exitHotKey) == :modName = getModName(ModifierKey.__dict__, exitHotKey[])keyName = getKeyName(Keys.__dict__, exitHotKey[])if ctypes.windll.user32.RegisterHotKey(, exitHotKeyId, exitHotKey[], exitHotKey[]):Logger.ColorfullyWrite(''.format((modName, keyName)), writeToFile=False)else:Logger.ColorfullyWrite(''.format((modName, keyName)), writeToFile=False)funcThread = 
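A sketch of the per-thread COM initialization the two docstrings above require: initialize in the worker thread, create and use Controls and Patterns only inside that thread, and uninitialize before the thread exits.

import threading
from uiautomation import uiautomation as auto

def worker():
    auto.InitializeUIAutomationInCurrentThread()
    try:
        # Controls/Patterns must be created and used inside this same thread.
        auto.LogControl(auto.GetFocusedControl())
    finally:
        auto.UninitializeUIAutomationInCurrentThread()

t = threading.Thread(target=worker)
t.start()
t.join()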
NonelivingThreads = []stopEvent = Event()msg = ctypes.wintypes.MSG()while ctypes.windll.user32.GetMessageW(ctypes.byref(msg), ctypes.c_void_p(), , ) != :if msg.message == : if msg.wParam in id2HotKey:if msg.lParam & == id2HotKey[msg.wParam][] and msg.lParam >> & == id2HotKey[msg.wParam][]:Logger.ColorfullyWrite(''.format(id2Name[msg.wParam]), writeToFile=False)if not id2Thread[msg.wParam]:stopEvent.clear()funcThread = Thread(None, threadFunc, args=(id2Function[msg.wParam], stopEvent, id2HotKey[msg.wParam], id2Name[msg.wParam]))funcThread.start()id2Thread[msg.wParam] = funcThreadelse:if id2Thread[msg.wParam].is_alive():Logger.WriteLine(''.format(id2Thread[msg.wParam], id2Name[msg.wParam]), ConsoleColor.Yellow, writeToFile=False)else:stopEvent.clear()funcThread = Thread(None, threadFunc, args=(id2Function[msg.wParam], stopEvent, id2HotKey[msg.wParam], id2Name[msg.wParam]))funcThread.start()id2Thread[msg.wParam] = funcThreadelif stopHotKeyId == msg.wParam:if msg.lParam & == stopHotKey[] and msg.lParam >> & == stopHotKey[]:Logger.Write('', ConsoleColor.DarkYellow, writeToFile=False)stopEvent.set()for id_ in id2Thread:if id2Thread[id_]:if id2Thread[id_].is_alive():livingThreads.append((id2Thread[id_], id2Name[id_]))id2Thread[id_] = Noneelif exitHotKeyId == msg.wParam:if msg.lParam & == exitHotKey[] and msg.lParam >> & == exitHotKey[]:Logger.Write('', ConsoleColor.DarkYellow, writeToFile=False)stopEvent.set()for id_ in id2Thread:if id2Thread[id_]:if id2Thread[id_].is_alive():livingThreads.append((id2Thread[id_], id2Name[id_]))id2Thread[id_] = Nonebreakfor thread, hotkeyName in livingThreads:if thread.is_alive():Logger.Write(''.format(thread, hotkeyName), ConsoleColor.DarkYellow, writeToFile=False)thread.join()os._exit()", "docstring": "Bind functions with hotkeys, the function will be run or stopped in another thread when the hotkey is pressed.\nkeyFunctions: hotkey function dict, like {(uiautomation.ModifierKey.Control, uiautomation.Keys.VK_1) : func}\nstopHotKey: hotkey tuple\nexitHotKey: hotkey tuple\nwaitHotKeyReleased: bool, if True, hotkey function will be triggered after the hotkey is released\n\ndef main(stopEvent):\n while True:\n if stopEvent.is_set(): # must check stopEvent.is_set() if you want to stop when stop hotkey is pressed\n break\n print(n)\n n += 1\n stopEvent.wait(1)\n print('main exit')\n\nuiautomation.RunByHotKey({(uiautomation.ModifierKey.Control, uiautomation.Keys.VK_1) : main}\n , (uiautomation.ModifierKey.Control | uiautomation.ModifierKey.Shift, uiautomation.Keys.VK_2))", "id": "f1782:m90"} {"signature": "@classmethoddef instance(cls) -> '':", "body": "if cls._instance is None:cls._instance = cls()return cls._instance", "docstring": "Singleton instance (this prevents com creation on import).", "id": "f1782:c0:m0"} {"signature": "@classmethoddef instance(cls) -> '':", "body": "if cls._instance is None:cls._instance = cls()return cls._instance", "docstring": "Singleton instance (this prevents com creation on import).", "id": "f1782:c1:m0"} {"signature": "@staticmethoddef Write(log: Any, consoleColor: int = ConsoleColor.Default, writeToFile: bool = True, printToStdout: bool = True, logFile: str = None, printTruncateLen: int = ) -> None:", "body": "if not isinstance(log, str):log = str(log)if printToStdout and sys.stdout:isValidColor = (consoleColor >= ConsoleColor.Black and consoleColor <= ConsoleColor.White)if isValidColor:SetConsoleColor(consoleColor)try:if printTruncateLen > and len(log) > printTruncateLen:sys.stdout.write(log[:printTruncateLen] + 
'')else:sys.stdout.write(log)except Exception as ex:SetConsoleColor(ConsoleColor.Red)isValidColor = Truesys.stdout.write(ex.__class__.__name__ + '')if log.endswith(''):sys.stdout.write('')if isValidColor:ResetConsoleColor()sys.stdout.flush()if not writeToFile:returnfileName = logFile if logFile else Logger.FileNametry:fout = open(fileName, '', encoding='')fout.write(log)except Exception as ex:if sys.stdout:sys.stdout.write(ex.__class__.__name__ + '')finally:if fout:fout.close()", "docstring": "log: any type.\nconsoleColor: int, a value in class `ConsoleColor`, such as `ConsoleColor.DarkGreen`.\nwriteToFile: bool.\nprintToStdout: bool.\nlogFile: str, log file path.\nprintTruncateLen: int, if <= 0, log is not truncated when print.", "id": "f1782:c41:m1"} {"signature": "@staticmethoddef WriteLine(log: Any, consoleColor: int = -, writeToFile: bool = True, printToStdout: bool = True, logFile: str = None) -> None:", "body": "Logger.Write(''.format(log), consoleColor, writeToFile, printToStdout, logFile)", "docstring": "log: any type.\nconsoleColor: int, a value in class `ConsoleColor`, such as `ConsoleColor.DarkGreen`.\nwriteToFile: bool.\nprintToStdout: bool.\nlogFile: str, log file path.", "id": "f1782:c41:m2"} {"signature": "@staticmethoddef ColorfullyWrite(log: str, consoleColor: int = -, writeToFile: bool = True, printToStdout: bool = True, logFile: str = None) -> None:", "body": "text = []start = while True:index1 = log.find('', start)if index1 >= :if index1 > start:text.append((log[start:index1], consoleColor))index2 = log.find('>', index1)colorName = log[index1+:index2]index3 = log.find('', index2 + )text.append((log[index2 + :index3], Logger.ColorNames[colorName]))start = index3 + else:if start < len(log):text.append((log[start:], consoleColor))breakfor t, c in text:Logger.Write(t, c, writeToFile, printToStdout, logFile)", "docstring": "log: str.\nconsoleColor: int, a value in class `ConsoleColor`, such as `ConsoleColor.DarkGreen`.\nwriteToFile: bool.\nprintToStdout: bool.\nlogFile: str, log file path.\nColorfullyWrite('Hello Green !!!'), color name must be in Logger.ColorNames.", "id": "f1782:c41:m3"} {"signature": "@staticmethoddef ColorfullyWriteLine(log: str, consoleColor: int = -, writeToFile: bool = True, printToStdout: bool = True, logFile: str = None) -> None:", "body": "Logger.ColorfullyWrite(log + '', consoleColor, writeToFile, printToStdout, logFile)", "docstring": "log: str.\nconsoleColor: int, a value in class `ConsoleColor`, such as `ConsoleColor.DarkGreen`.\nwriteToFile: bool.\nprintToStdout: bool.\nlogFile: str, log file path.\n\nColorfullyWriteLine('Hello Green !!!'), color name must be in Logger.ColorNames.", "id": "f1782:c41:m4"} {"signature": "@staticmethoddef Log(log: Any = '', consoleColor: int = -, writeToFile: bool = True, printToStdout: bool = True, logFile: str = None) -> None:", "body": "t = datetime.datetime.now()frame = sys._getframe()log = ''.format(t.year, t.month, t.day,t.hour, t.minute, t.second, t.microsecond // , frame.f_code.co_name, frame.f_lineno, log)Logger.Write(log, consoleColor, writeToFile, printToStdout, logFile)", "docstring": "log: any type.\nconsoleColor: int, a value in class `ConsoleColor`, such as `ConsoleColor.DarkGreen`.\nwriteToFile: bool.\nprintToStdout: bool.\nlogFile: str, log file path.", "id": "f1782:c41:m5"} {"signature": "@staticmethoddef ColorfullyLog(log: str = '', consoleColor: int = -, writeToFile: bool = True, printToStdout: bool = True, logFile: str = None) -> None:", "body": "t = datetime.datetime.now()frame = 
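A sketch of the Logger static methods (Write, WriteLine, Log, DeleteLog); the message strings are placeholders. Write appends to the log file unless writeToFile=False, and Log prefixes the timestamp, caller name and line number.

from uiautomation import uiautomation as auto

auto.Logger.Write('colored, no newline ', auto.ConsoleColor.DarkGreen, writeToFile=False)
auto.Logger.WriteLine('plain line with newline', auto.ConsoleColor.Yellow)
auto.Logger.Log('prefixed with time, caller and line number')
auto.Logger.DeleteLog()                   # remove the log file when done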
sys._getframe()log = ''.format(t.year, t.month, t.day,t.hour, t.minute, t.second, t.microsecond // , frame.f_code.co_name, frame.f_lineno, log)Logger.ColorfullyWrite(log, consoleColor, writeToFile, printToStdout, logFile)", "docstring": "log: any type.\nconsoleColor: int, a value in class ConsoleColor, such as ConsoleColor.DarkGreen.\nwriteToFile: bool.\nprintToStdout: bool.\nlogFile: str, log file path.\n\nColorfullyLog('Hello Green !!!'), color name must be in Logger.ColorNames", "id": "f1782:c41:m6"} {"signature": "@staticmethoddef DeleteLog() -> None:", "body": "if os.path.exists(Logger.FileName):os.remove(Logger.FileName)", "docstring": "Delete log file.", "id": "f1782:c41:m7"} {"signature": "def __init__(self, width: int = , height: int = ):", "body": "self._width = widthself._height = heightself._bitmap = if width > and height > :self._bitmap = _DllClient.instance().dll.BitmapCreate(width, height)", "docstring": "Create a black bimap of size(width, height).", "id": "f1782:c42:m0"} {"signature": "@propertydef Width(self) -> int:", "body": "return self._width", "docstring": "Property Width.\nReturn int.", "id": "f1782:c42:m4"} {"signature": "@propertydef Height(self) -> int:", "body": "return self._height", "docstring": "Property Height.\nReturn int.", "id": "f1782:c42:m5"} {"signature": "def FromHandle(self, hwnd: int, left: int = , top: int = , right: int = , bottom: int = ) -> bool:", "body": "self.Release()root = GetRootControl()rect = ctypes.wintypes.RECT()ctypes.windll.user32.GetWindowRect(hwnd, ctypes.byref(rect))left, top, right, bottom = left + rect.left, top + rect.top, right + rect.left, bottom + rect.topself._bitmap = _DllClient.instance().dll.BitmapFromWindow(root.NativeWindowHandle, left, top, right, bottom)self._getsize()return self._bitmap > ", "docstring": "Capture a native window to Bitmap by its handle.\nhwnd: int, the handle of a native window.\nleft: int.\ntop: int.\nright: int.\nbottom: int.\nleft, top, right and bottom are control's internal postion(from 0,0).\nReturn bool, True if succeed otherwise False.", "id": "f1782:c42:m6"} {"signature": "def FromControl(self, control: '', x: int = , y: int = , width: int = , height: int = ) -> bool:", "body": "rect = control.BoundingRectanglewhile rect.width() == or rect.height() == :control = control.GetParentControl()if not control:return Falserect = control.BoundingRectangleif width <= :width = rect.width() + widthif height <= :height = rect.height() + heighthandle = control.NativeWindowHandleif handle:left = xtop = yright = left + widthbottom = top + heightelse:while True:control = control.GetParentControl()handle = control.NativeWindowHandleif handle:pRect = control.BoundingRectangleleft = rect.left - pRect.left + xtop = rect.top - pRect.top + yright = left + widthbottom = top + heightbreakreturn self.FromHandle(handle, left, top, right, bottom)", "docstring": "Capture a control to Bitmap.\ncontrol: `Control` or its subclass.\nx: int.\ny: int.\nwidth: int.\nheight: int.\nx, y: the point in control's internal position(from 0,0)\nwidth, height: image's width and height from x, y, use 0 for entire area,\nIf width(or height) < 0, image size will be control's width(or height) - width(or height).\nReturn bool, True if succeed otherwise False.", "id": "f1782:c42:m7"} {"signature": "def FromFile(self, filePath: str) -> bool:", "body": "self.Release()self._bitmap = _DllClient.instance().dll.BitmapFromFile(ctypes.c_wchar_p(filePath))self._getsize()return self._bitmap > ", "docstring": "Load image from a file.\nfilePath: 
str.\nReturn bool, True if succeed otherwise False.", "id": "f1782:c42:m8"} {"signature": "def ToFile(self, savePath: str) -> bool:", "body": "name, ext = os.path.splitext(savePath)extMap = {'': '', '': '', '': '', '': '', '': '', '': '', '': ''}gdiplusImageFormat = extMap.get(ext.lower(), '')return bool(_DllClient.instance().dll.BitmapToFile(self._bitmap, ctypes.c_wchar_p(savePath), ctypes.c_wchar_p(gdiplusImageFormat)))", "docstring": "Save to a file.\nsavePath: str, should end with .bmp, .jpg, .jpeg, .png, .gif, .tif, .tiff.\nReturn bool, True if succeed otherwise False.", "id": "f1782:c42:m9"} {"signature": "def GetPixelColor(self, x: int, y: int) -> int:", "body": "return _DllClient.instance().dll.BitmapGetPixel(self._bitmap, x, y)", "docstring": "Get color value of a pixel.\nx: int.\ny: int.\nReturn int, argb color.\nb = argb & 0x0000FF\ng = (argb & 0x00FF00) >> 8\nr = (argb & 0xFF0000) >> 16\na = (argb & 0xFF0000) >> 24", "id": "f1782:c42:m10"} {"signature": "def SetPixelColor(self, x: int, y: int, argb: int) -> bool:", "body": "return _DllClient.instance().dll.BitmapSetPixel(self._bitmap, x, y, argb)", "docstring": "Set color value of a pixel.\nx: int.\ny: int.\nargb: int, color value.\nReturn bool, True if succeed otherwise False.", "id": "f1782:c42:m11"} {"signature": "def GetPixelColorsHorizontally(self, x: int, y: int, count: int) -> ctypes.Array:", "body": "arrayType = ctypes.c_uint32 * countvalues = arrayType()_DllClient.instance().dll.BitmapGetPixelsHorizontally(ctypes.c_size_t(self._bitmap), x, y, values, count)return values", "docstring": "x: int.\ny: int.\ncount: int.\nReturn `ctypes.Array`, an iterable array of int values in argb form point x,y horizontally.", "id": "f1782:c42:m12"} {"signature": "def SetPixelColorsHorizontally(self, x: int, y: int, colors: Iterable) -> bool:", "body": "count = len(colors)arrayType = ctypes.c_uint32 * countvalues = arrayType(*colors)return _DllClient.instance().dll.BitmapSetPixelsHorizontally(ctypes.c_size_t(self._bitmap), x, y, values, count)", "docstring": "Set pixel colors form x,y horizontally.\nx: int.\ny: int.\ncolors: Iterable, an iterable list of int color values in argb.\nReturn bool, True if succeed otherwise False.", "id": "f1782:c42:m13"} {"signature": "def GetPixelColorsVertically(self, x: int, y: int, count: int) -> ctypes.Array:", "body": "arrayType = ctypes.c_uint32 * countvalues = arrayType()_DllClient.instance().dll.BitmapGetPixelsVertically(ctypes.c_size_t(self._bitmap), x, y, values, count)return values", "docstring": "x: int.\ny: int.\ncount: int.\nReturn `ctypes.Array`, an iterable array of int values in argb form point x,y vertically.", "id": "f1782:c42:m14"} {"signature": "def SetPixelColorsVertically(self, x: int, y: int, colors: Iterable) -> bool:", "body": "count = len(colors)arrayType = ctypes.c_uint32 * countvalues = arrayType(*colors)return _DllClient.instance().dll.BitmapSetPixelsVertically(ctypes.c_size_t(self._bitmap), x, y, values, count)", "docstring": "Set pixel colors form x,y vertically.\nx: int.\ny: int.\ncolors: Iterable, an iterable list of int color values in argb.\nReturn bool, True if succeed otherwise False.", "id": "f1782:c42:m15"} {"signature": "def GetPixelColorsOfRow(self, y: int) -> ctypes.Array:", "body": "return self.GetPixelColorsOfRect(, y, self.Width, )", "docstring": "y: int, row index.\nReturn `ctypes.Array`, an iterable array of int values in argb of y row.", "id": "f1782:c42:m16"} {"signature": "def GetPixelColorsOfColumn(self, x: int) -> ctypes.Array:", "body": "return 
self.GetPixelColorsOfRect(x, , , self.Height)", "docstring": "x: int, column index.\nReturn `ctypes.Array`, an iterable array of int values in argb of x column.", "id": "f1782:c42:m17"} {"signature": "def GetPixelColorsOfRect(self, x: int, y: int, width: int, height: int) -> ctypes.Array:", "body": "arrayType = ctypes.c_uint32 * (width * height)values = arrayType()_DllClient.instance().dll.BitmapGetPixelsOfRect(ctypes.c_size_t(self._bitmap), x, y, width, height, values)return values", "docstring": "x: int.\ny: int.\nwidth: int.\nheight: int.\nReturn `ctypes.Array`, an iterable array of int values in argb of the input rect.", "id": "f1782:c42:m18"} {"signature": "def SetPixelColorsOfRect(self, x: int, y: int, width: int, height: int, colors: Iterable) -> bool:", "body": "arrayType = ctypes.c_uint32 * (width * height)values = arrayType(*colors)return bool(_DllClient.instance().dll.BitmapSetPixelsOfRect(ctypes.c_size_t(self._bitmap), x, y, width, height, values))", "docstring": "x: int.\ny: int.\nwidth: int.\nheight: int.\ncolors: Iterable, an iterable list of int values, it's length must equal to width*height.\nReturn `ctypes.Array`, an iterable array of int values in argb of the input rect.", "id": "f1782:c42:m19"} {"signature": "def GetPixelColorsOfRects(self, rects: list) -> list:", "body": "rects2 = [(x, y, x + width, y + height) for x, y, width, height in rects]left, top, right, bottom = zip(*rects2)left, top, right, bottom = min(left), min(top), max(right), max(bottom)width, height = right - left, bottom - topallColors = self.GetPixelColorsOfRect(left, top, width, height)colorsOfRects = []for x, y, w, h in rects:x -= lefty -= topcolors = []for row in range(h):colors.extend(allColors[(y + row) * width + x:(y + row) * width + x + w])colorsOfRects.append(colors)return colorsOfRects", "docstring": "rects: a list of rects, such as [(0,0,10,10), (10,10,20,20),(x,y,width,height)].\nReturn list, a list whose elements are ctypes.Array which is an iterable array of int values in argb.", "id": "f1782:c42:m20"} {"signature": "def GetAllPixelColors(self) -> ctypes.Array:", "body": "return self.GetPixelColorsOfRect(, , self.Width, self.Height)", "docstring": "Return `ctypes.Array`, an iterable array of int values in argb.", "id": "f1782:c42:m21"} {"signature": "def GetSubBitmap(self, x: int, y: int, width: int, height: int) -> '':", "body": "colors = self.GetPixelColorsOfRect(x, y, width, height)bitmap = Bitmap(width, height)bitmap.SetPixelColorsOfRect(, , width, height, colors)return bitmap", "docstring": "x: int.\ny: int.\nwidth: int.\nheight: int.\nReturn `Bitmap`, a sub bitmap of the input rect.", "id": "f1782:c42:m22"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationannotationpattern", "id": "f1782:c43:m0"} {"signature": "@propertydef AnnotationTypeId(self) -> int:", "body": "return self.pattern.CurrentAnnotationTypeId", "docstring": "Property AnnotationTypeId.\nCall IUIAutomationAnnotationPattern::get_CurrentAnnotationTypeId.\nReturn int, a value in class `AnnotationType`.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationannotationpattern-get_currentannotationtypeid", "id": "f1782:c43:m1"} {"signature": "@propertydef AnnotationTypeName(self) -> str:", "body": "return self.pattern.CurrentAnnotationTypeName", "docstring": "Property AnnotationTypeName.\nCall 
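A sketch tying the Bitmap capture and pixel helpers together (FromControl, GetPixelColor, GetSubBitmap, ToFile). The argb unpacking follows GetPixelColor's docstring; 'corner.png' is a placeholder path.

from uiautomation import uiautomation as auto

bmp = auto.Bitmap()
if bmp.FromControl(auto.GetForegroundControl()):      # capture the whole control area
    print(bmp.Width, bmp.Height)
    argb = bmp.GetPixelColor(0, 0)
    b = argb & 0x0000FF
    g = (argb & 0x00FF00) >> 8
    r = (argb & 0xFF0000) >> 16
    print(r, g, b)
    sub = bmp.GetSubBitmap(0, 0, 10, 10)              # top-left 10x10 corner
    sub.ToFile('corner.png')                          # file extension selects the encoder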
IUIAutomationAnnotationPattern::get_CurrentAnnotationTypeName.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationannotationpattern-get_currentannotationtypename", "id": "f1782:c43:m2"} {"signature": "@propertydef Author(self) -> str:", "body": "return self.pattern.CurrentAuthor", "docstring": "Property Author.\nCall IUIAutomationAnnotationPattern::get_CurrentAuthor.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationannotationpattern-get_currentauthor", "id": "f1782:c43:m3"} {"signature": "@propertydef DateTime(self) -> str:", "body": "return self.pattern.CurrentDateTime", "docstring": "Property DateTime.\nCall IUIAutomationAnnotationPattern::get_CurrentDateTime.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationannotationpattern-get_currentdatetime", "id": "f1782:c43:m4"} {"signature": "@propertydef Target(self) -> '':", "body": "ele = self.pattern.CurrentTargetreturn Control.CreateControlFromElement(ele)", "docstring": "Property Target.\nCall IUIAutomationAnnotationPattern::get_CurrentTarget.\nReturn `Control` subclass, the element that is being annotated.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationannotationpattern-get_currenttarget", "id": "f1782:c43:m5"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationcustomnavigationpattern", "id": "f1782:c44:m0"} {"signature": "def Navigate(self, direction: int) -> '':", "body": "ele = self.pattern.Navigate(direction)return Control.CreateControlFromElement(ele)", "docstring": "Call IUIAutomationCustomNavigationPattern::Navigate.\nGet the next control in the specified direction within the logical UI tree.\ndirection: int, a value in class `NavigateDirection`.\nReturn `Control` subclass or None.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationcustomnavigationpattern-navigate", "id": "f1782:c44:m1"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationdockpattern", "id": "f1782:c45:m0"} {"signature": "@propertydef DockPosition(self) -> int:", "body": "return self.pattern.CurrentDockPosition", "docstring": "Property DockPosition.\nCall IUIAutomationDockPattern::get_CurrentDockPosition.\nReturn int, a value in class `DockPosition`.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationdockpattern-get_currentdockposition", "id": "f1782:c45:m1"} {"signature": "def SetDockPosition(self, dockPosition: int, waitTime: float = OPERATION_WAIT_TIME) -> int:", "body": "ret = self.pattern.SetDockPosition(dockPosition)time.sleep(waitTime)return ret", "docstring": "Call IUIAutomationDockPattern::SetDockPosition.\ndockPosition: int, a value in class `DockPosition`.\nwaitTime: float.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationdockpattern-setdockposition", "id": "f1782:c45:m2"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer 
https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationdragpattern", "id": "f1782:c46:m0"} {"signature": "@propertydef DropEffect(self) -> str:", "body": "return self.pattern.CurrentDropEffect", "docstring": "Property DropEffect.\nCall IUIAutomationDragPattern::get_CurrentDropEffect.\nReturn str, a localized string that indicates what happens\n when the user drops this element as part of a drag-drop operation.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationdragpattern-get_currentdropeffect", "id": "f1782:c46:m1"} {"signature": "@propertydef DropEffects(self) -> list:", "body": "return self.pattern.CurrentDropEffects", "docstring": "Property DropEffects.\nCall IUIAutomationDragPattern::get_CurrentDropEffects, todo SAFEARRAY.\nReturn list, a list of localized strings that enumerate the full set of effects\n that can happen when this element as part of a drag-and-drop operation.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationdragpattern-get_currentdropeffects", "id": "f1782:c46:m2"} {"signature": "@propertydef IsGrabbed(self) -> bool:", "body": "return bool(self.pattern.CurrentIsGrabbed)", "docstring": "Property IsGrabbed.\nCall IUIAutomationDragPattern::get_CurrentIsGrabbed.\nReturn bool, indicates whether the user has grabbed this element as part of a drag-and-drop operation.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationdragpattern-get_currentisgrabbed", "id": "f1782:c46:m3"} {"signature": "def GetGrabbedItems(self) -> list:", "body": "eleArray = self.pattern.GetCurrentGrabbedItems()if eleArray:controls = []for i in range(eleArray.Length):ele = eleArray.GetElement(i)con = Control.CreateControlFromElement(element=ele)if con:controls.append(con)return controlsreturn []", "docstring": "Call IUIAutomationDragPattern::GetCurrentGrabbedItems.\nReturn list, a list of `Control` subclasses that represent the full set of items\n that the user is dragging as part of a drag operation.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationdragpattern-getcurrentgrabbeditems", "id": "f1782:c46:m4"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationdroptargetpattern", "id": "f1782:c47:m0"} {"signature": "@propertydef DropTargetEffect(self) -> str:", "body": "return self.pattern.CurrentDropTargetEffect", "docstring": "Property DropTargetEffect.\nCall IUIAutomationDropTargetPattern::get_CurrentDropTargetEffect.\nReturn str, a localized string that describes what happens\n when the user drops the grabbed element on this drop target.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationdragpattern-get_currentdroptargeteffect", "id": "f1782:c47:m1"} {"signature": "@propertydef DropTargetEffects(self) -> list:", "body": "return self.pattern.CurrentDropTargetEffects", "docstring": "Property DropTargetEffects.\nCall IUIAutomationDropTargetPattern::get_CurrentDropTargetEffects, todo SAFEARRAY.\nReturn list, a list of localized strings that enumerate the full set of effects\n that can happen when the user drops a grabbed element on this drop target\n as part of a drag-and-drop 
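A sketch of driving ExpandCollapsePattern through Control.GetPattern. The ExpandCollapseState.Collapsed member name is an assumption based on the class LogControl references; GetPattern returns None when the pattern is unsupported.

from uiautomation import uiautomation as auto

control = auto.GetFocusedControl()                       # assume an expandable control has focus
ecp = control.GetPattern(auto.PatternId.ExpandCollapsePattern)
if ecp:
    # Collapsed is an assumed member of class ExpandCollapseState.
    if ecp.ExpandCollapseState == auto.ExpandCollapseState.Collapsed:
        print(ecp.Expand())       # True on S_OK
    else:
        print(ecp.Collapse())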
operation.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationdroptargetpattern-get_currentdroptargeteffects", "id": "f1782:c47:m2"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationexpandcollapsepattern", "id": "f1782:c48:m0"} {"signature": "@propertydef ExpandCollapseState(self) -> int:", "body": "return self.pattern.CurrentExpandCollapseState", "docstring": "Property ExpandCollapseState.\nCall IUIAutomationExpandCollapsePattern::get_CurrentExpandCollapseState.\nReturn int, a value in class ExpandCollapseState.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationexpandcollapsepattern-get_currentexpandcollapsestate", "id": "f1782:c48:m1"} {"signature": "def Collapse(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Collapse() == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationExpandCollapsePattern::Collapse.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationexpandcollapsepattern-collapse", "id": "f1782:c48:m2"} {"signature": "def Expand(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Expand() == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationExpandCollapsePattern::Expand.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationexpandcollapsepattern-expand", "id": "f1782:c48:m3"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationgriditempattern", "id": "f1782:c49:m0"} {"signature": "@propertydef Column(self) -> int:", "body": "return self.pattern.CurrentColumn", "docstring": "Property Column.\nCall IUIAutomationGridItemPattern::get_CurrentColumn.\nReturn int, the zero-based index of the column that contains the item.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationgriditempattern-get_currentcolumn", "id": "f1782:c49:m1"} {"signature": "@propertydef ColumnSpan(self) -> int:", "body": "return self.pattern.CurrentColumnSpan", "docstring": "Property ColumnSpan.\nCall IUIAutomationGridItemPattern::get_CurrentColumnSpan.\nReturn int, the number of columns spanned by the grid item.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationgriditempattern-get_currentcolumnspan", "id": "f1782:c49:m2"} {"signature": "@propertydef ContainingGrid(self) -> '':", "body": "return Control.CreateControlFromElement(self.pattern.CurrentContainingGrid)", "docstring": "Property ContainingGrid.\nCall IUIAutomationGridItemPattern::get_CurrentContainingGrid.\nReturn `Control` subclass, the element that contains the grid item.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationgriditempattern-get_currentcontaininggrid", "id": "f1782:c49:m3"} {"signature": "@propertydef Row(self) -> int:", "body": "return self.pattern.CurrentRow",
"docstring": "Property Row.\nCall IUIAutomationGridItemPattern::get_CurrentRow.\nReturn int, the zero-based index of the row that contains the grid item.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationgriditempattern-get_currentrow", "id": "f1782:c49:m4"} {"signature": "@propertydef RowSpan(self) -> int:", "body": "return self.pattern.CurrentRowSpan", "docstring": "Property RowSpan.\nCall IUIAutomationGridItemPattern::get_CurrentRowSpan.\nReturn int, the number of rows spanned by the grid item.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationgriditempattern-get_currentrowspan", "id": "f1782:c49:m5"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationgridpattern", "id": "f1782:c50:m0"} {"signature": "@propertydef ColumnCount(self) -> int:", "body": "return self.pattern.CurrentColumnCount", "docstring": "Property ColumnCount.\nCall IUIAutomationGridPattern::get_CurrentColumnCount.\nReturn int, the number of columns in the grid.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationgridpattern-get_currentcolumncount", "id": "f1782:c50:m1"} {"signature": "@propertydef RowCount(self) -> int:", "body": "return self.pattern.CurrentRowCount", "docstring": "Property RowCount.\nCall IUIAutomationGridPattern::get_CurrentRowCount.\nReturn int, the number of rows in the grid.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationgridpattern-get_currentrowcount", "id": "f1782:c50:m2"} {"signature": "def GetItem(self) -> '':", "body": "return Control.CreateControlFromElement(self.pattern.GetItem())", "docstring": "Call IUIAutomationGridPattern::GetItem.\nReturn `Control` subclass, a control representing an item in the grid.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationgridpattern-getitem", "id": "f1782:c50:m3"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationinvokepattern", "id": "f1782:c51:m0"} {"signature": "def Invoke(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Invoke() == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationInvokePattern::Invoke.\nInvoke the action of a control, such as a button click.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationinvokepattern-invoke", "id": "f1782:c51:m1"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationitemcontainerpattern", "id": "f1782:c52:m0"} {"signature": "def FindItemByProperty(control: '', propertyId: int, propertyValue) -> '':", "body": "ele = self.pattern.FindItemByProperty(control.Element, propertyId, propertyValue)return Control.CreateControlFromElement(ele)", "docstring": "Call IUIAutomationItemContainerPattern::FindItemByProperty.\ncontrol: `Control` or its 
subclass.\npropertyValue: COM VARIANT according to propertyId? todo.\npropertyId: int, a value in class `PropertyId`.\nReturn `Control` subclass, a control within a containing element, based on a specified property value.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationitemcontainerpattern-finditembyproperty", "id": "f1782:c52:m1"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationlegacyiaccessiblepattern", "id": "f1782:c53:m0"} {"signature": "@propertydef ChildId(self) -> int:", "body": "return self.pattern.CurrentChildId", "docstring": "Property ChildId.\nCall IUIAutomationLegacyIAccessiblePattern::get_CurrentChildId.\nReturn int, the Microsoft Active Accessibility child identifier for the element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-get_currentchildid", "id": "f1782:c53:m1"} {"signature": "@propertydef DefaultAction(self) -> str:", "body": "return self.pattern.CurrentDefaultAction", "docstring": "Property DefaultAction.\nCall IUIAutomationLegacyIAccessiblePattern::get_CurrentDefaultAction.\nReturn str, the Microsoft Active Accessibility current default action for the element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-get_currentdefaultaction", "id": "f1782:c53:m2"} {"signature": "@propertydef Description(self) -> str:", "body": "return self.pattern.CurrentDescription", "docstring": "Property Description.\nCall IUIAutomationLegacyIAccessiblePattern::get_CurrentDescription.\nReturn str, the Microsoft Active Accessibility description of the element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-get_currentdescription", "id": "f1782:c53:m3"} {"signature": "@propertydef Help(self) -> str:", "body": "return self.pattern.CurrentHelp", "docstring": "Property Help.\nCall IUIAutomationLegacyIAccessiblePattern::get_CurrentHelp.\nReturn str, the Microsoft Active Accessibility help string for the element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-get_currenthelp", "id": "f1782:c53:m4"} {"signature": "@propertydef KeyboardShortcut(self) -> str:", "body": "return self.pattern.CurrentKeyboardShortcut", "docstring": "Property KeyboardShortcut.\nCall IUIAutomationLegacyIAccessiblePattern::get_CurrentKeyboardShortcut.\nReturn str, the Microsoft Active Accessibility keyboard shortcut property for the element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-get_currentkeyboardshortcut", "id": "f1782:c53:m5"} {"signature": "@propertydef Name(self) -> str:", "body": "return self.pattern.CurrentName or ''", "docstring": "Property Name.\nCall IUIAutomationLegacyIAccessiblePattern::get_CurrentName.\nReturn str, the Microsoft Active Accessibility name property of the element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-get_currentname", "id": "f1782:c53:m6"} {"signature": "@propertydef Role(self) -> int:", "body": "return 
self.pattern.CurrentRole", "docstring": "Property Role.\nCall IUIAutomationLegacyIAccessiblePattern::get_CurrentRole.\nReturn int, a value in calss `AccessibleRole`, the Microsoft Active Accessibility role identifier.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-get_currentrole", "id": "f1782:c53:m7"} {"signature": "@propertydef State(self) -> int:", "body": "return self.pattern.CurrentState", "docstring": "Property State.\nCall IUIAutomationLegacyIAccessiblePattern::get_CurrentState.\nReturn int, a value in calss `AccessibleState`, the Microsoft Active Accessibility state identifier.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-get_currentstate", "id": "f1782:c53:m8"} {"signature": "@propertydef Value(self) -> str:", "body": "return self.pattern.CurrentValue", "docstring": "Property Value.\nCall IUIAutomationLegacyIAccessiblePattern::get_CurrentValue.\nReturn str, the Microsoft Active Accessibility value property.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-get_currentvalue", "id": "f1782:c53:m9"} {"signature": "def DoDefaultAction(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.DoDefaultAction() == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationLegacyIAccessiblePattern::DoDefaultAction.\nPerform the Microsoft Active Accessibility default action for the element.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-dodefaultaction", "id": "f1782:c53:m10"} {"signature": "def GetSelection(self) -> list:", "body": "eleArray = self.pattern.GetCurrentSelection()if eleArray:controls = []for i in range(eleArray.Length):ele = eleArray.GetElement(i)con = Control.CreateControlFromElement(element=ele)if con:controls.append(con)return controlsreturn []", "docstring": "Call IUIAutomationLegacyIAccessiblePattern::GetCurrentSelection.\nReturn list, a list of `Control` subclasses,\n the Microsoft Active Accessibility property that identifies the selected children of this element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-getcurrentselection", "id": "f1782:c53:m11"} {"signature": "def GetIAccessible(self):", "body": "return self.pattern.GetIAccessible()", "docstring": "Call IUIAutomationLegacyIAccessiblePattern::GetIAccessible, todo.\nReturn an IAccessible object that corresponds to the Microsoft UI Automation element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-getiaccessible\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/oleacc/nn-oleacc-iaccessible", "id": "f1782:c53:m12"} {"signature": "def Select(self, flagsSelect: int, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Select(flagsSelect) == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationLegacyIAccessiblePattern::Select.\nPerform a Microsoft Active Accessibility selection.\nflagsSelect: int, a value in `AccessibleSelection`.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer 
https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-select", "id": "f1782:c53:m13"} {"signature": "def SetValue(self, value: str, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.SetValue(value) == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationLegacyIAccessiblePattern::SetValue.\nSet the Microsoft Active Accessibility value property for the element.\nvalue: str.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-setvalue", "id": "f1782:c53:m14"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationmultipleviewpattern", "id": "f1782:c54:m0"} {"signature": "@propertydef CurrentView(self) -> int:", "body": "return self.pattern.CurrentCurrentView", "docstring": "Property CurrentView.\nCall IUIAutomationMultipleViewPattern::get_CurrentCurrentView.\nReturn int, the control-specific identifier of the current view of the control.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationmultipleviewpattern-get_currentcurrentview", "id": "f1782:c54:m1"} {"signature": "def GetSupportedViews(self) -> list:", "body": "return self.pattern.GetCurrentSupportedViews()", "docstring": "Call IUIAutomationMultipleViewPattern::GetCurrentSupportedViews, todo.\nReturn list, a list of int, control-specific view identifiers.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationmultipleviewpattern-getcurrentsupportedviews", "id": "f1782:c54:m2"} {"signature": "def GetViewName(self, view: int) -> str:", "body": "return self.pattern.GetViewName(view)", "docstring": "Call IUIAutomationMultipleViewPattern::GetViewName.\nview: int, the control-specific view identifier.\nReturn str, the name of a control-specific view.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationmultipleviewpattern-getviewname", "id": "f1782:c54:m3"} {"signature": "def SetView(self, view: int) -> bool:", "body": "return self.pattern.SetCurrentView(view) == S_OK", "docstring": "Call IUIAutomationMultipleViewPattern::SetCurrentView.\nSet the view of the control.\nview: int, the control-specific view identifier.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationmultipleviewpattern-getviewname", "id": "f1782:c54:m4"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationobjectmodelpattern", "id": "f1782:c55:m0"} {"signature": "def GetUnderlyingObjectModel(self) -> ctypes.POINTER(comtypes.IUnknown):", "body": "return self.pattern.GetUnderlyingObjectModel()", "docstring": "Call IUIAutomationObjectModelPattern::GetUnderlyingObjectModel, todo.\nReturn `ctypes.POINTER(comtypes.IUnknown)`, an interface used to access the underlying object model of the provider.\nRefer 
https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationobjectmodelpattern-getunderlyingobjectmodel", "id": "f1782:c55:m1"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationrangevaluepattern", "id": "f1782:c56:m0"} {"signature": "@propertydef IsReadOnly(self) -> bool:", "body": "return self.pattern.CurrentIsReadOnly", "docstring": "Property IsReadOnly.\nCall IUIAutomationRangeValuePattern::get_CurrentIsReadOnly.\nReturn bool, indicates whether the value of the element can be changed.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationrangevaluepattern-get_currentisreadonly", "id": "f1782:c56:m1"} {"signature": "@propertydef LargeChange(self) -> float:", "body": "return self.pattern.CurrentLargeChange", "docstring": "Property LargeChange.\nCall IUIAutomationRangeValuePattern::get_CurrentLargeChange.\nReturn float, the value that is added to or subtracted from the value of the control\n when a large change is made, such as when the PAGE DOWN key is pressed.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationrangevaluepattern-get_currentlargechange", "id": "f1782:c56:m2"} {"signature": "@propertydef Maximum(self) -> float:", "body": "return self.pattern.CurrentMaximum", "docstring": "Property Maximum.\nCall IUIAutomationRangeValuePattern::get_CurrentMaximum.\nReturn float, the maximum value of the control.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationrangevaluepattern-get_currentmaximum", "id": "f1782:c56:m3"} {"signature": "@propertydef Minimum(self) -> float:", "body": "return self.pattern.CurrentMinimum", "docstring": "Property Minimum.\nCall IUIAutomationRangeValuePattern::get_CurrentMinimum.\nReturn float, the minimum value of the control.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationrangevaluepattern-get_currentminimum", "id": "f1782:c56:m4"} {"signature": "@propertydef SmallChange(self) -> float:", "body": "return self.pattern.CurrentSmallChange", "docstring": "Property SmallChange.\nCall IUIAutomationRangeValuePattern::get_CurrentSmallChange.\nReturn float, the value that is added to or subtracted from the value of the control\n when a small change is made, such as when an arrow key is pressed.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationrangevaluepattern-get_currentsmallchange", "id": "f1782:c56:m5"} {"signature": "@propertydef Value(self) -> float:", "body": "return self.pattern.CurrentValue", "docstring": "Property Value.\nCall IUIAutomationRangeValuePattern::get_CurrentValue.\nReturn float, the value of the control.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationrangevaluepattern-get_currentvalue", "id": "f1782:c56:m6"} {"signature": "def SetValue(self, value: float, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.SetValue(value) == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationRangeValuePattern::SetValue.\nSet the value of the control.\nvalue: int.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer 
https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationrangevaluepattern-setvalue", "id": "f1782:c56:m7"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationscrollitempattern", "id": "f1782:c57:m0"} {"signature": "def ScrollIntoView(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.ScrollIntoView() == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationScrollItemPattern::ScrollIntoView.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationscrollitempattern-scrollintoview", "id": "f1782:c57:m1"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationscrollpattern", "id": "f1782:c58:m0"} {"signature": "@propertydef HorizontallyScrollable(self) -> bool:", "body": "return bool(self.pattern.CurrentHorizontallyScrollable)", "docstring": "Property HorizontallyScrollable.\nCall IUIAutomationScrollPattern::get_CurrentHorizontallyScrollable.\nReturn bool, indicates whether the element can scroll horizontally.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationscrollpattern-get_currenthorizontallyscrollable", "id": "f1782:c58:m1"} {"signature": "@propertydef HorizontalScrollPercent(self) -> float:", "body": "return self.pattern.CurrentHorizontalScrollPercent", "docstring": "Property HorizontalScrollPercent.\nCall IUIAutomationScrollPattern::get_CurrentHorizontalScrollPercent.\nReturn float, the horizontal scroll position.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationscrollpattern-get_currenthorizontalscrollpercent", "id": "f1782:c58:m2"} {"signature": "@propertydef HorizontalViewSize(self) -> float:", "body": "return self.pattern.CurrentHorizontalViewSize", "docstring": "Property HorizontalViewSize.\nCall IUIAutomationScrollPattern::get_CurrentHorizontalViewSize.\nReturn float, the horizontal size of the viewable region of a scrollable element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationscrollpattern-get_currenthorizontalviewsize", "id": "f1782:c58:m3"} {"signature": "@propertydef VerticallyScrollable(self) -> bool:", "body": "return bool(self.pattern.CurrentVerticallyScrollable)", "docstring": "Property VerticallyScrollable.\nCall IUIAutomationScrollPattern::get_CurrentVerticallyScrollable.\nReturn bool, indicates whether the element can scroll vertically.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationscrollpattern-get_currentverticallyscrollable", "id": "f1782:c58:m4"} {"signature": "@propertydef VerticalScrollPercent(self) -> float:", "body": "return self.pattern.CurrentVerticalScrollPercent", "docstring": "Property VerticalScrollPercent.\nCall IUIAutomationScrollPattern::get_CurrentVerticalScrollPercent.\nReturn float, the vertical scroll position.\nRefer 
https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationscrollpattern-get_currentverticalscrollpercent", "id": "f1782:c58:m5"} {"signature": "@propertydef VerticalViewSize(self) -> float:", "body": "return self.pattern.CurrentVerticalViewSize", "docstring": "Property VerticalViewSize.\nCall IUIAutomationScrollPattern::get_CurrentVerticalViewSize.\nReturn float, the vertical size of the viewable region of a scrollable element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationscrollpattern-get_currentverticalviewsize", "id": "f1782:c58:m6"} {"signature": "def Scroll(self, horizontalAmount: int, verticalAmount: int, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Scroll(horizontalAmount, verticalAmount) == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationScrollPattern::Scroll.\nScroll the visible region of the content area horizontally and vertically.\nhorizontalAmount: int, a value in ScrollAmount.\nverticalAmount: int, a value in ScrollAmount.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationscrollpattern-scroll", "id": "f1782:c58:m7"} {"signature": "def SetScrollPercent(self, horizontalPercent: float, verticalPercent: float, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.SetScrollPercent(horizontalPercent, verticalPercent) == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationScrollPattern::SetScrollPercent.\nSet the horizontal and vertical scroll positions as a percentage of the total content area within the UI Automation element.\nhorizontalPercent: float or int, a value in [0, 100] or ScrollPattern.NoScrollValue(-1) if no scroll.\nverticalPercent: float or int, a value in [0, 100] or ScrollPattern.NoScrollValue(-1) if no scroll.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationscrollpattern-setscrollpercent", "id": "f1782:c58:m8"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationselectionitempattern", "id": "f1782:c59:m0"} {"signature": "def AddToSelection(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.AddToSelection() == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationSelectionItemPattern::AddToSelection.\nAdd the current element to the collection of selected items.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationselectionitempattern-addtoselection", "id": "f1782:c59:m1"} {"signature": "@propertydef IsSelected(self) -> bool:", "body": "return bool(self.pattern.CurrentIsSelected)", "docstring": "Property IsSelected.\nCall IUIAutomationSelectionItemPattern::get_CurrentIsSelected.\nReturn bool, indicates whether this item is selected.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationselectionitempattern-get_currentisselected", "id": "f1782:c59:m2"} {"signature": "@propertydef SelectionContainer(self) -> '':", "body": "ele = 
self.pattern.CurrentSelectionContainerreturn Control.CreateControlFromElement(ele)", "docstring": "Property SelectionContainer.\nCall IUIAutomationSelectionItemPattern::get_CurrentSelectionContainer.\nReturn `Control` subclass, the element that supports IUIAutomationSelectionPattern and acts as the container for this item.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationselectionitempattern-get_currentselectioncontainer", "id": "f1782:c59:m3"} {"signature": "def RemoveFromSelection(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.RemoveFromSelection() == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationSelectionItemPattern::RemoveFromSelection.\nRemove this element from the selection.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationselectionitempattern-removefromselection", "id": "f1782:c59:m4"} {"signature": "def Select(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Select() == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationSelectionItemPattern::Select.\nClear any selected items and then select the current element.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationselectionitempattern-select", "id": "f1782:c59:m5"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationselectionpattern", "id": "f1782:c60:m0"} {"signature": "@propertydef CanSelectMultiple(self) -> bool:", "body": "return bool(self.pattern.CurrentCanSelectMultiple)", "docstring": "Property CanSelectMultiple.\nCall IUIAutomationSelectionPattern::get_CurrentCanSelectMultiple.\nReturn bool, indicates whether more than one item in the container can be selected at one time.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationselectionpattern-get_currentcanselectmultiple", "id": "f1782:c60:m1"} {"signature": "@propertydef IsSelectionRequired(self) -> bool:", "body": "return bool(self.pattern.CurrentIsSelectionRequired)", "docstring": "Property IsSelectionRequired.\nCall IUIAutomationSelectionPattern::get_CurrentIsSelectionRequired.\nReturn bool, indicates whether at least one item must be selected at all times.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationselectionpattern-get_currentisselectionrequired", "id": "f1782:c60:m2"} {"signature": "def GetSelection(self) -> list:", "body": "eleArray = self.pattern.GetCurrentSelection()if eleArray:controls = []for i in range(eleArray.Length):ele = eleArray.GetElement(i)con = Control.CreateControlFromElement(element=ele)if con:controls.append(con)return controlsreturn []", "docstring": "Call IUIAutomationSelectionPattern::GetCurrentSelection.\nReturn list, a list of `Control` subclasses, the selected elements in the container.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationselectionpattern-getcurrentselection", "id": "f1782:c60:m3"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", 
"docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationspreadsheetitempattern", "id": "f1782:c61:m0"} {"signature": "@propertydef Formula(self) -> str:", "body": "return self.pattern.CurrentFormula", "docstring": "Property Formula.\nCall IUIAutomationSpreadsheetItemPattern::get_CurrentFormula.\nReturn str, the formula for this cell.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationspreadsheetitempattern-get_currentformula", "id": "f1782:c61:m1"} {"signature": "def GetAnnotationObjects(self) -> list:", "body": "eleArray = self.pattern.GetCurrentAnnotationObjects()if eleArray:controls = []for i in range(eleArray.Length):ele = eleArray.GetElement(i)con = Control.CreateControlFromElement(element=ele)if con:controls.append(con)return controlsreturn []", "docstring": "Call IUIAutomationSelectionPattern::GetCurrentAnnotationObjects.\nReturn list, a list of `Control` subclasses representing the annotations associated with this spreadsheet cell.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationspreadsheetitempattern-getcurrentannotationobjects", "id": "f1782:c61:m2"} {"signature": "def GetAnnotationTypes(self) -> list:", "body": "return self.pattern.GetCurrentAnnotationTypes()", "docstring": "Call IUIAutomationSelectionPattern::GetCurrentAnnotationTypes.\nReturn list, a list of int values in class `AnnotationType`,\n indicating the types of annotations that are associated with this spreadsheet cell.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationselectionpattern-getcurrentannotationtypes", "id": "f1782:c61:m3"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationspreadsheetpattern", "id": "f1782:c62:m0"} {"signature": "def GetItemByName(self, name: str) -> '':", "body": "ele = self.pattern.GetItemByName(name)return Control.CreateControlFromElement(element=ele)", "docstring": "Call IUIAutomationSpreadsheetPattern::GetItemByName.\nname: str.\nReturn `Control` subclass or None, represents the spreadsheet cell that has the specified name..\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationspreadsheetpattern-getitembyname", "id": "f1782:c62:m1"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationstylespattern", "id": "f1782:c63:m0"} {"signature": "@propertydef ExtendedProperties(self) -> str:", "body": "return self.pattern.CurrentExtendedProperties", "docstring": "Property ExtendedProperties.\nCall IUIAutomationStylesPattern::get_CurrentExtendedProperties.\nReturn str, a localized string that contains the list of extended properties for an element in a document.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationstylespattern-get_currentextendedproperties", "id": "f1782:c63:m1"} {"signature": "@propertydef FillColor(self) -> int:", "body": "return self.pattern.CurrentFillColor", "docstring": "Property FillColor.\nCall IUIAutomationStylesPattern::get_CurrentFillColor.\nReturn int, the fill color of an 
element in a document.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationstylespattern-get_currentfillcolor", "id": "f1782:c63:m2"} {"signature": "@propertydef FillPatternColor(self) -> int:", "body": "return self.pattern.CurrentFillPatternColor", "docstring": "Property FillPatternColor.\nCall IUIAutomationStylesPattern::get_CurrentFillPatternColor.\nReturn int, the color of the pattern used to fill an element in a document.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationstylespattern-get_currentfillpatterncolor", "id": "f1782:c63:m3"} {"signature": "@propertydef Shape(self) -> str:", "body": "return self.pattern.CurrentShape", "docstring": "Property Shape.\nCall IUIAutomationStylesPattern::get_CurrentShape.\nReturn str, the shape of an element in a document.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationstylespattern-get_currentshape", "id": "f1782:c63:m4"} {"signature": "@propertydef StyleId(self) -> int:", "body": "return self.pattern.CurrentStyleId", "docstring": "Property StyleId.\nCall IUIAutomationStylesPattern::get_CurrentStyleId.\nReturn int, a value in class `StyleId`, the visual style associated with an element in a document.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationstylespattern-get_currentstyleid", "id": "f1782:c63:m5"} {"signature": "@propertydef StyleName(self) -> str:", "body": "return self.pattern.CurrentStyleName", "docstring": "Property StyleName.\nCall IUIAutomationStylesPattern::get_CurrentStyleName.\nReturn str, the name of the visual style associated with an element in a document.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationstylespattern-get_currentstylename", "id": "f1782:c63:m6"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationsynchronizedinputpattern", "id": "f1782:c64:m0"} {"signature": "def Cancel(self) -> bool:", "body": "return self.pattern.Cancel() == S_OK", "docstring": "Call IUIAutomationSynchronizedInputPattern::Cancel.\nCause the Microsoft UI Automation provider to stop listening for mouse or keyboard input.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationsynchronizedinputpattern-cancel", "id": "f1782:c64:m1"} {"signature": "def StartListening(self) -> bool:", "body": "return self.pattern.StartListening() == S_OK", "docstring": "Call IUIAutomationSynchronizedInputPattern::StartListening.\nCause the Microsoft UI Automation provider to start listening for mouse or keyboard input.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationsynchronizedinputpattern-startlistening", "id": "f1782:c64:m2"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationtableitempattern", "id": "f1782:c65:m0"} {"signature": "def GetColumnHeaderItems(self) -> list:", "body": "eleArray = 
self.pattern.GetCurrentColumnHeaderItems()if eleArray:controls = []for i in range(eleArray.Length):ele = eleArray.GetElement(i)con = Control.CreateControlFromElement(element=ele)if con:controls.append(con)return controlsreturn []", "docstring": "Call IUIAutomationTableItemPattern::GetCurrentColumnHeaderItems.\nReturn list, a list of `Control` subclasses, the column headers associated with a table item or cell.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtableitempattern-getcurrentcolumnheaderitems", "id": "f1782:c65:m1"} {"signature": "def GetRowHeaderItems(self) -> list:", "body": "eleArray = self.pattern.GetCurrentRowHeaderItems()if eleArray:controls = []for i in range(eleArray.Length):ele = eleArray.GetElement(i)con = Control.CreateControlFromElement(element=ele)if con:controls.append(con)return controlsreturn []", "docstring": "Call IUIAutomationTableItemPattern::GetCurrentRowHeaderItems.\nReturn list, a list of `Control` subclasses, the row headers associated with a table item or cell.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtableitempattern-getcurrentrowheaderitems", "id": "f1782:c65:m2"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationtablepattern", "id": "f1782:c66:m0"} {"signature": "@propertydef RowOrColumnMajor(self) -> int:", "body": "return self.pattern.CurrentRowOrColumnMajor", "docstring": "Property RowOrColumnMajor.\nCall IUIAutomationTablePattern::get_CurrentRowOrColumnMajor.\nReturn int, a value in class `RowOrColumnMajor`, the primary direction of traversal for the table.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtablepattern-get_currentroworcolumnmajor", "id": "f1782:c66:m1"} {"signature": "def GetColumnHeaders(self) -> list:", "body": "eleArray = self.pattern.GetCurrentColumnHeaders()if eleArray:controls = []for i in range(eleArray.Length):ele = eleArray.GetElement(i)con = Control.CreateControlFromElement(element=ele)if con:controls.append(con)return controlsreturn []", "docstring": "Call IUIAutomationTablePattern::GetCurrentColumnHeaders.\nReturn list, a list of `Control` subclasses, representing all the column headers in a table..\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtablepattern-getcurrentcolumnheaders", "id": "f1782:c66:m2"} {"signature": "def GetRowHeaders(self) -> list:", "body": "eleArray = self.pattern.GetCurrentRowHeaders()if eleArray:controls = []for i in range(eleArray.Length):ele = eleArray.GetElement(i)con = Control.CreateControlFromElement(element=ele)if con:controls.append(con)return controlsreturn []", "docstring": "Call IUIAutomationTablePattern::GetCurrentRowHeaders.\nReturn list, a list of `Control` subclasses, representing all the row headers in a table.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtablepattern-getcurrentrowheaders", "id": "f1782:c66:m3"} {"signature": "def __init__(self, textRange=None):", "body": "self.textRange = textRange", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationtextrange", "id": "f1782:c67:m0"} {"signature": "def 
AddToSelection(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.textRange.AddToSelection() == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationTextRange::AddToSelection.\nAdd the text range to the collection of selected text ranges in a control that supports multiple, disjoint spans of selected text.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-addtoselection", "id": "f1782:c67:m1"} {"signature": "def Clone(self) -> '':", "body": "return TextRange(textRange=self.textRange.Clone())", "docstring": "Call IUIAutomationTextRange::Clone.\nreturn `TextRange`, identical to the original and inheriting all properties of the original.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-clone", "id": "f1782:c67:m2"} {"signature": "def Compare(self, textRange: '') -> bool:", "body": "return bool(self.textRange.Compare(textRange.textRange))", "docstring": "Call IUIAutomationTextRange::Compare.\ntextRange: `TextRange`.\nReturn bool, specifies whether this text range has the same endpoints as another text range.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-compare", "id": "f1782:c67:m3"} {"signature": "def CompareEndpoints(self, srcEndPoint: int, textRange: '', targetEndPoint: int) -> int:", "body": "return self.textRange.CompareEndpoints(srcEndPoint, textRange.textRange, targetEndPoint)", "docstring": "Call IUIAutomationTextRange::CompareEndpoints.\nsrcEndPoint: int, a value in class `TextPatternRangeEndpoint`.\ntextRange: `TextRange`.\ntargetEndPoint: int, a value in class `TextPatternRangeEndpoint`.\nReturn int, a negative value if the caller's endpoint occurs earlier in the text than the target endpoint;\n 0 if the caller's endpoint is at the same location as the target endpoint;\n or a positive value if the caller's endpoint occurs later in the text than the target endpoint.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-compareendpoints", "id": "f1782:c67:m4"} {"signature": "def ExpandToEnclosingUnit(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.textRange.ExpandToEnclosingUnit() == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationTextRange::ExpandToEnclosingUnit.\nNormalize the text range by the specified text unit.\n The range is expanded if it is smaller than the specified unit,\n or shortened if it is longer than the specified unit.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-expandtoenclosingunit", "id": "f1782:c67:m5"} {"signature": "def FindAttribute(self, textAttributeId: int, val, backward: bool) -> '':", "body": "textRange = self.textRange.FindAttribute(textAttributeId, val, int(backward))if textRange:return TextRange(textRange=textRange)", "docstring": "Call IUIAutomationTextRange::FindAttribute.\ntextAttributeId: int, a value in class `TextAttributeId`.\nval: COM VARIANT according to textAttributeId?
todo.\nbackward: bool, True if the last occurring text range should be returned instead of the first; otherwise False.\nreturn `TextRange` or None, a text range subset that has the specified text attribute value.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-findattribute", "id": "f1782:c67:m6"} {"signature": "def FindText(self, text: str, backward: bool, ignoreCase: bool) -> '':", "body": "textRange = self.textRange.FindText(text, int(backward), int(ignoreCase))if textRange:return TextRange(textRange=textRange)", "docstring": "Call IUIAutomationTextRange::FindText.\ntext: str,\nbackward: bool, True if the last occurring text range should be returned instead of the first; otherwise False.\nignoreCase: bool, True if case should be ignored; otherwise False.\nreturn `TextRange` or None, a text range subset that contains the specified text.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-findtext", "id": "f1782:c67:m7"} {"signature": "def GetAttributeValue(self, textAttributeId: int) -> ctypes.POINTER(comtypes.IUnknown):", "body": "return self.textRange.GetAttributeValue(textAttributeId)", "docstring": "Call IUIAutomationTextRange::GetAttributeValue.\ntextAttributeId: int, a value in class `TextAttributeId`.\nReturn `ctypes.POINTER(comtypes.IUnknown)` or None, the value of the specified text attribute across the entire text range, todo.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-getattributevalue", "id": "f1782:c67:m8"} {"signature": "def GetBoundingRectangles(self) -> list:", "body": "floats = self.textRange.GetBoundingRectangles()rects = []for i in range(len(floats) // ):rect = Rect(int(floats[i * ]), int(floats[i * + ]),int(floats[i * ]) + int(floats[i * + ]), int(floats[i * + ]) + int(floats[i * + ]))rects.append(rect)return rects", "docstring": "Call IUIAutomationTextRange::GetBoundingRectangles.\ntextAttributeId: int, a value in class `TextAttributeId`.\nReturn list, a list of `Rect`.\n bounding rectangles for each fully or partially visible line of text in a text range..\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-getboundingrectangles\n\nfor rect in textRange.GetBoundingRectangles():\n print(rect.left, rect.top, rect.right, rect.bottom, rect.width(), rect.height(), rect.xcenter(), rect.ycenter())", "id": "f1782:c67:m9"} {"signature": "def GetChildren(self) -> list:", "body": "eleArray = self.textRange.GetChildren()if eleArray:controls = []for i in range(eleArray.Length):ele = eleArray.GetElement(i)con = Control.CreateControlFromElement(element=ele)if con:controls.append(con)return controlsreturn []", "docstring": "Call IUIAutomationTextRange::GetChildren.\ntextAttributeId: int, a value in class `TextAttributeId`.\nReturn list, a list of `Control` subclasses, embedded objects that fall within the text range..\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-getchildren", "id": "f1782:c67:m10"} {"signature": "def GetEnclosingControl(self) -> '':", "body": "return Control.CreateControlFromElement(self.textRange.GetEnclosingElement())", "docstring": "Call IUIAutomationTextRange::GetEnclosingElement.\nReturn `Control` subclass, the innermost UI Automation element that encloses the text range.\nRefer 
https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-getenclosingelement", "id": "f1782:c67:m11"} {"signature": "def GetText(self, maxLength: int = -) -> str:", "body": "return self.textRange.GetText(maxLength)", "docstring": "Call IUIAutomationTextRange::GetText.\nmaxLength: int, the maximum length of the string to return, or -1 if no limit is required.\nReturn str, the plain text of the text range.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-gettext", "id": "f1782:c67:m12"} {"signature": "def Move(self, unit: int, count: int, waitTime: float = OPERATION_WAIT_TIME) -> int:", "body": "ret = self.textRange.Move(unit, count)time.sleep(waitTime)return ret", "docstring": "Call IUIAutomationTextRange::Move.\nMove the text range forward or backward by the specified number of text units.\nunit: int, a value in class `TextUnit`.\ncount: int, the number of text units to move.\n A positive value moves the text range forward.\n A negative value moves the text range backward. Zero has no effect.\nwaitTime: float.\nReturn: int, the number of text units actually moved.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-move", "id": "f1782:c67:m13"} {"signature": "def MoveEndpointByRange(self, srcEndPoint: int, textRange: '', targetEndPoint: int, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.textRange.MoveEndpointByRange(srcEndPoint, textRange.textRange, targetEndPoint) == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationTextRange::MoveEndpointByRange.\nMove one endpoint of the current text range to the specified endpoint of a second text range.\nsrcEndPoint: int, a value in class `TextPatternRangeEndpoint`.\ntextRange: `TextRange`.\ntargetEndPoint: int, a value in class `TextPatternRangeEndpoint`.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-moveendpointbyrange", "id": "f1782:c67:m14"} {"signature": "def MoveEndpointByUnit(self, endPoint: int, unit: int, count: int, waitTime: float = OPERATION_WAIT_TIME) -> int:", "body": "ret = self.textRange.MoveEndpointByUnit(endPoint, unit, count)time.sleep(waitTime)return ret", "docstring": "Call IUIAutomationTextRange::MoveEndpointByUnit.\nMove one endpoint of the text range the specified number of text units within the document range.\nendPoint: int, a value in class `TextPatternRangeEndpoint`.\nunit: int, a value in class `TextUnit`.\ncount: int, the number of units to move.\n A positive count moves the endpoint forward.\n A negative count moves backward.\n A count of 0 has no effect.\nwaitTime: float.\nReturn int, the count of units actually moved.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-moveendpointbyunit", "id": "f1782:c67:m15"} {"signature": "def RemoveFromSelection(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.textRange.RemoveFromSelection() == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationTextRange::RemoveFromSelection.\nRemove the text range from an existing collection of selected text in a text container that supports multiple, disjoint selections.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer 
https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-removefromselection", "id": "f1782:c67:m16"} {"signature": "def ScrollIntoView(self, alignTop: bool = True, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.textRange.ScrollIntoView(int(alignTop)) == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationTextRange::ScrollIntoView.\nCause the text control to scroll until the text range is visible in the viewport.\nalignTop: bool, True if the text control should be scrolled so that the text range is flush with the top of the viewport;\n False if it should be flush with the bottom of the viewport.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-scrollintoview", "id": "f1782:c67:m17"} {"signature": "def Select(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.textRange.Select() == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationTextRange::Select.\nSelect the span of text that corresponds to this text range, and remove any previous selection.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-select", "id": "f1782:c67:m18"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationtextchildpattern", "id": "f1782:c68:m0"} {"signature": "@propertydef TextContainer(self) -> '':", "body": "return Control.CreateControlFromElement(self.pattern.TextContainer)", "docstring": "Property TextContainer.\nCall IUIAutomationTextChildPattern::get_TextContainer.\nReturn `Control` subclass, the nearest ancestor element that supports the Text control pattern.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextchildpattern-get_textcontainer", "id": "f1782:c68:m1"} {"signature": "@propertydef TextRange(self) -> TextRange:", "body": "return TextRange(self.pattern.TextRange)", "docstring": "Property TextRange.\nCall IUIAutomationTextChildPattern::get_TextRange.\nReturn `TextRange`, a text range that encloses this child element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextchildpattern-get_textrange", "id": "f1782:c68:m2"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationtexteditpattern", "id": "f1782:c69:m0"} {"signature": "def GetActiveComposition(self) -> TextRange:", "body": "textRange = self.pattern.GetActiveComposition()if textRange:return TextRange(textRange=textRange)", "docstring": "Call IUIAutomationTextEditPattern::GetActiveComposition.\nReturn `TextRange` or None, the active composition.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtexteditpattern-getactivecomposition", "id": "f1782:c69:m1"} {"signature": "def GetConversionTarget(self) -> TextRange:", "body": "textRange = self.pattern.GetConversionTarget()if textRange:return TextRange(textRange=textRange)", 
"docstring": "Call IUIAutomationTextEditPattern::GetConversionTarget.\nReturn `TextRange` or None, the current conversion target range..\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtexteditpattern-getconversiontarget", "id": "f1782:c69:m2"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationtextpattern", "id": "f1782:c70:m0"} {"signature": "@propertydef DocumentRange(self) -> TextRange:", "body": "return TextRange(self.pattern.DocumentRange)", "docstring": "Property DocumentRange.\nCall IUIAutomationTextPattern::get_DocumentRange.\nReturn `TextRange`, a text range that encloses the main text of a document.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextpattern-get_documentrange", "id": "f1782:c70:m1"} {"signature": "@propertydef SupportedTextSelection(self) -> bool:", "body": "return bool(self.pattern.SupportedTextSelection)", "docstring": "Property SupportedTextSelection.\nCall IUIAutomationTextPattern::get_SupportedTextSelection.\nReturn bool, specifies the type of text selection that is supported by the control.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextpattern-get_supportedtextselection", "id": "f1782:c70:m2"} {"signature": "def GetSelection(self) -> list:", "body": "eleArray = self.pattern.GetSelection()if eleArray:textRanges = []for i in range(eleArray.Length):ele = eleArray.GetElement(i)textRanges.append(TextRange(textRange=ele))return textRangesreturn []", "docstring": "Call IUIAutomationTextPattern::GetSelection.\nReturn list, a list of `TextRange`, represents the currently selected text in a text-based control.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextpattern-getselection", "id": "f1782:c70:m3"} {"signature": "def GetVisibleRanges(self) -> list:", "body": "eleArray = self.pattern.GetVisibleRanges()if eleArray:textRanges = []for i in range(eleArray.Length):ele = eleArray.GetElement(i)textRanges.append(TextRange(textRange=ele))return textRangesreturn []", "docstring": "Call IUIAutomationTextPattern::GetVisibleRanges.\nReturn list, a list of `TextRange`, disjoint text ranges from a text-based control\n where each text range represents a contiguous span of visible text.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextpattern-getvisibleranges", "id": "f1782:c70:m4"} {"signature": "def RangeFromChild(self, child) -> TextRange:", "body": "textRange = self.pattern.RangeFromChild(Control.Element)if textRange:return TextRange(textRange=textRange)", "docstring": "Call IUIAutomationTextPattern::RangeFromChild.\nchild: `Control` or its subclass.\nReturn `TextRange` or None, a text range enclosing a child element such as an image,\n hyperlink, Microsoft Excel spreadsheet, or other embedded object.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextpattern-rangefromchild", "id": "f1782:c70:m5"} {"signature": "def RangeFromPoint(self, x: int, y: int) -> TextRange:", "body": "textRange = self.pattern.RangeFromPoint(ctypes.wintypes.POINT(x, y))if textRange:return TextRange(textRange=textRange)", "docstring": "Call 
IUIAutomationTextPattern::RangeFromPoint.\nx: int.\ny: int.\nReturn `TextRange` or None, the degenerate (empty) text range nearest to the specified screen coordinates.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextpattern-rangefrompoint", "id": "f1782:c70:m6"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationtextpattern2", "id": "f1782:c71:m0"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationtogglepattern", "id": "f1782:c72:m0"} {"signature": "@propertydef ToggleState(self) -> int:", "body": "return self.pattern.CurrentToggleState", "docstring": "Property ToggleState.\nCall IUIAutomationTogglePattern::get_CurrentToggleState.\nReturn int, a value in class `ToggleState`, the state of the control.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtogglepattern-get_currenttogglestate", "id": "f1782:c72:m1"} {"signature": "def Toggle(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Toggle() == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationTogglePattern::Toggle.\nCycle through the toggle states of the control.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtogglepattern-toggle", "id": "f1782:c72:m2"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationtransformpattern", "id": "f1782:c73:m0"} {"signature": "@propertydef CanMove(self) -> bool:", "body": "return bool(self.pattern.CurrentCanMove)", "docstring": "Property CanMove.\nCall IUIAutomationTransformPattern::get_CurrentCanMove.\nReturn bool, indicates whether the element can be moved.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern-get_currentcanmove", "id": "f1782:c73:m1"} {"signature": "@propertydef CanResize(self) -> bool:", "body": "return bool(self.pattern.CurrentCanResize)", "docstring": "Property CanResize.\nCall IUIAutomationTransformPattern::get_CurrentCanResize.\nReturn bool, indicates whether the element can be resized.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern-get_currentcanresize", "id": "f1782:c73:m2"} {"signature": "@propertydef CanRotate(self) -> bool:", "body": "return bool(self.pattern.CurrentCanRotate)", "docstring": "Property CanRotate.\nCall IUIAutomationTransformPattern::get_CurrentCanRotate.\nReturn bool, indicates whether the element can be rotated.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern-get_currentcanrotate", "id": "f1782:c73:m3"} {"signature": "def Move(self, x: int, y: int, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Move(x, y) == S_OKtime.sleep(waitTime)return ret", "docstring": "Call
IUIAutomationTransformPattern::Move.\nMove the UI Automation element.\nx: int.\ny: int.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern-move", "id": "f1782:c73:m4"} {"signature": "def Resize(self, width: int, height: int, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Resize(width, height) == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationTransformPattern::Resize.\nResize the UI Automation element.\nwidth: int.\nheight: int.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern-resize", "id": "f1782:c73:m5"} {"signature": "def Rotate(self, degrees: int, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Rotate(degrees) == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationTransformPattern::Rotate.\nRotates the UI Automation element.\ndegrees: int.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern-rotate", "id": "f1782:c73:m6"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationtransformpattern2", "id": "f1782:c74:m0"} {"signature": "@propertydef CanZoom(self) -> bool:", "body": "return bool(self.pattern.CurrentCanZoom)", "docstring": "Property CanZoom.\nCall IUIAutomationTransformPattern2::get_CurrentCanZoom.\nReturn bool, indicates whether the control supports zooming of its viewport.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern2-get_CurrentCanZoom", "id": "f1782:c74:m1"} {"signature": "@propertydef ZoomLevel(self) -> float:", "body": "return self.pattern.CurrentZoomLevel", "docstring": "Property ZoomLevel.\nCall IUIAutomationTransformPattern2::get_CurrentZoomLevel.\nReturn float, the zoom level of the control's viewport.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern2-get_currentzoomlevel", "id": "f1782:c74:m2"} {"signature": "@propertydef ZoomMaximum(self) -> float:", "body": "return self.pattern.CurrentZoomMaximum", "docstring": "Property ZoomMaximum.\nCall IUIAutomationTransformPattern2::get_CurrentZoomMaximum.\nReturn float, the maximum zoom level of the control's viewport.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern2-get_currentzoommaximum", "id": "f1782:c74:m3"} {"signature": "@propertydef ZoomMinimum(self) -> float:", "body": "return self.pattern.CurrentZoomMinimum", "docstring": "Property ZoomMinimum.\nCall IUIAutomationTransformPattern2::get_CurrentZoomMinimum.\nReturn float, the minimum zoom level of the control's viewport.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern2-get_currentzoomminimum", "id": "f1782:c74:m4"} {"signature": "def Zoom(self, zoomLevel: float, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Zoom(zoomLevel) == 
S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationTransformPattern2::Zoom.\nZoom the viewport of the control.\nzoomLevel: float for int.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern2-zoom", "id": "f1782:c74:m5"} {"signature": "def ZoomByUnit(self, zoomUnit: int, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.ZoomByUnit(zoomUnit) == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationTransformPattern2::ZoomByUnit.\nZoom the viewport of the control by the specified unit.\nzoomUnit: int, a value in class `ZoomUnit`.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern2-zoombyunit", "id": "f1782:c74:m6"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationvaluepattern", "id": "f1782:c75:m0"} {"signature": "@propertydef IsReadOnly(self) -> bool:", "body": "return self.pattern.CurrentIsReadOnly", "docstring": "Property IsReadOnly.\nCall IUIAutomationTransformPattern2::IUIAutomationValuePattern::get_CurrentIsReadOnly.\nReturn bool, indicates whether the value of the element is read-only.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationvaluepattern-get_currentisreadonly", "id": "f1782:c75:m1"} {"signature": "@propertydef Value(self) -> str:", "body": "return self.pattern.CurrentValue", "docstring": "Property Value.\nCall IUIAutomationTransformPattern2::IUIAutomationValuePattern::get_CurrentValue.\nReturn str, the value of the element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationvaluepattern-get_currentvalue", "id": "f1782:c75:m2"} {"signature": "def SetValue(self, value: str, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.SetValue(value) == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationTransformPattern2::IUIAutomationValuePattern::SetValue.\nSet the value of the element.\nvalue: str.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationvaluepattern-setvalue", "id": "f1782:c75:m3"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationvirtualizeditempattern", "id": "f1782:c76:m0"} {"signature": "def Realize(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Realize() == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationVirtualizedItemPattern::Realize.\nCreate a full UI Automation element for a virtualized item.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationvirtualizeditempattern-realize", "id": "f1782:c76:m1"} {"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern", "docstring": "Refer 
https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationwindowpattern", "id": "f1782:c77:m0"} {"signature": "def Close(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Close() == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationWindowPattern::Close.\nClose the window.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationwindowpattern-close", "id": "f1782:c77:m1"} {"signature": "@propertydef CanMaximize(self) -> bool:", "body": "return bool(self.pattern.CurrentCanMaximize)", "docstring": "Property CanMaximize.\nCall IUIAutomationWindowPattern::get_CurrentCanMaximize.\nReturn bool, indicates whether the window can be maximized.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationwindowpattern-get_currentcanmaximize", "id": "f1782:c77:m2"} {"signature": "@propertydef CanMinimize(self) -> bool:", "body": "return bool(self.pattern.CurrentCanMinimize)", "docstring": "Property CanMinimize.\nCall IUIAutomationWindowPattern::get_CurrentCanMinimize.\nReturn bool, indicates whether the window can be minimized.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationwindowpattern-get_currentcanminimize", "id": "f1782:c77:m3"} {"signature": "@propertydef IsModal(self) -> bool:", "body": "return bool(self.pattern.CurrentIsModal)", "docstring": "Property IsModal.\nCall IUIAutomationWindowPattern::get_CurrentIsModal.\nReturn bool, indicates whether the window is modal.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationwindowpattern-get_currentismodal", "id": "f1782:c77:m4"} {"signature": "@propertydef IsTopmost(self) -> bool:", "body": "return bool(self.pattern.CurrentIsTopmost)", "docstring": "Property IsTopmost.\nCall IUIAutomationWindowPattern::get_CurrentIsTopmost.\nReturn bool, indicates whether the window is the topmost element in the z-order.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationwindowpattern-get_currentistopmost", "id": "f1782:c77:m5"} {"signature": "@propertydef WindowInteractionState(self) -> int:", "body": "return self.pattern.CurrentWindowInteractionState", "docstring": "Property WindowInteractionState.\nCall IUIAutomationWindowPattern::get_CurrentWindowInteractionState.\nReturn int, a value in class `WindowInteractionState`,\n the current state of the window for the purposes of user interaction.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationwindowpattern-get_currentwindowinteractionstate", "id": "f1782:c77:m6"} {"signature": "@propertydef WindowVisualState(self) -> int:", "body": "return self.pattern.CurrentWindowVisualState", "docstring": "Property WindowVisualState.\nCall IUIAutomationWindowPattern::get_CurrentWindowVisualState.\nReturn int, a value in class `WindowVisualState`,\n the visual state of the window; that is, whether it is in the normal, maximized, or minimized state.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationwindowpattern-get_currentwindowvisualstate", "id": "f1782:c77:m7"} {"signature": "def SetWindowVisualState(self, state: int, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret =
self.pattern.SetWindowVisualState(state) == S_OKtime.sleep(waitTime)return ret", "docstring": "Call IUIAutomationWindowPattern::SetWindowVisualState.\nMinimize, maximize, or restore the window.\nstate: int, a value in class `WindowVisualState`.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationwindowpattern-setwindowvisualstate", "id": "f1782:c77:m8"} {"signature": "def WaitForInputIdle(self, milliseconds: int) -> bool:", "body": "return self.pattern.WaitForInputIdle(milliseconds) == S_OK", "docstring": "Call IUIAutomationWindowPattern::WaitForInputIdle.\nCause the calling code to block for the specified time or\n until the associated process enters an idle state, whichever completes first.\nmilliseconds: int.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationwindowpattern-waitforinputidle", "id": "f1782:c77:m9"} {"signature": "def __init__(self, searchFromControl: '' = None, searchDepth: int = , searchWaitTime: float = SEARCH_INTERVAL, foundIndex: int = , element=None, **searchProperties):", "body": "self._element = elementself._elementDirectAssign = True if element else Falseself.searchFromControl = searchFromControlself.searchDepth = searchProperties.get('', searchDepth)self.searchWaitTime = searchWaitTimeself.foundIndex = foundIndexself.searchProperties = searchPropertiesregName = searchProperties.get('', '')self.regexName = re.compile(regName) if regName else Noneself._supportedPatterns = {}", "docstring": "searchFromControl: `Control` or its subclass, if it is None, search from root control(Desktop).\nsearchDepth: int, max search depth from searchFromControl.\nfoundIndex: int, starts with 1, >= 1.\nsearchWaitTime: float, wait searchWaitTime before every search(interval between searches).\nelement: `ctypes.POINTER(IUIAutomationElement)`, internal use.\nsearchProperties: defines how to search, the following keys can be used:\n ControlType: int, a value in class `ControlType`.\n ClassName: str.\n AutomationId: str.\n Name: str.\n SubName: str, a part str in Name.\n RegexName: str, supports regex using re.match.\n You can only use one of Name, SubName, RegexName in searchProperties.\n Depth: int, only search controls in relative depth from searchFromControl, ignore controls in depth(0~Depth-1),\n if set, searchDepth will be set to Depth too.\n Compare: Callable, custom compare function(control, depth)->bool.\n\n`Control` wraps IUIAutomationElement.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationelement", "id": "f1782:c78:m0"} {"signature": "@staticmethoddef CreateControlFromElement(element) -> '':", "body": "if element:controlType = element.CurrentControlTypeif controlType in ControlConstructors:return ControlConstructors[controlType](element=element)else:Logger.WriteLine(\"\".format(controlType), ConsoleColor.Red)", "docstring": "Create a concrete `Control` from a com type `IUIAutomationElement`.\nelement: `ctypes.POINTER(IUIAutomationElement)`.\nReturn a subclass of `Control`, an instance of the control's real type.", "id": "f1782:c78:m2"} {"signature": "@staticmethoddef CreateControlFromControl(control: '') -> '':", "body": "newControl = Control.CreateControlFromElement(control.Element)return newControl", "docstring": "Create a concrete `Control` from a control instance, copy
it.\ncontrol: `Control` or its subclass.\nReturn a subclass of `Control`, an instance of the control's real type.\nFor example: if control's ControlType is EditControl, return an EditControl.", "id": "f1782:c78:m3"} {"signature": "def SetSearchFromControl(self, searchFromControl: '') -> None:", "body": "self.searchFromControl = searchFromControl", "docstring": "searchFromControl: `Control` or its subclass", "id": "f1782:c78:m4"} {"signature": "def AddSearchProperties(self, **searchProperties) -> None:", "body": "self.searchProperties.update(searchProperties)if '' in searchProperties:self.searchDepth = searchProperties['']if '' in searchProperties:regName = searchProperties['']self.regexName = re.compile(regName) if regName else None", "docstring": "Add search properties using `dict.update`.\nsearchProperties: dict, same as searchProperties in `Control.__init__`.", "id": "f1782:c78:m6"} {"signature": "def RemoveSearchProperties(self, **searchProperties) -> None:", "body": "for key in searchProperties:del self.searchProperties[key]if key == '':self.regexName = None", "docstring": "searchProperties: dict, same as searchProperties in `Control.__init__`.", "id": "f1782:c78:m7"} {"signature": "def GetColorfulSearchPropertiesStr(self, keyColor='', valueColor='') -> str:", "body": "strs = [''.format(keyColor if k in Control.ValidKeys else '', k, valueColor,ControlTypeNames[v] if k == '' else repr(v)) for k, v in self.searchProperties.items()]return '' + ''.join(strs) + ''", "docstring": "keyColor, valueColor: str, color name in class ConsoleColor", "id": "f1782:c78:m9"} {"signature": "@propertydef AcceleratorKey(self) -> str:", "body": "return self.Element.CurrentAcceleratorKey", "docstring": "Property AcceleratorKey.\nCall IUIAutomationElement::get_CurrentAcceleratorKey.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentacceleratorkey", "id": "f1782:c78:m10"} {"signature": "@propertydef AccessKey(self) -> str:", "body": "return self.Element.CurrentAccessKey", "docstring": "Property AccessKey.\nCall IUIAutomationElement::get_CurrentAccessKey.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentaccesskey", "id": "f1782:c78:m11"} {"signature": "@propertydef AriaProperties(self) -> str:", "body": "return self.Element.CurrentAriaProperties", "docstring": "Property AriaProperties.\nCall IUIAutomationElement::get_CurrentAriaProperties.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentariaproperties", "id": "f1782:c78:m12"} {"signature": "@propertydef AriaRole(self) -> str:", "body": "return self.Element.CurrentAriaRole", "docstring": "Property AriaRole.\nCall IUIAutomationElement::get_CurrentAriaRole.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentariarole", "id": "f1782:c78:m13"} {"signature": "@propertydef AutomationId(self) -> str:", "body": "return self.Element.CurrentAutomationId", "docstring": "Property AutomationId.\nCall IUIAutomationElement::get_CurrentAutomationId.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentautomationid", "id": "f1782:c78:m14"} {"signature": "@propertydef BoundingRectangle(self) -> Rect:", "body": "rect = self.Element.CurrentBoundingRectanglereturn 
Rect(rect.left, rect.top, rect.right, rect.bottom)", "docstring": "Property BoundingRectangle.\nCall IUIAutomationElement::get_CurrentBoundingRectangle.\nReturn `Rect`.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentboundingrectangle\n\nrect = control.BoundingRectangle\nprint(rect.left, rect.top, rect.right, rect.bottom, rect.width(), rect.height(), rect.xcenter(), rect.ycenter())", "id": "f1782:c78:m15"} {"signature": "@propertydef ClassName(self) -> str:", "body": "return self.Element.CurrentClassName", "docstring": "Property ClassName.\nCall IUIAutomationElement::get_CurrentClassName.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentclassname", "id": "f1782:c78:m16"} {"signature": "@propertydef ControlType(self) -> int:", "body": "return self.Element.CurrentControlType", "docstring": "Property ControlType.\nReturn int, a value in class `ControlType`.\nCall IUIAutomationElement::get_CurrentControlType.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentcontroltype", "id": "f1782:c78:m17"} {"signature": "@propertydef Culture(self) -> int:", "body": "return self.Element.CurrentCulture", "docstring": "Property Culture.\nCall IUIAutomationElement::get_CurrentCulture.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentculture", "id": "f1782:c78:m18"} {"signature": "@propertydef FrameworkId(self) -> str:", "body": "return self.Element.CurrentFrameworkId", "docstring": "Property FrameworkId.\nCall IUIAutomationElement::get_CurrentFrameworkId.\nReturn str, such as Win32, WPF...\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentframeworkid", "id": "f1782:c78:m19"} {"signature": "@propertydef HasKeyboardFocus(self) -> bool:", "body": "return bool(self.Element.CurrentHasKeyboardFocus)", "docstring": "Property HasKeyboardFocus.\nCall IUIAutomationElement::get_CurrentHasKeyboardFocus.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currenthaskeyboardfocus", "id": "f1782:c78:m20"} {"signature": "@propertydef HelpText(self) -> str:", "body": "return self.Element.CurrentHelpText", "docstring": "Property HelpText.\nCall IUIAutomationElement::get_CurrentHelpText.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currenthelptext", "id": "f1782:c78:m21"} {"signature": "@propertydef IsContentElement(self) -> bool:", "body": "return bool(self.Element.CurrentIsContentElement)", "docstring": "Property IsContentElement.\nCall IUIAutomationElement::get_CurrentIsContentElement.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentiscontentelement", "id": "f1782:c78:m22"} {"signature": "@propertydef IsControlElement(self) -> bool:", "body": "return bool(self.Element.CurrentIsControlElement)", "docstring": "Property IsControlElement.\nCall IUIAutomationElement::get_CurrentIsControlElement.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentiscontrolelement", "id": "f1782:c78:m23"} {"signature": 
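Usage note (an editorial sketch, not one of the extracted records): a minimal way to exercise the search properties and element properties documented around here, assuming the module is imported as `uiautomation` and that `WindowControl` is the subclass mentioned in the GetTopLevelControl docstring; the ClassName is a placeholder.

import uiautomation as auto  # assumed import name

window = auto.WindowControl(searchDepth=1, ClassName='Notepad')  # placeholder ClassName
if window.Exists(maxSearchSeconds=3):
    rect = window.BoundingRectangle
    print(window.Name, window.ClassName, window.ControlTypeName)
    print(rect.left, rect.top, rect.width(), rect.height())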
"@propertydef IsDataValidForForm(self) -> bool:", "body": "return bool(self.Element.CurrentIsDataValidForForm)", "docstring": "Property IsDataValidForForm.\nCall IUIAutomationElement::get_CurrentIsDataValidForForm.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentisdatavalidforform", "id": "f1782:c78:m24"} {"signature": "@propertydef IsEnabled(self) -> bool:", "body": "return self.Element.CurrentIsEnabled", "docstring": "Property IsEnabled.\nCall IUIAutomationElement::get_CurrentIsEnabled.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentisenabled", "id": "f1782:c78:m25"} {"signature": "@propertydef IsKeyboardFocusable(self) -> bool:", "body": "return self.Element.CurrentIsKeyboardFocusable", "docstring": "Property IsKeyboardFocusable.\nCall IUIAutomationElement::get_CurrentIsKeyboardFocusable.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentiskeyboardfocusable", "id": "f1782:c78:m26"} {"signature": "@propertydef IsOffscreen(self) -> bool:", "body": "return self.Element.CurrentIsOffscreen", "docstring": "Property IsOffscreen.\nCall IUIAutomationElement::get_CurrentIsOffscreen.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentisoffscreen", "id": "f1782:c78:m27"} {"signature": "@propertydef IsPassword(self) -> bool:", "body": "return self.Element.CurrentIsPassword", "docstring": "Property IsPassword.\nCall IUIAutomationElement::get_CurrentIsPassword.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentispassword", "id": "f1782:c78:m28"} {"signature": "@propertydef IsRequiredForForm(self) -> bool:", "body": "return self.Element.CurrentIsRequiredForForm", "docstring": "Property IsRequiredForForm.\nCall IUIAutomationElement::get_CurrentIsRequiredForForm.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentisrequiredforform", "id": "f1782:c78:m29"} {"signature": "@propertydef ItemStatus(self) -> str:", "body": "return self.Element.CurrentItemStatus", "docstring": "Property ItemStatus.\nCall IUIAutomationElement::get_CurrentItemStatus.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentitemstatus", "id": "f1782:c78:m30"} {"signature": "@propertydef ItemType(self) -> str:", "body": "return self.Element.CurrentItemType", "docstring": "Property ItemType.\nCall IUIAutomationElement::get_CurrentItemType.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentitemtype", "id": "f1782:c78:m31"} {"signature": "@propertydef LocalizedControlType(self) -> str:", "body": "return self.Element.CurrentLocalizedControlType", "docstring": "Property LocalizedControlType.\nCall IUIAutomationElement::get_CurrentLocalizedControlType.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentlocalizedcontroltype", "id": "f1782:c78:m32"} {"signature": "@propertydef Name(self) -> str:", "body": "return self.Element.CurrentName or ''", "docstring": "Property Name.\nCall 
IUIAutomationElement::get_CurrentName.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentname", "id": "f1782:c78:m33"} {"signature": "@propertydef NativeWindowHandle(self) -> str:", "body": "handle = self.Element.CurrentNativeWindowHandlereturn if handle is None else handle", "docstring": "Property NativeWindowHandle.\nCall IUIAutomationElement::get_CurrentNativeWindowHandle.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentnativewindowhandle", "id": "f1782:c78:m34"} {"signature": "@propertydef Orientation(self) -> int:", "body": "return self.Element.CurrentOrientation", "docstring": "Property Orientation.\nReturn int, a value in class `OrientationType`.\nCall IUIAutomationElement::get_CurrentOrientation.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentorientation", "id": "f1782:c78:m35"} {"signature": "@propertydef ProcessId(self) -> int:", "body": "return self.Element.CurrentProcessId", "docstring": "Property ProcessId.\nCall IUIAutomationElement::get_CurrentProcessId.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentprocessid", "id": "f1782:c78:m36"} {"signature": "@propertydef ProviderDescription(self) -> str:", "body": "return self.Element.CurrentProviderDescription", "docstring": "Property ProviderDescription.\nCall IUIAutomationElement::get_CurrentProviderDescription.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentproviderdescription", "id": "f1782:c78:m37"} {"signature": "def GetClickablePoint(self) -> tuple:", "body": "point, gotClickable = self.Element.GetClickablePoint()return (point.x, point.y, bool(gotClickable))", "docstring": "Call IUIAutomationElement::GetClickablePoint.\nReturn tuple, (x: int, y: int, gotClickable: bool), like (20, 10, True)\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-getclickablepoint", "id": "f1782:c78:m38"} {"signature": "def GetPattern(self, patternId: int):", "body": "try:pattern = self.Element.GetCurrentPattern(patternId)if pattern:subPattern = CreatePattern(patternId, pattern)self._supportedPatterns[patternId] = subPatternreturn subPatternexcept comtypes.COMError as ex:pass", "docstring": "Call IUIAutomationElement::GetCurrentPattern.\nGet a new pattern by pattern id if it supports the pattern.\npatternId: int, a value in class `PatternId`.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-getcurrentpattern", "id": "f1782:c78:m39"} {"signature": "def GetPatternAs(self, patternId: int, riid):", "body": "return self.Element.GetCurrentPatternAs(patternId, riid)", "docstring": "Call IUIAutomationElement::GetCurrentPatternAs.\nGet a new pattern by pattern id if it supports the pattern, todo.\npatternId: int, a value in class `PatternId`.\nriid: GUID.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-getcurrentpatternas", "id": "f1782:c78:m40"} {"signature": "def GetPropertyValue(self, propertyId: int) -> Any:", "body": "return self.Element.GetCurrentPropertyValue(propertyId)", "docstring": "Call 
IUIAutomationElement::GetCurrentPropertyValue.\npropertyId: int, a value in class `PropertyId`.\nReturn Any, corresponding type according to propertyId.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-getcurrentpropertyvalue", "id": "f1782:c78:m41"} {"signature": "def GetPropertyValueEx(self, propertyId: int, ignoreDefaultValue: int) -> Any:", "body": "return self.Element.GetCurrentPropertyValueEx(propertyId, ignoreDefaultValue)", "docstring": "Call IUIAutomationElement::GetCurrentPropertyValueEx.\npropertyId: int, a value in class `PropertyId`.\nignoreDefaultValue: int, 0 or 1.\nReturn Any, corresponding type according to propertyId.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-getcurrentpropertyvalueex", "id": "f1782:c78:m42"} {"signature": "def GetRuntimeId(self) -> list:", "body": "return self.Element.GetRuntimeId()", "docstring": "Call IUIAutomationElement::GetRuntimeId.\nReturn list, a list of int.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-getruntimeid", "id": "f1782:c78:m43"} {"signature": "def SetFocus(self) -> bool:", "body": "return self.Element.SetFocus() == S_OK", "docstring": "Call IUIAutomationElement::SetFocus.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-setfocus", "id": "f1782:c78:m44"} {"signature": "@propertydef Element(self):", "body": "if not self._element:self.Refind(maxSearchSeconds=TIME_OUT_SECOND, searchIntervalSeconds=self.searchWaitTime)return self._element", "docstring": "Property Element.\nReturn `ctypes.POINTER(IUIAutomationElement)`.", "id": "f1782:c78:m45"} {"signature": "@propertydef ControlTypeName(self) -> str:", "body": "return ControlTypeNames[self.ControlType]", "docstring": "Property ControlTypeName.", "id": "f1782:c78:m46"} {"signature": "def GetCachedPattern(self, patternId: int, cache: bool):", "body": "if cache:pattern = self._supportedPatterns.get(patternId, None)if pattern:return patternelse:pattern = self.GetPattern(patternId)if pattern:self._supportedPatterns[patternId] = patternreturn patternelse:pattern = self.GetPattern(patternId)if pattern:self._supportedPatterns[patternId] = patternreturn pattern", "docstring": "Get a pattern by patternId.\npatternId: int, a value in class `PatternId`.\nReturn a pattern if it supports the pattern else None.\ncache: bool, if True, store the pattern for later use, if False, get a new pattern by `self.GetPattern`.", "id": "f1782:c78:m47"} {"signature": "def GetLegacyIAccessiblePattern(self) -> LegacyIAccessiblePattern:", "body": "return self.GetPattern(PatternId.LegacyIAccessiblePattern)", "docstring": "Return `LegacyIAccessiblePattern` if it supports the pattern else None.", "id": "f1782:c78:m48"} {"signature": "def GetAncestorControl(self, condition: Callable) -> '':", "body": "ancestor = selfdepth = while True:ancestor = ancestor.GetParentControl()depth -= if ancestor:if condition(ancestor, depth):return ancestorelse:break", "docstring": "Get a ancestor control that matches the condition.\ncondition: Callable, function (control: Control, depth: int)->bool,\n depth starts with -1 and decreses when search goes up.\nReturn `Control` subclass or None.", "id": "f1782:c78:m49"} {"signature": "def GetParentControl(self) -> '':", "body": "ele = 
_AutomationClient.instance().ViewWalker.GetParentElement(self.Element)return Control.CreateControlFromElement(ele)", "docstring": "Return `Control` subclass or None.", "id": "f1782:c78:m50"} {"signature": "def GetFirstChildControl(self) -> '':", "body": "ele = _AutomationClient.instance().ViewWalker.GetFirstChildElement(self.Element)return Control.CreateControlFromElement(ele)", "docstring": "Return `Control` subclass or None.", "id": "f1782:c78:m51"} {"signature": "def GetLastChildControl(self) -> '':", "body": "ele = _AutomationClient.instance().ViewWalker.GetLastChildElement(self.Element)return Control.CreateControlFromElement(ele)", "docstring": "Return `Control` subclass or None.", "id": "f1782:c78:m52"} {"signature": "def GetNextSiblingControl(self) -> '':", "body": "ele = _AutomationClient.instance().ViewWalker.GetNextSiblingElement(self.Element)return Control.CreateControlFromElement(ele)", "docstring": "Return `Control` subclass or None.", "id": "f1782:c78:m53"} {"signature": "def GetPreviousSiblingControl(self) -> '':", "body": "ele = _AutomationClient.instance().ViewWalker.GetPreviousSiblingElement(self.Element)return Control.CreateControlFromElement(ele)", "docstring": "Return `Control` subclass or None.", "id": "f1782:c78:m54"} {"signature": "def GetSiblingControl(self, condition: Callable, forward: bool = True) -> '':", "body": "if not forward:prev = selfwhile True:prev = prev.GetPreviousSiblingControl()if prev:if condition(prev):return prevelse:breaknext_ = selfwhile True:next_ = next_.GetNextSiblingControl()if next_:if condition(next_):return next_else:break", "docstring": "Find a SiblingControl by condition(control: Control)->bool.\nforward: bool, if True, only search next siblings, if False, search pervious siblings first, then search next siblings.\ncondition: Callable, function (control: Control)->bool.\nReturn `Control` subclass or None.", "id": "f1782:c78:m55"} {"signature": "def GetChildren(self) -> list:", "body": "children = []child = self.GetFirstChildControl()while child:children.append(child)child = child.GetNextSiblingControl()return children", "docstring": "Return list, a list of `Control` subclasses.", "id": "f1782:c78:m56"} {"signature": "def _CompareFunction(self, control: '', depth: int) -> bool:", "body": "for key, value in self.searchProperties.items():if '' == key:if value != control.ControlType:return Falseelif '' == key:if value != control.ClassName:return Falseelif '' == key:if value != control.AutomationId:return Falseelif '' == key:if value != control.Name:return Falseelif '' == key:if value not in control.Name:return Falseelif '' == key:if not self.regexName.match(control.Name):return Falseelif '' == key:if value != depth:return Falseelif '' == key:if not value(control, depth):return Falsereturn True", "docstring": "Define how to search.\ncontrol: `Control` or its subclass.\ndepth: int, tree depth from searchFromControl.\nReturn bool.", "id": "f1782:c78:m57"} {"signature": "def Exists(self, maxSearchSeconds: float = , searchIntervalSeconds: float = SEARCH_INTERVAL, printIfNotExist: bool = False) -> bool:", "body": "if self._element and self._elementDirectAssign:rootElement = GetRootControl().Elementif self._element == rootElement:return Trueelse:parentElement = _AutomationClient.instance().ViewWalker.GetParentElement(self._element)if parentElement:return Trueelse:return Falseif len(self.searchProperties) == :raise LookupError(\"\")self._element = NonestartTime = ProcessTime()prev = self.searchFromControlif prev and not prev._element and not 
prev.Exists(maxSearchSeconds, searchIntervalSeconds):if printIfNotExist or DEBUG_EXIST_DISAPPEAR:Logger.ColorfullyWriteLine(self.GetColorfulSearchPropertiesStr() + '')return FalsestartTime2 = ProcessTime()if DEBUG_SEARCH_TIME:startDateTime = datetime.datetime.now()while True:control = FindControl(self.searchFromControl, self._CompareFunction, self.searchDepth, False, self.foundIndex)if control:self._element = control.Elementcontrol._element = if DEBUG_SEARCH_TIME:Logger.ColorfullyWriteLine(''.format(self.GetColorfulSearchPropertiesStr(), control.traverseCount, ProcessTime() - startTime2,startDateTime.time(), datetime.datetime.now().time()))return Trueelse:remain = startTime + maxSearchSeconds - ProcessTime()if remain > :time.sleep(min(remain, searchIntervalSeconds))else:if printIfNotExist or DEBUG_EXIST_DISAPPEAR:Logger.ColorfullyWriteLine(self.GetColorfulSearchPropertiesStr() + '')return False", "docstring": "maxSearchSeconds: float\nsearchIntervalSeconds: float\nFind control every searchIntervalSeconds seconds in maxSearchSeconds seconds.\nReturn bool, True if find", "id": "f1782:c78:m58"} {"signature": "def Disappears(self, maxSearchSeconds: float = , searchIntervalSeconds: float = SEARCH_INTERVAL, printIfNotDisappear: bool = False) -> bool:", "body": "global DEBUG_EXIST_DISAPPEARstart = ProcessTime()while True:temp = DEBUG_EXIST_DISAPPEARDEBUG_EXIST_DISAPPEAR = False if not self.Exists(, , False):DEBUG_EXIST_DISAPPEAR = tempreturn TrueDEBUG_EXIST_DISAPPEAR = tempremain = start + maxSearchSeconds - ProcessTime()if remain > :time.sleep(min(remain, searchIntervalSeconds))else:if printIfNotDisappear or DEBUG_EXIST_DISAPPEAR:Logger.ColorfullyWriteLine(self.GetColorfulSearchPropertiesStr() + '')return False", "docstring": "maxSearchSeconds: float\nsearchIntervalSeconds: float\nCheck if control disappears every searchIntervalSeconds seconds in maxSearchSeconds seconds.\nReturn bool, True if control disappears.", "id": "f1782:c78:m59"} {"signature": "def Refind(self, maxSearchSeconds: float = TIME_OUT_SECOND, searchIntervalSeconds: float = SEARCH_INTERVAL, raiseException: bool = True) -> bool:", "body": "if not self.Exists(maxSearchSeconds, searchIntervalSeconds, False if raiseException else DEBUG_EXIST_DISAPPEAR):if raiseException:Logger.ColorfullyWriteLine('' + self.GetColorfulSearchPropertiesStr())raise LookupError('' + self.GetSearchPropertiesStr())else:return Falsereturn True", "docstring": "Refind the control every searchIntervalSeconds seconds in maxSearchSeconds seconds.\nmaxSearchSeconds: float.\nsearchIntervalSeconds: float.\nraiseException: bool, if True, raise a LookupError if timeout.\nReturn bool, True if find.", "id": "f1782:c78:m60"} {"signature": "def MoveCursorToInnerPos(self, x: int = None, y: int = None, ratioX: float = , ratioY: float = , simulateMove: bool = True) -> tuple:", "body": "rect = self.BoundingRectangleif rect.width() == or rect.height() == :Logger.ColorfullyWriteLine(''.format(self.ControlTypeName, rect, self.GetColorfulSearchPropertiesStr()))returnif x is None:x = rect.left + int(rect.width() * ratioX)else:x = (rect.left if x >= else rect.right) + xif y is None:y = rect.top + int(rect.height() * ratioY)else:y = (rect.top if y >= else rect.bottom) + yif simulateMove and MAX_MOVE_SECOND > :MoveTo(x, y, waitTime=)else:SetCursorPos(x, y)return x, y", "docstring": "Move cursor to control's internal position, default to center.\nx: int, if < 0, move to self.BoundingRectangle.right + x, if not None, ignore ratioX.\ny: int, if < 0, move to 
self.BoundingRectangle.bottom + y, if not None, ignore ratioY.\nratioX: float.\nratioY: float.\nsimulateMove: bool.\nReturn tuple, two ints(x,y), the cursor position relative to screen(0,0) after moving or None if control's width or height == 0.", "id": "f1782:c78:m61"} {"signature": "def MoveCursorToMyCenter(self, simulateMove: bool = True) -> tuple:", "body": "return self.MoveCursorToInnerPos(simulateMove=simulateMove)", "docstring": "Move cursor to control's center.\nReturn tuple, two ints(x,y), the cursor position relative to screen(0,0) after moving.", "id": "f1782:c78:m62"} {"signature": "def Click(self, x: int = None, y: int = None, ratioX: float = , ratioY: float = , simulateMove: bool = True, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "point = self.MoveCursorToInnerPos(x, y, ratioX, ratioY, simulateMove)if point:Click(point[], point[], waitTime)", "docstring": "x: int, if < 0, click self.BoundingRectangle.right + x, if not None, ignore ratioX.\ny: int, if < 0, click self.BoundingRectangle.bottom + y, if not None, ignore ratioY.\nratioX: float.\nratioY: float.\nsimulateMove: bool, if True, first move cursor to control smoothly.\nwaitTime: float.\n\nClick(), Click(ratioX=0.5, ratioY=0.5): click center.\nClick(10, 10): click left+10, top+10.\nClick(-10, -10): click right-10, bottom-10.", "id": "f1782:c78:m63"} {"signature": "def MiddleClick(self, x: int = None, y: int = None, ratioX: float = , ratioY: float = , simulateMove: bool = True, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "point = self.MoveCursorToInnerPos(x, y, ratioX, ratioY, simulateMove)if point:MiddleClick(point[], point[], waitTime)", "docstring": "x: int, if < 0, middle click self.BoundingRectangle.right + x, if not None, ignore ratioX.\ny: int, if < 0, middle click self.BoundingRectangle.bottom + y, if not None, ignore ratioY.\nratioX: float.\nratioY: float.\nsimulateMove: bool, if True, first move cursor to control smoothly.\nwaitTime: float.\n\nMiddleClick(), MiddleClick(ratioX=0.5, ratioY=0.5): middle click center.\nMiddleClick(10, 10): middle click left+10, top+10.\nMiddleClick(-10, -10): middle click right-10, bottom-10.", "id": "f1782:c78:m64"} {"signature": "def RightClick(self, x: int = None, y: int = None, ratioX: float = , ratioY: float = , simulateMove: bool = True, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "point = self.MoveCursorToInnerPos(x, y, ratioX, ratioY, simulateMove)if point:RightClick(point[], point[], waitTime)", "docstring": "x: int, if < 0, right click self.BoundingRectangle.right + x, if not None, ignore ratioX.\ny: int, if < 0, right click self.BoundingRectangle.bottom + y, if not None, ignore ratioY.\nratioX: float.\nratioY: float.\nsimulateMove: bool, if True, first move cursor to control smoothly.\nwaitTime: float.\n\nRightClick(), RightClick(ratioX=0.5, ratioY=0.5): right click center.\nRightClick(10, 10): right click left+10, top+10.\nRightClick(-10, -10): right click right-10, bottom-10.", "id": "f1782:c78:m65"} {"signature": "def DoubleClick(self, x: int = None, y: int = None, ratioX: float = , ratioY: float = , simulateMove: bool = True, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "x, y = self.MoveCursorToInnerPos(x, y, ratioX, ratioY, simulateMove)Click(x, y, GetDoubleClickTime() * / )Click(x, y, waitTime)", "docstring": "x: int, if < 0, double click self.BoundingRectangle.right + x, if not None, ignore ratioX.\ny: int, if < 0, double click self.BoundingRectangle.bottom + y, if not None, ignore ratioY.\nratioX:
float.\nratioY: float.\nsimulateMove: bool, if True, first move cursor to control smoothly.\nwaitTime: float.\n\nDoubleClick(), DoubleClick(ratioX=0.5, ratioY=0.5): double click center.\nDoubleClick(10, 10): double click left+10, top+10.\nDoubleClick(-10, -10): double click right-10, bottom-10.", "id": "f1782:c78:m66"} {"signature": "def WheelDown(self, wheelTimes: int = , interval: float = , waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "x, y = GetCursorPos()self.SetFocus()self.MoveCursorToMyCenter(False)WheelDown(wheelTimes, interval, waitTime)SetCursorPos(x, y)", "docstring": "Make control have focus first, move cursor to center and mouse wheel down.\nwheelTimes: int.\ninterval: float.\nwaitTime: float.", "id": "f1782:c78:m67"} {"signature": "def WheelUp(self, wheelTimes: int = , interval: float = , waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "x, y = GetCursorPos()self.SetFocus()self.MoveCursorToMyCenter(False)WheelUp(wheelTimes, interval, waitTime)SetCursorPos(x, y)", "docstring": "Make control have focus first, move cursor to center and mouse wheel up.\nwheelTimes: int.\ninterval: float.\nwaitTime: float.", "id": "f1782:c78:m68"} {"signature": "def ShowWindow(self, cmdShow: int, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "handle = self.NativeWindowHandleif not handle:control = selfwhile not handle:control = control.GetParentControl()handle = control.NativeWindowHandleif handle:ret = ShowWindow(handle, cmdShow)time.sleep(waitTime)return ret", "docstring": "Get a native handle from self or ancestors until valid and call native `ShowWindow` with cmdShow.\ncmdShow: int, a value in in class `SW`.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.", "id": "f1782:c78:m69"} {"signature": "def Show(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "return self.ShowWindow(SW.Show, waitTime)", "docstring": "Call native `ShowWindow(SW.Show)`.\nReturn bool, True if succeed otherwise False.", "id": "f1782:c78:m70"} {"signature": "def Hide(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "return self.ShowWindow(SW.Hide, waitTime)", "docstring": "Call native `ShowWindow(SW.Hide)`.\nwaitTime: float\nReturn bool, True if succeed otherwise False.", "id": "f1782:c78:m71"} {"signature": "def MoveWindow(self, x: int, y: int, width: int, height: int, repaint: bool = True) -> bool:", "body": "handle = self.NativeWindowHandleif handle:return MoveWindow(handle, x, y, width, height, int(repaint))return False", "docstring": "Call native MoveWindow if control has a valid native handle.\nx: int.\ny: int.\nwidth: int.\nheight: int.\nrepaint: bool.\nReturn bool, True if succeed otherwise False.", "id": "f1782:c78:m72"} {"signature": "def GetWindowText(self) -> str:", "body": "handle = self.NativeWindowHandleif handle:return GetWindowText(handle)", "docstring": "Call native GetWindowText if control has a valid native handle.", "id": "f1782:c78:m73"} {"signature": "def SetWindowText(self, text: str) -> bool:", "body": "handle = self.NativeWindowHandleif handle:return SetWindowText(handle, text)return False", "docstring": "Call native SetWindowText if control has a valid native handle.", "id": "f1782:c78:m74"} {"signature": "def SendKey(self, key: int, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "self.SetFocus()SendKey(key, waitTime)", "docstring": "Make control have focus first and type a key.\n`self.SetFocus` may not work for some controls, you may need to click it to make it have focus.\nkey: int, a key code value in 
class Keys.\nwaitTime: float.", "id": "f1782:c78:m75"} {"signature": "def SendKeys(self, keys: str, interval: float = , waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "self.SetFocus()SendKeys(keys, interval, waitTime)", "docstring": "Make control have focus first and type keys.\n`self.SetFocus` may not work for some controls, you may need to click it to make it have focus.\nkeys: str, keys to type, see the docstring of `SendKeys`.\ninterval: float, seconds between keys.", "id": "f1782:c78:m76"} {"signature": "def GetPixelColor(self, x: int, y: int) -> int:", "body": "handle = self.NativeWindowHandleif handle:return GetPixelColor(x, y, handle)", "docstring": "Call native `GetPixelColor` if control has a valid native handle.\nUse `self.ToBitmap` if control doesn't have a valid native handle or you get many pixels.\nx: int, internal x position.\ny: int, internal y position.\nReturn int, a color value in bgr.\nr = bgr & 0x0000FF\ng = (bgr & 0x00FF00) >> 8\nb = (bgr & 0xFF0000) >> 16", "id": "f1782:c78:m77"} {"signature": "def ToBitmap(self, x: int = , y: int = , width: int = , height: int = ) -> Bitmap:", "body": "bitmap = Bitmap()bitmap.FromControl(self, x, y, width, height)return bitmap", "docstring": "Capture control to a Bitmap object.\nx, y: int, the point in control's internal position(from 0,0).\nwidth, height: int, image's width and height from x, y, use 0 for entire area.\n If width(or height) < 0, image size will be control's width(or height) - width(or height).", "id": "f1782:c78:m78"} {"signature": "def CaptureToImage(self, savePath: str, x: int = , y: int = , width: int = , height: int = ) -> bool:", "body": "bitmap = Bitmap()if bitmap.FromControl(self, x, y, width, height):return bitmap.ToFile(savePath)return False", "docstring": "Capture control to a image file.\nsavePath: str, should end with .bmp, .jpg, .jpeg, .png, .gif, .tif, .tiff.\nx, y: int, the point in control's internal position(from 0,0).\nwidth, height: int, image's width and height from x, y, use 0 for entire area.\n If width(or height) < 0, image size will be control's width(or height) - width(or height).\nReturn bool, True if succeed otherwise False.", "id": "f1782:c78:m79"} {"signature": "def IsTopLevel(self) -> bool:", "body": "handle = self.NativeWindowHandleif handle:return GetAncestor(handle, GAFlag.Root) == handlereturn False", "docstring": "Determine whether current control is top level.", "id": "f1782:c78:m80"} {"signature": "def GetTopLevelControl(self) -> '':", "body": "handle = self.NativeWindowHandleif handle:topHandle = GetAncestor(handle, GAFlag.Root)if topHandle:if topHandle == handle:return selfelse:return ControlFromHandle(topHandle)else:passelse:control = selfwhile True:control = control.GetParentControl()handle = control.NativeWindowHandleif handle:topHandle = GetAncestor(handle, GAFlag.Root)return ControlFromHandle(topHandle)", "docstring": "Get the top level control which current control lays.\nIf current control is top level, return self.\nIf current control is root control, return None.\nReturn `PaneControl` or `WindowControl` or None.", "id": "f1782:c78:m81"} {"signature": "def GetExpandCollapsePattern(self) -> ExpandCollapsePattern:", "body": "return self.GetPattern(PatternId.ExpandCollapsePattern)", "docstring": "Return `ExpandCollapsePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c80:m1"} {"signature": "def GetInvokePattern(self) -> InvokePattern:", "body": "return self.GetPattern(PatternId.InvokePattern)", "docstring": 
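Usage note (an editorial sketch, not one of the extracted records): the SendKeys wrapper documented above and the ValuePattern records earlier can be combined to fill in an edit field. The import name `uiautomation`, the `EditControl` class and the AutomationId value are assumptions/placeholders.

import uiautomation as auto  # assumed import name

edit = auto.EditControl(searchDepth=8, AutomationId='1001')  # placeholder search properties
valuePattern = edit.GetValuePattern()
if valuePattern and not valuePattern.IsReadOnly:
    valuePattern.SetValue('hello')         # set the text through ValuePattern
else:
    edit.SendKeys('hello', interval=0.05)  # fall back to simulated typing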
"Return `InvokePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c80:m2"} {"signature": "def GetTogglePattern(self) -> TogglePattern:", "body": "return self.GetPattern(PatternId.TogglePattern)", "docstring": "Return `TogglePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c80:m3"} {"signature": "def GetGridPattern(self) -> GridPattern:", "body": "return self.GetPattern(PatternId.GridPattern)", "docstring": "Return `GridPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c81:m1"} {"signature": "def GetTablePattern(self) -> TablePattern:", "body": "return self.GetPattern(PatternId.TablePattern)", "docstring": "Return `TablePattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c81:m2"} {"signature": "def GetScrollPattern(self) -> ScrollPattern:", "body": "return self.GetPattern(PatternId.ScrollPattern)", "docstring": "Return `ScrollPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c81:m3"} {"signature": "def GetSelectionPattern(self) -> SelectionPattern:", "body": "return self.GetPattern(PatternId.SelectionPattern)", "docstring": "Return `SelectionPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c81:m4"} {"signature": "def GetTogglePattern(self) -> TogglePattern:", "body": "return self.GetPattern(PatternId.TogglePattern)", "docstring": "Return `TogglePattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c82:m1"} {"signature": "def GetExpandCollapsePattern(self) -> ExpandCollapsePattern:", "body": "return self.GetPattern(PatternId.ExpandCollapsePattern)", "docstring": "Return `ExpandCollapsePattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c83:m1"} {"signature": "def GetSelectionPattern(self) -> SelectionPattern:", "body": "return self.GetPattern(PatternId.SelectionPattern)", "docstring": "Return `SelectionPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c83:m2"} {"signature": "def GetValuePattern(self) -> ValuePattern:", "body": "return self.GetPattern(PatternId.ValuePattern)", "docstring": "Return `ValuePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c83:m3"} {"signature": "def Select(self, itemName: str = '', condition: Callable = None, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "expandCollapsePattern = self.GetExpandCollapsePattern()if expandCollapsePattern:expandCollapsePattern.Expand()else:self.Click(x=-, ratioY=, simulateMove=False)find = Falseif condition:listItemControl = self.ListItemControl(Compare=lambda c, d: condition(c.Name))else:listItemControl = self.ListItemControl(Name=itemName)if listItemControl.Exists():scrollItemPattern = listItemControl.GetScrollItemPattern()if scrollItemPattern:scrollItemPattern.ScrollIntoView(waitTime=)listItemControl.Click(waitTime=waitTime)find = Trueelse:listControl = ListControl(searchDepth= )if listControl.Exists():if condition:listItemControl = self.ListItemControl(Compare=lambda c, d: condition(c.Name))else:listItemControl = self.ListItemControl(Name=itemName)if listItemControl.Exists(, ):scrollItemPattern = listItemControl.GetScrollItemPattern()if scrollItemPattern:scrollItemPattern.ScrollIntoView(waitTime=)listItemControl.Click(waitTime=waitTime)find = Trueif 
not find:Logger.ColorfullyWriteLine(''.format(itemName), ConsoleColor.Yellow)if expandCollapsePattern:expandCollapsePattern.Collapse(waitTime)else:self.Click(x=-, ratioY=, simulateMove=False, waitTime=waitTime)return find", "docstring": "Show combobox's popup menu and select an item by name.\nitemName: str.\ncondition: Callable function(comboBoxItemName: str)->bool, if condition is valid, ignore itemName.\nwaitTime: float.\nSome comboboxes don't support SelectionPattern, here is a workaround.\nThis method tries to add selection support.\nIt may not work for some comboboxes, such as comboboxes in older Qt versions.\nIf it doesn't work, you should write your own version of Select, or it doesn't support selection at all.", "id": "f1782:c83:m4"} {"signature": "def GetGridPattern(self) -> GridPattern:", "body": "return self.GetPattern(PatternId.GridPattern)", "docstring": "Return `GridPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c85:m1"} {"signature": "def GetScrollPattern(self) -> ScrollPattern:", "body": "return self.GetPattern(PatternId.ScrollPattern)", "docstring": "Return `ScrollPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c85:m2"} {"signature": "def GetSelectionPattern(self) -> SelectionPattern:", "body": "return self.GetPattern(PatternId.SelectionPattern)", "docstring": "Return `SelectionPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c85:m3"} {"signature": "def GetTablePattern(self) -> TablePattern:", "body": "return self.GetPattern(PatternId.TablePattern)", "docstring": "Return `TablePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c85:m4"} {"signature": "def GetSelectionItemPattern(self) -> SelectionItemPattern:", "body": "return self.GetPattern(PatternId.SelectionItemPattern)", "docstring": "Return `SelectionItemPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c86:m1"} {"signature": "def GetExpandCollapsePattern(self) -> ExpandCollapsePattern:", "body": "return self.GetPattern(PatternId.ExpandCollapsePattern)", "docstring": "Return `ExpandCollapsePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c86:m2"} {"signature": "def GetGridItemPattern(self) -> GridItemPattern:", "body": "return self.GetPattern(PatternId.GridItemPattern)", "docstring": "Return `GridItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c86:m3"} {"signature": "def GetScrollItemPattern(self) -> ScrollItemPattern:", "body": "return self.GetPattern(PatternId.ScrollItemPattern)", "docstring": "Return `ScrollItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c86:m4"} {"signature": "def GetTableItemPattern(self) -> TableItemPattern:", "body": "return self.GetPattern(PatternId.TableItemPattern)", "docstring": "Return `TableItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c86:m5"} {"signature": "def GetTogglePattern(self) -> TogglePattern:", "body": "return self.GetPattern(PatternId.TogglePattern)", "docstring": "Return `TogglePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c86:m6"} {"signature": "def GetValuePattern(self) -> ValuePattern:", "body": "return self.GetPattern(PatternId.ValuePattern)", "docstring":
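Usage note (an editorial sketch, not one of the extracted records): the Select helper documented just above accepts either an item name or a condition callback. The import name `uiautomation`, the `ComboBoxControl` class name and the AutomationId are assumptions/placeholders.

import uiautomation as auto  # assumed import name

combo = auto.ComboBoxControl(searchDepth=6, AutomationId='comboFont')  # placeholder search properties
combo.Select('Consolas')                                    # select by exact item name
combo.Select(condition=lambda name: name.startswith('C'))   # or select the first item whose name matches a predicate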
"Return `ValuePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c86:m7"} {"signature": "def GetTextPattern(self) -> TextPattern:", "body": "return self.GetPattern(PatternId.TextPattern)", "docstring": "Return `TextPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c87:m1"} {"signature": "def GetScrollPattern(self) -> ScrollPattern:", "body": "return self.GetPattern(PatternId.ScrollPattern)", "docstring": "Return `ScrollPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c87:m2"} {"signature": "def GetValuePattern(self) -> ValuePattern:", "body": "return self.GetPattern(PatternId.ValuePattern)", "docstring": "Return `ValuePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c87:m3"} {"signature": "def GetRangeValuePattern(self) -> RangeValuePattern:", "body": "return self.GetPattern(PatternId.RangeValuePattern)", "docstring": "Return `RangeValuePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c88:m1"} {"signature": "def GetTextPattern(self) -> TextPattern:", "body": "return self.GetPattern(PatternId.TextPattern)", "docstring": "Return `TextPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c88:m2"} {"signature": "def GetValuePattern(self) -> ValuePattern:", "body": "return self.GetPattern(PatternId.ValuePattern)", "docstring": "Return `ValuePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c88:m3"} {"signature": "def GetExpandCollapsePattern(self) -> ExpandCollapsePattern:", "body": "return self.GetPattern(PatternId.ExpandCollapsePattern)", "docstring": "Return `ExpandCollapsePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c89:m1"} {"signature": "def GetTransformPattern(self) -> TransformPattern:", "body": "return self.GetPattern(PatternId.TransformPattern)", "docstring": "Return `TransformPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c90:m1"} {"signature": "def GetInvokePattern(self) -> InvokePattern:", "body": "return self.GetPattern(PatternId.InvokePattern)", "docstring": "Return `InvokePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c91:m1"} {"signature": "def GetTransformPattern(self) -> TransformPattern:", "body": "return self.GetPattern(PatternId.TransformPattern)", "docstring": "Return `TransformPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c91:m2"} {"signature": "def GetInvokePattern(self) -> InvokePattern:", "body": "return self.GetPattern(PatternId.InvokePattern)", "docstring": "Return `InvokePattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c92:m1"} {"signature": "def GetValuePattern(self) -> ValuePattern:", "body": "return self.GetPattern(PatternId.ValuePattern)", "docstring": "Return `ValuePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c92:m2"} {"signature": "def GetGridItemPattern(self) -> GridItemPattern:", "body": "return self.GetPattern(PatternId.GridItemPattern)", "docstring": "Return `GridItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c93:m1"} {"signature": "def 
GetTableItemPattern(self) -> TableItemPattern:", "body": "return self.GetPattern(PatternId.TableItemPattern)", "docstring": "Return `TableItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c93:m2"} {"signature": "def GetGridPattern(self) -> GridPattern:", "body": "return self.GetPattern(PatternId.GridPattern)", "docstring": "Return `GridPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c94:m1"} {"signature": "def GetMultipleViewPattern(self) -> MultipleViewPattern:", "body": "return self.GetPattern(PatternId.MultipleViewPattern)", "docstring": "Return `MultipleViewPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c94:m2"} {"signature": "def GetScrollPattern(self) -> ScrollPattern:", "body": "return self.GetPattern(PatternId.ScrollPattern)", "docstring": "Return `ScrollPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c94:m3"} {"signature": "def GetSelectionPattern(self) -> SelectionPattern:", "body": "return self.GetPattern(PatternId.SelectionPattern)", "docstring": "Return `SelectionPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c94:m4"} {"signature": "def GetSelectionPattern(self) -> SelectionPattern:", "body": "return self.GetPattern(PatternId.SelectionPattern)", "docstring": "Return `SelectionPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c95:m1"} {"signature": "def GetExpandCollapsePattern(self) -> ExpandCollapsePattern:", "body": "return self.GetPattern(PatternId.ExpandCollapsePattern)", "docstring": "Return `ExpandCollapsePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c95:m2"} {"signature": "def GetGridItemPattern(self) -> GridItemPattern:", "body": "return self.GetPattern(PatternId.GridItemPattern)", "docstring": "Return `GridItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c95:m3"} {"signature": "def GetInvokePattern(self) -> InvokePattern:", "body": "return self.GetPattern(PatternId.InvokePattern)", "docstring": "Return `InvokePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c95:m4"} {"signature": "def GetScrollItemPattern(self) -> ScrollItemPattern:", "body": "return self.GetPattern(PatternId.ScrollItemPattern)", "docstring": "Return `ScrollItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c95:m5"} {"signature": "def GetTogglePattern(self) -> TogglePattern:", "body": "return self.GetPattern(PatternId.TogglePattern)", "docstring": "Return `TogglePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c95:m6"} {"signature": "def GetValuePattern(self) -> ValuePattern:", "body": "return self.GetPattern(PatternId.ValuePattern)", "docstring": "Return `ValuePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c95:m7"} {"signature": "def GetDockPattern(self) -> DockPattern:", "body": "return self.GetPattern(PatternId.DockPattern)", "docstring": "Return `DockPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c97:m1"} {"signature": "def GetExpandCollapsePattern(self) -> ExpandCollapsePattern:", "body": "return 
self.GetPattern(PatternId.ExpandCollapsePattern)", "docstring": "Return `ExpandCollapsePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c97:m2"} {"signature": "def GetTransformPattern(self) -> TransformPattern:", "body": "return self.GetPattern(PatternId.TransformPattern)", "docstring": "Return `TransformPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c97:m3"} {"signature": "def GetExpandCollapsePattern(self) -> ExpandCollapsePattern:", "body": "return self.GetPattern(PatternId.ExpandCollapsePattern)", "docstring": "Return `ExpandCollapsePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c98:m1"} {"signature": "def GetInvokePattern(self) -> InvokePattern:", "body": "return self.GetPattern(PatternId.InvokePattern)", "docstring": "Return `InvokePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c98:m2"} {"signature": "def GetSelectionItemPattern(self) -> SelectionItemPattern:", "body": "return self.GetPattern(PatternId.SelectionItemPattern)", "docstring": "Return `SelectionItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c98:m3"} {"signature": "def GetTogglePattern(self) -> TogglePattern:", "body": "return self.GetPattern(PatternId.TogglePattern)", "docstring": "Return `TogglePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c98:m4"} {"signature": "def SetTopmost(self, isTopmost: bool = True, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "if self.IsTopLevel():ret = SetWindowTopmost(self.NativeWindowHandle, isTopmost)time.sleep(waitTime)return retreturn False", "docstring": "Set top level window topmost.\nisTopmost: bool.\nwaitTime: float.", "id": "f1782:c99:m0"} {"signature": "def Maximize(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "if self.IsTopLevel():return self.ShowWindow(SW.ShowMaximized, waitTime)return False", "docstring": "Set top level window maximize.", "id": "f1782:c99:m3"} {"signature": "def Restore(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "if self.IsTopLevel():return self.ShowWindow(SW.Restore, waitTime)return False", "docstring": "Restore window to normal state.\nSimilar to SwitchToThisWindow.", "id": "f1782:c99:m7"} {"signature": "def MoveToCenter(self) -> bool:", "body": "if self.IsTopLevel():rect = self.BoundingRectanglescreenWidth, screenHeight = GetScreenSize()x, y = (screenWidth - rect.width()) // , (screenHeight - rect.height()) // if x < : x = if y < : y = return SetWindowPos(self.NativeWindowHandle, SWP.HWND_Top, x, y, , , SWP.SWP_NoSize)return False", "docstring": "Move window to screen center.", "id": "f1782:c99:m8"} {"signature": "def SetActive(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "if self.IsTopLevel():handle = self.NativeWindowHandleif IsIconic(handle):ret = ShowWindow(handle, SW.Restore)elif not IsWindowVisible(handle):ret = ShowWindow(handle, SW.Show)ret = SetForegroundWindow(handle) time.sleep(waitTime)return retreturn False", "docstring": "Set top level window active.", "id": "f1782:c99:m9"} {"signature": "def GetDockPattern(self) -> DockPattern:", "body": "return self.GetPattern(PatternId.DockPattern)", "docstring": "Return `DockPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c100:m1"} {"signature": "def 
GetScrollPattern(self) -> ScrollPattern:", "body": "return self.GetPattern(PatternId.ScrollPattern)", "docstring": "Return `ScrollPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c100:m2"} {"signature": "def GetTransformPattern(self) -> TransformPattern:", "body": "return self.GetPattern(PatternId.TransformPattern)", "docstring": "Return `TransformPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c100:m3"} {"signature": "def GetRangeValuePattern(self) -> RangeValuePattern:", "body": "return self.GetPattern(PatternId.RangeValuePattern)", "docstring": "Return `RangeValuePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c101:m1"} {"signature": "def GetValuePattern(self) -> ValuePattern:", "body": "return self.GetPattern(PatternId.ValuePattern)", "docstring": "Return `ValuePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c101:m2"} {"signature": "def GetSelectionItemPattern(self) -> SelectionItemPattern:", "body": "return self.GetPattern(PatternId.SelectionItemPattern)", "docstring": "Return `SelectionItemPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c102:m1"} {"signature": "def GetRangeValuePattern(self) -> RangeValuePattern:", "body": "return self.GetPattern(PatternId.RangeValuePattern)", "docstring": "Return `RangeValuePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c103:m1"} {"signature": "def GetRangeValuePattern(self) -> RangeValuePattern:", "body": "return self.GetPattern(PatternId.RangeValuePattern)", "docstring": "Return `RangeValuePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c106:m1"} {"signature": "def GetSelectionPattern(self) -> SelectionPattern:", "body": "return self.GetPattern(PatternId.SelectionPattern)", "docstring": "Return `SelectionPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c106:m2"} {"signature": "def GetValuePattern(self) -> ValuePattern:", "body": "return self.GetPattern(PatternId.ValuePattern)", "docstring": "Return `ValuePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c106:m3"} {"signature": "def GetRangeValuePattern(self) -> RangeValuePattern:", "body": "return self.GetPattern(PatternId.RangeValuePattern)", "docstring": "Return `RangeValuePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c107:m1"} {"signature": "def GetSelectionPattern(self) -> SelectionPattern:", "body": "return self.GetPattern(PatternId.SelectionPattern)", "docstring": "Return `SelectionPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c107:m2"} {"signature": "def GetValuePattern(self) -> ValuePattern:", "body": "return self.GetPattern(PatternId.ValuePattern)", "docstring": "Return `ValuePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c107:m3"} {"signature": "def GetExpandCollapsePattern(self) -> ExpandCollapsePattern:", "body": "return self.GetPattern(PatternId.ExpandCollapsePattern)", "docstring": "Return `ExpandCollapsePattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c108:m1"} {"signature": "def GetInvokePattern(self) -> 
InvokePattern:", "body": "return self.GetPattern(PatternId.InvokePattern)", "docstring": "Return `InvokePattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c108:m2"} {"signature": "def GetGridPattern(self) -> GridPattern:", "body": "return self.GetPattern(PatternId.GridPattern)", "docstring": "Return `GridPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c109:m1"} {"signature": "def GetSelectionPattern(self) -> SelectionPattern:", "body": "return self.GetPattern(PatternId.SelectionPattern)", "docstring": "Return `SelectionPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c110:m1"} {"signature": "def GetScrollPattern(self) -> ScrollPattern:", "body": "return self.GetPattern(PatternId.ScrollPattern)", "docstring": "Return `ScrollPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c110:m2"} {"signature": "def GetSelectionItemPattern(self) -> SelectionItemPattern:", "body": "return self.GetPattern(PatternId.SelectionItemPattern)", "docstring": "Return `SelectionItemPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c111:m1"} {"signature": "def GetGridPattern(self) -> GridPattern:", "body": "return self.GetPattern(PatternId.GridPattern)", "docstring": "Return `GridPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c112:m1"} {"signature": "def GetGridItemPattern(self) -> GridItemPattern:", "body": "return self.GetPattern(PatternId.GridItemPattern)", "docstring": "Return `GridItemPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c112:m2"} {"signature": "def GetTablePattern(self) -> TablePattern:", "body": "return self.GetPattern(PatternId.TablePattern)", "docstring": "Return `TablePattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c112:m3"} {"signature": "def GetTableItemPattern(self) -> TableItemPattern:", "body": "return self.GetPattern(PatternId.TableItemPattern)", "docstring": "Return `TableItemPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c112:m4"} {"signature": "def GetGridItemPattern(self) -> GridItemPattern:", "body": "return self.GetPattern(PatternId.GridItemPattern)", "docstring": "Return `GridItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c113:m1"} {"signature": "def GetTableItemPattern(self) -> TableItemPattern:", "body": "return self.GetPattern(PatternId.TableItemPattern)", "docstring": "Return `TableItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c113:m2"} {"signature": "def GetTextPattern(self) -> TextPattern:", "body": "return self.GetPattern(PatternId.TextPattern)", "docstring": "Return `TextPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c113:m3"} {"signature": "def GetTransformPattern(self) -> TransformPattern:", "body": "return self.GetPattern(PatternId.TransformPattern)", "docstring": "Return `TransformPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c114:m1"} {"signature": "def GetDockPattern(self) -> DockPattern:", "body": "return self.GetPattern(PatternId.DockPattern)", "docstring": "Return `DockPattern` if it supports the pattern else None(Conditional support 
according to MSDN).", "id": "f1782:c116:m1"} {"signature": "def GetExpandCollapsePattern(self) -> ExpandCollapsePattern:", "body": "return self.GetPattern(PatternId.ExpandCollapsePattern)", "docstring": "Return `ExpandCollapsePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c116:m2"} {"signature": "def GetTransformPattern(self) -> TransformPattern:", "body": "return self.GetPattern(PatternId.TransformPattern)", "docstring": "Return `TransformPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c116:m3"} {"signature": "def GetTextPattern(self) -> TextPattern:", "body": "return self.GetPattern(PatternId.TextPattern)", "docstring": "Return `TextPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c117:m1"} {"signature": "def GetWindowPattern(self) -> WindowPattern:", "body": "return self.GetPattern(PatternId.WindowPattern)", "docstring": "Return `WindowPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c117:m2"} {"signature": "def GetScrollPattern(self) -> ScrollPattern:", "body": "return self.GetPattern(PatternId.ScrollPattern)", "docstring": "Return `ScrollPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c118:m1"} {"signature": "def GetSelectionPattern(self) -> SelectionPattern:", "body": "return self.GetPattern(PatternId.SelectionPattern)", "docstring": "Return `SelectionPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c118:m2"} {"signature": "def GetExpandCollapsePattern(self) -> ExpandCollapsePattern:", "body": "return self.GetPattern(PatternId.ExpandCollapsePattern)", "docstring": "Return `ExpandCollapsePattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c119:m1"} {"signature": "def GetInvokePattern(self) -> InvokePattern:", "body": "return self.GetPattern(PatternId.InvokePattern)", "docstring": "Return `InvokePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c119:m2"} {"signature": "def GetScrollItemPattern(self) -> ScrollItemPattern:", "body": "return self.GetPattern(PatternId.ScrollItemPattern)", "docstring": "Return `ScrollItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c119:m3"} {"signature": "def GetSelectionItemPattern(self) -> SelectionItemPattern:", "body": "return self.GetPattern(PatternId.SelectionItemPattern)", "docstring": "Return `SelectionItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c119:m4"} {"signature": "def GetTogglePattern(self) -> TogglePattern:", "body": "return self.GetPattern(PatternId.TogglePattern)", "docstring": "Return `TogglePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c119:m5"} {"signature": "def GetTransformPattern(self) -> TransformPattern:", "body": "return self.GetPattern(PatternId.TransformPattern)", "docstring": "Return `TransformPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c120:m1"} {"signature": "def GetWindowPattern(self) -> WindowPattern:", "body": "return self.GetPattern(PatternId.WindowPattern)", "docstring": "Return `WindowPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c120:m2"} {"signature": 
"def GetDockPattern(self) -> DockPattern:", "body": "return self.GetPattern(PatternId.DockPattern)", "docstring": "Return `DockPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c120:m3"} {"signature": "def MetroClose(self, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "if self.ClassName == METRO_WINDOW_CLASS_NAME:screenWidth, screenHeight = GetScreenSize()MoveTo(screenWidth // , , waitTime=)DragDrop(screenWidth // , , screenWidth // , screenHeight, waitTime=waitTime)else:Logger.WriteLine('', ConsoleColor.Yellow)", "docstring": "Only work on Windows 8/8.1, if current window is Metro UI.\nwaitTime: float.", "id": "f1782:c120:m4"} {"signature": "def format(self, record):", "body": "data = record._raw.copy()data[''] = data[''].isoformat()if data.get(''):data[''] = self.formatException(data[''])return json.dumps(data)", "docstring": "JSON-encode a record for serializing through redis.\n\nConvert date to iso format, and stringify any exceptions.", "id": "f1786:c0:m0"} {"signature": "def __init__(self, channel, redis_client, level=logging.NOTSET):", "body": "logging.Handler.__init__(self, level)self.channel = channelself.redis_client = redis_clientself.formatter = RedisFormatter()", "docstring": "Create a new logger for the given channel and redis_client.", "id": "f1786:c1:m1"} {"signature": "def emit(self, record):", "body": "try:self.redis_client.publish(self.channel, self.format(record))except redis.RedisError:pass", "docstring": "Publish record to redis logging channel", "id": "f1786:c1:m2"} {"signature": "def __init__(self, key, max_messages, redis_client, level=logging.NOTSET):", "body": "logging.Handler.__init__(self, level)self.key = keyself.redis_client = redis_clientself.formatter = RedisFormatter()self.max_messages = max_messages", "docstring": "Create a new logger for the given key and redis_client.", "id": "f1786:c2:m1"} {"signature": "def emit(self, record):", "body": "try:if self.max_messages:p = self.redis_client.pipeline()p.rpush(self.key, self.format(record))p.ltrim(self.key, -self.max_messages, -)p.execute()else:self.redis_client.rpush(self.key, self.format(record))except redis.RedisError:pass", "docstring": "Publish record to redis logging list", "id": "f1786:c2:m2"} {"signature": "def _getCallingContext():", "body": "frames = inspect.stack()if len(frames) > :context = frames[]else:context = frames[]modname = context[]lineno = context[]if context[]:funcname = context[]else:funcname = \"\"del contextdel framesreturn modname, funcname, lineno", "docstring": "Utility function for the RedisLogRecord.\n\nReturns the module, function, and lineno of the function \nthat called the logger. \n\nWe look way up in the stack. The stack at this point is:\n[0] logger.py _getCallingContext (hey, that's me!)\n[1] logger.py __init__\n[2] logger.py makeRecord\n[3] _log\n[4] \n[5] caller of logging method", "id": "f1787:m1"} {"signature": "def autocomplete(query, country=None, hurricanes=False, cities=True, timeout=):", "body": "data = {}data[''] = quote(query)data[''] = country or ''data[''] = if hurricanes else data[''] = if cities else data[''] = ''r = requests.get(AUTOCOMPLETE_URL.format(**data), timeout=timeout)results = json.loads(r.content)['']return results", "docstring": "Make an autocomplete API request\n\n This can be used to find cities and/or hurricanes by name\n\n :param string query: city\n :param string country: restrict search to a specific country. 
Must be a two letter country code\n :param boolean hurricanes: whether to search for hurricanes or not\n :param boolean cities: whether to search for cities or not\n :param integer timeout: timeout of the api request\n :returns: result of the autocomplete API request\n :rtype: dict", "id": "f1791:m0"} {"signature": "def request(key, features, query, timeout=):", "body": "data = {}data[''] = keydata[''] = ''.join([f for f in features if f in FEATURES])data[''] = quote(query)data[''] = ''r = requests.get(API_URL.format(**data), timeout=timeout)results = json.loads(_unicode(r.content))return results", "docstring": "Make an API request\n\n :param string key: API key to use\n :param list features: features to request. It must be a subset of :data:`FEATURES`\n :param string query: query to send\n :param integer timeout: timeout of the request\n :returns: result of the API request\n :rtype: dict", "id": "f1791:m1"} {"signature": "def _unicode(string):", "body": "for encoding in ['', '']:try:result = unicode(string, encoding)return resultexcept UnicodeDecodeError:passresult = unicode(string, '', '')return result", "docstring": "Try to convert a string to unicode using different encodings", "id": "f1791:m2"} {"signature": "def __init__(self, req, required=False):", "body": "self.requirement = reqself.required = requiredself.required_by = None", "docstring": ":param pkg_resources.Requirement req:\n:param bool required: Is this requirement required to be fulfilled? If not, then it is a filter.", "id": "f1794:c1:m0"} {"signature": "@classmethoddef parse(cls, s, required=False):", "body": "req = pkg_resources.Requirement.parse(s)return cls(req, required=required)", "docstring": "Parse string to create an instance\n\n:param str s: String with requirement to parse\n:param bool required: Is this requirement required to be fulfilled? 
If not, then it is a filter.", "id": "f1794:c1:m1"} {"signature": "def __init__(self, requirements=None):", "body": "self.requirements = defaultdict(list)self.matched_name = Falseself.checked = []if requirements:self.add(requirements)", "docstring": ":param list requirements: List of requirements to manage", "id": "f1794:c2:m0"} {"signature": "def add(self, requirements, required=None):", "body": "if isinstance(requirements, RequirementsManager):requirements = list(requirements)elif not isinstance(requirements, list):requirements = [requirements]for req in requirements:name = req.project_nameif not isinstance(req, BumpRequirement):req = BumpRequirement(req, required=required)elif required is not None:req.required = requiredadd = Trueif name in self.requirements:for existing_req in self.requirements[name]:if req == existing_req:add = Falsebreakreplace = Falseif (req.specs and req.specs[][] == '' and existing_req.specs andexisting_req.specs[][] == ''):if pkg_resources.parse_version(req.specs[][]) < pkg_resources.parse_version(existing_req.specs[][]):req.requirement = existing_req.requirementreplace = Trueif not (req.specs and existing_req.specs):if existing_req.specs:req.requirement = existing_req.requirementreplace = Trueif replace:req.required |= existing_req.requiredif existing_req.required_by and not req.required_by:req.required_by = existing_req.required_byself.requirements[name].remove(existing_req)breakif add:self.requirements[name].append(req)", "docstring": "Add requirements to be managed\n\n:param list/Requirement requirements: List of :class:`BumpRequirement` or :class:`pkg_resources.Requirement`\n:param bool required: Set required flag for each requirement if provided.", "id": "f1794:c2:m5"} {"signature": "def check(self, context, version=None):", "body": "req_str = Noneself.checked.append((context, version))if isinstance(context, str) and not version:context = BumpRequirement.parse(context)if isinstance(context, Bump):name = context.nameif context.new_version and context.new_version[] == '':version = context.new_version[]else:req_str = str(context)elif isinstance(context, (pkg_resources.Requirement, BumpRequirement)):name = context.project_nameif context.specs and context.specs[][] == '':version = context.specs[][]else:req_str = str(context)else:name = contextif name in self:self.matched_name = Truefor req in self[name]:if req.required and (version and version in req or req_str == str(req)):req.required = Falsereturn Truereturn False", "docstring": "Check off requirements that are met by name/version.\n\n:param str|Bump|Requirement context: Either package name, requirement string, :class:`Bump`,\n :class:`BumpRequirement`, or\n :class:`pkg_resources.Requirement instance\n:return: True if any requirement was satisified by context", "id": "f1794:c2:m7"} {"signature": "def satisfied_by_checked(self, req):", "body": "req_man = RequirementsManager([req])return any(req_man.check(*checked) for checked in self.checked)", "docstring": "Check if requirement is already satisfied by what was previously checked\n\n:param Requirement req: Requirement to check", "id": "f1794:c2:m8"} {"signature": "def __init__(self, name, new_version=None, changes=None, requirements=None):", "body": "self.name = nameself.new_version = new_versionself.changes = changes or []self.requirements = []if requirements:self.require(requirements)", "docstring": ":param str name: Name of the product/library that was bumped\n:param tuple new_version: New version that was bumped to in (op, version) format.\n:param list 
changes: Detailed changelog entries from the old version to the new version\n:param str|list requirements: Any requirements that must be fulfilled for this bump to occur.", "id": "f1794:c3:m0"} {"signature": "@classmethoddef from_requirement(cls, req, changes=None):", "body": "return cls(req.project_name, req.specs and ''.join(req.specs[]) or '', changes=changes)", "docstring": "Create an instance from :class:`pkg_resources.Requirement` instance", "id": "f1794:c3:m5"} {"signature": "def as_requirement(self):", "body": "if self.new_version:return pkg_resources.Requirement.parse(self.name + ''.join(self.new_version))else:return pkg_resources.Requirement.parse(self.name)", "docstring": "Convert back to a :class:`pkg_resources.Requirement` instance", "id": "f1794:c3:m6"} {"signature": "def require(self, req):", "body": "reqs = req if isinstance(req, list) else [req]for req in reqs:if not isinstance(req, BumpRequirement):req = BumpRequirement(req)req.required = Truereq.required_by = selfself.requirements.append(req)", "docstring": "Add new requirements that must be fulfilled for this bump to occur", "id": "f1794:c3:m7"} {"signature": "@classmethoddef requirements_for_changes(self, changes):", "body": "requirements = []reqs_set = set()if isinstance(changes, str):changes = changes.split('')if not changes or changes[].startswith(''):return requirementsfor line in changes:line = line.strip('')if not line:continuematch = IS_REQUIREMENTS_RE2.search(line) if match:for match in REQUIREMENTS_RE.findall(match.group()):if match[]:version = '' + match[] if match[].startswith('') else match[]req_str = match[] + versionelse:req_str = match[]if req_str not in reqs_set:reqs_set.add(req_str)try:requirements.append(pkg_resources.Requirement.parse(req_str))except Exception as e:log.warn('', req_str, e)return requirements", "docstring": "Parse changes for requirements\n\n:param list changes:", "id": "f1794:c4:m1"} {"signature": "@classmethoddef likes(cls, target):", "body": "raise NotImplementedError", "docstring": "Check if this bumper likes the target.", "id": "f1794:c4:m4"} {"signature": "@classmethoddef bump_message(self, bumps, include_changes=False):", "body": "raise NotImplementedError", "docstring": "Compose a bump message for the given bumps\n\n:param list bumps: List of :class:`Bump` instances\n:param bool include_changes: Indicate if the message should include detailed changes.", "id": "f1794:c4:m5"} {"signature": "def requirements(self):", "body": "raise NotImplementedError", "docstring": "Return a list of existing requirements (as :class:`pkg_resources.Requirement`)", "id": "f1794:c4:m6"} {"signature": "def update_requirements(self):", "body": "raise NotImplementedError", "docstring": "Update/persist requirements from `self.bumps`", "id": "f1794:c4:m7"} {"signature": "def _package_changes(self, name, current_version, new_version):", "body": "raise NotImplementedError", "docstring": "List of changes for package name from current_version to new_version, in descending order.\n\n:param str name: Name of package\n:param current_version: Current version\n:param new_version: New version. It is guaranteed to be higher than current version.", "id": "f1794:c4:m8"} {"signature": "def all_package_versions(self, name):", "body": "raise NotImplementedError", "docstring": "List of all versions, in descending order, for the given package name.", "id": "f1794:c4:m9"} {"signature": "def should_pin(self):", "body": "return False", "docstring": "Should requirement be pinned? 
This should be True for leaf products.", "id": "f1794:c4:m11"} {"signature": "def package_changes(self, name, current_version, new_version):", "body": "if pkg_resources.parse_version(current_version) > pkg_resources.parse_version(new_version):downgrade_sign = ''(current_version, new_version) = (new_version, current_version)else:downgrade_sign = Nonechanges = self._package_changes(name, current_version, new_version)if changes and downgrade_sign:changes = [downgrade_sign + c for c in changes]return changes", "docstring": "List of changes for package name from current_version to new_version, in descending order.\nIf current version is higher than new version (downgrade), then a minus sign will be prefixed to each change.", "id": "f1794:c4:m12"} {"signature": "def _bump(self, existing_req=None, bump_reqs=None):", "body": "if existing_req or bump_reqs and any(r.required for r in bump_reqs):name = existing_req and existing_req.project_name or bump_reqs[].project_namelog.info('', name)bump = current_version = new_version = Noneif bump_reqs:if self.should_pin() and (len(bump_reqs) > or bump_reqs[] andbump_reqs[].specs and bump_reqs[].specs[][] != ''):log.debug('', bump_reqs)new_version = self.latest_version_for_requirements(bump_reqs)current_version = (existing_req and existing_req.specs and existing_req.specs[][] == '' andexisting_req.specs[][])if current_version == new_version:return Nonebump = Bump(name, ('', new_version))elif len(bump_reqs) > :raise BumpAccident('' % (name, ''.join(str(r) for r in bump_reqs)))elif bump_reqs[].specs or not (existing_req or self.should_pin() or bump_reqs[].specs):log.debug('', bump_reqs)latest_version = self.latest_version_for_requirements(bump_reqs)new_version = (bump_reqs[].specs and bump_reqs[].specs[][] == '' andbump_reqs[].specs[][] or latest_version)current_version = (existing_req and existing_req.specs and existing_req.specs[][] == '' andexisting_req.specs[][])if current_version == new_version:return Noneif len(bump_reqs[].specs) > :version = (''.join(s[] + s[] for s in bump_reqs[].specs),)elif bump_reqs[].specs:version = bump_reqs[].specs[]else:version = Nonebump = Bump(name, version)if not bump and (existing_req and existing_req.specs and existing_req.specs[][] == '' orself.should_pin() and not existing_req):log.debug('', bump_reqs or name)current_version = existing_req and existing_req.specs[][]new_version = self.latest_package_version(name)if current_version == new_version:return Noneif not new_version:raise BumpAccident('' % name)bump = Bump(name, ('', new_version))if bump and current_version and new_version and self.detail:changes = self.package_changes(bump.name, current_version, new_version)bump.changes.extend(changes)if self.should_pin():bump.require(self.requirements_for_changes(changes))if bump:log.debug('', bump)if bump.requirements:log.info('',bump.name, ''.join(sorted(str(r) for r in bump.requirements)))return bump if str(bump) != str(existing_req) else None", "docstring": "Bump an existing requirement to the desired requirement if any.\nSubclass can override this `_bump` method to change how each requirement is bumped.\n\nBR = Bump to Requested Version\nBL = Bump to Latest Version\nBLR = Bump to Latest Version per Requested Requirement\nBROL = Bump to Requested Version or Latest (if Pin)\nN = No Bump\nERR = Error\nC = Version Conflict\n\nPin case \"requires=\" will be required.\nFilter case \"requires=\" will be:\n 1) From user = Required\n 2) From bump = bump/require if existing = One, otherwise print warning.\n\nFilter Case::\n Bump: None 
Any One Many\nExisting:\n None N N N N\n Any N N BR BR\n One BL BL BR BR\n Many N N BR BR\n\nPin Case::\n Bump: None Any One Many\nExisting:\n None N N N N\n Any N N BR BLR*\n One BL BL BR BLR*\n Many N N BR BLR*\n\nAdd/Require Case::\n Bump: None Any One Many\nExisting:\n None N BROL BROL BROL\n\n:param pkg_resources.Requirement existing_req: Existing requirement if any\n:param list bump_reqs: List of `BumpRequirement`\n:return Bump: Either a :class:`Bump` instance or None\n:raise BumpAccident:", "id": "f1794:c4:m14"} {"signature": "def bump(self, bump_reqs=None, **kwargs):", "body": "bumps = {}for existing_req in sorted(self.requirements(), key=lambda r: r.project_name):if bump_reqs and existing_req.project_name not in bump_reqs:continuebump_reqs.check(existing_req)try:bump = self._bump(existing_req, bump_reqs.get(existing_req.project_name))if bump:bumps[bump.name] = bumpbump_reqs.check(bump)except Exception as e:if bump_reqs and bump_reqs.get(existing_req.project_name) and all(r.required_by is None for r in bump_reqs.get(existing_req.project_name)):raiseelse:log.warn(e)for reqs in bump_reqs.required_requirements().values():name = reqs[].project_nameif name not in bumps and self.should_add(name):try:bump = self._bump(None, reqs)if bump:bumps[bump.name] = bumpbump_reqs.check(bump)except Exception as e:if all(r.required_by is None for r in reqs):raiseelse:log.warn(e)self.bumps.update(bumps.values())return bumps.values()", "docstring": "Bump dependencies using given requirements.\n\n:param RequirementsManager bump_reqs: Bump requirements manager\n:param dict kwargs: Additional args from argparse. Some bumpers accept user options, and some not.\n:return: List of :class:`Bump` changes made.", "id": "f1794:c4:m15"} {"signature": "def reverse(self):", "body": "if self._original_target_content:with open(self.target, '') as fp:fp.write(self._original_target_content)", "docstring": "Restore content in target file to be before any changes", "id": "f1794:c4:m16"} {"signature": "def should_add(self, name):", "body": "return True", "docstring": "Should this bumper try to add the given name if requested.", "id": "f1794:c5:m3"} {"signature": "def bump():", "body": "parser = argparse.ArgumentParser(description=bump.__doc__)parser.add_argument('', nargs='', help=\"\"\"\"\"\")parser.add_argument('', '', action='',help='')parser.add_argument('', help='')parser.add_argument('', action='',help='')parser.add_argument('', '', '', action='',help='''')parser.add_argument('', '', action='', help='')parser.add_argument('', action='', help='')args = parser.parse_args()targets = [args.file] if args.file else ['', '']level = logging.DEBUG if args.debug else logging.INFOlogging.basicConfig(level=level, format='')try:bumper = BumperDriver(targets, full_throttle=args.force, detail=args.detail, test_drive=args.dry_run)bumper.bump(args.names, required=args.add, show_detail=args.detail)except Exception as e:if args.debug:raiseelse:log.error(e)sys.exit()", "docstring": "CLI entry point to bump requirements in requirements.txt or pinned.txt", "id": "f1795:m0"} {"signature": "def bump(self, filter_requirements, required=False, show_summary=True, show_detail=False, **kwargs):", "body": "found_targets = [target for target in self.targets if os.path.exists(target)]if not found_targets:raise BumpAccident('' % ''.join(self.targets))bump_reqs = RequirementsManager()if filter_requirements:requirements = parse_requirements(filter_requirements)bump_reqs.add(requirements, required=required)try:for target in found_targets:log.debug('', 
target)target_bumpers = []target_bump_reqs = RequirementsManager(bump_reqs)loops = while True:loops += if loops > :log.debug('')breakif not target_bumpers:target_bumpers = [model(target, detail=self.detail, test_drive=self.test_drive)for model in self.bumper_models if model.likes(target)]if not target_bumpers:log.debug('', target, self.default_model)target_bumpers = [self.default_model(target, detail=self.detail,test_drive=self.test_drive)]self.bumpers.extend(target_bumpers)new_target_bump_reqs = RequirementsManager()for bumper in target_bumpers:target_bumps = bumper.bump(target_bump_reqs)self.bumps.update(dict((b.name, b) for b in target_bumps))for bump in target_bumps:for new_req in bump.requirements:if not (bump_reqs.satisfied_by_checked(new_req) ortarget_bump_reqs.satisfied_by_checked(new_req)):new_target_bump_reqs.add(new_req)bump_reqs.matched_name |= target_bump_reqs.matched_namebump_reqs.checked.extend(target_bump_reqs.checked)if new_target_bump_reqs:bump_reqs.add(new_target_bump_reqs)target_bump_reqs = RequirementsManager(list(r for r in new_target_bump_reqs if r.project_name not in self.bumps))if not target_bump_reqs:breakif not self.bumpers:raise BumpAccident('' % ''.join(found_targets))if bump_reqs and not bump_reqs.matched_name:raise BumpAccident('' % ''.join(found_targets))if self.bumps:for bump in self.bumps.values():bump_reqs.check(bump)for reqs in bump_reqs.required_requirements().values():for req in reqs:if not self.full_throttle:use_force = '' if req.required_by else ''raise BumpAccident('''' % (req, use_force))if self.test_drive:log.info(\"\")messages = {}for bumper in self.bumpers:if bumper.bumps:if not self.test_drive:bumper.update_requirements()if self.test_drive or show_summary:msg = bumper.bump_message(self.test_drive or show_detail)if self.test_drive:print(msg)else:rewords = [('', ''), ('', ''),('', '')]for word, new_word in rewords:if msg.startswith(word):msg = msg.replace(word, new_word, )breaklog.info(msg)messages[bumper.target] = bumper.bump_message(True)return messages, self.bumpselse:log.info('')return {}, []except Exception:if not self.test_drive and self.bumps:map(lambda b: b.reverse(), self.bumpers)raise", "docstring": "Bump dependency requirements using filter.\n\n:param list filter_requirements: List of dependency filter requirements.\n:param bool required: Require the filter_requirements to be met (by adding if possible).\n:param bool show_summary: Show summary for each bump made.\n:param bool show_detail: Show detail for each bump made if available.\n:return: Tuple with two elements: Dict of target file to bump message, List of :class:`Bump`\n:raise BumpAccident: for any bump errors", "id": "f1795:c0:m1"} {"signature": "def reverse(self):", "body": "if not self.test_drive and self.bumps:map(lambda b: b.reverse(), self.bumpers)", "docstring": "Reverse all bumpers", "id": "f1795:c0:m2"} {"signature": "def _expand_targets(self, targets, base_dir=None):", "body": "all_targets = []for target in targets:target_dirs = [p for p in [base_dir, os.path.dirname(target)] if p]target_dir = target_dirs and os.path.join(*target_dirs) or ''target = os.path.basename(target)target_path = os.path.join(target_dir, target)if os.path.exists(target_path):all_targets.append(target_path)with open(target_path) as fp:for line in fp:if line.startswith(''):_, new_target = line.split('', )all_targets.extend(self._expand_targets([new_target.strip()], base_dir=target_dir))return all_targets", "docstring": "Expand targets by looking for '-r' in targets.", "id": "f1795:c0:m3"} 
{"signature": "def parse_requirements(requirements, in_file=None):", "body": "try:return list(pkg_resources.parse_requirements(requirements))except Exception as e:in_file = '' % in_file if in_file else ''raise ValueError(''.format(e, in_file))", "docstring": "Parse string requirements into list of :class:`pkg_resources.Requirement` instances\n\n:param str requirements: Requirements text to parse\n:param str in_file: File the requirements came from\n:return: List of requirements\n:raises ValueError: if failed to parse", "id": "f1796:m0"} {"signature": "@classmethoddef package_info(cls, package):", "body": "if package not in cls.package_info_cache:package_json_url = '' % packagetry:logging.getLogger('').setLevel(logging.WARN)response = requests.get(package_json_url)response.raise_for_status()cls.package_info_cache[package] = simplejson.loads(response.text)except Exception as e:log.debug('', package_json_url, e)cls.package_info_cache[package] = Nonereturn cls.package_info_cache[package]", "docstring": "All package info for given package", "id": "f1796:c0:m0"} {"signature": "@staticmethoddef all_package_versions(package):", "body": "info = PyPI.package_info(package)return info and sorted(info[''].keys(), key=lambda x: x.split(), reverse=True) or []", "docstring": "All versions for package", "id": "f1796:c0:m2"} {"signature": "def freeze(self):", "body": "data = super(IndexBuilder, self).freeze()try:base_file_names = data['']except KeyError:base_file_names = data['']store = {}c = itertools.count()for prefix, items in iteritems(data['']):for name, (index, typeindex, _, shortanchor) in iteritems(items):objtype = data[''][typeindex]if objtype.startswith(''):split = name.rsplit('', )if len(split) != :warnings.warn(\"\" % str((prefix, name, objtype)))continueprefix, name = splitlast_prefix = prefix.split('')[-]else:last_prefix = prefix.split('')[-]store[next(c)] = {'': base_file_names[index],'': objtype,'': prefix,'': last_prefix,'': name,'': shortanchor,}data.update({'': store})return data", "docstring": "Create a usable data structure for serializing.", "id": "f1803:c0:m0"} {"signature": "def tokenize(text, custom_dict=None):", "body": "global TOKENIZERif not TOKENIZER:TOKENIZER = DeepcutTokenizer()return TOKENIZER.tokenize(text, custom_dict=custom_dict)", "docstring": "Tokenize given Thai text string\n\nInput\n=====\ntext: str, Thai text string\ncustom_dict: str (or list), path to customized dictionary file\n It allows the function not to tokenize given dictionary wrongly.\n The file should contain custom words separated by line.\n Alternatively, you can provide list of custom words too.\n\nOutput\n======\ntokens: list, list of tokenized words\n\nExample\n=======\n>> deepcut.tokenize('\u0e15\u0e31\u0e14\u0e04\u0e33\u0e44\u0e14\u0e49\u0e14\u0e35\u0e21\u0e32\u0e01')\n>> ['\u0e15\u0e31\u0e14\u0e04\u0e33','\u0e44\u0e14\u0e49','\u0e14\u0e35','\u0e21\u0e32\u0e01']", "id": "f1806:m0"} {"signature": "def _document_frequency(X):", "body": "if sp.isspmatrix_csr(X):return np.bincount(X.indices, minlength=X.shape[])return np.diff(sp.csc_matrix(X, copy=False).indptr)", "docstring": "Count the number of non-zero values for each feature in sparse X.", "id": "f1806:m2"} {"signature": "def _check_stop_list(stop):", "body": "if stop == \"\":return THAI_STOP_WORDSelif isinstance(stop, six.string_types):raise ValueError(\"\" % stop)elif stop is None:return Nonereturn frozenset(stop)", "docstring": "Check stop words list\nref: 
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py#L87-L95", "id": "f1806:m3"} {"signature": "def _word_ngrams(self, tokens):", "body": "if self.stop_words is not None:tokens = [w for w in tokens if w not in self.stop_words]min_n, max_n = self.ngram_rangeif max_n != :original_tokens = tokensif min_n == :tokens = list(original_tokens)min_n += else:tokens = []n_original_tokens = len(original_tokens)tokens_append = tokens.appendspace_join = \"\".joinfor n in range(min_n,min(max_n + , n_original_tokens + )):for i in range(n_original_tokens - n + ):tokens_append(space_join(original_tokens[i: i + n]))return tokens", "docstring": "Turn tokens into a tokens of n-grams\n\nref: https://github.com/scikit-learn/scikit-learn/blob/ef5cb84a/sklearn/feature_extraction/text.py#L124-L153", "id": "f1806:c0:m1"} {"signature": "def _limit_features(self, X, vocabulary,high=None, low=None, limit=None):", "body": "if high is None and low is None and limit is None:return X, set()dfs = _document_frequency(X)tfs = np.asarray(X.sum(axis=)).ravel()mask = np.ones(len(dfs), dtype=bool)if high is not None:mask &= dfs <= highif low is not None:mask &= dfs >= lowif limit is not None and mask.sum() > limit:mask_inds = (-tfs[mask]).argsort()[:limit]new_mask = np.zeros(len(dfs), dtype=bool)new_mask[np.where(mask)[][mask_inds]] = Truemask = new_masknew_indices = np.cumsum(mask) - removed_terms = set()for term, old_index in list(vocabulary.items()):if mask[old_index]:vocabulary[term] = new_indices[old_index]else:del vocabulary[term]removed_terms.add(term)kept_indices = np.where(mask)[]if len(kept_indices) == :raise ValueError(\"\"\"\")return X[:, kept_indices], vocabulary, removed_terms", "docstring": "Remove too rare or too common features.\n\n ref: https://github.com/scikit-learn/scikit-learn/blob/ef5cb84a/sklearn/feature_extraction/text.py#L734-L773", "id": "f1806:c0:m2"} {"signature": "def fit_tranform(self, raw_documents):", "body": "X = self.transform(raw_documents, new_document=True)return X", "docstring": "Transform given list of raw_documents to document-term matrix in\nsparse CSR format (see scipy)", "id": "f1806:c0:m4"} {"signature": "def generate_words(files):", "body": "repls = {'' : '','' : '','': '','': ''}words_all = []for i, file in enumerate(files):lines = open(file, '')for line in lines:line = reduce(lambda a, kv: a.replace(*kv), repls.items(), line)words = [word for word in line.split(\"\") if word is not '']words_all.extend(words)return words_all", "docstring": "Transform list of files to list of words,\nremoving new line character\nand replace name entity '...' and abbreviation '...' 
symbol", "id": "f1807:m0"} {"signature": "def create_char_dataframe(words):", "body": "char_dict = []for word in words:for i, char in enumerate(word):if i == :char_dict.append({'': char,'': CHAR_TYPE_FLATTEN.get(char, ''),'': True})else:char_dict.append({'': char,'': CHAR_TYPE_FLATTEN.get(char, ''),'': False})return pd.DataFrame(char_dict)", "docstring": "Give list of input tokenized words,\ncreate dataframe of characters where first character of\nthe word is tagged as 1, otherwise 0\n\nExample\n=======\n['\u0e01\u0e34\u0e19', '\u0e2b\u0e21\u0e14'] to dataframe of\n[{'char': '\u0e01', 'type': ..., 'target': 1}, ...,\n {'char': '\u0e14', 'type': ..., 'target': 0}]", "id": "f1807:m1"} {"signature": "def generate_best_dataset(best_path, output_path='', create_val=False):", "body": "if not os.path.isdir(output_path):os.mkdir(output_path)if not os.path.isdir(os.path.join(output_path, '')):os.makedirs(os.path.join(output_path, ''))if not os.path.isdir(os.path.join(output_path, '')):os.makedirs(os.path.join(output_path, ''))if not os.path.isdir(os.path.join(output_path, '')) and create_val:os.makedirs(os.path.join(output_path, ''))for article_type in article_types:files = glob(os.path.join(best_path, article_type, ''))files_train, files_test = train_test_split(files, random_state=, test_size=)if create_val:files_train, files_val = train_test_split(files_train, random_state=, test_size=)val_words = generate_words(files_val)val_df = create_char_dataframe(val_words)val_df.to_csv(os.path.join(output_path, '', ''.format(article_type)), index=False)train_words = generate_words(files_train)test_words = generate_words(files_test)train_df = create_char_dataframe(train_words)test_df = create_char_dataframe(test_words)train_df.to_csv(os.path.join(output_path, '', ''.format(article_type)), index=False)test_df.to_csv(os.path.join(output_path, '', ''.format(article_type)), index=False)print(\"\".format(article_type))", "docstring": "Generate CSV file for training and testing data\n\nInput\n=====\nbest_path: str, path to BEST folder which contains unzipped subfolder\n 'article', 'encyclopedia', 'news', 'novel'\n\ncleaned_data: str, path to output folder, the cleaned data will be saved\n in the given folder name where training set will be stored in `train` folder\n and testing set will be stored on `test` folder\n\ncreate_val: boolean, True or False, if True, divide training set into training set and\n validation set in `val` folder", "id": "f1807:m2"} {"signature": "def prepare_feature(best_processed_path, option=''):", "body": "n_pad = n_pad_2 = int((n_pad - )/)pad = [{'': '', '': '', '': True}]df_pad = pd.DataFrame(pad * n_pad_2)df = []for article_type in article_types:df.append(pd.read_csv(os.path.join(best_processed_path, option, ''.format(article_type, option))))df = pd.concat(df)df = pd.concat((df_pad, df, df_pad)) df[''] = df[''].map(lambda x: CHARS_MAP.get(x, ))df[''] = df[''].map(lambda x: CHAR_TYPES_MAP.get(x, ))df_pad = create_n_gram_df(df, n_pad=n_pad)char_row = ['' + str(i + ) for i in range(n_pad_2)] +['' + str(i + ) for i in range(n_pad_2)] + ['']type_row = ['' + str(i + ) for i in range(n_pad_2)] +['' + str(i + ) for i in range(n_pad_2)] + ['']x_char = df_pad[char_row].as_matrix()x_type = df_pad[type_row].as_matrix()y = df_pad[''].astype(int).as_matrix()return x_char, x_type, y", "docstring": "Transform processed path into feature matrix and output array\n\nInput\n=====\nbest_processed_path: str, path to processed BEST dataset\n\noption: str, 'train' or 'test'", "id": "f1807:m3"} {"signature": 
"def train_model(best_processed_path, weight_path='', verbose=):", "body": "x_train_char, x_train_type, y_train = prepare_feature(best_processed_path, option='')x_test_char, x_test_type, y_test = prepare_feature(best_processed_path, option='')validation_set = Falseif os.path.isdir(os.path.join(best_processed_path, '')):validation_set = Truex_val_char, x_val_type, y_val = prepare_feature(best_processed_path, option='')if not os.path.isdir(os.path.dirname(weight_path)):os.makedirs(os.path.dirname(weight_path)) callbacks_list = [ReduceLROnPlateau(),ModelCheckpoint(weight_path,save_best_only=True,save_weights_only=True,monitor='',mode='',verbose=)]model = get_convo_nn2()train_params = [(, ), (, ), (, ), (, ), (, )]for (epochs, batch_size) in train_params:print(\"\".format(epochs, batch_size))if validation_set:model.fit([x_train_char, x_train_type], y_train,epochs=epochs, batch_size=batch_size,verbose=verbose,callbacks=callbacks_list,validation_data=([x_val_char, x_val_type], y_val))else:model.fit([x_train_char, x_train_type], y_train,epochs=epochs, batch_size=batch_size,verbose=verbose,callbacks=callbacks_list)return model", "docstring": "Given path to processed BEST dataset,\ntrain CNN model for words beginning alongside with\ncharacter label encoder and character type label encoder\n\nInput\n=====\nbest_processed_path: str, path to processed BEST dataset\nweight_path: str, path to weight path file\nverbose: int, verbost option for training Keras model\n\nOutput\n======\nmodel: keras model, keras model for tokenize prediction", "id": "f1807:m4"} {"signature": "def evaluate(best_processed_path, model):", "body": "x_test_char, x_test_type, y_test = prepare_feature(best_processed_path, option='')y_predict = model.predict([x_test_char, x_test_type])y_predict = (y_predict.ravel() > ).astype(int)f1score = f1_score(y_test, y_predict)precision = precision_score(y_test, y_predict)recall = recall_score(y_test, y_predict)return f1score, precision, recall", "docstring": "Evaluate model on splitted 10 percent testing set", "id": "f1807:m5"} {"signature": "def create_feature_array(text, n_pad=):", "body": "n = len(text)n_pad_2 = int((n_pad - )/)text_pad = [''] * n_pad_2 + [t for t in text] + [''] * n_pad_2x_char, x_type = [], []for i in range(n_pad_2, n_pad_2 + n):char_list = text_pad[i + : i + n_pad_2 + ] +list(reversed(text_pad[i - n_pad_2: i])) +[text_pad[i]]char_map = [CHARS_MAP.get(c, ) for c in char_list]char_type = [CHAR_TYPES_MAP.get(CHAR_TYPE_FLATTEN.get(c, ''), )for c in char_list]x_char.append(char_map)x_type.append(char_type)x_char = np.array(x_char).astype(float)x_type = np.array(x_type).astype(float)return x_char, x_type", "docstring": "Create feature array of character and surrounding characters", "id": "f1809:m0"} {"signature": "def create_n_gram_df(df, n_pad):", "body": "n_pad_2 = int((n_pad - )/)for i in range(n_pad_2):df[''.format(i+)] = df[''].shift(i + )df[''.format(i+)] = df[''].shift(i + )df[''.format(i+)] = df[''].shift(-i - )df[''.format(i+)] = df[''].shift(-i - )return df[n_pad_2: -n_pad_2]", "docstring": "Given input dataframe, create feature dataframe of shifted characters", "id": "f1809:m1"} {"signature": "@blog.command()@click.argument('')@click.pass_contextdef photos(context, path):", "body": "config = context.objheader('')article_filename = find_last_article(config[''])if not article_filename:return click.secho('', fg='')click.echo(os.path.basename(article_filename))header('')images = list(sorted(find_images(path)))if not images:return click.secho('', fg='')for filename in 
images:click.secho(filename, fg='')if not click.confirm(''):abort(config)url_prefix = os.path.join('', IMAGES_PATH)images_dir = os.path.join(config[''], IMAGES_PATH)os.makedirs(images_dir, exist_ok=True)header('')urls = []for filename in images:image_basename = os.path.basename(filename).replace('', '').lower()urls.append(os.path.join(url_prefix, image_basename))image_filename = os.path.join(images_dir, image_basename)print(filename, image_filename)import_image(filename, image_filename)content = ''for url in urls:url = url.replace('', '')content += ''.format(url)header(''.format(article_filename))with click.open_file(article_filename, '') as f:f.write(content)click.launch(article_filename)", "docstring": "Adds images to the last article", "id": "f1824:m0"} {"signature": "@blog.command()@click.pass_contextdef deploy(context):", "body": "config = context.objheader('')pelican(config, '', production=True)header('')unnecessary_paths = ['', '', '', '', '','', '', '',]for path in unnecessary_paths:remove_path(os.path.join(config[''], path))if os.environ.get(''): header('')run('' +run('', capture=True))run('' +run('', capture=True))github_token = os.environ.get('')repo_slug = os.environ.get('')origin = ''.format(github_token, repo_slug)run('' + origin)header('')run(''.format(message=''.format(choose_commit_emoji()),dir=config[''],))header('')run('')", "docstring": "Uploads new version of the blog website", "id": "f1825:m0"} {"signature": "@blog.command()def update():", "body": "run('')run('')", "docstring": "Gets other people's changes from GitHub", "id": "f1826:m0"} {"signature": "@blog.command()@click.pass_contextdef lint(context):", "body": "config = context.objtry:run(''.format(dir=config[''],exclude=''.join(EXCLUDE),))except SubprocessError:context.exit()", "docstring": "Looks for errors in source code of your blog", "id": "f1827:m0"} {"signature": "@blog.command()@click.pass_contextdef preview(context):", "body": "config = context.objpelican(config, '', '')server_proc = Noneos.chdir(config[''])try:try:command = '' + str(PORT)server_proc = run(command, bg=True)time.sleep()click.launch('')time.sleep()pelican(config, '')except Exception:if server_proc is not None:server_proc.kill()raiseexcept KeyboardInterrupt:abort(context)", "docstring": "Opens local preview of your blog website", "id": "f1830:m0"} {"signature": "@blog.command()@click.pass_contextdef write(context):", "body": "config = context.objtitle = click.prompt('')author = click.prompt('', default=config.get(''))slug = slugify(title)creation_date = datetime.now()basename = ''.format(creation_date, slug)meta = (('', title),('', ''.format(creation_date)),('', ''.format(creation_date)),('', author),)file_content = ''for key, value in meta:file_content += ''.format(key, value)file_content += ''file_content += ''file_content += ''file_content += ''os.makedirs(config[''], exist_ok=True)path = os.path.join(config[''], basename)with click.open_file(path, '') as f:f.write(file_content)click.echo(path)click.launch(path)", "docstring": "Starts a new article", "id": "f1831:m0"} {"signature": "@blog.command()@click.pass_contextdef publish(context):", "body": "header('')run('')header('')run('')if not click.confirm(''):run('')abort(context)header('')try:run(''.format(message=''.format(choose_commit_emoji())), capture=True)except subprocess.CalledProcessError as e:if '' not in e.stdout:raiseelse:click.echo('')header('')branch = get_branch()run(''.format(branch=branch))pr_link = get_pr_link(branch)if pr_link:click.launch(pr_link)", "docstring": "Saves 
changes and sends them to GitHub", "id": "f1832:m0"} {"signature": "def forwards(self, orm):", "body": "print(\"\")ja_akt_stan=orm.JednostkaAdministracyjna.objects.all().aggregate(Max(''))['']orm.JednostkaAdministracyjna.objects.filter(stan_na__exact=ja_akt_stan).update(aktywny=True)orm.JednostkaAdministracyjna.objects.exclude(stan_na__exact=ja_akt_stan).update(aktywny=False)print(\"\")m_akt_stan=orm.Miejscowosc.objects.all().aggregate(Max(''))['']orm.Miejscowosc.objects.filter(stan_na__exact=m_akt_stan).update(aktywny=True)orm.Miejscowosc.objects.exclude(stan_na__exact=m_akt_stan).update(aktywny=False)print(\"\")rm_akt_stan=orm.RodzajMiejsowosci.objects.all().aggregate(Max(''))['']orm.RodzajMiejsowosci.objects.filter(stan_na__exact=rm_akt_stan).update(aktywny=True)orm.RodzajMiejsowosci.objects.exclude(stan_na__exact=rm_akt_stan).update(aktywny=False)print(\"\")u_akt_stan=orm.Ulica.objects.all().aggregate(Max(''))['']orm.Ulica.objects.filter(stan_na__exact=u_akt_stan).update(aktywny=True)orm.Ulica.objects.exclude(stan_na__exact=u_akt_stan).update(aktywny=False)", "docstring": "Write your forwards methods here.", "id": "f1851:c0:m0"} {"signature": "def backwards(self, orm):", "body": "", "docstring": "Write your backwards methods here.", "id": "f1851:c0:m1"} {"signature": "def forwards(self, orm):", "body": "LEN_TYPE = {: '',: '',: '',}for ja in orm.JednostkaAdministracyjna.objects.all():ja.typ = LEN_TYPE[len(ja.id)]ja.save()", "docstring": "Write your forwards methods here.", "id": "f1854:c0:m0"} {"signature": "def backwards(self, orm):", "body": "", "docstring": "Write your backwards methods here.", "id": "f1854:c0:m1"} {"signature": "def do_filter(qs, keywords, exclude=False):", "body": "and_q = Q()for keyword, value in iteritems(keywords):try:values = value.split(\"\")if len(values) > :or_q = Q()for value in values:or_q |= Q(**{keyword: value})and_q &= or_qexcept AttributeError:and_q &= Q(**{keyword: value})if exclude:qs = qs.exclude(and_q)else:qs = qs.filter(and_q)return qs", "docstring": "Filter queryset based on keywords.\nSupport for multiple-selected parent values.", "id": "f1868:m1"} {"signature": "@never_cachedef filterchain_all(request, app, model, field, foreign_key_app_name,foreign_key_model_name, foreign_key_field_name, value):", "body": "model_class = get_model(app, model)keywords = get_keywords(field, value)foreign_model_class = get_model(foreign_key_app_name, foreign_key_model_name)if not any([(isinstance(f, ChainedManyToManyField) orisinstance(f, ChainedForeignKey))for f in foreign_model_class._meta.get_fields()]):raise PermissionDenied(\"\")limit_choices_to = get_limit_choices_to(foreign_key_app_name, foreign_key_model_name, foreign_key_field_name)queryset = get_queryset(model_class, limit_choices_to=limit_choices_to)filtered = list(do_filter(queryset, keywords))if not getattr(model_class._meta, '', False):sort_results(list(filtered))excluded = list(do_filter(queryset, keywords, exclude=True))if not getattr(model_class._meta, '', False):sort_results(list(excluded))empty_choice = {'': \"\", '': \"\"}serialized_results = (serialize_results(filtered) +[empty_choice] +serialize_results(excluded))return JsonResponse(serialized_results, safe=False)", "docstring": "Returns filtered results followed by excluded results below.", "id": "f1868:m3"} {"signature": "@propertydef media(self):", "body": "media = super(JqueryMediaMixin, self).mediajs = []if JQUERY_URL:js.append(JQUERY_URL)elif JQUERY_URL is not False:vendor = '' if django.VERSION < (, , ) else ''extra = '' if 
settings.DEBUG else ''jquery_paths = [''.format(vendor, extra),'',]if USE_DJANGO_JQUERY:jquery_paths = [''.format(path) for path in jquery_paths]js.extend(jquery_paths)media += Media(js=js)return media", "docstring": "Media defined as a dynamic property instead of an inner class.", "id": "f1870:c0:m0"} {"signature": "@propertydef media(self):", "body": "media = super(ChainedSelect, self).mediajs = ['','']media += Media(js=js)return media", "docstring": "Media defined as a dynamic property instead of an inner class.", "id": "f1870:c1:m1"} {"signature": "def _get_available_choices(self, queryset, value):", "body": "item = queryset.filter(pk=value).first()if item:try:pk = getattr(item, self.chained_model_field + \"\")filter = {self.chained_model_field: pk}except AttributeError:try: pks = getattr(item, self.chained_model_field).all().values_list('', flat=True)filter = {self.chained_model_field + \"\": pks}except AttributeError:try: pks = getattr(item, self.chained_model_field + \"\").all().values_list('', flat=True)filter = {self.chained_model_field + \"\": pks}except AttributeError: filter = {}filtered = list(get_model(self.to_app_name, self.to_model_name).objects.filter(**filter).distinct())if self.sort:sort_results(filtered)else:filtered = []return filtered", "docstring": "Get possible choices for selection", "id": "f1870:c1:m3"} {"signature": "@propertydef media(self):", "body": "media = super(ChainedSelectMultiple, self).mediajs = ['','']if self.horizontal:js.extend([\"\",\"\",\"\"])media += Media(js=js)return media", "docstring": "Media defined as a dynamic property instead of an inner class.", "id": "f1870:c2:m1"} {"signature": "def unicode_sorter(input):", "body": "key1 = input.lower()key1 = key1.replace(u\"\", u\"\")key1 = key1.replace(u\"\", u\"\")key1 = key1.replace(u\"\", u\"\")key1 = key1.replace(u\"\", u\"\")return key1", "docstring": "This function implements sort keys for the German language according to\n DIN 5007.", "id": "f1871:m0"} {"signature": "def sort_results(results):", "body": "results.sort(key=lambda x: unicode_sorter(force_text(x)))", "docstring": "Performs in-place sort of filterchain results.", "id": "f1871:m5"} {"signature": "def __init__(self, to, chained_field=None, chained_model_field=None,auto_choose=False, horizontal=False, **kwargs):", "body": "self.chain_field = chained_fieldself.chained_model_field = chained_model_fieldself.auto_choose = auto_chooseself.horizontal = horizontalself.verbose_name = kwargs.get('', '')super(ChainedManyToManyField, self).__init__(to, **kwargs)", "docstring": "examples:\n\nclass Publication(models.Model):\n name = models.CharField(max_length=255)\n\nclass Writer(models.Model):\n name = models.CharField(max_length=255)\n publications = models.ManyToManyField('Publication', blank=True, null=True)\n\nclass Book(models.Model):\n publication = models.ForeignKey(Publication)\n writer = ChainedManyToManyField(\n Writer,\n chained_field=\"publication\",\n chained_model_field=\"publications\",\n )\n name = models.CharField(max_length=255)\n\n``chained_field`` is the name of the ForeignKey field referenced by ChainedManyToManyField of the same Model.\nin the examples, chained_field is the name of field publication in Model Book.\n\n``chained_model_field`` is the name of the ManyToMany field referenced in the 'to' Model.\nin the examples, chained_model_field is the name of field publications in Model Writer.\n\n``auto_choose`` controls whether to auto-select the choice when there is only one available choice.", "id": "f1873:c1:m0"} 
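Note on record f1871:m0 above: its body's string literals were stripped during extraction, so the exact replacement pairs are not recoverable from this record. Below is a minimal sketch of a DIN 5007-style sort key of the kind the docstring describes; the four umlaut mappings and the helper's name are assumptions, not the record's original literals.

def unicode_sorter_sketch(value):
    # Hedged reconstruction: lowercase, then fold German special characters
    # so plain string comparison gives a DIN 5007-style ordering.
    # The four mappings below are assumed (umlauts to base vowels, eszett to "ss").
    key = value.lower()
    key = key.replace(u"ä", u"a")
    key = key.replace(u"ö", u"o")
    key = key.replace(u"ü", u"u")
    key = key.replace(u"ß", u"ss")
    return key

# Record f1871:m5 (sort_results) then applies such a key for an in-place sort, e.g.:
# results.sort(key=lambda x: unicode_sorter_sketch(force_text(x)))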
{"signature": "def __init__(self, to, chained_field=None, chained_model_field=None,show_all=False, auto_choose=False, sort=True, view_name=None, **kwargs):", "body": "self.chained_field = chained_fieldself.chained_model_field = chained_model_fieldself.show_all = show_allself.auto_choose = auto_chooseself.sort = sortself.view_name = view_nameif kwargs:kwargs[''] = kwargs.get('', models.CASCADE)else:kwargs = {'': models.CASCADE}super(ChainedForeignKey, self).__init__(to, **kwargs)", "docstring": "examples:\n\nclass Continent(models.Model):\n name = models.CharField(max_length=255)\n\nclass Country(models.Model):\n continent = models.ForeignKey(Continent)\n\nclass Location(models.Model):\n continent = models.ForeignKey(Continent)\n country = ChainedForeignKey(\n Country,\n chained_field=\"continent\",\n chained_model_field=\"continent\",\n show_all=True,\n auto_choose=True,\n sort=True,\n # limit_choices_to={'name':'test'}\n )\n``chained_field`` is the name of the ForeignKey field referenced by ChainedForeignKey of the same Model.\nin the examples, chained_field is the name of field continent in Model Location.\n\n``chained_model_field`` is the name of the ForeignKey field referenced in the 'to' Model.\nin the examples, chained_model_field is the name of field continent in Model Country.\n\n``show_all`` controls whether show other choices below the filtered choices, with separater '----------'.\n\n``auto_choose`` controls whether auto select the choice when there is only one available choice.\n\n``sort`` controls whether or not to sort results lexicographically or not.\n\n``view_name`` controls which view to use, 'chained_filter' or 'chained_filter_all'.", "id": "f1873:c2:m0"} {"signature": "def restart_callback(result=None):", "body": "if result.summary == opendnp3.TaskCompletion.SUCCESS:print(\"\".format(result.restartTime.GetMilliseconds()))else:print(\"\".format(opendnp3.TaskCompletionToString(result.summary)))", "docstring": ":type result: opendnp3.RestartOperationResult", "id": "f1888:m0"} {"signature": "def collection_callback(result=None):", "body": "print(\"\".format(result.headerIndex,result.index,opendnp3.CommandPointStateToString(result.state),opendnp3.CommandStatusToString(result.status)))", "docstring": ":type result: opendnp3.CommandPointResult", "id": "f1888:m1"} {"signature": "def command_callback(result=None):", "body": "print(\"\".format(opendnp3.TaskCompletionToString(result.summary)))result.ForeachItem(collection_callback)", "docstring": ":type result: opendnp3.ICommandTaskResult", "id": "f1888:m2"} {"signature": "def OnReceiveIIN(self, iin):", "body": "self.iin_field = dict(LSB=iin.LSB,MSB=iin.MSB)", "docstring": "Called when a response or unsolicited response is receive from the outstation.", "id": "f1888:c0:m1"} {"signature": "def OnTaskStart(self, type, id):", "body": "self.task_id = dict(id=id.GetId(),defined=id.IsDefined())", "docstring": "Task start notification.", "id": "f1888:c0:m2"} {"signature": "def OnTaskComplete(self, info):", "body": "self.task_info = dict(type=info.type,result=info.result)", "docstring": "Task completion notification.", "id": "f1888:c0:m3"} {"signature": "def OnStateChange(self, value):", "body": "self.link_status = value", "docstring": "Called when a the reset/unreset status of the link layer changes.", "id": "f1888:c0:m4"} {"signature": "def Now(self):", "body": "pass", "docstring": "Returns a UTCTimestamp of the current time.", "id": "f1888:c0:m5"} {"signature": "def OnStateChange(self, state):", "body": "self.state = state", 
"docstring": "State change notification.", "id": "f1888:c1:m1"} {"signature": "def Log(self, entry):", "body": "if entry.loggerid == \"\":self.tcp_client = True", "docstring": "Log information.", "id": "f1888:c2:m1"} {"signature": "def run_master(hang=False):", "body": "logger = asiodnp3.ConsoleLogger().Create()manager = asiodnp3.DNP3Manager(, asiodnp3.ConsoleLogger().Create())channel = manager.AddTCPClient(\"\",FILTERS,asiopal.ChannelRetry(),HOST,LOCAL,PORT,asiodnp3.PrintingChannelListener().Create())stack_config = asiodnp3.MasterStackConfig()stack_config.master.responseTimeout = openpal.TimeDuration().Seconds()stack_config.link.RemoteAddr = soe_handler = asiodnp3.PrintingSOEHandler().Create()default_master_app = asiodnp3.DefaultMasterApplication().Create()master = channel.AddMaster(\"\",soe_handler,default_master_app,stack_config)master.Enable()time.sleep()if not hang:del channeldel masterprint(\"\".format(hang))manager.Shutdown()", "docstring": "Demonstrate hanging when channel and master are not deleted prior to manager.Shutdown()", "id": "f1889:m0"} {"signature": "def OnStateChange(self, value):", "body": "self.link_status = value", "docstring": "Called when a the reset/unreset status of the link layer changes.", "id": "f1892:c0:m1"} {"signature": "def OnStateChange(self, state):", "body": "self.state = state", "docstring": "State change notification.", "id": "f1892:c1:m1"} {"signature": "def Log(self, entry):", "body": "if entry.loggerid == \"\":self.server = True", "docstring": "Log information.", "id": "f1892:c2:m1"} {"signature": "def startup(self):", "body": "print('')self.do_menu('')self.cmdloop('')exit()", "docstring": "Display the command-line interface's menu and issue a prompt.", "id": "f1897:c0:m1"} {"signature": "def do_menu(self, line):", "body": "print('')print('')print('')print('')print('')print('')print('')print('')print('')print('')print('')print('')print('')print('')print('')print('')print('')print('')print('')", "docstring": "Display a menu of command-line options. Command syntax is: menu", "id": "f1897:c0:m2"} {"signature": "def do_chan_log_all(self, line):", "body": "self.application.channel.SetLogFilters(openpal.LogFilters(opendnp3.levels.ALL_COMMS))print(''.format(opendnp3.levels.ALL_COMMS))", "docstring": "Set the channel log level to ALL_COMMS. Command syntax is: chan_log_all", "id": "f1897:c0:m3"} {"signature": "def do_chan_log_normal(self, line):", "body": "self.application.channel.SetLogFilters(openpal.LogFilters(opendnp3.levels.NORMAL))print(''.format(opendnp3.levels.NORMAL))", "docstring": "Set the channel log level to NORMAL. Command syntax is: chan_log_normal", "id": "f1897:c0:m4"} {"signature": "def do_disable_unsol(self, line):", "body": "headers = [opendnp3.Header().AllObjects(, ),opendnp3.Header().AllObjects(, ),opendnp3.Header().AllObjects(, )]self.application.master.PerformFunction(\"\",opendnp3.FunctionCode.DISABLE_UNSOLICITED,headers,opendnp3.TaskConfig().Default())", "docstring": "Perform the function DISABLE_UNSOLICITED. Command syntax is: disable_unsol", "id": "f1897:c0:m5"} {"signature": "def do_mast_log_all(self, line):", "body": "self.application.master.SetLogFilters(openpal.LogFilters(opendnp3.levels.ALL_COMMS))_log.debug(''.format(opendnp3.levels.ALL_COMMS))", "docstring": "Set the master log level to ALL_COMMS. 
Command syntax is: mast_log_all", "id": "f1897:c0:m6"} {"signature": "def do_mast_log_normal(self, line):", "body": "self.application.master.SetLogFilters(openpal.LogFilters(opendnp3.levels.NORMAL))_log.debug(''.format(opendnp3.levels.NORMAL))", "docstring": "Set the master log level to NORMAL. Command syntax is: mast_log_normal", "id": "f1897:c0:m7"} {"signature": "def do_o1(self, line):", "body": "self.application.send_direct_operate_command(opendnp3.ControlRelayOutputBlock(opendnp3.ControlCode.LATCH_ON),,command_callback)", "docstring": "Send a DirectOperate BinaryOutput (group 12) index 5 LATCH_ON to the Outstation. Command syntax is: o1", "id": "f1897:c0:m8"} {"signature": "def do_o2(self, line):", "body": "self.application.send_direct_operate_command(opendnp3.AnalogOutputInt32(),,command_callback)", "docstring": "Send a DirectOperate AnalogOutput (group 41) index 10 value 7 to the Outstation. Command syntax is: o2", "id": "f1897:c0:m9"} {"signature": "def do_o3(self, line):", "body": "self.application.send_direct_operate_command_set(opendnp3.CommandSet([opendnp3.WithIndex(opendnp3.ControlRelayOutputBlock(opendnp3.ControlCode.LATCH_ON), ),opendnp3.WithIndex(opendnp3.ControlRelayOutputBlock(opendnp3.ControlCode.LATCH_OFF), )]),command_callback)", "docstring": "Send a DirectOperate BinaryOutput (group 12) CommandSet to the Outstation. Command syntax is: o3", "id": "f1897:c0:m10"} {"signature": "def do_restart(self, line):", "body": "self.application.master.Restart(opendnp3.RestartType.COLD, restart_callback)", "docstring": "Request that the Outstation perform a cold restart. Command syntax is: restart", "id": "f1897:c0:m11"} {"signature": "def do_s1(self, line):", "body": "self.application.send_select_and_operate_command(opendnp3.ControlRelayOutputBlock(opendnp3.ControlCode.LATCH_ON),,command_callback)", "docstring": "Send a SelectAndOperate BinaryOutput (group 12) index 8 LATCH_ON to the Outstation. Command syntax is: s1", "id": "f1897:c0:m12"} {"signature": "def do_s2(self, line):", "body": "self.application.send_select_and_operate_command_set(opendnp3.CommandSet([opendnp3.WithIndex(opendnp3.ControlRelayOutputBlock(opendnp3.ControlCode.LATCH_ON), )]),command_callback)", "docstring": "Send a SelectAndOperate BinaryOutput (group 12) CommandSet to the Outstation. Command syntax is: s2", "id": "f1897:c0:m13"} {"signature": "def do_scan_all(self, line):", "body": "self.application.master.ScanAllObjects(opendnp3.GroupVariationID(, ), opendnp3.TaskConfig().Default())", "docstring": "Call ScanAllObjects. Command syntax is: scan_all", "id": "f1897:c0:m14"} {"signature": "def do_scan_fast(self, line):", "body": "self.application.fast_scan.Demand()", "docstring": "Demand an immediate fast scan. Command syntax is: scan_fast", "id": "f1897:c0:m15"} {"signature": "def do_scan_range(self, line):", "body": "self.application.master.ScanRange(opendnp3.GroupVariationID(, ), , , opendnp3.TaskConfig().Default())", "docstring": "Do an ad-hoc scan of a range of points (group 1, variation 2, indexes 0-3). Command syntax is: scan_range", "id": "f1897:c0:m16"} {"signature": "def do_scan_slow(self, line):", "body": "self.application.slow_scan.Demand()", "docstring": "Demand an immediate slow scan. 
Command syntax is: scan_slow", "id": "f1897:c0:m17"} {"signature": "def do_write_time(self, line):", "body": "millis_since_epoch = int((datetime.now() - datetime.utcfromtimestamp()).total_seconds() * )self.application.master.Write(opendnp3.TimeAndInterval(opendnp3.DNPTime(millis_since_epoch),,opendnp3.IntervalUnits.Seconds),, opendnp3.TaskConfig().Default())", "docstring": "Write a TimeAndInterval to the Outstation. Command syntax is: write_time", "id": "f1897:c0:m18"} {"signature": "def do_quit(self, line):", "body": "self.application.shutdown()exit()", "docstring": "Quit the command-line interface. Command syntax is: quit", "id": "f1897:c0:m19"} {"signature": "def collection_callback(result=None):", "body": "print(\"\".format(result.headerIndex,result.index,opendnp3.CommandPointStateToString(result.state),opendnp3.CommandStatusToString(result.status)))", "docstring": ":type result: opendnp3.CommandPointResult", "id": "f1899:m0"} {"signature": "def command_callback(result=None):", "body": "print(\"\".format(opendnp3.TaskCompletionToString(result.summary)))result.ForeachItem(collection_callback)", "docstring": ":type result: opendnp3.ICommandTaskResult", "id": "f1899:m1"} {"signature": "def main():", "body": "app = MyMaster(log_handler=MyLogger(),listener=AppChannelListener(),soe_handler=SOEHandler(),master_application=MasterApplication())_log.debug('')app.shutdown()_log.debug('')exit()", "docstring": "The Master has been started from the command line. Execute ad-hoc tests if desired.", "id": "f1899:m3"} {"signature": "def send_direct_operate_command(self, command, index, callback=asiodnp3.PrintingCommandCallback.Get(),config=opendnp3.TaskConfig().Default()):", "body": "self.master.DirectOperate(command, index, callback, config)", "docstring": "Direct operate a single command\n\n:param command: command to operate\n:param index: index of the command\n:param callback: callback that will be invoked upon completion or failure\n:param config: optional configuration that controls normal callbacks and allows the user to be specified for SA", "id": "f1899:c0:m1"} {"signature": "def send_direct_operate_command_set(self, command_set, callback=asiodnp3.PrintingCommandCallback.Get(),config=opendnp3.TaskConfig().Default()):", "body": "self.master.DirectOperate(command_set, callback, config)", "docstring": "Direct operate a set of commands\n\n:param command_set: set of command headers\n:param callback: callback that will be invoked upon completion or failure\n:param config: optional configuration that controls normal callbacks and allows the user to be specified for SA", "id": "f1899:c0:m2"} {"signature": "def send_select_and_operate_command(self, command, index, callback=asiodnp3.PrintingCommandCallback.Get(),config=opendnp3.TaskConfig().Default()):", "body": "self.master.SelectAndOperate(command, index, callback, config)", "docstring": "Select and operate a single command\n\n:param command: command to operate\n:param index: index of the command\n:param callback: callback that will be invoked upon completion or failure\n:param config: optional configuration that controls normal callbacks and allows the user to be specified for SA", "id": "f1899:c0:m3"} {"signature": "def send_select_and_operate_command_set(self, command_set, callback=asiodnp3.PrintingCommandCallback.Get(),config=opendnp3.TaskConfig().Default()):", "body": "self.master.SelectAndOperate(command_set, callback, config)", "docstring": "Select and operate a set of commands\n\n:param command_set: set of command headers\n:param callback: 
callback that will be invoked upon completion or failure\n:param config: optional configuration that controls normal callbacks and allows the user to be specified for SA", "id": "f1899:c0:m4"} {"signature": "def Process(self, info, values):", "body": "visitor_class_types = {opendnp3.ICollectionIndexedBinary: VisitorIndexedBinary,opendnp3.ICollectionIndexedDoubleBitBinary: VisitorIndexedDoubleBitBinary,opendnp3.ICollectionIndexedCounter: VisitorIndexedCounter,opendnp3.ICollectionIndexedFrozenCounter: VisitorIndexedFrozenCounter,opendnp3.ICollectionIndexedAnalog: VisitorIndexedAnalog,opendnp3.ICollectionIndexedBinaryOutputStatus: VisitorIndexedBinaryOutputStatus,opendnp3.ICollectionIndexedAnalogOutputStatus: VisitorIndexedAnalogOutputStatus,opendnp3.ICollectionIndexedTimeAndInterval: VisitorIndexedTimeAndInterval}visitor_class = visitor_class_types[type(values)]visitor = visitor_class()values.Foreach(visitor)for index, value in visitor.index_and_value:log_string = ''_log.debug(log_string.format(info.gv, info.headerIndex, type(values).__name__, index, value))", "docstring": "Process measurement data.\n\n:param info: HeaderInfo\n:param values: A collection of values received from the Outstation (various data types are possible).", "id": "f1899:c3:m1"} {"signature": "def main():", "body": "app = OutstationApplication()_log.debug('')app.shutdown()_log.debug('')exit()", "docstring": "The Outstation has been started from the command line. Execute ad-hoc tests if desired.", "id": "f1900:m0"} {"signature": "@staticmethoddef configure_stack():", "body": "stack_config = asiodnp3.OutstationStackConfig(opendnp3.DatabaseSizes.AllTypes())stack_config.outstation.eventBufferConfig = opendnp3.EventBufferConfig().AllTypes()stack_config.outstation.params.allowUnsolicited = Truestack_config.link.LocalAddr = stack_config.link.RemoteAddr = stack_config.link.KeepAliveTimeout = openpal.TimeDuration().Max()return stack_config", "docstring": "Set up the OpenDNP3 configuration.", "id": "f1900:c0:m1"} {"signature": "@staticmethoddef configure_database(db_config):", "body": "db_config.analog[].clazz = opendnp3.PointClass.Class2db_config.analog[].svariation = opendnp3.StaticAnalogVariation.Group30Var1db_config.analog[].evariation = opendnp3.EventAnalogVariation.Group32Var7db_config.analog[].clazz = opendnp3.PointClass.Class2db_config.analog[].svariation = opendnp3.StaticAnalogVariation.Group30Var1db_config.analog[].evariation = opendnp3.EventAnalogVariation.Group32Var7db_config.binary[].clazz = opendnp3.PointClass.Class2db_config.binary[].svariation = opendnp3.StaticBinaryVariation.Group1Var2db_config.binary[].evariation = opendnp3.EventBinaryVariation.Group2Var2db_config.binary[].clazz = opendnp3.PointClass.Class2db_config.binary[].svariation = opendnp3.StaticBinaryVariation.Group1Var2db_config.binary[].evariation = opendnp3.EventBinaryVariation.Group2Var2", "docstring": "Configure the Outstation's database of input point definitions.\n\nConfigure two Analog points (group/variation 30.1) at indexes 1 and 2.\nConfigure two Binary points (group/variation 1.2) at indexes 1 and 2.", "id": "f1900:c0:m2"} {"signature": "def shutdown(self):", "body": "self.manager.Shutdown()", "docstring": "Execute an orderly shutdown of the Outstation.\n\nThe debug messages may be helpful if errors occur during shutdown.", "id": "f1900:c0:m3"} {"signature": "@classmethoddef get_outstation(cls):", "body": "return cls.outstation", "docstring": "Get the singleton instance of IOutstation.", "id": "f1900:c0:m4"} {"signature": "@classmethoddef 
set_outstation(cls, outstn):", "body": "cls.outstation = outstn", "docstring": "Set the singleton instance of IOutstation, as returned from the channel's AddOutstation call.\n\nMaking IOutstation available as a singleton allows other classes (e.g. the command-line UI)\nto send commands to it -- see apply_update().", "id": "f1900:c0:m5"} {"signature": "def ColdRestartSupport(self):", "body": "_log.debug('')return opendnp3.RestartMode.UNSUPPORTED", "docstring": "Return a RestartMode enumerated value indicating whether cold restart is supported.", "id": "f1900:c0:m6"} {"signature": "def GetApplicationIIN(self):", "body": "application_iin = opendnp3.ApplicationIIN()application_iin.configCorrupt = Falseapplication_iin.deviceTrouble = Falseapplication_iin.localControl = Falseapplication_iin.needTime = Falseiin_field = application_iin.ToIIN()_log.debug(''.format(iin_field.LSB,iin_field.MSB))return application_iin", "docstring": "Return the application-controlled IIN field.", "id": "f1900:c0:m7"} {"signature": "def WarmRestartSupport(self):", "body": "_log.debug('')return opendnp3.RestartMode.UNSUPPORTED", "docstring": "Return a RestartMode enumerated value indicating whether a warm restart is supported.", "id": "f1900:c0:m11"} {"signature": "@classmethoddef process_point_value(cls, command_type, command, index, op_type):", "body": "_log.debug(''.format(index, command))", "docstring": "A PointValue was received from the Master. Process its payload.\n\n:param command_type: (string) Either 'Select' or 'Operate'.\n:param command: A ControlRelayOutputBlock or else a wrapped data value (AnalogOutputInt16, etc.).\n:param index: (integer) DNP3 index of the payload's data definition.\n:param op_type: An OperateType, or None if command_type == 'Select'.", "id": "f1900:c0:m12"} {"signature": "def apply_update(self, value, index):", "body": "_log.debug(''.format(type(value).__name__, index, value.value))builder = asiodnp3.UpdateBuilder()builder.Update(value, index)update = builder.Build()OutstationApplication.get_outstation().Apply(update)", "docstring": "Record an opendnp3 data value (Analog, Binary, etc.) in the outstation's database.\n\n The data value gets sent to the Master as a side-effect.\n\n:param value: An instance of Analog, Binary, or another opendnp3 data value.\n:param index: (integer) Index of the data definition in the opendnp3 database.", "id": "f1900:c0:m13"} {"signature": "def Select(self, command, index):", "body": "OutstationApplication.process_point_value('', command, index, None)return opendnp3.CommandStatus.SUCCESS", "docstring": "The Master sent a Select command to the Outstation. Handle it.\n\n:param command: ControlRelayOutputBlock,\n AnalogOutputInt16, AnalogOutputInt32, AnalogOutputFloat32, or AnalogOutputDouble64.\n:param index: int\n:return: CommandStatus", "id": "f1900:c1:m2"} {"signature": "def Operate(self, command, index, op_type):", "body": "OutstationApplication.process_point_value('', command, index, op_type)return opendnp3.CommandStatus.SUCCESS", "docstring": "The Master sent an Operate command to the Outstation. 
Handle it.\n\n:param command: ControlRelayOutputBlock,\n AnalogOutputInt16, AnalogOutputInt32, AnalogOutputFloat32, or AnalogOutputDouble64.\n:param index: int\n:param op_type: OperateType\n:return: CommandStatus", "id": "f1900:c1:m3"} {"signature": "def startup(self):", "body": "print('')self.do_menu('')self.cmdloop('')exit()", "docstring": "Display the command-line interface's menu and issue a prompt.", "id": "f1901:c0:m1"} {"signature": "def do_a(self, line):", "body": "index, value_string = self.index_and_value_from_line(line)if index and value_string:try:self.application.apply_update(opendnp3.Analog(float(value_string)), index)except ValueError:print('')", "docstring": "Send the Master an AnalogInput (group 32) value. Command syntax is: a index value", "id": "f1901:c0:m2"} {"signature": "def do_a2(self, line):", "body": "self.application.apply_update(opendnp3.Analog(), index=)", "docstring": "Send the Master an AnalogInput (group 32) value of 2 at index 4. Command syntax is: a2", "id": "f1901:c0:m3"} {"signature": "def do_b(self, line):", "body": "index, value_string = self.index_and_value_from_line(line)if index and value_string:if value_string.lower() == '' or value_string.lower() == '':self.application.apply_update(opendnp3.Binary(value_string == ''), index)else:print('')", "docstring": "Send the Master a BinaryInput (group 2) value. Command syntax is: 'b index true' or 'b index false", "id": "f1901:c0:m4"} {"signature": "def do_b0(self, line):", "body": "self.application.apply_update(opendnp3.Binary(False), index=)", "docstring": "Send the Master a BinaryInput (group 2) value of False at index 6. Command syntax is: b0", "id": "f1901:c0:m5"} {"signature": "def do_c(self, line):", "body": "index, value_string = self.index_and_value_from_line(line)if index and value_string:try:self.application.apply_update(opendnp3.Counter(int(value_string)), index)except ValueError:print('')", "docstring": "Send the Master a Counter (group 22) value. Command syntax is: c index value", "id": "f1901:c0:m6"} {"signature": "def do_d(self, line):", "body": "index = self.index_from_line(line)if index:self.application.apply_update(opendnp3.DoubleBitBinary(opendnp3.DoubleBit.DETERMINED_ON), index)", "docstring": "Send the Master a DoubleBitBinaryInput (group 4) value of DETERMINED_ON. Command syntax is: d index", "id": "f1901:c0:m7"} {"signature": "def do_menu(self, line):", "body": "print('')print('')print('')print('')print('')print('')print('')print('')print('')", "docstring": "Display a menu of command-line options. Command syntax is: menu", "id": "f1901:c0:m8"} {"signature": "def do_quit(self, line):", "body": "self.application.shutdown()exit()", "docstring": "Quit the command line interface. 
Command syntax is: quit", "id": "f1901:c0:m9"} {"signature": "@staticmethoddef index_and_value_from_line(line):", "body": "try:index = int(line.split('')[])except (ValueError, IndexError):print('')index = Nonetry:value_string = line.split('')[]except (ValueError, IndexError):print('')value_string = Nonereturn index, value_string", "docstring": "Parse an index (integer) and value (string) from command line args and return them.", "id": "f1901:c0:m10"} {"signature": "@staticmethoddef index_from_line(line):", "body": "try:index = int(line.split('')[])except (ValueError, IndexError):print('')index = Nonereturn index", "docstring": "Parse an index (integer) from command line args and return it.", "id": "f1901:c0:m11"} {"signature": "def __init__(self, routing=None, default=None, errors='',python_path='', verbose=False,pre_routing=None):", "body": "super().__init__()self.verbose = verboseself.receive = LogErrors(self._receive, errors)default = dict(self.DEFAULT, **(default or {}))self.pre_routing = ActionList(pre_routing)self.routing = Routing(routing or {}, default or {}, python_path)", "docstring": ":param Address pre_routing: This Address is set with with the message\n after the message is received and converted, but before it is\n routed.\n:param errors: either a number, indicating how many errors to report\n before ignoring them, or one of these strings:\n 'raise', meaning to raise an exception\n 'ignore', meaning to ignore all errors\n 'report', meaning to report all errors", "id": "f1905:c0:m0"} {"signature": "def _receive(self, msg):", "body": "msg = self._convert(msg)if msg is None:returnstr_msg = self.verbose and self._msg_to_str(msg)if self.verbose and log.is_debug():log.debug('', str_msg)if self.pre_routing:self.pre_routing.receive(msg)receiver, msg = self.routing.receive(msg)if receiver:receiver.receive(msg)if self.verbose:log.info('', str_msg[:], msg,repr(receiver))", "docstring": "Receive a message from the input source and perhaps raise an Exception.", "id": "f1905:c0:m4"} {"signature": "def _convert(self, msg):", "body": "raise NotImplementedError", "docstring": "Convert the message to a Control-specific format", "id": "f1905:c0:m5"} {"signature": "def _make_thread(self):", "body": "pass", "docstring": "Returns a new thread to run the loop for this control source.", "id": "f1905:c0:m6"} {"signature": "def messages(self):", "body": "raise NotImplementedError", "docstring": "Should yield a sequence of messages from the input source.", "id": "f1905:c1:m1"} {"signature": "def __init__(self, routing, default, python_path):", "body": "def make(x):if isinstance(x, (list, str)):return ActionList(x)assert isinstance(x, dict)if '' in x or '' in x:x = dict(default, **x)return construct.construct_type(x, python_path)return {k: make(v) for k, v in x.items()}routing = flatten.unflatten(routing)self.routing = make(routing)", "docstring": ":param dict routing: `routing` is a dict that maps addresses\n to lists of actions.\n\n The values in the input dictionary `routing` are recursively visited\n to build the routing table:\n\n * values that are strings or lists are used to construct ActionLists\n * dictionaries that contain \"typename\" or \"datatype\" keys are\n used to construct a class of that type.\n * otherwise, dictionaries are visited recursively\n * all other types are forbidden", "id": "f1907:c0:m0"} {"signature": "def set_project(self, project):", "body": "def visit(x):set_project = getattr(x, '', None)if set_project:set_project(project)values = getattr(x, '', lambda: ())for v in 
values():visit(v)visit(self.routing)", "docstring": "Set the base project for routing.", "id": "f1907:c0:m1"} {"signature": "def receive(self, msg):", "body": "x = self.routingwhile not isinstance(x, ActionList):if not x or not msg:return None, msgif not isinstance(x, dict):raise ValueError('' % type(x))_, value = msg.popitem(last=False)x = x.get(str(value))return x, msg", "docstring": "Returns a (receiver, msg) pair, where receiver is `None` if no route for\nthe message was found, or otherwise an object with a `receive` method\nthat can accept that `msg`.", "id": "f1907:c0:m2"} {"signature": "def single(method):", "body": "@functools.wraps(method)def single(self, address, value=None):address = urllib.parse.unquote_plus(address)try:error = NO_PROJECT_ERRORif not self.project:raise ValueErrorerror = BAD_ADDRESS_ERRORed = editor.Editor(address, self.project)if value is None:error = BAD_GETTER_ERRORresult = method(self, ed)else:error = BAD_SETTER_ERRORresult = method(self, ed, value)result = {'': result}except Exception as e:traceback.print_exc()msg = '' % (error.format(**locals()), e)result = {'': msg}return flask.jsonify(result)return single", "docstring": "Decorator for RestServer methods that take a single address", "id": "f1908:m0"} {"signature": "def multi(method):", "body": "@functools.wraps(method)def multi(self, address=''):values = flask.request.valuesaddress = urllib.parse.unquote_plus(address)if address and values and not address.endswith(''):address += ''result = {}for a in values or '':try:if not self.project:raise ValueError('')ed = editor.Editor(address + a, self.project)result[address + a] = {'': method(self, ed, a)}except:if self.project:traceback.print_exc()result[address + a] = {'': '' % a}return flask.jsonify(result)return multi", "docstring": "Decorator for RestServer methods that take multiple addresses", "id": "f1908:m1"} {"signature": "def __init__(self, segments=(), period=None, **kwds):", "body": "self.segments = _segments.Segments(segments)period = period or self.segments.total_timesuper().__init__(period=period, **kwds)", "docstring": "If period is None, then it is set to be the length of the\nsegments.\n\nEach segment is either a single number, indicating a level, or a pair of\nnumbers, indicating a level and a time. 
Levels without a time are\nassigned the average delay.", "id": "f1915:c7:m0"} {"signature": "def __init__(self, use_note_off=True, **kwds):", "body": "super().__init__(**kwds)self.use_note_off = use_note_off", "docstring": ":param use_note_off:\n If False, map note_offs to note_ons with velocity 0\n If True, map note_ons with velocity 0 to note_offs\n If None, do not change none_ons or note_offs", "id": "f1916:c0:m0"} {"signature": "def __init__(self, omit=None, normalizers=None, keys_by_type=None,accept=None, reject=None, auto_omit=True):", "body": "def to_set(x):if x is None:return set()if isinstance(x, (list, tuple)):return set(x)return set([x])def make_match(m):return m and {k: to_set(v) for k, v in m.items()}self.accept, self.reject = make_match(accept), make_match(reject)self.omit = to_set(omit)if auto_omit and self.accept:self.omit.update(k for k, v in self.accept.items() if len(v) == )self.normalizers = normalizers or {}if keys_by_type is None:self.keys_by_type = Noneelse:self.keys_by_type = {}for k, v in keys_by_type.items():if isinstance(v, str):v = [v]self.keys_by_type[k] = tuple(i for i in v if i not in self.omit)", "docstring": "Arguments\n\nomit -- A list of keys that will not be extracted.\n\nnormalizers -- Some keys also need to be \"normalized\" -\n scaled and offset so they are between 0 and 1, or -1 and 1.\n The `normalizers` table maps key names to a function that\n normalizes the value of that key.\n\nkeys_by_type -- `keys_by_type` is a dictionary from the `type` in an\n incoming message to a list of message keys to be extracted\n\naccept -- maps keys to a value or a list of values that are\n accepted for that key. A message has to match *all* entries in\n `accept` to be accepted.\n\nreject -- map key to a value or a list of values that are not\n accepted for that key. 
A message is rejected if it matches *any*\n entry in the reject map.\n\nauto_omit -- if True, omit all keys in `accept` that only have one\n possible value.\n\n auto_omit=True, the default, is probably more useful: if you\n request data for channel=1, type=note_on then you probably don't\n want to see channel=1, type=note_on with each message.", "id": "f1918:c0:m0"} {"signature": "def extract(self, msg):", "body": "def normal(key):v = msg.get(key)if v is None:return vnormalizer = self.normalizers.get(key, lambda x: x)return normalizer(v)def odict(keys):return collections.OrderedDict((k, normal(k)) for k in keys)def match(m):return (msg.get(k) in v for k, v in m.items()) if m else ()accept = all(match(self.accept))reject = any(match(self.reject))if reject or not accept:keys = ()elif self.keys_by_type is None:keys = [k for k in msg.keys() if k not in self.omit]else:keys = self.keys_by_type.get(msg.get(''))return odict(keys)", "docstring": "Yield an ordered dictionary if msg['type'] is in keys_by_type.", "id": "f1918:c0:m1"} {"signature": "def receive(self, msg):", "body": "if self.edit_queue:self.edit_queue.put_edit(self._set, msg)else:self._set(msg)", "docstring": "Receives a message, and either sets it immediately, or puts it on the\nedit queue if there is one.", "id": "f1924:c0:m2"} {"signature": "def start(self, threaded=None):", "body": "if threaded is not None:self.threaded = threadedrun = {'': {'': False}}self.project = project.project(self.desc, run, root_file=self.project_file)self._run = self.project.runself._runner.start(self.threaded)", "docstring": "Creates and starts the project.", "id": "f1925:c0:m1"} {"signature": "def stop(self=None):", "body": "if not self:instance = getattr(Runner.instance(), '', None)self = instance and instance()if not self:returnself._runner.stop()if self.project:self.project.stop()self.project = None", "docstring": "Stop the builder if it's running.", "id": "f1925:c0:m2"} {"signature": "def clear(self):", "body": "self.stop()super().clear()", "docstring": "Stop the project if it's running and clear the project description", "id": "f1925:c0:m3"} {"signature": "@propertydef is_running(self):", "body": "return self._runner.is_running", "docstring": "True if the Builder is currently running", "id": "f1925:c0:m4"} {"signature": "@propertydef threaded(self):", "body": "return self.desc.run.get('', False)", "docstring": "True if the Builder is runs in a separate thread, false if the\nBuilder blocks, waiting for the animation to end.", "id": "f1925:c0:m5"} {"signature": "@staticmethoddef simpixel(new=, autoraise=True):", "body": "simpixel_driver.open_browser(new=new, autoraise=autoraise)", "docstring": "Open an instance of simpixel in the browser", "id": "f1925:c0:m7"} {"signature": "@staticmethoddef animations():", "body": "animations.run(None)", "docstring": "List all the existing animations", "id": "f1925:c0:m8"} {"signature": "def set_one(desc, name, value):", "body": "old_value = desc.get(name)if old_value is None:raise KeyError('' % name)if value is None:value = type(old_value)()elif name in CLASS_SECTIONS:if isinstance(value, str):value = {'': aliases.resolve(value)}elif isinstance(value, type):value = {'': class_name.class_name(value)}elif not isinstance(value, dict):raise TypeError('' % value)typename = value.get('')if typename:s = '' if name == '' else ''path = '' + name + simporter.import_symbol(typename, path)elif name == '':if not isinstance(value, (list, int, tuple, str)):raise TypeError('' % value)elif type(old_value) is not type(value):raise 
TypeError('' %(type(old_value), value, type(value)))desc[name] = value", "docstring": "Set one section in a Project description", "id": "f1926:m0"} {"signature": "def update(desc, other=None, **kwds):", "body": "other = other and _as_dict(other) or {}for i in other, kwds:for k, v in i.items():if isinstance(v, dict):old_v = desc[k]for k2, v2 in v.items():if v2 is None:old_v.pop(k2, None)else:old_v[k2] = v2else:set_one(desc, k, v)", "docstring": "Update sections in a Project description", "id": "f1926:m1"} {"signature": "def clear(self):", "body": "self._desc = {}for key, value in merge.DEFAULT_PROJECT.items():if key not in self._HIDDEN:self._desc[key] = type(value)()", "docstring": "Clear description to default values", "id": "f1928:c0:m1"} {"signature": "def items(self):", "body": "return self._desc.items()", "docstring": "Return an iterable of (key, section value) pairs", "id": "f1928:c0:m2"} {"signature": "def update(self, desc=None, **kwds):", "body": "sections.update(self._desc, desc, **kwds)", "docstring": "This method updates the description much like dict.update(), *except*:\n\n 1. for description which have dictionary values, it uses update\n to alter the existing value and does not replace them.\n\n 2. `None` is a special value that means \"clear section to default\" or\n \"delete field\".", "id": "f1928:c0:m3"} {"signature": "def as_dict(self):", "body": "return {k: v for k, v in self.items() if v}", "docstring": "Returns a dictionary of non-empty description", "id": "f1928:c0:m4"} {"signature": "def __getitem__(self, index):", "body": "index = self._check_index(index)return self.layout.get(*index)", "docstring": "Returns the r, g, b pixel at a location in the layout. May only be\ncalled if self.is_running is true.", "id": "f1929:c0:m1"} {"signature": "def __setitem__(self, index, color):", "body": "index = self._check_index(index)try:color = make.color(color)except:log.error('', color)raiseindex.append(color)return self.layout.set(*index)", "docstring": "Sets the r, g, b pixel at a location in the layout. May only be called\nif self.is_running is true.", "id": "f1929:c0:m2"} {"signature": "def load(self, project_file=''):", "body": "self._request_project_file(project_file)self.clear()self.desc.update(data_file.load(self._project_file))", "docstring": "Load/reload the description from a YML file. Prompt if no file given.", "id": "f1930:c0:m1"} {"signature": "def save(self, project_file=''):", "body": "self._request_project_file(project_file)data_file.dump(self.desc.as_dict(), self.project_file)", "docstring": "Save the description as a YML file. 
Prompt if no file given.", "id": "f1930:c0:m2"} {"signature": "def clear(self):", "body": "self.desc.clear()", "docstring": "Clear description to default values", "id": "f1930:c0:m3"} {"signature": "def start(self, threaded):", "body": "self.stop()self.__class__._INSTANCE = weakref.ref(self)self.is_running = Trueif threaded:self.thread = runnable.LoopThread()self.thread.run_once = self._targetself.thread.start()else:self._target()", "docstring": "Creates and starts the project.", "id": "f1931:c0:m1"} {"signature": "def stop(self):", "body": "if self.is_running:log.info('')self.is_running = Falseself.__class__._INSTANCE = Nonetry:self.thread and self.thread.stop()except:log.error('')traceback.print_exc()self.thread = Nonereturn True", "docstring": "Stop the Runner if it's running.\nCalled as a classmethod, stop the running instance if any.", "id": "f1931:c0:m2"} {"signature": "@classmethoddef instance(cls):", "body": "return cls._INSTANCE and cls._INSTANCE()", "docstring": "Return the unique instance of Runner, if any, or None", "id": "f1931:c0:m3"} {"signature": "def __init__(self, *args, limit=None, **kwds):", "body": "super().__init__(*args, **kwds)self.limit = Limit(**(limit or {}))self._math = color_list.Math(self.color_list)", "docstring": ":param dict limit: A construction dictionary for a Limit.", "id": "f1947:c0:m0"} {"signature": "def __init__(self, *args, filename='', render=None,divide=, frames=, time=, scale=, options=None,gif_dir=None, **kwds):", "body": "super().__init__(*args, **kwds)self.movie_writer = _movie_writer.MovieWriter(filename, render, divide, frames, time, scale, options, gif_dir)", "docstring": ":param str filename: Base filename to write the animated GIF file\n\n:param dict render: Parameters to the renderer function -\n see ``bibliopixel.util.image.render.renderer``\n\n:param int divide: If greater than 1, only rendered one in ``divide``\n frames\n\n:param int frames: Number of frames to write\n\n:param float time: Total time to write. If non-zero, takes precedence\n over `frames`\n\n:param float speed: the speed of the GIF is scaled up by this factor,\n so if speed=2 then a 2 second animation will become a 1 second GIF.\n\n:param dict options: Options to\n ``bibliopixel.util.image.gif.write_animation``\n\n:param str gif_dir: If set, write individual GIF frame files to this\n directory, and do not delete them when done. For testing purposes.", "id": "f1948:c0:m0"} {"signature": "@classmethoddef construct(cls, project, *, run=None, name=None, data=None, **desc):", "body": "from . 
failed import Failedexception = desc.pop('', None)if exception:a = Failed(project.layout, desc, exception)else:try:a = cls(project.layout, **desc)a._set_runner(run or {})except Exception as e:if cls.FAIL_ON_EXCEPTION:raisea = Failed(project.layout, desc, e)a.name = namea.data = datareturn a", "docstring": "Construct an animation, set the runner, and add in the two\n\"reserved fields\" `name` and `data`.", "id": "f1952:c0:m0"} {"signature": "def __init__(self, layout, *, preclear=True, fail_on_exception=None, **kwds):", "body": "self.palette = legacy_palette.pop_legacy_palette(kwds, *self.COLOR_DEFAULTS)self.palette.length = layout.numLEDsattributes.set_reserved(self, '', **kwds)self.layout = layoutassert layoutself.internal_delay = Noneself.on_completion = Noneself.state = runner.STATE.readyself.preclear = preclearself.runner = Noneself.time = time.timeself.preframe_callbacks = []self.fail_on_exception = self.FAIL_ON_EXCEPTION if fail_on_exception is None else fail_on_exception", "docstring": "Arguments:\n preclear: If True, clear the layout before rendering the frame;\n otherwise, the results of the previous frame are preserved\n\n fail_on_exception: If False, exceptions thrown in the animation frame are\n caught and reported;\n if True, exceptions are raised, potentially ending the\n animation cycle and the program;\n if None or not set, the value of Animation.FAIL_ON_EXCEPTION is used", "id": "f1952:c0:m1"} {"signature": "@propertydef _led(self):", "body": "return self.layout", "docstring": "Many BiblioPixelAnimations use the \"protected\" variable _led.", "id": "f1952:c0:m3"} {"signature": "@propertydef completed(self):", "body": "return self.state == runner.STATE.complete", "docstring": "Many BiblioPixelAnimations use the old `completed` variable.", "id": "f1952:c0:m7"} {"signature": "def add_preframe_callback(self, callback):", "body": "self.preframe_callbacks.append(callback)", "docstring": "The preframe_callbacks are called right before the start of a\nframe rendering pass.\n\nTo avoid race conditions when editing values, the ``Project``\nadds a callback here for the top-level animation, to drain the\nedit_queue at a moment where no rendering is\nhappening.", "id": "f1952:c0:m13"} {"signature": "def __init__(self, *args, overlay=False, detach=True, **kwds):", "body": "super().__init__(*args, **kwds)if detach:self.detach(overlay)", "docstring": "If overlay is True, then preclear is set to False for everything\nother than the first animation.", "id": "f1954:c0:m0"} {"signature": "def __init__(self, layout, random=False, length=None, **kwds):", "body": "super().__init__(layout, **kwds)self.random = randomif isinstance(length, (list, tuple)):self.length = lengthelse:self.length = length and [length]", "docstring": "Arguments\n\nrandom -- If True, a random animation is selected each step\nlength -- if length is a number, run all the animations in a loop\n for `length` seconds each. 
If `length` is a list of numbers,\n use the numbers successively as times.", "id": "f1955:c0:m0"} {"signature": "def opener(ip_address, port, delay=):", "body": "global WEBPAGE_OPENEDif WEBPAGE_OPENED:returnWEBPAGE_OPENED = Trueraw_opener(ip_address, port, delay)", "docstring": "Wait a little and then open a web browser page for the control panel.", "id": "f1966:m0"} {"signature": "def raw_opener(ip_address, port, delay=):", "body": "def target():time.sleep(delay)url = '' % (ip_address, port)webbrowser.open(url, new=, autoraise=True)threading.Thread(target=target, daemon=True).start()", "docstring": "Wait a little and then open a web browser page for the control panel.", "id": "f1966:m1"} {"signature": "def _clean_animation(desc, parent):", "body": "desc = load.load_if_filename(desc) or descif isinstance(desc, str):animation = {'': desc}elif not isinstance(desc, dict):raise TypeError('' % type(desc))elif '' in desc or '' not in desc:animation = descelse:animation = desc.pop('', {})if isinstance(animation, str):animation = {'': animation}animation[''] = desc.pop('', {})if desc:raise ValueError('' + ''.join(desc))animation.setdefault('', DEFAULT_ANIMATION)animation = construct.to_type_constructor(animation, ANIMATION_PATH)datatype = animation.setdefault('', failed.Failed)animation.setdefault('', datatype.__name__)run = animation.setdefault('', {})run_parent = parent.setdefault('', {})if not ('' in run or '' in run):if '' in run_parent:run.update(fps=run_parent[''])elif '' in run_parent:run.update(sleep_time=run_parent[''])return animation", "docstring": "Cleans up all sorts of special cases that humans want when entering\nan animation from a yaml file.\n\n1. Loading it from a file\n2. Using just a typename instead of a dict\n3. A single dict representing an animation, with a run: section.\n4. (Legacy) Having a dict with parallel elements run: and animation:\n5. (Legacy) A tuple or list: (animation, run )", "id": "f1968:m0"} {"signature": "def _make_names_unique(animations):", "body": "counts = {}for a in animations:c = counts.get(a[''], ) + counts[a['']] = cif c > :a[''] += '' + str(c - )dupes = set(k for k, v in counts.items() if v > )for a in animations:if a[''] in dupes:a[''] += ''", "docstring": "Given a list of animations, some of which might have duplicate names, rename\nthe first one to be _0, the second _1,\n_2, etc.", "id": "f1968:m1"} {"signature": "def detach(self, overlay):", "body": "for i, a in enumerate(self.animations):a.layout = a.layout.clone()if overlay and i:a.preclear = False", "docstring": "Give each animation a unique, mutable layout so they can run\nindependently.", "id": "f1968:c0:m6"} {"signature": "def __init__(self, *args, size=, **kwds):", "body": "super().__init__(*args, detach=False, **kwds)if not size:raise ValueError('')self.size = size if isinstance(size, list) else [size]self.is_numpy = hasattr(self.color_list, '')for animation, begin, end in self._foreach():animation.layout = Strip([], color_list=self.color_list[begin:end])", "docstring": "Arguments --\n size: a number or a list of numbers representing the size of each\n segment from the original layout. 
If there aren't enough sizes\n for each segment, the list of sizes is reused repeatedly.", "id": "f1971:c0:m0"} {"signature": "@propertydef index(self):", "body": "return self._index", "docstring": ":returns int: index of the current animation within the Collection.", "id": "f1972:c0:m0"} {"signature": "def _on_index(self, old_index):", "body": "if self.animation:log.debug('',self.__class__.__name__, self.current_animation.title)self.frames = self.animation.generate_frames(False)", "docstring": "Override this method to get called right after ``self.index`` is set.\n\n:param int old_index: the previous index, before it was changed.", "id": "f1972:c0:m2"} {"signature": "@propertydef animation(self):", "body": "if <= self._index < len(self.animations):return self.animations[self._index]", "docstring": ":returns: the selected animation based on self.index, or None if\n self.index is out of bounds", "id": "f1972:c0:m7"} {"signature": "@propertydef current_animation(self):", "body": "return self.animation", "docstring": "DEPRECATED:\n:returns: self.animation", "id": "f1972:c0:m8"} {"signature": "def step(self, amt=):", "body": "if not self._stop_event.isSet():self._hold_for_data.wait()self._hold_for_data.clear()", "docstring": "This may seem silly, but on a Receiver step() need not do anything.\nInstead, receive the data on the receive thread and set it on the buffer,\nthen call self._hold_for_data.set()", "id": "f1973:c1:m6"} {"signature": "def adapt_animation_layout(animation):", "body": "layout = animation.layoutrequired = getattr(animation, '', None)if not required or isinstance(layout, required):returnmsg = LAYOUT_WARNING % (type(animation).__name__, required.__name__, type(layout).__name__)setter = layout.setadaptor = Noneif required is strip.Strip:if isinstance(layout, matrix.Matrix):width = layout.widthdef adaptor(pixel, color=None):y, x = divmod(pixel, width)setter(x, y, color or BLACK)elif isinstance(layout, cube.Cube):lx, ly = layout.x, layout.ydef adaptor(pixel, color=None):yz, x = divmod(pixel, lx)z, y = divmod(yz, ly)setter(x, y, z, color or BLACK)elif isinstance(layout, circle.Circle):def adaptor(pixel, color=None):layout._set_base(pixel, color or BLACK)elif required is matrix.Matrix:if isinstance(layout, strip.Strip):width = animation.widthdef adaptor(x, y, color=None):setter(x + y * width, color or BLACK)if not adaptor:raise ValueError(msg)log.warning(msg)animation.layout.set = adaptor", "docstring": "Adapt the setter in an animation's layout so that Strip animations can run\non a Matrix, Cube, or Circle layout, and Matrix or Cube animations can run\non a Strip layout.", "id": "f1979:m0"} {"signature": "def __init__(self, offset=, begin=None, end=None):", "body": "self.begin = self.BEGIN if begin is None else beginself.end = self.END if end is None else endif not (self.BEGIN <= self.begin <= self.end <= self.END):raise ValueError('' %(self.BEGIN, self.begin, self.end, self.END))self.offset = offset", "docstring": "Unlike a `range`, an OffsetRange includes both its begin *and* its end,\nso it's closer to how regular people think of a range - for example\nthat DMX channels are in the range 1-512.", "id": "f1982:c0:m0"} {"signature": "def index(self, i, length=None):", "body": "if self.begin <= i <= self.end:index = i - self.BEGIN - self.offsetif length is None:length = self.full_range()else:length = min(length, self.full_range())if <= index < length:return index", "docstring": "Return an integer index or None", "id": "f1982:c0:m2"} {"signature": "def read_from(self, data, pad=):", 
"body": "for i in range(self.BEGIN, self.END + ):index = self.index(i, len(data))yield pad if index is None else data[index]", "docstring": "Returns a generator with the elements \"data\" taken by offset, restricted\nby self.begin and self.end, and padded on either end by `pad` to get\nback to the original length of `data`", "id": "f1982:c0:m3"} {"signature": "def pointOnCircle(cx, cy, radius, angle):", "body": "angle = math.radians(angle) - (math.pi / )x = cx + radius * math.cos(angle)if x < cx:x = math.ceil(x)else:x = math.floor(x)y = cy + radius * math.sin(angle)if y < cy:y = math.ceil(y)else:y = math.floor(y)return (int(x), int(y))", "docstring": "Calculates the coordinates of a point on a circle given the center point,\nradius, and angle.", "id": "f1983:m6"} {"signature": "def genVector(width, height, x_mult=, y_mult=):", "body": "center_x = (width - ) / center_y = (height - ) / def length(x, y):dx = math.pow(x - center_x, * x_mult)dy = math.pow(y - center_y, * y_mult)return int(math.sqrt(dx + dy))return [[length(x, y) for x in range(width)] for y in range(height)]", "docstring": "Generates a map of vector lengths from the center point to each coordinate.\n\nwidth - width of matrix to generate\nheight - height of matrix to generate\nx_mult - value to scale x-axis by\ny_mult - value to scale y-axis by", "id": "f1983:m7"} {"signature": "def class_name(c):", "body": "if not isinstance(c, type):c = type(c)return '' % (c.__module__, c.__name__)", "docstring": ":param c: either an object or a class\n:return: the classname as a string", "id": "f1984:m0"} {"signature": "def parse(s):", "body": "parts = s.replace('', '').split()if not parts:raise ValueError('')pieces = []for part in parts:m = PART_MATCH(part)pieces.extend(m.groups() if m else [part])if len(pieces) == :pieces.append('')if len(pieces) % :raise ValueError('' % (s, parts, pieces))result = for number, units in zip(*[iter(pieces)] * ):number = float(number)if number < :raise ValueError('')result += number * _get_units(units)return result", "docstring": "Parse a string representing a time interval or duration into seconds,\nor raise an exception\n\n:param str s: a string representation of a time interval\n:raises ValueError: if ``s`` can't be interpreted as a duration", "id": "f1987:m1"} {"signature": "def __init__(self, filename):", "body": "self.__filename = filenamedata = data_file.load(filename) if os.path.exists(filename) else {}super().__init__(data)", "docstring": ":param c: the filename to store the DATA_FILE in", "id": "f1988:c0:m0"} {"signature": "def __init__(self, constructor, **kwds):", "body": "self.servers = {}self.constructor = constructorself.kwds = kwds", "docstring": ":param constructor: a function which takes a key and some keywords,\n and returns a new server\n:param kwds: keywords to the ``constructor`` function", "id": "f1989:c1:m0"} {"signature": "def get_server(self, key, **kwds):", "body": "kwds = dict(self.kwds, **kwds)server = self.servers.get(key)if server:server.check_keywords(self.constructor, kwds)else:server = _CachedServer(self.constructor, key, kwds)self.servers[key] = serverreturn server", "docstring": "Get a new or existing server for this key.\n\n:param int key: key for the server to use", "id": "f1989:c1:m1"} {"signature": "def __init__(self, ratio=, knee=, gain=, enable=True):", "body": "self.ratio = ratioself.knee = kneeself.gain = gainself.enable = enable", "docstring": ":param float ratio: the compression ratio (1 means no compression).\n ratio should usually between 0 and 1.\n\n:param float 
knee: the ratio where the compression starts to kick in.\n knee should usually be 0 <= knee <= ratio\n\n:param float gain: post limiter output gain. gain should usually be >= 0", "id": "f1990:c0:m0"} {"signature": "def run(self, next_task):", "body": "self.event.wait()self.task()self.event.clear()next_task.event.set()", "docstring": "Wait for the event, run the task, trigger the next task.", "id": "f1991:c0:m1"} {"signature": "@propertydef running(self):", "body": "return self.run_event.is_set() and not self.stop_event.is_set()", "docstring": "Is this Runnable expected to make any progress from here?\n\nThe Runnable might still execute a little code after it has stopped\nrunning.", "id": "f1993:c0:m1"} {"signature": "def is_alive(self):", "body": "return self.running", "docstring": "Is this Runnable still executing code?\n\nIn some cases, such as threads, self.is_alive() might be true for some\ntime after self.running has turned False.", "id": "f1993:c0:m2"} {"signature": "def cleanup(self):", "body": "", "docstring": "Cleans up resources after the Runnable.\n\nself.cleanup() may not throw an exception.", "id": "f1993:c0:m6"} {"signature": "def run_once(self):", "body": "pass", "docstring": "The target code that is repeatedly executed in the run method", "id": "f1993:c0:m8"} {"signature": "@contextlib.contextmanagerdef run_until_stop(self):", "body": "self.start()try:yield selffinally:self.stop()self.wait()", "docstring": "A context manager that starts this Runnable, yields,\nand then waits for it to finish.", "id": "f1993:c0:m9"} {"signature": "def compose_events(events, condition=all):", "body": "events = list(events)master_event = threading.Event()def changed():if condition(e.is_set() for e in events):master_event.set()else:master_event.clear()def add_changed(f):@functools.wraps(f)def wrapped():f()changed()return wrappedfor e in events:e.set = add_changed(e.set)e.clear = add_changed(e.clear)changed()return master_event", "docstring": "Compose a sequence of events into one event.\n\nArguments:\n events: a sequence of objects looking like threading.Event\n condition: a function taking a sequence of bools and returning a bool.", "id": "f1994:m0"} {"signature": "def run(function, *args, use_subprocess=False, daemon=True, **kwds):", "body": "if use_subprocess:Creator, Queue = multiprocessing.Process, multiprocessing.Queueelse:Creator, Queue = threading.Thread, queue.Queueinput, output = Queue(), Queue()args = input, output, function, argssub = Creator(target=_run_locally, args=args, kwargs=kwds, daemon=daemon)sub.start()return sub, input, output", "docstring": "Create input, output queues, call `function` in a subprocess or a thread.\n\n``function`` is called like this: ``function(input, output, *args, **kwds)``\n\n:param use_subprocess: if true, create a new multiprocess;\n if false, create a new thread\n:param function: the function to call\n:param daemon: is the thread or subprocess run as a daemon or not?\n\n:param args: positional arguments to the function\n:param kwds: keyword arguments to the function\n:returns: a tuple with three elements: the subprocess or thread, an input\n queue, and an output queue.", "id": "f1995:m1"} {"signature": "def __init__(self, function, errors):", "body": "assert isinstance(errors, int) or errors in ('', '', '')self.function = functionself.errors = errorsself.error_count = ", "docstring": ":param function: the function to wrap\n:param errors: either a number, indicating how many errors to report\n before ignoring them, or one of these strings:\n 'raise', 
meaning to raise an exception\n 'ignore', meaning to ignore all errors\n 'report', meaning to report all errors", "id": "f1997:c0:m0"} {"signature": "def __call__(self, *args, **kwds):", "body": "try:return self.function(*args, **kwds)except Exception as e:self.error_count += if self.errors == '':raiseif self.errors == '':returnargs = (class_name.class_name(e),) + e.argsif self.errors == '' or self.error_count <= self.errors:log.error(str(args))elif self.error_count == self.errors + :log.error('', self.errors)", "docstring": "Calls `self.function` with the given arguments and keywords, and\nreturns its value - or if the call throws an exception, returns None.", "id": "f1997:c0:m1"} {"signature": "def _addLoggingLevel(levelName, levelNum, methodName=None):", "body": "if not methodName:methodName = levelName.lower()if hasattr(logging, levelName):raise AttributeError(''.format(levelName))if hasattr(logging, methodName):raise AttributeError(''.format(methodName))if hasattr(logging.getLoggerClass(), methodName):raise AttributeError(''.format(methodName))def logForLevel(self, message, *args, **kwargs):if self.isEnabledFor(levelNum):self._log(levelNum, message, args, **kwargs)def logToRoot(message, *args, **kwargs):logging.log(levelNum, message, *args, **kwargs)logging.addLevelName(levelNum, levelName)setattr(logging, levelName, levelNum)setattr(logging.getLoggerClass(), methodName, logForLevel)setattr(logging, methodName, logToRoot)", "docstring": "Comprehensively adds a new logging level to the `logging` module and the\ncurrently configured logging class.\n\n`levelName` becomes an attribute of the `logging` module with the value\n`levelNum`. `methodName` becomes a convenience method for both `logging`\nitself and the class returned by `logging.getLoggerClass()` (usually just\n`logging.Logger`). 
If `methodName` is not specified, `levelName.lower()` is\nused.\n\nTo avoid accidental clobberings of existing attributes, this method will\nraise an `AttributeError` if the level name is already an attribute of the\n`logging` module or if the method name is already present\n\nExample\n-------\n>>> addLoggingLevel('TRACE', logging.DEBUG - 5)\n>>> logging.getLogger(__name__).setLevel(\"TRACE\")\n>>> logging.getLogger(__name__).trace('that worked')\n>>> logging.trace('so did this')\n>>> logging.TRACE\n5", "id": "f1998:m2"} {"signature": "def set_log_level(level):", "body": "if isinstance(level, str):level = LOG_NAMES[level.lower()]logger.setLevel(level)", "docstring": ":param level: the level to set - either a string level name from\n 'frame', 'debug', 'info', 'warning', 'error'\n or an integer log level from:\n log.FRAME, log.DEBUG, log.INFO, log.WARNING, log.ERROR", "id": "f1998:m4"} {"signature": "def dumps(data, use_yaml=None, safe=True, **kwds):", "body": "if use_yaml is None:use_yaml = ALWAYS_DUMP_YAMLif use_yaml:dumps = yaml.safe_dump if safe else yaml.dumpelse:dumps = json.dumpskwds.update(indent=, sort_keys=True)if not safe:kwds.update(default=repr)return dumps(data, **kwds)", "docstring": "Dumps data into a nicely formatted JSON string.\n\n:param dict data: a dictionary to dump\n:param kwds: keywords to pass to json.dumps\n:returns: a string with formatted data\n:rtype: str", "id": "f1999:m0"} {"signature": "def dump(data, file=sys.stdout, use_yaml=None, **kwds):", "body": "if use_yaml is None:use_yaml = ALWAYS_DUMP_YAMLdef dump(fp):if use_yaml:yaml.safe_dump(data, stream=fp, **kwds)else:json.dump(data, fp, indent=, sort_keys=True, **kwds)if not isinstance(file, str):return dump(file)if os.path.isabs(file):parent = os.path.dirname(file)if not os.path.exists(parent):os.makedirs(parent, exist_ok=True)with open(file, '') as fp:return dump(fp)", "docstring": "Dumps data as nicely formatted JSON string to a file or file handle\n\n:param dict data: a dictionary to dump\n:param file: a filename or file handle to write to\n:param kwds: keywords to pass to json.dump", "id": "f1999:m1"} {"signature": "def load(file, use_yaml=None):", "body": "if isinstance(file, str):fp = open(file)filename = fileelse:fp = filefilename = getattr(fp, '', '')try:return loads(fp.read(), use_yaml, filename)except Exception as e:e.args = ('', filename) + e.argsraise", "docstring": "Loads not only JSON files but also YAML files ending in .yml.\n\n:param file: a filename or file handle to read from\n:returns: the data loaded from the JSON or YAML file\n:rtype: dict", "id": "f1999:m3"} {"signature": "def load_if(s):", "body": "is_data_file = s.endswith('') or s.endswith('')return load(s) if is_data_file else loads(s)", "docstring": "Load either a filename, or a string representation of yml/json.", "id": "f1999:m4"} {"signature": "def advance_permutation(a, increasing=True, forward=True):", "body": "if not forward:a.reverse()cmp = operator.lt if increasing else operator.gttry:i = next(i for i in reversed(range(len(a) - )) if cmp(a[i], a[i + ]))j = next(j for j in reversed(range(i + , len(a))) if cmp(a[i], a[j]))except StopIteration:if forward:a.reverse()return Falsea[i], a[j] = a[j], a[i]a[i + :] = reversed(a[i + :])if not forward:a.reverse()return True", "docstring": "Advance a list of unique, ordered elements in-place, lexicographically\nincreasing or backward, by rightmost or leftmost digit.\n\nReturns False if the permutation wrapped around - i.e. 
went from\nlexicographically greatest to least, and True in all other cases.\n\nIf the length of the list is N, then this function will repeat values after\nN! steps, and will return False exactly once.\n\nSee also https://stackoverflow.com/a/34325140/43839", "id": "f2003:m0"} {"signature": "@contextlib.contextmanagerdef pid_context(pid_filename=None):", "body": "pid_filename = pid_filename or DEFAULT_PID_FILENAMEif os.path.exists(pid_filename):contents = open(pid_filename).read()log.warning('',pid_filename, contents)with open(pid_filename, '') as fp:fp.write(str(os.getpid()))fp.write('')try:yieldfinally:try:os.remove(pid_filename)except Exception as e:log.error('',e, pid_filename)", "docstring": "For the duration of this context manager, put the PID for this process into\n`pid_filename`, and then remove the file at the end.", "id": "f2005:m1"} {"signature": "def get_pid(pid_filename=None):", "body": "return int(open(pid_filename or DEFAULT_PID_FILENAME).read())", "docstring": "Return the integer PID for the current bp process, or raise an exception if\nthere is no such process or it hasn't registered a PID.", "id": "f2005:m2"} {"signature": "@contextlib.contextmanagerdef add(*args):", "body": "try:yieldexcept Exception as e:e.args = args + e.argsraise", "docstring": "A context manager that appends arguments to any exception thrown\n\n:param args: Arguments to be appended to the ``.args`` attribute of any\n exception that is thrown while the context manager is active", "id": "f2007:m0"} {"signature": "def report(function, *args, **kwds):", "body": "try:function(*args, **kwds)except Exception:traceback.print_exc()", "docstring": "Run a function, catch, report and discard exceptions", "id": "f2007:m1"} {"signature": "def crop(image, top_offset=, left_offset=, bottom_offset=, right_offset=):", "body": "if bottom_offset or top_offset or left_offset or right_offset:width, height = image.sizebox = (left_offset, top_offset,width - right_offset, height - bottom_offset)image = image.crop(box=box)return image", "docstring": "Return an image cropped on top, bottom, left or right.", "id": "f2011:m0"} {"signature": "def resize(image, x, y, stretch=False, top=None, left=None, mode='',resample=None):", "body": "if x <= :raise ValueError('')if y <= :raise ValueError('')from PIL import Imageresample = Image.ANTIALIAS if resample is None else resampleif not isinstance(resample, numbers.Number):try:resample = getattr(Image, resample.upper())except:raise ValueError(\"\" % resample)if not isinstance(resample, numbers.Number):raise ValueError(\"\" % resample)size = x, yif stretch:return image.resize(size, resample=resample)result = Image.new(mode, size)ratios = [d1 / d2 for d1, d2 in zip(size, image.size)]if ratios[] < ratios[]:new_size = (size[], int(image.size[] * ratios[]))else:new_size = (int(image.size[] * ratios[]), size[])image = image.resize(new_size, resample=resample)if left is None:box_x = int((x - new_size[]) / )elif left:box_x = else:box_x = x - new_size[]if top is None:box_y = int((y - new_size[]) / )elif top:box_y = else:box_y = y - new_size[]result.paste(image, box=(box_x, box_y))return result", "docstring": "Return an image resized.", "id": "f2011:m1"} {"signature": "def _write(self, filename, frames, fps, loop=, palette=):", "body": "from PIL import Imageimages = []for f in frames:data = open(f, '').read()images.append(Image.open(io.BytesIO(data)))duration = round( / fps, )im = images.pop()im.save(filename,save_all=True,append_images=images,duration=duration,loop=loop,palette=palette)", 
"docstring": "Write a series of frames as a single animated GIF.\n\n:param str filename: the name of the GIF file to write\n\n:param list frames: a list of filenames, each of which represents a single\n frame of the animation. Each frame must have exactly the same\n dimensions, and the code has only been tested with .gif files.\n\n:param float fps:\n The number of frames per second.\n\n:param int loop:\n The number of iterations. Default 0 (meaning loop indefinitely).\n\n:param int palette:\n The number of colors to quantize the image to. Is rounded to\n the nearest power of two. Default 256.", "id": "f2013:c0:m0"} {"signature": "def show_image(setter, width, height,image_path='', image_obj=None, offset=(, ),bgcolor=COLORS.Off, brightness=):", "body": "bgcolor = color_scale(bgcolor, brightness)img = image_objif image_path and not img:from PIL import Imageimg = Image.open(image_path)elif not img:raise ValueError('')w = min(width - offset[], img.size[])h = min(height - offset[], img.size[])ox = offset[]oy = offset[]for x in range(ox, w + ox):for y in range(oy, h + oy):r, g, b, a = (, , , )rgba = img.getpixel((x - ox, y - oy))if isinstance(rgba, int):raise ValueError('')if len(rgba) == :r, g, b = rgbaelif len(rgba) == :r, g, b, a = rgbaelse:raise ValueError('')if a == :r, g, b = bgcolorelse:r, g, b = color_scale((r, g, b), a)if brightness != :r, g, b = color_scale((r, g, b), brightness)setter(x, y, (r, g, b))", "docstring": "Display an image on a matrix.", "id": "f2015:m0"} {"signature": "def showImage(layout, imagePath=\"\", imageObj=None, offset=(, ),bgcolor=COLORS.Off, brightness=):", "body": "if not isinstance(layout, Matrix):raise RuntimeError(\"\")layout.all_off()return show_image(layout.set, layout.width, layout.height, imagePath,imageObj, offset, bgcolor, brightness)", "docstring": "Display an image on the matrix", "id": "f2015:m1"} {"signature": "def loadImage(layout, imagePath=\"\", imageObj=None, offset=(, ),bgcolor=COLORS.Off, brightness=):", "body": "if not isinstance(layout, Matrix):raise RuntimeError(\"\")texture = [[COLORS.Off for x in range(layout.width)]for y in range(layout.height)]def setter(x, y, pixel):if y >= and x >= :texture[y][x] = pixelshow_image(setter, layout.width, layout.height, imagePath, imageObj,offset, bgcolor, brightness)return texture", "docstring": "Display an image on the matrix", "id": "f2015:m2"} {"signature": "def convert_mode(image, mode=''):", "body": "deprecated.deprecated('')return image if (image.mode == mode) else image.convert(mode=mode)", "docstring": "Return an image in the given mode.", "id": "f2016:m0"} {"signature": "def image_to_colorlist(image, container=list):", "body": "deprecated.deprecated('')return container(convert_mode(image).getdata())", "docstring": "Given a PIL.Image, returns a ColorList of its pixels.", "id": "f2016:m1"} {"signature": "def animated_gif_to_colorlists(image, container=list):", "body": "deprecated.deprecated('')from PIL import ImageSequenceit = ImageSequence.Iterator(image)return [image_to_colorlist(i, container) for i in it]", "docstring": "Given an animated GIF, return a list with a colorlist for each frame.", "id": "f2016:m2"} {"signature": "def sender(address, use_queue=True, **kwds):", "body": "return QueuedSender(address, **kwds) if use_queue else Sender(address)", "docstring": ":param str address: a pair (ip_address, port) to pass to socket.connect\n:param bool use_queue: if True, run the connection in a different thread\n with a queue", "id": "f2021:m0"} {"signature": "def __init__(self, address):", 
"body": "super().__init__()self.address = address", "docstring": ":param str address: a pair (ip_address, port) to pass to socket.connect", "id": "f2021:c0:m0"} {"signature": "def __init__(self, address, **kwds):", "body": "super().__init__(**kwds)self.sender = Sender(address)", "docstring": ":param str address: a pair (ip_address, port) to pass to socket.connect", "id": "f2021:c1:m0"} {"signature": "def flatten(master):", "body": "result = {}def add(value, *keys):if keys in result:raise ValueError('' % keys)result[keys] = valuedef recurse(value, *keys):if isinstance(value, dict):for k, v in value.items():recurse(v, k, *keys)else:key = ''.join(reversed(keys))if key in result:raise ValueError('' % str(keys))result[key] = valuerecurse(master)return result", "docstring": ":param dict master: a multilevel dictionary\n:return: a flattened dictionary\n:rtype: dict\n\nFlattens a multilevel dictionary into a single-level one so that::\n\n {'foo':\n {'bar':\n {\n 'a': 1,\n 'b': True,\n 'c': 'hello',\n },\n },\n }\n\nwould become::\n\n {'foo.bar.a': 1,\n 'foo.bar.b': True,\n 'foo.bar.a': 1,\n }\n\nYou can mix and match both input (hierarchical) and output (dotted) formats\nin the input without problems - and if you call flatten more than once, it\nhas no effect.", "id": "f2023:m0"} {"signature": "def unflatten(master):", "body": "result = {}for k, v in master.items():*first, last = k.split('')r = resultfor i in first:r = r.setdefault(i, {})r[last] = vreturn result", "docstring": ":param dict master: a multilevel dictionary\n:return: a unflattened dictionary\n:rtype: dict\n\nUnflattens a single-level dictionary a multilevel into one so that::\n\n {'foo.bar.a': 1,\n 'foo.bar.b': True,\n 'foo.bar.a': 1,\n }\n\nwould become::\n\n {'foo':\n {'bar':\n {\n 'a': 1,\n 'b': True,\n 'c': 'hello',\n },\n },\n }", "id": "f2023:m1"} {"signature": "def canonical(master):", "body": "return unflatten(flatten(master))", "docstring": ":param dict master: a multilevel dictionary\n:return: a canonicalized dictionary that has been completely flattened\n and then unflattened\n:rtype: dict", "id": "f2023:m2"} {"signature": "def pop_legacy_palette(kwds, *color_defaults):", "body": "palette = kwds.pop('', None)if palette:legacy = [k for k, _ in color_defaults if k in kwds]if legacy:raise ValueError('' + ''.join(legacy))return palettevalues = [kwds.pop(k, v) for k, v in color_defaults]if values and color_defaults[][] in ('', ''):values = values[]return make.colors(values or None)", "docstring": "Older animations in BPA and other areas use all sorts of different names for\nwhat we are now representing with palettes.\n\nThis function mutates a kwds dictionary to remove these legacy fields and\nextract a palette from it, which it returns.", "id": "f2026:m0"} {"signature": "def wheel_color(position):", "body": "return _WHEEL[round(position) % len(_WHEEL)]", "docstring": "Get color from wheel value (0 - 384).\n Provided for those used to using it from Adafruit libraries", "id": "f2027:m1"} {"signature": "def wheel_helper(pos, length, cycle_step):", "body": "return wheel_color((pos * len(_WHEEL) / length) + cycle_step)", "docstring": "Helper for wheel_color that distributes colors over length and\n allows shifting position.", "id": "f2027:m2"} {"signature": "def all_named_colors():", "body": "yield from _TO_COLOR_USER.items()for name, color in _TO_COLOR.items():if name not in _TO_COLOR_USER:yield name, color", "docstring": "Return an iteration over all name, color pairs in tables", "id": "f2028:m5"} {"signature": "def 
contains(x):", "body": "if isinstance(x, str):x = canonical_name(x)return x in _TO_COLOR_USER or x in _TO_COLORelse:x = tuple(x)return x in _TO_NAME_USER or x in _TO_NAME", "docstring": "Return true if this string or integer tuple appears in tables", "id": "f2028:m6"} {"signature": "@functools.singledispatchdef colors(c):", "body": "raise ValueError(\"\" % type(c), _COLORS_USAGE)", "docstring": "Return a color Palette", "id": "f2029:m5"} {"signature": "def to_triplets(colors):", "body": "try:colors[][]return colorsexcept:passextra = len(colors) % if extra:colors = colors[:-extra]return list(zip(*[iter(colors)] * ))", "docstring": "Coerce a list into a list of triplets.\n\nIf `colors` is a list of lists or strings, return it as is. Otherwise,\ndivide it into tuplets of length three, silently discarding any extra\nelements beyond a multiple of three.", "id": "f2029:m11"} {"signature": "def colors_no_palette(colors=None, **kwds):", "body": "if isinstance(colors, str):colors = _split_colors(colors)else:colors = to_triplets(colors or ())colors = (color(c) for c in colors or ())return palette.Palette(colors, **kwds)", "docstring": "Return a Palette but don't take into account Pallete Names.", "id": "f2029:m13"} {"signature": "def hsv2rgb_raw(hsv):", "body": "HSV_SECTION_3 = h, s, v = hsvinvsat = - sbrightness_floor = (v * invsat) // color_amplitude = v - brightness_floorsection = h // HSV_SECTION_3 offset = h % HSV_SECTION_3 rampup = offsetrampdown = (HSV_SECTION_3 - ) - offsetrampup_amp_adj = (rampup * color_amplitude) // ( // )rampdown_amp_adj = (rampdown * color_amplitude) // ( // )rampup_adj_with_floor = rampup_amp_adj + brightness_floorrampdown_adj_with_floor = rampdown_amp_adj + brightness_floorr, g, b = (, , )if section:if section == :r = brightness_floorg = rampdown_adj_with_floorb = rampup_adj_with_floorelse:r = rampup_adj_with_floorg = brightness_floorb = rampdown_adj_with_floorelse:r = rampdown_adj_with_floorg = rampup_adj_with_floorb = brightness_floorreturn (r, g, b)", "docstring": "Converts an HSV tuple to RGB. 
Intended for internal use.\nYou should use hsv2rgb_spectrum or hsv2rgb_rainbow instead.", "id": "f2030:m0"} {"signature": "def hsv2rgb_spectrum(hsv):", "body": "h, s, v = hsvreturn hsv2rgb_raw(((h * ) >> , s, v))", "docstring": "Generates RGB values from HSV values in line with a typical light\n spectrum.", "id": "f2030:m1"} {"signature": "def hsv2rgb_rainbow(hsv):", "body": "def nscale8x3_video(r, g, b, scale):nonzeroscale = if scale != :nonzeroscale = if r != :r = ((r * scale) >> ) + nonzeroscaleif g != :g = ((g * scale) >> ) + nonzeroscaleif b != :b = ((b * scale) >> ) + nonzeroscalereturn (r, g, b)def scale8_video_LEAVING_R1_DIRTY(i, scale):nonzeroscale = if scale != :nonzeroscale = if i != :i = ((i * scale) >> ) + nonzeroscalereturn ih, s, v = hsvoffset = h & offset8 = offset * third = (offset8 * ( // )) >> r, g, b = (, , )if not (h & ):if not (h & ):if not (h & ):r = - thirdg = thirdb = else:r = g = + thirdb = else:if not (h & ):twothirds = (third << )r = - twothirdsg = + thirdb = else:r = g = - thirdb = thirdelse:if not (h & ):if not (h & ):r = twothirds = (third << )g = - twothirdsb = + twothirdselse:r = thirdg = b = - thirdelse:if not (h & ):r = + thirdg = b = - thirdelse:r = + thirdg = b = - thirdif s != :r, g, b = nscale8x3_video(r, g, b, s)desat = - sdesat = (desat * desat) >> brightness_floor = desatr = r + brightness_floorg = g + brightness_floorb = b + brightness_floorif v != :v = scale8_video_LEAVING_R1_DIRTY(v, v)r, g, b = nscale8x3_video(r, g, b, v)return (r, g, b)", "docstring": "Generates RGB values from HSV that have an even visual\n distribution. Be careful as this method is only half as fast as\n hsv2rgb_spectrum.", "id": "f2030:m2"} {"signature": "def hsv2rgb_360(hsv):", "body": "h, s, v = hsvr, g, b = colorsys.hsv_to_rgb(h / , s, v)return (int(r * ), int(g * ), int(b * ))", "docstring": "Python default hsv to rgb conversion for when hue values in the\n range 0-359 are preferred. Due to requiring float math, this method\n is slower than hsv2rgb_rainbow and hsv2rgb_spectrum.", "id": "f2030:m3"} {"signature": "def color_cmp(a, b):", "body": "if a == b:return a, b = rgb_to_hsv(a), rgb_to_hsv(b)return - if a < b else ", "docstring": "Order colors by hue, saturation and value, in that order.\n\n Returns -1 if a < b, 0 if a == b and 1 if a > b.", "id": "f2030:m12"} {"signature": "def euclidean(c1, c2):", "body": "diffs = ((i - j) for i, j in zip(c1, c2))return sum(x * x for x in diffs)", "docstring": "Square of the euclidean distance", "id": "f2032:m1"} {"signature": "def __init__(self, colors=(), continuous=False, serpentine=False, scale=,offset=, autoscale=False, length=None):", "body": "super().__init__(colors)if not self:self.append(Black)self.continuous = continuousself.serpentine = serpentineself.scale = scaleself.offset = offsetself.autoscale = autoscaleself.length = length", "docstring": "Arguments:\n colors: an iterable of colors\n\n continuous: if True, interpolate linearly between colors; if False,\n use the nearest color from the original list\n\n serpentine: if True, palette colors are used in reverse order every\n other iteration, giving a back-and-forth effect. If False,\n palette colors always restart on each iteration\n\n scale: Scales the incoming index ``i``. As ``i`` moves from 0\n to ``len(colors) - 1``, the whole palette repeats itself\n ``self.scale`` times\n\n offset: offset to the incoming index ``i``, applied after scaling\n\n autoscale: If True, automatically rescale the Palette size to\n match the length of the output. 
``autoscale`` happens before\n ``scale``, so the two work well together to give banding or\n striping effects across your display\n\n ``length``:\n The length of the output color_list. If None, use the length of\n the palette itself. If autoscale=True, ``length`` is used to scale\n the palette to match the output.", "id": "f2033:c0:m0"} {"signature": "def get(self, position=):", "body": "n = len(self)if n == :return self[]pos = positionif self.length and self.autoscale:pos *= len(self)pos /= self.lengthpos *= self.scalepos += self.offsetif not self.continuous:if not self.serpentine:return self[int(pos % n)]m = ( * n) - pos %= mif pos < n:return self[int(pos)]else:return self[int(m - pos)]if self.serpentine:pos %= ( * n)if pos > n:pos = ( * n) - poselse:pos %= npos *= n - pos /= nindex = int(pos)fade = pos - indexif not fade:return self[index]r1, g1, b1 = self[index]r2, g2, b2 = self[(index + ) % len(self)]dr, dg, db = r2 - r1, g2 - g1, b2 - b1return r1 + fade * dr, g1 + fade * dg, b1 + fade * db", "docstring": "Return a color interpolated from the Palette.\n\nIn the case where continuous=False, serpentine=False, scale=1,\nautoscale=False, and offset=0, this is exactly the same as plain old []\nindexing, but with a wrap-around.\n\nThe constructor parameters affect this result as documented in the\nconstructor.\n\nArguments:\n ``position``:\n May be any integer or floating point number", "id": "f2033:c0:m2"} {"signature": "def name_to_color(name):", "body": "def to_color(name):name = name.lower()if '' in name:if name.startswith('') and name.endswith(''):name = name[:-]if name.startswith('') and name.endswith(''):name = name[:-]r, g, b = name.split('')return _from_number(r), _from_number(g), _from_number(b)try:n = _from_number(name)except:color = tables.get_color(name)if color:return colorraise ValueErrorreturn tables.to_triplet(n)try:color = to_color(name)except:raise ValueError('' % str(name))if not all( <= i <= for i in color):raise ValueError('' % color)return color", "docstring": ":param str name: a string identifying a color. It might be a color name\n from the two lists of color names, juce and classic; or\n it might be a list of numeric r, g, b values separated by\n commas.\n:returns: a color as an RGB 3-tuple", "id": "f2035:m0"} {"signature": "def color_to_name(color, use_hex=False):", "body": "if isinstance(color, list):color = tuple(color)elif not isinstance(color, tuple):raise ValueError('')if use_hex:return '' % colorreturn tables.get_name(color) or str(color)", "docstring": ":param tuple color: an RGB 3-tuple of integer colors\n:returns: a string name for this color\n\n``name_to_color(color_to_name(c)) == c`` is guaranteed to be true (but the\nreverse is not true, because name_to_color is a many-to-one function).", "id": "f2035:m1"} {"signature": "def toggle(s):", "body": "is_numeric = '' in s or s.startswith('') or s.startswith('')c = name_to_color(s)return color_to_name(c) if is_numeric else str(c)", "docstring": "Toggle back and forth between a name and a tuple representation.\n\n:param str s: a string which is either a text name, or a tuple-string:\n a string with three numbers separated by commas\n\n:returns: if the string was a text name, return a tuple. 
If it's a\n tuple-string and it corresponds to a text name, return the text\n name, else return the original tuple-string.", "id": "f2035:m2"} {"signature": "def to_color(c):", "body": "if isinstance(c, numbers.Number):return c, c, cif not c:raise ValueError('' % c)if isinstance(c, str):return name_to_color(c)if isinstance(c, list):c = tuple(c)if isinstance(c, tuple):if len(c) > :return c[:]while len(c) < :c += (c[-],)return craise ValueError('' % c)", "docstring": "Try to coerce the argument into a color - a 3-tuple of numbers-", "id": "f2035:m4"} {"signature": "def color_blend(a, b):", "body": "return ( - ((( - a[]) * ( - b[])) >> ), - ((( - a[]) * ( - b[])) >> ), - ((( - a[]) * ( - b[])) >> ))", "docstring": "Performs a Screen blend on RGB color tuples, a and b", "id": "f2036:m0"} {"signature": "def color_scale(color, level):", "body": "return tuple([int(i * level) >> for i in list(color)])", "docstring": "Scale RGB tuple by level, 0 - 256", "id": "f2036:m1"} {"signature": "def __init__(self, gamma=, offset=, lower_bound=):", "body": "self.gamma = gammaself.offset = offsetself.lower_bound = lower_boundwidth = - lower_bounddef gam(i):return int(lower_bound + pow(i / , gamma) * width + offset)self.table = tuple(gam(i) for i in range())", "docstring": ":param float gamma: the root for gamma computation\n:param float offset: an offset added to the result\n:param int lower_bound: The lowest possible output value - the highest\n is always 255.", "id": "f2037:c0:m0"} {"signature": "def get(self, i):", "body": "return self.table[max(, min(, int(i)))]", "docstring": ":returns: The gamma table entry\n:param int i: the index into the table", "id": "f2037:c0:m1"} {"signature": "def get(name=None):", "body": "if name is None or name == '':return _DEFAULT_PALETTEif isinstance(name, str):return PROJECT_PALETTES.get(name) or BUILT_IN_PALETTES.get(name)", "docstring": "Return a named Palette, or None if no such name exists.\n\nIf ``name`` is omitted, the default value is used.", "id": "f2041:m0"} {"signature": "def UpdateThreading(enable, layout):", "body": "return (UseThreading if enable else NoThreading)(layout)", "docstring": "UpdateThreading handles threading - and eventually multiprocessing - for\nLayout.", "id": "f2042:m0"} {"signature": "def push_to_driver(self):", "body": "self.wait_for_update()self.update_colors()", "docstring": "Push the current pixel state to the driver", "id": "f2042:c2:m3"} {"signature": "def draw_circle(setter, x0, y0, r, color=None):", "body": "f = - rddF_x = ddF_y = - * rx = y = rsetter(x0, y0 + r, color)setter(x0, y0 - r, color)setter(x0 + r, y0, color)setter(x0 - r, y0, color)while x < y:if f >= :y -= ddF_y += f += ddF_yx += ddF_x += f += ddF_xsetter(x0 + x, y0 + y, color)setter(x0 - x, y0 + y, color)setter(x0 + x, y0 - y, color)setter(x0 - x, y0 - y, color)setter(x0 + y, y0 + x, color)setter(x0 - y, y0 + x, color)setter(x0 + y, y0 - x, color)setter(x0 - y, y0 - x, color)", "docstring": "Draws a circle at point x0, y0 with radius r of the specified RGB color", "id": "f2044:m0"} {"signature": "def fill_circle(setter, x0, y0, r, color=None):", "body": "_draw_fast_vline(setter, x0, y0 - r, * r + , color)_fill_circle_helper(setter, x0, y0, r, , , color)", "docstring": "Draws a filled circle at point x0,y0 with radius r and specified color", "id": "f2044:m3"} {"signature": "def bresenham_line(setter, x0, y0, x1, y1, color=None, colorFunc=None):", "body": "steep = abs(y1 - y0) > abs(x1 - x0)if steep:x0, y0 = y0, x0x1, y1 = y1, x1if x0 > x1:x0, x1 = x1, x0y0, y1 = y1, y0dx = 
x1 - x0dy = abs(y1 - y0)err = dx / if y0 < y1:ystep = else:ystep = -count = for x in range(x0, x1 + ):if colorFunc:color = colorFunc(count)count += if steep:setter(y0, x, color)else:setter(x, y0, color)err -= dyif err < :y0 += ysteperr += dx", "docstring": "Draw line from point x0,y0 to x,1,y1. Will draw beyond matrix bounds.", "id": "f2044:m5"} {"signature": "def draw_rect(setter, x, y, w, h, color=None, aa=False):", "body": "_draw_fast_hline(setter, x, y, w, color, aa)_draw_fast_hline(setter, x, y + h - , w, color, aa)_draw_fast_vline(setter, x, y, h, color, aa)_draw_fast_vline(setter, x + w - , y, h, color, aa)", "docstring": "Draw rectangle with top-left corner at x,y, width w and height h", "id": "f2044:m9"} {"signature": "def fill_rect(setter, x, y, w, h, color=None, aa=False):", "body": "for i in range(x, x + w):_draw_fast_vline(setter, i, y, h, color, aa)", "docstring": "Draw solid rectangle with top-left corner at x,y, width w and height h", "id": "f2044:m10"} {"signature": "def draw_round_rect(setter, x, y, w, h, r, color=None, aa=False):", "body": "_draw_fast_hline(setter, x + r, y, w - * r, color, aa) _draw_fast_hline(setter, x + r, y + h - , w - * r, color, aa) _draw_fast_vline(setter, x, y + r, h - * r, color, aa) _draw_fast_vline(setter, x + w - , y + r, h - * r, color, aa) _draw_circle_helper(setter, x + r, y + r, r, , color, aa)_draw_circle_helper(setter, x + w - r - , y + r, r, , color, aa)_draw_circle_helper(setter, x + w - r - , y + h - r - , r, , color, aa)_draw_circle_helper(setter, x + r, y + h - r - , r, , color, aa)", "docstring": "Draw rectangle with top-left corner at x,y, width w, height h,\n and corner radius r.", "id": "f2044:m11"} {"signature": "def fill_round_rect(setter, x, y, w, h, r, color=None, aa=False):", "body": "fill_rect(setter, x + r, y, w - * r, h, color, aa)_fill_circle_helper(setter, x + w - r - , y + r, r,, h - * r - , color, aa)_fill_circle_helper(setter, x + r, y + r, r, , h - * r - , color, aa)", "docstring": "Draw solid rectangle with top-left corner at x,y, width w, height h,\n and corner radius r", "id": "f2044:m12"} {"signature": "def draw_triangle(setter, x0, y0, x1, y1, x2, y2, color=None, aa=False):", "body": "draw_line(setter, x0, y0, x1, y1, color, aa)draw_line(setter, x1, y1, x2, y2, color, aa)draw_line(setter, x2, y2, x0, y0, color, aa)", "docstring": "Draw triangle with points x0,y0 - x1,y1 - x2,y2", "id": "f2044:m13"} {"signature": "def fill_triangle(setter, x0, y0, x1, y1, x2, y2, color=None, aa=False):", "body": "a = b = y = last = if y0 > y1:y0, y1 = y1, y0x0, x1 = x1, x0if y1 > y2:y2, y1 = y1, y2x2, x1 = x1, x2if y0 > y1:y0, y1 = y1, y0x0, x1 = x1, x0if y0 == y2: a = b = x0if x1 < a:a = x1elif x1 > b:b = x1if x2 < a:a = x2elif x2 > b:b = x2_draw_fast_hline(setter, a, y0, b - a + , color, aa)dx01 = x1 - x0dy01 = y1 - y0dx02 = x2 - x0dy02 = y2 - y0dx12 = x2 - x1dy12 = y2 - y1sa = sb = if y1 == y2:last = y1 else:last = y1 - for y in range(y, last + ):a = x0 + sa / dy01b = x0 + sb / dy02sa += dx01sb += dx02if a > b:a, b = b, a_draw_fast_hline(setter, a, y, b - a + , color, aa)sa = dx12 * (y - y1)sb = dx02 * (y - y0)for y in range(y, y2 + ):a = x1 + sa / dy12b = x0 + sb / dy02sa += dx12sb += dx02if a > b:a, b = b, a_draw_fast_hline(setter, a, y, b - a + , color, aa)", "docstring": "Draw solid triangle with points x0,y0 - x1,y1 - x2,y2", "id": "f2044:m14"} {"signature": "def __init__(self, drivers, width=, height=,rotation=, vert_flip=False, y_flip=False,serpentine=True,threadedUpdate=False, brightness=,pixelSize=(, ), 
**kwargs):", "body": "self.gen_multi = make_matrix_coord_map_multisuper().__init__(drivers, threadedUpdate, brightness, **kwargs)rot_mod = rotation % self.rotation = * round(rot_mod / )if self.rotation != rot_mod:log.warning(ROTATION_WARNING, rotation, self.rotation)self.width = width or getattr(self.drivers[], '') or self.height = height or getattr(self.drivers[], '') or self.vert_flip = vert_flipself.y_flip = y_flipself.serpentine = serpentineself.pixelSize = pixelSizepw, ph = self.pixelSizeif not (self.width or self.height):square = int(math.sqrt(self.numLEDs))if (square * square) == self.numLEDs:self.width = self.height = squareelse:raise TypeError('''')if self.width * self.height > self.numLEDs:raise ValueError(''% (self.width, self.height, self.numLEDs))if not self.coord_map:if len(self.drivers) == :log.debug('''')y_flip = y_flip or vert_flipself.coord_map = make_matrix_coord_map(self.width, self.height,serpentine=serpentine,rotation=rotation,y_flip=vert_flip)elif self.drivers:raise TypeError('')self.set_pixel_positions(make_matrix_coord_map_positions(self.coord_map))if rotation in (, ):w = self.widthh = self.heightself.width = hself.height = wself.texture = Noneself.set = self._setColorif pw < or pw > self.width or ph < or ph > self.height:raise ValueError('''')if self.width % pw != or self.height % ph != :raise ValueError('')if pw == and ph == :self._set = self.__setNormalelse:self._set = self.__setScaledself.width = self.width / pwself.height = self.height / phself.numLEDs = self.width * self.heightself.fonts = font.fonts", "docstring": "Main class for matricies.\n\n driver -- instance that inherits from DriverBase\n width -- X axis size of matrix\n height -- Y axis size of matrix\n coord_map -- a 2D matrix defining the X,Y to strip index mapping.\n Not needed in most cases\n rotation -- how to rotate when generating the map.\n Not used if coord_map specified\n vert_flip - flips the generated map along the Y axis.\n This along with rotation can achieve any orientation", "id": "f2045:c0:m0"} {"signature": "@propertydef shape(self):", "body": "return self.width, self.height", "docstring": "Returns ``width, height``", "id": "f2045:c0:m1"} {"signature": "def get(self, x, y):", "body": "try:pixel = self.coord_map[y][x]return self._get_base(pixel)except IndexError:return colors.COLORS.Black", "docstring": "Return the pixel color at position (x, y), or Colors.black if that\nposition is out-of-bounds.", "id": "f2045:c0:m2"} {"signature": "def set(self, x, y, color):", "body": "raise NotImplementedError", "docstring": "Set the pixel color at position x, y.", "id": "f2045:c0:m3"} {"signature": "def drawCircle(self, x0, y0, r, color=None):", "body": "md.draw_circle(self.set, x0, y0, r, color)", "docstring": "Draw a circle in an RGB color, with center x0, y0 and radius r.", "id": "f2045:c0:m13"} {"signature": "def fillCircle(self, x0, y0, r, color=None):", "body": "md.fill_circle(self.set, x0, y0, r, color)", "docstring": "Draw a filled circle in an RGB color, with center x0, y0 and radius r.", "id": "f2045:c0:m14"} {"signature": "def drawLine(self, x0, y0, x1, y1, color=None, colorFunc=None, aa=False):", "body": "md.draw_line(self.set, x0, y0, x1, y1, color, colorFunc, aa)", "docstring": "Draw a between x0, y0 and x1, y1 in an RGB color.\n\n:param colorFunc: a function that takes an integer from x0 to x1 and\n returns a color corresponding to that point\n:param aa: if True, use Bresenham's algorithm for line drawing;\n otherwise use Xiaolin Wu's algorithm", "id": "f2045:c0:m15"} 
{"signature": "def bresenham_line(self, x0, y0, x1, y1, color=None, colorFunc=None):", "body": "md.bresenham_line(self.set, x0, y0, x1, y1, color, colorFunc)", "docstring": "Draw line from point x0, y0 to x1, y1 using Bresenham's algorithm.\n\nWill draw beyond matrix bounds.", "id": "f2045:c0:m16"} {"signature": "def wu_line(self, x0, y0, x1, y1, color=None, colorFunc=None):", "body": "md.wu_line(self.set, x0, y0, x1, y1, color, colorFunc)", "docstring": "Draw a between x0, y0 and x1, y1 in an RGB color.\n\n:param colorFunc: a function that takes an integer from x0 to x1 and\n returns a color corresponding to that point\n:param aa: if True, use Bresenham's algorithm for line drawing;\n otherwise use Xiaolin Wu's algorithm", "id": "f2045:c0:m17"} {"signature": "def drawRect(self, x, y, w, h, color=None, aa=False):", "body": "md.draw_rect(self.set, x, y, w, h, color, aa)", "docstring": "Draw rectangle with top-left corner at x,y, width w and height h\n\n:param aa: if True, use Bresenham's algorithm for line drawing;\n otherwise use Xiaolin Wu's algorithm", "id": "f2045:c0:m18"} {"signature": "def fillRect(self, x, y, w, h, color=None, aa=False):", "body": "md.fill_rect(self.set, x, y, w, h, color, aa)", "docstring": "Draw a solid rectangle with top-left corner at (x, y), width w and\nheight h.\n\n:param aa: if True, use Bresenham's algorithm for line drawing;\n otherwise use Xiaolin Wu's algorithm", "id": "f2045:c0:m19"} {"signature": "def fillScreen(self, color=None):", "body": "md.fill_rect(self.set, , , self.width, self.height, color)", "docstring": "Fill the matrix with the given RGB color", "id": "f2045:c0:m20"} {"signature": "def drawRoundRect(self, x, y, w, h, r, color=None, aa=False):", "body": "md.draw_round_rect(self.set, x, y, w, h, r, color, aa)", "docstring": "Draw a rounded rectangle with top-left corner at (x, y), width w,\nheight h, and corner radius r\n\n:param aa: if True, use Bresenham's algorithm for line drawing;\n otherwise use Xiaolin Wu's algorithm", "id": "f2045:c0:m21"} {"signature": "def fillRoundRect(self, x, y, w, h, r, color=None, aa=False):", "body": "md.fill_round_rect(self.set, x, y, w, h, r, color, aa)", "docstring": "Draw a rounded rectangle with top-left corner at (x, y), width w,\nheight h, and corner radius r\n\n:param aa: if True, use Bresenham's algorithm for line drawing;\n otherwise use Xiaolin Wu's algorithm", "id": "f2045:c0:m22"} {"signature": "def drawTriangle(self, x0, y0, x1, y1, x2, y2, color=None, aa=False):", "body": "md.draw_triangle(self.set, x0, y0, x1, y1, x2, y2, color, aa)", "docstring": "Draw triangle with vertices (x0, y0), (x1, y1) and (x2, y2)\n\n:param aa: if True, use Bresenham's algorithm for line drawing;\n Otherwise use Xiaolin Wu's algorithm", "id": "f2045:c0:m23"} {"signature": "def fillTriangle(self, x0, y0, x1, y1, x2, y2, color=None, aa=False):", "body": "md.fill_triangle(self.set, x0, y0, x1, y1, x2, y2, color, aa)", "docstring": "Draw filled triangle with points x0,y0 - x1,y1 - x2,y2\n\n:param aa: if True, use Bresenham's algorithm for line drawing;\n otherwise use Xiaolin Wu's algorithm", "id": "f2045:c0:m24"} {"signature": "def drawChar(self, x, y, c, color, bg,aa=False, font=font.default_font, font_scale=):", "body": "md.draw_char(self.fonts, self.set, self.width, self.height,x, y, c, color, bg, aa, font, font_scale)", "docstring": "Draw a single character c at at (x, y) in an RGB color.", "id": "f2045:c0:m25"} {"signature": "def drawText(self, text, x=, y=, color=None,bg=colors.COLORS.Off, aa=False, 
font=font.default_font,font_scale=):", "body": "md.draw_text(self.fonts, self.set, text, self.width, self.height,x, y, color, bg, aa, font, font_scale)", "docstring": "Draw a line of text starting at (x, y) in an RGB color.\n\n:param colorFunc: a function that takes an integer from x0 to x1 and\n returns a color corresponding to that point\n:param aa: if True, use Bresenham's algorithm for line drawing;\n otherwise use Xiaolin Wu's algorithm", "id": "f2045:c0:m26"} {"signature": "@propertydef shape(self):", "body": "return self.ringCount, self.ringSteps", "docstring": "Returns ``ringCount, ringSteps``.", "id": "f2046:c0:m2"} {"signature": "def set(self, ring, angle, color):", "body": "pixel = self.angleToPixel(angle, ring)self._set_base(pixel, color)", "docstring": "Set pixel to RGB color tuple", "id": "f2046:c0:m5"} {"signature": "def get(self, ring, angle):", "body": "pixel = self.angleToPixel(angle, ring)return self._get_base(pixel)", "docstring": "Get RGB color tuple of color at index pixel", "id": "f2046:c0:m6"} {"signature": "@propertydef shape(self):", "body": "return self.x, self.y, self.z", "docstring": "Returns ``x, y, z``", "id": "f2048:c0:m2"} {"signature": "def apply(self, function):", "body": "for cut in self.cuts:value = self.read(cut)function(value)self.write(cut, value)", "docstring": "For each row or column in cuts, read a list of its colors,\napply the function to that list of colors, then write it back\nto the layout.", "id": "f2050:c0:m1"} {"signature": "@propertydef shape(self):", "body": "return self.numLEDs,", "docstring": "Returns a 1-tuple with the length of the strip", "id": "f2051:c0:m1"} {"signature": "def get(self, pixel):", "body": "return self._get_base(pixel * self.pixelWidth)", "docstring": "Get RGB color tuple of color at index pixel", "id": "f2051:c0:m2"} {"signature": "def set(self, pixel, color):", "body": "raise NotImplementedError", "docstring": "Set the pixel color at position x in the strip.", "id": "f2051:c0:m3"} {"signature": "def _set(self, pixel, color):", "body": "self.set_base(pixel, color)", "docstring": "Set pixel to RGB color tuple", "id": "f2051:c0:m5"} {"signature": "def setRGB(self, pixel, r, g, b):", "body": "self.set(pixel, (r, g, b))", "docstring": "Set single pixel using individual RGB values instead of tuple", "id": "f2051:c0:m7"} {"signature": "def setHSV(self, pixel, hsv):", "body": "color = conversions.hsv2rgb(hsv)self.set(pixel, color)", "docstring": "Set single pixel to HSV tuple", "id": "f2051:c0:m8"} {"signature": "def setOff(self, pixel):", "body": "self.set(pixel, (, , ))", "docstring": "Set single pixel off", "id": "f2051:c0:m9"} {"signature": "@classmethoddef construct(cls, project, **desc):", "body": "return cls(project.drivers, maker=project.maker, **desc)", "docstring": "Construct a layout.\n SHOULD BE PRIVATE", "id": "f2052:c0:m0"} {"signature": "def set_pixel_positions(self, pixel_positions):", "body": "for d in self.drivers:d.set_pixel_positions(pixel_positions)", "docstring": "SHOULD BE PRIVATE", "id": "f2052:c0:m2"} {"signature": "def clone(self):", "body": "args = {k: getattr(self, k) for k in self.CLONE_ATTRS}args[''] = copy.copy(self.color_list)return self.__class__([], **args)", "docstring": "Return an independent copy of this layout with a completely separate\ncolor_list and no drivers.", "id": "f2052:c0:m8"} {"signature": "@propertydef shape(self):", "body": "raise NotImplementedError", "docstring": "Return a tuple indicating the dimensions of the layout - (x,) for a\nstrip, (x, y) for an array, (x, y, z) 
for a cube, and\n(ring_count, ring_steps) for a circle.", "id": "f2052:c0:m9"} {"signature": "def set_color_list(self, color_list, offset=):", "body": "if not len(color_list):returncolor_list = make.colors(color_list)size = len(self._colors) - offsetif len(color_list) > size:color_list = color_list[:size]self._colors[offset:offset + len(color_list)] = color_list", "docstring": "Set the internal colors starting at an optional offset.\n\nIf `color_list` is a list or other 1-dimensional array, it is reshaped\ninto an N x 3 list.\n\nIf `color_list` too long it is truncated; if it is too short then only\nthe initial colors are set.", "id": "f2052:c0:m13"} {"signature": "def push_to_driver(self):", "body": "self.threading.push_to_driver()", "docstring": "Push the current pixel state to the driver\nDo not call this method from user code!", "id": "f2052:c0:m17"} {"signature": "def setRGB(self, pixel, r, g, b):", "body": "self._set_base(pixel, (r, g, b))", "docstring": "Set single pixel using individual RGB values instead of tuple", "id": "f2052:c0:m19"} {"signature": "def setHSV(self, pixel, hsv):", "body": "color = conversions.hsv2rgb(hsv)self._set_base(pixel, color)", "docstring": "Set single pixel to HSV tuple", "id": "f2052:c0:m20"} {"signature": "def setOff(self, pixel):", "body": "self._set_base(pixel, (, , ))", "docstring": "Set single pixel off", "id": "f2052:c0:m21"} {"signature": "def all_off(self):", "body": "self._colors[:] = [(, , )] * self.numLEDs", "docstring": "Set all pixels off", "id": "f2052:c0:m22"} {"signature": "def fill(self, color, start=, end=-):", "body": "start = max(start, )if end < or end >= self.numLEDs:end = self.numLEDs - for led in range(start, end + ): self._set_base(led, color)", "docstring": "Fill the entire strip with RGB color tuple", "id": "f2052:c0:m23"} {"signature": "def fillRGB(self, r, g, b, start=, end=-):", "body": "self.fill((r, g, b), start, end)", "docstring": "Fill entire strip by giving individual RGB values instead of tuple", "id": "f2052:c0:m24"} {"signature": "def fillHSV(self, hsv, start=, end=-):", "body": "self.fill(conversions.hsv2rgb(hsv), start, end)", "docstring": "Fill the entire strip with HSV color tuple", "id": "f2052:c0:m25"} {"signature": "def set_colors(self, buf):", "body": "deprecated.deprecated('')if len(self._colors) != len(buf):raise IOError(\"\"\"\".format(len(self._colors), len(buf)))self._colors[:] = buf", "docstring": "DEPRECATED: use self.color_list\n\nUse with extreme caution!\nDirectly sets the internal buffer and bypasses all brightness and\nrotation control buf must also be in the exact format required by the\ndisplay type.", "id": "f2052:c1:m2"} {"signature": "def reflect_x(x, y, matrix):", "body": "return matrix.columns - - x, y", "docstring": "Reflect the index vertically.", "id": "f2053:m0"} {"signature": "def reflect_y(x, y, matrix):", "body": "return x, matrix.rows - - y", "docstring": "Reflect the index horizontally.", "id": "f2053:m1"} {"signature": "def serpentine_x(x, y, matrix):", "body": "if y % :return matrix.columns - - x, yreturn x, y", "docstring": "Every other row is indexed in reverse.", "id": "f2053:m2"} {"signature": "def serpentine_y(x, y, matrix):", "body": "if x % :return x, matrix.rows - - yreturn x, y", "docstring": "Every other column is indexed in reverse.", "id": "f2053:m3"} {"signature": "def transpose(x, y, _):", "body": "return y, x", "docstring": "Transpose rows and columns.", "id": "f2053:m4"} {"signature": "def make_matrix_coord_map(dx, dy, serpentine=True, offset=, rotation=, 
y_flip=False):", "body": "result = []for y in range(dy):if not serpentine or y % == :result.append([(dx * y) + x + offset for x in range(dx)])else:result.append([dx * (y + ) - - x + offset for x in range(dx)])result = rotate_and_flip(result, rotation, y_flip)return result", "docstring": "Helper method to generate X,Y coordinate maps for strips", "id": "f2055:m0"} {"signature": "def make_segments(strip, length):", "body": "if len(strip) % length:raise ValueError('')s = []try:while True:s.append(s[-].next(length) if s else Segment(strip, length))except ValueError:return s", "docstring": "Return a list of Segments that evenly split the strip.", "id": "f2059:m0"} {"signature": "def next(self, length):", "body": "return Segment(self.strip, length, self.offset + self.length)", "docstring": "Return a new segment starting right after self in the same buffer.", "id": "f2059:c0:m4"} {"signature": "def fill(strip, item, start=, stop=None, step=):", "body": "if stop is None:stop = len(strip)for i in range(start, stop, step):strip[i] = item", "docstring": "Fill a portion of a strip from start to stop by step with a given item.\n If stop is not given, it defaults to the length of the strip.", "id": "f2060:m0"} {"signature": "@abc.abstractmethoddef __getitem__(self, index):", "body": "pass", "docstring": "`index` must be an integer, not a slice.", "id": "f2060:c0:m0"} {"signature": "@abc.abstractmethoddef __setitem__(self, index, value):", "body": "pass", "docstring": "`index` must be an integer, not a slice.", "id": "f2060:c0:m1"} {"signature": "def recurse(desc, pre='', post=None, python_path=None):", "body": "def call(f, desc):if isinstance(f, str):f = getattr(datatype, f, None)return f and f(desc)desc = load.load_if_filename(desc) or descdesc = construct.to_type_constructor(desc, python_path)datatype = desc.get('')desc = call(pre, desc) or descfor child_name in getattr(datatype, '', []):child = desc.get(child_name)if child:is_plural = child_name.endswith('')remove_s = is_plural and child_name != ''cname = child_name[:-] if remove_s else child_namenew_path = python_path or ('' + cname)if is_plural:if isinstance(child, (dict, str)):child = [child]for i, c in enumerate(child):child[i] = recurse(c, pre, post, new_path)desc[child_name] = childelse:desc[child_name] = recurse(child, pre, post, new_path)d = call(post, desc)return desc if d is None else d", "docstring": "Depth first recursion through a dictionary containing type constructors\n\nThe arguments pre, post and children are independently either:\n\n* None, which means to do nothing\n* a string, which means to use the static class method of that name on the\n class being constructed, or\n* a callable, to be called at each recursion\n\nArguments:\n\ndictionary -- a project dictionary or one of its subdictionaries\npre -- called before children are visited node in the recursion\npost -- called after children are visited in the recursion\npython_path -- relative path to start resolving typenames", "id": "f2061:m0"} {"signature": "def make_object(*args, typename=None, python_path=None, datatype=None, **kwds):", "body": "datatype = datatype or import_symbol(typename, python_path)field_types = getattr(datatype, '', fields.FIELD_TYPES)return datatype(*args, **fields.component(kwds, field_types))", "docstring": "Make an object from a symbol.", "id": "f2062:m4"} {"signature": "def construct(*args, datatype, typename=None, **kwds):", "body": "return datatype(*args, **kwds)", "docstring": "Construct an object from a type constructor.\n\nA type constructor is a 
dictionary which has a field \"datatype\" which has\na callable method to construct a class, and a field \"typename\" which is the\nPython path of the function or class in \"datatype\".", "id": "f2063:m0"} {"signature": "def to_type_constructor(value, python_path=None):", "body": "if not value:return valueif callable(value):return {'': value}value = to_type(value)typename = value.get('')if typename:r = aliases.resolve(typename)try:value[''] = importer.import_symbol(r, python_path=python_path)del value['']except Exception as e:value[''] = ereturn value", "docstring": "Tries to convert a value to a type constructor.\n\nIf value is a string, then it used as the \"typename\" field.\n\nIf the \"typename\" field exists, the symbol for that name is imported and\nadded to the type constructor as a field \"datatype\".\n\nThrows:\n ImportError -- if \"typename\" is set but cannot be imported\n ValueError -- if \"typename\" is malformed", "id": "f2063:m2"} {"signature": "def merge(*projects):", "body": "result = {}for project in projects:for name, section in (project or {}).items():if name not in PROJECT_SECTIONS:raise ValueError(UNKNOWN_SECTION_ERROR % name)if section is None:result[name] = type(result[name])()continueif name in NOT_MERGEABLE + SPECIAL_CASE:result[name] = sectioncontinueif section and not isinstance(section, (dict, str)):cname = section.__class__.__name__raise ValueError(SECTION_ISNT_DICT_ERROR % (name, cname))if name == '':adesc = load.load_if_filename(section)if adesc:section = adesc.get('', {})section[''] = adesc.get('', {})result_section = result.setdefault(name, {})section = construct.to_type(section)for k, v in section.items():if v is None:result_section.pop(k, None)else:result_section[k] = vreturn result", "docstring": "Merge zero or more dictionaries representing projects with the default\nproject dictionary and return the result", "id": "f2074:m0"} {"signature": "def put_edit(self, f, *args, **kwds):", "body": "self.put_nowait(functools.partial(f, *args, **kwds))", "docstring": "Defer an edit to run on the EditQueue.\n\n:param callable f: The function to be called\n:param tuple args: Positional arguments to the function\n:param tuple kwds: Keyword arguments to the function\n:throws queue.Full: if the queue is full", "id": "f2076:c0:m0"} {"signature": "def get_and_run_edits(self):", "body": "if self.empty():returnedits = []while True:try:edits.append(self.get_nowait())except queue.Empty:breakfor e in edits:try:e()except:log.error('', e)traceback.print_exc()", "docstring": "Get all the edits in the queue, then execute them.\n\nThe algorithm gets all edits, and then executes all of them. 
It does\n*not* pull off one edit, execute, repeat until the queue is empty, and\nthat means that the queue might not be empty at the end of\n``run_edits``, because new edits might have entered the queue\nwhile the previous edits are being executed.\n\nThis has the advantage that if edits enter the queue faster than they\ncan be processed, ``get_and_run_edits`` won't go into an infinite loop,\nbut rather the queue will grow unboundedly, which that can be\ndetected, and mitigated and reported on - or if Queue.maxsize is\nset, ``bp`` will report a fairly clear error and just dump the edits\non the ground.", "id": "f2076:c0:m1"} {"signature": "def project(*descs, root_file=None):", "body": "load.ROOT_FILE = root_filedesc = merge.merge(merge.DEFAULT_PROJECT, *descs)path = desc.get('', '')if root_file:project_path = os.path.dirname(root_file)if path:path += '' + project_pathelse:path = project_pathwith load.extender(path):desc = recurse.recurse(desc)project = construct.construct(**desc)project.desc = descreturn project", "docstring": "Make a new project, using recursion and alias resolution.\n\nUse this function in preference to calling Project() directly.", "id": "f2080:m0"} {"signature": "def __init__(self, *,drivers, layout, maker, path, animation, controls,edit_queue_maxsize=EDIT_QUEUE_MAXSIZE, **kwds):", "body": "self.needs_cleanup = Falsedef create(root, name):def post(desc):exception = desc.get('')if exception:raise exceptionreturn self.construct_child(name, **desc)with exception.add('' + name):return recurse.recurse(root,pre=None,post=post,python_path='' + name)attributes.check(kwds, '')self.path = pathlayout = layout or fill.fill_layout(animation)self.maker = self.construct_child('', **maker)self.drivers = [create(d, '') for d in drivers]with exception.add(''):self.layout = self.construct_child('', **layout)self.animation = create(animation, '')self.running = Falseself.clock = clock.Clock()eq = edit_queue.EditQueue(maxsize=edit_queue_maxsize)self.layout.edit_queue = self.animation.edit_queue = eqself.animation.add_preframe_callback(eq.get_and_run_edits)self.controls = [create(c, '') for c in controls]for d in self.drivers:d.set_project(self)self.animation.set_project(self)", "docstring": ":param int edit_queue_maxsize: maxsize parameter to queue.Queue.\n 0 means an unbounded queue.", "id": "f2080:c0:m2"} {"signature": "def _add_redundant_arguments(parser):", "body": "parser.add_argument('', '', default=None,help='')if deprecated.allowed(): parser.add_argument('', '', default=None,help='')parser.add_argument('', default=None,help='')parser.add_argument('', '', default=None,help='')parser.add_argument('', '', default='', choices=NUMBER_TYPES,help=NUMBERS_HELP)parser.add_argument('', '', default=None, help=PATH_HELP)", "docstring": "These arguments are redundant with just using a project, and we should\nencouraging that as you don't have to learn any dumb flags!\n\nFor example, instead of\n\n bp foo.yml --animation=wombat --numbers=float\n\nuse\n\n bp foo.yml + '{animation: wombat, numbers: float}'", "id": "f2086:m1"} {"signature": "def __init__(self, *args, filename='', render=None,divide=, frames=, time=, speed=, options=None,gif_dir=None, **kwds):", "body": "super().__init__(*args, **kwds)self.cur_step = self.movie_writer = _movie_writer.MovieWriter(filename, render, divide, frames, time, speed, options, gif_dir)", "docstring": ":param str filename: Base filename to write the animated GIF file\n\n:param dict render: Parameters to the renderer function -\n see 
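The two EditQueue records above (put_edit / get_and_run_edits) describe a defer-and-drain pattern: edits are queued as partially applied callables from any thread, then drained and executed in one batch on the consuming thread. A minimal standalone sketch of that pattern using only the standard library; the class and method names here are illustrative, not BiblioPixel's actual API:

import functools
import queue

class EditQueueSketch:
    # Illustrative only: mirrors the put_edit / get_and_run_edits behaviour described above.
    def __init__(self, maxsize=0):
        self._queue = queue.Queue(maxsize=maxsize)  # maxsize=0 means unbounded

    def put_edit(self, f, *args, **kwds):
        # Defer a call; queue.Full propagates if maxsize is exceeded.
        self._queue.put_nowait(functools.partial(f, *args, **kwds))

    def get_and_run_edits(self):
        # Drain everything currently queued first, then execute the batch,
        # so a fast producer cannot trap this method in an endless loop.
        edits = []
        while True:
            try:
                edits.append(self._queue.get_nowait())
            except queue.Empty:
                break
        for edit in edits:
            edit()

eq = EditQueueSketch()
eq.put_edit(print, 'applied on the consumer thread')
eq.get_and_run_edits()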
``bibliopixel.util.image.render.renderer``\n\n:param int divide: If greater than 1, only rendered one in ``divide``\n frames\n\n:param int frames: Number of frames to write\n\n:param float time: Total time to write. If non-zero, takes precedence\n over `frames`\n\n:param float speed: the speed of the GIF is scaled up by this factor,\n so if speed=2 then a 2 second animation will become a 1 second GIF.\n\n:param dict options: Options to\n ``bibliopixel.util.image.gif.write_animation``\n\n:param str gif_dir: If set, write individual GIF frame files to this\n directory, and do not delete them when done. For testing purposes.", "id": "f2089:c0:m0"} {"signature": "def find_serial_devices(self):", "body": "if self.devices is not None:return self.devicesself.devices = {}hardware_id = \"\" + self.hardware_id for ports in serial.tools.list_ports.grep(hardware_id):port = ports[]try:id = self.get_device_id(port)ver = self._get_device_version(port)except:log.debug('',port, self.baudrate)if True:raisecontinueif getattr(ports, '', lambda: )():log.debug('',self.hardware_id, id, ver, len(ports))if id < :log.debug('',self.hardware_id, id, ver)else:self.devices[id] = port, verreturn self.devices", "docstring": "Scan and report all compatible serial devices on system.\n\n :returns: List of discovered devices", "id": "f2090:c0:m1"} {"signature": "def get_device(self, id=None):", "body": "if id is None:if not self.devices:raise ValueError('' % self.hardware_id)id, (device, version) = sorted(self.devices.items())[]elif id in self.devices:device, version = self.devices[id]else:error = '' % idlog.error(error)raise ValueError(error)log.info(\"\",device, id, version)return id, device, version", "docstring": "Returns details of either the first or specified device\n\n :param int id: Identifier of desired device. 
If not given, first device\n found will be returned\n\n :returns tuple: Device ID, Device Address, Firmware Version", "id": "f2090:c0:m2"} {"signature": "def error(self, fail=True, action=''):", "body": "e = ''if action:e = '' % (action, e)log.error(e)if fail:raise IOError(e)", "docstring": "SHOULD BE PRIVATE METHOD", "id": "f2090:c0:m3"} {"signature": "def set_device_id(self, dev, id):", "body": "if id < or id > :raise ValueError(\"\")com, code, ok = io.send_packet(CMDTYPE.SETID, , dev, self.baudrate, , id)if not ok:raise_error(code)", "docstring": "Set device ID to new value.\n\n :param str dev: Serial device address/path\n :param id: Device ID to set", "id": "f2090:c0:m4"} {"signature": "def get_device_id(self, dev):", "body": "com, code, ok = io.send_packet(CMDTYPE.GETID, , dev, self.baudrate, )if code is None:self.error(action='')return code", "docstring": "Get device ID at given address/path.\n\n :param str dev: Serial device address/path\n :param baudrate: Baudrate to use when connecting (optional)", "id": "f2090:c0:m5"} {"signature": "def __init__(self, *args, address, pixel_positions=None, **kwds):", "body": "super().__init__(*args, **kwds)self.address = addressself.server = self.thread = Noneself.pixel_positions = pixel_positionsif pixel_positions:self.set_pixel_positions(pixel_positions)", "docstring": "Args:\n port: the port on which the server is running.\n pixel_positions: the positions of the LEDs in 3-d space.\n **kwds: keywords passed to DriverBase.", "id": "f2098:c0:m0"} {"signature": "@staticmethoddef main():", "body": "if not _curses:if os.name == '':raise ValueError('')raise ValueError('')try:driver = next(iter(Curses.DRIVERS))except:raise ValueError('')_curses.wrapper(driver.run_in_curses)", "docstring": "If a project has a Curses driver, the section \"main\" in the section\n\"run\" must be \"bibliopixel.drivers.curses.Curses.main\".", "id": "f2101:c0:m3"} {"signature": "def __init__(self, num=, columns=, max_colors=, **kwds):", "body": "super().__init__(num)self.columns = columnsself.max_colors = max_colors", "docstring": "Args\n delay: time to wait in seconds to simulate actual hardware\n interface time", "id": "f2102:c0:m0"} {"signature": "def __init__(self, num=, delay=, **kwds):", "body": "super().__init__(num)self._kwds = kwdsself._delay = delay", "docstring": "Args\n delay: time to wait in seconds to simulate actual hardware\n interface time", "id": "f2103:c0:m0"} {"signature": "def send_packet(self, data):", "body": "raise NotImplementedError", "docstring": "SHOULD BE PRIVATE", "id": "f2108:c0:m1"} {"signature": "def compute_packet(self, data):", "body": "return data", "docstring": "SHOULD BE PRIVATE", "id": "f2108:c0:m2"} {"signature": "def error(self, text):", "body": "msg = ''.format(self._dev, self._spi_speed, text)log.error(msg)raise IOError(msg)", "docstring": "SHOULD BE PRIVATE", "id": "f2108:c0:m3"} {"signature": "def send_packet(self, data):", "body": "package_size = for i in range(int(math.ceil(len(data) / package_size))):start = i * package_sizeend = (i + ) * package_sizeself._spi.write(data[start:end])self._spi.flush()", "docstring": "SHOULD BE PRIVATE", "id": "f2108:c1:m1"} {"signature": "def send_packet(self, data):", "body": "package_size = for i in range(int(math.ceil(len(data) / package_size))):start = i * package_sizeend = (i + ) * package_sizeself._spi.transfer(data[start:end])", "docstring": "SHOULD BE PRIVATE", "id": "f2108:c2:m1"} {"signature": "def send_packet(self, data):", "body": "self._spi.xfer2(list(data))", "docstring": "SHOULD BE 
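The find_serial_devices / get_device records above rely on pyserial's port enumeration (serial.tools.list_ports.grep) to locate compatible hardware. A small standalone sketch of that discovery step; the hardware ID below is a placeholder, not the value elided from the record:

from serial.tools import list_ports

HARDWARE_ID = '1D50:60AB'  # placeholder VID:PID; the real value is elided in the record above

def candidate_ports(hardware_id=HARDWARE_ID):
    # list_ports.grep matches the pattern against each port's name, description and hwid.
    return [port.device for port in list_ports.grep(hardware_id)]

print(candidate_ports())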
PRIVATE", "id": "f2108:c3:m1"} {"signature": "def send_packet(self, data):", "body": "pass", "docstring": "do nothing", "id": "f2108:c4:m0"} {"signature": "def SPI(ledtype=None, num=, **kwargs):", "body": "from ...project.types.ledtype import makeif ledtype is None:raise ValueError('')ledtype = make(ledtype)if num == :raise ValueError('')if ledtype not in SPI_DRIVERS.keys():raise ValueError(''.format(ledtype))return SPI_DRIVERS[ledtype](num, **kwargs)", "docstring": "Wrapper function for using SPI device drivers on systems like the\n Raspberry Pi and BeagleBone. This allows using any of the SPI drivers\n from a single entry point instead importing the driver for a specific\n LED type.\n\n Provides the same parameters of\n :py:class:`bibliopixel.drivers.SPI.SPIBase` as\n well as those below:\n\n :param ledtype: One of: LPD8806, WS2801, WS281X, or APA102", "id": "f2112:m0"} {"signature": "def set_device_brightness(self, val):", "body": "self._chipset_brightness = (val >> )self._brightness_list = [ + self._chipset_brightness] * self.numLEDsself._packet[self._start_frame:self._pixel_stop:] = (self._brightness_list)", "docstring": "APA102 & SK9822 support on-chip brightness control, allowing greater\ncolor depth.\n\nAPA102 superimposes a 440Hz PWM on the 19kHz base PWM to control\nbrightness. SK9822 uses a base 4.7kHz PWM but controls brightness with a\nvariable current source.\n\nBecause of this SK9822 will have much less flicker at lower levels.\nEither way, this option is better and faster than scaling in\nBiblioPixel.", "id": "f2113:c0:m1"} {"signature": "def __init__(self, num=, port=, **kwds):", "body": "super().__init__(num, address=port, **kwds)", "docstring": "Args:\n num: number of LEDs being visualizer.\n port: the port on which the SimPixel server is running.\n pixel_positions: the positions of the LEDs in 3-d space.\n **kwds: keywords passed to DriverBase.", "id": "f2117:c1:m1"} {"signature": "def handleMessage(self):", "body": "pass", "docstring": "Called when websocket frame is received.\nTo access the frame data call self.data.\n\nIf the frame is Text then self.data is a unicode object.\nIf the frame is Binary then self.data is a bytearray object.", "id": "f2119:c1:m1"} {"signature": "def handleConnected(self):", "body": "pass", "docstring": "Called when a websocket client connects to the server.", "id": "f2119:c1:m2"} {"signature": "def handleClose(self):", "body": "pass", "docstring": "Called when a websocket server gets a Close frame from a client.", "id": "f2119:c1:m3"} {"signature": "def close(self, status=, reason=u''):", "body": "try:if self.closed is False:close_msg = bytearray()close_msg.extend(struct.pack(\"\", status))if _check_unicode(reason):close_msg.extend(reason.encode(''))else:close_msg.extend(reason)self._sendMessage(False, CLOSE, close_msg)finally:self.closed = True", "docstring": "Send Close frame to the client. 
The underlying socket is only closed\nwhen the client acknowledges the Close frame.\n\nstatus is the closing identifier.\nreason is the reason for the close.", "id": "f2119:c1:m6"} {"signature": "def sendFragmentStart(self, data):", "body": "opcode = BINARYif _check_unicode(data):opcode = TEXTself._sendMessage(True, opcode, data)", "docstring": "Send the start of a data fragment stream to a websocket client.\nSubsequent data should be sent using sendFragment().\nA fragment stream is completed when sendFragmentEnd() is called.\n\nIf data is a unicode object then the frame is sent as Text.\nIf the data is a bytearray object then the frame is sent as Binary.", "id": "f2119:c1:m8"} {"signature": "def sendFragment(self, data):", "body": "self._sendMessage(True, STREAM, data)", "docstring": "see sendFragmentStart()\n\nIf data is a unicode object then the frame is sent as Text.\nIf the data is a bytearray object then the frame is sent as Binary.", "id": "f2119:c1:m9"} {"signature": "def sendFragmentEnd(self, data):", "body": "self._sendMessage(False, STREAM, data)", "docstring": "see sendFragmentEnd()\n\nIf data is a unicode object then the frame is sent as Text.\nIf the data is a bytearray object then the frame is sent as Binary.", "id": "f2119:c1:m10"} {"signature": "def sendMessage(self, data):", "body": "opcode = BINARYif _check_unicode(data):opcode = TEXTself._sendMessage(False, opcode, data)", "docstring": "Send websocket data frame to the client.\n\nIf data is a unicode object then the frame is sent as Text.\nIf the data is a bytearray object then the frame is sent as Binary.", "id": "f2119:c1:m11"} {"signature": "def set_device_brightness(self, brightness):", "body": "packet = util.generate_header(CMDTYPE.BRIGHTNESS, )packet.append(self._brightness)s = self._connect()s.sendall(packet)resp = ord(s.recv())return resp == RETURN_CODES.SUCCESS", "docstring": "Hardware specific method to set the global brightness for\n this driver's output. 
This method is required to be implemented,\n however, users should call\n :py:meth:`.driver_base.DriverBase.set_brightness`\n instead of calling this method directly.\n\n :param int brightness: 0-255 value representing the desired\n brightness level", "id": "f2122:c1:m4"} {"signature": "def __init__(self, *args, ip_address='', port=artnet_message.UDP_PORT,filter_dupes=True, offset=, **kwds):", "body": "super().__init__(*args, address=(ip_address, port), **kwds)self.filter_dupes = filter_dupesself.offset = offset_range.DMXChannel(offset)self.msg = artnet_message.dmx_message()self.last_message = None", "docstring": ":param dict channel_map: maps DMX channels to positions in\n the color_list\n:param int offset: a DMX channel offset, positive, negative or zero", "id": "f2124:c0:m0"} {"signature": "@classmethoddef construct(cls, project, **desc):", "body": "return cls(maker=project.maker, **desc)", "docstring": "Construct a driver from a project and a description.", "id": "f2125:c0:m0"} {"signature": "def set_pixel_positions(self, pixel_positions):", "body": "pass", "docstring": "Internal Use Only\n\nPlaceholder callback for sending physical pixel layout data to the\n``SimPixel`` driver.", "id": "f2125:c0:m2"} {"signature": "def set_colors(self, colors, pos):", "body": "self._colors = colorsself._pos = posend = self._pos + self.numLEDsif end > len(self._colors):raise ValueError('' % (end, len(self._colors)))", "docstring": "Use with caution!\n\nDirectly set the pixel buffers.\n\n:param colors: A list of color tuples\n:param int pos: Position in color list to begin set operation.", "id": "f2125:c0:m3"} {"signature": "def start(self):", "body": "", "docstring": "Called right before this driver will run. This is the place\nto do things like start threads, not in the constructor.", "id": "f2125:c0:m5"} {"signature": "def stop(self):", "body": "", "docstring": "Called to request any threads or resources to shut down.", "id": "f2125:c0:m6"} {"signature": "def cleanup(self):", "body": "", "docstring": "Called to shut this driver down, and stop all threads and processes.", "id": "f2125:c0:m7"} {"signature": "def join(self, timeout=None):", "body": "", "docstring": "Called to join threads.", "id": "f2125:c0:m8"} {"signature": "def bufByteCount(self):", "body": "return * self.numLEDs", "docstring": "Total number of bytes that the pixel buffer represents.\nMainly used for drivers such as :py:mod:`bibliopixel.drivers.serial`\nand :py:mod:`.network`", "id": "f2125:c0:m9"} {"signature": "def sync(self):", "body": "", "docstring": "The sync() method is called after the entire frame has been\nsent to the device to indicate that it may now be displayed.\n\nThis is particularly useful when there are multiple drivers comprising\none display which all need to display the next frame at exactly the same\ntime.", "id": "f2125:c0:m10"} {"signature": "def _compute_packet(self):", "body": "", "docstring": "Compute the packet from the colors and position.\n\n Eventually, this will run on the compute thread.", "id": "f2125:c0:m11"} {"signature": "def _send_packet(self):", "body": "", "docstring": "Send the packet to the driver.\n\n Eventually, this will run on an I/O thread.", "id": "f2125:c0:m12"} {"signature": "def update_colors(self):", "body": "start = self.clock.time()with self.brightness_lock:brightness, self._waiting_brightness = (self._waiting_brightness, None)if brightness is not None:self._brightness = brightnessif 
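The DriverBase records above outline the driver lifecycle: the base class holds a color buffer, and update_colors applies brightness then calls _compute_packet followed by _send_packet. A hedged sketch of a do-nothing driver built on that contract; the import path is an assumption based on the cross-references in these docstrings (".driver_base.DriverBase"), not verified against a specific BiblioPixel version:

from bibliopixel.drivers.driver_base import DriverBase  # path assumed from the docstring references

class LoggingDriver(DriverBase):
    """Sketch: prints each computed frame instead of talking to hardware."""

    def _compute_packet(self):
        # _render() (defined on the base class, per the records) applies
        # brightness and gamma correction into self._buf.
        self._render()

    def _send_packet(self):
        print('would send %d bytes' % len(self._buf))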
self.set_device_brightness:self.set_device_brightness(brightness)self._compute_packet()self._send_packet()self.lastUpdate = self.clock.time() - start", "docstring": "Apply any corrections to the current color list\n and send the results to the driver output. This function primarily\n provided as a wrapper for each driver's implementation of\n :py:func:`_compute_packet` and :py:func:`_send_packet`.", "id": "f2125:c0:m13"} {"signature": "def set_brightness(self, brightness):", "body": "with self.brightness_lock:self._waiting_brightness = brightness", "docstring": "Set the global brightness for this driver's output.\n\n :param int brightness: 0-255 value representing the desired\n brightness level", "id": "f2125:c0:m14"} {"signature": "def _render(self):", "body": "if self.set_device_brightness:level = else:level = self._brightness / gam, (r, g, b) = self.gamma.get, self.c_orderfor i in range(min(self.numLEDs, len(self._buf) / )):c = [int(level * x) for x in self._colors[i + self._pos]]self._buf[i * :(i + ) * ] = gam(c[r]), gam(c[g]), gam(c[b])", "docstring": "Typically called from :py:func:`_compute_packet` this applies\n brightness and gamma correction to the pixels controlled by this\n driver.", "id": "f2125:c0:m15"} {"signature": "def import_all(root, project_name, blacklist):", "body": "successes, failures = [], []for name in _all_imports(root, project_name):if name not in blacklist:try:importlib.import_module(name)except:failures.append((name, traceback.format_exc()))else:successes.append(name)return successes, failures", "docstring": "Import all files and directories", "id": "f2171:m2"} {"signature": "def text_at(self, x, y):", "body": "return '' if any(self.matrix.get(x, y)) else ''", "docstring": "Return text for a given pixel", "id": "f2179:c0:m0"} {"signature": "def parse_json_form(dictionary, prefix=''):", "body": "output = {}for name, value in get_all_items(dictionary):steps = parse_json_path(name)context = outputfor step in steps:current_value = get_value(context, step.key, Undefined())context = set_json_value(context=context,step=step,current_value=current_value,entry_value=value,is_file=False,)output = clean_undefined(output)output = clean_empty_string(output)result = get_value(output, prefix, Undefined())if isinstance(result, Undefined):return outputelse:return result", "docstring": "Parse an HTML JSON form submission as per the W3C Draft spec\nAn implementation of \"The application/json encoding algorithm\"\nhttp://www.w3.org/TR/html-json-forms/", "id": "f2250:m0"} {"signature": "def parse_json_path(path):", "body": "original_path = pathsteps = []failed = [JsonStep(type=\"\",key=original_path,last=True,failed=True,)]digit_re = re.compile(r'')key_re = re.compile(r'')parts = path.split(\"\")first_key = parts[]if parts[:]:path = \"\" + \"\".join(parts[:])else:path = \"\"steps.append(JsonStep(type=\"\",key=first_key,))if not path:steps[-].last = Truereturn stepswhile path:if path[:] == \"\":path = path[:]steps.append(JsonStep(type=\"\",key=,))continuedigit_match = digit_re.match(path)if digit_match:path = digit_re.sub(\"\", path)steps.append(JsonStep(type=\"\",key=int(digit_match.group()),))continuekey_match = key_re.match(path)if key_match:path = key_re.sub(\"\", path)steps.append(JsonStep(type=\"\",key=key_match.group(),))continuereturn failednext_step = Nonefor step in reversed(steps):if next_step:step.next_type = next_step.typeelse:step.last = Truenext_step = stepreturn steps", "docstring": "Parse a string as a JSON path\nAn implementation of \"steps to parse a JSON encoding 
path\"\nhttp://www.w3.org/TR/html-json-forms/#dfn-steps-to-parse-a-json-encoding-path", "id": "f2250:m1"} {"signature": "def set_json_value(context, step, current_value, entry_value, is_file):", "body": "if isinstance(context, list) and isinstance(step.key, int):undefined_count = step.key - len(context) + if undefined_count > :raise ParseException(\"\")elif undefined_count > :context += [Undefined()] * undefined_countif step.last:if isinstance(current_value, Undefined):key = step.keyif isinstance(context, dict) and isinstance(key, int):key = str(key)if step.append:context[key] = [entry_value]else:context[key] = entry_valueelif isinstance(current_value, list):context[step.key].append(entry_value)elif isinstance(current_value, dict) and not is_file:return set_json_value(context=current_value,step=JsonStep(type=\"\", key=\"\", last=True),current_value=current_value.get(\"\", Undefined()),entry_value=entry_value,is_file=is_file,)else:context[step.key] = [current_value, entry_value]return contextif isinstance(current_value, Undefined):if step.next_type == \"\":context[step.key] = []else:context[step.key] = {}return context[step.key]elif isinstance(current_value, dict):return get_value(context, step.key, Undefined())elif isinstance(current_value, list):if step.next_type == \"\":return current_valueobj = {}for i, item in enumerate(current_value):if not isinstance(item, Undefined):obj[str(i)] = itemcontext[step.key] = objreturn objelse:obj = {'': current_value}context[step.key] = objreturn obj", "docstring": "Apply a JSON value to a context object\nAn implementation of \"steps to set a JSON encoding value\"\nhttp://www.w3.org/TR/html-json-forms/#dfn-steps-to-set-a-json-encoding-value", "id": "f2250:m2"} {"signature": "def get_value(obj, key, default=None):", "body": "if isinstance(obj, dict):return obj.get(key, default)elif isinstance(obj, list):try:return obj[key]except IndexError:return default", "docstring": "Mimic JavaScript Object/Array behavior by allowing access to nonexistent\nindexes.", "id": "f2250:m3"} {"signature": "def clean_undefined(obj):", "body": "if isinstance(obj, list):return [None if isinstance(item, Undefined) else itemfor item in obj]if isinstance(obj, dict):for key in obj:obj[key] = clean_undefined(obj[key])return obj", "docstring": "Convert Undefined array entries to None (null)", "id": "f2250:m4"} {"signature": "def clean_empty_string(obj):", "body": "if obj == '':return Noneif isinstance(obj, list):return [None if item == '' else itemfor item in obj]if isinstance(obj, dict):for key in obj:obj[key] = clean_empty_string(obj[key])return obj", "docstring": "Replace empty form values with None, since the is_html_input() check in\nField won't work after we convert to JSON.\n(FIXME: What about allow_blank=True?)", "id": "f2250:m5"} {"signature": "def get_all_items(obj):", "body": "if hasattr(obj, ''):items = []for key in obj:for value in obj.getlist(key):items.append((key, value))return itemselse:return obj.items()", "docstring": "dict.items() but with a separate row for each value in a MultiValueDict", "id": "f2250:m6"} {"signature": "def register_type(item_type, item_creator):", "body": "Pipe.pipe_item_types[item_type] = item_creator", "docstring": "Register data type to Pipe class. 
Check :py:meth:`Pipe.__or__` and\n :py:meth:`Pipe.__ror__` for detail.\n\n :param item_type: The type of data object which used in pipe cascading.\n :param item_creator: A function to convert data to Pipe object.", "id": "f2258:m0"} {"signature": "def unregister_type(item_type):", "body": "if item_type not in Pipe.pipe_item_types:returndel Pipe.pipe_item_types[item_type]", "docstring": "Unregister data type from Pipe class. Check Pipe.__or__ and Pipe.__ror__ for\n detail.\n\n :param item_type: The type of data object which used in pipe cascading.", "id": "f2258:m1"} {"signature": "def unregister_all_types():", "body": "Pipe.pipe_item_types.clear()", "docstring": "Unregister all data types from Pipe class.", "id": "f2258:m2"} {"signature": "def has_registered_type(item_type):", "body": "return item_type in Pipe.pipe_item_types", "docstring": "Check if item_type is registered or not.\n\n :param item_type: The type to be checked.\n :returns: True: The item_type is registered. False: The item_type is not registered.\n :rtype: bool", "id": "f2258:m3"} {"signature": "def get_item_creator(item_type):", "body": "if item_type not in Pipe.pipe_item_types:for registered_type in Pipe.pipe_item_types:if issubclass(item_type, registered_type):return Pipe.pipe_item_types[registered_type]return Noneelse:return Pipe.pipe_item_types[item_type]", "docstring": "Get item creator according registered item type.\n\n :param item_type: The type of item to be checed.\n :type item_type: types.TypeType.\n :returns: Creator function. None if type not found.", "id": "f2258:m4"} {"signature": "def __init__(self, func, *args, **kw):", "body": "self.__name__ = func.__name__self.__doc__ = func.__doc__self.func = funcself.next = Noneself.chained = Falseself.args = argsself.kw = kw", "docstring": "Constructor of Pipe. It takes first argument as a generator function.\n args and kw are default arguments to be used if the Pipe object is\n cascaded directly. The default arguments are replaced by the arguments of\n __call__ operator.\n\n :param self: self reference.\n :param func: The generator function to be be wrapped.\n :param args: The default arguments to be used for generator function.\n :param kw: The default keyword arguments to be used for generator function.", "id": "f2258:c1:m0"} {"signature": "def __or__(self, next):", "body": "if not isinstance(next, Pipe):item_creator = get_item_creator(type(next))if item_creator is None:raise UnregisteredPipeType(type(next))next = item_creator(next)clone = self.clone()if not next.chained:clone.append(next)else:clone.append(next(*next.args, **next.kw))return clone", "docstring": "Set operand of right-hand side to be next Pipe object. Type convertion\n will be applied automatically if next is not a Pipe object and its type\n is registered in Pipe.pipe_item_types. Otherwise, UnregisteredPipeType\n will be raised.\n\n :param next: The next Pipe object to be cascaded.\n :type next: Pipe object or any object whose type is registered.\n\n :returns: The clone of self.", "id": "f2258:c1:m1"} {"signature": "def __ror__(self, prev):", "body": "if not isinstance(prev, Pipe):item_creator = get_item_creator(type(prev))if item_creator is None:raise UnregisteredPipeType(type(prev))prev = item_creator(prev)return prev.__or__(self)", "docstring": "Set operand of left-hand side to be previous Pipe object. Type\n convertion will be applied automatically if prev is not a Pipe object\n and its type is registered in Pipe.pipe_item_types. 
Otherwise,\n UnregisteredPipeType will be raised.\n\n :param prev: The previous Pipe object which used to cascade this object.\n :type prev: Pipe object or any object whose type is registered.\n\n :returns: previous Pipe object.", "id": "f2258:c1:m2"} {"signature": "def __call__(self, *args,**kw):", "body": "clone = self.clone()clone.chained = Falseclone.args = args;clone.kw = kwreturn clone", "docstring": "A Pipe object to be called means self-cloning with new default\n arguments.\n\n :param args: The default arguments to be used for generator function.\n :param kw: The default keyword arguments to be used for generator function.\n\n :returns: The clone of self.", "id": "f2258:c1:m3"} {"signature": "def clone(self):", "body": "new_object = copy.copy(self)if new_object.next:new_object.next = new_object.next.clone()return new_object", "docstring": "Self-cloning. All its next Pipe objects are cloned too.\n\n :returns: cloned object", "id": "f2258:c1:m4"} {"signature": "def append(self, next):", "body": "next.chained = Trueif self.next:self.next.append(next)else:self.next = next", "docstring": "Append next object to pipe tail.\n\n :param next: The Pipe object to be appended to tail.\n :type next: Pipe object.", "id": "f2258:c1:m5"} {"signature": "def __iter__(self):", "body": "return self.iter()", "docstring": "Make iterator.\n\n :returns: iterator object.", "id": "f2258:c1:m6"} {"signature": "def iter(self, prev=None):", "body": "if self.next:generator = self.next.iter(self.func(prev, *self.args, **self.kw))else:generator = self.func(prev, *self.args, **self.kw)return generator", "docstring": "Return an generator as iterator object.\n\n :param prev: Previous Pipe object which used for data input.\n :returns: A generator for iteration.", "id": "f2258:c1:m7"} {"signature": "def run(self):", "body": "last_data = Nonefor last_data in self.iter():passreturn last_data", "docstring": "Execute the cascading pipe and return the last data processed by\n pipes.\n\n :returns: The last processed data.", "id": "f2258:c1:m8"} {"signature": "def result(self):", "body": "return list(self.iter())", "docstring": "Execute the cascading pipe and return a list which contains all\n processed data.\n\n :returns: The list of processed data.\n :rtype: list", "id": "f2258:c1:m9"} {"signature": "@staticmethoddef func(generator):", "body": "return Pipe(generator)", "docstring": "Wrap a generator function to Pipe object.\n\n :param generator: The generator function to be wrapped.\n :type generator: generator\n :returns: Pipe object", "id": "f2258:c2:m0"} {"signature": "@staticmethoddef map(func):", "body": "def wrapper(prev, *argv, **kw):if prev is None:raise TypeError('')for i in prev:yield func(i, *argv, **kw)return Pipe(wrapper)", "docstring": "Wrap a map function to Pipe object. Map function is a function with\n at least one argument. It is used to convert data. The first argument\n is the data to be converted. The return data from map function will\n be sent to next generator.\n\n :param func: The map function to be wrapped.\n :type func: function object\n :param args: The default arguments to be used for map function.\n :param kw: The default keyword arguments to be used for map function.\n :returns: Pipe object", "id": "f2258:c2:m1"} {"signature": "@staticmethoddef filter(func):", "body": "def wrapper(prev, *argv, **kw):if prev is None:raise TypeError('')for i in prev:if func(i, *argv, **kw):yield ireturn Pipe(wrapper)", "docstring": "Wrap a filter function to Pipe object. 
Filter function is a function\n with at least one argument. It is used to determine if the data can pass.\n The first argument is the data to be converted. The return data from\n filter function should be a boolean value. If true, data can pass.\n Otherwise, data is omitted.\n\n :param func: The filter function to be wrapped.\n :type func: function object\n :param args: The default arguments to be used for filter function.\n :param kw: The default keyword arguments to be used for filter function.\n :returns: Pipe object", "id": "f2258:c2:m2"} {"signature": "@staticmethoddef reduce(func):", "body": "def wrapper(prev, *argv, **kw):accum_value = None if '' not in kw else kw.pop('')if prev is None:raise TypeError('')for i in prev:accum_value = func(accum_value, i, *argv, **kw)yield accum_valuereturn Pipe(wrapper)", "docstring": "Wrap a reduce function to Pipe object. Reduce function is a function\n with at least two arguments. It works like built-in reduce function.\n It takes first argument for accumulated result, second argument for\n the new data to process. A keyword-based argument named 'init' is\n optional. If init is provided, it is used for the initial value of\n accumulated result. Or, the initial value is None.\n\n The first argument is the data to be converted. The return data from\n filter function should be a boolean value. If true, data can pass.\n Otherwise, data is omitted.\n\n :param func: The filter function to be wrapped.\n :type func: function object\n :param args: The default arguments to be used for filter function.\n :param kw: The default keyword arguments to be used for filter function.\n :returns: Pipe object", "id": "f2258:c2:m3"} {"signature": "@staticmethoddef stopper(func):", "body": "def wrapper(prev, *argv, **kw):if prev is None:raise TypeError('')for i in prev:if func(i, *argv, **kw):breakyield ireturn Pipe(wrapper)", "docstring": "Wrap a conditoinal function(stopper function) to Pipe object.\n\n wrapped function should return boolean value. The cascading pipe will\n stop the execution if wrapped function return True.\n\n Stopper is useful if you have unlimited number of input data.\n\n :param func: The conditoinal function to be wrapped.\n :type func: function object\n :param args: The default arguments to be used for wrapped function.\n :param kw: The default keyword arguments to be used for wrapped function.\n :returns: Pipe object", "id": "f2258:c2:m4"} {"signature": "def run(cmd):", "body": "return cmd.run()", "docstring": "Run pipe object and return its last result.\n\n :param cmd: The Pipe object to be executed.\n :type cmd: Pipe\n :returns: The last result.\n\n .. seealso::\n :py:meth:`cmdlet.Pipe.run`", "id": "f2259:m0"} {"signature": "def result(cmd):", "body": "return cmd.result()", "docstring": "Run pipe object and return its result in a list.\n\n :param cmd: The Pipe object to be executed.\n :type cmd: Pipe\n :returns: The list which contains the result of pipe.\n\n .. 
seealso::\n :py:meth:`cmdlet.Pipe.result`", "id": "f2259:m1"} {"signature": "@pipe.funcdef seq(prev, sequence):", "body": "for i in sequence:yield i", "docstring": "Pipe wrapper for any iterable object.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param sequence: The iterable object to be wrapped.\n :type sequence: iterator\n :returns: generator", "id": "f2259:m2"} {"signature": "@pipe.funcdef items(prev, dict_obj):", "body": "for kv in dict_obj.items():yield kv", "docstring": "Pipe wrapper for any dict object.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param dict_obj: The dict object to be wrapped.\n :type dict_obj: dict\n :returns: generator", "id": "f2259:m3"} {"signature": "@pipe.funcdef attr(prev, attr_name):", "body": "for obj in prev:if hasattr(obj, attr_name):yield getattr(obj, attr_name)", "docstring": "attr pipe can extract attribute value of object.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param attr_name: The name of attribute\n :type attr_name: str\n :returns: generator", "id": "f2259:m4"} {"signature": "@pipe.funcdef attrs(prev, attr_names):", "body": "for obj in prev:attr_values = []for name in attr_names:if hasattr(obj, name):attr_values.append(getattr(obj, name))yield attr_values", "docstring": "attrs pipe can extract attribute values of object.\n\n If attr_names is a list and its item is not a valid attribute of\n prev's object. It will be excluded from yielded dict.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param attr_names: The list of attribute names\n :type attr_names: str of list\n :returns: generator", "id": "f2259:m5"} {"signature": "@pipe.funcdef attrdict(prev, attr_names):", "body": "if isinstance(attr_names, dict):for obj in prev:attr_values = dict()for name in attr_names.keys():if hasattr(obj, name):attr_values[name] = getattr(obj, name)else:attr_values[name] = attr_names[name]yield attr_valueselse:for obj in prev:attr_values = dict()for name in attr_names:if hasattr(obj, name):attr_values[name] = getattr(obj, name)yield attr_values", "docstring": "attrdict pipe can extract attribute values of object into a dict.\n\n The argument attr_names can be a list or a dict.\n\n If attr_names is a list and its item is not a valid attribute of\n prev's object. It will be excluded from yielded dict.\n\n If attr_names is dict and the key doesn't exist in prev's object.\n the value of corresponding attr_names key will be copy to yielded dict.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param attr_names: The list or dict of attribute names\n :type attr_names: str of list or dict\n :returns: generator", "id": "f2259:m6"} {"signature": "@pipe.funcdef flatten(prev, depth=sys.maxsize):", "body": "def inner_flatten(iterable, curr_level, max_levels):for i in iterable:if hasattr(i, '') and curr_level < max_levels:for j in inner_flatten(i, curr_level + , max_levels):yield jelse:yield ifor d in prev:if hasattr(d, '') and depth > :for inner_d in inner_flatten(d, , depth):yield inner_delse:yield d", "docstring": "flatten pipe extracts nested item from previous pipe.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param depth: The deepest nested level to be extracted. 
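The records above show how plain functions become pipe stages: Pipe.func wraps a generator, Pipe.map and Pipe.filter wrap per-item callables, and run / result execute a cascade, with registered types (lists, generators, strings) converted automatically when used with the | operator. A small usage sketch, assuming the pipe helper and result are importable from the cmdlet package as these records suggest (the exact module path is not shown here):

# from cmdlet.cmds import pipe, result   # assumed import; the records only show the decorators in use

@pipe.func
def evens(prev):
    # A generator stage: read items from the previous stage, pass on the even ones.
    for item in prev:
        if item % 2 == 0:
            yield item

@pipe.map
def double(item):
    # A map stage: transform each item.
    return item * 2

print(result([1, 2, 3, 4, 5, 6] | evens | double))   # [4, 8, 12]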
0 means no extraction.\n :type depth: integer\n :returns: generator", "id": "f2259:m7"} {"signature": "@pipe.funcdef values(prev, *keys, **kw):", "body": "d = next(prev)if isinstance(d, dict):yield [d[k] for k in keys if k in d]for d in prev:yield [d[k] for k in keys if k in d]else:yield [d[i] for i in keys if <= i < len(d)]for d in prev:yield [d[i] for i in keys if <= i < len(d)]", "docstring": "values pipe extract value from previous pipe.\n\n If previous pipe send a dictionary to values pipe, keys should contains\n the key of dictionary which you want to get. If previous pipe send list or\n tuple,\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :returns: generator", "id": "f2259:m8"} {"signature": "@pipe.funcdef counter(prev):", "body": "count = for data in prev:count += yield count", "docstring": "counter pipe count how many data pass from previous pipe.\n\n This pipe will dropped all received data and return counting value after\n last data.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param dict_obj: The dict object to be wrapped.\n :type dict_obj: dict\n :returns: generator", "id": "f2259:m9"} {"signature": "@pipe.funcdef enum(prev, start=):", "body": "for data in enumerate(prev, start):yield data", "docstring": "enum pipe wrap the built-in function *enumerate*. It passes a tuple\n to next pipe. The tuple contains a count(from start which defaults to 0)\n and the values passed from previous pipe.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param start: The start value of enumeration.\n :type start: integer\n :returns: generator", "id": "f2259:m10"} {"signature": "@pipe.funcdef pack(prev, n, rest=False, **kw):", "body": "if '' in kw:use_padding = Truepadding = kw['']else:use_padding = Falsepadding = Noneitems = []for i, data in enumerate(prev, ):items.append(data)if (i % n) == :yield itemsitems = []if len(items) != and rest:if use_padding:items.extend([padding, ] * (n - (i % n)))yield items", "docstring": "pack pipe takes n elements from previous generator and yield one\n list to next.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param rest: Set True to allow to output the rest part of last elements.\n :type prev: boolean\n :param padding: Specify the padding element for the rest part of last elements.\n :type prev: boolean\n :returns: generator\n\n :Example:\n >>> result([1,2,3,4,5,6,7] | pack(3))\n [[1, 2, 3], [4, 5, 6]]\n\n >>> result([1,2,3,4,5,6,7] | pack(3, rest=True))\n [[1, 2, 3], [4, 5, 6], [7,]]\n\n >>> result([1,2,3,4,5,6,7] | pack(3, padding=None))\n [[1, 2, 3], [4, 5, 6], [7, None, None]]", "id": "f2259:m11"} {"signature": "@pipe.funcdef format(prev, format_string):", "body": "for i in prev:yield (format_string % i)", "docstring": "The pipe formats the data passed from previous generator according to\n given format_string argument.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param format_string: format string which used to format the data from\n previous iterator.\n :type sequence: str\n :returns: generator", "id": "f2259:m12"} {"signature": "@pipe.funcdef grep(prev, pattern, *args, **kw):", "body": "inv = False if '' not in kw else kw.pop('')pattern_obj = re.compile(pattern, *args, **kw)for data in prev:if bool(inv) ^ bool(pattern_obj.match(data)):yield data", "docstring": "The pipe greps the data passed from previous generator according to\n given regular expression.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param 
pattern: The pattern which used to filter out data.\n :type pattern: str|unicode|re pattern object\n :param inv: If true, invert the match condition.\n :type inv: boolean\n :param kw:\n :type kw: dict\n :returns: generator", "id": "f2259:m13"} {"signature": "@pipe.funcdef match(prev, pattern, *args, **kw):", "body": "to = '' in kw and kw.pop('')pattern_obj = re.compile(pattern, *args, **kw)if to is dict:for data in prev:match = pattern_obj.match(data)if match is not None:yield match.groupdict()elif to is tuple:for data in prev:match = pattern_obj.match(data)if match is not None:yield match.groups()elif to is list:for data in prev:match = pattern_obj.match(data)if match is not None:yield list(match.groups())else:for data in prev:match = pattern_obj.match(data)if match is not None:yield match", "docstring": "The pipe greps the data passed from previous generator according to\n given regular expression. The data passed to next pipe is MatchObject\n , dict or tuple which determined by 'to' in keyword argument.\n\n By default, match pipe yields MatchObject. Use 'to' in keyword argument\n to change the type of match result.\n\n If 'to' is dict, yield MatchObject.groupdict().\n If 'to' is tuple, yield MatchObject.groups().\n If 'to' is list, yield list(MatchObject.groups()).\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param pattern: The pattern which used to filter data.\n :type pattern: str|unicode\n :param to: What data type the result should be stored. dict|tuple|list\n :type to: type\n :returns: generator", "id": "f2259:m14"} {"signature": "@pipe.funcdef resplit(prev, pattern, *args, **kw):", "body": "maxsplit = if '' not in kw else kw.pop('')pattern_obj = re.compile(pattern, *args, **kw)for s in prev:yield pattern_obj.split(s, maxsplit=maxsplit)", "docstring": "The resplit pipe split previous pipe input by regular expression.\n\n Use 'maxsplit' keyword argument to limit the number of split.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param pattern: The pattern which used to split string.\n :type pattern: str|unicode", "id": "f2259:m15"} {"signature": "@pipe.funcdef sub(prev, pattern, repl, *args, **kw):", "body": "count = if '' not in kw else kw.pop('')pattern_obj = re.compile(pattern, *args, **kw)for s in prev:yield pattern_obj.sub(repl, s, count=count)", "docstring": "sub pipe is a wrapper of re.sub method.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param pattern: The pattern string.\n :type pattern: str|unicode\n :param repl: Check repl argument in re.sub method.\n :type repl: str|unicode|callable", "id": "f2259:m16"} {"signature": "@pipe.funcdef subn(prev, pattern, repl, *args, **kw):", "body": "count = if '' not in kw else kw.pop('')pattern_obj = re.compile(pattern, *args, **kw)for s in prev:yield pattern_obj.subn(repl, s, count=count)", "docstring": "subn pipe is a wrapper of re.subn method.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param pattern: The pattern string.\n :type pattern: str|unicode\n :param repl: Check repl argument in re.sub method.\n :type repl: str|unicode|callable", "id": "f2259:m17"} {"signature": "@pipe.funcdef wildcard(prev, pattern, *args, **kw):", "body": "import fnmatchinv = '' in kw and kw.pop('')pattern_obj = re.compile(fnmatch.translate(pattern), *args, **kw)if not inv:for data in prev:if pattern_obj.match(data):yield dataelse:for data in prev:if not pattern_obj.match(data):yield data", "docstring": "wildcard pipe greps data passed from previous 
generator\n according to given regular expression.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param pattern: The wildcard string which used to filter data.\n :type pattern: str|unicode|re pattern object\n :param inv: If true, invert the match condition.\n :type inv: boolean\n :returns: generator", "id": "f2259:m18"} {"signature": "@pipe.funcdef stdout(prev, endl='', thru=False):", "body": "for i in prev:sys.stdout.write(str(i) + endl)if thru:yield i", "docstring": "This pipe read data from previous iterator and write it to stdout.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param endl: The end-of-line symbol for each output.\n :type endl: str\n :param thru: If true, data will passed to next generator. If false, data\n will be dropped.\n :type thru: bool\n :returns: generator", "id": "f2259:m19"} {"signature": "@pipe.funcdef stderr(prev, endl='', thru=False):", "body": "for i in prev:sys.stderr.write(str(i) + endl)if thru:yield i", "docstring": "This pipe read data from previous iterator and write it to stderr.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param endl: The end-of-line symbol for each output.\n :type endl: str\n :param thru: If true, data will passed to next generator. If false, data\n will be dropped.\n :type thru: bool\n :returns: generator", "id": "f2259:m20"} {"signature": "@pipe.funcdef readline(prev, filename=None, mode='', trim=str.rstrip, start=, end=sys.maxsize):", "body": "if prev is None:if filename is None:raise Exception('')elif is_str_type(filename):file_list = [filename, ]else:file_list = filenameelse:file_list = prevfor fn in file_list:if isinstance(fn, file_type):fd = fnelse:fd = open(fn, mode)try:if start <= and end == sys.maxsize:for line in fd:yield trim(line)else:for line_no, line in enumerate(fd, ):if line_no < start:continueyield trim(line)if line_no >= end:breakfinally:if fd != fn:fd.close()", "docstring": "This pipe get filenames or file object from previous pipe and read the\n content of file. Then, send the content of file line by line to next pipe.\n\n The start and end parameters are used to limit the range of reading from file.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param filename: The files to be read. If None, use previous pipe input as filenames.\n :type filename: None|str|unicode|list|tuple\n :param mode: The mode to open file. default is 'r'\n :type mode: str\n :param trim: The function to trim the line before send to next pipe.\n :type trim: function object.\n :param start: if star is specified, only line number larger or equal to start will be sent.\n :type start: integer\n :param end: The last line number to read.\n :type end: integer\n :returns: generator", "id": "f2259:m21"} {"signature": "@pipe.funcdef fileobj(prev, file_handle, endl='', thru=False):", "body": "if prev is not None:for i in prev:file_handle.write(str(i)+endl)if thru:yield ielse:for data in file_handle:yield data", "docstring": "This pipe read/write data from/to file object which specified by\n file_handle.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param file_handle: The file object to read or write.\n :type file_handle: file object\n :param endl: The end-of-line symbol for each output.\n :type endl: str\n :param thru: If true, data will passed to next generator. 
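Taken together, readline, grep and stdout above form a classic line-filter chain: read a file line by line, keep lines matching a regular expression, print the survivors. A usage sketch built only from commands documented in these records, with the same assumed imports as the earlier pipe example; the file name is a placeholder:

# Print every line of a (placeholder) file that begins with an import statement,
# using the readline / grep / stdout pipes described above.
run(readline('example.py') | grep(r'import\s+\w+') | stdout)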
If false, data\n will be dropped.\n :type thru: bool\n :returns: generator", "id": "f2259:m22"} {"signature": "@pipe.funcdef sh(prev, *args, **kw):", "body": "endl = '' if '' not in kw else kw.pop('')trim = None if '' not in kw else kw.pop('')if trim is None:trim = bytes.rstrip if is_py3 else str.rstripcmdline = ''.join(args)if not cmdline:if prev is not None:for i in prev:yield ielse:while True:yield Noneprocess = subprocess.Popen(cmdline, shell=True,stdin=subprocess.PIPE, stdout=subprocess.PIPE,**kw)if prev is not None:stdin_buffer = StringIO()for i in prev:stdin_buffer.write(i)if endl:stdin_buffer.write(endl)if is_py3:process.stdin.write(stdin_buffer.getvalue().encode(''))else:process.stdin.write(stdin_buffer.getvalue())process.stdin.flush()process.stdin.close()stdin_buffer.close()for line in process.stdout:yield trim(line)process.wait()", "docstring": "sh pipe execute shell command specified by args. If previous pipe exists,\n read data from it and write it to stdin of shell process. The stdout of\n shell process will be passed to next pipe object line by line.\n\n A optional keyword argument 'trim' can pass a function into sh pipe. It is\n used to trim the output from shell process. The default trim function is\n str.rstrip. Therefore, any space characters in tail of\n shell process output line will be removed.\n\n For example:\n\n py_files = result(sh('ls') | strip | wildcard('*.py'))\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param args: The command line arguments. It will be joined by space character.\n :type args: list of string.\n :param kw: arguments for subprocess.Popen.\n :type kw: dictionary of options.\n :returns: generator", "id": "f2259:m23"} {"signature": "@pipe.funcdef walk(prev, inital_path, *args, **kw):", "body": "for dir_path, dir_names, filenames in os.walk(inital_path):for filename in filenames:yield os.path.join(dir_path, filename)", "docstring": "This pipe wrap os.walk and yield absolute path one by one.\n\n :param prev: The previous iterator of pipe.\n :type prev: Pipe\n :param args: The end-of-line symbol for each output.\n :type args: list of string.\n :param kw: The end-of-line symbol for each output.\n :type kw: dictionary of options. 
Add 'endl' in kw to specify end-of-line symbol.\n :returns: generator", "id": "f2259:m24"} {"signature": "@pipe.funcdef join(prev, sep, *args, **kw):", "body": "yield sep.join(prev, *args, **kw)", "docstring": "alias of str.join", "id": "f2259:m25"} {"signature": "@pipe.funcdef substitute(prev, *args, **kw):", "body": "template_obj = string.Template(*args, **kw)for data in prev:yield template_obj.substitute(data)", "docstring": "alias of string.Template.substitute", "id": "f2259:m26"} {"signature": "@pipe.funcdef safe_substitute(prev, *args, **kw):", "body": "template_obj = string.Template(*args, **kw)for data in prev:yield template_obj.safe_substitute(data)", "docstring": "alias of string.Template.safe_substitute", "id": "f2259:m27"} {"signature": "@pipe.funcdef to_str(prev, encoding=None):", "body": "first = next(prev)if isinstance(first, str):if encoding is None:yield firstfor s in prev:yield selse:yield first.encode(encoding)for s in prev:yield s.encode(encoding)else:if encoding is None:encoding = sys.stdout.encoding or ''yield first.decode(encoding)for s in prev:yield s.decode(encoding)", "docstring": "Convert data from previous pipe with specified encoding.", "id": "f2259:m28"} {"signature": "def register_default_types():", "body": "register_type(type, pipe.map)register_type(types.FunctionType, pipe.map)register_type(types.MethodType, pipe.map)register_type(tuple, seq)register_type(list, seq)register_type(types.GeneratorType, seq)register_type(string_type, sh)register_type(unicode_type, sh)register_type(file_type, fileobj)if is_py3:register_type(range, seq)register_type(map, seq)", "docstring": "Regiser all default type-to-pipe convertors.", "id": "f2259:m29"} {"signature": "def _get_env(self, env_var):", "body": "value = os.environ.get(env_var)if not value:raise ValueError('' % env_var)return value", "docstring": "Helper to read an environment variable", "id": "f2265:c2:m1"} {"signature": "def __bool__(self):", "body": "return bool(self.type)", "docstring": "Return True if an exception occurred", "id": "f2266:c6:m1"} {"signature": "@contextlib.contextmanagerdef NoNoneDictMutator(destination, **changes):", "body": "original = {}for key, value in changes.items():original[key] = destination.get(key)if value is None:if key in destination:del destination[key]else:destination[key] = valueyieldfor key, value in original.items():if value is None:if key in destination:del destination[key]else:destination[key] = value", "docstring": "Helper context manager to make and unmake changes to a dict.\n\n A None is not a valid value for the destination, and so means that the\n associated name should be removed.", "id": "f2267:m0"} {"signature": "def Environ(**changes):", "body": "return NoNoneDictMutator(os.environ, **changes)", "docstring": "A context manager to temporarily change the os.environ", "id": "f2267:m1"} {"signature": "def random_string(k, source=ALPHABET):", "body": "result = ''for i in range(, k):result += random.choice(source)return result", "docstring": "Generate a random string with length k", "id": "f2267:m2"} {"signature": "def main(argv=None):", "body": "if argv is None:argv = sys.argv[:]cli = CommandLineTool()return cli.run(argv)", "docstring": "Main command line interface.", "id": "f2277:m0"} {"signature": "def input_password(self, prompt):", "body": "return self.pass_from_pipe() or getpass.getpass(prompt)", "docstring": "Retrieve password from input.", "id": "f2277:c0:m2"} {"signature": "@classmethoddef pass_from_pipe(cls):", "body": "is_pipe = not sys.stdin.isatty()return 
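walk above wraps os.walk and emits one joined path per file; combined with wildcard it behaves like a recursive glob. A sketch, again under the same assumed imports as the earlier pipe examples:

# All .py files under the current directory, via the walk and wildcard pipes above.
py_files = result(walk('.') | wildcard('*.py'))
print(py_files)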
is_pipe and cls.strip_last_newline(sys.stdin.read())", "docstring": "Return password from pipe if not on TTY, else False.", "id": "f2277:c0:m3"} {"signature": "@staticmethoddef strip_last_newline(str):", "body": "return str[:-str.endswith('')]", "docstring": "Strip one last newline, if present.", "id": "f2277:c0:m4"} {"signature": "def output_password(self, password):", "body": "print(password, file=sys.stdout)", "docstring": "Output the password to the user.\n\n This mostly exists to ease the testing process.", "id": "f2277:c0:m5"} {"signature": "def once(func):", "body": "def wrapper(*args, **kwargs):if not hasattr(func, ''):func.always_returns = func(*args, **kwargs)return func.always_returnsreturn functools.wraps(func)(wrapper)", "docstring": "Decorate func so it's only ever called the first time.\n\nThis decorator can ensure that an expensive or non-idempotent function\nwill not be expensive on subsequent calls and is idempotent.\n\n>>> func = once(lambda a: a+3)\n>>> func(3)\n6\n>>> func(9)\n6\n>>> func('12')\n6", "id": "f2280:m0"} {"signature": "def suppress_exceptions(callables, exceptions=Exception):", "body": "for callable in callables:try:yield callable()except exceptions:pass", "docstring": "yield the results of calling each element of callables, suppressing\nany indicated exceptions.", "id": "f2280:m1"} {"signature": "def _data_root_Linux():", "body": "fallback = os.path.expanduser('')root = os.environ.get('', None) or fallbackreturn os.path.join(root, '')", "docstring": "Use freedesktop.org Base Dir Specfication to determine storage\nlocation.", "id": "f2281:m3"} {"signature": "def _check_old_config_root():", "body": "globals()[''] = lambda: Noneconfig_file_new = os.path.join(_config_root_Linux(), '')config_file_old = os.path.join(_data_root_Linux(), '')if os.path.isfile(config_file_old) and not os.path.isfile(config_file_new):msg = (\"\"\"\"\"\")raise RuntimeError(msg.format(**locals()))", "docstring": "Prior versions of keyring would search for the config\nin XDG_DATA_HOME, but should probably have been\nsearching for config in XDG_CONFIG_HOME. 
If the\nconfig exists in the former but not in the latter,\nraise a RuntimeError to force the change.", "id": "f2281:m4"} {"signature": "def _config_root_Linux():", "body": "_check_old_config_root()fallback = os.path.expanduser('')key = ''root = os.environ.get(key, None) or fallbackreturn os.path.join(root, '')", "docstring": "Use freedesktop.org Base Dir Specfication to determine config\nlocation.", "id": "f2281:m5"} {"signature": "def set_keyring(keyring):", "body": "global _keyring_backendif not isinstance(keyring, backend.KeyringBackend):raise TypeError(\"\")_keyring_backend = keyring", "docstring": "Set current keyring backend.", "id": "f2284:m0"} {"signature": "def get_keyring():", "body": "return _keyring_backend", "docstring": "Get current keyring backend.", "id": "f2284:m1"} {"signature": "def disable():", "body": "root = platform.config_root()try:os.makedirs(root)except OSError:passfilename = os.path.join(root, '')if os.path.exists(filename):msg = \"\".format(**locals())raise RuntimeError(msg)with open(filename, '') as file:file.write('')", "docstring": "Configure the null keyring as the default.", "id": "f2284:m2"} {"signature": "def get_password(service_name, username):", "body": "return _keyring_backend.get_password(service_name, username)", "docstring": "Get password from the specified service.", "id": "f2284:m3"} {"signature": "def set_password(service_name, username, password):", "body": "_keyring_backend.set_password(service_name, username, password)", "docstring": "Set password for the user in the specified service.", "id": "f2284:m4"} {"signature": "def delete_password(service_name, username):", "body": "_keyring_backend.delete_password(service_name, username)", "docstring": "Delete the password for the user in the specified service.", "id": "f2284:m5"} {"signature": "def get_credential(service_name, username):", "body": "return _keyring_backend.get_credential(service_name, username)", "docstring": "Get a Credential for the specified service.", "id": "f2284:m6"} {"signature": "def init_backend(limit=None):", "body": "backend._limit = limitkeyrings = filter(limit, backend.get_all_keyring())set_keyring(load_env()or load_config()or max(keyrings, default=fail.Keyring(), key=backend.by_priority))", "docstring": "Load a keyring specified in the config file or infer the best available.\n\nLimit, if supplied, should be a callable taking a backend and returning\nTrue if that backend should be included for consideration.", "id": "f2284:m8"} {"signature": "def _load_keyring_class(keyring_name):", "body": "module_name, sep, class_name = keyring_name.rpartition('')__import__(module_name)module = sys.modules[module_name]return getattr(module, class_name)", "docstring": "Load the keyring class indicated by name.\n\nThese popular names are tested to ensure their presence.\n\n>>> popular_names = [\n... 'keyring.backends.Windows.WinVaultKeyring',\n... 'keyring.backends.OS_X.Keyring',\n... 'keyring.backends.kwallet.DBusKeyring',\n... 'keyring.backends.SecretService.Keyring',\n... ]\n>>> list(map(_load_keyring_class, popular_names))\n[...]\n\nThese legacy names are retained for compatibility.\n\n>>> legacy_names = [\n... 
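The module-level helpers above (get_password, set_password, delete_password) are keyring's public entry points; they simply forward to the currently selected backend. Typical use, with placeholder service and user names:

import keyring

keyring.set_password('example-service', 'alice', 's3cret')
print(keyring.get_password('example-service', 'alice'))   # 's3cret'
keyring.delete_password('example-service', 'alice')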
]\n>>> list(map(_load_keyring_class, legacy_names))\n[...]", "id": "f2284:m9"} {"signature": "def load_keyring(keyring_name):", "body": "class_ = _load_keyring_class(keyring_name)class_.priorityreturn class_()", "docstring": "Load the specified keyring by name (a fully-qualified name to the\nkeyring, such as 'keyring.backends.file.PlaintextKeyring')", "id": "f2284:m10"} {"signature": "def load_env():", "body": "try:return load_keyring(os.environ[''])except KeyError:pass", "docstring": "Load a keyring configured in the environment variable.", "id": "f2284:m11"} {"signature": "def load_config():", "body": "filename = ''keyring_cfg = os.path.join(platform.config_root(), filename)if not os.path.exists(keyring_cfg):returnconfig = configparser.RawConfigParser()config.read(keyring_cfg)_load_keyring_path(config)try:if config.has_section(\"\"):keyring_name = config.get(\"\", \"\").strip()else:raise configparser.NoOptionError('', '')except (configparser.NoOptionError, ImportError):logger = logging.getLogger('')logger.warning(\"\"+ \"\" % keyring_cfg)returnreturn load_keyring(keyring_name)", "docstring": "Load a keyring using the config file in the config root.", "id": "f2284:m12"} {"signature": "def _load_keyring_path(config):", "body": "try:path = config.get(\"\", \"\").strip()sys.path.insert(, path)except (configparser.NoOptionError, configparser.NoSectionError):pass", "docstring": "load the keyring-path option (if present)", "id": "f2284:m13"} {"signature": "def get_password(self, service, username):", "body": "if not self.connected(service):raise KeyringLocked(\"\")if not self.iface.hasEntry(self.handle, service, username, self.appid):return Nonepassword = self.iface.readPassword(self.handle, service, username, self.appid)return str(password)", "docstring": "Get password of the username for the service", "id": "f2285:c0:m4"} {"signature": "def set_password(self, service, username, password):", "body": "if not self.connected(service):raise PasswordSetError(\"\")self.iface.writePassword(self.handle, service, username, password, self.appid)", "docstring": "Set password for the username of the service", "id": "f2285:c0:m5"} {"signature": "def delete_password(self, service, username):", "body": "if not self.connected(service):raise PasswordDeleteError(\"\")if not self.iface.hasEntry(self.handle, service, username, self.appid):raise PasswordDeleteError(\"\")self.iface.removeEntry(self.handle, service, username, self.appid)", "docstring": "Delete the password for the username of the service.", "id": "f2285:c0:m6"} {"signature": "@staticmethoddef unpack(word):", "body": "if not isinstance(word, str):return wordval, = struct.unpack('', word.encode(''))return val", "docstring": "r\"\"\"\n >>> PackedAttributes.unpack(0)\n 0\n >>> PackedAttributes.unpack('\\x00\\x00\\x00\\x01')\n 1\n >>> PackedAttributes.unpack('abcd')\n 1633837924", "id": "f2287:c5:m1"} {"signature": "def get_preferred_collection(self):", "body": "bus = secretstorage.dbus_init()try:if hasattr(self, ''):collection = secretstorage.Collection(bus, self.preferred_collection)else:collection = secretstorage.get_default_collection(bus)except exceptions.SecretStorageException as e:raise InitError(\"\" % e)if collection.is_locked():collection.unlock()if collection.is_locked(): raise KeyringLocked(\"\")return collection", "docstring": "If self.preferred_collection contains a D-Bus path,\n the collection at that address is returned. 
Otherwise,\n the default collection is returned.", "id": "f2288:c0:m1"} {"signature": "def get_password(self, service, username):", "body": "collection = self.get_preferred_collection()items = collection.search_items({\"\": username, \"\": service})for item in items:if hasattr(item, ''):item.unlock()if item.is_locked(): raise KeyringLocked('')return item.get_secret().decode('')", "docstring": "Get password of the username for the service", "id": "f2288:c0:m2"} {"signature": "def set_password(self, service, username, password):", "body": "collection = self.get_preferred_collection()attributes = {\"\": self.appid,\"\": service,\"\": username}label = \"\".format(username, service)collection.create_item(label, attributes, password, replace=True)", "docstring": "Set password for the username of the service", "id": "f2288:c0:m3"} {"signature": "def delete_password(self, service, username):", "body": "collection = self.get_preferred_collection()items = collection.search_items({\"\": username, \"\": service})for item in items:return item.delete()raise PasswordDeleteError(\"\")", "docstring": "Delete the stored password (only the first one)", "id": "f2288:c0:m4"} {"signature": "@properties.ClassProperty@classmethoddef priority(cls):", "body": "if missing_deps:raise RuntimeError(\"\")return ", "docstring": "If available, the preferred backend on Windows.", "id": "f2289:c0:m0"} {"signature": "@properties.ClassProperty@classmethoddef priority(cls):", "body": "return * (len(cls.backends) > )", "docstring": "High-priority if there are backends to chain, otherwise 0.", "id": "f2291:c0:m0"} {"signature": "@properties.ClassProperty@classmethoddef backends(cls):", "body": "allowed = (keyringfor keyring in filter(backend._limit, backend.get_all_keyring())if not isinstance(keyring, ChainerBackend)and keyring.priority > )return sorted(allowed, key=backend.by_priority, reverse=True)", "docstring": "Discover all keyrings for chaining.", "id": "f2291:c0:m1"} {"signature": "@properties.ClassProperty@classmethoddef priority(cls):", "body": "if platform.system() != '':raise RuntimeError(\"\")return ", "docstring": "Preferred for all macOS environments.", "id": "f2292:c0:m0"} {"signature": "def _load_plugins():", "body": "group = ''entry_points = entrypoints.get_group_all(group=group)for ep in entry_points:try:log.info('', ep.name)init_func = ep.load()if callable(init_func):init_func()except Exception:log.exception(\"\" % ep)", "docstring": "Locate all setuptools entry points by the name 'keyring backends'\nand initialize them.\nAny third-party library may register an entry point by adding the\nfollowing to their setup.py::\n\n entry_points = {\n 'keyring.backends': [\n 'plugin_name = mylib.mymodule:initialize_func',\n ],\n },\n\n`plugin_name` can be anything, and is only used to display the name\nof the plugin at initialization time.\n\n`initialize_func` is optional, but will be invoked if callable.", "id": "f2293:m0"} {"signature": "@util.oncedef get_all_keyring():", "body": "_load_plugins()viable_classes = KeyringBackend.get_viable_backends()rings = util.suppress_exceptions(viable_classes, exceptions=TypeError)return list(rings)", "docstring": "Return a list of all implemented keyrings that can be constructed without\nparameters.", "id": "f2293:m1"} {"signature": "def priority(cls):", "body": "", "docstring": "Each backend class must supply a priority, a number (float or integer)\nindicating the priority of the backend relative to all other backends.\nThe priority need not be static -- it may (and should) vary 
based on\nattributes of the environment in which it runs (platform, available\npackages, etc.).\n\nA higher number indicates a higher priority. The priority should raise\na RuntimeError with a message indicating the underlying cause if the\nbackend is not suitable for the current environment.\n\nAs a rule of thumb, a priority greater than zero but less than one is\nsuitable, but a priority of one or greater is recommended.", "id": "f2293:c1:m0"} {"signature": "@classmethoddef get_viable_backends(cls):", "body": "return filter(operator.attrgetter(''), cls._classes)", "docstring": "Return all subclasses deemed viable.", "id": "f2293:c1:m2"} {"signature": "@properties.ClassProperty@classmethoddef name(cls):", "body": "parent, sep, mod_name = cls.__module__.rpartition('')mod_name = mod_name.replace('', '')return ''.join([mod_name, cls.__name__])", "docstring": "The keyring name, suitable for display.\n\nThe name is derived from module and class name.", "id": "f2293:c1:m3"} {"signature": "@abc.abstractmethoddef get_password(self, service, username):", "body": "return None", "docstring": "Get password of the username for the service", "id": "f2293:c1:m5"} {"signature": "@abc.abstractmethoddef set_password(self, service, username, password):", "body": "raise errors.PasswordSetError(\"\")", "docstring": "Set password for the username of the service.\n\n If the backend cannot store passwords, raise\n NotImplementedError.", "id": "f2293:c1:m6"} {"signature": "def delete_password(self, service, username):", "body": "raise errors.PasswordDeleteError(\"\")", "docstring": "Delete the password for the username of the service.\n\n If the backend cannot store passwords, raise\n NotImplementedError.", "id": "f2293:c1:m7"} {"signature": "def get_credential(self, service, username):", "body": "if username is not None:password = self.get_password(service, username)if password is not None:return credentials.SimpleCredential(username,password,)return None", "docstring": "Gets the username and password for the service.\n Returns a Credential instance.\n\n The *username* argument is optional and may be omitted by\n the caller or ignored by the backend. Callers must use the\n returned username.", "id": "f2293:c1:m8"}
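The KeyringBackend records above spell out the backend contract: a priority plus get_password/set_password/delete_password (get_credential has a usable default). A minimal in-memory sketch of a custom backend, assuming only the public keyring package API (keyring.backend.KeyringBackend, keyring.set_keyring); the class name and its dict-based storage are illustrative, not part of the source.

import keyring
import keyring.backend

class InMemoryKeyring(keyring.backend.KeyringBackend):
    """Illustrative backend that keeps secrets in a process-local dict."""
    priority = 0.5  # rule of thumb above: between zero and one for niche backends
    _store = {}

    def get_password(self, service, username):
        return self._store.get((service, username))

    def set_password(self, service, username, password):
        self._store[(service, username)] = password

    def delete_password(self, service, username):
        self._store.pop((service, username), None)

keyring.set_keyring(InMemoryKeyring())
keyring.set_password('example-service', 'alice', 's3cret')
assert keyring.get_password('example-service', 'alice') == 's3cret'

A third-party package would normally expose such a class through the 'keyring.backends' entry-point group described in the _load_plugins record rather than calling set_keyring directly.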
{"signature": "@abc.abstractmethoddef encrypt(self, value):", "body": "pass", "docstring": "Encrypt the value.", "id": "f2293:c2:m0"} {"signature": "@abc.abstractmethoddef decrypt(self, value):", "body": "pass", "docstring": "Decrypt the value.", "id": "f2293:c2:m1"} {"signature": "def get_queryset(self):", "body": "return Question.objects.filter(pub_date__lte=timezone.now()).order_by('')[:]", "docstring": "Return the last five published questions.", "id": "f2297:c0:m0"} {"signature": "def get_queryset(self):", "body": "return Question.objects.filter(pub_date__lte=timezone.now())", "docstring": "Excludes any questions that aren't published yet.", "id": "f2297:c1:m0"} {"signature": "def get_queryset(self):", "body": "return Question.objects.filter(pub_date__lte=timezone.now())", "docstring": "Excludes any questions that aren't published yet.", "id": "f2297:c2:m0"} {"signature": "def create_question(question_text, days):", "body": "time = timezone.now() + datetime.timedelta(days=days)return Question.objects.create(question_text=question_text, pub_date=time)", "docstring": "Creates a question with the given `question_text` published the given\nnumber of `days` offset to now (negative for questions published\nin the past, positive for questions that have yet to be published).", "id": "f2300:m0"} {"signature": "def get_default_fields(self):", "body": "field_names = self._meta.get_all_field_names()if '' in field_names:field_names.remove('')return field_names", "docstring": "get all fields of model, except id", "id": "f2309:c0:m0"} {"signature": "def get_field_value(self, field, value_verbose=True):", "body": "if not value_verbose:\"\"\"\"\"\"value = field._get_val_from_obj(self)else:if isinstance(field, ForeignKey):value = getattr(self, field.name)else:try:value = self._get_FIELD_display(field)except :value = field._get_val_from_obj(self)if(value == True or value == False or isinstance(value, (int, float))):return valuereturn unicode(value)", "docstring": "Return the value for display rather than the raw value stored in the database\nfield is a field type from the model\nvalue_verbose: if True, return the display value (choices are converted to their labels);\nif value_verbose is False, return the actual stored value", "id": "f2309:c0:m1"} {"signature": "def get_fields(self, field_verbose=True, value_verbose=True, fields=[], extra_fields=[], remove_fields = []):", "body": "field_list = []for field in self.__class__._meta.fields:if field.name in remove_fields:continueif fields and field.name not in fields:continueif field.verbose_name and field_verbose:value_tuple = (field.verbose_name, self.get_field_value(field, value_verbose))else:value_tuple = (field.name, self.get_field_value(field, value_verbose))field_list.append(value_tuple)for name in extra_fields:method = getattr(self, name)result = method()value_tuple = (name, result)field_list.append(value_tuple)return field_list", "docstring": "Return a list of field names and their corresponding values\nfield_verbose: if True, return each field's verbose_name as defined; if False, return its name\nvalue_verbose: if True, return the display value (choices are converted to their labels); if False, return the actual stored value\nfields specifies the fields to include\nextra_fields specifies non-field attributes that need special handling, e.g. methods\nremove_fields specifies fields to exclude", "id": "f2309:c0:m2"} {"signature": "def get_querydict(self):", "body": "if self.method:querydict = getattr(self.request, self.method.upper())else:querydict = getattr(self.request, ''.upper())query_dict = dict(list(querydict.items()))return query_dict", "docstring": "This function depends on self.method\nself.method is unused for now; the querydict always comes from POST", "id": "f2310:c1:m1"} {"signature": "def get_filter_dict(self):", "body": "querydict = self.get_querydict()if '' in querydict:querydict.pop('')try:page = int(querydict.pop(''))rows = int(querydict.pop('')) setattr(self, '', page)setattr(self, '', rows)except KeyError:setattr(self, '', None)setattr(self, '', None)try:order = querydict.pop('')sort = querydict.pop('')if order == '':setattr(self, '', sort)else:setattr(self, '', ''% sort)except KeyError:setattr(self, '', None)remove_key = []for key in querydict:if querydict[key] == '':remove_key.append(key)for key in remove_key:querydict.pop(key)return querydict", "docstring": "Process the filter fields\nrows: how many rows per page\npage: page number, starting at 1\norder: desc or asc\nsort: field to sort by, used as order_by(sort)\nfield names and formats in the querydict must be directly usable in a query", "id": "f2310:c1:m2"} {"signature": "def get_slice_start(self):", "body": "value = Noneif self.easyui_page:value = (self.easyui_page -) * self.easyui_rowsreturn value", "docstring": "Return the start index of the queryset slice", "id": "f2310:c1:m3"} {"signature": "def get_slice_end(self):", "body": "value = Noneif self.easyui_page:value = self.easyui_page * self.easyui_rowsreturn value", "docstring": "Return the end index of the queryset slice", "id": "f2310:c1:m4"} {"signature": "def get_queryset(self):", "body": "filter_dict = self.get_filter_dict()queryset = super(EasyUIListMixin, self).get_queryset()queryset = queryset.filter(**filter_dict)if self.easyui_order:queryset = queryset.order_by(self.easyui_order)return queryset", "docstring": "queryset", "id": "f2310:c1:m5"} {"signature": "def get_limit_queryset(self):", "body": "queryset = self.get_queryset()limit_queryset = queryset.all()[self.get_slice_start() :self.get_slice_end()] return limit_queryset", "docstring": "Return the queryset after pagination", "id": "f2310:c1:m6"} {"signature": "def get_easyui_context(self, **kwargs):", "body": "context = {}queryset = self.get_queryset()limit_queryset = self.get_limit_queryset()data = model_serialize(limit_queryset, self.extra_fields, self.remove_fields)count = queryset.count()context.update(rows=data)context.update(total=count)return context", "docstring": "Initialize an empty context", "id": "f2310:c1:m7"} {"signature": "def get_template_names(self):", "body": "names = super(EasyUIDatagridView, self).get_template_names()names.append('')return names", "docstring": "Default template for the datagrid", "id": "f2311:c0:m0"} {"signature": "def get_template_names(self):", "body": "names = super(EasyUICreateView, self).get_template_names()names.append('')return names", "docstring": "Default template for the datagrid", "id": "f2311:c1:m0"}
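The EasyUIListMixin records above reduce paging to simple slice arithmetic: with 1-based page numbers, a page of rows items spans [(page - 1) * rows, page * rows), and the context is the datagrid payload {'rows': ..., 'total': ...}. A standalone sketch of that calculation with made-up sample records (no Django dependency); the helper names are illustrative.

def slice_bounds(page, rows):
    # 1-based page number -> half-open slice [start, end) over the full result set
    start = (page - 1) * rows
    end = page * rows
    return start, end

def easyui_payload(records, page, rows):
    # Mirrors get_easyui_context: the current page of rows plus the total count
    start, end = slice_bounds(page, rows)
    return {'rows': records[start:end], 'total': len(records)}

data = [{'id': i} for i in range(1, 26)]   # 25 fake records
print(slice_bounds(3, 10))                  # (20, 30)
print(easyui_payload(data, 3, 10))          # rows 21-25, total 25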
"f2311:c1:m0"} {"signature": "def get_template_names(self):", "body": "names = super(EasyUIUpdateView, self).get_template_names()names.append('')return names", "docstring": "datagrid\u7684\u9ed8\u8ba4\u6a21\u677f", "id": "f2311:c2:m0"} {"signature": "def get_template_names(self):", "body": "names = super(EasyUIDeleteView, self).get_template_names()names.append('')return names", "docstring": "datagrid\u7684\u9ed8\u8ba4\u6a21\u677f", "id": "f2311:c3:m0"} {"signature": "def get_template_names(self):", "body": "names = super(CommandDatagridView, self).get_template_names()names.append('')return names", "docstring": "datagrid\u7684\u9ed8\u8ba4\u6a21\u677f", "id": "f2311:c4:m0"} {"signature": "@method_decorator(login_required(login_url=reverse_lazy('')))def dispatch(self, request, *args, **kwargs):", "body": "if getattr(self, '', None) and self.permission_required:app_label = self.model._meta.app_labelmodel_name = self.model.__name__.lower()permission_required = self.permission_required.lower()permission = '' % {'':app_label,'':permission_required,'': model_name}if not self.request.user.has_perm(permission):return HttpResponseRedirect(reverse_lazy(''))return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)", "docstring": "\u589e\u52a0\u4e86\u6743\u9650\u63a7\u5236\uff0c\u5f53self\u5b58\u5728model\u548cpermission_required\u65f6\uff0c\u624d\u4f1a\u68c0\u67e5\u6743\u9650", "id": "f2312:c0:m0"} {"signature": "def success(request):", "body": "return HttpResponse('')", "docstring": "\u589e\u5220\u6539\u64cd\u4f5c\u6210\u529f\u4e4b\u540e\u8fd4\u56de\u8fd9\u4e2a\u9875\u9762", "id": "f2313:m0"} {"signature": "def get_url(request):", "body": "menu_id = request.GET.get('')m_object = Menu.objects.get(pk=menu_id)namespace = m_object.namespaceviewname = m_object.viewnameurl_string = '' %(namespace, viewname)url = reverse(url_string)return HttpResponse(url)", "docstring": "\u901a\u8fc7menu_id\uff0c\u83b7\u53d6\u5bf9\u5e94\u7684URL\neg. 
/easyui/MenuListView/", "id": "f2313:m1"} {"signature": "def post(self, request, *args, **kwargs):", "body": "query_dict = dict(list(self.request.POST.items()))row_index = query_dict.pop('')text = query_dict.pop('', None)app_label = query_dict.pop('')model_name = query_dict.pop('')method = query_dict.pop('')pk = query_dict.pop('')model = get_model(app_label, model_name)object = model.objects.get(pk=pk)try:status = func = getattr(object, method)print(query_dict)return_value = func(**query_dict)message = return_valueexcept Exception as error_message:status = message = str(error_message)if not message:message = text+''return self.render_to_json_response({'':status, '':message, '':row_index})", "docstring": "Handles POST requests only\nargument:\n row_index HTML\u4e2d\u7b2c\u51e0\u884c\u7684\u6807\u8bb0\uff0c\u539f\u503c\u8fd4\u56de\n app_label\n model_name\n pk app_label + model_name + pk \u53ef\u4ee5\u83b7\u53d6\u4e00\u4e2aobject\n method object + method \u5f97\u5230\u8981\u8c03\u7528\u7684\u65b9\u6cd5 \n \u5176\u5b83\u53c2\u6570\uff0chtml\u548cmethod\u4e2d\u540c\u65f6\u5b9a\u4e49, \u5728\u4e0a\u9762\u7684\u65b9\u6cd5\u4e2d\u4f7f\u7528", "id": "f2313:c0:m1"} {"signature": "def post(self, request, *args, **kwargs):", "body": "form_class = self.get_form_class()form = self.get_form(form_class)if form.is_valid():return self.form_valid(form)else:return self.form_invalid(form)", "docstring": "Handles POST requests, instantiating a form instance with the passed\nPOST variables and then checked for validity.", "id": "f2313:c2:m1"} {"signature": "def get_menu_checked(self, request):", "body": "checked_id = []qd = request.GETquery_dict = dict(list(qd.items()))if query_dict:app_label = query_dict['']model_name = query_dict['']pk = query_dict['']model = get_model(app_label, model_name)object = model.objects.get(pk=pk)checked_id = object.menus_checked.split('')return checked_id", "docstring": "\u83b7\u53d6\u7528\u6237\u6216\u8005\u7528\u6237\u7ec4checked\u7684\u83dc\u5355\u5217\u8868\nusermenu_form.html \u4e2d\u5b9a\u4e49\nusermenu \u8fd9\u4e24\u4e2amodel\u7684\u5b9a\u4e49\u7c7b\u4f3c\uff0c\u6bd4\u5982menus_checked\u548cmenus_show\ngroupmenu\n@return eg. ['1', '8', '9', '10' ]\n\u83b7\u53d6\u7528\u6237\u6216\u8005\u7528\u6237\u7ec4\u7684check_ids,\u4f1a\u7ed9\u51faapp_label, model_name, pk eg. 
/easyui/menulistview/?app_label=easyui&model_name=UserMenu&pk=1", "id": "f2313:c3:m1"} {"signature": "def model_serialize(queryset, extra_fields=[], remove_fields = [], fields = []):", "body": "return_list = []for object in queryset:value_dict = dict(object.get_fields(field_verbose=False, value_verbose=True, fields=fields, remove_fields=remove_fields, extra_fields=extra_fields))return_list.append(value_dict)return return_list", "docstring": "@param queryset queryset\n@return a list of dict [{}, {}]\n\u81ea\u5b9a\u4e49\u7684json\u8f6c\u6362\u51fd\u6570\uff0c\u8ddfextramixin\u4e2d\u7684get_fields\u5bc6\u5207\u76f8\u5173", "id": "f2316:m0"} {"signature": "def register_views(app_name, view_filename, urlpatterns=None):", "body": "app_module = __import__(app_name)view_module = getattr(app_module, view_filename)views = dir(view_module)for view_name in views:if view_name.endswith(''):view = getattr(view_module, view_name)if isinstance(view, object):if urlpatterns:urlpatterns += patterns('',url(r'' % view_name, view.as_view(), name=view_name),)else:urlpatterns = patterns('',url(r'' % view_name, view.as_view(), name=view_name),)else:passreturn urlpatterns", "docstring": "app_name APP\u540d\nview_filename views \u6240\u5728\u7684\u6587\u4ef6\nurlpatterns url\u4e2d\u5df2\u7ecf\u5b58\u5728\u7684urlpatterns\n\nreturn urlpatterns\n\n\u53ea\u5bfc\u5165View\u7ed3\u5c3e\u7684\uff0c\u662f\u7c7b\u7684\u89c6\u56fe", "id": "f2316:m1"} {"signature": "def crypto(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": "This will return an array of quotes for all Cryptocurrencies supported by the IEX API. Each element is a standard quote object with four additional keys.\n\n https://iexcloud.io/docs/api/#crypto\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2329:m0"} {"signature": "def cryptoDF(token='', version=''):", "body": "df = pd.DataFrame(crypto(token, version))_toDatetime(df)_reindex(df, '')return df", "docstring": "This will return an array of quotes for all Cryptocurrencies supported by the IEX API. Each element is a standard quote object with four additional keys.\n\n https://iexcloud.io/docs/api/#crypto\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2329:m1"} {"signature": "def sentiment(symbol, type='', date=None, token='', version=''):", "body": "_raiseIfNotStr(symbol)if date:date = _strOrDate(date)return _getJson(''.format(symbol=symbol, type=type, date=date), token, version)return _getJson(''.format(symbol=symbol, type=type), token, version)", "docstring": "This endpoint provides social sentiment data from StockTwits. Data can be viewed as a daily value, or by minute for a given date.\n\n https://iexcloud.io/docs/api/#social-sentiment\n Continuous\n\n Args:\n symbol (string); Ticker to request\n type (string); 'daily' or 'minute'\n date (string); date in YYYYMMDD or datetime\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2329:m2"} {"signature": "def sentimentDF(symbol, type='', date=None, token='', version=''):", "body": "ret = sentiment(symbol, type, date, token, version)if type == '':ret = [ret]df = pd.DataFrame(ret)_toDatetime(df)return df", "docstring": "This endpoint provides social sentiment data from StockTwits. 
Data can be viewed as a daily value, or by minute for a given date.\n\n https://iexcloud.io/docs/api/#social-sentiment\n Continuous\n\n Args:\n symbol (string); Ticker to request\n type (string); 'daily' or 'minute'\n date (string); date in YYYYMMDD or datetime\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2329:m3"} {"signature": "def balanceSheet(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "Pulls balance sheet data. Available quarterly (4 quarters) and annually (4 years)\n\n https://iexcloud.io/docs/api/#balance-sheet\n Updates at 8am, 9am UTC daily\n\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m0"} {"signature": "def balanceSheetDF(symbol, token='', version=''):", "body": "val = balanceSheet(symbol, token, version)df = pd.io.json.json_normalize(val, '', '')_toDatetime(df)_reindex(df, '')return df", "docstring": "Pulls balance sheet data. Available quarterly (4 quarters) and annually (4 years)\n\n https://iexcloud.io/docs/api/#balance-sheet\n Updates at 8am, 9am UTC daily\n\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m1"} {"signature": "def batch(symbols, fields=None, range_='', last=, token='', version=''):", "body": "fields = fields or _BATCH_TYPES[:] if not isinstance(symbols, [].__class__):if not isinstance(symbols, str):raise PyEXception('')if isinstance(fields, str):fields = [fields]if range_ not in _TIMEFRAME_CHART:raise PyEXception('' % str(_TIMEFRAME_CHART))if isinstance(symbols, str):route = ''.format(symbols, ''.join(fields), range_, last)return _getJson(route, token, version)if len(symbols) > :raise PyEXception('')route = ''.format(''.join(symbols), ''.join(fields), range_, last)return _getJson(route, token, version)", "docstring": "Batch several data requests into one invocation\n\n https://iexcloud.io/docs/api/#batch-requests\n\n\n Args:\n symbols (list); List of tickers to request\n fields (list); List of fields to request\n range_ (string); Date range for chart\n last (int);\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: results in json", "id": "f2330:m2"} {"signature": "def batchDF(symbols, fields=None, range_='', last=, token='', version=''):", "body": "x = batch(symbols, fields, range_, last, token, version)ret = {}if isinstance(symbols, str):for field in x.keys():ret[field] = _MAPPING[field](x[field])else:for symbol in x.keys():for field in x[symbol].keys():if field not in ret:ret[field] = pd.DataFrame()dat = x[symbol][field]dat = _MAPPING[field](dat)dat[''] = symbolret[field] = pd.concat([ret[field], dat], sort=True)return ret", "docstring": "Batch several data requests into one invocation\n\n https://iexcloud.io/docs/api/#batch-requests\n\n\n Args:\n symbols (list); List of tickers to request\n fields (list); List of fields to request\n range_ (string); Date range for chart\n last (int);\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: results in json", "id": "f2330:m3"} {"signature": "def bulkBatch(symbols, fields=None, range_='', last=, token='', version=''):", "body": "fields = fields or _BATCH_TYPESargs = []empty_data = []list_orig = empty_data.__class__if not isinstance(symbols, list_orig):raise PyEXception('')for i in 
range(, len(symbols), ):args.append((symbols[i:i+], fields, range_, last, token, version))pool = ThreadPool()rets = pool.starmap(batch, args)pool.close()ret = {}for i, d in enumerate(rets):symbols_subset = args[i][]if len(d) != len(symbols_subset):empty_data.extend(list_orig(set(symbols_subset) - set(d.keys())))ret.update(d)for k in empty_data:if k not in ret:if isinstance(fields, str):ret[k] = {}else:ret[k] = {x: {} for x in fields}return ret", "docstring": "Optimized batch to fetch as much as possible at once\n\n https://iexcloud.io/docs/api/#batch-requests\n\n\n Args:\n symbols (list); List of tickers to request\n fields (list); List of fields to request\n range_ (string); Date range for chart\n last (int);\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: results in json", "id": "f2330:m4"} {"signature": "def bulkBatchDF(symbols, fields=None, range_='', last=, token='', version=''):", "body": "dat = bulkBatch(symbols, fields, range_, last, token, version)ret = {}for symbol in dat:for field in dat[symbol]:if field not in ret:ret[field] = pd.DataFrame()d = dat[symbol][field]d = _MAPPING[field](d)d[''] = symbolret[field] = pd.concat([ret[field], d], sort=True)return ret", "docstring": "Optimized batch to fetch as much as possible at once\n\n https://iexcloud.io/docs/api/#batch-requests\n\n\n Args:\n symbols (list); List of tickers to request\n fields (list); List of fields to request\n range_ (string); Date range for chart\n last (int);\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: results in json", "id": "f2330:m5"} {"signature": "def book(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "Book data\n\n https://iextrading.com/developer/docs/#book\n realtime during Investors Exchange market hours\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m6"} {"signature": "def _bookToDF(b):", "body": "quote = b.get('', [])asks = b.get('', [])bids = b.get('', [])trades = b.get('', [])df1 = pd.io.json.json_normalize(quote)df1[''] = ''df2 = pd.io.json.json_normalize(asks)df2[''] = quote['']df2[''] = ''df3 = pd.io.json.json_normalize(bids)df3[''] = quote['']df3[''] = ''df4 = pd.io.json.json_normalize(trades)df4[''] = quote['']df3[''] = ''df = pd.concat([df1, df2, df3, df4], sort=True)_toDatetime(df)return df", "docstring": "internal", "id": "f2330:m7"} {"signature": "def bookDF(symbol, token='', version=''):", "body": "x = book(symbol, token, version)df = _bookToDF(x)return df", "docstring": "Book data\n\n https://iextrading.com/developer/docs/#book\n realtime during Investors Exchange market hours\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m8"} {"signature": "def cashFlow(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "Pulls cash flow data. 
Available quarterly (4 quarters) or annually (4 years).\n\n https://iexcloud.io/docs/api/#cash-flow\n Updates at 8am, 9am UTC daily\n\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m9"} {"signature": "def cashFlowDF(symbol, token='', version=''):", "body": "val = cashFlow(symbol, token, version)df = pd.io.json.json_normalize(val, '', '')_toDatetime(df)_reindex(df, '')df.replace(to_replace=[None], value=np.nan, inplace=True)return df", "docstring": "Pulls cash flow data. Available quarterly (4 quarters) or annually (4 years).\n\n https://iexcloud.io/docs/api/#cash-flow\n Updates at 8am, 9am UTC daily\n\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m10"} {"signature": "def chart(symbol, timeframe='', date=None, token='', version=''):", "body": "_raiseIfNotStr(symbol)if timeframe is not None and timeframe != '':if timeframe not in _TIMEFRAME_CHART:raise PyEXception('' % str(_TIMEFRAME_CHART))return _getJson('' + symbol + '' + '' + timeframe, token, version)if date:date = _strOrDate(date)return _getJson('' + symbol + '' + '' + date, token, version)return _getJson('' + symbol + '', token, version)", "docstring": "Historical price/volume data, daily and intraday\n\n https://iexcloud.io/docs/api/#historical-prices\n Data Schedule\n 1d: -9:30-4pm ET Mon-Fri on regular market trading days\n -9:30-1pm ET on early close trading days\n All others:\n -Prior trading day available after 4am ET Tue-Sat\n\n Args:\n symbol (string); Ticker to request\n timeframe (string); Timeframe to request e.g. 1m\n date (datetime): date, if requesting intraday\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m11"} {"signature": "def _chartToDF(c):", "body": "df = pd.DataFrame(c)_toDatetime(df)_reindex(df, '')return df", "docstring": "internal", "id": "f2330:m12"} {"signature": "def chartDF(symbol, timeframe='', date=None, token='', version=''):", "body": "c = chart(symbol, timeframe, date, token, version)df = pd.DataFrame(c)_toDatetime(df)if timeframe is not None and timeframe != '':_reindex(df, '')else:if not df.empty:df.set_index(['', ''], inplace=True)else:return pd.DataFrame()return df", "docstring": "Historical price/volume data, daily and intraday\n\n https://iexcloud.io/docs/api/#historical-prices\n Data Schedule\n 1d: -9:30-4pm ET Mon-Fri on regular market trading days\n -9:30-1pm ET on early close trading days\n All others:\n -Prior trading day available after 4am ET Tue-Sat\n\n Args:\n symbol (string); Ticker to request\n timeframe (string); Timeframe to request e.g. 
1m\n date (datetime): date, if requesting intraday\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m13"} {"signature": "def bulkMinuteBars(symbol, dates, token='', version=''):", "body": "_raiseIfNotStr(symbol)dates = [_strOrDate(date) for date in dates]list_orig = dates.__class__args = []for date in dates:args.append((symbol, '', date, token, version))pool = ThreadPool()rets = pool.starmap(chart, args)pool.close()return list_orig(itertools.chain(*rets))", "docstring": "fetch many dates worth of minute-bars for a given symbol", "id": "f2330:m14"} {"signature": "def bulkMinuteBarsDF(symbol, dates, token='', version=''):", "body": "data = bulkMinuteBars(symbol, dates, token, version)df = pd.DataFrame(data)if df.empty:return df_toDatetime(df)df.set_index(['', ''], inplace=True)return df", "docstring": "fetch many dates worth of minute-bars for a given symbol", "id": "f2330:m15"} {"signature": "def collections(tag, collectionName, token='', version=''):", "body": "if tag not in _COLLECTION_TAGS:raise PyEXception('' % str(_COLLECTION_TAGS))return _getJson('' + tag + '' + collectionName, token, version)", "docstring": "Returns an array of quote objects for a given collection type. Currently supported collection types are sector, tag, and list\n\n\n https://iexcloud.io/docs/api/#collections\n\n Args:\n tag (string); Sector, Tag, or List\n collectionName (string); Associated name for tag\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m16"} {"signature": "def collectionsDF(tag, query, token='', version=''):", "body": "df = pd.DataFrame(collections(tag, query, token, version))_toDatetime(df)_reindex(df, '')return df", "docstring": "Returns an array of quote objects for a given collection type. 
Currently supported collection types are sector, tag, and list\n\n\n https://iexcloud.io/docs/api/#collections\n\n Args:\n tag (string); Sector, Tag, or List\n collectionName (string); Associated name for tag\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m17"} {"signature": "def company(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "Company reference data\n\n https://iexcloud.io/docs/api/#company\n Updates at 4am and 5am UTC every day\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m18"} {"signature": "def _companyToDF(c, token='', version=''):", "body": "df = pd.io.json.json_normalize(c)_toDatetime(df)_reindex(df, '')return df", "docstring": "internal", "id": "f2330:m19"} {"signature": "def companyDF(symbol, token='', version=''):", "body": "c = company(symbol, token, version)df = _companyToDF(c)return df", "docstring": "Company reference data\n\n https://iexcloud.io/docs/api/#company\n Updates at 4am and 5am UTC every day\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m20"} {"signature": "def delayedQuote(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "This returns the 15 minute delayed market quote.\n\n https://iexcloud.io/docs/api/#delayed-quote\n 15min delayed\n 4:30am - 8pm ET M-F when market is open\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m21"} {"signature": "def delayedQuoteDF(symbol, token='', version=''):", "body": "df = pd.io.json.json_normalize(delayedQuote(symbol, token, version))_toDatetime(df)_reindex(df, '')return df", "docstring": "This returns the 15 minute delayed market quote.\n\n https://iexcloud.io/docs/api/#delayed-quote\n 15min delayed\n 4:30am - 8pm ET M-F when market is open\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m22"} {"signature": "def dividends(symbol, timeframe='', token='', version=''):", "body": "_raiseIfNotStr(symbol)if timeframe not in _TIMEFRAME_DIVSPLIT:raise PyEXception('' % str(_TIMEFRAME_DIVSPLIT))return _getJson('' + symbol + '' + timeframe, token, version)", "docstring": "Dividend history\n\n https://iexcloud.io/docs/api/#dividends\n Updated at 9am UTC every day\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m23"} {"signature": "def _dividendsToDF(d):", "body": "df = pd.DataFrame(d)_toDatetime(df)_reindex(df, '')return df", "docstring": "internal", "id": "f2330:m24"} {"signature": "def dividendsDF(symbol, timeframe='', token='', version=''):", "body": "d = dividends(symbol, timeframe, token, version)df = _dividendsToDF(d)return df", "docstring": "Dividend history\n\n https://iexcloud.io/docs/api/#dividends\n Updated at 9am UTC every day\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m25"} {"signature": "def earnings(symbol, token='', version=''):", "body": 
"_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "Earnings data for a given company including the actual EPS, consensus, and fiscal period. Earnings are available quarterly (last 4 quarters) and annually (last 4 years).\n\n https://iexcloud.io/docs/api/#earnings\n Updates at 9am, 11am, 12pm UTC every day\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m26"} {"signature": "def _earningsToDF(e):", "body": "if e:df = pd.io.json.json_normalize(e, '', '')_toDatetime(df)_reindex(df, '')else:df = pd.DataFrame()return df", "docstring": "internal", "id": "f2330:m27"} {"signature": "def earningsDF(symbol, token='', version=''):", "body": "e = earnings(symbol, token, version)df = _earningsToDF(e)return df", "docstring": "Earnings data for a given company including the actual EPS, consensus, and fiscal period. Earnings are available quarterly (last 4 quarters) and annually (last 4 years).\n\n https://iexcloud.io/docs/api/#earnings\n Updates at 9am, 11am, 12pm UTC every day\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m28"} {"signature": "def earningsToday(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": "Returns earnings that will be reported today as two arrays: before the open bto and after market close amc.\n Each array contains an object with all keys from earnings, a quote object, and a headline key.\n\n https://iexcloud.io/docs/api/#earnings-today\n Updates at 9am, 11am, 12pm UTC daily\n\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m29"} {"signature": "def earningsTodayDF(token='', version=''):", "body": "x = earningsToday(token, version)z = []for k in x:ds = x[k]for d in ds:d[''] = kz.extend(ds)df = pd.io.json.json_normalize(z)if not df.empty:df.drop_duplicates(inplace=True)_toDatetime(df)_reindex(df, '')return df", "docstring": "Returns earnings that will be reported today as two arrays: before the open bto and after market close amc.\n Each array contains an object with all keys from earnings, a quote object, and a headline key.\n\n https://iexcloud.io/docs/api/#earnings-today\n Updates at 9am, 11am, 12pm UTC daily\n\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m30"} {"signature": "def spread(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "This returns an array of effective spread, eligible volume, and price improvement of a stock, by market.\n Unlike volume-by-venue, this will only return a venue if effective spread is not \u2018N/A\u2019. 
Values are sorted in descending order by effectiveSpread.\n Lower effectiveSpread and higher priceImprovement values are generally considered optimal.\n\n Effective spread is designed to measure marketable orders executed in relation to the market center\u2019s\n quoted spread and takes into account hidden and midpoint liquidity available at each market center.\n Effective Spread is calculated by using eligible trade prices recorded to the consolidated tape and\n comparing those trade prices to the National Best Bid and Offer (\u201cNBBO\u201d) at the time of the execution.\n\n View the data disclaimer at the bottom of the stocks app for more information about how these values are calculated.\n\n\n https://iexcloud.io/docs/api/#earnings-today\n 8am ET M-F\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m31"} {"signature": "def spreadDF(symbol, token='', version=''):", "body": "df = pd.DataFrame(spread(symbol, token, version))_toDatetime(df)_reindex(df, '')return df", "docstring": "This returns an array of effective spread, eligible volume, and price improvement of a stock, by market.\n Unlike volume-by-venue, this will only return a venue if effective spread is not \u2018N/A\u2019. Values are sorted in descending order by effectiveSpread.\n Lower effectiveSpread and higher priceImprovement values are generally considered optimal.\n\n Effective spread is designed to measure marketable orders executed in relation to the market center\u2019s\n quoted spread and takes into account hidden and midpoint liquidity available at each market center.\n Effective Spread is calculated by using eligible trade prices recorded to the consolidated tape and\n comparing those trade prices to the National Best Bid and Offer (\u201cNBBO\u201d) at the time of the execution.\n\n View the data disclaimer at the bottom of the stocks app for more information about how these values are calculated.\n\n\n https://iexcloud.io/docs/api/#earnings-today\n 8am ET M-F\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m32"} {"signature": "def estimates(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "Provides the latest consensus estimate for the next fiscal period\n\n https://iexcloud.io/docs/api/#estimates\n Updates at 9am, 11am, 12pm UTC every day\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m33"} {"signature": "def _estimatesToDF(f):", "body": "if f:df = pd.io.json.json_normalize(f, '', '')_toDatetime(df)_reindex(df, '')else:df = pd.DataFrame()return df", "docstring": "internal", "id": "f2330:m34"} {"signature": "def estimatesDF(symbol, token='', version=''):", "body": "f = estimates(symbol, token, version)df = _estimatesToDF(f)return df", "docstring": "Provides the latest consensus estimate for the next fiscal period\n\n https://iexcloud.io/docs/api/#estimates\n Updates at 9am, 11am, 12pm UTC every day\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m35"} {"signature": "def financials(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "Pulls 
income statement, balance sheet, and cash flow data from the four most recent reported quarters.\n\n https://iexcloud.io/docs/api/#financials\n Updates at 8am, 9am UTC daily\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m36"} {"signature": "def _financialsToDF(f):", "body": "if f:df = pd.io.json.json_normalize(f, '', '')_toDatetime(df)_reindex(df, '')else:df = pd.DataFrame()return df", "docstring": "internal", "id": "f2330:m37"} {"signature": "def financialsDF(symbol, token='', version=''):", "body": "f = financials(symbol, token, version)df = _financialsToDF(f)return df", "docstring": "Pulls income statement, balance sheet, and cash flow data from the four most recent reported quarters.\n\n https://iexcloud.io/docs/api/#financials\n Updates at 8am, 9am UTC daily\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m38"} {"signature": "def incomeStatement(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "Pulls income statement data. Available quarterly (4 quarters) or annually (4 years).\n\n https://iexcloud.io/docs/api/#income-statement\n Updates at 8am, 9am UTC daily\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m39"} {"signature": "def incomeStatementDF(symbol, token='', version=''):", "body": "val = incomeStatement(symbol, token, version)df = pd.io.json.json_normalize(val, '', '')_toDatetime(df)_reindex(df, '')return df", "docstring": "Pulls income statement data. Available quarterly (4 quarters) or annually (4 years).\n\n https://iexcloud.io/docs/api/#income-statement\n Updates at 8am, 9am UTC daily\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m40"} {"signature": "def ipoToday(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": "This returns a list of upcoming or today IPOs scheduled for the current and next month. The response is split into two structures:\n rawData and viewData. rawData represents all available data for an IPO. viewData represents data structured for display to a user.\n\n https://iexcloud.io/docs/api/#ipo-calendar\n 10am, 10:30am UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m41"} {"signature": "def ipoTodayDF(token='', version=''):", "body": "val = ipoToday(token, version)if val:df = pd.io.json.json_normalize(val, '')_toDatetime(df)_reindex(df, '')else:df = pd.DataFrame()return df", "docstring": "This returns a list of upcoming or today IPOs scheduled for the current and next month. The response is split into two structures:\n rawData and viewData. rawData represents all available data for an IPO. 
viewData represents data structured for display to a user.\n\n https://iexcloud.io/docs/api/#ipo-calendar\n 10am, 10:30am UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m42"} {"signature": "def ipoUpcoming(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": "This returns a list of upcoming or today IPOs scheduled for the current and next month. The response is split into two structures:\n rawData and viewData. rawData represents all available data for an IPO. viewData represents data structured for display to a user.\n\n https://iexcloud.io/docs/api/#ipo-calendar\n 10am, 10:30am UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m43"} {"signature": "def ipoUpcomingDF(token='', version=''):", "body": "val = ipoUpcoming(token, version)if val:df = pd.io.json.json_normalize(val, '')_toDatetime(df)_reindex(df, '')else:df = pd.DataFrame()return df", "docstring": "This returns a list of upcoming or today IPOs scheduled for the current and next month. The response is split into two structures:\n rawData and viewData. rawData represents all available data for an IPO. viewData represents data structured for display to a user.\n\n https://iexcloud.io/docs/api/#ipo-calendar\n 10am, 10:30am UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m44"} {"signature": "def keyStats(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "Key Stats about company\n\n https://iexcloud.io/docs/api/#key-stats\n 8am, 9am ET\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m45"} {"signature": "def _statsToDF(s):", "body": "if s:df = pd.io.json.json_normalize(s)_toDatetime(df)_reindex(df, '')else:df = pd.DataFrame()return df", "docstring": "internal", "id": "f2330:m46"} {"signature": "def keyStatsDF(symbol, token='', version=''):", "body": "s = keyStats(symbol, token, version)df = _statsToDF(s)return df", "docstring": "Key Stats about company\n\n https://iexcloud.io/docs/api/#key-stats\n 8am, 9am ET\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m47"} {"signature": "def largestTrades(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "This returns 15 minute delayed, last sale eligible trades.\n\n https://iexcloud.io/docs/api/#largest-trades\n 9:30-4pm ET M-F during regular market hours\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m48"} {"signature": "def largestTradesDF(symbol, token='', version=''):", "body": "df = pd.DataFrame(largestTrades(symbol, token, version))_toDatetime(df)_reindex(df, '')return df", "docstring": "This returns 15 minute delayed, last sale eligible trades.\n\n https://iexcloud.io/docs/api/#largest-trades\n 9:30-4pm ET M-F during regular market hours\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m49"} {"signature": "def list(option='', token='', 
version=''):", "body": "if option not in _LIST_OPTIONS:raise PyEXception('' % str(_LIST_OPTIONS))return _getJson('' + option, token, version)", "docstring": "Returns an array of quotes for the top 10 symbols in a specified list.\n\n\n https://iexcloud.io/docs/api/#list\n Updated intraday\n\n Args:\n option (string); Option to query\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m50"} {"signature": "def listDF(option='', token='', version=''):", "body": "df = pd.DataFrame(list(option, token, version))_toDatetime(df)_reindex(df, '')return df", "docstring": "Returns an array of quotes for the top 10 symbols in a specified list.\n\n\n https://iexcloud.io/docs/api/#list\n Updated intraday\n\n Args:\n option (string); Option to query\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m51"} {"signature": "def logo(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "This is a helper function, but the google APIs url is standardized.\n\n https://iexcloud.io/docs/api/#logo\n 8am UTC daily\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m52"} {"signature": "def logoPNG(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)response = requests.get(logo(symbol, token, version)[''])return ImageP.open(BytesIO(response.content))", "docstring": "This is a helper function, but the google APIs url is standardized.\n\n https://iexcloud.io/docs/api/#logo\n 8am UTC daily\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n image: result as png", "id": "f2330:m53"} {"signature": "def logoNotebook(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)url = logo(symbol, token, version)['']return ImageI(url=url)", "docstring": "This is a helper function, but the google APIs url is standardized.\n\n https://iexcloud.io/docs/api/#logo\n 8am UTC daily\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n image: result", "id": "f2330:m54"} {"signature": "def marketVolume(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": "This endpoint returns real time traded volume on U.S. markets.\n\n https://iexcloud.io/docs/api/#market-volume-u-s\n 7:45am-5:15pm ET Mon-Fri\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m55"} {"signature": "def marketVolumeDF(token='', version=''):", "body": "return pd.DataFrame(marketVolume())", "docstring": "This endpoint returns real time traded volume on U.S. 
markets.\n\n https://iexcloud.io/docs/api/#market-volume-u-s\n 7:45am-5:15pm ET Mon-Fri\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m56"} {"signature": "def news(symbol, count=, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '' + str(count), token, version)", "docstring": "News about company\n\n https://iexcloud.io/docs/api/#news\n Continuous\n\n Args:\n symbol (string); Ticker to request\n count (int): limit number of results\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m57"} {"signature": "def _newsToDF(n):", "body": "df = pd.DataFrame(n)_toDatetime(df)_reindex(df, '')return df", "docstring": "internal", "id": "f2330:m58"} {"signature": "def newsDF(symbol, count=, token='', version=''):", "body": "n = news(symbol, count, token, version)df = _newsToDF(n)return df", "docstring": "News about company\n\n https://iexcloud.io/docs/api/#news\n Continuous\n\n Args:\n symbol (string); Ticker to request\n count (int): limit number of results\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m59"} {"signature": "def marketNews(count=, token='', version=''):", "body": "return _getJson('' + str(count), token, version)", "docstring": "News about market\n\n https://iexcloud.io/docs/api/#news\n Continuous\n\n Args:\n count (int): limit number of results\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m60"} {"signature": "def marketNewsDF(count=, token='', version=''):", "body": "df = pd.DataFrame(marketNews(count, token, version))_toDatetime(df)_reindex(df, '')return df", "docstring": "News about market\n\n https://iexcloud.io/docs/api/#news\n Continuous\n\n Args:\n count (int): limit number of results\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m61"} {"signature": "def ohlc(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "Returns the official open and close for a given symbol.\n\n https://iexcloud.io/docs/api/#news\n 9:30am-5pm ET Mon-Fri\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m62"} {"signature": "def ohlcDF(symbol, token='', version=''):", "body": "o = ohlc(symbol, token, version)if o:df = pd.io.json.json_normalize(o)_toDatetime(df)else:df = pd.DataFrame()return df", "docstring": "Returns the official open and close for a given symbol.\n\n https://iexcloud.io/docs/api/#news\n 9:30am-5pm ET Mon-Fri\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m63"} {"signature": "def marketOhlc(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": "Returns the official open and close for whole market.\n\n https://iexcloud.io/docs/api/#news\n 9:30am-5pm ET Mon-Fri\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m64"} {"signature": "def marketOhlcDF(token='', version=''):", "body": "x = marketOhlc(token, version)data = []for key in x:data.append(x[key])data[-][''] = keydf = pd.io.json.json_normalize(data)_toDatetime(df)_reindex(df, '')return df", "docstring": "Returns
the official open and close for whole market.\n\n https://iexcloud.io/docs/api/#news\n 9:30am-5pm ET Mon-Fri\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m65"} {"signature": "def peers(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "Peers of ticker\n\n https://iexcloud.io/docs/api/#peers\n 8am UTC daily\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m66"} {"signature": "def _peersToDF(p):", "body": "df = pd.DataFrame(p, columns=[''])_toDatetime(df)_reindex(df, '')df[''] = df.indexreturn df", "docstring": "internal", "id": "f2330:m67"} {"signature": "def peersDF(symbol, token='', version=''):", "body": "p = peers(symbol, token, version)df = _peersToDF(p)return df", "docstring": "Peers of ticker\n\n https://iexcloud.io/docs/api/#peers\n 8am UTC daily\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m68"} {"signature": "def yesterday(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "This returns previous day adjusted price data for one or more stocks\n\n https://iexcloud.io/docs/api/#previous-day-prices\n Available after 4am ET Tue-Sat\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m69"} {"signature": "def yesterdayDF(symbol, token='', version=''):", "body": "y = yesterday(symbol, token, version)if y:df = pd.io.json.json_normalize(y)_toDatetime(df)_reindex(df, '')else:df = pd.DataFrame()return df", "docstring": "This returns previous day adjusted price data for one or more stocks\n\n https://iexcloud.io/docs/api/#previous-day-prices\n Available after 4am ET Tue-Sat\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m70"} {"signature": "def marketYesterday(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": "This returns previous day adjusted price data for whole market\n\n https://iexcloud.io/docs/api/#previous-day-prices\n Available after 4am ET Tue-Sat\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m71"} {"signature": "def marketYesterdayDF(token='', version=''):", "body": "x = marketYesterday(token, version)data = []for key in x:data.append(x[key])data[-][''] = keydf = pd.DataFrame(data)_toDatetime(df)_reindex(df, '')return df", "docstring": "This returns previous day adjusted price data for whole market\n\n https://iexcloud.io/docs/api/#previous-day-prices\n Available after 4am ET Tue-Sat\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m72"} {"signature": "def price(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "Price of ticker\n\n https://iexcloud.io/docs/api/#price\n 4:30am-8pm ET Mon-Fri\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n 
Returns:\n dict: result", "id": "f2330:m73"} {"signature": "def priceDF(symbol, token='', version=''):", "body": "df = pd.io.json.json_normalize({'': price(symbol, token, version)})_toDatetime(df)return df", "docstring": "Price of ticker\n\n https://iexcloud.io/docs/api/#price\n 4:30am-8pm ET Mon-Fri\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m74"} {"signature": "def priceTarget(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "Provides the latest avg, high, and low analyst price target for a symbol.\n\n https://iexcloud.io/docs/api/#price-target\n Updates at 10am, 11am, 12pm UTC every day\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m75"} {"signature": "def priceTargetDF(symbol, token='', version=''):", "body": "df = pd.io.json.json_normalize(priceTarget(symbol, token, version))_toDatetime(df)return df", "docstring": "Provides the latest avg, high, and low analyst price target for a symbol.\n\n https://iexcloud.io/docs/api/#price-target\n Updates at 10am, 11am, 12pm UTC every day\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m76"} {"signature": "def quote(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "Get quote for ticker\n\n https://iexcloud.io/docs/api/#quote\n 4:30am-8pm ET Mon-Fri\n\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m77"} {"signature": "def quoteDF(symbol, token='', version=''):", "body": "q = quote(symbol, token, version)if q:df = pd.io.json.json_normalize(q)_toDatetime(df)_reindex(df, '')else:df = pd.DataFrame()return df", "docstring": "Get quote for ticker\n\n https://iexcloud.io/docs/api/#quote\n 4:30am-8pm ET Mon-Fri\n\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m78"} {"signature": "def relevant(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "Same as peers\n\n https://iexcloud.io/docs/api/#relevant\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m79"} {"signature": "def relevantDF(symbol, token='', version=''):", "body": "df = pd.DataFrame(relevant(symbol, token, version))_toDatetime(df)return df", "docstring": "Same as peers\n\n https://iexcloud.io/docs/api/#relevant\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m80"} {"signature": "def sectorPerformance(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": "This returns an array of each sector and performance for the current trading day. 
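The quote, price, and price-target records above follow the same raw/DF split. A short sketch of how they might be combined; the package name `pyEX`, the ticker, the token source, and the version string are assumptions.

```python
# Sketch of the quote / price-target / price endpoints.
import os

import pyEX

token = os.environ.get("IEX_TOKEN", "")  # hypothetical env var

quote_df = pyEX.quoteDF("MSFT", token=token, version="stable")
target_df = pyEX.priceTargetDF("MSFT", token=token, version="stable")

# The plain variants return the unmodified JSON payload (here, a bare number)
latest_price = pyEX.price("MSFT", token=token, version="stable")
print(latest_price, quote_df.shape, target_df.shape)
```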
Performance is based on each sector ETF.\n\n https://iexcloud.io/docs/api/#sector-performance\n 8am-5pm ET Mon-Fri\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m81"} {"signature": "def sectorPerformanceDF(token='', version=''):", "body": "df = pd.DataFrame(sectorPerformance(token, version))_toDatetime(df)_reindex(df, '')return df", "docstring": "This returns an array of each sector and performance for the current trading day. Performance is based on each sector ETF.\n\n https://iexcloud.io/docs/api/#sector-performance\n 8am-5pm ET Mon-Fri\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m82"} {"signature": "def splits(symbol, timeframe='', token='', version=''):", "body": "_raiseIfNotStr(symbol)if timeframe not in _TIMEFRAME_DIVSPLIT:raise PyEXception('' % str(_TIMEFRAME_DIVSPLIT))return _getJson('' + symbol + '' + timeframe, token, version)", "docstring": "Stock split history\n\n https://iexcloud.io/docs/api/#splits\n Updated at 9am UTC every day\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m83"} {"signature": "def _splitsToDF(s):", "body": "df = pd.DataFrame(s)_toDatetime(df)_reindex(df, '')return df", "docstring": "internal", "id": "f2330:m84"} {"signature": "def splitsDF(symbol, timeframe='', token='', version=''):", "body": "s = splits(symbol, timeframe, token, version)df = _splitsToDF(s)return df", "docstring": "Stock split history\n\n https://iexcloud.io/docs/api/#splits\n Updated at 9am UTC every day\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m85"} {"signature": "def volumeByVenue(symbol, token='', version=''):", "body": "_raiseIfNotStr(symbol)return _getJson('' + symbol + '', token, version)", "docstring": "This returns 15 minute delayed and 30 day average consolidated volume percentage of a stock, by market.\n This call will always return 13 values, and will be sorted in ascending order by current day trading volume percentage.\n\n https://iexcloud.io/docs/api/#volume-by-venue\n Updated during regular market hours 9:30am-4pm ET\n\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m86"} {"signature": "def volumeByVenueDF(symbol, token='', version=''):", "body": "df = pd.DataFrame(volumeByVenue(symbol, token, version))_toDatetime(df)_reindex(df, '')return df", "docstring": "This returns 15 minute delayed and 30 day average consolidated volume percentage of a stock, by market.\n This call will always return 13 values, and will be sorted in ascending order by current day trading volume percentage.\n\n https://iexcloud.io/docs/api/#volume-by-venue\n Updated during regular market hours 9:30am-4pm ET\n\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m87"} {"signature": "def threshold(date=None, token='', version=''):", "body": "if date:date = _strOrDate(date)return _getJson('' + date, token, version)return _getJson('', token, version)", "docstring": "The following are IEX-listed securities that have an aggregate fail to deliver position for five consecutive settlement days at a registered clearing agency, totaling 
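For the split-history, sector-performance, and volume-by-venue records above, a usage sketch follows. The `"5y"` timeframe is an assumption: `splits()` validates the value against an internal `_TIMEFRAME_DIVSPLIT` list whose contents are elided in this record. The package name `pyEX` and version string are likewise assumptions.

```python
# Sketch of split history, sector performance and volume-by-venue lookups.
import os

import pyEX

token = os.environ.get("IEX_TOKEN", "")  # hypothetical env var

splits_df = pyEX.splitsDF("AAPL", timeframe="5y", token=token, version="stable")  # "5y" is assumed
sector_df = pyEX.sectorPerformanceDF(token=token, version="stable")
venue_df = pyEX.volumeByVenueDF("AAPL", token=token, version="stable")
print(splits_df.head(), sector_df.head(), venue_df.head())
```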
10,000 shares or more and equal to at least 0.5% of the issuer\u2019s total shares outstanding (i.e., \u201cthreshold securities\u201d).\n The report data will be published to the IEX website daily at 8:30 p.m. ET with data for that trading day.\n\n https://iexcloud.io/docs/api/#listed-regulation-sho-threshold-securities-list-in-dev\n\n Args:\n date (datetime); Effective Datetime\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m88"} {"signature": "def thresholdDF(date=None, token='', version=''):", "body": "df = pd.DataFrame(threshold(date, token, version))_toDatetime(df)return df", "docstring": "The following are IEX-listed securities that have an aggregate fail to deliver position for five consecutive settlement days at a registered clearing agency, totaling 10,000 shares or more and equal to at least 0.5% of the issuer\u2019s total shares outstanding (i.e., \u201cthreshold securities\u201d).\n The report data will be published to the IEX website daily at 8:30 p.m. ET with data for that trading day.\n\n https://iexcloud.io/docs/api/#listed-regulation-sho-threshold-securities-list-in-dev\n\n Args:\n date (datetime); Effective Datetime\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m89"} {"signature": "def shortInterest(symbol, date=None, token='', version=''):", "body": "_raiseIfNotStr(symbol)if date:date = _strOrDate(date)return _getJson('' + symbol + '' + date, token, version)return _getJson('' + symbol + '', token, version)", "docstring": "The consolidated market short interest positions in all IEX-listed securities are included in the IEX Short Interest Report.\n\n The report data will be published daily at 4:00pm ET.\n\n https://iexcloud.io/docs/api/#listed-short-interest-list-in-dev\n\n Args:\n symbol (string); Ticker to request\n date (datetime); Effective Datetime\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m90"} {"signature": "def shortInterestDF(symbol, date=None, token='', version=''):", "body": "df = pd.DataFrame(shortInterest(symbol, date, token, version))_toDatetime(df)return df", "docstring": "The consolidated market short interest positions in all IEX-listed securities are included in the IEX Short Interest Report.\n\n The report data will be published daily at 4:00pm ET.\n\n https://iexcloud.io/docs/api/#listed-short-interest-list-in-dev\n\n Args:\n symbol (string); Ticker to request\n date (datetime); Effective Datetime\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m91"} {"signature": "def marketShortInterest(date=None, token='', version=''):", "body": "if date:date = _strOrDate(date)return _getJson('' + date, token, version)return _getJson('', token, version)", "docstring": "The consolidated market short interest positions in all IEX-listed securities are included in the IEX Short Interest Report.\n\n The report data will be published daily at 4:00pm ET.\n\n https://iexcloud.io/docs/api/#listed-short-interest-list-in-dev\n\n Args:\n date (datetime); Effective Datetime\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2330:m92"} {"signature": "def marketShortInterestDF(date=None, token='', version=''):", "body": "df = pd.DataFrame(marketShortInterest(date, token, version))_toDatetime(df)return df", "docstring": "The consolidated market short interest positions in all 
IEX-listed securities are included in the IEX Short Interest Report.\n\n The report data will be published daily at 4:00pm ET.\n\n https://iexcloud.io/docs/api/#listed-short-interest-list-in-dev\n\n Args:\n date (datetime); Effective Datetime\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2330:m93"} {"signature": "def exchanges(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": "Returns an array of U.S. exchanges.\n\n https://iexcloud.io/docs/api/#u-s-exchanges\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2331:m0"} {"signature": "def exchangesDF(token='', version=''):", "body": "return pd.DataFrame(exchanges())", "docstring": "Returns an array of U.S. exchanges.\n\n https://iexcloud.io/docs/api/#u-s-exchanges\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2331:m1"} {"signature": "def calendar(type='', direction='', last=, startDate=None, token='', version=''):", "body": "if startDate:startDate = _strOrDate(startDate)return _getJson(''.format(type=type, direction=direction, last=last, date=startDate), token, version)return _getJson('' + type + '' + direction + '' + str(last), token, version)", "docstring": "This call allows you to fetch a number of trade dates or holidays from a given date. For example, if you want the next trading day, you would call /ref-data/us/dates/trade/next/1.\n\n https://iexcloud.io/docs/api/#u-s-exchanges\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n type (string); \"holiday\" or \"trade\"\n direction (string); \"next\" or \"last\"\n last (int); number to move in direction\n startDate (date); start date for next or last, YYYYMMDD\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2331:m2"} {"signature": "def calendarDF(type='', direction='', last=, startDate=None, token='', version=''):", "body": "dat = pd.DataFrame(calendar(type, direction, last, startDate, token, version))_toDatetime(dat)return dat", "docstring": "This call allows you to fetch a number of trade dates or holidays from a given date. For example, if you want the next trading day, you would call /ref-data/us/dates/trade/next/1.\n\n https://iexcloud.io/docs/api/#u-s-exchanges\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n type (string); \"holiday\" or \"trade\"\n direction (string); \"next\" or \"last\"\n last (int); number to move in direction\n startDate (date); start date for next or last, YYYYMMDD\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2331:m3"} {"signature": "def holidays(direction='', last=, startDate=None, token='', version=''):", "body": "return calendar('', direction, last, startDate, token, version)", "docstring": "This call allows you to fetch a number of trade dates or holidays from a given date. 
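The reference-data records above (exchanges, calendar, holidays) document that `type` is "holiday" or "trade" and `direction` is "next" or "last". A sketch using those documented values; the package name `pyEX` and the version string are assumptions.

```python
# Sketch of the reference-data calendar endpoints.
import os

import pyEX

token = os.environ.get("IEX_TOKEN", "")  # hypothetical env var

# Next 5 trading days from today
trade_days = pyEX.calendarDF(type="trade", direction="next", last=5,
                             token=token, version="stable")

# holidays()/holidaysDF() are thin wrappers that call calendar() with the
# "holiday" type pre-selected
upcoming_holidays = pyEX.holidaysDF(direction="next", last=3,
                                    token=token, version="stable")

us_exchanges = pyEX.exchangesDF(token=token, version="stable")
print(trade_days, upcoming_holidays, us_exchanges.head())
```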
For example, if you want the next trading day, you would call /ref-data/us/dates/trade/next/1.\n\n https://iexcloud.io/docs/api/#u-s-exchanges\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n direction (string); \"next\" or \"last\"\n last (int); number to move in direction\n startDate (date); start date for next or last, YYYYMMDD\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2331:m4"} {"signature": "def holidaysDF(direction='', last=, startDate=None, token='', version=''):", "body": "return calendarDF('', direction, last, startDate, token, version)", "docstring": "This call allows you to fetch a number of trade dates or holidays from a given date. For example, if you want the next trading day, you would call /ref-data/us/dates/trade/next/1.\n\n https://iexcloud.io/docs/api/#u-s-exchanges\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n direction (string); \"next\" or \"last\"\n last (int); number to move in direction\n startDate (date); start date for next or last, YYYYMMDD\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2331:m5"} {"signature": "def symbols(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": "This call returns an array of symbols that IEX Cloud supports for API calls.\n\n https://iexcloud.io/docs/api/#symbols\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2331:m6"} {"signature": "def iexSymbols(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": "This call returns an array of symbols the Investors Exchange supports for trading.\n This list is updated daily as of 7:45 a.m. ET. 
Symbols may be added or removed by the Investors Exchange after the list was produced.\n\n https://iexcloud.io/docs/api/#iex-symbols\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2331:m7"} {"signature": "def mutualFundSymbols(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": "This call returns an array of mutual fund symbols that IEX Cloud supports for API calls.\n\n https://iexcloud.io/docs/api/#mutual-fund-symbols\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2331:m8"} {"signature": "def otcSymbols(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": "This call returns an array of OTC symbols that IEX Cloud supports for API calls.\n\n https://iexcloud.io/docs/api/#otc-symbols\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2331:m9"} {"signature": "def internationalSymbols(region='', exchange='', token='', version=''):", "body": "if region:return _getJson(''.format(region=region), token, version)elif exchange:return _getJson(''.format(exchange=exchange), token, version)return _getJson('', token, version)", "docstring": "This call returns an array of international symbols that IEX Cloud supports for API calls.\n\n https://iexcloud.io/docs/api/#international-symbols\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n region (string); region, 2 letter case insensitive string of country codes using ISO 3166-1 alpha-2\n exchange (string): Case insensitive string of Exchange using IEX Supported Exchanges list\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2331:m10"} {"signature": "def symbolsDF(token='', version=''):", "body": "df = pd.DataFrame(symbols(token, version))_toDatetime(df)_reindex(df, '')return df", "docstring": "This call returns an array of symbols that IEX Cloud supports for API calls.\n\n https://iexcloud.io/docs/api/#symbols\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dataframe: result", "id": "f2331:m11"} {"signature": "def iexSymbolsDF(token='', version=''):", "body": "df = pd.DataFrame(iexSymbols(token, version))_toDatetime(df)_reindex(df, '')return df", "docstring": "This call returns an array of symbols the Investors Exchange supports for trading.\n This list is updated daily as of 7:45 a.m. ET. 
Symbols may be added or removed by the Investors Exchange after the list was produced.\n\n https://iexcloud.io/docs/api/#iex-symbols\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2331:m12"} {"signature": "def mutualFundSymbolsDF(token='', version=''):", "body": "df = pd.DataFrame(mutualFundSymbols(token, version))_toDatetime(df)_reindex(df, '')return df", "docstring": "This call returns an array of mutual fund symbols that IEX Cloud supports for API calls.\n\n https://iexcloud.io/docs/api/#mutual-fund-symbols\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2331:m13"} {"signature": "def otcSymbolsDF(token='', version=''):", "body": "df = pd.DataFrame(otcSymbols(token, version))_toDatetime(df)_reindex(df, '')return df", "docstring": "This call returns an array of OTC symbols that IEX Cloud supports for API calls.\n\n https://iexcloud.io/docs/api/#otc-symbols\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2331:m14"} {"signature": "def internationalSymbolsDF(region='', exchange='', token='', version=''):", "body": "df = pd.DataFrame(internationalSymbols(region, exchange, token, version))_toDatetime(df)_reindex(df, '')return df", "docstring": "This call returns an array of international symbols that IEX Cloud supports for API calls.\n\n https://iexcloud.io/docs/api/#international-symbols\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n region (string); region, 2 letter case insensitive string of country codes using ISO 3166-1 alpha-2\n exchange (string): Case insensitive string of Exchange using IEX Supported Exchanges list\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2331:m15"} {"signature": "def symbolsList(token='', version=''):", "body": "return symbolsDF(token, version).index.tolist()", "docstring": "This call returns an array of symbols that IEX Cloud supports for API calls.\n\n https://iexcloud.io/docs/api/#symbols\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n list: result", "id": "f2331:m16"} {"signature": "def iexSymbolsList(token='', version=''):", "body": "return iexSymbolsDF(token, version).index.tolist()", "docstring": "This call returns an array of symbols the Investors Exchange supports for trading.\n This list is updated daily as of 7:45 a.m. ET. 
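The symbol reference-data records above come in three flavors per dataset: raw JSON, a symbol-indexed DataFrame, and a plain list of tickers. A sketch follows; "gb" is an illustrative ISO 3166-1 alpha-2 region code, and the package name `pyEX` and version string are assumptions.

```python
# Sketch of the symbol reference-data calls.
import os

import pyEX

token = os.environ.get("IEX_TOKEN", "")  # hypothetical env var

all_symbols_df = pyEX.symbolsDF(token=token, version="stable")   # symbol-indexed DataFrame
ticker_list = pyEX.symbolsList(token=token, version="stable")    # plain list of tickers

# International symbols filtered by a 2-letter region code (case insensitive)
uk_symbols_df = pyEX.internationalSymbolsDF(region="gb", token=token, version="stable")
print(len(ticker_list), all_symbols_df.shape, uk_symbols_df.shape)
```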
Symbols may be added or removed by the Investors Exchange after the list was produced.\n\n https://iexcloud.io/docs/api/#iex-symbols\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n list: result", "id": "f2331:m17"} {"signature": "def mutualFundSymbolsList(token='', version=''):", "body": "return mutualFundSymbolsDF(token, version).index.tolist()", "docstring": "This call returns an array of mutual fund symbols that IEX Cloud supports for API calls.\n\n https://iexcloud.io/docs/api/#mutual-fund-symbols\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n List: result", "id": "f2331:m18"} {"signature": "def otcSymbolsList(token='', version=''):", "body": "return otcSymbolsDF(token, version).index.tolist()", "docstring": "This call returns an array of OTC symbols that IEX Cloud supports for API calls.\n\n https://iexcloud.io/docs/api/#otc-symbols\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n list: result", "id": "f2331:m19"} {"signature": "def internationalSymbolsList(region='', exchange='', token='', version=''):", "body": "return internationalSymbolsDF(region, exchange, token, version).index.tolist()", "docstring": "This call returns an array of international symbols that IEX Cloud supports for API calls.\n\n https://iexcloud.io/docs/api/#international-symbols\n 8am, 9am, 12pm, 1pm UTC daily\n\n Args:\n region (string); region, 2 letter case insensitive string of country codes using ISO 3166-1 alpha-2\n exchange (string): Case insensitive string of Exchange using IEX Supported Exchanges list\n token (string); Access token\n version (string); API version\n\n Returns:\n list: result", "id": "f2331:m20"} {"signature": "@deprecated(details='')def corporateActions(date=None, token='', version=''):", "body": "if date:date = _strOrDate(date)return _getJson('' + date, token, version)return _getJson('', token, version)", "docstring": "Args:\n date (datetime): Effective date\n token (string); Access token\n version (string); API version\n\nReturns:\n dict: result", "id": "f2331:m21"} {"signature": "@deprecated(details='')def corporateActionsDF(date=None, token='', version=''):", "body": "df = pd.DataFrame(corporateActions(date, token, version))_toDatetime(df)return df", "docstring": "Args:\n date (datetime): Effective date\n token (string); Access token\n version (string); API version\n\nReturns:\n DataFrame: result", "id": "f2331:m22"} {"signature": "@deprecated(details='')def dividends(date=None, token='', version=''):", "body": "if date:date = _strOrDate(date)return _getJson('' + date, token, version)return _getJson('', token, version)", "docstring": "Args:\n date (datetime): Effective date\n token (string); Access token\n version (string); API version\n\nReturns:\n dict: result", "id": "f2331:m23"} {"signature": "@deprecated(details='')def dividendsDF(date=None, token='', version=''):", "body": "df = pd.DataFrame(dividends(date, token, version))_toDatetime(df)return df", "docstring": "Args:\n date (datetime): Effective date\n token (string); Access token\n version (string); API version\n\nReturns:\n DataFrame: result", "id": "f2331:m24"} {"signature": "@deprecated(details='')def nextDayExtDate(date=None, token='', version=''):", "body": "if date:date = _strOrDate(date)return _getJson('' + date, token, version)return _getJson('', token, version)", "docstring": "Args:\n date (datetime): 
Effective date\n token (string); Access token\n version (string); API version\n\nReturns:\n dict: result", "id": "f2331:m25"} {"signature": "@deprecated(details='')def nextDayExtDateDF(date=None, token='', version=''):", "body": "df = pd.DataFrame(nextDayExtDate(date, token, version))_toDatetime(df)return df", "docstring": "Args:\n date (datetime): Effective date\n token (string); Access token\n version (string); API version\n\nReturns:\n DataFrame: result", "id": "f2331:m26"} {"signature": "@deprecated(details='')def directory(date=None, token='', version=''):", "body": "if date:date = _strOrDate(date)return _getJson('' + date, token, version)return _getJson('', token, version)", "docstring": "Args:\n date (datetime): Effective date\n token (string); Access token\n version (string); API version\n\nReturns:\n dict: result", "id": "f2331:m27"} {"signature": "@deprecated(details='')def directoryDF(date=None, token='', version=''):", "body": "df = pd.DataFrame(directory(date, token, version))_toDatetime(df)return df", "docstring": "Args:\n date (datetime): Effective date\n token (string); Access token\n version (string); API version\n\nReturns:\n dict: result", "id": "f2331:m28"} {"signature": "def topsSSE(symbols=None, on_data=None, token='', version=''):", "body": "return _runSSE('', symbols, on_data, token, version)", "docstring": "TOPS provides IEX\u2019s aggregated best quoted bid and offer position in near real time for all securities on IEX\u2019s displayed limit order book.\n TOPS is ideal for developers needing both quote and trade data.\n\n https://iexcloud.io/docs/api/#tops\n\n Args:\n symbols (string); Tickers to request\n on_data (function): Callback on data\n token (string); Access token\n version (string); API version", "id": "f2332:m1"} {"signature": "def lastSSE(symbols=None, on_data=None, token='', version=''):", "body": "return _runSSE('', symbols, on_data, token, version)", "docstring": "Last provides trade data for executions on IEX. 
It is a near real time, intraday API that provides IEX last sale price, size and time.\n Last is ideal for developers that need a lightweight stock quote.\n\n https://iexcloud.io/docs/api/#last\n\n Args:\n symbols (string); Tickers to request\n on_data (function): Callback on data\n token (string); Access token\n version (string); API version", "id": "f2332:m2"} {"signature": "def deepSSE(symbols=None, channels=None, on_data=None, token='', version=''):", "body": "symbols = _strCommaSeparatedString(symbols)channels = channels or []if isinstance(channels, str):if channels not in DeepChannelsSSE.options():raise PyEXception('', type(channels))channels = [channels]elif isinstance(channels, DeepChannelsSSE):channels = [channels.value]elif isinstance(channels, list):for i, c in enumerate(channels):if isinstance(c, DeepChannelsSSE):channels[i] = c.valueelif not isinstance(c, str) or isinstance(c, str) and c not in DeepChannelsSSE.options():raise PyEXception('', c)channels = _strCommaSeparatedString(channels)return _streamSSE(_SSE_DEEP_URL_PREFIX.format(symbols=symbols, channels=channels, token=token, version=version), on_data)", "docstring": "DEEP is used to receive real-time depth of book quotations direct from IEX.\n The depth of book quotations received via DEEP provide an aggregated size of resting displayed orders at a price and side,\n and do not indicate the size or number of individual orders at any price level.\n Non-displayed orders and non-displayed portions of reserve orders are not represented in DEEP.\n\n DEEP also provides last trade price and size information. Trades resulting from either displayed or non-displayed orders matching on IEX will be reported. Routed executions will not be reported.\n\n https://iexcloud.io/docs/api/#deep\n\n Args:\n symbols (string); Tickers to request\n on_data (function): Callback on data\n token (string); Access token\n version (string); API version", "id": "f2332:m3"} {"signature": "def tradesSSE(symbols=None, on_data=None, token='', version=''):", "body": "symbols = _strCommaSeparatedString(symbols)return _streamSSE(_SSE_DEEP_URL_PREFIX.format(symbols=symbols, channels='', token=token, version=version), on_data)", "docstring": "Trade report messages are sent when an order on the IEX Order Book is executed in whole or in part. DEEP sends a Trade report message for every individual fill.\n\n https://iexcloud.io/docs/api/#deep-trades\n\n Args:\n symbols (string); Tickers to request\n on_data (function): Callback on data\n token (string); Access token\n version (string); API version", "id": "f2332:m4"} {"signature": "def auctionSSE(symbols=None, on_data=None, token='', version=''):", "body": "return _runSSE('', symbols, on_data, token, version)", "docstring": "DEEP broadcasts an Auction Information Message every one second between the Lock-in Time and the auction match for Opening and Closing Auctions,\n and during the Display Only Period for IPO, Halt, and Volatility Auctions. 
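The *SSE records above stream server-sent events and invoke a caller-supplied `on_data` callback per message. A sketch follows; passing the symbols as one comma-separated string mirrors the `_strCommaSeparatedString` helper these functions reference, but the exact accepted forms, the blocking behavior, the package name `pyEX`, and the version string are assumptions here.

```python
# Sketch of the SSE streaming interface with a per-message callback.
import os

import pyEX

token = os.environ.get("IEX_TOKEN", "")  # hypothetical env var

def on_data(message):
    # `message` is one decoded streaming update
    print(message)

# Stream IEX best bid/offer (TOPS) updates for two symbols (assumed format)
pyEX.topsSSE(symbols="AAPL,SPY", on_data=on_data, token=token, version="stable")
```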
Only IEX listed securities are eligible for IEX Auctions.\n\n https://iexcloud.io/docs/api/#deep-auction\n\n Args:\n symbols (string); Tickers to request\n on_data (function): Callback on data\n token (string); Access token\n version (string); API version", "id": "f2332:m5"} {"signature": "def bookSSE(symbols=None, on_data=None, token='', version=''):", "body": "return _runSSE('', symbols, on_data, token, version)", "docstring": "Book shows IEX\u2019s bids and asks for given symbols.\n\n https://iexcloud.io/docs/api/#deep-book\n\n Args:\n symbols (string); Tickers to request\n on_data (function): Callback on data\n token (string); Access token\n version (string); API version", "id": "f2332:m6"} {"signature": "def opHaltStatusSSE(symbols=None, on_data=None, token='', version=''):", "body": "return _runSSE('', symbols, on_data, token, version)", "docstring": "The Exchange may suspend trading of one or more securities on IEX for operational reasons and indicates such operational halt using the Operational halt status message.\n\n IEX disseminates a full pre-market spin of Operational halt status messages indicating the operational halt status of all securities.\n In the spin, IEX will send out an Operational Halt Message with \u201cN\u201d (Not operationally halted on IEX) for all securities that are eligible for trading at the start of the Pre-Market Session.\n If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System at the start of the Pre-Market Session.\n\n After the pre-market spin, IEX will use the Operational halt status message to relay changes in operational halt status for an individual security.\n\n https://iexcloud.io/docs/api/#deep-operational-halt-status\n\n Args:\n symbols (string); Tickers to request\n on_data (function): Callback on data\n token (string); Access token\n version (string); API version", "id": "f2332:m7"} {"signature": "def officialPriceSSE(symbols=None, on_data=None, token='', version=''):", "body": "return _runSSE('', symbols, on_data, token, version)", "docstring": "The Official Price message is used to disseminate the IEX Official Opening and Closing Prices.\n\n These messages will be provided only for IEX Listed Securities.\n\n https://iexcloud.io/docs/api/#deep-official-price\n\n Args:\n symbols (string); Tickers to request\n on_data (function): Callback on data\n token (string); Access token\n version (string); API version", "id": "f2332:m8"} {"signature": "def securityEventSSE(symbols=None, on_data=None, token='', version=''):", "body": "return _runSSE('', symbols, on_data, token, version)", "docstring": "The Security event message is used to indicate events that apply to a security. A Security event message will be sent whenever such event occurs\n\n https://iexcloud.io/docs/api/#deep-security-event\n\n Args:\n symbols (string); Tickers to request\n on_data (function): Callback on data\n token (string); Access token\n version (string); API version", "id": "f2332:m9"} {"signature": "def ssrStatusSSE(symbols=None, on_data=None, token='', version=''):", "body": "return _runSSE('', symbols, on_data, token, version)", "docstring": "In association with Rule 201 of Regulation SHO, the Short Sale Price Test Message is used to indicate when a short sale price test restriction is in effect for a security.\n\n IEX disseminates a full pre-market spin of Short sale price test status messages indicating the Rule 201 status of all securities. 
After the pre-market spin, IEX will use the Short sale price test status message in the event of an intraday status change.\n\n The IEX Trading System will process orders based on the latest short sale price test restriction status.\n\n https://iexcloud.io/docs/api/#deep-short-sale-price-test-status\n\n Args:\n symbols (string); Tickers to request\n on_data (function): Callback on data\n token (string); Access token\n version (string); API version", "id": "f2332:m10"} {"signature": "def systemEventSSE(symbols=None, on_data=None, token='', version=''):", "body": "return _runSSE('', symbols, on_data, token, version)", "docstring": "The System event message is used to indicate events that apply to the market or the data feed.\n\n There will be a single message disseminated per channel for each System Event type within a given trading session.\n\n https://iexcloud.io/docs/api/#deep-system-event\n\n Args:\n symbols (string); Tickers to request\n on_data (function): Callback on data\n token (string); Access token\n version (string); API version", "id": "f2332:m11"} {"signature": "def tradeBreaksSSE(symbols=None, on_data=None, token='', version=''):", "body": "return _runSSE('', symbols, on_data, token, version)", "docstring": "Trade report messages are sent when an order on the IEX Order Book is executed in whole or in part. DEEP sends a Trade report message for every individual fill.\n\n https://iexcloud.io/docs/api/#deep-trades\n\n Args:\n symbols (string); Tickers to request\n on_data (function): Callback on data\n token (string); Access token\n version (string); API version", "id": "f2332:m12"} {"signature": "def tradingStatusSSE(symbols=None, on_data=None, token='', version=''):", "body": "return _runSSE('', symbols, on_data, token, version)", "docstring": "The Trading status message is used to indicate the current trading status of a security.\n For IEX-listed securities, IEX acts as the primary market and has the authority to institute a trading halt or trading pause in a security due to news dissemination or regulatory reasons.\n For non-IEX-listed securities, IEX abides by any regulatory trading halts and trading pauses instituted by the primary or listing market, as applicable.\n\n IEX disseminates a full pre-market spin of Trading status messages indicating the trading status of all securities.\n In the spin, IEX will send out a Trading status message with \u201cT\u201d (Trading) for all securities that are eligible for trading at the start of the Pre-Market Session.\n If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System.\n\n After the pre-market spin, IEX will use the Trading status message to relay changes in trading status for an individual security. Messages will be sent when a security is:\n\n Halted\n Paused*\n Released into an Order Acceptance Period*\n Released for trading\n *The paused and released into an Order Acceptance Period status will be disseminated for IEX-listed securities only. 
Trading pauses on non-IEX-listed securities will be treated simply as a halt.\n\n https://iexcloud.io/docs/api/#deep-trading-status\n\n Args:\n symbols (string); Tickers to request\n on_data (function): Callback on data\n token (string); Access token\n version (string); API version", "id": "f2332:m13"} {"signature": "def tops(symbols=None, token='', version=''):", "body": "symbols = _strToList(symbols)if symbols:return _getJson('' + ''.join(symbols) + '', token, version)return _getJson('', token, version)", "docstring": "TOPS provides IEX\u2019s aggregated best quoted bid and offer position in near real time for all securities on IEX\u2019s displayed limit order book.\n TOPS is ideal for developers needing both quote and trade data.\n\n https://iexcloud.io/docs/api/#tops\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2333:m0"} {"signature": "def topsDF(symbols=None, token='', version=''):", "body": "df = pd.io.json.json_normalize(tops(symbols, token, version))_toDatetime(df)_reindex(df, '')return df", "docstring": "TOPS provides IEX\u2019s aggregated best quoted bid and offer position in near real time for all securities on IEX\u2019s displayed limit order book.\n TOPS is ideal for developers needing both quote and trade data.\n\n https://iexcloud.io/docs/api/#tops\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2333:m1"} {"signature": "def last(symbols=None, token='', version=''):", "body": "symbols = _strToList(symbols)if symbols:return _getJson('' + ''.join(symbols) + '', token, version)return _getJson('', token, version)", "docstring": "Last provides trade data for executions on IEX. It is a near real time, intraday API that provides IEX last sale price, size and time.\n Last is ideal for developers that need a lightweight stock quote.\n\n https://iexcloud.io/docs/api/#last\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2333:m2"} {"signature": "def lastDF(symbols=None, token='', version=''):", "body": "df = pd.io.json.json_normalize(last(symbols, token, version))_toDatetime(df)_reindex(df, '')return df", "docstring": "Last provides trade data for executions on IEX. It is a near real time, intraday API that provides IEX last sale price, size and time.\n Last is ideal for developers that need a lightweight stock quote.\n\n https://iexcloud.io/docs/api/#last\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2333:m3"} {"signature": "def deep(symbol=None, token='', version=''):", "body": "_raiseIfNotStr(symbol)if symbol:return _getJson('' + symbol, token, version)return _getJson('', token, version)", "docstring": "DEEP is used to receive real-time depth of book quotations direct from IEX.\n The depth of book quotations received via DEEP provide an aggregated size of resting displayed orders at a price and side,\n and do not indicate the size or number of individual orders at any price level.\n Non-displayed orders and non-displayed portions of reserve orders are not represented in DEEP.\n\n DEEP also provides last trade price and size information. Trades resulting from either displayed or non-displayed orders matching on IEX will be reported. 
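The tops/last records above are the point-in-time REST counterparts of the streaming *SSE functions. A sketch follows; the package name `pyEX`, the list-of-tickers argument form, and the version string are assumptions.

```python
# Sketch of the REST snapshots for TOPS and Last.
import os

import pyEX

token = os.environ.get("IEX_TOKEN", "")  # hypothetical env var

# Best quoted bid/offer snapshot; omitting `symbols` returns the full universe
tops_df = pyEX.topsDF(symbols=["AAPL", "SPY"], token=token, version="stable")

# Last-sale price, size and time snapshot
last_df = pyEX.lastDF(symbols=["AAPL", "SPY"], token=token, version="stable")
print(tops_df.head(), last_df.head())
```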
Routed executions will not be reported.\n\n https://iexcloud.io/docs/api/#deep\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2333:m4"} {"signature": "def deepDF(symbol=None, token='', version=''):", "body": "df = pd.io.json.json_normalize(deep(symbol, token, version))_toDatetime(df)return df", "docstring": "DEEP is used to receive real-time depth of book quotations direct from IEX.\n The depth of book quotations received via DEEP provide an aggregated size of resting displayed orders at a price and side,\n and do not indicate the size or number of individual orders at any price level.\n Non-displayed orders and non-displayed portions of reserve orders are not represented in DEEP.\n\n DEEP also provides last trade price and size information. Trades resulting from either displayed or non-displayed orders matching on IEX will be reported. Routed executions will not be reported.\n\n https://iexcloud.io/docs/api/#deep\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2333:m5"} {"signature": "def auction(symbol=None, token='', version=''):", "body": "_raiseIfNotStr(symbol)if symbol:return _getJson('' + symbol, token, version)return _getJson('', token, version)", "docstring": "DEEP broadcasts an Auction Information Message every one second between the Lock-in Time and the auction match for Opening and Closing Auctions,\n and during the Display Only Period for IPO, Halt, and Volatility Auctions. Only IEX listed securities are eligible for IEX Auctions.\n\n https://iexcloud.io/docs/api/#deep-auction\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2333:m6"} {"signature": "def auctionDF(symbol=None, token='', version=''):", "body": "df = pd.io.json.json_normalize(auction(symbol, token, version))_toDatetime(df)return df", "docstring": "DEEP broadcasts an Auction Information Message every one second between the Lock-in Time and the auction match for Opening and Closing Auctions,\n and during the Display Only Period for IPO, Halt, and Volatility Auctions. 
Only IEX listed securities are eligible for IEX Auctions.\n\n https://iexcloud.io/docs/api/#deep-auction\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2333:m7"} {"signature": "def book(symbol=None, token='', version=''):", "body": "_raiseIfNotStr(symbol)if symbol:return _getJson('' + symbol, token, version)return _getJson('', token, version)", "docstring": "Book shows IEX\u2019s bids and asks for given symbols.\n\n https://iexcloud.io/docs/api/#deep-book\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2333:m8"} {"signature": "def bookDF(symbol=None, token='', version=''):", "body": "x = book(symbol, token, version)data = []for key in x:d = x[key]d[''] = keydata.append(d)df = pd.io.json.json_normalize(data)_toDatetime(df)return df", "docstring": "Book shows IEX\u2019s bids and asks for given symbols.\n\n https://iexcloud.io/docs/api/#deep-book\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2333:m9"} {"signature": "def opHaltStatus(symbol=None, token='', version=''):", "body": "_raiseIfNotStr(symbol)if symbol:return _getJson('' + symbol, token, version)return _getJson('', token, version)", "docstring": "The Exchange may suspend trading of one or more securities on IEX for operational reasons and indicates such operational halt using the Operational halt status message.\n\n IEX disseminates a full pre-market spin of Operational halt status messages indicating the operational halt status of all securities.\n In the spin, IEX will send out an Operational Halt Message with \u201cN\u201d (Not operationally halted on IEX) for all securities that are eligible for trading at the start of the Pre-Market Session.\n If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System at the start of the Pre-Market Session.\n\n After the pre-market spin, IEX will use the Operational halt status message to relay changes in operational halt status for an individual security.\n\n https://iexcloud.io/docs/api/#deep-operational-halt-status\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2333:m10"} {"signature": "def opHaltStatusDF(symbol=None, token='', version=''):", "body": "x = opHaltStatus(symbol, token, version)data = []for key in x:d = x[key]d[''] = keydata.append(d)df = pd.DataFrame(data)_toDatetime(df)return df", "docstring": "The Exchange may suspend trading of one or more securities on IEX for operational reasons and indicates such operational halt using the Operational halt status message.\n\n IEX disseminates a full pre-market spin of Operational halt status messages indicating the operational halt status of all securities.\n In the spin, IEX will send out an Operational Halt Message with \u201cN\u201d (Not operationally halted on IEX) for all securities that are eligible for trading at the start of the Pre-Market Session.\n If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System at the start of the Pre-Market Session.\n\n After the pre-market spin, IEX will use the Operational halt status message to relay changes 
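For the depth-of-book records above, `deepDF` flattens the aggregated DEEP snapshot while `bookDF` expands the per-symbol bid/ask book into rows. A sketch follows; the package name `pyEX`, the ticker, and the version string are assumptions.

```python
# Sketch of the DEEP and book endpoints.
import os

import pyEX

token = os.environ.get("IEX_TOKEN", "")  # hypothetical env var

depth_df = pyEX.deepDF("AAPL", token=token, version="stable")
book_df = pyEX.bookDF("AAPL", token=token, version="stable")
print(depth_df.columns.tolist())
print(book_df.head())
```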
in operational halt status for an individual security.\n\n https://iexcloud.io/docs/api/#deep-operational-halt-status\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2333:m11"} {"signature": "def officialPrice(symbol=None, token='', version=''):", "body": "_raiseIfNotStr(symbol)if symbol:return _getJson('' + symbol, token, version)return _getJson('', token, version)", "docstring": "The Official Price message is used to disseminate the IEX Official Opening and Closing Prices.\n\n These messages will be provided only for IEX Listed Securities.\n\n https://iexcloud.io/docs/api/#deep-official-price\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2333:m12"} {"signature": "def officialPriceDF(symbol=None, token='', version=''):", "body": "df = pd.io.json.json_normalize(officialPrice(symbol, token, version))_toDatetime(df)return df", "docstring": "The Official Price message is used to disseminate the IEX Official Opening and Closing Prices.\n\n These messages will be provided only for IEX Listed Securities.\n\n https://iexcloud.io/docs/api/#deep-official-price\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2333:m13"} {"signature": "def securityEvent(symbol=None, token='', version=''):", "body": "_raiseIfNotStr(symbol)if symbol:return _getJson('' + symbol, token, version)return _getJson('', token, version)", "docstring": "The Security event message is used to indicate events that apply to a security. A Security event message will be sent whenever such event occurs\n\n https://iexcloud.io/docs/api/#deep-security-event\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2333:m14"} {"signature": "def securityEventDF(symbol=None, token='', version=''):", "body": "x = securityEvent(symbol, token, version)data = []for key in x:d = x[key]d[''] = keydata.append(d)df = pd.DataFrame(data)_toDatetime(df)return df", "docstring": "The Security event message is used to indicate events that apply to a security. 
A Security event message will be sent whenever such event occurs\n\n https://iexcloud.io/docs/api/#deep-security-event\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2333:m15"} {"signature": "def ssrStatus(symbol=None, token='', version=''):", "body": "_raiseIfNotStr(symbol)if symbol:return _getJson('' + symbol, token, version)return _getJson('', token, version)", "docstring": "In association with Rule 201 of Regulation SHO, the Short Sale Price Test Message is used to indicate when a short sale price test restriction is in effect for a security.\n\n IEX disseminates a full pre-market spin of Short sale price test status messages indicating the Rule 201 status of all securities.\n After the pre-market spin, IEX will use the Short sale price test status message in the event of an intraday status change.\n\n The IEX Trading System will process orders based on the latest short sale price test restriction status.\n\n https://iexcloud.io/docs/api/#deep-short-sale-price-test-status\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2333:m16"} {"signature": "def ssrStatusDF(symbol=None, token='', version=''):", "body": "x = ssrStatus(symbol, token, version)data = []for key in x:d = x[key]d[''] = keydata.append(d)df = pd.DataFrame(data)_toDatetime(df)return df", "docstring": "In association with Rule 201 of Regulation SHO, the Short Sale Price Test Message is used to indicate when a short sale price test restriction is in effect for a security.\n\n IEX disseminates a full pre-market spin of Short sale price test status messages indicating the Rule 201 status of all securities.\n After the pre-market spin, IEX will use the Short sale price test status message in the event of an intraday status change.\n\n The IEX Trading System will process orders based on the latest short sale price test restriction status.\n\n https://iexcloud.io/docs/api/#deep-short-sale-price-test-status\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2333:m17"} {"signature": "def systemEvent(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": "The System event message is used to indicate events that apply to the market or the data feed.\n\n There will be a single message disseminated per channel for each System Event type within a given trading session.\n\n https://iexcloud.io/docs/api/#deep-system-event\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2333:m18"} {"signature": "def systemEventDF(token='', version=''):", "body": "df = pd.io.json.json_normalize(systemEvent(token, version))_toDatetime(df)return df", "docstring": "The System event message is used to indicate events that apply to the market or the data feed.\n\n There will be a single message disseminated per channel for each System Event type within a given trading session.\n\n https://iexcloud.io/docs/api/#deep-system-event\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2333:m19"} {"signature": "def trades(symbol=None, token='', version=''):", "body": "_raiseIfNotStr(symbol)if symbol:return _getJson('' + symbol, token, version)return _getJson('', token, version)", "docstring": "Trade report messages 
are sent when an order on the IEX Order Book is executed in whole or in part. DEEP sends a Trade report message for every individual fill.\n\n https://iexcloud.io/docs/api/#deep-trades\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2333:m20"} {"signature": "def tradesDF(symbol=None, token='', version=''):", "body": "x = trades(symbol, token, version)data = []for key in x:dat = x[key]for d in dat:d[''] = keydata.append(d)df = pd.DataFrame(data)_toDatetime(df)return df", "docstring": "Trade report messages are sent when an order on the IEX Order Book is executed in whole or in part. DEEP sends a Trade report message for every individual fill.\n\n https://iexcloud.io/docs/api/#deep-trades\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2333:m21"} {"signature": "def tradeBreak(symbol=None, token='', version=''):", "body": "_raiseIfNotStr(symbol)if symbol:return _getJson('' + symbol, token, version)return _getJson('', token, version)", "docstring": "Trade break messages are sent when an execution on IEX is broken on that same trading day. Trade breaks are rare and only affect applications that rely upon IEX execution based data.\n\n https://iexcloud.io/docs/api/#deep-trade-break\n\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2333:m22"} {"signature": "def tradeBreakDF(symbol=None, token='', version=''):", "body": "df = pd.io.json.json_normalize(tradeBreak(symbol, token, version))_toDatetime(df)return df", "docstring": "Trade break messages are sent when an execution on IEX is broken on that same trading day. Trade breaks are rare and only affect applications that rely upon IEX execution based data.\n\n https://iexcloud.io/docs/api/#deep-trade-break\n\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2333:m23"} {"signature": "def tradingStatus(symbol=None, token='', version=''):", "body": "_raiseIfNotStr(symbol)if symbol:return _getJson('' + symbol, token, version)return _getJson('', token, version)", "docstring": "The Trading status message is used to indicate the current trading status of a security.\n For IEX-listed securities, IEX acts as the primary market and has the authority to institute a trading halt or trading pause in a security due to news dissemination or regulatory reasons.\n For non-IEX-listed securities, IEX abides by any regulatory trading halts and trading pauses instituted by the primary or listing market, as applicable.\n\n IEX disseminates a full pre-market spin of Trading status messages indicating the trading status of all securities.\n In the spin, IEX will send out a Trading status message with \u201cT\u201d (Trading) for all securities that are eligible for trading at the start of the Pre-Market Session.\n If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System.\n\n\n After the pre-market spin, IEX will use the Trading status message to relay changes in trading status for an individual security. 
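The trade-report and trade-break records above return one entry per individual fill; the DF variants flatten them into rows. A sketch follows; the package name `pyEX`, the ticker, and the version string are assumptions.

```python
# Sketch of the per-fill trade report and trade-break endpoints.
import os

import pyEX

token = os.environ.get("IEX_TOKEN", "")  # hypothetical env var

fills_df = pyEX.tradesDF("AAPL", token=token, version="stable")
breaks_df = pyEX.tradeBreakDF("AAPL", token=token, version="stable")  # trade breaks are rare; often empty
print(len(fills_df), len(breaks_df))
```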
Messages will be sent when a security is:\n\n Halted\n Paused*\n Released into an Order Acceptance Period*\n Released for trading\n *The paused and released into an Order Acceptance Period status will be disseminated for IEX-listed securities only. Trading pauses on non-IEX-listed securities will be treated simply as a halt.\n\n https://iexcloud.io/docs/api/#deep-trading-status\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2333:m24"} {"signature": "def tradingStatusDF(symbol=None, token='', version=''):", "body": "x = tradingStatus(symbol, token, version)data = []for key in x:d = x[key]d[''] = keydata.append(d)df = pd.DataFrame(data)_toDatetime(df)return df", "docstring": "The Trading status message is used to indicate the current trading status of a security.\n For IEX-listed securities, IEX acts as the primary market and has the authority to institute a trading halt or trading pause in a security due to news dissemination or regulatory reasons.\n For non-IEX-listed securities, IEX abides by any regulatory trading halts and trading pauses instituted by the primary or listing market, as applicable.\n\n IEX disseminates a full pre-market spin of Trading status messages indicating the trading status of all securities.\n In the spin, IEX will send out a Trading status message with \u201cT\u201d (Trading) for all securities that are eligible for trading at the start of the Pre-Market Session.\n If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System.\n\n After the pre-market spin, IEX will use the Trading status message to relay changes in trading status for an individual security. Messages will be sent when a security is:\n\n Halted\n Paused*\n Released into an Order Acceptance Period*\n Released for trading\n *The paused and released into an Order Acceptance Period status will be disseminated for IEX-listed securities only. 
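The DEEP helpers above all follow one pattern: the plain function returns the raw JSON payload (a dict keyed by symbol when a symbol is given), and the *DF twin flattens that payload into a pandas DataFrame. A minimal usage sketch, assuming these functions are exposed at the package's top level (imported here as pyEX) and that a valid IEX Cloud token is supplied via a hypothetical IEX_TOKEN environment variable:

    # Sketch only: the pyEX import and IEX_TOKEN variable are assumptions;
    # the function names and signatures come from the records above.
    import os
    import pyEX

    token = os.environ.get("IEX_TOKEN", "")

    # Raw JSON, keyed by symbol
    status = pyEX.tradingStatus("AAPL", token=token)

    # The *DF twins return the same data flattened into DataFrames
    status_df = pyEX.tradingStatusDF("AAPL", token=token)
    trades_df = pyEX.tradesDF("AAPL", token=token)

    print(status_df.head())
    print(trades_df.head())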
Trading pauses on non-IEX-listed securities will be treated simply as a halt.\n\n https://iexcloud.io/docs/api/#deep-trading-status\n\n Args:\n symbol (string); Ticker to request\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2333:m25"} {"signature": "def hist(date=None, token='', version=''):", "body": "if date is None:return _getJson('', token, version)else:date = _strOrDate(date)return _getJson('' + date, token, version)", "docstring": "Args:\n date (datetime); Effective date\n token (string); Access token\n version (string); API version\n\nReturns:\n dict: result", "id": "f2333:m26"} {"signature": "def histDF(date=None, token='', version=''):", "body": "x = hist(date, token, version)data = []for key in x:dat = x[key]for item in dat:item[''] = keydata.append(item)df = pd.DataFrame(data)_toDatetime(df)_reindex(df, '')return df", "docstring": "https://iextrading.com/developer/docs/#hist", "id": "f2333:m27"} {"signature": "@deprecated(details='')def topsWS(symbols=None, on_data=None):", "body": "symbols = _strToList(symbols)if symbols:sendinit = ('', ''.join(symbols))return _stream(_wsURL(''), sendinit, on_data)return _stream(_wsURL(''), on_data=on_data)", "docstring": "https://iextrading.com/developer/docs/#tops", "id": "f2334:m0"} {"signature": "@deprecated(details='')def lastWS(symbols=None, on_data=None):", "body": "symbols = _strToList(symbols)if symbols:sendinit = ('', ''.join(symbols))return _stream(_wsURL(''), sendinit, on_data)return _stream(_wsURL(''), on_data=on_data)", "docstring": "https://iextrading.com/developer/docs/#last", "id": "f2334:m1"} {"signature": "@deprecated(details='')def deepWS(symbols=None, channels=None, on_data=None):", "body": "symbols = _strToList(symbols)channels = channels or []if isinstance(channels, str):if channels not in DeepChannels.options():raise PyEXception('', type(channels))channels = [channels]elif isinstance(channels, DeepChannels):channels = [channels.value]elif isinstance(channels, list):for i, c in enumerate(channels):if isinstance(c, DeepChannels):channels[i] = c.valueelif not isinstance(c, str) or isinstance(c, str) and c not in DeepChannels.options():raise PyEXception('', c)sendinit = ({'': symbols, '': channels},)return _stream(_wsURL(''), sendinit, on_data)", "docstring": "https://iextrading.com/developer/docs/#deep", "id": "f2334:m2"} {"signature": "@deprecated(details='')def bookWS(symbols=None, on_data=None):", "body": "symbols = _strToList(symbols)sendinit = ({'': symbols, '': ['']},)return _stream(_wsURL(''), sendinit, on_data)", "docstring": "https://iextrading.com/developer/docs/#book51", "id": "f2334:m3"} {"signature": "@deprecated(details='')def tradesWS(symbols=None, on_data=None):", "body": "symbols = _strToList(symbols)sendinit = ({'': symbols, '': ['']},)return _stream(_wsURL(''), sendinit, on_data)", "docstring": "https://iextrading.com/developer/docs/#trades", "id": "f2334:m4"} {"signature": "@deprecated(details='')def systemEventWS(on_data=None):", "body": "sendinit = ({'': ['']},)return _stream(_wsURL(''), sendinit, on_data)", "docstring": "https://iextrading.com/developer/docs/#system-event", "id": "f2334:m5"} {"signature": "@deprecated(details='')def tradingStatusWS(symbols=None, on_data=None):", "body": "symbols = _strToList(symbols)sendinit = ({'': symbols, '': ['']},)return _stream(_wsURL(''), sendinit, on_data)", "docstring": "https://iextrading.com/developer/docs/#trading-status", "id": "f2334:m6"} {"signature": "@deprecated(details='')def 
opHaltStatusWS(symbols=None, on_data=None):", "body": "symbols = _strToList(symbols)sendinit = ({'': symbols, '': ['']},)return _stream(_wsURL(''), sendinit, on_data)", "docstring": "https://iextrading.com/developer/docs/#operational-halt-status", "id": "f2334:m7"} {"signature": "@deprecated(details='')def ssrStatusWS(symbols=None, on_data=None):", "body": "symbols = _strToList(symbols)sendinit = ({'': symbols, '': ['']},)return _stream(_wsURL(''), sendinit, on_data)", "docstring": "https://iextrading.com/developer/docs/#short-sale-price-test-status", "id": "f2334:m8"} {"signature": "@deprecated(details='')def securityEventWS(symbols=None, on_data=None):", "body": "symbols = _strToList(symbols)sendinit = ({'': symbols, '': ['']},)return _stream(_wsURL(''), sendinit, on_data)", "docstring": "https://iextrading.com/developer/docs/#security-event", "id": "f2334:m9"} {"signature": "@deprecated(details='')def tradeBreakWS(symbols=None, on_data=None):", "body": "symbols = _strToList(symbols)sendinit = ({'': symbols, '': ['']},)return _stream(_wsURL(''), sendinit, on_data)", "docstring": "https://iextrading.com/developer/docs/#trade-break", "id": "f2334:m10"} {"signature": "@deprecated(details='')def auctionWS(symbols=None, on_data=None):", "body": "symbols = _strToList(symbols)sendinit = ({'': symbols, '': ['']},)return _stream(_wsURL(''), sendinit, on_data)", "docstring": "https://iextrading.com/developer/docs/#auction", "id": "f2334:m11"} {"signature": "@deprecated(details='')def officialPriceWS(symbols=None, on_data=None):", "body": "symbols = _strToList(symbols)sendinit = ({'': symbols, '': ['']},)return _stream(_wsURL(''), sendinit, on_data)", "docstring": "https://iextrading.com/developer/docs/#official-price", "id": "f2334:m12"} {"signature": "@deprecated(details='')def markets(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": "https://iextrading.com/developer/docs/#intraday", "id": "f2337:m0"} {"signature": "@deprecated(details='')def marketsDF(token='', version=''):", "body": "df = pd.DataFrame(markets(token, version))_toDatetime(df)return df", "docstring": "https://iextrading.com/developer/docs/#intraday", "id": "f2337:m1"} {"signature": "def _getJson(url, token='', version=''):", "body": "if token:return _getJsonIEXCloud(url, token, version)return _getJsonOrig(url)", "docstring": "for backwards compat, accepting token and version but ignoring", "id": "f2338:m0"} {"signature": "def _getJsonOrig(url):", "body": "url = _URL_PREFIX + urlresp = requests.get(urlparse(url).geturl(), proxies=_PYEX_PROXIES)if resp.status_code == :return resp.json()raise PyEXception('' % resp.status_code, resp.text)", "docstring": "internal", "id": "f2338:m1"} {"signature": "def _getJsonIEXCloud(url, token='', version=''):", "body": "url = _URL_PREFIX2.format(version=version) + urlresp = requests.get(urlparse(url).geturl(), proxies=_PYEX_PROXIES, params={'': token})if resp.status_code == :return resp.json()raise PyEXception('' % resp.status_code, resp.text)", "docstring": "for iex cloud", "id": "f2338:m2"} {"signature": "def _wsURL(url):", "body": "return '' + url", "docstring": "internal", "id": "f2338:m3"} {"signature": "def _strToList(st):", "body": "if isinstance(st, string_types):return [st]return st", "docstring": "internal", "id": "f2338:m4"} {"signature": "def _strCommaSeparatedString(st):", "body": "return ''.join(_strToList(st))", "docstring": "internal", "id": "f2338:m5"} {"signature": "def _strOrDate(st):", "body": "if isinstance(st, string_types):return stelif 
isinstance(st, datetime):return st.strftime('')raise PyEXception('', str(st))", "docstring": "internal", "id": "f2338:m6"} {"signature": "def _raiseIfNotStr(s):", "body": "if s is not None and not isinstance(s, string_types):raise PyEXception('' % str(type(s)))", "docstring": "internal", "id": "f2338:m7"} {"signature": "def _tryJson(data, raw=True):", "body": "if raw:return datatry:return json.loads(data)except ValueError:return data", "docstring": "internal", "id": "f2338:m8"} {"signature": "def _stream(url, sendinit=None, on_data=print):", "body": "cl = WSClient(url, sendinit=sendinit, on_data=on_data)return cl", "docstring": "internal", "id": "f2338:m9"} {"signature": "def _streamSSE(url, on_data=print, accrue=False):", "body": "messages = SSEClient(url)if accrue:ret = []for msg in messages:data = msg.dataon_data(json.loads(data))if accrue:ret.append(msg)return ret", "docstring": "internal", "id": "f2338:m10"} {"signature": "def _reindex(df, col):", "body": "if col in df.columns:df.set_index(col, inplace=True)", "docstring": "internal", "id": "f2338:m11"} {"signature": "def _toDatetime(df, cols=None, tcols=None):", "body": "cols = cols or _STANDARD_DATE_FIELDStcols = tcols = _STANDARD_TIME_FIELDSfor col in cols:if col in df:df[col] = pd.to_datetime(df[col], infer_datetime_format=True, errors='')for tcol in tcols:if tcol in df:df[tcol] = pd.to_datetime(df[tcol], unit='', errors='')", "docstring": "internal", "id": "f2338:m12"} {"signature": "def setProxy(proxies=None):", "body": "global _PYEX_PROXIES_PYEX_PROXIES = proxies", "docstring": "Set proxies argument for requests\n\n Args:\n proxies (dict): Proxies to set", "id": "f2338:m13"} {"signature": "def __init__(self, addr, sendinit=None, on_data=None, on_open=None, on_close=None, raw=True):", "body": "self.addr = addrself.sendinit = sendiniton_data = on_data or printclass Namespace(BaseNamespace):def on_connect(self, *data):if on_open:on_open(_tryJson(data, raw))def on_disconnect(self, *data):if on_close:on_close(_tryJson(data, raw))def on_message(self, data):on_data(_tryJson(data, raw))self._Namespace = Namespace", "docstring": "addr: path to sio\nsendinit: tuple to emit\non_data, on_open, on_close: functions to call", "id": "f2338:c1:m0"} {"signature": "def stats(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": "https://iexcloud.io/docs/api/#stats-intraday\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2339:m0"} {"signature": "def statsDF(token='', version=''):", "body": "df = pd.DataFrame(stats(token, version))_toDatetime(df)return df", "docstring": "https://iexcloud.io/docs/api/#stats-intraday\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2339:m1"} {"signature": "def recent(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": "https://iexcloud.io/docs/api/#stats-recent\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2339:m2"} {"signature": "def recentDF(token='', version=''):", "body": "df = pd.DataFrame(recent(token, version))_toDatetime(df)_reindex(df, '')return df", "docstring": "https://iexcloud.io/docs/api/#stats-recent\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2339:m3"} {"signature": "def records(token='', version=''):", "body": "return _getJson('', token, version)", "docstring": 
"https://iexcloud.io/docs/api/#stats-records\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2339:m4"} {"signature": "def recordsDF(token='', version=''):", "body": "df = pd.DataFrame(records(token, version))_toDatetime(df)return df", "docstring": "https://iexcloud.io/docs/api/#stats-records\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2339:m5"} {"signature": "def summary(date=None, token='', version=''):", "body": "if date:if isinstance(date, str):return _getJson('' + date, token, version)elif isinstance(date, datetime):return _getJson('' + date.strftime(''), token, version)else:raise PyEXception(\"\" % str(type(date)), token, version)return _getJson('', token, version)", "docstring": "https://iexcloud.io/docs/api/#stats-historical-summary\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2339:m6"} {"signature": "def summaryDF(date=None, token='', version=''):", "body": "df = pd.DataFrame(summary(date, token, version))_toDatetime(df)return df", "docstring": "https://iexcloud.io/docs/api/#stats-historical-summary\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2339:m7"} {"signature": "def daily(date=None, last='', token='', version=''):", "body": "if date:date = _strOrDate(date)return _getJson('' + date, token, version)elif last:return _getJson('' + last, token, version)return _getJson('', token, version)", "docstring": "https://iexcloud.io/docs/api/#stats-historical-daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n dict: result", "id": "f2339:m8"} {"signature": "def dailyDF(date=None, last='', token='', version=''):", "body": "df = pd.DataFrame(daily(date, last, token, version))_toDatetime(df)return df", "docstring": "https://iexcloud.io/docs/api/#stats-historical-daily\n\n Args:\n token (string); Access token\n version (string); API version\n\n Returns:\n DataFrame: result", "id": "f2339:m9"} {"signature": "def get_config_value(name, fallback=None):", "body": "cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX)return cli_config.get('', name, fallback)", "docstring": "Gets a config by name.\n\n In the case where the config name is not found, will use fallback value.", "id": "f2344:m0"} {"signature": "def get_config_bool(name):", "body": "cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX)return cli_config.getboolean('', name, False)", "docstring": "Checks if a config value is set to a valid bool value.", "id": "f2344:m1"} {"signature": "def set_config_value(name, value):", "body": "cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX)cli_config.set_value('', name, value)", "docstring": "Set a config by name to a value.", "id": "f2344:m2"} {"signature": "def client_endpoint():", "body": "return get_config_value('', None)", "docstring": "Cluster HTTP gateway endpoint address and port, represented as a URL.", "id": "f2344:m3"} {"signature": "def security_type():", "body": "return get_config_value('', None)", "docstring": "The selected security type of client.", "id": "f2344:m4"} {"signature": "def set_cluster_endpoint(endpoint):", "body": "set_config_value('', endpoint)", "docstring": "Configure cluster endpoint", "id": "f2344:m5"} {"signature": "def no_verify_setting():", "body": "return get_config_bool('')", "docstring": "True to skip certificate SSL 
validation and verification", "id": "f2344:m6"} {"signature": "def set_no_verify(no_verify):", "body": "if no_verify:set_config_value('', '')else:set_config_value('', '')", "docstring": "Configure if cert verification should be skipped.", "id": "f2344:m7"} {"signature": "def ca_cert_info():", "body": "if get_config_bool(''):return get_config_value('', fallback=None)return None", "docstring": "CA certificate(s) path", "id": "f2344:m8"} {"signature": "def set_ca_cert(ca_path=None):", "body": "if ca_path:set_config_value('', ca_path)set_config_value('', '')else:set_config_value('', '')", "docstring": "Configure paths to CA cert(s).", "id": "f2344:m9"} {"signature": "def cert_info():", "body": "sec_type = security_type()if sec_type == '':return get_config_value('', fallback=None)if sec_type == '':cert_path = get_config_value('', fallback=None)key_path = get_config_value('', fallback=None)return cert_path, key_pathreturn None", "docstring": "Path to certificate related files, either a single file path or a\n tuple. In the case of no security, returns None.", "id": "f2344:m10"} {"signature": "def aad_cache():", "body": "return jsonpickle.decode(get_config_value('', fallback=None)),jsonpickle.decode(get_config_value('', fallback=None))", "docstring": "AAD token cache.", "id": "f2344:m11"} {"signature": "def set_aad_cache(token, cache):", "body": "set_config_value('', jsonpickle.encode(token))set_config_value('', jsonpickle.encode(cache))", "docstring": "Set AAD token cache.", "id": "f2344:m12"} {"signature": "def aad_metadata():", "body": "return get_config_value('', fallback=None),get_config_value('', fallback=None),get_config_value('', fallback=None)", "docstring": "AAD metadata.", "id": "f2344:m13"} {"signature": "def set_aad_metadata(uri, resource, client):", "body": "set_config_value('', uri)set_config_value('', resource)set_config_value('', client)", "docstring": "Set AAD metadata.", "id": "f2344:m14"} {"signature": "def set_auth(pem=None, cert=None, key=None, aad=False):", "body": "if any([cert, key]) and pem:raise ValueError('')if any([cert, key]) and not all([cert, key]):raise ValueError('')if pem:set_config_value('', '')set_config_value('', pem)elif cert or key:set_config_value('', '')set_config_value('', cert)set_config_value('', key)elif aad:set_config_value('', '')else:set_config_value('', '')", "docstring": "Set certificate usage paths", "id": "f2344:m15"} {"signature": "def select_arg_verify(endpoint, cert, key, pem, ca, aad, no_verify): ", "body": "if not (endpoint.lower().startswith('')or endpoint.lower().startswith('')):raise CLIError('')usage = ('''')if ca and not (pem or all([key, cert])):raise CLIError(usage)if no_verify and not (pem or all([key, cert]) or aad):raise CLIError(usage)if no_verify and ca:raise CLIError(usage)if any([cert, key]) and not all([cert, key]):raise CLIError(usage)if aad and any([pem, cert, key]):raise CLIError(usage)if pem and any([cert, key]):raise CLIError(usage)", "docstring": "Verify arguments for select command", "id": "f2345:m0"} {"signature": "def select(endpoint, cert=None, key=None, pem=None, ca=None, aad=False, no_verify=False):", "body": "from sfctl.config import (set_ca_cert, set_auth, set_aad_cache,set_cluster_endpoint,set_no_verify)from msrest import ServiceClient, Configurationfrom sfctl.auth import ClientCertAuthentication, AdalAuthenticationselect_arg_verify(endpoint, cert, key, pem, ca, aad, no_verify)if aad:new_token, new_cache = get_aad_token(endpoint, no_verify)set_aad_cache(new_token, new_cache)rest_client = 
ServiceClient(AdalAuthentication(no_verify),Configuration(endpoint))rest_client.send(rest_client.get('')).raise_for_status()else:client_cert = Noneif pem:client_cert = pemelif cert:client_cert = (cert, key)rest_client = ServiceClient(ClientCertAuthentication(client_cert, ca, no_verify),Configuration(endpoint))rest_client.send(rest_client.get('')).raise_for_status()set_cluster_endpoint(endpoint)set_no_verify(no_verify)set_ca_cert(ca)set_auth(pem, cert, key, aad)", "docstring": "Connects to a Service Fabric cluster endpoint.\nIf connecting to secure cluster specify an absolute path to a cert (.crt)\nand key file (.key) or a single file with both (.pem). Do not specify both.\nOptionally, if connecting to a secure cluster, specify also an absolute\npath to a CA bundle file or directory of trusted CA certs.\n:param str endpoint: Cluster endpoint URL, including port and HTTP or HTTPS\nprefix\n:param str cert: Absolute path to a client certificate file\n:param str key: Absolute path to client certificate key file\n:param str pem: Absolute path to client certificate, as a .pem file\n:param str ca: Absolute path to CA certs directory to treat as valid\nor CA bundle\nfile\n:param bool aad: Use Azure Active Directory for authentication\n:param bool no_verify: Disable verification for certificates when using\nHTTPS, note: this is an insecure option and should not be used for\nproduction environments", "id": "f2345:m1"} {"signature": "def get_aad_token(endpoint, no_verify):", "body": "from azure.servicefabric.service_fabric_client_ap_is import (ServiceFabricClientAPIs)from sfctl.auth import ClientCertAuthenticationfrom sfctl.config import set_aad_metadataauth = ClientCertAuthentication(None, None, no_verify)client = ServiceFabricClientAPIs(auth, base_url=endpoint)aad_metadata = client.get_aad_metadata()if aad_metadata.type != \"\":raise CLIError(\"\")aad_resource = aad_metadata.metadatatenant_id = aad_resource.tenantauthority_uri = aad_resource.login + '' + tenant_idcontext = adal.AuthenticationContext(authority_uri,api_version=None)cluster_id = aad_resource.clusterclient_id = aad_resource.clientset_aad_metadata(authority_uri, cluster_id, client_id)code = context.acquire_user_code(cluster_id, client_id)print(code[''])token = context.acquire_token_with_device_code(cluster_id, code, client_id)print(\"\")return token, context.cache", "docstring": "Get AAD token", "id": "f2345:m2"} {"signature": "def cli():", "body": "return VersionedCLI(cli_name=SF_CLI_NAME,config_dir=SF_CLI_CONFIG_DIR,config_env_var_prefix=SF_CLI_ENV_VAR_PREFIX,commands_loader_cls=SFCommandLoader,help_cls=SFCommandHelp)", "docstring": "Create CLI environment", "id": "f2346:m0"} {"signature": "def launch():", "body": "cli_env = cli()return cli_env.invoke(sys.argv[:])", "docstring": "Entry point for Service Fabric CLI.\n\n Configures and invokes CLI with arguments passed during the time the python\n session is launched", "id": "f2346:m1"} {"signature": "def signed_session(self, session=None):", "body": "if session:session = super(ClientCertAuthentication, self).signed_session(session)else:session = super(ClientCertAuthentication, self).signed_session()if self.cert is not None:session.cert = self.certif self.ca_cert is not None:session.verify = self.ca_certif self.no_verify:session.verify = Falsereturn session", "docstring": "Create requests session with any required auth headers\n applied.\n\n :rtype: requests.Session.", "id": "f2347:c0:m1"} {"signature": "def signed_session(self, session=None):", "body": "from sfctl.config import 
(aad_metadata, aad_cache)if session:session = super(AdalAuthentication, self).signed_session(session)else:session = super(AdalAuthentication, self).signed_session()if self.no_verify:session.verify = Falseauthority_uri, cluster_id, client_id = aad_metadata()existing_token, existing_cache = aad_cache()context = adal.AuthenticationContext(authority_uri,cache=existing_cache)new_token = context.acquire_token(cluster_id,existing_token[''], client_id)header = \"\".format(\"\", new_token[''])session.headers[''] = headerreturn session", "docstring": "Create requests session with AAD auth headers\n\n :rtype: requests.Session.", "id": "f2347:c1:m1"} {"signature": "def create(_):", "body": "endpoint = client_endpoint()if not endpoint:raise CLIError(\"\"\"\"\"\")no_verify = no_verify_setting()if security_type() == '':auth = AdalAuthentication(no_verify)else:cert = cert_info()ca_cert = ca_cert_info()auth = ClientCertAuthentication(cert, ca_cert, no_verify)return ServiceFabricClientAPIs(auth, base_url=endpoint)", "docstring": "Create a client for Service Fabric APIs.", "id": "f2348:m0"} {"signature": "def load_command_table(self, args): ", "body": "with CommandSuperGroup(__name__, self,'') as super_group:with super_group.group('') as group:group.command('', '')with CommandSuperGroup(__name__, self, '',client_factory=client_create) as super_group: with super_group.group('') as group:group.command('', '')group.command('', '')group.command('', '')group.command('', '')group.command('', '')with ArgumentsContext(self, '') as ac:ac.argument('', options_list=['', ''])ac.argument('', options_list=['', ''])ac.argument('', options_list=['', ''])ac.argument('', options_list=['', ''])ac.argument('', options_list=['', ''])ac.argument('', options_list=['', ''])ac.argument('', options_list=['', ''])return OrderedDict(self.command_table)", "docstring": "Load all Service Fabric commands", "id": "f2349:c1:m0"} {"signature": "def get_reliabledictionary_list(client, application_name, service_name):", "body": "cluster = Cluster.from_sfclient(client)service = cluster.get_application(application_name).get_service(service_name)for dictionary in service.get_dictionaries():print(dictionary.name)", "docstring": "List existing reliable dictionaries.\n\n List existing reliable dictionaries and respective schema for given application and service.\n\n :param application_name: Name of the application.\n :type application_name: str\n :param service_name: Name of the service.\n :type service_name: str", "id": "f2353:m0"} {"signature": "def get_reliabledictionary_schema(client, application_name, service_name, dictionary_name, output_file=None):", "body": "cluster = Cluster.from_sfclient(client)dictionary = cluster.get_application(application_name).get_service(service_name).get_dictionary(dictionary_name)result = json.dumps(dictionary.get_information(), indent=)if (output_file == None):output_file = \"\".format(application_name, service_name, dictionary_name)with open(output_file, \"\") as output:output.write(result)print('' + output_file)print(result)", "docstring": "Query Schema information for existing reliable dictionaries.\n\n Query Schema information existing reliable dictionaries for given application and service.\n\n :param application_name: Name of the application.\n :type application_name: str\n :param service_name: Name of the service.\n :type service_name: str\n :param dictionary: Name of the reliable dictionary.\n :type dictionary: str\n :param output_file: Optional file to save the schema.", "id": "f2353:m1"} {"signature": 
"def get_reliabledictionary_type_schema(client, application_name, service_name, dictionary_name, type_name, output_file=None):", "body": "cluster = Cluster.from_sfclient(client)dictionary = cluster.get_application(application_name).get_service(service_name).get_dictionary(dictionary_name)result = json.dumps(dictionary.get_complex_type(type_name), indent=)if (output_file == None):output_file = \"\".format(application_name, service_name, dictionary_name, type_name)with open(output_file, \"\") as output:output.write(result)print('' + output_file)print(result)", "docstring": "Query complex type information existing reliable dictionaries for given application and service. Make sure to provide entire namespace for your type if necessary.\n\n :param application_name: Name of the application.\n :type application_name: str\n :param service_name: Name of the service.\n :type service_name: str\n :param dictionary_name: Name of the reliable dictionary.\n :type dictionary_name: str\n :param type_name: Name of the complex type.\n :type type_name: str\n :param output_file: Optional file to save the schema.", "id": "f2353:m2"} {"signature": "def query_reliabledictionary(client, application_name, service_name, dictionary_name, query_string, partition_key=None, partition_id=None, output_file=None):", "body": "cluster = Cluster.from_sfclient(client)dictionary = cluster.get_application(application_name).get_service(service_name).get_dictionary(dictionary_name)start = time.time()if (partition_id != None):result = dictionary.query(query_string, PartitionLookup.ID, partition_id)elif (partition_key != None):result = dictionary.query(query_string, PartitionLookup.KEY, partition_key)else:result = dictionary.query(query_string)if type(result) is str:print(result)returnelse:result = json.dumps(result.get(\"\"), indent=)print(\"\" + str(time.time() - start) + \"\")if (output_file == None):output_file = \"\".format(application_name, service_name, dictionary_name)with open(output_file, \"\") as output:output.write(result)print()print('' + output_file)print(result)", "docstring": "Query existing reliable dictionary.\n\n Query existing reliable dictionaries for given application and service.\n\n :param application_name: Name of the application.\n :type application_name: str\n :param service_name: Name of the service.\n :type service_name: str\n :param dictionary_name: Name of the reliable dictionary.\n :type dictionary_name: str\n :param query_string: An OData query string. For example $top=10. 
Check https://www.odata.org/documentation/ for more information.\n :type query_string: str\n :param partition_key: Optional partition key of the desired partition, either a string if named schema or int if Int64 schema\n :type partition_id: str\n :param partition_id: Optional partition GUID of the owning reliable dictionary.\n :type partition_id: str\n :param output_file: Optional file to save the schema.", "id": "f2353:m3"} {"signature": "def execute_reliabledictionary(client, application_name, service_name, input_file):", "body": "cluster = Cluster.from_sfclient(client)service = cluster.get_application(application_name).get_service(service_name)with open(input_file) as json_file:json_data = json.load(json_file)service.execute(json_data)return", "docstring": "Execute create, update, delete operations on existing reliable dictionaries.\n\n carry out create, update and delete operations on existing reliable dictionaries for given application and service.\n\n :param application_name: Name of the application.\n :type application_name: str\n :param service_name: Name of the service.\n :type service_name: str\n :param output_file: input file with list of json to provide the operation information for reliable dictionaries.", "id": "f2353:m4"} {"signature": "def _check_input(self, input):", "body": "if isinstance(input, str):return ''elif isinstance(input, list):if all(isinstance(item, str) for item in input):return ''raise ValueError(\"\"\"\")", "docstring": "Checks the validity of the input.\n\n In case of an invalid input throws ValueError.", "id": "f2362:c0:m1"} {"signature": "def build(self, x):", "body": "type = self._check_input(x)if type == '':x += next(self._terminalSymbolsGenerator())self._build(x)if type == '':self._build_generalized(x)", "docstring": "Builds the Suffix tree on the given input.\n If the input is of type List of Strings:\n Generalized Suffix Tree is built.\n\n :param x: String or List of Strings", "id": "f2362:c0:m2"} {"signature": "def _build(self, x):", "body": "self.word = xself._build_McCreight(x)", "docstring": "Builds a Suffix tree.", "id": "f2362:c0:m3"} {"signature": "def _build_McCreight(self, x):", "body": "u = self.rootd = for i in range(len(x)):while u.depth == d and u._has_transition(x[d+i]):u = u._get_transition_link(x[d+i])d = d + while d < u.depth and x[u.idx + d] == x[i + d]:d = d + if d < u.depth:u = self._create_node(x, u, d)self._create_leaf(x, i, u, d)if not u._get_suffix_link():self._compute_slink(x, u)u = u._get_suffix_link()d = d - if d < :d = ", "docstring": "Builds a Suffix tree using McCreight O(n) algorithm.\n\n Algorithm based on:\n McCreight, Edward M. \"A space-economical suffix tree construction algorithm.\" - ACM, 1976.\n Implementation based on:\n UH CS - 58093 String Processing Algorithms Lecture Notes", "id": "f2362:c0:m4"} {"signature": "def _build_Ukkonen(self, x):", "body": "raise NotImplementedError()", "docstring": "Builds a Suffix tree using Ukkonen's online O(n) algorithm.\n\n Algorithm based on:\n Ukkonen, Esko. 
\"On-line construction of suffix trees.\" - Algorithmica, 1995.", "id": "f2362:c0:m8"} {"signature": "def _build_generalized(self, xs):", "body": "terminal_gen = self._terminalSymbolsGenerator()_xs = ''.join([x + next(terminal_gen) for x in xs])self.word = _xsself._generalized_word_starts(xs)self._build(_xs)self.root._traverse(self._label_generalized)", "docstring": "Builds a Generalized Suffix Tree (GST) from the array of strings provided.", "id": "f2362:c0:m9"} {"signature": "def _label_generalized(self, node):", "body": "if node.is_leaf():x = {self._get_word_start_index(node.idx)}else:x = {n for ns in node.transition_links for n in ns[].generalized_idxs}node.generalized_idxs = x", "docstring": "Helper method that labels the nodes of GST with indexes of strings\n found in their descendants.", "id": "f2362:c0:m10"} {"signature": "def _get_word_start_index(self, idx):", "body": "i = for _idx in self.word_starts[:]:if idx < _idx:return ielse:i+=return i", "docstring": "Helper method that returns the index of the string based on node's\n starting index", "id": "f2362:c0:m11"} {"signature": "def lcs(self, stringIdxs=-):", "body": "if stringIdxs == - or not isinstance(stringIdxs, list):stringIdxs = set(range(len(self.word_starts)))else:stringIdxs = set(stringIdxs)deepestNode = self._find_lcs(self.root, stringIdxs)start = deepestNode.idxend = deepestNode.idx + deepestNode.depthreturn self.word[start:end]", "docstring": "Returns the Largest Common Substring of Strings provided in stringIdxs.\n If stringIdxs is not provided, the LCS of all strings is returned.\n\n ::param stringIdxs: Optional: List of indexes of strings.", "id": "f2362:c0:m12"} {"signature": "def _find_lcs(self, node, stringIdxs):", "body": "nodes = [self._find_lcs(n, stringIdxs)for (n,_) in node.transition_linksif n.generalized_idxs.issuperset(stringIdxs)]if nodes == []:return nodedeepestNode = max(nodes, key=lambda n: n.depth)return deepestNode", "docstring": "Helper method that finds LCS by traversing the labeled GSD.", "id": "f2362:c0:m13"} {"signature": "def _generalized_word_starts(self, xs):", "body": "self.word_starts = []i = for n in range(len(xs)):self.word_starts.append(i)i += len(xs[n]) + ", "docstring": "Helper method returns the starting indexes of strings in GST", "id": "f2362:c0:m14"} {"signature": "def find(self, y):", "body": "node = self.rootwhile True:edge = self._edgeLabel(node, node.parent)if edge.startswith(y):return node.idxi = while(i < len(edge) and edge[i] == y[]):y = y[:]i += if i != :if i == len(edge) and y != '':passelse:return -node = node._get_transition_link(y[])if not node:return -", "docstring": "Returns starting position of the substring y in the string used for\n building the Suffix tree.\n\n :param y: String\n :return: Index of the starting position of string y in the string used for building the Suffix tree\n -1 if y is not a substring.", "id": "f2362:c0:m15"} {"signature": "def _edgeLabel(self, node, parent):", "body": "return self.word[node.idx + parent.depth : node.idx + node.depth]", "docstring": "Helper method, returns the edge label between a node and it's parent", "id": "f2362:c0:m17"} {"signature": "def _terminalSymbolsGenerator(self):", "body": "py2 = sys.version[] < ''UPPAs = list(list(range(,+)) + list(range(,+)) + list(range(, +)))for i in UPPAs:if py2:yield(unichr(i))else:yield(chr(i))raise ValueError(\"\")", "docstring": "Generator of unique terminal symbols used for building the Generalized Suffix Tree.\n Unicode Private Use Area U+E000..U+F8FF is used to ensure that terminal 
symbols\n are not part of the input string.", "id": "f2362:c0:m18"} {"signature": "def run_dummy_main(*command_lines):", "body": "import shlexwith kxg.quickstart.ProcessPool(time_limit=) as pool:for command_line in command_lines:name = ''.format(command_line)pool.start(name, dummy_main, shlex.split(command_line))", "docstring": "Run the specified command lines in threads so that clients and servers \ncan talk to each other if they need to. In some cases only one thread \nwill be started, but that's not a problem. Use threads instead of \nprocesses for the benefit of the code coverage analysis.", "id": "f2366:m1"} {"signature": "def unsafe_method(self, x):", "body": "pass", "docstring": "Docstring.", "id": "f2370:c10:m4"} {"signature": "def receive_id_from_server(self):", "body": "for message in self.pipe.receive():if isinstance(message, IdFactory):self.actor_id_factory = messagereturn Truereturn False", "docstring": "Listen for an id from the server.\n\nAt the beginning of a game, each client receives an IdFactory from the \nserver. This factory are used to give id numbers that are guaranteed \nto be unique to tokens that created locally. This method checks to see if such \na factory has been received. If it hasn't, this method does not block \nand immediately returns False. If it has, this method returns True \nafter saving the factory internally. At this point it is safe to enter \nthe GameStage.", "id": "f2375:c0:m1"} {"signature": "def execute_sync(self, message):", "body": "info(\"\")with self.world._unlock_temporarily():message._sync(self.world)self.world._react_to_sync_response(message)for actor in self.actors:actor._react_to_sync_response(message)", "docstring": "Respond when the server indicates that the client is out of sync.\n\nThe server can request a sync when this client sends a message that \nfails the check() on the server. If the reason for the failure isn't \nvery serious, then the server can decide to send it as usual in the \ninterest of a smooth gameplay experience. When this happens, the \nserver sends out an extra response providing the clients with the\ninformation they need to resync themselves.", "id": "f2375:c0:m4"} {"signature": "def execute_undo(self, message):", "body": "info(\"\")with self.world._unlock_temporarily():message._undo(self.world)self.world._react_to_undo_response(message)for actor in self.actors:actor._react_to_undo_response(message)", "docstring": "Manage the response when the server rejects a message.\n\nAn undo is when required this client sends a message that the server \nrefuses to pass on to the other clients playing the game. When this \nhappens, the client must undo the changes that the message made to the \nworld before being sent or crash. Note that unlike sync requests, undo \nrequests are only reported to the client that sent the offending \nmessage.", "id": "f2375:c0:m5"} {"signature": "def _relay_message(self, message):", "body": "info(\"\")if not message.was_sent_by(self._id_factory):self.pipe.send(message)self.pipe.deliver()", "docstring": "Relay messages from the forum on the server to the client represented \nby this actor.", "id": "f2375:c1:m6"} {"signature": "def _react_to_message(self, message):", "body": "pass", "docstring": "Don't ever change the world in response to a message.\n\nThis method is defined is called by the game engine to trigger \ncallbacks tied by this actor to particular messages. 
This is useful \nfor ordinary actors, but remote actors are only meant to shuttle \nmessage between clients and should never react to individual messages.", "id": "f2375:c1:m7"} {"signature": "def update(self, time):", "body": "total_acceleration = Vector.null()max_jerk = self.max_accelerationfor behavior in self.behaviors:acceleration, importance = behavior.update()weighted_acceleration = acceleration * importance\"\"\"\"\"\"total_acceleration += weighted_accelerationself.acceleration = total_accelerationSprite.update(self, time)if self.velocity.magnitude > :self.facing = self.velocity.normal", "docstring": "Update acceleration. Accounts for the importance and\n priority (order) of multiple behaviors.", "id": "f2380:c1:m2"} {"signature": "def update (self):", "body": "delta_velocity = Vector.null()target_position = self.target.get_position()sprite_position = self.sprite.get_position()desired_direction = target_position - sprite_positionif == self.los or desired_direction.magnitude <= self.los:desired_normal = desired_direction.normaldesired_velocity = desired_normal * self.sprite.get_max_velocity()delta_velocity = desired_velocity - self.sprite.get_velocity()self.last_delta_velocity = delta_velocityreturn delta_velocity, self.power", "docstring": "Calculate what the desired change in velocity is. \n delta_velocity = acceleration * delta_time\n Time will be dealt with by the sprite.", "id": "f2380:c4:m1"} {"signature": "def update (self):", "body": "delta_velocity = Vector.null()target_position = self.target.get_position()sprite_position = self.sprite.get_position()desired_direction = target_position - sprite_positionif == self.los or desired_direction.magnitude <= self.los:try:desired_normal = desired_direction.normalexcept NullVectorError:desired_normal = Vector.null()desired_velocity = desired_normal * self.sprite.get_max_speed()delta_velocity = desired_velocity - self.sprite.get_velocity()self.last_delta_velocity = delta_velocityreturn delta_velocity, self.power", "docstring": "Calculate what the desired change in velocity is. \n delta_velocity = acceleration * delta_time\n Time will be dealt with by the sprite.", "id": "f2380:c5:m1"} {"signature": "def watch_token(method):", "body": "method._kxg_watch_token = Truereturn method", "docstring": "Mark a token extension method that should automatically be called when a \ntoken method of the same name is called.\n\nThis decorator must only be used on TokenExtension methods, otherwise it \nwill silently do nothing. The reason is that the decorator itself can't do \nanything but label the given method, because at the time of decoration the \ntoken to watch isn't known. The method is actually setup to watch a token \nin the TokenExtension constructor, which searches for the label added here. \nBut other classes won't make this search and will silently do nothing.", "id": "f2381:m1"} {"signature": "@debug_onlydef require_token(object):", "body": "require_instance(Token(), object)", "docstring": "Raise an ApiUsageError if the given object is not a fully constructed \ninstance of a Token subclass.", "id": "f2381:m2"} {"signature": "@debug_onlydef require_active_token(object):", "body": "require_token(object)token = objectif not token.has_id:raise ApiUsageError(\"\"\"\"\"\")if not token.has_world:raise ApiUsageError(\"\"\"\"\"\")", "docstring": "Raise an ApiUsageError if the given object is not a token that is currently \nparticipating in the game. 
To be participating in the game, the given \ntoken must have an id number and be associated with the world.", "id": "f2381:m3"} {"signature": "def __new__(meta, name, bases, members):", "body": "if __debug__:meta.add_safety_checks(members)return super().__new__(meta, name, bases, members)", "docstring": "Add checks to make sure token methods are being called safely.\n\nIn order to keep multiplayer games in sync, the world should only be \nmodified at particular times (e.g. token update methods and messages). \nThe purpose of this metaclass is to stop you from accidentally trying \nto modify the world outside of these defined times. These mistakes \nwould otherwise cause hard-to-debug sync errors.\n\nThe engine indicates when it is safe to modify the world by setting a \nboolean lock flag in the world. This metaclass adds a bit of logic to \nnon-read-only token methods that makes sure the world is unlocked \nbefore continuing. The kxg.read_only() decorator can be used to \nindicate which methods are read-only, and are therefore excluded from \nthese checks.\n\nThe checks configured by this metaclass help find bugs, but may also \nincur significant computational expense. By invoking python with \noptimization enabled (i.e. passing -O) these checks are skipped.", "id": "f2381:c0:m0"} {"signature": "@classmethoddef add_safety_checks(meta, members):", "body": "for member_name, member_value in members.items():members[member_name] = meta.add_safety_check(member_name, member_value)", "docstring": "Iterate through each member of the class being created and add a \nsafety check to every method that isn't marked as read-only.", "id": "f2381:c0:m1"} {"signature": "@staticmethoddef add_safety_check(member_name, member_value):", "body": "import functoolsfrom types import FunctionTypeis_method = isinstance(member_value, FunctionType)is_read_only = hasattr(member_value, '')is_private = member_name.startswith('')if not is_method or is_read_only or is_private:return member_valuedef safety_checked_method(self, *args, **kwargs):\"\"\"\"\"\"world = getattr(self, '', None)if world and world.is_locked():nonlocal member_nameraise ApiUsageError(\"\"\"\"\"\")return member_value(self, *args, **kwargs)functools.update_wrapper(safety_checked_method, member_value,assigned=functools.WRAPPER_ASSIGNMENTS + ('','','',))return safety_checked_method", "docstring": "If the given member is a method that is public (i.e. doesn't start with \nan underscore) and hasn't been marked as read-only, replace it with a \nversion that will check to make sure the world is locked. This ensures \nthat methods that alter the token are only called from update methods \nor messages.", "id": "f2381:c0:m2"} {"signature": "@read_onlydef watch_method(self, method_name, callback):", "body": "try:method = getattr(self, method_name)except AttributeError:raise ApiUsageError(\"\"\"\"\"\")if not isinstance(method, Token.WatchedMethod):setattr(self, method_name, Token.WatchedMethod(method))method = getattr(self, method_name)method.add_watcher(callback)", "docstring": "Register the given callback to be called whenever the method with the \ngiven name is called. 
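The watch_token decorator and the watch_method machinery described above are two sides of the same mechanism: an extension method marked with the decorator gets registered (via watch_method) to fire whenever the token method of the same name is called. A small sketch of the intended usage, assuming kxg exports Token, TokenExtension and watch_token under those names; Ball and move are made-up examples:

    import kxg

    class Ball(kxg.Token):

        def move(self, new_position):
            # A public, mutating method: the token metaclass wraps it with
            # the safety check described above, so it can only run while the
            # world is unlocked (update methods and messages).
            self.position = new_position

    class BallExtension(kxg.TokenExtension):

        @kxg.watch_token
        def move(self, new_position):
            # Called automatically whenever Ball.move() is called, because
            # the names match and the decorator marks this method for watching.
            print("ball moved to", new_position)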
You can easily take advantage of this feature in \ntoken extensions by using the @watch_token decorator.", "id": "f2381:c2:m12"} {"signature": "def _check_if_forum_observation_enabled(self):", "body": "try:super()._check_if_forum_observation_enabled()except ApiUsageError:raise ApiUsageError(\"\"\"\"\"\")", "docstring": "Give a helpful error if the user attempts to subscribe or unsubscribe \nfrom messages while the token is not registered with a world. This can \neasily happen if the user attempts to subscribe to messages in the \nconstructor. However, because the constructor is only called on one \nclient and message handlers cannot be pickled, subscribing at this time \nwould create hard-to-find synchronization bugs.", "id": "f2381:c2:m18"} {"signature": "def _remove_from_world(self):", "body": "self.on_remove_from_world()self._extensions = {}self._disable_forum_observation()self._world = Noneself._id = None", "docstring": "Clear all the internal data the token needed while it was part of \nthe world.\n\nNote that this method doesn't actually remove the token from the \nworld. That's what World._remove_token() does. This method is just \nresponsible for setting the internal state of the token being removed.", "id": "f2381:c2:m21"} {"signature": "@read_onlydef get_token(self, id):", "body": "return self._tokens[id]", "docstring": "Return the token with the given id. If no token with the given id is \nregistered to the world, an IndexError is thrown.", "id": "f2381:c3:m7"} {"signature": "@read_onlydef get_last_id(self):", "body": "return max(self._tokens)", "docstring": "Return the largest token id registered with the world. If no tokens \nhave been added to the world, the id for the world itself (0) is \nreturned. This means that the first \"real\" token id is 1.", "id": "f2381:c3:m8"} {"signature": "@read_onlydef is_locked(self):", "body": "return self._is_locked", "docstring": "Return whether or not the world is currently allowed to be modified.", "id": "f2381:c3:m9"} {"signature": "@read_onlydef has_game_ended(self):", "body": "return self._has_game_ended", "docstring": "Return true if the game has ended.", "id": "f2381:c3:m11"} {"signature": "@contextlib.contextmanagerdef _unlock_temporarily(self):", "body": "if not self._is_locked:yieldelse:try:self._is_locked = Falseyieldfinally:self._is_locked = True", "docstring": "Allow tokens to modify the world for the duration of a with-block.\n\nIt's important that tokens only modify the world at appropriate times, \notherwise the changes they make may not be communicated across the \nnetwork to other clients. To help catch and prevent these kinds of \nerrors, the game engine keeps the world locked most of the time and \nonly briefly unlocks it (using this method) when tokens are allowed to \nmake changes. When the world is locked, token methods that aren't \nmarked as being read-only can't be called. When the world is unlocked, \nany token method can be called. These checks can be disabled by \nrunning python with optimization enabled.\n\nYou should never call this method manually from within your own game. \nThis method is intended to be used by the game engine, which was \ncarefully designed to allow the world to be modified only when safe. \nCalling this method yourself disables an important safety check.", "id": "f2381:c3:m15"} {"signature": "def _set_actors(self, actors):", "body": "self._actors = actors", "docstring": "Tell the world which actors are running on this machine. 
This \ninformation is used to create extensions for new tokens.", "id": "f2381:c3:m19"} {"signature": "def exit_stage(self):", "body": "self.is_finished = True", "docstring": "Stop this stage from executing once the current update ends.", "id": "f2382:c2:m2"} {"signature": "def exit_theater(self):", "body": "self.theater.exit()", "docstring": "Exit the game once the current update ends.", "id": "f2382:c2:m3"} {"signature": "def on_enter_stage(self):", "body": "pass", "docstring": "Give the stage a chance to set itself up before it is updated for the \nfirst time.", "id": "f2382:c2:m4"} {"signature": "def on_update_stage(self, dt):", "body": "pass", "docstring": "Give the stage a chance to react to each clock cycle.\n\nThe amount of time that passed since the last clock cycle is provided \nas an argument.", "id": "f2382:c2:m5"} {"signature": "def on_exit_stage(self):", "body": "pass", "docstring": "Give the stage a chance to react before it is stopped and the next \nstage is started.\n\nYou can define the next stage by setting the Stage.successor attribute. \nIf the successor is static, you can just set it in the constructor. \nBut if it will differ depending on the context, this method may be a \ngood place to calculate it because it is called only once and just \nbefore the theater queries for the successor.", "id": "f2382:c2:m6"} {"signature": "def on_enter_stage(self):", "body": "with self.world._unlock_temporarily():self.forum.connect_everyone(self.world, self.actors)self.forum.on_start_game()with self.world._unlock_temporarily():self.world.on_start_game()num_players = len(self.actors) - for actor in self.actors:actor.on_setup_gui(self.gui)for actor in self.actors:actor.on_start_game(num_players)", "docstring": "Prepare the actors, the world, and the messaging system to begin \nplaying the game.\n\nThis method is guaranteed to be called exactly once upon entering the \ngame stage.", "id": "f2382:c3:m1"} {"signature": "def on_update_stage(self, dt):", "body": "for actor in self.actors:actor.on_update_game(dt)self.forum.on_update_game()with self.world._unlock_temporarily():self.world.on_update_game(dt)if self.world.has_game_ended():self.exit_stage()", "docstring": "Sequentially update the actors, the world, and the messaging system. \nThe theater terminates once all of the actors indicate that they are done.", "id": "f2382:c3:m2"} {"signature": "def on_exit_stage(self):", "body": "self.forum.on_finish_game()for actor in self.actors:actor.on_finish_game()with self.world._unlock_temporarily():self.world.on_finish_game()", "docstring": "Give the actors, the world, and the messaging system a chance to react \nto the end of the game.", "id": "f2382:c3:m3"} {"signature": "def tokens_referenced(self):", "body": "tokens = set()def persistent_id(obj):from .tokens import Tokenif isinstance(obj, Token):tokens.add(obj)return obj.idfrom pickle import Picklerfrom io import BytesIOpickler = Pickler(BytesIO())pickler.persistent_id = persistent_idpickler.dump(self)return tokens", "docstring": "Return a list of all the tokens that are referenced (i.e. contained in) \nthis message. Tokens that haven't been assigned an id yet are searched \nrecursively for tokens. So this method may return fewer results after \nthe message is sent. 
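The Stage hooks documented above (on_enter_stage, on_update_stage, on_exit_stage, exit_stage and the successor attribute) are the whole surface a custom stage needs to implement. A minimal sketch, assuming kxg exports Stage under that name; SplashStage and MenuStage are hypothetical:

    import kxg

    class SplashStage(kxg.Stage):

        def on_enter_stage(self):
            # One-time setup, called just before the first update.
            self.elapsed = 0.0

        def on_update_stage(self, dt):
            # Called every clock cycle with the time since the last cycle.
            self.elapsed += dt
            if self.elapsed > 3.0:
                self.exit_stage()   # stop once the current update ends

        def on_exit_stage(self):
            # Called once, just before the theater asks for the successor.
            self.successor = MenuStage()   # MenuStage is a stand-in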
This information is used by the game engine to \ncatch mistakes like forgetting to add a token to the world or keeping a \nstale reference to a token after its been removed.", "id": "f2384:c0:m6"} {"signature": "def _assign_token_ids(self, id_factory):", "body": "for token in self.tokens_to_add():token._give_id(id_factory)", "docstring": "Assign id numbers to any tokens that will be added to the world by this \nmessage.\n\nThis method is called by Actor but not by ServerActor, so it's \nguaranteed to be called exactly once. In fact, this method is not \nreally different from the constructor, except that the id_factory \nobject is nicely provided. That's useful for assigning ids to tokens \nbut probably nothing else. This method is called before _check() so \nthat _check() can make sure that valid ids were assigned (although by \ndefault it doesn't).", "id": "f2384:c0:m17"} {"signature": "def main(world_cls, referee_cls, gui_cls, gui_actor_cls, ai_actor_cls,theater_cls=PygletTheater, default_host=DEFAULT_HOST,default_port=DEFAULT_PORT, argv=None):", "body": "import sys, os, docopt, nonstdlibexe_name = os.path.basename(sys.argv[])usage = main.__doc__.format(**locals()).strip()args = docopt.docopt(usage, argv or sys.argv[:])num_guis = int(args[''] or )num_ais = int(args[''] or )host, port = args[''], int(args[''])logging.basicConfig(format='',level=nonstdlib.verbosity(args['']),)if args['']:print(\"\"\"\"\"\")game = MultiplayerDebugger(world_cls, referee_cls, gui_cls, gui_actor_cls, num_guis,ai_actor_cls, num_ais, theater_cls, host, port)else:game = theater_cls()ai_actors = [ai_actor_cls() for i in range(num_ais)]if args['']:game.gui = gui_cls()game.initial_stage = UniplayerGameStage(world_cls(), referee_cls(), gui_actor_cls(), ai_actors)game.initial_stage.successor = PostgameSplashStage()if args['']:game.gui = gui_cls()game.initial_stage = ClientConnectionStage(world_cls(), gui_actor_cls(), host, port)if args['']:game.initial_stage = ServerConnectionStage(world_cls(), referee_cls(), num_guis, ai_actors,host, port)game.play()", "docstring": "Run a game being developed with the kxg game engine.\n\nUsage:\n {exe_name} sandbox [] [-v...]\n {exe_name} client [--host HOST] [--port PORT] [-v...]\n {exe_name} server [] [--host HOST] [--port PORT] [-v...] \n {exe_name} debug [] [--host HOST] [--port PORT] [-v...]\n {exe_name} --help\n\nCommands:\n sandbox\n Play a single-player game with the specified number of AIs. None of \n the multiplayer machinery will be used.\n\n client\n Launch a client that will try to connect to a server on the given host \n and port. Once it connects and the game starts, the client will allow \n you to play the game against any other connected clients.\n\n server\n Launch a server that will manage a game between the given number of \n human and AI players. The human players must connect using this \n command's client mode.\n\n debug\n Debug a multiplayer game locally. This command launches a server and \n the given number of clients all in different processes, and configures \n the logging system such that the output from each process can be easily \n distinguished.\n\nArguments:\n \n The number of human players that will be playing the game. Only needed \n by commands that will launch some sort of multiplayer server.\n\n \n The number of AI players that will be playing the game. Only needed by \n commands that will launch single-player games or multiplayer servers.\n\nOptions:\n -x --host HOST [default: {default_host}]\n The address of the machine running the server. 
Must be accessible from \n the machines running the clients.\n\n -p --port PORT [default: {default_port}]\n The port that the server should listen on. Don't specify a value less \n than 1024 unless the server is running with root permissions.\n\n -v --verbose \n Have the game engine log more information about what it's doing. You \n can specify this option several times to get more and more information.\n\nThis command is provided so that you can start writing your game with the least \npossible amount of boilerplate code. However, the clients and servers provided \nby this command are not capable of running a production game. Once you have \nwritten your game and want to give it a polished set of menus and options, \nyou'll have to write new Stage subclasses encapsulating that logic and you'll \nhave to call those stages yourself by interacting more directly with the \nTheater class. The online documentation has more information on this process.", "id": "f2386:m0"} {"signature": "def _run_supervisor(self):", "body": "import timestill_supervising = lambda: (multiprocessing.active_children()or not self.log_queue.empty()or not self.exception_queue.empty())try:while still_supervising():try:record = self.log_queue.get_nowait()logger = logging.getLogger(record.name)logger.handle(record)except queue.Empty:passtry:exception = self.exception_queue.get_nowait()except queue.Empty:passelse:raise exceptiontime.sleep(/self.frame_rate)self.elapsed_time += /self.frame_rateif self.time_limit and self.elapsed_time > self.time_limit:raise RuntimeError(\"\")finally:for process in multiprocessing.active_children():process.terminate()", "docstring": "Poll the queues that the worker can use to communicate with the \nsupervisor, until all the workers are done and all the queues are \nempty. 
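The quickstart main() above is intended to be a game's entire command-line front end: handed the five game classes, it parses the sandbox/client/server/debug sub-commands itself. A sketch of a typical entry-point script, assuming main() lives in kxg.quickstart (the kxg.quickstart.ProcessPool reference earlier in these records suggests that module); World, Referee, Gui, GuiActor and AiActor are stand-ins for a real game's classes:

    import kxg.quickstart

    # Hypothetical game classes; a real game defines these elsewhere.
    from mygame import World, Referee, Gui, GuiActor, AiActor

    if __name__ == "__main__":
        kxg.quickstart.main(World, Referee, Gui, GuiActor, AiActor)

Invoking the script with the debug sub-command and a player count (for example "mygame.py debug 2") then launches a server plus that many clients in separate processes, as the usage text describes.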
Handle messages as they appear.", "id": "f2386:c3:m5"} {"signature": "def play(self, song):", "body": "pass", "docstring": "Called once when a song starts playing", "id": "f2388:c0:m0"} {"signature": "def pre_poll(self):", "body": "pass", "docstring": "Called before polling for process status", "id": "f2388:c0:m1"} {"signature": "def post_poll(self):", "body": "pass", "docstring": "Called after polling for process status", "id": "f2388:c0:m2"} {"signature": "def input(self, value, song):", "body": "pass", "docstring": "Called after user input during song playback", "id": "f2388:c0:m3"} {"signature": "def station_selection_menu(self, error=None):", "body": "self.screen.clear()if error:self.screen.print_error(\"\".format(error))for i, station in enumerate(self.stations):i = \"\".format(i)print(\"\".format(Colors.yellow(i), station.name))return self.stations[self.screen.get_integer(\"\")]", "docstring": "Format a station menu and make the user select a station", "id": "f2388:c1:m3"} {"signature": "def play(self, song):", "body": "if song.is_ad:print(\"\".format(Colors.cyan(\"\")))else:print(\"\".format(Colors.cyan(song.song_name),Colors.yellow(song.artist_name)))", "docstring": "Play callback", "id": "f2388:c1:m4"} {"signature": "def input(self, input, song):", "body": "try:cmd = getattr(self, self.CMD_MAP[input][])except (IndexError, KeyError):return self.screen.print_error(\"\".format(input))cmd(song)", "docstring": "Input callback, handles key presses", "id": "f2388:c1:m17"} {"signature": "def __init__(self, callbacks, control_channel):", "body": "self._control_channel = control_channelself._control_fd = control_channel.fileno()self._callbacks = callbacksself._process = Noneself._cmd = [self._find_path()]", "docstring": "Constructor\n\n Will attempt to find the player binary on construction and fail if it\n is not found. 
Subclasses should append any additional arguments to\n _cmd.", "id": "f2389:c3:m0"} {"signature": "def _find_path(self):", "body": "raise NotImplementedError", "docstring": "Find the path to the backend binary\n\n This method may fail with a PlayerUnusable exception in which case the\n consumer should opt for another backend.", "id": "f2389:c3:m1"} {"signature": "def _load_track(self, song):", "body": "raise NotImplementedError", "docstring": "Load a track into the audio backend by song model", "id": "f2389:c3:m2"} {"signature": "def _player_stopped(self, value):", "body": "raise NotImplementedError", "docstring": "Determine if player has stopped", "id": "f2389:c3:m3"} {"signature": "def raise_volume(self):", "body": "raise NotImplementedError", "docstring": "Raise the volume of the audio output\n\n The player backend may not support this functionality in which case it\n should not override this method.", "id": "f2389:c3:m4"} {"signature": "def lower_volume(self):", "body": "raise NotImplementedError", "docstring": "Lower the volume of the audio output\n\n The player backend may not support this functionality in which case it\n should not override this method.", "id": "f2389:c3:m5"} {"signature": "def _post_start(self):", "body": "return", "docstring": "Optionally, do something after the audio backend is started", "id": "f2389:c3:m6"} {"signature": "def _loop_hook(self):", "body": "return", "docstring": "Optionally, do something each main loop iteration", "id": "f2389:c3:m7"} {"signature": "def _read_from_process(self, handle):", "body": "return handle.readline().strip()", "docstring": "Read a line from the process and clean it\n\n Different audio backends return text in different formats so provides a\n hook for each subclass to customize reader behaviour.", "id": "f2389:c3:m8"} {"signature": "def _send_cmd(self, cmd):", "body": "self._process.stdin.write(\"\".format(cmd).encode(\"\"))self._process.stdin.flush()", "docstring": "Write command to remote process", "id": "f2389:c3:m9"} {"signature": "def stop(self):", "body": "self._send_cmd(\"\")", "docstring": "Stop the currently playing song", "id": "f2389:c3:m10"} {"signature": "def pause(self):", "body": "self._send_cmd(\"\")", "docstring": "Pause the player", "id": "f2389:c3:m11"} {"signature": "def start(self):", "body": "self._ensure_started()", "docstring": "Start the audio backend process for the player\n\n This is just a friendlier API for consumers", "id": "f2389:c3:m13"} {"signature": "def _ensure_started(self):", "body": "if self._process and self._process.poll() is None:returnif not getattr(self, \"\"):raise RuntimeError(\"\")log.debug(\"\", self._cmd)self._process = SilentPopen(self._cmd)self._post_start()", "docstring": "Ensure player backing process is started", "id": "f2389:c3:m14"} {"signature": "def _get_select_readers(self):", "body": "return [self._control_channel, self._process.stdout]", "docstring": "Return a list of file-like objects for reading\n\n Will be passed to select() to poll for readers.", "id": "f2389:c3:m15"} {"signature": "def play(self, song):", "body": "self._callbacks.play(song)self._load_track(song)time.sleep() while True:try:self._callbacks.pre_poll()self._ensure_started()self._loop_hook()readers, _, _ = select.select(self._get_select_readers(), [], [], )for handle in readers:if handle.fileno() == self._control_fd:self._callbacks.input(handle.readline().strip(), song)else:value = self._read_from_process(handle)if self._player_stopped(value):returnfinally:self._callbacks.post_poll()", "docstring": 
"Play a new song from a Pandora model\n\n Returns once the stream starts but does not shut down the remote audio\n output backend process. Calls the input callback when the user has\n input.", "id": "f2389:c3:m16"} {"signature": "def end_station(self):", "body": "raise StopIteration", "docstring": "Stop playing the station", "id": "f2389:c3:m17"} {"signature": "def play_station(self, station):", "body": "for song in iterate_forever(station.get_playlist):try:self.play(song)except StopIteration:self.stop()return", "docstring": "Play the station until something ends it\n\n This function will run forever until termintated by calling\n end_station.", "id": "f2389:c3:m18"} {"signature": "def _post_start(self):", "body": "flags = fcntl.fcntl(self._process.stdout, fcntl.F_GETFL)fcntl.fcntl(self._process.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)", "docstring": "Set stdout to non-blocking\n\n VLC does not always return a newline when reading status so in order to\n be lazy and still use the read API without caring about how much output\n there is we switch stdout to nonblocking mode and just read a large\n chunk of datin order to be lazy and still use the read API without\n caring about how much output there is we switch stdout to nonblocking\n mode and just read a large chunk of data.", "id": "f2389:c5:m4"} {"signature": "def iterate_forever(func, *args, **kwargs):", "body": "output = func(*args, **kwargs)while True:try:playlist_item = next(output)playlist_item.prepare_playback()yield playlist_itemexcept StopIteration:output = func(*args, **kwargs)", "docstring": "Iterate over a finite iterator forever\n\n When the iterator is exhausted will call the function again to generate a\n new iterator and keep iterating.", "id": "f2390:m0"} {"signature": "@staticmethoddef get_integer(prompt):", "body": "while True:try:return int(input(prompt).strip())except ValueError:print(Colors.red(\"\"))", "docstring": "Gather user input and convert it to an integer\n\n Will keep trying till the user enters an interger or until they ^C the\n program.", "id": "f2390:c4:m7"} {"signature": "def formatter(self, api_client, data, newval):", "body": "raise NotImplementedError", "docstring": "Format Value for Model\n\n The return value of this method is used as a value for the field in the\n model of which this field is a member\n\n api_client\n instance of a Pandora API client\n data\n complete JSON data blob for the parent model of which this field is\n a member\n newval\n the value of this field as retrieved from the JSON data after\n having resolved default value logic", "id": "f2402:c1:m0"} {"signature": "@classmethoddef from_json_list(cls, api_client, data):", "body": "return [cls.from_json(api_client, item) for item in data]", "docstring": "Convert a list of JSON values to a list of models", "id": "f2402:c4:m0"} {"signature": "@staticmethoddef populate_fields(api_client, instance, data):", "body": "for key, value in instance.__class__._fields.items():default = getattr(value, \"\", None)newval = data.get(value.field, default)if isinstance(value, SyntheticField):newval = value.formatter(api_client, data, newval)setattr(instance, key, newval)continuemodel_class = getattr(value, \"\", None)if newval and model_class:if isinstance(newval, list):newval = model_class.from_json_list(api_client, newval)else:newval = model_class.from_json(api_client, newval)if newval and value.formatter:newval = value.formatter(api_client, newval)setattr(instance, key, newval)", "docstring": "Populate all fields of a model with data\n\n Given 
a model with a PandoraModel superclass will enumerate all\n declared fields on that model and populate the values of their Field\n and SyntheticField classes. All declared fields will have a value after\n this function runs even if they are missing from the incoming JSON.", "id": "f2402:c4:m2"} {"signature": "@classmethoddef from_json(cls, api_client, data):", "body": "self = cls(api_client)PandoraModel.populate_fields(api_client, self, data)return self", "docstring": "Convert one JSON value to a model object", "id": "f2402:c4:m3"} {"signature": "def _base_repr(self, and_also=None):", "body": "items = [\"\".join((key, repr(getattr(self, key))))for key in sorted(self._fields.keys())]if items:output = \"\".join(items)else:output = Noneif and_also:return \"\".format(self.__class__.__name__,output, and_also)else:return \"\".format(self.__class__.__name__, output)", "docstring": "Common repr logic for subclasses to hook", "id": "f2402:c4:m4"} {"signature": "def formatter(self, api_client, data, newval):", "body": "url_map = data.get(\"\")audio_url = data.get(\"\")if audio_url and not url_map:url_map = {BaseAPIClient.HIGH_AUDIO_QUALITY: {\"\": audio_url,\"\": ,\"\": \"\",}}elif not url_map: return Nonevalid_audio_formats = [BaseAPIClient.HIGH_AUDIO_QUALITY,BaseAPIClient.MED_AUDIO_QUALITY,BaseAPIClient.LOW_AUDIO_QUALITY]preferred_quality = api_client.default_audio_qualityif preferred_quality in valid_audio_formats:i = valid_audio_formats.index(preferred_quality)valid_audio_formats = valid_audio_formats[i:]for quality in valid_audio_formats:audio_url = url_map.get(quality)if audio_url:return audio_url[self.field]return audio_url[self.field] if audio_url else None", "docstring": "Get audio-related fields\n\n Try to find fields for the audio url for the specified preferred quality\n level, or next-lowest available quality url otherwise.", "id": "f2404:c2:m0"} {"signature": "def formatter(self, api_client, data, newval):", "body": "if newval is None:return Noneuser_param = data['']urls = {}if isinstance(newval, str):urls[user_param[]] = newvalelse:for key, url in zip(user_param, newval):urls[key] = urlreturn urls", "docstring": "Parse additional url fields and map them to inputs\n\n Attempt to create a dictionary with keys being user input, and\n response being the returned URL", "id": "f2404:c3:m0"} {"signature": "def prepare_playback(self):", "body": "return self", "docstring": "Prepare Track for Playback\n\n This method must be called by clients before beginning playback\n otherwise the track received may not be playable.", "id": "f2404:c4:m1"} {"signature": "def retries(max_tries, exceptions=(Exception,)):", "body": "def decorator(func):def function(*args, **kwargs):retries_left = max_trieswhile retries_left > :try:retries_left -= return func(*args, **kwargs)except exceptions as exc:if isinstance(exc, PandoraException):raiseif retries_left > :time.sleep(delay_exponential(, , max_tries - retries_left))else:raisereturn functionreturn decorator", "docstring": "Function decorator implementing retrying logic.\n\n exceptions: A tuple of exception classes; default (Exception,)\n\n The decorator will call the function up to max_tries times if it raises\n an exception.\n\n By default it catches instances of the Exception class and subclasses.\n This will recover after all but the most fatal errors. 
You may specify a\n custom tuple of exception classes with the 'exceptions' argument; the\n function will only be retried if it raises one of the specified\n exceptions.", "id": "f2408:m0"} {"signature": "def delay_exponential(base, growth_factor, attempts):", "body": "if base == '':base = random.random()elif base <= :raise ValueError(\"\"\"\".format(base))time_to_sleep = base * (growth_factor ** (attempts - ))return time_to_sleep", "docstring": "Calculate time to sleep based on exponential function.\n The format is::\n\n base * growth_factor ^ (attempts - 1)\n\n If ``base`` is set to 'rand' then a random number between\n 0 and 1 will be used as the base.\n Base must be greater than 0, otherwise a ValueError will be\n raised.", "id": "f2408:m1"} {"signature": "def set_up_gae_environment(sdk_path):", "body": "if '' in sys.modules:reload_module(sys.modules[''])sys.path.insert(, sdk_path)import dev_appserverdev_appserver.fix_sys_path()import google.appengine.tools.os_compat", "docstring": "Set up appengine SDK third-party imports.\n\n The App Engine SDK does terrible things to the global interpreter state.\n Because of this, this stuff can't be neatly undone. As such, it can't be\n a fixture.", "id": "f2428:m0"} {"signature": "@contextlib.contextmanagerdef mock_module_import(module):", "body": "parts = module.split('')entries = [''.join(parts[:i + ]) for i in range(len(parts))]for entry in entries:sys.modules[entry] = object()try:yieldfinally:for entry in entries:del sys.modules[entry]", "docstring": "Place a dummy objects in sys.modules to mock an import test.", "id": "f2442:m3"} {"signature": "def reset_env(self, env):", "body": "os.environ.pop(env, None)", "docstring": "Set the environment variable 'env' to 'value'.", "id": "f2442:c2:m2"} {"signature": "def __init__(self, headers=None, data=None):", "body": "if headers is None:headers = {'': http_client.OK}self.data = dataself.response_headers = headersself.headers = Noneself.uri = Noneself.method = Noneself.body = Noneself.headers = Noneself.requests = ", "docstring": "HttpMock constructor.\n\n Args:\n headers: dict, header to return with response", "id": "f2444:c1:m0"} {"signature": "def __init__(self, iterable):", "body": "self._iterable = iterableself.requests = []", "docstring": "HttpMockSequence constructor.\n\n Args:\n iterable: iterable, a sequence of pairs of (headers, body)", "id": "f2444:c2:m0"} {"signature": "def positional(max_positional_args):", "body": "def positional_decorator(wrapped):@functools.wraps(wrapped)def positional_wrapper(*args, **kwargs):if len(args) > max_positional_args:plural_s = ''if max_positional_args != :plural_s = ''message = (''''.format(function=wrapped.__name__,args_max=max_positional_args,args_given=len(args),plural=plural_s))if positional_parameters_enforcement == POSITIONAL_EXCEPTION:raise TypeError(message)elif positional_parameters_enforcement == POSITIONAL_WARNING:logger.warning(message)return wrapped(*args, **kwargs)return positional_wrapperif isinstance(max_positional_args, six.integer_types):return positional_decoratorelse:args, _, _, defaults = inspect.getargspec(max_positional_args)return positional(len(args) - len(defaults))(max_positional_args)", "docstring": "A decorator to declare that only the first N arguments my be positional.\n\n This decorator makes it easy to support Python 3 style keyword-only\n parameters. 
For example, in Python 3 it is possible to write::\n\n def fn(pos1, *, kwonly1=None, kwonly2=None):\n ...\n\n All named parameters after ``*`` must be a keyword::\n\n fn(10, 'kw1', 'kw2') # Raises exception.\n fn(10, kwonly1='kw1') # Ok.\n\n Example\n ^^^^^^^\n\n To define a function like above, do::\n\n @positional(1)\n def fn(pos1, kwonly1=None, kwonly2=None):\n ...\n\n If no default value is provided to a keyword argument, it becomes a\n required keyword argument::\n\n @positional(0)\n def fn(required_kw):\n ...\n\n This must be called with the keyword parameter::\n\n fn() # Raises exception.\n fn(10) # Raises exception.\n fn(required_kw=10) # Ok.\n\n When defining instance or class methods always remember to account for\n ``self`` and ``cls``::\n\n class MyClass(object):\n\n @positional(2)\n def my_method(self, pos1, kwonly1=None):\n ...\n\n @classmethod\n @positional(2)\n def my_method(cls, pos1, kwonly1=None):\n ...\n\n The positional decorator behavior is controlled by\n ``_helpers.positional_parameters_enforcement``, which may be set to\n ``POSITIONAL_EXCEPTION``, ``POSITIONAL_WARNING`` or\n ``POSITIONAL_IGNORE`` to raise an exception, log a warning, or do\n nothing, respectively, if a declaration is violated.\n\n Args:\n max_positional_arguments: Maximum number of positional arguments. All\n parameters after this index must be\n keyword only.\n\n Returns:\n A decorator that prevents arguments after max_positional_args\n from being used as positional parameters.\n\n Raises:\n TypeError: if a keyword-only argument is provided as a positional\n parameter, but only if\n _helpers.positional_parameters_enforcement is set to\n POSITIONAL_EXCEPTION.", "id": "f2445:m0"} {"signature": "def scopes_to_string(scopes):", "body": "if isinstance(scopes, six.string_types):return scopeselse:return ''.join(scopes)", "docstring": "Converts scope value to a string.\n\n If scopes is a string then it is simply passed through. If scopes is an\n iterable then a string is returned that is all the individual scopes\n concatenated with spaces.\n\n Args:\n scopes: string or iterable of strings, the scopes.\n\n Returns:\n The scopes formatted as a single string.", "id": "f2445:m1"} {"signature": "def string_to_scopes(scopes):", "body": "if not scopes:return []elif isinstance(scopes, six.string_types):return scopes.split('')else:return scopes", "docstring": "Converts a stringified scope value to a list.\n\n If scopes is a list then it is simply passed through. 
If scopes is a\n string then a list of each individual scope is returned.\n\n Args:\n scopes: a string or iterable of strings, the scopes.\n\n Returns:\n The scopes in a list.", "id": "f2445:m2"} {"signature": "def parse_unique_urlencoded(content):", "body": "urlencoded_params = urllib.parse.parse_qs(content)params = {}for key, value in six.iteritems(urlencoded_params):if len(value) != :msg = ('''' % (key, ''.join(value)))raise ValueError(msg)params[key] = value[]return params", "docstring": "Parses unique key-value parameters from urlencoded content.\n\n Args:\n content: string, URL-encoded key-value pairs.\n\n Returns:\n dict, The key-value pairs from ``content``.\n\n Raises:\n ValueError: if one of the keys is repeated.", "id": "f2445:m3"} {"signature": "def update_query_params(uri, params):", "body": "parts = urllib.parse.urlparse(uri)query_params = parse_unique_urlencoded(parts.query)query_params.update(params)new_query = urllib.parse.urlencode(query_params)new_parts = parts._replace(query=new_query)return urllib.parse.urlunparse(new_parts)", "docstring": "Updates a URI with new query parameters.\n\n If a given key from ``params`` is repeated in the ``uri``, then\n the URI will be considered invalid and an error will occur.\n\n If the URI is valid, then each value from ``params`` will\n replace the corresponding value in the query parameters (if\n it exists).\n\n Args:\n uri: string, A valid URI, with potential existing query parameters.\n params: dict, A dictionary of query parameters.\n\n Returns:\n The same URI but with the new query parameters added.", "id": "f2445:m4"} {"signature": "def _add_query_parameter(url, name, value):", "body": "if value is None:return urlelse:return update_query_params(url, {name: value})", "docstring": "Adds a query parameter to a url.\n\n Replaces the current value if it already exists in the URL.\n\n Args:\n url: string, url to add the query parameter to.\n name: string, query parameter name.\n value: string, query parameter value.\n\n Returns:\n Updated query parameter. Does not update the url if value is None.", "id": "f2445:m5"} {"signature": "def _parse_pem_key(raw_key_input):", "body": "offset = raw_key_input.find(b'')if offset != -:return raw_key_input[offset:]", "docstring": "Identify and extract PEM keys.\n\n Determines whether the given key is in the format of a PEM key, and extracts\n the relevant part of the key if it is.\n\n Args:\n raw_key_input: The contents of a private key file (either PEM or\n PKCS12).\n\n Returns:\n string, The actual key if the contents are from a PEM file, or\n else None.", "id": "f2445:m7"} {"signature": "def _to_bytes(value, encoding=''):", "body": "result = (value.encode(encoding)if isinstance(value, six.text_type) else value)if isinstance(result, six.binary_type):return resultelse:raise ValueError(''.format(value))", "docstring": "Converts a string value to bytes, if necessary.\n\n Unfortunately, ``six.b`` is insufficient for this task since in\n Python2 it does not modify ``unicode`` objects.\n\n Args:\n value: The string/bytes value to be converted.\n encoding: The encoding to use to convert unicode to bytes. Defaults\n to \"ascii\", which will not allow any characters from ordinals\n larger than 127. 
Other useful values are \"latin-1\", which\n will only allow byte ordinals (up to 255) and \"utf-8\",\n which will encode any unicode that needs to be.\n\n Returns:\n The original value converted to bytes (if unicode) or as passed in\n if it started out as bytes.\n\n Raises:\n ValueError if the value could not be converted to bytes.", "id": "f2445:m9"} {"signature": "def _from_bytes(value):", "body": "result = (value.decode('')if isinstance(value, six.binary_type) else value)if isinstance(result, six.text_type):return resultelse:raise ValueError(''.format(value))", "docstring": "Converts bytes to a string value, if necessary.\n\n Args:\n value: The string/bytes value to be converted.\n\n Returns:\n The original value converted to unicode (if bytes) or as passed in\n if it started out as unicode.\n\n Raises:\n ValueError if the value could not be converted to unicode.", "id": "f2445:m10"} {"signature": "def __init__(self, pubkey):", "body": "self._pubkey = pubkey", "docstring": "Constructor.\n\n Args:\n pubkey: OpenSSL.crypto.PKey (or equiv), The public key to verify\n with.", "id": "f2446:c0:m0"} {"signature": "def verify(self, message, signature):", "body": "message = _helpers._to_bytes(message, encoding='')return PKCS1_v1_5.new(self._pubkey).verify(SHA256.new(message), signature)", "docstring": "Verifies a message against a signature.\n\n Args:\n message: string or bytes, The message to verify. If string, will be\n encoded to bytes as utf-8.\n signature: string or bytes, The signature on the message.\n\n Returns:\n True if message was signed by the private key associated with the\n public key that this object was constructed with.", "id": "f2446:c0:m1"} {"signature": "@staticmethoddef from_string(key_pem, is_x509_cert):", "body": "if is_x509_cert:key_pem = _helpers._to_bytes(key_pem)pemLines = key_pem.replace(b'', b'').split()certDer = _helpers._urlsafe_b64decode(b''.join(pemLines[:-]))certSeq = DerSequence()certSeq.decode(certDer)tbsSeq = DerSequence()tbsSeq.decode(certSeq[])pubkey = RSA.importKey(tbsSeq[])else:pubkey = RSA.importKey(key_pem)return PyCryptoVerifier(pubkey)", "docstring": "Construct a Verifier instance from a string.\n\n Args:\n key_pem: string, public key in PEM format.\n is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it\n is expected to be an RSA key in PEM format.\n\n Returns:\n Verifier instance.", "id": "f2446:c0:m2"} {"signature": "def __init__(self, pkey):", "body": "self._key = pkey", "docstring": "Constructor.\n\n Args:\n pkey, OpenSSL.crypto.PKey (or equiv), The private key to sign with.", "id": "f2446:c1:m0"} {"signature": "def sign(self, message):", "body": "message = _helpers._to_bytes(message, encoding='')return PKCS1_v1_5.new(self._key).sign(SHA256.new(message))", "docstring": "Signs a message.\n\n Args:\n message: string, Message to be signed.\n\n Returns:\n string, The signature of the message for the given key.", "id": "f2446:c1:m1"} {"signature": "@staticmethoddef from_string(key, password=''):", "body": "parsed_pem_key = _helpers._parse_pem_key(_helpers._to_bytes(key))if parsed_pem_key:pkey = RSA.importKey(parsed_pem_key)else:raise NotImplementedError('''''')return PyCryptoSigner(pkey)", "docstring": "Construct a Signer instance from a string.\n\n Args:\n key: string, private key in PEM format.\n password: string, password for private key file. 
Unused for PEM\n files.\n\n Returns:\n Signer instance.\n\n Raises:\n NotImplementedError if the key isn't in PEM format.", "id": "f2446:c1:m2"} {"signature": "def _make_flow(request, scopes, return_url=None):", "body": "csrf_token = hashlib.sha256(os.urandom()).hexdigest()request.session[_CSRF_KEY] = csrf_tokenstate = json.dumps({'': csrf_token,'': return_url,})flow = client.OAuth2WebServerFlow(client_id=django_util.oauth2_settings.client_id,client_secret=django_util.oauth2_settings.client_secret,scope=scopes,state=state,redirect_uri=request.build_absolute_uri(urlresolvers.reverse(\"\")))flow_key = _FLOW_KEY.format(csrf_token)request.session[flow_key] = jsonpickle.encode(flow)return flow", "docstring": "Creates a Web Server Flow\n\n Args:\n request: A Django request object.\n scopes: the request oauth2 scopes.\n return_url: The URL to return to after the flow is complete. Defaults\n to the path of the current request.\n\n Returns:\n An OAuth2 flow object that has been stored in the session.", "id": "f2449:m0"} {"signature": "def _get_flow_for_token(csrf_token, request):", "body": "flow_pickle = request.session.get(_FLOW_KEY.format(csrf_token), None)return None if flow_pickle is None else jsonpickle.decode(flow_pickle)", "docstring": "Looks up the flow in session to recover information about requested\n scopes.\n\n Args:\n csrf_token: The token passed in the callback request that should\n match the one previously generated and stored in the request on the\n initial authorization view.\n\n Returns:\n The OAuth2 Flow object associated with this flow based on the\n CSRF token.", "id": "f2449:m1"} {"signature": "def oauth2_callback(request):", "body": "if '' in request.GET:reason = request.GET.get('', request.GET.get('', ''))reason = html.escape(reason)return http.HttpResponseBadRequest(''.format(reason))try:encoded_state = request.GET['']code = request.GET['']except KeyError:return http.HttpResponseBadRequest('')try:server_csrf = request.session[_CSRF_KEY]except KeyError:return http.HttpResponseBadRequest('')try:state = json.loads(encoded_state)client_csrf = state['']return_url = state['']except (ValueError, KeyError):return http.HttpResponseBadRequest('')if client_csrf != server_csrf:return http.HttpResponseBadRequest('')flow = _get_flow_for_token(client_csrf, request)if not flow:return http.HttpResponseBadRequest('')try:credentials = flow.step2_exchange(code)except client.FlowExchangeError as exchange_error:return http.HttpResponseBadRequest(''.format(exchange_error))get_storage(request).put(credentials)signals.oauth2_authorized.send(sender=signals.oauth2_authorized,request=request, credentials=credentials)return shortcuts.redirect(return_url)", "docstring": "View that handles the user's return from OAuth2 provider.\n\n This view verifies the CSRF state and OAuth authorization code, and on\n success stores the credentials obtained in the storage provider,\n and redirects to the return_url specified in the authorize view and\n stored in the session.\n\n Args:\n request: Django request.\n\n Returns:\n A redirect response back to the return_url.", "id": "f2449:m2"} {"signature": "def oauth2_authorize(request):", "body": "return_url = request.GET.get('', None)if not return_url:return_url = request.META.get('', '')scopes = request.GET.getlist('', django_util.oauth2_settings.scopes)if django_util.oauth2_settings.storage_model:if not request.user.is_authenticated():return redirect(''.format(settings.LOGIN_URL, parse.quote(request.get_full_path())))else:user_oauth = django_util.UserOAuth2(request, 
scopes, return_url)if user_oauth.has_credentials():return redirect(return_url)flow = _make_flow(request=request, scopes=scopes, return_url=return_url)auth_url = flow.step1_get_authorize_url()return shortcuts.redirect(auth_url)", "docstring": "View to start the OAuth2 Authorization flow.\n\n This view starts the OAuth2 authorization flow. If scopes is passed in\n as a GET URL parameter, it will authorize those scopes, otherwise the\n default scopes specified in settings. The return_url can also be\n specified as a GET parameter, otherwise the referer header will be\n checked, and if that isn't found it will return to the root path.\n\n Args:\n request: The Django request object.\n\n Returns:\n A redirect to Google OAuth2 Authorization.", "id": "f2449:m3"} {"signature": "def __init__(self, model_class, key_name, key_value, property_name):", "body": "super(DjangoORMStorage, self).__init__()self.model_class = model_classself.key_name = key_nameself.key_value = key_valueself.property_name = property_name", "docstring": "Constructor for Storage.\n\n Args:\n model: string, fully qualified name of db.Model model class.\n key_name: string, key name for the entity that has the credentials\n key_value: string, key value for the entity that has the\n credentials.\n property_name: string, name of the property that is an\n CredentialsProperty.", "id": "f2450:c0:m0"} {"signature": "def locked_get(self):", "body": "query = {self.key_name: self.key_value}entities = self.model_class.objects.filter(**query)if len(entities) > :credential = getattr(entities[], self.property_name)if getattr(credential, '', None) is not None:credential.set_store(self)return credentialelse:return None", "docstring": "Retrieve stored credential from the Django ORM.\n\n Returns:\n oauth2client.Credentials retrieved from the Django ORM, associated\n with the ``model``, ``key_value``->``key_name`` pair used to query\n for the model, and ``property_name`` identifying the\n ``CredentialsProperty`` field, all of which are defined in the\n constructor for this Storage object.", "id": "f2450:c0:m1"} {"signature": "def locked_put(self, credentials):", "body": "entity, _ = self.model_class.objects.get_or_create(**{self.key_name: self.key_value})setattr(entity, self.property_name, credentials)entity.save()", "docstring": "Write a Credentials to the Django datastore.\n\n Args:\n credentials: Credentials, the credentials to store.", "id": "f2450:c0:m2"} {"signature": "def locked_delete(self):", "body": "query = {self.key_name: self.key_value}self.model_class.objects.filter(**query).delete()", "docstring": "Delete Credentials from the datastore.", "id": "f2450:c0:m3"} {"signature": "def _load_client_secrets(filename):", "body": "client_type, client_info = clientsecrets.loadfile(filename)if client_type != clientsecrets.TYPE_WEB:raise ValueError(''''.format(client_type))return client_info[''], client_info['']", "docstring": "Loads client secrets from the given filename.\n\n Args:\n filename: The name of the file containing the JSON secret key.\n\n Returns:\n A 2-tuple, the first item containing the client id, and the second\n item containing a client secret.", "id": "f2452:m0"} {"signature": "def _get_oauth2_client_id_and_secret(settings_instance):", "body": "secret_json = getattr(settings_instance,'', None)if secret_json is not None:return _load_client_secrets(secret_json)else:client_id = getattr(settings_instance, \"\",None)client_secret = getattr(settings_instance,\"\", None)if client_id is not None and client_secret is not None:return 
client_id, client_secretelse:raise exceptions.ImproperlyConfigured(\"\"\"\"\"\")", "docstring": "Initializes client id and client secret based on the settings.\n\n Args:\n settings_instance: An instance of ``django.conf.settings``.\n\n Returns:\n A 2-tuple, the first item is the client id and the second\n item is the client secret.", "id": "f2452:m1"} {"signature": "def _get_storage_model():", "body": "storage_model_settings = getattr(django.conf.settings,'', None)if storage_model_settings is not None:return (storage_model_settings[''],storage_model_settings[''],storage_model_settings[''])else:return None, None, None", "docstring": "This configures whether the credentials will be stored in the session\n or the Django ORM based on the settings. By default, the credentials\n will be stored in the session, unless `GOOGLE_OAUTH2_STORAGE_MODEL`\n is found in the settings. Usually, the ORM storage is used to integrate\n credentials into an existing Django user system.\n\n Returns:\n A tuple containing three strings, or None. If\n ``GOOGLE_OAUTH2_STORAGE_MODEL`` is configured, the tuple\n will contain the fully qualifed path of the `django.db.model`,\n the name of the ``django.contrib.auth.models.User`` field on the\n model, and the name of the\n :class:`oauth2client.contrib.django_util.models.CredentialsField`\n field on the model. If Django ORM storage is not configured,\n this function returns None.", "id": "f2452:m2"} {"signature": "def get_storage(request):", "body": "storage_model = oauth2_settings.storage_modeluser_property = oauth2_settings.storage_model_user_propertycredentials_property = oauth2_settings.storage_model_credentials_propertyif storage_model:module_name, class_name = storage_model.rsplit('', )module = importlib.import_module(module_name)storage_model_class = getattr(module, class_name)return storage.DjangoORMStorage(storage_model_class,user_property,request.user,credentials_property)else:return dictionary_storage.DictionaryStorage(request.session, key=_CREDENTIALS_KEY)", "docstring": "Gets a Credentials storage object provided by the Django OAuth2 Helper\n object.\n\n Args:\n request: Reference to the current request object.\n\n Returns:\n An :class:`oauth2.client.Storage` object.", "id": "f2452:m3"} {"signature": "def _redirect_with_params(url_name, *args, **kwargs):", "body": "url = urlresolvers.reverse(url_name, args=args)params = parse.urlencode(kwargs, True)return \"\".format(url, params)", "docstring": "Helper method to create a redirect response with URL params.\n\n This builds a redirect string that converts kwargs into a\n query string.\n\n Args:\n url_name: The name of the url to redirect to.\n kwargs: the query string param and their values to build.\n\n Returns:\n A properly formatted redirect string.", "id": "f2452:m4"} {"signature": "def _credentials_from_request(request):", "body": "if (oauth2_settings.storage_model is None orrequest.user.is_authenticated()):return get_storage(request).get()else:return None", "docstring": "Gets the authorized credentials for this flow, if they exist.", "id": "f2452:m5"} {"signature": "def __init__(self, request, scopes=None, return_url=None):", "body": "self.request = requestself.return_url = return_url or request.get_full_path()if scopes:self._scopes = set(oauth2_settings.scopes) | set(scopes)else:self._scopes = set(oauth2_settings.scopes)", "docstring": "Initialize the Oauth2 Object.\n\n Args:\n request: Django request object.\n scopes: Scopes desired for this OAuth2 flow.\n return_url: The url to return to after the OAuth 
flow is complete,\n defaults to the request's current URL path.", "id": "f2452:c1:m0"} {"signature": "def get_authorize_redirect(self):", "body": "get_params = {'': self.return_url,'': self._get_scopes()}return _redirect_with_params('', **get_params)", "docstring": "Creates a URl to start the OAuth2 authorization flow.", "id": "f2452:c1:m1"} {"signature": "def has_credentials(self):", "body": "credentials = _credentials_from_request(self.request)return (credentials and not credentials.invalid andcredentials.has_scopes(self._get_scopes()))", "docstring": "Returns True if there are valid credentials for the current user\n and required scopes.", "id": "f2452:c1:m2"} {"signature": "def _get_scopes(self):", "body": "if _credentials_from_request(self.request):return (self._scopes |_credentials_from_request(self.request).scopes)else:return self._scopes", "docstring": "Returns the scopes associated with this object, kept up to\n date for incremental auth.", "id": "f2452:c1:m3"} {"signature": "@propertydef scopes(self):", "body": "return self._get_scopes()", "docstring": "Returns the scopes associated with this OAuth2 object.", "id": "f2452:c1:m4"} {"signature": "@propertydef credentials(self):", "body": "return _credentials_from_request(self.request)", "docstring": "Gets the authorized credentials for this flow, if they exist.", "id": "f2452:c1:m5"} {"signature": "@propertydef http(self):", "body": "if self.has_credentials():return self.credentials.authorize(transport.get_http_object())return None", "docstring": "Helper: create HTTP client authorized with OAuth2 credentials.", "id": "f2452:c1:m6"} {"signature": "def from_db_value(self, value, expression, connection, context):", "body": "return self.to_python(value)", "docstring": "Overrides ``models.Field`` method. This converts the value\n returned from the database to an instance of this class.", "id": "f2453:c0:m2"} {"signature": "def to_python(self, value):", "body": "if value is None:return Noneelif isinstance(value, oauth2client.client.Credentials):return valueelse:try:return jsonpickle.decode(base64.b64decode(encoding.smart_bytes(value)).decode())except ValueError:return pickle.loads(base64.b64decode(encoding.smart_bytes(value)))", "docstring": "Overrides ``models.Field`` method. This is used to convert\n bytes (from serialization etc) to an instance of this class", "id": "f2453:c0:m3"} {"signature": "def get_prep_value(self, value):", "body": "if value is None:return Noneelse:return encoding.smart_text(base64.b64encode(jsonpickle.encode(value).encode()))", "docstring": "Overrides ``models.Field`` method. 
This is used to convert\n the value from an instances of this class to bytes that can be\n inserted into the database.", "id": "f2453:c0:m4"} {"signature": "def value_to_string(self, obj):", "body": "value = self._get_val_from_obj(obj)return self.get_prep_value(value)", "docstring": "Convert the field value from the provided model to a string.\n\n Used during model serialization.\n\n Args:\n obj: db.Model, model object\n\n Returns:\n string, the serialized field value", "id": "f2453:c0:m5"} {"signature": "def oauth_required(decorated_function=None, scopes=None, **decorator_kwargs):", "body": "def curry_wrapper(wrapped_function):@wraps(wrapped_function)def required_wrapper(request, *args, **kwargs):if not (django_util.oauth2_settings.storage_model is None orrequest.user.is_authenticated()):redirect_str = ''.format(django.conf.settings.LOGIN_URL,parse.quote(request.path))return shortcuts.redirect(redirect_str)return_url = decorator_kwargs.pop('',request.get_full_path())user_oauth = django_util.UserOAuth2(request, scopes, return_url)if not user_oauth.has_credentials():return shortcuts.redirect(user_oauth.get_authorize_redirect())setattr(request, django_util.oauth2_settings.request_prefix,user_oauth)return wrapped_function(request, *args, **kwargs)return required_wrapperif decorated_function:return curry_wrapper(decorated_function)else:return curry_wrapper", "docstring": "Decorator to require OAuth2 credentials for a view.\n\n\n .. code-block:: python\n :caption: views.py\n :name: views_required_2\n\n\n from oauth2client.django_util.decorators import oauth_required\n\n @oauth_required\n def requires_default_scopes(request):\n email = request.credentials.id_token['email']\n service = build(serviceName='calendar', version='v3',\n http=request.oauth.http,\n developerKey=API_KEY)\n events = service.events().list(\n calendarId='primary').execute()['items']\n return HttpResponse(\n \"email: {0}, calendar: {1}\".format(email, str(events)))\n\n Args:\n decorated_function: View function to decorate, must have the Django\n request object as the first argument.\n scopes: Scopes to require, will default.\n decorator_kwargs: Can include ``return_url`` to specify the URL to\n return to after OAuth2 authorization is complete.\n\n Returns:\n An OAuth2 Authorize view if credentials are not found or if the\n credentials are missing the required scopes. Otherwise,\n the decorated view.", "id": "f2454:m0"} {"signature": "def oauth_enabled(decorated_function=None, scopes=None, **decorator_kwargs):", "body": "def curry_wrapper(wrapped_function):@wraps(wrapped_function)def enabled_wrapper(request, *args, **kwargs):return_url = decorator_kwargs.pop('',request.get_full_path())user_oauth = django_util.UserOAuth2(request, scopes, return_url)setattr(request, django_util.oauth2_settings.request_prefix,user_oauth)return wrapped_function(request, *args, **kwargs)return enabled_wrapperif decorated_function:return curry_wrapper(decorated_function)else:return curry_wrapper", "docstring": "Decorator to enable OAuth Credentials if authorized, and setup\n the oauth object on the request object to provide helper functions\n to start the flow otherwise.\n\n .. 
code-block:: python\n :caption: views.py\n :name: views_enabled3\n\n from oauth2client.django_util.decorators import oauth_enabled\n\n @oauth_enabled\n def optional_oauth2(request):\n if request.oauth.has_credentials():\n # this could be passed into a view\n # request.oauth.http is also initialized\n return HttpResponse(\"User email: {0}\".format(\n request.oauth.credentials.id_token['email'])\n else:\n return HttpResponse('Here is an OAuth Authorize link:\n Authorize'.format(\n request.oauth.get_authorize_redirect()))\n\n\n Args:\n decorated_function: View function to decorate.\n scopes: Scopes to require, will default.\n decorator_kwargs: Can include ``return_url`` to specify the URL to\n return to after OAuth2 authorization is complete.\n\n Returns:\n The decorated view function.", "id": "f2454:m1"} {"signature": "def __init__(self, session, model_class, key_name,key_value, property_name):", "body": "super(Storage, self).__init__()self.session = sessionself.model_class = model_classself.key_name = key_nameself.key_value = key_valueself.property_name = property_name", "docstring": "Constructor for Storage.\n\n Args:\n session: An instance of :class:`sqlalchemy.orm.Session`.\n model_class: SQLAlchemy declarative mapping.\n key_name: string, key name for the entity that has the credentials\n key_value: key value for the entity that has the credentials\n property_name: A string indicating which property on the\n ``model_class`` to store the credentials.\n This property must be a\n :class:`CredentialsType` column.", "id": "f2455:c1:m0"} {"signature": "def locked_get(self):", "body": "filters = {self.key_name: self.key_value}query = self.session.query(self.model_class).filter_by(**filters)entity = query.first()if entity:credential = getattr(entity, self.property_name)if credential and hasattr(credential, ''):credential.set_store(self)return credentialelse:return None", "docstring": "Retrieve stored credential.\n\n Returns:\n A :class:`oauth2client.Credentials` instance or `None`.", "id": "f2455:c1:m1"} {"signature": "def locked_put(self, credentials):", "body": "filters = {self.key_name: self.key_value}query = self.session.query(self.model_class).filter_by(**filters)entity = query.first()if not entity:entity = self.model_class(**filters)setattr(entity, self.property_name, credentials)self.session.add(entity)", "docstring": "Write a credentials to the SQLAlchemy datastore.\n\n Args:\n credentials: :class:`oauth2client.Credentials`", "id": "f2455:c1:m2"} {"signature": "def locked_delete(self):", "body": "filters = {self.key_name: self.key_value}self.session.query(self.model_class).filter_by(**filters).delete()", "docstring": "Delete credentials from the SQLAlchemy datastore.", "id": "f2455:c1:m3"} {"signature": "@_helpers.positional()def generate_token(key, user_id, action_id='', when=None):", "body": "digester = hmac.new(_helpers._to_bytes(key, encoding=''))digester.update(_helpers._to_bytes(str(user_id), encoding=''))digester.update(DELIMITER)digester.update(_helpers._to_bytes(action_id, encoding=''))digester.update(DELIMITER)when = _helpers._to_bytes(str(when or int(time.time())), encoding='')digester.update(when)digest = digester.digest()token = base64.urlsafe_b64encode(digest + DELIMITER + when)return token", "docstring": "Generates a URL-safe token for the given user, action, time tuple.\n\n Args:\n key: secret key to use.\n user_id: the user ID of the authenticated user.\n action_id: a string identifier of the action they requested\n authorization for.\n when: the time in seconds since 
the epoch at which the user was\n authorized for this action. If not set the current time is used.\n\n Returns:\n A string XSRF protection token.", "id": "f2456:m0"} {"signature": "@_helpers.positional()def validate_token(key, token, user_id, action_id=\"\", current_time=None):", "body": "if not token:return Falsetry:decoded = base64.urlsafe_b64decode(token)token_time = int(decoded.split(DELIMITER)[-])except (TypeError, ValueError, binascii.Error):return Falseif current_time is None:current_time = time.time()if current_time - token_time > DEFAULT_TIMEOUT_SECS:return Falseexpected_token = generate_token(key, user_id, action_id=action_id,when=token_time)if len(token) != len(expected_token):return Falsedifferent = for x, y in zip(bytearray(token), bytearray(expected_token)):different |= x ^ yreturn not different", "docstring": "Validates that the given token authorizes the user for the action.\n\n Tokens are invalid if the time of issue is too old or if the token\n does not match what generateToken outputs (i.e. the token was forged).\n\n Args:\n key: secret key to use.\n token: a string of the token generated by generateToken.\n user_id: the user ID of the authenticated user.\n action_id: a string identifier of the action they requested\n authorization for.\n\n Returns:\n A boolean - True if the user is authorized for the action, False\n otherwise.", "id": "f2456:m1"} {"signature": "def __init__(self, dictionary, key, lock=None):", "body": "super(DictionaryStorage, self).__init__(lock=lock)self._dictionary = dictionaryself._key = key", "docstring": "Construct a DictionaryStorage instance.", "id": "f2457:c0:m0"} {"signature": "def locked_get(self):", "body": "serialized = self._dictionary.get(self._key)if serialized is None:return Nonecredentials = client.OAuth2Credentials.from_json(serialized)credentials.set_store(self)return credentials", "docstring": "Retrieve the credentials from the dictionary, if they exist.\n\n Returns: A :class:`oauth2client.client.OAuth2Credentials` instance.", "id": "f2457:c0:m1"} {"signature": "def locked_put(self, credentials):", "body": "serialized = credentials.to_json()self._dictionary[self._key] = serialized", "docstring": "Save the credentials to the dictionary.\n\n Args:\n credentials: A :class:`oauth2client.client.OAuth2Credentials`\n instance.", "id": "f2457:c0:m2"} {"signature": "def locked_delete(self):", "body": "self._dictionary.pop(self._key, None)", "docstring": "Remove the credentials from the dictionary, if they exist.", "id": "f2457:c0:m3"} {"signature": "def __init__(self, email=None, *args, **kwargs):", "body": "if '' in kwargs:warnings.warn(_SCOPES_WARNING)kwargs[''] = Nonesuper(AppAssertionCredentials, self).__init__(None, *args, **kwargs)self.service_account_email = emailself.scopes = Noneself.invalid = True", "docstring": "Constructor for AppAssertionCredentials\n\n Args:\n email: an email that specifies the service account to use.\n Only necessary if using custom service accounts\n (see https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances#createdefaultserviceaccount).", "id": "f2458:c0:m0"} {"signature": "def retrieve_scopes(self, http):", "body": "self._retrieve_info(http)return self.scopes", "docstring": "Retrieves the canonical list of scopes for this access token.\n\n Overrides client.Credentials.retrieve_scopes. 
Fetches scopes info\n from the metadata server.\n\n Args:\n http: httplib2.Http, an http object to be used to make the refresh\n request.\n\n Returns:\n A set of strings containing the canonical list of scopes.", "id": "f2458:c0:m3"} {"signature": "def _retrieve_info(self, http):", "body": "if self.invalid:info = _metadata.get_service_account_info(http,service_account=self.service_account_email or '')self.invalid = Falseself.service_account_email = info['']self.scopes = info['']", "docstring": "Retrieves service account info for invalid credentials.\n\n Args:\n http: an object to be used to make HTTP requests.", "id": "f2458:c0:m4"} {"signature": "def _refresh(self, http):", "body": "try:self._retrieve_info(http)self.access_token, self.token_expiry = _metadata.get_token(http, service_account=self.service_account_email)except http_client.HTTPException as err:raise client.HttpAccessTokenRefreshError(str(err))", "docstring": "Refreshes the access token.\n\n Skip all the storage hoops and just refresh using the API.\n\n Args:\n http: an object to be used to make HTTP requests.\n\n Raises:\n HttpAccessTokenRefreshError: When the refresh fails.", "id": "f2458:c0:m5"} {"signature": "def sign_blob(self, blob):", "body": "raise NotImplementedError('')", "docstring": "Cryptographically sign a blob (of bytes).\n\n This method is provided to support a common interface, but\n the actual key used for a Google Compute Engine service account\n is not available, so it can't be used to sign content.\n\n Args:\n blob: bytes, Message to be signed.\n\n Raises:\n NotImplementedError, always.", "id": "f2458:c0:m8"} {"signature": "@classmethoddef _get_kind(cls):", "body": "return ''", "docstring": "Return the kind name for this class.", "id": "f2459:c0:m0"} {"signature": "def _validate(self, value):", "body": "_LOGGER.info('', type(value))if value is not None and not isinstance(value, client.Flow):raise TypeError(''''.format(self._name, value))", "docstring": "Validates a value as a proper Flow object.\n\n Args:\n value: A value to be set on the property.\n\n Raises:\n TypeError if the value is not an instance of Flow.", "id": "f2459:c1:m0"} {"signature": "def _validate(self, value):", "body": "_LOGGER.info('', type(value))if value is not None and not isinstance(value, client.Credentials):raise TypeError(''''.format(self._name, value))", "docstring": "Validates a value as a proper credentials object.\n\n Args:\n value: A value to be set on the property.\n\n Raises:\n TypeError if the value is not an instance of Credentials.", "id": "f2459:c2:m0"} {"signature": "def _to_base_type(self, value):", "body": "if value is None:return ''else:return value.to_json()", "docstring": "Converts our validated value to a JSON serialized string.\n\n Args:\n value: A value to be set in the datastore.\n\n Returns:\n A JSON serialized version of the credential, else '' if value\n is None.", "id": "f2459:c2:m1"} {"signature": "def _from_base_type(self, value):", "body": "if not value:return Nonetry:credentials = client.Credentials.new_from_json(value)except ValueError:credentials = Nonereturn credentials", "docstring": "Converts our stored JSON string back to the desired type.\n\n Args:\n value: A value from the datastore to be converted to the\n desired type.\n\n Returns:\n A deserialized Credentials (or subclass) object, else None if\n the value can't be parsed.", "id": "f2459:c2:m2"} {"signature": "@classmethoddef _get_kind(cls):", "body": "return ''", "docstring": "Return the kind name for this class.", "id": "f2459:c3:m0"} 
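The NDB property records just above (f2459:c2) describe storing a credential by serializing it with to_json() in _to_base_type and restoring it with Credentials.new_from_json() in _from_base_type. The sketch below is not part of the corpus; it only illustrates that JSON round-trip with placeholder credential values and a placeholder token_uri, and the same serialization is what the dictionary, Django ORM and SQLAlchemy storages in the surrounding records rely on.

```python
# Hypothetical illustration only -- not taken from the corpus above.
# Demonstrates the to_json()/new_from_json() round-trip that the
# _to_base_type/_from_base_type hooks are documented to perform.
from oauth2client import client

creds = client.OAuth2Credentials(
    access_token='fake-access-token',       # placeholder value
    client_id='fake-client-id',             # placeholder value
    client_secret='fake-client-secret',     # placeholder value
    refresh_token=None,
    token_expiry=None,
    token_uri='https://example.com/token',  # placeholder endpoint
    user_agent=None)

serialized = creds.to_json()                              # what _to_base_type would store
restored = client.Credentials.new_from_json(serialized)   # what _from_base_type would return
assert restored.access_token == creds.access_token
```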
{"signature": "def get(http, path, root=METADATA_ROOT, recursive=None):", "body": "url = urlparse.urljoin(root, path)url = _helpers._add_query_parameter(url, '', recursive)response, content = transport.request(http, url, headers=METADATA_HEADERS)if response.status == http_client.OK:decoded = _helpers._from_bytes(content)if response[''] == '':return json.loads(decoded)else:return decodedelse:raise http_client.HTTPException(''''.format(url, response))", "docstring": "Fetch a resource from the metadata server.\n\n Args:\n http: an object to be used to make HTTP requests.\n path: A string indicating the resource to retrieve. For example,\n 'instance/service-accounts/default'\n root: A string indicating the full path to the metadata server root.\n recursive: A boolean indicating whether to do a recursive query of\n metadata. See\n https://cloud.google.com/compute/docs/metadata#aggcontents\n\n Returns:\n A dictionary if the metadata server returns JSON, otherwise a string.\n\n Raises:\n http_client.HTTPException if an error corrured while\n retrieving metadata.", "id": "f2460:m0"} {"signature": "def get_service_account_info(http, service_account=''):", "body": "return get(http,''.format(service_account),recursive=True)", "docstring": "Get information about a service account from the metadata server.\n\n Args:\n http: an object to be used to make HTTP requests.\n service_account: An email specifying the service account for which to\n look up information. Default will be information for the \"default\"\n service account of the current compute engine instance.\n\n Returns:\n A dictionary with information about the specified service account,\n for example:\n\n {\n 'email': '...',\n 'scopes': ['scope', ...],\n 'aliases': ['default', '...']\n }", "id": "f2460:m1"} {"signature": "def get_token(http, service_account=''):", "body": "token_json = get(http,''.format(service_account))token_expiry = client._UTCNOW() + datetime.timedelta(seconds=token_json[''])return token_json[''], token_expiry", "docstring": "Fetch an oauth token for the\n\n Args:\n http: an object to be used to make HTTP requests.\n service_account: An email specifying the service account this token\n should represent. 
Default will be a token for the \"default\" service\n account of the current compute engine instance.\n\n Returns:\n A tuple of (access token, token expiration), where access token is the\n access token as a string and token expiration is a datetime object\n that indicates when the access token will expire.", "id": "f2460:m2"} {"signature": "def _create_file_if_needed(filename):", "body": "if os.path.exists(filename):return Falseelse:open(filename, '').close()logger.info(''.format(filename))return True", "docstring": "Creates the an empty file if it does not already exist.\n\n Returns:\n True if the file was created, False otherwise.", "id": "f2461:m0"} {"signature": "def _load_credentials_file(credentials_file):", "body": "try:credentials_file.seek()data = json.load(credentials_file)except Exception:logger.warning('''')return {}if data.get('') != :logger.warning('''')return {}credentials = {}for key, encoded_credential in iteritems(data.get('', {})):try:credential_json = base64.b64decode(encoded_credential)credential = client.Credentials.new_from_json(credential_json)credentials[key] = credentialexcept:logger.warning(''.format(key))return credentials", "docstring": "Load credentials from the given file handle.\n\n The file is expected to be in this format:\n\n {\n \"file_version\": 2,\n \"credentials\": {\n \"key\": \"base64 encoded json representation of credentials.\"\n }\n }\n\n This function will warn and return empty credentials instead of raising\n exceptions.\n\n Args:\n credentials_file: An open file handle.\n\n Returns:\n A dictionary mapping user-defined keys to an instance of\n :class:`oauth2client.client.Credentials`.", "id": "f2461:m1"} {"signature": "def _write_credentials_file(credentials_file, credentials):", "body": "data = {'': , '': {}}for key, credential in iteritems(credentials):credential_json = credential.to_json()encoded_credential = _helpers._from_bytes(base64.b64encode(_helpers._to_bytes(credential_json)))data[''][key] = encoded_credentialcredentials_file.seek()json.dump(data, credentials_file)credentials_file.truncate()", "docstring": "Writes credentials to a file.\n\n Refer to :func:`_load_credentials_file` for the format.\n\n Args:\n credentials_file: An open file handle, must be read/write.\n credentials: A dictionary mapping user-defined keys to an instance of\n :class:`oauth2client.client.Credentials`.", "id": "f2461:m2"} {"signature": "def _get_backend(filename):", "body": "filename = os.path.abspath(filename)with _backends_lock:if filename not in _backends:_backends[filename] = _MultiprocessStorageBackend(filename)return _backends[filename]", "docstring": "A helper method to get or create a backend with thread locking.\n\n This ensures that only one backend is used per-file per-process, so that\n thread and process locks are appropriately shared.\n\n Args:\n filename: The full path to the credential storage file.\n\n Returns:\n An instance of :class:`_MultiprocessStorageBackend`.", "id": "f2461:m3"} {"signature": "def _load_credentials(self):", "body": "if not self._file:returnloaded_credentials = _load_credentials_file(self._file)self._credentials.update(loaded_credentials)logger.debug('')", "docstring": "(Re-)loads the credentials from the file.", "id": "f2461:c0:m1"} {"signature": "def locked_get(self):", "body": "credential = self._backend.locked_get(self._key)if credential is not None:credential.set_store(self)return credential", "docstring": "Retrieves the current credentials from the store.\n\n Returns:\n An instance of 
:class:`oauth2client.client.Credentials` or `None`.", "id": "f2461:c1:m3"} {"signature": "def locked_put(self, credentials):", "body": "return self._backend.locked_put(self._key, credentials)", "docstring": "Writes the given credentials to the store.\n\n Args:\n credentials: an instance of\n :class:`oauth2client.client.Credentials`.", "id": "f2461:c1:m4"} {"signature": "def locked_delete(self):", "body": "return self._backend.locked_delete(self._key)", "docstring": "Deletes the current credentials from the store.", "id": "f2461:c1:m5"} {"signature": "def _safe_html(s):", "body": "return cgi.escape(s, quote=).replace(\"\", '')", "docstring": "Escape text to make it safe to display.\n\n Args:\n s: string, The text to escape.\n\n Returns:\n The escaped text as a string.", "id": "f2463:m0"} {"signature": "def _generate_new_xsrf_secret_key():", "body": "return os.urandom().encode(\"\")", "docstring": "Returns a random XSRF secret key.", "id": "f2463:m1"} {"signature": "def xsrf_secret_key():", "body": "secret = memcache.get(XSRF_MEMCACHE_ID, namespace=OAUTH2CLIENT_NAMESPACE)if not secret:model = SiteXsrfSecretKey.get_or_insert(key_name='')if not model.secret:model.secret = _generate_new_xsrf_secret_key()model.put()secret = model.secretmemcache.add(XSRF_MEMCACHE_ID, secret,namespace=OAUTH2CLIENT_NAMESPACE)return str(secret)", "docstring": "Return the secret key for use for XSRF protection.\n\n If the Site entity does not have a secret key, this method will also create\n one and persist it.\n\n Returns:\n The secret key.", "id": "f2463:m2"} {"signature": "def _build_state_value(request_handler, user):", "body": "uri = request_handler.request.urltoken = xsrfutil.generate_token(xsrf_secret_key(), user.user_id(),action_id=str(uri))return uri + '' + token", "docstring": "Composes the value for the 'state' parameter.\n\n Packs the current request URI and an XSRF token into an opaque string that\n can be passed to the authentication server via the 'state' parameter.\n\n Args:\n request_handler: webapp.RequestHandler, The request.\n user: google.appengine.api.users.User, The current user.\n\n Returns:\n The state value as a string.", "id": "f2463:m3"} {"signature": "def _parse_state_value(state, user):", "body": "uri, token = state.rsplit('', )if xsrfutil.validate_token(xsrf_secret_key(), token, user.user_id(),action_id=uri):return urielse:return None", "docstring": "Parse the value of the 'state' parameter.\n\n Parses the value and validates the XSRF token in the state parameter.\n\n Args:\n state: string, The value of the state parameter.\n user: google.appengine.api.users.User, The current user.\n\n Returns:\n The redirect URI, or None if XSRF token is not valid.", "id": "f2463:m4"} {"signature": "@_helpers.positional()def oauth2decorator_from_clientsecrets(filename, scope,message=None, cache=None):", "body": "return OAuth2DecoratorFromClientSecrets(filename, scope,message=message, cache=cache)", "docstring": "Creates an OAuth2Decorator populated from a clientsecrets file.\n\n Args:\n filename: string, File name of client secrets.\n scope: string or list of strings, scope(s) of the credentials being\n requested.\n message: string, A friendly string to display to the user if the\n clientsecrets file is missing or invalid. The message may\n contain HTML and will be presented on the web interface for\n any method that uses the decorator.\n cache: An optional cache service client that implements get() and set()\n methods. 
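The `_build_state_value`/`_parse_state_value` pair above packs the post-authorization redirect URI together with an XSRF token into the OAuth `state` parameter and unpacks it on the way back. Below is a standalone sketch of that round trip; the `':'` separator and the plain HMAC token are illustrative stand-ins for the literals elided from the extracted bodies, not the library's exact scheme.

```python
# Illustrative only: mimics the pack/validate/unpack shape of
# _build_state_value and _parse_state_value with a plain HMAC token.
import hashlib
import hmac

SECRET = b'not-a-real-xsrf-secret'   # placeholder for the site's XSRF secret

def make_state(uri, user_id):
    token = hmac.new(SECRET, '{}|{}'.format(user_id, uri).encode('utf-8'),
                     hashlib.sha256).hexdigest()
    return uri + ':' + token                       # opaque 'state' value

def parse_state(state, user_id):
    uri, token = state.rsplit(':', 1)              # split from the right
    expected = hmac.new(SECRET, '{}|{}'.format(user_id, uri).encode('utf-8'),
                        hashlib.sha256).hexdigest()
    return uri if hmac.compare_digest(token, expected) else None

state = make_state('https://example.com/after-auth', user_id='12345')
assert parse_state(state, user_id='12345') == 'https://example.com/after-auth'
```

Splitting from the right matters because the packed URI itself contains ':' characters, which is why `_parse_state_value` uses `rsplit`.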
See clientsecrets.loadfile() for details.\n\n Returns: An OAuth2Decorator", "id": "f2463:m5"} {"signature": "@_helpers.positional()def __init__(self, scope, **kwargs):", "body": "self.scope = _helpers.scopes_to_string(scope)self._kwargs = kwargsself.service_account_id = kwargs.get('', None)self._service_account_email = Nonesuper(AppAssertionCredentials, self).__init__(None)", "docstring": "Constructor for AppAssertionCredentials\n\n Args:\n scope: string or iterable of strings, scope(s) of the credentials\n being requested.\n **kwargs: optional keyword args, including:\n service_account_id: service account id of the application. If None\n or unspecified, the default service account for\n the app is used.", "id": "f2463:c1:m0"} {"signature": "def _refresh(self, http):", "body": "try:scopes = self.scope.split()(token, _) = app_identity.get_access_token(scopes, service_account_id=self.service_account_id)except app_identity.Error as e:raise client.AccessTokenRefreshError(str(e))self.access_token = token", "docstring": "Refreshes the access token.\n\n Since the underlying App Engine app_identity implementation does its\n own caching we can skip all the storage hoops and just to a refresh\n using the API.\n\n Args:\n http: unused HTTP object\n\n Raises:\n AccessTokenRefreshError: When the refresh fails.", "id": "f2463:c1:m2"} {"signature": "def sign_blob(self, blob):", "body": "return app_identity.sign_blob(blob)", "docstring": "Cryptographically sign a blob (of bytes).\n\n Implements abstract method\n :meth:`oauth2client.client.AssertionCredentials.sign_blob`.\n\n Args:\n blob: bytes, Message to be signed.\n\n Returns:\n tuple, A pair of the private key ID used to sign the blob and\n the signed contents.", "id": "f2463:c1:m6"} {"signature": "@propertydef service_account_email(self):", "body": "if self._service_account_email is None:self._service_account_email = (app_identity.get_service_account_name())return self._service_account_email", "docstring": "Get the email for the current service account.\n\n Returns:\n string, The email associated with the Google App Engine\n service account.", "id": "f2463:c1:m7"} {"signature": "@_helpers.positional()def __init__(self, model, key_name, property_name, cache=None, user=None):", "body": "super(StorageByKeyName, self).__init__()if key_name is None:if user is None:raise ValueError('''')key_name = user.user_id()self._model = modelself._key_name = key_nameself._property_name = property_nameself._cache = cache", "docstring": "Constructor for Storage.\n\n Args:\n model: db.Model or ndb.Model, model class\n key_name: string, key name for the entity that has the credentials\n property_name: string, name of the property that is a\n CredentialsProperty or CredentialsNDBProperty.\n cache: memcache, a write-through cache to put in front of the\n datastore. If the model you are using is an NDB model, using\n a cache will be redundant since the model uses an instance\n cache and memcache for you.\n user: users.User object, optional. 
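For the `AppAssertionCredentials` documented above, typical use is simply constructing it with a scope and authorizing an `httplib2.Http`. A sketch, meaningful only inside an App Engine standard application where the `app_identity` service exists; the import path and scope value are assumptions for illustration.

```python
# Sketch: only runs inside a Google App Engine standard application.
import httplib2
from oauth2client.contrib.appengine import AppAssertionCredentials

credentials = AppAssertionCredentials(
    scope='https://www.googleapis.com/auth/devstorage.read_only')

# _refresh() above calls app_identity.get_access_token() under the hood,
# so no client secrets or refresh tokens are involved.
http = credentials.authorize(httplib2.Http())
```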
Can be used to grab user ID as a\n key_name if no key name is specified.", "id": "f2463:c4:m0"} {"signature": "def _is_ndb(self):", "body": "if isinstance(self._model, type):if _NDB_MODEL is not None and issubclass(self._model, _NDB_MODEL):return Trueelif issubclass(self._model, db.Model):return Falseraise TypeError(''.format(self._model))", "docstring": "Determine whether the model of the instance is an NDB model.\n\n Returns:\n Boolean indicating whether or not the model is an NDB or DB model.", "id": "f2463:c4:m1"} {"signature": "def _get_entity(self):", "body": "if self._is_ndb():return self._model.get_by_id(self._key_name)else:return self._model.get_by_key_name(self._key_name)", "docstring": "Retrieve entity from datastore.\n\n Uses a different model method for db or ndb models.\n\n Returns:\n Instance of the model corresponding to the current storage object\n and stored using the key name of the storage object.", "id": "f2463:c4:m2"} {"signature": "def _delete_entity(self):", "body": "if self._is_ndb():_NDB_KEY(self._model, self._key_name).delete()else:entity_key = db.Key.from_path(self._model.kind(), self._key_name)db.delete(entity_key)", "docstring": "Delete entity from datastore.\n\n Attempts to delete using the key_name stored on the object, whether or\n not the given key is in the datastore.", "id": "f2463:c4:m3"} {"signature": "@db.non_transactional(allow_existing=True)def locked_get(self):", "body": "credentials = Noneif self._cache:json = self._cache.get(self._key_name)if json:credentials = client.Credentials.new_from_json(json)if credentials is None:entity = self._get_entity()if entity is not None:credentials = getattr(entity, self._property_name)if self._cache:self._cache.set(self._key_name, credentials.to_json())if credentials and hasattr(credentials, ''):credentials.set_store(self)return credentials", "docstring": "Retrieve Credential from datastore.\n\n Returns:\n oauth2client.Credentials", "id": "f2463:c4:m4"} {"signature": "@db.non_transactional(allow_existing=True)def locked_put(self, credentials):", "body": "entity = self._model.get_or_insert(self._key_name)setattr(entity, self._property_name, credentials)entity.put()if self._cache:self._cache.set(self._key_name, credentials.to_json())", "docstring": "Write a Credentials to the datastore.\n\n Args:\n credentials: Credentials, the credentials to store.", "id": "f2463:c4:m5"} {"signature": "@db.non_transactional(allow_existing=True)def locked_delete(self):", "body": "if self._cache:self._cache.delete(self._key_name)self._delete_entity()", "docstring": "Delete Credential from datastore.", "id": "f2463:c4:m6"} {"signature": "def get_credentials(self):", "body": "return getattr(self._tls, '', None)", "docstring": "A thread local Credentials object.\n\n Returns:\n A client.Credentials object, or None if credentials hasn't been set\n in this thread yet, which may happen when calling has_credentials\n inside oauth_aware.", "id": "f2463:c6:m1"} {"signature": "def get_flow(self):", "body": "return getattr(self._tls, '', None)", "docstring": "A thread local Flow object.\n\n Returns:\n A credentials.Flow object, or None if the flow hasn't been set in\n this thread yet, which happens in _create_flow() since Flows are\n created lazily.", "id": "f2463:c6:m3"} {"signature": "@_helpers.positional()def __init__(self, client_id, client_secret, 
scope,auth_uri=oauth2client.GOOGLE_AUTH_URI,token_uri=oauth2client.GOOGLE_TOKEN_URI,revoke_uri=oauth2client.GOOGLE_REVOKE_URI,user_agent=None,message=None,callback_path='',token_response_param=None,_storage_class=StorageByKeyName,_credentials_class=CredentialsModel,_credentials_property_name='',**kwargs):", "body": "self._tls = threading.local()self.flow = Noneself.credentials = Noneself._client_id = client_idself._client_secret = client_secretself._scope = _helpers.scopes_to_string(scope)self._auth_uri = auth_uriself._token_uri = token_uriself._revoke_uri = revoke_uriself._user_agent = user_agentself._kwargs = kwargsself._message = messageself._in_error = Falseself._callback_path = callback_pathself._token_response_param = token_response_paramself._storage_class = _storage_classself._credentials_class = _credentials_classself._credentials_property_name = _credentials_property_name", "docstring": "Constructor for OAuth2Decorator\n\n Args:\n client_id: string, client identifier.\n client_secret: string client secret.\n scope: string or iterable of strings, scope(s) of the credentials\n being requested.\n auth_uri: string, URI for authorization endpoint. For convenience\n defaults to Google's endpoints but any OAuth 2.0 provider\n can be used.\n token_uri: string, URI for token endpoint. For convenience defaults\n to Google's endpoints but any OAuth 2.0 provider can be\n used.\n revoke_uri: string, URI for revoke endpoint. For convenience\n defaults to Google's endpoints but any OAuth 2.0\n provider can be used.\n user_agent: string, User agent of your application, default to\n None.\n message: Message to display if there are problems with the\n OAuth 2.0 configuration. The message may contain HTML and\n will be presented on the web interface for any method that\n uses the decorator.\n callback_path: string, The absolute path to use as the callback\n URI. Note that this must match up with the URI given\n when registering the application in the APIs\n Console.\n token_response_param: string. If provided, the full JSON response\n to the access token request will be encoded\n and included in this query parameter in the\n callback URI. This is useful with providers\n (e.g. wordpress.com) that include extra\n fields that the client may want.\n _storage_class: \"Protected\" keyword argument not typically provided\n to this constructor. A storage class to aid in\n storing a Credentials object for a user in the\n datastore. Defaults to StorageByKeyName.\n _credentials_class: \"Protected\" keyword argument not typically\n provided to this constructor. A db or ndb Model\n class to hold credentials. 
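Putting the decorator's pieces together: the constructor above plus `oauth_required`, `callback_path` and `callback_handler()` (documented further below) are typically wired into a webapp application as sketched here, following the usage shown in the `callback_handler` docstring. This assumes an App Engine standard runtime; client id, secret and scope are placeholders.

```python
# Sketch: assumes google.appengine.ext is importable (App Engine standard);
# all credential values below are placeholders.
from google.appengine.ext import webapp
from oauth2client.contrib.appengine import OAuth2Decorator

decorator = OAuth2Decorator(
    client_id='your-client-id.apps.googleusercontent.com',
    client_secret='your-client-secret',
    scope='https://www.googleapis.com/auth/calendar.readonly')

class MainHandler(webapp.RequestHandler):
    @decorator.oauth_required
    def get(self):
        # Only reached once the user has granted access; decorator.http()
        # returns an httplib2.Http already carrying the credentials.
        http = decorator.http()
        self.response.out.write('authorized')

app = webapp.WSGIApplication([
    ('/', MainHandler),
    # The callback route must match the redirect URI registered in the
    # APIs Console, as noted in the constructor docstring above.
    (decorator.callback_path, decorator.callback_handler()),
])
```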
Defaults to\n CredentialsModel.\n _credentials_property_name: \"Protected\" keyword argument not\n typically provided to this constructor.\n A string indicating the name of the\n field on the _credentials_class where a\n Credentials object will be stored.\n Defaults to 'credentials'.\n **kwargs: dict, Keyword arguments are passed along as kwargs to\n the OAuth2WebServerFlow constructor.", "id": "f2463:c6:m4"} {"signature": "def oauth_required(self, method):", "body": "def check_oauth(request_handler, *args, **kwargs):if self._in_error:self._display_error_message(request_handler)returnuser = users.get_current_user()if not user:request_handler.redirect(users.create_login_url(request_handler.request.uri))returnself._create_flow(request_handler)self.flow.params[''] = _build_state_value(request_handler, user)self.credentials = self._storage_class(self._credentials_class, None,self._credentials_property_name, user=user).get()if not self.has_credentials():return request_handler.redirect(self.authorize_url())try:resp = method(request_handler, *args, **kwargs)except client.AccessTokenRefreshError:return request_handler.redirect(self.authorize_url())finally:self.credentials = Nonereturn respreturn check_oauth", "docstring": "Decorator that starts the OAuth 2.0 dance.\n\n Starts the OAuth dance for the logged in user if they haven't already\n granted access for this application.\n\n Args:\n method: callable, to be decorated method of a webapp.RequestHandler\n instance.", "id": "f2463:c6:m6"} {"signature": "def _create_flow(self, request_handler):", "body": "if self.flow is None:redirect_uri = request_handler.request.relative_url(self._callback_path) self.flow = client.OAuth2WebServerFlow(self._client_id, self._client_secret, self._scope,redirect_uri=redirect_uri, user_agent=self._user_agent,auth_uri=self._auth_uri, token_uri=self._token_uri,revoke_uri=self._revoke_uri, **self._kwargs)", "docstring": "Create the Flow object.\n\n The Flow is calculated lazily since we don't know where this app is\n running until it receives a request, at which point redirect_uri can be\n calculated and then the Flow object can be constructed.\n\n Args:\n request_handler: webapp.RequestHandler, the request handler.", "id": "f2463:c6:m7"} {"signature": "def oauth_aware(self, method):", "body": "def setup_oauth(request_handler, *args, **kwargs):if self._in_error:self._display_error_message(request_handler)returnuser = users.get_current_user()if not user:request_handler.redirect(users.create_login_url(request_handler.request.uri))returnself._create_flow(request_handler)self.flow.params[''] = _build_state_value(request_handler,user)self.credentials = self._storage_class(self._credentials_class, None,self._credentials_property_name, user=user).get()try:resp = method(request_handler, *args, **kwargs)finally:self.credentials = Nonereturn respreturn setup_oauth", "docstring": "Decorator that sets up for OAuth 2.0 dance, but doesn't do it.\n\n Does all the setup for the OAuth dance, but doesn't initiate it.\n This decorator is useful if you want to create a page that knows\n whether or not the user has granted access to this application.\n From within a method decorated with @oauth_aware the has_credentials()\n and authorize_url() methods can be called.\n\n Args:\n method: callable, to be decorated method of a webapp.RequestHandler\n instance.", "id": "f2463:c6:m8"} {"signature": "def has_credentials(self):", "body": "return self.credentials is not None and not self.credentials.invalid", "docstring": "True if for the logged in 
user there are valid access Credentials.\n\n Must only be called from with a webapp.RequestHandler subclassed method\n that had been decorated with either @oauth_required or @oauth_aware.", "id": "f2463:c6:m9"} {"signature": "def authorize_url(self):", "body": "url = self.flow.step1_get_authorize_url()return str(url)", "docstring": "Returns the URL to start the OAuth dance.\n\n Must only be called from with a webapp.RequestHandler subclassed method\n that had been decorated with either @oauth_required or @oauth_aware.", "id": "f2463:c6:m10"} {"signature": "def http(self, *args, **kwargs):", "body": "return self.credentials.authorize(transport.get_http_object(*args, **kwargs))", "docstring": "Returns an authorized http instance.\n\n Must only be called from within an @oauth_required decorated method, or\n from within an @oauth_aware decorated method where has_credentials()\n returns True.\n\n Args:\n *args: Positional arguments passed to httplib2.Http constructor.\n **kwargs: Positional arguments passed to httplib2.Http constructor.", "id": "f2463:c6:m11"} {"signature": "@propertydef callback_path(self):", "body": "return self._callback_path", "docstring": "The absolute path where the callback will occur.\n\n Note this is the absolute path, not the absolute URI, that will be\n calculated by the decorator at runtime. See callback_handler() for how\n this should be used.\n\n Returns:\n The callback path as a string.", "id": "f2463:c6:m12"} {"signature": "def callback_handler(self):", "body": "decorator = selfclass OAuth2Handler(webapp.RequestHandler):\"\"\"\"\"\"@login_requireddef get(self):error = self.request.get('')if error:errormsg = self.request.get('', error)self.response.out.write(''.format(_safe_html(errormsg)))else:user = users.get_current_user()decorator._create_flow(self)credentials = decorator.flow.step2_exchange(self.request.params)decorator._storage_class(decorator._credentials_class, None,decorator._credentials_property_name,user=user).put(credentials)redirect_uri = _parse_state_value(str(self.request.get('')), user)if redirect_uri is None:self.response.out.write('')returnif (decorator._token_response_param andcredentials.token_response):resp_json = json.dumps(credentials.token_response)redirect_uri = _helpers._add_query_parameter(redirect_uri, decorator._token_response_param,resp_json)self.redirect(redirect_uri)return OAuth2Handler", "docstring": "RequestHandler for the OAuth 2.0 redirect callback.\n\n Usage::\n\n app = webapp.WSGIApplication([\n ('/index', MyIndexHandler),\n ...,\n (decorator.callback_path, decorator.callback_handler())\n ])\n\n Returns:\n A webapp.RequestHandler that handles the redirect back from the\n server during the OAuth 2.0 dance.", "id": "f2463:c6:m13"} {"signature": "def callback_application(self):", "body": "return webapp.WSGIApplication([(self.callback_path, self.callback_handler())])", "docstring": "WSGI application for handling the OAuth 2.0 redirect callback.\n\n If you need finer grained control use `callback_handler` which returns\n just the webapp.RequestHandler.\n\n Returns:\n A webapp.WSGIApplication that handles the redirect back from the\n server during the OAuth 2.0 dance.", "id": "f2463:c6:m14"} {"signature": "@_helpers.positional()def __init__(self, filename, scope, message=None, cache=None, **kwargs):", "body": "client_type, client_info = clientsecrets.loadfile(filename,cache=cache)if client_type not in (clientsecrets.TYPE_WEB,clientsecrets.TYPE_INSTALLED):raise clientsecrets.InvalidClientSecretsError(\"\")constructor_kwargs = 
dict(kwargs)constructor_kwargs.update({'': client_info[''],'': client_info[''],'': message,})revoke_uri = client_info.get('')if revoke_uri is not None:constructor_kwargs[''] = revoke_urisuper(OAuth2DecoratorFromClientSecrets, self).__init__(client_info[''], client_info[''],scope, **constructor_kwargs)if message is not None:self._message = messageelse:self._message = ''", "docstring": "Constructor\n\n Args:\n filename: string, File name of client secrets.\n scope: string or iterable of strings, scope(s) of the credentials\n being requested.\n message: string, A friendly string to display to the user if the\n clientsecrets file is missing or invalid. The message may\n contain HTML and will be presented on the web interface\n for any method that uses the decorator.\n cache: An optional cache service client that implements get() and\n set()\n methods. See clientsecrets.loadfile() for details.\n **kwargs: dict, Keyword arguments are passed along as kwargs to\n the OAuth2WebServerFlow constructor.", "id": "f2463:c7:m0"} {"signature": "def _get_flow_for_token(csrf_token):", "body": "flow_pickle = session.pop(_FLOW_KEY.format(csrf_token), None)if flow_pickle is None:return Noneelse:return pickle.loads(flow_pickle)", "docstring": "Retrieves the flow instance associated with a given CSRF token from\n the Flask session.", "id": "f2464:m0"} {"signature": "def init_app(self, app, scopes=None, client_secrets_file=None,client_id=None, client_secret=None, authorize_callback=None,storage=None, **kwargs):", "body": "self.app = appself.authorize_callback = authorize_callbackself.flow_kwargs = kwargsif storage is None:storage = dictionary_storage.DictionaryStorage(session, key=_CREDENTIALS_KEY)self.storage = storageif scopes is None:scopes = app.config.get('', _DEFAULT_SCOPES)self.scopes = scopesself._load_config(client_secrets_file, client_id, client_secret)app.register_blueprint(self._create_blueprint())", "docstring": "Initialize this extension for the given app.\n\n Arguments:\n app: A Flask application.\n scopes: Optional list of scopes to authorize.\n client_secrets_file: Path to a file containing client secrets. You\n can also specify the GOOGLE_OAUTH2_CLIENT_SECRETS_FILE config\n value.\n client_id: If not specifying a client secrets file, specify the\n OAuth2 client id. You can also specify the\n GOOGLE_OAUTH2_CLIENT_ID config value. You must also provide a\n client secret.\n client_secret: The OAuth2 client secret. You can also specify the\n GOOGLE_OAUTH2_CLIENT_SECRET config value.\n authorize_callback: A function that is executed after successful\n user authorization.\n storage: A oauth2client.client.Storage subclass for storing the\n credentials. By default, this is a Flask session based storage.\n kwargs: Any additional args are passed along to the Flow\n constructor.", "id": "f2464:c0:m1"} {"signature": "def _load_config(self, client_secrets_file, client_id, client_secret):", "body": "if client_id and client_secret:self.client_id, self.client_secret = client_id, client_secretreturnif client_secrets_file:self._load_client_secrets(client_secrets_file)returnif '' in self.app.config:self._load_client_secrets(self.app.config[''])returntry:self.client_id, self.client_secret = (self.app.config[''],self.app.config[''])except KeyError:raise ValueError('''''''''')", "docstring": "Loads oauth2 configuration in order of priority.\n\n Priority:\n 1. Config passed to the constructor or init_app.\n 2. Config passed via the GOOGLE_OAUTH2_CLIENT_SECRETS_FILE app\n config.\n 3. 
Config passed via the GOOGLE_OAUTH2_CLIENT_ID and\n GOOGLE_OAUTH2_CLIENT_SECRET app config.\n\n Raises:\n ValueError if no config could be found.", "id": "f2464:c0:m2"} {"signature": "def _load_client_secrets(self, filename):", "body": "client_type, client_info = clientsecrets.loadfile(filename)if client_type != clientsecrets.TYPE_WEB:raise ValueError(''.format(client_type))self.client_id = client_info['']self.client_secret = client_info['']", "docstring": "Loads client secrets from the given filename.", "id": "f2464:c0:m3"} {"signature": "def _make_flow(self, return_url=None, **kwargs):", "body": "csrf_token = hashlib.sha256(os.urandom()).hexdigest()session[_CSRF_KEY] = csrf_tokenstate = json.dumps({'': csrf_token,'': return_url})kw = self.flow_kwargs.copy()kw.update(kwargs)extra_scopes = kw.pop('', [])scopes = set(self.scopes).union(set(extra_scopes))flow = client.OAuth2WebServerFlow(client_id=self.client_id,client_secret=self.client_secret,scope=scopes,state=state,redirect_uri=url_for('', _external=True),**kw)flow_key = _FLOW_KEY.format(csrf_token)session[flow_key] = pickle.dumps(flow)return flow", "docstring": "Creates a Web Server Flow", "id": "f2464:c0:m4"} {"signature": "def authorize_view(self):", "body": "args = request.args.to_dict()args[''] = request.args.getlist('')return_url = args.pop('', None)if return_url is None:return_url = request.referrer or ''flow = self._make_flow(return_url=return_url, **args)auth_url = flow.step1_get_authorize_url()return redirect(auth_url)", "docstring": "Flask view that starts the authorization flow.\n\n Starts flow by redirecting the user to the OAuth2 provider.", "id": "f2464:c0:m6"} {"signature": "def callback_view(self):", "body": "if '' in request.args:reason = request.args.get('', request.args.get('', ''))reason = markupsafe.escape(reason)return (''.format(reason),httplib.BAD_REQUEST)try:encoded_state = request.args['']server_csrf = session[_CSRF_KEY]code = request.args['']except KeyError:return '', httplib.BAD_REQUESTtry:state = json.loads(encoded_state)client_csrf = state['']return_url = state['']except (ValueError, KeyError):return '', httplib.BAD_REQUESTif client_csrf != server_csrf:return '', httplib.BAD_REQUESTflow = _get_flow_for_token(server_csrf)if flow is None:return '', httplib.BAD_REQUESTtry:credentials = flow.step2_exchange(code)except client.FlowExchangeError as exchange_error:current_app.logger.exception(exchange_error)content = ''.format(exchange_error)return content, httplib.BAD_REQUESTself.storage.put(credentials)if self.authorize_callback:self.authorize_callback(credentials)return redirect(return_url)", "docstring": "Flask view that handles the user's return from OAuth2 provider.\n\n On return, exchanges the authorization code for credentials and stores\n the credentials.", "id": "f2464:c0:m7"} {"signature": "@propertydef credentials(self):", "body": "ctx = _app_ctx_stack.topif not hasattr(ctx, _CREDENTIALS_KEY):ctx.google_oauth2_credentials = self.storage.get()return ctx.google_oauth2_credentials", "docstring": "The credentials for the current user or None if unavailable.", "id": "f2464:c0:m8"} {"signature": "def has_credentials(self):", "body": "if not self.credentials:return Falseelif (self.credentials.access_token_expired andnot self.credentials.refresh_token):return Falseelse:return True", "docstring": "Returns True if there are valid credentials for the current user.", "id": "f2464:c0:m9"} {"signature": "@propertydef email(self):", "body": "if not self.credentials:return Nonetry:return 
self.credentials.id_token['']except KeyError:current_app.logger.error(''.format(self.credentials.id_token))", "docstring": "Returns the user's email address or None if there are no credentials.\n\n The email address is provided by the current credentials' id_token.\n This should not be used as a unique identifier as the user can change\n their email. If you need a unique identifier, use user_id.", "id": "f2464:c0:m10"} {"signature": "@propertydef user_id(self):", "body": "if not self.credentials:return Nonetry:return self.credentials.id_token['']except KeyError:current_app.logger.error(''.format(self.credentials.id_token))", "docstring": "Returns a unique identifier for the user\n\n Returns None if there are no credentials.\n\n The id is provided by the current credentials' id_token.", "id": "f2464:c0:m11"} {"signature": "def authorize_url(self, return_url, **kwargs):", "body": "return url_for('', return_url=return_url, **kwargs)", "docstring": "Creates a URL that can be used to start the authorization flow.\n\n When the user is directed to the URL, the authorization flow will\n begin. Once complete, the user will be redirected to the specified\n return URL.\n\n Any kwargs are passed into the flow constructor.", "id": "f2464:c0:m12"} {"signature": "def required(self, decorated_function=None, scopes=None,**decorator_kwargs):", "body": "def curry_wrapper(wrapped_function):@wraps(wrapped_function)def required_wrapper(*args, **kwargs):return_url = decorator_kwargs.pop('', request.url)requested_scopes = set(self.scopes)if scopes is not None:requested_scopes |= set(scopes)if self.has_credentials():requested_scopes |= self.credentials.scopesrequested_scopes = list(requested_scopes)if (self.has_credentials() andself.credentials.has_scopes(requested_scopes)):return wrapped_function(*args, **kwargs)else:auth_url = self.authorize_url(return_url,scopes=requested_scopes,**decorator_kwargs)return redirect(auth_url)return required_wrapperif decorated_function:return curry_wrapper(decorated_function)else:return curry_wrapper", "docstring": "Decorator to require OAuth2 credentials for a view.\n\n If credentials are not available for the current user, then they will\n be redirected to the authorization flow. 
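A sketch of how the Flask helpers documented here fit together, assuming the class is `oauth2client.contrib.flask_util.UserOAuth2` (the class name is not shown in the extracted records). The client id/secret and the session secret are placeholders; the config key names come from the `_load_config` docstring above.

```python
from flask import Flask
from oauth2client.contrib.flask_util import UserOAuth2

app = Flask(__name__)
app.config['SECRET_KEY'] = 'replace-me'  # the default storage uses the session
app.config['GOOGLE_OAUTH2_CLIENT_ID'] = 'your-client-id'
app.config['GOOGLE_OAUTH2_CLIENT_SECRET'] = 'your-client-secret'

oauth2 = UserOAuth2(app)

@app.route('/profile')
@oauth2.required
def profile():
    # Reached only with valid credentials; otherwise `required` redirects the
    # user through authorize_view/callback_view and back to this URL.
    return 'Signed in as {}'.format(oauth2.email)
```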
Once complete, the user will\n be redirected back to the original page.", "id": "f2464:c0:m13"} {"signature": "def http(self, *args, **kwargs):", "body": "if not self.credentials:raise ValueError('')return self.credentials.authorize(transport.get_http_object(*args, **kwargs))", "docstring": "Returns an authorized http instance.\n\n Can only be called if there are valid credentials for the user, such\n as inside of a view that is decorated with @required.\n\n Args:\n *args: Positional arguments passed to httplib2.Http constructor.\n **kwargs: Positional arguments passed to httplib2.Http constructor.\n\n Raises:\n ValueError if no credentials are available.", "id": "f2464:c0:m14"} {"signature": "def __init__(self, service_name, user_name):", "body": "super(Storage, self).__init__(lock=threading.Lock())self._service_name = service_nameself._user_name = user_name", "docstring": "Constructor.\n\n Args:\n service_name: string, The name of the service under which the\n credentials are stored.\n user_name: string, The name of the user to store credentials for.", "id": "f2465:c0:m0"} {"signature": "def locked_get(self):", "body": "credentials = Nonecontent = keyring.get_password(self._service_name, self._user_name)if content is not None:try:credentials = client.Credentials.new_from_json(content)credentials.set_store(self)except ValueError:passreturn credentials", "docstring": "Retrieve Credential from file.\n\n Returns:\n oauth2client.client.Credentials", "id": "f2465:c0:m1"} {"signature": "def locked_put(self, credentials):", "body": "keyring.set_password(self._service_name, self._user_name,credentials.to_json())", "docstring": "Write Credentials to file.\n\n Args:\n credentials: Credentials, the credentials to store.", "id": "f2465:c0:m2"} {"signature": "def locked_delete(self):", "body": "keyring.set_password(self._service_name, self._user_name, '')", "docstring": "Delete Credentials file.\n\n Args:\n credentials: Credentials, the credentials to store.", "id": "f2465:c0:m3"} {"signature": "def _SendRecv():", "body": "port = int(os.getenv(DEVSHELL_ENV, ))if port == :raise NoDevshellServer()sock = socket.socket()sock.connect(('', port))data = CREDENTIAL_INFO_REQUEST_JSONmsg = ''.format(len(data), data)sock.sendall(_helpers._to_bytes(msg, encoding=''))header = sock.recv().decode()if '' not in header:raise CommunicationError('')len_str, json_str = header.split('', )to_read = int(len_str) - len(json_str)if to_read > :json_str += sock.recv(to_read, socket.MSG_WAITALL).decode()return CredentialInfoResponse(json_str)", "docstring": "Communicate with the Developer Shell server socket.", "id": "f2466:m0"} {"signature": "def __init__(self, json_string):", "body": "pbl = json.loads(json_string)if not isinstance(pbl, list):raise ValueError('' + str(pbl))pbl_len = len(pbl)self.user_email = pbl[] if pbl_len > else Noneself.project_id = pbl[] if pbl_len > else Noneself.access_token = pbl[] if pbl_len > else Noneself.expires_in = pbl[] if pbl_len > else None", "docstring": "Initialize the response data from JSON PBLite array.", "id": "f2466:c3:m0"} {"signature": "def _refresh(self, http):", "body": "self.devshell_response = _SendRecv()self.access_token = self.devshell_response.access_tokenexpires_in = self.devshell_response.expires_inif expires_in is not None:delta = datetime.timedelta(seconds=expires_in)self.token_expiry = client._UTCNOW() + deltaelse:self.token_expiry = None", "docstring": "Refreshes the access token.\n\n Args:\n http: unused HTTP object", "id": "f2466:c4:m1"} {"signature": "def 
_bit_list_to_bytes(bit_list):", "body": "num_bits = len(bit_list)byte_vals = bytearray()for start in six.moves.xrange(, num_bits, ):curr_bits = bit_list[start:start + ]char_val = sum(val * digitfor val, digit in zip(_POW2, curr_bits))byte_vals.append(char_val)return bytes(byte_vals)", "docstring": "Converts an iterable of 1's and 0's to bytes.\n\n Combines the list 8 at a time, treating each group of 8 bits\n as a single byte.", "id": "f2467:m0"} {"signature": "def verify(self, message, signature):", "body": "message = _helpers._to_bytes(message, encoding='')try:return rsa.pkcs1.verify(message, signature, self._pubkey)except (ValueError, rsa.pkcs1.VerificationError):return False", "docstring": "Verifies a message against a signature.\n\n Args:\n message: string or bytes, The message to verify. If string, will be\n encoded to bytes as utf-8.\n signature: string or bytes, The signature on the message. If\n string, will be encoded to bytes as utf-8.\n\n Returns:\n True if message was signed by the private key associated with the\n public key that this object was constructed with.", "id": "f2467:c0:m1"} {"signature": "@classmethoddef from_string(cls, key_pem, is_x509_cert):", "body": "key_pem = _helpers._to_bytes(key_pem)if is_x509_cert:der = rsa.pem.load_pem(key_pem, '')asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())if remaining != b'':raise ValueError('', remaining)cert_info = asn1_cert['']['']key_bytes = _bit_list_to_bytes(cert_info[''])pubkey = rsa.PublicKey.load_pkcs1(key_bytes, '')else:pubkey = rsa.PublicKey.load_pkcs1(key_pem, '')return cls(pubkey)", "docstring": "Construct an RsaVerifier instance from a string.\n\n Args:\n key_pem: string, public key in PEM format.\n is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it\n is expected to be an RSA key in PEM format.\n\n Returns:\n RsaVerifier instance.\n\n Raises:\n ValueError: if the key_pem can't be parsed. In either case, error\n will begin with 'No PEM start marker'. If\n ``is_x509_cert`` is True, will fail to find the\n \"-----BEGIN CERTIFICATE-----\" error, otherwise fails\n to find \"-----BEGIN RSA PUBLIC KEY-----\".", "id": "f2467:c0:m2"} {"signature": "def sign(self, message):", "body": "message = _helpers._to_bytes(message, encoding='')return rsa.pkcs1.sign(message, self._key, '')", "docstring": "Signs a message.\n\n Args:\n message: bytes, Message to be signed.\n\n Returns:\n string, The signature of the message for the given key.", "id": "f2467:c1:m1"} {"signature": "@classmethoddef from_string(cls, key, password=''):", "body": "key = _helpers._from_bytes(key) marker_id, key_bytes = pem.readPemBlocksFromFile(six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)if marker_id == :pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes,format='')elif marker_id == :key_info, remaining = decoder.decode(key_bytes, asn1Spec=_PKCS8_SPEC)if remaining != b'':raise ValueError('', remaining)pkey_info = key_info.getComponentByName('')pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),format='')else:raise ValueError('')return cls(pkey)", "docstring": "Construct an RsaSigner instance from a string.\n\n Args:\n key: string, private key in PEM format.\n password: string, password for private key file. 
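A round trip through the pure-Python RSA signer/verifier documented above. The `oauth2client.crypt` re-export of `RsaSigner`/`RsaVerifier` and the throwaway key generated with the `rsa` package are assumptions made so the sketch is self-contained.

```python
import rsa                      # the package these classes are built on
from oauth2client import crypt  # assumed to re-export RsaSigner/RsaVerifier

pub, priv = rsa.newkeys(2048)           # throwaway key pair, demo only
pem_private = priv.save_pkcs1()         # '-----BEGIN RSA PRIVATE KEY-----'
pem_public = pub.save_pkcs1()           # '-----BEGIN RSA PUBLIC KEY-----'

signer = crypt.RsaSigner.from_string(pem_private)
signature = signer.sign('some message')          # str is encoded as utf-8

verifier = crypt.RsaVerifier.from_string(pem_public, is_x509_cert=False)
assert verifier.verify('some message', signature)
assert not verifier.verify('tampered message', signature)
```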
Unused for PEM\n files.\n\n Returns:\n RsaSigner instance.\n\n Raises:\n ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in\n PEM format.", "id": "f2467:c1:m2"} {"signature": "def make_signed_jwt(signer, payload, key_id=None):", "body": "header = {'': '', '': ''}if key_id is not None:header[''] = key_idsegments = [_helpers._urlsafe_b64encode(_helpers._json_encode(header)),_helpers._urlsafe_b64encode(_helpers._json_encode(payload)),]signing_input = b''.join(segments)signature = signer.sign(signing_input)segments.append(_helpers._urlsafe_b64encode(signature))logger.debug(str(segments))return b''.join(segments)", "docstring": "Make a signed JWT.\n\n See http://self-issued.info/docs/draft-jones-json-web-token.html.\n\n Args:\n signer: crypt.Signer, Cryptographic signer.\n payload: dict, Dictionary of data to convert to JSON and then sign.\n key_id: string, (Optional) Key ID header.\n\n Returns:\n string, The JWT for the payload.", "id": "f2468:m1"} {"signature": "def _verify_signature(message, signature, certs):", "body": "for pem in certs:verifier = Verifier.from_string(pem, is_x509_cert=True)if verifier.verify(message, signature):returnraise AppIdentityError('')", "docstring": "Verifies signed content using a list of certificates.\n\n Args:\n message: string or bytes, The message to verify.\n signature: string or bytes, The signature on the message.\n certs: iterable, certificates in PEM format.\n\n Raises:\n AppIdentityError: If none of the certificates can verify the message\n against the signature.", "id": "f2468:m2"} {"signature": "def _check_audience(payload_dict, audience):", "body": "if audience is None:returnaudience_in_payload = payload_dict.get('')if audience_in_payload is None:raise AppIdentityError(''.format(payload_dict))if audience_in_payload != audience:raise AppIdentityError(''.format(audience_in_payload, audience, payload_dict))", "docstring": "Checks audience field from a JWT payload.\n\n Does nothing if the passed in ``audience`` is null.\n\n Args:\n payload_dict: dict, A dictionary containing a JWT payload.\n audience: string or NoneType, an audience to check for in\n the JWT payload.\n\n Raises:\n AppIdentityError: If there is no ``'aud'`` field in the payload\n dictionary but there is an ``audience`` to check.\n AppIdentityError: If the ``'aud'`` field in the payload dictionary\n does not match the ``audience``.", "id": "f2468:m3"} {"signature": "def _verify_time_range(payload_dict):", "body": "now = int(time.time())issued_at = payload_dict.get('')if issued_at is None:raise AppIdentityError(''.format(payload_dict))expiration = payload_dict.get('')if expiration is None:raise AppIdentityError(''.format(payload_dict))if expiration >= now + MAX_TOKEN_LIFETIME_SECS:raise AppIdentityError(''.format(payload_dict))earliest = issued_at - CLOCK_SKEW_SECSif now < earliest:raise AppIdentityError(''.format(now, earliest, payload_dict))latest = expiration + CLOCK_SKEW_SECSif now > latest:raise AppIdentityError(''.format(now, latest, payload_dict))", "docstring": "Verifies the issued at and expiration from a JWT payload.\n\n Makes sure the current time (in UTC) falls between the issued at and\n expiration for the JWT (with some skew allowed for via\n ``CLOCK_SKEW_SECS``).\n\n Args:\n payload_dict: dict, A dictionary containing a JWT payload.\n\n Raises:\n AppIdentityError: If there is no ``'iat'`` field in the payload\n dictionary.\n AppIdentityError: If there is no ``'exp'`` field in the payload\n dictionary.\n AppIdentityError: If the JWT expiration is too far in the 
future (i.e.\n if the expiration would imply a token lifetime\n longer than what is allowed.)\n AppIdentityError: If the token appears to have been issued in the\n future (up to clock skew).\n AppIdentityError: If the token appears to have expired in the past\n (up to clock skew).", "id": "f2468:m4"} {"signature": "def verify_signed_jwt_with_certs(jwt, certs, audience=None):", "body": "jwt = _helpers._to_bytes(jwt)if jwt.count(b'') != :raise AppIdentityError(''.format(jwt))header, payload, signature = jwt.split(b'')message_to_sign = header + b'' + payloadsignature = _helpers._urlsafe_b64decode(signature)payload_bytes = _helpers._urlsafe_b64decode(payload)try:payload_dict = json.loads(_helpers._from_bytes(payload_bytes))except:raise AppIdentityError(''.format(payload_bytes))_verify_signature(message_to_sign, signature, certs.values())_verify_time_range(payload_dict)_check_audience(payload_dict, audience)return payload_dict", "docstring": "Verify a JWT against public certs.\n\n See http://self-issued.info/docs/draft-jones-json-web-token.html.\n\n Args:\n jwt: string, A JWT.\n certs: dict, Dictionary where values of public keys in PEM format.\n audience: string, The audience, 'aud', that this JWT should contain. If\n None then the JWT's 'aud' parameter is not verified.\n\n Returns:\n dict, The deserialized JSON payload in the JWT.\n\n Raises:\n AppIdentityError: if any checks are failed.", "id": "f2468:m5"} {"signature": "def _validate_clientsecrets(clientsecrets_dict):", "body": "_INVALID_FILE_FORMAT_MSG = ('''''')if clientsecrets_dict is None:raise InvalidClientSecretsError(_INVALID_FILE_FORMAT_MSG)try:(client_type, client_info), = clientsecrets_dict.items()except (ValueError, AttributeError):raise InvalidClientSecretsError(_INVALID_FILE_FORMAT_MSG + '''''')if client_type not in VALID_CLIENT:raise InvalidClientSecretsError(''.format(client_type))for prop_name in VALID_CLIENT[client_type]['']:if prop_name not in client_info:raise InvalidClientSecretsError(''.format(prop_name, client_type))for prop_name in VALID_CLIENT[client_type]['']:if client_info[prop_name].startswith(''):raise InvalidClientSecretsError(''.format(prop_name))return client_type, client_info", "docstring": "Validate parsed client secrets from a file.\n\n Args:\n clientsecrets_dict: dict, a dictionary holding the client secrets.\n\n Returns:\n tuple, a string of the client type and the information parsed\n from the file.", "id": "f2469:m0"} {"signature": "def loadfile(filename, cache=None):", "body": "_SECRET_NAMESPACE = ''if not cache:return _loadfile(filename)obj = cache.get(filename, namespace=_SECRET_NAMESPACE)if obj is None:client_type, client_info = _loadfile(filename)obj = {client_type: client_info}cache.set(filename, obj, namespace=_SECRET_NAMESPACE)return next(six.iteritems(obj))", "docstring": "Loading of client_secrets JSON file, optionally backed by a cache.\n\n Typical cache storage would be App Engine memcache service,\n but you can pass in any other cache client that implements\n these methods:\n\n * ``get(key, namespace=ns)``\n * ``set(key, value, namespace=ns)``\n\n Usage::\n\n # without caching\n client_type, client_info = loadfile('secrets.json')\n # using App Engine memcache service\n from google.appengine.api import memcache\n client_type, client_info = loadfile('secrets.json', cache=memcache)\n\n Args:\n filename: string, Path to a client_secrets.json file on a filesystem.\n cache: An optional cache service client that implements get() and set()\n methods. 
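The `loadfile` usage shown in its docstring above expects a `client_secrets.json` with a single top-level `web` or `installed` section containing the fields that `_validate_clientsecrets` checks. A minimal sketch of writing such a file and loading it; all values are placeholders.

```python
import json
import tempfile

from oauth2client import clientsecrets

secrets = {
    'installed': {   # exactly one of 'web' or 'installed' at the top level
        'client_id': 'your-client-id.apps.googleusercontent.com',
        'client_secret': 'your-client-secret',
        'redirect_uris': ['urn:ietf:wg:oauth:2.0:oob'],
        'auth_uri': 'https://accounts.google.com/o/oauth2/auth',
        'token_uri': 'https://oauth2.googleapis.com/token',
    }
}

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as fh:
    json.dump(secrets, fh)

client_type, client_info = clientsecrets.loadfile(fh.name)
assert client_type == clientsecrets.TYPE_INSTALLED
print(client_info['client_id'])
```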
If not specified, the file is always being loaded from\n a filesystem.\n\n Raises:\n InvalidClientSecretsError: In case of a validation error or some\n I/O failure. Can happen only on cache miss.\n\n Returns:\n (client_type, client_info) tuple, as _loadfile() normally would.\n JSON contents is validated only during first load. Cache hits are not\n validated.", "id": "f2469:m4"} {"signature": "def code_verifier(n_bytes=):", "body": "verifier = base64.urlsafe_b64encode(os.urandom(n_bytes)).rstrip(b'')if len(verifier) < :raise ValueError(\"\")elif len(verifier) > :raise ValueError(\"\")else:return verifier", "docstring": "Generates a 'code_verifier' as described in section 4.1 of RFC 7636.\n\nThis is a 'high-entropy cryptographic random string' that will be\nimpractical for an attacker to guess.\n\nArgs:\n n_bytes: integer between 31 and 96, inclusive. default: 64\n number of bytes of entropy to include in verifier.\n\nReturns:\n Bytestring, representing urlsafe base64-encoded random data.", "id": "f2470:m0"} {"signature": "def code_challenge(verifier):", "body": "digest = hashlib.sha256(verifier).digest()return base64.urlsafe_b64encode(digest).rstrip(b'')", "docstring": "Creates a 'code_challenge' as described in section 4.2 of RFC 7636\nby taking the sha256 hash of the verifier and then urlsafe\nbase64-encoding it.\n\nArgs:\n verifier: bytestring, representing a code_verifier as generated by\n code_verifier().\n\nReturns:\n Bytestring, representing a urlsafe base64-encoded sha256 hash digest,\n without '=' padding.", "id": "f2470:m1"} {"signature": "def pkcs12_key_as_pem(private_key_bytes, private_key_password):", "body": "private_key_password = _helpers._to_bytes(private_key_password)pkcs12 = crypto.load_pkcs12(private_key_bytes, private_key_password)return crypto.dump_privatekey(crypto.FILETYPE_PEM,pkcs12.get_privatekey())", "docstring": "Convert the contents of a PKCS#12 key to PEM using pyOpenSSL.\n\n Args:\n private_key_bytes: Bytes. PKCS#12 key in DER format.\n private_key_password: String. Password for PKCS#12 key.\n\n Returns:\n String. PEM contents of ``private_key_bytes``.", "id": "f2471:m0"} {"signature": "def __init__(self, pubkey):", "body": "self._pubkey = pubkey", "docstring": "Constructor.\n\n Args:\n pubkey: OpenSSL.crypto.PKey, The public key to verify with.", "id": "f2471:c0:m0"} {"signature": "def verify(self, message, signature):", "body": "message = _helpers._to_bytes(message, encoding='')signature = _helpers._to_bytes(signature, encoding='')try:crypto.verify(self._pubkey, signature, message, '')return Trueexcept crypto.Error:return False", "docstring": "Verifies a message against a signature.\n\n Args:\n message: string or bytes, The message to verify. If string, will be\n encoded to bytes as utf-8.\n signature: string or bytes, The signature on the message. 
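The PKCE helpers above are small enough to reconstruct directly from their docstrings; this standalone sketch fills in the literals stripped from the extracted bodies (the '=' padding removal and the 64-byte default are both stated there) rather than importing the private module they live in.

```python
import base64
import hashlib
import os

def code_verifier(n_bytes=64):
    # 64 random bytes -> 86 urlsafe-base64 chars once '=' padding is stripped,
    # comfortably inside RFC 7636's 43..128 character window.
    return base64.urlsafe_b64encode(os.urandom(n_bytes)).rstrip(b'=')

def code_challenge(verifier):
    # S256 method: sha256 the verifier, then urlsafe-base64 without padding.
    digest = hashlib.sha256(verifier).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b'=')

verifier = code_verifier()
challenge = code_challenge(verifier)
# The challenge accompanies the authorization request
# (code_challenge_method=S256); the verifier is sent with the token request.
```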
If string,\n will be encoded to bytes as utf-8.\n\n Returns:\n True if message was signed by the private key associated with the\n public key that this object was constructed with.", "id": "f2471:c0:m1"} {"signature": "@staticmethoddef from_string(key_pem, is_x509_cert):", "body": "key_pem = _helpers._to_bytes(key_pem)if is_x509_cert:pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, key_pem)else:pubkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem)return OpenSSLVerifier(pubkey)", "docstring": "Construct a Verified instance from a string.\n\n Args:\n key_pem: string, public key in PEM format.\n is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it\n is expected to be an RSA key in PEM format.\n\n Returns:\n Verifier instance.\n\n Raises:\n OpenSSL.crypto.Error: if the key_pem can't be parsed.", "id": "f2471:c0:m2"} {"signature": "def __init__(self, pkey):", "body": "self._key = pkey", "docstring": "Constructor.\n\n Args:\n pkey: OpenSSL.crypto.PKey (or equiv), The private key to sign with.", "id": "f2471:c1:m0"} {"signature": "def sign(self, message):", "body": "message = _helpers._to_bytes(message, encoding='')return crypto.sign(self._key, message, '')", "docstring": "Signs a message.\n\n Args:\n message: bytes, Message to be signed.\n\n Returns:\n string, The signature of the message for the given key.", "id": "f2471:c1:m1"} {"signature": "@staticmethoddef from_string(key, password=b''):", "body": "key = _helpers._to_bytes(key)parsed_pem_key = _helpers._parse_pem_key(key)if parsed_pem_key:pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, parsed_pem_key)else:password = _helpers._to_bytes(password, encoding='')pkey = crypto.load_pkcs12(key, password).get_privatekey()return OpenSSLSigner(pkey)", "docstring": "Construct a Signer instance from a string.\n\n Args:\n key: string, private key in PKCS12 or PEM format.\n password: string, password for the private key file.\n\n Returns:\n Signer instance.\n\n Raises:\n OpenSSL.crypto.Error if the key can't be parsed.", "id": "f2471:c1:m2"} {"signature": "def _detect_gce_environment():", "body": "http = transport.get_http_object(timeout=GCE_METADATA_TIMEOUT)try:response, _ = transport.request(http, _GCE_METADATA_URI, headers=_GCE_HEADERS)return (response.status == http_client.OK andresponse.get(_METADATA_FLAVOR_HEADER) == _DESIRED_METADATA_FLAVOR)except socket.error: logger.info('')return False", "docstring": "Determine if the current environment is Compute Engine.\n\n Returns:\n Boolean indicating whether or not the current environment is Google\n Compute Engine.", "id": "f2472:m1"} {"signature": "def _in_gae_environment():", "body": "if SETTINGS.env_name is not None:return SETTINGS.env_name in ('', '')try:import google.appengine except ImportError:passelse:server_software = os.environ.get(_SERVER_SOFTWARE, '')if server_software.startswith(''):SETTINGS.env_name = ''return Trueelif server_software.startswith(''):SETTINGS.env_name = ''return Truereturn False", "docstring": "Detects if the code is running in the App Engine environment.\n\n Returns:\n True if running in the GAE environment, False otherwise.", "id": "f2472:m2"} {"signature": "def _in_gce_environment():", "body": "if SETTINGS.env_name is not None:return SETTINGS.env_name == ''if NO_GCE_CHECK != '' and _detect_gce_environment():SETTINGS.env_name = ''return Truereturn False", "docstring": "Detect if the code is running in the Compute Engine environment.\n\n Returns:\n True if running in the GCE environment, False otherwise.", "id": "f2472:m3"} {"signature": "def 
_save_private_file(filename, json_contents):", "body": "temp_filename = tempfile.mktemp()file_desc = os.open(temp_filename, os.O_WRONLY | os.O_CREAT, )with os.fdopen(file_desc, '') as file_handle:json.dump(json_contents, file_handle, sort_keys=True,indent=, separators=('', ''))shutil.move(temp_filename, filename)", "docstring": "Saves a file with read-write permissions on for the owner.\n\n Args:\n filename: String. Absolute path to file.\n json_contents: JSON serializable object to be saved.", "id": "f2472:m4"} {"signature": "def save_to_well_known_file(credentials, well_known_file=None):", "body": "if well_known_file is None:well_known_file = _get_well_known_file()config_dir = os.path.dirname(well_known_file)if not os.path.isdir(config_dir):raise OSError(''.format(config_dir))credentials_data = credentials.serialization_data_save_private_file(well_known_file, credentials_data)", "docstring": "Save the provided GoogleCredentials to the well known file.\n\n Args:\n credentials: the credentials to be saved to the well known file;\n it should be an instance of GoogleCredentials\n well_known_file: the name of the file where the credentials are to be\n saved; this parameter is supposed to be used for\n testing only", "id": "f2472:m5"} {"signature": "def _get_well_known_file():", "body": "default_config_dir = os.getenv(_CLOUDSDK_CONFIG_ENV_VAR)if default_config_dir is None:if os.name == '':try:default_config_dir = os.path.join(os.environ[''],_CLOUDSDK_CONFIG_DIRECTORY)except KeyError:drive = os.environ.get('', '')default_config_dir = os.path.join(drive, '',_CLOUDSDK_CONFIG_DIRECTORY)else:default_config_dir = os.path.join(os.path.expanduser(''),'',_CLOUDSDK_CONFIG_DIRECTORY)return os.path.join(default_config_dir, _WELL_KNOWN_CREDENTIALS_FILE)", "docstring": "Get the well known file produced by command 'gcloud auth login'.", "id": "f2472:m7"} {"signature": "def _get_application_default_credential_from_file(filename):", "body": "with open(filename) as file_obj:client_credentials = json.load(file_obj)credentials_type = client_credentials.get('')if credentials_type == AUTHORIZED_USER:required_fields = set(['', '', ''])elif credentials_type == SERVICE_ACCOUNT:required_fields = set(['', '', '',''])else:raise ApplicationDefaultCredentialsError(\"\" +AUTHORIZED_USER + \"\" + SERVICE_ACCOUNT + \"\")missing_fields = required_fields.difference(client_credentials.keys())if missing_fields:_raise_exception_for_missing_fields(missing_fields)if client_credentials[''] == AUTHORIZED_USER:return GoogleCredentials(access_token=None,client_id=client_credentials[''],client_secret=client_credentials[''],refresh_token=client_credentials[''],token_expiry=None,token_uri=oauth2client.GOOGLE_TOKEN_URI,user_agent='')else: from oauth2client import service_accountreturn service_account._JWTAccessCredentials.from_json_keyfile_dict(client_credentials)", "docstring": "Build the Application Default Credentials from file.", "id": "f2472:m8"} {"signature": "def _require_crypto_or_die():", "body": "if not HAS_CRYPTO:raise CryptoUnavailableError('')", "docstring": "Ensure we have a crypto library, or throw CryptoUnavailableError.\n\n The oauth2client.crypt module requires either PyCrypto or PyOpenSSL\n to be available in order to function, but these are optional\n dependencies.", "id": "f2472:m13"} {"signature": "@_helpers.positional()def verify_id_token(id_token, audience, http=None,cert_uri=ID_TOKEN_VERIFICATION_CERTS):", "body": "_require_crypto_or_die()if http is None:http = transport.get_cached_http()resp, content = 
transport.request(http, cert_uri)if resp.status == http_client.OK:certs = json.loads(_helpers._from_bytes(content))return crypt.verify_signed_jwt_with_certs(id_token, certs, audience)else:raise VerifyJwtTokenError(''.format(resp.status))", "docstring": "Verifies a signed JWT id_token.\n\n This function requires PyOpenSSL and because of that it does not work on\n App Engine.\n\n Args:\n id_token: string, A Signed JWT.\n audience: string, The audience 'aud' that the token should be for.\n http: httplib2.Http, instance to use to make the HTTP request. Callers\n should supply an instance that has caching enabled.\n cert_uri: string, URI of the certificates in JSON format to\n verify the JWT against.\n\n Returns:\n The deserialized JSON in the JWT.\n\n Raises:\n oauth2client.crypt.AppIdentityError: if the JWT fails to verify.\n CryptoUnavailableError: if no crypto library is available.", "id": "f2472:m14"} {"signature": "def _extract_id_token(id_token):", "body": "if type(id_token) == bytes:segments = id_token.split(b'')else:segments = id_token.split(u'')if len(segments) != :raise VerifyJwtTokenError(''.format(id_token))return json.loads(_helpers._from_bytes(_helpers._urlsafe_b64decode(segments[])))", "docstring": "Extract the JSON payload from a JWT.\n\n Does the extraction w/o checking the signature.\n\n Args:\n id_token: string or bytestring, OAuth 2.0 id_token.\n\n Returns:\n object, The deserialized JSON payload.", "id": "f2472:m15"} {"signature": "def _parse_exchange_token_response(content):", "body": "resp = {}content = _helpers._from_bytes(content)try:resp = json.loads(content)except Exception:resp = _helpers.parse_unique_urlencoded(content)if resp and '' in resp:resp[''] = resp.pop('')return resp", "docstring": "Parses response of an exchange token request.\n\n Most providers return JSON but some (e.g. Facebook) return a\n url-encoded string.\n\n Args:\n content: The body of a response\n\n Returns:\n Content as a dictionary object. Note that the dict could be empty,\n i.e. {}. That basically indicates a failure.", "id": "f2472:m16"} {"signature": "@_helpers.positional()def credentials_from_code(client_id, client_secret, scope, code,redirect_uri='', http=None,user_agent=None,token_uri=oauth2client.GOOGLE_TOKEN_URI,auth_uri=oauth2client.GOOGLE_AUTH_URI,revoke_uri=oauth2client.GOOGLE_REVOKE_URI,device_uri=oauth2client.GOOGLE_DEVICE_URI,token_info_uri=oauth2client.GOOGLE_TOKEN_INFO_URI,pkce=False,code_verifier=None):", "body": "flow = OAuth2WebServerFlow(client_id, client_secret, scope,redirect_uri=redirect_uri,user_agent=user_agent,auth_uri=auth_uri,token_uri=token_uri,revoke_uri=revoke_uri,device_uri=device_uri,token_info_uri=token_info_uri,pkce=pkce,code_verifier=code_verifier)credentials = flow.step2_exchange(code, http=http)return credentials", "docstring": "Exchanges an authorization code for an OAuth2Credentials object.\n\n Args:\n client_id: string, client identifier.\n client_secret: string, client secret.\n scope: string or iterable of strings, scope(s) to request.\n code: string, An authorization code, most likely passed down from\n the client\n redirect_uri: string, this is generally set to 'postmessage' to match\n the redirect_uri that the client specified\n http: httplib2.Http, optional http instance to use to do the fetch\n token_uri: string, URI for token endpoint. For convenience defaults\n to Google's endpoints but any OAuth 2.0 provider can be\n used.\n auth_uri: string, URI for authorization endpoint. 
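What `_extract_id_token` above does can be shown with plain standard-library pieces: split the JWT on '.', restore the base64 padding, and decode the middle segment without touching the signature. The token below is built on the spot for illustration and is not a real id_token.

```python
import base64
import json

def b64_no_pad(raw):
    return base64.urlsafe_b64encode(raw).rstrip(b'=')

fake_jwt = b'.'.join([
    b64_no_pad(b'{"alg":"RS256","typ":"JWT"}'),
    b64_no_pad(json.dumps({'sub': '1234', 'email': 'user@example.com'}).encode()),
    b'not-a-real-signature',
])

segments = fake_jwt.split(b'.')
assert len(segments) == 3                      # same shape check as above
padded = segments[1] + b'=' * (-len(segments[1]) % 4)
claims = json.loads(base64.urlsafe_b64decode(padded))
print(claims['email'])                         # 'user@example.com'
```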
For convenience\n defaults to Google's endpoints but any OAuth 2.0 provider\n can be used.\n revoke_uri: string, URI for revoke endpoint. For convenience\n defaults to Google's endpoints but any OAuth 2.0 provider\n can be used.\n device_uri: string, URI for device authorization endpoint. For\n convenience defaults to Google's endpoints but any OAuth\n 2.0 provider can be used.\n pkce: boolean, default: False, Generate and include a \"Proof Key\n for Code Exchange\" (PKCE) with your authorization and token\n requests. This adds security for installed applications that\n cannot protect a client_secret. See RFC 7636 for details.\n code_verifier: bytestring or None, default: None, parameter passed\n as part of the code exchange when pkce=True. If\n None, a code_verifier will automatically be\n generated as part of step1_get_authorize_url(). See\n RFC 7636 for details.\n\n Returns:\n An OAuth2Credentials object.\n\n Raises:\n FlowExchangeError if the authorization code cannot be exchanged for an\n access token", "id": "f2472:m17"} {"signature": "@_helpers.positional()def credentials_from_clientsecrets_and_code(filename, scope, code,message=None,redirect_uri='',http=None,cache=None,device_uri=None):", "body": "flow = flow_from_clientsecrets(filename, scope, message=message,cache=cache, redirect_uri=redirect_uri,device_uri=device_uri)credentials = flow.step2_exchange(code, http=http)return credentials", "docstring": "Returns OAuth2Credentials from a clientsecrets file and an auth code.\n\n Will create the right kind of Flow based on the contents of the\n clientsecrets file or will raise InvalidClientSecretsError for unknown\n types of Flows.\n\n Args:\n filename: string, File name of clientsecrets.\n scope: string or iterable of strings, scope(s) to request.\n code: string, An authorization code, most likely passed down from\n the client\n message: string, A friendly string to display to the user if the\n clientsecrets file is missing or invalid. If message is\n provided then sys.exit will be called in the case of an error.\n If message in not provided then\n clientsecrets.InvalidClientSecretsError will be raised.\n redirect_uri: string, this is generally set to 'postmessage' to match\n the redirect_uri that the client specified\n http: httplib2.Http, optional http instance to use to do the fetch\n cache: An optional cache service client that implements get() and set()\n methods. See clientsecrets.loadfile() for details.\n device_uri: string, OAuth 2.0 device authorization endpoint\n pkce: boolean, default: False, Generate and include a \"Proof Key\n for Code Exchange\" (PKCE) with your authorization and token\n requests. This adds security for installed applications that\n cannot protect a client_secret. See RFC 7636 for details.\n code_verifier: bytestring or None, default: None, parameter passed\n as part of the code exchange when pkce=True. If\n None, a code_verifier will automatically be\n generated as part of step1_get_authorize_url(). 
See\n RFC 7636 for details.\n\n Returns:\n An OAuth2Credentials object.\n\n Raises:\n FlowExchangeError: if the authorization code cannot be exchanged for an\n access token\n UnknownClientSecretsFlowError: if the file describes an unknown kind\n of Flow.\n clientsecrets.InvalidClientSecretsError: if the clientsecrets file is\n invalid.", "id": "f2472:m18"} {"signature": "def _oauth2_web_server_flow_params(kwargs):", "body": "params = {'': '','': '',}params.update(kwargs)approval_prompt = params.get('')if approval_prompt is not None:logger.warning('''')if approval_prompt == '':logger.warning('''')params[''] = ''del params['']return params", "docstring": "Configures redirect URI parameters for OAuth2WebServerFlow.", "id": "f2472:m19"} {"signature": "@_helpers.positional()def flow_from_clientsecrets(filename, scope, redirect_uri=None,message=None, cache=None, login_hint=None,device_uri=None, pkce=None, code_verifier=None,prompt=None):", "body": "try:client_type, client_info = clientsecrets.loadfile(filename,cache=cache)if client_type in (clientsecrets.TYPE_WEB,clientsecrets.TYPE_INSTALLED):constructor_kwargs = {'': redirect_uri,'': client_info[''],'': client_info[''],'': login_hint,}revoke_uri = client_info.get('')optional = ('','','','','')for param in optional:if locals()[param] is not None:constructor_kwargs[param] = locals()[param]return OAuth2WebServerFlow(client_info[''], client_info[''],scope, **constructor_kwargs)except clientsecrets.InvalidClientSecretsError as e:if message is not None:if e.args:message = (''''.format(e, message))sys.exit(message)else:raiseelse:raise UnknownClientSecretsFlowError(''.format(client_type))", "docstring": "Create a Flow from a clientsecrets file.\n\n Will create the right kind of Flow based on the contents of the\n clientsecrets file or will raise InvalidClientSecretsError for unknown\n types of Flows.\n\n Args:\n filename: string, File name of client secrets.\n scope: string or iterable of strings, scope(s) to request.\n redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for\n a non-web-based application, or a URI that handles the\n callback from the authorization server.\n message: string, A friendly string to display to the user if the\n clientsecrets file is missing or invalid. If message is\n provided then sys.exit will be called in the case of an error.\n If message in not provided then\n clientsecrets.InvalidClientSecretsError will be raised.\n cache: An optional cache service client that implements get() and set()\n methods. See clientsecrets.loadfile() for details.\n login_hint: string, Either an email address or domain. Passing this\n hint will either pre-fill the email box on the sign-in form\n or select the proper multi-login session, thereby\n simplifying the login flow.\n device_uri: string, URI for device authorization endpoint. 
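A short sketch of the clientsecrets-based code exchange described above; ``client_secrets.json`` and ``auth_code`` are placeholders for a downloaded client secrets file and the authorization code posted back by the client::

    from oauth2client import client

    auth_code = "<authorization code sent by the client>"   # placeholder
    credentials = client.credentials_from_clientsecrets_and_code(
        "client_secrets.json",
        scope="https://www.googleapis.com/auth/userinfo.email",
        code=auth_code)
    print(credentials.access_token)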
For\n convenience defaults to Google's endpoints but any\n OAuth 2.0 provider can be used.\n\n Returns:\n A Flow object.\n\n Raises:\n UnknownClientSecretsFlowError: if the file describes an unknown kind of\n Flow.\n clientsecrets.InvalidClientSecretsError: if the clientsecrets file is\n invalid.", "id": "f2472:m20"} {"signature": "def authorize(self, http):", "body": "raise NotImplementedError", "docstring": "Take an httplib2.Http instance (or equivalent) and authorizes it.\n\n Authorizes it for the set of credentials, usually by replacing\n http.request() with a method that adds in the appropriate headers and\n then delegates to the original Http.request() method.\n\n Args:\n http: httplib2.Http, an http object to be used to make the refresh\n request.", "id": "f2472:c13:m0"} {"signature": "def refresh(self, http):", "body": "raise NotImplementedError", "docstring": "Forces a refresh of the access_token.\n\n Args:\n http: httplib2.Http, an http object to be used to make the refresh\n request.", "id": "f2472:c13:m1"} {"signature": "def revoke(self, http):", "body": "raise NotImplementedError", "docstring": "Revokes a refresh_token and makes the credentials void.\n\n Args:\n http: httplib2.Http, an http object to be used to make the revoke\n request.", "id": "f2472:c13:m2"} {"signature": "def apply(self, headers):", "body": "raise NotImplementedError", "docstring": "Add the authorization to the headers.\n\n Args:\n headers: dict, the headers to add the Authorization header to.", "id": "f2472:c13:m3"} {"signature": "def _to_json(self, strip, to_serialize=None):", "body": "curr_type = self.__class__if to_serialize is None:to_serialize = copy.copy(self.__dict__)else:to_serialize = copy.copy(to_serialize)for member in strip:if member in to_serialize:del to_serialize[member]to_serialize[''] = _parse_expiry(to_serialize.get(''))to_serialize[''] = curr_type.__name__to_serialize[''] = curr_type.__module__for key, val in to_serialize.items():if isinstance(val, bytes):to_serialize[key] = val.decode('')if isinstance(val, set):to_serialize[key] = list(val)return json.dumps(to_serialize)", "docstring": "Utility function that creates JSON repr. of a Credentials object.\n\n Args:\n strip: array, An array of names of members to exclude from the\n JSON.\n to_serialize: dict, (Optional) The properties for this object\n that will be serialized. 
This allows callers to\n modify before serializing.\n\n Returns:\n string, a JSON representation of this instance, suitable to pass to\n from_json().", "id": "f2472:c13:m4"} {"signature": "def to_json(self):", "body": "return self._to_json(self.NON_SERIALIZED_MEMBERS)", "docstring": "Creating a JSON representation of an instance of Credentials.\n\n Returns:\n string, a JSON representation of this instance, suitable to pass to\n from_json().", "id": "f2472:c13:m5"} {"signature": "@classmethoddef new_from_json(cls, json_data):", "body": "json_data_as_unicode = _helpers._from_bytes(json_data)data = json.loads(json_data_as_unicode)module_name = data['']try:module_obj = __import__(module_name)except ImportError:module_name = module_name.replace('', '')module_obj = __import__(module_name)module_obj = __import__(module_name,fromlist=module_name.split('')[:-])kls = getattr(module_obj, data[''])return kls.from_json(json_data_as_unicode)", "docstring": "Utility class method to instantiate a Credentials subclass from JSON.\n\n Expects the JSON string to have been produced by to_json().\n\n Args:\n json_data: string or bytes, JSON from to_json().\n\n Returns:\n An instance of the subclass of Credentials that was serialized with\n to_json().", "id": "f2472:c13:m6"} {"signature": "@classmethoddef from_json(cls, unused_data):", "body": "return Credentials()", "docstring": "Instantiate a Credentials object from a JSON description of it.\n\n The JSON should have been produced by calling .to_json() on the object.\n\n Args:\n unused_data: dict, A deserialized JSON object.\n\n Returns:\n An instance of a Credentials subclass.", "id": "f2472:c13:m7"} {"signature": "def __init__(self, lock=None):", "body": "self._lock = lock", "docstring": "Create a Storage instance.\n\n Args:\n lock: An optional threading.Lock-like object. Must implement at\n least acquire() and release(). 
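The ``to_json()``/``new_from_json()`` pair above gives a simple persistence round trip for any Credentials subclass; a sketch, assuming ``credentials`` is an OAuth2Credentials instance obtained elsewhere::

    from oauth2client import client

    serialized = credentials.to_json()                      # JSON string
    restored = client.Credentials.new_from_json(serialized)
    assert restored.access_token == credentials.access_token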
Does not need to be\n re-entrant.", "id": "f2472:c15:m0"} {"signature": "def acquire_lock(self):", "body": "if self._lock is not None:self._lock.acquire()", "docstring": "Acquires any lock necessary to access this Storage.\n\n This lock is not reentrant.", "id": "f2472:c15:m1"} {"signature": "def release_lock(self):", "body": "if self._lock is not None:self._lock.release()", "docstring": "Release the Storage lock.\n\n Trying to release a lock that isn't held will result in a\n RuntimeError in the case of a threading.Lock or multiprocessing.Lock.", "id": "f2472:c15:m2"} {"signature": "def locked_get(self):", "body": "raise NotImplementedError", "docstring": "Retrieve credential.\n\n The Storage lock must be held when this is called.\n\n Returns:\n oauth2client.client.Credentials", "id": "f2472:c15:m3"} {"signature": "def locked_put(self, credentials):", "body": "raise NotImplementedError", "docstring": "Write a credential.\n\n The Storage lock must be held when this is called.\n\n Args:\n credentials: Credentials, the credentials to store.", "id": "f2472:c15:m4"} {"signature": "def locked_delete(self):", "body": "raise NotImplementedError", "docstring": "Delete a credential.\n\n The Storage lock must be held when this is called.", "id": "f2472:c15:m5"} {"signature": "def get(self):", "body": "self.acquire_lock()try:return self.locked_get()finally:self.release_lock()", "docstring": "Retrieve credential.\n\n The Storage lock must *not* be held when this is called.\n\n Returns:\n oauth2client.client.Credentials", "id": "f2472:c15:m6"} {"signature": "def put(self, credentials):", "body": "self.acquire_lock()try:self.locked_put(credentials)finally:self.release_lock()", "docstring": "Write a credential.\n\n The Storage lock must be held when this is called.\n\n Args:\n credentials: Credentials, the credentials to store.", "id": "f2472:c15:m7"} {"signature": "def delete(self):", "body": "self.acquire_lock()try:return self.locked_delete()finally:self.release_lock()", "docstring": "Delete credential.\n\n Frees any resources associated with storing the credential.\n The Storage lock must *not* be held when this is called.\n\n Returns:\n None", "id": "f2472:c15:m8"} {"signature": "@_helpers.positional()def __init__(self, access_token, client_id, client_secret, refresh_token,token_expiry, token_uri, user_agent, revoke_uri=None,id_token=None, token_response=None, scopes=None,token_info_uri=None, id_token_jwt=None):", "body": "self.access_token = access_tokenself.client_id = client_idself.client_secret = client_secretself.refresh_token = refresh_tokenself.store = Noneself.token_expiry = token_expiryself.token_uri = token_uriself.user_agent = user_agentself.revoke_uri = revoke_uriself.id_token = id_tokenself.id_token_jwt = id_token_jwtself.token_response = token_responseself.scopes = set(_helpers.string_to_scopes(scopes or []))self.token_info_uri = token_info_uriself.invalid = False", "docstring": "Create an instance of OAuth2Credentials.\n\n This constructor is not usually called by the user, instead\n OAuth2Credentials objects are instantiated by the OAuth2WebServerFlow.\n\n Args:\n access_token: string, access token.\n client_id: string, client identifier.\n client_secret: string, client secret.\n refresh_token: string, refresh token.\n token_expiry: datetime, when the access_token expires.\n token_uri: string, URI of token endpoint.\n user_agent: string, The HTTP User-Agent to provide for this\n application.\n revoke_uri: string, URI for revoke endpoint. 
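Only the three ``locked_*`` methods of the Storage base class above need to be supplied by a subclass; a minimal in-memory implementation, purely illustrative and not part of the library, could look like::

    import threading

    from oauth2client import client

    class MemoryStorage(client.Storage):
        """Holds a single credential in memory (illustrative only)."""

        def __init__(self):
            super(MemoryStorage, self).__init__(lock=threading.Lock())
            self._credentials = None

        def locked_get(self):
            if self._credentials is not None:
                self._credentials.set_store(self)
            return self._credentials

        def locked_put(self, credentials):
            self._credentials = credentials

        def locked_delete(self):
            self._credentials = None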
Defaults to None; a\n token can't be revoked if this is None.\n id_token: object, The identity of the resource owner.\n token_response: dict, the decoded response to the token request.\n None if a token hasn't been requested yet. Stored\n because some providers (e.g. wordpress.com) include\n extra fields that clients may want.\n scopes: list, authorized scopes for these credentials.\n token_info_uri: string, the URI for the token info endpoint.\n Defaults to None; scopes can not be refreshed if\n this is None.\n id_token_jwt: string, the encoded and signed identity JWT. The\n decoded version of this is stored in id_token.\n\n Notes:\n store: callable, A callable that when passed a Credential\n will store the credential back to where it came from.\n This is needed to store the latest access_token if it\n has expired and been refreshed.", "id": "f2472:c16:m0"} {"signature": "def authorize(self, http):", "body": "transport.wrap_http_for_auth(self, http)return http", "docstring": "Authorize an httplib2.Http instance with these credentials.\n\n The modified http.request method will add authentication headers to\n each request and will refresh access_tokens when a 401 is received on a\n request. In addition the http.request method has a credentials\n property, http.request.credentials, which is the Credentials object\n that authorized it.\n\n Args:\n http: An instance of ``httplib2.Http`` or something that acts\n like it.\n\n Returns:\n A modified instance of http that was passed in.\n\n Example::\n\n h = httplib2.Http()\n h = credentials.authorize(h)\n\n You can't create a new OAuth subclass of httplib2.Authentication\n because it never gets passed the absolute URI, which is needed for\n signing. So instead we have to overload 'request' with a closure\n that adds in the Authorization header and then calls the original\n version of 'request()'.", "id": "f2472:c16:m1"} {"signature": "def refresh(self, http):", "body": "self._refresh(http)", "docstring": "Forces a refresh of the access_token.\n\n Args:\n http: httplib2.Http, an http object to be used to make the refresh\n request.", "id": "f2472:c16:m2"} {"signature": "def revoke(self, http):", "body": "self._revoke(http)", "docstring": "Revokes a refresh_token and makes the credentials void.\n\n Args:\n http: httplib2.Http, an http object to be used to make the revoke\n request.", "id": "f2472:c16:m3"} {"signature": "def apply(self, headers):", "body": "headers[''] = '' + self.access_token", "docstring": "Add the authorization to the headers.\n\n Args:\n headers: dict, the headers to add the Authorization header to.", "id": "f2472:c16:m4"} {"signature": "def has_scopes(self, scopes):", "body": "scopes = _helpers.string_to_scopes(scopes)return set(scopes).issubset(self.scopes)", "docstring": "Verify that the credentials are authorized for the given scopes.\n\n Returns True if the credentials authorized scopes contain all of the\n scopes given.\n\n Args:\n scopes: list or string, the scopes to check.\n\n Notes:\n There are cases where the credentials are unaware of which scopes\n are authorized. Notably, credentials obtained and stored before\n this code was added will not have scopes, AccessTokenCredentials do\n not have scopes. 
In both cases, you can use refresh_scopes() to\n obtain the canonical set of scopes.", "id": "f2472:c16:m5"} {"signature": "def retrieve_scopes(self, http):", "body": "self._retrieve_scopes(http)return self.scopes", "docstring": "Retrieves the canonical list of scopes for this access token.\n\n Gets the scopes from the OAuth2 provider.\n\n Args:\n http: httplib2.Http, an http object to be used to make the refresh\n request.\n\n Returns:\n A set of strings containing the canonical list of scopes.", "id": "f2472:c16:m6"} {"signature": "@classmethoddef from_json(cls, json_data):", "body": "data = json.loads(_helpers._from_bytes(json_data))if (data.get('') andnot isinstance(data[''], datetime.datetime)):try:data[''] = datetime.datetime.strptime(data[''], EXPIRY_FORMAT)except ValueError:data[''] = Noneretval = cls(data[''],data[''],data[''],data[''],data[''],data[''],data[''],revoke_uri=data.get('', None),id_token=data.get('', None),id_token_jwt=data.get('', None),token_response=data.get('', None),scopes=data.get('', None),token_info_uri=data.get('', None))retval.invalid = data['']return retval", "docstring": "Instantiate a Credentials object from a JSON description of it.\n\n The JSON should have been produced by calling .to_json() on the object.\n\n Args:\n json_data: string or bytes, JSON to deserialize.\n\n Returns:\n An instance of a Credentials subclass.", "id": "f2472:c16:m7"} {"signature": "@propertydef access_token_expired(self):", "body": "if self.invalid:return Trueif not self.token_expiry:return Falsenow = _UTCNOW()if now >= self.token_expiry:logger.info('',now, self.token_expiry)return Truereturn False", "docstring": "True if the credential is expired or invalid.\n\n If the token_expiry isn't set, we assume the token doesn't expire.", "id": "f2472:c16:m8"} {"signature": "def get_access_token(self, http=None):", "body": "if not self.access_token or self.access_token_expired:if not http:http = transport.get_http_object()self.refresh(http)return AccessTokenInfo(access_token=self.access_token,expires_in=self._expires_in())", "docstring": "Return the access token and its expiration information.\n\n If the token does not exist, get one.\n If the token expired, refresh it.", "id": "f2472:c16:m9"} {"signature": "def set_store(self, store):", "body": "self.store = store", "docstring": "Set the Storage for the credential.\n\n Args:\n store: Storage, an implementation of Storage object.\n This is needed to store the latest access_token if it\n has expired and been refreshed. This implementation uses\n locking to check for updates before updating the\n access_token.", "id": "f2472:c16:m10"} {"signature": "def _expires_in(self):", "body": "if self.token_expiry:now = _UTCNOW()if self.token_expiry > now:time_delta = self.token_expiry - nowreturn time_delta.days * + time_delta.secondselse:return ", "docstring": "Return the number of seconds until this token expires.\n\n If token_expiry is in the past, this method will return 0, meaning the\n token has already expired.\n\n If token_expiry is None, this method will return None. 
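The scope helpers above let an application check a stored credential before using it; a sketch, assuming ``credentials`` is an OAuth2Credentials instance with a refresh token and a ``token_info_uri``::

    import httplib2

    needed = ["https://www.googleapis.com/auth/drive.readonly"]
    if not credentials.has_scopes(needed):
        # Ask the provider for the canonical scope list.
        credentials.retrieve_scopes(httplib2.Http())

    token_info = credentials.get_access_token()   # refreshes if expired
    print(token_info.access_token, token_info.expires_in)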
Note that\n returning 0 in such a case would not be fair: the token may still be\n valid; we just don't know anything about it.", "id": "f2472:c16:m11"} {"signature": "def _updateFromCredential(self, other):", "body": "self.__dict__.update(other.__getstate__())", "docstring": "Update this Credential from another instance.", "id": "f2472:c16:m12"} {"signature": "def __getstate__(self):", "body": "d = copy.copy(self.__dict__)del d['']return d", "docstring": "Trim the state down to something that can be pickled.", "id": "f2472:c16:m13"} {"signature": "def __setstate__(self, state):", "body": "self.__dict__.update(state)self.store = None", "docstring": "Reconstitute the state of the object from being pickled.", "id": "f2472:c16:m14"} {"signature": "def _generate_refresh_request_body(self):", "body": "body = urllib.parse.urlencode({'': '','': self.client_id,'': self.client_secret,'': self.refresh_token,})return body", "docstring": "Generate the body that will be used in the refresh request.", "id": "f2472:c16:m15"} {"signature": "def _generate_refresh_request_headers(self):", "body": "headers = {'': '',}if self.user_agent is not None:headers[''] = self.user_agentreturn headers", "docstring": "Generate the headers that will be used in the refresh request.", "id": "f2472:c16:m16"} {"signature": "def _refresh(self, http):", "body": "if not self.store:self._do_refresh_request(http)else:self.store.acquire_lock()try:new_cred = self.store.locked_get()if (new_cred and not new_cred.invalid andnew_cred.access_token != self.access_token andnot new_cred.access_token_expired):logger.info('')self._updateFromCredential(new_cred)else:self._do_refresh_request(http)finally:self.store.release_lock()", "docstring": "Refreshes the access_token.\n\n This method first checks by reading the Storage object if available.\n If a refresh is still needed, it holds the Storage lock until the\n refresh is completed.\n\n Args:\n http: an object to be used to make HTTP requests.\n\n Raises:\n HttpAccessTokenRefreshError: When the refresh fails.", "id": "f2472:c16:m17"} {"signature": "def _do_refresh_request(self, http):", "body": "body = self._generate_refresh_request_body()headers = self._generate_refresh_request_headers()logger.info('')resp, content = transport.request(http, self.token_uri, method='',body=body, headers=headers)content = _helpers._from_bytes(content)if resp.status == http_client.OK:d = json.loads(content)self.token_response = dself.access_token = d['']self.refresh_token = d.get('', self.refresh_token)if '' in d:delta = datetime.timedelta(seconds=int(d['']))self.token_expiry = delta + _UTCNOW()else:self.token_expiry = Noneif '' in d:self.id_token = _extract_id_token(d[''])self.id_token_jwt = d['']else:self.id_token = Noneself.id_token_jwt = Noneself.invalid = Falseif self.store:self.store.locked_put(self)else:logger.info('', content)error_msg = ''.format(resp.status)try:d = json.loads(content)if '' in d:error_msg = d['']if '' in d:error_msg += '' + d['']self.invalid = Trueif self.store is not None:self.store.locked_put(self)except (TypeError, ValueError):passraise HttpAccessTokenRefreshError(error_msg, status=resp.status)", "docstring": "Refresh the access_token using the refresh_token.\n\n Args:\n http: an object to be used to make HTTP requests.\n\n Raises:\n HttpAccessTokenRefreshError: When the refresh fails.", "id": "f2472:c16:m18"} {"signature": "def _revoke(self, http):", "body": "self._do_revoke(http, self.refresh_token or self.access_token)", "docstring": "Revokes this credential and deletes the 
stored copy (if it exists).\n\n Args:\n http: an object to be used to make HTTP requests.", "id": "f2472:c16:m19"} {"signature": "def _do_revoke(self, http, token):", "body": "logger.info('')query_params = {'': token}token_revoke_uri = _helpers.update_query_params(self.revoke_uri, query_params)resp, content = transport.request(http, token_revoke_uri)if resp.status == http_client.METHOD_NOT_ALLOWED:body = urllib.parse.urlencode(query_params)resp, content = transport.request(http, token_revoke_uri,method='', body=body)if resp.status == http_client.OK:self.invalid = Trueelse:error_msg = ''.format(resp.status)try:d = json.loads(_helpers._from_bytes(content))if '' in d:error_msg = d['']except (TypeError, ValueError):passraise TokenRevokeError(error_msg)if self.store:self.store.delete()", "docstring": "Revokes this credential and deletes the stored copy (if it exists).\n\n Args:\n http: an object to be used to make HTTP requests.\n token: A string used as the token to be revoked. Can be either an\n access_token or refresh_token.\n\n Raises:\n TokenRevokeError: If the revoke request does not return with a\n 200 OK.", "id": "f2472:c16:m20"} {"signature": "def _retrieve_scopes(self, http):", "body": "self._do_retrieve_scopes(http, self.access_token)", "docstring": "Retrieves the list of authorized scopes from the OAuth2 provider.\n\n Args:\n http: an object to be used to make HTTP requests.", "id": "f2472:c16:m21"} {"signature": "def _do_retrieve_scopes(self, http, token):", "body": "logger.info('')query_params = {'': token, '': ''}token_info_uri = _helpers.update_query_params(self.token_info_uri, query_params)resp, content = transport.request(http, token_info_uri)content = _helpers._from_bytes(content)if resp.status == http_client.OK:d = json.loads(content)self.scopes = set(_helpers.string_to_scopes(d.get('', '')))else:error_msg = ''.format(resp.status)try:d = json.loads(content)if '' in d:error_msg = d['']except (TypeError, ValueError):passraise Error(error_msg)", "docstring": "Retrieves the list of authorized scopes from the OAuth2 provider.\n\n Args:\n http: an object to be used to make HTTP requests.\n token: A string used as the token to identify the credentials to\n the provider.\n\n Raises:\n Error: When refresh fails, indicating the the access token is\n invalid.", "id": "f2472:c16:m22"} {"signature": "def __init__(self, access_token, user_agent, revoke_uri=None):", "body": "super(AccessTokenCredentials, self).__init__(access_token,None,None,None,None,None,user_agent,revoke_uri=revoke_uri)", "docstring": "Create an instance of OAuth2Credentials\n\n This is one of the few types if Credentials that you should contrust,\n Credentials objects are usually instantiated by a Flow.\n\n Args:\n access_token: string, access token.\n user_agent: string, The HTTP User-Agent to provide for this\n application.\n revoke_uri: string, URI for revoke endpoint. 
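AccessTokenCredentials, whose constructor appears above, wraps a token obtained out of band and cannot refresh it; a sketch with a placeholder token and User-Agent::

    import httplib2

    from oauth2client.client import AccessTokenCredentials

    credentials = AccessTokenCredentials(
        "<access token obtained elsewhere>",      # placeholder
        "my-sample-app/1.0")                      # User-Agent string
    http = credentials.authorize(httplib2.Http())
    # Requests made with ``http`` now carry the Bearer token; a 401
    # cannot be recovered from because there is no refresh_token.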
Defaults to None; a\n token can't be revoked if this is None.", "id": "f2472:c17:m0"} {"signature": "def _refresh(self, http):", "body": "raise AccessTokenCredentialsError('')", "docstring": "Refreshes the access token.\n\n Args:\n http: unused HTTP object.\n\n Raises:\n AccessTokenCredentialsError: always", "id": "f2472:c17:m2"} {"signature": "def _revoke(self, http):", "body": "self._do_revoke(http, self.access_token)", "docstring": "Revokes the access_token and deletes the store if available.\n\n Args:\n http: an object to be used to make HTTP requests.", "id": "f2472:c17:m3"} {"signature": "def __init__(self, access_token, client_id, client_secret, refresh_token,token_expiry, token_uri, user_agent,revoke_uri=oauth2client.GOOGLE_REVOKE_URI):", "body": "super(GoogleCredentials, self).__init__(access_token, client_id, client_secret, refresh_token,token_expiry, token_uri, user_agent, revoke_uri=revoke_uri)", "docstring": "Create an instance of GoogleCredentials.\n\n This constructor is not usually called by the user, instead\n GoogleCredentials objects are instantiated by\n GoogleCredentials.from_stream() or\n GoogleCredentials.get_application_default().\n\n Args:\n access_token: string, access token.\n client_id: string, client identifier.\n client_secret: string, client secret.\n refresh_token: string, refresh token.\n token_expiry: datetime, when the access_token expires.\n token_uri: string, URI of token endpoint.\n user_agent: string, The HTTP User-Agent to provide for this\n application.\n revoke_uri: string, URI for revoke endpoint. Defaults to\n oauth2client.GOOGLE_REVOKE_URI; a token can't be\n revoked if this is None.", "id": "f2472:c18:m0"} {"signature": "def create_scoped_required(self):", "body": "return False", "docstring": "Whether this Credentials object is scopeless.\n\n create_scoped(scopes) method needs to be called in order to create\n a Credentials object for API calls.", "id": "f2472:c18:m1"} {"signature": "def create_scoped(self, scopes):", "body": "return self", "docstring": "Create a Credentials object for the given scopes.\n\n The Credentials type is preserved.", "id": "f2472:c18:m2"} {"signature": "@propertydef serialization_data(self):", "body": "return {'': '','': self.client_id,'': self.client_secret,'': self.refresh_token}", "docstring": "Get the fields and values identifying the current credentials.", "id": "f2472:c18:m4"} {"signature": "@staticmethoddef _implicit_credentials_from_gae():", "body": "if not _in_gae_environment():return Nonereturn _get_application_default_credential_GAE()", "docstring": "Attempts to get implicit credentials in Google App Engine env.\n\n If the current environment is not detected as App Engine, returns None,\n indicating no Google App Engine credentials can be detected from the\n current environment.\n\n Returns:\n None, if not in GAE, else an appengine.AppAssertionCredentials\n object.", "id": "f2472:c18:m5"} {"signature": "@staticmethoddef _implicit_credentials_from_gce():", "body": "if not _in_gce_environment():return Nonereturn _get_application_default_credential_GCE()", "docstring": "Attempts to get implicit credentials in Google Compute Engine env.\n\n If the current environment is not detected as Compute Engine, returns\n None, indicating no Google Compute Engine credentials can be detected\n from the current environment.\n\n Returns:\n None, if not in GCE, else a gce.AppAssertionCredentials object.", "id": "f2472:c18:m6"} {"signature": "@staticmethoddef _implicit_credentials_from_files():", "body": "credentials_filename 
= _get_environment_variable_file()if not credentials_filename:credentials_filename = _get_well_known_file()if os.path.isfile(credentials_filename):extra_help = ('''')else:credentials_filename = Noneelse:extra_help = ('' + GOOGLE_APPLICATION_CREDENTIALS +'')if not credentials_filename:returnSETTINGS.env_name = DEFAULT_ENV_NAMEtry:return _get_application_default_credential_from_file(credentials_filename)except (ApplicationDefaultCredentialsError, ValueError) as error:_raise_exception_for_reading_json(credentials_filename,extra_help, error)", "docstring": "Attempts to get implicit credentials from local credential files.\n\n First checks if the environment variable GOOGLE_APPLICATION_CREDENTIALS\n is set with a filename and then falls back to a configuration file (the\n \"well known\" file) associated with the 'gcloud' command line tool.\n\n Returns:\n Credentials object associated with the\n GOOGLE_APPLICATION_CREDENTIALS file or the \"well known\" file if\n either exist. If neither file is define, returns None, indicating\n no credentials from a file can detected from the current\n environment.", "id": "f2472:c18:m7"} {"signature": "@classmethoddef _get_implicit_credentials(cls):", "body": "environ_checkers = [cls._implicit_credentials_from_files,cls._implicit_credentials_from_gae,cls._implicit_credentials_from_gce,]for checker in environ_checkers:credentials = checker()if credentials is not None:return credentialsraise ApplicationDefaultCredentialsError(ADC_HELP_MSG)", "docstring": "Gets credentials implicitly from the environment.\n\n Checks environment in order of precedence:\n - Environment variable GOOGLE_APPLICATION_CREDENTIALS pointing to\n a file with stored credentials information.\n - Stored \"well known\" file associated with `gcloud` command line tool.\n - Google App Engine (production and testing)\n - Google Compute Engine production environment.\n\n Raises:\n ApplicationDefaultCredentialsError: raised when the credentials\n fail to be retrieved.", "id": "f2472:c18:m8"} {"signature": "@staticmethoddef get_application_default():", "body": "return GoogleCredentials._get_implicit_credentials()", "docstring": "Get the Application Default Credentials for the current environment.\n\n Raises:\n ApplicationDefaultCredentialsError: raised when the credentials\n fail to be retrieved.", "id": "f2472:c18:m9"} {"signature": "@staticmethoddef from_stream(credential_filename):", "body": "if credential_filename and os.path.isfile(credential_filename):try:return _get_application_default_credential_from_file(credential_filename)except (ApplicationDefaultCredentialsError, ValueError) as error:extra_help = ('''')_raise_exception_for_reading_json(credential_filename,extra_help,error)else:raise ApplicationDefaultCredentialsError('''')", "docstring": "Create a Credentials object by reading information from a file.\n\n It returns an object of type GoogleCredentials.\n\n Args:\n credential_filename: the path to the file from where the\n credentials are to be read\n\n Raises:\n ApplicationDefaultCredentialsError: raised when the credentials\n fail to be retrieved.", "id": "f2472:c18:m10"} {"signature": "@_helpers.positional()def __init__(self, assertion_type, user_agent=None,token_uri=oauth2client.GOOGLE_TOKEN_URI,revoke_uri=oauth2client.GOOGLE_REVOKE_URI,**unused_kwargs):", "body": "super(AssertionCredentials, self).__init__(None,None,None,None,None,token_uri,user_agent,revoke_uri=revoke_uri)self.assertion_type = assertion_type", "docstring": "Constructor for AssertionFlowCredentials.\n\n Args:\n 
assertion_type: string, assertion type that will be declared to the\n auth server\n user_agent: string, The HTTP User-Agent to provide for this\n application.\n token_uri: string, URI for token endpoint. For convenience defaults\n to Google's endpoints but any OAuth 2.0 provider can be\n used.\n revoke_uri: string, URI for revoke endpoint.", "id": "f2472:c19:m0"} {"signature": "def _generate_assertion(self):", "body": "raise NotImplementedError", "docstring": "Generate assertion string to be used in the access token request.", "id": "f2472:c19:m2"} {"signature": "def _revoke(self, http):", "body": "self._do_revoke(http, self.access_token)", "docstring": "Revokes the access_token and deletes the store if available.\n\n Args:\n http: an object to be used to make HTTP requests.", "id": "f2472:c19:m3"} {"signature": "def sign_blob(self, blob):", "body": "raise NotImplementedError('')", "docstring": "Cryptographically sign a blob (of bytes).\n\n Args:\n blob: bytes, Message to be signed.\n\n Returns:\n tuple, A pair of the private key ID used to sign the blob and\n the signed contents.", "id": "f2472:c19:m4"} {"signature": "@classmethoddef FromResponse(cls, response):", "body": "kwargs = {'': response[''],'': response[''],}verification_url = response.get('', response.get(''))if verification_url is None:raise OAuth2DeviceCodeError('')kwargs[''] = verification_urlkwargs.update({'': response.get(''),'': None,})if '' in response:kwargs[''] = (_UTCNOW() +datetime.timedelta(seconds=int(response[''])))return cls(**kwargs)", "docstring": "Create a DeviceFlowInfo from a server response.\n\n The response should be a dict containing entries as described here:\n\n http://tools.ietf.org/html/draft-ietf-oauth-v2-05#section-3.7.1", "id": "f2472:c20:m0"} {"signature": "@_helpers.positional()def __init__(self, client_id,client_secret=None,scope=None,redirect_uri=None,user_agent=None,auth_uri=oauth2client.GOOGLE_AUTH_URI,token_uri=oauth2client.GOOGLE_TOKEN_URI,revoke_uri=oauth2client.GOOGLE_REVOKE_URI,login_hint=None,device_uri=oauth2client.GOOGLE_DEVICE_URI,token_info_uri=oauth2client.GOOGLE_TOKEN_INFO_URI,authorization_header=None,pkce=False,code_verifier=None,**kwargs):", "body": "if scope is None:raise TypeError(\"\")self.client_id = client_idself.client_secret = client_secretself.scope = _helpers.scopes_to_string(scope)self.redirect_uri = redirect_uriself.login_hint = login_hintself.user_agent = user_agentself.auth_uri = auth_uriself.token_uri = token_uriself.revoke_uri = revoke_uriself.device_uri = device_uriself.token_info_uri = token_info_uriself.authorization_header = authorization_headerself._pkce = pkceself.code_verifier = code_verifierself.params = _oauth2_web_server_flow_params(kwargs)", "docstring": "Constructor for OAuth2WebServerFlow.\n\n The kwargs argument is used to set extra query parameters on the\n auth_uri. For example, the access_type and prompt\n query parameters can be set via kwargs.\n\n Args:\n client_id: string, client identifier.\n client_secret: string client secret.\n scope: string or iterable of strings, scope(s) of the credentials\n being requested.\n redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob'\n for a non-web-based application, or a URI that\n handles the callback from the authorization server.\n user_agent: string, HTTP User-Agent to provide for this\n application.\n auth_uri: string, URI for authorization endpoint. For convenience\n defaults to Google's endpoints but any OAuth 2.0 provider\n can be used.\n token_uri: string, URI for token endpoint. 
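The application-default lookup implemented by the GoogleCredentials helpers above (environment variable, gcloud well-known file, App Engine, then Compute Engine) is typically consumed like this; the Cloud Platform scope is only an example::

    from oauth2client.client import GoogleCredentials

    credentials = GoogleCredentials.get_application_default()
    if credentials.create_scoped_required():
        credentials = credentials.create_scoped(
            ["https://www.googleapis.com/auth/cloud-platform"])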
For convenience\n defaults to Google's endpoints but any OAuth 2.0\n provider can be used.\n revoke_uri: string, URI for revoke endpoint. For convenience\n defaults to Google's endpoints but any OAuth 2.0\n provider can be used.\n login_hint: string, Either an email address or domain. Passing this\n hint will either pre-fill the email box on the sign-in\n form or select the proper multi-login session, thereby\n simplifying the login flow.\n device_uri: string, URI for device authorization endpoint. For\n convenience defaults to Google's endpoints but any\n OAuth 2.0 provider can be used.\n authorization_header: string, For use with OAuth 2.0 providers that\n require a client to authenticate using a\n header value instead of passing client_secret\n in the POST body.\n pkce: boolean, default: False, Generate and include a \"Proof Key\n for Code Exchange\" (PKCE) with your authorization and token\n requests. This adds security for installed applications that\n cannot protect a client_secret. See RFC 7636 for details.\n code_verifier: bytestring or None, default: None, parameter passed\n as part of the code exchange when pkce=True. If\n None, a code_verifier will automatically be\n generated as part of step1_get_authorize_url(). See\n RFC 7636 for details.\n **kwargs: dict, The keyword arguments are all optional and required\n parameters for the OAuth calls.", "id": "f2472:c21:m0"} {"signature": "@_helpers.positional()def step1_get_authorize_url(self, redirect_uri=None, state=None):", "body": "if redirect_uri is not None:logger.warning((''''''''))self.redirect_uri = redirect_uriif self.redirect_uri is None:raise ValueError('')query_params = {'': self.client_id,'': self.redirect_uri,'': self.scope,}if state is not None:query_params[''] = stateif self.login_hint is not None:query_params[''] = self.login_hintif self._pkce:if not self.code_verifier:self.code_verifier = _pkce.code_verifier()challenge = _pkce.code_challenge(self.code_verifier)query_params[''] = challengequery_params[''] = ''query_params.update(self.params)return _helpers.update_query_params(self.auth_uri, query_params)", "docstring": "Returns a URI to redirect to the provider.\n\n Args:\n redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob'\n for a non-web-based application, or a URI that\n handles the callback from the authorization server.\n This parameter is deprecated, please move to passing\n the redirect_uri in via the constructor.\n state: string, Opaque state string which is passed through the\n OAuth2 flow and returned to the client as a query parameter\n in the callback.\n\n Returns:\n A URI as a string to redirect the user to begin the authorization\n flow.", "id": "f2472:c21:m1"} {"signature": "@_helpers.positional()def step1_get_device_and_user_codes(self, http=None):", "body": "if self.device_uri is None:raise ValueError('')body = urllib.parse.urlencode({'': self.client_id,'': self.scope,})headers = {'': '',}if self.user_agent is not None:headers[''] = self.user_agentif http is None:http = transport.get_http_object()resp, content = transport.request(http, self.device_uri, method='', body=body, headers=headers)content = _helpers._from_bytes(content)if resp.status == http_client.OK:try:flow_info = json.loads(content)except ValueError as exc:raise OAuth2DeviceCodeError(''''.format(content, exc))return DeviceFlowInfo.FromResponse(flow_info)else:error_msg = ''.format(resp.status)try:error_dict = json.loads(content)if '' in error_dict:error_msg += ''.format(error_dict[''])except ValueError:passraise 
OAuth2DeviceCodeError(error_msg)", "docstring": "Returns a user code and the verification URL where to enter it\n\n Returns:\n A user code as a string for the user to authorize the application\n An URL as a string where the user has to enter the code", "id": "f2472:c21:m2"} {"signature": "@_helpers.positional()def step2_exchange(self, code=None, http=None, device_flow_info=None):", "body": "if code is None and device_flow_info is None:raise ValueError('')if code is not None and device_flow_info is not None:raise ValueError('')if code is None:code = device_flow_info.device_codeelif not isinstance(code, (six.string_types, six.binary_type)):if '' not in code:raise FlowExchangeError(code.get('', ''))code = code['']post_data = {'': self.client_id,'': code,'': self.scope,}if self.client_secret is not None:post_data[''] = self.client_secretif self._pkce:post_data[''] = self.code_verifierif device_flow_info is not None:post_data[''] = ''else:post_data[''] = ''post_data[''] = self.redirect_uribody = urllib.parse.urlencode(post_data)headers = {'': '',}if self.authorization_header is not None:headers[''] = self.authorization_headerif self.user_agent is not None:headers[''] = self.user_agentif http is None:http = transport.get_http_object()resp, content = transport.request(http, self.token_uri, method='', body=body, headers=headers)d = _parse_exchange_token_response(content)if resp.status == http_client.OK and '' in d:access_token = d['']refresh_token = d.get('', None)if not refresh_token:logger.info(''\"\")token_expiry = Noneif '' in d:delta = datetime.timedelta(seconds=int(d['']))token_expiry = delta + _UTCNOW()extracted_id_token = Noneid_token_jwt = Noneif '' in d:extracted_id_token = _extract_id_token(d[''])id_token_jwt = d['']logger.info('')return OAuth2Credentials(access_token, self.client_id, self.client_secret,refresh_token, token_expiry, self.token_uri, self.user_agent,revoke_uri=self.revoke_uri, id_token=extracted_id_token,id_token_jwt=id_token_jwt, token_response=d, scopes=self.scope,token_info_uri=self.token_info_uri)else:logger.info('', content)if '' in d:error_msg = (str(d['']) +str(d.get('', '')))else:error_msg = ''.format(str(resp.status))raise FlowExchangeError(error_msg)", "docstring": "Exchanges a code for OAuth2Credentials.\n\n Args:\n code: string, a dict-like object, or None. For a non-device\n flow, this is either the response code as a string, or a\n dictionary of query parameters to the redirect_uri. 
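Put together, the two-step web-server flow above runs roughly as follows; the client ID, client secret, scope, redirect URI and returned code are all placeholders::

    from oauth2client.client import OAuth2WebServerFlow

    flow = OAuth2WebServerFlow(
        client_id="<client id>",                  # placeholder
        client_secret="<client secret>",          # placeholder
        scope="https://www.googleapis.com/auth/userinfo.email",
        redirect_uri="https://example.com/oauth2callback")

    # Step 1: redirect the user to the provider.
    auth_url = flow.step1_get_authorize_url(state="opaque-csrf-token")

    # Step 2: back on the redirect_uri, exchange the returned code.
    credentials = flow.step2_exchange(code="<code from the callback>")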
For a\n device flow, this should be None.\n http: httplib2.Http, optional http instance to use when fetching\n credentials.\n device_flow_info: DeviceFlowInfo, return value from step1 in the\n case of a device flow.\n\n Returns:\n An OAuth2Credentials object that can be used to authorize requests.\n\n Raises:\n FlowExchangeError: if a problem occurred exchanging the code for a\n refresh_token.\n ValueError: if code and device_flow_info are both provided or both\n missing.", "id": "f2472:c21:m3"} {"signature": "def get_cached_http():", "body": "return _CACHED_HTTP", "docstring": "Return an HTTP object which caches results returned.\n\n This is intended to be used in methods like\n oauth2client.client.verify_id_token(), which calls to the same URI\n to retrieve certs.\n\n Returns:\n httplib2.Http, an HTTP object with a MemoryCache", "id": "f2474:m0"} {"signature": "def get_http_object(*args, **kwargs):", "body": "return httplib2.Http(*args, **kwargs)", "docstring": "Return a new HTTP object.\n\n Args:\n *args: tuple, The positional arguments to be passed when\n contructing a new HTTP object.\n **kwargs: dict, The keyword arguments to be passed when\n contructing a new HTTP object.\n\n Returns:\n httplib2.Http, an HTTP object.", "id": "f2474:m1"} {"signature": "def _initialize_headers(headers):", "body": "return {} if headers is None else dict(headers)", "docstring": "Creates a copy of the headers.\n\n Args:\n headers: dict, request headers to copy.\n\n Returns:\n dict, the copied headers or a new dictionary if the headers\n were None.", "id": "f2474:m2"} {"signature": "def _apply_user_agent(headers, user_agent):", "body": "if user_agent is not None:if '' in headers:headers[''] = (user_agent + '' + headers[''])else:headers[''] = user_agentreturn headers", "docstring": "Adds a user-agent to the headers.\n\n Args:\n headers: dict, request headers to add / modify user\n agent within.\n user_agent: str, the user agent to add.\n\n Returns:\n dict, the original headers passed in, but modified if the\n user agent is not None.", "id": "f2474:m3"} {"signature": "def clean_headers(headers):", "body": "clean = {}try:for k, v in six.iteritems(headers):if not isinstance(k, six.binary_type):k = str(k)if not isinstance(v, six.binary_type):v = str(v)clean[_helpers._to_bytes(k)] = _helpers._to_bytes(v)except UnicodeEncodeError:from oauth2client.client import NonAsciiHeaderErrorraise NonAsciiHeaderError(k, '', v)return clean", "docstring": "Forces header keys and values to be strings, i.e not unicode.\n\n The httplib module just concats the header keys and values in a way that\n may make the message header a unicode string, which, if it then tries to\n contatenate to a binary request body may result in a unicode decode error.\n\n Args:\n headers: dict, A dictionary of headers.\n\n Returns:\n The same dictionary but with all the keys converted to strings.", "id": "f2474:m4"} {"signature": "def wrap_http_for_auth(credentials, http):", "body": "orig_request_method = http.requestdef new_request(uri, method='', body=None, headers=None,redirections=httplib2.DEFAULT_MAX_REDIRECTS,connection_type=None):if not credentials.access_token:_LOGGER.info('''')credentials._refresh(orig_request_method)headers = _initialize_headers(headers)credentials.apply(headers)_apply_user_agent(headers, credentials.user_agent)body_stream_position = Noneif all(getattr(body, stream_prop, None) for stream_prop in_STREAM_PROPERTIES):body_stream_position = body.tell()resp, content = request(orig_request_method, uri, method, 
body,clean_headers(headers),redirections, connection_type)max_refresh_attempts = for refresh_attempt in range(max_refresh_attempts):if resp.status not in REFRESH_STATUS_CODES:break_LOGGER.info('',resp.status, refresh_attempt + ,max_refresh_attempts)credentials._refresh(orig_request_method)credentials.apply(headers)if body_stream_position is not None:body.seek(body_stream_position)resp, content = request(orig_request_method, uri, method, body,clean_headers(headers),redirections, connection_type)return resp, contenthttp.request = new_requesthttp.request.credentials = credentials", "docstring": "Prepares an HTTP object's request method for auth.\n\n Wraps HTTP requests with logic to catch auth failures (typically\n identified via a 401 status code). In the event of failure, tries\n to refresh the token used and then retry the original request.\n\n Args:\n credentials: Credentials, the credentials used to identify\n the authenticated user.\n http: httplib2.Http, an http object to be used to make\n auth requests.", "id": "f2474:m5"} {"signature": "def wrap_http_for_jwt_access(credentials, http):", "body": "orig_request_method = http.requestwrap_http_for_auth(credentials, http)authenticated_request_method = http.requestdef new_request(uri, method='', body=None, headers=None,redirections=httplib2.DEFAULT_MAX_REDIRECTS,connection_type=None):if '' in credentials._kwargs:if (credentials.access_token is None orcredentials.access_token_expired):credentials.refresh(None)return request(authenticated_request_method, uri,method, body, headers, redirections,connection_type)else:headers = _initialize_headers(headers)_apply_user_agent(headers, credentials.user_agent)uri_root = uri.split('', )[]token, unused_expiry = credentials._create_token({'': uri_root})headers[''] = '' + tokenreturn request(orig_request_method, uri, method, body,clean_headers(headers),redirections, connection_type)http.request = new_requesthttp.request.credentials = credentials", "docstring": "Prepares an HTTP object's request method for JWT access.\n\n Wraps HTTP requests with logic to catch auth failures (typically\n identified via a 401 status code). In the event of failure, tries\n to refresh the token used and then retry the original request.\n\n Args:\n credentials: _JWTAccessCredentials, the credentials used to identify\n a service account that uses JWT access tokens.\n http: httplib2.Http, an http object to be used to make\n auth requests.", "id": "f2474:m6"} {"signature": "def request(http, uri, method='', body=None, headers=None,redirections=httplib2.DEFAULT_MAX_REDIRECTS,connection_type=None):", "body": "http_callable = getattr(http, '', http)return http_callable(uri, method=method, body=body, headers=headers,redirections=redirections,connection_type=connection_type)", "docstring": "Make an HTTP request with an HTTP object and arguments.\n\n Args:\n http: httplib2.Http, an http object to be used to make requests.\n uri: string, The URI to be requested.\n method: string, The HTTP method to use for the request. Defaults\n to 'GET'.\n body: string, The payload / body in HTTP request. By default\n there is no payload.\n headers: dict, Key-value pairs of request headers. By default\n there are no headers.\n redirections: int, The number of allowed 203 redirects for\n the request. Defaults to 5.\n connection_type: httplib.HTTPConnection, a subclass to be used for\n establishing connection. 
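The transport helpers above are thin wrappers around httplib2; a sketch of issuing an authorized request with them, assuming ``credentials`` is any valid Credentials instance (the userinfo URL is just an example endpoint)::

    from oauth2client import transport

    http = transport.get_http_object()            # plain httplib2.Http
    authed_http = credentials.authorize(http)     # installs the auth wrapper

    resp, content = transport.request(
        authed_http, "https://www.googleapis.com/oauth2/v3/userinfo")
    print(resp.status, content[:80])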
If not set, the type\n will be determined from the ``uri``.\n\n Returns:\n tuple, a pair of a httplib2.Response with the status code and other\n headers and the bytes of the content returned.", "id": "f2474:m7"} {"signature": "def locked_get(self):", "body": "credentials = None_helpers.validate_file(self._filename)try:f = open(self._filename, '')content = f.read()f.close()except IOError:return credentialstry:credentials = client.Credentials.new_from_json(content)credentials.set_store(self)except ValueError:passreturn credentials", "docstring": "Retrieve Credential from file.\n\n Returns:\n oauth2client.client.Credentials\n\n Raises:\n IOError if the file is a symbolic link.", "id": "f2475:c0:m1"} {"signature": "def _create_file_if_needed(self):", "body": "if not os.path.exists(self._filename):old_umask = os.umask()try:open(self._filename, '').close()finally:os.umask(old_umask)", "docstring": "Create an empty file if necessary.\n\n This method will not initialize the file. Instead it implements a\n simple version of \"touch\" to ensure the file has been created.", "id": "f2475:c0:m2"} {"signature": "def locked_put(self, credentials):", "body": "self._create_file_if_needed()_helpers.validate_file(self._filename)f = open(self._filename, '')f.write(credentials.to_json())f.close()", "docstring": "Write Credentials to file.\n\n Args:\n credentials: Credentials, the credentials to store.\n\n Raises:\n IOError if the file is a symbolic link.", "id": "f2475:c0:m3"} {"signature": "def locked_delete(self):", "body": "os.unlink(self._filename)", "docstring": "Delete Credentials file.\n\n Args:\n credentials: Credentials, the credentials to store.", "id": "f2475:c0:m4"} {"signature": "@_helpers.positional()def run_flow(flow, storage, flags=None, http=None):", "body": "if flags is None:flags = argparser.parse_args()logging.getLogger().setLevel(getattr(logging, flags.logging_level))if not flags.noauth_local_webserver:success = Falseport_number = for port in flags.auth_host_port:port_number = porttry:httpd = ClientRedirectServer((flags.auth_host_name, port),ClientRedirectHandler)except socket.error:passelse:success = Truebreakflags.noauth_local_webserver = not successif not success:print(_FAILED_START_MESSAGE)if not flags.noauth_local_webserver:oauth_callback = ''.format(host=flags.auth_host_name, port=port_number)else:oauth_callback = client.OOB_CALLBACK_URNflow.redirect_uri = oauth_callbackauthorize_url = flow.step1_get_authorize_url()if not flags.noauth_local_webserver:import webbrowserwebbrowser.open(authorize_url, new=, autoraise=True)print(_BROWSER_OPENED_MESSAGE.format(address=authorize_url))else:print(_GO_TO_LINK_MESSAGE.format(address=authorize_url))code = Noneif not flags.noauth_local_webserver:httpd.handle_request()if '' in httpd.query_params:sys.exit('')if '' in httpd.query_params:code = httpd.query_params['']else:print('''')sys.exit('')else:code = input('').strip()try:credential = flow.step2_exchange(code, http=http)except client.FlowExchangeError as e:sys.exit(''.format(e))storage.put(credential)credential.set_store(storage)print('')return credential", "docstring": "Core code for a command-line application.\n\n The ``run()`` function is called from your application and runs\n through all the steps to obtain credentials. It takes a ``Flow``\n argument and attempts to open an authorization server page in the\n user's default web browser. The server asks the user to grant your\n application access to the user's data. If the user grants access,\n the ``run()`` function returns new credentials. 
The new credentials\n are also stored in the ``storage`` argument, which updates the file\n associated with the ``Storage`` object.\n\n It presumes it is run from a command-line application and supports the\n following flags:\n\n ``--auth_host_name`` (string, default: ``localhost``)\n Host name to use when running a local web server to handle\n redirects during OAuth authorization.\n\n ``--auth_host_port`` (integer, default: ``[8080, 8090]``)\n Port to use when running a local web server to handle redirects\n during OAuth authorization. Repeat this option to specify a list\n of values.\n\n ``--[no]auth_local_webserver`` (boolean, default: ``True``)\n Run a local web server to handle redirects during OAuth\n authorization.\n\n The tools module defines an ``ArgumentParser`` the already contains the\n flag definitions that ``run()`` requires. You can pass that\n ``ArgumentParser`` to your ``ArgumentParser`` constructor::\n\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=[tools.argparser])\n flags = parser.parse_args(argv)\n\n Args:\n flow: Flow, an OAuth 2.0 Flow to step through.\n storage: Storage, a ``Storage`` to store the credential in.\n flags: ``argparse.Namespace``, (Optional) The command-line flags. This\n is the object returned from calling ``parse_args()`` on\n ``argparse.ArgumentParser`` as described above. Defaults\n to ``argparser.parse_args()``.\n http: An instance of ``httplib2.Http.request`` or something that\n acts like it.\n\n Returns:\n Credentials, the obtained credential.", "id": "f2476:m1"} {"signature": "def message_if_missing(filename):", "body": "return _CLIENT_SECRETS_MESSAGE.format(file_path=filename)", "docstring": "Helpful message to display if the CLIENT_SECRETS file is missing.", "id": "f2476:m2"} {"signature": "def do_GET(self):", "body": "self.send_response(http_client.OK)self.send_header('', '')self.end_headers()parts = urllib.parse.urlparse(self.path)query = _helpers.parse_unique_urlencoded(parts.query)self.server.query_params = queryself.wfile.write(b'')self.wfile.write(b'')self.wfile.write(b'')", "docstring": "Handle a GET request.\n\n Parses the query parameters and prints a message\n if the flow has completed. Note that we can't detect\n if an error occurred.", "id": "f2476:c1:m0"} {"signature": "def log_message(self, format, *args):", "body": "", "docstring": "Do not log messages to stdout while running as cmd. line program.", "id": "f2476:c1:m1"} {"signature": "def _to_json(self, strip, to_serialize=None):", "body": "if to_serialize is None:to_serialize = copy.copy(self.__dict__)pkcs12_val = to_serialize.get(_PKCS12_KEY)if pkcs12_val is not None:to_serialize[_PKCS12_KEY] = base64.b64encode(pkcs12_val)return super(ServiceAccountCredentials, self)._to_json(strip, to_serialize=to_serialize)", "docstring": "Utility function that creates JSON repr. of a credentials object.\n\n Over-ride is needed since PKCS#12 keys will not in general be JSON\n serializable.\n\n Args:\n strip: array, An array of names of members to exclude from the\n JSON.\n to_serialize: dict, (Optional) The properties for this object\n that will be serialized. 
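A command-line sketch tying together the file-based Storage and ``run_flow`` pieces above; the secrets file name, cache file name and scope are placeholders::

    from oauth2client import client, tools
    from oauth2client.file import Storage

    flow = client.flow_from_clientsecrets(
        "client_secrets.json",                    # placeholder
        scope="https://www.googleapis.com/auth/drive.readonly")
    storage = Storage("credentials.dat")          # token cache file

    credentials = storage.get()
    if credentials is None or credentials.invalid:
        flags = tools.argparser.parse_args([])    # or your own parser
        credentials = tools.run_flow(flow, storage, flags)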
This allows callers to\n modify before serializing.\n\n Returns:\n string, a JSON representation of this instance, suitable to pass to\n from_json().", "id": "f2477:c0:m1"} {"signature": "@classmethoddef _from_parsed_json_keyfile(cls, keyfile_dict, scopes,token_uri=None, revoke_uri=None):", "body": "creds_type = keyfile_dict.get('')if creds_type != client.SERVICE_ACCOUNT:raise ValueError('', creds_type,'', client.SERVICE_ACCOUNT)service_account_email = keyfile_dict['']private_key_pkcs8_pem = keyfile_dict['']private_key_id = keyfile_dict['']client_id = keyfile_dict['']if not token_uri:token_uri = keyfile_dict.get('',oauth2client.GOOGLE_TOKEN_URI)if not revoke_uri:revoke_uri = keyfile_dict.get('',oauth2client.GOOGLE_REVOKE_URI)signer = crypt.Signer.from_string(private_key_pkcs8_pem)credentials = cls(service_account_email, signer, scopes=scopes,private_key_id=private_key_id,client_id=client_id, token_uri=token_uri,revoke_uri=revoke_uri)credentials._private_key_pkcs8_pem = private_key_pkcs8_pemreturn credentials", "docstring": "Helper for factory constructors from JSON keyfile.\n\n Args:\n keyfile_dict: dict-like object, The parsed dictionary-like object\n containing the contents of the JSON keyfile.\n scopes: List or string, Scopes to use when acquiring an\n access token.\n token_uri: string, URI for OAuth 2.0 provider token endpoint.\n If unset and not present in keyfile_dict, defaults\n to Google's endpoints.\n revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.\n If unset and not present in keyfile_dict, defaults\n to Google's endpoints.\n\n Returns:\n ServiceAccountCredentials, a credentials object created from\n the keyfile contents.\n\n Raises:\n ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.\n KeyError, if one of the expected keys is not present in\n the keyfile.", "id": "f2477:c0:m2"} {"signature": "@classmethoddef from_json_keyfile_name(cls, filename, scopes='',token_uri=None, revoke_uri=None):", "body": "with open(filename, '') as file_obj:client_credentials = json.load(file_obj)return cls._from_parsed_json_keyfile(client_credentials, scopes,token_uri=token_uri,revoke_uri=revoke_uri)", "docstring": "Factory constructor from JSON keyfile by name.\n\n Args:\n filename: string, The location of the keyfile.\n scopes: List or string, (Optional) Scopes to use when acquiring an\n access token.\n token_uri: string, URI for OAuth 2.0 provider token endpoint.\n If unset and not present in the key file, defaults\n to Google's endpoints.\n revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.\n If unset and not present in the key file, defaults\n to Google's endpoints.\n\n Returns:\n ServiceAccountCredentials, a credentials object created from\n the keyfile.\n\n Raises:\n ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.\n KeyError, if one of the expected keys is not present in\n the keyfile.", "id": "f2477:c0:m3"} {"signature": "@classmethoddef from_json_keyfile_dict(cls, keyfile_dict, scopes='',token_uri=None, revoke_uri=None):", "body": "return cls._from_parsed_json_keyfile(keyfile_dict, scopes,token_uri=token_uri,revoke_uri=revoke_uri)", "docstring": "Factory constructor from parsed JSON keyfile.\n\n Args:\n keyfile_dict: dict-like object, The parsed dictionary-like object\n containing the contents of the JSON keyfile.\n scopes: List or string, (Optional) Scopes to use when acquiring an\n access token.\n token_uri: string, URI for OAuth 2.0 provider token endpoint.\n If unset and not present in keyfile_dict, defaults\n to Google's 
endpoints.\n revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.\n If unset and not present in keyfile_dict, defaults\n to Google's endpoints.\n\n Returns:\n ServiceAccountCredentials, a credentials object created from\n the keyfile.\n\n Raises:\n ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.\n KeyError, if one of the expected keys is not present in\n the keyfile.", "id": "f2477:c0:m4"} {"signature": "@classmethoddef _from_p12_keyfile_contents(cls, service_account_email,private_key_pkcs12,private_key_password=None, scopes='',token_uri=oauth2client.GOOGLE_TOKEN_URI,revoke_uri=oauth2client.GOOGLE_REVOKE_URI):", "body": "if private_key_password is None:private_key_password = _PASSWORD_DEFAULTif crypt.Signer is not crypt.OpenSSLSigner:raise NotImplementedError(_PKCS12_ERROR)signer = crypt.Signer.from_string(private_key_pkcs12,private_key_password)credentials = cls(service_account_email, signer, scopes=scopes,token_uri=token_uri, revoke_uri=revoke_uri)credentials._private_key_pkcs12 = private_key_pkcs12credentials._private_key_password = private_key_passwordreturn credentials", "docstring": "Factory constructor from JSON keyfile.\n\n Args:\n service_account_email: string, The email associated with the\n service account.\n private_key_pkcs12: string, The contents of a PKCS#12 keyfile.\n private_key_password: string, (Optional) Password for PKCS#12\n private key. Defaults to ``notasecret``.\n scopes: List or string, (Optional) Scopes to use when acquiring an\n access token.\n token_uri: string, URI for token endpoint. For convenience defaults\n to Google's endpoints but any OAuth 2.0 provider can be\n used.\n revoke_uri: string, URI for revoke endpoint. For convenience\n defaults to Google's endpoints but any OAuth 2.0\n provider can be used.\n\n Returns:\n ServiceAccountCredentials, a credentials object created from\n the keyfile.\n\n Raises:\n NotImplementedError if pyOpenSSL is not installed / not the\n active crypto library.", "id": "f2477:c0:m5"} {"signature": "@classmethoddef from_p12_keyfile(cls, service_account_email, filename,private_key_password=None, scopes='',token_uri=oauth2client.GOOGLE_TOKEN_URI,revoke_uri=oauth2client.GOOGLE_REVOKE_URI):", "body": "with open(filename, '') as file_obj:private_key_pkcs12 = file_obj.read()return cls._from_p12_keyfile_contents(service_account_email, private_key_pkcs12,private_key_password=private_key_password, scopes=scopes,token_uri=token_uri, revoke_uri=revoke_uri)", "docstring": "Factory constructor from JSON keyfile.\n\n Args:\n service_account_email: string, The email associated with the\n service account.\n filename: string, The location of the PKCS#12 keyfile.\n private_key_password: string, (Optional) Password for PKCS#12\n private key. Defaults to ``notasecret``.\n scopes: List or string, (Optional) Scopes to use when acquiring an\n access token.\n token_uri: string, URI for token endpoint. For convenience defaults\n to Google's endpoints but any OAuth 2.0 provider can be\n used.\n revoke_uri: string, URI for revoke endpoint. 
For convenience\n defaults to Google's endpoints but any OAuth 2.0\n provider can be used.\n\n Returns:\n ServiceAccountCredentials, a credentials object created from\n the keyfile.\n\n Raises:\n NotImplementedError if pyOpenSSL is not installed / not the\n active crypto library.", "id": "f2477:c0:m6"} {"signature": "@classmethoddef from_p12_keyfile_buffer(cls, service_account_email, file_buffer,private_key_password=None, scopes='',token_uri=oauth2client.GOOGLE_TOKEN_URI,revoke_uri=oauth2client.GOOGLE_REVOKE_URI):", "body": "private_key_pkcs12 = file_buffer.read()return cls._from_p12_keyfile_contents(service_account_email, private_key_pkcs12,private_key_password=private_key_password, scopes=scopes,token_uri=token_uri, revoke_uri=revoke_uri)", "docstring": "Factory constructor from JSON keyfile.\n\n Args:\n service_account_email: string, The email associated with the\n service account.\n file_buffer: stream, A buffer that implements ``read()``\n and contains the PKCS#12 key contents.\n private_key_password: string, (Optional) Password for PKCS#12\n private key. Defaults to ``notasecret``.\n scopes: List or string, (Optional) Scopes to use when acquiring an\n access token.\n token_uri: string, URI for token endpoint. For convenience defaults\n to Google's endpoints but any OAuth 2.0 provider can be\n used.\n revoke_uri: string, URI for revoke endpoint. For convenience\n defaults to Google's endpoints but any OAuth 2.0\n provider can be used.\n\n Returns:\n ServiceAccountCredentials, a credentials object created from\n the keyfile.\n\n Raises:\n NotImplementedError if pyOpenSSL is not installed / not the\n active crypto library.", "id": "f2477:c0:m7"} {"signature": "def _generate_assertion(self):", "body": "now = int(time.time())payload = {'': self.token_uri,'': self._scopes,'': now,'': now + self.MAX_TOKEN_LIFETIME_SECS,'': self._service_account_email,}payload.update(self._kwargs)return crypt.make_signed_jwt(self._signer, payload,key_id=self._private_key_id)", "docstring": "Generate the assertion that will be used in the request.", "id": "f2477:c0:m8"} {"signature": "def sign_blob(self, blob):", "body": "return self._private_key_id, self._signer.sign(blob)", "docstring": "Cryptographically sign a blob (of bytes).\n\n Implements abstract method\n :meth:`oauth2client.client.AssertionCredentials.sign_blob`.\n\n Args:\n blob: bytes, Message to be signed.\n\n Returns:\n tuple, A pair of the private key ID used to sign the blob and\n the signed contents.", "id": "f2477:c0:m9"} {"signature": "@propertydef service_account_email(self):", "body": "return self._service_account_email", "docstring": "Get the email for the current service account.\n\n Returns:\n string, The email associated with the service account.", "id": "f2477:c0:m10"} {"signature": "@classmethoddef from_json(cls, json_data):", "body": "if not isinstance(json_data, dict):json_data = json.loads(_helpers._from_bytes(json_data))private_key_pkcs8_pem = Nonepkcs12_val = json_data.get(_PKCS12_KEY)password = Noneif pkcs12_val is None:private_key_pkcs8_pem = json_data['']signer = crypt.Signer.from_string(private_key_pkcs8_pem)else:pkcs12_val = base64.b64decode(pkcs12_val)password = json_data['']signer = crypt.Signer.from_string(pkcs12_val, password)credentials = cls(json_data[''],signer,scopes=json_data[''],private_key_id=json_data[''],client_id=json_data[''],user_agent=json_data[''],**json_data[''])if private_key_pkcs8_pem is not None:credentials._private_key_pkcs8_pem = private_key_pkcs8_pemif pkcs12_val is not 
None:credentials._private_key_pkcs12 = pkcs12_valif password is not None:credentials._private_key_password = passwordcredentials.invalid = json_data['']credentials.access_token = json_data['']credentials.token_uri = json_data['']credentials.revoke_uri = json_data['']token_expiry = json_data.get('', None)if token_expiry is not None:credentials.token_expiry = datetime.datetime.strptime(token_expiry, client.EXPIRY_FORMAT)return credentials", "docstring": "Deserialize a JSON-serialized instance.\n\n Inverse to :meth:`to_json`.\n\n Args:\n json_data: dict or string, Serialized JSON (as a string or an\n already parsed dictionary) representing a credential.\n\n Returns:\n ServiceAccountCredentials from the serialized data.", "id": "f2477:c0:m12"} {"signature": "def create_with_claims(self, claims):", "body": "new_kwargs = dict(self._kwargs)new_kwargs.update(claims)result = self.__class__(self._service_account_email,self._signer,scopes=self._scopes,private_key_id=self._private_key_id,client_id=self.client_id,user_agent=self._user_agent,**new_kwargs)result.token_uri = self.token_uriresult.revoke_uri = self.revoke_uriresult._private_key_pkcs8_pem = self._private_key_pkcs8_pemresult._private_key_pkcs12 = self._private_key_pkcs12result._private_key_password = self._private_key_passwordreturn result", "docstring": "Create credentials that specify additional claims.\n\n Args:\n claims: dict, key-value pairs for claims.\n\n Returns:\n ServiceAccountCredentials, a copy of the current service account\n credentials with updated claims to use when obtaining access\n tokens.", "id": "f2477:c0:m15"} {"signature": "def create_delegated(self, sub):", "body": "return self.create_with_claims({'': sub})", "docstring": "Create credentials that act as domain-wide delegation of authority.\n\n Use the ``sub`` parameter as the subject to delegate on behalf of\n that user.\n\n For example::\n\n >>> account_sub = 'foo@email.com'\n >>> delegate_creds = creds.create_delegated(account_sub)\n\n Args:\n sub: string, An email address that this service account will\n act on behalf of (via domain-wide delegation).\n\n Returns:\n ServiceAccountCredentials, a copy of the current service account\n updated to act on behalf of ``sub``.", "id": "f2477:c0:m16"} {"signature": "def authorize(self, http):", "body": "transport.wrap_http_for_jwt_access(self, http)return http", "docstring": "Authorize an httplib2.Http instance with a JWT assertion.\n\n Unless specified, the 'aud' of the assertion will be the base\n uri of the request.\n\n Args:\n http: An instance of ``httplib2.Http`` or something that acts\n like it.\n Returns:\n A modified instance of http that was passed in.\n Example::\n h = httplib2.Http()\n h = credentials.authorize(h)", "id": "f2477:c1:m1"} {"signature": "def get_access_token(self, http=None, additional_claims=None):", "body": "if additional_claims is None:if self.access_token is None or self.access_token_expired:self.refresh(None)return client.AccessTokenInfo(access_token=self.access_token, expires_in=self._expires_in())else:token, unused_expiry = self._create_token(additional_claims)return client.AccessTokenInfo(access_token=token, expires_in=self._MAX_TOKEN_LIFETIME_SECS)", "docstring": "Create a signed jwt.\n\n Args:\n http: unused\n additional_claims: dict, additional claims to add to\n the payload of the JWT.\n Returns:\n An AccessTokenInfo with the signed jwt", "id": "f2477:c1:m2"} {"signature": "def revoke(self, http):", "body": "pass", "docstring": "Cannot revoke JWTAccessCredentials tokens.", "id": 
"f2477:c1:m3"} {"signature": "def refresh(self, http):", "body": "self._refresh(None)", "docstring": "Refreshes the access_token.\n\n The HTTP object is unused since no request needs to be made to\n get a new token, it can just be generated locally.\n\n Args:\n http: unused HTTP object", "id": "f2477:c1:m6"} {"signature": "def _refresh(self, http):", "body": "self.access_token, self.token_expiry = self._create_token()", "docstring": "Refreshes the access_token.\n\n Args:\n http: unused HTTP object", "id": "f2477:c1:m7"} {"signature": "def __init__(self,package_name,rst_extension='',package_skip_patterns=None,module_skip_patterns=None,other_defines = True):", "body": "if package_skip_patterns is None:package_skip_patterns = ['']if module_skip_patterns is None:module_skip_patterns = ['', '']self.package_name = package_nameself.rst_extension = rst_extensionself.package_skip_patterns = package_skip_patternsself.module_skip_patterns = module_skip_patternsself.other_defines = other_defines", "docstring": "Initialize package for parsing\n\n Parameters\n ----------\n package_name : string\n Name of the top-level package. *package_name* must be the\n name of an importable package\n rst_extension : string, optional\n Extension for reST files, default '.rst'\n package_skip_patterns : None or sequence of {strings, regexps}\n Sequence of strings giving URIs of packages to be excluded\n Operates on the package path, starting at (including) the\n first dot in the package path, after *package_name* - so,\n if *package_name* is ``sphinx``, then ``sphinx.util`` will\n result in ``.util`` being passed for searching by these\n regexps. If is None, gives default. Default is:\n ['\\.tests$']\n module_skip_patterns : None or sequence\n Sequence of strings giving URIs of modules to be excluded\n Operates on the module name including preceding URI path,\n back to the first dot after *package_name*. For example\n ``sphinx.util.console`` results in the string to search of\n ``.util.console``\n If is None, gives default. 
Default is:\n ['\\.setup$', '\\._']\n other_defines : {True, False}, optional\n Whether to include classes and functions that are imported in a\n particular module but not defined there.", "id": "f2494:c0:m0"} {"signature": "def set_package_name(self, package_name):", "body": "self._package_name = package_nameroot_module = self._import(package_name)self.root_path = root_module.__path__[-]self.written_modules = None", "docstring": "Set package_name\n\n >>> docwriter = ApiDocWriter('sphinx')\n >>> import sphinx\n >>> docwriter.root_path == sphinx.__path__[0]\n True\n >>> docwriter.package_name = 'docutils'\n >>> import docutils\n >>> docwriter.root_path == docutils.__path__[0]\n True", "id": "f2494:c0:m2"} {"signature": "def _import(self, name):", "body": "mod = __import__(name)components = name.split('')for comp in components[:]:mod = getattr(mod, comp)return mod", "docstring": "Import namespace package", "id": "f2494:c0:m3"} {"signature": "def _get_object_name(self, line):", "body": "name = line.split()[].split('')[].strip()return name.rstrip('')", "docstring": "Get second token in line\n >>> docwriter = ApiDocWriter('sphinx')\n >>> docwriter._get_object_name(\" def func(): \")\n 'func'\n >>> docwriter._get_object_name(\" class Klass(object): \")\n 'Klass'\n >>> docwriter._get_object_name(\" class Klass: \")\n 'Klass'", "id": "f2494:c0:m4"} {"signature": "def _uri2path(self, uri):", "body": "if uri == self.package_name:return os.path.join(self.root_path, '')path = uri.replace(self.package_name + '', '')path = path.replace('', os.path.sep)path = os.path.join(self.root_path, path)if os.path.exists(path + ''): path += ''elif os.path.exists(os.path.join(path, '')):path = os.path.join(path, '')else:return Nonereturn path", "docstring": "Convert uri to absolute filepath\n\n Parameters\n ----------\n uri : string\n URI of python module to return path for\n\n Returns\n -------\n path : None or string\n Returns None if there is no valid path for this URI\n Otherwise returns absolute file system path for URI\n\n Examples\n --------\n >>> docwriter = ApiDocWriter('sphinx')\n >>> import sphinx\n >>> modpath = sphinx.__path__[0]\n >>> res = docwriter._uri2path('sphinx.builder')\n >>> res == os.path.join(modpath, 'builder.py')\n True\n >>> res = docwriter._uri2path('sphinx')\n >>> res == os.path.join(modpath, '__init__.py')\n True\n >>> docwriter._uri2path('sphinx.does_not_exist')", "id": "f2494:c0:m5"} {"signature": "def _path2uri(self, dirpath):", "body": "package_dir = self.package_name.replace('', os.path.sep)relpath = dirpath.replace(self.root_path, package_dir)if relpath.startswith(os.path.sep):relpath = relpath[:]return relpath.replace(os.path.sep, '')", "docstring": "Convert directory path to uri", "id": "f2494:c0:m6"} {"signature": "def _parse_module(self, uri):", "body": "filename = self._uri2path(uri)if filename is None:print(filename, '')return ([],[])f = open(filename, '')functions, classes = self._parse_lines(f)f.close()return functions, classes", "docstring": "Parse module defined in *uri*", "id": "f2494:c0:m7"} {"signature": "def _parse_module_with_import(self, uri):", "body": "mod = __import__(uri, fromlist=[uri])obj_strs = [obj for obj in dir(mod) if not obj.startswith('')]functions = []classes = []for obj_str in obj_strs:if obj_str not in mod.__dict__:continueobj = mod.__dict__[obj_str]if not self.other_defines and not getmodule(obj) == mod:continueif hasattr(obj, '') orisinstance(obj, BuiltinFunctionType) orisinstance(obj, 
FunctionType):functions.append(obj_str)else:try:issubclass(obj, object)classes.append(obj_str)except TypeError:passreturn functions, classes", "docstring": "Look for functions and classes in an importable module.\n\n Parameters\n ----------\n uri : str\n The name of the module to be parsed. This module needs to be\n importable.\n\n Returns\n -------\n functions : list of str\n A list of (public) function names in the module.\n classes : list of str\n A list of (public) class names in the module.", "id": "f2494:c0:m8"} {"signature": "def _parse_lines(self, linesource):", "body": "functions = []classes = []for line in linesource:if line.startswith('') and line.count(''):name = self._get_object_name(line)if not name.startswith(''):functions.append(name)elif line.startswith(''):name = self._get_object_name(line)if not name.startswith(''):classes.append(name)else:passfunctions.sort()classes.sort()return functions, classes", "docstring": "Parse lines of text for functions and classes", "id": "f2494:c0:m9"} {"signature": "def generate_api_doc(self, uri):", "body": "functions, classes = self._parse_module_with_import(uri)if not len(functions) and not len(classes) and DEBUG:print('', uri) uri_short = re.sub(r'' % self.package_name,'',uri)head = ''body = ''if '' in uri_short:title = '' + uri_short + ''head += title + '' + self.rst_section_levels[] * len(title)else:title = '' + uri_short + ''head += title + '' + self.rst_section_levels[] * len(title)head += '' + uri + ''head += '' + uri + ''body += '' + uri + ''for c in classes:body += '' + c + ''+ self.rst_section_levels[] *(len(c)+) + ''body += '' + c + ''body += ''''''''''head += ''for f in classes + functions:head += '' + f + ''head += ''for f in functions:body += f + ''body += self.rst_section_levels[] * len(f) + ''body += '' + f + ''return head, body", "docstring": "Make autodoc documentation template string for a module\n\n Parameters\n ----------\n uri : string\n python location of module - e.g 'sphinx.builder'\n\n Returns\n -------\n head : string\n Module name, table of contents.\n body : string\n Function and class docstrings.", "id": "f2494:c0:m10"} {"signature": "def _survives_exclude(self, matchstr, match_type):", "body": "if match_type == '':patterns = self.module_skip_patternselif match_type == '':patterns = self.package_skip_patternselse:raise ValueError(''% match_type)L = len(self.package_name)if matchstr[:L] == self.package_name:matchstr = matchstr[L:]for pat in patterns:try:pat.searchexcept AttributeError:pat = re.compile(pat)if pat.search(matchstr):return Falsereturn True", "docstring": "Returns True if *matchstr* does not match patterns\n\n ``self.package_name`` removed from front of string if present\n\n Examples\n --------\n >>> dw = ApiDocWriter('sphinx')\n >>> dw._survives_exclude('sphinx.okpkg', 'package')\n True\n >>> dw.package_skip_patterns.append('^\\\\.badpkg$')\n >>> dw._survives_exclude('sphinx.badpkg', 'package')\n False\n >>> dw._survives_exclude('sphinx.badpkg', 'module')\n True\n >>> dw._survives_exclude('sphinx.badmod', 'module')\n True\n >>> dw.module_skip_patterns.append('^\\\\.badmod$')\n >>> dw._survives_exclude('sphinx.badmod', 'module')\n False", "id": "f2494:c0:m11"} {"signature": "def discover_modules(self):", "body": "modules = [self.package_name]for dirpath, dirnames, filenames in os.walk(self.root_path):root_uri = self._path2uri(os.path.join(self.root_path,dirpath))filenames = [f[:-] for f in filenames iff.endswith('') and not f.startswith('')]for filename in filenames:package_uri = ''.join((dirpath, 
filename))for subpkg_name in dirnames + filenames:package_uri = ''.join((root_uri, subpkg_name))package_path = self._uri2path(package_uri)if (package_path andself._survives_exclude(package_uri, '')):modules.append(package_uri)return sorted(modules)", "docstring": "Return module sequence discovered from ``self.package_name``\n\n\n Parameters\n ----------\n None\n\n Returns\n -------\n mods : sequence\n Sequence of module names within ``self.package_name``\n\n Examples\n --------\n >>> dw = ApiDocWriter('sphinx')\n >>> mods = dw.discover_modules()\n >>> 'sphinx.util' in mods\n True\n >>> dw.package_skip_patterns.append('\\.util$')\n >>> 'sphinx.util' in dw.discover_modules()\n False\n >>>", "id": "f2494:c0:m12"} {"signature": "def write_api_docs(self, outdir):", "body": "if not os.path.exists(outdir):os.mkdir(outdir)modules = self.discover_modules()self.write_modules_api(modules,outdir)", "docstring": "Generate API reST files.\n\n Parameters\n ----------\n outdir : string\n Directory name in which to store files\n We create automatic filenames for each module\n\n Returns\n -------\n None\n\n Notes\n -----\n Sets self.written_modules to list of written modules", "id": "f2494:c0:m14"} {"signature": "def write_index(self, outdir, froot='', relative_to=None):", "body": "if self.written_modules is None:raise ValueError('')path = os.path.join(outdir, froot+self.rst_extension)if relative_to is not None:relpath = (outdir + os.path.sep).replace(relative_to + os.path.sep, '')else:relpath = outdiridx = open(path,'')w = idx.writew('')title = \"\"w(title + \"\")w(\"\" * len(title) + \"\")w('')for f in self.written_modules:w('' % os.path.join(relpath,f))idx.close()", "docstring": "Make a reST API index file from written files\n\n Parameters\n ----------\n path : string\n Filename to write index to\n outdir : string\n Directory to which to write generated index file\n froot : string, optional\n root (filename without extension) of filename to write to\n Defaults to 'gen'. We add ``self.rst_extension``.\n relative_to : string\n path to which written filenames are relative. This\n component of the written file path will be removed from\n outdir, in the generated index. Default is None, meaning,\n leave path as it is.", "id": "f2494:c0:m15"} {"signature": "def calc_inbag(n_samples, forest):", "body": "if not forest.bootstrap:e_s = \"\"e_s = \"\"raise ValueError(e_s)n_trees = forest.n_estimatorsinbag = np.zeros((n_samples, n_trees))sample_idx = []for t_idx in range(n_trees):sample_idx.append(_generate_sample_indices(forest.estimators_[t_idx].random_state,n_samples))inbag[:, t_idx] = np.bincount(sample_idx[-], minlength=n_samples)return inbag", "docstring": "Derive samples used to create trees in scikit-learn RandomForest objects.\n\nRecovers the samples in each tree from the random state of that tree using\n:func:`forest._generate_sample_indices`.\n\nParameters\n----------\nn_samples : int\n The number of samples used to fit the scikit-learn RandomForest object.\n\nforest : RandomForest\n Regressor or Classifier object that is already fit by scikit-learn.\n\nReturns\n-------\nArray that records how many times a data point was placed in a tree.\nColumns are individual trees. 
Rows are the number of times a sample was\nused in a tree.", "id": "f2501:m0"} {"signature": "def _bias_correction(V_IJ, inbag, pred_centered, n_trees):", "body": "n_train_samples = inbag.shape[]n_var = np.mean(np.square(inbag[:n_trees]).mean(axis=).T.view() -np.square(inbag[:n_trees].mean(axis=)).T.view())boot_var = np.square(pred_centered).sum(axis=) / n_treesbias_correction = n_train_samples * n_var * boot_var / n_treesV_IJ_unbiased = V_IJ - bias_correctionreturn V_IJ_unbiased", "docstring": "Helper functions that implements bias correction\n\nParameters\n----------\nV_IJ : ndarray\n Intermediate result in the computation.\n\ninbag : ndarray\n The inbag matrix that fit the data. If set to `None` (default) it\n will be inferred from the forest. However, this only works for trees\n for which bootstrapping was set to `True`. That is, if sampling was\n done with replacement. Otherwise, users need to provide their own\n inbag matrix.\n\npred_centered : ndarray\n Centered predictions that are an intermediate result in the\n computation.\n\nn_trees : int\n The number of trees in the forest object.", "id": "f2501:m2"} {"signature": "def gfit(X, sigma, p=, nbin=, unif_fraction=):", "body": "min_x = min(min(X) - * np.std(X, ddof=), )max_x = max(max(X) + * np.std(X, ddof=),np.std(X, ddof=))xvals = np.linspace(min_x, max_x, nbin)binw = (max_x - min_x) / (nbin - )zero_idx = max(np.where(xvals <= )[])noise_kernel = norm().pdf(xvals / sigma) * binw / sigmaif zero_idx > :noise_rotate = noise_kernel[list(np.arange(zero_idx, len(xvals))) +list(np.arange(, zero_idx))]else:noise_rotate = noise_kernelXX = np.zeros((p, len(xvals)), dtype=np.float)for ind, exp in enumerate(range(, p+)):mask = np.ones_like(xvals)mask[np.where(xvals <= )[]] = XX[ind, :] = pow(xvals, exp) * maskXX = XX.Tdef neg_loglik(eta):mask = np.ones_like(xvals)mask[np.where(xvals <= )[]] = g_eta_raw = np.exp(np.dot(XX, eta)) * maskif ((np.sum(g_eta_raw) == np.inf) |(np.sum(g_eta_raw) <= * np.finfo(np.double).tiny)):return ( * (len(X) + sum(eta ** )))g_eta_main = g_eta_raw / sum(g_eta_raw)g_eta = (( - unif_fraction) * g_eta_main +unif_fraction * mask / sum(mask))f_eta = fftconvolve(g_eta, noise_rotate, mode='')return np.sum(np.interp(X, xvals,-np.log(np.maximum(f_eta, ))))eta_hat = minimize(neg_loglik,list(itertools.repeat(-, p))).xg_eta_raw = np.exp(np.dot(XX, eta_hat)) * maskg_eta_main = g_eta_raw / sum(g_eta_raw)g_eta = (( - unif_fraction) * g_eta_main +unif_fraction * mask) / sum(mask)return xvals, g_eta", "docstring": "Fit empirical Bayes prior in the hierarchical model [Efron2014]_.\n\n.. math::\n\n mu ~ G, X ~ N(mu, sigma^2)\n\nParameters\n----------\nX: ndarray\n A 1D array of observations.\nsigma: float\n Noise estimate on X.\np: int\n Number of parameters used to fit G. Default: 5\nnbin: int\n Number of bins used for discrete approximation.\n Default: 200\nunif_fraction: float\n Fraction of G modeled as \"slab\". 
Default: 0.1\n\nReturns\n-------\nAn array of the posterior density estimate g.", "id": "f2502:m0"} {"signature": "def gbayes(x0, g_est, sigma):", "body": "Kx = norm().pdf((g_est[] - x0) / sigma)post = Kx * g_est[]post /= sum(post)return sum(post * g_est[])", "docstring": "Estimate Bayes posterior with Gaussian noise [Efron2014]_.\n\nParameters\n----------\nx0: ndarray\n an observation\ng_est: float\n a prior density, as returned by gfit\nsigma: int\n noise estimate\n\nReturns\n-------\nAn array of the posterior estimate E[mu | x0]", "id": "f2502:m1"} {"signature": "def calibrateEB(variances, sigma2):", "body": "if (sigma2 <= or min(variances) == max(variances)):return(np.maximum(variances, ))sigma = np.sqrt(sigma2)eb_prior = gfit(variances, sigma)part = functools.partial(gbayes, g_est=eb_prior,sigma=sigma)if len(variances) >= :calib_x = np.percentile(variances,np.arange(, , ))calib_y = list(map(part, calib_x))calib_all = np.interp(variances, calib_x, calib_y)else:calib_all = list(map(part, variances))return np.asarray(calib_all)", "docstring": "Calibrate noisy variance estimates with empirical Bayes.\n\nParameters\n----------\nvars: ndarray\n List of variance estimates.\nsigma2: int\n Estimate of the Monte Carlo noise in vars.\n\nReturns\n-------\nAn array of the calibrated variance estimates", "id": "f2502:m2"} {"signature": "def _donothing_func(*args, **kwargs):", "body": "pass", "docstring": "Perform no good and no bad", "id": "f2504:m0"} {"signature": "def _donothing(self, *args, **kwargs):", "body": "pass", "docstring": "Perform no good and no bad", "id": "f2504:c0:m0"} {"signature": "def dcite(self, *args, **kwargs):", "body": "def nondecorating_decorator(func):return funcreturn nondecorating_decorator", "docstring": "If I could cite I would", "id": "f2504:c0:m1"} {"signature": "@staticmethoddef parse(type: Type):", "body": "def decorator(parser):EnvVar.parsers[type] = parserreturn parserreturn decorator", "docstring": "Register a parser for a attribute type.\n\nParsers will be used to parse `str` type objects from either\nthe commandline arguments or environment variables.\n\nArgs:\n type: the type the decorated function will be responsible\n for parsing a environment variable to.", "id": "f2508:c2:m0"} {"signature": "@staticmethoddef status(s):", "body": "print(''.format(s))", "docstring": "Prints things in bold.", "id": "f2509:c0:m0"} {"signature": "def repr_failure(self, excinfo):", "body": "return \"\".format(excinfo,self.indent(self.code),excinfo.getrepr(funcargs=True, style=''))", "docstring": "called when self.runtest() raises an exception.", "id": "f2522:c1:m4"} {"signature": "def flag(argument):", "body": "if argument and argument.strip():raise ValueError('' % argument)else:return True", "docstring": "Reimplement directives.flag to return True instead of None\n Check for a valid flag option (no argument) and return ``None``.\n (Directive option conversion function.)\n\n Raise ``ValueError`` if an argument is found.", "id": "f2523:m0"} {"signature": "def phrase_to_filename(self, phrase):", "body": "name = re.sub(r\"\", '', phrase.strip().lower())name = re.sub(r\"\", '', name)return name + ''", "docstring": "Convert phrase to normilized file name.", "id": "f2523:c1:m2"} {"signature": "def backend_extras(*requirements):", "body": "return [\"\"] + list(requirements)", "docstring": "Construct list of requirements for backend integration.\n\n All built-in backends depend on PyOpenGL so add it as default requirement.", "id": "f2536:m3"} {"signature": "def __init__(self, shape, 
cols=[]):", "body": "self._arrays = {}self._names = []self.shape = shapefor colname, value in cols:self.set(colname, value)self._dtype = None", "docstring": "cols is a list of (colname, values), shape has to be 1D.", "id": "f2538:c1:m0"} {"signature": "def __init__(self, files, fetch):", "body": "self.files = filesself.fetch = fetchself.pool = fetch(None)", "docstring": "fetch is a method that takes \n fetch(filename)\n\n it shall read in the content of filename in ndarray\n if fetch(None), returns an empty size=0 ndarray\n with the same spec of the correct file\n\n\n pool needs to support copy() and indexing.", "id": "f2540:c0:m0"} {"signature": "def read(self, n):", "body": "while len(self.pool) < n:self.cur = self.files.next()self.pool = numpy.append(self.pool,self.fetch(self.cur), axis=)rt = self.pool[:n]if n == len(self.pool):self.pool = self.fetch(None)else:self.pool = self.pool[n:]return rt", "docstring": "return at most n array items, move the cursor.", "id": "f2540:c0:m2"} {"signature": "def savetxt2(fname, X, delimiter='', newline='', comment_character='',header='', save_dtype=False, fmt={}):", "body": "prefixfmt = {}for key in fmt:prefixfmt[key] = fmt[key]olddtype = X.dtypenewdtype = flatten_dtype(numpy.dtype([('', (X.dtype, X.shape[:]))]))X = X.view(dtype=newdtype)dtype = X.dtypeX = numpy.atleast_1d(X.squeeze())header2 = _mkheader(dtype)fmtstr = _mkfmtstr(dtype, prefixfmt, delimiter, _default_fmt)if hasattr(fname, ''):fh = fnamecleanup = lambda : Noneelse:fh = file(fname, '')cleanup = lambda : fh.close()try:fh.write (header)if header[:-] != newline:fh.write(newline)fh.write (comment_character)fh.write ('')fh.write (header2)fh.write (delimiter)fh.write ('' % len(X))fh.write(newline)if save_dtype:fh.write (comment_character)fh.write ('')fh.write (base64.b64encode(pickle.dumps(olddtype)))fh.write (newline)for row in X:fh.write(fmtstr % tuple(row))fh.write(newline)if hasattr(fh, ''):fh.flush()finally:cleanup()", "docstring": "format of table header:\n\n # ID [type]:name(index) .... * number of items\n\n user's header is not prefixed by comment_character\n\n name of nested dtype elements are split by .", "id": "f2541:m0"} {"signature": "def loadtxt2(fname, dtype=None, delimiter='', newline='', comment_character='',skiplines=):", "body": "dtypert = [None, None, None]def preparedtype(dtype):dtypert[] = dtypeflatten = flatten_dtype(dtype)dtypert[] = flattendtypert[] = numpy.dtype([('', (numpy.int8,flatten.itemsize))])buf = numpy.empty((), dtype=dtypert[])converters = [_default_conv[flatten[name].char] for name in flatten.names]return buf, converters, flatten.namesdef fileiter(fh):converters = []buf = Noneif dtype is not None:buf, converters, names = preparedtype(dtype)yield Nonefor lineno, line in enumerate(fh):if lineno < skiplines: continueif line[] in comment_character:if buf is None and line[] == '':ddtype = pickle.loads(base64.b64decode(line[:]))buf, converters, names = preparedtype(ddtype)yield Nonecontinuefor word, c, name in zip(line.split(), converters, names):buf[name] = c(word)buf2 = buf.copy().view(dtype=dtypert[])yield buf2if isinstance(fname, str):fh = file(fh, '')cleanup = lambda : fh.close()else:fh = iter(fname)cleanup = lambda : Nonetry:i = fileiter(fh)next(i)return numpy.fromiter(i, dtype=dtypert[]).view(dtype=dtypert[]) finally:cleanup()", "docstring": "Known issues delimiter and newline is not respected. 
\n string quotation with space is broken.", "id": "f2541:m1"} {"signature": "def flatten_dtype(dtype, _next=None):", "body": "types = []if _next is None: _next = [, '']primary = Trueelse:primary = Falseprefix = _next[]if dtype.names is None:for i in numpy.ndindex(dtype.shape):if dtype.base == dtype:types.append(('' % (prefix, simplerepr(i)), dtype))_next[] += else:_next[] = '' % (prefix, simplerepr(i))types.extend(flatten_dtype(dtype.base, _next))else:for field in dtype.names:typ_fields = dtype.fields[field]if len(prefix) > :_next[] = prefix + '' + fieldelse:_next[] = '' + fieldflat_dt = flatten_dtype(typ_fields[], _next)types.extend(flat_dt)_next[] = prefixif primary:return numpy.dtype(types)else:return types", "docstring": "Unpack a structured data-type.", "id": "f2541:m8"} {"signature": "def __init__(self, func, ins=None, outdtype=None, altreduce=None):", "body": "if isinstance(func, numpy.ufunc):self.ufunc = funcself.nin = func.ninself.ins = (, , , )[:func.nin]self.nout = func.noutself.outdtype = Noneself.altreduce = Noneelse:self.ufunc = funcself.nin = len(ins)self.ins = insself.nout = self.outdtype = outdtypeself.altreduce = altreduceself.__doc__ = func.__doc__if self.nout != :raise TypeError(\"\")", "docstring": "if func is not ufunc, a bit complicated:\n ins tells which positional argument will be striped\n after done, reducefunc is called on the results", "id": "f2542:c0:m0"} {"signature": "def call(self, args, axis=, out=None, chunksize= * , **kwargs):", "body": "if self.altreduce is not None:ret = [None]else:if out is None :if self.outdtype is not None:dtype = self.outdtypeelse:try:dtype = numpy.result_type(*[args[i] for i in self.ins] * )except:dtype = Noneout = sharedmem.empty(numpy.broadcast(*[args[i] for i in self.ins] * ).shape,dtype=dtype)if axis != :for i in self.ins:args[i] = numpy.rollaxis(args[i], axis)out = numpy.rollaxis(out, axis)size = numpy.max([len(args[i]) for i in self.ins])with sharedmem.MapReduce() as pool:def work(i):sl = slice(i, i+chunksize)myargs = args[:]for j in self.ins:try: tmp = myargs[j][sl]a, b, c = sl.indices(len(args[j]))myargs[j] = tmpexcept Exception as e:print(tmp)print(j, e)passif b == a: return Nonert = self.ufunc(*myargs, **kwargs)if self.altreduce is not None:return rtelse:out[sl] = rtdef reduce(rt):if self.altreduce is None:returnif ret[] is None:ret[] = rtelif rt is not None:ret[] = self.altreduce(ret[], rt)pool.map(work, list(range(, size, chunksize)), reduce=reduce)if self.altreduce is None:if axis != :out = numpy.rollaxis(out, , axis + )return out else:return ret[]", "docstring": "axis is the axis to chop it off.\n if self.altreduce is set, the results will\n be reduced with altreduce and returned\n otherwise will be saved to out, then return out.", "id": "f2542:c0:m3"} {"signature": "def __new__(cls, array, start=None, end=None):", "body": "self = array.view(type=cls)if end is None and start is None:start = numpy.array([len(arr) for arr in array], dtype='')array = numpy.concatenate(array)if end is None:sizes = startself.start = numpy.zeros(shape=len(sizes), dtype='')self.end = numpy.zeros(shape=len(sizes), dtype='')self.end[:] = sizes.cumsum()self.start[:] = self.end[:-]else:self.start = startself.end = endself.A = arrayreturn self", "docstring": "if end is none, start contains the sizes. 
\n if start is also none, array is a list of arrays to concatenate", "id": "f2542:c1:m0"} {"signature": "@classmethoddef adapt(cls, source, template):", "body": "if not isinstance(template, packarray):raise TypeError('')return cls(source, template.start, template.end)", "docstring": "adapt source to a packarray according to the layout of template", "id": "f2542:c1:m1"} {"signature": "def argsort(data, out=None, chunksize=None, baseargsort=None, argmerge=None, np=None):", "body": "if baseargsort is None:baseargsort = lambda x:x.argsort()if argmerge is None:argmerge = default_argmergeif chunksize is None:chunksize = * * if out is None:arg1 = numpy.empty(len(data), dtype='')out = arg1else:assert out.dtype == numpy.dtype('')assert len(out) == len(data)arg1 = outif np is None:np = sharedmem.cpu_count()if np <= or len(data) < chunksize: out[:] = baseargsort(data)return outCHK = [slice(i, i + chunksize) for i in range(, len(data), chunksize)]DUMMY = slice(len(data), len(data))if len(CHK) % : CHK.append(DUMMY)with sharedmem.TPool() as pool:def work(i):C = CHK[i]start, stop, step = C.indices(len(data))arg1[C] = baseargsort(data[C])arg1[C] += startpool.map(work, range(len(CHK)))arg2 = numpy.empty_like(arg1)flip = while len(CHK) > :with sharedmem.TPool() as pool:def work(i):C1 = CHK[i]C2 = CHK[i+]start1, stop1, step1 = C1.indices(len(data))start2, stop2, step2 = C2.indices(len(data))assert start2 == stop1argmerge(data, arg1[C1], arg1[C2], arg2[start1:stop2])return slice(start1, stop2)CHK = pool.map(work, range(, len(CHK), ))arg1, arg2 = arg2, arg1flip = flip + if len(CHK) == : breakif len(CHK) % : CHK.append(DUMMY)if flip % != :out[:] = arg1return out", "docstring": "parallel argsort, like numpy.argsort\n\nuse sizeof(intp) * len(data) as scratch space\n\nuse baseargsort for serial sort \n ind = baseargsort(data)\n\nuse argmerge to merge\n def argmerge(data, A, B, out):\n ensure data[out] is sorted\n and out[:] = A join B\n\nTODO: shall try to use the inplace merge mentioned in \n http://keithschwarz.com/interesting/code/?dir=inplace-merge.", "id": "f2543:m1"} {"signature": "def set_debug(flag):", "body": "global __shmdebug____shmdebug__ = flag", "docstring": "Set the debug mode.\n\n In debug mode (flag==True), the MapReduce pool will\n run the work function on the master thread / process.\n This ensures all exceptions can be properly inspected by \n a debugger, e.g. pdb.\n\n Parameters\n ----------\n flag : boolean\n True for debug mode, False for production mode.", "id": "f2546:m0"} {"signature": "def get_debug():", "body": "global __shmdebug__return __shmdebug__", "docstring": "Get the debug mode.\n\n Returns\n -------\n The debug mode. 
True if currently in debugging mode.", "id": "f2546:m1"} {"signature": "def total_memory():", "body": "with file('', '') as f:for line in f:words = line.split()if words[].upper() == '':return int(words[]) * raise IOError('')", "docstring": "Returns the amount of memory available for use.\n\n    The memory is obtained from MemTotal entry in /proc/meminfo.\n\n    Notes\n    =====\n    This function is not very useful and not very portable.", "id": "f2546:m2"} {"signature": "def cpu_count():", "body": "num = os.getenv(\"\")if num is None:num = os.getenv(\"\")try:return int(num)except:return multiprocessing.cpu_count()", "docstring": "Returns the default number of slave processes to be spawned.\n\n    The default value is the number of physical cpu cores seen by python.\n    :code:`OMP_NUM_THREADS` environment variable overrides it.\n\n    On PBS/torque systems if OMP_NUM_THREADS is empty, we try to\n    use the value of :code:`PBS_NUM_PPN` variable.\n\n    Notes\n    -----\n    On some machines the physical number of cores does not equal\n    the number of cpus that shall be used. PSC Blacklight for example.", "id": "f2546:m3"} {"signature": "def MapReduceByThread(np=None):", "body": "return MapReduce(backend=ThreadBackend, np=np)", "docstring": "Creates a MapReduce object but with the Thread backend.\n\n    The process backend is usually preferred.", "id": "f2546:m4"} {"signature": "def empty_like(array, dtype=None):", "body": "array = numpy.asarray(array)if dtype is None: dtype = array.dtypereturn anonymousmemmap(array.shape, dtype)", "docstring": "Create a shared memory array from the shape of array.", "id": "f2546:m5"} {"signature": "def empty(shape, dtype=''):", "body": "return anonymousmemmap(shape, dtype)", "docstring": "Create an empty shared memory array.", "id": "f2546:m6"} {"signature": "def full_like(array, value, dtype=None):", "body": "shared = empty_like(array, dtype)shared[:] = valuereturn shared", "docstring": "Create a shared memory array with the same shape and type as a given array, filled with `value`.", "id": "f2546:m7"} {"signature": "def full(shape, value, dtype=''):", "body": "shared = empty(shape, dtype)shared[:] = valuereturn shared", "docstring": "Create a shared memory array of given shape and type, filled with `value`.", "id": "f2546:m8"} {"signature": "def copy(a):", "body": "shared = anonymousmemmap(a.shape, dtype=a.dtype)shared[:] = a[:]return shared", "docstring": "Copy an array to the shared memory. \n\n    Notes\n    -----\n    copy is not always necessary because the private memory is always copy-on-write.\n\n    Use :code:`a = copy(a)` to immediately dereference the old 'a' on private memory", "id": "f2546:m9"} {"signature": "def get(self, Q):", "body": "while self.Errors.empty():try:return Q.get(timeout=)except queue.Empty:if not self.is_alive():try:return Q.get(timeout=)except queue.Empty:raise StopProcessGroupelse:continueelse:raise StopProcessGroup", "docstring": "Protected get. Get an item from Q.\n        Will block, but if the process group has errors,\n        raise a StopProcessGroup exception.\n\n        A slave process will terminate upon StopProcessGroup.\n        The master process shall read the error from the process group.", "id": "f2546:c3:m8"} {"signature": "def wait(self):", "body": "e, r = self.result.get()self.slave.join()self.slave = Noneself.result = Noneif isinstance(e, Exception):raise SlaveException(e, r)return r", "docstring": "Wait and join the child process. 
\n The return value of the function call is returned.\n If any exception occurred it is wrapped and raised.", "id": "f2546:c7:m2"} {"signature": "def map(self, func, sequence, reduce=None, star=False, minlength=):", "body": "def realreduce(r):if reduce:if isinstance(r, tuple):return reduce(*r)else:return reduce(r)return rdef realfunc(i):if star: return func(*i)else: return func(i)if len(sequence) <= or self.np == or get_debug():self.local = lambda : Noneself.local.rank = rt = [realreduce(realfunc(i)) for i in sequence]self.local = Nonereturn rtnp = min([self.np, len(sequence)])Q = self.backend.QueueFactory()R = self.backend.QueueFactory()self.ordered.reset()pg = ProcessGroup(main=self._main, np=np,backend=self.backend,args=(Q, R, sequence, realfunc))pg.start()L = []N = []def feeder(pg, Q, N):j = try:for i, work in enumerate(sequence):if not hasattr(sequence, ''):pg.put(Q, (i, work))else:pg.put(Q, (i, ))j = j + N.append(j)for i in range(np):pg.put(Q, None)except StopProcessGroup:returnfinally:passfeeder = threading.Thread(None, feeder, args=(pg, Q, N))feeder.start() count = try:while True:try:capsule = pg.get(R)except queue.Empty:continueexcept StopProcessGroup:raise pg.get_exception()capsule = capsule[], realreduce(capsule[])heapq.heappush(L, capsule)count = count + if len(N) > and count == N[]: breakrt = []R.close()R.join_thread()while len(L) > :rt.append(heapq.heappop(L)[])pg.join()feeder.join()assert N[] == len(rt)return rtexcept BaseException as e:pg.killall()pg.join()feeder.join()raise", "docstring": "Map-reduce with multile processes.\n\n Apply func to each item on the sequence, in parallel. \n As the results are collected, reduce is called on the result.\n The reduced result is returned as a list.\n\n Parameters\n ----------\n func : callable\n The function to call. It must accept the same number of\n arguments as the length of an item in the sequence.\n\n .. warning::\n\n func is not supposed to use exceptions for flow control.\n In non-debug mode all exceptions will be wrapped into\n a :py:class:`SlaveException`.\n\n sequence : list or array_like\n The sequence of arguments to be applied to func.\n\n reduce : callable, optional\n Apply an reduction operation on the \n return values of func. If func returns a tuple, they\n are treated as positional arguments of reduce.\n\n star : boolean\n if True, the items in sequence are treated as positional\n arguments of reduce.\n\n minlength: integer\n Minimal length of `sequence` to start parallel processing.\n if len(sequence) < minlength, fall back to sequential\n processing. This can be used to avoid the overhead of starting\n the worker processes when there is little work.\n\n Returns\n -------\n results : list\n The list of reduced results from the map operation, in\n the order of the arguments of sequence.\n\n Raises\n ------\n SlaveException\n If any of the slave process encounters\n an exception. Inspect :py:attr:`SlaveException.reason` for the underlying exception.", "id": "f2546:c8:m4"} {"signature": "def MetaOrdered(parallel, done, turnstile):", "body": "class Ordered:def __init__(self, iterref):if parallel.master:done[...] = self.iterref = iterrefparallel.barrier()@classmethoddef abort(self):turnstile.release()def __enter__(self):while self.iterref != done:passturnstile.acquire()return selfdef __exit__(self, *args):done[...] 
+= turnstile.release()return Ordered", "docstring": "meta class for Ordered construct.", "id": "f2547:m2"} {"signature": "def kill_all(self):", "body": "for pid in self.children:try:os.kill(pid, signal.SIGTRAP)except OSError:continueself.join()", "docstring": "kill all slaves and reap the monitor", "id": "f2547:c2:m3"} {"signature": "def join(self):", "body": "try:self.thread.join()except:passfinally:self.thread = None", "docstring": "master only", "id": "f2547:c2:m5"} {"signature": "def haserror(self):", "body": "return self.message is not None", "docstring": "master only", "id": "f2547:c3:m2"} {"signature": "def start(self):", "body": "self.thread = Thread(target=self.main)self.thread.daemon = Trueself.thread.start()", "docstring": "master only", "id": "f2547:c3:m3"} {"signature": "def join(self):", "body": "try:self.pipe.put('')self.thread.join()except:passfinally:self.thread = None", "docstring": "master only", "id": "f2547:c3:m4"} {"signature": "def slaveraise(self, type, error, traceback):", "body": "message = '' * + pickle.dumps((type,''.join(tb.format_exception(type, error, traceback))))if self.pipe is not None:self.pipe.put(message)", "docstring": "slave only", "id": "f2547:c3:m5"} {"signature": "def forloop(self, range, ordered=False, schedule=('', )):", "body": "if isinstance(schedule, tuple):schedule, chunk = scheduleelse:chunk = Noneif schedule == '':return self._StaticForLoop(range, ordered, chunk)elif schedule == '':return self._DynamicForLoop(range, ordered, chunk, guided=False)elif schedule == '':return self._DynamicForLoop(range, ordered, chunk, guided=True)else:raise \"\"", "docstring": "schedule can be\n        (sch, chunk) or sch;\n        sch is 'static', 'dynamic' or 'guided'.\n\n        chunk defaults to 1\n\n        if ordered, create an Ordered", "id": "f2547:c4:m7"} {"signature": "def abort(self):", "body": "self.mutex.release()self.turnstile.release()self.mutex.release()self.turnstile2.release()", "docstring": "ensure the master exits from the Barrier", "id": "f2547:c5:m1"} {"signature": "def beforefork(self, parallel):", "body": "pass", "docstring": "allocate self.data with dtype", "id": "f2547:c7:m1"} {"signature": "@staticmethoddef _get_classes(package_name, base_class):", "body": "classes = {}base_dir = os.getcwd()root_module_name = base_dir.split('')[-]package_dir = base_dir + '' % package_nameif os.path.isdir(package_dir):for module_path in os.listdir(package_dir):if not module_path.endswith(''):continuemodule_name = os.path.splitext(module_path)[]module_full_name = '' % (root_module_name, package_name, module_name)__import__(module_full_name)work_module = sys.modules[module_full_name]for module_item in work_module.__dict__.values():if type(module_item) is typeand issubclass(module_item, base_class)and module_item is not base_classand hasattr(module_item, '') and module_item.name:classes.setdefault(module_item.name, []).append(module_item)for work_name, work_modules in classes.items():if len(work_modules) > :raise DuplicatedNameException('' % (''.join(map(str, work_modules)),work_name))return tuple([(work_name, work_modules[]) for work_name, work_modules in classes.items()])", "docstring": "search monits or works classes. 
Class must have 'name' attribute\n:param package_name: 'monits' or 'works'\n:param base_class: Monit or Work\n:return: tuple of tuples monit/work-name and class", "id": "f2558:c4:m3"} {"signature": "def format_pathname(pathname,max_length):", "body": "if max_length <= :raise ValueError(\"\")if len(pathname) > max_length:pathname = \"\".format(pathname[-(max_length-):])return pathname", "docstring": "Format a pathname\n\n:param str pathname: Pathname to format\n:param int max_length: Maximum length of result pathname (> 3)\n:return: Formatted pathname\n:rtype: str\n:raises ValueError: If *max_length* is not larger than 3\n\nThis function formats a pathname so it is not longer than *max_length*\ncharacters. The resulting pathname is returned. It does so by replacing\ncharacters at the start of the *pathname* with three dots, if necessary.\nThe idea is that the end of the *pathname* is the most important part\nto be able to identify the file.", "id": "f2570:m0"} {"signature": "def format_time_point(time_point_string):", "body": "time_point = dateutil.parser.parse(time_point_string)if not is_aware(time_point):time_point = make_aware(time_point)time_point = local_time_point(time_point)return time_point.strftime(\"\")", "docstring": ":param str time_point_string: String representation of a time point\n to format\n:return: Formatted time point\n:rtype: str\n:raises ValueError: If *time_point_string* is not formatted by\n dateutil.parser.parse\n\nSee :py:meth:`datetime.datetime.isoformat` function for supported formats.", "id": "f2570:m1"} {"signature": "def format_uuid(uuid,max_length=):", "body": "if max_length <= :raise ValueError(\"\")if len(uuid) > max_length:uuid = \"\".format(uuid[:max_length-])return uuid", "docstring": "Format a UUID string\n\n:param str uuid: UUID to format\n:param int max_length: Maximum length of result string (> 3)\n:return: Formatted UUID\n:rtype: str\n:raises ValueError: If *max_length* is not larger than 3\n\nThis function formats a UUID so it is not longer than *max_length*\ncharacters. The resulting string is returned. It does so by replacing\ncharacters at the end of the *uuid* with three dots, if necessary.\nThe idea is that the start of the *uuid* is the most important part\nto be able to identify the related entity.\n\nThe default *max_length* is 10, which will result in a string\ncontaining the first 7 characters of the *uuid* passed in. 
Most of\nthe time, such a string is still unique within a collection of UUIDs.", "id": "f2570:m2"} {"signature": "def register(app):", "body": "error_handler = json.http_exception_error_handler@app.errorhandler()def handle_bad_request(exception):return error_handler(exception)@app.errorhandler()def handle_not_found(exception):return error_handler(exception)@app.errorhandler()def handle_method_not_allowed(exception):return error_handler(exception)@app.errorhandler()def handle_unprocessable_entity(exception):return error_handler(exception)@app.errorhandler()def handle_internal_server_error(exception):return error_handler(exception)", "docstring": "Register all HTTP error code error handlers\n\nCurrently, errors are handled by the JSON error handler.", "id": "f2572:m0"} {"signature": "def register(app):", "body": "error_code.register(app)", "docstring": "Register all available error handlers\n\n:param flask.Flask app: Application instance", "id": "f2573:m0"} {"signature": "def response(code,description):", "body": "payload = jsonify({\"\": code,\"\": description})return payload, code", "docstring": "Format a response\n\n:param int code: HTTP error code\n:param str description: Error message\n:return: Tuple of a wrapped JSON snippet and the error code\n:rtype: Tuple of :py:class:`flask.Response` containing a JSON snippet,\n and the error code\n\nThe JSON snippet is formatted like this:\n\n.. code-block:: json\n\n {\n \"status_code\": 404,\n \"message\": \"The requested URL was not found on the server\"\n }", "id": "f2574:m0"} {"signature": "def http_exception_error_handler(exception):", "body": "assert issubclass(type(exception), HTTPException), type(exception)assert hasattr(exception, \"\")assert hasattr(exception, \"\")return response(exception.code, exception.description)", "docstring": "Handle HTTP exception\n\n:param werkzeug.exceptions.HTTPException exception: Raised exception\n\nA response is returned, as formatted by the :py:func:`response` function.", "id": "f2574:m1"} {"signature": "def notify_client(notifier_uri,client_id,status_code,message=None):", "body": "payload = {\"\": client_id,\"\": {\"\": {\"\": status_code}}}if message is not None:payload[\"\"][\"\"][\"\"] = messageresponse = requests.post(notifier_uri, json=payload)if response.status_code != :sys.stderr.write(\"\".format(payload))sys.stderr.flush()", "docstring": "Notify the client of the result of handling a request\n\nThe payload contains two elements:\n\n- client_id\n- result\n\nThe *client_id* is the id of the client to notify. It is assumed\nthat the notifier service is able to identify the client by this id\nand that it can pass the *result* to it.\n\nThe *result* always contains a *status_code* element. 
In case the\nmessage passed in is not None, it will also contain a *message*\nelement.\n\nIn case the notifier service does not exist or returns an error,\nan error message will be logged to *stderr*.", "id": "f2576:m0"} {"signature": "def consume_message(method):", "body": "def wrapper(self,channel,method_frame,header_frame,body):sys.stdout.write(\"\".format(body))sys.stdout.flush()try:body = body.decode(\"\")data = json.loads(body)method(self, data)except Exception as exception:sys.stderr.write(\"\".format(traceback.format_exc()))sys.stderr.flush()channel.basic_ack(delivery_tag=method_frame.delivery_tag)return wrapper", "docstring": "Decorator for methods handling requests from RabbitMQ\n\nThe goal of this decorator is to perform the tasks common to all\nmethods handling requests:\n\n- Log the raw message to *stdout*\n- Decode the message into a Python dictionary\n- Log errors to *stderr*\n- Signal the broker that we're done handling the request\n\nThe method passed in will be called with the message body as a\ndictionary. It is assumed here that the message body is a JSON string\nencoded in UTF8.", "id": "f2576:m1"} {"signature": "def consume_message_with_notify(notifier_uri_getter):", "body": "def consume_message_with_notify_decorator(method):@consume_messagedef wrapper(self,data):notifier_uri = notifier_uri_getter(self)client_id = data[\"\"]try:method(self, data)notify_client(notifier_uri, client_id, )except Exception as exception:notify_client(notifier_uri, client_id, , str(exception))raisereturn wrapperreturn consume_message_with_notify_decorator", "docstring": "Decorator for methods handling requests from RabbitMQ\n\nThis decorator builds on the :py:func:`consume_message` decorator. It extents\nit by logic for notifying a client of the result of handling the\nrequest.\n\nThe *notifier_uri_getter* argument must be a callable which accepts\n*self* and returns the uri of the notifier service.", "id": "f2576:m2"} {"signature": "def utc_now():", "body": "return datetime.now(timezone.utc)", "docstring": "Return an aware :py:class:`datetime.datetime` instance of the current\ndate and time, in UTC timezone\n\n:return: Current date and time, in UTC timezone\n:rtype: datetime.datetime", "id": "f2578:m0"} {"signature": "def is_aware(time_point):", "body": "return time_point.tzinfo is not None andtime_point.utcoffset() is not None", "docstring": "Return whether ``time_point`` is aware of the timezone", "id": "f2578:m1"} {"signature": "def make_aware(time_point):", "body": "assert not is_aware(time_point)return time_point.replace(tzinfo=UTC)", "docstring": "Return an aware time point\n\n:param datetime.datetime time_point: Unaware time point in UTC\n:return: Aware time point in UTC timezone\n:rtype: datetime.datetime", "id": "f2578:m2"} {"signature": "def local_time_point(time_point):", "body": "assert is_aware(time_point)return time_point.astimezone(get_localzone())", "docstring": "Return a time point with the same UTC time as ``time_point``, but\nin the local time zone\n\n:param datetime.datetime time_point: Aware time point\n:return: Time point in local timezone\n:rtype: datetime.datetime", "id": "f2578:m3"} {"signature": "def predict(list_items):", "body": "return [i* for i in list_items]", "docstring": "Returns the double of the items", "id": "f2580:m0"} {"signature": "def predict(list_items):", "body": "return [i+ for i in list_items]", "docstring": "Returns items+10", "id": "f2581:m0"} {"signature": "@typeguard.typecheckeddef __init__(self, url: str):", "body": "self.url = urlparsed_url 
= urlparse(self.url)self.scheme = parsed_url.scheme if parsed_url.scheme else ''self.netloc = parsed_url.netlocself.path = parsed_url.pathself.filename = os.path.basename(self.path)", "docstring": "Construct a File object from a url string.\n\n Args:\n - url (string) : url string of the file e.g.\n - 'input.txt'\n - 'file:///scratch/proj101/input.txt'\n - 'globus://go#ep1/~/data/input.txt'\n - 'globus://ddb59aef-6d04-11e5-ba46-22000b92c6ec/home/johndoe/data/input.txt'", "id": "f2583:c0:m0"} {"signature": "@propertydef filepath(self):", "body": "if hasattr(self, ''):return self.local_pathif self.scheme in ['', '', '', '']:return self.filenameelif self.scheme in ['']:return self.pathelse:raise Exception(''.format(self.scheme))", "docstring": "Return the resolved filepath on the side where it is called from.\n\n The appropriate filepath will be returned when called from within\n an app running remotely as well as regular python on the client side.\n\n Args:\n - self\n Returns:\n - filepath (string)", "id": "f2583:c0:m5"} {"signature": "@classmethoddef get_data_manager(cls):", "body": "from parsl.dataflow.dflow import DataFlowKernelLoaderdfk = DataFlowKernelLoader.dfk()return dfk.executors['']", "docstring": "Return the DataManager of the currently loaded DataFlowKernel.", "id": "f2586:c0:m0"} {"signature": "def __init__(self, dfk, max_threads=):", "body": "self._scaling_enabled = Falseself.label = ''self.dfk = dfkself.max_threads = max_threadsself.globus = Noneself.managed = True", "docstring": "Initialize the DataManager.\n\n Args:\n - dfk (DataFlowKernel): The DataFlowKernel that this DataManager is managing data for.\n\n Kwargs:\n - max_threads (int): Number of threads. Default is 10.\n - executors (list of Executors): Executors for which data transfer will be managed.", "id": "f2586:c0:m1"} {"signature": "def submit(self, *args, **kwargs):", "body": "return self.executor.submit(*args, **kwargs)", "docstring": "Submit a staging app. 
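The File records (f2583:c0:m0 and f2583:c0:m5) wrap a URL string into scheme/netloc/path/filename attributes. A simplified sketch of the constructor, assuming 'file' as the fallback scheme (the original default literal was stripped):

import os
from urllib.parse import urlparse

class File:
    def __init__(self, url):
        # Split a URL such as 'globus://endpoint/home/user/data.txt' into parts.
        self.url = url
        parsed = urlparse(url)
        self.scheme = parsed.scheme if parsed.scheme else "file"  # assumed default
        self.netloc = parsed.netloc
        self.path = parsed.path
        self.filename = os.path.basename(self.path)

f = File("globus://ddb59aef-6d04-11e5-ba46-22000b92c6ec/home/johndoe/data/input.txt")
print(f.scheme, f.filename)  # globus input.txt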
All optimization should be here.", "id": "f2586:c0:m3"} {"signature": "def shutdown(self, block=False):", "body": "x = self.executor.shutdown(wait=block)logger.debug(\"\")return x", "docstring": "Shutdown the ThreadPool.\n\n Kwargs:\n - block (bool): To block for confirmations or not", "id": "f2586:c0:m6"} {"signature": "def stage_in(self, file, executor):", "body": "if file.scheme == '':working_dir = self.dfk.executors[executor].working_dirstage_in_app = self._ftp_stage_in_app(executor=executor)app_fut = stage_in_app(working_dir, outputs=[file])return app_fut._outputs[]elif file.scheme == '' or file.scheme == '':working_dir = self.dfk.executors[executor].working_dirstage_in_app = self._http_stage_in_app(executor=executor)app_fut = stage_in_app(working_dir, outputs=[file])return app_fut._outputs[]elif file.scheme == '':globus_ep = self._get_globus_endpoint(executor)stage_in_app = self._globus_stage_in_app()app_fut = stage_in_app(globus_ep, outputs=[file])return app_fut._outputs[]else:raise Exception(''.format(file.scheme))", "docstring": "Transport the file from the input source to the executor.\n\n This function returns a DataFuture.\n\n Args:\n - self\n - file (File) : file to stage in\n - executor (str) : an executor the file is going to be staged in to.\n If the executor argument is not specified for a file\n with 'globus' scheme, the file will be staged in to\n the first executor with the \"globus\" key in a config.", "id": "f2586:c0:m10"} {"signature": "def stage_out(self, file, executor):", "body": "if file.scheme == '' or file.scheme == '':raise Exception('')elif file.scheme == '':raise Exception('')elif file.scheme == '':globus_ep = self._get_globus_endpoint(executor)stage_out_app = self._globus_stage_out_app()return stage_out_app(globus_ep, inputs=[file])else:raise Exception(''.format(file.scheme))", "docstring": "Transport the file from the local filesystem to the remote Globus endpoint.\n\n This function returns a DataFuture.\n\n Args:\n - self\n - file (File) - file to stage out\n - executor (str) - Which executor the file is going to be staged out from.\n If the executor argument is not specified for a file\n with the 'globus' scheme, the file will be staged in to\n the first executor with the \"globus\" key in a config.", "id": "f2586:c0:m15"} {"signature": "@abstractmethoddef execute_wait(self, cmd, walltime=None, envs={}, *args, **kwargs):", "body": "pass", "docstring": "Executes the cmd, with a defined walltime.\n\n Args:\n - cmd (string): Command string to execute over the channel\n - walltime (int) : Timeout in seconds, optional\n\n KWargs:\n - envs (Dict[str, str]) : Environment variables to push to the remote side\n\n Returns:\n - (exit_code, stdout, stderr) (int, optional string, optional string)\n If the exit code is a failure code, the stdout and stderr return values\n may be None.", "id": "f2589:c0:m0"} {"signature": "@abstractpropertydef script_dir(self):", "body": "pass", "docstring": "This is a property. Returns the directory assigned for storing all internal scripts such as\n scheduler submit scripts. 
This is usually where error logs from the scheduler would reside on the\n channel destination side.\n\n Args:\n - None\n\n Returns:\n - Channel script dir", "id": "f2589:c0:m1"} {"signature": "@abstractmethoddef execute_no_wait(self, cmd, walltime, envs={}, *args, **kwargs):", "body": "pass", "docstring": "Execute asynchronousely without waiting for exitcode\n\n Args:\n - cmd (string): Command string to execute over the channel\n - walltime (int) : Timeout in seconds\n\n KWargs:\n - envs (dict) : Environment variables to push to the remote side\n\n Returns:\n - the type of return value is channel specific", "id": "f2589:c0:m2"} {"signature": "@abstractmethoddef push_file(self, source, dest_dir):", "body": "pass", "docstring": "Channel will take care of moving the file from source to the destination\n directory\n\n Args:\n source (string) : Full filepath of the file to be moved\n dest_dir (string) : Absolute path of the directory to move to\n\n Returns:\n destination_path (string)", "id": "f2589:c0:m3"} {"signature": "@abstractmethoddef close(self):", "body": "pass", "docstring": "Closes the channel. Clean out any auth credentials.\n\n Args:\n None\n\n Returns:\n Bool", "id": "f2589:c0:m4"} {"signature": "@abstractmethoddef makedirs(self, path, mode=, exist_ok=False):", "body": "pass", "docstring": "Create a directory.\n\n If intermediate directories do not exist, they will be created.\n\n Parameters\n ----------\n path : str\n Path of directory to create.\n mode : int\n Permissions (posix-style) for the newly-created directory.\n exist_ok : bool\n If False, raise an OSError if the target directory already exists.", "id": "f2589:c0:m5"} {"signature": "@abstractmethoddef isdir(self, path):", "body": "pass", "docstring": "Return true if the path refers to an existing directory.\n\n Parameters\n ----------\n path : str\n Path of directory to check.", "id": "f2589:c0:m6"} {"signature": "@abstractmethoddef abspath(self, path):", "body": "pass", "docstring": "Return the absolute path.\n\n Parameters\n ----------\n path : str\n Path for which the absolute path will be returned.", "id": "f2589:c0:m7"} {"signature": "def __init__(self, userhome=\"\", envs={}, script_dir=None, **kwargs):", "body": "self.userhome = os.path.abspath(userhome)self.hostname = \"\"self.envs = envslocal_env = os.environ.copy()self._envs = copy.deepcopy(local_env)self._envs.update(envs)self.script_dir = script_dir", "docstring": "Initialize the local channel. 
script_dir is required by set to a default.\n\n KwArgs:\n - userhome (string): (default='.') This is provided as a way to override and set a specific userhome\n - envs (dict) : A dictionary of env variables to be set when launching the shell\n - script_dir (string): Directory to place scripts", "id": "f2590:c0:m0"} {"signature": "def execute_wait(self, cmd, walltime=None, envs={}):", "body": "retcode = -stdout = Nonestderr = Nonecurrent_env = copy.deepcopy(self._envs)current_env.update(envs)try:proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE,cwd=self.userhome,env=current_env,shell=True)proc.wait(timeout=walltime)stdout = proc.stdout.read()stderr = proc.stderr.read()retcode = proc.returncodeexcept Exception as e:print(\"\".format(e))logger.warn(\"\", cmd, e)if retcode == :retcode = -return (retcode, None, None)return (retcode, stdout.decode(\"\"), stderr.decode(\"\"))", "docstring": "Synchronously execute a commandline string on the shell.\n\n Args:\n - cmd (string) : Commandline string to execute\n - walltime (int) : walltime in seconds, this is not really used now.\n\n Kwargs:\n - envs (dict) : Dictionary of env variables. This will be used\n to override the envs set at channel initialization.\n\n Returns:\n - retcode : Return code from the execution, -1 on fail\n - stdout : stdout string\n - stderr : stderr string\n\n Raises:\n None.", "id": "f2590:c0:m1"} {"signature": "def execute_no_wait(self, cmd, walltime, envs={}):", "body": "current_env = copy.deepcopy(self._envs)current_env.update(envs)try:proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE,cwd=self.userhome,env=current_env,shell=True,preexec_fn=os.setpgrp)pid = proc.pidexcept Exception as e:logger.warn(\"\", (cmd, e))raisereturn pid, proc", "docstring": "Synchronously execute a commandline string on the shell.\n\n Args:\n - cmd (string) : Commandline string to execute\n - walltime (int) : walltime in seconds, this is not really used now.\n\n Returns a tuple containing:\n\n - pid : process id\n - proc : a subprocess.Popen object\n\n Raises:\n None.", "id": "f2590:c0:m2"} {"signature": "def push_file(self, source, dest_dir):", "body": "local_dest = dest_dir + '' + os.path.basename(source)if os.path.dirname(source) != dest_dir:try:shutil.copyfile(source, local_dest)os.chmod(local_dest, )except OSError as e:raise FileCopyException(e, self.hostname)return local_dest", "docstring": "If the source files dirpath is the same as dest_dir, a copy\n is not necessary, and nothing is done. 
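The LocalChannel records above (f2590:c0:m1 and f2590:c0:m2) run shell commands through subprocess and return (retcode, stdout, stderr); their literals were stripped. A self-contained sketch of the synchronous case, using communicate() in place of the original wait()/read() sequence and assuming UTF-8 output:

import subprocess

def execute_wait(cmd, walltime=None, cwd=None, env=None):
    # Run a shell command, wait up to `walltime` seconds, and return
    # (returncode, stdout, stderr); -1 signals that execution itself failed.
    try:
        proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=cwd,
            env=env,
            shell=True,
        )
        stdout, stderr = proc.communicate(timeout=walltime)
        return proc.returncode, stdout.decode("utf-8"), stderr.decode("utf-8")
    except Exception:
        return -1, None, None

print(execute_wait("echo hello"))  # -> (0, 'hello\n', '')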
Else a copy is made.\n\n Args:\n - source (string) : Path to the source file\n - dest_dir (string) : Path to the directory to which the files is to be copied\n\n Returns:\n - destination_path (String) : Absolute path of the destination file\n\n Raises:\n - FileCopyException : If file copy failed.", "id": "f2590:c0:m3"} {"signature": "def close(self):", "body": "return False", "docstring": "There's nothing to close here, and this really doesn't do anything\n\n Returns:\n - False, because it really did not \"close\" this channel.", "id": "f2590:c0:m4"} {"signature": "def isdir(self, path):", "body": "return os.path.isdir(path)", "docstring": "Return true if the path refers to an existing directory.\n\n Parameters\n ----------\n path : str\n Path of directory to check.", "id": "f2590:c0:m5"} {"signature": "def makedirs(self, path, mode=, exist_ok=False):", "body": "return os.makedirs(path, mode, exist_ok)", "docstring": "Create a directory.\n\n If intermediate directories do not exist, they will be created.\n\n Parameters\n ----------\n path : str\n Path of directory to create.\n mode : int\n Permissions (posix-style) for the newly-created directory.\n exist_ok : bool\n If False, raise an OSError if the target directory already exists.", "id": "f2590:c0:m6"} {"signature": "def abspath(self, path):", "body": "return os.path.abspath(path)", "docstring": "Return the absolute path.\n\n Parameters\n ----------\n path : str\n Path for which the absolute path will be returned.", "id": "f2590:c0:m7"} {"signature": "def __init__(self, hostname, username=None, password=None, script_dir=None, envs=None, gssapi_auth=False, skip_auth=False, **kwargs):", "body": "self.hostname = hostnameself.username = usernameself.password = passwordself.kwargs = kwargsself.script_dir = script_dirself.skip_auth = skip_authself.gssapi_auth = gssapi_authif self.skip_auth:self.ssh_client = NoAuthSSHClient()else:self.ssh_client = paramiko.SSHClient()self.ssh_client.load_system_host_keys()self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())self.envs = {}if envs is not None:self.envs = envstry:self.ssh_client.connect(hostname,username=username,password=password,allow_agent=True,gss_auth=gssapi_auth,gss_kex=gssapi_auth,)t = self.ssh_client.get_transport()self.sftp_client = paramiko.SFTPClient.from_transport(t)except paramiko.BadHostKeyException as e:raise BadHostKeyException(e, self.hostname)except paramiko.AuthenticationException as e:raise AuthException(e, self.hostname)except paramiko.SSHException as e:raise SSHException(e, self.hostname)except Exception as e:raise SSHException(e, self.hostname)", "docstring": "Initialize a persistent connection to the remote system.\n We should know at this point whether ssh connectivity is possible\n\n Args:\n - hostname (String) : Hostname\n\n KWargs:\n - username (string) : Username on remote system\n - password (string) : Password for remote system\n - script_dir (string) : Full path to a script dir where\n generated scripts could be sent to.\n - envs (dict) : A dictionary of environment variables to be set when executing commands\n\n Raises:", "id": "f2592:c1:m0"} {"signature": "def execute_wait(self, cmd, walltime=, envs={}):", "body": "stdin, stdout, stderr = self.ssh_client.exec_command(self.prepend_envs(cmd, envs), bufsize=-, timeout=walltime)exit_status = stdout.channel.recv_exit_status()return exit_status, stdout.read().decode(\"\"), stderr.read().decode(\"\")", "docstring": "Synchronously execute a commandline string on the shell.\n\n Args:\n - cmd (string) : 
Commandline string to execute\n - walltime (int) : walltime in seconds\n\n Kwargs:\n - envs (dict) : Dictionary of env variables\n\n Returns:\n - retcode : Return code from the execution, -1 on fail\n - stdout : stdout string\n - stderr : stderr string\n\n Raises:\n None.", "id": "f2592:c1:m2"} {"signature": "def execute_no_wait(self, cmd, walltime=, envs={}):", "body": "stdin, stdout, stderr = self.ssh_client.exec_command(self.prepend_envs(cmd, envs), bufsize=-, timeout=walltime)return None, stdout, stderr", "docstring": "Execute asynchronousely without waiting for exitcode\n\n Args:\n - cmd (string): Commandline string to be executed on the remote side\n - walltime (int): timeout to exec_command\n\n KWargs:\n - envs (dict): A dictionary of env variables\n\n Returns:\n - None, stdout (readable stream), stderr (readable stream)\n\n Raises:\n - ChannelExecFailed (reason)", "id": "f2592:c1:m3"} {"signature": "def push_file(self, local_source, remote_dir):", "body": "remote_dest = remote_dir + '' + os.path.basename(local_source)try:self.makedirs(remote_dir, exist_ok=True)except IOError as e:logger.exception(\"\".format(local_source, remote_dir))if e.errno == :raise BadScriptPath(e, self.hostname)elif e.errno == :raise BadPermsScriptPath(e, self.hostname)else:logger.exception(\"\")raise FileCopyException(e, self.hostname)try:self.sftp_client.put(local_source, remote_dest, confirm=True)self.sftp_client.chmod(remote_dest, )except Exception as e:logger.exception(\"\".format(local_source, remote_dest))raise FileCopyException(e, self.hostname)return remote_dest", "docstring": "Transport a local file to a directory on a remote machine\n\n Args:\n - local_source (string): Path\n - remote_dir (string): Remote path\n\n Returns:\n - str: Path to copied file on remote machine\n\n Raises:\n - BadScriptPath : if script path on the remote side is bad\n - BadPermsScriptPath : You do not have perms to make the channel script dir\n - FileCopyException : FileCopy failed.", "id": "f2592:c1:m4"} {"signature": "def pull_file(self, remote_source, local_dir):", "body": "local_dest = local_dir + '' + os.path.basename(remote_source)try:os.makedirs(local_dir)except OSError as e:if e.errno != errno.EEXIST:logger.exception(\"\".format(script_dir))raise BadScriptPath(e, self.hostname)if os.path.exists(local_dest):logger.exception(\"\".format(local_dest))raise FileExists(None, self.hostname, filename=local_dest)try:self.sftp_client.get(remote_source, local_dest)except Exception as e:logger.exception(\"\")raise FileCopyException(e, self.hostname)return local_dest", "docstring": "Transport file on the remote side to a local directory\n\n Args:\n - remote_source (string): remote_source\n - local_dir (string): Local directory to copy to\n\n\n Returns:\n - str: Local path to file\n\n Raises:\n - FileExists : Name collision at local directory.\n - FileCopyException : FileCopy failed.", "id": "f2592:c1:m5"} {"signature": "def isdir(self, path):", "body": "result = Truetry:self.sftp_client.lstat(path)except FileNotFoundError:result = Falsereturn result", "docstring": "Return true if the path refers to an existing directory.\n\n Parameters\n ----------\n path : str\n Path of directory on the remote side to check.", "id": "f2592:c1:m7"} {"signature": "def makedirs(self, path, mode=, exist_ok=False):", "body": "if exist_ok is False and self.isdir(path):raise OSError(''.format(path))self.execute_wait(''.format(path))self.sftp_client.chmod(path, mode)", "docstring": "Create a directory on the remote side.\n\n If intermediate 
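The SSHChannel records above (f2592:c1:m4 and f2592:c1:m5) move files over SFTP with paramiko. A rough sketch of the push_file flow; the 0o700 mode and the host, user, and paths in the usage comment are assumptions, not values from the original:

import os

def push_file(sftp_client, local_source, remote_dir, mode=0o700):
    # Copy a local file into remote_dir over SFTP and chmod it
    # (0o700 is an assumed mode; the original literal was stripped).
    remote_dest = remote_dir + "/" + os.path.basename(local_source)
    sftp_client.put(local_source, remote_dest, confirm=True)
    sftp_client.chmod(remote_dest, mode)
    return remote_dest

# Hypothetical usage; host, user and paths are placeholders:
# import paramiko
# client = paramiko.SSHClient()
# client.load_system_host_keys()
# client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# client.connect("login.example.org", username="user")
# sftp = paramiko.SFTPClient.from_transport(client.get_transport())
# print(push_file(sftp, "submit.sh", "/home/user/scripts"))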
directories do not exist, they will be created.\n\n Parameters\n ----------\n path : str\n Path of directory on the remote side to create.\n mode : int\n Permissions (posix-style) for the newly-created directory.\n exist_ok : bool\n If False, raise an OSError if the target directory already exists.", "id": "f2592:c1:m8"} {"signature": "def abspath(self, path):", "body": "return self.sftp_client.normalize(path)", "docstring": "Return the absolute path on the remote side.\n\n Parameters\n ----------\n path : str\n Path for which the absolute path will be returned.", "id": "f2592:c1:m9"} {"signature": "def __init__(self, hostname, username=None, password=None, script_dir=None, envs=None, **kwargs):", "body": "self.hostname = hostnameself.username = usernameself.password = passwordself.kwargs = kwargsself.ssh_client = paramiko.SSHClient()self.ssh_client.load_system_host_keys()self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())self.script_dir = script_dirself.envs = {}if envs is not None:self.envs = envstry:self.ssh_client.connect(hostname, username=username, password=password, allow_agent=True)except Exception:logger.debug(\"\")pass''''''transport = self.ssh_client.get_transport()il_password = getpass.getpass(''.format(hostname))transport.auth_password(username, il_password)self.sftp_client = paramiko.SFTPClient.from_transport(transport)", "docstring": "Initialize a persistent connection to the remote system.\n We should know at this point whether ssh connectivity is possible\n\n Args:\n - hostname (String) : Hostname\n\n KWargs:\n - username (string) : Username on remote system\n - password (string) : Password for remote system\n - script_dir (string) : Full path to a script dir where\n generated scripts could be sent to.\n - envs (dict) : A dictionary of env variables to be set when executing commands\n\n Raises:", "id": "f2593:c0:m0"} {"signature": "def ping_time(ip, n=):", "body": "cmd = \"\".format(ip, n)p = subprocess.Popen(cmd.split(\"\"), stdout=subprocess.PIPE)output = str(p.communicate()[])stats = output.split(\"\")[-].split(\"\")[-].split(\"\")avg_ping_time = float(stats[]) return avg_ping_time * ", "docstring": "Returns the average ping time in microseconds.\n\nNote: This function is inherently platform specific.\nIt currently works on Midway.", "id": "f2620:m0"} {"signature": "def execute_task(f, args, kwargs, user_ns):", "body": "fname = getattr(f, '', '')prefix = \"\"fname = prefix + \"\"argname = prefix + \"\"kwargname = prefix + \"\"resultname = prefix + \"\"user_ns.update({fname: f,argname: args,kwargname: kwargs,resultname: resultname})code = \"\".format(resultname, fname,argname, kwargname)try:exec(code, user_ns, user_ns)except Exception as e:logger.warning(\"\".format(e))raise eelse:return user_ns.get(resultname)", "docstring": "Deserialize the buffer and execute the task.\n\nReturns the result or exception.", "id": "f2621:m0"} {"signature": "@App('')def succeed_on_retry(filename, success_on=, stdout=\"\"):", "body": "return \"\"\"\"\"\"", "docstring": "If the input file does not exist it creates it.\n Then, if the file contains success_on lines it exits with 0", "id": "f2647:m1"} {"signature": "def run_checkpointed(n=, mode=\"\"):", "body": "from parsl.tests.configs.local_threads import configconfig[\"\"][\"\"] = modedfk = DataFlowKernel(config=config)@App('', dfk, cache=True)def cached_rand(x):import randomreturn random.randint(, )@App('', dfk, cache=True)def cached_failing(x): / return items = []for i in range(, n):x = 
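The execute_task record above (f2621:m0) binds a deserialized function and its arguments into a user namespace and exec-s a small assignment so the result lands back in that namespace; the name-prefix literals were stripped. A minimal sketch of the same pattern with an assumed 'parsl_' prefix:

def execute_task(f, args, kwargs, user_ns):
    # Bind the callable and its arguments into user_ns, then exec the call so
    # that the result is stored back in the same namespace.
    prefix = "parsl_"
    fname, argname, kwargname, resultname = (prefix + n for n in ("f", "args", "kwargs", "result"))
    user_ns.update({fname: f, argname: args, kwargname: kwargs})
    code = "{} = {}(*{}, **{})".format(resultname, fname, argname, kwargname)
    exec(code, user_ns, user_ns)
    return user_ns[resultname]

print(execute_task(lambda x, y=1: x + y, (2,), {"y": 3}, {}))  # -> 5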
cached_failing()items.append(x)try:x.result()except Exception:print(\"\")passx = cached_rand()print(x.result())rundir = dfk.rundirif mode == \"\":dfk.cleanup()return rundir", "docstring": "This test runs n apps that will fail with Division by zero error,\n followed by 1 app that will succeed. The checkpoint should only have 1 task.", "id": "f2687:m0"} {"signature": "def run_checkpointed(n=, mode=\"\", sleep_dur=):", "body": "from parsl.tests.configs.local_threads import configconfig[''][''] = ''config[\"\"][\"\"] = modedfk = DataFlowKernel(config=config)@App('', dfk, cache=True)def cached_rand(x, sleep_dur=):import randomimport timetime.sleep(sleep_dur)return random.randint(, )items = []for i in range(, n):x = cached_rand(i, sleep_dur=sleep_dur)items.append(x)[i.result() for i in items]with open(\"\", '') as f:f.write(\"\")time.sleep()", "docstring": "This test runs n apps that will fail with Division by zero error,\n followed by 1 app that will succeed. The checkpoint should only have 1 task", "id": "f2689:m0"} {"signature": "@App('')def xor_split():", "body": "x = rand()if x.result() > :print(\"\")return else:print(\"\")return ", "docstring": "Test XOR split. Do A if x else B", "id": "f2693:m23"} {"signature": "def address_by_interface(ifname):", "body": "s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)return socket.inet_ntoa(fcntl.ioctl(s.fileno(),, struct.pack('', bytes(ifname[:], '')))[:])", "docstring": "Returns the IP address of the given interface name, e.g. 'eth0'\n\n Parameters\n ----------\n ifname : str\n Name of the interface whose address is to be returned. Required.\n\n Taken from this Stack Overflow answer: https://stackoverflow.com/questions/24196932/how-can-i-get-the-ip-address-of-eth0-in-python#24196955", "id": "f2749:m3"} {"signature": "def get_parsl_logger(logger_name='',is_logging_server=False,monitoring_config=None,**kwargs):", "body": "logger = logging.getLogger(logger_name)if monitoring_config is None:logger.addHandler(NullHandler())return loggerif monitoring_config.store is None:raise ValueError('')if is_logging_server:handler = DatabaseHandler(monitoring_config.store.connection_string)logger = logging.getLogger(logger_name)logger.setLevel(logging.INFO)logger.addHandler(handler)else:handler = RemoteHandler(monitoring_config.store.logging_server_host, monitoring_config.store.logging_server_port)logger = logging.getLogger(logger_name)logger.setLevel(logging.INFO)logger.addHandler(handler)return logger", "docstring": "Parameters\n----------\nlogger_name : str, optional\n Name of the logger to use. 
Prevents adding repeat handlers or incorrect handlers\nis_logging_server : Bool, optional\n Used internally to determine which handler to return when using local db logging\nmonitoring_config : MonitoringConfig, optional\n Pass in a logger class object to use for generating loggers.\n\nReturns\n-------\nlogging.logger object\n\nRaises\n------\nOptionalModuleMissing", "id": "f2750:m0"} {"signature": "def __init__(self,host=None,port=None,logging_server_host='',logging_server_port=):", "body": "self.host = hostself.port = portself.logging_server_host = logging_server_hostself.logging_server_port = logging_server_port", "docstring": "Parameters\n----------\nhost : str\n The hostname for running the visualization interface.\nport : int\n The port for the visualization interface.\nlogging_server_host : str\n The hostname for the logging server.\nlogging_server_port : int\n The port for the logging server.", "id": "f2750:c1:m0"} {"signature": "def __init__(self,connection_string=None, **kwargs):", "body": "super().__init__(**kwargs)self.connection_string = connection_string", "docstring": "Initializes a monitoring configuration class.\n\n Parameters\n ----------\n connection_string : str, optional\n Database connection string that defines how to connect to the database. If not set, DFK init will use a sqlite3\n database inside the rundir.", "id": "f2750:c2:m0"} {"signature": "def __init__(self,host='',port=):", "body": "self.host = hostself.port = port", "docstring": "Parameters\n----------\nhost : str\n The hostname for running the visualization interface.\nport : int\n The port for the visualization interface", "id": "f2750:c3:m0"} {"signature": "def __init__(self,store=None,visualization_server=None,monitoring_interval=,workflow_name=None,version=''):", "body": "self.store = storeself.visualization_server = visualization_serverself.version = versionself.monitoring_interval = monitoring_intervalself.workflow_name = workflow_nameself.dashboard_link = None", "docstring": "Initializes a monitoring configuration class.\n\n Parameters\n ----------\n\n monitoring_interval : float, optional\n The amount of time in seconds to sleep in between resource monitoring logs per task.\n workflow_name : str, optional\n Name to record as the workflow base name, defaults to the name of the parsl script file if left as None.\n version : str, optional\n Optional workflow identification to distinguish between workflows with the same name, not used internally only for display to user.\n\n\n Example\n -------\n .. 
code-block:: python\n\n import parsl\n from parsl.config import Config\n from parsl.executors.threads import ThreadPoolExecutor\n from parsl.monitoring.db_logger import MonitoringConfig\n\n config = Config(\n executors=[ThreadPoolExecutor()],\n monitoring_config=MonitoringConfig(\n MonitoringStore=DatabaseStore(\n connection_string='sqlite///monitoring.db'\n )\n VisualizationInterface=VisualizationInterface(\n host='http:localhost'\n port='9999'\n )\n )\n )\n parsl.load(config)", "id": "f2750:c4:m0"} {"signature": "def start_file_logger(filename, name='', level=logging.DEBUG, format_string=None):", "body": "if format_string is None:format_string = \"\"global loggerlogger = logging.getLogger(name)logger.setLevel(level)handler = logging.FileHandler(filename)handler.setLevel(level)formatter = logging.Formatter(format_string, datefmt='')handler.setFormatter(formatter)logger.addHandler(handler)return logger", "docstring": "Add a stream log handler.\n Parameters\n ---------\n filename: string\n Name of the file to write logs to. Required.\n name: string\n Logger name. Default=\"parsl.executors.interchange\"\n level: logging.LEVEL\n Set the logging level. Default=logging.DEBUG\n - format_string (string): Set the format string\n format_string: string\n Format string to use.\n Returns\n -------\n None.", "id": "f2762:m0"} {"signature": "def dbm_starter(priority_msgs, resource_msgs, *args, **kwargs):", "body": "dbm = DatabaseManager(*args, **kwargs)dbm.start(priority_msgs, resource_msgs)", "docstring": "Start the database manager process\n\n The DFK should start this function. The args, kwargs match that of the monitoring config", "id": "f2762:m1"} {"signature": "def start_file_logger(filename, name='', level=logging.DEBUG, format_string=None):", "body": "if format_string is None:format_string = \"\"logger = logging.getLogger(name)logger.setLevel(level)handler = logging.FileHandler(filename)handler.setLevel(level)formatter = logging.Formatter(format_string, datefmt='')handler.setFormatter(formatter)logger.addHandler(handler)return logger", "docstring": "Add a stream log handler.\n\n Parameters\n ---------\n\n filename: string\n Name of the file to write logs to. Required.\n name: string\n Logger name. Default=\"parsl.executors.interchange\"\n level: logging.LEVEL\n Set the logging level. 
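The start_file_logger records (f2762:m0 and f2764:m0) attach a FileHandler to a named logger; the default format string was stripped. A minimal sketch with an assumed format string and date format:

import logging

def start_file_logger(filename, name="parsl.monitoring", level=logging.DEBUG, format_string=None):
    # Attach a FileHandler to the named logger and return the configured logger.
    if format_string is None:
        # Assumed default; the original literal was lost in extraction.
        format_string = "%(asctime)s %(name)s:%(lineno)d [%(levelname)s] %(message)s"
    logger = logging.getLogger(name)
    logger.setLevel(level)
    handler = logging.FileHandler(filename)
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter(format_string, datefmt="%Y-%m-%d %H:%M:%S"))
    logger.addHandler(handler)
    return logger

log = start_file_logger("interchange.log")
log.info("logger configured")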
Default=logging.DEBUG\n - format_string (string): Set the format string\n format_string: string\n Format string to use.\n\n Returns\n -------\n None.", "id": "f2764:m0"} {"signature": "def monitor(pid, task_id, monitoring_hub_url, run_id, sleep_dur=):", "body": "import psutilradio = UDPRadio(monitoring_hub_url,source_id=task_id)simple = [\"\", '', '', '', '', '', '', '', '', '', '', '', '']summable_values = ['', '', '']pm = psutil.Process(pid)pm.cpu_percent()first_msg = Truewhile True:try:d = {\"\" + str(k): v for k, v in pm.as_dict().items() if k in simple}d[\"\"] = run_idd[\"\"] = task_idd[''] = sleep_durd[''] = first_msgd[''] = datetime.datetime.now()children = pm.children(recursive=True)d[\"\"] = psutil.cpu_count()d[''] = pm.memory_info().vmsd[''] = pm.memory_info().rssd[''] = pm.cpu_times().userd[''] = pm.cpu_times().systemd[''] = len(children)try:d[''] = pm.io_counters().write_bytesd[''] = pm.io_counters().read_bytesexcept psutil._exceptions.AccessDenied:d[''] = d[''] = for child in children:for k, v in child.as_dict(attrs=summable_values).items():d['' + str(k)] += vd[''] += child.cpu_times().userd[''] += child.cpu_times().systemd[''] += child.memory_info().vmsd[''] += child.memory_info().rsstry:d[''] += child.io_counters().write_bytesd[''] += child.io_counters().read_bytesexcept psutil._exceptions.AccessDenied:d[''] += d[''] += finally:radio.send(MessageType.TASK_INFO, task_id, d)time.sleep(sleep_dur)first_msg = False", "docstring": "Internal\n Monitors the Parsl task's resources by pointing psutil to the task's pid and watching it and its children.", "id": "f2764:m2"} {"signature": "def __init__(self, monitoring_url, source_id=None, timeout=):", "body": "self.monitoring_url = monitoring_urlself.sock_timeout = timeoutself.source_id = source_idtry:self.scheme, self.ip, port = (x.strip('') for x in monitoring_url.split(''))self.port = int(port)except Exception:raise Exception(\"\".format(monitoring_url))self.sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM,socket.IPPROTO_UDP) self.sock.settimeout(self.sock_timeout)", "docstring": "Parameters\n----------\n\nmonitoring_url : str\n URL of the form ://:\nmessage : py obj\n Python object to send, this will be pickled\nsource_id : str\n String identifier of the source\ntimeout : int\n timeout, default=10s", "id": "f2764:c0:m0"} {"signature": "def send(self, message_type, task_id, message):", "body": "x = try:buffer = pickle.dumps((self.source_id, int(time.time()), message_type,message))except Exception as e:print(\"\".format(e))returntry:x = self.sock.sendto(buffer, (self.ip, self.port))except socket.timeout:print(\"\")return Falsereturn x", "docstring": "Sends a message to the UDP receiver\n\n Parameter\n ---------\n\n message_type: monitoring.MessageType (enum)\n In this case message type is RESOURCE_INFO most often\n task_id: int\n Task identifier of the task for which resource monitoring is being reported\n message: object\n Arbitrary pickle-able object that is to be sent\n\n Returns:\n # bytes sent", "id": "f2764:c0:m1"} {"signature": "def __init__(self,hub_address,hub_port=None,hub_port_range=(, ),client_address=\"\",client_port_range=(, ),workflow_name=None,workflow_version=None,logging_endpoint='',logdir=None,logging_level=logging.INFO,resource_monitoring_enabled=True,resource_monitoring_interval=): ", "body": "self.logger = Noneself._dfk_channel = Noneif _db_manager_excepts:raise(_db_manager_excepts)self.client_address = client_addressself.client_port_range = client_port_rangeself.hub_address = hub_addressself.hub_port = 
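The UDPRadio records above (f2764:c0:m0 and f2764:c0:m1) pickle a (source_id, timestamp, message_type, message) tuple and push it over a UDP socket; the URL-parsing literals were stripped. A simplified sketch assuming a udp://host:port style monitoring URL:

import pickle
import socket
import time

class UDPRadio:
    def __init__(self, monitoring_url, source_id=None, timeout=10):
        # Expecting something like "udp://127.0.0.1:55055" (assumed URL form)
        self.source_id = source_id
        self.scheme, rest = monitoring_url.split("://")
        self.ip, port = rest.split(":")
        self.port = int(port)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        self.sock.settimeout(timeout)

    def send(self, message_type, task_id, message):
        # Returns the number of bytes sent, or False on timeout.
        buffer = pickle.dumps((self.source_id, int(time.time()), message_type, message))
        try:
            return self.sock.sendto(buffer, (self.ip, self.port))
        except socket.timeout:
            return False

radio = UDPRadio("udp://127.0.0.1:55055", source_id="task-0")
radio.send("TASK_INFO", 0, {"psutil_process_pid": 1234})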
hub_portself.hub_port_range = hub_port_rangeself.logging_endpoint = logging_endpointself.logdir = logdirself.logging_level = logging_levelself.workflow_name = workflow_nameself.workflow_version = workflow_versionself.resource_monitoring_enabled = resource_monitoring_enabledself.resource_monitoring_interval = resource_monitoring_interval", "docstring": "Update docs here.", "id": "f2764:c1:m0"} {"signature": "@staticmethoddef monitor_wrapper(f, task_id, monitoring_hub_url, run_id, sleep_dur):", "body": "def wrapped(*args, **kwargs):p = Process(target=monitor, args=(os.getpid(), task_id, monitoring_hub_url, run_id, sleep_dur))p.start()try:return f(*args, **kwargs)finally:p.terminate()p.join()return wrapped", "docstring": "Internal\n Wrap the Parsl app with a function that will call the monitor function and point it at the correct pid when the task begins.", "id": "f2764:c1:m5"} {"signature": "def __init__(self,hub_address,hub_port=None,hub_port_range=(, ),database=None, visualization_server=None, client_address=\"\",client_port=None,monitoring_hub_address=\"\",logdir=\"\",logging_level=logging.DEBUG,atexit_timeout= ):", "body": "try:os.makedirs(logdir)except FileExistsError:passself.logger = start_file_logger(\"\".format(logdir),name=\"\",level=logging_level)self.logger.debug(\"\")if not hub_port:self.logger.critical(\"\")self.hub_port = hub_portself.hub_address = hub_addressself.database = databaseself.visualization_server = visualization_serverself.atexit_timeout = atexit_timeoutself.loop_freq = self.logger.debug(\"\".format(hub_port))try:self.sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM,socket.IPPROTO_UDP)self.sock.bind(('', hub_port))self.sock.settimeout(self.loop_freq / )except OSError:self.logger.critical(\"\")self.hub_port = -self._context = zmq.Context()self.dfk_channel = self._context.socket(zmq.DEALER)self.dfk_channel.set_hwm()self.dfk_channel.RCVTIMEO = int(self.loop_freq) self.dfk_channel.connect(\"\".format(client_address, client_port))", "docstring": "Initializes a monitoring configuration class.\n\n Parameters\n ----------\n address : str\n IP address of the node on which the monitoring hub will run, this address must be\n reachable from the Parsl client as well as the worker nodes. Eg. ...\n\n port : int\n Used with Elasticsearch logging, the port of where to access Elasticsearch. 
Required when using logging_type = 'elasticsearch'.\n\n logging_endpoint : Endpoint object\n This is generally a database object to which logging data can be pushed to from the\n monitoring HUB.\n\n workflow_name : str, optional\n Name to record as the workflow base name, defaults to the name of the parsl script file if left as None.\n\n workflow_version : str, optional\n Optional workflow identification to distinguish between workflows with the same name, not used internally only for display to user.\n\n atexit_timeout : float, optional\n The amount of time in seconds to terminate the hub without receiving any messages, after the last dfk workflow message is received.", "id": "f2764:c2:m0"} {"signature": "def __init__(self, dfk, memoize=True, checkpoint={}):", "body": "self.dfk = dfkself.memoize = memoizeif self.memoize:logger.info(\"\")self.memo_lookup_table = checkpointelse:logger.info(\"\")self.memo_lookup_table = {}", "docstring": "Initialize the memoizer.\n\n Args:\n - dfk (DFK obj): The DFK object\n\n KWargs:\n - memoize (Bool): enable memoization or not.\n - checkpoint (Dict): A checkpoint loaded as a dict.", "id": "f2767:c0:m0"} {"signature": "def make_hash(self, task):", "body": "t = [serialize_object(task[''])[],serialize_object(task[''])[],serialize_object(task[''])[],serialize_object(task[''])[],serialize_object(task[''])[]]x = b''.join(t)hashedsum = hashlib.md5(x).hexdigest()return hashedsum", "docstring": "Create a hash of the task inputs.\n\n This uses a serialization library borrowed from ipyparallel.\n If this fails here, then all ipp calls are also likely to fail due to failure\n at serialization.\n\n Args:\n - task (dict) : Task dictionary from dfk.tasks\n\n Returns:\n - hash (str) : A unique hash string", "id": "f2767:c0:m1"} {"signature": "def check_memo(self, task_id, task):", "body": "if not self.memoize or not task['']:task[''] = Nonereturn None, Nonehashsum = self.make_hash(task)present = Falseresult = Noneif hashsum in self.memo_lookup_table:present = Trueresult = self.memo_lookup_table[hashsum]logger.info(\"\", task_id)task[''] = hashsumreturn present, result", "docstring": "Create a hash of the task and its inputs and check the lookup table for this hash.\n\n If present, the results are returned. 
The result is a tuple indicating whether a memo\n exists and the result, since a Null result is possible and could be confusing.\n This seems like a reasonable option without relying on an cache_miss exception.\n\n Args:\n - task(task) : task from the dfk.tasks table\n\n Returns:\n Tuple of the following:\n - present (Bool): Is this present in the memo_lookup_table\n - Result (Py Obj): Result of the function if present in table\n\n This call will also set task['hashsum'] to the unique hashsum for the func+inputs.", "id": "f2767:c0:m2"} {"signature": "def hash_lookup(self, hashsum):", "body": "return self.memo_lookup_table[hashsum]", "docstring": "Lookup a hash in the memoization table.\n\n Will raise a KeyError if hash is not in the memoization lookup table.\n\n Args:\n - hashsum (str?): The same hashes used to uniquely identify apps+inputs\n\n Returns:\n - Lookup result, this is unlikely to be None, since the hashes are set by this\n library and could not miss entried in it's dict.\n\n Raises:\n - KeyError: if hash not in table", "id": "f2767:c0:m3"} {"signature": "def update_memo(self, task_id, task, r):", "body": "if not self.memoize or not task['']:returnif task[''] in self.memo_lookup_table:logger.info('' %(task[''], task_id))self.memo_lookup_table[task['']] = relse:self.memo_lookup_table[task['']] = r", "docstring": "Updates the memoization lookup table with the result from a task.\n\n Args:\n - task_id (int): Integer task id\n - task (dict) : A task dict from dfk.tasks\n - r (Result future): Result future\n\n A warning is issued when a hash collision occurs during the update.\n This is not likely.", "id": "f2767:c0:m4"} {"signature": "def __init__(self, dfk):", "body": "self.dfk = dfkself.config = dfk.configself.executors = {}self.max_idletime = * for e in self.dfk.config.executors:self.executors[e.label] = {'': None, '': e.label}self.strategies = {None: self._strategy_noop, '': self._strategy_simple}self.strategize = self.strategies[self.config.strategy]self.logger_flag = Falseself.prior_loghandlers = set(logging.getLogger().handlers)logger.debug(\"\".format(self.config.strategy))", "docstring": "Initialize strategy.", "id": "f2768:c0:m0"} {"signature": "def _strategy_noop(self, tasks, *args, kind=None, **kwargs):", "body": "", "docstring": "Do nothing.\n\n Args:\n - tasks (task_ids): Not used here.\n\n KWargs:\n - kind (Not used)", "id": "f2768:c0:m2"} {"signature": "def unset_logging(self):", "body": "if self.logger_flag is True:returnroot_logger = logging.getLogger()for hndlr in root_logger.handlers:if hndlr not in self.prior_loghandlers:hndlr.setLevel(logging.ERROR)self.logger_flag = True", "docstring": "Mute newly added handlers to the root level, right after calling executor.status", "id": "f2768:c0:m3"} {"signature": "def _strategy_simple(self, tasks, *args, kind=None, **kwargs):", "body": "for label, executor in self.dfk.executors.items():if not executor.scaling_enabled:continueactive_tasks = executor.outstandingstatus = executor.status()self.unset_logging()min_blocks = executor.provider.min_blocksmax_blocks = executor.provider.max_blocksif isinstance(executor, IPyParallelExecutor):tasks_per_node = executor.workers_per_nodeelif isinstance(executor, HighThroughputExecutor):tasks_per_node = elif isinstance(executor, ExtremeScaleExecutor):tasks_per_node = executor.ranks_per_nodenodes_per_block = executor.provider.nodes_per_blockparallelism = executor.provider.parallelismrunning = sum([ for x in status if x == ''])submitting = sum([ for x in status if x == ''])pending = sum([ 
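The Memoizer records above (f2767:c0:m1 through f2767:c0:m4) hash a task's function and arguments to key a lookup table, returning a (present, result) pair so that a legitimately-None result is distinguishable from a cache miss. A simplified sketch with pickle standing in for Parsl's serialize_object helper and assumed task-dict keys:

import hashlib
import pickle

def make_hash(task):
    # Hash the function name and call arguments; pickle stands in for
    # the serialization helper used in the original code.
    parts = [pickle.dumps(task["func_name"]),
             pickle.dumps(task["args"]),
             pickle.dumps(sorted(task["kwargs"].items()))]
    return hashlib.md5(b"".join(parts)).hexdigest()

memo_lookup_table = {}

def check_memo(task):
    # Return (present, result) rather than relying on a cache-miss exception.
    hashsum = make_hash(task)
    task["hashsum"] = hashsum
    if hashsum in memo_lookup_table:
        return True, memo_lookup_table[hashsum]
    return False, None

task = {"func_name": "double", "args": (2,), "kwargs": {}}
print(check_memo(task))            # (False, None) on first sight
memo_lookup_table[task["hashsum"]] = 4
print(check_memo(task))            # (True, 4)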
for x in status if x == ''])active_blocks = running + submitting + pendingactive_slots = active_blocks * tasks_per_node * nodes_per_blockif hasattr(executor, ''):logger.debug(''.format(label, active_tasks, running, submitting, pending, executor.connected_workers))else:logger.debug(''.format(label, active_tasks, running, submitting, pending))if active_tasks > and self.executors[executor.label]['']:self.executors[executor.label][''] = Noneif active_tasks == :if active_blocks <= min_blocks:passelse:if not self.executors[executor.label]['']:logger.debug(\"\".format(label, self.max_idletime))self.executors[executor.label][''] = time.time()idle_since = self.executors[executor.label]['']if (time.time() - idle_since) > self.max_idletime:logger.debug(\"\".format(self.max_idletime, label))executor.scale_in(active_blocks - min_blocks)else:passelif (float(active_slots) / active_tasks) < parallelism:if active_blocks >= max_blocks:passelse:excess = math.ceil((active_tasks * parallelism) - active_slots)excess_blocks = math.ceil(float(excess) / (tasks_per_node * nodes_per_block))logger.debug(\"\".format(excess_blocks))executor.scale_out(excess_blocks)elif active_slots == and active_tasks > :logger.debug(\"\")executor.scale_out()else:pass", "docstring": "Peek at the DFK and the executors specified.\n\n We assume here that tasks are not held in a runnable\n state, and that all tasks from an app would be sent to\n a single specific executor, i.e tasks cannot be specified\n to go to one of more executors.\n\n Args:\n - tasks (task_ids): Not used here.\n\n KWargs:\n - kind (Not used)", "id": "f2768:c0:m4"} {"signature": "def __init__(self, config=Config()):", "body": "self.cleanup_called = Falseif isinstance(config, dict):raise ConfigurationError('''')self._config = configself.run_dir = make_rundir(config.run_dir)parsl.set_file_logger(\"\".format(self.run_dir), level=logging.DEBUG)logger.debug(\"\".format(config))logger.info(\"\".format(get_version()))self.checkpoint_lock = threading.Lock()self.usage_tracker = UsageTracker(self)self.usage_tracker.send_message()self.tasks_completed_count = self.tasks_failed_count = self.monitoring = config.monitoringif self.monitoring:if self.monitoring.logdir is None:self.monitoring.logdir = self.run_dirself.monitoring.start()self.time_began = datetime.datetime.now()self.time_completed = Noneself.run_id = str(uuid4())logger.info(\"\" + self.run_id)self.workflow_name = Noneif self.monitoring is not None and self.monitoring.workflow_name is not None:self.workflow_name = self.monitoring.workflow_nameelse:for frame in inspect.stack():fname = os.path.basename(str(frame.filename))parsl_file_names = ['']if fname not in parsl_file_names:self.workflow_name = fnamebreakself.workflow_version = str(self.time_began)if self.monitoring is not None and self.monitoring.workflow_version is not None:self.workflow_version = self.monitoring.workflow_versionworkflow_info = {'': \"\".format(sys.version_info.major,sys.version_info.minor,sys.version_info.micro),'': get_version(),\"\": self.time_began,'': None,'': None,'': self.run_id,'': self.workflow_name,'': self.workflow_version,'': self.run_dir,'': self.tasks_completed_count,'': self.tasks_failed_count,'': getuser(),'': gethostname(),}if self.monitoring:self.monitoring.send(MessageType.WORKFLOW_INFO,workflow_info)checkpoints = self.load_checkpoints(config.checkpoint_files)self.memoizer = Memoizer(self, memoize=config.app_cache, checkpoint=checkpoints)self.checkpointed_tasks = self._checkpoint_timer = Noneself.checkpoint_mode = 
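The simple strategy record above (f2768:c0:m4) scales out when the ratio of active slots to outstanding tasks drops below the provider's parallelism; its numeric literals were stripped. A worked sketch of just that arithmetic, with illustrative numbers:

import math

def blocks_to_add(active_tasks, active_blocks, tasks_per_node, nodes_per_block,
                  parallelism, max_blocks):
    # Scale out only when the slots-to-tasks ratio falls below `parallelism`
    # and the block cap has not been reached.
    active_slots = active_blocks * tasks_per_node * nodes_per_block
    if active_tasks == 0 or active_blocks >= max_blocks:
        return 0
    if active_slots and (active_slots / active_tasks) >= parallelism:
        return 0
    excess = math.ceil(active_tasks * parallelism - active_slots)
    return math.ceil(excess / (tasks_per_node * nodes_per_block))

# 40 outstanding tasks, 1 running block of 4 slots, parallelism 1.0:
print(blocks_to_add(40, 1, 4, 1, 1.0, max_blocks=10))  # -> 9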
config.checkpoint_modeself.data_manager = DataManager(self, max_threads=config.data_management_max_threads)self.executors = {}self.add_executors(config.executors + [self.data_manager])if self.checkpoint_mode == \"\":try:h, m, s = map(int, config.checkpoint_period.split(''))checkpoint_period = (h * ) + (m * ) + sself._checkpoint_timer = Timer(self.checkpoint, interval=checkpoint_period)except Exception:logger.error(\"\".format(config.checkpoint_period))self._checkpoint_timer = Timer(self.checkpoint, interval=( * ))if any([x.managed for x in config.executors]):self.flowcontrol = FlowControl(self)else:self.flowcontrol = FlowNoControl(self)self.task_count = self.tasks = {}self.submitter_lock = threading.Lock()atexit.register(self.atexit_cleanup)", "docstring": "Initialize the DataFlowKernel.\n\n Parameters\n ----------\n config : Config\n A specification of all configuration options. For more details see the\n :class:~`parsl.config.Config` documentation.", "id": "f2769:c0:m0"} {"signature": "def _create_task_log_info(self, task_id, fail_mode=None):", "body": "info_to_monitor = ['', '', '', '', '','', '', '', '', '', '']task_log_info = {\"\" + k: self.tasks[task_id][k] for k in info_to_monitor}task_log_info[''] = self.run_idtask_log_info[''] = datetime.datetime.now()task_log_info[''] = self.tasks[task_id][''].nametask_log_info[''] = self.tasks_failed_counttask_log_info[''] = self.tasks_completed_counttask_log_info[''] = str(self.tasks[task_id][''].get('', None))task_log_info[''] = str(self.tasks[task_id][''].get('', None))task_log_info[''] = self.tasks[task_id][''].get('', None)task_log_info[''] = self.tasks[task_id][''].get('', None)task_log_info[''] = Noneif self.tasks[task_id][''] is not None:task_log_info[''] = \"\".join([str(t._tid) for t in self.tasks[task_id]['']])task_log_info[''] = Noneif self.tasks[task_id][''] is not None:task_log_info[''] = (self.tasks[task_id][''] -self.tasks[task_id]['']).total_seconds()if fail_mode is not None:task_log_info[''] = fail_modereturn task_log_info", "docstring": "Create the dictionary that will be included in the log.", "id": "f2769:c0:m1"} {"signature": "def _count_deps(self, depends):", "body": "count = for dep in depends:if isinstance(dep, Future):if not dep.done():count += return count", "docstring": "Internal.\n\n Count the number of unresolved futures in the list depends.", "id": "f2769:c0:m2"} {"signature": "@propertydef config(self):", "body": "return self._config", "docstring": "Returns the fully initialized config that the DFK is actively using.\n\n DO *NOT* update.\n\n Returns:\n - config (dict)", "id": "f2769:c0:m3"} {"signature": "def handle_exec_update(self, task_id, future):", "body": "try:res = future.result()if isinstance(res, RemoteExceptionWrapper):res.reraise()except Exception:logger.exception(\"\".format(task_id))self.tasks[task_id][''].append(future._exception)self.tasks[task_id][''] += if not self._config.lazy_errors:logger.debug(\"\")self.tasks[task_id][''] = States.failedif self.monitoring:task_log_info = self._create_task_log_info(task_id, '')self.monitoring.send(MessageType.TASK_INFO, task_log_info)returnif self.tasks[task_id][''] <= self._config.retries:self.tasks[task_id][''] = States.pendinglogger.debug(\"\".format(task_id))else:logger.info(\"\".format(task_id,self._config.retries))self.tasks[task_id][''] = States.failedself.tasks_failed_count += self.tasks[task_id][''] = datetime.datetime.now()else:self.tasks[task_id][''] = States.doneself.tasks_completed_count += logger.info(\"\".format(task_id))self.tasks[task_id][''] 
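The _count_deps record above (f2769:c0:m2) counts dependencies that are still unresolved Futures. A tiny sketch of that check:

from concurrent.futures import Future

def count_unresolved(depends):
    # A dependency still blocks launch only if it is a Future that has not completed.
    return sum(1 for dep in depends if isinstance(dep, Future) and not dep.done())

done_fu, pending_fu = Future(), Future()
done_fu.set_result(42)
print(count_unresolved([done_fu, pending_fu, "plain-value"]))  # -> 1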
= datetime.datetime.now()if self.monitoring:task_log_info = self._create_task_log_info(task_id, '')self.monitoring.send(MessageType.TASK_INFO, task_log_info)if self.tasks[task_id][''] == States.pending:self.launch_if_ready(task_id)return", "docstring": "This function is called only as a callback from an execution\n attempt reaching a final state (either successfully or failing).\n\n It will launch retries if necessary, and update the task\n structure.\n\n Args:\n task_id (string) : Task id which is a uuid string\n future (Future) : The future object corresponding to the task which\n makes this callback\n\n KWargs:\n memo_cbk(Bool) : Indicates that the call is coming from a memo update,\n that does not require additional memo updates.", "id": "f2769:c0:m4"} {"signature": "def handle_app_update(self, task_id, future, memo_cbk=False):", "body": "if not self.tasks[task_id][''].done():logger.error(\"\".format(task_id))if not self.tasks[task_id][''] == future:logger.error(\"\".format(task_id))if not memo_cbk:self.memoizer.update_memo(task_id, self.tasks[task_id], future)if self.checkpoint_mode == '':self.checkpoint(tasks=[task_id])if (self.tasks[task_id][''] andself.tasks[task_id][''].done() andself.tasks[task_id][''].exception() is None andself.tasks[task_id][''] != '' andself.tasks[task_id][''] != '' andself.tasks[task_id][''] != ''):for dfu in self.tasks[task_id][''].outputs:f = dfu.file_objif isinstance(f, File) and f.is_remote():self.data_manager.stage_out(f, self.tasks[task_id][''])return", "docstring": "This function is called as a callback when an AppFuture\n is in its final state.\n\n It will trigger post-app processing such as checkpointing\n and stageout.\n\n Args:\n task_id (string) : Task id\n future (Future) : The relevant app future (which should be\n consistent with the task structure 'app_fu' entry\n\n KWargs:\n memo_cbk(Bool) : Indicates that the call is coming from a memo update,\n that does not require additional memo updates.", "id": "f2769:c0:m5"} {"signature": "def launch_if_ready(self, task_id):", "body": "if self._count_deps(self.tasks[task_id]['']) == :new_args, kwargs, exceptions = self.sanitize_and_wrap(task_id,self.tasks[task_id][''],self.tasks[task_id][''])self.tasks[task_id][''] = new_argsself.tasks[task_id][''] = kwargsif not exceptions:exec_fu = Nonewith self.tasks[task_id]['']:if self.tasks[task_id][''] == States.pending:exec_fu = self.launch_task(task_id, self.tasks[task_id][''], *new_args, **kwargs)if exec_fu:try:exec_fu.add_done_callback(partial(self.handle_exec_update, task_id))except Exception as e:logger.error(\"\".format(e))self.tasks[task_id][''] = exec_futry:self.tasks[task_id][''].update_parent(exec_fu)self.tasks[task_id][''] = exec_fuexcept AttributeError as e:logger.error(\"\".format(task_id))raise eelse:logger.info(\"\".format(task_id))self.tasks[task_id][''] = States.dep_failif self.monitoring is not None:task_log_info = self._create_task_log_info(task_id, '')self.monitoring.send(MessageType.TASK_INFO, task_log_info)try:fu = Future()fu.retries_left = self.tasks[task_id][''] = fuself.tasks[task_id][''].update_parent(fu)fu.set_exception(DependencyError(exceptions,task_id,None))except AttributeError as e:logger.error(\"\".format(task_id))raise e", "docstring": "launch_if_ready will launch the specified task, if it is ready\nto run (for example, without dependencies, and in pending state).\n\nThis should be called by any piece of the DataFlowKernel that\nthinks a task may have become ready to run.\n\nIt is not an error to call launch_if_ready on a task 
that is not\nready to run - launch_if_ready will not incorrectly launch that\ntask.\n\nlaunch_if_ready is thread safe, so may be called from any thread\nor callback.", "id": "f2769:c0:m6"} {"signature": "def launch_task(self, task_id, executable, *args, **kwargs):", "body": "self.tasks[task_id][''] = datetime.datetime.now()hit, memo_fu = self.memoizer.check_memo(task_id, self.tasks[task_id])if hit:logger.info(\"\".format(task_id))return memo_fuexecutor_label = self.tasks[task_id][\"\"]try:executor = self.executors[executor_label]except Exception:logger.exception(\"\".format(task_id, executor_label, self._config))if self.monitoring is not None and self.monitoring.resource_monitoring_enabled:executable = self.monitoring.monitor_wrapper(executable, task_id,self.monitoring.monitoring_hub_url,self.run_id,self.monitoring.resource_monitoring_interval)with self.submitter_lock:exec_fu = executor.submit(executable, *args, **kwargs)self.tasks[task_id][''] = States.launchedif self.monitoring is not None:task_log_info = self._create_task_log_info(task_id, '')self.monitoring.send(MessageType.TASK_INFO, task_log_info)exec_fu.retries_left = self._config.retries -self.tasks[task_id]['']logger.info(\"\".format(task_id, executor.label))return exec_fu", "docstring": "Handle the actual submission of the task to the executor layer.\n\n If the app task has the executors attributes not set (default=='all')\n the task is launched on a randomly selected executor from the\n list of executors. This behavior could later be updated to support\n binding to executors based on user specified criteria.\n\n If the app task specifies a particular set of executors, it will be\n targeted at those specific executors.\n\n Args:\n task_id (uuid string) : A uuid string that uniquely identifies the task\n executable (callable) : A callable object\n args (list of positional args)\n kwargs (arbitrary keyword arguments)\n\n\n Returns:\n Future that tracks the execution of the submitted executable", "id": "f2769:c0:m7"} {"signature": "def _add_input_deps(self, executor, args, kwargs):", "body": "if executor == '':return args, kwargsinputs = kwargs.get('', [])for idx, f in enumerate(inputs):if isinstance(f, File) and f.is_remote():inputs[idx] = self.data_manager.stage_in(f, executor)for kwarg, f in kwargs.items():if isinstance(f, File) and f.is_remote():kwargs[kwarg] = self.data_manager.stage_in(f, executor)newargs = list(args)for idx, f in enumerate(newargs):if isinstance(f, File) and f.is_remote():newargs[idx] = self.data_manager.stage_in(f, executor)return tuple(newargs), kwargs", "docstring": "Look for inputs of the app that are remote files. 
Submit stage_in\n apps for such files and replace the file objects in the inputs list with\n corresponding DataFuture objects.\n\n Args:\n - executor (str) : executor where the app is going to be launched\n - args (List) : Positional args to app function\n - kwargs (Dict) : Kwargs to app function", "id": "f2769:c0:m8"} {"signature": "def _gather_all_deps(self, args, kwargs):", "body": "depends = []count = for dep in args:if isinstance(dep, Future):if self.tasks[dep.tid][''] not in FINAL_STATES:count += depends.extend([dep])for key in kwargs:dep = kwargs[key]if isinstance(dep, Future):if self.tasks[dep.tid][''] not in FINAL_STATES:count += depends.extend([dep])for dep in kwargs.get('', []):if isinstance(dep, Future):if self.tasks[dep.tid][''] not in FINAL_STATES:count += depends.extend([dep])return count, depends", "docstring": "Count the number of unresolved futures on which a task depends.\n\n Args:\n - args (List[args]) : The list of args list to the fn\n - kwargs (Dict{kwargs}) : The dict of all kwargs passed to the fn\n\n Returns:\n - count, [list of dependencies]", "id": "f2769:c0:m9"} {"signature": "def sanitize_and_wrap(self, task_id, args, kwargs):", "body": "dep_failures = []new_args = []for dep in args:if isinstance(dep, Future):try:new_args.extend([dep.result()])except Exception as e:if self.tasks[dep.tid][''] in FINAL_FAILURE_STATES:dep_failures.extend([e])else:new_args.extend([dep])for key in kwargs:dep = kwargs[key]if isinstance(dep, Future):try:kwargs[key] = dep.result()except Exception as e:if self.tasks[dep.tid][''] in FINAL_FAILURE_STATES:dep_failures.extend([e])if '' in kwargs:new_inputs = []for dep in kwargs['']:if isinstance(dep, Future):try:new_inputs.extend([dep.result()])except Exception as e:if self.tasks[dep.tid][''] in FINAL_FAILURE_STATES:dep_failures.extend([e])else:new_inputs.extend([dep])kwargs[''] = new_inputsreturn new_args, kwargs, dep_failures", "docstring": "This function should be called **ONLY** when all the futures we track have been resolved.\n\n If the user hid futures a level below, we will not catch\n it, and will (most likely) result in a type error.\n\n Args:\n task_id (uuid str) : Task id\n func (Function) : App function\n args (List) : Positional args to app function\n kwargs (Dict) : Kwargs to app function\n\n Return:\n partial function evaluated with all dependencies in args, kwargs and kwargs['inputs'] evaluated.", "id": "f2769:c0:m10"} {"signature": "def submit(self, func, *args, executors='', fn_hash=None, cache=False, **kwargs):", "body": "if self.cleanup_called:raise ValueError(\"\")task_id = self.task_countself.task_count += if isinstance(executors, str) and executors.lower() == '':choices = list(e for e in self.executors if e != '')elif isinstance(executors, list):choices = executorsexecutor = random.choice(choices)args, kwargs = self._add_input_deps(executor, args, kwargs)task_def = {'': None,'': executor,'': func,'': func.__name__,'': args,'': kwargs,'': fn_hash,'': cache,'': None,'': None,'': None,'': ,'': [],'': None,'': States.unsched,'': task_id,'': None,'': None,'': None}if task_id in self.tasks:raise DuplicateTaskError(\"\".format(task_id))else:self.tasks[task_id] = task_defdep_cnt, depends = self._gather_all_deps(args, kwargs)self.tasks[task_id][''] = dependstask_stdout = kwargs.get('')task_stderr = kwargs.get('')logger.info(\"\".format(task_id,task_def[''],[fu.tid for fu in depends]))self.tasks[task_id][''] = threading.Lock()app_fu = AppFuture(tid=task_id,stdout=task_stdout,stderr=task_stderr)self.tasks[task_id][''] = 
app_fuapp_fu.add_done_callback(partial(self.handle_app_update, task_id))self.tasks[task_id][''] = States.pendinglogger.debug(\"\".format(task_id, task_def['']))for d in depends:def callback_adapter(dep_fut):self.launch_if_ready(task_id)try:d.add_done_callback(callback_adapter)except Exception as e:logger.error(\"\".format(e))self.launch_if_ready(task_id)return task_def['']", "docstring": "Add task to the dataflow system.\n\n If the app task has the executors attributes not set (default=='all')\n the task will be launched on a randomly selected executor from the\n list of executors. If the app task specifies a particular set of\n executors, it will be targeted at the specified executors.\n\n >>> IF all deps are met:\n >>> send to the runnable queue and launch the task\n >>> ELSE:\n >>> post the task in the pending queue\n\n Args:\n - func : A function object\n - *args : Args to the function\n\n KWargs :\n - executors (list or string) : List of executors this call could go to.\n Default='all'\n - fn_hash (Str) : Hash of the function and inputs\n Default=None\n - cache (Bool) : To enable memoization or not\n - kwargs (dict) : Rest of the kwargs to the fn passed as dict.\n\n Returns:\n (AppFuture) [DataFutures,]", "id": "f2769:c0:m11"} {"signature": "def wait_for_current_tasks(self):", "body": "logger.info(\"\")for task_id in self.tasks:fut = self.tasks[task_id]['']if not fut.done():logger.debug(\"\".format(task_id))fut.exception()logger.info(\"\")", "docstring": "Waits for all tasks in the task list to be completed, by waiting for their\n AppFuture to be completed. This method will not necessarily wait for any tasks\n added after cleanup has started (such as data stageout?)", "id": "f2769:c0:m15"} {"signature": "def cleanup(self):", "body": "logger.info(\"\")if self.cleanup_called:raise Exception(\"\")self.cleanup_called = Trueself.log_task_states()if self.checkpoint_mode is not None:self.checkpoint()if self._checkpoint_timer:logger.info(\"\")self._checkpoint_timer.close()self.usage_tracker.send_message()self.usage_tracker.close()logger.info(\"\")self.flowcontrol.close()for executor in self.executors.values():if executor.managed:if executor.scaling_enabled:job_ids = executor.provider.resources.keys()executor.scale_in(len(job_ids))executor.shutdown()self.time_completed = datetime.datetime.now()if self.monitoring:self.monitoring.send(MessageType.WORKFLOW_INFO,{'': self.tasks_failed_count,'': self.tasks_completed_count,\"\": self.time_began,'': self.time_completed,'': (self.time_completed - self.time_began).total_seconds(),'': self.run_id, '': self.run_dir})self.monitoring.close()\"\"\"\"\"\"logger.info(\"\")", "docstring": "DataFlowKernel cleanup.\n\n This involves killing resources explicitly and sending die messages to IPP workers.\n\n If the executors are managed (created by the DFK), then we call scale_in on each of\n the executors and call executor.shutdown. 
Otherwise, we do nothing, and executor\n cleanup is left to the user.", "id": "f2769:c0:m16"} {"signature": "def checkpoint(self, tasks=None):", "body": "with self.checkpoint_lock:checkpoint_queue = Noneif tasks:checkpoint_queue = taskselse:checkpoint_queue = self.taskscheckpoint_dir = ''.format(self.run_dir)checkpoint_dfk = checkpoint_dir + ''checkpoint_tasks = checkpoint_dir + ''if not os.path.exists(checkpoint_dir):try:os.makedirs(checkpoint_dir)except FileExistsError:passwith open(checkpoint_dfk, '') as f:state = {'': self.run_dir,'': self.task_count}pickle.dump(state, f)count = with open(checkpoint_tasks, '') as f:for task_id in checkpoint_queue:if not self.tasks[task_id][''] andself.tasks[task_id][''].done() andself.tasks[task_id][''].exception() is None:hashsum = self.tasks[task_id]['']if not hashsum:continuet = {'': hashsum,'': None,'': None}try:r = self.memoizer.hash_lookup(hashsum).result()except Exception as e:t[''] = eelse:t[''] = rpickle.dump(t, f)count += self.tasks[task_id][''] = Truelogger.debug(\"\".format(task_id))self.checkpointed_tasks += countif count == :if self.checkpointed_tasks == :logger.warn(\"\")else:logger.debug(\"\")else:logger.info(\"\".format(count))return checkpoint_dir", "docstring": "Checkpoint the dfk incrementally to a checkpoint file.\n\n When called, every task that has been completed yet not\n checkpointed is checkpointed to a file.\n\n Kwargs:\n - tasks (List of task ids) : List of task ids to checkpoint. Default=None\n if set to None, we iterate over all tasks held by the DFK.\n\n .. note::\n Checkpointing only works if memoization is enabled\n\n Returns:\n Checkpoint dir if checkpoints were written successfully.\n By default the checkpoints are written to the RUNDIR of the current\n run under RUNDIR/checkpoints/{tasks.pkl, dfk.pkl}", "id": "f2769:c0:m17"} {"signature": "def _load_checkpoints(self, checkpointDirs):", "body": "memo_lookup_table = {}for checkpoint_dir in checkpointDirs:logger.info(\"\".format(checkpoint_dir))checkpoint_file = os.path.join(checkpoint_dir, '')try:with open(checkpoint_file, '') as f:while True:try:data = pickle.load(f)memo_fu = Future()if data['']:memo_fu.set_exception(data[''])else:memo_fu.set_result(data[''])memo_lookup_table[data['']] = memo_fuexcept EOFError:breakexcept FileNotFoundError:reason = \"\".format(checkpoint_file)logger.error(reason)raise BadCheckpoint(reason)except Exception:reason = \"\".format(checkpoint_file)logger.error(reason)raise BadCheckpoint(reason)logger.info(\"\".format(checkpoint_file,len(memo_lookup_table.keys())))return memo_lookup_table", "docstring": "Load a checkpoint file into a lookup table.\n\n The data being loaded from the pickle file mostly contains input\n attributes of the task: func, args, kwargs, env...\n To simplify the check of whether the exact task has been completed\n in the checkpoint, we hash these input params and use it as the key\n for the memoized lookup table.\n\n Args:\n - checkpointDirs (list) : List of filepaths to checkpoints\n Eg. 
['runinfo/001', 'runinfo/002']\n\n Returns:\n - memoized_lookup_table (dict)", "id": "f2769:c0:m18"} {"signature": "def load_checkpoints(self, checkpointDirs):", "body": "self.memo_lookup_table = Noneif not checkpointDirs:return {}if type(checkpointDirs) is not list:raise BadCheckpoint(\"\")return self._load_checkpoints(checkpointDirs)", "docstring": "Load checkpoints from the checkpoint files into a dictionary.\n\n The results are used to pre-populate the memoizer's lookup_table\n\n Kwargs:\n - checkpointDirs (list) : List of run folder to use as checkpoints\n Eg. ['runinfo/001', 'runinfo/002']\n\n Returns:\n - dict containing, hashed -> future mappings", "id": "f2769:c0:m19"} {"signature": "@classmethoddef clear(cls):", "body": "cls._dfk = None", "docstring": "Clear the active DataFlowKernel so that a new one can be loaded.", "id": "f2769:c1:m0"} {"signature": "@classmethod@typeguard.typecheckeddef load(cls, config: Optional[Config] = None):", "body": "if cls._dfk is not None:raise RuntimeError('')if config is None:cls._dfk = DataFlowKernel(Config())else:cls._dfk = DataFlowKernel(config)return cls._dfk", "docstring": "Load a DataFlowKernel.\n\n Args:\n - config (Config) : Configuration to load. This config will be passed to a\n new DataFlowKernel instantiation which will be set as the active DataFlowKernel.\n Returns:\n - DataFlowKernel : The loaded DataFlowKernel object.", "id": "f2769:c1:m1"} {"signature": "@classmethoddef wait_for_current_tasks(cls):", "body": "cls.dfk().wait_for_current_tasks()", "docstring": "Waits for all tasks in the task list to be completed, by waiting for their\n AppFuture to be completed. This method will not necessarily wait for any tasks\n added after cleanup has started such as data stageout.", "id": "f2769:c1:m2"} {"signature": "@classmethoddef dfk(cls):", "body": "if cls._dfk is None:raise RuntimeError('')return cls._dfk", "docstring": "Return the currently-loaded DataFlowKernel.", "id": "f2769:c1:m3"} {"signature": "def __init__(self, dfk, *args, threshold=, interval=):", "body": "pass", "docstring": "Initialize the flowcontrol object. 
This does nothing.\n\n Args:\n - dfk (DataFlowKernel) : DFK object to track parsl progress\n\n KWargs:\n - threshold (int) : Tasks after which the callback is triggered\n - interval (int) : seconds after which timer expires", "id": "f2770:c0:m0"} {"signature": "def notify(self, event_id):", "body": "pass", "docstring": "This notify fn does nothing.", "id": "f2770:c0:m1"} {"signature": "def close(self):", "body": "pass", "docstring": "This close fn does nothing.", "id": "f2770:c0:m2"} {"signature": "def __init__(self, dfk, *args, threshold=, interval=):", "body": "self.dfk = dfkself.threshold = thresholdself.interval = intervalself.cb_args = argsself.strategy = Strategy(dfk)self.callback = self.strategy.strategizeself._handle = Noneself._event_count = self._event_buffer = []self._wake_up_time = time.time() + self._kill_event = threading.Event()self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))self._thread.daemon = Trueself._thread.start()", "docstring": "Initialize the flowcontrol object.\n\n We start the timer thread here\n\n Args:\n - dfk (DataFlowKernel) : DFK object to track parsl progress\n\n KWargs:\n - threshold (int) : Tasks after which the callback is triggered\n - interval (int) : seconds after which timer expires", "id": "f2770:c1:m0"} {"signature": "def _wake_up_timer(self, kill_event):", "body": "while True:prev = self._wake_up_timetime_to_die = kill_event.wait(float(max(prev - time.time(), )))if time_to_die:returnif prev == self._wake_up_time:self.make_callback(kind='')else:print(\"\")", "docstring": "Internal. This is the function that the thread will execute.\n waits on an event so that the thread can make a quick exit when close() is called\n\n Args:\n - kill_event (threading.Event) : Event to wait on", "id": "f2770:c1:m1"} {"signature": "def notify(self, event_id):", "body": "self._event_buffer.extend([event_id])self._event_count += if self._event_count >= self.threshold:logger.debug(\"\")self.make_callback(kind=\"\")", "docstring": "Let the FlowControl system know that there is an event.", "id": "f2770:c1:m2"} {"signature": "def make_callback(self, kind=None):", "body": "self._wake_up_time = time.time() + self.intervalself.callback(tasks=self._event_buffer, kind=kind)self._event_buffer = []", "docstring": "Makes the callback and resets the timer.\n\n KWargs:\n - kind (str): Default=None, used to pass information on what\n triggered the callback", "id": "f2770:c1:m3"} {"signature": "def close(self):", "body": "self._kill_event.set()self._thread.join()", "docstring": "Merge the threads and terminate.", "id": "f2770:c1:m4"} {"signature": "def __init__(self, callback, *args, interval=):", "body": "self.interval = intervalself.cb_args = argsself.callback = callbackself._wake_up_time = time.time() + self._kill_event = threading.Event()self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))self._thread.daemon = Trueself._thread.start()", "docstring": "Initialize the flowcontrol object\n We start the timer thread here\n\n Args:\n - dfk (DataFlowKernel) : DFK object to track parsl progress\n\n KWargs:\n - threshold (int) : Tasks after which the callback is triggered\n - interval (int) : seconds after which timer expires", "id": "f2770:c2:m0"} {"signature": "def _wake_up_timer(self, kill_event):", "body": "while True:prev = self._wake_up_timetime_to_die = kill_event.wait(float(max(prev - time.time(), )))if time_to_die:returnif prev == self._wake_up_time:self.make_callback(kind='')else:print(\"\")", "docstring": 
"Internal. This is the function that the thread will execute.\n waits on an event so that the thread can make a quick exit when close() is called\n\n Args:\n - kill_event (threading.Event) : Event to wait on", "id": "f2770:c2:m1"} {"signature": "def make_callback(self, kind=None):", "body": "self._wake_up_time = time.time() + self.intervalself.callback(*self.cb_args)", "docstring": "Makes the callback and resets the timer.", "id": "f2770:c2:m2"} {"signature": "def close(self):", "body": "self._kill_event.set()self._thread.join()", "docstring": "Merge the threads and terminate.", "id": "f2770:c2:m3"} {"signature": "def async_process(fn):", "body": "def run(*args, **kwargs):proc = mp.Process(target=fn, args=args, kwargs=kwargs)proc.start()return procreturn run", "docstring": "Decorator function to launch a function as a separate process", "id": "f2771:m0"} {"signature": "@async_processdef udp_messenger(domain_name, UDP_IP, UDP_PORT, sock_timeout, message):", "body": "try:if message is None:raise ValueError(\"\")encoded_message = bytes(message, \"\")if encoded_message is None:raise ValueError(\"\")if domain_name:try:UDP_IP = socket.gethostbyname(domain_name)except Exception:passif UDP_IP is None:raise Exception(\"\")if UDP_PORT is None:raise Exception(\"\")sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.settimeout(sock_timeout)sock.sendto(bytes(message, \"\"), (UDP_IP, UDP_PORT))sock.close()except socket.timeout:logger.debug(\"\")except OSError as e:logger.debug(\"\".format(e))except Exception as e:logger.debug(\"\".format(e))", "docstring": "Send UDP messages to usage tracker asynchronously\n\n This multiprocessing based messenger was written to overcome the limitations\n of signalling/terminating a thread that is blocked on a system call. This\n messenger is created as a separate process, and initialized with 2 queues,\n to_send to receive messages to be sent to the internet.\n\n Args:\n - domain_name (str) : Domain name string\n - UDP_IP (str) : IP address YYY.YYY.YYY.YYY\n - UDP_PORT (int) : UDP port to send out on\n - sock_timeout (int) : Socket timeout\n - to_send (multiprocessing.Queue) : Queue of outgoing messages to internet", "id": "f2771:m1"} {"signature": "def __init__(self, dfk, ip='', port=,domain_name=''):", "body": "self.domain_name = domain_nameself.ip = ipself.sock_timeout = self.UDP_PORT = portself.UDP_IP = Noneself.procs = []self.dfk = dfkself.config = self.dfk.configself.uuid = str(uuid.uuid4())self.parsl_version = PARSL_VERSIONself.python_version = \"\".format(sys.version_info.major,sys.version_info.minor,sys.version_info.micro)self.test_mode, self.tracking_enabled = self.check_tracking_enabled()logger.debug(\"\".format(self.tracking_enabled))logger.debug(\"\".format(self.test_mode))self.initialized = False", "docstring": "Initialize usage tracking unless the user has opted-out.\n\n We will try to resolve the hostname specified in kwarg:domain_name\n and if that fails attempt to use the kwarg:ip. 
Determining the\n IP and sending message is threaded to avoid slowing down DFK\n initialization.\n\n Tracks usage stats by inspecting the internal state of the dfk.\n\n Args:\n - dfk (DFK object) : Data Flow Kernel object\n\n KWargs:\n - ip (string) : IP address\n - port (int) : Port number, Default:50077\n - domain_name (string) : Domain name, will override IP\n Default: tracking.parsl-project.org", "id": "f2771:c0:m0"} {"signature": "def check_tracking_enabled(self):", "body": "track = True test = False testvar = str(os.environ.get(\"\", '')).lower()if testvar == '':test = Trueif not self.config.usage_tracking:track = Falseenvvar = str(os.environ.get(\"\", True)).lower()if envvar == \"\":track = Falsereturn test, track", "docstring": "By default tracking is enabled.\n\n If Test mode is set via env variable PARSL_TESTING, a test flag is set\n\n Tracking is disabled if :\n 1. config[\"globals\"][\"usageTracking\"] is set to False (Bool)\n 2. Environment variable PARSL_TRACKING is set to false (case insensitive)", "id": "f2771:c0:m1"} {"signature": "def construct_start_message(self):", "body": "uname = getpass.getuser().encode('')hashed_username = hashlib.sha256(uname).hexdigest()[:]hname = socket.gethostname().encode('')hashed_hostname = hashlib.sha256(hname).hexdigest()[:]message = {'': self.uuid,'': hashed_username,'': hashed_hostname,'': self.test_mode,'': self.parsl_version,'': self.python_version,'': platform.system(),'': platform.release(),'': time.time()}return json.dumps(message)", "docstring": "Collect preliminary run info at the start of the DFK.\n\n Returns :\n - Message dict dumped as json string, ready for UDP", "id": "f2771:c0:m2"} {"signature": "def construct_end_message(self):", "body": "app_count = self.dfk.task_countsite_count = len([x for x in self.dfk.config.executors if x.managed])app_fails = len([t for t in self.dfk.tasks ifself.dfk.tasks[t][''] in FINAL_FAILURE_STATES])message = {'': self.uuid,'': time.time(),'': app_count,'': site_count,'': None,'': app_fails,'': self.test_mode,}return json.dumps(message)", "docstring": "Collect the final run information at the time of DFK cleanup.\n\n Returns:\n - Message dict dumped as json string, ready for UDP", "id": "f2771:c0:m3"} {"signature": "def send_UDP_message(self, message):", "body": "x = if self.tracking_enabled:try:proc = udp_messenger(self.domain_name, self.UDP_IP, self.UDP_PORT, self.sock_timeout, message)self.procs.append(proc)except Exception as e:logger.debug(\"\".format(e))else:x = -return x", "docstring": "Send UDP message.", "id": "f2771:c0:m4"} {"signature": "def send_message(self):", "body": "start = time.time()message = Noneif not self.initialized:message = self.construct_start_message()self.initialized = Trueelse:message = self.construct_end_message()self.send_UDP_message(message)end = time.time()return end - start", "docstring": "Send message over UDP.\n\n If tracking is disabled, the bytes_sent will always be set to -1\n\n Returns:\n (bytes_sent, time_taken)", "id": "f2771:c0:m5"} {"signature": "def close(self):", "body": "for proc in self.procs:proc.terminate()", "docstring": "We terminate (SIGTERM) the processes added to the self.procs list", "id": "f2771:c0:m7"} {"signature": "def __init__(self, tid=None, stdout=None, stderr=None):", "body": "self._tid = tidsuper().__init__()self.parent = Noneself._update_lock = threading.Lock()self._outputs = []self._stdout = stdoutself._stderr = stderr", "docstring": "Initialize the AppFuture.\n\n Args:\n\n KWargs:\n - tid (Int) : Task id should be any unique 
identifier. Now Int.\n - stdout (str) : Stdout file of the app.\n Default: None\n - stderr (str) : Stderr file of the app.\n Default: None", "id": "f2773:c0:m0"} {"signature": "def parent_callback(self, executor_fu):", "body": "with self._update_lock:if not executor_fu.done():raise ValueError(\"\")if executor_fu != self.parent:if executor_fu.exception() is None and not isinstance(executor_fu.result(), RemoteExceptionWrapper):raise ValueError(\"\")try:res = executor_fu.result()if isinstance(res, RemoteExceptionWrapper):res.reraise()super().set_result(executor_fu.result())except Exception as e:if executor_fu.retries_left > :passelse:super().set_exception(e)", "docstring": "Callback from a parent future to update the AppFuture.\n\n Used internally by AppFuture, and should not be called by code using AppFuture.\n\n Args:\n - executor_fu (Future): Future returned by the executor along with callback.\n This may not be the current parent future, as the parent future may have\n already been updated to point to a retrying execution, and in that case,\n this is logged.\n\n In the case that a new parent has been attached, we must immediately discard\n this result no matter what it contains (although it might be interesting\n to log if it was successful...)\n\n Returns:\n - None\n\n Updates the super() with the result() or exception()", "id": "f2773:c0:m1"} {"signature": "def update_parent(self, fut):", "body": "self.parent = futtry:fut.add_done_callback(self.parent_callback)except Exception as e:logger.error(\"\".format(e))", "docstring": "Add a callback to the parent to update the state.\n\n This handles the case where the user has called result on the AppFuture\n before the parent exists.", "id": "f2773:c0:m5"} {"signature": "def make_rundir(path):", "body": "try:if not os.path.exists(path):os.makedirs(path)prev_rundirs = glob(os.path.join(path, \"\"))current_rundir = os.path.join(path, '')if prev_rundirs:x = sorted([int(os.path.basename(x)) for x in prev_rundirs])[-]current_rundir = os.path.join(path, ''.format(x + ))os.makedirs(current_rundir)logger.debug(\"\".format(current_rundir))return os.path.abspath(current_rundir)except Exception as e:logger.error(\"\")logger.error(\"\".format(e))raise", "docstring": "When a path has not been specified, make the run directory.\n\n Creates a rundir with the following hierarchy:\n ./runinfo <- Home of all run directories\n |----000\n |----001 <- Directories for each run\n | ....\n |----NNN\n\n Kwargs:\n - path (str): String path to a specific run dir\n Default : None.", "id": "f2774:m0"} {"signature": "def remote_side_bash_executor(func, *args, **kwargs):", "body": "import osimport timeimport subprocessimport loggingimport parsl.app.errors as pelogging.basicConfig(filename=''.format(time.time()), level=logging.DEBUG)func_name = func.__name__partial_cmdline = Nonetry:partial_cmdline = func(*args, **kwargs)executable = partial_cmdline.format(*args, **kwargs)except AttributeError as e:if partial_cmdline is not None:raise pe.AppBadFormatting(\"\".format(func_name, e))else:raise pe.BashAppNoReturn(\"\".format(func_name, e), None)except IndexError as e:raise pe.AppBadFormatting(\"\".format(func_name, e))except Exception as e:logging.error(\"\".format(func_name, e))raise elogging.debug(\"\", executable)def open_std_fd(fdname):stdfspec = kwargs.get(fdname) if stdfspec is None:return Noneelif isinstance(stdfspec, str):fname = stdfspecmode = ''elif isinstance(stdfspec, tuple):if len(stdfspec) != :raise pe.BadStdStreamFile(\"\" % (fdname, len(stdfspec)), 
TypeError(''))fname, mode = stdfspecelse:raise pe.BadStdStreamFile(\"\" % (fdname, str(type(stdfspec))), TypeError(''))try:fd = open(fname, mode)except Exception as e:raise pe.BadStdStreamFile(fname, e)return fdstd_out = open_std_fd('')std_err = open_std_fd('')timeout = kwargs.get('')returncode = Nonetry:proc = subprocess.Popen(executable, stdout=std_out, stderr=std_err, shell=True, executable='')proc.wait(timeout=timeout)returncode = proc.returncodeexcept subprocess.TimeoutExpired:raise pe.AppTimeout(\"\".format(func_name, timeout))except Exception as e:raise pe.AppException(\"\".format(func_name, proc.returncode), e)if returncode != :raise pe.AppFailure(\"\".format(func_name, proc.returncode), proc.returncode)missing = []for outputfile in kwargs.get('', []):fpath = outputfileif type(outputfile) != str:fpath = outputfile.filepathif not os.path.exists(fpath):missing.extend([outputfile])if missing:raise pe.MissingOutputs(\"\".format(func_name), missing)return returncode", "docstring": "Execute the bash app type function and return the command line string.\n\n This string is reformatted with the *args, and **kwargs\n from call time.", "id": "f2777:m0"} {"signature": "def __call__(self, *args, **kwargs):", "body": "self.kwargs.update(kwargs)if self.data_flow_kernel is None:dfk = DataFlowKernelLoader.dfk()else:dfk = self.data_flow_kernelapp_fut = dfk.submit(wrap_error(remote_side_bash_executor), self.func, *args,executors=self.executors,fn_hash=self.func_hash,cache=self.cache,**self.kwargs)out_futs = [DataFuture(app_fut, o, tid=app_fut.tid)for o in kwargs.get('', [])]app_fut._outputs = out_futsreturn app_fut", "docstring": "Handle the call to a Bash app.\n\n Args:\n - Arbitrary\n\n Kwargs:\n - Arbitrary\n\n Returns:\n If outputs=[...] was a kwarg then:\n App_fut, [Data_Futures...]\n else:\n App_fut", "id": "f2777:c0:m1"} {"signature": "def __call__(self, *args, **kwargs):", "body": "if self.data_flow_kernel is None:dfk = DataFlowKernelLoader.dfk()else:dfk = self.data_flow_kernelapp_fut = dfk.submit(self.func, *args,executors=self.executors,fn_hash=self.func_hash,cache=self.cache,**kwargs)out_futs = [DataFuture(app_fut, o, tid=app_fut.tid)for o in kwargs.get('', [])]app_fut._outputs = out_futsreturn app_fut", "docstring": "This is where the call to a python app is handled.\n\n Args:\n - Arbitrary\n Kwargs:\n - Arbitrary\n\n Returns:\n If outputs=[...] 
was a kwarg then:\n App_fut, [Data_Futures...]\n else:\n App_fut", "id": "f2778:c0:m1"} {"signature": "def parent_callback(self, parent_fu):", "body": "if parent_fu.done() is True:e = parent_fu._exceptionif e:super().set_exception(e)else:super().set_result(self.file_obj)return", "docstring": "Callback from executor future to update the parent.\n\n Args:\n - parent_fu (Future): Future returned by the executor along with callback\n\n Returns:\n - None\n\n Updates the super() with the result() or exception()", "id": "f2779:c0:m0"} {"signature": "def __init__(self, fut, file_obj, tid=None):", "body": "super().__init__()self._tid = tidif isinstance(file_obj, str):self.file_obj = File(file_obj)elif isinstance(file_obj, File):self.file_obj = file_objelse:raise ValueError(\"\")self.parent = futself._exception = Noneif fut is None:logger.debug(\"\")self.set_result(self.file_obj)else:if isinstance(fut, Future):self.parent.add_done_callback(self.parent_callback)else:raise NotFutureError(\"\")logger.debug(\"\", self.parent)logger.debug(\"\", self.filepath)", "docstring": "Construct the DataFuture object.\n\n If the file_obj is a string convert to a File.\n\n Args:\n - fut (AppFuture) : AppFuture that this DataFuture will track\n - file_obj (string/File obj) : Something representing file(s)\n\n Kwargs:\n - tid (task_id) : Task id that this DataFuture tracks", "id": "f2779:c0:m1"} {"signature": "@propertydef tid(self):", "body": "return self._tid", "docstring": "Returns the task_id of the task that will resolve this DataFuture.", "id": "f2779:c0:m2"} {"signature": "@propertydef filepath(self):", "body": "return self.file_obj.filepath", "docstring": "Filepath of the File object this datafuture represents.", "id": "f2779:c0:m3"} {"signature": "@propertydef filename(self):", "body": "return self.filepath", "docstring": "Filepath of the File object this datafuture represents.", "id": "f2779:c0:m4"} {"signature": "def App(apptype, data_flow_kernel=None, walltime=, cache=False, executors=''):", "body": "from parsl.app.python import PythonAppfrom parsl.app.bash import BashApplogger.warning(\"\")if apptype == '':app_class = PythonAppelif apptype == '':app_class = BashAppelse:raise InvalidAppTypeError(\"\".format(apptype))def wrapper(f):return app_class(f,data_flow_kernel=data_flow_kernel,walltime=walltime,cache=cache,executors=executors)return wrapper", "docstring": "The App decorator function.\n\n Args:\n - apptype (string) : Apptype can be bash|python\n\n Kwargs:\n - data_flow_kernel (DataFlowKernel): The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for\n managing this app. This can be omitted only\n after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`.\n - walltime (int) : Walltime for app in seconds,\n default=60\n - executors (str|list) : Labels of the executors that this app can execute over. 
Default is 'all'.\n - cache (Bool) : Enable caching of the app call\n default=False\n\n Returns:\n A PythonApp or BashApp object, which when called runs the apps through the executor.", "id": "f2780:m0"} {"signature": "def python_app(function=None, data_flow_kernel=None, walltime=, cache=False, executors=''):", "body": "from parsl.app.python import PythonAppdef decorator(func):def wrapper(f):return PythonApp(f,data_flow_kernel=data_flow_kernel,walltime=walltime,cache=cache,executors=executors)return wrapper(func)if function is not None:return decorator(function)return decorator", "docstring": "Decorator function for making python apps.\n\n Parameters\n ----------\n function : function\n Do not pass this keyword argument directly. This is needed in order to allow for omitted parenthesis,\n for example, `@python_app` if using all defaults or `@python_app(walltime=120)`. If the\n decorator is used alone, function will be the actual function being decorated, whereas if it\n is called with arguments, function will be None. Default is None.\n data_flow_kernel : DataFlowKernel\n The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can\n be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. Default is None.\n walltime : int\n Walltime for app in seconds. Default is 60.\n executors : string or list\n Labels of the executors that this app can execute over. Default is 'all'.\n cache : bool\n Enable caching of the app call. Default is False.", "id": "f2780:m1"} {"signature": "def bash_app(function=None, data_flow_kernel=None, walltime=, cache=False, executors=''):", "body": "from parsl.app.bash import BashAppdef decorator(func):def wrapper(f):return BashApp(f,data_flow_kernel=data_flow_kernel,walltime=walltime,cache=cache,executors=executors)return wrapper(func)if function is not None:return decorator(function)return decorator", "docstring": "Decorator function for making bash apps.\n\n Parameters\n ----------\n function : function\n Do not pass this keyword argument directly. This is needed in order to allow for omitted parenthesis,\n for example, `@bash_app` if using all defaults or `@bash_app(walltime=120)`. If the\n decorator is used alone, function will be the actual function being decorated, whereas if it\n is called with arguments, function will be None. Default is None.\n data_flow_kernel : DataFlowKernel\n The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can\n be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. Default is None.\n walltime : int\n Walltime for app in seconds. Default is 60.\n executors : string or list\n Labels of the executors that this app can execute over. Default is 'all'.\n cache : bool\n Enable caching of the app call. 
Default is False.", "id": "f2780:m2"} {"signature": "def __init__(self, func, data_flow_kernel=None, walltime=, executors='', cache=False):", "body": "self.__name__ = func.__name__self.func = funcself.data_flow_kernel = data_flow_kernelself.status = ''self.executors = executorsself.cache = cacheif not (isinstance(executors, list) or isinstance(executors, str)):logger.error(\"\".format(func.__name__))if cache is True:try:self.fn_source = getsource(func)except OSError:logger.debug(\"\")self.fn_source = func.__name__self.func_hash = md5(self.fn_source.encode('')).hexdigest()else:self.func_hash = func.__name__params = signature(func).parametersself.kwargs = {}if '' in params:self.kwargs[''] = params[''].defaultif '' in params:self.kwargs[''] = params[''].defaultself.outputs = params[''].default if '' in params else []self.inputs = params[''].default if '' in params else []", "docstring": "Construct the App object.\n\n Args:\n - func (function): Takes the function to be made into an App\n\n Kwargs:\n - data_flow_kernel (DataFlowKernel): The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for\n managing this app. This can be omitted only\n after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`.\n - walltime (int) : Walltime in seconds for the app execution.\n - executors (str|list) : Labels of the executors that this app can execute over. Default is 'all'.\n - cache (Bool) : Enable caching of this app ?\n\n Returns:\n - App object.", "id": "f2780:c0:m0"} {"signature": "@typeguard.typecheckeddef set_stream_logger(name: str = '', level: int = logging.DEBUG, format_string: Optional[str] = None):", "body": "if format_string is None:format_string = \"\"logger = logging.getLogger(name)logger.setLevel(logging.DEBUG)handler = logging.StreamHandler()handler.setLevel(level)formatter = logging.Formatter(format_string, datefmt='')handler.setFormatter(formatter)logger.addHandler(handler)futures_logger = logging.getLogger(\"\")futures_logger.addHandler(handler)", "docstring": "Add a stream log handler.\n\n Args:\n - name (string) : Set the logger name.\n - level (logging.LEVEL) : Set to logging.DEBUG by default.\n - format_string (string) : Set to None by default.\n\n Returns:\n - None", "id": "f2781:m0"} {"signature": "@typeguard.typecheckeddef set_file_logger(filename: str, name: str = '', level: int = logging.DEBUG, format_string: Optional[str] = None):", "body": "if format_string is None:format_string = \"\"logger = logging.getLogger(name)logger.setLevel(logging.DEBUG)handler = logging.FileHandler(filename)handler.setLevel(level)formatter = logging.Formatter(format_string, datefmt='')handler.setFormatter(formatter)logger.addHandler(handler)futures_logger = logging.getLogger(\"\")futures_logger.addHandler(handler)", "docstring": "Add a stream log handler.\n\n Args:\n - filename (string): Name of the file to write logs to\n - name (string): Logger name\n - level (logging.LEVEL): Set the logging level.\n - format_string (string): Set the format string\n\n Returns:\n - None", "id": "f2781:m1"} {"signature": "def initialize_boto_client(self):", "body": "self.session = self.create_session()self.client = self.session.client('')self.ec2 = self.session.resource('')self.instances = []self.instance_states = {}self.vpc_id = self.sg_id = self.sn_ids = []", "docstring": "Initialize the boto client.", "id": "f2782:c0:m1"} {"signature": "def read_state_file(self, state_file):", "body": "try:fh = open(state_file, '')state = json.load(fh)self.vpc_id = state['']self.sg_id = state['']self.sn_ids = 
state['']self.instances = state['']except Exception as e:logger.debug(\"\".format(e))raise elogger.debug(\"\")", "docstring": "Read the state file, if it exists.\n\n If this script has been run previously, resource IDs will have been written to a\n state file. On starting a run, a state file will be looked for before creating new\n infrastructure. Information on VPCs, security groups, and subnets are saved, as\n well as running instances and their states.\n\n AWS has a maximum number of VPCs per region per account, so we do not want to\n clutter users' AWS accounts with security groups and VPCs that will be used only\n once.", "id": "f2782:c0:m2"} {"signature": "def write_state_file(self):", "body": "fh = open('', '')state = {}state[''] = self.vpc_idstate[''] = self.sg_idstate[''] = self.sn_idsstate[''] = self.instancesstate[\"\"] = self.instance_statesfh.write(json.dumps(state, indent=))", "docstring": "Save information that must persist to a file.\n\n We do not want to create a new VPC and new identical security groups, so we save\n information about them in a file between runs.", "id": "f2782:c0:m3"} {"signature": "def create_session(self):", "body": "session = Noneif self.key_file is not None:credfile = os.path.expandvars(os.path.expanduser(self.key_file))try:with open(credfile, '') as f:creds = json.load(f)except json.JSONDecodeError as e:logger.error(\"\".format(self.label, credfile))raise eexcept Exception as e:logger.debug(\"\".format(self.label, credfile))raise elogger.debug(\"\".format(self.label))session = boto3.session.Session(region_name=self.region, **creds)elif self.profile is not None:logger.debug(\"\".format(self.label))session = boto3.session.Session(profile_name=self.profile, region_name=self.region)else:logger.debug(\"\".format(self.label))session = boto3.session.Session(region_name=self.region)return session", "docstring": "Create a session.\n\n First we look in self.key_file for a path to a json file with the\n credentials. 
The key file should have 'AWSAccessKeyId' and 'AWSSecretKey'.\n\n Next we look at self.profile for a profile name and try\n to use the Session call to automatically pick up the keys for the profile from\n the user default keys file ~/.aws/config.\n\n Finally, boto3 will look for the keys in environment variables:\n AWS_ACCESS_KEY_ID: The access key for your AWS account.\n AWS_SECRET_ACCESS_KEY: The secret key for your AWS account.\n AWS_SESSION_TOKEN: The session key for your AWS account.\n This is only needed when you are using temporary credentials.\n The AWS_SECURITY_TOKEN environment variable can also be used,\n but is only supported for backwards compatibility purposes.\n AWS_SESSION_TOKEN is supported by multiple AWS SDKs besides python.", "id": "f2782:c0:m4"} {"signature": "def create_vpc(self):", "body": "try:vpc = self.ec2.create_vpc(CidrBlock='',AmazonProvidedIpv6CidrBlock=False,)except Exception as e:logger.error(\"\".format(e))raise einternet_gateway = self.ec2.create_internet_gateway()internet_gateway.attach_to_vpc(VpcId=vpc.vpc_id) self.internet_gateway = internet_gateway.idroute_table = self.config_route_table(vpc, internet_gateway)self.route_table = route_table.idavailability_zones = self.client.describe_availability_zones()for num, zone in enumerate(availability_zones['']):if zone[''] == \"\":subnet = vpc.create_subnet(CidrBlock=''.format( * num), AvailabilityZone=zone[''])subnet.meta.client.modify_subnet_attribute(SubnetId=subnet.id, MapPublicIpOnLaunch={\"\": True})route_table.associate_with_subnet(SubnetId=subnet.id)self.sn_ids.append(subnet.id)else:logger.info(\"\".format(zone['']))self.security_group(vpc)self.vpc_id = vpc.idreturn vpc", "docstring": "Create and configure VPC\n\n We create a VPC with CIDR 10.0.0.0/16, which provides up to 64,000 instances.\n\n We attach a subnet for each availability zone within the region specified in the\n config. We give each subnet an ip range like 10.0.X.0/20, which is large enough\n for approx. 4000 instances.\n\n Security groups are configured in function security_group.", "id": "f2782:c0:m5"} {"signature": "def security_group(self, vpc):", "body": "sg = vpc.create_security_group(GroupName=\"\", Description=\"\")ip_ranges = [{'': ''}]in_permissions = [{'': '','': ,'': ,'': ip_ranges,}, {'': '','': ,'': ,'': ip_ranges,}, {'': '','': -,'': -,'': [{'': ''}],}, {'': '','': ,'': ,'': [{'': ''}],}]out_permissions = [{'': '','': ,'': ,'': [{'': ''}],},{'': '','': ,'': ,'': ip_ranges,},{'': '','': ,'': ,'': ip_ranges,},]sg.authorize_ingress(IpPermissions=in_permissions)sg.authorize_egress(IpPermissions=out_permissions)self.sg_id = sg.idreturn sg", "docstring": "Create and configure a new security group.\n\n Allows all ICMP in, all TCP and UDP in within VPC.\n\n This security group is very open. It allows all incoming ping requests on all\n ports. It also allows all outgoing traffic on all ports. 
This can be limited by\n changing the allowed port ranges.\n\n Parameters\n ----------\n vpc : VPC instance\n VPC in which to set up security group.", "id": "f2782:c0:m6"} {"signature": "def config_route_table(self, vpc, internet_gateway):", "body": "route_table = vpc.create_route_table()route_table.create_route(DestinationCidrBlock='', GatewayId=internet_gateway.internet_gateway_id)return route_table", "docstring": "Configure route table for Virtual Private Cloud (VPC).\n\n Parameters\n ----------\n vpc : dict\n Representation of the VPC (created by create_vpc()).\n internet_gateway : dict\n Representation of the internet gateway (created by create_vpc()).", "id": "f2782:c0:m7"} {"signature": "def spin_up_instance(self, command, job_name):", "body": "command = Template(template_string).substitute(jobname=job_name,user_script=command,linger=str(self.linger).lower(),worker_init=self.worker_init)instance_type = self.instance_typesubnet = self.sn_ids[]ami_id = self.image_idtotal_instances = len(self.instances)if float(self.spot_max_bid) > :spot_options = {'': '','': {'': str(self.spot_max_bid),'': '','': ''}}else:spot_options = {}if total_instances > self.max_nodes:logger.warn(\"\".format(self.max_nodes))return [None]try:tag_spec = [{\"\": \"\", \"\": [{'': '', '': job_name}]}]instance = self.ec2.create_instances(MinCount=,MaxCount=,InstanceType=instance_type,ImageId=ami_id,KeyName=self.key_name,SubnetId=subnet,SecurityGroupIds=[self.sg_id],TagSpecifications=tag_spec,InstanceMarketOptions=spot_options,InstanceInitiatedShutdownBehavior='',IamInstanceProfile={'': self.iam_instance_profile_arn},UserData=command)except ClientError as e:print(e)logger.error(e.response)return [None]except Exception as e:logger.error(\"\".format(e))return [None]self.instances.append(instance[].id)logger.info(\"\".format(instance[].id, instance_type))return instance", "docstring": "Start an instance in the VPC in the first available subnet.\n\n N instances will be started if nodes_per_block > 1.\n Not supported. 
We only do 1 node per block.\n\n Parameters\n ----------\n command : str\n Command string to execute on the node.\n job_name : str\n Name associated with the instances.", "id": "f2782:c0:m9"} {"signature": "def shut_down_instance(self, instances=None):", "body": "if instances and len(self.instances) > :print(instances)try:print([i.id for i in instances])except Exception as e:print(e)term = self.client.terminate_instances(InstanceIds=instances)logger.info(\"\".format(len(instances), str(instances)))elif len(self.instances) > :instance = self.instances.pop()term = self.client.terminate_instances(InstanceIds=[instance])logger.info(\"\".format(instance))else:logger.warn(\"\")return -self.get_instance_state()return term", "docstring": "Shut down a list of instances, if provided.\n\n If no instance is provided, the last instance started up will be shut down.", "id": "f2782:c0:m10"} {"signature": "def get_instance_state(self, instances=None):", "body": "if instances:desc = self.client.describe_instances(InstanceIds=instances)else:desc = self.client.describe_instances(InstanceIds=self.instances)for i in range(len(desc[''])):instance = desc[''][i][''][]self.instance_states[instance['']] = instance['']['']return self.instance_states", "docstring": "Get states of all instances on EC2 which were started by this file.", "id": "f2782:c0:m11"} {"signature": "def status(self, job_ids):", "body": "all_states = []status = self.client.describe_instances(InstanceIds=job_ids)for r in status['']:for i in r['']:instance_id = i['']instance_state = translate_table.get(i[''][''], '')self.resources[instance_id][''] = instance_stateall_states.extend([instance_state])return all_states", "docstring": "Get the status of a list of jobs identified by their ids.\n\n Parameters\n ----------\n job_ids : list of str\n Identifiers for the jobs.\n\n Returns\n -------\n list of int\n The status codes of the requested jobs.", "id": "f2782:c0:m12"} {"signature": "def submit(self, command='', blocksize=, tasks_per_node=, job_name=\"\"):", "body": "job_name = \"\".format(time.time())wrapped_cmd = self.launcher(command,tasks_per_node,self.nodes_per_block)[instance, *rest] = self.spin_up_instance(command=wrapped_cmd, job_name=job_name)if not instance:logger.error(\"\")return Nonelogger.debug(\"\".format(instance.instance_id))state = translate_table.get(instance.state[''], \"\")self.resources[instance.instance_id] = {\"\": instance.instance_id,\"\": instance,\"\": state}return instance.instance_id", "docstring": "Submit the command onto a freshly instantiated AWS EC2 instance.\n\n Submit returns an ID that corresponds to the task that was just submitted.\n\n Parameters\n ----------\n command : str\n Command to be invoked on the remote side.\n blocksize : int\n Number of blocks requested.\n tasks_per_node : int (default=1)\n Number of command invocations to be launched per node\n job_name : str\n Prefix for the job name.\n\n Returns\n -------\n None or str\n If at capacity, None will be returned. 
Otherwise, the job identifier will be returned.", "id": "f2782:c0:m13"} {"signature": "def cancel(self, job_ids):", "body": "if self.linger is True:logger.debug(\"\")return [False for x in job_ids]try:self.client.terminate_instances(InstanceIds=list(job_ids))except Exception as e:logger.error(\"\".format(job_ids))raise eelse:logger.debug(\"\".format(job_ids))for job_id in job_ids:self.resources[job_id][\"\"] = \"\"for job_id in job_ids:self.instances.remove(job_id)return [True for x in job_ids]", "docstring": "Cancel the jobs specified by a list of job ids.\n\n Parameters\n ----------\n job_ids : list of str\n List of job identifiers\n\n Returns\n -------\n list of bool\n Each entry in the list will contain False if the operation fails. Otherwise, the entry will be True.", "id": "f2782:c0:m14"} {"signature": "def show_summary(self):", "body": "self.get_instance_state()status_string = \"\".format(self.vpc_id, self.sn_ids, self.sg_id, self.instances)status_string += \"\"self.get_instance_state()for state in self.instance_states.keys():status_string += \"\".format(state, self.instance_states[state])status_string += \"\"logger.info(status_string)return status_string", "docstring": "Print human readable summary of current AWS state to log and to console.", "id": "f2782:c0:m15"} {"signature": "def teardown(self):", "body": "self.shut_down_instance(self.instances)self.instances = []try:self.client.delete_internet_gateway(InternetGatewayId=self.internet_gateway)self.internet_gateway = Noneself.client.delete_route_table(RouteTableId=self.route_table)self.route_table = Nonefor subnet in list(self.sn_ids):self.client.delete_subnet(SubnetId=subnet)self.sn_ids.remove(subnet)self.client.delete_security_group(GroupId=self.sg_id)self.sg_id = Noneself.client.delete_vpc(VpcId=self.vpc_id)self.vpc_id = Noneexcept Exception as e:logger.error(\"\".format(e))raise eself.show_summary()os.remove(self.config[''])", "docstring": "Teardown the EC2 infrastructure.\n\n Terminate all EC2 instances, delete all subnets, delete security group, delete VPC,\n and reset all instance variables.", "id": "f2782:c0:m16"} {"signature": "@propertydef current_capacity(self):", "body": "return len(self.instances)", "docstring": "Returns the current blocksize.", "id": "f2782:c0:m19"} {"signature": "def _status(self):", "body": "job_id_list = ''.join(self.resources.keys())cmd = \"\".format(job_id_list)retcode, stdout, stderr = super().execute_wait(cmd)if retcode != :returnjobs_missing = list(self.resources.keys())for line in stdout.split(''):parts = line.split()if parts and parts[] != '':job_id = parts[]status = translate_table.get(parts[], '')self.resources[job_id][''] = statusjobs_missing.remove(job_id)for missing_job in jobs_missing:if self.resources[missing_job][''] in ['', '']:self.resources[missing_job][''] = ''", "docstring": "Internal: Do not call. Returns the status list for a list of job_ids\n\n Args:\n self\n\n Returns:\n [status...] 
: Status list of all jobs", "id": "f2785:c0:m1"} {"signature": "def submit(self, command, blocksize, tasks_per_node, job_name=\"\"):", "body": "if self.provisioned_blocks >= self.max_blocks:logger.warn(\"\".format(self.label))return Nonejob_name = \"\".format(job_name, time.time())script_path = \"\".format(self.script_dir, job_name)script_path = os.path.abspath(script_path)logger.debug(\"\".format(self.nodes_per_block))job_config = {}job_config[\"\"] = self.channel.script_dirjob_config[\"\"] = self.nodes_per_blockjob_config[\"\"] = tasks_per_nodejob_config[\"\"] = wtime_to_minutes(self.walltime)job_config[\"\"] = self.scheduler_optionsjob_config[\"\"] = self.worker_initjob_config[\"\"] = self.partitionjob_config[\"\"] = commandjob_config[\"\"] = self.launcher(command,tasks_per_node,self.nodes_per_block)logger.debug(\"\")self._write_submit_script(template_string, script_path, job_name, job_config)if self.move_files:logger.debug(\"\")channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)else:logger.debug(\"\")channel_script_path = script_pathretcode, stdout, stderr = super().execute_wait(\"\".format(channel_script_path))job_id = Noneif retcode == :for line in stdout.split(''):if line.startswith(\"\"):job_id = line.split(\"\")[].strip()self.resources[job_id] = {'': job_id, '': '', '': blocksize}else:print(\"\")logger.error(\"\", retcode, stdout.strip(), stderr.strip())return job_id", "docstring": "Submit the command as a slurm job of blocksize parallel elements.\n\n Parameters\n ----------\n command : str\n Command to be made on the remote side.\n blocksize : int\n Not implemented.\n tasks_per_node : int\n Command invocations to be launched per node\n job_name : str\n Name for the job (must be unique).\n Returns\n -------\n None or str\n If at capacity, returns None; otherwise, a string identifier for the job", "id": "f2785:c0:m2"} {"signature": "def cancel(self, job_ids):", "body": "job_id_list = ''.join(job_ids)retcode, stdout, stderr = super().execute_wait(\"\".format(job_id_list))rets = Noneif retcode == :for jid in job_ids:self.resources[jid][''] = translate_table[''] rets = [True for i in job_ids]else:rets = [False for i in job_ids]return rets", "docstring": "Cancels the jobs specified by a list of job ids\n\n Args:\n job_ids : [ ...]\n\n Returns :\n [True/False...] 
: If the cancel operation fails the entire list will be False.", "id": "f2785:c0:m3"} {"signature": "def get_configs(self, command, tasks_per_node):", "body": "logger.debug(\"\".format(self.nodes_per_block, tasks_per_node))job_config = {}job_config[\"\"] = self.channel.script_dirjob_config[\"\"] = self.nodes_per_blockjob_config[\"\"] = wtime_to_minutes(self.walltime)job_config[\"\"] = self.scheduler_optionsjob_config[\"\"] = self.worker_initjob_config[\"\"] = commandjob_config[\"\"] = self.launcher(command,tasks_per_node,self.nodes_per_block)return job_config", "docstring": "Compose a dictionary with information for writing the submit script.", "id": "f2787:c0:m1"} {"signature": "def submit(self, command, blocksize, tasks_per_node, job_name=\"\"):", "body": "if blocksize < self.nodes_per_block:blocksize = self.nodes_per_blockjob_name = \"\".format(job_name, time.time())script_path = \"\".format(self.script_dir, job_name)script_path = os.path.abspath(script_path)job_config = self.get_configs(command, tasks_per_node)logger.debug(\"\")self._write_submit_script(template_string, script_path, job_name, job_config)channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)cmd = \"\".format(channel_script_path)retcode, stdout, stderr = super().execute_wait(cmd, )if retcode == :for line in stdout.split(''):job_id = line.strip()if not job_id:continueself.resources[job_id] = {'': job_id, '': '', '': blocksize}return job_idelse:print(\"\")logger.error(\"\", retcode, stdout.strip(), stderr.strip())", "docstring": "The submit method takes the command string to be executed upon\n instantiation of a resource most often to start a pilot (such as IPP engine\n or even Swift-T engines).\n\n Args :\n - command (str) : The bash command string to be executed.\n - blocksize (int) : Blocksize to be requested\n - tasks_per_node (int) : command invocations to be launched per node\n\n KWargs:\n - job_name (str) : Human friendly name to be assigned to the job request\n\n Returns:\n - A job identifier, this could be an integer, string etc\n\n Raises:\n - ExecutionProviderException or its subclasses", "id": "f2787:c0:m2"} {"signature": "def _status(self):", "body": "cmd = \"\"retcode, stdout, stderr = super().execute_wait(cmd)if retcode != :returnjobs_missing = list(self.resources.keys())for line in stdout.split(''):parts = line.split()if parts and parts[].lower().lower() != ''and not parts[].startswith(''):job_id = parts[]status = translate_table.get(parts[].lower(), '')if job_id in self.resources:self.resources[job_id][''] = statusjobs_missing.remove(job_id)for missing_job in jobs_missing:if self.resources[missing_job][''] in ['', '']:self.resources[missing_job][''] = ''", "docstring": "Get the status of a list of jobs identified by the job identifiers\n returned from the submit request.\n\n Returns:\n - A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',\n 'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.\n\n Raises:\n - ExecutionProviderException or its subclasses", "id": "f2787:c0:m3"} {"signature": "def cancel(self, job_ids):", "body": "job_id_list = ''.join(job_ids)cmd = \"\".format(job_id_list)retcode, stdout, stderr = super().execute_wait(cmd, )rets = Noneif retcode == :for jid in job_ids:self.resources[jid][''] = \"\"rets = [True for i in job_ids]else:rets = [False for i in job_ids]return rets", "docstring": "Cancels the resources identified by the job_ids provided by the user.\n\n Args:\n - job_ids (list): A list of job identifiers\n\n Returns:\n 
- A list of status from cancelling the job which can be True, False\n\n Raises:\n - ExecutionProviderException or its subclasses", "id": "f2787:c0:m4"} {"signature": "def submit(self, cmd_string, blocksize, tasks_per_node, job_name=\"\"):", "body": "if not self.resources:cur_timestamp = str(time.time() * ).split(\"\")[]job_name = \"\".format(job_name, cur_timestamp)if not self.deployment_name:deployment_name = ''.format(job_name)else:deployment_name = ''.format(self.deployment_name,cur_timestamp)formatted_cmd = template_string.format(command=cmd_string,worker_init=self.worker_init)self.deployment_obj = self._create_deployment_object(job_name,self.image,deployment_name,cmd_string=formatted_cmd,replicas=self.init_blocks,volumes=self.persistent_volumes)logger.debug(\"\".format(deployment_name))self._create_deployment(self.deployment_obj)self.resources[deployment_name] = {'': '','': self.init_blocks}return deployment_name", "docstring": "Submit a job\n Args:\n - cmd_string :(String) - Name of the container to initiate\n - blocksize :(float) - Number of replicas\n - tasks_per_node (int) : command invocations to be launched per node\n\n Kwargs:\n - job_name (String): Name for job, must be unique\n Returns:\n - None: At capacity, cannot provision more\n - job_id: (string) Identifier for the job", "id": "f2788:c0:m1"} {"signature": "def status(self, job_ids):", "body": "self._status()return ['' for jid in job_ids]", "docstring": "Get the status of a list of jobs identified by the job identifiers\n returned from the submit request.\n Args:\n - job_ids (list) : A list of job identifiers\n Returns:\n - A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',\n 'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.\n Raises:\n - ExecutionProviderExceptions or its subclasses", "id": "f2788:c0:m2"} {"signature": "def cancel(self, job_ids):", "body": "for job in job_ids:logger.debug(\"\".format(job))self._delete_deployment(job)self.resources[job][''] = ''rets = [True for i in job_ids]return rets", "docstring": "Cancels the jobs specified by a list of job ids\n Args:\n job_ids : [ ...]\n Returns :\n [True/False...] : If the cancel operation fails the entire list will be False.", "id": "f2788:c0:m3"} {"signature": "def _status(self):", "body": "jobs_ids = list(self.resources.keys())return jobs_ids", "docstring": "Internal: Do not call. Returns the status list for a list of job_ids\n Args:\n self\n Returns:\n [status...] 
: Status list of all jobs", "id": "f2788:c0:m4"} {"signature": "def _create_deployment_object(self, job_name, job_image,deployment_name, port=,replicas=,cmd_string=None,engine_json_file='',engine_dir='',volumes=[]):", "body": "security_context = Noneif self.user_id and self.group_id:security_context = client.V1SecurityContext(run_as_group=self.group_id,run_as_user=self.user_id,run_as_non_root=self.run_as_non_root)environment_vars = client.V1EnvVar(name=\"\", value=\"\")launch_args = [\"\", \"\".format(cmd_string)]volume_mounts = []for volume in volumes:volume_mounts.append(client.V1VolumeMount(mount_path=volume[],name=volume[]))container = Noneif security_context:container = client.V1Container(name=job_name,image=job_image,ports=[client.V1ContainerPort(container_port=port)],volume_mounts=volume_mounts,command=[''],args=launch_args,env=[environment_vars],security_context=security_context)else:container = client.V1Container(name=job_name,image=job_image,ports=[client.V1ContainerPort(container_port=port)],volume_mounts=volume_mounts,command=[''],args=launch_args,env=[environment_vars])secret = Noneif self.secret:secret = client.V1LocalObjectReference(name=self.secret)volume_defs = []for volume in volumes:volume_defs.append(client.V1Volume(name=volume[],persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(claim_name=volume[])))template = client.V1PodTemplateSpec(metadata=client.V1ObjectMeta(labels={\"\": job_name}),spec=client.V1PodSpec(containers=[container],image_pull_secrets=[secret],volumes=volume_defs))spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas,template=template)deployment = client.ExtensionsV1beta1Deployment(api_version=\"\",kind=\"\",metadata=client.V1ObjectMeta(name=deployment_name),spec=spec)return deployment", "docstring": "Create a kubernetes deployment for the job.\n Args:\n - job_name (string) : Name of the job and deployment\n - job_image (string) : Docker image to launch\n KWargs:\n - port (integer) : Container port\n - replicas : Number of replica containers to maintain\n Returns:\n - True: The deployment object to launch", "id": "f2788:c0:m5"} {"signature": "def _create_deployment(self, deployment):", "body": "api_response = self.kube_client.create_namespaced_deployment(body=deployment,namespace=self.namespace)logger.debug(\"\".format(str(api_response.status)))", "docstring": "Create the kubernetes deployment", "id": "f2788:c0:m6"} {"signature": "def _delete_deployment(self, deployment_name):", "body": "api_response = self.kube_client.delete_namespaced_deployment(name=deployment_name,namespace=self.namespace,body=client.V1DeleteOptions(propagation_policy='',grace_period_seconds=))logger.debug(\"\".format(str(api_response.status)))", "docstring": "Delete deployment", "id": "f2788:c0:m7"} {"signature": "def _status(self):", "body": "job_id_list = ''.join(self.resources.keys())cmd = \"\".format(job_id_list)retcode, stdout, stderr = super().execute_wait(cmd)\"\"\"\"\"\"for line in stdout.strip().split(''):parts = line.split()job_id = parts[]status = translate_table.get(parts[], '')self.resources[job_id][''] = status", "docstring": "Update the resource dictionary with job statuses.", "id": "f2790:c0:m1"} {"signature": "def status(self, job_ids):", "body": "self._status()return [self.resources[jid][''] for jid in job_ids]", "docstring": "Get the status of a list of jobs identified by their ids.\n\n Parameters\n ----------\n job_ids : list of int\n Identifiers of jobs for which the status will be returned.\n\n Returns\n -------\n List of int\n 
Status codes for the requested jobs.", "id": "f2790:c0:m2"} {"signature": "def submit(self, command, blocksize, tasks_per_node, job_name=\"\"):", "body": "logger.debug(\"\".format(blocksize))if self.provisioned_blocks >= self.max_blocks:template = \"\"logger.warn(template.format(self.label, self.provisioned_blocks, self.max_blocks))return Noneblocksize = max(self.nodes_per_block, blocksize)job_name = \"\".format(job_name, time.time())script_path = \"\".format(self.script_dir, job_name)script_path = os.path.abspath(script_path)userscript_path = \"\".format(self.script_dir, job_name)userscript_path = os.path.abspath(userscript_path)self.environment[\"\"] = \"\".format(job_name)job_config = {}job_config[\"\"] = job_namejob_config[\"\"] = self.channel.script_dirjob_config[\"\"] = self.projectjob_config[\"\"] = self.nodes_per_blockjob_config[\"\"] = self.scheduler_optionsjob_config[\"\"] = self.worker_initjob_config[\"\"] = commandjob_config[\"\"] = tasks_per_nodejob_config[\"\"] = self.requirementsjob_config[\"\"] = ''.join([''.format(key, value) for key, value in self.environment.items()])wrapped_command = self.launcher(command,tasks_per_node,self.nodes_per_block)with open(userscript_path, '') as f:f.write(job_config[\"\"] + '' + wrapped_command)user_script_path = self.channel.push_file(userscript_path, self.channel.script_dir)the_input_files = [user_script_path] + self.transfer_input_filesjob_config[\"\"] = ''.join(the_input_files)job_config[\"\"] = os.path.basename(user_script_path)self._write_submit_script(template_string, script_path, job_name, job_config)channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)cmd = \"\".format(channel_script_path)retcode, stdout, stderr = super().execute_wait(cmd, )logger.debug(\"\", retcode, stdout.strip(), stderr.strip())job_id = []if retcode == :for line in stdout.split(''):if re.match('', line) is not None:cluster = line.split(\"\")[]processes = [str(x) for x in range(, int(line[]))]job_id += [cluster + process for process in processes]self._add_resource(job_id)return job_id[]", "docstring": "Submits the command onto an Local Resource Manager job of blocksize parallel elements.\n\n example file with the complex case of multiple submits per job:\n Universe =vanilla\n output = out.$(Cluster).$(Process)\n error = err.$(Cluster).$(Process)\n log = log.$(Cluster)\n leave_in_queue = true\n executable = test.sh\n queue 5\n executable = foo\n queue 1\n\n $ condor_submit test.sub\n Submitting job(s)......\n 5 job(s) submitted to cluster 118907.\n 1 job(s) submitted to cluster 118908.\n\n Parameters\n ----------\n command : str\n Command to execute\n blocksize : int\n Number of blocks to request.\n job_name : str\n Job name prefix.\n tasks_per_node : int\n command invocations to be launched per node\n Returns\n -------\n None or str\n None if at capacity and cannot provision more; otherwise the identifier for the job.", "id": "f2790:c0:m3"} {"signature": "def cancel(self, job_ids):", "body": "job_id_list = ''.join(job_ids)cmd = \"\".format(job_id_list)logger.debug(\"\".format(cmd))retcode, stdout, stderr = self.channel.execute_wait(cmd, )rets = Noneif retcode == :for jid in job_ids:self.resources[jid][''] = ''rets = [True for i in job_ids]else:rets = [False for i in job_ids]return rets", "docstring": "Cancels the jobs specified by a list of job IDs.\n\n Parameters\n ----------\n job_ids : list of str\n The job IDs to cancel.\n\n Returns\n -------\n list of bool\n Each entry in the list will be True if the job is cancelled succesfully, 
otherwise False.", "id": "f2790:c0:m4"} {"signature": "def status(self, job_ids):", "body": "logger.debug(\"\".format(job_ids))for job_id in self.resources:if self.resources[job_id]['']:poll_code = self.resources[job_id][''].poll()if self.resources[job_id][''] in ['', '']:continueif poll_code is None:self.resources[job_id][''] = ''elif poll_code == :self.resources[job_id][''] = ''elif poll_code != :self.resources[job_id][''] = ''else:logger.error(\"\")elif self.resources[job_id]['']:retcode, stdout, stderr = self.channel.execute_wait('',self.cmd_timeout)for line in stdout.split(''):if line.startswith(\"\"):status = line.split(\"\")[].strip()if status == \"\":self.resources[job_id][''] = ''else:self.resources[job_id][''] = ''return [self.resources[jid][''] for jid in job_ids]", "docstring": "Get the status of a list of jobs identified by their ids.\n\n Args:\n - job_ids (List of ids) : List of identifiers for the jobs\n\n Returns:\n - List of status codes.", "id": "f2792:c0:m1"} {"signature": "def _write_submit_script(self, script_string, script_filename):", "body": "try:with open(script_filename, '') as f:f.write(script_string)except KeyError as e:logger.error(\"\", e)raise (SchedulerMissingArgs(e.args, self.label))except IOError as e:logger.error(\"\", script_filename)raise (ScriptPathError(script_filename, e))return True", "docstring": "Load the template string with config values and write the generated submit script to\na submit script file.\n\nArgs:\n - template_string (string) : The template string to be used for the writing submit script\n - script_filename (string) : Name of the submit script\n\nReturns:\n - True: on success\n\nRaises:\n SchedulerMissingArgs : If template is missing args\n ScriptPathError : Unable to write submit script out", "id": "f2792:c0:m2"} {"signature": "def submit(self, command, blocksize, tasks_per_node, job_name=\"\"):", "body": "job_name = \"\".format(job_name, time.time())script_path = \"\".format(self.script_dir, job_name)script_path = os.path.abspath(script_path)wrap_command = self.worker_init + '' + self.launcher(command, tasks_per_node, self.nodes_per_block)self._write_submit_script(wrap_command, script_path)job_id = Noneproc = Noneremote_pid = Noneif (self.move_files is None and not isinstance(self.channel, LocalChannel)) or (self.move_files):logger.debug(\"\")script_path = self.channel.push_file(script_path, self.channel.script_dir)if not isinstance(self.channel, LocalChannel):logger.debug(\"\")cmd = ''.format(script_path)retcode, stdout, stderr = self.channel.execute_wait(cmd, self.cmd_timeout)for line in stdout.split(''):if line.startswith(\"\"):remote_pid = line.split(\"\")[].strip()job_id = remote_pidif job_id is None:logger.warning(\"\")else:try:job_id, proc = self.channel.execute_no_wait(''.format(script_path), self.cmd_timeout)except Exception as e:logger.debug(\"\".format(self.channel, e))raiseself.resources[job_id] = {'': job_id, '': '','': blocksize,'': remote_pid,'': proc}return job_id", "docstring": "Submits the command onto an Local Resource Manager job of blocksize parallel elements.\n Submit returns an ID that corresponds to the task that was just submitted.\n\n If tasks_per_node < 1:\n 1/tasks_per_node is provisioned\n\n If tasks_per_node == 1:\n A single node is provisioned\n\n If tasks_per_node > 1 :\n tasks_per_node * blocksize number of nodes are provisioned.\n\n Args:\n - command :(String) Commandline invocation to be made on the remote side.\n - blocksize :(float) - Not really used for local\n - tasks_per_node (int) : command 
invocations to be launched per node\n\n Kwargs:\n - job_name (String): Name for job, must be unique\n\n Returns:\n - None: At capacity, cannot provision more\n - job_id: (string) Identifier for the job", "id": "f2792:c0:m3"} {"signature": "def cancel(self, job_ids):", "body": "for job in job_ids:logger.debug(\"\".format(job))if self.resources[job]['']:proc = self.resources[job]['']os.killpg(os.getpgid(proc.pid), signal.SIGTERM)self.resources[job][''] = ''elif self.resources[job]['']:cmd = \"\".format(self.resources[job][''])retcode, stdout, stderr = self.channel.execute_wait(cmd, self.cmd_timeout)if retcode != :logger.warning(\"\".format(self.resources[job][''],self.label))rets = [True for i in job_ids]return rets", "docstring": "Cancels the jobs specified by a list of job ids\n\n Args:\n job_ids : [ ...]\n\n Returns :\n [True/False...] : If the cancel operation fails the entire list will be False.", "id": "f2792:c0:m4"} {"signature": "def submit(self, command, blocksize, tasks_per_node, job_name=\"\"):", "body": "wrapped_cmd = self.launcher(command,tasks_per_node,)instance, name = self.create_instance(command=wrapped_cmd)self.provisioned_blocks += self.resources[name] = {\"\": name, \"\": translate_table[instance['']]}return name", "docstring": "The submit method takes the command string to be executed upon\n instantiation of a resource most often to start a pilot.\n\n Args :\n - command (str) : The bash command string to be executed.\n - blocksize (int) : Blocksize to be requested\n - tasks_per_node (int) : command invocations to be launched per node\n\n KWargs:\n - job_name (str) : Human friendly name to be assigned to the job request\n\n Returns:\n - A job identifier, this could be an integer, string etc\n\n Raises:\n - ExecutionProviderException or its subclasses", "id": "f2793:c0:m1"} {"signature": "def status(self, job_ids):", "body": "statuses = []for job_id in job_ids:instance = self.client.instances().get(instance=job_id, project=self.project_id, zone=self.zone).execute()self.resources[job_id][''] = translate_table[instance['']]statuses.append(translate_table[instance['']])return statuses", "docstring": "Get the status of a list of jobs identified by the job identifiers\n returned from the submit request.\n\n Args:\n - job_ids (list) : A list of job identifiers\n\n Returns:\n - A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',\n 'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.\n\n Raises:\n - ExecutionProviderException or its subclasses", "id": "f2793:c0:m2"} {"signature": "def cancel(self, job_ids):", "body": "statuses = []for job_id in job_ids:try:self.delete_instance(job_id)statuses.append(True)self.provisioned_blocks -= except Exception:statuses.append(False)return statuses", "docstring": "Cancels the resources identified by the job_ids provided by the user.\n\n Args:\n - job_ids (list): A list of job identifiers\n\n Returns:\n - A list of status from cancelling the job which can be True, False\n\n Raises:\n - ExecutionProviderException or its subclasses", "id": "f2793:c0:m3"} {"signature": "@propertydef scaling_enabled(self):", "body": "return True", "docstring": "Scaling is enabled\n\n Returns:\n - Status (Bool)", "id": "f2793:c0:m4"} {"signature": "@propertydef current_capacity(self):", "body": "return self.provisioned_blocks", "docstring": "Returns the number of currently provisioned blocks.", "id": "f2793:c0:m5"} {"signature": "def _status(self):", "body": "job_id_list = ''.join(self.resources.keys())jobs_missing = 
list(self.resources.keys())retcode, stdout, stderr = super().execute_wait(\"\".format(job_id_list))for line in stdout.split(''):parts = line.split()if not parts or parts[].upper().startswith('') or parts[].startswith(''):continuejob_id = parts[]status = translate_table.get(parts[], '')self.resources[job_id][''] = statusjobs_missing.remove(job_id)for missing_job in jobs_missing:if self.resources[missing_job][''] in ['', '']:self.resources[missing_job][''] = translate_table['']", "docstring": "Internal: Do not call. Returns the status list for a list of job_ids\n\n Args:\n self\n\n Returns:\n [status...] : Status list of all jobs", "id": "f2795:c0:m1"} {"signature": "def submit(self, command, blocksize, tasks_per_node, job_name=\"\"):", "body": "if self.provisioned_blocks >= self.max_blocks:logger.warn(\"\", self.label)return Noneif blocksize < self.nodes_per_block:blocksize = self.nodes_per_blockjob_name = \"\".format(job_name, time.time())script_path = \"\".format(self.script_dir, job_name)script_path = os.path.abspath(script_path)logger.debug(\"\", blocksize, self.nodes_per_block,tasks_per_node)job_config = {}job_config[\"\"] = self.channel.script_dirjob_config[\"\"] = self.nodes_per_blockjob_config[\"\"] = self.nodes_per_block * tasks_per_nodejob_config[\"\"] = self.nodes_per_blockjob_config[\"\"] = tasks_per_nodejob_config[\"\"] = self.walltimejob_config[\"\"] = self.scheduler_optionsjob_config[\"\"] = self.worker_initjob_config[\"\"] = commandjob_config[\"\"] = self.launcher(command,tasks_per_node,self.nodes_per_block)logger.debug(\"\")self._write_submit_script(template_string, script_path, job_name, job_config)channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)submit_options = ''if self.queue is not None:submit_options = ''.format(submit_options, self.queue)if self.account is not None:submit_options = ''.format(submit_options, self.account)launch_cmd = \"\".format(submit_options, channel_script_path)retcode, stdout, stderr = super().execute_wait(launch_cmd)job_id = Noneif retcode == :for line in stdout.split(''):if line.strip():job_id = line.strip()self.resources[job_id] = {'': job_id, '': '', '': blocksize}else:message = \"\".format(launch_cmd, retcode)if (stdout is not None) and (stderr is not None):message += \"\".format(stderr.strip(), stdout.strip())logger.error(message)return job_id", "docstring": "Submits the command onto an Local Resource Manager job of blocksize parallel elements.\n Submit returns an ID that corresponds to the task that was just submitted.\n\n If tasks_per_node < 1 : ! This is illegal. 
tasks_per_node should be integer\n\n If tasks_per_node == 1:\n A single node is provisioned\n\n If tasks_per_node > 1 :\n tasks_per_node * blocksize number of nodes are provisioned.\n\n Args:\n - command :(String) Commandline invocation to be made on the remote side.\n - blocksize :(float)\n - tasks_per_node (int) : command invocations to be launched per node\n\n Kwargs:\n - job_name (String): Name for job, must be unique\n\n Returns:\n - None: At capacity, cannot provision more\n - job_id: (string) Identifier for the job", "id": "f2795:c0:m2"} {"signature": "def cancel(self, job_ids):", "body": "job_id_list = ''.join(job_ids)retcode, stdout, stderr = super().execute_wait(\"\".format(job_id_list))rets = Noneif retcode == :for jid in job_ids:self.resources[jid][''] = translate_table[''] rets = [True for i in job_ids]else:rets = [False for i in job_ids]return rets", "docstring": "Cancels the jobs specified by a list of job ids\n\n Args:\n job_ids : [ ...]\n\n Returns :\n [True/False...] : If the cancel operation fails the entire list will be False.", "id": "f2795:c0:m3"} {"signature": "def scale_out(self, blocks=, block_size=):", "body": "self.config[''.format(self.pool)]['']count = if blocks == :block_id = len(self.blocks)self.blocks[block_id] = []for instance_id in range(, block_size):instances = self.server_manager.create(''.format(block_id, instance_id), self.client.images.get(''), self.client.flavors.list()[],min_count=,max_count=,userdata=setup_script.format(engine_config=self.engine_config),key_name='',security_groups=[''],nics=[{\"\": '',\"\": '',\"\": ''}])self.blocks[block_id].extend([instances])count += return count", "docstring": "Scale out the existing resources.", "id": "f2798:c0:m1"} {"signature": "def scale_in(self, blocks=, machines=, strategy=None):", "body": "count = instances = self.client.servers.list()for instance in instances[:machines]:print(\"\", instance)instance.delete()count += return count", "docstring": "Scale in resources", "id": "f2798:c0:m2"} {"signature": "@abstractmethoddef submit(self, command, blocksize, tasks_per_node, job_name=\"\"):", "body": "pass", "docstring": "The submit method takes the command string to be executed upon\n instantiation of a resource most often to start a pilot (such as IPP engine\n or even Swift-T engines).\n\n Args :\n - command (str) : The bash command string to be executed\n - blocksize (int) : Blocksize to be requested\n - tasks_per_node (int) : command invocations to be launched per node\n\n KWargs:\n - job_name (str) : Human friendly name to be assigned to the job request\n\n Returns:\n - A job identifier, this could be an integer, string etc\n\n Raises:\n - ExecutionProviderException or its subclasses", "id": "f2799:c0:m0"} {"signature": "@abstractmethoddef status(self, job_ids):", "body": "pass", "docstring": "Get the status of a list of jobs identified by the job identifiers\n returned from the submit request.\n\n Args:\n - job_ids (list) : A list of job identifiers\n\n Returns:\n - A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',\n 'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.\n\n Raises:\n - ExecutionProviderException or its subclasses", "id": "f2799:c0:m1"} {"signature": "@abstractmethoddef cancel(self, job_ids):", "body": "pass", "docstring": "Cancels the resources identified by the job_ids provided by the user.\n\n Args:\n - job_ids (list): A list of job identifiers\n\n Returns:\n - A list of status from cancelling the job which can be True, False\n\n Raises:\n - 
ExecutionProviderException or its subclasses", "id": "f2799:c0:m2"} {"signature": "@abstractpropertydef scaling_enabled(self):", "body": "pass", "docstring": "The callers of ParslExecutors need to differentiate between Executors\n and Executors wrapped in a resource provider\n\n Returns:\n - Status (Bool)", "id": "f2799:c0:m3"} {"signature": "@abstractpropertydef label(self):", "body": "pass", "docstring": "Provides the label for this provider", "id": "f2799:c0:m4"} {"signature": "def _write_submit_script(self, template, script_filename, job_name, configs):", "body": "try:submit_script = Template(template).substitute(jobname=job_name, **configs)with open(script_filename, '') as f:f.write(submit_script)except KeyError as e:logger.error(\"\", e)raise (SchedulerMissingArgs(e.args, self.sitename))except IOError as e:logger.error(\"\", script_filename)raise (ScriptPathError(script_filename, e))except Exception as e:print(\"\", template)print(\"\", job_name)print(\"\", configs)logger.error(\"\", e)raise (e)return True", "docstring": "Generate submit script and write it to a file.\n\n Args:\n - template (string) : The template string to be used for the writing submit script\n - script_filename (string) : Name of the submit script\n - job_name (string) : job name\n - configs (dict) : configs that get pushed into the template\n\n Returns:\n - True: on success\n\n Raises:\n SchedulerMissingArgs : If template is missing args\n ScriptPathError : Unable to write submit script out", "id": "f2800:c0:m2"} {"signature": "def submit(self, cmd_string, blocksize, tasks_per_node, job_name=\"\"):", "body": "raise NotImplementedError", "docstring": "The submit method takes the command string to be executed upon\n instantiation of a resource most often to start a pilot (such as IPP engine\n or even Swift-T engines).\n\n Args :\n - cmd_string (str) : The bash command string to be executed\n - blocksize (int) : Blocksize to be requested\n - tasks_per_node (int) : command invocations to be launched per node\n\n KWargs:\n - job_name (str) : Human friendly name to be assigned to the job request\n\n Returns:\n - A job identifier, this could be an integer, string etc\n\n Raises:\n - ExecutionProviderExceptions or its subclasses", "id": "f2800:c0:m3"} {"signature": "def status(self, job_ids):", "body": "if job_ids:self._status()return [self.resources[jid][''] for jid in job_ids]", "docstring": "Get the status of a list of jobs identified by the job identifiers\n returned from the submit request.\n\n Args:\n - job_ids (list) : A list of job identifiers\n\n Returns:\n - A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',\n 'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.\n\n Raises:\n - ExecutionProviderException or its subclasses", "id": "f2800:c0:m5"} {"signature": "def cancel(self, job_ids):", "body": "raise NotImplementedError", "docstring": "Cancels the resources identified by the job_ids provided by the user.\n\n Args:\n - job_ids (list): A list of job identifiers\n\n Returns:\n - A list of status from cancelling the job which can be True, False\n\n Raises:\n - ExecutionProviderException or its subclasses", "id": "f2800:c0:m6"} {"signature": "@propertydef scaling_enabled(self):", "body": "return self._scaling_enabled", "docstring": "The callers of ParslExecutors need to differentiate between Executors\n and Executors wrapped in a resource provider\n\n Returns:\n - Status (Bool)", "id": "f2800:c0:m7"} {"signature": "@propertydef current_capacity(self):", "body": "return 
self.provisioned_blocks", "docstring": "Returns the currently provisioned blocks.\n This may need to return more information in the futures :\n { minsize, maxsize, current_requested }", "id": "f2800:c0:m8"} {"signature": "def _status(self):", "body": "jobs_missing = list(self.resources.keys())retcode, stdout, stderr = super().execute_wait(\"\")if retcode != :returnfor line in stdout.split(''):if line.startswith(''):continueparts = line.upper().split()if parts and parts[] != '':job_id = parts[]if job_id not in self.resources:continuestatus = translate_table.get(parts[], '')self.resources[job_id][''] = statusjobs_missing.remove(job_id)for missing_job in jobs_missing:if self.resources[missing_job][''] in ['', '', '']:self.resources[missing_job][''] = translate_table['']", "docstring": "Internal: Do not call. Returns the status list for a list of job_ids\n\n Args:\n self\n\n Returns:\n [status...] : Status list of all jobs", "id": "f2802:c0:m1"} {"signature": "def submit(self, command, blocksize, tasks_per_node, job_name=\"\"):", "body": "if self.provisioned_blocks >= self.max_blocks:logger.warn(\"\", self.label)return Noneif blocksize < self.nodes_per_block:blocksize = self.nodes_per_blockaccount_opt = ''.format(self.account) if self.account is not None else ''job_name = \"\".format(job_name, time.time())script_path = \"\".format(self.script_dir, job_name)script_path = os.path.abspath(script_path)job_config = {}job_config[\"\"] = self.scheduler_optionsjob_config[\"\"] = self.worker_initlogger.debug(\"\",blocksize, self.nodes_per_block, tasks_per_node)job_config[\"\"] = self.launcher(command, tasks_per_node, self.nodes_per_block)queue_opt = ''.format(self.queue) if self.queue is not None else ''logger.debug(\"\")self._write_submit_script(template_string, script_path, job_name, job_config)channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)command = ''.format(self.nodes_per_block, queue_opt, wtime_to_minutes(self.walltime), account_opt, channel_script_path)logger.debug(\"\".format(command))retcode, stdout, stderr = super().execute_wait(command)if retcode != :logger.error(\"\".format(command))logger.error(\"\".format(stdout, stderr))logger.debug(\"\", retcode, stdout.strip(), stderr.strip())job_id = Noneif retcode == :job_id = stdout.strip()self.resources[job_id] = {'': job_id, '': '', '': blocksize}else:logger.error(\"\".format(stderr))raise (ScaleOutFailed(self.__class__, \"\"))logger.debug(\"\".format(job_id))return job_id", "docstring": "Submits the command onto an Local Resource Manager job of blocksize parallel elements.\n Submit returns an ID that corresponds to the task that was just submitted.\n\n If tasks_per_node < 1 : ! This is illegal. 
tasks_per_node should be integer\n\n If tasks_per_node == 1:\n A single node is provisioned\n\n If tasks_per_node > 1 :\n tasks_per_node * blocksize number of nodes are provisioned.\n\n Args:\n - command :(String) Commandline invocation to be made on the remote side.\n - blocksize :(float)\n - tasks_per_node (int) : command invocations to be launched per node\n\n Kwargs:\n - job_name (String): Name for job, must be unique\n\n Returns:\n - None: At capacity, cannot provision more\n - job_id: (string) Identifier for the job", "id": "f2802:c0:m2"} {"signature": "def cancel(self, job_ids):", "body": "job_id_list = ''.join(job_ids)retcode, stdout, stderr = super().execute_wait(\"\".format(job_id_list))rets = Noneif retcode == :for jid in job_ids:self.resources[jid][''] = translate_table[''] rets = [True for i in job_ids]else:rets = [False for i in job_ids]return rets", "docstring": "Cancels the jobs specified by a list of job ids\n\n Args:\n job_ids : [ ...]\n\n Returns :\n [True/False...] : If the cancel operation fails the entire list will be False.", "id": "f2802:c0:m3"} {"signature": "def submit(self, *args, **kwargs):", "body": "return self.executor.submit(*args, **kwargs)", "docstring": "Submits work to the thread pool.\n\n This method is simply pass through and behaves like a submit call as described\n here `Python docs: `_", "id": "f2803:c0:m3"} {"signature": "def scale_out(self, workers=):", "body": "raise NotImplementedError", "docstring": "Scales out the number of active workers by 1.\n\n This method is notImplemented for threads and will raise the error if called.\n\n Raises:\n NotImplemented exception", "id": "f2803:c0:m4"} {"signature": "def scale_in(self, blocks):", "body": "raise NotImplementedError", "docstring": "Scale in the number of active blocks by specified amount.\n\n This method is not implemented for threads and will raise the error if called.\n\n Raises:\n NotImplemented exception", "id": "f2803:c0:m5"} {"signature": "def shutdown(self, block=False):", "body": "x = self.executor.shutdown(wait=block)logger.debug(\"\")return x", "docstring": "Shutdown the ThreadPool.\n\n Kwargs:\n - block (Bool): To block for confirmations or not", "id": "f2803:c0:m6"} {"signature": "def start(self):", "body": "if self.mode == \"\":returnif self.ipython_dir != '':self.ipython_dir = os.path.abspath(os.path.expanduser(self.ipython_dir))if self.log:stdout = open(os.path.join(self.ipython_dir, \"\".format(self.profile)), '')stderr = open(os.path.join(self.ipython_dir, \"\".format(self.profile)), '')else:stdout = open(os.devnull, '')stderr = open(os.devnull, '')try:opts = ['','' if self.ipython_dir == '' else ''.format(self.ipython_dir),self.interfaces if self.interfaces is not None else '','' if self.profile == '' else ''.format(self.profile),'' if self.reuse else '',''.format(self.public_ip) if self.public_ip else '',''.format(self.port) if self.port is not None else '']if self.port_range is not None:opts += [''.format(self.hb_ping, self.hb_pong),''.format(self.control_client, self.control_engine),''.format(self.mux_client, self.mux_engine),''.format(self.task_client, self.task_engine)]logger.debug(\"\".format(''.join([str(x) for x in opts])))self.proc = subprocess.Popen(opts, stdout=stdout, stderr=stderr, preexec_fn=os.setsid)except FileNotFoundError:msg = \"\"logger.error(msg)raise ControllerError(msg)except Exception as e:msg = \"\".format(e)logger.error(msg)raise ControllerError(msg)", "docstring": "Start the controller.", "id": "f2805:c0:m1"} {"signature": "@propertydef 
engine_file(self):", "body": "return os.path.join(self.ipython_dir,''.format(self.profile),'')", "docstring": "Specify path to the ipcontroller-engine.json file.\n\n This file is stored in in the ipython_dir/profile folders.\n\n Returns :\n - str, File path to engine file", "id": "f2805:c0:m2"} {"signature": "@propertydef client_file(self):", "body": "return os.path.join(self.ipython_dir,''.format(self.profile),'')", "docstring": "Specify path to the ipcontroller-client.json file.\n\n This file is stored in in the ipython_dir/profile folders.\n\n Returns :\n - str, File path to client file", "id": "f2805:c0:m3"} {"signature": "def close(self):", "body": "if self.reuse:logger.debug(\"\")returnif self.mode == \"\":logger.debug(\"\")returntry:pgid = os.getpgid(self.proc.pid)os.killpg(pgid, signal.SIGTERM)time.sleep()os.killpg(pgid, signal.SIGKILL)try:self.proc.wait(timeout=)x = self.proc.returncodeif x == :logger.debug(\"\".format(x))else:logger.error(\"\".format(x))except subprocess.TimeoutExpired:logger.warn(\"\".format(self.proc.pid))except Exception as e:logger.warn(\"\".format(self.proc.pid, e))", "docstring": "Terminate the controller process and its child processes.\n\n Args:\n - None", "id": "f2805:c0:m4"} {"signature": "def __init__(self, ip_address, port_range):", "body": "self.context = zmq.Context()self.zmq_socket = self.context.socket(zmq.DEALER)self.zmq_socket.set_hwm()self.port = self.zmq_socket.bind_to_random_port(\"\".format(ip_address),min_port=port_range[],max_port=port_range[])self.poller = zmq.Poller()self.poller.register(self.zmq_socket, zmq.POLLOUT)", "docstring": "TODO: docstring", "id": "f2806:c0:m0"} {"signature": "def put(self, task_id, buffer):", "body": "task_id_bytes = task_id.to_bytes(, \"\")message = [b\"\", task_id_bytes] + bufferself.zmq_socket.send_multipart(message)logger.debug(\"\".format(task_id))", "docstring": "TODO: docstring", "id": "f2806:c0:m1"} {"signature": "def __init__(self, ip_address, port_range):", "body": "self.context = zmq.Context()self.zmq_socket = self.context.socket(zmq.DEALER)self.zmq_socket.set_hwm()self.port = self.zmq_socket.bind_to_random_port(\"\".format(ip_address),min_port=port_range[],max_port=port_range[])", "docstring": "TODO: docstring", "id": "f2806:c1:m0"} {"signature": "def execute_task(f, args, kwargs, user_ns):", "body": "fname = getattr(f, '', '')prefix = \"\"fname = prefix + \"\"argname = prefix + \"\"kwargname = prefix + \"\"resultname = prefix + \"\"user_ns.update({fname: f,argname: args,kwargname: kwargs,resultname: resultname})code = \"\".format(resultname, fname,argname, kwargname)try:exec(code, user_ns, user_ns)except Exception as e:logger.warning(\"\".format(e))raise eelse:return user_ns.get(resultname)", "docstring": "Deserialize the buffer and execute the task.\n\n# Returns the result or exception.", "id": "f2807:m0"} {"signature": "def start_file_logger(filename, rank, name='', level=logging.DEBUG, format_string=None):", "body": "try:os.makedirs(os.path.dirname(filename), , True)except Exception as e:print(\"\".format(e))if format_string is None:format_string = \"\".format(rank)global loggerlogger = logging.getLogger(name)logger.setLevel(logging.DEBUG)handler = logging.FileHandler(filename)handler.setLevel(level)formatter = logging.Formatter(format_string, datefmt='')handler.setFormatter(formatter)logger.addHandler(handler)", "docstring": "Add a stream log handler.\n\n Args:\n - filename (string): Name of the file to write logs to\n - name (string): Logger name\n - level (logging.LEVEL): Set the logging 
level.\n - format_string (string): Set the format string\n\n Returns:\n - None", "id": "f2807:m1"} {"signature": "def worker(worker_id, task_url, debug=True, logdir=\"\", uid=\"\"):", "body": "start_file_logger(''.format(logdir, uid, worker_id),,level=logging.DEBUG if debug is True else logging.INFO)logger.info(\"\".format(worker_id))task_ids_received = []message_q = zmq_pipes.WorkerMessages(task_url)while True:print(\"\")task_id, buf = message_q.get()task_ids_received.append(task_id)user_ns = locals()user_ns.update({'': __builtins__})f, args, kwargs = unpack_apply_message(buf, user_ns, copy=False)logger.debug(\"\".format(worker_id, task_id))result = execute_task(f, args, kwargs, user_ns)logger.debug(\"\".format(worker_id, task_id))reply = {\"\": result, \"\": worker_id}message_q.put(task_id, serialize_object(reply))logger.debug(\"\")", "docstring": "TODO: docstring\n\n TODO : Cleanup debug, logdir and uid to function correctly", "id": "f2807:m2"} {"signature": "def start_file_logger(filename, name='', level=logging.DEBUG, format_string=None):", "body": "if format_string is None:format_string = \"\"global loggerlogger = logging.getLogger(name)logger.setLevel(level)handler = logging.FileHandler(filename)handler.setLevel(level)formatter = logging.Formatter(format_string, datefmt='')handler.setFormatter(formatter)logger.addHandler(handler)", "docstring": "Add a stream log handler.\n\n Parameters\n ---------\n\n filename: string\n Name of the file to write logs to. Required.\n name: string\n Logger name. Default=\"parsl.executors.interchange\"\n level: logging.LEVEL\n Set the logging level. Default=logging.DEBUG\n - format_string (string): Set the format string\n format_string: string\n Format string to use.\n\n Returns\n -------\n None.", "id": "f2808:m0"} {"signature": "def starter(comm_q, *args, **kwargs):", "body": "ic = Interchange(*args, **kwargs)comm_q.put(ic.worker_port)ic.start()logger.debug(\"\")", "docstring": "Start the interchange process\n\n The executor is expected to call this function. 
The args, kwargs match that of the Interchange.__init__", "id": "f2808:m1"} {"signature": "def start(self):", "body": "logger.info(\"\")while True:socks = dict(self.poller.poll())if socks.get(self.task_incoming) == zmq.POLLIN:message = self.task_incoming.recv_multipart()logger.debug(\"\")self.worker_messages.send_multipart(message)logger.debug(\"\")if socks.get(self.worker_messages) == zmq.POLLIN:message = self.worker_messages.recv_multipart()logger.debug(\"\")self.result_outgoing.send_multipart(message[:])logger.debug(\"\")", "docstring": "TODO: docstring", "id": "f2808:c0:m1"} {"signature": "def start(self):", "body": "self.outgoing_q = zmq_pipes.TasksOutgoing(\"\", self.interchange_port_range)self.incoming_q = zmq_pipes.ResultsIncoming(\"\", self.interchange_port_range)self.is_alive = Trueself._queue_management_thread = Noneself._start_queue_management_thread()self._start_local_queue_process()logger.debug(\"\".format(self._queue_management_thread))if self.provider:l_cmd = self.launch_cmd.format( task_url=self.worker_task_url,workers_per_node=self.workers_per_node,logdir=\"\".format(self.run_dir, self.label))self.launch_cmd = l_cmdlogger.debug(\"\".format(self.launch_cmd))self._scaling_enabled = self.provider.scaling_enabledlogger.debug(\"\", self.provider)if hasattr(self.provider, ''):try:for i in range(self.provider.init_blocks):block = self.provider.submit(self.launch_cmd, , self.workers_per_node)logger.debug(\"\".format(i, block))if not block:raise(ScalingFailed(self.provider.label,\"\"))self.blocks.extend([block])except Exception as e:logger.error(\"\".format(e))raise eelse:self._scaling_enabled = Falselogger.debug(\"\")", "docstring": "Create the Interchange process and connect to it.", "id": "f2809:c0:m1"} {"signature": "def _start_local_queue_process(self):", "body": "comm_q = Queue(maxsize=)self.queue_proc = Process(target=interchange.starter,args=(comm_q,),kwargs={\"\": (self.outgoing_q.port,self.incoming_q.port),\"\": self.worker_port,\"\": self.worker_port_range})self.queue_proc.start()try:worker_port = comm_q.get(block=True, timeout=)logger.debug(\"\".format(worker_port))except queue.Empty:logger.error(\"\")raise Exception(\"\")self.worker_task_url = \"\".format(self.address, worker_port)", "docstring": "TODO: docstring", "id": "f2809:c0:m2"} {"signature": "def _start_queue_management_thread(self):", "body": "if self._queue_management_thread is None:logger.debug(\"\")self._queue_management_thread = threading.Thread(target=self._queue_management_worker)self._queue_management_thread.daemon = Trueself._queue_management_thread.start()logger.debug(\"\")else:logger.debug(\"\")", "docstring": "TODO: docstring", "id": "f2809:c0:m3"} {"signature": "def _queue_management_worker(self):", "body": "logger.debug(\"\")while True:task_id, buf = self.incoming_q.get() msg = deserialize_object(buf)[]task_fut = self.tasks[task_id]logger.debug(\"\".format(task_id))if \"\" in msg:task_fut.set_result(msg[\"\"])elif \"\" in msg:passelif '' in msg:logger.warning(\"\")try:s, _ = deserialize_object(msg[''])exception = ValueError(\"\".format(s))task_fut.set_exception(exception)except Exception as e:task_fut.set_exception(DeserializationError(\"\".format(e)))else:raise BadMessage(\"\")if not self.is_alive:breaklogger.info(\"\")", "docstring": "TODO: docstring", "id": "f2809:c0:m4"} {"signature": "def submit(self, func, *args, **kwargs):", "body": "self._task_counter += task_id = self._task_counterlogger.debug(\"\".format(func, args))self.tasks[task_id] = Future()fn_buf = pack_apply_message(func, args, 
kwargs,buffer_threshold= * ,item_threshold=)self.outgoing_q.put(task_id, fn_buf)return self.tasks[task_id]", "docstring": "TODO: docstring", "id": "f2809:c0:m5"} {"signature": "def scale_out(self, blocks=):", "body": "r = []for i in range(blocks):if self.provider:block = self.provider.submit(self.launch_cmd, , self.workers_per_node)logger.debug(\"\".format(i, block))if not block:raise(ScalingFailed(self.provider.label,\"\"))self.blocks.extend([block])else:logger.error(\"\")r = Nonereturn r", "docstring": "Scales out the number of active workers by the number of blocks specified.\n\n Parameters\n ----------\n\n blocks : int\n # of blocks to scale out. Default=1\n\n Raises:\n NotImplementedError", "id": "f2809:c0:m7"} {"signature": "def scale_in(self, blocks):", "body": "to_kill = self.blocks[:blocks]if self.provider:r = self.provider.cancel(to_kill)return r", "docstring": "Scale in the number of active blocks by specified amount.\n\n The scale in method here is very rude. It doesn't give the workers\n the opportunity to finish current tasks or cleanup. This is tracked\n in issue #530\n\n Raises:\n NotImplementedError", "id": "f2809:c0:m8"} {"signature": "def status(self):", "body": "status = []if self.provider:status = self.provider.status(self.blocks)return status", "docstring": "Return status of all blocks.", "id": "f2809:c0:m9"} {"signature": "def shutdown(self, hub=True, targets='', block=False):", "body": "logger.warning(\"\")self.queue_proc.terminate()logger.warning(\"\")return True", "docstring": "Shutdown the executor, including all workers and controllers.\n\n This is not implemented.\n\n Kwargs:\n - hub (Bool): Whether the hub should be shutdown, Default:True,\n - targets (list of ints| 'all'): List of block id's to kill, Default:'all'\n - block (Bool): To block for confirmations or not\n\n Raises:\n NotImplementedError", "id": "f2809:c0:m10"} {"signature": "@abstractmethoddef start(self, *args, **kwargs):", "body": "pass", "docstring": "Start the executor.\n\n Any spin-up operations (for example: starting thread pools) should be performed here.", "id": "f2810:c0:m0"} {"signature": "@abstractmethoddef submit(self, *args, **kwargs):", "body": "pass", "docstring": "Submit.\n\n We haven't yet decided on what the args to this can be,\n whether it should just be func, args, kwargs or be the partially evaluated\n fn", "id": "f2810:c0:m1"} {"signature": "@abstractmethoddef scale_out(self, *args, **kwargs):", "body": "pass", "docstring": "Scale out method.\n\n We should have the scale out method simply take resource object\n which will have the scaling methods, scale_out itself should be a coroutine, since\n scaling tasks can be slow.", "id": "f2810:c0:m2"} {"signature": "@abstractmethoddef scale_in(self, count):", "body": "pass", "docstring": "Scale in method.\n\n Cause the executor to reduce the number of blocks by count.\n\n We should have the scale in method simply take resource object\n which will have the scaling methods, scale_in itself should be a coroutine, since\n scaling tasks can be slow.", "id": "f2810:c0:m3"} {"signature": "@abstractmethoddef shutdown(self, *args, **kwargs):", "body": "pass", "docstring": "Shutdown the executor.\n\n This includes all attached resources such as workers and controllers.", "id": "f2810:c0:m4"} {"signature": "@abstractpropertydef scaling_enabled(self):", "body": "pass", "docstring": "Specify if scaling is enabled.\n\n The callers of ParslExecutors need to differentiate between Executors\n and Executors wrapped in a resource provider", "id": 
"f2810:c0:m5"} {"signature": "@propertydef run_dir(self):", "body": "return self._run_dir", "docstring": "Path to the run directory.", "id": "f2810:c0:m6"} {"signature": "def runner(incoming_q, outgoing_q):", "body": "logger.debug(\"\")def execute_task(bufs):\"\"\"\"\"\"user_ns = locals()user_ns.update({'': __builtins__})f, args, kwargs = unpack_apply_message(bufs, user_ns, copy=False)fname = getattr(f, '', '')prefix = \"\"fname = prefix + \"\"argname = prefix + \"\"kwargname = prefix + \"\"resultname = prefix + \"\"user_ns.update({fname: f,argname: args,kwargname: kwargs,resultname: resultname})code = \"\".format(resultname, fname,argname, kwargname)try:logger.debug(\"\".format(code))exec(code, user_ns, user_ns)except Exception as e:logger.warning(\"\".format(e))raise eelse:logger.debug(\"\".format(user_ns.get(resultname)))return user_ns.get(resultname)while True:try:msg = incoming_q.get(block=True, timeout=)except queue.Empty:logger.debug(\"\")except IOError as e:logger.debug(\"\".format(e))try:outgoing_q.put(None)except Exception:passbreakexcept Exception as e:logger.debug(\"\".format(e))else:if not msg:logger.debug(\"\")outgoing_q.put(None)breakelse:logger.debug(\"\".format(msg[\"\"]))try:response_obj = execute_task(msg[''])response = {\"\": msg[\"\"],\"\": serialize_object(response_obj)}logger.debug(\"\".format(deserialize_object(response[\"\"])))except Exception as e:logger.debug(\"\".format(e))response = {\"\": msg[\"\"],\"\": serialize_object(e)}outgoing_q.put(response)logger.debug(\"\")", "docstring": "This is a function that mocks the Swift-T side.\n\n It listens on the the incoming_q for tasks and posts returns on the outgoing_q.\n\n Args:\n - incoming_q (Queue object) : The queue to listen on\n - outgoing_q (Queue object) : Queue to post results on\n\n The messages posted on the incoming_q will be of the form :\n\n .. code:: python\n\n {\n \"task_id\" : ,\n \"buffer\" : serialized buffer containing the fn, args and kwargs\n }\n\n If ``None`` is received, the runner will exit.\n\n Response messages should be of the form:\n\n .. code:: python\n\n {\n \"task_id\" : ,\n \"result\" : serialized buffer containing result\n \"exception\" : serialized exception object\n }\n\n On exiting the runner will post ``None`` to the outgoing_q", "id": "f2812:m0"} {"signature": "def __init__(self, label='', storage_access=None, working_dir=None, managed=True):", "body": "logger.debug(\"\")self.label = labelself.storage_access = storage_access if storage_access is not None else []if len(self.storage_access) > :raise ConfigurationError('')self.working_dir = working_dirself.managed = managed", "docstring": "Initialize the thread pool.\n\n Trying to implement the emews model.", "id": "f2812:c0:m0"} {"signature": "def _queue_management_worker(self):", "body": "while True:logger.debug(\"\")try:msg = self.incoming_q.get(block=True, timeout=)except queue.Empty:passexcept IOError as e:logger.debug(\"\".format(e.errno, e))returnexcept Exception as e:logger.debug(\"\".format(e))else:if msg is None:logger.debug(\"\")returnelse:logger.debug(\"\".format(msg))task_fut = self.tasks[msg['']]if '' in msg:result, _ = deserialize_object(msg[''])task_fut.set_result(result)elif '' in msg:exception, _ = deserialize_object(msg[''])task_fut.set_exception(exception)if not self.is_alive:break", "docstring": "Listen to the queue for task status messages and handle them.\n\n Depending on the message, tasks will be updated with results, exceptions,\n or updates. It expects the following messages:\n\n .. 
code:: python\n\n {\n \"task_id\" : \n \"result\" : serialized result object, if task succeeded\n ... more tags could be added later\n }\n\n {\n \"task_id\" : \n \"exception\" : serialized exception object, on failure\n }\n\n We do not support these yet, but they could be added easily.\n\n .. code:: python\n\n {\n \"task_id\" : \n \"cpu_stat\" : <>\n \"mem_stat\" : <>\n \"io_stat\" : <>\n \"started\" : tstamp\n }\n\n The `None` message is a die request.", "id": "f2812:c0:m2"} {"signature": "def weakref_cb(self, q=None):", "body": "q.put(None)", "docstring": "We do not use this yet.", "id": "f2812:c0:m3"} {"signature": "def _start_queue_management_thread(self):", "body": "logging.debug(\"\", \"\" * )if self._queue_management_thread is None:logging.debug(\"\")self._queue_management_thread = threading.Thread(target=self._queue_management_worker)self._queue_management_thread.daemon = Trueself._queue_management_thread.start()else:logging.debug(\"\")", "docstring": "Method to start the management thread as a daemon.\n\n Checks if a thread already exists, then starts it.\n Could be used later as a restart if the management thread dies.", "id": "f2812:c0:m4"} {"signature": "def shutdown(self):", "body": "self.is_alive = Falselogging.debug(\"\")self.incoming_q.put(None) self._queue_management_thread.join() logging.debug(\"\")self.worker.join()return True", "docstring": "Shutdown method, to kill the threads and workers.", "id": "f2812:c0:m5"} {"signature": "def submit(self, func, *args, **kwargs):", "body": "task_id = uuid.uuid4()logger.debug(\"\".format(func, args))self.tasks[task_id] = Future()fn_buf = pack_apply_message(func, args, kwargs,buffer_threshold= * ,item_threshold=)msg = {\"\": task_id,\"\": fn_buf}self.outgoing_q.put(msg)return self.tasks[task_id]", "docstring": "Submits work to the outgoing_q.\n\n An external process listens on this\n queue for new work.
This method is simply a pass-through and behaves like a\n submit call as described here `Python docs: `_\n\n Args:\n - func (callable) : Callable function\n - *args (list) : List of arbitrary positional arguments.\n\n Kwargs:\n - **kwargs (dict) : A dictionary of arbitrary keyword args for func.\n\n Returns:\n Future", "id": "f2812:c0:m6"} {"signature": "def scale_out(self, workers=):", "body": "raise NotImplementedError", "docstring": "Scales out the number of active workers by 1.\n\n This method is not implemented for threads and will raise the error if called.\n This would be nice to have, and can be done\n\n Raises:\n NotImplementedError", "id": "f2812:c0:m8"} {"signature": "def scale_in(self, workers):", "body": "raise NotImplementedError", "docstring": "Scale in the number of active blocks by specified amount.\n\n This method is not implemented for turbine and will raise an error if called.\n\n Raises:\n NotImplementedError", "id": "f2812:c0:m9"} {"signature": "def compose_launch_cmd(self, filepath, engine_dir, container_image):", "body": "self.engine_file = os.path.expanduser(filepath)uid = str(uuid.uuid4())engine_json = Nonetry:with open(self.engine_file, '') as f:engine_json = f.read()except OSError as e:logger.error(\"\", self.engine_file)raise ereturn", "docstring": "Reads the json contents from filepath and uses that to compose the engine launch command.\n\n Args:\n filepath: Path to the engine file\n engine_dir: CWD for the engines", "id": "f2813:c0:m2"} {"signature": "def compose_containerized_launch_cmd(self, filepath, engine_dir, container_image):", "body": "self.engine_file = os.path.expanduser(filepath)uid = str(uuid.uuid4())engine_json = Nonetry:with open(self.engine_file, '') as f:engine_json = f.read()except OSError as e:logger.error(\"\", self.engine_file)raise ereturn", "docstring": "Reads the json contents from filepath and uses that to compose the engine launch command.\n\n Notes: Add this to the ipengine launch for debug logs:\n --log-to-file --debug\n Args:\n filepath (str): Path to the engine file\n engine_dir (str): CWD for the engines.\n container_image (str): The container to be used to launch workers", "id": "f2813:c0:m3"} {"signature": "def submit(self, *args, **kwargs):", "body": "return self.lb_view.apply_async(*args, **kwargs)", "docstring": "Submits work to the thread pool.\n\n This method is simply a pass-through and behaves like a submit call as described\n here `Python docs: `_\n\n Returns:\n Future", "id": "f2813:c0:m7"} {"signature": "def scale_out(self, blocks=):", "body": "r = []for i in range(blocks):if self.provider:block = self.provider.submit(self.launch_cmd, , self.workers_per_node)logger.debug(\"\".format(i, block))if not block:raise(ScalingFailed(self.provider.label,\"\"))self.engines.extend([block])r.extend([block])else:logger.error(\"\")r = Nonereturn r", "docstring": "Scales out the number of active blocks by the specified number.\n\n Parameters:\n blocks : int\n Number of blocks to be provisioned.", "id": "f2813:c0:m8"} {"signature": "def scale_in(self, blocks):", "body": "status = dict(zip(self.engines, self.provider.status(self.engines)))to_kill = [engine for engine in status if status[engine] == \"\"][:blocks]if self.provider:r = self.provider.cancel(to_kill)else:logger.error(\"\")r = Nonereturn r", "docstring": "Scale in the number of active blocks by the specified number.", "id": "f2813:c0:m9"} {"signature": "def status(self):", "body": "if self.provider:status = 
self.provider.status(self.engines)else:status = []return status", "docstring": "Returns the status of the executor via probing the execution providers.", "id": "f2813:c0:m10"} {"signature": "def shutdown(self, hub=True, targets='', block=False):", "body": "if self.controller:logger.debug(\"\")self.controller.close()logger.debug(\"\")return True", "docstring": "Shutdown the executor, including all workers and controllers.\n\n The interface documentation for IPP is `here `_\n\n Kwargs:\n - hub (Bool): Whether the hub should be shutdown, Default:True,\n - targets (list of ints| 'all'): List of engine id's to kill, Default:'all'\n - block (Bool): To block for confirmations or not\n\n Raises:\n NotImplementedError", "id": "f2813:c0:m11"} {"signature": "def _nbytes(buf):", "body": "if isinstance(buf, memoryview):if PY3:return buf.nbyteselse:size = buf.itemsizefor dim in buf.shape:size *= dimreturn sizeelse:return len(buf)", "docstring": "Return byte-size of a memoryview or buffer.", "id": "f2814:m0"} {"signature": "def _extract_buffers(obj, threshold=MAX_BYTES):", "body": "buffers = []if isinstance(obj, CannedObject) and obj.buffers:for i, buf in enumerate(obj.buffers):nbytes = _nbytes(buf)if nbytes > threshold:obj.buffers[i] = Nonebuffers.append(buf)elif isinstance(buf, memoryview):obj.buffers[i] = buf.tobytes()elif isinstance(buf, buffer):obj.buffers[i] = bytes(buf)return buffers", "docstring": "Extract buffers larger than a certain threshold.", "id": "f2814:m1"} {"signature": "def _restore_buffers(obj, buffers):", "body": "if isinstance(obj, CannedObject) and obj.buffers:for i, buf in enumerate(obj.buffers):if buf is None:obj.buffers[i] = buffers.pop()", "docstring": "Restore extracted buffers.", "id": "f2814:m2"} {"signature": "def serialize_object(obj, buffer_threshold=MAX_BYTES, item_threshold=MAX_ITEMS):", "body": "buffers = []if istype(obj, sequence_types) and len(obj) < item_threshold:cobj = can_sequence(obj)for c in cobj:buffers.extend(_extract_buffers(c, buffer_threshold))elif istype(obj, dict) and len(obj) < item_threshold:cobj = {}for k in sorted(obj):c = can(obj[k])buffers.extend(_extract_buffers(c, buffer_threshold))cobj[k] = celse:cobj = can(obj)buffers.extend(_extract_buffers(cobj, buffer_threshold))buffers.insert(, pickle.dumps(cobj, PICKLE_PROTOCOL))return buffers", "docstring": "Serialize an object into a list of sendable buffers.\n\n Parameters\n ----------\n\n obj : object\n The object to be serialized\n buffer_threshold : int\n The threshold (in bytes) for pulling out data buffers\n to avoid pickling them.\n item_threshold : int\n The maximum number of items over which canning will iterate.\n Containers (lists, dicts) larger than this will be pickled without\n introspection.\n\n Returns\n -------\n [bufs] : list of buffers representing the serialized object.", "id": "f2814:m3"} {"signature": "def deserialize_object(buffers, g=None):", "body": "bufs = list(buffers)pobj = buffer_to_bytes_py2(bufs.pop())canned = pickle.loads(pobj)if istype(canned, sequence_types) and len(canned) < MAX_ITEMS:for c in canned:_restore_buffers(c, bufs)newobj = uncan_sequence(canned, g)elif istype(canned, dict) and len(canned) < MAX_ITEMS:newobj = {}for k in sorted(canned):c = canned[k]_restore_buffers(c, bufs)newobj[k] = uncan(c, g)else:_restore_buffers(canned, bufs)newobj = uncan(canned, g)return newobj, bufs", "docstring": "Reconstruct an object serialized by serialize_object from data buffers.\n\n Parameters\n ----------\n\n bufs : list of buffers/bytes\n\n g : globals to be used when 
uncanning\n\n Returns\n -------\n\n (newobj, bufs) : unpacked object, and the list of remaining unused buffers.", "id": "f2814:m4"} {"signature": "def pack_apply_message(f, args, kwargs, buffer_threshold=MAX_BYTES, item_threshold=MAX_ITEMS):", "body": "arg_bufs = list(chain.from_iterable(serialize_object(arg, buffer_threshold, item_threshold) for arg in args))kw_keys = sorted(kwargs.keys())kwarg_bufs = list(chain.from_iterable(serialize_object(kwargs[key], buffer_threshold, item_threshold) for key in kw_keys))info = dict(nargs=len(args), narg_bufs=len(arg_bufs), kw_keys=kw_keys)msg = [pickle.dumps(can(f), PICKLE_PROTOCOL)]msg.append(pickle.dumps(info, PICKLE_PROTOCOL))msg.extend(arg_bufs)msg.extend(kwarg_bufs)return msg", "docstring": "Pack up a function, args, and kwargs to be sent over the wire.\n\n Each element of args/kwargs will be canned for special treatment,\n but inspection will not go any deeper than that.\n\n Any object whose data is larger than `threshold` will not have their data copied\n (only numpy arrays and bytes/buffers support zero-copy)\n\n Message will be a list of bytes/buffers of the format:\n\n [ cf, pinfo, , ]\n\n With length at least two + len(args) + len(kwargs)", "id": "f2814:m5"} {"signature": "def unpack_apply_message(bufs, g=None, copy=True):", "body": "bufs = list(bufs) assert len(bufs) >= , \"\"pf = buffer_to_bytes_py2(bufs.pop())f = uncan(pickle.loads(pf), g)pinfo = buffer_to_bytes_py2(bufs.pop())info = pickle.loads(pinfo)arg_bufs, kwarg_bufs = bufs[:info['']], bufs[info['']:]args = []for i in range(info['']):arg, arg_bufs = deserialize_object(arg_bufs, g)args.append(arg)args = tuple(args)assert not arg_bufs, \"\"kwargs = {}for key in info['']:kwarg, kwarg_bufs = deserialize_object(kwarg_bufs, g)kwargs[key] = kwargassert not kwarg_bufs, \"\"return f, args, kwargs", "docstring": "Unpack f,args,kwargs from buffers packed by pack_apply_message().\n\n Returns: original f,args,kwargs", "id": "f2814:m6"} {"signature": "def _get_cell_type(a=None):", "body": "def inner():return areturn type(py3compat.get_closure(inner)[])", "docstring": "The type of a closure cell doesn't seem to be importable, so just create one.", "id": "f2817:m0"} {"signature": "def interactive(f):", "body": "if isinstance(f, FunctionType):mainmod = __import__('')f = FunctionType(f.__code__, mainmod.__dict__,f.__name__, f.__defaults__,)f.__module__ = ''return f", "docstring": "Decorator for making functions appear as interactively defined.\n\n This results in the function being linked to the user_ns as globals()\n instead of the module globals().", "id": "f2817:m1"} {"signature": "def use_dill():", "body": "import dillfrom . import serializeserialize.pickle = dillcan_map.pop(FunctionType, None)", "docstring": "Use dill to expand serialization support.\n\n Adds support for object methods and closures to serialization.", "id": "f2817:m2"} {"signature": "def use_cloudpickle():", "body": "import cloudpicklefrom . import serializeserialize.pickle = cloudpicklecan_map.pop(FunctionType, None)", "docstring": "Use cloudpickle to expand serialization support.\n\n Adds support for object methods and closures to serialization.", "id": "f2817:m3"} {"signature": "def use_pickle():", "body": "from . 
import serializeserialize.pickle = serialize._stdlib_picklecan_map[FunctionType] = _original_can_map[FunctionType]", "docstring": "Revert to using stdlib pickle.\n\n Reverts custom serialization enabled by use_dill|cloudpickle.", "id": "f2817:m4"} {"signature": "def _import_mapping(mapping, original=None):", "body": "for key, value in list(mapping.items()):if isinstance(key, string_types):try:cls = import_item(key)except Exception:if original and key not in original:print(\"\", key, exc_info=True)mapping.pop(key)else:mapping[cls] = mapping.pop(key)", "docstring": "Import any string-keys in a type mapping.", "id": "f2817:m5"} {"signature": "def istype(obj, check):", "body": "if isinstance(check, tuple):for cls in check:if type(obj) is cls:return Truereturn Falseelse:return type(obj) is check", "docstring": "Like isinstance(obj, check), but strict.\n\n This won't catch subclasses.", "id": "f2817:m6"} {"signature": "def can(obj):", "body": "import_needed = Falsefor cls, canner in iteritems(can_map):if isinstance(cls, string_types):import_needed = Truebreakelif istype(obj, cls):return canner(obj)if import_needed:_import_mapping(can_map, _original_can_map)return can(obj)return obj", "docstring": "Prepare an object for pickling.", "id": "f2817:m7"} {"signature": "def can_dict(obj):", "body": "if istype(obj, dict):newobj = {}for k, v in iteritems(obj):newobj[k] = can(v)return newobjelse:return obj", "docstring": "Can the *values* of a dict.", "id": "f2817:m9"} {"signature": "def can_sequence(obj):", "body": "if istype(obj, sequence_types):t = type(obj)return t([can(i) for i in obj])else:return obj", "docstring": "Can the elements of a sequence.", "id": "f2817:m10"} {"signature": "def uncan(obj, g=None):", "body": "import_needed = Falsefor cls, uncanner in iteritems(uncan_map):if isinstance(cls, string_types):import_needed = Truebreakelif isinstance(obj, cls):return uncanner(obj, g)if import_needed:_import_mapping(uncan_map, _original_uncan_map)return uncan(obj, g)return obj", "docstring": "Invert canning.", "id": "f2817:m11"} {"signature": "def __init__(self, obj, keys=[], hook=None):", "body": "self.keys = keysself.obj = copy.copy(obj)self.hook = can(hook)for key in keys:setattr(self.obj, key, can(getattr(obj, key)))self.buffers = []", "docstring": "Can an object for safe pickling.\n\n Parameters\n ==========\n\n obj:\n The object to be canned\n keys: list (optional)\n list of attribute names that will be explicitly canned / uncanned\n hook: callable (optional)\n An optional extra callable,\n which can do additional processing of the uncanned object.\n\n Large data may be offloaded into the buffers list,\n used for zero-copy transfers.", "id": "f2817:c0:m0"} {"signature": "def __init__(self, ip_address, port_range):", "body": "self.context = zmq.Context()self.zmq_socket = self.context.socket(zmq.REQ)self.port = self.zmq_socket.bind_to_random_port(\"\".format(ip_address),min_port=port_range[],max_port=port_range[])", "docstring": "Parameters\n----------\n\nip_address: str\n IP address of the client (where Parsl runs)\nport_range: tuple(int, int)\n Port range for the comms between client and interchange", "id": "f2819:c0:m0"} {"signature": "def run(self, message):", "body": "self.zmq_socket.send_pyobj(message, copy=True)reply = self.zmq_socket.recv_pyobj()return reply", "docstring": "This function needs to be fast at the same time aware of the possibility of\n ZMQ pipes overflowing.\n\n The timeout increases slowly if contention is detected on ZMQ pipes.\n We could set copy=False and get slightly 
better latency but this results\n in ZMQ sockets reaching a broken state once there are ~10k tasks in flight.\n This issue can be magnified if each serialized buffer itself is larger.", "id": "f2819:c0:m1"} {"signature": "def __init__(self, ip_address, port_range):", "body": "self.context = zmq.Context()self.zmq_socket = self.context.socket(zmq.DEALER)self.zmq_socket.set_hwm()self.port = self.zmq_socket.bind_to_random_port(\"\".format(ip_address),min_port=port_range[],max_port=port_range[])self.poller = zmq.Poller()self.poller.register(self.zmq_socket, zmq.POLLOUT)", "docstring": "Parameters\n----------\n\nip_address: str\n IP address of the client (where Parsl runs)\nport_range: tuple(int, int)\n Port range for the comms between client and interchange", "id": "f2819:c1:m0"} {"signature": "def put(self, message):", "body": "timeout_ms = while True:socks = dict(self.poller.poll(timeout=timeout_ms))if self.zmq_socket in socks and socks[self.zmq_socket] == zmq.POLLOUT:self.zmq_socket.send_pyobj(message, copy=True)returnelse:timeout_ms += logger.debug(\"\".format(timeout_ms))", "docstring": "This function needs to be fast at the same time aware of the possibility of\n ZMQ pipes overflowing.\n\n The timeout increases slowly if contention is detected on ZMQ pipes.\n We could set copy=False and get slightly better latency but this results\n in ZMQ sockets reaching a broken state once there are ~10k tasks in flight.\n This issue can be magnified if each serialized buffer itself is larger.", "id": "f2819:c1:m1"} {"signature": "def __init__(self, ip_address, port_range):", "body": "self.context = zmq.Context()self.results_receiver = self.context.socket(zmq.DEALER)self.results_receiver.set_hwm()self.port = self.results_receiver.bind_to_random_port(\"\".format(ip_address),min_port=port_range[],max_port=port_range[])", "docstring": "Parameters\n----------\n\nip_address: str\n IP address of the client (where Parsl runs)\nport_range: tuple(int, int)\n Port range for the comms between client and interchange", "id": "f2819:c2:m0"} {"signature": "def execute_task(bufs):", "body": "user_ns = locals()user_ns.update({'': __builtins__})f, args, kwargs = unpack_apply_message(bufs, user_ns, copy=False)prefix = \"\"fname = prefix + \"\"argname = prefix + \"\"kwargname = prefix + \"\"resultname = prefix + \"\"user_ns.update({fname: f,argname: args,kwargname: kwargs,resultname: resultname})code = \"\".format(resultname, fname,argname, kwargname)try:exec(code, user_ns, user_ns)except Exception as e:logger.warning(\"\".format(e), exc_info=True)raise eelse:return user_ns.get(resultname)", "docstring": "Deserialize the buffer and execute the task.\n\n Returns the result or raises an exception.", "id": "f2820:m0"} {"signature": "def worker(worker_id, pool_id, task_queue, result_queue, worker_queue):", "body": "start_file_logger(''.format(args.logdir, pool_id, worker_id),worker_id,name=\"\",level=logging.DEBUG if args.debug else logging.INFO)logger.info(''.format(worker_id))if args.debug:logger.debug(\"\")while True:worker_queue.put(worker_id)req = task_queue.get()tid = req['']logger.info(\"\".format(tid))try:worker_queue.get()except queue.Empty:logger.warning(\"\".format(worker_id))passtry:result = execute_task(req[''])serialized_result = serialize_object(result)except Exception:result_package = {'': tid, '': serialize_object(RemoteExceptionWrapper(*sys.exc_info()))}else:result_package = {'': tid, '': serialized_result}logger.info(\"\".format(tid))pkl_package = 
pickle.dumps(result_package)result_queue.put(pkl_package)", "docstring": "Put request token into queue\nGet task from task_queue\nPop request from queue\nPut result into result_queue", "id": "f2820:m1"} {"signature": "def start_file_logger(filename, rank, name='', level=logging.DEBUG, format_string=None):", "body": "if format_string is None:format_string = \"\".format(rank)global loggerlogger = logging.getLogger(name)logger.setLevel(logging.DEBUG)handler = logging.FileHandler(filename)handler.setLevel(level)formatter = logging.Formatter(format_string, datefmt='')handler.setFormatter(formatter)logger.addHandler(handler)", "docstring": "Add a stream log handler.\n\n Args:\n - filename (string): Name of the file to write logs to\n - name (string): Logger name\n - level (logging.LEVEL): Set the logging level.\n - format_string (string): Set the format string\n\n Returns:\n - None", "id": "f2820:m2"} {"signature": "def set_stream_logger(name='', level=logging.DEBUG, format_string=None):", "body": "if format_string is None:format_string = \"\"global loggerlogger = logging.getLogger(name)logger.setLevel(logging.DEBUG)handler = logging.StreamHandler()handler.setLevel(level)formatter = logging.Formatter(format_string, datefmt='')handler.setFormatter(formatter)logger.addHandler(handler)", "docstring": "Add a stream log handler.\n\n Args:\n - name (string) : Set the logger name.\n - level (logging.LEVEL) : Set to logging.DEBUG by default.\n - format_string (sting) : Set to None by default.\n\n Returns:\n - None", "id": "f2820:m3"} {"signature": "def __init__(self,task_q_url=\"\",result_q_url=\"\",cores_per_worker=,max_workers=float(''),prefetch_capacity=,uid=None,block_id=None,heartbeat_threshold=,heartbeat_period=,poll_period=):", "body": "logger.info(\"\")self.context = zmq.Context()self.task_incoming = self.context.socket(zmq.DEALER)self.task_incoming.setsockopt(zmq.IDENTITY, uid.encode(''))self.task_incoming.setsockopt(zmq.LINGER, )self.task_incoming.connect(task_q_url)self.result_outgoing = self.context.socket(zmq.DEALER)self.result_outgoing.setsockopt(zmq.IDENTITY, uid.encode(''))self.result_outgoing.setsockopt(zmq.LINGER, )self.result_outgoing.connect(result_q_url)logger.info(\"\")self.uid = uidself.block_id = block_idcores_on_node = multiprocessing.cpu_count()self.max_workers = max_workersself.prefetch_capacity = prefetch_capacityself.worker_count = min(max_workers,math.floor(cores_on_node / cores_per_worker))logger.info(\"\".format(self.worker_count))self.pending_task_queue = multiprocessing.Queue()self.pending_result_queue = multiprocessing.Queue()self.ready_worker_queue = multiprocessing.Queue()self.max_queue_size = self.prefetch_capacity + self.worker_countself.tasks_per_round = self.heartbeat_period = heartbeat_periodself.heartbeat_threshold = heartbeat_thresholdself.poll_period = poll_period", "docstring": "Parameters\n----------\nworker_url : str\n Worker url on which workers will attempt to connect back\n\nuid : str\n string unique identifier\n\nblock_id : str\n Block identifier that maps managers to the provider blocks they belong to.\n\ncores_per_worker : float\n cores to be assigned to each worker. Oversubscription is possible\n by setting cores_per_worker < 1.0. 
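The worker loop recorded above (f2820:m1) follows a simple request-token protocol: each worker advertises itself on a ready queue, blocks for a task, executes it, and pushes a pickled result package. The snippet below is a minimal, self-contained sketch of that pattern using only the standard library; the queue layout and message keys are illustrative assumptions, not Parsl's actual worker pool.

```python
import multiprocessing
import pickle
import queue


def toy_worker(worker_id, task_q, result_q, ready_q):
    """Hypothetical worker: advertise capacity, take a task, return a result."""
    while True:
        ready_q.put(worker_id)            # request token: "I am free"
        task = task_q.get()               # block until a task arrives
        if task is None:                  # poison pill -> shut down
            break
        try:
            ready_q.get_nowait()          # consume our own token
        except queue.Empty:
            pass
        try:
            result = {"task_id": task["task_id"],
                      "result": task["func"](*task["args"])}
        except Exception as exc:          # ship failures back as well
            result = {"task_id": task["task_id"], "exception": repr(exc)}
        result_q.put(pickle.dumps(result))


if __name__ == "__main__":
    task_q, result_q, ready_q = (multiprocessing.Queue() for _ in range(3))
    procs = [multiprocessing.Process(target=toy_worker,
                                     args=(i, task_q, result_q, ready_q))
             for i in range(2)]
    for p in procs:
        p.start()
    task_q.put({"task_id": 0, "func": pow, "args": (2, 10)})
    print(pickle.loads(result_q.get()))   # {'task_id': 0, 'result': 1024}
    for _ in procs:
        task_q.put(None)
    for p in procs:
        p.join()
```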
Default=1\n\nmax_workers : int\n caps the maximum number of workers that can be launched.\n default: infinity\n\nprefetch_capacity : int\n Number of tasks that could be prefetched over available worker capacity.\n When there are a few tasks (<100) or when tasks are long running, this option should\n be set to 0 for better load balancing. Default is 0.\n\nheartbeat_threshold : int\n Seconds since the last message from the interchange after which the\n interchange is assumed to be un-available, and the manager initiates shutdown. Default:120s\n\n Number of seconds since the last message from the interchange after which the worker\n assumes that the interchange is lost and the manager shuts down. Default:120\n\nheartbeat_period : int\n Number of seconds after which a heartbeat message is sent to the interchange\n\npoll_period : int\n Timeout period used by the manager in milliseconds. Default: 10ms", "id": "f2820:c0:m0"} {"signature": "def create_reg_message(self):", "body": "msg = {'': PARSL_VERSION,'': \"\".format(sys.version_info.major,sys.version_info.minor,sys.version_info.micro),'': self.worker_count,'': self.block_id,'': self.prefetch_capacity,'': self.worker_count + self.prefetch_capacity,'': platform.system(),'': platform.node(),'': os.getcwd(),}b_msg = json.dumps(msg).encode('')return b_msg", "docstring": "Creates a registration message to identify the worker to the interchange", "id": "f2820:c0:m1"} {"signature": "def heartbeat(self):", "body": "heartbeat = (HEARTBEAT_CODE).to_bytes(, \"\")r = self.task_incoming.send(heartbeat)logger.debug(\"\".format(r))", "docstring": "Send heartbeat to the incoming task queue", "id": "f2820:c0:m2"} {"signature": "def pull_tasks(self, kill_event):", "body": "logger.info(\"\")poller = zmq.Poller()poller.register(self.task_incoming, zmq.POLLIN)msg = self.create_reg_message()logger.debug(\"\".format(msg))self.task_incoming.send(msg)last_beat = time.time()last_interchange_contact = time.time()task_recv_counter = poll_timer = self.poll_periodwhile not kill_event.is_set():ready_worker_count = self.ready_worker_queue.qsize()pending_task_count = self.pending_task_queue.qsize()logger.debug(\"\".format(ready_worker_count,pending_task_count))if time.time() > last_beat + self.heartbeat_period:self.heartbeat()last_beat = time.time()if pending_task_count < self.max_queue_size and ready_worker_count > :logger.debug(\"\".format(ready_worker_count))msg = ((ready_worker_count).to_bytes(, \"\"))self.task_incoming.send(msg)socks = dict(poller.poll(timeout=poll_timer))if self.task_incoming in socks and socks[self.task_incoming] == zmq.POLLIN:poll_timer = _, pkl_msg = self.task_incoming.recv_multipart()tasks = pickle.loads(pkl_msg)last_interchange_contact = time.time()if tasks == '':logger.critical(\"\")kill_event.set()breakelif tasks == HEARTBEAT_CODE:logger.debug(\"\")else:task_recv_counter += len(tasks)logger.debug(\"\".format([t[''] for t in tasks],task_recv_counter))for task in tasks:self.pending_task_queue.put(task)else:logger.debug(\"\")if not poll_timer:poll_timer = self.poll_periodpoll_timer = min(self.heartbeat_period * , poll_timer * )if time.time() > last_interchange_contact + self.heartbeat_threshold:logger.critical(\"\")kill_event.set()logger.critical(\"\")break", "docstring": "Pull tasks from the incoming tasks 0mq pipe onto the internal\n pending task queue\n\n Parameters:\n -----------\n kill_event : threading.Event\n Event to let the thread know when it is time to die.", "id": "f2820:c0:m3"} {"signature": "def push_results(self, kill_event):", 
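The manager heartbeat and the task-request message above are both plain integers framed as fixed-width byte strings on the ZMQ pipe; the receiver tells a heartbeat apart from a capacity request by comparing against a sentinel code. A hedged sketch of that framing follows; the code value and the 4-byte little-endian layout are assumptions for illustration, not Parsl's actual constants.

```python
# Illustrative framing of an integer heartbeat / task-request count as bytes.
HEARTBEAT_CODE = 2 ** 31 - 1            # assumed sentinel value


def encode_count(n):
    """Frame an integer (capacity request or heartbeat code) as 4 bytes."""
    return n.to_bytes(4, "little")


def is_heartbeat(frame):
    return int.from_bytes(frame, "little") == HEARTBEAT_CODE


beat = encode_count(HEARTBEAT_CODE)
req = encode_count(5)                   # "I can take 5 more tasks"
assert is_heartbeat(beat) and not is_heartbeat(req)
print(int.from_bytes(req, "little"))    # 5
```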
"body": "logger.debug(\"\")push_poll_period = max(, self.poll_period) / logger.debug(\"\".format(push_poll_period))last_beat = time.time()items = []while not kill_event.is_set():try:r = self.pending_result_queue.get(block=True, timeout=push_poll_period)items.append(r)except queue.Empty:passexcept Exception as e:logger.exception(\"\".format(e))if len(items) >= self.max_queue_size or time.time() > last_beat + push_poll_period:last_beat = time.time()if items:self.result_outgoing.send_multipart(items)items = []logger.critical(\"\")", "docstring": "Listens on the pending_result_queue and sends out results via 0mq\n\n Parameters:\n -----------\n kill_event : threading.Event\n Event to let the thread know when it is time to die.", "id": "f2820:c0:m4"} {"signature": "def start(self):", "body": "start = time.time()self._kill_event = threading.Event()self.procs = {}for worker_id in range(self.worker_count):p = multiprocessing.Process(target=worker, args=(worker_id,self.uid,self.pending_task_queue,self.pending_result_queue,self.ready_worker_queue,))p.start()self.procs[worker_id] = plogger.debug(\"\")self._task_puller_thread = threading.Thread(target=self.pull_tasks,args=(self._kill_event,))self._result_pusher_thread = threading.Thread(target=self.push_results,args=(self._kill_event,))self._task_puller_thread.start()self._result_pusher_thread.start()logger.info(\"\")self._kill_event.wait()logger.critical(\"\")self._task_puller_thread.join()self._result_pusher_thread.join()for proc_id in self.procs:self.procs[proc_id].terminate()logger.critical(\"\".format(self.procs[proc_id],self.procs[proc_id].is_alive()))self.procs[proc_id].join()logger.debug(\"\".format(self.procs[proc_id]))self.task_incoming.close()self.result_outgoing.close()self.context.term()delta = time.time() - startlogger.info(\"\".format(delta))return", "docstring": "Start the worker processes.\n\n TODO: Move task receiving to a thread", "id": "f2820:c0:m5"} {"signature": "def start_file_logger(filename, name='', level=logging.DEBUG, format_string=None):", "body": "if format_string is None:format_string = \"\"global loggerlogger = logging.getLogger(name)logger.setLevel(level)handler = logging.FileHandler(filename)handler.setLevel(level)formatter = logging.Formatter(format_string, datefmt='')handler.setFormatter(formatter)logger.addHandler(handler)", "docstring": "Add a stream log handler.\n\n Parameters\n ---------\n\n filename: string\n Name of the file to write logs to. Required.\n name: string\n Logger name. Default=\"parsl.executors.interchange\"\n level: logging.LEVEL\n Set the logging level. Default=logging.DEBUG\n - format_string (string): Set the format string\n format_string: string\n Format string to use.\n\n Returns\n -------\n None.", "id": "f2821:m0"} {"signature": "def starter(comm_q, *args, **kwargs):", "body": "ic = Interchange(*args, **kwargs)comm_q.put((ic.worker_task_port,ic.worker_result_port))ic.start()", "docstring": "Start the interchange process\n\n The executor is expected to call this function. 
The args, kwargs match that of the Interchange.__init__", "id": "f2821:m1"} {"signature": "def __init__(self,client_address=\"\",interchange_address=\"\",client_ports=(, , ),worker_ports=None,worker_port_range=(, ),heartbeat_threshold=,logdir=\"\",logging_level=logging.INFO,poll_period=,suppress_failure=False,):", "body": "self.logdir = logdirtry:os.makedirs(self.logdir)except FileExistsError:passstart_file_logger(\"\".format(self.logdir), level=logging_level)logger.debug(\"\")self.client_address = client_addressself.interchange_address = interchange_addressself.suppress_failure = suppress_failureself.poll_period = poll_periodlogger.info(\"\".format(client_address, client_ports[], client_ports[], client_ports[]))self.context = zmq.Context()self.task_incoming = self.context.socket(zmq.DEALER)self.task_incoming.set_hwm()self.task_incoming.RCVTIMEO = self.task_incoming.connect(\"\".format(client_address, client_ports[]))self.results_outgoing = self.context.socket(zmq.DEALER)self.results_outgoing.set_hwm()self.results_outgoing.connect(\"\".format(client_address, client_ports[]))self.command_channel = self.context.socket(zmq.REP)self.command_channel.RCVTIMEO = self.command_channel.connect(\"\".format(client_address, client_ports[]))logger.info(\"\")self.pending_task_queue = queue.Queue(maxsize= ** )self.worker_ports = worker_portsself.worker_port_range = worker_port_rangeself.task_outgoing = self.context.socket(zmq.ROUTER)self.task_outgoing.set_hwm()self.results_incoming = self.context.socket(zmq.ROUTER)self.results_incoming.set_hwm()if self.worker_ports:self.worker_task_port = self.worker_ports[]self.worker_result_port = self.worker_ports[]self.task_outgoing.bind(\"\".format(self.worker_task_port))self.results_incoming.bind(\"\".format(self.worker_result_port))else:self.worker_task_port = self.task_outgoing.bind_to_random_port('',min_port=worker_port_range[],max_port=worker_port_range[], max_tries=)self.worker_result_port = self.results_incoming.bind_to_random_port('',min_port=worker_port_range[],max_port=worker_port_range[], max_tries=)logger.info(\"\".format(self.worker_task_port, self.worker_result_port))self._ready_manager_queue = {}self.heartbeat_threshold = heartbeat_thresholdself.current_platform = {'': PARSL_VERSION,'': \"\".format(sys.version_info.major,sys.version_info.minor,sys.version_info.micro),'': platform.system(),'': platform.node(),'': os.getcwd()}logger.info(\"\".format(self.current_platform))", "docstring": "Parameters\n----------\nclient_address : str\n The ip address at which the parsl client can be reached. Default: \"127.0.0.1\"\n\ninterchange_address : str\n The ip address at which the workers will be able to reach the Interchange. Default: \"127.0.0.1\"\n\nclient_ports : triple(int, int, int)\n The ports at which the client can be reached\n\nworker_ports : tuple(int, int)\n The specific two ports at which workers will connect to the Interchange. Default: None\n\nworker_port_range : tuple(int, int)\n The interchange picks ports at random from the range which will be used by workers.\n This is overridden when the worker_ports option is set. Defauls: (54000, 55000)\n\nheartbeat_threshold : int\n Number of seconds since the last heartbeat after which worker is considered lost.\n\nlogdir : str\n Parsl log directory paths. Logs and temp files go here. Default: '.'\n\nlogging_level : int\n Logging level as defined in the logging module. Default: logging.INFO (20)\n\npoll_period : int\n The main thread polling period, in milliseconds. 
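The Interchange above faces the managers through ROUTER sockets, while each manager connects with a DEALER socket carrying an explicit identity; the ROUTER then addresses replies by prefixing that identity frame, which is how task batches reach a specific manager. Below is a small self-contained illustration of this addressing; the identity string, payload and ports are made-up values, only the frame layout follows the pattern in the records above.

```python
import pickle
import zmq

ctx = zmq.Context()

router = ctx.socket(zmq.ROUTER)                    # interchange side
port = router.bind_to_random_port("tcp://127.0.0.1")

dealer = ctx.socket(zmq.DEALER)                    # manager side
dealer.setsockopt(zmq.IDENTITY, b"manager-0")
dealer.connect("tcp://127.0.0.1:{}".format(port))

dealer.send((1).to_bytes(4, "little"))             # "I can take 1 task"
ident, request = router.recv_multipart()           # ROUTER sees the identity
print(ident, int.from_bytes(request, "little"))    # b'manager-0' 1

tasks = [{"task_id": 7, "buffer": b"..."}]
router.send_multipart([ident, b"", pickle.dumps(tasks)])   # reply by identity
_, payload = dealer.recv_multipart()
print(pickle.loads(payload))                       # [{'task_id': 7, ...}]

dealer.close()
router.close()
ctx.term()
```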
Default: 10ms\n\nsuppress_failure : Bool\n When set to True, the interchange will attempt to suppress failures. Default: False", "id": "f2821:c3:m0"} {"signature": "def get_tasks(self, count):", "body": "tasks = []for i in range(, count):try:x = self.pending_task_queue.get(block=False)except queue.Empty:breakelse:tasks.append(x)return tasks", "docstring": "Obtains a batch of tasks from the internal pending_task_queue\n\n Parameters\n ----------\n count: int\n Count of tasks to get from the queue\n\n Returns\n -------\n List of upto count tasks. May return fewer than count down to an empty list\n eg. [{'task_id':, 'buffer':} ... ]", "id": "f2821:c3:m1"} {"signature": "def migrate_tasks_to_internal(self, kill_event):", "body": "logger.info(\"\")task_counter = poller = zmq.Poller()poller.register(self.task_incoming, zmq.POLLIN)while not kill_event.is_set():try:msg = self.task_incoming.recv_pyobj()except zmq.Again:logger.debug(\"\".format(self.pending_task_queue.qsize()))continueif msg == '':kill_event.set()breakelse:self.pending_task_queue.put(msg)task_counter += logger.debug(\"\".format(task_counter))", "docstring": "Pull tasks from the incoming tasks 0mq pipe onto the internal\n pending task queue\n\n Parameters:\n -----------\n kill_event : threading.Event\n Event to let the thread know when it is time to die.", "id": "f2821:c3:m2"} {"signature": "def _command_server(self, kill_event):", "body": "logger.debug(\"\")while not kill_event.is_set():try:command_req = self.command_channel.recv_pyobj()logger.debug(\"\".format(command_req))if command_req == \"\":outstanding = self.pending_task_queue.qsize()for manager in self._ready_manager_queue:outstanding += len(self._ready_manager_queue[manager][''])reply = outstandingelif command_req == \"\":num_workers = for manager in self._ready_manager_queue:num_workers += self._ready_manager_queue[manager]['']reply = num_workerselif command_req == \"\":reply = []for manager in self._ready_manager_queue:resp = {'': manager.decode(''),'': self._ready_manager_queue[manager][''],'': self._ready_manager_queue[manager][''],'': len(self._ready_manager_queue[manager]['']),'': self._ready_manager_queue[manager]['']}reply.append(resp)elif command_req.startswith(\"\"):cmd, s_manager = command_req.split('')manager = s_manager.encode('')logger.info(\"\".format(manager))if manager in self._ready_manager_queue:self._ready_manager_queue[manager][''] = Falsereply = Trueelse:reply = Falseelif command_req == \"\":logger.info(\"\")kill_event.set()reply = Trueelse:reply = Nonelogger.debug(\"\".format(reply))self.command_channel.send_pyobj(reply)except zmq.Again:logger.debug(\"\")continue", "docstring": "Command server to run async command to the interchange", "id": "f2821:c3:m3"} {"signature": "def start(self, poll_period=None):", "body": "logger.info(\"\")if poll_period is None:poll_period = self.poll_periodstart = time.time()count = self._kill_event = threading.Event()self._task_puller_thread = threading.Thread(target=self.migrate_tasks_to_internal,args=(self._kill_event,))self._task_puller_thread.start()self._command_thread = threading.Thread(target=self._command_server,args=(self._kill_event,))self._command_thread.start()poller = zmq.Poller()poller.register(self.task_outgoing, zmq.POLLIN)poller.register(self.results_incoming, zmq.POLLIN)interesting_managers = set()while not self._kill_event.is_set():self.socks = dict(poller.poll(timeout=poll_period))if self.task_outgoing in self.socks and self.socks[self.task_outgoing] == zmq.POLLIN:logger.debug(\"\")message = 
self.task_outgoing.recv_multipart()manager = message[]if manager not in self._ready_manager_queue:reg_flag = Falsetry:msg = json.loads(message[].decode(''))reg_flag = Trueexcept Exception:logger.warning(\"\".format(manager))logger.debug(\"\".format(message[]))self._ready_manager_queue[manager] = {'': time.time(),'': ,'': None,'': ,'': True,'': []}if reg_flag is True:interesting_managers.add(manager)logger.info(\"\".format(manager))self._ready_manager_queue[manager].update(msg)logger.info(\"\".format(manager, msg))if (msg[''].rsplit(\"\", )[] != self.current_platform[''].rsplit(\"\", )[] ormsg[''] != self.current_platform['']):logger.warn(\"\".format(manager))if self.suppress_failure is False:logger.debug(\"\")self._kill_event.set()e = ManagerLost(manager)result_package = {'': -, '': serialize_object(e)}pkl_package = pickle.dumps(result_package)self.results_outgoing.send(pkl_package)logger.warning(\"\")else:logger.debug(\"\")else:logger.info(\"\".format(manager, msg['']))logger.info(\"\".format(manager,msg[''].rsplit(\"\", )[]))else:if self.suppress_failure is False:self._kill_event.set()e = BadRegistration(manager, critical=True)result_package = {'': -, '': serialize_object(e)}pkl_package = pickle.dumps(result_package)self.results_outgoing.send(pkl_package)else:logger.debug(\"\".format(manager))else:tasks_requested = int.from_bytes(message[], \"\")self._ready_manager_queue[manager][''] = time.time()if tasks_requested == HEARTBEAT_CODE:logger.debug(\"\".format(manager))self.task_outgoing.send_multipart([manager, b'', PKL_HEARTBEAT_CODE])else:logger.debug(\"\".format(manager, tasks_requested))self._ready_manager_queue[manager][''] = tasks_requestedinteresting_managers.add(manager)logger.debug(\"\")logger.debug(\"\".format(len(self._ready_manager_queue),len(interesting_managers)))if interesting_managers and not self.pending_task_queue.empty():shuffled_managers = list(interesting_managers)random.shuffle(shuffled_managers)while shuffled_managers and not self.pending_task_queue.empty(): manager = shuffled_managers.pop()tasks_inflight = len(self._ready_manager_queue[manager][''])real_capacity = min(self._ready_manager_queue[manager][''],self._ready_manager_queue[manager][''] - tasks_inflight)if (real_capacity and self._ready_manager_queue[manager]['']):tasks = self.get_tasks(real_capacity)if tasks:self.task_outgoing.send_multipart([manager, b'', pickle.dumps(tasks)])task_count = len(tasks)count += task_counttids = [t[''] for t in tasks]self._ready_manager_queue[manager][''] -= task_countself._ready_manager_queue[manager][''].extend(tids)logger.debug(\"\".format(tids, manager))if self._ready_manager_queue[manager][''] > :logger.debug(\"\".format(manager, self._ready_manager_queue[manager]['']))else:logger.debug(\"\".format(manager))interesting_managers.remove(manager)else:interesting_managers.remove(manager)logger.debug(\"\".format(len(interesting_managers)))else:logger.debug(\"\")if self.results_incoming in self.socks and self.socks[self.results_incoming] == zmq.POLLIN:logger.debug(\"\")manager, *b_messages = self.results_incoming.recv_multipart()if manager not in self._ready_manager_queue:logger.warning(\"\".format(manager))else:logger.debug(\"\".format(len(b_messages)))for b_message in b_messages:r = pickle.loads(b_message)self._ready_manager_queue[manager][''].remove(r[''])self.results_outgoing.send_multipart(b_messages)logger.debug(\"\".format(self._ready_manager_queue[manager]['']))logger.debug(\"\")logger.debug(\"\")bad_managers = [manager for manager in self._ready_manager_queue 
iftime.time() - self._ready_manager_queue[manager][''] > self.heartbeat_threshold]for manager in bad_managers:logger.debug(\"\".format(self._ready_manager_queue[manager][''], time.time()))logger.warning(\"\".format(manager))for tid in self._ready_manager_queue[manager]['']:try:raise ManagerLost(manager)except Exception:result_package = {'': tid, '': serialize_object(RemoteExceptionWrapper(*sys.exc_info()))}pkl_package = pickle.dumps(result_package)self.results_outgoing.send(pkl_package)logger.warning(\"\")self._ready_manager_queue.pop(manager, '')logger.debug(\"\")logger.debug(\"\")delta = time.time() - startlogger.info(\"\".format(count, delta))logger.warning(\"\")", "docstring": "Start the NeedNameQeueu\n\n Parameters:\n ----------\n\n TODO: Move task receiving to a thread", "id": "f2821:c3:m4"} {"signature": "def initialize_scaling(self):", "body": "debug_opts = \"\" if self.worker_debug else \"\"max_workers = \"\" if self.max_workers == float('') else \"\".format(self.max_workers)worker_logdir = \"\".format(self.run_dir, self.label)if self.worker_logdir_root is not None:worker_logdir = \"\".format(self.worker_logdir_root, self.label)l_cmd = self.launch_cmd.format(debug=debug_opts,prefetch_capacity=self.prefetch_capacity,task_url=self.worker_task_url,result_url=self.worker_result_url,cores_per_worker=self.cores_per_worker,max_workers=max_workers,nodes_per_block=self.provider.nodes_per_block,heartbeat_period=self.heartbeat_period,heartbeat_threshold=self.heartbeat_threshold,poll_period=self.poll_period,logdir=worker_logdir)self.launch_cmd = l_cmdlogger.debug(\"\".format(self.launch_cmd))self._scaling_enabled = self.provider.scaling_enabledlogger.debug(\"\", self.provider)if hasattr(self.provider, ''):try:self.scale_out(blocks=self.provider.init_blocks)except Exception as e:logger.error(\"\".format(e))raise e", "docstring": "Compose the launch command and call the scale_out\n\n This should be implemented in the child classes to take care of\n executor specific oddities.", "id": "f2822:c0:m1"} {"signature": "def start(self):", "body": "self.outgoing_q = zmq_pipes.TasksOutgoing(\"\", self.interchange_port_range)self.incoming_q = zmq_pipes.ResultsIncoming(\"\", self.interchange_port_range)self.command_client = zmq_pipes.CommandClient(\"\", self.interchange_port_range)self.is_alive = Trueself._executor_bad_state = threading.Event()self._executor_exception = Noneself._queue_management_thread = Noneself._start_queue_management_thread()self._start_local_queue_process()logger.debug(\"\".format(self._queue_management_thread))if self.provider:self.initialize_scaling()else:self._scaling_enabled = Falselogger.debug(\"\")", "docstring": "Create the Interchange process and connect to it.", "id": "f2822:c0:m2"} {"signature": "def _queue_management_worker(self):", "body": "logger.debug(\"\")while not self._executor_bad_state.is_set():try:msgs = self.incoming_q.get(timeout=)except queue.Empty:logger.debug(\"\")passexcept IOError as e:logger.exception(\"\".format(e.errno, e))returnexcept Exception as e:logger.exception(\"\".format(e))returnelse:if msgs is None:logger.debug(\"\")returnelse:for serialized_msg in msgs:try:msg = pickle.loads(serialized_msg)tid = msg['']except pickle.UnpicklingError:raise BadMessage(\"\")except Exception:raise BadMessage(\"\")if tid == - and '' in msg:logger.warning(\"\")self._executor_exception, _ = deserialize_object(msg[''])logger.exception(\"\".format(self._executor_exception))self._executor_bad_state.set()for task in 
self.tasks:self.tasks[task].set_exception(self._executor_exception)breaktask_fut = self.tasks[tid]if '' in msg:result, _ = deserialize_object(msg[''])task_fut.set_result(result)elif '' in msg:try:s, _ = deserialize_object(msg[''])try:s.reraise()except Exception as e:task_fut.set_exception(e)except Exception as e:task_fut.set_exception(DeserializationError(\"\".format(e)))else:raise BadMessage(\"\")if not self.is_alive:breaklogger.info(\"\")", "docstring": "Listen to the queue for task status messages and handle them.\n\n Depending on the message, tasks will be updated with results, exceptions,\n or updates. It expects the following messages:\n\n .. code:: python\n\n {\n \"task_id\" : \n \"result\" : serialized result object, if task succeeded\n ... more tags could be added later\n }\n\n {\n \"task_id\" : \n \"exception\" : serialized exception object, on failure\n }\n\n We do not support these yet, but they could be added easily.\n\n .. code:: python\n\n {\n \"task_id\" : \n \"cpu_stat\" : <>\n \"mem_stat\" : <>\n \"io_stat\" : <>\n \"started\" : tstamp\n }\n\n The `None` message is a die request.", "id": "f2822:c0:m3"} {"signature": "def weakref_cb(self, q=None):", "body": "q.put(None)", "docstring": "We do not use this yet.", "id": "f2822:c0:m4"} {"signature": "def _start_local_queue_process(self):", "body": "comm_q = Queue(maxsize=)self.queue_proc = Process(target=interchange.starter,args=(comm_q,),kwargs={\"\": (self.outgoing_q.port,self.incoming_q.port,self.command_client.port),\"\": self.worker_ports,\"\": self.worker_port_range,\"\": \"\".format(self.run_dir, self.label),\"\": self.suppress_failure,\"\": self.heartbeat_threshold,\"\": self.poll_period,\"\": logging.DEBUG if self.worker_debug else logging.INFO},)self.queue_proc.start()try:(worker_task_port, worker_result_port) = comm_q.get(block=True, timeout=)except queue.Empty:logger.error(\"\")raise Exception(\"\")self.worker_task_url = \"\".format(self.address, worker_task_port)self.worker_result_url = \"\".format(self.address, worker_result_port)", "docstring": "Starts the interchange process locally\n\n Starts the interchange process locally and uses an internal command queue to\n get the worker task and result ports that the interchange has bound to.", "id": "f2822:c0:m5"} {"signature": "def _start_queue_management_thread(self):", "body": "if self._queue_management_thread is None:logger.debug(\"\")self._queue_management_thread = threading.Thread(target=self._queue_management_worker)self._queue_management_thread.daemon = Trueself._queue_management_thread.start()logger.debug(\"\")else:logger.debug(\"\")", "docstring": "Method to start the management thread as a daemon.\n\n Checks if a thread already exists, then starts it.\n Could be used later as a restart if the management thread dies.", "id": "f2822:c0:m6"} {"signature": "def hold_worker(self, worker_id):", "body": "c = self.command_client.run(\"\".format(worker_id))logger.debug(\"\".format(worker_id))return c", "docstring": "Puts a worker on hold, preventing scheduling of additional tasks to it.\n\n This is called \"hold\" mostly because this only stops scheduling of tasks,\n and does not actually kill the worker.\n\n Parameters\n ----------\n\n worker_id : str\n Worker id to be put on hold", "id": "f2822:c0:m7"} {"signature": "def _hold_block(self, block_id):", "body": "managers = self.connected_managersfor manager in managers:if manager[''] == block_id:logger.debug(\"\".format(manager['']))self.hold_worker(manager[''])", "docstring": "Sends hold command to all managers 
which are in a specific block\n\n Parameters\n ----------\n block_id : str\n Block identifier of the block to be put on hold", "id": "f2822:c0:m11"} {"signature": "def submit(self, func, *args, **kwargs):", "body": "if self._executor_bad_state.is_set():raise self._executor_exceptionself._task_counter += task_id = self._task_counterlogger.debug(\"\".format(func, args))self.tasks[task_id] = Future()fn_buf = pack_apply_message(func, args, kwargs,buffer_threshold= * ,item_threshold=)msg = {\"\": task_id,\"\": fn_buf}self.outgoing_q.put(msg)return self.tasks[task_id]", "docstring": "Submits work to the the outgoing_q.\n\n The outgoing_q is an external process listens on this\n queue for new work. This method behaves like a\n submit call as described here `Python docs: `_\n\n Args:\n - func (callable) : Callable function\n - *args (list) : List of arbitrary positional arguments.\n\n Kwargs:\n - **kwargs (dict) : A dictionary of arbitrary keyword args for func.\n\n Returns:\n Future", "id": "f2822:c0:m12"} {"signature": "def scale_out(self, blocks=):", "body": "r = []for i in range(blocks):if self.provider:external_block_id = str(len(self.blocks))launch_cmd = self.launch_cmd.format(block_id=external_block_id)internal_block = self.provider.submit(launch_cmd, , )logger.debug(\"\".format(external_block_id, internal_block))if not internal_block:raise(ScalingFailed(self.provider.label,\"\"))r.extend([external_block_id])self.blocks[external_block_id] = internal_blockelse:logger.error(\"\")r = Nonereturn r", "docstring": "Scales out the number of blocks by \"blocks\"\n\n Raises:\n NotImplementedError", "id": "f2822:c0:m14"} {"signature": "def scale_in(self, blocks=None, block_ids=[]):", "body": "if block_ids:block_ids_to_kill = block_idselse:block_ids_to_kill = list(self.blocks.keys())[:blocks]for block_id in block_ids_to_kill:self._hold_block(block_id)to_kill = [self.blocks.pop(bid) for bid in block_ids_to_kill]if self.provider:r = self.provider.cancel(to_kill)return r", "docstring": "Scale in the number of active blocks by specified amount.\n\n The scale in method here is very rude. It doesn't give the workers\n the opportunity to finish current tasks or cleanup. This is tracked\n in issue #530\n\n Parameters\n ----------\n\n blocks : int\n Number of blocks to terminate and scale_in by\n\n block_ids : list\n List of specific block ids to terminate. 
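The executor records above split the work between a submit() call that registers a Future keyed by task id and a queue-management thread that later resolves that Future from a {"task_id": ..., "result" or "exception": ...} message. The sketch below is a toy model of that split using only the standard library; the in-process worker thread stands in for the remote interchange/worker machinery and this is not the HighThroughputExecutor itself.

```python
import queue
import threading
from concurrent.futures import Future


class ToyExecutor:
    def __init__(self):
        self.tasks = {}                    # task_id -> Future
        self._counter = 0
        self._outgoing = queue.Queue()     # plays the role of the task pipe
        self._incoming = queue.Queue()     # plays the role of the result pipe
        threading.Thread(target=self._worker, daemon=True).start()
        threading.Thread(target=self._manage, daemon=True).start()

    def submit(self, func, *args, **kwargs):
        self._counter += 1
        tid = self._counter
        self.tasks[tid] = Future()
        self._outgoing.put({"task_id": tid, "func": func,
                            "args": args, "kwargs": kwargs})
        return self.tasks[tid]

    def _worker(self):                     # stands in for the remote side
        while True:
            msg = self._outgoing.get()
            try:
                out = {"task_id": msg["task_id"],
                       "result": msg["func"](*msg["args"], **msg["kwargs"])}
            except Exception as exc:
                out = {"task_id": msg["task_id"], "exception": exc}
            self._incoming.put(out)

    def _manage(self):                     # queue-management thread
        while True:
            msg = self._incoming.get()
            fut = self.tasks[msg["task_id"]]
            if "result" in msg:
                fut.set_result(msg["result"])
            else:
                fut.set_exception(msg["exception"])


if __name__ == "__main__":
    ex = ToyExecutor()
    print(ex.submit(sum, [1, 2, 3]).result(timeout=5))   # 6
```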
Optional\n\n Raises:\n NotImplementedError", "id": "f2822:c0:m15"} {"signature": "def status(self):", "body": "status = []if self.provider:status = self.provider.status(self.blocks.values())return status", "docstring": "Return status of all blocks.", "id": "f2822:c0:m16"} {"signature": "def shutdown(self, hub=True, targets='', block=False):", "body": "logger.info(\"\")self.queue_proc.terminate()logger.info(\"\")return True", "docstring": "Shutdown the executor, including all workers and controllers.\n\n This is not implemented.\n\n Kwargs:\n - hub (Bool): Whether the hub should be shutdown, Default:True,\n - targets (list of ints| 'all'): List of block id's to kill, Default:'all'\n - block (Bool): To block for confirmations or not\n\n Raises:\n NotImplementedError", "id": "f2822:c0:m17"} {"signature": "def execute_task(bufs):", "body": "user_ns = locals()user_ns.update({'': __builtins__})f, args, kwargs = unpack_apply_message(bufs, user_ns, copy=False)fname = getattr(f, '', '')prefix = \"\"fname = prefix + \"\"argname = prefix + \"\"kwargname = prefix + \"\"resultname = prefix + \"\"user_ns.update({fname: f,argname: args,kwargname: kwargs,resultname: resultname})code = \"\".format(resultname, fname,argname, kwargname)try:logger.debug(\"\".format(code))exec(code, user_ns, user_ns)except Exception as e:logger.warning(\"\".format(e))raise eelse:logger.debug(\"\".format(user_ns.get(resultname)))return user_ns.get(resultname)", "docstring": "Deserialize the buffer and execute the task.\n\n Returns the serialized result or exception.", "id": "f2823:m0"} {"signature": "def start_file_logger(filename, rank, name='', level=logging.DEBUG, format_string=None):", "body": "if format_string is None:format_string = \"\".format(rank)global loggerlogger = logging.getLogger(name)logger.setLevel(logging.DEBUG)handler = logging.FileHandler(filename)handler.setLevel(level)formatter = logging.Formatter(format_string, datefmt='')handler.setFormatter(formatter)logger.addHandler(handler)", "docstring": "Add a stream log handler.\n\n Args:\n - filename (string): Name of the file to write logs to\n - name (string): Logger name\n - level (logging.LEVEL): Set the logging level.\n - format_string (string): Set the format string\n\n Returns:\n - None", "id": "f2823:m2"} {"signature": "def set_stream_logger(name='', level=logging.DEBUG, format_string=None):", "body": "if format_string is None:format_string = \"\"global loggerlogger = logging.getLogger(name)logger.setLevel(logging.DEBUG)handler = logging.StreamHandler()handler.setLevel(level)formatter = logging.Formatter(format_string, datefmt='')handler.setFormatter(formatter)logger.addHandler(handler)", "docstring": "Add a stream log handler.\n\n Args:\n - name (string) : Set the logger name.\n - level (logging.LEVEL) : Set to logging.DEBUG by default.\n - format_string (sting) : Set to None by default.\n\n Returns:\n - None", "id": "f2823:m3"} {"signature": "def __init__(self,comm, rank,task_q_url=\"\",result_q_url=\"\",max_queue_size=,heartbeat_threshold=,heartbeat_period=,uid=None):", "body": "self.uid = uidself.context = zmq.Context()self.task_incoming = self.context.socket(zmq.DEALER)self.task_incoming.setsockopt(zmq.IDENTITY, uid.encode(''))self.task_incoming.setsockopt(zmq.LINGER, )self.task_incoming.connect(task_q_url)self.result_outgoing = self.context.socket(zmq.DEALER)self.result_outgoing.setsockopt(zmq.IDENTITY, uid.encode(''))self.result_outgoing.setsockopt(zmq.LINGER, )self.result_outgoing.connect(result_q_url)logger.info(\"\")self.max_queue_size = 
max_queue_size + comm.sizeself.pending_task_queue = queue.Queue()self.pending_result_queue = queue.Queue()self.ready_worker_queue = queue.Queue()self.tasks_per_round = self.heartbeat_period = heartbeat_periodself.heartbeat_threshold = heartbeat_thresholdself.comm = commself.rank = rank", "docstring": "Parameters\n----------\nworker_url : str\n Worker url on which workers will attempt to connect back\n\nheartbeat_threshold : int\n Number of seconds since the last message from the interchange after which the worker\n assumes that the interchange is lost and the manager shuts down. Default:120\n\nheartbeat_period : int\n Number of seconds after which a heartbeat message is sent to the interchange", "id": "f2823:c0:m0"} {"signature": "def create_reg_message(self):", "body": "msg = {'': PARSL_VERSION,'': \"\".format(sys.version_info.major,sys.version_info.minor,sys.version_info.micro),'': platform.system(),'': platform.node(),'': os.getcwd(),}b_msg = json.dumps(msg).encode('')return b_msg", "docstring": "Creates a registration message to identify the worker to the interchange", "id": "f2823:c0:m1"} {"signature": "def heartbeat(self):", "body": "heartbeat = (HEARTBEAT_CODE).to_bytes(, \"\")r = self.task_incoming.send(heartbeat)logger.debug(\"\".format(r))", "docstring": "Send heartbeat to the incoming task queue", "id": "f2823:c0:m2"} {"signature": "def recv_result_from_workers(self):", "body": "info = MPI.Status()result = self.comm.recv(source=MPI.ANY_SOURCE, tag=RESULT_TAG, status=info)logger.debug(\"\".format(result))return result", "docstring": "Receives a results from the MPI worker pool and send it out via 0mq\n\n Returns:\n --------\n result: task result from the workers", "id": "f2823:c0:m3"} {"signature": "def recv_task_request_from_workers(self):", "body": "info = MPI.Status()comm.recv(source=MPI.ANY_SOURCE, tag=TASK_REQUEST_TAG, status=info)worker_rank = info.Get_source()logger.info(\"\".format(worker_rank))return worker_rank", "docstring": "Receives 1 task request from MPI comm\n\n Returns:\n --------\n worker_rank: worker_rank id", "id": "f2823:c0:m4"} {"signature": "def pull_tasks(self, kill_event):", "body": "logger.info(\"\")poller = zmq.Poller()poller.register(self.task_incoming, zmq.POLLIN)msg = self.create_reg_message()logger.debug(\"\".format(msg))self.task_incoming.send(msg)last_beat = time.time()last_interchange_contact = time.time()task_recv_counter = poll_timer = while not kill_event.is_set():time.sleep(LOOP_SLOWDOWN)ready_worker_count = self.ready_worker_queue.qsize()pending_task_count = self.pending_task_queue.qsize()logger.debug(\"\".format(ready_worker_count,pending_task_count))if time.time() > last_beat + self.heartbeat_period:self.heartbeat()last_beat = time.time()if pending_task_count < self.max_queue_size and ready_worker_count > :logger.debug(\"\".format(ready_worker_count))msg = ((ready_worker_count).to_bytes(, \"\"))self.task_incoming.send(msg)socks = dict(poller.poll(timeout=poll_timer))if self.task_incoming in socks and socks[self.task_incoming] == zmq.POLLIN:_, pkl_msg = self.task_incoming.recv_multipart()tasks = pickle.loads(pkl_msg)last_interchange_contact = time.time()if tasks == '':logger.critical(\"\")kill_event.set()breakelif tasks == HEARTBEAT_CODE:logger.debug(\"\")else:poll_timer = task_recv_counter += len(tasks)logger.debug(\"\".format([t[''] for t in tasks],task_recv_counter))for task in tasks:self.pending_task_queue.put(task)else:logger.debug(\"\")poll_timer = min(self.heartbeat_period * , poll_timer * )if time.time() > last_interchange_contact + 
self.heartbeat_threshold:logger.critical(\"\")kill_event.set()logger.critical(\"\")break", "docstring": "Pulls tasks from the incoming tasks 0mq pipe onto the internal\n pending task queue\n\n Parameters:\n -----------\n kill_event : threading.Event\n Event to let the thread know when it is time to die.", "id": "f2823:c0:m5"} {"signature": "def push_results(self, kill_event):", "body": "timeout = logger.debug(\"\")while not kill_event.is_set():time.sleep(LOOP_SLOWDOWN)try:items = []while not self.pending_result_queue.empty():r = self.pending_result_queue.get(block=True)items.append(r)if items:self.result_outgoing.send_multipart(items)except queue.Empty:logger.debug(\"\".format(timeout))except Exception as e:logger.exception(\"\".format(e))logger.critical(\"\")", "docstring": "Listens on the pending_result_queue and sends out results via 0mq\n\n Parameters:\n -----------\n kill_event : threading.Event\n Event to let the thread know when it is time to die.", "id": "f2823:c0:m6"} {"signature": "def start(self):", "body": "self.comm.Barrier()logger.debug(\"\")self._kill_event = threading.Event()self._task_puller_thread = threading.Thread(target=self.pull_tasks,args=(self._kill_event,))self._result_pusher_thread = threading.Thread(target=self.push_results,args=(self._kill_event,))self._task_puller_thread.start()self._result_pusher_thread.start()start = Noneresult_counter = task_recv_counter = task_sent_counter = logger.info(\"\")while not self._kill_event.is_set():time.sleep(LOOP_SLOWDOWN)timer = time.time() + counter = min(, comm.size)while time.time() < timer:info = MPI.Status()if counter > :logger.debug(\"\")breakif not self.comm.Iprobe(status=info):logger.debug(\"\".format(counter))breakelse:tag = info.Get_tag()logger.info(\"\".format(tag))counter += if tag == RESULT_TAG:result = self.recv_result_from_workers()self.pending_result_queue.put(result)result_counter += elif tag == TASK_REQUEST_TAG:worker_rank = self.recv_task_request_from_workers()self.ready_worker_queue.put(worker_rank)else:logger.error(\"\".format(tag))available_worker_cnt = self.ready_worker_queue.qsize()available_task_cnt = self.pending_task_queue.qsize()logger.debug(\"\".format(available_worker_cnt,available_task_cnt))this_round = min(available_worker_cnt, available_task_cnt)for i in range(this_round):worker_rank = self.ready_worker_queue.get()task = self.pending_task_queue.get()comm.send(task, dest=worker_rank, tag=worker_rank)task_sent_counter += logger.debug(\"\".format(worker_rank, task['']))if not start:start = time.time()logger.debug(\"\".format(task_recv_counter, task_sent_counter, result_counter))self._task_puller_thread.join()self._result_pusher_thread.join()self.task_incoming.close()self.result_outgoing.close()self.context.term()delta = time.time() - startlogger.info(\"\".format(delta))", "docstring": "Start the Manager process.\n\n The worker loops on this:\n\n 1. 
If the last message sent was older than heartbeat period we send a heartbeat\n 2.\n\n\n TODO: Move task receiving to a thread", "id": "f2823:c0:m7"} {"signature": "@abstractmethoddef __call__(self, command, tasks_per_node, nodes_per_block, walltime=None):", "body": "pass", "docstring": "Wraps the command with the Launcher calls.\n *MUST* be implemented by the concrete child classes", "id": "f2826:c0:m0"} {"signature": "def __call__(self, command, tasks_per_node, nodes_per_block, walltime=None):", "body": "return command", "docstring": "Args:\n- command (string): The command string to be launched\n- task_block (string) : bash evaluated string.\n\nKWargs:\n- walltime (int) : This is not used by this launcher.", "id": "f2826:c1:m0"} {"signature": "def __call__(self, command, tasks_per_node, nodes_per_block, walltime=None):", "body": "task_blocks = tasks_per_node * nodes_per_blockx =", "docstring": "Args:\n- command (string): The command string to be launched\n- task_block (string) : bash evaluated string.\n\nKWargs:\n- walltime (int) : This is not used by this launcher.", "id": "f2826:c2:m0"} {"signature": "def __call__(self, command, tasks_per_node, nodes_per_block, walltime=None):", "body": "task_blocks = tasks_per_node * nodes_per_blockx =", "docstring": "Args:\n- command (string): The command string to be launched\n- task_block (string) : bash evaluated string.\n\nKWargs:\n- walltime (int) : This is not used by this launcher.", "id": "f2826:c3:m0"} {"signature": "def __call__(self, command, tasks_per_node, nodes_per_block, walltime=None):", "body": "task_blocks = tasks_per_node * nodes_per_blockx =", "docstring": "Args:\n- command (string): The command string to be launched\n- task_block (string) : bash evaluated string.\n\nKWargs:\n- walltime (int) : This is not used by this launcher.", "id": "f2826:c4:m0"} {"signature": "def __init__(self, overrides=''):", "body": "self.overrides = overrides", "docstring": "Parameters\n----------\n\noverrides: str\n This string will be passed to the srun launcher. Default: ''", "id": "f2826:c5:m0"} {"signature": "def __call__(self, command, tasks_per_node, nodes_per_block, walltime=None):", "body": "task_blocks = tasks_per_node * nodes_per_blockx =", "docstring": "Args:\n- command (string): The command string to be launched\n- task_block (string) : bash evaluated string.\n\nKWargs:\n- walltime (int) : This is not used by this launcher.", "id": "f2826:c5:m1"} {"signature": "def __init__(self, overrides=''):", "body": "self.overrides = overrides", "docstring": "Parameters\n----------\n\noverrides: str\n This string will be passed to the launcher. Default: ''", "id": "f2826:c6:m0"} {"signature": "def __call__(self, command, tasks_per_node, nodes_per_block, walltime=None):", "body": "task_blocks = tasks_per_node * nodes_per_blockx =", "docstring": "Args:\n- command (string): The command string to be launched\n- task_block (string) : bash evaluated string.\n\nKWargs:\n- walltime (int) : This is not used by this launcher.", "id": "f2826:c6:m1"} {"signature": "def __init__(self, overrides=''):", "body": "self.overrides = overrides", "docstring": "Parameters\n----------\n\noverrides: str\n This string will be passed to the aprun launcher. 
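Each launcher above wraps the per-node worker command so that the site's parallel launch tool fans it out across task_blocks = tasks_per_node * nodes_per_block copies (the actual wrapper strings are elided in the bodies above). A hedged sketch of the idea follows; the srun flags shown are an assumption for illustration, not the exact command the real SrunLauncher emits.

```python
def srun_style_launcher(command, tasks_per_node, nodes_per_block, overrides=""):
    """Wrap a single-worker command so srun launches task_blocks copies."""
    task_blocks = tasks_per_node * nodes_per_block
    return "srun --ntasks {n} {extra} bash -c {cmd!r}".format(
        n=task_blocks, extra=overrides, cmd=command)


print(srun_style_launcher("process_worker_pool.py --debug",
                          tasks_per_node=2, nodes_per_block=4))
# srun --ntasks 8  bash -c 'process_worker_pool.py --debug'
```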
Default: ''", "id": "f2826:c7:m0"} {"signature": "def __call__(self, command, tasks_per_node, nodes_per_block, walltime=None):", "body": "tasks_per_block = tasks_per_node * nodes_per_blockx =", "docstring": "Args:\n- command (string): The command string to be launched\n- tasks_per_node (int) : Workers to launch per node\n- nodes_per_block (int) : Number of nodes in a block\n\nKWargs:\n- walltime (int) : This is not used by this launcher.", "id": "f2826:c7:m1"} {"signature": "def get_all_checkpoints(rundir=\"\"):", "body": "if(not os.path.isdir(rundir)):return []dirs = sorted(os.listdir(rundir))checkpoints = []for runid in dirs:checkpoint = os.path.abspath(''.format(rundir, runid))if os.path.isdir(checkpoint):checkpoints.append(checkpoint)return checkpoints", "docstring": "Finds the checkpoints from all last runs.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for the checkpointFiles parameter of DataFlowKernel\n constructor", "id": "f2829:m1"} {"signature": "def get_last_checkpoint(rundir=\"\"):", "body": "if not os.path.isdir(rundir):return []dirs = sorted(os.listdir(rundir))if len(dirs) == :return []last_runid = dirs[-]last_checkpoint = os.path.abspath(''.format(rundir, last_runid))if(not(os.path.isdir(last_checkpoint))):return []return [last_checkpoint]", "docstring": "Find the checkpoint from the last run, if one exists.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for checkpointFiles parameter of DataFlowKernel\n constructor, with 0 or 1 elements", "id": "f2829:m2"} {"signature": "def wtime_to_minutes(time_string):", "body": "hours, mins, seconds = time_string.split('')total_mins = int(hours) * + int(mins)if total_mins < :logger.warning(\"\".format(time_string, total_mins))return total_mins", "docstring": "wtime_to_minutes\n\n Convert standard wallclock time string to minutes.\n\n Args:\n - Time_string in HH:MM:SS format\n\n Returns:\n (int) minutes", "id": "f2829:m6"} {"signature": "def teardown_module(module):", "body": "p_this = Path(__file__)to_remove_list = [p_this.change(new_basename=\"\"),]for p in p_this.parent.select_by_ext(\"\"):to_remove_list.append(p)for p in to_remove_list:if p.is_file():p.remove()elif p.is_dir():shutil.rmtree(p.abspath)", "docstring": "Remove temp file and dir for test.", "id": "f2867:m0"} {"signature": "def teardown_module(module):", "body": "p_this = Path(__file__)to_remove_list = list(p_this.parent.select_by_ext(\"\"))for p in to_remove_list:if p.exists():p.remove()", "docstring": "Remove temp file and dir for test.", "id": "f2875:m0"} {"signature": "def setup_module(module):", "body": "p = Path(__file__).change(new_basename=\"\")try:shutil.copytree(p.abspath, p.change(new_basename=\"\").abspath)except Exception as e:passp = Path(__file__).change(new_basename=\"\")with open(p.abspath, \"\") as f:f.write(\"\".encode(\"\"))p = Path(__file__).change(new_basename=\"\")with open(p.abspath, \"\") as f:f.write(\"\".encode(\"\"))", "docstring": "Create temp file and dir for test.\n\n- create a new folder ``/wow``\n- create two file `/`wow/file_to_move.txt``, ``wow/file_to_copy.txt``", "id": "f2877:m0"} {"signature": "def 
teardown_module(module):", "body": "p_this = Path(__file__)to_remove_list = [p_this.change(new_basename=\"\"),p_this.change(new_basename=\"\"),p_this.change(new_basename=\"\"),p_this.change(new_basename=\"\"),p_this.change(new_basename=\"\"),p_this.change(new_basename=\"\"),p_this.change(new_basename=\"\"),]for p in to_remove_list:if p.is_file():p.remove()elif p.is_dir():shutil.rmtree(p.abspath)", "docstring": "Remove temp file and dir for test.", "id": "f2877:m1"} {"signature": "def get_text_fingerprint(text, hash_meth, encoding=\"\"): ", "body": "m = hash_meth()m.update(text.encode(encoding))return m.hexdigest()", "docstring": "Use default hash method to return hash value of a piece of string\ndefault setting use 'utf-8' encoding.", "id": "f2879:m0"} {"signature": "def md5file(abspath, nbytes=, chunk_size=DEFAULT_CHUNK_SIZE):", "body": "return get_file_fingerprint(abspath, hashlib.md5, nbytes=nbytes, chunk_size=chunk_size)", "docstring": "Return md5 hash value of a piece of a file\n\nEstimate processing time on:\n\n:param abspath: the absolute path to the file\n:param nbytes: only has first N bytes of the file. if 0 or None,\n hash all file\n\nCPU = i7-4600U 2.10GHz - 2.70GHz, RAM = 8.00 GB\n1 second can process 0.25GB data\n\n- 0.59G - 2.43 sec\n- 1.3G - 5.68 sec\n- 1.9G - 7.72 sec\n- 2.5G - 10.32 sec\n- 3.9G - 16.0 sec", "id": "f2879:m2"} {"signature": "def sha256file(abspath, nbytes=, chunk_size=DEFAULT_CHUNK_SIZE):", "body": "return get_file_fingerprint(abspath, hashlib.sha256, nbytes=nbytes, chunk_size=chunk_size)", "docstring": "Return sha256 hash value of a piece of a file\n\nEstimate processing time on:\n\n:param abspath: the absolute path to the file\n:param nbytes: only has first N bytes of the file. if 0 or None,\n hash all file", "id": "f2879:m3"} {"signature": "def sha512file(abspath, nbytes=, chunk_size=DEFAULT_CHUNK_SIZE):", "body": "return get_file_fingerprint(abspath, hashlib.sha512, nbytes=nbytes, chunk_size=chunk_size)", "docstring": "Return sha512 hash value of a piece of a file\n\nEstimate processing time on:\n\n:param abspath: the absolute path to the file\n:param nbytes: only has first N bytes of the file. 
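md5file, sha256file and sha512file above all delegate to a shared get_file_fingerprint helper that reads the file in chunks and can stop after the first nbytes. That helper is not shown in this section, so the snippet below is a hedged re-implementation of the idea; the function name, defaults and chunk size are assumptions.

```python
import hashlib


def file_fingerprint(path, hash_meth=hashlib.md5, nbytes=0, chunk_size=1 << 20):
    """Hash at most `nbytes` of a file (0 means the whole file), chunk by chunk."""
    m = hash_meth()
    remaining = nbytes if nbytes else float("inf")
    with open(path, "rb") as f:
        while remaining > 0:
            chunk = f.read(int(min(chunk_size, remaining)))
            if not chunk:
                break
            m.update(chunk)
            remaining -= len(chunk)
    return m.hexdigest()


if __name__ == "__main__":
    with open("demo.bin", "wb") as f:
        f.write(b"hello world" * 1000)
    print(file_fingerprint("demo.bin"))                        # full-file md5
    print(file_fingerprint("demo.bin", hashlib.sha256, nbytes=100))
```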
if 0 or None,\n hash all file", "id": "f2879:m4"} {"signature": "def get_partial_md5(self, nbytes):", "body": "return md5file(abspath=self.abspath, nbytes=nbytes)", "docstring": "Return md5 check sum of first n bytes of this file.", "id": "f2880:c0:m0"} {"signature": "@propertydef md5(self):", "body": "return md5file(self.abspath)", "docstring": "Return md5 check sum of this file.", "id": "f2880:c0:m1"} {"signature": "def get_partial_sha256(self, nbytes):", "body": "return sha256file(abspath=self.abspath, nbytes=nbytes)", "docstring": "Return sha256 check sum of first n bytes of this file.", "id": "f2880:c0:m2"} {"signature": "@propertydef sha256(self):", "body": "return sha256file(self.abspath)", "docstring": "Return sha256 check sum of this file.", "id": "f2880:c0:m3"} {"signature": "def get_partial_sha512(self, nbytes):", "body": "return sha512file(abspath=self.abspath, nbytes=nbytes)", "docstring": "Return sha512 check sum of first n bytes of this file.", "id": "f2880:c0:m4"} {"signature": "@propertydef sha512(self):", "body": "return sha512file(self.abspath)", "docstring": "Return sha512 check sum of this file.", "id": "f2880:c0:m5"} {"signature": "def make_zip_archive(self,dst=None,filters=all_true,compress=True,overwrite=False,makedirs=False,verbose=False): ", "body": "self.assert_exists()if dst is None:dst = self._auto_zip_archive_dst()else:dst = self.change(new_abspath=dst)if not dst.basename.lower().endswith(\"\"):raise ValueError(\"\")if dst.exists():if not overwrite:raise IOError(\"\" % dst)if compress:compression = ZIP_DEFLATEDelse:compression = ZIP_STOREDif not dst.parent.exists():if makedirs:os.makedirs(dst.parent.abspath)if verbose:msg = \"\" % selfprint(msg)current_dir = os.getcwd()if self.is_dir():total_size = selected = list()for p in self.glob(\"\"):if filters(p):selected.append(p)total_size += p.sizeif verbose:msg = \"\".format(len(selected), repr_data_size(total_size),)print(msg)with ZipFile(dst.abspath, \"\", compression) as f:os.chdir(self.abspath)for p in selected:relpath = p.relative_to(self).__str__()f.write(relpath)elif self.is_file():with ZipFile(dst.abspath, \"\", compression) as f:os.chdir(self.parent.abspath)f.write(self.basename)os.chdir(current_dir)if verbose:msg = \"\".format(dst.size_in_text)print(msg)", "docstring": "Make a zip archive.\n\n:param dst: output file path. if not given, will be automatically assigned.\n:param filters: custom path filter. 
By default it allows any file.\n:param compress: compress or not.\n:param overwrite: overwrite exists or not.\n:param verbose: display log or not.\n:return:", "id": "f2881:c0:m1"} {"signature": "def backup(self,dst=None,ignore=None,ignore_ext=None,ignore_pattern=None,ignore_size_smaller_than=None,ignore_size_larger_than=None,case_sensitive=False): ", "body": "def preprocess_arg(arg): if arg is None:return []if isinstance(arg, (tuple, list)):return list(arg)else:return [arg, ]self.assert_is_dir_and_exists()ignore = preprocess_arg(ignore)for i in ignore:if i.startswith(\"\") or i.startswith(\"\"):raise ValueErrorignore_ext = preprocess_arg(ignore_ext)for ext in ignore_ext:if not ext.startswith(\"\"):raise ValueErrorignore_pattern = preprocess_arg(ignore_pattern)if case_sensitive:passelse:ignore = [i.lower() for i in ignore]ignore_ext = [i.lower() for i in ignore_ext]ignore_pattern = [i.lower() for i in ignore_pattern]def filters(p):relpath = p.relative_to(self).abspathif not case_sensitive:relpath = relpath.lower()for i in ignore:if relpath.startswith(i):return Falseif case_sensitive:ext = p.extelse:ext = p.ext.lower()if ext in ignore_ext:return Falsefor pattern in ignore_pattern:if pattern in relpath:return Falseif ignore_size_smaller_than:if p.size < ignore_size_smaller_than:return Falseif ignore_size_larger_than:if p.size > ignore_size_larger_than:return Falsereturn Trueself.make_zip_archive(dst=dst, filters=filters, compress=True, overwrite=False, verbose=True,)", "docstring": "Create a compressed zip archive backup for a directory.\n\n:param dst: the output file path.\n:param ignore: file or directory defined in this list will be ignored.\n:param ignore_ext: file with extensions defined in this list will be ignored.\n:param ignore_pattern: any file or directory that contains this pattern\n will be ignored.\n:param ignore_size_smaller_than: any file size smaller than this\n will be ignored.\n:param ignore_size_larger_than: any file size larger than this\n will be ignored.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u4e3a\u4e00\u4e2a\u76ee\u5f55\u521b\u5efa\u4e00\u4e2a\u5907\u4efd\u538b\u7f29\u5305\u3002\u53ef\u4ee5\u901a\u8fc7\u8fc7\u6ee4\u5668\u9009\u62e9\u4f60\u8981\u5907\u4efd\u7684\u6587\u4ef6\u3002", "id": "f2881:c0:m2"} {"signature": "@propertydef abspath(self):", "body": "return self.absolute().__str__()", "docstring": "r\"\"\"\n Absolute path.\n\n Example: ``C:\\User\\admin\\readme.txt`` for ``C:\\User\\admin\\readme.txt``", "id": "f2882:c0:m0"} {"signature": "@propertydef abspath_hexstr(self):", "body": "return encode_hexstr(self.abspath)", "docstring": "Return absolute path encoded in hex string.", "id": "f2882:c0:m1"} {"signature": "@propertydef dirpath(self):", "body": "return self.parent.abspath", "docstring": "r\"\"\"\n Parent dir full absolute path.\n\n Example: ``C:\\User\\admin`` for ``C:\\User\\admin\\readme.txt``", "id": "f2882:c0:m2"} {"signature": "@propertydef dirpath_hexstr(self):", "body": "return encode_hexstr(self.dirpath)", "docstring": "Return dir full absolute path encoded in hex string.", "id": "f2882:c0:m3"} {"signature": "@propertydef dirname(self):", "body": "return self.parent.name", "docstring": "r\"\"\"\n Parent dir name.\n\n Example: ``admin`` for ``C:\\User\\admin\\readme.txt``", "id": "f2882:c0:m4"} {"signature": "@propertydef dirname_hexstr(self):", "body": "return encode_hexstr(self.dirname)", "docstring": "Parent dir name in hex string.", "id": "f2882:c0:m5"} {"signature": "@propertydef basename(self):", "body": "return self.name", "docstring": 
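make_zip_archive and backup above share one idea: select files under a directory with a filter and write them into a compressed archive using paths relative to the archived root. Below is a compact standard-library sketch of that pattern; the function name and the example filter are illustrative, not the package's API.

```python
import os
import zipfile


def zip_directory(src_dir, dst_zip, keep=lambda p: True):
    """Write every file under src_dir accepted by `keep` into a compressed zip."""
    with zipfile.ZipFile(dst_zip, "w", zipfile.ZIP_DEFLATED) as zf:
        for root, _dirs, files in os.walk(src_dir):
            for name in files:
                full = os.path.join(root, name)
                if keep(full):
                    zf.write(full, arcname=os.path.relpath(full, src_dir))


# e.g. back up a project but skip log files:
# zip_directory("my_project", "my_project.zip",
#               keep=lambda p: not p.endswith(".log"))
```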
"r\"\"\"\n File name with extension, path is not included.\n\n Example: ``readme.txt`` for ``C:\\User\\admin\\readme.txt``", "id": "f2882:c0:m6"} {"signature": "@propertydef basename_hexstr(self):", "body": "return encode_hexstr(self.basename)", "docstring": "File name with extension encoded in hex string.", "id": "f2882:c0:m7"} {"signature": "@propertydef fname(self):", "body": "return self.stem", "docstring": "r\"\"\"\n File name without extension.\n\n Example: ``readme`` for ``C:\\User\\admin\\readme.txt``", "id": "f2882:c0:m8"} {"signature": "@propertydef fname_hexstr(self):", "body": "return encode_hexstr(self.fname)", "docstring": "File name encoded in hex string.", "id": "f2882:c0:m9"} {"signature": "@propertydef ext(self):", "body": "return self.suffix", "docstring": "r\"\"\"\n File extension. If it's a dir, then return empty str.\n\n Example: ``.txt`` for ``C:\\User\\admin\\readme.txt``", "id": "f2882:c0:m10"} {"signature": "@propertydef size(self):", "body": "try:return self._stat.st_sizeexcept: self._stat = self.stat()return self.size", "docstring": "File size in bytes.", "id": "f2882:c0:m11"} {"signature": "@propertydef size_in_text(self):", "body": "return repr_data_size(self.size, precision=)", "docstring": "File size as human readable string.", "id": "f2882:c0:m12"} {"signature": "@propertydef mtime(self):", "body": "try:return self._stat.st_mtimeexcept: self._stat = self.stat()return self.mtime", "docstring": "Get most recent modify time in timestamp.", "id": "f2882:c0:m13"} {"signature": "@propertydef atime(self):", "body": "try:return self._stat.st_atimeexcept: self._stat = self.stat()return self.atime", "docstring": "Get most recent access time in timestamp.", "id": "f2882:c0:m14"} {"signature": "@propertydef ctime(self):", "body": "try:return self._stat.st_ctimeexcept: self._stat = self.stat()return self.ctime", "docstring": "Get most recent create time in timestamp.", "id": "f2882:c0:m15"} {"signature": "@propertydef modify_datetime(self):", "body": "return datetime.fromtimestamp(self.mtime)", "docstring": "Get most recent modify time in datetime.", "id": "f2882:c0:m16"} {"signature": "@propertydef access_datetime(self):", "body": "return datetime.fromtimestamp(self.atime)", "docstring": "Get most recent access time in datetime.", "id": "f2882:c0:m17"} {"signature": "@propertydef create_datetime(self):", "body": "return datetime.fromtimestamp(self.ctime)", "docstring": "Get most recent create time in datetime.", "id": "f2882:c0:m18"} {"signature": "def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2):", "body": "if root2:if not drv2 and drv:return drv, root2, [drv + root2] + parts2[:]elif drv2:if drv2 == drv or self.casefold(drv2) == self.casefold(drv):return drv, root, parts + parts2[:]else:return drv, root, parts + parts2return drv2, root2, parts2", "docstring": "Join the two paths represented by the respective\n(drive, root, parts) tuples. Return a new (drive, root, parts) tuple.", "id": "f2883:c0:m2"} {"signature": "def select_from(self, parent_path):", "body": "path_cls = type(parent_path)is_dir = path_cls.is_direxists = path_cls.existsscandir = parent_path._accessor.scandirif not is_dir(parent_path):return iter([])return self._select_from(parent_path, is_dir, exists, scandir)", "docstring": "Iterate over all child paths of `parent_path` matched by this\n selector. 
This can contain parent_path itself.", "id": "f2883:c5:m1"} {"signature": "def __new__(cls, *args):", "body": "if cls is PurePath:cls = PureWindowsPath if os.name == '' else PurePosixPathreturn cls._from_parts(args)", "docstring": "Construct a PurePath from one or several strings and or existing\n PurePath objects. The strings and path objects are combined so as\n to yield a canonicalized path, which is incorporated into the\n new PurePath object.", "id": "f2883:c11:m0"} {"signature": "def __str__(self):", "body": "try:return self._strexcept AttributeError:self._str = self._format_parsed_parts(self._drv, self._root,self._parts) or ''return self._str", "docstring": "Return the string representation of the path, suitable for\n passing to system calls.", "id": "f2883:c11:m8"} {"signature": "def as_posix(self):", "body": "f = self._flavourreturn str(self).replace(f.sep, '')", "docstring": "Return the string representation of the path with forward (/)\n slashes.", "id": "f2883:c11:m10"} {"signature": "def __bytes__(self):", "body": "if sys.version_info < (, ):raise NotImplementedError(\"\")return os.fsencode(str(self))", "docstring": "Return the bytes representation of the path. This is only\n recommended to use under Unix.", "id": "f2883:c11:m11"} {"signature": "def as_uri(self):", "body": "if not self.is_absolute():raise ValueError(\"\")return self._flavour.make_uri(self)", "docstring": "Return the path as a 'file' URI.", "id": "f2883:c11:m13"} {"signature": "@propertydef anchor(self):", "body": "anchor = self._drv + self._rootreturn anchor", "docstring": "The concatenation of the drive and root, or ''.", "id": "f2883:c11:m22"} {"signature": "@propertydef name(self):", "body": "parts = self._partsif len(parts) == ( if (self._drv or self._root) else ):return ''return parts[-]", "docstring": "The final path component, if any.", "id": "f2883:c11:m23"} {"signature": "@propertydef suffix(self):", "body": "name = self.namei = name.rfind('')if < i < len(name) - :return name[i:]else:return ''", "docstring": "The final component's last suffix, if any.", "id": "f2883:c11:m24"} {"signature": "@propertydef suffixes(self):", "body": "name = self.nameif name.endswith(''):return []name = name.lstrip('')return ['' + suffix for suffix in name.split('')[:]]", "docstring": "A list of the final component's suffixes, if any.", "id": "f2883:c11:m25"} {"signature": "@propertydef stem(self):", "body": "name = self.namei = name.rfind('')if < i < len(name) - :return name[:i]else:return name", "docstring": "The final path component, minus its last suffix.", "id": "f2883:c11:m26"} {"signature": "def with_name(self, name):", "body": "if not self.name:raise ValueError(\"\" % (self,))drv, root, parts = self._flavour.parse_parts((name,))if (not name or name[-] in [self._flavour.sep, self._flavour.altsep]or drv or root or len(parts) != ):raise ValueError(\"\" % (name))return self._from_parsed_parts(self._drv, self._root,self._parts[:-] + [name])", "docstring": "Return a new path with the file name changed.", "id": "f2883:c11:m27"} {"signature": "def with_suffix(self, suffix):", "body": "f = self._flavourif f.sep in suffix or f.altsep and f.altsep in suffix:raise ValueError(\"\" % (suffix))if suffix and not suffix.startswith('') or suffix == '':raise ValueError(\"\" % (suffix))name = self.nameif not name:raise ValueError(\"\" % (self,))old_suffix = self.suffixif not old_suffix:name = name + suffixelse:name = name[:-len(old_suffix)] + suffixreturn self._from_parsed_parts(self._drv, self._root,self._parts[:-] + [name])", 
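The ``suffix`` and ``stem`` properties above both hinge on the position of the last dot in the final component: a dot at index 0 (hidden files) or at the very end does not count as an extension separator. The same rule in isolation::

    def split_suffix(name):
        # Mirrors the documented suffix/stem rule.
        i = name.rfind(".")
        if 0 < i < len(name) - 1:
            return name[:i], name[i:]
        return name, ""

    print(split_suffix("archive.tar.gz"))   # ('archive.tar', '.gz')
    print(split_suffix(".bashrc"))          # ('.bashrc', '')
    print(split_suffix("readme"))           # ('readme', '')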
"docstring": "Return a new path with the file suffix changed (or added, if\n none).", "id": "f2883:c11:m28"} {"signature": "def relative_to(self, *other):", "body": "if not other:raise TypeError(\"\")parts = self._partsdrv = self._drvroot = self._rootif root:abs_parts = [drv, root] + parts[:]else:abs_parts = partsto_drv, to_root, to_parts = self._parse_args(other)if to_root:to_abs_parts = [to_drv, to_root] + to_parts[:]else:to_abs_parts = to_partsn = len(to_abs_parts)cf = self._flavour.casefold_partsif (root or drv) if n == else cf(abs_parts[:n]) != cf(to_abs_parts):formatted = self._format_parsed_parts(to_drv, to_root, to_parts)raise ValueError(\"\".format(str(self), str(formatted)))return self._from_parsed_parts('', root if n == else '',abs_parts[n:])", "docstring": "Return the relative path to another path identified by the passed\n arguments. If the operation is not possible (because this is not\n a subpath of the other path), raise ValueError.", "id": "f2883:c11:m29"} {"signature": "@propertydef parts(self):", "body": "try:return self._ppartsexcept AttributeError:self._pparts = tuple(self._parts)return self._pparts", "docstring": "An object providing sequence-like access to the\n components in the filesystem path.", "id": "f2883:c11:m30"} {"signature": "def joinpath(self, *args):", "body": "return self._make_child(args)", "docstring": "Combine this path with one or several arguments, and return a\n new path representing either a subpath (if all arguments are relative\n paths) or a totally different path (if one of the arguments is\n anchored).", "id": "f2883:c11:m31"} {"signature": "@propertydef parent(self):", "body": "drv = self._drvroot = self._rootparts = self._partsif len(parts) == and (drv or root):return selfreturn self._from_parsed_parts(drv, root, parts[:-])", "docstring": "The logical parent of the path.", "id": "f2883:c11:m34"} {"signature": "@propertydef parents(self):", "body": "return _PathParents(self)", "docstring": "A sequence of this path's logical parents.", "id": "f2883:c11:m35"} {"signature": "def is_absolute(self):", "body": "if not self._root:return Falsereturn not self._flavour.has_drv or bool(self._drv)", "docstring": "True if the path is absolute (has both a root and, if applicable,\n a drive).", "id": "f2883:c11:m36"} {"signature": "def is_reserved(self):", "body": "return self._flavour.is_reserved(self._parts)", "docstring": "Return True if the path contains one of the special names reserved\n by the system, if any.", "id": "f2883:c11:m37"} {"signature": "def match(self, path_pattern):", "body": "cf = self._flavour.casefoldpath_pattern = cf(path_pattern)drv, root, pat_parts = self._flavour.parse_parts((path_pattern,))if not pat_parts:raise ValueError(\"\")if drv and drv != cf(self._drv):return Falseif root and root != cf(self._root):return Falseparts = self._cpartsif drv or root:if len(pat_parts) != len(parts):return Falsepat_parts = pat_parts[:]elif len(pat_parts) > len(parts):return Falsefor part, pat in zip(reversed(parts), reversed(pat_parts)):if not fnmatch.fnmatchcase(part, pat):return Falsereturn True", "docstring": "Return True if this path matches the given pattern.", "id": "f2883:c11:m38"} {"signature": "def _raw_open(self, flags, mode=):", "body": "if self._closed:self._raise_closed()return self._accessor.open(self, flags, mode)", "docstring": "Open the file pointed by this path and return a file descriptor,\nas os.open() does.", "id": "f2883:c14:m7"} {"signature": "@classmethoddef cwd(cls):", "body": "return cls(os.getcwd())", "docstring": 
"Return a new path pointing to the current working directory\n (as returned by os.getcwd()).", "id": "f2883:c14:m8"} {"signature": "@classmethoddef home(cls):", "body": "return cls(cls()._flavour.gethomedir(None))", "docstring": "Return a new path pointing to the user's home directory (as\n returned by os.path.expanduser('~')).", "id": "f2883:c14:m9"} {"signature": "def samefile(self, other_path):", "body": "if hasattr(os.path, \"\"):st = self.stat()try:other_st = other_path.stat()except AttributeError:other_st = os.stat(other_path)return os.path.samestat(st, other_st)else:filename1 = six.text_type(self)filename2 = six.text_type(other_path)st1 = _win32_get_unique_path_id(filename1)st2 = _win32_get_unique_path_id(filename2)return st1 == st2", "docstring": "Return whether other_path is the same or not as this file\n (as returned by os.path.samefile()).", "id": "f2883:c14:m10"} {"signature": "def iterdir(self):", "body": "if self._closed:self._raise_closed()for name in self._accessor.listdir(self):if name in ('', ''):continueyield self._make_child_relpath(name)if self._closed:self._raise_closed()", "docstring": "Iterate over the files in this directory. Does not yield any\n result for the special paths '.' and '..'.", "id": "f2883:c14:m11"} {"signature": "def glob(self, pattern):", "body": "if not pattern:raise ValueError(\"\".format(pattern))pattern = self._flavour.casefold(pattern)drv, root, pattern_parts = self._flavour.parse_parts((pattern,))if drv or root:raise NotImplementedError(\"\")selector = _make_selector(tuple(pattern_parts))for p in selector.select_from(self):yield p", "docstring": "Iterate over this subtree and yield all existing files (of any\n kind, including directories) matching the given pattern.", "id": "f2883:c14:m12"} {"signature": "def rglob(self, pattern):", "body": "pattern = self._flavour.casefold(pattern)drv, root, pattern_parts = self._flavour.parse_parts((pattern,))if drv or root:raise NotImplementedError(\"\")selector = _make_selector((\"\",) + tuple(pattern_parts))for p in selector.select_from(self):yield p", "docstring": "Recursively yield all existing files (of any kind, including\n directories) matching the given pattern, anywhere in this subtree.", "id": "f2883:c14:m13"} {"signature": "def absolute(self):", "body": "if self._closed:self._raise_closed()if self.is_absolute():return selfobj = self._from_parts([os.getcwd()] + self._parts, init=False)obj._init(template=self)return obj", "docstring": "Return an absolute version of this path. This function works\n even if the path doesn't point to anything.\n\n No normalization is done, i.e. all '.' and '..' 
will be kept along.\n Use resolve() to get the canonical path to a file.", "id": "f2883:c14:m14"} {"signature": "def resolve(self, strict=False):", "body": "if self._closed:self._raise_closed()s = self._flavour.resolve(self, strict=strict)if s is None:self.stat()s = str(self.absolute())normed = self._flavour.pathmod.normpath(s)obj = self._from_parts((normed,), init=False)obj._init(template=self)return obj", "docstring": "Make the path absolute, resolving all symlinks on the way and also\nnormalizing it (for example turning slashes into backslashes under\nWindows).", "id": "f2883:c14:m15"} {"signature": "def stat(self):", "body": "return self._accessor.stat(self)", "docstring": "Return the result of the stat() system call on this path, like\nos.stat() does.", "id": "f2883:c14:m16"} {"signature": "def owner(self):", "body": "import pwdreturn pwd.getpwuid(self.stat().st_uid).pw_name", "docstring": "Return the login name of the file owner.", "id": "f2883:c14:m17"} {"signature": "def group(self):", "body": "import grpreturn grp.getgrgid(self.stat().st_gid).gr_name", "docstring": "Return the group name of the file gid.", "id": "f2883:c14:m18"} {"signature": "def open(self, mode='', buffering=-, encoding=None,errors=None, newline=None):", "body": "if self._closed:self._raise_closed()if sys.version_info >= (, ):return io.open(str(self), mode, buffering, encoding, errors, newline,opener=self._opener)else:return io.open(str(self), mode, buffering,encoding, errors, newline)", "docstring": "Open the file pointed by this path and return a file object, as\nthe built-in open() function does.", "id": "f2883:c14:m19"} {"signature": "def read_bytes(self):", "body": "with self.open(mode='') as f:return f.read()", "docstring": "Open the file in bytes mode, read it, and close the file.", "id": "f2883:c14:m20"} {"signature": "def read_text(self, encoding=None, errors=None):", "body": "with self.open(mode='', encoding=encoding, errors=errors) as f:return f.read()", "docstring": "Open the file in text mode, read it, and close the file.", "id": "f2883:c14:m21"} {"signature": "def write_bytes(self, data):", "body": "if not isinstance(data, six.binary_type):raise TypeError('' %(six.binary_type.__name__, data.__class__.__name__))with self.open(mode='') as f:return f.write(data)", "docstring": "Open the file in bytes mode, write to it, and close the file.", "id": "f2883:c14:m22"} {"signature": "def write_text(self, data, encoding=None, errors=None):", "body": "if not isinstance(data, six.text_type):raise TypeError('' %(six.text_type.__name__, data.__class__.__name__))with self.open(mode='', encoding=encoding, errors=errors) as f:return f.write(data)", "docstring": "Open the file in text mode, write to it, and close the file.", "id": "f2883:c14:m23"} {"signature": "def touch(self, mode=, exist_ok=True):", "body": "if self._closed:self._raise_closed()if exist_ok:try:self._accessor.utime(self, None)except OSError:passelse:returnflags = os.O_CREAT | os.O_WRONLYif not exist_ok:flags |= os.O_EXCLfd = self._raw_open(flags, mode)os.close(fd)", "docstring": "Create this file with the given access mode, if it doesn't exist.", "id": "f2883:c14:m24"} {"signature": "def mkdir(self, mode=, parents=False, exist_ok=False):", "body": "if self._closed:self._raise_closed()def _try_func():self._accessor.mkdir(self, mode)def _exc_func(exc):if not parents or self.parent == self:raise excself.parent.mkdir(parents=True, exist_ok=True)self.mkdir(mode, parents=False, exist_ok=exist_ok)try:_try_except_filenotfounderror(_try_func, 
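The convenience I/O methods above (``mkdir``, ``write_text``, ``read_text``, ``resolve``) compose naturally; a short, self-contained example::

    from pathlib import Path

    d = Path("scratch/data")
    d.mkdir(parents=True, exist_ok=True)      # create intermediate dirs; no error if present
    f = d / "notes.txt"
    f.write_text("hello", encoding="utf-8")
    print(f.read_text(encoding="utf-8"))      # hello
    print(f.resolve())                        # absolute path with symlinks resolved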
_exc_func)except OSError:if not exist_ok or not self.is_dir():raise", "docstring": "Create a new directory at this given path.", "id": "f2883:c14:m25"} {"signature": "def chmod(self, mode):", "body": "if self._closed:self._raise_closed()self._accessor.chmod(self, mode)", "docstring": "Change the permissions of the path, like os.chmod().", "id": "f2883:c14:m26"} {"signature": "def lchmod(self, mode):", "body": "if self._closed:self._raise_closed()self._accessor.lchmod(self, mode)", "docstring": "Like chmod(), except if the path points to a symlink, the symlink's\npermissions are changed, rather than its target's.", "id": "f2883:c14:m27"} {"signature": "def unlink(self):", "body": "if self._closed:self._raise_closed()self._accessor.unlink(self)", "docstring": "Remove this file or link.\nIf the path is a directory, use rmdir() instead.", "id": "f2883:c14:m28"} {"signature": "def rmdir(self):", "body": "if self._closed:self._raise_closed()self._accessor.rmdir(self)", "docstring": "Remove this directory. The directory must be empty.", "id": "f2883:c14:m29"} {"signature": "def lstat(self):", "body": "if self._closed:self._raise_closed()return self._accessor.lstat(self)", "docstring": "Like stat(), except if the path points to a symlink, the symlink's\nstatus information is returned, rather than its target's.", "id": "f2883:c14:m30"} {"signature": "def rename(self, target):", "body": "if self._closed:self._raise_closed()self._accessor.rename(self, target)", "docstring": "Rename this path to the given path.", "id": "f2883:c14:m31"} {"signature": "def replace(self, target):", "body": "if sys.version_info < (, ):raise NotImplementedError(\"\"\"\")if self._closed:self._raise_closed()self._accessor.replace(self, target)", "docstring": "Rename this path to the given path, clobbering the existing\ndestination if it exists.", "id": "f2883:c14:m32"} {"signature": "def symlink_to(self, target, target_is_directory=False):", "body": "if self._closed:self._raise_closed()self._accessor.symlink(target, self, target_is_directory)", "docstring": "Make this path a symlink pointing to the given path.\nNote the order of arguments (self, target) is the reverse of\nos.symlink's.", "id": "f2883:c14:m33"} {"signature": "def exists(self):", "body": "try:self.stat()except OSError as e:if e.errno not in (ENOENT, ENOTDIR):raisereturn Falsereturn True", "docstring": "Whether this path exists.", "id": "f2883:c14:m34"} {"signature": "def is_dir(self):", "body": "try:return S_ISDIR(self.stat().st_mode)except OSError as e:if e.errno not in (ENOENT, ENOTDIR):raisereturn False", "docstring": "Whether this path is a directory.", "id": "f2883:c14:m35"} {"signature": "def is_file(self):", "body": "try:return S_ISREG(self.stat().st_mode)except OSError as e:if e.errno not in (ENOENT, ENOTDIR):raisereturn False", "docstring": "Whether this path is a regular file (also True for symlinks pointing\nto regular files).", "id": "f2883:c14:m36"} {"signature": "def is_symlink(self):", "body": "try:return S_ISLNK(self.lstat().st_mode)except OSError as e:if e.errno not in (ENOENT, ENOTDIR):raisereturn False", "docstring": "Whether this path is a symbolic link.", "id": "f2883:c14:m37"} {"signature": "def is_block_device(self):", "body": "try:return S_ISBLK(self.stat().st_mode)except OSError as e:if e.errno not in (ENOENT, ENOTDIR):raisereturn False", "docstring": "Whether this path is a block device.", "id": "f2883:c14:m38"} {"signature": "def is_char_device(self):", "body": "try:return S_ISCHR(self.stat().st_mode)except OSError as e:if e.errno 
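``exists()``, ``is_dir()``, ``is_file()`` and the other predicates above all follow the same pattern: call ``stat()`` and translate "no such file" errors into ``False`` rather than raising, while letting real errors propagate. The pattern in isolation::

    import os
    import stat
    from errno import ENOENT, ENOTDIR

    def is_regular_file(path):
        try:
            return stat.S_ISREG(os.stat(path).st_mode)
        except OSError as e:
            if e.errno not in (ENOENT, ENOTDIR):
                raise                  # e.g. permission errors still propagate
            return False

    print(is_regular_file("setup.py"), is_regular_file("no/such/file"))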
not in (ENOENT, ENOTDIR):raisereturn False", "docstring": "Whether this path is a character device.", "id": "f2883:c14:m39"} {"signature": "def is_fifo(self):", "body": "try:return S_ISFIFO(self.stat().st_mode)except OSError as e:if e.errno not in (ENOENT, ENOTDIR):raisereturn False", "docstring": "Whether this path is a FIFO.", "id": "f2883:c14:m40"} {"signature": "def is_socket(self):", "body": "try:return S_ISSOCK(self.stat().st_mode)except OSError as e:if e.errno not in (ENOENT, ENOTDIR):raisereturn False", "docstring": "Whether this path is a socket.", "id": "f2883:c14:m41"} {"signature": "def expanduser(self):", "body": "if (not (self._drv or self._root)and self._parts and self._parts[][:] == ''):homedir = self._flavour.gethomedir(self._parts[][:])return self._from_parts([homedir] + self._parts[:])return self", "docstring": "Return a new path with expanded ~ and ~user constructs\n (as returned by os.path.expanduser)", "id": "f2883:c14:m42"} {"signature": "def _sort_by(key):", "body": "@staticmethoddef sort_by(p_list, reverse=False):return sorted(p_list,key=lambda p: getattr(p, key),reverse=reverse,)return sort_by", "docstring": "High order function for sort methods.", "id": "f2884:m1"} {"signature": "def assert_is_file_and_exists(self):", "body": "if not self.is_file():msg = \"\" % selfraise EnvironmentError(msg)", "docstring": "Assert it is a directory and exists in file system.", "id": "f2884:c0:m0"} {"signature": "def assert_is_dir_and_exists(self):", "body": "if not self.is_dir():msg = \"\" % selfraise EnvironmentError(msg)", "docstring": "Assert it is a directory and exists in file system.", "id": "f2884:c0:m1"} {"signature": "def assert_exists(self):", "body": "if not self.exists():msg = \"\" % selfraise EnvironmentError(msg)", "docstring": "Assert it exists.", "id": "f2884:c0:m2"} {"signature": "def select(self, filters=all_true, recursive=True):", "body": "self.assert_is_dir_and_exists()if recursive:for p in self.glob(\"\"):if filters(p):yield pelse:for p in self.iterdir():if filters(p):yield p", "docstring": "Select path by criterion.\n\n :param filters: a lambda function that take a `pathlib.Path` as input,\n boolean as a output.\n :param recursive: include files in subfolder or not.\n\n **\u4e2d\u6587\u6587\u6863**\n\n \u6839\u636efilters\u4e2d\u5b9a\u4e49\u7684\u6761\u4ef6\u9009\u62e9\u8def\u5f84\u3002", "id": "f2884:c0:m3"} {"signature": "def select_file(self, filters=all_true, recursive=True):", "body": "for p in self.select(filters, recursive):if p.is_file():yield p", "docstring": "Select file path by criterion.\n\n **\u4e2d\u6587\u6587\u6863**\n\n \u6839\u636efilters\u4e2d\u5b9a\u4e49\u7684\u6761\u4ef6\u9009\u62e9\u6587\u4ef6\u3002", "id": "f2884:c0:m4"} {"signature": "def select_dir(self, filters=all_true, recursive=True):", "body": "for p in self.select(filters, recursive):if p.is_dir():yield p", "docstring": "Select dir path by criterion.\n\n **\u4e2d\u6587\u6587\u6863**\n\n \u6839\u636efilters\u4e2d\u5b9a\u4e49\u7684\u6761\u4ef6\u9009\u62e9\u6587\u4ef6\u5939\u3002", "id": "f2884:c0:m5"} {"signature": "@propertydef n_file(self):", "body": "self.assert_is_dir_and_exists()n = for _ in self.select_file(recursive=True):n += return n", "docstring": "Count how many files in this directory. Including file in sub folder.", "id": "f2884:c0:m6"} {"signature": "@propertydef n_dir(self):", "body": "self.assert_is_dir_and_exists()n = for _ in self.select_dir(recursive=True):n += return n", "docstring": "Count how many folders in this directory. 
Including folder in sub folder.", "id": "f2884:c0:m7"} {"signature": "@propertydef n_subfile(self):", "body": "self.assert_is_dir_and_exists()n = for _ in self.select_file(recursive=False):n += return n", "docstring": "Count how many files in this directory (doesn't include files in\nsub folders).", "id": "f2884:c0:m8"} {"signature": "@propertydef n_subdir(self):", "body": "self.assert_is_dir_and_exists()n = for _ in self.select_dir(recursive=False):n += return n", "docstring": "Count how many folders in this directory (doesn't include folder in\nsub folders).", "id": "f2884:c0:m9"} {"signature": "def select_by_ext(self, ext, recursive=True):", "body": "ext = [ext.strip().lower() for ext in ensure_list(ext)]def filters(p): return p.suffix.lower() in extreturn self.select_file(filters, recursive)", "docstring": "Select file path by extension.\n\n:param ext:\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u9009\u62e9\u4e0e\u9884\u5b9a\u4e49\u7684\u82e5\u5e72\u4e2a\u6269\u5c55\u540d\u5339\u914d\u7684\u6587\u4ef6\u3002", "id": "f2884:c0:m10"} {"signature": "def select_by_pattern_in_fname(self,pattern,recursive=True,case_sensitive=False):", "body": "if case_sensitive:def filters(p):return pattern in p.fnameelse:pattern = pattern.lower()def filters(p):return pattern in p.fname.lower()return self.select_file(filters, recursive)", "docstring": "Select file path by text pattern in file name.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u9009\u62e9\u6587\u4ef6\u540d\u4e2d\u5305\u542b\u6307\u5b9a\u5b50\u5b57\u7b26\u4e32\u7684\u6587\u4ef6\u3002", "id": "f2884:c0:m11"} {"signature": "def select_by_pattern_in_abspath(self,pattern,recursive=True,case_sensitive=False):", "body": "if case_sensitive:def filters(p):return pattern in p.abspathelse:pattern = pattern.lower()def filters(p):return pattern in p.abspath.lower()return self.select_file(filters, recursive)", "docstring": "Select file path by text pattern in absolute path.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u9009\u62e9\u7edd\u5bf9\u8def\u5f84\u4e2d\u5305\u542b\u6307\u5b9a\u5b50\u5b57\u7b26\u4e32\u7684\u6587\u4ef6\u3002", "id": "f2884:c0:m12"} {"signature": "def select_by_size(self, min_size=, max_size= << , recursive=True):", "body": "def filters(p): return min_size <= p.size <= max_sizereturn self.select_file(filters, recursive)", "docstring": "Select file path by size.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u9009\u62e9\u6240\u6709\u6587\u4ef6\u5927\u5c0f\u5728\u4e00\u5b9a\u8303\u56f4\u5185\u7684\u6587\u4ef6\u3002", "id": "f2884:c0:m13"} {"signature": "def select_by_mtime(self, min_time=, max_time=ts_2100,recursive=True):", "body": "def filters(p): return min_time <= p.mtime <= max_timereturn self.select_file(filters, recursive)", "docstring": "Select file path by modify time.\n\n:param min_time: lower bound timestamp\n:param max_time: upper bound timestamp\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u9009\u62e9\u6240\u6709 :attr:`pathlib_mate.pathlib2.Path.mtime` \u5728\u4e00\u5b9a\u8303\u56f4\u5185\u7684\u6587\u4ef6\u3002", "id": "f2884:c0:m14"} {"signature": "def select_by_atime(self, min_time=, max_time=ts_2100, recursive=True):", "body": "def filters(p): return min_time <= p.atime <= max_timereturn self.select_file(filters, recursive)", "docstring": "Select file path by access time.\n\n:param min_time: lower bound timestamp\n:param max_time: upper bound timestamp\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u9009\u62e9\u6240\u6709 :attr:`pathlib_mate.pathlib2.Path.atime` \u5728\u4e00\u5b9a\u8303\u56f4\u5185\u7684\u6587\u4ef6\u3002", "id": "f2884:c0:m15"} {"signature": "def 
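Every ``select_by_*`` helper above reduces to the same idea: a generator over the tree filtered by a predicate on each path. A stdlib sketch (the extension set and size bound are illustrative)::

    from pathlib import Path

    def select_file(root, predicate, recursive=True):
        it = Path(root).rglob("*") if recursive else Path(root).iterdir()
        for p in it:
            if p.is_file() and predicate(p):
                yield p

    # e.g. images smaller than 5 MB
    small_images = list(select_file(
        ".",
        lambda p: p.suffix.lower() in {".jpg", ".png"} and p.stat().st_size < 5_000_000,
    ))
    print(len(small_images))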
select_by_ctime(self, min_time=, max_time=ts_2100,recursive=True):", "body": "def filters(p): return min_time <= p.ctime <= max_timereturn self.select_file(filters, recursive)", "docstring": "Select file path by create time.\n\n:param min_time: lower bound timestamp\n:param max_time: upper bound timestamp\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u9009\u62e9\u6240\u6709 :attr:`pathlib_mate.pathlib2.Path.ctime` \u5728\u4e00\u5b9a\u8303\u56f4\u5185\u7684\u6587\u4ef6\u3002", "id": "f2884:c0:m16"} {"signature": "def select_image(self, recursive=True):", "body": "return self.select_by_ext(self._image_ext, recursive)", "docstring": "Select image file.", "id": "f2884:c0:m17"} {"signature": "def select_audio(self, recursive=True): ", "body": "return self.select_by_ext(self._audio_ext, recursive)", "docstring": "Select audio file.", "id": "f2884:c0:m18"} {"signature": "def select_video(self, recursive=True): ", "body": "return self.select_by_ext(self._video_ext, recursive)", "docstring": "Select video file.", "id": "f2884:c0:m19"} {"signature": "def select_word(self, recursive=True): ", "body": "return self.select_by_ext(self._ms_word_ext, recursive)", "docstring": "Select Microsoft Word file.", "id": "f2884:c0:m20"} {"signature": "def select_excel(self, recursive=True): ", "body": "return self.select_by_ext(self._ms_excel_ext, recursive)", "docstring": "Select Microsoft Excel file.", "id": "f2884:c0:m21"} {"signature": "def select_archive(self, recursive=True): ", "body": "return self.select_by_ext(self._archive_ext, recursive)", "docstring": "Select compressed archive file.", "id": "f2884:c0:m22"} {"signature": "@propertydef dirsize(self):", "body": "total = for p in self.select_file(recursive=True):try:total += p.sizeexcept: print(\"\" % p)return total", "docstring": "Return total file size (include sub folder). Symlink doesn't count.", "id": "f2884:c0:m23"} {"signature": "def ensure_str(value):", "body": "if isinstance(value, six.string_types):return valueelse:return six.text_type(value)", "docstring": "Ensure value is string.", "id": "f2885:m0"} {"signature": "def ensure_list(path_or_path_list):", "body": "if isinstance(path_or_path_list, (tuple, list, set)):return [ensure_str(path) for path in path_or_path_list]else:return [ensure_str(path_or_path_list), ]", "docstring": "Pre-process input argument, whether if it is:\n\n1. abspath\n2. Path instance\n3. string\n4. list or set of any of them\n\nIt returns list of path.\n\n:return path_or_path_list: always return list of path in string\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u9884\u5904\u7406\u8f93\u5165\u53c2\u6570\u3002", "id": "f2885:m1"} {"signature": "def repr_data_size(size_in_bytes, precision=):", "body": "if size_in_bytes < :return \"\" % size_in_bytesindex = while :index += size_in_bytes, mod = divmod(size_in_bytes, )if size_in_bytes < :breaktemplate = \"\" % precisions = template.format(size_in_bytes + mod / , MAGNITUDE_OF_DATA[index])return s", "docstring": "Return human readable string represent of a file size. 
Doesn't support\nsize greater than 1EB.\n\nFor example:\n\n- 100 bytes => 100 B\n- 100,000 bytes => 97.66 KB\n- 100,000,000 bytes => 95.37 MB\n- 100,000,000,000 bytes => 93.13 GB\n- 100,000,000,000,000 bytes => 90.95 TB\n- 100,000,000,000,000,000 bytes => 88.82 PB\n...\n\nMagnitude of data::\n\n 1000 kB kilobyte\n 1000 ** 2 MB megabyte\n 1000 ** 3 GB gigabyte\n 1000 ** 4 TB terabyte\n 1000 ** 5 PB petabyte\n 1000 ** 6 EB exabyte\n 1000 ** 7 ZB zettabyte\n 1000 ** 8 YB yottabyte", "id": "f2885:m2"} {"signature": "def zip_a_folder(src, dst):", "body": "if os.path.exists(dst):print(\"\" % dst)returnsrc, dst = os.path.abspath(src), os.path.abspath(dst)cwd = os.getcwd()todo = list()dirname, basename = os.path.split(src)os.chdir(dirname)for dirname, _, fnamelist in os.walk(basename):for fname in fnamelist:newname = os.path.join(dirname, fname)todo.append(newname)with ZipFile(dst, \"\") as f:for newname in todo:f.write(newname)os.chdir(cwd)", "docstring": "Add a folder and everything inside to zip archive.\n\nExample::\n\n |---paper\n |--- algorithm.pdf\n |--- images\n |--- 1.jpg\n\n zip_a_folder(\"paper\", \"paper.zip\")\n\n paper.zip\n |---paper\n |--- algorithm.pdf\n |--- images\n |--- 1.jpg\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u5c06\u6574\u4e2a\u6587\u4ef6\u5939\u6dfb\u52a0\u5230\u538b\u7f29\u5305, \u5305\u62ec\u6839\u76ee\u5f55\u672c\u8eab\u3002", "id": "f2887:m0"} {"signature": "def zip_everything_in_a_folder(src, dst):", "body": "if os.path.exists(dst):print(\"\" % dst)returnsrc, dst = os.path.abspath(src), os.path.abspath(dst)cwd = os.getcwd()todo = list()os.chdir(src)for dirname, _, fnamelist in os.walk(os.getcwd()):for fname in fnamelist:newname = os.path.relpath(os.path.join(dirname, fname), src)todo.append(newname)with ZipFile(dst, \"\") as f:for newname in todo:f.write(newname)os.chdir(cwd)", "docstring": "Add everything in a folder except the root folder it self to zip archive.\n\nExample::\n\n |---paper\n |--- algorithm.pdf\n |--- images\n |--- 1.jpg\n\n zip_everything_in_folder(\"paper\", \"paper.zip\")\n\n paper.zip\n |--- algorithm.pdf\n |--- images\n |--- 1.jpg\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u5c06\u76ee\u5f55\u5185\u90e8\u7684\u6240\u6709\u6587\u4ef6\u6dfb\u52a0\u5230\u538b\u7f29\u5305, \u4e0d\u5305\u62ec\u6839\u76ee\u5f55\u672c\u8eab\u3002", "id": "f2887:m1"} {"signature": "def zip_many_files(list_of_abspath, dst):", "body": "if os.path.exists(dst):print(\"\" % dst)returnbase_dir = os.getcwd()with ZipFile(dst, \"\") as f:for abspath in list_of_abspath:dirname, basename = os.path.split(abspath)os.chdir(dirname)f.write(basename)os.chdir(base_dir)", "docstring": "Add many files to a zip archive.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u5c06\u4e00\u7cfb\u5217\u7684\u6587\u4ef6\u538b\u7f29\u5230\u4e00\u4e2a\u538b\u7f29\u5305\u4e2d, \u82e5\u6709\u91cd\u590d\u7684\u6587\u4ef6\u540d, \u5728zip\u4e2d\u4fdd\u7559\u6240\u6709\u7684\u526f\u672c\u3002", "id": "f2887:m2"} {"signature": "def is_empty(self, strict=True):", "body": "if self.exists():if self.is_file():return self.size == elif self.is_dir():if strict:return len(list(self.select(recursive=True))) == else: return len(list(self.select_file(recursive=True))) == else: msg = \"\" % selfraise EnvironmentError(msg)else:raise EnvironmentError(\"\" % self)", "docstring": "- If it's a file, check if it is a empty file. (0 bytes content)\n- If it's a directory, check if there's no file and dir in it.\n But if ``strict = False``, then only check if there's no file in it.\n\n:param strict: only useful when it is a directory. 
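The size-formatting helper documented above renders 100,000 bytes as ``97.66 KB``, i.e. it steps by 1024 per unit. A sketch that reproduces the documented example outputs (two decimal places of precision is an assumption)::

    def human_size(n, precision=2):
        units = ["B", "KB", "MB", "GB", "TB", "PB", "EB"]
        i = 0
        while n >= 1024 and i < len(units) - 1:
            n /= 1024.0
            i += 1
        return "%d B" % n if i == 0 else "%.*f %s" % (precision, n, units[i])

    print(human_size(100))                 # 100 B
    print(human_size(100_000))             # 97.66 KB
    print(human_size(100_000_000_000))     # 93.13 GB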
if True, only\n return True if this dir has no dir and file. if False, return True\n if it doesn't have any file.", "id": "f2888:c0:m4"} {"signature": "def auto_complete_choices(self, case_sensitive=False):", "body": "self_basename = self.basenameself_basename_lower = self.basename.lower()if case_sensitive: def match(basename):return basename.startswith(self_basename)else:def match(basename):return basename.lower().startswith(self_basename_lower)choices = list()if self.is_dir():choices.append(self)for p in self.sort_by_abspath(self.select(recursive=False)):choices.append(p)else:p_parent = self.parentif p_parent.is_dir():for p in self.sort_by_abspath(p_parent.select(recursive=False)):if match(p.basename):choices.append(p)else: raise ValueError(\"\" % p_parent)return choices", "docstring": "A command line auto complete similar behavior. Find all item with same\nprefix of this one.\n\n:param case_sensitive: toggle if it is case sensitive.\n:return: list of :class:`pathlib_mate.pathlib2.Path`.", "id": "f2888:c0:m5"} {"signature": "def print_big_dir(self, top_n=):", "body": "self.assert_is_dir_and_exists()size_table = sorted([(p, p.dirsize) for p in self.select_dir(recursive=False)],key=lambda x: x[],reverse=True,)for p, size in size_table[:top_n]:print(\"\".format(repr_data_size(size), p.abspath))", "docstring": "Print ``top_n`` big dir in this dir.", "id": "f2888:c0:m6"} {"signature": "def print_big_file(self, top_n=):", "body": "self.assert_is_dir_and_exists()size_table = sorted([(p, p.size) for p in self.select_file(recursive=True)],key=lambda x: x[],reverse=True,)for p, size in size_table[:top_n]:print(\"\".format(repr_data_size(size), p.abspath))", "docstring": "Print ``top_n`` big file in this dir.", "id": "f2888:c0:m7"} {"signature": "def print_big_dir_and_big_file(self, top_n=):", "body": "self.assert_is_dir_and_exists()size_table1 = sorted([(p, p.dirsize) for p in self.select_dir(recursive=False)],key=lambda x: x[],reverse=True,)for p1, size1 in size_table1[:top_n]:print(\"\".format(repr_data_size(size1), p1.abspath))size_table2 = sorted([(p, p.size) for p in p1.select_file(recursive=True)],key=lambda x: x[],reverse=True,)for p2, size2 in size_table2[:top_n]:print(\"\".format(repr_data_size(size2), p2.abspath))", "docstring": "Print ``top_n`` big dir and ``top_n`` big file in each dir.", "id": "f2888:c0:m8"} {"signature": "def file_stat_for_all(self, filters=all_true): ", "body": "self.assert_is_dir_and_exists()from collections import OrderedDictstat = OrderedDict()stat[self.abspath] = {\"\": , \"\": , \"\": }for p in self.select(filters=filters, recursive=True):if p.is_file():size = p.sizewhile :parent = p.parentstat[parent.abspath][\"\"] += stat[parent.abspath][\"\"] += sizeif parent.abspath == self.abspath:breakp = parentelif p.is_dir():stat[p.abspath] = {\"\": , \"\": , \"\": }while :parent = p.parentstat[parent.abspath][\"\"] += if parent.abspath == self.abspath:breakp = parentreturn stat", "docstring": "Find out how many files, directories and total size (Include file in\nit's sub-folder) it has for each folder and sub-folder.\n\n:returns: stat, a dict like ``{\"directory path\": {\n \"file\": number of files, \"dir\": number of directories,\n \"size\": total size in bytes}}``\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u8fd4\u56de\u4e00\u4e2a\u76ee\u5f55\u4e2d\u7684\u6bcf\u4e2a\u5b50\u76ee\u5f55\u7684, \u6587\u4ef6, \u6587\u4ef6\u5939, \u5927\u5c0f\u7684\u7edf\u8ba1\u6570\u636e\u3002", "id": "f2888:c0:m9"} {"signature": "def file_stat(self, filters=all_true):", "body": 
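``print_big_file`` above is essentially "walk, stat, sort by size, take the top N". The same ranking with plain ``pathlib``::

    from pathlib import Path

    def biggest_files(root, top_n=5):
        files = [p for p in Path(root).rglob("*") if p.is_file()]
        return sorted(files, key=lambda p: p.stat().st_size, reverse=True)[:top_n]

    for p in biggest_files("."):
        print(p.stat().st_size, p)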
"self.assert_is_dir_and_exists()stat = {\"\": , \"\": , \"\": }for p in self.select(filters=filters, recursive=True):if p.is_file():stat[\"\"] += stat[\"\"] += p.sizeelif p.is_dir():stat[\"\"] += return stat", "docstring": "Find out how many files, directorys and total size (Include file in\n it's sub-folder).\n\n :returns: stat, a dict like ``{\"file\": number of files,\n \"dir\": number of directorys, \"size\": total size in bytes}``\n\n **\u4e2d\u6587\u6587\u6863**\n\n \u8fd4\u56de\u4e00\u4e2a\u76ee\u5f55\u4e2d\u7684\u6587\u4ef6, \u6587\u4ef6\u5939, \u5927\u5c0f\u7684\u7edf\u8ba1\u6570\u636e\u3002", "id": "f2888:c0:m10"} {"signature": "def mirror_to(self, dst): ", "body": "self.assert_is_dir_and_exists()src = self.abspathdst = os.path.abspath(dst)if os.path.exists(dst): raise Exception(\"\")folder_to_create = list()file_to_create = list()for current_folder, _, file_list in os.walk(self.abspath):current_folder = current_folder.replace(src, dst)try:os.mkdir(current_folder)except: passfor basename in file_list:abspath = os.path.join(current_folder, basename)with open(abspath, \"\") as _:pass", "docstring": "Create a new folder having exactly same structure with this directory.\nHowever, all files are just empty file with same file name.\n\n:param dst: destination directory. The directory can't exists before\nyou execute this.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u521b\u5efa\u4e00\u4e2a\u76ee\u5f55\u7684\u955c\u50cf\u62f7\u8d1d, \u4e0e\u62f7\u8d1d\u64cd\u4f5c\u4e0d\u540c\u7684\u662f, \u6587\u4ef6\u7684\u526f\u672c\u53ea\u662f\u5728\u6587\u4ef6\u540d\u4e0a\n\u4e0e\u539f\u4ef6\u4e00\u81f4, \u4f46\u662f\u662f\u7a7a\u6587\u4ef6, \u5b8c\u5168\u6ca1\u6709\u5185\u5bb9, \u6587\u4ef6\u5927\u5c0f\u4e3a0\u3002", "id": "f2888:c0:m11"} {"signature": "def execute_pyfile(self, py_exe=None): ", "body": "import subprocessself.assert_is_dir_and_exists()if py_exe is None:if six.PY2:py_exe = \"\"elif six.PY3:py_exe = \"\"for p in self.select_by_ext(\"\"):subprocess.Popen('' % (py_exe, p.abspath))", "docstring": "Execute every ``.py`` file as main script.\n\n:param py_exe: str, python command or python executable path.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u5c06\u76ee\u5f55\u4e0b\u7684\u6240\u6709Python\u6587\u4ef6\u4f5c\u4e3a\u4e3b\u811a\u672c\u7528\u5f53\u524d\u89e3\u91ca\u5668\u8fd0\u884c\u3002", "id": "f2888:c0:m12"} {"signature": "def trail_space(self, filters=lambda p: p.ext == \"\"): ", "body": "self.assert_is_dir_and_exists()for p in self.select_file(filters):try:with open(p.abspath, \"\") as f:lines = list()for line in f:lines.append(line.decode(\"\").rstrip())with open(p.abspath, \"\") as f:f.write(\"\".join(lines).encode(\"\"))except Exception as e: raise e", "docstring": "Trail white space at end of each line for every ``.py`` file.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u5c06\u76ee\u5f55\u4e0b\u7684\u6240\u6709\u88ab\u9009\u62e9\u7684\u6587\u4ef6\u4e2d\u884c\u672b\u7684\u7a7a\u683c\u5220\u9664\u3002", "id": "f2888:c0:m13"} {"signature": "def autopep8(self, **kwargs): ", "body": "self.assert_is_dir_and_exists()for p in self.select_by_ext(\"\"):with open(p.abspath, \"\") as f:code = f.read().decode(\"\")formatted_code = autopep8.fix_code(code, **kwargs)with open(p.abspath, \"\") as f:f.write(formatted_code.encode(\"\"))", "docstring": "Auto convert your python code in a directory to pep8 styled code.\n\n:param kwargs: arguments for ``autopep8.fix_code`` 
method.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u5c06\u76ee\u5f55\u4e0b\u7684\u6240\u6709Python\u6587\u4ef6\u7528pep8\u98ce\u683c\u683c\u5f0f\u5316\u3002\u589e\u52a0\u5176\u53ef\u8bfb\u6027\u548c\u89c4\u8303\u6027\u3002", "id": "f2888:c0:m14"} {"signature": "def encode_hexstr(text):", "body": "return binascii.b2a_hex(text.encode(\"\")).decode(\"\")", "docstring": "Convert any utf-8 string to hex string.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u5c06\u4efb\u610futf-8\u5b57\u7b26\u4e32\u7f16\u7801\u4e3a16\u8fdb\u5236\u5b57\u7b26\u4e32\u3002", "id": "f2889:m0"} {"signature": "def decode_hexstr(text):", "body": "return binascii.a2b_hex(text.encode(\"\")).decode(\"\")", "docstring": "Reverse operation of :func:`encode_hexstr`.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u5c0616\u8fdb\u5236\u5b57\u7b26\u4e32\u89e3\u7801\u4e3a\u539f\u5b57\u7b26\u4e32\u3002", "id": "f2889:m1"} {"signature": "def drop_parts(self, n=):", "body": "return self.__class__(*self.parts[:-n])", "docstring": "Drop parts from the ends.\n\n:param n: integer, number of parts you wants to drop from ends.\n n has to greater equal than 0.\n\n:returns: a new Path object.\n\nExample::\n\n >>> self.__class__(\"/usr/bin/python\").drop_parts(1)\n \"/user/bin\"\n\n >>> self.__class__(\"/usr/bin/python\").drop_parts(2)\n \"/user\"", "id": "f2890:c0:m0"} {"signature": "def append_parts(self, *parts):", "body": "return self.__class__(self, *parts)", "docstring": "Append some parts to the end of this path.\n\n:returns: a new Path object.\n\nExample::\n\n >>> self.__class__(\"/usr/bin/python\").append_parts(\"lib\")\n \"/user/bin/python/lib\"\n\n >>> self.__class__(\"/usr/bin/python\").append_parts(\"lib\", \"core.py\")\n \"/user/bin/python/lib/core.py\"", "id": "f2890:c0:m1"} {"signature": "def change(self,new_abspath=None,new_dirpath=None,new_dirname=None,new_basename=None,new_fname=None,new_ext=None):", "body": "if new_abspath is not None:p = self.__class__(new_abspath)return pif (new_dirpath is None) and (new_dirname is not None):new_dirpath = os.path.join(self.parent.dirpath, new_dirname)elif (new_dirpath is not None) and (new_dirname is None):new_dirpath = new_dirpathelif (new_dirpath is None) and (new_dirname is None):new_dirpath = self.dirpathelif (new_dirpath is not None) and (new_dirname is not None):raise ValueError(\"\")if new_basename is None:if new_fname is None:new_fname = self.fnameif new_ext is None:new_ext = self.extnew_basename = new_fname + new_extelse:if new_fname is not None or new_ext is not None:raise ValueError(\"\"\"\")return self.__class__(new_dirpath, new_basename)", "docstring": "Return a new :class:`pathlib_mate.pathlib2.Path` object with updated information.", "id": "f2890:c0:m2"} {"signature": "def is_not_exist_or_allow_overwrite(self, overwrite=False):", "body": "if self.exists() and overwrite is False:return Falseelse: return True", "docstring": "Test whether a file target is not exists or it exists but allow\noverwrite.", "id": "f2890:c0:m3"} {"signature": "def moveto(self,new_abspath=None,new_dirpath=None,new_dirname=None,new_basename=None,new_fname=None,new_ext=None,overwrite=False,makedirs=False):", "body": "self.assert_exists()p = self.change(new_abspath=new_abspath,new_dirpath=new_dirpath,new_dirname=new_dirname,new_basename=new_basename,new_fname=new_fname,new_ext=new_ext,)if p.is_not_exist_or_allow_overwrite(overwrite=overwrite):if self.abspath != p.abspath:if makedirs:parent = p.parentif not parent.exists():os.makedirs(parent.abspath)self.rename(p)return p", "docstring": "An advanced 
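The hex-string helpers above are thin wrappers around ``binascii``; their docstrings state the input is treated as UTF-8, so the encodings below follow that (a sketch, not necessarily the exact original arguments)::

    import binascii

    def encode_hexstr(text):
        return binascii.b2a_hex(text.encode("utf-8")).decode("ascii")

    def decode_hexstr(hexstr):
        return binascii.a2b_hex(hexstr.encode("ascii")).decode("utf-8")

    s = encode_hexstr("readme.txt")
    print(s)                   # 726561646d652e747874
    print(decode_hexstr(s))    # readme.txt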
:meth:`pathlib_mate.pathlib2.Path.rename` method provide ability to rename by\neach components of a path. A new ``Path`` instance will returns.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u9ad8\u7ea7\u91cd\u547d\u540d\u51fd\u6570, \u5141\u8bb8\u7528\u4e8e\u6839\u636e\u8def\u5f84\u7684\u5404\u4e2a\u7ec4\u6210\u90e8\u5206\u8fdb\u884c\u91cd\u547d\u540d\u3002\u4f46\u548cos.rename\n\u65b9\u6cd5\u4e00\u6837, \u9700\u8981\u4fdd\u8bc1\u6bcd\u6587\u4ef6\u5939\u5b58\u5728\u3002", "id": "f2890:c0:m4"} {"signature": "def copyto(self,new_abspath=None,new_dirpath=None,new_dirname=None,new_basename=None,new_fname=None,new_ext=None,overwrite=False,makedirs=False):", "body": "self.assert_exists()p = self.change(new_abspath=new_abspath,new_dirpath=new_dirpath,new_dirname=new_dirname,new_basename=new_basename,new_fname=new_fname,new_ext=new_ext,)if p.is_not_exist_or_allow_overwrite(overwrite=overwrite):if self.abspath != p.abspath:try:shutil.copy(self.abspath, p.abspath)except IOError as e:if makedirs:os.makedirs(p.parent.abspath)shutil.copy(self.abspath, p.abspath)else:raise ereturn p", "docstring": "Copy this file to other place.", "id": "f2890:c0:m5"} {"signature": "def remove(self, *args, **kwargs):", "body": "self.unlink(*args, **kwargs)", "docstring": "Remove it.", "id": "f2890:c0:m6"} {"signature": "def get_data(self, **kwargs):", "body": "limit = int(kwargs.get('', ))end_date = kwargs.get('', False)if end_date and isinstance(end_date, datetime.datetime):end_date = self.convert_datetime(end_date)if self.mac_address is not None:service_address = '' % self.mac_addressself.api_instance.log('' % service_address)data = dict(limit=limit)if end_date:data.update({'': end_date})self.api_instance.log('')self.api_instance.log(data)return self.api_instance.api_call(service_address, **data)", "docstring": "Get the data for a specific device for a specific end date\n\nKeyword Arguments:\n limit - max 288\n end_date - is Epoch in milliseconds\n\n:return:", "id": "f2896:c0:m4"} {"signature": "def get_devices(self):", "body": "retn = []api_devices = self.api_call('')self.log('')self.log(api_devices)for device in api_devices:retn.append(AmbientWeatherStation(self, device))self.log('')self.log(retn)return retn", "docstring": "Get all devices\n\n:return:\n A list of AmbientWeatherStation instances.", "id": "f2896:c1:m3"} {"signature": "def __init__(self, appid):", "body": "self.appid = appid", "docstring": "Create a Tungsten object with a set appid", "id": "f2901:c0:m0"} {"signature": "def query(self, input = '', params = {}):", "body": "payload = {'': input,'': self.appid}for key, value in list(params.items()):if isinstance(value, (list, tuple)):payload[key] = ''.join(value)else:payload[key] = valuetry:r = requests.get(\"\", params=payload)if r.status_code != :raise Exception('' % (r.status_code))if r.encoding != '':raise Exception('' % (r.encoding))except Exception as e:return Result(error = e)return Result(xml = r.text)", "docstring": "Query Wolfram Alpha and return a Result object", "id": "f2901:c0:m1"} {"signature": "@propertydef pods(self):", "body": "if not self.xml_tree:return []return [Pod(elem) for elem in self.xml_tree.findall('')]", "docstring": "Return list of all Pod objects in result", "id": "f2901:c1:m3"} {"signature": "def __init__(self, pod_root):", "body": "self.root = pod_rootself.xml_tree = ElementTree(pod_root)", "docstring": "Create a Pod object using the ElementTree at the root", "id": "f2901:c2:m0"} {"signature": "@propertydef format(self):", "body": "formats = {}for subpod in self.root.findall(''):for 
elem in list(subpod):if elem.tag == '':continuecontent = elem.textif elem.tag == '':content = {'': elem.get(''),'': elem.get(''),'': elem.get(''),'': int(elem.get('', )),'': int(elem.get('', ))}if elem.tag not in formats:formats[elem.tag] = [content]else:formats[elem.tag].append(content)return formats", "docstring": "Dictionary of available formats, corresponding to a list of the values\nExample: pod.format['plaintext'] will return a list of every plaintext\n content in the pod's subpods", "id": "f2901:c2:m4"} {"signature": "def __init__(self, host='', port=, database='', user=None, password=None,sslmode=None, sslcert=None, sslkey=None, application_name=''):", "body": "if user is None:user = getpass.getuser()self._connect_args = dict(application_name=application_name,database=database, user=user, password=password,host=host, port=port,sslmode=sslmode, sslcert=sslcert, sslkey=sslkey,)self._bin_paths = {}", "docstring": "Constructor.\n\nAll arguments are optional and sensible defaults. Override using args depending on your needs.\n\nParameters\n----------\nhost: str, optional\n remote server IP address or hostname\nport: int, optional\n remote server port\ndatabase: str, optional\n name of database to connect to\nuser: str\n name of user (with required admin privileges)\npassword:\n password for user\nsslmode: str, optional\n mode for SSL connection\nsslcert: str, optional\n file path to SSL certificate for connection\nsslkey: str, optional\n file path to SSL key for connection\napplication_name: str, optional\n allow user to specify the app name in the connection", "id": "f2908:c0:m0"} {"signature": "def names(self):", "body": "stmt = \"\"\"\"\"\"return [x[''] for x in self._iter_results(stmt)]", "docstring": "Returns a list of all database names.", "id": "f2908:c0:m5"} {"signature": "def exists(self, name):", "body": "return name in self.names()", "docstring": "Returns True if named database exists, False otherwise.", "id": "f2908:c0:m6"} {"signature": "def create(self, name):", "body": "log.info('' % name)self._run_stmt('' % name)", "docstring": "Creates a new database.", "id": "f2908:c0:m7"} {"signature": "def drop(self, name):", "body": "log.info('' % name)self._run_stmt('' % name)", "docstring": "Drops an existing database.", "id": "f2908:c0:m8"} {"signature": "def rename(self, from_name, to_name):", "body": "log.info('' % (from_name, to_name))self._run_stmt('' % (from_name, to_name))", "docstring": "Renames an existing database.", "id": "f2908:c0:m9"} {"signature": "def connections(self, name):", "body": "stmt = \"\"\"\"\"\".format(fields=''.join(CONNECTION_FIELDS), datname=name)return list(Connection(**x) for x in self._iter_results(stmt))", "docstring": "Returns a list of existing connections to the named database.", "id": "f2908:c0:m10"} {"signature": "def kill_connections(self, name):", "body": "log.info('' % name)self._run_stmt(\"\"\"\"\"\" % name)", "docstring": "Drops all connections to the specified database.", "id": "f2908:c0:m11"} {"signature": "def available(self, timeout=):", "body": "host = self._connect_args['']port = self._connect_args['']try:sock = socket.create_connection((host, port), timeout=timeout)sock.close()return Trueexcept socket.error:passreturn False", "docstring": "Returns True if database server is running, False otherwise.", "id": "f2908:c0:m12"} {"signature": "def dump(self, name, filename):", "body": "if not self.exists(name):raise DatabaseError('')log.info('' % (name, filename))self._run_cmd('', '', '', '','' % filename, name)", "docstring": "Saves the 
state of a database to a file.\n\nParameters\n----------\nname: str\n the database to be backed up.\nfilename: str\n path to a file where database backup will be written.", "id": "f2908:c0:m13"} {"signature": "def restore(self, name, filename):", "body": "if not self.exists(name):self.create(name)else:log.warn('' % name)log.info('' % (name, filename))self._run_cmd('', '', '' % name, filename)", "docstring": "Loads state of a backup file to a database.\n\nNote\n----\nIf database name does not exist, it will be created.\n\nParameters\n----------\nname: str\n the database to which backup will be restored.\nfilename: str\n path to a file contain a postgres database backup.", "id": "f2908:c0:m14"} {"signature": "def connection_dsn(self, name=None):", "body": "return ''.join(\"\" % (param, value) for param, value in self._connect_options(name))", "docstring": "Provides a connection string for database.\n\nParameters\n----------\nname: str, optional\n an override database name for the connection string.\n\nReturns\n-------\nstr: the connection string (e.g. 'dbname=db1 user=user1 host=localhost port=5432')", "id": "f2908:c0:m16"} {"signature": "def connection_url(self, name=None):", "body": "return ''.format(**{k: v for k, v in self._connect_options(name)})", "docstring": "Provides a connection string for database as a sqlalchemy compatible URL.\n\nNB - this doesn't include special arguments related to SSL connectivity (which are outside the scope\nof the connection URL format).\n\nParameters\n----------\nname: str, optional\n an override database name for the connection string.\n\nReturns\n-------\nstr: the connection URL (e.g. postgresql://user1@localhost:5432/db1)", "id": "f2908:c0:m17"} {"signature": "def shell(self, expect=pexpect):", "body": "dsn = self.connection_dsn()log.debug('' % dsn)child = expect.spawn('' % dsn)if self._connect_args[''] is not None:child.expect('')child.sendline(self._connect_args[''])child.interact()", "docstring": "Connects the database client shell to the database.\n\nParameters\n----------\nexpect_module: str\n the database to which backup will be restored.", "id": "f2908:c0:m18"} {"signature": "def settings(self):", "body": "stmt = \"\".format(fields=''.join(SETTINGS_FIELDS))settings = []for row in self._iter_results(stmt):row[''] = self._vartype_map[row['']](row[''])settings.append(Settings(**row))return settings", "docstring": "Returns settings from the server.", "id": "f2908:c0:m19"} {"signature": "def temp_name():", "body": "return '' + str(uuid.uuid4()).replace('', '')", "docstring": "Returns a \"safe\" (globally unique) name that avoids clashes\nwith existing names.", "id": "f2910:m0"} {"signature": "@contextmanagerdef temp_db(db, name=None):", "body": "if name is None:name = temp_name()db.create(name)if not db.exists(name):raise DatabaseError('')try:yield namefinally:db.drop(name)if db.exists(name):raise DatabaseError('')", "docstring": "A context manager that creates a temporary database.\n\nUseful for automated tests.\n\nParameters\n----------\ndb: object\n a preconfigured DB object\nname: str, optional\n name of the database to be created. 
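``connection_dsn`` and ``connection_url`` above render the same connection options in two syntaxes: a space-separated libpq DSN and a SQLAlchemy-style URL. Reproducing the outputs from their own docstring examples::

    opts = {"dbname": "db1", "user": "user1", "host": "localhost", "port": 5432}

    dsn = " ".join("%s=%s" % (k, v) for k, v in opts.items())
    url = "postgresql://{user}@{host}:{port}/{dbname}".format(**opts)

    print(dsn)   # dbname=db1 user=user1 host=localhost port=5432
    print(url)   # postgresql://user1@localhost:5432/db1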
(default: globally unique name)", "id": "f2910:m1"} {"signature": "def request_resource(self, url, **kwargs):", "body": "raise NotImplementedError", "docstring": "Get an OEmbedResource from one of the providers configured in this \nprovider according to the resource url.\n\nArgs:\n url: The url of the resource to get.\n format: Desired response format (json or xml).\n **kwargs: Optional parameters to pass in to the provider.\n\nReturns:\n OEmbedResource object.\n\nIf no object returned, raises OEmbedException", "id": "f2914:c0:m0"} {"signature": "def _fetch(self, url):", "body": "return fetch_url(url)", "docstring": "Fetches from a URL, respecting GZip encoding, etc.\n\nReturns an OEmbedResource instance", "id": "f2914:c1:m2"} {"signature": "def request_resource(self, url, **kwargs):", "body": "params = kwargsparams[''] = urlparams[''] = ''if '' in self.endpoint_url:url_with_qs = '' % (self.endpoint_url.rstrip(''), urlencode(params))else:url_with_qs = \"\" % (self.endpoint_url, urlencode(params))headers, raw_response = self._fetch(url_with_qs)resource = self.convert_to_resource(headers, raw_response, params)return resource", "docstring": "Request an OEmbedResource for a given url. Some valid keyword args:\n- format\n- maxwidth\n- maxheight", "id": "f2914:c1:m4"} {"signature": "def _image_field(self):", "body": "for field in self.model._meta.fields:if isinstance(field, ImageField):return field.name", "docstring": "Try to automatically detect an image field", "id": "f2914:c2:m2"} {"signature": "def _date_field(self):", "body": "for field in self.model._meta.fields:if isinstance(field, (DateTimeField, DateField)):return field.name", "docstring": "Try to automatically detect an image field", "id": "f2914:c2:m3"} {"signature": "def __new__(cls, name, bases, attrs):", "body": "meta = attrs.pop('', None)provider_class = super(DjangoProviderMetaclass, cls).__new__(cls, name, bases, attrs)for b in bases:base_meta = getattr(b, '', None)if not base_meta:continuefor (k, v) in base_meta.__dict__.items():if not k.startswith('') and k != '' and k not in meta.__dict__:meta.__dict__[k] = vif meta:_meta = DjangoProviderOptions(meta, provider_class)provider_class._meta = _metareturn provider_class", "docstring": "Provides namespacing of Meta options", "id": "f2914:c3:m0"} {"signature": "def _build_regex(self):", "body": "url_patterns = resolver.reverse_dict.get(self._meta.named_view)try:regex = url_patterns[]except TypeError:raise OEmbedException('' % self._meta.named_view)cleaned_sites = self.get_cleaned_sites()site_regexes = []for site in self.get_sites():site_regexes.append(cleaned_sites[site.pk][])sites = ''.join(site_regexes)regex = re.compile('' % (sites, regex))return regex", "docstring": "Performs a reverse lookup on a named view and generates\na list of regexes that match that object. 
It generates\nregexes with the domain name included, using sites provided\nby the get_sites() method.\n\n>>> regex = provider.regex\n>>> regex.pattern\n'http://(www2.kusports.com|www2.ljworld.com|www.lawrence.com)/photos/(?P\\\\d{4})/(?P\\\\w{3})/(?P\\\\d{1,2})/(?P\\\\d+)/$'", "id": "f2914:c4:m2"} {"signature": "def get_sites(self):", "body": "return Site.objects.all()", "docstring": "Return sites whose domains should be checked against", "id": "f2914:c4:m3"} {"signature": "def get_cleaned_sites(self):", "body": "if not getattr(self, '', None):self._clean_sites = cleaned_sites()return self._clean_sites", "docstring": "Attribute-caches the sites/regexes returned by\n`oembed.utils.cleaned_sites`", "id": "f2914:c4:m4"} {"signature": "def provider_from_url(self, url):", "body": "domain = get_domain(url)site_tuples = self.get_cleaned_sites().values()for domain_re, name, normalized_domain in site_tuples:if re.match(domain_re, domain):return normalized_domain, namesite = Site.objects.get_current()return site.domain, site.name", "docstring": "Given a URL for any of our sites, try and match it to one, returning\nthe domain & name of the match. If no match is found, return current.\n\nReturns a tuple of domain, site name -- used to determine 'provider'", "id": "f2914:c4:m5"} {"signature": "def get_params(self, url):", "body": "match = re.match(self.regex, url)if match is not None:params = match.groupdict()if not params:params = {}for i, group in enumerate(match.groups()[:]):params['' % i] = groupreturn paramsraise OEmbedException('' % (url))", "docstring": "Extract the named parameters from a url regex. If the url regex does not contain\nnamed parameters, they will be keyed _0, _1, ...\n\n* Named parameters\nRegex:\n/photos/^(?P\\d{4})/(?P\\w{3})/(?P\\d{1,2})/(?P\\d+)/\n\nURL:\nhttp://www2.ljworld.com/photos/2009/oct/11/12345/\n\nReturn Value:\n{u'day': '11', u'month': 'oct', u'object_id': '12345', u'year': '2009'}\n\n* Unnamed parameters\nRegex:\n/blah/([\\w-]+)/(\\d+)/\n\nURL:\nhttp://www.example.com/blah/hello/123/\n\nReturn Value:\n{u'_0': 'hello', u'_1': '123'}", "id": "f2914:c4:m6"} {"signature": "def get_object(self, url):", "body": "params = self.get_params(url)query = {}for key, value in self._meta.fields_to_match.iteritems():try:query[value] = params[key]except KeyError:raise OEmbedException('' % (key, ''.join(params.keys())))try:obj = self.get_queryset().get(**query)except self._meta.model.DoesNotExist:raise OEmbedException('')return obj", "docstring": "Fields to match is a mapping of url params to fields, so for\nthe photos example above, it would be:\n\nfields_to_match = { 'object_id': 'id' }\n\nThis procedure parses out named params from a URL and then uses\nthe fields_to_match dictionary to generate a query.", "id": "f2914:c4:m8"} {"signature": "def render_html(self, obj, context=None):", "body": "provided_context = context or Context()context = RequestContext(mock_request())context.update(provided_context)context.push()context[self._meta.context_varname] = objrendered = render_to_string(self._meta.template_name, context)context.pop()return rendered", "docstring": "Generate the 'html' attribute of an oembed resource using a template.\nSort of a corollary to the parser's render_oembed method. 
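``get_params`` above relies on ``re.match`` plus ``groupdict()`` to pull named groups out of the provider's URL regex. A standalone demonstration shaped like the docstring's photos example (the domain here is illustrative)::

    import re

    pattern = re.compile(
        r"http://www\.example\.com/photos/"
        r"(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<object_id>\d+)/$"
    )
    m = pattern.match("http://www.example.com/photos/2009/oct/11/12345/")
    print(m.groupdict())
    # {'year': '2009', 'month': 'oct', 'day': '11', 'object_id': '12345'}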
By default,\nthe current mapping will be passed in as the context.\n\nOEmbed templates are stored in:\n\noembed/provider/[app_label]_[model].html\n\n-- or --\n\noembed/provider/media_video.html", "id": "f2914:c4:m9"} {"signature": "def map_attr(self, mapping, attr, obj):", "body": "if attr not in mapping and hasattr(self, attr):if not callable(getattr(self, attr)):mapping[attr] = getattr(self, attr)else:mapping[attr] = getattr(self, attr)(obj)", "docstring": "A kind of cheesy method that allows for callables or attributes to\nbe used interchangeably", "id": "f2914:c4:m10"} {"signature": "def get_image(self, obj):", "body": "if self._meta.image_field:return getattr(obj, self._meta.image_field)", "docstring": "Return an ImageFieldFile instance", "id": "f2914:c4:m11"} {"signature": "def resize(self, image_field, new_width=None, new_height=None):", "body": "if isinstance(image_field, ImageFieldFile) andimage_field.field.width_field andimage_field.field.height_field:current_width = getattr(image_field.instance, image_field.field.width_field)current_height = getattr(image_field.instance, image_field.field.height_field)else:try:file_obj = storage.default_storage.open(image_field.name, '')img_obj = Image.open(file_obj)current_width, current_height = img_obj.sizeexcept IOError:return (image_field.url, , ) if current_width < new_width:if not new_height or current_height < new_height:return (image_field.url, current_width, current_height)new_width, new_height = scale(current_width, current_height, new_width, new_height)return self._meta.image_processor.resize(image_field, new_width, new_height)", "docstring": "Resize an image to the 'best fit' width & height, maintaining\nthe scale of the image, so a 500x500 image sized to 300x400\nwill actually be scaled to 300x300.\n\nParams:\nimage: ImageFieldFile to be resized (i.e. model.image_field)\nnew_width & new_height: desired maximums for resizing\n\nReturns:\nthe url to the new image and the new width & height\n(http://path-to-new-image, 300, 300)", "id": "f2914:c4:m12"} {"signature": "def preprocess(self, obj, mapping, **kwargs):", "body": "pass", "docstring": "Pre-processing hook. Called by map_to_dictionary()", "id": "f2914:c4:m15"} {"signature": "def postprocess(self, obj, mapping, **kwargs):", "body": "pass", "docstring": "Post-processing hook. 
Called by map_to_dictionary()", "id": "f2914:c4:m16"} {"signature": "def map_to_dictionary(self, url, obj, **kwargs):", "body": "maxwidth = kwargs.get('', None)maxheight = kwargs.get('', None)provider_url, provider_name = self.provider_from_url(url)mapping = {'': '','': url,'': provider_name,'': provider_url,'': self.resource_type}self.preprocess(obj, mapping, **kwargs)if self.resource_type == '' and self.get_image(obj):self.resize_photo(obj, mapping, maxwidth, maxheight)elif self.resource_type in ('', '', ''):width, height = size_to_nearest(maxwidth,maxheight,self._meta.valid_sizes,self._meta.force_fit)mapping.update(width=width, height=height)if self.get_image(obj):self.thumbnail(obj, mapping)for attr in ('', '', '', ''):self.map_attr(mapping, attr, obj)if '' in mapping:mapping[''] = relative_to_full(mapping[''], url)if '' in mapping:mapping[''] = relative_to_full(mapping[''], url)if '' not in mapping and mapping[''] in ('', ''):mapping[''] = self.render_html(obj, context=Context(mapping))self.postprocess(obj, mapping, **kwargs)return mapping", "docstring": "Build a dictionary of metadata for the requested object.", "id": "f2914:c4:m17"} {"signature": "def request_resource(self, url, **kwargs):", "body": "obj = self.get_object(url)mapping = self.map_to_dictionary(url, obj, **kwargs)resource = OEmbedResource.create(mapping)resource.content_object = objreturn resource", "docstring": "Request an OEmbedResource for a given url. Some valid keyword args:\n- format\n- maxwidth\n- maxheight", "id": "f2914:c4:m18"} {"signature": "def get_object(self, url, month_format='', day_format=''):", "body": "params = self.get_params(url)try:year = params[self._meta.year_part]month = params[self._meta.month_part]day = params[self._meta.day_part]except KeyError:try:year, month, day = params[''], params[''], params['']except KeyError:raise OEmbedException('')try:tt = time.strptime('' % (year, month, day),'' % ('', month_format, day_format))date = datetime.date(*tt[:])except ValueError:raise OEmbedException('' % url)if isinstance(self._meta.model._meta.get_field(self._meta.date_field), DateTimeField):min_date = datetime.datetime.combine(date, datetime.time.min)max_date = datetime.datetime.combine(date, datetime.time.max)query = {'' % self._meta.date_field: (min_date, max_date)}else:query = {self._meta.date_field: date}for key, value in self._meta.fields_to_match.iteritems():try:query[value] = params[key]except KeyError:raise OEmbedException('' % (key, ''.join(params.keys())))try:obj = self.get_queryset().get(**query)except self._meta.model.DoesNotExist:raise OEmbedException('')return obj", "docstring": "Parses the date from a url and uses it in the query. 
For objects which\nare unique for date.", "id": "f2914:c5:m1"} {"signature": "def setUp(self):", "body": "super(ConsumerTestCase, self).setUp()self.oembed_client = OEmbedConsumer()", "docstring": "Set up test environment", "id": "f2923:c0:m0"} {"signature": "def setUp(self):", "body": "oembed.autodiscover()oembed.site._db_updated = Noneself.storage = DummyMemoryStorage()self.orig_default_storage = storage.default_storagestorage.default_storage = self.storageself.media_root, self.media_url = settings.MEDIA_ROOT, settings.MEDIA_URLsettings.MEDIA_ROOT = MEDIA_ROOTsettings.MEDIA_URL = MEDIA_URLself.template_dirs = settings.TEMPLATE_DIRScur_dir = os.path.dirname(__file__)settings.TEMPLATE_DIRS = [os.path.join(os.path.dirname(cur_dir), '')]self.orig_file_storage = settings.DEFAULT_FILE_STORAGEsettings.DEFAULT_FILE_STORAGE = DEFAULT_FILE_STORAGEtest_image = Image.new('', (, ), (, , , ))self.test_img_buffer = StringIO()test_image.save(self.test_img_buffer, '')self.test_img_file = ContentFile(self.test_img_buffer.getvalue())self.test_img_location = ''storage.default_storage.save(self.test_img_location, self.test_img_file)", "docstring": "Set up test environment", "id": "f2925:c0:m0"} {"signature": "@register.filterdef extract_oembeds(text, args=None):", "body": "resource_type = width = height = Noneif args:dimensions = args.lower().split('')if len(dimensions) in (, ):resource_type = dimensions.pop()if len(dimensions) == :width, height = map(lambda x: int(x), dimensions)client = OEmbedConsumer()return client.extract(text, width, height, resource_type)", "docstring": "Extract oembed resources from a block of text. Returns a list\nof dictionaries.\n\nMax width & height can be specified:\n{% for embed in block_of_text|extract_oembeds:\"400x300\" %}\n\nResource type can be specified:\n{% for photo_embed in block_of_text|extract_oembeds:\"photo\" %}\n\nOr both:\n{% for embed in block_of_text|extract_oembeds:\"400x300xphoto\" %}", "id": "f2937:m1"} {"signature": "@register.filterdef strip_oembeds(text, args=None):", "body": "resource_type = width = height = Noneif args:dimensions = args.lower().split('')if len(dimensions) in (, ):resource_type = dimensions.pop()if len(dimensions) == :width, height = map(lambda x: int(x), dimensions)client = OEmbedConsumer()return mark_safe(client.strip(text, width, height, resource_type))", "docstring": "Take a block of text and strip all the embeds from it, optionally taking\na maxwidth, maxheight / resource_type\n\nUsage:\n{{ post.content|strip_embeds }}\n\n{{ post.content|strip_embeds:\"600x600xphoto\" }}\n\n{{ post.content|strip_embeds:\"video\" }}", "id": "f2937:m2"} {"signature": "def do_oembed(parser, token):", "body": "args = token.split_contents()template_dir = Nonevar_name = Noneif len(args) > :if len(args) == and args[] == '':template_dir = args[]elif len(args) == and args[] == '':var_name = args[]elif len(args) == and args[] == '':template_dir = args[]elif len(args) == and args[] == '':var_name = args[]elif len(args) == and args[] == '':template_dir = args[]var_name = args[]else:raise template.TemplateSyntaxError(\"\"\"\"\"\"\"\")if template_dir:if not (template_dir[] == template_dir[-] and template_dir[] in ('', \"\")):raise template.TemplateSyntaxError(\"\")template_dir = template_dir[:-]if len(args) >= and '' in args[]:width, height = args[].lower().split('')if not width and height:raise template.TemplateSyntaxError(\"\"\"\")else:width, height = None, Nonenodelist = parser.parse(('',))parser.delete_first_token()return OEmbedNode(nodelist, width, height, 
template_dir, var_name)", "docstring": "A node which parses everything between its two nodes, and replaces any links\nwith OEmbed-provided objects, if possible.\n\nSupports two optional arguments, which are the maximum width and height,\nspecified like so:\n\n{% oembed 640x480 %}http://www.viddler.com/explore/SYSTM/videos/49/{% endoembed %}\n\nand/or the name of a sub template directory to render templates from:\n\n{% oembed 320x240 in "comments" %}http://www.viddler.com/explore/SYSTM/videos/49/{% endoembed %}\n\nor:\n\n{% oembed in "comments" %}http://www.viddler.com/explore/SYSTM/videos/49/{% endoembed %}\n\neither of those will render templates in oembed/comments/oembedtype.html\n\nAdditionally, you can specify a context variable to drop the rendered text in:\n\n{% oembed 600x400 in "comments" as var_name %}...{% endoembed %}\n{% oembed as var_name %}...{% endoembed %}", "id": "f2937:m3"} {"signature": "def do_autodiscover(parser, token):", "body": "args = token.split_contents()if len(args) != :raise template.TemplateSyntaxError('' % args[])else:obj = args[]return OEmbedAutodiscoverNode(obj)", "docstring": "Generates a <link> tag with oembed autodiscovery bits for an object.\n\n{% oembed_autodiscover video %}", "id": "f2937:m4"} {"signature": "def do_url_scheme(parser, token):", "body": "args = token.split_contents()if len(args) != :raise template.TemplateSyntaxError('' % args[])return OEmbedURLSchemeNode()", "docstring": "Generates a <link> tag with oembed autodiscovery bits.\n\n{% oembed_url_scheme %}", "id": "f2937:m5"} {"signature": "def extract_oembeds(self, text, maxwidth=None, maxheight=None, resource_type=None):", "body": "parser = text_parser()urls = parser.extract_urls(text)return self.handle_extracted_urls(urls, maxwidth, maxheight, resource_type)", "docstring": "Scans a block of text and extracts oembed data on any urls,\nreturning it in a list of dictionaries", "id": "f2939:c0:m4"} {"signature": "def strip(self, text, *args, **kwargs):", "body": "if OEMBED_DEFAULT_PARSE_HTML:extracted = self.extract_oembeds_html(text, *args, **kwargs)else:extracted = self.extract_oembeds(text, *args, **kwargs)matches = [r[''] for r in extracted]match_handler = lambda m: m.group() not in matches and m.group() or ''return re.sub(URL_RE, match_handler, text)", "docstring": "Try to maintain parity with what is extracted by extract since strip\nwill most likely be used in conjunction with extract", "id": "f2939:c0:m7"} {"signature": "def json(request, *args, **kwargs):", "body": "params = dict(list(request.GET.items()))callback = params.pop('', None)url = params.pop('', None)if not url:return HttpResponseBadRequest('')try:provider = oembed.site.provider_for_url(url)if not provider.provides:raise OEmbedMissingEndpoint()except OEmbedMissingEndpoint:raise Http404('' % url)query = dict([(smart_str(k), smart_str(v)) for k, v in list(params.items()) if v])try:resource = oembed.site.embed(url, **query)except OEmbedException as e:raise Http404('' % (url, str(e)))response = HttpResponse(mimetype='')json = resource.jsonif callback:response.write('' % (defaultfilters.force_escape(callback), json))else:response.write(json)return response", "docstring": "The oembed endpoint, or the url to which requests for metadata are passed.\nThird parties will want to access this view with URLs for your site's\ncontent and be returned OEmbed metadata.", "id": "f2940:m0"} {"signature": "def consume_json(request):", "body": "client = OEmbedConsumer()urls = request.GET.getlist('')width = request.GET.get('')height = 
request.GET.get('')template_dir = request.GET.get('')output = {}ctx = RequestContext(request)for url in urls:try:provider = oembed.site.provider_for_url(url)except OEmbedMissingEndpoint:oembeds = Nonerendered = Noneelse:oembeds = urlrendered = client.parse_text(url, width, height, context=ctx, template_dir=template_dir)output[url] = {'': oembeds,'': rendered,}return HttpResponse(simplejson.dumps(output), mimetype='')", "docstring": "Extract and return oembed content for given urls.\n\nRequired GET params:\n urls - list of urls to consume\n\nOptional GET params:\n width - maxwidth attribute for oembed content\n height - maxheight attribute for oembed content\n template_dir - template_dir to use when rendering oembed\n\nReturns:\n list of dictionaries with oembed metadata and renderings, json encoded", "id": "f2940:m1"} {"signature": "def oembed_schema(request):", "body": "current_domain = Site.objects.get_current().domainurl_schemes = [] endpoint = reverse('') providers = oembed.site.get_providers()for provider in providers:if not provider.provides:continuematch = Noneif isinstance(provider, DjangoProvider):url_pattern = resolver.reverse_dict.get(provider._meta.named_view)if url_pattern:regex = re.sub(r'', '', url_pattern[][][])match = '' % (current_domain, regex)elif isinstance(provider, HTTPProvider):match = provider.url_schemeelse:match = provider.regexif match:url_schemes.append({'': provider.resource_type,'': match,'': endpoint})url_schemes.sort(key=lambda item: item[''])response = HttpResponse(mimetype='')response.write(simplejson.dumps(url_schemes))return response", "docstring": "A site profile detailing valid endpoints for a given domain. Allows for\nbetter auto-discovery of embeddable content.\n\nOEmbed-able content lives at a URL that maps to a provider.", "id": "f2940:m2"} {"signature": "def register(self, provider_class):", "body": "if not issubclass(provider_class, BaseProvider):raise TypeError('' % provider_class.__name__)if provider_class in self._registered_providers:raise AlreadyRegistered('' % provider_class.__name__)if issubclass(provider_class, DjangoProvider):signals.post_save.connect(self.invalidate_stored_oembeds,sender=provider_class._meta.model)self._registered_providers.append(provider_class)self.invalidate_providers()", "docstring": "Registers a provider with the site.", "id": "f2942:c0:m3"} {"signature": "def unregister(self, provider_class):", "body": "if not issubclass(provider_class, BaseProvider):raise TypeError('' % provider_class.__name__)if provider_class not in self._registered_providers:raise NotRegistered('' % provider_class.__name__)self._registered_providers.remove(provider_class)self.invalidate_providers()", "docstring": "Unregisters a provider from the site.", "id": "f2942:c0:m4"} {"signature": "def populate(self):", "body": "self._registry = {}for provider_class in self._registered_providers:instance = provider_class()self._registry[instance] = instance.regexfor stored_provider in StoredProvider.objects.active():self._registry[stored_provider] = stored_provider.regexself._populated = True", "docstring": "Populate the internal registry's dictionary with the regexes for each\nprovider instance", "id": "f2942:c0:m5"} {"signature": "def ensure_populated(self):", "body": "if not self._populated:self.populate()", "docstring": "Ensure not only that the internal registry of Python-class providers is\npopulated, but also make sure the cached queryset of database-providers\nis up-to-date", "id": "f2942:c0:m6"} {"signature": "def get_registry(self):", "body": 
"self.ensure_populated()return self._registry", "docstring": "Return a dictionary of {provider_instance: regex}", "id": "f2942:c0:m7"} {"signature": "def get_providers(self):", "body": "return self.get_registry().keys()", "docstring": "Provide a list of all oembed providers that are being used.", "id": "f2942:c0:m8"} {"signature": "def provider_for_url(self, url):", "body": "for provider, regex in self.get_registry().items():if re.match(regex, url) is not None:return providerraise OEmbedMissingEndpoint('' % url)", "docstring": "Find the right provider for a URL", "id": "f2942:c0:m9"} {"signature": "def invalidate_stored_oembeds(self, sender, instance, created, **kwargs):", "body": "ctype = ContentType.objects.get_for_model(instance)StoredOEmbed.objects.filter(object_id=instance.pk,content_type=ctype).delete()", "docstring": "A hook for django-based oembed providers to delete any stored oembeds", "id": "f2942:c0:m10"} {"signature": "def embed(self, url, **kwargs):", "body": "try:provider = self.provider_for_url(url)except OEmbedMissingEndpoint:raiseelse:try:stored_match = StoredOEmbed.objects.filter(match=url, maxwidth=kwargs.get('', None), maxheight=kwargs.get('', None),date_expires__gte=datetime.datetime.now())[]return OEmbedResource.create_json(stored_match.response_json)except IndexError:params = dict([(k, v) for k, v in kwargs.items() if v])resource = provider.request_resource(url, **params)try:cache_age = int(resource.cache_age)if cache_age < MIN_OEMBED_TTL:cache_age = MIN_OEMBED_TTLexcept:cache_age = DEFAULT_OEMBED_TTLdate_expires = datetime.datetime.now() + datetime.timedelta(seconds=cache_age)stored_oembed, created = StoredOEmbed.objects.get_or_create(match=url,maxwidth=kwargs.get('', None),maxheight=kwargs.get('', None))stored_oembed.response_json = resource.jsonstored_oembed.resource_type = resource.typestored_oembed.date_expires = date_expiresif resource.content_object:stored_oembed.content_object = resource.content_objectstored_oembed.save()return resource", "docstring": "The heart of the matter", "id": "f2942:c0:m11"} {"signature": "def autodiscover(self, url):", "body": "headers, response = fetch_url(url)if headers[''].split('')[] in ('', ''):provider_data = json.loads(response)return self.store_providers(provider_data)", "docstring": "Load up StoredProviders from url if it is an oembed scheme", "id": "f2942:c0:m12"} {"signature": "def store_providers(self, provider_data):", "body": "if not hasattr(provider_data, ''):raise OEmbedException('')provider_pks = []for provider in provider_data:if '' not in provider or'' not in provider:continueresource_type = provider.get('')if resource_type not in RESOURCE_TYPES:continuestored_provider, created = StoredProvider.objects.get_or_create(wildcard_regex=provider[''])if created:stored_provider.endpoint_url = relative_to_full( provider[''],provider[''])stored_provider.resource_type = resource_typestored_provider.save()provider_pks.append(stored_provider.pk)return StoredProvider.objects.filter(pk__in=provider_pks)", "docstring": "Iterate over the returned json and try to sort out any new providers", "id": "f2942:c0:m13"} {"signature": "def register_field(cls, field):", "body": "FieldRegistry.add_field(cls, field)signals.post_save.connect(handle_save_embeds, sender=cls,dispatch_uid='' %(cls._meta.app_label, cls._meta.module_name, field.name))", "docstring": "Handles registering the fields with the FieldRegistry and creating a \npost-save signal for the model.", "id": "f2943:m0"} {"signature": "def contribute_to_class(self, cls, name):", 
"body": "super(EmbeddedMediaField, self).contribute_to_class(cls, name)register_field(cls, self)cls._meta.add_virtual_field(EmbeddedSignalCreator(self))", "docstring": "I need a way to ensure that this signal gets created for all child\nmodels, and since model inheritance doesn't have a 'contrubite_to_class'\nstyle hook, I am creating a fake virtual field which will be added to\nall subclasses and handles creating the signal", "id": "f2943:c2:m1"} {"signature": "def autodiscover():", "body": "import impfrom django.conf import settingsfor app in settings.INSTALLED_APPS:try:app_path = __import__(app, {}, {}, [app.split('')[-]]).__path__except AttributeError:continuetry:imp.find_module('', app_path)except ImportError:continue__import__(\"\" % app)", "docstring": "Automatically build the provider index.", "id": "f2944:m0"} {"signature": "def render_oembed(self, oembed_resource, original_url, template_dir=None,context=None):", "body": "provided_context = context or Context()context = RequestContext(context.get(\"\") or mock_request())context.update(provided_context)template_name = '' % oembed_resource.typetemplates = [os.path.join('', template_name), '']if template_dir:templates.insert(, os.path.join('', template_dir, template_name))template = select_template(templates)context.push()context[''] = oembed_resourcecontext[''] = original_urlrendered = template.render(context)context.pop()return rendered.strip()", "docstring": "Render the oembed resource and return as a string.\n\nTemplate directory will always fall back to 'oembed/[type].html', but\na custom template dir can be passed in using the kwargs.\n\nTemplates are given two context variables:\n- response: an OEmbedResource\n- original_url: the url that was passed to the consumer", "id": "f2946:c0:m0"} {"signature": "def parse(self, text, maxwidth=None, maxheight=None, template_dir=None,context=None, urlize_all_links=CONSUMER_URLIZE_ALL):", "body": "context = context or Context()context[''] = maxwidthcontext[''] = maxheighttry:text = unicode(text)except UnicodeDecodeError:text = unicode(text.decode(''))return self.parse_data(text, maxwidth, maxheight, template_dir,context, urlize_all_links)", "docstring": "Scans a block of text, replacing anything matching a provider pattern\nwith an OEmbed html snippet, if possible.\n\nTemplates should be stored at oembed/{format}.html, so for example:\n\n oembed/video.html\n\nAn optional template_dir can be provided, allowing for\n\n oembed/[template_dir]/video.html\n\nThese templates are passed a context variable, ``response``, which is\nan OEmbedResource, as well as the ``original_url``", "id": "f2946:c0:m1"} {"signature": "def parse_data(self, text, maxwidth, maxheight, template_dir, context,urlize_all_links):", "body": "raise NotImplementedError('')", "docstring": "Implemented on all subclasses, this method contains the logic to\nprocess embeddable content in ``text``", "id": "f2946:c0:m2"} {"signature": "def extract_urls(self, text):", "body": "raise NotImplementedError('')", "docstring": "Implemented on all subclasses, this method contains the logic to\nextract a list or set of urls from text", "id": "f2946:c0:m3"} {"signature": "def parse_data(self, text, maxwidth, maxheight, template_dir, context,urlize_all_links):", "body": "replacements = {}user_urls = set(re.findall(URL_RE, text))for user_url in user_urls:try:resource = oembed.site.embed(user_url, maxwidth=maxwidth, maxheight=maxheight)except OEmbedException:if urlize_all_links:replacements[user_url] = '' % {'': user_url}else:context[''] = 
min(maxwidth, resource.width)context[''] = min(maxheight, resource.height)replacement = self.render_oembed(resource, user_url, template_dir=template_dir, context=context)replacements[user_url] = replacement.strip()user_urls = re.finditer(URL_RE, text)matches = []for match in user_urls:if match.group() in replacements:matches.append([match.start(), match.end(), match.group()])for indx, (start, end, user_url) in enumerate(matches):replacement = replacements[user_url]difference = len(replacement) - len(user_url)text = text[:start] + replacement + text[end:]for j in xrange(indx + , len(matches)):matches[j][] += differencematches[j][] += differencereturn mark_safe(text)", "docstring": "Parses a block of text indiscriminately", "id": "f2947:c0:m0"} {"signature": "def parse_data(self, text, maxwidth, maxheight, template_dir, context, urlize_all_links):", "body": "block_parser = TextBlockParser()lines = text.splitlines()parsed = []for line in lines:if STANDALONE_URL_RE.match(line):user_url = line.strip()try:resource = oembed.site.embed(user_url, maxwidth=maxwidth, maxheight=maxheight)context[''] = min(maxwidth, resource.width)context[''] = min(maxheight, resource.height)except OEmbedException:if urlize_all_links:line = '' % {'': user_url}else:context[''] = min(maxwidth, resource.width)context[''] = min(maxheight, resource.height)line = self.render_oembed(resource, user_url, template_dir=template_dir, context=context)else:line = block_parser.parse(line, maxwidth, maxheight, '',context, urlize_all_links)parsed.append(line)return mark_safe(''.join(parsed))", "docstring": "Parses a block of text rendering links that occur on their own line\nnormally but rendering inline links using a special template dir", "id": "f2947:c1:m0"} {"signature": "def size_to_nearest(width=None, height=None, allowed_sizes=OEMBED_ALLOWED_SIZES,force_fit=False):", "body": "minwidth, minheight = min(allowed_sizes)maxwidth, maxheight = max(allowed_sizes)if not width and not height:return maxwidth, maxheightif width:width = int(width) > minwidth and int(width) or minwidthelif force_fit:width = maxwidthif height:height = int(height) > minheight and int(height) or minheightelif force_fit:height = maxheightfor size in sorted(allowed_sizes):if width and not height:if width >= size[]:maxwidth = size[]if force_fit:maxheight = size[]else:breakelif height and not width:if height >= size[]:maxheight = size[]if force_fit:maxwidth = size[]else:breakelse:if force_fit:if (width >= size[]) and (height >= size[]):maxwidth, maxheight = sizeelse:breakelse:if width >= size[]:maxwidth = size[]if height >= size[]:maxheight = size[]return maxwidth, maxheight", "docstring": "Generate some dimensions for resizing an object. This function DOES NOT handle\nscaling, it simply calculates maximums. 
These values should then be passed to\nthe resize() method which will scale it and return the scaled width & height.", "id": "f2950:m0"} {"signature": "def fetch_url(url, method='', user_agent='', timeout=SOCKET_TIMEOUT):", "body": "sock = httplib2.Http(timeout=timeout)request_headers = {'': user_agent,'': ''}try:headers, raw = sock.request(url, headers=request_headers, method=method)except:raise OEmbedHTTPException('' % url)return headers, raw", "docstring": "Fetch response headers and data from a URL, raising a generic exception\nfor any kind of failure.", "id": "f2950:m2"} {"signature": "def relative_to_full(url, example_url):", "body": "if re.match('', url):return urldomain = get_domain(example_url)if domain:return '' % (domain, url)return url", "docstring": "Given a url which may or may not be a relative url, convert it to a full\nurl path given another full url as an example", "id": "f2950:m4"} {"signature": "def mock_request():", "body": "current_site = Site.objects.get_current()request = HttpRequest()request.META[''] = current_site.domainreturn request", "docstring": "Generate a fake request object to allow oEmbeds to use context processors.", "id": "f2950:m5"} {"signature": "def load_class(path):", "body": "package, klass = path.rsplit('', )module = import_module(package)return getattr(module, klass)", "docstring": "dynamically load a class given a string of the format\n\npackage.Class", "id": "f2950:m6"} {"signature": "def cleaned_sites():", "body": "mappings = {}for site in Site.objects.all():match = re.match(r'', site.domain)if match is not None:http, www, domain = match.groups()http_re = http or r''www_re = r''domain_re = http_re + www_re + domainhttp = http or r''www = www or ''normalized = http + www + domainmappings[site.pk] = (domain_re, site.name, normalized)return mappings", "docstring": "Create a list of tuples mapping domains from the sites table to their\nsite name. 
The domains will be cleaned into regexes that may be\nmore permissive than the site domain is in the db.\n\n[(domain_regex, domain_name, domain_string), ...]", "id": "f2950:m7"} {"signature": "def getDevicesCodenames():", "body": "return config.sections()", "docstring": "Returns a list containing devices codenames", "id": "f2959:m0"} {"signature": "def getDevicesReadableNames():", "body": "return [{'': s,'': config.get(s).get('')}for s in getDevicesCodenames()]", "docstring": "Returns codename and readable name for each device", "id": "f2959:m1"} {"signature": "def encryptPassword(self, login, passwd):", "body": "binaryKey = b64decode(config.GOOGLE_PUBKEY)i = utils.readInt(binaryKey, )modulus = utils.toBigInt(binaryKey[:][:i])j = utils.readInt(binaryKey, i + )exponent = utils.toBigInt(binaryKey[i + :][:j])digest = hashes.Hash(hashes.SHA1(), backend=default_backend())digest.update(binaryKey)h = b'' + digest.finalize()[:]der_data = encode_dss_signature(modulus, exponent)publicKey = load_der_public_key(der_data, backend=default_backend())to_be_encrypted = login.encode() + b'' + passwd.encode()ciphertext = publicKey.encrypt(to_be_encrypted,padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA1()),algorithm=hashes.SHA1(),label=None))return urlsafe_b64encode(h + ciphertext)", "docstring": "Encrypt credentials using the google publickey, with the\n RSA algorithm", "id": "f2960:c3:m3"} {"signature": "def getHeaders(self, upload_fields=False):", "body": "if upload_fields:headers = self.deviceBuilder.getDeviceUploadHeaders()else:headers = self.deviceBuilder.getBaseHeaders()if self.gsfId is not None:headers[\"\"] = \"\".format(self.gsfId)if self.authSubToken is not None:headers[\"\"] = \"\" % self.authSubTokenif self.device_config_token is not None:headers[\"\"] = self.device_config_tokenif self.deviceCheckinConsistencyToken is not None:headers[\"\"] = self.deviceCheckinConsistencyTokenif self.dfeCookie is not None:headers[\"\"] = self.dfeCookiereturn headers", "docstring": "Return the default set of request headers, which\n can later be expanded, based on the request type", "id": "f2960:c3:m5"} {"signature": "def uploadDeviceConfig(self):", "body": "upload = googleplay_pb2.UploadDeviceConfigRequest()upload.deviceConfiguration.CopyFrom(self.deviceBuilder.getDeviceConfig())headers = self.getHeaders(upload_fields=True)stringRequest = upload.SerializeToString()response = requests.post(UPLOAD_URL, data=stringRequest,headers=headers,verify=ssl_verify,timeout=,proxies=self.proxies_config)response = googleplay_pb2.ResponseWrapper.FromString(response.content)try:if response.payload.HasField(''):self.device_config_token = response.payload.uploadDeviceConfigResponseself.device_config_token = self.device_config_token.uploadDeviceConfigTokenexcept ValueError:pass", "docstring": "Upload the device configuration of the fake device\n selected in the __init__ methodi to the google account.", "id": "f2960:c3:m7"} {"signature": "def login(self, email=None, password=None, gsfId=None, authSubToken=None):", "body": "if email is not None and password is not None:encryptedPass = self.encryptPassword(email, password).decode('')params = self.deviceBuilder.getLoginParams(email, encryptedPass)params[''] = ''params[''] = ''params[''] = ''headers = self.deviceBuilder.getAuthHeaders(self.gsfId)headers[''] = ''response = requests.post(AUTH_URL, data=params, verify=ssl_verify,proxies=self.proxies_config)data = response.text.split()params = {}for d in data:if \"\" not in d:continuek, v = d.split(\"\", )params[k.strip().lower()] = 
v.strip()if \"\" in params:ac2dmToken = params[\"\"]elif \"\" in params:if \"\" in params[\"\"]:raise SecurityCheckError(\"\"\"\"\"\")raise LoginError(\"\" + params[\"\"])else:raise LoginError(\"\")self.gsfId = self.checkin(email, ac2dmToken)self.getAuthSubToken(email, encryptedPass)self.uploadDeviceConfig()elif gsfId is not None and authSubToken is not None:self.gsfId = gsfIdself.setAuthSubToken(authSubToken)self.search('')else:raise LoginError('')", "docstring": "Login to your Google Account.\n For first time login you should provide:\n * email\n * password\n For the following logins you need to provide:\n * gsfId\n * authSubToken", "id": "f2960:c3:m8"} {"signature": "def search(self, query):", "body": "if self.authSubToken is None:raise LoginError(\"\")path = SEARCH_URL + \"\".format(requests.utils.quote(query))self.toc()data = self.executeRequestApi2(path)if utils.hasPrefetch(data):response = data.preFetch[].responseelse:response = dataresIterator = response.payload.listResponse.docreturn list(map(utils.parseProtobufObj, resIterator))", "docstring": "Search the play store for an app.\n\n nb_result (int): is the maximum number of result to be returned\n\n offset (int): is used to take result starting from an index.", "id": "f2960:c3:m13"} {"signature": "def details(self, packageName):", "body": "path = DETAILS_URL + \"\".format(requests.utils.quote(packageName))data = self.executeRequestApi2(path)return utils.parseProtobufObj(data.payload.detailsResponse.docV2)", "docstring": "Get app details from a package name.\n\n packageName is the app unique ID (usually starting with 'com.').", "id": "f2960:c3:m14"} {"signature": "def bulkDetails(self, packageNames):", "body": "params = {'': ''}req = googleplay_pb2.BulkDetailsRequest()req.docid.extend(packageNames)data = req.SerializeToString()message = self.executeRequestApi2(BULK_URL,post_data=data.decode(\"\"),content_type=CONTENT_TYPE_PROTO,params=params)response = message.payload.bulkDetailsResponsereturn [None if not utils.hasDoc(entry) elseutils.parseProtobufObj(entry.doc)for entry in response.entry]", "docstring": "Get several apps details from a list of package names.\n\n This is much more efficient than calling N times details() since it\n requires only one request. If an item is not found it returns an empty object\n instead of throwing a RequestError('Item not found') like the details() function\n\n Args:\n packageNames (list): a list of app IDs (usually starting with 'com.').\n\n Returns:\n a list of dictionaries containing docv2 data, or None\n if the app doesn't exist", "id": "f2960:c3:m15"} {"signature": "def browse(self, cat=None, subCat=None):", "body": "path = BROWSE_URL + \"\"if cat is not None:path += \"\".format(requests.utils.quote(cat))if subCat is not None:path += \"\".format(requests.utils.quote(subCat))data = self.executeRequestApi2(path)return utils.parseProtobufObj(data.payload.browseResponse)", "docstring": "Browse categories. 
If neither cat nor subcat are specified,\n return a list of categories, otherwise it return a list of apps\n using cat (category ID) and subCat (subcategory ID) as filters.", "id": "f2960:c3:m17"} {"signature": "def list(self, cat, ctr=None, nb_results=None, offset=None):", "body": "path = LIST_URL + \"\".format(requests.utils.quote(cat))if ctr is not None:path += \"\".format(requests.utils.quote(ctr))if nb_results is not None:path += \"\".format(requests.utils.quote(str(nb_results)))if offset is not None:path += \"\".format(requests.utils.quote(str(offset)))data = self.executeRequestApi2(path)clusters = []docs = []if ctr is None:for pf in data.preFetch:for cluster in pf.response.payload.listResponse.doc:clusters.extend(cluster.child)return [c.docid for c in clusters]else:apps = []for d in data.payload.listResponse.doc: for c in d.child: for a in c.child: apps.append(utils.parseProtobufObj(a))return apps", "docstring": "List all possible subcategories for a specific category. If\n also a subcategory is provided, list apps from this category.\n\n Args:\n cat (str): category id\n ctr (str): subcategory id\n nb_results (int): if a subcategory is specified, limit number\n of results to this number\n offset (int): if a subcategory is specified, start counting from this\n result\n Returns:\n A list of categories. If subcategory is specified, a list of apps in this\n category.", "id": "f2960:c3:m18"} {"signature": "def reviews(self, packageName, filterByDevice=False, sort=,nb_results=None, offset=None):", "body": "path = REVIEWS_URL + \"\".format(requests.utils.quote(packageName), sort)if nb_results is not None:path += \"\".format(nb_results)if offset is not None:path += \"\".format(offset)if filterByDevice:path += \"\"data = self.executeRequestApi2(path)output = []for review in data.payload.reviewResponse.getResponse.review:output.append(utils.parseProtobufObj(review))return output", "docstring": "Browse reviews for an application\n\n Args:\n packageName (str): app unique ID.\n filterByDevice (bool): filter results for current device\n sort (int): sorting criteria (values are unknown)\n nb_results (int): max number of reviews to return\n offset (int): return reviews starting from an offset value\n\n Returns:\n dict object containing all the protobuf data returned from\n the api", "id": "f2960:c3:m19"} {"signature": "def delivery(self, packageName, versionCode=None, offerType=,downloadToken=None, expansion_files=False):", "body": "if versionCode is None:versionCode = self.details(packageName).get('')params = {'': str(offerType),'': packageName,'': str(versionCode)}headers = self.getHeaders()if downloadToken is not None:params[''] = downloadTokenresponse = requests.get(DELIVERY_URL, headers=headers,params=params, verify=ssl_verify,timeout=,proxies=self.proxies_config)response = googleplay_pb2.ResponseWrapper.FromString(response.content)if response.commands.displayErrorMessage != \"\":raise RequestError(response.commands.displayErrorMessage)elif response.payload.deliveryResponse.appDeliveryData.downloadUrl == \"\":raise RequestError('')else:result = {}result[''] = packageNameresult[''] = []downloadUrl = response.payload.deliveryResponse.appDeliveryData.downloadUrlcookie = response.payload.deliveryResponse.appDeliveryData.downloadAuthCookie[]cookies = {str(cookie.name): str(cookie.value)}result[''] = self._deliver_data(downloadUrl, cookies)if not expansion_files:return resultfor obb in response.payload.deliveryResponse.appDeliveryData.additionalFile:a = {}if obb.fileType == :obbType = ''else:obbType 
= ''a[''] = obbTypea[''] = obb.versionCodea[''] = self._deliver_data(obb.downloadUrl, None)result[''].append(a)return result", "docstring": "Download an already purchased app.\n\n Args:\n packageName (str): app unique ID (usually starting with 'com.')\n versionCode (int): version to download\n offerType (int): different type of downloads (mostly unused for apks)\n downloadToken (str): download token returned by 'purchase' API\n progress_bar (bool): whether or not to print a progress bar to stdout\n\n Returns:\n Dictionary containing apk data and a list of expansion files. As stated\n in android documentation, there can be at most 2 expansion files, one with\n main content, and one for patching the main content. Their names should\n follow this format:\n\n [main|patch]...obb\n\n Data to build this name string is provided in the dict object. For more\n info check https://developer.android.com/google/play/expansion-files.html", "id": "f2960:c3:m21"} {"signature": "def download(self, packageName, versionCode=None, offerType=, expansion_files=False):", "body": "if self.authSubToken is None:raise LoginError(\"\")if versionCode is None:appDetails = self.details(packageName).get('').get('')versionCode = appDetails.get('')headers = self.getHeaders()params = {'': str(offerType),'': packageName,'': str(versionCode)}self.log(packageName)response = requests.post(PURCHASE_URL, headers=headers,params=params, verify=ssl_verify,timeout=,proxies=self.proxies_config)response = googleplay_pb2.ResponseWrapper.FromString(response.content)if response.commands.displayErrorMessage != \"\":raise RequestError(response.commands.displayErrorMessage)else:dlToken = response.payload.buyResponse.downloadTokenreturn self.delivery(packageName, versionCode, offerType, dlToken,expansion_files=expansion_files)", "docstring": "Download an app and return its raw data (APK file). 
Free apps need\n to be \"purchased\" first, in order to retrieve the download cookie.\n If you want to download an already purchased app, use *delivery* method.\n\n Args:\n packageName (str): app unique ID (usually starting with 'com.')\n versionCode (int): version to download\n offerType (int): different type of downloads (mostly unused for apks)\n downloadToken (str): download token returned by 'purchase' API\n progress_bar (bool): whether or not to print a progress bar to stdout\n\n Returns\n Dictionary containing apk data and optional expansion files\n (see *delivery*)", "id": "f2960:c3:m22"} {"signature": "def readInt(byteArray, start):", "body": "return struct.unpack(\"\", byteArray[start:][:])[]", "docstring": "Read the byte array, starting from *start* position,\n as a 32-bit unsigned integer", "id": "f2961:m1"} {"signature": "def toBigInt(byteArray):", "body": "array = byteArray[::-] out = for key, value in enumerate(array):decoded = struct.unpack(\"\", bytes([value]))[]out = out | decoded << key * return out", "docstring": "Convert the byte array to a BigInteger", "id": "f2961:m2"} {"signature": "def split_grafs (lines):", "body": "graf = []for line in lines:line = line.strip()if len(line) < :if len(graf) > :yield \"\".join(graf)graf = []else:graf.append(line)if len(graf) > :yield \"\".join(graf)", "docstring": "segment the raw text into paragraphs", "id": "f2966:m0"} {"signature": "def filter_quotes (text, is_email=True):", "body": "global DEBUGglobal PAT_FORWARD, PAT_REPLIED, PAT_UNSUBSCif is_email:text = filter(lambda x: x in string.printable, text)if DEBUG:print(\"\", text)m = PAT_FORWARD.split(text, re.M)if m and len(m) > :text = m[]m = PAT_REPLIED.split(text, re.M)if m and len(m) > :text = m[]m = PAT_UNSUBSC.split(text, re.M)if m:text = m[]lines = []for line in text.split(\"\"):if line.startswith(\">\"):lines.append(\"\")else:lines.append(line)return list(split_grafs(lines))", "docstring": "filter the quoted text out of a message", "id": "f2966:m1"} {"signature": "def get_word_id (root):", "body": "global UNIQ_WORDSif root not in UNIQ_WORDS:UNIQ_WORDS[root] = len(UNIQ_WORDS)return UNIQ_WORDS[root]", "docstring": "lookup/assign a unique identifier for each word root", "id": "f2966:m3"} {"signature": "def fix_microsoft (foo):", "body": "i = bar = []while i < len(foo):text, lemma, pos, tag = foo[i]if (text == \"\") and (i > ):prev_tok = bar[-]prev_tok[] += \"\"prev_tok[] += \"\"bar[-] = prev_tokelse:bar.append(foo[i])i += return bar", "docstring": "fix special case for `c#`, `f#`, etc.; thanks Microsoft", "id": "f2966:m4"} {"signature": "def fix_hypenation (foo):", "body": "i = bar = []while i < len(foo):text, lemma, pos, tag = foo[i]if (tag == \"\") and (i > ) and (i < len(foo) - ):prev_tok = bar[-]next_tok = foo[i + ]prev_tok[] += \"\" + next_tok[]prev_tok[] += \"\" + next_tok[]bar[-] = prev_toki += else:bar.append(foo[i])i += return bar", "docstring": "fix hyphenation in the word list for a parsed sentence", "id": "f2966:m5"} {"signature": "def parse_graf (doc_id, graf_text, base_idx, spacy_nlp=None):", "body": "global DEBUGglobal POS_KEEPS, POS_LEMMA, SPACY_NLPif not spacy_nlp:if not SPACY_NLP:SPACY_NLP = spacy.load(\"\")spacy_nlp = SPACY_NLPmarkup = []new_base_idx = base_idxdoc = spacy_nlp(graf_text, parse=True)for span in doc.sents:graf = []digest = hashlib.sha1()if DEBUG:print(span)word_list = []for tag_idx in range(span.start, span.end):token = doc[tag_idx]if DEBUG:print(\"\", tag_idx, token.text, token.tag_, token.pos_)print(\"\", 
is_not_word(token.text))word_list.append([token.text, token.lemma_, token.pos_, token.tag_])corrected_words = fix_microsoft(fix_hypenation(word_list))for tok_text, tok_lemma, tok_pos, tok_tag in corrected_words:word = WordNode(word_id=, raw=tok_text, root=tok_text.lower(), pos=tok_tag, keep=, idx=new_base_idx)if is_not_word(tok_text) or (tok_tag == \"\"):pos_family = ''word = word._replace(pos=pos_family)else:pos_family = tok_tag.lower()[]if pos_family in POS_LEMMA:word = word._replace(root=tok_lemma)if pos_family in POS_KEEPS:word = word._replace(word_id=get_word_id(word.root), keep=)digest.update(word.root.encode(''))if DEBUG:print(word)graf.append(list(word))new_base_idx += markup.append(ParsedGraf(id=doc_id, sha1=digest.hexdigest(), graf=graf))return markup, new_base_idx", "docstring": "CORE ALGORITHM: parse and markup sentences in the given paragraph", "id": "f2966:m6"} {"signature": "def parse_doc (json_iter):", "body": "global DEBUGfor meta in json_iter:base_idx = for graf_text in filter_quotes(meta[\"\"], is_email=False):if DEBUG:print(\"\", graf_text)grafs, new_base_idx = parse_graf(meta[\"\"], graf_text, base_idx)base_idx = new_base_idxfor graf in grafs:yield graf", "docstring": "parse one document to prep for TextRank", "id": "f2966:m7"} {"signature": "def get_tiles (graf, size=):", "body": "keeps = list(filter(lambda w: w.word_id > , graf))keeps_len = len(keeps)for i in iter(range(, keeps_len - )):w0 = keeps[i]for j in iter(range(i + , min(keeps_len, i + + size))):w1 = keeps[j]if (w1.idx - w0.idx) <= size:yield (w0.root, w1.root,)", "docstring": "generate word pairs for the TextRank graph", "id": "f2966:m8"} {"signature": "def build_graph (json_iter):", "body": "global DEBUG, WordNodegraph = nx.DiGraph()for meta in json_iter:if DEBUG:print(meta[\"\"])for pair in get_tiles(map(WordNode._make, meta[\"\"])):if DEBUG:print(pair)for word_id in pair:if not graph.has_node(word_id):graph.add_node(word_id)try:graph.edge[pair[]][pair[]][\"\"] += except KeyError:graph.add_edge(pair[], pair[], weight=)return graph", "docstring": "construct the TextRank graph from parsed paragraphs", "id": "f2966:m9"} {"signature": "def write_dot (graph, ranks, path=\"\"):", "body": "dot = Digraph()for node in graph.nodes():dot.node(node, \"\" % (node, ranks[node]))for edge in graph.edges():dot.edge(edge[], edge[], constraint=\"\")with open(path, '') as f:f.write(dot.source)", "docstring": "output the graph in Dot file format", "id": "f2966:m10"} {"signature": "def render_ranks (graph, ranks, dot_file=\"\"):", "body": "if dot_file:write_dot(graph, ranks, path=dot_file)", "docstring": "render the TextRank graph for visual formats", "id": "f2966:m11"} {"signature": "def text_rank (path):", "body": "graph = build_graph(json_iter(path))ranks = nx.pagerank(graph)return graph, ranks", "docstring": "run the TextRank algorithm", "id": "f2966:m12"} {"signature": "def find_chunk (phrase, np):", "body": "for i in iter(range(, len(phrase))):parsed_np = find_chunk_sub(phrase, np, i)if parsed_np:return parsed_np", "docstring": "leverage noun phrase chunking", "id": "f2966:m15"} {"signature": "def enumerate_chunks (phrase, spacy_nlp):", "body": "if (len(phrase) > ):found = Falsetext = \"\".join([rl.text for rl in phrase])doc = spacy_nlp(text.strip(), parse=True)for np in doc.noun_chunks:if np.text != text:found = Trueyield np.text, find_chunk(phrase, np.text.split(\"\"))if not found and all([rl.pos[] != \"\" for rl in phrase]):yield text, phrase", "docstring": "iterate through the noun phrases", "id": "f2966:m16"} 
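
Note on the pytextrank records above (get_tiles, build_graph, text_rank): together they describe collecting word pairs that co-occur within a small window, accumulating them as edge weights in a directed graph, and scoring the words with networkx PageRank. The snippet below is a minimal, self-contained sketch of that flow; the helper names (word_pairs, rank_words), the window size of 3, and the example token list are illustrative assumptions, since the extracted bodies above have their literals stripped and operate on WordNode tuples keyed by word id.

import networkx as nx

def word_pairs(words, window=3):
    # Yield pairs of words that co-occur within a small sliding window,
    # mirroring what get_tiles does with WordNode roots.
    for i, w0 in enumerate(words):
        for w1 in words[i + 1:i + window]:
            yield w0, w1

def rank_words(words, window=3):
    # Build a directed graph whose edge weights count co-occurrences
    # (as in build_graph), then score every word with PageRank (as in text_rank).
    graph = nx.DiGraph()
    for w0, w1 in word_pairs(words, window):
        if graph.has_edge(w0, w1):
            graph[w0][w1]["weight"] += 1
        else:
            graph.add_edge(w0, w1, weight=1)
    return nx.pagerank(graph)

if __name__ == "__main__":
    # Hypothetical token list, used only to show the input/output shape.
    tokens = ["compatibility", "of", "systems", "of", "linear", "constraints"]
    print(rank_words(tokens))
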
{"signature": "def collect_keyword (sent, ranks, stopwords):", "body": "for w in sent:if (w.word_id > ) and (w.root in ranks) and (w.pos[] in \"\") and (w.root not in stopwords):rl = RankedLexeme(text=w.raw.lower(), rank=ranks[w.root]/, ids=[w.word_id], pos=w.pos.lower(), count=)if DEBUG:print(rl)yield rl", "docstring": "iterator for collecting the single-word keyphrases", "id": "f2966:m17"} {"signature": "def collect_entities (sent, ranks, stopwords, spacy_nlp):", "body": "global DEBUGsent_text = \"\".join([w.raw for w in sent])if DEBUG:print(\"\", sent_text)for ent in spacy_nlp(sent_text).ents:if DEBUG:print(\"\", ent.label_, ent.text)if (ent.label_ not in [\"\"]) and (ent.text.lower() not in stopwords):w_ranks, w_ids = find_entity(sent, ranks, ent.text.split(\"\"), )if w_ranks and w_ids:rl = RankedLexeme(text=ent.text.lower(), rank=w_ranks, ids=w_ids, pos=\"\", count=)if DEBUG:print(rl)yield rl", "docstring": "iterator for collecting the named-entities", "id": "f2966:m19"} {"signature": "def collect_phrases (sent, ranks, spacy_nlp):", "body": "tail = last_idx = sent[].idx - phrase = []while tail < len(sent):w = sent[tail]if (w.word_id > ) and (w.root in ranks) and ((w.idx - last_idx) == ):rl = RankedLexeme(text=w.raw.lower(), rank=ranks[w.root], ids=w.word_id, pos=w.pos.lower(), count=)phrase.append(rl)else:for text, p in enumerate_chunks(phrase, spacy_nlp):if p:id_list = [rl.ids for rl in p]rank_list = [rl.rank for rl in p]np_rl = RankedLexeme(text=text, rank=rank_list, ids=id_list, pos=\"\", count=)if DEBUG:print(np_rl)yield np_rlphrase = []last_idx = w.idxtail += ", "docstring": "iterator for collecting the noun phrases", "id": "f2966:m20"} {"signature": "def calc_rms (values):", "body": "return max(values)", "docstring": "calculate a root-mean-squared metric for a list of float values", "id": "f2966:m21"} {"signature": "def normalize_key_phrases (path, ranks, stopwords=None, spacy_nlp=None, skip_ner=True):", "body": "global STOPWORDS, SPACY_NLPif (type(stopwords) is list) or (type(stopwords) is set):stopwords = set(stopwords)else:if not STOPWORDS:STOPWORDS = load_stopwords(stopwords)stopwords = STOPWORDSif not spacy_nlp:if not SPACY_NLP:SPACY_NLP = spacy.load(\"\")spacy_nlp = SPACY_NLPsingle_lex = {}phrase_lex = {}if isinstance(path, str):path = json_iter(path)for meta in path:sent = [w for w in map(WordNode._make, meta[\"\"])]for rl in collect_keyword(sent, ranks, stopwords):id = str(rl.ids)if id not in single_lex:single_lex[id] = rlelse:prev_lex = single_lex[id]single_lex[id] = rl._replace(count = prev_lex.count + )if not skip_ner:for rl in collect_entities(sent, ranks, stopwords, spacy_nlp):id = str(rl.ids)if id not in phrase_lex:phrase_lex[id] = rlelse:prev_lex = phrase_lex[id]phrase_lex[id] = rl._replace(count = prev_lex.count + )for rl in collect_phrases(sent, ranks, spacy_nlp):id = str(rl.ids)if id not in phrase_lex:phrase_lex[id] = rlelse:prev_lex = phrase_lex[id]phrase_lex[id] = rl._replace(count = prev_lex.count + )rank_list = [rl.rank for rl in single_lex.values()]if len(rank_list) < :max_single_rank = else:max_single_rank = max(rank_list)repeated_roots = {}for rl in sorted(phrase_lex.values(), key=lambda rl: len(rl), reverse=True):rank_list = []for i in iter(range(, len(rl.ids))):id = rl.ids[i]if not id in repeated_roots:repeated_roots[id] = rank_list.append(rl.rank[i])else:repeated_roots[id] += rank_list.append(rl.rank[i] / repeated_roots[id])phrase_rank = calc_rms(rank_list)single_lex[str(rl.ids)] = rl._replace(rank = phrase_rank)sum_ranks = sum([rl.rank for rl in 
single_lex.values()])for rl in sorted(single_lex.values(), key=lambda rl: rl.rank, reverse=True):if sum_ranks > :rl = rl._replace(rank=rl.rank / sum_ranks)elif rl.rank == :rl = rl._replace(rank=)rl = rl._replace(text=re.sub(r\"\", r\"\", rl.text))yield rl", "docstring": "collect keyphrases, named entities, etc., while removing stop words", "id": "f2966:m22"} {"signature": "def mh_digest (data):", "body": "num_perm = m = MinHash(num_perm)for d in data:m.update(d.encode(''))return m", "docstring": "create a MinHash digest", "id": "f2966:m23"} {"signature": "def rank_kernel (path):", "body": "kernel = []if isinstance(path, str):path = json_iter(path)for meta in path:if not isinstance(meta, RankedLexeme):rl = RankedLexeme(**meta)else:rl = metam = mh_digest(map(lambda x: str(x), rl.ids))kernel.append((rl, m,))return kernel", "docstring": "return a list (matrix-ish) of the key phrases and their ranks", "id": "f2966:m24"} {"signature": "def top_sentences (kernel, path):", "body": "key_sent = {}i = if isinstance(path, str):path = json_iter(path)for meta in path:graf = meta[\"\"]tagged_sent = [WordNode._make(x) for x in graf]text = \"\".join([w.raw for w in tagged_sent])m_sent = mh_digest([str(w.word_id) for w in tagged_sent])dist = sum([m_sent.jaccard(m) * rl.rank for rl, m in kernel])key_sent[text] = (dist, i)i += for text, (dist, i) in sorted(key_sent.items(), key=lambda x: x[][], reverse=True):yield SummarySent(dist=dist, idx=i, text=text)", "docstring": "determine distance for each sentence", "id": "f2966:m25"} {"signature": "def limit_keyphrases (path, phrase_limit=):", "body": "rank_thresh = Noneif isinstance(path, str):lex = []for meta in json_iter(path):rl = RankedLexeme(**meta)lex.append(rl)else:lex = pathif len(lex) > :rank_thresh = statistics.mean([rl.rank for rl in lex])else:rank_thresh = used = for rl in lex:if rl.pos[] != \"\":if (used > phrase_limit) or (rl.rank < rank_thresh):returnused += yield rl.text.replace(\"\", \"\")", "docstring": "iterator for the most significant key phrases", "id": "f2966:m26"} {"signature": "def limit_sentences (path, word_limit=):", "body": "word_count = if isinstance(path, str):path = json_iter(path)for meta in path:if not isinstance(meta, SummarySent):p = SummarySent(**meta)else:p = metasent_text = p.text.strip().split(\"\")sent_len = len(sent_text)if (word_count + sent_len) > word_limit:breakelse:word_count += sent_lenyield sent_text, p.idx", "docstring": "iterator for the most significant sentences, up to a specified limit", "id": "f2966:m27"} {"signature": "def make_sentence (sent_text):", "body": "lex = []idx = for word in sent_text:if len(word) > :if (idx > ) and not (word[] in \"\"):lex.append(\"\")lex.append(word)idx += return \"\".join(lex)", "docstring": "construct a sentence text, with proper spacing", "id": "f2966:m28"} {"signature": "def json_iter (path):", "body": "with open(path, '') as f:for line in f.readlines():yield json.loads(line)", "docstring": "iterator for JSON-per-line in a file pattern", "id": "f2966:m29"} {"signature": "def pretty_print (obj, indent=False):", "body": "if indent:return json.dumps(obj, sort_keys=True, indent=, separators=('', ''))else:return json.dumps(obj, sort_keys=True)", "docstring": "pretty print a JSON object", "id": "f2966:m30"} {"signature": "def cleanup_text (text):", "body": "x = \"\".join(map(lambda s: s.strip(), text.split(\"\"))).strip()x = x.replace('', '').replace('', '')x = x.replace(\"\", \"\").replace(\"\", \"\").replace(\"\", \"\")x = x.replace('', '').replace('', '')x = 
str(unicodedata.normalize('', x).encode('', '').decode(''))try:assert type(x).__name__ == ''except AssertionError:print(\"\", type(line), line)return x", "docstring": "It scrubs the garbled from its stream...\nOr it gets the debugger again.", "id": "f2970:m0"} {"signature": "@classmethoddef from_resolver(cls, spec_resolver):", "body": "spec_validators = cls._get_spec_validators(spec_resolver)return validators.extend(Draft4Validator, spec_validators)", "docstring": "Creates a customized Draft4ExtendedValidator.\n\n :param spec_resolver: resolver for the spec\n :type resolver: :class:`jsonschema.RefResolver`", "id": "f2979:c0:m0"} {"signature": "def create(self, spec_resolver):", "body": "validator_cls = self.spec_validator_factory.from_resolver(spec_resolver)return validator_cls(self.schema, resolver=self.schema_resolver)", "docstring": "Creates json documents validator from spec resolver.\n :param spec_resolver: reference resolver.\n\n :return: RefResolver for spec with cached remote $refs used during\n validation.\n :rtype: :class:`jsonschema.RefResolver`", "id": "f2979:c1:m2"} {"signature": "@contextmanagerdef visit(self, key):", "body": "self[key] = keytry:yield keyfinally:del self[key]", "docstring": "Visits key and marks as visited.\n Support context manager interface.\n\n :param key: key being visited.", "id": "f2981:c0:m0"} {"signature": "def read_yaml_file(path, loader=ExtendedSafeLoader):", "body": "with open(path) as fh:return load(fh, loader)", "docstring": "Open a file, read it and return its contents.", "id": "f2983:m1"} {"signature": "@classmethoddef from_spec_resolver(cls, spec_resolver):", "body": "deref = DerefValidatorDecorator(spec_resolver)for key, validator_callable in iteritems(cls.validators):yield key, deref(validator_callable)", "docstring": "Creates validators generator for the spec resolver.\n\n :param spec_resolver: resolver for the spec\n :type instance_resolver: :class:`jsonschema.RefResolver`", "id": "f2985:c0:m0"} {"signature": "def construct_mapping(self, node, deep=False):", "body": "mapping = super(ExtendedSafeConstructor, self).construct_mapping(node, deep)return {(str(key) if isinstance(key, int) else key): mapping[key]for key in mapping}", "docstring": "While yaml supports integer keys, these are not valid in\n json, and will break jsonschema. 
This method coerces all keys\n to strings.", "id": "f2986:c0:m0"} {"signature": "def read_file(filename):", "body": "path = os.path.join(os.path.dirname(__file__), filename)with open(path) as f:return f.read()", "docstring": "Open a file, read it and return its contents.", "id": "f2990:m0"} {"signature": "def get_metadata(init_file):", "body": "return dict(re.findall(\"\", init_file))", "docstring": "Read metadata from a given file and return a dictionary of them", "id": "f2990:m1"} {"signature": "def position_to_path(tree, position):", "body": "return PositionFinder().find(tree, position)", "docstring": "Path to the node located at the given line and column\n\n This function locates a node in the rendered source code", "id": "f3011:m0"} {"signature": "def path_to_node(tree, path):", "body": "if path is None:return Nonenode = treefor key in path:node = child_by_key(node, key)return node", "docstring": "FST node located at the given path", "id": "f3011:m1"} {"signature": "def position_to_node(tree, position):", "body": "return path_to_node(tree, position_to_path(tree, position))", "docstring": "FST node located at the given line and column", "id": "f3011:m2"} {"signature": "def node_to_bounding_box(node):", "body": "return BoundingBoxFinder().compute(node)", "docstring": "Bounding box of the given node\n\n The bounding box of a node represents its left most and right most\n position in the rendered source code. Its left position is here\n always (1, 1).", "id": "f3011:m3"} {"signature": "def path_to_bounding_box(tree, path):", "body": "return BoundingBoxFinder().compute(tree, path)", "docstring": "Absolute bounding box of the node located at the given path", "id": "f3011:m4"} {"signature": "def advance_columns(self, columns):", "body": "self.column += columns", "docstring": "(3, 10) -> (3, 11)", "id": "f3011:c0:m1"} {"signature": "def advance_line(self):", "body": "self.line += self.column = ", "docstring": "(3, 10) -> (4, 1)", "id": "f3011:c0:m2"} {"signature": "@propertydef left(self):", "body": "return Position((self.line, self.column - ))", "docstring": "(3, 10) -> (3, 9)", "id": "f3011:c0:m3"} {"signature": "@propertydef right(self):", "body": "return Position((self.line, self.column + ))", "docstring": "(3, 10) -> (3, 11)", "id": "f3011:c0:m4"} {"signature": "def __add__(self, other):", "body": "other = Position(other)return Position((self.line + other.line,self.column + other.column))", "docstring": "(1, 1) + (1, 1) -> (2, 2)", "id": "f3011:c0:m5"} {"signature": "def __neg__(self):", "body": "return Position((-self.line, -self.column))", "docstring": "(1, -1) -> (-1, 1)", "id": "f3011:c0:m6"} {"signature": "def __sub__(self, other):", "body": "other = Position(other)return Position((self.line - other.line,self.column - other.column))", "docstring": "(1, 1) - (1, 1) -> (0, 0)", "id": "f3011:c0:m7"} {"signature": "def __eq__(self, other):", "body": "if not (hasattr(other, '') and hasattr(other, '')) and len(other) < 
:return Falseother = BoundingBox(other)return self.top_left == other.top_left and self.bottom_right == other.bottom_right", "docstring": "Compares BoundingBox with BoundingBox or indexable object", "id": "f3011:c1:m1"} {"signature": "def before_constant(self, constant, key):", "body": "newlines_split = split_on_newlines(constant)for c in newlines_split:if is_newline(c):self.current.advance_line()if self.current.line > self.target.line:return self.STOPelse:advance_by = len(c)if self.is_on_targetted_node(advance_by):self.found_path = deepcopy(self.current_path)return self.STOPself.current.advance_columns(advance_by)", "docstring": "Determine if we're on the targetted node.\n\n If the targetted column is reached, `stop` and `path_found` are\n set. If the targetted line is passed, only `stop` is set. This\n prevents unnecessary tree travelling when the targetted column\n is out of bounds.", "id": "f3011:c3:m1"} {"signature": "def render(node, strict=False):", "body": "if isinstance(node, list):return render_list(node)elif isinstance(node, dict):return render_node(node, strict=strict)else:raise NotImplementedError(\"\" % node.__class__.__name__)", "docstring": "Recipe to render a given FST node.\n\n The FST is composed of branch nodes which are either lists or dicts\n and of leaf nodes which are strings. Branch nodes can have other\n list, dict or leaf nodes as childs.\n\n To render a string, simply output it. To render a list, render each\n of its elements in order. To render a dict, you must follow the\n node's entry in the nodes_rendering_order dictionary and its\n dependents constraints.\n\n This function hides all this algorithmic complexity by returning\n a structured rendering recipe, whatever the type of node. But even\n better, you should subclass the RenderWalker which simplifies\n drastically working with the rendered FST.\n\n The recipe is a list of steps, each step correspond to a child and is actually a 3-uple composed of the following fields:\n\n - `key_type` is a string determining the type of the child in the second field (`item`) of the tuple. It can be one of:\n\n - 'constant': the child is a string\n - 'node': the child is a dict\n - 'key': the child is an element of a dict\n - 'list': the child is a list\n - 'formatting': the child is a list specialized in formatting\n\n - `item` is the child itself: either a string, a dict or a list.\n - `render_key` gives the key used to access this child from the parent node. It's a string if the node is a dict or a number if its a list.\n\n Please note that \"bool\" `key_types` are never rendered, that's why\n they are not shown here.", "id": "f3014:m0"} {"signature": "def get_space(node):", "body": "if len(node) < or len(node[]) == :return Nonereturn transform_tabs_to_spaces(node[][][])", "docstring": "Return space formatting information of node.\n\n If the node does not have a third formatting item - like in\n a ('ENDL', '\\n') node - then we return None as a flag value. This is\n maybe not the best behavior but it seems to work for now.", "id": "f3016:m2"} {"signature": "def string_is_bigger(s1, s2):", "body": "if s1 is None:return Falseelif s2 is None:return Trueelse:return s1 > s2", "docstring": "Return s1 > s2 by taking into account None values.\n\n None is always smaller than any string.\n\n None > \"string\" works in python2 but not in python3. 
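The string_is_bigger record above keeps its body largely intact, so it can be reconstructed directly: None sorts below any string, which is what makes the comparison safe on Python 3 (where None > "a" raises a TypeError):

def string_is_bigger(s1, s2):
    # None is always smaller than any string, so handle the None cases
    # explicitly before falling back to the plain > comparison.
    if s1 is None:
        return False
    elif s2 is None:
        return True
    else:
        return s1 > s2

assert string_is_bigger("b", "a")
assert string_is_bigger("a", None)
assert not string_is_bigger(None, "a")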
This function\n makes it work in python3 too.", "id": "f3016:m4"} {"signature": "def gettokentype(self):", "body": "return self.name", "docstring": "Returns the type or name of the token.", "id": "f3029:c0:m5"} {"signature": "def getstr(self):", "body": "return self.value", "docstring": "Returns the string represented by this token.", "id": "f3029:c0:m6"} {"signature": "@classmethoddef regular_polygon(cls, center, radius, n_vertices, start_angle=, **kwargs):", "body": "angles = (np.arange(n_vertices) * * np.pi / n_vertices) + start_anglereturn cls(center + radius * np.array([np.cos(angles), np.sin(angles)]).T, **kwargs)", "docstring": "Construct a regular polygon.\n\n Parameters\n ----------\n center : array-like\n radius : float\n n_vertices : int\n start_angle : float, optional\n Where to put the first point, relative to `center`,\n in radians counter-clockwise starting from the horizontal axis.\n kwargs\n Other keyword arguments are passed to the |Shape| constructor.", "id": "f3037:c0:m1"} {"signature": "@classmethoddef circle(cls, center, radius, n_vertices=, **kwargs):", "body": "return cls.regular_polygon(center, radius, n_vertices, **kwargs)", "docstring": "Construct a circle.\n\n Parameters\n ----------\n center : array-like\n radius : float\n n_vertices : int, optional\n Number of points to draw.\n Decrease for performance, increase for appearance.\n kwargs\n Other keyword arguments are passed to the |Shape| constructor.", "id": "f3037:c0:m2"} {"signature": "@classmethoddef rectangle(cls, vertices, **kwargs):", "body": "bottom_left, top_right = verticestop_left = [bottom_left[], top_right[]]bottom_right = [top_right[], bottom_left[]]return cls([bottom_left, bottom_right, top_right, top_left], **kwargs)", "docstring": "Shortcut for creating a rectangle aligned with the screen axes from only two corners.\n\n Parameters\n ----------\n vertices : array-like\n An array containing the ``[x, y]`` positions of two corners.\n kwargs\n Other keyword arguments are passed to the |Shape| constructor.", "id": "f3037:c0:m3"} {"signature": "@classmethoddef from_dict(cls, spec):", "body": "spec = spec.copy()center = spec.pop('', None)radius = spec.pop('', None)if center and radius:return cls.circle(center, radius, **spec)vertices = spec.pop('')if len(vertices) == :return cls.rectangle(vertices, **spec)return cls(vertices, **spec)", "docstring": "Create a |Shape| from a dictionary specification.\n\n Parameters\n ----------\n spec : dict\n A dictionary with either the fields ``'center'`` and ``'radius'`` (for a circle),\n ``'center'``, ``'radius'``, and ``'n_vertices'`` (for a regular polygon),\n or ``'vertices'``.\n If only two vertices are given, they are assumed to be lower left and top right corners of a rectangle.\n Other fields are interpreted as keyword arguments.", "id": "f3037:c0:m4"} {"signature": "@propertydef _kwargs(self):", "body": "return dict(color=self.color, velocity=self.velocity, colors=self.colors)", "docstring": "Keyword arguments for recreating the Shape from the vertices.", "id": "f3037:c0:m8"} {"signature": "def distance_to(self, point):", "body": "return np.linalg.norm(self.center - point)", "docstring": "Distance from center to arbitrary point.\n\n Parameters\n ----------\n point : array-like\n\n Returns\n -------\n float", "id": "f3037:c0:m15"} {"signature": "def scale(self, factor, center=None):", "body": "factor = np.asarray(factor)if len(factor.shape):args = list(factor)else:args = [factor, factor]if center is not None:args.extend(center)self.poly.scale(*args)return 
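The regular_polygon constructor above spaces n_vertices evenly around a circle of the given radius. A small sketch of that vertex computation; the factor of 2 * pi is my reconstruction of the stripped literal, and the function below is a free-standing helper rather than the Shape classmethod:

import numpy as np

def regular_polygon_vertices(center, radius, n_vertices, start_angle=0.0):
    # Evenly spaced angles, starting at start_angle (radians, counter-clockwise
    # from the horizontal axis), then projected onto the circle around center.
    angles = (np.arange(n_vertices) * 2 * np.pi / n_vertices) + start_angle
    return np.asarray(center) + radius * np.array([np.cos(angles), np.sin(angles)]).T

square = regular_polygon_vertices((0.0, 0.0), 1.0, 4, start_angle=np.pi / 4)
print(square.round(3))  # four corners of a square inscribed in the unit circle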
self", "docstring": "Resize the shape by a proportion (e.g., 1 is unchanged), in-place.\n\n Parameters\n ----------\n factor : float or array-like\n If a scalar, the same factor will be applied in the x and y dimensions.\n center : array-like, optional\n Point around which to perform the scaling.\n If not passed, the center of the shape is used.", "id": "f3037:c0:m16"} {"signature": "def translate(self, vector):", "body": "self.poly.shift(*vector)", "docstring": "Translate the shape along a vector, in-place.\n\n Parameters\n ----------\n vector : array-like", "id": "f3037:c0:m17"} {"signature": "def rotate(self, angle, center=None):", "body": "args = [angle]if center is not None:args.extend(center)self.poly.rotate(*args)return self", "docstring": "Rotate the shape, in-place.\n\n Parameters\n ----------\n angle : float\n Angle to rotate, in radians counter-clockwise.\n center : array-like, optional\n Point about which to rotate.\n If not passed, the center of the shape will be used.", "id": "f3037:c0:m18"} {"signature": "def flip_x(self, center=None):", "body": "if center is None:self.poly.flip()else:self.poly.flip(center[])", "docstring": "Flip the shape in the x direction, in-place.\n\n Parameters\n ----------\n center : array-like, optional\n Point about which to flip.\n If not passed, the center of the shape will be used.", "id": "f3037:c0:m19"} {"signature": "def flip_y(self, center=None):", "body": "if center is None:self.poly.flop()else:self.poly.flop(center[])return self", "docstring": "Flip the shape in the y direction, in-place.\n\n Parameters\n ----------\n center : array-like, optional\n Point about which to flip.\n If not passed, the center of the shape will be used.", "id": "f3037:c0:m20"} {"signature": "def flip(self, angle, center=None):", "body": "return self.rotate(-angle, center=center).flip_y(center=center).rotate(angle, center=center)", "docstring": "Flip the shape in an arbitrary direction.\n\n Parameters\n ----------\n angle : array-like\n The angle, in radians counter-clockwise from the horizontal axis,\n defining the angle about which to flip the shape (of a line through `center`).\n center : array-like, optional\n The point about which to flip.\n If not passed, the center of the shape will be used.", "id": "f3037:c0:m21"} {"signature": "def draw(self):", "body": "if self.enabled:self._vertex_list.colors = self._gl_colorsself._vertex_list.vertices = self._gl_verticesself._vertex_list.draw(pyglet.gl.GL_TRIANGLES)", "docstring": "Draw the shape in the current OpenGL context.", "id": "f3037:c0:m23"} {"signature": "def update(self, dt):", "body": "self.translate(dt * self.velocity)self.rotate(dt * self.angular_velocity)", "docstring": "Update the shape's position by moving it forward according to its velocity.\n\n Parameters\n ----------\n dt : float", "id": "f3037:c0:m24"} {"signature": "def enable(self, enabled):", "body": "self.enabled = enabledreturn self", "docstring": "Set whether the shape should be drawn.\n\n Parameters\n ----------\n\n enabled : bool", "id": "f3037:c0:m25"} {"signature": "def overlaps(self, other):", "body": "return bool(self.poly.overlaps(other.poly))", "docstring": "Check if two shapes overlap.\n\n Parameters\n ----------\n other : |Shape|\n\n Returns\n -------\n bool", "id": "f3037:c0:m26"} {"signature": "def covers(self, other):", "body": "return bool(self.poly.covers(other.poly))", "docstring": "Check if the shape completely covers another shape.\n\n Parameters\n ----------\n other : |Shape|\n\n Returns\n -------\n bool", "id": 
"f3037:c0:m27"} {"signature": "def _get_or_create_subscription(self):", "body": "topic_path = self._get_topic_path()subscription_name = ''.format(PUBSUB_OBJECT_PREFIX, self.name)subscription_path = self.subscriber_client.subscription_path(self.project, subscription_name)try:self.subscriber_client.get_subscription(subscription_path)except google.cloud.exceptions.NotFound:logger.info(\"\".format(subscription_name))try:self.subscriber_client.create_subscription(subscription_path, topic=topic_path)except google.cloud.exceptions.Conflict:passreturn subscription_path", "docstring": "Workers all share the same subscription so that tasks are\n distributed across all workers.", "id": "f3044:c0:m3"} {"signature": "def enqueue(self, f, *args, **kwargs):", "body": "task = Task(uuid4().hex, f, args, kwargs)self.storage.put_task(task)return self.enqueue_task(task)", "docstring": "Enqueues a function for the task queue to execute.", "id": "f3044:c0:m4"} {"signature": "def enqueue_task(self, task):", "body": "data = dumps(task)if self._async:self.publisher_client.publish(self.topic_path, data=data)logger.info(''.format(task.id))else:unpickled_task = unpickle(data)logger.info(''.format(unpickled_task.id))with measure_time() as summary, self.queue_context():unpickled_task.execute(queue=self)summary(unpickled_task.summary())return TaskResult(task.id, self)", "docstring": "Enqueues a task directly. This is used when a task is retried or if\n a task was manually created.\n\n Note that this does not store the task.", "id": "f3044:c0:m5"} {"signature": "def cleanup(self):", "body": "pass", "docstring": "Does nothing for this queue, but other queues types may use this to\n perform clean-up after listening for tasks.", "id": "f3044:c0:m8"} {"signature": "def queue_context(self):", "body": "return queue_context(self)", "docstring": "Returns a context manager that sets this queue as the current_queue\nglobal. Similar to flask's app.app_context. This is used by the workers\nto make the global available inside of task functions.", "id": "f3044:c0:m9"} {"signature": "def task_context(self):", "body": "return task_context(self)", "docstring": "Returns a context manager that sets this task as the current_task\nglobal. Similar to flask's app.request_context. This is used by the\nworkers to make the global available inside of task functions.", "id": "f3045:c2:m9"} {"signature": "def result(self, timeout=None):", "body": "start = time.time()while True:task = self.get_task()if not task or task.status not in (FINISHED, FAILED):if not timeout:continueelif time.time() - start < timeout:continueelse:raise TimeoutError()if task.status == FAILED:raise task.resultreturn task.result", "docstring": "Gets the result of the task.\n\n Arguments:\n timeout: Maximum seconds to wait for a result before raising a\n TimeoutError. If set to None, this will wait forever. 
If the\n queue doesn't store results and timeout is None, this call will\n never return.", "id": "f3045:c3:m2"} {"signature": "def _get_or_create_subscription(self):", "body": "topic_path = self._get_topic_path()subscription_name = ''.format(queue.PUBSUB_OBJECT_PREFIX, self.name, uuid4().hex)subscription_path = self.subscriber_client.subscription_path(self.project, subscription_name)try:self.subscriber_client.get_subscription(subscription_path)except google.cloud.exceptions.NotFound:logger.info(\"\".format(subscription_name))self.subscriber_client.create_subscription(subscription_path, topic_path)return subscription_path", "docstring": "In a broadcast queue, workers have a unique subscription ensuring\n that every worker recieves a copy of every task.", "id": "f3046:c0:m1"} {"signature": "def cleanup(self):", "body": "if self.subscription:logger.info(\"\")self.subscriber_client.delete_subscription(self.subscription)", "docstring": "Deletes this worker's subscription.", "id": "f3046:c0:m2"} {"signature": "def unpickle(pickled_string):", "body": "try:obj = loads(pickled_string)except Exception as e:raise UnpickleError('', pickled_string, e)return obj", "docstring": "Unpickles a string, but raises a unified UnpickleError in case anything\n fails.\n This is a helper method to not have to deal with the fact that `loads()`\n potentially raises many types of exceptions (e.g. AttributeError,\n IndexError, TypeError, KeyError, etc.)", "id": "f3055:m0"} {"signature": "@click.command()@click.option('', '',help='')@click.option('',help='')@click.argument('',nargs=,required=True)def main(path, pid, queue):", "body": "setup_logging()if pid:with open(os.path.expanduser(pid), \"\") as f:f.write(str(os.getpid()))if not path:path = os.getcwd()sys.path.insert(, path)queue = import_queue(queue)import psqworker = psq.Worker(queue=queue)worker.listen()", "docstring": "Standalone PSQ worker.\n\nThe queue argument must be the full importable path to a psq.Queue\ninstance.\n\nExample usage:\n\n psqworker config.q\n\n psqworker --path /opt/app queues.fast", "id": "f3058:m2"} {"signature": "def busybox_single_app_bundle_fixture(num_bundles=, command=[''], app_name_transformer=None):", "body": "if app_name_transformer is None:app_name_transformer = lambda x: xapp_dict = {'': '','': '','': '','': {'': command},'': {'': '','': [''],'': [{'': '','': [''],'': ''},{'': '','': [''],'': ''},{'': '','': [''],'': ''}]}}for bundle in range(num_bundles):app_name = app_name_transformer(''.format(_num_to_alpha(bundle)))bundle_name = ''.format(_num_to_alpha(bundle))_write('', bundle_name, {'': '', '': [app_name]})_write('', app_name, app_dict)", "docstring": "Fixture for use in integration tests. The local repo at\n /tmp/fake-repo should be set up before using this fixture. 
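unpickle() above exists so callers only ever have to catch one exception type. A self-contained reconstruction; UnpickleError below is a stand-in class, since the real one lives elsewhere in the package:

import pickle

class UnpickleError(Exception):
    # Stand-in for the package's unified unpickling error.
    pass

def unpickle(pickled_string):
    # loads() can raise AttributeError, IndexError, TypeError, KeyError, ...
    # so collapse every failure into a single UnpickleError.
    try:
        return pickle.loads(pickled_string)
    except Exception as e:
        raise UnpickleError('could not unpickle', e)

print(unpickle(pickle.dumps({'answer': 42})))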
Optionally takes in\n a name transformer function which is applied to the default names of the apps.", "id": "f3059:m9"} {"signature": "@propertydef CommandError(self):", "body": "return CommandError", "docstring": "Pure convenience to avoid having to import this explicitly", "id": "f3121:c4:m4"} {"signature": "@propertydef stdout(self):", "body": "out = sys.stdout.getvalue()[self.stdout_start:].strip()if isinstance(out, str):out = out.encode('')return out", "docstring": "Returns any stdout that has been generated *since* the last time\n clear_stdout was called.", "id": "f3121:c4:m5"} {"signature": "@patch('')def run_command(self, args, fake_exit, raise_on_error=True):", "body": "with patch('', wraps=self.exec_docker_patch) as fake_exec_docker:fake_exit.side_effect = SysExit('')self.fake_exec_docker = fake_exec_dockersys.argv = [''] + args.split('')try:client_entrypoint()except SysExit:passfor call in fake_exit.mock_calls:name, args, kwargs = callif len(args) == and args[] > and raise_on_error:self._clear_stdout()raise CommandError(''.format(''.join(sys.argv), args[]))result = self.stdoutself._clear_stdout()return result", "docstring": "Run a command through the Dusty client entrypoint, e.g. simulating\n the Dusty CLI as close as possible without having to call a subprocess.\n This command raises if the command fails, otherwise it returns the\n stdout generated by the command.", "id": "f3121:c4:m7"} {"signature": "def verify_mac_username(username):", "body": "try:pwd.getpwnam(username)except:raise RuntimeError(''.format(username))", "docstring": "Raise an error if the user doesn't exist", "id": "f3124:m10"} {"signature": "def _load_ssh_auth_post_yosemite(mac_username):", "body": "user_id = subprocess.check_output(['', '', mac_username])ssh_auth_sock = subprocess.check_output(['', '', user_id, '', '', '']).rstrip()_set_ssh_auth_sock(ssh_auth_sock)", "docstring": "Starting with Yosemite, launchd was rearchitected and now only one\n launchd process runs for all users. This allows us to much more easily\n impersonate a user through launchd and extract the environment\n variables from their running processes.", "id": "f3124:m13"} {"signature": "def _load_ssh_auth_pre_yosemite():", "body": "for process in psutil.process_iter():if process.name() == '':ssh_auth_sock = subprocess.check_output(['', '', str(process.pid), '', '', '']).rstrip()if ssh_auth_sock:_set_ssh_auth_sock(ssh_auth_sock)breakelse:daemon_warnings.warn('', '')", "docstring": "For OS X versions before Yosemite, many launchd processes run simultaneously under\n different users and different permission models. The simpler `asuser` trick we use\n in Yosemite doesn't work, since it gets routed to the wrong launchd. We instead need\n to find the running ssh-agent process and use its PID to navigate ourselves\n to the correct launchd.", "id": "f3124:m14"} {"signature": "def check_and_load_ssh_auth():", "body": "mac_username = get_config_value(constants.CONFIG_MAC_USERNAME_KEY)if not mac_username:logging.info(\"\")returnif not _running_on_mac(): logging.info(\"\")returnif _mac_version_is_post_yosemite():_load_ssh_auth_post_yosemite(mac_username)else:_load_ssh_auth_pre_yosemite()", "docstring": "Will check the mac_username config value; if it is present, will load that user's\nSSH_AUTH_SOCK environment variable to the current environment. 
This allows git clones\nto behave the same for the daemon as they do for the user", "id": "f3124:m16"} {"signature": "def parent_dir(path):", "body": "return os.path.split(path)[]", "docstring": "Return the parent directory of a file or directory.\n This is commonly useful for creating parent directories\n prior to creating a file.", "id": "f3125:m0"} {"signature": "def case_insensitive_rename(src, dst):", "body": "temp_dir = tempfile.mkdtemp()shutil.rmtree(temp_dir)shutil.move(src, temp_dir)shutil.move(temp_dir, dst)", "docstring": "A hack to allow us to rename paths in a case-insensitive filesystem like HFS.", "id": "f3125:m5"} {"signature": "def _compile_docker_commands(app_name, assembled_specs, port_spec):", "body": "app_spec = assembled_specs[''][app_name]commands = ['']commands += _lib_install_commands_for_app(app_name, assembled_specs)if app_spec['']:commands.append(\"\".format(container_code_path(app_spec)))commands.append(\"\".format(container_code_path(app_spec)))commands += _copy_assets_commands_for_app(app_spec, assembled_specs)commands += _get_once_commands(app_spec, port_spec)commands += _get_always_commands(app_spec)return commands", "docstring": "This is used to compile the command that will be run when the docker container starts\n up. This command has to install any libs that the app uses, run the `always` command, and\n run the `once` command if the container is being launched for the first time", "id": "f3127:m8"} {"signature": "def _lib_install_commands_for_app(app_name, assembled_specs):", "body": "libs = assembled_specs[''][app_name]['']['']return _lib_install_commands_for_libs(assembled_specs, libs)", "docstring": "This returns a list of all the commands that will install libraries for a\n given app", "id": "f3127:m11"} {"signature": "def _lib_install_commands_for_lib(app_name, assembled_specs):", "body": "libs = assembled_specs[''][app_name]['']['']return _lib_install_commands_for_libs(assembled_specs, libs)", "docstring": "This returns a list of all the commands that will install libraries for a\n given lib", "id": "f3127:m12"} {"signature": "def _lib_install_commands(lib_spec):", "body": "if not lib_spec['']:return []return [\"\".format(lib_spec[''])] + lib_spec['']", "docstring": "This returns a single commmand that will install a library in a docker container", "id": "f3127:m14"} {"signature": "def repo_mount_validator():", "body": "def validator(document):if '' in document and '' in document:returnelif '' not in document and '' not in document:returnreturn ''return validator", "docstring": "If either repo or mount are provided, they must both be provided.", "id": "f3133:m1"} {"signature": "def _mount_repo(repo, wait_for_server=False):", "body": "check_call_on_vm(''.format(repo.vm_path))if wait_for_server:for i in range(,):try:_run_mount_command(repo)returnexcept CalledProcessError as e:if '' in e.output:logging.info('')time.sleep()else:logging.info(e.output)raise elog_to_client(''.format(repo.short_name))raise RuntimeError('')else:_run_mount_command(repo)", "docstring": "This function will create the VM directory where a repo will be mounted, if it\ndoesn't exist. 
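case_insensitive_rename above survives nearly intact, so it can be written out runnable as-is: on a case-insensitive filesystem such as HFS, renaming 'Foo' to 'foo' directly is a no-op, so the path is bounced through a unique temporary name:

import shutil
import tempfile

def case_insensitive_rename(src, dst):
    # mkdtemp() is used purely to reserve a unique path name; the directory
    # itself is removed immediately so shutil.move can reuse the name.
    temp_dir = tempfile.mkdtemp()
    shutil.rmtree(temp_dir)
    shutil.move(src, temp_dir)
    shutil.move(temp_dir, dst)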
If wait_for_server is set, it will wait up to 10 seconds for\nthe nfs server to start, by retrying mounts that fail with 'Connection Refused'.\n\nIf wait_for_server is not set, it will attempt to run the mount command once", "id": "f3136:m6"} {"signature": "def configure_nfs_server():", "body": "repos_for_export = get_all_repos(active_only=True, include_specs_repo=False)current_exports = _get_current_exports()needed_exports = _get_exports_for_repos(repos_for_export)_ensure_managed_repos_dir_exists()if not needed_exports.difference(current_exports):if not _server_is_running():_restart_server()return_write_exports_config(needed_exports)_restart_server()", "docstring": "This function is used with `dusty up`. It will check all active repos to see if\nthey are exported. If any are missing, it will replace current dusty exports with\nexports that are needed for currently active repos, and restart\nthe nfs server", "id": "f3137:m0"} {"signature": "def add_exports_for_repos(repos):", "body": "current_exports = _get_current_exports()needed_exports = _get_exports_for_repos(repos)if not needed_exports.difference(current_exports):if not _server_is_running():_restart_server()return_write_exports_config(current_exports.union(needed_exports))_restart_server()", "docstring": "This function will add needed entries to /etc/exports. It will not remove any\nentries from the file. It will then restart the server if necessary", "id": "f3137:m1"} {"signature": "def _ensure_managed_repos_dir_exists():", "body": "if not os.path.exists(constants.REPOS_DIR):os.makedirs(constants.REPOS_DIR)", "docstring": "Our exports file will be invalid if this folder doesn't exist, and the NFS server\nwill not run correctly.", "id": "f3137:m2"} {"signature": "def vm_path_is_directory(remote_path):", "body": "try:check_call_on_vm(''.format(remote_path))except CalledProcessError:return Falsereturn True", "docstring": "A weak check of whether a path in the Dusty VM is a directory.\n This function returns False on any process error, so False may indicate\n other failures such as the path not actually existing.", "id": "f3139:m2"} {"signature": "def registry_from_image(image_name):", "body": "if '' not in image_name: return constants.PUBLIC_DOCKER_REGISTRYprefix = image_name.split('')[]if '' not in prefix: return constants.PUBLIC_DOCKER_REGISTRYreturn prefix", "docstring": "Returns the Docker registry host associated with\n a given image name.", "id": "f3140:m0"} {"signature": "@memoizeddef get_authed_registries():", "body": "result = set()if not os.path.exists(constants.DOCKER_CONFIG_PATH):return resultconfig = json.load(open(constants.DOCKER_CONFIG_PATH, ''))for registry in config.get('', {}).iterkeys():try:parsed = urlparse(registry)except Exception:log_to_client('').format(registry)result.add(parsed.netloc) if parsed.netloc else result.add(parsed.path)return result", "docstring": "Reads the local Docker client config for the current user\n and returns all registries to which the user may be logged in.\n This is intended to be run client-side, not by the daemon.", "id": "f3140:m1"} {"signature": "def get_dusty_images():", "body": "specs = get_specs()dusty_image_names = [spec[''] for spec in specs[''].values() + specs[''].values() if '' in spec]dusty_images = set([name if '' in name else \"\".format(name) for name in dusty_image_names])return dusty_images", "docstring": "Returns all images listed in dusty specs (apps + bundles), in the form repository:tag. 
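_mount_repo's wait_for_server behaviour described above is a bounded retry that only swallows 'Connection refused' failures while the NFS server finishes starting. A generic sketch of that pattern; run_mount, the attempt count and the delay are assumptions rather than Dusty's exact values:

import time
from subprocess import CalledProcessError

def mount_with_retry(run_mount, attempts=10, delay=1.0):
    # Retry only when the failure looks like the NFS server not being up yet;
    # any other error (or the final attempt) is re-raised to the caller.
    for attempt in range(attempts):
        try:
            return run_mount()
        except CalledProcessError as e:
            if 'Connection refused' in (e.output or '') and attempt < attempts - 1:
                time.sleep(delay)
                continue
            raise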
Tag will be set to latest\n if no tag is specified in the specs", "id": "f3144:m1"} {"signature": "@memoizeddef get_docker_client():", "body": "env = get_docker_env()host, cert_path, tls_verify = env[''], env[''], env['']params = {'': host.replace('', ''),'': None,'': ''}if tls_verify and cert_path:params[''] = docker.tls.TLSConfig(client_cert=(os.path.join(cert_path, ''),os.path.join(cert_path, '')),ca_cert=os.path.join(cert_path, ''),verify=True,ssl_version=None,assert_hostname=False)return docker.Client(**params)", "docstring": "Ripped off and slightly modified based on docker-py's\n kwargs_from_env utility function.", "id": "f3144:m4"} {"signature": "def get_dusty_containers(services, include_exited=False):", "body": "client = get_docker_client()if services:containers = [get_container_for_app_or_service(service, include_exited=include_exited) for service in services]return [container for container in containers if container]else:return [containerfor container in client.containers(all=include_exited)if any(name.startswith('') for name in container.get('', []))]", "docstring": "Get a list of containers associated with the list\n of services. If no services are provided, attempts to\n return all containers associated with Dusty.", "id": "f3144:m5"} {"signature": "def get_canonical_container_name(container):", "body": "return sorted(container[''], key=lambda name: len(name))[][:]", "docstring": "Return the canonical container name, which should be\n of the form dusty__1. Containers are returned\n from the Python client with many names based on the containers\n to which they are linked, but simply taking the shortest name\n should be sufficient to get us the shortest one.", "id": "f3144:m7"} {"signature": "def _compose_restart(services):", "body": "def _restart_container(client, container):log_to_client(''.format(get_canonical_container_name(container)))client.restart(container[''], timeout=)assembled_specs = get_assembled_specs()if services == []:services = [spec.name for spec in assembled_specs.get_apps_and_services()]logging.info(''.format(services))client = get_docker_client()for service in services:container = get_container_for_app_or_service(service, include_exited=True)if container is None:log_to_client(''.format(service))continuestopped_linked_containers = _check_stopped_linked_containers(container, assembled_specs)if stopped_linked_containers:log_to_client(''.format(stopped_linked_containers, service))else:_restart_container(client, container)", "docstring": "Well, this is annoying. Compose 1.2 shipped with the\n restart functionality fucking broken, so we can't set a faster\n timeout than 10 seconds (which is way too long) using Compose.\n We are therefore resigned to trying to hack this together\n ourselves. 
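registry_from_image above infers the registry host from an image name. A hedged reconstruction of the usual heuristic; the split characters and the public-registry constant value are my assumptions for the stripped literals:

PUBLIC_DOCKER_REGISTRY = 'registry.hub.docker.com'  # assumed value of the constant

def registry_from_image(image_name):
    # 'busybox' and 'library/busybox' have no dotted first component, so they
    # belong to the public registry; 'quay.io/acme/api' names a private host.
    if '/' not in image_name:
        return PUBLIC_DOCKER_REGISTRY
    prefix = image_name.split('/')[0]
    if '.' not in prefix:
        return PUBLIC_DOCKER_REGISTRY
    return prefix

print(registry_from_image('quay.io/acme/api'))   # quay.io
print(registry_from_image('library/busybox'))    # registry.hub.docker.com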
Lame.\n\n Relevant fix which will make it into the next release:\n https://github.com/docker/compose/pull/1318", "id": "f3145:m6"} {"signature": "def update_running_containers_from_spec(compose_config, recreate_containers=True):", "body": "write_composefile(compose_config, constants.COMPOSEFILE_PATH)compose_up(constants.COMPOSEFILE_PATH, '', recreate_containers=recreate_containers)", "docstring": "Takes in a Compose spec from the Dusty Compose compiler,\n writes it to the Compose spec folder so Compose can pick it\n up, then does everything needed to make sure the Docker VM is\n up and running containers with the updated config.", "id": "f3145:m7"} {"signature": "def stop_running_services(services=None):", "body": "if services is None:services = []_compose_stop(constants.COMPOSEFILE_PATH, '', services)", "docstring": "Stop running containers owned by Dusty, or a specific\n list of Compose services if provided.\n\n Here, \"services\" refers to the Compose version of the term,\n so any existing running container, by name. This includes Dusty\n apps and services.", "id": "f3145:m8"} {"signature": "def restart_running_services(services=None):", "body": "if services is None:services = []_compose_restart(services)", "docstring": "Restart containers owned by Dusty, or a specific\n list of Compose services if provided.\n\n Here, \"services\" refers to the Compose version of the term,\n so any existing running container, by name. This includes Dusty\n apps and services.", "id": "f3145:m9"} {"signature": "def remove_exited_dusty_containers():", "body": "client = get_docker_client()exited_containers = get_exited_dusty_containers()removed_containers = []for container in exited_containers:log_to_client(\"\".format(container[''][]))try:client.remove_container(container[''], v=True)removed_containers.append(container)except Exception as e:log_to_client(e.message or str(e))return removed_containers", "docstring": "Removed all dusty containers with 'Exited' in their status", "id": "f3146:m1"} {"signature": "def remove_images():", "body": "client = get_docker_client()removed = _remove_dangling_images()dusty_images = get_dusty_images()all_images = client.images(all=True)for image in all_images:if set(image['']).intersection(dusty_images):try:client.remove_image(image[''])except Exception as e:logging.info(\"\".format(image['']))else:log_to_client(\"\".format(image['']))removed.append(image)return removed", "docstring": "Removes all dangling images as well as all images referenced in a dusty spec; forceful removal is not used", "id": "f3146:m3"} {"signature": "def _write_nginx_config(nginx_config, path):", "body": "with open(path, '') as f:f.write(nginx_config)", "docstring": "Writes the config file from the Dusty Nginx compiler\n to the Nginx includes directory, which should be included\n in the main nginx.conf.", "id": "f3147:m0"} {"signature": "def update_nginx_from_config(nginx_config):", "body": "logging.info('')temp_dir = tempfile.mkdtemp()os.mkdir(os.path.join(temp_dir, ''))_write_nginx_config(constants.NGINX_BASE_CONFIG, os.path.join(temp_dir, constants.NGINX_PRIMARY_CONFIG_NAME))_write_nginx_config(nginx_config[''], os.path.join(temp_dir, constants.NGINX_HTTP_CONFIG_NAME))_write_nginx_config(nginx_config[''], os.path.join(temp_dir, constants.NGINX_STREAM_CONFIG_NAME))_write_nginx_config(constants.NGINX_502_PAGE_HTML, os.path.join(temp_dir, '', constants.NGINX_502_PAGE_NAME))sync_local_path_to_vm(temp_dir, constants.NGINX_CONFIG_DIR_IN_VM)", "docstring": "Write the given config to disk as a Dusty 
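get_canonical_container_name above relies on the shortest of a container's names being the plain dusty_<service>_1 form. A sketch against the docker-py container dict; the 'Names' key and the [1:] slice that drops the leading slash are my assumptions for the stripped pieces:

def get_canonical_container_name(container):
    # Linked containers carry extra alias names like '/dusty_api_1/web';
    # the shortest entry is the canonical one, minus its leading '/'.
    return sorted(container['Names'], key=len)[0][1:]

print(get_canonical_container_name(
    {'Names': ['/dusty_api_1/web', '/dusty_web_1']}))  # dusty_web_1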
sub-config\n in the Nginx includes directory. Then, either start nginx\n or tell it to reload its config to pick up what we've\n just written.", "id": "f3147:m1"} {"signature": "def remove_current_dusty_config(config):", "body": "return constants.DUSTY_CONFIG_REGEX.sub(\"\", config)", "docstring": "Given a string representing the contents of a\n file, this function strips out the Dusty config section\n denominated by the Dusty header and footer. Returns\n the stripped string.", "id": "f3148:m2"} {"signature": "def _dusty_vm_exists():", "body": "existing_vms = check_output_demoted(['', '', ''])for line in existing_vms.splitlines():if ''.format(constants.VM_MACHINE_NAME) in line:return Truereturn False", "docstring": "We use VBox directly instead of Docker Machine because it\n shaves about 0.5 seconds off the runtime of this check.", "id": "f3150:m11"} {"signature": "def _apply_nat_dns_host_resolver():", "body": "check_and_log_output_and_error_demoted(['', '', constants.VM_MACHINE_NAME, '', ''],quiet_on_success=True)", "docstring": "This will make the Dusty VM always use the host's DNS resolver for lookups.\nIt solves an issue we were seeing where the VM's resolving settings would get\nout of date when a laptop was moved between routers with different settings,\nresulting in DNS lookup failures on the VM.", "id": "f3150:m12"} {"signature": "def _apply_nat_net_less_greedy_subnet():", "body": "check_and_log_output_and_error_demoted(['', '', constants.VM_MACHINE_NAME, '', ''],quiet_on_success=True)", "docstring": "By default, VirtualBox claims 10.0.2.x for itself as part of its NAT routing\n scheme. This subnet is commonly used on internal networks, making this a pretty\n damn greedy choice. We instead alter the VM to use the less greedy subnet of\n 10.174.249.x which is less likely to conflict.", "id": "f3150:m13"} {"signature": "def _init_docker_vm():", "body": "if not _dusty_vm_exists():log_to_client('')machine_options = ['', '','', '','', constants.CONFIG_BOOT2DOCKER_URL,'', str(get_config_value(constants.CONFIG_VM_MEM_SIZE)),'', constants.VM_NIC_TYPE]check_call_demoted(['', ''] + machine_options + [constants.VM_MACHINE_NAME],redirect_stderr=True)", "docstring": "Initialize the Dusty VM if it does not already exist.", "id": "f3150:m14"} {"signature": "def _start_docker_vm():", "body": "is_running = docker_vm_is_running()if not is_running:log_to_client(''.format(constants.VM_MACHINE_NAME))_apply_nat_dns_host_resolver()_apply_nat_net_less_greedy_subnet()check_and_log_output_and_error_demoted(['', '', constants.VM_MACHINE_NAME], quiet_on_success=True)return is_running", "docstring": "Start the Dusty VM if it is not already running.", "id": "f3150:m15"} {"signature": "def _stop_docker_vm():", "body": "check_call_demoted(['', '', constants.VM_MACHINE_NAME], redirect_stderr=True)", "docstring": "Stop the Dusty VM if it is not already stopped.", "id": "f3150:m16"} {"signature": "def docker_vm_is_running():", "body": "running_vms = check_output_demoted(['', '', ''])for line in running_vms.splitlines():if ''.format(constants.VM_MACHINE_NAME) in line:return Truereturn False", "docstring": "Using VBoxManage is 0.5 seconds or so faster than Machine.", "id": "f3150:m18"} {"signature": "def _apply_nic_fix():", "body": "log_to_client('')check_call_demoted(['', '', constants.VM_MACHINE_NAME, '', constants.VM_NIC_TYPE])", "docstring": "Set NIC 1 to use PCnet-FAST III. 
The host-only NIC type is\n set during docker-machine create (and Machine will change it\n back if it is changed manually), which is why we only change\n NIC 1 here.", "id": "f3150:m20"} {"signature": "def delete_docker_vm_host_only_interface():", "body": "adapter_name = get_vm_hostonly_adapter()log_to_client(''.format(adapter_name))check_call_demoted(['', '', '', adapter_name])", "docstring": "Attempt to delete the host-only interface attached\n to the current Dusty VM. VM should be stopped\n before calling this.", "id": "f3150:m21"} {"signature": "def regenerate_docker_vm_certificates():", "body": "log_to_client('')check_call_demoted(['', '', '', constants.VM_MACHINE_NAME])", "docstring": "Regenerate certificates for a running VM through Docker Machine.\n This may be necessary following a restart if there were previously\n networking issues preventing Machine from doing this as part\n of normal startup.", "id": "f3150:m22"} {"signature": "def _get_localhost_ssh_port():", "body": "for line in _get_vm_config():if line.startswith(''):spec = line.split('')[].strip('')name, protocol, host, host_port, target, target_port = spec.split('')if name == '' and protocol == '' and target_port == '':return host_portraise ValueError('')", "docstring": "Something in the VM chain, either VirtualBox or Machine, helpfully\n sets up localhost-to-VM forwarding on port 22. We can inspect this\n rule to determine the port on localhost which gets forwarded to\n 22 in the VM.", "id": "f3150:m26"} {"signature": "def _get_host_only_mac_address():", "body": "vm_config = _get_vm_config()for line in vm_config:if line.startswith(''):adapter_number = int(line[:])breakelse:raise ValueError('')for line in vm_config:if line.startswith(''.format(adapter_number)):return line.split('')[].strip('').lower()raise ValueError(''.format(adapter_number))", "docstring": "Returns the MAC address assigned to the host-only adapter,\n using output from VBoxManage. Returned MAC address has no colons\n and is lower-cased.", "id": "f3150:m27"} {"signature": "def _ip_for_mac_from_ip_addr_show(ip_addr_show, target_mac):", "body": "return_next_ip = Falsefor line in ip_addr_show.splitlines():line = line.strip()if line.startswith(''):line_mac = line.split('')[].replace('', '')if line_mac == target_mac:return_next_ip = Trueelif return_next_ip and line.startswith('') and not line.startswith(''):ip = line.split('')[].split('')[]return ip", "docstring": "Given the rather-complex output from an 'ip addr show' command\n on the VM, parse the output to determine the IP address\n assigned to the interface with the given MAC.", "id": "f3150:m28"} {"signature": "def _get_host_only_ip():", "body": "mac = _get_host_only_mac_address()ip_addr_show = check_output_demoted(['', '', '','', '','', _vm_key_path(), '', _get_localhost_ssh_port(),'', ''])return _ip_for_mac_from_ip_addr_show(ip_addr_show, mac)", "docstring": "Determine the host-only IP of the Dusty VM through Virtualbox and SSH\n directly, bypassing Docker Machine. We do this because Docker Machine is\n much slower, taking about 600ms total. We are basically doing the same\n flow Docker Machine does in its own code.", "id": "f3150:m29"} {"signature": "def _dusty_hosts_config(hosts_specs):", "body": "rules = ''.join([''.format(spec[''], spec['']) for spec in hosts_specs])return config_file.create_config_section(rules)", "docstring": "Return a string of all host rules required to match\n the given spec. 
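remove_current_dusty_config above deletes a header/footer-delimited block with one regex substitution. A sketch of the same idea with made-up marker strings; the real header, footer and compiled regex come from Dusty's constants module:

import re

# Hypothetical markers; Dusty's real ones live in its constants module.
DUSTY_CONFIG_BEGIN = '# BEGIN section for Dusty'
DUSTY_CONFIG_END = '# END section for Dusty'
DUSTY_CONFIG_REGEX = re.compile(
    re.escape(DUSTY_CONFIG_BEGIN) + r'.*?' + re.escape(DUSTY_CONFIG_END) + r'\n?',
    flags=re.DOTALL)

def remove_current_dusty_config(config):
    # Strip everything between (and including) the Dusty header and footer.
    return DUSTY_CONFIG_REGEX.sub('', config)

hosts = '127.0.0.1 localhost\n' + DUSTY_CONFIG_BEGIN + '\n1.2.3.4 app.local\n' + DUSTY_CONFIG_END + '\n'
print(remove_current_dusty_config(hosts))  # only the localhost line survives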
This string is wrapped in the Dusty hosts\n header and footer so it can be easily removed later.", "id": "f3151:m0"} {"signature": "def update_hosts_file_from_port_spec(port_spec):", "body": "logging.info('')hosts_specs = port_spec['']current_hosts = config_file.read(constants.HOSTS_PATH)cleared_hosts = config_file.remove_current_dusty_config(current_hosts)updated_hosts = cleared_hosts + _dusty_hosts_config(hosts_specs)config_file.write(constants.HOSTS_PATH, updated_hosts)", "docstring": "Given a port spec, update the hosts file specified at\n constants.HOST_PATH to contain the port mappings specified\n in the spec. Any existing Dusty configurations are replaced.", "id": "f3151:m1"} {"signature": "def _nginx_location_spec(port_spec, bridge_ip):", "body": "location_string_spec = \"\"for location_setting in ['','','','','',_nginx_proxy_string(port_spec, bridge_ip)]:location_string_spec += \"\".format(location_setting)location_string_spec += \"\"return location_string_spec", "docstring": "This will output the nginx location config string for specific port spec", "id": "f3153:m1"} {"signature": "def _nginx_http_spec(port_spec, bridge_ip):", "body": "server_string_spec = \"\"server_string_spec += \"\".format(_nginx_max_file_size_string())server_string_spec += \"\".format(_nginx_listen_string(port_spec))server_string_spec += \"\".format(_nginx_server_name_string(port_spec))server_string_spec += _nginx_location_spec(port_spec, bridge_ip)server_string_spec += _custom_502_page()server_string_spec += \"\"return server_string_spec", "docstring": "This will output the nginx HTTP config string for specific port spec", "id": "f3153:m6"} {"signature": "def _nginx_stream_spec(port_spec, bridge_ip):", "body": "server_string_spec = \"\"server_string_spec += \"\".format(_nginx_listen_string(port_spec))server_string_spec += \"\".format(_nginx_proxy_string(port_spec, bridge_ip))server_string_spec += \"\"return server_string_spec", "docstring": "This will output the nginx stream config string for specific port spec", "id": "f3153:m7"} {"signature": "def get_nginx_configuration_spec(port_spec_dict, docker_bridge_ip):", "body": "nginx_http_config, nginx_stream_config = \"\", \"\"for port_spec in port_spec_dict['']:if port_spec[''] == '':nginx_http_config += _nginx_http_spec(port_spec, docker_bridge_ip)elif port_spec[''] == '':nginx_stream_config += _nginx_stream_spec(port_spec, docker_bridge_ip)return {'': nginx_http_config, '': nginx_stream_config}", "docstring": "This function will take in a port spec as specified by the port_spec compiler and\n will output an nginx web proxy config string. 
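_ip_for_mac_from_ip_addr_show above scans `ip addr show` output for the link/ether line whose MAC matches, then returns the address from the following inet line. A hedged reconstruction of that parsing; the split and replace arguments are my guesses for the stripped literals:

def ip_for_mac(ip_addr_show_output, target_mac):
    # target_mac is lower-cased with no colons, matching how the surrounding
    # code normalises the VirtualBox host-only adapter's MAC address.
    return_next_ip = False
    for line in ip_addr_show_output.splitlines():
        line = line.strip()
        if line.startswith('link/ether'):
            if line.split(' ')[1].replace(':', '') == target_mac:
                return_next_ip = True
        elif return_next_ip and line.startswith('inet') and not line.startswith('inet6'):
            return line.split(' ')[1].split('/')[0]

sample = ('link/ether 08:00:27:aa:bb:cc brd ff:ff:ff:ff:ff:ff\n'
          '    inet 192.168.99.100/24 scope global eth1')
print(ip_for_mac(sample, '080027aabbcc'))  # 192.168.99.100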
This string can then be written to a file\n and used running nginx", "id": "f3153:m8"} {"signature": "def get_port_spec_document(expanded_active_specs, docker_vm_ip):", "body": "forwarding_port = port_spec = {'':{}, '':[], '':[]}host_full_addresses, host_names, stream_host_ports = set(), set(), set()for app_name in sorted(expanded_active_specs[''].keys()):app_spec = expanded_active_specs[''][app_name]if '' not in app_spec:continueport_spec[''][app_name] = []for host_forwarding_spec in app_spec['']:_add_full_addresses(host_forwarding_spec, host_full_addresses)if host_forwarding_spec[''] == '':_add_stream_host_port(host_forwarding_spec, stream_host_ports)port_spec[''][app_name].append(_docker_compose_port_spec(host_forwarding_spec, forwarding_port))port_spec[''].append(_nginx_port_spec(host_forwarding_spec, forwarding_port, docker_vm_ip))_add_host_names(host_forwarding_spec, docker_vm_ip, port_spec, host_names)forwarding_port += return port_spec", "docstring": "Given a dictionary containing the expanded dusty DAG specs this function will\n return a dictionary containing the port mappings needed by downstream methods. Currently\n this includes docker_compose, virtualbox, nginx and hosts_file.", "id": "f3154:m6"} {"signature": "def get_lib_volume_mounts(base_lib_name, assembled_specs):", "body": "volumes = [_get_lib_repo_volume_mount(assembled_specs[''][base_lib_name])]volumes.append(get_command_files_volume_mount(base_lib_name, test=True))for lib_name in assembled_specs[''][base_lib_name]['']['']:lib_spec = assembled_specs[''][lib_name]volumes.append(_get_lib_repo_volume_mount(lib_spec))return volumes", "docstring": "Returns a list of the formatted volume specs for a lib", "id": "f3155:m4"} {"signature": "def _get_app_repo_volume_mount(app_spec):", "body": "if app_spec['']:return \"\".format(Repo(app_spec['']).vm_path, container_code_path(app_spec))", "docstring": "This returns the formatted volume mount spec to mount the local code for an app in the\n container", "id": "f3155:m5"} {"signature": "def _get_lib_repo_volume_mount(lib_spec):", "body": "return \"\".format(Repo(lib_spec['']).vm_path, container_code_path(lib_spec))", "docstring": "This returns the formatted volume mount spec to mount the local code for a lib in the\n container", "id": "f3155:m6"} {"signature": "def container_code_path(spec):", "body": "return spec['']", "docstring": "Returns the path inside the docker container that a spec (for an app or lib) says it wants\n to live at", "id": "f3155:m7"} {"signature": "def _get_app_libs_volume_mounts(app_name, assembled_specs):", "body": "volumes = []for lib_name in assembled_specs[''][app_name]['']['']:lib_spec = assembled_specs[''][lib_name]volumes.append(\"\".format(Repo(lib_spec['']).vm_path, container_code_path(lib_spec)))return volumes", "docstring": "Returns a list of the formatted volume mounts for all libs that an app uses", "id": "f3155:m8"} {"signature": "def _compose_dict_for_nginx(port_specs):", "body": "spec = {'': constants.NGINX_IMAGE,'': [''.format(constants.NGINX_CONFIG_DIR_IN_VM, constants.NGINX_CONFIG_DIR_IN_CONTAINER)],'': '','': ''.format(constants.DUSTY_NGINX_NAME)}all_host_ports = set([nginx_spec[''] for nginx_spec in port_specs['']])if all_host_ports:spec[''] = []for port in all_host_ports:spec[''].append(''.format(port))return {constants.DUSTY_NGINX_NAME: spec}", "docstring": "Return a dictionary containing the Compose spec required to run\n Dusty's nginx container used for host forwarding.", "id": "f3156:m1"} {"signature": "def 
get_compose_dict(assembled_specs, port_specs):", "body": "compose_dict = _compose_dict_for_nginx(port_specs)for app_name in assembled_specs[''].keys():compose_dict[app_name] = _composed_app_dict(app_name, assembled_specs, port_specs)for service_spec in assembled_specs[''].values():compose_dict[service_spec.name] = _composed_service_dict(service_spec)return compose_dict", "docstring": "This function returns a dictionary representation of a docker-compose.yml file, based on assembled_specs from\n the spec_assembler, and port_specs from the port_spec compiler", "id": "f3156:m2"} {"signature": "def _conditional_links(assembled_specs, app_name):", "body": "link_to_apps = []potential_links = assembled_specs[''][app_name]['']for potential_link in potential_links['']:if potential_link in assembled_specs['']:link_to_apps.append(potential_link)for potential_link in potential_links['']:if potential_link in assembled_specs['']:link_to_apps.append(potential_link)return link_to_apps", "docstring": "Given the assembled specs and app_name, this function will return all apps and services specified in\n 'conditional_links' if they are specified in 'apps' or 'services' in assembled_specs. That means that\n some other part of the system has declared them as necessary, so they should be linked to this app", "id": "f3156:m4"} {"signature": "def _get_build_path(app_spec):", "body": "if os.path.isabs(app_spec['']):return app_spec['']return os.path.join(Repo(app_spec['']).local_path, app_spec[''])", "docstring": "Given a spec for an app, returns the value of the `build` field for docker-compose.\n If the path is relative, it is expanded and added to the path of the app's repo.", "id": "f3156:m5"} {"signature": "def _composed_app_dict(app_name, assembled_specs, port_specs):", "body": "logging.info(\"\".format(app_name))app_spec = assembled_specs[''][app_name]compose_dict = app_spec[\"\"]_apply_env_overrides(env_overrides_for_app_or_service(app_name), compose_dict)if '' in app_spec and '' in app_spec:raise RuntimeError(\"\".format(app_name))elif '' in app_spec:logging.infocompose_dict[''] = app_spec['']elif '' in app_spec:compose_dict[''] = _get_build_path(app_spec)else:raise RuntimeError(\"\".format(app_name))compose_dict[''] = []compose_dict[''] = _compile_docker_command(app_spec)compose_dict[''] = \"\".format(app_name)logging.info(\"\".format(compose_dict['']))compose_dict[''] = _links_for_app(app_spec, assembled_specs)logging.info(\"\".format(compose_dict['']))compose_dict[''] = compose_dict[''] + _get_compose_volumes(app_name, assembled_specs)logging.info(\"\".format(compose_dict['']))port_list = _get_ports_list(app_name, port_specs)if port_list:compose_dict[''] = port_listlogging.info(\"\".format(port_list))compose_dict[''] = ''return compose_dict", "docstring": "This function returns a dictionary of the docker-compose.yml specifications for one app", "id": "f3156:m10"} {"signature": "def _composed_service_dict(service_spec):", "body": "compose_dict = service_spec.plain_dict()_apply_env_overrides(env_overrides_for_app_or_service(service_spec.name), compose_dict)compose_dict.setdefault('', []).append(_get_cp_volume_mount(service_spec.name))compose_dict[''] = \"\".format(service_spec.name)return compose_dict", "docstring": "This function returns a dictionary of the docker_compose specifications\n for one service. 
Currently, this is just the Dusty service spec with\n an additional volume mount to support Dusty's cp functionality.", "id": "f3156:m11"} {"signature": "def _get_ports_list(app_name, port_specs):", "body": "if app_name not in port_specs['']:return []return [\"\".format(port_spec[''], port_spec[''])for port_spec in port_specs[''][app_name]]", "docstring": "Returns a list of formatted port mappings for an app", "id": "f3156:m12"} {"signature": "def _get_compose_volumes(app_name, assembled_specs):", "body": "volumes = []volumes.append(_get_cp_volume_mount(app_name))volumes += get_app_volume_mounts(app_name, assembled_specs)return volumes", "docstring": "This returns formatted volume specifications for a docker-compose app. We mount the app\n as well as any libs it needs so that local code is used in our container, instead of whatever\n code was in the docker image.\n\n Additionally, we create a volume for the /cp directory used by Dusty to facilitate\n easy file transfers using `dusty cp`.", "id": "f3156:m13"} {"signature": "def _get_dependent(dependent_type, name, specs, root_spec_type):", "body": "spec = specs[root_spec_type].get(name)if spec is None:raise RuntimeError(\"\".format(root_spec_type, name))dependents = spec[''][dependent_type]all_dependents = set(dependents)for dep in dependents:all_dependents |= _get_dependent(dependent_type, dep, specs, dependent_type)return all_dependents", "docstring": "Returns everything of type that , of type depends on\nNames only are returned in a set", "id": "f3157:m0"} {"signature": "def _get_referenced_apps(specs):", "body": "activated_bundles = specs[constants.CONFIG_BUNDLES_KEY].keys()all_active_apps = set()for active_bundle in activated_bundles:bundle_spec = specs[constants.CONFIG_BUNDLES_KEY].get(active_bundle)for app_name in bundle_spec['']:all_active_apps.add(app_name)all_active_apps |= _get_dependent('', app_name, specs, '')return all_active_apps", "docstring": "Returns a set of all apps that are required to run any bundle in specs[constants.CONFIG_BUNDLES_KEY]", "id": "f3157:m2"} {"signature": "def _expand_libs_in_apps(specs):", "body": "for app_name, app_spec in specs[''].iteritems():if '' in app_spec and '' in app_spec['']:app_spec[''][''] = _get_dependent('', app_name, specs, '')", "docstring": "Expands specs.apps.depends.libs to include any indirectly required libs", "id": "f3157:m3"} {"signature": "def _expand_libs_in_libs(specs):", "body": "for lib_name, lib_spec in specs[''].iteritems():if '' in lib_spec and '' in lib_spec['']:lib_spec[''][''] = _get_dependent('', lib_name, specs, '')", "docstring": "Expands specs.libs.depends.libs to include any indirectly required libs", "id": "f3157:m4"} {"signature": "def _get_referenced_libs(specs):", "body": "active_libs = set()for app_spec in specs[''].values():for lib in app_spec['']['']:active_libs.add(lib)return active_libs", "docstring": "Returns all libs that are referenced in specs.apps.depends.libs", "id": "f3157:m5"} {"signature": "def _get_referenced_services(specs):", "body": "active_services = set()for app_spec in specs[''].values():for service in app_spec['']['']:active_services.add(service)for bundle_spec in specs[''].values():for service in bundle_spec['']:active_services.add(service)return active_services", "docstring": "Returns all services that are referenced in specs.apps.depends.services,\nor in specs.bundles.services", "id": "f3157:m6"} {"signature": "def _add_active_assets(specs):", "body": "specs[''] = {}for spec in specs.get_apps_and_libs():for asset in spec['']:if not 
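_get_build_path above resolves the Compose build directory: absolute paths are used untouched, relative ones are joined onto the app repo's local checkout. A small sketch; repo_local_path stands in for Repo(...).local_path and the 'build' key is my reading of the stripped literal:

import os

def get_build_path(app_spec, repo_local_path):
    # Absolute build paths win; otherwise resolve relative to the repo checkout.
    if os.path.isabs(app_spec['build']):
        return app_spec['build']
    return os.path.join(repo_local_path, app_spec['build'])

print(get_build_path({'build': 'docker/web'}, '/repos/my-app'))  # /repos/my-app/docker/web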
specs[''].get(asset['']):specs[''][asset['']] = {}specs[''][asset['']][''] = set()specs[''][asset['']][''] = set()specs[''][asset['']][''].add(spec.name)if asset['']:specs[''][asset['']][''].add(spec.name)", "docstring": "This function adds an assets key to the specs, which is filled in with a dictionary\nof all assets defined by apps and libs in the specs", "id": "f3157:m8"} {"signature": "def _get_expanded_active_specs(specs):", "body": "_filter_active(constants.CONFIG_BUNDLES_KEY, specs)_filter_active('', specs)_expand_libs_in_apps(specs)_filter_active('', specs)_filter_active('', specs)_add_active_assets(specs)", "docstring": "This function removes any unnecessary bundles, apps, libs, and services that aren't needed by\nthe activated_bundles. It also expands inside specs.apps.depends.libs all libs that are needed\nindirectly by each app", "id": "f3157:m9"} {"signature": "def get_repo_of_app_or_library(app_or_library_name):", "body": "specs = get_specs()repo_name = specs.get_app_or_lib(app_or_library_name)['']if not repo_name:return Nonereturn Repo(repo_name)", "docstring": "This function takes an app or library name and will return the corresponding repo\n for that app or library", "id": "f3157:m16"} {"signature": "def get_same_container_repos_from_spec(app_or_library_spec):", "body": "repos = set()app_or_lib_repo = get_repo_of_app_or_library(app_or_library_spec.name)if app_or_lib_repo is not None:repos.add(app_or_lib_repo)for dependent_name in app_or_library_spec['']['']:repos.add(get_repo_of_app_or_library(dependent_name))return repos", "docstring": "Given the spec of an app or library, returns all repos that are guaranteed\n to live in the same container", "id": "f3157:m19"} {"signature": "def get_same_container_repos(app_or_library_name):", "body": "specs = get_expanded_libs_specs()spec = specs.get_app_or_lib(app_or_library_name)return get_same_container_repos_from_spec(spec)", "docstring": "Given the name of an app or library, returns all repos that are guaranteed\n to live in the same container", "id": "f3157:m20"} {"signature": "@contextmanagerdef parallel_task_queue(pool_size=multiprocessing.cpu_count()):", "body": "task_queue = TaskQueue(pool_size)yield task_queuetask_queue.execute()", "docstring": "Context manager for setting up a TaskQueue. Upon leaving the\n context manager, all tasks that were enqueued will be executed\n in parallel subject to `pool_size` concurrency constraints.", "id": "f3158:m0"} {"signature": "@contextlib.contextmanagerdef streaming_to_client():", "body": "for handler in client_logger.handlers:if hasattr(handler, ''):breakelse:handler = Noneold_propagate = client_logger.propagateclient_logger.propagate = Falseif handler is not None:old_append = handler.append_newlineshandler.append_newlines = Falseyieldclient_logger.propagate = old_propagateif handler is not None:handler.append_newlines = old_append", "docstring": "Puts the client logger into streaming mode, which sends\n unbuffered input through to the socket one character at a time.\n We also disable propagation so the root logger does not\n receive many one-byte emissions. 
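_get_dependent above expands dependencies transitively by recursing over the depends graph and unioning the results into one set. A self-contained sketch with a toy spec structure; the 'depends' key and the spec layout are my reconstruction of the stripped literals:

def get_dependent(dependent_type, name, specs, root_spec_type):
    # Direct dependencies of `name`, plus everything they depend on, as a set.
    spec = specs[root_spec_type].get(name)
    if spec is None:
        raise RuntimeError('{} {} is not defined'.format(root_spec_type, name))
    dependents = spec['depends'][dependent_type]
    all_dependents = set(dependents)
    for dep in dependents:
        all_dependents |= get_dependent(dependent_type, dep, specs, dependent_type)
    return all_dependents

specs = {
    'apps': {'web': {'depends': {'libs': ['utils']}}},
    'libs': {'utils': {'depends': {'libs': ['logging']}},
             'logging': {'depends': {'libs': []}}},
}
print(get_dependent('libs', 'web', specs, 'apps'))  # {'utils', 'logging'}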
This context handler\n was originally created for streaming Compose up's\n terminal output through to the client and should only be\n used for similarly complex circumstances.", "id": "f3160:m5"} {"signature": "def nfs_path_exists(path):", "body": "split_path = path.lstrip('').split('')recreated_path = ''for path_element in split_path:if path_element not in os.listdir(recreated_path):return Falserecreated_path = \"\".format(recreated_path, path_element)return True", "docstring": "The normal HFS file system that your mac uses does not work the same way\nas the NFS file system. In HFS, capitalization does not matter, but in\nNFS it does. This function checks if a folder exists in HFS file system\nusing NFS semantics (case sensitive)", "id": "f3169:m1"} {"signature": "@daemon_commanddef update_managed_repos(force=False):", "body": "log_to_client('')update_specs_repo_and_known_hosts()repos_to_update = get_all_repos(active_only=True, include_specs_repo=False)with parallel_task_queue() as queue:log_to_client('')for repo in repos_to_update:if not repo.is_overridden:repo.update_local_repo_async(queue, force=force)", "docstring": "For any active, managed repos, update the Dusty-managed\n copy to bring it up to date with the latest master.", "id": "f3169:m9"} {"signature": "def _move_temp_binary_to_path(tmp_binary_path):", "body": "binary_path = _get_binary_location()if not binary_path.endswith(constants.DUSTY_BINARY_NAME):raise RuntimeError(''.format(binary_path))st = os.stat(binary_path)permissions = st.st_modeowner = st.st_uidgroup = st.st_gidshutil.move(tmp_binary_path, binary_path)os.chown(binary_path, owner, group)os.chmod(binary_path, permissions)return binary_path", "docstring": "Moves the temporary binary to the location of the binary that's currently being run.\n Preserves owner, group, and permissions of original binary", "id": "f3170:m5"} {"signature": "@daemon_commanddef prep_for_start_local_env(pull_repos):", "body": "if pull_repos:update_managed_repos(force=True)assembled_spec = spec_assembler.get_assembled_specs()if not assembled_spec[constants.CONFIG_BUNDLES_KEY]:raise RuntimeError('')virtualbox.initialize_docker_vm()", "docstring": "Daemon-side command to ensure we're running the latest\n versions of any managed repos, including the\n specs repo, before we do anything else in the up flow.", "id": "f3172:m0"} {"signature": "def log_in_to_required_registries():", "body": "registries = set()specs = spec_assembler.get_assembled_specs()for spec in specs.get_apps_and_services():if '' in spec and spec.get(''):registries.add(registry_from_image(spec['']))unauthed_registries = registries.difference(get_authed_registries())for registry in unauthed_registries:log_in_to_registry(registry)", "docstring": "Client-side command which runs the user through a login flow\n (via the Docker command-line client so auth is persisted)\n for any registries of active images which require a login. 
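parallel_task_queue above is a context manager that collects work while the with-block runs and executes it all, with bounded concurrency, on exit. A minimal thread-based sketch of the same shape; the real TaskQueue is Dusty's own class and its pool size defaults to the CPU count:

from contextlib import contextmanager
from multiprocessing.pool import ThreadPool

class TaskQueue(object):
    def __init__(self, pool_size):
        self.pool_size = pool_size
        self._tasks = []

    def enqueue_task(self, fn, *args, **kwargs):
        self._tasks.append((fn, args, kwargs))

    def execute(self):
        # Run every enqueued task, at most pool_size at a time.
        pool = ThreadPool(self.pool_size)
        try:
            pool.map(lambda t: t[0](*t[1], **t[2]), self._tasks)
        finally:
            pool.close()
            pool.join()

@contextmanager
def parallel_task_queue(pool_size=4):
    task_queue = TaskQueue(pool_size)
    yield task_queue        # caller enqueues work inside the with-block
    task_queue.execute()    # everything runs once the block exits

with parallel_task_queue(pool_size=2) as queue:
    for name in ('repo-a', 'repo-b', 'repo-c'):
        queue.enqueue_task(print, 'updating', name)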
This\n is based on the `image_requires_login` key in the individual specs.", "id": "f3172:m1"} {"signature": "@daemon_commanddef start_local_env(recreate_containers):", "body": "assembled_spec = spec_assembler.get_assembled_specs()required_absent_assets = virtualbox.required_absent_assets(assembled_spec)if required_absent_assets:raise RuntimeError(''.format(required_absent_assets))docker_ip = virtualbox.get_docker_vm_ip()if os.path.exists(constants.COMPOSEFILE_PATH):try:stop_apps_or_services(rm_containers=recreate_containers)except CalledProcessError as e:log_to_client(\"\")log_to_client(str(e))daemon_warnings.clear_namespace('')df_info = virtualbox.get_docker_vm_disk_info(as_dict=True)if '' in df_info[''] or '' in df_info['']:warning_msg = ''.format(df_info[''])daemon_warnings.warn('', warning_msg)log_to_client(warning_msg)log_to_client(\"\")active_repos = spec_assembler.get_all_repos(active_only=True, include_specs_repo=False)log_to_client(\"\")port_spec = port_spec_compiler.get_port_spec_document(assembled_spec, docker_ip)log_to_client(\"\")docker_bridge_ip = virtualbox.get_docker_bridge_ip()nginx_config = nginx_compiler.get_nginx_configuration_spec(port_spec, docker_bridge_ip)log_to_client(\"\")make_up_command_files(assembled_spec, port_spec)log_to_client(\"\")compose_config = compose_compiler.get_compose_dict(assembled_spec, port_spec)log_to_client(\"\")hosts.update_hosts_file_from_port_spec(port_spec)log_to_client(\"\")nfs.configure_nfs()log_to_client(\"\")nginx.update_nginx_from_config(nginx_config)log_to_client(\"\")compose.update_running_containers_from_spec(compose_config, recreate_containers=recreate_containers)log_to_client(\"\")", "docstring": "This command will use the compilers to get compose specs\n will pass those specs to the systems that need them. Those\n systems will in turn launch the services needed to make the\n local environment go.", "id": "f3172:m2"} {"signature": "@daemon_commanddef stop_apps_or_services(app_or_service_names=None, rm_containers=False):", "body": "if app_or_service_names:log_to_client(\"\".format(''.join(app_or_service_names)))else:log_to_client(\"\")compose.stop_running_services(app_or_service_names)if rm_containers:compose.rm_containers(app_or_service_names)", "docstring": "Stop any currently running Docker containers associated with\n Dusty, or associated with the provided apps_or_services. 
Does not remove\n the service's containers.", "id": "f3172:m3"} {"signature": "@daemon_commanddef restart_apps_or_services(app_or_service_names=None):", "body": "if app_or_service_names:log_to_client(\"\".format(''.join(app_or_service_names)))else:log_to_client(\"\")if app_or_service_names:specs = spec_assembler.get_assembled_specs()specs_list = [specs[''][app_name] for app_name in app_or_service_names if app_name in specs['']]repos = set()for spec in specs_list:if spec['']:repos = repos.union(spec_assembler.get_same_container_repos_from_spec(spec))nfs.update_nfs_with_repos(repos)else:nfs.update_nfs_with_repos(spec_assembler.get_all_repos(active_only=True, include_specs_repo=False))compose.restart_running_services(app_or_service_names)", "docstring": "Restart any containers associated with Dusty, or associated with\n the provided app_or_service_names.", "id": "f3172:m4"} {"signature": "def _assert_app_or_service_exists(app_or_service):", "body": "get_specs().get_app_or_service(app_or_service)", "docstring": "get_app_or_service will throw a KeyError if the app_or_service\nis not found.", "id": "f3174:m1"} {"signature": "def _env_vars_from_file(filename):", "body": "def split_env(env):if '' in env:return env.split('', )else:return env, Noneenv = {}for line in open(filename, ''):line = line.strip()if line and not line.startswith(''):k, v = split_env(line)env[k] = vreturn env", "docstring": "This code is copied from Docker Compose, so that we're exactly compatible\nwith their `env_file` option", "id": "f3174:m4"} {"signature": "def validate_specs_from_path(specs_path):", "body": "log_to_client(\"\".format(specs_path))if not os.path.exists(specs_path):raise RuntimeError(\"\".format(specs_path))specs = get_specs_from_path(specs_path)_check_bare_minimum(specs)_validate_spec_names(specs)_validate_cycle_free(specs)log_to_client(\"\")", "docstring": "Validates Dusty specs at the given path. 
The following checks are performed:\n -That the given path exists\n -That there are bundles in the given path\n -That the fields in the specs match those allowed in our schemas\n -That references to apps, libs, and services point at defined specs\n -That there are no cycles in app and lib dependencies", "id": "f3176:m9"} {"signature": "@daemon_commanddef validate_specs():", "body": "validate_specs_from_path(get_specs_path())", "docstring": "Validates specs using the path configured in Dusty's configuration", "id": "f3176:m10"} {"signature": "def pty_fork(*args):", "body": "updated_env = copy(os.environ)updated_env.update(get_docker_env())args += (updated_env,)executable = args[]demote_fn = demote_to_user(get_config_value(constants.CONFIG_MAC_USERNAME_KEY))child_pid, pty_fd = pty.fork()if child_pid == :demote_fn()os.execle(_executable_path(executable), *args)else:child_process = psutil.Process(child_pid)terminal = os.fdopen(pty_fd, '', )with streaming_to_client():while child_process.status() == '':output = terminal.read()log_to_client(output)_, exit_code = os.waitpid(child_pid, )if exit_code != :raise subprocess.CalledProcessError(exit_code, ''.join(args[:-]))", "docstring": "Runs a subprocess with a PTY attached via fork and exec.\n The output from the PTY is streamed through log_to_client.\n This should not be necessary for most subprocesses, we\n built this to handle Compose up which only streams pull\n progress if it is attached to a TTY.", "id": "f3177:m3"} {"signature": "@contextmanagerdef _cleanup_path(path):", "body": "try:yieldfinally:if os.path.exists(path):if os.path.isdir(path):shutil.rmtree(path)else:os.remove(path)", "docstring": "Recursively delete a path upon exiting this context\n manager. Supports targets that are files or directories.", "id": "f3179:m0"} {"signature": "@daemon_commanddef copy_between_containers(source_name, source_path, dest_name, dest_path):", "body": "if not container_path_exists(source_name, source_path):raise RuntimeError(''.format(source_path, source_name))temp_path = os.path.join(tempfile.mkdtemp(), str(uuid.uuid1()))with _cleanup_path(temp_path):copy_to_local(temp_path, source_name, source_path, demote=False)copy_from_local(temp_path, dest_name, dest_path, demote=False)", "docstring": "Copy a file from the source container to an intermediate staging\n area on the local filesystem, then from that staging area to the\n destination container.\n\n These moves take place without demotion for two reasons:\n 1. There should be no permissions vulnerabilities with copying\n between containers because it is assumed the non-privileged\n user has full access to all Dusty containers.\n 2. 
The temp dir created by mkdtemp is owned by the owner of the\n Dusty daemon process, so if we demoted our moves to/from that location\n they would encounter permission errors.", "id": "f3179:m1"} {"signature": "@daemon_commanddef copy_from_local(local_path, remote_name, remote_path, demote=True):", "body": "if not os.path.exists(local_path):raise RuntimeError(''.format(local_path))temp_identifier = str(uuid.uuid1())if os.path.isdir(local_path):sync_local_path_to_vm(local_path, os.path.join(vm_cp_path(remote_name), temp_identifier), demote=demote)move_dir_inside_container(remote_name, os.path.join(constants.CONTAINER_CP_DIR, temp_identifier), remote_path)else:sync_local_path_to_vm(local_path, os.path.join(vm_cp_path(remote_name), temp_identifier), demote=demote)move_file_inside_container(remote_name, os.path.join(constants.CONTAINER_CP_DIR, temp_identifier), remote_path)", "docstring": "Copy a path from the local filesystem to a path inside a Dusty\n container. The files on the local filesystem must be accessible\n by the user specified in mac_username.", "id": "f3179:m2"} {"signature": "@daemon_commanddef copy_to_local(local_path, remote_name, remote_path, demote=True):", "body": "if not container_path_exists(remote_name, remote_path):raise RuntimeError(''.format(remote_path, remote_name))temp_identifier = str(uuid.uuid1())copy_path_inside_container(remote_name, remote_path, os.path.join(constants.CONTAINER_CP_DIR, temp_identifier))vm_path = os.path.join(vm_cp_path(remote_name), temp_identifier)is_dir = vm_path_is_directory(vm_path)sync_local_path_from_vm(local_path, vm_path, demote=demote, is_dir=is_dir)", "docstring": "Copy a path from inside a Dusty container to a path on the\n local filesystem. The path on the local filesystem must be\n write-accessible by the user specified in mac_username.", "id": "f3179:m3"} {"signature": "@classmethoddef resolve(cls, all_known_repos, name):", "body": "match = Nonefor repo in all_known_repos:if repo.remote_path == name: return repoif name == repo.short_name:if match is None:match = repoelse:raise RuntimeError(''.format(name,match.remote_path,repo.remote_path))if match is None:raise RuntimeError(''.format(name))return match", "docstring": "We require the list of all remote repo paths to be passed in\n to this because otherwise we would need to import the spec assembler\n in this module, which would give us circular imports.", "id": "f3182:c0:m3"} {"signature": "def ensure_local_repo(self):", "body": "if os.path.exists(self.managed_path):logging.debug(''.format(self.remote_path))returnlogging.info(''.format(self.remote_path))repo_path_parent = parent_dir(self.managed_path)if not os.path.exists(repo_path_parent):os.makedirs(repo_path_parent)with git_error_handling():git.Repo.clone_from(self.assemble_remote_path(), self.managed_path)", "docstring": "Given a Dusty repo object, clone the remote into Dusty's local repos\n directory if it does not already exist.", "id": "f3182:c0:m14"} {"signature": "def update_local_repo(self, force=False):", "body": "self.ensure_local_repo()logging.info(''.format(self.remote_path))managed_repo = git.Repo(self.managed_path)with git_error_handling():managed_repo.remote().pull('')log_to_client(''.format(self.remote_path))if not self.local_is_up_to_date():if force:with git_error_handling():managed_repo.git.reset('', '')else:log_to_client(''''''''.format(self.managed_path))", "docstring": "Given a remote path (e.g. 
github.com/gamechanger/gclib), pull the latest\n commits from master to bring the local copy up to date.", "id": "f3182:c0:m18"} {"signature": "def update_local_repo_async(self, task_queue, force=False):", "body": "self.ensure_local_repo()task_queue.enqueue_task(self.update_local_repo, force=force)", "docstring": "Local repo updating suitable for asynchronous, parallel execution.\n We still need to run `ensure_local_repo` synchronously because it\n does a bunch of non-threadsafe filesystem operations.", "id": "f3182:c0:m19"} {"signature": "def memoized(fn):", "body": "@functools.wraps(fn)def memoizer(*args, **kwargs):key = function_key(fn) + pickle.dumps(args) + pickle.dumps(_hash_kwargs(kwargs))if key not in cache:cache[key] = fn(*args, **kwargs)return cache[key]return memoizer", "docstring": "Decorator. Caches a function's return value each time it is called.\nIf called later with the same arguments, the cached value is returned\n(not reevaluated). The cache lasts for the duration of each request.", "id": "f3183:m1"} {"signature": "def init_yaml_constructor():", "body": "def utf_encoding_string_constructor(loader, node):return loader.construct_scalar(node).encode('')yaml.SafeLoader.add_constructor(u'', utf_encoding_string_constructor)", "docstring": "This dark magic is used to make yaml.safe_load encode all strings as utf-8,\nwhere otherwise python unicode strings would be returned for non-ascii chars", "id": "f3207:m3"} {"signature": "def close_client_connection(terminator=SOCKET_TERMINATOR):", "body": "try:connection.sendall(terminator)finally:close_socket_logger()connection.close()", "docstring": "This function allows downstream functions to close the connection with the client.\n This is necessary for the upgrade command, where execvp replaces the process before\n the main daemon loop can close the client connection", "id": "f3208:m6"} {"signature": "def _increase_file_handle_limit():", "body": "logging.info(''.format(constants.FILE_HANDLE_LIMIT))resource.setrlimit(resource.RLIMIT_NOFILE,(constants.FILE_HANDLE_LIMIT, resource.RLIM_INFINITY))", "docstring": "Raise the open file handles permitted by the Dusty daemon process\n and its child processes. 
The number we choose here needs to be within\n the OS X default kernel hard limit, which is 10240.", "id": "f3208:m7"} {"signature": "def _start_http_server():", "body": "logging.info(''.format(constants.DAEMON_HTTP_BIND_IP,constants.DAEMON_HTTP_BIND_PORT))thread = threading.Thread(target=http_server.app.run, args=(constants.DAEMON_HTTP_BIND_IP,constants.DAEMON_HTTP_BIND_PORT))thread.daemon = Truethread.start()", "docstring": "Start the daemon's HTTP server on a separate thread.\n This server is only used for servicing container status\n requests from Dusty's custom 502 page.", "id": "f3208:m9"} {"signature": "@app.route('', methods=[''])def register_consumer():", "body": "global _consumershostname, port = request.form[''], request.form['']app_name = _app_name_from_forwarding_info(hostname, port)containers = get_dusty_containers([app_name], include_exited=True)if not containers:raise ValueError(''.format(app_name))container = containers[]new_id = uuid1()new_consumer = Consumer(container[''], datetime.utcnow())_consumers[str(new_id)] = new_consumerresponse = jsonify({'': app_name, '': new_id})response.headers[''] = ''response.headers[''] = ''return response", "docstring": "Given a hostname and port attempting to be accessed,\n return a unique consumer ID for accessing logs from\n the referenced container.", "id": "f3209:m2"} {"signature": "@app.route('', methods=[''])def consume(consumer_id):", "body": "global _consumersconsumer = _consumers[consumer_id]client = get_docker_client()try:status = client.inspect_container(consumer.container_id)['']['']except Exception as e:status = ''new_logs = client.logs(consumer.container_id,stdout=True,stderr=True,stream=False,timestamps=False,since=calendar.timegm(consumer.offset.timetuple()))updated_consumer = Consumer(consumer.container_id, datetime.utcnow())_consumers[str(consumer_id)] = updated_consumerresponse = jsonify({'': new_logs, '': status})response.headers[''] = ''response.headers[''] = ''return response", "docstring": "Given an existing consumer ID, return any new lines from the\n log since the last time the consumer was consumed.", "id": "f3209:m3"} {"signature": "def _cmp(self, other):", "body": "if not isinstance(other, Version):other = Version(other)num1 = self.version_numsnum2 = other.version_numsver_len = max(len(num1), len(num2))num1 += tuple([ for n in range(len(num1), ver_len)])num2 += tuple([ for n in range(len(num2), ver_len)])for (p1, p2) in zip(num1, num2):if p1 < p2:return -elif p1 > p2:return if self.version_extra is None:if other.version_extra is None:return else:return -elif other.version_extra is None:return elif self.version_extra == other.version_extra:return elif self.version_extra < other.version_extra:return -else:return ", "docstring": "Compare two Project Haystack version strings, then return\n -1 if self < other,\n 0 if self == other\n or 1 if self > other.", "id": "f3225:c0:m2"} {"signature": "@classmethoddef nearest(self, ver):", "body": "if not isinstance(ver, Version):ver = Version(ver)if ver in OFFICIAL_VERSIONS:return verversions = list(OFFICIAL_VERSIONS)versions.sort(reverse=True)best = Nonefor candidate in versions:if candidate == ver:return candidateif (best is None) and (candidate < ver):warnings.warn(''''''% (ver, candidate))return candidateif candidate > ver:best = candidateassert best is not Nonewarnings.warn(''''''% (ver, best))return best", "docstring": "Retrieve the official version nearest the one given.", "id": "f3225:c0:m10"} {"signature": "def parse(grid_str, mode=MODE_ZINC, charset=''):", "body": 
"if isinstance(grid_str, six.binary_type):grid_str = grid_str.decode(encoding=charset)_parse = functools.partial(parse_grid, mode=mode,charset=charset)if mode == MODE_JSON:if isinstance(grid_str, six.string_types):grid_data = json.loads(grid_str)else:grid_data = grid_strif isinstance(grid_data, dict):return _parse(grid_data)else:return list(map(_parse, grid_data))else:return list(map(_parse, GRID_SEP.split(grid_str.rstrip())))", "docstring": "Parse the given Zinc text and return the equivalent data.", "id": "f3226:m0"} {"signature": "def __init__(self, version=None, metadata=None, columns=None):", "body": "version_given = version is not Noneif version_given:version = Version(version)else:version = VER_2_0self._version = versionself._version_given = version_givenself.metadata = MetadataObject(validate_fn=self._detect_or_validate)self.column = SortableDict()self._row = []if metadata is not None:self.metadata.update(metadata.items())if columns is not None:if isinstance(columns, dict) or isinstance(columns, SortableDict):columns = list(columns.items())for col_id, col_meta in columns:if isinstance(col_meta, dict) orisinstance(col_meta, SortableDict):col_meta = list(col_meta.items())mo = MetadataObject(validate_fn=self._detect_or_validate)mo.extend(col_meta)self.column.add_item(col_id, mo)", "docstring": "Create a new Grid.", "id": "f3227:c0:m0"} {"signature": "def __repr__(self): ", "body": "parts = [u'' % self.ver_str]if bool(self.metadata):parts.append(u'' % self.metadata)column_meta = []for col, col_meta in self.column.items():if bool(col_meta):column_meta.append(u'' % (col, col_meta))else:column_meta.append(u'' % col)if bool(column_meta):parts.append(u'' % ''.join(column_meta))elif len(self.column):parts.append(u'' % ''.join(self.column.keys()))else:parts.append(u'')if bool(self):parts.extend([u'' % (row, u''.join([((u'' % (col, data[col]))if col in data else(u'' % col)) for colin self.column.keys()]))for (row, data) in enumerate(self)])else:parts.append(u'')class_name = self.__class__.__name__return u'' % (class_name, u''.join(parts), class_name)", "docstring": "Return a representation of this grid.", "id": "f3227:c0:m4"} {"signature": "def __getitem__(self, index):", "body": "return self._row[index]", "docstring": "Retrieve the row at index.", "id": "f3227:c0:m5"} {"signature": "def __len__(self):", "body": "return len(self._row)", "docstring": "Return the number of rows in the grid.", "id": "f3227:c0:m6"} {"signature": "def __setitem__(self, index, value):", "body": "if not isinstance(value, dict):raise TypeError('')for val in value.values():self._detect_or_validate(val)self._row[index] = value", "docstring": "Replace the row at index.", "id": "f3227:c0:m7"} {"signature": "def __delitem__(self, index):", "body": "del self._row[index]", "docstring": "Delete the row at index.", "id": "f3227:c0:m8"} {"signature": "def insert(self, index, value):", "body": "if not isinstance(value, dict):raise TypeError('')for val in value.values():self._detect_or_validate(val)self._row.insert(index, value)", "docstring": "Insert a new row before index.", "id": "f3227:c0:m9"} {"signature": "def _detect_or_validate(self, val):", "body": "if isinstance(val, list)or isinstance(val, dict)or isinstance(val, SortableDict)or isinstance(val, Grid):self._assert_version(VER_3_0)", "docstring": "Detect the version used from the row content, or validate against\nthe version if given.", "id": "f3227:c0:m10"} {"signature": "def _assert_version(self, version):", "body": "if self.nearest_version < version:if 
self._version_given:raise ValueError(''% version)else:self._version = version", "docstring": "Assert that the grid version is equal to or above the given value.\nIf no version is set, set the version.", "id": "f3227:c0:m11"} {"signature": "def dump(grids, mode=MODE_ZINC):", "body": "if isinstance(grids, Grid):return dump_grid(grids, mode=mode)_dump = functools.partial(dump_grid, mode=mode)if mode == MODE_ZINC:return ''.join(map(_dump, grids))elif mode == MODE_JSON:return '' % ''.join(map(_dump, grids))else: raise NotImplementedError('' % mode)", "docstring": "Dump the given grids in the specified over-the-wire format.", "id": "f3228:m0"} {"signature": "def add_item(self, key, value, after=False, index=None, pos_key=None,replace=True):", "body": "if self._validate_fn:self._validate_fn(value)if (index is not None) and (pos_key is not None):raise ValueError('')elif pos_key is not None:try:index = self.index(pos_key)except ValueError:raise KeyError('' % pos_key)if after and (index is not None):index += if key in self._values:if not replace:raise KeyError('' % key)if index is not None:del self[key]else:self._values[key] = valuereturnif index is not None:self._order.insert(index, key)else:self._order.append(key)self._values[key] = value", "docstring": "Add an item at a specific location, possibly replacing the\nexisting item.\n\nIf after is True, we insert *after* the given index, otherwise we\ninsert before.\n\nThe position is specified using either index or pos_key, the former\nspecifies the position from the start of the array (base 0). pos_key\nspecifies the name of another key, and positions the new key relative\nto that key.\n\nWhen replacing, the position will be left un-changed unless a location\nis specified explicitly.", "id": "f3230:c0:m7"} {"signature": "def at(self, index):", "body": "return self._order[index]", "docstring": "Return the key at the given index.", "id": "f3230:c0:m8"} {"signature": "def value_at(self, index):", "body": "return self[self.at(index)]", "docstring": "Return the value at the given index.", "id": "f3230:c0:m9"} {"signature": "def pop_at(self, index):", "body": "return self.pop(self.at(index))", "docstring": "Remove the key at the given index and return its value.", "id": "f3230:c0:m13"} {"signature": "def _map_timezones():", "body": "tz_map = {}todo = HAYSTACK_TIMEZONES_SET.copy()for full_tz in pytz.all_timezones:if not bool(todo): breakif full_tz in todo:tz_map[full_tz] = full_tz todo.discard(full_tz)continueif '' not in full_tz:continue(prefix, suffix) = full_tz.split('',)if '' in suffix:continueif suffix in todo:tz_map[suffix] = full_tztodo.discard(suffix)continuereturn tz_map", "docstring": "Map the official Haystack timezone list to those recognised by pytz.", "id": "f3233:m0"} {"signature": "def to_haystack(unit):", "body": "unit = str(unit)global HAYSTACK_CONVERSIONglobal PINT_CONVERSIONif unit == '' orunit == '' orunit == '' orunit == '' orunit == '' orunit == '' orunit == None:return ''for pint_value, haystack_value in PINT_CONVERSION:unit = unit.replace(pint_value, haystack_value)for haystack_value, pint_value in HAYSTACK_CONVERSION:if pint_value == '':continueunit = unit.replace(pint_value, haystack_value)return unit", "docstring": "Some parsing tweaks to fit pint units / handling of edge cases.", "id": "f3234:m0"} {"signature": "def to_pint(unit):", "body": "global HAYSTACK_CONVERSIONif unit == '' orunit == '' orunit == '' orunit == '' orunit == '' orunit == '' orunit == None:return ''for haystack_value, pint_value in HAYSTACK_CONVERSION:unit = 
unit.replace(haystack_value, pint_value)return unit", "docstring": "Some parsing tweaks to fit pint units / handling of edge cases.", "id": "f3234:m1"} {"signature": "def define_haystack_units():", "body": "ureg = UnitRegistry()ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')ureg.define('')return ureg", "docstring": "Missing units found in project-haystack\nAdded to the registry", "id": "f3234:m2"} {"signature": "def dump_grid(grid):", "body": "header = '' % dump_str(str(grid._version), version=grid._version)if bool(grid.metadata):header += '' + dump_meta(grid.metadata, version=grid._version)columns = dump_columns(grid.column, version=grid._version)rows = dump_rows(grid)return ''.join([header, columns] + rows + [''])", "docstring": "Dump a single grid to its ZINC representation.", "id": "f3236:m2"} {"signature": "def _unescape(s, uri=False):", "body": "out = ''while len(s) > :c = s[]if c == '':esc_c = s[]if esc_c in ('', ''):out += six.unichr(int(s[:], base=))s = s[:]continueelse:if esc_c == '':out += ''elif esc_c == '':out += ''elif esc_c == '':out += ''elif esc_c == '':out += ''elif esc_c == '':out += ''else:if uri and (esc_c == ''):out += ''out += esc_cs = s[:]continueelse:out += cs = s[:]return out", "docstring": "Iterative parser for string escapes.", "id": "f3237:m2"} {"signature": "def parse_grid(grid_data):", "body": "try:grid_parts = NEWLINE_RE.split(grid_data)if len(grid_parts) < :raise ZincParseException('',grid_data, , )grid_meta_str = grid_parts.pop()col_meta_str = grid_parts.pop()ver_match = VERSION_RE.match(grid_meta_str)if ver_match is None:raise ZincParseException('' % grid_meta_str,grid_data, , )version = Version(ver_match.group())try:grid_meta = hs_gridMeta[version].parseString(grid_meta_str, parseAll=True)[]except pp.ParseException as pe:raise ZincParseException('' % pe,grid_data, , pe.col)except: LOG.debug('', grid_meta_str)raisetry:col_meta = hs_cols[version].parseString(col_meta_str, parseAll=True)[]except pp.ParseException as pe:raise ZincParseException(''% reformat_exception(pe, ),grid_data, , pe.col)except: LOG.debug('', col_meta_str)raiserow_grammar = hs_row[version]def _parse_row(row_num_and_data):(row_num, row) = row_num_and_dataline_num = row_num + try:return dict(zip(col_meta.keys(),row_grammar.parseString(row, parseAll=True)[].asList()))except pp.ParseException as pe:raise ZincParseException(''% reformat_exception(pe, line_num),grid_data, line_num, pe.col)except: LOG.debug('', row)raiseg = Grid(version=grid_meta.pop(''),metadata=grid_meta,columns=list(col_meta.items()))g.extend(map(_parse_row, filter(lambda gp : bool(gp[]), enumerate(grid_parts))))return gexcept:LOG.debug('', grid_data)raise", "docstring": "Parse the incoming grid.", "id": "f3237:m6"} {"signature": "def parse_scalar(scalar_data, version):", "body": "try:return hs_scalar[version].parseString(scalar_data, 
parseAll=True)[]except pp.ParseException as pe:raise ZincParseException('' % reformat_exception(pe),scalar_data, , pe.col)except:LOG.debug('',scalar_data, version)", "docstring": "Parse a Project Haystack scalar in ZINC format.", "id": "f3237:m7"} {"signature": "def __getitem__(self, ver):", "body": "try:return self._known_grammars[ver]except KeyError:passnearest = Version.nearest(ver)g = self._known_grammars[nearest]self._known_grammars[ver] = greturn g", "docstring": "Retrieve the grammar that closest matches the version string given.", "id": "f3237:c1:m1"} {"signature": "def append(self, key, value=MARKER, replace=True):", "body": "return self.add_item(key, value, replace=replace)", "docstring": "Append the item to the metadata.", "id": "f3238:c0:m0"} {"signature": "def extend(self, items, replace=True):", "body": "if isinstance(items, dict) or isinstance(items, SortableDict):items = list(items.items())for (key, value) in items:self.append(key, value, replace=replace)", "docstring": "Append the items to the metadata.", "id": "f3238:c0:m1"} {"signature": "def with_tz(request):", "body": "dt = datetime.now() t = Template('') c = RequestContext(request)response = t.render(c)return HttpResponse(response)", "docstring": "Get the time with TZ enabled", "id": "f3241:m0"} {"signature": "def without_tz(request):", "body": "t = Template('') c = RequestContext(request)response = t.render(c)return HttpResponse(response)", "docstring": "Get the time without TZ enabled", "id": "f3241:m1"} {"signature": "def process_request(self, request):", "body": "if not request:returnif not db_loaded:load_db()tz = request.session.get('')if not tz:tz = timezone.get_default_timezone()client_ip = get_ip_address_from_request(request)ip_addrs = client_ip.split('')for ip in ip_addrs:if is_valid_ip(ip) and not is_local_ip(ip):if '' in ip:tz = db_v6.time_zone_by_addr(ip)breakelse:tz = db.time_zone_by_addr(ip)breakif tz:timezone.activate(tz)request.session[''] = str(tz)if getattr(settings, '', None) and getattr(request, '', None):detected_timezone.send(sender=get_user_model(), instance=request.user, timezone=tz)else:timezone.deactivate()", "docstring": "If we can get a valid IP from the request,\nlook up that address in the database to get the appropriate timezone\nand activate it.\n\nElse, use the default.", "id": "f3242:c0:m0"} {"signature": "def is_valid_ip(ip_address):", "body": "try:ip = ipaddress.ip_address(u'' + ip_address)return Trueexcept ValueError as e:return False", "docstring": "Check Validity of an IP address", "id": "f3244:m0"} {"signature": "def is_local_ip(ip_address):", "body": "try:ip = ipaddress.ip_address(u'' + ip_address)return ip.is_loopbackexcept ValueError as e:return None", "docstring": "Check if IP is local", "id": "f3244:m1"} {"signature": "def get_ip_address_from_request(request):", "body": "PRIVATE_IPS_PREFIX = ('', '', '', '')ip_address = ''x_forwarded_for = request.META.get('', '')if x_forwarded_for and '' not in x_forwarded_for:if not x_forwarded_for.startswith(PRIVATE_IPS_PREFIX) and is_valid_ip(x_forwarded_for):ip_address = x_forwarded_for.strip()else:ips = [ip.strip() for ip in x_forwarded_for.split('')]for ip in ips:if ip.startswith(PRIVATE_IPS_PREFIX):continueelif not is_valid_ip(ip):continueelse:ip_address = ipbreakif not ip_address:x_real_ip = request.META.get('', '')if x_real_ip:if not x_real_ip.startswith(PRIVATE_IPS_PREFIX) and is_valid_ip(x_real_ip):ip_address = x_real_ip.strip()if not ip_address:remote_addr = request.META.get('', '')if remote_addr:if not 
remote_addr.startswith(PRIVATE_IPS_PREFIX) and is_valid_ip(remote_addr):ip_address = remote_addr.strip()if not ip_address:ip_address = ''return ip_address", "docstring": "Makes the best attempt to get the client's real IP or return the loopback", "id": "f3244:m2"} {"signature": "def __init__(self, install=False, passthru=None):", "body": "self.entries = {}self._installed = Falseself.impostors = dict()self.originals = dict()self.vaporized = Noneself.fds = dict()self.passthru = passthru or []if self.passthru:if not morph.isseq(self.passthru):self.passthru = [self.passthru]self.passthru = [re.compile(expr) if morph.isstr(expr) else exprfor expr in self.passthru]self._makeImpostors()if install:self.install()", "docstring": ":Parameters:\n\ninstall : bool, optional, default: false\n\n Flag indicating whether or not this overlay should be\n installed upon instantiation.\n\npassthru : list({str, regex}), optional, default: none\n\n A regular expression (or list thereof) that will be matched\n against any file that is operated on; if it matches, no overlay\n will be applied, i.e. this list excludes a set of files. The\n specified regexes can be either strings or re.RegexObject\n instances. Note that these regexes will be given only the\n fully-dereferenced paths to be tested.", "id": "f3252:c4:m0"} {"signature": "def _stat(self, path):", "body": "if path not in self.entries:return OverlayStat(*self.originals[''](path)[:], st_overlay=)st = self.entries[path].statif stat.S_ISLNK(st.st_mode):return self._stat(self.deref(path))return st", "docstring": "IMPORTANT: expects `path`'s parent to already be deref()'erenced.", "id": "f3252:c4:m14"} {"signature": "def _lstat(self, path):", "body": "if path not in self.entries:return OverlayStat(*self.originals[''](path)[:], st_overlay=)return self.entries[path].stat", "docstring": "IMPORTANT: expects `path`'s parent to already be deref()'erenced.", "id": "f3252:c4:m15"} {"signature": "def fso_lstat(self, path):", "body": "return self.fso_anystat(path, link=True)", "docstring": "overlays os.lstat()", "id": "f3252:c4:m17"} {"signature": "def fso_stat(self, path):", "body": "return self.fso_anystat(path, link=False)", "docstring": "overlays os.stat()", "id": "f3252:c4:m18"} {"signature": "def _exists(self, path):", "body": "try:return bool(self._stat(path))except os.error:return False", "docstring": "IMPORTANT: expects `path` to already be deref()'erenced.", "id": "f3252:c4:m19"} {"signature": "def _lexists(self, path):", "body": "try:return bool(self._lstat(path))except os.error:return False", "docstring": "IMPORTANT: expects `path` to already be deref()'erenced.", "id": "f3252:c4:m20"} {"signature": "def fso_exists(self, path):", "body": "try:return self._exists(self.deref(path))except os.error:return False", "docstring": "overlays os.path.exists()", "id": "f3252:c4:m21"} {"signature": "def fso_lexists(self, path):", "body": "try:return self._lexists(self.deref(path, to_parent=True))except os.error:return False", "docstring": "overlays os.path.lexists()", "id": "f3252:c4:m22"} {"signature": "def fso_listdir(self, path):", "body": "path = self.deref(path)if not stat.S_ISDIR(self._stat(path).st_mode):raise OSError(, '', path)try:ret = self.originals[''](path)except Exception:ret = []for entry in list(self.entries.values()):if not entry.path.startswith(path + ''):continuesubpath = entry.path[len(path) + :]if '' in subpath:continueif entry.mode is None:if subpath in ret:ret.remove(subpath)else:if subpath not in ret:ret.append(subpath)return ret", "docstring": 
"overlays os.listdir()", "id": "f3252:c4:m24"} {"signature": "def fso_mkdir(self, path, mode=None):", "body": "path = self.deref(path, to_parent=True)if self._lexists(path):raise OSError(, '', path)self._addentry(OverlayEntry(self, path, stat.S_IFDIR))", "docstring": "overlays os.mkdir()", "id": "f3252:c4:m25"} {"signature": "def fso_makedirs(self, path, mode=None):", "body": "path = self.abs(path)cur = ''segments = path.split('')for idx, seg in enumerate(segments):cur = os.path.join(cur, seg)try:st = self.fso_stat(cur)except OSError:st = Noneif st is None:self.fso_mkdir(cur)continueif idx + == len(segments):raise OSError(, '', path)if not stat.S_ISDIR(st.st_mode):raise OSError(, '', path)", "docstring": "overlays os.makedirs()", "id": "f3252:c4:m26"} {"signature": "def fso_rmdir(self, path):", "body": "st = self.fso_lstat(path)if not stat.S_ISDIR(st.st_mode):raise OSError(, '', path)if len(self.fso_listdir(path)) > :raise OSError(, '', path)self._addentry(OverlayEntry(self, path, None))", "docstring": "overlays os.rmdir()", "id": "f3252:c4:m27"} {"signature": "def fso_readlink(self, path):", "body": "path = self.deref(path, to_parent=True)st = self.fso_lstat(path)if not stat.S_ISLNK(st.st_mode):raise OSError(, '', path)if st.st_overlay:return self.entries[path].contentreturn self.originals[''](path)", "docstring": "overlays os.readlink()", "id": "f3252:c4:m28"} {"signature": "def fso_symlink(self, source, link_name):", "body": "path = self.deref(link_name, to_parent=True)if self._exists(path):raise OSError(, '')self._addentry(OverlayEntry(self, path, stat.S_IFLNK, source))", "docstring": "overlays os.symlink()", "id": "f3252:c4:m29"} {"signature": "def fso_unlink(self, path):", "body": "path = self.deref(path, to_parent=True)if not self._lexists(path):raise OSError(, '', path)self._addentry(OverlayEntry(self, path, None))", "docstring": "overlays os.unlink()", "id": "f3252:c4:m30"} {"signature": "def fso_remove(self, path):", "body": "return self.fso_unlink(path)", "docstring": "overlays os.remove()", "id": "f3252:c4:m31"} {"signature": "def fso_islink(self, path):", "body": "try:return stat.S_ISLNK(self.fso_lstat(path).st_mode)except OSError:return False", "docstring": "overlays os.path.islink()", "id": "f3252:c4:m32"} {"signature": "def fso_rmtree(self, path, ignore_errors=False, onerror=None):", "body": "if ignore_errors:def onerror(*args):passelif onerror is None:def onerror(*args):raisetry:if self.fso_islink(path):raise OSError('')except OSError:onerror(os.path.islink, path, sys.exc_info())returnnames = []try:names = self.fso_listdir(path)except os.error as err:onerror(os.listdir, path, sys.exc_info())for name in names:fullname = os.path.join(path, name)try:mode = self.fso_lstat(fullname).st_modeexcept os.error:mode = if stat.S_ISDIR(mode):self.fso_rmtree(fullname, ignore_errors, onerror)else:try:self.fso_remove(fullname)except OSError as err:onerror(os.remove, fullname, sys.exc_info())try:self.fso_rmdir(path)except os.error:onerror(os.rmdir, path, sys.exc_info())", "docstring": "overlays shutil.rmtree()", "id": "f3252:c4:m33"} {"signature": "def no_ansi(text):", "body": "return re.sub(r\"\", \"\", text)", "docstring": "Kill any ANSI escape sequences.", "id": "f3261:m0"} {"signature": "def assert_sets_equal(s1, s2):", "body": "assert list(sorted(s1)) == list(sorted(s2))", "docstring": "Helper to compare sets.", "id": "f3270:m1"} {"signature": "def get_project_root():", "body": "try:tasks_py = sys.modules['']except KeyError:return Noneelse:return 
os.path.abspath(os.path.dirname(tasks_py.__file__))", "docstring": "Determine location of `tasks.py`.", "id": "f3273:m0"} {"signature": "def load():", "body": "cfg = Bunch(DEFAULTS)cfg.project_root = get_project_root()if not cfg.project_root:raise RuntimeError(\"\")cfg.rootjoin = lambda *names: os.path.join(cfg.project_root, *names)cfg.srcjoin = lambda *names: cfg.rootjoin(cfg.srcdir, *names)cfg.testjoin = lambda *names: cfg.rootjoin(cfg.testdir, *names)cfg.cwd = os.getcwd()os.chdir(cfg.project_root)if cfg.project_root not in sys.path:sys.path.append(cfg.project_root)try:from setup import project except ImportError:from setup import setup_args as project cfg.project = Bunch(project)return cfg", "docstring": "Load and return configuration as a ``Bunch``.\n\n Values are based on ``DEFAULTS``, and metadata from ``setup.py``.", "id": "f3273:m1"} {"signature": "def set_maven_layout():", "body": "DEFAULTS.update(srcdir = '',testdir = '',)", "docstring": "Switch default project layout to Maven-like.", "id": "f3273:m2"} {"signature": "def set_flat_layout():", "body": "DEFAULTS.update(srcdir = '',testdir = '',)", "docstring": "Switch default project layout to everything top-level.", "id": "f3273:m3"} {"signature": "def fail(message, exitcode=):", "body": "sys.stderr.write(''.format(message))sys.stderr.flush()sys.exit(exitcode)", "docstring": "Exit with error code and message.", "id": "f3274:m0"} {"signature": "def get_pypi_auth(configfile=''):", "body": "pypi_cfg = ConfigParser()if pypi_cfg.read(os.path.expanduser(configfile)):try:user = pypi_cfg.get('', '')pwd = pypi_cfg.get('', '')return user, pwdexcept ConfigError:notify.warning(\"\"\"\".format(configfile))return None", "docstring": "Read auth from pip config.", "id": "f3275:m0"} {"signature": "def watchdogctl(ctx, kill=False, verbose=True):", "body": "tries = if kill else cmd = ''.format(ctx.rituals.docs.watchdog.port)pidno = pidinfo = capture(cmd, ignore_failures=True)while pidinfo:pidline = next(filter(None, [re.match(r'', x) for x in pidinfo.splitlines()]))if not pidline:raise ValueError(\"\".format(pidinfo))pidno = int(pidline.group(), )if verbose:ctx.run(\"\".format(pidno), echo=False)verbose = Falsetries -= if tries <= :breakelse:try:os.kill(pidno, )except OSError as exc: if exc.errno == :breakraiseelse:notify.info(\"\".format(pidno))ctx.run(\"\".format(pidno), echo=False)time.sleep()pid = capture(cmd, ignore_failures=True)return pidno", "docstring": "Control / check a running Sphinx autobuild process.", "id": "f3275:m1"} {"signature": "@task(default=True, help={'': \"\",'': \"\",'': \"\",'': \"\",'': \"\",'': \"\",})def sphinx(ctx, browse=False, clean=False, watchdog=False, kill=False, status=False, opts=''):", "body": "cfg = config.load()if kill or status:if not watchdogctl(ctx, kill=kill):notify.info(\"\".format(ctx.rituals.docs.watchdog.port))returnif clean:ctx.run(\"\")for basename in ('', ''):markdown = cfg.rootjoin(basename + '')if os.path.exists(markdown):try:import pypandocexcept ImportError as exc:notify.warning(\"\".format(exc))breakelse:pypandoc.convert(markdown, '', outputfile=os.path.join(ctx.rituals.docs.sources, basename + ''))if os.path.exists(''):with io.open('', '') as inp:license_text = inp.read()try:_, copyright_text = cfg.project[''].split('', )except (KeyError, ValueError):copyright_text = cfg.project.get('', '')with io.open(os.path.join(ctx.rituals.docs.sources, ''), '') as out:out.write(''''''''''''''''''''.format(copyright_text))license_text = textwrap.dedent(license_text)license_text = 
''.join(license_text.splitlines())out.write(''.format(license_text))if cfg.project.get('') and str(ctx.rituals.docs.apidoc).lower()[:] in '':cmd = ['', '', '', '', '']for package in cfg.project.packages:if '' not in package:cmd.append(cfg.srcjoin(package))with pushd(ctx.rituals.docs.sources):ctx.run(''.join(cmd))cmd = ['', '', '']if opts:cmd.append(opts)cmd.extend(['', ctx.rituals.docs.build])index_url = index_file = os.path.join(ctx.rituals.docs.sources, ctx.rituals.docs.build, '')if watchdog:watchdogctl(ctx, kill=True)cmd[:] = ['', '']cmd.extend(['', ctx.rituals.docs.watchdog.host,'', ''.format(ctx.rituals.docs.watchdog.port),\"\".format(''),\"\".format(''),\"\".format(''),\"\", \"\", \"\",])index_url = \"\".format(ctx.rituals.docs.watchdog.host, ctx.rituals.docs.watchdog.port)notify.info(\"\".format('' if watchdog else ''))with pushd(ctx.rituals.docs.sources):ctx.run(''.join(cmd), pty=not watchdog)if watchdog:def activity(what=None, i=None):\"\"if i is None:sys.stdout.write(what + '')else:sys.stdout.write(''.format(r''[i % ], what or ''))sys.stdout.flush()for i in range():activity('', i)if watchdogctl(ctx):activity('')breaktime.sleep()else:activity('')if os.path.exists(os.path.join(ctx.rituals.docs.sources, '')):os.utime(os.path.join(ctx.rituals.docs.sources, ''), None)for i in range():activity('', i)if os.path.exists(index_file):activity('')breaktime.sleep()else:activity('')if browse:time.sleep()webbrowser.open_new_tab(index_url)", "docstring": "Build Sphinx docs.", "id": "f3275:m2"} {"signature": "@task(help={'': \"\",'': \"\",'': \"\",})def confluence(ctx, no_publish=False, clean=False, opts=''):", "body": "cfg = config.load()if clean:ctx.run(\"\")cmd = ['', '', '']cmd.extend(['', '']) if opts:cmd.append(opts)cmd.extend(['', ctx.rituals.docs.build + ''])if no_publish:cmd.extend([''])notify.info(\"\")with pushd(ctx.rituals.docs.sources):ctx.run(''.join(cmd), pty=True)", "docstring": "Build Sphinx docs and publish to Confluence.", "id": "f3275:m3"} {"signature": "@contextmanagerdef _zipped(self, docs_base):", "body": "with pushd(docs_base):with tempfile.NamedTemporaryFile(prefix='', delete=False) as ziphandle:passzip_name = shutil.make_archive(ziphandle.name, '')notify.info(\"\".format(os.path.getsize(zip_name) / , zip_name, self.target))with io.open(zip_name, '') as zipread:try:yield zipreadfinally:os.remove(ziphandle.name)os.remove(ziphandle.name + '')", "docstring": "Provide a zipped stream of the docs tree.", "id": "f3275:c0:m1"} {"signature": "def _to_pypi(self, docs_base, release):", "body": "url = Nonewith self._zipped(docs_base) as handle:reply = requests.post(self.params[''], auth=get_pypi_auth(), allow_redirects=False,files=dict(content=(self.cfg.project.name + '', handle, '')),data={'': '', '': self.cfg.project.name})if reply.status_code in range(, ):notify.info(\"\".format(**vars(reply)))elif reply.status_code == :url = reply.headers['']else:data = self.cfg.copy()data.update(self.params)data.update(vars(reply))notify.error(\"\".format(**data))return url", "docstring": "Upload to PyPI.", "id": "f3275:c0:m2"} {"signature": "def _to_webdav(self, docs_base, release):", "body": "try:git_path = subprocess.check_output('', shell=True)except subprocess.CalledProcessError:git_path = ''else:git_path = git_path.decode('').strip()git_path = git_path.replace('', '').replace('', '').replace('', '')git_path = re.search(r'', git_path)git_path = git_path.group().replace('', '') if git_path else ''url = Nonewith self._zipped(docs_base) as handle:url_ns = dict(name=self.cfg.project.name, 
version=release, git_path=git_path)reply = requests.put(self.params[''].format(**url_ns),data=handle.read(), headers={'': ''})if reply.status_code in range(, ):notify.info(\"\".format(**vars(reply)))try:data = reply.json()except ValueError as exc:notify.warning(\"\".format(exc))else:if '' in data: url = data[''] + ''elif reply.status_code == :url = reply.headers['']else:data = self.cfg.copy()data.update(self.params)data.update(vars(reply))notify.error(\"\".format(**data))if not url:notify.warning(\"\")return url", "docstring": "Upload to WebDAV store.", "id": "f3275:c0:m3"} {"signature": "def upload(self, docs_base, release):", "body": "return getattr(self, '' + self.target)(docs_base, release)", "docstring": "Upload docs in ``docs_base`` to the target of this uploader.", "id": "f3275:c0:m4"} {"signature": "@task(name='')def sync_readme(_dummy_ctx):", "body": "_ = config.load()notify.banner(\"\")notify.failure(\"\")", "docstring": "Update GH pages from project's README.", "id": "f3276:m0"} {"signature": "@task(default=True)def help(_dummy_ctx): ", "body": "shell.run(\"\")shell.run(\"\")notify.info(\"\")", "docstring": "Invoked with no arguments.", "id": "f3277:m0"} {"signature": "@task(help=dict(docs=\"\",backups=\"\",bytecode=\"\",dist=\"\",all=\"\",venv=\"\",tox=\"\",extra=\"\",))def clean(_dummy_ctx, docs=False, backups=False, bytecode=False, dist=False, all=False, venv=False, tox=False, extra=''): ", "body": "cfg = config.load()notify.banner(\"\")venv_dirs = ['', '', '', '', '', '']patterns = ['', '']excludes = ['', '', '', '']if docs or all:patterns.extend(['', ''])if dist or all:patterns.append('')if backups or all:patterns.extend([''])if bytecode or all:patterns.extend(['', '', '',cfg.srcjoin('')[len(cfg.project_root)+:],])if venv:patterns.extend([i + '' for i in venv_dirs])if tox:patterns.append('')else:excludes.append('')if extra:patterns.extend(shlex.split(extra))patterns = [antglob.includes(i) for i in patterns] + [antglob.excludes(i) for i in excludes]if not venv:patterns.extend([antglob.excludes(i + '') for i in venv_dirs])fileset = antglob.FileSet(cfg.project_root, patterns)for name in fileset:notify.info(''.format(name))if name.endswith(''):shutil.rmtree(os.path.join(cfg.project_root, name))else:os.unlink(os.path.join(cfg.project_root, name))", "docstring": "Perform house-keeping.", "id": "f3277:m1"} {"signature": "@task(help=dict(docs=\"\",))def build(ctx, docs=False):", "body": "cfg = config.load()ctx.run(\"\")if docs:for doc_path in ('', ''):if os.path.exists(cfg.rootjoin(doc_path, '')):breakelse:doc_path = Noneif doc_path:ctx.run(\"\")else:notify.warning(\"\")", "docstring": "Build the project.", "id": "f3277:m2"} {"signature": "@task(help=dict(local=\"\",))def freeze(ctx, local=False):", "body": "cmd = ''.format('' if local else '')frozen = ctx.run(cmd, hide='').stdout.replace('', '')with io.open('', '', encoding='') as out:out.write(\"\".format(isodate()))out.write(frozen)notify.info(\"\".format(len(frozen.splitlines()),))", "docstring": "Freeze currently installed requirements.", "id": "f3277:m3"} {"signature": "@task(help={'': \"\",'': \"\",'': \"\",'': \"\",})def tox(ctx, verbose=False, clean=False, env_list='', opts=''):", "body": "cfg = config.load()add_dir2pypath(cfg.project_root)snakepits = ctx.rituals.snakepits.split(os.pathsep)cmd = []snakepits = [i for i in snakepits if os.path.isdir(i)]if snakepits:cmd += [''.format(os.pathsep.join(snakepits),)]if clean and os.path.exists(cfg.rootjoin('')):shutil.rmtree(cfg.rootjoin(''))cmd += ['']if verbose:cmd += ['']if 
env_list:cmd += ['', env_list]cmd += optsctx.run(''.join(cmd))", "docstring": "Perform multi-environment tests.", "id": "f3278:m1"} {"signature": "def get_devpi_url(ctx):", "body": "cmd = ''lines = ctx.run(cmd, hide='', echo=False).stdout.splitlines()for line in lines:try:line, base_url = line.split('', )except ValueError:notify.warning(''.format(line))else:if line.split()[-].strip() == '':return base_url.split('')[].strip().rstrip('')raise LookupError(\"\".format(cmd, ''.join(lines),))", "docstring": "Get currently used 'devpi' base URL.", "id": "f3281:m0"} {"signature": "@task(help=dict(markdown=\"\",))def description(_dummy_ctx, markdown=False):", "body": "cfg = config.load()markup = '' if markdown else ''description_file = cfg.rootjoin(\"\".format(markup))notify.banner(\"\".format(description_file))long_description = cfg.project.long_descriptionlong_description = long_description.replace('', '')long_description = re.sub(r'', r'', long_description)text = DESCRIPTION_TEMPLATES[markup].format(keywords=''.join(cfg.project.keywords),classifiers=''.join(cfg.project.classifiers),classifiers_indented='' + ''.join(cfg.project.classifiers),packages=''.join(cfg.project.packages),long_description_html=''.format(long_description),**cfg)with io.open(description_file, '', encoding='') as handle:handle.write(text)", "docstring": "Dump project metadata for Jenkins Description Setter Plugin.", "id": "f3282:m0"} {"signature": "def get_egg_info(cfg, verbose=False):", "body": "result = Bunch()setup_py = cfg.rootjoin('')if not os.path.exists(setup_py):return resultegg_info = shell.capture(\"\".format(setup_py), echo=True if verbose else None)for info_line in egg_info.splitlines():if info_line.endswith(''):pkg_info_file = info_line.split(None, )[]result[''] = pkg_info_filewith io.open(pkg_info_file, encoding='') as handle:lastkey = Nonefor line in handle:if line.lstrip() != line:assert lastkey, \"\".format(pkg_info_file, line)result[lastkey] += '' + lineelse:lastkey, value = line.split('', )lastkey = lastkey.strip().lower().replace('', '')value = value.strip()if lastkey in result:try:result[lastkey].append(value)except AttributeError:result[lastkey] = [result[lastkey], value]else:result[lastkey] = valuefor multikey in PKG_INFO_MULTIKEYS:if not isinstance(result.get(multikey, []), list):result[multikey] = [result[multikey]]return result", "docstring": "Call 'setup egg_info' and return the parsed meta-data.", "id": "f3283:m0"} {"signature": "@task(help=dict(verbose=\"\",pypi=\"\",))def bump(ctx, verbose=False, pypi=False):", "body": "cfg = config.load()scm = scm_provider(cfg.project_root, commit=False, ctx=ctx)if not scm.workdir_is_clean():notify.warning(\"\")pep440 = scm.pep440_dev_version(verbose=verbose, non_local=pypi)setup_cfg = cfg.rootjoin('')if not pep440:notify.info(\"\")elif os.path.exists(setup_cfg):with io.open(setup_cfg, encoding='') as handle:data = handle.readlines()changed = Falsefor i, line in enumerate(data):if re.match(r\"\", line):verb, _ = data[i].split('', )data[i] = ''.format(verb, pep440)changed = Trueif changed:notify.info(\"\")with io.open(setup_cfg, '', encoding='') as handle:handle.write(''.join(data))else:notify.warning(\"\")else:notify.warning(\"\")if os.path.exists(setup_cfg):egg_info = shell.capture(\"\", echo=True if verbose else None)for line in egg_info.splitlines():if line.endswith(''):pkg_info_file = line.split(None, )[]with io.open(pkg_info_file, encoding='') as handle:notify.info(''.join(i for i in handle.readlines() if i.startswith('')).strip())ctx.run(\"\", echo=True if 
verbose else None)", "docstring": "Bump a development version.", "id": "f3283:m1"} {"signature": "@task(help=dict(devpi=\"\",egg=\"\",wheel=\"\",auto=\"\",))def dist(ctx, devpi=False, egg=False, wheel=False, auto=True):", "body": "config.load()cmd = [\"\", \"\", \"\"]if auto:egg = sys.version_info.major == try:import wheel as _wheel = Trueexcept ImportError:wheel = Falseif egg:cmd.append(\"\")if wheel:cmd.append(\"\")ctx.run(\"\")ctx.run(''.join(cmd))if devpi:ctx.run(\"\")", "docstring": "Distribute the project.", "id": "f3283:m2"} {"signature": "@task(help=dict(pyrun=\"\",upload=\"\",opts=\"\",))def pex(ctx, pyrun='', upload=False, opts=''):", "body": "cfg = config.load()ctx.run(\"\")pkg_info = get_egg_info(cfg)version = pkg_info.version if pkg_info else cfg.project.versionpex_files = []for script in cfg.project.entry_points['']:script, entry_point = script.split('', )script, entry_point = script.strip(), entry_point.strip()pex_file = cfg.rootjoin('', ''.format(script, version))cmd = ['', '', cfg.rootjoin(''), cfg.project_root, '', script, '', pex_file]if opts:cmd.append(opts)ctx.run(''.join(cmd))non_universal = set()with closing(zipfile.ZipFile(pex_file, mode=\"\")) as pex_contents:for pex_name in pex_contents.namelist(): if pex_name.endswith('') and '' not in pex_name:non_universal.add(pex_name.split('')[].split('')[-])if non_universal:notify.warning(\"\".format(pex_file.replace(os.getcwd(), ''), ''.join(sorted(non_universal))))envs = [i.split('')[-:] for i in non_universal]envs = {i[]: i[:] for i in envs}if len(envs) > :envs = {k: v for k, v in envs.items() if not k.startswith('')}env_id = []for k, v in sorted(envs.items()):env_id.append(k)env_id.extend(v)env_id = ''.join(env_id)else:env_id = ''new_pex_file = pex_file.replace('', ''.format(env_id))notify.info(\"\".format(os.path.basename(new_pex_file)))os.rename(pex_file, new_pex_file)pex_file = new_pex_filepex_files.append(pex_file)if not pex_files:notify.warning(\"\")else:if pyrun:if any(pyrun.startswith(i) for i in ('', '', '')):pyrun_url = pyrunelse:pyrun_cfg = dict(ctx.rituals.pyrun)pyrun_cfg.update(parse_qsl(pyrun.replace(os.pathsep, '')))pyrun_url = (pyrun_cfg[''] + '' +pyrun_cfg['']).format(**pyrun_cfg)notify.info(\"\".format(pyrun_url))with url_as_file(pyrun_url, ext='') as pyrun_tarball:pyrun_tar = tarfile.TarFile.gzopen(pyrun_tarball)for pex_file in pex_files[:]:pyrun_exe = pyrun_tar.extractfile('')with open(pex_file, '') as pex_handle:pyrun_pex_file = ''.format(pex_file[:-], pyrun_url.rsplit('')[-][:-])with open(pyrun_pex_file, '') as pyrun_pex:pyrun_pex.write(INSTALLER_BASH.replace('', ''.format(len(INSTALLER_BASH) + )))shutil.copyfileobj(pyrun_exe, pyrun_pex)shutil.copyfileobj(pex_handle, pyrun_pex)shutil.copystat(pex_file, pyrun_pex_file)notify.info(\"\".format(pretty_path(pyrun_pex_file)))pex_files.append(pyrun_pex_file)if upload:base_url = ctx.rituals.release.upload.base_url.rstrip('')if not base_url:notify.failure(\"\")for pex_file in pex_files:url = base_url + '' + ctx.rituals.release.upload.path.lstrip('').format(name=cfg.project.name, version=cfg.project.version, filename=os.path.basename(pex_file))notify.info(\"\".format(url))with io.open(pex_file, '') as handle:reply = requests.put(url, data=handle.read())if reply.status_code in range(, ):notify.info(\"\".format(**vars(reply)))else:notify.warning(\"\".format(**vars(reply)))", "docstring": "Package the project with PEX.", "id": "f3283:m3"} {"signature": "@task(default=True, help=dict(dput=\"\",opts=\"\",))def build(ctx, dput='', opts=''):", "body": "with 
io.open('', encoding='') as changes:metadata = re.match(r'', changes.readline().rstrip())if not metadata:notify.failure('')name, version, _, _ = metadata.groups()ctx.run(''.format(ctx.rituals.deb.build.opts, opts))if not os.path.exists(''):os.makedirs('')artifact_pattern = ''.format(name, re.sub(r'', '', version))changes_files = []for debfile in glob.glob('' + artifact_pattern):shutil.move(debfile, '')if debfile.endswith(''):changes_files.append(os.path.join('', os.path.basename(debfile)))ctx.run(''.format(artifact_pattern))if dput:ctx.run(''.format(dput, ''.join(changes_files)))", "docstring": "Build a DEB package.", "id": "f3284:m0"} {"signature": "def _get_registered_executable(exe_name):", "body": "registered = Noneif sys.platform.startswith(''):if os.path.splitext(exe_name)[].lower() != '':exe_name += ''import _winreg try:key = \"\" + exe_namevalue = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key)registered = (value, \"\"+key)except _winreg.error:passif registered and not os.path.exists(registered[]):registered = Nonereturn registered", "docstring": "Windows allows application paths to be registered in the registry.", "id": "f3285:m0"} {"signature": "def _samefile(fname1, fname2):", "body": "if sys.platform.startswith(''):return os.path.normpath(os.path.normcase(fname1)) == os.path.normpath(os.path.normcase(fname2))else:return os.path.samefile(fname1, fname2)", "docstring": "OS-independent `samefile` implementation.", "id": "f3285:m1"} {"signature": "def _cull(potential, matches, verbose=):", "body": "for match in matches: if _samefile(potential[], match[]):if verbose:sys.stderr.write(\"\" % potential)return Noneif not stat.S_ISREG(os.stat(potential[]).st_mode):if verbose:sys.stderr.write(\"\" % potential)elif not os.access(potential[], os.X_OK):if verbose:sys.stderr.write(\"\" % potential)else:matches.append(potential)return potentialreturn None", "docstring": "Cull inappropriate matches. 
Possible reasons:\n - a duplicate of a previous match\n - not a disk file\n - not executable (non-Windows)\n If 'potential' is approved it is returned and added to 'matches'.\n Otherwise, None is returned.", "id": "f3285:m2"} {"signature": "def whichgen(command, path=None, verbose=, exts=None): ", "body": "matches = []if path is None:using_given_path = path = os.environ.get(\"\", \"\").split(os.pathsep)if sys.platform.startswith(\"\"):path.insert(, os.curdir) else:using_given_path = if sys.platform.startswith(\"\"):if exts is None:exts = os.environ.get(\"\", \"\").split(os.pathsep)for ext in exts:if ext.lower() == \"\":breakelse:exts = ['', '', '']elif not isinstance(exts, list):raise TypeError(\"\")else:if exts is not None:raise WhichError(\"\" % sys.platform)exts = []if os.sep in command or os.altsep and os.altsep in command:passelse:for i, dir_name in enumerate(path):if sys.platform.startswith(\"\") and len(dir_name) >= and dir_name[] == '' and dir_name[-] == '':dir_name = dir_name[:-]for ext in ['']+exts:abs_name = os.path.abspath(os.path.normpath(os.path.join(dir_name, command+ext)))if os.path.isfile(abs_name):if using_given_path:from_where = \"\" % ielif not sys.platform.startswith(\"\"):from_where = \"\" % ielif i == :from_where = \"\"else:from_where = \"\" % (i-)match = _cull((abs_name, from_where), matches, verbose)if match:if verbose:yield matchelse:yield match[]match = _get_registered_executable(command)if match is not None:match = _cull(match, matches, verbose)if match:if verbose:yield matchelse:yield match[]", "docstring": "Return a generator of full paths to the given command.\n\n \"command\" is the name of the executable to search for.\n \"path\" is an optional alternate path list to search. The default is\n to use the PATH environment variable.\n \"verbose\", if true, will cause a 2-tuple to be returned for each\n match. The second element is a textual description of where the\n match was found.\n \"exts\" optionally allows one to specify a list of extensions to use\n instead of the standard list for this system. This can\n effectively be used as an optimization to, for example, avoid\n stat's of \"foo.vbs\" when searching for \"foo\" and you know it is\n not a VisualBasic script but \".vbs\" is on PATHEXT. This option\n is only supported on Windows.\n\n This method returns a generator which yields either full paths to\n the given command or, if verbose, tuples of the form (, ).", "id": "f3285:m3"} {"signature": "def which(command, path=None, verbose=, exts=None):", "body": "matched = whichgen(command, path, verbose, exts)try:match = next(matched)except StopIteration:raise WhichError(\"\" % command)else:return match", "docstring": "Return the full path to the first match of the given command on\n the path.\n\n \"command\" is the name of the executable to search for.\n \"path\" is an optional alternate path list to search. The default is\n to use the PATH environment variable.\n \"verbose\", if true, will cause a 2-tuple to be returned. The second\n element is a textual description of where the match was found.\n \"exts\" optionally allows one to specify a list of extensions to use\n instead of the standard list for this system. This can\n effectively be used as an optimization to, for example, avoid\n stat's of \"foo.vbs\" when searching for \"foo\" and you know it is\n not a VisualBasic script but \".vbs\" is on PATHEXT. 
This option\n is only supported on Windows.\n\n If no match is found for the command, a WhichError is raised.", "id": "f3285:m4"} {"signature": "def whichall(command, path=None, verbose=, exts=None):", "body": "return list(whichgen(command, path, verbose, exts))", "docstring": "Return a list of full paths to all matches of the given command on the path.\n\n \"command\" is the name of the executable to search for.\n \"path\" is an optional alternate path list to search. The default is\n to use the PATH environment variable.\n \"verbose\", if true, will cause a 2-tuple to be returned for each\n match. The second element is a textual description of where the\n match was found.\n \"exts\" optionally allows one to specify a list of extensions to use\n instead of the standard list for this system. This can\n effectively be used as an optimization to, for example, avoid\n stat's of \"foo.vbs\" when searching for \"foo\" and you know it is\n not a VisualBasic script but \".vbs\" is on PATHEXT. This option\n is only supported on Windows.", "id": "f3285:m5"} {"signature": "def workdir_is_clean(self, quiet=False):", "body": "self.run('', **RUN_KWARGS)unchanged = Truetry:self.run('', report_error=False, **RUN_KWARGS)except exceptions.Failure:unchanged = Falseif not quiet:notify.warning('')self.run('', **RUN_KWARGS)try:self.run('', report_error=False, **RUN_KWARGS)except exceptions.Failure:unchanged = Falseif not quiet:notify.warning('')self.run('', **RUN_KWARGS)return unchanged", "docstring": "Check for uncommitted changes, return `True` if everything is clean.\n\n Inspired by http://stackoverflow.com/questions/3878624/.", "id": "f3286:c0:m0"} {"signature": "def add_file(self, filename):", "body": "self.run(''.format(filename), **RUN_KWARGS)", "docstring": "Stage a file for committing.", "id": "f3286:c0:m1"} {"signature": "def commit(self, message):", "body": "self.run_elective(''.format(message))", "docstring": "Commit pending changes.", "id": "f3286:c0:m2"} {"signature": "def tag(self, label, message=None):", "body": "options = ''.format(message) if message else ''self.run_elective(''.format(options, label))", "docstring": "Tag the current workdir state.", "id": "f3286:c0:m3"} {"signature": "def pep440_dev_version(self, verbose=False, non_local=False):", "body": "version = capture(\"\", echo=verbose)if verbose:notify.info(\"\".format(version))now = ''.format(datetime.now())tag = capture(\"\".format(now), echo=verbose)if verbose:notify.info(\"\".format(tag))try:tag, date, time = tag.split('')except ValueError:date = time = ''tag, commits, short_hash = tag.rsplit('', )label = tagif re.match(r\"\", label):label = label[:]if commits == '' and label == version:pep440 = Noneelse:local_part = [re.sub(r\"\", '', label).strip(''), short_hash,date + ('' + time if time else ''),]build_number = os.environ.get('', '')if build_number.isdigit():local_part.extend(['', build_number])if verbose:notify.info(\"\".format(build_number))local_part = [i for i in local_part if i]pep440 = ''.format(commits, ''.join(local_part).strip(''))if non_local:pep440, _ = pep440.split('', )return pep440", "docstring": "Return a PEP-440 dev version appendix to the main version number.\n\n Result is ``None`` if the workdir is in a release-ready state\n (i.e. 
clean and properly tagged).", "id": "f3286:c0:m4"} {"signature": "def workdir_is_clean(self, quiet=False):", "body": "if not quiet:notify.warning('''')return True", "docstring": "Check for uncommitted changes, return `True` if everything is clean.", "id": "f3287:c0:m0"} {"signature": "def add_file(self, filename):", "body": "notify.warning(''.format(filename))", "docstring": "Stage a file for committing, or commit it directly (depending on the SCM).", "id": "f3287:c0:m1"} {"signature": "def commit(self, message):", "body": "notify.warning(''.format(message))", "docstring": "Commit pending changes.", "id": "f3287:c0:m2"} {"signature": "def tag(self, label, message=None):", "body": "notify.warning(''.format(label, ''.format(message) if message else '',))", "docstring": "Tag the current workdir state.", "id": "f3287:c0:m3"} {"signature": "def pep440_dev_version(self, verbose=False, non_local=False):", "body": "pep440 = ''.format(datetime.now())if not non_local:build_number = os.environ.get('', '')if build_number.isdigit():pep440 += ''.format(build_number)if verbose:notify.info(\"\".format(build_number))return pep440", "docstring": "Return a PEP-440 dev version appendix to the main version number.", "id": "f3287:c0:m4"} {"signature": "def run(self, cmd, *args, **kwargs):", "body": "runner = self.ctx.run if self.ctx else Nonereturn run(cmd, runner=runner, *args, **kwargs)", "docstring": "Run a command.", "id": "f3288:c0:m1"} {"signature": "def run_elective(self, cmd, *args, **kwargs):", "body": "if self._commit:return self.run(cmd, *args, **kwargs)else:notify.warning(\"\".format(cmd))kwargs = kwargs.copy()kwargs[''] = Falsereturn self.run('', *args, **kwargs)", "docstring": "Run a command, or just echo it, depending on `commit`.", "id": "f3288:c0:m2"} {"signature": "def auto_detect(workdir):", "body": "if os.path.isdir(os.path.join(workdir, '')) and os.path.isfile(os.path.join(workdir, '', '')):return ''return ''", "docstring": "Return string signifying the SCM used in the given directory.\n\n Currently, 'git' is supported. 
Anything else returns 'unknown'.", "id": "f3289:m0"} {"signature": "def provider(workdir, commit=True, **kwargs):", "body": "return SCM_PROVIDER[auto_detect(workdir)](workdir, commit=commit, **kwargs)", "docstring": "Factory for the correct SCM provider in `workdir`.", "id": "f3289:m1"} {"signature": "def glob2re(part):", "body": "return \"\".join(re.escape(bit).replace(r'', '').replace(r'', '').replace(r'', '')for bit in part.split(\"\"))", "docstring": "Convert a path part to regex syntax.", "id": "f3290:m0"} {"signature": "def parse_glob(pattern):", "body": "if not pattern:returnbits = pattern.split(\"\")dirs, filename = bits[:-], bits[-]for dirname in dirs:if dirname == \"\":yield \"\"else:yield glob2re(dirname) + \"\"yield glob2re(filename)", "docstring": "Generate parts of regex transformed from glob pattern.", "id": "f3290:m1"} {"signature": "def compile_glob(spec):", "body": "parsed = \"\".join(parse_glob(spec))regex = \"\".format(parsed)return re.compile(regex)", "docstring": "Convert the given glob `spec` to a compiled regex.", "id": "f3290:m2"} {"signature": "def includes(pattern):", "body": "return Pattern(pattern, inclusive=True)", "docstring": "A single inclusive glob pattern.", "id": "f3290:m3"} {"signature": "def excludes(pattern):", "body": "return Pattern(pattern, inclusive=False)", "docstring": "A single exclusive glob pattern.", "id": "f3290:m4"} {"signature": "def __init__(self, spec, inclusive):", "body": "self.compiled = compile_glob(spec.rstrip(''))self.inclusive = inclusiveself.is_dir = spec.endswith('')", "docstring": "Create regex-based pattern matcher from glob `spec`.", "id": "f3290:c0:m0"} {"signature": "def __str__(self):", "body": "return ('' if self.inclusive else '') + self.compiled.pattern", "docstring": "Return inclusiveness indicator and original glob pattern.", "id": "f3290:c0:m1"} {"signature": "def matches(self, path):", "body": "return bool(self.compiled.match(path))", "docstring": "Check this pattern against given `path`.", "id": "f3290:c0:m2"} {"signature": "def included(self, path, is_dir=False):", "body": "inclusive = Nonefor pattern in self.patterns:if pattern.is_dir == is_dir and pattern.matches(path):inclusive = pattern.inclusivereturn inclusive", "docstring": "Check patterns in order, last match that includes or excludes `path` wins. 
Return `None` on undecided.", "id": "f3290:c1:m2"} {"signature": "def walk(self, **kwargs):", "body": "lead = ''if '' in kwargs and kwargs.pop(''):lead = self.root.rstrip(os.sep) + os.sepfor base, dirs, files in os.walk(self.root, **kwargs):prefix = base[len(self.root):].lstrip(os.sep)bits = prefix.split(os.sep) if prefix else []for dirname in dirs[:]:path = ''.join(bits + [dirname])inclusive = self.included(path, is_dir=True)if inclusive:yield lead + path + ''elif inclusive is False:dirs.remove(dirname)for filename in files:path = ''.join(bits + [filename])if self.included(path):yield lead + path", "docstring": "Like `os.walk` and taking the same keyword arguments,\n but generating paths relative to the root.\n\n Starts in the fileset's root and filters based on its patterns.\n If ``with_root=True`` is passed in, the generated paths include\n the root path.", "id": "f3290:c1:m8"} {"signature": "def capture(cmd, **kw):", "body": "kw = kw.copy()kw[''] = ''if not kw.get('', False):kw[''] = Falseignore_failures = kw.pop('', False)try:return invoke_run(cmd, **kw).stdout.strip()except exceptions.Failure as exc:if not ignore_failures:notify.error(\"\".format(cmd, exc.result.return_code,))raise", "docstring": "Run a command and return its stripped captured output.", "id": "f3292:m0"} {"signature": "def run(cmd, **kw):", "body": "kw = kw.copy()kw.setdefault('', False) report_error = kw.pop('', True)runner = kw.pop('', invoke_run)try:return runner(cmd, **kw)except exceptions.Failure as exc:sys.stdout.flush()sys.stderr.flush()if report_error:notify.error(\"\".format(cmd, exc.result.return_code,))raisefinally:sys.stdout.flush()sys.stderr.flush()", "docstring": "Run a command and flush its output.", "id": "f3292:m1"} {"signature": "def search_file_upwards(name, base=None):", "body": "base = base or os.getcwd()while base != os.path.dirname(base):if os.path.exists(os.path.join(base, name)):return basebase = os.path.dirname(base)return None", "docstring": "Search for a file named `name` from cwd or given directory to root.\n Return None if nothing's found.", "id": "f3293:m0"} {"signature": "def add_dir2pypath(path):", "body": "py_path = os.environ.get('', '')if path not in py_path.split(os.pathsep):py_path = ''.join([path, os.pathsep if py_path else '', py_path])os.environ[''] = py_path", "docstring": "Add given directory to PYTHONPATH, e.g. 
for pylint.", "id": "f3293:m1"} {"signature": "def _flush():", "body": "sys.stdout.flush()sys.stderr.flush()", "docstring": "Flush all console output.", "id": "f3294:m0"} {"signature": "def banner(msg):", "body": "if ECHO:_flush()sys.stderr.write(\"\".format(msg))sys.stderr.flush()", "docstring": "Emit a banner just like Invoke's `run(\u2026, echo=True)`.", "id": "f3294:m1"} {"signature": "def info(msg):", "body": "_flush()sys.stdout.write(msg + '')sys.stdout.flush()", "docstring": "Emit a normal message.", "id": "f3294:m2"} {"signature": "def warning(msg):", "body": "_flush()sys.stderr.write(\"\".format(msg))sys.stderr.flush()", "docstring": "Emit a warning message.", "id": "f3294:m3"} {"signature": "def error(msg):", "body": "_flush()sys.stderr.write(\"\".format(msg))sys.stderr.flush()", "docstring": "Emit an error message to stderr.", "id": "f3294:m4"} {"signature": "def failure(msg):", "body": "error(msg)sys.exit()", "docstring": "Emit a fatal message and exit.", "id": "f3294:m5"} {"signature": "def pretty_path(path, _home_re=re.compile('' + re.escape(os.path.expanduser('') + os.sep))):", "body": "path = decode_filename(path)path = _home_re.sub('' + os.sep, path)return path", "docstring": "Prettify path for humans, and make it Unicode.", "id": "f3295:m0"} {"signature": "@contextmanagerdef pushd(path):", "body": "saved = os.getcwd()os.chdir(path)try:yield savedfinally:os.chdir(saved)", "docstring": "A context that enters a given directory and restores the old state on exit.\n\n The original directory is returned as the context variable.", "id": "f3295:m1"} {"signature": "@contextmanagerdef url_as_file(url, ext=None):", "body": "if ext:ext = '' + ext.strip('') url_hint = ''.format(urlparse(url).hostname or '')if url.startswith(''):url = os.path.abspath(url[len(''):])if os.path.isabs(url):with open(url, '') as handle:content = handle.read()else:content = requests.get(url).contentwith tempfile.NamedTemporaryFile(suffix=ext or '', prefix=url_hint, delete=False) as handle:handle.write(content)try:yield handle.namefinally:if os.path.exists(handle.name):os.remove(handle.name)", "docstring": "Context manager that GETs a given `url` and provides it as a local file.\n\nThe file is in a closed state upon entering the context,\nand removed when leaving it, if still there.\n\nTo give the file name a specific extension, use `ext`;\nthe extension can optionally include a separating dot,\notherwise it will be added.\n\nParameters:\n url (str): URL to retrieve.\n ext (str, optional): Extension for the generated filename.\n\nYields:\n str: The path to a temporary file with the content of the URL.\n\nRaises:\n requests.RequestException: Base exception of ``requests``, see its\n docs for more detailed ones.\n\nExample:\n >>> import io, re, json\n >>> with url_as_file('https://api.github.com/meta', ext='json') as meta:\n ... 
meta, json.load(io.open(meta, encoding='ascii'))['hooks']\n (u'/tmp/www-api.github.com-Ba5OhD.json', [u'192.30.252.0/22'])", "id": "f3295:m2"} {"signature": "def srcfile(*args):", "body": "return os.path.join(*((project_root,) + args))", "docstring": "Helper for path building.", "id": "f3297:m0"} {"signature": "def _build_metadata(): ", "body": "expected_keys = ('', '', '', '', '', '', '')metadata = {}with open(srcfile('', package_name, ''), encoding='') as handle:pkg_init = handle.read()metadata[''] = re.search(r'', pkg_init, re.DOTALL|re.MULTILINE).group()for line in pkg_init.splitlines():match = re.match(r\"\"\"\"\"\".format(''.join(expected_keys)), line)if match:metadata[match.group()] = match.group()if not all(i in metadata for i in expected_keys):raise RuntimeError(\"\".format(name, ''.join(sorted(set(expected_keys) - set(metadata.keys()))),))text = metadata[''].strip()if text:metadata[''], text = text.split('', )metadata[''] = ''.join(metadata[''].split()).strip() + '' metadata[''] = textwrap.dedent(text).strip()metadata[''] = metadata[''].replace('', '').strip().split()requirements_files = dict(install = '',setup = '',test = '',)requires = {}for key, filename in requirements_files.items():requires[key] = []if os.path.exists(srcfile(filename)):with open(srcfile(filename), encoding='') as handle:for line in handle:line = line.strip()if line and not line.startswith(''):if line.startswith(''):line = line.split()[].split('')[]requires[key].append(line)if not any('' == re.split('', i.lower())[] for i in requires['']):requires[''].append('') console_scripts = []for path, dirs, files in os.walk(srcfile('', package_name)):dirs = [i for i in dirs if not i.startswith('')]if '' in files:path = path[len(srcfile('') + os.sep):]appname = path.split(os.sep)[-]with open(srcfile('', path, ''), encoding='') as handle:for line in handle.readlines():match = re.match(r\"\"\"\"\"\", line)if match:appname = match.group()console_scripts.append(''.format(appname, path.replace(os.sep, '')))candidate_files = ['', '','', '', '', '','', '', '',]data_files = defaultdict(list)for filename in candidate_files:if os.path.exists(srcfile(filename)):data_files[''].append(filename)classifiers = []for classifiers_txt in ('', ''):classifiers_txt = srcfile(classifiers_txt)if os.path.exists(classifiers_txt):with open(classifiers_txt, encoding='') as handle:classifiers = [i.strip() for i in handle if i.strip() and not i.startswith('')]breakmetadata.update(dict(name = name,package_dir = {'': ''},packages = find_packages(srcfile(''), exclude=['']),data_files = data_files.items(),zip_safe = False,include_package_data = True,install_requires = requires[''],setup_requires = requires[''],tests_require = requires[''],classifiers = classifiers,cmdclass = dict(test = PyTest,),entry_points = dict(console_scripts = console_scripts,),))return metadata", "docstring": "Return project's metadata as a dict.", "id": "f3297:m1"} {"signature": "def try_until_positive(req):", "body": "response = yield reqwhile response < :try:response = yield ''except GeneratorExit:returnexcept ValueError:yield ''return response", "docstring": "an example relay", "id": "f3299:m2"} {"signature": "def try_until_even(req):", "body": "response = yield reqwhile response % :try:response = yield ''except GeneratorExit:returnexcept ValueError:yield ''return response", "docstring": "an example relay", "id": "f3299:m3"} {"signature": "def mymax(val):", "body": "while val < :try:sent = yield valexcept GeneratorExit:returnexcept ValueError:sent = yield ''except 
TypeError:return ''if sent > val:val = sentreturn val * ", "docstring": "an example generator function", "id": "f3299:m4"} {"signature": "@py2_compatibledef try_until_positive(req):", "body": "response = yield reqwhile response < :try:response = yield ''except GeneratorExit:returnexcept ValueError:yield ''return_(response)", "docstring": "an example relay", "id": "f3300:m2"} {"signature": "@py2_compatibledef try_until_even(req):", "body": "response = yield reqwhile response % :try:response = yield ''except GeneratorExit:returnexcept ValueError:yield ''return_(response)", "docstring": "an example relay", "id": "f3300:m3"} {"signature": "@py2_compatibledef mymax(val):", "body": "while val < :try:sent = yield valexcept GeneratorExit:returnexcept ValueError:sent = yield ''except TypeError:return_('')if sent > val:val = sentreturn_(val * )", "docstring": "an example generator function", "id": "f3300:m4"} {"signature": "def with_generator(name):", "body": "gens = [getattr(common, name)]if not PY2:from . import py3gens.append(getattr(py3, name))return pytest.mark.parametrize(name, gens)", "docstring": "use a python 2/3 parametrized generator", "id": "f3302:m0"} {"signature": "def _add_metaclass(metaclass): ", "body": "def wrapper(cls):orig_vars = cls.__dict__.copy()slots = orig_vars.get('')if slots is not None:if isinstance(slots, str):slots = [slots]for slots_var in slots:orig_vars.pop(slots_var)orig_vars.pop('', None)orig_vars.pop('', None)return metaclass(cls.__name__, cls.__bases__, orig_vars)return wrapper", "docstring": "Class decorator for creating a class with a metaclass.", "id": "f3303:m0"} {"signature": "@abc.abstractmethoddef __iter__(self):", "body": "raise NotImplementedError()", "docstring": "Returns\n-------\n~typing.Generator[T_yield, T_send, T_return]\n the generator iterator", "id": "f3303:c0:m0"} {"signature": "def __call__(self, *args, **kwargs):", "body": "raise NotImplementedError()", "docstring": "Returns\n-------\n~typing.Generator[T_yield, T_send, T_return]\n the resulting generator", "id": "f3303:c1:m0"} {"signature": "def replace(self, **kwargs):", "body": "copied = self.__signature__.bind(*self._bound_args.args,**self._bound_args.kwargs)copied.arguments.update(**kwargs)return self.__class__(*copied.args, **copied.kwargs)", "docstring": "create a new instance with certain fields replaced\n\n Parameters\n ----------\n **kwargs\n fields to replace\n\n Returns\n -------\n ReusableGenerator\n a copy with replaced fields", "id": "f3303:c3:m6"} {"signature": "def py2_compatible(func):", "body": "return compose(GeneratorProxy, func)", "docstring": "Decorate a generator function to make it Python 2/3 compatible.\n Use together with :func:`return_`.\n\n Example\n -------\n\n >>> @py2_compatible\n ... def my_max(value):\n ... while value < 100:\n ... newvalue = yield value\n ... if newvalue > value:\n ... value = newvalue\n ... return_(value)\n\n is equivalent to:\n\n >>> def my_max(value):\n ... while value < 100:\n ... newvalue = yield value\n ... if newvalue > value:\n ... value = newvalue\n ... return value\n\n Note\n ----\n This is necessary because PEP479 makes it impossible to replace\n ``return`` with ``raise StopIteration`` in newer python 3 versions.\n\n Warning\n -------\n Although the wrapped generator acts like a generator,\n it is not a strict generator instance.\n For most purposes (e.g. 
``yield from``) it works fine,\n but :func:`~inspect.isgenerator` will return ``False``.\n\n See also\n --------\n `PEP 479 `_", "id": "f3305:m1"} {"signature": "def return_(value):", "body": "raise GeneratorReturn(value)", "docstring": "Python 2/3 compatible way to return a value from a generator\n\n Use only with the :func:`py2_compatible` decorator", "id": "f3305:m2"} {"signature": "def reusable(func):", "body": "sig = signature(func)origin = funcwhile hasattr(origin, ''):origin = origin.__wrapped__return type(origin.__name__,(ReusableGenerator, ),dict([('', origin.__doc__),('', origin.__module__),('', sig),('', staticmethod(func)),] + [(name, property(compose(itemgetter(name),attrgetter(''))))for name in sig.parameters] + ([('', origin.__qualname__),] if sys.version_info > (, ) else [])))", "docstring": "Create a reusable class from a generator function\n\n Parameters\n ----------\n func: GeneratorCallable[T_yield, T_send, T_return]\n the function to wrap\n\n Note\n ----\n * the callable must have an inspectable signature\n * If bound to a class, the new reusable generator is callable as a method.\n To opt out of this, add a :func:`staticmethod` decorator above\n this decorator.", "id": "f3305:m3"} {"signature": "def sendreturn(gen, value):", "body": "try:gen.send(value)except StopIteration as e:return stopiter_value(e)else:raise RuntimeError('')", "docstring": "Send an item into a generator expecting a final return value\n\n Parameters\n ----------\n gen: ~typing.Generator[T_yield, T_send, T_return]\n the generator to send the value to\n value: T_send\n the value to send\n\n Raises\n ------\n RuntimeError\n if the generator did not return as expected\n\n Returns\n -------\n T_return\n the generator's return value", "id": "f3305:m5"} {"signature": "@py2_compatibledef imap_yield(func, gen):", "body": "gen = iter(gen)assert _is_just_started(gen)yielder = yield_from(gen)for item in yielder:with yielder:yielder.send((yield func(item)))return_(yielder.result)", "docstring": "Apply a function to all ``yield`` values of a generator\n\n Parameters\n ----------\n func: ~typing.Callable[[T_yield], T_mapped]\n the function to apply\n gen: Generable[T_yield, T_send, T_return]\n the generator iterable.\n\n Returns\n -------\n ~typing.Generator[T_mapped, T_send, T_return]\n the mapped generator", "id": "f3305:m6"} {"signature": "@py2_compatibledef imap_send(func, gen):", "body": "gen = iter(gen)assert _is_just_started(gen)yielder = yield_from(gen)for item in yielder:with yielder:yielder.send(func((yield item)))return_(yielder.result)", "docstring": "Apply a function to all ``send`` values of a generator\n\n Parameters\n ----------\n func: ~typing.Callable[[T_send], T_mapped]\n the function to apply\n gen: Generable[T_yield, T_mapped, T_return]\n the generator iterable.\n\n Returns\n -------\n ~typing.Generator[T_yield, T_send, T_return]\n the mapped generator", "id": "f3305:m7"} {"signature": "@py2_compatibledef imap_return(func, gen):", "body": "gen = iter(gen)assert _is_just_started(gen)yielder = yield_from(gen)for item in yielder:with yielder:yielder.send((yield item))return_(func(yielder.result))", "docstring": "Apply a function to the ``return`` value of a generator\n\n Parameters\n ----------\n func: ~typing.Callable[[T_return], T_mapped]\n the function to apply\n gen: Generable[T_yield, T_send, T_return]\n the generator iterable.\n\n Returns\n -------\n ~typing.Generator[T_yield, T_send, T_mapped]", "id": "f3305:m8"} {"signature": "@py2_compatibledef irelay(gen, thru):", "body": "gen = 
iter(gen)assert _is_just_started(gen)yielder = yield_from(gen)for item in yielder:with yielder:subgen = thru(item)subyielder = yield_from(subgen)for subitem in subyielder:with subyielder:subyielder.send((yield subitem))yielder.send(subyielder.result)return_(yielder.result)", "docstring": "Create a new generator by relaying yield/send interactions\n through another generator\n\n Parameters\n ----------\n gen: Generable[T_yield, T_send, T_return]\n the original generator\n thru: ~typing.Callable[[T_yield], ~typing.Generator]\n the generator callable through which each interaction is relayed\n\n Returns\n -------\n ~typing.Generator\n the relayed generator", "id": "f3305:m9"} {"signature": "def modify(self):", "body": "pass", "docstring": "Sub test classes can modify the blok here for testing, before all tests are ran", "id": "f3310:c0:m1"} {"signature": "def add(self, *names):", "body": "def decorator(blok):for name in names or (blok.__name__, ):self[name] = blokreturn blokreturn decorator", "docstring": "Returns back a class decorator that enables registering Blox to this factory", "id": "f3315:c0:m1"} {"signature": "def __call__(self, product_name, **properties):", "body": "if not product_name in self:return self.default(tag=product_name, **properties)return self[product_name](**properties)", "docstring": "Builds and returns a Blok object", "id": "f3315:c0:m2"} {"signature": "def output(self, to=None, *args, **kwargs):", "body": "for blok in self:blok.output(to, *args, **kwargs)return self", "docstring": "Outputs to a stream (like a file or request)", "id": "f3317:c0:m3"} {"signature": "def render(self, *args, **kwargs):", "body": "render_to = StringIO()self.output(render_to, *args, **kwargs)return render_to.getvalue()", "docstring": "Renders as a str", "id": "f3317:c0:m4"} {"signature": "def __new__(metaclass, name, parents, class_dict, *kargs, **kwargs):", "body": "attributes = {name: attribute for name, attribute in class_dict.items() if isinstance(attribute,AbstractAttribute)}if attributes:if hasattr(parents[], ''):full_attributes = parents[].attribute_descriptors.copy()full_attributes.update(attributes)attributes = full_attributesblok_attributes = {}render_attributes = []direct_attributes = []init_attributes = []accessor_attributes = []attribute_map = {}for attribute_name, attribute in attributes.items():if not hasattr(attribute, ''):attribute.name = attribute_nameif isinstance(attribute, DirectAttribute):direct_attributes.append(attribute)if hasattr(attribute, ''):render_attributes.append(attribute)if not hasattr(attribute, ''):attribute.object_attribute = ''.format(attribute_name)if getattr(attribute, '', False):init_attributes.append(attribute_name)if isinstance(attribute, (BlokAttribute, NestedBlokAttribute)) and hasattr(attribute.type, ''):blok_attributes[attribute.type.tag] = attributeif isinstance(attribute, AccessorAttribute):accessor_attributes.append(attribute)if not hasattr(attribute, ''):attribute.parent_attribute = ''.format(attribute_name)attribute_map[attribute.name] = attribute_nameif direct_attributes and not name == '' and '' in class_dict:class_dict[''] += tuple(attribute.object_attribute for attribute in direct_attributes)class_dict[''] += tuple(attribute.parent_attribute for attribute in accessor_attributes)if render_attributes:if hasattr(parents[], ''):render_attributes = list(parents[].render_attributes) + render_attributesclass_dict[''] = set(render_attributes)if init_attributes:if hasattr(parents[], ''):init_attributes = list(parents[].init_attributes) + 
init_attributesclass_dict[''] = init_attributesif blok_attributes:if hasattr(parents[], ''):full_blok_attributes = dict(parents[].blok_attributes)full_blok_attributes.update(blok_attributes)blok_attributes = full_blok_attributesclass_dict[''] = blok_attributesif attribute_map:if hasattr(parents[], ''):full_attribute_map = dict(parents[].attribute_map)full_attribute_map.update(attribute_map)attribute_map = full_attribute_mapclass_dict[''] = attribute_mapclass_dict[''] = attributesattribute_signals = (attribute.signal for attribute in attributes.values() if getattr(attribute, ''))if attribute_signals:class_dict[''] = class_dict.get('', ()) + tuple(attribute_signals)return super(TagAttributes, metaclass).__new__(metaclass, name, parents, class_dict, *kargs, **kwargs)", "docstring": "Updates a tag class to automatically register all signals", "id": "f3317:c1:m0"} {"signature": "def output(self, to=None, *args, **kwargs):", "body": "to.write('')return self", "docstring": "Outputs to a stream (like a file or request)", "id": "f3317:c2:m0"} {"signature": "def render(self, *args, **kwargs):", "body": "render_to = StringIO()self.output(render_to, *args, **kwargs)return render_to.getvalue()", "docstring": "Renders as a str", "id": "f3317:c2:m1"} {"signature": "@propertydef blox_container(self):", "body": "return self", "docstring": "Returns the container that should be responsible adding children, outside of init", "id": "f3317:c4:m1"} {"signature": "@propertydef blox(self):", "body": "if not hasattr(self, ''):self._blox = Blox()return self._blox", "docstring": "Lazily creates and returns the list of child blox", "id": "f3317:c4:m2"} {"signature": "def __call__(self, *blox, position=None):", "body": "if position is not None:for blok in blox:self.blox_container.blox.insert(position, blok)else:for blok in blox:self.blox_container.blox.append(blok)return blok", "docstring": "Adds a nested blok to this blok", "id": "f3317:c4:m3"} {"signature": "def output(self, to=None, formatted=False, indent=, indentation='', *args, **kwargs):", "body": "if formatted and self.blox:self.blox[].output(to=to, formatted=True, indent=indent, indentation=indentation, *args, **kwargs)for blok in self.blox[:]:to.write('')to.write(indent * indentation)blok.output(to=to, formatted=True, indent=indent, indentation=indentation, *args, **kwargs)if not indent:to.write('')else:for blok in self.blox:blok.output(to=to, *args, **kwargs)return self", "docstring": "Outputs to a stream (like a file or request)", "id": "f3317:c4:m14"} {"signature": "@propertydef attributes(self):", "body": "if not hasattr(self, ''):self._attributes = {}return self._attributes", "docstring": "Lazily creates and returns a tags attributes", "id": "f3317:c5:m1"} {"signature": "@propertydef start_tag(self):", "body": "direct_attributes = (attribute.render(self) for attribute in self.render_attributes)attributes = ()if hasattr(self, ''):attributes = (''.format(key, value)for key, value in self.attributes.items() if value)rendered_attributes = \"\".join(filter(bool, chain(direct_attributes, attributes)))return ''.format(self.tag, '' if rendered_attributes else '',rendered_attributes, '' if self.tag_self_closes else \"\")", "docstring": "Returns the elements HTML start tag", "id": "f3317:c5:m2"} {"signature": "@propertydef end_tag(self):", "body": "if self.tag_self_closes:return ''return \"\".format(self.tag)", "docstring": "Returns the elements HTML end tag", "id": "f3317:c5:m3"} {"signature": "def output(self, to=None, *args, **kwargs):", "body": 
"to.write(self.start_tag)if not self.tag_self_closes:to.write(self.end_tag)", "docstring": "Outputs to a stream (like a file or request)", "id": "f3317:c5:m4"} {"signature": "def output(self, to=None, formatted=False, indent=, indentation='', *args, **kwargs):", "body": "if formatted:to.write(self.start_tag)to.write('')if not self.tag_self_closes:for blok in self.blox:to.write(indentation * (indent + ))blok.output(to=to, indent=indent + , formatted=True, indentation=indentation, *args, **kwargs)to.write('')to.write(indentation * indent)to.write(self.end_tag)if not indentation:to.write('')else:to.write(self.start_tag)if not self.tag_self_closes:for blok in self.blox:blok.output(to=to, *args, **kwargs)to.write(self.end_tag)", "docstring": "Outputs to a stream (like a file or request)", "id": "f3317:c8:m1"} {"signature": "def output(self, to=None, *args, **kwargs):", "body": "to.write(str(self._value))", "docstring": "Outputs the set text", "id": "f3318:c0:m3"} {"signature": "def __call__(self, text):", "body": "self.value = textreturn self", "docstring": "Updates the text value", "id": "f3318:c0:m4"} {"signature": "def output(self, to=None, *args, **kwargs):", "body": "to.write(cgi.escape(str(self._value)))", "docstring": "Outputs the set text", "id": "f3318:c1:m0"} {"signature": "def string(html, start_on=None, ignore=(), use_short=True, **queries):", "body": "if use_short:html = grow_short(html)return _to_template(fromstring(html), start_on=start_on,ignore=ignore, **queries)", "docstring": "Returns a blox template from an html string", "id": "f3320:m0"} {"signature": "def file(file_object, start_on=None, ignore=(), use_short=True, **queries):", "body": "return string(file_object.read(), start_on=start_on, ignore=ignore, use_short=use_short, **queries)", "docstring": "Returns a blox template from a file stream object", "id": "f3320:m1"} {"signature": "def filename(file_name, start_on=None, ignore=(), use_short=True, **queries):", "body": "with open(file_name) as template_file:return file(template_file, start_on=start_on, ignore=ignore, use_short=use_short, **queries)", "docstring": "Returns a blox template from a valid file path", "id": "f3320:m2"} {"signature": "def serve(self, sock, request_handler, error_handler, debug=False,request_timeout=, ssl=None, request_max_size=None,reuse_port=False, loop=None, protocol=HttpProtocol,backlog=, **kwargs):", "body": "if debug:loop.set_debug(debug)server = partial(protocol,loop=loop,connections=self.connections,signal=self.signal,request_handler=request_handler,error_handler=error_handler,request_timeout=request_timeout,request_max_size=request_max_size,)server_coroutine = loop.create_server(server,host=None,port=None,ssl=ssl,reuse_port=reuse_port,sock=sock,backlog=backlog)loop.call_soon(partial(update_current_time, loop))return server_coroutine", "docstring": "Start asynchronous HTTP Server on an individual process.\n\n :param request_handler: Sanic request handler with middleware\n :param error_handler: Sanic error handler with middleware\n :param debug: enables debug output (slows server)\n :param request_timeout: time in seconds\n :param ssl: SSLContext\n :param sock: Socket for the server to accept connections from\n :param request_max_size: size in bytes, `None` for no limit\n :param reuse_port: `True` for multiple workers\n :param loop: asyncio compatible event loop\n :param protocol: subclass of asyncio protocol class\n :return: Nothing", "id": "f3325:c0:m5"} {"signature": "@staticmethoddef _create_ssl_context(cfg):", "body": "ctx = 
ssl.SSLContext(cfg.ssl_version)ctx.load_cert_chain(cfg.certfile, cfg.keyfile)ctx.verify_mode = cfg.cert_reqsif cfg.ca_certs:ctx.load_verify_locations(cfg.ca_certs)if cfg.ciphers:ctx.set_ciphers(cfg.ciphers)return ctx", "docstring": "Creates SSLContext instance for usage in asyncio.create_server.\n See ssl.SSLSocket.__init__ for more details.", "id": "f3325:c0:m8"} {"signature": "def _pop_none(self, kwargs):", "body": "for key, value in copy(kwargs).items():if value is None or value == ():kwargs.pop(key)if hasattr(value, ''):kwargs[key] = value.read()", "docstring": "Remove default values (anything where the value is None). click is unfortunately bad at the way it\n sends through unspecified defaults.", "id": "f3329:c1:m0"} {"signature": "def _lookup(self, fail_on_missing=False, fail_on_found=False, include_debug_header=True, **kwargs):", "body": "read_params = {}for field_name in self.identity:if field_name in kwargs:read_params[field_name] = kwargs[field_name]if '' in self.identity and len(self.identity) == :return {}if not read_params:raise exc.BadRequest('''')try:existing_data = self.get(include_debug_header=include_debug_header, **read_params)if fail_on_found:raise exc.Found('' %read_params)return existing_dataexcept exc.NotFound:if fail_on_missing:raise exc.NotFound('' %read_params)return {}", "docstring": "=====API DOCS=====\nAttempt to perform a lookup that is expected to return a single result, and return the record.\n\nThis method is a wrapper around `get` that strips out non-unique keys, and is used internally by\n`write` and `delete`.\n\n:param fail_on_missing: Flag that raise exception if no resource is found.\n:type fail_on_missing: bool\n:param fail_on_found: Flag that raise exception if a resource is found.\n:type fail_on_found: bool\n:param include_debug_header: Flag determining whether to print debug messages when querying\n Tower backend.\n:type include_debug_header: bool\n:param `**kwargs`: Keyword arguments list of available fields used for searching resource.\n:returns: A JSON object containing details of the resource returned by Tower backend.\n:rtype: dict\n\n:raises tower_cli.exceptions.BadRequest: When no field are provided in kwargs.\n:raises tower_cli.exceptions.Found: When a resource is found and fail_on_found flag is on.\n:raises tower_cli.exceptions.NotFound: When no resource is found and fail_on_missing flag\n is on.\n=====API DOCS=====", "id": "f3329:c1:m1"} {"signature": "def _convert_pagenum(self, kwargs):", "body": "for key in ('', ''):if not kwargs.get(key):continuematch = re.search(r'', kwargs[key])if match is None and key == '':kwargs[key] = continuekwargs[key] = int(match.groupdict()[''])", "docstring": "Convert next and previous from URLs to integers", "id": "f3329:c1:m2"} {"signature": "def read(self, pk=None, fail_on_no_results=False, fail_on_multiple_results=False, **kwargs):", "body": "url = self.endpointif pk:url += '' % pkqueries = kwargs.pop('', [])self._pop_none(kwargs)for field in self.fields:if field.no_lookup and field.name in kwargs:kwargs.pop(field.name)params = list(kwargs.items())for query in queries:params.append((query[], query[]))r = client.get(url, params=params)resp = r.json()if pk:return {'': , '': [resp]}if fail_on_no_results and resp[''] == :raise exc.NotFound('')if fail_on_multiple_results and resp[''] >= :raise exc.MultipleResults('''' % resp[''])return resp", "docstring": "=====API DOCS=====\nRetrieve and return objects from the Ansible Tower API.\n\n:param pk: Primary key of the resource to be read. 
Tower CLI will only attempt to read that object\n if ``pk`` is provided (not ``None``).\n:type pk: int\n:param fail_on_no_results: Flag that if set, zero results is considered a failure case and raises\n an exception; otherwise, empty list is returned. (Note: This is always True\n if a primary key is included.)\n:type fail_on_no_results: bool\n:param fail_on_multiple_results: Flag that if set, at most one result is expected, and more results\n constitutes a failure case. (Note: This is meaningless if a primary\n key is included, as there can never be multiple results.)\n:type fail_on_multiple_results: bool\n:param query: Contains 2-tuples used as query parameters to filter resulting resource objects.\n:type query: list\n:param `**kwargs`: Keyword arguments which, all together, will be used as query parameters to filter\n resulting resource objects.\n:returns: loaded JSON from Tower backend response body.\n:rtype: dict\n:raises tower_cli.exceptions.BadRequest: When 2-tuples in ``query`` overlaps key-value pairs in\n ``**kwargs``.\n:raises tower_cli.exceptions.NotFound: When no objects are found and ``fail_on_no_results`` flag is on.\n:raises tower_cli.exceptions.MultipleResults: When multiple objects are found and\n ``fail_on_multiple_results`` flag is on.\n\n=====API DOCS=====", "id": "f3329:c1:m3"} {"signature": "def _get_patch_url(self, url, pk):", "body": "return url + '' % pk", "docstring": "Overwrite this method to handle specific corner cases to the url passed to PATCH method.", "id": "f3329:c1:m4"} {"signature": "def write(self, pk=None, create_on_missing=False, fail_on_found=False, force_on_exists=True, **kwargs):", "body": "existing_data = {}self._pop_none(kwargs)if not pk:debug.log('', header='')existing_data = self._lookup(fail_on_found=fail_on_found, fail_on_missing=not create_on_missing, include_debug_header=False,**kwargs)if existing_data:pk = existing_data['']else:debug.log('', header='')existing_data = self.get(pk)missing_fields = []for i in self.fields:if i.key not in kwargs and i.name not in kwargs and i.required:missing_fields.append(i.key or i.name)if missing_fields and not pk:raise exc.BadRequest('' % ''.join(missing_fields).replace('', ''))if pk and not force_on_exists:debug.log('', header='', nl=)answer = OrderedDict((('', False), ('', pk)))answer.update(existing_data)return answerif all([kwargs[k] == existing_data.get(k, None) for k in kwargs.keys()]):debug.log('', header='', nl=)answer = OrderedDict((('', False), ('', pk)))answer.update(existing_data)return answerfor key in kwargs:if kwargs[key] == '':kwargs[key] = Noneurl = self.endpointmethod = ''if pk:url = self._get_patch_url(url, pk)method = ''debug.log('', header='')r = getattr(client, method.lower())(url, data=kwargs)answer = OrderedDict((('', True), ('', r.json()[''])))answer.update(r.json())return answer", "docstring": "=====API DOCS=====\nModify the given object using the Ansible Tower API.\n\n:param pk: Primary key of the resource to be read. 
Tower CLI will only attempt to read that object\n if ``pk`` is provided (not ``None``).\n:type pk: int\n:param create_on_missing: Flag that if set, a new object is created if ``pk`` is not set and objects\n matching the appropriate unique criteria is not found.\n:type create_on_missing: bool\n:param fail_on_found: Flag that if set, the operation fails if an object matching the unique criteria\n already exists.\n:type fail_on_found: bool\n:param force_on_exists: Flag that if set, then if an object is modified based on matching via unique\n fields (as opposed to the primary key), other fields are updated based on data\n sent; If unset, then the non-unique values are only written in a creation case.\n:type force_on_exists: bool\n:param `**kwargs`: Keyword arguments which, all together, will be used as POST/PATCH body to create/modify\n the resource object. If ``pk`` is not set, key-value pairs of ``**kwargs`` which are\n also in resource's identity will be used to lookup existing resource.\n:returns: A dictionary combining the JSON output of the resource, as well as two extra fields: \"changed\",\n a flag indicating if the resource is created or successfully updated; \"id\", an integer which\n is the primary key of the specified object.\n:rtype: dict\n:raises tower_cli.exceptions.BadRequest: When required fields are missing in ``**kwargs`` when creating\n a new resource object.\n\n=====API DOCS=====", "id": "f3329:c1:m5"} {"signature": "@resources.commanddef delete(self, pk=None, fail_on_missing=False, **kwargs):", "body": "if not pk:existing_data = self._lookup(fail_on_missing=fail_on_missing, **kwargs)if not existing_data:return {'': False}pk = existing_data['']url = '' % (self.endpoint, pk)debug.log('' % url, fg='', bold=True)try:client.delete(url)return {'': True}except exc.NotFound:if fail_on_missing:raisereturn {'': False}", "docstring": "Remove the given object.\n\n If `fail_on_missing` is True, then the object's not being found is considered a failure; otherwise,\n a success with no change is reported.\n\n =====API DOCS=====\n Remove the given object.\n\n :param pk: Primary key of the resource to be deleted.\n :type pk: int\n :param fail_on_missing: Flag that if set, the object's not being found is considered a failure; otherwise,\n a success with no change is reported.\n :type fail_on_missing: bool\n :param `**kwargs`: Keyword arguments used to look up resource object to delete if ``pk`` is not provided.\n :returns: dictionary of only one field \"changed\", which is a flag indicating whether the specified resource\n is successfully deleted.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3329:c1:m6"} {"signature": "@resources.command(ignore_defaults=True)def get(self, pk=None, **kwargs):", "body": "if kwargs.pop('', True):debug.log('', header='')response = self.read(pk=pk, fail_on_no_results=True, fail_on_multiple_results=True, **kwargs)return response[''][]", "docstring": "Return one and exactly one object.\n\n Lookups may be through a primary key, specified as a positional argument, and/or through filters specified\n through keyword arguments.\n\n If the number of results does not equal one, raise an exception.\n\n =====API DOCS=====\n Retrieve one and exactly one object.\n\n :param pk: Primary key of the resource to be read. 
Tower CLI will only attempt to read *that* object\n if ``pk`` is provided (not ``None``).\n :type pk: int\n :param `**kwargs`: Keyword arguments used to look up resource object to retrieve if ``pk`` is not provided.\n :returns: loaded JSON of the retrieved resource object.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3329:c1:m7"} {"signature": "@resources.command(ignore_defaults=True, no_args_is_help=False)@click.option('', '', '', is_flag=True, default=False, show_default=True,help='')@click.option('', default=, type=int, show_default=True,help='')@click.option('', type=int, show_default=True, required=False,help='')@click.option('', '', required=False, nargs=, multiple=True,help='''''')def list(self, all_pages=False, **kwargs):", "body": "if kwargs.get('', None) and '' in kwargs['']:all_status = kwargs.pop('').strip('').split('')queries = list(kwargs.pop('', ()))for status in all_status:if status in STATUS_CHOICES:queries.append(('', status))else:raise exc.TowerCLIError(''.format(status))kwargs[''] = tuple(queries)if all_pages:kwargs.pop('', None)kwargs.pop('', None)debug.log('', header='')response = self.read(**kwargs)self._convert_pagenum(response)if all_pages and response['']:cursor = copy(response)while cursor['']:cursor = self.read(**dict(kwargs, page=cursor['']))self._convert_pagenum(cursor)response[''] += cursor['']response[''] += cursor['']response[''] = Nonereturn response", "docstring": "Return a list of objects.\n\n If one or more filters are provided through keyword arguments, filter the results accordingly.\n\n If no filters are provided, return all results.\n\n =====API DOCS=====\n Retrieve a list of objects.\n\n :param all_pages: Flag that if set, collect all pages of content from the API when returning results.\n :type all_pages: bool\n :param page: The page to show. 
Ignored if all_pages is set.\n :type page: int\n :param query: Contains 2-tuples used as query parameters to filter resulting resource objects.\n :type query: list\n :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.\n :returns: A JSON object containing details of all resource objects returned by Tower backend.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3329:c1:m8"} {"signature": "def _assoc(self, url_fragment, me, other):", "body": "url = self.endpoint + '' % (me, url_fragment)r = client.get(url, params={'': other}).json()if r[''] > :return {'': False}r = client.post(url, data={'': True, '': other})return {'': True}", "docstring": "Associate the `other` record with the `me` record.", "id": "f3329:c1:m9"} {"signature": "def _disassoc(self, url_fragment, me, other):", "body": "url = self.endpoint + '' % (me, url_fragment)r = client.get(url, params={'': other}).json()if r[''] == :return {'': False}r = client.post(url, data={'': True, '': other})return {'': True}", "docstring": "Disassociate the `other` record from the `me` record.", "id": "f3329:c1:m10"} {"signature": "@resources.command@click.option('', default=False, show_default=True, type=bool, is_flag=True,help='')@click.option('', default=False, show_default=True, type=bool, is_flag=True,help='''')def create(self, **kwargs):", "body": "return self.write(create_on_missing=True, **kwargs)", "docstring": "Create an object.\n\n Fields in the resource's `identity` tuple are used for a lookup; if a match is found, then no-op\n (unless `force_on_exists` is set) but do not fail (unless `fail_on_found` is set).\n\n =====API DOCS=====\n Create an object.\n\n :param fail_on_found: Flag that if set, the operation fails if an object matching the unique criteria\n already exists.\n :type fail_on_found: bool\n :param force_on_exists: Flag that if set, then if a match is found on unique fields, other fields will\n be updated to the provided values.; If unset, a match causes the request to be\n a no-op.\n :type force_on_exists: bool\n :param `**kwargs`: Keyword arguments which, all together, will be used as POST body to create the\n resource object.\n :returns: A dictionary combining the JSON output of the created resource, as well as two extra fields:\n \"changed\", a flag indicating if the resource is created successfully; \"id\", an integer which\n is the primary key of the created object.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3329:c2:m0"} {"signature": "@resources.command(ignore_defaults=True)@click.option('', default=None,help='''')def copy(self, pk=None, new_name=None, **kwargs):", "body": "orig = self.read(pk, fail_on_no_results=True, fail_on_multiple_results=True)orig = orig[''][]self._pop_none(kwargs)newresource = copy(orig)newresource.pop('')basename = newresource[''].split('', )[].strip()for field in self.fields:if field.multiple and field.name in newresource:newresource[field.name] = (newresource.get(field.name),)if new_name is None:newresource[''] = \"\" % (basename, time.strftime(''))newresource.update(kwargs)return self.write(create_on_missing=True, fail_on_found=True,**newresource)else:if kwargs:raise exc.TowerCLIError(''.format(kwargs.keys()))copy_endpoint = ''.format(self.endpoint.strip(''), pk)return client.post(copy_endpoint, data={'': new_name}).json()", "docstring": "Copy an object.\n\n Only the ID is used for the lookup. 
All provided fields are used to override the old data from the\n copied resource.\n\n =====API DOCS=====\n Copy an object.\n\n :param pk: Primary key of the resource object to be copied\n :param new_name: The new name to give the resource if deep copying via the API\n :type pk: int\n :param `**kwargs`: Keyword arguments of fields whose given value will override the original value.\n :returns: loaded JSON of the copied new resource object.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3329:c2:m1"} {"signature": "@resources.command(ignore_defaults=True)@click.option('', default=False, show_default=True, type=bool, is_flag=True,help='''')def modify(self, pk=None, create_on_missing=False, **kwargs):", "body": "return self.write(pk, create_on_missing=create_on_missing, force_on_exists=True, **kwargs)", "docstring": "Modify an already existing object.\n\n Fields in the resource's `identity` tuple can be used in lieu of a primary key for a lookup; in such a case,\n only other fields are written.\n\n To modify unique fields, you must use the primary key for the lookup.\n\n =====API DOCS=====\n Modify an already existing object.\n\n :param pk: Primary key of the resource to be modified.\n :type pk: int\n :param create_on_missing: Flag that if set, a new object is created if ``pk`` is not set and objects\n matching the appropriate unique criteria is not found.\n :type create_on_missing: bool\n :param `**kwargs`: Keyword arguments which, all together, will be used as PATCH body to modify the\n resource object. If ``pk`` is not set, key-value pairs of ``**kwargs`` which are\n also in resource's identity will be used to lookup existing resource.\n :returns: A dictionary combining the JSON output of the modified resource, as well as two extra fields:\n \"changed\", a flag indicating if the resource is successfully updated; \"id\", an integer which\n is the primary key of the updated object.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3329:c2:m2"} {"signature": "def status(self, pk, detail=False):", "body": "raise NotImplementedError('')", "docstring": "A stub method requesting the status of the resource.", "id": "f3329:c4:m1"} {"signature": "def last_job_data(self, pk=None, **kwargs):", "body": "ujt = self.get(pk, include_debug_header=True, **kwargs)if '' in ujt['']:debug.log('', header='')return client.get(ujt[''][''][:]).json()elif ujt[''].get('', None):debug.log('', header='')return client.get(ujt[''][''][:]).json()else:raise exc.NotFound('')", "docstring": "Internal utility function for Unified Job Templates. Returns data about the last job run off of that UJT", "id": "f3329:c4:m2"} {"signature": "def lookup_stdout(self, pk=None, start_line=None, end_line=None, full=True):", "body": "stdout_url = '' % (self.unified_job_type, pk)payload = {'': '', '': '', '': ''}if start_line:payload[''] = start_lineif end_line:payload[''] = end_linedebug.log('', header='')resp = client.get(stdout_url, params=payload).json()content = b64decode(resp[''])return content.decode('', '')", "docstring": "Internal utility function to return standard out. 
Requires the pk of a unified job.", "id": "f3329:c4:m3"} {"signature": "@resources.command@click.option('', required=False, type=int, help='')@click.option('', required=False, type=int, help='')@click.option('', required=False, type=str, help='')def stdout(self, pk, start_line=None, end_line=None, outfile=sys.stdout, **kwargs):", "body": "if self.unified_job_type != self.endpoint:unified_job = self.last_job_data(pk, **kwargs)pk = unified_job['']elif not pk:unified_job = self.get(**kwargs)pk = unified_job['']content = self.lookup_stdout(pk, start_line, end_line)opened = Falseif isinstance(outfile, six.string_types):outfile = open(outfile, '')opened = Trueif len(content) > :click.echo(content, nl=, file=outfile)if opened:outfile.close()return {\"\": False}", "docstring": "Print out the standard out of a unified job to the command line or output file.\nFor Projects, print the standard out of most recent update.\nFor Inventory Sources, print standard out of most recent sync.\nFor Jobs, print the job's standard out.\nFor Workflow Jobs, print a status table of its jobs.\n\n=====API DOCS=====\nPrint out the standard out of a unified job to the command line or output file.\nFor Projects, print the standard out of most recent update.\nFor Inventory Sources, print standard out of most recent sync.\nFor Jobs, print the job's standard out.\nFor Workflow Jobs, print a status table of its jobs.\n\n:param pk: Primary key of the job resource object to be monitored.\n:type pk: int\n:param start_line: Line at which to start printing job output\n:param end_line: Line at which to end printing job output\n:param outfile: Alternative file than stdout to write job stdout to.\n:type outfile: file\n:param `**kwargs`: Keyword arguments used to look up job resource object to monitor if ``pk`` is\n not provided.\n:returns: A dictionary containing changed=False\n:rtype: dict\n\n=====API DOCS=====", "id": "f3329:c4:m4"} {"signature": "@resources.command@click.option('', default=, help='')@click.option('', required=False, type=int,help='')def monitor(self, pk, parent_pk=None, timeout=None, interval=, outfile=sys.stdout, **kwargs):", "body": "if pk is None:pk = self.last_job_data(parent_pk, **kwargs)['']job_endpoint = '' % (self.unified_job_type, pk)self.wait(pk, exit_on=['', ''], outfile=outfile)start = time.time()start_line = result = client.get(job_endpoint).json()click.echo('', nl=, file=outfile)while not result[''] and result[''] != '':result = client.get(job_endpoint).json()time.sleep(interval)content = self.lookup_stdout(pk, start_line, full=False)if not content.startswith(\"\"):line_count = len(content.splitlines())start_line += line_countclick.echo(content, nl=, file=outfile)if timeout and time.time() - start > timeout:raise exc.Timeout('')if self.endpoint == '':click.echo(self.lookup_stdout(pk, start_line, full=True), nl=)click.echo('', nl=, file=outfile)if result['']:raise exc.JobFailure('')answer = OrderedDict((('', True), ('', pk)))answer.update(result)if parent_pk:answer[''] = parent_pkelse:answer[''] = pkreturn answer", "docstring": "Stream the standard output from a job, project update, or inventory update.\n\n=====API DOCS=====\nStream the standard output from a job run to stdout.\n\n:param pk: Primary key of the job resource object to be monitored.\n:type pk: int\n:param parent_pk: Primary key of the unified job template resource object whose latest job run will be\n monitored if ``pk`` is not set.\n:type parent_pk: int\n:param timeout: Number in seconds after which this method will time out.\n:type 
timeout: float\n:param interval: Polling interval to refresh content from Tower.\n:type interval: float\n:param outfile: Alternative file than stdout to write job stdout to.\n:type outfile: file\n:param `**kwargs`: Keyword arguments used to look up job resource object to monitor if ``pk`` is\n not provided.\n:returns: A dictionary combining the JSON output of the finished job resource object, as well as\n two extra fields: \"changed\", a flag indicating if the job resource object is finished\n as expected; \"id\", an integer which is the primary key of the job resource object being\n monitored.\n:rtype: dict\n:raises tower_cli.exceptions.Timeout: When monitor time reaches time out.\n:raises tower_cli.exceptions.JobFailure: When the job being monitored runs into failure.\n\n=====API DOCS=====", "id": "f3329:c4:m5"} {"signature": "@resources.command@click.option('', default=, help='')@click.option('', default=, help='')@click.option('', required=False, type=int,help='')def wait(self, pk, parent_pk=None, min_interval=, max_interval=, timeout=None, outfile=sys.stdout,exit_on=[''], **kwargs):", "body": "if pk is None:pk = self.last_job_data(parent_pk, **kwargs)['']job_endpoint = '' % (self.unified_job_type, pk)dots = itertools.cycle([, , , ])longest_string = interval = min_intervalstart = time.time()result = client.get(job_endpoint).json()last_poll = time.time()timeout_check = while result[''] not in exit_on:if result['']:if is_tty(outfile) and not settings.verbose:secho('' + '' * longest_string + '', file=outfile)raise exc.JobFailure('')if timeout and timeout_check - start > timeout:raise exc.Timeout('')output = '' % (result[''], '' * next(dots))if longest_string > len(output):output += '' * (longest_string - len(output))else:longest_string = len(output)if is_tty(outfile) and not settings.verbose:secho(output, nl=False, file=outfile)time.sleep()timeout_check = time.time()if timeout and timeout_check - start > timeout:last_poll -= intervalif time.time() - last_poll > interval:result = client.get(job_endpoint).json()last_poll = time.time()interval = min(interval * , max_interval)if not is_tty(outfile) or settings.verbose:click.echo('' % result[''], file=outfile)if is_tty(outfile) and not settings.verbose:secho('' + '' * longest_string, file=outfile, nl=False)secho('', file=outfile, nl=False)answer = OrderedDict((('', True), ('', pk)))answer.update(result)if parent_pk:answer[''] = parent_pkelse:answer[''] = pkreturn answer", "docstring": "Wait for a running job to finish. 
Blocks further input until the job completes (whether successfully\nor unsuccessfully) and a final status can be given.\n\n=====API DOCS=====\nWait for a job resource object to enter certain status.\n\n:param pk: Primary key of the job resource object to wait.\n:type pk: int\n:param parent_pk: Primary key of the unified job template resource object whose latest job run will be\n waited if ``pk`` is not set.\n:type parent_pk: int\n:param timeout: Number in seconds after which this method will time out.\n:type timeout: float\n:param min_interval: Minimum polling interval to request an update from Tower.\n:type min_interval: float\n:param max_interval: Maximum polling interval to request an update from Tower.\n:type max_interval: float\n:param outfile: Alternative file than stdout to write job status updates on.\n:type outfile: file\n:param exit_on: Job resource object statuses to wait on.\n:type exit_on: array\n:param `**kwargs`: Keyword arguments used to look up job resource object to wait if ``pk`` is\n not provided.\n:returns: A dictionary combining the JSON output of the status-changed job resource object, as well\n as two extra fields: \"changed\", a flag indicating if the job resource object is status-changed\n as expected; \"id\", an integer which is the primary key of the job resource object being\n status-changed.\n:rtype: dict\n:raises tower_cli.exceptions.Timeout: When wait time reaches time out.\n:raises tower_cli.exceptions.JobFailure: When the job being waited on runs into failure.\n=====API DOCS=====", "id": "f3329:c4:m6"} {"signature": "@resources.command@click.option('', is_flag=True, default=False, help='')def status(self, pk=None, detail=False, **kwargs):", "body": "self._pop_none(kwargs)if not pk:job = self.get(include_debug_header=True, **kwargs)else:debug.log('', header='')finished_endpoint = '' % (self.endpoint, pk)job = client.get(finished_endpoint).json()if detail:return jobreturn {'': job[''],'': job[''],'': job[''],}", "docstring": "Print the current job status. This is used to check a running job. 
You can look up the job with\n the same parameters used for a get request.\n\n =====API DOCS=====\n Retrieve the current job status.\n\n :param pk: Primary key of the resource to retrieve status from.\n :type pk: int\n :param detail: Flag that if set, return the full JSON of the job resource rather than a status summary.\n :type detail: bool\n :param `**kwargs`: Keyword arguments used to look up resource object to retrieve status from if ``pk``\n is not provided.\n :returns: full loaded JSON of the specified unified job if ``detail`` flag is on; trimed JSON containing\n only \"elapsed\", \"failed\" and \"status\" fields of the unified job if ``detail`` flag is off.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3329:c5:m0"} {"signature": "@resources.command@click.option('', is_flag=True, default=False,help='')def cancel(self, pk=None, fail_if_not_running=False, **kwargs):", "body": "if not pk:existing_data = self.get(**kwargs)pk = existing_data['']cancel_endpoint = '' % (self.endpoint, pk)try:client.post(cancel_endpoint)changed = Trueexcept exc.MethodNotAllowed:changed = Falseif fail_if_not_running:raise exc.TowerCLIError('')return {'': '', '': changed}", "docstring": "Cancel a currently running job.\n\n Fails with a non-zero exit status if the job cannot be canceled.\n You must provide either a pk or parameters in the job's identity.\n\n =====API DOCS=====\n Cancel a currently running job.\n\n :param pk: Primary key of the job resource to restart.\n :type pk: int\n :param fail_if_not_running: Flag that if set, raise exception if the job resource cannot be canceled.\n :type fail_if_not_running: bool\n :param `**kwargs`: Keyword arguments used to look up job resource object to restart if ``pk`` is not\n provided.\n :returns: A dictionary of two keys: \"status\", which is \"canceled\", and \"changed\", which indicates if\n the job resource has been successfully canceled.\n :rtype: dict\n :raises tower_cli.exceptions.TowerCLIError: When the job resource cannot be canceled and\n ``fail_if_not_running`` flag is on.\n =====API DOCS=====", "id": "f3329:c5:m1"} {"signature": "@resources.commanddef relaunch(self, pk=None, **kwargs):", "body": "if not pk:existing_data = self.get(**kwargs)pk = existing_data['']relaunch_endpoint = '' % (self.endpoint, pk)data = {}answer = {}try:result = client.post(relaunch_endpoint, data=data).json()if '' in result:answer.update(result)answer[''] = Trueexcept exc.MethodNotAllowed:answer[''] = Falsereturn answer", "docstring": "Relaunch a stopped job.\n\n Fails with a non-zero exit status if the job cannot be relaunched.\n You must provide either a pk or parameters in the job's identity.\n\n =====API DOCS=====\n Relaunch a stopped job resource.\n\n :param pk: Primary key of the job resource to relaunch.\n :type pk: int\n :param `**kwargs`: Keyword arguments used to look up job resource object to relaunch if ``pk`` is not\n provided.\n :returns: A dictionary combining the JSON output of the relaunched job resource object, as well\n as an extra field \"changed\", a flag indicating if the job resource object is status-changed\n as expected.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3329:c5:m2"} {"signature": "@resources.commanddef survey(self, pk=None, **kwargs):", "body": "job_template = self.get(pk=pk, **kwargs)if settings.format == '':settings.format = ''return client.get(self._survey_endpoint(job_template[''])).json()", "docstring": "Get the survey_spec for the job template.\n To write a survey, use the modify command with the --survey-spec parameter.\n\n 
=====API DOCS=====\n Get the survey specification of a resource object.\n\n :param pk: Primary key of the resource to retrieve survey from. Tower CLI will only attempt to\n read *that* object if ``pk`` is provided (not ``None``).\n :type pk: int\n :param `**kwargs`: Keyword arguments used to look up resource object to retrieve survey if ``pk``\n is not provided.\n :returns: loaded JSON of the retrieved survey specification of the resource object.\n :rtype: dict\n =====API DOCS=====", "id": "f3329:c6:m2"} {"signature": "@propertydef help(self):", "body": "if self.help_text:return self.help_textreturn '' % self.name", "docstring": "Return the help text that was passed to the constructor, or a\n sensible default if none was provided.", "id": "f3330:c1:m3"} {"signature": "@propertydef option(self):", "body": "return '' + self.name.replace('', '')", "docstring": "Return the field name as a bash option string\n (e.g. \"--field-name\").", "id": "f3330:c1:m4"} {"signature": "def configure_model(self, attrs, field_name):", "body": "self.relationship = field_nameself._set_method_names(relationship=field_name)if self.res_name is None:self.res_name = grammar.singularize(attrs.get('', '').strip(''))", "docstring": "Hook for ResourceMeta class to call when initializing model class.\nSaves fields obtained from resource class backlinks", "id": "f3330:c2:m2"} {"signature": "def _produce_raw_method(self):", "body": "def method(res_self, **kwargs):obj_pk = kwargs.get(method._res_name)other_obj_pk = kwargs.get(method._other_name)internal_method = getattr(res_self, method._internal_name)return internal_method(method._relationship, obj_pk, other_obj_pk)return method", "docstring": "Returns a callable which becomes the associate or disassociate\nmethod for the related field.\nMethod can be overridden to add additional functionality, but\n`_produce_method` may also need to be subclassed to decorate\nit appropriately.", "id": "f3330:c2:m6"} {"signature": "def __repr__(self):", "body": "return '' + ''.join(['' % (k, v)for k, v in self.items()]) + ''", "docstring": "Print a repr that resembles dict's repr, but preserves\n key order.", "id": "f3332:c0:m0"} {"signature": "def parse_kv(var_string):", "body": "return_dict = {}if var_string is None:return {}fix_encoding_26 = Falseif sys.version_info < (, ) and '' in shlex.split(u'')[]:fix_encoding_26 = Trueis_unicode = Falseif fix_encoding_26 or not isinstance(var_string, str):if isinstance(var_string, six.text_type):var_string = var_string.encode('')is_unicode = Trueelse:var_string = str(var_string)for token in shlex.split(var_string):if (is_unicode):token = token.decode('')if fix_encoding_26:token = six.text_type(token)if '' in token:(k, v) = token.split('', )if len(k) == or len(v) == :raise Exceptiontry:return_dict[k] = ast.literal_eval(v)except Exception:return_dict[k] = velse:raise Exceptionreturn return_dict", "docstring": "Similar to the Ansible function of the same name, parses file\n with a key=value pattern and stores information in a dictionary,\n but not as fully featured as the corresponding Ansible code.", "id": "f3333:m0"} {"signature": "def string_to_dict(var_string, allow_kv=True, require_dict=True):", "body": "try:return_dict = yaml.load(var_string, Loader=yaml.SafeLoader)if require_dict:assert type(return_dict) is dictexcept (AttributeError, yaml.YAMLError, AssertionError):try:assert allow_kvreturn_dict = parse_kv(var_string)except Exception:raise exc.TowerCLIError('''' % var_string)return return_dict", "docstring": "Returns a dictionary given a string 
with yaml or json syntax.\n If data is not present in a key: value format, then it return\n an empty dictionary.\n\n Attempts processing string by 3 different methods in order:\n 1. as JSON 2. as YAML 3. as custom key=value syntax\n Throws an error if all of these fail in the standard ways.", "id": "f3333:m1"} {"signature": "def process_extra_vars(extra_vars_list, force_json=True):", "body": "extra_vars = {}extra_vars_yaml = \"\"for extra_vars_opt in extra_vars_list:if extra_vars_opt.startswith(\"\"):with open(extra_vars_opt[:], '') as f:extra_vars_opt = f.read()opt_dict = string_to_dict(extra_vars_opt, allow_kv=False)else:opt_dict = string_to_dict(extra_vars_opt, allow_kv=True)if any(line.startswith(\"\") for line in extra_vars_opt.split('')):extra_vars_yaml += extra_vars_opt + \"\"elif extra_vars_opt != \"\":extra_vars_yaml += yaml.dump(opt_dict, default_flow_style=False) + \"\"extra_vars.update(opt_dict)if not force_json:try:try_dict = yaml.load(extra_vars_yaml, Loader=yaml.SafeLoader)assert type(try_dict) is dictdebug.log('', header='', nl=)return extra_vars_yaml.rstrip()except Exception:debug.log('',header='', nl=)if extra_vars == {}:return \"\"return json.dumps(extra_vars, ensure_ascii=False)", "docstring": "Returns a string that is valid JSON or YAML and contains all the\n variables in every extra_vars_opt inside of extra_vars_list.\n\n Args:\n parse_kv (bool): whether to allow key=value syntax.\n force_json (bool): if True, always output json.", "id": "f3333:m2"} {"signature": "def ordered_dump(data, Dumper=yaml.Dumper, **kws):", "body": "class OrderedDumper(Dumper):passdef _dict_representer(dumper, data):return dumper.represent_mapping(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,data.items())OrderedDumper.add_representer(OrderedDict,_dict_representer)return yaml.dump(data, None, OrderedDumper, **kws)", "docstring": "Expand PyYAML's built-in dumper to support parsing OrderedDict. 
Return\n a string as parse result of the original data structure, which includes\n OrderedDict.\n\n Args:\n data: the data structure to be dumped(parsed) which is supposed to\n contain OrderedDict.\n Dumper: the yaml serializer to be expanded and used.\n kws: extra key-value arguments to be passed to yaml.dump.", "id": "f3333:m3"} {"signature": "@functools.wraps(click.secho)def secho(message, **kwargs):", "body": "if not settings.color:for key in ('', '', '', ''):kwargs.pop(key, None)return click.secho(message, **kwargs)", "docstring": "A wrapper around click.secho that disables any coloring being used\n if colors have been disabled.", "id": "f3335:m0"} {"signature": "def log(s, header='', file=sys.stderr, nl=, **kwargs):", "body": "if not settings.verbose:returnif header:word_arr = s.split('')multi = []word_arr.insert(, '' % header.upper())i = while i < len(word_arr):to_add = ['']count = while count <= :count += len(word_arr[i]) + if count <= :to_add.append(word_arr[i])i += if i == len(word_arr):breakif len(to_add) == :to_add.append(word_arr[i])i += if i != len(word_arr):count -= len(word_arr[i]) + to_add.append('' * ( - count))multi.append(''.join(to_add))s = ''.join(multi)lines = len(multi)else:lines = if isinstance(nl, int) and nl > lines:s += '' * (nl - lines)return secho(s, file=file, **kwargs)", "docstring": "Log the given output to stderr if and only if we are in\n verbose mode.\n\n If we are not in verbose mode, this is a no-op.", "id": "f3336:m0"} {"signature": "def unified_job_template_options(method):", "body": "jt_dec = click.option('', type=types.Related(''),help='')prj_dec = click.option('', type=types.Related(''),help='')inv_src_dec = click.option('', type=types.Related(''),help='')def ujt_translation(_method):def _ujt_translation(*args, **kwargs):for fd in ['', '', '']:if fd in kwargs and kwargs[fd] is not None:kwargs[''] = kwargs.pop(fd)return _method(*args, **kwargs)return functools.wraps(_method)(_ujt_translation)return ujt_translation(inv_src_dec(prj_dec(jt_dec(method))))", "docstring": "Adds the decorators for all types of unified job templates,\nand if the non-unified type is specified, converts it into the\nunified_job_template kwarg.", "id": "f3338:m0"} {"signature": "def get_resource(name):", "body": "module = importlib.import_module('' % name)return module.Resource()", "docstring": "Return an instance of the requested Resource class.\n\n Since all of the resource classes are named `Resource`, this provides\n a slightly cleaner interface for using these classes via. 
importing rather\n than through the CLI.", "id": "f3339:m0"} {"signature": "@click.command()@with_global_optionsdef version():", "body": "click.echo('' % __version__)click.echo('' % CUR_API_VERSION)try:r = client.get('')except RequestException as ex:raise exc.TowerCLIError('' %six.text_type(ex))config = r.json()license = config.get('', {}).get('', '')if license == '':server_type = ''else:server_type = ''click.echo('' % (server_type, config['']))click.echo('' % config[''])", "docstring": "Display full version information.", "id": "f3340:m0"} {"signature": "def _echo_setting(key):", "body": "value = getattr(settings, key)secho('' % key, fg='', bold=True, nl=False)secho(six.text_type(value),bold=True,fg='' if isinstance(value, six.text_type) else '',)", "docstring": "Echo a setting to the CLI.", "id": "f3340:m1"} {"signature": "@click.command()@click.argument('', required=False)@click.argument('', required=False)@click.option('', '', is_flag=True,help='''''')@click.option('', type=click.Choice(['', '', '']),default='',help='''''''''')@click.option('', is_flag=True,help='''')def config(key=None, value=None, scope='', global_=False, unset=False):", "body": "if global_:scope = ''warnings.warn('''',DeprecationWarning)if not key:seen = set()parser_desc = {'': '','': '','': '''''','': '''','': '''','': '',}click.echo('')for name, parser in zip(settings._parser_names, settings._parsers):will_echo = Falsefor option in parser.options(''):if option in seen:continuewill_echo = Trueif will_echo:secho('' % parser_desc[name], fg='', bold=True)for option in parser.options(''):if option in seen:continue_echo_setting(option)seen.add(option)if will_echo:click.echo('')returnif not hasattr(settings, key):raise exc.TowerCLIError('' % key)if value and unset:raise exc.UsageError('')if key and not value and not unset:_echo_setting(key)returnfilename = os.path.expanduser('')if scope == '':if not os.path.isdir(''):raise exc.TowerCLIError('''')filename = ''elif scope == '':filename = ''parser = Parser()parser.add_section('')parser.read(filename)if unset:parser.remove_option('', key)else:parser.set('', key, value)with open(filename, '') as config_file:parser.write(config_file)try:os.chmod(filename, stat.S_IRUSR | stat.S_IWUSR)except Exception as e:warnings.warn(''.format(filename, e),UserWarning)click.echo('')", "docstring": "Read or write tower-cli configuration.\n\n `tower config` saves the given setting to the appropriate Tower CLI;\n either the user's ~/.tower_cli.cfg file, or the /etc/tower/tower_cli.cfg\n file if --global is used.\n\n Writing to /etc/tower/tower_cli.cfg is likely to require heightened\n permissions (in other words, sudo).", "id": "f3340:m2"} {"signature": "@click.command()@click.argument('', required=True)@click.option('', required=True, prompt=True, hide_input=True)@click.option('', required=False)@click.option('', required=False)@click.option('', required=False, default='',type=click.Choice(['', '']))@click.option('', '', default=None,help='', is_flag=True,required=False, callback=_apply_runtime_setting, is_eager=True)def login(username, password, scope, client_id, client_secret, verbose):", "body": "if not supports_oauth():raise exc.TowerCLIError('')req = collections.namedtuple('', '')({})if client_id and client_secret:HTTPBasicAuth(client_id, client_secret)(req)req.headers[''] = ''r = client.post('',data={\"\": \"\",\"\": username,\"\": password,\"\": scope},headers=req.headers)elif client_id:req.headers[''] = ''r = client.post('',data={\"\": \"\",\"\": username,\"\": password,\"\": 
client_id,\"\": scope},headers=req.headers)else:HTTPBasicAuth(username, password)(req)r = client.post(''.format(username),data={\"\": \"\", \"\": None, \"\": scope},headers=req.headers)if r.ok:result = r.json()result.pop('', None)result.pop('', None)if client_id:token = result.pop('', None)else:token = result.pop('', None)if settings.verbose:result[''] = tokensecho(json.dumps(result, indent=), fg='', bold=True)config.main(['', token, ''])", "docstring": "Retrieves and stores an OAuth2 personal auth token.", "id": "f3340:m3"} {"signature": "@click.command()def logout():", "body": "if not supports_oauth():raise exc.TowerCLIError('')config.main(['', '', ''])", "docstring": "Removes an OAuth2 personal auth token from config.", "id": "f3340:m4"} {"signature": "@click.command()@with_global_options@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', is_flag=True)def receive(organization=None, user=None, team=None, credential_type=None, credential=None,notification_template=None, inventory_script=None, inventory=None, project=None, job_template=None,workflow=None, all=None):", "body": "from tower_cli.cli.transfer.receive import Receiverreceiver = Receiver()assets_to_export = {}for asset_type in SEND_ORDER:assets_to_export[asset_type] = locals()[asset_type]receiver.receive(all=all, asset_input=assets_to_export)", "docstring": "Export assets from Tower.\n\n 'tower receive' exports one or more assets from a Tower instance\n\n For all of the possible assets types the TEXT can either be the assets name\n (or username for the case of a user) or the keyword all. Specifying all\n will export all of the assets of that type.", "id": "f3340:m5"} {"signature": "@click.command()@with_global_options@click.argument('', required=False, nargs=-)@click.option('', multiple=True, required=False,help='''''')@click.option('', multiple=True, required=False, help='''''')@click.option('', multiple=False, required=False, default='',type=click.Choice(['', '', '']),help='''''''')@click.option('', is_flag=True,help=\"\")def send(source=None, prevent=None, exclude=None, secret_management='', no_color=False):", "body": "from tower_cli.cli.transfer.send import Sendersender = Sender(no_color)sender.send(source, prevent, exclude, secret_management)", "docstring": "Import assets into Tower.\n\n 'tower send' imports one or more assets into a Tower instance\n\n The import can take either JSON or YAML.\n Data can be sent on stdin (i.e. from tower-cli receive pipe) and/or from files\n or directories passed as parameters.\n\n If a directory is specified only files that end in .json, .yaml or .yml will be\n imported. 
Other files will be ignored.", "id": "f3340:m6"} {"signature": "@click.command()@with_global_options@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', required=False, multiple=True)@click.option('', is_flag=True)@click.option('', is_flag=True,help=\"\")def empty(organization=None, user=None, team=None, credential_type=None, credential=None, notification_template=None,inventory_script=None, inventory=None, project=None, job_template=None, workflow=None,all=None, no_color=False):", "body": "from tower_cli.cli.transfer.cleaner import Cleanerdestroyer = Cleaner(no_color)assets_to_export = {}for asset_type in SEND_ORDER:assets_to_export[asset_type] = locals()[asset_type]destroyer.go_ham(all=all, asset_input=assets_to_export)", "docstring": "Empties assets from Tower.\n\n 'tower empty' removes all assets from Tower", "id": "f3340:m7"} {"signature": "def list_commands(self, ctx):", "body": "return self.resource.commands", "docstring": "Return a list of all methods decorated with the\n @resources.command decorator.", "id": "f3341:c0:m1"} {"signature": "def _auto_help_text(self, help_text):", "body": "api_doc_delimiter = ''begin_api_doc = help_text.find(api_doc_delimiter)if begin_api_doc >= :end_api_doc = help_text.rfind(api_doc_delimiter) + len(api_doc_delimiter)help_text = help_text[:begin_api_doc] + help_text[end_api_doc:]an_prefix = ('', '', '', '')if not self.resource_name.lower().startswith(an_prefix):help_text = help_text.replace('','' % self.resource_name)if self.resource_name.lower().endswith(''):help_text = help_text.replace('','' % self.resource_name[:-],)help_text = help_text.replace('', self.resource_name)help_text = help_text.replace('', '')help_text = help_text.replace('','')for match in re.findall(r'', help_text):option = '' % match.replace('', '')help_text = help_text.replace('' % match, option)return help_text", "docstring": "Given a method with a docstring, convert the docstring\n to more CLI appropriate wording, and also disambiguate the\n word \"object\" on the base class docstrings.", "id": "f3341:c0:m2"} {"signature": "def _echo_method(self, method):", "body": "@functools.wraps(method)def func(*args, **kwargs):if getattr(method, '', False):debug.log('', header='')result = method(*args, **kwargs)color_info = {}if isinstance(result, dict) and '' in result:if result['']:color_info[''] = ''else:color_info[''] = ''format = getattr(self, '' % (getattr(method, '', None) or settings.format))output = format(result)secho(output, **color_info)return func", "docstring": "Given a method, return a method that runs the internal\n method and echos the result.", "id": "f3341:c0:m3"} {"signature": "def _format_json(self, payload):", "body": "return json.dumps(payload, indent=)", "docstring": "Convert the payload into a JSON string with proper\n indentation and return it.", "id": "f3341:c0:m4"} {"signature": "def _format_yaml(self, payload):", "body": "return parser.ordered_dump(payload, Dumper=yaml.SafeDumper,default_flow_style=False)", "docstring": "Convert the payload into a YAML string with proper\n indentation and return it.", "id": "f3341:c0:m5"} 
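The _format_json and _format_yaml records above each reduce to a single serialization call on the payload dictionary before the command wrapper echoes the result. Below is a minimal, hypothetical sketch of that idea only; the standalone function names, the two-space indent, and the sample payload are assumptions for illustration and are not taken from the records themselves.

import json
from collections import OrderedDict

import yaml


def format_json(payload):
    # Pretty-print the payload with a fixed indent, as the _format_json docstring describes.
    return json.dumps(payload, indent=2)


def format_yaml(payload):
    # Block-style YAML output; preserving OrderedDict key order would require a custom
    # Dumper along the lines of the ordered_dump helper recorded earlier in this document.
    return yaml.safe_dump(dict(payload), default_flow_style=False)


payload = OrderedDict([('id', 42), ('name', 'demo-job-template'), ('changed', True)])
print(format_json(payload))
print(format_yaml(payload))

Because every formatter takes the same payload dictionary and returns a string, a single resource method can serve the json, yaml, id, and human output formats interchangeably.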
{"signature": "def _format_id(self, payload):", "body": "if '' in payload:return str(payload[''])if '' in payload:return ''.join([six.text_type(item['']) for item in payload['']])raise MultipleRelatedError('')", "docstring": "Echos only the id", "id": "f3341:c0:m6"} {"signature": "def _format_human(self, payload):", "body": "page = Nonetotal_pages = Nonecolumns = [field.name for field in self.resource.fieldsif field.display or settings.description_on andfield.name == '']columns.insert(, '')fields_by_name = {}for field in self.resource.fields:fields_by_name[field.name] = fieldif '' in payload and '' not in payload:return ''.format(six.text_type(payload['']).lower(),)if '' not in payload and '' not in payload:columns = [i for i in payload.keys()]if '' in payload:raw_rows = payload['']if payload.get('', ) > len(payload['']):prev = payload.get('', ) or page = prev + count = payload['']if payload.get('', None):total_pages = math.ceil(count / len(raw_rows))else:total_pages = pageelse:raw_rows = [payload]if not raw_rows:return ''widths = {}for col in columns:widths[col] = max(len(col),*[len(self.get_print_value(i, col)) for i in raw_rows])fd = fields_by_name.get(col, None)if fd is not None and fd.col_width is not None:widths[col] = fd.col_widthdivider_row = ''for col in columns:divider_row += '' * widths[col] + ''divider_row.rstrip()header_row = ''for col in columns:header_row += ('' % widths[col]).format(col) + ''header_row.rstrip()data_rows = []for raw_row in raw_rows:data_row = ''for col in columns:template = six.text_type('') % widths[col]value = self.get_print_value(raw_row, col)if isinstance(raw_row.get(col, ''), (bool, int)):template = template.replace('', '')fd = fields_by_name.get(col, None)if fd is not None and fd.col_width is not None:str_value = template.format(value or '')if len(str_value) > fd.col_width:value = str_value[:fd.col_width]data_row += template.format(value or '') + ''data_rows.append(data_row.rstrip())response = ''.join((divider_row, header_row, divider_row,''.join(data_rows),divider_row,))if page and total_pages != :response += '' % (page, total_pages)if payload.get('', False):response = '' + responsereturn response", "docstring": "Convert the payload into an ASCII table suitable for\n printing on screen and return it.", "id": "f3341:c0:m8"} {"signature": "def get_command(self, ctx, name):", "body": "if not hasattr(self.resource, name):return Nonemethod = getattr(self.resource, name)attrs = getattr(method, '', {})help_text = inspect.getdoc(method)attrs[''] = self._auto_help_text(help_text or '')ignore_defaults = attrs.pop('', False)new_method = self._echo_method(method)click_params = getattr(method, '', [])new_method.__click_params__ = copy(click_params)new_method = with_global_options(new_method)fao = attrs.pop('', True)if fao:for field in reversed(self.resource.fields):if not field.is_option:continueif not isinstance(fao, bool) and field.name not in fao:continueargs = [field.option]if field.key:args.insert(, field.key)short_fields = {'': '','': '','': '','': ''}if field.name in short_fields:args.append(''+short_fields[field.name])option_help = field.helpif isinstance(field.type, StructuredInput):option_help += ''if field.required:option_help = '' + option_helpelif field.read_only:option_help = '' + option_helpoption_help = '' + option_helpclick.option(*args,default=field.default if not ignore_defaults else None,help=option_help,type=field.type,show_default=field.show_default,multiple=field.multiple,is_eager=False)(new_method)cmd = click.command(name=name, 
cls=ActionSubcommand, **attrs)(new_method)code = six.get_function_code(method)if '' in code.co_varnames:click.argument('', nargs=, required=False, type=str, metavar='')(cmd)return cmd", "docstring": "Retrieve the appropriate method from the Resource,\n decorate it as a click command, and return that method.", "id": "f3341:c0:m9"} {"signature": "def format_command_subsection(self, ctx, formatter, commands, header):", "body": "rows = []for subcommand in commands:cmd = self.get_command(ctx, subcommand)if cmd is None:continuehelp = cmd.short_help or ''rows.append((subcommand, help))if rows:with formatter.section(header):formatter.write_dl(rows)", "docstring": "Writes help text for a sub-section of commands,\n specifically to be reused for resource commands\n and system/configuration commands.", "id": "f3342:c0:m2"} {"signature": "def format_commands(self, ctx, formatter):", "body": "self.format_command_subsection(ctx, formatter, self.list_misc_commands(), '')self.format_command_subsection(ctx, formatter, self.list_resource_commands(), '')", "docstring": "Extra format methods for multi methods that adds all the commands\n after the options.", "id": "f3342:c0:m3"} {"signature": "def list_commands(self, ctx):", "body": "commands = set(self.list_resource_commands())commands.union(set(self.list_misc_commands()))return sorted(commands)", "docstring": "Return a list of commands present in the commands and resources\n folders, but not subcommands.", "id": "f3342:c0:m4"} {"signature": "def list_resource_commands(self):", "body": "resource_path = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,''))answer = set([])for _, name, _ in pkgutil.iter_modules([resource_path]):res = tower_cli.get_resource(name)if not getattr(res, '', False):answer.add(name)return sorted(answer)", "docstring": "Returns a list of multi-commands for each resource type.", "id": "f3342:c0:m5"} {"signature": "def list_misc_commands(self):", "body": "answer = set([])for cmd_name in misc.__all__:answer.add(cmd_name)return sorted(answer)", "docstring": "Returns a list of global commands, realted to CLI\n configuration or system management in general.", "id": "f3342:c0:m6"} {"signature": "def get_command(self, ctx, name):", "body": "if name in misc.__all__:return getattr(misc, name)try:resource = tower_cli.get_resource(name)return ResSubcommand(resource)except ImportError:passsecho('' % name, fg='', bold=True)sys.exit()", "docstring": "Given a command identified by its name, import the appropriate\n module and return the decorated command.\n\n Resources are automatically commands, but if both a resource and\n a command are defined, the command takes precedence.", "id": "f3342:c0:m7"} {"signature": "def convert(self, value, param, ctx):", "body": "if not isinstance(value, str):return valueif isinstance(value, six.binary_type):value = value.decode('')if value.startswith(''):filename = os.path.expanduser(value[:])file_obj = super(Variables, self).convert(filename, param, ctx)if hasattr(file_obj, ''):return file_obj.read()return file_objreturn value", "docstring": "Return file content if file, else, return value as-is", "id": "f3343:c1:m0"} {"signature": "def convert(self, value, param, ctx):", "body": "choice = super(MappedChoice, self).convert(value, param, ctx)ix = self.choices.index(choice)return self.actual_choices[ix]", "docstring": "Match against the appropriate choice value using the superclass\n implementation, and then return the actual choice.", "id": "f3343:c3:m1"} {"signature": "def convert(self, value, param, 
ctx):", "body": "resource = tower_cli.get_resource(self.resource_name)if value is None:return Noneif isinstance(value, int):return valueif re.match(r'', value):return int(value)if value == '':return valuetry:debug.log('''' % param.name, header='')lookup_data = {resource.identity[-]: value}rel = resource.get(**lookup_data)except exc.MultipleResults:raise exc.MultipleRelatedError(''''''''''.format(self.resource_name,value),)except exc.TowerCLIError as ex:raise exc.RelatedError('' %(self.resource_name, str(ex)))return rel['']", "docstring": "Return the appropriate integer value. If a non-integer is\n provided, attempt a name-based lookup and return the primary key.", "id": "f3343:c4:m1"} {"signature": "def parse_args(self, ctx, args):", "body": "if not args and self.no_args_is_help and not ctx.resilient_parsing:click.echo(ctx.get_help())ctx.exit()return super(ActionSubcommand, self).parse_args(ctx, args)", "docstring": "Parse arguments sent to this command.\n\n The code for this method is taken from MultiCommand:\n https://github.com/mitsuhiko/click/blob/master/click/core.py\n\n It is Copyright (c) 2014 by Armin Ronacher.\n See the license:\n https://github.com/mitsuhiko/click/blob/master/LICENSE", "id": "f3345:c0:m1"} {"signature": "def format_options(self, ctx, formatter):", "body": "field_opts = []global_opts = []local_opts = []other_opts = []for param in self.params:if param.name in SETTINGS_PARMS:opts = global_optselif getattr(param, '', None) and param.help.startswith(''):opts = field_optsparam.help = param.help[len(''):]else:opts = local_optsrv = param.get_help_record(ctx)if rv is None:continueelse:opts.append(rv)if self.add_help_option:help_options = self.get_help_option_names(ctx)if help_options:other_opts.append([join_options(help_options)[], ''])if field_opts:with formatter.section(''):formatter.write_dl(field_opts)if local_opts:with formatter.section(''):formatter.write_dl(local_opts)if global_opts:with formatter.section(''):formatter.write_dl(global_opts)if other_opts:with formatter.section(''):formatter.write_dl(other_opts)", "docstring": "Monkey-patch click's format_options method to support option categorization.", "id": "f3345:c0:m2"} {"signature": "def config_from_environment():", "body": "kwargs = {}for k in CONFIG_OPTIONS:env = '' + k.upper()v = os.getenv(env, None)if v is not None:kwargs[k] = vreturn kwargs", "docstring": "Read tower-cli config values from the environment if present, being\n careful not to override config values that were explicitly passed in.", "id": "f3351:m0"} {"signature": "def with_global_options(method):", "body": "method = click.option('', '',help='''''''',required=False, callback=_apply_runtime_setting,is_eager=True,expose_value=False)(method)method = click.option('', '',help='''''',required=False, callback=_apply_runtime_setting,is_eager=True,expose_value=False)(method)method = click.option('', '',help='''''',required=False, callback=_apply_runtime_setting,is_eager=True,expose_value=False)(method)method = click.option('', '',help='''''''',required=False, callback=_apply_runtime_setting,is_eager=True,expose_value=False)(method)method = click.option('', '',help='''''',type=click.Choice(['', '', '', '']),required=False, callback=_apply_runtime_setting,is_eager=True,expose_value=False)(method)method = click.option('', '',default=None,help='',is_flag=True,required=False, callback=_apply_runtime_setting,is_eager=True,expose_value=False)(method)method = click.option('',default=None,help='',is_flag=True,required=False, 
callback=_apply_runtime_setting,is_eager=True,expose_value=False)(method)method = click.option('',default=None,help='''',is_flag=True,required=False, callback=_apply_runtime_setting,is_eager=True,expose_value=False)(method)method = click.option('',default=None,help='''',required=False, callback=_apply_runtime_setting,is_eager=True,expose_value=False)(method)method = click.option('',default=None,help='''',is_flag=True,required=False, callback=_apply_runtime_setting,is_eager=True,expose_value=False)(method)method = runtime_context_manager(method)return method", "docstring": "Apply the global options that we desire on every method within\n tower-cli to the given click command.", "id": "f3351:m3"} {"signature": "def pop_option(function, name):", "body": "for option in getattr(function, '', tuple()):if option.name == name:function.__click_params__.remove(option)", "docstring": "Used to remove an option applied by the @click.option decorator.\n\nThis is useful for when you want to subclass a decorated resource command\nand *don't* want all of the options provided by the parent class'\nimplementation.", "id": "f3351:m4"} {"signature": "def _read(self, fp, fpname):", "body": "if os.path.isfile(fpname):file_permission = os.stat(fpname)if fpname != os.path.join(tower_dir, '') and ((file_permission.st_mode & stat.S_IRGRP) or(file_permission.st_mode & stat.S_IROTH)):warnings.warn(''.format(fpname), RuntimeWarning)try:return configparser.ConfigParser._read(self, fp, fpname)except configparser.MissingSectionHeaderError:fp.seek()string = '' % fp.read()flo = StringIO(string) return configparser.ConfigParser._read(self, flo, fpname)", "docstring": "Read the configuration from the given file.\n\n If the file lacks any section header, add a [general] section\n header that encompasses the whole thing.", "id": "f3351:c0:m0"} {"signature": "def __init__(self):", "body": "self._cache = {}defaults = {}for key in CONFIG_OPTIONS:defaults[key] = ''defaults.update({'': '','': '','': '','': '','': '','': '','': '',})self._defaults = self._new_parser(defaults=defaults)self._environment = self._new_parser(defaults=config_from_environment())self._global = self._new_parser()if os.path.isdir(tower_dir):try:os.listdir(tower_dir)except OSError:warnings.warn('''''',RuntimeWarning)self._global.read(os.path.join(tower_dir, ''))self._user = self._new_parser()user_filename = os.path.join(user_dir, CONFIG_FILENAME)self._user.read(user_filename)self._local = self._new_parser()local_dir = os.getcwd()local_dirs = [local_dir] if local_dir not in (user_dir, tower_dir) else []while os.path.split(local_dir)[]:local_dir, _ = os.path.split(local_dir)if local_dir not in (user_dir, tower_dir):local_dirs = [local_dir] + local_dirsfor local_dir in local_dirs:local_filename = os.path.join(local_dir, CONFIG_FILENAME)self._local.read(local_filename)self._runtime = self._new_parser()", "docstring": "Create the settings object, and read from appropriate files as\n well as from `sys.argv`.", "id": "f3351:c1:m1"} {"signature": "def __getattr__(self, key):", "body": "if key in self._cache:return self._cache[key]for parser in self._parsers:try:value = parser.get('', key)except configparser.NoOptionError:continuetry:if CONFIG_PARAM_TYPE[key] == click.STRING or CONFIG_PARAM_TYPE[key] == click.Choice:value = parser.get('', key)elif CONFIG_PARAM_TYPE[key] == click.BOOL:value = parser.getboolean('', key)elif CONFIG_PARAM_TYPE[key] == click.FLOAT:value = parser.getfloat('', key)elif CONFIG_PARAM_TYPE[key] == click.INT:value = parser.getint('', key)except 
ValueError:click.secho('' % key)self._cache[key] = valuereturn self._cache[key]raise AttributeError('' % key.lower())", "docstring": "Return the approprate value, intelligently type-casted in the\n case of numbers or booleans.", "id": "f3351:c1:m2"} {"signature": "@propertydef _parsers(self):", "body": "return tuple([getattr(self, '' % i) for i in self._parser_names])", "docstring": "Return a tuple of all parsers, in order.\n\n This is referenced at runtime, to avoid gleefully ignoring the\n `runtime_values` context manager.", "id": "f3351:c1:m3"} {"signature": "def set_or_reset_runtime_param(self, key, value):", "body": "if self._runtime.has_option('', key):self._runtime = self._new_parser()if value is None:returnsettings._runtime.set('', key.replace('', ''),six.text_type(value))", "docstring": "Maintains the context of the runtime settings for invoking\n a command.\n\n This should be called by a click.option callback, and only\n called once for each setting for each command invocation.\n\n If the setting exists, it follows that the runtime settings are\n stale, so the entire runtime settings are reset.", "id": "f3351:c1:m4"} {"signature": "@contextlib.contextmanagerdef runtime_values(self, **kwargs):", "body": "for k, v in copy.copy(kwargs).items():if v is None:kwargs.pop(k)continueself._cache.pop(k, None)kwargs[k] = six.text_type(v)old_runtime_parser = self._runtimetry:self._runtime = Parser(defaults=kwargs)self._runtime.add_section('')yield selffinally:self._runtime = old_runtime_parserfor key in kwargs:self._cache.pop(k, None)", "docstring": "=====API DOCS=====\nContext manager that temporarily override runtime level configurations.\n\n:param kwargs: Keyword arguments specifying runtime configuration settings.\n:type kwargs: arbitrary keyword arguments\n:returns: N/A\n\n:Example:\n\n>>> import tower_cli\n>>> from tower_cli.conf import settings\n>>> with settings.runtime_values(username='user', password='pass'):\n>>> print(tower_cli.get_resource('credential').list())\n\n=====API DOCS=====", "id": "f3351:c1:m5"} {"signature": "def get_prefix(self, include_version=True):", "body": "host = settings.hostif '' not in host:host = '' % host.strip('')elif host.startswith('') and settings.verify_ssl:raise exc.TowerCLIError('''')url_pieces = urlparse(host)if url_pieces[] not in ['', '']:raise exc.ConnectionError(''.format(url_pieces[]))prefix = urljoin(host, '')if include_version:prefix = urljoin(prefix, \"\".format(CUR_API_VERSION))return prefix", "docstring": "Return the appropriate URL prefix to prepend to requests,\n based on the host provided in settings.", "id": "f3352:c1:m2"} {"signature": "@functools.wraps(Session.request)def request(self, method, url, *args, **kwargs):", "body": "import reurl = re.sub(\"\", \"\", url)use_version = not url.startswith('')url = '' % (self.get_prefix(use_version), url.lstrip(''))kwargs.setdefault('',BasicTowerAuth(settings.username,settings.password,self))headers = kwargs.get('', {})if method.upper() in ('', '', ''):headers.setdefault('', '')kwargs[''] = headersdebug.log('' % (method, url), fg='', bold=True)if method in ('', '', ''):debug.log('' % kwargs.get('', {}),fg='', bold=True)if method == '' or kwargs.get('', None):debug.log('' % kwargs.get('', {}),fg='', bold=True)debug.log('')if headers.get('', '') == '':kwargs[''] = json.dumps(kwargs.get('', {}))r = self._make_request(method, url, args, kwargs)if r.status_code >= :raise exc.ServerError('''')if r.status_code == :raise exc.AuthError('')if r.status_code == :raise exc.Forbidden(\"\")if r.status_code 
== :raise exc.NotFound('')if r.status_code == :raise exc.MethodNotAllowed(\"\"\"\" % (method, url),)if r.status_code >= :raise exc.BadRequest('''' %(method, url, kwargs.get('', None),kwargs.get('', None), r.content.decode('')))r.__class__ = APIResponsereturn r", "docstring": "Make a request to the Ansible Tower API, and return the\n response.", "id": "f3352:c1:m3"} {"signature": "@resources.command@click.option('', help='')def create(self, fail_on_found=False, force_on_exists=False, **kwargs):", "body": "if kwargs.get('', None):parent_data = self.set_child_endpoint(parent=kwargs[''], inventory=kwargs.get('', None))kwargs[''] = parent_data['']elif '' not in kwargs:raise exc.UsageError('')return super(Resource, self).create(fail_on_found=fail_on_found, force_on_exists=force_on_exists, **kwargs)", "docstring": "Create a group.\n\n =====API DOCS=====\n Create a group.\n\n :param parent: Primary key or name of the group which will be the parent of created group.\n :type parent: str\n :param fail_on_found: Flag that if set, the operation fails if an object matching the unique criteria\n already exists.\n :type fail_on_found: bool\n :param force_on_exists: Flag that if set, then if a match is found on unique fields, other fields will\n be updated to the provided values.; If unset, a match causes the request to be\n a no-op.\n :type force_on_exists: bool\n :param `**kwargs`: Keyword arguments which, all together, will be used as POST body to create the\n resource object.\n :returns: A dictionary combining the JSON output of the created resource, as well as two extra fields:\n \"changed\", a flag indicating if the resource is created successfully; \"id\", an integer which\n is the primary key of the created object.\n :rtype: dict\n :raises tower_cli.exceptions.UsageError: When inventory is not provided in ``**kwargs`` and ``parent``\n is not provided.\n\n =====API DOCS=====", "id": "f3355:c0:m2"} {"signature": "@resources.command(ignore_defaults=True, no_args_is_help=False)@click.option('', is_flag=True, default=False,help='')@click.option('', help='')def list(self, root=False, **kwargs):", "body": "if kwargs.get('', None):self.set_child_endpoint(parent=kwargs[''], inventory=kwargs.get('', None))kwargs.pop('')if root and not kwargs.get('', None):raise exc.UsageError('')if root:inventory_id = kwargs['']r = client.get('' % inventory_id)return r.json()return super(Resource, self).list(**kwargs)", "docstring": "Return a list of groups.\n\n =====API DOCS=====\n Retrieve a list of groups.\n\n :param root: Flag that if set, only root groups of a specific inventory will be listed.\n :type root: bool\n :param parent: Primary key or name of the group whose child groups will be listed.\n :type parent: str\n :param all_pages: Flag that if set, collect all pages of content from the API when returning results.\n :type all_pages: bool\n :param page: The page to show. 
Ignored if all_pages is set.\n :type page: int\n :param query: Contains 2-tuples used as query parameters to filter resulting resource objects.\n :type query: list\n :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.\n :returns: A JSON object containing details of all resource objects returned by Tower backend.\n :rtype: dict\n :raises tower_cli.exceptions.UsageError: When ``root`` flag is on and ``inventory`` is not present in\n ``**kwargs``.\n\n =====API DOCS=====", "id": "f3355:c0:m3"} {"signature": "@resources.command(use_fields_as_options=False)@click.option('', help='')@click.option('', help='')@click.option('', type=types.Related(''))def associate(self, group, parent, **kwargs):", "body": "parent_id = self.lookup_with_inventory(parent, kwargs.get('', None))['']group_id = self.lookup_with_inventory(group, kwargs.get('', None))['']return self._assoc('', parent_id, group_id)", "docstring": "Associate this group with the specified group.\n\n =====API DOCS=====\n Associate this group with the specified group.\n\n :param group: Primary key or name of the child group to associate.\n :type group: str\n :param parent: Primary key or name of the parent group to associate to.\n :type parent: str\n :param inventory: Primary key or name of the inventory the association should happen in.\n :type inventory: str\n :returns: Dictionary of only one key \"changed\", which indicates whether the association succeeded.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3355:c0:m4"} {"signature": "@resources.command(use_fields_as_options=False)@click.option('', help='')@click.option('', help='')@click.option('', type=types.Related(''))def disassociate(self, group, parent, **kwargs):", "body": "parent_id = self.lookup_with_inventory(parent, kwargs.get('', None))['']group_id = self.lookup_with_inventory(group, kwargs.get('', None))['']return self._disassoc('', parent_id, group_id)", "docstring": "Disassociate this group from the specified group.\n\n =====API DOCS=====\n Disassociate this group with the specified group.\n\n :param group: Primary key or name of the child group to disassociate.\n :type group: str\n :param parent: Primary key or name of the parent group to disassociate from.\n :type parent: str\n :param inventory: Primary key or name of the inventory the disassociation should happen in.\n :type inventory: str\n :returns: Dictionary of only one key \"changed\", which indicates whether the disassociation succeeded.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3355:c0:m5"} {"signature": "def __getattribute__(self, name):", "body": "if name in ['', '', '']:raise AttributeErrorelse:return object.__getattribute__(self, name)", "docstring": "Disable inherited methods that cannot be applied to this\n particular resource.", "id": "f3356:c0:m0"} {"signature": "@staticmethoddef obj_res(data, fail_on=['', '', '']):", "body": "errors = []if not data.get('', None) and '' in fail_on:errors += ['']obj = Noneobj_type = Nonefor fd in ACTOR_FIELDS:if data.get(fd, False):if not obj:obj = data[fd]obj_type = fdelse:errors += ['''']breakif not obj and '' in fail_on:errors += ['''']res = Noneres_type = Nonefor fd in RESOURCE_FIELDS:if data.get(fd, False):if not res:res = data[fd]res_type = fdif res_type == '':res_type = ''else:errors += ['''']breakif not res and '' in fail_on:errors += ['''']if errors:raise exc.UsageError(\"\".join(errors))return obj, obj_type, res, res_type", "docstring": "Given some CLI input data,\nReturns the following and their types:\nobj - the 
role grantee\nres - the resource that the role applies to", "id": "f3356:c0:m1"} {"signature": "@classmethoddef data_endpoint(cls, in_data, ignore=[]):", "body": "obj, obj_type, res, res_type = cls.obj_res(in_data, fail_on=[])data = {}if '' in ignore:obj = Noneif '' in ignore:res = Noneif obj and obj_type == '':data[''] = objif obj and obj_type == '':endpoint = '' % (grammar.pluralize(obj_type), obj)if res is not None:data[''] = reselif res:endpoint = '' % (grammar.pluralize(res_type), res)else:endpoint = ''if in_data.get('', False):data[''] = '' % in_data[''].lower()for key, value in in_data.items():if key not in RESOURCE_FIELDS and key not in ['', '', '']:data[key] = valuereturn data, endpoint", "docstring": "Converts a set of CLI input arguments, `in_data`, into\nrequest data and an endpoint that can be used to look\nup a role or list of roles.\n\nAlso changes the format of `type` in data to what the server\nexpects for the role model, as it exists in the database.", "id": "f3356:c0:m2"} {"signature": "@staticmethoddef populate_resource_columns(item_dict):", "body": "item_dict[''] = item_dict['']if len(item_dict['']) == :item_dict[''] = Noneitem_dict[''] = Noneelse:sf = item_dict['']item_dict[''] = sf.get('', '')item_dict[''] = sf.get('', '')", "docstring": "Operates on item_dict\n\n Promotes the resource_name and resource_type fields to the\n top-level of the serialization so they can be printed as columns.\n Also makes a copies name field to type, which is a default column.", "id": "f3356:c0:m3"} {"signature": "def set_display_columns(self, set_true=[], set_false=[]):", "body": "for i in range(len(self.fields)):if self.fields[i].name in set_true:self.fields[i].display = Trueelif self.fields[i].name in set_false:self.fields[i].display = False", "docstring": "Add or remove columns from the output.", "id": "f3356:c0:m4"} {"signature": "def configure_display(self, data, kwargs=None, write=False):", "body": "if settings.format != '':return if write:obj, obj_type, res, res_type = self.obj_res(kwargs)data[''] = kwargs['']data[obj_type] = objdata[res_type] = resself.set_display_columns(set_false=['' if obj_type == '' else ''],set_true=['' if res_type == '' else res_type])else:self.set_display_columns(set_false=['', ''],set_true=['', ''])if '' in data:for i in range(len(data[''])):self.populate_resource_columns(data[''][i])else:self.populate_resource_columns(data)", "docstring": "Populates columns and sets display attribute as needed.\n Operates on data.", "id": "f3356:c0:m5"} {"signature": "def role_write(self, fail_on_found=False, disassociate=False, **kwargs):", "body": "data, self.endpoint = self.data_endpoint(kwargs, ignore=[''])debug.log('', header='')response = self.read(pk=None, fail_on_no_results=True,fail_on_multiple_results=True, **data)role_data = response[''][]role_id = role_data['']self.configure_display(role_data, kwargs, write=True)obj, obj_type, res, res_type = self.obj_res(kwargs)debug.log('' % obj_type,header='')data, self.endpoint = self.data_endpoint(kwargs)data[''] = res_type.replace('', '')response = self.read(pk=None, fail_on_no_results=False,fail_on_multiple_results=False, **data)msg = ''if response[''] > and not disassociate:msg = '' % obj_typeelif response[''] == and disassociate:msg = '' % obj_typeif msg:role_data[''] = Falseif fail_on_found:raise exc.NotFound(msg)else:debug.log(msg, header='')return role_datadebug.log('' % ('' if disassociate else '', obj_type), header='')post_data = {'': role_id}if disassociate:post_data[''] = Trueclient.post('' % 
(grammar.pluralize(obj_type), obj),data=post_data)role_data[''] = Truereturn role_data", "docstring": "Re-implementation of the parent `write` method specific to roles.\n Adds a grantee (user or team) to the resource's role.", "id": "f3356:c0:m6"} {"signature": "@resources.command(use_fields_as_options=ACTOR_FIELDS+RESOURCE_FIELDS+[''])def list(self, **kwargs):", "body": "data, self.endpoint = self.data_endpoint(kwargs)r = super(Resource, self).list(**data)self.configure_display(r)return r", "docstring": "Return a list of roles.\n\n =====API DOCS=====\n Retrieve a list of objects.\n\n :param all_pages: Flag that if set, collect all pages of content from the API when returning results.\n :type all_pages: bool\n :param page: The page to show. Ignored if all_pages is set.\n :type page: int\n :param query: Contains 2-tuples used as query parameters to filter resulting resource objects.\n :type query: list\n :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.\n :returns: A JSON object containing details of all resource objects returned by Tower backend.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3356:c0:m7"} {"signature": "@resources.command(use_fields_as_options=ACTOR_FIELDS+RESOURCE_FIELDS+[''])def get(self, pk=None, **kwargs):", "body": "if kwargs.pop('', True):debug.log('', header='')data, self.endpoint = self.data_endpoint(kwargs)response = self.read(pk=pk, fail_on_no_results=True,fail_on_multiple_results=True, **data)item_dict = response[''][]self.configure_display(item_dict)return item_dict", "docstring": "Get information about a role.\n\n =====API DOCS=====\n Retrieve one and exactly one object.\n\n :param pk: Primary key of the resource to be read. Tower CLI will only attempt to read *that* object\n if ``pk`` is provided (not ``None``).\n :type pk: int\n :param `**kwargs`: Keyword arguments used to look up resource object to retrieve if ``pk`` is not provided.\n :returns: loaded JSON of the retrieved resource object.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3356:c0:m8"} {"signature": "@resources.command(use_fields_as_options=ACTOR_FIELDS+RESOURCE_FIELDS+[''])@click.option('', default=False,show_default=True, type=bool, is_flag=True,help='''')def grant(self, fail_on_found=False, **kwargs):", "body": "return self.role_write(fail_on_found=fail_on_found, **kwargs)", "docstring": "Add a user or a team to a role. Required information:\n 1) Type of the role\n 2) Resource of the role, inventory, credential, or any other\n 3) A user or a team to add to the role\n\n =====API DOCS=====\n Add a user or a team to a role. Required information:\n * Type of the role.\n * Resource of the role, inventory, credential, or any other.\n * A user or a team to add to the role.\n\n :param fail_on_found: Flag that if set, the operation fails if a user/team already has the role.\n :type fail_on_found: bool\n :param `**kwargs`: The user to be associated and the role to associate.\n :returns: parsed JSON of role grant.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3356:c0:m9"} {"signature": "@resources.command(use_fields_as_options=ACTOR_FIELDS+RESOURCE_FIELDS+[''])@click.option('', default=False,show_default=True, type=bool, is_flag=True,help='''')def revoke(self, fail_on_found=False, **kwargs):", "body": "return self.role_write(fail_on_found=fail_on_found,disassociate=True, **kwargs)", "docstring": "Remove a user or a team from a role. 
Required information:\n 1) Type of the role\n 2) Resource of the role, inventory, credential, or any other\n 3) A user or a team to add to the role\n\n =====API DOCS=====\n Remove a user or a team from a role. Required information:\n * Type of the role.\n * Resource of the role, inventory, credential, or any other.\n * A user or a team to add to the role.\n\n :param fail_on_found: Flag that if set, the operation fails if a user/team dose not have the role.\n :type fail_on_found: bool\n :param `**kwargs`: The user to be disassociated and the role to disassociate.\n :returns: parsed JSON of role revoke.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3356:c0:m10"} {"signature": "def _separate(self, kwargs):", "body": "self._pop_none(kwargs)result = {}for field in Resource.config_fields:if field in kwargs:result[field] = kwargs.pop(field)if field in Resource.json_fields:if not isinstance(result[field], six.string_types):continuetry:data = json.loads(result[field])result[field] = dataexcept ValueError:raise exc.TowerCLIError('''')return result", "docstring": "Remove None-valued and configuration-related keyworded arguments", "id": "f3357:c0:m0"} {"signature": "def _configuration(self, kwargs, config_item):", "body": "if '' not in config_item:if '' not in kwargs:returnnc = kwargs[''] = {}for field in Resource.configuration[kwargs['']]:if field not in config_item:raise exc.TowerCLIError('''' % field)else:nc[field] = config_item[field]else:kwargs[''] =config_item['']", "docstring": "Combine configuration-related keyworded arguments into\n notification_configuration.", "id": "f3357:c0:m1"} {"signature": "@resources.command@click.option('', type=types.Related(''),required=False, help='')@click.option('', type=click.Choice(['', '']),required=False, help='''')def create(self, fail_on_found=False, force_on_exists=False, **kwargs):", "body": "config_item = self._separate(kwargs)jt_id = kwargs.pop('', None)status = kwargs.pop('', '')old_endpoint = self.endpointif jt_id is not None:jt = get_resource('')jt.get(pk=jt_id)try:nt_id = self.get(**copy.deepcopy(kwargs))['']except exc.NotFound:passelse:if fail_on_found:raise exc.TowerCLIError('''''''''')else:debug.log('''',header='')return jt.associate_notification_template(jt_id, nt_id, status=status)self.endpoint = '' %(jt_id, status)self._configuration(kwargs, config_item)result = super(Resource, self).create(**kwargs)self.endpoint = old_endpointreturn result", "docstring": "Create a notification template.\n\n All required configuration-related fields (required according to\n notification_type) must be provided.\n\n There are two types of notification template creation: isolatedly\n creating a new notification template and creating a new notification\n template under a job template. Here the two types are discriminated by\n whether to provide --job-template option. 
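For the grant and revoke commands above, a minimal hypothetical sketch via the Python API; the resource name 'role' and the keyword names ('user', 'type', 'inventory') are assumptions, because the literal option strings are masked in these records, and the primary keys are placeholders.

    from tower_cli import get_resource

    # Hypothetical sketch -- resource and keyword names are assumptions;
    # pass primary keys here (the CLI performs name lookups for you).
    role_res = get_resource('role')

    # Give user #2 the 'admin' role on inventory #5, failing loudly if the
    # user already holds it.
    role_res.grant(user=2, type='admin', inventory=5, fail_on_found=True)

    # Take the same role away again.
    role_res.revoke(user=2, type='admin', inventory=5)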
--status option controls\n more specific, job-run-status-related association.\n\n Fields in the resource's `identity` tuple are used for a lookup;\n if a match is found, then no-op (unless `force_on_exists` is set) but\n do not fail (unless `fail_on_found` is set).\n\n =====API DOCS=====\n Create an object.\n\n :param fail_on_found: Flag that if set, the operation fails if an object matching the unique criteria\n already exists.\n :type fail_on_found: bool\n :param force_on_exists: Flag that if set, then if a match is found on unique fields, other fields will\n be updated to the provided values.; If unset, a match causes the request to be\n a no-op.\n :type force_on_exists: bool\n :param `**kwargs`: Keyword arguments which, all together, will be used as POST body to create the\n resource object.\n :returns: A dictionary combining the JSON output of the created resource, as well as two extra fields:\n \"changed\", a flag indicating if the resource is created successfully; \"id\", an integer which\n is the primary key of the created object.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3357:c0:m2"} {"signature": "@resources.commanddef modify(self, pk=None, create_on_missing=False, **kwargs):", "body": "if pk is None and create_on_missing:try:self.get(**copy.deepcopy(kwargs))except exc.NotFound:return self.create(**kwargs)config_item = self._separate(kwargs)notification_type = kwargs.pop('', None)debug.log('''', header='')part_result = super(Resource, self).modify(pk=pk, create_on_missing=create_on_missing, **kwargs)if notification_type is None ornotification_type == part_result['']:for item in part_result['']:if item not in config_item or not config_item[item]:to_add = part_result[''][item]if not (to_add == '' anditem in Resource.encrypted_fields):config_item[item] = to_addif notification_type is None:kwargs[''] = part_result['']else:kwargs[''] = notification_typeself._configuration(kwargs, config_item)debug.log('',header='')result = super(Resource, self).modify(pk=pk, create_on_missing=create_on_missing, **kwargs)if '' in result and '' in part_result:result[''] = result[''] or part_result['']return result", "docstring": "Modify an existing notification template.\n\n Not all required configuration-related fields (required according to\n notification_type) should be provided.\n\n Fields in the resource's `identity` tuple can be used in lieu of a\n primary key for a lookup; in such a case, only other fields are\n written.\n\n To modify unique fields, you must use the primary key for the lookup.\n\n =====API DOCS=====\n Modify an already existing object.\n\n :param pk: Primary key of the resource to be modified.\n :type pk: int\n :param create_on_missing: Flag that if set, a new object is created if ``pk`` is not set and objects\n matching the appropriate unique criteria is not found.\n :type create_on_missing: bool\n :param `**kwargs`: Keyword arguments which, all together, will be used as PATCH body to modify the\n resource object. 
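Because the create command above supports both stand-alone creation and creation attached to a job template, here is a hypothetical sketch of both modes; the resource name 'notification_template' and every field name and value below are assumptions, since the literal strings are masked in these records.

    from tower_cli import get_resource

    # Hypothetical sketch -- resource, field, and option names are assumptions.
    nt = get_resource('notification_template')

    # Stand-alone creation: every configuration field required by the chosen
    # notification_type has to be supplied up front.
    nt.create(name='build-alerts', organization=1,   # placeholder org pk
              notification_type='slack',
              channels=['#builds'], token='EXAMPLE-TOKEN')

    # Creation under a job template, associated with failed runs only
    # (the --job-template / --status path described above).
    nt.create(name='deploy-failures', organization=1,
              notification_type='slack',
              channels=['#ops'], token='EXAMPLE-TOKEN',
              job_template=12, status='failed')   # placeholder JT pk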
if ``pk`` is not set, key-value pairs of ``**kwargs`` which are\n also in resource's identity will be used to lookup existing reosource.\n :returns: A dictionary combining the JSON output of the modified resource, as well as two extra fields:\n \"changed\", a flag indicating if the resource is successfully updated; \"id\", an integer which\n is the primary key of the updated object.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3357:c0:m3"} {"signature": "@resources.commanddef delete(self, pk=None, fail_on_missing=False, **kwargs):", "body": "self._separate(kwargs)return super(Resource, self).delete(pk=pk, fail_on_missing=fail_on_missing, **kwargs)", "docstring": "Remove the given notification template.\n\n Note here configuration-related fields like\n 'notification_configuration' and 'channels' will not be\n used even provided.\n\n If `fail_on_missing` is True, then the object's not being found is\n considered a failure; otherwise, a success with no change is reported.\n\n =====API DOCS=====\n Remove the given object.\n\n :param pk: Primary key of the resource to be deleted.\n :type pk: int\n :param fail_on_missing: Flag that if set, the object's not being found is considered a failure; otherwise,\n a success with no change is reported.\n :type fail_on_missing: bool\n :param `**kwargs`: Keyword arguments used to look up resource object to delete if ``pk`` is not provided.\n :returns: dictionary of only one field \"changed\", which is a flag indicating whether the specified resource\n is successfully deleted.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3357:c0:m4"} {"signature": "@resources.commanddef list(self, all_pages=False, **kwargs):", "body": "self._separate(kwargs)return super(Resource, self).list(all_pages=all_pages, **kwargs)", "docstring": "Return a list of notification templates.\n\n Note here configuration-related fields like\n 'notification_configuration' and 'channels' will not be\n used even provided.\n\n If one or more filters are provided through keyword arguments,\n filter the results accordingly.\n\n If no filters are provided, return all results.\n\n =====API DOCS=====\n Retrieve a list of objects.\n\n :param all_pages: Flag that if set, collect all pages of content from the API when returning results.\n :type all_pages: bool\n :param page: The page to show. Ignored if all_pages is set.\n :type page: int\n :param query: Contains 2-tuples used as query parameters to filter resulting resource objects.\n :type query: list\n :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.\n :returns: A JSON object containing details of all resource objects returned by Tower backend.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3357:c0:m5"} {"signature": "@resources.commanddef get(self, pk=None, **kwargs):", "body": "self._separate(kwargs)return super(Resource, self).get(pk=pk, **kwargs)", "docstring": "Return one and exactly one notification template.\n\n Note here configuration-related fields like\n 'notification_configuration' and 'channels' will not be\n used even provided.\n\n Lookups may be through a primary key, specified as a positional\n argument, and/or through filters specified through keyword arguments.\n\n If the number of results does not equal one, raise an exception.\n\n =====API DOCS=====\n Retrieve one and exactly one object.\n\n :param pk: Primary key of the resource to be read. 
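To illustrate the modify behaviour described above, where existing notification_configuration values are re-used for any configuration field you leave out, a small hypothetical sketch; the resource and field names are assumptions.

    from tower_cli import get_resource

    # Hypothetical sketch -- resource and field names are assumptions.
    nt = get_resource('notification_template')

    # Only the channel list is replaced; the stored token and any other
    # configuration fields are carried over from the existing template.
    nt.modify(name='build-alerts', channels=['#builds', '#oncall'])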
Tower CLI will only attempt to read *that* object\n if ``pk`` is provided (not ``None``).\n :type pk: int\n :param `**kwargs`: Keyword arguments used to look up resource object to retrieve if ``pk`` is not provided.\n :returns: loaded JSON of the retrieved resource object.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3357:c0:m6"} {"signature": "@click.argument('', type=types.Related(''))@click.option('', is_flag=True, default=False,help='''')@click.option('', is_flag=True, default=False,help='')@click.option('', required=False, type=int,help='''''')@resources.command(use_fields_as_options=False, no_args_is_help=True)def update(self, inventory_source, monitor=False, wait=False,timeout=None, **kwargs):", "body": "debug.log('', header='')r = client.get('' % (self.endpoint, inventory_source))if not r.json()['']:raise exc.BadRequest('')debug.log('', header='')r = client.post('' % (self.endpoint, inventory_source), data={})inventory_update_id = r.json()['']if monitor or wait:if monitor:result = self.monitor(inventory_update_id, parent_pk=inventory_source, timeout=timeout)elif wait:result = self.wait(inventory_update_id, parent_pk=inventory_source, timeout=timeout)inventory = client.get('' % result['']).json()['']result[''] = int(inventory)return resultreturn {'': inventory_update_id,'': ''}", "docstring": "Update the given inventory source.\n\n =====API DOCS=====\n Update the given inventory source.\n\n :param inventory_source: Primary key or name of the inventory source to be updated.\n :type inventory_source: str\n :param monitor: Flag that if set, immediately calls ``monitor`` on the newly launched inventory update\n rather than exiting with a success.\n :type monitor: bool\n :param wait: Flag that if set, monitor the status of the inventory update, but do not print while it is\n in progress.\n :type wait: bool\n :param timeout: If provided with ``monitor`` flag set, this attempt will time out after the given number\n of seconds.\n :type timeout: int\n :param `**kwargs`: Fields used to override underlyingl inventory source fields when creating and launching\n an inventory update.\n :returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; Result of subsequent ``wait``\n call if ``wait`` flag is on; dictionary of \"status\" if none of the two flags are on.\n :rtype: dict\n :raises tower_cli.exceptions.BadRequest: When the inventory source cannot be updated.\n\n =====API DOCS=====", "id": "f3358:c0:m0"} {"signature": "@resources.command@click.option('', is_flag=True, default=False,help='')def status(self, pk, detail=False, **kwargs):", "body": "job = self.last_job_data(pk, **kwargs)if detail:return jobreturn {'': job[''],'': job[''],'': job[''],}", "docstring": "Print the status of the most recent sync.\n\n =====API DOCS=====\n Retrieve the current inventory update status.\n\n :param pk: Primary key of the resource to retrieve status from.\n :type pk: int\n :param detail: Flag that if set, return the full JSON of the job resource rather than a status summary.\n :type detail: bool\n :param `**kwargs`: Keyword arguments used to look up resource object to retrieve status from if ``pk``\n is not provided.\n :returns: full loaded JSON of the specified unified job if ``detail`` flag is on; trimed JSON containing\n only \"elapsed\", \"failed\" and \"status\" fields of the unified job if ``detail`` flag is off.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3358:c0:m1"} {"signature": "def __getattribute__(self, name):", "body": "if name in ['']:raise AttributeErrorelse:return 
object.__getattribute__(self, name)", "docstring": "Disable inherited methods that cannot be applied to this particular resource.", "id": "f3359:c0:m0"} {"signature": "@resources.command@click.option('', type=types.Related(''),required=False, help='')def create(self, fail_on_found=False, force_on_exists=False, **kwargs):", "body": "jt_id = kwargs.pop('', None)old_endpoint = self.endpointif jt_id is not None:jt = get_resource('')jt.get(pk=jt_id)try:label_id = self.get(name=kwargs.get('', None), organization=kwargs.get('', None))['']except exc.NotFound:passelse:if fail_on_found:raise exc.TowerCLIError('''')else:debug.log('', header='')return jt.associate_label(job_template=jt_id, label=label_id)self.endpoint = '' % jt_idresult = super(Resource, self).create(fail_on_found=fail_on_found, force_on_exists=force_on_exists, **kwargs)self.endpoint = old_endpointreturn result", "docstring": "Create a new label.\n\n There are two types of label creation: isolatedly creating a new label and creating a new label under\n a job template. Here the two types are discriminated by whether to provide --job-template option.\n\n Fields in the resource's `identity` tuple are used for a lookup; if a match is found, then no-op (unless\n `force_on_exists` is set) but do not fail (unless `fail_on_found` is set).\n\n =====API DOCS=====\n Create a label.\n\n :param job_template: Primary key or name of the job template for the created label to associate to.\n :type job_template: str\n :param fail_on_found: Flag that if set, the operation fails if an object matching the unique criteria\n already exists.\n :type fail_on_found: bool\n :param force_on_exists: Flag that if set, then if a match is found on unique fields, other fields will\n be updated to the provided values.; If unset, a match causes the request to be\n a no-op.\n :type force_on_exists: bool\n :param `**kwargs`: Keyword arguments which, all together, will be used as POST body to create the\n resource object.\n :returns: A dictionary combining the JSON output of the created resource, as well as two extra fields:\n \"changed\", a flag indicating if the resource is created successfully; \"id\", an integer which\n is the primary key of the created object.\n :rtype: dict\n :raises tower_cli.exceptions.TowerCLIError: When the label already exists and ``fail_on_found`` flag is on.\n\n =====API DOCS=====", "id": "f3359:c0:m1"} {"signature": "def _compare_node_lists(old, new):", "body": "to_expand = []to_delete = []to_recurse = []old_records = {}new_records = {}for tree_node in old:old_records.setdefault(tree_node.unified_job_template, [])old_records[tree_node.unified_job_template].append(tree_node)for tree_node in new:new_records.setdefault(tree_node.unified_job_template, [])new_records[tree_node.unified_job_template].append(tree_node)for ujt_id in old_records:if ujt_id not in new_records:to_delete.extend(old_records[ujt_id])continueold_list = old_records[ujt_id]new_list = new_records.pop(ujt_id)if len(old_list) == and len(new_list) == :to_recurse.append((old_list[], new_list[]))else:to_delete.extend(old_list)to_expand.extend(new_list)for nodes in new_records.values():to_expand.extend(nodes)return to_expand, to_delete, to_recurse", "docstring": "Investigate two lists of workflow TreeNodes and categorize them.\n\nThere will be three types of nodes after categorization:\n 1. Nodes that only exists in the new list. These nodes will later be\n created recursively.\n 2. Nodes that only exists in the old list. These nodes will later be\n deleted recursively.\n 3. 
Node pairs that makes an exact match. These nodes will be further\n investigated.\n\nCorresponding nodes of old and new lists will be distinguished by their\nunified_job_template value. A special case is that both the old and the new\nlists contain one type of node, say A, and at least one of them contains\nduplicates. In this case all A nodes in the old list will be categorized as\nto-be-deleted and all A nodes in the new list will be categorized as\nto-be-created.", "id": "f3361:m0"} {"signature": "@staticmethoddef _workflow_node_structure(node_results):", "body": "node_list_pos = {}for i, node_result in enumerate(node_results):for rel in ['', '', '']:node_result[''.format(rel)] = []node_list_pos[node_result['']] = ifor node_result in node_results:for rel in ['', '', '']:for sub_node_id in node_result[''.format(rel)]:j = node_list_pos[sub_node_id]node_results[j][''.format(rel)].append(node_result[''])root_nodes = []for node_result in node_results:is_root = Truefor rel in ['', '', '']:if node_result[''.format(rel)] != []:is_root = Falsebreakif is_root:root_nodes.append(node_result[''])def branch_schema(node_id):i = node_list_pos[node_id]node_dict = node_results[i]ret_dict = {\"\": node_id}for fd in NODE_STANDARD_FIELDS:val = node_dict.get(fd, None)if val is not None:if fd == '':job_type = node_dict['']['']['']ujt_key = JOB_TYPES[job_type]ret_dict[ujt_key] = valelse:ret_dict[fd] = valfor rel in ['', '', '']:sub_node_id_list = node_dict[''.format(rel)]if len(sub_node_id_list) == :continuerelationship_name = ''.format(rel)ret_dict[relationship_name] = []for sub_node_id in sub_node_id_list:ret_dict[relationship_name].append(branch_schema(sub_node_id))return ret_dictschema_dict = []for root_node_id in root_nodes:schema_dict.append(branch_schema(root_node_id))return schema_dict", "docstring": "Takes the list results from the API in `node_results` and\ntranslates this data into a dictionary organized in a\nhuman-readable heirarchial structure", "id": "f3361:c1:m0"} {"signature": "def _get_schema(self, wfjt_id):", "body": "node_res = get_resource('')node_results = node_res.list(workflow_job_template=wfjt_id,all_pages=True)['']return self._workflow_node_structure(node_results)", "docstring": "Returns a dictionary that represents the node network of the\nworkflow job template", "id": "f3361:c1:m1"} {"signature": "@resources.command(use_fields_as_options=False)@click.argument('', type=types.Related(''))@click.argument('', type=types.Variables(), required=False)def schema(self, wfjt, node_network=None):", "body": "existing_network = self._get_schema(wfjt)if not isinstance(existing_network, list):existing_network = []if node_network is None:if settings.format == '':settings.format = ''return existing_networkif hasattr(node_network, ''):node_network = node_network.read()node_network = string_to_dict(node_network, allow_kv=False, require_dict=False)if not isinstance(node_network, list):node_network = []_update_workflow([TreeNode(x, wfjt, include_id=True) for x in existing_network],[TreeNode(x, wfjt) for x in node_network])if settings.format == '':settings.format = ''return self._get_schema(wfjt)", "docstring": "Convert YAML/JSON content into workflow node objects if\nnode_network param is given.\nIf not, print a YAML representation of the node network.\n\n=====API DOCS=====\nConvert YAML/JSON content into workflow node objects if ``node_network`` param is given. 
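As a hypothetical companion to the schema command above: passing a YAML (or JSON) string rebuilds the node network, while omitting it simply reads the current topology back. The resource name 'workflow', the node keys, and the primary key are assumptions and placeholders, since the literal strings are masked in these records.

    from tower_cli import get_resource

    # Hypothetical sketch -- resource name, node keys, and pk are assumptions.
    workflow = get_resource('workflow')

    node_network = """
    - job_template: Provision        # runs first
      success_nodes:
        - job_template: Deploy       # runs only if Provision succeeds
      failure_nodes:
        - job_template: Teardown     # runs only if Provision fails
    """

    # 7 is a placeholder primary key for the workflow job template.
    topology = workflow.schema(7, node_network=node_network)

    # Calling it again without node_network returns the stored topology.
    current = workflow.schema(7)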
If not,\nprint a YAML representation of the node network.\n\n:param wfjt: Primary key or name of the workflow job template to run schema against.\n:type wfjt: str\n:param node_network: JSON- or YAML-formatted string representing the topology of the workflow job\n template be updated to.\n:type node_network: str\n:returns: The latest topology (possibly after modification) of the workflow job template.\n:rtype: dict\n\n=====API DOCS=====", "id": "f3361:c1:m2"} {"signature": "@resources.command(use_fields_as_options=False)@click.option('', type=types.Related(''))@click.option('',type=types.Related(''))@click.option('', type=click.Choice(['', '', '']),required=False, default='', help='''')def associate_notification_template(self, workflow,notification_template, status):", "body": "return self._assoc('' % status,workflow, notification_template)", "docstring": "Associate a notification template from this workflow.\n\n =====API DOCS=====\n Associate a notification template from this workflow job template.\n\n :param workflow: The workflow job template to associate to.\n :type workflow: str\n :param notification_template: The notification template to be associated.\n :type notification_template: str\n :param status: type of notification this notification template should be associated to.\n :type status: str\n :returns: Dictionary of only one key \"changed\", which indicates whether the association succeeded.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3361:c1:m3"} {"signature": "@resources.command(use_fields_as_options=False)@click.option('', type=types.Related(''))@click.option('',type=types.Related(''))@click.option('', type=click.Choice(['', '', '']),required=False, default='', help='''')def disassociate_notification_template(self, workflow,notification_template, status):", "body": "return self._disassoc('' % status,workflow, notification_template)", "docstring": "Disassociate a notification template from this workflow.\n\n =====API DOCS=====\n Disassociate a notification template from this workflow job template.\n\n :param job_template: The workflow job template to disassociate from.\n :type job_template: str\n :param notification_template: The notification template to be disassociated.\n :type notification_template: str\n :param status: type of notification this notification template should be disassociated from.\n :type status: str\n :returns: Dictionary of only one key \"changed\", which indicates whether the disassociation succeeded.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3361:c1:m4"} {"signature": "def read(self, *args, **kwargs):", "body": "if '' in kwargs:kwargs[''] = kwargs.pop('')r = super(Resource, self).read(*args, **kwargs)if '' in r:for d in r['']:self._promote_actor(d)else:self._promote_actor(d)return r", "docstring": "Do extra processing so we can display the actor field as\na top-level field", "id": "f3363:c0:m3"} {"signature": "@resources.command(use_fields_as_options=('',))@click.option('', is_flag=True, default=False,help='''')@click.option('', is_flag=True, default=False,help='''')@click.option('', required=False, type=int,help='''''')@click.option('', is_flag=True, default=False,help='')@click.option('', '', required=False, multiple=True,help='''')@click.option('', type=bool, required=False, help='')@click.option('', required=False, help='')@click.option('', required=False, help='')@click.option('', required=False, help='')@click.option('', required=False, type=click.Choice(['', '']),help='')@click.option('', type=int, required=False, help='')@click.option('', 
required=False, type=types.Related(''),help='')@click.option('', required=False, type=types.Related(''),help='')def launch(self, job_template=None, monitor=False, wait=False,timeout=None, no_input=True, extra_vars=None, **kwargs):", "body": "tags = kwargs.get('', None)jt_resource = get_resource('')jt = jt_resource.get(job_template)data = {}if tags:data[''] = tagsextra_vars_list = []if '' in data and len(data['']) > :debug.log('', header='')r = client.get('')if LooseVersion(r.json()['']) < LooseVersion(''):extra_vars_list = [data['']]if extra_vars:extra_vars_list += list(extra_vars) if jt.get('', False) and not no_inputand not extra_vars:initial = parser.process_extra_vars([jt['']], force_json=False)initial = ''.join(('','',initial,))extra_vars = click.edit(initial) or ''if extra_vars != initial:extra_vars_list = [extra_vars]data.pop('', None)modified = set()for resource in PROMPT_LIST:if jt.pop('' + resource + '', False) and not no_input:resource_object = kwargs.get(resource, None)if type(resource_object) == types.Related:resource_class = get_resource(resource)resource_object = resource_class.get(resource).pop('', None)if resource_object is None:debug.log(''.format(resource), header='')elif resource != '':data[resource] = resource_objectmodified.add(resource)if len(extra_vars_list) > :data[''] = parser.process_extra_vars(extra_vars_list, force_json=True)start_data = {}endpoint = '' % jt['']if '' in data and len(data['']) > :start_data[''] = data['']if tags:start_data[''] = data['']for resource in PROMPT_LIST:if resource in modified:start_data[resource] = data[resource]debug.log('',header='')job_start_info = client.get(endpoint).json()for password in job_start_info.get('', []):start_data[password] = getpass('' % password)debug.log('', header='')self._pop_none(kwargs)kwargs.update(start_data)job_started = client.post(endpoint, data=kwargs)job_id = job_started.json()['']if job_started.text == '':ignored_fields = {}else:ignored_fields = job_started.json().get('', {})has_ignored_fields = Falsefor key, value in ignored_fields.items():if value and value != '':if not has_ignored_fields:debug.log('',header='')has_ignored_fields = Truedebug.log(''.format(key, value))result = self.status(pk=job_id, detail=True)result[''] = Trueif monitor:return self.monitor(job_id, timeout=timeout)elif wait:return self.wait(job_id, timeout=timeout)return result", "docstring": "Launch a new job based on a job template.\n\n Creates a new job in Ansible Tower, immediately starts it, and\n returns back an ID in order for its status to be monitored.\n\n =====API DOCS=====\n Launch a new job based on a job template.\n\n :param job_template: Primary key or name of the job template to launch new job.\n :type job_template: str\n :param monitor: Flag that if set, immediately calls ``monitor`` on the newly launched job rather\n than exiting with a success.\n :type monitor: bool\n :param wait: Flag that if set, monitor the status of the job, but do not print while job is in progress.\n :type wait: bool\n :param timeout: If provided with ``monitor`` flag set, this attempt will time out after the given number\n of seconds.\n :type timeout: int\n :param no_input: Flag that if set, suppress any requests for input.\n :type no_input: bool\n :param extra_vars: yaml formatted texts that contains extra variables to pass on.\n :type extra_vars: array of strings\n :param diff_mode: Specify diff mode for job template to run.\n :type diff_mode: bool\n :param limit: Specify host limit for job template to run.\n :type limit: str\n :param tags: 
Specify tagged actions in the playbook to run.\n :type tags: str\n :param skip_tags: Specify tagged actions in the playbook to omit.\n :type skip_tags: str\n :param job_type: Specify job type for job template to run.\n :type job_type: str\n :param verbosity: Specify verbosity of the playbook run.\n :type verbosity: int\n :param inventory: Specify machine credential for job template to run.\n :type inventory: str\n :param credential: Specify machine credential for job template to run.\n :type credential: str\n :returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; Result of subsequent\n ``wait`` call if ``wait`` flag is on; Result of subsequent ``status`` call if none of\n the two flags are on.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3364:c0:m0"} {"signature": "@resources.command(use_fields_as_options=('', '', '', '', '', '', '','', '', '', '',))@click.option('', is_flag=True, default=False,help='''')@click.option('', is_flag=True, default=False,help='''')@click.option('', required=False, type=int,help='''''')def launch(self, monitor=False, wait=False, timeout=None, **kwargs):", "body": "r = client.get('')if '' not in r.json():raise exc.TowerCLIError('''''')self._pop_none(kwargs)debug.log('', header='')result = client.post(self.endpoint, data=kwargs)command = result.json()command_id = command['']if monitor:return self.monitor(command_id, timeout=timeout)elif wait:return self.wait(command_id, timeout=timeout)answer = OrderedDict((('', True),('', command_id),))answer.update(result.json())return answer", "docstring": "Launch a new ad-hoc command.\n\n Runs a user-defined command from Ansible Tower, immediately starts it,\n and returns back an ID in order for its status to be monitored.\n\n =====API DOCS=====\n Launch a new ad-hoc command.\n\n :param monitor: Flag that if set, immediately calls ``monitor`` on the newly launched command rather\n than exiting with a success.\n :type monitor: bool\n :param wait: Flag that if set, monitor the status of the job, but do not print while job is in progress.\n :type wait: bool\n :param timeout: If provided with ``monitor`` flag set, this attempt will time out after the given number\n of seconds.\n :type timeout: int\n :param `**kwargs`: Fields needed to create and launch an ad hoc command.\n :returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; Result of subsequent ``wait``\n call if ``wait`` flag is on; dictionary of \"id\" and \"changed\" if none of the two flags are on.\n :rtype: dict\n :raises tower_cli.exceptions.TowerCLIError: When ad hoc commands are not available in Tower backend.\n\n =====API DOCS=====", "id": "f3365:c0:m0"} {"signature": "@resources.command(use_fields_as_options=False)@click.option('', type=types.Related(''))@click.option('', type=types.Related(''))def associate_credential(self, job_template, credential):", "body": "return self._assoc('', job_template, credential)", "docstring": "Associate a credential with this job template.\n\n =====API DOCS=====\n Associate a credential with this job template.\n\n :param job_template: The job template to associate to.\n :type job_template: str\n :param credential: The credential to be associated.\n :type credential: str\n :returns: Dictionary of only one key \"changed\", which indicates whether the association succeeded.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3368:c0:m1"} {"signature": "@resources.command(use_fields_as_options=False)@click.option('', type=types.Related(''))@click.option('', type=types.Related(''))def 
disassociate_credential(self, job_template, credential):", "body": "return self._disassoc('', job_template, credential)", "docstring": "Disassociate a credential with this job template.\n\n =====API DOCS=====\n Disassociate a credential from this job template.\n\n :param job_template: The job template to disassociate fom.\n :type job_template: str\n :param credential: The credential to be disassociated.\n :type credential: str\n :returns: Dictionary of only one key \"changed\", which indicates whether the disassociation succeeded.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3368:c0:m2"} {"signature": "@resources.command(use_fields_as_options=False)@click.option('', type=types.Related(''))@click.option('',type=types.Related(''))@click.option('', type=click.Choice(['', '', '']),required=False, default='', help='''')def associate_notification_template(self, job_template,notification_template, status):", "body": "return self._assoc('' % status,job_template, notification_template)", "docstring": "Associate a notification template from this job template.\n\n =====API DOCS=====\n Associate a notification template from this job template.\n\n :param job_template: The job template to associate to.\n :type job_template: str\n :param notification_template: The notification template to be associated.\n :type notification_template: str\n :param status: type of notification this notification template should be associated to.\n :type status: str\n :returns: Dictionary of only one key \"changed\", which indicates whether the association succeeded.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3368:c0:m3"} {"signature": "@resources.command(use_fields_as_options=False)@click.option('', type=types.Related(''))@click.option('',type=types.Related(''))@click.option('', type=click.Choice(['', '', '']),required=False, default='', help='''')def disassociate_notification_template(self, job_template,notification_template, status):", "body": "return self._disassoc('' % status,job_template, notification_template)", "docstring": "Disassociate a notification template from this job template.\n\n =====API DOCS=====\n Disassociate a notification template from this job template.\n\n :param job_template: The job template to disassociate from.\n :type job_template: str\n :param notification_template: The notification template to be disassociated.\n :type notification_template: str\n :param status: type of notification this notification template should be disassociated from.\n :type status: str\n :returns: Dictionary of only one key \"changed\", which indicates whether the disassociation succeeded.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3368:c0:m4"} {"signature": "@resources.command(use_fields_as_options=(''))@click.option('', help='''')def callback(self, pk=None, host_config_key='', extra_vars=None):", "body": "url = self.endpoint + '' % pkif not host_config_key:host_config_key = client.get(url).json()['']post_data = {'': host_config_key}if extra_vars:post_data[''] = parser.process_extra_vars(list(extra_vars), force_json=True)r = client.post(url, data=post_data, auth=None)if r.status_code == :return {'': True}", "docstring": "Contact Tower and request a configuration update using this job template.\n\n =====API DOCS=====\n Contact Tower and request a provisioning callback using this job template.\n\n :param pk: Primary key of the job template to run provisioning callback against.\n :type pk: int\n :param host_config_key: Key string used to authenticate the callback host.\n :type host_config_key: str\n :param 
extra_vars: Extra variables that are passed to provisioning callback.\n :type extra_vars: array of str\n :returns: A dictionary of a single key \"changed\", which indicates whether the provisioning callback\n is successful.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3368:c0:m5"} {"signature": "@resources.command(ignore_defaults=True)def batch_update(self, pk=None, **kwargs):", "body": "res = self.get(pk=pk, **kwargs)url = self.endpoint + '' % (res[''], '')return client.post(url, data={}).json()", "docstring": "Update all related inventory sources of the given inventory.\n\n Note global option --format is not available here, as the output would always be JSON-formatted.\n\n =====API DOCS=====\n Update all related inventory sources of the given inventory.\n\n :param pk: Primary key of the given inventory.\n :type pk: int\n :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.\n :returns: A JSON object of update status of the given inventory.\n :rtype: dict\n =====API DOCS=====", "id": "f3369:c0:m0"} {"signature": "def command(method=None, **kwargs):", "body": "def actual_decorator(method):method._cli_command = Truemethod._cli_command_attrs = kwargsreturn methodif method and isinstance(method, types.FunctionType):return actual_decorator(method)else:return actual_decorator", "docstring": "Mark this method as a CLI command.\n\n This will only have any meaningful effect in methods that are members of a\n Resource subclass.", "id": "f3374:m0"} {"signature": "@resources.command(ignore_defaults=True, no_args_is_help=False)@click.option('', '', '',help='')def list(self, **kwargs):", "body": "self.custom_category = kwargs.get('', '')try:result = super(Resource, self).list(**kwargs)except exc.NotFound as e:categories = map(lambda category: category[''],client.get('').json()[''])e.message = '' % (kwargs[''],''.join(categories))raise efinally:self.custom_category = Nonereturn {'': [{'': k, '': v} for k, v in result.items()]}", "docstring": "Return a list of objects.\n\n =====API DOCS=====\n Retrieve a list of Tower settings.\n\n :param category: The category slug in which to look up indevidual settings.\n :type category: str\n :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.\n :returns: A JSON object containing details of all resource objects returned by Tower backend.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3376:c0:m0"} {"signature": "@resources.command(use_fields_as_options=False)def get(self, pk):", "body": "try:return next(s for s in self.list()[''] if s[''] == pk)except StopIteration:raise exc.NotFound('')", "docstring": "Return one and exactly one object\n\n =====API DOCS=====\n Return one and exactly one Tower setting.\n\n :param pk: Primary key of the Tower setting to retrieve\n :type pk: int\n :returns: loaded JSON of the retrieved Tower setting object.\n :rtype: dict\n :raises tower_cli.exceptions.NotFound: When no specified Tower setting exists.\n\n =====API DOCS=====", "id": "f3376:c0:m1"} {"signature": "@resources.command(use_fields_as_options=False)@click.argument('')@click.argument('', default=None, required=False,type=types.Variables())def modify(self, setting, value):", "body": "prev_value = new_value = self.get(setting)['']answer = OrderedDict()encrypted = '' in six.text_type(prev_value)if encrypted or six.text_type(prev_value) != six.text_type(value):if setting == '':r = client.post('',data=self.coerce_type(setting, value))new_value = r.json()else:r = 
client.patch(self.endpoint,data={setting: self.coerce_type(setting, value)})new_value = r.json()[setting]answer.update(r.json())changed = encrypted or (prev_value != new_value)answer.update({'': changed,'': setting,'': new_value,})return answer", "docstring": "Modify an already existing object.\n\n Positional argument SETTING is the setting name and VALUE is its value,\n which can be provided directly or obtained from a file name if prefixed with '@'.\n\n =====API DOCS=====\n Modify an already existing Tower setting.\n\n :param setting: The name of the Tower setting to be modified.\n :type setting: str\n :param value: The new value of the Tower setting.\n :type value: str\n :returns: A dictionary combining the JSON output of the modified resource, as well as two extra fields:\n \"changed\", a flag indicating if the resource is successfully updated; \"id\", an integer which\n is the primary key of the updated object.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3376:c0:m2"} {"signature": "def __getattribute__(self, name):", "body": "if name in ['', '']:raise AttributeErrorelse:return object.__getattribute__(self, name)", "docstring": "Disable inherited methods that cannot be applied to this\n particular resource.", "id": "f3376:c0:m5"} {"signature": "def jt_aggregate(func, is_create=False, has_pk=False):", "body": "def helper(kwargs, obj):\"\"\"\"\"\"unified_job_template = Nonefor item in UNIFIED_JT:if kwargs.get(item, None) is not None:jt_id = kwargs.pop(item)if unified_job_template is None:unified_job_template = (item, jt_id)else:raise exc.UsageError('''')if unified_job_template is not None:kwargs[''] = unified_job_template[]obj.identity = tuple(list(obj.identity) + [''])return ''.join([UNIFIED_JT[unified_job_template[]],str(unified_job_template[]), ''])elif is_create:raise exc.UsageError('''')def decorator_without_pk(obj, *args, **kwargs):old_endpoint = obj.endpointnew_endpoint = helper(kwargs, obj)if is_create:obj.endpoint = new_endpointresult = func(obj, *args, **kwargs)obj.endpoint = old_endpointreturn resultdef decorator_with_pk(obj, pk=None, *args, **kwargs):old_endpoint = obj.endpointnew_endpoint = helper(kwargs, obj)if is_create:obj.endpoint = new_endpointresult = func(obj, pk=pk, *args, **kwargs)obj.endpoint = old_endpointreturn resultdecorator = decorator_with_pk if has_pk else decorator_without_pkfor item in CLICK_ATTRS:setattr(decorator, item, getattr(func, item, []))decorator.__doc__ = func.__doc__return decorator", "docstring": "Decorator to aggregate unified_jt-related fields.\n\n Args:\n func: The CURD method to be decorated.\n is_create: Boolean flag showing whether this method is create.\n has_pk: Boolean flag showing whether this method uses pk as argument.\n\n Returns:\n A function with necessary click-related attributes whose keyworded\n arguments are aggregated.\n\n Raises:\n exc.UsageError: Either more than one unified jt fields are\n provided, or none is provided when is_create flag is set.", "id": "f3379:m0"} {"signature": "def __getattribute__(self, attr):", "body": "if attr == '':return object.__getattribute__(self, '')elif attr == '':raise AttributeErrorreturn super(Resource, self).__getattribute__(attr)", "docstring": "Alias the stdout to `summary` specially for workflow", "id": "f3380:c0:m0"} {"signature": "def lookup_stdout(self, pk=None, start_line=None, end_line=None,full=True):", "body": "uj_res = get_resource('')query_params = (('', pk),('', ''),('', ''))jobs_list = uj_res.list(all_pages=True, query=query_params)if jobs_list[''] == :return 
''return_content = ResSubcommand(uj_res)._format_human(jobs_list)lines = return_content.split('')if not full:lines = lines[:-]N = len(lines)start_range = start_lineif start_line is None:start_range = elif start_line > N:start_range = Nend_range = end_lineif end_line is None or end_line > N:end_range = Nlines = lines[start_range:end_range]return_content = ''.join(lines)if len(lines) > :return_content += ''return return_content", "docstring": "Internal method that lies to our `monitor` method by returning\na scorecard for the workflow job where the standard out\nwould have been expected.", "id": "f3380:c0:m1"} {"signature": "@resources.commanddef summary(self):", "body": "pass", "docstring": "Placeholder to get swapped out for `stdout`.\n\n =====API DOCS=====\n foobar\n =====API DOCS=====", "id": "f3380:c0:m2"} {"signature": "@resources.command(use_fields_as_options=('', ''))@click.option('', is_flag=True, default=False,help='''')@click.option('', is_flag=True, default=False,help='''')@click.option('', required=False, type=int,help='''''')def launch(self, workflow_job_template=None, monitor=False, wait=False,timeout=None, extra_vars=None, **kwargs):", "body": "if extra_vars is not None and len(extra_vars) > :kwargs[''] = parser.process_extra_vars(extra_vars)debug.log('', header='')self._pop_none(kwargs)post_response = client.post(''.format(workflow_job_template), data=kwargs).json()workflow_job_id = post_response['']post_response[''] = Trueif monitor:return self.monitor(workflow_job_id, timeout=timeout)elif wait:return self.wait(workflow_job_id, timeout=timeout)return post_response", "docstring": "Launch a new workflow job based on a workflow job template.\n\n Creates a new workflow job in Ansible Tower, starts it, and\n returns back an ID in order for its status to be monitored.\n\n =====API DOCS=====\n Launch a new workflow job based on a workflow job template.\n\n :param workflow_job_template: Primary key or name of the workflow job template to launch new job.\n :type workflow_job_template: str\n :param monitor: Flag that if set, immediately calls ``monitor`` on the newly launched workflow job rather\n than exiting with a success.\n :type monitor: bool\n :param wait: Flag that if set, monitor the status of the workflow job, but do not print while job is\n in progress.\n :type wait: bool\n :param timeout: If provided with ``monitor`` flag set, this attempt will time out after the given number\n of seconds.\n :type timeout: int\n :param extra_vars: yaml formatted texts that contains extra variables to pass on.\n :type extra_vars: array of strings\n :param `**kwargs`: Fields needed to create and launch a workflow job.\n :returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; Result of subsequent ``wait``\n call if ``wait`` flag is on; loaded JSON output of the job launch if none of the two flags are on.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3380:c0:m3"} {"signature": "@resources.command@click.option('', is_flag=True, default=False,help='''''')@click.option('', is_flag=True, default=False,help='')@click.option('', required=False, type=int,help='''''')def create(self, organization=None, monitor=False, wait=False,timeout=None, fail_on_found=False, force_on_exists=False,**kwargs):", "body": "if '' in kwargs and '' not in kwargs:kwargs[''] = kwargs.pop('')post_associate = Falseif organization:debug.log('', header='')r = client.options('')if '' in r.json().get('', {}).get('', {}):kwargs[''] = organizationelse:post_associate = Trueanswer = super(Resource, 
self).write(create_on_missing=True,fail_on_found=fail_on_found, force_on_exists=force_on_exists,**kwargs)project_id = answer['']if post_associate:org_resource = get_resource('')org_data = org_resource.get(organization)org_pk = org_data['']debug.log(\"\",header='', nl=)org_resource._assoc('', org_pk, project_id)if monitor and answer.get('', False):return self.monitor(pk=None, parent_pk=project_id, timeout=timeout)elif wait and answer.get('', False):return self.wait(pk=None, parent_pk=project_id, timeout=timeout)return answer", "docstring": "Create a new item of resource, with or w/o org.\n This would be a shared class with user, but it needs the ability\n to monitor if the flag is set.\n\n =====API DOCS=====\n Create a project and, if related flags are set, monitor or wait the triggered initial project update.\n\n :param monitor: Flag that if set, immediately calls ``monitor`` on the newly triggered project update\n rather than exiting with a success.\n :type monitor: bool\n :param wait: Flag that if set, monitor the status of the triggered project update, but do not print\n while it is in progress.\n :type wait: bool\n :param timeout: If provided with ``monitor`` flag set, this attempt will time out after the given number\n of seconds.\n :type timeout: bool\n :param fail_on_found: Flag that if set, the operation fails if an object matching the unique criteria\n already exists.\n :type fail_on_found: bool\n :param force_on_exists: Flag that if set, then if a match is found on unique fields, other fields will\n be updated to the provided values.; If unset, a match causes the request to be\n a no-op.\n :type force_on_exists: bool\n :param `**kwargs`: Keyword arguments which, all together, will be used as POST body to create the\n resource object.\n :returns: A dictionary combining the JSON output of the created resource, as well as two extra fields:\n \"changed\", a flag indicating if the resource is created successfully; \"id\", an integer which\n is the primary key of the created object.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3383:c0:m0"} {"signature": "@resources.command(use_fields_as_options=('', '', '', '', '','', '', '', '','', '', ''))def modify(self, pk=None, create_on_missing=False, **kwargs):", "body": "if '' in kwargs and '' not in kwargs:kwargs[''] = kwargs.pop('')return super(Resource, self).write(pk, create_on_missing=create_on_missing,force_on_exists=True, **kwargs)", "docstring": "Modify an already existing.\n\n To edit the project's organizations, see help for organizations.\n\n Fields in the resource's `identity` tuple can be used in lieu of a\n primary key for a lookup; in such a case, only other fields are\n written.\n\n To modify unique fields, you must use the primary key for the lookup.\n\n =====API DOCS=====\n Modify an already existing project.\n\n :param pk: Primary key of the resource to be modified.\n :type pk: int\n :param create_on_missing: Flag that if set, a new object is created if ``pk`` is not set and objects\n matching the appropriate unique criteria is not found.\n :type create_on_missing: bool\n :param `**kwargs`: Keyword arguments which, all together, will be used as PATCH body to modify the\n resource object. 
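A hypothetical end-to-end sketch of the project flow described above: create a project, wait on its initial SCM update, then trigger a later update. The resource name 'project' and the scm_* field names are assumptions, since the literal strings are masked in these records.

    from tower_cli import get_resource

    # Hypothetical sketch -- resource and field names are assumptions.
    project = get_resource('project')

    # Create the project and wait for the initial SCM update to finish
    # (monitor=True would stream progress instead of waiting quietly).
    result = project.create(name='app-playbooks',
                            scm_type='git',
                            scm_url='https://example.com/app-playbooks.git',
                            organization=1,   # placeholder organization pk
                            wait=True)

    # Later on, kick off and monitor a fresh update by project name.
    project.update(name='app-playbooks', monitor=True)

    # Check how the last update went without launching a new one.
    print(project.status(name='app-playbooks'))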
if ``pk`` is not set, key-value pairs of ``**kwargs`` which are\n also in resource's identity will be used to lookup existing reosource.\n :returns: A dictionary combining the JSON output of the modified resource, as well as two extra fields:\n \"changed\", a flag indicating if the resource is successfully updated; \"id\", an integer which\n is the primary key of the updated object.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3383:c0:m1"} {"signature": "@resources.command(use_fields_as_options=('', ''))@click.option('', is_flag=True, default=False,help='''')@click.option('', is_flag=True, default=False,help='')@click.option('', required=False, type=int,help='''''')def update(self, pk=None, create_on_missing=False, monitor=False,wait=False, timeout=None, name=None, organization=None):", "body": "project = self.get(pk, name=name, organization=organization)pk = project['']debug.log('',header='')result = client.get('' % pk)if not result.json()['']:raise exc.CannotStartJob('')debug.log('', header='')result = client.post('' % pk)project_update_id = result.json()['']if monitor:return self.monitor(project_update_id, parent_pk=pk,timeout=timeout)elif wait:return self.wait(project_update_id, parent_pk=pk, timeout=timeout)return {'': project_update_id,'': True,}", "docstring": "Trigger a project update job within Ansible Tower.\n Only meaningful on non-manual projects.\n\n =====API DOCS=====\n Update the given project.\n\n :param pk: Primary key of the project to be updated.\n :type pk: int\n :param monitor: Flag that if set, immediately calls ``monitor`` on the newly launched project update\n rather than exiting with a success.\n :type monitor: bool\n :param wait: Flag that if set, monitor the status of the project update, but do not print while it is\n in progress.\n :type wait: bool\n :param timeout: If provided with ``monitor`` flag set, this attempt will time out after the given number\n of seconds.\n :type timeout: int\n :param name: Name of the project to be updated if ``pk`` is not set.\n :type name: str\n :param organization: Primary key or name of the organization the project to be updated belonging to if\n ``pk`` is not set.\n :type organization: str\n :returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; Result of subsequent ``wait``\n call if ``wait`` flag is on; dictionary of \"status\" if none of the two flags are on.\n :rtype: dict\n :raises tower_cli.exceptions.CannotStartJob: When the project cannot be updated.\n\n =====API DOCS=====", "id": "f3383:c0:m2"} {"signature": "@resources.command@click.option('', is_flag=True, default=False,help='')def status(self, pk=None, detail=False, **kwargs):", "body": "job = self.last_job_data(pk, **kwargs)if detail:return jobreturn {'': job[''],'': job[''],'': job[''],}", "docstring": "Print the status of the most recent update.\n\n =====API DOCS=====\n Print the status of the most recent update.\n\n :param pk: Primary key of the resource to retrieve status from.\n :type pk: int\n :param detail: Flag that if set, return the full JSON of the job resource rather than a status summary.\n :type detail: bool\n :param `**kwargs`: Keyword arguments used to look up resource object to retrieve status from if ``pk``\n is not provided.\n :returns: full loaded JSON of the specified unified job if ``detail`` flag is on; trimed JSON containing\n only \"elapsed\", \"failed\" and \"status\" fields of the unified job if ``detail`` flag is off.\n :rtype: dict\n =====API DOCS=====", "id": "f3383:c0:m3"} {"signature": "def _parent_filter(self, 
parent, relationship, **kwargs):", "body": "if parent is None or relationship is None:return {}parent_filter_kwargs = {}query_params = ((self._reverse_rel_name(relationship), parent),)parent_filter_kwargs[''] = query_paramsif kwargs.get('', None) is None:parent_data = self.read(pk=parent)[''][]parent_filter_kwargs[''] = parent_data['']return parent_filter_kwargs", "docstring": "Returns filtering parameters to limit a search to the children\nof a particular node by a particular relationship.", "id": "f3384:c0:m3"} {"signature": "@resources.command@unified_job_template_options@click.argument('', type=types.Related(''))@click.argument('', type=types.Related(''), required=False)def associate_success_node(self, parent, child=None, **kwargs):", "body": "return self._assoc_or_create('', parent, child, **kwargs)", "docstring": "Add a node to run on success.\n\n =====API DOCS=====\n Add a node to run on success.\n\n :param parent: Primary key of parent node to associate success node to.\n :type parent: int\n :param child: Primary key of child node to be associated.\n :type child: int\n :param `**kwargs`: Fields used to create child node if ``child`` is not provided.\n :returns: Dictionary of only one key \"changed\", which indicates whether the association succeeded.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3384:c0:m6"} {"signature": "@resources.command(use_fields_as_options=False)@click.argument('', type=types.Related(''))@click.argument('', type=types.Related(''))def disassociate_success_node(self, parent, child):", "body": "return self._disassoc(self._forward_rel_name(''), parent, child)", "docstring": "Remove success node.\n The resulatant 2 nodes will both become root nodes.\n\n =====API DOCS=====\n Remove success node.\n\n :param parent: Primary key of parent node to disassociate success node from.\n :type parent: int\n :param child: Primary key of child node to be disassociated.\n :type child: int\n :returns: Dictionary of only one key \"changed\", which indicates whether the disassociation succeeded.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3384:c0:m7"} {"signature": "@resources.command@unified_job_template_options@click.argument('', type=types.Related(''))@click.argument('', type=types.Related(''), required=False)def associate_failure_node(self, parent, child=None, **kwargs):", "body": "return self._assoc_or_create('', parent, child, **kwargs)", "docstring": "Add a node to run on failure.\n\n =====API DOCS=====\n Add a node to run on failure.\n\n :param parent: Primary key of parent node to associate failure node to.\n :type parent: int\n :param child: Primary key of child node to be associated.\n :type child: int\n :param `**kwargs`: Fields used to create child node if ``child`` is not provided.\n :returns: Dictionary of only one key \"changed\", which indicates whether the association succeeded.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3384:c0:m8"} {"signature": "@resources.command(use_fields_as_options=False)@click.argument('', type=types.Related(''))@click.argument('', type=types.Related(''))def disassociate_failure_node(self, parent, child):", "body": "return self._disassoc(self._forward_rel_name(''), parent, child)", "docstring": "Remove a failure node link.\n The resulatant 2 nodes will both become root nodes.\n\n =====API DOCS=====\n Remove a failure node link.\n\n :param parent: Primary key of parent node to disassociate failure node from.\n :type parent: int\n :param child: Primary key of child node to be disassociated.\n :type child: int\n :returns: 
Dictionary of only one key \"changed\", which indicates whether the disassociation succeeded.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3384:c0:m9"} {"signature": "@resources.command@unified_job_template_options@click.argument('', type=types.Related(''))@click.argument('', type=types.Related(''), required=False)def associate_always_node(self, parent, child=None, **kwargs):", "body": "return self._assoc_or_create('', parent, child, **kwargs)", "docstring": "Add a node to always run after the parent is finished.\n\n =====API DOCS=====\n Add a node to always run after the parent is finished.\n\n :param parent: Primary key of parent node to associate always node to.\n :type parent: int\n :param child: Primary key of child node to be associated.\n :type child: int\n :param `**kwargs`: Fields used to create child node if ``child`` is not provided.\n :returns: Dictionary of only one key \"changed\", which indicates whether the association succeeded.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3384:c0:m10"} {"signature": "@resources.command(use_fields_as_options=False)@click.argument('', type=types.Related(''))@click.argument('', type=types.Related(''))def disassociate_always_node(self, parent, child):", "body": "return self._disassoc(self._forward_rel_name(''), parent, child)", "docstring": "Remove an always node link.\n The resultant 2 nodes will both become root nodes.\n\n =====API DOCS=====\n Remove an always node link.\n\n :param parent: Primary key of parent node to disassociate always node from.\n :type parent: int\n :param child: Primary key of child node to be disassociated.\n :type child: int\n :returns: Dictionary of only one key \"changed\", which indicates whether the disassociation succeeded.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3384:c0:m11"} {"signature": "@resources.command(ignore_defaults=True, no_args_is_help=False)@click.option('', type=types.Related(''), help='')@click.option('', help='')def list(self, group=None, host_filter=None, **kwargs):", "body": "if group:kwargs[''] = kwargs.get('', ()) + (('', group),)if host_filter:kwargs[''] = kwargs.get('', ()) + (('', host_filter),)return super(Resource, self).list(**kwargs)", "docstring": "Return a list of hosts.\n\n =====API DOCS=====\n Retrieve a list of hosts.\n\n :param group: Primary key or name of the group whose hosts will be listed.\n :type group: str\n :param all_pages: Flag that if set, collect all pages of content from the API when returning results.\n :type all_pages: bool\n :param page: The page to show. 
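A hypothetical sketch of wiring workflow nodes together with the association commands above; the resource name 'node', the field names, and all primary keys are placeholders, since the literal strings are masked in these records.

    from tower_cli import get_resource

    # Hypothetical sketch -- resource/field names and pks are placeholders.
    node = get_resource('node')

    # Create three nodes inside workflow job template #7, each pointing at a
    # regular job template.
    provision = node.create(workflow_job_template=7, job_template=10)
    deploy = node.create(workflow_job_template=7, job_template=11)
    teardown = node.create(workflow_job_template=7, job_template=12)

    # Chain them: Deploy runs when Provision succeeds, Teardown when it fails.
    node.associate_success_node(provision['id'], deploy['id'])
    node.associate_failure_node(provision['id'], teardown['id'])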
Ignored if all_pages is set.\n :type page: int\n :param query: Contains 2-tuples used as query parameters to filter resulting resource objects.\n :type query: list\n :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.\n :returns: A JSON object containing details of all resource objects returned by Tower backend.\n :rtype: dict\n\n =====API DOCS=====", "id": "f3385:c0:m0"} {"signature": "@resources.command(ignore_defaults=True)def list_facts(self, pk=None, **kwargs):", "body": "res = self.get(pk=pk, **kwargs)url = self.endpoint + '' % (res[''], '')return client.get(url, params={}).json()", "docstring": "Return a JSON object of all available facts of the given host.\n\n Note global option --format is not available here, as the output would always be JSON-formatted.\n\n =====API DOCS=====\n List all available facts of the given host.\n\n :param pk: Primary key of the target host.\n :type pk: int\n :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.\n :returns: A JSON object of all available facts of the given host.\n :rtype: dict\n =====API DOCS=====", "id": "f3385:c0:m1"} {"signature": "@resources.command(ignore_defaults=True)def insights(self, pk=None, **kwargs):", "body": "res = self.get(pk=pk, **kwargs)url = self.endpoint + '' % (res[''], '')return client.get(url, params={}).json()", "docstring": "Return a JSON object of host insights.\n\n Note global option --format is not available here, as the output would always be JSON-formatted.\n\n =====API DOCS=====\n List host insights.\n\n :param pk: Primary key of the target host.\n :type pk: int\n :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.\n :returns: A JSON object of host insights.\n :rtype: dict\n =====API DOCS=====", "id": "f3385:c0:m2"} {"signature": "def register_get(t):", "body": "t.register_json('',{'': , '': , '': '','': , '': ,}, method='')", "docstring": "After starting job, the launch method may grab info about\n the job just launched from this endpoint", "id": "f3399:m0"} {"signature": "def standard_registration(t, **kwargs):", "body": "t.register_json('', {'': ,'': '','': {'': ''},})register_get(t)t.register_json('', {}, method='')data = {'': }data.update(kwargs)t.register_json('', data, method='')", "docstring": "Endpoints common to launching any job with template #1 and\n is automatically assigned to job #42. 
kwargs is used to provide\n extra return fields of job launch", "id": "f3399:m1"} {"signature": "def jt_vars_registration(t, extra_vars):", "body": "t.register_json('', {'': True,'': extra_vars,'': ,'': '','': {'': ''},})register_get(t)t.register_json('', {'': ''}, method='')t.register_json('', {}, method='')t.register_json('', {'': },method='')", "docstring": "Endpoints that are needed to get information from job template.\n This particular combination also entails\n 1) version of Tower - 2.2.0\n 2) successful job launch, id=42\n 3) prompts user for variables on launch", "id": "f3399:m2"} {"signature": "def setUp(self):", "body": "class BasicResource(models.Resource):endpoint = ''name = models.Field(unique=True)self.resource = BasicResource()self.command = ResSubcommand(self.resource)", "docstring": "Install a resource instance sufficient for testing common\n things with subcommands.", "id": "f3414:c0:m0"} {"signature": "def parse_requirements(filename):", "body": "reqs = []version_spec_in_play = Nonefor line in open(filename, '').read().strip().split(''):if not line.strip():continueif not line.startswith(''):reqs.append(line)continuematch = re.search(r''r'', line)if match:version_spec_in_play = match.groupdict()for key in ('', ''):version_spec_in_play[key] = int(version_spec_in_play[key])continueif '' not in line[:].strip() and version_spec_in_play:package = line[:].strip()op = version_spec_in_play['']vspec = (version_spec_in_play[''],version_spec_in_play[''])if '' in op and sys.version_info[:] == vspec:reqs.append(package)elif '>' in op and sys.version_info[:] > vspec:reqs.append(package)elif '' in op and sys.version_info[:] < vspec:reqs.append(package)return reqs", "docstring": "Parse out a list of requirements from the given requirements\n requirements file.", "id": "f3422:m0"} {"signature": "def combine_files(*args):", "body": "file_contents = []for filename in args:with codecs.open(filename, mode='', encoding='') as f:file_contents.append(f.read())return \"\".join(file_contents)", "docstring": "returns a string of all the strings in *args combined together,\n with two line breaks between them", "id": "f3422:m1"} {"signature": "@classmethoddef lookup(cls, key, get=False):", "body": "if get:item = cls._item_dict.get(key)return item.name if item else keyreturn cls._item_dict[key].name", "docstring": "Returns the label for a given Enum key", "id": "f3426:c2:m4"} {"signature": "@classmethoddef keys(cls):", "body": "return cls._item_dict.keys()", "docstring": "Returns all of the Enum keys", "id": "f3426:c2:m5"} {"signature": "@classmethoddef values(cls):", "body": "return [x.name for x in cls._item_dict.values()]", "docstring": "Returns all of the Enum values", "id": "f3426:c2:m6"} {"signature": "@classmethoddef items(cls):", "body": "return [(x[], x[].name) for x in cls._item_dict.items()]", "docstring": "Returns pairs of Enum keys and values", "id": "f3426:c2:m7"} {"signature": "@classmethoddef verbose(cls, key=False, default=''):", "body": "if key is False:items = cls._item_dict.values()return [(x.key, x.value) for x in sorted(items, key=lambda x:x.sort or x.key)]item = cls._item_dict.get(key)return item.value if item else default", "docstring": "Returns the verbose name for a given enum value", "id": "f3426:c2:m8"} {"signature": "def logger(name=None, save=False):", "body": "logger = logging.getLogger(name)if save:logformat = ''log_file_path = '' open(log_file_path, '').write('') logger.setLevel(logging.DEBUG)logger_handler = 
logging.FileHandler(log_file_path)logger_handler.setFormatter(logging.Formatter(logformat))else:logger_handler = NullHandler()logger.addHandler(logger_handler)return logger", "docstring": "Init and configure logger.", "id": "f3432:m0"} {"signature": "def num2hex(self, num):", "body": "temp = ''for i in range(, ):x = self.hexChars[ ( num >> (i * + ) ) & ]y = self.hexChars[ ( num >> (i * ) ) & ]temp += (x + y)return temp", "docstring": "Convert a decimal number to hexadecimal", "id": "f3435:c0:m2"} {"signature": "def baseId(resource_id, return_version=False):", "body": "version = resource_id = resource_id + while resource_id > : version += if version == :resource_id -= elif version == :resource_id -= else:resource_id -= if return_version:return resource_id, version - return resource_id", "docstring": "Calculate base id and version from a resource id.\n\n :params resource_id: Resource id.\n :params return_version: (optional) True if You need version, returns (resource_id, version).", "id": "f3437:m0"} {"signature": "def itemParse(item_data, full=True):", "body": "return_data = {'': item_data.get(''),'': item_data.get(''),'': item_data.get(''),'': item_data.get(''),'': item_data.get(''),'': item_data.get('', {'': None})[''] or item_data.get('', {'': None})[''],'': item_data.get(''),'': item_data.get(''),'': item_data.get(''), '': item_data.get(''),'': item_data.get(''),'': item_data.get(''),'': item_data.get(''),'': item_data.get(''), '': item_data.get(''), }if full:if '' in item_data:return_data.update({'': item_data[''].get(''), '': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''), '': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''), '': item_data[''].get(''), '': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''), '': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''), '': item_data[''].get(''), '': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data.get(''),})elif '' in item_data: return_data.update({'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data[''].get(''),'': item_data.get(''), '': item_data.get(''), })return return_data", "docstring": "Parser for item data. Returns nice dictionary.\n\n :params item_data: Item data received from ea servers.\n :params full: (optional) False if you're sniping and don't need extended info. 
Anyone really use this?", "id": "f3437:m1"} {"signature": "def nations(timeout=timeout):", "body": "rc = requests.get(messages_url, timeout=timeout)rc.encoding = '' rc = rc.textdata = re.findall('', rc)nations = {}for i in data:nations[int(i[])] = i[]return nations", "docstring": "Return all nations in dict {id0: nation0, id1: nation1}.\n\n :params year: Year.", "id": "f3437:m2"} {"signature": "def leagues(year=, timeout=timeout):", "body": "rc = requests.get(messages_url, timeout=timeout)rc.encoding = '' rc = rc.textdata = re.findall('' % year, rc)leagues = {}for i in data:leagues[int(i[])] = i[]return leagues", "docstring": "Return all leagues in dict {id0: league0, id1: legaue1}.\n\n :params year: Year.", "id": "f3437:m3"} {"signature": "def teams(year=, timeout=timeout):", "body": "rc = requests.get(messages_url, timeout=timeout)rc.encoding = '' rc = rc.textdata = re.findall('' % year, rc)teams = {}for i in data:teams[int(i[])] = i[]return teams", "docstring": "Return all teams in dict {id0: team0, id1: team1}.\n\n :params year: Year.", "id": "f3437:m4"} {"signature": "def stadiums(year=, timeout=timeout):", "body": "rc = requests.get(messages_url, timeout=timeout)rc.encoding = '' rc = rc.textdata = re.findall('' % year, rc)stadiums = {}for i in data:stadiums[int(i[])] = i[]return stadiums", "docstring": "Return all stadium in dict {id0: stadium0, id1: stadium1}.\n\n :params year: Year.", "id": "f3437:m5"} {"signature": "def balls(timeout=timeout):", "body": "rc = requests.get(messages_url, timeout=timeout)rc.encoding = '' rc = rc.textdata = re.findall('', rc)balls = {}for i in data:balls[int(i[])] = i[]return balls", "docstring": "Return all balls in dict {id0: ball0, id1: ball1}.", "id": "f3437:m6"} {"signature": "def players(timeout=timeout):", "body": "rc = requests.get(''.format(card_info_url, ''), timeout=timeout).json()players = {}for i in rc[''] + rc['']:players[i['']] = {'': i[''],'': i[''],'': i[''],'': i.get(''),'': i['']}return players", "docstring": "Return all players in dict {id: c, f, l, n, r}.\n id, rank, nationality(?), first name, last name.", "id": "f3437:m7"} {"signature": "def playstyles(year=, timeout=timeout):", "body": "rc = requests.get(messages_url, timeout=timeout)rc.encoding = '' rc = rc.textdata = re.findall('' % year, rc)playstyles = {}for i in data:playstyles[int(i[])] = i[]return playstyles", "docstring": "Return all playstyles in dict {id0: playstyle0, id1: playstyle1}.\n\n :params year: Year.", "id": "f3437:m8"} {"signature": "def __login__(self, email, passwd, code=None, totp=None, sms=False):", "body": "params = {'': '','': '','': client_id,'': '','': '','': '','': '','': '','': ''}self.r.headers[''] = ''rc = self.r.get('', params=params, timeout=self.timeout)if rc.url != '': self.r.headers[''] = rc.urldata = {'': email,'': passwd,'': '', '': '', '': '','': '','': '', '': '','': '','': '','': ''}rc = self.r.post(rc.url, data=data, timeout=self.timeout)if \"\" in rc.text:failedReason = re.search('', rc.text).group()raise FutError(reason=failedReason)if '' in rc.text:rc = self.r.get(rc.url, params={'': ''}) if '' in rc.text: if totp:rc = self.r.post(rc.url, {'': '', '': ''})code = pyotp.TOTP(totp).now()elif sms:rc = self.r.post(rc.url, {'': '', '': ''})else: rc = self.r.post(rc.url, {'': '', '': ''})if '' in rc.text:if not code:code = input('')self.r.headers[''] = url = rc.urlrc = self.r.post(url.replace('', ''),{'': code,'': '','': ''}, timeout=self.timeout)if '' in rc.text or '' in rc.text:raise FutError(reason='')if '' in rc.text: rc = 
self.r.post(url.replace('', ''), {'': '', '': ''},timeout=self.timeout)rc = re.match('',rc.url)self.access_token = rc.group()self.token_type = rc.group()self.saveSession()", "docstring": "Log in - needed only if we don't have access token or it's expired.", "id": "f3437:c0:m1"} {"signature": "def __launch__(self, email, passwd, secret_answer, platform='', code=None, totp=None, sms=False, emulate=None,proxies=None, anticaptcha_client_key=None):", "body": "self.emulate = emulatesecret_answer_hash = EAHashingAlgorithm().EAHash(secret_answer)self.r = requests.Session() if proxies is not None:self.r.proxies = proxiesif self.cookies_file:self.r.cookies = LWPCookieJar(self.cookies_file)try:with open(self.token_file, '') as f:self.token_type, self.access_token = f.readline().replace('', '').replace('', '').split('') except FileNotFoundError:self.__login__(email=email, passwd=passwd, code=code, totp=totp, sms=sms)try:self.r.cookies.load(ignore_discard=True) except IOError:passelse:self.__login__(email=email, passwd=passwd, code=code, totp=totp, sms=sms)if emulate == '':raise FutError(reason='')self.r.headers = headers_and.copy() elif emulate == '':raise FutError(reason='')self.r.headers = headers_ios.copy() else:self.r.headers = headers.copy() pre_game_sku = '' if platform == '': game_sku = '' % pre_game_skuelif platform == '':game_sku = '' % pre_game_skuelif platform == '':game_sku = '' % pre_game_skuelif platform == '':game_sku = '' % pre_game_sku elif platform == '':game_sku = '' % pre_game_skuelse:raise FutError(reason='')pre_sku = '' if emulate == '':sku = '' % pre_skuclientVersion = elif emulate == '':sku = '' % pre_skuclientVersion = elif not emulate:sku = '' % pre_skuclientVersion = else:raise FutError(reason='') self.sku = sku self.sku_b = '' params = {'': self.access_token,'': client_id,'': '','': '','': '','': '','': '','': ''}rc = self.r.get('', params=params)rc = re.match('',rc.url)if not rc:raise FutError('')self.access_token = rc.group()self.token_type = rc.group()rc = self.r.get('', timeout=self.timeout).textself.r.headers[''] = ''self.r.headers[''] = ''self.r.headers[''] = '' % (self.token_type, self.access_token)rc = self.r.get('').json() if rc.get('') == '':print('')self.__login__(email=email, passwd=passwd, totp=totp, sms=sms)return self.__launch__(email=email, passwd=passwd, secret_answer=secret_answer, platform=platform,code=code, totp=totp, sms=sms, emulate=emulate, proxies=proxies,anticaptcha_client_key=anticaptcha_client_key)self.nucleus_id = rc[''][''] self.dob = rc['']['']del self.r.headers['']self.r.headers[''] = self.nucleus_idrc = self.r.get('' % auth_url).json()self.fut_host = {'': '','': '','': '','': '',}self.fut_host = self.fut_host[platform]data = {'': '','': self.sku,'': ''} rc = self.r.get('' % (self.fut_host, self.gameUrl), params=data).json()personas = rc['']['']for p in personas:for c in p['']:if c[''] and game_sku in c['']:self.persona_id = p['']breakif not hasattr(self, ''):raise FutError(reason='')del self.r.headers['']self.r.headers[''] = ''params = {'': '', '': '','': '','': self.access_token,'': ''}rc = self.r.get('', params=params).json()auth_code = rc['']self.r.headers[''] = ''data = {'': '','': self.sku,'': clientVersion,'': self.persona_id,'': game_sku,'': '','': '','': ,'': {'': auth_code,'': ''}}rc = self.r.post('' % self.fut_host, data=json.dumps(data),timeout=self.timeout)if rc.status_code == : raise FutError('')if rc.status_code == :raise InternalServerError('')rc = rc.json()if rc.get('') == '':raise MultipleSessionelif rc.get('') == '':raise 
MaxSessionselif rc.get('') == '':raise DoLoginFailelif rc.get(''):raise UnknownError(rc.__str__())self.r.headers[''] = self.sid = rc['']self.r.headers[''] = self.nucleus_idrc = self.r.get('' % (self.fut_host, self.gameUrl),timeout=self.timeout).json()if rc.get('') == '':if anticaptcha_client_key:if not proxies:raise FutError('')self.logger.debug('')anticaptcha = AnticaptchaClient(anticaptcha_client_key)attempt = while True:attempt += if attempt > :raise FutError('')try:self.logger.debug(''.format(attempt))task = FunCaptchaTask('',fun_captcha_public_key,proxy=Proxy.parse_url(proxies.get('') or proxies.get('')),user_agent=self.r.headers[''])job = anticaptcha.createTask(task)job.join()fun_captcha_token = job.get_token_response()self.logger.debug(''.format(fun_captcha_token))self.__request__('', '', data=json.dumps({'': fun_captcha_token,}))rc = self.r.get('' % (self.fut_host, self.gameUrl), timeout=self.timeout).json()breakexcept AnticaptchaException as e:if e.error_code in ['', '','', '']:self.logger.exception('' + e.error_code)time.sleep()continueelse:raiseelse:raise Captcha(code=rc.get(''), string=rc.get(''), reason=rc.get(''))if rc[''] != '' and rc[''] != '':params = {'': secret_answer_hash}rc = self.r.post('' % (self.fut_host, self.gameUrl), params=params,timeout=self.timeout).json()if rc[''] == '':print(rc[''])raise FutError('')elif rc[''] != '': print(rc[''])raise FutError(reason='' % (rc['']))self.r.headers[''] = self.token = rc['']rc = self.r.get('' % (self.fut_host, self.gameUrl), timeout=self.timeout).json()self.r.headers[''] = self.token = rc['']self.pin = Pin(sid=self.sid, nucleus_id=self.nucleus_id, persona_id=self.persona_id, dob=self.dob[:-],platform=platform)events = [self.pin.event('', status='')]self.pin.send(events)self._usermassinfo = self.r.get('' % (self.fut_host, self.gameUrl), timeout=self.timeout).json()if self._usermassinfo[''][''][''] == :raise FutError(reason='') self.base_time = int(time.time() * )self._ = self.base_timeself.r.get('' % (self.fut_host, self.gameUrl), params={'': self._}, timeout=self.timeout)piles = self.pileSize()self.tradepile_size = piles['']self.watchlist_size = piles['']self.saveSession()events = [self.pin.event('', '')]self.pin.send(events)self.keepalive()", "docstring": "Launch futweb\n\n :params email: Email.\n :params passwd: Password.\n :params secret_answer: Answer for secret question.\n :params platform: (optional) [pc/xbox/xbox360/ps3/ps4] Platform.\n :params code: (optional) Security code generated in origin or sent via mail/sms.\n :params emulate: (optional) [and/ios] Emulate mobile device.\n :params proxies: (optional) [dict] http/socks proxies in requests's format. 
http://docs.python-requests.org/en/master/user/advanced/#proxies", "id": "f3437:c0:m2"} {"signature": "def __request__(self, method, url, data=None, params=None, fast=False):", "body": "self.n += if self.stats:self.stats.save_requests(write_file=not fast)data = data or {}params = params or {}url = '' % (self.fut_host, self.gameUrl, url)self.logger.debug(\"\".format(url, data, params))if not fast: time.sleep(max(self.request_time - time.time() + random.randrange(self.delay[], self.delay[] + ), ))self.r.options(url, params=params)else:time.sleep(max(self.request_time - time.time() + , )) self.request_time = time.time() try:if method.upper() == '':rc = self.r.get(url, data=data, params=params, timeout=self.timeout)elif method.upper() == '':rc = self.r.post(url, data=data, params=params, timeout=self.timeout)elif method.upper() == '':rc = self.r.put(url, data=data, params=params, timeout=self.timeout)elif method.upper() == '':rc = self.r.delete(url, data=data, params=params, timeout=self.timeout)except requests.exceptions.Timeout as e:raise Timeout(e)self.logger.debug(\"\".format(rc.content))if not rc.ok: if rc.status_code == :print(rc.content)raise ExpiredSession()elif rc.status_code == :raise Conflict()elif rc.status_code == :raise FutError('')elif rc.status_code == :raise FutError('')elif rc.status_code == :print(rc.headers)print(rc.status_code)print(rc.cookies)print(rc.content)if url != '' % self.fut_host:events = [self.pin.event('')]self.pin.send(events)self.logout()raise Captcha()elif rc.status_code == :raise PermissionDenied()elif rc.status_code == :raise PermissionDenied() elif rc.status_code == :raise MarketLocked()elif rc.status_code in (, ):raise FutError('')elif rc.status_code == :raise NoTradeExistingError()print(rc.url)print(data)print(params)print(rc.headers)print(rc.status_code)print(rc.cookies)print(rc.content)raise UnknownError(rc.content)if rc.text == '':rc = {}else:rc = rc.json()if '' in rc and rc['']:self.credits = rc['']if '' in rc:self.duplicates = [i[''] for i in rc['']]self.saveSession()return rc", "docstring": "Prepare headers and sends request. Returns response as a json object.\n\n :params method: Rest method.\n :params url: Url.", "id": "f3437:c0:m3"} {"signature": "def __sendToPile__(self, pile, trade_id=None, item_id=None):", "body": "method = ''url = ''if not isinstance(item_id, (list, tuple)):item_id = (item_id,)data = {\"\": [{'': pile, '': str(i)} for i in item_id]}rc = self.__request__(method, url, data=json.dumps(data))if rc[''][]['']:self.logger.info(\"\".format(trade_id, item_id, pile))else:self.logger.error(\"\".format(trade_id, item_id, pile,rc[''][]['']))return rc[''][]['']", "docstring": "Send to pile.\n\n :params trade_id: (optional?) 
Trade id.\n :params item_id: Item id.", "id": "f3437:c0:m4"} {"signature": "def logout(self, save=True):", "body": "self.r.delete('' % self.fut_host, timeout=self.timeout)if save:self.saveSession()return True", "docstring": "Log out nicely (like clicking on logout button).\n\n :params save: False if You don't want to save cookies.", "id": "f3437:c0:m5"} {"signature": "@propertydef players(self):", "body": "if not self._players:self._players = players()return self._players", "docstring": "Return all players in dict {id: c, f, l, n, r}.", "id": "f3437:c0:m6"} {"signature": "@propertydef playstyles(self, year=):", "body": "if not self._playstyles:self._playstyles = playstyles()return self._playstyles", "docstring": "Return all playstyles in dict {id0: playstyle0, id1: playstyle1}.\n\n :params year: Year.", "id": "f3437:c0:m7"} {"signature": "@propertydef nations(self):", "body": "if not self._nations:self._nations = nations()return self._nations", "docstring": "Return all nations in dict {id0: nation0, id1: nation1}.\n\n :params year: Year.", "id": "f3437:c0:m8"} {"signature": "@propertydef leagues(self, year=):", "body": "if year not in self._leagues:self._leagues[year] = leagues(year)return self._leagues[year]", "docstring": "Return all leagues in dict {id0: league0, id1: league1}.\n\n :params year: Year.", "id": "f3437:c0:m9"} {"signature": "@propertydef teams(self, year=):", "body": "if year not in self._teams:self._teams[year] = teams(year)return self._teams[year]", "docstring": "Return all teams in dict {id0: team0, id1: team1}.\n\n :params year: Year.", "id": "f3437:c0:m10"} {"signature": "@propertydef stadiums(self):", "body": "if not self._stadiums:self._stadiums = stadiums()return self._stadiums", "docstring": "Return all stadiums in dict {id0: stadium0, id1: stadium1}.\n\n :params year: Year.", "id": "f3437:c0:m11"} {"signature": "def saveSession(self):", "body": "if self.cookies_file:self.r.cookies.save(ignore_discard=True)with open(self.token_file, '') as f:f.write('' % (self.token_type, self.access_token))", "docstring": "Save cookies/session.", "id": "f3437:c0:m12"} {"signature": "def baseId(self, *args, **kwargs):", "body": "return baseId(*args, **kwargs)", "docstring": "Calculate base id and version from a resource id.", "id": "f3437:c0:m13"} {"signature": "def cardInfo(self, resource_id):", "body": "base_id = baseId(resource_id)if base_id in self.players:return self.players[base_id]else: url = ''.format(card_info_url, base_id)return requests.get(url, timeout=self.timeout).json()", "docstring": "Return card info.\n\n :params resource_id: Resource id.", "id": "f3437:c0:m14"} {"signature": "def searchDefinition(self, asset_id, start=, page_size=itemsPerPage[''], count=None):", "body": "method = ''url = ''if count: page_size = countbase_id = baseId(asset_id)if base_id not in self.players:raise FutError(reason='')params = {'': base_id,'': start,'': '','': page_size}rc = self.__request__(method, url, params=params)return [itemParse({'': i}) for i in rc['']]", "docstring": "Return variations of the given asset id, e.g. 
IF cards.\n\n :param asset_id: Asset id / Definition id.\n :param start: (optional) Start page.\n :param count: (optional) Number of definitions you want to request.", "id": "f3437:c0:m15"} {"signature": "def search(self, ctype, level=None, category=None, assetId=None, defId=None,min_price=None, max_price=None, min_buy=None, max_buy=None,league=None, club=None, position=None, zone=None, nationality=None,rare=False, playStyle=None, start=, page_size=itemsPerPage[''],fast=False):", "body": "method = ''url = ''if start == :events = [self.pin.event('', ''), self.pin.event('', '')]self.pin.send(events, fast=fast)params = {'': start,'': page_size,'': ctype, }if level:params[''] = levelif category:params[''] = categoryif assetId:params[''] = assetIdif defId:params[''] = defIdif min_price:params[''] = min_priceif max_price:params[''] = max_priceif min_buy:params[''] = min_buyif max_buy:params[''] = max_buyif league:params[''] = leagueif club:params[''] = clubif position:params[''] = positionif zone:params[''] = zoneif nationality:params[''] = nationalityif rare:params[''] = ''if playStyle:params[''] = playStylerc = self.__request__(method, url, params=params, fast=fast)if start == :events = [self.pin.event('', ''), self.pin.event('', '')]self.pin.send(events, fast=fast)return [itemParse(i) for i in rc.get('', ())]", "docstring": "Prepare search request, send and return parsed data as a dict.\n\n :param ctype: [development / ? / ?] Card type.\n :param level: (optional) [?/?/gold] Card level.\n :param category: (optional) [fitness/?/?] Card category.\n :param assetId: (optional) Asset id.\n :param defId: (optional) Definition id.\n :param min_price: (optional) Minimal price.\n :param max_price: (optional) Maximum price.\n :param min_buy: (optional) Minimal buy now price.\n :param max_buy: (optional) Maximum buy now price.\n :param league: (optional) League id.\n :param club: (optional) Club id.\n :param position: (optional) Position.\n :param nationality: (optional) Nation id.\n :param rare: (optional) [boolean] True for searching special cards.\n :param playStyle: (optional) Play style.\n :param start: (optional) Start page sent to server so it supposed to be 12/15, 24/30 etc. 
(default platform page_size*n)\n :param page_size: (optional) Page size (items per page).", "id": "f3437:c0:m16"} {"signature": "def searchAuctions(self, *args, **kwargs):", "body": "return self.search(*args, **kwargs)", "docstring": "Alias for search method, just to keep compatibility.", "id": "f3437:c0:m17"} {"signature": "def bid(self, trade_id, bid, fast=False):", "body": "method = ''url = '' % trade_idif not fast:rc = self.tradeStatus(trade_id)[]if rc[''] >= bid or self.credits < bid:return False data = {'': bid}try:rc = self.__request__(method, url, data=json.dumps(data), params={'': self.sku_b}, fast=fast)[''][]except PermissionDenied: return Falseif rc[''] == '' or (rc[''] == '' and rc[''] == ''): return Trueelse:return False", "docstring": "Make a bid.\n\n :params trade_id: Trade id.\n :params bid: Amount of credits You want to spend.\n :params fast: True for fastest bidding (skips trade status & credits check).", "id": "f3437:c0:m18"} {"signature": "def club(self, sort='', ctype='', defId='', start=, count=None, page_size=itemsPerPage[''],level=None, category=None, assetId=None, league=None, club=None,position=None, zone=None, nationality=None, rare=False, playStyle=None):", "body": "method = ''url = ''if count: page_size = countparams = {'': sort, '': ctype, '': defId, '': start, '': page_size}if level:params[''] = levelif category:params[''] = categoryif assetId:params[''] = assetIdif league:params[''] = leagueif club:params[''] = clubif position:params[''] = positionif zone:params[''] = zoneif nationality:params[''] = nationalityif rare:params[''] = ''if playStyle:params[''] = playStylerc = self.__request__(method, url, params=params)if start == :if ctype == '':pgid = ''elif ctype == '':pgid = ''elif ctype in ('', '', '', '', ''):pgid = ''events = [self.pin.event('', ''), self.pin.event('', pgid)]if rc['']:events.append(self.pin.event('', ''))self.pin.send(events)return [itemParse({'': i}) for i in rc['']]", "docstring": "Return items in your club, excluding consumables.\n\n :param ctype: [development / ? / ?] Card type.\n :param level: (optional) [?/?/gold] Card level.\n :param category: (optional) [fitness/?/?] Card category.\n :param assetId: (optional) Asset id.\n :param defId: (optional) Definition id.\n :param min_price: (optional) Minimal price.\n :param max_price: (optional) Maximum price.\n :param min_buy: (optional) Minimal buy now price.\n :param max_buy: (optional) Maximum buy now price.\n :param league: (optional) League id.\n :param club: (optional) Club id.\n :param position: (optional) Position.\n :param nationality: (optional) Nation id.\n :param rare: (optional) [boolean] True for searching special cards.\n :param playStyle: (optional) Play style.\n :param start: (optional) Start page sent to server so it supposed to be 12/15, 24/30 etc. 
(default platform page_size*n)\n :param page_size: (optional) Page size (items per page)", "id": "f3437:c0:m19"} {"signature": "def clubStaff(self):", "body": "method = ''url = ''rc = self.__request__(method, url)return rc", "docstring": "Return staff in your club.", "id": "f3437:c0:m20"} {"signature": "def clubConsumables(self, fast=False):", "body": "method = ''url = ''rc = self.__request__(method, url)events = [self.pin.event('', '')]self.pin.send(events, fast=fast)events = [self.pin.event('', '')]self.pin.send(events, fast=fast)events = [self.pin.event('', '')]self.pin.send(events, fast=fast)return [itemParse(i) for i in rc.get('', ())]", "docstring": "Return all consumables from club.", "id": "f3437:c0:m21"} {"signature": "def squad(self, squad_id=, persona_id=None):", "body": "method = ''url = '' % (squad_id, persona_id or self.persona_id)events = [self.pin.event('', '')]self.pin.send(events)rc = self.__request__(method, url)events = [self.pin.event('', ''), self.pin.event('', '')]self.pin.send(events)return [itemParse(i) for i in rc.get('', ())]", "docstring": "Return a squad.\n\n :params squad_id: Squad id.", "id": "f3437:c0:m22"} {"signature": "def tradeStatus(self, trade_id):", "body": "method = ''url = ''if not isinstance(trade_id, (list, tuple)):trade_id = (trade_id,)trade_id = (str(i) for i in trade_id)params = {'': ''.join(trade_id)} rc = self.__request__(method, url, params=params)return [itemParse(i, full=False) for i in rc['']]", "docstring": "Return trade status.\n\n :params trade_id: Trade id.", "id": "f3437:c0:m23"} {"signature": "def tradepile(self):", "body": "method = ''url = ''rc = self.__request__(method, url)events = [self.pin.event('', ''), self.pin.event('', '')]if rc.get(''):events.append(self.pin.event('', ''))self.pin.send(events)return [itemParse(i) for i in rc.get('', ())]", "docstring": "Return items in tradepile.", "id": "f3437:c0:m24"} {"signature": "def watchlist(self):", "body": "method = ''url = ''rc = self.__request__(method, url)events = [self.pin.event('', ''), self.pin.event('', '')]if rc.get(''):events.append(self.pin.event('', ''))self.pin.send(events)return [itemParse(i) for i in rc.get('', ())]", "docstring": "Return items in watchlist.", "id": "f3437:c0:m25"} {"signature": "def unassigned(self):", "body": "method = ''url = ''rc = self.__request__(method, url)events = [self.pin.event('', '')]if rc.get(''):events.append(self.pin.event('', ''))self.pin.send(events)return [itemParse({'': i}) for i in rc.get('', ())]", "docstring": "Return Unassigned items (i.e. buyNow items).", "id": "f3437:c0:m26"} {"signature": "def sell(self, item_id, bid, buy_now, duration=, fast=False):", "body": "method = ''url = ''data = {'': buy_now, '': bid, '': duration, '': {'': item_id}}rc = self.__request__(method, url, data=json.dumps(data), params={'': self.sku_b})if not fast: self.tradeStatus(rc[''])return rc['']", "docstring": "Start auction. 
Returns trade_id.\n\n :params item_id: Item id.\n :params bid: Start bid.\n :params buy_now: Buy now price.\n :params duration: Auction duration in seconds (Default: 3600).", "id": "f3437:c0:m27"} {"signature": "def quickSell(self, item_id):", "body": "method = ''url = ''if not isinstance(item_id, (list, tuple)):item_id = (item_id,)item_id = (str(i) for i in item_id)params = {'': ''.join(item_id)}self.__request__(method, url, params=params) return True", "docstring": "Quick sell.\n\n :params item_id: Item id.", "id": "f3437:c0:m28"} {"signature": "def watchlistDelete(self, trade_id):", "body": "method = ''url = ''if not isinstance(trade_id, (list, tuple)):trade_id = (trade_id,)trade_id = (str(i) for i in trade_id)params = {'': ''.join(trade_id)}self.__request__(method, url, params=params) return True", "docstring": "Remove cards from watchlist.\n\n :params trade_id: Trade id.", "id": "f3437:c0:m29"} {"signature": "def tradepileDelete(self, trade_id): ", "body": "method = ''url = '' % trade_idself.__request__(method, url) return True", "docstring": "Remove card from tradepile.\n\n :params trade_id: Trade id.", "id": "f3437:c0:m30"} {"signature": "def tradepileClear(self):", "body": "method = ''url = ''self.__request__(method, url)", "docstring": "Removes all sold items from tradepile.", "id": "f3437:c0:m31"} {"signature": "def sendToTradepile(self, item_id, safe=True):", "body": "if safe and len(self.tradepile()) >= self.tradepile_size: return Falsereturn self.__sendToPile__('', item_id=item_id)", "docstring": "Send to tradepile (alias for __sendToPile__).\n\n :params item_id: Item id.\n :params safe: (optional) False to disable tradepile free space check.", "id": "f3437:c0:m32"} {"signature": "def sendToClub(self, item_id):", "body": "return self.__sendToPile__('', item_id=item_id)", "docstring": "Send to club (alias for __sendToPile__).\n\n :params item_id: Item id.", "id": "f3437:c0:m33"} {"signature": "def sendToWatchlist(self, trade_id):", "body": "method = ''url = ''data = {'': [{'': trade_id}]}return self.__request__(method, url, data=json.dumps(data))", "docstring": "Send to watchlist.\n\n :params trade_id: Trade id.", "id": "f3437:c0:m34"} {"signature": "def sendToSbs(self, challenge_id, item_id):", "body": "method = ''url = '' % challenge_idsquad = self.sbsSquad(challenge_id)players = []moved = Falsen = for i in squad['']['']:if i[''][''] == item_id: return Falseif i[''][''] == and not moved:i[''][''] = item_idmoved = Trueplayers.append({\"\": n,\"\": {\"\": i[''][''],\"\": False}})n += data = {'': players}if not moved:return Falseelse:self.__request__(method, url, data=json.dumps(data))return True", "docstring": "Send card FROM CLUB to first free slot in sbs squad.", "id": "f3437:c0:m35"} {"signature": "def relist(self):", "body": "method = ''url = ''return self.__request__(method, url)", "docstring": "ReList all cards in tradepile. EA method - might(?) change prices.", "id": "f3437:c0:m36"} {"signature": "def applyConsumable(self, item_id, resource_id):", "body": "method = ''url = '' % resource_iddata = {'': [{'': item_id}]}self.__request__(method, url, data=json.dumps(data))", "docstring": "Apply consumable on player.\n\n :params item_id: Item id of player.\n :params resource_id: Resource id of consumable.", "id": "f3437:c0:m37"} {"signature": "def keepalive(self):", "body": "method = ''url = ''return self.__request__(method, url)['']", "docstring": "Refresh credit amount to let the server know that we're still online. 
Returns credit amount.", "id": "f3437:c0:m38"} {"signature": "def pileSize(self):", "body": "rc = self._usermassinfo['']['']return {'': rc[][''],'': rc[]['']}", "docstring": "Return size of tradepile and watchlist.", "id": "f3437:c0:m39"} {"signature": "def messages(self):", "body": "method = ''url = ''rc = self.__request__(method, url)return rc['']", "docstring": "Return active messages.", "id": "f3437:c0:m40"} {"signature": "def packs(self):", "body": "method = ''url = ''params = {'': True}return self.__request__(method, url, params=params)", "docstring": "List all (currently?) available packs.", "id": "f3437:c0:m41"} {"signature": "def __init__(self, api_key=None, site=None, suppress_warnings=False, proxies=None):", "body": "api_key, site = get_preferred_credentials(api_key, site)self.models = ModelsClient(api_key, site, suppress_warnings=suppress_warnings, proxies=proxies)self.search = SearchClient(api_key, site, suppress_warnings=suppress_warnings, proxies=proxies)self.data = DataClient(api_key, site, suppress_warnings=suppress_warnings, proxies=proxies)self.data_views = DataViewsClient(api_key, site, suppress_warnings=suppress_warnings, proxies=proxies)", "docstring": "Constructor.\n\n:param api_key: Your API key for Citrination\n:type api_key: str\n:param site: The domain name of your Citrination deployment\n (the default is https://citrination.com)\n:type site: str\n:param suppress_warnings: A flag allowing you to suppress warning\n statements guarding against misuse printed to stdout.\n:type suppress_warnings: bool\n:param proxies: proxies to use when making HTTP requests. E.g.,\n proxies = {\n 'http': 'http://10.10.1.10:3128',\n 'https': 'http://10.10.1.10:1080',\n }\n:type proxies: dict(string, string)", "id": "f3444:c0:m0"} {"signature": "def __init__(self, name, role, balance_element, group_by_key=False, units=None, basis=):", "body": "super(AlloyCompositionColumn, self).__init__(name=name,role=role,group_by_key=group_by_key,units=units)self._balance_element = balance_elementself._basis = basis", "docstring": "Constructor.\n\n:param name: The name of the column\n:type name: str\n:param role: The role the column will play in machine learning:\n \"Input\"\n \"Output\"\n \"Latent Variable\"\n \"Ignore\"\n:type role: str\n:param group_by_key: Whether or not this column should be used for\n grouping during cross validation\n:type group_by_key: bool\n:param units: Optionally, the units for the column\n:type units: str\n:param balance_element: The element making up the balance in the\n composition\n:type balance_element: str\n:param basis: The total amount of composition when deciding how to fill\n the balance\n:type basis: float", "id": "f3452:c0:m0"} {"signature": "def __init__(self, name, role, group_by_key=False, units=None, categories=[]):", "body": "super(CategoricalColumn, self).__init__(name=name,role=role,group_by_key=group_by_key,units=units)self.categories = categories", "docstring": "Constructor.\n\n:param name: The name of the column\n:type name: str\n:param role: The role the column will play in machine learning:\n \"Input\"\n \"Output\"\n \"Latent Variable\"\n \"Ignore\"\n:type role: str\n:param group_by_key: Whether or not this column should be used for\n grouping during cross validation\n:type group_by_key: bool\n:param units: Optionally, the units for the column\n:type units: str\n:param categories: An array of strings that are valid values for data\n in this column\n:type categories: list of str", "id": "f3454:c0:m0"} {"signature": "def __init__(self, name, role, 
group_by_key=False, units=None):", "body": "self._name = nameself._units = unitsself.role = roleself._type = self.__class__.TYPEself._group_by_key = group_by_key", "docstring": "Constructor.\n\n:param name: The name of the column\n:type name: str\n:param role: The role the column will play in machine learning:\n \"Input\"\n \"Output\"\n \"Latent Variable\"\n \"Ignore\"\n:type role: str\n:param group_by_key: Whether or not this column should be used for\n grouping during cross validation\n:type group_by_key: bool\n:param units: Optionally, the units for the column\n:type units: str", "id": "f3455:c0:m0"} {"signature": "def to_dict(self):", "body": "return {\"\": self.type,\"\": self.name,\"\": self.group_by_key,\"\": self.role,\"\": self.units,\"\": self.build_options()}", "docstring": "Converts the column to a dictionary representation accepted\nby the Citrination server.\n\n:return: Dictionary with basic options, plus any column type specific\n options held under the \"options\" key\n:rtype: dict", "id": "f3455:c0:m1"} {"signature": "def build_options(self):", "body": "return None", "docstring": "Default value for optional column configuration. Only child\nclasses will have non-None values for this field. Value\ndepends on child class implementation.\n\nNote: Some child classes do not require any extra options\n(e.g. OrganicChemicalFormula), in that case, this implementation\nwill be invoked and no options will be present in the dictionary.\n\n:return: Options dictionary, or None if not implemented in child\n:rtype: dict or None", "id": "f3455:c0:m2"} {"signature": "@propertydef type(self):", "body": "return self._type", "docstring": "Returns the type of the column. This value is\nread only because the valid value is defined by the\ncolumn type and is set internally.", "id": "f3455:c0:m15"} {"signature": "def _validate_role(self, role):", "body": "valid_roles = [\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"]if role not in valid_roles:raise CitrinationClientError(\"\".format(valid_roles))", "docstring": "Validates that the role parameter has a valid value.\n\n:param role: Name of the column's role\n:type role: str", "id": "f3455:c0:m16"} {"signature": "def __init__(self, name, role, group_by_key=False, units=None, length=None):", "body": "super(VectorColumn, self).__init__(name=name,role=role,group_by_key=group_by_key,units=units)self.length = length", "docstring": "Constructor.\n\n:param name: The name of the column\n:type name: str\n:param role: The role the column will play in machine learning:\n \"Input\"\n \"Output\"\n \"Latent Variable\"\n \"Ignore\"\n:type role: str\n:param group_by_key: Whether or not this column should be used for\n grouping during cross validation\n:type group_by_key: bool\n:param units: Optionally, the units for this column\n:type units: str\n:param length: The length of vectors in this column\n:type length: int", "id": "f3457:c0:m0"} {"signature": "def __init__(self, name, role, group_by_key=False, units=None, lower_bound=None, upper_bound=None):", "body": "super(RealColumn, self).__init__(name=name,role=role,group_by_key=group_by_key,units=units)self._lower_bound = Noneself._upper_bound = Noneself.lower_bound = lower_boundself.upper_bound = upper_bound", "docstring": "Constructor.\n\n:param name: The name of the column\n:type name: str\n:param role: The role the column will play in machine learning:\n \"Input\"\n \"Output\"\n \"Latent Variable\"\n \"Ignore\"\n:type role: str\n:param group_by_key: Whether or not this column should be used for\n grouping during cross 
validation\n:type group_by_key: bool\n:param units: Optionally, the units for the column\n:type units: str\n:param lower_bound: The lower bound for valid values for this column\n:type lower_bound: float\n:param upper_bound: The upper bound for valid values for this column\n:type upper_bound: float", "id": "f3461:c0:m0"} {"signature": "def _assert_prediction_values(prediction):", "body": "egap = ''voltage = ''assert prediction.get_value('') is not None, \"\"assert prediction.get_value(egap) is not None, \"\"assert prediction.get_value(voltage) is not None, \"\"assert _almost_equal(prediction.get_value('').value, ,), \"\"assert _almost_equal(prediction.get_value('').loss, ,), \"\"assert _almost_equal(prediction.get_value(egap).value, ,), \"\"assert _almost_equal(prediction.get_value(egap).loss, ,), \"\"assert _almost_equal(prediction.get_value(voltage).value, ,), \"\"assert _almost_equal(prediction.get_value(voltage).loss, ,), \"\"", "docstring": "Assertions for the test_predict and test_predict_distribution methods", "id": "f3462:m1"} {"signature": "def __init__(self, key, value, loss=None):", "body": "self._key = keyself._value = valueself._loss = loss", "docstring": "Constructor.\n\n:param key: The descriptor key for the prediction\n:type key: str\n:param value: The predicted value\n:type value: str or float\n:param loss: The loss for the prediction\n:type loss: float", "id": "f3467:c0:m0"} {"signature": "def __init__(self):", "body": "self._projections = {}", "docstring": "Constructor", "id": "f3468:c0:m0"} {"signature": "def add_projection(self, key, projection):", "body": "self._projections[key] = projection", "docstring": "Register a projection under a descriptor key.\n\n:param key: The descriptor key for the projection\n:type key: str\n:param projection: The projection for the provided descriptor key\n:type projection: :class:`Projection`", "id": "f3468:c0:m1"} {"signature": "def projections(self):", "body": "return self._projections.keys()", "docstring": "List the descriptor keys with registered projections.\n\n:return: List of descriptor keys", "id": "f3468:c0:m2"} {"signature": "def get_projection(self, key):", "body": "return self._projections.get(key)", "docstring": "Retrieves the projection registered under a particular\ndescriptor key.\n\n:param key: A descriptor key\n:return: A :class:`Projection`", "id": "f3468:c0:m3"} {"signature": "def tsne(self, data_view_id):", "body": "analysis = self._data_analysis(data_view_id)projections = analysis['']tsne = Tsne()for k, v in projections.items():projection = Projection(xs=v[''],ys=v[''],responses=v[''],tags=v[''],uids=v[''])tsne.add_projection(k, projection)return tsne", "docstring": "Get the t-SNE projection, including responses and tags.\n\n:param data_view_id: The ID of the data view to retrieve TSNE from\n:type data_view_id: int\n:return: The TSNE analysis\n:rtype: :class:`Tsne`", "id": "f3469:c0:m1"} {"signature": "def predict(self, data_view_id, candidates, method=\"\", use_prior=True):", "body": "uid = self.submit_predict_request(data_view_id, candidates, method, use_prior)while self.check_predict_status(data_view_id, uid)[''] not in [\"\", \"\", \"\"]:time.sleep()result = self.check_predict_status(data_view_id, uid)if result[\"\"] == \"\":paired = zip(result[\"\"][\"\"], result[\"\"][\"\"])prediction_result_format = [{k: (p[][k], p[][k]) for k in p[].keys()} for p in paired]return list(map(lambda c: _get_prediction_result_from_candidate(c), prediction_result_format))else:raise RuntimeError(\"\".format(uid, result[\"\"]))", 
"docstring": "Predict endpoint. This simply wraps the async methods (submit and poll for status/results).\n\n:param data_view_id: The ID of the data view to use for prediction\n:type data_view_id: str\n:param candidates: A list of candidates to make predictions on\n:type candidates: list of dicts\n:param method: Method for propagating predictions through model graphs. \"scalar\" uses linearized uncertainty\npropagation, whereas \"scalar_from_distribution\" still returns scalar predictions but uses sampling to\npropagate uncertainty without a linear approximation.\n:type method: str (\"scalar\" or \"scalar_from_distribution\")\n:param use_prior: Whether to apply prior values implied by the property descriptors\n:type use_prior: bool\n:return: The results of the prediction\n:rtype: list of :class:`PredictionResult`", "id": "f3469:c0:m2"} {"signature": "def retrain(self, dataview_id):", "body": "url = ''.format(dataview_id)response = self._post_json(url, data={})if response.status_code != requests.codes.ok:raise RuntimeError('' + str(response.status_code) + '' + str(response.message))return True", "docstring": "Start a model retraining\n:param dataview_id: The ID of the views\n:return:", "id": "f3469:c0:m3"} {"signature": "def _data_analysis(self, data_view_id):", "body": "failure_message = \"\".format(data_view_id)return self._get_success_json(self._get(routes.data_analysis(data_view_id), failure_message=failure_message))", "docstring": "Data analysis endpoint.\n\n:param data_view_id: The model identifier (id number for data views)\n:type data_view_id: str\n:return: dictionary containing information about the data, e.g. dCorr and tsne", "id": "f3469:c0:m6"} {"signature": "def submit_predict_request(self, data_view_id, candidates, prediction_source='', use_prior=True):", "body": "data = {\"\":prediction_source,\"\":use_prior,\"\":candidates}failure_message = \"\"post_url = '' + str(data_view_id) + ''return self._get_success_json(self._post_json(post_url, data, failure_message=failure_message))['']['']", "docstring": "Submits an async prediction request.\n\n:param data_view_id: The id returned from create\n:param candidates: Array of candidates\n:param prediction_source: 'scalar' or 'scalar_from_distribution'\n:param use_prior: True to use prior prediction, otherwise False\n:return: Predict request Id (used to check status)", "id": "f3469:c0:m8"} {"signature": "def check_predict_status(self, view_id, predict_request_id):", "body": "failure_message = \"\"bare_response = self._get_success_json(self._get('' + str(view_id) + '' + str(predict_request_id) + '',None, failure_message=failure_message))result = bare_response[\"\"]return result", "docstring": "Returns a string indicating the status of the prediction job\n\n:param view_id: The data view id returned from data view create\n:param predict_request_id: The id returned from predict\n:return: Status data, also includes results if state is finished", "id": "f3469:c0:m9"} {"signature": "def submit_design_run(self, data_view_id, num_candidates, effort, target=None, constraints=[], sampler=\"\"):", "body": "if effort > :raise CitrinationClientError(\"\")if target is not None:target = target.to_dict()constraint_dicts = [c.to_dict() for c in constraints]body = {\"\": num_candidates,\"\": target,\"\": effort,\"\": constraint_dicts,\"\": sampler}url = routes.submit_data_view_design(data_view_id)response = self._post_json(url, body).json()return DesignRun(response[\"\"][\"\"][\"\"])", "docstring": "Submits a new experimental design run.\n\n:param 
data_view_id: The ID number of the data view to which the\n run belongs, as a string\n:type data_view_id: str\n:param num_candidates: The number of candidates to return\n:type num_candidates: int\n:param target: An :class:``Target`` instance representing\n the design run optimization target\n:type target: :class:``Target``\n:param constraints: An array of design constraints (instances of\n objects which extend :class:``BaseConstraint``)\n:type constraints: list of :class:``BaseConstraint``\n:param sampler: The name of the sampler to use during the design run:\n either \"Default\" or \"This view\"\n:type sampler: str\n:return: A :class:`DesignRun` instance containing the UID of the\n new run", "id": "f3469:c0:m10"} {"signature": "def get_design_run_status(self, data_view_id, run_uuid):", "body": "url = routes.get_data_view_design_status(data_view_id, run_uuid)response = self._get(url).json()status = response[\"\"]return ProcessStatus(result=status.get(\"\"),progress=status.get(\"\"),status=status.get(\"\"),messages=status.get(\"\"))", "docstring": "Retrieves the status of an in progress or completed design run\n\n:param data_view_id: The ID number of the data view to which the\n run belongs, as a string\n:type data_view_id: str\n:param run_uuid: The UUID of the design run to retrieve status for\n:type run_uuid: str\n:return: A :class:`ProcessStatus` object", "id": "f3469:c0:m11"} {"signature": "def get_design_run_results(self, data_view_id, run_uuid):", "body": "url = routes.get_data_view_design_results(data_view_id, run_uuid)response = self._get(url).json()result = response[\"\"]return DesignResults(best_materials=result.get(\"\"),next_experiments=result.get(\"\"))", "docstring": "Retrieves the results of an existing designrun\n\n:param data_view_id: The ID number of the data view to which the\n run belongs, as a string\n:type data_view_id: str\n:param run_uuid: The UUID of the design run to retrieve results from\n:type run_uuid: str\n:return: A :class:`DesignResults` object", "id": "f3469:c0:m12"} {"signature": "def get_data_view(self, data_view_id):", "body": "url = routes.get_data_view(data_view_id)response = self._get(url).json()result = response[\"\"][\"\"]datasets_list = []for dataset in result[\"\"]:datasets_list.append(Dataset(name=dataset[\"\"],id=dataset[\"\"],description=dataset[\"\"]))columns_list = []for column in result[\"\"]:columns_list.append(ColumnFactory.from_dict(column))return DataView(view_id=data_view_id,name=result[\"\"],description=result[\"\"],datasets=datasets_list,columns=columns_list,)", "docstring": "Retrieves a summary of information for a given data view\n - view id\n - name\n - description\n - columns\n\n:param data_view_id: The ID number of the data view to which the\n run belongs, as a string\n:type data_view_id: str", "id": "f3469:c0:m13"} {"signature": "def kill_design_run(self, data_view_id, run_uuid):", "body": "url = routes.kill_data_view_design_run(data_view_id, run_uuid)response = self._delete(url).json()return response[\"\"][\"\"]", "docstring": "Kills an in progress experimental design run\n\n:param data_view_id: The ID number of the data view to which the\n run belongs, as a string\n:type data_view_id: str\n:param run_uuid: The UUID of the design run to kill\n:type run_uuid: str\n:return: The UUID of the design run", "id": "f3469:c0:m14"} {"signature": "def get_data_view_service_status(self, data_view_id):", "body": "url = routes.get_data_view_status(data_view_id)response = self._get(url).json()result = response[\"\"][\"\"]return 
DataViewStatus(predict=ServiceStatus.from_response_dict(result[\"\"]),experimental_design=ServiceStatus.from_response_dict(result[\"\"]),data_reports=ServiceStatus.from_response_dict(result[\"\"]),model_reports=ServiceStatus.from_response_dict(result[\"\"]))", "docstring": "Retrieves the status for all of the services associated with a data view:\n - predict\n - experimental_design\n - data_reports\n - model_reports\n\n:param data_view_id: The ID number of the data view to which the\n run belongs, as a string\n:type data_view_id: str\n:return: A :class:`DataViewStatus`\n:rtype: DataViewStatus", "id": "f3469:c0:m15"} {"signature": "def __init__(self, predict=None, experimental_design=None, data_reports=None, model_reports=None):", "body": "self._predict = predictself._experimental_design = experimental_designself._data_reports = data_reportsself._model_reports = model_reports", "docstring": "Constructor.\n\n:param predict: The status predict\n:type predict: ServiceStatus\n:param experimental_design: The status of experimental_design\n:type experimental_design: ServiceStatus\n:param data_reports: The status of data_analysis\n:type data_reports: ServiceStatus\n:param model_reports: The status of model reports\n:type model_reports: ServiceStatus", "id": "f3470:c0:m0"} {"signature": "def __init__(self, view_id, name, description, datasets=[], column_names=[], columns=[]):", "body": "self._id = view_idself._name = nameself._description = descriptionself._datasets = datasetsself._column_names = column_namesself._columns = columns", "docstring": "Constructor.\n\n:param view_id: The ID of the data view\n:type view_id: str\n:param name: The name of the data view\n:type name: str\n:param description: The description of the data view\n:type description: str\n:param datasets: The datasets used in the view\n:type datasets: list of :class: Dataset\n:param column_names: The column names in the view\n:type column_names: list of str", "id": "f3471:c0:m0"} {"signature": "def __init__(self, ready, context, reason, event):", "body": "self._ready = readyself._context = contextself._event = eventself._reason = reason", "docstring": "Constructor.\n\n:param ready: Boolean indicating whether or not the service can be used\n:type ready: bool\n:param context: A contextual description of the current status: \"notice\",\n \"success\", \"error\"\n:type context: str\n:param reason: A full sentence explanation of the service's status\n:type reason: str\n:param event: An event object describing the current state of the service's\n progress toward readiness\n:type event: Event", "id": "f3473:c0:m1"} {"signature": "def is_ready(self):", "body": "return self.ready == True", "docstring": "Indicates whether or not the service is ready to be used.\n\n:return: A boolean\n:rtype: bool", "id": "f3473:c0:m14"} {"signature": "def __init__(self, xs, ys, responses, tags, uids):", "body": "self._xs = xsself._ys = ysself._responses = responsesself._tags = tagsself._uids = uids", "docstring": "Constructor.\n\n:param xs: A list of x values of the projection.\n:type xs: list of floats\n:param ys: A list of y values of the projection.\n:type ys: list of floats\n:param responses: A list of z values of the projection.\n:type responses: list of floats\n:param tags: A list of tags for the projected points\n:type tags: list of strings\n:param uids: A list of record UIDs for the projected points\n:type uids: list of strings", "id": "f3474:c0:m0"} {"signature": "def get_data_view_status(data_view_id):", "body": "return \"\".format(data_view_id)", 
"docstring": "URL for retrieving the statuses of all services\nassociated with a data view.\n\n:param data_view_id: The ID of the desired data views\n:type data_view_id: str", "id": "f3475:m7"} {"signature": "def __init__(self, title, normalized_progress, subtitle=None, subevent=None):", "body": "self._title = titleself._subtitle = subtitleself._subevent = subeventself._normalized_progress = normalized_progress", "docstring": "Constructor.\n\n:param title: The title of the event\n:type title: str\n:param subtitle: More detail about the event\n:type subtitle: str\n:param subevent: An event object describing the current state of the service's\n progress toward readiness\n:type subevent: Event\n:param normalized_progress: The fractional representation of the status of the event\n:type normalized_progress: float", "id": "f3476:c0:m0"} {"signature": "def add_value(self, key, value):", "body": "self._values[key] = value", "docstring": "Registers a predicted value in the result.\n\n:param key: The descriptor key for the predicted value\n:type key: str\n:param value: A :class:`PredictedValue`\n:type value: object\n:return: None\n:rtype: NoneType", "id": "f3477:c0:m1"} {"signature": "def get_value(self, key):", "body": "try:return self._values[key]except KeyError:return None", "docstring": "Retrieves a predicted value.\n\n:param key: A descriptor key for a registered predicted value.\n:type key: str\n:return: The value stored at the provided descriptor key. None if no key is provided.\n:rtype: :class:`PredictedValue`", "id": "f3477:c0:m2"} {"signature": "def all_keys(self):", "body": "return self._values.keys()", "docstring": "Retrieves a list of all the values which were predicted.\n\n:return: A list of keys for which predictions have been made and can\n be retrieved using `get_value`\n:rtype: list of str", "id": "f3477:c0:m3"} {"signature": "def __init__(self, result, progress, status, messages=None):", "body": "self._status = statusself._result = resultself._progress = progressself._messages = messages", "docstring": "Constructor.\n\n:param result: The result of the process\n:type result: any\n:param progress: The progress of the process as as percentage\n:type progress: int\n:param status: The status string for the process\n:type status: str\n:param messages: A list of messages representing the steps the process\n has already progressed through\n:type messages: list of str", "id": "f3478:c0:m0"} {"signature": "def __init__(self, uuid):", "body": "self._uuid = uuid", "docstring": "Constructor.\n\n:param uuid: The UUID of an in progress design run.\n:type uuid: str", "id": "f3480:c0:m0"} {"signature": "def __init__(self, best_materials, next_experiments):", "body": "self._best_materials = best_materialsself._next_experiments = next_experiments", "docstring": "Constructor.\n\n:param best_materials: An array of candidate dictionaries\n:type best_materials: list of dictionaries\n:param next_experiments: An array of candidate dictionaries\n:type next_experiments: list of dictionaries", "id": "f3481:c0:m0"} {"signature": "def __init__(self, name, objective):", "body": "try:self._objective = float(objective)except ValueError:if objective.lower() not in [\"\", \"\"]:raise CitrinationClientError(\"\")self._objective = objectiveself._name = name", "docstring": "Constructor.\n\n:param name: The name of the target output column\n:type name: str\n:param objective: The optimization objective; \"Min\", \"Max\", or a scalar value (such as \"5.0\")\n:type objective: str", "id": "f3482:c0:m0"} {"signature": "def 
__init__(self, name, minimum, maximum):", "body": "try:minimum_f = float(minimum)maximum_f = float(maximum)except TypeError:raise CitrinationClientError(\"\")if minimum_f > maximum_f:raise CitrinationClientError(\"\")self._min = minimum_fself._max = maximum_fself._type = \"\"self._name = name", "docstring": "Constructor.\n\n:param name: The name of the column in the data\n view to which this constraint should be applied\n:type name: str\n:param minimum: The minimum allowed value\n:type minimum: float\n:param maximum: The maximum allowed value\n:type maximum: float", "id": "f3484:c0:m0"} {"signature": "def __init__(self, name, accepted_categories):", "body": "self._type = \"\"self._name = nameself._categories = accepted_categories", "docstring": "Constructor.\n\n:param name: The name of the column in the data\n view to which this constraint should be applied\n:type name: str\n:param accepted_categories: An array of categories to constrain the name to\n:type accepted_categories: list of str", "id": "f3486:c0:m0"} {"signature": "def to_dict(self):", "body": "return {\"\": self._name,\"\": self._type,\"\": self.options()}", "docstring": "Returns a dictionary representing the constraint.\nAssists in JSON serialization.\n\n:return: A dictionary with the name and type of the constraint\n along with any type-specific metadata the constraint may\n require", "id": "f3487:c0:m0"} {"signature": "def __init__(self, name, elements, logic):", "body": "bad_logic_msg = \"\"if logic not in [\"\", \"\", \"\"]:raise CitrinationClientError(bad_logic_msg)self._name = nameself._type = \"\"self._elements = elementsself._logic = logic", "docstring": "Constructor.\n\n:param name: The name of the column in the data\n view to which this constraint should be applied\n:type name: str\n:param elements: An array of element abbreviations as\n strings, e.g. [\"Mg\", \"C\"]\n:type elements: list of str\n:param logic: The logic to apply to the constraint; either\n \"must\", \"should\", or \"exclude\"\n:type logic: str", "id": "f3489:c0:m0"} {"signature": "def __init__(self, name, elements, minimum, maximum):", "body": "if not <= minimum <= :raise CitrinationClientError(\"\")if not <= maximum <= :raise CitrinationClientError(\"\")if not maximum >= minimum:raise CitrinationClientError(\"\")self._type = \"\"self._elements = elementsself._name = nameself._min = minimumself._max = maximum", "docstring": "Constructor.\n\n:param name: The name of the column in the data\n view to which this constraint should be applied\n:type name: str\n:param elements: An array of element abbreviations as\n strings, e.g. 
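Target is the one design-run input whose class name survives in this dump; a small sketch (import path assumed, constraint classes noted only in comments because their names are elided here):

from citrination_client import Target   # import path assumed; not shown in this dump

# The objective may be "Min", "Max", or a scalar set-point such as "5.0"
# (per the constructor docstring above).
target = Target(name="Band gap", objective="Max")

# Constraint objects follow the constructors documented above -- (name, minimum, maximum),
# (name, accepted_categories), (name, elements, logic), (name, elements, minimum, maximum) --
# but their class names are elided in this dump, so none are instantiated here.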
[\"Mg\", \"C\"]\n:type elements: list of str\n:param minimum: The minimum value (<= 100) as a percentage\n at which the specified elements should appear in\n candidate compositions\n:type minimum: float\n:param maximum: The maximum value (<= 100) as a percentage\n at which the specified elements should appear in\n candidate compositions\n:type maximum: float", "id": "f3490:c0:m0"} {"signature": "def __init__(self, name, value=None):", "body": "self._type = \"\" self._name = nameself._value = value", "docstring": "Constructor.\n\n:param name: The name of the column in the data\n view to which this constraint should be applied\n:type name: str\n:param value: The value the column should be constrained to\n:type value: float", "id": "f3491:c0:m0"} {"signature": "def check_for_rate_limiting(response, response_lambda, timeout=, attempts=):", "body": "if attempts >= :raise RateLimitingException()if response.status_code == :sleep(timeout)new_timeout = timeout + new_attempts = attempts + return check_for_rate_limiting(response_lambda(timeout, attempts), response_lambda, timeout=new_timeout, attempts=new_attempts)return response", "docstring": "Takes an initial response, and a way to repeat the request that produced it and retries the request with an increasing sleep period between requests if rate limiting resposne codes are encountered.\n\nIf more than 3 attempts are made, a RateLimitingException is raised\n\n:param response: A response from Citrination\n:type response: requests.Response\n:param response_lambda: a callable that runs the request that returned the\n response\n:type response_lambda: function\n:param timeout: the time to wait before retrying\n:type timeout: int\n:param attempts: the number of the retry being executed\n:type attempts: int", "id": "f3495:m0"} {"signature": "def __init__(self, api_key, webserver_host, api_members=[], suppress_warnings=False, proxies = None):", "body": "if api_key == None or len(api_key) == :raise CitrinationClientError(\"\")self.proxies = proxiesself.headers = {'': quote(api_key),'': '','': ''}self.suppress_warnings = suppress_warningsself.api_url = webserver_host + ''self.api_members = api_members", "docstring": "Constructor.\n\n:param api_key: Authentication token for the Citrination site\n:type api_key: str\n:param host: The base URL of the citrination site, e.g. https://citrination.com\n:type host: str\n:param api_members: The names of the member methods for this client\n:type api_members: str[]\n:param suppress_warnings: A flag indicating whether or not warning\n messages to stdout should be printed\n:type suppress_warnings: bool\n:param proxies: proxies to use when making HTTP requests. 
E.g.,\n proxies = {\n 'http': 'http://10.10.1.10:3128',\n 'https': 'http://10.10.1.10:1080',\n }\n:type proxies: dict(string, string)", "id": "f3496:c0:m0"} {"signature": "def _get_qualified_route(self, route):", "body": "return \"\".format(self.api_url, route)", "docstring": "Get a fully qualified api route.\n:param route: the route (e.g., /model)\n:return: the fully qualified route (e.g., https://citrination.com/model)", "id": "f3496:c0:m3"} {"signature": "def _get(self, route, headers=None, failure_message=None):", "body": "headers = self._get_headers(headers)response_lambda = (lambda: requests.get(self._get_qualified_route(route), headers=headers, verify=False, proxies=self.proxies))response = check_for_rate_limiting(response_lambda(), response_lambda)return self._handle_response(response, failure_message)", "docstring": "Execute a post request and return the result\n:param headers:\n:return:", "id": "f3496:c0:m6"} {"signature": "def _post(self, route, data, headers=None, failure_message=None):", "body": "headers = self._get_headers(headers)response_lambda = (lambda: requests.post(self._get_qualified_route(route), headers=headers, data=data, verify=False, proxies=self.proxies))response = check_for_rate_limiting(response_lambda(), response_lambda)return self._handle_response(response, failure_message)", "docstring": "Execute a post request and return the result\n:param data:\n:param headers:\n:return:", "id": "f3496:c0:m8"} {"signature": "def _put(self, route, data, headers=None, failure_message=None):", "body": "headers = self._get_headers(headers)response_lambda = (lambda: requests.put(self._get_qualified_route(route), headers=headers, data=data, verify=False, proxies=self.proxies))response = check_for_rate_limiting(response_lambda(), response_lambda)return self._handle_response(response, failure_message)", "docstring": "Execute a put request and return the result\n:param data:\n:param headers:\n:return:", "id": "f3496:c0:m10"} {"signature": "def _patch(self, route, data, headers=None, failure_message=None):", "body": "headers = self._get_headers(headers)response_lambda = (lambda: requests.patch(self._get_qualified_route(route), headers=headers, data=data, verify=False, proxies=self.proxies))response = check_for_rate_limiting(response_lambda(), response_lambda)return self._handle_response(response, failure_message)", "docstring": "Execute a patch request and return the result", "id": "f3496:c0:m12"} {"signature": "def _delete(self, route, headers=None, failure_message=None):", "body": "headers = self._get_headers(headers)response_lambda = (lambda: requests.delete(self._get_qualified_route(route), headers=headers, verify=False, proxies=self.proxies))response = check_for_rate_limiting(response_lambda(), response_lambda)return self._handle_response(response, failure_message)", "docstring": "Execute a delete request and return the result\n:param headers:\n:return:", "id": "f3496:c0:m13"} {"signature": "def load_file_as_yaml(path):", "body": "with open(path, \"\") as f:raw_yaml = f.read()parsed_dict = yaml.load(raw_yaml)return parsed_dict", "docstring": "Given a filepath, loads the file as a dictionary from YAML\n\n:param path: The path to a YAML file", "id": "f3499:m0"} {"signature": "def get_credentials_from_file(filepath):", "body": "try:creds = load_file_as_yaml(filepath)except Exception:creds = {}profile_name = os.environ.get(citr_env_vars.CITRINATION_PROFILE)if profile_name is None or len(profile_name) == :profile_name = DEFAULT_CITRINATION_PROFILEapi_key = Nonesite = Nonetry:profile 
= creds[profile_name]api_key = profile[CREDENTIALS_API_KEY_KEY]site = profile[CREDENTIALS_SITE_KEY]except KeyError:passreturn (api_key, site)", "docstring": "Extracts credentials from the yaml formatted credential filepath\npassed in. Uses the default profile if the CITRINATION_PROFILE env var\nis not set, otherwise looks for a profile with that name in the credentials file.\n\n:param filepath: The path of the credentials file", "id": "f3499:m1"} {"signature": "def get_preferred_credentials(api_key, site, cred_file=DEFAULT_CITRINATION_CREDENTIALS_FILE):", "body": "profile_api_key, profile_site = get_credentials_from_file(cred_file)if api_key is None:api_key = os.environ.get(citr_env_vars.CITRINATION_API_KEY)if api_key is None or len(api_key) == :api_key = profile_api_keyif site is None:site = os.environ.get(citr_env_vars.CITRINATION_SITE)if site is None or len(site) == :site = profile_siteif site is None:site = \"\"return api_key, site", "docstring": "Given an API key, a site url and a credentials file path, runs through a prioritized list of credential sources to find credentials.\n\nSpecifically, this method ranks credential priority as follows:\n 1. Those passed in as the first two parameters to this method\n 2. Those found in the environment as variables\n 3. Those found in the credentials file at the profile specified\n by the profile environment variable\n 4. Those found in the default stanza in the credentials file\n\n:param api_key: A Citrination API Key or None\n:param site: A Citrination site URL or None\n:param cred_file: The path to a credentials file", "id": "f3499:m2"} {"signature": "def _get_s3_presigned_url(response_dict):", "body": "url = response_dict['']return url['']+''+url['']+url['']+''+url['']", "docstring": "Helper method to create an S3 presigned url from the response dictionary.", "id": "f3511:m2"} {"signature": "def __init__(self, api_key, host=\"\", suppress_warnings=False, proxies=None):", "body": "members = [\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"]super(DataClient, self).__init__(api_key, host, members, suppress_warnings, proxies)", "docstring": "Constructor.\n\n:param api_key: A users API key, as a string\n:type api_key: str\n:param host: The base URL of the citrination site, e.g. 
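get_preferred_credentials documents a clear priority order: explicit argument, then environment variable, then the profile named in the environment, then the default profile in the credentials file. A standalone sketch of the first two steps; the exact environment-variable string is elided in this dump, and "CITRINATION_API_KEY" below simply mirrors the constant's name:

import os

def resolve_api_key(explicit_key=None):
    # 1. an explicitly supplied key wins
    if explicit_key:
        return explicit_key
    # 2. otherwise fall back to the environment (variable name assumed)
    return os.environ.get("CITRINATION_API_KEY")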
https://citrination.com\n:type host: str\n:param suppress_warnings: Whether or not usage warnings should be\n printed to stdout\n:type suppress_warnings: bool", "id": "f3511:c0:m0"} {"signature": "def upload(self, dataset_id, source_path, dest_path=None):", "body": "upload_result = UploadResult()source_path = str(source_path)if not dest_path:dest_path = source_pathelse:dest_path = str(dest_path)if os.path.isdir(source_path):for path, subdirs, files in os.walk(source_path):relative_path = os.path.relpath(path, source_path)current_dest_prefix = dest_pathif relative_path is not \"\":current_dest_prefix = os.path.join(current_dest_prefix, relative_path)for name in files:current_dest_path = os.path.join(current_dest_prefix, name)current_source_path = os.path.join(path, name)try:if self.upload(dataset_id, current_source_path, current_dest_path).successful():upload_result.add_success(current_source_path)else:upload_result.add_failure(current_source_path,\"\")except (CitrinationClientError, ValueError) as e:upload_result.add_failure(current_source_path, str(e))return upload_resultelif os.path.isfile(source_path):file_data = { \"\": str(dest_path), \"\": str(source_path)}j = self._get_success_json(self._post_json(routes.upload_to_dataset(dataset_id), data=file_data))s3url = _get_s3_presigned_url(j)with open(source_path, '') as f:if os.stat(source_path).st_size == :data = \"\"else:data = fr = requests.put(s3url, data=data, headers=j[\"\"])if r.status_code == :data = {'': j[''][''], '': j['']}self._post_json(routes.update_file(j['']), data=data)upload_result.add_success(source_path)return upload_resultelse:raise CitrinationClientError(\"\".format(source_path))else:raise ValueError(\"\".format(source_path))", "docstring": "Upload a file, specifying source and dest paths (acts as the scp command).\n\n:param source_path: The path to the file on the source host\n:type source_path: str\n:param dest_path: The path to the file where the contents of the upload will be written (on the dest host)\n:type dest_path: str\n:return: The result of the upload process\n:rtype: :class:`UploadResult`", "id": "f3511:c0:m1"} {"signature": "def list_files(self, dataset_id, glob=\"\", is_dir=False):", "body": "data = {\"\": {\"\": glob,\"\": is_dir}}return self._get_success_json(self._post_json(routes.list_files(dataset_id), data, failure_message=\"\".format(dataset_id)))['']", "docstring": "List matched filenames in a dataset on Citrination.\n\n:param dataset_id: The ID of the dataset to search for files.\n:type dataset_id: int\n:param glob: A pattern which will be matched against files in the dataset.\n:type glob: str\n:param is_dir: A boolean indicating whether or not the pattern should match against the beginning of paths in the dataset.\n:type is_dir: bool\n:return: A list of filepaths in the dataset matching the provided glob.\n:rtype: list of strings", "id": "f3511:c0:m2"} {"signature": "def matched_file_count(self, dataset_id, glob=\"\", is_dir=False):", "body": "list_result = self.list_files(dataset_id, glob, is_dir)return len(list_result)", "docstring": "Returns the number of files matching a pattern in a dataset.\n\n:param dataset_id: The ID of the dataset to search for files.\n:type dataset_id: int\n:param glob: A pattern which will be matched against files in the dataset.\n:type glob: str\n:param is_dir: A boolean indicating whether or not the pattern should match against the beginning of paths in the dataset.\n:type is_dir: bool\n:return: The number of matching files\n:rtype: int", "id": 
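A usage sketch for upload, using the DataClient constructor documented above; the import path and dataset ID are assumptions, and directories are walked and uploaded recursively per the method body:

from citrination_client.data.client import DataClient   # import path assumed; not shown in this dump

data_client = DataClient("YOUR_API_KEY", host="https://citrination.com")

result = data_client.upload(1, "training_data/")   # 1 is a placeholder dataset ID
if result.successful():
    print("every file uploaded")
else:
    print("some uploads failed")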
"f3511:c0:m3"} {"signature": "def get_ingest_status(self, dataset_id):", "body": "failure_message = \"\".format(dataset_id)response = self._get_success_json(self._get('' + str(dataset_id) + '',failure_message=failure_message))['']if '' in response:return response['']return ''", "docstring": "Returns the current status of dataset ingestion. If any file uploaded to a dataset is in an error/failure state\nthis endpoint will return error/failure. If any files are still processing, will return processing.\n\n:param dataset_id: Dataset identifier\n:return: Status of dataset ingestion as a string", "id": "f3511:c0:m4"} {"signature": "def get_dataset_files(self, dataset_id, glob=\"\", is_dir=False, version_number=None):", "body": "if version_number is None:latest = Trueelse:latest = Falsedata = {\"\": {\"\": glob,\"\": is_dir,\"\": latest}}failure_message = \"\".format(dataset_id)versions = self._get_success_json(self._post_json(routes.matched_files(dataset_id), data, failure_message=failure_message))['']if version_number is None:version = versions[]else:try:version = list(filter(lambda v: v[''] == version_number, versions))[]except IndexError:raise ResourceNotFoundException()return list(map(lambda f: DatasetFile(path=f[''], url=f['']), version['']))", "docstring": "Retrieves URLs for the files matched by a glob or a path to a directory\nin a given dataset.\n\n:param dataset_id: The id of the dataset to retrieve files from\n:type dataset_id: int\n:param glob: A regex used to select one or more files in the dataset\n:type glob: str\n:param is_dir: Whether or not the supplied pattern should be treated as a directory to search in\n:type is_dir: bool\n:param version_number: The version number of the dataset to retrieve files from\n:type version_number: int\n:return: A list of dataset files whose paths match the provided pattern.\n:rtype: list of :class:`DatasetFile`", "id": "f3511:c0:m5"} {"signature": "def get_dataset_file(self, dataset_id, file_path, version = None):", "body": "return self.get_dataset_files(dataset_id, \"\".format(file_path), version_number=version)[]", "docstring": "Retrieves a dataset file matching a provided file path\n\n:param dataset_id: The id of the dataset to retrieve file from\n:type dataset_id: int\n:param file_path: The file path within the dataset\n:type file_path: str\n:param version: The dataset version to look for the file in. If nothing is supplied, the latest dataset version will be searched\n:type version: int\n:return: A dataset file matching the filepath provided\n:rtype: :class:`DatasetFile`", "id": "f3511:c0:m6"} {"signature": "def download_files(self, dataset_files, destination=''):", "body": "if not isinstance(dataset_files, list):dataset_files = [dataset_files]for f in dataset_files:filename = f.path.lstrip('')local_path = os.path.join(destination, filename)if not os.path.isdir(os.path.dirname(local_path)):os.makedirs(os.path.dirname(local_path))r = requests.get(f.url, stream=True)with open(local_path, '') as output_file:shutil.copyfileobj(r.raw, output_file)", "docstring": "Downloads file(s) to a local destination.\n\n:param dataset_files:\n:type dataset_files: list of :class: `DatasetFile`\n:param destination: The path to the desired local download destination\n:type destination: str\n:param chunk: Whether or not to chunk the file. 
Default True\n:type chunk: bool", "id": "f3511:c0:m7"} {"signature": "def get_pif(self, dataset_id, uid, dataset_version = None):", "body": "failure_message = \"\".format(uid)if dataset_version == None:response = self._get(routes.pif_dataset_uid(dataset_id, uid), failure_message=failure_message)else:response = self._get(routes.pif_dataset_version_uid(dataset_id, uid, dataset_version), failure_message=failure_message)return pif.loads(response.content.decode(\"\"))", "docstring": "Retrieves a PIF from a given dataset.\n\n:param dataset_id: The id of the dataset to retrieve PIF from\n:type dataset_id: int\n:param uid: The uid of the PIF to retrieve\n:type uid: str\n:param dataset_version: The dataset version to look for the PIF in. If nothing is supplied, the latest dataset version will be searched\n:type dataset_version: int\n:return: A :class:`Pif` object\n:rtype: :class:`Pif`", "id": "f3511:c0:m8"} {"signature": "def create_dataset(self, name=None, description=None, public=False):", "body": "data = {\"\": _convert_bool_to_public_value(public)}if name:data[\"\"] = nameif description:data[\"\"] = descriptiondataset = {\"\": data}failure_message = \"\"result = self._get_success_json(self._post_json(routes.create_dataset(), dataset, failure_message=failure_message))return _dataset_from_response_dict(result)", "docstring": "Create a new data set.\n\n:param name: name of the dataset\n:type name: str\n:param description: description for the dataset\n:type description: str\n:param public: A boolean indicating whether or not the dataset should be public.\n:type public: bool\n:return: The newly created dataset.\n:rtype: :class:`Dataset`", "id": "f3511:c0:m9"} {"signature": "def update_dataset(self, dataset_id, name=None, description=None, public=None):", "body": "data = {\"\": _convert_bool_to_public_value(public)}if name:data[\"\"] = nameif description:data[\"\"] = descriptiondataset = {\"\": data}failure_message = \"\".format(dataset_id)response = self._get_success_json(self._post_json(routes.update_dataset(dataset_id), data=dataset, failure_message=failure_message))return _dataset_from_response_dict(response)", "docstring": "Update a data set.\n\n:param dataset_id: The ID of the dataset to update\n:type dataset_id: int\n:param name: name of the dataset\n:type name: str\n:param description: description for the dataset\n:type description: str\n:param public: A boolean indicating whether or not the dataset should\n be public.\n:type public: bool\n:return: The updated dataset.\n:rtype: :class:`Dataset`", "id": "f3511:c0:m10"} {"signature": "def create_dataset_version(self, dataset_id):", "body": "failure_message = \"\".format(dataset_id)number = self._get_success_json(self._post_json(routes.create_dataset_version(dataset_id), data={}, failure_message=failure_message))['']return DatasetVersion(number=number)", "docstring": "Create a new data set version.\n\n:param dataset_id: The ID of the dataset for which the version must be bumped.\n:type dataset_id: int\n:return: The new dataset version.\n:rtype: :class:`DatasetVersion`", "id": "f3511:c0:m11"} {"signature": "def __init__(self, id, name=None, description=None,created_at=None):", "body": "self._name = nameself._description = descriptionself._id = idself._created_at = created_at", "docstring": "Constructor.\n\n:param id: The ID of the dataset (required for instantiation)\n:type id: int\n:param name: The name of the dataset\n:type name: str\n:param description: The description of the dataset\n:type description: str\n:param created_at: The timestamp 
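The dataset-management calls above compose naturally; a sketch reusing data_client, with attribute access on the returned Dataset assumed to mirror its constructor arguments:

dataset = data_client.create_dataset(name="Example dataset", description="Sketch only", public=False)
data_client.update_dataset(dataset.id, description="Updated description")
new_version = data_client.create_dataset_version(dataset.id)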
for creation of the dataset\n:type created_at: str", "id": "f3512:c0:m0"} {"signature": "def __init__(self):", "body": "self._failures = []self._successes = []", "docstring": "Constructor.", "id": "f3514:c0:m0"} {"signature": "def successful(self):", "body": "return len(self._failures) == ", "docstring": "Indicates whether or not the entire upload was successful.\n\n:return: Whether or not the upload was successful\n:rtype: bool", "id": "f3514:c0:m3"} {"signature": "def add_failure(self, filepath, reason):", "body": "self._failures.append({\"\": filepath,\"\": reason})", "docstring": "Registers a file as a failure to upload.\n\n:param filepath: The path to the file which was to be uploaded.\n:type filepath: str\n:param reason: The reason the file failed to upload\n:type reason: str", "id": "f3514:c0:m4"} {"signature": "def add_success(self, filepath):", "body": "self._successes.append({\"\": filepath})", "docstring": "Registers a file as successfully uploaded.\n\n:param filepath: The path to the successfully uploaded file.\n:type filepath: str", "id": "f3514:c0:m5"} {"signature": "def __init__(self, path, url=None):", "body": "self._path = pathself._url = url", "docstring": "Constructor.\n\n:param path: The files path\n:type path: str\n:param url: If present, a download URL for the file\n:type url: str", "id": "f3516:c0:m0"} {"signature": "def __init__(self, number):", "body": "self._number = number", "docstring": "Constructor.\n\n:param number: The number of the dataset version\n:type number: ints", "id": "f3517:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, id=None, is_featured=None,name=None, description=None, owner=None, email=None, updated_at=None, query=None, **kwargs):", "body": "self._logic = Noneself.logic = logicself._weight = Noneself.weight = weightself._simple = Noneself.simple = simpleself._simple_weight = Noneself.simple_weight = simple_weightself._id = Noneself.id = idself._is_featured = Noneself.is_featured = is_featuredself._name = Noneself.name = nameself._description = Noneself.description = descriptionself._owner = Noneself.owner = ownerself._email = Noneself.email = emailself._updated_at = Noneself.updated_at = updated_atself._query = Noneself.query = query", "docstring": "Constructor.\n\n:param logic: The logic to apply to the query ('SHOULD', 'MUST', 'MUST_NOT', or 'OPTIONAL').\n:param weight: Weight for the query.\n:param simple: String with the simple search to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param id: One or more :class:`Filter` objects with filters against the id field.\n:param is_featured: One or more :class:`BooleanFilter` objects with filters against the isFeatured field.\n:param name: One or more :class:`Filter` objects with filters against the name field.\n:param description: One or more :class:`Filter` objects with filters against the description field.\n:param owner: One or more :class:`Filter` objects with filters against the owner field.\n:param email: One or more :class:`Filter` objects with filters against the email field.\n:param updated_at: One or more :class:`Filter` objects with filters against the time that the dataset was last updated.\n:param query: One or more :class:`DatasetQuery` objects with nested queries.", "id": "f3518:c0:m0"} {"signature": "def __init__(self, query=None, from_index=None, size=None, random_results=None, random_seed=None,score_relevance=None, return_max_score=None, timeout=None, 
count_pifs=None, **kwargs):", "body": "super(DatasetReturningQuery, self).__init__(query=query, from_index=from_index, size=size, random_results=random_results, random_seed=random_seed,score_relevance=score_relevance, return_max_score=return_max_score, timeout=timeout, **kwargs)self._count_pifs = Noneself.count_pifs = count_pifs", "docstring": "Constructor.\n\n:param query: One or more :class:`DataQuery` objects with the queries to run.\n:param from_index: Index of the first hit that should be returned.\n:param size: Total number of hits the should be returned.\n:param random_results: Whether to return a random set of records.\n:param random_seed: The random seed to use.\n:param score_relevance: Whether to use relevance scoring.\n:param return_max_score: Whether to return the maximum score.\n:param timeout: The number of milliseconds to wait for the query to execute.\n:param count_pifs: Whether to return counts of PIFs for each dataset.", "id": "f3519:c0:m0"} {"signature": "def __init__(self, took=None, total_num_hits=None, max_score=None, hits=None, **kwargs):", "body": "super(DatasetSearchResult, self).__init__(took=took, total_num_hits=total_num_hits, max_score=max_score,hits=self._get_object(DatasetSearchHit, hits), **kwargs)", "docstring": "Constructor.\n\n:param took: Number of milliseconds that the query took to execute.\n:param total_num_hits: Total number of hits.\n:param max_score: The maximum score.\n:param hits: List of :class:`DatasetSearchHit` objects.", "id": "f3520:c0:m0"} {"signature": "def __init__(self, id=None, score=None, is_featured=None, name=None, description=None, owner=None, email=None, num_pifs=None, updated_at=None, **kwargs):", "body": "self._id = Noneself.id = idself._score = Noneself.score = scoreself._is_featured = Noneself.is_featured = is_featuredself._name = Noneself.name = nameself._description = Noneself.description = descriptionself._owner = Noneself.owner = ownerself._email = Noneself.email = emailself._num_pifs = Noneself.num_pifs = num_pifsself._updated_at = Noneself.updated_at = updated_at", "docstring": "Constructor.\n\n:param id: String with the ID of the record.\n:param score: Score with the relevancy of the result.\n:param is_featured: Whether the dataset is a featured one.\n:param name: Name of the dataset.\n:param description: Description of the dataset.\n:param owner: Name of the owner of the dataset.\n:param email: Email address of the owner of the dataset.\n:param num_pifs: Number of PIFs in the dataset.\n:param updated_at: String with the last time that the dataset was updated.", "id": "f3521:c0:m0"} {"signature": "def __init__(self, result=None, status=None, **kwargs):", "body": "self._result = Noneself.result = resultself._status = Noneself.status = status", "docstring": "Constructor.\n\n:param result: A single :class:`DatasetSearchResult` object with the query results.\n:param status: 'SUCCESS', 'ERROR', or 'NOT_EXECUTED'.", "id": "f3522:c0:m0"} {"signature": "def __init__(self, took=None, results=None, **kwargs):", "body": "self._took = Noneself.took = tookself._results = Noneself.results = results", "docstring": "Constructor.\n\n:param took: Number of milliseconds that the query took to execute.\n:param results: List of :class:`DatasetMultiSearchResultElement` objects.", "id": "f3523:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, id=None, name=None, content=None,updated_at=None, query=None, **kwargs):", "body": "self._logic = Noneself.logic = logicself._weight = Noneself.weight = 
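The dataset query classes above combine with the dataset_search call documented further down; a sketch assuming top-level imports and a hypothetical search_client instance, with hit attributes assumed to mirror the DatasetSearchHit constructor:

from citrination_client import DataQuery, DatasetQuery, DatasetReturningQuery, Filter   # import paths assumed

query = DatasetReturningQuery(
    size=10,
    query=DataQuery(
        dataset=DatasetQuery(
            name=[Filter(equal="steel")])))

results = search_client.dataset_search(query)   # search_client: hypothetical client exposing dataset_search
for hit in results.hits:
    print(hit.name, hit.num_pifs)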
weightself._simple = Noneself.simple = simpleself._simple_weight = Noneself.simple_weight = simple_weightself._id = Noneself.id = idself._name = Noneself.name = nameself._content = Noneself.content = contentself._updated_at = Noneself.updated_at = updated_atself._query = Noneself.query = query", "docstring": "Constructor.\n\n:param logic: The logic to apply to the query ('SHOULD', 'MUST', 'MUST_NOT', or 'OPTIONAL').\n:param weight: Weight for the query.\n:param simple: String with the simple search to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param id: One or more :class:`Filter` objects with filters against the id field.\n:param name: One or more :class:`Filter` objects with filters against the name field.\n:param content: One or more :class:`Filter` objects with filters against the content field.\n:param updated_at: One or more :class:`Filter` objects with filters against the time that the dataset was last updated.\n:param query: One or more :class:`DatasetQuery` objects with nested queries.", "id": "f3525:c0:m0"} {"signature": "def __init__(self, query=None, from_index=None, size=None, random_results=None, random_seed=None,score_relevance=None, return_max_score=None, timeout=None, max_content_highlights=None,highlight_pre_tag=None, highlight_post_tag=None, **kwargs):", "body": "super(FileReturningQuery, self).__init__(query=query, from_index=from_index, size=size, random_results=random_results, random_seed=random_seed,score_relevance=score_relevance, return_max_score=return_max_score, timeout=timeout, **kwargs)self._max_content_highlights = Noneself.max_content_highlights = max_content_highlightsself._highlight_pre_tag = Noneself.highlight_pre_tag = highlight_pre_tagself._highlight_post_tag = Noneself.highlight_post_tag = highlight_post_tag", "docstring": "Constructor.\n\n:param query: One or more :class:`DataQuery` objects with the queries to run.\n:param from_index: Index of the first hit that should be returned.\n:param size: Total number of hits the should be returned.\n:param random_results: Whether to return a random set of records.\n:param random_seed: The random seed to use.\n:param score_relevance: Whether to use relevance scoring.\n:param return_max_score: Whether to return the maximum score.\n:param timeout: The number of milliseconds to wait for the query to execute.\n:param max_content_highlights: The maximum number of highlighted results to return.\n:param highlight_pre_tag: The tag to use at the beginning of a highlight.\n:param highlight_post_tag: The tag to use at the end of a highlight.", "id": "f3526:c0:m0"} {"signature": "def __init__(self, took=None, results=None, **kwargs):", "body": "self._took = Noneself.took = tookself._results = Noneself.results = results", "docstring": "Constructor.\n\n:param took: Number of milliseconds that the query took to execute.\n:param results: List of :class:`FileMultiSearchResultElement` objects.", "id": "f3527:c0:m0"} {"signature": "def __init__(self, dataset_id=None, dataset_version=None, id=None, score=None, name=None, updated_at=None, highlights=None, **kwargs):", "body": "self._dataset_id = Noneself.dataset_id = dataset_idself._dataset_version = Noneself.dataset_version = dataset_versionself._id = Noneself.id = idself._score = Noneself.score = scoreself._name = Noneself.name = nameself._updated_at = Noneself.updated_at = updated_atself._highlights = Noneself.highlights = highlights", "docstring": "Constructor.\n\n:param dataset_id: String with the ID of the 
dataset.\n:param dataset_version: Long with the version of the dataset.\n:param id: String with the ID of the record.\n:param score: Score with the relevancy of the result.\n:param name: Name of the dataset.\n:param updated_at: String with the last time that the dataset was updated.\n:param highlights: List of strings with the highlighted results.", "id": "f3528:c0:m0"} {"signature": "def __init__(self, result=None, status=None, **kwargs):", "body": "self._result = Noneself.result = resultself._status = Noneself.status = status", "docstring": "Constructor.\n\n:param result: A single :class:`FileSearchResult` object with the query results.\n:param status: 'SUCCESS', 'ERROR', or 'NOT_EXECUTED'.", "id": "f3529:c0:m0"} {"signature": "def __init__(self, took=None, total_num_hits=None, max_score=None, hits=None, **kwargs):", "body": "super(FileSearchResult, self).__init__(took=took, total_num_hits=total_num_hits, max_score=max_score,hits=self._get_object(FileSearchHit, hits), **kwargs)", "docstring": "Constructor.\n\n:param took: Number of milliseconds that the query took to execute.\n:param total_num_hits: Total number of hits.\n:param max_score: The maximum score.\n:param hits: List of :class:`FileSearchHit` objects.", "id": "f3530:c0:m0"} {"signature": "def default(self, obj):", "body": "if obj is None:return []elif isinstance(obj, list):return [i.as_dictionary() for i in obj]elif isinstance(obj, dict):return self._keys_to_camel_case(obj)else:return obj.as_dictionary()", "docstring": "Convert an object to a form ready to dump to json.\n\n:param obj: Object being serialized. The type of this object must be one of the following: None; a single object derived from the Pio class; or a list of objects, each derived from the Pio class.\n:return: List of dictionaries, each representing a physical information object, ready to be serialized.", "id": "f3531:c0:m0"} {"signature": "def _keys_to_camel_case(self, obj):", "body": "return dict((to_camel_case(key), value) for (key, value) in obj.items())", "docstring": "Make a copy of a dictionary with all keys converted to camel case. 
This is just calls to_camel_case on each of the keys in the dictionary and returns a new dictionary.\n\n:param obj: Dictionary to convert keys to camel case.\n:return: Dictionary with the input values and all keys in camel case", "id": "f3531:c0:m1"} {"signature": "def _validate_search_query(self, returning_query):", "body": "start_index = returning_query.from_index or size = returning_query.size or if start_index < :raise CitrinationClientError(\"\")if size < :raise CitrinationClientError(\"\")if start_index + size > MAX_QUERY_DEPTH:raise CitrinationClientError(\"\".format(MAX_QUERY_DEPTH))", "docstring": "Checks to see that the query will not exceed the max query depth\n\n:param returning_query: The PIF system or Dataset query to execute.\n:type returning_query: :class:`PifSystemReturningQuery` or :class: `DatasetReturningQuery`", "id": "f3532:c0:m2"} {"signature": "def pif_search(self, pif_system_returning_query):", "body": "self._validate_search_query(pif_system_returning_query)return self._execute_search_query(pif_system_returning_query,PifSearchResult)", "docstring": "Run a PIF query against Citrination.\n\n:param pif_system_returning_query: The PIF system query to execute.\n:type pif_system_returning_query: :class:`PifSystemReturningQuery`\n:return: :class:`PifSearchResult` object with the results of the query.\n:rtype: :class:`PifSearchResult`", "id": "f3532:c0:m3"} {"signature": "def dataset_search(self, dataset_returning_query):", "body": "self._validate_search_query(dataset_returning_query)return self._execute_search_query(dataset_returning_query,DatasetSearchResult)", "docstring": "Run a dataset query against Citrination.\n\n:param dataset_returning_query: :class:`DatasetReturningQuery` to execute.\n:type dataset_returning_query: :class:`DatasetReturningQuery`\n:return: Dataset search result object with the results of the query.\n:rtype: :class:`DatasetSearchResult`", "id": "f3532:c0:m4"} {"signature": "def _execute_search_query(self, returning_query, result_class):", "body": "if returning_query.from_index:from_index = returning_query.from_indexelse:from_index = if returning_query.size != None:size = min(returning_query.size, client_config.max_query_size)else:size = client_config.max_query_sizeif (size == client_config.max_query_size andsize != returning_query.size):self._warn(\"\".format(size))time = ;hits = [];while True:sub_query = deepcopy(returning_query)sub_query.from_index = from_index + len(hits)partial_results = self._search_internal(sub_query, result_class)total = partial_results.total_num_hitstime += partial_results.tookif partial_results.hits is not None:hits.extend(partial_results.hits)if len(hits) >= size or len(hits) >= total or sub_query.from_index >= total:breakreturn result_class(hits=hits, total_num_hits=total, took=time)", "docstring": "Run a PIF query against Citrination.\n\n:param returning_query: :class:`BaseReturningQuery` to execute.\n:param result_class: The class of the result to return.\n:return: ``result_class`` object with the results of the query.", "id": "f3532:c0:m5"} {"signature": "def pif_multi_search(self, multi_query):", "body": "failure_message = \"\"response_dict = self._get_success_json(self._post(routes.pif_multi_search, data=json.dumps(multi_query, cls=QueryEncoder),failure_message=failure_message))return PifMultiSearchResult(**keys_to_snake_case(response_dict['']))", "docstring": "Run each in a list of PIF queries against Citrination.\n\n:param multi_query: :class:`MultiQuery` object to execute.\n:return: :class:`PifMultiSearchResult` 
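_execute_search_query shows that pif_search and dataset_search page through results automatically up to the client's maximum query size. A PIF search sketch, mirroring the attribute-assignment pattern used in generate_simple_chemical_query below; imports and the search_client instance are assumptions:

from citrination_client import (PifSystemReturningQuery, PifSystemQuery, DataQuery,
                                ChemicalFieldQuery, ChemicalFilter)   # import paths assumed

system_query = PifSystemQuery()
system_query.chemical_formula = ChemicalFieldQuery(filter=[ChemicalFilter(equal="GaN")])

query = PifSystemReturningQuery(size=100, query=DataQuery(system=system_query))
result = search_client.pif_search(query)
print(result.total_num_hits, "matching systems")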
object with the results of the query.", "id": "f3532:c0:m7"} {"signature": "def generate_simple_chemical_query(self, name=None, chemical_formula=None, property_name=None, property_value=None,property_min=None, property_max=None, property_units=None, reference_doi=None,include_datasets=[], exclude_datasets=[], from_index=None, size=None):", "body": "pif_system_query = PifSystemQuery()pif_system_query.names = FieldQuery(extract_as='',filter=[Filter(equal=i) for i in self._get_list(name)])pif_system_query.chemical_formula = ChemicalFieldQuery(extract_as='',filter=[ChemicalFilter(equal=i) for i in self._get_list(chemical_formula)])pif_system_query.references = ReferenceQuery(doi=FieldQuery(extract_as='',filter=[Filter(equal=i) for i in self._get_list(reference_doi)]))property_name_query = FieldQuery(extract_as='',filter=[Filter(equal=i) for i in self._get_list(property_name)])property_units_query = FieldQuery(extract_as='',filter=[Filter(equal=i) for i in self._get_list(property_units)])property_value_query = FieldQuery(extract_as='',filter=[])for i in self._get_list(property_value):property_value_query.filter.append(Filter(equal=i))if property_min is not None or property_max is not None:property_value_query.filter.append(Filter(min=property_min, max=property_max))pif_system_query.properties = PropertyQuery(name=property_name_query,value=property_value_query,units=property_units_query)dataset_query = list()if include_datasets:dataset_query.append(DatasetQuery(logic='', id=[Filter(equal=i) for i in include_datasets]))if exclude_datasets:dataset_query.append(DatasetQuery(logic='', id=[Filter(equal=i) for i in exclude_datasets]))pif_system_returning_query = PifSystemReturningQuery(query=DataQuery(system=pif_system_query,dataset=dataset_query),from_index=from_index,size=size,score_relevance=True)return pif_system_returning_query", "docstring": "This method generates a :class:`PifSystemReturningQuery` object using the\nsupplied arguments. All arguments that accept lists have logical OR's on the queries that they generate.\nThis means that, for example, simple_chemical_search(name=['A', 'B']) will match records that have name\nequal to 'A' or 'B'.\n\nResults will be pulled into the extracted field of the :class:`PifSearchHit` objects that are returned. The\nname will appear under the key \"name\", chemical formula under \"chemical_formula\", property name under\n\"property_name\", value of the property under \"property_value\", units of the property under \"property_units\",\nand reference DOI under \"reference_doi\".\n\nThis method is only meant for execution of very simple queries. 
More complex queries must use the search method\nthat accepts a :class:`PifSystemReturningQuery` object.\n\n:param name: One or more strings with the names of the chemical system to match.\n:type name: str or list of str\n:param chemical_formula: One or more strings with the chemical formulas to match.\n:type chemical_formula: str or list of str\n:param property_name: One or more strings with the names of the property to match.\n:type property_name: str or list of str\n:param property_value: One or more strings or numbers with the exact values to match.\n:type property_value: str or int or float or list of str or int or float\n:param property_min: A single string or number with the minimum value to match.\n:type property_min: str or int or float\n:param property_max: A single string or number with the maximum value to match.\n:type property_max: str or int or float\n:param property_units: One or more strings with the property units to match.\n:type property_units: str or list of str\n:param reference_doi: One or more strings with the DOI to match.\n:type reference_doin: str or list of str\n:param include_datasets: One or more integers with dataset IDs to match.\n:type include_datasets: int or list of int\n:param exclude_datasets: One or more integers with dataset IDs that must not match.\n:type exclude_datasets: int or list of int\n:param from_index: Index of the first record to match.\n:type from_index: int\n:param size: Total number of records to return.\n:type size: int\n:return: A query to to be submitted with the pif_search method\n:rtype: :class:`PifSystemReturningQuery`", "id": "f3532:c0:m8"} {"signature": "@staticmethoddef _get_list(values):", "body": "if values is None:return []elif isinstance(values, list):return valueselse:return [values]", "docstring": "Helper method that wraps values in a list. If the input is a list then it is returned. If the input is None then an empty list is returned. For anything else, the input value is wrapped as a single-element list.\n\n:param values: Value to make sure exists in a list.\n:return: List with the input values.", "id": "f3532:c0:m9"} {"signature": "def __init__(self, logic=None, weight=None, exists=None, equal=None, filter=None, **kwargs):", "body": "self._logic = Noneself.logic = logicself._weight = Noneself.weight = weightself._exists = Noneself.exists = existsself._equal = Noneself.equal = equalself._filter = Noneself.filter = filter", "docstring": "Constructor.\n\n:param logic: Logic for this filter. 
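generate_simple_chemical_query builds that whole query structure for the common case; a sketch using only arguments documented above (search_client remains a hypothetical client instance), with extracted keys taken from the docstring:

query = search_client.generate_simple_chemical_query(
    chemical_formula="GaN",
    property_name="Band gap",
    property_min=0.5,
    property_max=4.0,
    property_units="eV",
    size=50)

for hit in search_client.pif_search(query).hits:
    extracted = hit.extracted   # per the docstring, matched values land under keys such as
    print(extracted.get("chemical_formula"), extracted.get("property_value"))   # "chemical_formula", "property_value"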
Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight for the filter.\n:param exists: True/False to simply test whether the field exists and has a non-null value.\n:param equal: String with the phrase to match against.\n:param filter: List of :class:`BooleanFilter` objects with sub-filters.", "id": "f3533:c0:m0"} {"signature": "def __init__(self, query=None, extraction_sort=None, from_index=None, size=None, random_results=None,random_seed=None, score_relevance=None, return_max_score=None, timeout=None, **kwargs):", "body": "super(BaseReturningQuery, self).__init__(query=query, extraction_sort=extraction_sort, **kwargs)if '' in '':self.from_index = kwargs['']self._from = Noneself.from_index = from_indexself._size = Noneself.size = sizeself._random_results = Noneself.random_results = random_resultsself._random_seed = Noneself.random_seed = random_seedself._score_relevance = Noneself.score_relevance = score_relevanceself._return_max_score = Noneself.return_max_score = return_max_scoreself._timeout = Noneself.timeout = timeout", "docstring": "Base class for all queries against datasets and the items that they contain on Citrination.\n\n:param query: One or more :class:`DataQuery` objects with the queries to run.\n:param extraction_sort: A single :class:`ExtractionSort` object for sorting.\n:param from_index: Index of the first hit that should be returned.\n:param size: Total number of hits the should be returned.\n:param random_results: Whether to return a random set of records.\n:param random_seed: The random seed to use.\n:param score_relevance: Whether to use relevance scoring.\n:param return_max_score: Whether to return the maximum score.\n:param timeout: The number of milliseconds to wait for the query to execute.", "id": "f3534:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, exists=None, equal=None, min=None, max=None, exact=None,filter=None, **kwargs):", "body": "self._logic = Noneself.logic = logicself._weight = Noneself.weight = weightself._exists = Noneself.exists = existsself._equal = Noneself.equal = equalself._min = Noneself.min = minself._max = Noneself.max = maxself._exact = Noneself.exact = exactself._filter = Noneself.filter = filter", "docstring": "Constructor.\n\n:param logic: Logic for this filter. 
Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight for the filter.\n:param exists: True/False to simply test whether the field exists and has a non-null value.\n:param equal: String with the phrase to match against.\n:param min: String with the minimum value of a range to match against.\n:param max: String with the maximum value of a range to match against.\n:param exact: True/False to set whether the \"equal\" filter should be an exact match.\n:param filter: List of :class:`Filter` objects with sub-filters.", "id": "f3535:c0:m0"} {"signature": "def __init__(self, query=None, **kwargs):", "body": "self._query = Noneself.query = query", "docstring": "Constructor.\n\n:param query: One or more :class:`DataQuery` objects with the queries to run.", "id": "f3536:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, dataset=None, system=None, file=None,query=None, **kwargs):", "body": "self._logic = Noneself.logic = logicself._weight = Noneself.weight = weightself._simple = Noneself.simple = simpleself._simple_weight = Noneself.simple_weight = simple_weightself._dataset = Noneself.dataset = datasetself._system = Noneself.system = systemself._file = Noneself.file = fileself._query = Noneself.query = query", "docstring": "Constructor.\n\n:param logic: The logic to apply to the query ('SHOULD', 'MUST', 'MUST_NOT', or 'OPTIONAL').\n:param weight: Weight for the query.\n:param simple: String with the simple search to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param dataset: One or more :class:`DatasetQuery` objects with queries against dataset metadata.\n:param system: One or more :class:`PifSystemQuery` objects with queries against PIF systems\n:param file: One or more :class:`FileQuery` objects with queries against file content or metadata.\n:type file: :class:`FileQuery`\n:param query: Nested list of :class:`DataQuery` objects.", "id": "f3537:c0:m0"} {"signature": "def __init__(self, queries=None, **kwargs):", "body": "self._queries = Noneself.queries = queries", "docstring": "Constructor.\n\n:param queries: One or more queries to run.", "id": "f3538:c0:m0"} {"signature": "def __init__(self, took=None, total_num_hits=None, max_score=None, hits=None, **kwargs):", "body": "self._took = Noneself.took = tookself._total_num_hits = Noneself.total_num_hits = total_num_hitsself._max_score = Noneself.max_score = max_scoreself._hits = Noneself.hits = hits", "docstring": "Constructor.\n\n:param took: Number of milliseconds that the query took to execute.\n:param total_num_hits: Total number of hits.\n:param max_score: The maximum score.\n:param hits: List of hits.", "id": "f3539:c0:m0"} {"signature": "def __init__(self, sort=None, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None,extract_all=None, extract_when_missing=None, length=None, offset=None, filter=None, **kwargs):", "body": "super(ChemicalFieldQuery, self).__init__(sort=sort, weight=weight, logic=logic, simple=simple, simple_weight=simple_weight, extract_as=extract_as,extract_all=extract_all, extract_when_missing=extract_when_missing, length=length, offset=offset, **kwargs)self._filter = Noneself.filter = filter", "docstring": "Constructor.\n\n:param sort: ASCENDING or DESCENDING to set the sort order on this field.\n:param logic: Logic for this query. 
Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight for the query.\n:param simple: String with the simple search to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param length: One or more :class:`FieldOperation` objects against the length field.\n:param offset: One or more :class:`FieldOperation` objects against the offset field.\n:param filter: One or more :class:`ChemicalFilter` objects against this field.", "id": "f3542:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None,extract_all=None, extract_when_missing=None, tags=None, length=None, offset=None, element=None,actual_weight_percent=None, actual_atomic_percent=None, ideal_weight_percent=None,ideal_atomic_percent=None, query=None, **kwargs):", "body": "super(CompositionQuery, self).__init__(logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,offset=offset, **kwargs)self._element = Noneself.element = elementself._actual_weight_percent = Noneself.actual_weight_percent = actual_weight_percentself._actual_atomic_percent = Noneself.actual_atomic_percent = actual_atomic_percentself._ideal_weight_percent = Noneself.ideal_weight_percent = ideal_weight_percentself._ideal_atomic_percent = Noneself.ideal_atomic_percent = ideal_atomic_percentself._query = Noneself.query = query", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the simple query to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. 
This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param element: One or more :class:`ChemicalFieldQuery` operations against the element field.\n:param actual_weight_percent: One or more :class:`FieldQuery` operations against the actual weight percent field.\n:param actual_atomic_percent: One or more :class:`FieldQuery` operations against the actual atomic percent field.\n:param ideal_weight_percent: One or more :class:`FieldQuery` operations against the ideal weight percent field.\n:param ideal_atomic_percent: One or more :class:`FieldQuery` operations against the ideal atomic percent field.\n:param query: One or more :class:`CompositionQuery` objects with the nest queries.", "id": "f3543:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, exists=None, equal=None, element=None, partial=None, exact=None,filter=None, **kwargs):", "body": "self._logic = Noneself.logic = logicself._weight = Noneself.weight = weightself._exists = Noneself.exists = existsself._equal = Noneself.equal = equalself._element = Noneself.element = elementself._partial = Noneself.partial = partialself._exact = Noneself.exact = exactself._filter = Noneself.filter = filter", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight for the query.\n:param exists: True/False to simply test whether the field exists and has a non-null value.\n:param equal: String with the phrase to match against.\n:param element: True to match against the single element field.\n:param partial: True to match against the partial formula field.\n:param exact: True if matches should be exact.\n:param filter: List of :class:`ChemicalFilter` objects with sub-filters.", "id": "f3544:c0:m0"} {"signature": "def __init__(self, key=None, order=None, **kwargs):", "body": "self._key = Noneself.key = keyself._order = Noneself.order = order", "docstring": "Constructor.\n\n:param key: String with the key that will be sorted on.\n:param order: The order to use. Either ASCENDING or DESCENDING.", "id": "f3545:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,extract_when_missing=None, tags=None, length=None, offset=None, conditions=None, data_type=None,name=None, value=None, file=None, units=None, references=None, query=None, **kwargs):", "body": "super(PropertyQuery, self).__init__(logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,offset=offset, name=name, value=value, file=file, units=units, **kwargs)self._conditions = Noneself.conditions = conditionsself._data_type = Noneself.data_type = data_typeself._references = Noneself.references = referencesself._query = Noneself.query = query", "docstring": "Constructor.\n\n:param logic: Logic for this filter. 
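A sketch composing the field-level query classes above into a property constraint on a PIF system query; imports are assumed, and assigning to the properties attribute mirrors the generate_simple_chemical_query body shown earlier:

from citrination_client import PropertyQuery, FieldQuery, Filter, PifSystemQuery   # import paths assumed

band_gap = PropertyQuery(
    name=FieldQuery(filter=[Filter(equal="Band gap")]),
    value=FieldQuery(filter=[Filter(min="1.0", max="3.0")]),
    units=FieldQuery(filter=[Filter(equal="eV")]))

system_query = PifSystemQuery()
system_query.properties = band_gap   # attribute assignment as in the method body above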
Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the simple query to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param conditions: One or more :class:`ValueQuery` operations against the conditions.\n:param data_type: One or more :class:`FieldQuery` operations against the dataType field.\n:param name: One or more :class:`FieldQuery` operations against the name field.\n:param value: One or more :class:`FieldQuery` operations against the value.\n:param file: One or more :class:`FileReferenceQuery` operations against the file.\n:param units: One or more :class:`FieldQuery` operations against the units field.\n:param references: One or more :class:`ReferenceQuery` operations against the references field.\n:param query: One or more :class:`PropertyQuery` objects with nested queries.", "id": "f3546:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,extract_when_missing=None, tags=None, length=None, offset=None, given=None, family=None,title=None, suffix=None, query=None, **kwargs):", "body": "super(NameQuery, self).__init__(logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,offset=offset, **kwargs)self._given = Noneself.given = givenself._family = Noneself.family = familyself._title = Noneself.title = titleself._suffix = Noneself.suffix = suffixself._query = Noneself.query = query", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the simple query to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. 
This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param given: One or more :class:`FieldQuery` operations against the given name field.\n:param family: One or more :class:`FieldQuery` operations against the family name field.\n:param title: One or more :class:`FieldQuery` operations against the title field.\n:param suffix: One or more :class:`FieldQuery` operations against the suffix field.\n:param query: One or more :class:`NameQuery` objects with nested queries.", "id": "f3547:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,extract_when_missing=None, tags=None, length=None, offset=None, name=None, value=None,query=None, **kwargs):", "body": "super(ClassificationQuery, self).__init__(logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,offset=offset, **kwargs)self._name = Noneself.name = nameself._value = Noneself.value = valueself._query = Noneself.query = query", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the simple query against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param name: One or more :class:`FieldQuery` operations against the name field.\n:param value: One or more :class:`FieldQuery` operations against the value field.\n:param query: One or more :class:`ClassificationQuery` objects for nested queries.", "id": "f3548:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,extract_when_missing=None, tags=None, length=None, offset=None, relative_path=None,mime_type=None, sha256=None, md5=None, query=None, **kwargs):", "body": "super(FileReferenceQuery, self).__init__(logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,offset=offset, **kwargs)self._relative_path = Noneself.relative_path = relative_pathself._mime_type = Noneself.mime_type = mime_typeself._sha256 = Noneself.sha256 = sha256self._md5 = Noneself.md5 = md5self._query = Noneself.query = query", "docstring": "Constructor.\n\n:param logic: Logic for this filter. 
Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the simple query to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param relative_path: One or more :class:`FieldQuery` operations against the relative path field.\n:param mime_type: One or more :class:`FieldQuery` operations against the mime type field.\n:param sha256: One or more :class:`FieldQuery` operations against the sha256 field.\n:param md5: One or more :class:`FieldQuery` operations against the md5 field.\n:param query: One or more :class:`FileReferenceQuery` objects as nested queries.", "id": "f3549:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,extract_when_missing=None, tags=None, length=None, offset=None, name=None, value=None,query=None, **kwargs):", "body": "super(IdQuery, self).__init__(logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,offset=offset, **kwargs)self._name = Noneself.name = nameself._value = Noneself.value = valueself._query = Noneself.query = query", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the query to run over all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. 
This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param name: One or more :class:`FieldQuery` operations against the name field.\n:param value: One or more :class:`FieldQuery` operations against the value field.\n:param query: One or more :class:`IdQuery` objects with nested queries.", "id": "f3550:c0:m0"} {"signature": "def __init__(self, sort=None, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None,extract_all=None, extract_when_missing=None, length=None, offset=None, **kwargs):", "body": "self._sort = Noneself.sort = sortself._logic = Noneself.logic = logicself._weight = Noneself.weight = weightself._simple = Noneself.simple = simpleself._simple_weight = Noneself.simple_weight = simple_weightself._extract_as = Noneself.extract_as = extract_asself._extract_all = Noneself.extract_all = extract_allself._extract_when_missing = Noneself.extract_when_missing = extract_when_missingself._length = Noneself.length = lengthself._offset = Noneself.offset = offset", "docstring": "Constructor.\n\n:param sort: ASCENDING or DESCENDING to set the sort order on this field.\n:param logic: Logic for this query. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight for the query.\n:param simple: String with the simple search to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.", "id": "f3551:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,extract_when_missing=None, tags=None, length=None, offset=None, name=None, value=None,file=None, units=None, query=None, **kwargs):", "body": "super(ValueQuery, self).__init__(logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,offset=offset, **kwargs)self._name = Noneself.name = nameself._value = Noneself.value = valueself._file = Noneself.file = fileself._units = Noneself.units = unitsself._query = Noneself.query = query", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the simple query to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. 
This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param name: One or more :class:`FieldQuery` operations against the name field.\n:param value: One or more :class:`FieldQuery` operations against the value.\n:param file: One or more :class:`FileReferenceQuery` operations against the file.\n:param units: One or more :class:`FieldQuery` operations against the units field.\n:param query: One or more :class:`ValueQuery` objects with nested queries.", "id": "f3552:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,extract_when_missing=None, tags=None, length=None, offset=None, name=None, details=None,query=None, **kwargs):", "body": "super(ProcessStepQuery, self).__init__(logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,offset=offset, **kwargs)self._name = Noneself.name = nameself._details = Noneself.details = detailsself._query = Noneself.query = query", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the query to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param name: One or more :class:`FieldQuery` operations against the name field.\n:param details: One or more :class:`ValueQuery` operations against the details of the step.\n:param query: One or more :class:`ProcessStepQuery` objects with nested queries.", "id": "f3553:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,extract_when_missing=None, tags=None, length=None, offset=None, number=None, title=None,caption=None, query=None, **kwargs):", "body": "super(DisplayItemQuery, self).__init__(logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,offset=offset, **kwargs)self._title = Noneself.title = titleself._number = Noneself.number = numberself._caption = Noneself.caption = captionself._query = Noneself.query = query", "docstring": "Constructor.\n\n:param logic: Logic for this filter. 
Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the simple query to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param number: One or more :class:`FieldQuery` operations against the number field.\n:param title: One or more :class:`FieldQuery` operations against the title field.\n:param caption: One or more :class:`FieldQuery` operations against the caption field.\n:param query: One or more :class:`DisplayItemQuery` objects as nested queries.", "id": "f3554:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,extract_when_missing=None, tags=None, length=None, offset=None, actual_mass_percent=None,actual_volume_percent=None, actual_number_percent=None, ideal_mass_percent=None,ideal_volume_percent=None, ideal_number_percent=None, query=None, **kwargs):", "body": "super(QuantityQuery, self).__init__(logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,offset=offset, **kwargs)self._actual_mass_percent = Noneself.actual_mass_percent = actual_mass_percentself._actual_volume_percent = Noneself.actual_volume_percent = actual_volume_percentself._actual_number_percent = Noneself.actual_number_percent = actual_number_percentself._ideal_mass_percent = Noneself.ideal_mass_percent = ideal_mass_percentself._ideal_volume_percent = Noneself.ideal_volume_percent = ideal_volume_percentself._ideal_number_percent = Noneself.ideal_number_percent = ideal_number_percentself._query = Noneself.query = query", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the simple query to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. 
This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param actual_mass_percent: One or more :class:`FieldQuery` operations against the actual mass percent field.\n:param actual_volume_percent: One or more :class:`FieldQuery` operations against the actual volume percent field.\n:param actual_number_percent: One or more :class:`FieldQuery` operations against the actual number percent field.\n:param ideal_mass_percent: One or more :class:`FieldQuery` operations against the ideal mass percent field.\n:param ideal_volume_percent: One or more :class:`FieldQuery` operations against the ideal volume percent field.\n:param ideal_number_percent: One or more :class:`FieldQuery` operations against the ideal number percent field.\n:param query: One or more :class:`QuantityQuery` objects with nested queries.", "id": "f3555:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,extract_when_missing=None, tags=None, length=None, offset=None, producer=None, url=None,query=None, **kwargs):", "body": "super(SourceQuery, self).__init__(logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,offset=offset, **kwargs)self._producer = Noneself.producer = producerself._url = Noneself.url = urlself._query = Noneself.query = query", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the simple query to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. 
This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param producer: One or more :class:`FieldQuery` operations against the producer field.\n:param url: One or more :class:`FieldQuery` operations against the url field.\n:param query: One or more :class:`SourceQuery` objects with nested queries.", "id": "f3556:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,extract_when_missing=None, tags=None, length=None, offset=None, doi=None, isbn=None, issn=None,url=None, title=None, publisher=None, journal=None, volume=None, issue=None, year=None,figure=None, table=None, pages=None, authors=None, editors=None, affiliations=None,acknowledgements=None, references=None, query=None, **kwargs):", "body": "super(ReferenceQuery, self).__init__(logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,offset=offset, **kwargs)self._doi = Noneself.doi = doiself._isbn = Noneself.isbn = isbnself._issn = Noneself.issn = issnself._url = Noneself.url = urlself._title = Noneself.title = titleself._publisher = Noneself.publisher = publisherself._journal = Noneself.journal = journalself._volume = Noneself.volume = volumeself._issue = Noneself.issue = issueself._year = Noneself.year = yearself._figure = Noneself.figure = figureself._table = Noneself.table = tableself._pages = Noneself.pages = pagesself._authors = Noneself.authors = authorsself._editors = Noneself.editors = editorsself._affiliations = Noneself.affiliations = affiliationsself._acknowledgements = Noneself.acknowledgements = acknowledgementsself._references = Noneself.references = referencesself._query = Noneself.query = query", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the simple query to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. 
This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param doi: One or more :class:`FieldQuery` operations against the doi field.\n:param isbn: One or more :class:`FieldQuery` operations against the isbn field.\n:param issn: One or more :class:`FieldQuery` operations against the issn field.\n:param url: One or more :class:`FieldQuery` operations against the url field.\n:param title: One or more :class:`FieldQuery` operations against the title field.\n:param publisher: One or more :class:`FieldQuery` operations against the publisher field.\n:param journal: One or more :class:`FieldQuery` operations against the journal field.\n:param volume: One or more :class:`FieldQuery` operations against the volume field.\n:param issue: One or more :class:`FieldQuery` operations against the issue field.\n:param year: One or more :class:`FieldQuery` operations against the year field.\n:param figure: One or more :class:`DisplayItemQuery` operations against the figure field.\n:param table: One or more :class:`DisplayItemQuery` operations against the table field.\n:param pages: One or more :class:`PagesQuery` operations against the pages field.\n:param authors: One or more :class:`NameQuery` operations against the authors field.\n:param editors: One or more :class:`NameQuery` operations against the editors field.\n:param affiliations: One or more :class:`FieldQuery` operations against the affiliations field.\n:param acknowledgements: One or more :class:`FieldQuery` operations against the acknowledgements field.\n:param references: One or more :class:`ReferenceQuery` operations against the references field.\n:param query: One or more :class:`ReferenceQuery` objects with nested queries.", "id": "f3557:c0:m0"} {"signature": "def __init__(self, sort=None, weight=None, logic=None, simple=None, simple_weight=None, extract_as=None,extract_all=None, extract_when_missing=None, length=None, offset=None, filter=None, **kwargs):", "body": "super(FieldQuery, self).__init__(sort=sort, weight=weight, logic=logic, simple=simple, simple_weight=simple_weight, extract_as=extract_as,extract_all=extract_all, extract_when_missing=extract_when_missing, length=length, offset=offset, **kwargs)self._filter = Noneself.filter = filter", "docstring": "Constructor.\n\n:param sort: ASCENDING or DESCENDING to set the sort order on this field.\n:param weight: Weight of the query.\n:param logic: Logic for this query. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param simple: String with the simple search to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. 
This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param length: One or more :class:`FieldQuery` objects against the length field.\n:param offset: One or more :class:`FieldQuery` objects against the offset field.\n:param filter: One or more :class:`Filter` objects against this field.", "id": "f3558:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,extract_when_missing=None, tags=None, length=None, offset=None, **kwargs):", "body": "self._logic = Noneself.logic = logicself._weight = Noneself.weight = weightself._simple = Noneself.simple = simpleself._simple_weight = Noneself.simple_weight = simple_weightself._extract_as = Noneself.extract_as = extract_asself._extract_all = Noneself.extract_all = extract_allself._extract_when_missing = Noneself.extract_when_missing = extract_when_missingself._tags = Noneself.tags = tagsself._length = Noneself.length = lengthself._offset = Noneself.offset = offset", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight for the query.\n:param simple: String with the simple search to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.", "id": "f3559:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,extract_when_missing=None, tags=None, length=None, offset=None, start=None, end=None,query=None, **kwargs):", "body": "super(PagesQuery, self).__init__(logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,offset=offset, **kwargs)self._start = Noneself.start = startself._end = Noneself.end = endself._query = Noneself.query = query", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the simple query to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. 
This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param start: One or more :class:`FieldQuery` operations against the starting page field.\n:param end: One or more :class:`FieldQuery` operations against the ending page field.\n:param query: One or more :class:`PagesQuery` objects with nested queries.", "id": "f3560:c0:m0"} {"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,extract_when_missing=None, tags=None, length=None, offset=None, uid=None, updated_at=None,names=None, ids=None, classifications=None, source=None, quantity=None, chemical_formula=None,composition=None, properties=None, preparation=None, references=None, sub_systems=None,query=None, **kwargs):", "body": "super(PifSystemQuery, self).__init__(logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,offset=offset, **kwargs)self._uid = Noneself.uid = uidself._updated_at = Noneself.updated_at = updated_atself._names = Noneself.names = namesself._ids = Noneself.ids = idsself._classifications = Noneself.classifications = classificationsself._source = Noneself.source = sourceself._quantity = Noneself.quantity = quantityself._chemical_formula = Noneself.chemical_formula = chemical_formulaself._composition = Noneself.composition = compositionself._properties = Noneself.properties = propertiesself._preparation = Noneself.preparation = preparationself._references = Noneself.references = referencesself._sub_systems = Noneself.sub_systems = sub_systemsself._query = Noneself.query = query", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the simple search to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. 
This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param uid: One or more :class:`Filter` objects with the filters against the uid field.\n:param updated_at: One or more :class:`Filter` objects with filters against the time that the PIF record was last updated.\n:param names: One or more :class:`FieldQuery` objects with queries against the names field.\n:param ids: One or more :class:`IdQuery` objects with queries against the ids field.\n:param classifications: One or more :class:`ClassificationQuery` objects with queries against the classifications field.\n:param source: One or more :class:`SourceQuery` objects with queries against the source field.\n:param quantity: One or more :class:`QuantityQuery` objects with queries against the quantity field.\n:param chemical_formula: One or more :class:`ChemicalFieldQuery` objects with queries against the chemicalFormula field.\n:param composition: One or more :class:`CompositionQuery` objects with queries against the composition field.\n:param properties: One or more :class:`PropertyQuery` objects with queries against the properties field.\n:param preparation: One or more :class:`ProcessStepQuery` objects with queries against the preparation field.\n:param references: One or more :class:`ReferenceQuery` objects with queries against the references field.\n:param sub_systems: One or more :class:`PifSystemQuery` objects with queries against the subSystems field.\n:param query: One or more :class:`PifSystemQuery` objects with nested queries.", "id": "f3561:c0:m0"} {"signature": "def __init__(self, query=None, extraction_sort=None, from_index=None, size=None, random_results=None,random_seed=None, score_relevance=None, return_max_score=None, timeout=None, return_system=None,add_latex=None, return_extracted_path=None, unwrap_single_value_extractions=None, **kwargs):", "body": "super(PifSystemReturningQuery, self).__init__(query=query, from_index=from_index, size=size, random_results=random_results, random_seed=random_seed,score_relevance=score_relevance, return_max_score=return_max_score, timeout=timeout, **kwargs)self._return_system = Noneself.return_system = return_systemself._add_latex = Noneself.add_latex = add_latexself._return_extracted_path = Noneself.return_extracted_path = return_extracted_pathself._unwrap_single_value_extractions = Noneself.unwrap_single_value_extractions = unwrap_single_value_extractionsself._extraction_sort = Noneself.extraction_sort = extraction_sort", "docstring": "Constructor.\n\n:param query: One or more :class:`DataQuery` objects with the queries to run.\n:param extraction_sort: A single :class:`ExtractionSort` object for sorting.\n:param from_index: Index of the first hit that should be returned.\n:param size: Total number of hits that should be returned.\n:param random_results: Whether to return a random set of records.\n:param random_seed: The random seed to use.\n:param score_relevance: Whether to use relevance scoring.\n:param return_max_score: Whether to return the maximum score.\n:param timeout: The number of milliseconds to wait for the query to execute.\n:param return_system: Whether to return the matched PIF systems.\n:param add_latex: Whether to add latex formatting where possible in 
results.\n:param return_extracted_path: Whether to return the path in PIFs for extracted values.\n:param unwrap_single_value_extractions: Whether to unwrap extracted values when they are lists with one value.", "id": "f3562:c0:m0"} {"signature": "def __init__(self, result=None, status=None, **kwargs):", "body": "self._result = Noneself.result = resultself._status = Noneself.status = status", "docstring": "Constructor.\n\n:param result: A single :class:`PifSearchResult` object with the query results.\n:param status: 'SUCCESS', 'ERROR', or 'NOT_EXECUTED'.", "id": "f3563:c0:m0"} {"signature": "def __init__(self, id=None, dataset=None, dataset_version=None, score=None, updated_at=None, system=None, extracted=None, extracted_path=None, **kwargs):", "body": "self._id = Noneself.id = idself._dataset = Noneself.dataset = datasetself._dataset_version = Noneself.dataset_version = dataset_versionself._score = Noneself.score = scoreself._updated_at = Noneself.updated_at = updated_atself._system = Noneself.system = systemself._extracted = Noneself.extracted = extractedself._extracted_path = Noneself.extracted_path = extracted_path", "docstring": "Constructor.\n\n:param id: String with the ID of the record.\n:param dataset: Integer with the dataset of the record.\n:param dataset_version: Integer with the dataset version of the record.\n:param score: Score with the relevancy of the result.\n:param updated_at: String with the last time that the record was updated.\n:param system: Pif System object that matched.\n:param extracted: Dictionary with a map of extracted property names to values.\n:param extracted_path: Dictionary with a map of extracted property names to paths in a PIF.", "id": "f3564:c0:m0"} {"signature": "def __init__(self, took=None, results=None, **kwargs):", "body": "self._took = Noneself.took = tookself._results = Noneself.results = results", "docstring": "Constructor.\n\n:param took: Number of milliseconds that the query took to execute.\n:param results: List of :class:`PifMultiSearchResultElement` objects.", "id": "f3565:c0:m0"} {"signature": "def __init__(self, took=None, total_num_hits=None, max_score=None, hits=None, **kwargs):", "body": "super(PifSearchResult, self).__init__(took=took, total_num_hits=total_num_hits, max_score=max_score,hits=self._get_object(PifSearchHit, hits), **kwargs)", "docstring": "Constructor.\n\n:param took: Number of milliseconds that the query took to execute.\n:param total_num_hits: Total number of hits.\n:param max_score: The maximum score.\n:param hits: List of :class:`PifSearchHit` objects.", "id": "f3566:c0:m0"} {"signature": "def load_file_as_json(path):", "body": "with open(path, \"\") as f:parsed_dict = json.load(f)return parsed_dict", "docstring": "Given a filepath, loads the file as a dictionary from JSON\n\n:param path: The path to a JSON file", "id": "f3568:m0"} {"signature": "def create(self, configuration, name, description):", "body": "data = {\"\":configuration,\"\":name,\"\":description}failure_message = \"\"result = self._get_success_json(self._post_json('', data, failure_message=failure_message))data_view_id = result['']['']return data_view_id", "docstring": "Creates a data view from the search template and ml template given\n\n:param configuration: Information to construct the data view from (eg descriptors, datasets etc)\n:param name: Name of the data view\n:param description: Description for the data view\n:return: The data view id", "id": "f3579:c0:m1"} {"signature": "def update(self, id, configuration, name, description):", "body": 
"data = {\"\":configuration,\"\":name,\"\":description}failure_message = \"\"self._patch_json('' + id, data, failure_message=failure_message)", "docstring": "Updates an existing data view from the search template and ml template given\n\n:param id: Identifier for the data view. This returned from the create method.\n:param configuration: Information to construct the data view from (eg descriptors, datasets etc)\n:param name: Name of the data view\n:param description: Description for the data view", "id": "f3579:c0:m2"} {"signature": "def delete(self, id):", "body": "failure_message = \"\"self._delete('' + id, None, failure_message=failure_message)", "docstring": "Deletes a data view.\n\n:param id: Identifier of the data view", "id": "f3579:c0:m3"} {"signature": "def get(self, data_view_id):", "body": "failure_message = \"\"return self._get_success_json(self._get('' + data_view_id, None, failure_message=failure_message))['']['']", "docstring": "Gets basic information about a view\n\n:param data_view_id: Identifier of the data view\n:return: Metadata about the view as JSON", "id": "f3579:c0:m4"} {"signature": "def get_data_view_service_status(self, data_view_id):", "body": "url = \"\".format(data_view_id)response = self._get(url).json()result = response[\"\"][\"\"]return DataViewStatus(predict=ServiceStatus.from_response_dict(result[\"\"]),experimental_design=ServiceStatus.from_response_dict(result[\"\"]),data_reports=ServiceStatus.from_response_dict(result[\"\"]),model_reports=ServiceStatus.from_response_dict(result[\"\"]))", "docstring": "Retrieves the status for all of the services associated with a data view:\n - predict\n - experimental_design\n - data_reports\n - model_reports\n\n:param data_view_id: The ID number of the data view to which the\n run belongs, as a string\n:type data_view_id: str\n:return: A :class:`DataViewStatus`\n:rtype: DataViewStatus", "id": "f3579:c0:m5"} {"signature": "def create_ml_configuration_from_datasets(self, dataset_ids):", "body": "available_columns = self.search_template_client.get_available_columns(dataset_ids)search_template = self.search_template_client.create(dataset_ids, available_columns)return self.create_ml_configuration(search_template, available_columns, dataset_ids)", "docstring": "Creates an ml configuration from dataset_ids and extract_as_keys\n\n:param dataset_ids: Array of dataset identifiers to make search template from\n:return: An identifier used to request the status of the builder job (get_ml_configuration_status)", "id": "f3579:c0:m6"} {"signature": "def create_ml_configuration(self, search_template, extract_as_keys, dataset_ids):", "body": "data = {\"\":search_template,\"\":extract_as_keys}failure_message = \"\"config_job_id = self._get_success_json(self._post_json('', data, failure_message=failure_message))['']['']['']while True:config_status = self.__get_ml_configuration_status(config_job_id)print('', config_status)if config_status[''] == '':ml_config = self.__convert_response_to_configuration(config_status[''], dataset_ids)return ml_configtime.sleep()", "docstring": "This method will spawn a server job to create a default ML configuration based on a search template and\nthe extract as keys.\nThis function will submit the request to build, and wait for the configuration to finish before returning.\n\n:param search_template: A search template defining the query (properties, datasets etc)\n:param extract_as_keys: Array of extract-as keys defining the descriptors\n:param dataset_ids: Array of dataset identifiers to make search 
template from\n:return: An identifier used to request the status of the builder job (get_ml_configuration_status)", "id": "f3579:c0:m7"} {"signature": "def __convert_response_to_configuration(self, result_blob, dataset_ids):", "body": "builder = DataViewBuilder()builder.dataset_ids(dataset_ids)for i, (k, v) in enumerate(result_blob[''].items()):try:descriptor = self.__snake_case(v[])print(json.dumps(descriptor))descriptor[''] = kbuilder.add_raw_descriptor(descriptor)except IndexError:passfor i, (k, v) in enumerate(result_blob[''].items()):builder.set_role(k, v.lower())return builder.build()", "docstring": "Utility function to turn the result object from the configuration builder endpoint into something that\ncan be used directly as a configuration.\n\n:param result_blob: Nested dicts representing the possible descriptors\n:param dataset_ids: Array of dataset identifiers to make search template from\n:return: An object suitable to be used as a parameter to data view create", "id": "f3579:c0:m8"} {"signature": "def __snake_case(self, descriptor):", "body": "newdict = {}for i, (k, v) in enumerate(descriptor.items()):newkey = \"\"for j, c in enumerate(k):if c.isupper():if len(newkey) != :newkey += ''newkey += c.lower()else:newkey += cnewdict[newkey] = vreturn newdict", "docstring": "Utility method to convert camelCase to snake_case\n:param descriptor: The dictionary to convert", "id": "f3579:c0:m9"} {"signature": "def __get_ml_configuration_status(self, job_id):", "body": "failure_message = \"\"response = self._get_success_json(self._get('' + job_id + '', None, failure_message=failure_message))['']return response", "docstring": "After invoking the create_ml_configuration async method, you can use this method to\ncheck on the status of the builder job.\n\n:param job_id: The identifier returned from create_ml_configuration\n:return: Job status", "id": "f3579:c0:m10"} {"signature": "def validate(self, ml_template):", "body": "data = {\"\":ml_template}failure_message = \"\"res = self._get_success_json(self._post_json('', data, failure_message=failure_message))['']if res['']:return ''return res['']", "docstring": "Runs the template against the validation endpoint, returns a message indicating status of the template\n\n:param ml_template: Template to validate\n:return: OK or error message if validation failed", "id": "f3580:c0:m1"} {"signature": "def dataset_ids(self, dataset_ids):", "body": "self.configuration[''] = dataset_ids", "docstring": "Sets the dataset ids to use for the view\n\n:param dataset_ids: Array of strings, one for each dataset id", "id": "f3581:c0:m1"} {"signature": "def model_type(self, model_type):", "body": "self.configuration[''] = model_type", "docstring": "Sets the model type for the view\n\n:param model_type: A string of either linear, or default", "id": "f3581:c0:m2"} {"signature": "def add_descriptor(self, descriptor, role='', group_by_key=False):", "body": "descriptor.validate()if descriptor.key in self.configuration[\"\"]:raise ValueError(\"\")self.configuration[''].append(descriptor.as_dict())self.configuration[\"\"][descriptor.key] = roleif group_by_key:self.configuration[\"\"].append(descriptor.key)", "docstring": "Add a descriptor column.\n\n:param descriptor: A Descriptor instance (e.g., RealDescriptor, InorganicDescriptor, etc.)\n:param role: Specify a role (input, output, latentVariable, or ignore)\n:param group_by_key: Whether or not to group by this key during cross validation", "id": "f3581:c0:m3"} {"signature": "def add_raw_descriptor(self, descriptor):", "body": 
"self.configuration[''].append(descriptor)", "docstring": "Add a raw descriptor dictionary object.\n:param descriptor: A descriptor as a dictionary", "id": "f3581:c0:m4"} {"signature": "def get_available_columns(self, dataset_ids):", "body": "if not isinstance(dataset_ids, list):dataset_ids = [dataset_ids]data = {\"\":dataset_ids}failure_message = \"\".format(dataset_ids)return self._get_success_json(self._post_json('', data, failure_message=failure_message))['']", "docstring": "Retrieves the set of columns from the combination of dataset ids given\n\n:param dataset_ids: The id of the dataset to retrieve columns from\n:type dataset_ids: list of int\n:return: A list of column names from the dataset ids given.\n:rtype: list of str", "id": "f3582:c0:m2"} {"signature": "def __generate_search_template(self, dataset_ids):", "body": "data = {\"\":dataset_ids}failure_message = \"\".format(dataset_ids)return self._get_success_json(self._post_json('', data, failure_message=failure_message))['']", "docstring": "Generates a default search templates from the available columns in the dataset ids given.\n\n:param dataset_ids: The id of the dataset to retrieve files from\n:type dataset_ids: list of int\n:return: A search template based on the columns in the datasets given", "id": "f3582:c0:m3"} {"signature": "def __prune_search_template(self, extract_as_keys, search_template):", "body": "data = {\"\":extract_as_keys,\"\":search_template}failure_message = \"\"return self._get_success_json(self._post_json('', data, failure_message=failure_message))['']", "docstring": "Returns a new search template, but the new template has only the extract_as_keys given.\n\n:param extract_as_keys: List of extract as keys to keep\n:param search_template: The search template to prune\n:return: New search template with pruned columns", "id": "f3582:c0:m4"} {"signature": "def _get_notation(notation):", "body": "return _NOTATION_KEYS.get(notation, None)", "docstring": "Given a numeric value or string value, returns one in IP_DOT, IP_HEX,\n IP_BIN, etc., or None if unable to convert to the internally\n used numeric convention.", "id": "f3601:m0"} {"signature": "def p_notation(notation):", "body": "return NOTATION_MAP[_get_notation(notation) or IP_UNKNOWN][]", "docstring": "Return a string representing the given notation.", "id": "f3601:m1"} {"signature": "def is_dot(ip):", "body": "octets = str(ip).split('')if len(octets) != :return Falsefor i in octets:try:val = int(i)except ValueError:return Falseif val > or val < :return Falsereturn True", "docstring": "Return true if the IP address is in dotted decimal notation.", "id": "f3601:m2"} {"signature": "def is_hex(ip):", "body": "try:dec = int(str(ip), )except (TypeError, ValueError):return Falseif dec > or dec < :return Falsereturn True", "docstring": "Return true if the IP address is in hexadecimal notation.", "id": "f3601:m3"} {"signature": "def is_bin(ip):", "body": "try:ip = str(ip)if len(ip) != :return Falsedec = int(ip, )except (TypeError, ValueError):return Falseif dec > or dec < :return Falsereturn True", "docstring": "Return true if the IP address is in binary notation.", "id": "f3601:m4"} {"signature": "def is_oct(ip):", "body": "try:dec = int(str(ip), )except (TypeError, ValueError):return Falseif dec > or dec < :return Falsereturn True", "docstring": "Return true if the IP address is in octal notation.", "id": "f3601:m5"} {"signature": "def is_dec(ip):", "body": "try:dec = int(str(ip))except ValueError:return Falseif dec > or dec < :return Falsereturn True", 
"docstring": "Return true if the IP address is in decimal notation.", "id": "f3601:m6"} {"signature": "def _check_nm(nm, notation):", "body": "_NM_CHECK_FUNCT = {NM_DOT: _dot_to_dec,NM_HEX: _hex_to_dec,NM_BIN: _bin_to_dec,NM_OCT: _oct_to_dec,NM_DEC: _dec_to_dec_long}try:dec = _NM_CHECK_FUNCT[notation](nm, check=True)except ValueError:return Falseif dec in _NETMASKS_VALUES:return Truereturn False", "docstring": "Function internally used to check if the given netmask\n is of the specified notation.", "id": "f3601:m7"} {"signature": "def is_dot_nm(nm):", "body": "return _check_nm(nm, NM_DOT)", "docstring": "Return true if the netmask is in dotted decimal notatation.", "id": "f3601:m8"} {"signature": "def is_hex_nm(nm):", "body": "return _check_nm(nm, NM_HEX)", "docstring": "Return true if the netmask is in hexadecimal notatation.", "id": "f3601:m9"} {"signature": "def is_bin_nm(nm):", "body": "return _check_nm(nm, NM_BIN)", "docstring": "Return true if the netmask is in binary notatation.", "id": "f3601:m10"} {"signature": "def is_oct_nm(nm):", "body": "return _check_nm(nm, NM_OCT)", "docstring": "Return true if the netmask is in octal notatation.", "id": "f3601:m11"} {"signature": "def is_dec_nm(nm):", "body": "return _check_nm(nm, NM_DEC)", "docstring": "Return true if the netmask is in decimal notatation.", "id": "f3601:m12"} {"signature": "def is_bits_nm(nm):", "body": "try:bits = int(str(nm))except ValueError:return Falseif bits > or bits < :return Falsereturn True", "docstring": "Return true if the netmask is in bits notatation.", "id": "f3601:m13"} {"signature": "def is_wildcard_nm(nm):", "body": "try:dec = - _dot_to_dec(nm, check=True)except ValueError:return Falseif dec in _NETMASKS_VALUES:return Truereturn False", "docstring": "Return true if the netmask is in wildcard bits notatation.", "id": "f3601:m14"} {"signature": "def _dot_to_dec(ip, check=True):", "body": "if check and not is_dot(ip):raise ValueError('' % ip)octets = str(ip).split('')dec = dec |= int(octets[]) << dec |= int(octets[]) << dec |= int(octets[]) << dec |= int(octets[])return dec", "docstring": "Dotted decimal notation to decimal conversion.", "id": "f3601:m15"} {"signature": "def _dec_to_dot(ip):", "body": "first = int((ip >> ) & )second = int((ip >> ) & )third = int((ip >> ) & )fourth = int(ip & )return '' % (first, second, third, fourth)", "docstring": "Decimal to dotted decimal notation conversion.", "id": "f3601:m16"} {"signature": "def _hex_to_dec(ip, check=True):", "body": "if check and not is_hex(ip):raise ValueError('' % ip)if isinstance(ip, int):ip = hex(ip)return int(str(ip), )", "docstring": "Hexadecimal to decimal conversion.", "id": "f3601:m17"} {"signature": "def _dec_to_hex(ip):", "body": "return hex(ip)", "docstring": "Decimal to hexadecimal conversion.", "id": "f3601:m18"} {"signature": "def _oct_to_dec(ip, check=True):", "body": "if check and not is_oct(ip):raise ValueError('' % ip)if isinstance(ip, int):ip = oct(ip)return int(str(ip), )", "docstring": "Octal to decimal conversion.", "id": "f3601:m19"} {"signature": "def _dec_to_oct(ip):", "body": "return oct(ip)", "docstring": "Decimal to octal conversion.", "id": "f3601:m20"} {"signature": "def _bin_to_dec(ip, check=True):", "body": "if check and not is_bin(ip):raise ValueError('' % ip)if isinstance(ip, int):ip = str(ip)return int(str(ip), )", "docstring": "Binary to decimal conversion.", "id": "f3601:m21"} {"signature": "def _BYTES_TO_BITS():", "body": "the_table = *[None]bits_per_byte = list(range(, -, -))for n in range():l = nbits = 
*[None]for i in bits_per_byte:bits[i] = ''[n & ]n >>= the_table[l] = ''.join(bits)return the_table", "docstring": "Generate a table to convert a whole byte to binary.\n This code was taken from the Python Cookbook, 2nd edition - O'Reilly.", "id": "f3601:m22"} {"signature": "def _dec_to_bin(ip):", "body": "bits = []while ip:bits.append(_BYTES_TO_BITS[ip & ])ip >>= bits.reverse()return ''.join(bits) or *''", "docstring": "Decimal to binary conversion.", "id": "f3601:m23"} {"signature": "def _dec_to_dec_long(ip, check=True):", "body": "if check and not is_dec(ip):raise ValueError('' % ip)return int(str(ip))", "docstring": "Decimal to decimal (long) conversion.", "id": "f3601:m24"} {"signature": "def _dec_to_dec_str(ip):", "body": "return str(ip)", "docstring": "Decimal to decimal (string) conversion.", "id": "f3601:m25"} {"signature": "def _bits_to_dec(nm, check=True):", "body": "if check and not is_bits_nm(nm):raise ValueError('' % nm)bits = int(str(nm))return VALID_NETMASKS[bits]", "docstring": "Bits to decimal conversion.", "id": "f3601:m26"} {"signature": "def _dec_to_bits(nm):", "body": "return str(_NETMASKS_INV[nm])", "docstring": "Decimal to bits conversion.", "id": "f3601:m27"} {"signature": "def _wildcard_to_dec(nm, check=False):", "body": "if check and not is_wildcard_nm(nm):raise ValueError('' % nm)return - _dot_to_dec(nm, check=False)", "docstring": "Wildcard bits to decimal conversion.", "id": "f3601:m28"} {"signature": "def _dec_to_wildcard(nm):", "body": "return _dec_to_dot( - nm)", "docstring": "Decimal to wildcard bits conversion.", "id": "f3601:m29"} {"signature": "def _is_notation(ip, notation, _isnm):", "body": "notation_orig = notationnotation = _get_notation(notation)if notation not in _CHECK_FUNCT_KEYS:raise ValueError('' % notation_orig)return _CHECK_FUNCT[notation][_isnm](ip)", "docstring": "Internally used to check if an IP/netmask is in the given notation.", "id": "f3601:m30"} {"signature": "def is_notation(ip, notation):", "body": "return _is_notation(ip, notation, _isnm=False)", "docstring": "Return true if the given address is in the given notation.", "id": "f3601:m31"} {"signature": "def is_notation_nm(nm, notation):", "body": "return _is_notation(nm, notation, _isnm=True)", "docstring": "Return true if the given netmask is in the given notation.", "id": "f3601:m32"} {"signature": "def _detect(ip, _isnm):", "body": "ip = str(ip)if len(ip) > :if ip[:] == '':if _CHECK_FUNCT[IP_HEX][_isnm](ip):return IP_HEXelif ip[] == '':if _CHECK_FUNCT[IP_OCT][_isnm](ip):return IP_OCTif _CHECK_FUNCT[IP_DOT][_isnm](ip):return IP_DOTelif _isnm and _CHECK_FUNCT[NM_BITS][_isnm](ip):return NM_BITSelif _CHECK_FUNCT[IP_DEC][_isnm](ip):return IP_DECelif _isnm and _CHECK_FUNCT[NM_WILDCARD][_isnm](ip):return NM_WILDCARDelif _CHECK_FUNCT[IP_BIN][_isnm](ip):return IP_BINreturn IP_UNKNOWN", "docstring": "Function internally used to detect the notation of the\n given IP or netmask.", "id": "f3601:m33"} {"signature": "def detect(ip):", "body": "return _detect(ip, _isnm=False)", "docstring": "Detect the notation of an IP address.\n\n @param ip: the IP address.\n @type ip: integers, strings or object with an appropriate __str()__ method.\n @return: one of the IP_* constants; IP_UNKNOWN if undetected.", "id": "f3601:m34"} {"signature": "def detect_nm(nm):", "body": "return _detect(nm, _isnm=True)", "docstring": "Detect the notation of a netmask.\n @param nm: the netmask.\n @type nm: integers, strings or object with an appropriate __str()__ method.\n @return: one of the NM_* constants; NM_UNKNOWN 
if undetected.", "id": "f3601:m35"} {"signature": "def p_detect(ip):", "body": "return NOTATION_MAP[detect(ip)][]", "docstring": "Return the notation of an IP address (string).", "id": "f3601:m36"} {"signature": "def p_detect_nm(nm):", "body": "return NOTATION_MAP[detect_nm(nm)][]", "docstring": "Return the notation of a netmask (string).", "id": "f3601:m37"} {"signature": "def _convert(ip, notation, inotation, _check, _isnm):", "body": "inotation_orig = inotationnotation_orig = notationinotation = _get_notation(inotation)notation = _get_notation(notation)if inotation is None:raise ValueError('' % inotation_orig)if notation is None:raise ValueError('' % notation_orig)docheck = _check or Falseif inotation == IP_UNKNOWN:inotation = _detect(ip, _isnm)if inotation == IP_UNKNOWN:raise ValueError('')if _check is None:docheck = Trueif _isnm:docheck = Falsedec = if inotation == IP_DOT:dec = _dot_to_dec(ip, docheck)elif inotation == IP_HEX:dec = _hex_to_dec(ip, docheck)elif inotation == IP_BIN:dec = _bin_to_dec(ip, docheck)elif inotation == IP_OCT:dec = _oct_to_dec(ip, docheck)elif inotation == IP_DEC:dec = _dec_to_dec_long(ip, docheck)elif _isnm and inotation == NM_BITS:dec = _bits_to_dec(ip, docheck)elif _isnm and inotation == NM_WILDCARD:dec = _wildcard_to_dec(ip, docheck)else:raise ValueError('' % inotation_orig)if _isnm and dec not in _NETMASKS_VALUES:raise ValueError('' % ip)if notation == IP_DOT:return _dec_to_dot(dec)elif notation == IP_HEX:return _dec_to_hex(dec)elif notation == IP_BIN:return _dec_to_bin(dec)elif notation == IP_OCT:return _dec_to_oct(dec)elif notation == IP_DEC:return _dec_to_dec_str(dec)elif _isnm and notation == NM_BITS:return _dec_to_bits(dec)elif _isnm and notation == NM_WILDCARD:return _dec_to_wildcard(dec)else:raise ValueError('' % notation_orig)", "docstring": "Internally used to convert IPs and netmasks to other notations.", "id": "f3601:m38"} {"signature": "def convert(ip, notation=IP_DOT, inotation=IP_UNKNOWN, check=True):", "body": "return _convert(ip, notation, inotation, _check=check, _isnm=False)", "docstring": "Convert among IP address notations.\n\n Given an IP address, this function returns the address\n in another notation.\n\n @param ip: the IP address.\n @type ip: integers, strings or object with an appropriate __str()__ method.\n\n @param notation: the notation of the output (default: IP_DOT).\n @type notation: one of the IP_* constants, or the equivalent strings.\n\n @param inotation: force the input to be considered in the given notation\n (default the notation of the input is autodetected).\n @type inotation: one of the IP_* constants, or the equivalent strings.\n\n @param check: force the notation check on the input.\n @type check: True force the check, False force not to check and None\n do the check only if the inotation is unknown.\n\n @return: a string representing the IP in the selected notation.\n\n @raise ValueError: raised when the input is in unknown notation.", "id": "f3601:m39"} {"signature": "def convert_nm(nm, notation=IP_DOT, inotation=IP_UNKNOWN, check=True):", "body": "return _convert(nm, notation, inotation, _check=check, _isnm=True)", "docstring": "Convert a netmask to another notation.", "id": "f3601:m40"} {"signature": "def __init__(self, ip, notation=IP_UNKNOWN):", "body": "self.set(ip, notation)", "docstring": "Initialize the object.", "id": "f3601:c0:m0"} {"signature": "def set(self, ip, notation=IP_UNKNOWN):", "body": "self._ip_dec = int(_convert(ip, notation=IP_DEC, inotation=notation,_check=True, _isnm=self._isnm))self._ip 
= _convert(self._ip_dec, notation=IP_DOT, inotation=IP_DEC,_check=False, _isnm=self._isnm)", "docstring": "Set the IP address/netmask.", "id": "f3601:c0:m1"} {"signature": "def get(self):", "body": "return self.get_dot()", "docstring": "Return the address/netmask.", "id": "f3601:c0:m2"} {"signature": "def get_dot(self):", "body": "return self._ip", "docstring": "Return the dotted decimal notation of the address/netmask.", "id": "f3601:c0:m3"} {"signature": "def get_hex(self):", "body": "return _convert(self._ip_dec, notation=IP_HEX,inotation=IP_DEC, _check=False, _isnm=self._isnm)", "docstring": "Return the hexadecimal notation of the address/netmask.", "id": "f3601:c0:m4"} {"signature": "def get_bin(self):", "body": "return _convert(self._ip_dec, notation=IP_BIN,inotation=IP_DEC, _check=False, _isnm=self._isnm)", "docstring": "Return the binary notation of the address/netmask.", "id": "f3601:c0:m5"} {"signature": "def get_dec(self):", "body": "return str(self._ip_dec)", "docstring": "Return the decimal notation of the address/netmask.", "id": "f3601:c0:m6"} {"signature": "def get_oct(self):", "body": "return _convert(self._ip_dec, notation=IP_OCT,inotation=IP_DEC, _check=False, _isnm=self._isnm)", "docstring": "Return the octal notation of the address/netmask.", "id": "f3601:c0:m7"} {"signature": "def __str__(self):", "body": "return self.get()", "docstring": "Print this address/netmask.", "id": "f3601:c0:m8"} {"signature": "def _cmp_prepare(self, other):", "body": "if isinstance(other, self.__class__):return other._ip_decelif isinstance(other, int):return otherreturn self.__class__(other)._ip_dec", "docstring": "Prepare the item to be compared with this address/netmask.", "id": "f3601:c0:m9"} {"signature": "def __int__(self):", "body": "return self._ip_dec", "docstring": "Return the decimal representation of the address/netmask.", "id": "f3601:c0:m16"} {"signature": "def __repr__(self):", "body": "return '' % self.get()", "docstring": "The representation string for this address.", "id": "f3601:c1:m0"} {"signature": "def _add(self, other):", "body": "if isinstance(other, self.__class__):sum_ = self._ip_dec + other._ip_decelif isinstance(other, int):sum_ = self._ip_dec + otherelse:other = self.__class__(other)sum_ = self._ip_dec + other._ip_decreturn sum_", "docstring": "Sum two IP addresses.", "id": "f3601:c1:m1"} {"signature": "def __add__(self, other):", "body": "return IPv4Address(self._add(other), notation=IP_DEC)", "docstring": "Sum two IP addresses.", "id": "f3601:c1:m2"} {"signature": "def __iadd__(self, other):", "body": "self.set(self._add(other), notation=IP_DEC)return self", "docstring": "Augmented arithmetic sum.", "id": "f3601:c1:m3"} {"signature": "def _sub(self, other):", "body": "if isinstance(other, self.__class__):sub = self._ip_dec - other._ip_decif isinstance(other, int):sub = self._ip_dec - otherelse:other = self.__class__(other)sub = self._ip_dec - other._ip_decreturn sub", "docstring": "Subtract two IP addresses.", "id": "f3601:c1:m4"} {"signature": "def __sub__(self, other):", "body": "return IPv4Address(self._sub(other), notation=IP_DEC)", "docstring": "Subtract two IP addresses.", "id": "f3601:c1:m5"} {"signature": "def __isub__(self, other):", "body": "self.set(self._sub(other), notation=IP_DEC)return self", "docstring": "Augmented arithmetic subtraction.", "id": "f3601:c1:m6"} {"signature": "def get_bits(self):", "body": "return _convert(self._ip, notation=NM_BITS,inotation=IP_DOT, _check=False, _isnm=self._isnm)", "docstring": "Return the bits notation of the 
netmask.", "id": "f3601:c2:m0"} {"signature": "def get_wildcard(self):", "body": "return _convert(self._ip, notation=NM_WILDCARD,inotation=IP_DOT, _check=False, _isnm=self._isnm)", "docstring": "Return the wildcard bits notation of the netmask.", "id": "f3601:c2:m1"} {"signature": "def __repr__(self):", "body": "return '' % self.get()", "docstring": "The representation string for this netmask.", "id": "f3601:c2:m2"} {"signature": "def set(self, ip, netmask=None):", "body": "if isinstance(ip, str) and netmask is None:ipnm = ip.split('')if len(ipnm) != :raise ValueError('' % ip)ip = ipnm[]netmask = ipnm[]if isinstance(ip, IPv4Address):self._ip = ipelse:self._ip = IPv4Address(ip)if isinstance(netmask, IPv4NetMask):self._nm = netmaskelse:self._nm = IPv4NetMask(netmask)ipl = int(self._ip)nml = int(self._nm)base_add = ipl & nmlself._ip_num = - - nmlif self._ip_num in (-, ):if self._ip_num == -:self._ip_num = else:self._ip_num = self._net_ip = Noneself._bc_ip = Noneself._first_ip_dec = base_addself._first_ip = IPv4Address(self._first_ip_dec, notation=IP_DEC)if self._ip_num == :last_ip_dec = self._first_ip_decelse:last_ip_dec = self._first_ip_dec + self._last_ip = IPv4Address(last_ip_dec, notation=IP_DEC)returnself._net_ip = IPv4Address(base_add, notation=IP_DEC)self._bc_ip = IPv4Address(base_add + self._ip_num + , notation=IP_DEC)self._first_ip_dec = base_add + self._first_ip = IPv4Address(self._first_ip_dec, notation=IP_DEC)self._last_ip = IPv4Address(base_add + self._ip_num, notation=IP_DEC)", "docstring": "Set the IP address and the netmask.", "id": "f3601:c3:m1"} {"signature": "def get(self):", "body": "return '' % (str(self._ip), str(self._nm))", "docstring": "Print this CIDR address.", "id": "f3601:c3:m2"} {"signature": "def set_ip(self, ip):", "body": "self.set(ip=ip, netmask=self._nm)", "docstring": "Change the current IP.", "id": "f3601:c3:m3"} {"signature": "def get_ip(self):", "body": "return self._ip", "docstring": "Return the given address.", "id": "f3601:c3:m4"} {"signature": "def set_netmask(self, netmask):", "body": "self.set(ip=self._ip, netmask=netmask)", "docstring": "Change the current netmask.", "id": "f3601:c3:m5"} {"signature": "def get_netmask(self):", "body": "return self._nm", "docstring": "Return the netmask.", "id": "f3601:c3:m6"} {"signature": "def get_first_ip(self):", "body": "return self._first_ip", "docstring": "Return the first usable IP address.", "id": "f3601:c3:m7"} {"signature": "def get_last_ip(self):", "body": "return self._last_ip", "docstring": "Return the last usable IP address.", "id": "f3601:c3:m8"} {"signature": "def get_network_ip(self):", "body": "return self._net_ip", "docstring": "Return the network address.", "id": "f3601:c3:m9"} {"signature": "def get_broadcast_ip(self):", "body": "return self._bc_ip", "docstring": "Return the broadcast address.", "id": "f3601:c3:m10"} {"signature": "def get_ip_number(self):", "body": "return self._ip_num", "docstring": "Return the number of usable IP addresses.", "id": "f3601:c3:m11"} {"signature": "def get_all_valid_ip(self):", "body": "return list(self.__iter__())", "docstring": "Return a list of IPv4Address objects, one for every usable IP.\n\n WARNING: it's slow and can take a huge amount of memory for\n subnets with a large number of addresses.\n Use __iter__ instead ('for ip in ...').", "id": "f3601:c3:m12"} {"signature": "def is_valid_ip(self, ip):", "body": "if not isinstance(ip, (IPv4Address, CIDR)):if str(ip).find('') == -:ip = IPv4Address(ip)else:ip = CIDR(ip)if isinstance(ip, IPv4Address):if ip < 
self._first_ip or ip > self._last_ip:return Falseelif isinstance(ip, CIDR):if ip._nm._ip_dec == and self._nm._ip_dec != :compare_to_first = self._net_ip._ip_deccompare_to_last = self._bc_ip._ip_decelse:compare_to_first = self._first_ip._ip_deccompare_to_last = self._last_ip._ip_decif ip._first_ip._ip_dec < compare_to_first orip._last_ip._ip_dec > compare_to_last:return Falsereturn True", "docstring": "Return true if the given address in amongst the usable addresses,\n or if the given CIDR is contained in this one.", "id": "f3601:c3:m13"} {"signature": "def __str__(self):", "body": "return self.get()", "docstring": "Print this CIDR address.", "id": "f3601:c3:m14"} {"signature": "def __repr__(self):", "body": "return '' % (str(self.get_ip()), str(self.get_netmask()))", "docstring": "The representation string for this netmask.", "id": "f3601:c3:m15"} {"signature": "def __len__(self):", "body": "return self.get_ip_number()", "docstring": "Return the number of usable IP address.", "id": "f3601:c3:m16"} {"signature": "def __contains__(self, item):", "body": "return self.is_valid_ip(item)", "docstring": "Return true if the given address in amongst the usable addresses,\n or if the given CIDR is contained in this one.", "id": "f3601:c3:m23"} {"signature": "def __iter__(self):", "body": "for i in range(, self._ip_num):yield IPv4Address(self._first_ip_dec + i, notation=IP_DEC)", "docstring": "Iterate over IPv4Address objects, one for every usable IP.", "id": "f3601:c3:m24"} {"signature": "def guid(*args):", "body": "t = float(time.time() * )r = float(random.random()*)a = random.random() * data = str(t) + '' + str(r) + '' + str(a) + '' + str(args)data = hashlib.md5(data.encode()).hexdigest()[:]return data", "docstring": "Generates a universally unique ID.\nAny arguments only create more randomness.", "id": "f3604:m1"} {"signature": "def generate(length=DEFAULT_LENGTH):", "body": "return ''.join(random.SystemRandom().choice(ALPHABET)for _ in range(length))", "docstring": "Generate a random string of the specified length.\n\nThe returned string is composed of an alphabet that shouldn't include any\ncharacters that are easily mistakeable for one another (I, 1, O, 0), and\nhopefully won't accidentally contain any English-language curse words.", "id": "f3605:m0"} {"signature": "@classmethoddef coerce(cls, key, value):", "body": "if not isinstance(value, MutableDict):if isinstance(value, dict):return MutableDict(value)return Mutable.coerce(key, value)else:return value", "docstring": "Convert plain dictionaries to MutableDict.", "id": "f3607:c4:m0"} {"signature": "def __setitem__(self, key, value):", "body": "dict.__setitem__(self, key, value)self.changed()", "docstring": "Detect dictionary set events and emit change events.", "id": "f3607:c4:m1"} {"signature": "def __delitem__(self, key):", "body": "dict.__delitem__(self, key)self.changed()", "docstring": "Detect dictionary del events and emit change events.", "id": "f3607:c4:m2"} {"signature": "@compiles(utcnow)def _default_utcnow(element, compiler, **kw):", "body": "return \"\"", "docstring": "default compilation handler.\n\n Note that there is no SQL \"utcnow()\" function; this is a\n \"fake\" string so that we can produce SQL strings that are dialect-agnostic,\n such as within tests.", "id": "f3608:m0"} {"signature": "@compiles(utcnow, '')def _sqlite_utcnow(element, compiler, **kw):", "body": "return \"\"", "docstring": "Mysql-specific compilation handler.", "id": "f3608:m1"} {"signature": "@compiles(utcnow, '')def _sqlite_utcnow(element, compiler, 
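The CIDR records above compute the network, broadcast, first/last usable addresses and membership tests from an address/netmask pair. The standard-library ipaddress module provides the same information and is used here only to illustrate what those methods return; it is not the project's own class:

import ipaddress

net = ipaddress.ip_network('192.168.0.0/24')
hosts = list(net.hosts())                           # usable addresses only
print(net.network_address)                          # 192.168.0.0   (get_network_ip)
print(net.broadcast_address)                        # 192.168.0.255 (get_broadcast_ip)
print(hosts[0], hosts[-1])                          # 192.168.0.1 192.168.0.254 (first/last usable)
print(len(hosts))                                   # 254           (get_ip_number)
print(ipaddress.ip_address('192.168.0.42') in net)  # True          (__contains__ / is_valid_ip)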
**kw):", "body": "return \"\"", "docstring": "SQLite-specific compilation handler.", "id": "f3608:m2"} {"signature": "@compiles(utcnow, '')def _pg_utcnow(element, compiler, **kw):", "body": "return \"\"", "docstring": "Postgresql-specific compilation handler.", "id": "f3608:m3"} {"signature": "@classmethoddef __declare_first__(cls):", "body": "for lcl, rmt in cls._to_ref:cls._decl_class_registry[lcl]._reference_table(cls._decl_class_registry[rmt].__table__)cls._to_ref.clear()", "docstring": "declarative hook called within the 'before_configure' mapper event.", "id": "f3608:c0:m0"} {"signature": "@classmethoddef _reference_table(cls, ref_table):", "body": "cols = [(sa.Column(), refcol) for refcol in ref_table.primary_key]for col, refcol in cols:setattr(cls, \"\" % (ref_table.name, refcol.name), col)cls.__table__.append_constraint(sa.ForeignKeyConstraint(*zip(*cols)))", "docstring": "Create a foreign key reference from the local class to the given remote\n table.\n\n Adds column references to the declarative class and adds a\n ForeignKeyConstraint.", "id": "f3608:c0:m1"} {"signature": "def __json__(self, request):", "body": "props = {}json_eager_load = set(getattr(self, '', []))for prop in json_eager_load:getattr(self, prop, None)options = self.__dict__.copy()blacklist = set(getattr(self, '', []))blacklist.update(getattr(self, '', []))for key in options:if key in blacklist:continueif key.startswith(('', '')):continueobj = getattr(self, key)if isinstance(obj, (datetime, date, time)):props[key] = obj.isoformat()continueattr = getattr(self, key)if key in json_eager_load and attr:if hasattr(attr, ''):props[key] = self.__try_to_json(request, attr)else:props[key] = [self.__try_to_json(request, x) for x in attr]continueif attr and not isinstance(attr, (int, float)):try:props[key] = str(attr)except UnicodeEncodeError:props[key] = unicode(attr) continueprops[key] = attrreturn props", "docstring": "Main JSONify method\n\n:param request: Pyramid Request object\n:type request: \n:return: dictionary ready to be jsonified\n:rtype: ", "id": "f3608:c1:m0"} {"signature": "def __try_to_json(self, request, attr):", "body": "if hasattr(attr, ''):return attr.__json__(request)raise TypeError('' % str(attr))", "docstring": "Try to run __json__ on the given object.\nRaise TypeError is __json__ is missing\n\n:param request: Pyramid Request object\n:type request: \n:param obj: Object to JSONify\n:type obj: any object that has __json__ method\n:exception: TypeError", "id": "f3608:c1:m1"} {"signature": "def get_tm_session(session_factory, transaction_manager):", "body": "dbsession = session_factory()zope.sqlalchemy.register(dbsession, transaction_manager=transaction_manager)return dbsession", "docstring": "Get a ``sqlalchemy.orm.Session`` instance backed by a transaction.\n\nThis function will hook the session to the transaction manager which\nwill take care of committing any changes.\n\n- When using pyramid_tm it will automatically be committed or aborted\n depending on whether an exception is raised.\n\n- When using scripts you should wrap the session in a manager yourself.\n For example::\n\n import transaction\n\n engine = get_engine(settings)\n session_factory = get_session_factory(engine)\n with transaction.manager:\n dbsession = get_tm_session(session_factory, transaction.manager)", "id": "f3609:m2"} {"signature": "def many_to_one(clsname, **kw):", "body": "@declared_attrdef m2o(cls):cls._references((cls.__name__, clsname))return relationship(clsname, **kw)return m2o", "docstring": "Use an event to build a 
many-to-one relationship on a class.\n\n This makes use of the :meth:`.References._reference_table` method\n to generate a full foreign key relationship to the remote table.", "id": "f3611:m0"} {"signature": "def one_to_many(clsname, **kw):", "body": "@declared_attrdef o2m(cls):cls._references((clsname, cls.__name__))return relationship(clsname, **kw)return o2m", "docstring": "Use an event to build a one-to-many relationship on a class.\n\n This makes use of the :meth:`.References._reference_table` method\n to generate a full foreign key relationship from the remote table.", "id": "f3611:m1"} {"signature": "def includeme(config):", "body": "settings = config.get_settings()should_create = asbool(settings.get('', False))should_drop = asbool(settings.get('', False))config.add_settings({\"\": ,\"\": tm_activate_hook,\"\": False,})config.include('')config.include('')engine = get_engine(settings)session_factory = get_session_factory(engine)config.registry[''] = session_factoryconfig.add_request_method(lambda r: get_tm_session(session_factory, r.tm),'',reify=True)config.include('')config.action(None, bind_engine, (engine,), {'': should_create,'': should_drop}, order=)", "docstring": "Initialize the model for a Pyramid app.\n\nActivate this setup using ``config.include('baka_model')``.", "id": "f3612:m1"} {"signature": "def get_version():", "body": "with open(VERSION_FILE, encoding='') as fp:content = fp.read()match = re.search(r'', content, re.M)if match:return match.group()raise RuntimeError(\"\")", "docstring": "Extract package __version__", "id": "f3614:m0"} {"signature": "def get_keywords():", "body": "git_refnames = \"\"git_full = \"\"git_date = \"\"keywords = {\"\": git_refnames, \"\": git_full, \"\": git_date}return keywords", "docstring": "Get the keywords needed to look up the version information.", "id": "f3617:m0"} {"signature": "def get_config():", "body": "cfg = VersioneerConfig()cfg.VCS = \"\"cfg.style = \"\"cfg.tag_prefix = \"\"cfg.parentdir_prefix = \"\"cfg.versionfile_source = \"\"cfg.verbose = Falsereturn cfg", "docstring": "Create, populate and return the VersioneerConfig() object.", "id": "f3617:m1"} {"signature": "def register_vcs_handler(vcs, method): ", "body": "def decorate(f):\"\"\"\"\"\"if vcs not in HANDLERS:HANDLERS[vcs] = {}HANDLERS[vcs][method] = freturn freturn decorate", "docstring": "Decorator to mark a method as the handler for a particular VCS.", "id": "f3617:m2"} {"signature": "def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,env=None):", "body": "assert isinstance(commands, list)p = Nonefor c in commands:try:dispcmd = str([c] + args)p = subprocess.Popen([c] + args, cwd=cwd, env=env,stdout=subprocess.PIPE,stderr=(subprocess.PIPE if hide_stderrelse None))breakexcept EnvironmentError:e = sys.exc_info()[]if e.errno == errno.ENOENT:continueif verbose:print(\"\" % dispcmd)print(e)return None, Noneelse:if verbose:print(\"\" % (commands,))return None, Nonestdout = p.communicate()[].strip()if sys.version_info[] >= :stdout = stdout.decode()if p.returncode != :if verbose:print(\"\" % dispcmd)print(\"\" % stdout)return None, p.returncodereturn stdout, p.returncode", "docstring": "Call the given command(s).", "id": "f3617:m3"} {"signature": "def versions_from_parentdir(parentdir_prefix, root, verbose):", "body": "rootdirs = []for i in range():dirname = os.path.basename(root)if dirname.startswith(parentdir_prefix):return {\"\": dirname[len(parentdir_prefix):],\"\": None,\"\": False, \"\": None, \"\": None}else:rootdirs.append(root)root = 
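The get_version record above (f3614:m0) extracts __version__ from a source file with a regular expression rather than importing the package. A hedged sketch of that approach; the file path and the exact pattern are assumptions, since the original literals were stripped:

import re

def get_version_sketch(path='mypackage/__init__.py'):
    # Read the file and pull the __version__ assignment out with a regex.
    with open(path, encoding='utf-8') as fp:
        content = fp.read()
    match = re.search(r"^__version__\s*=\s*['\"]([^'\"]+)['\"]", content, re.M)
    if match:
        return match.group(1)
    raise RuntimeError('Unable to find a __version__ string in %s' % path)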
os.path.dirname(root) if verbose:print(\"\" %(str(rootdirs), parentdir_prefix))raise NotThisMethod(\"\")", "docstring": "Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes both\n the project name and a version string. We will also support searching up\n two directory levels for an appropriately named parent directory", "id": "f3617:m4"} {"signature": "@register_vcs_handler(\"\", \"\")def git_get_keywords(versionfile_abs):", "body": "keywords = {}try:f = open(versionfile_abs, \"\")for line in f.readlines():if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()f.close()except EnvironmentError:passreturn keywords", "docstring": "Extract version information from the given file.", "id": "f3617:m5"} {"signature": "@register_vcs_handler(\"\", \"\")def git_versions_from_keywords(keywords, tag_prefix, verbose):", "body": "if not keywords:raise NotThisMethod(\"\")date = keywords.get(\"\")if date is not None:date = date.strip().replace(\"\", \"\", ).replace(\"\", \"\", )refnames = keywords[\"\"].strip()if refnames.startswith(\"\"):if verbose:print(\"\")raise NotThisMethod(\"\")refs = set([r.strip() for r in refnames.strip(\"\").split(\"\")])TAG = \"\"tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])if not tags:tags = set([r for r in refs if re.search(r'', r)])if verbose:print(\"\" % \"\".join(refs - tags))if verbose:print(\"\" % \"\".join(sorted(tags)))for ref in sorted(tags):if ref.startswith(tag_prefix):r = ref[len(tag_prefix):]if verbose:print(\"\" % r)return {\"\": r,\"\": keywords[\"\"].strip(),\"\": False, \"\": None,\"\": date}if verbose:print(\"\")return {\"\": \"\",\"\": keywords[\"\"].strip(),\"\": False, \"\": \"\", \"\": None}", "docstring": "Get version information from git keywords.", "id": "f3617:m6"} {"signature": "@register_vcs_handler(\"\", \"\")def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):", "body": "GITS = [\"\"]if sys.platform == \"\":GITS = [\"\", \"\"]out, rc = run_command(GITS, [\"\", \"\"], cwd=root,hide_stderr=True)if rc != :if verbose:print(\"\" % root)raise NotThisMethod(\"\")describe_out, rc = run_command(GITS, [\"\", \"\", \"\",\"\", \"\",\"\", \"\" % tag_prefix],cwd=root)if describe_out is None:raise NotThisMethod(\"\")describe_out = describe_out.strip()full_out, rc = run_command(GITS, [\"\", \"\"], cwd=root)if full_out is None:raise NotThisMethod(\"\")full_out = full_out.strip()pieces = {}pieces[\"\"] = full_outpieces[\"\"] = full_out[:] pieces[\"\"] = Nonegit_describe = describe_outdirty = git_describe.endswith(\"\")pieces[\"\"] = dirtyif dirty:git_describe = git_describe[:git_describe.rindex(\"\")]if \"\" in git_describe:mo = re.search(r'', git_describe)if not mo:pieces[\"\"] = (\"\"% describe_out)return piecesfull_tag = mo.group()if not full_tag.startswith(tag_prefix):if verbose:fmt = \"\"print(fmt % (full_tag, tag_prefix))pieces[\"\"] = (\"\"% (full_tag, tag_prefix))return piecespieces[\"\"] = full_tag[len(tag_prefix):]pieces[\"\"] = int(mo.group())pieces[\"\"] = mo.group()else:pieces[\"\"] = Nonecount_out, rc = run_command(GITS, [\"\", \"\", \"\"],cwd=root)pieces[\"\"] = int(count_out) date = run_command(GITS, [\"\", \"\", \"\", \"\"],cwd=root)[].strip()pieces[\"\"] = date.strip().replace(\"\", \"\", ).replace(\"\", \"\", 
)return pieces", "docstring": "Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source tree.", "id": "f3617:m7"} {"signature": "def plus_or_dot(pieces):", "body": "if \"\" in pieces.get(\"\", \"\"):return \"\"return \"\"", "docstring": "Return a + if we don't already have one, else return a .", "id": "f3617:m8"} {"signature": "def render_pep440(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += plus_or_dot(pieces)rendered += \"\" % (pieces[\"\"], pieces[\"\"])if pieces[\"\"]:rendered += \"\"else:rendered = \"\" % (pieces[\"\"],pieces[\"\"])if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]", "id": "f3617:m9"} {"signature": "def render_pep440_pre(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\" % pieces[\"\"]else:rendered = \"\" % pieces[\"\"]return rendered", "docstring": "TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE", "id": "f3617:m10"} {"signature": "def render_pep440_post(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"rendered += plus_or_dot(pieces)rendered += \"\" % pieces[\"\"]else:rendered = \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"rendered += \"\" % pieces[\"\"]return rendered", "docstring": "TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 0.postDISTANCE[.dev0]", "id": "f3617:m11"} {"signature": "def render_pep440_old(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"else:rendered = \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Eexceptions:\n 1: no tags. 0.postDISTANCE[.dev0]", "id": "f3617:m12"} {"signature": "def render_git_describe(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\" % (pieces[\"\"], pieces[\"\"])else:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)", "id": "f3617:m13"} {"signature": "def render_git_describe_long(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]rendered += \"\" % (pieces[\"\"], pieces[\"\"])else:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always -long'.\n The distance/hash is unconditional.\n\n Exceptions:\n 1: no tags. 
HEX[-dirty] (note: no 'g' prefix)", "id": "f3617:m14"} {"signature": "def render(pieces, style):", "body": "if pieces[\"\"]:return {\"\": \"\",\"\": pieces.get(\"\"),\"\": None,\"\": pieces[\"\"],\"\": None}if not style or style == \"\":style = \"\" if style == \"\":rendered = render_pep440(pieces)elif style == \"\":rendered = render_pep440_pre(pieces)elif style == \"\":rendered = render_pep440_post(pieces)elif style == \"\":rendered = render_pep440_old(pieces)elif style == \"\":rendered = render_git_describe(pieces)elif style == \"\":rendered = render_git_describe_long(pieces)else:raise ValueError(\"\" % style)return {\"\": rendered, \"\": pieces[\"\"],\"\": pieces[\"\"], \"\": None,\"\": pieces.get(\"\")}", "docstring": "Render the given version pieces into the requested style.", "id": "f3617:m15"} {"signature": "def get_versions():", "body": "cfg = get_config()verbose = cfg.verbosetry:return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,verbose)except NotThisMethod:passtry:root = os.path.realpath(__file__)for i in cfg.versionfile_source.split(''):root = os.path.dirname(root)except NameError:return {\"\": \"\", \"\": None,\"\": None,\"\": \"\",\"\": None}try:pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)return render(pieces, cfg.style)except NotThisMethod:passtry:if cfg.parentdir_prefix:return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)except NotThisMethod:passreturn {\"\": \"\", \"\": None,\"\": None,\"\": \"\", \"\": None}", "docstring": "Get version information or return default if unable to do so.", "id": "f3617:m16"} {"signature": "def parse_line(line, document=None):", "body": "result = re.match(line_pattern, line)if result:_, lineno, offset, severity, msg = result.groups()lineno = int(lineno or )offset = int(offset or )errno = if severity == '':errno = diag = {'': '','': {'': {'': lineno - , '': offset},'': {'': lineno - , '': offset + }},'': msg,'': errno}if document:word = document.word_at_position(diag[''][''])if word:diag[''][''][''] = (diag[''][''][''] + len(word))return diag", "docstring": "Return a language-server diagnostic from a line of the Mypy error report;\noptionally, use the whole document to provide more context on it.", "id": "f3619:m0"} {"signature": "def get_root():", "body": "root = os.path.realpath(os.path.abspath(os.getcwd()))setup_py = os.path.join(root, \"\")versioneer_py = os.path.join(root, \"\")if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[])))setup_py = os.path.join(root, \"\")versioneer_py = os.path.join(root, \"\")if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):err = (\"\"\"\"\"\"\"\"\"\")raise VersioneerBadRootError(err)try:me = os.path.realpath(os.path.abspath(__file__))me_dir = os.path.normcase(os.path.splitext(me)[])vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[])if me_dir != vsr_dir:print(\"\"% (os.path.dirname(me), versioneer_py))except NameError:passreturn root", "docstring": "Get the project root directory.\n\n We require that all commands are run from the project root, i.e. 
the\n directory that contains setup.py, setup.cfg, and versioneer.py .", "id": "f3620:m0"} {"signature": "def get_config_from_root(root):", "body": "setup_cfg = os.path.join(root, \"\")parser = configparser.SafeConfigParser()with open(setup_cfg, \"\") as f:parser.readfp(f)VCS = parser.get(\"\", \"\") def get(parser, name):if parser.has_option(\"\", name):return parser.get(\"\", name)return Nonecfg = VersioneerConfig()cfg.VCS = VCScfg.style = get(parser, \"\") or \"\"cfg.versionfile_source = get(parser, \"\")cfg.versionfile_build = get(parser, \"\")cfg.tag_prefix = get(parser, \"\")if cfg.tag_prefix in (\"\", ''):cfg.tag_prefix = \"\"cfg.parentdir_prefix = get(parser, \"\")cfg.verbose = get(parser, \"\")return cfg", "docstring": "Read the project setup.cfg file to determine Versioneer config.", "id": "f3620:m1"} {"signature": "def register_vcs_handler(vcs, method): ", "body": "def decorate(f):\"\"\"\"\"\"if vcs not in HANDLERS:HANDLERS[vcs] = {}HANDLERS[vcs][method] = freturn freturn decorate", "docstring": "Decorator to mark a method as the handler for a particular VCS.", "id": "f3620:m2"} {"signature": "def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,env=None):", "body": "assert isinstance(commands, list)p = Nonefor c in commands:try:dispcmd = str([c] + args)p = subprocess.Popen([c] + args, cwd=cwd, env=env,stdout=subprocess.PIPE,stderr=(subprocess.PIPE if hide_stderrelse None))breakexcept EnvironmentError:e = sys.exc_info()[]if e.errno == errno.ENOENT:continueif verbose:print(\"\" % dispcmd)print(e)return None, Noneelse:if verbose:print(\"\" % (commands,))return None, Nonestdout = p.communicate()[].strip()if sys.version_info[] >= :stdout = stdout.decode()if p.returncode != :if verbose:print(\"\" % dispcmd)print(\"\" % stdout)return None, p.returncodereturn stdout, p.returncode", "docstring": "Call the given command(s).", "id": "f3620:m3"} {"signature": "@register_vcs_handler(\"\", \"\")def git_get_keywords(versionfile_abs):", "body": "keywords = {}try:f = open(versionfile_abs, \"\")for line in f.readlines():if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()f.close()except EnvironmentError:passreturn keywords", "docstring": "Extract version information from the given file.", "id": "f3620:m4"} {"signature": "@register_vcs_handler(\"\", \"\")def git_versions_from_keywords(keywords, tag_prefix, verbose):", "body": "if not keywords:raise NotThisMethod(\"\")date = keywords.get(\"\")if date is not None:date = date.strip().replace(\"\", \"\", ).replace(\"\", \"\", )refnames = keywords[\"\"].strip()if refnames.startswith(\"\"):if verbose:print(\"\")raise NotThisMethod(\"\")refs = set([r.strip() for r in refnames.strip(\"\").split(\"\")])TAG = \"\"tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])if not tags:tags = set([r for r in refs if re.search(r'', r)])if verbose:print(\"\" % \"\".join(refs - tags))if verbose:print(\"\" % \"\".join(sorted(tags)))for ref in sorted(tags):if ref.startswith(tag_prefix):r = ref[len(tag_prefix):]if verbose:print(\"\" % r)return {\"\": r,\"\": keywords[\"\"].strip(),\"\": False, \"\": None,\"\": date}if verbose:print(\"\")return {\"\": \"\",\"\": keywords[\"\"].strip(),\"\": False, \"\": \"\", \"\": None}", "docstring": "Get version information from git keywords.", "id": "f3620:m5"} {"signature": 
"@register_vcs_handler(\"\", \"\")def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):", "body": "GITS = [\"\"]if sys.platform == \"\":GITS = [\"\", \"\"]out, rc = run_command(GITS, [\"\", \"\"], cwd=root,hide_stderr=True)if rc != :if verbose:print(\"\" % root)raise NotThisMethod(\"\")describe_out, rc = run_command(GITS, [\"\", \"\", \"\",\"\", \"\",\"\", \"\" % tag_prefix],cwd=root)if describe_out is None:raise NotThisMethod(\"\")describe_out = describe_out.strip()full_out, rc = run_command(GITS, [\"\", \"\"], cwd=root)if full_out is None:raise NotThisMethod(\"\")full_out = full_out.strip()pieces = {}pieces[\"\"] = full_outpieces[\"\"] = full_out[:] pieces[\"\"] = Nonegit_describe = describe_outdirty = git_describe.endswith(\"\")pieces[\"\"] = dirtyif dirty:git_describe = git_describe[:git_describe.rindex(\"\")]if \"\" in git_describe:mo = re.search(r'', git_describe)if not mo:pieces[\"\"] = (\"\"% describe_out)return piecesfull_tag = mo.group()if not full_tag.startswith(tag_prefix):if verbose:fmt = \"\"print(fmt % (full_tag, tag_prefix))pieces[\"\"] = (\"\"% (full_tag, tag_prefix))return piecespieces[\"\"] = full_tag[len(tag_prefix):]pieces[\"\"] = int(mo.group())pieces[\"\"] = mo.group()else:pieces[\"\"] = Nonecount_out, rc = run_command(GITS, [\"\", \"\", \"\"],cwd=root)pieces[\"\"] = int(count_out) date = run_command(GITS, [\"\", \"\", \"\", \"\"],cwd=root)[].strip()pieces[\"\"] = date.strip().replace(\"\", \"\", ).replace(\"\", \"\", )return pieces", "docstring": "Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source tree.", "id": "f3620:m6"} {"signature": "def do_vcs_install(manifest_in, versionfile_source, ipy):", "body": "GITS = [\"\"]if sys.platform == \"\":GITS = [\"\", \"\"]files = [manifest_in, versionfile_source]if ipy:files.append(ipy)try:me = __file__if me.endswith(\"\") or me.endswith(\"\"):me = os.path.splitext(me)[] + \"\"versioneer_file = os.path.relpath(me)except NameError:versioneer_file = \"\"files.append(versioneer_file)present = Falsetry:f = open(\"\", \"\")for line in f.readlines():if line.strip().startswith(versionfile_source):if \"\" in line.strip().split()[:]:present = Truef.close()except EnvironmentError:passif not present:f = open(\"\", \"\")f.write(\"\" % versionfile_source)f.close()files.append(\"\")run_command(GITS, [\"\", \"\"] + files)", "docstring": "Git-specific installation logic for Versioneer.\n\n For Git, this means creating/changing .gitattributes to mark _version.py\n for export-subst keyword substitution.", "id": "f3620:m7"} {"signature": "def versions_from_parentdir(parentdir_prefix, root, verbose):", "body": "rootdirs = []for i in range():dirname = os.path.basename(root)if dirname.startswith(parentdir_prefix):return {\"\": dirname[len(parentdir_prefix):],\"\": None,\"\": False, \"\": None, \"\": None}else:rootdirs.append(root)root = os.path.dirname(root) if verbose:print(\"\" %(str(rootdirs), parentdir_prefix))raise NotThisMethod(\"\")", "docstring": "Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes both\n the project name and a version string. 
We will also support searching up\n two directory levels for an appropriately named parent directory", "id": "f3620:m8"} {"signature": "def versions_from_file(filename):", "body": "try:with open(filename) as f:contents = f.read()except EnvironmentError:raise NotThisMethod(\"\")mo = re.search(r\"\",contents, re.M | re.S)if not mo:mo = re.search(r\"\",contents, re.M | re.S)if not mo:raise NotThisMethod(\"\")return json.loads(mo.group())", "docstring": "Try to determine the version from _version.py if present.", "id": "f3620:m9"} {"signature": "def write_to_version_file(filename, versions):", "body": "os.unlink(filename)contents = json.dumps(versions, sort_keys=True,indent=, separators=(\"\", \"\"))with open(filename, \"\") as f:f.write(SHORT_VERSION_PY % contents)print(\"\" % (filename, versions[\"\"]))", "docstring": "Write the given version number to the given _version.py file.", "id": "f3620:m10"} {"signature": "def plus_or_dot(pieces):", "body": "if \"\" in pieces.get(\"\", \"\"):return \"\"return \"\"", "docstring": "Return a + if we don't already have one, else return a .", "id": "f3620:m11"} {"signature": "def render_pep440(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += plus_or_dot(pieces)rendered += \"\" % (pieces[\"\"], pieces[\"\"])if pieces[\"\"]:rendered += \"\"else:rendered = \"\" % (pieces[\"\"],pieces[\"\"])if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]", "id": "f3620:m12"} {"signature": "def render_pep440_pre(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\" % pieces[\"\"]else:rendered = \"\" % pieces[\"\"]return rendered", "docstring": "TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE", "id": "f3620:m13"} {"signature": "def render_pep440_post(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"rendered += plus_or_dot(pieces)rendered += \"\" % pieces[\"\"]else:rendered = \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"rendered += \"\" % pieces[\"\"]return rendered", "docstring": "TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 0.postDISTANCE[.dev0]", "id": "f3620:m14"} {"signature": "def render_pep440_old(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"else:rendered = \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Eexceptions:\n 1: no tags. 
0.postDISTANCE[.dev0]", "id": "f3620:m15"} {"signature": "def render_git_describe(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\" % (pieces[\"\"], pieces[\"\"])else:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)", "id": "f3620:m16"} {"signature": "def render_git_describe_long(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]rendered += \"\" % (pieces[\"\"], pieces[\"\"])else:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always -long'.\n The distance/hash is unconditional.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)", "id": "f3620:m17"} {"signature": "def render(pieces, style):", "body": "if pieces[\"\"]:return {\"\": \"\",\"\": pieces.get(\"\"),\"\": None,\"\": pieces[\"\"],\"\": None}if not style or style == \"\":style = \"\" if style == \"\":rendered = render_pep440(pieces)elif style == \"\":rendered = render_pep440_pre(pieces)elif style == \"\":rendered = render_pep440_post(pieces)elif style == \"\":rendered = render_pep440_old(pieces)elif style == \"\":rendered = render_git_describe(pieces)elif style == \"\":rendered = render_git_describe_long(pieces)else:raise ValueError(\"\" % style)return {\"\": rendered, \"\": pieces[\"\"],\"\": pieces[\"\"], \"\": None,\"\": pieces.get(\"\")}", "docstring": "Render the given version pieces into the requested style.", "id": "f3620:m18"} {"signature": "def get_versions(verbose=False):", "body": "if \"\" in sys.modules:del sys.modules[\"\"]root = get_root()cfg = get_config_from_root(root)assert cfg.VCS is not None, \"\"handlers = HANDLERS.get(cfg.VCS)assert handlers, \"\" % cfg.VCSverbose = verbose or cfg.verboseassert cfg.versionfile_source is not None,\"\"assert cfg.tag_prefix is not None, \"\"versionfile_abs = os.path.join(root, cfg.versionfile_source)get_keywords_f = handlers.get(\"\")from_keywords_f = handlers.get(\"\")if get_keywords_f and from_keywords_f:try:keywords = get_keywords_f(versionfile_abs)ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)if verbose:print(\"\" % ver)return verexcept NotThisMethod:passtry:ver = versions_from_file(versionfile_abs)if verbose:print(\"\" % (versionfile_abs, ver))return verexcept NotThisMethod:passfrom_vcs_f = handlers.get(\"\")if from_vcs_f:try:pieces = from_vcs_f(cfg.tag_prefix, root, verbose)ver = render(pieces, cfg.style)if verbose:print(\"\" % ver)return verexcept NotThisMethod:passtry:if cfg.parentdir_prefix:ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)if verbose:print(\"\" % ver)return verexcept NotThisMethod:passif verbose:print(\"\")return {\"\": \"\", \"\": None,\"\": None, \"\": \"\",\"\": None}", "docstring": "Get the project version from whatever source is available.\n\n Returns dict with two keys: 'version' and 'full'.", "id": "f3620:m19"} {"signature": "def get_version():", "body": "return get_versions()[\"\"]", "docstring": "Get the short version string for this project.", "id": "f3620:m20"} {"signature": "def get_cmdclass():", "body": "if \"\" in sys.modules:del sys.modules[\"\"]cmds = {}from distutils.core import Commandclass cmd_version(Command):description = \"\"user_options = []boolean_options = []def initialize_options(self):passdef finalize_options(self):passdef run(self):vers = 
get_versions(verbose=True)print(\"\" % vers[\"\"])print(\"\" % vers.get(\"\"))print(\"\" % vers.get(\"\"))print(\"\" % vers.get(\"\"))if vers[\"\"]:print(\"\" % vers[\"\"])cmds[\"\"] = cmd_versionif \"\" in sys.modules:from setuptools.command.build_py import build_py as _build_pyelse:from distutils.command.build_py import build_py as _build_pyclass cmd_build_py(_build_py):def run(self):root = get_root()cfg = get_config_from_root(root)versions = get_versions()_build_py.run(self)if cfg.versionfile_build:target_versionfile = os.path.join(self.build_lib,cfg.versionfile_build)print(\"\" % target_versionfile)write_to_version_file(target_versionfile, versions)cmds[\"\"] = cmd_build_pyif \"\" in sys.modules: from cx_Freeze.dist import build_exe as _build_execlass cmd_build_exe(_build_exe):def run(self):root = get_root()cfg = get_config_from_root(root)versions = get_versions()target_versionfile = cfg.versionfile_sourceprint(\"\" % target_versionfile)write_to_version_file(target_versionfile, versions)_build_exe.run(self)os.unlink(target_versionfile)with open(cfg.versionfile_source, \"\") as f:LONG = LONG_VERSION_PY[cfg.VCS]f.write(LONG %{\"\": \"\",\"\": cfg.style,\"\": cfg.tag_prefix,\"\": cfg.parentdir_prefix,\"\": cfg.versionfile_source,})cmds[\"\"] = cmd_build_exedel cmds[\"\"]if '' in sys.modules: try:from py2exe.distutils_buildexe import py2exe as _py2exe except ImportError:from py2exe.build_exe import py2exe as _py2exe class cmd_py2exe(_py2exe):def run(self):root = get_root()cfg = get_config_from_root(root)versions = get_versions()target_versionfile = cfg.versionfile_sourceprint(\"\" % target_versionfile)write_to_version_file(target_versionfile, versions)_py2exe.run(self)os.unlink(target_versionfile)with open(cfg.versionfile_source, \"\") as f:LONG = LONG_VERSION_PY[cfg.VCS]f.write(LONG %{\"\": \"\",\"\": cfg.style,\"\": cfg.tag_prefix,\"\": cfg.parentdir_prefix,\"\": cfg.versionfile_source,})cmds[\"\"] = cmd_py2exeif \"\" in sys.modules:from setuptools.command.sdist import sdist as _sdistelse:from distutils.command.sdist import sdist as _sdistclass cmd_sdist(_sdist):def run(self):versions = get_versions()self._versioneer_generated_versions = versionsself.distribution.metadata.version = versions[\"\"]return _sdist.run(self)def make_release_tree(self, base_dir, files):root = get_root()cfg = get_config_from_root(root)_sdist.make_release_tree(self, base_dir, files)target_versionfile = os.path.join(base_dir, cfg.versionfile_source)print(\"\" % target_versionfile)write_to_version_file(target_versionfile,self._versioneer_generated_versions)cmds[\"\"] = cmd_sdistreturn cmds", "docstring": "Get the custom setuptools/distutils subclasses used by Versioneer.", "id": "f3620:m21"} {"signature": "def do_setup():", "body": "root = get_root()try:cfg = get_config_from_root(root)except (EnvironmentError, configparser.NoSectionError,configparser.NoOptionError) as e:if isinstance(e, (EnvironmentError, configparser.NoSectionError)):print(\"\",file=sys.stderr)with open(os.path.join(root, \"\"), \"\") as f:f.write(SAMPLE_CONFIG)print(CONFIG_ERROR, file=sys.stderr)return print(\"\" % cfg.versionfile_source)with open(cfg.versionfile_source, \"\") as f:LONG = LONG_VERSION_PY[cfg.VCS]f.write(LONG % {\"\": \"\",\"\": cfg.style,\"\": cfg.tag_prefix,\"\": cfg.parentdir_prefix,\"\": cfg.versionfile_source,})ipy = os.path.join(os.path.dirname(cfg.versionfile_source),\"\")if os.path.exists(ipy):try:with open(ipy, \"\") as f:old = f.read()except EnvironmentError:old = \"\"if INIT_PY_SNIPPET not in old:print(\"\" % ipy)with 
open(ipy, \"\") as f:f.write(INIT_PY_SNIPPET)else:print(\"\" % ipy)else:print(\"\" % ipy)ipy = Nonemanifest_in = os.path.join(root, \"\")simple_includes = set()try:with open(manifest_in, \"\") as f:for line in f:if line.startswith(\"\"):for include in line.split()[:]:simple_includes.add(include)except EnvironmentError:passif \"\" not in simple_includes:print(\"\")with open(manifest_in, \"\") as f:f.write(\"\")else:print(\"\")if cfg.versionfile_source not in simple_includes:print(\"\" %cfg.versionfile_source)with open(manifest_in, \"\") as f:f.write(\"\" % cfg.versionfile_source)else:print(\"\")do_vcs_install(manifest_in, cfg.versionfile_source, ipy)return ", "docstring": "Main VCS-independent setup function for installing Versioneer.", "id": "f3620:m22"} {"signature": "def scan_setup_py():", "body": "found = set()setters = Falseerrors = with open(\"\", \"\") as f:for line in f.readlines():if \"\" in line:found.add(\"\")if \"\" in line:found.add(\"\")if \"\" in line:found.add(\"\")if \"\" in line:setters = Trueif \"\" in line:setters = Trueif len(found) != :print(\"\")print(\"\")print(\"\")print(\"\")print(\"\")print(\"\")print(\"\")print(\"\")print(\"\")errors += if setters:print(\"\")print(\"\")print(\"\")print(\"\")errors += return errors", "docstring": "Validate the contents of setup.py against Versioneer's expectations.", "id": "f3620:m23"} {"signature": "def _default_json_default(obj):", "body": "if isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):return obj.strftime(default_date_fmt)else:return str(obj)", "docstring": "Coerce everything to strings.\n All objects representing time get output according to default_date_fmt.", "id": "f3624:m0"} {"signature": "def init_logs(path=None,target=None,logger_name='',level=logging.DEBUG,maxBytes=**,backupCount=,application_name='',server_hostname=None,fields=None):", "body": "log_file = os.path.abspath(os.path.join(path, target))logger = logging.getLogger(logger_name)logger.setLevel(level)handler = logging.handlers.RotatingFileHandler(log_file, maxBytes=maxBytes, backupCount=backupCount)handler.setLevel(level)handler.setFormatter(JsonFormatter(application_name=application_name,server_hostname=server_hostname,fields=fields))logger.addHandler(handler)", "docstring": "Initialize the zlogger.\n\n Sets up a rotating file handler to the specified path and file with\n the given size and backup count limits, sets the default\n application_name, server_hostname, and default/whitelist fields.\n\n :param path: path to write the log file\n :param target: name of the log file\n :param logger_name: name of the logger (defaults to root)\n :param level: log level for this logger (defaults to logging.DEBUG)\n :param maxBytes: size of the file before rotation (default 1MB)\n :param application_name: app name to add to each log entry\n :param server_hostname: hostname to add to each log entry\n :param fields: default/whitelist fields.\n :type path: string\n :type target: string\n :type logger_name: string\n :type level: int\n :type maxBytes: int\n :type backupCount: int\n :type application_name: string\n :type server_hostname: string\n :type fields: dict", "id": "f3624:m1"} {"signature": "@_log_fn()def emergency(**kwargs):", "body": "pass", "docstring": "log with pyzlog level EMERGENCY", "id": "f3624:m4"} {"signature": "@_log_fn()def alert(**kwargs):", "body": "pass", "docstring": "log with pyzlog level ALERT", "id": "f3624:m5"} {"signature": "@_log_fn()def notice(**kwargs):", "body": "pass", "docstring": "log with pyzlog level NOTICE", "id": 
"f3624:m6"} {"signature": "@_log_fn()def info(**kwargs):", "body": "pass", "docstring": "log with pyzlog level INFO", "id": "f3624:m7"} {"signature": "@_log_fn()def warning(**kwargs):", "body": "pass", "docstring": "log with pyzlog level WARNING", "id": "f3624:m8"} {"signature": "@_log_fn(exc_info=True)def error(**kwargs):", "body": "pass", "docstring": "log with pyzlog level ERROR\n\n exception info is added if it exists", "id": "f3624:m9"} {"signature": "@_log_fn()def critical(**kwargs):", "body": "pass", "docstring": "log with pyzlog level CRITICAL", "id": "f3624:m10"} {"signature": "@_log_fn()def debug(**kwargs):", "body": "pass", "docstring": "log with pyzlog level DEBUG", "id": "f3624:m11"} {"signature": "def remove_log(self, path=None, target=None):", "body": "path = path if path is not None else self.pathtarget = target if target is not None else self.targettry:os.remove(os.path.abspath(os.path.join(path, target)))except OSError:pass", "docstring": "remove the specified log file.\n\n Generally called in setUp and tearDown methods to ensure\n isolation. If path or target are not specified, will default to\n path and target properties on the object.\n\n :param path: path to find the log file\n :param target: name of the log file\n :type path: string\n :type target: string", "id": "f3624:c0:m0"} {"signature": "def get_log_messages(self, path=None, target=None):", "body": "path = path if path is not None else self.pathtarget = target if target is not None else self.targetwith open(os.path.abspath(os.path.join(path, target))) as f:return f.readlines()", "docstring": "fetch all log entries in the given file\n\n Intended to be used to assert that the expected entries were\n written out to the correct log file. If path or target are not\n specified, will default to path and target properties on the\n object.\n\n :param path: path to find the log file\n :param target: name of the log file\n :type path: string\n :type target: string", "id": "f3624:c0:m1"} {"signature": "def init_logs(self, path=None, target=None, level=None,server_hostname=None, extra=None):", "body": "path = path if path is not None else self.pathtarget = target if target is not None else self.targetlevel = level if level is not None else logging.DEBUGserver_hostname = (server_hostname if server_hostname is not Noneelse '')extra = extra if extra is not None else {'': None}init_logs(path=path, target=target, level=level,server_hostname=server_hostname, fields=extra)", "docstring": "Simple canned way to initialize pyzlog.\n\n Initialize pyslog for tests. If path or target are not\n specified, will default to path and target properties on the\n object. 
leve will default to logging.DEBUG, server_hostname\n defaults to localhost, and extra defaults to {'extra': None}\n\n :param path: path to find the log file\n :param target: name of the log file\n :param level: log level for this instance\n :param server_hostname: hostname to put in each entry\n :param extra: whitelist/defaults of extra fields to add to each entry\n :type path: string\n :type target: string\n :type level: int\n :type server_hostname: string\n :type extra: dict", "id": "f3624:c0:m2"} {"signature": "def format(self, record):", "body": "record_fields = record.__dict__.copy()self._set_exc_info(record_fields)event_name = ''if record_fields.get(''):event_name = record_fields.pop('')log_level = ''if record_fields.get(''):log_level = record_fields.pop('')[record_fields.pop(k) for k in record_fields.keys()if k not in self.fields]defaults = self.defaults.copy()fields = self.fields.copy()fields.update(record_fields)filtered_fields = {}for k, v in fields.iteritems():if v is not None:filtered_fields[k] = vdefaults.update({'': self._get_now(),'': event_name,'': log_level,'': filtered_fields})return json.dumps(defaults, default=self.json_default)", "docstring": "formats a logging.Record into a standard json log entry\n\n :param record: record to be formatted\n :type record: logging.Record\n :return: the formatted json string\n :rtype: string", "id": "f3624:c1:m2"} {"signature": "def render_to_response(self, context, **response_kwargs):", "body": "if self.request.is_ajax():template = self.page_templateelse:template = self.get_template_names()return self.response_class(request=self.request,template=template,context=context,**response_kwargs)", "docstring": "Returns a response with a template depending if the request is ajax \nor not and it renders with the given context.", "id": "f3630:c2:m3"} {"signature": "def get_version():", "body": "return ''.join(map(str, VERSION))", "docstring": "Return the Django Simple Blog version as a string.", "id": "f3632:m0"} {"signature": "def get_current_date(self):", "body": "return self.current_announce_date", "docstring": "Return the date the query has been using.\n\n:rtype: String, date, Format: YYYY-MM-DD", "id": "f3639:c0:m3"} {"signature": "def asn(self, ip, announce_date=None):", "body": "assignations, announce_date, _ = self.run(ip, announce_date)return next((assign for assign in assignations if assign is not None), None), announce_date", "docstring": "Give an IP, maybe a date, get the ASN.\nThis is the fastest command.\n\n:param ip: IP address to search for\n:param announce_date: Date of the announcement\n\n:rtype: String, ASN.", "id": "f3639:c0:m6"} {"signature": "def date_asn_block(self, ip, announce_date=None):", "body": "assignations, announce_date, keys = self.run(ip, announce_date)pos = next((i for i, j in enumerate(assignations) if j is not None), None)if pos is not None:block = keys[pos]if block != '':return announce_date, assignations[pos], blockreturn None", "docstring": "Get the ASN and the IP Block announcing the IP at a specific date.\n\n:param ip: IP address to search for\n:param announce_date: Date of the announcement\n\n:rtype: tuple\n\n .. code-block:: python\n\n (announce_date, asn, block)\n\n.. note::\n\n the returned announce_date might be different of the one\n given in parameter because some raw files are missing and we\n don't have the information. 
In this case, the nearest known\n date will be chosen.", "id": "f3639:c0:m7"} {"signature": "def history(self, ip, days_limit=None):", "body": "all_dates = sorted(self.routing_db.smembers(''), reverse=True)if days_limit is not None:all_dates = all_dates[:days_limit]return [self.date_asn_block(ip, date) for date in all_dates]", "docstring": "Get the full history of an IP. It takes time.\n\n:param ip: IP address to search for\n:param days_limit: Max amount of days to query. (None means no limit)\n\n:rtype: list. For each day in the database: day, asn, block", "id": "f3639:c0:m8"} {"signature": "def aggregate_history(self, ip, days_limit=None):", "body": "first_date = Nonelast_date = Noneprec_asn = Noneprec_block = Nonefor entry in self.history(ip, days_limit):if entry is None:continuedate, asn, block = entryif first_date is None:last_date = datefirst_date = dateprec_asn = asnprec_block = blockelif prec_asn == asn and prec_block == block:first_date = dateelse:yield first_date, last_date, prec_asn, prec_blocklast_date = datefirst_date = dateprec_asn = asnprec_block = blockif first_date is not None:yield first_date, last_date, prec_asn, prec_block", "docstring": "Get the full history of an IP, aggregate the result instead of\nreturning one line per day.\n\n:param ip: IP address to search for\n:param days_limit: Max amount of days to query. (None means no limit)\n\n:rtype: list. For each change: FirstDay, LastDay, ASN, Block", "id": "f3639:c0:m9"} {"signature": "def checkURL(url):", "body": "p = urlparse(url)h = HTTPConnection(p[])h.request('', p[])reply = h.getresponse()h.close()if reply.status == :return else:return ", "docstring": "Check if the URL exists by getting the header of the response.", "id": "f3641:m0"} {"signature": "def downloadURL(url):", "body": "urlretrieve(url, os.path.join(c.raw_data, path_temp_bviewfile))os.rename(os.path.join(c.raw_data, path_temp_bviewfile), c.path_bviewfile)", "docstring": "Unconditionally download the URL into a temporary directory.\nWhen finished, the file is moved into the real directory.\nThis way another process will not attempt to extract an incomplete file.", "id": "f3641:m1"} {"signature": "def already_downloaded(date, hour):", "body": "if os.path.exists(c.path_bviewtimesamp):ts = open(c.path_bviewtimesamp, '').read().split()if ts[] == date:if int(ts[]) >= int(hour):return Trueopen(c.path_bviewtimesamp, '').write(date + '' + hour)return False", "docstring": "Verify that the date and the hour of the file we try to\ndownload are newer than the latest downloaded file.", "id": "f3641:m2"} {"signature": "def downloadURL(url, filename):", "body": "path_temp_bviewfile = os.path.join(c.raw_data, c.bview_dir, '', filename)path_bviewfile = os.path.join(c.raw_data, c.bview_dir, filename)try:f = urlopen(url)except:return Falseif f.getcode() != :publisher.warning(''.format(url, f.getcode()))return Falsetry:with open(path_temp_bviewfile, '') as outfile:outfile.write(f.read())os.rename(path_temp_bviewfile, path_bviewfile)except:os.remove(path_temp_bviewfile)return Falsereturn True", "docstring": "Unconditionally download the URL into a temporary directory.\nWhen finished, the file is moved into the real directory.\nThis way another process will not attempt to extract an incomplete file.", "id": "f3643:m1"} {"signature": "def already_downloaded(filename):", "body": "cur_file = os.path.join(c.bview_dir, filename)old_file = os.path.join(c.bview_dir, '', filename)if not os.path.exists(cur_file) and not os.path.exists(old_file):return Falsereturn True", "docstring": 
"Verify that the file has not already been downloaded.", "id": "f3643:m2"} {"signature": "def to_download():", "body": "first_day = parse(interval_first)last_day = parse(interval_last)format_change = parse('')one_day = datetime.timedelta()cur_day = first_dayurl_list = []while cur_day < last_day:fname = filename.format(day=cur_day.strftime(\"\"))if cur_day > format_change:cur_day += one_dayurl = base_url.format(year_month=cur_day.strftime(\"\"),file_day=cur_day.strftime(\"\"))else:url = base_url_old.format(year_month=cur_day.strftime(\"\"),file_day=cur_day.strftime(\"\"))cur_day += one_dayurl_list.append((fname, url))return sorted(url_list, key=lambda tup: tup[], reverse=True)", "docstring": "Build interval of urls to download.\nWe always get the first file of the next day.\nEx: 2013-01-01 => 2013-01-02.0000", "id": "f3643:m3"} {"signature": "def fsplit(file_to_split):", "body": "dirname = file_to_split + ''if not os.path.exists(dirname):os.mkdir(dirname)part_file_size = os.path.getsize(file_to_split) / number_of_files + splitted_files = []with open(file_to_split, \"\") as f:number = actual = while :prec = actualf.seek(part_file_size, os.SEEK_CUR)s = f.readline()if len(s) == :s = f.readline()while len(s) != and s != separator:s = f.readline()actual = f.tell()new_file = os.path.join(dirname, str(number))with open(file_to_split, \"\") as temp:temp.seek(prec)copy = temp.read(actual - prec)open(new_file, '').write(copy)splitted_files.append(new_file)number += if len(s) == :breakreturn splitted_files", "docstring": "Split the file and return the list of filenames.", "id": "f3644:m0"} {"signature": "def service_start(service=None, param=None):", "body": "if service is not None:to_run = [\"\", service]if param is not None:to_run += paramreturn subprocess.Popen(to_run)return False", "docstring": "Launch a Process, return his pid", "id": "f3646:m0"} {"signature": "def update_running_pids(old_procs):", "body": "new_procs = []for proc in old_procs:if proc.poll() is None and check_pid(proc.pid):publisher.debug(str(proc.pid) + '')new_procs.append(proc)else:try:publisher.debug(str(proc.pid) + '')os.kill(proc.pid, signal.SIGKILL)except:passreturn new_procs", "docstring": "Update the list of the running process and return the list", "id": "f3646:m1"} {"signature": "def check_pid(pid):", "body": "try:os.kill(pid, )except OSError:return Falseelse:return True", "docstring": "Check For the existence of a unix pid.", "id": "f3646:m2"} {"signature": "def run_splitted_processing(max_simultaneous_processes, process_name,filenames):", "body": "pids = []while len(filenames) > :while len(filenames) > and len(pids) < max_simultaneous_processes:filename = filenames.pop()pids.append(service_start(service=process_name,param=['', filename, '',imported_day]))while len(pids) == max_simultaneous_processes:time.sleep(sleep_timer)pids = update_running_pids(pids)while len(pids) > :time.sleep(sleep_timer)pids = update_running_pids(pids)", "docstring": "Run processes which push the routing dump of the RIPE in a redis\ndatabase.\nThe dump has been splitted in multiple files and each process run\non one of this files.", "id": "f3646:m3"} {"signature": "@app.route('', methods=[''])def __entry_point():", "body": "ip = request.remote_addrua = request.headers.get('', '')method = request.json.get('')if method is None:__query_logging(ip, ua, method, level='')return json.dumps({'': ''})if method not in authorized_methods:__query_logging(ip, ua, method, level='')return json.dumps({'': ''})fct = globals().get(method)if fct is 
None:__query_logging(ip, ua, method, level='')return json.dumps({'': ''})if request.json.get('') is None:__query_logging(ip, ua, method, level='')return json.dumps({'': ''})try:result = fct(request.json)__query_logging(ip, ua, method, request.json.get(''),request.json.get(''), request.json.get(''))return resultexcept Exception:__query_logging(ip, ua, method, request.json.get(''), level='')return json.dumps({'': ''})", "docstring": "Function called when a query is made on /json. Expects a JSON\nobject with at least a 'method' entry.", "id": "f3650:m2"} {"signature": "def pack(fmt, *args):", "body": "return CompiledFormat(fmt).pack(*args)", "docstring": "Return a bytes object containing the values v1, v2, ... packed\n according to given format string `fmt`. If the total number of\n bits is not a multiple of 8, padding will be added at the end of\n the last byte.\n\n `fmt` is a string of bitorder-type-length groups, and optionally a\n byteorder identifier after the groups. Bitorder and byteorder may\n be omitted.\n\n Bitorder is either ``>`` or ``<``, where ``>`` means MSB first and\n ``<`` means LSB first. If bitorder is omitted, the previous\n values' bitorder is used for the current value.\n\n Byteorder is either ``>`` or ``<``, where ``>`` means most\n significant byte first and ``<`` means least significant byte\n first. If byteorder is omitted, most significant byte first is\n used.\n\n There are eight types; ``u``, ``s``, ``f``, ``b``, ``t``, ``r``,\n ``p`` and ``P``.\n\n - ``u`` -- unsigned integer\n - ``s`` -- signed integer\n - ``f`` -- floating point number of 16, 32, or 64 bits\n - ``b`` -- boolean\n - ``t`` -- text (ascii or utf-8)\n - ``r`` -- raw, bytes\n - ``p`` -- padding with zeros, ignore\n - ``P`` -- padding with ones, ignore\n\n Length is the number of bits to pack the value into.\n\n Example format string with default bit and byte ordering:\n ``'u1u3p7s16'``\n\n Same format string, but with least significant byte first:\n ``'u1u3p7s16<'``\n\n Same format string, but with LSB first (``<`` prefix) and least\n significant byte first (``<`` suffix): ``'", "id": "f3652:m3"} {"signature": "def unpack(fmt, data):", "body": "return CompiledFormat(fmt).unpack(data)", "docstring": "Unpack `data` (bytes or bytearray) according to given format string\n `fmt`. The result is a tuple even if it contains exactly one item.", "id": "f3652:m4"} {"signature": "def pack_into(fmt, buf, offset, *args, **kwargs):", "body": "return CompiledFormat(fmt).pack_into(buf,offset,*args,**kwargs)", "docstring": "Pack given values v1, v2, ... into given bytearray `buf`, starting\n at given bit offset `offset`. Pack according to given format\n string `fmt`. Give `fill_padding` as ``False`` to leave padding\n bits in `buf` unmodified.", "id": "f3652:m5"} {"signature": "def unpack_from(fmt, data, offset=):", "body": "return CompiledFormat(fmt).unpack_from(data, offset)", "docstring": "Unpack `data` (bytes or bytearray) according to given format string\n `fmt`, starting at given bit offset `offset`. 
The result is a\n tuple even if it contains exactly one item.", "id": "f3652:m6"} {"signature": "def pack_dict(fmt, names, data):", "body": "return CompiledFormatDict(fmt, names).pack(data)", "docstring": "Same as :func:`~bitstruct.pack()`, but data is read from a\n dictionary.\n\n The names list `names` contains the format group names, used as\n keys in the dictionary.\n\n >>> pack_dict('u4u4', ['foo', 'bar'], {'foo': 1, 'bar': 2})\n b'\\\\x12'", "id": "f3652:m7"} {"signature": "def unpack_dict(fmt, names, data):", "body": "return CompiledFormatDict(fmt, names).unpack(data)", "docstring": "Same as :func:`~bitstruct.unpack()`, but returns a dictionary.\n\n See :func:`~bitstruct.pack_dict()` for details on `names`.\n\n >>> unpack_dict('u4u4', ['foo', 'bar'], b'\\\\x12')\n {'foo': 1, 'bar': 2}", "id": "f3652:m8"} {"signature": "def pack_into_dict(fmt, names, buf, offset, data, **kwargs):", "body": "return CompiledFormatDict(fmt, names).pack_into(buf,offset,data,**kwargs)", "docstring": "Same as :func:`~bitstruct.pack_into()`, but data is read from a\n dictionary.\n\n See :func:`~bitstruct.pack_dict()` for details on `names`.", "id": "f3652:m9"} {"signature": "def unpack_from_dict(fmt, names, data, offset=):", "body": "return CompiledFormatDict(fmt, names).unpack_from(data, offset)", "docstring": "Same as :func:`~bitstruct.unpack_from_dict()`, but returns a\n dictionary.\n\n See :func:`~bitstruct.pack_dict()` for details on `names`.", "id": "f3652:m10"} {"signature": "def calcsize(fmt):", "body": "return CompiledFormat(fmt).calcsize()", "docstring": "Return the number of bits in given format string `fmt`.\n\n >>> calcsize('u1s3p4')\n 8", "id": "f3652:m11"} {"signature": "def byteswap(fmt, data, offset=):", "body": "data = BytesIO(data)data.seek(offset)data_swapped = BytesIO()for f in fmt:swapped = data.read(int(f))[::-]data_swapped.write(swapped)return data_swapped.getvalue()", "docstring": "Swap bytes in `data` according to `fmt`, starting at byte `offset`\n and return the result. `fmt` must be an iterable, iterating over\n number of bytes to swap. 
For example, the format string ``'24'``\n applied to the bytes ``b'\\\\x00\\\\x11\\\\x22\\\\x33\\\\x44\\\\x55'`` will\n produce the result ``b'\\\\x11\\\\x00\\\\x55\\\\x44\\\\x33\\\\x22'``.", "id": "f3652:m12"} {"signature": "def compile(fmt, names=None):", "body": "if names is None:return CompiledFormat(fmt)else:return CompiledFormatDict(fmt, names)", "docstring": "Compile given format string `fmt` and return a compiled format\n object that can be used to pack and/or unpack data multiple times.\n\n Returns a :class:`~bitstruct.CompiledFormat` object if `names` is\n ``None``, and otherwise a :class:`~bitstruct.CompiledFormatDict`\n object.\n\n See :func:`~bitstruct.pack_dict()` for details on `names`.", "id": "f3652:m13"} {"signature": "def calcsize(self):", "body": "return self._number_of_bits_to_unpack", "docstring": "Return the number of bits in the compiled format string.", "id": "f3652:c11:m5"} {"signature": "def pack(self, *args):", "body": "if len(args) < self._number_of_arguments:raise Error(\"\".format(self._number_of_arguments,len(args)))return self.pack_any(args)", "docstring": "See :func:`~bitstruct.pack()`.", "id": "f3652:c12:m1"} {"signature": "def unpack(self, data):", "body": "return self.unpack_from(data)", "docstring": "See :func:`~bitstruct.unpack()`.", "id": "f3652:c12:m2"} {"signature": "def pack_into(self, buf, offset, *args, **kwargs):", "body": "if len(args) < self._number_of_arguments:raise Error(\"\".format(self._number_of_arguments,len(args)))self.pack_into_any(buf, offset, args, **kwargs)", "docstring": "See :func:`~bitstruct.pack_into()`.", "id": "f3652:c12:m3"} {"signature": "def unpack_from(self, data, offset=):", "body": "return tuple([v[] for v in self.unpack_from_any(data, offset)])", "docstring": "See :func:`~bitstruct.unpack_from()`.", "id": "f3652:c12:m4"} {"signature": "def pack(self, data):", "body": "try:return self.pack_any(data)except KeyError as e:raise Error(''.format(str(e)))", "docstring": "See :func:`~bitstruct.pack_dict()`.", "id": "f3652:c13:m0"} {"signature": "def unpack(self, data):", "body": "return self.unpack_from(data)", "docstring": "See :func:`~bitstruct.unpack_dict()`.", "id": "f3652:c13:m1"} {"signature": "def pack_into(self, buf, offset, data, **kwargs):", "body": "try:self.pack_into_any(buf, offset, data, **kwargs)except KeyError as e:raise Error(''.format(str(e)))", "docstring": "See :func:`~bitstruct.pack_into_dict()`.", "id": "f3652:c13:m2"} {"signature": "def unpack_from(self, data, offset=):", "body": "return {info.name: v for info, v in self.unpack_from_any(data, offset)}", "docstring": "See :func:`~bitstruct.unpack_from_dict()`.", "id": "f3652:c13:m3"} {"signature": "def open(name=None, fileobj=None, closefd=True):", "body": "return Guesser().open(name=name, fileobj=fileobj, closefd=closefd)", "docstring": "Use all decompressor possible to make the stream", "id": "f3668:m0"} {"signature": "def make_seekable(fileobj):", "body": "if sys.version_info < (, ) and isinstance(fileobj, file):filename = fileobj.namefileobj = io.FileIO(fileobj.fileno(), closefd=False)fileobj.name = filenameassert isinstance(fileobj, io.IOBase),\"\"% type(fileobj)return fileobj if fileobj.seekable()else ArchiveTemp(fileobj)", "docstring": "If the file-object is not seekable, return ArchiveTemp of the fileobject,\notherwise return the file-object itself", "id": "f3671:m0"} {"signature": "def getCleanClient(self, name):", "body": "client = Client(name, port=self.server.port)try:client.drop_index()except:passreturn client", "docstring": "Gets a client 
client attached to an index name which is ready to be\ncreated", "id": "f3674:c0:m2"} {"signature": "def __init__(self, field, *byfields):", "body": "fieldstrs = []if len(byfields) == and isinstance(byfields[], type) andissubclass(byfields[], SortDirection):byfields = [byfields[](field)]for f in byfields:fieldstrs += [f.field, f.DIRSTRING]args = [field]if fieldstrs:args += [''] + fieldstrssuper(first_value, self).__init__(*args)self._field = field", "docstring": "Selects the first value of the given field within the group.\n\n### Parameter\n\n- **field**: Source field used for the value\n- **byfields**: How to sort the results. This can be either the\n *class* of `aggregation.Asc` or `aggregation.Desc` in which\n case the field `field` is also used as the sort input.\n\n `byfields` can also be one or more *instances* of `Asc` or `Desc`\n indicating the sort order for these fields", "id": "f3676:c11:m0"} {"signature": "def __init__(self, field, size):", "body": "args = [field, str(size)]super(random_sample, self).__init__(*args)self._field = field", "docstring": "### Parameter\n\n**field**: Field to sample from\n**size**: Return this many items (can be less)", "id": "f3676:c12:m0"} {"signature": "def __init__(self, index_name, host='', port=, conn=None):", "body": "self.index_name = index_nameself.redis = conn if conn is not None else Redis(connection_pool=ConnectionPool(host=host, port=port))", "docstring": "Create a new Client for the given index_name, and optional host and port\n\nIf conn is not None, we employ an already existing redis connection", "id": "f3679:c5:m0"} {"signature": "def batch_indexer(self, chunk_size=):", "body": "return Client.BatchIndexer(self, chunk_size=chunk_size)", "docstring": "Create a new batch indexer from the client with a given chunk size", "id": "f3679:c5:m1"} {"signature": "def create_index(self, fields, no_term_offsets=False,no_field_flags=False, stopwords = None):", "body": "args = [self.CREATE_CMD, self.index_name]if no_term_offsets:args.append(self.NOOFFSETS)if no_field_flags:args.append(self.NOFIELDS)if stopwords is not None and isinstance(stopwords, (list, tuple, set)):args += [self.STOPWORDS, len(stopwords)]if len(stopwords) > :args += list(stopwords)args.append('')args += list(itertools.chain(*(f.redis_args() for f in fields)))return self.redis.execute_command(*args)", "docstring": "Create the search index. The index must not already exist.\n\n### Parameters:\n\n- **fields**: a list of TextField or NumericField objects\n- **no_term_offsets**: If true, we will not save term offsets in the index\n- **no_field_flags**: If true, we will not save field flags that allow searching in specific fields\n- **stopwords**: If not None, we create the index with this custom stopword list. 
The list can be empty", "id": "f3679:c5:m2"} {"signature": "def drop_index(self):", "body": "return self.redis.execute_command(self.DROP_CMD, self.index_name)", "docstring": "Drop the index if it exists", "id": "f3679:c5:m3"} {"signature": "def _add_document(self, doc_id, conn=None, nosave=False, score=, payload=None,replace=False, partial=False, language=None, **fields):", "body": "if conn is None:conn = self.redisif partial:replace = Trueargs = [self.ADD_CMD, self.index_name, doc_id, score]if nosave:args.append('')if payload is not None:args.append('')args.append(payload)if replace:args.append('')if partial:args.append('')if language:args += ['', language]args.append('')args += list(itertools.chain(*fields.items()))return conn.execute_command(*args)", "docstring": "Internal add_document used for both batch and single doc indexing", "id": "f3679:c5:m4"} {"signature": "def add_document(self, doc_id, nosave=False, score=, payload=None,replace=False, partial=False, language=None, **fields):", "body": "return self._add_document(doc_id, conn=None, nosave=nosave, score=score, payload=payload, replace=replace,partial=partial, language=language, **fields)", "docstring": "Add a single document to the index.\n\n### Parameters\n\n- **doc_id**: the id of the saved document.\n- **nosave**: if set to true, we just index the document, and don't save a copy of it. This means that searches will just return ids.\n- **score**: the document ranking, between 0.0 and 1.0 \n- **payload**: optional inner-index payload we can save for fast access in scoring functions\n- **replace**: if True, and the document already is in the index, we perform an update and reindex the document\n- **partial**: if True, the fields specified will be added to the existing document.\n This has the added benefit that any fields specified with `no_index`\n will not be reindexed again. Implies `replace`\n- **language**: Specify the language used for document tokenization.\n- **fields** kwargs dictionary of the document fields to be saved and/or indexed. \n NOTE: Geo points should be encoded as strings of \"lon,lat\"", "id": "f3679:c5:m5"} {"signature": "def delete_document(self, doc_id, conn=None):", "body": "if conn is None:conn = self.redisreturn conn.execute_command(self.DEL_CMD, self.index_name, doc_id)", "docstring": "Delete a document from index\nReturns 1 if the document was deleted, 0 if not", "id": "f3679:c5:m6"} {"signature": "def load_document(self, id):", "body": "fields = self.redis.hgetall(id)if six.PY3:f2 = {to_string(k): to_string(v) for k, v in fields.items()}fields = f2try:del fields['']except KeyError:passreturn Document(id=id, **fields)", "docstring": "Load a single document by id", "id": "f3679:c5:m7"} {"signature": "def info(self):", "body": "res = self.redis.execute_command('', self.index_name)it = six.moves.map(to_string, res)return dict(six.moves.zip(it, it))", "docstring": "Get info and stats about the current index, including the number of documents, memory consumption, etc", "id": "f3679:c5:m8"} {"signature": "def search(self, query):", "body": "args, query = self._mk_query_args(query)st = time.time()res = self.redis.execute_command(self.SEARCH_CMD, *args)return Result(res,not query._no_content,duration=(time.time() - st) * ,has_payload=query._with_payloads)", "docstring": "Search the index for a given query, and return a result of documents\n\n### Parameters\n\n- **query**: the search query. 
Either a text for simple queries with default parameters, or a Query object for complex queries.\n See RediSearch's documentation on query format\n- **snippet_sizes**: A dictionary of {field: snippet_size} used to trim and format the result. e.g.e {'body': 500}", "id": "f3679:c5:m10"} {"signature": "def aggregate(self, query):", "body": "if isinstance(query, AggregateRequest):has_schema = query._with_schemahas_cursor = bool(query._cursor)cmd = [self.AGGREGATE_CMD, self.index_name] + query.build_args()elif isinstance(query, Cursor):has_schema = Falsehas_cursor = Truecmd = [self.CURSOR_CMD, '', self.index_name] + query.build_args()else:raise ValueError('', query)raw = self.redis.execute_command(*cmd)if has_cursor:if isinstance(query, Cursor):query.cid = raw[]cursor = queryelse:cursor = Cursor(raw[])raw = raw[]else:cursor = Noneif query._with_schema:schema = raw[]rows = raw[:]else:schema = Nonerows = raw[:]res = AggregateResult(rows, cursor, schema)return res", "docstring": "Issue an aggregation query\n\n### Parameters\n\n**query**: This can be either an `AggeregateRequest`, or a `Cursor`\n\nAn `AggregateResult` object is returned. You can access the rows from its\n`rows` property, which will always yield the rows of the result", "id": "f3679:c5:m12"} {"signature": "def __init__(self, query_string):", "body": "self._query_string = query_stringself._offset = self._num = self._no_content = Falseself._no_stopwords = Falseself._fields = Noneself._verbatim = Falseself._with_payloads = Falseself._filters = list()self._ids = Noneself._slop = -self._in_order = Falseself._sortby = Noneself._return_fields = []self._summarize_fields = []self._highlight_fields = []self._language = None", "docstring": "Create a new query object. \nThe query string is set in the constructor, and other options have setter functions.", "id": "f3680:c0:m0"} {"signature": "def query_string(self):", "body": "return self._query_string", "docstring": "Return the query string of this query only", "id": "f3680:c0:m1"} {"signature": "def limit_ids(self, *ids):", "body": "self._ids = idsreturn self", "docstring": "Limit the results to a specific set of pre-known document ids of any length", "id": "f3680:c0:m2"} {"signature": "def return_fields(self, *fields):", "body": "self._return_fields = fieldsreturn self", "docstring": "Only return values from these fields", "id": "f3680:c0:m3"} {"signature": "def summarize(self, fields=None, context_len=None, num_frags=None, sep=None):", "body": "args = ['']fields = self._mk_field_list(fields)if fields:args += ['', str(len(fields))] + fieldsif context_len is not None:args += ['', str(context_len)]if num_frags is not None:args += ['', str(num_frags)]if sep is not None:args += ['', sep]self._summarize_fields = argsreturn self", "docstring": "Return an abridged format of the field, containing only the segments of\nthe field which contain the matching term(s).\n\nIf `fields` is specified, then only the mentioned fields are\nsummarized; otherwise all results are summarized.\n\nServer side defaults are used for each option (except `fields`) if not specified\n\n- **fields** List of fields to summarize. 
All fields are summarized if not specified\n- **context_len** Amount of context to include with each fragment\n- **num_frags** Number of fragments per document\n- **sep** Separator string to separate fragments", "id": "f3680:c0:m5"} {"signature": "def highlight(self, fields=None, tags=None):", "body": "args = ['']fields = self._mk_field_list(fields)if fields:args += ['', str(len(fields))] + fieldsif tags:args += [''] + list(tags)self._highlight_fields = argsreturn self", "docstring": "Apply specified markup to matched term(s) within the returned field(s)\n\n- **fields** If specified then only those mentioned fields are highlighted, otherwise all fields are highlighted\n- **tags** A list of two strings to surround the match.", "id": "f3680:c0:m6"} {"signature": "def language(self, language):", "body": "self._language = languagereturn self", "docstring": "Analyze the query as being in the specified language\n:param language: The language (e.g. `chinese` or `english`)", "id": "f3680:c0:m7"} {"signature": "def slop(self, slop):", "body": "self._slop = slopreturn self", "docstring": "Allow a maximum of N intervening non-matched terms between phrase terms (0 means exact phrase)", "id": "f3680:c0:m8"} {"signature": "def in_order(self):", "body": "self._in_order = Truereturn self", "docstring": "Match only documents where the query terms appear in the same order in the document.\ni.e. for the query 'hello world', we do not match 'world hello'", "id": "f3680:c0:m9"} {"signature": "def get_args(self):", "body": "args = [self._query_string]if self._no_content:args.append('')if self._fields:args.append('')args.append(len(self._fields))args += self._fieldsif self._verbatim:args.append('')if self._no_stopwords:args.append('')if self._filters:for flt in self._filters:assert isinstance(flt, Filter)args += flt.argsif self._with_payloads:args.append('')if self._ids:args.append('')args.append(len(self._ids))args += self._idsif self._slop >= :args += ['', self._slop]if self._in_order:args.append('')if self._return_fields:args.append('')args.append(len(self._return_fields))args += self._return_fieldsif self._sortby:assert isinstance(self._sortby, SortbyField)args.append('')args += self._sortby.argsif self._language:args += ['', self._language]args += self._summarize_fields + self._highlight_fieldsargs += [\"\", self._offset, self._num]return args", "docstring": "Format the redis arguments for this query and return them", "id": "f3680:c0:m10"} {"signature": "def paging(self, offset, num):", "body": "self._offset = offsetself._num = numreturn self", "docstring": "Set the paging for the query (defaults to 0..10).\n\n- **offset**: Paging offset for the results. Defaults to 0\n- **num**: How many results do we want", "id": "f3680:c0:m11"} {"signature": "def verbatim(self):", "body": "self._verbatim = Truereturn self", "docstring": "Set the query to be verbatim, i.e. use no query expansion or stemming", "id": "f3680:c0:m12"} {"signature": "def no_content(self):", "body": "self._no_content = Truereturn self", "docstring": "Set the query to only return ids and not the document content", "id": "f3680:c0:m13"} {"signature": "def no_stopwords(self):", "body": "self._no_stopwords = Truereturn self", "docstring": "Prevent the query from being filtered for stopwords. 
\nOnly useful in very big queries that you are certain contain no stopwords.", "id": "f3680:c0:m14"} {"signature": "def with_payloads(self):", "body": "self._with_payloads = Truereturn self", "docstring": "Ask the engine to return document payloads", "id": "f3680:c0:m15"} {"signature": "def limit_fields(self, *fields):", "body": "self._fields = fieldsreturn self", "docstring": "Limit the search to specific TEXT fields only\n\n- **fields**: A list of strings, case sensitive field names from the defined schema", "id": "f3680:c0:m16"} {"signature": "def add_filter(self, flt):", "body": "self._filters.append(flt)return self", "docstring": "Add a numeric or geo filter to the query. \n**Currently only one of each filter is supported by the engine**\n\n- **flt**: A NumericFilter or GeoFilter object, used on a corresponding field", "id": "f3680:c0:m17"} {"signature": "def sort_by(self, field, asc=True):", "body": "self._sortby = SortbyField(field, asc)return self", "docstring": "Add a sortby field to the query\n\n- **field** - the name of the field to sort by\n- **asc** - when `True`, sorting will be done in ascending order", "id": "f3680:c0:m18"} {"signature": "def tags(*t):", "body": "if not t:raise ValueError('')return TagValue(*t)", "docstring": "Indicate that the values should be matched to a tag field\n\n### Parameters\n\n- **t**: Tags to search for", "id": "f3682:m0"} {"signature": "def between(a, b, inclusive_min=True, inclusive_max=True):", "body": "return RangeValue(a, b,inclusive_min=inclusive_min, inclusive_max=inclusive_max)", "docstring": "Indicate that value is a numeric range", "id": "f3682:m1"} {"signature": "def equal(n):", "body": "return between(n, n)", "docstring": "Match a numeric value", "id": "f3682:m2"} {"signature": "def lt(n):", "body": "return between(None, n, inclusive_max=False)", "docstring": "Match any value less than n", "id": "f3682:m3"} {"signature": "def le(n):", "body": "return between(None, n, inclusive_max=True)", "docstring": "Match any value less than or equal to n", "id": "f3682:m4"} {"signature": "def gt(n):", "body": "return between(n, None, inclusive_min=False)", "docstring": "Match any value greater than n", "id": "f3682:m5"} {"signature": "def ge(n):", "body": "return between(n, None, inclusive_min=True)", "docstring": "Match any value greater than or equal to n", "id": "f3682:m6"} {"signature": "def geo(lat, lon, radius, unit=''):", "body": "return GeoValue(lat, lon, radius, unit)", "docstring": "Indicate that value is a geo region", "id": "f3682:m7"} {"signature": "@propertydef combinable(self):", "body": "return False", "docstring": "Whether this type of value may be combined with other values for the same\nfield. This makes the filter potentially more efficient", "id": "f3682:c0:m0"} {"signature": "@staticmethoddef make_value(v):", "body": "if isinstance(v, Value):return vreturn ScalarValue(v)", "docstring": "Convert an object to a value, if it is not a value already", "id": "f3682:c0:m1"} {"signature": "def __init__(self, *children, **kwparams):", "body": "self.params = []kvparams = {}for k, v in kwparams.items():curvals = kvparams.setdefault(k, [])if isinstance(v, (string_types, integer_types, float)):curvals.append(Value.make_value(v))elif isinstance(v, Value):curvals.append(v)else:curvals.extend(Value.make_value(subv) for subv in v)self.params += [Node.to_node(p) for p in children]for k, v in kvparams.items():self.params.extend(self.join_fields(k, v))", "docstring": "Create a node\n\n### Parameters\n\n- **children**: One or more sub-conditions. 
These can be additional\n `intersect`, `disjunct`, `union`, `optional`, or any other `Node`\n type.\n\n The semantics of multiple conditions are dependent on the type of\n query. For an `intersection` node, this amounts to a logical AND,\n for a `union` node, this amounts to a logical `OR`.\n\n- **kwparams**: key-value parameters. Each key is the name of a field,\n and the value should be a field value. This can be one of the\n following:\n\n - Simple string (for text field matches)\n - value returned by one of the helper functions\n - list of either a string or a value\n\n\n### Examples\n\nField `num` should be between 1 and 10\n```\nintersect(num=between(1, 10)\n```\n\nName can either be `bob` or `john`\n\n```\nunion(name=('bob', 'john'))\n```\n\nDon't select countries in Israel, Japan, or US\n\n```\ndisjunct_union(country=('il', 'jp', 'us'))\n```", "id": "f3682:c5:m0"} {"signature": "def __init__(self, res, hascontent, duration=, has_payload = False):", "body": "self.total = res[]self.duration = durationself.docs = []step = if hascontent:step = if has_payload else else:has_payload = Falsefor i in xrange(, len(res), step):id = to_string(res[i])payload = to_string(res[i+]) if has_payload else Nonefields_offset = if has_payload else fields = {} if hascontent:fields = dict(dict(izip(map(to_string, res[i + fields_offset][::]),map(to_string, res[i + fields_offset][::])))) if hascontent else {}try:del fields['']except KeyError:passdoc = Document(id, payload=payload, **fields)self.docs.append(doc)", "docstring": "- **snippets**: An optional dictionary of the form {field: snippet_size} for snippet formatting", "id": "f3683:c0:m0"} {"signature": "def __init__(self, key, host='', port=, conn = None):", "body": "self.key = keyself.redis = conn if conn is not None else Redis(connection_pool = ConnectionPool(host=host, port=port))", "docstring": "Create a new AutoCompleter client for the given key, and optional host and port\n\nIf conn is not None, we employ an already existing redis connection", "id": "f3684:c2:m0"} {"signature": "def add_suggestions(self, *suggestions, **kwargs):", "body": "pipe = self.redis.pipeline()for sug in suggestions:args = [AutoCompleter.SUGADD_COMMAND, self.key, sug.string, sug.score]if kwargs.get(''):args.append(AutoCompleter.INCR)if sug.payload:args.append('')args.append(sug.payload)pipe.execute_command(*args)return pipe.execute()[-]", "docstring": "Add suggestion terms to the AutoCompleter engine. 
Each suggestion has a score and string.\n\nIf kwargs['increment'] is true and the terms are already in the server's dictionary, we increment their scores", "id": "f3684:c2:m1"} {"signature": "def len(self):", "body": "return self.redis.execute_command(AutoCompleter.SUGLEN_COMMAND, self.key)", "docstring": "Return the number of entries in the AutoCompleter index", "id": "f3684:c2:m2"} {"signature": "def delete(self, string):", "body": "return self.redis.execute_command(AutoCompleter.SUGDEL_COMMAND, self.key, string)", "docstring": "Delete a string from the AutoCompleter index.\nReturns 1 if the string was found and deleted, 0 otherwise", "id": "f3684:c2:m3"} {"signature": "def get_suggestions(self, prefix, fuzzy = False, num = , with_scores = False, with_payloads=False):", "body": "args = [AutoCompleter.SUGGET_COMMAND, self.key, prefix, '', num]if fuzzy:args.append(AutoCompleter.FUZZY)if with_scores:args.append(AutoCompleter.WITHSCORES)if with_payloads:args.append(AutoCompleter.WITHPAYLOADS)ret = self.redis.execute_command(*args)results = []if not ret:return resultsparser = SuggestionParser(with_scores, with_payloads, ret)return [s for s in parser]", "docstring": "Get a list of suggestions from the AutoCompleter, for a given prefix\n\n### Parameters:\n- **prefix**: the prefix we are searching. **Must be valid ascii or utf-8**\n- **fuzzy**: If set to true, the prefix search is done in fuzzy mode. \n **NOTE**: Running fuzzy searches on short (<3 letters) prefixes can be very slow, and even scan the entire index.\n- **with_scores**: if set to true, we also return the (refactored) score of each suggestion. \n This is normally not needed, and is NOT the original score inserted into the index\n- **with_payloads**: Return suggestion payloads\n- **num**: The maximum number of results we return. Note that we might return less. The algorithm trims irrelevant suggestions.\n\nReturns a list of Suggestion objects. If with_scores was False, the score of all suggestions is 1.", "id": "f3684:c2:m4"} {"signature": "def alias(self, alias):", "body": "if alias is FIELDNAME:if not self._field:raise ValueError(\"\")alias = self._field[:]self._alias = aliasreturn self", "docstring": "Set the alias for this reducer.\n\n### Parameters\n\n- **alias**: The value of the alias for this reducer. If this is the\n special value `aggregation.FIELDNAME` then this reducer will be\n aliased using the same name as the field upon which it operates.\n Note that using `FIELDNAME` is only possible on reducers which\n operate on a single field value.\n\nThis method returns the `Reducer` object making it suitable for\nchaining.", "id": "f3685:c1:m1"} {"signature": "def __init__(self, query=''):", "body": "self._query = queryself._groups = []self._projections = []self._loadfields = []self._limit = Limit()self._sortby = []self._max = self._with_schema = Falseself._verbatim = Falseself._cursor = []", "docstring": "Create an aggregation request. This request may then be passed to\n`client.aggregate()`.\n\nIn order for the request to be usable, it must contain at least one\ngroup.\n\n- **query** Query string for filtering records.\n\nAll member methods (except `build_args()`)\nreturn the object itself, making them useful for chaining.", "id": "f3685:c6:m0"} {"signature": "def load(self, *fields):", "body": "self._loadfields.extend(fields)return self", "docstring": "Indicate the fields to be returned in the response. 
These fields are\nreturned in addition to any others implicitly specified.\n\n### Parameters\n\n- **fields**: One or more fields in the format of `@field`", "id": "f3685:c6:m1"} {"signature": "def group_by(self, fields, *reducers):", "body": "group = Group(fields, reducers)self._groups.append(group)return self", "docstring": "Specify by which fields to group the aggregation.\n\n### Parameters\n\n- **fields**: Fields to group by. This can either be a single string,\n or a list of strings. both cases, the field should be specified as\n `@field`.\n- **reducers**: One or more reducers. Reducers may be found in the\n `aggregation` module.", "id": "f3685:c6:m2"} {"signature": "def apply(self, **kwexpr):", "body": "for alias, expr in kwexpr.items():self._projections.append([alias, expr])return self", "docstring": "Specify one or more projection expressions to add to each result\n\n### Parameters\n\n- **kwexpr**: One or more key-value pairs for a projection. The key is\n the alias for the projection, and the value is the projection\n expression itself, for example `apply(square_root=\"sqrt(@foo)\")`", "id": "f3685:c6:m3"} {"signature": "def limit(self, offset, num):", "body": "limit = Limit(offset, num)if self._groups:self._groups[-].limit = limitelse:self._limit = limitreturn self", "docstring": "Sets the limit for the most recent group or query.\n\nIf no group has been defined yet (via `group_by()`) then this sets\nthe limit for the initial pool of results from the query. Otherwise,\nthis limits the number of items operated on from the previous group.\n\nSetting a limit on the initial search results may be useful when\nattempting to execute an aggregation on a sample of a large data set.\n\n### Parameters\n\n- **offset**: Result offset from which to begin paging\n- **num**: Number of results to return\n\n\nExample of sorting the initial results:\n\n```\nAggregateRequest('@sale_amount:[10000, inf]')\\\n .limit(0, 10)\\\n .group_by('@state', r.count())\n```\n\nWill only group by the states found in the first 10 results of the\nquery `@sale_amount:[10000, inf]`. On the other hand,\n\n```\nAggregateRequest('@sale_amount:[10000, inf]')\\\n .limit(0, 1000)\\\n .group_by('@state', r.count()\\\n .limit(0, 10)\n```\n\nWill group all the results matching the query, but only return the\nfirst 10 groups.\n\nIf you only wish to return a *top-N* style query, consider using\n`sort_by()` instead.", "id": "f3685:c6:m4"} {"signature": "def sort_by(self, *fields, **kwargs):", "body": "self._max = kwargs.get('', )if isinstance(fields, (string_types, SortDirection)):fields = [fields]for f in fields:if isinstance(f, SortDirection):self._sortby += [f.field, f.DIRSTRING]else:self._sortby.append(f)return self", "docstring": "Indicate how the results should be sorted. This can also be used for\n*top-N* style queries\n\n### Parameters\n\n- **fields**: The fields by which to sort. This can be either a single\n field or a list of fields. If you wish to specify order, you can\n use the `Asc` or `Desc` wrapper classes.\n- **max**: Maximum number of results to return. 
This can be used instead\n of `LIMIT` and is also faster.\n\n\nExample of sorting by `foo` ascending and `bar` descending:\n\n```\nsort_by(Asc('@foo'), Desc('@bar'))\n```\n\nReturn the top 10 customers:\n\n```\nAggregateRequest()\\\n .group_by('@customer', r.sum('@paid').alias(FIELDNAME))\\\n .sort_by(Desc('@paid'), max=10)\n```", "id": "f3685:c6:m5"} {"signature": "def with_schema(self):", "body": "self._with_schema = Truereturn self", "docstring": "If set, the `schema` property will contain a list of `[field, type]`\nentries in the result object.", "id": "f3685:c6:m6"} {"signature": "def reload(self, *fields, **kwargs):", "body": "max_depth = if fields and isinstance(fields[], int):max_depth = fields[]fields = fields[:]elif \"\" in kwargs:max_depth = kwargs[\"\"]if not self.pk:raise self.DoesNotExist(\"\")obj = self._qs.read_preference(ReadPreference.PRIMARY).filter(**self._object_key).only(*fields).limit().select_related(max_depth=max_depth)if obj:obj = obj[]else:raise self.DoesNotExist(\"\")for field in self._fields_ordered:if not fields or field in fields:try:setattr(self, field, self._reload(field, obj[field]))except KeyError:delattr(self, field)if not fields:self._changed_fields = obj._changed_fieldselse:for field in fields:field = self._db_field_map.get(field, field)if field in self._changed_fields:self._changed_fields.remove(field)self._created = Falsereturn self", "docstring": "Reloads all attributes from the database.\n :param fields: (optional) args list of fields to reload\n :param max_depth: (optional) depth of dereferencing to follow\n .. versionadded:: 0.1.2\n .. versionchanged:: 0.6 Now chainable\n .. versionchanged:: 0.9 Can provide specific fields to reload", "id": "f3693:c2:m2"} {"signature": "def create_user(self, **kwargs):", "body": "defaults = {'': self.get_random_string(),'': '','': self.get_random_string(),}defaults.update(kwargs)return User.objects.create_user(**defaults)", "docstring": "Factory method for creating Users.", "id": "f3697:c0:m3"} {"signature": "def require_template_debug(f):", "body": "def _(*args, **kwargs):TEMPLATE_DEBUG = getattr(settings, '', False)return f(*args, **kwargs) if TEMPLATE_DEBUG else ''return _", "docstring": "Decorated function is a no-op if TEMPLATE_DEBUG is False", "id": "f3701:m0"} {"signature": "def _display_details(var_data):", "body": "meta_keys = (key for key in list(var_data.keys())if key.startswith(''))for key in meta_keys:display_key = key[:].capitalize()pprint(''.format(display_key, var_data.pop(key)))pprint(var_data)", "docstring": "Given a dictionary of variable attribute data from get_details, display the\ndata in the terminal.", "id": "f3701:m1"} {"signature": "@require_template_debug@register.simple_tag(takes_context=True)def variables(context):", "body": "availables = get_variables(context)pprint(availables)return availables", "docstring": "Given a context, return a flat list of variables available in the context.", "id": "f3701:m2"} {"signature": "@require_template_debug@register.simple_tagdef attributes(var):", "body": "attrs = get_attributes(var)pprint(attrs)return attrs", "docstring": "Given a variable in the template's context, print and return the list of\nattributes that are accessible inside of the template. 
For example, private\nattributes or callables that require arguments are excluded.", "id": "f3701:m3"} {"signature": "@require_template_debug@register.simple_tagdef details(var):", "body": "var_details = get_details(var)_display_details(var_details)return var_details", "docstring": "Prints a dictionary showing the attributes of a variable, and if possible,\ntheir corresponding values.", "id": "f3701:m4"} {"signature": "@require_template_debug@register.simple_tag(takes_context=True)def set_trace(context):", "body": "try:import ipdb as pdbexcept ImportError:import pdbprint(\"\")print(\"\")render = lambda s: template.Template(s).render(context)availables = get_variables(context)pprint(availables)print('')print('')print('')for var in availables:locals()[var] = context[var]pdb.set_trace()return ''", "docstring": "Start a pdb set_trace inside of the template with the context available as\n'context'. Uses ipdb if available.", "id": "f3701:m5"} {"signature": "@require_template_debug@register.simple_tag(takes_context=True)def pydevd(context):", "body": "global pdevd_not_availableif pdevd_not_available:return ''try:import pydevdexcept ImportError:pdevd_not_available = Truereturn ''render = lambda s: template.Template(s).render(context)availables = get_variables(context)for var in availables:locals()[var] = context[var]try:pydevd.settrace()except socket.error:pdevd_not_available = Truereturn ''", "docstring": "Start a pydev settrace", "id": "f3701:m6"} {"signature": "def _flatten(iterable):", "body": "for i in iterable:if isinstance(i, Iterable) and not isinstance(i, string_types):for sub_i in _flatten(i):yield sub_ielse:yield i", "docstring": "Given an iterable with nested iterables, generate a flat iterable", "id": "f3703:m0"} {"signature": "def get_variables(context):", "body": "return sorted(set(_flatten(context.dicts)))", "docstring": "Given a context, return a sorted list of variable names in the context", "id": "f3703:m1"} {"signature": "def get_details(var):", "body": "var_data = {}module = getattr(var, '', '')kls = getattr(getattr(var, '', ''), '', '')if module:var_data[''] = moduleif kls:var_data[''] = klsfor attr in get_attributes(var):value = _get_detail_value(var, attr)if value is not None:var_data[attr] = valuereturn var_data", "docstring": "Given a variable inside the context, obtain the attributes/callables,\ntheir values where possible, and the module name and class name if possible", "id": "f3703:m2"} {"signature": "def _get_detail_value(var, attr):", "body": "value = getattr(var, attr)kls = getattr(getattr(value, '', ''), '', '')if kls in ('', '', ''):return klsif callable(value):return ''return value", "docstring": "Given a variable and one of its attributes that are available inside of\na template, return its 'method' if it is a callable, its class name if it\nis a model manager, otherwise return its value", "id": "f3703:m3"} {"signature": "def get_attributes(var):", "body": "is_valid = partial(is_valid_in_template, var)return list(filter(is_valid, dir(var)))", "docstring": "Given a variable, return the list of attributes that are available inside\nof a template", "id": "f3703:m4"} {"signature": "def is_valid_in_template(var, attr):", "body": "if attr.startswith(''):return Falsetry:value = getattr(var, attr)except:return Falseif isroutine(value):if getattr(value, '', False):return Falseelse:try:argspec = getargspec(value)num_args = len(argspec.args) if argspec.args else num_defaults = len(argspec.defaults) if argspec.defaults else if num_args - num_defaults > :return Falseexcept 
TypeError:passreturn True", "docstring": "Given a variable and one of its attributes, determine if the attribute is\naccessible inside of a Django template and return True or False accordingly", "id": "f3703:m5"} {"signature": "def read_file(filename):", "body": "path = os.path.abspath(os.path.dirname(__file__))filepath = os.path.join(path, filename)try:return open(filepath).read()except IOError:return ''", "docstring": "Read a file into a string", "id": "f3709:m0"} {"signature": "def volcano(differential_dfs, title='', scripts_mode=\"\", data_mode=\"\",organism=\"\", q_value_column_name=\"\", log2FC_column_name=\"\",output_dir=\"\", filename=\"\", version=this_version):", "body": "output_dir = Path(output_dir)output_dir.mkdir(exist_ok=True, parents=True)if isinstance(differential_dfs, pd.DataFrame):differential_dfs = {'': differential_dfs}for name, df in differential_dfs.items():df = df[[q_value_column_name, log2FC_column_name]]df.columns = ['', '']df = df.round()_verify_differential_df(df)del differential_dfs[name]differential_dfs[_sanitize(name)] = dfnames_and_differentials = f\"\"data_block = _data_block(data_mode, [('', names_and_differentials)], output_dir, include_gene_sets=False, organism=organism)scripts = third_party_scripts + [CDN_url(version)+\"\", CDN_url(version)+\"\", CDN_url(version)+\"\"]scripts_block = _scripts_block(scripts, scripts_mode, output_dir)html = templateEnv.get_template('').render(title=title, scripts_block=scripts_block+''+data_block, organism=\"\")(output_dir / filename).write_text(html)return (output_dir / filename).resolve()", "docstring": "Arguments:\n differential_dfs (dict or pandas.DataFrame): python dict of names to pandas dataframes, or a single dataframe, indexed by gene symbols which must have columns named log2FC and qval.\n title (str): The title of the plot (to be embedded in the html).\n scripts_mode (str): Choose from [`\"CDN\"`, `\"directory\"`, `\"inline\"`]:\n\n - `\"CDN\"` compiles a single HTML page with links to scripts hosted on a CDN,\n\n - `\"directory\"` compiles a directory with all scripts locally cached,\n\n - `\"inline\"` compiles a single HTML file with all scripts/styles inlined.\n\n data_mode (str): Choose from [\"directory\", \"inline\"]:\n\n - \"directory\" compiles a directory with all data locally cached,\n\n - \"inline\" compiles a single HTML file with all data inlined.\n\n organism (str): `\"human\"` or `\"mouse\"`\n q_value_column_name (str):\n log2FC_column_name (str):\n output_dir (str): the directory in which to output the file\n filename (str): the filename of the output file\n version (str): the version of the javascripts to use.\n Leave the default to pin the version, or choose \"latest\" to get updates,\n or choose part of the version string to get minor updates.\nReturns:\n Path: The filepath which the html was outputted to.", "id": "f3712:m9"} {"signature": "def bar(differential_dfs, title='', scripts_mode=\"\", data_mode=\"\",organism=\"\", q_value_column_name=\"\", log2FC_column_name=\"\",output_dir=\"\", filename=\"\", version=this_version):", "body": "output_dir = Path(output_dir)output_dir.mkdir(exist_ok=True, parents=True)if isinstance(differential_dfs, pd.DataFrame):differential_dfs = {'': differential_dfs}for name, df in differential_dfs.items():df = df[[q_value_column_name, log2FC_column_name]]df.columns = ['', '']df = df.round()_verify_differential_df(df)del differential_dfs[name]differential_dfs[_sanitize(name)] = dfnames_and_differentials = f\"\"data_block = _data_block(data_mode, [('', 
names_and_differentials)], output_dir, organism=organism)scripts = third_party_scripts + [ CDN_url(version)+\"\", CDN_url(version)+\"\"]scripts_block = _scripts_block(scripts, scripts_mode, output_dir)html = templateEnv.get_template('').render(title=title, scripts_block=scripts_block+''+data_block)(output_dir / filename).write_text(html)return (output_dir / filename).resolve()", "docstring": "Arguments:\n differential_dfs (dict or pandas.DataFrame): python dict of names to pandas dataframes, or a single dataframe, indexed by gene symbols which must have columns named log2FC and qval.\n title (str): The title of the plot (to be embedded in the html).\n scripts_mode (str): Choose from [`\"CDN\"`, `\"directory\"`, `\"inline\"`]:\n\n - `\"CDN\"` compiles a single HTML page with links to scripts hosted on a CDN,\n\n - `\"directory\"` compiles a directory with all scripts locally cached,\n\n - `\"inline\"` compiles a single HTML file with all scripts/styles inlined.\n\n data_mode (str): Choose from [\"directory\", \"inline\"]:\n\n - \"directory\" compiles a directory with all data locally cached,\n\n - \"inline\" compiles a single HTML file with all data inlined.\n\n organism (str): `\"human\"` or `\"mouse\"`\n q_value_column_name (str):\n log2FC_column_name (str):\n output_dir (str): the directory in which to output the file\n filename (str): the filename of the output file\n version (str): the version of the javascripts to use.\n Leave the default to pin the version, or choose \"latest\" to get updates,\n or choose part of the version string to get minor updates.\nReturns:\n Path: The filepath which the html was outputted to.", "id": "f3712:m10"} {"signature": "def braid(genes_by_samples_matrix, sample_attributes, title='', scripts_mode=\"\", data_mode=\"\",organism=\"\", output_dir=\"\", filename=\"\", version=this_version):", "body": "output_dir = Path(output_dir)output_dir.mkdir(exist_ok=True, parents=True)_verify_sample_by_genes_matrix(genes_by_samples_matrix)_verify_sample_attributes(genes_by_samples_matrix, sample_attributes)genes_by_samples_matrix = genes_by_samples_matrix.round()matrix = f\"\"classes = f\"\"data_block = _data_block(data_mode, [('', matrix), ('', classes)], output_dir, organism=organism)scripts = third_party_scripts + [CDN_url(version)+\"\", CDN_url(version)+\"\", CDN_url(version)+\"\"]scripts_block = _scripts_block(scripts, scripts_mode, output_dir)html = templateEnv.get_template('').render(title=title, scripts_block=scripts_block+''+data_block)(output_dir / filename).write_text(html)return (output_dir / filename).resolve()", "docstring": "Arguments:\n genes_by_samples_matrix (pandas.DataFrame): dataframe indexed by genes, columns are samples\n sample_attributes (pandas.DataFrame): dataframe indexed by samples, columns are sample attributes (e.g. 
classes)\n title (str): The title of the plot (to be embedded in the html).\n scripts_mode (str): Choose from [`\"CDN\"`, `\"directory\"`, `\"inline\"`]:\n\n - `\"CDN\"` compiles a single HTML page with links to scripts hosted on a CDN,\n\n - `\"directory\"` compiles a directory with all scripts locally cached,\n\n - `\"inline\"` compiles a single HTML file with all scripts/styles inlined.\n\n data_mode (str): Choose from [\"directory\", \"inline\"]:\n\n - \"directory\" compiles a directory with all data locally cached,\n\n - \"inline\" compiles a single HTML file with all data inlined.\n\n organism (str): `\"human\"` or `\"mouse\"`\n output_dir (str): the directory in which to output the file\n filename (str): the filename of the output file\n version (str): the version of the javascripts to use.\n Leave the default to pin the version, or choose \"latest\" to get updates,\n or choose part of the version string to get minor updates.\nReturns:\n Path: The filepath which the html was outputted to.", "id": "f3712:m11"} {"signature": "def heatmap(genes_by_samples_matrix, sample_attributes, title='', scripts_mode=\"\", data_mode=\"\",organism=\"\", separate_zscore_by=[\"\"],output_dir=\"\", filename=\"\", version=this_version):", "body": "output_dir = Path(output_dir)output_dir.mkdir(exist_ok=True, parents=True)_verify_sample_by_genes_matrix(genes_by_samples_matrix)_verify_sample_attributes(genes_by_samples_matrix, sample_attributes)genes_by_samples_matrix = genes_by_samples_matrix.round()matrix = f\"\"classes = f\"\"data_block = _data_block(data_mode, [('', matrix), ('', classes)], output_dir, organism=organism)scripts = third_party_scripts + [CDN_url(version)+\"\", CDN_url(version)+\"\", CDN_url(version)+\"\"]scripts_block = _scripts_block(scripts, scripts_mode, output_dir)html = templateEnv.get_template('').render(title=title, scripts_block=scripts_block+''+data_block, separate_zscore_by=separate_zscore_by)(output_dir / filename).write_text(html)return (output_dir / filename).resolve()", "docstring": "Arguments:\n genes_by_samples_matrix (pandas.DataFrame): dataframe indexed by genes, columns are samples\n sample_attributes (pandas.DataFrame): dataframe indexed by samples, columns are sample attributes (e.g. 
classes)\n title (str): The title of the plot (to be embedded in the html).\n scripts_mode (str): Choose from [`\"CDN\"`, `\"directory\"`, `\"inline\"`]:\n\n - `\"CDN\"` compiles a single HTML page with links to scripts hosted on a CDN,\n\n - `\"directory\"` compiles a directory with all scripts locally cached,\n\n - `\"inline\"` compiles a single HTML file with all scripts/styles inlined.\n\n data_mode (str): Choose from [\"directory\", \"inline\"]:\n\n - \"directory\" compiles a directory with all data locally cached,\n\n - \"inline\" compiles a single HTML file with all data inlined.\n\n organism (str): `\"human\"` or `\"mouse\"`\n separate_zscore_by (list):\n output_dir (str): the directory in which to output the file\n filename (str): the filename of the output file\n version (str): the version of the javascripts to use.\n Leave the default to pin the version, or choose \"latest\" to get updates,\n or choose part of the version string to get minor updates.\nReturns:\n Path: The filepath which the html was outputted to.", "id": "f3712:m12"} {"signature": "def graph(networkx_graph, title='', scripts_mode=\"\", data_mode=\"\",output_dir=\"\", filename=\"\", version=this_version):", "body": "output_dir = Path(output_dir)output_dir.mkdir(exist_ok=True, parents=True)scripts = third_party_scripts + [CDN_url(version)+\"\", CDN_url(version)+\"\"]scripts_block = _scripts_block(scripts, scripts_mode, output_dir)graph_json = nx_json.node_link_data(networkx_graph)for node in graph_json['']:for attr, val in node.items():if isinstance(val, numbers.Number):node[attr] = round(val, )for link in graph_json['']:for attr, val in link.items():if isinstance(val, numbers.Number):link[attr] = round(val, )graph_json = f\"\"data_block = _data_block(data_mode, [('', graph_json)], output_dir)html = templateEnv.get_template('').render(title=title, scripts_block=scripts_block+''+data_block, nodes=networkx_graph.nodes())(output_dir / filename).write_text(html)return (output_dir / filename).resolve()", "docstring": "Arguments:\n networkx_graph (networkx.Graph): any instance of networkx.Graph\n title (str): The title of the plot (to be embedded in the html).\n scripts_mode (str): Choose from [`\"CDN\"`, `\"directory\"`, `\"inline\"`]:\n\n - `\"CDN\"` compiles a single HTML page with links to scripts hosted on a CDN,\n\n - `\"directory\"` compiles a directory with all scripts locally cached,\n\n - `\"inline\"` compiles a single HTML file with all scripts/styles inlined.\n\n data_mode (str): Choose from [\"directory\", \"inline\"]:\n\n - \"directory\" compiles a directory with all data locally cached,\n\n - \"inline\" compiles a single HTML file with all data inlined.\n\n output_dir (str): the directory in which to output the file\n filename (str): the filename of the output file\n version (str): the version of the javascripts to use.\n Leave the default to pin the version, or choose \"latest\" to get updates,\n or choose part of the version string to get minor updates.\nReturns:\n Path: The filepath which the html was outputted to.", "id": "f3712:m13"} {"signature": "def version(self):", "body": "ver = Version()ver.conn = self.connver.attrs = {'': self.attrs[''],}ver.save()return ver", "docstring": "Create a new version under this service.", "id": "f3722:c1:m2"} {"signature": "def vcl(self, name, content):", "body": "vcl = VCL()vcl.conn = self.connvcl.attrs = {'': self.attrs[''],'': self.attrs[''],'': name,'': content,}vcl.save()return vcl", "docstring": "Create a new VCL under this version.", "id": "f3722:c2:m8"} 
{"signature": "def _setVirtualEnv():", "body": "try:activate = options.virtualenv.activate_cmdexcept AttributeError:activate = Noneif activate is None:virtualenv = path(os.environ.get('', ''))if not virtualenv:virtualenv = options.paved.cwdelse:virtualenv = path(virtualenv)activate = virtualenv / '' / ''if activate.exists():info('' % activate)options.setdotted('', '' % activate)", "docstring": "Attempt to set the virtualenv activate command, if it hasn't been specified.", "id": "f3724:m0"} {"signature": "def rmFilePatterns(*patterns, **kwargs):", "body": "kwargs[''] = ''kwargs[''] = ''return _walkWithAction(*patterns, **kwargs)", "docstring": "Remove all files under the given path with the given patterns.", "id": "f3724:m2"} {"signature": "def rmDirPatterns(*patterns, **kwargs):", "body": "kwargs[''] = ''kwargs[''] = ''return _walkWithAction(*patterns, **kwargs)", "docstring": "Remove all directories under the current path with the given patterns.", "id": "f3724:m3"} {"signature": "def shv(command, capture=False, ignore_error=False, cwd=None):", "body": "_setVirtualEnv()try:command = \"\" % (options.virtualenv.activate_cmd, command)except AttributeError:passreturn bash(command, capture=capture, ignore_error=ignore_error, cwd=cwd)", "docstring": "Run the given command inside the virtual environment, if available:", "id": "f3724:m5"} {"signature": "def update(dst, src):", "body": "stack = [(dst, src)]def isdict(o):return hasattr(o, '')while stack:current_dst, current_src = stack.pop()for key in current_src:if key not in current_dst:current_dst[key] = current_src[key]else:if isdict(current_src[key]) and isdict(current_dst[key]):stack.append((current_dst[key], current_src[key]))else:current_dst[key] = current_src[key]return dst", "docstring": "Recursively update the destination dict-like object with the source dict-like object.\n\n Useful for merging options and Bunches together!\n\n Based on:\n http://code.activestate.com/recipes/499335-recursively-update-a-dictionary-without-hitting-py/#c1", "id": "f3724:m6"} {"signature": "def pip_install(*args):", "body": "download_cache = ('' % options.paved.pip.download_cache) if options.paved.pip.download_cache else ''shv('' % (download_cache, ''.join(args)))", "docstring": "Send the given arguments to `pip install`.", "id": "f3724:m7"} {"signature": "def easy_install(*args):", "body": "shv('' % (''.join(args)))", "docstring": "Send the given arguments to `easy_install`.", "id": "f3724:m8"} {"signature": "@task@consume_argsdef manage(args):", "body": "args = ''.join(args) if args else \"\"call_manage(args)", "docstring": "Run the provided commands against Django's manage.py\n\n `options.paved.django.settings`: the dotted path to the django\n project module containing settings.\n\n `options.paved.django.manage_py`: the path where the django\n project's `manage.py` resides.", "id": "f3725:m0"} {"signature": "def call_manage(cmd, capture=False, ignore_error=False):", "body": "settings = (options.paved.django.settings oros.environ.get(''))if settings is None:raise BuildFailure(\"\")manage_py = options.paved.django.manage_pyif manage_py is None:manage_py = ''else:manage_py = path(manage_py)manage_py = ''.format(**locals())return util.shv(''.format(**locals()), capture=capture, ignore_error=ignore_error)", "docstring": "Utility function to run commands against Django's `django-admin.py`/`manage.py`.\n\n `options.paved.django.project`: the path to the django project\n files (where `settings.py` typically resides).\n Will fall back to a DJANGO_SETTINGS_MODULE 
environment variable.\n\n `options.paved.django.manage_py`: the path where the django\n project's `manage.py` resides.", "id": "f3725:m1"} {"signature": "@task@consume_argsdef syncdb(args):", "body": "cmd = args and '' % ''.join(options.args) or ''call_manage(cmd)for fixture in options.paved.django.syncdb.fixtures:call_manage(\"\" % fixture)", "docstring": "Update the database with model schema. Shorthand for `paver manage syncdb`.", "id": "f3725:m3"} {"signature": "@taskdef shell(info):", "body": "cmd = ''try:import django_extensionscmd = ''except ImportError:info(\"\")call_manage(cmd)", "docstring": "Run the ipython shell. Shorthand for `paver manage shell`.\n\n Uses `django_extensions `, if\n available, to provide `shell_plus`.", "id": "f3725:m4"} {"signature": "@taskdef start(info):", "body": "cmd = options.paved.django.runserverif cmd == '':try:import django_extensionsexcept ImportError:info(\"\")cmd = ''port = options.paved.django.runserver_portif port:cmd = '' % (cmd, port)call_manage(cmd)", "docstring": "Run the dev server.\n\n Uses `django_extensions `, if\n available, to provide `runserver_plus`.\n\n Set the command to use with `options.paved.django.runserver`\n Set the port to use with `options.paved.django.runserver_port`", "id": "f3725:m5"} {"signature": "@task@consume_argsdef schema(args):", "body": "try:import southcmd = args and '' % ''.join(options.args) or ''call_manage(cmd)except ImportError:error('')", "docstring": "Run South's schemamigration command.", "id": "f3725:m6"} {"signature": "@task@consume_argsdef migrate(args):", "body": "try:import southcmd = args and '' % ''.join(options.args) or ''call_manage(cmd)except ImportError:error('')", "docstring": "Run South's migrate command.", "id": "f3725:m7"} {"signature": "@taskdef sloccount():", "body": "setup = options.get('')packages = options.get('') if setup else Noneif packages:dirs = [x for x in packages if '' not in x]else:dirs = ['']ls=[]for d in dirs:ls += list(path(d).walkfiles())files=''.join(ls)param=options.paved.pycheck.sloccount.paramsh(''.format(param=param, files=files))", "docstring": "Print \"Source Lines of Code\" and export to file.\n\n Export is hudson_ plugin_ compatible: sloccount.sc\n\n requirements:\n - sloccount_ should be installed.\n - tee and pipes are used\n\n options.paved.pycheck.sloccount.param\n\n .. _sloccount: http://www.dwheeler.com/sloccount/\n .. _hudson: http://hudson-ci.org/\n .. _plugin: http://wiki.hudson-ci.org/display/HUDSON/SLOCCount+Plugin", "id": "f3726:m0"} {"signature": "@taskdef findimports():", "body": "packages = [x for x in options.setup.packages if '' not in x]sh(''.format(param=options.paved.pycheck.findimports.param, files=''.join(packages)))", "docstring": "print python module dependencies by findimports.\n\n requirements:\n - findimports_ should be installed. ``easy_install findimports``\n\n options.paved.pycheck.findimports.param\n\n .. _findimports: http://pypi.python.org/pypi/findimports", "id": "f3726:m1"} {"signature": "@taskdef pyflakes():", "body": "packages = [x for x in options.setup.packages if '' not in x]sh(''.format(param=options.paved.pycheck.pyflakes.param, files=''.join(packages)))", "docstring": "passive check of python programs by pyflakes.\n\n requirements:\n - pyflakes_ should be installed. ``easy_install pyflakes``\n\n options.paved.pycheck.pyflakes.param\n\n .. 
_pyflakes: http://pypi.python.org/pypi/pyflakes", "id": "f3726:m2"} {"signature": "@taskdef pychecker():", "body": "packages = [x for x in options.setup.packages if '' not in x]sh(''.format(param=options.paved.pycheck.pychecker.param, files=''.join(packages)))", "docstring": "check of python programs by pychecker.\n\n requirements:\n - pychecker_ should be installed.\n\n options.paved.pycheck.pychecker.param\n\n .. _pychecker: http://pychecker.sourceforge.net/", "id": "f3726:m3"} {"signature": "@taskdef nose():", "body": "sh(''.format(param=options.paved.pycheck.nose.param))", "docstring": "Run unit tests using nosetests.\n\n requirements:\n - nose_ should be installed.\n\n options.paved.pycheck.nose.param\n\n .. _nose: http://somethingaboutorange.com/mrl/projects/nose/1.0.0/", "id": "f3726:m4"} {"signature": "@task@needs('', '', '', '', '')def pycheckall():", "body": "", "docstring": "All pycheck tasks.", "id": "f3726:m5"} {"signature": "@task@needs('')def clean(options, info):", "body": "info(\"\", options.paved.clean.patterns)for wd in options.paved.clean.dirs:info(\"\", wd)for p in options.paved.clean.patterns:for f in wd.walkfiles(p):f.remove()", "docstring": "Clean up extra files littering the source tree.\n\n options.paved.clean.dirs: directories to search recursively\n options.paved.clean.patterns: patterns to search for and remove", "id": "f3727:m0"} {"signature": "@taskdef printoptions():", "body": "x = json.dumps(environment.options,indent=,sort_keys=True,skipkeys=True,cls=MyEncoder)print(x)", "docstring": "print paver options.\n\n Prettified by json.\n `long_description` is removed", "id": "f3727:m1"} {"signature": "def open_s3(bucket):", "body": "conn = boto.connect_s3(options.paved.s3.access_id, options.paved.s3.secret)try:bucket = conn.get_bucket(bucket)except boto.exception.S3ResponseError:bucket = conn.create_bucket(bucket)return bucket", "docstring": "Opens connection to S3 returning bucket and key", "id": "f3729:m0"} {"signature": "def upload_s3(file_path, bucket_name, file_key, force=False, acl=''):", "body": "file_path = path(file_path)bucket = open_s3(bucket_name)if file_path.isdir():paths = file_path.listdir()paths_keys = list(zip(paths, ['' % (file_key, p.name) for p in paths]))else:paths_keys = [(file_path, file_key)]for p, k in paths_keys:headers = {}s3_key = bucket.get_key(k)if not s3_key:from boto.s3.key import Keys3_key = Key(bucket, k)content_type = mimetypes.guess_type(p)[]if content_type:headers[''] = content_typefile_size = p.stat().st_sizefile_data = p.bytes()file_md5, file_md5_64 = s3_key.get_md5_from_hexdigest(hashlib.md5(file_data).hexdigest())if s3_key.etag:s3_md5 = s3_key.etag.replace('', '')if s3_md5 == file_md5:info('' % file_path)continueelif not force:s3_datetime = datetime.datetime(*time.strptime(s3_key.last_modified, '')[:])local_datetime = datetime.datetime.utcfromtimestamp(p.stat().st_mtime)if local_datetime < s3_datetime:info(\"\"\"\" % (file_key))continueinfo(\"\" % (file_key))try:s3_key.set_contents_from_string(file_data, headers, policy=acl, replace=True, md5=(file_md5, file_md5_64))except Exception as e:error(\"\" % e)raise", "docstring": "Upload a local file to S3.", "id": "f3729:m1"} {"signature": "def download_s3(bucket_name, file_key, file_path, force=False):", "body": "file_path = path(file_path)bucket = open_s3(bucket_name)file_dir = file_path.dirname()file_dir.makedirs()s3_key = bucket.get_key(file_key)if file_path.exists():file_data = file_path.bytes()file_md5, file_md5_64 = 
s3_key.get_md5_from_hexdigest(hashlib.md5(file_data).hexdigest())try:s3_md5 = s3_key.etag.replace('', '')except KeyError:passelse:if s3_md5 == file_md5:info('' % file_path)returnelif not force:s3_datetime = datetime.datetime(*time.strptime(s3_key.last_modified, '')[:])local_datetime = datetime.datetime.utcfromtimestamp(file_path.stat().st_mtime)if s3_datetime < local_datetime:info(\"\" % (file_key))returninfo(\"\" % (file_key))try:with open(file_path, '') as fo:s3_key.get_contents_to_file(fo)except Exception as e:error(\"\" % e)raise", "docstring": "Download a remote file from S3.", "id": "f3729:m2"} {"signature": "def sphinx_make(*targets):", "body": "sh('' % ''.join(targets), cwd=options.paved.docs.path)", "docstring": "Call the Sphinx Makefile with the specified targets.\n\n `options.paved.docs.path`: the path to the Sphinx folder (where the Makefile resides).", "id": "f3730:m0"} {"signature": "@taskdef docs():", "body": "sphinx_make(*options.paved.docs.targets)", "docstring": "Make Sphinx docs.\n\n `options.paved.docs.path`: the path to the Sphinx folder (where the Makefile resides).\n\n `options.paved.docs.targets`: the Make targets to send to `sphinx_make`. Default is `html`.", "id": "f3730:m1"} {"signature": "@taskdef clean_docs():", "body": "sphinx_make('')", "docstring": "Clean Sphinx docs.\n\n `options.paved.docs.path`: the path to the Sphinx folder (where the Makefile resides).", "id": "f3730:m2"} {"signature": "@task@needs('')def rsync_docs():", "body": "assert options.paved.docs.rsync_location, \"\"sh('' % (path(options.paved.docs.path) / options.paved.docs.build_rel,options.paved.docs.rsync_location))", "docstring": "Upload the docs to a remote location via rsync.\n\n `options.paved.docs.rsync_location`: the target location to rsync files to.\n\n `options.paved.docs.path`: the path to the Sphinx folder (where the Makefile resides).\n\n `options.paved.docs.build_rel`: the path of the documentation\n build folder, relative to `options.paved.docs.path`.", "id": "f3730:m3"} {"signature": "@taskdef ghpages():", "body": "opts = optionsdocroot = path(opts.get('', ''))if not docroot.exists():raise BuildFailure(\"\"% docroot)builddir = docroot / opts.get(\"\", \"\")builddir=builddir / ''if not builddir.exists():raise BuildFailure(\"\"% builddir)nojekyll = path(builddir) / ''nojekyll.touch()sh('' % (builddir))", "docstring": "Push Sphinx docs to github_ gh-pages branch.\n\n 1. Create file .nojekyll\n 2. Push the branch to origin/gh-pages\n after committing using ghp-import_\n\n Requirements:\n - easy_install ghp-import\n\n Options:\n - `options.paved.docs.*` is not used\n - `options.sphinx.docroot` is used (default=docs)\n - `options.sphinx.builddir` is used (default=.build)\n\n .. warning::\n This will DESTROY your gh-pages branch.\n If you love it, you'll want to take backups\n before playing with this. This script assumes\n that gh-pages is 100% derivative. You should\n never edit files in your gh-pages branch by hand\n if you're using this script because you will\n lose your work.\n\n .. _github: https://github.com\n .. 
_ghp-import: https://github.com/davisp/ghp-import", "id": "f3730:m4"} {"signature": "@taskdef showhtml():", "body": "import webbrowseropts = optionsdocroot = path(opts.get('', ''))if not docroot.exists():raise BuildFailure(\"\"% docroot)builddir = docroot / opts.get(\"\", \"\")builddir=builddir / ''if not builddir.exists():raise BuildFailure(\"\"% builddir)webbrowser.open(builddir / '')", "docstring": "Open your web browser and display the generated html documentation.", "id": "f3730:m5"} {"signature": "@taskdef manifest():", "body": "prune = options.paved.dist.manifest.prunegraft = set()if options.paved.dist.manifest.include_sphinx_docroot:docroot = options.get('', '')graft.update([docroot])if options.paved.dist.manifest.exclude_sphinx_builddir:builddir = docroot + '' + options.get(\"\", \"\")prune.update([builddir])with open(options.paved.cwd / '', '') as fo:for item in graft:fo.write('' % item)for item in options.paved.dist.manifest.include:fo.write('' % item)for item in options.paved.dist.manifest.recursive_include:fo.write('' % item)for item in prune:fo.write('' % item)", "docstring": "Guarantee the existence of a basic MANIFEST.in.\n\n manifest doc: http://docs.python.org/distutils/sourcedist.html#manifest\n\n `options.paved.dist.manifest.include`: set of files (or globs) to include with the `include` directive.\n\n `options.paved.dist.manifest.recursive_include`: set of files (or globs) to include with the `recursive-include` directive.\n\n `options.paved.dist.manifest.prune`: set of files (or globs) to exclude with the `prune` directive.\n\n `options.paved.dist.manifest.include_sphinx_docroot`: True -> sphinx docroot is added as `graft`\n\n `options.paved.dist.manifest.include_sphinx_docroot`: True -> sphinx builddir is added as `prune`", "id": "f3731:m0"} {"signature": "@task@needs('', '', '', '')def sdist():", "body": "pass", "docstring": "Overrides sdist to make sure that our setup.py is generated.", "id": "f3731:m1"} {"signature": "@task@needs('', '')def upload():", "body": "pass", "docstring": "Upload the package to PyPI.", "id": "f3731:m2"} {"signature": "@task@consume_argsdef pip_install(args):", "body": "util.pip_install(*args)", "docstring": "Send the given arguments to `pip install`.", "id": "f3732:m0"} {"signature": "@taskdef easy_install(args):", "body": "util.easy_install(*args)", "docstring": "Send the given arguments to `easy_install`.", "id": "f3732:m1"} {"signature": "def validate(self, messages):", "body": "messages = self.validate_creators(messages)messages = self.validate_created(messages)return messages", "docstring": "Returns True if the fields are valid according to the SPDX standard.\n Appends user friendly messages to the messages parameter.", "id": "f3735:c4:m6"} {"signature": "def load_license_list(file_name):", "body": "licenses_map = {}with codecs.open(file_name, '', encoding='') as lics:licenses = json.load(lics)version = licenses[''].split('')for lic in licenses['']:if lic.get(''):continuename = lic['']identifier = lic['']licenses_map[name] = identifierlicenses_map[identifier] = namereturn version, licenses_map", "docstring": "Return the licenses list version tuple and a mapping of licenses\nname->id and id->name loaded from a JSON file\nfrom https://github.com/spdx/license-list-data", "id": "f3736:m0"} {"signature": "def _add_parens(required, text):", "body": "return ''.format(text) if required else text", "docstring": "Add parens around a license expression if `required` is True, otherwise\nreturn `text` unmodified.", "id": "f3738:m0"} {"signature": 
"def validate(self, messages):", "body": "messages = self.validate_ext_doc_id(messages)messages = self.validate_spdx_doc_uri(messages)messages = self.validate_checksum(messages)return messages", "docstring": "Validate all fields of the ExternalDocumentRef class and update the\nmessages list with user friendly error messages for display.", "id": "f3738:c0:m3"} {"signature": "@classmethoddef from_identifier(cls, identifier):", "body": "if identifier in config.LICENSE_MAP.keys():return cls(config.LICENSE_MAP[identifier], identifier)else:return cls(identifier, identifier)", "docstring": "If identifier exists in config.LICENSE_MAP\n the full_name is retrieved from it. Otherwise\n the full_name is the same as the identifier.", "id": "f3738:c1:m1"} {"signature": "@classmethoddef from_full_name(cls, full_name):", "body": "if full_name in config.LICENSE_MAP.keys():return cls(full_name, config.LICENSE_MAP[full_name])else:return cls(full_name, full_name)", "docstring": "Returna new License for a full_name. If the full_name exists in\nconfig.LICENSE_MAP the identifier is retrieved from it.\nOtherwise the identifier is the same as the full_name.", "id": "f3738:c1:m2"} {"signature": "def validate(self, messages):", "body": "messages = self.validate_version(messages)messages = self.validate_data_lics(messages)messages = self.validate_name(messages)messages = self.validate_spdx_id(messages)messages = self.validate_namespace(messages)messages = self.validate_ext_document_references(messages)messages = self.validate_creation_info(messages)messages = self.validate_package(messages)messages = self.validate_extracted_licenses(messages)messages = self.validate_reviews(messages)return messages", "docstring": "Validate all fields of the document and update the\nmessages list with user friendly error messages for display.", "id": "f3738:c5:m8"} {"signature": "@classmethoddef from_str(cls, value):", "body": "m = cls.VERS_STR_REGEX.match(value)if m is not None:return cls(int(m.group()), int(m.group()))else:return None", "docstring": "Constructs a Version from a string.\n Returns None if string not in N.N form where N represents a\n number.", "id": "f3739:c0:m1"} {"signature": "def tv_to_rdf(infile_name, outfile_name):", "body": "parser = Parser(Builder(), StandardLogger())parser.build()with open(infile_name) as infile:data = infile.read()document, error = parser.parse(data)if not error:with open(outfile_name, mode='') as outfile:write_document(document, outfile)return Trueelse:print('')messages = []document.validate(messages)print(''.join(messages))return False", "docstring": "Convert a SPDX file from tag/value format to RDF format.\nReturn True on sucess, False otherwise.", "id": "f3740:m0"} {"signature": "def validate(self, messages):", "body": "messages = self.validate_reviewer(messages)messages = self.validate_review_date(messages)return messages", "docstring": "Returns True if all the fields are valid.\n Appends any error messages to messages parameter.", "id": "f3741:c0:m6"} {"signature": "def write_document(document, out, validate=True):", "body": "if validate:messages = []messages = document.validate(messages)if messages:raise InvalidDocumentError(messages)writer = Writer(document, out)writer.write()", "docstring": "Write an SPDX RDF document.\n- document - spdx.document instance.\n- out - file like object that will be written to.\nOptionally `validate` the document before writing and raise\nInvalidDocumentError if document.validate returns False.", "id": "f3742:m0"} {"signature": "def 
create_checksum_node(self, chksum):", "body": "chksum_node = BNode()type_triple = (chksum_node, RDF.type, self.spdx_namespace.Checksum)self.graph.add(type_triple)algorithm_triple = (chksum_node, self.spdx_namespace.algorithm, Literal(chksum.identifier))self.graph.add(algorithm_triple)value_triple = (chksum_node, self.spdx_namespace.checksumValue, Literal(chksum.value))self.graph.add(value_triple)return chksum_node", "docstring": "Return a node representing spdx.checksum.", "id": "f3742:c0:m1"} {"signature": "def to_special_value(self, value):", "body": "if isinstance(value, utils.NoAssert):return self.spdx_namespace.noassertionelif isinstance(value, utils.SPDXNone):return self.spdx_namespace.noneelse:return Literal(value)", "docstring": "Return proper spdx term or Literal", "id": "f3742:c0:m2"} {"signature": "def licenses_from_tree(self, tree):", "body": "licenses = set()self.licenses_from_tree_helper(tree, licenses)return licenses", "docstring": "Traverse conjunctions and disjunctions like trees and return a\nset of all licenses in it as nodes.", "id": "f3742:c1:m2"} {"signature": "def create_conjunction_node(self, conjunction):", "body": "node = BNode()type_triple = (node, RDF.type, self.spdx_namespace.ConjunctiveLicenseSet)self.graph.add(type_triple)licenses = self.licenses_from_tree(conjunction)for lic in licenses:member_triple = (node, self.spdx_namespace.member, lic)self.graph.add(member_triple)return node", "docstring": "Return a node representing a conjunction of licenses.", "id": "f3742:c1:m3"} {"signature": "def create_disjunction_node(self, disjunction):", "body": "node = BNode()type_triple = (node, RDF.type, self.spdx_namespace.DisjunctiveLicenseSet)self.graph.add(type_triple)licenses = self.licenses_from_tree(disjunction)for lic in licenses:member_triple = (node, self.spdx_namespace.member, lic)self.graph.add(member_triple)return node", "docstring": "Return a node representing a disjunction of licenses.", "id": "f3742:c1:m4"} {"signature": "def create_license_helper(self, lic):", "body": "if isinstance(lic, document.ExtractedLicense):return self.create_extracted_license(lic)if lic.identifier.rstrip('') in config.LICENSE_MAP:return URIRef(lic.url)else:matches = [l for l in self.document.extracted_licenses if l.identifier == lic.identifier]if len(matches) != :return self.create_extracted_license(matches[])else:raise InvalidDocumentError(''.format(lic.identifier))", "docstring": "Handle single(no conjunction/disjunction) licenses.\nReturn the created node.", "id": "f3742:c1:m5"} {"signature": "def create_extracted_license(self, lic):", "body": "licenses = list(self.graph.triples((None, self.spdx_namespace.licenseId, lic.identifier)))if len(licenses) != :return licenses[][] else:license_node = BNode()type_triple = (license_node, RDF.type, self.spdx_namespace.ExtractedLicensingInfo)self.graph.add(type_triple)ident_triple = (license_node, self.spdx_namespace.licenseId, Literal(lic.identifier))self.graph.add(ident_triple)text_triple = (license_node, self.spdx_namespace.extractedText, Literal(lic.text))self.graph.add(text_triple)if lic.full_name is not None:name_triple = (license_node, self.spdx_namespace.licenseName, self.to_special_value(lic.full_name))self.graph.add(name_triple)for ref in lic.cross_ref:triple = (license_node, RDFS.seeAlso, URIRef(ref))self.graph.add(triple)if lic.comment is not None:comment_triple = (license_node, RDFS.comment, Literal(lic.comment))self.graph.add(comment_triple)return license_node", "docstring": "Handle extracted license.\nReturn the license 
node.", "id": "f3742:c1:m6"} {"signature": "def create_license_node(self, lic):", "body": "if isinstance(lic, document.LicenseConjunction):return self.create_conjunction_node(lic)elif isinstance(lic, document.LicenseDisjunction):return self.create_disjunction_node(lic)else:return self.create_license_helper(lic)", "docstring": "Return a node representing a license.\nCould be a single license (extracted or part of license list.) or\na conjunction/disjunction of licenses.", "id": "f3742:c1:m7"} {"signature": "def license_or_special(self, lic):", "body": "if isinstance(lic, utils.NoAssert):return self.spdx_namespace.noassertionelif isinstance(lic, utils.SPDXNone):return self.spdx_namespace.noneelse:return self.create_license_node(lic)", "docstring": "Check for special values spdx:none and spdx:noassertion.\nReturn the term for the special value or the result of passing\nlicense to create_license_node.", "id": "f3742:c1:m8"} {"signature": "def create_file_node(self, doc_file):", "body": "file_node = URIRef(''.format(id=str(doc_file.spdx_id)))type_triple = (file_node, RDF.type, self.spdx_namespace.File)self.graph.add(type_triple)name_triple = (file_node, self.spdx_namespace.fileName, Literal(doc_file.name))self.graph.add(name_triple)if doc_file.has_optional_field(''):comment_triple = (file_node, RDFS.comment, Literal(doc_file.comment))self.graph.add(comment_triple)if doc_file.has_optional_field(''):ftype = self.spdx_namespace[self.FILE_TYPES[doc_file.type]]ftype_triple = (file_node, self.spdx_namespace.fileType, ftype)self.graph.add(ftype_triple)self.graph.add((file_node, self.spdx_namespace.checksum, self.create_checksum_node(doc_file.chk_sum)))conc_lic_node = self.license_or_special(doc_file.conc_lics)conc_lic_triple = (file_node, self.spdx_namespace.licenseConcluded, conc_lic_node)self.graph.add(conc_lic_triple)license_info_nodes = map(self.license_or_special, doc_file.licenses_in_file)for lic in license_info_nodes:triple = (file_node, self.spdx_namespace.licenseInfoInFile, lic)self.graph.add(triple)if doc_file.has_optional_field(''):comment_triple = (file_node, self.spdx_namespace.licenseComments, Literal(doc_file.license_comment))self.graph.add(comment_triple)cr_text_node = self.to_special_value(doc_file.copyright)cr_text_triple = (file_node, self.spdx_namespace.copyrightText, cr_text_node)self.graph.add(cr_text_triple)if doc_file.has_optional_field(''):notice_triple = (file_node, self.spdx_namespace.noticeText, doc_file.notice)self.graph.add(notice_triple)contrib_nodes = map(lambda c: Literal(c), doc_file.contributors)contrib_triples = [(file_node, self.spdx_namespace.fileContributor, node) for node in contrib_nodes]for triple in contrib_triples:self.graph.add(triple)return file_node", "docstring": "Create a node for spdx.file.", "id": "f3742:c2:m1"} {"signature": "def files(self):", "body": "return map(self.create_file_node, self.document.files)", "docstring": "Return list of file nodes.", "id": "f3742:c2:m2"} {"signature": "def add_file_dependencies_helper(self, doc_file):", "body": "subj_triples = list(self.graph.triples((None, self.spdx_namespace.fileName, Literal(doc_file.name))))if len(subj_triples) != :raise InvalidDocumentError(''.format(doc_file.name))subject_node = subj_triples[][]for dependency in doc_file.dependencies:dep_triples = list(self.graph.triples((None, self.spdx_namespace.fileName, Literal(dependency))))if len(dep_triples) == :dep_node = dep_triples[][]dep_triple = (subject_node, self.spdx_namespace.fileDependency, 
dep_node)self.graph.add(dep_triple)else:print(''.format(doc_file.name, dependency))", "docstring": "Handle dependencies for a single file.\n- doc_file - instance of spdx.file.File.", "id": "f3742:c2:m3"} {"signature": "def add_file_dependencies(self):", "body": "for doc_file in self.document.files:self.add_file_dependencies_helper(doc_file)", "docstring": "Add file dependencies to the graph.\nCalled after all files have been added.", "id": "f3742:c2:m4"} {"signature": "def create_review_node(self, review):", "body": "review_node = BNode()type_triple = (review_node, RDF.type, self.spdx_namespace.Review)self.graph.add(type_triple)reviewer_node = Literal(review.reviewer.to_value())self.graph.add((review_node, self.spdx_namespace.reviewer, reviewer_node))reviewed_date_node = Literal(review.review_date_iso_format)reviewed_triple = (review_node, self.spdx_namespace.reviewDate, reviewed_date_node)self.graph.add(reviewed_triple)if review.has_comment:comment_node = Literal(review.comment)comment_triple = (review_node, RDFS.comment, comment_node)self.graph.add(comment_triple)return review_node", "docstring": "Return a review node.", "id": "f3742:c3:m1"} {"signature": "def reviews(self):", "body": "return map(self.create_review_node, self.document.reviews)", "docstring": "Returns a list of review nodes", "id": "f3742:c3:m2"} {"signature": "def create_annotation_node(self, annotation):", "body": "annotation_node = URIRef(str(annotation.spdx_id))type_triple = (annotation_node, RDF.type, self.spdx_namespace.Annotation)self.graph.add(type_triple)annotator_node = Literal(annotation.annotator.to_value())self.graph.add((annotation_node, self.spdx_namespace.annotator, annotator_node))annotation_date_node = Literal(annotation.annotation_date_iso_format)annotation_triple = (annotation_node, self.spdx_namespace.annotationDate, annotation_date_node)self.graph.add(annotation_triple)if annotation.has_comment:comment_node = Literal(annotation.comment)comment_triple = (annotation_node, RDFS.comment, comment_node)self.graph.add(comment_triple)annotation_type_node = Literal(annotation.annotation_type)annotation_type_triple = (annotation_node, self.spdx_namespace.annotationType, annotation_type_node)self.graph.add(annotation_type_triple)return annotation_node", "docstring": "Return an annotation node.", "id": "f3742:c4:m1"} {"signature": "def annotations(self):", "body": "return map(self.create_annotation_node, self.document.annotations)", "docstring": "Returns a list of annotation nodes", "id": "f3742:c4:m2"} {"signature": "def creators(self):", "body": "return map(lambda c: Literal(c.to_value()), self.document.creation_info.creators)", "docstring": "Return a list of creator nodes.\nNote: Does not add anything to the graph.", "id": "f3742:c5:m1"} {"signature": "def create_creation_info(self):", "body": "ci_node = BNode()type_triple = (ci_node, RDF.type, self.spdx_namespace.CreationInfo)self.graph.add(type_triple)created_date = Literal(self.document.creation_info.created_iso_format)created_triple = (ci_node, self.spdx_namespace.created, created_date)self.graph.add(created_triple)creators = self.creators()for creator in creators:self.graph.add((ci_node, self.spdx_namespace.creator, creator))if self.document.creation_info.has_comment:comment_node = Literal(self.document.creation_info.comment)comment_triple = (ci_node, RDFS.comment, comment_node)self.graph.add(comment_triple)return ci_node", "docstring": "Add and return a creation info node to graph", "id": "f3742:c5:m2"} {"signature": "def 
create_external_document_ref_node(self, ext_document_references):", "body": "ext_doc_ref_node = BNode()type_triple = (ext_doc_ref_node, RDF.type, self.spdx_namespace.ExternalDocumentRef)self.graph.add(type_triple)ext_doc_id = Literal(ext_document_references.external_document_id)ext_doc_id_triple = (ext_doc_ref_node, self.spdx_namespace.externalDocumentId, ext_doc_id)self.graph.add(ext_doc_id_triple)doc_uri = Literal(ext_document_references.spdx_document_uri)doc_uri_triple = (ext_doc_ref_node, self.spdx_namespace.spdxDocument, doc_uri)self.graph.add(doc_uri_triple)checksum_node = self.create_checksum_node(ext_document_references.check_sum)self.graph.add((ext_doc_ref_node, self.spdx_namespace.checksum, checksum_node))return ext_doc_ref_node", "docstring": "Add and return a creation info node to graph", "id": "f3742:c6:m1"} {"signature": "def ext_doc_refs(self):", "body": "return map(self.create_external_document_ref_node,self.document.ext_document_references)", "docstring": "Returns a list of review nodes", "id": "f3742:c6:m2"} {"signature": "def package_verif_node(self, package):", "body": "verif_node = BNode()type_triple = (verif_node, RDF.type, self.spdx_namespace.PackageVerificationCode)self.graph.add(type_triple)value_triple = (verif_node, self.spdx_namespace.packageVerificationCodeValue, Literal(package.verif_code))self.graph.add(value_triple)excl_file_nodes = map(lambda excl: Literal(excl), package.verif_exc_files)excl_predicate = self.spdx_namespace.packageVerificationCodeExcludedFileexcl_file_triples = [(verif_node, excl_predicate, xcl_file) for xcl_file in excl_file_nodes]for trp in excl_file_triples:self.graph.add(trp)return verif_node", "docstring": "Return a node representing package verification code.", "id": "f3742:c7:m1"} {"signature": "def handle_package_literal_optional(self, package, package_node, predicate, field):", "body": "if package.has_optional_field(field):value = getattr(package, field, None)value_node = self.to_special_value(value)triple = (package_node, predicate, value_node)self.graph.add(triple)", "docstring": "Check if optional field is set.\nIf so it adds the triple (package_node, predicate, $) to the graph.\nWhere $ is a literal or special value term of the value of the field.", "id": "f3742:c7:m2"} {"signature": "def handle_pkg_optional_fields(self, package, package_node):", "body": "self.handle_package_literal_optional(package, package_node, self.spdx_namespace.versionInfo, '')self.handle_package_literal_optional(package, package_node, self.spdx_namespace.packageFileName, '')self.handle_package_literal_optional(package, package_node, self.spdx_namespace.supplier, '')self.handle_package_literal_optional(package, package_node, self.spdx_namespace.originator, '')self.handle_package_literal_optional(package, package_node, self.spdx_namespace.sourceInfo, '')self.handle_package_literal_optional(package, package_node, self.spdx_namespace.licenseComments, '')self.handle_package_literal_optional(package, package_node, self.spdx_namespace.summary, '')self.handle_package_literal_optional(package, package_node, self.spdx_namespace.description, '')if package.has_optional_field(''):checksum_node = self.create_checksum_node(package.check_sum)self.graph.add((package_node, self.spdx_namespace.checksum, checksum_node))if package.has_optional_field(''):homepage_node = URIRef(self.to_special_value(package.homepage))homepage_triple = (package_node, self.doap_namespace.homepage, homepage_node)self.graph.add(homepage_triple)", "docstring": "Write package optional fields.", "id": 
"f3742:c7:m3"} {"signature": "def create_package_node(self, package):", "body": "package_node = BNode()type_triple = (package_node, RDF.type, self.spdx_namespace.Package)self.graph.add(type_triple)self.handle_pkg_optional_fields(package, package_node)name_triple = (package_node, self.spdx_namespace.name, Literal(package.name))self.graph.add(name_triple)down_loc_node = (package_node, self.spdx_namespace.downloadLocation, self.to_special_value(package.download_location))self.graph.add(down_loc_node)verif_node = self.package_verif_node(package)verif_triple = (package_node, self.spdx_namespace.packageVerificationCode, verif_node)self.graph.add(verif_triple)conc_lic_node = self.license_or_special(package.conc_lics)conc_lic_triple = (package_node, self.spdx_namespace.licenseConcluded, conc_lic_node)self.graph.add(conc_lic_triple)decl_lic_node = self.license_or_special(package.license_declared)decl_lic_triple = (package_node, self.spdx_namespace.licenseDeclared, decl_lic_node)self.graph.add(decl_lic_triple)licenses_from_files_nodes = map(lambda el: self.license_or_special(el), package.licenses_from_files)lic_from_files_predicate = self.spdx_namespace.licenseInfoFromFileslic_from_files_triples = [(package_node, lic_from_files_predicate, node) for node in licenses_from_files_nodes]for triple in lic_from_files_triples:self.graph.add(triple)cr_text_node = self.to_special_value(package.cr_text)cr_text_triple = (package_node, self.spdx_namespace.copyrightText, cr_text_node)self.graph.add(cr_text_triple)self.handle_package_has_file(package, package_node)return package_node", "docstring": "Return a Node representing the package.\nFiles must have been added to the graph before this method is called.", "id": "f3742:c7:m4"} {"signature": "def packages(self):", "body": "return self.create_package_node(self.document.package)", "docstring": "Return a node that represents the package in the graph.\nCall this function to write package info.", "id": "f3742:c7:m5"} {"signature": "def handle_package_has_file_helper(self, pkg_file):", "body": "nodes = list(self.graph.triples((None, self.spdx_namespace.fileName, Literal(pkg_file.name))))if len(nodes) == :return nodes[][]else:raise InvalidDocumentError('' +''.format(pkg_file.name))", "docstring": "Return node representing pkg_file\npkg_file should be instance of spdx.file.", "id": "f3742:c7:m6"} {"signature": "def handle_package_has_file(self, package, package_node):", "body": "file_nodes = map(self.handle_package_has_file_helper, package.files)triples = [(package_node, self.spdx_namespace.hasFile, node) for node in file_nodes]for triple in triples:self.graph.add(triple)", "docstring": "Add hasFile triples to graph.\nMust be called after files have been added.", "id": "f3742:c7:m7"} {"signature": "def __init__(self, document, out):", "body": "super(Writer, self).__init__(document, out)", "docstring": "- document is spdx.document instance that will be written.\n- out is a file-like object that will be written to.", "id": "f3742:c8:m0"} {"signature": "def create_doc(self):", "body": "doc_node = URIRef('')self.graph.add((doc_node, RDF.type, self.spdx_namespace.SpdxDocument))vers_literal = Literal(str(self.document.version))self.graph.add((doc_node, self.spdx_namespace.specVersion, vers_literal))data_lics = URIRef(self.document.data_license.url)self.graph.add((doc_node, self.spdx_namespace.dataLicense, data_lics))doc_name = URIRef(self.document.name)self.graph.add((doc_node, self.spdx_namespace.name, doc_name))return doc_node", "docstring": "Add and return the root 
document node to graph.", "id": "f3742:c8:m1"} {"signature": "def write_creation_info(creation_info, out):", "body": "out.write('')for creator in sorted(creation_info.creators):write_value('', creator, out)write_value('', creation_info.created_iso_format, out)if creation_info.has_comment:write_text_value('', creation_info.comment, out)", "docstring": "Write the creation info to out.", "id": "f3743:m4"} {"signature": "def write_review(review, out):", "body": "out.write('')write_value('', review.reviewer, out)write_value('', review.review_date_iso_format, out)if review.has_comment:write_text_value('', review.comment, out)", "docstring": "Write the fields of a single review to out.", "id": "f3743:m5"} {"signature": "def write_annotation(annotation, out):", "body": "out.write('')write_value('', annotation.annotator, out)write_value('', annotation.annotation_date_iso_format, out)if annotation.has_comment:write_text_value('', annotation.comment, out)write_value('', annotation.annotation_type, out)write_value('', annotation.spdx_id, out)", "docstring": "Write the fields of a single annotation to out.", "id": "f3743:m6"} {"signature": "def write_file(spdx_file, out):", "body": "out.write('')write_value('', spdx_file.name, out)write_value('', spdx_file.spdx_id, out)if spdx_file.has_optional_field(''):write_file_type(spdx_file.type, out)write_value('', spdx_file.chk_sum.to_tv(), out)if isinstance(spdx_file.conc_lics, (document.LicenseConjunction, document.LicenseDisjunction)):write_value('', u''.format(spdx_file.conc_lics), out)else:write_value('', spdx_file.conc_lics, out)for lics in sorted(spdx_file.licenses_in_file):write_value('', lics, out)if isinstance(spdx_file.copyright, six.string_types):write_text_value('', spdx_file.copyright, out)else:write_value('', spdx_file.copyright, out)if spdx_file.has_optional_field(''):write_text_value('', spdx_file.license_comment, out)if spdx_file.has_optional_field(''):write_text_value('', spdx_file.comment, out)if spdx_file.has_optional_field(''):write_text_value('', spdx_file.notice, out)for contributor in sorted(spdx_file.contributors):write_value('', contributor, out)for dependency in sorted(spdx_file.dependencies):write_value('', dependency, out)names = spdx_file.artifact_of_project_namehomepages = spdx_file.artifact_of_project_homeuris = spdx_file.artifact_of_project_urifor name, homepage, uri in sorted(zip_longest(names, homepages, uris)):write_value('', name, out)if homepage is not None:write_value('', homepage, out)if uri is not None:write_value('', uri, out)", "docstring": "Write a file fields to out.", "id": "f3743:m8"} {"signature": "def write_package(package, out):", "body": "out.write('')write_value('', package.name, out)if package.has_optional_field(''):write_value('', package.version, out)write_value('', package.download_location, out)if package.has_optional_field(''):write_text_value('', package.summary, out)if package.has_optional_field(''):write_text_value('', package.source_info, out)if package.has_optional_field(''):write_value('', package.file_name, out)if package.has_optional_field(''):write_value('', package.supplier, out)if package.has_optional_field(''):write_value('', package.originator, out)if package.has_optional_field(''):write_value('', package.check_sum.to_tv(), out)write_value('', format_verif_code(package), out)if package.has_optional_field(''):write_text_value('', package.description, out)if isinstance(package.license_declared, (document.LicenseConjunction,document.LicenseDisjunction)):write_value('', 
u''.format(package.license_declared), out)else:write_value('', package.license_declared, out)if isinstance(package.conc_lics, (document.LicenseConjunction,document.LicenseDisjunction)):write_value('', u''.format(package.conc_lics), out)else:write_value('', package.conc_lics, out)for lics in sorted(package.licenses_from_files):write_value('', lics, out)if package.has_optional_field(''):write_text_value('', package.license_comment, out)if isinstance(package.cr_text, six.string_types):write_text_value('', package.cr_text, out)else:write_value('', package.cr_text, out)if package.has_optional_field(''):write_value('', package.homepage, out)for spdx_file in sorted(package.files):write_separators(out)write_file(spdx_file, out)", "docstring": "Write a package fields to out.", "id": "f3743:m9"} {"signature": "def write_extracted_licenses(lics, out):", "body": "write_value('', lics.identifier, out)if lics.full_name is not None:write_value('', lics.full_name, out)if lics.comment is not None:write_text_value('', lics.comment, out)for xref in sorted(lics.cross_ref):write_value('', xref, out)write_text_value('', lics.text, out)", "docstring": "Write extracted licenses fields to out.", "id": "f3743:m10"} {"signature": "def write_document(document, out, validate=True):", "body": "messages = []messages = document.validate(messages)if validate and messages:raise InvalidDocumentError(messages)out.write('')write_value('', str(document.version), out)write_value('', document.data_license.identifier, out)write_value('', document.name, out)write_value('', '', out)write_value('', document.namespace, out)if document.has_comment:write_text_value('', document.comment, out)for doc_ref in document.ext_document_references:doc_ref_str = ''.join([doc_ref.external_document_id,doc_ref.spdx_document_uri,doc_ref.check_sum.identifier + '' +doc_ref.check_sum.value])write_value('', doc_ref_str, out)write_separators(out)write_creation_info(document.creation_info, out)write_separators(out)for review in sorted(document.reviews):write_review(review, out)write_separators(out)for annotation in sorted(document.annotations):write_annotation(annotation, out)write_separators(out)write_package(document.package, out)write_separators(out)out.write('')for lic in sorted(document.extracted_licenses):write_extracted_licenses(lic, out)write_separators(out)", "docstring": "Write an SPDX tag value document.\n- document - spdx.document instance.\n- out - file like object that will be written to.\nOptionally `validate` the document before writing and raise\nInvalidDocumentError if document.validate returns False.", "id": "f3743:m11"} {"signature": "def add_artifact(self, symbol, value):", "body": "symbol = ''.format(symbol)artifact = getattr(self, symbol)artifact.append(value)", "docstring": "Add value as artifact_of_project{symbol}.", "id": "f3745:c1:m6"} {"signature": "def validate(self, messages):", "body": "messages = self.validate_concluded_license(messages)messages = self.validate_type(messages)messages = self.validate_checksum(messages)messages = self.validate_licenses_in_file(messages)messages = self.validate_copyright(messages)messages = self.validate_artifacts(messages)messages = self.validate_spdx_id(messages)return messages", "docstring": "Validates the fields and appends user friendly messages\n to messages parameter if there are errors.", "id": "f3745:c1:m7"} {"signature": "def validate(self, messages):", "body": "messages = self.validate_checksum(messages)messages = self.validate_optional_str_fields(messages)messages = 
self.validate_mandatory_str_fields(messages)messages = self.validate_files(messages)messages = self.validate_mandatory_fields(messages)messages = self.validate_optional_fields(messages)return messages", "docstring": "Validate the package fields.\nAppend user friendly error messages to the `messages` list.", "id": "f3746:c0:m4"} {"signature": "def validate_optional_str_fields(self, messages):", "body": "FIELDS = ['','','','','','']messages = self.validate_str_fields(FIELDS, True, messages)return messages", "docstring": "Fields marked as optional and of type string in class\n docstring must be of a type that provides __str__ method.", "id": "f3746:c0:m8"} {"signature": "def validate_mandatory_str_fields(self, messages):", "body": "FIELDS = ['', '', '', '']messages = self.validate_str_fields(FIELDS, False, messages)return messages", "docstring": "Fields marked as Mandatory and of type string in class\n docstring must be of a type that provides __str__ method.", "id": "f3746:c0:m9"} {"signature": "def validate_str_fields(self, fields, optional, messages):", "body": "for field_str in fields:field = getattr(self, field_str)if field is not None:attr = getattr(field, '', None)if not callable(attr):messages = messages + [''.format(field)]elif not optional:messages = messages + [''.format(field_str)]return messages", "docstring": "Helper for validate_mandatory_str_field and\n validate_optional_str_fields", "id": "f3746:c0:m10"} {"signature": "def set_doc_version(self, doc, value):", "body": "if not self.doc_version_set:self.doc_version_set = Truem = self.VERS_STR_REGEX.match(value)if m is None:raise SPDXValueError('')else:doc.version = version.Version(major=int(m.group()),minor=int(m.group()))return Trueelse:raise CardinalityError('')", "docstring": "Set the document version.\nRaise exceptions:\n- SPDXValueError if malformed value,\n- CardinalityError if already defined,", "id": "f3750:c0:m1"} {"signature": "def set_doc_data_lic(self, doc, res):", "body": "if not self.doc_data_lics_set:self.doc_data_lics_set = Trueres_parts = res.split('')if len(res_parts) != :identifier = res_parts[-]doc.data_license = document.License.from_identifier(identifier)else:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Set the document data license.\nRaise exceptions:\n- SPDXValueError if malformed value,\n- CardinalityError if already defined.", "id": "f3750:c0:m2"} {"signature": "def set_doc_name(self, doc, name):", "body": "if not self.doc_name_set:doc.name = nameself.doc_name_set = Truereturn Trueelse:raise CardinalityError('')", "docstring": "Sets the document name, raises CardinalityError if already defined.", "id": "f3750:c0:m3"} {"signature": "def set_doc_spdx_id(self, doc, doc_spdx_id_line):", "body": "if not self.doc_spdx_id_set:if validations.validate_doc_spdx_id(doc_spdx_id_line):doc.spdx_id = doc_spdx_id_lineself.doc_spdx_id_set = Truereturn Trueelse:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets the document SPDX Identifier.\n Raises value error if malformed value, CardinalityError\n if already defined.", "id": "f3750:c0:m4"} {"signature": "def set_doc_comment(self, doc, comment):", "body": "if not self.doc_comment_set:self.doc_comment_set = Truedoc.comment = commentelse:raise CardinalityError('')", "docstring": "Sets document comment, Raises CardinalityError if\n comment already set.", "id": "f3750:c0:m5"} {"signature": "def set_doc_namespace(self, doc, namespace):", "body": "if not self.doc_namespace_set:self.doc_namespace_set = Trueif 
validations.validate_doc_namespace(namespace):doc.namespace = namespacereturn Trueelse:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets the document namespace.\n Raise SPDXValueError if malformed value, CardinalityError\n if already defined.", "id": "f3750:c0:m6"} {"signature": "def reset_document(self):", "body": "self.doc_version_set = Falseself.doc_comment_set = Falseself.doc_namespace_set = Falseself.doc_data_lics_set = Falseself.doc_name_set = Falseself.doc_spdx_id_set = False", "docstring": "Reset the internal state to allow building new document", "id": "f3750:c0:m7"} {"signature": "def set_chksum(self, doc, chk_sum):", "body": "if chk_sum:doc.ext_document_references[-].check_sum = checksum.Algorithm('', chk_sum)else:raise SPDXValueError('')", "docstring": "Sets the external document reference's check sum, if not already set.\nchk_sum - The checksum value in the form of a string.", "id": "f3750:c1:m0"} {"signature": "def set_creation_comment(self, doc, comment):", "body": "if not self.creation_comment_set:self.creation_comment_set = Truedoc.creation_info.comment = commentreturn Trueelse:raise CardinalityError('')", "docstring": "Sets creation comment, Raises CardinalityError if\n comment already set.\n Raises SPDXValueError if not free form text.", "id": "f3750:c3:m1"} {"signature": "def set_pkg_chk_sum(self, doc, chk_sum):", "body": "self.assert_package_exists()if not self.package_chk_sum_set:self.package_chk_sum_set = Truedoc.package.check_sum = checksum.Algorithm('', chk_sum)else:raise CardinalityError('')", "docstring": "Sets the package check sum, if not already set.\n chk_sum - A string\n Raises CardinalityError if already defined.\n Raises OrderError if no package previously defined.", "id": "f3750:c4:m1"} {"signature": "def set_pkg_source_info(self, doc, text):", "body": "self.assert_package_exists()if not self.package_source_info_set:self.package_source_info_set = Truedoc.package.source_info = textreturn Trueelse:raise CardinalityError('')", "docstring": "Sets the package's source information, if not already set.\n text - Free form text.\n Raises CardinalityError if already defined.\n Raises OrderError if no package previously defined.", "id": "f3750:c4:m2"} {"signature": "def set_pkg_verif_code(self, doc, code):", "body": "self.assert_package_exists()if not self.package_verif_set:self.package_verif_set = Truedoc.package.verif_code = codeelse:raise CardinalityError('')", "docstring": "Sets the package verification code, if not already set.\n code - A string.\n Raises CardinalityError if already defined.\n Raises OrderError if no package previously defined.", "id": "f3750:c4:m3"} {"signature": "def set_pkg_excl_file(self, doc, filename):", "body": "self.assert_package_exists()doc.package.add_exc_file(filename)", "docstring": "Sets the package's verification code excluded file.\n Raises OrderError if no package previously defined.", "id": "f3750:c4:m4"} {"signature": "def set_pkg_license_comment(self, doc, text):", "body": "self.assert_package_exists()if not self.package_license_comment_set:self.package_license_comment_set = Truedoc.package.license_comment = textreturn Trueelse:raise CardinalityError('')", "docstring": "Sets the package's license comment.\n Raises OrderError if no package previously defined.\n Raises CardinalityError if already set.", "id": "f3750:c4:m5"} {"signature": "def set_pkg_cr_text(self, doc, text):", "body": "self.assert_package_exists()if not self.package_cr_text_set:self.package_cr_text_set = Truedoc.package.cr_text = 
textelse:raise CardinalityError('')", "docstring": "Sets the package's license comment.\n Raises OrderError if no package previously defined.\n Raises CardinalityError if already set.", "id": "f3750:c4:m6"} {"signature": "def set_pkg_summary(self, doc, text):", "body": "self.assert_package_exists()if not self.package_summary_set:self.package_summary_set = Truedoc.package.summary = textelse:raise CardinalityError('')", "docstring": "Sets the package summary.\n Raises CardinalityError if summary already set.\n Raises OrderError if no package previously defined.", "id": "f3750:c4:m7"} {"signature": "def set_pkg_desc(self, doc, text):", "body": "self.assert_package_exists()if not self.package_desc_set:self.package_desc_set = Truedoc.package.description = textelse:raise CardinalityError('')", "docstring": "Sets the package's description.\n Raises CardinalityError if description already set.\n Raises OrderError if no package previously defined.", "id": "f3750:c4:m8"} {"signature": "def set_file_chksum(self, doc, chk_sum):", "body": "if self.has_package(doc) and self.has_file(doc):if not self.file_chksum_set:self.file_chksum_set = Trueself.file(doc).chk_sum = checksum.Algorithm('', chk_sum)return Trueelse:raise CardinalityError('')else:raise OrderError('')", "docstring": "Sets the file check sum, if not already set.\n chk_sum - A string\n Raises CardinalityError if already defined.\n Raises OrderError if no package previously defined.", "id": "f3750:c5:m1"} {"signature": "def set_file_license_comment(self, doc, text):", "body": "if self.has_package(doc) and self.has_file(doc):if not self.file_license_comment_set:self.file_license_comment_set = Trueself.file(doc).license_comment = textreturn Trueelse:raise CardinalityError('')else:raise OrderError('')", "docstring": "Raises OrderError if no package or file defined.\nRaises CardinalityError if more than one per file.", "id": "f3750:c5:m2"} {"signature": "def set_file_copyright(self, doc, text):", "body": "if self.has_package(doc) and self.has_file(doc):if not self.file_copytext_set:self.file_copytext_set = Trueself.file(doc).copyright = textreturn Trueelse:raise CardinalityError('')else:raise OrderError('')", "docstring": "Raises OrderError if no package or file defined.\n Raises CardinalityError if more than one.", "id": "f3750:c5:m3"} {"signature": "def set_file_comment(self, doc, text):", "body": "if self.has_package(doc) and self.has_file(doc):if not self.file_comment_set:self.file_comment_set = Trueself.file(doc).comment = textreturn Trueelse:raise CardinalityError('')else:raise OrderError('')", "docstring": "Raises OrderError if no package or no file defined.\n Raises CardinalityError if more than one comment set.", "id": "f3750:c5:m4"} {"signature": "def set_file_notice(self, doc, text):", "body": "if self.has_package(doc) and self.has_file(doc):if not self.file_notice_set:self.file_notice_set = Trueself.file(doc).notice = tagvaluebuilders.str_from_text(text)return Trueelse:raise CardinalityError('')else:raise OrderError('')", "docstring": "Raises OrderError if no package or file defined.\n Raises CardinalityError if more than one.", "id": "f3750:c5:m5"} {"signature": "def add_review_comment(self, doc, comment):", "body": "if len(doc.reviews) != :if not self.review_comment_set:self.review_comment_set = Truedoc.reviews[-].comment = commentreturn Trueelse:raise CardinalityError('')else:raise OrderError('')", "docstring": "Sets the review comment. Raises CardinalityError if\n already set. 
OrderError if no reviewer defined before.", "id": "f3750:c6:m1"} {"signature": "def add_annotation_comment(self, doc, comment):", "body": "if len(doc.annotations) != :if not self.annotation_comment_set:self.annotation_comment_set = Truedoc.annotations[-].comment = commentreturn Trueelse:raise CardinalityError('')else:raise OrderError('')", "docstring": "Sets the annotation comment. Raises CardinalityError if\n already set. OrderError if no annotator defined before.", "id": "f3750:c7:m1"} {"signature": "def add_annotation_type(self, doc, annotation_type):", "body": "if len(doc.annotations) != :if not self.annotation_type_set:if annotation_type.endswith(''):self.annotation_type_set = Truedoc.annotations[-].annotation_type = ''return Trueelif annotation_type.endswith(''):self.annotation_type_set = Truedoc.annotations[-].annotation_type = ''return Trueelse:raise SPDXValueError('')else:raise CardinalityError('')else:raise OrderError('')", "docstring": "Sets the annotation type. Raises CardinalityError if\n already set. OrderError if no annotator defined before.", "id": "f3750:c7:m2"} {"signature": "def reset(self):", "body": "self.reset_creation_info()self.reset_document()self.reset_package()self.reset_file_stat()self.reset_reviews()self.reset_annotations()", "docstring": "Resets builder's state for building new documents.\n Must be called between usage with different documents.", "id": "f3750:c8:m1"} {"signature": "def more_than_one_error(self, field):", "body": "msg = ''.format(field)self.logger.log(msg)self.error = True", "docstring": "Logs a more than one error.\n field is the field/property that has more than one defined.", "id": "f3751:c0:m1"} {"signature": "def value_error(self, key, bad_value):", "body": "msg = ERROR_MESSAGES[key].format(bad_value)self.logger.log(msg)self.error = True", "docstring": "Reports a value error using ERROR_MESSAGES dict.\n key - key to use for ERROR_MESSAGES.\n bad_value - is passed to format which is called on what key maps to\n in ERROR_MESSAGES.", "id": "f3751:c0:m2"} {"signature": "def to_special_value(self, value):", "body": "if value == self.spdx_namespace.none:return utils.SPDXNone()elif value == self.spdx_namespace.noassertion:return utils.NoAssert()elif value == self.spdx_namespace.unknown:return utils.UnKnown()else:return value", "docstring": "Checks if value is a special SPDX value such as\n NONE, NOASSERTION or UNKNOWN if so returns proper model.\n else returns value", "id": "f3751:c0:m3"} {"signature": "def handle_lics(self, lics):", "body": "if (lics, RDF.type, self.spdx_namespace['']) in self.graph:return self.parse_only_extr_license(lics)ident_start = lics.rfind('') + if ident_start == :special = self.to_special_value(lics)if special == lics:if self.LICS_REF_REGEX.match(lics):return document.License.from_identifier(lics)else:raise SPDXValueError('')else:return specialelse:return document.License.from_identifier(lics[ident_start:])", "docstring": "Return a License from a `lics` license resource.", "id": "f3751:c1:m1"} {"signature": "def get_extr_license_ident(self, extr_lic):", "body": "identifier_tripples = list(self.graph.triples((extr_lic, self.spdx_namespace[''], None)))if not identifier_tripples:self.error = Truemsg = ''self.logger.log(msg)returnif len(identifier_tripples) > :self.more_than_one_error('')returnidentifier_tripple = identifier_tripples[]_s, _p, identifier = identifier_tripplereturn identifier", "docstring": "Return an a license identifier from an ExtractedLicense or None.", "id": "f3751:c1:m2"} {"signature": "def 
get_extr_license_text(self, extr_lic):", "body": "text_tripples = list(self.graph.triples((extr_lic, self.spdx_namespace[''], None)))if not text_tripples:self.error = Truemsg = ''self.logger.log(msg)returnif len(text_tripples) > :self.more_than_one_error('')returntext_tripple = text_tripples[]_s, _p, text = text_tripplereturn text", "docstring": "Return extracted text from an ExtractedLicense or None.", "id": "f3751:c1:m3"} {"signature": "def get_extr_lic_name(self, extr_lic):", "body": "extr_name_list = list(self.graph.triples((extr_lic, self.spdx_namespace[''], None)))if len(extr_name_list) > :self.more_than_one_error('')returnelif len(extr_name_list) == :returnreturn self.to_special_value(extr_name_list[][])", "docstring": "Return the license name from an ExtractedLicense or None", "id": "f3751:c1:m4"} {"signature": "def get_extr_lics_xref(self, extr_lic):", "body": "xrefs = list(self.graph.triples((extr_lic, RDFS.seeAlso, None)))return map(lambda xref_triple: xref_triple[], xrefs)", "docstring": "Return a list of cross references.", "id": "f3751:c1:m5"} {"signature": "def get_extr_lics_comment(self, extr_lics):", "body": "comment_list = list(self.graph.triples((extr_lics, RDFS.comment, None)))if len(comment_list) > :self.more_than_one_error('')returnelif len(comment_list) == :return comment_list[][]else:return", "docstring": "Return license comment or None.", "id": "f3751:c1:m6"} {"signature": "def parse_only_extr_license(self, extr_lic):", "body": "ident = self.get_extr_license_ident(extr_lic)text = self.get_extr_license_text(extr_lic)comment = self.get_extr_lics_comment(extr_lic)xrefs = self.get_extr_lics_xref(extr_lic)name = self.get_extr_lic_name(extr_lic)if not ident:returnlic = document.ExtractedLicense(ident)if text is not None:lic.text = textif name is not None:lic.full_name = nameif comment is not None:lic.comment = commentlic.cross_ref = map(lambda x: six.text_type(x), xrefs)return lic", "docstring": "Return an ExtractedLicense object to represent a license object.\nBut does not add it to the SPDXDocument model.\nReturn None if failed.", "id": "f3751:c1:m7"} {"signature": "def handle_extracted_license(self, extr_lic):", "body": "lic = self.parse_only_extr_license(extr_lic)if lic is not None:self.doc.add_extr_lic(lic)return lic", "docstring": "Build and return an ExtractedLicense or None.\nNote that this function adds the license to the document.", "id": "f3751:c1:m8"} {"signature": "def _handle_license_list(self, lics_set, cls=None):", "body": "licenses = []for _, _, lics_member in self.graph.triples((lics_set, self.spdx_namespace[''], None)):try:if (lics_member, RDF.type, self.spdx_namespace['']) in self.graph:lics = self.handle_extracted_license(lics_member)if lics is not None:licenses.append(lics)else:licenses.append(self.handle_lics(lics_member))except CardinalityError:self.value_error('', lics_member)breakif len(licenses) > :return reduce(lambda a, b: cls(a, b), licenses)else:self.value_error('', '')return", "docstring": "Return a license representing a `cls` object (LicenseConjunction\nor LicenseDisjunction) from a list of license resources or None.", "id": "f3751:c1:m9"} {"signature": "def handle_conjunctive_list(self, lics_set):", "body": "return self._handle_license_list(lics_set, cls=document.LicenseConjunction)", "docstring": "Return a license representing the conjunction from a list of\nlicense resources or None.", "id": "f3751:c1:m10"} {"signature": "def handle_disjunctive_list(self, lics_set):", "body": "return self._handle_license_list(lics_set, 
cls=document.LicenseDisjunction)", "docstring": "Return a license representing the disjunction from a list of\nlicense resources or None.", "id": "f3751:c1:m11"} {"signature": "def parse_package(self, p_term):", "body": "if not (p_term, self.spdx_namespace[''], None) in self.graph:self.error = Trueself.logger.log('')self.builder.create_package(self.doc, '')else:for _s, _p, o in self.graph.triples((p_term, self.spdx_namespace[''], None)):try:self.builder.create_package(self.doc, six.text_type(o))except CardinalityError:self.more_than_one_error('')breakself.p_pkg_vinfo(p_term, self.spdx_namespace[''])self.p_pkg_fname(p_term, self.spdx_namespace[''])self.p_pkg_suppl(p_term, self.spdx_namespace[''])self.p_pkg_originator(p_term, self.spdx_namespace[''])self.p_pkg_down_loc(p_term, self.spdx_namespace[''])self.p_pkg_homepg(p_term, self.doap_namespace[''])self.p_pkg_chk_sum(p_term, self.spdx_namespace[''])self.p_pkg_src_info(p_term, self.spdx_namespace[''])self.p_pkg_verif_code(p_term, self.spdx_namespace[''])self.p_pkg_lic_conc(p_term, self.spdx_namespace[''])self.p_pkg_lic_decl(p_term, self.spdx_namespace[''])self.p_pkg_lics_info_from_files(p_term, self.spdx_namespace[''])self.p_pkg_comments_on_lics(p_term, self.spdx_namespace[''])self.p_pkg_cr_text(p_term, self.spdx_namespace[''])self.p_pkg_summary(p_term, self.spdx_namespace[''])self.p_pkg_descr(p_term, self.spdx_namespace[''])", "docstring": "Parses package fields.", "id": "f3751:c2:m1"} {"signature": "def handle_pkg_lic(self, p_term, predicate, builder_func):", "body": "try:for _, _, licenses in self.graph.triples((p_term, predicate, None)):if (licenses, RDF.type, self.spdx_namespace['']) in self.graph:lics = self.handle_conjunctive_list(licenses)builder_func(self.doc, lics)elif (licenses, RDF.type, self.spdx_namespace['']) in self.graph:lics = self.handle_disjunctive_list(licenses)builder_func(self.doc, lics)else:try:lics = self.handle_lics(licenses)builder_func(self.doc, lics)except SPDXValueError:self.value_error('', licenses)except CardinalityError:self.more_than_one_error(''.format(predicate))", "docstring": "Handles package lics concluded or declared.", "id": "f3751:c2:m8"} {"signature": "def get_file_name(self, f_term):", "body": "for _, _, name in self.graph.triples((f_term, self.spdx_namespace[''], None)):return namereturn", "docstring": "Returns first found fileName property or None if not found.", "id": "f3751:c3:m2"} {"signature": "def p_file_depends(self, f_term, predicate):", "body": "for _, _, other_file in self.graph.triples((f_term, predicate, None)):name = self.get_file_name(other_file)if name is not None:self.builder.add_file_dep(six.text_type(name))else:self.error = Truemsg = ''self.logger.log(msg)", "docstring": "Sets file dependencies.", "id": "f3751:c3:m3"} {"signature": "def p_file_contributor(self, f_term, predicate):", "body": "for _, _, contributor in self.graph.triples((f_term, predicate, None)):self.builder.add_file_contribution(self.doc, six.text_type(contributor))", "docstring": "Parse all file contributors and adds them to the model.", "id": "f3751:c3:m4"} {"signature": "def p_file_notice(self, f_term, predicate):", "body": "try:for _, _, notice in self.graph.triples((f_term, predicate, None)):self.builder.set_file_notice(self.doc, six.text_type(notice))except CardinalityError:self.more_than_one_error('')", "docstring": "Sets file notice text.", "id": "f3751:c3:m5"} {"signature": "def p_file_comment(self, f_term, predicate):", "body": "try:for _, _, comment in self.graph.triples((f_term, predicate, 
None)):self.builder.set_file_comment(self.doc, six.text_type(comment))except CardinalityError:self.more_than_one_error('')", "docstring": "Sets file comment text.", "id": "f3751:c3:m6"} {"signature": "def p_file_artifact(self, f_term, predicate):", "body": "for _, _, project in self.graph.triples((f_term, predicate, None)):if (project, RDF.type, self.doap_namespace['']):self.p_file_project(project)else:self.error = Truemsg = ''self.logger.log(msg)", "docstring": "Handles file artifactOf.\n Note: does not handle artifact of project URI.", "id": "f3751:c3:m7"} {"signature": "def p_file_project(self, project):", "body": "for _, _, name in self.graph.triples((project, self.doap_namespace[''], None)):self.builder.set_file_atrificat_of_project(self.doc, '', six.text_type(name))for _, _, homepage in self.graph.triples((project, self.doap_namespace[''], None)):self.builder.set_file_atrificat_of_project(self.doc, '', six.text_type(homepage))", "docstring": "Helper function for parsing doap:project name and homepage.\n and setting them using the file builder.", "id": "f3751:c3:m8"} {"signature": "def p_file_cr_text(self, f_term, predicate):", "body": "try:for _, _, cr_text in self.graph.triples((f_term, predicate, None)):self.builder.set_file_copyright(self.doc, six.text_type(cr_text))except CardinalityError:self.more_than_one_error('')", "docstring": "Sets file copyright text.", "id": "f3751:c3:m9"} {"signature": "def p_file_comments_on_lics(self, f_term, predicate):", "body": "try:for _, _, comment in self.graph.triples((f_term, predicate, None)):self.builder.set_file_license_comment(self.doc, six.text_type(comment))except CardinalityError:self.more_than_one_error('')", "docstring": "Sets file license comment.", "id": "f3751:c3:m10"} {"signature": "def p_file_lic_info(self, f_term, predicate):", "body": "for _, _, info in self.graph.triples((f_term, predicate, None)):lic = self.handle_lics(info)if lic is not None:self.builder.set_file_license_in_file(self.doc, lic)", "docstring": "Sets file license information.", "id": "f3751:c3:m11"} {"signature": "def p_file_type(self, f_term, predicate):", "body": "try:for _, _, ftype in self.graph.triples((f_term, predicate, None)):try:if ftype.endswith(''):ftype = ''elif ftype.endswith(''):ftype = ''elif ftype.endswith(''):ftype = ''elif ftype.endswith(''):ftype = ''self.builder.set_file_type(self.doc, ftype)except SPDXValueError:self.value_error('', ftype)except CardinalityError:self.more_than_one_error('')", "docstring": "Sets file type.", "id": "f3751:c3:m13"} {"signature": "def p_file_chk_sum(self, f_term, predicate):", "body": "try:for _s, _p, checksum in self.graph.triples((f_term, predicate, None)):for _, _, value in self.graph.triples((checksum, self.spdx_namespace[''], None)):self.builder.set_file_chksum(self.doc, six.text_type(value))except CardinalityError:self.more_than_one_error('')", "docstring": "Sets file checksum. 
Assumes SHA1 algorithm without checking.", "id": "f3751:c3:m14"} {"signature": "def p_file_lic_conc(self, f_term, predicate):", "body": "try:for _, _, licenses in self.graph.triples((f_term, predicate, None)):if (licenses, RDF.type, self.spdx_namespace['']) in self.graph:lics = self.handle_conjunctive_list(licenses)self.builder.set_concluded_license(self.doc, lics)elif (licenses, RDF.type, self.spdx_namespace['']) in self.graph:lics = self.handle_disjunctive_list(licenses)self.builder.set_concluded_license(self.doc, lics)else:try:lics = self.handle_lics(licenses)self.builder.set_concluded_license(self.doc, lics)except SPDXValueError:self.value_error('', licenses)except CardinalityError:self.more_than_one_error(''.format(predicate))", "docstring": "Sets file licenses concluded.", "id": "f3751:c3:m15"} {"signature": "def get_review_comment(self, r_term):", "body": "comment_list = list(self.graph.triples((r_term, RDFS.comment, None)))if len(comment_list) > :self.error = Truemsg = ''self.logger.log(msg)returnelse:return six.text_type(comment_list[][])", "docstring": "Returns review comment or None if found none or more than one.\n Reports errors.", "id": "f3751:c4:m2"} {"signature": "def get_review_date(self, r_term):", "body": "reviewed_list = list(self.graph.triples((r_term, self.spdx_namespace[''], None)))if len(reviewed_list) != :self.error = Truemsg = ''self.logger.log(msg)returnreturn six.text_type(reviewed_list[][])", "docstring": "Returns review date or None if not found.\n Reports error on failure.\n Note does not check value format.", "id": "f3751:c4:m3"} {"signature": "def get_reviewer(self, r_term):", "body": "reviewer_list = list(self.graph.triples((r_term, self.spdx_namespace[''], None)))if len(reviewer_list) != :self.error = Truemsg = ''self.logger.log(msg)returntry:return self.builder.create_entity(self.doc, six.text_type(reviewer_list[][]))except SPDXValueError:self.value_error('', reviewer_list[][])", "docstring": "Returns reviewer as creator object or None if failed.\n Reports errors on failure.", "id": "f3751:c4:m4"} {"signature": "def get_annotation_type(self, r_term):", "body": "for _, _, typ in self.graph.triples((r_term, self.spdx_namespace[''], None)):if typ is not None:return typelse:self.error = Truemsg = ''self.logger.log(msg)return", "docstring": "Returns annotation type or None if found none or more than one.\n Reports errors on failure.", "id": "f3751:c5:m2"} {"signature": "def get_annotation_comment(self, r_term):", "body": "comment_list = list(self.graph.triples((r_term, RDFS.comment, None)))if len(comment_list) > :self.error = Truemsg = ''self.logger.log(msg)returnelse:return six.text_type(comment_list[][])", "docstring": "Returns annotation comment or None if found none or more than one.\n Reports errors.", "id": "f3751:c5:m3"} {"signature": "def get_annotation_date(self, r_term):", "body": "annotation_date_list = list(self.graph.triples((r_term, self.spdx_namespace[''], None)))if len(annotation_date_list) != :self.error = Truemsg = ''self.logger.log(msg)returnreturn six.text_type(annotation_date_list[][])", "docstring": "Returns annotation date or None if not found.\n Reports error on failure.\n Note does not check value format.", "id": "f3751:c5:m4"} {"signature": "def get_annotator(self, r_term):", "body": "annotator_list = list(self.graph.triples((r_term, self.spdx_namespace[''], None)))if len(annotator_list) != :self.error = Truemsg = ''self.logger.log(msg)returntry:return self.builder.create_entity(self.doc, six.text_type(annotator_list[][]))except 
SPDXValueError:self.value_error('', annotator_list[][])", "docstring": "Returns annotator as creator object or None if failed.\n Reports errors on failure.", "id": "f3751:c5:m5"} {"signature": "def parse(self, fil):", "body": "self.error = Falseself.graph = Graph()self.graph.parse(file=fil, format='')self.doc = document.Document()for s, _p, o in self.graph.triples((None, RDF.type, self.spdx_namespace[''])):self.parse_doc_fields(s)for s, _p, o in self.graph.triples((None, RDF.type, self.spdx_namespace[''])):self.parse_ext_doc_ref(s)for s, _p, o in self.graph.triples((None, RDF.type, self.spdx_namespace[''])):self.parse_creation_info(s)for s, _p, o in self.graph.triples((None, RDF.type, self.spdx_namespace[''])):self.parse_package(s)for s, _p, o in self.graph.triples((None, self.spdx_namespace[''], None)):self.parse_file(o)for s, _p, o in self.graph.triples((None, self.spdx_namespace[''], None)):self.parse_review(o)for s, _p, o in self.graph.triples((None, self.spdx_namespace[''], None)):self.parse_annotation(o)validation_messages = []validation_messages = self.doc.validate(validation_messages)if not self.error:if validation_messages:for msg in validation_messages:self.logger.log(msg)self.error = Truereturn self.doc, self.error", "docstring": "Parses a file and returns a document object.\n File, a file like object.", "id": "f3751:c6:m1"} {"signature": "def parse_creation_info(self, ci_term):", "body": "for _s, _p, o in self.graph.triples((ci_term, self.spdx_namespace[''], None)):try:ent = self.builder.create_entity(self.doc, six.text_type(o))self.builder.add_creator(self.doc, ent)except SPDXValueError:self.value_error('', o)for _s, _p, o in self.graph.triples((ci_term, self.spdx_namespace[''], None)):try:self.builder.set_created_date(self.doc, six.text_type(o))except SPDXValueError:self.value_error('', o)except CardinalityError:self.more_than_one_error('')breakfor _s, _p, o in self.graph.triples((ci_term, RDFS.comment, None)):try:self.builder.set_creation_comment(self.doc, six.text_type(o))except CardinalityError:self.more_than_one_error('')breakfor _s, _p, o in self.graph.triples((ci_term, self.spdx_namespace[''], None)):try:self.builder.set_lics_list_ver(self.doc, six.text_type(o))except CardinalityError:self.more_than_one_error('')breakexcept SPDXValueError:self.value_error('', o)", "docstring": "Parse creators, created and comment.", "id": "f3751:c6:m2"} {"signature": "def parse_doc_fields(self, doc_term):", "body": "try:self.builder.set_doc_spdx_id(self.doc, doc_term)except SPDXValueError:self.value_error('', doc_term)try:if doc_term.count('', , len(doc_term)) <= :doc_namespace = doc_term.split('')[]self.builder.set_doc_namespace(self.doc, doc_namespace)else:self.value_error('', doc_term)except SPDXValueError:self.value_error('', doc_term)for _s, _p, o in self.graph.triples((doc_term, self.spdx_namespace[''], None)):try:self.builder.set_doc_version(self.doc, six.text_type(o))except SPDXValueError:self.value_error('', o)except CardinalityError:self.more_than_one_error('')breakfor _s, _p, o in self.graph.triples((doc_term, self.spdx_namespace[''], None)):try:self.builder.set_doc_data_lic(self.doc, six.text_type(o))except SPDXValueError:self.value_error('', o)except CardinalityError:self.more_than_one_error('')breakfor _s, _p, o in self.graph.triples((doc_term, self.spdx_namespace[''], None)):try:self.builder.set_doc_name(self.doc, six.text_type(o))except CardinalityError:self.more_than_one_error('')breakfor _s, _p, o in self.graph.triples((doc_term, RDFS.comment, 
None)):try:self.builder.set_doc_comment(self.doc, six.text_type(o))except CardinalityError:self.more_than_one_error('')break", "docstring": "Parses the version, data license, name, SPDX Identifier, namespace,\n and comment.", "id": "f3751:c6:m3"} {"signature": "def parse_ext_doc_ref(self, ext_doc_ref_term):", "body": "for _s, _p, o in self.graph.triples((ext_doc_ref_term,self.spdx_namespace[''],None)):try:self.builder.set_ext_doc_id(self.doc, six.text_type(o))except SPDXValueError:self.value_error('', '')breakfor _s, _p, o in self.graph.triples((ext_doc_ref_term,self.spdx_namespace[''],None)):try:self.builder.set_spdx_doc_uri(self.doc, six.text_type(o))except SPDXValueError:self.value_error('', '')breakfor _s, _p, checksum in self.graph.triples((ext_doc_ref_term, self.spdx_namespace[''], None)):for _, _, value in self.graph.triples((checksum, self.spdx_namespace[''], None)):try:self.builder.set_chksum(self.doc, six.text_type(value))except SPDXValueError:self.value_error('', '')break", "docstring": "Parses the External Document ID, SPDX Document URI and Checksum.", "id": "f3751:c6:m4"} {"signature": "def p_start_1(self, p):", "body": "pass", "docstring": "start : start attrib", "id": "f3753:c0:m1"} {"signature": "def p_start_2(self, p):", "body": "pass", "docstring": "start : attrib", "id": "f3753:c0:m2"} {"signature": "def p_attrib(self, p):", "body": "pass", "docstring": "attrib : spdx_version\n | spdx_id\n | data_lics\n | doc_name\n | ext_doc_ref\n | doc_comment\n | doc_namespace\n | creator\n | created\n | creator_comment\n | locs_list_ver\n | reviewer\n | review_date\n | review_comment\n | annotator\n | annotation_date\n | annotation_comment\n | annotation_type\n | annotation_spdx_id\n | package_name\n | package_version\n | pkg_down_location\n | pkg_home\n | pkg_summary\n | pkg_src_info\n | pkg_file_name\n | pkg_supplier\n | pkg_orig\n | pkg_chksum\n | pkg_verif\n | pkg_desc\n | pkg_lic_decl\n | pkg_lic_conc\n | pkg_lic_ff\n | pkg_lic_comment\n | pkg_cr_text\n | file_name\n | file_type\n | file_chksum\n | file_conc\n | file_lics_info\n | file_cr_text\n | file_lics_comment\n | file_notice\n | file_comment\n | file_contrib\n | file_dep\n | file_artifact\n | extr_lic_id\n | extr_lic_text\n | extr_lic_name\n | lic_xref\n | lic_comment\n | unknown_tag", "id": "f3753:c0:m3"} {"signature": "def order_error(self, first_tag, second_tag, line):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(first_tag, second_tag, line)self.logger.log(msg)", "docstring": "Reports an OrderError. 
Error message will state that\n first_tag came before second_tag.", "id": "f3753:c0:m5"} {"signature": "def p_lic_xref_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.add_lic_xref(self.document, value)except OrderError:self.order_error('', '', p.lineno())", "docstring": "lic_xref : LICS_CRS_REF LINE", "id": "f3753:c0:m6"} {"signature": "def p_lic_xref_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "lic_xref : LICS_CRS_REF error", "id": "f3753:c0:m7"} {"signature": "def p_lic_comment_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_lic_comment(self.document, value)except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "lic_comment : LICS_COMMENT TEXT", "id": "f3753:c0:m8"} {"signature": "def p_lic_comment_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "lic_comment : LICS_COMMENT error", "id": "f3753:c0:m9"} {"signature": "def p_extr_lic_name_1(self, p):", "body": "try:self.builder.set_lic_name(self.document, p[])except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "extr_lic_name : LICS_NAME extr_lic_name_value", "id": "f3753:c0:m10"} {"signature": "def p_extr_lic_name_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "extr_lic_name : LICS_NAME error", "id": "f3753:c0:m11"} {"signature": "def p_extr_lic_name_value_1(self, p):", "body": "if six.PY2:p[] = p[].decode(encoding='')else:p[] = p[]", "docstring": "extr_lic_name_value : LINE", "id": "f3753:c0:m12"} {"signature": "def p_extr_lic_name_value_2(self, p):", "body": "p[] = utils.NoAssert()", "docstring": "extr_lic_name_value : NO_ASSERT", "id": "f3753:c0:m13"} {"signature": "def p_extr_lic_text_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_lic_text(self.document, value)except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "extr_lic_text : LICS_TEXT TEXT", "id": "f3753:c0:m14"} {"signature": "def p_extr_lic_text_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "extr_lic_text : LICS_TEXT error", "id": "f3753:c0:m15"} {"signature": "def p_extr_lic_id_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_lic_id(self.document, value)except SPDXValueError:self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "extr_lic_id : LICS_ID LINE", "id": "f3753:c0:m16"} {"signature": "def p_extr_lic_id_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "extr_lic_id : LICS_ID error", "id": "f3753:c0:m17"} {"signature": "def p_uknown_tag(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p[], p.lineno())self.logger.log(msg)", "docstring": "unknown_tag : UNKNOWN_TAG LINE", "id": "f3753:c0:m18"} {"signature": "def p_file_artifact_1(self, p):", "body": "pass", "docstring": "file_artifact : prj_name_art file_art_rest\n | prj_name_art", "id": "f3753:c0:m19"} {"signature": "def p_file_artificat_2(self, p):", "body": 
"self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "file_artifact : prj_name_art error", "id": "f3753:c0:m20"} {"signature": "def p_file_art_rest(self, p):", "body": "pass", "docstring": "file_art_rest : prj_home_art prj_uri_art\n | prj_uri_art prj_home_art\n | prj_home_art\n | prj_uri_art", "id": "f3753:c0:m21"} {"signature": "def p_prj_uri_art_1(self, p):", "body": "try:self.builder.set_file_atrificat_of_project(self.document,'', utils.UnKnown())except OrderError:self.order_error('', '', p.lineno())", "docstring": "prj_uri_art : ART_PRJ_URI UN_KNOWN", "id": "f3753:c0:m22"} {"signature": "def p_prj_uri_art_2(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_file_atrificat_of_project(self.document, '', value)except OrderError:self.order_error('', '', p.lineno())", "docstring": "prj_uri_art : ART_PRJ_URI LINE", "id": "f3753:c0:m23"} {"signature": "def p_prj_uri_art_3(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "prj_uri_art : ART_PRJ_URI error", "id": "f3753:c0:m24"} {"signature": "def p_prj_home_art_1(self, p):", "body": "try:self.builder.set_file_atrificat_of_project(self.document, '', p[])except OrderError:self.order_error('', '', p.lineno())", "docstring": "prj_home_art : ART_PRJ_HOME LINE", "id": "f3753:c0:m25"} {"signature": "def p_prj_home_art_2(self, p):", "body": "try:self.builder.set_file_atrificat_of_project(self.document,'', utils.UnKnown())except OrderError:self.order_error('', '', p.lineno())", "docstring": "prj_home_art : ART_PRJ_HOME UN_KNOWN", "id": "f3753:c0:m26"} {"signature": "def p_prj_home_art_3(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "prj_home_art : ART_PRJ_HOME error", "id": "f3753:c0:m27"} {"signature": "def p_prj_name_art_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_file_atrificat_of_project(self.document, '', value)except OrderError:self.order_error('', '', p.lineno())", "docstring": "prj_name_art : ART_PRJ_NAME LINE", "id": "f3753:c0:m28"} {"signature": "def p_prj_name_art_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "prj_name_art : ART_PRJ_NAME error", "id": "f3753:c0:m29"} {"signature": "def p_file_dep_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.add_file_dep(self.document, value)except OrderError:self.order_error('', '', p.lineno())", "docstring": "file_dep : FILE_DEP LINE", "id": "f3753:c0:m30"} {"signature": "def p_file_dep_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "file_dep : FILE_DEP error", "id": "f3753:c0:m31"} {"signature": "def p_file_contrib_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.add_file_contribution(self.document, value)except OrderError:self.order_error('', '', p.lineno())", "docstring": "file_contrib : FILE_CONTRIB LINE", "id": "f3753:c0:m32"} {"signature": "def p_file_contrib_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "file_contrib : FILE_CONTRIB error", "id": "f3753:c0:m33"} {"signature": "def p_file_notice_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = 
p[]self.builder.set_file_notice(self.document, value)except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "file_notice : FILE_NOTICE TEXT", "id": "f3753:c0:m34"} {"signature": "def p_file_notice_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "file_notice : FILE_NOTICE error", "id": "f3753:c0:m35"} {"signature": "def p_file_cr_text_1(self, p):", "body": "try:self.builder.set_file_copyright(self.document, p[])except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "file_cr_text : FILE_CR_TEXT file_cr_value", "id": "f3753:c0:m36"} {"signature": "def p_file_cr_text_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "file_cr_text : FILE_CR_TEXT error", "id": "f3753:c0:m37"} {"signature": "def p_file_cr_value_1(self, p):", "body": "if six.PY2:p[] = p[].decode(encoding='')else:p[] = p[]", "docstring": "file_cr_value : TEXT", "id": "f3753:c0:m38"} {"signature": "def p_file_cr_value_2(self, p):", "body": "p[] = utils.SPDXNone()", "docstring": "file_cr_value : NONE", "id": "f3753:c0:m39"} {"signature": "def p_file_cr_value_3(self, p):", "body": "p[] = utils.NoAssert()", "docstring": "file_cr_value : NO_ASSERT", "id": "f3753:c0:m40"} {"signature": "def p_file_lics_comment_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_file_license_comment(self.document, value)except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "file_lics_comment : FILE_LICS_COMMENT TEXT", "id": "f3753:c0:m41"} {"signature": "def p_file_lics_comment_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "file_lics_comment : FILE_LICS_COMMENT error", "id": "f3753:c0:m42"} {"signature": "def p_file_lics_info_1(self, p):", "body": "try:self.builder.set_file_license_in_file(self.document, p[])except OrderError:self.order_error('', '', p.lineno())except SPDXValueError:self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "file_lics_info : FILE_LICS_INFO file_lic_info_value", "id": "f3753:c0:m43"} {"signature": "def p_file_lics_info_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "file_lics_info : FILE_LICS_INFO error", "id": "f3753:c0:m44"} {"signature": "def p_file_lic_info_value_1(self, p):", "body": "p[] = utils.SPDXNone()", "docstring": "file_lic_info_value : NONE", "id": "f3753:c0:m45"} {"signature": "def p_file_lic_info_value_2(self, p):", "body": "p[] = utils.NoAssert()", "docstring": "file_lic_info_value : NO_ASSERT", "id": "f3753:c0:m46"} {"signature": "def p_file_lic_info_value_3(self, p):", "body": "if six.PY2:value = p[].decode(encoding='')else:value = p[]p[] = document.License.from_identifier(value)", "docstring": "file_lic_info_value : LINE", "id": "f3753:c0:m47"} {"signature": "def p_conc_license_1(self, p):", "body": "p[] = utils.NoAssert()", "docstring": "conc_license : NO_ASSERT", "id": "f3753:c0:m48"} {"signature": "def p_conc_license_2(self, p):", "body": "p[] = utils.SPDXNone()", "docstring": "conc_license : NONE", "id": "f3753:c0:m49"} {"signature": "def p_conc_license_3(self, p):", "body": "if six.PY2:value = 
p[].decode(encoding='')else:value = p[]ref_re = re.compile('', re.UNICODE)if (p[] in config.LICENSE_MAP.keys()) or (ref_re.match(p[]) is not None):p[] = document.License.from_identifier(value)else:p[] = self.license_list_parser.parse(value)", "docstring": "conc_license : LINE", "id": "f3753:c0:m50"} {"signature": "def p_file_name_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_file_name(self.document, value)except OrderError:self.order_error('', '', p.lineno())", "docstring": "file_name : FILE_NAME LINE", "id": "f3753:c0:m51"} {"signature": "def p_file_name_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "file_name : FILE_NAME error", "id": "f3753:c0:m52"} {"signature": "def p_spdx_id(self, p):", "body": "if six.PY2:value = p[].decode(encoding='')else:value = p[]if not self.builder.doc_spdx_id_set:self.builder.set_doc_spdx_id(self.document, value)else:self.builder.set_file_spdx_id(self.document, value)", "docstring": "spdx_id : SPDX_ID LINE", "id": "f3753:c0:m53"} {"signature": "def p_file_comment_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_file_comment(self.document, value)except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "file_comment : FILE_COMMENT TEXT", "id": "f3753:c0:m54"} {"signature": "def p_file_comment_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "file_comment : FILE_COMMENT error", "id": "f3753:c0:m55"} {"signature": "def p_file_type_1(self, p):", "body": "try:self.builder.set_file_type(self.document, p[])except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "file_type : FILE_TYPE file_type_value", "id": "f3753:c0:m56"} {"signature": "def p_file_type_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "file_type : FILE_TYPE error", "id": "f3753:c0:m57"} {"signature": "def p_file_chksum_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_file_chksum(self.document, value)except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "file_chksum : FILE_CHKSUM CHKSUM", "id": "f3753:c0:m58"} {"signature": "def p_file_chksum_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "file_chksum : FILE_CHKSUM error", "id": "f3753:c0:m59"} {"signature": "def p_file_conc_1(self, p):", "body": "try:self.builder.set_concluded_license(self.document, p[])except SPDXValueError:self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "file_conc : FILE_LICS_CONC conc_license", "id": "f3753:c0:m60"} {"signature": "def p_file_conc_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "file_conc : FILE_LICS_CONC error", "id": "f3753:c0:m61"} {"signature": "def p_file_type_value(self, p):", "body": "if six.PY2:p[] = p[].decode(encoding='')else:p[] = p[]", "docstring": "file_type_value : OTHER\n | SOURCE\n | ARCHIVE\n | 
BINARY", "id": "f3753:c0:m62"} {"signature": "def p_pkg_desc_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_pkg_desc(self.document, value)except CardinalityError:self.more_than_one_error('', p.lineno())except OrderError:self.order_error('', '', p.lineno())", "docstring": "pkg_desc : PKG_DESC TEXT", "id": "f3753:c0:m63"} {"signature": "def p_pkg_desc_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_desc : PKG_DESC error", "id": "f3753:c0:m64"} {"signature": "def p_pkg_summary_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_pkg_summary(self.document, value)except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "pkg_summary : PKG_SUM TEXT", "id": "f3753:c0:m65"} {"signature": "def p_pkg_summary_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_summary : PKG_SUM error", "id": "f3753:c0:m66"} {"signature": "def p_pkg_cr_text_1(self, p):", "body": "try:self.builder.set_pkg_cr_text(self.document, p[])except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "pkg_cr_text : PKG_CPY_TEXT pkg_cr_text_value", "id": "f3753:c0:m67"} {"signature": "def p_pkg_cr_text_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_cr_text : PKG_CPY_TEXT error", "id": "f3753:c0:m68"} {"signature": "def p_pkg_cr_text_value_1(self, p):", "body": "if six.PY2:p[] = p[].decode(encoding='')else:p[] = p[]", "docstring": "pkg_cr_text_value : TEXT", "id": "f3753:c0:m69"} {"signature": "def p_pkg_cr_text_value_2(self, p):", "body": "p[] = utils.SPDXNone()", "docstring": "pkg_cr_text_value : NONE", "id": "f3753:c0:m70"} {"signature": "def p_pkg_cr_text_value_3(self, p):", "body": "p[] = utils.NoAssert()", "docstring": "pkg_cr_text_value : NO_ASSERT", "id": "f3753:c0:m71"} {"signature": "def p_pkg_lic_comment_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_pkg_license_comment(self.document, value)except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "pkg_lic_comment : PKG_LICS_COMMENT TEXT", "id": "f3753:c0:m72"} {"signature": "def p_pkg_lic_comment_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_lic_comment : PKG_LICS_COMMENT error", "id": "f3753:c0:m73"} {"signature": "def p_pkg_lic_decl_1(self, p):", "body": "try:self.builder.set_pkg_license_declared(self.document, p[])except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())except SPDXValueError:self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_lic_decl : PKG_LICS_DECL conc_license", "id": "f3753:c0:m74"} {"signature": "def p_pkg_lic_decl_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_lic_decl : PKG_LICS_DECL error", "id": "f3753:c0:m75"} {"signature": "def p_pkg_lic_ff_1(self, p):", "body": "try:self.builder.set_pkg_license_from_file(self.document, p[])except OrderError:self.order_error('', '', 
p.lineno())except SPDXValueError:self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_lic_ff : PKG_LICS_FFILE pkg_lic_ff_value", "id": "f3753:c0:m76"} {"signature": "def p_pkg_lic_ff_value_1(self, p):", "body": "p[] = utils.SPDXNone()", "docstring": "pkg_lic_ff_value : NONE", "id": "f3753:c0:m77"} {"signature": "def p_pkg_lic_ff_value_2(self, p):", "body": "p[] = utils.NoAssert()", "docstring": "pkg_lic_ff_value : NO_ASSERT", "id": "f3753:c0:m78"} {"signature": "def p_pkg_lic_ff_value_3(self, p):", "body": "if six.PY2:value = p[].decode(encoding='')else:value = p[]p[] = document.License.from_identifier(value)", "docstring": "pkg_lic_ff_value : LINE", "id": "f3753:c0:m79"} {"signature": "def p_pkg_lic_ff_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_lic_ff : PKG_LICS_FFILE error", "id": "f3753:c0:m80"} {"signature": "def p_pkg_lic_conc_1(self, p):", "body": "try:self.builder.set_pkg_licenses_concluded(self.document, p[])except CardinalityError:self.more_than_one_error('', p.lineno())except OrderError:self.order_error('', '', p.lineno())except SPDXValueError:self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_lic_conc : PKG_LICS_CONC conc_license", "id": "f3753:c0:m81"} {"signature": "def p_pkg_lic_conc_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_lic_conc : PKG_LICS_CONC error", "id": "f3753:c0:m82"} {"signature": "def p_pkg_src_info_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_pkg_source_info(self.document, value)except CardinalityError:self.more_than_one_error('', p.lineno())except OrderError:self.order_error('', '', p.lineno())", "docstring": "pkg_src_info : PKG_SRC_INFO TEXT", "id": "f3753:c0:m83"} {"signature": "def p_pkg_src_info_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_src_info : PKG_SRC_INFO error", "id": "f3753:c0:m84"} {"signature": "def p_pkg_chksum_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_pkg_chk_sum(self.document, value)except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "pkg_chksum : PKG_CHKSUM CHKSUM", "id": "f3753:c0:m85"} {"signature": "def p_pkg_chksum_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_chksum : PKG_CHKSUM error", "id": "f3753:c0:m86"} {"signature": "def p_pkg_verif_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_pkg_verif_code(self.document, value)except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())except SPDXValueError:self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_verif : PKG_VERF_CODE LINE", "id": "f3753:c0:m87"} {"signature": "def p_pkg_verif_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_verif : PKG_VERF_CODE error", "id": "f3753:c0:m88"} {"signature": "def p_pkg_home_1(self, p):", "body": "try:self.builder.set_pkg_down_location(self.document, p[])except OrderError:self.order_error('', '', p.lineno())except 
CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "pkg_home : PKG_HOME pkg_home_value", "id": "f3753:c0:m89"} {"signature": "def p_pkg_home_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES['']self.logger.log(msg)", "docstring": "pkg_home : PKG_HOME error", "id": "f3753:c0:m90"} {"signature": "def p_pkg_home_value_1(self, p):", "body": "if six.PY2:p[] = p[].decode(encoding='')else:p[] = p[]", "docstring": "pkg_home_value : LINE", "id": "f3753:c0:m91"} {"signature": "def p_pkg_home_value_2(self, p):", "body": "p[] = utils.SPDXNone()", "docstring": "pkg_home_value : NONE", "id": "f3753:c0:m92"} {"signature": "def p_pkg_home_value_3(self, p):", "body": "p[] = utils.NoAssert()", "docstring": "pkg_home_value : NO_ASSERT", "id": "f3753:c0:m93"} {"signature": "def p_pkg_down_location_1(self, p):", "body": "try:self.builder.set_pkg_down_location(self.document, p[])except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "pkg_down_location : PKG_DOWN pkg_down_value", "id": "f3753:c0:m94"} {"signature": "def p_pkg_down_location_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_down_location : PKG_DOWN error", "id": "f3753:c0:m95"} {"signature": "def p_pkg_down_value_1(self, p):", "body": "if six.PY2:p[] = p[].decode(encoding='')else:p[] = p[]", "docstring": "pkg_down_value : LINE", "id": "f3753:c0:m96"} {"signature": "def p_pkg_down_value_2(self, p):", "body": "p[] = utils.SPDXNone()", "docstring": "pkg_down_value : NONE", "id": "f3753:c0:m97"} {"signature": "def p_pkg_down_value_3(self, p):", "body": "p[] = utils.NoAssert()", "docstring": "pkg_down_value : NO_ASSERT", "id": "f3753:c0:m98"} {"signature": "def p_pkg_orig_1(self, p):", "body": "try:self.builder.set_pkg_originator(self.document, p[])except OrderError:self.order_error('', '', p.lineno())except SPDXValueError:self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "pkg_orig : PKG_ORIG pkg_supplier_values", "id": "f3753:c0:m99"} {"signature": "def p_pkg_orig_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_orig : PKG_ORIG error", "id": "f3753:c0:m100"} {"signature": "def p_pkg_supplier_1(self, p):", "body": "try:self.builder.set_pkg_supplier(self.document, p[])except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())except SPDXValueError:self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_supplier : PKG_SUPPL pkg_supplier_values", "id": "f3753:c0:m101"} {"signature": "def p_pkg_supplier_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_supplier : PKG_SUPPL error", "id": "f3753:c0:m102"} {"signature": "def p_pkg_supplier_values_1(self, p):", "body": "p[] = utils.NoAssert()", "docstring": "pkg_supplier_values : NO_ASSERT", "id": "f3753:c0:m103"} {"signature": "def p_pkg_supplier_values_2(self, p):", "body": "p[] = p[]", "docstring": "pkg_supplier_values : entity", "id": "f3753:c0:m104"} {"signature": "def p_pkg_file_name(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_pkg_file_name(self.document, value)except OrderError:self.order_error('', '', 
p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "pkg_file_name : PKG_FILE_NAME LINE", "id": "f3753:c0:m105"} {"signature": "def p_pkg_file_name_1(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "pkg_file_name : PKG_FILE_NAME error", "id": "f3753:c0:m106"} {"signature": "def p_package_version_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_pkg_vers(self.document, value)except OrderError:self.order_error('', '', p.lineno())except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "package_version : PKG_VERSION LINE", "id": "f3753:c0:m107"} {"signature": "def p_package_version_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "package_version : PKG_VERSION error", "id": "f3753:c0:m108"} {"signature": "def p_package_name(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.create_package(self.document, value)except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "package_name : PKG_NAME LINE", "id": "f3753:c0:m109"} {"signature": "def p_package_name_1(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "package_name : PKG_NAME error", "id": "f3753:c0:m110"} {"signature": "def p_reviewer_1(self, p):", "body": "self.builder.add_reviewer(self.document, p[])", "docstring": "reviewer : REVIEWER entity", "id": "f3753:c0:m111"} {"signature": "def p_reviewer_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "reviewer : REVIEWER error", "id": "f3753:c0:m112"} {"signature": "def p_review_date_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.add_review_date(self.document, value)except CardinalityError:self.more_than_one_error('', p.lineno())except OrderError:self.order_error('', '', p.lineno())", "docstring": "review_date : REVIEW_DATE DATE", "id": "f3753:c0:m113"} {"signature": "def p_review_date_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "review_date : REVIEW_DATE error", "id": "f3753:c0:m114"} {"signature": "def p_review_comment_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.add_review_comment(self.document, value)except CardinalityError:self.more_than_one_error('', p.lineno())except OrderError:self.order_error('', '', p.lineno())", "docstring": "review_comment : REVIEW_COMMENT TEXT", "id": "f3753:c0:m115"} {"signature": "def p_review_comment_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "review_comment : REVIEW_COMMENT error", "id": "f3753:c0:m116"} {"signature": "def p_annotator_1(self, p):", "body": "self.builder.add_annotator(self.document, p[])", "docstring": "annotator : ANNOTATOR entity", "id": "f3753:c0:m117"} {"signature": "def p_annotator_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "annotator : ANNOTATOR error", "id": "f3753:c0:m118"} {"signature": "def p_annotation_date_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.add_annotation_date(self.document, value)except 
CardinalityError:self.more_than_one_error('', p.lineno())except OrderError:self.order_error('', '', p.lineno())", "docstring": "annotation_date : ANNOTATION_DATE DATE", "id": "f3753:c0:m119"} {"signature": "def p_annotation_date_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "annotation_date : ANNOTATION_DATE error", "id": "f3753:c0:m120"} {"signature": "def p_annotation_comment_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.add_annotation_comment(self.document, value)except CardinalityError:self.more_than_one_error('', p.lineno())except OrderError:self.order_error('', '', p.lineno())", "docstring": "annotation_comment : ANNOTATION_COMMENT TEXT", "id": "f3753:c0:m121"} {"signature": "def p_annotation_comment_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "annotation_comment : ANNOTATION_COMMENT error", "id": "f3753:c0:m122"} {"signature": "def p_annotation_type_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.add_annotation_type(self.document, value)except CardinalityError:self.more_than_one_error('', p.lineno())except SPDXValueError:self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)except OrderError:self.order_error('', '', p.lineno())", "docstring": "annotation_type : ANNOTATION_TYPE LINE", "id": "f3753:c0:m123"} {"signature": "def p_annotation_type_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "annotation_type : ANNOTATION_TYPE error", "id": "f3753:c0:m124"} {"signature": "def p_annotation_spdx_id_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_annotation_spdx_id(self.document, value)except CardinalityError:self.more_than_one_error('', p.lineno())except OrderError:self.order_error('', '', p.lineno())", "docstring": "annotation_spdx_id : ANNOTATION_SPDX_ID LINE", "id": "f3753:c0:m125"} {"signature": "def p_annotation_spdx_id_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "annotation_spdx_id : ANNOTATION_SPDX_ID error", "id": "f3753:c0:m126"} {"signature": "def p_lics_list_ver_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_lics_list_ver(self.document, value)except SPDXValueError:self.error = Truemsg = ERROR_MESSAGES[''].format(p[], p.lineno())self.logger.log(msg)except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "locs_list_ver : LIC_LIST_VER LINE", "id": "f3753:c0:m127"} {"signature": "def p_lics_list_ver_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "locs_list_ver : LIC_LIST_VER error", "id": "f3753:c0:m128"} {"signature": "def p_doc_comment_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_doc_comment(self.document, value)except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "doc_comment : DOC_COMMENT TEXT", "id": "f3753:c0:m129"} {"signature": "def p_doc_comment_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "doc_comment : DOC_COMMENT error", "id": "f3753:c0:m130"} {"signature": "def p_doc_namespace_1(self, p):", "body": 
"try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_doc_namespace(self.document, value)except SPDXValueError:self.error = Truemsg = ERROR_MESSAGES[''].format(p[], p.lineno())self.logger.log(msg)except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "doc_namespace : DOC_NAMESPACE LINE", "id": "f3753:c0:m131"} {"signature": "def p_doc_namespace_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "doc_namespace : DOC_NAMESPACE error", "id": "f3753:c0:m132"} {"signature": "def p_data_license_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_doc_data_lics(self.document, value)except SPDXValueError:self.error = Truemsg = ERROR_MESSAGES[''].format(p[], p.lineno())self.logger.log(msg)except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "data_lics : DOC_LICENSE LINE", "id": "f3753:c0:m133"} {"signature": "def p_data_license_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "data_lics : DOC_LICENSE error", "id": "f3753:c0:m134"} {"signature": "def p_doc_name_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_doc_name(self.document, value)except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "doc_name : DOC_NAME LINE", "id": "f3753:c0:m135"} {"signature": "def p_doc_name_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "doc_name : DOC_NAME error", "id": "f3753:c0:m136"} {"signature": "def p_ext_doc_refs_1(self, p):", "body": "try:if six.PY2:doc_ref_id = p[].decode(encoding='')doc_uri = p[].decode(encoding='')ext_doc_chksum = p[].decode(encoding='')else:doc_ref_id = p[]doc_uri = p[]ext_doc_chksum = p[]self.builder.add_ext_doc_refs(self.document, doc_ref_id, doc_uri,ext_doc_chksum)except SPDXValueError:self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "ext_doc_ref : EXT_DOC_REF DOC_REF_ID DOC_URI EXT_DOC_REF_CHKSUM", "id": "f3753:c0:m137"} {"signature": "def p_ext_doc_refs_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "ext_doc_ref : EXT_DOC_REF error", "id": "f3753:c0:m138"} {"signature": "def p_spdx_version_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_doc_version(self.document, value)except CardinalityError:self.more_than_one_error('', p.lineno())except SPDXValueError:self.error = Truemsg = ERROR_MESSAGES[''].format(p[], p.lineno())self.logger.log(msg)", "docstring": "spdx_version : DOC_VERSION LINE", "id": "f3753:c0:m139"} {"signature": "def p_spdx_version_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "spdx_version : DOC_VERSION error", "id": "f3753:c0:m140"} {"signature": "def p_creator_comment_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_creation_comment(self.document, value)except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "creator_comment : CREATOR_COMMENT TEXT", "id": "f3753:c0:m141"} {"signature": "def p_creator_comment_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "creator_comment : 
CREATOR_COMMENT error", "id": "f3753:c0:m142"} {"signature": "def p_creator_1(self, p):", "body": "self.builder.add_creator(self.document, p[])", "docstring": "creator : CREATOR entity", "id": "f3753:c0:m143"} {"signature": "def p_creator_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "creator : CREATOR error", "id": "f3753:c0:m144"} {"signature": "def p_created_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]self.builder.set_created_date(self.document, value)except CardinalityError:self.more_than_one_error('', p.lineno())", "docstring": "created : CREATED DATE", "id": "f3753:c0:m145"} {"signature": "def p_created_2(self, p):", "body": "self.error = Truemsg = ERROR_MESSAGES[''].format(p.lineno())self.logger.log(msg)", "docstring": "created : CREATED error", "id": "f3753:c0:m146"} {"signature": "def p_entity_1(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]p[] = self.builder.build_tool(self.document, value)except SPDXValueError:msg = ERROR_MESSAGES[''].format(p[], p.lineno())self.logger.log(msg)self.error = Truep[] = None", "docstring": "entity : TOOL_VALUE", "id": "f3753:c0:m147"} {"signature": "def p_entity_2(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]p[] = self.builder.build_org(self.document, value)except SPDXValueError:msg = ERROR_MESSAGES[''].format(p[], p.lineno())self.logger.log(msg)self.error = Truep[] = None", "docstring": "entity : ORG_VALUE", "id": "f3753:c0:m148"} {"signature": "def p_entity_3(self, p):", "body": "try:if six.PY2:value = p[].decode(encoding='')else:value = p[]p[] = self.builder.build_person(self.document, value)except SPDXValueError:msg = ERROR_MESSAGES[''].format(p[], p.lineno())self.logger.log(msg)self.error = Truep[] = None", "docstring": "entity : PERSON_VALUE", "id": "f3753:c0:m149"} {"signature": "def checksum_from_sha1(value):", "body": "CHECKSUM_RE = re.compile('', re.UNICODE)match = CHECKSUM_RE.match(value)if match:return checksum.Algorithm(identifier='', value=match.group())else:return None", "docstring": "Return an spdx.checksum.Algorithm instance representing the SHA1\nchecksum or None if does not match CHECKSUM_RE.", "id": "f3754:m0"} {"signature": "def str_from_text(text):", "body": "REGEX = re.compile('', re.UNICODE)match = REGEX.match(text)if match:return match.group()else:return None", "docstring": "Return content of a free form text block as a string.", "id": "f3754:m1"} {"signature": "def set_doc_version(self, doc, value):", "body": "if not self.doc_version_set:self.doc_version_set = Truem = self.VERS_STR_REGEX.match(value)if m is None:raise SPDXValueError('')else:doc.version = version.Version(major=int(m.group()),minor=int(m.group()))return Trueelse:raise CardinalityError('')", "docstring": "Set the document version.\nRaise SPDXValueError if malformed value, CardinalityError\nif already defined", "id": "f3754:c0:m1"} {"signature": "def set_doc_data_lics(self, doc, lics):", "body": "if not self.doc_data_lics_set:self.doc_data_lics_set = Trueif validations.validate_data_lics(lics):doc.data_license = document.License.from_identifier(lics)return Trueelse:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets the document data license.\n Raises value error if malformed value, CardinalityError\n if already defined.", "id": "f3754:c0:m2"} {"signature": "def set_doc_name(self, doc, name):", "body": "if not self.doc_name_set:doc.name = 
nameself.doc_name_set = Truereturn Trueelse:raise CardinalityError('')", "docstring": "Sets the document name.\n Raises CardinalityError if already defined.", "id": "f3754:c0:m3"} {"signature": "def set_doc_spdx_id(self, doc, doc_spdx_id_line):", "body": "if not self.doc_spdx_id_set:if doc_spdx_id_line == '':doc.spdx_id = doc_spdx_id_lineself.doc_spdx_id_set = Truereturn Trueelse:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets the document SPDX Identifier.\n Raises value error if malformed value, CardinalityError\n if already defined.", "id": "f3754:c0:m4"} {"signature": "def set_doc_comment(self, doc, comment):", "body": "if not self.doc_comment_set:self.doc_comment_set = Trueif validations.validate_doc_comment(comment):doc.comment = str_from_text(comment)return Trueelse:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets document comment, Raises CardinalityError if\n comment already set.\n Raises SPDXValueError if comment is not free form text.", "id": "f3754:c0:m5"} {"signature": "def set_doc_namespace(self, doc, namespace):", "body": "if not self.doc_namespace_set:self.doc_namespace_set = Trueif validations.validate_doc_namespace(namespace):doc.namespace = namespacereturn Trueelse:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets the document namespace.\n Raise SPDXValueError if malformed value, CardinalityError\n if already defined.", "id": "f3754:c0:m6"} {"signature": "def reset_document(self):", "body": "self.doc_version_set = Falseself.doc_comment_set = Falseself.doc_namespace_set = Falseself.doc_data_lics_set = Falseself.doc_name_set = Falseself.doc_spdx_id_set = False", "docstring": "Resets the state to allow building new documents", "id": "f3754:c0:m7"} {"signature": "def set_ext_doc_id(self, doc, ext_doc_id):", "body": "doc.add_ext_document_reference(ExternalDocumentRef(external_document_id=ext_doc_id))", "docstring": "Sets the `external_document_id` attribute of the `ExternalDocumentRef`\nobject.", "id": "f3754:c1:m0"} {"signature": "def set_spdx_doc_uri(self, doc, spdx_doc_uri):", "body": "if validations.validate_doc_namespace(spdx_doc_uri):doc.ext_document_references[-].spdx_document_uri = spdx_doc_urielse:raise SPDXValueError('')", "docstring": "Sets the `spdx_document_uri` attribute of the `ExternalDocumentRef`\nobject.", "id": "f3754:c1:m1"} {"signature": "def set_chksum(self, doc, chksum):", "body": "doc.ext_document_references[-].check_sum = checksum_from_sha1(chksum)", "docstring": "Sets the `check_sum` attribute of the `ExternalDocumentRef`\nobject.", "id": "f3754:c1:m2"} {"signature": "def build_tool(self, doc, entity):", "body": "match = self.tool_re.match(entity)if match and validations.validate_tool_name(match.group(self.TOOL_NAME_GROUP)):name = match.group(self.TOOL_NAME_GROUP)return creationinfo.Tool(name)else:raise SPDXValueError('')", "docstring": "Builds a tool object out of a string representation.\n Returns built tool. 
Raises SPDXValueError if failed to extract\n tool name or name is malformed", "id": "f3754:c2:m0"} {"signature": "def build_org(self, doc, entity):", "body": "match = self.org_re.match(entity)if match and validations.validate_org_name(match.group(self.ORG_NAME_GROUP)):name = match.group(self.ORG_NAME_GROUP).strip()email = match.group(self.ORG_EMAIL_GROUP)if (email is not None) and (len(email) != ):return creationinfo.Organization(name=name, email=email.strip())else:return creationinfo.Organization(name=name, email=None)else:raise SPDXValueError('')", "docstring": "Builds an organization object of of a string representation.\n Returns built organization. Raises SPDXValueError if failed to extract\n name.", "id": "f3754:c2:m1"} {"signature": "def build_person(self, doc, entity):", "body": "match = self.person_re.match(entity)if match and validations.validate_person_name(match.group(self.PERSON_NAME_GROUP)):name = match.group(self.PERSON_NAME_GROUP).strip()email = match.group(self.PERSON_EMAIL_GROUP)if (email is not None) and (len(email) != ):return creationinfo.Person(name=name, email=email.strip())else:return creationinfo.Person(name=name, email=None)else:raise SPDXValueError('')", "docstring": "Builds an organization object of of a string representation.\n Returns built organization. Raises SPDXValueError if failed to extract\n name.", "id": "f3754:c2:m2"} {"signature": "def add_creator(self, doc, creator):", "body": "if validations.validate_creator(creator):doc.creation_info.add_creator(creator)return Trueelse:raise SPDXValueError('')", "docstring": "Adds a creator to the document's creation info.\n Returns true if creator is valid.\n Creator must be built by an EntityBuilder.\n Raises SPDXValueError if not a creator type.", "id": "f3754:c3:m1"} {"signature": "def set_created_date(self, doc, created):", "body": "if not self.created_date_set:self.created_date_set = Truedate = utils.datetime_from_iso_format(created)if date is not None:doc.creation_info.created = datereturn Trueelse:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets created date, Raises CardinalityError if\n created date already set.\n Raises SPDXValueError if created is not a date.", "id": "f3754:c3:m2"} {"signature": "def set_creation_comment(self, doc, comment):", "body": "if not self.creation_comment_set:self.creation_comment_set = Trueif validations.validate_creation_comment(comment):doc.creation_info.comment = str_from_text(comment)return Trueelse:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets creation comment, Raises CardinalityError if\n comment already set.\n Raises SPDXValueError if not free form text.", "id": "f3754:c3:m3"} {"signature": "def set_lics_list_ver(self, doc, value):", "body": "if not self.lics_list_ver_set:self.lics_list_ver_set = Truevers = version.Version.from_str(value)if vers is not None:doc.creation_info.license_list_version = versreturn Trueelse:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets the license list version, Raises CardinalityError if\n already set, SPDXValueError if incorrect value.", "id": "f3754:c3:m4"} {"signature": "def reset_creation_info(self):", "body": "self.created_date_set = Falseself.creation_comment_set = Falseself.lics_list_ver_set = False", "docstring": "Resets builder state to allow building new creation info.", "id": "f3754:c3:m5"} {"signature": "def reset_reviews(self):", "body": "self.review_date_set = Falseself.review_comment_set = False", "docstring": "Resets the builder's state to 
allow building new reviews.", "id": "f3754:c4:m1"} {"signature": "def add_reviewer(self, doc, reviewer):", "body": "self.reset_reviews()if validations.validate_reviewer(reviewer):doc.add_review(review.Review(reviewer=reviewer))return Trueelse:raise SPDXValueError('')", "docstring": "Adds a reviewer to the SPDX Document.\n Reviwer is an entity created by an EntityBuilder.\n Raises SPDXValueError if not a valid reviewer type.", "id": "f3754:c4:m2"} {"signature": "def add_review_date(self, doc, reviewed):", "body": "if len(doc.reviews) != :if not self.review_date_set:self.review_date_set = Truedate = utils.datetime_from_iso_format(reviewed)if date is not None:doc.reviews[-].review_date = datereturn Trueelse:raise SPDXValueError('')else:raise CardinalityError('')else:raise OrderError('')", "docstring": "Sets the review date. Raises CardinalityError if\n already set. OrderError if no reviewer defined before.\n Raises SPDXValueError if invalid reviewed value.", "id": "f3754:c4:m3"} {"signature": "def add_review_comment(self, doc, comment):", "body": "if len(doc.reviews) != :if not self.review_comment_set:self.review_comment_set = Trueif validations.validate_review_comment(comment):doc.reviews[-].comment = str_from_text(comment)return Trueelse:raise SPDXValueError('')else:raise CardinalityError('')else:raise OrderError('')", "docstring": "Sets the review comment. Raises CardinalityError if\n already set. OrderError if no reviewer defined before.\n Raises SPDXValueError if comment is not free form text.", "id": "f3754:c4:m4"} {"signature": "def reset_annotations(self):", "body": "self.annotation_date_set = Falseself.annotation_comment_set = Falseself.annotation_type_set = Falseself.annotation_spdx_id_set = False", "docstring": "Resets the builder's state to allow building new annotations.", "id": "f3754:c5:m1"} {"signature": "def add_annotator(self, doc, annotator):", "body": "self.reset_annotations()if validations.validate_annotator(annotator):doc.add_annotation(annotation.Annotation(annotator=annotator))return Trueelse:raise SPDXValueError('')", "docstring": "Adds an annotator to the SPDX Document.\n Annotator is an entity created by an EntityBuilder.\n Raises SPDXValueError if not a valid annotator type.", "id": "f3754:c5:m2"} {"signature": "def add_annotation_date(self, doc, annotation_date):", "body": "if len(doc.annotations) != :if not self.annotation_date_set:self.annotation_date_set = Truedate = utils.datetime_from_iso_format(annotation_date)if date is not None:doc.annotations[-].annotation_date = datereturn Trueelse:raise SPDXValueError('')else:raise CardinalityError('')else:raise OrderError('')", "docstring": "Sets the annotation date. Raises CardinalityError if\n already set. OrderError if no annotator defined before.\n Raises SPDXValueError if invalid value.", "id": "f3754:c5:m3"} {"signature": "def add_annotation_comment(self, doc, comment):", "body": "if len(doc.annotations) != :if not self.annotation_comment_set:self.annotation_comment_set = Trueif validations.validate_annotation_comment(comment):doc.annotations[-].comment = str_from_text(comment)return Trueelse:raise SPDXValueError('')else:raise CardinalityError('')else:raise OrderError('')", "docstring": "Sets the annotation comment. Raises CardinalityError if\n already set. 
OrderError if no annotator defined before.\n Raises SPDXValueError if comment is not free form text.", "id": "f3754:c5:m4"} {"signature": "def add_annotation_type(self, doc, annotation_type):", "body": "if len(doc.annotations) != :if not self.annotation_type_set:self.annotation_type_set = Trueif validations.validate_annotation_type(annotation_type):doc.annotations[-].annotation_type = annotation_typereturn Trueelse:raise SPDXValueError('')else:raise CardinalityError('')else:raise OrderError('')", "docstring": "Sets the annotation type. Raises CardinalityError if\n already set. OrderError if no annotator defined before.\n Raises SPDXValueError if invalid value.", "id": "f3754:c5:m5"} {"signature": "def set_annotation_spdx_id(self, doc, spdx_id):", "body": "if len(doc.annotations) != :if not self.annotation_spdx_id_set:self.annotation_spdx_id_set = Truedoc.annotations[-].spdx_id = spdx_idreturn Trueelse:raise CardinalityError('')else:raise OrderError('')", "docstring": "Sets the annotation SPDX Identifier.\n Raises CardinalityError if already set. OrderError if no annotator\n defined before.", "id": "f3754:c5:m6"} {"signature": "def reset_package(self):", "body": "self.package_set = Falseself.package_vers_set = Falseself.package_file_name_set = Falseself.package_supplier_set = Falseself.package_originator_set = Falseself.package_down_location_set = Falseself.package_home_set = Falseself.package_verif_set = Falseself.package_chk_sum_set = Falseself.package_source_info_set = Falseself.package_conc_lics_set = Falseself.package_license_declared_set = Falseself.package_license_comment_set = Falseself.package_cr_text_set = Falseself.package_summary_set = Falseself.package_desc_set = False", "docstring": "Resets the builder's state in order to build new packages.", "id": "f3754:c6:m1"} {"signature": "def create_package(self, doc, name):", "body": "if not self.package_set:self.package_set = Truedoc.package = package.Package(name=name)return Trueelse:raise CardinalityError('')", "docstring": "Creates a package for the SPDX Document.\n name - any string.\n Raises CardinalityError if package already defined.", "id": "f3754:c6:m2"} {"signature": "def set_pkg_vers(self, doc, version):", "body": "self.assert_package_exists()if not self.package_vers_set:self.package_vers_set = Truedoc.package.version = versionreturn Trueelse:raise CardinalityError('')", "docstring": "Sets package version, if not already set.\n version - Any string.\n Raises CardinalityError if already has a version.\n Raises OrderError if no package previously defined.", "id": "f3754:c6:m3"} {"signature": "def set_pkg_file_name(self, doc, name):", "body": "self.assert_package_exists()if not self.package_file_name_set:self.package_file_name_set = Truedoc.package.file_name = namereturn Trueelse:raise CardinalityError('')", "docstring": "Sets the package file name, if not already set.\n name - Any string.\n Raises CardinalityError if already has a file_name.\n Raises OrderError if no pacakge previously defined.", "id": "f3754:c6:m4"} {"signature": "def set_pkg_supplier(self, doc, entity):", "body": "self.assert_package_exists()if not self.package_supplier_set:self.package_supplier_set = Trueif validations.validate_pkg_supplier(entity):doc.package.supplier = entityreturn Trueelse:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets the package supplier, if not already set.\n entity - Organization, Person or NoAssert.\n Raises CardinalityError if already has a supplier.\n Raises OrderError if no package previously 
defined.", "id": "f3754:c6:m5"} {"signature": "def set_pkg_originator(self, doc, entity):", "body": "self.assert_package_exists()if not self.package_originator_set:self.package_originator_set = Trueif validations.validate_pkg_originator(entity):doc.package.originator = entityreturn Trueelse:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets the package originator, if not already set.\n entity - Organization, Person or NoAssert.\n Raises CardinalityError if already has an originator.\n Raises OrderError if no package previously defined.", "id": "f3754:c6:m6"} {"signature": "def set_pkg_down_location(self, doc, location):", "body": "self.assert_package_exists()if not self.package_down_location_set:self.package_down_location_set = Truedoc.package.download_location = locationreturn Trueelse:raise CardinalityError('')", "docstring": "Sets the package download location, if not already set.\n location - A string\n Raises CardinalityError if already defined.\n Raises OrderError if no package previously defined.", "id": "f3754:c6:m7"} {"signature": "def set_pkg_home(self, doc, location):", "body": "self.assert_package_exists()if not self.package_home_set:self.package_home_set = Trueif validations.validate_pkg_homepage(location):doc.package.homepage = locationreturn Trueelse:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets the package homepage location if not already set.\n location - A string or None or NoAssert.\n Raises CardinalityError if already defined.\n Raises OrderError if no package previously defined.\n Raises SPDXValueError if location has incorrect value.", "id": "f3754:c6:m8"} {"signature": "def set_pkg_verif_code(self, doc, code):", "body": "self.assert_package_exists()if not self.package_verif_set:self.package_verif_set = Truematch = self.VERIF_CODE_REGEX.match(code)if match:doc.package.verif_code = match.group(self.VERIF_CODE_CODE_GRP)if match.group(self.VERIF_CODE_EXC_FILES_GRP) is not None:doc.package.verif_exc_files = match.group(self.VERIF_CODE_EXC_FILES_GRP).split('')return Trueelse:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets the package verification code, if not already set.\n code - A string.\n Raises CardinalityError if already defined.\n Raises OrderError if no package previously defined.\n Raises Value error if doesn't match verifcode form", "id": "f3754:c6:m9"} {"signature": "def set_pkg_chk_sum(self, doc, chk_sum):", "body": "self.assert_package_exists()if not self.package_chk_sum_set:self.package_chk_sum_set = Truedoc.package.check_sum = checksum_from_sha1(chk_sum)return Trueelse:raise CardinalityError('')", "docstring": "Sets the package check sum, if not already set.\n chk_sum - A string\n Raises CardinalityError if already defined.\n Raises OrderError if no package previously defined.", "id": "f3754:c6:m10"} {"signature": "def set_pkg_source_info(self, doc, text):", "body": "self.assert_package_exists()if not self.package_source_info_set:self.package_source_info_set = Trueif validations.validate_pkg_src_info(text):doc.package.source_info = str_from_text(text)return Trueelse:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets the package's source information, if not already set.\n text - Free form text.\n Raises CardinalityError if already defined.\n Raises OrderError if no package previously defined.\n SPDXValueError if text is not free form text.", "id": "f3754:c6:m11"} {"signature": "def set_pkg_licenses_concluded(self, doc, licenses):", "body": 
"self.assert_package_exists()if not self.package_conc_lics_set:self.package_conc_lics_set = Trueif validations.validate_lics_conc(licenses):doc.package.conc_lics = licensesreturn Trueelse:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets the package's concluded licenses.\n licenses - License info.\n Raises CardinalityError if already defined.\n Raises OrderError if no package previously defined.\n Raises SPDXValueError if data malformed.", "id": "f3754:c6:m12"} {"signature": "def set_pkg_license_from_file(self, doc, lic):", "body": "self.assert_package_exists()if validations.validate_lics_from_file(lic):doc.package.licenses_from_files.append(lic)return Trueelse:raise SPDXValueError('')", "docstring": "Adds a license from a file to the package.\n Raises SPDXValueError if data malformed.\n Raises OrderError if no package previously defined.", "id": "f3754:c6:m13"} {"signature": "def set_pkg_license_declared(self, doc, lic):", "body": "self.assert_package_exists()if not self.package_license_declared_set:self.package_license_declared_set = Trueif validations.validate_lics_conc(lic):doc.package.license_declared = licreturn Trueelse:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets the package's declared license.\n Raises SPDXValueError if data malformed.\n Raises OrderError if no package previously defined.\n Raises CardinalityError if already set.", "id": "f3754:c6:m14"} {"signature": "def set_pkg_license_comment(self, doc, text):", "body": "self.assert_package_exists()if not self.package_license_comment_set:self.package_license_comment_set = Trueif validations.validate_pkg_lics_comment(text):doc.package.license_comment = str_from_text(text)return Trueelse:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets the package's license comment.\n Raises OrderError if no package previously defined.\n Raises CardinalityError if already set.\n Raises SPDXValueError if text is not free form text.", "id": "f3754:c6:m15"} {"signature": "def set_pkg_cr_text(self, doc, text):", "body": "self.assert_package_exists()if not self.package_cr_text_set:self.package_cr_text_set = Trueif validations.validate_pkg_cr_text(text):if isinstance(text, string_types):doc.package.cr_text = str_from_text(text)else:doc.package.cr_text = text else:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Sets the package's copyright text.\n Raises OrderError if no package previously defined.\n Raises CardinalityError if already set.\n Raises value error if text is not one of [None, NOASSERT, TEXT].", "id": "f3754:c6:m16"} {"signature": "def set_pkg_summary(self, doc, text):", "body": "self.assert_package_exists()if not self.package_summary_set:self.package_summary_set = Trueif validations.validate_pkg_summary(text):doc.package.summary = str_from_text(text)else:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Set's the package summary.\n Raises SPDXValueError if text is not free form text.\n Raises CardinalityError if summary already set.\n Raises OrderError if no package previously defined.", "id": "f3754:c6:m17"} {"signature": "def set_pkg_desc(self, doc, text):", "body": "self.assert_package_exists()if not self.package_desc_set:self.package_desc_set = Trueif validations.validate_pkg_desc(text):doc.package.description = str_from_text(text)else:raise SPDXValueError('')else:raise CardinalityError('')", "docstring": "Set's the package's description.\n Raises SPDXValueError if text is not free form text.\n Raises 
CardinalityError if description already set.\n Raises OrderError if no package previously defined.", "id": "f3754:c6:m18"} {"signature": "def set_file_name(self, doc, name):", "body": "if self.has_package(doc):doc.package.files.append(file.File(name))self.reset_file_stat()return Trueelse:raise OrderError('')", "docstring": "Raises OrderError if no package defined.", "id": "f3754:c7:m1"} {"signature": "def set_file_spdx_id(self, doc, spdx_id):", "body": "if self.has_package(doc) and self.has_file(doc):if not self.file_spdx_id_set:self.file_spdx_id_set = Trueif validations.validate_file_spdx_id(spdx_id):self.file(doc).spdx_id = spdx_idreturn Trueelse:raise SPDXValueError('')else:raise CardinalityError('')else:raise OrderError('')", "docstring": "Sets the file SPDX Identifier.\nRaises OrderError if no package or no file defined.\nRaises SPDXValueError if malformed value.\nRaises CardinalityError if more than one spdx_id set.", "id": "f3754:c7:m2"} {"signature": "def set_file_comment(self, doc, text):", "body": "if self.has_package(doc) and self.has_file(doc):if not self.file_comment_set:self.file_comment_set = Trueif validations.validate_file_comment(text):self.file(doc).comment = str_from_text(text)return Trueelse:raise SPDXValueError('')else:raise CardinalityError('')else:raise OrderError('')", "docstring": "Raises OrderError if no package or no file defined.\nRaises CardinalityError if more than one comment set.\nRaises SPDXValueError if text is not free form text.", "id": "f3754:c7:m3"} {"signature": "def set_file_type(self, doc, type_value):", "body": "type_dict = {'': file.FileType.SOURCE,'': file.FileType.BINARY,'': file.FileType.ARCHIVE,'': file.FileType.OTHER}if self.has_package(doc) and self.has_file(doc):if not self.file_type_set:self.file_type_set = Trueif type_value in type_dict.keys():self.file(doc).type = type_dict[type_value]return Trueelse:raise SPDXValueError('')else:raise CardinalityError('')else:raise OrderError('')", "docstring": "Raises OrderError if no package or file defined.\nRaises CardinalityError if more than one type set.\nRaises SPDXValueError if type is unknown.", "id": "f3754:c7:m4"} {"signature": "def set_file_chksum(self, doc, chksum):", "body": "if self.has_package(doc) and self.has_file(doc):if not self.file_chksum_set:self.file_chksum_set = Trueself.file(doc).chk_sum = checksum_from_sha1(chksum)return Trueelse:raise CardinalityError('')else:raise OrderError('')", "docstring": "Raises OrderError if no package or file defined.\nRaises CardinalityError if more than one chksum set.", "id": "f3754:c7:m5"} {"signature": "def set_concluded_license(self, doc, lic):", "body": "if self.has_package(doc) and self.has_file(doc):if not self.file_conc_lics_set:self.file_conc_lics_set = Trueif validations.validate_lics_conc(lic):self.file(doc).conc_lics = licreturn Trueelse:raise SPDXValueError('')else:raise CardinalityError('')else:raise OrderError('')", "docstring": "Raises OrderError if no package or file defined.\nRaises CardinalityError if already set.\nRaises SPDXValueError if malformed.", "id": "f3754:c7:m6"} {"signature": "def set_file_license_in_file(self, doc, lic):", "body": "if self.has_package(doc) and self.has_file(doc):if validations.validate_file_lics_in_file(lic):self.file(doc).add_lics(lic)return Trueelse:raise SPDXValueError('')else:raise OrderError('')", "docstring": "Raises OrderError if no package or file defined.\nRaises SPDXValueError if malformed value.", "id": "f3754:c7:m7"} {"signature": "def set_file_license_comment(self, doc, text):", "body": 
"if self.has_package(doc) and self.has_file(doc):if not self.file_license_comment_set:self.file_license_comment_set = Trueif validations.validate_file_lics_comment(text):self.file(doc).license_comment = str_from_text(text)else:raise SPDXValueError('')else:raise CardinalityError('')else:raise OrderError('')", "docstring": "Raises OrderError if no package or file defined.\nRaises SPDXValueError if text is not free form text.\nRaises CardinalityError if more than one per file.", "id": "f3754:c7:m8"} {"signature": "def set_file_copyright(self, doc, text):", "body": "if self.has_package(doc) and self.has_file(doc):if not self.file_copytext_set:self.file_copytext_set = Trueif validations.validate_file_cpyright(text):if isinstance(text, string_types):self.file(doc).copyright = str_from_text(text)else:self.file(doc).copyright = text return Trueelse:raise SPDXValueError('')else:raise CardinalityError('')else:raise OrderError('')", "docstring": "Raises OrderError if no package or file defined.\n Raises SPDXValueError if not free form text or NONE or NO_ASSERT.\n Raises CardinalityError if more than one.", "id": "f3754:c7:m9"} {"signature": "def set_file_notice(self, doc, text):", "body": "if self.has_package(doc) and self.has_file(doc):if not self.file_notice_set:self.file_notice_set = Trueif validations.validate_file_notice(text):self.file(doc).notice = str_from_text(text)else:raise SPDXValueError('')else:raise CardinalityError('')else:raise OrderError('')", "docstring": "Raises OrderError if no package or file defined.\n Raises SPDXValueError if not free form text.\n Raises CardinalityError if more than one.", "id": "f3754:c7:m10"} {"signature": "def add_file_contribution(self, doc, value):", "body": "if self.has_package(doc) and self.has_file(doc):self.file(doc).add_contrib(value)else:raise OrderError('')", "docstring": "Raises OrderError if no package or file defined.", "id": "f3754:c7:m11"} {"signature": "def add_file_dep(self, doc, value):", "body": "if self.has_package(doc) and self.has_file(doc):self.file(doc).add_depend(value)else:raise OrderError('')", "docstring": "Raises OrderError if no package or file defined.", "id": "f3754:c7:m12"} {"signature": "def set_file_atrificat_of_project(self, doc, symbol, value):", "body": "if self.has_package(doc) and self.has_file(doc):self.file(doc).add_artifact(symbol, value)else:raise OrderError('')", "docstring": "Sets a file name, uri or home artificat.\n Raises OrderError if no package or file defined.", "id": "f3754:c7:m13"} {"signature": "def file(self, doc):", "body": "return doc.package.files[-]", "docstring": "Returns the last file in the document's package's file list.", "id": "f3754:c7:m14"} {"signature": "def has_file(self, doc):", "body": "return len(doc.package.files) != ", "docstring": "Returns true if the document's package has at least one file.\n Does not test if the document has a package.", "id": "f3754:c7:m15"} {"signature": "def has_package(self, doc):", "body": "return doc.package is not None", "docstring": "Returns true if the document has a package.", "id": "f3754:c7:m16"} {"signature": "def reset_file_stat(self):", "body": "self.file_spdx_id_set = Falseself.file_comment_set = Falseself.file_type_set = Falseself.file_chksum_set = Falseself.file_conc_lics_set = Falseself.file_license_comment_set = Falseself.file_notice_set = Falseself.file_copytext_set = False", "docstring": "Resets the builder's state to enable building new files.", "id": "f3754:c7:m17"} {"signature": "def extr_lic(self, doc):", "body": "return 
doc.extracted_licenses[-]", "docstring": "Retrieves last license in extracted license list", "id": "f3754:c8:m1"} {"signature": "def set_lic_id(self, doc, lic_id):", "body": "self.reset_extr_lics()if validations.validate_extracted_lic_id(lic_id):doc.add_extr_lic(document.ExtractedLicense(lic_id))return Trueelse:raise SPDXValueError('')", "docstring": "Adds a new extracted license to the document.\n Raises SPDXValueError if data format is incorrect.", "id": "f3754:c8:m3"} {"signature": "def set_lic_text(self, doc, text):", "body": "if self.has_extr_lic(doc):if not self.extr_text_set:self.extr_text_set = Trueif validations.validate_is_free_form_text(text):self.extr_lic(doc).text = str_from_text(text)return Trueelse:raise SPDXValueError('')else:raise CardinalityError('')else:raise OrderError('')", "docstring": "Sets license extracted text.\n Raises SPDXValueError if text is not free form text.\n Raises OrderError if no license ID defined.", "id": "f3754:c8:m4"} {"signature": "def set_lic_name(self, doc, name):", "body": "if self.has_extr_lic(doc):if not self.extr_lic_name_set:self.extr_lic_name_set = Trueif validations.validate_extr_lic_name(name):self.extr_lic(doc).full_name = namereturn Trueelse:raise SPDXValueError('')else:raise CardinalityError('')else:raise OrderError('')", "docstring": "Sets license name.\n Raises SPDXValueError if name is not str or utils.NoAssert\n Raises OrderError if no license id defined.", "id": "f3754:c8:m5"} {"signature": "def set_lic_comment(self, doc, comment):", "body": "if self.has_extr_lic(doc):if not self.extr_lic_comment_set:self.extr_lic_comment_set = Trueif validations.validate_is_free_form_text(comment):self.extr_lic(doc).comment = str_from_text(comment)return Trueelse:raise SPDXValueError('')else:raise CardinalityError('')else:raise OrderError('')", "docstring": "Sets license comment.\n Raises SPDXValueError if comment is not free form text.\n Raises OrderError if no license ID defined.", "id": "f3754:c8:m6"} {"signature": "def add_lic_xref(self, doc, ref):", "body": "if self.has_extr_lic(doc):self.extr_lic(doc).add_xref(ref)return Trueelse:raise OrderError('')", "docstring": "Adds a license cross reference.\n Raises OrderError if no License ID defined.", "id": "f3754:c8:m7"} {"signature": "def reset(self):", "body": "self.reset_creation_info()self.reset_document()self.reset_package()self.reset_file_stat()self.reset_reviews()self.reset_annotations()self.reset_extr_lics()", "docstring": "Resets builder's state for building new documents.\n Must be called between usage with different documents.", "id": "f3754:c9:m1"} {"signature": "def t_text(self, t):", "body": "t.lexer.text_start = t.lexer.lexpos - len('')t.lexer.begin('')", "docstring": "r':\\s*", "id": "f3755:c0:m0"} {"signature": "def t_text_end(self, t):", "body": "t.type = ''t.value = t.lexer.lexdata[t.lexer.text_start:t.lexer.lexpos]t.lexer.lineno += t.value.count('')t.value = t.value.strip()t.lexer.begin('')return t", "docstring": "r'\\s*", "id": "f3755:c0:m1"} {"signature": "def t_text_any(self, t):", "body": "pass", "docstring": "r'.|\\n", "id": "f3755:c0:m2"} {"signature": "def t_CHKSUM(self, t):", "body": "t.value = t.value[:].strip()return t", "docstring": "r':\\s*SHA1:\\s*[a-f0-9]{40,40}", "id": "f3755:c0:m4"} {"signature": "def t_DOC_REF_ID(self, t):", "body": "t.value = t.value[:].strip()return t", "docstring": "r':\\s*DocumentRef-([A-Za-z0-9\\+\\.\\-]+)", "id": "f3755:c0:m5"} {"signature": "def t_DOC_URI(self, t):", "body": "t.value = t.value.strip()return t", "docstring": 
"r'\\s*((ht|f)tps?:\\/\\/\\S*)", "id": "f3755:c0:m6"} {"signature": "def t_EXT_DOC_REF_CHKSUM(self, t):", "body": "t.value = t.value[:].strip()return t", "docstring": "r'\\s*SHA1:\\s*[a-f0-9]{40,40}", "id": "f3755:c0:m7"} {"signature": "def t_TOOL_VALUE(self, t):", "body": "t.value = t.value[:].strip()return t", "docstring": "r':\\s*Tool:.+", "id": "f3755:c0:m8"} {"signature": "def t_ORG_VALUE(self, t):", "body": "t.value = t.value[:].strip()return t", "docstring": "r':\\s*Organization:.+", "id": "f3755:c0:m9"} {"signature": "def t_PERSON_VALUE(self, t):", "body": "t.value = t.value[:].strip()return t", "docstring": "r':\\s*Person:.+", "id": "f3755:c0:m10"} {"signature": "def t_DATE(self, t):", "body": "t.value = t.value[:].strip()return t", "docstring": "r':\\s*\\d\\d\\d\\d-\\d\\d-\\d\\dT\\d\\d:\\d\\d:\\d\\dZ", "id": "f3755:c0:m11"} {"signature": "def t_KEYWORD_AS_TAG(self, t):", "body": "t.type = self.reserved.get(t.value, '')t.value = t.value.strip()return t", "docstring": "r'[a-zA-Z]+", "id": "f3755:c0:m12"} {"signature": "def t_LINE_OR_KEYWORD_VALUE(self, t):", "body": "t.value = t.value[:].strip()if t.value in self.reserved.keys():t.type = self.reserved[t.value]else:t.type = ''return t", "docstring": "r':.+", "id": "f3755:c0:m13"} {"signature": "def t_comment(self, t):", "body": "pass", "docstring": "r'\\#.*", "id": "f3755:c0:m14"} {"signature": "def t_newline(self, t):", "body": "t.lexer.lineno += len(t.value)", "docstring": "r'\\n+", "id": "f3755:c0:m15"} {"signature": "def t_whitespace(self, t):", "body": "pass", "docstring": "r'\\s+", "id": "f3755:c0:m16"} {"signature": "def datetime_iso_format(date):", "body": "return \"\".format(date.year, date.month, date.day, date.hour,date.minute, date.second)", "docstring": "Return an ISO-8601 representation of a datetime object.", "id": "f3756:m0"} {"signature": "def datetime_from_iso_format(string):", "body": "match = DATE_ISO_REGEX.match(string)if match:date = datetime.datetime(year=int(match.group(DATE_ISO_YEAR_GRP)),month=int(match.group(DATE_ISO_MONTH_GRP)),day=int(match.group(DATE_ISO_DAY_GRP)),hour=int(match.group(DATE_ISO_HOUR_GRP)),second=int(match.group(DATE_ISO_SEC_GRP)),minute=int(match.group(DATE_ISO_MIN_GRP)))return dateelse:return None", "docstring": "Return a datetime object from an iso 8601 representation.\nReturn None if string is non conforming.", "id": "f3756:m1"} {"signature": "def t_LP(self, t):", "body": "return t", "docstring": "r'\\(", "id": "f3756:c3:m0"} {"signature": "def t_RP(self, t):", "body": "return t", "docstring": "r'\\)", "id": "f3756:c3:m1"} {"signature": "def t_AND(self, t):", "body": "t.value = t.value.strip()return t", "docstring": "r'\\s(and|AND)\\s", "id": "f3756:c3:m2"} {"signature": "def t_OR(self, t):", "body": "t.value = t.value.strip()return t", "docstring": "r'\\s(or|OR)\\s", "id": "f3756:c3:m3"} {"signature": "def t_whitespace(self, t):", "body": "pass", "docstring": "r'\\s+", "id": "f3756:c3:m4"} {"signature": "def t_LICENSE(self, t):", "body": "t.value = t.value.strip()return t", "docstring": "r'[A-Za-z.0-9\\-+]+", "id": "f3756:c3:m5"} {"signature": "def input(self, data):", "body": "self.lexer.input(data)", "docstring": "Set input, data - str.", "id": "f3756:c3:m7"} {"signature": "def token(self):", "body": "return self.lexer.token()", "docstring": "Get the next token or None if exhausted input.", "id": "f3756:c3:m8"} {"signature": "def build(self, **kwargs):", "body": "self.lexer = lex.lex(module=self, **kwargs)", "docstring": "Build lexer, must be called before input or token methods.\n 
Only need to build once.", "id": "f3756:c3:m9"} {"signature": "def p_disjunction_1(self, p):", "body": "p[] = document.LicenseDisjunction(p[], p[])", "docstring": "disjunction : disjunction OR conjunction", "id": "f3756:c4:m1"} {"signature": "def p_disjunction_2(self, p):", "body": "p[] = p[]", "docstring": "disjunction : conjunction", "id": "f3756:c4:m2"} {"signature": "def p_conjunction_1(self, p):", "body": "p[] = document.LicenseConjunction(p[], p[])", "docstring": "conjunction : conjunction AND license_atom", "id": "f3756:c4:m3"} {"signature": "def p_conjunction_2(self, p):", "body": "p[] = p[]", "docstring": "conjunction : license_atom", "id": "f3756:c4:m4"} {"signature": "def p_license_atom_1(self, p):", "body": "p[] = document.License.from_identifier(p[])", "docstring": "license_atom : LICENSE", "id": "f3756:c4:m5"} {"signature": "def p_license_atom_2(self, p):", "body": "p[] = p[]", "docstring": "license_atom : LP disjunction RP", "id": "f3756:c4:m6"} {"signature": "def build(self, **kwargs):", "body": "self.yacc = yacc.yacc(module=self, **kwargs)", "docstring": "Must be called before parse.", "id": "f3756:c4:m8"} {"signature": "def parse(self, data):", "body": "try:return self.yacc.parse(data, lexer=self.lex)except:return None", "docstring": "Parses a license list and returns a License or None if it failed.", "id": "f3756:c4:m9"} {"signature": "def validate(self, messages):", "body": "messages = self.validate_annotator(messages)messages = self.validate_annotation_date(messages)messages = self.validate_annotation_type(messages)messages = self.validate_spdx_id(messages)return messages", "docstring": "Returns True if all the fields are valid.\n Appends any error messages to messages parameter.", "id": "f3757:c0:m6"} {"signature": "def make_decorator(func):", "body": "def decorate(newfunc):if hasattr(func, ''):name = func.compat_func_nameelse:name = func.__name__newfunc.__dict__ = func.__dict__newfunc.__doc__ = func.__doc__newfunc.__module__ = func.__module__if not hasattr(newfunc, ''):newfunc.compat_co_firstlineno = six.get_function_code(func).co_firstlinenotry:newfunc.__name__ = nameexcept TypeError:newfunc.compat_func_name = namereturn newfuncreturn decorate", "docstring": "Wraps a test decorator so as to properly replicate metadata\nof the decorated function, including nose's additional stuff\n(namely, setup and teardown).", "id": "f3761:m0"} {"signature": "def raises(*exceptions):", "body": "valid = ''.join([e.__name__ for e in exceptions])def decorate(func):name = func.__name__def newfunc(*arg, **kw):try:func(*arg, **kw)except exceptions:passexcept:raiseelse:message = \"\" % (name, valid)raise AssertionError(message)newfunc = make_decorator(func)(newfunc)return newfuncreturn decorate", "docstring": "Test must raise one of expected exceptions to pass.\n\n Example use::\n\n @raises(TypeError, ValueError)\n def test_raises_type_error():\n raise TypeError(\"This test passes\")\n\n @raises(Exception)\n def test_that_fails_by_passing():\n pass\n\n If you want to test many assertions about exceptions in a single test,\n you may want to use `assert_raises` instead.", "id": "f3761:m1"} {"signature": "def get_temp_file(extension=''):", "body": "if extension and not extension.startswith(''):extension = '' + extensionfile_name = '' + extensiontemp_dir = tempfile.mkdtemp()return os.path.join(temp_dir, file_name)", "docstring": "Return a unique new temporary file location to a non-existing\ntemporary file that can safely be created without a risk of name\ncollision.", "id": "f3762:m0"} 
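The grammar records above (p_disjunction, p_conjunction, p_license_atom) encode the usual precedence for SPDX license expressions: OR binds looser than AND, and parentheses group. The following is a minimal, self-contained sketch of that precedence using a hand-written recursive-descent parser; it is illustrative only and does not use the library's ply-based lexer or its class names.

def tokenize(text):
    # split '(' and ')' into their own tokens, keep license ids / AND / OR intact
    return text.replace('(', ' ( ').replace(')', ' ) ').split()

def parse_expression(tokens):
    # disjunction : conjunction (OR conjunction)*
    node = parse_conjunction(tokens)
    while tokens and tokens[0] == 'OR':
        tokens.pop(0)
        node = ('OR', node, parse_conjunction(tokens))
    return node

def parse_conjunction(tokens):
    # conjunction : atom (AND atom)*
    node = parse_atom(tokens)
    while tokens and tokens[0] == 'AND':
        tokens.pop(0)
        node = ('AND', node, parse_atom(tokens))
    return node

def parse_atom(tokens):
    # atom : LICENSE | '(' disjunction ')'
    tok = tokens.pop(0)
    if tok == '(':
        node = parse_expression(tokens)
        assert tokens.pop(0) == ')'
        return node
    return tok

if __name__ == '__main__':
    # prints ('OR', 'MIT', ('AND', 'Apache-2.0', 'BSD-3-Clause'))
    print(parse_expression(tokenize('MIT OR (Apache-2.0 AND BSD-3-Clause)')))

Building the result left-associatively in the loops mirrors the left-recursive grammar rules (disjunction : disjunction OR conjunction) shown in the records.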
{"signature": "def parse_rdf_file(self, file_name):", "body": "with open(file_name, mode='') as infile:rdfparser = RDFParser(RDFBuilder(), StandardLogger())return rdfparser.parse(infile)", "docstring": "Returns tuple error, document.", "id": "f3762:c0:m0"} {"signature": "def parse_tagvalue_file(self, file_name):", "body": "with open(file_name, mode='') as infile:tvparser = TVParser(TVBuilder(), StandardLogger())tvparser.build()return tvparser.parse(infile.read())", "docstring": "Returns tuple error, document.", "id": "f3762:c0:m1"} {"signature": "def to_os_native_path(path):", "body": "path = path.replace(posixpath.sep, os.path.sep)path = path.replace(ntpath.sep, os.path.sep)path = path.rstrip(os.path.sep)return path", "docstring": "Normalize a path to use the native OS path separator.", "id": "f3764:m1"} {"signature": "def strip_variable_text(rdf_text):", "body": "replace_nid = re.compile('').subrdf_text = replace_nid('', rdf_text)replace_creation = re.compile('', re.DOTALL).subrdf_text = replace_creation('', rdf_text)replace_pcc = re.compile('', re.DOTALL).subrdf_text = replace_pcc('', rdf_text)return rdf_text", "docstring": "Return rdf_text stripped from variable parts such as rdf nodeids", "id": "f3764:m2"} {"signature": "def load_and_clean_rdf(location):", "body": "content = codecs.open(location, encoding='').read()content = strip_variable_text(content)data = xmltodict.parse(content, dict_constructor=dict)return sort_nested(data)", "docstring": "Return plain Python nested data for the SPDX RDF file at location\nsuitable for comparison. The file content is cleaned from variable\nparts such as dates, generated UUIDs and versions\n\nNOTE: we use plain dicts to avoid ordering issues in XML. the SPDX\ntool and lxml do not seem to return a consistent ordering that is\nneeded for tests.", "id": "f3764:m3"} {"signature": "def sort_nested(data):", "body": "if isinstance(data, dict):new_data = {}for k, v in data.items():if isinstance(v, list):v = sorted(v)if isinstance(v, dict):v = sort_nested(v)new_data[k] = vreturn new_dataelif isinstance(data, list):new_data = []for v in sorted(data):if isinstance(v, list):v = sort_nested(v)if isinstance(v, dict):v = sort_nested(v)new_data.append(v)return new_data", "docstring": "Return a new dict with any nested list sorted recursively.", "id": "f3764:m4"} {"signature": "def check_rdf_scan(expected_file, result_file, regen=False):", "body": "import jsonresult = load_and_clean_rdf(result_file)if regen:expected = resultwith codecs.open(expected_file, '', encoding='') as o:json.dump(expected, o, indent=)else:with codecs.open(expected_file, '', encoding='') as i:expected = sort_nested(json.load(i))assert expected == result", "docstring": "Check that expected and result_file are equal.\nBoth are paths to SPDX RDF XML files, UTF-8 encoded.", "id": "f3764:m5"} {"signature": "def load_and_clean_tv(location):", "body": "content = codecs.open(location, encoding='').read()content = [l for l in content.splitlines(False)if l and l.strip() and not l.startswith(('', '',))]return ''.join(content)", "docstring": "Return a mapping for the SPDX TV file at location suitable for\ncomparison. 
The file content is cleaned from variable parts such as\ndates, generated UUIDs and versions", "id": "f3764:m6"} {"signature": "def check_tv_scan(expected_file, result_file, regen=False):", "body": "result = load_and_clean_tv(result_file)if regen:with codecs.open(expected_file, '', encoding='') as o:o.write(result)expected = load_and_clean_tv(expected_file)assert expected == result", "docstring": "Check that expected and result_file are equal.\nBoth are paths to plain SPDX tv text files, UTF-8 encoded.", "id": "f3764:m7"} {"signature": "def __init__(self, default_args):", "body": "self._default_args = default_args", "docstring": ":param default_args: default arguments\n:type default_args: string or list of string", "id": "f3777:c0:m0"} {"signature": "def parse(self, args):", "body": "if args is None:args = self._default_argsif isinstance(args, six.string_types):args = shlex.split(args)return args", "docstring": ":param args: arguments\n:type args: None or string or list of string\n:return: formatted arguments if specified else ``self.default_args``\n:rtype: list of string", "id": "f3777:c0:m1"} {"signature": "@propertydef default_args(self):", "body": "return self._default_args", "docstring": ":return: default arguments given on ``init``.\n:rtype: string or list of string", "id": "f3777:c0:m2"} {"signature": "def __init__(self, vim):", "body": "self._ref = weakref.ref(vim)self._list = self.parse(vim.command(''))", "docstring": ":param vim: ``Vim`` object which owns this object.\n:type vim: Vim", "id": "f3778:c0:m0"} {"signature": "def insert(self, index, value):", "body": "self._list.insert(index, value)self._sync()", "docstring": "Insert object before index.\n\n:param int index: index to insert in\n:param string value: path to insert", "id": "f3778:c0:m7"} {"signature": "def format(self, list):", "body": "values = ''.join(list)return '' + values", "docstring": "Format list to runtime path representation.\n\n:param list: list of paths to format\n:type list: list of string\n:return: *Vim* style runtime path string representation\n:rtype: string", "id": "f3778:c0:m8"} {"signature": "def parse(self, string):", "body": "var, eq, values = string.strip().partition('')assert var == ''assert eq == ''return values.split('')", "docstring": "Parse runtime path representation to list.\n\n:param string string: runtime path string\n:return: list of runtime paths\n:rtype: list of string", "id": "f3778:c0:m9"} {"signature": "def open(**kwargs):", "body": "return Vim(**kwargs)", "docstring": "A factory function to open new ``Vim`` object.\n``with`` statement can be used for this.", "id": "f3780:m0"} {"signature": "def __init__(self,executable='',args=None,env=None,encoding='',size=(, ),timeout=):", "body": "parser = arguments.Parser(self.default_args)args = parser.parse(args)self._process = process.Process(executable, args, env)self._encoding = encodingself._screen = pyte.Screen(*size)self._stream = pyte.Stream()self._stream.attach(self._screen)self._timeout = timeoutself._tempfile = tempfile.NamedTemporaryFile(mode='')self._runtimepath = Noneself.wait()", "docstring": ":param string executable: command name to execute *Vim*\n:param args: arguments to execute *Vim*\n:type args: None or string or list of string\n:param env: environment variables to execute *Vim*\n:type env: None or dict of (string, string)\n:param string encoding: internal encoding of *Vim*\n:param size: (lines, columns) of a screen connected to *Vim*\n:type size: (int, int)\n:param float timeout: seconds to wait I/O", "id": "f3780:c0:m0"} 
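The check_rdf_scan / sort_nested records above explain why parsed documents are normalized before comparison: list ordering in the parsed RDF is not stable, so nested lists are sorted recursively and only then compared. A small sketch of that idea follows; the function name and the use of repr as a deterministic sort key are illustrative assumptions, not the library's implementation.

def normalize(data):
    # Recursively sort nested lists so two parsed documents compare equal
    # regardless of element order; dicts compare order-insensitively already.
    if isinstance(data, dict):
        return {k: normalize(v) for k, v in data.items()}
    if isinstance(data, list):
        return sorted((normalize(v) for v in data), key=repr)
    return data

assert normalize({'a': [3, 1, 2], 'b': {'c': [[2], [1]]}}) == \
       normalize({'b': {'c': [[1], [2]]}, 'a': [2, 3, 1]})

Sorting by repr is only there to give an arbitrary but deterministic order across mixed element types; any stable canonical ordering would serve the same purpose for equality checks.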
{"signature": "def close(self):", "body": "self._tempfile.close()self._process.terminate()if self._process.is_alive():self._process.kill()", "docstring": "Disconnect and close *Vim*.", "id": "f3780:c0:m5"} {"signature": "def is_alive(self):", "body": "return self._process.is_alive()", "docstring": "Check if the background *Vim* process is alive.\n\n:return: True if the process is alive, else False\n:rtype: boolean", "id": "f3780:c0:m6"} {"signature": "def display(self):", "body": "return ''.join(self.display_lines())", "docstring": "Shows the terminal screen connecting to *Vim*.\n\nExample:\n\n>>> import headlessvim\n>>> with headlessvim.open(size=(64, 16)) as vim: # doctest: +SKIP\n... print(vim.display())\n...\n~\n~ VIM - Vi IMproved\n~\n~ version 7.4.52\n~ by Bram Moolenaar et al.\n~\n~ Vim is open source and freely distributable\n~\n~ Sponsor Vim development!\n~ type :help sponsor for information\n~\n~ type :q to exit\n~ type :help or for on-line help\n~ type :help version7 for version info\n~\n~\n\n:return: screen as a text\n:rtype: string", "id": "f3780:c0:m7"} {"signature": "def display_lines(self):", "body": "return self._screen.display", "docstring": "Shows the terminal screen splitted by newlines.\n\nAlmost equals to ``self.display().splitlines()``\n\n:return: screen as a list of strings\n:rtype: list of string", "id": "f3780:c0:m8"} {"signature": "def send_keys(self, keys, wait=True):", "body": "self._process.stdin.write(bytearray(keys, self._encoding))self._process.stdin.flush()if wait:self.wait()", "docstring": "Send a raw key sequence to *Vim*.\n\n.. note:: *Vim* style key sequence notation (like ````)\n is not recognized.\n Use escaped characters (like ``'\\033'``) instead.\n\nExample:\n\n>>> import headlessvim\n>>> with headlessvim.open() as vim:\n... vim.send_keys('ispam\\033')\n... str(vim.display_lines()[0].strip())\n...\n'spam'\n\n:param strgin keys: key sequence to send\n:param boolean wait: whether if wait a response", "id": "f3780:c0:m9"} {"signature": "def wait(self, timeout=None):", "body": "if timeout is None:timeout = self._timeoutwhile self._process.check_readable(timeout):self._flush()", "docstring": "Wait for response until timeout.\nIf timeout is specified to None, ``self.timeout`` is used.\n\n:param float timeout: seconds to wait I/O", "id": "f3780:c0:m10"} {"signature": "def install_plugin(self, dir, entry_script=None):", "body": "self.runtimepath.append(dir)if entry_script is not None:self.command(''.format(entry_script), False)", "docstring": "Install *Vim* plugin.\n\n:param string dir: the root directory contains *Vim* script\n:param string entry_script: path to the initializing script", "id": "f3780:c0:m11"} {"signature": "def command(self, command, capture=True):", "body": "if capture:self.command(''.format(self._tempfile.name), False)self.set_mode('')self.send_keys(''.format(command))if capture:self.command('', False)return self._tempfile.read().strip('')", "docstring": "Execute command on *Vim*.\n.. warning:: Do not use ``redir`` command if ``capture`` is ``True``.\nIt's already enabled for internal use.\n\nIf ``capture`` argument is set ``False``,\nthe command execution becomes slightly faster.\n\nExample:\n\n>>> import headlessvim\n>>> with headlessvim.open() as vim:\n... vim.command('echo 0')\n...\n'0'\n>>> with headlessvim.open() as vim:\n... vim.command('let g:spam = \"ham\"', False)\n... 
vim.echo('g:spam')\n...\n'ham'\n\n:param string command: a command to execute\n:param boolean capture: ``True`` if command's output needs to be\n captured, else ``False``\n:return: the output of the given command\n:rtype: string", "id": "f3780:c0:m12"} {"signature": "def echo(self, expr):", "body": "return self.command(''.format(expr))", "docstring": "Execute ``:echo`` command on *Vim*.\n\n.. note:: The given string is passed to *Vim* as it is.\n Make sure to quote bare words.\n\nExample:\n\n>>> import headlessvim\n>>> with headlessvim.open() as vim:\n... vim.echo('0')\n...\n'0'\n>>> with headlessvim.open() as vim:\n... vim.echo('\"spam\"')\n...\n'spam'\n\n:param string expr: a expr to ``:echo``\n:return: the result of ``:echo`` command\n:rtype: string", "id": "f3780:c0:m13"} {"signature": "def set_mode(self, mode):", "body": "keys = ''if mode == '':passelif mode == '':keys += ''elif mode == '':keys += ''elif mode == '':keys += ''elif mode == '':keys += ''else:raise ValueError(''.format(mode))self.send_keys(keys)", "docstring": "Set *Vim* mode to ``mode``.\nSupported modes:\n\n* ``normal``\n* ``insert``\n* ``command``\n* ``visual``\n* ``visual-block``\n\n\nThis method behave as setter-only property.\n\nExample:\n\n>>> import headlessvim\n>>> with headlessvim.open() as vim:\n... vim.set_mode('insert')\n... vim.mode = 'normal' # also accessible as property\n...\n\n:param string mode: *Vim* mode to set\n:raises ValueError: if ``mode`` is not supported", "id": "f3780:c0:m14"} {"signature": "@propertydef executable(self):", "body": "return self._process.executable", "docstring": ":return: the absolute path to the process.\n:rtype: string", "id": "f3780:c0:m15"} {"signature": "@propertydef args(self):", "body": "return self._process.args", "docstring": ":return: arguments for the process.\n:rtype: list of string", "id": "f3780:c0:m16"} {"signature": "@propertydef encoding(self):", "body": "return self._encoding", "docstring": ":return: internal encoding of *Vim*.\n:rtype: string", "id": "f3780:c0:m17"} {"signature": "@propertydef screen_size(self):", "body": "return self._swap(self._screen.size)", "docstring": ":return: (lines, columns) tuple of a screen connected to *Vim*.\n:rtype: (int, int)", "id": "f3780:c0:m18"} {"signature": "@screen_size.setterdef screen_size(self, size):", "body": "if self.screen_size != size:self._screen.resize(*self._swap(size))", "docstring": ":param size: (lines, columns) tuple of a screen connected to *Vim*.\n:type size: (int, int)", "id": "f3780:c0:m19"} {"signature": "@propertydef timeout(self):", "body": "return self._timeout", "docstring": ":return: seconds to wait I/O.\n:rtype: float", "id": "f3780:c0:m20"} {"signature": "@timeout.setterdef timeout(self, timeout):", "body": "self._timeout = timeout", "docstring": ":param float timeout: seconds to wait I/O.", "id": "f3780:c0:m21"} {"signature": "@propertydef runtimepath(self):", "body": "if self._runtimepath is None:self._runtimepath = runtimepath.RuntimePath(self)return self._runtimepath", "docstring": ":return: runtime path of *Vim*\n:rtype: runtimepath.RuntimePath", "id": "f3780:c0:m22"} {"signature": "def __init__(self, executable, args, env):", "body": "self._executable = distutils.spawn.find_executable(executable)self._args = argsself._env = envself._open_process()", "docstring": ":param str executable: command name to execute *Vim*\n:param args: arguments to execute *Vim*\n:type args: None or string or list of string\n:param env: environment variables to execute *Vim*\n:type env: None or dict of (string, 
string)", "id": "f3781:c0:m0"} {"signature": "def terminate(self):", "body": "with self._close():self._process.terminate()", "docstring": "Terminate this process.\nUse this method rather than ``self.kill``.", "id": "f3781:c0:m1"} {"signature": "def kill(self):", "body": "with self._close():self._process.kill()", "docstring": "Kill this process.\nUse this only when the process seems to be hanging up.", "id": "f3781:c0:m2"} {"signature": "def check_readable(self, timeout):", "body": "rlist, wlist, xlist = select.select([self._stdout], [], [], timeout)return bool(len(rlist))", "docstring": "Poll ``self.stdout`` and return True if it is readable.\n\n:param float timeout: seconds to wait I/O\n:return: True if readable, else False\n:rtype: boolean", "id": "f3781:c0:m3"} {"signature": "def is_alive(self):", "body": "return self._process.poll() is None", "docstring": "Check if the process is alive.\n\n:return: True if the process is alive, else False\n:rtype: boolean", "id": "f3781:c0:m4"} {"signature": "@propertydef executable(self):", "body": "return self._executable", "docstring": ":return: the absolute path to the process.\n:rtype: strIng", "id": "f3781:c0:m5"} {"signature": "@propertydef args(self):", "body": "return self._args", "docstring": ":return: launch arguments of the process.\n:rtype: string or list of string", "id": "f3781:c0:m6"} {"signature": "@propertydef stdin(self):", "body": "return self._stdin", "docstring": ":return: file-like object representing the standard input\n of the process\n:rtype: flie-like object", "id": "f3781:c0:m7"} {"signature": "@propertydef stdout(self):", "body": "return self._stdout", "docstring": ":return: non blocking file-like object\n representing the standard output of the process\n:rtype: file-like object", "id": "f3781:c0:m8"} {"signature": "def run(self):", "body": "run_once = Truewhile (run_once or self._threaded) and self.end is False:self.service_tx_queue()self.parse_messages()run_once = Falseif self._threaded:time.sleep(self._timeout)if self._threaded:logger.info('')", "docstring": "Receives the serial data into the self._raw buffer\n:return:", "id": "f3786:c0:m23"} {"signature": "def tx(self, message):", "body": "message = message if isinstance(message, list) else [message]length = len(message)length_high_byte = (length & ) >> length_low_byte = length & message_with_length = [length_low_byte, length_high_byte] + messagesum1, sum2 = self._fletcher16_checksum(message_with_length)message_with_length.append(sum1)message_with_length.append(sum2)message = [self._START_OF_FRAME]for b in message_with_length:if b in [self._START_OF_FRAME, self._END_OF_FRAME, self._ESC]:message.append(self._ESC)message.append(b ^ self._ESC_XOR)else:message.append(b)message.append(self._END_OF_FRAME)self._port.write(message)", "docstring": "Transmit a series of bytes\n:param message: a list of bytes to send\n:return: None", "id": "f3788:c0:m1"} {"signature": "def rx(self):", "body": "if not self._threaded:self.run()try:return tuple(self._messages.pop())except IndexError:return None", "docstring": "Receive a series of bytes that have been verified\n:return: a series of bytes as a tuple or None if empty", "id": "f3788:c0:m2"} {"signature": "def _parse_raw_data(self):", "body": "if self._START_OF_FRAME in self._raw and self._END_OF_FRAME in self._raw:while self._raw[] != self._START_OF_FRAME and len(self._raw) > :self._raw.pop()if self._raw[] == self._START_OF_FRAME:self._raw.pop()eof_index = self._raw.index(self._END_OF_FRAME)raw_message = self._raw[:eof_index]self._raw = 
self._raw[eof_index:]logger.debug(''.format(raw_message))message = self._remove_esc_chars(raw_message)logger.debug(''.format(message))expected_checksum = (message[-] << ) | message[-]logger.debug(''.format(expected_checksum))message = message[:-] logger.debug(''.format(message))sum1, sum2 = self._fletcher16_checksum(message)calculated_checksum = (sum2 << ) | sum1if expected_checksum == calculated_checksum:message = message[:] logger.debug(''.format(message))self._messages.append(message)else:logger.warning(''.format(message))logger.debug(''.format(expected_checksum, calculated_checksum))try:while self._raw[] != self._START_OF_FRAME and len(self._raw) > :self._raw.pop()except IndexError:pass", "docstring": "Parses the incoming data and determines if it is valid. Valid\ndata gets placed into self._messages\n:return: None", "id": "f3788:c0:m4"} {"signature": "def _fletcher16_checksum(self, data):", "body": "sum1 = sum2 = for i, b in enumerate(data):sum1 += bsum1 &= sum2 += sum1sum2 &= logger.debug(''.format(sum1, sum2))return sum1, sum2", "docstring": "Calculates a fletcher16 checksum for the list of bytes\n:param data: a list of bytes that comprise the message\n:return:", "id": "f3788:c0:m5"} {"signature": "def _remove_esc_chars(self, raw_message):", "body": "message = []escape_next = Falsefor c in raw_message:if escape_next:message.append(c ^ self._ESC_XOR)escape_next = Falseelse:if c == self._ESC:escape_next = Trueelse:message.append(c)return message", "docstring": "Removes any escape characters from the message\n:param raw_message: a list of bytes containing the un-processed data\n:return: a message that has the escaped characters appropriately un-escaped", "id": "f3788:c0:m6"} {"signature": "def run(self):", "body": "run_once = Truewhile run_once or self._threaded:waiting = self._port.in_waitingif waiting > :temp = [int(c) for c in self._port.read(waiting)]self._raw += tempself._parse_raw_data()run_once = Falseif self._threaded:time.sleep(self._timeout)", "docstring": "Receives the serial data into the self._raw buffer\n:return:", "id": "f3788:c0:m7"} {"signature": "def check_pidfile(pidfile, debug):", "body": "if os.path.isfile(pidfile):pidfile_handle = open(pidfile, '')try:pid = int(pidfile_handle.read())pidfile_handle.close()if check_pid(pid, debug):return Trueexcept:passos.unlink(pidfile)pid = str(os.getpid())open(pidfile, '').write(pid)return False", "docstring": "Check that a process is not running more than once, using PIDFILE", "id": "f3794:m1"} {"signature": "def check_pid(pid, debug):", "body": "try:os.kill(pid, )if debug > :print(\"\")return Trueexcept OSError:if debug > :print(\"\")return False", "docstring": "This function will check whether a PID is currently running", "id": "f3794:m2"} {"signature": "def get_datetime_str():", "body": "return time.strftime('')", "docstring": "This function gets the local time with local timezone offset", "id": "f3794:m3"} {"signature": "def convert_to_int(value):", "body": "try:ret_val = int(value)return ret_val, Trueexcept ValueError:return , False", "docstring": "Convert a string to INT", "id": "f3794:m4"} {"signature": "def convert_to_float(value):", "body": "try:ret_val = float(value)return ret_val, Trueexcept ValueError:return , False", "docstring": "Convert a string to FLOAT", "id": "f3794:m5"} {"signature": "def convert_int32(high_word, low_word):", "body": "return convert_words_to_uint(high_word, low_word)", "docstring": "Convert two words to a 32 bit unsigned integer", "id": "f3794:m6"} {"signature": "def 
convert_words_to_uint(high_word, low_word):", "body": "try:low_num = int(low_word)if low_num < :low_num = abs(low_num) + **number = (int(high_word) << ) | low_numreturn number, Trueexcept:return , False", "docstring": "Convert two words to a floating point", "id": "f3794:m7"} {"signature": "def convert_words_to_float(high_word, low_word):", "body": "number, retval = convert_words_to_uint(high_word, low_word)if not retval:return , Falsetry:packed_float = struct.pack('', number)return struct.unpack('', packed_float)[], Trueexcept:return , False", "docstring": "Convert two words to a floating point", "id": "f3794:m8"} {"signature": "def disown(debug):", "body": "pid = os.getpid()cgroup_file = \"\" + str(pid) + \"\"try:infile = open(cgroup_file, \"\")except IOError:print(\"\", cgroup_file)return Falsefor line in infile:if line.find(\"\") == -:continueline = line.replace(\"\", \"\")items_list = line.split('')accounts = items_list[]dir_str = accounts + \"\"if not accounts:continuefull_dir = \"\" + dir_strif not os.path.exists(full_dir):os.makedirs(full_dir)if debug >= :print(\"\", full_dir)else:if debug >= :print(\"\", full_dir)full_path = full_dir + \"\"prog_list = [\"\", str(pid), \">\", full_path]run_program(prog_list, debug, True)if accounts.find(\"\") != -:acct_list = accounts.split('')accounts = acct_list[] + \"\" + acct_list[]dir_str = accounts + \"\"full_dir = \"\" + dir_strtry:if not os.path.exists(full_dir):os.makedirs(full_dir)except:continuefull_path = full_dir + \"\"prog_list = [\"\", str(pid), \">\", full_path]run_program(prog_list, debug, True)infile.close()if debug >= :prog_list = [\"\", cgroup_file]run_program(prog_list, debug, False)prog_list = [\"\", \"\", \"\", cgroup_file]if run_program(prog_list, debug, False):return Falsereturn True", "docstring": "This function will disown, so the Ardexa service can be restarted", "id": "f3794:m9"} {"signature": "def run_program(prog_list, debug, shell):", "body": "try:if not shell:process = Popen(prog_list, stdout=PIPE, stderr=PIPE)stdout, stderr = process.communicate()retcode = process.returncodeif debug >= :print(\"\", \"\".join(prog_list))print(\"\", retcode)print(\"\", stdout)print(\"\", stderr)return bool(retcode)else:command = \"\".join(prog_list)os.system(command)return Trueexcept:return False", "docstring": "Run a program and check program return code Note that some commands don't work\n well with Popen. So if this function is specifically called with 'shell=True',\n then it will run the old 'os.system'. 
In which case, there is no program output", "id": "f3794:m10"} {"signature": "def parse_address_list(addrs):", "body": "for addr in addrs.split(''):elem = addr.split('')if len(elem) == : yield int(elem[])elif len(elem) == : start, end = list(map(int, elem))for i in range(start, end+):yield ielse: raise ValueError('' % addr)", "docstring": "Yield each integer from a complex range string like \"1-9,12,15-20,23\"\n\n >>> list(parse_address_list('1-9,12,15-20,23'))\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 15, 16, 17, 18, 19, 20, 23]\n\n >>> list(parse_address_list('1-9,12,15-20,2-3-4'))\n Traceback (most recent call last):\n ...\n ValueError: format error in 2-3-4", "id": "f3794:m11"} {"signature": "@contextmanagerdef environment(**kwargs):", "body": "old_values = {}nonexistent = set()for key in kwargs:if key not in os.environ:nonexistent.add(key)else:old_values[key] = os.environ[key]os.environ[key] = kwargs[key]try:yieldfinally:for key in old_values:os.environ[key] = old_values[key]for key in nonexistent:os.environ.pop(key)", "docstring": "Context manager to temporarily change environment variables. On exit all\nvariables are set to their original value.", "id": "f3795:m0"} {"signature": "def emit(self, record):", "body": "self.records.append(record)", "docstring": "Overrides :py:meth:`logging.Handler.emit`.", "id": "f3795:c0:m1"} {"signature": "def contains(self, logger, level, message, is_regex=False):", "body": "for record in self.records:if record.name != logger or record.levelno != level:continueif is_regex:if re.search(message, (record.msg % record.args)):return Trueelse:if message in (record.msg % record.args):return Truereturn False", "docstring": "Checks whether a message has been logged to a specific logger with a\nspecific level.\n\n:param logger: The logger.\n:param level: The log level.\n:param message: The message contents.\n:param is_regex: Whether the expected message is a regex or not.\n Non-regex messages are simply tested for inclusion.", "id": "f3795:c0:m4"} {"signature": "def get_new_call(group_name, app_name, search_path, filename, require_load,version, secure):", "body": "new_call_kwargs = {'': group_name,'': filename} new_call_lookup_options = {} new_call_lookup_options[''] = secureif search_path:new_call_lookup_options[''] = search_pathif require_load:new_call_lookup_options[''] = require_loadif version:new_call_lookup_options[''] = versionif new_call_lookup_options:new_call_kwargs[''] = new_call_lookup_optionsoutput = build_call_str('', (app_name,), new_call_kwargs)return output", "docstring": "Build a call to use the new ``get_config`` function from args passed to\n``Config.__init__``.", "id": "f3801:m0"} {"signature": "def build_call_str(prefix, args, kwargs):", "body": "kwargs_str = ''.join(['' % (key, value) for key, value inkwargs.items()])args_str = ''.join([repr(arg) for arg in args])output = [prefix, '']if args:output.append(args_str)if args and kwargs:output.append('')if kwargs:output.append(kwargs_str)output.append('')return ''.join(output)", "docstring": "Build a callable Python string for a function call. 
The output will be\ncombined similar to this template::\n\n (, )\n\nExample::\n\n >>> build_call_str('foo', (1, 2), {'a': '10'})\n \"foo(1, 2, a='10')\"", "id": "f3801:m1"} {"signature": "def get_xdg_dirs(self):", "body": "config_dirs = getenv('', '')if config_dirs:self._log.debug('', config_dirs)output = []for path in reversed(config_dirs.split('')):output.append(join(path, self.group_name, self.app_name))return outputreturn ['' % (self.group_name, self.app_name)]", "docstring": "Returns a list of paths specified by the XDG_CONFIG_DIRS environment\nvariable or the appropriate default.\n\nThe list is sorted by precedence, with the most important item coming\n*last* (required by the existing config_resolver logic).", "id": "f3801:c0:m3"} {"signature": "def get_xdg_home(self):", "body": "config_home = getenv('', '')if config_home:self._log.debug('', config_home)return expanduser(join(config_home, self.group_name, self.app_name))return expanduser('' % (self.group_name, self.app_name))", "docstring": "Returns the value specified in the XDG_CONFIG_HOME environment variable\nor the appropriate default.", "id": "f3801:c0:m4"} {"signature": "def _effective_filename(self):", "body": "config_filename = ''if self.filename:config_filename = self.filenameenv_filename = getenv(self.env_filename_name)if env_filename:self._log.info('''',env_filename,self.env_filename_name)config_filename = env_filenamereturn config_filename", "docstring": "Returns the filename which is effectively used by the application. If\noverridden by an environment variable, it will return that filename.", "id": "f3801:c0:m5"} {"signature": "def _effective_path(self):", "body": "path = (['' % (self.group_name, self.app_name)] +self.get_xdg_dirs() +[expanduser('' % (self.group_name, self.app_name)),self.get_xdg_home(),join(getcwd(), ''.format(self.group_name), self.app_name)])if self.search_path:path = self.search_path.split(pathsep)env_path = getenv(self.env_path_name)if env_path and env_path.startswith(''):additional_paths = env_path[:].split(pathsep)self._log.info('''',additional_paths,self.env_path_name)path.extend(additional_paths)elif env_path:self._log.info(\"\"\"\",env_path,self.env_path_name)path = env_path.split(pathsep)return path", "docstring": "Returns a list of paths to search for config files in reverse order of\nprecedence. In other words: the last path element will override the\nsettings from the first one.", "id": "f3801:c0:m6"} {"signature": "def check_file(self, filename):", "body": "if not exists(filename):return Falsenew_config = ConfigResolverBase()new_config.read(filename)if self.version and not new_config.has_option('', ''):raise NoVersionError(\"\"\"\".format(filename,self.version))elif not self.version and new_config.has_option('', ''):self.version = StrictVersion(new_config.get('', ''))self._log.info('''''''',filename, self.version)elif self.version:file_version = new_config.get('', '')major, minor, _ = StrictVersion(file_version).versionexpected_major, expected_minor, _ = self.version.versionif expected_major != major:self._log.error('',abspath(filename),str(self.version),file_version)return Falseif expected_minor != minor:self._log.warning('''',abspath(filename),str(self.version),file_version)return Truereturn True", "docstring": "Check if ``filename`` can be read. 
Will return boolean which is True if\nthe file can be read, False otherwise.", "id": "f3801:c0:m7"} {"signature": "def get(self, section, option, **kwargs): ", "body": "if \"\" in kwargs:default = kwargs.pop(\"\")new_kwargs = {'': default}new_kwargs.update(kwargs)new_call = build_call_str('', (section, option), new_kwargs)warn('''''''' % new_call,DeprecationWarning,stacklevel=)have_default = Trueelse:have_default = Falsetry:value = super(Config, self).get(section, option, **kwargs)return valueexcept (NoSectionError, NoOptionError) as exc:if have_default:self._log.debug(\"\", exc, default)return defaultelse:raise", "docstring": "Overrides :py:meth:`configparser.ConfigParser.get`.\n\nIn addition to ``section`` and ``option``, this call takes an optional\n``default`` value. This behaviour works in *addition* to the\n:py:class:`configparser.ConfigParser` default mechanism. Note that\na default value from ``ConfigParser`` takes precedence.\n\nThe reason this additional functionality is added, is because the\ndefaults of :py:class:`configparser.ConfigParser` are not dependent\non sections. If you specify a default for the option ``test``, then\nthis value will be returned for both ``section1.test`` and for\n``section2.test``. Using the default on the ``get`` call gives you more\nfine-grained control over this.\n\nAlso note, that if a default value was used, it will be logged with\nlevel ``logging.DEBUG``.\n\n:param section: The config file section.\n:param option: The option name.\n:param kwargs: These keyword args are passed through to\n :py:meth:`configparser.ConfigParser.get`.", "id": "f3801:c0:m8"} {"signature": "def load(self, reload=False, require_load=False):", "body": "if reload: self.config = Noneif self.config: self._log.debug('''')returnpath = self._effective_path()config_filename = self._effective_filename()self._active_path = [join(_, config_filename) for _ in path]for dirname in path:conf_name = join(dirname, config_filename)readable = self.check_file(conf_name)if readable:action = '' if self._loaded_files else ''self._log.info('', action, conf_name)self.read(conf_name)if conf_name == expanduser(\"\" % (self.group_name, self.app_name, self.filename)):self._log.warning(\"\"\"\"\"\"\"\"\"\"\"\"\"\", expanduser(\"\"), self.group_name,self.app_name, expanduser(\"\"), self.group_name,self.app_name)self._loaded_files.append(conf_name)if not self._loaded_files and not require_load:self._log.warning(\"\",config_filename,path)elif not self._loaded_files and require_load:raise IOError(\"\"\"\" % (config_filename, path))", "docstring": "Searches for an appropriate config file. If found, loads the file into\nthe current instance. This method can also be used to reload a\nconfiguration. Note that you may want to set ``reload`` to ``True`` to\nclear the configuration before loading in that case. 
Without doing\nthat, values will remain available even if they have been removed from\nthe config files.\n\n:param reload: if set to ``True``, the existing values are cleared\n before reloading.\n:param require_load: If set to ``True`` this will raise a\n :py:exc:`IOError` if no config file has been found\n to load.", "id": "f3801:c0:m9"} {"signature": "def check_file(self, filename):", "body": "can_read = super(SecuredConfig, self).check_file(filename)if not can_read:return Falsemode = get_stat(filename).st_modeif (mode & stat.S_IRGRP) or (mode & stat.S_IROTH):msg = \"\"self._log.warning(msg, filename)return Falsereturn True", "docstring": "Overrides :py:meth:`.Config.check_file`", "id": "f3801:c1:m0"} {"signature": "def _validate_file_formats(input_filepath_list, combine_type):", "body": "_validate_sample_rates(input_filepath_list, combine_type)if combine_type == '':_validate_num_channels(input_filepath_list, combine_type)", "docstring": "Validate that combine method can be performed with given files.\n Raises IOError if input file formats are incompatible.", "id": "f3808:m0"} {"signature": "def _validate_sample_rates(input_filepath_list, combine_type):", "body": "sample_rates = [file_info.sample_rate(f) for f in input_filepath_list]if not core.all_equal(sample_rates):raise IOError(\"\"\"\".format(combine_type))", "docstring": "Check if files in input file list have the same sample rate", "id": "f3808:m1"} {"signature": "def _validate_num_channels(input_filepath_list, combine_type):", "body": "channels = [file_info.channels(f) for f in input_filepath_list]if not core.all_equal(channels):raise IOError(\"\"\"\"\"\".format(combine_type))", "docstring": "Check if files in input file list have the same number of channels", "id": "f3808:m2"} {"signature": "def _build_input_format_list(input_filepath_list, input_volumes=None,input_format=None):", "body": "n_inputs = len(input_filepath_list)input_format_list = []for _ in range(n_inputs):input_format_list.append([])if input_volumes is None:vols = [] * n_inputselse:n_volumes = len(input_volumes)if n_volumes < n_inputs:logger.warning('''',n_volumes, n_inputs, n_inputs - n_volumes)vols = input_volumes + [] * (n_inputs - n_volumes)elif n_volumes > n_inputs:logger.warning('''',n_volumes, n_inputs, n_volumes - n_inputs)vols = input_volumes[:n_inputs]else:vols = [v for v in input_volumes]if input_format is None:fmts = [[] for _ in range(n_inputs)]else:n_fmts = len(input_format)if n_fmts < n_inputs:logger.warning('''',n_fmts, n_inputs, n_inputs - n_fmts)fmts = [f for f in input_format]fmts.extend([[] for _ in range(n_inputs - n_fmts)])elif n_fmts > n_inputs:logger.warning('''',n_fmts, n_inputs, n_fmts - n_inputs)fmts = input_format[:n_inputs]else:fmts = [f for f in input_format]for i, (vol, fmt) in enumerate(zip(vols, fmts)):input_format_list[i].extend(['', ''.format(vol)])input_format_list[i].extend(fmt)return input_format_list", "docstring": "Set input formats given input_volumes.\n\n Parameters\n ----------\n input_filepath_list : list of str\n List of input files\n input_volumes : list of float, default=None\n List of volumes to be applied upon combining input files. Volumes\n are applied to the input files in order.\n If None, input files will be combined at their original volumes.\n input_format : list of lists, default=None\n List of input formats to be applied to each input file. 
Formatting\n arguments are applied to the input files in order.\n If None, the input formats will be inferred from the file header.", "id": "f3808:m3"} {"signature": "def _build_input_args(input_filepath_list, input_format_list):", "body": "if len(input_format_list) != len(input_filepath_list):raise ValueError(\"\")input_args = []zipped = zip(input_filepath_list, input_format_list)for input_file, input_fmt in zipped:input_args.extend(input_fmt)input_args.append(input_file)return input_args", "docstring": "Builds input arguments by stitching input filepaths and input\n formats together.", "id": "f3808:m4"} {"signature": "def _validate_combine_type(combine_type):", "body": "if combine_type not in COMBINE_VALS:raise ValueError(''.format(COMBINE_VALS))", "docstring": "Check that the combine_type is valid.\n\n Parameters\n ----------\n combine_type : str\n Combine type.", "id": "f3808:m5"} {"signature": "def _validate_volumes(input_volumes):", "body": "if not (input_volumes is None or isinstance(input_volumes, list)):raise TypeError(\"\")if isinstance(input_volumes, list):for vol in input_volumes:if not core.is_number(vol):raise ValueError(\"\".format(vol))", "docstring": "Check input_volumes contains a valid list of volumes.\n\n Parameters\n ----------\n input_volumes : list\n list of volume values. Castable to numbers.", "id": "f3808:m6"} {"signature": "def build(self, input_filepath_list, output_filepath, combine_type,input_volumes=None):", "body": "file_info.validate_input_file_list(input_filepath_list)file_info.validate_output_file(output_filepath)_validate_combine_type(combine_type)_validate_volumes(input_volumes)input_format_list = _build_input_format_list(input_filepath_list, input_volumes, self.input_format)try:_validate_file_formats(input_filepath_list, combine_type)except SoxiError:logger.warning(\"\")args = []args.extend(self.globals)args.extend(['', combine_type])input_args = _build_input_args(input_filepath_list, input_format_list)args.extend(input_args)args.extend(self.output_format)args.append(output_filepath)args.extend(self.effects)status, out, err = sox(args)if status != :raise SoxError(\"\".format(out, err))else:logger.info(\"\",output_filepath,combine_type,\"\".join(self.effects_log))if out is not None:logger.info(\"\".format(out))return True", "docstring": "Builds the output_file by executing the current set of commands.\n\n Parameters\n ----------\n input_filepath_list : list of str\n List of paths to input audio files.\n output_filepath : str\n Path to desired output file. If a file already exists at the given\n path, the file will be overwritten.\n combine_type : str\n Input file combining method. One of the following values:\n * concatenate : combine input files by concatenating in the\n order given.\n * merge : combine input files by stacking each input file into\n a new channel of the output file.\n * mix : combine input files by summing samples in corresponding\n channels.\n * mix-power : combine input files with volume adjustments such\n that the output volume is roughly equivalent to one of the\n input signals.\n * multiply : combine input files by multiplying samples in\n corresponding samples.\n input_volumes : list of float, default=None\n List of volumes to be applied upon combining input files. 
Volumes\n are applied to the input files in order.\n If None, input files will be combined at their original volumes.", "id": "f3808:c0:m1"} {"signature": "def preview(self, input_filepath_list, combine_type, input_volumes=None):", "body": "args = [\"\", \"\"]args.extend(self.globals)args.extend(['', combine_type])input_format_list = _build_input_format_list(input_filepath_list, input_volumes, self.input_format)input_args = _build_input_args(input_filepath_list, input_format_list)args.extend(input_args)args.extend(self.effects)play(args)", "docstring": "Play a preview of the output with the current set of effects\n\n Parameters\n ----------\n input_filepath_list : list of str\n List of paths to input audio files.\n combine_type : str\n Input file combining method. One of the following values:\n * concatenate : combine input files by concatenating in the\n order given.\n * merge : combine input files by stacking each input file into\n a new channel of the output file.\n * mix : combine input files by summing samples in corresponding\n channels.\n * mix-power : combine input files with volume adjustments such\n that the output volume is roughly equivalent to one of the\n input signals.\n * multiply : combine input files by multiplying samples in\n corresponding samples.\n input_volumes : list of float, default=None\n List of volumes to be applied upon combining input files. Volumes\n are applied to the input files in order.\n If None, input files will be combined at their original volumes.", "id": "f3808:c0:m2"} {"signature": "def set_input_format(self, file_type=None, rate=None, bits=None,channels=None, encoding=None, ignore_length=None):", "body": "if file_type is not None and not isinstance(file_type, list):raise ValueError(\"\")if file_type is not None:if not all([f in VALID_FORMATS for f in file_type]):raise ValueError(''''.format(VALID_FORMATS))else:file_type = []if rate is not None and not isinstance(rate, list):raise ValueError(\"\")if rate is not None:if not all([is_number(r) and r > for r in rate]):raise ValueError('')else:rate = []if bits is not None and not isinstance(bits, list):raise ValueError(\"\")if bits is not None:if not all([isinstance(b, int) and b > for b in bits]):raise ValueError('')else:bits = []if channels is not None and not isinstance(channels, list):raise ValueError(\"\")if channels is not None:if not all([isinstance(c, int) and c > for c in channels]):raise ValueError('')else:channels = []if encoding is not None and not isinstance(encoding, list):raise ValueError(\"\")if encoding is not None:if not all([e in ENCODING_VALS for e in encoding]):raise ValueError(''''.format(ENCODING_VALS))else:encoding = []if ignore_length is not None and not isinstance(ignore_length, list):raise ValueError(\"\")if ignore_length is not None:if not all([isinstance(l, bool) for l in ignore_length]):raise ValueError(\"\")else:ignore_length = []max_input_arg_len = max([len(file_type), len(rate), len(bits), len(channels),len(encoding), len(ignore_length)])input_format = []for _ in range(max_input_arg_len):input_format.append([])for i, f in enumerate(file_type):input_format[i].extend(['', ''.format(f)])for i, r in enumerate(rate):input_format[i].extend(['', ''.format(r)])for i, b in enumerate(bits):input_format[i].extend(['', ''.format(b)])for i, c in enumerate(channels):input_format[i].extend(['', ''.format(c)])for i, e in enumerate(encoding):input_format[i].extend(['', ''.format(e)])for i, l in enumerate(ignore_length):if l is True:input_format[i].append('')self.input_format = 
input_formatreturn self", "docstring": "Sets input file format arguments. This is primarily useful when\n dealing with audio files without a file extension. Overwrites any\n previously set input file arguments.\n\n If this function is not explicity called the input format is inferred\n from the file extension or the file's header.\n\n Parameters\n ----------\n file_type : list of str or None, default=None\n The file type of the input audio file. Should be the same as what\n the file extension would be, for ex. 'mp3' or 'wav'.\n rate : list of float or None, default=None\n The sample rate of the input audio file. If None the sample rate\n is inferred.\n bits : list of int or None, default=None\n The number of bits per sample. If None, the number of bits per\n sample is inferred.\n channels : list of int or None, default=None\n The number of channels in the audio file. If None the number of\n channels is inferred.\n encoding : list of str or None, default=None\n The audio encoding type. Sometimes needed with file-types that\n support more than one encoding type. One of:\n * signed-integer : PCM data stored as signed (\u2018two\u2019s\n complement\u2019) integers. Commonly used with a 16 or 24\u2212bit\n encoding size. A value of 0 represents minimum signal\n power.\n * unsigned-integer : PCM data stored as unsigned integers.\n Commonly used with an 8-bit encoding size. A value of 0\n represents maximum signal power.\n * floating-point : PCM data stored as IEEE 753 single precision\n (32-bit) or double precision (64-bit) floating-point\n (\u2018real\u2019) numbers. A value of 0 represents minimum signal\n power.\n * a-law : International telephony standard for logarithmic\n encoding to 8 bits per sample. It has a precision\n equivalent to roughly 13-bit PCM and is sometimes encoded\n with reversed bit-ordering.\n * u-law : North American telephony standard for logarithmic\n encoding to 8 bits per sample. A.k.a. \u03bc-law. It has a\n precision equivalent to roughly 14-bit PCM and is sometimes\n encoded with reversed bit-ordering.\n * oki-adpcm : OKI (a.k.a. VOX, Dialogic, or Intel) 4-bit ADPCM;\n it has a precision equivalent to roughly 12-bit PCM. ADPCM\n is a form of audio compression that has a good compromise\n between audio quality and encoding/decoding speed.\n * ima-adpcm : IMA (a.k.a. DVI) 4-bit ADPCM; it has a precision\n equivalent to roughly 13-bit PCM.\n * ms-adpcm : Microsoft 4-bit ADPCM; it has a precision\n equivalent to roughly 14-bit PCM.\n * gsm-full-rate : GSM is currently used for the vast majority\n of the world\u2019s digital wireless telephone calls. It\n utilises several audio formats with different bit-rates and\n associated speech quality. SoX has support for GSM\u2019s\n original 13kbps \u2018Full Rate\u2019 audio format. It is usually\n CPU-intensive to work with GSM audio.\n ignore_length : list of bool or None, default=None\n If True, overrides an (incorrect) audio length given in an audio\n file\u2019s header. 
If this option is given then SoX will keep reading\n audio until it reaches the end of the input file.", "id": "f3808:c0:m3"} {"signature": "def __init__(self):", "body": "self.input_format = []self.output_format = []self.effects = []self.effects_log = []self.globals = []self.set_globals()", "docstring": "Attributes\n----------\ninput_format : list of str\n Input file format arguments that will be passed to SoX.\noutput_format : list of str\n Output file format arguments that will be passed to SoX.\neffects : list of str\n Effects arguments that will be passed to SoX.\neffects_log : list of str\n Ordered sequence of effects applied.\nglobals : list of str\n Global arguments that will be passed to SoX.", "id": "f3809:c0:m0"} {"signature": "def set_globals(self, dither=False, guard=False, multithread=False,replay_gain=False, verbosity=):", "body": "if not isinstance(dither, bool):raise ValueError('')if not isinstance(guard, bool):raise ValueError('')if not isinstance(multithread, bool):raise ValueError('')if not isinstance(replay_gain, bool):raise ValueError('')if verbosity not in VERBOSITY_VALS:raise ValueError(''.format(VERBOSITY_VALS))global_args = []if not dither:global_args.append('')if guard:global_args.append('')if multithread:global_args.append('')if replay_gain:global_args.append('')global_args.append('')global_args.append(''.format(verbosity))self.globals = global_argsreturn self", "docstring": "Sets SoX's global arguments.\n Overwrites any previously set global arguments.\n If this function is not explicitly called, globals are set to this\n function's defaults.\n\n Parameters\n ----------\n dither : bool, default=False\n If True, dithering is applied for low files with low bit rates.\n guard : bool, default=False\n If True, invokes the gain effect to guard against clipping.\n multithread : bool, default=False\n If True, each channel is processed in parallel.\n replay_gain : bool, default=False\n If True, applies replay-gain adjustment to input-files.\n verbosity : int, default=2\n SoX's verbosity level. One of:\n * 0 : No messages are shown at all\n * 1 : Only error messages are shown. These are generated if SoX\n cannot complete the requested commands.\n * 2 : Warning messages are also shown. 
These are generated if\n SoX can complete the requested commands, but not exactly\n according to the requested command parameters, or if\n clipping occurs.\n * 3 : Descriptions of SoX\u2019s processing phases are also shown.\n Useful for seeing exactly how SoX is processing your audio.\n * 4, >4 : Messages to help with debugging SoX are also shown.", "id": "f3809:c0:m1"} {"signature": "def set_input_format(self, file_type=None, rate=None, bits=None,channels=None, encoding=None, ignore_length=False):", "body": "if file_type not in VALID_FORMATS + [None]:raise ValueError(''.format(VALID_FORMATS))if not is_number(rate) and rate is not None:raise ValueError('')if rate is not None and rate <= :raise ValueError('')if not isinstance(bits, int) and bits is not None:raise ValueError('')if bits is not None and bits <= :raise ValueError('')if not isinstance(channels, int) and channels is not None:raise ValueError('')if channels is not None and channels <= :raise ValueError('')if encoding not in ENCODING_VALS + [None]:raise ValueError(''.format(ENCODING_VALS))if not isinstance(ignore_length, bool):raise ValueError('')input_format = []if file_type is not None:input_format.extend(['', ''.format(file_type)])if rate is not None:input_format.extend(['', ''.format(rate)])if bits is not None:input_format.extend(['', ''.format(bits)])if channels is not None:input_format.extend(['', ''.format(channels)])if encoding is not None:input_format.extend(['', ''.format(encoding)])if ignore_length:input_format.append('')self.input_format = input_formatreturn self", "docstring": "Sets input file format arguments. This is primarily useful when\n dealing with audio files without a file extension. Overwrites any\n previously set input file arguments.\n\n If this function is not explicity called the input format is inferred\n from the file extension or the file's header.\n\n Parameters\n ----------\n file_type : str or None, default=None\n The file type of the input audio file. Should be the same as what\n the file extension would be, for ex. 'mp3' or 'wav'.\n rate : float or None, default=None\n The sample rate of the input audio file. If None the sample rate\n is inferred.\n bits : int or None, default=None\n The number of bits per sample. If None, the number of bits per\n sample is inferred.\n channels : int or None, default=None\n The number of channels in the audio file. If None the number of\n channels is inferred.\n encoding : str or None, default=None\n The audio encoding type. Sometimes needed with file-types that\n support more than one encoding type. One of:\n * signed-integer : PCM data stored as signed (\u2018two\u2019s\n complement\u2019) integers. Commonly used with a 16 or 24\u2212bit\n encoding size. A value of 0 represents minimum signal\n power.\n * unsigned-integer : PCM data stored as unsigned integers.\n Commonly used with an 8-bit encoding size. A value of 0\n represents maximum signal power.\n * floating-point : PCM data stored as IEEE 753 single precision\n (32-bit) or double precision (64-bit) floating-point\n (\u2018real\u2019) numbers. A value of 0 represents minimum signal\n power.\n * a-law : International telephony standard for logarithmic\n encoding to 8 bits per sample. It has a precision\n equivalent to roughly 13-bit PCM and is sometimes encoded\n with reversed bit-ordering.\n * u-law : North American telephony standard for logarithmic\n encoding to 8 bits per sample. A.k.a. \u03bc-law. 
It has a\n precision equivalent to roughly 14-bit PCM and is sometimes\n encoded with reversed bit-ordering.\n * oki-adpcm : OKI (a.k.a. VOX, Dialogic, or Intel) 4-bit ADPCM;\n it has a precision equivalent to roughly 12-bit PCM. ADPCM\n is a form of audio compression that has a good compromise\n between audio quality and encoding/decoding speed.\n * ima-adpcm : IMA (a.k.a. DVI) 4-bit ADPCM; it has a precision\n equivalent to roughly 13-bit PCM.\n * ms-adpcm : Microsoft 4-bit ADPCM; it has a precision\n equivalent to roughly 14-bit PCM.\n * gsm-full-rate : GSM is currently used for the vast majority\n of the world\u2019s digital wireless telephone calls. It\n utilises several audio formats with different bit-rates and\n associated speech quality. SoX has support for GSM\u2019s\n original 13kbps \u2018Full Rate\u2019 audio format. It is usually\n CPU-intensive to work with GSM audio.\n ignore_length : bool, default=False\n If True, overrides an (incorrect) audio length given in an audio\n file\u2019s header. If this option is given then SoX will keep reading\n audio until it reaches the end of the input file.", "id": "f3809:c0:m2"} {"signature": "def set_output_format(self, file_type=None, rate=None, bits=None,channels=None, encoding=None, comments=None,append_comments=True):", "body": "if file_type not in VALID_FORMATS + [None]:raise ValueError(''.format(VALID_FORMATS))if not is_number(rate) and rate is not None:raise ValueError('')if rate is not None and rate <= :raise ValueError('')if not isinstance(bits, int) and bits is not None:raise ValueError('')if bits is not None and bits <= :raise ValueError('')if not isinstance(channels, int) and channels is not None:raise ValueError('')if channels is not None and channels <= :raise ValueError('')if encoding not in ENCODING_VALS + [None]:raise ValueError(''.format(ENCODING_VALS))if comments is not None and not isinstance(comments, str):raise ValueError('')if not isinstance(append_comments, bool):raise ValueError('')output_format = []if file_type is not None:output_format.extend(['', ''.format(file_type)])if rate is not None:output_format.extend(['', ''.format(rate)])if bits is not None:output_format.extend(['', ''.format(bits)])if channels is not None:output_format.extend(['', ''.format(channels)])if encoding is not None:output_format.extend(['', ''.format(encoding)])if comments is not None:if append_comments:output_format.extend(['', comments])else:output_format.extend(['', comments])self.output_format = output_formatreturn self", "docstring": "Sets output file format arguments. These arguments will overwrite\n any format related arguments supplied by other effects (e.g. rate).\n\n If this function is not explicity called the output format is inferred\n from the file extension or the file's header.\n\n Parameters\n ----------\n file_type : str or None, default=None\n The file type of the output audio file. Should be the same as what\n the file extension would be, for ex. 'mp3' or 'wav'.\n rate : float or None, default=None\n The sample rate of the output audio file. If None the sample rate\n is inferred.\n bits : int or None, default=None\n The number of bits per sample. If None, the number of bits per\n sample is inferred.\n channels : int or None, default=None\n The number of channels in the audio file. If None the number of\n channels is inferred.\n encoding : str or None, default=None\n The audio encoding type. Sometimes needed with file-types that\n support more than one encoding type. 
One of:\n * signed-integer : PCM data stored as signed (\u2018two\u2019s\n complement\u2019) integers. Commonly used with a 16 or 24\u2212bit\n encoding size. A value of 0 represents minimum signal\n power.\n * unsigned-integer : PCM data stored as unsigned integers.\n Commonly used with an 8-bit encoding size. A value of 0\n represents maximum signal power.\n * floating-point : PCM data stored as IEEE 753 single precision\n (32-bit) or double precision (64-bit) floating-point\n (\u2018real\u2019) numbers. A value of 0 represents minimum signal\n power.\n * a-law : International telephony standard for logarithmic\n encoding to 8 bits per sample. It has a precision\n equivalent to roughly 13-bit PCM and is sometimes encoded\n with reversed bit-ordering.\n * u-law : North American telephony standard for logarithmic\n encoding to 8 bits per sample. A.k.a. \u03bc-law. It has a\n precision equivalent to roughly 14-bit PCM and is sometimes\n encoded with reversed bit-ordering.\n * oki-adpcm : OKI (a.k.a. VOX, Dialogic, or Intel) 4-bit ADPCM;\n it has a precision equivalent to roughly 12-bit PCM. ADPCM\n is a form of audio compression that has a good compromise\n between audio quality and encoding/decoding speed.\n * ima-adpcm : IMA (a.k.a. DVI) 4-bit ADPCM; it has a precision\n equivalent to roughly 13-bit PCM.\n * ms-adpcm : Microsoft 4-bit ADPCM; it has a precision\n equivalent to roughly 14-bit PCM.\n * gsm-full-rate : GSM is currently used for the vast majority\n of the world\u2019s digital wireless telephone calls. It\n utilises several audio formats with different bit-rates and\n associated speech quality. SoX has support for GSM\u2019s\n original 13kbps \u2018Full Rate\u2019 audio format. It is usually\n CPU-intensive to work with GSM audio.\n comments : str or None, default=None\n If not None, the string is added as a comment in the header of the\n output audio file. If None, no comments are added.\n append_comments : bool, default=True\n If True, comment strings are appended to SoX's default comments. If\n False, the supplied comment replaces the existing comment.", "id": "f3809:c0:m3"} {"signature": "def clear_effects(self):", "body": "self.effects = list()self.effects_log = list()return self", "docstring": "Remove all effects processes.", "id": "f3809:c0:m4"} {"signature": "def build(self, input_filepath, output_filepath, extra_args=None,return_output=False):", "body": "file_info.validate_input_file(input_filepath)if output_filepath is not None:file_info.validate_output_file(output_filepath)else:output_filepath = ''if input_filepath == output_filepath:raise ValueError(\"\")args = []args.extend(self.globals)args.extend(self.input_format)args.append(input_filepath)args.extend(self.output_format)args.append(output_filepath)args.extend(self.effects)if extra_args is not None:if not isinstance(extra_args, list):raise ValueError(\"\")args.extend(extra_args)status, out, err = sox(args)if status != :raise SoxError(\"\".format(out, err))else:logger.info(\"\",output_filepath,\"\".join(self.effects_log))if out is not None:logger.info(\"\".format(out))if return_output:return status, out, errelse:return True", "docstring": "Builds the output_file by executing the current set of commands.\n\n Parameters\n ----------\n input_filepath : str\n Path to input audio file.\n output_filepath : str or None\n Path to desired output file. 
If a file already exists at the given\n path, the file will be overwritten.\n If None, no file will be created.\n extra_args : list or None, default=None\n If a list is given, these additional arguments are passed to SoX\n at the end of the list of effects.\n Don't use this argument unless you know exactly what you're doing!\n return_output : bool, default=False\n If True, returns the status and information sent to stderr and\n stdout as a tuple (status, stdout, stderr).\n Otherwise returns True on success.", "id": "f3809:c0:m5"} {"signature": "def preview(self, input_filepath):", "body": "args = [\"\", \"\"]args.extend(self.globals)args.extend(self.input_format)args.append(input_filepath)args.extend(self.effects)play(args)", "docstring": "Play a preview of the output with the current set of effects\n\n Parameters\n ----------\n input_filepath : str\n Path to input audio file.", "id": "f3809:c0:m6"} {"signature": "def allpass(self, frequency, width_q=):", "body": "if not is_number(frequency) or frequency <= :raise ValueError(\"\")if not is_number(width_q) or width_q <= :raise ValueError(\"\")effect_args = ['', ''.format(frequency), ''.format(width_q)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Apply a two-pole all-pass filter. An all-pass filter changes the\n audio\u2019s frequency to phase relationship without changing its frequency\n to amplitude relationship. The filter is described in detail in at\n http://musicdsp.org/files/Audio-EQ-Cookbook.txt\n\n Parameters\n ----------\n frequency : float\n The filter's center frequency in Hz.\n width_q : float, default=2.0\n The filter's width as a Q-factor.\n\n See Also\n --------\n equalizer, highpass, lowpass, sinc", "id": "f3809:c0:m7"} {"signature": "def bandpass(self, frequency, width_q=, constant_skirt=False):", "body": "if not is_number(frequency) or frequency <= :raise ValueError(\"\")if not is_number(width_q) or width_q <= :raise ValueError(\"\")if not isinstance(constant_skirt, bool):raise ValueError(\"\")effect_args = ['']if constant_skirt:effect_args.append('')effect_args.extend([''.format(frequency), ''.format(width_q)])self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Apply a two-pole Butterworth band-pass filter with the given central\n frequency, and (3dB-point) band-width. The filter rolls off at 6dB per\n octave (20dB per decade) and is described in detail in\n http://musicdsp.org/files/Audio-EQ-Cookbook.txt\n\n Parameters\n ----------\n frequency : float\n The filter's center frequency in Hz.\n width_q : float, default=2.0\n The filter's width as a Q-factor.\n constant_skirt : bool, default=False\n If True, selects constant skirt gain (peak gain = width_q).\n If False, selects constant 0dB peak gain.\n\n See Also\n --------\n bandreject, sinc", "id": "f3809:c0:m8"} {"signature": "def bandreject(self, frequency, width_q=):", "body": "if not is_number(frequency) or frequency <= :raise ValueError(\"\")if not is_number(width_q) or width_q <= :raise ValueError(\"\")effect_args = ['', ''.format(frequency), ''.format(width_q)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Apply a two-pole Butterworth band-reject filter with the given\n central frequency, and (3dB-point) band-width. 
The filter rolls off at\n 6dB per octave (20dB per decade) and is described in detail in\n http://musicdsp.org/files/Audio-EQ-Cookbook.txt\n\n Parameters\n ----------\n frequency : float\n The filter's center frequency in Hz.\n width_q : float, default=2.0\n The filter's width as a Q-factor.\n constant_skirt : bool, default=False\n If True, selects constant skirt gain (peak gain = width_q).\n If False, selects constant 0dB peak gain.\n\n See Also\n --------\n bandreject, sinc", "id": "f3809:c0:m9"} {"signature": "def bass(self, gain_db, frequency=, slope=):", "body": "if not is_number(gain_db):raise ValueError(\"\")if not is_number(frequency) or frequency <= :raise ValueError(\"\")if not is_number(slope) or slope <= or slope > :raise ValueError(\"\")effect_args = ['', ''.format(gain_db), ''.format(frequency),''.format(slope)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Boost or cut the bass (lower) frequencies of the audio using a\n two-pole shelving filter with a response similar to that of a standard\n hi-fi\u2019s tone-controls. This is also known as shelving equalisation.\n\n The filters are described in detail in\n http://musicdsp.org/files/Audio-EQ-Cookbook.txt\n\n Parameters\n ----------\n gain_db : float\n The gain at 0 Hz.\n For a large cut use -20, for a large boost use 20.\n frequency : float, default=100.0\n The filter's cutoff frequency in Hz.\n slope : float, default=0.5\n The steepness of the filter's shelf transition.\n For a gentle slope use 0.3, and use 1.0 for a steep slope.\n\n See Also\n --------\n treble, equalizer", "id": "f3809:c0:m10"} {"signature": "def bend(self, n_bends, start_times, end_times, cents, frame_rate=,oversample_rate=):", "body": "if not isinstance(n_bends, int) or n_bends < :raise ValueError(\"\")if not isinstance(start_times, list) or len(start_times) != n_bends:raise ValueError(\"\")if any([(not is_number(p) or p <= ) for p in start_times]):raise ValueError(\"\")if sorted(start_times) != start_times:raise ValueError(\"\")if not isinstance(end_times, list) or len(end_times) != n_bends:raise ValueError(\"\")if any([(not is_number(p) or p <= ) for p in end_times]):raise ValueError(\"\")if sorted(end_times) != end_times:raise ValueError(\"\")if any([e <= s for s, e in zip(start_times, end_times)]):raise ValueError(\"\")if any([e > s for s, e in zip(start_times[:], end_times[:-])]):raise ValueError(\"\")if not isinstance(cents, list) or len(cents) != n_bends:raise ValueError(\"\")if any([not is_number(p) for p in cents]):raise ValueError(\"\")if (not isinstance(frame_rate, int) orframe_rate < or frame_rate > ):raise ValueError(\"\")if (not isinstance(oversample_rate, int) oroversample_rate < or oversample_rate > ):raise ValueError(\"\")effect_args = ['','', ''.format(frame_rate),'', ''.format(oversample_rate)]last = for i in range(n_bends):t_start = round(start_times[i] - last, )t_end = round(end_times[i] - start_times[i], )effect_args.append(''.format(t_start, cents[i], t_end))last = end_times[i]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Changes pitch by specified amounts at specified times.\n The pitch-bending algorithm utilises the Discrete Fourier Transform\n (DFT) at a particular frame rate and over-sampling rate.\n\n Parameters\n ----------\n n_bends : int\n The number of intervals to pitch shift\n start_times : list of floats\n A list of absolute start times (in seconds), in order\n end_times : list of floats\n A list of absolute end times (in seconds) in 
order.\n [start_time, end_time] intervals may not overlap!\n cents : list of floats\n A list of pitch shifts in cents. A positive value shifts the pitch\n up, a negative value shifts the pitch down.\n frame_rate : int, default=25\n The number of DFT frames to process per second, between 10 and 80\n oversample_rate: int, default=16\n The number of frames to over sample per second, between 4 and 32\n\n See Also\n --------\n pitch", "id": "f3809:c0:m11"} {"signature": "def biquad(self, b, a):", "body": "if not isinstance(b, list):raise ValueError('')if not isinstance(a, list):raise ValueError('')if len(b) != :raise ValueError('')if len(a) != :raise ValueError('')if not all([is_number(b_val) for b_val in b]):raise ValueError('')if not all([is_number(a_val) for a_val in a]):raise ValueError('')effect_args = ['', ''.format(b[]), ''.format(b[]),''.format(b[]), ''.format(a[]),''.format(a[]), ''.format(a[])]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Apply a biquad IIR filter with the given coefficients.\n\n Parameters\n ----------\n b : list of floats\n Numerator coefficients. Must be length 3\n a : list of floats\n Denominator coefficients. Must be length 3\n\n See Also\n --------\n fir, treble, bass, equalizer", "id": "f3809:c0:m12"} {"signature": "def channels(self, n_channels):", "body": "if not isinstance(n_channels, int) or n_channels <= :raise ValueError('')effect_args = ['', ''.format(n_channels)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Change the number of channels in the audio signal. If decreasing the\n number of channels it mixes channels together, if increasing the number\n of channels it duplicates.\n\n Note: This overrides arguments used in the convert effect!\n\n Parameters\n ----------\n n_channels : int\n Desired number of channels.\n\n See Also\n --------\n convert", "id": "f3809:c0:m13"} {"signature": "def chorus(self, gain_in=, gain_out=, n_voices=, delays=None,decays=None, speeds=None, depths=None, shapes=None):", "body": "if not is_number(gain_in) or gain_in <= or gain_in > :raise ValueError(\"\")if not is_number(gain_out) or gain_out <= or gain_out > :raise ValueError(\"\")if not isinstance(n_voices, int) or n_voices <= :raise ValueError(\"\")if not (delays is None or isinstance(delays, list)):raise ValueError(\"\")if delays is not None:if len(delays) != n_voices:raise ValueError(\"\")if any((not is_number(p) or p < ) for p in delays):raise ValueError(\"\")else:delays = [random.uniform(, ) for _ in range(n_voices)]if not (decays is None or isinstance(decays, list)):raise ValueError(\"\")if decays is not None:if len(decays) != n_voices:raise ValueError(\"\")if any((not is_number(p) or p <= or p > ) for p in decays):raise ValueError(\"\")else:decays = [random.uniform(, ) for _ in range(n_voices)]if not (speeds is None or isinstance(speeds, list)):raise ValueError(\"\")if speeds is not None:if len(speeds) != n_voices:raise ValueError(\"\")if any((not is_number(p) or p <= ) for p in speeds):raise ValueError(\"\")else:speeds = [random.uniform(, ) for _ in range(n_voices)]if not (depths is None or isinstance(depths, list)):raise ValueError(\"\")if depths is not None:if len(depths) != n_voices:raise ValueError(\"\")if any((not is_number(p) or p <= ) for p in depths):raise ValueError(\"\")else:depths = [random.uniform(, ) for _ in range(n_voices)]if not (shapes is None or isinstance(shapes, list)):raise ValueError(\"\")if shapes is not None:if len(shapes) != n_voices:raise ValueError(\"\")if 
any((p not in ['', '']) for p in shapes):raise ValueError(\"\")else:shapes = [random.choice(['', '']) for _ in range(n_voices)]effect_args = ['', ''.format(gain_in), ''.format(gain_out)]for i in range(n_voices):effect_args.extend([''.format(delays[i]),''.format(decays[i]),''.format(speeds[i]),''.format(depths[i]),''.format(shapes[i])])self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Add a chorus effect to the audio. This can make a single vocal sound\n like a chorus, but can also be applied to instrumentation.\n\n Chorus resembles an echo effect with a short delay, but whereas with\n echo the delay is constant, with chorus, it is varied using sinusoidal\n or triangular modulation. The modulation depth defines the range the\n modulated delay is played before or after the delay. Hence the delayed\n sound will sound slower or faster, that is the delayed sound tuned\n around the original one, like in a chorus where some vocals are\n slightly off key.\n\n Parameters\n ----------\n gain_in : float, default=0.3\n Input volume, between 0 and 1.\n gain_out : float, default=0.8\n Output volume, between 0 and 1.\n n_voices : int, default=3\n The number of voices in the chorus effect.\n delays : list of floats > 20 or None, default=None\n If a list, the list of delays (in milliseconds) of length n_voices.\n If None, the individual delay parameters are chosen automatically\n to be between 40 and 60 milliseconds.\n decays : list of floats or None, default=None\n If a list, the list of decays (as a fraction of gain_in) of length\n n_voices.\n If None, the individual decay parameters are chosen automatically\n to be between 0.3 and 0.4.\n speeds : list of floats or None, default=None\n If a list, the list of modulation speeds (in Hz) of length n_voices\n If None, the individual speed parameters are chosen automatically\n to be between 0.25 and 0.4 Hz.\n depths : list of floats or None, default=None\n If a list, the list of depths (in milliseconds) of length n_voices.\n If None, the individual delay parameters are chosen automatically\n to be between 1 and 3 milliseconds.\n shapes : list of 's' or 't' or None, default=None\n If a list, the list of modulation shapes - 's' for sinusoidal or\n 't' for triangular - of length n_voices.\n If None, the individual shapes are chosen automatically.", "id": "f3809:c0:m14"} {"signature": "def compand(self, attack_time=, decay_time=, soft_knee_db=,tf_points=[(-, -), (-, -), (, )],):", "body": "if not is_number(attack_time) or attack_time <= :raise ValueError(\"\")if not is_number(decay_time) or decay_time <= :raise ValueError(\"\")if attack_time > decay_time:logger.warning(\"\"\"\"\"\"\"\")if not (is_number(soft_knee_db) or soft_knee_db is None):raise ValueError(\"\")if not isinstance(tf_points, list):raise TypeError(\"\")if len(tf_points) == :raise ValueError(\"\")if any(not isinstance(pair, tuple) for pair in tf_points):raise ValueError(\"\")if any(len(pair) != for pair in tf_points):raise ValueError(\"\")if any(not (is_number(p[]) and is_number(p[])) for p in tf_points):raise ValueError(\"\")if any((p[] > or p[] > ) for p in tf_points):raise ValueError(\"\")if len(tf_points) > len(set([p[] for p in tf_points])):raise ValueError(\"\")tf_points = sorted(tf_points,key=lambda tf_points: tf_points[])transfer_list = []for point in 
tf_points:transfer_list.extend([\"\".format(point[]), \"\".format(point[])])effect_args = ['',\"\".format(attack_time, decay_time)]if soft_knee_db is not None:effect_args.append(\"\".format(soft_knee_db, \"\".join(transfer_list)))else:effect_args.append(\"\".join(transfer_list))self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Compand (compress or expand) the dynamic range of the audio.\n\n Parameters\n ----------\n attack_time : float, default=0.3\n The time in seconds over which the instantaneous level of the input\n signal is averaged to determine increases in volume.\n decay_time : float, default=0.8\n The time in seconds over which the instantaneous level of the input\n signal is averaged to determine decreases in volume.\n soft_knee_db : float or None, default=6.0\n The amount (in dB) for which the points where adjacent line\n segments on the transfer function meet will be rounded.\n If None, no soft_knee is applied.\n tf_points : list of tuples\n Transfer function points as a list of tuples corresponding to\n points in (dB, dB) defining the compander's transfer function.\n\n See Also\n --------\n mcompand, contrast", "id": "f3809:c0:m15"} {"signature": "def contrast(self, amount=):", "body": "if not is_number(amount) or amount < or amount > :raise ValueError('')effect_args = ['', ''.format(amount)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Comparable with compression, this effect modifies an audio signal to\n make it sound louder.\n\n Parameters\n ----------\n amount : float\n Amount of enhancement between 0 and 100.\n\n See Also\n --------\n compand, mcompand", "id": "f3809:c0:m16"} {"signature": "def convert(self, samplerate=None, n_channels=None, bitdepth=None):", "body": "bitdepths = [, , , , ]if bitdepth is not None:if bitdepth not in bitdepths:raise ValueError(\"\".format(str(bitdepths)))self.output_format.extend(['', ''.format(bitdepth)])if n_channels is not None:if not isinstance(n_channels, int) or n_channels <= :raise ValueError(\"\")self.output_format.extend(['', ''.format(n_channels)])if samplerate is not None:if not is_number(samplerate) or samplerate <= :raise ValueError(\"\")self.rate(samplerate)return self", "docstring": "Converts output audio to the specified format.\n\n Parameters\n ----------\n samplerate : float, default=None\n Desired samplerate. If None, defaults to the same as input.\n n_channels : int, default=None\n Desired number of channels. If None, defaults to the same as input.\n bitdepth : int, default=None\n Desired bitdepth. If None, defaults to the same as input.\n\n See Also\n --------\n rate", "id": "f3809:c0:m17"} {"signature": "def dcshift(self, shift=):", "body": "if not is_number(shift) or shift < - or shift > :raise ValueError('')effect_args = ['', ''.format(shift)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Apply a DC shift to the audio.\n\n Parameters\n ----------\n shift : float\n Amount to shift audio between -2 and 2. (Audio is between -1 and 1)\n\n See Also\n --------\n highpass", "id": "f3809:c0:m18"} {"signature": "def deemph(self):", "body": "effect_args = ['']self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Apply Compact Disc (IEC 60908) de-emphasis (a treble attenuation\n shelving filter). Pre-emphasis was applied in the mastering of some\n CDs issued in the early 1980s. 
These included many classical music\n albums, as well as now sought-after issues of albums by The Beatles,\n Pink Floyd and others. Pre-emphasis should be removed at playback time\n by a de-emphasis filter in the playback device. However, not all modern\n CD players have this filter, and very few PC CD drives have it; playing\n pre-emphasised audio without the correct de-emphasis filter results in\n audio that sounds harsh and is far from what its creators intended.\n\n The de-emphasis filter is implemented as a biquad and requires the\n input audio sample rate to be either 44.1kHz or 48kHz. Maximum\n deviation from the ideal response is only 0.06dB (up to 20kHz).\n\n See Also\n --------\n bass, treble", "id": "f3809:c0:m19"} {"signature": "def delay(self, positions):", "body": "if not isinstance(positions, list):raise ValueError(\"\")if not all((is_number(p) and p >= ) for p in positions):raise ValueError(\"\")effect_args = ['']effect_args.extend([''.format(p) for p in positions])self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Delay one or more audio channels such that they start at the given\n positions.\n\n Parameters\n ----------\n positions: list of floats\n List of times (in seconds) to delay each audio channel.\n If fewer positions are given than the number of channels, the\n remaining channels will be unaffected.", "id": "f3809:c0:m20"} {"signature": "def downsample(self, factor=):", "body": "if not isinstance(factor, int) or factor < :raise ValueError('')effect_args = ['', ''.format(factor)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Downsample the signal by an integer factor. Only the first out of\n each factor samples is retained, the others are discarded.\n\n No decimation filter is applied. If the input is not a properly\n bandlimited baseband signal, aliasing will occur. This may be desirable\n e.g., for frequency translation.\n\n For a general resampling effect with anti-aliasing, see rate.\n\n Parameters\n ----------\n factor : int, default=2\n Downsampling factor.\n\n See Also\n --------\n rate, upsample", "id": "f3809:c0:m21"} {"signature": "def earwax(self):", "body": "effect_args = ['']self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Makes audio easier to listen to on headphones. 
Adds \u2018cues\u2019 to 44.1kHz\n stereo audio so that when listened to on headphones the stereo image is\n moved from inside your head (standard for headphones) to outside and in\n front of the listener (standard for speakers).\n\n Warning: Will only work properly on 44.1kHz stereo audio!", "id": "f3809:c0:m22"} {"signature": "def echo(self, gain_in=, gain_out=, n_echos=, delays=[],decays=[]):", "body": "if not is_number(gain_in) or gain_in <= or gain_in > :raise ValueError(\"\")if not is_number(gain_out) or gain_out <= or gain_out > :raise ValueError(\"\")if not isinstance(n_echos, int) or n_echos <= :raise ValueError(\"\")if not isinstance(delays, list):raise ValueError(\"\")if len(delays) != n_echos:raise ValueError(\"\")if any((not is_number(p) or p <= ) for p in delays):raise ValueError(\"\")if not isinstance(decays, list):raise ValueError(\"\")if len(decays) != n_echos:raise ValueError(\"\")if any((not is_number(p) or p <= or p > ) for p in decays):raise ValueError(\"\")effect_args = ['', ''.format(gain_in), ''.format(gain_out)]for i in range(n_echos):effect_args.extend([''.format(delays[i]),''.format(decays[i])])self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Add echoing to the audio.\n\n Echoes are reflected sound and can occur naturally amongst mountains\n (and sometimes large buildings) when talking or shouting; digital echo\n effects emulate this behaviour and are often used to help fill out\n the sound of a single instrument or vocal. The time difference\n between the original signal and the reflection is the 'delay' (time),\n and the loudness of the reflected signal is the 'decay'. Multiple\n echoes can have different delays and decays.\n\n Parameters\n ----------\n gain_in : float, default=0.8\n Input volume, between 0 and 1\n gain_out : float, default=0.9\n Output volume, between 0 and 1\n n_echos : int, default=1\n Number of reflections\n delays : list, default=[60]\n List of delays in milliseconds\n decays : list, default=[0.4]\n List of decays, relative to gain_in, between 0 and 1\n\n See Also\n --------\n echos, reverb, chorus", "id": "f3809:c0:m23"} {"signature": "def echos(self, gain_in=, gain_out=, n_echos=, delays=[],decays=[]):", "body": "if not is_number(gain_in) or gain_in <= or gain_in > :raise ValueError(\"\")if not is_number(gain_out) or gain_out <= or gain_out > :raise ValueError(\"\")if not isinstance(n_echos, int) or n_echos <= :raise ValueError(\"\")if not isinstance(delays, list):raise ValueError(\"\")if len(delays) != n_echos:raise ValueError(\"\")if any((not is_number(p) or p <= ) for p in delays):raise ValueError(\"\")if not isinstance(decays, list):raise ValueError(\"\")if len(decays) != n_echos:raise ValueError(\"\")if any((not is_number(p) or p <= or p > ) for p in decays):raise ValueError(\"\")effect_args = ['', ''.format(gain_in), ''.format(gain_out)]for i in range(n_echos):effect_args.extend([''.format(delays[i]),''.format(decays[i])])self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Add a sequence of echoes to the audio.\n\n Like the echo effect, echos stands for \u2018ECHO in Sequel\u2019, that is the\n first echos takes the input, the second the input and the first echos,\n the third the input and the first and the second echos, ...
and so on.\n Care should be taken using many echos; a single echos has the same\n effect as a single echo.\n\n Parameters\n ----------\n gain_in : float, default=0.8\n Input volume, between 0 and 1\n gain_out : float, default=0.9\n Output volume, between 0 and 1\n n_echos : int, default=1\n Number of reflections\n delays : list, default=[60]\n List of delays in milliseconds\n decays : list, default=[0.4]\n List of decays, relative to gain_in, between 0 and 1\n\n See Also\n --------\n echo, reverb, chorus", "id": "f3809:c0:m24"} {"signature": "def equalizer(self, frequency, width_q, gain_db):", "body": "if not is_number(frequency) or frequency <= :raise ValueError(\"\")if not is_number(width_q) or width_q <= :raise ValueError(\"\")if not is_number(gain_db):raise ValueError(\"\")effect_args = ['',''.format(frequency),''.format(width_q),''.format(gain_db)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Apply a two-pole peaking equalisation (EQ) filter to boost or\n reduce around a given frequency.\n This effect can be applied multiple times to produce complex EQ curves.\n\n Parameters\n ----------\n frequency : float\n The filter's central frequency in Hz.\n width_q : float\n The filter's width as a Q-factor.\n gain_db : float\n The filter's gain in dB.\n\n See Also\n --------\n bass, treble", "id": "f3809:c0:m25"} {"signature": "def fade(self, fade_in_len=, fade_out_len=, fade_shape=''):", "body": "fade_shapes = ['', '', '', '', '']if fade_shape not in fade_shapes:raise ValueError(\"\".format(\"\".join(fade_shapes)))if not is_number(fade_in_len) or fade_in_len < :raise ValueError(\"\")if not is_number(fade_out_len) or fade_out_len < :raise ValueError(\"\")effect_args = []if fade_in_len > :effect_args.extend(['', ''.format(fade_shape), ''.format(fade_in_len)])if fade_out_len > :effect_args.extend(['', '', ''.format(fade_shape),''.format(fade_out_len), ''])if len(effect_args) > :self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Add a fade in and/or fade out to an audio file.\n Default fade shape is 1/4 sine wave.\n\n Parameters\n ----------\n fade_in_len : float, default=0.0\n Length of fade-in (seconds). If fade_in_len = 0,\n no fade in is applied.\n fade_out_len : float, default=0.0\n Length of fade-out (seconds). If fade_out_len = 0,\n no fade out is applied.\n fade_shape : str, default='q'\n Shape of fade.
Must be one of\n * 'q' for quarter sine (default),\n * 'h' for half sine,\n * 't' for linear,\n * 'l' for logarithmic\n * 'p' for inverted parabola.\n\n See Also\n --------\n splice", "id": "f3809:c0:m26"} {"signature": "def fir(self, coefficients):", "body": "if not isinstance(coefficients, list):raise ValueError(\"\")if not all([is_number(c) for c in coefficients]):raise ValueError(\"\")effect_args = ['']effect_args.extend([''.format(c) for c in coefficients])self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Use SoX\u2019s FFT convolution engine with given FIR filter coefficients.\n\n Parameters\n ----------\n coefficients : list\n fir filter coefficients", "id": "f3809:c0:m27"} {"signature": "def flanger(self, delay=, depth=, regen=, width=, speed=,shape='', phase=, interp=''):", "body": "if not is_number(delay) or delay < or delay > :raise ValueError(\"\")if not is_number(depth) or depth < or depth > :raise ValueError(\"\")if not is_number(regen) or regen < - or regen > :raise ValueError(\"\")if not is_number(width) or width < or width > :raise ValueError(\"\")if not is_number(speed) or speed < or speed > :raise ValueError(\"\")if shape not in ['', '']:raise ValueError(\"\")if not is_number(phase) or phase < or phase > :raise ValueError(\"\")if interp not in ['', '']:raise ValueError(\"\")effect_args = ['',''.format(delay),''.format(depth),''.format(regen),''.format(width),''.format(speed),''.format(shape),''.format(phase),''.format(interp)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Apply a flanging effect to the audio.\n\n Parameters\n ----------\n delay : float, default=0\n Base delay (in miliseconds) between 0 and 30.\n depth : float, default=2\n Added swept delay (in miliseconds) between 0 and 10.\n regen : float, default=0\n Percentage regeneration between -95 and 95.\n width : float, default=71,\n Percentage of delayed signal mixed with original between 0 and 100.\n speed : float, default=0.5\n Sweeps per second (in Hz) between 0.1 and 10.\n shape : 'sine' or 'triangle', default='sine'\n Swept wave shape\n phase : float, default=25\n Swept wave percentage phase-shift for multi-channel flange between\n 0 and 100. 0 = 100 = same phase on each channel\n interp : 'linear' or 'quadratic', default='linear'\n Digital delay-line interpolation type.\n\n See Also\n --------\n tremolo", "id": "f3809:c0:m28"} {"signature": "def gain(self, gain_db=, normalize=True, limiter=False, balance=None):", "body": "if not is_number(gain_db):raise ValueError(\"\")if not isinstance(normalize, bool):raise ValueError(\"\")if not isinstance(limiter, bool):raise ValueError(\"\")if balance not in [None, '', '', '']:raise ValueError(\"\")effect_args = ['']if balance is not None:effect_args.append(''.format(balance))if normalize:effect_args.append('')if limiter:effect_args.append('')effect_args.append(''.format(gain_db))self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Apply amplification or attenuation to the audio signal.\n\n Parameters\n ----------\n gain_db : float, default=0.0\n Gain adjustment in decibels (dB).\n normalize : bool, default=True\n If True, audio is normalized to gain_db relative to full scale.\n If False, simply adjusts the audio power level by gain_db.\n limiter : bool, default=False\n If True, a simple limiter is invoked to prevent clipping.\n balance : str or None, default=None\n Balance gain across channels. 
Can be one of:\n * None applies no balancing (default)\n * 'e' applies gain to all channels other than that with the\n highest peak level, such that all channels attain the same\n peak level\n * 'B' applies gain to all channels other than that with the\n highest RMS level, such that all channels attain the same\n RMS level\n * 'b' applies gain with clipping protection to all channels other\n than that with the highest RMS level, such that all channels\n attain the same RMS level\n If normalize=True, 'B' and 'b' are equivalent.\n\n See Also\n --------\n loudness", "id": "f3809:c0:m29"} {"signature": "def highpass(self, frequency, width_q=, n_poles=):", "body": "if not is_number(frequency) or frequency <= :raise ValueError(\"\")if not is_number(width_q) or width_q <= :raise ValueError(\"\")if n_poles not in [, ]:raise ValueError(\"\")effect_args = ['', ''.format(n_poles), ''.format(frequency)]if n_poles == :effect_args.append(''.format(width_q))self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Apply a high-pass filter with 3dB point frequency. The filter can be\n either single-pole or double-pole. The filters roll off at 6dB per pole\n per octave (20dB per pole per decade).\n\n Parameters\n ----------\n frequency : float\n The filter's cutoff frequency in Hz.\n width_q : float, default=0.707\n The filter's width as a Q-factor. Applies only when n_poles=2.\n The default gives a Butterworth response.\n n_poles : int, default=2\n The number of poles in the filter. Must be either 1 or 2\n\n See Also\n --------\n lowpass, equalizer, sinc, allpass", "id": "f3809:c0:m30"} {"signature": "def lowpass(self, frequency, width_q=, n_poles=):", "body": "if not is_number(frequency) or frequency <= :raise ValueError(\"\")if not is_number(width_q) or width_q <= :raise ValueError(\"\")if n_poles not in [, ]:raise ValueError(\"\")effect_args = ['', ''.format(n_poles), ''.format(frequency)]if n_poles == :effect_args.append(''.format(width_q))self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Apply a low-pass filter with 3dB point frequency. The filter can be\n either single-pole or double-pole. The filters roll off at 6dB per pole\n per octave (20dB per pole per decade).\n\n Parameters\n ----------\n frequency : float\n The filter's cutoff frequency in Hz.\n width_q : float, default=0.707\n The filter's width as a Q-factor. Applies only when n_poles=2.\n The default gives a Butterworth response.\n n_poles : int, default=2\n The number of poles in the filter. Must be either 1 or 2\n\n See Also\n --------\n highpass, equalizer, sinc, allpass", "id": "f3809:c0:m31"} {"signature": "def hilbert(self, num_taps=None):", "body": "if num_taps is not None and not isinstance(num_taps, int):raise ValueError(\"\")if num_taps is not None and num_taps % == :raise ValueError(\"\")effect_args = ['']if num_taps is not None:effect_args.extend(['', ''.format(num_taps)])self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Apply an odd-tap Hilbert transform filter, phase-shifting the signal\n by 90 degrees. This is used in many matrix coding schemes and for\n analytic signal generation. The process is often written as a\n multiplication by i (or j), the imaginary unit. An odd-tap Hilbert\n transform filter has a bandpass characteristic, attenuating the lowest\n and highest frequencies.\n\n Parameters\n ----------\n num_taps : int or None, default=None\n Number of filter taps - must be odd. 
If none, it is chosen to have\n a cutoff frequency of about 75 Hz.", "id": "f3809:c0:m32"} {"signature": "def loudness(self, gain_db=-, reference_level=):", "body": "if not is_number(gain_db):raise ValueError('')if not is_number(reference_level):raise ValueError('')if reference_level > or reference_level < :raise ValueError('')effect_args = ['',''.format(gain_db),''.format(reference_level)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Loudness control. Similar to the gain effect, but provides\n equalisation for the human auditory system.\n\n The gain is adjusted by gain_db and the signal is equalised according\n to ISO 226 w.r.t. reference_level.\n\n Parameters\n ----------\n gain_db : float, default=-10.0\n Loudness adjustment amount (in dB)\n reference_level : float, default=65.0\n Reference level (in dB) according to which the signal is equalized.\n Must be between 50 and 75 (dB)\n\n See Also\n --------\n gain", "id": "f3809:c0:m33"} {"signature": "def mcompand(self, n_bands=, crossover_frequencies=[],attack_time=[, ], decay_time=[, ],soft_knee_db=[, None],tf_points=[[(-, -), (-, -), (-, -), (, )],[(-, -), (-, -), (-, -), (, )]],gain=[None, None]):", "body": "if not isinstance(n_bands, int) or n_bands < :raise ValueError(\"\")if (not isinstance(crossover_frequencies, list) orlen(crossover_frequencies) != n_bands - ):raise ValueError(\"\")if any([not is_number(f) or f < for f in crossover_frequencies]):raise ValueError(\"\")if not isinstance(attack_time, list) or len(attack_time) != n_bands:raise ValueError(\"\")if any([not is_number(a) or a <= for a in attack_time]):raise ValueError(\"\")if not isinstance(decay_time, list) or len(decay_time) != n_bands:raise ValueError(\"\")if any([not is_number(d) or d <= for d in decay_time]):raise ValueError(\"\")if any([a > d for a, d in zip(attack_time, decay_time)]):logger.warning(\"\"\"\"\"\"\"\")if not isinstance(soft_knee_db, list) or len(soft_knee_db) != n_bands:raise ValueError(\"\")if any([(not is_number(d) and d is not None) for d in soft_knee_db]):raise ValueError(\"\")if not isinstance(tf_points, list) or len(tf_points) != n_bands:raise ValueError(\"\")if any([not isinstance(t, list) or len(t) == for t in tf_points]):raise ValueError(\"\")for tfp in tf_points:if any(not isinstance(pair, tuple) for pair in tfp):raise ValueError(\"\")if any(len(pair) != for pair in tfp):raise ValueError(\"\")if any(not (is_number(p[]) and is_number(p[])) for p in tfp):raise ValueError(\"\")if any((p[] > or p[] > ) for p in tfp):raise ValueError(\"\")if len(tf_points) > len(set([p[] for p in tfp])):raise ValueError(\"\")if not isinstance(gain, list) or len(gain) != n_bands:raise ValueError(\"\")if any([not (is_number(g) or g is None) for g in gain]):print(gain)raise ValueError(\"\")effect_args = ['']for i in range(n_bands):if i > :effect_args.append(''.format(crossover_frequencies[i - ]))intermed_args = [\"\".format(attack_time[i], decay_time[i])]tf_points_band = tf_points[i]tf_points_band = sorted(tf_points_band,key=lambda tf_points_band: tf_points_band[])transfer_list = []for point in tf_points_band:transfer_list.extend([\"\".format(point[]), \"\".format(point[])])if soft_knee_db[i] is not None:intermed_args.append(\"\".format(soft_knee_db[i], \"\".join(transfer_list)))else:intermed_args.append(\"\".join(transfer_list))if gain[i] is not None:intermed_args.append(\"\".format(gain[i]))effect_args.append(''.join(intermed_args))self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "The 
multi-band compander is similar to the single-band compander but\n the audio is first divided into bands using Linkwitz-Riley cross-over\n filters and a separately specifiable compander run on each band.\n\n When used with n_bands=1, this effect is identical to compand.\n When using n_bands > 1, the first set of arguments applies a single\n band compander, and each subsequent set of arguments is applied on\n each of the crossover frequencies.\n\n Parameters\n ----------\n n_bands : int, default=2\n The number of bands.\n crossover_frequencies : list of float, default=[1600]\n A list of crossover frequencies in Hz of length n_bands-1.\n The first band is always the full spectrum, followed by the bands\n specified by crossover_frequencies.\n attack_time : list of float, default=[0.005, 0.000625]\n A list of length n_bands, where each element is the time in seconds\n over which the instantaneous level of the input signal is averaged\n to determine increases in volume over the current band.\n decay_time : list of float, default=[0.1, 0.0125]\n A list of length n_bands, where each element is the time in seconds\n over which the instantaneous level of the input signal is averaged\n to determine decreases in volume over the current band.\n soft_knee_db : list of float or None, default=[6.0, None]\n A list of length n_bands, where each element is the amount (in dB)\n for which the points where adjacent line segments on the\n transfer function meet will be rounded over the current band.\n If None, no soft_knee is applied.\n tf_points : list of list of tuples, default=[\n [(-47, -40), (-34, -34), (-17, -33), (0, 0)],\n [(-47, -40), (-34, -34), (-15, -33), (0, 0)]]\n A list of length n_bands, where each element is the transfer\n function points as a list of tuples corresponding to points in\n (dB, dB) defining the compander's transfer function over the\n current band.\n gain : list of floats or None\n A list of gain values for each frequency band.\n If None, no gain is applied.\n\n See Also\n --------\n compand, contrast", "id": "f3809:c0:m34"} {"signature": "def noiseprof(self, input_filepath, profile_path):", "body": "if os.path.isdir(profile_path):raise ValueError(\"\".format(profile_path))if os.path.dirname(profile_path) == '' and profile_path != '':_abs_profile_path = os.path.join(os.getcwd(), profile_path)else:_abs_profile_path = profile_pathif not os.access(os.path.dirname(_abs_profile_path), os.W_OK):raise IOError(\"\".format(_abs_profile_path))effect_args = ['', profile_path]self.build(input_filepath, None, extra_args=effect_args)return None", "docstring": "Calculate a profile of the audio for use in noise reduction.\n Running this command does not affect the Transformer effects\n chain.
When this function is called, the calculated noise profile\n file is saved to the `profile_path`.\n\n Parameters\n ----------\n input_filepath : str\n Path to audiofile from which to compute a noise profile.\n profile_path : str\n Path to save the noise profile file.\n\n See Also\n --------\n noisered", "id": "f3809:c0:m35"} {"signature": "def noisered(self, profile_path, amount=):", "body": "if not os.path.exists(profile_path):raise IOError(\"\".format(profile_path))if not is_number(amount) or amount < or amount > :raise ValueError(\"\")effect_args = ['',profile_path,''.format(amount)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Reduce noise in the audio signal by profiling and filtering.\n This effect is moderately effective at removing consistent\n background noise such as hiss or hum.\n\n Parameters\n ----------\n profile_path : str\n Path to a noise profile file.\n This file can be generated using the `noiseprof` effect.\n amount : float, default=0.5\n How much noise should be removed is specified by amount. Should\n be between 0 and 1. Higher numbers will remove more noise but\n present a greater likelihood of removing wanted components of\n the audio signal.\n\n See Also\n --------\n noiseprof", "id": "f3809:c0:m36"} {"signature": "def norm(self, db_level=-):", "body": "if not is_number(db_level):raise ValueError('')effect_args = ['',''.format(db_level)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Normalize an audio file to a particular db level.\n This behaves identically to the gain effect with normalize=True.\n\n Parameters\n ----------\n db_level : float, default=-3.0\n Output volume (db)\n\n See Also\n --------\n gain, loudness", "id": "f3809:c0:m37"} {"signature": "def oops(self):", "body": "effect_args = ['']self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Out Of Phase Stereo effect. Mixes stereo to twin-mono where each\n mono channel contains the difference between the left and right stereo\n channels. 
This is sometimes known as the 'karaoke' effect as it often\n has the effect of removing most or all of the vocals from a recording.", "id": "f3809:c0:m38"} {"signature": "def overdrive(self, gain_db=, colour=):", "body": "if not is_number(gain_db):raise ValueError('')if not is_number(colour):raise ValueError('')effect_args = ['',''.format(gain_db),''.format(colour)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Apply non-linear distortion.\n\n Parameters\n ----------\n gain_db : float, default=20\n Controls the amount of distortion (dB).\n colour : float, default=20\n Controls the amount of even harmonic content in the output (dB).", "id": "f3809:c0:m39"} {"signature": "def pad(self, start_duration=, end_duration=):", "body": "if not is_number(start_duration) or start_duration < :raise ValueError(\"\")if not is_number(end_duration) or end_duration < :raise ValueError(\"\")effect_args = ['',''.format(start_duration),''.format(end_duration)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Add silence to the beginning or end of a file.\n Calling this with the default arguments has no effect.\n\n Parameters\n ----------\n start_duration : float\n Number of seconds of silence to add to beginning.\n end_duration : float\n Number of seconds of silence to add to end.\n\n See Also\n --------\n delay", "id": "f3809:c0:m40"} {"signature": "def phaser(self, gain_in=, gain_out=, delay=, decay=, speed=,modulation_shape=''):", "body": "if not is_number(gain_in) or gain_in <= or gain_in > :raise ValueError(\"\")if not is_number(gain_out) or gain_out <= or gain_out > :raise ValueError(\"\")if not is_number(delay) or delay <= or delay > :raise ValueError(\"\")if not is_number(decay) or decay < or decay > :raise ValueError(\"\")if not is_number(speed) or speed < or speed > :raise ValueError(\"\")if modulation_shape not in ['', '']:raise ValueError(\"\")effect_args = ['',''.format(gain_in),''.format(gain_out),''.format(delay),''.format(decay),''.format(speed)]if modulation_shape == '':effect_args.append('')elif modulation_shape == '':effect_args.append('')self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Apply a phasing effect to the audio.\n\n Parameters\n ----------\n gain_in : float, default=0.8\n Input volume between 0 and 1\n gain_out : float, default=0.74\n Output volume between 0 and 1\n delay : float, default=3\n Delay in milliseconds between 0 and 5\n decay : float, default=0.4\n Decay relative to gain_in, between 0.1 and 0.5.\n speed : float, default=0.5\n Modulation speed in Hz, between 0.1 and 2\n modulation_shape : str, default='sinusoidal'\n Modulation shape. One of 'sinusoidal' or 'triangular'\n\n See Also\n --------\n flanger, tremolo", "id": "f3809:c0:m41"} {"signature": "def pitch(self, n_semitones, quick=False):", "body": "if not is_number(n_semitones):raise ValueError(\"\")if n_semitones < - or n_semitones > :logger.warning(\"\"\"\")if not isinstance(quick, bool):raise ValueError(\"\")effect_args = ['']if quick:effect_args.append('')effect_args.append(''.format(n_semitones * ))self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Pitch shift the audio without changing the tempo.\n\n This effect uses the WSOLA algorithm.
The audio is chopped up into\n segments which are then shifted in the time domain and overlapped\n (cross-faded) at points where their waveforms are most similar as\n determined by measurement of least squares.\n\n Parameters\n ----------\n n_semitones : float\n The number of semitones to shift. Can be positive or negative.\n quick : bool, default=False\n If True, this effect will run faster but with lower sound quality.\n\n See Also\n --------\n bend, speed, tempo", "id": "f3809:c0:m42"} {"signature": "def rate(self, samplerate, quality=''):", "body": "quality_vals = ['', '', '', '', '']if not is_number(samplerate) or samplerate <= :raise ValueError(\"\")if quality not in quality_vals:raise ValueError(\"\".format(''.join(quality_vals)))effect_args = ['',''.format(quality),''.format(samplerate)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Change the audio sampling rate (i.e. resample the audio) to any\n given `samplerate`. Better the resampling quality = slower runtime.\n\n Parameters\n ----------\n samplerate : float\n Desired sample rate.\n quality : str\n Resampling quality. One of:\n * q : Quick - very low quality,\n * l : Low,\n * m : Medium,\n * h : High (default),\n * v : Very high\n\n See Also\n --------\n upsample, downsample, convert", "id": "f3809:c0:m43"} {"signature": "def remix(self, remix_dictionary=None, num_output_channels=None):", "body": "if not (isinstance(remix_dictionary, dict) orremix_dictionary is None):raise ValueError(\"\")if remix_dictionary is not None:if not all([isinstance(i, int) and i > for iin remix_dictionary.keys()]):raise ValueError(\"\")if not all([isinstance(v, list) for vin remix_dictionary.values()]):raise ValueError(\"\")for v_list in remix_dictionary.values():if not all([isinstance(v, int) and v > for v in v_list]):raise ValueError(\"\"\"\")if not ((isinstance(num_output_channels, int) andnum_output_channels > ) or num_output_channels is None):raise ValueError(\"\")effect_args = ['']if remix_dictionary is None:effect_args.append('')else:if num_output_channels is None:num_output_channels = max(remix_dictionary.keys())for channel in range(, num_output_channels + ):if channel in remix_dictionary.keys():out_channel = ''.join([str(i) for i in remix_dictionary[channel]])else:out_channel = ''effect_args.append(out_channel)self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Remix the channels of an audio file.\n\n Note: volume options are not yet implemented\n\n Parameters\n ----------\n remix_dictionary : dict or None\n Dictionary mapping output channel to list of input channel(s).\n Empty lists indicate the corresponding output channel should be\n empty. If None, mixes all channels down to a single mono file.\n num_output_channels : int or None\n The number of channels in the output file. If None, the number of\n output channels is equal to the largest key in remix_dictionary.\n If remix_dictionary is None, this variable is ignored.\n\n Examples\n --------\n Remix a 4-channel input file. 
The output file will have\n input channel 2 in channel 1, a mixdown of input channels 1 and 3 in\n channel 2, an empty channel 3, and a copy of input channel 4 in\n channel 4.\n\n >>> import sox\n >>> tfm = sox.Transformer()\n >>> remix_dictionary = {1: [2], 2: [1, 3], 4: [4]}\n >>> tfm.remix(remix_dictionary)", "id": "f3809:c0:m44"} {"signature": "def repeat(self, count=):", "body": "if not isinstance(count, int) or count < :raise ValueError(\"\")effect_args = ['', ''.format(count)]self.effects.extend(effect_args)self.effects_log.append('')", "docstring": "Repeat the entire audio count times.\n\n Parameters\n ----------\n count : int, default=1\n The number of times to repeat the audio.", "id": "f3809:c0:m45"} {"signature": "def reverb(self, reverberance=, high_freq_damping=, room_scale=,stereo_depth=, pre_delay=, wet_gain=, wet_only=False):", "body": "if (not is_number(reverberance) or reverberance < orreverberance > ):raise ValueError(\"\")if (not is_number(high_freq_damping) or high_freq_damping < orhigh_freq_damping > ):raise ValueError(\"\")if (not is_number(room_scale) or room_scale < orroom_scale > ):raise ValueError(\"\")if (not is_number(stereo_depth) or stereo_depth < orstereo_depth > ):raise ValueError(\"\")if not is_number(pre_delay) or pre_delay < :raise ValueError(\"\")if not is_number(wet_gain):raise ValueError(\"\")if not isinstance(wet_only, bool):raise ValueError(\"\")effect_args = ['']if wet_only:effect_args.append('')effect_args.extend([''.format(reverberance),''.format(high_freq_damping),''.format(room_scale),''.format(stereo_depth),''.format(pre_delay),''.format(wet_gain)])self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Add reverberation to the audio using the \u2018freeverb\u2019 algorithm.\n A reverberation effect is sometimes desirable for concert halls that\n are too small or contain so many people that the hall\u2019s natural\n reverberance is diminished.
Applying a small amount of stereo reverb\n to a (dry) mono signal will usually make it sound more natural.\n\n Parameters\n ----------\n reverberance : float, default=50\n Percentage of reverberance\n high_freq_damping : float, default=50\n Percentage of high-frequency damping.\n room_scale : float, default=100\n Scale of the room as a percentage.\n stereo_depth : float, default=100\n Stereo depth as a percentage.\n pre_delay : float, default=0\n Pre-delay in milliseconds.\n wet_gain : float, default=0\n Amount of wet gain in dB\n wet_only : bool, default=False\n If True, only outputs the wet signal.\n\n See Also\n --------\n echo", "id": "f3809:c0:m46"} {"signature": "def reverse(self):", "body": "effect_args = ['']self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Reverse the audio completely", "id": "f3809:c0:m47"} {"signature": "def silence(self, location=, silence_threshold=,min_silence_duration=, buffer_around_silence=False):", "body": "if location not in [-, , ]:raise ValueError(\"\")if not is_number(silence_threshold) or silence_threshold < :raise ValueError(\"\")elif silence_threshold >= :raise ValueError(\"\")if not is_number(min_silence_duration) or min_silence_duration <= :raise ValueError(\"\")if not isinstance(buffer_around_silence, bool):raise ValueError(\"\")effect_args = []if location == -:effect_args.append('')if buffer_around_silence:effect_args.extend(['', ''])else:effect_args.append('')effect_args.extend(['',''.format(min_silence_duration),''.format(silence_threshold)])if location == :effect_args.extend(['',''.format(min_silence_duration),''.format(silence_threshold)])if location == -:effect_args.append('')self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Removes silent regions from an audio file.\n\n Parameters\n ----------\n location : int, default=0\n Where to remove silence. 
One of:\n * 0 to remove silence throughout the file (default),\n * 1 to remove silence from the beginning,\n * -1 to remove silence from the end,\n silence_threshold : float, default=0.1\n Silence threshold as percentage of maximum sample amplitude.\n Must be between 0 and 100.\n min_silence_duration : float, default=0.1\n The minimum ammount of time in seconds required for a region to be\n considered non-silent.\n buffer_around_silence : bool, default=False\n If True, leaves a buffer of min_silence_duration around removed\n silent regions.\n\n See Also\n --------\n vad", "id": "f3809:c0:m48"} {"signature": "def sinc(self, filter_type='', cutoff_freq=,stop_band_attenuation=, transition_bw=None,phase_response=None):", "body": "filter_types = ['', '', '', '']if filter_type not in filter_types:raise ValueError(\"\".format(''.join(filter_types)))if not (is_number(cutoff_freq) or isinstance(cutoff_freq, list)):raise ValueError(\"\")if filter_type in ['', ''] and isinstance(cutoff_freq, list):raise ValueError(\"\"\"\")if filter_type in ['', ''] and is_number(cutoff_freq):raise ValueError(\"\"\"\")if is_number(cutoff_freq) and cutoff_freq <= :raise ValueError(\"\")if isinstance(cutoff_freq, list):if len(cutoff_freq) != :raise ValueError(\"\")if any([not is_number(f) or f <= for f in cutoff_freq]):raise ValueError(\"\")cutoff_freq = sorted(cutoff_freq)if not is_number(stop_band_attenuation) or stop_band_attenuation < :raise ValueError(\"\")if not (is_number(transition_bw) orisinstance(transition_bw, list) or transition_bw is None):raise ValueError(\"\")if filter_type in ['', ''] and isinstance(transition_bw, list):raise ValueError(\"\"\"\")if is_number(transition_bw) and transition_bw <= :raise ValueError(\"\")if isinstance(transition_bw, list):if any([not is_number(f) or f <= for f in transition_bw]):raise ValueError(\"\")if len(transition_bw) != :raise ValueError(\"\")if phase_response is not None and not is_number(phase_response):raise ValueError(\"\")if (is_number(phase_response) and(phase_response < or phase_response > )):raise ValueError(\"\")effect_args = ['']effect_args.extend(['', ''.format(stop_band_attenuation)])if phase_response is not None:effect_args.extend(['', ''.format(phase_response)])if filter_type == '':if transition_bw is not None:effect_args.extend(['', ''.format(transition_bw)])effect_args.append(''.format(cutoff_freq))elif filter_type == '':effect_args.append(''.format(cutoff_freq))if transition_bw is not None:effect_args.extend(['', ''.format(transition_bw)])else:if is_number(transition_bw):effect_args.extend(['', ''.format(transition_bw)])elif isinstance(transition_bw, list):effect_args.extend(['', ''.format(transition_bw[])])if filter_type == '':effect_args.append(''.format(cutoff_freq[], cutoff_freq[]))elif filter_type == '':effect_args.append(''.format(cutoff_freq[], cutoff_freq[]))if isinstance(transition_bw, list):effect_args.extend(['', ''.format(transition_bw[])])self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Apply a sinc kaiser-windowed low-pass, high-pass, band-pass, or\n band-reject filter to the signal.\n\n Parameters\n ----------\n filter_type : str, default='high'\n Type of filter. One of:\n - 'high' for a high-pass filter\n - 'low' for a low-pass filter\n - 'pass' for a band-pass filter\n - 'reject' for a band-reject filter\n cutoff_freq : float or list, default=3000\n A scalar or length 2 list indicating the filter's critical\n frequencies. The critical frequencies are given in Hz and must be\n positive. 
For a high-pass or low-pass filter, cutoff_freq\n must be a scalar. For a band-pass or band-reject filter, it must be\n a length 2 list.\n stop_band_attenuation : float, default=120\n The stop band attenuation in dB\n transition_bw : float, list or None, default=None\n The transition band-width in Hz.\n If None, sox's default of 5% of the total bandwidth is used.\n If a float, the given transition bandwidth is used for both the\n upper and lower bands (if applicable).\n If a list, the first argument is used for the lower band and the\n second for the upper band.\n phase_response : float or None\n The filter's phase response between 0 (minimum) and 100 (maximum).\n If None, sox's default phase response is used.\n\n See Also\n --------\n band, bandpass, bandreject, highpass, lowpass", "id": "f3809:c0:m49"} {"signature": "def speed(self, factor):", "body": "if not is_number(factor) or factor <= :raise ValueError(\"\")if factor < or factor > :logger.warning(\"\")effect_args = ['', ''.format(factor)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Adjust the audio speed (pitch and tempo together).\n\n Technically, the speed effect only changes the sample rate information,\n leaving the samples themselves untouched. The rate effect is invoked\n automatically to resample to the output sample rate, using its default\n quality/speed. For higher quality or higher speed resampling, in\n addition to the speed effect, specify the rate effect with the desired\n quality option.\n\n Parameters\n ----------\n factor : float\n The ratio of the new speed to the old speed.\n For ex. 1.1 speeds up the audio by 10%; 0.9 slows it down by 10%.\n Note - this argument is the inverse of what is passed to the sox\n stretch effect for consistency with speed.\n\n See Also\n --------\n rate, tempo, pitch", "id": "f3809:c0:m50"} {"signature": "def stat(self, input_filepath, scale=None, rms=False):", "body": "effect_args = ['', '', '']if scale is not None:if not is_number(scale) or scale <= :raise ValueError(\"\")effect_args.extend(['', ''.format(scale)])if rms:effect_args.append('')_, _, stat_output = self.build(input_filepath, None, extra_args=effect_args, return_output=True)stat_dict = {}lines = stat_output.split('')for line in lines:split_line = line.split()if len(split_line) == :continuevalue = split_line[-]key = ''.join(split_line[:-])stat_dict[key.strip('')] = valuereturn stat_dict", "docstring": "Display time and frequency domain statistical information about the\n audio. Audio is passed unmodified through the SoX processing chain.\n\n Unlike other Transformer methods, this does not modify the transformer\n effects chain.
Instead it computes statistics on the output file that\n would be created if the build command were invoked.\n\n Note: The file is downmixed to mono prior to computation.\n\n Parameters\n ----------\n input_filepath : str\n Path to input file to compute stats on.\n scale : float or None, default=None\n If not None, scales the input by the given scale factor.\n rms : bool, default=False\n If True, scales all values by the average rms amplitude.\n\n Returns\n -------\n stat_dict : dict\n Dictionary of statistics.\n\n See Also\n --------\n stats, power_spectrum, sox.file_info", "id": "f3809:c0:m51"} {"signature": "def power_spectrum(self, input_filepath):", "body": "effect_args = ['', '', '', '']_, _, stat_output = self.build(input_filepath, None, extra_args=effect_args, return_output=True)power_spectrum = []lines = stat_output.split('')for line in lines:split_line = line.split()if len(split_line) != :continuefreq, amp = split_linepower_spectrum.append([float(freq), float(amp)])return power_spectrum", "docstring": "Calculates the power spectrum (4096 point DFT). This method\n internally invokes the stat command with the -freq option.\n\n Note: The file is downmixed to mono prior to computation.\n\n Parameters\n ----------\n input_filepath : str\n Path to input file to compute stats on.\n\n Returns\n -------\n power_spectrum : list\n List of frequency (Hz), amplitude pairs.\n\n See Also\n --------\n stat, stats, sox.file_info", "id": "f3809:c0:m52"} {"signature": "def stats(self, input_filepath):", "body": "effect_args = ['', '', '']_, _, stats_output = self.build(input_filepath, None, extra_args=effect_args, return_output=True)stats_dict = {}lines = stats_output.split('')for line in lines:split_line = line.split()if len(split_line) == :continuevalue = split_line[-]key = ''.join(split_line[:-])stats_dict[key] = valuereturn stats_dict", "docstring": "Display time domain statistical information about the audio\n channels. Audio is passed unmodified through the SoX processing chain.\n Statistics are calculated and displayed for each audio channel\n\n Unlike other Transformer methods, this does not modify the transformer\n effects chain. Instead it computes statistics on the output file that\n would be created if the build command were invoked.\n\n Note: The file is downmixed to mono prior to computation.\n\n Parameters\n ----------\n input_filepath : str\n Path to input file to compute stats on.\n\n Returns\n -------\n stats_dict : dict\n List of frequency (Hz), amplitude pairs.\n\n See Also\n --------\n stat, sox.file_info", "id": "f3809:c0:m53"} {"signature": "def stretch(self, factor, window=):", "body": "if not is_number(factor) or factor <= :raise ValueError(\"\")if factor < or factor > :logger.warning(\"\"\"\")if abs(factor - ) > :logger.warning(\"\"\"\")if not is_number(window) or window <= :raise ValueError(\"\")effect_args = ['', ''.format(factor), ''.format(window)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Change the audio duration (but not its pitch).\n **Unless factor is close to 1, use the tempo effect instead.**\n\n This effect is broadly equivalent to the tempo effect with search set\n to zero, so in general, its results are comparatively poor; it is\n retained as it can sometimes out-perform tempo for small factors.\n\n Parameters\n ----------\n factor : float\n The ratio of the new tempo to the old tempo.\n For ex. 
1.1 speeds up the tempo by 10%; 0.9 slows it down by 10%.\n Note - this argument is the inverse of what is passed to the sox\n stretch effect for consistency with tempo.\n window : float, default=20\n Window size in milliseconds\n\n See Also\n --------\n tempo, speed, pitch", "id": "f3809:c0:m54"} {"signature": "def swap(self):", "body": "effect_args = ['']self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Swap stereo channels. If the input is not stereo, pairs of channels\n are swapped, and a possible odd last channel passed through.\n\n E.g., for seven channels, the output order will be 2, 1, 4, 3, 6, 5, 7.\n\n See Also\n --------\n remix", "id": "f3809:c0:m55"} {"signature": "def tempo(self, factor, audio_type=None, quick=False):", "body": "if not is_number(factor) or factor <= :raise ValueError(\"\")if factor < or factor > :logger.warning(\"\"\"\")if abs(factor - ) <= :logger.warning(\"\"\"\")if audio_type not in [None, '', '', '']:raise ValueError(\"\")if not isinstance(quick, bool):raise ValueError(\"\")effect_args = ['']if quick:effect_args.append('')if audio_type is not None:effect_args.append(''.format(audio_type))effect_args.append(''.format(factor))self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Time stretch audio without changing pitch.\n\n This effect uses the WSOLA algorithm. The audio is chopped up into\n segments which are then shifted in the time domain and overlapped\n (cross-faded) at points where their waveforms are most similar as\n determined by measurement of least squares.\n\n Parameters\n ----------\n factor : float\n The ratio of new tempo to the old tempo.\n For ex. 1.1 speeds up the tempo by 10%; 0.9 slows it down by 10%.\n audio_type : str\n Type of audio, which optimizes algorithm parameters. One of:\n * m : Music,\n * s : Speech,\n * l : Linear (useful when factor is close to 1),\n quick : bool, default=False\n If True, this effect will run faster but with lower sound quality.\n\n See Also\n --------\n stretch, speed, pitch", "id": "f3809:c0:m56"} {"signature": "def treble(self, gain_db, frequency=, slope=):", "body": "if not is_number(gain_db):raise ValueError(\"\")if not is_number(frequency) or frequency <= :raise ValueError(\"\")if not is_number(slope) or slope <= or slope > :raise ValueError(\"\")effect_args = ['', ''.format(gain_db), ''.format(frequency),''.format(slope)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Boost or cut the treble (upper) frequencies of the audio using a\n two-pole shelving filter with a response similar to that of a standard\n hi-fi\u2019s tone-controls.
This is also known as shelving equalisation.\n\n The filters are described in detail in\n http://musicdsp.org/files/Audio-EQ-Cookbook.txt\n\n Parameters\n ----------\n gain_db : float\n The gain at the Nyquist frequency.\n For a large cut use -20, for a large boost use 20.\n frequency : float, default=100.0\n The filter's cutoff frequency in Hz.\n slope : float, default=0.5\n The steepness of the filter's shelf transition.\n For a gentle slope use 0.3, and use 1.0 for a steep slope.\n\n See Also\n --------\n bass, equalizer", "id": "f3809:c0:m57"} {"signature": "def tremolo(self, speed=, depth=):", "body": "if not is_number(speed) or speed <= :raise ValueError(\"\")if not is_number(depth) or depth <= or depth > :raise ValueError(\"\")effect_args = ['',''.format(speed),''.format(depth)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Apply a tremolo (low frequency amplitude modulation) effect to the\n audio. The tremolo frequency in Hz is given by speed, and the depth\n as a percentage by depth (default 40).\n\n Parameters\n ----------\n speed : float\n Tremolo speed in Hz.\n depth : float\n Tremolo depth as a percentage of the total amplitude.\n\n See Also\n --------\n flanger\n\n Examples\n --------\n >>> tfm = sox.Transformer()\n\n For a growl-type effect\n\n >>> tfm.tremolo(speed=100.0)", "id": "f3809:c0:m58"} {"signature": "def trim(self, start_time, end_time=None):", "body": "if not is_number(start_time) or start_time < :raise ValueError(\"\")effect_args = ['',''.format(start_time)]if end_time is not None:if not is_number(end_time) or end_time < :raise ValueError(\"\")if start_time >= end_time:raise ValueError(\"\")effect_args.append(''.format(end_time - start_time))self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Excerpt a clip from an audio file, given the start timestamp and end timestamp of the clip within the file, expressed in seconds. If the end timestamp is set to `None` or left unspecified, it defaults to the duration of the audio file.\n\n Parameters\n ----------\n start_time : float\n Start time of the clip (seconds)\n end_time : float or None, default=None\n End time of the clip (seconds)", "id": "f3809:c0:m59"} {"signature": "def upsample(self, factor=):", "body": "if not isinstance(factor, int) or factor < :raise ValueError('')effect_args = ['', ''.format(factor)]self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Upsample the signal by an integer factor: zero-value samples are\n inserted between each pair of input samples. As a result, the original\n spectrum is replicated into the new frequency space (imaging) and\n attenuated.
The upsample effect is typically used in combination with\n filtering effects.\n\n Parameters\n ----------\n factor : int, default=2\n Integer upsampling factor.\n\n See Also\n --------\n rate, downsample", "id": "f3809:c0:m60"} {"signature": "def vad(self, location=, normalize=True, activity_threshold=,min_activity_duration=, initial_search_buffer=,max_gap=, initial_pad=):", "body": "if location not in [-, ]:raise ValueError(\"\")if not isinstance(normalize, bool):raise ValueError(\"\")if not is_number(activity_threshold):raise ValueError(\"\")if not is_number(min_activity_duration) or min_activity_duration < :raise ValueError(\"\")if not is_number(initial_search_buffer) or initial_search_buffer < :raise ValueError(\"\")if not is_number(max_gap) or max_gap < :raise ValueError(\"\")if not is_number(initial_pad) or initial_pad < :raise ValueError(\"\")effect_args = []if normalize:effect_args.append('')if location == -:effect_args.append('')effect_args.extend(['','', ''.format(activity_threshold),'', ''.format(min_activity_duration),'', ''.format(initial_search_buffer),'', ''.format(max_gap),'', ''.format(initial_pad)])if location == -:effect_args.append('')self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Voice Activity Detector. Attempts to trim silence and quiet\n background sounds from the ends of recordings of speech. The algorithm\n currently uses a simple cepstral power measurement to detect voice, so\n may be fooled by other things, especially music.\n\n The effect can trim only from the front of the audio, so in order to\n trim from the back, the reverse effect must also be used.\n\n Parameters\n ----------\n location : 1 or -1, default=1\n If 1, trims silence from the beginning\n If -1, trims silence from the end\n normalize : bool, default=True\n If true, normalizes audio before processing.\n activity_threshold : float, default=7.0\n The measurement level used to trigger activity detection. 
This may\n need to be changed depending on the noise level, signal level, and\n other characteristics of the input audio.\n min_activity_duration : float, default=0.25\n The time constant (in seconds) used to help ignore short bursts of\n sound.\n initial_search_buffer : float, default=1.0\n The amount of audio (in seconds) to search for quieter/shorter\n bursts of audio to include prior to the detected trigger point.\n max_gap : float, default=0.25\n The allowed gap (in seconds) between quieter/shorter bursts of\n audio to include prior to the detected trigger point.\n initial_pad : float, default=0.0\n The amount of audio (in seconds) to preserve before the trigger\n point and any found quieter/shorter bursts.\n\n See Also\n --------\n silence\n\n Examples\n --------\n >>> tfm = sox.Transformer()\n\n Remove silence from the beginning of speech\n\n >>> tfm.vad(initial_pad=0.3)\n\n Remove silence from the end of speech\n\n >>> tfm.vad(location=-1, initial_pad=0.2)", "id": "f3809:c0:m61"} {"signature": "def vol(self, gain, gain_type='', limiter_gain=None):", "body": "if not is_number(gain):raise ValueError('')if limiter_gain is not None:if (not is_number(limiter_gain) orlimiter_gain <= or limiter_gain >= ):raise ValueError('')if gain_type in ['', ''] and gain < :raise ValueError(\"\")effect_args = ['']effect_args.append(''.format(gain))if gain_type == '':effect_args.append('')elif gain_type == '':effect_args.append('')elif gain_type == '':effect_args.append('')else:raise ValueError('')if limiter_gain is not None:if gain_type in ['', ''] and gain > :effect_args.append(''.format(limiter_gain))elif gain_type == '' and gain > :effect_args.append(''.format(limiter_gain))self.effects.extend(effect_args)self.effects_log.append('')return self", "docstring": "Apply an amplification or an attenuation to the audio signal.\n\n Parameters\n ----------\n gain : float\n Interpreted according to the given `gain_type`.\n If `gain_type' = 'amplitude', `gain' is a positive amplitude ratio.\n If `gain_type' = 'power', `gain' is a power (voltage squared).\n If `gain_type' = 'db', `gain' is in decibels.\n gain_type : string, default='amplitude'\n Type of gain.
One of:\n - 'amplitude'\n - 'power'\n - 'db'\n limiter_gain : float or None, default=None\n If specified, a limiter is invoked on peaks greater than\n `limiter_gain' to prevent clipping.\n `limiter_gain` should be a positive value much less than 1.\n\n See Also\n --------\n gain, compand", "id": "f3809:c0:m62"} {"signature": "def bitrate(input_filepath):", "body": "validate_input_file(input_filepath)output = soxi(input_filepath, '')if output == '':logger.warning(\"\", input_filepath)return int(output)", "docstring": "Number of bits per sample (0 if not applicable).\n\nParameters\n----------\ninput_filepath : str\n Path to audio file.\n\nReturns\n-------\nbitrate : int\n number of bits per sample\n returns 0 if not applicable", "id": "f3811:m0"} {"signature": "def channels(input_filepath):", "body": "validate_input_file(input_filepath)output = soxi(input_filepath, '')return int(output)", "docstring": "Show number of channels.\n\nParameters\n----------\ninput_filepath : str\n Path to audio file.\n\nReturns\n-------\nchannels : int\n number of channels", "id": "f3811:m1"} {"signature": "def comments(input_filepath):", "body": "validate_input_file(input_filepath)output = soxi(input_filepath, '')return str(output)", "docstring": "Show file comments (annotations) if available.\n\nParameters\n----------\ninput_filepath : str\n Path to audio file.\n\nReturns\n-------\ncomments : str\n File comments from header.\n If no comments are present, returns an empty string.", "id": "f3811:m2"} {"signature": "def duration(input_filepath):", "body": "validate_input_file(input_filepath)output = soxi(input_filepath, '')if output == '':logger.warning(\"\", input_filepath)return float(output)", "docstring": "Show duration in seconds (0 if unavailable).\n\nParameters\n----------\ninput_filepath : str\n Path to audio file.\n\nReturns\n-------\nduration : float\n Duration of audio file in seconds.\n If unavailable or empty, returns 0.", "id": "f3811:m3"} {"signature": "def encoding(input_filepath):", "body": "validate_input_file(input_filepath)output = soxi(input_filepath, '')return str(output)", "docstring": "Show the name of the audio encoding.\n\nParameters\n----------\ninput_filepath : str\n Path to audio file.\n\nReturns\n-------\nencoding : str\n audio encoding type", "id": "f3811:m4"} {"signature": "def file_type(input_filepath):", "body": "validate_input_file(input_filepath)output = soxi(input_filepath, '')return str(output)", "docstring": "Show detected file-type.\n\nParameters\n----------\ninput_filepath : str\n Path to audio file.\n\nReturns\n-------\nfile_type : str\n file format type (ex. 
'wav')", "id": "f3811:m5"} {"signature": "def num_samples(input_filepath):", "body": "validate_input_file(input_filepath)output = soxi(input_filepath, '')if output == '':logger.warning(\"\", input_filepath)return int(output)", "docstring": "Show number of samples (0 if unavailable).\n\nParameters\n----------\ninput_filepath : str\n Path to audio file.\n\nReturns\n-------\nn_samples : int\n total number of samples in audio file.\n Returns 0 if empty or unavailable", "id": "f3811:m6"} {"signature": "def sample_rate(input_filepath):", "body": "validate_input_file(input_filepath)output = soxi(input_filepath, '')return float(output)", "docstring": "Show sample-rate.\n\nParameters\n----------\ninput_filepath : str\n Path to audio file.\n\nReturns\n-------\nsamplerate : float\n number of samples/second", "id": "f3811:m7"} {"signature": "def silent(input_filepath, threshold=):", "body": "validate_input_file(input_filepath)stat_dictionary = stat(input_filepath)mean_norm = stat_dictionary['']if mean_norm is not float(''):if mean_norm >= threshold:return Falseelse:return Trueelse:return True", "docstring": "Determine if an input file is silent.\n\nParameters\n----------\ninput_filepath : str\n The input filepath.\nthreshold : float\n Threshold for determining silence\n\nReturns\n-------\nis_silent : bool\n True if file is determined silent.", "id": "f3811:m8"} {"signature": "def validate_input_file(input_filepath):", "body": "if not os.path.exists(input_filepath):raise IOError(\"\".format(input_filepath))ext = file_extension(input_filepath)if ext not in VALID_FORMATS:logger.info(\"\", \"\".join(VALID_FORMATS))logger.warning(\"\".format(ext))", "docstring": "Input file validation function. Checks that file exists and can be\n processed by SoX.\n\n Parameters\n ----------\n input_filepath : str\n The input filepath.", "id": "f3811:m9"} {"signature": "def validate_input_file_list(input_filepath_list):", "body": "if not isinstance(input_filepath_list, list):raise TypeError(\"\")elif len(input_filepath_list) < :raise ValueError(\"\")for input_filepath in input_filepath_list:validate_input_file(input_filepath)", "docstring": "Input file list validation function. Checks that object is a list and\n contains valid filepaths that can be processed by SoX.\n\n Parameters\n ----------\n input_filepath_list : list\n A list of filepaths.", "id": "f3811:m10"} {"signature": "def validate_output_file(output_filepath):", "body": "nowrite_conditions = [bool(os.path.dirname(output_filepath)) ornot os.access(os.getcwd(), os.W_OK),not os.access(os.path.dirname(output_filepath), os.W_OK)]if all(nowrite_conditions):raise IOError(\"\".format(output_filepath))ext = file_extension(output_filepath)if ext not in VALID_FORMATS:logger.info(\"\", \"\".join(VALID_FORMATS))logger.warning(\"\".format(ext))if os.path.exists(output_filepath):logger.warning('',output_filepath)", "docstring": "Output file validation function. Checks that file can be written, and\n has a valid file extension. 
Throws a warning if the path already exists,\n as it will be overwritten on build.\n\n Parameters\n ----------\n output_filepath : str\n The output filepath.\n\n Returns:\n --------\n output_filepath : str\n The output filepath.", "id": "f3811:m11"} {"signature": "def file_extension(filepath):", "body": "return os.path.splitext(filepath)[][:]", "docstring": "Get the extension of a filepath.\n\n Parameters\n ----------\n filepath : str\n File path.\n\n Returns:\n --------\n extension : str\n The file's extension", "id": "f3811:m12"} {"signature": "def info(filepath):", "body": "info_dictionary = {'': channels(filepath),'': sample_rate(filepath),'': bitrate(filepath),'': duration(filepath),'': num_samples(filepath),'': encoding(filepath),'': silent(filepath)}return info_dictionary", "docstring": "Get a dictionary of file information\n\n Parameters\n ----------\n filepath : str\n File path.\n\n Returns:\n --------\n info_dictionary : dict\n Dictionary of file information. Fields are:\n * channels\n * sample_rate\n * bitrate\n * duration\n * num_samples\n * encoding\n * silent", "id": "f3811:m13"} {"signature": "def stat(filepath):", "body": "stat_output = _stat_call(filepath)stat_dictionary = _parse_stat(stat_output)return stat_dictionary", "docstring": "Returns a dictionary of audio statistics.\n\n Parameters\n ----------\n filepath : str\n File path.\n\n Returns\n -------\n stat_dictionary : dict\n Dictionary of audio statistics.", "id": "f3811:m14"} {"signature": "def _stat_call(filepath):", "body": "validate_input_file(filepath)args = ['', filepath, '', '']_, _, stat_output = sox(args)return stat_output", "docstring": "Call sox's stat function.\n\n Parameters\n ----------\n filepath : str\n File path.\n\n Returns\n -------\n stat_output : str\n Sox output from stderr.", "id": "f3811:m15"} {"signature": "def _parse_stat(stat_output):", "body": "lines = stat_output.split('')stat_dict = {}for line in lines:split_line = line.split('')if len(split_line) == :key = split_line[]val = split_line[].strip('')try:val = float(val)except ValueError:val = Nonestat_dict[key] = valreturn stat_dict", "docstring": "Parse the string output from sox's stat function\n\n Parameters\n ----------\n stat_output : str\n Sox output from stderr.\n\n Returns\n -------\n stat_dictionary : dict\n Dictionary of audio statistics.", "id": "f3811:m16"} {"signature": "def sox(args):", "body": "if args[].lower() != \"\":args.insert(, \"\")else:args[] = \"\"try:logger.info(\"\", ''.join(args))process_handle = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)out, err = process_handle.communicate()out = out.decode(\"\")err = err.decode(\"\")status = process_handle.returncodereturn status, out, errexcept OSError as error_msg:logger.error(\"\", error_msg)except TypeError as error_msg:logger.error(\"\", error_msg)return , None, None", "docstring": "Pass an argument list to SoX.\n\n Parameters\n ----------\n args : iterable\n Argument list for SoX. 
The first item can, but does not\n need to, be 'sox'.\n\n Returns:\n --------\n status : bool\n True on success.", "id": "f3813:m0"} {"signature": "def _get_valid_formats():", "body": "if NO_SOX:return []so = subprocess.check_output(['', ''])if type(so) is not str:so = str(so, encoding='')so = so.split('')idx = [i for i in range(len(so)) if '' in so[i]][]formats = so[idx].split('')[:]return formats", "docstring": "Calls SoX help for a list of audio formats available with the current\n install of SoX.\n\n Returns:\n --------\n formats : list\n List of audio file extensions that SoX can process.", "id": "f3813:m1"} {"signature": "def soxi(filepath, argument):", "body": "if argument not in SOXI_ARGS:raise ValueError(\"\".format(argument))args = ['', '']args.append(\"\".format(argument))args.append(filepath)try:shell_output = subprocess.check_output(args,stderr=subprocess.PIPE)except CalledProcessError as cpe:logger.info(\"\".format(cpe.output))raise SoxiError(\"\".format(cpe.returncode))shell_output = shell_output.decode(\"\")return str(shell_output).strip('')", "docstring": "Base call to SoXI.\n\n Parameters\n ----------\n filepath : str\n Path to audio file.\n\n argument : str\n Argument to pass to SoXI.\n\n Returns\n -------\n shell_output : str\n Command line output of SoXI", "id": "f3813:m2"} {"signature": "def play(args):", "body": "if args[].lower() != \"\":args.insert(, \"\")else:args[] = \"\"try:logger.info(\"\", \"\".join(args))process_handle = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)status = process_handle.wait()if process_handle.stderr is not None:logger.info(process_handle.stderr)if status == :return Trueelse:logger.info(\"\", status)return Falseexcept OSError as error_msg:logger.error(\"\", error_msg)except TypeError as error_msg:logger.error(\"\", error_msg)return False", "docstring": "Pass an argument list to play.\n\n Parameters\n ----------\n args : iterable\n Argument list for play. 
The first item can, but does not\n need to, be 'play'.\n\n Returns:\n --------\n status : bool\n True on success.", "id": "f3813:m3"} {"signature": "def is_number(var):", "body": "try:float(var)return Trueexcept ValueError:return Falseexcept TypeError:return False", "docstring": "Check if variable is a numeric value.\n\n Parameters\n ----------\n var : object\n\n Returns:\n --------\n bool\n True if var is numeric, False otherwise.", "id": "f3813:m4"} {"signature": "def all_equal(list_of_things):", "body": "return len(set(list_of_things)) <= ", "docstring": "Check if a list contains identical elements.\n\n Parameters\n ----------\n list_of_things : list\n list of objects\n\n Returns:\n --------\n bool\n True if all list elements are the same.", "id": "f3813:m5"} {"signature": "@app.websocket_route(\"\")def ws(session):", "body": "pass", "docstring": "ws", "id": "f3834:m0"} {"signature": "@app.route(\"\", methods=[\"\", \"\"])def list_users(request):", "body": "pass", "docstring": "responses:\n 200:\n description: A list of users.\n examples:\n [{\"username\": \"tom\"}, {\"username\": \"lucy\"}]", "id": "f3834:m1"} {"signature": "@app.route(\"\", methods=[\"\"])def create_user(request):", "body": "pass", "docstring": "responses:\n 200:\n description: A user.\n examples:\n {\"username\": \"tom\"}", "id": "f3834:m2"} {"signature": "@app.route(\"\")def regular_docstring_and_schema(request):", "body": "pass", "docstring": "This a regular docstring example (not included in schema)\n\n---\n\nresponses:\n 200:\n description: This is included in the schema.", "id": "f3834:m3"} {"signature": "@app.route(\"\")def regular_docstring(request):", "body": "pass", "docstring": "This a regular docstring example (not included in schema)", "id": "f3834:m4"} {"signature": "@subapp.route(\"\")def subapp_endpoint(request):", "body": "pass", "docstring": "responses:\n 200:\n description: This endpoint is part of a subapp.", "id": "f3834:m6"} {"signature": "def get(self, request):", "body": "pass", "docstring": "responses:\n 200:\n description: A list of organisations.\n examples:\n [{\"name\": \"Foo Corp.\"}, {\"name\": \"Acme Ltd.\"}]", "id": "f3834:c0:m0"} {"signature": "def post(self, request):", "body": "pass", "docstring": "responses:\n 200:\n description: An organisation.\n examples:\n {\"name\": \"Foo Corp.\"}", "id": "f3834:c0:m1"} {"signature": "def get_version(package):", "body": "with open(os.path.join(package, \"\")) as f:return re.search(\"\", f.read()).group()", "docstring": "Return package version as listed in `__version__` in `init.py`.", "id": "f3842:m0"} {"signature": "def get_long_description():", "body": "with open(\"\", encoding=\"\") as f:return f.read()", "docstring": "Return the README.", "id": "f3842:m1"} {"signature": "def get_packages(package):", "body": "return [dirpathfor dirpath, dirnames, filenames in os.walk(package)if os.path.exists(os.path.join(dirpath, \"\"))]", "docstring": "Return root package and all sub-packages.", "id": "f3842:m2"} {"signature": "async def on_connect(self, websocket: WebSocket) -> None:", "body": "await websocket.accept()", "docstring": "Override to handle an incoming websocket connection", "id": "f3845:c1:m4"} {"signature": "async def on_receive(self, websocket: WebSocket, data: typing.Any) -> None:", "body": "", "docstring": "Override to handle an incoming websocket message", "id": "f3845:c1:m5"} {"signature": "async def on_disconnect(self, websocket: WebSocket, close_code: int) -> None:", "body": "", "docstring": "Override to handle a disconnecting 
websocket", "id": "f3845:c1:m6"} {"signature": "def request_response(func: typing.Callable) -> ASGIApp:", "body": "is_coroutine = asyncio.iscoroutinefunction(func)async def app(scope: Scope, receive: Receive, send: Send) -> None:request = Request(scope, receive=receive)if is_coroutine:response = await func(request)else:response = await run_in_threadpool(func, request)await response(scope, receive, send)return app", "docstring": "Takes a function or coroutine `func(request) -> response`,\nand returns an ASGI application.", "id": "f3847:m0"} {"signature": "def websocket_session(func: typing.Callable) -> ASGIApp:", "body": "async def app(scope: Scope, receive: Receive, send: Send) -> None:session = WebSocket(scope, receive=receive, send=send)await func(session)return app", "docstring": "Takes a coroutine `func(session)`, and returns an ASGI application.", "id": "f3847:m1"} {"signature": "def compile_path(path: str) -> typing.Tuple[typing.Pattern, str, typing.Dict[str, Convertor]]:", "body": "path_regex = \"\"path_format = \"\"idx = param_convertors = {}for match in PARAM_REGEX.finditer(path):param_name, convertor_type = match.groups(\"\")convertor_type = convertor_type.lstrip(\"\")assert (convertor_type in CONVERTOR_TYPES), f\"\"convertor = CONVERTOR_TYPES[convertor_type]path_regex += path[idx : match.start()]path_regex += f\"\"path_format += path[idx : match.start()]path_format += \"\" % param_nameparam_convertors[param_name] = convertoridx = match.end()path_regex += path[idx:] + \"\"path_format += path[idx:]return re.compile(path_regex), path_format, param_convertors", "docstring": "Given a path string, like: \"/{username:str}\", return a three-tuple\nof (regex, format, {param_name:convertor}).\n\nregex: \"/(?P[^/]+)\"\nformat: \"/{username}\"\nconvertors: {\"username\": StringConvertor()}", "id": "f3847:m4"} {"signature": "def build_environ(scope: Scope, body: bytes) -> dict:", "body": "environ = {\"\": scope[\"\"],\"\": scope.get(\"\", \"\"),\"\": scope[\"\"],\"\": scope[\"\"].decode(\"\"),\"\": f\"\",\"\": (, ),\"\": scope.get(\"\", \"\"),\"\": io.BytesIO(body),\"\": sys.stdout,\"\": True,\"\": True,\"\": False,}server = scope.get(\"\") or (\"\", )environ[\"\"] = server[]environ[\"\"] = server[]if scope.get(\"\"):environ[\"\"] = scope[\"\"][]for name, value in scope.get(\"\", []):name = name.decode(\"\")if name == \"\":corrected_name = \"\"elif name == \"\":corrected_name = \"\"else:corrected_name = f\"\".upper().replace(\"\", \"\")value = value.decode(\"\")if corrected_name in environ:value = environ[corrected_name] + \"\" + valueenviron[corrected_name] = valuereturn environ", "docstring": "Builds a scope and request body into a WSGI environ object.", "id": "f3853:m0"} {"signature": "def __setitem__(self, key: str, value: str) -> None:", "body": "set_key = key.lower().encode(\"\")set_value = value.encode(\"\")found_indexes = []for idx, (item_key, item_value) in enumerate(self._list):if item_key == set_key:found_indexes.append(idx)for idx in reversed(found_indexes[:]):del self._list[idx]if found_indexes:idx = found_indexes[]self._list[idx] = (set_key, set_value)else:self._list.append((set_key, set_value))", "docstring": "Set the header `key` to `value`, removing any duplicate entries.\nRetains insertion order.", "id": "f3859:c10:m0"} {"signature": "def __delitem__(self, key: str) -> None:", "body": "del_key = key.lower().encode(\"\")pop_indexes = []for idx, (item_key, item_value) in enumerate(self._list):if item_key == del_key:pop_indexes.append(idx)for idx in reversed(pop_indexes):del 
self._list[idx]", "docstring": "Remove the header `key`.", "id": "f3859:c10:m1"} {"signature": "def setdefault(self, key: str, value: str) -> str:", "body": "set_key = key.lower().encode(\"\")set_value = value.encode(\"\")for idx, (item_key, item_value) in enumerate(self._list):if item_key == set_key:return item_value.decode(\"\")self._list.append((set_key, set_value))return value", "docstring": "If the header `key` does not exist, then set it to `value`.\nReturns the header value.", "id": "f3859:c10:m3"} {"signature": "def append(self, key: str, value: str) -> None:", "body": "append_key = key.lower().encode(\"\")append_value = value.encode(\"\")self._list.append((append_key, append_value))", "docstring": "Append a header, preserving any duplicate entries.", "id": "f3859:c10:m5"} {"signature": "def get_endpoints(self, routes: typing.List[BaseRoute]) -> typing.List[EndpointInfo]:", "body": "endpoints_info: list = []for route in routes:if isinstance(route, Mount):routes = route.routes or []sub_endpoints = [EndpointInfo(path=\"\".join((route.path, sub_endpoint.path)),http_method=sub_endpoint.http_method,func=sub_endpoint.func,)for sub_endpoint in self.get_endpoints(routes)]endpoints_info.extend(sub_endpoints)elif not isinstance(route, Route) or not route.include_in_schema:continueelif inspect.isfunction(route.endpoint) or inspect.ismethod(route.endpoint):for method in route.methods or [\"\"]:if method == \"\":continueendpoints_info.append(EndpointInfo(route.path, method.lower(), route.endpoint))else:for method in [\"\", \"\", \"\", \"\", \"\", \"\"]:if not hasattr(route.endpoint, method):continuefunc = getattr(route.endpoint, method)endpoints_info.append(EndpointInfo(route.path, method.lower(), func))return endpoints_info", "docstring": "Given the routes, yields the following information:\n\n- path\n eg: /users/\n- http_method\n one of 'get', 'post', 'put', 'patch', 'delete', 'options'\n- func\n method ready to extract the docstring", "id": "f3862:c2:m1"} {"signature": "def parse_docstring(self, func_or_method: typing.Callable) -> dict:", "body": "docstring = func_or_method.__doc__if not docstring:return {}docstring = docstring.split(\"\")[-]parsed = yaml.safe_load(docstring)if not isinstance(parsed, dict):return {}return parsed", "docstring": "Given a function, parse the docstring as YAML and return a dictionary of info.", "id": "f3862:c2:m2"} {"signature": "def get_directories(self, directory: str = None, packages: typing.List[str] = None) -> typing.List[str]:", "body": "directories = []if directory is not None:directories.append(directory)for package in packages or []:spec = importlib.util.find_spec(package)assert spec is not None, f\"\"assert (spec.origin is not None), \"\"directory = os.path.normpath(os.path.join(spec.origin, \"\", \"\"))assert os.path.isdir(directory), \"\"directories.append(directory)return directories", "docstring": "Given `directory` and `packages` arugments, return a list of all the\ndirectories that should be used for serving static files from.", "id": "f3863:c1:m1"} {"signature": "async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:", "body": "assert scope[\"\"] == \"\"if not self.config_checked:await self.check_config()self.config_checked = Truepath = self.get_path(scope)response = await self.get_response(path, scope)await response(scope, receive, send)", "docstring": "The ASGI entry point.", "id": "f3863:c1:m2"} {"signature": "def get_path(self, scope: Scope) -> str:", "body": "return 
os.path.normpath(os.path.join(*scope[\"\"].split(\"\")))", "docstring": "Given the ASGI scope, return the `path` string to serve up,\nwith OS specific path seperators, and any '..', '.' components removed.", "id": "f3863:c1:m3"} {"signature": "async def get_response(self, path: str, scope: Scope) -> Response:", "body": "if scope[\"\"] not in (\"\", \"\"):return PlainTextResponse(\"\", status_code=)if path.startswith(\"\"):return PlainTextResponse(\"\", status_code=)full_path, stat_result = await self.lookup_path(path)if stat_result and stat.S_ISREG(stat_result.st_mode):return self.file_response(full_path, stat_result, scope)elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:index_path = os.path.join(path, \"\")full_path, stat_result = await self.lookup_path(index_path)if stat_result is not None and stat.S_ISREG(stat_result.st_mode):if not scope[\"\"].endswith(\"\"):url = URL(scope=scope)url = url.replace(path=url.path + \"\")return RedirectResponse(url=url)return self.file_response(full_path, stat_result, scope)if self.html:full_path, stat_result = await self.lookup_path(\"\")if stat_result is not None and stat.S_ISREG(stat_result.st_mode):return self.file_response(full_path, stat_result, scope, status_code=)return PlainTextResponse(\"\", status_code=)", "docstring": "Returns an HTTP response, given the incoming path, method and request headers.", "id": "f3863:c1:m4"} {"signature": "async def check_config(self) -> None:", "body": "if self.directory is None:returntry:stat_result = await aio_stat(self.directory)except FileNotFoundError:raise RuntimeError(f\"\")if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):raise RuntimeError(f\"\")", "docstring": "Perform a one-off configuration check that StaticFiles is actually\npointed at a directory, so that we can raise loud errors rather than\njust returning 404 responses.", "id": "f3863:c1:m7"} {"signature": "def is_not_modified(self, response_headers: Headers, request_headers: Headers) -> bool:", "body": "try:if_none_match = request_headers[\"\"]etag = response_headers[\"\"]if if_none_match == etag:return Trueexcept KeyError:passtry:if_modified_since = parsedate(request_headers[\"\"])last_modified = parsedate(response_headers[\"\"])if (if_modified_since is not Noneand last_modified is not Noneand if_modified_since >= last_modified):return Trueexcept KeyError:passreturn False", "docstring": "Given the request and response headers, return `True` if an HTTP\n\"Not Modified\" response could be returned instead.", "id": "f3863:c1:m8"} {"signature": "def _run(self) -> None:", "body": "scope = self.scopereceive = self._asgi_receivesend = self._asgi_sendtry:self._loop.run_until_complete(self.app(scope, receive, send))except BaseException as exc:self._send_queue.put(exc)", "docstring": "The sub-thread in which the websocket session runs.", "id": "f3871:c5:m3"} {"signature": "async def receive(self) -> Message:", "body": "if self.client_state == WebSocketState.CONNECTING:message = await self._receive()message_type = message[\"\"]assert message_type == \"\"self.client_state = WebSocketState.CONNECTEDreturn messageelif self.client_state == WebSocketState.CONNECTED:message = await self._receive()message_type = message[\"\"]assert message_type in {\"\", \"\"}if message_type == \"\":self.client_state = WebSocketState.DISCONNECTEDreturn messageelse:raise RuntimeError('')", "docstring": "Receive ASGI websocket messages, ensuring valid state transitions.", "id": "f3872:c2:m1"} {"signature": "async def send(self, 
message: Message) -> None:", "body": "if self.application_state == WebSocketState.CONNECTING:message_type = message[\"\"]assert message_type in {\"\", \"\"}if message_type == \"\":self.application_state = WebSocketState.DISCONNECTEDelse:self.application_state = WebSocketState.CONNECTEDawait self._send(message)elif self.application_state == WebSocketState.CONNECTED:message_type = message[\"\"]assert message_type in {\"\", \"\"}if message_type == \"\":self.application_state = WebSocketState.DISCONNECTEDawait self._send(message)else:raise RuntimeError('')", "docstring": "Send ASGI websocket messages, ensuring valid state transitions.", "id": "f3872:c2:m2"} {"signature": "def filter_symlog(y, base=):", "body": "log_base = np.log(base)sign = np.sign(y)logs = np.log(np.abs(y) / log_base)return sign * logs", "docstring": "Symmetrical logarithmic scale.\n\n Optional arguments:\n\n *base*:\n The base of the logarithm.", "id": "f3874:m0"} {"signature": "def filter_savitzky_golay(y, window_size=, order=, deriv=, rate=):", "body": "try:window_size = np.abs(np.int(window_size))order = np.abs(np.int(order))except ValueError:raise ValueError('')if window_size % != or window_size < :raise ValueError('')if window_size < order + :raise ValueError('')order_range = range(order + )half_window = (window_size - ) // minimum = np.min(y)maximum = np.max(y)b = np.mat([[k ** i for i in order_range]for k in range(-half_window, half_window + )])m = np.linalg.pinv(b).A[deriv] * rate ** deriv * math.factorial(deriv)firstvals = y[] - np.abs(y[:half_window+][::-] - y[])lastvals = y[-] + np.abs(y[-half_window-:-][::-] - y[-])y = np.concatenate((firstvals, y, lastvals))return np.clip(np.convolve(m[::-], y, mode=''),minimum,maximum,)", "docstring": "Smooth (and optionally differentiate) with a Savitzky-Golay filter.", "id": "f3874:m1"} {"signature": "def usage_function(parser):", "body": "parser.print_usage()print('')print('')for function in sorted(FUNCTION):doc = FUNCTION[function].__doc__.strip().splitlines()[]print('' % (function + '', doc))return ", "docstring": "Show usage and available curve functions.", "id": "f3874:m2"} {"signature": "def usage_palette(parser):", "body": "parser.print_usage()print('')print('')for palette in sorted(PALETTE):print('' % (palette,))return ", "docstring": "Show usage and available palettes.", "id": "f3874:m3"} {"signature": "def run():", "body": "import argparseparser = argparse.ArgumentParser(description=(''),epilog=", "docstring": "Main entrypoint if invoked via the command line.", "id": "f3874:m4"} {"signature": "def __init__(self):", "body": "curses.setupterm()", "docstring": "Initialize curses.", "id": "f3874:c0:m0"} {"signature": "@propertydef colors(self):", "body": "number = curses.tigetnum('') or return if number == else number", "docstring": "Get the number of colors supported by this terminal.", "id": "f3874:c0:m1"} {"signature": "@propertydef encoding(self):", "body": "_, encoding = locale.getdefaultlocale()return encoding", "docstring": "Get the current terminal encoding.", "id": "f3874:c0:m2"} {"signature": "@propertydef height(self):", "body": "return self.size[]", "docstring": "Get the current terminal height.", "id": "f3874:c0:m3"} {"signature": "@propertydef size(self):", "body": "for fd in range():cr = self._ioctl_GWINSZ(fd)if cr:breakif not cr:try:fd = os.open(os.ctermid(), os.O_RDONLY)cr = self._ioctl_GWINSZ(fd)os.close(fd)except Exception:passif not cr:env = os.environcr = (env.get('', ), env.get('', ))return int(cr[]), int(cr[])", "docstring": "Get the current 
terminal size.", "id": "f3874:c0:m4"} {"signature": "@propertydef width(self):", "body": "return self.size[]", "docstring": "Get the current terminal width.", "id": "f3874:c0:m5"} {"signature": "def _ioctl_GWINSZ(self, fd):", "body": "try:import fcntlimport termiosimport structreturn struct.unpack('',fcntl.ioctl(fd, termios.TIOCGWINSZ, ''))except Exception:return None", "docstring": "Get terminal size using ``TIOCGWINSZ``.\n\n Internal function that will try to request the ``TIOCGWINSZ`` against\n the selected file descriptor ``fd``.", "id": "f3874:c0:m6"} {"signature": "def color(self, index):", "body": "if self.colors == :if index >= :return self.csi('') + self.csi('', index - )else:return self.csi('') + self.csi('', index)else:return self.csi('', index)", "docstring": "Get the escape sequence for indexed color ``index``.\n\n The ``index`` is a color index in the 256 color space. The color space\n consists of:\n\n * 0x00-0x0f: default EGA colors\n * 0x10-0xe7: 6x6x6 RGB cubes\n * 0xe8-0xff: gray scale ramp", "id": "f3874:c0:m7"} {"signature": "def csi(self, capname, *args):", "body": "value = curses.tigetstr(capname)if value is None:return b''else:return curses.tparm(value, *args)", "docstring": "Return the escape sequence for the selected Control Sequence.", "id": "f3874:c0:m8"} {"signature": "def csi_wrap(self, value, capname, *args):", "body": "if isinstance(value, str):value = value.encode('')return b''.join([self.csi(capname, *args),value,self.csi(''),])", "docstring": "Return a value wrapped in the selected CSI and does a reset.", "id": "f3874:c0:m9"} {"signature": "def __init__(self, coordinates):", "body": "self.x = coordinates[]self.y = coordinates[]", "docstring": "Point with ``(x, y)`` coordinates.", "id": "f3874:c1:m0"} {"signature": "def copy(self):", "body": "return Point((self.x, self.y))", "docstring": "Return a fresh copy of the current point.", "id": "f3874:c1:m2"} {"signature": "@propertydef width(self):", "body": "return self.size.x", "docstring": "Get the buffer width.", "id": "f3874:c2:m1"} {"signature": "@propertydef height(self):", "body": "return self.size.y", "docstring": "Get the buffer height.", "id": "f3874:c2:m2"} {"signature": "def __contains__(self, point):", "body": "if not isinstance(point, Point):point = Point(point)if point.y not in self.canvas:return Falseelse:return point.x in self.canvas[point.y]", "docstring": "Check if a point has a value.", "id": "f3874:c2:m3"} {"signature": "def __setitem__(self, point, value):", "body": "if not isinstance(point, Point):point = Point(point)if point.y > self.size.y:if self.extend_y:self.size.y = point.yelse:raise OverflowError('' % (self,point.y,self.size.y,))if point.x > self.size.x:if self.extend_x:self.size.x = point.xelse:raise OverflowError('' % (self,point.x,self.size.x,))self.canvas[point.y][point.x] = value", "docstring": "Set a point value.", "id": "f3874:c2:m5"} {"signature": "def __getitem__(self, point):", "body": "if not isinstance(point, Point):point = Point(point)return self.canvas[point.y][point.x]", "docstring": "Get a point value or None.", "id": "f3874:c2:m6"} {"signature": "def consume(self, istream, ostream, batch=False):", "body": "datapoints = [] if batch:sleep = max(, self.option.sleep)fd = istream.fileno()while True:try:if select.select([fd], [], [], sleep):try:line = istream.readline()if line == '':breakdatapoints.append(self.consume_line(line))except ValueError:continueif self.option.sort_by_column:datapoints = sorted(datapoints, key=itemgetter(self.option.sort_by_column - ))if 
len(datapoints) > :datapoints = datapoints[-self.maximum_points:]self.update([dp[] for dp in datapoints], [dp[] for dp in datapoints])self.render(ostream)time.sleep(sleep)except KeyboardInterrupt:breakelse:for line in istream:try:datapoints.append(self.consume_line(line))except ValueError:passif self.option.sort_by_column:datapoints = sorted(datapoints, key=itemgetter(self.option.sort_by_column - ))self.update([dp[] for dp in datapoints], [dp[] for dp in datapoints])self.render(ostream)", "docstring": "Read points from istream and output to ostream.", "id": "f3874:c3:m1"} {"signature": "def consume_line(self, line):", "body": "data = RE_VALUE_KEY.split(line.strip(), )if len(data) == :return float(data[]), Noneelse:return float(data[]), data[].strip()", "docstring": "Consume data from a line.", "id": "f3874:c3:m2"} {"signature": "@propertydef scale(self):", "body": "return ", "docstring": "Graph scale.", "id": "f3874:c3:m3"} {"signature": "def update(self, points, values=None):", "body": "self.values = values or [None] * len(points)if np is None:if self.option.function:warnings.warn('')self.points = pointsself.minimum = min(self.points)self.maximum = max(self.points)self.current = self.points[-]else:self.points = self.apply_function(points)self.minimum = np.min(self.points)self.maximum = np.max(self.points)self.current = self.points[-]if self.maximum == self.minimum:self.extents = else:self.extents = (self.maximum - self.minimum)self.extents = (self.maximum - self.minimum)", "docstring": "Add a set of data points.", "id": "f3874:c3:m4"} {"signature": "def color_ramp(self, size):", "body": "color = PALETTE.get(self.option.palette, {})color = color.get(self.term.colors, None)color_ramp = []if color is not None:ratio = len(color) / float(size)for i in range(int(size)):color_ramp.append(self.term.color(color[int(ratio * i)]))return color_ramp", "docstring": "Generate a color ramp for the current screen height.", "id": "f3874:c3:m5"} {"signature": "def human(self, size, base=, units=''):", "body": "sign = '' if size >= else ''size = abs(size)if size < :return '' % (sign, size)for i, suffix in enumerate(units):unit = ** (i + )if size < unit:return ('' % (sign,size / float(unit) * base,suffix,)).strip()raise OverflowError", "docstring": "Convert the input ``size`` to human readable, short form.", "id": "f3874:c3:m6"} {"signature": "def apply_function(self, points):", "body": "if not self.option.function:return pointsif np is None:raise ImportError('')if '' in self.option.function:function, arguments = self.option.function.split('', )arguments = arguments.split('')else:function = self.option.functionarguments = []arguments = list(map(self._function_argument, arguments))filter_function = FUNCTION.get(function)if filter_function is None:raise TypeError('' % (function,))else:return filter_function(np.array(list(points)), *arguments)", "docstring": "Run the filter function on the provided points.", "id": "f3874:c3:m7"} {"signature": "def _function_argument(self, value):", "body": "if value in FUNCTION_CONSTANT:return FUNCTION_CONSTANT[value]else:return float(value)", "docstring": "Resolve function, convert to float if not found.", "id": "f3874:c3:m8"} {"signature": "def line(self, p1, p2, resolution=):", "body": "xdiff = max(p1.x, p2.x) - min(p1.x, p2.x)ydiff = max(p1.y, p2.y) - min(p1.y, p2.y)xdir = [-, ][int(p1.x <= p2.x)]ydir = [-, ][int(p1.y <= p2.y)]r = int(round(max(xdiff, ydiff)))if r == :returnfor i in range((r + ) * resolution):x = p1.xy = p1.yif xdiff:x += (float(i) * xdiff) / r * xdir / 
resolutionif ydiff:y += (float(i) * ydiff) / r * ydir / resolutionyield Point((x, y))", "docstring": "Resolve the points to make a line between two points.", "id": "f3874:c3:m9"} {"signature": "@propertydef maximum_points(self):", "body": "raise NotImplementedError()", "docstring": "Override in subclass.", "id": "f3874:c3:m10"} {"signature": "def render(self, stream):", "body": "raise NotImplementedError()", "docstring": "Render the graph to the selected output stream.", "id": "f3874:c3:m11"} {"signature": "def round(self, value):", "body": "return int(value)", "docstring": "Get an integer value for the input value.", "id": "f3874:c3:m12"} {"signature": "def set_text(self, point, text):", "body": "if not self.option.legend:returnif not isinstance(point, Point):point = Point(point)for offset, char in enumerate(str(text)):self.screen.canvas[point.y][point.x + offset] = char", "docstring": "Set a text value in the screen canvas.", "id": "f3874:c3:m13"} {"signature": "@propertydef width(self):", "body": "return self.size.x * ", "docstring": "Buffer width.", "id": "f3874:c4:m0"} {"signature": "@propertydef height(self):", "body": "return self.size.y * ", "docstring": "Buffer height.", "id": "f3874:c4:m1"} {"signature": "def render(self, stream):", "body": "encoding = self.option.encoding or self.term.encoding or \"\"if self.option.color:ramp = self.color_ramp(self.size.y)[::-]else:ramp = Noneif self.cycle >= and self.lines:stream.write(self.term.csi('', self.lines))zero = int(self.null / ) lines = for y in range(self.screen.size.y):if y == zero and self.size.y > :stream.write(self.term.csi(''))if ramp:stream.write(ramp[y])for x in range(self.screen.size.x):point = Point((x, y))if point in self.screen:value = self.screen[point]if isinstance(value, int):stream.write(chr(self.base + value).encode(encoding))else:stream.write(self.term.csi(''))stream.write(self.term.csi_wrap(value.encode(encoding),''))if y == zero and self.size.y > :stream.write(self.term.csi(''))if ramp:stream.write(ramp[y])else:stream.write(b'')if y == zero and self.size.y > :stream.write(self.term.csi(''))if ramp:stream.write(self.term.csi(''))stream.write(b'')lines += stream.flush()self.cycle = self.cycle + self.lines = lines", "docstring": "Render graph to stream.", "id": "f3874:c5:m1"} {"signature": "@propertydef normalised(self):", "body": "if np is None:return self._normalised_python()else:return self._normalised_numpy()", "docstring": "Normalised data points.", "id": "f3874:c5:m2"} {"signature": "def _normalised_numpy(self):", "body": "dx = (self.screen.width / float(len(self.points)))oy = (self.screen.height)points = np.array(self.points) - self.minimumpoints = points * / self.extents * self.size.yfor x, y in enumerate(points):yield Point((dx * x,min(oy, oy - y),))", "docstring": "Normalised data points using numpy.", "id": "f3874:c5:m3"} {"signature": "def _normalised_python(self):", "body": "dx = (self.screen.width / float(len(self.points)))oy = (self.screen.height)for x, point in enumerate(self.points):y = (point - self.minimum) * / self.extents * self.size.yyield Point((dx * x,min(oy, oy - y),))", "docstring": "Normalised data points using pure Python.", "id": "f3874:c5:m4"} {"signature": "@propertydef maximum_points(self):", "body": "return self.size.x", "docstring": "Maximum width.", "id": "f3874:c5:m5"} {"signature": "@propertydef null(self):", "body": "if not self.option.axis:return -else:return self.screen.height - (-self.minimum * / self.extents * self.size.y)", "docstring": "Zero crossing value.", "id": 
"f3874:c5:m6"} {"signature": "def set(self, point):", "body": "if not isinstance(point, Point):point = Point(point)rx = self.round(point.x)ry = self.round(point.y)item = Point((rx >> , min(ry >> , self.size.y)))self.screen[item] |= self.pixels[ry & ][rx & ]", "docstring": "Set pixel at (x, y) point.", "id": "f3874:c5:m7"} {"signature": "def unset(self, point):", "body": "if not isinstance(point, Point):point = Point(point)x, y = self.round(point.x) >> , self.round(point.y) >> if (x, y) not in self.screen:returnif isinstance(self.screen[y][x], int):self.screen[(x, y)] &= ~self.pixels[y & ][x & ]else:del self.screen[(x, y)]if not self.screen.canvas.get(y):del self.screen[y]", "docstring": "Unset pixel at (x, y) point.", "id": "f3874:c5:m8"} {"signature": "def __init__(self, dg_option=None, ostream=None, data=None):", "body": "self.dg_option = dg_optionif self.dg_option == None:self.dg_option = DOption()self.ostream = ostreamif self.ostream == None:try:self.ostream = sys.stdout.bufferexcept AttributeError:self.ostream = sys.stdoutif self.dg_option.mode == '':self.dg = HorizontalBarGraph(self.dg_option.size,self.dg_option)elif self.dg_option.mode == '':self.dg = VerticalBarGraph(self.dg_option.size,self.dg_option)else:self.dg = AxisGraph(self.dg_option.size,self.dg_option)self.dg.update(data[], data[])", "docstring": "Handle some of the setup functions for the graph in the\n diagram package. Specifically hide all of the requirements that\n are computed in run() inside diagram.py.", "id": "f3874:c10:m0"} {"signature": "def show(self):", "body": "self.dg.render(self.ostream)", "docstring": "Actually show the graph on screen.", "id": "f3874:c10:m1"} {"signature": "def fetch_deputies(data_dir):", "body": "deputies = DeputiesDataset()df = deputies.fetch()save_to_csv(df, data_dir, \"\")holders = df.condition == ''substitutes = df.condition == ''log.info(\"\", len(df))log.info(\"\", len(df[holders]))log.info(\"\", len(df[substitutes]))return df", "docstring": ":param data_dir: (str) directory in which the output file will be saved", "id": "f3892:m0"} {"signature": "def fetch(self):", "body": "xml = urllib.request.urlopen(self.URL)tree = ET.ElementTree(file=xml)records = self._parse_deputies(tree.getroot())df = pd.DataFrame(records, columns=('','','','','','','','','','','',''))return self._translate(df)", "docstring": "Fetches the list of deputies for the current term.", "id": "f3892:c0:m0"} {"signature": "def fetch_speeches(data_dir, range_start, range_end):", "body": "speeches = SpeechesDataset()df = speeches.fetch(range_start, range_end)save_to_csv(df, data_dir, \"\")return df", "docstring": ":param data_dir: (str) directory in which the output file will be saved\n:param range_start: (str) date in the format dd/mm/yyyy\n:param range_end: (str) date in the format dd/mm/yyyy", "id": "f3893:m0"} {"signature": "def fetch(self, range_start, range_end):", "body": "range_dates = {'': range_start, '': range_end}url = self.URL.format(**range_dates)xml = urllib.request.urlopen(url)tree = ET.ElementTree(file=xml)records = self._parse_speeches(tree.getroot())return pd.DataFrame(records, columns=['','','','','','','','','','','',''])", "docstring": "Fetches speeches from the ListarDiscursosPlenario endpoint of the\nSessoesReunioes (SessionsReunions) API.\n\nThe date range provided should be specified as a string using the\nformat supported by the API (%d/%m/%Y)", "id": "f3893:c0:m0"} {"signature": "def fetch_session_start_times(data_dir, pivot, session_dates):", "body": "session_start_times = 
SessionStartTimesDataset()df = session_start_times.fetch(pivot, session_dates)save_to_csv(df, data_dir, \"\")log.info(\"\", len(session_dates))found = pd.to_datetime(df[''], format=\"\").dt.date.unique()log.info(\"\", len(found))return df", "docstring": ":param data_dir: (str) directory in which the output file will be saved\n:param pivot: (int) congressperson document to use as a pivot for scraping the data\n:param session_dates: (list) datetime objects to fetch the start times for", "id": "f3894:m0"} {"signature": "def fetch(self, pivot, session_dates):", "body": "records = self._all_start_times(pivot, session_dates)return pd.DataFrame(records, columns=('','',''))", "docstring": ":param pivot: (int) a congressperson document to use as a pivot for scraping the data\n:param session_dates: (list) datetime objects to fetch the start times for", "id": "f3894:c0:m0"} {"signature": "def fetch_official_missions(data_dir, start_date, end_date):", "body": "official_missions = OfficialMissionsDataset()df = official_missions.fetch(start_date, end_date)save_to_csv(df, data_dir, \"\")return df", "docstring": ":param data_dir: (str) directory in which the output file will be saved\n:param start_date: (datetime) first date of the range to be scraped\n:param end_date: (datetime) last date of the range to be scraped", "id": "f3895:m0"} {"signature": "def fetch(self, start_date, end_date):", "body": "records = []for two_months_range in self._generate_ranges(start_date, end_date):log.debug(two_months_range)for record in self._fetch_missions_for_range(two_months_range[], two_months_range[]):records.append(record)df = pd.DataFrame(records, columns=['','','','','','','',''])translate_column(df, '', {'': '','': '','': '','': ''})translate_column(df, '', {'': '','': ''})return df.drop_duplicates()", "docstring": "Fetches official missions within the given date range", "id": "f3895:c0:m0"} {"signature": "@staticmethoddef _generate_ranges(start_date, end_date):", "body": "range_start = start_datewhile range_start < end_date:range_end = range_start + timedelta(days=)yield (range_start.strftime(\"\"),range_end.strftime(\"\"))range_start += timedelta(days=)", "docstring": "Generate a list of 2 month ranges for the range requested with an\nintersection between months. 
This is necessary because we can't search\nfor ranges longer than 3 months and the period searched has to encompass\nthe whole period of the mission.", "id": "f3895:c0:m1"} {"signature": "def fetch_presences(data_dir, deputies, date_start, date_end):", "body": "presences = PresencesDataset()df = presences.fetch(deputies, date_start, date_end)save_to_csv(df, data_dir, \"\")log.info(\"\", len(df))log.info(\"\", len(df[df.presence == '']))log.info(\"\", len(df[df.presence == '']))return df", "docstring": ":param data_dir: (str) directory in which the output file will be saved\n:param deputies: (pandas.DataFrame) a dataframe with deputies data\n:param date_start: (str) a date in the format dd/mm/yyyy\n:param date_end: (str) a date in the format dd/mm/yyyy", "id": "f3897:m0"} {"signature": "def fetch(self, deputies, start_date, end_date):", "body": "log.debug(\"\".format(len(deputies), start_date, end_date))records = self._all_presences(deputies, start_date, end_date)df = pd.DataFrame(records, columns=('','','','','','','','','',''))return self._translate(df)", "docstring": ":param deputies: (pandas.DataFrame) a dataframe with deputies data\n:param date_start: (str) date in the format dd/mm/yyyy\n:param date_end: (str) date in the format dd/mm/yyyy", "id": "f3897:c0:m1"} {"signature": "def xml_extract_text(node, xpath):", "body": "text = node.find(xpath).textif text is not None:text = text.strip()return text", "docstring": ":param node: the node to be queried\n:param xpath: the path to fetch the child node that has the wanted text", "id": "f3902:m0"} {"signature": "def xml_extract_date(node, xpath, date_format=''):", "body": "return datetime.strptime(xml_extract_text(node, xpath), date_format)", "docstring": ":param node: the node to be queried\n:param xpath: the path to fetch the child node that has the wanted date", "id": "f3902:m1"} {"signature": "def xml_extract_datetime(node, xpath, datetime_format=''):", "body": "return datetime.strptime(xml_extract_text(node, xpath), datetime_format)", "docstring": ":param node: the node to be queried\n:param xpath: the path to fetch the child node that has the wanted datetime", "id": "f3902:m2"} {"signature": "def translate_column(df, column, translations):", "body": "df[column] = df[column].astype('')translations = [translations[cat]for cat in df[column].cat.categories]df[column].cat.rename_categories(translations, inplace=True)", "docstring": ":param df: (pandas.Dataframe) the dataframe to be translated\n:param column: (str) the column to be translated\n:param translations: (dict) a dictionary of the strings to be categorized and translated", "id": "f3902:m3"} {"signature": "def render(self, obj):", "body": "self.obj = objattrs = ''.join(['' % (attr_name, attr.resolve(obj))if isinstance(attr, Accessor)else '' % (attr_name, attr)for attr_name, attr in self.attrs.items()])return mark_safe(u'' % (attrs, self.text))", "docstring": "Render link as HTML output tag <a>.", "id": "f3914:c1:m4"} {"signature": "def get_column_span(self, index):", "body": "return str(self.get_days_span(index))", "docstring": "Get `colspan` value for the <th> tag.\nIt will render as <th colspan=\"...\">", "id": "f3915:c5:m3"} {"signature": "def get_days_span(self, month_index):", "body": "is_first_month = month_index == is_last_month = month_index == self.__len__() - y = int(self.start_date.year + (self.start_date.month + month_index) / )m = int((self.start_date.month + month_index) % or )total = calendar.monthrange(y, m)[]if is_first_month and is_last_month:return (self.end_date - self.start_date).days + 
else:if is_first_month:return total - self.start_date.day + elif is_last_month:return self.end_date.dayelse:return total", "docstring": "Calculate how many days the month spans.", "id": "f3915:c5:m4"} {"signature": "def render_to_json_response(self, context, **response_kwargs):", "body": "return HttpResponse(self.convert_context_to_json(context),content_type='',**response_kwargs)", "docstring": "Returns a JSON response, transforming 'context' to make the payload.", "id": "f3923:c0:m0"} {"signature": "def convert_context_to_json(self, context):", "body": "return json.dumps(context, cls=DjangoJSONEncoder)", "docstring": "Convert the context dictionary into a JSON object.", "id": "f3923:c0:m1"} {"signature": "def get_context_data(self, **kwargs):", "body": "sEcho = self.query_data[\"\"]context = super(BaseListView, self).get_context_data(**kwargs)queryset = context[\"\"]if queryset is not None:total_length = self.get_queryset_length(queryset)queryset = self.filter_queryset(queryset)display_length = self.get_queryset_length(queryset)queryset = self.sort_queryset(queryset)queryset = self.paging_queryset(queryset)values_list = self.convert_queryset_to_values_list(queryset)context = {\"\": sEcho,\"\": total_length,\"\": display_length,\"\": values_list,}else:context = {\"\": sEcho,\"\": ,\"\": ,\"\": [],}return context", "docstring": "Get context data for datatable server-side response.\nSee http://www.datatables.net/usage/server-side", "id": "f3923:c1:m7"} {"signature": "@propertydef header_rows(self):", "body": "header_rows = []headers = [col.header for col in self.columns]for header in headers:if len(header_rows) <= header.row_order:header_rows.append([])header_rows[header.row_order].append(header)return header_rows", "docstring": "[ [header1], [header3, header4] ]", "id": "f3925:c0:m2"} {"signature": "def resolve(self, context, quiet=True):", "body": "try:obj = contextfor level in self.levels:if isinstance(obj, dict):obj = obj[level]elif isinstance(obj, list) or isinstance(obj, tuple):obj = obj[int(level)]else:if callable(getattr(obj, level)):try:obj = getattr(obj, level)()except KeyError:obj = getattr(obj, level)else:display = '' % levelobj = getattr(obj, display)() if hasattr(obj, display) else getattr(obj, level)if not obj:breakreturn objexcept Exception as e:if quiet:return ''else:raise e", "docstring": "Return an object described by the accessor by traversing the attributes\nof context.", "id": "f3929:c0:m0"} {"signature": "def get_meta_image_url(request, image):", "body": "rendition = image.get_rendition(filter='')return request.build_absolute_uri(rendition.url)", "docstring": "Resize an image for metadata tags, and return an absolute URL to it.", "id": "f3952:m0"} {"signature": "def get_meta_url(self):", "body": "raise NotImplementedError()", "docstring": "The full URL to this object, including protocol and domain.", "id": "f3960:c0:m0"} {"signature": "def get_meta_image(self):", "body": "return None", "docstring": "Get the image to use for this object.\nCan be None if there is no relevant image.", "id": "f3960:c0:m3"} {"signature": "def get_meta_twitter_card_type(self):", "body": "if self.get_meta_image() is not None:return ''else:return ''", "docstring": "Get the Twitter card type for this object.\nSee https://dev.twitter.com/cards/types.\nDefaults to 'summary_large_image' if the object has an image,\notherwise 'summary'.", "id": "f3960:c0:m4"} {"signature": "def train_encoder(X, y, fold_count, encoder):", "body": "kf = StratifiedKFold(n_splits=fold_count, shuffle=True, 
random_state=)encoder = deepcopy(encoder) imputer = SimpleImputer(strategy='')scaler = StandardScaler()folds = []fit_encoder_time = score_encoder_time = for train_index, test_index in kf.split(X, y):X_train, X_test = X.iloc[train_index, :].reset_index(drop=True), X.iloc[test_index, :].reset_index(drop=True)y_train, y_test = y[train_index].reset_index(drop=True), y[test_index].reset_index(drop=True)start_time = time.time()X_train = encoder.fit_transform(X_train, y_train)fit_encoder_time += time.time() - start_timeX_train = imputer.fit_transform(X_train)X_train = scaler.fit_transform(X_train)start_time = time.time()X_test = encoder.transform(X_test)score_encoder_time += time.time() - start_timeX_test = imputer.transform(X_test)X_test = scaler.transform(X_test)folds.append([X_train, y_train, X_test, y_test])return folds, fit_encoder_time/fold_count, score_encoder_time/fold_count", "docstring": "Defines folds and performs the data preprocessing (categorical encoding, NaN imputation, normalization).\nReturns a list with {X_train, y_train, X_test, y_test}, average fit_encoder_time and average score_encoder_time\n\nNote: We normalize all features (not only numerical features) because otherwise SVM would\n get stuck for hours on the ordinal encoded cylinder.bands.arff dataset due to the presence of\n disproportionately high values.\n\nNote: The fold count is variable because there are datasets which have fewer than 10 samples in the minority class.\n\nNote: We do not use pipelines because of:\n https://github.com/scikit-learn/scikit-learn/issues/11832", "id": "f3962:m0"} {"signature": "def train_model(folds, model):", "body": "scores = []fit_model_time = score_model_time = for X_train, y_train, X_test, y_test in folds:start_time = time.time()with ignore_warnings(category=ConvergenceWarning): model.fit(X_train, y_train)fit_model_time += time.time() - start_timeprediction_train_proba = model.predict_proba(X_train)[:, ]prediction_train = (prediction_train_proba >= ).astype('')start_time = time.time()prediction_test_proba = model.predict_proba(X_test)[:, ]score_model_time += time.time() - start_timeprediction_test = (prediction_test_proba >= ).astype('')with warnings.catch_warnings():warnings.simplefilter(\"\")scores.append([sklearn.metrics.matthews_corrcoef(y_test, prediction_test),sklearn.metrics.matthews_corrcoef(y_train, prediction_train),sklearn.metrics.roc_auc_score(y_test, prediction_test_proba),sklearn.metrics.roc_auc_score(y_train, prediction_train_proba),sklearn.metrics.brier_score_loss(y_test, prediction_test_proba),sklearn.metrics.brier_score_loss(y_train, prediction_train_proba)])return np.mean(scores, axis=), fit_model_time/len(folds), score_model_time/len(folds)", "docstring": "Evaluation with:\n Matthews correlation coefficient: represents thresholding measures\n AUC: represents ranking measures\n Brier score: represents calibration measures", "id": "f3962:m1"} {"signature": "def score_models(clf, X, y, encoder, runs=):", "body": "scores = []X_test = Nonefor _ in range(runs):X_test = encoder().fit_transform(X, y)X_test = StandardScaler().fit_transform(X_test)scores.append(cross_validate(clf, X_test, y, n_jobs=, cv=)[''])gc.collect()scores = [y for z in [x for x in scores] for y in z]return float(np.mean(scores)), float(np.std(scores)), scores, X_test.shape[]", "docstring": "Takes in a classifier that supports multiclass classification, an X and a y, and returns a cross-validation score.", "id": "f3967:m0"} {"signature": "def main(loader, name):", "body": "scores = []raw_scores_ds = {}X, y, 
mapping = loader()clf = linear_model.LogisticRegression(solver='', multi_class='', max_iter=, random_state=)encoders = (set(category_encoders.__all__) - {''}) for encoder_name in encoders:encoder = getattr(category_encoders, encoder_name)start_time = time.time()score, stds, raw_scores, dim = score_models(clf, X, y, encoder)scores.append([encoder_name, name, dim, score, stds, time.time() - start_time])raw_scores_ds[encoder_name] = raw_scoresgc.collect()results = pd.DataFrame(scores, columns=['', '', '', '', '', ''])raw = pd.DataFrame.from_dict(raw_scores_ds)ax = raw.plot(kind='', return_type='')plt.title('' % (name,))plt.ylabel('')for tick in ax.get_xticklabels():tick.set_rotation()plt.grid()plt.tight_layout()plt.show()return results, raw", "docstring": "Here we iterate through the datasets and score them with a classifier using different encodings.", "id": "f3967:m1"} {"signature": "def get_cars_data():", "body": "df = pd.read_csv('')X = df.reindex(columns=[x for x in df.columns.values if x != ''])y = df.reindex(columns=[''])y = preprocessing.LabelEncoder().fit_transform(y.values.reshape(-, ))mapping = [{'': '', '': [('', ), ('', ), ('', ), ('', )]},{'': '', '': [('', ), ('', ), ('', ), ('', )]},{'': '', '': [('', ), ('', ), ('', ), ('', )]},{'': '', '': [('', ), ('', ), ('', )]},{'': '', '': [('', ), ('', ), ('', )]},{'': '', '': [('', ), ('', ), ('', )]},]return X, y, mapping", "docstring": "Load the cars dataset, split it into X and y, and then call the label encoder to get an integer y column.\n\n:return:", "id": "f3969:m0"} {"signature": "def get_mushroom_data():", "body": "df = pd.read_csv('')X = df.reindex(columns=[x for x in df.columns.values if x != ''])y = df.reindex(columns=[''])y = preprocessing.LabelEncoder().fit_transform(y.values.reshape(-, ))mapping = Nonereturn X, y, mapping", "docstring": "Load the mushroom dataset, split it into X and y, and then call the label encoder to get an integer y column.\n\n:return:", "id": "f3969:m1"} {"signature": "def get_splice_data():", "body": "df = pd.read_csv('')X = df.reindex(columns=[x for x in df.columns.values if x != ''])X[''] = X[''].map(lambda x: list(str(x).strip()))for idx in range():X['' % (idx, )] = X[''].map(lambda x: x[idx])del X['']y = df.reindex(columns=[''])y = preprocessing.LabelEncoder().fit_transform(y.values.reshape(-, ))mapping = Nonereturn X, y, mapping", "docstring": "Load the splice dataset, split it into X and y, and then call the label encoder to get an integer y column.\n\n:return:", "id": "f3969:m2"} {"signature": "def create_array(n_rows=, extras=False, has_none=True):", "body": "ds = [[random.random(),random.random(),random.choice(['', '', '']),random.choice(['', '', '', '']) if extras else random.choice(['', '', '']),random.choice(['', '', '', None, np.nan]) if has_none else random.choice(['', '', '']),random.choice([''])] for _ in range(n_rows)]return np.array(ds)", "docstring": "Creates a numpy dataset with some categorical variables.", "id": "f3981:m1"} {"signature": "def create_dataset(n_rows=, extras=False, has_none=True):", "body": "random.seed()ds = [[random.random(), random.choice([float(''), float(''), float(''), -, , , -, math.pi]), row, str(row), random.choice(['', '']) if extras else '', random.choice(['', '', '']), random.choice(['', '', '', None]) if has_none else random.choice(['', '', '']), random.choice(['', '', '', '']) if extras else random.choice(['', '', '']), random.choice([, , -]), random.choice(['', '', '']), random.choice(['', '', '', np.nan]) ] for row in range(n_rows)]df = 
pd.DataFrame(ds, columns=['', '', '', '', '', '', '', '', , '', ''])df[''] = pd.Categorical(df[''], categories=['', '', ''])df[''] = pd.Categorical(df[''], categories=['', '', ''])return df", "docstring": "Creates a dataset with some categorical variables.", "id": "f3981:m2"} {"signature": "def verify_inverse_transform(x, x_inv):", "body": "assert x.equals(x_inv)", "docstring": "Verify x is equal to x_inv. The test returns true for NaN.equals(NaN) as it should.", "id": "f3981:m3"} {"signature": "def deep_round(A, ndigits=):", "body": "return [[round(val, ndigits) for val in sublst] for sublst in A]", "docstring": "Rounds numbers in a list of lists. Useful for approximate equality testing.", "id": "f3981:m4"} {"signature": "def fit(self, X, y=None, **kwargs):", "body": "X = util.convert_input(X)self._dim = X.shape[]if self.cols is None:self.cols = util.get_obj_cols(X)else:self.cols = util.convert_cols_to_list(self.cols)if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')_, categories = self.ordinal_encoding(X,mapping=self.mapping,cols=self.cols,handle_unknown=self.handle_unknown,handle_missing=self.handle_missing)self.mapping = categoriesX_temp = self.transform(X, override_return_df=True)self.feature_names = X_temp.columns.tolist()if self.drop_invariant:self.drop_cols = []generated_cols = util.get_generated_cols(X, X_temp, self.cols)self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= ]try:[self.feature_names.remove(x) for x in self.drop_cols]except KeyError as e:if self.verbose > :print(\"\"\"\".format(e))return self", "docstring": "Fit encoder according to X and y.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n\n self : encoder\n Returns self.", "id": "f3990:c0:m2"} {"signature": "def transform(self, X, override_return_df=False):", "body": "if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')if self._dim is None:raise ValueError('')X = util.convert_input(X)if X.shape[] != self._dim:raise ValueError('' % (X.shape[], self._dim,))if not self.cols:return X if self.return_df else X.valuesX, _ = self.ordinal_encoding(X,mapping=self.mapping,cols=self.cols,handle_unknown=self.handle_unknown,handle_missing=self.handle_missing)if self.drop_invariant:for col in self.drop_cols:X.drop(col, , inplace=True)if self.return_df or override_return_df:return Xelse:return X.values", "docstring": "Perform the transformation to new categorical data.\n\n Will use the mapping (if available) and the column list (if available, otherwise every column) to encode the\n data ordinarily.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n\n Returns\n -------\n\n p : array, shape = [n_samples, n_numeric + N]\n Transformed values with encoding applied.", "id": "f3990:c0:m3"} {"signature": "def inverse_transform(self, X_in):", "body": "X = X_in.copy(deep=True)X = util.convert_input(X)if self._dim is None:raise ValueError('')if X.shape[] != self._dim:if self.drop_invariant:raise ValueError(\"\"\"\" % (X.shape[],))else:raise ValueError('' % (X.shape[], self._dim,))if not self.cols:return X if self.return_df else X.valuesif self.handle_unknown == '':for col in self.cols:if any(X[col] == -):warnings.warn(\"\"\"\" % (col,))if self.handle_unknown == '' and self.handle_missing == '':for col in self.cols:if 
X[col].isnull().any():warnings.warn(\"\"\"\" % (col,))for switch in self.mapping:column_mapping = switch.get('')inverse = pd.Series(data=column_mapping.index, index=column_mapping.get_values())X[switch.get('')] = X[switch.get('')].map(inverse).astype(switch.get(''))return X if self.return_df else X.values", "docstring": "Perform the inverse transformation to encoded data. Will attempt best case reconstruction, which means\nit will return nan for handle_missing and handle_unknown settings that break the bijection. We issue\nwarnings when some of those cases occur.\n\nParameters\n----------\nX_in : array-like, shape = [n_samples, n_features]\n\nReturns\n-------\np: array, the same size of X_in", "id": "f3990:c0:m4"} {"signature": "@staticmethoddef ordinal_encoding(X_in, mapping=None, cols=None, handle_unknown='', handle_missing=''):", "body": "return_nan_series = pd.Series(data=[np.nan], index=[-])X = X_in.copy(deep=True)if cols is None:cols = X.columns.valuesif mapping is not None:mapping_out = mappingfor switch in mapping:column = switch.get('')X[column] = X[column].map(switch[''])try:X[column] = X[column].astype(int)except ValueError as e:X[column] = X[column].astype(float)if handle_unknown == '':X[column].fillna(-, inplace=True)elif handle_unknown == '':missing = X[column].isnull()if any(missing):raise ValueError('' % column)if handle_missing == '':X[column] = X[column].map(return_nan_series).where(X[column] == -, X[column])else:mapping_out = []for col in cols:nan_identity = np.nanif util.is_category(X[col].dtype):categories = X[col].cat.categorieselse:categories = X[col].unique()index = pd.Series(categories).fillna(nan_identity).unique()data = pd.Series(index=index, data=range(, len(index) + ))if handle_missing == '' and ~data.index.isnull().any():data.loc[nan_identity] = -elif handle_missing == '':data.loc[nan_identity] = -mapping_out.append({'': col, '': data, '': X[col].dtype}, )return X, mapping_out", "docstring": "Ordinal encoding uses a single column of integers to represent the classes. An optional mapping dict can be passed\nin, in this case we use the knowledge that there is some true order to the classes themselves. 
Otherwise, the classes\nare assumed to have no true order and integers are selected at random.", "id": "f3990:c0:m5"} {"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):raise ValueError(\"\")else:return self.feature_names", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n A list with all feature names transformed or added.\n Note: potentially dropped features are not included!", "id": "f3990:c0:m6"} {"signature": "def fit(self, X, y, **kwargs):", "body": "X = util.convert_input(X)y = util.convert_input_vector(y, X.index)if X.shape[] != y.shape[]:raise ValueError(\"\" + str(X.shape[]) + \"\" + str(y.shape[]) + \"\")self._dim = X.shape[]if self.cols is None:self.cols = util.get_obj_cols(X)else:self.cols = util.convert_cols_to_list(self.cols)if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')self.ordinal_encoder = OrdinalEncoder(verbose=self.verbose,cols=self.cols,handle_unknown='',handle_missing='')self.ordinal_encoder = self.ordinal_encoder.fit(X)X_ordinal = self.ordinal_encoder.transform(X)self.mapping = self.fit_target_encoding(X_ordinal, y)X_temp = self.transform(X, override_return_df=True)self.feature_names = list(X_temp.columns)if self.drop_invariant:self.drop_cols = []X_temp = self.transform(X)generated_cols = util.get_generated_cols(X, X_temp, self.cols)self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= ]try:[self.feature_names.remove(x) for x in self.drop_cols]except KeyError as e:if self.verbose > :print(\"\"\"\".format(e))return self", "docstring": "Fit encoder according to X and y.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n self : encoder\n Returns self.", "id": "f3991:c0:m1"} {"signature": "def transform(self, X, y=None, override_return_df=False):", "body": "if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')if self._dim is None:raise ValueError('')X = util.convert_input(X)if X.shape[] != self._dim:raise ValueError('' % (X.shape[], self._dim,))if y is not None:y = util.convert_input_vector(y, X.index)if X.shape[] != y.shape[]:raise ValueError(\"\" + str(X.shape[]) + \"\" + str(y.shape[]) + \"\")if not self.cols:return XX = self.ordinal_encoder.transform(X)if self.handle_unknown == '':if X[self.cols].isin([-]).any().any():raise ValueError('')X = self.target_encode(X)if self.drop_invariant:for col in self.drop_cols:X.drop(col, , inplace=True)if self.return_df or override_return_df:return Xelse:return X.values", "docstring": "Perform the transformation to new categorical data.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n y : array-like, shape = [n_samples] when transform by leave one out\n None, when transform without target info (such as transform test set)\n\n Returns\n -------\n p : array, shape = [n_samples, n_numeric + N]\n Transformed values with encoding applied.", "id": "f3991:c0:m3"} {"signature": "def fit_transform(self, X, y=None, **fit_params):", "body": "return self.fit(X, y, **fit_params).transform(X, y)", "docstring": "Encoders that utilize the target must make sure that the training data are transformed with:\n transform(X, y)\nand not with:\n transform(X)", "id": "f3991:c0:m4"} {"signature": "def get_feature_names(self):", 
"body": "if not isinstance(self.feature_names, list):raise ValueError('')else:return self.feature_names", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n A list with all feature names transformed or added.\n Note: potentially dropped features are not included!", "id": "f3991:c0:m6"} {"signature": "def fit(self, X, y=None, **kwargs):", "body": "X = util.convert_input(X)self._dim = X.shape[]if self.cols is None:self.cols = util.get_obj_cols(X)else:self.cols = util.convert_cols_to_list(self.cols)if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')self.ordinal_encoder = OrdinalEncoder(verbose=self.verbose,cols=self.cols,handle_unknown='',handle_missing='')self.ordinal_encoder = self.ordinal_encoder.fit(X)self.mapping = self.fit_base_n_encoding(X)X_temp = self.transform(X, override_return_df=True)self._encoded_columns = X_temp.columns.valuesself.feature_names = list(X_temp.columns)if self.drop_invariant:self.drop_cols = []generated_cols = util.get_generated_cols(X, X_temp, self.cols)self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= ]try:[self.feature_names.remove(x) for x in self.drop_cols]except KeyError as e:if self.verbose > :print(\"\"\"\".format(e))return self", "docstring": "Fit encoder according to X and y.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n\n self : encoder\n Returns self.", "id": "f3992:c0:m1"} {"signature": "def transform(self, X, override_return_df=False):", "body": "if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')if self._dim is None:raise ValueError('')X = util.convert_input(X)if X.shape[] != self._dim:raise ValueError('' % (X.shape[], self._dim,))if not self.cols:return XX_out = self.ordinal_encoder.transform(X)if self.handle_unknown == '':if X_out[self.cols].isin([-]).any().any():raise ValueError('')X_out = self.basen_encode(X_out, cols=self.cols)if self.drop_invariant:for col in self.drop_cols:X_out.drop(col, , inplace=True)if self.return_df or override_return_df:return X_outelse:return X_out.values", "docstring": "Perform the transformation to new categorical data.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n\n Returns\n -------\n\n p : array, shape = [n_samples, n_numeric + N]\n Transformed values with encoding applied.", "id": "f3992:c0:m3"} {"signature": "def inverse_transform(self, X_in):", "body": "X = X_in.copy(deep=True)X = util.convert_input(X)if self._dim is None:raise ValueError('')X = self.basen_to_integer(X, self.cols, self.base)if X.shape[] != self._dim:if self.drop_invariant:raise ValueError(\"\"\"\" % (X.shape[],))else:raise ValueError('' % (X.shape[], self._dim,))if not self.cols:return X if self.return_df else X.valuesfor switch in self.ordinal_encoder.mapping:column_mapping = switch.get('')inverse = pd.Series(data=column_mapping.index, index=column_mapping.get_values())X[switch.get('')] = X[switch.get('')].map(inverse).astype(switch.get(''))if self.handle_unknown == '' and self.handle_missing == '':for col in self.cols:if X[switch.get('')].isnull().any():warnings.warn(\"\"\"\" % (col,))return X if self.return_df else X.values", "docstring": "Perform the inverse transformation to encoded data.\n\nParameters\n----------\nX_in : array-like, shape = 
[n_samples, n_features]\n\nReturns\n-------\np: array, the same size of X_in", "id": "f3992:c0:m4"} {"signature": "def basen_encode(self, X_in, cols=None):", "body": "X = X_in.copy(deep=True)cols = X.columns.values.tolist()for switch in self.mapping:col = switch.get('')mod = switch.get('')base_df = mod.reindex(X[col])base_df.set_index(X.index, inplace=True)X = pd.concat([base_df, X], axis=)old_column_index = cols.index(col)cols[old_column_index: old_column_index + ] = mod.columnsreturn X.reindex(columns=cols)", "docstring": "Basen encoding encodes the integers as basen code with one column per digit.\n\nParameters\n----------\nX_in: DataFrame\ncols: list-like, default None\n Column names in the DataFrame to be encoded\n\nReturns\n-------\ndummies : DataFrame", "id": "f3992:c0:m6"} {"signature": "def basen_to_integer(self, X, cols, base):", "body": "out_cols = X.columns.values.tolist()for col in cols:col_list = [col0 for col0 in out_cols if str(col0).startswith(str(col))]insert_at = out_cols.index(col_list[])if base == :value_array = np.array([int(col0.split('')[-]) for col0 in col_list])else:len0 = len(col_list)value_array = np.array([base ** (len0 - - i) for i in range(len0)])X.insert(insert_at, col, np.dot(X[col_list].values, value_array.T))X.drop(col_list, axis=, inplace=True)out_cols = X.columns.values.tolist()return X", "docstring": "Convert basen code to integers.\n\nParameters\n----------\nX : DataFrame\n encoded data\ncols : list-like\n Column names in the DataFrame to be encoded\nbase : int\n The base of transform\n\nReturns\n-------\nnumerical: DataFrame", "id": "f3992:c0:m7"} {"signature": "def col_transform(self, col, digits):", "body": "if col is None or float(col) < :return Noneelse:col = self.number_to_base(int(col), self.base, digits)if len(col) == digits:return colelse:return [ for _ in range(digits - len(col))] + col", "docstring": "The lambda body to transform the column values", "id": "f3992:c0:m8"} {"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):raise ValueError('')else:return self.feature_names", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n A list with all feature names transformed or added.\n Note: potentially dropped features are not included!", "id": "f3992:c0:m10"} {"signature": "def fit(self, X, y=None, **kwargs):", "body": "X = util.convert_input(X)self._dim = X.shape[]if self.cols is None:self.cols = util.get_obj_cols(X)else:self.cols = util.convert_cols_to_list(self.cols)if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')self.ordinal_encoder = OrdinalEncoder(verbose=self.verbose,cols=self.cols,handle_unknown='',handle_missing='')self.ordinal_encoder = self.ordinal_encoder.fit(X)ordinal_mapping = self.ordinal_encoder.category_mappingmappings_out = []for switch in ordinal_mapping:values = switch.get('')col = switch.get('')column_mapping = self.fit_polynomial_coding(col, values, self.handle_missing, self.handle_unknown)mappings_out.append({'': switch.get(''), '': column_mapping, })self.mapping = mappings_outX_temp = self.transform(X, override_return_df=True)self.feature_names = X_temp.columns.tolist()if self.drop_invariant:self.drop_cols = []generated_cols = util.get_generated_cols(X, X_temp, self.cols)self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= ]try:[self.feature_names.remove(x) for x in self.drop_cols]except KeyError as e:if self.verbose > :print(\"\"\"\".format(e))return self", 
"docstring": "Fit encoder according to X and y.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n\n self : encoder\n Returns self.", "id": "f3994:c0:m1"} {"signature": "def transform(self, X, override_return_df=False):", "body": "if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')if self._dim is None:raise ValueError('')X = util.convert_input(X)if X.shape[] != self._dim:raise ValueError('' % (X.shape[], self._dim, ))if not self.cols:return XX = self.ordinal_encoder.transform(X)if self.handle_unknown == '':if X[self.cols].isin([-]).any().any():raise ValueError('')X = self.polynomial_coding(X, self.mapping)if self.drop_invariant:for col in self.drop_cols:X.drop(col, , inplace=True)if self.return_df or override_return_df:return Xelse:return X.values", "docstring": "Perform the transformation to new categorical data.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n\n Returns\n -------\n\n p : array, shape = [n_samples, n_numeric + N]\n Transformed values with encoding applied.", "id": "f3994:c0:m2"} {"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):raise ValueError(\"\")else:return self.feature_names", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n A list with all feature names transformed or added.\n Note: potentially dropped features are not included!", "id": "f3994:c0:m5"} {"signature": "def fit(self, X, y=None, **kwargs):", "body": "X = util.convert_input(X)self._dim = X.shape[]if self.cols is None:self.cols = util.get_obj_cols(X)else:self.cols = util.convert_cols_to_list(self.cols)if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')self.ordinal_encoder = OrdinalEncoder(verbose=self.verbose,cols=self.cols,handle_unknown='',handle_missing='')self.ordinal_encoder = self.ordinal_encoder.fit(X)ordinal_mapping = self.ordinal_encoder.category_mappingmappings_out = []for switch in ordinal_mapping:values = switch.get('')col = switch.get('')column_mapping = self.fit_sum_coding(col, values, self.handle_missing, self.handle_unknown)mappings_out.append({'': switch.get(''), '': column_mapping, })self.mapping = mappings_outX_temp = self.transform(X, override_return_df=True)self.feature_names = X_temp.columns.tolist()if self.drop_invariant:self.drop_cols = []generated_cols = util.get_generated_cols(X, X_temp, self.cols)self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= ]try:[self.feature_names.remove(x) for x in self.drop_cols]except KeyError as e:if self.verbose > :print(\"\"\"\".format(e))return self", "docstring": "Fit encoder according to X and y.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n\n self : encoder\n Returns self.", "id": "f3995:c0:m1"} {"signature": "def transform(self, X, override_return_df=False):", "body": "if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')if self._dim is None:raise ValueError('')X = util.convert_input(X)if X.shape[] != self._dim:raise ValueError('' % (X.shape[], self._dim, ))if 
not self.cols:return XX = self.ordinal_encoder.transform(X)if self.handle_unknown == '':if X[self.cols].isin([-]).any().any():raise ValueError('')X = self.sum_coding(X, mapping=self.mapping)if self.drop_invariant:for col in self.drop_cols:X.drop(col, , inplace=True)if self.return_df or override_return_df:return Xelse:return X.values", "docstring": "Perform the transformation to new categorical data.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n\n Returns\n -------\n\n p : array, shape = [n_samples, n_numeric + N]\n Transformed values with encoding applied.", "id": "f3995:c0:m2"} {"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):raise ValueError(\"\")else:return self.feature_names", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n A list with all feature names transformed or added.\n Note: potentially dropped features are not included!", "id": "f3995:c0:m5"} {"signature": "def fit(self, X, y, **kwargs):", "body": "X = util.convert_input(X)y = util.convert_input_vector(y, X.index).astype(float)if X.shape[] != y.shape[]:raise ValueError(\"\" + str(X.shape[]) + \"\" + str(y.shape[]) + \"\")self._dim = X.shape[]if self.use_default_cols:self.cols = util.get_obj_cols(X)else:self.cols = util.convert_cols_to_list(self.cols)if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')categories = self.fit_leave_one_out(X, y,cols=self.cols)self.mapping = categoriesX_temp = self.transform(X, override_return_df=True)self.feature_names = X_temp.columns.tolist()if self.drop_invariant:self.drop_cols = []generated_cols = util.get_generated_cols(X, X_temp, self.cols)self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= ]try:[self.feature_names.remove(x) for x in self.drop_cols]except KeyError as e:if self.verbose > :print(\"\"\"\".format(e))return self", "docstring": "Fit encoder according to X and y.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n\n self : encoder\n Returns self.", "id": "f3996:c0:m1"} {"signature": "def transform(self, X, y=None, override_return_df=False):", "body": "if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')if self._dim is None:raise ValueError('')X = util.convert_input(X)if X.shape[] != self._dim:raise ValueError('' % (X.shape[], self._dim,))if y is not None:y = util.convert_input_vector(y, X.index).astype(float)if X.shape[] != y.shape[]:raise ValueError(\"\" + str(X.shape[]) + \"\" + str(y.shape[]) + \"\")if not self.cols:return XX = self.transform_leave_one_out(X, y,mapping=self.mapping)if self.drop_invariant:for col in self.drop_cols:X.drop(col, , inplace=True)if self.return_df or override_return_df:return Xelse:return X.values", "docstring": "Perform the transformation to new categorical data.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n y : array-like, shape = [n_samples] when transform by leave one out\n None, when transform without target information (such as transform test set)\n\n\n\n Returns\n -------\n\n p : array, shape = [n_samples, n_numeric + N]\n Transformed values with encoding applied.", "id": "f3996:c0:m2"} {"signature": "def fit_transform(self, X, y=None, **fit_params):", "body": "return 
self.fit(X, y, **fit_params).transform(X, y)", "docstring": "Encoders that utilize the target must make sure that the training data are transformed with:\n transform(X, y)\nand not with:\n transform(X)", "id": "f3996:c0:m3"} {"signature": "def transform_leave_one_out(self, X_in, y, mapping=None):", "body": "X = X_in.copy(deep=True)random_state_ = check_random_state(self.random_state)for col, colmap in mapping.items():level_notunique = colmap[''] > unique_train = colmap.indexunseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train])is_nan = X[col].isnull()is_unknown_value = X[col].isin(unseen_values.dropna())if self.handle_unknown == '' and is_unknown_value.any():raise ValueError('')if y is None: level_means = (colmap[''] / colmap['']).where(level_notunique, self._mean)X[col] = X[col].map(level_means)else: level_means = (X[col].map(colmap['']) - y) / (X[col].map(colmap['']) - )X[col] = level_means.where(X[col].map(colmap[''][level_notunique]).notnull(), self._mean)if self.handle_unknown == '':X.loc[is_unknown_value, col] = self._meanelif self.handle_unknown == '':X.loc[is_unknown_value, col] = np.nanif self.handle_missing == '':X.loc[is_nan & unseen_values.isnull().any(), col] = self._meanelif self.handle_missing == '':X.loc[is_nan, col] = np.nanif self.sigma is not None and y is not None:X[col] = X[col] * random_state_.normal(, self.sigma, X[col].shape[])return X", "docstring": "Leave one out encoding uses a single column of floats to represent the means of the target variables.", "id": "f3996:c0:m6"} {"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):raise ValueError('')else:return self.feature_names", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n A list with all feature names transformed or added.\n Note: potentially dropped features are not included!", "id": "f3996:c0:m7"} {"signature": "def fit(self, X, y, **kwargs):", "body": "X = util.convert_input(X)y = util.convert_input_vector(y, X.index).astype(float)if X.shape[] != y.shape[]:raise ValueError(\"\" + str(X.shape[]) + \"\" + str(y.shape[]) + \"\")unique = y.unique()if len(unique) != :raise ValueError(\"\" + str(len(unique)) + \"\")if y.isnull().any():raise ValueError(\"\")if np.max(unique) < :raise ValueError(\"\")if np.min(unique) > :raise ValueError(\"\")self._dim = X.shape[]if self.cols is None:self.cols = util.get_obj_cols(X)else:self.cols = util.convert_cols_to_list(self.cols)if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')self.ordinal_encoder = OrdinalEncoder(verbose=self.verbose,cols=self.cols,handle_unknown='',handle_missing='')self.ordinal_encoder = self.ordinal_encoder.fit(X)X_ordinal = self.ordinal_encoder.transform(X)self.mapping = self._train(X_ordinal, y)X_temp = self.transform(X, override_return_df=True)self.feature_names = X_temp.columns.tolist()if self.drop_invariant:self.drop_cols = []generated_cols = util.get_generated_cols(X, X_temp, self.cols)self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= ]try:[self.feature_names.remove(x) for x in self.drop_cols]except KeyError as e:if self.verbose > :print(\"\"\"\".format(e))return self", "docstring": "Fit encoder according to X and binary y.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape = [n_samples]\n Binary target 
values.\n\n Returns\n -------\n\n self : encoder\n Returns self.", "id": "f3997:c0:m1"} {"signature": "def transform(self, X, y=None, override_return_df=False):", "body": "if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')if self._dim is None:raise ValueError('')X = util.convert_input(X)if X.shape[] != self._dim:raise ValueError('' % (X.shape[], self._dim,))if y is not None:y = util.convert_input_vector(y, X.index).astype(float)if X.shape[] != y.shape[]:raise ValueError(\"\" + str(X.shape[]) + \"\" + str(y.shape[]) + \"\")if not self.cols:return XX = X.copy(deep=True)X = self.ordinal_encoder.transform(X)if self.handle_unknown == '':if X[self.cols].isin([-]).any().any():raise ValueError('')X = self._score(X, y)if self.drop_invariant:for col in self.drop_cols:X.drop(col, , inplace=True)if self.return_df or override_return_df:return Xelse:return X.values", "docstring": "Perform the transformation to new categorical data. When the data are used for model training,\n it is important to also pass the target in order to apply leave one out.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n y : array-like, shape = [n_samples] when transform by leave one out\n None, when transform without target information (such as transform test set)\n\n Returns\n -------\n\n p : array, shape = [n_samples, n_numeric + N]\n Transformed values with encoding applied.", "id": "f3997:c0:m2"} {"signature": "def fit_transform(self, X, y=None, **fit_params):", "body": "return self.fit(X, y, **fit_params).transform(X, y)", "docstring": "Encoders that utilize the target must make sure that the training data are transformed with:\n transform(X, y)\nand not with:\n transform(X)", "id": "f3997:c0:m3"} {"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):raise ValueError(\"\")else:return self.feature_names", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n A list with all feature names transformed or added.\n Note: potentially dropped features are not included!", "id": "f3997:c0:m6"} {"signature": "def fit(self, X, y=None, **kwargs):", "body": "X = util.convert_input(X)self._dim = X.shape[]if self.cols is None:self.cols = util.get_obj_cols(X)else:self.cols = util.convert_cols_to_list(self.cols)if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')self.ordinal_encoder = OrdinalEncoder(verbose=self.verbose,cols=self.cols,handle_unknown='',handle_missing='')self.ordinal_encoder = self.ordinal_encoder.fit(X)self.mapping = self.generate_mapping()X_temp = self.transform(X, override_return_df=True)self.feature_names = list(X_temp.columns)if self.drop_invariant:self.drop_cols = []generated_cols = util.get_generated_cols(X, X_temp, self.cols)self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= ]try:[self.feature_names.remove(x) for x in self.drop_cols]except KeyError as e:if self.verbose > :print(\"\"\"\".format(e))return self", "docstring": "Fit encoder according to X and y.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n\n self : encoder\n Returns self.", "id": "f3998:c0:m2"} {"signature": "def transform(self, X, override_return_df=False):", "body": "if self.handle_missing == '':if 
X[self.cols].isnull().any().bool():raise ValueError('')if self._dim is None:raise ValueError('')X = util.convert_input(X)if X.shape[] != self._dim:raise ValueError('' % (X.shape[], self._dim, ))if not self.cols:return X if self.return_df else X.valuesX = self.ordinal_encoder.transform(X)if self.handle_unknown == '':if X[self.cols].isin([-]).any().any():raise ValueError('')X = self.get_dummies(X)if self.drop_invariant:for col in self.drop_cols:X.drop(col, , inplace=True)if self.return_df or override_return_df:return Xelse:return X.values", "docstring": "Perform the transformation to new categorical data.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n\n Returns\n -------\n\n p : array, shape = [n_samples, n_numeric + N]\n Transformed values with encoding applied.", "id": "f3998:c0:m4"} {"signature": "def inverse_transform(self, X_in):", "body": "X = X_in.copy(deep=True)X = util.convert_input(X)if self._dim is None:raise ValueError('')X = self.reverse_dummies(X, self.mapping)if X.shape[] != self._dim:if self.drop_invariant:raise ValueError(\"\"\"\" % (X.shape[],))else:raise ValueError('' % (X.shape[], self._dim, ))if not self.cols:return X if self.return_df else X.valuesfor switch in self.ordinal_encoder.mapping:column_mapping = switch.get('')inverse = pd.Series(data=column_mapping.index, index=column_mapping.get_values())X[switch.get('')] = X[switch.get('')].map(inverse).astype(switch.get(''))if self.handle_unknown == '' and self.handle_missing == '':for col in self.cols:if X[switch.get('')].isnull().any():warnings.warn(\"\"\"\" % (col,))return X if self.return_df else X.values", "docstring": "Perform the inverse transformation to encoded data.\n\nParameters\n----------\nX_in : array-like, shape = [n_samples, n_features]\n\nReturns\n-------\np: array, the same size of X_in", "id": "f3998:c0:m5"} {"signature": "def get_dummies(self, X_in):", "body": "X = X_in.copy(deep=True)cols = X.columns.values.tolist()for switch in self.mapping:col = switch.get('')mod = switch.get('')base_df = mod.reindex(X[col])base_df = base_df.set_index(X.index)X = pd.concat([base_df, X], axis=)old_column_index = cols.index(col)cols[old_column_index: old_column_index + ] = mod.columnsX = X.reindex(columns=cols)return X", "docstring": "Convert numerical variable into dummy variables\n\nParameters\n----------\nX_in: DataFrame\n\nReturns\n-------\ndummies : DataFrame", "id": "f3998:c0:m6"} {"signature": "def reverse_dummies(self, X, mapping):", "body": "out_cols = X.columns.values.tolist()mapped_columns = []for switch in mapping:col = switch.get('')mod = switch.get('')insert_at = out_cols.index(mod.columns[])X.insert(insert_at, col, )positive_indexes = mod.index[mod.index > ]for i in range(positive_indexes.shape[]):existing_col = mod.columns[i]val = positive_indexes[i]X.loc[X[existing_col] == , col] = valmapped_columns.append(existing_col)X.drop(mod.columns, axis=, inplace=True)out_cols = X.columns.values.tolist()return X", "docstring": "Convert dummy variable into numerical variables\n\nParameters\n----------\nX : DataFrame\nmapping: list-like\n Contains mappings of column to be transformed to its new columns and value represented\n\nReturns\n-------\nnumerical: DataFrame", "id": "f3998:c0:m7"} {"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):raise ValueError('')else:return self.feature_names", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n A list with all feature names 
transformed or added.\n Note: potentially dropped features are not included!", "id": "f3998:c0:m8"} {"signature": "def get_obj_cols(df):", "body": "obj_cols = []for idx, dt in enumerate(df.dtypes):if dt == '' or is_category(dt):obj_cols.append(df.columns.values[idx])return obj_cols", "docstring": "Returns names of 'object' columns in the DataFrame.", "id": "f3999:m1"} {"signature": "def convert_input(X):", "body": "if not isinstance(X, pd.DataFrame):if isinstance(X, list):X = pd.DataFrame(X)elif isinstance(X, (np.generic, np.ndarray)):X = pd.DataFrame(X)elif isinstance(X, csr_matrix):X = pd.DataFrame(X.todense())elif isinstance(X, pd.Series):X = pd.DataFrame(X)else:raise ValueError('' % (str(type(X))))X = X.apply(lambda x: pd.to_numeric(x, errors=''))return X", "docstring": "Unite data into a DataFrame.", "id": "f3999:m3"} {"signature": "def convert_input_vector(y, index):", "body": "if y is None:return Noneif isinstance(y, pd.Series):return yelif isinstance(y, np.ndarray):if len(np.shape(y))==: return pd.Series(y, name='', index=index)elif len(np.shape(y))== and np.shape(y)[]==: return pd.Series(y[, :], name='', index=index)elif len(np.shape(y))== and np.shape(y)[]==: return pd.Series(y[:, ], name='', index=index)else:raise ValueError('' % (str(np.shape(y))))elif np.isscalar(y):return pd.Series([y], name='', index=index)elif isinstance(y, list):if len(y)== or (len(y)> and not isinstance(y[], list)): return pd.Series(y, name='', index=index)elif len(y)> and isinstance(y[], list) and len(y[])==: flatten = lambda y: [item for sublist in y for item in sublist]return pd.Series(flatten(y), name='', index=index)elif len(y)== and isinstance(y[], list): return pd.Series(y[], name='', index=index)else:raise ValueError('')elif isinstance(y, pd.DataFrame):if len(list(y))==: return pd.Series(y, name='')if len(list(y))==: return y.iloc[:, ]else:raise ValueError('' % (str(y.shape)))else:return pd.Series(y, name='', index=index)", "docstring": "Unite target data type into a Series.\nIf the target is a Series or a DataFrame, we preserve its index.\nBut if the target does not contain index attribute, we use the index from the argument.", "id": "f3999:m4"} {"signature": "def get_generated_cols(X_original, X_transformed, to_transform):", "body": "original_cols = list(X_original.columns)if len(to_transform) > :[original_cols.remove(c) for c in to_transform]current_cols = list(X_transformed.columns)if len(original_cols) > :[current_cols.remove(c) for c in original_cols]return current_cols", "docstring": "Returns a list of the generated/transformed columns.\n\nArguments:\n X_original: df\n the original (input) DataFrame.\n X_transformed: df\n the transformed (current) DataFrame.\n to_transform: [str]\n a list of columns that were transformed (as in the original DataFrame), commonly self.cols.\n\nOutput:\n a list of columns that were transformed (as in the current DataFrame).", "id": "f3999:m5"} {"signature": "def fit(self, X, y=None, **kwargs):", "body": "X = util.convert_input(X)self._dim = X.shape[]if self.cols is None:self.cols = util.get_obj_cols(X)else:self.cols = util.convert_cols_to_list(self.cols)X_temp = self.transform(X, override_return_df=True)self.feature_names = X_temp.columns.tolist()if self.drop_invariant:self.drop_cols = []generated_cols = util.get_generated_cols(X, X_temp, self.cols)self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= ]try:[self.feature_names.remove(x) for x in self.drop_cols]except KeyError as e:if self.verbose > :print(\"\"\"\".format(e))return self", 
"docstring": "Fit encoder according to X and y.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n\n self : encoder\n Returns self.", "id": "f4000:c0:m1"} {"signature": "def transform(self, X, override_return_df=False):", "body": "if self._dim is None:raise ValueError('')X = util.convert_input(X)if X.shape[] != self._dim:raise ValueError('' % (X.shape[], self._dim, ))if not self.cols:return XX = self.hashing_trick(X, hashing_method=self.hash_method, N=self.n_components, cols=self.cols)if self.drop_invariant:for col in self.drop_cols:X.drop(col, , inplace=True)if self.return_df or override_return_df:return Xelse:return X.values", "docstring": "Perform the transformation to new categorical data.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n\n Returns\n -------\n\n p : array, shape = [n_samples, n_numeric + N]\n Transformed values with encoding applied.", "id": "f4000:c0:m2"} {"signature": "@staticmethoddef hashing_trick(X_in, hashing_method='', N=, cols=None, make_copy=False):", "body": "try:if hashing_method not in hashlib.algorithms_available:raise ValueError('' % (hashing_method,''.join([str(x) for x in hashlib.algorithms_available])))except Exception as e:try:_ = hashlib.new(hashing_method)except Exception as e:raise ValueError('')if make_copy:X = X_in.copy(deep=True)else:X = X_inif cols is None:cols = X.columns.valuesdef hash_fn(x):tmp = [ for _ in range(N)]for val in x.values:if val is not None:hasher = hashlib.new(hashing_method)if sys.version_info[] == :hasher.update(str(val))else:hasher.update(bytes(str(val), ''))tmp[int(hasher.hexdigest(), ) % N] += return pd.Series(tmp, index=new_cols)new_cols = ['' % d for d in range(N)]X_cat = X.loc[:, cols]X_num = X.loc[:, [x for x in X.columns.values if x not in cols]]X_cat = X_cat.apply(hash_fn, axis=)X_cat.columns = new_colsX = pd.concat([X_cat, X_num], axis=)return X", "docstring": "A basic hashing implementation with configurable dimensionality/precision\n\n Performs the hashing trick on a pandas dataframe, `X`, using the hashing method from hashlib\n identified by `hashing_method`. The number of output dimensions (`N`), and columns to hash (`cols`) are\n also configurable.\n\n Parameters\n ----------\n\n X_in: pandas dataframe\n description text\n hashing_method: string, optional\n description text\n N: int, optional\n description text\n cols: list, optional\n description text\n make_copy: bool, optional\n description text\n\n Returns\n -------\n\n out : dataframe\n A hashing encoded dataframe.\n\n References\n ----------\n Cite the relevant literature, e.g. [1]_. You may also cite these\n references in the notes section above.\n .. [1] Kilian Weinberger; Anirban Dasgupta; John Langford; Alex Smola; Josh Attenberg (2009). Feature Hashing\n for Large Scale Multitask Learning. Proc. 
ICML.", "id": "f4000:c0:m3"} {"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):raise ValueError('')else:return self.feature_names", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n A list with all feature names transformed or added.\n Note: potentially dropped features are not included!", "id": "f4000:c0:m4"} {"signature": "def fit(self, X, y, **kwargs):", "body": "X = util.convert_input(X)y = util.convert_input_vector(y, X.index).astype(float)if X.shape[] != y.shape[]:raise ValueError(\"\" + str(X.shape[]) + \"\" + str(y.shape[]) + \"\")self._dim = X.shape[]if self.cols is None:self.cols = util.get_obj_cols(X)else:self.cols = util.convert_cols_to_list(self.cols)if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')self.ordinal_encoder = OrdinalEncoder(verbose=self.verbose,cols=self.cols,handle_unknown='',handle_missing='')self.ordinal_encoder = self.ordinal_encoder.fit(X)X_ordinal = self.ordinal_encoder.transform(X)self.mapping = self._train(X_ordinal, y)X_temp = self.transform(X, override_return_df=True)self.feature_names = X_temp.columns.tolist()if self.drop_invariant:self.drop_cols = []generated_cols = util.get_generated_cols(X, X_temp, self.cols)self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= ]try:[self.feature_names.remove(x) for x in self.drop_cols]except KeyError as e:if self.verbose > :print(\"\"\"\".format(e))return self", "docstring": "Fit encoder according to X and binary y.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape = [n_samples]\n Binary target values.\n\n Returns\n -------\n\n self : encoder\n Returns self.", "id": "f4001:c0:m1"} {"signature": "def transform(self, X, y=None, override_return_df=False):", "body": "if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')if self._dim is None:raise ValueError('')X = util.convert_input(X)if X.shape[] != self._dim:raise ValueError('' % (X.shape[], self._dim,))if y is not None:y = util.convert_input_vector(y, X.index).astype(float)if X.shape[] != y.shape[]:raise ValueError(\"\" + str(X.shape[]) + \"\" + str(y.shape[]) + \"\")if not self.cols:return XX = X.copy(deep=True)X = self.ordinal_encoder.transform(X)if self.handle_unknown == '':if X[self.cols].isin([-]).any().any():raise ValueError('')X = self._score(X, y)if self.drop_invariant:for col in self.drop_cols:X.drop(col, , inplace=True)if self.return_df or override_return_df:return Xelse:return X.values", "docstring": "Perform the transformation to new categorical data.\n\n When the data are used for model training, it is important to also pass the target in order to apply leave one out.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n y : array-like, shape = [n_samples] when transform by leave one out\n None, when transform without target information (such as transform test set)\n\n\n Returns\n -------\n\n p : array, shape = [n_samples, n_numeric + N]\n Transformed values with encoding applied.", "id": "f4001:c0:m2"} {"signature": "def fit_transform(self, X, y=None, **fit_params):", "body": "return self.fit(X, y, **fit_params).transform(X, y)", "docstring": "Encoders that utilize the target must make sure that the training data are transformed with:\n transform(X, y)\nand not with:\n 
transform(X)", "id": "f4001:c0:m3"} {"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):raise ValueError(\"\")else:return self.feature_names", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n A list with all feature names transformed or added.\n Note: potentially dropped features are not included!", "id": "f4001:c0:m6"} {"signature": "def fit(self, X, y=None, **kwargs):", "body": "X = util.convert_input(X)self._dim = X.shape[]if self.cols is None:self.cols = util.get_obj_cols(X)else:self.cols = util.convert_cols_to_list(self.cols)if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')self.ordinal_encoder = OrdinalEncoder(verbose=self.verbose,cols=self.cols,handle_unknown='',handle_missing='')self.ordinal_encoder = self.ordinal_encoder.fit(X)ordinal_mapping = self.ordinal_encoder.category_mappingmappings_out = []for switch in ordinal_mapping:values = switch.get('')col = switch.get('')column_mapping = self.fit_helmert_coding(col, values, self.handle_missing, self.handle_unknown)mappings_out.append({'': col, '': column_mapping, })self.mapping = mappings_outX_temp = self.transform(X, override_return_df=True)self.feature_names = X_temp.columns.tolist()if self.drop_invariant:self.drop_cols = []generated_cols = util.get_generated_cols(X, X_temp, self.cols)self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= ]try:[self.feature_names.remove(x) for x in self.drop_cols]except KeyError as e:if self.verbose > :print(\"\"\"\".format(e))return self", "docstring": "Fit encoder according to X and y.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n\n self : encoder\n Returns self.", "id": "f4002:c0:m1"} {"signature": "def transform(self, X, override_return_df=False):", "body": "if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')if self._dim is None:raise ValueError('')X = util.convert_input(X)if X.shape[] != self._dim:raise ValueError('' % (X.shape[], self._dim, ))if not self.cols:return XX = self.ordinal_encoder.transform(X)if self.handle_unknown == '':if X[self.cols].isin([-]).any().any():raise ValueError('')X = self.helmert_coding(X, mapping=self.mapping)if self.drop_invariant:for col in self.drop_cols:X.drop(col, , inplace=True)if self.return_df or override_return_df:return Xelse:return X.values", "docstring": "Perform the transformation to new categorical data.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n\n Returns\n -------\n\n p : array, shape = [n_samples, n_numeric + N]\n Transformed values with encoding applied.", "id": "f4002:c0:m2"} {"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):raise ValueError('')else:return self.feature_names", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n A list with all feature names transformed or added.\n Note: potentially dropped features are not included!", "id": "f4002:c0:m5"} {"signature": "def fit(self, X, y, **kwargs):", "body": "X = util.convert_input(X)y = util.convert_input_vector(y, X.index).astype(float)if X.shape[] != y.shape[]:raise ValueError(\"\" + str(X.shape[]) + \"\" + str(y.shape[]) + \"\")self._dim = 
X.shape[]if self.cols is None:self.cols = util.get_obj_cols(X)else:self.cols = util.convert_cols_to_list(self.cols)if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')self.ordinal_encoder = OrdinalEncoder(verbose=self.verbose,cols=self.cols,handle_unknown='',handle_missing='')self.ordinal_encoder = self.ordinal_encoder.fit(X)X_ordinal = self.ordinal_encoder.transform(X)if self.model == '':self.mapping = self._train_independent(X_ordinal, y)elif self.model == '':self.mapping = self._train_pooled(X_ordinal, y)elif self.model == '':self.mapping = self._train_beta(X_ordinal, y)elif self.model == '':unique = y.unique()if len(unique) != :raise ValueError(\"\" + str(len(unique)) + \"\")if y.isnull().any():raise ValueError(\"\")if np.max(unique) < :raise ValueError(\"\")if np.min(unique) > :raise ValueError(\"\")self.mapping = self._train_log_odds_ratio(X_ordinal, y)else:raise ValueError(\"\" + str(self.model) + \"\")X_temp = self.transform(X, override_return_df=True)self.feature_names = X_temp.columns.tolist()if self.drop_invariant:self.drop_cols = []generated_cols = util.get_generated_cols(X, X_temp, self.cols)self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= ]try:[self.feature_names.remove(x) for x in self.drop_cols]except KeyError as e:if self.verbose > :print(\"\"\"\".format(e))return self", "docstring": "Fit encoder according to X and binary y.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape = [n_samples]\n Binary target values.\n\n Returns\n -------\n\n self : encoder\n Returns self.", "id": "f4003:c0:m1"} {"signature": "def transform(self, X, y=None, override_return_df=False):", "body": "if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')if self._dim is None:raise ValueError('')X = util.convert_input(X)if X.shape[] != self._dim:raise ValueError('' % (X.shape[], self._dim,))if y is not None:y = util.convert_input_vector(y, X.index).astype(float)if X.shape[] != y.shape[]:raise ValueError(\"\" + str(X.shape[]) + \"\" + str(y.shape[]) + \"\")if not self.cols:return XX = X.copy(deep=True)X = self.ordinal_encoder.transform(X)if self.handle_unknown == '':if X[self.cols].isin([-]).any().any():raise ValueError('')X = self._score(X, y)if self.drop_invariant:for col in self.drop_cols:X.drop(col, , inplace=True)if self.return_df or override_return_df:return Xelse:return X.values", "docstring": "Perform the transformation to new categorical data. 
When the data are used for model training,\n it is important to also pass the target in order to apply leave one out.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n y : array-like, shape = [n_samples] when transform by leave one out\n None, when transform without target information (such as transform test set)\n\n\n\n Returns\n -------\n\n p : array, shape = [n_samples, n_numeric + N]\n Transformed values with encoding applied.", "id": "f4003:c0:m2"} {"signature": "def fit_transform(self, X, y=None, **fit_params):", "body": "return self.fit(X, y, **fit_params).transform(X, y)", "docstring": "Encoders that utilize the target must make sure that the training data are transformed with:\n transform(X, y)\nand not with:\n transform(X)", "id": "f4003:c0:m3"} {"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):raise ValueError(\"\")else:return self.feature_names", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n A list with all feature names transformed or added.\n Note: potentially dropped features are not included!", "id": "f4003:c0:m9"} {"signature": "def fit(self, X, y, **kwargs):", "body": "X = util.convert_input(X)y = util.convert_input_vector(y, X.index).astype(float)if X.shape[] != y.shape[]:raise ValueError(\"\" + str(X.shape[]) + \"\" + str(y.shape[]) + \"\")self._dim = X.shape[]if self.use_default_cols:self.cols = util.get_obj_cols(X)else:self.cols = util.convert_cols_to_list(self.cols)if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')categories = self.fit_leave_one_out(X, y,cols=self.cols)self.mapping = categoriesX_temp = self.transform(X, y, override_return_df=True)self.feature_names = X_temp.columns.tolist()if self.drop_invariant:self.drop_cols = []generated_cols = util.get_generated_cols(X, X_temp, self.cols)self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= ]try:[self.feature_names.remove(x) for x in self.drop_cols]except KeyError as e:if self.verbose > :print(\"\"\"\".format(e))return self", "docstring": "Fit encoder according to X and y.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n\n self : encoder\n Returns self.", "id": "f4004:c0:m1"} {"signature": "def transform(self, X, y=None, override_return_df=False):", "body": "if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')if self._dim is None:raise ValueError('')X = util.convert_input(X)if X.shape[] != self._dim:raise ValueError('' % (X.shape[], self._dim,))if y is not None:y = util.convert_input_vector(y, X.index).astype(float)if X.shape[] != y.shape[]:raise ValueError(\"\" + str(X.shape[]) + \"\" + str(y.shape[]) + \"\")if not self.cols:return XX = self.transform_leave_one_out(X, y,mapping=self.mapping)if self.drop_invariant:for col in self.drop_cols:X.drop(col, , inplace=True)if self.return_df or override_return_df:return Xelse:return X.values", "docstring": "Perform the transformation to new categorical data.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n y : array-like, shape = [n_samples] when transform by leave one out\n None, when transform without target information (such as transform test set)\n\n\n\n Returns\n -------\n\n p : array, shape = 
[n_samples, n_numeric + N]\n Transformed values with encoding applied.", "id": "f4004:c0:m2"} {"signature": "def fit_transform(self, X, y=None, **fit_params):", "body": "return self.fit(X, y, **fit_params).transform(X, y)", "docstring": "Encoders that utilize the target must make sure that the training data are transformed with:\n transform(X, y)\nand not with:\n transform(X)", "id": "f4004:c0:m3"} {"signature": "def transform_leave_one_out(self, X_in, y, mapping=None):", "body": "X = X_in.copy(deep=True)random_state_ = check_random_state(self.random_state)if y is not None:y = y.astype('')for cat_col in X.select_dtypes('').columns.values:X[cat_col] = X[cat_col].cat.add_categories(-)X = X.fillna(-)for col, colmap in mapping.items():level_notunique = colmap[''] > unique_train = colmap.indexunseen_values = pd.Series([x for x in X_in[col].unique() if x not in unique_train])is_nan = X_in[col].isnull()is_unknown_value = X_in[col].isin(unseen_values.dropna())if self.handle_unknown == '' and is_unknown_value.any():raise ValueError('')if y is None: level_means = ((colmap[''] + self._mean) / (colmap[''] + )).where(level_notunique, self._mean)X[col] = X[col].map(level_means)else:temp = y.groupby(X[col]).agg(['', ''])X[col] = (temp[''] - y + self._mean) / (temp[''] + )if self.handle_unknown == '':X.loc[is_unknown_value, col] = self._meanelif self.handle_unknown == '':X.loc[is_unknown_value, col] = np.nanif self.handle_missing == '':X.loc[is_nan & unseen_values.isnull().any(), col] = self._meanelif self.handle_missing == '':X.loc[is_nan, col] = np.nanif self.sigma is not None and y is not None:X[col] = X[col] * random_state_.normal(, self.sigma, X[col].shape[])return X", "docstring": "Leave one out encoding uses a single column of floats to represent the means of the target variables.", "id": "f4004:c0:m6"} {"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):raise ValueError('')else:return self.feature_names", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n A list with all feature names transformed or added.\n Note: potentially dropped features are not included!", "id": "f4004:c0:m7"} {"signature": "def fit(self, X, y=None, **kwargs):", "body": "self.base_n_encoder.fit(X, y, **kwargs)return self", "docstring": "Fit encoder according to X and y.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n\n self : encoder\n Returns self.", "id": "f4005:c0:m1"} {"signature": "def transform(self, X, override_return_df=False):", "body": "return self.base_n_encoder.transform(X)", "docstring": "Perform the transformation to new categorical data.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n\n Returns\n -------\n\n p : array, shape = [n_samples, n_numeric + N]\n Transformed values with encoding applied.", "id": "f4005:c0:m2"} {"signature": "def inverse_transform(self, X_in):", "body": "return self.base_n_encoder.inverse_transform(X_in)", "docstring": "Perform the inverse transformation to encoded data.\n\nParameters\n----------\nX_in : array-like, shape = [n_samples, n_features]\n\nReturns\n-------\np: array, the same size of X_in", "id": "f4005:c0:m3"} {"signature": "def get_feature_names(self):", "body": "return self.base_n_encoder.get_feature_names()", 
"docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n A list with all feature names transformed or added.\n Note: potentially dropped features are not included!", "id": "f4005:c0:m4"} {"signature": "def fit(self, X, y=None, **kwargs):", "body": "X = util.convert_input(X)self._dim = X.shape[]if self.cols is None:self.cols = util.get_obj_cols(X)else:self.cols = util.convert_cols_to_list(self.cols)if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')self.ordinal_encoder = OrdinalEncoder(verbose=self.verbose,cols=self.cols,handle_unknown='',handle_missing='')self.ordinal_encoder = self.ordinal_encoder.fit(X)ordinal_mapping = self.ordinal_encoder.category_mappingmappings_out = []for switch in ordinal_mapping:values = switch.get('')col = switch.get('')column_mapping = self.fit_backward_difference_coding(col, values, self.handle_missing, self.handle_unknown)mappings_out.append({'': col, '': column_mapping, })self.mapping = mappings_outX_temp = self.transform(X, override_return_df=True)self.feature_names = X_temp.columns.tolist()if self.drop_invariant:self.drop_cols = []generated_cols = util.get_generated_cols(X, X_temp, self.cols)self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= ]try:[self.feature_names.remove(x) for x in self.drop_cols]except KeyError as e:if self.verbose > :print(\"\"\"\".format(e))return self", "docstring": "Fits an ordinal encoder to produce a consistent mapping across applications and optionally finds\n generally invariant columns to drop consistently.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n\n self : encoder\n Returns self.", "id": "f4006:c0:m1"} {"signature": "def transform(self, X, override_return_df=False):", "body": "if self.handle_missing == '':if X[self.cols].isnull().any().bool():raise ValueError('')if self._dim is None:raise ValueError('')X = util.convert_input(X)if X.shape[] != self._dim:raise ValueError('' % (X.shape[], self._dim, ))if not self.cols:return XX = self.ordinal_encoder.transform(X)if self.handle_unknown == '':if X[self.cols].isin([-]).any().any():raise ValueError('')X = self.backward_difference_coding(X, mapping=self.mapping)if self.drop_invariant:for col in self.drop_cols:X.drop(col, , inplace=True)if self.return_df or override_return_df:return Xelse:return X.values", "docstring": "Perform the transformation to new categorical data.\n\n Parameters\n ----------\n\n X : array-like, shape = [n_samples, n_features]\n\n Returns\n -------\n\n p : array, shape = [n_samples, n_numeric + N]\n Transformed values with encoding applied.", "id": "f4006:c0:m2"} {"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):raise ValueError('')else:return self.feature_names", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n A list with all feature names transformed or added.\n Note: potentially dropped features are not included!", "id": "f4006:c0:m5"} {"signature": "def djeffify_string(string_to_djeff):", "body": "string_to_djeff = re.sub(r'', '', string_to_djeff, flags=re.IGNORECASE) string_to_djeff = re.sub(r'', '', string_to_djeff, flags=re.IGNORECASE) string_to_djeff = re.sub(r'', '', string_to_djeff, flags=re.IGNORECASE) return 
string_to_djeff", "docstring": "Djeffifies string_to_djeff", "id": "f4011:m0"} {"signature": "def djeffify_html(rendered_string):", "body": "parser = DjeffParser()parser.feed(rendered_string)return parser.djhtml", "docstring": "This function contains the core logic for a\nmiddleware, template tag or Template engine approach", "id": "f4011:m1"} {"signature": "def __init__(self, convert_charrefs=True, *args, **kwargs):", "body": "try:HTMLParser.__init__(self, convert_charrefs=convert_charrefs)except TypeError:HTMLParser.__init__(self)self.djhtml = ''", "docstring": "Explicitly set convert_charrefs to keep deprecation warnings at bay.\n\nSee:\nhttps://docs.python.org/3/library/html.parser.html#html.parser.HTMLParser", "id": "f4011:c2:m0"} {"signature": "def handle_data(self, data):", "body": "if data.strip():data = djeffify_string(data)self.djhtml += data", "docstring": "Djeffify data between tags", "id": "f4011:c2:m3"} {"signature": "def load_key(pubkey):", "body": "try:return load_pem_public_key(pubkey.encode(), default_backend())except ValueError:pubkey = pubkey.replace('', '').replace('', '')return load_pem_public_key(pubkey.encode(), default_backend())", "docstring": "Load public RSA key, with work-around for keys using\n incorrect header/footer format.\n\n Read more about RSA encryption with cryptography:\n https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/", "id": "f4013:m0"} {"signature": "def encrypt(pubkey, password):", "body": "key = load_key(pubkey)encrypted_password = key.encrypt(password, PKCS1v15())return base64.b64encode(encrypted_password)", "docstring": "Encrypt password using given RSA public key and encode it with base64.\n\n The encrypted password can only be decrypted by someone with the\n private key (in this case, only Travis).", "id": "f4013:m1"} {"signature": "def fetch_public_key(repo):", "body": "keyurl = ''.format(repo)data = json.loads(urlopen(keyurl).read().decode())if '' not in data:errmsg = \"\".format(repo)errmsg += \"\"raise ValueError(errmsg)return data['']", "docstring": "Download RSA public key Travis will use for this repo.\n\n Travis API docs: http://docs.travis-ci.com/api/#repository-keys", "id": "f4013:m2"} {"signature": "def prepend_line(filepath, line):", "body": "with open(filepath) as f:lines = f.readlines()lines.insert(, line)with open(filepath, '') as f:f.writelines(lines)", "docstring": "Rewrite a file adding a line to its beginning.", "id": "f4013:m3"} {"signature": "def update_travis_deploy_password(encrypted_password):", "body": "config = load_yaml_config(TRAVIS_CONFIG_FILE)config[''][''] = dict(secure=encrypted_password)save_yaml_config(TRAVIS_CONFIG_FILE, config)line = ('''')prepend_line(TRAVIS_CONFIG_FILE, line)", "docstring": "Update the deploy section of the .travis.yml file\n to use the given encrypted password.", "id": "f4013:m6"} {"signature": "def events(self, **kwargs):", "body": "return self.__api.events(query=EqualsOperator(\"\", self.hash_),**kwargs)", "docstring": "Get all events for this report. Additional arguments may also be\n specified that will be passed to the query function.", "id": "f4024:c1:m4"} {"signature": "def facts(self, **kwargs):", "body": "return self.__api.facts(query=EqualsOperator(\"\", self.name),**kwargs)", "docstring": "Get all facts of this node. 
Additional arguments may also be\n specified that will be passed to the query function.", "id": "f4024:c4:m4"} {"signature": "def fact(self, name):", "body": "facts = self.facts(name=name)return next(fact for fact in facts)", "docstring": "Get a single fact from this node.", "id": "f4024:c4:m5"} {"signature": "def resources(self, type_=None, title=None, **kwargs):", "body": "if type_ is None:resources = self.__api.resources(query=EqualsOperator(\"\", self.name),**kwargs)elif type_ is not None and title is None:resources = self.__api.resources(type_=type_,query=EqualsOperator(\"\", self.name),**kwargs)else:resources = self.__api.resources(type_=type_,title=title,query=EqualsOperator(\"\", self.name),**kwargs)return resources", "docstring": "Get all resources of this node or all resources of the specified\n type. Additional arguments may also be specified that will be passed\n to the query function.", "id": "f4024:c4:m6"} {"signature": "def resource(self, type_, title, **kwargs):", "body": "resources = self.__api.resources(type_=type_,title=title,query=EqualsOperator(\"\", self.name),**kwargs)return next(resource for resource in resources)", "docstring": "Get a resource matching the supplied type and title. Additional\n arguments may also be specified that will be passed to the query\n function.", "id": "f4024:c4:m7"} {"signature": "def reports(self, **kwargs):", "body": "return self.__api.reports(query=EqualsOperator(\"\", self.name),**kwargs)", "docstring": "Get all reports for this node. Additional arguments may also be\n specified that will be passed to the query function.", "id": "f4024:c4:m8"} {"signature": "def connect(host='', port=, ssl_verify=False, ssl_key=None,ssl_cert=None, timeout=, protocol=None, url_path='',username=None, password=None, token=None):", "body": "return BaseAPI(host=host, port=port,timeout=timeout, ssl_verify=ssl_verify, ssl_key=ssl_key,ssl_cert=ssl_cert, protocol=protocol, url_path=url_path,username=username, password=password, token=token)", "docstring": "Connect with PuppetDB. 
This will return an object allowing you\n to query the API through its methods.\n\n :param host: (Default: 'localhost') Hostname or IP of PuppetDB.\n :type host: :obj:`string`\n\n :param port: (Default: '8080') Port on which to talk to PuppetDB.\n :type port: :obj:`int`\n\n :param ssl_verify: (optional) Verify PuppetDB server certificate.\n :type ssl_verify: :obj:`bool` or :obj:`string` True, False or filesystem \\\n path to CA certificate.\n\n :param ssl_key: (optional) Path to our client secret key.\n :type ssl_key: :obj:`None` or :obj:`string` representing a filesystem\\\n path.\n\n :param ssl_cert: (optional) Path to our client certificate.\n :type ssl_cert: :obj:`None` or :obj:`string` representing a filesystem\\\n path.\n\n :param timeout: (Default: 10) Number of seconds to wait for a response.\n :type timeout: :obj:`int`\n\n :param protocol: (optional) Explicitly specify the protocol to be used\n (especially handy when using HTTPS with ssl_verify=False and\n without certs)\n :type protocol: :obj:`None` or :obj:`string`\n\n :param url_path: (Default: '/') The URL path where PuppetDB is served\n :type url_path: :obj:`None` or :obj:`string`\n\n :param username: (optional) The username to use for HTTP basic\n authentication\n :type username: :obj:`None` or :obj:`string`\n\n :param password: (optional) The password to use for HTTP basic\n authentication\n :type password: :obj:`None` or :obj:`string`\n\n :param token: (optional) The x-auth token to use for X-Authentication\n :type token: :obj:`None` or :obj:`string`", "id": "f4025:m0"} {"signature": "def json_to_datetime(date):", "body": "return datetime.datetime.strptime(date, '').replace(tzinfo=UTC())", "docstring": "Transforms a JSON datetime string into a timezone aware datetime\n object with a UTC tzinfo object.\n\n :param date: The datetime representation.\n :type date: :obj:`string`\n\n :returns: A timezone aware datetime object.\n :rtype: :class:`datetime.datetime`", "id": "f4027:m0"} {"signature": "def versioncmp(v1, v2):", "body": "def normalize(v):\"\"\"\"\"\"return [int(x) for x in re.sub(r'', '', v).split(\"\")]try:return cmp(normalize(v1), normalize(v2))except NameError:return (normalize(v1) > normalize(v2)) - (normalize(v1) < normalize(v2))", "docstring": "Compares two objects, x and y, and returns an integer according to the\n outcome. 
The return value is negative if x < y, zero if x == y and\n positive if x > y.\n\n :param v1: The first object to compare.\n :param v2: The second object to compare.\n\n :returns: -1, 0 or 1.\n :rtype: :obj:`int`", "id": "f4027:m1"} {"signature": "def __init__(self, host='', port=, ssl_verify=True,ssl_key=None, ssl_cert=None, timeout=, protocol=None,url_path=None, username=None, password=None, token=None):", "body": "self.api_version = ''self.host = hostself.port = portself.ssl_verify = ssl_verifyself.ssl_key = ssl_keyself.ssl_cert = ssl_certself.timeout = timeoutself.token = tokenif url_path:if not url_path.startswith(''):url_path = '' + url_pathif url_path.endswith(''):url_path = url_path[:-]else:url_path = ''self.url_path = url_pathif username and password:self.username = usernameself.password = passwordelse:self.username = Noneself.password = Noneself._session = requests.Session()self._session.headers = {'': '','': '','': ''}if self.token:self._session.headers[''] = self.tokenif protocol is not None:protocol = protocol.lower()if protocol not in ['', '']:raise ValueError('')self.protocol = protocolelif self.ssl_key is not None and self.ssl_cert is not None:self.protocol = ''elif self.token is not None:self.protocol = ''else:self.protocol = ''", "docstring": "Initialises our BaseAPI object passing the parameters needed in\n order to be able to create the connection strings, set up SSL and\n timeouts and so forth.", "id": "f4028:c0:m0"} {"signature": "@propertydef version(self):", "body": "return self.api_version", "docstring": "The version of the API we're querying against.\n\n :returns: Current API version.\n :rtype: :obj:`string`", "id": "f4028:c0:m1"} {"signature": "@propertydef base_url(self):", "body": "return ''.format(proto=self.protocol,host=self.host,port=self.port,url_path=self.url_path,)", "docstring": "A base_url that will be used to construct the final\n URL we're going to query against.\n\n :returns: A URL of the form: ``proto://host:port``.\n :rtype: :obj:`string`", "id": "f4028:c0:m2"} {"signature": "@propertydef total(self):", "body": "if self.last_total is not None:return int(self.last_total)", "docstring": "The total-count of the last request to PuppetDB\n if enabled as parameter in _query method\n\n :returns Number of total results\n :rtype :obj:`int`", "id": "f4028:c0:m3"} {"signature": "def _normalize_resource_type(self, type_):", "body": "return ''.join([s.capitalize() for s in type_.split('')])", "docstring": "Normalizes the type passed to the api by capitalizing each part\n of the type. For example:\n\n sysctl::value -> Sysctl::Value\n user -> User", "id": "f4028:c0:m4"} {"signature": "def _url(self, endpoint, path=None):", "body": "log.debug(''.format(endpoint, path))try:endpoint = ENDPOINTS[endpoint]except KeyError:raise APIErrorurl = ''.format(base_url=self.base_url,endpoint=endpoint,)if path is not None:url = ''.format(url, quote(path))return url", "docstring": "The complete URL we will end up querying. 
Depending on the\n endpoint we pass in this will result in different URLs with\n different prefixes.\n\n :param endpoint: The PuppetDB API endpoint we want to query.\n :type endpoint: :obj:`string`\n :param path: An additional path if we don't wish to query the\\\n bare endpoint.\n :type path: :obj:`string`\n\n :returns: A URL constructed from :func:`base_url` with the\\\n appropriate API version/prefix and the rest of the path added\\\n to it.\n :rtype: :obj:`string`", "id": "f4028:c0:m5"} {"signature": "def _query(self, endpoint, path=None, query=None,order_by=None, limit=None, offset=None, include_total=False,summarize_by=None, count_by=None, count_filter=None,request_method=''):", "body": "log.debug(''''''.format(endpoint, path, query, limit,offset, summarize_by, count_by,count_filter))url = self._url(endpoint, path=path)payload = {}if query is not None:payload[''] = queryif order_by is not None:payload[PARAMETERS['']] = order_byif limit is not None:payload[''] = limitif include_total is True:payload[PARAMETERS['']] =json.dumps(include_total)if offset is not None:payload[''] = offsetif summarize_by is not None:payload[PARAMETERS['']] = summarize_byif count_by is not None:payload[PARAMETERS['']] = count_byif count_filter is not None:payload[PARAMETERS['']] = count_filterif not (payload):payload = Noneif not self.token:auth = (self.username, self.password)else:auth = Nonetry:if request_method.upper() == '':r = self._session.get(url, params=payload,verify=self.ssl_verify,cert=(self.ssl_cert, self.ssl_key),timeout=self.timeout,auth=auth)elif request_method.upper() == '':r = self._session.post(url,data=json.dumps(payload, default=str),verify=self.ssl_verify,cert=(self.ssl_cert, self.ssl_key),timeout=self.timeout,auth=auth)else:log.error(\"\".format(request_method))raise APIErrorr.raise_for_status()if '' in r.headers:self.last_total = r.headers['']else:self.last_total = Nonejson_body = r.json()if json_body is not None:return json_bodyelse:del json_bodyraise EmptyResponseErrorexcept requests.exceptions.Timeout:log.error(\"\".format(ERROR_STRINGS[''],self.host, self.port,self.protocol.upper()))raiseexcept requests.exceptions.ConnectionError:log.error(\"\".format(ERROR_STRINGS[''],self.host, self.port,self.protocol.upper()))raiseexcept requests.exceptions.HTTPError as err:log.error(\"\".format(err.response.text,self.host, self.port,self.protocol.upper()))raise", "docstring": "This method actually queries PuppetDB. Provided an endpoint and an\n optional path and/or query it will fire a request at PuppetDB. If\n PuppetDB can be reached and answers within the timeout we'll decode\n the response and give it back or raise for the HTTP Status Code\n PuppetDB gave back.\n\n :param endpoint: The PuppetDB API endpoint we want to query.\n :type endpoint: :obj:`string`\n :param path: An additional path if we don't wish to query the\\\n bare endpoint.\n :type path: :obj:`string`\n :param query: (optional) A query to further narrow down the resultset.\n :type query: :obj:`string`\n :param order_by: (optional) Set the order parameters for the resultset.\n :type order_by: :obj:`string`\n :param limit: (optional) Tell PuppetDB to limit its response to this\\\n number of objects.\n :type limit: :obj:`int`\n :param offset: (optional) Tell PuppetDB to start its response from\\\n the given offset. 
This is useful for implementing pagination\\\n but is not supported just yet.\n :type offset: :obj:`string`\n :param include_total: (optional) Include the total number of results\n :type order_by: :obj:`bool`\n :param summarize_by: (optional) Specify what type of object you'd like\\\n to see counts at the event-counts and aggregate-event-counts \\\n endpoints\n :type summarize_by: :obj:`string`\n :param count_by: (optional) Specify what type of object is counted\n :type count_by: :obj:`string`\n :param count_filter: (optional) Specify a filter for the results\n :type count_filter: :obj:`string`\n\n :raises: :class:`~pypuppetdb.errors.EmptyResponseError`\n\n :returns: The decoded response from PuppetDB\n :rtype: :obj:`dict` or :obj:`list`", "id": "f4028:c0:m6"} {"signature": "def nodes(self, unreported=, with_status=False, **kwargs):", "body": "nodes = self._query('', **kwargs)now = datetime.datetime.utcnow()if type(nodes) == dict:nodes = [nodes, ]if with_status:latest_events = self.event_counts(query=EqualsOperator(\"\", True),summarize_by='')for node in nodes:node[''] = Nonenode[''] = Noneif with_status:status = [s for s in latest_eventsif s[''][''] == node['']]try:node[''] = node['']if status:node[''] = status[]except KeyError:if status:node[''] = status = status[]if status[''] > :node[''] = ''if status[''] > :node[''] = ''if status[''] > :node[''] = ''else:node[''] = ''if node[''] is not None:try:last_report = json_to_datetime(node[''])last_report = last_report.replace(tzinfo=None)unreported_border = now - timedelta(hours=unreported)if last_report < unreported_border:delta = (now - last_report)node[''] = Truenode[''] = ''.format(delta.days,int(delta.seconds / ),int((delta.seconds % ) / ))except AttributeError:node[''] = Trueif not node['']:node[''] = Trueyield Node(self,name=node[''],deactivated=node[''],expired=node[''],report_timestamp=node[''],catalog_timestamp=node[''],facts_timestamp=node[''],status_report=node[''],noop=node.get(''),noop_pending=node.get(''),events=node[''],unreported=node.get(''),unreported_time=node.get(''),report_environment=node[''],catalog_environment=node[''],facts_environment=node[''],latest_report_hash=node.get(''),cached_catalog_status=node.get(''))", "docstring": "Query for nodes by either name or query. If both aren't\n provided this will return a list of all nodes. 
This method\n also fetches the nodes' status and event counts of the latest\n report from puppetdb.\n\n :param with_status: (optional) include the node status in the\\\n returned nodes\n :type with_status: :bool:\n :param unreported: (optional) amount of hours when a node gets\n marked as unreported\n :type unreported: :obj:`None` or integer\n :param \\*\\*kwargs: The rest of the keyword arguments are passed\n to the _query function\n\n :returns: A generator yielding Nodes.\n :rtype: :class:`pypuppetdb.types.Node`", "id": "f4028:c0:m7"} {"signature": "def node(self, name):", "body": "nodes = self.nodes(path=name)return next(node for node in nodes)", "docstring": "Gets a single node from PuppetDB.\n\n :param name: The name of the node search.\n :type name: :obj:`string`\n\n :return: An instance of Node\n :rtype: :class:`pypuppetdb.types.Node`", "id": "f4028:c0:m8"} {"signature": "def edges(self, **kwargs):", "body": "edges = self._query('', **kwargs)for edge in edges:identifier_source = edge[''] +'' + edge[''] + ''identifier_target = edge[''] +'' + edge[''] + ''yield Edge(source=self.resources[identifier_source],target=self.resources[identifier_target],relationship=edge[''],node=edge[''])", "docstring": "Get the known catalog edges, formed between two resources.\n\n :param \\*\\*kwargs: The rest of the keyword arguments are passed\n to the _query function.\n\n :returns: A generator yielding Edges.\n :rtype: :class:`pypuppetdb.types.Edge`", "id": "f4028:c0:m9"} {"signature": "def environments(self, **kwargs):", "body": "return self._query('', **kwargs)", "docstring": "Get all known environments from Puppetdb.\n\n :param \\*\\*kwargs: The rest of the keyword arguments are passed\n to the _query function.\n\n :returns: A list of dictionaries containing the results.\n :rtype: :obj:`list` of :obj:`dict`", "id": "f4028:c0:m10"} {"signature": "def facts(self, name=None, value=None, **kwargs):", "body": "if name is not None and value is not None:path = ''.format(name, value)elif name is not None and value is None:path = nameelse:path = Nonefacts = self._query('', path=path, **kwargs)for fact in facts:yield Fact(node=fact[''],name=fact[''],value=fact[''],environment=fact[''])", "docstring": "Query for facts limited by either name, value and/or query.\n\n :param name: (Optional) Only return facts that match this name.\n :type name: :obj:`string`\n :param value: (Optional) Only return facts of `name` that\\\n match this value. 
Use of this parameter requires the `name`\\\n parameter be set.\n :type value: :obj:`string`\n :param \\*\\*kwargs: The rest of the keyword arguments are passed\n to the _query function\n\n :returns: A generator yielding Facts.\n :rtype: :class:`pypuppetdb.types.Fact`", "id": "f4028:c0:m11"} {"signature": "def factsets(self, **kwargs):", "body": "return self._query('', **kwargs)", "docstring": "Returns a set of all facts or for a single certname.\n\n :param \\*\\*kwargs: The rest of the keyword arguments are passed\n to the _query function.\n\n :returns: A list of dictionaries containing the results.\n :rtype: :obj:`list` of :obj:`dict`", "id": "f4028:c0:m12"} {"signature": "def fact_contents(self, **kwargs):", "body": "return self._query('', **kwargs)", "docstring": "To complement fact_paths(), this endpoint provides the capability\n to descend into structured facts and retrieve the values associated\n with fact paths.\n\n :param \\*\\*kwargs: The rest of the keyword arguments are passed\n to the _query function.\n\n :returns: A list of dictionaries containing the results.\n :rtype: :obj:`list` of :obj:`dict`", "id": "f4028:c0:m13"} {"signature": "def fact_paths(self, **kwargs):", "body": "return self._query('', **kwargs)", "docstring": "Fact Paths are intended to be a counter-part of the fact-names\n endpoint. It provides increased granularity around structured\n facts and may be used for building GUI autocompletions or other\n applications that require a basic top-level view of fact paths.\n\n :param \\*\\*kwargs: The rest of the keyword arguments are passed\n to the _query function.\n\n :returns: A list of dictionaries containing the results.\n :rtype: :obj:`list` of :obj:`dict`", "id": "f4028:c0:m14"} {"signature": "def resources(self, type_=None, title=None, **kwargs):", "body": "path = Noneif type_ is not None:type_ = self._normalize_resource_type(type_)if title is not None:path = ''.format(type_, title)elif title is None:path = type_resources = self._query('', path=path, **kwargs)for resource in resources:yield Resource(node=resource[''],name=resource[''],type_=resource[''],tags=resource[''],exported=resource[''],sourcefile=resource[''],sourceline=resource[''],parameters=resource[''],environment=resource[''],)", "docstring": "Query for resources limited by either type and/or title or query.\n This will yield a Resources object for every returned resource.\n\n :param type_: (Optional) The resource type. This can be any resource\n type referenced in\\\n 'https://docs.puppetlabs.com/references/latest/type.html'\n :type type_: :obj:`string`\n :param title: (Optional) The name of the resource as declared as the\n 'namevar' in the Puppet Manifests. 
This parameter requires the\\\n `type_` parameter be set.\n :type title: :obj:`string`\n :param \\*\\*kwargs: The rest of the keyword arguments are passed\n to the _query function\n\n :returns: A generator yielding Resources\n :rtype: :class:`pypuppetdb.types.Resource`", "id": "f4028:c0:m15"} {"signature": "def catalog(self, node):", "body": "catalogs = self.catalogs(path=node)return next(x for x in catalogs)", "docstring": "Get the available catalog for a given node.\n\n :param node: (Required) The name of the PuppetDB node.\n :type: :obj:`string`\n\n :returns: An instance of Catalog\n :rtype: :class:`pypuppetdb.types.Catalog`", "id": "f4028:c0:m16"} {"signature": "def catalogs(self, **kwargs):", "body": "catalogs = self._query('', **kwargs)if type(catalogs) == dict:catalogs = [catalogs, ]for catalog in catalogs:yield Catalog(node=catalog[''],edges=catalog[''][''],resources=catalog[''][''],version=catalog[''],transaction_uuid=catalog[''],environment=catalog[''],code_id=catalog.get(''),catalog_uuid=catalog.get(''))", "docstring": "Get the catalog information from the infrastructure based on path\n and/or query results. It is strongly recommended to include query\n and/or paging parameters for this endpoint to prevent large result\n sets or PuppetDB performance bottlenecks.\n\n :param \\*\\*kwargs: The rest of the keyword arguments are passed\n to the _query function.\n\n :returns: A generator yielding Catalogs\n :rtype: :class:`pypuppetdb.types.Catalog`", "id": "f4028:c0:m17"} {"signature": "def events(self, **kwargs):", "body": "events = self._query('', **kwargs)for event in events:yield Event(node=event[''],status=event[''],timestamp=event[''],hash_=event[''],title=event[''],property_=event[''],message=event[''],new_value=event[''],old_value=event[''],type_=event[''],class_=event[''],execution_path=event[''],source_file=event[''],line_number=event[''],)", "docstring": "A report is made up of events which can be queried either\n individually or based on their associated report hash. It is strongly\n recommended to include query and/or paging parameters for this\n endpoint to prevent large result sets or PuppetDB performance\n bottlenecks.\n\n :param \\*\\*kwargs: The rest of the keyword arguments are passed\n to the _query function\n\n :returns: A generator yielding Events\n :rtype: :class:`pypuppetdb.types.Event`", "id": "f4028:c0:m18"} {"signature": "def event_counts(self, summarize_by, **kwargs):", "body": "return self._query('',summarize_by=summarize_by,**kwargs)", "docstring": "Get event counts from puppetdb.\n\n :param summarize_by: (Required) The object type to be counted on.\n Valid values are 'containing_class', 'resource'\n and 'certname'.\n :type summarize_by: :obj:`string`\n :param count_by: (Optional) The object type that is counted when\n building the counts of 'successes', 'failures',\n 'noops' and 'skips'. Support values are 'certname'\n and 'resource' (default)\n :type count_by: :obj:`string`\n :param count_filter: (Optional) A JSON query that is applied to the\n event-counts output but before the results are\n aggregated. Supported operators are `=`, `>`,\n `<`, `>=`, and `<=`. 
Supported fields are\n `failures`, `successes`, `noops`, and `skips`.\n :type count_filter: :obj:`string`\n :param \\*\\*kwargs: The rest of the keyword arguments are passed\n to the _query function.\n\n :returns: A list of dictionaries containing the results.\n :rtype: :obj:`list`", "id": "f4028:c0:m19"} {"signature": "def aggregate_event_counts(self, summarize_by, query=None,count_by=None, count_filter=None):", "body": "return self._query('',query=query, summarize_by=summarize_by,count_by=count_by, count_filter=count_filter)", "docstring": "Get event counts from puppetdb aggregated into a single map.\n\n :param summarize_by: (Required) The object type to be counted on.\n Valid values are 'containing_class', 'resource'\n and 'certname' or any comma-separated value\n thereof.\n :type summarize_by: :obj:`string`\n :param query: (Optional) The PuppetDB query to filter the results.\n This query is passed to the `events` endpoint.\n :type query: :obj:`string`\n :param count_by: (Optional) The object type that is counted when\n building the counts of 'successes', 'failures',\n 'noops' and 'skips'. Supported values are 'certname'\n and 'resource' (default)\n :type count_by: :obj:`string`\n :param count_filter: (Optional) A JSON query that is applied to the\n event-counts output but before the results are\n aggregated. Supported operators are `=`, `>`,\n `<`, `>=`, and `<=`. Supported fields are\n `failures`, `successes`, `noops`, and `skips`.\n :type count_filter: :obj:`string`\n\n :returns: A dictionary of name/value results.\n :rtype: :obj:`dict`", "id": "f4028:c0:m20"} {"signature": "def server_time(self):", "body": "return self._query('')[self.parameters['']]", "docstring": "Get the current time of the clock on the PuppetDB server.\n :returns: An ISO-8601 formatted timestamp.\n :rtype: :obj:`string`", "id": "f4028:c0:m21"} {"signature": "def current_version(self):", "body": "return self._query('')['']", "docstring": "Get version information about the running PuppetDB server.\n\n :returns: A string representation of the PuppetDB version.\n :rtype: :obj:`string`", "id": "f4028:c0:m22"} {"signature": "def fact_names(self):", "body": "return self._query('')", "docstring": "Get a list of all known facts.", "id": "f4028:c0:m23"} {"signature": "def metric(self, metric=None):", "body": "return self._query('', path=metric)", "docstring": "Query for a specific metric.\n\n :param metric: The name of the metric we want.\n :type metric: :obj:`string`\n\n :returns: The return of :meth:`~pypuppetdb.api.BaseAPI._query`.", "id": "f4028:c0:m24"} {"signature": "def reports(self, **kwargs):", "body": "reports = self._query('', **kwargs)for report in reports:yield Report(api=self,node=report[''],hash_=report[''],start=report[''],end=report[''],received=report[''],version=report[''],format_=report[''],agent_version=report[''],transaction=report[''],environment=report[''],status=report[''],noop=report.get(''),noop_pending=report.get(''),metrics=report[''][''],logs=report[''][''],code_id=report.get(''),catalog_uuid=report.get(''),cached_catalog_status=report.get(''))", "docstring": "Get reports for our infrastructure. 
It is strongly recommended\n to include query and/or paging parameters for this endpoint to\n prevent large result sets and potential PuppetDB performance\n bottlenecks.\n\n :param \\*\\*kwargs: The rest of the keyword arguments are passed\n to the _query function\n\n :returns: A generator yielding Reports\n :rtype: :class:`pypuppetdb.types.Report`", "id": "f4028:c0:m25"} {"signature": "def inventory(self, **kwargs):", "body": "inventory = self._query('', **kwargs)for inv in inventory:yield Inventory(node=inv[''],time=inv[''],environment=inv[''],facts=inv[''],trusted=inv[''])", "docstring": "Get Node and Fact information with an alternative query syntax\n for structured facts instead of using the facts, fact-contents and\n factsets endpoints for many fact-related queries.\n\n :param \\*\\*kwargs: The rest of the keyword arguments are passed\n to the _query function.\n\n :returns: A generator yielding Inventory\n :rtype: :class:`pypuppetdb.types.Inventory`", "id": "f4028:c0:m26"} {"signature": "def status(self):", "body": "return self._query('')", "docstring": "Get PuppetDB server status.\n\n :returns: A dict with the PuppetDB status information\n :rtype: :obj:`dict`", "id": "f4028:c0:m27"} {"signature": "def http_get_provider(provider,request_url, params, token_secret, token_cookie = None):", "body": "if not validate_provider(provider):raise InvalidUsage('')klass = getattr(socialauth.providers, provider.capitalize())provider = klass(request_url, params, token_secret, token_cookie)if provider.status == :ret = dict(status = , redirect = provider.redirect)tc = getattr(provider, '', None)if tc is not None:ret[''] = tcreturn retif provider.status == and provider.user_id is not None:ret = dict(status = , provider_user_id = provider.user_id)if provider.user_name is not None:ret[''] = provider.user_namereturn retraise InvalidUsage('')", "docstring": "Handle HTTP GET requests on an authentication endpoint.\n\n Authentication flow begins when ``params`` has a ``login`` key with a value\n of ``start``. For instance, ``/auth/twitter?login=start``.\n\n :param str provider: A provider to obtain a user ID from.\n :param str request_url: The authentication endpoint/callback.\n :param dict params: GET parameters from the query string.\n :param str token_secret: An app secret to encode/decode JSON web tokens.\n :param str token_cookie: The current JSON web token, if available.\n :return: A dict containing any of the following possible keys:\n\n ``status``: an HTTP status code the server should send\n\n ``redirect``: where the client should be directed to continue the flow\n\n ``set_token_cookie``: contains a JSON web token and should be stored by\n the client and passed in the next call.\n\n ``provider_user_id``: the user ID from the login provider\n\n ``provider_user_name``: the user name from the login provider", "id": "f4032:m1"} {"signature": "def are_arrays_identical(arr1, arr2):", "body": "orig1 = arr1[]orig2 = arr2[]arr1[] = if arr2[] != orig2:arr1[] = orig1return Truearr1[] = if arr2[] != orig2:arr1[] = orig1return Truearr1[] = orig1return False", "docstring": "Check if two 1-dimensional arrays point to the same buffer.\n\n The check is performed only on the first value of the arrays. For\n this test to be reliable, arr2 must not point to a subset of arr1.\n For example, if arr2 = arr1[1:] has been executed just before calling\n this function, the test will FAIL, even if the same buffer is used by\n both arrays. 
arr2 = arr1[:1] will succeed though.\n\n dtypes are not supposed to be identical.", "id": "f4044:m0"} {"signature": "def get_requirements(*args):", "body": "install_deps = []try:for fpath in args:install_deps.extend([str(d.req or d.url) for d in parse_requirements(fpath)])except:print(''.format(fpath))return [dep for dep in install_deps if dep != '']", "docstring": "Parse all requirements files given and return a list of the dependencies", "id": "f4053:m0"} {"signature": "def recursive_glob(base_directory, regex=None):", "body": "if regex is None:regex = ''files = glob(os.path.join(base_directory, regex))for path, dirlist, filelist in os.walk(base_directory):for ignored in IGNORE:try:dirlist.remove(ignored)except:passfor dir_name in dirlist:files.extend(glob(os.path.join(path, dir_name, regex)))return files", "docstring": "Uses glob to find all files that match the regex in base_directory.\n\n @param base_directory: string\n\n @param regex: string\n\n @return: set", "id": "f4053:m1"} {"signature": "def _openpyxl_read_xl(xl_path: str):", "body": "try:wb = load_workbook(filename=xl_path, read_only=True)except:raiseelse:return wb", "docstring": "Use openpyxl to read an Excel file.", "id": "f4056:m0"} {"signature": "def _xlrd_read_xl(xl_path: str):", "body": "try:wb = xlrd.open_workbook(xl_path)except:raiseelse:return wb", "docstring": "Use xlrd to get the list of sheet names from `xl_path`.", "id": "f4056:m1"} {"signature": "def _check_xl_path(xl_path: str):", "body": "xl_path = op.abspath(op.expanduser(xl_path))if not op.isfile(xl_path):raise IOError(\"\".format(xl_path))return xl_path, _use_openpyxl_or_xlrf(xl_path)", "docstring": "Return the expanded absolute path of `xl_path` if\n it exists and 'xlrd' or 'openpyxl' depending on\n which module should be used for the Excel file in `xl_path`.\n\n Parameters\n ----------\n xl_path: str\n Path to an Excel file\n\n Returns\n -------\n xl_path: str\n User expanded and absolute path to `xl_path`\n\n module: str\n The name of the module you should use to process the\n Excel file.\n Choices: 'xlrd', 'pyopenxl'\n\n Raises\n ------\n IOError\n If the file does not exist\n\n RuntimeError\n If a suitable reader for xl_path is not found", "id": "f4056:m3"} {"signature": "def read_xl(xl_path: str):", "body": "xl_path, choice = _check_xl_path(xl_path)reader = XL_READERS[choice]return reader(xl_path)", "docstring": "Return the workbook from the Excel file in `xl_path`.", "id": "f4056:m4"} {"signature": "def get_sheet_list(xl_path: str) -> List:", "body": "wb = read_xl(xl_path)if hasattr(wb, ''):return wb.sheetnameselse:return wb.sheet_names()", "docstring": "Return a list with the names of the sheets in\n the Excel file in `xl_path`.", "id": "f4056:m5"} {"signature": "def concat_sheets(xl_path: str, sheetnames=None, add_tab_names=False):", "body": "xl_path, choice = _check_xl_path(xl_path)if sheetnames is None:sheetnames = get_sheet_list(xl_path)sheets = pd.read_excel(xl_path, sheetname=sheetnames)if add_tab_names:for tab in sheets:sheets[tab][''] = [tab] * len(sheets[tab])return pd.concat([sheets[tab] for tab in sheets])", "docstring": "Return a pandas DataFrame with the concat'ed\n content of the `sheetnames` from the Excel file in\n `xl_path`.\n\n Parameters\n ----------\n xl_path: str\n Path to the Excel file\n\n sheetnames: list of str\n List of existing sheet names of `xl_path`.\n If None, will use all sheets from `xl_path`.\n\n add_tab_names: bool\n If True will add a 'Tab' column which says from which\n tab the row comes from.\n\n Returns\n -------\n 
df: pandas.DataFrame", "id": "f4056:m6"} {"signature": "def _check_cols(df, col_names):", "body": "for col in col_names:if not hasattr(df, col):raise AttributeError(\"\".format(col,df.columns))", "docstring": "Raise an AttributeError if `df` does not have a column named as an item of\n the list of strings `col_names`.", "id": "f4056:m7"} {"signature": "def col_values(df, col_name):", "body": "_check_cols(df, [col_name])if '' in df[col_name] or pd.np.issubdtype(df[col_name].dtype, str): return [nom.lower() for nom in df[pd.notnull(df)][col_name] if not pd.isnull(nom)]else:return [nom for nom in df[pd.notnull(df)][col_name] if not pd.isnull(nom)]", "docstring": "Return a list of not null values from the `col_name` column of `df`.", "id": "f4056:m8"} {"signature": "def duplicated_rows(df, col_name):", "body": "_check_cols(df, [col_name])dups = df[pd.notnull(df[col_name]) & df.duplicated(subset=[col_name])]return dups", "docstring": "Return a DataFrame with the duplicated values of the column `col_name`\n in `df`.", "id": "f4056:m9"} {"signature": "def duplicated(values: Sequence):", "body": "vals = pd.Series(values)return vals[vals.duplicated()]", "docstring": "Return the duplicated items in `values`", "id": "f4056:m10"} {"signature": "def check_file_exists(filepath):", "body": "if not op.exists(filepath):raise IOError(''.format(filepath))", "docstring": "Check if the file path exists, if not raise IOError", "id": "f4057:m0"} {"signature": "def dir_search(regex, wd=os.curdir):", "body": "ls = os.listdir(wd)filt = re.compile(regex).searchreturn filter_list(ls, filt)", "docstring": "@param regex: string\n@param wd: string\n working directory\n@return:", "id": "f4057:m1"} {"signature": "def dir_match(regex, wd=os.curdir):", "body": "ls = os.listdir(wd)filt = re.compile(regex).matchreturn filter_list(ls, filt)", "docstring": "Create a list of regex matches that result from the match_regex\n of all file names within wd.\n The list of files will have wd as path prefix.\n\n @param regex: string\n @param wd: string\n working directory\n @return:", "id": "f4057:m2"} {"signature": "def recursive_dir_match(folder_path, regex=''):", "body": "outlist = []for root, dirs, files in os.walk(folder_path):outlist.extend([op.join(root, f) for f in dirsif re.match(regex, f)])return outlist", "docstring": "Returns absolute paths of folders that match the regex within folder_path and\nall its children folders.\n\nNote: The regex matching is done using the match function\nof the re module.\n\nParameters\n----------\nfolder_path: string\n\nregex: string\n\nReturns\n-------\nA list of strings.", "id": "f4057:m3"} {"signature": "def get_file_list(file_dir, regex=''):", "body": "file_list = os.listdir(file_dir)file_list.sort()if regex:file_list = search_list(file_list, regex)file_list = [op.join(file_dir, fname) for fname in file_list]return file_list", "docstring": "Creates a list of files that match the search_regex within file_dir.\nThe list of files will have file_dir as path prefix.\n\nParameters\n----------\n@param file_dir:\n\n@param search_regex:\n\nReturns:\n--------\nList of paths to files that match the search_regex", "id": "f4057:m4"} {"signature": "def recursive_find(folder_path, regex=''):", "body": "return recursive_find_search(folder_path, regex)", "docstring": "Returns absolute paths of files that match the regex within file_dir and\nall its children folders.\n\nNote: The regex matching is done using the search function\nof the re module.\n\nParameters\n----------\nfolder_path: string\n\nregex: 
string\n\nReturns\n-------\nA list of strings.", "id": "f4057:m5"} {"signature": "def recursive_find_match(folder_path, regex=''):", "body": "outlist = []for root, dirs, files in os.walk(folder_path):outlist.extend([op.join(root, f) for f in filesif re.match(regex, f)])return outlist", "docstring": "Returns absolute paths of files that match the regex within folder_path and\nall its children folders.\n\nNote: The regex matching is done using the match function\nof the re module.\n\nParameters\n----------\nfolder_path: string\n\nregex: string\n\nReturns\n-------\nA list of strings.", "id": "f4057:m6"} {"signature": "def recursive_find_search(folder_path, regex=''):", "body": "outlist = []for root, dirs, files in os.walk(folder_path):outlist.extend([op.join(root, f) for f in filesif re.search(regex, f)])return outlist", "docstring": "Returns absolute paths of files that match the regex within file_dir and\nall its children folders.\n\nNote: The regex matching is done using the search function\nof the re module.\n\nParameters\n----------\nfolder_path: string\n\nregex: string\n\nReturns\n-------\nA list of strings.", "id": "f4057:m7"} {"signature": "def iter_recursive_find(folder_path, *regex):", "body": "for root, dirs, files in os.walk(folder_path):if len(files) > :outlist = []for f in files:for reg in regex:if re.search(reg, f):outlist.append(op.join(root, f))if len(outlist) == len(regex):yield outlist", "docstring": "Returns absolute paths of files that match the regexs within folder_path and\nall its children folders.\n\nThis is an iterator function that will use yield to return each set of\nfile_paths in one iteration.\n\nWill only return value if all the strings in regex match a file name.\n\nNote: The regex matching is done using the search function\nof the re module.\n\nParameters\n----------\nfolder_path: string\n\nregex: strings\n\nReturns\n-------\nA list of strings.", "id": "f4057:m8"} {"signature": "def get_all_files(folder):", "body": "for path, dirlist, filelist in os.walk(folder):for fn in filelist:yield op.join(path, fn)", "docstring": "Generator that loops through all absolute paths of the files within folder\n\nParameters\n----------\nfolder: str\nRoot folder start point for recursive search.\n\nYields\n------\nfpath: str\nAbsolute path of one file in the folders", "id": "f4057:m9"} {"signature": "def find_match(base_directory, regex=''):", "body": "return glob(op.join(base_directory, regex))", "docstring": "Uses glob to find all files that match the regex\nin base_directory.\n\n@param base_directory: string\n\n@param regex: string\n\n@return: set", "id": "f4057:m10"} {"signature": "def recursive_glob(base_directory, regex=''):", "body": "files = glob(op.join(base_directory, regex))for path, dirlist, filelist in os.walk(base_directory):for dir_name in dirlist:files.extend(glob(op.join(path, dir_name, regex)))return files", "docstring": "Uses glob to find all files or folders that match the regex\nstarting from the base_directory.\n\nParameters\n----------\nbase_directory: str\n\nregex: str\n\nReturns\n-------\nfiles: list", "id": "f4057:m11"} {"signature": "def get_last_file(input_dir, glob_pattern='', key=op.getctime, reverse=True):", "body": "files = glob(op.join(input_dir, glob_pattern))files.sort(key=key, reverse=reverse)return files[]", "docstring": "Return the path to the latest file in `input_dir`.\n The `key` argument defines which information to use for sorting\n the list of files, could be:\n - creation date: os.path.getctime,\n - modification date: 
os.path.getmtime,\n etc.\n\n Parameters\n ----------\n input_dir: str\n Path to the folder where to perform the `glob`.\n\n glob_pattern: str\n `glob` Pattern to filter the files in `input_dir`.\n\n key: str\n Sorting key function\n\n reverse: bool\n Set to True if you want the sorting to be in decreasing order,\n False otherwise.\n\n Returns\n -------\n latest_filepath: str\n Path to the latest modified file in `input_dir`.", "id": "f4057:m14"} {"signature": "def get_last_modified_file(input_dir, glob_pattern=''):", "body": "return get_last_file(input_dir, glob_pattern, key=op.getmtime)", "docstring": "Return the path to the last modified (using `os.path.getmtime`) file in `input_dir`.\n See `get_last_file` docstring for description of the parameters.", "id": "f4057:m15"} {"signature": "def get_last_created_file(input_dir, glob_pattern=''):", "body": "return get_last_file(input_dir, glob_pattern, key=op.getctime)", "docstring": "Return the path to the last created file in `input_dir`.\n See `get_last_file` docstring for description of the parameters.", "id": "f4057:m16"} {"signature": "def filter_list(lst, pattern):", "body": "if is_fnmatch_regex(pattern) and not is_regex(pattern):log.info(''.format(pattern))filst = fnmatch.filter(lst, pattern)else:log.info(''.format(pattern))filst = match_list(lst, pattern)if filst:filst.sort()return filst", "docstring": "Filters the lst using pattern.\nIf pattern starts with '(' it will be considered a re regular expression,\notherwise it will use fnmatch filter.\n\n:param lst: list of strings\n\n:param pattern: string\n\n:return: list of strings\nFiltered list of strings", "id": "f4058:m0"} {"signature": "def remove_hidden_files(file_lst):", "body": "return [fnom for fnom in file_lst if not fnom.startswith('')]", "docstring": "Removes the filenames that start with '.'\n\n:param file_lst: list of strings\n\n:return: list of strings", "id": "f4058:m1"} {"signature": "def get_subdict(adict, path, sep=os.sep):", "body": "return reduce(adict.__class__.get, [p for p in op.split(sep) if p], adict)", "docstring": "Given a nested dictionary adict.\nThis returns its childen just below the path.\nThe path is a string composed of adict keys separated by sep.\n\n:param adict: nested dict\n\n:param path: str\n\n:param sep: str\n\n:return: dict or list or leaf of treemap", "id": "f4058:m2"} {"signature": "def get_dict_leaves(data):", "body": "result = []if isinstance(data, dict):for item in data.values():result.extend(get_dict_leaves(item))elif isinstance(data, list):result.extend(data)else:result.append(data)return result", "docstring": "Given a nested dictionary, this returns all its leave elements in a list.\n\n:param adict:\n\n:return: list", "id": "f4058:m4"} {"signature": "def get_possible_paths(base_path, path_regex):", "body": "if not path_regex:return []if len(path_regex) < :return []if path_regex[] == os.sep:path_regex = path_regex[:]rest_files = ''if os.sep in path_regex:node_names = path_regex.partition(os.sep)first_node = node_names[]rest_nodes = node_names[]folder_names = filter_list(os.listdir(base_path), first_node)for nom in folder_names:new_base = op.join(base_path, nom)if op.isdir(new_base):rest_files = get_possible_paths(new_base, rest_nodes)else:rest_files = filter_list(os.listdir(base_path), path_regex)files = []if rest_files:files = [op.join(base_path, f) for f in rest_files]return files", "docstring": "Looks for path_regex within base_path. 
Each match is append\nin the returned list.\npath_regex may contain subfolder structure.\nIf any part of the folder structure is a\n\n:param base_path: str\n\n:param path_regex: str\n\n:return list of strings", "id": "f4058:m5"} {"signature": "def process_tuple_node(basepath, treemap, ignore_hidden=True):", "body": "if not isinstance(treemap, tuple):raise FileTreeMapError(log, '')if len(treemap) != :raise FileTreeMapError(log, '')file_nodes = OrderedDict()file_lst = os.listdir(basepath)if ignore_hidden:file_lst = remove_hidden_files(file_lst)children_names = filter_list(file_lst, treemap[])child_map = treemap[]if len(children_names) == :file_nodes.update(populate_subtree(op.join(basepath, children_names[]),child_map))else:for cname in children_names:child_basepath = op.join(basepath, cname)if op.isdir(child_basepath):subtrs = populate_subtree(child_basepath,child_map)else:subtrs = child_basepathif subtrs:file_nodes[cname] = subtrsreturn file_nodes", "docstring": ":param basepath:\n\n:param treemap: 2-tuple\n\n:param rootkey:\n\n:return:", "id": "f4058:m6"} {"signature": "def populate_subtree(basepath, treemap, verbose=False):", "body": "file_nodes = OrderedDict()if isinstance(treemap, tuple):try:file_nodes = process_tuple_node(basepath, treemap)except:raise FileTreeMapError(''''.format(basepath, treemap))if isinstance(treemap, list):for node in treemap:try:file_nodes.update(process_tuple_node(basepath, node))except:raise FileTreeMapError(''''.format(basepath, node))elif isinstance(treemap, dict):for k in treemap.keys():cname = kchild_map = treemap[k]if isinstance(child_map, tuple) or isinstance(child_map, dict):try:file_nodes[cname] = populate_subtree(basepath, child_map)except:raise FileTreeMapError(''''.format(basepath,child_map))elif isinstance(child_map, str):if child_map[] == os.sep:raise FileTreeMapError(''''''.format(str(child_map),os.sep))subpaths = get_possible_paths(basepath, child_map)if subpaths:file_nodes[cname] = subpathsif verbose:log.info(''.format(basepath, file_nodes.keys()))return file_nodes", "docstring": ":param path: str\n\n:param treemap: dict\n\n:return: dict", "id": "f4058:m7"} {"signature": "def __init__(self):", "body": "self._filetree = {}self._treemap = {}self._basepath = ''self._ignore_regexes = []", "docstring": ":return:", "id": "f4058:c1:m0"} {"signature": "def __str__(self):", "body": "return ''.format(self._basepath,self._filetree)", "docstring": ":return:", "id": "f4058:c1:m1"} {"signature": "def from_config_file(self, config_file, verbose=False):", "body": "assert(op.isfile(config_file))self.__init__()try:self._basepath, self._treemap = self._import_config(config_file)self.update(verbose)except Exception as exc:raise EnvironmentError(''.format(config_file)) from exc", "docstring": ":param config_file: str\n Path to a configuration file.\n This file must declare a root_path and a filetree regex tree.\n\n:param verbose: bool", "id": "f4058:c1:m2"} {"signature": "def from_dict(self, root_path, filetree, verbose=False):", "body": "self.__init__()self._basepath = root_pathself._treemap = filetreeself.update(verbose)", "docstring": ":param root_path: string\n:param filetree: dict", "id": "f4058:c1:m3"} {"signature": "@staticmethoddef create_folder(dirpath, overwrite=False):", "body": "if not overwrite:while op.exists(dirpath):dirpath += ''os.makedirs(dirpath, exist_ok=overwrite)return dirpath", "docstring": "Will create dirpath folder. 
If dirpath already exists and overwrite is False,\n will append a '+' suffix to dirpath until dirpath does not exist.", "id": "f4058:c1:m6"} {"signature": "@staticmethoddef _import_config(filepath):", "body": "if not op.isfile(filepath):raise IOError(''''.format(filepath))cfg = import_pyfile(filepath)if not hasattr(cfg, ''):raise KeyError('')if not hasattr(cfg, ''):raise KeyError('')return cfg.root_path, cfg.filetree", "docstring": "Imports filetree and root_path variable values from the filepath.\n\n:param filepath:\n:return: root_path and filetree", "id": "f4058:c1:m7"} {"signature": "def get_root_nodes(self):", "body": "return self._filetree.keys()", "docstring": "Return a list of the names of the root nodes.", "id": "f4058:c1:m8"} {"signature": "def get_node_filepaths(self, nodepath):", "body": "files = self.get_node(nodepath)return get_dict_leaves(files)", "docstring": "Returns all leaves in filetree.", "id": "f4058:c1:m10"} {"signature": "def get_common_filepath(self, nodepath):", "body": "return commonprefix(self.get_node_filepaths(nodepath))", "docstring": "Returns the common filepath between all leaves in the filetree.", "id": "f4058:c1:m11"} {"signature": "def remove_nodes(self, pattern, adict):", "body": "mydict = self._filetree if adict is None else adictif isinstance(mydict, dict):for nom in mydict.keys():if isinstance(mydict[nom], dict):matchs = filter_list(mydict[nom], pattern)for nom in matchs:mydict = self.remove_nodes(pattern, mydict[nom])mydict.pop(nom)else:mydict[nom] = filter_list(mydict[nom], pattern)else:matchs = set(filter_list(mydict, pattern))mydict = set(mydict) - matchsreturn mydict", "docstring": "Remove the nodes that match the pattern.", "id": "f4058:c1:m12"} {"signature": "def count_node_match(self, pattern, adict=None):", "body": "mydict = self._filetree if adict is None else adictk = if isinstance(mydict, dict):names = mydict.keys()k += len(filter_list(names, pattern))for nom in names:k += self.count_node_match(pattern, mydict[nom])else:k = len(filter_list(mydict, pattern))return k", "docstring": "Return the number of nodes that match the pattern.\n\n:param pattern:\n\n:param adict:\n:return: int", "id": "f4058:c1:m13"} {"signature": "def __init__(self, config_map):", "body": "for key in config_map:if config_map[key] == '':config_map[key] = Nonesetattr(self, key, config_map[key])", "docstring": ":param config_map: dict", "id": "f4059:c0:m0"} {"signature": "def copy_w_ext(srcfile, destdir, basename):", "body": "ext = get_extension(op.basename(srcfile))dstpath = op.join(destdir, basename + ext)return copy_w_plus(srcfile, dstpath)", "docstring": "Copy `srcfile` in `destdir` with name `basename + get_extension(srcfile)`.\n Add pluses to the destination path basename if a file with the same name already\n exists in `destdir`.\n\n Parameters\n ----------\n srcfile: str\n\n destdir: str\n\n basename:str\n\n Returns\n -------\n dstpath: str", "id": "f4060:m0"} {"signature": "def copy_w_plus(src, dst):", "body": "dst_ext = get_extension(dst)dst_pre = remove_ext (dst)while op.exists(dst_pre + dst_ext):dst_pre += ''shutil.copy(src, dst_pre + dst_ext)return dst_pre + dst_ext", "docstring": "Copy file from `src` path to `dst` path. 
If `dst` already exists, will add '+' characters\n to the end of the basename without extension.\n\n Parameters\n ----------\n src: str\n\n dst: str\n\n Returns\n -------\n dstpath: str", "id": "f4060:m1"} {"signature": "def get_abspath(folderpath):", "body": "if not op.exists(folderpath):raise FolderNotFound(folderpath)return op.abspath(folderpath)", "docstring": "Returns the absolute path of folderpath.\n If the path does not exist, will raise IOError.", "id": "f4061:m0"} {"signature": "def make_dir(dirpath):", "body": "if not op.exists(dirpath):os.mkdir(dirpath)", "docstring": "Call os.mkdir only if `dirpath` does not exist.", "id": "f4061:m2"} {"signature": "def get_extension(filepath, check_if_exists=False, allowed_exts=ALLOWED_EXTS):", "body": "if check_if_exists:if not op.exists(filepath):raise IOError('' + filepath)rest, ext = op.splitext(filepath)if ext in allowed_exts:alloweds = allowed_exts[ext]_, ext2 = op.splitext(rest)if ext2 in alloweds:ext = ext2 + extreturn ext", "docstring": "Return the extension of fpath.\n\n Parameters\n ----------\n fpath: string\n File name or path\n\n check_if_exists: bool\n\n allowed_exts: dict\n Dictionary of strings, where the key if the last part of a complex ('.' separated) extension\n and the value is the previous part.\n For example: for the '.nii.gz' extension I would have a dict as {'.gz': ['.nii',]}\n\n Returns\n -------\n str\n The extension of the file name or path", "id": "f4061:m3"} {"signature": "def add_extension_if_needed(filepath, ext, check_if_exists=False):", "body": "if not filepath.endswith(ext):filepath += extif check_if_exists:if not op.exists(filepath):raise IOError('' + filepath)return filepath", "docstring": "Add the extension ext to fpath if it doesn't have it.\n\n Parameters\n ----------\n filepath: str\n File name or path\n\n ext: str\n File extension\n\n check_if_exists: bool\n\n Returns\n -------\n File name or path with extension added, if needed.", "id": "f4061:m4"} {"signature": "def remove_ext(filepath):", "body": "return filepath[:filepath.rindex(get_extension(filepath))]", "docstring": "Removes the extension of the file.\n\n Parameters\n ----------\n filepath: str\n File path or name\n\n Returns\n -------\n str\n File path or name without extension", "id": "f4061:m5"} {"signature": "def write_lines(filepath, lines):", "body": "with open(filepath, '') as f:f.writelines(lines)", "docstring": "Write the given lines to the file in filepath\n\n Parameters\n ----------\n filepath: str\n\n lines: list of str", "id": "f4061:m6"} {"signature": "def grep_one(srch_str, filepath):", "body": "for line in open(filepath):if srch_str in line:return linereturn None", "docstring": "Return the first line in file defined by filepath\n that contains srch_str\n\n Parameters\n ----------\n srch_str: str\n\n filepath: str\n\n Returns\n ----------\n str", "id": "f4061:m7"} {"signature": "def parse_subjects_list(filepath, datadir='', split='', labelsf=None):", "body": "labels = []subjs = []if datadir:datadir += op.sepwith open(filepath, '') as f:for s in f:line = s.strip().split(split)if len(line) == :labels.append(np.float(line[]))subjf = line[].strip()else:subjf = line.strip()if not op.isabs(subjf):subjs.append(datadir + subjf)else:subjs.append(subjf)if labelsf is not None:labels = np.loadtxt(labelsf)return [labels, subjs]", "docstring": "Parses a file with a list of: :.\n\n Parameters\n ----------\n filepath: str\n Path to file with a list of: :.\n Where ':' can be any split character\n\n datadir: str\n String to be path prefix of each 
line of the fname content,\n only in case the lines are relative file paths.\n\n split: str\n Split character for each line\n\n labelsf: str\n Path to file with a list of the labels if it is not included in\n fname. It will overwrite the labels from fname.\n\n Returns\n -------\n [labels, subjs] where labels is a list of labels and subjs a list of\n filepaths", "id": "f4061:m8"} {"signature": "def create_subjects_file(filelist, labels, output_file, split=''):", "body": "if len(filelist) != len(labels):raise ValueError(''''.format(len(filelist), len(labels)))lines = []for i, subj in enumerate(filelist):lab = labels[i]line = subj + split + str(lab)lines.append(line)lines = np.array(lines)np.savetxt(output_file, lines, fmt='')", "docstring": "Creates a file where each line is :.\n\n Parameters\n ----------\n filelist: list of str\n List of filepaths\n\n labels: list of int, str or labels that can be transformed with str()\n List of labels\n\n output_file: str\n Output file path\n\n split: str\n Split character for each line", "id": "f4061:m9"} {"signature": "def join_path_to_filelist(path, filelist):", "body": "return [op.join(path, str(item)) for item in filelist]", "docstring": "Joins path to each line in filelist\n\n Parameters\n ----------\n path: str\n\n filelist: list of str\n\n Returns\n -------\n list of filepaths", "id": "f4061:m10"} {"signature": "def remove_all(filelist, folder=''):", "body": "if not folder:for f in filelist:os.remove(f)else:for f in filelist:os.remove(op.join(folder, f))", "docstring": "Deletes all files in filelist\n\n Parameters\n ----------\n filelist: list of str\n List of the file paths to be removed\n\n folder: str\n Path to be used as common directory for all file paths in filelist", "id": "f4061:m11"} {"signature": "def get_folder_subpath(path, folder_depth):", "body": "if path[] == op.sep:folder_depth += return op.sep.join(path.split(op.sep)[:folder_depth])", "docstring": "Returns a folder path of path with depth given by folder_dept:\n\nParameters\n----------\npath: str\n\nfolder_depth: int > 0\n\nReturns\n-------\nA folder path\n\nExample\n-------\n>>> get_folder_subpath('/home/user/mydoc/work/notes.txt', 3)\n>>> '/home/user/mydoc'", "id": "f4061:m12"} {"signature": "def get_temp_file(dirpath=None, suffix=''):", "body": "return tempfile.NamedTemporaryFile(dir=dirpath, suffix=suffix)", "docstring": "Uses tempfile to create a NamedTemporaryFile using\nthe default arguments.\n\nParameters\n----------\ndirpath: str\nDirectory where it must be created.\nIf dir is specified, the file will be created\nin that directory, otherwise, a default directory is used.\nThe default directory is chosen from a platform-dependent\nlist, but the user of the application can control the\ndirectory location by setting the TMPDIR, TEMP or TMP\nenvironment variables.\n\nsuffix: str\nFile name suffix.\nIt does not put a dot between the file name and the\nsuffix; if you need one, put it at the beginning of suffix.\n\nReturns\n-------\nfile object\n\nNote\n----\nPlease, close it once you have used the file.", "id": "f4061:m13"} {"signature": "def get_temp_dir(prefix=None, basepath=None):", "body": "if basepath is None:return tempfile.TemporaryDirectory(dir=basepath)else:return tempfile.TemporaryDirectory(prefix=prefix, dir=basepath)", "docstring": "Uses tempfile to create a TemporaryDirectory using\nthe default arguments.\nThe folder is created using tempfile.mkdtemp() function.\n\nParameters\n----------\nprefix: str\nName prefix for the temporary folder.\n\nbasepath: 
str\nDirectory where the new folder must be created.\nThe default directory is chosen from a platform-dependent\nlist, but the user of the application can control the\ndirectory location by setting the TMPDIR, TEMP or TMP\nenvironment variables.\n\nReturns\n-------\nfolder object", "id": "f4061:m14"} {"signature": "def ux_file_len(filepath):", "body": "p = subprocess.Popen(['', '', filepath], stdout=subprocess.PIPE,stderr=subprocess.PIPE)result, err = p.communicate()if p.returncode != :raise IOError(err)l = result.strip()l = int(l.split()[])return l", "docstring": "Returns the length of the file using the 'wc' GNU command\n\n Parameters\n ----------\n filepath: str\n\n Returns\n -------\n float", "id": "f4061:m15"} {"signature": "def count_lines(filepath):", "body": "statinfo = os.stat(filepath)return statinfo.st_size", "docstring": "Return the number of lines in file in filepath\n\n Parameters\n ----------\n filepath: str\n\n Returns\n -------\n int", "id": "f4061:m16"} {"signature": "def file_size(filepath):", "body": "return op.getsize(filepath)", "docstring": "Returns the size of the file\n\n Parameters\n ----------\n filepath: str\n\n Returns\n -------\n float", "id": "f4061:m17"} {"signature": "def fileobj_size(file_obj):", "body": "file_obj.seek(, os.SEEK_END)return file_obj.tell()", "docstring": "Returns the size of the file\n\n Parameters\n ----------\n file_obj: file-like object\n\n Returns\n -------\n float", "id": "f4061:m18"} {"signature": "def dictify(a_named_tuple):", "body": "return dict((s, getattr(a_named_tuple, s)) for s in a_named_tuple._fields)", "docstring": "Transform a named tuple into a dictionary", "id": "f4062:m0"} {"signature": "def merge_dict_of_lists(adict, indices, pop_later=True, copy=True):", "body": "def check_indices(idxs, x):for i in chain(*idxs):if i < or i >= x:raise IndexError(\"\")check_indices(indices, len(adict))rdict = adict.copy() if copy else adictdict_keys = list(rdict.keys())for i, j in zip(*indices):rdict[dict_keys[i]].extend(rdict[dict_keys[j]])if pop_later:for i, j in zip(*indices):rdict.pop(dict_keys[j], '')return rdict", "docstring": "Extend the lists within a dict of lists. 
The indices will indicate which\n lists have to be extended by which other list.\n\n Parameters\n ----------\n adict: OrderedDict\n An ordered dictionary of lists\n\n indices: list or tuple of 2 iterables of int, both having the same length\n The indices of the lists that have to be merged, both iterables' items\n will be read pair by pair, the first is the index to the list that\n will be extended with the list of the second index.\n The indices can be constructed with Numpy e.g.,\n indices = np.where(square_matrix)\n\n pop_later: bool\n If True will pop out the lists that are indicated in the second\n list of indices.\n\n copy: bool\n If True will perform a deep copy of the input adict before\n modifying it, hence not changing the original input.\n\n Returns\n -------\n Dictionary of lists\n\n Raises\n ------\n IndexError\n If the indices are out of range", "id": "f4062:m1"} {"signature": "def append_dict_values(list_of_dicts, keys=None):", "body": "if keys is None:keys = list(list_of_dicts[].keys())dict_of_lists = DefaultOrderedDict(list)for d in list_of_dicts:for k in keys:dict_of_lists[k].append(d[k])return dict_of_lists", "docstring": "Return a dict of lists from a list of dicts with the same keys.\nFor each dict in list_of_dicts we look for the values of the\ngiven keys and append it to the output dict.\n\nParameters\n----------\nlist_of_dicts: list of dicts\n\nkeys: list of str\n List of keys to create in the output dict\n If None will use all keys in the first element of list_of_dicts\nReturns\n-------\nDefaultOrderedDict of lists", "id": "f4062:m2"} {"signature": "def __init__(self, dir=None, filepath=None, hdf_basepath='',overwrite_if_exist=False, remove_on_destroy=False):", "body": "if dir is None:dir = tempfile.gettempdir()if filepath is None:self._fname = self.get_temp_file(dir)else:self._fname = filepathself._fname = self.get_temp_file(dir).nameself._remove_on_destroy = remove_on_destroyself._overwrite = overwrite_if_existself._hdf_basepath = hdf_basepathself._hdf_file = Noneself._group = Noneself._datasets = {}self.create_hdf_file()", "docstring": ":param dir: string\n\n:param filepath: string\n\n:param hdf_basepath: string\n\n:param overwrite_if_exist: bool\n\n:param remove_on_destroy: bool", "id": "f4063:c0:m0"} {"signature": "@staticmethoddef get_temp_file(dir=None, suffix=''):", "body": "return tempfile.NamedTemporaryFile(dir=dir, suffix=suffix)", "docstring": "Uses tempfile to create a NamedTemporaryFile using\nthe default arguments.\n\n@param dir: string\nDirectory where it must be created.\nIf dir is specified, the file will be created\nin that directory, otherwise, a default directory is used.\nThe default directory is chosen from a platform-dependent\nlist, but the user of the application can control the\ndirectory location by setting the TMPDIR, TEMP or TMP\nenvironment variables.\n\n@param suffix: string\nFile name suffix.\nIt does not put a dot between the file name and the\nsuffix; if you need one, put it at the beginning of suffix.\n\n@return: file object\n\n@note:\nClose it once you have used the file.", "id": "f4063:c0:m1"} {"signature": "def __del__(self):", "body": "self._hdf_file.close()if self._remove_on_destroy:os.remove(self._fname)", "docstring": "Class destructor", "id": "f4063:c0:m2"} {"signature": "def create_hdf_file(self):", "body": "mode = ''if not self._overwrite and os.path.exists(self._fname):mode = ''self._hdf_file = h5py.File(self._fname, mode)if self._hdf_basepath == '':self._group = self._hdf_file['']else:self._group = 
self._hdf_file.create_group(self._hdf_basepath)", "docstring": ":return: h5py DataSet", "id": "f4063:c0:m3"} {"signature": "def get_dataset(self, ds_name, mode=''):", "body": "if ds_name in self._datasets:return self._datasets[ds_name]else:return self.create_empty_dataset(ds_name)", "docstring": "Returns a h5py dataset given its registered name.\n\n:param ds_name: string\nName of the dataset to be returned.\n\n:return:", "id": "f4063:c0:m4"} {"signature": "def create_empty_dataset(self, ds_name, dtype=np.float32):", "body": "if ds_name in self._datasets:return self._datasets[ds_name]ds = self._group.create_dataset(ds_name, (, ), maxshape=None,dtype=dtype)self._datasets[ds_name] = dsreturn ds", "docstring": "Creates a Dataset with unknown size.\nResize it before using.\n\n:param ds_name: string\n\n:param dtype: dtype\nDatatype of the dataset\n\n:return: h5py DataSet", "id": "f4063:c0:m5"} {"signature": "def create_dataset(self, ds_name, data, attrs=None, dtype=None):", "body": "if ds_name in self._datasets:ds = self._datasets[ds_name]if ds.dtype != data.dtype:warnings.warn('')else:if dtype is None:dtype = data.dtypeds = self._group.create_dataset(ds_name, data.shape,dtype=dtype)if attrs is not None:for key in attrs:setattr(ds.attrs, key, attrs[key])ds.read_direct(data)self._datasets[ds_name] = dsreturn ds", "docstring": "Saves a Numpy array in a dataset in the HDF file, registers it as\nds_name and returns the h5py dataset.\n\n:param ds_name: string\nRegistration name of the dataset to be registered.\n\n:param data: Numpy ndarray\n\n:param dtype: dtype\nDatatype of the dataset\n\n:return: h5py dataset", "id": "f4063:c0:m6"} {"signature": "def save(self, ds_name, data, dtype=None):", "body": "return self.create_dataset(ds_name, data, dtype)", "docstring": "See create_dataset.", "id": "f4063:c0:m7"} {"signature": "@staticmethoddef _fill_missing_values(df, range_values, fill_value=, fill_method=None):", "body": "idx_colnames = df.index.namesidx_colranges = [range_values[x] for x in idx_colnames]fullindex = pd.Index([p for p in product(*idx_colranges)],name=tuple(idx_colnames))fulldf = df.reindex(index=fullindex, fill_value=fill_value,method=fill_method)fulldf.index.names = idx_colnamesreturn fulldf, idx_colranges", "docstring": "Will get the names of the index colums of df, obtain their ranges from\nrange_values dict and return a reindexed version of df with the given\nrange values.\n\n:param df: pandas DataFrame\n\n:param range_values: dict or array-like\nMust contain for each index column of df an entry with all the values\nwithin the range of the column.\n\n:param fill_value: scalar or 'nearest', default 0\nValue to use for missing values. 
Defaults to 0, but can be any\n\"compatible\" value, e.g., NaN.\nThe 'nearest' mode will fill the missing value with the nearest value in\n the column.\n\n:param fill_method: {'backfill', 'bfill', 'pad', 'ffill', None}, default None\nMethod to use for filling holes in reindexed DataFrame\n'pad' / 'ffill': propagate last valid observation forward to next valid\n'backfill' / 'bfill': use NEXT valid observation to fill gap\n\n:return: pandas Dataframe and used column ranges\nreindexed DataFrame and dict with index column ranges", "id": "f4063:c1:m2"} {"signature": "def get(self, key):", "body": "node = self.get_node(key)if node is None:raise KeyError('' % key)if hasattr(node, ''):if '' in node.attrs:return self._read_group(node)return self._read_array(node)", "docstring": "Retrieve pandas object or group of Numpy ndarrays\nstored in file\n\nParameters\n----------\nkey : object\n\nReturns\n-------\nobj : type of object stored in file", "id": "f4063:c1:m3"} {"signature": "def put(self, key, value, attrs=None, format=None, append=False, **kwargs):", "body": "if not isinstance(value, np.ndarray):super(NumpyHDFStore, self).put(key, value, format, append, **kwargs)else:group = self.get_node(key)if group is not None and not append:self._handle.removeNode(group, recursive=True)group = Noneif group is None:paths = key.split('')path = ''for p in paths:if not len(p):continuenew_path = pathif not path.endswith(''):new_path += ''new_path += pgroup = self.get_node(new_path)if group is None:group = self._handle.createGroup(path, p)path = new_pathds_name = kwargs.get('', self._array_dsname)ds = self._handle.createArray(group, ds_name, value)if attrs is not None:for key in attrs:setattr(ds.attrs, key, attrs[key])self._handle.flush()return ds", "docstring": "Store object in HDFStore\n\nParameters\n----------\nkey : str\n\nvalue : {Series, DataFrame, Panel, Numpy ndarray}\n\nformat : 'fixed(f)|table(t)', default is 'fixed'\n fixed(f) : Fixed format\n Fast writing/reading. 
Not-appendable, nor searchable\n\n table(t) : Table format\n Write as a PyTables Table structure which may perform worse but allow more flexible operations\n like searching/selecting subsets of the data\n\nappend : boolean, default False\n This will force Table format, append the input data to the\n existing.\n\nencoding : default None, provide an encoding for strings", "id": "f4063:c1:m4"} {"signature": "def _push_dfblock(self, key, df, ds_name, range_values):", "body": "vals_colranges = [range_values[x] for x in df.index.names]nu_shape = [len(x) for x in vals_colranges]return self.put(key, np.reshape(df.values, tuple(nu_shape)),attrs={'': df.index.names},ds_name=ds_name, append=True)", "docstring": ":param key: string\n:param df: pandas Dataframe\n:param ds_name: string", "id": "f4063:c1:m5"} {"signature": "def put_df_as_ndarray(self, key, df, range_values, loop_multiindex=False,unstack=False, fill_value=, fill_method=None):", "body": "idx_colnames = df.index.namesif key is None:key = idx_colnames[]if loop_multiindex:idx_values = df.index.get_level_values().unique()for idx in idx_values:vals, _ = self._fill_missing_values(df.xs((idx,), level=idx_colnames[]),range_values,fill_value=fill_value,fill_method=fill_method)ds_name = str(idx) + '' + ''.join(vals.columns)self._push_dfblock(key, vals, ds_name, range_values)return self._handle.get_node('' + str(key))else:if unstack:df = df.unstack(idx_colnames[])for idx in df:vals, _ = self._fill_missing_values(df[idx], range_values,fill_value=fill_value,fill_method=fill_method)vals = np.nan_to_num(vals)ds_name = ''.join([str(x) for x in vals.name])self._push_dfblock(key, vals, ds_name, range_values)return self._handle.get_node('' + str(key))vals, _ = self._fill_missing_values(df, range_values,fill_value=fill_value,fill_method=fill_method)ds_name = self._array_dsnamereturn self._push_dfblock(key, vals, ds_name, range_values)", "docstring": "Returns a PyTables HDF Array from df in the shape given by its index columns range values.\n\n :param key: string object\n\n :param df: pandas DataFrame\n\n :param range_values: dict or array-like\n Must contain for each index column of df an entry with all the values\n within the range of the column.\n\n :param loop_multiindex: bool\n Will loop through the first index in a multiindex dataframe, extract a\n dataframe only for one value, complete and fill the missing values and\n store in the HDF.\n If this is True, it will not use unstack.\n This is as fast as unstacking.\n\n :param unstack: bool\n Unstack means that this will use the first index name to\n unfold the DataFrame, and will create a group with as many datasets\n as valus has this first index.\n Use this if you think the filled dataframe won't fit in your RAM memory.\n If set to False, this will transform the dataframe in memory first\n and only then save it.\n\n :param fill_value: scalar or 'nearest', default 0\n Value to use for missing values. 
Defaults to 0, but can be any\n \"compatible\" value, e.g., NaN.\n The 'nearest' mode will fill the missing value with the nearest value in\n the column.\n\n :param fill_method: {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed DataFrame\n 'pad' / 'ffill': propagate last valid observation forward to next valid\n 'backfill' / 'bfill': use NEXT valid observation to fill gap\n\n :return: PyTables data node", "id": "f4063:c1:m6"} {"signature": "def sav_to_pandas_rpy2(input_file):", "body": "import pandas.rpy.common as comw = com.robj.r('' % input_file)return com.convert_robj(w)", "docstring": "SPSS .sav files to Pandas DataFrame through Rpy2\n\n:param input_file: string\n\n:return:", "id": "f4065:m0"} {"signature": "def sav_to_pandas_savreader(input_file):", "body": "from savReaderWriter import SavReaderlines = []with SavReader(input_file, returnHeader=True) as reader:header = next(reader)for line in reader:lines.append(line)return pd.DataFrame(data=lines, columns=header)", "docstring": "SPSS .sav files to Pandas DataFrame through savreader module\n\n:param input_file: string\n\n:return:", "id": "f4065:m1"} {"signature": "def save_variables_to_shelve(file_path, variables):", "body": "mashelf = shelve.open(file_path, '')for vn in variables.keys():try:mashelf[vn] = variables[vn]except KeyError as ke:raise Exception(''.format(vn)) from kemashelf.close()", "docstring": "Parameters\n----------\nfile_path: str\n\nvariables: dict\n Dictionary with objects. Object name -> object\n\nNotes\n-----\n Before calling this function, create a varlist this way:\n\n shelfvars = []\n for v in varnames:\n shelfvars.append(eval(v))\n\n #to_restore variables from shelf\n my_shelf = shelve.open(filename)\n for key in my_shelf:\n globals()[key]=my_shelf[key]\n my_shelf.close()", "id": "f4065:m2"} {"signature": "def save_variables_to_mat(file_path, variables, format=''):", "body": "try:sio.savemat(file_path, variables, format=format)except IOError as ioe:raise IOError(''.format(file_path)) from ioe", "docstring": "Parameters\n---------\nfile_path: str\n\nvariables: dict\n Dictionary with objects. 
Object name -> object\n\nformat : {'5', '4'}, string, optional\n '5' (the default) for MATLAB 5 and up (to 7.2),\n '4' for MATLAB 4 .mat files\n See scipy.io.savemat dostrings.", "id": "f4065:m3"} {"signature": "@staticmethoddef save_variables(filename, variables):", "body": "ext = get_extension(filename).lower()out_exts = {'', '', '', '', ''}output_file = filenameif not ext in out_exts:output_file = add_extension_if_needed(filename, '')ext = get_extension(filename)if ext == '' or ext == '':save_variables_to_shelve(output_file, variables)elif ext == '':save_variables_to_mat(output_file, variables)elif ext == '' or ext == '':from .hdf5 import save_variables_to_hdf5save_variables_to_hdf5(output_file, variables)else:raise ValueError(''.format(ext))", "docstring": "Save given variables in a file.\n Valid extensions: '.pyshelf' or '.shelf' (Python shelve)\n '.mat' (Matlab archive),\n '.hdf5' or '.h5' (HDF5 file)\n\n Parameters\n ----------\n filename: str\n Output file path.\n\n variables: dict\n Dictionary varname -> variable\n\n Raises\n ------\n ValueError: if the extension of the filesname is not recognized.", "id": "f4065:c0:m1"} {"signature": "@staticmethoddef save_varlist(filename, varnames, varlist):", "body": "variables = {}for i, vn in enumerate(varnames):variables[vn] = varlist[i]ExportData.save_variables(filename, variables)", "docstring": "Valid extensions '.pyshelf', '.mat', '.hdf5' or '.h5'\n\n@param filename: string\n\n@param varnames: list of strings\nNames of the variables\n\n@param varlist: list of objects\nThe objects to be saved", "id": "f4065:c0:m2"} {"signature": "def save_variables_to_hdf5(file_path, variables, mode='', h5path=''):", "body": "if not isinstance(variables, dict):raise ValueError(''.format(type(variables)))if not variables:raise ValueError('')h5file = h5py.File(file_path, mode=mode)h5group = h5file.require_group(h5path)for vn in variables:data = variables[vn]if hasattr(data, '') and (data.dtype.type is np.string_ or data.dtype.type is np.unicode_):dt = h5py.special_dtype(vlen=str)data = data.astype(dt)if isinstance(data, dict):for key in data:h5group[str(key)] = data[key]elif isinstance(data, list):for idx, item in enumerate(data):h5group[str(idx)] = itemelse:h5group[vn] = datah5file.close()", "docstring": "Parameters\n----------\nfile_path: str\n\nvariables: dict\n Dictionary with objects. 
Object name -> object\n\nmode: str\n HDF5 file access mode\n See h5py documentation for details.\n r Readonly, file must exist\n r+ Read/write, file must exist\n w Create file, truncate if exists\n w- Create file, fail if exists\n a Read/write if exists, create otherwise (default)\n\nNotes\n-----\nIt is recommended to use numpy arrays as objects.\nList or tuples of strings won't work, convert them into numpy.arrays before.", "id": "f4066:m0"} {"signature": "def get_h5file(file_path, mode=''):", "body": "if not op.exists(file_path):raise IOError(''.format(file_path))try:h5file = h5py.File(file_path, mode=mode)except:raiseelse:return h5file", "docstring": "Return the h5py.File given its file path.\n\n Parameters\n ----------\n file_path: string\n HDF5 file path\n\n mode: string\n r Readonly, file must exist\n r+ Read/write, file must exist\n w Create file, truncate if exists\n w- Create file, fail if exists\n a Read/write if exists, create otherwise (default)\n\n Returns\n -------\n h5file: h5py.File", "id": "f4066:m1"} {"signature": "def get_group_names(h5file, h5path=''):", "body": "return _get_node_names(h5file, h5path, node_type=h5py.Group)", "docstring": "Return the groups names within h5file/h5path\n\n Parameters\n ----------\n h5file: h5py.File or path to hdf5 file\n HDF5 file object\n\n h5path: str\n HDF5 group path to get the group names from\n\n Returns\n -------\n groupnames: list of str\n List of group names", "id": "f4066:m2"} {"signature": "def get_dataset_names(h5file, h5path=''):", "body": "return _get_node_names(h5file, h5path, node_type=h5py.Dataset)", "docstring": "Return all dataset names from h5path group in h5file.\n\n Parameters\n ----------\n h5file: h5py.File\n HDF5 file object\n\n h5path: str\n HDF5 group path to read datasets from\n\n Returns\n -------\n dsnames: list of str\n List of dataset names contained in h5file/h5path", "id": "f4066:m3"} {"signature": "def get_datasets(h5file, h5path=''):", "body": "return _get_nodes(h5file, h5path, node_type=h5py.Dataset)", "docstring": "Return all datasets from h5path group in file_path.\n\n Parameters\n ----------\n h5file: h5py.File\n HDF5 file object\n\n h5path: str\n HDF5 group path to read datasets from\n\n Returns\n -------\n datasets: dict\n Dict with all h5py.Dataset contained in file_path/h5path", "id": "f4066:m4"} {"signature": "def extract_datasets(h5file, h5path=''):", "body": "if isinstance(h5file, str):_h5file = h5py.File(h5file, mode='')else:_h5file = h5file_datasets = get_datasets(_h5file, h5path)datasets = OrderedDict()try:for ds in _datasets:datasets[ds.name.split('')[-]] = ds[:]except:raise RuntimeError(''.format(_h5file.filename, h5path))finally:if isinstance(h5file, str):_h5file.close()return datasets", "docstring": "Return all dataset contents from h5path group in h5file in an OrderedDict.\n\n Parameters\n ----------\n h5file: h5py.File\n HDF5 file object\n\n h5path: str\n HDF5 group path to read datasets from\n\n Returns\n -------\n datasets: OrderedDict\n Dict with variables contained in file_path/h5path", "id": "f4066:m5"} {"signature": "def _get_node_names(h5file, h5path='', node_type=h5py.Dataset):", "body": "if isinstance(h5file, str):_h5file = get_h5file(h5file, mode='')else:_h5file = h5fileif not h5path.startswith(''):h5path = '' + h5pathnames = []try:h5group = _h5file.require_group(h5path)for node in _hdf5_walk(h5group, node_type=node_type):names.append(node.name)except:raise RuntimeError(''.format(_h5file.filename, h5path))finally:if isinstance(h5file, str):_h5file.close()return names", 
"docstring": "Return the node of type node_type names within h5path of h5file.\n\n Parameters\n ----------\n h5file: h5py.File\n HDF5 file object\n\n h5path: str\n HDF5 group path to get the group names from\n\n node_type: h5py object type\n HDF5 object type\n\n Returns\n -------\n names: list of str\n List of names", "id": "f4066:m7"} {"signature": "def _get_nodes(h5file, h5path='', node_type=h5py.Dataset):", "body": "if isinstance(h5file, str):_h5file = get_h5file(h5file, mode='')else:_h5file = h5fileif not h5path.startswith(''):h5path = '' + h5pathnames = []try:h5group = _h5file.require_group(h5path)for node in _hdf5_walk(h5group, node_type=node_type):names.append(node)except:raise RuntimeError(''.format(str(node_type), _h5file.filename, h5path))finally:if isinstance(h5file, str):_h5file.close()return names", "docstring": "Return the nodes within h5path of the h5file.\n\n Parameters\n ----------\n h5file: h5py.File\n HDF5 file object\n\n h5path: str\n HDF5 group path to get the nodes from\n\n node_type: h5py object type\n The type of the nodes that you want to get\n\n Returns\n -------\n nodes: list of node_type objects", "id": "f4066:m8"} {"signature": "def create_dicom_subject_folders(out_path, dicom_sets):", "body": "import shutilif not os.path.exists(out_path):os.mkdir(out_path)new_groups = defaultdict(list)for group in dicom_sets:group_path = os.path.join(out_path, str(group))os.mkdir(group_path)group_dicoms = dicom_sets[group]for idx, dcm in enumerate(group_dicoms):num = str(idx).zfill()new_dcm = os.path.join(group_path, num + DICOM_FILE_EXTENSIONS[].lower())log.info(''.format(dcm, new_dcm))shutil.copyfile(dcm, new_dcm)new_groups[group].append(new_dcm)return new_groups", "docstring": ":param out_path: str\n Path to the output directory\n\n:param dicom_sets: dict of {str: list of strs}\n Groups of dicom files", "id": "f4067:m0"} {"signature": "def rename_file_group_to_serial_nums(file_lst):", "body": "file_lst.sort()c = for f in file_lst:dirname = get_abspath(f.dirname())fdest = f.joinpath(dirname, \"\".format(c) +OUTPUT_DICOM_EXTENSION)log.info(''.format(f, fdest))f.rename(fdest)c += ", "docstring": "Will rename all files in file_lst to a padded serial\n number plus its extension\n\n :param file_lst: list of path.py paths", "id": "f4067:m1"} {"signature": "def __init__(self, folders=None):", "body": "self.items = []if folders is not None:self._store_dicom_paths(folders)", "docstring": ":param folders: str or list of strs\n Path or paths to folders to be searched for Dicom files", "id": "f4067:c0:m0"} {"signature": "def _store_dicom_paths(self, folders):", "body": "if isinstance(folders, str):folders = [folders]for folder in folders:if not os.path.exists(folder):raise FolderNotFound(folder)self.items.extend(list(find_all_dicom_files(folder)))", "docstring": "Search for dicoms in folders and save file paths into\n self.dicom_paths set.\n\n :param folders: str or list of str", "id": "f4067:c0:m1"} {"signature": "def from_folders(self, folders):", "body": "self.items = []self._store_dicom_paths(folders)", "docstring": "Restart the self.items and stores all dicom file paths found\nwithin folders\n\nParameters\n----------\nfolders: str or list of str", "id": "f4067:c0:m2"} {"signature": "def from_set(self, fileset, check_if_dicoms=True):", "body": "if check_if_dicoms:self.items = []for f in fileset:if is_dicom_file(f):self.items.append(f)else:self.items = fileset", "docstring": "Overwrites self.items with the given set of files.\n Will filter the fileset and keep only Dicom files.\n\n 
Parameters\n ----------\n fileset: iterable of str\n Paths to files\n\n check_if_dicoms: bool\n Whether to check if the items in fileset are dicom file paths", "id": "f4067:c0:m3"} {"signature": "def update(self, dicomset):", "body": "if not isinstance(dicomset, DicomFileSet):raise ValueError('')self.items = list(set(self.items).update(dicomset))", "docstring": "Update this set with the union of itself and dicomset.\n\n Parameters\n ----------\n dicomset: DicomFileSet", "id": "f4067:c0:m4"} {"signature": "def copy_files_to_other_folder(self, output_folder, rename_files=True,mkdir=True, verbose=False):", "body": "import shutilif not os.path.exists(output_folder):os.mkdir(output_folder)if not rename_files:for dcmf in self.items:outf = os.path.join(output_folder, os.path.basename(dcmf))if verbose:print(''.format(dcmf, outf))shutil.copyfile(dcmf, outf)else:n_pad = len(self.items)+for idx, dcmf in enumerate(self.items):outf = ''.format(width=n_pad, number=idx)outf = os.path.join(output_folder, outf)if verbose:print(''.format(dcmf, outf))shutil.copyfile(dcmf, outf)", "docstring": "Copies all files within this set to the output_folder\n\nParameters\n----------\noutput_folder: str\nPath of the destination folder of the files\n\nrename_files: bool\nWhether or not rename the files to a sequential format\n\nmkdir: bool\nWhether to make the folder if it does not exist\n\nverbose: bool\nWhether to print to stdout the files that are beind copied", "id": "f4067:c0:m5"} {"signature": "def __init__(self, folders, read_metadata=True, header_fields=None):", "body": "DicomFileSet.__init__(self, folders)self.read_dcm = self.get_dcm_reader(read_metadata, header_fields)", "docstring": ":param folders: str or list of strs\nPath or paths to folders to be searched for Dicom files\n\n:param read_metadata: bool\nIf True, will make a list of DicomFiles, otherwise will store\na simple DICOM header (namedtuples) with the fields specified\nin header_fields.\n\n:param header_fields: set of strings\nSet of header fields to be read from each DICOM file in a DicomHeader.\nIf store_metadata is False, this won't be used. 
Else and if this is\nNone, will store the whole DicomFile.", "id": "f4067:c1:m0"} {"signature": "@staticmethoddef get_dcm_reader(store_metadata=True, header_fields=None):", "body": "if not store_metadata:return lambda fpath: fpathif header_fields is None:build_dcm = lambda fpath: DicomFile(fpath)else:dicom_header = namedtuple('', header_fields)build_dcm = lambda fpath: dicom_header._make(DicomFile(fpath).get_attributes(header_fields))return build_dcm", "docstring": "Creates a lambda function to read DICOM files.\nIf store_store_metadata is False, will only return the file path.\nElse if you give header_fields, will return only the set of of\nheader_fields within a DicomFile object or the whole DICOM file if\nNone.\n\n:return: function\nThis function has only one parameter: file_path", "id": "f4067:c1:m1"} {"signature": "def scrape_all_files(self):", "body": "try:for dcmf in self.items:yield self.read_dcm(dcmf)except IOError as ioe:raise IOError(''.format(dcmf)) from ioe", "docstring": "Generator that yields one by one the return value for self.read_dcm\nfor each file within this set", "id": "f4067:c1:m2"} {"signature": "def group_dicom_files(dicom_file_paths, header_fields):", "body": "dist = SimpleDicomFileDistance(field_weights=header_fields)path_list = dicom_file_paths.copy()path_groups = DefaultOrderedDict(DicomFileSet)while len(path_list) > :file_path1 = path_list.pop()file_subgroup = [file_path1]dist.set_dicom_file1(file_path1)j = len(path_list)-while j >= :file_path2 = path_list[j]dist.set_dicom_file2(file_path2)if dist.transform():file_subgroup.append(file_path2)path_list.pop(j)j -= path_groups[file_path1].from_set(file_subgroup, check_if_dicoms=False)return path_groups", "docstring": "Gets a list of DICOM file absolute paths and returns a list of lists of\nDICOM file paths. Each group contains a set of DICOM files that have\nexactly the same headers.\n\nParameters\n----------\ndicom_file_paths: list of str\n List or set of DICOM file paths\n\nheader_fields: list of str\n List of header field names to check on the comparisons of the DICOM files.\n\nReturns\n-------\ndict of DicomFileSets\n The key is one filepath representing the group (the first found).", "id": "f4068:m0"} {"signature": "def copy_groups_to_folder(dicom_groups, folder_path, groupby_field_name):", "body": "if dicom_groups is None or not dicom_groups:raise ValueError('')if not os.path.exists(folder_path):os.makedirs(folder_path, exist_ok=False)for dcmg in dicom_groups:if groupby_field_name is not None and len(groupby_field_name) > :dfile = DicomFile(dcmg)dir_name = ''for att in groupby_field_name:dir_name = os.path.join(dir_name, dfile.get_attributes(att))dir_name = str(dir_name)else:dir_name = os.path.basename(dcmg)group_folder = os.path.join(folder_path, dir_name)os.makedirs(group_folder, exist_ok=False)log.debug(''.format(group_folder))import shutildcm_files = dicom_groups[dcmg]for srcf in dcm_files:destf = os.path.join(group_folder, os.path.basename(srcf))while os.path.exists(destf):destf += ''shutil.copy2(srcf, destf)", "docstring": "Copy the DICOM file groups to folder_path. Each group will be copied into\n a subfolder with named given by groupby_field.\n\n Parameters\n ----------\n dicom_groups: boyle.dicom.sets.DicomFileSet\n\n folder_path: str\n Path to where copy the DICOM files.\n\n groupby_field_name: str\n DICOM field name. 
Will get the value of this field to name the group\n folder.", "id": "f4068:m1"} {"signature": "def calculate_file_distances(dicom_files, field_weights=None,dist_method_cls=None, **kwargs):", "body": "if dist_method_cls is None:dist_method = LevenshteinDicomFileDistance(field_weights)else:try:dist_method = dist_method_cls(field_weights=field_weights, **kwargs)except:log.exception(''''.format(dist_method_cls, kwargs))dist_dtype = np.float16n_files = len(dicom_files)try:file_dists = np.zeros((n_files, n_files), dtype=dist_dtype)except MemoryError as mee:import scipy.sparsefile_dists = scipy.sparse.lil_matrix((n_files, n_files),dtype=dist_dtype)for idxi in range(n_files):dist_method.set_dicom_file1(dicom_files[idxi])for idxj in range(idxi+, n_files):dist_method.set_dicom_file2(dicom_files[idxj])if idxi != idxj:file_dists[idxi, idxj] = dist_method.transform()return file_dists", "docstring": "Calculates the DicomFileDistance between all files in dicom_files, using a\nweighted Levenshtein measure between all field names in field_weights and\ntheir corresponding weights.\n\nParameters\n----------\ndicom_files: iterable of str\n Dicom file paths\n\nfield_weights: dict of str to float\n A dict with header field names to float scalar values, that\n indicate a distance measure ratio for the levenshtein distance\n averaging of all the header field names in it. e.g., {'PatientID': 1}\n\ndist_method_cls: DicomFileDistance class\n Distance method object to compare the files.\n If None, the default DicomFileDistance method using Levenshtein\n distance between the field_weights will be used.\n\nkwargs: DicomFileDistance instantiation named arguments\n Apart from the field_weights argument.\n\nReturns\n-------\nfile_dists: np.ndarray or scipy.sparse.lil_matrix of shape NxN\n Levenshtein distances between each of the N items in dicom_files.", "id": "f4068:m2"} {"signature": "def fit(self, samples, targets):", "body": "samples, targets = check_X_y(samples, targets, ['', '', ''])if not callable(self.score_func):raise TypeError(\"\"\"\"% (self.score_func, type(self.score_func)))self._check_params(samples, targets)self.scores_ = np.asarray(self.score_func(samples, targets))return self", "docstring": "Parameters\n----------\nsamples: array-like, shape = [n_samples, n_features]\n The training input samples.\n\n\ntargets: array-like, shape = [n_samples]\n The target values (class labels in classification, real numbers in\n regression).\n\nReturns\n-------\nself : object\n Returns self.", "id": "f4068:c0:m1"} {"signature": "def fit(self, dcm_file1, dcm_file2):", "body": "self.set_dicom_file1(dcm_file1)self.set_dicom_file2(dcm_file2)", "docstring": "Parameters\n----------\ndcm_file1: str (path to file) or DicomFile or namedtuple\n\ndcm_file2: str (path to file) or DicomFile or namedtuple", "id": "f4068:c1:m1"} {"signature": "def set_dicom_file1(self, dcm_file):", "body": "self.dcmf1 = self._read_dcmfile(dcm_file)", "docstring": "Parameters\n----------\ndcm_file: str (path to file) or DicomFile or namedtuple", "id": "f4068:c1:m2"} {"signature": "def set_dicom_file2(self, dcm_file):", "body": "self.dcmf2 = self._read_dcmfile(dcm_file)", "docstring": "Parameters\n----------\ndcm_file: str (path to file) or DicomFile or namedtuple", "id": "f4068:c1:m3"} {"signature": "@staticmethoddef _read_dcmfile(dcm_file):", "body": "if isinstance(dcm_file, str):return DicomFile(dcm_file)else:return dcm_file", "docstring": "Parameters\n----------\ndcm_file: str or DicomFile\n File path or DicomFile\n\nReturns\n-------\nDicomFile", 
"id": "f4068:c1:m4"} {"signature": "def transform(self):", "body": "if self.dcmf1 is None or self.dcmf2 is None:return np.inffor field_name in self.field_weights:if (str(getattr(self.dcmf1, field_name, ''))!= str(getattr(self.dcmf2, field_name, ''))):return Falsereturn True", "docstring": "Check the field values in self.dcmf1 and self.dcmf2 and returns True\n if all the field values are the same, False otherwise.\n\n Returns\n -------\n bool", "id": "f4068:c1:m6"} {"signature": "def levenshtein_analysis(self, field_weights=None):", "body": "if field_weights is None:if not isinstance(self.field_weights, dict):raise ValueError(''''.format(type(self.field_weights)))key_dicoms = list(self.dicom_groups.keys())file_dists = calculate_file_distances(key_dicoms, field_weights, self._dist_method_cls)return file_dists", "docstring": "Updates the status of the file clusters comparing the cluster\nkey files with a levenshtein weighted measure using either the\nheader_fields or self.header_fields.\n\nParameters\n----------\nfield_weights: dict of strings with floats\n A dict with header field names to float scalar values, that indicate a distance measure\n ratio for the levenshtein distance averaging of all the header field names in it.\n e.g., {'PatientID': 1}", "id": "f4068:c3:m1"} {"signature": "@staticmethoddef dist_percentile_threshold(dist_matrix, perc_thr=, k=):", "body": "triu_idx = np.triu_indices(dist_matrix.shape[], k=k)upper = np.zeros_like(dist_matrix)upper[triu_idx] = dist_matrix[triu_idx] < np.percentile(dist_matrix[triu_idx], perc_thr)return upper", "docstring": "Thresholds a distance matrix and returns the result.\n\n Parameters\n ----------\n\n dist_matrix: array_like\n Input array or object that can be converted to an array.\n\n perc_thr: float in range of [0,100]\n Percentile to compute which must be between 0 and 100 inclusive.\n\n k: int, optional\n Diagonal above which to zero elements.\n k = 0 (the default) is the main diagonal,\n k < 0 is below it and k > 0 is above.\n\n Returns\n -------\n array_like", "id": "f4068:c3:m2"} {"signature": "def get_groups_in_same_folder(self, folder_depth=):", "body": "group_pairs = []key_dicoms = list(self.dicom_groups.keys())idx = len(key_dicoms)while idx > :group1 = key_dicoms.pop()dir_group1 = get_folder_subpath(group1, folder_depth)for group in key_dicoms:if group.startswith(dir_group1):group_pairs.append((group1, group))idx -= return group_pairs", "docstring": "Returns a list of 2-tuples with pairs of dicom groups that\nare in the same folder within given depth.\n\nParameters\n----------\nfolder_depth: int\nPath depth to check for folder equality.\n\nReturns\n-------\nlist of tuples of str", "id": "f4068:c3:m3"} {"signature": "@staticmethoddef plot_file_distances(dist_matrix):", "body": "import matplotlib.pyplot as pltfig = plt.figure()ax = fig.add_subplot()ax.matshow(dist_matrix, interpolation='',cmap=plt.cm.get_cmap(''))", "docstring": "Plots dist_matrix\n\nParameters\n----------\ndist_matrix: np.ndarray", "id": "f4068:c3:m4"} {"signature": "def merge_groups(self, indices):", "body": "try:merged = merge_dict_of_lists(self.dicom_groups, indices,pop_later=True, copy=True)self.dicom_groups = mergedexcept IndexError:raise IndexError('')", "docstring": "Extend the lists within the DICOM groups dictionary.\n The indices will indicate which list have to be extended by which\n other list.\n\n Parameters\n ----------\n indices: list or tuple of 2 iterables of int, bot having the same len\n The indices of the lists that have to be merged, both 
iterables\n items will be read pair by pair, the first is the index to the\n list that will be extended with the list of the second index.\n The indices can be constructed with Numpy e.g.,\n indices = np.where(square_matrix)", "id": "f4068:c3:m7"} {"signature": "def move_to_folder(self, folder_path, groupby_field_name=None):", "body": "try:copy_groups_to_folder(self.dicom_groups, folder_path, groupby_field_name)except IOError as ioe:raise IOError(''.format(folder_path)) from ioe", "docstring": "Copy the file groups to folder_path. Each group will be copied into\n a subfolder with named given by groupby_field.\n\n Parameters\n ----------\n folder_path: str\n Path to where copy the DICOM files.\n\n groupby_field_name: str\n DICOM field name. Will get the value of this field to name the group\n folder. If empty or None will use the basename of the group key file.", "id": "f4068:c3:m8"} {"signature": "def get_unique_field_values_per_group(self, field_name,field_to_use_as_key=None):", "body": "unique_vals = DefaultOrderedDict(set)for dcmg in self.dicom_groups:for f in self.dicom_groups[dcmg]:field_val = DicomFile(f).get_attributes(field_name)key_val = dcmgif field_to_use_as_key is not None:try:key_val = str(DicomFile(dcmg).get_attributes(field_to_use_as_key))except KeyError as ke:raise KeyError(''''.format(field_to_use_as_key,dcmg)) from keunique_vals[key_val].add(field_val)return unique_vals", "docstring": "Return a dictionary where the key is the group key file path and\n the values are sets of unique values of the field name of all DICOM\n files in the group.\n\n Parameters\n ----------\n field_name: str\n Name of the field to read from all files\n\n field_to_use_as_key: str\n Name of the field to get the value and use as key.\n If None, will use the same key as the dicom_groups.\n\n Returns\n -------\n Dict of sets", "id": "f4068:c3:m9"} {"signature": "def generate_config(output_directory):", "body": "if not op.isdir(output_directory):os.makedirs(output_directory)config_file = op.join(output_directory, \"\")open_file = open(config_file, \"\")open_file.write(\"\")open_file.close()return config_file", "docstring": "Generate a dcm2nii configuration file that disable the interactive\n mode.", "id": "f4069:m0"} {"signature": "def add_meta_to_nii(nii_file, dicom_file, dcm_tags=''):", "body": "dcmimage = dicom.read_file(dicom_file)image = nibabel.load(nii_file)if not isinstance(image, nibabel.nifti1.Nifti1Image):raise Exception(\"\".format(type(image)))if isinstance(dcm_tags, str):dcm_tags = [dcm_tags]header = image.get_header()repetition_time = float(dcmimage[(\"\", \"\")].value)header.set_dim_info(slice=)nb_slices = header.get_n_slices()slice_duration = round(repetition_time / nb_slices, )header.set_slice_duration(slice_duration)if dcm_tags:content = [\"\".format(name, dcmimage[tag].value)for name, tag in dcm_tags]free_field = numpy.array(\"\".join(content),dtype=header[\"\"].dtype)image.get_header()[\"\"] = free_fieldimage.update_header()nibabel.save(image, nii_file)", "docstring": "Add slice duration and acquisition times to the headers of the nifit1 files in `nii_file`.\n It will add the repetition time of the DICOM file (field: {0x0018, 0x0080, DS, Repetition Time})\n to the NifTI file as well as any other tag in `dcm_tags`.\n All selected DICOM tags values are set in the `descrip` nifti header field.\n Note that this will modify the header content of `nii_file`.\n\n Parameters\n ----------\n nii_files: str\n Path to the NifTI file to modify.\n\n dicom_file: str\n Paths to the DICOM file 
from where to get the meta data.\n\n dcm_tags: list of str\n List of tags from the DICOM file to read and store in the nifti file.", "id": "f4069:m1"} {"signature": "def call_dcm2nii(work_dir, arguments=''):", "body": "if not op.exists(work_dir):raise IOError(''.format(work_dir))cmd_line = ''.format(arguments, work_dir)log.info(cmd_line)return subprocess.check_call(cmd_line, shell=True)", "docstring": "Converts all DICOM files within `work_dir` into one or more\n NifTi files by calling dcm2nii on this folder.\n\n Parameters\n ----------\n work_dir: str\n Path to the folder that contain the DICOM files\n\n arguments: str\n String containing all the flag arguments for `dcm2nii` CLI.\n\n Returns\n -------\n sys_code: int\n dcm2nii execution return code", "id": "f4069:m2"} {"signature": "def convert_dcm2nii(input_dir, output_dir, filename):", "body": "if not op.exists(input_dir):raise IOError(''.format(input_dir))if not op.exists(output_dir):raise IOError(''.format(output_dir))tmpdir = tempfile.TemporaryDirectory(prefix='')arguments = ''.format(tmpdir.name)try:call_out = call_dcm2nii(input_dir, arguments)except:raiseelse:log.info(''.format(input_dir))filenames = glob(op.join(tmpdir.name, ''))cleaned_filenames = remove_dcm2nii_underprocessed(filenames)filepaths = []for srcpath in cleaned_filenames:dstpath = op.join(output_dir, filename)realpath = copy_w_plus(srcpath, dstpath)filepaths.append(realpath)basename = op.basename(remove_ext(srcpath))aux_files = set(glob(op.join(tmpdir.name, '' .format(basename)))) -set(glob(op.join(tmpdir.name, ''.format(basename))))for aux_file in aux_files:aux_dstpath = copy_w_ext(aux_file, output_dir, remove_ext(op.basename(realpath)))filepaths.append(aux_dstpath)return filepaths", "docstring": "Call MRICron's `dcm2nii` to convert the DICOM files inside `input_dir`\n to Nifti and save the Nifti file in `output_dir` with a `filename` prefix.\n\n Parameters\n ----------\n input_dir: str\n Path to the folder that contains the DICOM files\n\n output_dir: str\n Path to the folder where to save the NifTI file\n\n filename: str\n Output file basename\n\n Returns\n -------\n filepaths: list of str\n List of file paths created in `output_dir`.", "id": "f4069:m3"} {"signature": "def remove_dcm2nii_underprocessed(filepaths):", "body": "cln_flist = []len_sorted = sorted(filepaths, key=len)for idx, fpath in enumerate(len_sorted):remove = Falsefname = op.basename(fpath)rest = len_sorted[idx+:]for rest_fpath in rest:rest_file = op.basename(rest_fpath)if rest_file.endswith(fname):remove = Truebreakif not remove:cln_flist.append(fpath)return cln_flist", "docstring": "Return a subset of `filepaths`. 
Keep only the files that have a basename longer than the\n others with the same suffix.\n This works because dcm2nii appends a prefix character for each processing\n step it does automatically in the DICOM to NifTI conversion.\n\n Parameters\n ----------\n filepaths: iterable of str\n\n Returns\n -------\n cleaned_paths: iterable of str", "id": "f4069:m4"} {"signature": "def treefall(iterable):", "body": "num_elems = len(iterable)for i in range(num_elems, -, -):for c in combinations(iterable, i):yield c", "docstring": "Generate all combinations of the elements of iterable and its subsets.\n\nParameters\n----------\niterable: list, set or dict or any iterable object\n\nReturns\n-------\nA generator of all possible combinations of the iterable.\n\nExample:\n-------\n>>> for i in treefall([1, 2, 3]): print(i)\n>>> (1, 2, 3)\n>>> (1, 2)\n>>> (1, 3)\n>>> (2, 3)\n>>> (1,)\n>>> (2,)\n>>> (3,)\n>>> ()", "id": "f4070:m0"} {"signature": "def get_unique_field_values(dcm_file_list, field_name):", "body": "field_values = set()for dcm in dcm_file_list:field_values.add(str(DicomFile(dcm).get_attributes(field_name)))return field_values", "docstring": "Return a set of unique field values from a list of DICOM files\n\n Parameters\n ----------\n dcm_file_list: iterable of DICOM file paths\n\n field_name: str\n Name of the field from where to get each value\n\n Returns\n -------\n Set of field values", "id": "f4071:m1"} {"signature": "def find_all_dicom_files(root_path):", "body": "dicoms = set()try:for fpath in get_all_files(root_path):if is_dicom_file(fpath):dicoms.add(fpath)except IOError as ioe:raise IOError(''.format(fpath)) from ioereturn dicoms", "docstring": "Returns a list of the dicom files within root_path\n\nParameters\n----------\nroot_path: str\nPath to the directory to be recursively searched for DICOM files.\n\nReturns\n-------\ndicoms: set\nSet of DICOM absolute file paths", "id": "f4071:m2"} {"signature": "def is_dicom_file(filepath):", "body": "if not os.path.exists(filepath):raise IOError(''.format(filepath))filename = os.path.basename(filepath)if filename == '':return Falsetry:_ = dicom.read_file(filepath)except Exception as exc:log.debug(''''.format(filepath))return Falsereturn True", "docstring": "Tries to read the file using dicom.read_file,\nif the file exists and dicom.read_file does not raise\nan Exception, returns True. 
False otherwise.\n\n:param filepath: str\n Path to DICOM file\n\n:return: bool", "id": "f4071:m3"} {"signature": "def group_dicom_files(dicom_paths, hdr_field=''):", "body": "dicom_groups = defaultdict(list)try:for dcm in dicom_paths:hdr = dicom.read_file(dcm)group_key = getattr(hdr, hdr_field)dicom_groups[group_key].append(dcm)except KeyError as ke:raise KeyError(''.format(hdr_field, dcm)) from kereturn dicom_groups", "docstring": "Group in a dictionary all the DICOM files in dicom_paths\n separated by the given `hdr_field` tag value.\n\n Parameters\n ----------\n dicom_paths: str\n Iterable of DICOM file paths.\n\n hdr_field: str\n Name of the DICOM tag whose values will be used as key for the group.\n\n Returns\n -------\n dicom_groups: dict of dicom_paths", "id": "f4071:m4"} {"signature": "def decompress(input_dir, dcm_pattern=''):", "body": "dcmfiles = sorted(recursive_glob(input_dir, dcm_pattern))for dcm in dcmfiles:cmd = ''.format(dcm)log.debug(''.format(cmd))subprocess.check_call(cmd, shell=True)", "docstring": "Decompress all *.dcm files recursively found in DICOM_DIR.\n This uses 'gdcmconv --raw'.\n It works when 'dcm2nii' shows the `Unsupported Transfer Syntax` error. This error is\n usually caused by lack of JPEG2000 support in dcm2nii compilation.\n\n Read more:\n http://www.nitrc.org/plugins/mwiki/index.php/dcm2nii:MainPage#Transfer_Syntaxes_and_Compressed_Images\n\n Parameters\n ----------\n input_dir: str\n Folder path\n\n dcm_patther: str\n Pattern of the DICOM file names in `input_dir`.\n\n Notes\n -----\n The *.dcm files in `input_folder` will be overwritten.", "id": "f4071:m5"} {"signature": "def get_attributes(self, attributes, default=''):", "body": "if isinstance(attributes, str):attributes = [attributes]attrs = [getattr(self, attr, default) for attr in attributes]if len(attrs) == :return attrs[]return tuple(attrs)", "docstring": "Return the attributes values from this DicomFile\n\n Parameters\n ----------\n attributes: str or list of str\n DICOM field names\n\n default: str\n Default value if the attribute does not exist.\n\n Returns\n -------\n Value of the field or list of values.", "id": "f4071:c0:m1"} {"signature": "def as_ndarray(arr, copy=False, dtype=None, order=''):", "body": "if order not in ('', '', '', '', None):raise ValueError(\"\".format(str(order)))if isinstance(arr, np.memmap):if dtype is None:if order in ('', '', None):ret = np.array(np.asarray(arr), copy=True)else:ret = np.array(np.asarray(arr), copy=True, order=order)else:if order in ('', '', None):ret = np.asarray(arr).astype(dtype)else:ret = _asarray(np.array(arr, copy=True), dtype=dtype, order=order)elif isinstance(arr, np.ndarray):ret = _asarray(arr, dtype=dtype, order=order)if np.may_share_memory(ret, arr) and copy:ret = ret.T.copy().T if ret.flags[''] else ret.copy()elif isinstance(arr, (list, tuple)):if order in (\"\", \"\"):ret = np.asarray(arr, dtype=dtype)else:ret = np.asarray(arr, dtype=dtype, order=order)else:raise ValueError(\"\".format(arr.__class__))return ret", "docstring": "Convert an arbitrary array to numpy.ndarray.\n\n In the case of a memmap array, a copy is automatically made to break the\n link with the underlying file (whatever the value of the \"copy\" keyword).\n\n The purpose of this function is mainly to get rid of memmap objects, but\n it can be used for other purposes. 
In particular, combining copying and\n casting can lead to performance improvements in some cases, by avoiding\n unnecessary copies.\n\n If not specified, input array order is preserved, in all cases, even when\n a copy is requested.\n\n Caveat: this function does not copy during bool to/from 1-byte dtype\n conversions. This can lead to some surprising results in some rare cases.\n Example:\n\n a = numpy.asarray([0, 1, 2], dtype=numpy.int8)\n b = as_ndarray(a, dtype=bool) # array([False, True, True], dtype=bool)\n c = as_ndarray(b, dtype=numpy.int8) # array([0, 1, 2], dtype=numpy.int8)\n\n The usually expected result for the last line would be array([0, 1, 1])\n because True evaluates to 1. Since there is no copy made here, the original\n array is recovered.\n\n Parameters\n ----------\n arr: array-like\n input array. Any value accepted by numpy.asarray is valid.\n\n copy: bool\n if True, force a copy of the array. Always True when arr is a memmap.\n\n dtype: any numpy dtype\n dtype of the returned array. Performing copy and type conversion at the\n same time can in some cases avoid an additional copy.\n\n order: string\n gives the order of the returned array.\n Valid values are: \"C\", \"F\", \"A\", \"K\", None.\n default is \"K\". See ndarray.copy() for more information.\n\n Returns\n -------\n ret: np.ndarray\n Numpy array containing the same data as arr, always of class\n numpy.ndarray, and with no link to any underlying file.", "id": "f4072:m1"} {"signature": "def import_pyfile(filepath, mod_name=None):", "body": "import sysif sys.version_info.major == :import importlib.machineryloader = importlib.machinery.SourceFileLoader('', filepath)mod = loader.load_module(mod_name)else:import impmod = imp.load_source(mod_name, filepath)return mod", "docstring": "Imports the contents of filepath as a Python module.\n\n:param filepath: string\n\n:param mod_name: string\nName of the module when imported\n\n:return: module\nImported module", "id": "f4073:m0"} {"signature": "def _assert_all_finite(X):", "body": "X = np.asanyarray(X)if (X.dtype.char in np.typecodes[''] and not np.isfinite(X.sum())and not np.isfinite(X).all()):raise ValueError(\"\"\"\" % X.dtype)", "docstring": "Like assert_all_finite, but only for ndarray.", "id": "f4075:m0"} {"signature": "def assert_all_finite(X):", "body": "_assert_all_finite(X.data if sp.issparse(X) else X)", "docstring": "Throw a ValueError if X contains NaN or infinity.\n\n Input MUST be an np.ndarray instance or a scipy.sparse matrix.", "id": "f4075:m1"} {"signature": "def as_float_array(X, copy=True, force_all_finite=True):", "body": "if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)and not sp.issparse(X)):return check_array(X, ['', '', ''], dtype=np.float64,copy=copy, force_all_finite=force_all_finite,ensure_2d=False)elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:return X.copy() if copy else Xelif X.dtype in [np.float32, np.float64]: return X.copy('' if X.flags[''] else '') if copy else Xelse:return X.astype(np.float32 if X.dtype == np.int32 else np.float64)", "docstring": "Converts an array-like to an array of floats\n\n The new dtype will be np.float32 or np.float64, depending on the original\n type. The function can create a copy or modify the argument depending\n on the argument copy.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n\n copy : bool, optional\n If True, a copy of X will be created. 
If False, a copy may still be\n returned if X's dtype is not a floating point type.\n\n Returns\n -------\n XT : {array, sparse matrix}\n An array of type np.float", "id": "f4075:m2"} {"signature": "def _num_samples(x):", "body": "if not hasattr(x, '') and not hasattr(x, ''):if hasattr(x, ''):x = np.asarray(x)else:raise TypeError(\"\" % x)return x.shape[] if hasattr(x, '') else len(x)", "docstring": "Return number of samples in array-like x.", "id": "f4075:m3"} {"signature": "def check_consistent_length(*arrays):", "body": "uniques = np.unique([_num_samples(X) for X in arrays if X is not None])if len(uniques) > :raise ValueError(\"\"% str(uniques))", "docstring": "Check that all arrays have consistent first dimensions.\n\n Checks whether all objects in arrays have the same shape or length.\n\n Parameters\n ----------\n arrays : list or tuple of input objects.\n Objects that will be checked for consistent length.", "id": "f4075:m4"} {"signature": "def indexable(*iterables):", "body": "result = []for X in iterables:if sp.issparse(X):result.append(X.tocsr())elif hasattr(X, \"\") or hasattr(X, \"\"):result.append(X)elif X is None:result.append(X)else:result.append(np.array(X))check_consistent_length(*result)return result", "docstring": "Make arrays indexable for cross-validation.\n\n Checks consistent length, passes through None, and ensures that everything\n can be indexed by converting sparse matrices to csr and converting\n non-interable objects to arrays.\n\n Parameters\n ----------\n iterables : lists, dataframes, arrays, sparse matrices\n List of objects to ensure sliceability.", "id": "f4075:m5"} {"signature": "def _ensure_sparse_format(spmatrix, accept_sparse, dtype, order, copy,force_all_finite):", "body": "if accept_sparse is None:raise TypeError('''''')sparse_type = spmatrix.formatif dtype is None:dtype = spmatrix.dtypeif sparse_type in accept_sparse:if dtype == spmatrix.dtype:if copy:spmatrix = spmatrix.copy()else:spmatrix = spmatrix.astype(dtype)else:spmatrix = spmatrix.asformat(accept_sparse[]).astype(dtype)if force_all_finite:if not hasattr(spmatrix, \"\"):warnings.warn(\"\"% spmatrix.format)else:_assert_all_finite(spmatrix.data)if hasattr(spmatrix, \"\"):spmatrix.data = np.array(spmatrix.data, copy=False, order=order)return spmatrix", "docstring": "Convert a sparse matrix to a given format.\n\n Checks the sparse format of spmatrix and converts if necessary.\n\n Parameters\n ----------\n spmatrix : scipy sparse matrix\n Input to validate and convert.\n\n accept_sparse : string, list of string or None (default=None)\n String[s] representing allowed sparse matrix formats ('csc',\n 'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse\n matrix input will raise an error. If the input is sparse but not in\n the allowed format, it will be converted to the first listed format.\n\n order : 'F', 'C' or None (default=None)\n Whether an array will be forced to be fortran or c-style.\n\n copy : boolean (default=False)\n Whether a forced copy will be triggered. 
If copy=False, a copy might\n be triggered by a conversion.\n\n force_all_finite : boolean (default=True)\n Whether to raise an error on np.inf and np.nan in X.\n\n Returns\n -------\n spmatrix_converted : scipy sparse matrix.\n Matrix that is ensured to have an allowed type.", "id": "f4075:m6"} {"signature": "def check_array(array, accept_sparse=None, dtype=None, order=None, copy=False,force_all_finite=True, ensure_2d=True, allow_nd=False):", "body": "if isinstance(accept_sparse, str):accept_sparse = [accept_sparse]if sp.issparse(array):array = _ensure_sparse_format(array, accept_sparse, dtype, order,copy, force_all_finite)else:if ensure_2d:array = np.atleast_2d(array)array = np.array(array, dtype=dtype, order=order, copy=copy)if not allow_nd and array.ndim >= :raise ValueError(\"\" %array.ndim)if force_all_finite:_assert_all_finite(array)return array", "docstring": "Input validation on an array, list, sparse matrix or similar.\n\n By default, the input is converted to an at least 2nd numpy array.\n\n Parameters\n ----------\n array : object\n Input object to check / convert.\n\n accept_sparse : string, list of string or None (default=None)\n String[s] representing allowed sparse matrix formats, such as 'csc',\n 'csr', etc. None means that sparse matrix input will raise an error.\n If the input is sparse but not in the allowed format, it will be\n converted to the first listed format.\n\n order : 'F', 'C' or None (default=None)\n Whether an array will be forced to be fortran or c-style.\n\n copy : boolean (default=False)\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n force_all_finite : boolean (default=True)\n Whether to raise an error on np.inf and np.nan in X.\n\n ensure_2d : boolean (default=True)\n Whether to make X at least 2d.\n\n allow_nd : boolean (default=False)\n Whether to allow X.ndim > 2.\n\n Returns\n -------\n X_converted : object\n The converted and validated X.", "id": "f4075:m7"} {"signature": "def check_X_y(X, y, accept_sparse=None, dtype=None, order=None, copy=False,force_all_finite=True, ensure_2d=True, allow_nd=False,multi_output=False):", "body": "X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,ensure_2d, allow_nd)if multi_output:y = check_array(y, '', force_all_finite=True, ensure_2d=False)else:y = column_or_1d(y, warn=True)_assert_all_finite(y)check_consistent_length(X, y)return X, y", "docstring": "Input validation for standard estimators.\n\n Checks X and y for consistent length, enforces X 2d and y 1d.\n Standard input checks are only applied to y. For multi-label y,\n set multi_ouput=True to allow 2d and sparse y.\n\n Parameters\n ----------\n X : nd-array, list or sparse matrix\n Input data.\n\n y : nd-array, list or sparse matrix\n Labels.\n\n accept_sparse : string, list of string or None (default=None)\n String[s] representing allowed sparse matrix formats, such as 'csc',\n 'csr', etc. None means that sparse matrix input will raise an error.\n If the input is sparse but not in the allowed format, it will be\n converted to the first listed format.\n\n order : 'F', 'C' or None (default=None)\n Whether an array will be forced to be fortran or c-style.\n\n copy : boolean (default=False)\n Whether a forced copy will be triggered. 
If copy=False, a copy might\n be triggered by a conversion.\n\n force_all_finite : boolean (default=True)\n Whether to raise an error on np.inf and np.nan in X.\n\n ensure_2d : boolean (default=True)\n Whether to make X at least 2d.\n\n allow_nd : boolean (default=False)\n Whether to allow X.ndim > 2.\n\n Returns\n -------\n X_converted : object\n The converted and validated X.", "id": "f4075:m8"} {"signature": "def column_or_1d(y, warn=False):", "body": "shape = np.shape(y)if len(shape) == :return np.ravel(y)if len(shape) == and shape[] == :if warn:warnings.warn(\"\"\"\"\"\",DataConversionWarning, stacklevel=)return np.ravel(y)raise ValueError(\"\".format(shape))", "docstring": "Ravel column or 1d numpy array, else raises an error\n\n Parameters\n ----------\n y : array-like\n\n Returns\n -------\n y : array", "id": "f4075:m9"} {"signature": "def warn_if_not_float(X, estimator=''):", "body": "if not isinstance(estimator, str):estimator = estimator.__class__.__name__if X.dtype.kind != '':warnings.warn(\"\"\"\" % (estimator, X.dtype))return Truereturn False", "docstring": "Warning utility function to check that data type is floating point.\n\n Returns True if a warning was raised (i.e. the input is not float) and\n False otherwise, for easier input validation.", "id": "f4075:m10"} {"signature": "def check_random_state(seed):", "body": "if seed is None or seed is np.random:return np.random.mtrand._randif isinstance(seed, (numbers.Integral, np.integer)):return np.random.RandomState(seed)if isinstance(seed, np.random.RandomState):return seedraise ValueError('''' % seed)", "docstring": "Turn seed into a np.random.RandomState instance\n\n If seed is None, return the RandomState singleton used by np.random.\n If seed is an int, return a new RandomState instance seeded with seed.\n If seed is already a RandomState instance, return it.\n Otherwise raise ValueError.", "id": "f4075:m11"} {"signature": "def filter_objlist(olist, fieldname, fieldval):", "body": "return [x for x in olist if getattr(x, fieldname) == fieldval]", "docstring": "Returns a list with of the objects in olist that have a fieldname valued as fieldval\n\nParameters\n----------\nolist: list of objects\n\nfieldname: string\n\nfieldval: anything\n\nReturns\n-------\nlist of objets", "id": "f4077:m0"} {"signature": "def filter_list(lst, filt):", "body": "return [m for s in lst for m in (filt(s),) if m]", "docstring": "Parameters\n----------\nlst: list\n\nfilter: function\n Unary string filter function\n\nReturns\n-------\nlist\n List of items that passed the filter\n\nExample\n-------\n>>> l = ['12123123', 'N123213']\n>>> filt = re.compile('\\d*').match\n>>> nu_l = list_filter(l, filt)", "id": "f4077:m1"} {"signature": "def match_list(lst, pattern, group_names=[]):", "body": "filtfn = re.compile(pattern).matchfiltlst = filter_list(lst, filtfn)if not group_names:return [m.string for m in filtlst]else:return [m.group(group_names) for m in filtlst]", "docstring": "Parameters\n----------\nlst: list of str\n\nregex: string\n\ngroup_names: list of strings\n See re.MatchObject group docstring\n\nReturns\n-------\nlist of strings\n Filtered list, with the strings that match the pattern", "id": "f4077:m2"} {"signature": "def search_list(lst, pattern):", "body": "filt = re.compile(pattern).searchreturn filter_list(lst, filt)", "docstring": "Parameters\n----------\npattern: string\n\nlst: list of strings\n\nReturns\n-------\nfiltered_list: list of str\n Filtered lists with the strings in which the pattern is found.", "id": "f4077:m3"} 
{"signature": "def append_to_keys(adict, preffix):", "body": "return {preffix + str(key): (value if isinstance(value, dict) else value)for key, value in list(adict.items())}", "docstring": "Parameters\n----------\nadict:\npreffix:\n\nReturns\n-------", "id": "f4077:m4"} {"signature": "def append_to_list(lst, preffix):", "body": "return [preffix + str(item) for item in lst]", "docstring": "Parameters\n----------\nlst:\npreffix:\n\nReturns\n-------", "id": "f4077:m5"} {"signature": "def is_valid_regex(string):", "body": "try:re.compile(string)is_valid = Trueexcept re.error:is_valid = Falsereturn is_valid", "docstring": "Checks whether the re module can compile the given regular expression.\n\nParameters\n----------\nstring: str\n\nReturns\n-------\nboolean", "id": "f4077:m6"} {"signature": "def is_regex(string):", "body": "is_regex = Falseregex_chars = ['', '', '', '', '']for c in regex_chars:if string.find(c) > -:return is_valid_regex(string)return is_regex", "docstring": "TODO: improve this!\n\nReturns True if the given string is considered a regular expression,\nFalse otherwise.\nIt will be considered a regex if starts with a non alphabetic character\nand then correctly compiled by re.compile\n\n:param string: str", "id": "f4077:m7"} {"signature": "def is_fnmatch_regex(string):", "body": "is_regex = Falseregex_chars = ['', '', '']for c in regex_chars:if string.find(c) > -:return Truereturn is_regex", "docstring": "Returns True if the given string is considered a fnmatch\nregular expression, False otherwise.\nIt will look for\n\n:param string: str", "id": "f4077:m8"} {"signature": "def remove_from_string(string, values):", "body": "for v in values:string = string.replace(v, '')return string", "docstring": "Parameters\n----------\nstring:\nvalues:\n\nReturns\n-------", "id": "f4077:m9"} {"signature": "def where_is(strings, pattern, n=, lookup_func=re.match):", "body": "count = for idx, item in enumerate(strings):if lookup_func(pattern, item):count += if count == n:return idxreturn -", "docstring": "Return index of the nth match found of pattern in strings\n\n Parameters\n ----------\n strings: list of str\n List of strings\n\n pattern: str\n Pattern to be matched\n\n nth: int\n Number of times the match must happen to return the item index.\n\n lookup_func: callable\n Function to match each item in strings to the pattern, e.g., re.match or re.search.\n\n Returns\n -------\n index: int\n Index of the nth item that matches the pattern.\n If there are no n matches will return -1", "id": "f4077:m11"} {"signature": "def merge(dict_1, dict_2):", "body": "return dict((str(key), dict_1.get(key) or dict_2.get(key))for key in set(dict_2) | set(dict_1))", "docstring": "Merge two dictionaries.\n\n Values that evaluate to true take priority over falsy values.\n `dict_1` takes priority over `dict_2`.", "id": "f4080:m0"} {"signature": "def get_sys_path(rcpath, app_name, section_name=None):", "body": "if op.exists(rcpath):return op.realpath(op.expanduser(rcpath))try:settings = rcfile(app_name, section_name)except:raisetry:sys_path = op.expanduser(settings[rcpath])except KeyError:raise IOError(''''''.format(rcpath,section_name,app_name))else:if not op.exists(sys_path):raise IOError(''''''.format(rcpath, section_name, app_name,sys_path))return op.realpath(op.expanduser(sys_path))", "docstring": "Return a folder path if it exists.\n\n First will check if it is an existing system path, if it is, will return it\n expanded and absoluted.\n\n If this fails will look for the rcpath variable in the app_name rcfiles 
or\n exclusively within the given section_name, if given.\n\n Parameters\n ----------\n rcpath: str\n Existing folder path or variable name in app_name rcfile with an\n existing one.\n\n section_name: str\n Name of a section in the app_name rcfile to look exclusively there for\n variable names.\n\n app_name: str\n Name of the application to look for rcfile configuration files.\n\n Returns\n -------\n sys_path: str\n A expanded absolute file or folder path if the path exists.\n\n Raises\n ------\n IOError if the proposed sys_path does not exist.", "id": "f4080:m5"} {"signature": "def rcfile(appname, section=None, args={}, strip_dashes=True):", "body": "if strip_dashes:for k in args.keys():args[k.lstrip('')] = args.pop(k)environ = get_environment(appname)if section is None:section = appnameconfig = get_config(appname,section,args.get('', ''),args.get('', ''))config = merge(merge(args, config), environ)if not config:raise IOError(''''.format(appname))return config", "docstring": "Read environment variables and config files and return them merged with\n predefined list of arguments.\n\n Parameters\n ----------\n appname: str\n Application name, used for config files and environment variable\n names.\n\n section: str\n Name of the section to be read. If this is not set: appname.\n\n args:\n arguments from command line (optparse, docopt, etc).\n\n strip_dashes: bool\n Strip dashes prefixing key names from args dict.\n\n Returns\n --------\n dict\n containing the merged variables of environment variables, config\n files and args.\n\n Raises\n ------\n IOError\n In case the return value is empty.\n\n Notes\n -----\n Environment variables are read if they start with appname in uppercase\n with underscore, for example:\n\n TEST_VAR=1\n\n Config files compatible with ConfigParser are read and the section name\n appname is read, example:\n\n [appname]\n var=1\n\n We can also have host-dependent configuration values, which have\n priority over the default appname values.\n\n [appname]\n var=1\n\n [appname:mylinux]\n var=3\n\n\n For boolean flags do not try to use: 'True' or 'False',\n 'on' or 'off',\n '1' or '0'.\n Unless you are willing to parse this values by yourself.\n We recommend commenting the variables out with '#' if you want to set a\n flag to False and check if it is in the rcfile cfg dict, i.e.:\n\n flag_value = 'flag_variable' in cfg\n\n\n Files are read from: /etc/appname/config,\n /etc/appfilerc,\n ~/.config/appname/config,\n ~/.config/appname,\n ~/.appname/config,\n ~/.appnamerc,\n appnamerc,\n .appnamerc,\n appnamerc file found in 'path' folder variable in args,\n .appnamerc file found in 'path' folder variable in args,\n file provided by 'config' variable in args.\n\n Example\n -------\n args = rcfile(__name__, docopt(__doc__, version=__version__))", "id": "f4080:m6"} {"signature": "def get_rcfile_section(app_name, section_name):", "body": "try:settings = rcfile(app_name, section_name)except IOError:raiseexcept:raise KeyError(''''.format(section_name, app_name))else:return settings", "docstring": "Return the dictionary containing the rcfile section configuration\n variables.\n\n Parameters\n ----------\n section_name: str\n Name of the section in the rcfiles.\n\n app_name: str\n Name of the application to look for its rcfiles.\n\n Returns\n -------\n settings: dict\n Dict with variable values", "id": "f4080:m7"} {"signature": "def get_rcfile_variable_value(var_name, app_name, section_name=None):", "body": "cfg = get_rcfile_section(app_name, section_name)if var_name in cfg:raise 
KeyError(''''.format(var_name, section_name))return cfg[var_name]", "docstring": "Return the value of the variable in the section_name section of the\n app_name rc file.\n\n Parameters\n ----------\n var_name: str\n Name of the variable to be searched for.\n\n section_name: str\n Name of the section in the rcfiles.\n\n app_name: str\n Name of the application to look for its rcfiles.\n\n Returns\n -------\n var_value: str\n The value of the variable with given var_name.", "id": "f4080:m8"} {"signature": "def find_in_sections(var_name, app_name):", "body": "sections = get_sections(app_name)if not sections:raise ValueError(''.format(app_name))for s in sections:try:var_value = get_rcfile_variable_value(var_name, section_name=s,app_name=app_name)except:passelse:return s, var_valueraise KeyError(''''.format(var_name, app_name))", "docstring": "Return the section and the value of the variable where the first\n var_name is found in the app_name rcfiles.\n\n Parameters\n ----------\n var_name: str\n Name of the variable to be searched for.\n\n app_name: str\n Name of the application to look for its rcfiles.\n\n Returns\n -------\n section_name: str\n Name of the section in the rcfiles where var_name was first found.\n\n var_value: str\n The value of the first variable with given var_name.", "id": "f4080:m9"} {"signature": "def setup_logging(log_config_file=op.join(op.dirname(__file__), ''),log_default_level=LOG_LEVEL,env_key=MODULE_NAME.upper() + ''):", "body": "path = log_config_filevalue = os.getenv(env_key, None)if value:path = valueif op.exists(path):log_cfg = yaml.load(read(path).format(MODULE_NAME))logging.config.dictConfig(log_cfg)else:logging.basicConfig(level=log_default_level)log = logging.getLogger(__name__)log.debug('')", "docstring": "Setup logging configuration.", "id": "f4081:m0"} {"signature": "def _safe_cache(memory, func, **kwargs):", "body": "cachedir = memory.cachedirif cachedir is None or cachedir in __CACHE_CHECKED:return memory.cache(func, **kwargs)version_file = os.path.join(cachedir, '')versions = dict()if os.path.exists(version_file):with open(version_file, '') as _version_file:versions = json.load(_version_file)modules = (nibabel, )my_versions = dict((m.__name__, LooseVersion(m.__version__).version[:])for m in modules)commons = set(versions.keys()).intersection(set(my_versions.keys()))collisions = [m for m in commons if versions[m] != my_versions[m]]if len(collisions) > :if nilearn.CHECK_CACHE_VERSION:warnings.warn(\"\"\"\"\"\"\"\"% cachedir)try:tmp_dir = (os.path.split(cachedir)[:-]+ ('' % os.getpid(), ))tmp_dir = os.path.join(*tmp_dir)os.rename(cachedir, tmp_dir)shutil.rmtree(tmp_dir)except OSError:passtry:os.makedirs(cachedir)except OSError:passelse:warnings.warn(\"\"\"\" % cachedir)if versions != my_versions:with open(version_file, '') as _version_file:json.dump(my_versions, _version_file)__CACHE_CHECKED[cachedir] = Truereturn memory.cache(func, **kwargs)", "docstring": "A wrapper for mem.cache that flushes the cache if the version\n number of nibabel has changed.", "id": "f4082:m0"} {"signature": "def cache(func, memory, func_memory_level=None, memory_level=None,**kwargs):", "body": "verbose = kwargs.get('', )memory_levels = [memory_level, func_memory_level]both_params_integers = all(isinstance(lvl, int) for lvl in memory_levels)both_params_none = all(lvl is None for lvl in memory_levels)if not (both_params_integers or both_params_none):raise ValueError('''')if memory is not None and (func_memory_level is None ormemory_level >= func_memory_level):if 
isinstance(memory, _basestring):memory = Memory(cachedir=memory, verbose=verbose)if not isinstance(memory, MEMORY_CLASSES):raise TypeError(\"\"\"\"\"\" % (memory, type(memory)))if (memory.cachedir is None and memory_level is not Noneand memory_level > ):warnings.warn(\"\"\"\"\"\"\"\" %(memory_level, func.__name__),stacklevel=)else:memory = Memory(cachedir=None, verbose=verbose)return _safe_cache(memory, func, **kwargs)", "docstring": "Return a joblib.Memory object.\n\n The memory_level determines the level above which the wrapped\n function output is cached. By specifying a numeric value for\n this level, the user can control the amount of cache memory\n used. This function will cache the function call or not\n depending on the cache level.\n\n Parameters\n ----------\n func: function\n The function whose output is to be cached.\n\n memory: instance of joblib.Memory or string\n Used to cache the function call.\n\n func_memory_level: int, optional\n The memory_level from which caching must be enabled for the wrapped\n function.\n\n memory_level: int, optional\n The memory_level used to determine if function call must\n be cached or not (if user_memory_level is equal or greater than\n func_memory_level the function is cached)\n\n kwargs: keyword arguments\n The keyword arguments passed to memory.cache\n\n Returns\n -------\n mem: joblib.MemorizedFunc\n object that wraps the function func. This object may be\n a no-op, if the requested level is lower than the value given\n to _cache()). For consistency, a joblib.Memory object is always\n returned.", "id": "f4082:m1"} {"signature": "def _cache(self, func, func_memory_level=, **kwargs):", "body": "verbose = getattr(self, '', )if not hasattr(self, \"\"):self.memory_level = if not hasattr(self, \"\"):self.memory = Memory(cachedir=None, verbose=verbose)if isinstance(self.memory, _basestring):self.memory = Memory(cachedir=self.memory, verbose=verbose)if self.memory_level == :if (isinstance(self.memory, _basestring)or self.memory.cachedir is not None):warnings.warn(\"\"\"\"\"\")self.memory_level = return cache(func, self.memory, func_memory_level=func_memory_level,memory_level=self.memory_level, **kwargs)", "docstring": "Return a joblib.Memory object.\n\n The memory_level determines the level above which the wrapped\n function output is cached. By specifying a numeric value for\n this level, the user can control the amount of cache memory\n used. This function will cache the function call or not\n depending on the cache level.\n\n Parameters\n ----------\n func: function\n The function whose output is to be cached.\n\n memory_level: int\n The memory_level from which caching must be enabled for the wrapped\n function.\n\n Returns\n -------\n mem: joblib.Memory\n object that wraps the function func. This object may be\n a no-op, if the requested level is lower than the value given\n to _cache()). 
For consistency, a joblib.Memory object is always\n returned.", "id": "f4082:c0:m0"} {"signature": "def which(program):", "body": "if (sys.version_info > (, )):return which_py3(program)else:return which_py2(program)", "docstring": "Returns the absolute path of the given CLI program name.", "id": "f4083:m0"} {"signature": "def whoami():", "body": "return inspect.stack()[][]", "docstring": "Get the name of the current function", "id": "f4083:m3"} {"signature": "def whosdaddy():", "body": "return inspect.stack()[][]", "docstring": "Get the name of the caller function", "id": "f4083:m4"} {"signature": "def die(msg, code=-):", "body": "sys.stderr.write(msg + \"\")sys.exit(code)", "docstring": "Writes msg to stderr and exits with return code", "id": "f4083:m5"} {"signature": "def check_call(cmd_args):", "body": "p = subprocess.Popen(cmd_args, stdout=subprocess.PIPE)(output, err) = p.communicate()return output", "docstring": "Calls the command\n\nParameters\n----------\ncmd_args: list of str\n Command name to call and its arguments in a list.\n\nReturns\n-------\nCommand output", "id": "f4083:m6"} {"signature": "def call_command(cmd_name, args_strings):", "body": "if not op.isabs(cmd_name):cmd_fullpath = which(cmd_name)else:cmd_fullpath = cmd_nametry:cmd_line = [cmd_fullpath] + args_stringslog.info(''.format(cmd_line))retval = subprocess.check_call(cmd_line)except CalledProcessError as ce:log.exception(\"\"\"\".format(cmd_name, args_strings,ce.returncode))raiseelse:return retval", "docstring": "Calls a CLI command with arguments and returns its return value.\n\n Parameters\n ----------\n cmd_name: str\n Command name or full path to the binary file.\n\n args_strings: list of str\n Argument strings list.\n\n Returns\n -------\n return_value\n Command return value.", "id": "f4083:m7"} {"signature": "def condor_call(cmd, shell=True):", "body": "log.info(cmd)ret = condor_submit(cmd)if ret != :subprocess.call(cmd, shell=shell)", "docstring": "Tries to submit cmd to HTCondor; if it does not succeed, it will\nbe called with subprocess.call.\n\nParameters\n----------\ncmd: string\n Command to be submitted\n\nReturns\n-------", "id": "f4083:m8"} {"signature": "def condor_submit(cmd):", "body": "is_running = subprocess.call('', shell=True) == if not is_running:raise CalledProcessError('')sub_cmd = ''+ cmd.split()[] + ''log.info('' + sub_cmd)return subprocess.call(sub_cmd + '' + cmd, shell=True)", "docstring": "Submits cmd to HTCondor queue\n\nParameters\n----------\ncmd: string\n Command to be submitted\n\nReturns\n-------\nint\n returncode value from calling the submission command.", "id": "f4083:m9"} {"signature": "@mask.setterdef mask(self, image):", "body": "if image is None:self._mask = Nonetry:mask = load_mask(image)except Exception as exc:raise Exception(''.format(image)) from excelse:self._mask = mask", "docstring": "self.mask setter\n\n Parameters\n ----------\n image: str or img-like object.\n See NeuroImage constructor docstring.", "id": "f4085:c0:m4"} {"signature": "def check_compatibility(self, one_img, another_img=None):", "body": "if another_img is None:if len(self.items) > :another_img = self.items[]else:raise ValueError(''''.format(repr_imgs(one_img)))try:if self.all_compatible:check_img_compatibility(one_img, another_img)if self.mask is not None:check_img_compatibility(one_img, self.mask, only_check_3d=True)except:raise", "docstring": "Parameters\n----------\none_img: str or img-like object.\n See NeuroImage constructor docstring.\n\nanother_img: str or img-like object.\n See NeuroImage 
constructor docstring.\n If None will use the first image of self.images, if there is any.\n\nRaises\n------\nNiftiFilesNotCompatible\n If one_img and another_img aren't compatible.\n\nValueError\n If another_img is None and there are no other images in this set.", "id": "f4085:c0:m7"} {"signature": "def set_labels(self, labels):", "body": "if not isinstance(labels, string_types) and len(labels) != self.n_subjs:raise ValueError(''''.format(len(labels), self.n_subjs))self.labels = labels", "docstring": "Parameters\n----------\nlabels: list of int or str\n This list will be checked to have the same size as\n\nRaises\n------\nValueError\n if len(labels) != self.n_subjs", "id": "f4085:c0:m9"} {"signature": "def _load_images_and_labels(self, images, labels=None):", "body": "if not isinstance(images, (list, tuple)):raise ValueError(''''.format(type(images)))if not len(images) > :raise ValueError(''''.format(len(images)))if labels is not None and len(labels) != len(images):raise ValueError(''''.format(len(images), len(labels)))first_file = images[]if first_file:first_img = NeuroImage(first_file)else:raise(''.format(repr_imgs(first_file)))for idx, image in enumerate(images):try:img = NeuroImage(image)self.check_compatibility(img, first_img)except:log.exception(''.format(repr_imgs(image)))raiseelse:self.items.append(img)self.set_labels(labels)", "docstring": "Read the images, load them into self.items and set the labels.", "id": "f4085:c0:m10"} {"signature": "def to_matrix(self, smooth_fwhm=, outdtype=None):", "body": "if not self.all_compatible:raise ValueError(\"\")if not outdtype:outdtype = self.items[].dtypen_voxels = Nonemask_indices = Nonemask_shape = self.items[].shape[:]if self.has_mask:mask_arr = self.mask.get_data()mask_indices = np.nonzero(mask_arr)mask_shape = self.mask.shapen_voxels = np.count_nonzero(mask_arr)if n_voxels is None:log.debug(''.format(self.mask))n_voxels = np.prod(mask_shape)mask_indices = Nonendims = self.items[].ndimif ndims == :subj_flat_shape = (n_voxels, )elif ndims == :subj_flat_shape = (n_voxels, self.items[].shape[])else:raise NotImplementedError(''''.format(ndims))outmat = np.zeros((self.n_subjs, ) + subj_flat_shape, dtype=outdtype)try:for i, image in enumerate(self.items):if smooth_fwhm > :image.fwhm = smooth_fwhmif self.has_mask:image.set_mask(self.mask)outmat[i, :], _, _ = image.mask_and_flatten()image.clear_data()except Exception as exc:raise Exception(''.format(image)) from excelse:return outmat, mask_indices, mask_shape", "docstring": "Return numpy.ndarray with the masked or flatten image data and\n the relevant information (mask indices and volume shape).\n\n Parameters\n ----------\n smooth__fwhm: int\n Integer indicating the size of the FWHM Gaussian smoothing kernel\n to smooth the subject volumes before creating the data matrix\n\n outdtype: dtype\n Type of the elements of the array, if None will obtain the dtype from\n the first nifti file.\n\n Returns\n -------\n outmat, mask_indices, vol_shape\n\n outmat: Numpy array with shape N x prod(vol.shape)\n containing the N files as flat vectors.\n\n mask_indices: matrix with indices of the voxels in the mask\n\n vol_shape: Tuple with shape of the volumes, for reshaping.", "id": "f4085:c0:m11"} {"signature": "def to_file(self, output_file, smooth_fwhm=, outdtype=None):", "body": "outmat, mask_indices, mask_shape = self.to_matrix(smooth_fwhm, outdtype)exporter = ExportData()content = {'': outmat,'': self.labels,'': mask_indices,'': mask_shape, }if 
self.others:content.update(self.others)log.debug(''.format(output_file))try:exporter.save_variables(output_file, content)except Exception as exc:raise Exception(''.format(output_file)) from exc", "docstring": "Save the Numpy array created from to_matrix function to the output_file.\n\n Will save into the file: outmat, mask_indices, vol_shape and self.others (put here whatever you want)\n\n data: Numpy array with shape N x prod(vol.shape)\n containing the N files as flat vectors.\n\n mask_indices: matrix with indices of the voxels in the mask\n\n vol_shape: Tuple with shape of the volumes, for reshaping.\n\n Parameters\n ----------\n output_file: str\n Path to the output file. The extension of the file will be taken into account for the file format.\n Choices of extensions: '.pyshelf' or '.shelf' (Python shelve)\n '.mat' (Matlab archive),\n '.hdf5' or '.h5' (HDF5 file)\n\n smooth_fwhm: int\n Integer indicating the size of the FWHM Gaussian smoothing kernel\n to smooth the subject volumes before creating the data matrix\n\n outdtype: dtype\n Type of the elements of the array, if None will obtain the dtype from\n the first nifti file.", "id": "f4085:c0:m12"} {"signature": "def _init_subj_data(self, subj_files):", "body": "try:if isinstance(subj_files, list):self.from_list(subj_files)elif isinstance(subj_files, dict):self.from_dict(subj_files)else:raise ValueError('')except Exception as exc:raise Exception('') from exc", "docstring": "Parameters\n----------\nsubj_files: list or dict of str\n file_path -> int/str", "id": "f4085:c1:m1"} {"signature": "@staticmethoddef _load_image(file_path):", "body": "if not os.path.exists(file_path):raise FileNotFound(file_path)try:nii_img = load_nipy_img(file_path)nii_img.file_path = file_pathreturn nii_imgexcept Exception as exc:raise Exception(''.format(file_path)) from exc", "docstring": "Parameters\n----------\nfile_path: str\n Path to the nifti file\n\nReturns\n-------\nnipy.Image with a file_path member", "id": "f4085:c1:m3"} {"signature": "@staticmethoddef _smooth_img(nii_img, smooth_fwhm):", "body": "from nipy.algorithms.kernel_smooth import LinearFilterif smooth_fwhm <= :return nii_imgfilter = LinearFilter(nii_img.coordmap, nii_img.shape)return filter.smooth(nii_img)", "docstring": "Parameters\n----------\nnii_img: nipy.Image\n\nsmooth_fwhm: float\n\nReturns\n-------\nsmoothed nipy.Image", "id": "f4085:c1:m4"} {"signature": "def from_dict(self, subj_files):", "body": "for group_label in subj_files:try:group_files = subj_files[group_label]self.items.extend([self._load_image(get_abspath(imgf)) for imgf in group_files])self.labels.extend([group_label]*len(group_files))except Exception as exc:raise Exception(''''.format(group_label)) from exc", "docstring": "Parameters\n----------\nsubj_files: dict of str\n file_path -> int/str", "id": "f4085:c1:m5"} {"signature": "def from_list(self, subj_files):", "body": "for sf in subj_files:try:nii_img = self._load_image(get_abspath(sf))self.items.append(nii_img)except Exception as exc:raise Exception(''.format(sf)) from exc", "docstring": "Parameters\n----------\nsubj_files: list of str\n file_paths", "id": "f4085:c1:m6"} {"signature": "def set_labels(self, subj_labels):", "body": "if len(subj_labels) != self.n_subjs:raise ValueError('')self.labels = subj_labels", "docstring": "Parameters\n----------\nsubj_labels: list of int or str\n This list will be checked to have the same size as files list\n (self.items)", "id": "f4085:c1:m10"} {"signature": "def to_matrix(self, smooth_fwhm=, outdtype=None):", "body": "vol = 
self.items[].get_data()if not outdtype:outdtype = vol.dtypen_voxels = Nonemask_indices = Nonemask_shape = self.items[].shapeif self.has_mask:mask_arr = get_img_data(self.mask_file)mask_indices = np.where(mask_arr > )mask_shape = mask_arr.shapen_voxels = np.count_nonzero(mask_arr)if n_voxels is None:log.debug(''.format(self.mask_file))n_voxels = np.prod(vol.shape)outmat = np.zeros((self.n_subjs, n_voxels), dtype=outdtype)try:for i, nipy_img in enumerate(self.items):vol = self._smooth_img(nipy_img, smooth_fwhm).get_data()if self.has_mask is not None:outmat[i, :] = vol[mask_indices]else:outmat[i, :] = vol.flatten()except Exception as exc:raise Exception(''.format(nipy_img.file_path)) from excelse:return outmat, mask_indices, mask_shape", "docstring": "Create a Numpy array with the data and return the relevant information (mask indices and volume shape).\n\n Parameters\n ----------\n smooth_fwhm: int\n Integer indicating the size of the FWHM Gaussian smoothing kernel\n to smooth the subject volumes before creating the data matrix\n\n outdtype: dtype\n Type of the elements of the array, if None will obtain the dtype from\n the first nifti file.\n\n Returns\n -------\n outmat, mask_indices, vol_shape\n\n outmat: Numpy array with shape N x prod(vol.shape)\n containing the N files as flat vectors.\n\n mask_indices: matrix with indices of the voxels in the mask\n\n vol_shape: Tuple with shape of the volumes, for reshaping.", "id": "f4085:c1:m11"} {"signature": "def to_file(self, output_file, smooth_fwhm=, outdtype=None):", "body": "outmat, mask_indices, mask_shape = self.to_matrix(smooth_fwhm, outdtype)exporter = ExportData()content = {'': outmat,'': self.labels,'': mask_indices,'': mask_shape, }if self.others:content.update(self.others)log.debug(''.format(output_file))try:exporter.save_variables(output_file, content)except Exception as exc:raise Exception(''.format(output_file)) from exc", "docstring": "Save the Numpy array created from to_matrix function to the output_file.\n\n Will save into the file: outmat, mask_indices, vol_shape\n\n data: Numpy array with shape N x prod(vol.shape)\n containing the N files as flat vectors.\n\n mask_indices: matrix with indices of the voxels in the mask\n\n vol_shape: Tuple with shape of the volumes, for reshaping.\n\n Parameters\n ----------\n output_file: str\n Path to the output file. 
The extension of the file will be taken into account for the file format.\n Choices of extensions: '.pyshelf' or '.shelf' (Python shelve)\n '.mat' (Matlab archive),\n '.hdf5' or '.h5' (HDF5 file)\n\n smooth_fwhm: int\n Integer indicating the size of the FWHM Gaussian smoothing kernel\n to smooth the subject volumes before creating the data matrix\n\n # TODO\n #smooth_mask: bool\n # If True, will smooth the mask with the same kernel.\n\n outdtype: dtype\n Type of the elements of the array, if None will obtain the dtype from\n the first nifti file.", "id": "f4085:c1:m12"} {"signature": "def voxspace_to_mmspace(img):", "body": "shape, affine = img.shape[:], img.affinecoords = np.array(np.meshgrid(*(range(i) for i in shape), indexing=''))coords = np.rollaxis(coords, , len(shape) + )mm_coords = nib.affines.apply_affine(affine, coords)return mm_coords", "docstring": "Return a grid with coordinates in 3D physical space for `img`.", "id": "f4086:m0"} {"signature": "def voxcoord_to_mm(cm, i, j, k):", "body": "try:mm = cm([i, j, k])except Exception as exc:raise Exception('') from excelse:return mm", "docstring": "Parameters\n----------\ncm: nipy.core.reference.coordinate_map.CoordinateMap\n\ni, j, k: floats\n Voxel coordinates\n\nReturns\n-------\nTriplet with real 3D world coordinates", "id": "f4086:m1"} {"signature": "def mm_to_voxcoord(cm, x, y, z):", "body": "try:vox = cm.inverse()([x, y, z])except Exception as exc:raise Exception('') from excelse:return vox", "docstring": "Parameters\n----------\ncm: nipy.core.reference.coordinate_map.CoordinateMap\n\nx, y, z: floats\n Physical coordinates\n\nReturns\n-------\nTriplet with 3D voxel coordinates", "id": "f4086:m2"} {"signature": "def get_3D_coordmap(img):", "body": "if isinstance(img, nib.Nifti1Image):img = nifti2nipy(img)if img.ndim == :from nipy.core.reference.coordinate_map import drop_io_dimcm = drop_io_dim(img.coordmap, )else:cm = img.coordmapreturn cm", "docstring": "Gets a 3D CoordinateMap from img.\n\nParameters\n----------\nimg: nib.Nifti1Image or nipy Image\n\nReturns\n-------\nnipy.core.reference.coordinate_map.CoordinateMap", "id": "f4086:m3"} {"signature": "def get_coordmap_array(coordmap, shape):", "body": "return ArrayCoordMap(coordmap, shape)", "docstring": "See: http://nipy.org/nipy/stable/api/generated/nipy.core.reference.array_coords.html?highlight=grid#nipy.core.reference.array_coords.Grid", "id": "f4086:m4"} {"signature": "def save_niigz(filepath, vol, header=None, affine=None):", "body": "we_have_nipy = Falsetry:import nipy.core.image as niimfrom nipy import save_imageexcept:passelse:we_have_nipy = Trueif isinstance(vol, np.ndarray):log.debug(''.format(filepath))ni = nib.Nifti1Image(vol, affine, header)nib.save(ni, filepath)elif isinstance(vol, nib.Nifti1Image):log.debug(''.format(filepath))nib.save(vol, filepath)elif we_have_nipy and isinstance(vol, niim.Image):log.debug(''.format(filepath))save_image(vol, filepath)else:raise ValueError(''.format(repr_imgs(vol)))", "docstring": "Saves a volume into a Nifti (.nii.gz) file.\n\n Parameters\n ----------\n vol: Numpy 3D or 4D array\n Volume with the data to be saved.\n\n file_path: string\n Output file name path\n\n affine: (optional) 4x4 Numpy array\n Array with the affine transform of the file.\n This is needed if vol is a np.ndarray.\n\n header: (optional) nibabel.nifti1.Nifti1Header, optional\n Header for the file, optional but recommended.\n This is needed if vol is a np.ndarray.\n\n Note\n ----\n affine and header only work for numpy volumes.", "id": "f4087:m0"} 
{"signature": "def spatialimg_to_hdfgroup(h5group, spatial_img):", "body": "try:h5group[''] = spatial_img.get_data()h5group[''] = spatial_img.get_affine()if hasattr(h5group, ''):h5group[''] = spatial_img.get_extra()hdr = spatial_img.get_header()for k in list(hdr.keys()):h5group[''].attrs[k] = hdr[k]except ValueError as ve:raise Exception('' + h5group.name) from ve", "docstring": "Saves a Nifti1Image into an HDF5 group.\n\n Parameters\n ----------\n h5group: h5py Group\n Output HDF5 file path\n\n spatial_img: nibabel SpatialImage\n Image to be saved\n\n h5path: str\n HDF5 group path where the image data will be saved.\n Datasets will be created inside the given group path:\n 'data', 'extra', 'affine', the header information will\n be set as attributes of the 'data' dataset.", "id": "f4087:m1"} {"signature": "def spatialimg_to_hdfpath(file_path, spatial_img, h5path=None, append=True):", "body": "if h5path is None:h5path = ''mode = ''if os.path.exists(file_path):if append:mode = ''with h5py.File(file_path, mode) as f:try:h5img = f.create_group(h5path)spatialimg_to_hdfgroup(h5img, spatial_img)except ValueError as ve:raise Exception('' + h5path) from ve", "docstring": "Saves a Nifti1Image into an HDF5 file.\n\n Parameters\n ----------\n file_path: string\n Output HDF5 file path\n\n spatial_img: nibabel SpatialImage\n Image to be saved\n\n h5path: string\n HDF5 group path where the image data will be saved.\n Datasets will be created inside the given group path:\n 'data', 'extra', 'affine', the header information will\n be set as attributes of the 'data' dataset.\n Default: '/img'\n\n append: bool\n True if you don't want to erase the content of the file\n if it already exists, False otherwise.\n\n Note\n ----\n HDF5 open modes\n >>> 'r' Readonly, file must exist\n >>> 'r+' Read/write, file must exist\n >>> 'w' Create file, truncate if exists\n >>> 'w-' Create file, fail if exists\n >>> 'a' Read/write if exists, create otherwise (default)", "id": "f4087:m2"} {"signature": "def hdfpath_to_nifti1image(file_path, h5path):", "body": "with h5py.File(file_path, '') as f:return hdfgroup_to_nifti1image(f[h5path])", "docstring": "Returns a nibabel Nifti1Image from a HDF5 group datasets\n\n Parameters\n ----------\n file_path: string\n HDF5 file path\n\n h5path:\n HDF5 group path in file_path\n\n Returns\n -------\n nibabel Nifti1Image", "id": "f4087:m3"} {"signature": "def hdfgroup_to_nifti1image(h5group):", "body": "try:data = h5group[''][:]affine = h5group[''][:]extra = Noneif '' in h5group:extra = h5group[''][:]header = get_nifti1hdr_from_h5attrs(h5group[''].attrs)img = nib.Nifti1Image(data, affine, header=header, extra=extra)return imgexcept KeyError as ke:raise Exception('' + h5group.name) from ke", "docstring": "Returns a nibabel Nifti1Image from a HDF5 group datasets\n\n Parameters\n ----------\n h5group: h5py.Group\n HDF5 group\n\n Returns\n -------\n nibabel Nifti1Image", "id": "f4087:m4"} {"signature": "def get_nifti1hdr_from_h5attrs(h5attrs):", "body": "hdr = nib.Nifti1Header()for k in list(h5attrs.keys()):hdr[str(k)] = np.array(h5attrs[k])return hdr", "docstring": "Transforms an H5py Attributes set to a dict.\n Converts unicode string keys into standard strings\n and each value into a numpy array.\n\n Parameters\n ----------\n h5attrs: H5py Attributes\n\n Returns\n --------\n dict", "id": "f4087:m5"} {"signature": "def all_childnodes_to_nifti1img(h5group):", "body": "child_nodes = []def append_parent_if_dataset(name, obj):if isinstance(obj, h5py.Dataset):if name.split('')[-] == 
'':child_nodes.append(obj.parent)vols = []h5group.visititems(append_parent_if_dataset)for c in child_nodes:vols.append(hdfgroup_to_nifti1image(c))return vols", "docstring": "Returns in a list all images found under h5group.\n\n Parameters\n ----------\n h5group: h5py.Group\n HDF group\n\n Returns\n -------\n list of nifti1Image", "id": "f4087:m6"} {"signature": "def insert_volumes_in_one_dataset(file_path, h5path, file_list, newshape=None,concat_axis=, dtype=None, append=True):", "body": "def isalambda(v):return isinstance(v, type(lambda: None)) and v.__name__ == ''mode = ''if os.path.exists(file_path):if append:mode = ''imgs = [nib.load(vol) for vol in file_list]shapes = [np.array(img.get_shape()) for img in imgs]if newshape is not None:if isalambda(newshape):nushapes = np.array([newshape(shape) for shape in shapes])else:nushapes = np.array([shape for shape in shapes])for nushape in nushapes:assert(len(nushape) - < concat_axis)n_dims = nushapes.shape[]ds_shape = np.zeros(n_dims, dtype=np.int)for a in list(range(n_dims)):if a == concat_axis:ds_shape[a] = np.sum(nushapes[:, concat_axis])else:ds_shape[a] = np.max(nushapes[:, a])if dtype is None:dtype = imgs[].get_data_dtype()with h5py.File(file_path, mode) as f:try:ic = h5grp = f.create_group(os.path.dirname(h5path))h5ds = h5grp.create_dataset(os.path.basename(h5path),ds_shape, dtype)for img in imgs:nushape = nushapes[ic, :]def append_to_dataset(h5ds, idx, data, concat_axis):\"\"\"\"\"\"shape = data.shapendims = len(shape)if ndims == :if concat_axis == :h5ds[idx] = dataelif ndims == :if concat_axis == :h5ds[idx ] = dataelif concat_axis == :h5ds[idx ] = dataelif ndims == :if concat_axis == :h5ds[idx ] = dataelif concat_axis == :h5ds[idx ] = dataelif concat_axis == :h5ds[idx ] = dataappend_to_dataset(h5ds, ic,np.reshape(img.get_data(), tuple(nushape)),concat_axis)ic += except ValueError as ve:raise Exception(''.format(h5path, file_path)) from ve", "docstring": "Inserts all given nifti files from file_list into one dataset in fname.\n This will not check if the dimensionality of all files match.\n\n Parameters\n ----------\n file_path: string\n HDF5 file path\n\n h5path: string\n\n file_list: list of strings\n\n newshape: tuple or lambda function\n If None, it will not reshape the images.\n If a lambda function, this lambda will receive only the shape array.\n e.g., newshape = lambda x: (np.prod(x[0:3]), x[3])\n If a tuple, it will try to reshape all the images with the same shape.\n It must work for all the images in file_list.\n\n concat_axis: int\n Axis of concatenation after reshaping\n\n dtype: data type\n Dataset data type\n If not set, will use the type of the first file.\n\n append: bool\n\n Raises\n ------\n ValueError if concat_axis is bigger than data dimensionality.\n\n Note\n ----\n For now, this only works if the dataset ends up being a 2D matrix.\n I haven't tested for multi-dimensionality concatenations.", "id": "f4087:m7"} {"signature": "def drain_rois(img):", "body": "img_data = get_img_data(img)out = np.zeros(img_data.shape, dtype=img_data.dtype)krn_dim = [] * img_data.ndimkernel = np.ones(krn_dim, dtype=int)vals = np.unique(img_data)vals = vals[vals != ]for i in vals:roi = img_data == ihits = scn.binary_hit_or_miss(roi, kernel)roi[hits] = out[roi > ] = ireturn out", "docstring": "Find all the ROIs in img and returns a similar volume with the ROIs\n emptied, keeping only their border voxels.\n\n This is useful for DTI tractography.\n\n Parameters\n ----------\n img: img-like object or str\n Can either be:\n - a file path to a 
Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n Returns\n -------\n np.ndarray\n an array of same shape as img_data", "id": "f4088:m0"} {"signature": "def pick_rois(rois_img, roi_values, bg_val=):", "body": "img = read_img(rois_img)img_data = img.get_data()if bg_val == :out = np.zeros(img_data.shape, dtype=img_data.dtype)else:out = np.ones(img_data.shape, dtype=img_data.dtype) * bg_valfor r in roi_values:out[img_data == r] = rreturn nib.Nifti2Image(out, affine=img.affine, header=img.header)", "docstring": "Return the `rois_img` only with the ROI values from `roi_values`.\n Parameters\n ----------\n rois_img: niimg-like\n\n roi_values: list of int or float\n The list of values from rois_img.\n\n bg_val: int or float\n The background value of `rois_img`.\n\n Returns\n -------\n subset_rois_img: nibabel.Nifti2Image", "id": "f4088:m1"} {"signature": "def largest_connected_component(volume):", "body": "volume = np.asarray(volume)labels, num_labels = scn.label(volume)if not num_labels:raise ValueError('')if num_labels == :return volume.astype(np.bool)label_count = np.bincount(labels.ravel().astype(np.int))label_count[] = return labels == label_count.argmax()", "docstring": "Return the largest connected component of a 3D array.\n\n Parameters\n -----------\n volume: numpy.array\n 3D boolean array.\n\n Returns\n --------\n volume: numpy.array\n 3D boolean array with only one connected component.", "id": "f4088:m2"} {"signature": "def large_clusters_mask(volume, min_cluster_size):", "body": "labels, num_labels = scn.label(volume)labels_to_keep = set([i for i in range(num_labels)if np.sum(labels == i) >= min_cluster_size])clusters_mask = np.zeros_like(volume, dtype=int)for l in range(num_labels):if l in labels_to_keep:clusters_mask[labels == l] = return clusters_mask", "docstring": "Return as mask for `volume` that includes only areas where\n the connected components have a size bigger than `min_cluster_size`\n in number of voxels.\n\n Parameters\n -----------\n volume: numpy.array\n 3D boolean array.\n\n min_cluster_size: int\n Minimum size in voxels that the connected component must have.\n\n Returns\n --------\n volume: numpy.array\n 3D int array with a mask excluding small connected components.", "id": "f4088:m3"} {"signature": "def create_rois_mask(roislist, filelist):", "body": "roifiles = []for roi in roislist:try:roi_file = search_list(roi, filelist)[]except Exception as exc:raise Exception(''.format(str(exc)))else:roifiles.append(roi_file)return binarise(roifiles)", "docstring": "Look for the files in filelist containing the names in roislist, these files will be opened, binarised\n and merged in one mask.\n\n Parameters\n ----------\n roislist: list of strings\n Names of the ROIs, which will have to be in the names of the files in filelist.\n\n filelist: list of strings\n List of paths to the volume files containing the ROIs.\n\n Returns\n -------\n numpy.ndarray\n Mask volume", "id": "f4088:m4"} {"signature": "def get_unique_nonzeros(arr):", "body": "rois = np.unique(arr)rois = rois[np.nonzero(rois)]rois.sort()return rois", "docstring": "Return a sorted list of the non-zero unique values of arr.\n\n Parameters\n ----------\n arr: numpy.ndarray\n The data array\n\n Returns\n -------\n list of items of arr.", "id": "f4088:m5"} {"signature": 
"def get_roilist_from_atlas(atlas_img):", "body": "return get_unique_nonzeros(check_img(atlas_img).get_data())", "docstring": "Extract unique values from the atlas and returns them as an ordered list.\n\nParameters\n----------\natlas_img: img-like object or str\n Volume defining different ROIs.\n Can either be:\n - a file path to a Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\nReturns\n-------\nnp.ndarray\n An 1D array of roi values from atlas volume.\n\nNote\n----\nThe roi with value 0 will be considered background so will be removed.", "id": "f4088:m6"} {"signature": "def get_rois_centers_of_mass(vol):", "body": "from scipy.ndimage.measurements import center_of_massroisvals = np.unique(vol)roisvals = roisvals[roisvals != ]rois_centers = OrderedDict()for r in roisvals:rois_centers[r] = center_of_mass(vol, vol, r)return rois_centers", "docstring": "Get the center of mass for each ROI in the given volume.\n\n Parameters\n ----------\n vol: numpy ndarray\n Volume with different values for each ROI.\n\n Returns\n -------\n OrderedDict\n Each entry in the dict has the ROI value as key and the center_of_mass coordinate as value.", "id": "f4088:m7"} {"signature": "def partition_timeseries(image, roi_img, mask_img=None, zeroe=True, roi_values=None, outdict=False):", "body": "img = read_img(image)rois = read_img(roi_img)check_img_compatibility(img, rois, only_check_3d=True)roi_data = rois.get_data()if roi_values is not None:for rv in roi_values:if not np.any(roi_data == rv):raise ValueError(''.format(rv, repr_imgs(roi_img)))else:roi_values = get_unique_nonzeros(roi_data)if mask_img is None:mask_data = Noneelse:mask = load_mask(mask_img)check_img_compatibility(img, mask, only_check_3d=True)mask_data = mask.get_data()if outdict:extract_data = _extract_timeseries_dictelse:extract_data = _extract_timeseries_listtry:return extract_data(img.get_data(), rois.get_data(), mask_data,roi_values=roi_values, zeroe=zeroe)except:raise", "docstring": "Partition the timeseries in tsvol according to the ROIs in roivol.\n If a mask is given, will use it to exclude any voxel outside of it.\n\n The outdict indicates whether you want a dictionary for each set of timeseries keyed by the ROI value\n or a list of timeseries sets. 
If True and roi_img is not None will return an OrderedDict, if False\n or roi_img or roi_list is None will return a list.\n\n Background value is assumed to be 0 and won't be used here.\n\n Parameters\n ----------\n image: img-like object or str\n 4D timeseries volume\n\n roi_img: img-like object or str\n 3D volume defining different ROIs.\n\n mask_img: img-like object or str\n 3D mask volume\n\n zeroe: bool\n If true will remove the null timeseries voxels.\n\n roi_values: list of ROI values (int?)\n List of the values of the ROIs to indicate the\n order and which ROIs will be processed.\n\n outdict: bool\n If True will return an OrderedDict of timeseries sets, otherwise a list.\n\n Returns\n -------\n timeseries: list or OrderedDict\n A dict with the timeseries as items and keys as the ROIs voxel values or\n a list where each element is the timeseries set ordered by the sorted values in roi_img or by the roi_values\n argument.", "id": "f4088:m8"} {"signature": "def partition_volume(*args, **kwargs):", "body": "return partition_timeseries(*args, **kwargs)", "docstring": "Look at partition_timeseries function docstring.", "id": "f4088:m9"} {"signature": "def _partition_data(datavol, roivol, roivalue, maskvol=None, zeroe=True):", "body": "if maskvol is not None:indices = (roivol == roivalue) * (maskvol > )else:indices = roivol == roivalueif datavol.ndim == :ts = datavol[indices, :]else:ts = datavol[indices]if zeroe:if datavol.ndim == :ts = ts[ts.sum(axis=) != , :]return ts", "docstring": "Extracts the values in `datavol` that are in the ROI with value `roivalue` in `roivol`.\n The ROI can be masked by `maskvol`.\n\n Parameters\n ----------\n datavol: numpy.ndarray\n 4D timeseries volume or a 3D volume to be partitioned\n\n roivol: numpy.ndarray\n 3D ROIs volume\n\n roivalue: int or float\n A value from roivol that represents the ROI to be used for extraction.\n\n maskvol: numpy.ndarray\n 3D mask volume\n\n zeroe: bool\n If true will remove the null timeseries voxels. 
Only applied to timeseries (4D) data.\n\n Returns\n -------\n values: np.array\n An array of the values in the indicated ROI.\n A 2D matrix if `datavol` is 4D or a 1D vector if `datavol` is 3D.", "id": "f4088:m11"} {"signature": "def _extract_timeseries_dict(tsvol, roivol, maskvol=None, roi_values=None, zeroe=True):", "body": "_check_for_partition(tsvol, roivol, maskvol)if roi_values is None:roi_values = get_unique_nonzeros(roivol)ts_dict = OrderedDict()for r in roi_values:ts = _partition_data(tsvol, roivol, r, maskvol, zeroe)if len(ts) == :ts = np.zeros(tsvol.shape[-])ts_dict[r] = tsreturn ts_dict", "docstring": "Partition the timeseries in tsvol according to the ROIs in roivol.\n If a mask is given, will use it to exclude any voxel outside of it.\n\n Parameters\n ----------\n tsvol: numpy.ndarray\n 4D timeseries volume or a 3D volume to be partitioned\n\n roivol: numpy.ndarray\n 3D ROIs volume\n\n maskvol: numpy.ndarray\n 3D mask volume\n\n zeroe: bool\n If true will remove the null timeseries voxels.\n\n roi_values: list of ROI values (int?)\n List of the values of the ROIs to indicate the\n order and which ROIs will be processed.\n\n Returns\n -------\n ts_dict: OrderedDict\n A dict with the timeseries as items and keys as the ROIs voxel values.", "id": "f4088:m12"} {"signature": "def _extract_timeseries_list(tsvol, roivol, maskvol=None, roi_values=None, zeroe=True):", "body": "_check_for_partition(tsvol, roivol, maskvol)if roi_values is None:roi_values = get_unique_nonzeros(roivol)ts_list = []for r in roi_values:ts = _partition_data(tsvol, roivol, r, maskvol, zeroe)if len(ts) == :ts = np.zeros(tsvol.shape[-])ts_list.append(ts)return ts_list", "docstring": "Partition the timeseries in tsvol according to the ROIs in roivol.\n If a mask is given, will use it to exclude any voxel outside of it.\n\n Parameters\n ----------\n tsvol: numpy.ndarray\n 4D timeseries volume or a 3D volume to be partitioned\n\n roivol: numpy.ndarray\n 3D ROIs volume\n\n maskvol: numpy.ndarray\n 3D mask volume\n\n zeroe: bool\n If true will remove the null timeseries voxels. Only applied to timeseries (4D) data.\n\n roi_values: list of ROI values (int?)\n List of the values of the ROIs to indicate the\n order and which ROIs will be processed.\n\n Returns\n -------\n ts_list: list\n A list with the timeseries arrays as items", "id": "f4088:m13"} {"signature": "def get_3D_from_4D(image, vol_idx=):", "body": "img = check_img(image)hdr, aff = get_img_info(img)if len(img.shape) != :raise AttributeError(''.format(repr_imgs(img)))if not <= vol_idx < img.shape[]:raise IndexError(''''.format(repr_imgs(img), img.shape[], vol_idx))img_data = img.get_data()new_vol = img_data[:, :, :, vol_idx].copy()hdr.set_data_shape(hdr.get_data_shape()[:])return new_vol, hdr, aff", "docstring": "Pick one 3D volume from a 4D nifti image file\n\n Parameters\n ----------\n image: img-like object or str\n Volume defining different ROIs.\n Can either be:\n - a file path to a Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. 
If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n vol_idx: int\n Index of the 3D volume to be extracted from the 4D volume.\n\n Returns\n -------\n vol, hdr, aff\n The data array, the image header and the affine transform matrix.", "id": "f4088:m14"} {"signature": "def load_mask(image, allow_empty=True):", "body": "img = check_img(image, make_it_3d=True)values = np.unique(img.get_data())if len(values) == :if values[] == and not allow_empty:raise ValueError('')elif len(values) == :if not in values:raise ValueError(''''.format(values))elif len(values) != :raise ValueError(''''.format(values))return nib.Nifti1Image(as_ndarray(get_img_data(img), dtype=bool), img.get_affine(), img.get_header())", "docstring": "Load a Nifti mask volume.\n\n Parameters\n ----------\n image: img-like object or boyle.nifti.NeuroImage or str\n Can either be:\n - a file path to a Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n allow_empty: boolean, optional\n Allow loading an empty mask (full of 0 values)\n\n Returns\n -------\n nibabel.Nifti1Image with boolean data.", "id": "f4089:m0"} {"signature": "def load_mask_data(image, allow_empty=True):", "body": "mask = load_mask(image, allow_empty=allow_empty)return get_img_data(mask), mask.get_affine()", "docstring": "Load a Nifti mask volume and return its data matrix as boolean and affine.\n\n Parameters\n ----------\n image: img-like object or boyle.nifti.NeuroImage or str\n Can either be:\n - a file path to a Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n allow_empty: boolean, optional\n Allow loading an empty mask (full of 0 values)\n\n Returns\n -------\n numpy.ndarray with dtype==bool, numpy.ndarray of affine transformation", "id": "f4089:m1"} {"signature": "def binarise(image, threshold=):", "body": "img = check_img(image)return img.get_data() > threshold", "docstring": "Binarise image with the given threshold\n\n Parameters\n ----------\n image: img-like object or boyle.nifti.NeuroImage or str\n Can either be:\n - a file path to a Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. 
If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n threshold: float\n\n Returns\n -------\n binarised_img: numpy.ndarray\n Mask volume", "id": "f4089:m2"} {"signature": "def union_mask(filelist):", "body": "firstimg = check_img(filelist[])mask = np.zeros_like(firstimg.get_data())try:for volf in filelist:roiimg = check_img(volf)check_img_compatibility(firstimg, roiimg)mask += get_img_data(roiimg)except Exception as exc:raise ValueError(''.format(repr_imgs(firstimg), repr_imgs(volf))) from excelse:return as_ndarray(mask > , dtype=bool)", "docstring": "Creates a binarised mask with the union of the files in filelist.\n\nParameters\n----------\nfilelist: list of img-like object or boyle.nifti.NeuroImage or str\n List of paths to the volume files containing the ROIs.\n Can either be:\n - a file path to a Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\nReturns\n-------\nndarray of bools\n Mask volume\n\nRaises\n------\nValueError", "id": "f4089:m3"} {"signature": "def apply_mask(image, mask_img):", "body": "img = check_img(image)mask = check_img(mask_img)check_img_compatibility(img, mask)vol = img.get_data()mask_data, _ = load_mask_data(mask)return vol[mask_data], mask_data", "docstring": "Read a Nifti file nii_file and a mask Nifti file.\n Returns the voxels in nii_file that are within the mask, the mask indices\n and the mask shape.\n\n Parameters\n ----------\n image: img-like object or boyle.nifti.NeuroImage or str\n Can either be:\n - a file path to a Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n mask_img: img-like object or boyle.nifti.NeuroImage or str\n 3D mask array: True where a voxel should be used.\n See img description.\n\n Returns\n -------\n vol[mask_indices], mask_indices\n\n Note\n ----\n nii_file and mask_file must have the same shape.\n\n Raises\n ------\n NiftiFilesNotCompatible, ValueError", "id": "f4089:m4"} {"signature": "def apply_mask_4d(image, mask_img): ", "body": "img = check_img(image)mask = check_img(mask_img)check_img_compatibility(img, mask, only_check_3d=True)vol = get_data(img)series, mask_data = _apply_mask_to_4d_data(vol, mask)return series, mask_data", "docstring": "Read a Nifti file nii_file and a mask Nifti file.\n Extract the signals in nii_file that are within the mask, the mask indices\n and the mask shape.\n\n Parameters\n ----------\n image: img-like object or boyle.nifti.NeuroImage or str\n Can either be:\n - a file path to a Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. 
If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n mask_img: img-like object or boyle.nifti.NeuroImage or str\n 3D mask array: True where a voxel should be used.\n See img description.\n\n smooth_mm: float #TBD\n (optional) The size in mm of the FWHM Gaussian kernel to smooth the signal.\n If True, remove_nans is True.\n\n remove_nans: bool #TBD\n If remove_nans is True (default), the non-finite values (NaNs and\n infs) found in the images will be replaced by zeros.\n\n Returns\n -------\n session_series, mask_data\n\n session_series: numpy.ndarray\n 2D array of series with shape (voxel number, image number)\n\n Note\n ----\n nii_file and mask_file must have the same shape.\n\n Raises\n ------\n FileNotFound, NiftiFilesNotCompatible", "id": "f4089:m5"} {"signature": "def _apply_mask_to_4d_data(vol_data, mask_img):", "body": "mask_data = load_mask_data(mask_img)return vol_data[mask_data], mask_data", "docstring": "Parameters\n----------\nvol_data:\nmask_img:\n\nReturns\n-------\nmasked_data, mask_indices\n\nmasked_data: numpy.ndarray\n 2D array of series with shape (image number, voxel number)\n\nNote\n----\nvol_data and mask_file must have the same shape.", "id": "f4089:m6"} {"signature": "def vector_to_volume(arr, mask, order=''):", "body": "if mask.dtype != np.bool:raise ValueError(\"\")if arr.ndim != :raise ValueError(\"\")if arr.ndim == and any(v == for v in arr.shape):log.debug(''.format(arr.shape))arr = arr.flatten()volume = np.zeros(mask.shape[:], dtype=arr.dtype, order=order)volume[mask] = arrreturn volume", "docstring": "Transform a given vector to a volume. This is a reshape function for\n 3D flattened and maybe masked vectors.\n\n Parameters\n ----------\n arr: np.array\n 1-Dimensional array\n\n mask: numpy.ndarray\n Mask image. Must have 3 dimensions, bool dtype.\n\n Returns\n -------\n np.ndarray", "id": "f4089:m7"} {"signature": "def matrix_to_4dvolume(arr, mask, order=''):", "body": "if mask.dtype != np.bool:raise ValueError(\"\")if arr.ndim != :raise ValueError(\"\")if mask.sum() != arr.shape[]:raise ValueError(''.format(mask.sum(), arr.shape))data = np.zeros(mask.shape + (arr.shape[],), dtype=arr.dtype,order=order)data[mask, :] = arrreturn data", "docstring": "Transform a given vector to a volume. This is a reshape function for\n 4D flattened masked matrices where the second dimension of the matrix\n corresponds to the original 4th dimension.\n\n Parameters\n ----------\n arr: numpy.array\n 2D numpy.array\n\n mask: numpy.ndarray\n Mask image. 
Must have 3 dimensions, bool dtype.\n\n dtype: return type\n If None, will get the type from vector\n\n Returns\n -------\n data: numpy.ndarray\n Unmasked data.\n Shape: (mask.shape[0], mask.shape[1], mask.shape[2], X.shape[1])", "id": "f4089:m8"} {"signature": "def niftilist_mask_to_array(img_filelist, mask_file=None, outdtype=None):", "body": "img = check_img(img_filelist[])if not outdtype:outdtype = img.dtypemask_data, _ = load_mask_data(mask_file)indices = np.where (mask_data)mask = check_img(mask_file)outmat = np.zeros((len(img_filelist), np.count_nonzero(mask_data)),dtype=outdtype)for i, img_item in enumerate(img_filelist):img = check_img(img_item)if not are_compatible_imgs(img, mask):raise NiftiFilesNotCompatible(repr_imgs(img), repr_imgs(mask_file))vol = get_img_data(img)outmat[i, :] = vol[indices]return outmat, mask_data", "docstring": "From the list of absolute paths to nifti files, creates a Numpy array\n with the masked data.\n\n Parameters\n ----------\n img_filelist: list of str\n List of absolute file paths to nifti files. All nifti files must have\n the same shape.\n\n mask_file: str\n Path to a Nifti mask file.\n Should be the same shape as the files in nii_filelist.\n\n outdtype: dtype\n Type of the elements of the array, if not set will obtain the dtype from\n the first nifti file.\n\n Returns\n -------\n outmat:\n Numpy array with shape N x prod(vol.shape) containing the N files as flat vectors.\n\n mask_indices:\n Tuple with the 3D spatial indices of the masking voxels, for reshaping\n with vol_shape and remapping.\n\n vol_shape:\n Tuple with shape of the volumes, for reshaping.", "id": "f4089:m9"} {"signature": "def fwhm2sigma(fwhm):", "body": "fwhm = np.asarray(fwhm)return fwhm / np.sqrt( * np.log())", "docstring": "Convert a FWHM value to sigma in a Gaussian kernel.\n\n Parameters\n ----------\n fwhm: float or numpy.array\n fwhm value or values\n\n Returns\n -------\n fwhm: float or numpy.array\n sigma values", "id": "f4090:m0"} {"signature": "def sigma2fwhm(sigma):", "body": "sigma = np.asarray(sigma)return np.sqrt( * np.log()) * sigma", "docstring": "Convert a sigma in a Gaussian kernel to a FWHM value.\n\n Parameters\n ----------\n sigma: float or numpy.array\n sigma value or values\n\n Returns\n -------\n fwhm: float or numpy.array\n fwhm values corresponding to `sigma` values", "id": "f4090:m1"} {"signature": "def smooth_volume(image, smoothmm):", "body": "return smooth_imgs(image, smoothmm)", "docstring": "See smooth_img.", "id": "f4090:m2"} {"signature": "def _smooth_data_array(arr, affine, fwhm, copy=True):", "body": "if arr.dtype.kind == '':if arr.dtype == np.int64:arr = arr.astype(np.float64)else:arr = arr.astype(np.float32)if copy:arr = arr.copy()arr[np.logical_not(np.isfinite(arr))] = try:affine = affine[:, :]fwhm_sigma_ratio = np.sqrt( * np.log())vox_size = np.sqrt(np.sum(affine ** , axis=))sigma = fwhm / (fwhm_sigma_ratio * vox_size)for n, s in enumerate(sigma):ndimage.gaussian_filter1d(arr, s, output=arr, axis=n)except:raise ValueError('')else:return arr", "docstring": "Smooth images with a a Gaussian filter.\n\n Apply a Gaussian filter along the three first dimensions of arr.\n\n Parameters\n ----------\n arr: numpy.ndarray\n 3D or 4D array, with image number as last dimension.\n\n affine: numpy.ndarray\n Image affine transformation matrix for image.\n\n fwhm: scalar, numpy.ndarray\n Smoothing kernel size, as Full-Width at Half Maximum (FWHM) in millimeters.\n If a scalar is given, kernel width is identical on all three directions.\n A numpy.ndarray 
must have 3 elements, giving the FWHM along each axis.\n\n copy: bool\n if True, will make a copy of the input array. Otherwise will directly smooth the input array.\n\n Returns\n -------\n smooth_arr: numpy.ndarray", "id": "f4090:m3"} {"signature": "def smooth_imgs(images, fwhm):", "body": "if fwhm <= :return imagesif not isinstance(images, string_types) and hasattr(images, ''):only_one = Falseelse:only_one = Trueimages = [images]result = []for img in images:img = check_img(img)affine = img.get_affine()smooth = _smooth_data_array(img.get_data(), affine, fwhm=fwhm, copy=True)result.append(nib.Nifti1Image(smooth, affine))if only_one:return result[]else:return result", "docstring": "Smooth images using a Gaussian filter.\n\n Apply a Gaussian filter along the three first dimensions of each image in images.\n In all cases, non-finite values in input are zeroed.\n\n Parameters\n ----------\n imgs: str or img-like object or iterable of img-like objects\n See boyle.nifti.read.read_img\n Image(s) to smooth.\n\n fwhm: scalar or numpy.ndarray\n Smoothing kernel size, as Full-Width at Half Maximum (FWHM) in millimeters.\n If a scalar is given, kernel width is identical on all three directions.\n A numpy.ndarray must have 3 elements, giving the FWHM along each axis.\n\n Returns\n -------\n smooth_imgs: nibabel.Nifti1Image or list of.\n Smooth input image/s.", "id": "f4090:m4"} {"signature": "def _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True, **kwargs):", "body": "if arr.dtype.kind == '':if arr.dtype == np.int64:arr = arr.astype(np.float64)else:arr = arr.astype(np.float32)if copy:arr = arr.copy()if ensure_finite:arr[np.logical_not(np.isfinite(arr))] = if fwhm == '':arr = _fast_smooth_array(arr)elif fwhm is not None:affine = affine[:, :]fwhm_over_sigma_ratio = np.sqrt( * np.log())vox_size = np.sqrt(np.sum(affine ** , axis=))sigma = fwhm / (fwhm_over_sigma_ratio * vox_size)for n, s in enumerate(sigma):ndimage.gaussian_filter1d(arr, s, output=arr, axis=n, **kwargs)return arr", "docstring": "Smooth images by applying a Gaussian filter.\n Apply a Gaussian filter along the three first dimensions of arr.\n\n This is copied and slightly modified from nilearn:\n https://github.com/nilearn/nilearn/blob/master/nilearn/image/image.py\n Added the **kwargs argument.\n\n Parameters\n ==========\n arr: numpy.ndarray\n 4D array, with image number as last dimension. 3D arrays are also\n accepted.\n affine: numpy.ndarray\n (4, 4) matrix, giving affine transformation for image. (3, 3) matrices\n are also accepted (only these coefficients are used).\n If fwhm='fast', the affine is not used and can be None\n fwhm: scalar, numpy.ndarray, 'fast' or None\n Smoothing strength, as a full-width at half maximum, in millimeters.\n If a scalar is given, width is identical on all three directions.\n A numpy.ndarray must have 3 elements, giving the FWHM along each axis.\n If fwhm == 'fast', a fast smoothing will be performed with\n a filter [0.2, 1, 0.2] in each direction and a normalisation\n to preserve the local average value.\n If fwhm is None, no filtering is performed (useful when just removal\n of non-finite values is needed).\n ensure_finite: bool\n if True, replace every non-finite values (like NaNs) by zero before\n filtering.\n copy: bool\n if True, input array is not modified. 
False by default: the filtering\n is performed in-place.\n kwargs: keyword-arguments\n Arguments for the ndimage.gaussian_filter1d function.\n\n Returns\n =======\n filtered_arr: numpy.ndarray\n arr, filtered.\n Notes\n =====\n This function is most efficient with arr in C order.", "id": "f4090:m5"} {"signature": "def smooth_img(imgs, fwhm, **kwargs):", "body": "if hasattr(imgs, \"\")and not isinstance(imgs, string_types):single_img = Falseelse:single_img = Trueimgs = [imgs]ret = []for img in imgs:img = check_niimg(img)affine = img.get_affine()filtered = _smooth_array(img.get_data(), affine, fwhm=fwhm,ensure_finite=True, copy=True, **kwargs)ret.append(new_img_like(img, filtered, affine, copy_header=True))if single_img:return ret[]else:return ret", "docstring": "Smooth images by applying a Gaussian filter.\n Apply a Gaussian filter along the three first dimensions of arr.\n In all cases, non-finite values in input image are replaced by zeros.\n\n This is copied and slightly modified from nilearn:\n https://github.com/nilearn/nilearn/blob/master/nilearn/image/image.py\n Added the **kwargs argument.\n\n Parameters\n ==========\n imgs: Niimg-like object or iterable of Niimg-like objects\n See http://nilearn.github.io/manipulating_images/manipulating_images.html#niimg.\n Image(s) to smooth.\n fwhm: scalar, numpy.ndarray, 'fast' or None\n Smoothing strength, as a Full-Width at Half Maximum, in millimeters.\n If a scalar is given, width is identical on all three directions.\n A numpy.ndarray must have 3 elements, giving the FWHM along each axis.\n If fwhm == 'fast', a fast smoothing will be performed with\n a filter [0.2, 1, 0.2] in each direction and a normalisation\n to preserve the scale.\n If fwhm is None, no filtering is performed (useful when just removal\n of non-finite values is needed)\n Returns\n =======\n filtered_img: nibabel.Nifti1Image or list of.\n Input image, filtered. 
If imgs is an iterable, then filtered_img is a\n list.", "id": "f4090:m6"} {"signature": "def is_img(obj):", "body": "try:get_data = getattr(obj, '')get_affine = getattr(obj, '')return isinstance(get_data, collections.Callable) andisinstance(get_affine, collections.Callable)except AttributeError:return False", "docstring": "Check for get_data and get_affine methods in an object\n\n Parameters\n ----------\n obj: any object\n Tested object\n\n Returns\n -------\n is_img: boolean\n True if get_data and get_affine methods are present and callable,\n False otherwise.", "id": "f4091:m0"} {"signature": "def get_data(img):", "body": "if hasattr(img, '') and img._data_cache is None:img = copy.deepcopy(img)gc.collect()return img.get_data()", "docstring": "Get the data in the image without having a side effect on the Nifti1Image object\n\n Parameters\n ----------\n img: Nifti1Image\n\n Returns\n -------\n np.ndarray", "id": "f4091:m1"} {"signature": "def get_shape(img):", "body": "if hasattr(img, ''):shape = img.shapeelse:shape = img.get_data().shapereturn shape", "docstring": "Return the shape of img.\n\n Parameters\n -----------\n img:\n\n Returns\n -------\n shape: tuple", "id": "f4091:m2"} {"signature": "def is_valid_coordinate(img, i, j, k):", "body": "imgx, imgy, imgz = get_shape(img)return (i >= and i < imgx) and(j >= and j < imgy) and(k >= and k < imgz)", "docstring": "Return True if the given (i, j, k) voxel grid coordinate values are within the img boundaries.\n\n Parameters\n ----------\n @param img:\n @param i:\n @param j:\n @param k:\n\n Returns\n -------\n bool", "id": "f4091:m3"} {"signature": "def are_compatible_imgs(one_img, another_img):", "body": "try:check_img_compatibility(one_img, another_img)except :return Falseelse:return True", "docstring": "Return true if one_img and another_img have the same shape.\n False otherwise.\n If both are nibabel.Nifti1Image will also check for affine matrices.\n\n Parameters\n ----------\n one_img: nibabel.Nifti1Image or np.ndarray\n\n another_img: nibabel.Nifti1Image or np.ndarray\n\n Returns\n -------\n bool", "id": "f4091:m4"} {"signature": "def check_img_compatibility(one_img, another_img, only_check_3d=False):", "body": "nd_to_check = Noneif only_check_3d:nd_to_check = if hasattr(one_img, '') and hasattr(another_img, ''):if not have_same_shape(one_img, another_img, nd_to_check=nd_to_check):msg = ''.format(one_img.shape,another_img.shape)raise NiftiFilesNotCompatible(repr_imgs(one_img), repr_imgs(another_img), message=msg)if hasattr(one_img, '') and hasattr(another_img, ''):if not have_same_affine(one_img, another_img, only_check_3d=only_check_3d):msg = ''''.format(one_img.get_affine(), another_img.get_affine())raise NiftiFilesNotCompatible(repr_imgs(one_img), repr_imgs(another_img), message=msg)", "docstring": "Check that one_img and another_img have the same shape, raise NiftiFilesNotCompatible otherwise.\n If both are nibabel.Nifti1Image will also check for affine matrices.\n\n Parameters\n ----------\n one_img: nibabel.Nifti1Image or np.ndarray\n\n another_img: nibabel.Nifti1Image or np.ndarray\n\n only_check_3d: bool\n If True will check only the 3D part of the affine matrices when they have more dimensions.\n\n Raises\n ------\n NiftiFilesNotCompatible", "id": "f4091:m5"} {"signature": "def have_same_affine(one_img, another_img, only_check_3d=False):", "body": "img1 = check_img(one_img)img2 = check_img(another_img)ndim1 = len(img1.shape)ndim2 = len(img2.shape)if ndim1 < :raise ValueError(''.format(repr_imgs(img1), ndim1))if ndim2 < :raise 
ValueError(''.format(repr_imgs(img2), ndim1))affine1 = img1.get_affine()affine2 = img2.get_affine()if only_check_3d:affine1 = affine1[:, :]affine2 = affine2[:, :]try:return np.allclose(affine1, affine2)except ValueError:return Falseexcept:raise", "docstring": "Return True if the affine matrix of one_img is close to the affine matrix of another_img.\n False otherwise.\n\n Parameters\n ----------\n one_img: nibabel.Nifti1Image\n\n another_img: nibabel.Nifti1Image\n\n only_check_3d: bool\n If True will extract only the 3D part of the affine matrices when they have more dimensions.\n\n Returns\n -------\n bool\n\n Raises\n ------\n ValueError", "id": "f4091:m6"} {"signature": "def _make_it_3d(img):", "body": "shape = get_shape(img)if len(shape) == :return imgelif (len(shape) == and shape[] == ):try:data = get_data(img)affine = img.get_affine()img = nib.Nifti1Image(data[:, :, :, ], affine)except Exception as exc:raise Exception(\"\".format(img)) from excelse:return imgelse:raise TypeError(\"\".format(shape))", "docstring": "Enforce that img is a 3D img-like object, if it is not, raise a TypeError.\n i.e., remove dimensions of size 1.\n\n Parameters\n ----------\n img: img-like object\n\n Returns\n -------\n 3D img-like object", "id": "f4091:m7"} {"signature": "def check_img(image, make_it_3d=False):", "body": "if isinstance(image, string_types):if not op.exists(image):raise FileNotFound(image)try:img = nib.load(image)if make_it_3d:img = _make_it_3d(img)except Exception as exc:raise Exception(''.format(image)) from excelse:return imgelif isinstance(image, nib.Nifti1Image) or is_img(image):return imageelse:raise TypeError(''''''.format(type(image)))", "docstring": "Check that image is a proper img. Turn filenames into objects.\n\n Parameters\n ----------\n image: img-like object or str\n Can either be:\n - a file path to a Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n make_it_3d: boolean, optional\n If True, check if the image is a 3D image and raise an error if not.\n\n Returns\n -------\n result: nifti-like\n result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed\n that the returned object has get_data() and get_affine() methods.", "id": "f4091:m8"} {"signature": "def repr_imgs(imgs):", "body": "if isinstance(imgs, string_types):return imgsif isinstance(imgs, collections.Iterable):return ''.format(''.join(repr_imgs(img) for img in imgs))try:filename = imgs.get_filename()if filename is not None:img_str = \"\".format(imgs.__class__.__name__, filename)else:img_str = \"\".format(imgs.__class__.__name__,repr(get_shape(imgs)),repr(imgs.get_affine()))except Exception as exc:log.error('')return repr(imgs)else:return img_str", "docstring": "Printing of img or imgs", "id": "f4091:m9"} {"signature": "def repr_img(img):", "body": "return repr_imgs(img)", "docstring": "Printing of img or imgs. 
See repr_imgs.", "id": "f4091:m10"} {"signature": "def have_same_shape(array1, array2, nd_to_check=None):", "body": "shape1 = array1.shapeshape2 = array2.shapeif nd_to_check is not None:if len(shape1) < nd_to_check:msg = ''.format(shape1)raise ValueError(msg)elif len(shape2) < nd_to_check:msg = ''.format(shape2)raise ValueError(msg)shape1 = shape1[:nd_to_check]shape2 = shape2[:nd_to_check]return shape1 == shape2", "docstring": "Returns true if array1 and array2 have the same shapes, false\notherwise.\n\nParameters\n----------\narray1: numpy.ndarray\n\narray2: numpy.ndarray\n\nnd_to_check: int\n Number of the dimensions to check, i.e., if == 3 then will check only the 3 first numbers of array.shape.\nReturns\n-------\nbool", "id": "f4091:m11"} {"signature": "def have_same_geometry(fname1, fname2):", "body": "img1shape = nib.load(fname1).get_shape()img2shape = nib.load(fname2).get_shape()return have_same_shape(img1shape, img2shape)", "docstring": "@param fname1: string\nFile path of an image\n\n@param fname2: string\nFile path of an image\n\n@return: bool\nTrue if both have the same geometry", "id": "f4091:m12"} {"signature": "def have_same_spatial_geometry(fname1, fname2):", "body": "img1shape = nib.load(fname1).get_shape()img2shape = nib.load(fname2).get_shape()return img1shape[:] == img2shape[:]", "docstring": "@param fname1: string\nFile path of an image\n\n@param fname2: string\nFile path of an image\n\n@return: bool\nTrue if both have the same geometry", "id": "f4091:m13"} {"signature": "def check_have_same_geometry(fname1, fname2):", "body": "if not have_same_geometry(fname1, fname2):raise ArithmeticError('' + fname1 + '' + fname2)", "docstring": "@param fname1:\n@param fname2:", "id": "f4091:m14"} {"signature": "def check_have_same_spatial_geometry(fname1, fname2):", "body": "if not have_same_spatial_geometry(fname1, fname2):raise ArithmeticError('' + fname1 + '' + fname2)", "docstring": "@param fname1:\n@param fname2:", "id": "f4091:m15"} {"signature": "def get_sampling_interval(func_img):", "body": "return func_img.get_header().get_zooms()[-]", "docstring": "Extracts the supposed sampling interval (TR) from the nifti file header.\n\n@param func_img: a NiBabel SpatialImage\n\n@return: float\nThe TR value from the image header", "id": "f4091:m16"} {"signature": "def xfm_atlas_to_functional(atlas_filepath, anatbrain_filepath, meanfunc_filepath,atlas2anat_nonlin_xfm_filepath, is_atlas2anat_inverted,anat2func_lin_xfm_filepath,atlasinanat_out_filepath, atlasinfunc_out_filepath,interp='', rewrite=True, parallel=False):", "body": "if is_atlas2anat_inverted:anat_to_mni_nl_inv = atlas2anat_nonlin_xfm_filepathelse:output_dir = op.abspath (op.dirname(atlasinanat_out_filepath))ext = get_extension(atlas2anat_nonlin_xfm_filepath)anat_to_mni_nl_inv = op.join(output_dir, remove_ext(op.basename(atlas2anat_nonlin_xfm_filepath)) + '' + ext)invwarp_cmd = op.join('', '', '')applywarp_cmd = op.join('', '', '')fslsub_cmd = op.join('', '', '')if parallel:invwarp_cmd = fslsub_cmd + '' + invwarp_cmdapplywarp_cmd = fslsub_cmd + '' + applywarp_cmdif rewrite or (not is_atlas2anat_inverted and not op.exists(anat_to_mni_nl_inv)):log.debug(''.format(anat_to_mni_nl_inv))cmd = invwarp_cmd + ''cmd += ''.format(atlas2anat_nonlin_xfm_filepath)cmd += ''.format(anat_to_mni_nl_inv)cmd += ''.format(anatbrain_filepath)log.debug(''.format(cmd))check_call(cmd)if rewrite or not op.exists(atlasinanat_out_filepath):log.debug(''.format(atlasinanat_out_filepath))cmd = applywarp_cmd + ''cmd += ''.format(atlas_filepath)cmd += 
''.format(anatbrain_filepath)cmd += ''.format(anat_to_mni_nl_inv)cmd += ''.format(interp)cmd += ''.format(atlasinanat_out_filepath)log.debug(''.format(cmd))check_call(cmd)if rewrite or not op.exists(atlasinfunc_out_filepath):log.debug(''.format(atlasinfunc_out_filepath))cmd = applywarp_cmd + ''cmd += ''.format(atlasinanat_out_filepath)cmd += ''.format(meanfunc_filepath)cmd += ''.format(anat2func_lin_xfm_filepath)cmd += ''.format(interp)cmd += ''.format(atlasinfunc_out_filepath)log.debug(''.format(cmd))check_call(cmd)", "docstring": "Call FSL tools to apply transformations to a given atlas to a functional image.\n Given the transformation matrices.\n\n Parameters\n ----------\n atlas_filepath: str\n Path to the 3D atlas volume file.\n\n anatbrain_filepath: str\n Path to the anatomical brain volume file (skull-stripped and registered to the same space as the atlas,\n e.g., MNI).\n\n meanfunc_filepath: str\n Path to the average functional image to be used as reference in the last applywarp step.\n\n atlas2anat_nonlin_xfm_filepath: str\n Path to the atlas to anatomical brain linear transformation .mat file.\n If you have the inverse transformation, i.e., anatomical brain to atlas, set is_atlas2anat_inverted to True.\n\n is_atlas2anat_inverted: bool\n If False will have to calculate the inverse atlas2anat transformation to apply the transformations.\n This step will be performed with FSL invwarp.\n\n anat2func_lin_xfm_filepath: str\n Path to the anatomical to functional .mat linear transformation file.\n\n atlasinanat_out_filepath: str\n Path to output file which will contain the 3D atlas in the subject anatomical space.\n\n atlasinfunc_out_filepath: str\n Path to output file which will contain the 3D atlas in the subject functional space.\n\n verbose: bool\n If verbose will show DEBUG log info.\n\n rewrite: bool\n If True will re-run all the commands overwriting any existing file. Otherwise will check if\n each file exists and if it does won't run the command.\n\n parallel: bool\n If True will launch the commands using ${FSLDIR}/fsl_sub to use the cluster infrastructure you have setup\n with FSL (SGE or HTCondor).", "id": "f4092:m0"} {"signature": "def merge_images(images, axis=''):", "body": "if not images:return Noneaxis_dim = {'': ,'': ,'': ,'': ,}if axis not in axis_dim:raise ValueError(''.format(set(axis_dim.keys()), axis))img1 = images[]for img in images:check_img_compatibility(img1, img)image_data = []for img in images:image_data.append(check_img(img).get_data())work_axis = axis_dim[axis]ndim = image_data[].ndimif ndim - < work_axis:image_data = [np.expand_dims(img, axis=work_axis) for img in image_data]return np.concatenate(image_data, axis=work_axis)", "docstring": "Concatenate `images` in the direction determined in `axis`.\n\n Parameters\n ----------\n images: list of str or img-like object.\n See NeuroImage constructor docstring.\n\n axis: str\n 't' : concatenate images in time\n 'x' : concatenate images in the x direction\n 'y' : concatenate images in the y direction\n 'z' : concatenate images in the z direction\n\n Returns\n -------\n merged: img-like object", "id": "f4093:m0"} {"signature": "def nifti_out(f):", "body": "@wraps(f)def wrapped(*args, **kwargs):r = f(*args, **kwargs)img = read_img(args[])return nib.Nifti1Image(r, affine=img.get_affine(), header=img.header)return wrapped", "docstring": "Picks a function whose first argument is an `img`, processes its\n data and returns a numpy array. 
This decorator wraps this numpy array\n into a nibabel.Nifti1Image.", "id": "f4093:m1"} {"signature": "@nifti_outdef thr_img(img, thr=, mode=''):", "body": "vol = read_img(img).get_data()if mode == '':mask = vol > threlif mode == '' or mode == '':mask = np.abs(vol) > threlif mode == '':mask = vol < -threlse:raise ValueError(\"\"\"\".format(mode))return vol * mask", "docstring": "Use the given magic function name `func` to threshold with value `thr`\n the data of `img` and return a new nibabel.Nifti1Image.\n Parameters\n ----------\n img: img-like\n\n thr: float or int\n The threshold value.\n\n mode: str\n Choices: '+' for positive threshold,\n '+-' for positive and negative threshold and\n '-' for negative threshold.\n Returns\n -------\n thr_img: nibabel.Nifti1Image\n Thresholded image", "id": "f4093:m2"} {"signature": "@nifti_outdef bin_img(img):", "body": "return read_img(img).get_data() > ", "docstring": "Return an image with the positive voxels of the data of `img`.", "id": "f4093:m3"} {"signature": "@nifti_outdef positive_img(img):", "body": "bool_img = read_img(img).get_data() > return bool_img.astype(int)", "docstring": "Return an image with the positive voxels of the data of `img`.", "id": "f4093:m4"} {"signature": "@nifti_outdef negative_img(img):", "body": "bool_img = read_img(img).get_data() < return bool_img.astype(int)", "docstring": "Return an image with the negative voxels of the data of `img`.", "id": "f4093:m5"} {"signature": "@nifti_outdef div_img(img1, div2):", "body": "if is_img(div2):return img1.get_data()/div2.get_data()elif isinstance(div2, (float, int)):return img1.get_data()/div2else:raise NotImplementedError(''''.format(type(img1),img1,type(div2),div2))", "docstring": "Pixelwise division or divide by a number", "id": "f4093:m8"} {"signature": "@nifti_outdef apply_mask(img, mask):", "body": "from .mask import apply_maskvol, _ = apply_mask(img, mask)return vector_to_volume(vol, read_img(mask).get_data().astype(bool))", "docstring": "Return the image with the given `mask` applied.", "id": "f4093:m9"} {"signature": "@nifti_outdef abs_img(img):", "body": "bool_img = np.abs(read_img(img).get_data())return bool_img.astype(int)", "docstring": "Return an image with the binarised version of the data of `img`.", "id": "f4093:m10"} {"signature": "@nifti_outdef icc_img_to_zscore(icc, center_image=False):", "body": "vol = read_img(icc).get_data()v2 = vol[vol != ]if center_image:v2 = detrend(v2, axis=)vstd = np.linalg.norm(v2, ord=) / np.sqrt(np.prod(v2.shape) - )eps = np.finfo(vstd.dtype).epsvol /= (eps + vstd)return vol", "docstring": "Return a z-scored version of `icc`.\n This function is based on GIFT `icatb_convertImageToZScores` function.", "id": "f4093:m11"} {"signature": "@nifti_outdef spatial_map(icc, thr, mode=''):", "body": "return thr_img(icc_img_to_zscore(icc), thr=thr, mode=mode).get_data()", "docstring": "Return the thresholded z-scored `icc`.", "id": "f4093:m12"} {"signature": "def filter_icc(icc, mask=None, thr=, zscore=True, mode=\"\"):", "body": "if zscore:icc_filt = thr_img(icc_img_to_zscore(icc), thr=thr, mode=mode)else:icc_filt = thr_img(icc, thr=thr, mode=mode)if mask is not None:icc_filt = apply_mask(icc_filt, mask)return icc_filt", "docstring": "Threshold then mask an IC correlation map.\n Parameters\n ----------\n icc: img-like\n The 'raw' ICC map.\n\n mask: img-like\n If not None. 
Will apply this mask at the end of the process.\n\n thr: float\n The threshold value.\n\n zscore: bool\n If True will calculate the z-score of the ICC before thresholding.\n\n mode: str\n Choices: '+' for positive threshold,\n '+-' for positive and negative threshold and\n '-' for negative threshold.\n\n Returns\n -------\n icc_filt: nibabel.NiftiImage\n Thresholded and masked ICC.", "id": "f4093:m13"} {"signature": "def read_img(img_file):", "body": "return check_img(img_file)", "docstring": "Return a representation of the image, either a nibabel.Nifti1Image or the same object as img_file.\n See boyle.nifti.check_img.\n\n Parameters\n ----------\n img: img-like object or str\n Can either be:\n - a file path to a Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n Returns\n -------\n img", "id": "f4094:m0"} {"signature": "def get_nii_info(img_file):", "body": "warnings.warn(\"\",DeprecationWarning)return get_img_info(img_file)", "docstring": "See get_img_info", "id": "f4094:m1"} {"signature": "def get_nii_data(nii_file):", "body": "warnings.warn(\"\",DeprecationWarning)return get_img_data(nii_file)", "docstring": "See get_img_data", "id": "f4094:m2"} {"signature": "def get_img_info(image):", "body": "try:img = check_img(image)except Exception as exc:raise Exception(''.format(repr_imgs(image))) from excelse:return img.get_header(), img.get_affine()", "docstring": "Return the header and affine matrix from a Nifti file.\n\n Parameters\n ----------\n image: img-like object or str\n Can either be:\n - a file path to a Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n Returns\n -------\n hdr, aff", "id": "f4094:m3"} {"signature": "def get_img_data(image, copy=True):", "body": "try:img = check_img(image)if copy:return get_data(img)else:return img.get_data()except Exception as exc:raise Exception(''.format(repr_imgs(image))) from exc", "docstring": "Return the voxel matrix of the Nifti file.\n If safe_mode will make a copy of the img before returning the data, so the input image is not modified.\n\n Parameters\n ----------\n image: img-like object or str\n Can either be:\n - a file path to a Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. 
If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n copy: bool\n If safe_mode will make a copy of the img before returning the data, so the input image is not modified.\n\n Returns\n -------\n array_like", "id": "f4094:m4"} {"signature": "def load_nipy_img(nii_file):", "body": "import nipyif not os.path.exists(nii_file):raise FileNotFound(nii_file)try:return nipy.load_image(nii_file)except Exception as exc:raise Exception(''.format(repr_imgs(nii_file))) from exc", "docstring": "Read a Nifti file and return as nipy.Image\n\n Parameters\n ----------\n param nii_file: str\n Nifti file path\n\n Returns\n -------\n nipy.Image", "id": "f4094:m5"} {"signature": "def niftilist_to_array(img_filelist, outdtype=None):", "body": "try:first_img = img_filelist[]vol = get_img_data(first_img)except IndexError as ie:raise Exception(''.format(repr_imgs(img_filelist[]))) from ieif not outdtype:outdtype = vol.dtypeoutmat = np.zeros((len(img_filelist), np.prod(vol.shape)), dtype=outdtype)try:for i, img_file in enumerate(img_filelist):vol = get_img_data(img_file)outmat[i, :] = vol.flatten()except Exception as exc:raise Exception(''.format(img_file)) from excreturn outmat, vol.shape", "docstring": "From the list of absolute paths to nifti files, creates a Numpy array\nwith the data.\n\nParameters\n----------\nimg_filelist: list of str\n List of absolute file paths to nifti files. All nifti files must have\n the same shape.\n\noutdtype: dtype\n Type of the elements of the array, if not set will obtain the dtype from\n the first nifti file.\n\nReturns\n-------\noutmat: Numpy array with shape N x prod(vol.shape)\n containing the N files as flat vectors.\n\nvol_shape: Tuple with shape of the volumes, for reshaping.", "id": "f4094:m6"} {"signature": "def _crop_img_to(image, slices, copy=True):", "body": "img = check_img(image)data = img.get_data()affine = img.get_affine()cropped_data = data[slices]if copy:cropped_data = cropped_data.copy()linear_part = affine[:, :]old_origin = affine[:, ]new_origin_voxel = np.array([s.start for s in slices])new_origin = old_origin + linear_part.dot(new_origin_voxel)new_affine = np.eye()new_affine[:, :] = linear_partnew_affine[:, ] = new_originnew_img = nib.Nifti1Image(cropped_data, new_affine)return new_img", "docstring": "Crops image to a smaller size\n\n Crop img to size indicated by slices and modify the affine accordingly.\n\n Parameters\n ----------\n image: img-like object or str\n Can either be:\n - a file path to a Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n Image to be cropped.\n\n slices: list of slices\n Defines the range of the crop.\n E.g. 
[slice(20, 200), slice(40, 150), slice(0, 100)]\n defines a 3D cube\n\n If slices has fewer entries than image has dimensions,\n the slices will be applied to the first len(slices) dimensions.\n\n copy: boolean\n Specifies whether cropped data is to be copied or not.\n Default: True\n\n Returns\n -------\n cropped_img: img-like object\n Cropped version of the input image", "id": "f4094:m7"} {"signature": "def crop_img(image, rtol=, copy=True):", "body": "img = check_img(image)data = img.get_data()infinity_norm = max(-data.min(), data.max())passes_threshold = np.logical_or(data < -rtol * infinity_norm,data > rtol * infinity_norm)if data.ndim == :passes_threshold = np.any(passes_threshold, axis=-)coords = np.array(np.where(passes_threshold))start = coords.min(axis=)end = coords.max(axis=) + start = np.maximum(start - , )end = np.minimum(end + , data.shape[:])slices = [slice(s, e) for s, e in zip(start, end)]return _crop_img_to(img, slices, copy=copy)", "docstring": "Crops img as much as possible\n\n Will crop img, removing as many zero entries as possible\n without touching non-zero entries. Will leave one voxel of\n zero padding around the obtained non-zero area in order to\n avoid sampling issues later on.\n\n Parameters\n ----------\n image: img-like object or str\n Can either be:\n - a file path to a Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n Image to be cropped.\n\n rtol: float\n relative tolerance (with respect to maximal absolute\n value of the image), under which values are considered\n negligible and thus croppable.\n\n copy: boolean\n Specifies whether cropped data is copied or not.\n\n Returns\n -------\n cropped_img: image\n Cropped version of the input image", "id": "f4094:m8"} {"signature": "def new_img_like(ref_niimg, data, affine=None, copy_header=False):", "body": "if not (hasattr(ref_niimg, '')and hasattr(ref_niimg,'')):if isinstance(ref_niimg, _basestring):ref_niimg = nib.load(ref_niimg)elif operator.isSequenceType(ref_niimg):ref_niimg = nib.load(ref_niimg[])else:raise TypeError(('''') % ref_niimg )if affine is None:affine = ref_niimg.get_affine()if data.dtype == bool:default_dtype = np.int8if (LooseVersion(nib.__version__) >= LooseVersion('') andisinstance(ref_niimg, nib.freesurfer.mghformat.MGHImage)):default_dtype = np.uint8data = as_ndarray(data, dtype=default_dtype)header = Noneif copy_header:header = copy.copy(ref_niimg.get_header())header[''] = header[''] = header[''] = header[''] = np.max(data) if data.size > else header[''] = np.min(data) if data.size > else return ref_niimg.__class__(data, affine, header=header)", "docstring": "Create a new image of the same class as the reference image\n\n Parameters\n ----------\n ref_niimg: image\n Reference image. 
The new image will be of the same type.\n\n data: numpy array\n Data to be stored in the image\n\n affine: 4x4 numpy array, optional\n Transformation matrix\n\n copy_header: boolean, optional\n Indicated if the header of the reference image should be used to\n create the new image\n\n Returns\n -------\n new_img: image\n A loaded image with the same type (and header) as the reference image.", "id": "f4094:m9"} {"signature": "def check_mhd_img(image, make_it_3d=False):", "body": "if isinstance(image, string_types):if not op.exists(image):raise FileNotFound(image)ext = get_extension(image).lower()if not '' in ext:warnings.warn(''.format(image))img, hdr = load_raw_data_with_mhd(image)if make_it_3d:img = _make_it_3d(img)return imgelif is_img(image):return imageelse:raise TypeError(''''''.format(type(image)))", "docstring": "Check that image is a proper img. Turn filenames into objects.\n\n Parameters\n ----------\n image: img-like object or str\n Can either be:\n - a file path to a .mhd file. (if it is a .raw file, this won't work).\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to .mhd image and\n call load_raw_data_with_mhd on it. If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n make_it_3d: boolean, optional\n If True, check if the image is a 3D image and raise an error if not.\n\n Returns\n -------\n result: nifti-like\n result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed\n that the returned object has get_data() and get_affine() methods.", "id": "f4096:m0"} {"signature": "def _make_it_3d(img):", "body": "shape = img.shapeif len(shape) == :return imgelif len(shape) == and shape[] == :return img[:, :, :, ]else:raise TypeError(''.format(shape))", "docstring": "Enforce that img is a 3D img-like object, if it is not, raise a TypeError.\n i.e., remove dimensions of size 1.\n\n Parameters\n ----------\n img: numpy.ndarray\n Image data array\n\n Returns\n -------\n 3D numpy ndarray object", "id": "f4096:m1"} {"signature": "def write_meta_header(filename, meta_dict):", "body": "header = ''for tag in MHD_TAGS:if tag in meta_dict.keys():header += ''.format(tag, meta_dict[tag])with open(filename, '') as f:f.write(header)", "docstring": "Write the content of the `meta_dict` into `filename`.\n\n Parameters\n ----------\n filename: str\n Path to the output file\n\n meta_dict: dict\n Dictionary with the fields of the metadata .mhd file", "id": "f4098:m0"} {"signature": "def dump_raw_data(filename, data):", "body": "if data.ndim == :data = data.reshape([data.shape[], data.shape[]*data.shape[]])a = array.array('')for o in data:a.fromlist(list(o.flatten()))with open(filename, '') as rawf:a.tofile(rawf)", "docstring": "Write the data into a raw format file. 
Big endian is always used.\n\n Parameters\n ----------\n filename: str\n Path to the output file\n\n data: numpy.ndarray\n n-dimensional image data array.", "id": "f4098:m1"} {"signature": "def write_mhd_file(filename, data, shape=None, meta_dict=None):", "body": "ext = get_extension(filename)fname = op.basename(filename)if ext != '' or ext != '':mhd_filename = fname + ''raw_filename = fname + ''elif ext == '':mhd_filename = fnameraw_filename = remove_ext(fname) + ''elif ext == '':mhd_filename = remove_ext(fname) + ''raw_filename = fnameelse:raise ValueError(''''.format(ext, filename))if meta_dict is None:meta_dict = {}if shape is None:shape = data.shapemeta_dict[''] = meta_dict.get('', '')meta_dict[''] = meta_dict.get('', '' )meta_dict[''] = meta_dict.get('', '')meta_dict[''] = meta_dict.get('', NUMPY_TO_MHD_TYPE[data.dtype.type])meta_dict[''] = meta_dict.get('', str(len(shape)))meta_dict[''] = meta_dict.get('', ''.join([str(i) for i in shape]))meta_dict[''] = meta_dict.get('', raw_filename)mhd_filename = op.join(op.dirname(filename), mhd_filename)raw_filename = op.join(op.dirname(filename), raw_filename)write_meta_header(mhd_filename, meta_dict)dump_raw_data(raw_filename, data)return mhd_filename, raw_filename", "docstring": "Write the `data` and `meta_dict` in two files with names\n that use `filename` as a prefix.\n\n Parameters\n ----------\n filename: str\n Path to the output file.\n This is going to be used as a prefix.\n Two files will be created, one with a '.mhd' extension\n and another with '.raw'. If `filename` has any of these already\n they will be taken into account to build the filenames.\n\n data: numpy.ndarray\n n-dimensional image data array.\n\n shape: tuple\n Tuple describing the shape of `data`\n Default: data.shape\n\n meta_dict: dict\n Dictionary with the fields of the metadata .mhd file\n Default: {}\n\n Returns\n -------\n mhd_filename: str\n Path to the .mhd file\n\n raw_filename: str\n Path to the .raw file", "id": "f4098:m2"} {"signature": "def copy_mhd_and_raw(src, dst):", "body": "if not op.exists(src):raise IOError(''.format(src))ext = get_extension(src)if ext != '':msg = ''.format(src)raise ValueError(msg)meta_src = _read_meta_header(src)src_raw = meta_src['']if not op.isabs(src_raw):src_raw = op.join(op.dirname(src), src_raw)if op.isdir(dst):shutil.copyfile(src, dst)shutil.copyfile(src_raw, dst)return dstdst_raw = op.join(op.dirname(dst), remove_ext(op.basename(dst))) + ''if get_extension(dst) != '':dst += ''log.debug(''.format(src, dst))log.debug(''.format(src_raw, dst_raw))shutil.copyfile(src, dst)shutil.copyfile(src_raw, dst_raw)if op.basename(dst) != op.basename(src):log.debug(''.format(dst, src_raw,op.basename(dst_raw)))meta_dst = _read_meta_header(dst)meta_dst[''] = op.basename(dst_raw)write_meta_header(dst, meta_dst)return dst", "docstring": "Copy .mhd and .raw files to dst.\n\n If dst is a folder, won't change the file, but if dst is another filepath,\n will modify the ElementDataFile field in the .mhd to point to the\n new renamed .raw file.\n\n Parameters\n ----------\n src: str\n Path to the .mhd file to be copied\n\n dst: str\n Path to the destination of the .mhd and .raw files.\n If a new file name is given, the extension will be ignored.\n\n Returns\n -------\n dst: str", "id": "f4098:m3"} {"signature": "def _read_meta_header(filename):", "body": "fileIN = open(filename, '')line = fileIN.readline()meta_dict = {}tag_flag = [False]*len(MHD_TAGS)while line:tags = str.split(line, '')for i in range(len(MHD_TAGS)):tag = MHD_TAGS[i]if 
(str.strip(tags[]) == tag) and (not tag_flag[i]):meta_dict[tag] = str.strip(tags[])tag_flag[i] = Trueline = fileIN.readline()fileIN.close()return meta_dict", "docstring": "Return a dictionary of meta data from meta header file.\n\n Parameters\n ----------\n filename: str\n Path to a .mhd file\n\n Returns\n -------\n meta_dict: dict\n A dictionary with the .mhd header content.", "id": "f4099:m0"} {"signature": "def load_raw_data_with_mhd(filename):", "body": "meta_dict = _read_meta_header(filename)dim = int(meta_dict[''])assert (meta_dict[''] in MHD_TO_NUMPY_TYPE)arr = [int(i) for i in meta_dict[''].split()]volume = reduce(lambda x, y: x*y, arr[:dim-], )pwd = op.dirname(filename)raw_file = meta_dict['']data_file = op.join(pwd, raw_file)ndtype = MHD_TO_NUMPY_TYPE[meta_dict['']]arrtype = NDARRAY_TO_ARRAY_TYPE[ndtype]with open(data_file, '') as fid:binvalues = array.array(arrtype)binvalues.fromfile(fid, volume*arr[dim-])data = np.array (binvalues, ndtype)data = np.reshape(data, (arr[dim-], volume))if dim >= :dimensions = [int(i) for i in meta_dict[''].split()]data = data.reshape(dimensions)return data, meta_dict", "docstring": "Return a dictionary of meta data from meta header file.\n\n Parameters\n ----------\n filename: str\n Path to a .mhd file\n\n Returns\n -------\n data: numpy.ndarray\n n-dimensional image data array.\n\n meta_dict: dict\n A dictionary with the .mhd header content.", "id": "f4099:m1"} {"signature": "def get_3D_from_4D(filename, vol_idx=):", "body": "def remove_4th_element_from_hdr_string(hdr, fieldname):if fieldname in hdr:hdr[fieldname] = ''.join(hdr[fieldname].split()[:])vol, hdr = load_raw_data_with_mhd(filename)if vol.ndim != :raise ValueError(''.format(op.join(op.dirname(filename),hdr[''])))if not <= vol_idx < vol.shape[]:raise IndexError(''.format(filename,vol.shape[], vol_idx))new_vol = vol[:, :, :, vol_idx].copy()hdr[''] = remove_4th_element_from_hdr_string(hdr, '')remove_4th_element_from_hdr_string(hdr, '')return new_vol, hdr", "docstring": "Return a 3D volume from a 4D nifti image file\n\n Parameters\n ----------\n filename: str\n Path to the 4D .mhd file\n\n vol_idx: int\n Index of the 3D volume to be extracted from the 4D volume.\n\n Returns\n -------\n vol, hdr\n The data array and the new 3D image header.", "id": "f4099:m2"} {"signature": "def tabulate(self, tablefmt=''):", "body": "return tabulate(self, tablefmt=tablefmt)", "docstring": ":param tablefmt: string\n Supported table formats are:\n\"plain\"\n\"simple\"\n\"grid\"\n\"pipe\"\n\"orgtbl\"\n\"rst\"\n\"mediawiki\"\n\"latex\"\n\n:return: tabulate\nTabulated content", "id": "f4100:c0:m1"} {"signature": "def tabulate(self, tablefmt=''):", "body": "return tabulate(list(self.items()), tablefmt=tablefmt)", "docstring": ":param tablefmt: string\n Supported table formats are:\n\"plain\"\n\"simple\"\n\"grid\"\n\"pipe\"\n\"orgtbl\"\n\"rst\"\n\"mediawiki\"\n\"latex\"\n\n:return: tabulate\nTabulated content", "id": "f4100:c1:m1"} {"signature": "@propertydef affine(self):", "body": "return self.img.affine", "docstring": "Return the affine matrix from the image", "id": "f4101:c0:m7"} {"signature": "@propertydef header(self):", "body": "return self.img.header", "docstring": "Return the header from the image", "id": "f4101:c0:m8"} {"signature": "def pixdim(self):", "body": "return self.get_header().get_zooms()", "docstring": "Return the voxel size in the header of the file.", "id": "f4101:c0:m10"} {"signature": "def get_data(self, safe_copy=False):", "body": "if safe_copy:data = get_data(self.img)else:data = 
self.img.get_data(caching=self._caching)return data", "docstring": "Get the data in the image.\n If save_copy is True, will perform a deep copy of the data and return it.\n\n Parameters\n ----------\n smoothed: (optional) bool\n If True and self._smooth_fwhm > 0 will smooth the data before masking.\n\n masked: (optional) bool\n If True and self.has_mask will return the masked data, the plain data otherwise.\n\n safe_copy: (optional) bool\n\n Returns\n -------\n np.ndarray", "id": "f4101:c0:m11"} {"signature": "def to_file(self, outpath):", "body": "save_niigz(outpath, self.img)", "docstring": "Save this object instance in outpath.\n\n Parameters\n ----------\n outpath: str\n Output file path", "id": "f4101:c0:m12"} {"signature": "@smooth_fwhm.setterdef smooth_fwhm(self, fwhm):", "body": "if fwhm != self._smooth_fwhm:self._is_data_smooth = Falseself._smooth_fwhm = fwhm", "docstring": "Set a smoothing Gaussian kernel given its FWHM in mm.", "id": "f4101:c1:m5"} {"signature": "def get_data(self, smoothed=True, masked=True, safe_copy=False):", "body": "if not safe_copy and smoothed == self._is_data_smooth and masked == self._is_data_masked:if self.has_data_loaded() and self._caching == '':return self.get_data()if safe_copy:data = get_data(self.img)else:data = self.img.get_data(caching=self._caching)is_smoothed = Falseif smoothed and self._smooth_fwhm > :try:data = _smooth_data_array(data, self.get_affine(), self._smooth_fwhm, copy=False)except ValueError as ve:raise ValueError(''''.format(self.img, self._smooth_fwhm)) from veelse:is_smoothed = Trueis_data_masked = Falseif masked and self.has_mask():try:data = self.unmask(self._mask_data(data)[])except:raiseelse:is_data_masked = Trueif not safe_copy:self._is_data_masked = is_data_maskedself._is_data_smooth = is_smoothedreturn data", "docstring": "Get the data in the image.\n If save_copy is True, will perform a deep copy of the data and return it.\n\n Parameters\n ----------\n smoothed: (optional) bool\n If True and self._smooth_fwhm > 0 will smooth the data before masking.\n\n masked: (optional) bool\n If True and self.has_mask will return the masked data, the plain data otherwise.\n\n safe_copy: (optional) bool\n\n Returns\n -------\n np.ndarray", "id": "f4101:c1:m10"} {"signature": "def apply_mask(self, mask_img):", "body": "self.set_mask(mask_img)return self.get_data(masked=True, smoothed=True, safe_copy=True)", "docstring": "First set_mask and the get_masked_data.\n\n Parameters\n ----------\n mask_img: nifti-like image, NeuroImage or str\n 3D mask array: True where a voxel should be used.\n Can either be:\n - a file path to a Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n Returns\n -------\n The masked data deepcopied", "id": "f4101:c1:m13"} {"signature": "def set_mask(self, mask_img):", "body": "mask = load_mask(mask_img, allow_empty=True)check_img_compatibility(self.img, mask, only_check_3d=True) self.mask = mask", "docstring": "Sets a mask img to this. 
So every operation to self, this mask will be taken into account.\n\n Parameters\n ----------\n mask_img: nifti-like image, NeuroImage or str\n 3D mask array: True where a voxel should be used.\n Can either be:\n - a file path to a Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n Note\n ----\n self.img and mask_file must have the same shape.\n\n Raises\n ------\n FileNotFound, NiftiFilesNotCompatible", "id": "f4101:c1:m14"} {"signature": "def _mask_data(self, data):", "body": "self._check_for_mask()msk_data = self.mask.get_data()if self.ndim == :return data[msk_data], np.where(msk_data)elif self.ndim == :return _apply_mask_to_4d_data(data, self.mask)else:raise ValueError(''.format(self, self.ndim, self.mask))", "docstring": "Return the data masked with self.mask\n\n Parameters\n ----------\n data: np.ndarray\n\n Returns\n -------\n masked np.ndarray\n\n Raises\n ------\n ValueError if the data and mask dimensions are not compatible.\n Other exceptions related to numpy computations.", "id": "f4101:c1:m15"} {"signature": "def apply_smoothing(self, smooth_fwhm):", "body": "if smooth_fwhm <= :returnold_smooth_fwhm = self._smooth_fwhmself._smooth_fwhm = smooth_fwhmtry:data = self.get_data(smoothed=True, masked=True, safe_copy=True)except ValueError as ve:self._smooth_fwhm = old_smooth_fwhmraiseelse:self._smooth_fwhm = smooth_fwhmreturn data", "docstring": "Set self._smooth_fwhm and then smooths the data.\n See boyle.nifti.smooth.smooth_imgs.\n\n Returns\n -------\n the smoothed data deepcopied.", "id": "f4101:c1:m16"} {"signature": "def mask_and_flatten(self):", "body": "self._check_for_mask()return self.get_data(smoothed=True, masked=True, safe_copy=False)[self.get_mask_indices()],self.get_mask_indices(), self.mask.shape", "docstring": "Return a vector of the masked data.\n\n Returns\n -------\n np.ndarray, tuple of indices (np.ndarray), tuple of the mask shape", "id": "f4101:c1:m17"} {"signature": "def unmask(self, arr):", "body": "self._check_for_mask()if > arr.ndim > :raise ValueError(''''.format(arr.ndim,len(self.mask.shape)))if arr.ndim == :return matrix_to_4dvolume(arr, self.mask.get_data())elif arr.ndim == :return vector_to_volume(arr, self.mask.get_data())", "docstring": "Use self.mask to reshape arr and self.img to get an affine and header to create\n a new self.img using the data in arr.\n If self.has_mask() is False, will return the same arr.", "id": "f4101:c1:m18"} {"signature": "def to_file(self, outpath):", "body": "if not self.has_mask() and not self.is_smoothed():save_niigz(outpath, self.img)else:save_niigz(outpath, self.get_data(masked=True, smoothed=True),self.get_header(), self.get_affine())", "docstring": "Save this object instance in outpath.\n\n Parameters\n ----------\n outpath: str\n Output file path", "id": "f4101:c1:m19"} {"signature": "def open_volume_file(filepath):", "body": "if not op.exists(filepath):raise IOError(''.format(filepath))def open_nifti_file(filepath):return NiftiImage(filepath)def open_mhd_file(filepath):return MedicalImage(filepath)vol_data, hdr_data = load_raw_data_with_mhd(filepath)return vol_data, hdr_datadef open_mha_file(filepath):raise NotImplementedError('')def _load_file(filepath, loader):return loader(filepath)filext_loader = {'': open_nifti_file,'': open_mhd_file,'': open_mha_file,}ext = 
get_extension(filepath)loader = Nonefor e in filext_loader:if ext in e:loader = filext_loader[e]if loader is None:raise ValueError(''.format(filepath))return _load_file(filepath, loader)", "docstring": "Open a volumetric file using the tools following the file extension.\n\n Parameters\n ----------\n filepath: str\n Path to a volume file\n\n Returns\n -------\n volume_data: np.ndarray\n Volume data\n\n pixdim: 1xN np.ndarray\n Vector with the description of the voxels physical size (usually in mm) for each volume dimension.\n\n Raises\n ------\n IOError\n In case the file is not found.", "id": "f4102:m0"} {"signature": "def _check_medimg(image, make_it_3d=True):", "body": "if isinstance(image, string_types):img = open_volume_file(image)if make_it_3d:img = _make_it_3d(img)return imgelif isinstance(image, np.array):return nib.Nifti2Image(image, affine=np.eye(image.ndim + ))elif isinstance(image, nib.Nifti1Image) or is_img(image):return imageelse:raise TypeError(''''''.format(type(image)))", "docstring": "Check that image is a proper img. Turn filenames into objects.\n\n Parameters\n ----------\n image: img-like object or str\n Can either be:\n - a file path to a medical image file, e.g. NifTI, .mhd/raw, .mha\n - any object with get_data() method and affine & header attributes, e.g., nibabel.Nifti1Image.\n - a Numpy array, which will be wrapped by a nibabel.Nifti2Image class with an `eye` affine.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n make_it_3d: boolean, optional\n If True, check if the image is a 3D image and raise an error if not.\n\n Returns\n -------\n result: nifti-like\n result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed\n that the returned object has get_data() and get_affine() methods.", "id": "f4102:m1"} {"signature": "def compose_err_msg(msg, **kwargs):", "body": "updated_msg = msgfor k, v in sorted(kwargs.items()):if isinstance(v, _basestring): updated_msg += \"\" + k + \"\" + vreturn updated_msg", "docstring": "Append key-value pairs to msg, for display.\n\n Parameters\n ----------\n msg: string\n arbitrary message\n kwargs: dict\n arbitrary dictionary\n\n Returns\n -------\n updated_msg: string\n msg, with \"key: value\" appended. 
Only string values are appended.\n\n Example\n -------\n >>> compose_err_msg('Error message with arguments...', arg_num=123, \\\n arg_str='filename.nii', arg_bool=True)\n 'Error message with arguments...\\\\narg_str: filename.nii'\n >>>", "id": "f4103:m0"} {"signature": "def timestamp_with_tzinfo(dt):", "body": "utc = tzutc()if dt.tzinfo:dt = dt.astimezone(utc).replace(tzinfo=None)return dt.isoformat() + ''", "docstring": "Serialize a date/time value into an ISO8601 text representation\nadjusted (if needed) to UTC timezone.\n\nFor instance:\n>>> serialize_date(datetime(2012, 4, 10, 22, 38, 20, 604391))\n'2012-04-10T22:38:20.604391Z'", "id": "f4104:m0"} {"signature": "def timestamp_to_date_str(dt):", "body": "return str(dt.date())", "docstring": "Serialize a date/time value into YYYY-MM-DD date string.", "id": "f4104:m1"} {"signature": "def _to_string(data):", "body": "sdata = data.copy()for k, v in data.items():if isinstance(v, datetime):sdata[k] = timestamp_to_date_str(v)elif not isinstance(v, (string_types, float, int)):sdata[k] = str(v)return sdata", "docstring": "Convert to string all values in `data`.\n\n Parameters\n ----------\n data: dict[str]->object\n\n Returns\n -------\n string_data: dict[str]->str", "id": "f4104:m2"} {"signature": "def insert_unique(table, data, unique_fields=None, *, raise_if_found=False):", "body": "item = find_unique(table, data, unique_fields)if item is not None:if raise_if_found:raise NotUniqueItemError(''''.format(unique_fields,data,table.get(eid=item),item))else:return itemreturn table.insert(data)", "docstring": "Insert `data` into `table` ensuring that data has unique values\n in `table` for the fields listed in `unique_fields`.\n\n If `raise_if_found` is True, will raise an NotUniqueItemError if\n another item with the same `unique_fields` values are found\n previously in `table`.\n If False, will return the `eid` from the item found.\n\n Parameters\n ----------\n table: tinydb.Table\n\n data: dict\n\n unique_fields: list of str\n Name of fields (keys) from `data` which are going to be used to build\n a sample to look for exactly the same values in the database.\n If None, will use every key in `data`.\n\n raise_if_found: bool\n\n Returns\n -------\n eid: int\n Id of the object inserted or the one found with same `unique_fields`.\n\n Raises\n ------\n MoreThanOneItemError\n Raise even with `raise_with_found` == False if it finds more than one item\n with the same values as the sample.\n\n NotUniqueItemError\n If `raise_if_found` is True and an item with the same `unique_fields`\n values from `data` is found in `table`.", "id": "f4104:m3"} {"signature": "def search_sample(table, sample):", "body": "query = _query_sample(sample=sample, operators='')return table.search(query)", "docstring": "Search for items in `table` that have the same field sub-set values as in `sample`.\n\n Parameters\n ----------\n table: tinydb.table\n\n sample: dict\n Sample data\n\n Returns\n -------\n search_result: list of dict\n List of the items found. 
The list is empty if no item is found.", "id": "f4104:m4"} {"signature": "def search_unique(table, sample, unique_fields=None):", "body": "if unique_fields is None:unique_fields = list(sample.keys())query = _query_data(sample, field_names=unique_fields, operators='')items = table.search(query)if len(items) == :return items[]if len(items) == :return Noneraise MoreThanOneItemError(''''.format(len(items)))", "docstring": "Search for items in `table` that have the same field sub-set values as in `sample`.\n Expecting it to be unique, otherwise will raise an exception.\n\n Parameters\n ----------\n table: tinydb.table\n sample: dict\n Sample data\n\n Returns\n -------\n search_result: tinydb.database.Element\n Unique item result of the search.\n\n Raises\n ------\n KeyError:\n If the search returns for more than one entry.", "id": "f4104:m5"} {"signature": "def find_unique(table, sample, unique_fields=None):", "body": "res = search_unique(table, sample, unique_fields)if res is not None:return res.eidelse:return res", "docstring": "Search in `table` an item with the value of the `unique_fields` in the `sample` sample.\n Check if the the obtained result is unique. If nothing is found will return an empty list,\n if there is more than one item found, will raise an IndexError.\n\n Parameters\n ----------\n table: tinydb.table\n\n sample: dict\n Sample data\n\n unique_fields: list of str\n Name of fields (keys) from `data` which are going to be used to build\n a sample to look for exactly the same values in the database.\n If None, will use every key in `data`.\n\n Returns\n -------\n eid: int\n Id of the object found with same `unique_fields`.\n None if none is found.\n\n Raises\n ------\n MoreThanOneItemError\n If more than one example is found.", "id": "f4104:m6"} {"signature": "def _query_sample(sample, operators=''):", "body": "if isinstance(operators, str):operators = [operators] * len(sample)if len(sample) != len(operators):raise ValueError(''''.format(len(sample),operators))queries = []for i, fn in enumerate(sample):fv = sample[fn]op = operators[i]queries.append(_build_query(field_name=fn,field_value=fv,operator=op))return _concat_queries(queries, operators='')", "docstring": "Create a TinyDB query that looks for items that have each field in `sample` with a value\n compared with the correspondent operation in `operators`.\n\n Parameters\n ----------\n sample: dict\n The sample data\n\n operators: str or list of str\n A list of comparison operations for each field value in `sample`.\n If this is a str, will use the same operator for all `sample` fields.\n If you want different operators for each field, remember to use an OrderedDict for `sample`.\n Check TinyDB.Query class for possible choices.\n\n Returns\n -------\n query: tinydb.database.Query", "id": "f4104:m7"} {"signature": "def _query_data(data, field_names=None, operators=''):", "body": "if field_names is None:field_names = list(data.keys())if isinstance(field_names, str):field_names = [field_names]sample = OrderedDict([(fn, data[fn]) for fn in field_names])return _query_sample(sample, operators=operators)", "docstring": "Create a tinyDB Query object that looks for items that confirms the correspondent operator\n from `operators` for each `field_names` field values from `data`.\n\n Parameters\n ----------\n data: dict\n The data sample\n\n field_names: str or list of str\n The name of the fields in `data` that will be used for the query.\n\n operators: str or list of str\n A list of comparison operations for each field value in 
`field_names`.\n If this is a str, will use the same operator for all `field_names`.\n If you want different operators for each field, remember to use an OrderedDict for `data`.\n Check TinyDB.Query class for possible choices.\n\n Returns\n -------\n query: tinydb.database.Query", "id": "f4104:m8"} {"signature": "def _concat_queries(queries, operators=''):", "body": "if not queries:raise ValueError(''.format(queries))if len(queries) == :return queries[]if isinstance(operators, str):operators = [operators] * (len(queries) - )if len(queries) - != len(operators):raise ValueError(''''.format(len(queries),operators))first, rest, end = queries[], queries[:-], queries[-:][]bigop = getattr(first, operators[])for i, q in enumerate(rest):bigop = getattr(bigop(q), operators[i])return bigop(end)", "docstring": "Create a tinyDB Query object that is the concatenation of each query in `queries`.\n The concatenation operator is taken from `operators`.\n\n Parameters\n ----------\n queries: list of tinydb.Query\n The list of tinydb.Query to be joined.\n\n operators: str or list of str\n List of binary operators to join `queries` into one query.\n Check TinyDB.Query class for possible choices.\n\n Returns\n -------\n query: tinydb.database.Query", "id": "f4104:m9"} {"signature": "def _build_query(field_name, field_value, operator=''):", "body": "qelem = where(field_name)if not hasattr(qelem, operator):raise NotImplementedError(''.format(operator))else:query = getattr(qelem, operator)return query(field_value)", "docstring": "Create a tinyDB Query object with the format:\n (where(`field_name`) `operator` `field_value`)\n\n Parameters\n ----------\n field_name: str\n The name of the field to be queried.\n\n field_value:\n The value of the field\n\n operator: str\n The comparison operator.\n Check TinyDB.Query class for possible choices.\n\n Returns\n -------\n query: tinydb.database.Query", "id": "f4104:m10"} {"signature": "def search_by_eid(self, table_name, eid):", "body": "elem = self.table(table_name).get(eid=eid)if elem is None:raise KeyError(''.format(table_name, eid))return elem", "docstring": "Return the element in `table_name` with Object ID `eid`.\n If None is found will raise a KeyError exception.\n\n Parameters\n ----------\n table_name: str\n The name of the table to look in.\n\n eid: int\n The Object ID of the element to look for.\n\n Returns\n -------\n elem: tinydb.database.Element\n\n Raises\n ------\n KeyError\n If the element with ID `eid` is not found.", "id": "f4104:c2:m1"} {"signature": "def insert_unique(self, table_name, data, unique_fields=None, *, raise_if_found=False):", "body": "return insert_unique(table=self.table(table_name),data=_to_string(data),unique_fields=unique_fields,raise_if_found=raise_if_found)", "docstring": "Insert `data` into `table` ensuring that data has unique values\n in `table` for the fields listed in `unique_fields`.\n\n If `raise_if_found` is True, will raise an NotUniqueItemError if\n another item with the same `unique_fields` values are found\n previously in `table`.\n If False, will return the `eid` from the item found.\n\n Parameters\n ----------\n table_name: str\n\n data: dict\n\n unique_fields: list of str\n Name of fields (keys) from `data` which are going to be used to build\n a sample to look for exactly the same values in the database.\n If None, will use every key in `data`.\n\n raise_if_found: bool\n\n Returns\n -------\n eid: int\n Id of the object inserted or the one found with same `unique_fields`.\n\n Raises\n ------\n MoreThanOneItemError\n 
Raise even with `raise_with_found` == False if it finds more than one item\n with the same values as the sample.\n\n NotUniqueItemError\n If `raise_if_found` is True and an item with the same `unique_fields`\n values from `data` is found in `table`.", "id": "f4104:c2:m2"} {"signature": "def search_unique(self, table_name, sample, unique_fields=None):", "body": "return search_unique(table=self.table(table_name),sample=sample,unique_fields=unique_fields)", "docstring": "Search in `table` an item with the value of the `unique_fields` in the `data` sample.\n Check if the the obtained result is unique. If nothing is found will return an empty list,\n if there is more than one item found, will raise an IndexError.\n\n Parameters\n ----------\n table_name: str\n\n sample: dict\n Sample data\n\n unique_fields: list of str\n Name of fields (keys) from `data` which are going to be used to build\n a sample to look for exactly the same values in the database.\n If None, will use every key in `data`.\n\n Returns\n -------\n eid: int\n Id of the object found with same `unique_fields`.\n None if none is found.\n\n Raises\n ------\n MoreThanOneItemError\n If more than one example is found.", "id": "f4104:c2:m3"} {"signature": "def search_sample(self, table_name, sample):", "body": "return search_sample(table=self.table(table_name),sample=sample)", "docstring": "Search for items in `table` that have the same field sub-set values as in `sample`.\n\n Parameters\n ----------\n table_name: str\n\n sample: dict\n Sample data\n\n Returns\n -------\n search_result: list of dict\n List of the items found. The list is empty if no item is found.", "id": "f4104:c2:m4"} {"signature": "def is_unique(self, table_name, sample, unique_fields=None):", "body": "try:eid = find_unique(self.table(table_name),sample=sample,unique_fields=unique_fields)except:return Falseelse:return eid is not None", "docstring": "Return True if an item with the value of `unique_fields`\n from `data` is unique in the table with `table_name`.\n False if no sample is found or more than one is found.\n\n See function `find_unique` for more details.\n\n Parameters\n ----------\n table_name: str\n\n sample: dict\n Sample data for query\n\n unique_fields: str or list of str\n\n Returns\n -------\n is_unique: bool", "id": "f4104:c2:m5"} {"signature": "def update_unique(self, table_name, fields, data, cond=None, unique_fields=None,*, raise_if_not_found=False):", "body": "eid = find_unique(self.table(table_name), data, unique_fields)if eid is None:if raise_if_not_found:msg = ''.format(table_name, data)if cond is not None:msg += ''.format(cond)raise IndexError(msg)else:self.table(table_name).update(_to_string(fields), cond=cond, eids=[eid])return eid", "docstring": "Update the unique matching element to have a given set of fields.\n\n Parameters\n ----------\n table_name: str\n\n fields: dict or function[dict -> None]\n new data/values to insert into the unique element\n or a method that will update the elements.\n\n data: dict\n Sample data for query\n\n cond: tinydb.Query\n which elements to update\n\n unique_fields: list of str\n\n raise_if_not_found: bool\n Will raise an exception if the element is not found for update.\n\n Returns\n -------\n eid: int\n The eid of the updated element if found, None otherwise.", "id": "f4104:c2:m6"} {"signature": "def count(self, table_name, sample):", "body": "return len(list(search_sample(table=self.table(table_name),sample=sample)))", "docstring": "Return the number of items that match the `sample` field values\n in 
table `table_name`.\n Check function search_sample for more details.", "id": "f4104:c2:m7"} {"signature": "def get_requirements(*args):", "body": "install_deps = []try:for fpath in args:install_deps.extend([str(d.req or d.url) for d in parse_requirements(fpath)])except:print(''.format(fpath))return [dep for dep in install_deps if dep != '']", "docstring": "Parse all requirements files given and return a list of the dependencies", "id": "f4105:m0"} {"signature": "def recursive_glob(base_directory, regex=None):", "body": "if regex is None:regex = ''files = glob(os.path.join(base_directory, regex))for path, dirlist, filelist in os.walk(base_directory):for ignored in IGNORE:try:dirlist.remove(ignored)except:passfor dir_name in dirlist:files.extend(glob(os.path.join(path, dir_name, regex)))return files", "docstring": "Uses glob to find all files that match the regex in base_directory.\n\n @param base_directory: string\n\n @param regex: string\n\n @return: set", "id": "f4105:m1"} {"signature": "@baker.command(default=True,shortopts={'': '','': '','': '','': ''})def copy(configfile='', destpath='', overwrite=False, sub_node=''):", "body": "log.info(''.format(os.path.basename(__file__),whoami(),locals()))assert(os.path.isfile(configfile))if os.path.exists(destpath):if os.listdir(destpath):raise FolderAlreadyExists(''''.format(destpath))else:log.info(''.format(destpath))path(destpath).makedirs_p()from boyle.files.file_tree_map import FileTreeMapfile_map = FileTreeMap()try:file_map.from_config_file(configfile)except Exception as e:raise FileTreeMapError(str(e))if sub_node:sub_map = file_map.get_node(sub_node)if not sub_map:raise FileTreeMapError(''''.format(sub_node))file_map._filetree = {}file_map._filetree[sub_node] = sub_maptry:file_map.copy_to(destpath, overwrite=overwrite)except Exception as e:raise FileTreeMapError(str(e))", "docstring": "Copies the files in the built file tree map\n to despath.\n\n :param configfile: string\n Path to the FileTreeMap config file\n\n :param destpath: string\n Path to the files destination\n\n :param overwrite: bool\n Overwrite files if they already exist.\n\n :param sub_node: string\n Tree map configuration sub path.\n Will copy only the contents within this sub-node", "id": "f4107:m0"} {"signature": "def get_noneid_references(self):", "body": "try:nun = np.array(None).astype(self.dtype)return np.array(self.reflst)[self == nun]except:nun = Nonereturn np.array(self.reflst)[self is None]", "docstring": "Returns\n-------\nndarray\nArray of references in self.reflst whose self id is None.", "id": "f4108:c1:m3"} {"signature": "def _print_general_vs_table(self, idset1, idset2):", "body": "ref1name = ''set1_hasref = isinstance(idset1, idset_with_reference)if set1_hasref:ref1arr = np.array(idset1.reflst)ref1name = idset1.refnameref2name = ref1nameset2_hasref = isinstance(idset2, idset_with_reference)if set2_hasref:ref2arr = np.array(idset2.reflst)ref2name = idset2.refnameelse:ref2name = ref1namehdr11 = ''.format(idset1.name, idset2.name)hdr12 = ''.format(idset1.name, idset2.name, ref2name)hdr13 = ''.format(idset1.name, idset2.name)hdr14 = ''.format(idset1.name, idset2.name, ref1name)table = [[hdr11, hdr12, hdr13, hdr14]]set1 = set(idset1)set2 = set(idset2)row11 = list(set1 - set2)if set1_hasref:row12 = [ref1arr[np.where(idset1 == nom)][] for nom in row11]else:row12 = ['' for _ in row11]row13 = list(set2 - set1)if set2_hasref:row14 = [ref2arr[np.where(idset2 == nom)][] for nom in row13]else:row14 = ['' for _ in row13]tablst = self._tabulate_4_lists(row11, row12, row13, 
row14)table.extend(tablst)if len(table) > :print(tabulate(table, headers=''))print('')", "docstring": ":param idset1:\n:param idset2:", "id": "f4108:c2:m4"} {"signature": "def _print_foreign_repetition_table(self, idset1, idset2):", "body": "assert(isinstance(idset1, idset_with_reference))assert(isinstance(idset2, idset))reps = idset2.get_repetitions()if len(reps) < :returnrefs = np.array(idset1.reflst)table = [[''.format(idset1.name,idset1.refname,idset2.name),'']]for rep in reps:if np.any(idset1 == rep):matches = refs[np.where(idset1 == rep)]myrep = repfor m in matches:table.append([myrep, m])myrep = ''print(tabulate(table, headers=''))print('')", "docstring": ":param idset1:\n:param idset2:", "id": "f4108:c2:m5"} {"signature": "def print_compare_idsets_one_ref(self, idset1_name, idset2_name):", "body": "try:idset1 = self[idset1_name]idset2 = self[idset2_name]except KeyError as ke:log.error(''.format(idset1_name,idset2_name))import sys, pdbpdb.post_mortem(sys.exc_info()[])raiseassert(isinstance(idset1, idset_with_reference))assert(isinstance(idset2, idset))self._print_general_vs_table(idset1, idset2)self._print_foreign_repetition_table(idset1, idset2)", "docstring": "idset1_name: string\nkey of an idset_with_reference\n\nidset2_name: string\nkey of an idset", "id": "f4108:c2:m7"} {"signature": "def print_compare_idsets_two_refs(self, idset1_name, idset2_name):", "body": "try:idset1 = self[idset1_name]idset2 = self[idset2_name]except KeyError as ke:log.error(''.format(idset1_name,idset2_name))import sys, pdbpdb.post_mortem(sys.exc_info()[])raiseassert(isinstance(idset1, idset_with_reference))assert(isinstance(idset2, idset_with_reference))self._print_general_vs_table(idset1, idset2)self._print_foreign_repetition_table(idset1, idset2)self._print_foreign_repetition_table(idset2, idset1)", "docstring": "idset1_name: string\nkey of an idset_with_reference\n\nidset2_name: string\nkey of an idset", "id": "f4108:c2:m8"} {"signature": "@baker.command(name='',params={\"\": \"\",\"\": \"\",\"\": \"\"\"\"\"\",\"\": \"\"\"\"\"\"},shortopts={'': '', '': '', '': '', '': ''})def convert_sav(inputfile, outputfile=None, method='', otype=''):", "body": "assert(os.path.isfile(inputfile))assert(method=='' or method=='')if method == '':df = sav_to_pandas_rpy2(inputfile)elif method == '':df = sav_to_pandas_savreader(inputfile)otype_exts = {'': '', '': '', '': '','': '','': '','': '','': ''}if outputfile is None:outputfile = inputfile.replace(path(inputfile).ext, '')outputfile = add_extension_if_needed(outputfile, otype_exts[otype])if otype == '':df.to_csv(outputfile)elif otype == '':df.to_hdf(outputfile, os.path.basename(outputfile))elif otype == '':df.to_stata(outputfile)elif otype == '':df.to_json(outputfile)elif otype == '':df.to_pickle(outputfile)elif otype == '':df.to_excel(outputfile)elif otype == '':df.to_html(outputfile)else:df.to_csv(outputfile)", "docstring": "Transforms the input .sav SPSS file into other format.\n If you don't specify an outputfile, it will use the\n inputfile and change its extension to .csv", "id": "f4109:m0"} {"signature": "def findGoodTests():", "body": "pathSplit = os.environ[''].split('')if '' not in pathSplit:pathSplit = [''] + pathSplitos.environ[''] = ''.join(pathSplit)result = ''success = Falsefor path in pathSplit:if path.endswith(''):path = path[:-]guess = path + ''if os.path.exists(guess):success = Trueresult = guessbreakreturn {'' : result,\"\" : success }", "docstring": "findGoodTests - Tries to find GoodTests.py\n\n@return {\n 'path' -> Path to GoodTests.py (for 
execution)\n 'success' -> True/False if we successfully found GoodTests.py\n}", "id": "f4110:m0"} {"signature": "def setDefaultIREncoding(encoding):", "body": "try:b''.decode(encoding)except:raise ValueError('' %(str(encoding), ))global defaultIREncodingdefaultIREncoding = encoding", "docstring": "setDefaultIREncoding - Sets the default encoding used by IndexedRedis.\n This will be the default encoding used for field data. You can override this on a\n per-field basis by using an IRField (such as IRUnicodeField or IRRawField)\n\n@param encoding - An encoding (like utf-8)", "id": "f4140:m0"} {"signature": "def getDefaultIREncoding():", "body": "global defaultIREncodingreturn defaultIREncoding", "docstring": "getDefaultIREncoding - Get the default encoding that IndexedRedis will use for all field data.\n You can override this on a per-field basis by using an IRField (such as IRUnicodeField or IRRawField)\n\n @return - Default encoding string", "id": "f4140:m1"} {"signature": "def isEncodedString(x):", "body": "return issubclass(x.__class__, encoded_str_type)", "docstring": "isEncodedString - Check if a given string is \"encoded\" with a codepage.\n\n Note this means UNICODE, not BYTES, even though python uses \"decode\" to apply an encoding and \"encode\" to get the raw bytes..", "id": "f4140:m2"} {"signature": "def __init__(self, name=''):", "body": "self.valueType = Noneself.defaultValue = ''", "docstring": "__init__ - Create an IRRawField. Only takes a name\n\n@param name - Field name\n\nThis field type does not support indexing.", "id": "f4142:c0:m0"} {"signature": "def __init__(self, name='', defaultValue=irNull, encoding=None):", "body": "self.valueType = Noneself.encoding = encodingself.defaultValue = defaultValue", "docstring": "__init__ - Create an IRBase64Field object.\n\n@param name - Field name\n\n@param defaultValue (Default irNull) - Default value of field\n\n@param encoding (default None) - An explicit encoding to use when converting to bytes. If None, the global defaultIREncoding will be used.\n\n\nAn IRBytesField is indexable and the index is forced to be hashed", "id": "f4143:c0:m0"} {"signature": "def __new__(self, val=''):", "body": "return IrNullBaseType.__new__(self, '')", "docstring": "Don't let this be assigned a value.", "id": "f4144:c0:m0"} {"signature": "def __init__(self, name='', compressMode=COMPRESS_MODE_ZLIB, defaultValue=irNull):", "body": "self.valueType = Noneself.defaultValue = defaultValueif compressMode == COMPRESS_MODE_ZLIB or compressMode in _COMPRESS_MODE_ALIASES_ZLIB:self.compressMode = COMPRESS_MODE_ZLIBself.header = b''self.extraCompressArgs = (, )elif compressMode == COMPRESS_MODE_BZ2 or compressMode in _COMPRESS_MODE_ALIASES_BZ2:self.compressMode = COMPRESS_MODE_BZ2self.header = b''self.extraCompressArgs = (, )elif compressMode == COMPRESS_MODE_LZMA or compressMode in _COMPRESS_MODE_ALIASES_LZMA:self.compressMode = COMPRESS_MODE_LZMAself.header = b''self.extraCompressArgs = tuple()self.getCompressMod() else:raise ValueError('' %(str(compressMode), name))", "docstring": "__init__ - Create this object\n\n@param name - Field name\n@param compressMode , default \"zlib\". Determines the compression module to use\n for this field. 
See COMPRESS_MODE_* variables in this module.\n\n Supported values as of 5.0.0 are:\n\n \"zlib\" / \"gz\" / \"gzip\" - zlib compression\n\n \"bz2\" / \"bzip2\" - bzip2 compression\n\n \"lzma\" / \"xz\" - LZMA compression.\n NOTE: This is provided in python3 by default, but in python2 you will need an external module.\n IndexedRedis will automatically detect if \"backports.lzma\" or \"lzmaffi\" are installed, and use them\n if the core \"lzma\" module is not available.\n\n@param defaultValue - The default value for this field\n\nAn IRCompressedField is indexable, and forces the index to be hashed.", "id": "f4145:c0:m0"} {"signature": "def getCompressMod(self):", "body": "if self.compressMode == COMPRESS_MODE_ZLIB:return zlibif self.compressMode == COMPRESS_MODE_BZ2:return bz2if self.compressMode == COMPRESS_MODE_LZMA:global _lzmaModif _lzmaMod is not None:return _lzmaModtry:import lzma_lzmaMod = lzmareturn _lzmaModexcept:try:from backports import lzma_lzmaMod = lzmareturn _lzmaModexcept:passtry:import lzmaffi as lzma_lzmaMod = lzmareturn _lzmaModexcept:passraise ImportError(\"\")", "docstring": "getCompressMod - Return the module used for compression on this field\n\n@return - The module for compression", "id": "f4145:c0:m1"} {"signature": "def __init__(self, pk=None, foreignModel=None, obj=None):", "body": "self.pk = pkself.obj = objif foreignModel is not None:if issubclass(foreignModel.__class__, weakref.ReferenceType):foreignModel = foreignModel()self._foreignModel = weakref.ref(foreignModel)else:self._foreignModel = None", "docstring": "__init__ - Create a ForeignLinkData object\n\n@param pk - The primary key of the foreign object\n@param obj - The resolved object, or None if not yet resolved", "id": "f4146:c1:m0"} {"signature": "@propertydef foreignModel(self):", "body": "return self._foreignModel()", "docstring": "foreignModel - Resolve and return the weakref to the associated foreign model", "id": "f4146:c1:m2"} {"signature": "def getObj(self):", "body": "if self.obj is None:if not self.pk:return Noneself.obj = self.foreignModel.objects.get(self.pk)return self.obj", "docstring": "getObj - Fetch (if not fetched) and return the obj associated with this data.", "id": "f4146:c1:m3"} {"signature": "def getObjs(self):", "body": "return [ self.getObj() ]", "docstring": "getObjs - Fetch (if not fetched) and return the obj associated with this data.\n\n Output is iterable.", "id": "f4146:c1:m4"} {"signature": "def getPk(self):", "body": "if not self.pk and self.obj:if self.obj._id:self.pk = self.obj._idreturn self.pk", "docstring": "getPk - Resolve any absent pk's off the obj's (like if an obj has been saved), and return the pk.", "id": "f4146:c1:m5"} {"signature": "def isFetched(self):", "body": "return not bool(self.obj is None)", "docstring": "isFetched - Check if the associated obj has been fetched or not.", "id": "f4146:c1:m7"} {"signature": "def objHasUnsavedChanges(self):", "body": "if not self.obj:return Falsereturn self.obj.hasUnsavedChanges(cascadeObjects=True)", "docstring": "objHasUnsavedChanges - Check if any object has unsaved changes, cascading.", "id": "f4146:c1:m11"} {"signature": "def __init__(self, pk=None, foreignModel=None, obj=None):", "body": "ForeignLinkData.__init__(self, pk, foreignModel, obj)pk = self.pkobj = self.objif pk and not obj:self.obj = [None for i in range(len(pk))]elif obj and not pk:self.pk = []for thisObj in obj:if thisObj._id:self.pk.append(thisObj._id)else:raise ValueError('')elif len(obj) != len(pk):if len(pk) > len(obj):self.obj += [None for i in 
range( len(pk) - len(obj) ) ]else:for thisObj in obj[len(pk):]:if thisObj._id:self.pk.append(thisObj._id)else:raise ValueError('')", "docstring": "__init__ - Create a ForeignLinkMultiData\n\n@see ForeignLinkData", "id": "f4146:c2:m0"} {"signature": "def getPk(self):", "body": "if not self.pk or None in self.pk:for i in range( len(self.pk) ):if self.pk[i]:continueif self.obj[i] and self.obj[i]._id:self.pk[i] = self.obj[i]._idreturn self.pk", "docstring": "getPk - @see ForeignLinkData.getPk", "id": "f4146:c2:m1"} {"signature": "def getObj(self):", "body": "if self.obj:needPks = [ (i, self.pk[i]) for i in range(len(self.obj)) if self.obj[i] is None]if not needPks:return self.objfetched = list(self.foreignModel.objects.getMultiple([needPk[] for needPk in needPks]))i = for objIdx, pk in needPks:self.obj[objIdx] = fetched[i]i += return self.obj", "docstring": "getObj - @see ForeignLinkData.getObj\n\n Except this always returns a list", "id": "f4146:c2:m3"} {"signature": "def getObjs(self):", "body": "return self.getObj()", "docstring": "getObjs - @see ForeignLinkData.getObjs", "id": "f4146:c2:m4"} {"signature": "def isFetched(self):", "body": "if not self.obj:return Falseif not self.pk or None in self.obj:return Falsereturn not bool(self.obj is None)", "docstring": "isFetched - @see ForeignLinkData.isFetched", "id": "f4146:c2:m5"} {"signature": "def objHasUnsavedChanges(self):", "body": "if not self.obj:return Falsefor thisObj in self.obj:if not thisObj:continueif thisObj.hasUnsavedChanges(cascadeObjects=True):return Truereturn False", "docstring": "objHasUnsavedChanges - @see ForeignLinkData.objHasUnsavedChanges\n\nTrue if ANY object has unsaved changes.", "id": "f4146:c2:m6"} {"signature": "def __init__(self, name='', foreignModel=None):", "body": "IRField.__init__(self, name, valueType=int, defaultValue=irNull)if foreignModel:if not isinstance(foreignModel, type):raise ValueError('')if not hasattr(foreignModel, ''):raise ValueError('')self._foreignModel = weakref.ref(foreignModel)", "docstring": "__init__ - Create an IRForeignLinkField. 
Only takes a name\n\n@param name - Field name\n\nThis field type does not support indexing.", "id": "f4146:c4:m0"} {"signature": "@propertydef foreignModel(self):", "body": "return self._foreignModel()", "docstring": "foreignModel - Resolve and return the weakref to the associated Foreign Model", "id": "f4146:c4:m1"} {"signature": "def isMulti(self):", "body": "return False", "docstring": "isMulti - Returns True if this is a MultiLink object (expects lists), otherwise False (expects object)\n\n@return ", "id": "f4146:c4:m8"} {"signature": "def isMulti(self):", "body": "return True", "docstring": "isMulti - Returns True if this is a MultiLink object (expects lists), otherwise False (expects object)\n\n@return ", "id": "f4146:c5:m3"} {"signature": "def __init__(self, name='', defaultValue=irNull):", "body": "self.valueType = Noneself.defaultValue = defaultValue", "docstring": "__init__ - Create an IRPickleField\n\n@param name - Field name\n\n@param defaultValue - The default value of this field\n\nBecause even with the same format, python2 and python3 can output different pickle strings for the same object,\n as well as different host configurations may lead to different output, this field type is not indexable.", "id": "f4147:c0:m0"} {"signature": "def __init__(self, name='', decimalPlaces=, defaultValue=irNull):", "body": "self.decimalPlaces = decimalPlacesif isinstance(defaultValue, int):defaultValue = float(defaultValue)elif isinstance(defaultValue, float):defaultValue = round(defaultValue, decimalPlaces)self.defaultValue = defaultValue", "docstring": "__init__ - Create this object.\n\n@param name - Field name (or blank if used in an IRFieldChain)\n\n@param decimalPlaces - The number of decimal places to use (precision). Values will be rounded to this many places, and always have\n this many digits after the decimal point.\n\n@param defaultValue - The default value for this field\n\nAn IRFixedPointField is indexable, and has no option to hash the index.", "id": "f4148:c0:m0"} {"signature": "def __init__(self, name='', valueType=str, defaultValue=irNull, hashIndex=False):", "body": "self.defaultValue = defaultValueself.hashIndex = hashIndexif valueType in (str, unicode):valueType = strself._fromStorage = self._convertStrself._fromInput = self._convertStrself._toStorage = self._convertStrelif bytes != str and valueType == bytes:valueType = bytesself._fromStorage = self._convertBytesself._fromInput = self._convertBytesself._toStorage = self._convertBytesself.CAN_INDEX = Falseelif valueType in (None, type(None)):self._fromStorage = self._noConvertself._fromInput = self._noConvertself._toStorage = self._noConvertself.CAN_INDEX = Falseelif valueType in (dict, list, tuple):ecatedMessage('' %(repr(name), valueType.__name__, repr(name)), printStack=True)valueType = IRJsonValueself.CAN_INDEX = IRJsonValue.CAN_INDEXelif valueType == datetime:ecatedMessage('' %(repr(name), repr(name)), printStack=True)valueType = IRDatetimeValueself.CAN_INDEX = IRDatetimeValue.CAN_INDEXelse:if not isinstance(valueType, type):raise TypeError('' %(repr(valueType,)))if valueType == bool:self._fromStorage = self._convertBoolself._fromInput = self._convertBoolelif isinstance(valueType, (set, frozenset, )):raise TypeError('')self.valueType = valueTypeif valueType in (str, unicode, int, bool):self.CAN_INDEX = Trueelif hasattr(valueType, ''):self.CAN_INDEX = valueType.CAN_INDEX", "docstring": "__init__ - Create an IRField. 
Use this directly in the FIELDS array for advanced functionality on a field.\n\n@param name - The name of this field\n\n@param valueType - The type that will be used for this field. Default str/unicode (and bytes on python2)\n act the same as non-IRField FIELDS entries (just plain string), i.e. they are encoded to unicode to and from Redis.\n\n If you pass in None, then no decoding will take place (so whatever you provide goes in, and bytes come out of Redis).\n This is similar to IRFieldRaw\n\n On python3, if you pass bytes, than the field will be left as bytes.\n To be both python2 and python3 compatible, however, you can use IRBytesField\n\n If bool is used, then \"1\" and \"true\" are True, \"0\" and \"false\" are False, any other value is an exception.\n\n You can also pass an advanced type (see IndexedRedis.fields.FieldValueTypes) like datetime and json.\n\n All types other than string/unicode/bytes/None will be assigned 'irNull' if they were not provided a value.\n @see irNull - Equals only irNull (or other IRNullType instances). You can use this to check if an integer is defined versus 0, for example.\n\n While this class is create for primitive types (like int's and datetimes), more complex types extend IRField (such as pickle, compressed, or unicode with a specific encoding).\n\n@param defaultValue (default irNull) - The value assigned to this field as a \"default\", i.e. when no value has yet been set. Generally, it makes sense to keep this as irNull, but you may want a different default.\n\n@param hashIndex (default False) - If true, the md5 hash of the value will be used for indexing and filtering. This may be useful for very long fields.\n\nAn IRField may be indexable (depending on the type), and has the option to hash the index\n\nNOTE: If you are extending IRField, you should probably not call this __init__ function. 
So long as you implement your own \"convert\", any fields used are set on a class-level.", "id": "f4149:c0:m0"} {"signature": "def toStorage(self, value):", "body": "if value == irNull or None:return IR_NULL_STRreturn self._toStorage(value)", "docstring": "toStorage - Convert the value to a string representation for storage.\n\n The default implementation will work here for basic types.\n\n@param value - The value of the item to convert\n@return A string value suitable for storing.", "id": "f4149:c0:m1"} {"signature": "def _toStorage(self, value):", "body": "return to_unicode(value)", "docstring": "_toStorage - Convert the value to a string for storage.\n\nThe default implementation works for most valueTypes within IRField, override this for extending types.\n\nYou don't need to handle null\n\n@param value - Value of item to convert\n\n@return - A string value suitable for storing", "id": "f4149:c0:m2"} {"signature": "def fromStorage(self, value):", "body": "if value in IR_NULL_STRINGS:return irNullreturn self._fromStorage(value)", "docstring": "fromStorage - Convert the value from storage to the value type.\n\n@param value - Value to convert\n\n@return - The converted value", "id": "f4149:c0:m3"} {"signature": "def _fromStorage(self, value):", "body": "return self.valueType(value)", "docstring": "_fromStorage - Convert the value from storage to the value type.\n\n This default impl works fine for most value types, should be implemented by extending types.\n\n @param value - Value to convert\n\n @return - Converted value", "id": "f4149:c0:m4"} {"signature": "def fromInput(self, value):", "body": "if value == irNull:return irNullreturn self._fromInput(value)", "docstring": "fromInput - Convert the value from input (like assigning this through constructor or as an item assignment on the object\n\n@param value - Value to convert\n\n@return - Converted value", "id": "f4149:c0:m6"} {"signature": "def _fromInput(self, value):", "body": "return self.valueType(value)", "docstring": "_fromInput - Convert the value from input. Implement this in extending types.\n\n@param value - Value to convert\n\n@return converted value", "id": "f4149:c0:m7"} {"signature": "def toIndex(self, value):", "body": "if self._isIrNull(value):ret = IR_NULL_STRelse:ret = self._toIndex(value)if self.isIndexHashed is False:return retreturn md5(tobytes(ret)).hexdigest()", "docstring": "toIndex - An optional method which will return the value prepped for index.\n\nBy default, \"toStorage\" will be called. If you provide \"hashIndex=True\" on the constructor,\nthe field will be md5summed for indexing purposes. 
This is useful for large strings, etc.", "id": "f4149:c0:m8"} {"signature": "def getDefaultValue(self):", "body": "return self.defaultValue", "docstring": "getDefaultValue - Gets the default value associated with this field.\n\n This is the value used when no value has been explicitly set.\n\n @return - The default value", "id": "f4149:c0:m9"} {"signature": "@propertydef isIndexHashed(self):", "body": "return bool(self.hashIndex)", "docstring": "isIndexHashed - Returns if the index value should be hashed\n\n@return - True if this field should be hashed before indexing / filtering", "id": "f4149:c0:m10"} {"signature": "@propertydef name(self):", "body": "return str(self)", "docstring": "name - Property, return this field's name\n\n@return - Field name", "id": "f4149:c0:m11"} {"signature": "@staticmethoddef _isNullValue(value):", "body": "return bool(value in (b'', '', irNull, None) or value in IR_NULL_STRINGS )", "docstring": "_isNullValue - Tests value if it should be represented by irNull.\n\nconvert and toStorage should test if value is null and return null (for most types)", "id": "f4149:c0:m16"} {"signature": "def _getReprProperties(self):", "body": "ret = []if getattr(self, '', None) is not None:ret.append('' %(self.valueType.__name__, ))if hasattr(self, ''):ret.append('' %(self.hashIndex, ))return ret", "docstring": "_getReprProperties - Get the properties of this field to display in repr().\n\n These should be in the form of $propertyName=$propertyRepr\n\n The default IRField implementation handles just the \"hashIndex\" property.\n\n defaultValue is part of \"__repr__\" impl. You should just extend this method\n with your object's properties instead of rewriting repr.", "id": "f4149:c0:m18"} {"signature": "def __repr__(self):", "body": "ret = [ self.__class__.__name__, '', '' %(str(self), ) ]reprProperties = self._getReprProperties()defaultValue = self.getDefaultValue()if defaultValue == irNull:reprProperties.append('')else:reprProperties.append('' %(repr(defaultValue), ))ret.append('')ret.append(''.join(reprProperties))ret.append('')return ''.join(ret)", "docstring": "__repr__ - Return an object-representation string of this field instance.\n\nYou should NOT need to extend this on your IRField, instead just implement _getReprProperties\n\n to return your type's specific properties associated with this instance.\n\n @see _getReprProperties", "id": "f4149:c0:m19"} {"signature": "def copy(self):", "body": "return self.__class__(name=self.name, valueType=self.valueType, defaultValue=self.defaultValue, hashIndex=self.hashIndex)", "docstring": "copy - Create a copy of this IRField.\n\n Each subclass should implement this, as you'll need to pass in the args to constructor.\n\n@return - Another IRField that has all the same values as this one.", "id": "f4149:c0:m20"} {"signature": "def __init__(self, name=''):", "body": "IRField.__init__(self, name=name, valueType=str, defaultValue='')", "docstring": "__init__ - Create an IRClassicField.\n This field behaves as a plain string entry in FIELDS did prior to 5.0.0.\n\n The default value is empty string, and it stores strings using defaultIREncoding.\n\n Consider changing to an IRField or one of the other many types.\n\n @param name - Field name", "id": "f4151:c0:m0"} {"signature": "def __init__(self, name='', encoding=None, defaultValue=irNull):", "body": "self.valueType = Noneself.encoding = encodingself.defaultValue = defaultValue", "docstring": "__init__ - Create an IRUnicodeField\n\n@param name - The field name\n\n@param encoding - A 
specific encoding to use. If None, defaultIREncoding will be used.\n\n@param defaultValue - The default value for this field\n\nThis field type is indeaxble, and the index is forced to be hashed.", "id": "f4152:c0:m0"} {"signature": "def getEncoding(self):", "body": "if not self.encoding:return getDefaultIREncoding()return self.encoding", "docstring": "getEncoding - Get the encoding codec associated with this field.\n\n If you provided None, this will return the defaultIREncoding\n\n@return - Encoding", "id": "f4152:c0:m1"} {"signature": "def toBytes(self, value):", "body": "if type(value) == bytes:return valuereturn value.encode(self.getEncoding())", "docstring": "toBytes - Convert a value to bytes using the encoding specified on this field\n\n@param value - The field to convert to bytes\n\n@return - The object encoded using the codec specified on this field.\n\nNOTE: This method may go away.", "id": "f4152:c0:m5"} {"signature": "def __init__(self, name, chainedFields, defaultValue=irNull, hashIndex=False):", "body": "if not name:raise ValueError('')self.chainedFields = []if not chainedFields:raise ValueError('')for field in chainedFields:if type(field) == type:field = field()if str(field) != '':raise ValueError('')if not issubclass(field.__class__, IRField):raise ValueError(\"\" %(str(type(field)), repr(field)))self.chainedFields.append(field)self.defaultValue = defaultValue(canIndex, forceHashIndex) = self._checkCanIndex()self.CAN_INDEX = canIndexif hashIndex is False:hashIndex = forceHashIndexself.hashIndex = hashIndex", "docstring": "__init__ - Create an IRFieldChain object.\n\n These chain together the operations from multiple fields.\n\n toStorage is applied left-to-right,\n fromInput and fromStorage is applied right-to-left.\n\n The output of one field is the input of the next.\n\n An IRFieldChain is indexable so long as ALL of its contained fields are indexable.\n An IRFieldChain is forced to be a hashed index if the rightmost field forces it (as some types do),\n otherwise hashIndex will determine if it is hashed.\n\n@see IRField.__init__", "id": "f4153:c0:m0"} {"signature": "def _toStorage(self, value):", "body": "for chainedField in self.chainedFields:value = chainedField.toStorage(value)return value", "docstring": "_toStorage - Convert the value to a string representation for storage.\n\n@param value - The value of the item to convert\n@return A string value suitable for storing.", "id": "f4153:c0:m1"} {"signature": "def _fromStorage(self, value):", "body": "for chainedField in reversed(self.chainedFields):value = chainedField._fromStorage(value)return value", "docstring": "_fromStorage - Convert the value from storage (string) to the value type.\n\n@return - The converted value, or \"irNull\" if no value was defined (and field type is not default/string)", "id": "f4153:c0:m2"} {"signature": "def _checkCanIndex(self):", "body": "if not self.chainedFields:return (False, False)for chainedField in self.chainedFields:if chainedField.CAN_INDEX is False:return (False, False)return (True, self.chainedFields[-].hashIndex)", "docstring": "_checkCanIndex - Check if we CAN index (if all fields are indexable).\n Also checks the right-most field for \"hashIndex\" - if it needs to hash we will hash.", "id": "f4153:c0:m6"} {"signature": "def __init__(self, name='', defaultValue=irNull, encoding=None):", "body": "self.valueType = Noneself.defaultValue = defaultValueself.encoding = encoding", "docstring": "__init__ - Create an IRBytesField object\n\n@param name - Field name\n\n@param 
defaultValue default irNull - Default value for this field\n\n@param encoding - If None, defaultIREncoding will be used when converting to bytes,\n otherwise you can provide an explicit encoding\n\nAn IRBytesField is indexable, and the index is forced to be hashed.", "id": "f4154:c0:m0"} {"signature": "def __init__(self, val=None, mdl=None):", "body": "if val is None:QueryableListObjs.__init__(self)else:QueryableListObjs.__init__(self, val)self.mdl = mdlif not mdl:self.mdl = self.getModel()else:self.__validate_model(mdl)", "docstring": "__init__ - Create this object\n\n@param val - None for empty list, IndexedRedisModel for a one-item list, or a list/tuple or subclass of initial values.\n@param mdl - The IndexedRedisModel that this list will contain. Provide this now if you can, otherwise it will be inferred from\n the first item added or present in the list.\n\n@raises ValueError if \"mdl\" is not an IndexedRedisModel", "id": "f4155:c0:m0"} {"signature": "@staticmethoddef __validate_model(mdl):", "body": "if not hasattr(mdl, ''):raise ValueError('' %(str(mdl.__class__.__name__),))", "docstring": "__validate_model - Internal function to check that model is of correct type.\n\nUses a class variable that has been defined for IndexedRedisModel s for a long time, not the type itself, to prevent circular imports etc.\n\n@param mdl - type to validate", "id": "f4155:c0:m1"} {"signature": "def getModel(self):", "body": "if not self.mdl and len(self) > :mdl = self[].__class__self.__validate_model(mdl)self.mdl = mdlreturn self.mdl", "docstring": "getModel - get the IndexedRedisModel associated with this list. If one was not provided in constructor,\n it will be inferred from the first item in the list (if present)\n\n @return - None if none could be found, otherwise the IndexedRedisModel type of the items in this list.\n\n@raises ValueError if first item is not the expected type.", "id": "f4155:c0:m2"} {"signature": "def delete(self):", "body": "if len(self) == :return mdl = self.getModel()return mdl.deleter.deleteMultiple(self)", "docstring": "delete - Delete all objects in this list.\n\n@return - Number of objects deleted", "id": "f4155:c0:m3"} {"signature": "def save(self):", "body": "if len(self) == :return []mdl = self.getModel()return mdl.saver.save(self)", "docstring": "save - Save all objects in this list", "id": "f4155:c0:m4"} {"signature": "def reload(self):", "body": "if len(self) == :return []ret = []for obj in self:res = Nonetry:res = obj.reload()except Exception as e:res = eret.append(res)return ret", "docstring": "reload - Reload all objects in this list. \n Updates in-place. To just fetch all these objects again, use \"refetch\"\n\n@return - List (same order as current objects) of either exception (KeyError) if operation failed,\n or a dict of fields changed -> (old, new)", "id": "f4155:c0:m5"} {"signature": "def refetch(self):", "body": "if len(self) == :return IRQueryableList()mdl = self.getModel()pks = [item._id for item in self if item._id]return mdl.objects.getMultiple(pks)", "docstring": "refetch - Fetch a fresh copy of all items in this list.\n Returns a new list. 
To update in-place, use \"reload\".\n\n@return IRQueryableList - List of fetched items", "id": "f4155:c0:m6"} {"signature": "def setDefaultRedisConnectionParams( connectionParams ):", "body": "global _defaultRedisConnectionParams_defaultRedisConnectionParams.clear()for key, value in connectionParams.items():_defaultRedisConnectionParams[key] = valueclearRedisPools()", "docstring": "setDefaultRedisConnectionParams - Sets the default parameters used when connecting to Redis.\n\n This should be the args to redis.Redis in dict (kwargs) form.\n\n @param connectionParams - A dict of connection parameters.\n Common keys are:\n\n host - hostname/ip of Redis server (default '127.0.0.1')\n port - Port number\t\t\t(default 6379)\n db - Redis DB number\t\t(default 0)\n\n Omitting any of those keys will ensure the default value listed is used.\n\n This connection info will be used by default for all connections to Redis, unless explicitly set otherwise.\n The common way to override is to define REDIS_CONNECTION_PARAMS on a model, or use AltConnectedModel = MyModel.connectAlt( PARAMS )\n\n Any omitted fields in these connection overrides will inherit the value from the global default.\n\n For example, if your global default connection params define host = 'example.com', port=15000, and db=0, \n and then one of your models has\n\n REDIS_CONNECTION_PARAMS = { 'db' : 1 }\n\n as an attribute, then that model's connection will inherit host='example.com\" and port=15000 but override db and use db=1\n\n\n NOTE: Calling this function will clear the connection_pool attribute of all stored managed connections, disconnect all managed connections,\n and close-out the connection pool.\n It may not be safe to call this function while other threads are potentially hitting Redis (not that it would make sense anyway...)\n\n @see clearRedisPools for more info", "id": "f4156:m0"} {"signature": "def getDefaultRedisConnectionParams():", "body": "global _defaultRedisConnectionParamsreturn copy.copy(_defaultRedisConnectionParams)", "docstring": "getDefaultRedisConnectionParams - Gets A COPY OF the default Redis connection params.\n\n@see setDefaultRedisConnectionParams for more info\n\n@return - copy of default Redis connection parameters", "id": "f4156:m1"} {"signature": "def clearRedisPools():", "body": "global RedisPoolsglobal _redisManagedConnectionParamsfor pool in RedisPools.values():try:pool.disconnect()except:passfor paramsList in _redisManagedConnectionParams.values():for params in paramsList:if '' in params:del params['']RedisPools.clear()_redisManagedConnectionParams.clear()", "docstring": "clearRedisPools - Disconnect all managed connection pools, \n and clear the connectiobn_pool attribute on all stored managed connection pools.\n\n A \"managed\" connection pool is one where REDIS_CONNECTION_PARAMS does not define the \"connection_pool\" attribute.\n If you define your own pools, IndexedRedis will use them and leave them alone.\n\n This method will be called automatically after calling setDefaultRedisConnectionParams.\n\n Otherwise, you shouldn't have to call it.. 
Maybe as some sort of disaster-recovery call..", "id": "f4156:m2"} {"signature": "def getRedisPool(params):", "body": "global RedisPoolsglobal _defaultRedisConnectionParamsglobal _redisManagedConnectionParamsif not params:params = _defaultRedisConnectionParamsisDefaultParams = Trueelse:isDefaultParams = bool(params is _defaultRedisConnectionParams)if '' in params:return params['']hashValue = hashDictOneLevel(params)if hashValue in RedisPools:params[''] = RedisPools[hashValue]return RedisPools[hashValue]if not isDefaultParams:origParams = paramsparams = copy.copy(params)else:origParams = paramscheckAgain = Falseif '' not in params:if not isDefaultParams and '' in _defaultRedisConnectionParams:params[''] = _defaultRedisConnectionParams['']else:params[''] = ''checkAgain = Trueif '' not in params:if not isDefaultParams and '' in _defaultRedisConnectionParams:params[''] = _defaultRedisConnectionParams['']else:params[''] = checkAgain = Trueif '' not in params:if not isDefaultParams and '' in _defaultRedisConnectionParams:params[''] = _defaultRedisConnectionParams['']else:params[''] = checkAgain = Trueif not isDefaultParams:otherGlobalKeys = set(_defaultRedisConnectionParams.keys()) - set(params.keys())for otherKey in otherGlobalKeys:if otherKey == '':continueparams[otherKey] = _defaultRedisConnectionParams[otherKey]checkAgain = Trueif checkAgain:hashValue = hashDictOneLevel(params)if hashValue in RedisPools:params[''] = RedisPools[hashValue]return RedisPools[hashValue]connectionPool = redis.ConnectionPool(**params)origParams[''] = params[''] = connectionPoolRedisPools[hashValue] = connectionPoolorigParamsHash = hashDictOneLevel(origParams)if origParamsHash not in _redisManagedConnectionParams:_redisManagedConnectionParams[origParamsHash] = [origParams]elif origParams not in _redisManagedConnectionParams[origParamsHash]:_redisManagedConnectionParams[origParamsHash].append(origParams)return connectionPool", "docstring": "getRedisPool - Returns and possibly also creates a Redis connection pool\n based on the REDIS_CONNECTION_PARAMS passed in.\n\n The goal of this method is to keep a small connection pool rolling\n to each unique Redis instance, otherwise during network issues etc\n python-redis will leak connections and in short-order can exhaust\n all the ports on a system. 
There's probably also some minor\n performance gain in sharing Pools.\n\n Will modify \"params\", if \"host\" and/or \"port\" are missing, will fill\n them in with defaults, and prior to return will set \"connection_pool\"\n on params, which will allow immediate return on the next call,\n and allow access to the pool directly from the model object.\n\n @param params - REDIS_CONNECTION_PARAMS - kwargs to redis.Redis\n\n @return redis.ConnectionPool corrosponding to this unique server.", "id": "f4156:m3"} {"signature": "def __init__(self, *args, **kwargs):", "body": "self.validateModel()osetattr = object.__setattr__ogetattr = object.__getattribute__osetattr(self, '', {})if kwargs.get('', False) is True:convertFunctionName = ''else:convertFunctionName = ''fields = ogetattr(self, '')for thisField in fields:if thisField not in kwargs:val = thisField.getDefaultValue()else:val = kwargs[thisField]val = getattr(thisField, convertFunctionName) ( val )osetattr(self, thisField, val)try:self._origData[thisField] = copy.deepcopy(val)except:try:self._origData[thisField] = copy.copy(val)except:try:deprecatedMessage(\"\" %(self.__class__.__name__, thisField.name, val.__class__.__name__, repr(val)))except Exception as reprErr:deprecatedMessage(\"\" %(self.__class__.__name__, thisField.name, val.__class__.__name__, reprErr.__class__.__name__, str(reprErr)) )self._origData[thisField] = val_id = kwargs.get('', None)if _id:_id = int(_id)object.__setattr__(self, '', _id)", "docstring": "__init__ - Set the values on this object. MAKE SURE YOU CALL THE SUPER HERE, or else things will not work.", "id": "f4156:c1:m0"} {"signature": "def __setattr__(self, keyName, value):", "body": "oga = object.__getattribute__if keyName not in ('', ''):fields = oga(self, '')try:idx = fields.index(keyName)except:idx = -if idx != -:value = fields[idx].fromInput(value)object.__setattr__(self, keyName, value)", "docstring": "__setattr__ - Will be used to set an attribute on this object.\n\n If the attribute is a field (in self.FIELDS), it will be converted via the field type's #fromInput method.\n\n Otherwise, it will just set the attribute on this object.", "id": "f4156:c1:m1"} {"signature": "def asDict(self, includeMeta=False, forStorage=False, strKeys=False):", "body": "ret = {}for thisField in self.FIELDS:val = object.__getattribute__(self, thisField)if forStorage is True:val = thisField.toStorage(val)if strKeys:ret[str(thisField)] = valelse:ret[thisField] = valif includeMeta is True:ret[''] = getattr(self, '', None)return ret", "docstring": "toDict / asDict - Get a dictionary representation of this model.\n\n@param includeMeta - Include metadata in return. For now, this is only pk stored as \"_id\"\n\n@param convertValueTypes - default True. 
If False, fields with fieldValue defined will be converted to that type.\n Use True when saving, etc, as native type is always either str or bytes.\n\n@param strKeys Default False - If True, just the string value of the field name will be used as the key.\n Otherwise, the IRField itself will be (although represented and indexed by string)\n\n@return - Dictionary reprensetation of this object and all fields", "id": "f4156:c1:m3"} {"signature": "def pprint(self, stream=None):", "body": "pprint.pprint(self.asDict(includeMeta=True, forStorage=False, strKeys=True), stream=stream)", "docstring": "pprint - Pretty-print a dict representation of this object.\n\n@param stream - Either a stream to output, or None to default to sys.stdout", "id": "f4156:c1:m4"} {"signature": "def hasUnsavedChanges(self, cascadeObjects=False):", "body": "if not self._id or not self._origData:return Truefor thisField in self.FIELDS:thisVal = object.__getattribute__(self, thisField)if self._origData.get(thisField, '') != thisVal:return Trueif cascadeObjects is True and issubclass(thisField.__class__, IRForeignLinkFieldBase):if thisVal.objHasUnsavedChanges():return Truereturn False", "docstring": "hasUnsavedChanges - Check if any unsaved changes are present in this model, or if it has never been saved.\n\n@param cascadeObjects default False, if True will check if any foreign linked objects themselves have unsaved changes (recursively).\n Otherwise, will just check if the pk has changed.\n\n@return - True if any fields have changed since last fetch, or if never saved. Otherwise, False", "id": "f4156:c1:m5"} {"signature": "def getUpdatedFields(self, cascadeObjects=False):", "body": "updatedFields = {}for thisField in self.FIELDS:thisVal = object.__getattribute__(self, thisField)if self._origData.get(thisField, '') != thisVal:updatedFields[thisField] = (self._origData[thisField], thisVal)if cascadeObjects is True and issubclass(thisField.__class__, IRForeignLinkFieldBase) and thisVal.objHasUnsavedChanges():updatedFields[thisField] = (self._origData[thisField], thisVal)return updatedFields", "docstring": "getUpdatedFields - See changed fields.\n\n@param cascadeObjects default False, if True will check if any foreign linked objects themselves have unsaved changes (recursively).\n Otherwise, will just check if the pk has changed.\n\n@return - a dictionary of fieldName : tuple(old, new).\n\nfieldName may be a string or may implement IRField (which implements string, and can be used just like a string)", "id": "f4156:c1:m6"} {"signature": "def diff(firstObj, otherObj, includeMeta=False):", "body": "if not isIndexedRedisModel(firstObj): raise ValueError('' %( type(firstObj).__name__ , ) )if not isIndexedRedisModel(otherObj): raise ValueError('' %( type(otherObj).__name__ , ) )firstObj.validateModel()otherObj.validateModel()if getattr(firstObj, '') != getattr(otherObj, ''):raise ValueError('' %( firstObj.__class__, otherObj.__class__) )diffFields = {}for thisField in firstObj.FIELDS:thisFieldStr = str(thisField)firstVal = object.__getattribute__( firstObj, thisFieldStr )otherVal = object.__getattribute__( otherObj, thisFieldStr )if firstVal != otherVal:diffFields[ thisFieldStr ] = ( (firstVal, otherVal) )if includeMeta:firstPk = firstObj.getPk()otherPk = otherObj.getPk()if firstPk != otherPk:diffFields[''] = ( firstPk, otherPk )return diffFields", "docstring": "diff - Compare the field values on two IndexedRedisModels.\n\n@param firstObj - First object (or self)\n\n@param otherObj - Second object\n\n@param includeMeta - If meta 
information (like pk) should be in the diff results.\n\n\n@return - Dict of 'field' : ( value_firstObjForField, value_otherObjForField ).\n\n Keys are names of fields with different values.\n Value is a tuple of ( value_firstObjForField, value_otherObjForField )\n\nCan be called statically, like: IndexedRedisModel.diff ( obj1, obj2 )\n\n or in reference to an obj : obj1.diff(obj2)", "id": "f4156:c1:m7"} {"signature": "@classpropertydef objects(cls):", "body": "return IndexedRedisQuery(cls)", "docstring": "objects - Start filtering", "id": "f4156:c1:m8"} {"signature": "@classpropertydef saver(cls):", "body": "return IndexedRedisSave(cls)", "docstring": "saver - Get an IndexedRedisSave associated with this model", "id": "f4156:c1:m9"} {"signature": "@classpropertydef deleter(cls):", "body": "return IndexedRedisDelete(cls)", "docstring": "deleter - Get access to IndexedRedisDelete for this model.\n@see IndexedRedisDelete.\nUsually you'll probably just do Model.objects.filter(...).delete()", "id": "f4156:c1:m10"} {"signature": "def save(self, cascadeSave=True):", "body": "saver = IndexedRedisSave(self.__class__)return saver.save(self, cascadeSave=cascadeSave)", "docstring": "save - Save this object.\n\nWill perform an \"insert\" if this object had not been saved before,\n otherwise will update JUST the fields changed on THIS INSTANCE of the model.\n\n i.e. If you have two processes fetch the same object and change different fields, they will not overwrite\n eachother, but only save the ones each process changed.\n\nIf you want to save multiple objects of type MyModel in a single transaction,\nand you have those objects in a list, myObjs, you can do the following:\n\n MyModel.saver.save(myObjs)\n\n@param cascadeSave Default True - If True, any Foreign models linked as attributes that have been altered\n or created will be saved with this object. If False, only this object (and the reference to an already-saved foreign model) will be saved.\n\n@see #IndexedRedisSave.save\n\n@return - Single element list, id of saved object (if successful)", "id": "f4156:c1:m11"} {"signature": "def delete(self):", "body": "deleter = IndexedRedisDelete(self.__class__)return deleter.deleteOne(self)", "docstring": "delete - Delete this object", "id": "f4156:c1:m12"} {"signature": "def getPk(self):", "body": "return self._id", "docstring": "getPk - Gets the internal primary key associated with this object", "id": "f4156:c1:m13"} {"signature": "@classmethoddef reset(cls, newObjs):", "body": "conn = cls.objects._get_new_connection()transaction = conn.pipeline()transaction.eval(", "docstring": "reset - Remove all stored data associated with this model (i.e. 
all objects of this type),\n and then save all the provided objects in #newObjs , all in one atomic transaction.\n\nUse this method to move from one complete set of objects to another, where any querying applications\nwill only see the complete before or complete after.\n\n@param newObjs list - A list of objects that will replace the current dataset\n\nTo just replace a specific subset of objects in a single transaction, you can do MyModel.saver.save(objs)\n and just the objs in \"objs\" will be inserted/updated in one atomic step.\n\nThis method, on the other hand, will delete all previous objects and add the newly provided objects in a single atomic step,\n and also reset the primary key ID generator\n\n@return list - The new primary keys associated with each object (same order as provided #newObjs list)", "id": "f4156:c1:m14"} {"signature": "def hasSameValues(self, other, cascadeObject=True):", "body": "if self.FIELDS != other.FIELDS:return Falseoga = object.__getattribute__for field in self.FIELDS:thisVal = oga(self, field)otherVal = oga(other, field)if thisVal != otherVal:return Falseif cascadeObject is True and issubclass(field.__class__, IRForeignLinkFieldBase):if thisVal and thisVal.isFetched():if otherVal and otherVal.isFetched():theseForeign = thisVal.getObjs()othersForeign = otherVal.getObjs()for i in range(len(theseForeign)):if not theseForeign[i].hasSameValues(othersForeign[i]):return Falseelse:theseForeign = thisVal.getObjs()for i in range(len(theseForeign)):if theseForeign[i].hasUnsavedChanges(cascadeObjects=True):return Falseelse:if otherVal and otherVal.isFetched():othersForeign = otherVal.getObjs()for i in range(len(othersForeign)):if othersForeign[i].hasUnsavedChanges(cascadeObjects=True):return Falsereturn True", "docstring": "hasSameValues - Check if this and another model have the same fields and values.\n\nThis does NOT include id, so the models can have the same values but be different objects in the database.\n\n@param other - Another model\n\n@param cascadeObject default True - If True, foreign link values with changes will be considered a difference.\n Otherwise, only the immediate values are checked.\n\n@return - True if all fields have the same value, otherwise False", "id": "f4156:c1:m15"} {"signature": "def __eq__(self, other):", "body": "if type(self) != type(other):return Falseif not self.hasSameValues(other):return Falseif getattr(self, '', None) != getattr(other, '', None):return Falsereturn True", "docstring": "__eq__ - Check if two IndexedRedisModels are equal.\n\nThey are equal if they have the same type and same field values (including id).\n\nTo check if two models have the same values (but can have different ids), use #hasSameValues method.", "id": "f4156:c1:m16"} {"signature": "def __ne__(self, other):", "body": "return not self.__eq__(other)", "docstring": "__ne__ - Check if two IndexedRedisModels are NOT equal.\n\n@see IndexedRedisModel.__eq__", "id": "f4156:c1:m17"} {"signature": "def __str__(self):", "body": "myClassName = self.__class__.__name__myDict = self.asDict(True, forStorage=False, strKeys=True)_id = myDict.pop('', '')myPointerLoc = \"\" %(id(self),)if not _id or _id == '':return '' %(myClassName, myPointerLoc)elif self.hasUnsavedChanges():return '' %(myClassName, to_unicode(_id), myPointerLoc)return '' %(myClassName, to_unicode(_id), myPointerLoc)", "docstring": "__str__ - Returns a string representation of this object's state.\n See implementation.\n\n@return - \n Some samples:\n (Pdb) str(z)\n ''\n (Pdb) z.artist = 'New Artist'\n 
(Pdb) str(z)\n ''", "id": "f4156:c1:m18"} {"signature": "def __repr__(self):", "body": "myDict = self.asDict(True, forStorage=False, strKeys=True)myClassName = self.__class__.__name__ret = [myClassName, '']_id = myDict.pop('', '')if _id:ret += ['', to_unicode(_id), '']key = Nonefor key, value in myDict.items():ret += [key, '', repr(value), '']if key is not None or not _id:ret.pop()ret.append('')return ''.join(ret)", "docstring": "__repr__ - Returns a string of the constructor/params to recreate this object.\n Example: objCopy = eval(repr(obj))\n\n @return - String of python init call to recreate this object", "id": "f4156:c1:m19"} {"signature": "def copy(self, copyPrimaryKey=False, copyValues=False):", "body": "cpy = self.__class__(**self.asDict(copyPrimaryKey, forStorage=False))if copyValues is True:for fieldName in cpy.FIELDS:setattr(cpy, fieldName, copy.deepcopy(getattr(cpy, fieldName)))return cpy", "docstring": "copy - Copies this object.\n\n@param copyPrimaryKey default False - If True, any changes to the copy will save over-top the existing entry in Redis.\n If False, only the data is copied, and nothing is saved.\n\n@param copyValues default False - If True, every field value on this object will be explicitly copied. If False,\n an object will be created with the same values, and depending on the type may share the same reference.\n\n This is the difference between a copy and a deepcopy.\n\n@return - Copy of this object, per above\n\nIf you need a copy that IS linked, @see IndexedRedisModel.copy", "id": "f4156:c1:m20"} {"signature": "def __copy__(self):", "body": "return self.copy(copyPrimaryKey=False, copyValues=False)", "docstring": "__copy__ - Used by the \"copy\" module to make a copy,\n which will NOT be linked to the original entry in the database, but will contain the same data\n\n@return - Copy of this object, per above", "id": "f4156:c1:m21"} {"signature": "def __deepcopy__(self, *args, **kwargs):", "body": "cpy = self.copy(copyPrimaryKey=False, copyValues=True)cpy.FIELDS = cpy.FIELDS[:]cpy.INDEXED_FIELDS = cpy.INDEXED_FIELDS[:]return cpy", "docstring": "__deepcopy__ - Used by the \"copy\" module to make a deepcopy.\n\n Will perform a deepcopy of all attributes, which will NOT be linked to the original entry in the database.\n\n\n If you need a copy that IS linked, @see IndexedRedisModel.copy\n\n@return - Deep copy of this object, per above", "id": "f4156:c1:m22"} {"signature": "def saveToExternal(self, redisCon):", "body": "if type(redisCon) == dict:conn = redis.Redis(**redisCon)elif hasattr(conn, '') and issubclass(conn.__class__, redis.Redis):conn = redisConelse:raise ValueError('')saver = self.saverforceID = saver._getNextID(conn) myCopy = self.copy(False)return saver.save(myCopy, usePipeline=True, forceID=forceID, conn=conn)", "docstring": "saveToExternal - Saves this object to a different Redis than that specified by REDIS_CONNECTION_PARAMS on this model.\n\n@param redisCon - Either a dict of connection params, a la REDIS_CONNECTION_PARAMS, or an existing Redis connection.\n If you are doing a lot of bulk copies, it is recommended that you create a Redis connection and pass it in rather than establish a new\n connection with each call.\n\n@note - You will generate a new primary key relative to the external Redis environment. 
If you need to reference a \"shared\" primary key, it is better\n to use an indexed field than the internal pk.", "id": "f4156:c1:m23"} {"signature": "def reload(self, cascadeObjects=True):", "body": "_id = self._idif not _id:raise KeyError('')currentData = self.asDict(False, forStorage=False)newDataObj = self.objects.get(_id)if not newDataObj:raise KeyError('' %(_id,))newData = newDataObj.asDict(False, forStorage=False)if currentData == newData and not self.foreignFields:return []updatedFields = {}for thisField, newValue in newData.items():defaultValue = thisField.getDefaultValue()currentValue = currentData.get(thisField, defaultValue)fieldIsUpdated = Falseif currentValue != newValue:fieldIsUpdated = Trueelif cascadeObjects is True and issubclass(thisField.__class__, IRForeignLinkFieldBase):if currentValue.isFetched():oldObjs = currentValue.getObjs()newObjs = newValue.getObjs()if oldObjs != newObjs: fieldIsUpdated = Trueelse:for i in range(len(oldObjs)):if not oldObjs[i].hasSameValues(newObjs[i], cascadeObjects=True):fieldIsUpdated = Truebreakif fieldIsUpdated is True:updatedFields[thisField] = ( currentValue, newValue) setattr(self, thisField, newValue)self._origData[thisField] = newDataObj._origData[thisField]return updatedFields", "docstring": "reload - Reload this object from the database, overriding any local changes and merging in any updates.\n\n\n @param cascadeObjects Default True. If True, foreign-linked objects will be reloaded if their values have changed\n since last save/fetch. If False, only if the pk changed will the foreign linked objects be reloaded.\n\n @raises KeyError - if this object has not been saved (no primary key)\n\n @return - Dict with the keys that were updated. Key is field name that was updated,\n and value is tuple of (old value, new value). \n\n NOTE: Currently, this will cause a fetch of all Foreign Link objects, one level", "id": "f4156:c1:m24"} {"signature": "def cascadeFetch(self):", "body": "IndexedRedisQuery._doCascadeFetch(self)", "docstring": "cascadeFetch - Immediately fetch all foreign links on this field, and all their links, etc.\n\n Normally, this would be done on access of the foreign members, or at .all() time by passing cascadeFetch=True into\n the fetch function\n\n e.x. 
MyModel.objects.filter(...).all(cascadeFetch=True)", "id": "f4156:c1:m25"} {"signature": "def __getstate__(self):", "body": "myData = self.asDict(True, forStorage=False)myData[''] = self._origDatareturn myData", "docstring": "pickle uses this", "id": "f4156:c1:m26"} {"signature": "def __setstate__(self, stateDict):", "body": "self.__class__.validateModel()for key, value in stateDict.items():setattr(self, key, value)self._origData = stateDict['']", "docstring": "pickle uses this", "id": "f4156:c1:m27"} {"signature": "@classmethoddef copyModel(mdl):", "body": "copyNum = _modelCopyMap[mdl]_modelCopyMap[mdl] += mdlCopy = type(mdl.__name__ + '' + str(copyNum), mdl.__bases__, copy.deepcopy(dict(mdl.__dict__)))mdlCopy.FIELDS = [field.copy() for field in mdl.FIELDS]mdlCopy.INDEXED_FIELDS = [str(idxField) for idxField in mdl.INDEXED_FIELDS] mdlCopy.validateModel()return mdlCopy", "docstring": "copyModel - Copy this model, and return that copy.\n\n The copied model will have all the same data, but will have a fresh instance of the FIELDS array and all members,\n and the INDEXED_FIELDS array.\n\n This is useful for converting, like changing field types or whatever, where you can load from one model and save into the other.\n\n@return - A copy class of this model class with a unique name.", "id": "f4156:c1:m28"} {"signature": "@classmethoddef validateModel(model):", "body": "if model == IndexedRedisModel:import reif re.match('', sys.argv[]):returnraise ValueError('')global validatedModelskeyName = model.KEY_NAMEif not keyName:raise InvalidModelException('' %(str(model.__name__), ) )if model in validatedModels:return TruefailedValidationStr = '' %(str(model.__name__), ) fieldSet = set(model.FIELDS)indexedFieldSet = set(model.INDEXED_FIELDS)if not fieldSet:raise InvalidModelException('' %(failedValidationStr,))if hasattr(model, ''):raise InvalidModelException('')if hasattr(model, ''):raise InvalidModelException('')newFields = []updatedFields = []mustUpdateFields = FalseforeignFields = []for thisField in fieldSet:if thisField == '':raise InvalidModelException('' %(failedValidationStr,))try:codecs.ascii_encode(thisField)except UnicodeDecodeError as e:raise InvalidModelException('' %(failedValidationStr, to_unicode(thisField), str(e)))if issubclass(thisField.__class__, IRForeignLinkFieldBase):foreignFields.append(thisField)if issubclass(thisField.__class__, IRField):newFields.append(thisField)else:mustUpdateFields = TruenewField = IRClassicField(thisField)newFields.append(newField)updatedFields.append(thisField)thisField = newFieldif str(thisField) == '':raise InvalidModelException('' %(failedValidationStr, str(type(thisField)), repr(thisField) ) )if thisField in indexedFieldSet and thisField.CAN_INDEX is False:raise InvalidModelException('' %(failedValidationStr, str(thisField.__class__.__name__), repr(thisField)))if hasattr(IndexedRedisModel, thisField) is True:raise InvalidModelException('' %(failedValidationStr, str(thisField)))if mustUpdateFields is True:model.FIELDS = newFieldsdeprecatedMessage('' %(model.__name__, repr(updatedFields)), '' + model.__name__)model.FIELDS = KeyList(model.FIELDS)if bool(indexedFieldSet - fieldSet):raise InvalidModelException('' %(failedValidationStr, str(list(indexedFieldSet - fieldSet)), ) )model.foreignFields = foreignFieldsvalidatedModels.add(model)return True", "docstring": "validateModel - Class method that validates a given model is implemented correctly. 
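validateModel, whose description begins here, enforces a non-empty KEY_NAME, a non-empty FIELDS list, and INDEXED_FIELDS being a subset of FIELDS; a minimal sketch of tripping one of those checks with a deliberately broken hypothetical model (the InvalidModelException import path is assumed to be the package root).

# Hypothetical model whose INDEXED_FIELDS names a field missing from FIELDS;
# validateModel (called explicitly here, otherwise run on first instantiation)
# rejects it with InvalidModelException.
from IndexedRedis import IndexedRedisModel, InvalidModelException

class BrokenModel(IndexedRedisModel):
    FIELDS = ['name']
    INDEXED_FIELDS = ['name', 'missingField']   # not a subset of FIELDS
    KEY_NAME = 'BrokenModels'

try:
    BrokenModel.validateModel()
except InvalidModelException as e:
    print('Model rejected:', e)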
Will only be validated once, on first model instantiation.\n\n@param model - Implicit of own class\n\n@return - True\n\n@raises - InvalidModelException if there is a problem with the model, and the message contains relevant information.", "id": "f4156:c1:m29"} {"signature": "@deprecated('')@classmethoddef connect(cls, redisConnectionParams):", "body": "return cls.connectAlt(redisConnectionParams)", "docstring": "connect - DEPRECATED NAME - @see connectAlt\n Create a class of this model which will use an alternate connection than the one specified by REDIS_CONNECTION_PARAMS on this model.\n\n@param redisConnectionParams - Dictionary of arguments to redis.Redis, same as REDIS_CONNECTION_PARAMS.\n\n@return - A class that can be used in all the same ways as the existing IndexedRedisModel, but that connects to a different instance.", "id": "f4156:c1:m30"} {"signature": "@classmethoddef connectAlt(cls, redisConnectionParams):", "body": "if not isinstance(redisConnectionParams, dict):raise ValueError('')hashVal = hashDictOneLevel(redisConnectionParams)modelDictCopy = copy.deepcopy(dict(cls.__dict__))modelDictCopy[''] = redisConnectionParamsConnectedIndexedRedisModel = type('' + cls.__name__ + str(hashVal), cls.__bases__, modelDictCopy)return ConnectedIndexedRedisModel", "docstring": "connectAlt - Create a class of this model which will use an alternate connection than the one specified by REDIS_CONNECTION_PARAMS on this model.\n\n@param redisConnectionParams - Dictionary of arguments to redis.Redis, same as REDIS_CONNECTION_PARAMS.\n\n@return - A class that can be used in all the same ways as the existing IndexedRedisModel, but that connects to a different instance.\n\n The fields and key will be the same here, but the connection will be different. use #copyModel if you want an independent class for the model", "id": "f4156:c1:m31"} {"signature": "def __init__(self, mdl):", "body": "mdl.validateModel()self.mdl = mdlself.keyName = mdl.KEY_NAMEfields = mdl.FIELDSself.fields = mdl.FIELDSself.indexedFields = [fields[fieldName] for fieldName in mdl.INDEXED_FIELDS]self._connection = None", "docstring": "Internal constructor\n\n@param mdl - IndexedRedisModel implementer", "id": "f4156:c2:m0"} {"signature": "def _get_new_connection(self):", "body": "pool = getRedisPool(self.mdl.REDIS_CONNECTION_PARAMS)return redis.Redis(connection_pool=pool)", "docstring": "_get_new_connection - Get a new connection\ninternal", "id": "f4156:c2:m2"} {"signature": "def _get_connection(self):", "body": "if self._connection is None:self._connection = self._get_new_connection() return self._connection", "docstring": "_get_connection - Maybe get a new connection, or reuse if passed in.\n Will share a connection with a model\ninternal", "id": "f4156:c2:m3"} {"signature": "def _get_ids_key(self):", "body": "return ''.join([INDEXED_REDIS_PREFIX, self.keyName + ''])", "docstring": "_get_ids_key - Gets the key holding primary keys\ninternal", "id": "f4156:c2:m4"} {"signature": "def _add_id_to_keys(self, pk, conn=None):", "body": "if conn is None:conn = self._get_connection()conn.sadd(self._get_ids_key(), pk)", "docstring": "_add_id_to_keys - Adds primary key to table\ninternal", "id": "f4156:c2:m5"} {"signature": "def _rem_id_from_keys(self, pk, conn=None):", "body": "if conn is None:conn = self._get_connection()conn.srem(self._get_ids_key(), pk)", "docstring": "_rem_id_from_keys - Remove primary key from table\ninternal", "id": "f4156:c2:m6"} {"signature": "def _add_id_to_index(self, indexedField, pk, val, conn=None):", "body": 
"if conn is None:conn = self._get_connection()conn.sadd(self._get_key_for_index(indexedField, val), pk)", "docstring": "_add_id_to_index - Adds an id to an index\ninternal", "id": "f4156:c2:m7"} {"signature": "def _rem_id_from_index(self, indexedField, pk, val, conn=None):", "body": "if conn is None:conn = self._get_connection()conn.srem(self._get_key_for_index(indexedField, val), pk)", "docstring": "_rem_id_from_index - Removes an id from an index\ninternal", "id": "f4156:c2:m8"} {"signature": "def _get_key_for_index(self, indexedField, val):", "body": "if hasattr(indexedField, ''):val = indexedField.toIndex(val)else:val = self.fields[indexedField].toIndex(val)return ''.join( [INDEXED_REDIS_PREFIX, self.keyName, '', indexedField, '', val] )", "docstring": "_get_key_for_index - Returns the key name that would hold the indexes on a value\nInternal - does not validate that indexedFields is actually indexed. Trusts you. Don't let it down.\n\n@param indexedField - string of field name\n@param val - Value of field\n\n@return - Key name string, potentially hashed.", "id": "f4156:c2:m9"} {"signature": "def _compat_get_str_key_for_index(self, indexedField, val):", "body": "return ''.join([INDEXED_REDIS_PREFIX, self.keyName, '', indexedField, '', getattr(indexedField, '', to_unicode)(val)])", "docstring": "_compat_get_str_key_for_index - Return the key name as a string, even if it is a hashed index field.\n This is used in converting unhashed fields to a hashed index (called by _compat_rem_str_id_from_index which is called by compat_convertHashedIndexes)\n\n @param inde\n@param indexedField - string of field name\n@param val - Value of field\n\n@return - Key name string, always a string regardless of hash", "id": "f4156:c2:m10"} {"signature": "@deprecated('')def _compat_rem_str_id_from_index(self, indexedField, pk, val, conn=None):", "body": "if conn is None:conn = self._get_connection()conn.srem(self._compat_get_str_key_for_index(indexedField, val), pk)", "docstring": "_compat_rem_str_id_from_index - Used in compat_convertHashedIndexes to remove the old string repr of a field,\n in order to later add the hashed value,", "id": "f4156:c2:m11"} {"signature": "def _get_key_for_id(self, pk):", "body": "return ''.join([INDEXED_REDIS_PREFIX, self.keyName, '', to_unicode(pk)])", "docstring": "_get_key_for_id - Returns the key name that holds all the data for an object\nInternal\n\n@param pk - primary key\n\n@return - Key name string", "id": "f4156:c2:m12"} {"signature": "def _get_next_id_key(self):", "body": "return ''.join([INDEXED_REDIS_PREFIX, self.keyName, ''])", "docstring": "_get_next_id_key - Returns the key name that holds the generator for primary key values\nInternal\n\n@return - Key name string", "id": "f4156:c2:m13"} {"signature": "def _peekNextID(self, conn=None):", "body": "if conn is None:conn = self._get_connection()return to_unicode(conn.get(self._get_next_id_key()) or )", "docstring": "_peekNextID - Look at, but don't increment the primary key for this model.\n Internal.\n\n@return int - next pk", "id": "f4156:c2:m14"} {"signature": "def _getNextID(self, conn=None):", "body": "if conn is None:conn = self._get_connection()return int(conn.incr(self._get_next_id_key()))", "docstring": "_getNextID - Get (and increment) the next primary key for this model.\n If you don't want to increment, @see _peekNextID .\n Internal.\n This is done automatically on save. 
No need to call it.\n\n@return int - next pk", "id": "f4156:c2:m15"} {"signature": "def _getTempKey(self):", "body": "return self._get_ids_key() + '' + uuid.uuid4().__str__()", "docstring": "_getTempKey - Generates a temporary key for intermediate storage", "id": "f4156:c2:m16"} {"signature": "def filter(self, **kwargs):", "body": "selfCopy = self.__copy__()return IndexedRedisQuery._filter(selfCopy, **kwargs)", "docstring": "filter - Add filters based on INDEXED_FIELDS having or not having a value.\n Note, no objects are actually fetched until .all() is called\n\n Use the field name [ model.objects.filter(some_field='value')] to filter on items containing that value.\n Use the field name suffxed with '__ne' for a negation filter [ model.objects.filter(some_field__ne='value') ]\n\nExample:\n query = Model.objects.filter(field1='value', field2='othervalue')\n\n objs1 = query.filter(something__ne='value').all()\n objs2 = query.filter(something__ne=7).all()\n\n\n@returns - A copy of this object, with the additional filters. If you want to work inline on this object instead, use the filterInline method.", "id": "f4156:c3:m3"} {"signature": "def filterInline(self, **kwargs):", "body": "return IndexedRedisQuery._filter(self, **kwargs)", "docstring": "filterInline - @see IndexedRedisQuery.filter. This is the same as filter, but works inline on this object instead of creating a copy.\n Use this is you do not need to retain the previous filter object.", "id": "f4156:c3:m4"} {"signature": "@staticmethoddef _filter(filterObj, **kwargs):", "body": "for key, value in kwargs.items():if key.endswith(''):notFilter = Truekey = key[:-]else:notFilter = Falseif key not in filterObj.indexedFields:raise ValueError('' + key + '')if notFilter is False:filterObj.filters.append( (key, value) )else:filterObj.notFilters.append( (key, value) )return filterObj", "docstring": "Internal for handling filters; the guts of .filter and .filterInline", "id": "f4156:c3:m5"} {"signature": "def count(self):", "body": "conn = self._get_connection()numFilters = len(self.filters)numNotFilters = len(self.notFilters)if numFilters + numNotFilters == :return conn.scard(self._get_ids_key())if numNotFilters == :if numFilters == :(filterFieldName, filterValue) = self.filters[]return conn.scard(self._get_key_for_index(filterFieldName, filterValue))indexKeys = [self._get_key_for_index(filterFieldName, filterValue) for filterFieldName, filterValue in self.filters]return len(conn.sinter(indexKeys))notIndexKeys = [self._get_key_for_index(filterFieldName, filterValue) for filterFieldName, filterValue in self.notFilters]if numFilters == :return len(conn.sdiff(self._get_ids_key(), *notIndexKeys))indexKeys = [self._get_key_for_index(filterFieldName, filterValue) for filterFieldName, filterValue in self.filters]tempKey = self._getTempKey()pipeline = conn.pipeline()pipeline.sinterstore(tempKey, *indexKeys)pipeline.sdiff(tempKey, *notIndexKeys)pipeline.delete(tempKey)pks = pipeline.execute()[] return len(pks)", "docstring": "count - gets the number of records matching the filter criteria\n\nExample:\n theCount = Model.objects.filter(field1='value').count()", "id": "f4156:c3:m6"} {"signature": "def exists(self, pk):", "body": "conn = self._get_connection()key = self._get_key_for_id(pk)return conn.exists(key)", "docstring": "exists - Tests whether a record holding the given primary key exists.\n\n@param pk - Primary key (see getPk method)\n\nExample usage: Waiting for an object to be deleted without fetching the object or running a filter. 
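The exists() description beginning here names waiting for a record to disappear, without fetching it, as its main use case; a minimal polling sketch under that assumption (the timeout handling and parameter names are illustrative, not part of the API).

# Poll cheaply until the record behind a known primary key is gone.
import time

def wait_for_deletion(model, pk, timeout=30.0, interval=0.5):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if not model.objects.exists(pk):
            return True          # record is gone
        time.sleep(interval)
    return False                 # still present after the timeout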
\n\nThis is a very cheap operation.\n\n@return - True if object with given pk exists, otherwise False", "id": "f4156:c3:m7"} {"signature": "def getPrimaryKeys(self, sortByAge=False):", "body": "conn = self._get_connection()numFilters = len(self.filters)numNotFilters = len(self.notFilters)if numFilters + numNotFilters == :conn = self._get_connection()matchedKeys = conn.smembers(self._get_ids_key())elif numNotFilters == :if numFilters == :(filterFieldName, filterValue) = self.filters[]matchedKeys = conn.smembers(self._get_key_for_index(filterFieldName, filterValue))else:indexKeys = [self._get_key_for_index(filterFieldName, filterValue) for filterFieldName, filterValue in self.filters]matchedKeys = conn.sinter(indexKeys)else:notIndexKeys = [self._get_key_for_index(filterFieldName, filterValue) for filterFieldName, filterValue in self.notFilters]if numFilters == :matchedKeys = conn.sdiff(self._get_ids_key(), *notIndexKeys)else:indexKeys = [self._get_key_for_index(filterFieldName, filterValue) for filterFieldName, filterValue in self.filters]tempKey = self._getTempKey()pipeline = conn.pipeline()pipeline.sinterstore(tempKey, *indexKeys)pipeline.sdiff(tempKey, *notIndexKeys)pipeline.delete(tempKey)matchedKeys = pipeline.execute()[] matchedKeys = [ int(_key) for _key in matchedKeys ]if sortByAge is False:return list(matchedKeys)else:matchedKeys = list(matchedKeys)matchedKeys.sort()return matchedKeys", "docstring": "getPrimaryKeys - Returns all primary keys matching current filterset.\n\n@param sortByAge - If False, return will be a set and may not be ordered.\n If True, return will be a list and is guarenteed to represent objects oldest->newest\n\n@return - A set of all primary keys associated with current filters.", "id": "f4156:c3:m8"} {"signature": "def all(self, cascadeFetch=False):", "body": "matchedKeys = self.getPrimaryKeys()if matchedKeys:return self.getMultiple(matchedKeys, cascadeFetch=cascadeFetch)return IRQueryableList([], mdl=self.mdl)", "docstring": "all - Get the underlying objects which match the filter criteria.\n\nExample: objs = Model.objects.filter(field1='value', field2='value2').all()\n\n@param cascadeFetch Default False, If True, all Foreign objects associated with this model\n will be fetched immediately. If False, foreign objects will be fetched on-access.\n\n@return - Objects of the Model instance associated with this query.", "id": "f4156:c3:m9"} {"signature": "def allByAge(self, cascadeFetch=False):", "body": "matchedKeys = self.getPrimaryKeys(sortByAge=True)if matchedKeys:return self.getMultiple(matchedKeys, cascadeFetch=cascadeFetch)return IRQueryableList([], mdl=self.mdl)", "docstring": "allByAge - Get the underlying objects which match the filter criteria, ordered oldest -> newest\n If you are doing a queue or just need the head/tail, consider .first() and .last() instead.\n\n\n@param cascadeFetch Default False, If True, all Foreign objects associated with this model\n will be fetched immediately. 
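A short sketch combining filter(), count(), and the age-ordered fetch described just above, reusing the hypothetical Song model from the earlier sketch.

# Count matches cheaply via the index, then fetch them oldest-to-newest.
byArtist = Song.objects.filter(artist='Some Artist')
if byArtist.count():
    oldestFirst = byArtist.allByAge()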
If False, foreign objects will be fetched on-access.\n\n@return - Objects of the Model instance associated with this query, sorted oldest->newest", "id": "f4156:c3:m10"} {"signature": "def allOnlyFields(self, fields, cascadeFetch=False):", "body": "matchedKeys = self.getPrimaryKeys()if matchedKeys:return self.getMultipleOnlyFields(matchedKeys, fields, cascadeFetch=cascadeFetch)return IRQueryableList([], mdl=self.mdl)", "docstring": "allOnlyFields - Get the objects which match the filter criteria, only fetching given fields.\n\n@param fields - List of fields to fetch\n\n@param cascadeFetch Default False, If True, all Foreign objects associated with this model\n will be fetched immediately. If False, foreign objects will be fetched on-access.\n\n\n@return - Partial objects with only the given fields fetched", "id": "f4156:c3:m11"} {"signature": "def allOnlyIndexedFields(self):", "body": "matchedKeys = self.getPrimaryKeys()if matchedKeys:return self.getMultipleOnlyIndexedFields(matchedKeys)return IRQueryableList([], mdl=self.mdl)", "docstring": "allOnlyIndexedFields - Get the objects which match the filter criteria, only fetching indexed fields.\n\n@return - Partial objects with only the indexed fields fetched", "id": "f4156:c3:m12"} {"signature": "def first(self, cascadeFetch=False):", "body": "obj = NonematchedKeys = self.getPrimaryKeys(sortByAge=True)if matchedKeys:while matchedKeys and obj is None:obj = self.get(matchedKeys.pop(), cascadeFetch=cascadeFetch)return obj", "docstring": "First - Returns the oldest record (lowerst primary key) with current filters.\n This makes an efficient queue, as it only fetches a single object.\n\n\n@param cascadeFetch Default False, If True, all Foreign objects associated with this model\n will be fetched immediately. If False, foreign objects will be fetched on-access.\n\n@return - Instance of Model object, or None if no items match current filters", "id": "f4156:c3:m13"} {"signature": "def last(self, cascadeFetch=False):", "body": "obj = NonematchedKeys = self.getPrimaryKeys(sortByAge=True)if matchedKeys:while matchedKeys and obj is None:obj = self.get(matchedKeys.pop(), cascadeFetch=cascadeFetch)return obj", "docstring": "Last - Returns the newest record (highest primary key) with current filters.\n This makes an efficient queue, as it only fetches a single object.\n\n\n@param cascadeFetch Default False, If True, all Foreign objects associated with this model\n will be fetched immediately. If False, foreign objects will be fetched on-access.\n\n@return - Instance of Model object, or None if no items match current filters", "id": "f4156:c3:m14"} {"signature": "def random(self, cascadeFetch=False):", "body": "matchedKeys = list(self.getPrimaryKeys())obj = Nonewhile matchedKeys and not obj:key = matchedKeys.pop(random.randint(, len(matchedKeys)-))obj = self.get(key, cascadeFetch=cascadeFetch)return obj", "docstring": "Random - Returns a random record in current filterset.\n\n\n@param cascadeFetch Default False, If True, all Foreign objects associated with this model\n will be fetched immediately. 
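first() and last() fetch only the single oldest or newest matching object, which the surrounding docstrings point out makes an efficient queue; a sketch of a FIFO consumer built that way (the Job model, its indexed 'state' field, and the state values are assumptions).

# Hypothetical FIFO consumer: take the oldest pending job and mark it running.
def pop_oldest_job(Job):
    job = Job.objects.filter(state='pending').first()
    if job is None:
        return None              # nothing pending
    job.state = 'running'
    job.save()
    return job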
If False, foreign objects will be fetched on-access.\n\n@return - Instance of Model object, or None if no items math current filters", "id": "f4156:c3:m15"} {"signature": "def delete(self):", "body": "if self.filters or self.notFilters:return self.mdl.deleter.deleteMultiple(self.allOnlyIndexedFields())return self.mdl.deleter.destroyModel()", "docstring": "delete - Deletes all entries matching the filter criteria", "id": "f4156:c3:m16"} {"signature": "def get(self, pk, cascadeFetch=False):", "body": "conn = self._get_connection()key = self._get_key_for_id(pk)res = conn.hgetall(key)if type(res) != dict or not len(res.keys()):return Noneres[''] = pkret = self._redisResultToObj(res)if cascadeFetch is True:self._doCascadeFetch(ret)return ret", "docstring": "get - Get a single value with the internal primary key.\n\n\n@param cascadeFetch Default False, If True, all Foreign objects associated with this model\n will be fetched immediately. If False, foreign objects will be fetched on-access.\n\n@param pk - internal primary key (can be found via .getPk() on an item)", "id": "f4156:c3:m17"} {"signature": "@staticmethoddef _doCascadeFetch(obj):", "body": "obj.validateModel()if not obj.foreignFields:returnNOTE: Currently this fetches using one transaction per object. Implementation for actual resolution is inIndexedRedisModel.__getattribute__ for foreignField in obj.foreignFields:subObjsData = object.__getattribute__(obj, foreignField)if not subObjsData:setattr(obj, str(foreignField), irNull)continuesubObjs = subObjsData.getObjs()for subObj in subObjs:if isIndexedRedisModel(subObj):IndexedRedisQuery._doCascadeFetch(subObj)", "docstring": "_doCascadeFetch - Takes an object and performs a cascading fetch on all foreign links, and all theirs, and so on.\n\n@param obj - A fetched model", "id": "f4156:c3:m18"} {"signature": "def getMultiple(self, pks, cascadeFetch=False):", "body": "if type(pks) == set:pks = list(pks)if len(pks) == :return IRQueryableList([self.get(pks[], cascadeFetch=cascadeFetch)], mdl=self.mdl)conn = self._get_connection()pipeline = conn.pipeline()for pk in pks:key = self._get_key_for_id(pk)pipeline.hgetall(key)res = pipeline.execute()ret = IRQueryableList(mdl=self.mdl)i = pksLen = len(pks)while i < pksLen:if res[i] is None:ret.append(None)i += continueres[i][''] = pks[i]obj = self._redisResultToObj(res[i])ret.append(obj)i += if cascadeFetch is True:for obj in ret:if not obj:continueself._doCascadeFetch(obj)return ret", "docstring": "getMultiple - Gets multiple objects with a single atomic operation\n\n\n@param cascadeFetch Default False, If True, all Foreign objects associated with this model\n will be fetched immediately. If False, foreign objects will be fetched on-access.\n\n@param pks - list of internal keys", "id": "f4156:c3:m19"} {"signature": "def getOnlyFields(self, pk, fields, cascadeFetch=False):", "body": "conn = self._get_connection()key = self._get_key_for_id(pk)res = conn.hmget(key, fields)if type(res) != list or not len(res):return NoneobjDict = {}numFields = len(fields)i = anyNotNone = Falsewhile i < numFields:objDict[fields[i]] = res[i]if res[i] != None:anyNotNone = Truei += if anyNotNone is False:return NoneobjDict[''] = pkret = self._redisResultToObj(objDict)if cascadeFetch is True:self._doCascadeFetch(ret)return ret", "docstring": "getOnlyFields - Gets only certain fields from a paticular primary key. 
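allOnlyFields and getOnlyFields return partial objects with only the requested fields populated; a sketch using the hypothetical Song model from the earlier sketches (field names are assumptions).

# Fetch only the 'title' field for every match; other fields stay unfetched.
titles = Song.objects.filter(artist='Some Artist').allOnlyFields(['title'])
for partial in titles:
    print(partial.title)
# For a single known primary key, Song.objects.getOnlyFields(pk, ['title'])
# returns one partial object, or None when nothing was stored under pk.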
For working on entire filter set, see allOnlyFields\n\n@param pk - Primary Key\n\n@param fields list - List of fields\n\n@param cascadeFetch Default False, If True, all Foreign objects associated with this model\n will be fetched immediately. If False, foreign objects will be fetched on-access.\n\n\nreturn - Partial objects with only fields applied", "id": "f4156:c3:m20"} {"signature": "def getMultipleOnlyFields(self, pks, fields, cascadeFetch=False):", "body": "if type(pks) == set:pks = list(pks)if len(pks) == :return IRQueryableList([self.getOnlyFields(pks[], fields, cascadeFetch=cascadeFetch)], mdl=self.mdl)conn = self._get_connection()pipeline = conn.pipeline()for pk in pks:key = self._get_key_for_id(pk)pipeline.hmget(key, fields)res = pipeline.execute()ret = IRQueryableList(mdl=self.mdl)pksLen = len(pks)i = numFields = len(fields)while i < pksLen:objDict = {}anyNotNone = FalsethisRes = res[i]if thisRes is None or type(thisRes) != list:ret.append(None)i += continuej = while j < numFields:objDict[fields[j]] = thisRes[j]if thisRes[j] != None:anyNotNone = Truej += if anyNotNone is False:ret.append(None)i += continueobjDict[''] = pks[i]obj = self._redisResultToObj(objDict)ret.append(obj)i += if cascadeFetch is True:for obj in ret:self._doCascadeFetch(obj)return ret", "docstring": "getMultipleOnlyFields - Gets only certain fields from a list of primary keys. For working on entire filter set, see allOnlyFields\n\n@param pks list - Primary Keys\n\n@param fields list - List of fields\n\n\n@param cascadeFetch Default False, If True, all Foreign objects associated with this model\n will be fetched immediately. If False, foreign objects will be fetched on-access.\n\nreturn - List of partial objects with only fields applied", "id": "f4156:c3:m21"} {"signature": "def getOnlyIndexedFields(self, pk):", "body": "return self.getOnlyFields(pk, self.indexedFields)", "docstring": "getOnlyIndexedFields - Get only the indexed fields on an object. This is the minimum to delete.\n\n@param pk - Primary key\n\n@return - Object with only indexed fields fetched.", "id": "f4156:c3:m22"} {"signature": "def getMultipleOnlyIndexedFields(self, pks):", "body": "return self.getMultipleOnlyFields(pks, self.indexedFields)", "docstring": "getMultipleOnlyIndexedFields - Get only the indexed fields on an object. This is the minimum to delete.\n\n@param pks - List of primary keys\n\n@return - List of objects with only indexed fields fetched", "id": "f4156:c3:m23"} {"signature": "def reindex(self):", "body": "objs = self.all()saver = IndexedRedisSave(self.mdl)saver.reindex(objs)", "docstring": "reindex - Reindexes the objects matching current filterset. Use this if you add/remove a field to INDEXED_FIELDS.\n\n NOTE - This will NOT remove entries from the old index if you change index type, or change decimalPlaces on a\n IRFixedPointField. 
To correct these indexes, you'll need to run:\n\n Model.reset(Model.objects.all())\n\nIf you change the value of \"hashIndex\" on a field, you need to call #compat_convertHashedIndexes instead.", "id": "f4156:c3:m24"} {"signature": "def compat_convertHashedIndexes(self, fetchAll=True):", "body": "saver = IndexedRedisSave(self.mdl)if fetchAll is True:objs = self.all()saver.compat_convertHashedIndexes(objs)else:didWarnOnce = Falsepks = self.getPrimaryKeys()for pk in pks:obj = self.get(pk)if not obj:if didWarnOnce is False:sys.stderr.write('''''')didWarnOnce = Truecontinuesaver.compat_convertHashedIndexes([obj])", "docstring": "compat_convertHashedIndexes - Reindex fields, used for when you change the propery \"hashIndex\" on one or more fields.\n\nFor each field, this will delete both the hash and unhashed keys to an object, \n and then save a hashed or unhashed value, depending on that field's value for \"hashIndex\".\n\nFor an IndexedRedisModel class named \"MyModel\", call as \"MyModel.objects.compat_convertHashedIndexes()\"\n\nNOTE: This works one object at a time (regardless of #fetchAll), so that an unhashable object does not trash all data.\n\nThis method is intended to be used while your application is offline,\n as it doesn't make sense to be changing your model while applications are actively using it.\n\n@param fetchAll , Default True - If True, all objects will be fetched first, then converted.\n This is generally what you want to do, as it is more efficient. If you are memory contrainted,\n you can set this to \"False\", and it will fetch one object at a time, convert it, and save it back.", "id": "f4156:c3:m25"} {"signature": "def save(self, obj, usePipeline=True, forceID=False, cascadeSave=True, conn=None):", "body": "if conn is None:conn = self._get_connection()if usePipeline is True:idConn = connelse:idConn = self._get_new_connection()if issubclass(obj.__class__, (list, tuple)):objs = objelse:objs = [obj]if usePipeline is True:pipeline = conn.pipeline()else:pipeline = connoga = object.__getattribute__if cascadeSave is True:ignPipelines = OrderedDict()foreignSavers = {}for thisObj in objs:if not thisObj.foreignFields:continueforeignFields = thisObj.foreignFieldsfor foreignField in foreignFields:rawObj = oga(thisObj, str(foreignField))if rawObj in (None, irNull) or not rawObj.isFetched():continueforeignObjects = oga(thisObj, str(foreignField)).getObjs()for foreignObject in foreignObjects:doSaveForeign = Falseif getattr(foreignObject, '', None):if foreignObject.hasUnsavedChanges(cascadeObjects=True):doSaveForeign = Trueelse:doSaveForeign = Trueif doSaveForeign is True:if foreignField not in foreignSavers:foreignPipelines[foreignField] = self._get_new_connection().pipeline()foreignSavers[foreignField] = IndexedRedisSave(foreignObject.__class__)", "docstring": "save - Save an object / objects associated with this model. \n\nYou probably want to just do object.save() instead of this, but to save multiple objects at once in a single transaction, \n you can use:\n\n MyModel.saver.save(myObjs)\n\n@param obj - The object to save, or a list of objects to save\n\n@param usePipeline - Use a pipeline for saving. You should always want this, unless you are calling this function from within an existing pipeline.\n\n@param forceID - if not False, force ID to this. If obj is list, this is also list. Forcing IDs also forces insert. 
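The save() description beginning here recommends MyModel.saver.save(objs) for writing several objects in one pipelined transaction; a sketch using the hypothetical Song model from earlier (keyword-argument construction mirrors how copy() rebuilds instances from asDict()).

# Save two new objects atomically and collect their new primary keys.
songs = [
    Song(artist='Some Artist', title='First Track'),
    Song(artist='Some Artist', title='Second Track'),
]
newPks = Song.saver.save(songs)     # one pipeline; returns the list of new pks
# Song.saver.saveMultiple(songs) is the equivalent convenience wrapper.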
Up to you to ensure ID will not clash.\n@param cascadeSave Default True - If True, any Foreign models linked as attributes that have been altered\n or created will be saved with this object. If False, only this object (and the reference to an already-saved foreign model) will be saved.\n\n@param conn - A connection or None\n\n@note - if no ID is specified\n\n@return - List of pks", "id": "f4156:c4:m0"} {"signature": "def saveMultiple(self, objs):", "body": "return self.save(objs)", "docstring": "saveMultiple - Save a list of objects using a pipeline.\n\n@param objs < list > - List of objects to save", "id": "f4156:c4:m1"} {"signature": "def _doSave(self, obj, isInsert, conn, pipeline=None):", "body": "if pipeline is None:pipeline = connnewDict = obj.asDict(forStorage=True)key = self._get_key_for_id(obj._id)if isInsert is True:for thisField in self.fields:fieldValue = newDict.get(thisField, thisField.getDefaultValue())pipeline.hset(key, thisField, fieldValue)if fieldValue == IR_NULL_STR:obj._origData[thisField] = irNullelse:obj._origData[thisField] = object.__getattribute__(obj, str(thisField))self._add_id_to_keys(obj._id, pipeline)for indexedField in self.indexedFields:self._add_id_to_index(indexedField, obj._id, obj._origData[indexedField], pipeline)else:updatedFields = obj.getUpdatedFields()for thisField, fieldValue in updatedFields.items():(oldValue, newValue) = fieldValueoldValueForStorage = thisField.toStorage(oldValue)newValueForStorage = thisField.toStorage(newValue)pipeline.hset(key, thisField, newValueForStorage)if thisField in self.indexedFields:self._rem_id_from_index(thisField, obj._id, oldValueForStorage, pipeline)self._add_id_to_index(thisField, obj._id, newValueForStorage, pipeline)obj._origData[thisField] = newValue", "docstring": "_doSave - Internal function to save a single object. Don't call this directly. \n Use \"save\" instead.\n\n If a pipeline is provided, the operations (setting values, updating indexes, etc)\n will be queued into that pipeline.\n Otherwise, everything will be executed right away.\n\n @param obj - Object to save\n @param isInsert - Bool, if insert or update. Either way, obj._id is expected to be set.\n @param conn - Redis connection\n @param pipeline - Optional pipeline, if present the items will be queued onto it. Otherwise, go directly to conn.", "id": "f4156:c4:m2"} {"signature": "def reindex(self, objs, conn=None):", "body": "if conn is None:conn = self._get_connection()pipeline = conn.pipeline()objDicts = [obj.asDict(True, forStorage=True) for obj in objs]for indexedFieldName in self.indexedFields:for objDict in objDicts:self._rem_id_from_index(indexedFieldName, objDict[''], objDict[indexedFieldName], pipeline)self._add_id_to_index(indexedFieldName, objDict[''], objDict[indexedFieldName], pipeline)pipeline.execute()", "docstring": "reindex - Reindexes a given list of objects. 
Probably you want to do Model.objects.reindex() instead of this directly.\n\n@param objs list - List of objects to reindex\n@param conn - Specific Redis connection or None to reuse", "id": "f4156:c4:m3"} {"signature": "def compat_convertHashedIndexes(self, objs, conn=None):", "body": "if conn is None:conn = self._get_connection()fields = [] for indexedField in self.indexedFields:origField = self.fields[indexedField]if '' not in origField.__class__.__new__.__code__.co_varnames:continueif indexedField.hashIndex is True:hashingField = origFieldregField = origField.copy()regField.hashIndex = Falseelse:regField = origFieldhashingField = origField.copy()hashingField.hashIndex = Truefields.append ( (origField, regField, hashingField) )objDicts = [obj.asDict(True, forStorage=True) for obj in objs]for objDict in objDicts:pipeline = conn.pipeline()pk = objDict['']for origField, regField, hashingField in fields:val = objDict[indexedField]self._rem_id_from_index(regField, pk, val, pipeline)self._rem_id_from_index(hashingField, pk, val, pipeline)self._add_id_to_index(origField, pk, val, pipeline)pipeline.execute()", "docstring": "compat_convertHashedIndexes - Reindex all fields for the provided objects, where the field value is hashed or not.\nIf the field is unhashable, do not allow.\n\nNOTE: This works one object at a time. It is intended to be used while your application is offline,\n as it doesn't make sense to be changing your model while applications are actively using it.\n\n@param objs \n@param conn - Specific Redis connection or None to reuse.", "id": "f4156:c4:m4"} {"signature": "def deleteOne(self, obj, conn=None):", "body": "if not getattr(obj, '', None):return if conn is None:conn = self._get_connection()pipeline = conn.pipeline()executeAfter = Trueelse:pipeline = conn executeAfter = Falsepipeline.delete(self._get_key_for_id(obj._id))self._rem_id_from_keys(obj._id, pipeline)for indexedFieldName in self.indexedFields:self._rem_id_from_index(indexedFieldName, obj._id, obj._origData[indexedFieldName], pipeline)obj._id = Noneif executeAfter is True:pipeline.execute()return ", "docstring": "deleteOne - Delete one object\n\n@param obj - object to delete\n@param conn - Connection to reuse, or None\n\n@return - number of items deleted (0 or 1)", "id": "f4156:c5:m0"} {"signature": "def deleteByPk(self, pk):", "body": "obj = self.mdl.objects.getOnlyIndexedFields(pk)if not obj:return return self.deleteOne(obj)", "docstring": "deleteByPk - Delete object associated with given primary key", "id": "f4156:c5:m1"} {"signature": "def deleteMultiple(self, objs):", "body": "conn = self._get_connection()pipeline = conn.pipeline()numDeleted = for obj in objs:numDeleted += self.deleteOne(obj, pipeline)pipeline.execute()return numDeleted", "docstring": "deleteMultiple - Delete multiple objects\n\n@param objs - List of objects\n\n@return - Number of objects deleted", "id": "f4156:c5:m2"} {"signature": "def deleteMultipleByPks(self, pks):", "body": "if type(pks) == set:pks = list(pks)if len(pks) == :return self.deleteByPk(pks[])objs = self.mdl.objects.getMultipleOnlyIndexedFields(pks)return self.deleteMultiple(objs)", "docstring": "deleteMultipleByPks - Delete multiple objects given their primary keys\n\n@param pks - List of primary keys\n\n@return - Number of objects deleted", "id": "f4156:c5:m3"} {"signature": "def destroyModel(self):", "body": "conn = self._get_connection()pipeline = conn.pipeline()pipeline.eval(", "docstring": "destroyModel - Destroy everything related to this model in one swoop.\n\n Same 
effect as Model.reset([]) - Except slightly more efficient.\n\n This function is called if you do Model.objects.delete() with no filters set.\n\n@return - Number of keys deleted. Note, this is NOT number of models deleted, but total keys.", "id": "f4156:c5:m4"} {"signature": "def toggleDeprecatedMessages(enabled):", "body": "global __deprecatedMessagesEnabled__deprecatedMessagesEnabled = enabled", "docstring": "toggleDeprecatedMessages - Normally, a deprecated message will log up to once per message.\n Call toggleDeprecatedMessages(False) to turn them off altogether (like on a Production codebase)\n\n@param enabled - False to disable deprecated messages, otherwise True.", "id": "f4157:m0"} {"signature": "def deprecatedMessage(msg, key=None, printStack=False):", "body": "if __deprecatedMessagesEnabled is False:returnif not _alreadyWarned:sys.stderr.write('')if key is None:from .compat_str import tobyteskey = md5(tobytes(msg)).hexdigest()if key not in _alreadyWarned:_alreadyWarned[key] = Truesys.stderr.write('' %(msg, ))if printStack:sys.stderr.write('')curStack = traceback.extract_stack()sys.stderr.write('' + ''.join(traceback.format_list(curStack[:-])).replace('', '') + '')", "docstring": "deprecatedMessage - Print a deprecated messsage (unless they are toggled off). Will print a message only once (based on \"key\")\n\n@param msg - Deprecated message to possibly print\n\n@param key - A key that is specific to this message. \n If None is provided (default), one will be generated from the md5 of the message.\n However, better to save cycles and provide a unique key if at all possible.\n The decorator uses the function itself as the key.\n\n@param printStack Default False, if True print a stack trace", "id": "f4157:m1"} {"signature": "def hashDictOneLevel(myDict):", "body": "keys = [str(x) for x in myDict.keys()]keys.sort()lst = []for key in keys:lst.append(str(myDict[key]) + '')return ''.join(lst).__hash__()", "docstring": "A function which can generate a hash of a one-level \n dict containing strings (like REDIS_CONNECTION_PARAMS)\n\n@param myDict - Dict with string keys and values\n\n@return - Hash of myDict", "id": "f4158:m0"} {"signature": "def _rindex(mylist: Sequence[T], x: T) -> int:", "body": "return len(mylist) - mylist[::-].index(x) - ", "docstring": "Index of the last occurrence of x in the sequence.", "id": "f4166:m0"} {"signature": "def raw_to_delimited(header: Header, raw_payload: RawPayload) -> DelimitedMsg:", "body": "return tuple(header) + (b'',) + tuple(raw_payload)", "docstring": "\\\n Returns a message consisting of header frames, delimiter frame, and payload frames.\n The payload frames may be given as sequences of bytes (raw) or as `Message`s.", "id": "f4166:m1"} {"signature": "def to_delimited(header: Header, payload: Payload, side: CommSide) -> DelimitedMsg:", "body": "return raw_to_delimited(header, [side.serialize(msg) for msg in payload])", "docstring": "\\\n Returns a message consisting of header frames, delimiter frame, and payload frames.\n The payload frames may be given as sequences of bytes (raw) or as `Message`s.", "id": "f4166:m2"} {"signature": "def raw_from_delimited(msgs: DelimitedMsg) -> RawMsgs:", "body": "delim = _rindex(msgs, b'')return tuple(msgs[:delim]), tuple(msgs[delim + :])", "docstring": "\\\n From a message consisting of header frames, delimiter frame, and payload frames, return a tuple `(header, payload)`.\n The payload frames may be returned as sequences of bytes (raw) or as `Message`s.", "id": "f4166:m3"} {"signature": "def 
from_delimited(msgs: DelimitedMsg, side: CommSide) -> Msgs:", "body": "header, raw_payload = raw_from_delimited(msgs)return header, tuple(side.parse(msg_raw) for msg_raw in raw_payload)", "docstring": "\\\n From a message consisting of header frames, delimiter frame, and payload frames, return a tuple `(header, payload)`.\n The payload frames may be returned as sequences of bytes (raw) or as `Message`s.", "id": "f4166:m4"} {"signature": "def error(code: int, *args, **kwargs) -> HedgehogCommandError:", "body": "if code == FAILED_COMMAND and len(args) >= and args[] == \"\":return EmergencyShutdown(*args, **kwargs)return _errors[code](*args, **kwargs)", "docstring": "Creates an error from the given code, and args and kwargs.\n\n:param code: The acknowledgement code\n:param args: Exception args\n:param kwargs: Exception kwargs\n:return: the error for the given acknowledgement code", "id": "f4167:m0"} {"signature": "def to_message(self):", "body": "from .messages import ackreturn ack.Acknowledgement(self.code, self.args[] if len(self.args) > else '')", "docstring": "Creates an error Acknowledgement message.\nThe message's code and message are taken from this exception.\n\n:return: the message representing this exception", "id": "f4167:c0:m0"} {"signature": "def parse(self, data: RawMessage) -> Message:", "body": "try:return self.receiver.parse(data)except KeyError as err:raise UnknownCommandError from errexcept DecodeError as err:raise UnknownCommandError(f\"\") from err", "docstring": "\\\n Parses a binary protobuf message into a Message object.", "id": "f4168:c0:m1"} {"signature": "def serialize(self, msg: Message) -> RawMessage:", "body": "return self.sender.serialize(msg)", "docstring": "\\\n Serializes a Message object into a binary protobuf message.", "id": "f4168:c0:m2"} {"signature": "async def get_ltd_product_urls(session):", "body": "product_url = ''async with session.get(product_url) as response:data = await response.json()return data['']", "docstring": "Get URLs for LSST the Docs (LTD) products from the LTD Keeper API.\n\n Parameters\n ----------\n session : `aiohttp.ClientSession`\n Your application's aiohttp client session.\n See http://aiohttp.readthedocs.io/en/stable/client.html.\n\n Returns\n -------\n product_urls : `list`\n List of product URLs.", "id": "f4201:m0"} {"signature": "async def get_ltd_product(session, slug=None, url=None):", "body": "if url is None:url = ''.format(slug)async with session.get(url) as response:data = await response.json()return data", "docstring": "Get the product resource (JSON document) from the LSST the Docs API.\n\n Parameters\n ----------\n session : `aiohttp.ClientSession`\n Your application's aiohttp client session.\n See http://aiohttp.readthedocs.io/en/stable/client.html.\n slug : `str`, optional\n Slug identfying the product. This is the same as the subdomain.\n For example, ``'ldm-151'`` is the slug for ``ldm-151.lsst.io``.\n A full product URL can be provided instead, see ``url``.\n url : `str`, optional\n The full LTD Keeper URL for the product resource. For example,\n ``'https://keeper.lsst.codes/products/ldm-151'``. The ``slug``\n can be provided instead.\n\n Returns\n -------\n product : `dict`\n Product dataset. 
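get_ltd_product_urls and get_ltd_product, described around this point, are thin aiohttp wrappers over the LTD Keeper API; a usage sketch follows, where the asyncio driver and the printed 'slug' field are assumptions about the caller rather than part of the documented interface.

# Minimal asyncio driver for the two LTD Keeper helpers described here.
# (import get_ltd_product_urls / get_ltd_product from their module first)
import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        product_urls = await get_ltd_product_urls(session)
        for url in product_urls[:5]:            # look at the first few products
            product = await get_ltd_product(session, url=url)
            print(url, product.get('slug'))     # 'slug' field assumed present

asyncio.run(main())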
See\n https://ltd-keeper.lsst.io/products.html#get--products-(slug)\n for fields.", "id": "f4201:m1"} {"signature": "def ensure_pandoc(func):", "body": "logger = logging.getLogger(__name__)@functools.wraps(func)def _install_and_run(*args, **kwargs):try:result = func(*args, **kwargs)except OSError:message = \"\"logger.warning(message)pypandoc.download_pandoc(version='')logger.debug(\"\")result = func(*args, **kwargs)return resultreturn _install_and_run", "docstring": "Decorate a function that uses pypandoc to ensure that pandoc is\n installed if necessary.", "id": "f4202:m0"} {"signature": "@ensure_pandocdef convert_text(content, from_fmt, to_fmt, deparagraph=False, mathjax=False,smart=True, extra_args=None):", "body": "logger = logging.getLogger(__name__)if extra_args is not None:extra_args = list(extra_args)else:extra_args = []if mathjax:extra_args.append('')if smart:extra_args.append('')if deparagraph:extra_args.append('')extra_args.append('')extra_args = set(extra_args)logger.debug('',from_fmt, to_fmt, extra_args)output = pypandoc.convert_text(content, to_fmt, format=from_fmt,extra_args=extra_args)return output", "docstring": "Convert text from one markup format to another using pandoc.\n\n This function is a thin wrapper around `pypandoc.convert_text`.\n\n Parameters\n ----------\n content : `str`\n Original content.\n\n from_fmt : `str`\n Format of the original ``content``. Format identifier must be one of\n those known by Pandoc. See https://pandoc.org/MANUAL.html for details.\n\n to_fmt : `str`\n Output format for the content.\n\n deparagraph : `bool`, optional\n If `True`, then the\n `lsstprojectmeta.pandoc.filters.deparagraph.deparagraph` filter is\n used to remove paragraph (``
<p>
``, for example) tags around a single\n paragraph of content. That filter does not affect content that\n consists of multiple blocks (several paragraphs, or lists, for\n example). Default is `False`.\n\n For example, **without** this filter Pandoc will convert\n the string ``\"Title text\"`` to ``\"
<p>Title text</p>
\"`` in HTML. The\n paragraph tags aren't useful if you intend to wrap the converted\n content in different tags, like ``
<h1>
``, using your own templating\n system.\n\n **With** this filter, Pandoc will convert the string ``\"Title text\"``\n to ``\"Title text\"`` in HTML.\n\n mathjax : `bool`, optional\n If `True` then Pandoc will markup output content to work with MathJax.\n Default is False.\n\n smart : `bool`, optional\n If `True` (default) then ascii characters will be converted to unicode\n characters like smart quotes and em dashes.\n\n extra_args : `list`, optional\n Sequence of Pandoc arguments command line arguments (such as\n ``'--normalize'``). The ``deparagraph``, ``mathjax``, and ``smart``\n arguments are convenience arguments that are equivalent to items\n in ``extra_args``.\n\n Returns\n -------\n output : `str`\n Content in the output (``to_fmt``) format.\n\n Notes\n -----\n This function will automatically install Pandoc if it is not available.\n See `ensure_pandoc`.", "id": "f4202:m1"} {"signature": "def convert_lsstdoc_tex(content, to_fmt, deparagraph=False, mathjax=False,smart=True, extra_args=None):", "body": "augmented_content = ''.join((LSSTDOC_MACROS, content))return convert_text(augmented_content, '', to_fmt,deparagraph=deparagraph, mathjax=mathjax,smart=smart, extra_args=extra_args)", "docstring": "Convert lsstdoc-class LaTeX to another markup format.\n\n This function is a thin wrapper around `convert_text` that automatically\n includes common lsstdoc LaTeX macros.\n\n Parameters\n ----------\n content : `str`\n Original content.\n\n to_fmt : `str`\n Output format for the content (see https://pandoc.org/MANUAL.html).\n For example, 'html5'.\n\n deparagraph : `bool`, optional\n If `True`, then the\n `lsstprojectmeta.pandoc.filters.deparagraph.deparagraph` filter is\n used to remove paragraph (``
<p>
``, for example) tags around a single\n paragraph of content. That filter does not affect content that\n consists of multiple blocks (several paragraphs, or lists, for\n example). Default is `False`.\n\n For example, **without** this filter Pandoc will convert\n the string ``\"Title text\"`` to ``\"
<p>Title text</p>
\"`` in HTML. The\n paragraph tags aren't useful if you intend to wrap the converted\n content in different tags, like ``
<h1>
``, using your own templating\n system.\n\n **With** this filter, Pandoc will convert the string ``\"Title text\"``\n to ``\"Title text\"`` in HTML.\n\n mathjax : `bool`, optional\n If `True` then Pandoc will markup output content to work with MathJax.\n Default is False.\n\n smart : `bool`, optional\n If `True` (default) then ascii characters will be converted to unicode\n characters like smart quotes and em dashes.\n\n extra_args : `list`, optional\n Sequence of Pandoc arguments command line arguments (such as\n ``'--normalize'``). The ``deparagraph``, ``mathjax``, and ``smart``\n arguments are convenience arguments that are equivalent to items\n in ``extra_args``.\n\n Returns\n -------\n output : `str`\n Content in the output (``to_fmt``) format.\n\n Notes\n -----\n This function will automatically install Pandoc if it is not available.\n See `ensure_pandoc`.", "id": "f4202:m2"} {"signature": "def deparagraph(element, doc):", "body": "if isinstance(element, Para):if element.next is not None:return elementelif element.prev is not None:return elementreturn Plain(*element.content)", "docstring": "Panflute filter function that converts content wrapped in a Para to\n Plain.\n\n Use this filter with pandoc as::\n\n pandoc [..] --filter=lsstprojectmeta-deparagraph\n\n Only lone paragraphs are affected. Para elements with siblings (like a\n second Para) are left unaffected.\n\n This filter is useful for processing strings like titles or author names so\n that the output isn't wrapped in paragraph tags. For example, without\n this filter, pandoc converts a string ``\"The title\"`` to\n ``
<p>The title</p>
`` in HTML. These ``
<p>
`` tags aren't useful if you\n intend to put the title text in ``
<h1>
`` tags using your own templating\n system.", "id": "f4204:m0"} {"signature": "def main():", "body": "toJSONFilter(deparagraph)", "docstring": "Setuptools entrypoint for the deparagraph CLI.\n\n Use this filter as::\n\n pandoc [..] --filter=lsstprojectmeta-deparagraph", "id": "f4204:m1"} {"signature": "def get_macros(tex_source):", "body": "macros = {}macros.update(get_def_macros(tex_source))macros.update(get_newcommand_macros(tex_source))return macros", "docstring": "r\"\"\"Get all macro definitions from TeX source, supporting multiple\n declaration patterns.\n\n Parameters\n ----------\n tex_source : `str`\n TeX source content.\n\n Returns\n -------\n macros : `dict`\n Keys are macro names (including leading ``\\``) and values are the\n content (as `str`) of the macros.\n\n Notes\n -----\n This function uses the following function to scrape macros of different\n types:\n\n - `get_def_macros`\n - `get_newcommand_macros`\n\n This macro scraping has the following caveats:\n\n - Macro definition (including content) must all occur on one line.\n - Macros with arguments are not supported.", "id": "f4205:m0"} {"signature": "def get_def_macros(tex_source):", "body": "macros = {}for match in DEF_PATTERN.finditer(tex_source):macros[match.group('')] = match.group('')return macros", "docstring": "r\"\"\"Get all ``\\def`` macro definition from TeX source.\n\n Parameters\n ----------\n tex_source : `str`\n TeX source content.\n\n Returns\n -------\n macros : `dict`\n Keys are macro names (including leading ``\\``) and values are the\n content (as `str`) of the macros.\n\n Notes\n -----\n ``\\def`` macros with arguments are not supported.", "id": "f4205:m1"} {"signature": "def get_newcommand_macros(tex_source):", "body": "macros = {}command = LatexCommand('',{'': '', '': True, '': ''},{'': '', '': True, '': ''})for macro in command.parse(tex_source):macros[macro['']] = macro['']return macros", "docstring": "r\"\"\"Get all ``\\newcommand`` macro definition from TeX source.\n\n Parameters\n ----------\n tex_source : `str`\n TeX source content.\n\n Returns\n -------\n macros : `dict`\n Keys are macro names (including leading ``\\``) and values are the\n content (as `str`) of the macros.\n\n Notes\n -----\n ``\\newcommand`` macros with arguments are not supported.", "id": "f4205:m2"} {"signature": "def __call__(self, tex_source):", "body": "for linker in self._linkers:tex_source = linker(tex_source)return tex_source", "docstring": "r\"\"\"Convert citations in LaTeX source to Hyperref links.\n\n Parameters\n ----------\n tex_source : `str`\n LaTeX document source.\n\n Returns\n -------\n processed_tex : `str`\n LaTeX document source with all citation commands converted to\n ``\\hyperref`` commands.", "id": "f4206:c0:m1"} {"signature": "def __call__(self, tex_source):", "body": "while True:try:parsed = next(self.command.parse(tex_source))except StopIteration:breaktex_source = self._replace_command(tex_source, parsed)return tex_source", "docstring": "r\"\"\"Convert commands of type ``command`` in LaTeX source to Hyperref\n links.\n\n Parameters\n ----------\n tex_source : `str`\n LaTeX document source.\n\n Returns\n -------\n processed_tex : `str`\n LaTeX document source with commands of type ``command`` to\n ``\\hyperref`` commands.", "id": "f4206:c1:m0"} {"signature": "def remove_comments(tex_source):", "body": "return re.sub(r'', r'', tex_source, flags=re.M)", "docstring": "Delete latex comments from TeX source.\n\n Parameters\n ----------\n tex_source : str\n TeX source content.\n\n Returns\n -------\n 
tex_source : str\n TeX source without comments.", "id": "f4207:m0"} {"signature": "def remove_trailing_whitespace(tex_source):", "body": "return re.sub(r'', '', tex_source, flags=re.M)", "docstring": "Delete trailing whitespace from TeX source.\n\n Parameters\n ----------\n tex_source : str\n TeX source content.\n\n Returns\n -------\n tex_source : str\n TeX source without trailing whitespace.", "id": "f4207:m1"} {"signature": "def read_tex_file(root_filepath, root_dir=None):", "body": "with open(root_filepath, '') as f:tex_source = f.read()if root_dir is None:root_dir = os.path.dirname(root_filepath)tex_source = remove_comments(tex_source)tex_source = remove_trailing_whitespace(tex_source)tex_source = process_inputs(tex_source, root_dir=root_dir)return tex_source", "docstring": "r\"\"\"Read a TeX file, automatically processing and normalizing it\n (including other input files, removing comments, and deleting trailing\n whitespace).\n\n Parameters\n ----------\n root_filepath : `str`\n Filepath to a TeX file.\n root_dir : `str`\n Root directory of the TeX project. This only needs to be set when\n recursively reading in ``\\input`` or ``\\include`` files.\n\n Returns\n -------\n tex_source : `str`\n TeX source.", "id": "f4207:m2"} {"signature": "def process_inputs(tex_source, root_dir=None):", "body": "logger = logging.getLogger(__name__)def _sub_line(match):\"\"\"\"\"\"fname = match.group('')if not fname.endswith(''):full_fname = \"\".join((fname, ''))else:full_fname = fnamefull_path = os.path.abspath(os.path.join(root_dir, full_fname))try:included_source = read_tex_file(full_path, root_dir=root_dir)except IOError:logger.error(\"\".format(full_path))raiseelse:return included_sourcetex_source = input_include_pattern.sub(_sub_line, tex_source)return tex_source", "docstring": "r\"\"\"Insert referenced TeX file contents (from ``\\input`` and ``\\include``\n commands) into the source.\n\n Parameters\n ----------\n tex_source : `str`\n TeX source where referenced source files will be found and inserted.\n root_dir : `str`, optional\n Name of the directory containing the TeX project's root file. Files\n referenced by TeX ``\\input`` and ``\\include`` commands are relative to\n this directory. If not set, the current working directory is assumed.\n\n Returns\n -------\n tex_source : `str`\n TeX source.\n\n See also\n --------\n `read_tex_file`\n Recommended API for reading a root TeX source file and inserting\n referenced files.", "id": "f4207:m3"} {"signature": "def replace_macros(tex_source, macros):", "body": "for macro_name, macro_content in macros.items():pattern = re.escape(macro_name) + r\"\"tex_source = re.sub(pattern, lambda _: macro_content, tex_source)return tex_source", "docstring": "r\"\"\"Replace macros in the TeX source with their content.\n\n Parameters\n ----------\n tex_source : `str`\n TeX source content.\n macros : `dict`\n Keys are macro names (including leading ``\\``) and values are the\n content (as `str`) of the macros. 
See\n `lsstprojectmeta.tex.scraper.get_macros`.\n\n Returns\n -------\n tex_source : `str`\n TeX source with known macros replaced.\n\n Notes\n -----\n Macros with arguments are not supported.\n\n Examples\n --------\n >>> macros = {r'\\handle': 'LDM-nnn'}\n >>> sample = r'This is document \\handle.'\n >>> replace_macros(sample, macros)\n 'This is document LDM-nnn.'\n\n Any trailing slash after the macro command is also replaced by this\n function.\n\n >>> macros = {r'\\product': 'Data Management'}\n >>> sample = r'\\title [Test Plan] { \\product\\ Test Plan}'\n >>> replace_macros(sample, macros)\n '\\\\title [Test Plan] { Data Management Test Plan}'", "id": "f4207:m4"} {"signature": "@classmethoddef read(cls, root_tex_path):", "body": "root_dir = os.path.dirname(root_tex_path)tex_source = read_tex_file(root_tex_path)tex_macros = get_macros(tex_source)tex_source = replace_macros(tex_source, tex_macros)return cls(tex_source, root_dir=root_dir)", "docstring": "Construct an `LsstLatexDoc` instance by reading and parsing the\n LaTeX source.\n\n Parameters\n ----------\n root_tex_path : `str`\n Path to the LaTeX source on the filesystem. For multi-file LaTeX\n projects this should be the path to the root document.\n\n Notes\n -----\n This method implements the following pipeline:\n\n 1. `lsstprojectmeta.tex.normalizer.read_tex_file`\n 2. `lsstprojectmeta.tex.scraper.get_macros`\n 3. `lsstprojectmeta.tex.normalizer.replace_macros`\n\n Thus ``input`` and ``includes`` are resolved along with simple macros.", "id": "f4210:c0:m1"} {"signature": "@propertydef plain_content(self):", "body": "return self.format_content(format='', mathjax=False, smart=True)", "docstring": "Plain-text-formatted document content (`str`).", "id": "f4210:c0:m2"} {"signature": "@propertydef html_title(self):", "body": "return self.format_title(format='', deparagraph=True,mathjax=False, smart=True)", "docstring": "HTML5-formatted document title (`str`).", "id": "f4210:c0:m3"} {"signature": "@propertydef plain_title(self):", "body": "return self.format_title(format='', deparagraph=True,mathjax=False, smart=True)", "docstring": "Plain-text-formatted document title (`str`).", "id": "f4210:c0:m4"} {"signature": "@propertydef title(self):", "body": "if not hasattr(self, ''):self._parse_title()return self._title", "docstring": "LaTeX-formatted document title (`str`).", "id": "f4210:c0:m5"} {"signature": "@propertydef html_short_title(self):", "body": "return self.format_short_title(format='', deparagraph=True,mathjax=False, smart=True)", "docstring": "HTML5-formatted document short title (`str`).", "id": "f4210:c0:m6"} {"signature": "@propertydef plain_short_title(self):", "body": "return self.format_short_title(format='', deparagraph=True,mathjax=False, smart=True)", "docstring": "Plaintext-formatted document short title (`str`).", "id": "f4210:c0:m7"} {"signature": "@propertydef short_title(self):", "body": "if not hasattr(self, ''):self._parse_title()return self._short_title", "docstring": "LaTeX-formatted document short title (`str`).", "id": "f4210:c0:m8"} {"signature": "@propertydef html_authors(self):", "body": "return self.format_authors(format='', deparagraph=True,mathjax=False, smart=True)", "docstring": "HTML5-formatted authors (`list` of `str`).", "id": "f4210:c0:m9"} {"signature": "@propertydef plain_authors(self):", "body": "return self.format_authors(format='', deparagraph=True,mathjax=False, smart=True)", "docstring": "Plaintext-formatted authors (`list` of `str`).", "id": "f4210:c0:m10"} {"signature": "@propertydef 
authors(self):", "body": "if not hasattr(self, ''):self._parse_author()return self._authors", "docstring": "LaTeX-formatted authors (`list` of `str`).", "id": "f4210:c0:m11"} {"signature": "@propertydef html_abstract(self):", "body": "return self.format_abstract(format='', deparagraph=False,mathjax=False, smart=True)", "docstring": "HTML5-formatted document abstract (`str`).", "id": "f4210:c0:m12"} {"signature": "@propertydef plain_abstract(self):", "body": "return self.format_abstract(format='', deparagraph=False,mathjax=False, smart=True)", "docstring": "Plaintext-formatted document abstract (`str`).", "id": "f4210:c0:m13"} {"signature": "@propertydef abstract(self):", "body": "if not hasattr(self, ''):self._parse_abstract()return self._abstract", "docstring": "LaTeX-formatted abstract (`str`).", "id": "f4210:c0:m14"} {"signature": "@propertydef handle(self):", "body": "if not hasattr(self, ''):self._parse_doc_ref()return self._handle", "docstring": "LaTeX-formatted document handle (`str`).", "id": "f4210:c0:m15"} {"signature": "@propertydef series(self):", "body": "if not hasattr(self, ''):self._parse_doc_ref()return self._series", "docstring": "Document series identifier (`str`).", "id": "f4210:c0:m16"} {"signature": "@propertydef serial(self):", "body": "if not hasattr(self, ''):self._parse_doc_ref()return self._serial", "docstring": "Document serial number within series (`str`).", "id": "f4210:c0:m17"} {"signature": "@propertydef is_draft(self):", "body": "if not hasattr(self, ''):self._parse_documentclass()if '' in self._document_options:return Trueelse:return False", "docstring": "Document is a draft if ``'lsstdoc'`` is included in the\n documentclass options (`bool`).", "id": "f4210:c0:m18"} {"signature": "@propertydef revision_datetime(self):", "body": "if not hasattr(self, ''):self._parse_revision_date()return self._datetime", "docstring": "Current revision date of the document (`datetime.datetime`).\n\n The `revision_datetime_source` describes how the revision date\n is computed.\n\n This ``revision datetime`` is cached the first time you access it. This\n means that a datetime computed via ``now`` or ``git`` will not change\n during the lifetime of an `LsstLatexDoc` object.", "id": "f4210:c0:m19"} {"signature": "@propertydef revision_datetime_source(self):", "body": "if not hasattr(self, ''):self._parse_revision_date()return self._revision_datetime_source", "docstring": "r\"\"\"Data source for the `revision_datetime` attribute (`str`).\n\n Possible string values are:\n\n - ``'tex'``: The document revision date is defined in the ``\\date``\n command. ``YYYY-MM-DD`` dates are converted to UTC datetimes by\n assuming the document is released at the beginning of the day in the\n ``US/Pacific`` timezone. Note: the ``\\date`` command is ignored\n for draft documents (`is_draft` is `True`) so that drafts always\n fall back to ``'git'`` or ``'now'``.\n\n - ``'git'``: The latest Git commit's timestamp that affected document\n content. Content is considered any file with a ``tex``, ``bib``,\n ``pdf``, ``jpg``, or ``png`` extension. Git timestamps are used when\n the ``\\date`` command is missing or can't be parsed.\n\n - ``'now'``: The current date and time. This source is used as a\n fallback when the LaTeX and Git-based methods of determining a\n document's date fail.\n\n The `revision datetime` is cached the first time you access it. 
This\n means that a datetime computed via ``now`` or ``git`` will not change\n during the lifetime of an `LsstLatexDoc` object.", "id": "f4210:c0:m20"} {"signature": "@propertydef bib_db(self):", "body": "if self._bib_db is None:self._load_bib_db()return self._bib_db", "docstring": "Bibliography database referenced by the document\n (`pybtex.database.BibliographyData`).", "id": "f4210:c0:m21"} {"signature": "def format_content(self, format='', mathjax=False,smart=True, extra_args=None):", "body": "output_text = convert_lsstdoc_tex(self._tex, format,mathjax=mathjax,smart=smart,extra_args=extra_args)return output_text", "docstring": "Get the document content in the specified markup format.\n\n Parameters\n ----------\n format : `str`, optional\n Output format (such as ``'html5'`` or ``'plain'``).\n mathjax : `bool`, optional\n Allow pandoc to use MathJax math markup.\n smart : `True`, optional\n Allow pandoc to create \"smart\" unicode punctuation.\n extra_args : `list`, optional\n Additional command line flags to pass to Pandoc. See\n `lsstprojectmeta.pandoc.convert.convert_text`.\n\n Returns\n -------\n output_text : `str`\n Converted content.", "id": "f4210:c0:m22"} {"signature": "def format_title(self, format='', deparagraph=True, mathjax=False,smart=True, extra_args=None):", "body": "if self.title is None:return Noneoutput_text = convert_lsstdoc_tex(self.title, format,deparagraph=deparagraph,mathjax=mathjax,smart=smart,extra_args=extra_args)return output_text", "docstring": "Get the document title in the specified markup format.\n\n Parameters\n ----------\n format : `str`, optional\n Output format (such as ``'html5'`` or ``'plain'``).\n deparagraph : `bool`, optional\n Remove the paragraph tags from single paragraph content.\n mathjax : `bool`, optional\n Allow pandoc to use MathJax math markup.\n smart : `True`, optional\n Allow pandoc to create \"smart\" unicode punctuation.\n extra_args : `list`, optional\n Additional command line flags to pass to Pandoc. See\n `lsstprojectmeta.pandoc.convert.convert_text`.\n\n Returns\n -------\n output_text : `str`\n Converted content or `None` if the title is not available in\n the document.", "id": "f4210:c0:m23"} {"signature": "def format_short_title(self, format='', deparagraph=True,mathjax=False, smart=True, extra_args=None):", "body": "if self.short_title is None:return Noneoutput_text = convert_lsstdoc_tex(self.short_title, '',deparagraph=deparagraph,mathjax=mathjax,smart=smart,extra_args=extra_args)return output_text", "docstring": "Get the document short title in the specified markup format.\n\n Parameters\n ----------\n format : `str`, optional\n Output format (such as ``'html5'`` or ``'plain'``).\n deparagraph : `bool`, optional\n Remove the paragraph tags from single paragraph content.\n mathjax : `bool`, optional\n Allow pandoc to use MathJax math markup.\n smart : `True`, optional\n Allow pandoc to create \"smart\" unicode punctuation.\n extra_args : `list`, optional\n Additional command line flags to pass to Pandoc. 
See\n `lsstprojectmeta.pandoc.convert.convert_text`.\n\n Returns\n -------\n output_text : `str`\n Converted content or `None` if the short title is not available in\n the document.", "id": "f4210:c0:m24"} {"signature": "def format_abstract(self, format='', deparagraph=False, mathjax=False,smart=True, extra_args=None):", "body": "if self.abstract is None:return Noneabstract_latex = self._prep_snippet_for_pandoc(self.abstract)output_text = convert_lsstdoc_tex(abstract_latex, format,deparagraph=deparagraph,mathjax=mathjax,smart=smart,extra_args=extra_args)return output_text", "docstring": "Get the document abstract in the specified markup format.\n\n Parameters\n ----------\n format : `str`, optional\n Output format (such as ``'html5'`` or ``'plain'``).\n deparagraph : `bool`, optional\n Remove the paragraph tags from single paragraph content.\n mathjax : `bool`, optional\n Allow pandoc to use MathJax math markup.\n smart : `True`, optional\n Allow pandoc to create \"smart\" unicode punctuation.\n extra_args : `list`, optional\n Additional command line flags to pass to Pandoc. See\n `lsstprojectmeta.pandoc.convert.convert_text`.\n\n Returns\n -------\n output_text : `str`\n Converted content or `None` if the title is not available in\n the document.", "id": "f4210:c0:m25"} {"signature": "def format_authors(self, format='', deparagraph=True, mathjax=False,smart=True, extra_args=None):", "body": "formatted_authors = []for latex_author in self.authors:formatted_author = convert_lsstdoc_tex(latex_author, format,deparagraph=deparagraph,mathjax=mathjax,smart=smart,extra_args=extra_args)formatted_author = formatted_author.strip()formatted_authors.append(formatted_author)return formatted_authors", "docstring": "Get the document authors in the specified markup format.\n\n Parameters\n ----------\n format : `str`, optional\n Output format (such as ``'html5'`` or ``'plain'``).\n deparagraph : `bool`, optional\n Remove the paragraph tags from single paragraph content.\n mathjax : `bool`, optional\n Allow pandoc to use MathJax math markup.\n smart : `True`, optional\n Allow pandoc to create \"smart\" unicode punctuation.\n extra_args : `list`, optional\n Additional command line flags to pass to Pandoc. 
See\n `lsstprojectmeta.pandoc.convert.convert_text`.\n\n Returns\n -------\n output_text : `list` of `str`\n Sequence of author names in the specified output markup format.", "id": "f4210:c0:m26"} {"signature": "def _parse_documentclass(self):", "body": "command = LatexCommand('',{'': '', '': False, '': ''},{'': '', '': True, '': ''})try:parsed = next(command.parse(self._tex))except StopIteration:self._logger.warning('')self._document_options = []try:content = parsed['']self._document_options = [opt.strip()for opt in content.split('')]except KeyError:self._logger.warning('')self._document_options = []", "docstring": "Parse documentclass options.\n\n Sets the the ``_document_options`` attribute.", "id": "f4210:c0:m27"} {"signature": "def _parse_title(self):", "body": "command = LatexCommand('',{'': '', '': False, '': ''},{'': '', '': True, '': ''})try:parsed = next(command.parse(self._tex))except StopIteration:self._logger.warning('')self._title = Noneself._short_title = Noneself._title = parsed['']try:self._short_title = parsed['']except KeyError:self._logger.warning('')self._short_title = None", "docstring": "Parse the title from TeX source.\n\n Sets these attributes:\n\n - ``_title``\n - ``_short_title``", "id": "f4210:c0:m28"} {"signature": "def _parse_doc_ref(self):", "body": "command = LatexCommand('',{'': '', '': True, '': ''})try:parsed = next(command.parse(self._tex))except StopIteration:self._logger.warning('')self._handle = Noneself._series = Noneself._serial = Nonereturnself._handle = parsed['']try:self._series, self._serial = self._handle.split('', )except ValueError:self._logger.warning('''', self._handle)self._series = Noneself._serial = None", "docstring": "Parse the document handle.\n\n Sets the ``_series``, ``_serial``, and ``_handle`` attributes.", "id": "f4210:c0:m29"} {"signature": "def _parse_author(self):", "body": "command = LatexCommand('',{'': '', '': True, '': ''})try:parsed = next(command.parse(self._tex))except StopIteration:self._logger.warning('')self._authors = []returntry:content = parsed['']except KeyError:self._logger.warning('')self._authors = []returncontent = content.replace('', '')content = content.replace('', '')content = content.strip()authors = []for part in content.split(''):part = part.strip()for split_part in part.split(''):split_part = split_part.strip()if len(split_part) > :authors.append(split_part)self._authors = authors", "docstring": "r\"\"\"Parse the author from TeX source.\n\n Sets the ``_authors`` attribute.\n\n Goal is to parse::\n\n \\author{\n A.~Author,\n B.~Author,\n and\n C.~Author}\n\n Into::\n\n ['A. Author', 'B. Author', 'C. 
Author']", "id": "f4210:c0:m30"} {"signature": "def _parse_abstract(self):", "body": "command = LatexCommand('',{'': '', '': True, '': ''})try:parsed = next(command.parse(self._tex))except StopIteration:self._logger.warning('')self._abstract = Nonereturntry:content = parsed['']except KeyError:self._logger.warning('')self._abstract = Nonereturncontent = content.strip()self._abstract = content", "docstring": "Parse the abstract from the TeX source.\n\n Sets the ``_abstract`` attribute.", "id": "f4210:c0:m31"} {"signature": "def _prep_snippet_for_pandoc(self, latex_text):", "body": "replace_cite = CitationLinker(self.bib_db)latex_text = replace_cite(latex_text)return latex_text", "docstring": "Process a LaTeX snippet of content for better transformation\n with pandoc.\n\n Currently runs the CitationLinker to convert BibTeX citations to\n href links.", "id": "f4210:c0:m32"} {"signature": "def _load_bib_db(self):", "body": "command = LatexCommand('',{'': '', '': True, '': ''})try:parsed = next(command.parse(self._tex))bib_names = [n.strip() for n in parsed[''].split('')]except StopIteration:self._logger.warning('')bib_names = []custom_bib_names = [n for n in bib_namesif n not in KNOWN_LSSTTEXMF_BIB_NAMES]custom_bibs = []for custom_bib_name in custom_bib_names:custom_bib_path = os.path.join(os.path.join(self._root_dir),custom_bib_name + '')if not os.path.exists(custom_bib_path):self._logger.warning('',custom_bib_path)continuewith open(custom_bib_path, '') as file_handle:custom_bibs.append(file_handle.read())if len(custom_bibs) > :custom_bibtex = ''.join(custom_bibs)else:custom_bibtex = Nonedb = get_bibliography(bibtex=custom_bibtex)self._bib_db = db", "docstring": "r\"\"\"Load the BibTeX bibliography referenced by the document.\n\n This method triggered by the `bib_db` attribute and populates the\n `_bib_db` private attribute.\n\n The ``\\bibliography`` command is parsed to identify the bibliographies\n referenced by the document.", "id": "f4210:c0:m33"} {"signature": "def _parse_revision_date(self):", "body": "doc_datetime = Noneif not self.is_draft:date_command = LatexCommand('',{'': '', '': True, '': ''})try:parsed = next(date_command.parse(self._tex))command_content = parsed[''].strip()except StopIteration:command_content = Noneself._logger.warning('')if command_content is not None and command_content != r'':try:doc_datetime = datetime.datetime.strptime(command_content,'')project_tz = timezone('')localized_datetime = project_tz.localize(doc_datetime)doc_datetime = localized_datetime.astimezone(pytz.utc)self._revision_datetime_source = ''except ValueError:self._logger.warning('''',command_content)if doc_datetime is None:content_extensions = ('', '', '', '', '')try:doc_datetime = get_content_commit_date(content_extensions,root_dir=self._root_dir)self._revision_datetime_source = ''except RuntimeError:self._logger.warning('''',self._root_dir)if doc_datetime is None:doc_datetime = pytz.utc.localize(datetime.datetime.now())self._revision_datetime_source = ''self._datetime = doc_datetime", "docstring": "r\"\"\"Parse the ``\\date`` command, falling back to getting the\n most recent Git commit date and the current datetime.\n\n Result is available from the `revision_datetime` attribute.", "id": "f4210:c0:m34"} {"signature": "def build_jsonld(self, url=None, code_url=None, ci_url=None,readme_url=None, license_id=None):", "body": "jsonld = {'': [\"\"\"\",\"\"],'': ['', ''],'': '','': self.handle,'': self.plain_title,'': self.plain_abstract,'': [{'': '', '': author_name}for author_name in 
self.plain_authors],'': self.revision_datetime}try:jsonld[''] = self.plain_contentjsonld[''] = '' except RuntimeError:self._logger.exception('''')self._logger.warning('')jsonld[''] = self._texjsonld[''] = '' if url is not None:jsonld[''] = urljsonld[''] = urlelse:jsonld[''] = self.handleif code_url is not None:jsonld[''] = code_urlif ci_url is not None:jsonld[''] = ci_urlif readme_url is not None:jsonld[''] = readme_urlif license_id is not None:jsonld[''] = Nonereturn jsonld", "docstring": "Create a JSON-LD representation of this LSST LaTeX document.\n\n Parameters\n ----------\n url : `str`, optional\n URL where this document is published to the web. Prefer\n the LSST the Docs URL if possible.\n Example: ``'https://ldm-151.lsst.io'``.\n code_url : `str`, optional\n Path the the document's repository, typically on GitHub.\n Example: ``'https://github.com/lsst/LDM-151'``.\n ci_url : `str`, optional\n Path to the continuous integration service dashboard for this\n document's repository.\n Example: ``'https://travis-ci.org/lsst/LDM-151'``.\n readme_url : `str`, optional\n URL to the document repository's README file. Example:\n ``https://raw.githubusercontent.com/lsst/LDM-151/master/README.rst``.\n license_id : `str`, optional\n License identifier, if known. The identifier should be from the\n listing at https://spdx.org/licenses/. Example: ``CC-BY-4.0``.\n\n Returns\n -------\n jsonld : `dict`\n JSON-LD-formatted dictionary.", "id": "f4210:c0:m35"} {"signature": "def parse(self, source):", "body": "command_regex = self._make_command_regex(self.name)for match in re.finditer(command_regex, source):self._logger.debug(match)start_index = match.start()yield self._parse_command(source, start_index)", "docstring": "Parse command content from the LaTeX source.\n\n Parameters\n ----------\n source : `str`\n The full source of the tex document.\n\n Yields\n ------\n parsed_command : `ParsedCommand`\n Yields parsed commands instances for each occurence of the command\n in the source.", "id": "f4211:c0:m1"} {"signature": "@staticmethoddef _make_command_regex(name):", "body": "return r'' + name + r''", "docstring": "r\"\"\"Given a command name, build a regular expression to detect that\n command in TeX source.\n\n The regular expression is designed to discern \"\\title{..}\" from\n \"\\titlename{..}\". 
It does this by ensuring that the command is\n followed by a whitespace character, argument brackets, or a comment\n character.\n\n Parameters\n ----------\n name : `str`\n Name of the command (with a backslash prefix).\n\n Returns\n -------\n regex : `str`\n Regular expression pattern for detecting the command.", "id": "f4211:c0:m2"} {"signature": "def _parse_command(self, source, start_index):", "body": "parsed_elements = []running_index = start_indexfor element in self.elements:opening_bracket = element['']closing_bracket = self._brackets[opening_bracket]element_start = Noneelement_end = Nonefor i, c in enumerate(source[running_index:], start=running_index):if c == element['']:element_start = ibreakelif c == '':if element[''] is True:content = self._parse_whitespace_argument(source[running_index:],self.name)return ParsedCommand(self.name,[{'': element[''],'': element[''],'': content.strip()}],start_index,source[start_index:i])else:breakif element_start is None and element[''] is False:continueelif element_start is None and element[''] is True:message = (''''.format(self.name,start_index,element['']))raise CommandParserError(message)balance = for i, c in enumerate(source[element_start + :],start=element_start + ):if c == opening_bracket:balance += elif c == closing_bracket:balance -= if balance == :element_end = ibreakif balance > :message = (''''''.format(self.name,start_index,element['']))raise CommandParserError(message)element_content = source[element_start + :element_end]parsed_element = {'': element[''],'': element[''],'': element_content.strip()}parsed_elements.append(parsed_element)running_index = element_end + command_source = source[start_index:running_index]parsed_command = ParsedCommand(self.name, parsed_elements,start_index, command_source)return parsed_command", "docstring": "Parse a single command.\n\n Parameters\n ----------\n source : `str`\n The full source of the tex document.\n start_index : `int`\n Character index in ``source`` where the command begins.\n\n Returns\n -------\n parsed_command : `ParsedCommand`\n The parsed command from the source at the given index.", "id": "f4211:c0:m3"} {"signature": "@staticmethoddef _parse_whitespace_argument(source, name):", "body": "command_pattern = r'' + name + r''command_match = re.search(command_pattern, source)if command_match is not None:source = source[command_match.end():]pattern = r''match = re.search(pattern, source)if match is None:message = ('''')raise CommandParserError(message.format(name))content = match.group('')content.strip()return content", "docstring": "r\"\"\"Attempt to parse a single token on the first line of this source.\n\n This method is used for parsing whitespace-delimited arguments, like\n ``\\input file``. 
The source should ideally contain `` file`` along\n with a newline character.\n\n >>> source = 'Line 1\\n' r'\\input test.tex' '\\nLine 2'\n >>> LatexCommand._parse_whitespace_argument(source, 'input')\n 'test.tex'\n\n Bracket delimited arguments (``\\input{test.tex}``) are handled in\n the normal logic of `_parse_command`.", "id": "f4211:c0:m4"} {"signature": "async def _download_text(url, session):", "body": "logger = logging.getLogger(__name__)async with session.get(url) as response:logger.info('', url)return await response.text()", "docstring": "Asynchronously request a URL and get the encoded text content of the\n body.\n\n Parameters\n ----------\n url : `str`\n URL to download.\n session : `aiohttp.ClientSession`\n An open aiohttp session.\n\n Returns\n -------\n content : `str`\n Content downloaded from the URL.", "id": "f4212:m0"} {"signature": "async def _download_lsst_bibtex(bibtex_names):", "body": "blob_url_template = ('''')urls = [blob_url_template.format(name=name) for name in bibtex_names]tasks = []async with ClientSession() as session:for url in urls:task = asyncio.ensure_future(_download_text(url, session))tasks.append(task)return await asyncio.gather(*tasks)", "docstring": "Asynchronously download a set of lsst-texmf BibTeX bibliographies from\n GitHub.\n\n Parameters\n ----------\n bibtex_names : sequence of `str`\n Names of lsst-texmf BibTeX files to download. For example:\n\n .. code-block:: python\n\n ['lsst', 'lsst-dm', 'refs', 'books', 'refs_ads']\n\n Returns\n -------\n bibtexs : `list` of `str`\n List of BibTeX file content, in the same order as ``bibtex_names``.", "id": "f4212:m1"} {"signature": "def get_lsst_bibtex(bibtex_filenames=None):", "body": "logger = logging.getLogger(__name__)if bibtex_filenames is None:bibtex_names = KNOWN_LSSTTEXMF_BIB_NAMESelse:bibtex_names = []for filename in bibtex_filenames:name = os.path.basename(os.path.splitext(filename)[])if name not in KNOWN_LSSTTEXMF_BIB_NAMES:logger.warning('',name)continuebibtex_names.append(name)uncached_names = [name for name in bibtex_namesif name not in _LSSTTEXMF_BIB_CACHE]if len(uncached_names) > :loop = asyncio.get_event_loop()future = asyncio.ensure_future(_download_lsst_bibtex(uncached_names))loop.run_until_complete(future)for name, text in zip(bibtex_names, future.result()):_LSSTTEXMF_BIB_CACHE[name] = textreturn {name: _LSSTTEXMF_BIB_CACHE[name] for name in bibtex_names}", "docstring": "Get content of lsst-texmf bibliographies.\n\n BibTeX content is downloaded from GitHub (``master`` branch of\n https://github.com/lsst/lsst-texmf or retrieved from an in-memory cache.\n\n Parameters\n ----------\n bibtex_filenames : sequence of `str`, optional\n List of lsst-texmf BibTeX files to retrieve. These can be the filenames\n of lsst-bibtex files (for example, ``['lsst.bib', 'lsst-dm.bib']``)\n or names without an extension (``['lsst', 'lsst-dm']``). The default\n (recommended) is to get *all* lsst-texmf bibliographies:\n\n .. code-block:: python\n\n ['lsst', 'lsst-dm', 'refs', 'books', 'refs_ads']\n\n Returns\n -------\n bibtex : `dict`\n Dictionary with keys that are bibtex file names (such as ``'lsst'``,\n ``'lsst-dm'``). 
Values are the corresponding bibtex file content\n (`str`).", "id": "f4212:m2"} {"signature": "def get_bibliography(lsst_bib_names=None, bibtex=None):", "body": "bibtex_data = get_lsst_bibtex(bibtex_filenames=lsst_bib_names)pybtex_data = [pybtex.database.parse_string(_bibtex, '')for _bibtex in bibtex_data.values()]if bibtex is not None:pybtex_data.append(pybtex.database.parse_string(bibtex, ''))bib = pybtex_data[]if len(pybtex_data) > :for other_bib in pybtex_data[:]:for key, entry in other_bib.entries.items():bib.add_entry(key, entry)return bib", "docstring": "Make a pybtex BibliographyData instance from standard lsst-texmf\n bibliography files and user-supplied bibtex content.\n\n Parameters\n ----------\n lsst_bib_names : sequence of `str`, optional\n Names of lsst-texmf BibTeX files to include. For example:\n\n .. code-block:: python\n\n ['lsst', 'lsst-dm', 'refs', 'books', 'refs_ads']\n\n Default is `None`, which includes all lsst-texmf bibtex files.\n\n bibtex : `str`\n BibTeX source content not included in lsst-texmf. This can be content\n from a import ``local.bib`` file.\n\n Returns\n -------\n bibliography : `pybtex.database.BibliographyData`\n A pybtex bibliography database that includes all given sources:\n lsst-texmf bibliographies and ``bibtex``.", "id": "f4212:m3"} {"signature": "def get_url_from_entry(entry):", "body": "if '' in entry.fields:return entry.fields['']elif entry.type.lower() == '':return '' + entry.fields['']elif '' in entry.fields:return entry.fields['']elif '' in entry.fields:return '' + entry.fields['']else:raise NoEntryUrlError()", "docstring": "Get a usable URL from a pybtex entry.\n\n Parameters\n ----------\n entry : `pybtex.database.Entry`\n A pybtex bibliography entry.\n\n Returns\n -------\n url : `str`\n Best available URL from the ``entry``.\n\n Raises\n ------\n NoEntryUrlError\n Raised when no URL can be made from the bibliography entry.\n\n Notes\n -----\n The order of priority is:\n\n 1. ``url`` field\n 2. ``ls.st`` URL from the handle for ``@docushare`` entries.\n 3. ``adsurl``\n 4. DOI", "id": "f4212:m4"} {"signature": "def get_authoryear_from_entry(entry, paren=False):", "body": "def _format_last(person):\"\"\"\"\"\"return ''.join([n.strip('') for n in person.last_names])if len(entry.persons['']) > :persons = entry.persons['']elif len(entry.persons['']) > :persons = entry.persons['']else:raise AuthorYearErrortry:year = entry.fields['']except KeyError:raise AuthorYearErrorif paren and len(persons) == :template = ''return template.format(author=_format_last(persons[]),year=year)elif not paren and len(persons) == :template = ''return template.format(author=_format_last(persons[]),year=year)elif paren and len(persons) == :template = ''return template.format(author1=_format_last(persons[]),author2=_format_last(persons[]),year=year)elif not paren and len(persons) == :template = ''return template.format(author1=_format_last(persons[]),author2=_format_last(persons[]),year=year)elif not paren and len(persons) > :template = ''return template.format(author=_format_last(persons[]),year=year)elif paren and len(persons) > :template = ''return template.format(author=_format_last(persons[]),year=year)", "docstring": "Get and format author-year text from a pybtex entry to emulate\n natbib citations.\n\n Parameters\n ----------\n entry : `pybtex.database.Entry`\n A pybtex bibliography entry.\n parens : `bool`, optional\n Whether to add parentheses around the year. 
Default is `False`.\n\n Returns\n -------\n authoryear : `str`\n The author-year citation text.", "id": "f4212:m5"} {"signature": "def get_installation_token(installation_id, integration_jwt):", "body": "api_root = ''url = ''.format(api_root=api_root,id_=installation_id)headers = {'': ''.format(integration_jwt.decode('')),'': ''}resp = requests.post(url, headers=headers)resp.raise_for_status()return resp.json()", "docstring": "Create a GitHub token for an integration installation.\n\n Parameters\n ----------\n installation_id : `int`\n Installation ID. This is available in the URL of the integration's\n **installation** ID.\n integration_jwt : `bytes`\n The integration's JSON Web Token (JWT). You can create this with\n `create_jwt`.\n\n Returns\n -------\n token_obj : `dict`\n GitHub token object. Includes the fields:\n\n - ``token``: the token string itself.\n - ``expires_at``: date time string when the token expires.\n\n Example\n -------\n The typical workflow for authenticating to an integration installation is:\n\n .. code-block:: python\n\n from dochubadapter.github import auth\n jwt = auth.create_jwt(integration_id, private_key_path)\n token_obj = auth.get_installation_token(installation_id, jwt)\n print(token_obj['token'])\n\n Notes\n -----\n See\n https://developer.github.com/early-access/integrations/authentication/#as-an-installation\n for more information", "id": "f4213:m0"} {"signature": "def create_jwt(integration_id, private_key_path):", "body": "integration_id = int(integration_id)with open(private_key_path, '') as f:cert_bytes = f.read()now = datetime.datetime.now()expiration_time = now + datetime.timedelta(minutes=)payload = {'': int(now.timestamp()),'': int(expiration_time.timestamp()),'': integration_id}return jwt.encode(payload, cert_bytes, algorithm='')", "docstring": "Create a JSON Web Token to authenticate a GitHub Integration or\n installation.\n\n Parameters\n ----------\n integration_id : `int`\n Integration ID. This is available from the GitHub integration's\n homepage.\n private_key_path : `str`\n Path to the integration's private key (a ``.pem`` file).\n\n Returns\n -------\n jwt : `bytes`\n JSON Web Token that is good for 9 minutes.\n\n Notes\n -----\n The JWT is encoded with the RS256 algorithm. It includes a payload with\n fields:\n\n - ``'iat'``: The current time, as an `int` timestamp.\n - ``'exp'``: Expiration time, as an `int timestamp. 
The expiration\n time is set of 9 minutes in the future (maximum allowance is 10 minutes).\n - ``'iss'``: The integration ID (`int`).\n\n For more information, see\n https://developer.github.com/early-access/integrations/authentication/.", "id": "f4213:m1"} {"signature": "def parse_repo_slug_from_url(github_url):", "body": "match = GITHUB_SLUG_PATTERN.match(github_url)if not match:message = ''.format(github_url)raise RuntimeError(message)_full = ''.join((match.group(''),match.group('')))return RepoSlug(_full, match.group(''), match.group(''))", "docstring": "Get the slug, /, for a GitHub repository from\n its URL.\n\n Parameters\n ----------\n github_url : `str`\n URL of a GitHub repository.\n\n Returns\n -------\n repo_slug : `RepoSlug`\n Repository slug with fields ``full``, ``owner``, and ``repo``.\n See `RepoSlug` for details.\n\n Raises\n ------\n RuntimeError\n Raised if the URL cannot be parsed.", "id": "f4215:m0"} {"signature": "def make_raw_content_url(repo_slug, git_ref, file_path):", "body": "if isinstance(repo_slug, RepoSlug):slug_str = repo_slug.fullelse:slug_str = repo_slugif file_path.startswith(''):file_path = file_path.lstrip('')template = ''return template.format(slug=slug_str,git_ref=git_ref,path=file_path)", "docstring": "Make a raw content (raw.githubusercontent.com) URL to a file.\n\n Parameters\n ----------\n repo_slug : `str` or `RepoSlug`\n The repository slug, formatted as either a `str` (``'owner/name'``)\n or a `RepoSlug` object (created by `parse_repo_slug_from_url`).\n git_ref : `str`\n The git ref: a branch name, commit hash, or tag name.\n file_path : `str`\n The POSIX path of the file in the repository tree.", "id": "f4215:m1"} {"signature": "def normalize_repo_root_url(url):", "body": "if url.endswith(''):url = url[:-]return url", "docstring": "Normalize a GitHub URL into the root repository URL.\n\n Parameters\n ----------\n url : `str`\n A GitHub URL\n\n Returns\n -------\n url : `str`\n Normalized URL of a GitHub repository.\n\n Examples\n --------\n >>> normalize_repo_root_url('https://github.com/lsst/LDM-151.git')\n 'https://github.com/lsst/LDM-151'", "id": "f4215:m2"} {"signature": "async def github_request(session, api_token,query=None, mutation=None, variables=None):", "body": "payload = {}if query is not None:payload[''] = str(query) if mutation is not None:payload[''] = str(mutation) if variables is not None:payload[''] = variablesheaders = {'': ''.format(api_token)}url = ''async with session.post(url, json=payload, headers=headers) as response:data = await response.json()return data", "docstring": "Send a request to the GitHub v4 (GraphQL) API.\n\n The request is asynchronous, with asyncio.\n\n Parameters\n ----------\n session : `aiohttp.ClientSession`\n Your application's aiohttp client session.\n api_token : `str`\n A GitHub personal API token. See the `GitHub personal access token\n guide`_.\n query : `str` or `GitHubQuery`\n GraphQL query string. If provided, then the ``mutation`` parameter\n should not be set. For examples, see the `GitHub guide to query and\n mutation operations`_.\n mutation : `str` or `GitHubQuery`\n GraphQL mutation string. If provided, then the ``query`` parameter\n should not be set. For examples, see the `GitHub guide to query and\n mutation operations`_.\n variables : `dict`\n GraphQL variables, as a JSON-compatible dictionary. This is only\n required if the ``query`` or ``mutation`` uses GraphQL variables.\n\n Returns\n -------\n data : `dict`\n Parsed JSON as a `dict` object.\n\n .. 
`GitHub personal access token guide`: https://ls.st/41d\n .. `GitHub guide to query and mutation operations`: https://ls.st/9s7", "id": "f4216:m0"} {"signature": "@classmethoddef load(cls, query_name):", "body": "template_path = os.path.join(os.path.dirname(__file__),'',query_name + '')with open(template_path) as f:query_data = f.read()return cls(query_data, name=query_name)", "docstring": "Load a pre-made query.\n\n These queries are distributed with lsstprojectmeta. See\n :file:`lsstrojectmeta/data/githubv4/README.rst` inside the\n package repository for details on available queries.\n\n Parameters\n ----------\n query_name : `str`\n Name of the query, such as ``'technote_repo'``.\n\n Returns\n -------\n github_query : `GitHubQuery\n A GitHub query or mutation object that you can pass to\n `github_request` to execute the request itself.", "id": "f4216:c0:m1"} {"signature": "def main():", "body": "parser = argparse.ArgumentParser(description='''''''')parser.add_argument('',dest='',help='''''')parser.add_argument('',help='')parser.add_argument('',help='''''')parser.add_argument('',default='',help='')parser.add_argument('',default='',help='')args = parser.parse_args()stream_handler = logging.StreamHandler()stream_formatter = logging.Formatter('')stream_handler.setFormatter(stream_formatter)root_logger = logging.getLogger()root_logger.addHandler(stream_handler)root_logger.setLevel(logging.WARNING)app_logger = logging.getLogger('')app_logger.setLevel(logging.DEBUG)if args.mongodb_uri is not None:mongo_client = AsyncIOMotorClient(args.mongodb_uri, ssl=True)collection = mongo_client[args.mongodb_db][args.mongodb_collection]else:collection = Noneloop = asyncio.get_event_loop()if args.ltd_product_url is not None:loop.run_until_complete(run_single_ltd_doc(args.ltd_product_url,args.github_token,collection))else:loop.run_until_complete(run_bulk_etl(args.github_token,collection))", "docstring": "Command line entrypoint to reduce technote metadata.", "id": "f4218:m0"} {"signature": "async def process_ltd_doc_products(session, product_urls, github_api_token,mongo_collection=None):", "body": "tasks = [asyncio.ensure_future(process_ltd_doc(session, github_api_token,product_url,mongo_collection=mongo_collection))for product_url in product_urls]await asyncio.gather(*tasks)", "docstring": "Run a pipeline to process extract, transform, and load metadata for\n multiple LSST the Docs-hosted projects\n\n Parameters\n ----------\n session : `aiohttp.ClientSession`\n Your application's aiohttp client session.\n See http://aiohttp.readthedocs.io/en/stable/client.html.\n product_urls : `list` of `str`\n List of LSST the Docs product URLs.\n github_api_token : `str`\n A GitHub personal API token. See the `GitHub personal access token\n guide`_.\n mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional\n MongoDB collection. 
This should be the common MongoDB collection for\n LSST projectmeta JSON-LD records.", "id": "f4218:m3"} {"signature": "async def process_ltd_doc(session, github_api_token, ltd_product_url,mongo_collection=None):", "body": "logger = logging.getLogger(__name__)ltd_product_data = await get_ltd_product(session, url=ltd_product_url)product_name = ltd_product_data['']doc_handle_match = DOCUMENT_HANDLE_PATTERN.match(product_name)if doc_handle_match is None:logger.debug('', product_name)returntry:return await process_sphinx_technote(session,github_api_token,ltd_product_data,mongo_collection=mongo_collection)except NotSphinxTechnoteError:logger.debug('', product_name)except Exception:logger.exception('', product_name)returntry:return await process_lander_page(session,github_api_token,ltd_product_data,mongo_collection=mongo_collection)except NotLanderPageError:logger.debug('',product_name)except Exception:logger.exception('', product_name)return", "docstring": "Ingest any kind of LSST document hosted on LSST the Docs from its\n source.\n\n Parameters\n ----------\n session : `aiohttp.ClientSession`\n Your application's aiohttp client session.\n See http://aiohttp.readthedocs.io/en/stable/client.html.\n github_api_token : `str`\n A GitHub personal API token. See the `GitHub personal access token\n guide`_.\n ltd_product_url : `str`\n URL of the technote's product resource in the LTD Keeper API.\n mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional\n MongoDB collection. This should be the common MongoDB collection for\n LSST projectmeta JSON-LD records. If provided, ths JSON-LD is upserted\n into the MongoDB collection.\n\n Returns\n -------\n metadata : `dict`\n JSON-LD-formatted dictionary.\n\n .. `GitHub personal access token guide`: https://ls.st/41d", "id": "f4218:m4"} {"signature": "def encode_jsonld(jsonld_dataset, **kwargs):", "body": "encoder = JsonLdEncoder(**kwargs)return encoder.encode(jsonld_dataset)", "docstring": "Encode a JSON-LD dataset into a string.\n\n Parameters\n ----------\n jsonld_dataset : `dict`\n A JSON-LD dataset.\n kwargs\n Keyword argument passed to the encoder. 
See `json.JSONEncoder`.\n\n Returns\n -------\n encoded_dataset : `str`\n The JSON-LD dataset encoded as a string.", "id": "f4219:m0"} {"signature": "def decode_jsonld(jsonld_text):", "body": "decoder = json.JSONDecoder(object_pairs_hook=_decode_object_pairs)return decoder.decode(jsonld_text)", "docstring": "Decode a JSON-LD dataset, including decoding datetime\n strings into `datetime.datetime` objects.\n\n Parameters\n ----------\n encoded_dataset : `str`\n The JSON-LD dataset encoded as a string.\n\n Returns\n -------\n jsonld_dataset : `dict`\n A JSON-LD dataset.\n\n Examples\n --------\n\n >>> doc = '{\"dt\": \"2018-01-01T12:00:00Z\"}'\n >>> decode_jsonld(doc)\n {'dt': datetime.datetime(2018, 1, 1, 12, 0, tzinfo=datetime.timezone.utc)}", "id": "f4219:m1"} {"signature": "def default(self, obj):", "body": "if isinstance(obj, datetime.datetime):return self._encode_datetime(obj)return json.JSONEncoder.default(self, obj)", "docstring": "Encode values as JSON strings.\n\n This method overrides the default implementation from\n `json.JSONEncoder`.", "id": "f4219:c0:m0"} {"signature": "def _encode_datetime(self, dt):", "body": "if dt.tzinfo is None:dt = dt.replace(tzinfo=datetime.timezone.utc)dt = dt.astimezone(datetime.timezone.utc)return dt.strftime('')", "docstring": "Encode a datetime in the format '%Y-%m-%dT%H:%M:%SZ'.\n\n The datetime can be naieve (doesn't have timezone info) or aware\n (it does have a tzinfo attribute set). Regardless, the datetime\n is transformed into UTC.", "id": "f4219:c0:m1"} {"signature": "async def process_sphinx_technote(session, github_api_token, ltd_product_data,mongo_collection=None):", "body": "logger = logging.getLogger(__name__)github_url = ltd_product_data['']github_url = normalize_repo_root_url(github_url)repo_slug = parse_repo_slug_from_url(github_url)try:metadata_yaml = await download_metadata_yaml(session, github_url)except aiohttp.ClientResponseError as err:logger.debug('',ltd_product_data[''], err.code)raise NotSphinxTechnoteError()github_query = GitHubQuery.load('')github_variables = {\"\": repo_slug.owner,\"\": repo_slug.repo}github_data = await github_request(session, github_api_token,query=github_query,variables=github_variables)try:jsonld = reduce_technote_metadata(github_url, metadata_yaml, github_data, ltd_product_data)except Exception as exception:message = \"\"logger.exception(message, github_url, exception)raiseif mongo_collection is not None:await _upload_to_mongodb(mongo_collection, jsonld)logger.info('', github_url)return jsonld", "docstring": "Extract, transform, and load Sphinx-based technote metadata.\n\n Parameters\n ----------\n session : `aiohttp.ClientSession`\n Your application's aiohttp client session.\n See http://aiohttp.readthedocs.io/en/stable/client.html.\n github_api_token : `str`\n A GitHub personal API token. See the `GitHub personal access token\n guide`_.\n ltd_product_data : `dict`\n Contents of ``metadata.yaml``, obtained via `download_metadata_yaml`.\n Data for this technote from the LTD Keeper API\n (``GET /products/``). Usually obtained via\n `lsstprojectmeta.ltd.get_ltd_product`.\n mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional\n MongoDB collection. This should be the common MongoDB collection for\n LSST projectmeta JSON-LD records. 
If provided, ths JSON-LD is upserted\n into the MongoDB collection.\n\n Returns\n -------\n metadata : `dict`\n JSON-LD-formatted dictionary.\n\n Raises\n ------\n NotSphinxTechnoteError\n Raised when the LTD product cannot be interpreted as a Sphinx-based\n technote project because it's missing a metadata.yaml file in its\n GitHub repository. This implies that the LTD product *could* be of a\n different format.\n\n .. `GitHub personal access token guide`: https://ls.st/41d", "id": "f4221:m0"} {"signature": "def reduce_technote_metadata(github_url, metadata, github_data,ltd_product_data):", "body": "repo_slug = parse_repo_slug_from_url(github_url)jsonld = {'': [\"\"\"\",\"\"],'': ['', ''],'': github_url}if '' in metadata:url = metadata['']elif '' in ltd_product_data:url = ltd_product_data['']else:raise RuntimeError(''''.format(github_url))jsonld[''] = urljsonld[''] = urlif '' in metadata and '' in metadata:jsonld[''] = ''.format(**metadata)else:raise RuntimeError(''.format(github_url))if '' in metadata:jsonld[''] = metadata['']if '' in metadata:jsonld[''] = metadata['']if '' in metadata:jsonld[''] = [{'': '', '': author_name}for author_name in metadata['']]if '' in metadata:jsonld[''] = datetime.datetime.strptime(metadata[''],'')else:try:_repo_data = github_data['']['']_master_data = _repo_data['']jsonld[''] = datetime.datetime.strptime(_master_data[''][''],'')except KeyError:passtry:_license_data = github_data['']['']['']_spdxId = _license_data['']if _spdxId is not None:_spdx_url = ''.format(_spdxId)jsonld[''] = _spdx_urlexcept KeyError:passtry:_master_data = github_data['']['']['']_files = _master_data['']['']['']for _node in _files:filename = _node['']normalized_filename = filename.lower()if normalized_filename.startswith(''):readme_url = make_raw_content_url(repo_slug, '',filename)jsonld[''] = readme_urlbreakexcept KeyError:passtravis_url = ''.format(repo_slug.full)jsonld[''] = travis_urlreturn jsonld", "docstring": "Reduce a technote project's metadata from multiple sources into a\n single JSON-LD resource.\n\n Parameters\n ----------\n github_url : `str`\n URL of the technote's GitHub repository.\n metadata : `dict`\n The parsed contents of ``metadata.yaml`` found in a technote's\n repository.\n github_data : `dict`\n The contents of the ``technote_repo`` GitHub GraphQL API query.\n ltd_product_data : `dict`\n JSON dataset for the technote corresponding to the\n ``/products/`` of LTD Keeper.\n\n Returns\n -------\n metadata : `dict`\n JSON-LD-formatted dictionary.\n\n .. 
`GitHub personal access token guide`: https://ls.st/41d", "id": "f4221:m1"} {"signature": "async def download_metadata_yaml(session, github_url):", "body": "metadata_yaml_url = _build_metadata_yaml_url(github_url)async with session.get(metadata_yaml_url) as response:response.raise_for_status()yaml_data = await response.text()return yaml.safe_load(yaml_data)", "docstring": "Download the metadata.yaml file from a technote's GitHub repository.", "id": "f4221:m2"} {"signature": "def _build_metadata_yaml_url(github_url):", "body": "repo_slug = parse_repo_slug_from_url(github_url)return make_raw_content_url(repo_slug, '', '')", "docstring": "Compute the URL to the raw metadata.yaml resource given the technote's\n GitHub repository URL.\n\n Parameters\n ----------\n github_url : `str`\n URL of the technote's GitHub repository.\n\n Returns\n -------\n metadata_yaml_url : `str`\n metadata.yaml URL (using the ``raw.githubusercontent.com`` domain).", "id": "f4221:m3"} {"signature": "async def _upload_to_mongodb(collection, jsonld):", "body": "document = {'': jsonld}query = {'': jsonld['']}await collection.update(query, document, upsert=True, multi=False)", "docstring": "Upsert the technote resource into the projectmeta MongoDB collection.\n\n Parameters\n ----------\n collection : `motor.motor_asyncio.AsyncIOMotorCollection`\n The MongoDB collection.\n jsonld : `dict`\n The JSON-LD document that reprsents the technote resource.", "id": "f4221:m4"} {"signature": "async def process_lander_page(session, github_api_token, ltd_product_data,mongo_collection=None):", "body": "logger = logging.getLogger(__name__)published_url = ltd_product_data['']jsonld_url = urljoin(published_url, '')try:async with session.get(jsonld_url) as response:logger.debug('', jsonld_url, response.status)response.raise_for_status()json_data = await response.text()except aiohttp.ClientResponseError as err:logger.debug('',jsonld_url, err.code)raise NotLanderPageError()metadata = decode_jsonld(json_data)if mongo_collection is not None:await _upload_to_mongodb(mongo_collection, metadata)return metadata", "docstring": "Extract, transform, and load metadata from Lander-based projects.\n\n Parameters\n ----------\n session : `aiohttp.ClientSession`\n Your application's aiohttp client session.\n See http://aiohttp.readthedocs.io/en/stable/client.html.\n github_api_token : `str`\n A GitHub personal API token. See the `GitHub personal access token\n guide`_.\n ltd_product_data : `dict`\n Contents of ``metadata.yaml``, obtained via `download_metadata_yaml`.\n Data for this technote from the LTD Keeper API\n (``GET /products/``). Usually obtained via\n `lsstprojectmeta.ltd.get_ltd_product`.\n mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional\n MongoDB collection. This should be the common MongoDB collection for\n LSST projectmeta JSON-LD records. If provided, ths JSON-LD is upserted\n into the MongoDB collection.\n\n Returns\n -------\n metadata : `dict`\n JSON-LD-formatted dictionary.\n\n Raises\n ------\n NotLanderPageError\n Raised when the LTD product cannot be interpreted as a Lander page\n because the ``/metadata.jsonld`` file is absent. This implies that\n the LTD product *could* be of a different format.\n\n .. 
`GitHub personal access token guide`: https://ls.st/41d", "id": "f4222:m0"} {"signature": "async def _upload_to_mongodb(collection, jsonld):", "body": "document = {'': jsonld}query = {'': jsonld['']}await collection.update(query, document, upsert=True, multi=False)", "docstring": "Upsert the technote resource into the projectmeta MongoDB collection.\n\n Parameters\n ----------\n collection : `motor.motor_asyncio.AsyncIOMotorCollection`\n The MongoDB collection.\n jsonld : `dict`\n The JSON-LD document that represents the document resource.", "id": "f4222:m1"} {"signature": "def read_git_commit_timestamp(repo_path=None, repo=None):", "body": "if repo is None:repo = git.repo.base.Repo(path=repo_path,search_parent_directories=True)head_commit = repo.head.commitreturn head_commit.committed_datetime", "docstring": "Obtain the timestamp from the current head commit of a Git repository.\n\n Parameters\n ----------\n repo_path : `str`, optional\n Path to the Git repository. Leave as `None` to use the current working\n directory.\n\n Returns\n -------\n commit_timestamp : `datetime.datetime`\n The datetime of the head commit.", "id": "f4223:m0"} {"signature": "def read_git_commit_timestamp_for_file(filepath, repo_path=None, repo=None):", "body": "logger = logging.getLogger(__name__)if repo is None:repo = git.repo.base.Repo(path=repo_path,search_parent_directories=True)repo_path = repo.working_tree_dirhead_commit = repo.head.commitlogger.debug('', repo_path)filepath = os.path.relpath(os.path.abspath(filepath),start=repo_path)logger.debug('', filepath)for commit in head_commit.iter_items(repo,head_commit,[filepath],skip=):return commit.committed_datetimeraise IOError(''.format(filepath))", "docstring": "Obtain the timestamp for the most recent commit to a given file in a\n Git repository.\n\n Parameters\n ----------\n filepath : `str`\n Absolute or repository-relative path for a file.\n repo_path : `str`, optional\n Path to the Git repository. 
Leave as `None` to use the current working\n        directory or if a ``repo`` argument is provided.\n    repo : `git.Repo`, optional\n        A `git.Repo` instance.\n\n    Returns\n    -------\n    commit_timestamp : `datetime.datetime`\n        The datetime of the most recent commit to the given file.\n\n    Raises\n    ------\n    IOError\n        Raised if the ``filepath`` does not exist in the Git repository.", "id": "f4223:m1"} {"signature": "def get_content_commit_date(extensions, acceptance_callback=None,root_dir=''):", "body": "logger = logging.getLogger(__name__)def _null_callback(_):return Trueif acceptance_callback is None:acceptance_callback = _null_callbackroot_dir = os.path.abspath(root_dir)repo = git.repo.base.Repo(path=root_dir, search_parent_directories=True)newest_datetime = Noneiters = [_iter_filepaths_with_extension(ext, root_dir=root_dir)for ext in extensions]for content_path in itertools.chain(*iters):content_path = os.path.abspath(os.path.join(root_dir, content_path))if acceptance_callback(content_path):logger.debug('', content_path)try:commit_datetime = read_git_commit_timestamp_for_file(content_path, repo=repo)logger.debug('',content_path, commit_datetime)except IOError:logger.warning('',content_path)continueif not newest_datetime or commit_datetime > newest_datetime:newest_datetime = commit_datetimelogger.debug('', newest_datetime)logger.debug('', newest_datetime)if newest_datetime is None:raise RuntimeError(''.format(root_dir))return newest_datetime", "docstring": "Get the datetime for the most recent commit to a project that\n    affected certain types of content.\n\n    Parameters\n    ----------\n    extensions : sequence of `str`\n        Extensions of files to consider in getting the most recent commit\n        date. For example, ``('rst', 'svg', 'png')`` are content extensions\n        for a Sphinx project. **Extension comparison is case sensitive.** Add\n        uppercase variants to match uppercase extensions.\n    acceptance_callback : callable\n        Callable function whose sole argument is a file path, and returns\n        `True` or `False` depending on whether the file's commit date should\n        be considered or not. This callback is only run on files that are\n        included by ``extensions``. Thus this callback is a way to exclude\n        specific files that would otherwise be included by their extension.\n    root_dir : `str`, optional\n        Only content contained within this root directory is considered.\n        This directory must be, or be contained by, a Git repository. This is\n        the current working directory by default.\n\n    Returns\n    -------\n    commit_date : `datetime.datetime`\n        Datetime of the most recent content commit.\n\n    Raises\n    ------\n    RuntimeError\n        Raised if no content files are found.", "id": "f4223:m2"} {"signature": "def _iter_filepaths_with_extension(extname, root_dir=''):", "body": "if not extname.startswith(''):extname = '' + extnameroot_dir = os.path.abspath(root_dir)for dirname, sub_dirnames, filenames in os.walk(root_dir):for filename in filenames:if os.path.splitext(filename)[-] == extname:full_filename = os.path.join(dirname, filename)rel_filepath = os.path.relpath(full_filename, start=root_dir)yield rel_filepath", "docstring": "Iterate over relative filepaths of files in a directory, and\n    sub-directories, with the given extension.\n\n    Parameters\n    ----------\n    extname : `str`\n        Extension name (such as 'txt' or 'rst'). Extension comparison is\n        case sensitive.\n    root_dir : `str`, optional\n        Root directory. 
Current working directory by default.\n\n Yields\n ------\n filepath : `str`\n File path, relative to ``root_dir``, with the given extension.", "id": "f4223:m3"} {"signature": "def __init__(self, path_to_tagger):", "body": "self._path_to_tagger = path_to_taggerself._dir_to_tagger = os.path.dirname(path_to_tagger)self._tagger = subprocess.Popen(''+os.path.basename(path_to_tagger),cwd=self._dir_to_tagger,stdin=subprocess.PIPE, stdout=subprocess.PIPE)", "docstring": "Arguments:\n- `path_to_tagger`:", "id": "f4225:c0:m0"} {"signature": "def parse(self, text):", "body": "results = list()for oneline in text.split(''):self._tagger.stdin.write(oneline+'')while True:r = self._tagger.stdout.readline()[:-]if not r:breakresults.append(tuple(r.split('')))return results", "docstring": "Arguments:\n- `self`:\n- `text`:", "id": "f4225:c0:m1"} {"signature": "def _gen(d, limit=, count=False, grouprefs=None):", "body": "if grouprefs is None:grouprefs = {}ret = ['']strings = literal = Falsefor i in d:if i[] == sre_parse.IN:subs = _in(i[])if count:strings = (strings or ) * len(subs)ret = comb(ret, subs)elif i[] == sre_parse.LITERAL:literal = Trueret = mappend(ret, unichr(i[]))elif i[] == sre_parse.CATEGORY:subs = CATEGORIES.get(i[], [''])if count:strings = (strings or ) * len(subs)ret = comb(ret, subs)elif i[] == sre_parse.ANY:subs = CATEGORIES['']if count:strings = (strings or ) * len(subs)ret = comb(ret, subs)elif i[] == sre_parse.MAX_REPEAT or i[] == sre_parse.MIN_REPEAT:items = list(i[][])if i[][] + - i[][] >= limit:r1 = i[][]r2 = i[][] + limitelse:r1 = i[][]r2 = i[][] + ran = range(r1, r2)if count:branch_count = for p in ran:branch_count += pow(_gen(items, limit, True, grouprefs), p)strings = (strings or ) * branch_countret = prods(ret, ran, items, limit, grouprefs)elif i[] == sre_parse.BRANCH:if count:for x in i[][]:strings += _gen(x, limit, True, grouprefs) or ret = concit(ret, i[][], limit, grouprefs)elif i[] == sre_parse.SUBPATTERN or i[] == sre_parse.ASSERT:subexpr = i[][]if IS_PY36_OR_GREATER and i[] == sre_parse.SUBPATTERN:subexpr = i[][]if count:strings = (strings or ) * (sum(ggen([], _gen, subexpr, limit=limit, count=True, grouprefs=grouprefs)) or )ret = ggen(ret, _gen, subexpr, limit=limit, count=False, grouprefs=grouprefs, groupref=i[][])elif i[] == sre_parse.AT:continueelif i[] == sre_parse.NOT_LITERAL:subs = list(CATEGORIES[''])if unichr(i[]) in subs:subs.remove(unichr(i[]))if count:strings = (strings or ) * len(subs)ret = comb(ret, subs)elif i[] == sre_parse.GROUPREF:ret = dappend(ret, grouprefs, i[])elif i[] == sre_parse.ASSERT_NOT:passelse:print('' + repr(i))if count:if strings == and literal:inc = Truefor i in d:if i[] not in (sre_parse.AT, sre_parse.LITERAL):inc = Falseif inc:strings = return stringsreturn ret", "docstring": "docstring for _gen", "id": "f4227:m8"} {"signature": "def sre_to_string(sre_obj, paren=True):", "body": "ret = u''for i in sre_obj:if i[] == sre_parse.IN:prefix = ''if len(i[]) and i[][][] == sre_parse.NEGATE:prefix = ''ret += u''.format(prefix, sre_to_string(i[], paren=paren))elif i[] == sre_parse.LITERAL:u = unichr(i[])ret += u if u not in sre_parse.SPECIAL_CHARS else ''.format(u)elif i[] == sre_parse.CATEGORY:ret += REVERSE_CATEGORIES[i[]]elif i[] == sre_parse.ANY:ret += ''elif i[] == sre_parse.BRANCH:parts = [sre_to_string(x, paren=paren) for x in i[][]]if not any(parts):continueif i[][]:if len(parts) == :paren = Falseprefix = ''else:prefix = ''branch = ''.join(parts)if paren:ret += ''.format(prefix, branch)else:ret += ''.format(branch)elif i[] == 
sre_parse.SUBPATTERN:subexpr = i[][]if IS_PY36_OR_GREATER and i[] == sre_parse.SUBPATTERN:subexpr = i[][]if i[][]:ret += ''.format(sre_to_string(subexpr, paren=False))else:ret += ''.format(sre_to_string(subexpr, paren=paren))elif i[] == sre_parse.NOT_LITERAL:ret += ''.format(unichr(i[]))elif i[] == sre_parse.MAX_REPEAT:if i[][] == i[][]:range_str = ''.format(i[][])else:if i[][] == and i[][] - i[][] == sre_parse.MAXREPEAT:range_str = ''elif i[][] == and i[][] - i[][] == sre_parse.MAXREPEAT - :range_str = ''else:range_str = ''.format(i[][], i[][])ret += sre_to_string(i[][], paren=paren) + range_strelif i[] == sre_parse.MIN_REPEAT:if i[][] == and i[][] == sre_parse.MAXREPEAT:range_str = ''elif i[][] == and i[][] == sre_parse.MAXREPEAT:range_str = ''elif i[][] == sre_parse.MAXREPEAT:range_str = ''.format(i[][])else:range_str = ''.format(i[][], i[][])ret += sre_to_string(i[][], paren=paren) + range_strelif i[] == sre_parse.GROUPREF:ret += ''.format(i[])elif i[] == sre_parse.AT:if i[] == sre_parse.AT_BEGINNING:ret += ''elif i[] == sre_parse.AT_END:ret += ''elif i[] == sre_parse.NEGATE:passelif i[] == sre_parse.RANGE:ret += ''.format(unichr(i[][]), unichr(i[][]))elif i[] == sre_parse.ASSERT:if i[][]:ret += ''.format(sre_to_string(i[][], paren=False))else:ret += ''.format(sre_to_string(i[][], paren=paren))elif i[] == sre_parse.ASSERT_NOT:passelse:print('' % str(i))return ret", "docstring": "sre_parse object to string\n\n :param sre_obj: Output of sre_parse.parse()\n :type sre_obj: list\n :rtype: str", "id": "f4227:m10"} {"signature": "def simplify(regex_string):", "body": "r = parse(regex_string)return sre_to_string(r)", "docstring": "Simplify a regular expression\n\n :param regex_string: Regular expression\n :type regex_string: str\n :rtype: str", "id": "f4227:m11"} {"signature": "def parse(s):", "body": "if IS_PY3:r = sre_parse.parse(s, flags=U)else:r = sre_parse.parse(s.decode(''), flags=U)return list(r)", "docstring": "Regular expression parser\n\n :param s: Regular expression\n :type s: str\n :rtype: list", "id": "f4227:m12"} {"signature": "def generate(s, limit=):", "body": "return _gen(parse(s), limit)", "docstring": "Creates a generator that generates all matching strings to a given regular expression\n\n :param s: Regular expression\n :type s: str\n :param limit: Range limit\n :type limit: int\n :returns: string generator object", "id": "f4227:m13"} {"signature": "def count(s, limit=):", "body": "return _gen(parse(s), limit, count=True)", "docstring": "Counts all matching strings to a given regular expression\n\n :param s: Regular expression\n :type s: str\n :param limit: Range limit\n :type limit: int\n :rtype: int\n :returns: number of matching strings", "id": "f4227:m14"} {"signature": "def getone(regex_string, limit=):", "body": "return _randone(parse(regex_string), limit)", "docstring": "Returns a random matching string to a given regular expression", "id": "f4227:m15"} {"signature": "def safe_setting(self, name_hyphen, default=None):", "body": "try:return self.setting(name_hyphen)except UserFeedback:return default", "docstring": "Retrieves the setting value, but returns a default value rather than\nraising an error if the setting does not exist.", "id": "f4241:c0:m7"} {"signature": "def setting(self, name_hyphen):", "body": "if name_hyphen in self._instance_settings:value = self._instance_settings[name_hyphen][]else:msg = \"\" % name_hyphenraise UserFeedback(msg)if hasattr(value, '') and value.startswith(\"\"):env_var = value.lstrip(\"\")if env_var in os.environ:return 
os.getenv(env_var)else:msg = \"\" % env_varraise UserFeedback(msg)elif hasattr(value, '') and value.startswith(\"\"):return value.replace(\"\", \"\")else:return value", "docstring": "Retrieves the setting value whose name is indicated by name_hyphen.\n\nValues starting with $ are assumed to reference environment variables,\nand the value stored in environment variables is retrieved. It's an\nerror if the corresponding environment variable is not set.", "id": "f4241:c0:m8"} {"signature": "def setting_values(self, skip=None):", "body": "if not skip:skip = []return dict((k, v[])for k, v in six.iteritems(self._instance_settings)if not k in skip)", "docstring": "Returns dict of all setting values (removes the helpstrings).", "id": "f4241:c0:m9"} {"signature": "def update_settings(self, new_settings):", "body": "self._update_settings(new_settings, False)", "docstring": "Update settings for this instance based on the provided dictionary of\nsetting keys: setting values. Values should be a tuple of (helpstring,\nvalue,) unless the setting has already been defined in a parent class,\nin which case just pass the desired value.", "id": "f4241:c0:m10"} {"signature": "def _update_settings(self, new_settings, enforce_helpstring=True):", "body": "for raw_setting_name, value in six.iteritems(new_settings):setting_name = raw_setting_name.replace(\"\", \"\")setting_already_exists = setting_name in self._instance_settingsvalue_is_list_len_2 = isinstance(value, list) and len(value) == treat_as_tuple = not setting_already_exists and value_is_list_len_2if isinstance(value, tuple) or treat_as_tuple:self._instance_settings[setting_name] = valueelse:if setting_name not in self._instance_settings:if enforce_helpstring:msg = \"\"raise InternalCashewException(msg % setting_name)else:self._instance_settings[setting_name] = ('', value,)else:orig = self._instance_settings[setting_name]self._instance_settings[setting_name] = (orig[], value,)", "docstring": "This method does the work of updating settings. 
Can be passed with\nenforce_helpstring = False which you may want if allowing end users to\nadd arbitrary metadata via the settings system.\n\nPreferable to use update_settings (without leading _) in code to do the\nright thing and always have docstrings.", "id": "f4241:c0:m11"} {"signature": "def settings_and_attributes(self):", "body": "attrs = self.setting_values()attrs.update(self.__dict__)skip = [\"\", \"\"]for a in skip:del attrs[a]return attrs", "docstring": "Return a combined dictionary of setting values and attribute values.", "id": "f4241:c0:m12"} {"signature": "def standardize_alias_or_aliases(cls, alias_or_aliases):", "body": "if isinstance(alias_or_aliases, string_types):return [alias_or_aliases]else:return alias_or_aliases", "docstring": "Make sure we don't attempt to iterate over an alias string thinking\nit's an array.", "id": "f4241:c1:m4"} {"signature": "def get_reference_to_class(cls, class_or_class_name):", "body": "if isinstance(class_or_class_name, type):return class_or_class_nameelif isinstance(class_or_class_name, string_types):if \"\" in class_or_class_name:mod_name, class_name = class_or_class_name.split(\"\")if not mod_name in sys.modules:__import__(mod_name)mod = sys.modules[mod_name]return mod.__dict__[class_name]else:return cls.load_class_from_locals(class_or_class_name)else:msg = \"\" % type(class_or_class_name)raise InternalCashewException(msg)", "docstring": "Detect if we get a class or a name, convert a name to a class.", "id": "f4241:c1:m5"} {"signature": "def check_docstring(cls):", "body": "docstring = inspect.getdoc(cls)if not docstring:breadcrumbs = \"\".join(t.__name__ for t in inspect.getmro(cls)[:-][::-])msg = \"\"args = (cls.__name__, breadcrumbs, cls.__module__)raise InternalCashewException(msg % args)max_line_length = cls._class_settings.get('')if max_line_length:for i, line in enumerate(docstring.splitlines()):if len(line) > max_line_length:msg = \"\" args = (i, cls.__name__, len(line) - max_line_length)raise Exception(msg % args)return docstring", "docstring": "Asserts that the class has a docstring, returning it if successful.", "id": "f4241:c1:m7"} {"signature": "def imro(cls):", "body": "return reversed(inspect.getmro(cls)[:-])", "docstring": "Returns MRO in reverse order, skipping 'object/type' class.", "id": "f4241:c1:m14"} {"signature": "def __iter__(cls, *instanceargs):", "body": "processed_aliases = set()for alias in sorted(cls.plugins, cmp=lambda x,y: cmp(x.lower(), y.lower())):if alias in processed_aliases:continuetry:instance = cls.create_instance(alias, *instanceargs)instance.alias = instance.standard_alias()yield(instance)for alias in instance.setting(''):processed_aliases.add(alias)except InactivePlugin:pass", "docstring": "Lets you iterate over instances of all plugins which are not marked as\n'inactive'. 
If there are multiple aliases, the resulting plugin is only\ncalled once.", "id": "f4241:c1:m15"} {"signature": "def expand_args(command):", "body": "if isinstance(command, (str, unicode)):splitter = shlex.shlex(command.encode(''))splitter.whitespace = ''splitter.whitespace_split = Truecommand = []while True:token = splitter.get_token()if token:command.append(token)else:breakcommand = list(map(shlex.split, command))return command", "docstring": "Parses command strings and returns a Popen-ready list.", "id": "f4246:m3"} {"signature": "def run(command, data=None, timeout=None, kill_timeout=None, env=None, cwd=None):", "body": "command = expand_args(command)history = []for c in command:if len(history):data = history[-].std_out[:*]cmd = Command(c)try:out, err = cmd.run(data, timeout, kill_timeout, env, cwd)status_code = cmd.returncodeexcept OSError as e:out, err = '', u\"\".join([e.strerror, traceback.format_exc()])status_code = r = Response(process=cmd)r.command = cr.std_out = outr.std_err = errr.status_code = status_codehistory.append(r)r = history.pop()r.history = historyreturn r", "docstring": "Executes a given command and returns Response.\n\n    Blocks until process is complete, or timeout is reached.", "id": "f4246:m4"} {"signature": "def connect(command, data=None, env=None, cwd=None):", "body": "command_str = expand_args(command).pop()environ = dict(os.environ)environ.update(env or {})process = subprocess.Popen(command_str,universal_newlines=True,shell=False,env=environ,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE,bufsize=,cwd=cwd,)return ConnectedCommand(process=process)", "docstring": "Spawns a new process from the given command.", "id": "f4246:m5"} {"signature": "@propertydef status_code(self):", "body": "return self._status_code", "docstring": "The status code of the process.\n        If the code is None, assume that it's still running.", "id": "f4246:c1:m3"} {"signature": "@propertydef pid(self):", "body": "return self._process.pid", "docstring": "The process' PID.", "id": "f4246:c1:m4"} {"signature": "def kill(self):", "body": "return self._process.kill()", "docstring": "Kills the process.", "id": "f4246:c1:m5"} {"signature": "def expect(self, bytes, stream=None):", "body": "if stream is None:stream = self.std_out", "docstring": "Block until given bytes appear in the stream.", "id": "f4246:c1:m6"} {"signature": "def send(self, str, end=''):", "body": "return self._process.stdin.write(str+end)", "docstring": "Sends a line to std_in.", "id": "f4246:c1:m7"} {"signature": "def block(self):", "body": "self._status_code = self._process.wait()", "docstring": "Blocks until command finishes. 
Returns Response instance.", "id": "f4246:c1:m8"} {"signature": "def run_cell(self, cell):", "body": "globals = self.ipy_shell.user_global_nslocals = self.ipy_shell.user_nsglobals.update({\"\": None,})try:with redirect_stdout(self.stdout):self.run(cell, globals, locals)except:self.code_error = Trueif self.options.debug:raise BdbQuitfinally:self.finalize()", "docstring": "Run the Cell code using the IPython globals and locals\n\n Args:\n cell (str): Python code to be executed", "id": "f4257:c0:m1"} {"signature": "def user_call(self, frame, argument_list):", "body": "self.get_stack_data(frame, None, '')", "docstring": "This method is called when there is the remote possibility that we\n ever need to stop in this function.", "id": "f4257:c0:m2"} {"signature": "def user_line(self, frame):", "body": "self.get_stack_data(frame, None, '')", "docstring": "This function is called when we stop or break at this line.", "id": "f4257:c0:m3"} {"signature": "def user_return(self, frame, return_value):", "body": "self.get_stack_data(frame, None, '')", "docstring": "This function is called when a return trap is set here.", "id": "f4257:c0:m4"} {"signature": "def user_exception(self, frame, exc_info):", "body": "pass", "docstring": "This function is called if an exception occurs,\n but only if we are to stop at or just below this level.", "id": "f4257:c0:m5"} {"signature": "def is_notebook_frame(self, frame):", "body": "return \"\" in frame.f_globals.keys()", "docstring": "Return True if the current frame belongs to the notebook, else\n False", "id": "f4257:c0:m6"} {"signature": "def is_other_cell_frame(self, frame):", "body": "return frame.f_code.co_filename.startswith(\"\")", "docstring": "Return True if the current frame belongs to the Cell, else\n False", "id": "f4257:c0:m7"} {"signature": "def get_stack_data(self, frame, traceback, event_type):", "body": "heap_data = Heap(self.options)stack_data = StackFrames(self.options)stack_frames, cur_frame_ind = self.get_stack(frame, traceback)for frame_ind, (frame, lineno) in enumerate(stack_frames):skip_this_stack = Falseif frame_ind == :continueif len(stack_data) > self.options.depth:skip_this_stack = Truebreakif (not self.is_notebook_frame(frame) orself.is_other_cell_frame(frame)):if not self.options.step_all:skip_this_stack = Truebreaklineno = else:lineno += user_locals = filter_dict(frame.f_locals,ignore_vars + list(self.ipy_shell.user_ns_hidden.keys()))stack_data.add(frame, lineno, event_type, user_locals)heap_data.add(user_locals)if not skip_this_stack and not stack_data.is_empty():self.trace_history.append(stack_data,heap_data,self.stdout.getvalue())", "docstring": "Get the stack frames data at each of the hooks above (Ie. 
for each\n        line of the Python code)", "id": "f4257:c0:m8"} {"signature": "def finalize(self):", "body": "self.trace_history.sort_frame_locals()", "docstring": "Finalize the trace history data after execution is complete", "id": "f4257:c0:m9"} {"signature": "def filter_dict(d, exclude):", "body": "ret = {}for key, value in d.items():if key not in exclude:ret.update({key: value})return ret", "docstring": "Return a new dict with specified keys excluded from the original dict\n\n    Args:\n        d (dict): original dict\n        exclude (list): The keys that are excluded", "id": "f4259:m0"} {"signature": "@contextmanagerdef redirect_stdout(new_stdout):", "body": "old_stdout, sys.stdout = sys.stdout, new_stdouttry:yield Nonefinally:sys.stdout = old_stdout", "docstring": "Redirect the stdout\n\n    Args:\n        new_stdout (io.StringIO): New stdout to use instead", "id": "f4259:m1"} {"signature": "def format(obj, options):", "body": "formatters = {float_types: lambda x: ''.format(x, options.digits),}for _types, fmtr in formatters.items():if isinstance(obj, _types):return fmtr(obj)try:if six.PY2 and isinstance(obj, six.string_types):return str(obj.encode(''))return str(obj)except:return ''", "docstring": "Return a string representation of the Python object\n\n    Args:\n        obj: The Python object\n        options: Format options", "id": "f4259:m2"} {"signature": "def get_type_info(obj):", "body": "if isinstance(obj, primitive_types):return ('', type(obj).__name__)if isinstance(obj, sequence_types):return ('', type(obj).__name__)if isinstance(obj, array_types):return ('', type(obj).__name__)if isinstance(obj, key_value_types):return ('', type(obj).__name__)if isinstance(obj, types.ModuleType):return ('', type(obj).__name__)if isinstance(obj, (types.FunctionType, types.MethodType)):return ('', type(obj).__name__)if isinstance(obj, type):if hasattr(obj, ''):return ('', obj.__name__)if isinstance(type(obj), type):if hasattr(obj, ''):cls_name = type(obj).__name__if cls_name == '':cls_name = obj.__name__return ('', ''.format(cls_name))if cls_name == '':cls_name = obj.__class__.__name__return ('', ''.format(cls_name))return ('', type(obj).__name__)", "docstring": "Get type information for a Python object\n\n    Args:\n        obj: The Python object\n\n    Returns:\n        tuple: (object type \"category\", object type name)", "id": "f4259:m3"} {"signature": "def which(program):", "body": "if os.path.split(program)[]:program_path = find_exe(program)if program_path:return program_pathelse:for path in get_path_list():program_path = find_exe(os.path.join(path, program))if program_path:return program_pathreturn None", "docstring": "Identify the location of an executable file.", "id": "f4266:m0"} {"signature": "def print_unicode(text):", "body": "if sys.version_info[] < :text = text.encode('')print(text)", "docstring": "Print in a portable manner.", "id": "f4269:m3"} {"signature": "def _consume(stdout):", "body": "while stdout.readline():pass", "docstring": "Consume/ignore the rest of the server output.\n\n    Without this, the server will end up hanging due to the buffer\n    filling up.", "id": "f4271:m2"} {"signature": "def correct(text: str, matches: [Match]) -> str:", "body": "ltext = list(text)matches = [match for match in matches if match.replacements]errors = [ltext[match.offset:match.offset + match.errorlength]for match in matches]correct_offset = for n, match in enumerate(matches):frompos, topos = (correct_offset + match.offset,correct_offset + match.offset + match.errorlength)if ltext[frompos:topos] != errors[n]:continuerepl = 
match.replacements[]ltext[frompos:topos] = list(repl)correct_offset += len(repl) - len(errors[n])return ''.join(ltext)", "docstring": "Automatically apply suggestions to the text.", "id": "f4271:m3"} {"signature": "def get_version():", "body": "version = _get_attrib().get('')if not version:match = re.search(r\"\", get_directory())if match:version = match.group()return version", "docstring": "Get LanguageTool version.", "id": "f4271:m5"} {"signature": "def get_build_date():", "body": "return _get_attrib().get('')", "docstring": "Get LanguageTool build date.", "id": "f4271:m6"} {"signature": "def get_languages() -> set:", "body": "try:languages = cache['']except KeyError:languages = LanguageTool._get_languages()cache[''] = languagesreturn languages", "docstring": "Get supported languages.", "id": "f4271:m7"} {"signature": "def get_directory():", "body": "try:language_check_dir = cache['']except KeyError:def version_key(string):return [int(e) if e.isdigit() else efor e in re.split(r\"\", string)]def get_lt_dir(base_dir):paths = [path for path inglob.glob(os.path.join(base_dir, ''))if os.path.isdir(path)]return max(paths, key=version_key) if paths else Nonebase_dir = os.path.dirname(sys.argv[])language_check_dir = get_lt_dir(base_dir)if not language_check_dir:try:base_dir = os.path.dirname(os.path.abspath(__file__))except NameError:passelse:language_check_dir = get_lt_dir(base_dir)if not language_check_dir:raise PathError(\"\".format(base_dir))cache[''] = language_check_dirreturn language_check_dir", "docstring": "Get LanguageTool directory.", "id": "f4271:m8"} {"signature": "def set_directory(path=None):", "body": "old_path = get_directory()terminate_server()cache.clear()if path:cache[''] = pathtry:get_jar_info()except Error:cache[''] = old_pathraise", "docstring": "Set LanguageTool directory.", "id": "f4271:m9"} {"signature": "def get_locale_language():", "body": "return locale.getlocale()[] or locale.getdefaultlocale()[]", "docstring": "Get the language code for the current locale setting.", "id": "f4271:m12"} {"signature": "@atexit.registerdef terminate_server():", "body": "if LanguageTool._server_is_alive():LanguageTool._terminate_server()", "docstring": "Terminate the server.", "id": "f4271:m13"} {"signature": "@propertydef language(self):", "body": "return self._language", "docstring": "The language to be used.", "id": "f4271:c5:m3"} {"signature": "@propertydef motherTongue(self):", "body": "return self._motherTongue", "docstring": "The user's mother tongue or None.\n\n The mother tongue may also be used as a source language for\n checking bilingual texts.", "id": "f4271:c5:m5"} {"signature": "def check(self, text: str, srctext=None) -> [Match]:", "body": "root = self._get_root(self._url, self._encode(text, srctext))return [Match(e.attrib) for e in root if e.tag == '']", "docstring": "Match text against enabled rules.", "id": "f4271:c5:m8"} {"signature": "def _check_api(self, text: str, srctext=None) -> bytes:", "body": "root = self._get_root(self._url, self._encode(text, srctext))return (b'' +ElementTree.tostring(root) + b\"\")", "docstring": "Match text against enabled rules (result in XML format).", "id": "f4271:c5:m9"} {"signature": "def correct(self, text: str, srctext=None) -> str:", "body": "return correct(text, self.check(text, srctext))", "docstring": "Automatically apply suggestions to the text.", "id": "f4271:c5:m11"} {"signature": "def enable_spellchecking(self):", "body": "self.disabled.difference_update(self._spell_checking_rules)", "docstring": "Enable spell-checking 
rules.", "id": "f4271:c5:m12"} {"signature": "def disable_spellchecking(self):", "body": "self.disabled.update(self._spell_checking_rules)", "docstring": "Disable spell-checking rules.", "id": "f4271:c5:m13"} {"signature": "@classmethoddef _get_languages(cls) -> set:", "body": "cls._start_server_if_needed()url = urllib.parse.urljoin(cls._url, '')languages = set()for e in cls._get_root(url, num_tries=):languages.add(e.get(''))languages.add(e.get(''))return languages", "docstring": "Get supported languages (by querying the server).", "id": "f4271:c5:m14"} {"signature": "@classmethoddef _get_attrib(cls):", "body": "cls._start_server_if_needed()params = {'': FAILSAFE_LANGUAGE, '': ''}data = urllib.parse.urlencode(params).encode()root = cls._get_root(cls._url, data, num_tries=)return root.attrib", "docstring": "Get matches element attributes.", "id": "f4271:c5:m15"} {"signature": "def parse_java_version(version_text):", "body": "match = re.search(JAVA_VERSION_REGEX, version_text)if not match:raise SystemExit(''.format(version_text))return (int(match.group('')), int(match.group('')))", "docstring": "Return Java version (major1, major2).\n\n >>> parse_java_version('''java version \"1.6.0_65\"\n ... Java(TM) SE Runtime Environment (build 1.6.0_65-b14-462-11M4609)\n ... Java HotSpot(TM) 64-Bit Server VM (build 20.65-b04-462, mixed mode))\n ... ''')\n (1, 6)\n\n >>> parse_java_version('''\n ... openjdk version \"1.8.0_60\"\n ... OpenJDK Runtime Environment (build 1.8.0_60-b27)\n ... OpenJDK 64-Bit Server VM (build 25.60-b23, mixed mode))\n ... ''')\n (1, 8)", "id": "f4272:m0"} {"signature": "def get_newest_possible_languagetool_version():", "body": "java_path = find_executable('')if not java_path:return JAVA_6_COMPATIBLE_VERSIONoutput = subprocess.check_output([java_path, ''],stderr=subprocess.STDOUT,universal_newlines=True)java_version = parse_java_version(output)if java_version >= (, ):return LATEST_VERSIONelif java_version >= (, ):return JAVA_7_COMPATIBLE_VERSIONelif java_version >= (, ):warn('''')return JAVA_6_COMPATIBLE_VERSIONelse:raise SystemExit('')", "docstring": "Return newest compatible version.\n\n >>> version = get_newest_possible_languagetool_version()\n >>> version in [JAVA_6_COMPATIBLE_VERSION,\n ... JAVA_7_COMPATIBLE_VERSION,\n ... 
LATEST_VERSION]\n True", "id": "f4272:m1"} {"signature": "def get_common_prefix(z):", "body": "name_list = z.namelist()if name_list and all(n.startswith(name_list[]) for n in name_list[:]):return name_list[]return None", "docstring": "Get common directory in a zip file if any.", "id": "f4272:m2"} {"signature": "def get_version():", "body": "with io.open('', encoding='') as input_file:for line in input_file:if line.startswith(''):return ast.parse(line).body[].value.s", "docstring": "Return version string.", "id": "f4275:m0"} {"signature": "def which(program, win_allow_cross_arch=True):", "body": "def is_exe(path):return os.path.isfile(path) and os.access(path, os.X_OK)def _get_path_list():return os.environ[''].split(os.pathsep)if os.name == '':def find_exe(program):root, ext = os.path.splitext(program)if ext:if is_exe(program):return programelse:for ext in os.environ[''].split(os.pathsep):program_path = root + ext.lower()if is_exe(program_path):return program_pathreturn Nonedef get_path_list():paths = _get_path_list()if win_allow_cross_arch:alt_sys_path = os.path.expandvars(r\"\")if os.path.isdir(alt_sys_path):paths.insert(, alt_sys_path)else:alt_sys_path = os.path.expandvars(r\"\")if os.path.isdir(alt_sys_path):paths.append(alt_sys_path)return pathselse:def find_exe(program):return program if is_exe(program) else Noneget_path_list = _get_path_listif os.path.split(program)[]:program_path = find_exe(program)if program_path:return program_pathelse:for path in get_path_list():program_path = find_exe(os.path.join(path, program))if program_path:return program_pathreturn None", "docstring": "Identify the location of an executable file.", "id": "f4275:m1"} {"signature": "def split_multiline(value):", "body": "return [element for element in (line.strip() for line in value.split(''))if element]", "docstring": "Split a multiline string into a list, excluding blank lines.", "id": "f4275:m2"} {"signature": "def split_elements(value):", "body": "items = [v.strip() for v in value.split('')]if len(items) == :items = value.split()return items", "docstring": "Split a string with comma or space-separated elements into a list.", "id": "f4275:m3"} {"signature": "def eval_environ(value):", "body": "def eval_environ_str(value):parts = value.split('')if len(parts) < :return valueexpr = parts[].lstrip()if not re.match(\"\"''\"\"'', expr):raise ValueError('' % expr)expr = re.sub(r\"\", r\"\", expr)return parts[] if eval(expr) else ''if isinstance(value, list):new_value = []for element in value:element = eval_environ_str(element)if element:new_value.append(element)elif isinstance(value, str):new_value = eval_environ_str(value)else:new_value = valuereturn new_value", "docstring": "Evaluate environment markers.", "id": "f4275:m4"} {"signature": "def get_cfg_value(config, section, option):", "body": "try:value = config[section][option]except KeyError:if (section, option) in MULTI_OPTIONS:return []else:return ''if (section, option) in MULTI_OPTIONS:value = split_multiline(value)if (section, option) in ENVIRON_OPTIONS:value = eval_environ(value)return value", "docstring": "Get configuration value.", "id": "f4275:m5"} {"signature": "def set_cfg_value(config, section, option, value):", "body": "if isinstance(value, list):value = ''.join(value)config[section][option] = value", "docstring": "Set configuration value.", "id": "f4275:m6"} {"signature": "def cfg_to_args(config):", "body": "kwargs = {}opts_to_args = {'': [('', ''),('', ''),('', ''),('', ''),('', ''),('', ''),('', ''),('', ''),('', ''),('', ''),('', ''),('', 
''),('', ''),],'': [('', ''),('', ''),('', ''),('', ''),('', ''),('', ''),],}opts_to_args[''].append(('', ''))if IS_PY2K and not which(''):kwargs[''] = ['']kwargs[''] = Falsefor section in opts_to_args:for option, argname in opts_to_args[section]:value = get_cfg_value(config, section, option)if value:kwargs[argname] = valueif '' not in kwargs:kwargs[''] = read_description_file(config)if '' in kwargs:kwargs[''] = {'': kwargs['']}if '' in kwargs:kwargs[''] = split_elements(kwargs[''])if '' in kwargs:kwargs[''] = get_package_data(kwargs[''])if '' in kwargs:kwargs[''] = get_data_files(kwargs[''])kwargs[''] = get_version()if not IS_PY2K:kwargs[''] = ''return kwargs", "docstring": "Compatibility helper to use setup.cfg in setup.py.", "id": "f4275:m10"} {"signature": "def run_3to2(args=None):", "body": "args = BASE_ARGS_3TO2 if args is None else BASE_ARGS_3TO2 + argstry:proc = subprocess.Popen([''] + args, stderr=subprocess.PIPE)except OSError:for path in glob.glob(''):if os.path.isdir(path) and path not in sys.path:sys.path.append(path)try:from lib3to2.main import main as lib3to2_mainexcept ImportError:raise OSError('')else:if lib3to2_main('', args):raise Exception('')else:num_errors = while proc.poll() is None:line = proc.stderr.readline()sys.stderr.write(line)num_errors += line.count('')if proc.returncode or num_errors:raise Exception('')", "docstring": "Convert Python files using lib3to2.", "id": "f4275:m11"} {"signature": "def write_py2k_header(file_list):", "body": "if not isinstance(file_list, list):file_list = [file_list]python_re = re.compile(br\"\")coding_re = re.compile(br\"\")new_line_re = re.compile(br\"\")version_3 = LooseVersion('')for file in file_list:if not os.path.getsize(file):continuerewrite_needed = Falsepython_found = Falsecoding_found = Falselines = []f = open(file, '')try:while len(lines) < :line = f.readline()match = python_re.match(line)if match:python_found = Trueversion = LooseVersion(match.group().decode() or '')try:version_test = version >= version_3except TypeError:version_test = Trueif version_test:line = python_re.sub(br\"\", line)rewrite_needed = Trueelif coding_re.search(line):coding_found = Truelines.append(line)if not coding_found:match = new_line_re.search(lines[])newline = match.group() if match else b\"\"line = b\"\" + newlinelines.insert( if python_found else , line)rewrite_needed = Trueif rewrite_needed:lines += f.readlines()finally:f.close()if rewrite_needed:f = open(file, '')try:f.writelines(lines)finally:f.close()", "docstring": "Write Python 2 shebang and add encoding cookie if needed.", "id": "f4275:m12"} {"signature": "def default_hook(config):", "body": "if (any(arg.startswith('') for arg in sys.argv) andos.path.isdir(PY2K_DIR) != IS_PY2K and os.path.isdir(LIB_DIR)):shutil.rmtree(LIB_DIR)if IS_PY2K and any(arg.startswith(('', '', '', ''))for arg in sys.argv):generate_py2k(config)packages_root = get_cfg_value(config, '', '')packages_root = os.path.join(PY2K_DIR, packages_root)set_cfg_value(config, '', '', packages_root)", "docstring": "Default setup hook.", "id": "f4275:m16"} {"signature": "def main():", "body": "config = load_config()run_setup_hooks(config)setup(**cfg_to_args(config))", "docstring": "Running with distutils or setuptools.", "id": "f4275:m18"} {"signature": "def _python_cmd(*args):", "body": "args = (sys.executable,) + argsreturn subprocess.call(args) == ", "docstring": "Return True if the command succeeded.", "id": "f4276:m0"} {"signature": "def get_zip_class():", "body": "class ContextualZipFile(zipfile.ZipFile):def 
__enter__(self):return selfdef __exit__(self, type, value, traceback):self.closereturn zipfile.ZipFile if hasattr(zipfile.ZipFile, '') elseContextualZipFile", "docstring": "Supplement ZipFile class to support context manager for Python 2.6", "id": "f4276:m3"} {"signature": "def _clean_check(cmd, target):", "body": "try:subprocess.check_call(cmd)except subprocess.CalledProcessError:if os.access(target, os.F_OK):os.unlink(target)raise", "docstring": "Run the command to download target. If the command fails, clean up before\nre-raising the error.", "id": "f4276:m7"} {"signature": "def download_file_powershell(url, target):", "body": "target = os.path.abspath(target)cmd = ['','',\"\" % vars(),]_clean_check(cmd, target)", "docstring": "Download the file at url to target using Powershell (which will validate\ntrust). Raise an exception if the command cannot complete.", "id": "f4276:m8"} {"signature": "def download_file_insecure(url, target):", "body": "try:from urllib.request import urlopenexcept ImportError:from urllib2 import urlopensrc = dst = Nonetry:src = urlopen(url)data = src.read()dst = open(target, \"\")dst.write(data)finally:if src:src.close()if dst:dst.close()", "docstring": "Use Python to download the file, even though it cannot authenticate the\nconnection.", "id": "f4276:m14"} {"signature": "def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,to_dir=os.curdir, delay=, downloader_factory=get_best_downloader):", "body": "to_dir = os.path.abspath(to_dir)zip_name = \"\" % versionurl = download_base + zip_namesaveto = os.path.join(to_dir, zip_name)if not os.path.exists(saveto): log.warn(\"\", url)downloader = downloader_factory()downloader(url, saveto)return os.path.realpath(saveto)", "docstring": "Download setuptools from a specified location and return its filename\n\n`version` should be a valid setuptools version number that is available\nas an egg for download under the `download_base` URL (which should end\nwith a '/'). 
`to_dir` is the directory where the egg will be downloaded.\n`delay` is the number of seconds to pause before an actual download\nattempt.\n\n``downloader_factory`` should be a function taking no arguments and\nreturning a function for downloading a URL to a target.", "id": "f4276:m16"} {"signature": "def _build_install_args(options):", "body": "return [''] if options.user_install else []", "docstring": "Build the arguments to 'python setup.py install' on the setuptools package", "id": "f4276:m17"} {"signature": "def _parse_args():", "body": "parser = optparse.OptionParser()parser.add_option('', dest='', action='', default=False,help='')parser.add_option('', dest='', metavar=\"\",default=DEFAULT_URL,help='')parser.add_option('', dest='', action='',const=lambda: download_file_insecure, default=get_best_downloader,help='')parser.add_option('', help=\"\",default=DEFAULT_VERSION,)options, args = parser.parse_args()return options", "docstring": "Parse the command line for options", "id": "f4276:m18"} {"signature": "def main():", "body": "options = _parse_args()archive = download_setuptools(version=options.version,download_base=options.download_base,downloader_factory=options.downloader_factory,)return _install(archive, _build_install_args(options))", "docstring": "Install or upgrade setuptools and EasyInstall", "id": "f4276:m19"} {"signature": "def is_aware(value):", "body": "return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None", "docstring": "Determines if a given datetime.datetime is aware.\n\nThe logic is described in Python's docs:\nhttp://docs.python.org/library/datetime.html#datetime.tzinfo", "id": "f4281:m1"} {"signature": "def is_naive(value):", "body": "return value.tzinfo is None or value.tzinfo.utcoffset(value) is None", "docstring": "Determines if a given datetime.datetime is naive.\n\nThe logic is described in Python's docs:\nhttp://docs.python.org/library/datetime.html#datetime.tzinfo", "id": "f4281:m2"} {"signature": "def make_aware(value, timezone):", "body": "if hasattr(timezone, '') and value not in (datetime.datetime.min, datetime.datetime.max):return timezone.localize(value, is_dst=None)else:return value.replace(tzinfo=timezone)", "docstring": "Makes a naive datetime.datetime in a given time zone aware.", "id": "f4281:m3"} {"signature": "def make_naive(value, timezone):", "body": "value = value.astimezone(timezone)if hasattr(timezone, ''):value = timezone.normalize(value)return value.replace(tzinfo=None)", "docstring": "Makes an aware datetime.datetime naive in a given time zone.", "id": "f4281:m4"} {"signature": "@staticmethoddef from_element(root, timezone):", "body": "assert root.tag == ''if root.xpath(''):return _ScheduleIntervals(root, timezone)elif root.xpath(''):return _ScheduleRecurring(root, timezone)raise NotImplementedError", "docstring": "Return a Schedule object based on an lxml Element for the \n tag. 
timezone is a tzinfo object, ideally from pytz.", "id": "f4285:c0:m1"} {"signature": "def to_timezone(self, dt):", "body": "if timezone.is_aware(dt):return dt.astimezone(self.timezone)else:return timezone.make_aware(dt, self.timezone)", "docstring": "Converts a datetime to the timezone of this Schedule.", "id": "f4285:c0:m2"} {"signature": "def intervals(self, range_start=datetime.datetime.min, range_end=datetime.datetime.max):", "body": "raise NotImplementedError", "docstring": "Returns a list of tuples of start/end datetimes for when the schedule\n is active during the provided range.", "id": "f4285:c0:m3"} {"signature": "def next_interval(self, after=None):", "body": "if after is None:after = timezone.now()after = self.to_timezone(after)return next(self.intervals(range_start=after), None)", "docstring": "Returns the next Period this event is in effect, or None if the event\n has no remaining periods.", "id": "f4285:c0:m4"} {"signature": "def active_within_range(self, query_start, query_end):", "body": "return any(self.intervals(query_start, query_end))", "docstring": "Is the event ever active within the given range?", "id": "f4285:c0:m5"} {"signature": "def includes(self, query):", "body": "query = self.to_timezone(query)return any(self.intervals(range_start=query, range_end=query))", "docstring": "Does this schedule include the provided time?\n query should be a datetime (naive or timezone-aware)", "id": "f4285:c0:m6"} {"signature": "def has_remaining_intervals(self, after=None):", "body": "return bool(self.next_interval(after))", "docstring": "Is this schedule ever in effect at or after the given time?\n If no time is given, uses the current time.", "id": "f4285:c0:m7"} {"signature": "@property@memoize_methoddef exceptions(self):", "body": "ex = {}for sd in self.root.xpath(''):bits = str(sd.text).split('')date = text_to_date(bits.pop())ex.setdefault(date, []).extend([_time_text_to_period(t)for t in bits])return ex", "docstring": "A dict of dates -> [Period time tuples] representing exceptions\n to the base recurrence pattern.", "id": "f4285:c2:m1"} {"signature": "def exception_periods(self, range_start=datetime.date.min, range_end=datetime.date.max):", "body": "periods = []for exception_date, exception_times in self.exceptions.items():if exception_date >= range_start and exception_date <= range_end:for exception_time in exception_times:periods.append(Period(self.timezone.localize(datetime.datetime.combine(exception_date, exception_time.start)),self.timezone.localize(datetime.datetime.combine(exception_date, exception_time.end))))periods.sort()return periods", "docstring": "Returns a list of Period tuples for each period represented in an \n that falls between range_start and range_end.", "id": "f4285:c2:m2"} {"signature": "def includes(self, query):", "body": "query = self.to_timezone(query)query_date = query.date()query_time = query.time()specific = self.exceptions.get(query_date)if specific is not None:if len(specific) == :return Falsefor period in specific:if query_time >= period.start and query_time <= period.end:return Truereturn Falsereturn any(sched.includes(query_date, query_time) for sched in self._recurring_schedules)", "docstring": "Does this schedule include the provided time?\n query should be a datetime (naive or timezone-aware)", "id": "f4285:c2:m3"} {"signature": "def _daily_periods(self, range_start, range_end):", "body": "specific = set(self.exceptions.keys())return heapq.merge(self.exception_periods(range_start, range_end), 
*[sched.daily_periods(range_start=range_start, range_end=range_end, exclude_dates=specific)for sched in self._recurring_schedules])", "docstring": "Returns an iterator of Period tuples for every day this event is in effect, between range_start\n and range_end.", "id": "f4285:c2:m4"} {"signature": "def intervals(self, range_start=datetime.datetime.min, range_end=datetime.datetime.max):", "body": "current_period = Nonemax_continuous_days = range_start = self.to_timezone(range_start)range_end = self.to_timezone(range_end)for period in self._daily_periods(range_start.date(), range_end.date()):if period.end < range_start or period.start > range_end:continueif current_period is None:current_period = periodelse:if ( ((period.start < current_period.end)or (period.start - current_period.end) <= datetime.timedelta(minutes=))and (current_period.end - current_period.start) < datetime.timedelta(days=max_continuous_days)):current_period = Period(current_period.start, period.end)else:yield current_periodcurrent_period = periodif current_period:yield current_period", "docstring": "Returns an iterator of Period tuples for continuous stretches of time during\n which this event is in effect, between range_start and range_end.", "id": "f4285:c2:m5"} {"signature": "def includes(self, query_date, query_time=None):", "body": "if self.start_date and query_date < self.start_date:return Falseif self.end_date and query_date > self.end_date:return Falseif query_date.weekday() not in self.weekdays:return Falseif not query_time:return Trueif query_time >= self.period.start and query_time <= self.period.end:return Truereturn False", "docstring": "Does this schedule include the provided time?\n query_date and query_time are date and time objects, interpreted\n in this schedule's timezone", "id": "f4285:c3:m1"} {"signature": "def daily_periods(self, range_start=datetime.date.min, range_end=datetime.date.max, exclude_dates=tuple()):", "body": "tz = self.timezoneperiod = self.periodweekdays = self.weekdayscurrent_date = max(range_start, self.start_date)end_date = range_endif self.end_date:end_date = min(end_date, self.end_date)while current_date <= end_date:if current_date.weekday() in weekdays and current_date not in exclude_dates:yield Period(tz.localize(datetime.datetime.combine(current_date, period.start)),tz.localize(datetime.datetime.combine(current_date, period.end)))current_date += datetime.timedelta(days=)", "docstring": "Returns an iterator of Period tuples for every day this schedule is in effect, between range_start\n and range_end.", "id": "f4285:c3:m2"} {"signature": "@property@memoize_methoddef period(self):", "body": "start_time = self.root.findtext('')if start_time:return Period(text_to_time(start_time), text_to_time(self.root.findtext('')))return Period(datetime.time(, ), datetime.time(, ))", "docstring": "A Period tuple representing the daily start and end time.", "id": "f4285:c3:m3"} {"signature": "@propertydef weekdays(self):", "body": "if not self.root.xpath(''):return set(range())return set(int(d) - for d in self.root.xpath(''))", "docstring": "A set of integers representing the weekdays the schedule recurs on,\n with Monday = 0 and Sunday = 6.", "id": "f4285:c3:m4"} {"signature": "@property@memoize_methoddef start_date(self):", "body": "return text_to_date(self.root.findtext(''))", "docstring": "Start date of event recurrence, as datetime.date or None.", "id": "f4285:c3:m5"} {"signature": "@property@memoize_methoddef end_date(self):", "body": "return text_to_date(self.root.findtext(''))", 
"docstring": "End date of event recurrence, as datetime.date or None.", "id": "f4285:c3:m6"} {"signature": "def _tmdd_datetime_to_iso(dt, include_offset=True, include_seconds=True):", "body": "datestring = dt.findtext('')timestring = dt.findtext('')assert len(datestring) == assert len(timestring) >= iso = datestring[:] + '' + datestring[:] + '' + datestring[:] + ''+ timestring[:] + '' + timestring[:]if include_seconds:iso += '' + timestring[:]if include_offset:offset = dt.findtext('')if offset:assert len(offset) == iso += offset[:] + '' + offset[:]else:raise Exception(\"\" % etree.tostring(dt))return iso", "docstring": "dt is an xml Element with , \n... \n... \n... '''\n>>> html_parser = lxml.etree.HTMLParser()\n>>> doc = lxml.etree.fromstring(html, parser=html_parser)\n>>> doc\n\n>>> rules = {\n... \"headingcss\": \"#main\",\n... \"headingxpath\": \"//h1[@id='main']\"\n... }\n>>> p = parslepy.Parselet(rules)\n>>> p.extract(doc)\n{'headingcss': u'What\\u2019s new', 'headingxpath': u'What\\u2019s new'}", "id": "f9180:c4:m10"} {"signature": "def _extract(self, parselet_node, document, level=):", "body": "if self.DEBUG:debug_offset = \"\".join([\"\" for x in range(level)])if isinstance(parselet_node, ParsleyNode):output = {}for ctx, v in list(parselet_node.items()):if self.DEBUG:print(debug_offset, \"\", ctx, v)extracted=Nonetry:if ctx.scope:extracted = []selected = self.selector_handler.select(document, ctx.scope)if selected:for i, elem in enumerate(selected, start=):parse_result = self._extract(v, elem, level=level+)if isinstance(parse_result, (list, tuple)):extracted.extend(parse_result)else:extracted.append(parse_result)if not ctx.iterate:breakif self.DEBUG:print(debug_offset,\"\" % (i, ctx.scope))else:extracted = self._extract(v, document, level=level+)except NonMatchingNonOptionalKey as e:if self.DEBUG:print(debug_offset, str(e))if not ctx.required or not self.STRICT_MODE:output[ctx.key] = {}else:raiseexcept Exception as e:if self.DEBUG:print(str(e))raiseif ( isinstance(extracted, list)and not extractedand not ctx.iterate):extracted = {}if self.KEEP_ONLY_FIRST_ELEMENT_IF_LIST:try:if ( isinstance(extracted, list)and extractedand not ctx.iterate):if self.DEBUG:print(debug_offset, \"\")extracted = extracted[]except Exception as e:if self.DEBUG:print(str(e))print(debug_offset, \"\")if ( self.STRICT_MODEand ctx.requiredand extracted is None):raise NonMatchingNonOptionalKey('' % (ctx.key,document.getroottree().getpath(document),v))if ctx.key == self.SPECIAL_LEVEL_KEY:if isinstance(extracted, dict):output.update(extracted)elif isinstance(extracted, list):if extracted:raise RuntimeError(\"\")else:passelse:if extracted is not None:output[ctx.key] = extractedelse:passreturn outputelif isinstance(parselet_node, Selector):return self.selector_handler.extract(document, parselet_node)else:pass", "docstring": "Extract values at this document node level\nusing the parselet_node instructions:\n- go deeper in tree\n- or call selector handler in case of a terminal selector leaf", "id": "f9180:c4:m11"} {"signature": "def keys(self):", "body": "return self._keys(self.parselet_tree)", "docstring": "Return a list of 1st level keys of the output data model\n\n>>> import parslepy\n>>> rules = {\n... \"headingcss\": \"#main\",\n... \"headingxpath\": \"//h1[@id='main']\"\n... 
}\n>>> p = parslepy.Parselet(rules)\n>>> sorted(p.keys())\n['headingcss', 'headingxpath']", "id": "f9180:c4:m12"} {"signature": "def make(self, selection_string):", "body": "raise NotImplementedError", "docstring": "Interpret a selection_string as a selector\nfor elements or element attributes in a (semi-)structured document.\nIn case of XPath selectors, this can also be a function call.\n\n:param selection_string: a string representing a selector\n:rtype: :class:`.Selector`", "id": "f9183:c1:m1"} {"signature": "def select(self, document, selector):", "body": "raise NotImplementedError", "docstring": "Apply the selector on the document\n\n:param document: lxml-parsed document\n:param selector: input :class:`.Selector` to apply on the document\n:rtype: lxml.etree.Element list", "id": "f9183:c1:m2"} {"signature": "def extract(self, document, selector):", "body": "raise NotImplementedError", "docstring": "Apply the selector on the document\nand return a value for the matching elements (text content or\nelement attributes)\n\n:param document: lxml-parsed document\n:param selector: input :class:`.Selector` to apply on the document\n:rtype: depends on the selector (string, boolean value, ...)\n\nReturn value can be single- or multi-valued.", "id": "f9183:c1:m3"} {"signature": "def __init__(self, namespaces=None, extensions=None, context=None, debug=False):", "body": "super(XPathSelectorHandler, self).__init__(debug=debug)self.namespaces = copy.copy(self.EXSLT_NAMESPACES)self._add_parsley_ns(self.namespaces)self.extensions = copy.copy(self.LOCAL_XPATH_EXTENSIONS)self._user_extensions = Noneself.context = contextif namespaces:self.namespaces.update(namespaces)if extensions:self._user_extensions = extensionsself._process_extensions(extensions)self._set_smart_strings_regexps()", "docstring": ":param namespaces: namespace mapping as :class:`dict`\n:param extensions: extension :class:`dict`\n:param context: user-context passed to XPath extension functions\n\n`namespaces` and `extensions` dicts should have the same format\nas for `lxml`_:\nsee http://lxml.de/xpathxslt.html#namespaces-and-prefixes\nand ``_\n\nExtension functions have a slightly different signature than\npure-lxml extension functions: they must expect a user-context\nas first argument; all other arguments are the same as for\n`lxml` extensions.\n\n`context` will be passed as first argument to extension functions\nregistered through `extensions`.\nAlternative: user-context can also be passed to :meth:`parslepy.base.Parselet.parse`", "id": "f9183:c2:m0"} {"signature": "@classmethoddef _add_parsley_ns(cls, namespace_dict):", "body": "namespace_dict.update({'' : cls.LOCAL_NAMESPACE,'' : cls.LOCAL_NAMESPACE,})return namespace_dict", "docstring": "Extend XPath evaluation with Parsley extensions' namespace", "id": "f9183:c2:m6"} {"signature": "def make(self, selection):", "body": "cached = self._selector_cache.get(selection)if cached:return cachedtry:selector = lxml.etree.XPath(selection,namespaces = self.namespaces,extensions = self.extensions,smart_strings=(self.SMART_STRINGSor self._test_smart_strings_needed(selection)),)except lxml.etree.XPathSyntaxError as syntax_error:syntax_error.msg += \"\" % selectionraise syntax_errorexcept Exception as e:if self.DEBUG:print(repr(e), selection)raiseself._selector_cache[selection] = Selector(selector)return self._selector_cache[selection]", "docstring": "XPath expression can also use EXSLT functions (as long as they are\nunderstood by libxslt)", "id": "f9183:c2:m7"} {"signature": "def extract(self, 
document, selector, debug_offset=''):", "body": "selected = self.select(document, selector)if selected is not None:if isinstance(selected, (list, tuple)):if not len(selected):returnreturn [self._extract_single(m) for m in selected]else:return self._extract_single(selected)else:if self.DEBUG:print(debug_offset, \"\")return None", "docstring": "Try and convert matching Elements to unicode strings.\n\nIf this fails, the selector evaluation probably already\nreturned some string(s) of some sort, or boolean value,\nor int/float, so return that instead.", "id": "f9183:c2:m9"} {"signature": "def _default_element_extract(self, element):", "body": "return parslepy.funcs.extract_text(element)", "docstring": "Overridable method to change how matching Elements\nare represented in output", "id": "f9183:c2:m10"} {"signature": "def make(self, selection):", "body": "cached = self._selector_cache.get(selection)if cached:return cachednamespaces = self.EXSLT_NAMESPACESself._add_parsley_ns(namespaces)try:m = self.REGEX_ENDING_ATTRIBUTE.match(selection)if m:cssxpath = css_to_xpath(m.group(\"\"))attribute = m.group(\"\").replace('', '')cssxpath = \"\" % (cssxpath, attribute)else:cssxpath = css_to_xpath(selection)selector = lxml.etree.XPath(cssxpath,namespaces = self.namespaces,extensions = self.extensions,smart_strings=(self.SMART_STRINGSor self._test_smart_strings_needed(selection)),)except tuple(self.CSSSELECT_SYNTAXERROR_EXCEPTIONS) as syntax_error:if self.DEBUG:print(repr(syntax_error), selection)print(\"\")try:selector = lxml.etree.XPath(selection,namespaces = self.namespaces,extensions = self.extensions,smart_strings=(self.SMART_STRINGSor self._test_smart_strings_needed(selection)),)except lxml.etree.XPathSyntaxError as syntax_error:syntax_error.msg += \"\" % selectionraise syntax_errorexcept Exception as e:if self.DEBUG:print(repr(e), selection)raiseexcept lxml.etree.XPathSyntaxError as syntax_error:syntax_error.msg += \"\" % selectionraise syntax_errorexcept Exception as e:if self.DEBUG:print(repr(e), selection)raiseself._selector_cache[selection] = Selector(selector)return self._selector_cache[selection]", "docstring": "Scopes and selectors are tested in this order:\n* is this a CSS selector with an appended @something attribute?\n* is this a regular CSS selector?\n* is this an XPath expression?\n\nXPath expression can also use EXSLT functions (as long as they are\nunderstood by libxslt)", "id": "f9183:c3:m0"} {"signature": "def _check_template():", "body": "extended = \"\"\"\"\"\"rendered = render_template_string(extended,pid=dict(pid_value=None),record=dict())assert '' in renderedassert '' in renderedassert '' in renderedassert '' in renderedassert '' in renderedassert '' in renderedassert '' in rendered", "docstring": "Check template.", "id": "f9188:m0"} {"signature": "def object_as_dict(obj):", "body": "return {c.key: getattr(obj, c.key)for c in inspect(obj).mapper.column_attrs}", "docstring": "Make a dict from SQLAlchemy object.", "id": "f9190:m0"} {"signature": "def stringio_to_base64(stringio_obj):", "body": "return binascii.b2a_base64(stringio_obj.getvalue())", "docstring": "Get base64 encoded version of a BytesIO object.", "id": "f9191:m0"} {"signature": "def make_file_fixture(filename, base64_file):", "body": "fp = BytesIO(binascii.a2b_base64(base64_file))return fp, filename", "docstring": "Generate a file fixture suitable for use with the Flask test client.\n\n :param base64_file: A string encoding a file in base64. Use\n file_to_base64() to get the base64 encoding of a file. 
If not provided\n        a PDF file will be generated instead, including", "id": "f9191:m1"} {"signature": "def make_pdf_fixture(filename, text=None):", "body": "if text is None:text = \"\" % filenamefrom reportlab.pdfgen import canvasoutput = BytesIO()c = canvas.Canvas(output)c.drawString(, , text)c.showPage()c.save()return make_file_fixture(filename, stringio_to_base64(output))", "docstring": "Generate a PDF fixture.\n\n    It's suitable for use with Werkzeug test client and Flask test request\n    context.\n    Use of this function requires that reportlab has been installed.\n\n    :param filename: Desired filename.\n    :param text: Text to include in PDF. Defaults to \"Filename: \", if\n        not specified.", "id": "f9191:m2"} {"signature": "def fill_oauth2_headers(json_headers, token):", "body": "headers = deepcopy(json_headers)headers.append(('', ''.format(token.access_token)))return headers", "docstring": "Create authentication headers (with a valid oauth2 token).", "id": "f9191:m3"} {"signature": "@classmethoddef create(cls, object_type=None, object_uuid=None, **kwargs):", "body": "assert '' in kwargskwargs.setdefault('', cls.default_status)return super(DepositProvider, cls).create(object_type=object_type, object_uuid=object_uuid, **kwargs)", "docstring": "Create a new deposit identifier.\n\n        :param object_type: The object type (Default: ``None``)\n        :param object_uuid: The object UUID (Default: ``None``)\n        :param kwargs: It contains the pid value.", "id": "f9195:c0:m0"} {"signature": "def deposit_minter(record_uuid, data):", "body": "provider = DepositProvider.create(object_type='',object_uuid=record_uuid,pid_value=uuid.uuid4().hex,)data[''] = {'': provider.pid.pid_value,'': '',}return provider.pid", "docstring": "Mint a deposit identifier.\n\n    A PID with the following characteristics is created:\n\n    .. code-block:: python\n\n        {\n            \"object_type\": \"rec\",\n            \"object_uuid\": record_uuid,\n            \"pid_value\": \"\",\n            \"pid_type\": \"depid\",\n        }\n\n    The following deposit meta information are updated:\n\n    .. code-block:: python\n\n        deposit['_deposit'] = {\n            \"id\": \"\",\n            \"status\": \"draft\",\n        }\n\n    :param record_uuid: Record UUID.\n    :param data: Record content.\n    :returns: A :class:`invenio_pidstore.models.PersistentIdentifier` object.", "id": "f9197:m0"} {"signature": "def json_serializer(pid, data, *args):", "body": "if data is not None:response = Response(json.dumps(data.dumps()),mimetype='')else:response = Response(mimetype='')return response", "docstring": "Build a JSON Flask response using the given data.\n\n    :param pid: The `invenio_pidstore.models.PersistentIdentifier` of the\n        record.\n    :param data: The record metadata.\n    :returns: A Flask response with JSON data.\n    :rtype: :py:class:`flask.Response`.", "id": "f9202:m0"} {"signature": "def file_serializer(obj):", "body": "return {\"\": str(obj.file_id),\"\": obj.key,\"\": obj.file.size,\"\": obj.file.checksum,}", "docstring": "Serialize an object.\n\n    :param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance.\n    :returns: A dictionary with the fields to serialize.", "id": "f9202:m1"} {"signature": "def json_file_serializer(obj, status=None):", "body": "return make_response(jsonify(file_serializer(obj)), status)", "docstring": "JSON File Serializer.\n\n    :param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance.\n    :param status: A HTTP Status. 
(Default: ``None``)\n    :returns: A Flask response with JSON data.\n    :rtype: :py:class:`flask.Response`.", "id": "f9202:m2"} {"signature": "def json_files_serializer(objs, status=None):", "body": "files = [file_serializer(obj) for obj in objs]return make_response(json.dumps(files), status)", "docstring": "JSON Files Serializer.\n\n    :param objs: A list of :class:`invenio_files_rest.models.ObjectVersion`\n        instances.\n    :param status: A HTTP Status. (Default: ``None``)\n    :returns: A Flask response with JSON data.\n    :rtype: :py:class:`flask.Response`.", "id": "f9202:m3"} {"signature": "def json_file_response(obj=None, pid=None, record=None, status=None):", "body": "from invenio_records_files.api import FilesIteratorif isinstance(obj, FilesIterator):return json_files_serializer(obj, status=status)else:return json_file_serializer(obj, status=status)", "docstring": "JSON Files/File serializer.\n\n    :param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance or\n        a :class:`invenio_records_files.api.FilesIterator` if it's a list of\n        files.\n    :param pid: PID value. (not used)\n    :param record: The record metadata. (not used)\n    :param status: The HTTP status code.\n    :returns: A Flask response with JSON data.\n    :rtype: :py:class:`flask.Response`.", "id": "f9202:m4"} {"signature": "def process_minter(value):", "body": "try:return current_pidstore.minters[value]except KeyError:raise click.BadParameter(''.format(value, ''.join(current_pidstore.minters.keys())))", "docstring": "Load minter from PIDStore registry based on given value.\n\n    :param value: Name of the minter.\n    :returns: The minter.", "id": "f9204:m0"} {"signature": "def process_schema(value):", "body": "schemas = current_app.extensions[''].schemastry:return schemas[value]except KeyError:raise click.BadParameter(''.format(value, ''.join(schemas.keys())))", "docstring": "Load schema from JSONSchema registry based on given value.\n\n    :param value: Schema path, relative to the directory when it was\n        registered.\n    :returns: The schema absolute path.", "id": "f9204:m1"} {"signature": "@click.group()def deposit():", "body": "", "docstring": "Deposit management commands.", "id": "f9204:m2"} {"signature": "@deposit.command()@click.argument('')@with_appcontextdef schema(source):", "body": "click.echo(process_schema(source))", "docstring": "Create deposit schema from an existing schema.", "id": "f9204:m3"} {"signature": "@deposit.command()@click.argument('', type=click.File(''), default=sys.stdin)@click.option('', '', '', multiple=True)@click.option('', is_flag=True, default=False)@with_appcontextdef create(source, ids, force, pid_minter=None):", "body": "", "docstring": "Create new deposit.", "id": "f9204:m4"} {"signature": "@deposit.command()@click.option('', '', '', multiple=True)def publish(ids):", "body": "", "docstring": "Publish selected deposits.", "id": "f9204:m5"} {"signature": "@deposit.command()@click.option('', '', '', multiple=True)def edit(ids):", "body": "", "docstring": "Make selected deposits editable.", "id": "f9204:m6"} {"signature": "@deposit.command()@click.option('', '', '', multiple=True)def discard(ids):", "body": "", "docstring": "Discard selected deposits.", "id": "f9204:m7"} {"signature": "def __init__(self, app):", "body": "self.app = app", "docstring": "Initialize state.", "id": "f9205:c0:m0"} {"signature": "@cached_propertydef jsonschemas(self):", "body": "_jsonschemas = {k: v['']for k, v in self.app.config[''].items()if '' in v}return defaultdict(lambda: self.app.config[''], _jsonschemas)", "docstring": "Load 
deposit JSON schemas.", "id": "f9205:c0:m1"} {"signature": "@cached_propertydef schemaforms(self):", "body": "_schemaforms = {k: v['']for k, v in self.app.config[''].items()if '' in v}return defaultdict(lambda: self.app.config[''], _schemaforms)", "docstring": "Load deposit schema forms.", "id": "f9205:c0:m2"} {"signature": "def __init__(self, app=None):", "body": "if app:self.init_app(app)", "docstring": "Extension initialization.", "id": "f9205:c1:m0"} {"signature": "def init_app(self, app):", "body": "self.init_config(app)app.register_blueprint(ui.create_blueprint(app.config['']))app.extensions[''] = _DepositState(app)if app.config['']:post_action.connect(index_deposit_after_publish, sender=app,weak=False)", "docstring": "Flask application initialization.\n\n Initialize the UI endpoints. Connect all signals if\n `DEPOSIT_REGISTER_SIGNALS` is ``True``.\n\n :param app: An instance of :class:`flask.Flask`.", "id": "f9205:c1:m1"} {"signature": "def init_config(self, app):", "body": "app.config.setdefault('',app.config.get('',''))for k in dir(config):if k.startswith(''):app.config.setdefault(k, getattr(config, k))", "docstring": "Initialize configuration.\n\n :param app: An instance of :class:`flask.Flask`.", "id": "f9205:c1:m2"} {"signature": "def __init__(self, app=None):", "body": "if app:self.init_app(app)", "docstring": "Extension initialization.\n\n :param app: An instance of :class:`flask.Flask`.", "id": "f9205:c2:m0"} {"signature": "def init_app(self, app):", "body": "self.init_config(app)blueprint = rest.create_blueprint(app.config[''])@app.before_first_requestdef extend_default_endpoint_prefixes():\"\"\"\"\"\"endpoint_prefixes = utils.build_default_endpoint_prefixes(dict(app.config['']))current_records_rest = app.extensions['']overlap = set(endpoint_prefixes.keys()) & set(current_records_rest.default_endpoint_prefixes)if overlap:raise RuntimeError(''.format(''.join(overlap)))current_records_rest.default_endpoint_prefixes.update(endpoint_prefixes)app.register_blueprint(blueprint)app.extensions[''] = _DepositState(app)if app.config['']:post_action.connect(index_deposit_after_publish, sender=app,weak=False)", "docstring": "Flask application initialization.\n\n Initialize the REST endpoints. Connect all signals if\n `DEPOSIT_REGISTER_SIGNALS` is True.\n\n :param app: An instance of :class:`flask.Flask`.", "id": "f9205:c2:m1"} {"signature": "def init_config(self, app):", "body": "for k in dir(config):if k.startswith(''):app.config.setdefault(k, getattr(config, k))", "docstring": "Initialize configuration.\n\n :param app: An instance of :class:`flask.Flask`.", "id": "f9205:c2:m2"} {"signature": "def deposits_filter():", "body": "if not has_request_context() or admin_permission_factory().can():return Q()else:return Q('', **{'': getattr(current_user, '', )})", "docstring": "Filter list of deposits.\n\n Permit to the user to see all if:\n\n * The user is an admin (see\n func:`invenio_deposit.permissions:admin_permission_factory`).\n\n * It's called outside of a request.\n\n Otherwise, it filters out any deposit where user is not the owner.", "id": "f9206:m0"} {"signature": "def index_deposit_after_publish(sender, action=None, pid=None, deposit=None):", "body": "if action == '':_, record = deposit.fetch_published()index_record.delay(str(record.id))", "docstring": "Index the record after publishing.\n\n .. note:: if the record is not published, it doesn't index.\n\n :param sender: Who send the signal.\n :param action: Action executed by the sender. 
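The `init_app`/`init_config` records above follow the standard Flask extension pattern: copy module-level defaults into `app.config` with `setdefault`, then register extension state under `app.extensions`. A stripped-down sketch of that pattern; the `MYEXT_*` names and the `DEFAULTS` class are hypothetical placeholders, not invenio-deposit configuration keys::

    import flask

    class DEFAULTS:
        # Hypothetical defaults holder; invenio-deposit keeps these in a config module.
        MYEXT_GREETING = "hello"
        MYEXT_REGISTER_SIGNALS = False

    class MyExtension:
        def __init__(self, app=None):
            if app:
                self.init_app(app)

        def init_app(self, app):
            self.init_config(app)
            app.extensions["my-extension"] = self

        def init_config(self, app):
            # Only fill in keys the application has not already set.
            for key in dir(DEFAULTS):
                if key.startswith("MYEXT_"):
                    app.config.setdefault(key, getattr(DEFAULTS, key))

    app = flask.Flask(__name__)
    MyExtension(app)
    assert app.config["MYEXT_GREETING"] == "hello"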
(Default: ``None``)\n :param pid: PID object. (Default: ``None``)\n :param deposit: Deposit object. (Default: ``None``)", "id": "f9210:m0"} {"signature": "def __init__(self, id_, *args, **kwargs):", "body": "super(DepositScope, self).__init__(id_=''.format(id_),group='', *args, **kwargs)", "docstring": "Define the scope.", "id": "f9211:c0:m0"} {"signature": "def mark_as_action(f):", "body": "f.__deposit_action__ = Truereturn f", "docstring": "Decorator for marking method as deposit action.\n\n Allows creation of new REST API action on ``Deposit`` subclass.\n Following example shows possible `cloning` of a deposit instance.\n\n .. code-block:: python\n\n from invenio_deposit.api import Deposit\n\n class CustomDeposit(Deposit):\n @mark_as_action\n def clone(self, pid=None):\n new_bucket = self.files.bucket.clone()\n new_deposit = Deposit.create(self.dumps())\n new_deposit.files.bucket = new_bucket\n new_deposit.commit()\n\n @mark_as_action\n def edit(self, pid=None):\n # Extend existing action.\n self['_last_editor'] = current_user.get_id()\n return super(CustomDeposit, self).edit(pid=pid)\n\n # Disable publish action from REST API.\n def publish(self, pid=None):\n return super(CustomDeposit, self).publish(pid=pid)\n\n :param f: Decorated method.", "id": "f9212:m0"} {"signature": "def extract_actions_from_class(record_class):", "body": "for name in dir(record_class):method = getattr(record_class, name, None)if method and getattr(method, '', False):yield method.__name__", "docstring": "Extract actions from class.", "id": "f9212:m1"} {"signature": "def check_oauth2_scope(can_method, *myscopes):", "body": "def check(record, *args, **kwargs):@require_api_auth()@require_oauth_scopes(*myscopes)def can(self):return can_method(record)return type('', (), {'': can})()return check", "docstring": "Base permission factory that check OAuth2 scope and can_method.\n\n :param can_method: Permission check function that accept a record in input\n and return a boolean.\n :param myscopes: List of scopes required to permit the access.\n :returns: A :class:`flask_principal.Permission` factory.", "id": "f9212:m2"} {"signature": "def can_elasticsearch(record):", "body": "search = request._methodview.search_class()search = search.get_record(str(record.id))return search.count() == ", "docstring": "Check if a given record is indexed.\n\n :param record: A record object.\n :returns: If the record is indexed returns `True`, otherwise `False`.", "id": "f9212:m3"} {"signature": "def index(method=None, delete=False):", "body": "if method is None:return partial(index, delete=delete)@wraps(method)def wrapper(self_or_cls, *args, **kwargs):\"\"\"\"\"\"result = method(self_or_cls, *args, **kwargs)try:if delete:self_or_cls.indexer.delete(result)else:self_or_cls.indexer.index(result)except RequestError:current_app.logger.exception(''.format(result))return resultreturn wrapper", "docstring": "Decorator to update index.\n\n :param method: Function wrapped. (Default: ``None``)\n :param delete: If `True` delete the indexed record. 
(Default: ``None``)", "id": "f9213:m0"} {"signature": "def has_status(method=None, status=''):", "body": "if method is None:return partial(has_status, status=status)@wraps(method)def wrapper(self, *args, **kwargs):\"\"\"\"\"\"if status != self.status:raise PIDInvalidAction()return method(self, *args, **kwargs)return wrapper", "docstring": "Check that deposit has a defined status (default: draft).\n\n :param method: Function executed if record has a defined status.\n (Default: ``None``)\n :param status: Defined status to check. (Default: ``'draft'``)", "id": "f9213:m1"} {"signature": "def preserve(method=None, result=True, fields=None):", "body": "if method is None:return partial(preserve, result=result, fields=fields)fields = fields or ('', )@wraps(method)def wrapper(self, *args, **kwargs):\"\"\"\"\"\"data = {field: self[field] for field in fields if field in self}result_ = method(self, *args, **kwargs)replace = result_ if result else selffor field in data:replace[field] = data[field]return result_return wrapper", "docstring": "Preserve fields in deposit.\n\n :param method: Function to execute. (Default: ``None``)\n :param result: If `True` returns the result of method execution,\n otherwise `self`. (Default: ``True``)\n :param fields: List of fields to preserve (default: ``('_deposit',)``).", "id": "f9213:m2"} {"signature": "@propertydef pid(self):", "body": "pid = self.deposit_fetcher(self.id, self)return PersistentIdentifier.get(pid.pid_type,pid.pid_value)", "docstring": "Return an instance of deposit PID.", "id": "f9213:c0:m0"} {"signature": "@propertydef record_schema(self):", "body": "schema_path = current_jsonschemas.url_to_path(self[''])schema_prefix = current_app.config['']if schema_path and schema_path.startswith(schema_prefix):return current_jsonschemas.path_to_url(schema_path[len(schema_prefix):])", "docstring": "Convert deposit schema to a valid record schema.", "id": "f9213:c0:m1"} {"signature": "def build_deposit_schema(self, record):", "body": "schema_path = current_jsonschemas.url_to_path(record[''])schema_prefix = current_app.config['']if schema_path:return current_jsonschemas.path_to_url(schema_prefix + schema_path)", "docstring": "Convert record schema to a valid deposit schema.\n\n :param record: The record used to build deposit schema.\n :returns: The absolute URL to the schema or `None`.", "id": "f9213:c0:m2"} {"signature": "def fetch_published(self):", "body": "pid_type = self['']['']['']pid_value = self['']['']['']resolver = Resolver(pid_type=pid_type, object_type='',getter=partial(self.published_record_class.get_record,with_deleted=True))return resolver.resolve(pid_value)", "docstring": "Return a tuple with PID and published record.", "id": "f9213:c0:m3"} {"signature": "@preserve(fields=('', ''))def merge_with_published(self):", "body": "pid, first = self.fetch_published()lca = first.revisions[self['']['']['']]args = [lca.dumps(), first.dumps(), self.dumps()]for arg in args:del arg[''], arg['']args.append({})m = Merger(*args)try:m.run()except UnresolvedConflictsException:raise MergeConflict()return patch(m.unified_patches, lca)", "docstring": "Merge changes with latest published version.", "id": "f9213:c0:m4"} {"signature": "@indexdef commit(self, *args, **kwargs):", "body": "return super(Deposit, self).commit(*args, **kwargs)", "docstring": "Store changes on current instance in database and index it.", "id": "f9213:c0:m5"} {"signature": "@classmethod@indexdef create(cls, data, id_=None):", "body": "data.setdefault('', 
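The `preserve` record above protects selected keys (by default `_deposit`) across a mutating call. A simplified, self-contained sketch of that decorator idea, restoring the saved keys on the instance itself (the real decorator can also patch the returned value, controlled by its `result` flag)::

    import functools

    def preserve_keys(*fields):
        # Save the listed keys before the call and re-apply them afterwards.
        def decorator(method):
            @functools.wraps(method)
            def wrapper(self, *args, **kwargs):
                saved = {f: self[f] for f in fields if f in self}
                result = method(self, *args, **kwargs)
                self.update(saved)
                return result
            return wrapper
        return decorator

    class Draft(dict):
        @preserve_keys("_deposit")
        def clear(self):
            super().clear()

    d = Draft({"_deposit": {"status": "draft"}, "title": "x"})
    d.clear()
    assert d == {"_deposit": {"status": "draft"}}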
current_jsonschemas.path_to_url(current_app.config['']))if '' not in data:id_ = id_ or uuid.uuid4()cls.deposit_minter(id_, data)data[''].setdefault('', list())if current_user and current_user.is_authenticated:creator_id = int(current_user.get_id())if creator_id not in data['']['']:data[''][''].append(creator_id)data[''][''] = creator_idreturn super(Deposit, cls).create(data, id_=id_)", "docstring": "Create a deposit.\n\n Initialize the follow information inside the deposit:\n\n .. code-block:: python\n\n deposit['_deposit'] = {\n 'id': pid_value,\n 'status': 'draft',\n 'owners': [user_id],\n 'created_by': user_id,\n }\n\n The deposit index is updated.\n\n :param data: Input dictionary to fill the deposit.\n :param id_: Default uuid for the deposit.\n :returns: The new created deposit.", "id": "f9213:c0:m6"} {"signature": "@contextmanagerdef _process_files(self, record_id, data):", "body": "if self.files:assert not self.files.bucket.lockedself.files.bucket.locked = Truesnapshot = self.files.bucket.snapshot(lock=True)data[''] = self.files.dumps(bucket=snapshot.id)yield datadb.session.add(RecordsBuckets(record_id=record_id, bucket_id=snapshot.id))else:yield data", "docstring": "Snapshot bucket and add files in record during first publishing.", "id": "f9213:c0:m7"} {"signature": "def _publish_new(self, id_=None):", "body": "minter = current_pidstore.minters[current_app.config['']]id_ = id_ or uuid.uuid4()record_pid = minter(id_, self)self[''][''] = {'': record_pid.pid_type,'': record_pid.pid_value,'': ,}data = dict(self.dumps())data[''] = self.record_schemawith self._process_files(id_, data):record = self.published_record_class.create(data, id_=id_)return record", "docstring": "Publish new deposit.\n\n :param id_: The forced record UUID.", "id": "f9213:c0:m8"} {"signature": "def _publish_edited(self):", "body": "record_pid, record = self.fetch_published()if record.revision_id == self['']['']['']:data = dict(self.dumps())else:data = self.merge_with_published()data[''] = self.record_schemadata[''] = self['']record = record.__class__(data, model=record.model)return record", "docstring": "Publish the deposit after for editing.", "id": "f9213:c0:m9"} {"signature": "@has_status@mark_as_actiondef publish(self, pid=None, id_=None):", "body": "pid = pid or self.pidif not pid.is_registered():raise PIDInvalidAction()self[''][''] = ''if self[''].get('') is None: self._publish_new(id_=id_)else: record = self._publish_edited()record.commit()self.commit()return self", "docstring": "Publish a deposit.\n\n If it's the first time:\n\n * it calls the minter and set the following meta information inside\n the deposit:\n\n .. code-block:: python\n\n deposit['_deposit'] = {\n 'type': pid_type,\n 'value': pid_value,\n 'revision_id': 0,\n }\n\n * A dump of all information inside the deposit is done.\n\n * A snapshot of the files is done.\n\n Otherwise, published the new edited version.\n In this case, if in the mainwhile someone already published a new\n version, it'll try to merge the changes with the latest version.\n\n .. note:: no need for indexing as it calls `self.commit()`.\n\n Status required: ``'draft'``.\n\n :param pid: Force the new pid value. (Default: ``None``)\n :param id_: Force the new uuid value as deposit id. 
(Default: ``None``)\n :returns: Returns itself.", "id": "f9213:c0:m10"} {"signature": "def _prepare_edit(self, record):", "body": "data = record.dumps()data[''][''][''] = record.revision_iddata[''][''] = ''data[''] = self.build_deposit_schema(record)return data", "docstring": "Update selected keys.\n\n :param record: The record to prepare.", "id": "f9213:c0:m11"} {"signature": "@has_status(status='')@index@mark_as_actiondef edit(self, pid=None):", "body": "pid = pid or self.pidwith db.session.begin_nested():before_record_update.send(current_app._get_current_object(), record=self)record_pid, record = self.fetch_published()assert PIDStatus.REGISTERED == record_pid.statusassert record[''] == self['']self.model.json = self._prepare_edit(record)flag_modified(self.model, '')db.session.merge(self.model)after_record_update.send(current_app._get_current_object(), record=self)return self.__class__(self.model.json, model=self.model)", "docstring": "Edit deposit.\n\n #. The signal :data:`invenio_records.signals.before_record_update`\n is sent before the edit execution.\n\n #. The following meta information are saved inside the deposit:\n\n .. code-block:: python\n\n deposit['_deposit']['pid'] = record.revision_id\n deposit['_deposit']['status'] = 'draft'\n deposit['$schema'] = deposit_schema_from_record_schema\n\n #. The signal :data:`invenio_records.signals.after_record_update` is\n sent after the edit execution.\n\n #. The deposit index is updated.\n\n Status required: `published`.\n\n .. note:: the process fails if the pid has status\n :attr:`invenio_pidstore.models.PIDStatus.REGISTERED`.\n\n :param pid: Force a pid object. (Default: ``None``)\n :returns: A new Deposit object.", "id": "f9213:c0:m12"} {"signature": "@has_status@index@mark_as_actiondef discard(self, pid=None):", "body": "pid = pid or self.pidwith db.session.begin_nested():before_record_update.send(current_app._get_current_object(), record=self)_, record = self.fetch_published()self.model.json = deepcopy(record.model.json)self.model.json[''] = self.build_deposit_schema(record)flag_modified(self.model, '')db.session.merge(self.model)after_record_update.send(current_app._get_current_object(), record=self)return self.__class__(self.model.json, model=self.model)", "docstring": "Discard deposit changes.\n\n #. The signal :data:`invenio_records.signals.before_record_update` is\n sent before the edit execution.\n\n #. It restores the last published version.\n\n #. The following meta information are saved inside the deposit:\n\n .. code-block:: python\n\n deposit['$schema'] = deposit_schema_from_record_schema\n\n #. The signal :data:`invenio_records.signals.after_record_update` is\n sent after the edit execution.\n\n #. The deposit index is updated.\n\n Status required: ``'draft'``.\n\n :param pid: Force a pid object. (Default: ``None``)\n :returns: A new Deposit object.", "id": "f9213:c0:m13"} {"signature": "@has_status@index(delete=True)def delete(self, force=True, pid=None):", "body": "pid = pid or self.pidif self[''].get(''):raise PIDInvalidAction()if pid:pid.delete()return super(Deposit, self).delete(force=force)", "docstring": "Delete deposit.\n\n Status required: ``'draft'``.\n\n :param force: Force deposit delete. (Default: ``True``)\n :param pid: Force pid object. 
(Default: ``None``)\n :returns: A new Deposit object.", "id": "f9213:c0:m14"} {"signature": "@has_status@preserve(result=False)def clear(self, *args, **kwargs):", "body": "super(Deposit, self).clear(*args, **kwargs)", "docstring": "Clear only drafts.\n\n Status required: ``'draft'``.\n\n Meta information inside `_deposit` are preserved.", "id": "f9213:c0:m15"} {"signature": "@has_status@preserve(result=False)def update(self, *args, **kwargs):", "body": "super(Deposit, self).update(*args, **kwargs)", "docstring": "Update only drafts.\n\n Status required: ``'draft'``.\n\n Meta information inside `_deposit` are preserved.", "id": "f9213:c0:m16"} {"signature": "@has_status@preservedef patch(self, *args, **kwargs):", "body": "return super(Deposit, self).patch(*args, **kwargs)", "docstring": "Patch only drafts.\n\n Status required: ``'draft'``.\n\n Meta information inside `_deposit` are preserved.", "id": "f9213:c0:m17"} {"signature": "def _create_bucket(self):", "body": "return Bucket.create(storage_class=current_app.config[''])", "docstring": "Override bucket creation.", "id": "f9213:c0:m18"} {"signature": "@propertydef status(self):", "body": "return self['']['']", "docstring": "Property for accessing deposit status.", "id": "f9213:c0:m19"} {"signature": "@propertydef files(self):", "body": "files_ = super(Deposit, self).filesif files_:sort_by_ = files_.sort_bydef sort_by(*args, **kwargs):\"\"\"\"\"\"if '' != self.status:raise PIDInvalidAction()return sort_by_(*args, **kwargs)files_.sort_by = sort_byreturn files_", "docstring": "List of Files inside the deposit.\n\n Add validation on ``sort_by`` method: if, at the time of files access,\n the record is not a ``'draft'`` then a\n :exc:`invenio_pidstore.errors.PIDInvalidAction` is rised.", "id": "f9213:c0:m20"} {"signature": "def deposit_links_factory(pid):", "body": "links = default_links_factory(pid)def _url(name, **kwargs):\"\"\"\"\"\"endpoint = ''.format(current_records_rest.default_endpoint_prefixes[pid.pid_type],name,)return url_for(endpoint, pid_value=pid.pid_value, _external=True,**kwargs)links[''] = _url('')ui_endpoint = current_app.config.get('')if ui_endpoint is not None:links[''] = ui_endpoint.format(host=request.host,scheme=request.scheme,pid_value=pid.pid_value,)deposit_cls = Depositif '' in request.view_args:deposit_cls = request.view_args[''].data[].__class__for action in extract_actions_from_class(deposit_cls):links[action] = _url('', action=action)return links", "docstring": "Factory for record links generation.\n\n The dictionary is formed as:\n\n .. 
code-block:: python\n\n {\n 'files': '/url/to/files',\n 'publish': '/url/to/publish',\n 'edit': '/url/to/edit',\n 'discard': '/url/to/discard',\n ...\n }\n\n :param pid: The record PID object.\n :returns: A dictionary that contains all the links.", "id": "f9214:m0"} {"signature": "def deposit_fetcher(record_uuid, data):", "body": "return FetchedPID(provider=DepositProvider,pid_type=DepositProvider.pid_type,pid_value=str(data['']['']),)", "docstring": "Fetch a deposit identifier.\n\n :param record_uuid: Record UUID.\n :param data: Record content.\n :returns: A :class:`invenio_pidstore.fetchers.FetchedPID` that contains\n data['_deposit']['id'] as pid_value.", "id": "f9215:m0"} {"signature": "def admin_permission_factory():", "body": "try:pkg_resources.get_distribution('')from invenio_access.permissions import DynamicPermission as Permissionexcept pkg_resources.DistributionNotFound:from flask_principal import Permissionreturn Permission(action_admin_access)", "docstring": "Factory for creating a permission for an admin `deposit-admin-access`.\n\n If `invenio-access` module is installed, it returns a\n :class:`invenio_access.permissions.DynamicPermission` object.\n Otherwise, it returns a :class:`flask_principal.Permission` object.\n\n :returns: Permission instance.", "id": "f9216:m0"} {"signature": "def create_error_handlers(blueprint):", "body": "blueprint.errorhandler(PIDInvalidAction)(create_api_errorhandler(status=, message=''))records_rest_error_handlers(blueprint)", "docstring": "Create error handlers on blueprint.", "id": "f9217:m0"} {"signature": "def create_blueprint(endpoints):", "body": "blueprint = Blueprint('',__name__,url_prefix='',)create_error_handlers(blueprint)for endpoint, options in (endpoints or {}).items():options = deepcopy(options)if '' in options:files_serializers = options.get('')files_serializers = {mime: obj_or_import_string(func)for mime, func in files_serializers.items()}del options['']else:files_serializers = {}if '' in options:serializers = options.get('')serializers = {mime: obj_or_import_string(func)for mime, func in serializers.items()}else:serializers = {}file_list_route = options.pop('',''.format(options['']))file_item_route = options.pop('',''.format(options['']))options.setdefault('', DepositSearch)search_class = obj_or_import_string(options[''])options.setdefault('', Deposit)record_class = obj_or_import_string(options[''])options.setdefault('', None)for rule in records_rest_url_rules(endpoint, **options):blueprint.add_url_rule(**rule)search_class_kwargs = {}if options.get(''):search_class_kwargs[''] = options['']if options.get(''):search_class_kwargs[''] = options['']ctx = dict(read_permission_factory=obj_or_import_string(options.get('')),create_permission_factory=obj_or_import_string(options.get('')),update_permission_factory=obj_or_import_string(options.get('')),delete_permission_factory=obj_or_import_string(options.get('')),record_class=record_class,search_class=partial(search_class, **search_class_kwargs),default_media_type=options.get(''),)deposit_actions = DepositActionResource.as_view(DepositActionResource.view_name.format(endpoint),serializers=serializers,pid_type=options[''],ctx=ctx,)blueprint.add_url_rule(''.format(options[''],''.join(extract_actions_from_class(record_class)),),view_func=deposit_actions,methods=[''],)deposit_files = DepositFilesResource.as_view(DepositFilesResource.view_name.format(endpoint),serializers=files_serializers,pid_type=options[''],ctx=ctx,)blueprint.add_url_rule(file_list_route,view_func=deposit_files,methods=['', 
'', ''],)deposit_file = DepositFileResource.as_view(DepositFileResource.view_name.format(endpoint),serializers=files_serializers,pid_type=options[''],ctx=ctx,)blueprint.add_url_rule(file_item_route,view_func=deposit_file,methods=['', '', ''],)return blueprint", "docstring": "Create Invenio-Deposit-REST blueprint.\n\n See: :data:`invenio_deposit.config.DEPOSIT_REST_ENDPOINTS`.\n\n :param endpoints: List of endpoints configuration.\n :returns: The configured blueprint.", "id": "f9217:m1"} {"signature": "def __init__(self, serializers, pid_type, ctx, *args, **kwargs):", "body": "super(DepositActionResource, self).__init__(serializers,default_media_type=ctx.get(''),*args,**kwargs)for key, value in ctx.items():setattr(self, key, value)", "docstring": "Constructor.", "id": "f9217:c0:m0"} {"signature": "@pass_record@need_record_permission('')def post(self, pid, record, action):", "body": "record = getattr(record, action)(pid=pid)db.session.commit()db.session.refresh(pid)db.session.refresh(record.model)post_action.send(current_app._get_current_object(), action=action,pid=pid, deposit=record)response = self.make_response(pid, record, if action == '' else )endpoint = ''.format(pid.pid_type)location = url_for(endpoint, pid_value=pid.pid_value, _external=True)response.headers.extend(dict(Location=location))return response", "docstring": "Handle deposit action.\n\n After the action is executed, a\n :class:`invenio_deposit.signals.post_action` signal is sent.\n\n Permission required: `update_permission_factory`.\n\n :param pid: Pid object (from url).\n :param record: Record object resolved from the pid.\n :param action: The action to execute.", "id": "f9217:c0:m1"} {"signature": "def __init__(self, serializers, pid_type, ctx, *args, **kwargs):", "body": "super(DepositFilesResource, self).__init__(serializers,*args,**kwargs)for key, value in ctx.items():setattr(self, key, value)", "docstring": "Constructor.", "id": "f9217:c1:m0"} {"signature": "@pass_record@need_record_permission('')def get(self, pid, record):", "body": "return self.make_response(obj=record.files, pid=pid, record=record)", "docstring": "Get files.\n\n Permission required: `read_permission_factory`.\n\n :param pid: Pid object (from url).\n :param record: Record object resolved from the pid.\n :returns: The files.", "id": "f9217:c1:m1"} {"signature": "@require_api_auth()@require_oauth_scopes(write_scope.id)@pass_record@need_record_permission('')def post(self, pid, record):", "body": "uploaded_file = request.files['']key = secure_filename(request.form.get('') or uploaded_file.filename)if key in record.files:raise FileAlreadyExists()record.files[key] = uploaded_file.streamrecord.commit()db.session.commit()return self.make_response(obj=record.files[key].obj, pid=pid, record=record, status=)", "docstring": "Handle POST deposit files.\n\n Permission required: `update_permission_factory`.\n\n :param pid: Pid object (from url).\n :param record: Record object resolved from the pid.", "id": "f9217:c1:m2"} {"signature": "@require_api_auth()@require_oauth_scopes(write_scope.id)@pass_record@need_record_permission('')def put(self, pid, record):", "body": "try:ids = [data[''] for data in json.loads(request.data.decode(''))]except KeyError:raise WrongFile()record.files.sort_by(*ids)record.commit()db.session.commit()return self.make_response(obj=record.files, pid=pid, record=record)", "docstring": "Handle the sort of the files through the PUT deposit files.\n\n Expected input in body PUT:\n\n .. 
code-block:: javascript\n\n [\n {\n \"id\": 1\n },\n {\n \"id\": 2\n },\n ...\n }\n\n Permission required: `update_permission_factory`.\n\n :param pid: Pid object (from url).\n :param record: Record object resolved from the pid.\n :returns: The files.", "id": "f9217:c1:m3"} {"signature": "def __init__(self, serializers, pid_type, ctx, *args, **kwargs):", "body": "super(DepositFileResource, self).__init__(serializers,*args,**kwargs)for key, value in ctx.items():setattr(self, key, value)", "docstring": "Constructor.", "id": "f9217:c2:m0"} {"signature": "@use_kwargs(get_args)@pass_record@need_record_permission('')def get(self, pid, record, key, version_id, **kwargs):", "body": "try:obj = record.files[str(key)].get_version(version_id=version_id)return self.make_response(obj=obj or abort(), pid=pid, record=record)except KeyError:abort()", "docstring": "Get file.\n\n Permission required: `read_permission_factory`.\n\n :param pid: Pid object (from url).\n :param record: Record object resolved from the pid.\n :param key: Unique identifier for the file in the deposit.\n :param version_id: File version. Optional. If no version is provided,\n the last version is retrieved.\n :returns: the file content.", "id": "f9217:c2:m1"} {"signature": "@require_api_auth()@require_oauth_scopes(write_scope.id)@pass_record@need_record_permission('')def put(self, pid, record, key):", "body": "try:data = json.loads(request.data.decode(''))new_key = data['']except KeyError:raise WrongFile()new_key_secure = secure_filename(new_key)if not new_key_secure or new_key != new_key_secure:raise WrongFile()try:obj = record.files.rename(str(key), new_key_secure)except KeyError:abort()record.commit()db.session.commit()return self.make_response(obj=obj, pid=pid, record=record)", "docstring": "Handle the file rename through the PUT deposit file.\n\n Permission required: `update_permission_factory`.\n\n :param pid: Pid object (from url).\n :param record: Record object resolved from the pid.\n :param key: Unique identifier for the file in the deposit.", "id": "f9217:c2:m2"} {"signature": "@require_api_auth()@require_oauth_scopes(write_scope.id)@pass_record@need_record_permission('')def delete(self, pid, record, key):", "body": "try:del record.files[str(key)]record.commit()db.session.commit()return make_response('', )except KeyError:abort(, '''')", "docstring": "Handle DELETE deposit file.\n\n Permission required: `update_permission_factory`.\n\n :param pid: Pid object (from url).\n :param record: Record object resolved from the pid.\n :param key: Unique identifier for the file in the deposit.", "id": "f9217:c2:m3"} {"signature": "def create_blueprint(endpoints):", "body": "from invenio_records_ui.views import create_url_ruleblueprint = Blueprint('',__name__,static_folder='',template_folder='',url_prefix='',)@blueprint.errorhandler(PIDDeletedError)def tombstone_errorhandler(error):\"\"\"\"\"\"return render_template(current_app.config[''],pid=error.pid,record=error.record or {},), for endpoint, options in (endpoints or {}).items():options = deepcopy(options)options.pop('', None)options.pop('', None)blueprint.add_url_rule(**create_url_rule(endpoint, **options))@blueprint.route('')@login_requireddef index():\"\"\"\"\"\"return render_template(current_app.config[''])@blueprint.route('')@login_requireddef new():\"\"\"\"\"\"deposit_type = request.values.get('')return render_template(current_app.config[''],record={'': {'': None}},jsonschema=current_deposit.jsonschemas[deposit_type],schemaform=current_deposit.schemaforms[deposit_type],)return 
blueprint", "docstring": "Create Invenio-Deposit-UI blueprint.\n\n See: :data:`invenio_deposit.config.DEPOSIT_RECORDS_UI_ENDPOINTS`.\n\n :param endpoints: List of endpoints configuration.\n :returns: The configured blueprint.", "id": "f9219:m0"} {"signature": "def default_view_method(pid, record, template=None):", "body": "record_viewed.send(current_app._get_current_object(),pid=pid,record=record,)deposit_type = request.values.get('')return render_template(template,pid=pid,record=record,jsonschema=current_deposit.jsonschemas[deposit_type],schemaform=current_deposit.schemaforms[deposit_type],)", "docstring": "Default view method.\n\n Sends ``record_viewed`` signal and renders template.", "id": "f9219:m1"} {"signature": "@app.cli.group()def fixtures():", "body": "", "docstring": "Command for working with test data.", "id": "f9220:m0"} {"signature": "@fixtures.command()@cli.with_appcontextdef records():", "body": "import pkg_resourcesfrom dojson.contrib.marc21 import marc21from dojson.contrib.marc21.utils import create_record, split_blobfrom flask_login import login_user, logout_userfrom invenio_accounts.models import Userfrom invenio_deposit.api import Depositusers = User.query.all()data_path = pkg_resources.resource_filename('', '')with open(data_path) as source:with current_app.test_request_context():indexer = RecordIndexer()with db.session.begin_nested():for index, data in enumerate(split_blob(source.read()),start=):login_user(users[index % len(users)])record = marc21.do(create_record(data))indexer.index(Deposit.create(record))logout_user()db.session.commit()", "docstring": "Load records.", "id": "f9220:m1"} {"signature": "@fixtures.command()@cli.with_appcontextdef location():", "body": "d = current_app.config['']with db.session.begin_nested():Location.query.delete()loc = Location(name='', uri=d, default=True)db.session.add(loc)db.session.commit()", "docstring": "Load default location.", "id": "f9220:m2"} {"signature": "def universal_exception(coro):", "body": "@functools.wraps(coro)async def wrapper(*args, **kwargs):try:return await coro(*args, **kwargs)except (asyncio.CancelledError, NotImplementedError,StopAsyncIteration):raiseexcept Exception:raise errors.PathIOError(reason=sys.exc_info())return wrapper", "docstring": "Decorator. Reraising any exception (except `CancelledError` and\n`NotImplementedError`) with universal exception\n:py:class:`aioftp.PathIOError`", "id": "f9224:m0"} {"signature": "def defend_file_methods(coro):", "body": "@functools.wraps(coro)async def wrapper(self, file, *args, **kwargs):if isinstance(file, AsyncPathIOContext):raise ValueError(\"\"\"\")return await coro(self, file, *args, **kwargs)return wrapper", "docstring": "Decorator. 
Raises exception when file methods called with wrapped by\n:py:class:`aioftp.AsyncPathIOContext` file object.", "id": "f9224:m1"} {"signature": "@propertydef state(self):", "body": "", "docstring": "Shared pathio state per server", "id": "f9224:c2:m1"} {"signature": "@universal_exception@abc.abstractmethodasync def exists(self, path):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nCheck if path exists\n\n:param path: path to check\n:type path: :py:class:`pathlib.Path`\n\n:rtype: :py:class:`bool`", "id": "f9224:c2:m2"} {"signature": "@universal_exception@abc.abstractmethodasync def is_dir(self, path):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nCheck if path is directory\n\n:param path: path to check\n:type path: :py:class:`pathlib.Path`\n\n:rtype: :py:class:`bool`", "id": "f9224:c2:m3"} {"signature": "@universal_exception@abc.abstractmethodasync def is_file(self, path):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nCheck if path is file\n\n:param path: path to check\n:type path: :py:class:`pathlib.Path`\n\n:rtype: :py:class:`bool`", "id": "f9224:c2:m4"} {"signature": "@universal_exception@abc.abstractmethodasync def mkdir(self, path, *, parents=False, exist_ok=False):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nMake directory\n\n:param path: path to create\n:type path: :py:class:`pathlib.Path`\n\n:param parents: create parents is does not exists\n:type parents: :py:class:`bool`\n\n:param exist_ok: do not raise exception if directory already exists\n:type exist_ok: :py:class:`bool`", "id": "f9224:c2:m5"} {"signature": "@universal_exception@abc.abstractmethodasync def rmdir(self, path):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nRemove directory\n\n:param path: path to remove\n:type path: :py:class:`pathlib.Path`", "id": "f9224:c2:m6"} {"signature": "@universal_exception@abc.abstractmethodasync def unlink(self, path):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nRemove file\n\n:param path: path to remove\n:type path: :py:class:`pathlib.Path`", "id": "f9224:c2:m7"} {"signature": "@abc.abstractmethoddef list(self, path):", "body": "", "docstring": "Create instance of subclass of :py:class:`aioftp.AbstractAsyncLister`.\nYou should subclass and implement `__anext__` method\nfor :py:class:`aioftp.AbstractAsyncLister` and return new instance.\n\n:param path: path to list\n:type path: :py:class:`pathlib.Path`\n\n:rtype: :py:class:`aioftp.AbstractAsyncLister`\n\nUsage:\n::\n\n >>> async for p in pathio.list(path):\n ... # do\n\nor borring instance of :py:class:`list`:\n::\n\n >>> paths = await pathio.list(path)\n >>> paths\n [path, path, path, ...]", "id": "f9224:c2:m8"} {"signature": "@universal_exception@abc.abstractmethodasync def stat(self, path):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nGet path stats\n\n:param path: path, which stats need\n:type path: :py:class:`pathlib.Path`\n\n:return: path stats. For proper work you need only this stats:\n st_size, st_mtime, st_ctime, st_nlink, st_mode\n:rtype: same as :py:class:`os.stat_result`", "id": "f9224:c2:m9"} {"signature": "@universal_exception@abc.abstractmethodasync def _open(self, path, mode):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nOpen file. You should implement \"mode\" argument, which can be:\n\"rb\", \"wb\", \"ab\" (read, write, append. all binary). 
Return type depends\non implementation, anyway the only place you need this file-object\nis in your implementation of read, write and close\n\n:param path: path to create\n:type path: :py:class:`pathlib.Path`\n\n:param mode: specifies the mode in which the file is opened (\"rb\",\n \"wb\", \"ab\", \"r+b\" (read, write, append, read/write, all binary))\n:type mode: :py:class:`str`\n\n:return: file-object", "id": "f9224:c2:m10"} {"signature": "def open(self, *args, **kwargs):", "body": "return AsyncPathIOContext(self, args, kwargs)", "docstring": "Create instance of :py:class:`aioftp.pathio.AsyncPathIOContext`,\nparameters passed to :py:meth:`aioftp.AbstractPathIO._open`\n\n:rtype: :py:class:`aioftp.pathio.AsyncPathIOContext`", "id": "f9224:c2:m11"} {"signature": "@universal_exception@defend_file_methods@abc.abstractmethodasync def seek(self, file, offset, whence=io.SEEK_SET):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nChange the stream position to the given byte `offset`. Same behaviour\nas :py:meth:`io.IOBase.seek`\n\n:param file: file-object from :py:class:`aioftp.AbstractPathIO.open`\n\n:param offset: relative byte offset\n:type offset: :py:class:`int`\n\n:param whence: base position for offset\n:type whence: :py:class:`int`", "id": "f9224:c2:m12"} {"signature": "@universal_exception@defend_file_methods@abc.abstractmethodasync def write(self, file, data):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nWrite some data to file\n\n:param file: file-object from :py:class:`aioftp.AbstractPathIO.open`\n\n:param data: data to write\n:type data: :py:class:`bytes`", "id": "f9224:c2:m13"} {"signature": "@universal_exception@defend_file_methods@abc.abstractmethodasync def read(self, file, block_size):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nRead some data from file\n\n:param file: file-object from :py:class:`aioftp.AbstractPathIO.open`\n\n:param block_size: bytes count to read\n:type block_size: :py:class:`int`\n\n:rtype: :py:class:`bytes`", "id": "f9224:c2:m14"} {"signature": "@universal_exception@defend_file_methods@abc.abstractmethodasync def close(self, file):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nClose file\n\n:param file: file-object from :py:class:`aioftp.AbstractPathIO.open`", "id": "f9224:c2:m15"} {"signature": "@universal_exception@abc.abstractmethodasync def rename(self, source, destination):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nRename path\n\n:param source: rename from\n:type source: :py:class:`pathlib.Path`\n\n:param destination: rename to\n:type destination: :py:class:`pathlib.Path`", "id": "f9224:c2:m16"} {"signature": "def matches(self, mask):", "body": "return all(map(lambda m, c: not m.isdigit() or m == c, mask, self))", "docstring": ":param mask: Template for comparision. 
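`AbstractPathIO.open` returns an async context manager whose read/write/close calls are awaitable. One common way to get that behaviour over ordinary blocking files is to push each call into a thread executor; the sketch below illustrates that approach only and is not aioftp's actual `AsyncPathIOContext` implementation::

    import asyncio
    import functools

    class AsyncFile:
        # Async context manager that runs blocking file calls in a thread executor.
        def __init__(self, path, mode="rb"):
            self._path, self._mode, self._file = path, mode, None

        async def _run(self, func, *args):
            loop = asyncio.get_running_loop()
            return await loop.run_in_executor(None, functools.partial(func, *args))

        async def __aenter__(self):
            self._file = await self._run(open, self._path, self._mode)
            return self

        async def __aexit__(self, *exc_info):
            await self._run(self._file.close)

        async def read(self, block_size=-1):
            return await self._run(self._file.read, block_size)

        async def write(self, data):
            return await self._run(self._file.write, data)

    async def main():
        async with AsyncFile(__file__, "rb") as f:
            print(await f.read(16))

    asyncio.run(main())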
If mask symbol is not digit\n then it passes.\n:type mask: :py:class:`str`\n\n::\n\n >>> Code(\"123\").matches(\"1\")\n True\n >>> Code(\"123\").matches(\"1x3\")\n True", "id": "f9225:c0:m0"} {"signature": "async def finish(self, expected_codes=\"\", wait_codes=\"\"):", "body": "self.close()await self.client.command(None, expected_codes, wait_codes)", "docstring": ":py:func:`asyncio.coroutine`\n\nClose connection and wait for `expected_codes` response from server\npassing `wait_codes`.\n\n:param expected_codes: tuple of expected codes or expected code\n:type expected_codes: :py:class:`tuple` of :py:class:`str` or\n :py:class:`str`\n\n:param wait_codes: tuple of wait codes or wait code\n:type wait_codes: :py:class:`tuple` of :py:class:`str` or\n :py:class:`str`", "id": "f9225:c1:m1"} {"signature": "def close(self):", "body": "if self.stream is not None:self.stream.close()", "docstring": "Close connection.", "id": "f9225:c2:m2"} {"signature": "async def parse_line(self):", "body": "line = await self.stream.readline()if not line:self.stream.close()raise ConnectionResetErrors = line.decode(encoding=self.encoding).rstrip()logger.info(s)return Code(s[:]), s[:]", "docstring": ":py:func:`asyncio.coroutine`\n\nParsing server response line.\n\n:return: (code, line)\n:rtype: (:py:class:`aioftp.Code`, :py:class:`str`)\n\n:raises ConnectionResetError: if received data is empty (this\n means, that connection is closed)\n:raises asyncio.TimeoutError: if there where no data for `timeout`\n period", "id": "f9225:c2:m3"} {"signature": "async def parse_response(self):", "body": "code, rest = await self.parse_line()info = [rest]curr_code = codewhile rest.startswith(\"\") or not curr_code.isdigit():curr_code, rest = await self.parse_line()if curr_code.isdigit():info.append(rest)if curr_code != code:raise errors.StatusCodeError(code, curr_code, info)else:info.append(curr_code + rest)return code, info", "docstring": ":py:func:`asyncio.coroutine`\n\nParsing full server response (all lines).\n\n:return: (code, lines)\n:rtype: (:py:class:`aioftp.Code`, :py:class:`list` of :py:class:`str`)\n\n:raises aioftp.StatusCodeError: if received code does not matches all\n already received codes", "id": "f9225:c2:m4"} {"signature": "def check_codes(self, expected_codes, received_code, info):", "body": "if not any(map(received_code.matches, expected_codes)):raise errors.StatusCodeError(expected_codes, received_code, info)", "docstring": "Checks if any of expected matches received.\n\n:param expected_codes: tuple of expected codes\n:type expected_codes: :py:class:`tuple`\n\n:param received_code: received code for matching\n:type received_code: :py:class:`aioftp.Code`\n\n:param info: list of response lines from server\n:type info: :py:class:`list`\n\n:raises aioftp.StatusCodeError: if received code does not matches any\n expected code", "id": "f9225:c2:m5"} {"signature": "async def command(self, command=None, expected_codes=(), wait_codes=()):", "body": "expected_codes = wrap_with_container(expected_codes)wait_codes = wrap_with_container(wait_codes)if command:logger.info(command)message = command + END_OF_LINEawait self.stream.write(message.encode(encoding=self.encoding))if expected_codes or wait_codes:code, info = await self.parse_response()while any(map(code.matches, wait_codes)):code, info = await self.parse_response()if expected_codes:self.check_codes(expected_codes, code, info)return code, info", "docstring": ":py:func:`asyncio.coroutine`\n\nBasic command logic.\n\n1. Send command if not omitted.\n2. 
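The `matches` body is visible in the `Code` record above; a worked version with concrete reply codes makes the mask semantics explicit (digits must match positionally, any non-digit mask character is a wildcard)::

    class Code(str):
        def matches(self, mask):
            # Compare digit-for-digit; non-digit mask characters always pass.
            return all(map(lambda m, c: not m.isdigit() or m == c, mask, self))

    assert Code("150").matches("1xx")    # any 1xx reply
    assert Code("226").matches("2")      # only the first digit is checked
    assert not Code("550").matches("2xx")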
Yield response until no wait code matches.\n3. Check code for expected.\n\n:param command: command line\n:type command: :py:class:`str`\n\n:param expected_codes: tuple of expected codes or expected code\n:type expected_codes: :py:class:`tuple` of :py:class:`str` or\n :py:class:`str`\n\n:param wait_codes: tuple of wait codes or wait code\n:type wait_codes: :py:class:`tuple` of :py:class:`str` or\n :py:class:`str`", "id": "f9225:c2:m6"} {"signature": "@staticmethoddef parse_epsv_response(s):", "body": "matches = tuple(re.finditer(r\"\", s))s = matches[-].group()port = int(s[:-])return None, port", "docstring": "Parsing `EPSV` (`message (|||port|)`) response.\n\n:param s: response line\n:type s: :py:class:`str`\n\n:return: (ip, port)\n:rtype: (:py:class:`None`, :py:class:`int`)", "id": "f9225:c2:m7"} {"signature": "@staticmethoddef parse_pasv_response(s):", "body": "sub, *_ = re.findall(r\"\", s)nums = tuple(map(int, sub.split(\"\")))ip = \"\".join(map(str, nums[:]))port = (nums[] << ) | nums[]return ip, port", "docstring": "Parsing `PASV` server response.\n\n:param s: response line\n:type s: :py:class:`str`\n\n:return: (ip, port)\n:rtype: (:py:class:`str`, :py:class:`int`)", "id": "f9225:c2:m8"} {"signature": "@staticmethoddef parse_directory_response(s):", "body": "seq_quotes = start = Falsedirectory = \"\"for ch in s:if not start:if ch == \"\":start = Trueelse:if ch == \"\":seq_quotes += else:if seq_quotes == :breakelif seq_quotes == :seq_quotes = directory += ''directory += chreturn pathlib.PurePosixPath(directory)", "docstring": "Parsing directory server response.\n\n:param s: response line\n:type s: :py:class:`str`\n\n:rtype: :py:class:`pathlib.PurePosixPath`", "id": "f9225:c2:m9"} {"signature": "@staticmethoddef parse_unix_mode(s):", "body": "parse_rw = {\"\": , \"\": , \"\": , \"\": }mode = mode |= parse_rw[s[:]] << mode |= parse_rw[s[:]] << mode |= parse_rw[s[:]]if s[] == \"\":mode |= elif s[] == \"\":mode |= elif s[] != \"\":raise ValueErrorif s[] == \"\":mode |= elif s[] == \"\":mode |= elif s[] != \"\":raise ValueErrorif s[] == \"\":mode |= elif s[] == \"\":mode |= elif s[] != \"\":raise ValueErrorreturn mode", "docstring": "Parsing unix mode strings (\"rwxr-x--t\") into hexacimal notation.\n\n:param s: mode string\n:type s: :py:class:`str`\n\n:return mode:\n:rtype: :py:class:`int`", "id": "f9225:c2:m10"} {"signature": "@staticmethoddef format_date_time(d):", "body": "return d.strftime(\"\")", "docstring": "Formats dates from strptime in a consistent format\n\n:param d: return value from strptime\n:type d: :py:class:`datetime`\n\n:rtype: :py:class`str`", "id": "f9225:c2:m11"} {"signature": "def parse_ls_date(self, s, *, now=None):", "body": "with setlocale(\"\"):try:if now is None:now = datetime.datetime.now()d = datetime.datetime.strptime(s, \"\")d = d.replace(year=now.year)diff = (now - d).total_seconds()if diff > HALF_OF_YEAR_IN_SECONDS:d = d.replace(year=now.year + )elif diff < -HALF_OF_YEAR_IN_SECONDS:d = d.replace(year=now.year - )except ValueError:d = datetime.datetime.strptime(s, \"\")return self.format_date_time(d)", "docstring": "Parsing dates from the ls unix utility. 
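The regex and separator literals in `parse_pasv_response` are elided in the dump, so the pattern below is an assumption; the arithmetic, however, is the standard PASV encoding (four IP octets plus a high and a low port byte)::

    import re

    def parse_pasv_response(s):
        # e.g. "227 Entering Passive Mode (192,168,1,2,19,136)."
        sub = re.search(r"\((\d+,\d+,\d+,\d+,\d+,\d+)\)", s).group(1)
        nums = tuple(map(int, sub.split(",")))
        ip = ".".join(map(str, nums[:4]))
        port = (nums[4] << 8) | nums[5]  # 19 * 256 + 136 = 5000
        return ip, port

    assert parse_pasv_response(
        "227 Entering Passive Mode (192,168,1,2,19,136)."
    ) == ("192.168.1.2", 5000)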
For example,\n\"Nov 18 1958\" and \"Nov 18 12:29\".\n\n:param s: ls date\n:type s: :py:class:`str`\n\n:rtype: :py:class:`str`", "id": "f9225:c2:m12"} {"signature": "def parse_list_line_unix(self, b):", "body": "s = b.decode(encoding=self.encoding).rstrip()info = {}if s[] == \"\":info[\"\"] = \"\"elif s[] == \"\":info[\"\"] = \"\"elif s[] == \"\":info[\"\"] = \"\"else:info[\"\"] = \"\"info[\"\"] = self.parse_unix_mode(s[:])s = s[:].lstrip()i = s.index(\"\")info[\"\"] = s[:i]if not info[\"\"].isdigit():raise ValueErrors = s[i:].lstrip()i = s.index(\"\")info[\"\"] = s[:i]s = s[i:].lstrip()i = s.index(\"\")info[\"\"] = s[:i]s = s[i:].lstrip()i = s.index(\"\")info[\"\"] = s[:i]if not info[\"\"].isdigit():raise ValueErrors = s[i:].lstrip()info[\"\"] = self.parse_ls_date(s[:])s = s[:].strip()if info[\"\"] == \"\":i = s.rindex(\"\")link_dst = s[i + :]link_src = s[:i]i = - if link_dst[-] == \"\" or link_dst[-] == \"\" else -info[\"\"] = \"\" if link_dst[i] == \"\" else \"\"s = link_srcreturn pathlib.PurePosixPath(s), info", "docstring": "Attempt to parse a LIST line (similar to unix ls utility).\n\n:param b: response line\n:type b: :py:class:`bytes` or :py:class:`str`\n\n:return: (path, info)\n:rtype: (:py:class:`pathlib.PurePosixPath`, :py:class:`dict`)", "id": "f9225:c2:m13"} {"signature": "def parse_list_line_windows(self, b):", "body": "line = b.decode(encoding=self.encoding).rstrip(\"\")date_time_end = line.index(\"\")date_time_str = line[:date_time_end + ].strip().split(\"\")date_time_str = \"\".join([x for x in date_time_str if len(x) > ])line = line[date_time_end + :].lstrip()with setlocale(\"\"):strptime = datetime.datetime.strptimedate_time = strptime(date_time_str, \"\")info = {}info[\"\"] = self.format_date_time(date_time)next_space = line.index(\"\")if line.startswith(\"\"):info[\"\"] = \"\"else:info[\"\"] = \"\"info[\"\"] = line[:next_space].replace(\"\", \"\")if not info[\"\"].isdigit():raise ValueErrorfilename = line[next_space:].lstrip()if filename == \"\" or filename == \"\":raise ValueErrorreturn pathlib.PurePosixPath(filename), info", "docstring": "Parsing Microsoft Windows `dir` output\n\n:param b: response line\n:type b: :py:class:`bytes` or :py:class:`str`\n\n:return: (path, info)\n:rtype: (:py:class:`pathlib.PurePosixPath`, :py:class:`dict`)", "id": "f9225:c2:m14"} {"signature": "def parse_list_line(self, b):", "body": "ex = []parsers = (self.parse_list_line_unix, self.parse_list_line_windows)for parser in parsers:try:return parser(b)except (ValueError, KeyError, IndexError) as e:ex.append(e)raise ValueError(\"\", b, ex)", "docstring": "Parse LIST response with both Microsoft Windows\u00ae parser and\nUNIX parser\n\n:param b: response line\n:type b: :py:class:`bytes` or :py:class:`str`\n\n:return: (path, info)\n:rtype: (:py:class:`pathlib.PurePosixPath`, :py:class:`dict`)", "id": "f9225:c2:m15"} {"signature": "def parse_mlsx_line(self, b):", "body": "if isinstance(b, bytes):s = b.decode(encoding=self.encoding)else:s = bline = s.rstrip()facts_found, _, name = line.partition(\"\")entry = {}for fact in facts_found[:-].split(\"\"):key, _, value = fact.partition(\"\")entry[key.lower()] = valuereturn pathlib.PurePosixPath(name), entry", "docstring": "Parsing MLS(T|D) response.\n\n:param b: response line\n:type b: :py:class:`bytes` or :py:class:`str`\n\n:return: (path, info)\n:rtype: (:py:class:`pathlib.PurePosixPath`, :py:class:`dict`)", "id": "f9225:c2:m16"} {"signature": "async def connect(self, host, port=DEFAULT_PORT):", "body": "await super().connect(host, port)code, info = 
await self.command(None, \"\", \"\")return info", "docstring": ":py:func:`asyncio.coroutine`\n\nConnect to server.\n\n:param host: host name for connection\n:type host: :py:class:`str`\n\n:param port: port number for connection\n:type port: :py:class:`int`", "id": "f9225:c3:m0"} {"signature": "async def login(self, user=DEFAULT_USER, password=DEFAULT_PASSWORD,account=DEFAULT_ACCOUNT):", "body": "code, info = await self.command(\"\" + user, (\"\", \"\"))while code.matches(\"\"):if code == \"\":cmd = \"\" + passwordelif code == \"\":cmd = \"\" + accountelse:raise errors.StatusCodeError(\"\", code, info)code, info = await self.command(cmd, (\"\", \"\"))", "docstring": ":py:func:`asyncio.coroutine`\n\nServer authentication.\n\n:param user: username\n:type user: :py:class:`str`\n\n:param password: password\n:type password: :py:class:`str`\n\n:param account: account (almost always blank)\n:type account: :py:class:`str`\n\n:raises aioftp.StatusCodeError: if unknown code received", "id": "f9225:c3:m1"} {"signature": "async def get_current_directory(self):", "body": "code, info = await self.command(\"\", \"\")directory = self.parse_directory_response(info[-])return directory", "docstring": ":py:func:`asyncio.coroutine`\n\nGetting current working directory.\n\n:rtype: :py:class:`pathlib.PurePosixPath`", "id": "f9225:c3:m2"} {"signature": "async def change_directory(self, path=\"\"):", "body": "path = pathlib.PurePosixPath(path)if path == pathlib.PurePosixPath(\"\"):cmd = \"\"else:cmd = \"\" + str(path)await self.command(cmd, \"\")", "docstring": ":py:func:`asyncio.coroutine`\n\nChange current directory. Goes \u00abup\u00bb if no parameters passed.\n\n:param path: new directory, goes \u00abup\u00bb if omitted\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`", "id": "f9225:c3:m3"} {"signature": "async def make_directory(self, path, *, parents=True):", "body": "path = pathlib.PurePosixPath(path)need_create = []while path.name and not await self.exists(path):need_create.append(path)path = path.parentif not parents:breakneed_create.reverse()for path in need_create:await self.command(\"\" + str(path), \"\")", "docstring": ":py:func:`asyncio.coroutine`\n\nMake directory.\n\n:param path: path to directory to create\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:param parents: create parents if does not exists\n:type parents: :py:class:`bool`", "id": "f9225:c3:m4"} {"signature": "async def remove_directory(self, path):", "body": "await self.command(\"\" + str(path), \"\")", "docstring": ":py:func:`asyncio.coroutine`\n\nLow level remove method for removing empty directory.\n\n:param path: empty directory to remove\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`", "id": "f9225:c3:m5"} {"signature": "def list(self, path=\"\", *, recursive=False, raw_command=None):", "body": "class AsyncLister(AsyncListerMixin):stream = Noneasync def _new_stream(cls, local_path):cls.path = local_pathcls.parse_line = self.parse_mlsx_lineif raw_command not in [None, \"\", \"\"]:raise ValueError(\"\"f\"\")if raw_command in [None, \"\"]:try:command = (\"\" + str(cls.path)).strip()return await self.get_stream(command, \"\")except errors.StatusCodeError as e:code = e.received_codes[-]if not code.matches(\"\") or raw_command is not None:raiseif raw_command in [None, \"\"]:cls.parse_line = self.parse_list_linecommand = (\"\" + str(cls.path)).strip()return await self.get_stream(command, \"\")def __aiter__(cls):cls.directories = collections.deque()return clsasync def 
__anext__(cls):if cls.stream is None:cls.stream = await cls._new_stream(path)while True:line = await cls.stream.readline()while not line:await cls.stream.finish()if cls.directories:current_path, info = cls.directories.popleft()cls.stream = await cls._new_stream(current_path)line = await cls.stream.readline()else:raise StopAsyncIterationtry:name, info = cls.parse_line(line)except Exception:continuestat = cls.path / name, infoif info[\"\"] == \"\" and recursive:cls.directories.append(stat)return statreturn AsyncLister()", "docstring": ":py:func:`asyncio.coroutine`\n\nList all files and directories in \"path\".\n\n:param path: directory or file path\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:param recursive: list recursively\n:type recursive: :py:class:`bool`\n\n:param raw_command: optional ftp command to use in place of\n fallback logic (must be one of \"MLSD\", \"LIST\")\n:type raw_command: :py:class:`str`\n\n:rtype: :py:class:`list` or `async for` context\n\n::\n\n >>> # lazy list\n >>> async for path, info in client.list():\n ... # no interaction with client should be here(!)\n\n >>> # eager list\n >>> for path, info in (await client.list()):\n ... # interaction with client allowed, since all paths are\n ... # collected already\n\n::\n\n >>> stats = await client.list()", "id": "f9225:c3:m6"} {"signature": "async def stat(self, path):", "body": "path = pathlib.PurePosixPath(path)try:code, info = await self.command(\"\" + str(path), \"\")name, info = self.parse_mlsx_line(info[].lstrip())return infoexcept errors.StatusCodeError as e:if not e.received_codes[-].matches(\"\"):raisefor p, info in await self.list(path.parent):if p.name == path.name:return infoelse:raise errors.StatusCodeError(Code(\"\"),Code(\"\"),\"\",)", "docstring": ":py:func:`asyncio.coroutine`\n\nGetting path stats.\n\n:param path: path for getting info\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:return: path info\n:rtype: :py:class:`dict`", "id": "f9225:c3:m7"} {"signature": "async def is_file(self, path):", "body": "info = await self.stat(path)return info[\"\"] == \"\"", "docstring": ":py:func:`asyncio.coroutine`\n\nChecks if path is file.\n\n:param path: path to check\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:rtype: :py:class:`bool`", "id": "f9225:c3:m8"} {"signature": "async def is_dir(self, path):", "body": "info = await self.stat(path)return info[\"\"] == \"\"", "docstring": ":py:func:`asyncio.coroutine`\n\nChecks if path is dir.\n\n:param path: path to check\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:rtype: :py:class:`bool`", "id": "f9225:c3:m9"} {"signature": "async def exists(self, path):", "body": "try:await self.stat(path)return Trueexcept errors.StatusCodeError as e:if e.received_codes[-].matches(\"\"):return Falseraise", "docstring": ":py:func:`asyncio.coroutine`\n\nCheck path for existence.\n\n:param path: path to check\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:rtype: :py:class:`bool`", "id": "f9225:c3:m10"} {"signature": "async def rename(self, source, destination):", "body": "await self.command(\"\" + str(source), \"\")await self.command(\"\" + str(destination), \"\")", "docstring": ":py:func:`asyncio.coroutine`\n\nRename (move) file or directory.\n\n:param source: path to rename\n:type source: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:param destination: path new name\n:type destination: :py:class:`str` or :py:class:`pathlib.PurePosixPath`", "id": 
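The `list` docstring above advertises two usages: lazy (`async for`) and eager (plain `await` returning a list). That dual interface can be provided by a small mixin that implements `__await__` in terms of the async iterator; the sketch below shows the idea with a toy iterator and is not aioftp's exact `AsyncListerMixin`::

    import asyncio

    class AsyncListerMixin:
        # Awaiting the lister drains it into a list; iterating stays lazy.
        async def _to_list(self):
            return [item async for item in self]

        def __await__(self):
            return self._to_list().__await__()

    class CountDown(AsyncListerMixin):
        def __init__(self, n):
            self.n = n

        def __aiter__(self):
            return self

        async def __anext__(self):
            if self.n == 0:
                raise StopAsyncIteration
            self.n -= 1
            return self.n

    async def main():
        lazy = [x async for x in CountDown(3)]
        eager = await CountDown(3)
        assert lazy == eager == [2, 1, 0]

    asyncio.run(main())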
"f9225:c3:m11"} {"signature": "async def remove_file(self, path):", "body": "await self.command(\"\" + str(path), \"\")", "docstring": ":py:func:`asyncio.coroutine`\n\nLow level remove method for removing file.\n\n:param path: file to remove\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`", "id": "f9225:c3:m12"} {"signature": "async def remove(self, path):", "body": "if await self.exists(path):info = await self.stat(path)if info[\"\"] == \"\":await self.remove_file(path)elif info[\"\"] == \"\":for name, info in (await self.list(path)):if info[\"\"] in (\"\", \"\"):await self.remove(name)await self.remove_directory(path)", "docstring": ":py:func:`asyncio.coroutine`\n\nHigh level remove method for removing path recursively (file or\ndirectory).\n\n:param path: path to remove\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`", "id": "f9225:c3:m13"} {"signature": "def upload_stream(self, destination, *, offset=):", "body": "return self.get_stream(\"\" + str(destination),\"\",offset=offset,)", "docstring": "Create stream for write data to `destination` file.\n\n:param destination: destination path of file on server side\n:type destination: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:param offset: byte offset for stream start position\n:type offset: :py:class:`int`\n\n:rtype: :py:class:`aioftp.DataConnectionThrottleStreamIO`", "id": "f9225:c3:m14"} {"signature": "def append_stream(self, destination, *, offset=):", "body": "return self.get_stream(\"\" + str(destination),\"\",offset=offset,)", "docstring": "Create stream for append (write) data to `destination` file.\n\n:param destination: destination path of file on server side\n:type destination: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:param offset: byte offset for stream start position\n:type offset: :py:class:`int`\n\n:rtype: :py:class:`aioftp.DataConnectionThrottleStreamIO`", "id": "f9225:c3:m15"} {"signature": "async def upload(self, source, destination=\"\", *, write_into=False,block_size=DEFAULT_BLOCK_SIZE):", "body": "source = pathlib.Path(source)destination = pathlib.PurePosixPath(destination)if not write_into:destination = destination / source.nameif await self.path_io.is_file(source):await self.make_directory(destination.parent)async with self.path_io.open(source, mode=\"\") as file_in,self.upload_stream(destination) as stream:async for block in file_in.iter_by_block(block_size):await stream.write(block)elif await self.path_io.is_dir(source):await self.make_directory(destination)sources = collections.deque([source])while sources:src = sources.popleft()async for path in self.path_io.list(src):if write_into:relative = destination.name / path.relative_to(source)else:relative = path.relative_to(source.parent)if await self.path_io.is_dir(path):await self.make_directory(relative)sources.append(path)else:await self.upload(path,relative,write_into=True,block_size=block_size)", "docstring": ":py:func:`asyncio.coroutine`\n\nHigh level upload method for uploading files and directories\nrecursively from file system.\n\n:param source: source path of file or directory on client side\n:type source: :py:class:`str` or :py:class:`pathlib.Path`\n\n:param destination: destination path of file or directory on server\n side\n:type destination: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:param write_into: write source into destination (if you want upload\n file and change it name, as well with directories)\n:type write_into: :py:class:`bool`\n\n:param block_size: block size for 
transaction\n:type block_size: :py:class:`int`", "id": "f9225:c3:m16"} {"signature": "def download_stream(self, source, *, offset=):", "body": "return self.get_stream(\"\" + str(source), \"\", offset=offset)", "docstring": ":py:func:`asyncio.coroutine`\n\nCreate stream for read data from `source` file.\n\n:param source: source path of file on server side\n:type source: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:param offset: byte offset for stream start position\n:type offset: :py:class:`int`\n\n:rtype: :py:class:`aioftp.DataConnectionThrottleStreamIO`", "id": "f9225:c3:m17"} {"signature": "async def download(self, source, destination=\"\", *, write_into=False,block_size=DEFAULT_BLOCK_SIZE):", "body": "source = pathlib.PurePosixPath(source)destination = pathlib.Path(destination)if not write_into:destination = destination / source.nameif await self.is_file(source):await self.path_io.mkdir(destination.parent,parents=True, exist_ok=True)async with self.path_io.open(destination, mode=\"\") as file_out,self.download_stream(source) as stream:async for block in stream.iter_by_block(block_size):await file_out.write(block)elif await self.is_dir(source):await self.path_io.mkdir(destination, parents=True, exist_ok=True)for name, info in (await self.list(source)):full = destination / name.relative_to(source)if info[\"\"] in (\"\", \"\"):await self.download(name, full, write_into=True,block_size=block_size)", "docstring": ":py:func:`asyncio.coroutine`\n\nHigh level download method for downloading files and directories\nrecursively and save them to the file system.\n\n:param source: source path of file or directory on server side\n:type source: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:param destination: destination path of file or directory on client\n side\n:type destination: :py:class:`str` or :py:class:`pathlib.Path`\n\n:param write_into: write source into destination (if you want download\n file and change it name, as well with directories)\n:type write_into: :py:class:`bool`\n\n:param block_size: block size for transaction\n:type block_size: :py:class:`int`", "id": "f9225:c3:m18"} {"signature": "async def quit(self):", "body": "await self.command(\"\", \"\")self.close()", "docstring": ":py:func:`asyncio.coroutine`\n\nSend \"QUIT\" and close connection.", "id": "f9225:c3:m19"} {"signature": "async def get_passive_connection(self, conn_type=\"\",commands=(\"\", \"\")):", "body": "functions = {\"\": self._do_epsv,\"\": self._do_pasv,}if not commands:raise ValueError(\"\")await self.command(\"\" + conn_type, \"\")for i, name in enumerate(commands, start=):name = name.lower()if name not in functions:raise ValueError(f\"\")try:ip, port = await functions[name]()breakexcept errors.StatusCodeError as e:is_last = i == len(commands)if is_last or not e.received_codes[-].matches(\"\"):raiseif ip in (\"\", None):ip = self.server_hostreader, writer = await open_connection(ip,port,self.create_connection,self.ssl,)return reader, writer", "docstring": ":py:func:`asyncio.coroutine`\n\nGetting pair of reader, writer for passive connection with server.\n\n:param conn_type: connection type (\"I\", \"A\", \"E\", \"L\")\n:type conn_type: :py:class:`str`\n\n:param commands: sequence of commands to try to initiate passive\n server creation. First success wins. 
Default is EPSV, then PASV.\n:type commands: :py:class:`list`\n\n:rtype: (:py:class:`asyncio.StreamReader`,\n :py:class:`asyncio.StreamWriter`)", "id": "f9225:c3:m22"} {"signature": "@async_enterableasync def get_stream(self, *command_args, conn_type=\"\", offset=):", "body": "reader, writer = await self.get_passive_connection(conn_type)if offset:await self.command(\"\" + str(offset), \"\")await self.command(*command_args)stream = DataConnectionThrottleStreamIO(self,reader,writer,throttles={\"\": self.throttle},timeout=self.socket_timeout,)return stream", "docstring": ":py:func:`asyncio.coroutine`\n\nCreate :py:class:`aioftp.DataConnectionThrottleStreamIO` for straight\nread/write io.\n\n:param command_args: arguments for :py:meth:`aioftp.Client.command`\n\n:param conn_type: connection type (\"I\", \"A\", \"E\", \"L\")\n:type conn_type: :py:class:`str`\n\n:param offset: byte offset for stream start position\n:type offset: :py:class:`int`\n\n:rtype: :py:class:`aioftp.DataConnectionThrottleStreamIO`", "id": "f9225:c3:m23"} {"signature": "async def abort(self, *, wait=True):", "body": "if wait:await self.command(\"\", \"\", \"\")else:await self.command(\"\")", "docstring": ":py:func:`asyncio.coroutine`\n\nRequest data transfer abort.\n\n:param wait: wait for abort response [426]\u2192226 if `True`\n:type wait: :py:class:`bool`", "id": "f9225:c3:m24"} {"signature": "def worker(f):", "body": "@functools.wraps(f)async def wrapper(cls, connection, rest):try:await f(cls, connection, rest)except asyncio.CancelledError:connection.response(\"\", \"\")connection.response(\"\", \"\")return wrapper", "docstring": "Decorator. Abortable worker. If wrapped task will be cancelled by\ndispatcher, decorator will send ftp codes of successful interrupt.\n\n::\n\n >>> @worker\n ... async def worker(self, connection, rest):\n ... 
...", "id": "f9227:m0"} {"signature": "def get_permissions(self, path):", "body": "path = pathlib.PurePosixPath(path)parents = filter(lambda p: p.is_parent(path), self.permissions)perm = min(parents,key=lambda p: len(path.relative_to(p.path).parts),default=Permission(),)return perm", "docstring": "Return nearest parent permission for `path`.\n\n:param path: path which permission you want to know\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:rtype: :py:class:`aioftp.Permission`", "id": "f9227:c1:m1"} {"signature": "@abc.abstractmethodasync def get_user(self, login):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nGet user and response for USER call\n\n:param login: user's login\n:type login: :py:class:`str`", "id": "f9227:c2:m1"} {"signature": "@abc.abstractmethodasync def authenticate(self, user, password):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nCheck if user can be authenticated with provided password\n\n:param user: user\n:type user: :py:class:`aioftp.User`\n\n:param password: password\n:type password: :py:class:`str`\n\n:rtype: :py:class:`bool`", "id": "f9227:c2:m2"} {"signature": "async def notify_logout(self, user):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nCalled when user connection is closed if user was initiated\n\n:param user: user\n:type user: :py:class:`aioftp.User`", "id": "f9227:c2:m3"} {"signature": "def locked(self):", "body": "return self.value == ", "docstring": "Returns True if semaphore-like can not be acquired.\n\n:rtype: :py:class:`bool`", "id": "f9227:c5:m1"} {"signature": "def acquire(self):", "body": "if self.value is not None:self.value -= if self.value < :raise ValueError(\"\")", "docstring": "Acquire, decrementing the internal counter by one.", "id": "f9227:c5:m2"} {"signature": "def release(self):", "body": "if self.value is not None:self.value += if self.value > self.maximum_value:raise ValueError(\"\")", "docstring": "Release, incrementing the internal counter by one.", "id": "f9227:c5:m3"} {"signature": "async def start(self, host=None, port=, **kwargs):", "body": "self._start_server_extra_arguments = kwargsself.connections = {}self.server_host = hostself.server_port = portself.server = await asyncio.start_server(self.dispatcher,host,port,ssl=self.ssl,**self._start_server_extra_arguments,)for sock in self.server.sockets:if sock.family in (socket.AF_INET, socket.AF_INET6):host, port, *_ = sock.getsockname()if not self.server_port:self.server_port = portif not self.server_host:self.server_host = hostlogger.info(\"\", host, port)", "docstring": ":py:func:`asyncio.coroutine`\n\nStart server.\n\n:param host: ip address to bind for listening.\n:type host: :py:class:`str`\n\n:param port: port number to bind for listening.\n:type port: :py:class:`int`\n\n:param kwargs: keyword arguments, they passed to\n :py:func:`asyncio.start_server`", "id": "f9227:c6:m0"} {"signature": "@propertydef address(self):", "body": "return self.server_host, self.server_port", "docstring": "Server listen socket host and port as :py:class:`tuple`", "id": "f9227:c6:m1"} {"signature": "async def close(self):", "body": "self.server.close()tasks = [self.server.wait_closed()]for connection in self.connections.values():connection._dispatcher.cancel()tasks.append(connection._dispatcher)logger.info(\"\", len(tasks))await asyncio.wait(tasks)", "docstring": ":py:func:`asyncio.coroutine`\n\nShutdown the server and close all connections.", "id": "f9227:c6:m2"} {"signature": "async def write_response(self, stream, code, 
lines=\"\", list=False):", "body": "lines = wrap_with_container(lines)write = functools.partial(self.write_line, stream)if list:head, *body, tail = linesawait write(code + \"\" + head)for line in body:await write(\"\" + line)await write(code + \"\" + tail)else:*body, tail = linesfor line in body:await write(code + \"\" + line)await write(code + \"\" + tail)", "docstring": ":py:func:`asyncio.coroutine`\n\nComplex method for sending response.\n\n:param stream: command connection stream\n:type stream: :py:class:`aioftp.StreamIO`\n\n:param code: server response code\n:type code: :py:class:`str`\n\n:param lines: line or lines, which are response information\n:type lines: :py:class:`str` or :py:class:`collections.Iterable`\n\n:param list: if true, then lines will be sended without code prefix.\n This is useful for **LIST** FTP command and some others.\n:type list: :py:class:`bool`", "id": "f9227:c6:m4"} {"signature": "async def parse_command(self, stream):", "body": "line = await stream.readline()if not line:raise ConnectionResetErrors = line.decode(encoding=self.encoding).rstrip()logger.info(s)cmd, _, rest = s.partition(\"\")return cmd.lower(), rest", "docstring": ":py:func:`asyncio.coroutine`\n\nComplex method for getting command.\n\n:param stream: connection steram\n:type stream: :py:class:`asyncio.StreamIO`\n\n:return: (code, rest)\n:rtype: (:py:class:`str`, :py:class:`str`)", "id": "f9227:c6:m5"} {"signature": "async def response_writer(self, stream, response_queue):", "body": "while True:args = await response_queue.get()try:await self.write_response(stream, *args)finally:response_queue.task_done()", "docstring": ":py:func:`asyncio.coroutine`\n\nWorker for write_response with current connection. Get data to response\nfrom queue, this is for right order of responses. 
Exits if received\n:py:class:`None`.\n\n:param stream: command connection stream\n:type connection: :py:class:`aioftp.StreamIO`\n\n:param response_queue:\n:type response_queue: :py:class:`asyncio.Queue`", "id": "f9227:c6:m6"} {"signature": "@abc.abstractmethodasync def dispatcher(self, reader, writer):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nServer connection handler (main routine per user).", "id": "f9227:c6:m7"} {"signature": "def get_paths(self, connection, path):", "body": "virtual_path = pathlib.PurePosixPath(path)if not virtual_path.is_absolute():virtual_path = connection.current_directory / virtual_pathresolved_virtual_path = pathlib.PurePosixPath(\"\")for part in virtual_path.parts[:]:if part == \"\":resolved_virtual_path = resolved_virtual_path.parentelse:resolved_virtual_path /= partbase_path = connection.user.base_pathreal_path = base_path / resolved_virtual_path.relative_to(\"\")return real_path, resolved_virtual_path", "docstring": "Return *real* and *virtual* paths, resolves \"..\" with \"up\" action.\n*Real* path is path for path_io, when *virtual* deals with\n\"user-view\" and user requests\n\n:param connection: internal options for current connected user\n:type connection: :py:class:`dict`\n\n:param path: received path from user\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:return: (real_path, virtual_path)\n:rtype: (:py:class:`pathlib.Path`, :py:class:`pathlib.PurePosixPath`)", "id": "f9227:c10:m2"} {"signature": "async def syst(self, connection, rest):", "body": "connection.response(\"\", \"\")return True", "docstring": "Return system type (always returns UNIX type: L8).", "id": "f9227:c10:m32"} {"signature": "def with_timeout(name):", "body": "if isinstance(name, str):return _with_timeout(name)else:return _with_timeout(\"\")(name)", "docstring": "Method decorator, wraps method with :py:func:`asyncio.wait_for`. `timeout`\nargument takes from `name` decorator argument or \"timeout\".\n\n:param name: name of timeout attribute\n:type name: :py:class:`str`\n\n:raises asyncio.TimeoutError: if coroutine does not finished in timeout\n\nWait for `self.timeout`\n::\n\n >>> def __init__(self, ...):\n ...\n ... self.timeout = 1\n ...\n ... @with_timeout\n ... async def foo(self, ...):\n ...\n ... pass\n\nWait for custom timeout\n::\n\n >>> def __init__(self, ...):\n ...\n ... self.foo_timeout = 1\n ...\n ... @with_timeout(\"foo_timeout\")\n ... async def foo(self, ...):\n ...\n ... pass", "id": "f9228:m2"} {"signature": "def async_enterable(f):", "body": "@functools.wraps(f)def wrapper(*args, **kwargs):class AsyncEnterableInstance:async def __aenter__(self):self.context = await f(*args, **kwargs)return await self.context.__aenter__()async def __aexit__(self, *args, **kwargs):await self.context.__aexit__(*args, **kwargs)def __await__(self):return f(*args, **kwargs).__await__()return AsyncEnterableInstance()return wrapper", "docstring": "Decorator. Bring coroutine result up, so it can be used as async context\n\n::\n\n >>> async def foo():\n ...\n ... ...\n ... return AsyncContextInstance(...)\n ...\n ... ctx = await foo()\n ... async with ctx:\n ...\n ... # do\n\n::\n\n >>> @async_enterable\n ... async def foo():\n ...\n ... ...\n ... return AsyncContextInstance(...)\n ...\n ... async with foo() as ctx:\n ...\n ... # do\n ...\n ... ctx = await foo()\n ... async with ctx:\n ...\n ... 
# do", "id": "f9228:m3"} {"signature": "@contextmanagerdef setlocale(name):", "body": "with LOCALE_LOCK:old_locale = locale.setlocale(locale.LC_ALL)try:yield locale.setlocale(locale.LC_ALL, name)finally:locale.setlocale(locale.LC_ALL, old_locale)", "docstring": "Context manager with threading lock for set locale on enter, and set it\nback to original state on exit.\n\n::\n\n >>> with setlocale(\"C\"):\n ... ...", "id": "f9228:m5"} {"signature": "@with_timeout@abc.abstractmethodasync def __anext__(self):", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nAbstract method", "id": "f9228:c2:m2"} {"signature": "@with_timeout(\"\")async def readline(self):", "body": "return await self.reader.readline()", "docstring": ":py:func:`asyncio.coroutine`\n\nProxy for :py:meth:`asyncio.StreamReader.readline`.", "id": "f9228:c3:m1"} {"signature": "@with_timeout(\"\")async def read(self, count=-):", "body": "return await self.reader.read(count)", "docstring": ":py:func:`asyncio.coroutine`\n\nProxy for :py:meth:`asyncio.StreamReader.read`.\n\n:param count: block size for read operation\n:type count: :py:class:`int`", "id": "f9228:c3:m2"} {"signature": "@with_timeout(\"\")async def write(self, data):", "body": "self.writer.write(data)await self.writer.drain()", "docstring": ":py:func:`asyncio.coroutine`\n\nCombination of :py:meth:`asyncio.StreamWriter.write` and\n:py:meth:`asyncio.StreamWriter.drain`.\n\n:param data: data to write\n:type data: :py:class:`bytes`", "id": "f9228:c3:m3"} {"signature": "def close(self):", "body": "self.writer.close()", "docstring": "Close connection.", "id": "f9228:c3:m4"} {"signature": "async def wait(self):", "body": "if self._limit is not None and self._limit > andself._start is not None:now = _now()end = self._start + self._sum / self._limitawait asyncio.sleep(max(, end - now))", "docstring": ":py:func:`asyncio.coroutine`\n\nWait until can do IO", "id": "f9228:c4:m1"} {"signature": "def append(self, data, start):", "body": "if self._limit is not None and self._limit > :if self._start is None:self._start = startif start - self._start > self.reset_rate:self._sum -= round((start - self._start) * self._limit)self._start = startself._sum += len(data)", "docstring": "Count `data` for throttle\n\n:param data: bytes of data for count\n:type data: :py:class:`bytes`\n\n:param start: start of read/write time from\n :py:meth:`asyncio.BaseEventLoop.time`\n:type start: :py:class:`float`", "id": "f9228:c4:m2"} {"signature": "@propertydef limit(self):", "body": "return self._limit", "docstring": "Throttle limit", "id": "f9228:c4:m3"} {"signature": "@limit.setterdef limit(self, value):", "body": "self._limit = valueself._start = Noneself._sum = ", "docstring": "Set throttle limit\n\n:param value: bytes per second\n:type value: :py:class:`int` or :py:class:`None`", "id": "f9228:c4:m4"} {"signature": "def clone(self):", "body": "return Throttle(limit=self._limit, reset_rate=self.reset_rate)", "docstring": "Clone throttle without memory", "id": "f9228:c4:m5"} {"signature": "def clone(self):", "body": "return StreamThrottle(read=self.read.clone(),write=self.write.clone())", "docstring": "Clone throttles without memory", "id": "f9228:c5:m0"} {"signature": "@classmethoddef from_limits(cls, read_speed_limit=None, write_speed_limit=None):", "body": "return cls(read=Throttle(limit=read_speed_limit),write=Throttle(limit=write_speed_limit))", "docstring": "Simple wrapper for creation :py:class:`aioftp.StreamThrottle`\n\n:param read_speed_limit: stream read speed limit in bytes or\n 
:py:class:`None` for unlimited\n:type read_speed_limit: :py:class:`int` or :py:class:`None`\n\n:param write_speed_limit: stream write speed limit in bytes or\n :py:class:`None` for unlimited\n:type write_speed_limit: :py:class:`int` or :py:class:`None`", "id": "f9228:c5:m1"} {"signature": "async def wait(self, name):", "body": "waiters = []for throttle in self.throttles.values():curr_throttle = getattr(throttle, name)if curr_throttle.limit:waiters.append(curr_throttle.wait())if waiters:await asyncio.wait(waiters)", "docstring": ":py:func:`asyncio.coroutine`\n\nWait for all throttles\n\n:param name: name of throttle to acquire (\"read\" or \"write\")\n:type name: :py:class:`str`", "id": "f9228:c6:m1"} {"signature": "def append(self, name, data, start):", "body": "for throttle in self.throttles.values():getattr(throttle, name).append(data, start)", "docstring": "Update timeout for all throttles\n\n:param name: name of throttle to append to (\"read\" or \"write\")\n:type name: :py:class:`str`\n\n:param data: bytes of data for count\n:type data: :py:class:`bytes`\n\n:param start: start of read/write time from\n :py:meth:`asyncio.BaseEventLoop.time`\n:type start: :py:class:`float`", "id": "f9228:c6:m2"} {"signature": "async def read(self, count=-):", "body": "await self.wait(\"\")start = _now()data = await super().read(count)self.append(\"\", data, start)return data", "docstring": ":py:func:`asyncio.coroutine`\n\n:py:meth:`aioftp.StreamIO.read` proxy", "id": "f9228:c6:m3"} {"signature": "async def readline(self):", "body": "await self.wait(\"\")start = _now()data = await super().readline()self.append(\"\", data, start)return data", "docstring": ":py:func:`asyncio.coroutine`\n\n:py:meth:`aioftp.StreamIO.readline` proxy", "id": "f9228:c6:m4"} {"signature": "async def write(self, data):", "body": "await self.wait(\"\")start = _now()await super().write(data)self.append(\"\", data, start)", "docstring": ":py:func:`asyncio.coroutine`\n\n:py:meth:`aioftp.StreamIO.write` proxy", "id": "f9228:c6:m5"} {"signature": "def iter_by_line(self):", "body": "return AsyncStreamIterator(self.readline)", "docstring": "Read/iterate stream by line.\n\n:rtype: :py:class:`aioftp.AsyncStreamIterator`\n\n::\n\n >>> async for line in stream.iter_by_line():\n ... ...", "id": "f9228:c6:m8"} {"signature": "def iter_by_block(self, count=DEFAULT_BLOCK_SIZE):", "body": "return AsyncStreamIterator(lambda: self.read(count))", "docstring": "Read/iterate stream by block.\n\n:rtype: :py:class:`aioftp.AsyncStreamIterator`\n\n::\n\n >>> async for block in stream.iter_by_block(block_size):\n ... 
...", "id": "f9228:c6:m9"} {"signature": "def bytes2human(n, format=\"\"):", "body": "symbols = ('', '', '', '', '', '', '', '', '')prefix = {}for i, s in enumerate(symbols[:]):prefix[s] = << (i + ) * for symbol in reversed(symbols[:]):if n >= prefix[symbol]:value = float(n) / prefix[symbol]return format % locals()return format % dict(symbol=symbols[], value=n)", "docstring": ">>> bytes2human(10000)\n'9K'\n>>> bytes2human(100001221)\n'95M'", "id": "f9248:m1"} {"signature": "def human2bytes(s):", "body": "symbols = ('', '', '', '', '', '', '', '', '')letter = s[-:].strip().upper()num = s[:-]assert num.isdigit() and letter in symbols, snum = float(num)prefix = {symbols[]: }for i, s in enumerate(symbols[:]):prefix[s] = << (i + ) * return int(num * prefix[letter])", "docstring": ">>> human2bytes('1M')\n1048576\n>>> human2bytes('1G')\n1073741824", "id": "f9248:m2"} {"signature": "def register_memory():", "body": "def get_mem(proc):if os.name == '':mem = proc.memory_info_ex()counter = mem.rssif '' in mem._fields:counter -= mem.sharedreturn counterelse:return proc.get_memory_info().rssif SERVER_PROC is not None:mem = get_mem(SERVER_PROC)for child in SERVER_PROC.children():mem += get_mem(child)server_memory.append(bytes2human(mem))", "docstring": "Register an approximation of memory used by FTP server process\n and all of its children.", "id": "f9248:m3"} {"signature": "def timethis(what):", "body": "@contextlib.contextmanagerdef benchmark():timer = time.clock if sys.platform == \"\" else time.timestart = timer()yieldstop = timer()res = (stop - start)print_bench(what, res, \"\")if hasattr(what, \"\"):def timed(*args, **kwargs):with benchmark():return what(*args, **kwargs)return timedelse:return benchmark()", "docstring": "Utility function for making simple benchmarks (calculates time calls).\n It can be used either as a context manager or as a decorator.", "id": "f9248:m4"} {"signature": "def connect():", "body": "ftp_class = ftplib.FTP if not SSL else ftplib.FTP_TLSftp = ftp_class(timeout=TIMEOUT)ftp.connect(HOST, PORT)ftp.login(USER, PASSWORD)if SSL:ftp.prot_p() return ftp", "docstring": "Connect to FTP server, login and return an ftplib.FTP instance.", "id": "f9248:m5"} {"signature": "def retr(ftp):", "body": "ftp.voidcmd('')with contextlib.closing(ftp.transfercmd(\"\" + TESTFN)) as conn:recv_bytes = while True:data = conn.recv(BUFFER_LEN)if not data:breakrecv_bytes += len(data)ftp.voidresp()", "docstring": "Same as ftplib's retrbinary() but discard the received data.", "id": "f9248:m6"} {"signature": "def stor(ftp=None):", "body": "if ftp is None:ftp = connect()quit = Trueelse:quit = Falseftp.voidcmd('')with contextlib.closing(ftp.transfercmd(\"\" + TESTFN)) as conn:chunk = b'' * BUFFER_LENtotal_sent = while True:sent = conn.send(chunk)total_sent += sentif total_sent >= FILE_SIZE:breakftp.voidresp()if quit:ftp.quit()return ftp", "docstring": "Same as ftplib's storbinary() but just sends dummy data\n instead of reading it from a real file.", "id": "f9248:m7"} {"signature": "def bytes_per_second(ftp, retr=True):", "body": "tot_bytes = if retr:def request_file():ftp.voidcmd('')conn = ftp.transfercmd(\"\" + TESTFN)return connwith contextlib.closing(request_file()) as conn:register_memory()stop_at = time.time() + while stop_at > time.time():chunk = conn.recv(BUFFER_LEN)if not chunk:a = time.time()ftp.voidresp()conn.close()conn = request_file()stop_at += time.time() - atot_bytes += len(chunk)try:while chunk:chunk = conn.recv(BUFFER_LEN)ftp.voidresp()conn.close()except (ftplib.error_temp, 
ftplib.error_perm):passelse:ftp.voidcmd('')with contextlib.closing(ftp.transfercmd(\"\" + TESTFN)) as conn:register_memory()chunk = b'' * BUFFER_LENstop_at = time.time() + while stop_at > time.time():tot_bytes += conn.send(chunk)ftp.voidresp()return tot_bytes", "docstring": "Return the number of bytes transmitted in 1 second.", "id": "f9248:m8"} {"signature": "def heappop_max(heap):", "body": "lastelt = heap.pop() if heap:returnitem = heap[]heap[] = lastelt_siftup_max(heap, )return returnitemreturn lastelt", "docstring": "Maxheap version of a heappop.", "id": "f9253:m0"} {"signature": "def heapreplace_max(heap, item):", "body": "returnitem = heap[] heap[] = item_siftup_max(heap, )return returnitem", "docstring": "Maxheap version of a heappop followed by a heappush.", "id": "f9253:m1"} {"signature": "def heappush_max(heap, item):", "body": "heap.append(item)_siftdown_max(heap, , len(heap) - )", "docstring": "Push item onto heap, maintaining the heap invariant.", "id": "f9253:m2"} {"signature": "def heappushpop_max(heap, item):", "body": "if heap and heap[] > item:item, heap[] = heap[], item_siftup_max(heap, )return item", "docstring": "Fast version of a heappush followed by a heappop.", "id": "f9253:m3"} {"signature": "def select_rawls_with_extra_column(self, rawl_id):", "body": "cols = self.columns.copy()cols.append('')res = self.select(\"\"\"\"\"\", self.columns, rawl_id, columns=cols)if len(res) > :return res[]else:return None", "docstring": "Return the rawls from the rawl table but in a way to test more stuff", "id": "f9255:c1:m1"} {"signature": "def query_rawls_with_asterisk(self, rawl_id):", "body": "res = self.query(\"\"\"\"\"\", rawl_id, columns=self.columns)if len(res) > :return res[]else:return None", "docstring": "Test out self.query directly using columns", "id": "f9255:c1:m2"} {"signature": "def delete_rawl(self, rawl_id):", "body": "return self.query(\"\", rawl_id, commit=True)", "docstring": "Test a delete", "id": "f9255:c1:m3"} {"signature": "def delete_rawl_without_commit(self, rawl_id):", "body": "return self.query(\"\", rawl_id, commit=False)", "docstring": "Test a delete", "id": "f9255:c1:m4"} {"signature": "def _assemble_with_columns(self, sql_str, columns, *args, **kwargs):", "body": "qcols = []for col in columns:if '' in col:wlist = col.split('')qcols.append(sql.SQL('').join([sql.Identifier(x) for x in wlist]))else:qcols.append(sql.Identifier(col))query_string = sql.SQL(sql_str).format(sql.SQL('').join(qcols),*[sql.Literal(a) for a in args])return query_string", "docstring": "Format a select statement with specific columns \n\n:sql_str: An SQL string template\n:columns: The columns to be selected and put into {0}\n:*args: Arguments to use as query parameters.\n:returns: Psycopg2 compiled query", "id": "f9256:c3:m1"} {"signature": "def _assemble_select(self, sql_str, columns, *args, **kwargs):", "body": "warnings.warn(\"\", DeprecationWarning)return self._assemble_with_columns(sql_str, columns, *args, **kwargs)", "docstring": "Alias for _assemble_with_columns", "id": "f9256:c3:m2"} {"signature": "def _assemble_simple(self, sql_str, *args, **kwargs):", "body": "query_string = sql.SQL(sql_str).format(*[sql.Literal(a) for a in args])return query_string", "docstring": "Format a select statement with specific columns \n\n:sql_str: An SQL string template\n:*args: Arguments to use as query parameters.\n:returns: Psycopg2 compiled query", "id": "f9256:c3:m3"} {"signature": "def _execute(self, query, commit=False, working_columns=None):", "body": "log.debug(\"\")result = []if 
working_columns is None:working_columns = self.columnswith RawlConnection(self.dsn) as conn:query_id = random.randrange()curs = conn.cursor()try:log.debug(\"\" % (query_id, query.as_string(curs)))except:log.exception(\"\")curs.execute(query)log.debug(\"\")if commit == True:log.debug(\"\" % query_id)conn.commit()log.debug(\"\" % curs.rowcount)if curs.rowcount > :result_rows = curs.fetchall()for row in result_rows:i = row_dict = {}for col in working_columns:try:col = col.replace('', '')row_dict[col] = row[i]except IndexError: passi += log.debug(\"\" % row_dict)rr = RawlResult(working_columns, row_dict)result.append(rr)curs.close()return result", "docstring": "Execute a query with provided parameters \n\nParameters\n:query: SQL string with parameter placeholders\n:commit: If True, the query will commit\n:returns: List of rows", "id": "f9256:c3:m4"} {"signature": "def process_columns(self, columns):", "body": "if type(columns) == list:self.columns = columnselif type(columns) == str:self.columns = [c.strip() for c in columns.split()]elif type(columns) == IntEnum:self.columns = [str(c) for c in columns]else:raise RawlException(\"\")", "docstring": "Handle provided columns and if necessary, convert columns to a list for \ninternal strage.\n\n:columns: A sequence of columns for the table. Can be list, comma\n -delimited string, or IntEnum.", "id": "f9256:c3:m5"} {"signature": "def query(self, sql_string, *args, **kwargs):", "body": "commit = Nonecolumns = Noneif kwargs.get('') is not None:commit = kwargs.pop('')if kwargs.get('') is not None:columns = kwargs.pop('')query = self._assemble_simple(sql_string, *args, **kwargs)return self._execute(query, commit=commit, working_columns=columns)", "docstring": "Execute a DML query \n\n:sql_string: An SQL string template\n:*args: Arguments to be passed for query parameters.\n:commit: Whether or not to commit the transaction after the query\n:returns: Psycopg2 result", "id": "f9256:c3:m6"} {"signature": "def select(self, sql_string, cols, *args, **kwargs):", "body": "working_columns = Noneif kwargs.get('') is not None:working_columns = kwargs.pop('')query = self._assemble_select(sql_string, cols, *args, *kwargs)return self._execute(query, working_columns=working_columns)", "docstring": "Execute a SELECT statement \n\n:sql_string: An SQL string template\n:columns: A list of columns to be returned by the query\n:*args: Arguments to be passed for query parameters.\n:returns: Psycopg2 result", "id": "f9256:c3:m7"} {"signature": "def insert_dict(self, value_dict, commit=False):", "body": "for key in value_dict.keys():if key not in self.columns:raise ValueError(\"\" % key)insert_cols = []value_set = []for col in self.columns:if col in value_dict:insert_cols.append(col)value_set.append(value_dict[col])placeholders = ''.join([\"\" % x for x in range(, len(value_set) + )])query = self._assemble_with_columns('''''' + self.table + '''''' + placeholders + '''''' + self.pk + '''''', insert_cols, *value_set)result = self._execute(query, commit=commit)if len(result) > :if hasattr(result[], self.pk):return getattr(result[], self.pk)else:return result[]else:return None", "docstring": "Execute an INSERT statement using a python dict\n\n:value_dict: A dictionary representing all the columns(keys) and \n values that should be part of the INSERT statement\n:commit: Whether to automatically commit the transaction\n:returns: Psycopg2 result", "id": "f9256:c3:m8"} {"signature": "def get(self, pk):", "body": "if type(pk) == str:try:pk = int(pk)except ValueError: passreturn 
self.select(\"\" + self.table + \"\" + self.pk + \"\",self.columns, pk)", "docstring": "Retreive a single record from the table. Lots of reasons this might be\nbest implemented in the model\n\n:pk: The primary key ID for the record\n:returns: List of single result", "id": "f9256:c3:m9"} {"signature": "def all(self):", "body": "return self.select(\"\" + self.table + \"\", self.columns)", "docstring": "Retreive all single record from the table. Should be implemented but not\nrequired.\n:returns: List of results", "id": "f9256:c3:m10"} {"signature": "def retry(exceptions=(Exception,), interval=, max_retries=, success=None,timeout=-):", "body": "if not exceptions and success is None:raise TypeError('')exceptions = exceptions or (_DummyException,)_retries_error_msg = ('''')_timeout_error_msg = ''@decoratordef wrapper(func, *args, **kwargs):signal.signal(signal.SIGALRM, _timeout(_timeout_error_msg.format(timeout, func.__name__)))run_func = functools.partial(func, *args, **kwargs)logger = logging.getLogger(func.__module__)if max_retries < :iterator = itertools.count()else:iterator = range(max_retries)if timeout > :signal.alarm(timeout)for num, _ in enumerate(iterator, ):try:result = run_func()if success is None or success(result):signal.alarm()return resultexcept exceptions:logger.exception(''.format(func.__name__))if num == max_retries:raiselogger.warning(''.format(func.__name__, interval))time.sleep(interval)else:raise MaximumRetriesExceeded(_retries_error_msg.format(max_retries, interval, func.__name__))return wrapper", "docstring": "Decorator to retry a function 'max_retries' amount of times\n\n :param tuple exceptions: Exceptions to be caught for retries\n :param int interval: Interval between retries in seconds\n :param int max_retries: Maximum number of retries to have, if\n set to -1 the decorator will loop forever\n :param function success: Function to indicate success criteria\n :param int timeout: Timeout interval in seconds, if -1 will retry forever\n :raises MaximumRetriesExceeded: Maximum number of retries hit without\n reaching the success criteria\n :raises TypeError: Both exceptions and success were left None causing the\n decorator to have no valid exit criteria.\n\n Example:\n Use it to decorate a function!\n\n .. sourcecode:: python\n\n from retry import retry\n\n @retry(exceptions=(ArithmeticError,), success=lambda x: x > 0)\n def foo(bar):\n if bar < 0:\n raise ArithmeticError('testing this')\n return bar\n foo(5)\n # Should return 5\n foo(-1)\n # Should raise ArithmeticError\n foo(0)\n # Should raise MaximumRetriesExceeded", "id": "f9258:m1"} {"signature": "def validate_activatable_models():", "body": "for model in get_activatable_models():activatable_field = next((f for f in model._meta.fieldsif f.__class__ == models.BooleanField and f.name == model.ACTIVATABLE_FIELD_NAME), None)if activatable_field is None:raise ValidationError((''''.format(model)))if not model.ALLOW_CASCADE_DELETE:for field in model._meta.fields:if field.__class__ in (models.ForeignKey, models.OneToOneField):if field.remote_field.on_delete == models.CASCADE:raise ValidationError(('''''''').format(model))", "docstring": "Raises a ValidationError for any ActivatableModel that has ForeignKeys or OneToOneFields that will\ncause cascading deletions to occur. 
This function also raises a ValidationError if the activatable\nmodel has not defined a Boolean field with the field name defined by the ACTIVATABLE_FIELD_NAME variable\non the model.", "id": "f9268:m1"} {"signature": "def save(self, *args, **kwargs):", "body": "current_activable_value = getattr(self, self.ACTIVATABLE_FIELD_NAME)is_active_changed = self.id is None or self.__original_activatable_value != current_activable_valueself.__original_activatable_value = current_activable_valueret_val = super(BaseActivatableModel, self).save(*args, **kwargs)if is_active_changed:model_activations_changed.send(self.__class__, instance_ids=[self.id], is_active=current_activable_value)if self.activatable_field_updated:model_activations_updated.send(self.__class__, instance_ids=[self.id], is_active=current_activable_value)return ret_val", "docstring": "A custom save method that handles figuring out when something is activated or deactivated.", "id": "f9272:c2:m2"} {"signature": "def delete(self, force=False, **kwargs):", "body": "if force:return super(BaseActivatableModel, self).delete(**kwargs)else:setattr(self, self.ACTIVATABLE_FIELD_NAME, False)return self.save(update_fields=[self.ACTIVATABLE_FIELD_NAME])", "docstring": "It is impossible to delete an activatable model unless force is True. This function instead sets it to inactive.", "id": "f9272:c2:m3"} {"signature": "def configure_settings():", "body": "if not settings.configured:test_db = os.environ.get('', None)if test_db is None:db_config = {'': '','': '','': '','': '','': '','': {'': '',}}elif test_db == '':db_config = {'': '','': '','': '',}elif test_db == '':db_config = {'': '','': '',}else:raise RuntimeError(''.format(test_db))travis_ci = os.environ.get('', None)if travis_ci:db_config.update({'': '','': '','': '',})settings.configure(TEST_RUNNER='',NOSE_ARGS=['', '', ''],MIDDLEWARE_CLASSES={},DATABASES={'': db_config,},INSTALLED_APPS=('','','','','','',),ROOT_URLCONF='',DEBUG=False,)", "docstring": "Configures settings for manage.py and for run_tests.py.", "id": "f9273:m0"} {"signature": "def get_version():", "body": "VERSION_FILE = ''mo = re.search(r'', open(VERSION_FILE, '').read(), re.M)if mo:return mo.group()else:raise RuntimeError(''.format(VERSION_FILE))", "docstring": "Extracts the version number from the version.py file.", "id": "f9275:m0"} {"signature": "def assert_that(val, description=''):", "body": "global _soft_ctxif _soft_ctx:return AssertionBuilder(val, description, '')return AssertionBuilder(val, description)", "docstring": "Factory method for the assertion builder with value to be tested and optional description.", "id": "f9307:m1"} {"signature": "def assert_warn(val, description=''):", "body": "return AssertionBuilder(val, description, '')", "docstring": "Factory method for the assertion builder with value to be tested, optional description, and\n just warn on assertion failures instead of raisings exceptions.", "id": "f9307:m2"} {"signature": "def contents_of(f, encoding=''):", "body": "try:contents = f.read()except AttributeError:try:with open(f, '') as fp:contents = fp.read()except TypeError:raise ValueError('' % type(f).__name__)except OSError:if not isinstance(f, str_types):raise ValueError('' % type(f).__name__)raiseif sys.version_info[] == and type(contents) is bytes:return contents.decode(encoding, '')elif sys.version_info[] == and encoding == '':return contents.encode('', '')else:try:return contents.decode(encoding, '')except AttributeError:passreturn contents", "docstring": "Helper to read the contents of the given 
file or path into a string with the given encoding.\n Encoding defaults to 'utf-8', other useful encodings are 'ascii' and 'latin-1'.", "id": "f9307:m3"} {"signature": "def fail(msg=''):", "body": "raise AssertionError('' % msg if msg else '')", "docstring": "Force test failure with the given message.", "id": "f9307:m4"} {"signature": "def soft_fail(msg=''):", "body": "global _soft_ctxif _soft_ctx:global _soft_err_soft_err.append('' % msg if msg else '')returnfail(msg)", "docstring": "Adds error message to soft errors list if within soft assertions context.\n Either just force test failure with the given message.", "id": "f9307:m5"} {"signature": "def __init__(self, val, description='', kind=None, expected=None):", "body": "self.val = valself.description = descriptionself.kind = kindself.expected = expected", "docstring": "Construct the assertion builder.", "id": "f9307:c0:m0"} {"signature": "def described_as(self, description):", "body": "self.description = str(description)return self", "docstring": "Describes the assertion. On failure, the description is included in the error message.", "id": "f9307:c0:m1"} {"signature": "def is_equal_to(self, other, **kwargs):", "body": "if self._check_dict_like(self.val, check_values=False, return_as_bool=True) andself._check_dict_like(other, check_values=False, return_as_bool=True):if self._dict_not_equal(self.val, other, ignore=kwargs.get(''), include=kwargs.get('')):self._dict_err(self.val, other, ignore=kwargs.get(''), include=kwargs.get(''))else:if self.val != other:self._err('' % (self.val, other))return self", "docstring": "Asserts that val is equal to other.", "id": "f9307:c0:m2"} {"signature": "def is_not_equal_to(self, other):", "body": "if self.val == other:self._err('' % (self.val, other))return self", "docstring": "Asserts that val is not equal to other.", "id": "f9307:c0:m3"} {"signature": "def is_same_as(self, other):", "body": "if self.val is not other:self._err('' % (self.val, other))return self", "docstring": "Asserts that the val is identical to other, via 'is' compare.", "id": "f9307:c0:m4"} {"signature": "def is_not_same_as(self, other):", "body": "if self.val is other:self._err('' % (self.val, other))return self", "docstring": "Asserts that the val is not identical to other, via 'is' compare.", "id": "f9307:c0:m5"} {"signature": "def is_true(self):", "body": "if not self.val:self._err('')return self", "docstring": "Asserts that val is true.", "id": "f9307:c0:m6"} {"signature": "def is_false(self):", "body": "if self.val:self._err('')return self", "docstring": "Asserts that val is false.", "id": "f9307:c0:m7"} {"signature": "def is_none(self):", "body": "if self.val is not None:self._err('' % self.val)return self", "docstring": "Asserts that val is none.", "id": "f9307:c0:m8"} {"signature": "def is_not_none(self):", "body": "if self.val is None:self._err('')return self", "docstring": "Asserts that val is not none.", "id": "f9307:c0:m9"} {"signature": "def is_type_of(self, some_type):", "body": "if type(some_type) is not type andnot issubclass(type(some_type), type):raise TypeError('')if type(self.val) is not some_type:if hasattr(self.val, ''):t = self.val.__name__elif hasattr(self.val, ''):t = self.val.__class__.__name__else:t = ''self._err('' % (self.val, t, some_type.__name__))return self", "docstring": "Asserts that val is of the given type.", "id": "f9307:c0:m10"} {"signature": "def is_instance_of(self, some_class):", "body": "try:if not isinstance(self.val, some_class):if hasattr(self.val, ''):t = self.val.__name__elif 
hasattr(self.val, ''):t = self.val.__class__.__name__else:t = ''self._err('' % (self.val, t, some_class.__name__))except TypeError:raise TypeError('')return self", "docstring": "Asserts that val is an instance of the given class.", "id": "f9307:c0:m11"} {"signature": "def is_length(self, length):", "body": "if type(length) is not int:raise TypeError('')if length < :raise ValueError('')if len(self.val) != length:self._err('' % (self.val, length, len(self.val)))return self", "docstring": "Asserts that val is the given length.", "id": "f9307:c0:m12"} {"signature": "def contains(self, *items):", "body": "if len(items) == :raise ValueError('')elif len(items) == :if items[] not in self.val:if self._check_dict_like(self.val, return_as_bool=True):self._err('' % (self.val, items[]))else:self._err('' % (self.val, items[]))else:missing = []for i in items:if i not in self.val:missing.append(i)if missing:if self._check_dict_like(self.val, return_as_bool=True):self._err('' % (self.val, self._fmt_items(items), '' if len(missing) == else '', self._fmt_items(missing)))else:self._err('' % (self.val, self._fmt_items(items), self._fmt_items(missing)))return self", "docstring": "Asserts that val contains the given item or items.", "id": "f9307:c0:m13"} {"signature": "def does_not_contain(self, *items):", "body": "if len(items) == :raise ValueError('')elif len(items) == :if items[] in self.val:self._err('' % (self.val, items[]))else:found = []for i in items:if i in self.val:found.append(i)if found:self._err('' % (self.val, self._fmt_items(items), self._fmt_items(found)))return self", "docstring": "Asserts that val does not contain the given item or items.", "id": "f9307:c0:m14"} {"signature": "def contains_only(self, *items):", "body": "if len(items) == :raise ValueError('')else:extra = []for i in self.val:if i not in items:extra.append(i)if extra:self._err('' % (self.val, self._fmt_items(items), self._fmt_items(extra)))missing = []for i in items:if i not in self.val:missing.append(i)if missing:self._err('' % (self.val, self._fmt_items(items), self._fmt_items(missing)))return self", "docstring": "Asserts that val contains only the given item or items.", "id": "f9307:c0:m15"} {"signature": "def contains_sequence(self, *items):", "body": "if len(items) == :raise ValueError('')else:try:for i in xrange(len(self.val) - len(items) + ):for j in xrange(len(items)):if self.val[i+j] != items[j]:breakelse:return selfexcept TypeError:raise TypeError('')self._err('' % (self.val, self._fmt_items(items)))", "docstring": "Asserts that val contains the given sequence of items in order.", "id": "f9307:c0:m16"} {"signature": "def contains_duplicates(self):", "body": "try:if len(self.val) != len(set(self.val)):return selfexcept TypeError:raise TypeError('')self._err('' % self.val)", "docstring": "Asserts that val is iterable and contains duplicate items.", "id": "f9307:c0:m17"} {"signature": "def does_not_contain_duplicates(self):", "body": "try:if len(self.val) == len(set(self.val)):return selfexcept TypeError:raise TypeError('')self._err('' % self.val)", "docstring": "Asserts that val is iterable and does not contain any duplicate items.", "id": "f9307:c0:m18"} {"signature": "def is_empty(self):", "body": "if len(self.val) != :if isinstance(self.val, str_types):self._err('' % self.val)else:self._err('' % self.val)return self", "docstring": "Asserts that val is empty.", "id": "f9307:c0:m19"} {"signature": "def is_not_empty(self):", "body": "if len(self.val) == :if isinstance(self.val, 
str_types):self._err('')else:self._err('')return self", "docstring": "Asserts that val is not empty.", "id": "f9307:c0:m20"} {"signature": "def is_in(self, *items):", "body": "if len(items) == :raise ValueError('')else:for i in items:if self.val == i:return selfself._err('' % (self.val, self._fmt_items(items)))", "docstring": "Asserts that val is equal to one of the given items.", "id": "f9307:c0:m21"} {"signature": "def is_not_in(self, *items):", "body": "if len(items) == :raise ValueError('')else:for i in items:if self.val == i:self._err('' % (self.val, self._fmt_items(items)))return self", "docstring": "Asserts that val is not equal to one of the given items.", "id": "f9307:c0:m22"} {"signature": "def _validate_number(self):", "body": "if isinstance(self.val, numbers.Number) is False:raise TypeError('')", "docstring": "Raise TypeError if val is not numeric.", "id": "f9307:c0:m24"} {"signature": "def _validate_real(self):", "body": "if isinstance(self.val, numbers.Real) is False:raise TypeError('')", "docstring": "Raise TypeError if val is not real number.", "id": "f9307:c0:m25"} {"signature": "def is_zero(self):", "body": "self._validate_number()return self.is_equal_to()", "docstring": "Asserts that val is numeric and equal to zero.", "id": "f9307:c0:m26"} {"signature": "def is_not_zero(self):", "body": "self._validate_number()return self.is_not_equal_to()", "docstring": "Asserts that val is numeric and not equal to zero.", "id": "f9307:c0:m27"} {"signature": "def is_nan(self):", "body": "self._validate_number()self._validate_real()if not math.isnan(self.val):self._err('' % self.val)return self", "docstring": "Asserts that val is real number and NaN (not a number).", "id": "f9307:c0:m28"} {"signature": "def is_not_nan(self):", "body": "self._validate_number()self._validate_real()if math.isnan(self.val):self._err('')return self", "docstring": "Asserts that val is real number and not NaN (not a number).", "id": "f9307:c0:m29"} {"signature": "def is_inf(self):", "body": "self._validate_number()self._validate_real()if not math.isinf(self.val):self._err('' % self.val)return self", "docstring": "Asserts that val is real number and Inf (infinity).", "id": "f9307:c0:m30"} {"signature": "def is_not_inf(self):", "body": "self._validate_number()self._validate_real()if math.isinf(self.val):self._err('')return self", "docstring": "Asserts that val is real number and not Inf (infinity).", "id": "f9307:c0:m31"} {"signature": "def is_greater_than(self, other):", "body": "self._validate_compareable(other)if self.val <= other:if type(self.val) is datetime.datetime:self._err('' % (self.val.strftime(''), other.strftime('')))else:self._err('' % (self.val, other))return self", "docstring": "Asserts that val is numeric and is greater than other.", "id": "f9307:c0:m32"} {"signature": "def is_greater_than_or_equal_to(self, other):", "body": "self._validate_compareable(other)if self.val < other:if type(self.val) is datetime.datetime:self._err('' % (self.val.strftime(''), other.strftime('')))else:self._err('' % (self.val, other))return self", "docstring": "Asserts that val is numeric and is greater than or equal to other.", "id": "f9307:c0:m33"} {"signature": "def is_less_than(self, other):", "body": "self._validate_compareable(other)if self.val >= other:if type(self.val) is datetime.datetime:self._err('' % (self.val.strftime(''), other.strftime('')))else:self._err('' % (self.val, other))return self", "docstring": "Asserts that val is numeric and is less than other.", "id": "f9307:c0:m34"} {"signature": "def 
is_less_than_or_equal_to(self, other):", "body": "self._validate_compareable(other)if self.val > other:if type(self.val) is datetime.datetime:self._err('' % (self.val.strftime(''), other.strftime('')))else:self._err('' % (self.val, other))return self", "docstring": "Asserts that val is numeric and is less than or equal to other.", "id": "f9307:c0:m35"} {"signature": "def is_positive(self):", "body": "return self.is_greater_than()", "docstring": "Asserts that val is numeric and greater than zero.", "id": "f9307:c0:m36"} {"signature": "def is_negative(self):", "body": "return self.is_less_than()", "docstring": "Asserts that val is numeric and less than zero.", "id": "f9307:c0:m37"} {"signature": "def is_between(self, low, high):", "body": "val_type = type(self.val)self._validate_between_args(val_type, low, high)if self.val < low or self.val > high:if val_type is datetime.datetime:self._err('' % (self.val.strftime(''), low.strftime(''), high.strftime('')))else:self._err('' % (self.val, low, high))return self", "docstring": "Asserts that val is numeric and is between low and high.", "id": "f9307:c0:m38"} {"signature": "def is_not_between(self, low, high):", "body": "val_type = type(self.val)self._validate_between_args(val_type, low, high)if self.val >= low and self.val <= high:if val_type is datetime.datetime:self._err('' % (self.val.strftime(''), low.strftime(''), high.strftime('')))else:self._err('' % (self.val, low, high))return self", "docstring": "Asserts that val is numeric and is between low and high.", "id": "f9307:c0:m39"} {"signature": "def is_close_to(self, other, tolerance):", "body": "self._validate_close_to_args(self.val, other, tolerance)if self.val < (other-tolerance) or self.val > (other+tolerance):if type(self.val) is datetime.datetime:tolerance_seconds = tolerance.days * + tolerance.seconds + tolerance.microseconds / h, rem = divmod(tolerance_seconds, )m, s = divmod(rem, )self._err('' % (self.val.strftime(''), other.strftime(''), h, m, s))else:self._err('' % (self.val, other, tolerance))return self", "docstring": "Asserts that val is numeric and is close to other within tolerance.", "id": "f9307:c0:m40"} {"signature": "def is_not_close_to(self, other, tolerance):", "body": "self._validate_close_to_args(self.val, other, tolerance)if self.val >= (other-tolerance) and self.val <= (other+tolerance):if type(self.val) is datetime.datetime:tolerance_seconds = tolerance.days * + tolerance.seconds + tolerance.microseconds / h, rem = divmod(tolerance_seconds, )m, s = divmod(rem, )self._err('' % (self.val.strftime(''), other.strftime(''), h, m, s))else:self._err('' % (self.val, other, tolerance))return self", "docstring": "Asserts that val is numeric and is not close to other within tolerance.", "id": "f9307:c0:m41"} {"signature": "def is_equal_to_ignoring_case(self, other):", "body": "if not isinstance(self.val, str_types):raise TypeError('')if not isinstance(other, str_types):raise TypeError('')if self.val.lower() != other.lower():self._err('' % (self.val, other))return self", "docstring": "Asserts that val is case-insensitive equal to other.", "id": "f9307:c0:m42"} {"signature": "def contains_ignoring_case(self, *items):", "body": "if len(items) == :raise ValueError('')if isinstance(self.val, str_types):if len(items) == :if not isinstance(items[], str_types):raise TypeError('')if items[].lower() not in self.val.lower():self._err('' % (self.val, items[]))else:missing = []for i in items:if not isinstance(i, str_types):raise TypeError('')if i.lower() not in 
self.val.lower():missing.append(i)if missing:self._err('' % (self.val, self._fmt_items(items), self._fmt_items(missing)))elif isinstance(self.val, Iterable):missing = []for i in items:if not isinstance(i, str_types):raise TypeError('')found = Falsefor v in self.val:if not isinstance(v, str_types):raise TypeError('')if i.lower() == v.lower():found = Truebreakif not found:missing.append(i)if missing:self._err('' % (self.val, self._fmt_items(items), self._fmt_items(missing)))else:raise TypeError('')return self", "docstring": "Asserts that val is string and contains the given item or items.", "id": "f9307:c0:m43"} {"signature": "def starts_with(self, prefix):", "body": "if prefix is None:raise TypeError('')if isinstance(self.val, str_types):if not isinstance(prefix, str_types):raise TypeError('')if len(prefix) == :raise ValueError('')if not self.val.startswith(prefix):self._err('' % (self.val, prefix))elif isinstance(self.val, Iterable):if len(self.val) == :raise ValueError('')first = next(iter(self.val))if first != prefix:self._err('' % (self.val, prefix))else:raise TypeError('')return self", "docstring": "Asserts that val is string or iterable and starts with prefix.", "id": "f9307:c0:m44"} {"signature": "def ends_with(self, suffix):", "body": "if suffix is None:raise TypeError('')if isinstance(self.val, str_types):if not isinstance(suffix, str_types):raise TypeError('')if len(suffix) == :raise ValueError('')if not self.val.endswith(suffix):self._err('' % (self.val, suffix))elif isinstance(self.val, Iterable):if len(self.val) == :raise ValueError('')last = Nonefor last in self.val:passif last != suffix:self._err('' % (self.val, suffix))else:raise TypeError('')return self", "docstring": "Asserts that val is string or iterable and ends with suffix.", "id": "f9307:c0:m45"} {"signature": "def matches(self, pattern):", "body": "if not isinstance(self.val, str_types):raise TypeError('')if not isinstance(pattern, str_types):raise TypeError('')if len(pattern) == :raise ValueError('')if re.search(pattern, self.val) is None:self._err('' % (self.val, pattern))return self", "docstring": "Asserts that val is string and matches regex pattern.", "id": "f9307:c0:m46"} {"signature": "def does_not_match(self, pattern):", "body": "if not isinstance(self.val, str_types):raise TypeError('')if not isinstance(pattern, str_types):raise TypeError('')if len(pattern) == :raise ValueError('')if re.search(pattern, self.val) is not None:self._err('' % (self.val, pattern))return self", "docstring": "Asserts that val is string and does not match regex pattern.", "id": "f9307:c0:m47"} {"signature": "def is_alpha(self):", "body": "if not isinstance(self.val, str_types):raise TypeError('')if len(self.val) == :raise ValueError('')if not self.val.isalpha():self._err('' % self.val)return self", "docstring": "Asserts that val is non-empty string and all characters are alphabetic.", "id": "f9307:c0:m48"} {"signature": "def is_digit(self):", "body": "if not isinstance(self.val, str_types):raise TypeError('')if len(self.val) == :raise ValueError('')if not self.val.isdigit():self._err('' % self.val)return self", "docstring": "Asserts that val is non-empty string and all characters are digits.", "id": "f9307:c0:m49"} {"signature": "def is_lower(self):", "body": "if not isinstance(self.val, str_types):raise TypeError('')if len(self.val) == :raise ValueError('')if self.val != self.val.lower():self._err('' % self.val)return self", "docstring": "Asserts that val is non-empty string and all characters are lowercase.", "id": "f9307:c0:m50"} 
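The preceding records (starts_with, ends_with, matches, is_alpha, is_digit, is_lower) describe a fluent string-assertion API in which every method returns the builder, so checks chain. A minimal usage sketch follows, assuming these methods are reached through the assert_that factory recorded earlier in this section and that the module imports as assertpy (an assumed import path, not stated by the records):

from assertpy import assert_that  # assumed import path

# Each assertion returns the builder, so checks chain left to right.
assert_that('foo').is_lower().is_alpha().starts_with('f').ends_with('oo')
assert_that('FOO').is_upper()             # is_upper is documented in the next record
assert_that('123').is_digit()             # digits only; a non-string raises TypeError
assert_that('ab-12').matches(r'\w+-\d+')  # regex search, per the matches() record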
{"signature": "def is_upper(self):", "body": "if not isinstance(self.val, str_types):raise TypeError('')if len(self.val) == :raise ValueError('')if self.val != self.val.upper():self._err('' % self.val)return self", "docstring": "Asserts that val is non-empty string and all characters are uppercase.", "id": "f9307:c0:m51"} {"signature": "def is_unicode(self):", "body": "if type(self.val) is not unicode:self._err('' % (self.val, type(self.val).__name__))return self", "docstring": "Asserts that val is a unicode string.", "id": "f9307:c0:m52"} {"signature": "def is_iterable(self):", "body": "if not isinstance(self.val, Iterable):self._err('')return self", "docstring": "Asserts that val is iterable collection.", "id": "f9307:c0:m53"} {"signature": "def is_not_iterable(self):", "body": "if isinstance(self.val, Iterable):self._err('')return self", "docstring": "Asserts that val is not iterable collection.", "id": "f9307:c0:m54"} {"signature": "def is_subset_of(self, *supersets):", "body": "if not isinstance(self.val, Iterable):raise TypeError('')if len(supersets) == :raise ValueError('')missing = []if hasattr(self.val, '') and callable(getattr(self.val, '')) and hasattr(self.val, ''):superdict = {}for l,j in enumerate(supersets):self._check_dict_like(j, check_values=False, name='' % (l+))for k in j.keys():superdict.update({k: j[k]})for i in self.val.keys():if i not in superdict:missing.append({i: self.val[i]}) elif self.val[i] != superdict[i]:missing.append({i: self.val[i]}) if missing:self._err('' % (self.val, self._fmt_items(superdict), self._fmt_items(missing), '' if len(missing) == else ''))else:superset = set()for j in supersets:try:for k in j:superset.add(k)except Exception:superset.add(j)for i in self.val:if i not in superset:missing.append(i)if missing:self._err('' % (self.val, self._fmt_items(superset), self._fmt_items(missing), '' if len(missing) == else ''))return self", "docstring": "Asserts that val is iterable and a subset of the given superset or flattened superset if multiple supersets are given.", "id": "f9307:c0:m55"} {"signature": "def contains_key(self, *keys):", "body": "self._check_dict_like(self.val, check_values=False, check_getitem=False)return self.contains(*keys)", "docstring": "Asserts the val is a dict and contains the given key or keys. Alias for contains().", "id": "f9307:c0:m56"} {"signature": "def does_not_contain_key(self, *keys):", "body": "self._check_dict_like(self.val, check_values=False, check_getitem=False)return self.does_not_contain(*keys)", "docstring": "Asserts the val is a dict and does not contain the given key or keys. 
Alias for does_not_contain().", "id": "f9307:c0:m57"} {"signature": "def contains_value(self, *values):", "body": "self._check_dict_like(self.val, check_getitem=False)if len(values) == :raise ValueError('')missing = []for v in values:if v not in self.val.values():missing.append(v)if missing:self._err('' % (self.val, self._fmt_items(values), self._fmt_items(missing)))return self", "docstring": "Asserts that val is a dict and contains the given value or values.", "id": "f9307:c0:m58"} {"signature": "def does_not_contain_value(self, *values):", "body": "self._check_dict_like(self.val, check_getitem=False)if len(values) == :raise ValueError('')else:found = []for v in values:if v in self.val.values():found.append(v)if found:self._err('' % (self.val, self._fmt_items(values), self._fmt_items(found)))return self", "docstring": "Asserts that val is a dict and does not contain the given value or values.", "id": "f9307:c0:m59"} {"signature": "def contains_entry(self, *args, **kwargs):", "body": "self._check_dict_like(self.val, check_values=False)entries = list(args) + [{k:v} for k,v in kwargs.items()]if len(entries) == :raise ValueError('')missing = []for e in entries:if type(e) is not dict:raise TypeError('')if len(e) != :raise ValueError('')k = next(iter(e))if k not in self.val:missing.append(e) elif self.val[k] != e[k]:missing.append(e) if missing:self._err('' % (self.val, self._fmt_items(entries), self._fmt_items(missing)))return self", "docstring": "Asserts that val is a dict and contains the given entry or entries.", "id": "f9307:c0:m60"} {"signature": "def does_not_contain_entry(self, *args, **kwargs):", "body": "self._check_dict_like(self.val, check_values=False)entries = list(args) + [{k:v} for k,v in kwargs.items()]if len(entries) == :raise ValueError('')found = []for e in entries:if type(e) is not dict:raise TypeError('')if len(e) != :raise ValueError('')k = next(iter(e))if k in self.val and e[k] == self.val[k]:found.append(e)if found:self._err('' % (self.val, self._fmt_items(entries), self._fmt_items(found)))return self", "docstring": "Asserts that val is a dict and does not contain the given entry or entries.", "id": "f9307:c0:m61"} {"signature": "def is_before(self, other):", "body": "if type(self.val) is not datetime.datetime:raise TypeError('' % type(self.val).__name__)if type(other) is not datetime.datetime:raise TypeError('' % type(other).__name__)if self.val >= other:self._err('' % (self.val.strftime(''), other.strftime('')))return self", "docstring": "Asserts that val is a date and is before other date.", "id": "f9307:c0:m62"} {"signature": "def is_after(self, other):", "body": "if type(self.val) is not datetime.datetime:raise TypeError('' % type(self.val).__name__)if type(other) is not datetime.datetime:raise TypeError('' % type(other).__name__)if self.val <= other:self._err('' % (self.val.strftime(''), other.strftime('')))return self", "docstring": "Asserts that val is a date and is after other date.", "id": "f9307:c0:m63"} {"signature": "def exists(self):", "body": "if not isinstance(self.val, str_types):raise TypeError('')if not os.path.exists(self.val):self._err('' % self.val)return self", "docstring": "Asserts that val is a path and that it exists.", "id": "f9307:c0:m67"} {"signature": "def does_not_exist(self):", "body": "if not isinstance(self.val, str_types):raise TypeError('')if os.path.exists(self.val):self._err('' % self.val)return self", "docstring": "Asserts that val is a path and that it does not exist.", "id": "f9307:c0:m68"} {"signature": "def is_file(self):", 
"body": "self.exists()if not os.path.isfile(self.val):self._err('' % self.val)return self", "docstring": "Asserts that val is an existing path to a file.", "id": "f9307:c0:m69"} {"signature": "def is_directory(self):", "body": "self.exists()if not os.path.isdir(self.val):self._err('' % self.val)return self", "docstring": "Asserts that val is an existing path to a directory.", "id": "f9307:c0:m70"} {"signature": "def is_named(self, filename):", "body": "self.is_file()if not isinstance(filename, str_types):raise TypeError('')val_filename = os.path.basename(os.path.abspath(self.val))if val_filename != filename:self._err('' % (val_filename, filename))return self", "docstring": "Asserts that val is an existing path to a file and that file is named filename.", "id": "f9307:c0:m71"} {"signature": "def is_child_of(self, parent):", "body": "self.is_file()if not isinstance(parent, str_types):raise TypeError('')val_abspath = os.path.abspath(self.val)parent_abspath = os.path.abspath(parent)if not val_abspath.startswith(parent_abspath):self._err('' % (val_abspath, parent_abspath))return self", "docstring": "Asserts that val is an existing path to a file and that file is a child of parent.", "id": "f9307:c0:m72"} {"signature": "def extracting(self, *names, **kwargs):", "body": "if not isinstance(self.val, Iterable):raise TypeError('')if isinstance(self.val, str_types):raise TypeError('')if len(names) == :raise ValueError('')def _extract(x, name):if self._check_dict_like(x, check_values=False, return_as_bool=True):if name in x:return x[name]else:raise ValueError('' % (list(x.keys()), name))elif isinstance(x, Iterable):self._check_iterable(x, name='')return x[name]elif hasattr(x, name):attr = getattr(x, name)if callable(attr):try:return attr()except TypeError:raise ValueError('' % name)else:return attrelse:raise ValueError('' % name)def _filter(x):if '' in kwargs:if isinstance(kwargs[''], str_types):return bool(_extract(x, kwargs['']))elif self._check_dict_like(kwargs[''], check_values=False, return_as_bool=True):for k in kwargs['']:if isinstance(k, str_types):if _extract(x, k) != kwargs[''][k]:return Falsereturn Trueelif callable(kwargs['']):return kwargs[''](x)return Falsereturn Truedef _sort(x):if '' in kwargs:if isinstance(kwargs[''], str_types):return _extract(x, kwargs[''])elif isinstance(kwargs[''], Iterable):items = []for k in kwargs['']:if isinstance(k, str_types):items.append(_extract(x, k))return tuple(items)elif callable(kwargs['']):return kwargs[''](x)return extracted = []for i in sorted(self.val, key=lambda x: _sort(x)):if _filter(i):items = [_extract(i, name) for name in names]extracted.append(tuple(items) if len(items) > else items[])return AssertionBuilder(extracted, self.description, self.kind)", "docstring": "Asserts that val is collection, then extracts the named properties or named zero-arg methods into a list (or list of tuples if multiple names are given).", "id": "f9307:c0:m73"} {"signature": "def __getattr__(self, attr):", "body": "if not attr.startswith(''):raise AttributeError('' % attr)attr_name = attr[:]err_msg = Falseis_dict = isinstance(self.val, Iterable) and hasattr(self.val, '')if not hasattr(self.val, attr_name):if is_dict:if attr_name not in self.val:err_msg = '' % (attr_name, attr_name)else:err_msg = '' % (attr_name, attr_name)def _wrapper(*args, **kwargs):if err_msg:self._err(err_msg) else:if len(args) != :raise TypeError('' % (attr, len(args)))try:val_attr = getattr(self.val, attr_name)except AttributeError:val_attr = self.val[attr_name]if 
callable(val_attr):try:actual = val_attr()except TypeError:raise TypeError('' % attr_name)else:actual = val_attrexpected = args[]if actual != expected:self._err('' % (actual, expected, '' if is_dict else '', attr_name))return selfreturn _wrapper", "docstring": "Asserts that val has attribute attr and that attribute's value is equal to other via a dynamic assertion of the form: has_().", "id": "f9307:c0:m74"} {"signature": "def raises(self, ex):", "body": "if not callable(self.val):raise TypeError('')if not issubclass(ex, BaseException):raise TypeError('')return AssertionBuilder(self.val, self.description, self.kind, ex)", "docstring": "Asserts that val is callable and that when called raises the given error.", "id": "f9307:c0:m75"} {"signature": "def when_called_with(self, *some_args, **some_kwargs):", "body": "if not self.expected:raise TypeError('')try:self.val(*some_args, **some_kwargs)except BaseException as e:if issubclass(type(e), self.expected):return AssertionBuilder(str(e), self.description, self.kind)else:self._err('' % (self.val.__name__,self.expected.__name__,self._fmt_args_kwargs(*some_args, **some_kwargs),type(e).__name__))self._err('' % (self.val.__name__,self.expected.__name__,self._fmt_args_kwargs(*some_args, **some_kwargs)))", "docstring": "Asserts the val callable when invoked with the given args and kwargs raises the expected exception.", "id": "f9307:c0:m76"} {"signature": "def _err(self, msg):", "body": "out = '' % ('' % self.description if len(self.description) > else '', msg)if self.kind == '':print(out)return selfelif self.kind == '':global _soft_err_soft_err.append(out)return selfelse:raise AssertionError(out)", "docstring": "Helper to raise an AssertionError, and optionally prepend custom description.", "id": "f9307:c0:m77"} {"signature": "def _fmt_args_kwargs(self, *some_args, **some_kwargs):", "body": "if some_args:out_args = str(some_args).lstrip('').rstrip('')if some_kwargs:out_kwargs = ''.join([str(i).lstrip('').rstrip('').replace('','') for i in [(k,some_kwargs[k]) for k in sorted(some_kwargs.keys())]])if some_args and some_kwargs:return out_args + '' + out_kwargselif some_args:return out_argselif some_kwargs:return out_kwargselse:return ''", "docstring": "Helper to convert the given args and kwargs into a string.", "id": "f9307:c0:m79"} {"signature": "def __init__(self, pymata):", "body": "self.pymata = pymataself.last_pin_query_results = []self.capability_query_results = []self.analog_mapping_query_results = []self.total_pins_discovered = self.number_of_analog_pins_discovered = threading.Thread.__init__(self)self.daemon = Trueself.stop_event = threading.Event()", "docstring": "constructor for CommandHandler class\n\n:param pymata: A reference to the pymata instance.", "id": "f9310:c0:m0"} {"signature": "def auto_discover_board(self, verbose):", "body": "start_time = time.time()while len(self.analog_mapping_query_results) == :if time.time() - start_time > :return Falseself.send_sysex(self.ANALOG_MAPPING_QUERY)time.sleep()if verbose:print(\"\" % (time.time() - start_time))for pin in self.analog_mapping_query_results:self.total_pins_discovered += if pin != self.pymata.IGNORE:self.number_of_analog_pins_discovered += if verbose:print('' % self.total_pins_discovered)print('' % self.number_of_analog_pins_discovered)for pin in range(, self.total_pins_discovered):response_entry = [self.pymata.INPUT, , None]self.digital_response_table.append(response_entry)for pin in range(, self.number_of_analog_pins_discovered):response_entry = [self.pymata.INPUT, , 
None]self.analog_response_table.append(response_entry)for pin in range(, self.total_pins_discovered):digital_latch_table_entry = [, , , , None]self.digital_latch_table.append(digital_latch_table_entry)for pin in range(, self.number_of_analog_pins_discovered):analog_latch_table_entry = [, , , , , None]self.analog_latch_table.append(analog_latch_table_entry)return True", "docstring": "This method will allow up to 30 seconds for discovery (communicating with) an Arduino board\nand then will determine a pin configuration table for the board.\n:return: True if board is successfully discovered or False upon timeout", "id": "f9310:c0:m3"} {"signature": "def report_version(self, data):", "body": "self.firmata_version.append(data[]) self.firmata_version.append(data[])", "docstring": "This method processes the report version message, sent asynchronously by Firmata when it starts up\nor after refresh_report_version() is called\n\nUse the api method api_get_version to retrieve this information\n\n:param data: Message data from Firmata\n\n:return: No return value.", "id": "f9310:c0:m4"} {"signature": "def set_analog_latch(self, pin, threshold_type, threshold_value, cb):", "body": "with self.pymata.data_lock:self.analog_latch_table[pin] = [self.LATCH_ARMED, threshold_type, threshold_value, , , cb]", "docstring": "This method \"arms\" a pin to allow data latching for the pin.\n\n:param pin: Analog pin number (value following an 'A' designator, i.e. A5 = 5\n\n:param threshold_type: ANALOG_LATCH_GT | ANALOG_LATCH_LT | ANALOG_LATCH_GTE | ANALOG_LATCH_LTE\n\n:param threshold_value: numerical value\n\n:param cb: User provided callback function", "id": "f9310:c0:m5"} {"signature": "def set_digital_latch(self, pin, threshold_type, cb):", "body": "with self.pymata.data_lock:self.digital_latch_table[pin] = [self.LATCH_ARMED, threshold_type, , , cb]", "docstring": "This method \"arms\" a pin to allow data latching for the pin.\n\n:param pin: digital pin number\n\n:param threshold_type: DIGITAL_LATCH_HIGH | DIGITAL_LATCH_LOW\n\n:param cb: User provided callback function", "id": "f9310:c0:m6"} {"signature": "def get_analog_latch_data(self, pin):", "body": "with self.pymata.data_lock:pin_data = self.analog_latch_table[pin]current_latch_data = [pin,pin_data[self.LATCH_STATE],pin_data[self.ANALOG_LATCHED_DATA],pin_data[self.ANALOG_TIME_STAMP],pin_data[self.ANALOG_LATCH_CALLBACK]]if pin_data[self.LATCH_STATE] == self.LATCH_LATCHED:self.analog_latch_table[pin] = [, , , , , None]return current_latch_data", "docstring": "This method reads the analog latch table for the specified pin and returns a list that contains:\n[latch_state, latched_data, and time_stamp].\nIf the latch state is latched, the entry in the table is cleared\n\n:param pin: pin number\n\n:return: [latch_state, latched_data, and time_stamp]", "id": "f9310:c0:m7"} {"signature": "def get_digital_latch_data(self, pin):", "body": "with self.pymata.data_lock:pin_data = self.digital_latch_table[pin]current_latch_data = [pin,pin_data[self.LATCH_STATE],pin_data[self.DIGITAL_LATCHED_DATA],pin_data[self.DIGITAL_TIME_STAMP],pin_data[self.DIGITAL_LATCH_CALLBACK]]if pin_data[self.LATCH_STATE] == self.LATCH_LATCHED:self.digital_latch_table[pin] = [, , , , None]return current_latch_data", "docstring": "This method reads the digital latch table for the specified pin and returns a list that contains:\n[latch_state, latched_data, and time_stamp].\nIf the latch state is latched, the entry in the table is cleared\n\n:param pin: pin number\n\n:return: [latch_state, 
latched_data, and time_stamp]", "id": "f9310:c0:m8"} {"signature": "def report_firmware(self, data):", "body": "self.firmata_firmware.append(data[]) self.firmata_firmware.append(data[]) name_data = data[:]file_name = []for i in name_data[::]:file_name.append(chr(i))self.firmata_firmware.append(\"\".join(file_name))", "docstring": "This method processes the report firmware message, sent asynchronously by Firmata when it starts up\nor after refresh_report_firmware() is called\n\nUse the api method api_get_firmware_version to retrieve this information\n\n:param data: Message data from Firmata\n\n:return: No return value.", "id": "f9310:c0:m9"} {"signature": "def analog_message(self, data):", "body": "with self.pymata.data_lock:previous_value =self.analog_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE]self.analog_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE]= (data[self.MSB] << ) + data[self.LSB]pin = data[]pin_response_data_data = self.analog_response_table[pin]value = pin_response_data_data[self.RESPONSE_TABLE_PIN_DATA_VALUE]callback = self.analog_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_CALLBACK]if callback is not None:if value != previous_value:callback([self.pymata.ANALOG, pin, value])latching_entry = self.analog_latch_table[pin]if latching_entry[self.LATCH_STATE] == self.LATCH_ARMED:if latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.ANALOG_LATCH_GT:if value > latching_entry[self.ANALOG_LATCH_DATA_TARGET]:if latching_entry[self.ANALOG_LATCH_CALLBACK] is not None:self.analog_latch_table[pin] = [, , , , , None]latching_entry[self.ANALOG_LATCH_CALLBACK]([self.pymata.ANALOG | self.pymata.LATCH_MODE,pin, value, time.time()])else:updated_latch_entry = latching_entryupdated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHEDupdated_latch_entry[self.ANALOG_LATCHED_DATA] = valueupdated_latch_entry[self.ANALOG_TIME_STAMP] = time.time()self.analog_latch_table[pin] = updated_latch_entryelse:pass elif latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.ANALOG_LATCH_GTE:if value >= latching_entry[self.ANALOG_LATCH_DATA_TARGET]:if latching_entry[self.ANALOG_LATCH_CALLBACK] is not None:self.analog_latch_table[pin] = [, , , , , None]latching_entry[self.ANALOG_LATCH_CALLBACK]([self.pymata.ANALOG | self.pymata.LATCH_MODE,pin, value, time.time()])else:updated_latch_entry = latching_entryupdated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHEDupdated_latch_entry[self.ANALOG_LATCHED_DATA] = valueupdated_latch_entry[self.ANALOG_TIME_STAMP] = time.time()self.analog_latch_table[pin] = updated_latch_entryelse:pass elif latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.ANALOG_LATCH_LT:if value < latching_entry[self.ANALOG_LATCH_DATA_TARGET]:if latching_entry[self.ANALOG_LATCH_CALLBACK] is not None:latching_entry[self.ANALOG_LATCH_CALLBACK]([self.pymata.ANALOG | self.pymata.LATCH_MODE,pin, value, time.time()])self.analog_latch_table[pin] = [, , , , , None]else:updated_latch_entry = latching_entryupdated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHEDupdated_latch_entry[self.ANALOG_LATCHED_DATA] = valueupdated_latch_entry[self.ANALOG_TIME_STAMP] = time.time()self.analog_latch_table[pin] = updated_latch_entryelse:pass elif latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.ANALOG_LATCH_LTE:if value <= latching_entry[self.ANALOG_LATCH_DATA_TARGET]:if latching_entry[self.ANALOG_LATCH_CALLBACK] is not None:latching_entry[self.ANALOG_LATCH_CALLBACK]([self.pymata.ANALOG | self.pymata.LATCH_MODE,pin, value, 
time.time()])self.analog_latch_table[pin] = [, , , , , None]else:updated_latch_entry = latching_entryupdated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHEDupdated_latch_entry[self.ANALOG_LATCHED_DATA] = valueupdated_latch_entry[self.ANALOG_TIME_STAMP] = time.time()self.analog_latch_table[pin] = updated_latch_entryelse:pass else:pass", "docstring": "This method handles the incoming analog data message.\nIt stores the data value for the pin in the analog response table.\nIf a callback function was associated with this pin, the callback function is invoked.\nThis method also checks to see if latching was requested for the pin. If the latch criteria was met,\nthe latching table is updated. If a latching callback function was provided by the user, a latching\nnotification callback message is sent to the user in place of updating the latching table.\n\n:param data: Message data from Firmata\n\n:return: No return value.", "id": "f9310:c0:m10"} {"signature": "def digital_message(self, data):", "body": "port = data[]port_data = (data[self.MSB] << ) + data[self.LSB]pin = port * for pin in range(pin, min(pin + , self.total_pins_discovered)):with self.pymata.data_lock:prev_data = self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE]self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE] = port_data & if prev_data != port_data & :callback = self.digital_response_table[pin][self.RESPONSE_TABLE_CALLBACK]if callback:callback([self.pymata.DIGITAL, pin,self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE]])latching_entry = self.digital_latch_table[pin]if latching_entry[self.LATCH_STATE] == self.LATCH_ARMED:if latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.DIGITAL_LATCH_LOW:if (port_data & ) == :if latching_entry[self.DIGITAL_LATCH_CALLBACK] is not None:self.digital_latch_table[pin] = [, , , , None]latching_entry[self.DIGITAL_LATCH_CALLBACK]([self.pymata.OUTPUT | self.pymata.LATCH_MODE,pin, , time.time()])else:updated_latch_entry = latching_entryupdated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHEDupdated_latch_entry[self.DIGITAL_LATCHED_DATA] = self.DIGITAL_LATCH_LOWupdated_latch_entry[self.DIGITAL_TIME_STAMP] = time.time()else:passelif latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.DIGITAL_LATCH_HIGH:if port_data & :if latching_entry[self.DIGITAL_LATCH_CALLBACK] is not None:self.digital_latch_table[pin] = [, , , , None]latching_entry[self.DIGITAL_LATCH_CALLBACK]([self.pymata.OUTPUT | self.pymata.LATCH_MODE,pin, , time.time()])else:updated_latch_entry = latching_entryupdated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHEDupdated_latch_entry[self.DIGITAL_LATCHED_DATA] = self.DIGITAL_LATCH_HIGHupdated_latch_entry[self.DIGITAL_TIME_STAMP] = time.time()else:passelse:passport_data >>= ", "docstring": "This method handles the incoming digital message.\nIt stores the data values in the digital response table.\nData is stored for all 8 bits of a digital port\n\n:param data: Message data from Firmata\n\n:return: No return value.", "id": "f9310:c0:m11"} {"signature": "def encoder_data(self, data):", "body": "prev_val = self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE]val = int((data[self.MSB] << ) + data[self.LSB])if val > :val -= pin = data[]with self.pymata.data_lock:self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE] = valif prev_val != val:callback = self.digital_response_table[pin][self.RESPONSE_TABLE_CALLBACK]if callback is not 
None:callback([self.pymata.ENCODER, pin,self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE]])", "docstring": "This method handles the incoming encoder data message and stores\nthe data in the digital response table.\n\n:param data: Message data from Firmata\n\n:return: No return value.", "id": "f9310:c0:m12"} {"signature": "def sonar_data(self, data):", "body": "val = int((data[self.MSB] << ) + data[self.LSB])pin_number = data[]with self.pymata.data_lock:sonar_pin_entry = self.active_sonar_map[pin_number]self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE] = valif sonar_pin_entry[] is not None:if sonar_pin_entry[] != val:self.active_sonar_map[pin_number][]([self.pymata.SONAR, pin_number, val])sonar_pin_entry[] = valself.active_sonar_map[pin_number] = sonar_pin_entry", "docstring": "This method handles the incoming sonar data message and stores\nthe data in the response table.\n\n:param data: Message data from Firmata\n\n:return: No return value.", "id": "f9310:c0:m13"} {"signature": "def get_analog_response_table(self):", "body": "with self.pymata.data_lock:data = self.analog_response_tablereturn data", "docstring": "This method returns the entire analog response table to the caller\n:return: The analog response table.", "id": "f9310:c0:m14"} {"signature": "def get_digital_response_table(self):", "body": "with self.pymata.data_lock:data = self.digital_response_tablereturn data", "docstring": "This method returns the entire digital response table to the caller\n:return: The digital response table.", "id": "f9310:c0:m15"} {"signature": "def send_sysex(self, sysex_command, sysex_data=None):", "body": "if not sysex_data:sysex_data = []sysex_message = chr(self.START_SYSEX)sysex_message += chr(sysex_command)if len(sysex_data):for d in sysex_data:sysex_message += chr(d)sysex_message += chr(self.END_SYSEX)for data in sysex_message:self.pymata.transport.write(data)", "docstring": "This method will send a Sysex command to Firmata with any accompanying data\n\n:param sysex_command: sysex command\n\n:param sysex_data: data for command\n\n:return : No return value.", "id": "f9310:c0:m16"} {"signature": "def send_command(self, command):", "body": "send_message = \"\"for i in command:send_message += chr(i)for data in send_message:self.pymata.transport.write(data)", "docstring": "This method is used to transmit a non-sysex command.\n\n:param command: Command to send to firmata includes command + data formatted by caller\n\n:return : No return value.", "id": "f9310:c0:m17"} {"signature": "def system_reset(self):", "body": "data = chr(self.SYSTEM_RESET)self.pymata.transport.write(data)with self.pymata.data_lock:for _ in range(len(self.digital_response_table)):self.digital_response_table.pop()for _ in range(len(self.analog_response_table)):self.analog_response_table.pop()for pin in range(, self.total_pins_discovered):response_entry = [self.pymata.INPUT, , None]self.digital_response_table.append(response_entry)for pin in range(, self.number_of_analog_pins_discovered):response_entry = [self.pymata.INPUT, , None]self.analog_response_table.append(response_entry)", "docstring": "Send the reset command to the Arduino.\nIt resets the response tables to their initial values\n\n:return: No return value", "id": "f9310:c0:m18"} {"signature": "def _string_data(self, data):", "body": "print(\"\")string_to_print = []for i in data[::]:string_to_print.append(chr(i))print(\"\".join(string_to_print))", "docstring": "This method handles the incoming string data 
message from Firmata.\nThe string is printed to the console\n\n:param data: Message data from Firmata\n\n:return: No return value.s", "id": "f9310:c0:m19"} {"signature": "def i2c_reply(self, data):", "body": "reply_data = []address = (data[] & ) + (data[] << )register = data[] & + data[] << reply_data.append(register)for i in range(, len(data), ):data_item = (data[i] & ) + (data[i + ] << )reply_data.append(data_item)if address in self.i2c_map:i2c_data = self.i2c_map.get(address, None)i2c_data[] = reply_dataself.i2c_map[address] = i2c_dataif i2c_data[] is not None:i2c_data[]([self.pymata.I2C, address, reply_data])", "docstring": "This method receives replies to i2c_read requests. It stores the data for each i2c device\naddress in a dictionary called i2c_map. The data is retrieved via a call to i2c_get_read_data()\nin pymata.py\nIt a callback was specified in pymata.i2c_read, the raw data is sent through the callback\n\n:param data: raw data returned from i2c device", "id": "f9310:c0:m20"} {"signature": "def capability_response(self, data):", "body": "self.capability_query_results = data", "docstring": "This method handles a capability response message and stores the results to be retrieved\nvia get_capability_query_results() in pymata.py\n\n:param data: raw capability data", "id": "f9310:c0:m21"} {"signature": "def pin_state_response(self, data):", "body": "self.last_pin_query_results = data", "docstring": "This method handles a pin state response message and stores the results to be retrieved\nvia get_pin_state_query_results() in pymata.py\n\n:param data: raw pin state data", "id": "f9310:c0:m22"} {"signature": "def analog_mapping_response(self, data):", "body": "self.analog_mapping_query_results = data", "docstring": "This method handles an analog mapping query response message and stores the results to be retrieved\nvia get_analog_mapping_request_results() in pymata.py\n\n:param data: raw analog mapping data", "id": "f9310:c0:m23"} {"signature": "def stepper_version_response(self, data):", "body": "self.stepper_library_version = (data[] & ) + (data[] << )", "docstring": "This method handles a stepper library version message sent from the Arduino", "id": "f9310:c0:m24"} {"signature": "def run(self):", "body": "self.command_dispatch.update({self.REPORT_VERSION: [self.report_version, ]})self.command_dispatch.update({self.REPORT_FIRMWARE: [self.report_firmware, ]})self.command_dispatch.update({self.ANALOG_MESSAGE: [self.analog_message, ]})self.command_dispatch.update({self.DIGITAL_MESSAGE: [self.digital_message, ]})self.command_dispatch.update({self.ENCODER_DATA: [self.encoder_data, ]})self.command_dispatch.update({self.SONAR_DATA: [self.sonar_data, ]})self.command_dispatch.update({self.STRING_DATA: [self._string_data, ]})self.command_dispatch.update({self.I2C_REPLY: [self.i2c_reply, ]})self.command_dispatch.update({self.CAPABILITY_RESPONSE: [self.capability_response, ]})self.command_dispatch.update({self.PIN_STATE_RESPONSE: [self.pin_state_response, ]})self.command_dispatch.update({self.ANALOG_MAPPING_RESPONSE: [self.analog_mapping_response, ]})self.command_dispatch.update({self.STEPPER_DATA: [self.stepper_version_response, ]})while not self.is_stopped():if len(self.pymata.command_deque):data = self.pymata.command_deque.popleft()command_data = []if data == self.START_SYSEX:while len(self.pymata.command_deque) == :passsysex_command = self.pymata.command_deque.popleft()dispatch_entry = self.command_dispatch.get(sysex_command)method = dispatch_entry[]end_of_sysex = Falsewhile not 
end_of_sysex:while len(self.pymata.command_deque) == :passdata = self.pymata.command_deque.popleft()if data != self.END_SYSEX:command_data.append(data)else:end_of_sysex = Truemethod(command_data)continueelif <= data <= :if <= data <= :port = data & command_data.append(port)data = elif <= data <= :pin = data & command_data.append(pin)data = else:passdispatch_entry = self.command_dispatch.get(data)method = dispatch_entry[]num_args = dispatch_entry[]for i in range(num_args):while len(self.pymata.command_deque) == :passdata = self.pymata.command_deque.popleft()command_data.append(data)method(command_data)continueelse:time.sleep()", "docstring": "This method starts the thread that continuously runs to receive and interpret\nmessages coming from Firmata. This must be the last method in this file\nIt also checks the deque for messages to be sent to Firmata.", "id": "f9310:c0:m25"} {"signature": "def __init__(self, port_id='', bluetooth=True, verbose=True, baud_rate=):", "body": "self.baud_rate = baud_ratetry:self.verbose = verboseif self.verbose:print(\"\" % sys.version)print('')self.transport = PyMataSerial(port_id, self.command_deque, self.baud_rate)if bluetooth:time.sleep()self.transport.open(self.verbose)if bluetooth:time.sleep()else:time.sleep()self.transport.start()self._command_handler = PyMataCommandHandler(self)self._command_handler.system_reset()self.LATCH_IGNORE = self._command_handler.LATCH_IGNOREself.LATCH_ARMED = self._command_handler.LATCH_ARMEDself.LATCH_LATCHED = self._command_handler.LATCH_LATCHEDself.DIGITAL_LATCH_HIGH = self._command_handler.DIGITAL_LATCH_HIGHself.DIGITAL_LATCH_LOW = self._command_handler.DIGITAL_LATCH_LOWself.ANALOG_LATCH_GT = self._command_handler.ANALOG_LATCH_GTself.ANALOG_LATCH_LT = self._command_handler.ANALOG_LATCH_LTself.ANALOG_LATCH_GTE = self._command_handler.ANALOG_LATCH_GTEself.ANALOG_LATCH_LTE = self._command_handler.ANALOG_LATCH_LTEself.LATCH_PIN = self.LATCH_STATE = self.LATCHED_DATA = self.LATCHED_TIME_STAMP = self._command_handler.start()if self.verbose:print('')if not self._command_handler.auto_discover_board(self.verbose):if self.verbose:print(\"\")self._command_handler.stop()self.transport.stop()self._command_handler.join()self.transport.join()time.sleep()except KeyboardInterrupt:if self.verbose:print(\"\")sys.exit()", "docstring": "The \"constructor\" instantiates the entire interface. It starts the operational threads for the serial\ninterface as well as for the command handler.\n\n:param port_id: Communications port specifier (COM3, /dev/ttyACM0, etc)\n\n:param bluetooth: Sets start up delays for bluetooth connectivity. Set to False for faster start up.\n\n:param verbose: If set to False, the status print statements are suppressed.\n\n:param baud_rate: Set serial baud rate. Must match that of Firmata sketch on Arduino", "id": "f9311:c0:m0"} {"signature": "def analog_mapping_query(self):", "body": "self._command_handler.send_sysex(self._command_handler.ANALOG_MAPPING_QUERY, None)", "docstring": "Send an analog mapping query message via sysex. 
Client retrieves the results with a\ncall to get_analog_mapping_request_results()", "id": "f9311:c0:m1"} {"signature": "def analog_read(self, pin):", "body": "with self.data_lock:data = self._command_handler.analog_response_table[pin][self._command_handler.RESPONSE_TABLE_PIN_DATA_VALUE]return data", "docstring": "Retrieve the last analog data value received for the specified pin.\n\n:param pin: Selected pin\n\n:return: The last value entered into the analog response table.", "id": "f9311:c0:m2"} {"signature": "def analog_write(self, pin, value):", "body": "if self._command_handler.ANALOG_MESSAGE + pin < :command = [self._command_handler.ANALOG_MESSAGE + pin, value & , (value >> ) & ]self._command_handler.send_command(command)else:self.extended_analog(pin, value)", "docstring": "Set the specified pin to the specified value.\n\n:param pin: Pin number\n\n:param value: Pin value\n\n:return: No return value", "id": "f9311:c0:m3"} {"signature": "def capability_query(self):", "body": "self._command_handler.send_sysex(self._command_handler.CAPABILITY_QUERY, None)", "docstring": "Send a Firmata capability query message via sysex. Client retrieves the results with a\ncall to get_capability_query_results()\nThe Arduino can be rather slow in responding to this command. For \nthe Mega 2560 R3 it has taken up to 25 seconds for a response.", "id": "f9311:c0:m4"} {"signature": "def close(self):", "body": "self._command_handler.system_reset()self._command_handler.stop()self.transport.stop()self.transport.close()if self.verbose:print(\"\")sys.exit()", "docstring": "This method will close the transport (serial port) and exit\n:return: No return value, but sys.exit(0) is called.", "id": "f9311:c0:m5"} {"signature": "def digital_read(self, pin):", "body": "with self.data_lock:data =self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_PIN_DATA_VALUE]return data", "docstring": "Retrieve the last digital data value received for the specified pin.\nNOTE: This command will return values for digital, pwm, etc, pin types\n\n:param pin: Selected pin\n\n:return: The last value entered into the digital response table.", "id": "f9311:c0:m6"} {"signature": "def digital_write(self, pin, value):", "body": "port = pin // calculated_command = self._command_handler.DIGITAL_MESSAGE + portmask = << (pin % )if value == :self.digital_output_port_pins[port] |= maskelse:self.digital_output_port_pins[port] &= ~maskcommand = (calculated_command, self.digital_output_port_pins[port] & ,(self.digital_output_port_pins[port] >> ) & )self._command_handler.send_command(command)", "docstring": "Set the specified pin to the specified value.\n\n:param pin: pin number\n\n:param value: pin value\n\n:return: No return value", "id": "f9311:c0:m7"} {"signature": "def disable_analog_reporting(self, pin):", "body": "command = [self._command_handler.REPORT_ANALOG + pin, self.REPORTING_DISABLE]self._command_handler.send_command(command)", "docstring": "Disables analog reporting for a single analog pin.\n\n:param pin: Analog pin number. For example for A0, the number is 0.\n\n:return: No return value", "id": "f9311:c0:m8"} {"signature": "def disable_digital_reporting(self, pin):", "body": "port = pin // command = [self._command_handler.REPORT_DIGITAL + port, self.REPORTING_DISABLE]self._command_handler.send_command(command)", "docstring": "Disables digital reporting. 
By turning reporting off for this pin, reporting\nis disabled for all 8 bits in the \"port\" -\n\n:param pin: Pin and all pins for this port\n\n:return: No return value", "id": "f9311:c0:m9"} {"signature": "def enable_analog_reporting(self, pin):", "body": "command = [self._command_handler.REPORT_ANALOG + pin, self.REPORTING_ENABLE]self._command_handler.send_command(command)", "docstring": "Enables analog reporting. By turning reporting on for a single pin.\n\n:param pin: Analog pin number. For example for A0, the number is 0.\n\n:return: No return value", "id": "f9311:c0:m10"} {"signature": "def enable_digital_reporting(self, pin):", "body": "port = pin // command = [self._command_handler.REPORT_DIGITAL + port, self.REPORTING_ENABLE]self._command_handler.send_command(command)", "docstring": "Enables digital reporting. By turning reporting on for all 8 bits in the \"port\" -\nthis is part of Firmata's protocol specification.\n\n:param pin: Pin and all pins for this port\n\n:return: No return value", "id": "f9311:c0:m11"} {"signature": "def encoder_config(self, pin_a, pin_b, cb=None):", "body": "data = [pin_a, pin_b]self._command_handler.digital_response_table[pin_a][self._command_handler.RESPONSE_TABLE_MODE]= self.ENCODERself._command_handler.digital_response_table[pin_a][self._command_handler.RESPONSE_TABLE_CALLBACK] = cbself.enable_digital_reporting(pin_a)self._command_handler.digital_response_table[pin_b][self._command_handler.RESPONSE_TABLE_MODE]= self.ENCODERself._command_handler.digital_response_table[pin_b][self._command_handler.RESPONSE_TABLE_CALLBACK] = cbself.enable_digital_reporting(pin_b)self._command_handler.send_sysex(self._command_handler.ENCODER_CONFIG, data)", "docstring": "This command enables the rotary encoder (2 pin + ground) and will\nenable encoder reporting.\n\nNOTE: This command is not currently part of standard arduino firmata, but is provided for legacy\nsupport of CodeShield on an Arduino UNO.\n\nEncoder data is retrieved by performing a digital_read from pin a (encoder pin 1)\n\n:param pin_a: Encoder pin 1.\n\n:param pin_b: Encoder pin 2.\n\n:param cb: callback function to report encoder changes\n\n:return: No return value", "id": "f9311:c0:m12"} {"signature": "def extended_analog(self, pin, data):", "body": "analog_data = [pin, data & , (data >> ) & , (data >> ) & ]self._command_handler.send_sysex(self._command_handler.EXTENDED_ANALOG, analog_data)", "docstring": "This method will send an extended data analog output command to the selected pin\n\n:param pin: 0 - 127\n\n:param data: 0 - 0xfffff", "id": "f9311:c0:m13"} {"signature": "def get_analog_latch_data(self, pin):", "body": "return self._command_handler.get_analog_latch_data(pin)", "docstring": "A list is returned containing the latch state for the pin, the latched value, and the time stamp\n[pin_num, latch_state, latched_value, time_stamp]\nIf the the latch state is LATCH_LATCHED, the table is reset (data and timestamp set to zero)\n\n:param pin: Pin number.\n\n:return: [pin, latch_state, latch_data_value, time_stamp]", "id": "f9311:c0:m14"} {"signature": "def get_analog_mapping_request_results(self):", "body": "return self._command_handler.analog_mapping_query_results", "docstring": "Call this method after calling analog_mapping_query() to retrieve its results\n:return: raw data returned by firmata", "id": "f9311:c0:m15"} {"signature": "def get_analog_response_table(self):", "body": "return self._command_handler.get_analog_response_table()", "docstring": "This method returns a list of lists representing 
the current pin mode and\nassociated data values for all analog pins.\nAll configured pin types, both input and output will be listed. Output pin data will contain zero.\n:return: The last update of the digital response table", "id": "f9311:c0:m16"} {"signature": "def get_capability_query_results(self):", "body": "return self._command_handler.capability_query_results", "docstring": "Retrieve the data returned by a previous call to capability_query()\n:return: Raw capability data returned by firmata", "id": "f9311:c0:m17"} {"signature": "def get_digital_latch_data(self, pin):", "body": "return self._command_handler.get_digital_latch_data(pin)", "docstring": "A list is returned containing the latch state for the pin, the latched value, and the time stamp\n[pin_num, latch_state, latched_value, time_stamp]\nIf the the latch state is LATCH_LATCHED, the table is reset (data and timestamp set to zero)\n\n:param pin: Pin number.\n\n:return: [pin, latch_state, latch_data_value, time_stamp]", "id": "f9311:c0:m18"} {"signature": "def get_digital_response_table(self):", "body": "return self._command_handler.get_digital_response_table()", "docstring": "This method returns a list of lists representing the current pin mode\nand associated data for all digital pins.\nAll pin types, both input and output will be listed. Output pin data will contain zero.\n:return: The last update of the digital response table", "id": "f9311:c0:m19"} {"signature": "def get_firmata_version(self):", "body": "return self._command_handler.firmata_version", "docstring": "Retrieve the firmata version information returned by a previous call to refresh_report_version()\n:return: Firmata_version list [major, minor] or None", "id": "f9311:c0:m20"} {"signature": "def get_firmata_firmware_version(self):", "body": "return self._command_handler.firmata_firmware", "docstring": "Retrieve the firmware id information returned by a previous call to refresh_report_firmware()\n:return: Firmata_firmware list [major, minor, file_name] or None", "id": "f9311:c0:m21"} {"signature": "def get_pin_state_query_results(self):", "body": "r_data = self._command_handler.last_pin_query_resultsself._command_handler.last_pin_query_results = []return r_data", "docstring": "This method returns the results of a previous call to pin_state_query() and then resets\nthe pin state query data to None\n\n:return: Raw pin state query data", "id": "f9311:c0:m22"} {"signature": "def get_pymata_version(self):", "body": "return ['', '']", "docstring": "Returns the PyMata version number in a list: [Major Number, Minor Number]\n\n:return:", "id": "f9311:c0:m23"} {"signature": "def get_sonar_data(self):", "body": "return self._command_handler.active_sonar_map", "docstring": "Retrieve Ping (HC-SR04 type) data. The data is presented as a dictionary.\nThe 'key' is the trigger pin specified in sonar_config() and the 'data' is the\ncurrent measured distance (in centimeters)\nfor that pin. 
If there is no data, the value is set to IGNORE (127).\n\n:return: active_sonar_map", "id": "f9311:c0:m24"} {"signature": "def get_stepper_version(self, timeout=):", "body": "start_time = time.time()while self._command_handler.stepper_library_version <= :if time.time() - start_time > timeout:if self.verbose is True:print(\"\"\"\")returnelse:passreturn self._command_handler.stepper_library_version", "docstring": "Get the stepper library version number.\n\n:param timeout: specify a time to allow arduino to process and return a version\n\n:return: the stepper version number if it was set.", "id": "f9311:c0:m25"} {"signature": "def i2c_config(self, read_delay_time=, pin_type=None, clk_pin=, data_pin=):", "body": "data = [read_delay_time & , (read_delay_time >> ) & ]self._command_handler.send_sysex(self._command_handler.I2C_CONFIG, data)if pin_type:if pin_type == self.DIGITAL:self._command_handler.digital_response_table[clk_pin][self._command_handler.RESPONSE_TABLE_MODE]= self.I2Cself._command_handler.digital_response_table[data_pin][self._command_handler.RESPONSE_TABLE_MODE]= self.I2Celse:self._command_handler.analog_response_table[clk_pin][self._command_handler.RESPONSE_TABLE_MODE]= self.I2Cself._command_handler.analog_response_table[data_pin][self._command_handler.RESPONSE_TABLE_MODE]= self.I2C", "docstring": "NOTE: THIS METHOD MUST BE CALLED BEFORE ANY I2C REQUEST IS MADE\nThis method initializes Firmata for I2c operations.\nIt allows setting of a read time delay amount, and to optionally track\nthe pins as I2C in the appropriate response table.\nTo track pins: Set the pin_type to ANALOG or DIGITAL and provide the pin numbers.\nIf using ANALOG, pin numbers use the analog number, for example A4: use 4.\n\n:param read_delay_time: an optional parameter, default is 0\n\n:param pin_type: ANALOG or DIGITAL to select response table type to track pin numbers\n\n:param clk_pin: pin number (see comment above).\n\n:param data_pin: pin number (see comment above).\n\n:return: No Return Value", "id": "f9311:c0:m26"} {"signature": "def i2c_read(self, address, register, number_of_bytes, read_type, cb=None):", "body": "data = [address, read_type, register & , (register >> ) & ,number_of_bytes & , (number_of_bytes >> ) & ]self._command_handler.i2c_map[address] = [cb, None]self._command_handler.send_sysex(self._command_handler.I2C_REQUEST, data)", "docstring": "This method requests the read of an i2c device. 
Results are retrieved by a call to\ni2c_get_read_data().\nIf a callback method is provided, when data is received from the device it will be sent to the callback method\n\n:param address: i2c device address\n\n:param register: register number (can be set to zero)\n\n:param number_of_bytes: number of bytes expected to be returned\n\n:param read_type: I2C_READ or I2C_READ_CONTINUOUSLY\n\n:param cb: Optional callback function to report i2c data as result of read command", "id": "f9311:c0:m27"} {"signature": "def i2c_write(self, address, *args):", "body": "data = [address, self.I2C_WRITE]for item in args:data.append(item & )data.append((item >> ) & )self._command_handler.send_sysex(self._command_handler.I2C_REQUEST, data)", "docstring": "Write data to an i2c device.\n\n:param address: i2c device address\n\n:param args: A variable number of bytes to be sent to the device", "id": "f9311:c0:m28"} {"signature": "def i2c_stop_reading(self, address):", "body": "data = [address, self.I2C_STOP_READING]self._command_handler.send_sysex(self._command_handler.I2C_REQUEST, data)", "docstring": "This method stops an I2C_READ_CONTINUOUSLY operation for the i2c device address specified.\n\n:param address: address of i2c device", "id": "f9311:c0:m29"} {"signature": "def i2c_get_read_data(self, address):", "body": "if address in self._command_handler.i2c_map:map_entry = self._command_handler.i2c_map[address]return map_entry[]", "docstring": "This method retrieves the i2c read data as the result of an i2c_read() command.\n\n:param address: i2c device address\n\n:return: raw data read from device", "id": "f9311:c0:m30"} {"signature": "def pin_state_query(self, pin):", "body": "self._command_handler.send_sysex(self._command_handler.PIN_STATE_QUERY, [pin])", "docstring": "This method issues a pin state query command. Data returned is retrieved via\na call to get_pin_state_query_results()\n:param pin: pin number", "id": "f9311:c0:m31"} {"signature": "def play_tone(self, pin, tone_command, frequency, duration):", "body": "if tone_command == self.TONE_TONE:if duration:data = [tone_command, pin, frequency & , (frequency >> ) & , duration & , (duration >> ) & ]else:data = [tone_command, pin, frequency & , (frequency >> ) & , , ]self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] =self.TONEelse:data = [tone_command, pin]self._command_handler.send_sysex(self._command_handler.TONE_PLAY, data)", "docstring": "This method will call the Tone library for the selected pin.\nIf the tone command is set to TONE_TONE, then the specified tone will be played.\nElse, if the tone command is TONE_NO_TONE, then any currently playing tone will be disabled.\nIt is intended for a future release of Arduino Firmata\n\n:param pin: Pin number\n\n:param tone_command: Either TONE_TONE, or TONE_NO_TONE\n\n:param frequency: Frequency of tone in hz\n\n:param duration: Duration of tone in milliseconds\n\n:return: No return value", "id": "f9311:c0:m32"} {"signature": "def refresh_report_version(self):", "body": "command = [self._command_handler.REPORT_VERSION]self._command_handler.send_command(command)", "docstring": "This method will query firmata for the report version.\nRetrieve the report version via a call to get_firmata_version()", "id": "f9311:c0:m33"} {"signature": "def refresh_report_firmware(self):", "body": "self._command_handler.send_sysex(self._command_handler.REPORT_FIRMWARE, None)", "docstring": "This method will query firmata to report firmware. 
Retrieve the report via a\ncall to get_firmata_firmware_version()", "id": "f9311:c0:m34"} {"signature": "def reset(self):", "body": "for pin in range(, self._command_handler.total_pins_discovered):if self._command_handler.digital_response_table[self._command_handler.RESPONSE_TABLE_MODE]== self.PWM:self.analog_write(pin, )elif self._command_handler.digital_response_table[self._command_handler.RESPONSE_TABLE_MODE]== self.SERVO:self.analog_write(pin, )elif self._command_handler.digital_response_table[self._command_handler.RESPONSE_TABLE_MODE]== self.TONE:data = [self.TONE_NO_TONE, pin]self._command_handler.send_sysex(self._command_handler.TONE_PLAY, data)else:self.digital_write(pin, )self._command_handler.system_reset()", "docstring": "This command sends a reset message to the Arduino. The response tables will be reinitialized\n:return: No return value.", "id": "f9311:c0:m35"} {"signature": "def set_analog_latch(self, pin, threshold_type, threshold_value, cb=None):", "body": "if self.ANALOG_LATCH_GT <= threshold_type <= self.ANALOG_LATCH_LTE:if <= threshold_value <= :self._command_handler.set_analog_latch(pin, threshold_type, threshold_value, cb)return Trueelse:return False", "docstring": "This method \"arms\" an analog pin for its data to be latched and saved in the latching table\nIf a callback method is provided, when latching criteria is achieved, the callback function is called\nwith latching data notification. In that case, the latching table is not updated.\n\n:param pin: Analog pin number (value following an 'A' designator, i.e. A5 = 5\n\n:param threshold_type: ANALOG_LATCH_GT | ANALOG_LATCH_LT | ANALOG_LATCH_GTE | ANALOG_LATCH_LTE\n\n:param threshold_value: numerical value - between 0 and 1023\n\n:param cb: callback method\n\n:return: True if successful, False if parameter data is invalid", "id": "f9311:c0:m36"} {"signature": "def set_digital_latch(self, pin, threshold_type, cb=None):", "body": "if <= threshold_type <= :self._command_handler.set_digital_latch(pin, threshold_type, cb)return Trueelse:return False", "docstring": "This method \"arms\" a digital pin for its data to be latched and saved in the latching table\nIf a callback method is provided, when latching criteria is achieved, the callback function is called\nwith latching data notification. 
In that case, the latching table is not updated.\n\n:param pin: Digital pin number\n\n:param threshold_type: DIGITAL_LATCH_HIGH | DIGITAL_LATCH_LOW\n\n:param cb: callback function\n\n:return: True if successful, False if parameter data is invalid", "id": "f9311:c0:m37"} {"signature": "def set_pin_mode(self, pin, mode, pin_type, cb=None):", "body": "command = [self._command_handler.SET_PIN_MODE, pin, mode]self._command_handler.send_command(command)if mode == self.INPUT or mode == self.PULLUP:if pin_type == self.ANALOG:self._command_handler.analog_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] =self.INPUTself._command_handler.analog_response_table[pin][self._command_handler.RESPONSE_TABLE_CALLBACK] = cbself.enable_analog_reporting(pin)else:self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] =self.INPUTself._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_CALLBACK] = cbself.enable_digital_reporting(pin)else: if pin_type == self.ANALOG:self._command_handler.analog_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] = modeelse:self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] = mode", "docstring": "This method sets a pin to the desired pin mode for the pin_type.\nIt automatically enables data reporting.\nNOTE: DO NOT CALL THIS METHOD FOR I2C. See i2c_config().\n\n:param pin: Pin number (for analog use the analog number, for example A4: use 4)\n\n:param mode: INPUT, OUTPUT, PWM, PULLUP\n\n:param pin_type: ANALOG or DIGITAL\n\n:param cb: This is an optional callback function to report data changes to the user\n\n:return: No return value", "id": "f9311:c0:m38"} {"signature": "def set_sampling_interval(self, interval):", "body": "data = [interval & , (interval >> ) & ]self._command_handler.send_sysex(self._command_handler.SAMPLING_INTERVAL, data)", "docstring": "This method sends the desired sampling interval to Firmata.\nNote: Standard Firmata will ignore any interval less than 10 milliseconds\n\n:param interval: Integer value for desired sampling interval in milliseconds\n\n:return: No return value.", "id": "f9311:c0:m39"} {"signature": "def servo_config(self, pin, min_pulse=, max_pulse=):", "body": "self.set_pin_mode(pin, self.SERVO, self.OUTPUT)command = [pin, min_pulse & , (min_pulse >> ) & ,max_pulse & , (max_pulse >> ) & ]self._command_handler.send_sysex(self._command_handler.SERVO_CONFIG, command)", "docstring": "Configure a pin as a servo pin. Set pulse min, max in ms.\n\n:param pin: Servo Pin.\n\n:param min_pulse: Min pulse width in ms.\n\n:param max_pulse: Max pulse width in ms.\n\n:return: No return value", "id": "f9311:c0:m40"} {"signature": "def sonar_config(self, trigger_pin, echo_pin, cb=None, ping_interval=, max_distance=):", "body": "if max_distance > :max_distance = max_distance_lsb = max_distance & max_distance_msb = (max_distance >> ) & data = [trigger_pin, echo_pin, ping_interval, max_distance_lsb, max_distance_msb]self.set_pin_mode(trigger_pin, self.SONAR, self.INPUT)self.set_pin_mode(echo_pin, self.SONAR, self.INPUT)if len(self._command_handler.active_sonar_map) > :if self.verbose:print(\"\")returnelse:with self.data_lock:self._command_handler.active_sonar_map[trigger_pin] = [cb, [self.IGNORE]]self._command_handler.send_sysex(self._command_handler.SONAR_CONFIG, data)", "docstring": "Configure the pins,ping interval and maximum distance for an HC-SR04 type device.\nSingle pin configuration may be used. 
To do so, set both the trigger and echo pins to the same value.\nUp to a maximum of 6 SONAR devices is supported\nIf the maximum is exceeded a message is sent to the console and the request is ignored.\nNOTE: data is measured in centimeters\n\n:param trigger_pin: The pin number of for the trigger (transmitter).\n\n:param echo_pin: The pin number for the received echo.\n\n:param ping_interval: Minimum interval between pings. Lowest number to use is 33 ms.Max is 127\n\n:param max_distance: Maximum distance in cm. Max is 200.\n\n:param cb: optional callback function to report sonar data changes", "id": "f9311:c0:m41"} {"signature": "def stepper_config(self, steps_per_revolution, stepper_pins):", "body": "data = [self.STEPPER_CONFIGURE, steps_per_revolution & , (steps_per_revolution >> ) & ]for pin in range(len(stepper_pins)):data.append(stepper_pins[pin])self._command_handler.send_sysex(self._command_handler.STEPPER_DATA, data)", "docstring": "Configure stepper motor prior to operation.\n\n:param steps_per_revolution: number of steps per motor revolution\n\n:param stepper_pins: a list of control pin numbers - either 4 or 2", "id": "f9311:c0:m42"} {"signature": "def stepper_step(self, motor_speed, number_of_steps):", "body": "if number_of_steps > :direction = else:direction = abs_number_of_steps = abs(number_of_steps)data = [self.STEPPER_STEP, motor_speed & , (motor_speed >> ) & , (motor_speed >> ) & ,abs_number_of_steps & , (abs_number_of_steps >> ) & , direction]self._command_handler.send_sysex(self._command_handler.STEPPER_DATA, data)", "docstring": "Move a stepper motor for the number of steps at the specified speed\n\n:param motor_speed: 21 bits of data to set motor speed\n\n:param number_of_steps: 14 bits for number of steps & direction\n positive is forward, negative is reverse", "id": "f9311:c0:m43"} {"signature": "def stepper_request_library_version(self):", "body": "data = [self.STEPPER_LIBRARY_VERSION]self._command_handler.send_sysex(self._command_handler.STEPPER_DATA, data)", "docstring": "Request the stepper library version from the Arduino.\nTo retrieve the version after this command is called, call\nget_stepper_version", "id": "f9311:c0:m44"} {"signature": "def __init__(self, port_id, command_deque, baud_rate):", "body": "self.port_id = port_idself.command_deque = command_dequeself.baud_rate = baud_ratethreading.Thread.__init__(self)self.daemon = Trueself.arduino = serial.Serial(self.port_id, self.baud_rate,timeout=int(self.timeout), writeTimeout=)self.stop_event = threading.Event()if sys.platform == '':self.arduino.nonblocking()", "docstring": "Constructor:\n\n:param command_deque: A reference to the deque shared with the _command_handler\n\n:param baud_rate: must match that of Arduino Sketch", "id": "f9312:c0:m0"} {"signature": "def open(self, verbose):", "body": "if verbose:print('' % self.port_id)try:self.arduino.close()time.sleep()self.arduino.open()time.sleep()return self.arduinoexcept Exception:raise", "docstring": "open the serial port using the configuration data\nreturns a reference to this instance", "id": "f9312:c0:m3"} {"signature": "def close(self):", "body": "try:self.arduino.close()except OSError:pass", "docstring": "Close the serial port\nreturn: None", "id": "f9312:c0:m4"} {"signature": "def write(self, data):", "body": "if sys.version_info[] < :self.arduino.write(data)else:self.arduino.write(bytes([ord(data)]))", "docstring": "write the data to the serial port\nreturn: None", "id": "f9312:c0:m5"} {"signature": "def run(self):", "body": "while not 
self.is_stopped():try:if self.arduino.inWaiting():c = self.arduino.read()self.command_deque.append(ord(c))else:time.sleep()except OSError:passexcept IOError:self.stop()self.close()", "docstring": "This method continually runs. If an incoming character is available on the serial port\nit is read and placed on the _command_deque\n@return: Never Returns", "id": "f9312:c0:m6"} {"signature": "def __init__(self, address, blink_rate, brightness):", "body": "self.firmata = PyMata(\"\")self.board_address = addressself.blink_rate = blink_rateself.brightness = brightnessself.clear_display_buffer()self.firmata.i2c_config(, self.firmata.ANALOG, , )self.oscillator_set(self.OSCILLATOR_ON)self.set_blink_rate(self.blink_rate)self.set_brightness(self.brightness)", "docstring": "@param address: I2C address of the device\n@param blink_rate: desired blink rate\n@param brightness: brightness level for the display", "id": "f9322:c0:m0"} {"signature": "def set_blink_rate(self, b):", "body": "if b > :b = self.firmata.i2c_write(self.board_address,(self.HT16K33_BLINK_CMD | self.HT16K33_BLINK_DISPLAYON | (b << )))", "docstring": "Set the user's desired blink rate (0 - 3)\n@param b: blink rate", "id": "f9322:c0:m1"} {"signature": "def oscillator_set(self, osc_mode):", "body": "self.firmata.i2c_write(self.board_address, osc_mode)", "docstring": "Turn oscillator on or off\n@param osc_mode: osc mode (OSCILLATOR_ON or OSCILLATOR_OFF)", "id": "f9322:c0:m2"} {"signature": "def set_brightness(self, brightness):", "body": "if brightness > :brightness = brightness |= self.brightness = brightnessself.firmata.i2c_write(, brightness)", "docstring": "Set the brightness level for the entire display\n@param brightness: brightness level (0 -15)", "id": "f9322:c0:m3"} {"signature": "def set_pixel(self, row, column, color, suppress_write):", "body": "if (row < ) or (row >= ):print(\"\")returnif (column < ) or (column >= ):print(\"\")returnself.display_buffer[row][column] = colorgreen = red = for col in range(, ):if self.display_buffer[row][col] == self.LED_GREEN:green |= << colelif self.display_buffer[row][col] == self.LED_RED:red |= << colelif self.display_buffer[row][col] == self.LED_YELLOW:green |= << colred |= << colelif self.display_buffer[row][col] == self.LED_OFF:green &= ~( << col)red &= ~( << col)if not suppress_write:self.firmata.i2c_write(, row * , , green)self.firmata.i2c_write(, row * + , , red)", "docstring": "@param row: pixel row number\n@param column: pix column number\n@param color: pixel color (yellow is both red and green both on)\n@param suppress_write: if true, just sets the internal data structure, else writes out the pixel to the display", "id": "f9322:c0:m4"} {"signature": "def set_bit_map(self, shape, color):", "body": "for row in range(, ):data = shape[row]bit_mask = for column in range(, ):if data & bit_mask:self.set_pixel(row, column, color, True)bit_mask >>= self.output_entire_buffer()", "docstring": "Populate the bit map with the supplied \"shape\" and color\nand then write the entire bitmap to the display\n@param shape: pattern to display\n@param color: color for the pattern", "id": "f9322:c0:m5"} {"signature": "def output_entire_buffer(self):", "body": "green = red = for row in range(, ):for col in range(, ):if self.display_buffer[row][col] == self.LED_GREEN:green |= << colelif self.display_buffer[row][col] == self.LED_RED:red |= << colelif self.display_buffer[row][col] == self.LED_YELLOW:green |= << colred |= << colelif self.display_buffer[row][col] == self.LED_OFF:green &= ~( << col)red &= ~( << 
col)self.firmata.i2c_write(, row * , , green)self.firmata.i2c_write(, row * + , , red)", "docstring": "Write the entire buffer to the display", "id": "f9322:c0:m6"} {"signature": "def clear_display_buffer(self):", "body": "for row in range(, ):self.firmata.i2c_write(, row * , , )self.firmata.i2c_write(, (row * ) + , , )for column in range(, ):self.display_buffer[row][column] = ", "docstring": "Set all led's to off.", "id": "f9322:c0:m7"} {"signature": "def close(self):", "body": "self.firmata.close()", "docstring": "close the interface down cleanly", "id": "f9322:c0:m8"} {"signature": "def generate_versionwarning_data_json(app, config=None, **kwargs):", "body": "config = config or kwargs.pop('', None)if config is None:config = app.configif config.versionwarning_project_version in config.versionwarning_messages:custom = Truemessage = config.versionwarning_messages.get(config.versionwarning_project_version)else:custom = Falsemessage = config.versionwarning_default_messagebanner_html = config.versionwarning_banner_html.format(id_div=config.versionwarning_banner_id_div,banner_title=config.versionwarning_banner_title,message=message.format(**{config.versionwarning_message_placeholder: ''},),admonition_type=config.versionwarning_admonition_type,)data = json.dumps({'': {'': config.versionwarning_api_url,},'': {'': banner_html,'': config.versionwarning_banner_id_div,'': config.versionwarning_body_selector,'': custom,},'': {'': config.versionwarning_project_slug,},'': {'': config.versionwarning_project_version,},}, indent=)data_path = os.path.join(STATIC_PATH, '')if not os.path.exists(data_path):os.mkdir(data_path)with open(os.path.join(data_path, JSON_DATA_FILENAME), '') as f:f.write(data)config.html_static_path.append(STATIC_PATH)", "docstring": "Generate the ``versionwarning-data.json`` file.\n\nThis file is included in the output and read by the AJAX request when\naccessing to the documentation and used to compare the live versions with\nthe curent one.\n\nBesides, this file contains meta data about the project, the API to use and\nthe banner itself.", "id": "f9334:m0"} {"signature": "def tearDown(self):", "body": "del mapped['']del mapped['']", "docstring": "Remove definitions of the built functions", "id": "f9339:c0:m1"} {"signature": "def make_graph_2() -> BELGraph:", "body": "graph = BELGraph(name='',version='',description='',authors='',contact='',)graph.add_node_from_data(gene_f)graph.add_node_from_data(protein_e)graph.add_node_from_data(protein_b)graph.add_increases(protein_e,protein_b,citation='',evidence='',annotations={'': ''},)graph.add_increases(gene_f,protein_e,citation='',evidence='',annotations={'': ''})return graph", "docstring": "Make an example graph.", "id": "f9345:m1"} {"signature": "def make_graph_3() -> BELGraph:", "body": "graph = BELGraph(name='',version='',description='',authors='',contact='',)graph.add_increases(protein_a, protein_b, n(), n())graph.add_decreases(protein_b, gene_c, n(), n())graph.add_decreases(rna_d, gene_f, n(), n())graph.add_increases(protein_e, gene_f, n(), n())graph.add_increases(gene_f, gene_c, n(), n())graph.add_association(gene_c, protein_g, n(), n())return graph", "docstring": "Make an example graph.\n\n A -> B -| C\n D -| F -> C\n C -| F\n C -- G", "id": "f9345:m2"} {"signature": "def make_graph_4() -> BELGraph:", "body": "graph = BELGraph(name='',version='',description='',authors='',contact='',)graph.add_increases(protein_a, protein_b, n(), n())graph.add_decreases(protein_b, gene_c, n(), n())graph.add_decreases(protein_b, rna_d, n(), 
n())graph.add_decreases(protein_b, protein_e, n(), n())graph.add_decreases(protein_b, gene_f, n(), n())graph.add_increases(protein_b, protein_g, n(), n())graph.add_decreases(protein_b, protein_h, n(), n())graph.add_increases(protein_b, protein_h, n(), n())graph.add_increases(protein_b, protein_i, n(), n())graph.add_association(protein_b, protein_j, n(), n())return graph", "docstring": "Make an example graph.\n\n A -> B\n B -| C\n B -| D\n B -| E\n B -| F\n B -> G\n\n B -> H\n B -| H\n\n B -> I\n B -- J", "id": "f9345:m3"} {"signature": "def flatten_list_abundance(node: ListAbundance) -> ListAbundance:", "body": "return node.__class__(list(chain.from_iterable((flatten_list_abundance(member).membersif isinstance(member, ListAbundance) else[member])for member in node.members)))", "docstring": "Flattens the complex or composite abundance.", "id": "f9353:m0"} {"signature": "def list_abundance_expansion(graph: BELGraph) -> None:", "body": "mapping = {node: flatten_list_abundance(node)for node in graphif isinstance(node, ListAbundance)}relabel_nodes(graph, mapping, copy=False)", "docstring": "Flatten list abundances.", "id": "f9353:m1"} {"signature": "def list_abundance_cartesian_expansion(graph: BELGraph) -> None:", "body": "for u, v, k, d in list(graph.edges(keys=True, data=True)):if CITATION not in d:continueif isinstance(u, ListAbundance) and isinstance(v, ListAbundance):for u_member, v_member in itt.product(u.members, v.members):graph.add_qualified_edge(u_member, v_member,relation=d[RELATION],citation=d.get(CITATION),evidence=d.get(EVIDENCE),annotations=d.get(ANNOTATIONS),)elif isinstance(u, ListAbundance):for member in u.members:graph.add_qualified_edge(member, v,relation=d[RELATION],citation=d.get(CITATION),evidence=d.get(EVIDENCE),annotations=d.get(ANNOTATIONS),)elif isinstance(v, ListAbundance):for member in v.members:graph.add_qualified_edge(u, member,relation=d[RELATION],citation=d.get(CITATION),evidence=d.get(EVIDENCE),annotations=d.get(ANNOTATIONS),)_remove_list_abundance_nodes(graph)", "docstring": "Expand all list abundances to simple subject-predicate-object networks.", "id": "f9353:m2"} {"signature": "def _reaction_cartesion_expansion_unqualified_helper(graph: BELGraph,u: BaseEntity,v: BaseEntity,d: dict,) -> None:", "body": "if isinstance(u, Reaction) and isinstance(v, Reaction):enzymes = _get_catalysts_in_reaction(u) | _get_catalysts_in_reaction(v)for reactant, product in chain(itt.product(u.reactants, u.products),itt.product(v.reactants, v.products)):if reactant in enzymes or product in enzymes:continuegraph.add_unqualified_edge(reactant, product, INCREASES)for product, reactant in itt.product(u.products, u.reactants):if reactant in enzymes or product in enzymes:continuegraph.add_unqualified_edge(product, reactant, d[RELATION],)elif isinstance(u, Reaction):enzymes = _get_catalysts_in_reaction(u)for product in u.products:if product in enzymes:continueif v not in u.products and v not in u.reactants:graph.add_unqualified_edge(product, v, INCREASES)for reactant in u.reactants:graph.add_unqualified_edge(reactant, product, INCREASES)elif isinstance(v, Reaction):enzymes = _get_catalysts_in_reaction(v)for reactant in v.reactants:if reactant in enzymes:continueif u not in v.products and u not in v.reactants:graph.add_unqualified_edge(u, reactant, INCREASES)for product in v.products:graph.add_unqualified_edge(reactant, product, INCREASES)", "docstring": "Helper to deal with cartension expansion in unqualified edges.", "id": "f9353:m3"} {"signature": "def 
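The list_abundance_cartesian_expansion record above replaces every edge that touches a complex or composite node with edges over that node's members, taking the cartesian product when both endpoints are lists. A toy illustration of that idea over plain tuples (this is not the pybel API, just the underlying pattern):

    import itertools as itt

    def expand_edge(u, v):
        # Treat a list/tuple endpoint as a list abundance and fan the edge out
        # over its members; scalar endpoints pass through unchanged.
        us = u if isinstance(u, (list, tuple)) else [u]
        vs = v if isinstance(v, (list, tuple)) else [v]
        return list(itt.product(us, vs))

    # complex(A, B) -> C becomes A -> C and B -> C
    print(expand_edge(['A', 'B'], 'C'))  # [('A', 'C'), ('B', 'C')]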
_get_catalysts_in_reaction(reaction: Reaction) -> Set[BaseAbundance]:", "body": "return {reactantfor reactant in reaction.reactantsif reactant in reaction.products}", "docstring": "Return nodes that are both in reactants and reactions in a reaction.", "id": "f9353:m4"} {"signature": "def reaction_cartesian_expansion(graph: BELGraph, accept_unqualified_edges: bool = True) -> None:", "body": "for u, v, d in list(graph.edges(data=True)):if CITATION not in d and accept_unqualified_edges:_reaction_cartesion_expansion_unqualified_helper(graph, u, v, d)continueif isinstance(u, Reaction) and isinstance(v, Reaction):catalysts = _get_catalysts_in_reaction(u) | _get_catalysts_in_reaction(v)for reactant, product in chain(itt.product(u.reactants, u.products), itt.product(v.reactants, v.products)):if reactant in catalysts or product in catalysts:continuegraph.add_increases(reactant, product,citation=d.get(CITATION),evidence=d.get(EVIDENCE),annotations=d.get(ANNOTATIONS),)for product, reactant in itt.product(u.products, u.reactants):if reactant in catalysts or product in catalysts:continuegraph.add_qualified_edge(product, reactant,relation=d[RELATION],citation=d.get(CITATION),evidence=d.get(EVIDENCE),annotations=d.get(ANNOTATIONS),)elif isinstance(u, Reaction):catalysts = _get_catalysts_in_reaction(u)for product in u.products:if product in catalysts:continueif v not in u.products and v not in u.reactants:graph.add_increases(product, v,citation=d.get(CITATION),evidence=d.get(EVIDENCE),annotations=d.get(ANNOTATIONS),)for reactant in u.reactants:graph.add_increases(reactant, product,citation=d.get(CITATION),evidence=d.get(EVIDENCE),annotations=d.get(ANNOTATIONS),)elif isinstance(v, Reaction):for reactant in v.reactants:catalysts = _get_catalysts_in_reaction(v)if reactant in catalysts:continueif u not in v.products and u not in v.reactants:graph.add_increases(u, reactant,citation=d.get(CITATION),evidence=d.get(EVIDENCE),annotations=d.get(ANNOTATIONS),)for product in v.products:graph.add_increases(reactant, product,citation=d.get(CITATION),evidence=d.get(EVIDENCE),annotations=d.get(ANNOTATIONS),)_remove_reaction_nodes(graph)", "docstring": "Expand all reactions to simple subject-predicate-object networks.", "id": "f9353:m5"} {"signature": "def remove_reified_nodes(graph: BELGraph) -> None:", "body": "_remove_list_abundance_nodes(graph)_remove_reaction_nodes(graph)", "docstring": "Remove complex nodes.", "id": "f9353:m6"} {"signature": "def make_pubmed_abstract_group(pmids: Iterable[Union[str, int]]) -> Iterable[str]:", "body": "for pmid in set(pmids):yield ''res = requests.get(title_url_fmt.format(pmid))title = res.content.decode('').strip()yield ''.format(title, pmid)res = requests.get(abstract_url_fmt.format(pmid))abstract = res.content.decode('').strip()yield ''.format(abstract)yield ''", "docstring": "Build a skeleton for the citations' statements.\n\n :param pmids: A list of PubMed identifiers\n :return: An iterator over the lines of the citation section", "id": "f9354:m0"} {"signature": "def get_entrez_gene_data(entrez_ids: Iterable[Union[str, int]]):", "body": "url = PUBMED_GENE_QUERY_URL.format(''.join(str(x).strip() for x in entrez_ids))response = requests.get(url)tree = ElementTree.fromstring(response.content)return {element.attrib['']: {'': _sanitize(element.find('').text),'': element.find('').text}for element in tree.findall('')}", "docstring": "Get gene info from Entrez.", "id": "f9354:m2"} {"signature": "def make_pubmed_gene_group(entrez_ids: Iterable[Union[str, int]]) -> Iterable[str]:", 
"body": "url = PUBMED_GENE_QUERY_URL.format(''.join(str(x).strip() for x in entrez_ids))response = requests.get(url)tree = ElementTree.fromstring(response.content)for x in tree.findall(''):yield ''.format(x.find('').text)yield ''.format(x.attrib[''])yield ''.format(x.find('').text.strip().replace('', ''))yield ''", "docstring": "Builds a skeleton for gene summaries\n\n :param entrez_ids: A list of Entrez Gene identifiers to query the PubMed service\n :return: An iterator over statement lines for NCBI Entrez Gene summaries", "id": "f9354:m3"} {"signature": "def write_boilerplate(name: str,version: Optional[str] = None,description: Optional[str] = None,authors: Optional[str] = None,contact: Optional[str] = None,copyright: Optional[str] = None,licenses: Optional[str] = None,disclaimer: Optional[str] = None,namespace_url: Optional[Mapping[str, str]] = None,namespace_patterns: Optional[Mapping[str, str]] = None,annotation_url: Optional[Mapping[str, str]] = None,annotation_patterns: Optional[Mapping[str, str]] = None,annotation_list: Optional[Mapping[str, Set[str]]] = None,pmids: Optional[Iterable[Union[str, int]]] = None,entrez_ids: Optional[Iterable[Union[str, int]]] = None,file: Optional[TextIO] = None,) -> None:", "body": "lines = make_knowledge_header(name=name,version=version or '',description=description,authors=authors,contact=contact,copyright=copyright,licenses=licenses,disclaimer=disclaimer,namespace_url=namespace_url,namespace_patterns=namespace_patterns,annotation_url=annotation_url,annotation_patterns=annotation_patterns,annotation_list=annotation_list,)for line in lines:print(line, file=file)if pmids is not None:for line in make_pubmed_abstract_group(pmids):print(line, file=file)if entrez_ids is not None:for line in make_pubmed_gene_group(entrez_ids):print(line, file=file)", "docstring": "Write a boilerplate BEL document, with standard document metadata, definitions.\n\n :param name: The unique name for this BEL document\n :param contact: The email address of the maintainer\n :param description: A description of the contents of this document\n :param authors: The authors of this document\n :param version: The version. Defaults to current date in format ``YYYYMMDD``.\n :param copyright: Copyright information about this document\n :param licenses: The license applied to this document\n :param disclaimer: The disclaimer for this document\n :param namespace_url: an optional dictionary of {str name: str URL} of namespaces\n :param namespace_patterns: An optional dictionary of {str name: str regex} namespaces\n :param annotation_url: An optional dictionary of {str name: str URL} of annotations\n :param annotation_patterns: An optional dictionary of {str name: str regex} of regex annotations\n :param annotation_list: An optional dictionary of {str name: set of names} of list annotations\n :param pmids: A list of PubMed identifiers to auto-populate with citation and abstract\n :param entrez_ids: A list of Entrez identifiers to autopopulate the gene summary as evidence\n :param file: A writable file or file-like. 
If None, defaults to :data:`sys.stdout`", "id": "f9354:m4"} {"signature": "def lint_file(in_file, out_file=None):", "body": "for line in in_file:print(line.strip(), file=out_file)", "docstring": "Helps remove extraneous whitespace from the lines of a file\n\n :param file in_file: A readable file or file-like\n :param file out_file: A writable file or file-like", "id": "f9356:m0"} {"signature": "def lint_directory(source, target):", "body": "for path in os.listdir(source):if not path.endswith(''):continuelog.info('', path)with open(os.path.join(source, path)) as i, open(os.path.join(target, path), '') as o:lint_file(i, o)", "docstring": "Adds a linted version of each document in the source directory to the target directory\n\n :param str source: Path to directory to lint\n :param str target: Path to directory to output", "id": "f9356:m1"} {"signature": "def node_is_upstream_leaf(graph: BELGraph, node: BaseEntity) -> bool:", "body": "return == len(graph.predecessors(node)) and == len(graph.successors(node))", "docstring": "Return if the node is an upstream leaf.\n\n An upstream leaf is defined as a node that has no in-edges, and exactly 1 out-edge.", "id": "f9357:m0"} {"signature": "def get_upstream_leaves(graph: BELGraph) -> Iterable[BaseEntity]:", "body": "return filter_nodes(graph, node_is_upstream_leaf)", "docstring": "Iterate over all of the leaves of the graph (with no incoming edges and only one outgoing edge).\n\n .. seealso:: :func:`upstream_leaf_predicate`", "id": "f9357:m1"} {"signature": "def get_unweighted_upstream_leaves(graph: BELGraph, key: Optional[str] = None) -> Iterable[BaseEntity]:", "body": "if key is None:key = WEIGHTreturn filter_nodes(graph, [node_is_upstream_leaf, data_missing_key_builder(key)])", "docstring": "Get nodes with no incoming edges, one outgoing edge, and without the given key in its data dictionary.\n\n .. seealso :: :func:`data_does_not_contain_key_builder`\n\n :param graph: A BEL graph\n :param key: The key in the node data dictionary representing the experimental data. Defaults to\n :data:`pybel_tools.constants.WEIGHT`.\n :return: An iterable over leaves (nodes with an in-degree of 0) that don't have the given annotation", "id": "f9357:m2"} {"signature": "@in_place_transformationdef remove_unweighted_leaves(graph: BELGraph, key: Optional[str] = None) -> None:", "body": "unweighted_leaves = list(get_unweighted_upstream_leaves(graph, key=key))graph.remove_nodes_from(unweighted_leaves)", "docstring": "Remove nodes that are leaves and that don't have a weight (or other key) attribute set.\n\n :param graph: A BEL graph\n :param key: The key in the node data dictionary representing the experimental data. 
Defaults to\n :data:`pybel_tools.constants.WEIGHT`.", "id": "f9357:m3"} {"signature": "def is_unweighted_source(graph: BELGraph, node: BaseEntity, key: str) -> bool:", "body": "return graph.in_degree(node) == and key not in graph.nodes[node]", "docstring": "Check if the node is both a source and also has an annotation.\n\n :param graph: A BEL graph\n :param node: A BEL node\n :param key: The key in the node data dictionary representing the experimental data", "id": "f9357:m4"} {"signature": "def get_unweighted_sources(graph: BELGraph, key: Optional[str] = None) -> Iterable[BaseEntity]:", "body": "if key is None:key = WEIGHTfor node in graph:if is_unweighted_source(graph, node, key):yield node", "docstring": "Get nodes on the periphery of the sub-graph that do not have a annotation for the given key.\n\n :param graph: A BEL graph\n :param key: The key in the node data dictionary representing the experimental data\n :return: An iterator over BEL nodes that are unannotated and on the periphery of this subgraph", "id": "f9357:m5"} {"signature": "@in_place_transformationdef remove_unweighted_sources(graph: BELGraph, key: Optional[str] = None) -> None:", "body": "nodes = list(get_unweighted_sources(graph, key=key))graph.remove_nodes_from(nodes)", "docstring": "Prune unannotated nodes on the periphery of the sub-graph.\n\n :param graph: A BEL graph\n :param key: The key in the node data dictionary representing the experimental data. Defaults to\n :data:`pybel_tools.constants.WEIGHT`.", "id": "f9357:m6"} {"signature": "@in_place_transformationdef prune_mechanism_by_data(graph, key: Optional[str] = None) -> None:", "body": "remove_unweighted_leaves(graph, key=key)remove_unweighted_sources(graph, key=key)", "docstring": "Remove all leaves and source nodes that don't have weights.\n\n Is a thin wrapper around :func:`remove_unweighted_leaves` and :func:`remove_unweighted_sources`\n\n :param graph: A BEL graph\n :param key: The key in the node data dictionary representing the experimental data. 
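The remove_unweighted_leaves and remove_unweighted_sources records above prune nodes on the periphery of a mechanism that carry no experimental data. A rough, self-contained equivalent over a plain networkx.DiGraph, using a hypothetical 'weight' attribute as the data key:

    import networkx as nx

    def prune_unweighted_leaves(graph, key='weight'):
        # Upstream leaf: no in-edges, exactly one out-edge, and no data under `key`.
        leaves = [
            node for node in graph
            if graph.in_degree(node) == 0
            and graph.out_degree(node) == 1
            and key not in graph.nodes[node]
        ]
        graph.remove_nodes_from(leaves)

    g = nx.DiGraph([('a', 'b'), ('b', 'c')])
    g.nodes['b']['weight'] = 1.0
    prune_unweighted_leaves(g)
    print(sorted(g))  # ['b', 'c'] -- 'a' had no weight and was pruned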
Defaults to\n :data:`pybel_tools.constants.WEIGHT`.\n\n Equivalent to:\n\n >>> remove_unweighted_leaves(graph)\n >>> remove_unweighted_sources(graph)", "id": "f9357:m7"} {"signature": "@transformationdef generate_mechanism(graph: BELGraph, node: BaseEntity, key: Optional[str] = None) -> BELGraph:", "body": "subgraph = get_upstream_causal_subgraph(graph, node)expand_upstream_causal(graph, subgraph)remove_inconsistent_edges(subgraph)collapse_consistent_edges(subgraph)if key is not None: prune_mechanism_by_data(subgraph, key)return subgraph", "docstring": "Generate a mechanistic sub-graph upstream of the given node.\n\n :param graph: A BEL graph\n :param node: A BEL node\n :param key: The key in the node data dictionary representing the experimental data.\n :return: A sub-graph grown around the target BEL node", "id": "f9357:m8"} {"signature": "def generate_bioprocess_mechanisms(graph, key: Optional[str] = None) -> Mapping[BiologicalProcess, BELGraph]:", "body": "return {biological_process: generate_mechanism(graph, biological_process, key=key)for biological_process in get_nodes_by_function(graph, BIOPROCESS)}", "docstring": "Generate a mechanistic sub-graph for each biological process in the graph using :func:`generate_mechanism`.\n\n :param graph: A BEL graph\n :param key: The key in the node data dictionary representing the experimental data.", "id": "f9357:m9"} {"signature": "def to_html(graph: BELGraph) -> str:", "body": "context = get_network_summary_dict(graph)summary_dict = graph.summary_dict()citation_years = context['']function_count = context['']relation_count = context['']error_count = context['']transformations_count = context['']hub_data = context['']disease_data = context['']authors_count = context['']variants_count = context['']namespaces_count = context['']confidence_count = context['']confidence_data = [(label, confidence_count.get(label, ))for label in ('', '', '', '', '', '')]template = environment.get_template('')return template.render(graph=graph,chart_1_data=prepare_c3(function_count, ''),chart_6_data=prepare_c3(namespaces_count, ''),chart_5_data=prepare_c3(variants_count, ''),number_variants=sum(variants_count.values()),number_namespaces=len(namespaces_count),chart_2_data=prepare_c3(relation_count, ''),chart_4_data=prepare_c3(transformations_count, '') if transformations_count else None,number_transformations=sum(transformations_count.values()),chart_3_data=prepare_c3(error_count, '') if error_count else None,chart_7_data=prepare_c3(hub_data, ''),chart_9_data=prepare_c3(disease_data, '') if disease_data else None,chart_authors_count=prepare_c3(authors_count, ''),chart_10_data=prepare_c3_time_series(citation_years, '') if citation_years else None,chart_confidence_count=prepare_c3(confidence_data, ''),summary_dict=summary_dict,**context)", "docstring": "Render the graph as an HTML string.\n\n Common usage may involve writing to a file like:\n\n >>> from pybel.examples import sialic_acid_graph\n >>> with open('html_output.html', 'w') as file:\n ... 
print(to_html(sialic_acid_graph), file=file)", "id": "f9359:m0"} {"signature": "def get_network_summary_dict(graph: BELGraph) -> Mapping:", "body": "return dict(function_count=count_functions(graph),modifications_count=get_modifications_count(graph),relation_count=count_relations(graph),authors_count=count_authors(graph).most_common(),variants_count=count_variants(graph),namespaces_count=count_namespaces(graph),hub_data={(node.name or node.identifierif NAME in node or IDENTIFIER in node elsestr(node)): degreefor node, degree in get_top_hubs(graph, n=)},disease_data={(node.name or node.identifierif NAME in node or IDENTIFIER in node elsestr(node)): countfor node, count in get_top_pathologies(graph, n=)},regulatory_pairs=[get_pair_tuple(u, v)for u, v in get_regulatory_pairs(graph)],unstable_pairs=list(itt.chain((get_pair_tuple(u, v) + ('',) for u, v, in get_chaotic_pairs(graph)),(get_pair_tuple(u, v) + ('',) for u, v, in get_dampened_pairs(graph)),)),contradictory_pairs=[get_pair_tuple(u, v) + (relation,)for u, v, relation in get_contradiction_summary(graph)],contradictory_triplets=list(itt.chain((get_triplet_tuple(a, b, c) + ('',) for a, b, c inget_separate_unstable_correlation_triples(graph)),(get_triplet_tuple(a, b, c) + ('',) for a, b, c in get_mutually_unstable_correlation_triples(graph)),(get_triplet_tuple(a, b, c) + ('',) for a, b, c in get_jens_unstable(graph)),(get_triplet_tuple(a, b, c) + ('',) for a, b, c in get_increase_mismatch_triplets(graph)),(get_triplet_tuple(a, b, c) + ('',) for a, b, c in get_decrease_mismatch_triplets(graph)),)),unstable_triplets=list(itt.chain((get_triplet_tuple(a, b, c) + ('',) for a, b, c in get_chaotic_triplets(graph)),(get_triplet_tuple(a, b, c) + ('',) for a, b, c in get_dampened_triplets(graph)),)),causal_pathologies=sorted({get_pair_tuple(u, v) + (graph[u][v][k][RELATION],)for u, v, k in filter_edges(graph, has_pathology_causal)}),undefined_namespaces=get_undefined_namespaces(graph),undefined_annotations=get_undefined_annotations(graph),namespaces_with_incorrect_names=get_namespaces_with_incorrect_names(graph),unused_namespaces=get_unused_namespaces(graph),unused_annotations=get_unused_annotations(graph),unused_list_annotation_values=get_unused_list_annotation_values(graph),naked_names=get_naked_names(graph),error_count=count_error_types(graph),error_groups=get_most_common_errors(graph),syntax_errors=get_syntax_errors(graph),citation_years=get_citation_years(graph),confidence_count=count_confidences(graph),)", "docstring": "Create a summary dictionary.", "id": "f9359:m1"} {"signature": "def get_pair_tuple(a: BaseEntity, b: BaseEntity) -> Tuple[str, str, str, str]:", "body": "return a.as_bel(), a.sha512, b.as_bel(), b.sha512", "docstring": "Get the pair as a tuple of BEL/hashes.", "id": "f9359:m2"} {"signature": "def get_triplet_tuple(a: BaseEntity, b: BaseEntity, c: BaseEntity) -> Tuple[str, str, str, str, str, str]:", "body": "return a.as_bel(), a.sha512, b.as_bel(), b.sha512, c.as_bel(), c.sha512", "docstring": "Get the triple as a tuple of BEL/hashes.", "id": "f9359:m3"} {"signature": "def to_jupyter(graph: BELGraph, chart: Optional[str] = None) -> Javascript:", "body": "with open(os.path.join(HERE, ''), '') as f:js_template = Template(f.read())return Javascript(js_template.render(**_get_context(graph, chart=chart)))", "docstring": "Render the graph as JavaScript in a Jupyter Notebook.", "id": "f9363:m0"} {"signature": "def to_html(graph: BELGraph, chart: Optional[str] = None) -> str:", "body": "with open(os.path.join(HERE, ''), '') as 
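The to_html and to_jupyter records above both follow the same pattern: load a Jinja2 template and render it with a context built from the graph summary (the template file names and context keys are elided in the records). The rendering pattern itself, in isolation with a placeholder template:

    from jinja2 import Template

    # Placeholder template and context; the real templates ship with the package.
    template = Template('<h1>{{ name }}</h1><p>{{ n_nodes }} nodes, {{ n_edges }} edges</p>')
    print(template.render(name='Example graph', n_nodes=42, n_edges=99))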
f:html_template = Template(f.read())return html_template.render(**_get_context(graph, chart=chart))", "docstring": "Render the graph as an HTML string.\n\n Common usage may involve writing to a file like:\n\n >>> from pybel.examples import sialic_acid_graph\n >>> with open('ideogram_output.html', 'w') as file:\n ... print(to_html(sialic_acid_graph), file=file)", "id": "f9363:m1"} {"signature": "def _generate_id() -> str:", "body": "return ''.join(random.sample('', ))", "docstring": "Generate a random string of letters.", "id": "f9363:m3"} {"signature": "def prerender(graph: BELGraph) -> Mapping[str, Mapping[str, Any]]:", "body": "import bio2bel_hgncfrom bio2bel_hgnc.models import HumanGenegraph: BELGraph = graph.copy()enrich_protein_and_rna_origins(graph)collapse_all_variants(graph)genes: Set[Gene] = get_nodes_by_function(graph, GENE)hgnc_symbols = {gene.namefor gene in genesif gene.namespace.lower() == ''}result = {}hgnc_manager = bio2bel_hgnc.Manager()human_genes = (hgnc_manager.session.query(HumanGene.symbol, HumanGene.location).filter(HumanGene.symbol.in_(hgnc_symbols)).all())for human_gene in human_genes:result[human_gene.symbol] = {'': human_gene.symbol,'': (human_gene.location.split('')[]if '' in human_gene.location elsehuman_gene.location.split('')[]),}df = get_df()for _, (gene_id, symbol, start, stop) in df[df[''].isin(hgnc_symbols)].iterrows():result[symbol][''] = startresult[symbol][''] = stopreturn result", "docstring": "Generate the annotations JSON for Ideogram.", "id": "f9363:m4"} {"signature": "def get_df() -> pd.DataFrame:", "body": "return pd.read_csv(os.path.join(HERE, ''))", "docstring": "Get RefSeq information as a dataframe.", "id": "f9363:m5"} {"signature": "@transformationdef get_subgraph_by_node_filter(graph: BELGraph, node_predicates: NodePredicates) -> BELGraph:", "body": "return get_subgraph_by_induction(graph, filter_nodes(graph, node_predicates))", "docstring": "Induce a sub-graph on the nodes that pass the given predicate(s).", "id": "f9365:m0"} {"signature": "@transformationdef get_causal_subgraph(graph: BELGraph) -> BELGraph:", "body": "return get_subgraph_by_edge_filter(graph, is_causal_relation)", "docstring": "Build a new sub-graph induced over the causal edges.", "id": "f9365:m1"} {"signature": "@transformationdef get_subgraph_by_node_search(graph: BELGraph, query: Strings) -> BELGraph:", "body": "nodes = search_node_names(graph, query)return get_subgraph_by_induction(graph, nodes)", "docstring": "Get a sub-graph induced over all nodes matching the query string.\n\n :param graph: A BEL Graph\n :param query: A query string or iterable of query strings for node names\n\n Thinly wraps :func:`search_node_names` and :func:`get_subgraph_by_induction`.", "id": "f9365:m2"} {"signature": "@transformationdef get_largest_component(graph: BELGraph) -> BELGraph:", "body": "biggest_component_nodes = max(nx.weakly_connected_components(graph), key=len)return subgraph(graph, biggest_component_nodes)", "docstring": "Get the giant component of a graph.", "id": "f9365:m3"} {"signature": "def search_node_names(graph, query):", "body": "return filter_nodes(graph, build_node_name_search(query))", "docstring": "Search for nodes containing a given string(s).\n\n :param pybel.BELGraph graph: A BEL graph\n :param query: The search query\n :type query: str or iter[str]\n :return: An iterator over nodes whose names match the search query\n :rtype: iter\n\n Example:\n\n .. 
code-block:: python\n\n >>> from pybel.examples import sialic_acid_graph\n >>> from pybel_tools.selection import search_node_names\n >>> list(search_node_names(sialic_acid_graph, 'CD33'))\n [('Protein', 'HGNC', 'CD33'), ('Protein', 'HGNC', 'CD33', ('pmod', ('bel', 'Ph')))]", "id": "f9366:m0"} {"signature": "def search_node_namespace_names(graph, query, namespace):", "body": "node_predicates = [namespace_inclusion_builder(namespace),build_node_name_search(query)]return filter_nodes(graph, node_predicates)", "docstring": "Search for nodes with the given namespace(s) and whose names containing a given string(s).\n\n :param pybel.BELGraph graph: A BEL graph\n :param query: The search query\n :type query: str or iter[str]\n :param namespace: The namespace(s) to filter\n :type namespace: str or iter[str]\n :return: An iterator over nodes whose names match the search query\n :rtype: iter", "id": "f9366:m1"} {"signature": "def search_node_hgnc_names(graph, query):", "body": "return search_node_namespace_names(graph, query, namespace='')", "docstring": "Search for nodes with the HGNC namespace and whose names containing a given string(s).\n\n :param pybel.BELGraph graph: A BEL graph\n :param query: The search query\n :type query: str or iter[str]\n :return: An iterator over nodes whose names match the search query\n :rtype: iter", "id": "f9366:m2"} {"signature": "def pairwise(iterable):", "body": "a, b = itt.tee(iterable)next(b, None)return zip(a, b)", "docstring": "s -> (s0,s1), (s1,s2), (s2, s3), ...", "id": "f9367:m0"} {"signature": "def rank_path(graph, path, edge_ranking=None):", "body": "edge_ranking = default_edge_ranking if edge_ranking is None else edge_rankingreturn sum(max(edge_ranking[d[RELATION]] for d in graph.edge[u][v].values()) for u, v in pairwise(path))", "docstring": "Takes in a path (a list of nodes in the graph) and calculates a score\n\n :param pybel.BELGraph graph: A BEL graph\n :param list[tuple] path: A list of nodes in the path (includes terminal nodes)\n :param dict edge_ranking: A dictionary of {relationship: score}\n :return: The score for the edge\n :rtype: int", "id": "f9367:m1"} {"signature": "def find_root_in_path(graph, path_nodes):", "body": "path_graph = graph.subgraph(path_nodes)node_in_degree_tuple = sorted([(n, d) for n, d in path_graph.in_degree().items()], key=itemgetter())node_out_degree_tuple = sorted([(n, d) for n, d in path_graph.out_degree().items()], key=itemgetter(),reverse=True)tied_root_index = for i in range(, (len(node_in_degree_tuple) - )):if node_in_degree_tuple[i][] < node_in_degree_tuple[i + ][]:tied_root_index = ibreakif tied_root_index != :root_tuple = max(node_out_degree_tuple[:tied_root_index], key=itemgetter())else:root_tuple = node_in_degree_tuple[]return path_graph, root_tuple[]", "docstring": "Find the 'root' of the path -> The node with the lowest out degree, if multiple:\n root is the one with the highest out degree among those with lowest out degree\n\n :param pybel.BELGraph graph: A BEL Graph\n :param list[tuple] path_nodes: A list of nodes in their order in a path\n :return: A pair of the graph: graph of the path and the root node\n :rtype: tuple[pybel.BELGraph,tuple]", "id": "f9367:m5"} {"signature": "def convert_path_to_metapath(graph, nodes):", "body": "return [graph.node[node][FUNCTION]for node in nodes]", "docstring": "Converts a list of nodes to their corresponding functions\n\n :param list[tuple] nodes: A list of BEL node tuples\n :rtype: list[str]", "id": "f9369:m0"} {"signature": "@lru_cache(maxsize=None)def 
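The pairwise and rank_path records above score a path by looking up a relation score for each consecutive node pair and summing them. A self-contained sketch of that scoring with made-up relation names and scores:

    import itertools as itt

    def pairwise(iterable):
        a, b = itt.tee(iterable)
        next(b, None)
        return zip(a, b)

    edge_ranking = {'increases': 2, 'association': 1}          # hypothetical scores
    relations = {('A', 'B'): 'increases', ('B', 'C'): 'association'}
    path = ['A', 'B', 'C']
    score = sum(edge_ranking[relations[u, v]] for u, v in pairwise(path))
    print(score)  # 3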
get_walks_exhaustive(graph, node, length):", "body": "if == length:return (node,),return tuple((node, key) + pathfor neighbor in graph.edge[node]for path in get_walks_exhaustive(graph, neighbor, length - )if node not in pathfor key in graph.edge[node][neighbor])", "docstring": "Gets all walks under a given length starting at a given node\n\n :param networkx.Graph graph: A graph\n :param node: Starting node\n :param int length: The length of walks to get\n :return: A list of paths\n :rtype: list[tuple]", "id": "f9369:m1"} {"signature": "def match_simple_metapath(graph, node, simple_metapath):", "body": "if == len(simple_metapath):yield node,else:for neighbor in graph.edges[node]:if graph.nodes[neighbor][FUNCTION] == simple_metapath[]:for path in match_simple_metapath(graph, neighbor, simple_metapath[:]):if node not in path:yield (node,) + path", "docstring": "Matches a simple metapath starting at the given node\n\n :param pybel.BELGraph graph: A BEL graph\n :param tuple node: A BEL node\n :param list[str] simple_metapath: A list of BEL Functions\n :return: An iterable over paths from the node matching the metapath\n :rtype: iter[tuple]", "id": "f9369:m2"} {"signature": "def convert_simple_walk(graph, simple_walk):", "body": "return [graph.nodes[node][FUNCTION]for node in simple_walk]", "docstring": "Converts a walk into a sequence of BEL functions\n\n :param pybel.BELGraph graph: A BEL graph\n :param iter[tuple] simple_walk: An iterable of BEL nodes\n :return: A list of BEL functions of the walk\n :rtype: list[str]", "id": "f9369:m3"} {"signature": "def match_complex_metapath(graph, node, complex_metapath):", "body": "raise NotImplementedError", "docstring": "Matches a complex metapath starting at the given node\n\n :param pybel.BELGraph graph: A BEL graph\n :param tuple node: A BEL node\n :param list[str] complex_metapath: An iterable of alternating BEL nodes and relations\n :return: An iterable over paths from the node matching the metapath\n :rtype: iter[tuple]", "id": "f9369:m4"} {"signature": "def convert_complex_walk(graph, complex_walk):", "body": "raise NotImplementedError", "docstring": "Converts a walk into an alternative sequence of BEL functions and BEL relations, starting and ending\n with a BEL function\n\n :param pybel.BELGraph graph: A BEL graph\n :param iter[tuple] complex_walk: An iterable of alternating BEL nodes and relations\n :return: An alternating list of BEL functions and relations of the walk\n :rtype: list[str]", "id": "f9369:m5"} {"signature": "def get_leaves_by_type(graph, func=None, prune_threshold=):", "body": "for node, data in graph.nodes(data=True):if func and func != data.get(FUNCTION):continueif graph.in_degree(node) + graph.out_degree(node) <= prune_threshold:yield node", "docstring": "Returns an iterable over all nodes in graph (in-place) with only a connection to one node. Useful for gene and\n RNA. Allows for optional filter by function type.\n\n :param pybel.BELGraph graph: A BEL graph\n :param func: If set, filters by the node's function from :mod:`pybel.constants` like\n :data:`pybel.constants.GENE`, :data:`pybel.constants.RNA`, :data:`pybel.constants.PROTEIN`, or \n :data:`pybel.constants.BIOPROCESS`\n :type func: str\n :param prune_threshold: Removes nodes with less than or equal to this number of connections. 
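The get_walks_exhaustive record above enumerates all walks of a fixed length by recursing over neighbors and discarding walks that revisit the starting node. The same idea on a plain networkx.DiGraph, without caching or multigraph edge keys:

    import networkx as nx

    def walks(graph, node, length):
        if length == 0:
            return [(node,)]
        return [
            (node,) + rest
            for neighbor in graph.successors(node)
            for rest in walks(graph, neighbor, length - 1)
            if node not in rest
        ]

    g = nx.DiGraph([('a', 'b'), ('b', 'c'), ('a', 'c')])
    print(walks(g, 'a', 2))  # [('a', 'b', 'c')]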
Defaults to :code:`1`\n :type prune_threshold: int\n :return: An iterable over nodes with only a connection to one node\n :rtype: iter[tuple]", "id": "f9370:m0"} {"signature": "def group_nodes_by_annotation(graph: BELGraph, annotation: str = '') -> Mapping[str, Set[BaseEntity]]:", "body": "result = defaultdict(set)for u, v, d in graph.edges(data=True):if not edge_has_annotation(d, annotation):continueresult[d[ANNOTATIONS][annotation]].add(u)result[d[ANNOTATIONS][annotation]].add(v)return dict(result)", "docstring": "Group the nodes occurring in edges by the given annotation.", "id": "f9371:m0"} {"signature": "def average_node_annotation(graph: BELGraph,key: str,annotation: str = '',aggregator: Optional[Callable[[Iterable[X]], X]] = None,) -> Mapping[str, X]:", "body": "if aggregator is None:def aggregator(x):\"\"\"\"\"\"return sum(x) / len(x)result = {}for subgraph, nodes in group_nodes_by_annotation(graph, annotation).items():values = [graph.nodes[node][key] for node in nodes if key in graph.nodes[node]]result[subgraph] = aggregator(values)return result", "docstring": "Groups graph into subgraphs and assigns each subgraph a score based on the average of all nodes values\n for the given node key\n\n :param pybel.BELGraph graph: A BEL graph\n :param key: The key in the node data dictionary representing the experimental data\n :param annotation: A BEL annotation to use to group nodes\n :param aggregator: A function from list of values -> aggregate value. Defaults to taking the average of a list of\n floats.\n :type aggregator: lambda", "id": "f9371:m1"} {"signature": "def group_nodes_by_annotation_filtered(graph: BELGraph,node_predicates: NodePredicates = None,annotation: str = '',) -> Mapping[str, Set[BaseEntity]]:", "body": "node_filter = concatenate_node_predicates(node_predicates)return {key: {nodefor node in nodesif node_filter(graph, node)}for key, nodes in group_nodes_by_annotation(graph, annotation).items()}", "docstring": "Group the nodes occurring in edges by the given annotation, with a node filter applied.\n\n :param graph: A BEL graph\n :param node_predicates: A predicate or list of predicates (graph, node) -> bool\n :param annotation: The annotation to use for grouping\n :return: A dictionary of {annotation value: set of nodes}", "id": "f9371:m2"} {"signature": "def get_mapped_nodes(graph: BELGraph, namespace: str, names: Iterable[str]) -> Mapping[BaseEntity, Set[BaseEntity]]:", "body": "parent_to_variants = defaultdict(set)names = set(names)for u, v, d in graph.edges(data=True):if d[RELATION] in {HAS_MEMBER, HAS_COMPONENT} and v.get(NAMESPACE) == namespace and v.get(NAME) in names:parent_to_variants[v].add(u)elif d[RELATION] == HAS_VARIANT and u.get(NAMESPACE) == namespace and u.get(NAME) in names:parent_to_variants[u].add(v)elif d[RELATION] == ORTHOLOGOUS and u.get(NAMESPACE) == namespace and u.get(NAME) in names:parent_to_variants[u].add(v)return dict(parent_to_variants)", "docstring": "Return a dict with keys: nodes that match the namespace and in names and values other nodes (complexes, variants, orthologous...) 
or this node.\n\n :param graph: A BEL graph\n :param namespace: The namespace to search\n :param names: List or set of values from which we want to map nodes from\n :return: Main node to variants/groups.", "id": "f9371:m3"} {"signature": "def is_edge_consistent(graph, u, v):", "body": "if not graph.has_edge(u, v):raise ValueError(''.format(graph, u, v))return == len(set(d[RELATION] for d in graph.edge[u][v].values()))", "docstring": "Check if all edges between two nodes have the same relation.\n\n :param pybel.BELGraph graph: A BEL Graph\n :param tuple u: The source BEL node\n :param tuple v: The target BEL node\n :return: If all edges from the source to target node have the same relation\n :rtype: bool", "id": "f9372:m0"} {"signature": "def all_edges_consistent(graph):", "body": "return all(is_edge_consistent(graph, u, v)for u, v in graph.edges())", "docstring": "Return if all edges are consistent in a graph. Wraps :func:`pybel_tools.utils.is_edge_consistent`.\n\n :param pybel.BELGraph graph: A BEL graph\n :return: Are all edges consistent\n :rtype: bool", "id": "f9372:m1"} {"signature": "@transformationdef rewire_targets(graph, rewiring_probability):", "body": "if not all_edges_consistent(graph):raise ValueError(''.format(graph))result = graph.copy()nodes = result.nodes()for u, v in result.edges():if random.random() < rewiring_probability:continuew = random.choice(nodes)while w == u or result.has_edge(u, w):w = random.choice(nodes)result.add_edge(w, v)result.remove_edge(u, v)return result", "docstring": "Rewire a graph's edges' target nodes.\n\n - For BEL graphs, assumes edge consistency (all edges between two given nodes are have the same relation)\n - Doesn't make self-edges\n\n :param pybel.BELGraph graph: A BEL graph\n :param float rewiring_probability: The probability of rewiring (between 0 and 1)\n :return: A rewired BEL graph", "id": "f9372:m2"} {"signature": "def get_merged_namespace_names(locations, check_keywords=True):", "body": "resources = {location: get_bel_resource(location) for location in locations}if check_keywords:resource_keywords = set(config[''][''] for config in resources.values())if != len(resource_keywords):raise ValueError(''.format(resource_keywords))result = {}for resource in resources:result.update(resource[''])return result", "docstring": "Loads many namespaces and combines their names.\n\n :param iter[str] locations: An iterable of URLs or file paths pointing to BEL namespaces.\n :param bool check_keywords: Should all the keywords be the same? Defaults to ``True``\n :return: A dictionary of {names: labels}\n :rtype: dict[str, str]\n\n Example Usage\n\n >>> from pybel.resources import write_namespace\n >>> from pybel_tools.definition_utils import export_namespace, get_merged_namespace_names\n >>> graph = ...\n >>> original_ns_url = ...\n >>> export_namespace(graph, 'MBS') # Outputs in current directory to MBS.belns\n >>> value_dict = get_merged_namespace_names([original_ns_url, 'MBS.belns'])\n >>> with open('merged_namespace.belns', 'w') as f:\n >>> ... 
write_namespace('MyBrokenNamespace', 'MBS', 'Other', 'Charles Hoyt', 'PyBEL Citation', value_dict, file=f)", "id": "f9373:m0"} {"signature": "def merge_namespaces(input_locations, output_path, namespace_name, namespace_keyword, namespace_domain, author_name,citation_name, namespace_description=None, namespace_species=None, namespace_version=None,namespace_query_url=None, namespace_created=None, author_contact=None, author_copyright=None,citation_description=None, citation_url=None, citation_version=None, citation_date=None,case_sensitive=True, delimiter='', cacheable=True, functions=None, value_prefix='',sort_key=None, check_keywords=True):", "body": "results = get_merged_namespace_names(input_locations, check_keywords=check_keywords)with open(output_path, '') as file:write_namespace(namespace_name=namespace_name,namespace_keyword=namespace_keyword,namespace_domain=namespace_domain,author_name=author_name,citation_name=citation_name,values=results,namespace_species=namespace_species,namespace_description=namespace_description,namespace_query_url=namespace_query_url,namespace_version=namespace_version,namespace_created=namespace_created,author_contact=author_contact,author_copyright=author_copyright,citation_description=citation_description,citation_url=citation_url,citation_version=citation_version,citation_date=citation_date,case_sensitive=case_sensitive,delimiter=delimiter,cacheable=cacheable,functions=functions,value_prefix=value_prefix,sort_key=sort_key,file=file)", "docstring": "Merges namespaces from multiple locations to one.\n\n :param iter input_locations: An iterable of URLs or file paths pointing to BEL namespaces.\n :param str output_path: The path to the file to write the merged namespace\n :param str namespace_name: The namespace name\n :param str namespace_keyword: Preferred BEL Keyword, maximum length of 8\n :param str namespace_domain: One of: :data:`pybel.constants.NAMESPACE_DOMAIN_BIOPROCESS`,\n :data:`pybel.constants.NAMESPACE_DOMAIN_CHEMICAL`,\n :data:`pybel.constants.NAMESPACE_DOMAIN_GENE`, or\n :data:`pybel.constants.NAMESPACE_DOMAIN_OTHER`\n :param str author_name: The namespace's authors\n :param str citation_name: The name of the citation\n :param str namespace_query_url: HTTP URL to query for details on namespace values (must be valid URL)\n :param str namespace_description: Namespace description\n :param str namespace_species: Comma-separated list of species taxonomy id's\n :param str namespace_version: Namespace version\n :param str namespace_created: Namespace public timestamp, ISO 8601 datetime\n :param str author_contact: Namespace author's contact info/email address\n :param str author_copyright: Namespace's copyright/license information\n :param str citation_description: Citation description\n :param str citation_url: URL to more citation information\n :param str citation_version: Citation version\n :param str citation_date: Citation publish timestamp, ISO 8601 Date\n :param bool case_sensitive: Should this config file be interpreted as case-sensitive?\n :param str delimiter: The delimiter between names and labels in this config file\n :param bool cacheable: Should this config file be cached?\n :param functions: The encoding for the elements in this namespace\n :type functions: iterable of characters\n :param str value_prefix: a prefix for each name\n :param sort_key: A function to sort the values with :func:`sorted`\n :param bool check_keywords: Should all the keywords be the same? 
Defaults to ``True``", "id": "f9373:m1"} {"signature": "def export_namespace(graph, namespace, directory=None, cacheable=False):", "body": "directory = os.getcwd() if directory is None else directorypath = os.path.join(directory, ''.format(namespace))with open(path, '') as file:log.info('', path)right_names = get_names_by_namespace(graph, namespace)log.info('', len(right_names), namespace)wrong_names = get_incorrect_names_by_namespace(graph, namespace)log.info('', len(right_names), namespace)undefined_ns_names = get_undefined_namespace_names(graph, namespace)log.info('', len(right_names), namespace)names = (right_names | wrong_names | undefined_ns_names)if == len(names):log.warning('', namespace)write_namespace(namespace_name=namespace,namespace_keyword=namespace,namespace_domain='',author_name=graph.authors,author_contact=graph.contact,citation_name=graph.name,values=names,cacheable=cacheable,file=file)", "docstring": "Exports all names and missing names from the given namespace to its own BEL Namespace files in the given\n directory.\n\n Could be useful during quick and dirty curation, where planned namespace building is not a priority.\n\n :param pybel.BELGraph graph: A BEL graph\n :param str namespace: The namespace to process\n :param str directory: The path to the directory where to output the namespace. Defaults to the current working\n directory returned by :func:`os.getcwd`\n :param bool cacheable: Should the namespace be cacheable? Defaults to ``False`` because, in general, this operation\n will probably be used for evil, and users won't want to reload their entire cache after each\n iteration of curation.", "id": "f9375:m0"} {"signature": "def export_namespaces(graph, namespaces, directory=None, cacheable=False):", "body": "directory = os.getcwd() if directory is None else directory for namespace in namespaces:export_namespace(graph, namespace, directory=directory, cacheable=cacheable)", "docstring": "Thinly wraps :func:`export_namespace` for an iterable of namespaces.\n\n :param pybel.BELGraph graph: A BEL graph\n :param iter[str] namespaces: An iterable of strings for the namespaces to process\n :param str directory: The path to the directory where to output the namespaces. Defaults to the current working\n directory returned by :func:`os.getcwd`\n :param bool cacheable: Should the namespaces be cacheable? 
Defaults to ``False`` because, in general, this operation\n will probably be used for evil, and users won't want to reload their entire cache after each\n iteration of curation.", "id": "f9375:m1"} {"signature": "def get_peripheral_successor_edges(graph: BELGraph, subgraph: BELGraph) -> EdgeIterator:", "body": "for u in subgraph:for _, v, k in graph.out_edges(u, keys=True):if v not in subgraph:yield u, v, k", "docstring": "Get the set of possible successor edges peripheral to the sub-graph.\n\n The source nodes in this iterable are all inside the sub-graph, while the targets are outside.", "id": "f9376:m0"} {"signature": "def get_peripheral_predecessor_edges(graph: BELGraph, subgraph: BELGraph) -> EdgeIterator:", "body": "for v in subgraph:for u, _, k in graph.in_edges(v, keys=True):if u not in subgraph:yield u, v, k", "docstring": "Get the set of possible predecessor edges peripheral to the sub-graph.\n\n The target nodes in this iterable are all inside the sub-graph, while the sources are outside.", "id": "f9376:m1"} {"signature": "def count_sources(edge_iter: EdgeIterator) -> Counter:", "body": "return Counter(u for u, _, _ in edge_iter)", "docstring": "Count the source nodes in an edge iterator with keys and data.\n\n :return: A counter of source nodes in the iterable", "id": "f9376:m2"} {"signature": "def count_targets(edge_iter: EdgeIterator) -> Counter:", "body": "return Counter(v for _, v, _ in edge_iter)", "docstring": "Count the target nodes in an edge iterator with keys and data.\n\n :return: A counter of target nodes in the iterable", "id": "f9376:m3"} {"signature": "def count_possible_successors(graph: BELGraph, subgraph: BELGraph) -> Counter:", "body": "return count_targets(get_peripheral_successor_edges(graph, subgraph))", "docstring": ":param graph: A BEL graph\n:param subgraph: An iterator of BEL nodes\n:return: A counter of possible successor nodes", "id": "f9376:m4"} {"signature": "def count_possible_predecessors(graph: BELGraph, subgraph: BELGraph) -> Counter:", "body": "return count_sources(get_peripheral_predecessor_edges(graph, subgraph))", "docstring": ":param graph: A BEL graph\n:param subgraph: An iterator of BEL nodes\n:return: A counter of possible predecessor nodes", "id": "f9376:m5"} {"signature": "def get_subgraph_edges(graph: BELGraph,annotation: str,value: str,source_filter=None,target_filter=None,):", "body": "if source_filter is None:source_filter = keep_node_permissiveif target_filter is None:target_filter = keep_node_permissivefor u, v, k, data in graph.edges(keys=True, data=True):if not edge_has_annotation(data, annotation):continueif data[ANNOTATIONS][annotation] == value and source_filter(graph, u) and target_filter(graph, v):yield u, v, k, data", "docstring": "Gets all edges from a given subgraph whose source and target nodes pass all of the given filters\n\n :param pybel.BELGraph graph: A BEL graph\n :param str annotation: The annotation to search\n :param str value: The annotation value to search by\n :param source_filter: Optional filter for source nodes (graph, node) -> bool\n :param target_filter: Optional filter for target nodes (graph, node) -> bool\n :return: An iterable of (source node, target node, key, data) for all edges that match the annotation/value and\n node filters\n :rtype: iter[tuple]", "id": "f9376:m6"} {"signature": "def get_subgraph_peripheral_nodes(graph: BELGraph,subgraph: Iterable[BaseEntity],node_predicates: NodePredicates = None,edge_predicates: EdgePredicates = None,):", "body": "node_filter = 
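The count_possible_successors record above counts, for a given sub-graph, how often each outside node appears as the target of an edge leaving the sub-graph; nodes with high counts are natural candidates for expansion. A plain networkx/Counter sketch of the same bookkeeping (counting per simple edge rather than per multigraph key):

    from collections import Counter
    import networkx as nx

    def count_possible_successors(graph, subgraph_nodes):
        return Counter(
            v
            for u in subgraph_nodes
            for v in graph.successors(u)
            if v not in subgraph_nodes
        )

    g = nx.DiGraph([('a', 'x'), ('b', 'x'), ('b', 'y')])
    print(count_possible_successors(g, {'a', 'b'}))  # Counter({'x': 2, 'y': 1})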
concatenate_node_predicates(node_predicates=node_predicates)edge_filter = and_edge_predicates(edge_predicates=edge_predicates)result = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))for u, v, k, d in get_peripheral_successor_edges(graph, subgraph):if not node_filter(graph, v) or not node_filter(graph, u) or not edge_filter(graph, u, v, k):continueresult[v][''][u].append((k, d))for u, v, k, d in get_peripheral_predecessor_edges(graph, subgraph):if not node_filter(graph, v) or not node_filter(graph, u) or not edge_filter(graph, u, v, k):continueresult[u][''][v].append((k, d))return result", "docstring": "Get a summary dictionary of all peripheral nodes to a given sub-graph.\n\n :return: A dictionary of {external node: {'successor': {internal node: list of (key, dict)},\n 'predecessor': {internal node: list of (key, dict)}}}\n :rtype: dict\n\n For example, it might be useful to quantify the number of predecessors and successors:\n\n >>> from pybel.struct.filters import exclude_pathology_filter\n >>> value = 'Blood vessel dilation subgraph'\n >>> sg = get_subgraph_by_annotation_value(graph, annotation='Subgraph', value=value)\n >>> p = get_subgraph_peripheral_nodes(graph, sg, node_predicates=exclude_pathology_filter)\n >>> for node in sorted(p, key=lambda n: len(set(p[n]['successor']) | set(p[n]['predecessor'])), reverse=True):\n >>> if 1 == len(p[value][node]['successor']) or 1 == len(p[value][node]['predecessor']):\n >>> continue\n >>> print(node,\n >>> len(p[node]['successor']),\n >>> len(p[node]['predecessor']),\n >>> len(set(p[node]['successor']) | set(p[node]['predecessor'])))", "id": "f9376:m7"} {"signature": "@uni_in_place_transformationdef expand_periphery(universe: BELGraph,graph: BELGraph,node_predicates: NodePredicates = None,edge_predicates: EdgePredicates = None,threshold: int = ,) -> None:", "body": "nd = get_subgraph_peripheral_nodes(universe, graph, node_predicates=node_predicates,edge_predicates=edge_predicates)for node, dd in nd.items():pred_d = dd['']succ_d = dd['']in_subgraph_connections = set(pred_d) | set(succ_d)if threshold > len(in_subgraph_connections):continuegraph.add_node(node, attr_dict=universe[node])for u, edges in pred_d.items():for k, d in edges:safe_add_edge(graph, u, node, k, d)for v, edges in succ_d.items():for k, d in edges:safe_add_edge(graph, node, v, k, d)", "docstring": "Iterates over all possible edges, peripheral to a given subgraph, that could be added from the given graph.\n Edges could be added if they go to nodes that are involved in relationships that occur with more than the\n threshold (default 2) number of nodes in the subgraph.\n\n :param universe: The universe of BEL knowledge\n :param graph: The (sub)graph to expand\n :param threshold: Minimum frequency of betweenness occurrence to add a gap node\n\n A reasonable edge filter to use is :func:`pybel_tools.filters.keep_causal_edges` because this function can allow\n for huge expansions if there happen to be hub nodes.", "id": "f9376:m8"} {"signature": "@uni_in_place_transformationdef enrich_complexes(graph: BELGraph) -> None:", "body": "nodes = list(get_nodes_by_function(graph, COMPLEX))for u in nodes:for v in u.members:graph.add_has_component(u, v)", "docstring": "Add all of the members of the complex abundances to the graph.", "id": "f9376:m9"} {"signature": "@uni_in_place_transformationdef enrich_composites(graph: BELGraph):", "body": "nodes = list(get_nodes_by_function(graph, COMPOSITE))for u in nodes:for v in u.members:graph.add_has_component(u, v)", "docstring": "Adds all of 
the members of the composite abundances to the graph.", "id": "f9376:m10"} {"signature": "@uni_in_place_transformationdef enrich_reactions(graph: BELGraph):", "body": "nodes = list(get_nodes_by_function(graph, REACTION))for u in nodes:for v in u.reactants:graph.add_has_reactant(u, v)for v in u.products:graph.add_has_product(u, v)", "docstring": "Adds all of the reactants and products of reactions to the graph.", "id": "f9376:m11"} {"signature": "@uni_in_place_transformationdef enrich_variants(graph: BELGraph, func: Union[None, str, Iterable[str]] = None):", "body": "if func is None:func = {PROTEIN, RNA, MIRNA, GENE}nodes = list(get_nodes_by_function(graph, func))for u in nodes:parent = u.get_parent()if parent is None:continueif parent not in graph:graph.add_has_variant(parent, u)", "docstring": "Add the reference nodes for all variants of the given function.\n\n :param graph: The target BEL graph to enrich\n :param func: The function by which the subject of each triple is filtered. Defaults to the set of protein, rna,\n mirna, and gene.", "id": "f9376:m12"} {"signature": "@uni_in_place_transformationdef enrich_unqualified(graph: BELGraph):", "body": "enrich_complexes(graph)enrich_composites(graph)enrich_reactions(graph)enrich_variants(graph)", "docstring": "Enrich the sub-graph with the unqualified edges from the graph.\n\n The reason you might want to do this is you induce a sub-graph from the original graph based on an annotation\n filter, but the unqualified edges that don't have annotations that most likely connect elements within your graph\n are not included.\n\n .. seealso::\n\n This function thinly wraps the successive application of the following functions:\n\n - :func:`enrich_complexes`\n - :func:`enrich_composites`\n - :func:`enrich_reactions`\n - :func:`enrich_variants`\n\n Equivalent to:\n\n >>> enrich_complexes(graph)\n >>> enrich_composites(graph)\n >>> enrich_reactions(graph)\n >>> enrich_variants(graph)", "id": "f9376:m13"} {"signature": "@uni_in_place_transformationdef expand_internal(universe: BELGraph, graph: BELGraph, edge_predicates: EdgePredicates = None) -> None:", "body": "edge_filter = and_edge_predicates(edge_predicates)for u, v in itt.product(graph, repeat=):if graph.has_edge(u, v) or not universe.has_edge(u, v):continuers = defaultdict(list)for key, data in universe[u][v].items():if not edge_filter(universe, u, v, key):continuers[data[RELATION]].append((key, data))if == len(rs):relation = list(rs)[]for key, data in rs[relation]:graph.add_edge(u, v, key=key, **data)else:log.debug('', u, v)", "docstring": "Edges between entities in the sub-graph that pass the given filters.\n\n :param universe: The full graph\n :param graph: A sub-graph to find the upstream information\n :param edge_predicates: Optional list of edge filter functions (graph, node, node, key, data) -> bool", "id": "f9376:m14"} {"signature": "@uni_in_place_transformationdef expand_internal_causal(universe: BELGraph, graph: BELGraph) -> None:", "body": "expand_internal(universe, graph, edge_predicates=is_causal_relation)", "docstring": "Add causal edges between entities in the sub-graph.\n\n Is an extremely thin wrapper around :func:`expand_internal`.\n\n :param universe: A BEL graph representing the universe of all knowledge\n :param graph: The target BEL graph to enrich with causal relations between contained nodes\n\n Equivalent to:\n\n >>> from pybel_tools.mutation import expand_internal\n >>> from pybel.struct.filters.edge_predicates import is_causal_relation\n >>> expand_internal(universe, 
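The expand_internal record above copies, from the larger universe graph, any edge whose two endpoints are already in the sub-graph but which the sub-graph is missing. A simplified version over plain DiGraphs, without the relation-consistency check or edge filters of the original:

    import itertools as itt
    import networkx as nx

    def expand_internal(universe, subgraph):
        for u, v in itt.product(subgraph, repeat=2):
            if universe.has_edge(u, v) and not subgraph.has_edge(u, v):
                subgraph.add_edge(u, v, **universe.edges[u, v])

    universe = nx.DiGraph([('a', 'b'), ('b', 'c'), ('a', 'c')])
    sub = nx.DiGraph([('a', 'b')])
    sub.add_node('c')
    expand_internal(universe, sub)
    print(sorted(sub.edges()))  # [('a', 'b'), ('a', 'c'), ('b', 'c')]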
graph, edge_predicates=is_causal_relation)", "id": "f9376:m15"} {"signature": "def build_expand_node_neighborhood_by_hash(manager: Manager) -> Callable[[BELGraph, BELGraph, str], None]:", "body": "@uni_in_place_transformationdef expand_node_neighborhood_by_hash(universe: BELGraph, graph: BELGraph, node_hash: str) -> None:\"\"\"\"\"\"node = manager.get_dsl_by_hash(node_hash)return expand_node_neighborhood(universe, graph, node)return expand_node_neighborhood_by_hash", "docstring": "Make an expand function that's bound to the manager.", "id": "f9377:m0"} {"signature": "def build_delete_node_by_hash(manager: Manager) -> Callable[[BELGraph, str], None]:", "body": "@in_place_transformationdef delete_node_by_hash(graph: BELGraph, node_hash: str) -> None:\"\"\"\"\"\"node = manager.get_dsl_by_hash(node_hash)graph.remove_node(node)return delete_node_by_hash", "docstring": "Make a delete function that's bound to the manager.", "id": "f9377:m1"} {"signature": "@in_place_transformationdef remove_inconsistent_edges(graph: BELGraph) -> None:", "body": "for u, v in get_inconsistent_edges(graph):edges = [(u, v, k) for k in graph[u][v]]graph.remove_edges_from(edges)", "docstring": "Remove all edges between node pairs with inconsistent edges.\n\n This is the all-or-nothing approach. It would be better to do more careful investigation of the evidences during\n curation.", "id": "f9379:m0"} {"signature": "def get_inconsistent_edges(graph: BELGraph) -> Iterable[Tuple[BaseEntity]]:", "body": "for u, v in graph.edges():if not pair_is_consistent(graph, u, v):yield u, v", "docstring": "Iterate over pairs of nodes with inconsistent edges.", "id": "f9379:m1"} {"signature": "@in_place_transformationdef collapse_protein_variants(graph: BELGraph) -> None:", "body": "_collapse_variants_by_function(graph, PROTEIN)", "docstring": "Collapse all protein's variants' edges to their parents, in-place.", "id": "f9380:m0"} {"signature": "@in_place_transformationdef collapse_gene_variants(graph: BELGraph) -> None:", "body": "_collapse_variants_by_function(graph, GENE)", "docstring": "Collapse all gene's variants' edges to their parents, in-place.", "id": "f9380:m1"} {"signature": "def _collapse_variants_by_function(graph: BELGraph, func: str) -> None:", "body": "for parent_node, variant_node, data in graph.edges(data=True):if data[RELATION] == HAS_VARIANT and parent_node.function == func:collapse_pair(graph, from_node=variant_node, to_node=parent_node)", "docstring": "Collapse all of the given functions' variants' edges to their parents, in-place.", "id": "f9380:m2"} {"signature": "@in_place_transformationdef rewire_variants_to_genes(graph: BELGraph) -> None:", "body": "mapping = {}for node in graph:if not isinstance(node, Protein) or not node.variants:continuemapping[node] = Gene(name=node.name,namespace=node.namespace,identifier=node.identifier,variants=node.variants,)nx.relabel_nodes(graph, mapping, copy=False)", "docstring": "Find all protein variants that are pointing to a gene and not a protein and fixes them by changing their\n function to be :data:`pybel.constants.GENE`, in place\n\n A use case is after running :func:`collapse_to_genes`.", "id": "f9380:m3"} {"signature": "def _collapse_edge_passing_predicates(graph: BELGraph, edge_predicates: EdgePredicates = None) -> None:", "body": "for u, v, _ in filter_edges(graph, edge_predicates=edge_predicates):collapse_pair(graph, survivor=u, victim=v)", "docstring": "Collapse all edges passing the given edge predicates.", "id": "f9380:m4"} {"signature": "def 
_collapse_edge_by_namespace(graph: BELGraph,victim_namespaces: Strings,survivor_namespaces: str,relations: Strings) -> None:", "body": "relation_filter = build_relation_predicate(relations)source_namespace_filter = build_source_namespace_filter(victim_namespaces)target_namespace_filter = build_target_namespace_filter(survivor_namespaces)edge_predicates = [relation_filter,source_namespace_filter,target_namespace_filter]_collapse_edge_passing_predicates(graph, edge_predicates=edge_predicates)", "docstring": "Collapse pairs of nodes with the given namespaces that have the given relationship.\n\n :param graph: A BEL Graph\n :param victim_namespaces: The namespace(s) of the node to collapse\n :param survivor_namespaces: The namespace of the node to keep\n :param relations: The relation(s) to search", "id": "f9380:m5"} {"signature": "@in_place_transformationdef collapse_equivalencies_by_namespace(graph: BELGraph, victim_namespace: Strings, survivor_namespace: str) -> None:", "body": "_collapse_edge_by_namespace(graph, victim_namespace, survivor_namespace, EQUIVALENT_TO)", "docstring": "Collapse pairs of nodes with the given namespaces that have equivalence relationships.\n\n :param graph: A BEL graph\n :param victim_namespace: The namespace(s) of the node to collapse\n :param survivor_namespace: The namespace of the node to keep\n\n To convert all ChEBI names to InChI keys, assuming there are appropriate equivalence relations between nodes with\n those namespaces:\n\n >>> collapse_equivalencies_by_namespace(graph, 'CHEBI', 'CHEBIID')\n >>> collapse_equivalencies_by_namespace(graph, 'CHEBIID', 'INCHI')", "id": "f9380:m6"} {"signature": "@in_place_transformationdef collapse_orthologies_by_namespace(graph: BELGraph, victim_namespace: Strings, survivor_namespace: str) -> None:", "body": "_collapse_edge_by_namespace(graph, victim_namespace, survivor_namespace, ORTHOLOGOUS)", "docstring": "Collapse pairs of nodes with the given namespaces that have orthology relationships.\n\n :param graph: A BEL Graph\n :param victim_namespace: The namespace(s) of the node to collapse\n :param survivor_namespace: The namespace of the node to keep\n\n To collapse all MGI nodes to their HGNC orthologs, use:\n >>> collapse_orthologies_by_namespace('MGI', 'HGNC')\n\n\n To collapse collapse both MGI and RGD nodes to their HGNC orthologs, use:\n >>> collapse_orthologies_by_namespace(['MGI', 'RGD'], 'HGNC')", "id": "f9380:m7"} {"signature": "@in_place_transformationdef collapse_entrez_to_hgnc(graph: BELGraph):", "body": "collapse_equivalencies_by_namespace(graph, ['', '', ''], '')", "docstring": "Collapse Entrez equivalences to HGNC.", "id": "f9380:m8"} {"signature": "@in_place_transformationdef collapse_mgi_to_hgnc(graph: BELGraph):", "body": "collapse_orthologies_by_namespace(graph, ['', ''], '')", "docstring": "Collapse MGI orthologies to HGNC.", "id": "f9380:m9"} {"signature": "@in_place_transformationdef collapse_rgd_to_hgnc(graph: BELGraph):", "body": "collapse_orthologies_by_namespace(graph, ['', ''], '')", "docstring": "Collapse RGD orthologies to HGNC.", "id": "f9380:m10"} {"signature": "@in_place_transformationdef collapse_flybase_to_hgnc(graph: BELGraph):", "body": "collapse_orthologies_by_namespace(graph, '', '')", "docstring": "Collapse FlyBase orthologies to HGNC.", "id": "f9380:m11"} {"signature": "@in_place_transformationdef collapse_entrez_equivalencies(graph: BELGraph):", "body": "relation_filter = build_relation_predicate(EQUIVALENT_TO)source_namespace_filter = build_source_namespace_filter(['', '', 
''])edge_predicates = [relation_filter,source_namespace_filter,]_collapse_edge_passing_predicates(graph, edge_predicates=edge_predicates)", "docstring": "Collapse all equivalence edges away from Entrez. Assumes well formed, 2-way equivalencies.", "id": "f9380:m12"} {"signature": "@in_place_transformationdef collapse_consistent_edges(graph: BELGraph):", "body": "for u, v in graph.edges():relation = pair_is_consistent(graph, u, v)if not relation:continueedges = [(u, v, k) for k in graph[u][v]]graph.remove_edges_from(edges)graph.add_edge(u, v, attr_dict={RELATION: relation})", "docstring": "Collapse consistent edges together.\n\n .. warning:: This operation doesn't preserve evidences or other annotations", "id": "f9380:m13"} {"signature": "@transformationdef collapse_to_protein_interactions(graph: BELGraph) -> BELGraph:", "body": "rv: BELGraph = graph.copy()collapse_to_genes(rv)def is_edge_ppi(_: BELGraph, u: BaseEntity, v: BaseEntity, __: str) -> bool:\"\"\"\"\"\"return isinstance(u, Gene) and isinstance(v, Gene)return get_subgraph_by_edge_filter(rv, edge_predicates=[has_polarity, is_edge_ppi])", "docstring": "Collapse to a graph made of only causal gene/protein edges.", "id": "f9380:m14"} {"signature": "@in_place_transformationdef collapse_nodes_with_same_names(graph: BELGraph) -> None:", "body": "survivor_mapping = defaultdict(set) victims = set() it = tqdm(itt.combinations(graph, r=), total=graph.number_of_nodes() * (graph.number_of_nodes() - ) / )for a, b in it:if b in victims:continuea_name, b_name = a.get(NAME), b.get(NAME)if not a_name or not b_name or a_name.lower() != b_name.lower():continueif a.keys() != b.keys(): continuefor k in set(a.keys()) - {NAME, NAMESPACE}:if a[k] != b[k]: continuesurvivor_mapping[a].add(b)victims.add(b)collapse_nodes(graph, survivor_mapping)", "docstring": "Collapse all nodes with the same name, merging namespaces by picking first alphabetical one.", "id": "f9380:m15"} {"signature": "@in_place_transformationdef infer_missing_two_way_edges(graph):", "body": "for u, v, k, d in graph.edges(data=True, keys=True):if d[RELATION] in TWO_WAY_RELATIONS:infer_missing_backwards_edge(graph, u, v, k)", "docstring": "Add edges to the graph when a two way edge exists, and the opposite direction doesn't exist.\n\n Use: two way edges from BEL definition and/or axiomatic inverses of membership relations\n\n :param pybel.BELGraph graph: A BEL graph", "id": "f9381:m0"} {"signature": "def infer_missing_backwards_edge(graph, u, v, k):", "body": "if u in graph[v]:for attr_dict in graph[v][u].values():if attr_dict == graph[u][v][k]:returngraph.add_edge(v, u, key=k, **graph[u][v][k])", "docstring": "Add the same edge, but in the opposite direction if not already present.\n\n :type graph: pybel.BELGraph\n :type u: tuple\n :type v: tuple\n :type k: int", "id": "f9381:m1"} {"signature": "@uni_in_place_transformationdef enrich_internal_unqualified_edges(graph, subgraph):", "body": "for u, v in itt.combinations(subgraph, ):if not graph.has_edge(u, v):continuefor k in graph[u][v]:if k < :subgraph.add_edge(u, v, key=k, **graph[u][v][k])", "docstring": "Add the missing unqualified edges between entities in the subgraph that are contained within the full graph.\n\n :param pybel.BELGraph graph: The full BEL graph\n :param pybel.BELGraph subgraph: The query BEL subgraph", "id": "f9381:m2"} {"signature": "@in_place_transformationdef enrich_pubmed_citations(graph: BELGraph, manager: Manager) -> Set[str]:", "body": "pmids = get_pubmed_identifiers(graph)pmid_data, errors = 
get_citations_by_pmids(manager=manager, pmids=pmids)for u, v, k in filter_edges(graph, has_pubmed):pmid = graph[u][v][k][CITATION][CITATION_REFERENCE].strip()if pmid not in pmid_data:log.warning('', pmid)errors.add(pmid)continuegraph[u][v][k][CITATION].update(pmid_data[pmid])return errors", "docstring": "Overwrite all PubMed citations with values from NCBI's eUtils lookup service.\n\n :return: A set of PMIDs for which the eUtils service crashed", "id": "f9382:m0"} {"signature": "@uni_in_place_transformationdef update_context(universe: BELGraph, graph: BELGraph):", "body": "for namespace in get_namespaces(graph):if namespace in universe.namespace_url:graph.namespace_url[namespace] = universe.namespace_url[namespace]elif namespace in universe.namespace_pattern:graph.namespace_pattern[namespace] = universe.namespace_pattern[namespace]else:log.warning('', namespace)for annotation in get_annotations(graph):if annotation in universe.annotation_url:graph.annotation_url[annotation] = universe.annotation_url[annotation]elif annotation in universe.annotation_pattern:graph.annotation_pattern[annotation] = universe.annotation_pattern[annotation]elif annotation in universe.annotation_list:graph.annotation_list[annotation] = universe.annotation_list[annotation]else:log.warning('', annotation)", "docstring": "Update the context of a subgraph from the universe of all knowledge.", "id": "f9382:m1"} {"signature": "@in_place_transformationdef highlight_nodes(graph: BELGraph, nodes: Optional[Iterable[BaseEntity]] = None, color: Optional[str]=None):", "body": "color = color or NODE_HIGHLIGHT_DEFAULT_COLORfor node in nodes if nodes is not None else graph:graph.node[node][NODE_HIGHLIGHT] = color", "docstring": "Adds a highlight tag to the given nodes.\n\n :param graph: A BEL graph\n :param nodes: The nodes to add a highlight tag on\n :param color: The color to highlight (use something that works with CSS)", "id": "f9383:m0"} {"signature": "def is_node_highlighted(graph: BELGraph, node: BaseEntity) -> bool:", "body": "return NODE_HIGHLIGHT in graph.node[node]", "docstring": "Returns if the given node is highlighted.\n\n :param graph: A BEL graph\n :param node: A BEL node\n :type node: tuple\n :return: Does the node contain highlight information?\n :rtype: bool", "id": "f9383:m1"} {"signature": "@in_place_transformationdef remove_highlight_nodes(graph: BELGraph, nodes: Optional[Iterable[BaseEntity]]=None) -> None:", "body": "for node in graph if nodes is None else nodes:if is_node_highlighted(graph, node):del graph.node[node][NODE_HIGHLIGHT]", "docstring": "Removes the highlight from the given nodes, or all nodes if none given.\n\n :param graph: A BEL graph\n :param nodes: The list of nodes to un-highlight", "id": "f9383:m2"} {"signature": "@in_place_transformationdef highlight_edges(graph: BELGraph, edges=None, color: Optional[str]=None) -> None:", "body": "color = color or EDGE_HIGHLIGHT_DEFAULT_COLORfor u, v, k, d in edges if edges is not None else graph.edges(keys=True, data=True):graph[u][v][k][EDGE_HIGHLIGHT] = color", "docstring": "Adds a highlight tag to the given edges.\n\n :param graph: A BEL graph\n :param edges: The edges (4-tuples of u, v, k, d) to add a highlight tag on\n :type edges: iter[tuple]\n :param str color: The color to highlight (use something that works with CSS)", "id": "f9383:m3"} {"signature": "def is_edge_highlighted(graph: BELGraph, u, v, k) -> bool:", "body": "return EDGE_HIGHLIGHT in graph[u][v][k]", "docstring": "Returns if the given edge is highlighted.\n\n :param graph: A BEL 
graph\n :return: Does the edge contain highlight information?\n :rtype: bool", "id": "f9383:m4"} {"signature": "@in_place_transformationdef remove_highlight_edges(graph: BELGraph, edges=None):", "body": "for u, v, k, _ in graph.edges(keys=True, data=True) if edges is None else edges:if is_edge_highlighted(graph, u, v, k):del graph[u][v][k][EDGE_HIGHLIGHT]", "docstring": "Remove the highlight from the given edges, or all edges if none given.\n\n :param graph: A BEL graph\n :param edges: The edges (4-tuple of u,v,k,d) to remove the highlight from)\n :type edges: iter[tuple]", "id": "f9383:m5"} {"signature": "@uni_in_place_transformationdef highlight_subgraph(universe: BELGraph, graph: BELGraph):", "body": "highlight_nodes(universe, graph)highlight_edges(universe, graph.edges())", "docstring": "Highlight all nodes/edges in the universe that in the given graph.\n\n :param universe: The universe of knowledge\n :param graph: The BEL graph to mutate", "id": "f9383:m6"} {"signature": "@in_place_transformationdef remove_highlight_subgraph(graph: BELGraph, subgraph: BELGraph):", "body": "remove_highlight_nodes(graph, subgraph.nodes())remove_highlight_edges(graph, subgraph.edges())", "docstring": "Remove the highlight from all nodes/edges in the graph that are in the subgraph.\n\n :param graph: The BEL graph to mutate\n :param subgraph: The subgraph from which to remove the highlighting", "id": "f9383:m7"} {"signature": "@transformationdef random_by_nodes(graph: BELGraph, percentage: Optional[float] = None) -> BELGraph:", "body": "percentage = percentage or assert < percentage <= nodes = graph.nodes()n = int(len(nodes) * percentage)subnodes = random.sample(nodes, n)result = graph.subgraph(subnodes)update_node_helper(graph, result)return result", "docstring": "Get a random graph by inducing over a percentage of the original nodes.\n\n :param graph: A BEL graph\n :param percentage: The percentage of edges to keep", "id": "f9384:m0"} {"signature": "@transformationdef random_by_edges(graph: BELGraph, percentage: Optional[float] = None) -> BELGraph:", "body": "percentage = percentage or assert < percentage <= edges = graph.edges(keys=True)n = int(graph.number_of_edges() * percentage)subedges = random.sample(edges, n)rv = graph.fresh_copy()for u, v, k in subedges:safe_add_edge(rv, u, v, k, graph[u][v][k])update_node_helper(graph, rv)return rv", "docstring": "Get a random graph by keeping a certain percentage of original edges.\n\n :param graph: A BEL graph\n :param percentage: What percentage of eges to take", "id": "f9384:m1"} {"signature": "@transformationdef shuffle_node_data(graph: BELGraph, key: str, percentage: Optional[float] = None) -> BELGraph:", "body": "percentage = percentage or assert < percentage <= n = graph.number_of_nodes()swaps = int(percentage * n * (n - ) / )result: BELGraph = graph.copy()for _ in range(swaps):s, t = random.sample(result.node, )result.nodes[s][key], result.nodes[t][key] = result.nodes[t][key], result.nodes[s][key]return result", "docstring": "Shuffle the node's data.\n\n Useful for permutation testing.\n\n :param graph: A BEL graph\n :param key: The node data dictionary key\n :param percentage: What percentage of possible swaps to make", "id": "f9384:m2"} {"signature": "@transformationdef shuffle_relations(graph: BELGraph, percentage: Optional[str] = None) -> BELGraph:", "body": "percentage = percentage or assert < percentage <= n = graph.number_of_edges()swaps = int(percentage * n * (n - ) / )result: BELGraph = graph.copy()edges = result.edges(keys=True)for _ in 
range(swaps):(s1, t1, k1), (s2, t2, k2) = random.sample(edges, )result[s1][t1][k1], result[s2][t2][k2] = result[s2][t2][k2], result[s1][t1][k1]return result", "docstring": "Shuffle the relations.\n\n Useful for permutation testing.\n\n :param graph: A BEL graph\n :param percentage: What percentage of possible swaps to make", "id": "f9384:m3"} {"signature": "@click.group(help=f\"\"f\"\")@click.version_option()def main():", "body": "", "docstring": "Command Line Interface for PyBEL Tools.", "id": "f9385:m0"} {"signature": "@main.group()@connection_option@click.pass_contextdef io(ctx, connection: str):", "body": "ctx.obj = Manager(connection=connection)", "docstring": "Upload and conversion utilities.", "id": "f9385:m1"} {"signature": "@main.group()def namespace():", "body": "", "docstring": "Namespace file utilities.", "id": "f9385:m2"} {"signature": "@namespace.command()@click.argument('')@click.argument('')@click.argument('')@click.argument('')@click.option('', default=getuser())@click.option('')@click.option('')@click.option('')@click.option('')@click.option('')@click.option('', default=sys.stdin, help=\"\")@click.option('', type=click.File(''), default=sys.stdout)def write(name, keyword, domain, citation, author, description, species, version, contact, license, values, output):", "body": "write_namespace(name, keyword, domain, author, citation, values,namespace_description=description,namespace_species=species,namespace_version=version,author_contact=contact,author_copyright=license,file=output,)", "docstring": "Build a namespace from items.", "id": "f9385:m3"} {"signature": "@namespace.command()@click.option('', '', type=click.File(''), default=sys.stdin, help=\"\")@click.option('', '', type=click.File(''), default=sys.stdout,help=\"\")def convert_to_annotation(file, output):", "body": "resource = parse_bel_resource(file)write_annotation(keyword=resource[''][''],values={k: '' for k in resource['']},citation_name=resource[''][''],description=resource[''][''],file=output,)", "docstring": "Convert a namespace file to an annotation file.", "id": "f9385:m4"} {"signature": "@main.group()def annotation():", "body": "", "docstring": "Annotation file utilities.", "id": "f9385:m5"} {"signature": "@annotation.command()@click.option('', '', type=click.File(''), default=sys.stdin, help=\"\")@click.option('', '', type=click.File(''), default=sys.stdout,help=\"\")@click.option('', help=\"\")def convert_to_namespace(file, output, keyword):", "body": "resource = parse_bel_resource(file)write_namespace(namespace_keyword=(keyword or resource['']['']),namespace_name=resource[''][''],namespace_description=resource[''][''],author_name='',namespace_domain=NAMESPACE_DOMAIN_OTHER,values=resource[''],citation_name=resource[''][''],file=output,)", "docstring": "Convert an annotation file to a namespace file.", "id": "f9385:m6"} {"signature": "@main.group()def document():", "body": "", "docstring": "BEL document utilities.", "id": "f9385:m7"} {"signature": "@document.command()@click.argument('')@click.argument('')@click.argument('')@click.argument('', nargs=-)@click.option('')@click.option('')@click.option('')@click.option('')@click.option('')@click.option('', type=click.File(''), default=sys.stdout)def boilerplate(name, contact, description, pmids, version, copyright, authors, licenses, disclaimer, output):", "body": "from .document_utils import 
write_boilerplatewrite_boilerplate(name=name,version=version,description=description,authors=authors,contact=contact,copyright=copyright,licenses=licenses,disclaimer=disclaimer,pmids=pmids,file=output,)", "docstring": "Build a template BEL document with the given PubMed identifiers.", "id": "f9385:m8"} {"signature": "@document.command()@click.argument('', nargs=-)@connection_option@click.option('', '', type=click.File(''), default=sys.stdin, help='')@click.option('', '', help=''.format(os.getcwd()))def serialize_namespaces(namespaces, connection: str, path, directory):", "body": "from .definition_utils import export_namespacesgraph = from_lines(path, manager=connection)export_namespaces(namespaces, graph, directory)", "docstring": "Parse a BEL document then serializes the given namespaces (errors and all) to the given directory.", "id": "f9385:m9"} {"signature": "@io.command()@graph_pickle_argument@click.option('', '', type=click.File(''), default=sys.stdout)def get_pmids(graph: BELGraph, output: TextIO):", "body": "for pmid in get_pubmed_identifiers(graph):click.echo(pmid, file=output)", "docstring": "Output PubMed identifiers from a graph to a stream.", "id": "f9385:m10"} {"signature": "@in_place_transformationdef overlay_data(graph: BELGraph,data: Mapping[BaseEntity, Any],label: Optional[str] = None,overwrite: bool = False,) -> None:", "body": "if label is None:label = WEIGHTfor node, value in data.items():if node not in graph:log.debug('', node)continueif label in graph.nodes[node] and not overwrite:log.debug('', label, node)continuegraph.nodes[node][label] = value", "docstring": "Overlays tabular data on the network\n\n :param graph: A BEL Graph\n :param data: A dictionary of {tuple node: data for that node}\n :param label: The annotation label to put in the node dictionary\n :param overwrite: Should old annotations be overwritten?", "id": "f9387:m0"} {"signature": "@in_place_transformationdef overlay_type_data(graph: BELGraph,data: Mapping[str, float],func: str,namespace: str,label: Optional[str] = None,overwrite: bool = False,impute: Optional[float] = None,) -> None:", "body": "new_data = {node: data.get(node[NAME], impute)for node in filter_nodes(graph, function_namespace_inclusion_builder(func, namespace))}overlay_data(graph, new_data, label=label, overwrite=overwrite)", "docstring": "Overlay tabular data on the network for data that comes from an data set with identifiers that lack\n namespaces.\n\n For example, if you want to overlay differential gene expression data from a table, that table\n probably has HGNC identifiers, but no specific annotations that they are in the HGNC namespace or\n that the entities to which they refer are RNA.\n\n :param graph: A BEL Graph\n :param dict data: A dictionary of {name: data}\n :param func: The function of the keys in the data dictionary\n :param namespace: The namespace of the keys in the data dictionary\n :param label: The annotation label to put in the node dictionary\n :param overwrite: Should old annotations be overwritten?\n :param impute: The value to use for missing data", "id": "f9387:m1"} {"signature": "def load_differential_gene_expression(path: str,gene_symbol_column: str = '',logfc_column: str = '',aggregator: Optional[Callable[[List[float]], float]] = None,) -> Mapping[str, float]:", "body": "if aggregator is None:aggregator = np.mediandf = pd.read_csv(path)assert gene_symbol_column in df.columnsassert logfc_column in df.columnsdf = df.loc[df[gene_symbol_column].notnull(), [gene_symbol_column, logfc_column]]values = 
defaultdict(list)for _, gene_symbol, log_fold_change in df.itertuples():values[gene_symbol].append(log_fold_change)return {gene_symbol: aggregator(log_fold_changes)for gene_symbol, log_fold_changes in values.items()}", "docstring": "Load and pre-process a differential gene expression data.\n\n :param path: The path to the CSV\n :param gene_symbol_column: The header of the gene symbol column in the data frame\n :param logfc_column: The header of the log-fold-change column in the data frame\n :param aggregator: A function that aggregates a list of differential gene expression values. Defaults to\n :func:`numpy.median`. Could also use: :func:`numpy.mean`, :func:`numpy.average`,\n :func:`numpy.min`, or :func:`numpy.max`\n :return: A dictionary of {gene symbol: log fold change}", "id": "f9387:m2"} {"signature": "def summarize_edge_filter(graph: BELGraph, edge_predicates: EdgePredicates) -> None:", "body": "passed = count_passed_edge_filter(graph, edge_predicates)print(''.format(passed, graph.number_of_edges(),(''.join(edge_filter.__name__ for edge_filter in edge_predicates)if isinstance(edge_predicates, Iterable) elseedge_predicates.__name__)))", "docstring": "Print a summary of the number of edges passing a given set of filters.", "id": "f9389:m0"} {"signature": "def build_edge_data_filter(annotations: Mapping, partial_match: bool = True) -> EdgePredicate: ", "body": "@edge_predicatedef annotation_dict_filter(data: EdgeData) -> bool:\"\"\"\"\"\"return subdict_matches(data, annotations, partial_match=partial_match)return annotation_dict_filter", "docstring": "Build a filter that keeps edges whose data dictionaries are super-dictionaries to the given dictionary.\n\n :param annotations: The annotation query dict to match\n :param partial_match: Should the query values be used as partial or exact matches? 
Defaults to :code:`True`.", "id": "f9389:m1"} {"signature": "def build_pmid_inclusion_filter(pmids: Strings) -> EdgePredicate:", "body": "if isinstance(pmids, str):@edge_predicatedef pmid_inclusion_filter(data: EdgeData) -> bool:\"\"\"\"\"\"return has_pubmed(data) and data[CITATION][CITATION_REFERENCE] == pmidselif isinstance(pmids, Iterable):pmids = set(pmids)@edge_predicatedef pmid_inclusion_filter(data: EdgeData) -> bool:\"\"\"\"\"\"return has_pubmed(data) and data[CITATION][CITATION_REFERENCE] in pmidselse:raise TypeErrorreturn pmid_inclusion_filter", "docstring": "Pass for edges with citations whose references are one of the given PubMed identifiers.\n\n :param pmids: A PubMed identifier or list of PubMed identifiers to filter for", "id": "f9389:m2"} {"signature": "def build_pmid_exclusion_filter(pmids: Strings) -> EdgePredicate:", "body": "if isinstance(pmids, str):@edge_predicatedef pmid_exclusion_filter(data: EdgeData) -> bool:\"\"\"\"\"\"return has_pubmed(data) and data[CITATION][CITATION_REFERENCE] != pmidselif isinstance(pmids, Iterable):pmids = set(pmids)@edge_predicatedef pmid_exclusion_filter(data: EdgeData) -> bool:\"\"\"\"\"\"return has_pubmed(data) and data[CITATION][CITATION_REFERENCE] not in pmidselse:raise TypeErrorreturn pmid_exclusion_filter", "docstring": "Fail for edges with citations whose references are one of the given PubMed identifiers.\n\n :param pmids: A PubMed identifier or list of PubMed identifiers to filter against", "id": "f9389:m3"} {"signature": "def build_author_inclusion_filter(authors: Strings) -> EdgePredicate:", "body": "if isinstance(authors, str):@edge_predicatedef author_filter(data: EdgeData) -> bool:\"\"\"\"\"\"return has_authors(data) and authors in data[CITATION][CITATION_AUTHORS]elif isinstance(authors, Iterable):authors = set(authors)@edge_predicatedef author_filter(data: EdgeData) -> bool:\"\"\"\"\"\"return has_authors(data) and any(author in data[CITATION][CITATION_AUTHORS]for author in authors)else:raise TypeErrorreturn author_filter", "docstring": "Pass only for edges with author information that matches one of the given authors.\n\n :param authors: The author or list of authors to filter by", "id": "f9389:m4"} {"signature": "def node_has_namespace(node: BaseEntity, namespace: str) -> bool:", "body": "ns = node.get(NAMESPACE)return ns is not None and ns == namespace", "docstring": "Pass for nodes that have the given namespace.", "id": "f9389:m5"} {"signature": "def node_has_namespaces(node: BaseEntity, namespaces: Set[str]) -> bool:", "body": "ns = node.get(NAMESPACE)return ns is not None and ns in namespaces", "docstring": "Pass for nodes that have one of the given namespaces.", "id": "f9389:m6"} {"signature": "def build_source_namespace_filter(namespaces: Strings) -> EdgePredicate:", "body": "if isinstance(namespaces, str):def source_namespace_filter(_, u: BaseEntity, __, ___) -> bool:return node_has_namespace(u, namespaces)elif isinstance(namespaces, Iterable):namespaces = set(namespaces)def source_namespace_filter(_, u: BaseEntity, __, ___) -> bool:return node_has_namespaces(u, namespaces)else:raise TypeErrorreturn source_namespace_filter", "docstring": "Pass for edges whose source nodes have the given namespace or one of the given namespaces.\n\n :param namespaces: The namespace or namespaces to filter by", "id": "f9389:m7"} {"signature": "def build_target_namespace_filter(namespaces: Strings) -> EdgePredicate:", "body": "if isinstance(namespaces, str):def target_namespace_filter(_, __, v: BaseEntity, ___) -> bool:return 
node_has_namespace(v, namespaces)elif isinstance(namespaces, Iterable):namespaces = set(namespaces)def target_namespace_filter(_, __, v: BaseEntity, ___) -> bool:return node_has_namespaces(v, namespaces)else:raise TypeErrorreturn target_namespace_filter", "docstring": "Only passes for edges whose target nodes have the given namespace or one of the given namespaces\n\n :param namespaces: The namespace or namespaces to filter by", "id": "f9389:m8"} {"signature": "@in_place_transformationdef remove_nodes_by_function(graph: BELGraph, func: Strings) -> None:", "body": "remove_filtered_nodes(graph, function_inclusion_filter_builder(func))", "docstring": "Remove nodes with the given function.\n\n This could be useful directly to remove pathologies.", "id": "f9390:m0"} {"signature": "@in_place_transformationdef remove_nodes_by_namespace(graph: BELGraph, namespace: Strings) -> None:", "body": "remove_filtered_nodes(graph, namespace_inclusion_builder(namespace))", "docstring": "Remove nodes with the given namespace.\n\n This might be useful to exclude information learned about distant species, such as excluding all information\n from MGI and RGD in diseases where mice and rats don't give much insight to the human disease mechanism.", "id": "f9390:m1"} {"signature": "@register_deprecated('')@in_place_transformationdef remove_mouse_nodes(graph: BELGraph) -> None:", "body": "remove_nodes_by_namespace(graph, ['', ''])", "docstring": "Remove nodes using the MGI and MGIID namespaces.", "id": "f9390:m2"} {"signature": "@register_deprecated('')@in_place_transformationdef remove_rat_nodes(graph: BELGraph) -> None:", "body": "remove_nodes_by_namespace(graph, ['', ''])", "docstring": "Remove nodes using the RGD and RGDID namespaces.", "id": "f9390:m3"} {"signature": "@in_place_transformationdef remove_nodes_by_function_namespace(graph: BELGraph, func: str, namespace: Strings) -> None:", "body": "remove_filtered_nodes(graph, function_namespace_inclusion_builder(func, namespace))", "docstring": "Remove nodes with the given function and namespace.\n\n This might be useful to exclude information learned about distant species, such as excluding all information\n from MGI and RGD in diseases where mice and rats don't give much insight to the human disease mechanism.", "id": "f9390:m4"} {"signature": "def summarize_node_filter(graph: BELGraph, node_filters: NodePredicates) -> None:", "body": "passed = count_passed_node_filter(graph, node_filters)print(''.format(passed, graph.number_of_nodes()))", "docstring": "Print a summary of the number of nodes passing a given set of filters.\n\n :param graph: A BEL graph\n :param node_filters: A node filter or list/tuple of node filters", "id": "f9391:m0"} {"signature": "def node_inclusion_filter_builder(nodes: Iterable[BaseEntity]) -> NodePredicate:", "body": "node_set = set(nodes)def inclusion_filter(_: BELGraph, node: BaseEntity) -> bool:\"\"\"\"\"\"return node in node_setreturn inclusion_filter", "docstring": "Build a filter that only passes on nodes in the given list.\n\n :param nodes: An iterable of BEL nodes", "id": "f9391:m1"} {"signature": "def node_exclusion_filter_builder(nodes: Iterable[BaseEntity]) -> NodePredicate:", "body": "node_set = set(nodes)def exclusion_filter(_: BELGraph, node: BaseEntity) -> bool:\"\"\"\"\"\"return node not in node_setreturn exclusion_filter", "docstring": "Build a filter that fails on nodes in the given list.", "id": "f9391:m2"} {"signature": "def function_inclusion_filter_builder(func: Strings) -> NodePredicate:", "body": "if 
isinstance(func, str):return _single_function_inclusion_filter_builder(func)elif isinstance(func, Iterable):return _collection_function_inclusion_builder(func)raise ValueError(''.format(func))", "docstring": "Build a filter that only passes on nodes of the given function(s).\n\n :param func: A BEL Function or list/set/tuple of BEL functions", "id": "f9391:m5"} {"signature": "def function_exclusion_filter_builder(func: Strings) -> NodePredicate:", "body": "if isinstance(func, str):def function_exclusion_filter(_: BELGraph, node: BaseEntity) -> bool:\"\"\"\"\"\"return node[FUNCTION] != funcreturn function_exclusion_filterelif isinstance(func, Iterable):functions = set(func)def functions_exclusion_filter(_: BELGraph, node: BaseEntity) -> bool:\"\"\"\"\"\"return node[FUNCTION] not in functionsreturn functions_exclusion_filterraise ValueError(''.format(func))", "docstring": "Build a filter that fails on nodes of the given function(s).\n\n :param func: A BEL Function or list/set/tuple of BEL functions", "id": "f9391:m6"} {"signature": "def function_namespace_inclusion_builder(func: str, namespace: Strings) -> NodePredicate:", "body": "if isinstance(namespace, str):def function_namespaces_filter(_: BELGraph, node: BaseEntity) -> bool:\"\"\"\"\"\"if func != node[FUNCTION]:return Falsereturn NAMESPACE in node and node[NAMESPACE] == namespaceelif isinstance(namespace, Iterable):namespaces = set(namespace)def function_namespaces_filter(_: BELGraph, node: BaseEntity) -> bool:\"\"\"\"\"\"if func != node[FUNCTION]:return Falsereturn NAMESPACE in node and node[NAMESPACE] in namespaceselse:raise ValueError(''.format(namespace))return function_namespaces_filter", "docstring": "Build a filter function for matching the given BEL function with the given namespace or namespaces.\n\n :param func: A BEL function\n :param namespace: The namespace to search by", "id": "f9391:m7"} {"signature": "def data_contains_key_builder(key: str) -> NodePredicate: ", "body": "def data_contains_key(_: BELGraph, node: BaseEntity) -> bool:\"\"\"\"\"\"return key in nodereturn data_contains_key", "docstring": "Build a filter that passes only on nodes that have the given key in their data dictionary.\n\n :param key: A key for the node's data dictionary", "id": "f9391:m8"} {"signature": "def variants_of(graph: BELGraph,node: Protein,modifications: Optional[Set[str]] = None,) -> Set[Protein]:", "body": "if modifications:return _get_filtered_variants_of(graph, node, modifications)return {vfor u, v, key, data in graph.edges(keys=True, data=True)if (u == nodeand data[RELATION] == HAS_VARIANTand pybel.struct.has_protein_modification(v))}", "docstring": "Returns all variants of the given node.", "id": "f9391:m10"} {"signature": "def get_variants_to_controllers(graph: BELGraph,node: Protein,modifications: Optional[Set[str]] = None,) -> Mapping[Protein, Set[Protein]]:", "body": "rv = defaultdict(set)variants = variants_of(graph, node, modifications)for controller, variant, data in graph.in_edges(variants, data=True):if data[RELATION] in CAUSAL_RELATIONS:rv[variant].add(controller)return rv", "docstring": "Get a mapping from variants of the given node to all of its upstream controllers.", "id": "f9391:m12"} {"signature": "def insert_graph(self, graph: BELGraph, **_kwargs) -> Network:", "body": "result = _Namespace()result.id = len(self.networks)self.networks[result.id] = graphreturn result", "docstring": "Insert a graph and return the resulting ORM object (mocked).", "id": "f9395:c1:m1"} {"signature": "def get_graph_by_id(self, network_id: 
int) -> BELGraph:", "body": "return self.networks[network_id]", "docstring": "Get a graph by its identifier.", "id": "f9395:c1:m2"} {"signature": "def get_graphs_by_ids(self, network_ids: Iterable[int]) -> List[BELGraph]:", "body": "return [self.networks[network_id]for network_id in network_ids]", "docstring": "Get several graphs by their identifiers.", "id": "f9395:c1:m3"} {"signature": "def group_dict_set(iterator: Iterable[Tuple[A, B]]) -> Mapping[A, Set[B]]:", "body": "d = defaultdict(set)for key, value in iterator:d[key].add(value)return dict(d)", "docstring": "Make a dict that accumulates the values for each key in an iterator of doubles.", "id": "f9396:m0"} {"signature": "def get_edge_relations(graph: BELGraph) -> Mapping[Tuple[BaseEntity, BaseEntity], Set[str]]:", "body": "return group_dict_set(((u, v), d[RELATION])for u, v, d in graph.edges(data=True))", "docstring": "Build a dictionary of {node pair: set of edge types}.", "id": "f9396:m1"} {"signature": "def count_unique_relations(graph: BELGraph) -> Counter:", "body": "return Counter(itt.chain.from_iterable(get_edge_relations(graph).values()))", "docstring": "Return a histogram of the different types of relations present in a graph.\n\n Note: this operation only counts each type of edge once for each pair of nodes", "id": "f9396:m2"} {"signature": "def get_annotations_containing_keyword(graph: BELGraph, keyword: str) -> List[Mapping[str, str]]:", "body": "return [{'': annotation,'': value}for annotation, value in iter_annotation_value_pairs(graph)if keyword.lower() in value.lower()]", "docstring": "Get annotation/value pairs for values for whom the search string is a substring\n\n :param graph: A BEL graph\n :param keyword: Search for annotations whose values have this as a substring", "id": "f9396:m3"} {"signature": "def count_annotation_values(graph: BELGraph, annotation: str) -> Counter:", "body": "return Counter(iter_annotation_values(graph, annotation))", "docstring": "Count in how many edges each annotation appears in a graph\n\n :param graph: A BEL graph\n :param annotation: The annotation to count\n :return: A Counter from {annotation value: frequency}", "id": "f9396:m4"} {"signature": "def count_annotation_values_filtered(graph: BELGraph,annotation: str,source_predicate: Optional[NodePredicate] = None,target_predicate: Optional[NodePredicate] = None,) -> Counter:", "body": "if source_predicate and target_predicate:return Counter(data[ANNOTATIONS][annotation]for u, v, data in graph.edges(data=True)if edge_has_annotation(data, annotation) and source_predicate(graph, u) and target_predicate(graph, v))elif source_predicate:return Counter(data[ANNOTATIONS][annotation]for u, v, data in graph.edges(data=True)if edge_has_annotation(data, annotation) and source_predicate(graph, u))elif target_predicate:return Counter(data[ANNOTATIONS][annotation]for u, v, data in graph.edges(data=True)if edge_has_annotation(data, annotation) and target_predicate(graph, u))else:return Counter(data[ANNOTATIONS][annotation]for u, v, data in graph.edges(data=True)if edge_has_annotation(data, annotation))", "docstring": "Count in how many edges each annotation appears in a graph, but filter out source nodes and target nodes.\n\n See :func:`pybel_tools.utils.keep_node` for a basic filter.\n\n :param graph: A BEL graph\n :param annotation: The annotation to count\n :param source_predicate: A predicate (graph, node) -> bool for keeping source nodes\n :param target_predicate: A predicate (graph, node) -> bool for keeping target nodes\n :return: A Counter 
from {annotation value: frequency}", "id": "f9396:m5"} {"signature": "def pair_is_consistent(graph: BELGraph, u: BaseEntity, v: BaseEntity) -> Optional[str]:", "body": "relations = {data[RELATION] for data in graph[u][v].values()}if != len(relations):returnreturn list(relations)[]", "docstring": "Return if the edges between the given nodes are consistent, meaning they all have the same relation.\n\n :return: If the edges aren't consistent, return false, otherwise return the relation type", "id": "f9396:m6"} {"signature": "def get_contradictory_pairs(graph: BELGraph) -> Iterable[Tuple[BaseEntity, BaseEntity]]:", "body": "for u, v in graph.edges():if pair_has_contradiction(graph, u, v):yield u, v", "docstring": "Iterates over contradictory node pairs in the graph based on their causal relationships\n\n :return: An iterator over (source, target) node pairs that have contradictory causal edges", "id": "f9396:m7"} {"signature": "def get_consistent_edges(graph: BELGraph) -> Iterable[Tuple[BaseEntity, BaseEntity]]:", "body": "for u, v in graph.edges():if pair_is_consistent(graph, u, v):yield u, v", "docstring": "Yield pairs of (source node, target node) for which all of their edges have the same type of relation.\n\n :return: An iterator over (source, target) node pairs corresponding to edges with many inconsistent relations", "id": "f9396:m8"} {"signature": "def pair_has_contradiction(graph: BELGraph, u: BaseEntity, v: BaseEntity) -> bool:", "body": "relations = {data[RELATION] for data in graph[u][v].values()}return relation_set_has_contradictions(relations)", "docstring": "Check if a pair of nodes has any contradictions in their causal relationships.\n\n Assumes both nodes are in the graph.", "id": "f9397:m0"} {"signature": "def relation_set_has_contradictions(relations: Set[str]) -> bool:", "body": "has_increases = any(relation in CAUSAL_INCREASE_RELATIONS for relation in relations)has_decreases = any(relation in CAUSAL_DECREASE_RELATIONS for relation in relations)has_cnc = any(relation == CAUSES_NO_CHANGE for relation in relations)return < sum([has_cnc, has_decreases, has_increases])", "docstring": "Return if the set of BEL relations contains a contradiction.", "id": "f9397:m1"} {"signature": "def plot_summary_axes(graph: BELGraph, lax, rax, logx=True):", "body": "ntc = count_functions(graph)etc = count_relations(graph)df = pd.DataFrame.from_dict(dict(ntc), orient='')df_ec = pd.DataFrame.from_dict(dict(etc), orient='')df.sort_values(, ascending=True).plot(kind='', logx=logx, ax=lax)lax.set_title(''.format(graph.number_of_nodes()))df_ec.sort_values(, ascending=True).plot(kind='', logx=logx, ax=rax)rax.set_title(''.format(graph.number_of_edges()))", "docstring": "Plots your graph summary statistics on the given axes.\n\n After, you should run :func:`plt.tight_layout` and you must run :func:`plt.show` to view.\n\n Shows:\n 1. Count of nodes, grouped by function type\n 2. 
Count of edges, grouped by relation type\n\n :param pybel.BELGraph graph: A BEL graph\n :param lax: An axis object from matplotlib\n :param rax: An axis object from matplotlib\n\n Example usage:\n\n >>> import matplotlib.pyplot as plt\n >>> from pybel import from_pickle\n >>> from pybel_tools.summary import plot_summary_axes\n >>> graph = from_pickle('~/dev/bms/aetionomy/parkinsons.gpickle')\n >>> fig, axes = plt.subplots(1, 2, figsize=(10, 4))\n >>> plot_summary_axes(graph, axes[0], axes[1])\n >>> plt.tight_layout()\n >>> plt.show()", "id": "f9398:m0"} {"signature": "def plot_summary(graph: BELGraph, plt, logx=True, **kwargs):", "body": "fig, axes = plt.subplots(, , **kwargs)lax = axes[]rax = axes[]plot_summary_axes(graph, lax, rax, logx=logx)plt.tight_layout()return fig, axes", "docstring": "Plots your graph summary statistics. This function is a thin wrapper around :func:`plot_summary_axis`. It\n automatically takes care of building figures given matplotlib's pyplot module as an argument. After, you need\n to run :func:`plt.show`.\n\n :code:`plt` is given as an argument to avoid needing matplotlib as a dependency for this function\n\n Shows:\n\n 1. Count of nodes, grouped by function type\n 2. Count of edges, grouped by relation type\n\n :param plt: Give :code:`matplotlib.pyplot` to this parameter\n :param kwargs: keyword arguments to give to :func:`plt.subplots`\n\n Example usage:\n\n >>> import matplotlib.pyplot as plt\n >>> from pybel import from_pickle\n >>> from pybel_tools.summary import plot_summary\n >>> graph = from_pickle('~/dev/bms/aetionomy/parkinsons.gpickle')\n >>> plot_summary(graph, plt, figsize=(10, 4))\n >>> plt.show()", "id": "f9398:m1"} {"signature": "def count_subgraph_sizes(graph: BELGraph, annotation: str = '') -> Counter[int]:", "body": "return count_dict_values(group_nodes_by_annotation(graph, annotation))", "docstring": "Count the number of nodes in each subgraph induced by an annotation.\n\n :param annotation: The annotation to group by and compare. Defaults to 'Subgraph'\n :return: A dictionary from {annotation value: number of nodes}", "id": "f9399:m0"} {"signature": "def calculate_subgraph_edge_overlap(graph: BELGraph,annotation: str = '') -> Tuple[Mapping[str, EdgeSet],Mapping[str, Mapping[str, EdgeSet]],Mapping[str, Mapping[str, EdgeSet]],Mapping[str, Mapping[str, float]],]:", "body": "sg2edge = defaultdict(set)for u, v, d in graph.edges(data=True):if not edge_has_annotation(d, annotation):continuesg2edge[d[ANNOTATIONS][annotation]].add((u, v))subgraph_intersection = defaultdict(dict)subgraph_union = defaultdict(dict)result = defaultdict(dict)for sg1, sg2 in itt.product(sg2edge, repeat=):subgraph_intersection[sg1][sg2] = sg2edge[sg1] & sg2edge[sg2]subgraph_union[sg1][sg2] = sg2edge[sg1] | sg2edge[sg2]result[sg1][sg2] = len(subgraph_intersection[sg1][sg2]) / len(subgraph_union[sg1][sg2])return sg2edge, subgraph_intersection, subgraph_union, result", "docstring": "Build a DatafFame to show the overlap between different sub-graphs.\n\n Options:\n 1. Total number of edges overlap (intersection)\n 2. Percentage overlap (tanimoto similarity)\n\n :param graph: A BEL graph\n :param annotation: The annotation to group by and compare. 
Defaults to 'Subgraph'\n :return: {subgraph: set of edges}, {(subgraph 1, subgraph2): set of intersecting edges},\n {(subgraph 1, subgraph2): set of unioned edges}, {(subgraph 1, subgraph2): tanimoto similarity},", "id": "f9399:m1"} {"signature": "def summarize_subgraph_edge_overlap(graph: BELGraph, annotation: str = '') -> Mapping[str, Mapping[str, float]]:", "body": "_, _, _, subgraph_overlap = calculate_subgraph_edge_overlap(graph, annotation)return subgraph_overlap", "docstring": "Return a similarity matrix between all subgraphs (or other given annotation).\n\n :param annotation: The annotation to group by and compare. Defaults to :code:`\"Subgraph\"`\n :return: A similarity matrix in a dict of dicts\n :rtype: dict", "id": "f9399:m2"} {"signature": "def summarize_subgraph_node_overlap(graph: BELGraph, node_predicates=None, annotation: str = ''):", "body": "r1 = group_nodes_by_annotation_filtered(graph, node_predicates=node_predicates, annotation=annotation)return calculate_tanimoto_set_distances(r1)", "docstring": "Calculate the subgraph similarity tanimoto similarity in nodes passing the given filter.\n\n Provides an alternate view on subgraph similarity, from a more node-centric view", "id": "f9399:m3"} {"signature": "def rank_subgraph_by_node_filter(graph: BELGraph,node_predicates: Union[NodePredicate, Iterable[NodePredicate]],annotation: str = '',reverse: bool = True,) -> List[Tuple[str, int]]:", "body": "r1 = group_nodes_by_annotation_filtered(graph, node_predicates=node_predicates, annotation=annotation)r2 = count_dict_values(r1)return sorted(r2.items(), key=itemgetter(), reverse=reverse)", "docstring": "Rank sub-graphs by which have the most nodes matching an given filter.\n\n A use case for this function would be to identify which subgraphs contain the most differentially expressed\n genes.\n\n >>> from pybel import from_pickle\n >>> from pybel.constants import GENE\n >>> from pybel_tools.integration import overlay_type_data\n >>> from pybel_tools.summary import rank_subgraph_by_node_filter\n >>> import pandas as pd\n >>> graph = from_pickle('~/dev/bms/aetionomy/alzheimers.gpickle')\n >>> df = pd.read_csv('~/dev/bananas/data/alzheimers_dgxp.csv', columns=['Gene', 'log2fc'])\n >>> data = {gene: log2fc for _, gene, log2fc in df.itertuples()}\n >>> overlay_type_data(graph, data, 'log2fc', GENE, 'HGNC', impute=0.0)\n >>> results = rank_subgraph_by_node_filter(graph, lambda g, n: 1.3 < abs(g[n]['log2fc']))", "id": "f9399:m4"} {"signature": "def get_namespaces_with_incorrect_names(graph: BELGraph) -> Set[str]:", "body": "return {exc.namespacefor _, exc, _ in graph.warningsif isinstance(exc, (MissingNamespaceNameWarning, MissingNamespaceRegexWarning))}", "docstring": "Return the set of all namespaces with incorrect names in the graph.", "id": "f9400:m0"} {"signature": "def get_undefined_namespaces(graph: BELGraph) -> Set[str]:", "body": "return {exc.namespacefor _, exc, _ in graph.warningsif isinstance(exc, UndefinedNamespaceWarning)}", "docstring": "Get all namespaces that are used in the BEL graph aren't actually defined.", "id": "f9400:m1"} {"signature": "def get_incorrect_names_by_namespace(graph: BELGraph, namespace: str) -> Set[str]:", "body": "return {exc.namefor _, exc, _ in graph.warningsif isinstance(exc, (MissingNamespaceNameWarning, MissingNamespaceRegexWarning)) and exc.namespace == namespace}", "docstring": "Return the set of all incorrect names from the given namespace in the graph.\n\n :return: The set of all incorrect names from the given namespace in the graph", 
"id": "f9400:m2"} {"signature": "def get_undefined_namespace_names(graph: BELGraph, namespace: str) -> Set[str]:", "body": "return {exc.namefor _, exc, _ in graph.warningsif isinstance(exc, UndefinedNamespaceWarning) and exc.namespace == namespace}", "docstring": "Get the names from a namespace that wasn't actually defined.\n\n :return: The set of all names from the undefined namespace", "id": "f9400:m3"} {"signature": "def get_incorrect_names(graph: BELGraph) -> Mapping[str, Set[str]]:", "body": "return {namespace: get_incorrect_names_by_namespace(graph, namespace)for namespace in get_namespaces(graph)}", "docstring": "Return the dict of the sets of all incorrect names from the given namespace in the graph.\n\n :return: The set of all incorrect names from the given namespace in the graph", "id": "f9400:m4"} {"signature": "def get_undefined_annotations(graph: BELGraph) -> Set[str]:", "body": "return {exc.annotationfor _, exc, _ in graph.warningsif isinstance(exc, UndefinedAnnotationWarning)}", "docstring": "Get all annotations that aren't actually defined.\n\n :return: The set of all undefined annotations", "id": "f9400:m5"} {"signature": "def calculate_incorrect_name_dict(graph: BELGraph) -> Mapping[str, str]:", "body": "missing = defaultdict(list)for _, e, ctx in graph.warnings:if not isinstance(e, (MissingNamespaceNameWarning, MissingNamespaceRegexWarning)):continuemissing[e.namespace].append(e.name)return dict(missing)", "docstring": "Group all of the incorrect identifiers in a dict of {namespace: list of erroneous names}.\n\n :return: A dictionary of {namespace: list of erroneous names}", "id": "f9400:m6"} {"signature": "def calculate_error_by_annotation(graph: BELGraph, annotation: str) -> Mapping[str, List[str]]:", "body": "results = defaultdict(list)for _, exc, ctx in graph.warnings:if not ctx or not edge_has_annotation(ctx, annotation):continuevalues = ctx[ANNOTATIONS][annotation]if isinstance(values, str):results[values].append(exc.__class__.__name__)elif isinstance(values, Iterable):for value in values:results[value].append(exc.__class__.__name__)return dict(results)", "docstring": "Group the graph by a given annotation and builds lists of errors for each.\n\n :return: A dictionary of {annotation value: list of errors}", "id": "f9400:m7"} {"signature": "def group_errors(graph: BELGraph) -> Mapping[str, List[int]]:", "body": "warning_summary = defaultdict(list)for _, exc, _ in graph.warnings:warning_summary[str(exc)].append(exc.line_number)return dict(warning_summary)", "docstring": "Group the errors together for analysis of the most frequent error.\n\n :return: A dictionary of {error string: list of line numbers}", "id": "f9400:m8"} {"signature": "def get_most_common_errors(graph: BELGraph, n: Optional[int] = ):", "body": "return count_dict_values(group_errors(graph)).most_common(n)", "docstring": "Get the (n) most common errors in a graph.", "id": "f9400:m9"} {"signature": "def get_names_including_errors_by_namespace(graph: BELGraph, namespace: str) -> Set[str]:", "body": "return get_names_by_namespace(graph, namespace) | get_incorrect_names_by_namespace(graph, namespace)", "docstring": "Takes the names from the graph in a given namespace (:func:`pybel.struct.summary.get_names_by_namespace`) and\n the erroneous names from the same namespace (:func:`get_incorrect_names_by_namespace`) and returns them together\n as a unioned set\n\n :return: The set of all correct and incorrect names from the given namespace in the graph", "id": "f9400:m10"} {"signature": "def 
get_names_including_errors(graph: BELGraph) -> Mapping[str, Set[str]]:", "body": "return {namespace: get_names_including_errors_by_namespace(graph, namespace)for namespace in get_namespaces(graph)}", "docstring": "Takes the names from the graph in a given namespace and the erroneous names from the same namespace and returns\n them together as a unioned set\n\n :return: The dict of the sets of all correct and incorrect names from the given namespace in the graph", "id": "f9400:m11"} {"signature": "def get_causal_out_edges(graph: BELGraph,nbunch: Union[BaseEntity, Iterable[BaseEntity]],) -> Set[Tuple[BaseEntity, BaseEntity]]:", "body": "return {(u, v)for u, v, k, d in graph.out_edges(nbunch, keys=True, data=True)if is_causal_relation(graph, u, v, k, d)}", "docstring": "Get the out-edges to the given node that are causal.\n\n :return: A set of (source, target) pairs where the source is the given node", "id": "f9402:m0"} {"signature": "def get_causal_in_edges(graph: BELGraph,nbunch: Union[BaseEntity, Iterable[BaseEntity]],) -> Set[Tuple[BaseEntity, BaseEntity]]:", "body": "return {(u, v)for u, v, k, d in graph.in_edges(nbunch, keys=True, data=True)if is_causal_relation(graph, u, v, k, d)}", "docstring": "Get the in-edges to the given node that are causal.\n\n :return: A set of (source, target) pairs where the target is the given node", "id": "f9402:m1"} {"signature": "def get_causal_source_nodes(graph: BELGraph, func: str) -> Set[BaseEntity]:", "body": "return {nodefor node in graphif node.function == func and is_causal_source(graph, node)}", "docstring": "Return a set of all nodes that have an in-degree of 0.\n\n This likely means that it is an external perturbagen and is not known to have any causal origin from within the\n biological system. These nodes are useful to identify because they generally don't provide any mechanistic insight.", "id": "f9402:m2"} {"signature": "def get_causal_central_nodes(graph: BELGraph, func: str) -> Set[BaseEntity]:", "body": "return {nodefor node in graphif node.function == func and is_causal_central(graph, node)}", "docstring": "Return a set of all nodes that have both an in-degree > 0 and out-degree > 0.\n\n This means that they are an integral part of a pathway, since they are both produced and consumed.", "id": "f9402:m3"} {"signature": "def get_causal_sink_nodes(graph: BELGraph, func) -> Set[BaseEntity]:", "body": "return {nodefor node in graphif node.function == func and is_causal_sink(graph, node)}", "docstring": "Returns a set of all ABUNDANCE nodes that have an causal out-degree of 0.\n\n This likely means that the knowledge assembly is incomplete, or there is a curation error.", "id": "f9402:m4"} {"signature": "def get_degradations(graph: BELGraph) -> Set[BaseEntity]:", "body": "return get_nodes(graph, is_degraded)", "docstring": "Get all nodes that are degraded.", "id": "f9402:m5"} {"signature": "def get_activities(graph: BELGraph) -> Set[BaseEntity]:", "body": "return get_nodes(graph, has_activity)", "docstring": "Get all nodes that have molecular activities.", "id": "f9402:m6"} {"signature": "def get_translocated(graph: BELGraph) -> Set[BaseEntity]:", "body": "return get_nodes(graph, is_translocated)", "docstring": "Get all nodes that are translocated.", "id": "f9402:m7"} {"signature": "def count_top_centrality(graph: BELGraph, number: Optional[int] = ) -> Mapping[BaseEntity, int]:", "body": "dd = nx.betweenness_centrality(graph)dc = Counter(dd)return dict(dc.most_common(number))", "docstring": "Get top centrality dictionary.", "id": "f9402:m9"} 
{"signature": "def get_modifications_count(graph: BELGraph) -> Mapping[str, int]:", "body": "return remove_falsy_values({'': len(get_translocated(graph)),'': len(get_degradations(graph)),'': len(get_activities(graph)),})", "docstring": "Get a modifications count dictionary.", "id": "f9402:m10"} {"signature": "def remove_falsy_values(counter: Mapping[Any, int]) -> Mapping[Any, int]:", "body": "return {label: countfor label, count in counter.items()if count}", "docstring": "Remove all values that are zero.", "id": "f9402:m11"} {"signature": "def _generate_citation_dict(graph: BELGraph) -> Mapping[str, Mapping[Tuple[BaseEntity, BaseEntity], str]]:", "body": "results = defaultdict(lambda: defaultdict(set))for u, v, data in graph.edges(data=True):if CITATION not in data:continueresults[data[CITATION][CITATION_TYPE]][u, v].add(data[CITATION][CITATION_REFERENCE].strip())return dict(results)", "docstring": "Prepare a citation data dictionary from a graph.\n\n :return: A dictionary of dictionaries {citation type: {(source, target): citation reference}", "id": "f9403:m0"} {"signature": "def get_pmid_by_keyword(keyword: str,graph: Optional[BELGraph] = None,pubmed_identifiers: Optional[Set[str]] = None,) -> Set[str]:", "body": "if pubmed_identifiers is not None:return {pubmed_identifierfor pubmed_identifier in pubmed_identifiersif pubmed_identifier.startswith(keyword)}if graph is None:raise ValueError('')return {pubmed_identifierfor pubmed_identifier in iterate_pubmed_identifiers(graph)if pubmed_identifier.startswith(keyword)}", "docstring": "Get the set of PubMed identifiers beginning with the given keyword string.\n\n :param keyword: The beginning of a PubMed identifier\n :param graph: A BEL graph\n :param pubmed_identifiers: A set of pre-cached PubMed identifiers\n :return: A set of PubMed identifiers starting with the given string", "id": "f9403:m1"} {"signature": "def count_pmids(graph: BELGraph) -> Counter:", "body": "return Counter(iterate_pubmed_identifiers(graph))", "docstring": "Count the frequency of PubMed documents in a graph.\n\n :return: A Counter from {(pmid, name): frequency}", "id": "f9403:m2"} {"signature": "def count_citations(graph: BELGraph, **annotations) -> Counter:", "body": "citations = defaultdict(set)annotation_dict_filter = build_edge_data_filter(annotations)for u, v, _, d in filter_edges(graph, annotation_dict_filter):if CITATION not in d:continuecitations[u, v].add((d[CITATION][CITATION_TYPE], d[CITATION][CITATION_REFERENCE].strip()))return Counter(itt.chain.from_iterable(citations.values()))", "docstring": "Counts the citations in a graph based on a given filter\n\n :param graph: A BEL graph\n :param dict annotations: The annotation filters to use\n :return: A counter from {(citation type, citation reference): frequency}", "id": "f9403:m3"} {"signature": "def count_citations_by_annotation(graph: BELGraph, annotation: str) -> Mapping[str, typing.Counter[str]]:", "body": "citations = defaultdict(lambda: defaultdict(set))for u, v, data in graph.edges(data=True):if not edge_has_annotation(data, annotation) or CITATION not in data:continuek = data[ANNOTATIONS][annotation]citations[k][u, v].add((data[CITATION][CITATION_TYPE], data[CITATION][CITATION_REFERENCE].strip()))return {k: Counter(itt.chain.from_iterable(v.values())) for k, v in citations.items()}", "docstring": "Group the citation counters by subgraphs induced by the annotation.\n\n :param graph: A BEL graph\n :param annotation: The annotation to use to group the graph\n :return: A dictionary of Counters {subgraph 
name: Counter from {citation: frequency}}", "id": "f9403:m4"} {"signature": "def count_authors(graph: BELGraph) -> typing.Counter[str]:", "body": "return Counter(graph._iterate_authors())", "docstring": "Count the number of edges in which each author appears.", "id": "f9403:m5"} {"signature": "def count_author_publications(graph: BELGraph) -> typing.Counter[str]:", "body": "authors = group_as_dict(_iter_author_publiations(graph))return Counter(count_dict_values(count_defaultdict(authors)))", "docstring": "Count the number of publications of each author to the given graph.", "id": "f9403:m6"} {"signature": "def get_authors(graph: BELGraph) -> Set[str]:", "body": "return set(graph._iterate_authors())", "docstring": "Get the set of all authors in the given graph.", "id": "f9403:m8"} {"signature": "def get_authors_by_keyword(keyword: str, graph=None, authors=None) -> Set[str]:", "body": "keyword_lower = keyword.lower()if authors is not None:return {authorfor author in authorsif keyword_lower in author.lower()}if graph is None:raise ValueError('')return {authorfor author in get_authors(graph)if keyword_lower in author.lower()}", "docstring": "Get authors for whom the search term is a substring.\n\n :param pybel.BELGraph graph: A BEL graph\n :param keyword: The keyword to search the author strings for\n :param set[str] authors: An optional set of pre-cached authors calculated from the graph\n :return: A set of authors with the keyword as a substring", "id": "f9403:m9"} {"signature": "def count_authors_by_annotation(graph: BELGraph, annotation: str = '') -> Mapping[str, typing.Counter[str]]:", "body": "authors = group_as_dict(_iter_authors_by_annotation(graph, annotation=annotation))return count_defaultdict(authors)", "docstring": "Group the author counters by sub-graphs induced by the annotation.\n\n :param graph: A BEL graph\n :param annotation: The annotation to use to group the graph\n :return: A dictionary of Counters {subgraph name: Counter from {author: frequency}}", "id": "f9403:m10"} {"signature": "def get_evidences_by_pmid(graph: BELGraph, pmids: Union[str, Iterable[str]]):", "body": "result = defaultdict(set)for _, _, _, data in filter_edges(graph, build_pmid_inclusion_filter(pmids)):result[data[CITATION][CITATION_REFERENCE]].add(data[EVIDENCE])return dict(result)", "docstring": "Get a dictionary from the given PubMed identifiers to the sets of all evidence strings associated with each\n in the graph.\n\n :param graph: A BEL graph\n :param pmids: An iterable of PubMed identifiers, as strings. 
Is consumed and converted to a set.\n :return: A dictionary of {pmid: set of all evidence strings}\n :rtype: dict", "id": "f9403:m12"} {"signature": "def count_citation_years(graph: BELGraph) -> typing.Counter[int]:", "body": "result = defaultdict(set)for _, _, data in graph.edges(data=True):if CITATION not in data or CITATION_DATE not in data[CITATION]:continuetry:dt = _ensure_datetime(data[CITATION][CITATION_DATE])result[dt.year].add((data[CITATION][CITATION_TYPE], data[CITATION][CITATION_REFERENCE]))except Exception:continuereturn count_dict_values(result)", "docstring": "Count the number of citations from each year.", "id": "f9403:m13"} {"signature": "def get_citation_years(graph: BELGraph) -> List[Tuple[int, int]]:", "body": "return create_timeline(count_citation_years(graph))", "docstring": "Create a citation timeline counter from the graph.", "id": "f9403:m15"} {"signature": "def create_timeline(year_counter: typing.Counter[int]) -> List[Tuple[int, int]]:", "body": "if not year_counter:return []from_year = min(year_counter) - until_year = datetime.now().year + return [(year, year_counter.get(year, ))for year in range(from_year, until_year)]", "docstring": "Complete the Counter timeline.\n\n :param Counter year_counter: counter dict for each year\n :return: complete timeline", "id": "f9403:m16"} {"signature": "def count_confidences(graph: BELGraph) -> typing.Counter[str]:", "body": "return Counter((''if ANNOTATIONS not in data or '' not in data[ANNOTATIONS] elselist(data[ANNOTATIONS][''])[])for _, _, data in graph.edges(data=True)if CITATION in data )", "docstring": "Count the confidences in the graph.", "id": "f9403:m17"} {"signature": "def pairwise(iterable: Iterable[X]) -> Iterable[Tuple[X, X]]:", "body": "a, b = itt.tee(iterable)next(b, None)return zip(a, b)", "docstring": "Iterate over pairs in list s -> (s0,s1), (s1,s2), (s2, s3), ...", "id": "f9404:m0"} {"signature": "def count_defaultdict(dict_of_lists: Mapping[X, List[Y]]) -> Mapping[X, typing.Counter[Y]]:", "body": "return {k: Counter(v)for k, v in dict_of_lists.items()}", "docstring": "Count the number of elements in each value of the dictionary.", "id": "f9404:m2"} {"signature": "def count_dict_values(dict_of_counters: Mapping[X, Sized]) -> typing.Counter[X]:", "body": "return Counter({k: len(v)for k, v in dict_of_counters.items()})", "docstring": "Count the number of elements in each value (can be list, Counter, etc).\n\n :param dict_of_counters: A dictionary of things whose lengths can be measured (lists, Counters, dicts)\n :return: A Counter with the same keys as the input but the count of the length of the values list/tuple/set/Counter", "id": "f9404:m3"} {"signature": "def set_percentage(x: Iterable[X], y: Iterable[X]) -> float:", "body": "a, b = set(x), set(y)if not a:return return len(a & b) / len(a)", "docstring": "What percentage of x is contained within y?\n\n :param set x: A set\n :param set y: Another set\n :return: The percentage of x contained within y", "id": "f9404:m4"} {"signature": "def tanimoto_set_similarity(x: Iterable[X], y: Iterable[X]) -> float:", "body": "a, b = set(x), set(y)union = a | bif not union:return return len(a & b) / len(union)", "docstring": "Calculate the tanimoto set similarity.", "id": "f9404:m5"} {"signature": "def min_tanimoto_set_similarity(x: Iterable[X], y: Iterable[X]) -> float:", "body": "a, b = set(x), set(y)if not a or not b:return return len(a & b) / min(len(a), len(b))", "docstring": "Calculate the tanimoto set similarity using the minimum size.\n\n :param set x: A set\n 
:param set y: Another set\n :return: The similarity between the two sets, normalized by the size of the smaller one", "id": "f9404:m6"} {"signature": "def calculate_single_tanimoto_set_distances(target: Iterable[X], dict_of_sets: Mapping[Y, Set[X]]) -> Mapping[Y, float]:", "body": "target_set = set(target)return {k: tanimoto_set_similarity(target_set, s)for k, s in dict_of_sets.items()}", "docstring": "Return a dictionary of distances keyed by the keys in the given dict.\n\n Distances are calculated based on pairwise tanimoto similarity of the sets contained.\n\n :param set target: A set\n :param dict_of_sets: A dict of {x: set of y}\n :type dict_of_sets: dict\n :return: A similarity dictionary based on the set overlap (tanimoto) score between the target set and the sets in\n the given dict\n :rtype: dict", "id": "f9404:m7"} {"signature": "def calculate_tanimoto_set_distances(dict_of_sets: Mapping[X, Set]) -> Mapping[X, Mapping[X, float]]:", "body": "result: Dict[X, Dict[X, float]] = defaultdict(dict)for x, y in itt.combinations(dict_of_sets, ):result[x][y] = result[y][x] = tanimoto_set_similarity(dict_of_sets[x], dict_of_sets[y])for x in dict_of_sets:result[x][x] = return dict(result)", "docstring": "Return a distance matrix keyed by the keys in the given dict.\n\n Distances are calculated based on pairwise tanimoto similarity of the sets contained.\n\n :param dict_of_sets: A dict of {x: set of y}\n :return: A similarity matrix based on the set overlap (tanimoto) score between each x as a dict of dicts", "id": "f9404:m8"} {"signature": "def calculate_global_tanimoto_set_distances(dict_of_sets: Mapping[X, Set]) -> Mapping[X, Mapping[X, float]]:", "body": "universe = set(itt.chain.from_iterable(dict_of_sets.values()))universe_size = len(universe)result: Dict[X, Dict[X, float]] = defaultdict(dict)for x, y in itt.combinations(dict_of_sets, ):result[x][y] = result[y][x] = - len(dict_of_sets[x] | dict_of_sets[y]) / universe_sizefor x in dict_of_sets:result[x][x] = - len(x) / universe_sizereturn dict(result)", "docstring": "Calculate an alternative distance matrix based on the following equation.\n\n .. math:: distance(A, B) = 1 - \\|A \\cup B\\| / \\|\\bigcup_{s \\in S} s\\|\n\n :param dict_of_sets: A dict of {x: set of y}\n :return: A distance matrix based on the alternative tanimoto distance as a dict of dicts", "id": "f9404:m9"} {"signature": "def barh(d, plt, title=None):", "body": "labels = sorted(d, key=d.get)index = range(len(labels))plt.yticks(index, labels)plt.barh(index, [d[v] for v in labels])if title is not None:plt.title(title)", "docstring": "A convenience function for plotting a horizontal bar plot from a Counter", "id": "f9404:m10"} {"signature": "def barv(d, plt, title=None, rotation=''):", "body": "labels = sorted(d, key=d.get, reverse=True)index = range(len(labels))plt.xticks(index, labels, rotation=rotation)plt.bar(index, [d[v] for v in labels])if title is not None:plt.title(title)", "docstring": "A convenience function for plotting a vertical bar plot from a Counter", "id": "f9404:m11"} {"signature": "def safe_add_edge(graph, u, v, key, attr_dict, **attr):", "body": "if key < :graph.add_edge(u, v, key=key, attr_dict=attr_dict, **attr)else:graph.add_edge(u, v, attr_dict=attr_dict, **attr)", "docstring": "Add an edge while preserving negative keys and ignoring positive ones.\n\n :param pybel.BELGraph graph: A BEL Graph\n :param tuple u: The source BEL node\n :param tuple v: The target BEL node\n :param int key: The edge key. 
If less than zero, corresponds to an unqualified edge; otherwise it is disregarded\n :param dict attr_dict: The edge data dictionary\n :param dict attr: Edge data to assign via keyword arguments", "id": "f9404:m12"} {"signature": "def prepare_c3(data: Union[List[Tuple[str, int]], Mapping[str, int]],y_axis_label: str = '',x_axis_label: str = '',) -> str:", "body": "if not isinstance(data, list):data = sorted(data.items(), key=itemgetter(), reverse=True)try:labels, values = zip(*data)except ValueError:log.info(f'')labels, values = [], []return json.dumps([[x_axis_label] + list(labels),[y_axis_label] + list(values),])", "docstring": "Prepare C3 JSON for making a bar chart from a Counter.\n\n :param data: A dictionary of {str: int} to display as a bar chart\n :param y_axis_label: The Y axis label\n :param x_axis_label: X axis internal label. Should be left as the default ('x')\n :return: A JSON dictionary for making a C3 bar chart", "id": "f9404:m13"} {"signature": "def prepare_c3_time_series(data: List[Tuple[int, int]], y_axis_label: str = '', x_axis_label: str = '') -> str:", "body": "years, counter = zip(*data)years = [datetime.date(year, , ).isoformat()for year in years]return json.dumps([[x_axis_label] + list(years),[y_axis_label] + list(counter)])", "docstring": "Prepare C3 JSON string dump for a time series.\n\n :param data: A list of tuples [(year, count)]\n :param y_axis_label: The Y axis label\n :param x_axis_label: X axis internal label. Should be left as the default ('x')", "id": "f9404:m14"} {"signature": "def calculate_betweenness_centality(graph: BELGraph, number_samples: int = CENTRALITY_SAMPLES) -> Counter:", "body": "try:res = nx.betweenness_centrality(graph, k=number_samples)except Exception:res = nx.betweenness_centrality(graph)return Counter(res)", "docstring": "Calculate the betweenness centrality over nodes in the graph.\n\n Tries to compute it with the given number of samples, falling back to a complete calculation if that fails.", "id": "f9404:m16"} {"signature": "def get_circulations(elements: T) -> Iterable[T]:", "body": "for i in range(len(elements)):yield elements[i:] + elements[:i]", "docstring": "Iterate over all possible circulations of an ordered collection (tuple or list).\n\n Example:\n\n >>> list(get_circulations([1, 2, 3]))\n [[1, 2, 3], [2, 3, 1], [3, 1, 2]]", "id": "f9404:m17"} {"signature": "def canonical_circulation(elements: T, key: Optional[Callable[[T], bool]] = None) -> T:", "body": "return min(get_circulations(elements), key=key)", "docstring": "Get a canonical representation of the ordered collection by finding its minimum circulation with the\n given sort key.", "id": "f9404:m18"} {"signature": "def get_version() -> str:", "body": "return VERSION", "docstring": "Get the current PyBEL Tools version.", "id": "f9404:m19"} {"signature": "def self_edge_filter(_: BELGraph, source: BaseEntity, target: BaseEntity, __: str) -> bool:", "body": "return source == target", "docstring": "Check if the source and target nodes are the same.", "id": "f9405:m0"} {"signature": "def has_protein_modification_increases_activity(graph: BELGraph,source: BaseEntity,target: BaseEntity,key: str,) -> bool:", "body": "edge_data = graph[source][target][key]return has_protein_modification(graph, source) and part_has_modifier(edge_data, OBJECT, ACTIVITY)", "docstring": "Check if pmod of source causes activity of target.", "id": "f9405:m1"} {"signature": "@edge_predicatedef has_degradation_increases_activity(data: Dict) -> bool:", "body": "return part_has_modifier(data, SUBJECT, DEGRADATION) and 
part_has_modifier(data, OBJECT, ACTIVITY)", "docstring": "Check if the degradation of source causes activity of target.", "id": "f9405:m2"} {"signature": "@edge_predicatedef has_translocation_increases_activity(data: Dict) -> bool:", "body": "return part_has_modifier(data, SUBJECT, TRANSLOCATION) and part_has_modifier(data, OBJECT, ACTIVITY)", "docstring": "Check if the translocation of source causes activity of target.", "id": "f9405:m3"} {"signature": "def complex_has_member(graph: BELGraph, complex_node: ComplexAbundance, member_node: BaseEntity) -> bool:", "body": "return any( v == member_nodefor _, v, data in graph.out_edges(complex_node, data=True)if data[RELATION] == HAS_COMPONENT)", "docstring": "Does the given complex contain the member?", "id": "f9405:m4"} {"signature": "def complex_increases_activity(graph: BELGraph, u: BaseEntity, v: BaseEntity, key: str) -> bool:", "body": "return (isinstance(u, (ComplexAbundance, NamedComplexAbundance)) andcomplex_has_member(graph, u, v) andpart_has_modifier(graph[u][v][key], OBJECT, ACTIVITY))", "docstring": "Return if the formation of a complex with u increases the activity of v.", "id": "f9405:m5"} {"signature": "def find_activations(graph: BELGraph):", "body": "for u, v, key, data in graph.edges(keys=True, data=True):if u != v:continuebel = graph.edge_to_bel(u, v, data)line = data.get(LINE)if line is None:continue elif has_protein_modification_increases_activity(graph, u, v, key):print(line, '', bel)find_related(graph, v, data)elif has_degradation_increases_activity(data):print(line, '', bel)find_related(graph, v, data)elif has_translocation_increases_activity(data):print(line, '', bel)find_related(graph, v, data)elif complex_increases_activity(graph, u, v, key):print(line, '', bel)find_related(graph, v, data)elif has_same_subject_object(graph, u, v, key):print(line, '', bel)else:print(line, '', bel)", "docstring": "Find edges that are A - A, meaning that some conditions in the edge best describe the interaction.", "id": "f9405:m10"} {"signature": "def compare(graph: BELGraph, annotation: str = '') -> Mapping[str, Mapping[str, float]]:", "body": "canonical_mechanisms = get_subgraphs_by_annotation(graph, annotation)canonical_nodes = _transform_graph_dict_to_node_dict(canonical_mechanisms)candidate_mechanisms = generate_bioprocess_mechanisms(graph)candidate_nodes = _transform_graph_dict_to_node_dict(candidate_mechanisms)results: Dict[str, Dict[str, float]] = defaultdict(dict)it = itt.product(canonical_nodes.items(), candidate_nodes.items())for (canonical_name, canonical_graph), (candidate_bp, candidate_graph) in it:tanimoto = tanimoto_set_similarity(candidate_nodes, canonical_nodes)results[canonical_name][candidate_bp] = tanimotoreturn dict(results)", "docstring": "Compare generated mechanisms to actual ones.\n\n 1. Generates candidate mechanisms for each biological process\n 2. Gets sub-graphs for all NeuroMMSig signatures\n 3. 
Make a Tanimoto similarity comparison for all sets\n\n :return: A dictionary table comparing the canonical subgraphs to generated ones", "id": "f9407:m0"} {"signature": "def get_neurommsig_scores(graph: BELGraph,genes: List[Gene],annotation: str = '',ora_weight: Optional[float] = None,hub_weight: Optional[float] = None,top_percent: Optional[float] = None,topology_weight: Optional[float] = None,preprocess: bool = False) -> Optional[Mapping[str, float]]:", "body": "if preprocess:graph = neurommsig_graph_preprocessor.run(graph)if not any(gene in graph for gene in genes):logger.debug('')returnsubgraphs = get_subgraphs_by_annotation(graph, annotation=annotation)return get_neurommsig_scores_prestratified(subgraphs=subgraphs,genes=genes,ora_weight=ora_weight,hub_weight=hub_weight,top_percent=top_percent,topology_weight=topology_weight,)", "docstring": "Preprocess the graph, stratify by the given annotation, then run the NeuroMMSig algorithm on each.\n\n :param graph: A BEL graph\n :param genes: A list of gene nodes\n :param annotation: The annotation to use to stratify the graph to subgraphs\n :param ora_weight: The relative weight of the over-enrichment analysis score from\n :py:func:`neurommsig_gene_ora`. Defaults to 1.0.\n :param hub_weight: The relative weight of the hub analysis score from :py:func:`neurommsig_hubs`.\n Defaults to 1.0.\n :param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05).\n :param topology_weight: The relative weight of the topological analysis score from\n :py:func:`neurommsig_topology`. Defaults to 1.0.\n :param preprocess: If true, preprocess the graph.\n :return: A dictionary from {annotation value: NeuroMMSig composite score}\n\n Pre-processing steps:\n\n 1. Infer the central dogma with :func:``\n 2. Collapse all proteins, RNAs and miRNAs to genes with :func:``\n 3. Collapse variants to genes with :func:``", "id": "f9408:m0"} {"signature": "def get_neurommsig_scores_prestratified(subgraphs: Mapping[str, BELGraph],genes: List[Gene],ora_weight: Optional[float] = None,hub_weight: Optional[float] = None,top_percent: Optional[float] = None,topology_weight: Optional[float] = None,) -> Optional[Mapping[str, float]]:", "body": "return {name: get_neurommsig_score(graph=subgraph,genes=genes,ora_weight=ora_weight,hub_weight=hub_weight,top_percent=top_percent,topology_weight=topology_weight,)for name, subgraph in subgraphs.items()}", "docstring": "Take a pre-stratified set of graphs and run the NeuroMMSig algorithm on each.\n\n :param subgraphs: A pre-stratified set of graphs\n :param genes: A list of gene nodes\n :param ora_weight: The relative weight of the over-enrichment analysis score from\n :py:func:`neurommsig_gene_ora`. Defaults to 1.0.\n :param hub_weight: The relative weight of the hub analysis score from :py:func:`neurommsig_hubs`.\n Defaults to 1.0.\n :param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05).\n :param topology_weight: The relative weight of the topological analysis score from\n :py:func:`neurommsig_topology`. Defaults to 1.0.\n :return: A dictionary from {annotation value: NeuroMMSig composite score}\n\n Pre-processing steps:\n\n 1. Infer the central dogma with :func:``\n 2. Collapse all proteins, RNAs and miRNAs to genes with :func:``\n 3. 
Collapse variants to genes with :func:``", "id": "f9408:m1"} {"signature": "def get_neurommsig_score(graph: BELGraph,genes: List[Gene],ora_weight: Optional[float] = None,hub_weight: Optional[float] = None,top_percent: Optional[float] = None,topology_weight: Optional[float] = None) -> float:", "body": "ora_weight = ora_weight or hub_weight = hub_weight or topology_weight = topology_weight or total_weight = ora_weight + hub_weight + topology_weightgenes = list(genes)ora_score = neurommsig_gene_ora(graph, genes)hub_score = neurommsig_hubs(graph, genes, top_percent=top_percent)topology_score = neurommsig_topology(graph, genes)weighted_sum = (ora_weight * ora_score +hub_weight * hub_score +topology_weight * topology_score)return weighted_sum / total_weight", "docstring": "Calculate the composite NeuroMMSig Score for a given list of genes.\n\n :param graph: A BEL graph\n :param genes: A list of gene nodes\n :param ora_weight: The relative weight of the over-enrichment analysis score from\n :py:func:`neurommsig_gene_ora`. Defaults to 1.0.\n :param hub_weight: The relative weight of the hub analysis score from :py:func:`neurommsig_hubs`.\n Defaults to 1.0.\n :param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05).\n :param topology_weight: The relative weight of the topological analysis score from\n :py:func:`neurommsig_topology`. Defaults to 1.0.\n :return: The NeuroMMSig composite score", "id": "f9408:m2"} {"signature": "def neurommsig_gene_ora(graph: BELGraph, genes: List[Gene]) -> float:", "body": "graph_genes = set(get_nodes_by_function(graph, GENE))return len(graph_genes.intersection(genes)) / len(graph_genes)", "docstring": "Calculate the percentage of target genes mappable to the graph.\n\n Assume: graph central dogma inferred, collapsed to genes, collapsed variants", "id": "f9408:m3"} {"signature": "def neurommsig_hubs(graph: BELGraph, genes: List[Gene], top_percent: Optional[float] = None) -> float:", "body": "top_percent = top_percent or if graph.number_of_nodes() < :logger.debug('')return graph_genes = set(get_nodes_by_function(graph, GENE))bc = Counter({node: betweenness_centralityfor node, betweenness_centrality in calculate_betweenness_centality(graph).items()if node in graph_genes})number_central_nodes = int(len(graph_genes) * top_percent)if number_central_nodes < :number_central_nodes = number_mappable_central_nodes = sum(node in genesfor node, _ in bc.most_common(number_central_nodes))return number_mappable_central_nodes / number_central_nodes", "docstring": "Calculate the fraction of the graph's top hub genes that appear in the target gene list.\n\n Assume: graph central dogma inferred, collapsed to genes, collapsed variants, graph has more than 20 nodes\n\n :param graph: A BEL graph\n :param genes: A list of nodes\n :param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05).", "id": "f9408:m4"} {"signature": "def neurommsig_topology(graph: BELGraph, nodes: List[BaseEntity]) -> float:", "body": "nodes = list(nodes)number_nodes = len(nodes)if number_nodes <= :return unnormalized_sum = sum(u in graph[v]for u, v in itt.product(nodes, repeat=)if v in graph and u != v)return unnormalized_sum / (number_nodes * (number_nodes - ))", "docstring": "Calculate the node neighbor score for a given list of nodes.\n\n - Doesn't consider self loops\n\n .. 
math::\n\n \\frac{\\sum_i^n N_G[i]}{n*(n-1)}", "id": "f9408:m5"} {"signature": "@click.command()def main():", "body": "logging.basicConfig(level=logging.INFO)log.setLevel(logging.INFO)bms_base = get_bms_base()neurommsig_base = get_neurommsig_base()neurommsig_excel_dir = os.path.join(neurommsig_base, '', '', '')nift_values = get_nift_values()log.info('')ad_path = os.path.join(neurommsig_excel_dir, '', '')ad_df = preprocess(ad_path)with open(os.path.join(bms_base, '', '', ''), '') as ad_file:write_neurommsig_bel(ad_file, ad_df, mesh_alzheimer, nift_values)log.info('')pd_path = os.path.join(neurommsig_excel_dir, '', '')pd_df = preprocess(pd_path)with open(os.path.join(bms_base, '', '', ''), '') as pd_file:write_neurommsig_bel(pd_file, pd_df, mesh_parkinson, nift_values)", "docstring": "Convert the Alzheimer's and Parkinson's disease NeuroMMSig excel sheets to BEL.", "id": "f9409:m0"} {"signature": "def preprocessing_excel(path):", "body": "if not os.path.exists(path):raise ValueError(\"\" % path)df = pd.read_excel(path, sheetname=, header=)df.iloc[:, ] = pd.Series(df.iloc[:, ]).fillna(method='')df = df[df.ix[:, ].notnull()]df = df.reset_index(drop=True)df.ix[:, ].fillna(, inplace=True)if (df.ix[:, ].isnull().sum()) != :raise ValueError(\"\")return df", "docstring": "Preprocess the excel sheet\n\n :param filepath: filepath of the excel data\n :return: df: pandas dataframe with excel data\n :rtype: pandas.DataFrame", "id": "f9410:m0"} {"signature": "def munge_cell(cell, line=None, validators=None):", "body": "if pd.isnull(cell) or isinstance(cell, int):return Nonec = ''.join(cell.split())if validators is not None and all(re.match(validator, c) is None for validator in validators):if line:log.info(\"\", line, c)return Nonereturn [x.strip() for x in str(c).strip().split('')]", "docstring": ":param cell:\n:param line:\n:param validators:\n:return:", "id": "f9410:m1"} {"signature": "def preprocessing_br_projection_excel(path: str) -> pd.DataFrame:", "body": "if not os.path.exists(path):raise ValueError(\"\" % path)return pd.read_excel(path, sheetname=, header=)", "docstring": "Preprocess the excel file.\n\n Parameters\n ----------\n path : Filepath of the excel sheet", "id": "f9410:m2"} {"signature": "def get_nift_values() -> Mapping[str, str]:", "body": "r = get_bel_resource(NIFT)return {name.lower(): namefor name in r['']}", "docstring": "Extract the list of NIFT names from the BEL resource and builds a dictionary mapping from the lowercased version\n to the uppercase version.", "id": "f9410:m4"} {"signature": "def write_neurommsig_bel(file,df: pd.DataFrame,disease: str,nift_values: Mapping[str, str],):", "body": "write_neurommsig_biolerplate(disease, file)missing_features = set()fixed_caps = set()nift_value_originals = set(nift_values.values())graph = BELGraph(name=f'',description=f'',authors='',contact='',version=time.strftime(''),)for pathway, pathway_df in df.groupby(pathway_column):sorted_pathway_df = pathway_df.sort_values(genes_column)sliced_df = sorted_pathway_df[columns].itertuples()for _, gene, pubmeds, lit_snps, gwas_snps, ld_block_snps, clinical_features, clinical_snps in sliced_df:gene = ensure_quotes(gene)for snp in itt.chain(lit_snps or [], gwas_snps or [], ld_block_snps or [], clinical_snps or []):if not snp.strip():continuegraph.add_association(Gene('', gene),Gene('', snp),evidence='',citation='',annotations={'': disease,},)for clinical_feature in clinical_features or []:if not clinical_feature.strip():continueif clinical_feature.lower() not in 
nift_values:missing_features.add(clinical_feature)continueif clinical_feature not in nift_value_originals:fixed_caps.add((clinical_feature, nift_values[clinical_feature.lower()]))clinical_feature = nift_values[clinical_feature.lower()] graph.add_association(Gene('', gene),Abundance('', clinical_feature),evidence='',citation='',annotations={'': disease,},)if clinical_snps:for clinical_snp in clinical_snps:graph.add_association(Gene('', clinical_snp),Abundance('', clinical_feature),evidence='',citation='',annotations={'': disease,},)if missing_features:log.warning('', disease)for feature in missing_features:log.warning(feature)if fixed_caps:log.warning('')for broken, fixed in fixed_caps:log.warning('', broken, fixed)", "docstring": "Write the NeuroMMSigDB excel sheet to BEL.\n\n :param file: a file or file-like that can be written to\n :param df:\n :param disease:\n :param nift_values: a dictionary of lowercased to normal names in NIFT", "id": "f9410:m6"} {"signature": "def get_bms_base() -> str:", "body": "bms_base = os.environ.get('')if bms_base is None:raise RuntimeError(\"\"\"\"\"\")return bms_base", "docstring": "Get the path to the BMS git repository from the environment or throw an exception.\n\n :raises: RuntimeError", "id": "f9413:m4"} {"signature": "def get_neurommsig_base() -> str:", "body": "neurommsig_base = os.environ.get('')if neurommsig_base is None:raise RuntimeError(\"\"\"\"\"\")return neurommsig_base", "docstring": "Get the path to the NeuroMMSig git repository from the environment or throw an exception.\n\n :raises: RuntimeError", "id": "f9413:m5"} {"signature": "def get_contradiction_summary(graph: BELGraph) -> Iterable[Tuple[BaseEntity, BaseEntity, str]]:", "body": "for u, v in set(graph.edges()):relations = {data[RELATION] for data in graph[u][v].values()}if relation_set_has_contradictions(relations):yield u, v, relations", "docstring": "Yield triplets of (source node, target node, set of relations) for (source node, target node) pairs\n that have multiple, contradictory relations.", "id": "f9414:m0"} {"signature": "def get_regulatory_pairs(graph: BELGraph) -> Set[NodePair]:", "body": "cg = get_causal_subgraph(graph)results = set()for u, v, d in cg.edges(data=True):if d[RELATION] not in CAUSAL_INCREASE_RELATIONS:continueif cg.has_edge(v, u) and any(dd[RELATION] in CAUSAL_DECREASE_RELATIONS for dd in cg[v][u].values()):results.add((u, v))return results", "docstring": "Find pairs of nodes that have mutual causal edges that are regulating each other such that ``A -> B`` and\n ``B -| A``.\n\n :return: A set of pairs of nodes with mutual causal edges", "id": "f9414:m1"} {"signature": "def get_chaotic_pairs(graph: BELGraph) -> SetOfNodePairs:", "body": "cg = get_causal_subgraph(graph)results = set()for u, v, d in cg.edges(data=True):if d[RELATION] not in CAUSAL_INCREASE_RELATIONS:continueif cg.has_edge(v, u) and any(dd[RELATION] in CAUSAL_INCREASE_RELATIONS for dd in 
cg[v][u].values()):results.add(tuple(sorted([u, v], key=str)))return results", "docstring": "Find pairs of nodes that have mutual causal edges that are decreasing each other such that ``A -| B`` and\n ``B -| A``.\n\n :return: A set of pairs of nodes with mutual causal edges", "id": "f9414:m3"} {"signature": "def get_correlation_graph(graph: BELGraph) -> Graph:", "body": "result = Graph()for u, v, d in graph.edges(data=True):if d[RELATION] not in CORRELATIVE_RELATIONS:continueif not result.has_edge(u, v):result.add_edge(u, v, **{d[RELATION]: True})elif d[RELATION] not in result[u][v]:log.log(, '', u, v)result[u][v][d[RELATION]] = Trueresult[v][u][d[RELATION]] = Truereturn result", "docstring": "Extract an undirected graph of only correlative relationships.", "id": "f9414:m4"} {"signature": "def get_correlation_triangles(graph: BELGraph) -> SetOfNodeTriples:", "body": "return {tuple(sorted([n, u, v], key=str))for n in graphfor u, v in itt.combinations(graph[n], )if graph.has_edge(u, v)}", "docstring": "Return a set of all triangles pointed by the given node.", "id": "f9414:m5"} {"signature": "def get_triangles(graph: DiGraph) -> SetOfNodeTriples:", "body": "return {tuple(sorted([a, b, c], key=str))for a, b in graph.edges()for c in graph.successors(b)if graph.has_edge(c, a)}", "docstring": "Get a set of triples representing the 3-cycles from a directional graph.\n\n Each 3-cycle is returned once, with nodes in sorted order.", "id": "f9414:m6"} {"signature": "def get_separate_unstable_correlation_triples(graph: BELGraph) -> Iterable[NodeTriple]:", "body": "cg = get_correlation_graph(graph)for a, b, c in get_correlation_triangles(cg):if POSITIVE_CORRELATION in cg[a][b] and POSITIVE_CORRELATION in cg[b][c] and NEGATIVE_CORRELATION incg[a][c]:yield b, a, cif POSITIVE_CORRELATION in cg[a][b] and NEGATIVE_CORRELATION in cg[b][c] and POSITIVE_CORRELATION incg[a][c]:yield a, b, cif NEGATIVE_CORRELATION in cg[a][b] and POSITIVE_CORRELATION in cg[b][c] and POSITIVE_CORRELATION incg[a][c]:yield c, a, b", "docstring": "Yield all triples of nodes A, B, C such that ``A pos B``, ``A pos C``, and ``B neg C``.\n\n :return: An iterator over triples of unstable graphs, where the second two are negative", "id": "f9414:m7"} {"signature": "def get_mutually_unstable_correlation_triples(graph: BELGraph) -> Iterable[NodeTriple]:", "body": "cg = get_correlation_graph(graph)for a, b, c in get_correlation_triangles(cg):if all(NEGATIVE_CORRELATION in x for x in (cg[a][b], cg[b][c], cg[a][c])):yield a, b, c", "docstring": "Yield triples of nodes (A, B, C) such that ``A neg B``, ``B neg C``, and ``C neg A``.", "id": "f9414:m8"} {"signature": "def jens_transformation_alpha(graph: BELGraph) -> DiGraph:", "body": "result = DiGraph()for u, v, d in graph.edges(data=True):relation = d[RELATION]if relation == POSITIVE_CORRELATION:result.add_edge(u, v)result.add_edge(v, u)elif relation in CAUSAL_INCREASE_RELATIONS:result.add_edge(u, v)elif relation in CAUSAL_DECREASE_RELATIONS:result.add_edge(v, u)return result", "docstring": "Apply Jens' transformation (Type 1) to the graph.\n\n 1. Induce a sub-graph over causal + correlative edges\n 2. 
Transform edges by the following rules:\n - increases => increases\n - decreases => backwards increases\n - positive correlation => two-way increases\n - negative correlation => delete\n\n The resulting graph can be used to search for 3-cycles, which now symbolize unstable triplets where ``A -> B``,\n ``A -| C`` and ``B positiveCorrelation C``.", "id": "f9414:m9"} {"signature": "def jens_transformation_beta(graph: BELGraph) -> DiGraph:", "body": "result = DiGraph()for u, v, d in graph.edges(data=True):relation = d[RELATION]if relation == NEGATIVE_CORRELATION:result.add_edge(u, v)result.add_edge(v, u)elif relation in CAUSAL_INCREASE_RELATIONS:result.add_edge(v, u)elif relation in CAUSAL_DECREASE_RELATIONS:result.add_edge(u, v)return result", "docstring": "Apply Jens' Transformation (Type 2) to the graph.\n\n 1. Induce a sub-graph over causal and correlative relations\n 2. Transform edges with the following rules:\n - increases => backwards decreases\n - decreases => decreases\n - positive correlation => delete\n - negative correlation => two-way decreases\n\n The resulting graph can be used to search for 3-cycles, which now symbolize stable triples where ``A -> B``,\n ``A -| C`` and ``B negativeCorrelation C``.", "id": "f9414:m10"} {"signature": "def get_jens_unstable(graph: BELGraph) -> Iterable[NodeTriple]:", "body": "r = jens_transformation_alpha(graph)return get_triangles(r)", "docstring": "Yield triples of nodes (A, B, C) where ``A -> B``, ``A -| C``, and ``C positiveCorrelation A``.\n\n Calculated efficiently using the Jens Transformation.", "id": "f9414:m11"} {"signature": "def get_increase_mismatch_triplets(graph: BELGraph) -> Iterable[NodeTriple]:", "body": "return _get_mismatch_triplets_helper(graph, CAUSAL_INCREASE_RELATIONS)", "docstring": "Yield triples of nodes (A, B, C) where ``A -> B``, ``A -> C``, and ``C negativeCorrelation A``.", "id": "f9414:m13"} {"signature": "def get_decrease_mismatch_triplets(graph: BELGraph) -> Iterable[NodeTriple]:", "body": "return _get_mismatch_triplets_helper(graph, CAUSAL_DECREASE_RELATIONS)", "docstring": "Yield triples of nodes (A, B, C) where ``A -| B``, ``A -| C``, and ``C negativeCorrelation A``.", "id": "f9414:m14"} {"signature": "def get_chaotic_triplets(graph: BELGraph) -> Iterable[NodeTriple]:", "body": "return _get_disregulated_triplets_helper(graph, CAUSAL_INCREASE_RELATIONS)", "docstring": "Yield triples of nodes (A, B, C) that mutually increase each other, such as when ``A -> B``, ``B -> C``, and\n ``C -> A``.", "id": "f9414:m16"} {"signature": "def get_dampened_triplets(graph: BELGraph) -> Iterable[NodeTriple]:", "body": "return _get_disregulated_triplets_helper(graph, CAUSAL_DECREASE_RELATIONS)", "docstring": "Yield triples of nodes (A, B, C) that mutually decrease each other, such as when ``A -| B``,\n ``B -| C``, and ``C -| A``.", "id": "f9414:m17"} {"signature": "def summarize_stability(graph: BELGraph) -> Mapping[str, int]:", "body": "regulatory_pairs = get_regulatory_pairs(graph)chaotic_pairs = get_chaotic_pairs(graph)dampened_pairs = get_dampened_pairs(graph)contraditory_pairs = get_contradiction_summary(graph)separately_unstable_triples = get_separate_unstable_correlation_triples(graph)mutually_unstable_triples = get_mutually_unstable_correlation_triples(graph)jens_unstable_triples = get_jens_unstable(graph)increase_mismatch_triples = get_increase_mismatch_triplets(graph)decrease_mismatch_triples = get_decrease_mismatch_triplets(graph)chaotic_triples = get_chaotic_triplets(graph)dampened_triples = 
get_dampened_triplets(graph)return {'': _count_or_len(regulatory_pairs),'': _count_or_len(chaotic_pairs),'': _count_or_len(dampened_pairs),'': _count_or_len(contraditory_pairs),'': _count_or_len(separately_unstable_triples),'': _count_or_len(mutually_unstable_triples),'': _count_or_len(jens_unstable_triples),'': _count_or_len(increase_mismatch_triples),'': _count_or_len(decrease_mismatch_triples),'': _count_or_len(chaotic_triples),'': _count_or_len(dampened_triples)}", "docstring": "Summarize the stability of the graph.", "id": "f9414:m18"} {"signature": "def run_rcr(graph, tag=''):", "body": "hypotheses = defaultdict(set)increases = defaultdict(set)decreases = defaultdict(set)for u, v, d in graph.edges(data=True):hypotheses[u].add(v)if d[RELATION] in CAUSAL_INCREASE_RELATIONS:increases[u].add(v)elif d[RELATION] in CAUSAL_DECREASE_RELATIONS:decreases[u].add(v)correct = defaultdict(int)contra = defaultdict(int)ambiguous = defaultdict(int)missing = defaultdict(int)for controller, downstream_nodes in hypotheses.items():if len(downstream_nodes) < :continue for node in downstream_nodes:if node in increases[controller] and node in decreases[controller]:ambiguous[controller] += elif node in increases[controller]:if graph.node[node][tag] == :correct[controller] += elif graph.node[node][tag] == -:contra[controller] += elif node in decreases[controller]:if graph.node[node][tag] == :contra[controller] += elif graph.node[node][tag] == -:correct[controller] += else:missing[controller] += controllers = {controllerfor controller, downstream_nodes in hypotheses.items()if <= len(downstream_nodes)}concordance_scores = {controller: scipy.stats.beta(, correct[controller], contra[controller])for controller in controllers}population = {nodefor controller in controllersfor node in hypotheses[controller]}population_size = len(population)return pandas.DataFrame({'': contra,'': correct,'': concordance_scores})", "docstring": "Run the reverse causal reasoning algorithm on a graph.\n\n Steps:\n\n 1. Get all downstream controlled things into map (that have at least 4 downstream things)\n 2. calculate population of all things that are downstream controlled\n\n .. 
note:: Assumes all nodes have been pre-tagged with data\n\n :param pybel.BELGraph graph:\n :param str tag: The key for the nodes' data dictionaries that corresponds to the integer value for its differential\n expression.", "id": "f9415:m2"} {"signature": "def get_cutoff(value: float, cutoff: Optional[float] = None) -> int:", "body": "cutoff = cutoff if cutoff is not None else if value > cutoff:return if value < (- * cutoff):return - return ", "docstring": "Assign if a value is greater than or less than a cutoff.", "id": "f9416:m0"} {"signature": "def edge_concords(graph, u, v, k, d, key, cutoff: Optional[float] = None) -> Concordance:", "body": "if key not in graph.nodes[u] or key not in graph.nodes[v]:return Concordance.unassignedrelation = d[RELATION]if relation not in (UP | DOWN | {CAUSES_NO_CHANGE}):return Concordance.unassignedsource_regulation = get_cutoff(graph.nodes[u][key], cutoff=cutoff)target_regulation = get_cutoff(graph.nodes[v][key], cutoff=cutoff)if source_regulation == :if target_regulation == and relation in UP:return Concordance.correctelif target_regulation == and relation in DOWN:return Concordance.incorrectelif target_regulation == - and relation in UP:return Concordance.incorrectelif target_regulation == - and relation in DOWN:return Concordance.correctelif target_regulation == and relation in (DOWN | UP):return Concordance.incorrectelif target_regulation == and relation == CAUSES_NO_CHANGE:return Concordance.correctelif target_regulation in {, -} and relation == CAUSES_NO_CHANGE:return Concordance.incorrectelse:log.warning('', u, source_regulation, relation, v, target_regulation)return Concordance.ambiguouselif source_regulation == -:if target_regulation == and relation in UP:return Concordance.incorrectelif target_regulation == and relation in DOWN:return Concordance.correctelif target_regulation == - and relation in UP:return Concordance.correctelif target_regulation == - and relation in DOWN:return Concordance.incorrectelif target_regulation == and relation in (DOWN | UP):return Concordance.incorrectelif target_regulation == and relation == CAUSES_NO_CHANGE:return Concordance.correctelif target_regulation in {, -} and relation == CAUSES_NO_CHANGE:return Concordance.incorrectelse:log.warning('', u, source_regulation, relation, v, target_regulation)return Concordance.ambiguouselse: if target_regulation == and relation == CAUSES_NO_CHANGE:return Concordance.correctreturn Concordance.ambiguous", "docstring": ":param pybel.BELGraph graph: A BEL graph\n:param u:\n:param v:\n:param k:\n:param d:\n:param str key: The node data dictionary key storing the logFC\n:param float cutoff: The optional logFC cutoff for significance\n:rtype: Concordance", "id": "f9416:m1"} {"signature": "def calculate_concordance_helper(graph: BELGraph,key: str,cutoff: Optional[float] = None,) -> Tuple[int, int, int, int]:", "body": "scores = defaultdict(int)for u, v, k, d in graph.edges(keys=True, data=True):c = edge_concords(graph, u, v, k, d, key, cutoff=cutoff)scores[c] += return (scores[Concordance.correct],scores[Concordance.incorrect],scores[Concordance.ambiguous],scores[Concordance.unassigned],)", "docstring": "Help calculate network-wide concordance\n\n Assumes data already annotated with given key\n\n :param graph: A BEL graph\n :param key: The node data dictionary key storing the logFC\n :param cutoff: The optional logFC cutoff for significance", "id": "f9416:m2"} {"signature": "def calculate_concordance(graph: BELGraph, key: str, cutoff: Optional[float] = None,use_ambiguous: bool = 
False) -> float:", "body": "correct, incorrect, ambiguous, _ = calculate_concordance_helper(graph, key, cutoff=cutoff)try:return correct / (correct + incorrect + (ambiguous if use_ambiguous else ))except ZeroDivisionError:return -", "docstring": "Calculates network-wide concordance.\n\n Assumes data already annotated with given key\n\n :param graph: A BEL graph\n :param key: The node data dictionary key storing the logFC\n :param cutoff: The optional logFC cutoff for significance\n :param use_ambiguous: Compare to ambiguous edges as well", "id": "f9416:m3"} {"signature": "def one_sided(value: float, distribution: List[float]) -> float:", "body": "assert distributionreturn sum(value < element for element in distribution) / len(distribution)", "docstring": "Calculate the one-sided probability of getting a value more extreme than the distribution.", "id": "f9416:m4"} {"signature": "def calculate_concordance_probability(graph: BELGraph,key: str,cutoff: Optional[float] = None,permutations: Optional[int] = None,percentage: Optional[float] = None,use_ambiguous: bool = False,permute_type: str = '',) -> Tuple[float, List[float], float]:", "body": "if permute_type == '':permute_func = partial(random_by_edges, percentage=percentage)elif permute_type == '':permute_func = partial(shuffle_node_data, key=key, percentage=percentage)elif permute_type == '':permute_func = partial(shuffle_relations, percentage=percentage)else:raise ValueError(''.format(permute_type))graph: BELGraph = graph.copy()collapse_to_genes(graph)collapse_all_variants(graph)score = calculate_concordance(graph, key, cutoff=cutoff)distribution = []for _ in range(permutations or ):permuted_graph = permute_func(graph)permuted_graph_scores = calculate_concordance(permuted_graph, key, cutoff=cutoff, use_ambiguous=use_ambiguous)distribution.append(permuted_graph_scores)return score, distribution, one_sided(score, distribution)", "docstring": "Calculates a graph's concordance as well as its statistical probability.\n\n\n\n :param graph: A BEL graph\n :param str key: The node data dictionary key storing the logFC\n :param float cutoff: The optional logFC cutoff for significance\n :param int permutations: The number of random permutations to test. Defaults to 500\n :param float percentage: The percentage of the graph's edges to maintain. 
Defaults to 0.9\n :param bool use_ambiguous: Compare to ambiguous edges as well\n :returns: A triple of the concordance score, the null distribution, and the p-value.", "id": "f9416:m5"} {"signature": "def calculate_concordance_by_annotation(graph, annotation, key, cutoff=None):", "body": "return {value: calculate_concordance(subgraph, key, cutoff=cutoff)for value, subgraph in get_subgraphs_by_annotation(graph, annotation).items()}", "docstring": "Returns the concordance scores for each stratified graph based on the given annotation\n\n :param pybel.BELGraph graph: A BEL graph\n :param str annotation: The annotation to group by.\n :param str key: The node data dictionary key storing the logFC\n :param float cutoff: The optional logFC cutoff for significance\n :rtype: dict[str,tuple]", "id": "f9416:m6"} {"signature": "def calculate_concordance_probability_by_annotation(graph, annotation, key, cutoff=None, permutations=None,percentage=None,use_ambiguous=False):", "body": "result = [(value, calculate_concordance_probability(subgraph,key,cutoff=cutoff,permutations=permutations,percentage=percentage,use_ambiguous=use_ambiguous,))for value, subgraph in get_subgraphs_by_annotation(graph, annotation).items()]return dict(result)", "docstring": "Returns the results of concordance analysis on each subgraph, stratified by the given annotation.\n\n :param pybel.BELGraph graph: A BEL graph\n :param str annotation: The annotation to group by.\n :param str key: The node data dictionary key storing the logFC\n :param float cutoff: The optional logFC cutoff for significance\n :param int permutations: The number of random permutations to test. Defaults to 500\n :param float percentage: The percentage of the graph's edges to maintain. Defaults to 0.9\n :param bool use_ambiguous: Compare to ambiguous edges as well\n :rtype: dict[str,tuple]", "id": "f9416:m7"} {"signature": "def bel_to_spia_matrices(graph: BELGraph) -> Mapping[str, pd.DataFrame]:", "body": "index_nodes = get_matrix_index(graph)spia_matrices = build_spia_matrices(index_nodes)for u, v, edge_data in graph.edges(data=True):if isinstance(u, CentralDogma) and isinstance(v, CentralDogma):update_spia_matrices(spia_matrices, u, v, edge_data)elif isinstance(u, CentralDogma) and isinstance(v, ListAbundance):for node in v.members:if not isinstance(node, CentralDogma):continueupdate_spia_matrices(spia_matrices, u, node, edge_data)elif isinstance(u, ListAbundance) and isinstance(v, CentralDogma):for node in u.members:if not isinstance(node, CentralDogma):continueupdate_spia_matrices(spia_matrices, node, v, edge_data)elif isinstance(u, ListAbundance) and isinstance(v, ListAbundance):for sub_member, obj_member in product(u.members, v.members):if isinstance(sub_member, CentralDogma) and isinstance(obj_member, CentralDogma):update_spia_matrices(spia_matrices, sub_member, obj_member, edge_data)return spia_matrices", "docstring": "Create an excel sheet ready to be used in SPIA software.\n\n :param graph: BELGraph\n :return: dictionary with matrices", "id": "f9417:m0"} {"signature": "def get_matrix_index(graph: BELGraph) -> Set[str]:", "body": "return {node.namefor node in graphif isinstance(node, CentralDogma) and node.namespace.upper() == ''}", "docstring": "Return set of HGNC names from Proteins/Rnas/Genes/miRNA, nodes that can be used by SPIA.", "id": "f9417:m1"} {"signature": "def build_spia_matrices(nodes: Set[str]) -> Dict[str, pd.DataFrame]:", "body": "nodes = list(sorted(nodes))matrices = OrderedDict()for relation in KEGG_RELATIONS:matrices[relation] = 
pd.DataFrame(, index=nodes, columns=nodes)return matrices", "docstring": "Build an adjacency matrix for each KEGG relationship and return in a dictionary.\n\n :param nodes: A set of HGNC gene symbols\n :return: Dictionary of adjacency matrix for each relationship", "id": "f9417:m2"} {"signature": "def update_spia_matrices(spia_matrices: Dict[str, pd.DataFrame],u: CentralDogma,v: CentralDogma,edge_data: EdgeData,) -> None:", "body": "if u.namespace.upper() != '' or v.namespace.upper() != '':returnu_name = u.namev_name = v.namerelation = edge_data[RELATION]if relation in CAUSAL_INCREASE_RELATIONS:if v.variants and any(isinstance(variant, ProteinModification) for variant in v.variants):for variant in v.variants:if not isinstance(variant, ProteinModification):continueif variant[IDENTIFIER][NAME] == \"\":spia_matrices[\"\"][u_name][v_name] = elif variant[IDENTIFIER][NAME] == \"\":spia_matrices[\"\"][u_name][v_name] = elif isinstance(v, (Gene, Rna)): spia_matrices[''][u_name][v_name] = else:spia_matrices[''][u_name][v_name] = elif relation in CAUSAL_DECREASE_RELATIONS:if v.variants and any(isinstance(variant, ProteinModification) for variant in v.variants):for variant in v.variants:if not isinstance(variant, ProteinModification):continueif variant[IDENTIFIER][NAME] == \"\":spia_matrices[''][u_name][v_name] = elif variant[IDENTIFIER][NAME] == \"\":spia_matrices[\"\"][u_name][v_name] = elif isinstance(v, (Gene, Rna)): spia_matrices[\"\"][u_name][v_name] = else:spia_matrices[\"\"][u_name][v_name] = elif relation == ASSOCIATION:spia_matrices[\"\"][u_name][v_name] = ", "docstring": "Populate the adjacency matrix.", "id": "f9417:m3"} {"signature": "def spia_matrices_to_excel(spia_matrices: Mapping[str, pd.DataFrame], path: str) -> None:", "body": "writer = pd.ExcelWriter(path, engine='')for relation, df in spia_matrices.items():df.to_excel(writer, sheet_name=relation, index=False)writer.save()", "docstring": "Export a SPIA data dictionary into an Excel sheet at the given path.\n\n .. 
note::\n\n # The R import should add the values:\n # [\"nodes\"] from the columns\n # [\"title\"] from the name of the file\n # [\"NumberOfReactions\"] set to \"0\"", "id": "f9417:m4"} {"signature": "def spia_matrices_to_tsvs(spia_matrices: Mapping[str, pd.DataFrame], directory: str) -> None:", "body": "os.makedirs(directory, exist_ok=True)for relation, df in spia_matrices.items():df.to_csv(os.path.join(directory, f''), index=True)", "docstring": "Export a SPIA data dictionary into a directory as several TSV documents.", "id": "f9417:m5"} {"signature": "@click.command()@graph_pickle_argument@click.option('', type=click.Path(file_okay=True, dir_okay=False))@click.option('', type=click.Path(file_okay=False, dir_okay=True))def main(graph: BELGraph, xlsx: str, tsvs: str):", "body": "if not xlsx and not tsvs:click.secho('', fg='')sys.exit()spia_matrices = bel_to_spia_matrices(graph)if xlsx:spia_matrices_to_excel(spia_matrices, xlsx)if tsvs:spia_matrices_to_tsvs(spia_matrices, tsvs)", "docstring": "Export the graph to a SPIA Excel sheet.", "id": "f9417:m6"} {"signature": "def _get_drug_target_interactions(manager: Optional[''] = None) -> Mapping[str, List[str]]:", "body": "if manager is None:import bio2bel_drugbankmanager = bio2bel_drugbank.Manager()if not manager.is_populated():manager.populate()return manager.get_drug_to_hgnc_symbols()", "docstring": "Get a mapping from drugs to their list of gene.", "id": "f9418:m0"} {"signature": "def multi_run_epicom(graphs: Iterable[BELGraph], path: Union[None, str, TextIO]) -> None:", "body": "if isinstance(path, str):with open(path, '') as file:_multi_run_helper_file_wrapper(graphs, file)else:_multi_run_helper_file_wrapper(graphs, path)", "docstring": "Run EpiCom analysis on many graphs.", "id": "f9418:m5"} {"signature": "@click.group()def main():", "body": "", "docstring": "Run EpiCom Reloaded.", "id": "f9419:m0"} {"signature": "@main.command()@graph_pickle_argument@directory_optiondef run(graph, directory):", "body": "run_epicom(graph, directory)", "docstring": "Run on an arbitrary graph.", "id": "f9419:m1"} {"signature": "@main.command()@directory_optiondef ad(directory):", "body": "graph = get_ad_graph()run_epicom(graph, directory)", "docstring": "Run on the AD graph.", "id": "f9419:m2"} {"signature": "@main.command()@directory_optiondef pd(directory):", "body": "graph = get_pd_graph()run_epicom(graph, directory)", "docstring": "Run on the PD graph.", "id": "f9419:m3"} {"signature": "@main.command()@directory_optiondef ep(directory):", "body": "graph = get_ep_graph()run_epicom(graph, directory)", "docstring": "Run on the Epilepsy graph.", "id": "f9419:m4"} {"signature": "@main.command()@click.option('', '', type=click.File(''), default=sys.stdout)def multi(output):", "body": "graphs = [get_ad_graph(),get_ep_graph(),get_pd_graph(),]multi_run_epicom(graphs, output)", "docstring": "Run on all graphs.", "id": "f9419:m5"} {"signature": "def get_networks_using_annotation(manager: pybel.Manager, annotation: str):", "body": "raise NotImplementedError", "docstring": ":param pybel.manager.Manager manager:\n:param str annotation:\n:return: list[pybel.manager.models.Network]", "id": "f9420:m0"} {"signature": "def get_drug_model(manager: pybel.Manager, name: str):", "body": "raise NotImplementedError", "docstring": ":param pybel.manager.Manager manager:\n:param str name:\n:return: pybel.manager.models.NamespaceEntry", "id": "f9420:m1"} {"signature": "def build_database(manager: pybel.Manager, annotation_url: Optional[str] = None) -> None:", "body": 
"annotation_url = annotation_url or NEUROMMSIG_DEFAULT_URLannotation = manager.get_namespace_by_url(annotation_url)if annotation is None:raise RuntimeError('')networks = get_networks_using_annotation(manager, annotation)dtis = ...for network in networks:graph = network.as_bel()scores = epicom_on_graph(graph, dtis)for (drug_name, subgraph_name), score in scores.items():drug_model = get_drug_model(manager, drug_name)subgraph_model = manager.get_annotation_entry(annotation_url, subgraph_name)score_model = Score(network=network,annotation=subgraph_model,drug=drug_model,score=score)manager.session.add(score_model)t = time.time()logger.info('')manager.session.commit()logger.info('', time.time() - t)", "docstring": "Build a database of scores for NeuroMMSig annotated graphs.\n\n 1. Get all networks that use the Subgraph annotation\n 2. run on each", "id": "f9420:m2"} {"signature": "def get_random_walk_spanning_tree(graph):", "body": "raise NotImplementedError", "docstring": "Generates a spanning tree from the directed graph using the random walk approach proposed independently by\n by Broder (1989) and Aldous (1990). It simply generates random walks until all nodes have been covered.\n\n Algorithm:\n\n 1. Choose a starting vertex s arbitrarily. Set T_V \u2190 {s} and T_E \u2190 \u2205.\n 2. Do a simple random walk starting at s. Whenever we cross an edge e = {u, v} with v \u2208 V ,\n add v to TV and add e to TE.\n 3. Stop the random walk when TV = V . Output T = (T_V , T_E) as our spanning tree\n\n :param networkx.DiGraph graph: The input graph\n :rtype: networkx.DiGraph\n\n .. seealso::\n\n - https://math.dartmouth.edu/~pw/math100w13/kothari.pdf\n - http://keyulux.com/pdf/spanning_tree.pdf", "id": "f9424:m0"} {"signature": "def rank_causalr_hypothesis(graph, node_to_regulation, regulator_node):", "body": "upregulation_hypothesis = {'': ,'': ,'': }downregulation_hypothesis = {'': ,'': ,'': }targets = [nodefor node in node_to_regulationif node != regulator_node]predicted_regulations = run_cna(graph, regulator_node, targets) for _, target_node, predicted_regulation in predicted_regulations:if (predicted_regulation is Effect.inhibition or predicted_regulation is Effect.activation) and (predicted_regulation.value == node_to_regulation[target_node]):upregulation_hypothesis[''] += downregulation_hypothesis[''] += elif predicted_regulation is Effect.ambiguous:upregulation_hypothesis[''] += downregulation_hypothesis[''] += elif predicted_regulation is Effect.no_effect:continueelse:downregulation_hypothesis[''] += upregulation_hypothesis[''] += upregulation_hypothesis[''] = upregulation_hypothesis[''] - upregulation_hypothesis['']downregulation_hypothesis[''] = downregulation_hypothesis[''] - downregulation_hypothesis['']return upregulation_hypothesis, downregulation_hypothesis", "docstring": "Test the regulator hypothesis of the given node on the input data using the algorithm.\n\n Note: this method returns both +/- signed hypotheses evaluated\n\n Algorithm:\n\n 1. Calculate the shortest path between the regulator node and each node in observed_regulation\n 2. 
Calculate the concordance of the causal network and the observed regulation when there is path\n between target node and regulator node\n\n :param networkx.DiGraph graph: A causal graph\n :param dict node_to_regulation: Nodes to score (1,-1,0)\n :return Dictionaries with hypothesis results (keys: score, correct, incorrect, ambiguous)\n :rtype: dict", "id": "f9426:m0"} {"signature": "def run_cna(graph, root, targets, relationship_dict=None):", "body": "causal_effects = []relationship_dict = causal_effect_dict if relationship_dict is None else relationship_dictfor target in targets:try:shortest_paths = nx.all_shortest_paths(graph, source=root, target=target)effects_in_path = set()for shortest_path in shortest_paths:effects_in_path.add(get_path_effect(graph, shortest_path, relationship_dict))if len(effects_in_path) == :causal_effects.append((root, target, next(iter(effects_in_path)))) elif Effect.activation in effects_in_path and Effect.inhibition in effects_in_path:causal_effects.append((root, target, Effect.ambiguous))elif Effect.activation in effects_in_path and Effect.inhibition not in effects_in_path:causal_effects.append((root, target, Effect.activation))elif Effect.inhibition in effects_in_path and Effect.activation not in effects_in_path:causal_effects.append((root, target, Effect.inhibition))else:log.warning(''.format(effects_in_path))except nx.NetworkXNoPath:log.warning(''.format(root, target))return causal_effects", "docstring": "Returns the effect from the root to the target nodes represented as {-1,1}\n\n :param pybel.BELGraph graph: A BEL graph\n :param BaseEntity root: The root node\n :param iter targets: The targets nodes\n :param dict relationship_dict: dictionary with relationship effects\n :return list[tuple]:", "id": "f9426:m1"} {"signature": "def get_path_effect(graph, path, relationship_dict):", "body": "causal_effect = []for predecessor, successor in pairwise(path):if pair_has_contradiction(graph, predecessor, successor):return Effect.ambiguousedges = graph.get_edge_data(predecessor, successor)edge_key, edge_relation, _ = rank_edges(edges)relation = graph[predecessor][successor][edge_key][RELATION]if relation not in relationship_dict or relationship_dict[relation] == :return Effect.no_effectcausal_effect.append(relationship_dict[relation])final_effect = reduce(lambda x, y: x * y, causal_effect)return Effect.activation if final_effect == else Effect.inhibition", "docstring": "Calculate the final effect of the root node to the sink node in the path.\n\n :param pybel.BELGraph graph: A BEL graph\n :param list path: Path from root to sink node\n :param dict relationship_dict: dictionary with relationship effects\n :rtype: Effect", "id": "f9426:m2"} {"signature": "def rank_edges(edges, edge_ranking=None):", "body": "edge_ranking = default_edge_ranking if edge_ranking is None else edge_rankingedges_scores = [(edge_id, edge_data[RELATION], edge_ranking[edge_data[RELATION]])for edge_id, edge_data in edges.items()]return max(edges_scores, key=itemgetter())", "docstring": "Return the highest ranked edge from a multiedge.\n\n :param dict edges: dictionary with all edges between two nodes\n :param dict edge_ranking: A dictionary of {relationship: score}\n :return: Highest ranked edge\n :rtype: tuple: (edge id, relation, score given ranking)", "id": "f9426:m3"} {"signature": "def calculate_average_scores_on_graph(graph: BELGraph,key: Optional[str] = None,tag: Optional[str] = None,default_score: Optional[float] = None,runs: Optional[int] = None,use_tqdm: bool = False,):", "body": 
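Illustrative note: run_cna and get_path_effect (f9426:m1, f9426:m2) propagate a causal sign from a root node to each target by multiplying the +1/-1 effect of every edge along the shortest paths and reporting an ambiguous result when different paths disagree. The sketch below shows that sign-propagation logic on a plain networkx DiGraph; the relation-to-sign map stands in for the module's elided causal_effect_dict, and the Effect enum is replaced by simple return values.

from functools import reduce

import networkx as nx

# Hypothetical relation -> sign map standing in for the elided causal_effect_dict.
RELATION_SIGN = {"increases": 1, "directlyIncreases": 1, "decreases": -1, "directlyDecreases": -1}


def path_effect(graph: nx.DiGraph, path) -> int:
    """Multiply edge signs along a path; an odd number of decreases flips the effect."""
    signs = []
    for u, v in zip(path, path[1:]):
        relation = graph[u][v]["relation"]
        if relation not in RELATION_SIGN:
            return 0  # a non-causal edge breaks the chain
        signs.append(RELATION_SIGN[relation])
    return reduce(lambda x, y: x * y, signs, 1)


def predict_regulation(graph: nx.DiGraph, root, target):
    """Aggregate the sign over all shortest paths; conflicting signs are ambiguous."""
    try:
        paths = list(nx.all_shortest_paths(graph, source=root, target=target))
    except nx.NetworkXNoPath:
        return None
    signs = {path_effect(graph, path) for path in paths} - {0}
    if not signs:
        return 0
    return signs.pop() if len(signs) == 1 else "ambiguous"


if __name__ == "__main__":
    g = nx.DiGraph()
    g.add_edge("A", "B", relation="increases")
    g.add_edge("B", "C", relation="decreases")
    print(predict_regulation(g, "A", "C"))  # -1: A indirectly down-regulates C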
"subgraphs = generate_bioprocess_mechanisms(graph, key=key)scores = calculate_average_scores_on_subgraphs(subgraphs,key=key,tag=tag,default_score=default_score,runs=runs,use_tqdm=use_tqdm)return scores", "docstring": "Calculate the scores over all biological processes in the sub-graph.\n\n As an implementation, it simply computes the sub-graphs then calls :func:`calculate_average_scores_on_subgraphs` as\n described in that function's documentation.\n\n :param graph: A BEL graph with heats already on the nodes\n :param key: The key in the node data dictionary representing the experimental data. Defaults to\n :data:`pybel_tools.constants.WEIGHT`.\n :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'\n :param default_score: The initial score for all nodes. This number can go up or down.\n :param runs: The number of times to run the heat diffusion workflow. Defaults to 100.\n :param use_tqdm: Should there be a progress bar for runners?\n :return: A dictionary of {pybel node tuple: results tuple}\n :rtype: dict[tuple, tuple]\n\n Suggested usage with :mod:`pandas`:\n\n >>> import pandas as pd\n >>> from pybel_tools.analysis.heat import calculate_average_scores_on_graph\n >>> graph = ... # load graph and data\n >>> scores = calculate_average_scores_on_graph(graph)\n >>> pd.DataFrame.from_items(scores.items(), orient='index', columns=RESULT_LABELS)", "id": "f9429:m0"} {"signature": "def calculate_average_scores_on_subgraphs(subgraphs: Mapping[H, BELGraph],key: Optional[str] = None,tag: Optional[str] = None,default_score: Optional[float] = None,runs: Optional[int] = None,use_tqdm: bool = False,tqdm_kwargs: Optional[Mapping[str, Any]] = None,) -> Mapping[H, Tuple[float, float, float, float, int, int]]:", "body": "results = {}log.info('', len(subgraphs), runs)it = subgraphs.items()if use_tqdm:_tqdm_kwargs = dict(total=len(subgraphs), desc='')if tqdm_kwargs:_tqdm_kwargs.update(tqdm_kwargs)it = tqdm(it, **_tqdm_kwargs)for node, subgraph in it:number_first_neighbors = subgraph.in_degree(node)number_first_neighbors = if isinstance(number_first_neighbors, dict) else number_first_neighborsmechanism_size = subgraph.number_of_nodes()runners = workflow(subgraph, node, key=key, tag=tag, default_score=default_score, runs=runs)scores = [runner.get_final_score() for runner in runners]if == len(scores):results[node] = (None,None,None,None,number_first_neighbors,mechanism_size,)continuescores = np.array(scores)average_score = np.average(scores)score_std = np.std(scores)med_score = np.median(scores)chi_2_stat, norm_p = stats.normaltest(scores)results[node] = (average_score,score_std,norm_p,med_score,number_first_neighbors,mechanism_size,)return results", "docstring": "Calculate the scores over precomputed candidate mechanisms.\n\n :param subgraphs: A dictionary of keys to their corresponding subgraphs\n :param key: The key in the node data dictionary representing the experimental data. Defaults to\n :data:`pybel_tools.constants.WEIGHT`.\n :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'\n :param default_score: The initial score for all nodes. This number can go up or down.\n :param runs: The number of times to run the heat diffusion workflow. 
Defaults to 100.\n :param use_tqdm: Should there be a progress bar for runners?\n :return: A dictionary of keys to results tuples\n\n Example Usage:\n\n >>> import pandas as pd\n >>> from pybel_tools.generation import generate_bioprocess_mechanisms\n >>> from pybel_tools.analysis.heat import calculate_average_scores_on_subgraphs\n >>> # load graph and data\n >>> graph = ...\n >>> candidate_mechanisms = generate_bioprocess_mechanisms(graph)\n >>> scores = calculate_average_scores_on_subgraphs(candidate_mechanisms)\n >>> pd.DataFrame.from_items(scores.items(), orient='index', columns=RESULT_LABELS)", "id": "f9429:m1"} {"signature": "def workflow(graph: BELGraph,node: BaseEntity,key: Optional[str] = None,tag: Optional[str] = None,default_score: Optional[float] = None,runs: Optional[int] = None,minimum_nodes: int = ,) -> List['']:", "body": "subgraph = generate_mechanism(graph, node, key=key)if subgraph.number_of_nodes() <= minimum_nodes:return []runners = multirun(subgraph, node, key=key, tag=tag, default_score=default_score, runs=runs)return list(runners)", "docstring": "Generate candidate mechanisms and run the heat diffusion workflow.\n\n :param graph: A BEL graph\n :param node: The BEL node that is the focus of this analysis\n :param key: The key in the node data dictionary representing the experimental data. Defaults to\n :data:`pybel_tools.constants.WEIGHT`.\n :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'\n :param default_score: The initial score for all nodes. This number can go up or down.\n :param runs: The number of times to run the heat diffusion workflow. Defaults to 100.\n :param minimum_nodes: The minimum number of nodes a sub-graph needs to try running heat diffusion\n :return: A list of runners", "id": "f9429:m2"} {"signature": "def multirun(graph: BELGraph,node: BaseEntity,key: Optional[str] = None,tag: Optional[str] = None,default_score: Optional[float] = None,runs: Optional[int] = None,use_tqdm: bool = False,) -> Iterable['']:", "body": "if runs is None:runs = it = range(runs)if use_tqdm:it = tqdm(it, total=runs)for i in it:try:runner = Runner(graph, node, key=key, tag=tag, default_score=default_score)runner.run()yield runnerexcept Exception:log.debug('', i, node)", "docstring": "Run the heat diffusion workflow multiple times, each time yielding a :class:`Runner` object upon completion.\n\n :param graph: A BEL graph\n :param node: The BEL node that is the focus of this analysis\n :param key: The key in the node data dictionary representing the experimental data. Defaults to\n :data:`pybel_tools.constants.WEIGHT`.\n :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'\n :param default_score: The initial score for all nodes. This number can go up or down.\n :param runs: The number of times to run the heat diffusion workflow. 
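Illustrative note: calculate_average_scores_on_subgraphs (f9429:m1) collapses the final scores of repeated heat-diffusion runs for each candidate mechanism into an average, standard deviation, normality p-value (via scipy's normaltest), and median. A small sketch of just that aggregation step, assuming the per-run scores are already collected in a list:

import numpy as np
from scipy import stats


def summarize_run_scores(scores):
    """Collapse per-run final scores into (mean, std, normality p-value, median)."""
    if not scores:
        return None  # no successful runs for this mechanism
    arr = np.array(scores, dtype=float)
    _, norm_p = stats.normaltest(arr)  # D'Agostino-Pearson test, as used in the record above
    return float(np.average(arr)), float(np.std(arr)), float(norm_p), float(np.median(arr))


if __name__ == "__main__":
    print(summarize_run_scores([2.5, 3.1, 2.8, 3.0, 2.9, 3.3, 2.7, 3.2]))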
Defaults to 100.\n :param use_tqdm: Should there be a progress bar for runners?\n :return: An iterable over the runners after each iteration", "id": "f9429:m3"} {"signature": "def workflow_aggregate(graph: BELGraph,node: BaseEntity,key: Optional[str] = None,tag: Optional[str] = None,default_score: Optional[float] = None,runs: Optional[int] = None,aggregator: Optional[Callable[[Iterable[float]], float]] = None,) -> Optional[float]:", "body": "runners = workflow(graph, node, key=key, tag=tag, default_score=default_score, runs=runs)scores = [runner.get_final_score() for runner in runners]if not scores:log.warning('', node)returnif aggregator is None:return np.average(scores)return aggregator(scores)", "docstring": "Get the average score over multiple runs.\n\n This function is very simple, and can be copied to do more interesting statistics over the :class:`Runner`\n instances. To iterate over the runners themselves, see :func:`workflow`\n\n :param graph: A BEL graph\n :param node: The BEL node that is the focus of this analysis\n :param key: The key in the node data dictionary representing the experimental data. Defaults to\n :data:`pybel_tools.constants.WEIGHT`.\n :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'\n :param default_score: The initial score for all nodes. This number can go up or down.\n :param runs: The number of times to run the heat diffusion workflow. Defaults to 100.\n :param aggregator: A function that aggregates a list of scores. Defaults to :func:`numpy.average`.\n Could also use: :func:`numpy.mean`, :func:`numpy.median`, :func:`numpy.min`, :func:`numpy.max`\n :return: The average score for the target node", "id": "f9429:m4"} {"signature": "def workflow_all(graph: BELGraph,key: Optional[str] = None,tag: Optional[str] = None,default_score: Optional[float] = None,runs: Optional[int] = None,) -> Mapping[BaseEntity, List[Runner]]:", "body": "results = {}for node in get_nodes_by_function(graph, BIOPROCESS):results[node] = workflow(graph, node, key=key, tag=tag, default_score=default_score, runs=runs)return results", "docstring": "Run the heat diffusion workflow and get runners for every possible candidate mechanism\n\n 1. Get all biological processes\n 2. Get candidate mechanism induced two level back from each biological process\n 3. Heat diffusion workflow for each candidate mechanism for multiple runs\n 4. Return all runner results\n\n :param graph: A BEL graph\n :param key: The key in the node data dictionary representing the experimental data. Defaults to\n :data:`pybel_tools.constants.WEIGHT`.\n :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'\n :param default_score: The initial score for all nodes. This number can go up or down.\n :param runs: The number of times to run the heat diffusion workflow. 
Defaults to 100.\n :return: A dictionary of {node: list of runners}", "id": "f9429:m5"} {"signature": "def workflow_all_aggregate(graph: BELGraph,key: Optional[str] = None,tag: Optional[str] = None,default_score: Optional[float] = None,runs: Optional[int] = None,aggregator: Optional[Callable[[Iterable[float]], float]] = None,):", "body": "results = {}bioprocess_nodes = list(get_nodes_by_function(graph, BIOPROCESS))for bioprocess_node in tqdm(bioprocess_nodes):subgraph = generate_mechanism(graph, bioprocess_node, key=key)try:results[bioprocess_node] = workflow_aggregate(graph=subgraph,node=bioprocess_node,key=key,tag=tag,default_score=default_score,runs=runs,aggregator=aggregator)except Exception:log.exception('', bioprocess_node)return results", "docstring": "Run the heat diffusion workflow to get average score for every possible candidate mechanism.\n\n 1. Get all biological processes\n 2. Get candidate mechanism induced two level back from each biological process\n 3. Heat diffusion workflow on each candidate mechanism for multiple runs\n 4. Report average scores for each candidate mechanism\n\n :param graph: A BEL graph\n :param key: The key in the node data dictionary representing the experimental data. Defaults to\n :data:`pybel_tools.constants.WEIGHT`.\n :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'\n :param default_score: The initial score for all nodes. This number can go up or down.\n :param runs: The number of times to run the heat diffusion workflow. Defaults to 100.\n :param aggregator: A function that aggregates a list of scores. Defaults to :func:`numpy.average`.\n Could also use: :func:`numpy.mean`, :func:`numpy.median`, :func:`numpy.min`, :func:`numpy.max`\n :return: A dictionary of {node: upstream causal subgraph}", "id": "f9429:m6"} {"signature": "def calculate_average_score_by_annotation(graph: BELGraph,annotation: str,key: Optional[str] = None,runs: Optional[int] = None,use_tqdm: bool = False,) -> Mapping[str, float]:", "body": "candidate_mechanisms = generate_bioprocess_mechanisms(graph, key=key)scores: Mapping[BaseEntity, Tuple] = calculate_average_scores_on_subgraphs(subgraphs=candidate_mechanisms,key=key,runs=runs,use_tqdm=use_tqdm,)subgraph_bp: Mapping[str, List[BaseEntity]] = defaultdict(list)subgraphs: Mapping[str, BELGraph] = get_subgraphs_by_annotation(graph, annotation)for annotation_value, subgraph in subgraphs.items():subgraph_bp[annotation_value].extend(get_nodes_by_function(subgraph, BIOPROCESS))return {annotation_value: np.average(scores[bp][] for bp in bps)for annotation_value, bps in subgraph_bp.items()}", "docstring": "For each sub-graph induced over the edges matching the annotation, calculate the average score\n for all of the contained biological processes\n\n Assumes you haven't done anything yet\n\n 1. Generates biological process upstream candidate mechanistic sub-graphs with\n :func:`generate_bioprocess_mechanisms`\n 2. Calculates scores for each sub-graph with :func:`calculate_average_scores_on_sub-graphs`\n 3. Overlays data with pbt.integration.overlay_data\n 4. Calculates averages with pbt.selection.group_nodes.average_node_annotation\n\n :param graph: A BEL graph\n :param annotation: A BEL annotation\n :param key: The key in the node data dictionary representing the experimental data. Defaults to\n :data:`pybel_tools.constants.WEIGHT`.\n :param runs: The number of times to run the heat diffusion workflow. 
Defaults to 100.\n :param use_tqdm: Should there be a progress bar for runners?\n :return: A dictionary from {str annotation value: tuple scores}\n\n Example Usage:\n\n >>> import pybel\n >>> from pybel_tools.integration import overlay_data\n >>> from pybel_tools.analysis.heat import calculate_average_score_by_annotation\n >>> graph = pybel.from_path(...)\n >>> scores = calculate_average_score_by_annotation(graph, 'subgraph')", "id": "f9429:m7"} {"signature": "def __init__(self,graph: BELGraph,target_node: BaseEntity,key: Optional[str] = None,tag: Optional[str] = None,default_score: Optional[float] = None,) -> None:", "body": "self.graph: BELGraph = graph.copy()self.target_node = target_nodeself.key = key or WEIGHTself.default_score = default_score or DEFAULT_SCOREself.tag = tag or SCOREfor node, data in self.graph.nodes(data=True):if not self.graph.predecessors(node):self.graph.nodes[node][self.tag] = data.get(self.key, )log.log(, '', target_node, self.graph.nodes[node][self.tag])", "docstring": "Initialize the heat diffusion runner class.\n\n :param graph: A BEL graph\n :param target_node: The BEL node that is the focus of this analysis\n :param key: The key in the node data dictionary representing the experimental data. Defaults to\n :data:`pybel_tools.constants.WEIGHT`.\n :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'\n :param default_score: The initial score for all nodes. This number can go up or down.", "id": "f9429:c0:m0"} {"signature": "def iter_leaves(self) -> Iterable[BaseEntity]:", "body": "for node in self.graph:if self.tag in self.graph.nodes[node]:continueif not any(self.tag not in self.graph.nodes[p] for p in self.graph.predecessors(node)):yield node", "docstring": "Return an iterable over all nodes that are leaves.\n\n A node is a leaf if either:\n\n - it doesn't have any predecessors, OR\n - all of its predecessors have a score in their data dictionaries", "id": "f9429:c0:m1"} {"signature": "def has_leaves(self) -> List[BaseEntity]:", "body": "leaves = list(self.iter_leaves())return leaves", "docstring": "Return if the current graph has any leaves.\n\n Implementation is not that smart currently, and does a full sweep.", "id": "f9429:c0:m2"} {"signature": "def in_out_ratio(self, node: BaseEntity) -> float:", "body": "return self.graph.in_degree(node) / float(self.graph.out_degree(node))", "docstring": "Calculate the ratio of in-degree / out-degree of a node.", "id": "f9429:c0:m3"} {"signature": "def unscored_nodes_iter(self) -> BaseEntity:", "body": "for node, data in self.graph.nodes(data=True):if self.tag not in data:yield node", "docstring": "Iterate over all nodes without a score.", "id": "f9429:c0:m4"} {"signature": "def get_random_edge(self):", "body": "nodes = [(n, self.in_out_ratio(n))for n in self.unscored_nodes_iter()if n != self.target_node]node, deg = min(nodes, key=itemgetter())log.log(, '', node, deg)possible_edges = self.graph.in_edges(node, keys=True)log.log(, '', possible_edges)edge_to_remove = random.choice(possible_edges)log.log(, '', edge_to_remove)return edge_to_remove", "docstring": "This function should be run when there are no leaves, but there are still unscored nodes. It will introduce\n a probabilistic element to the algorithm, where some edges are disregarded randomly to eventually get a score\n for the network. 
This means that the score can be averaged over many runs for a given graph, and a better\n data structure will have to be later developed that doesn't destroy the graph (instead, annotates which edges\n have been disregarded, later)\n\n 1. get all un-scored\n 2. rank by in-degree\n 3. weighted probability over all in-edges where lower in-degree means higher probability\n 4. pick randomly which edge\n\n :return: A random in-edge to the lowest in/out degree ratio node. This is a 3-tuple of (node, node, key)\n :rtype: tuple", "id": "f9429:c0:m5"} {"signature": "def remove_random_edge(self):", "body": "u, v, k = self.get_random_edge()log.log(, '', u, v, k)self.graph.remove_edge(u, v, k)", "docstring": "Remove a random in-edge from the node with the lowest in/out degree ratio.", "id": "f9429:c0:m6"} {"signature": "def remove_random_edge_until_has_leaves(self) -> None:", "body": "while True:leaves = set(self.iter_leaves())if leaves:returnself.remove_random_edge()", "docstring": "Remove random edges until there is at least one leaf node.", "id": "f9429:c0:m7"} {"signature": "def score_leaves(self) -> Set[BaseEntity]:", "body": "leaves = set(self.iter_leaves())if not leaves:log.warning('')return set()for leaf in leaves:self.graph.nodes[leaf][self.tag] = self.calculate_score(leaf)log.log(, '', leaf)return leaves", "docstring": "Calculate the score for all leaves.\n\n :return: The set of leaf nodes that were scored", "id": "f9429:c0:m8"} {"signature": "def run(self) -> None:", "body": "while not self.done_chomping():self.remove_random_edge_until_has_leaves()self.score_leaves()", "docstring": "Calculate scores for all leaves until there are none, removes edges until there are, and repeats until\n all nodes have been scored.", "id": "f9429:c0:m9"} {"signature": "def run_with_graph_transformation(self) -> Iterable[BELGraph]:", "body": "yield self.get_remaining_graph()while not self.done_chomping():while not list(self.iter_leaves()):self.remove_random_edge()yield self.get_remaining_graph()self.score_leaves()yield self.get_remaining_graph()", "docstring": "Calculate scores for all leaves until there are none, removes edges until there are, and repeats until\n all nodes have been scored. Also, yields the current graph at every step so you can make a cool animation\n of how the graph changes throughout the course of the algorithm\n\n :return: An iterable of BEL graphs", "id": "f9429:c0:m10"} {"signature": "def done_chomping(self) -> bool:", "body": "return self.tag in self.graph.nodes[self.target_node]", "docstring": "Determines if the algorithm is complete by checking if the target node of this analysis has been scored\n yet. 
Because the algorithm removes edges when it gets stuck until it is un-stuck, it is always guaranteed to\n finish.\n\n :return: Is the algorithm done running?", "id": "f9429:c0:m11"} {"signature": "def get_final_score(self) -> float:", "body": "if not self.done_chomping():raise ValueError('')return self.graph.nodes[self.target_node][self.tag]", "docstring": "Return the final score for the target node.\n\n :return: The final score for the target node", "id": "f9429:c0:m12"} {"signature": "def calculate_score(self, node: BaseEntity) -> float:", "body": "score = (self.graph.nodes[node][self.tag]if self.tag in self.graph.nodes[node] elseself.default_score)for predecessor, _, d in self.graph.in_edges(node, data=True):if d[RELATION] in CAUSAL_INCREASE_RELATIONS:score += self.graph.nodes[predecessor][self.tag]elif d[RELATION] in CAUSAL_DECREASE_RELATIONS:score -= self.graph.nodes[predecessor][self.tag]return score", "docstring": "Calculate the new score of the given node.", "id": "f9429:c0:m13"} {"signature": "def get_remaining_graph(self) -> BELGraph:", "body": "return self.graph.subgraph(self.unscored_nodes_iter())", "docstring": "Allows for introspection on the algorithm at a given point by returning the sub-graph induced\n by all unscored nodes\n\n :return: The remaining un-scored BEL graph", "id": "f9429:c0:m14"} {"signature": "def _get_column_nums_from_args(columns):", "body": "nums = []for c in columns:for p in c.split(''):p = p.strip()try:c = int(p)nums.append(c)except (TypeError, ValueError):start, ignore, end = p.partition('')try:start = int(start)end = int(end)except (TypeError, ValueError):raise ValueError('' % c)inc = if start < end else -nums.extend(range(start, end + inc, inc))return [n - for n in nums]", "docstring": "Turn column inputs from user into list of simple numbers.\n\n Inputs can be:\n\n - individual number: 1\n - range: 1-3\n - comma separated list: 1,2,3,4-6", "id": "f9434:m0"} {"signature": "def _get_printable_columns(columns, row):", "body": "if not columns:return rowreturn tuple(row[c] for c in columns)", "docstring": "Return only the part of the row which should be printed.", "id": "f9434:m1"} {"signature": "def extract_json(fileobj, keywords, comment_tags, options):", "body": "data=fileobj.read()json_extractor=JsonExtractor(data)strings_data=json_extractor.get_lines_data()for item in strings_data:messages = [item['']]if item.get('') == '':messages.append(item[''])yield item[''],item.get('',''),tuple(messages),[]", "docstring": "Supports: gettext, ngettext. 
See package README or github ( https://github.com/tigrawap/pybabel-json ) for more usage info.", "id": "f9435:m0"} {"signature": "def get_lines_data(self):", "body": "encoding = ''for token in tokenize(self.data.decode(encoding)):if token.type == '':if token.value == '':self.start_object()elif token.value =='':self.with_separator(token)elif token.value == '':self.end_object()elif token.value == '':self.end_pair()elif token.type=='':if self.state=='':self.current_key=unquote_string(token.value)if self.current_key==JSON_GETTEXT_KEYWORD:self.gettext_mode=Trueif self.gettext_mode:if self.current_key==JSON_GETTEXT_KEY_CONTENT:self.token_to_add=tokenelif self.current_key==JSON_GETTEXT_KEY_ALT_CONTENT:self.token_params['']=tokenelif self.current_key==JSON_GETTEXT_KEY_FUNCNAME:self.token_params['']=token.valueelse:self.token_to_add=tokenreturn self.results", "docstring": "Returns string:line_numbers list\nSince all strings are unique it is OK to get line numbers this way.\nSince same string can occur several times inside single .json file the values should be popped(FIFO) from the list\n:rtype: list", "id": "f9435:c0:m7"} {"signature": "def tearDown(self):", "body": "if os.path.exists(self.path):os.remove(self.path)", "docstring": "Called after each test to remove the file", "id": "f9440:c0:m0"} {"signature": "def _initfile(path, data=\"\"):", "body": "data = {} if data.lower() == \"\" else []if not os.path.exists(path): dirname = os.path.dirname(path)if dirname and not os.path.exists(dirname):raise IOError((\"\"\"\").format(os.path.dirname(path)))with open(path, \"\") as f:json.dump(data, f)return Trueelif os.path.getsize(path) == : with open(path, \"\") as f:json.dump(data, f)else: return False", "docstring": "Initialize an empty JSON file.", "id": "f9441:m0"} {"signature": "def _checkType(self, key):", "body": "pass", "docstring": "Make sure the type of a key is appropriate.", "id": "f9441:c0:m4"} {"signature": "def _data(self):", "body": "if self.is_caching:return self.cachewith open(self.path, \"\") as f:return json.load(f)", "docstring": "A simpler version of data to avoid infinite recursion in some cases.\n\n Don't use this.", "id": "f9441:c4:m1"} {"signature": "@propertydef data(self):", "body": "self._updateType()return self._data()", "docstring": "Get a vanilla dict object to represent the file.", "id": "f9441:c4:m2"} {"signature": "@data.setterdef data(self, data):", "body": "if self.is_caching:self.cache = dataelse:fcontents = self.file_contentswith open(self.path, \"\") as f:try:indent = self.indent if self.pretty else Nonejson.dump(data, f, sort_keys=self.sort_keys, indent=indent)except Exception as e:f.seek()f.truncate()f.write(fcontents)raise eself._updateType()", "docstring": "Overwrite the file with new data. You probably shouldn't do\n this yourself, it's easy to screw up your whole file with this.", "id": "f9441:c4:m3"} {"signature": "def _updateType(self):", "body": "data = self._data()if isinstance(data, dict) and isinstance(self, ListFile):self.__class__ = DictFileelif isinstance(data, list) and isinstance(self, DictFile):self.__class__ = ListFile", "docstring": "Make sure that the class behaves like the data structure that it\n is, so that we don't get a ListFile trying to represent a dict.", "id": "f9441:c4:m6"} {"signature": "def set_data(self, data):", "body": "warnings.warn(\"\",DeprecationWarning)self.data = data", "docstring": "Equivalent to setting the \"data\" attribute. 
Exists for backwards\n compatibility.", "id": "f9441:c4:m7"} {"signature": "def remove(self):", "body": "os.remove(self.path)", "docstring": "Delete the file from the disk completely.", "id": "f9441:c4:m8"} {"signature": "@propertydef file_contents(self):", "body": "with open(self.path, \"\") as f:return f.read()", "docstring": "Get the raw file contents of the file.", "id": "f9441:c4:m9"} {"signature": "@propertydef is_caching(self):", "body": "return hasattr(self, \"\")", "docstring": "Returns a boolean value describing whether a grouped write is\n underway.", "id": "f9441:c4:m10"} {"signature": "def clear(self):", "body": "self.data = []", "docstring": "L.clear() -> None -- remove all items from L.", "id": "f9441:c6:m1"} {"signature": "@staticmethoddef with_data(path, data):", "body": "if isinstance(data, str):data = json.loads(data)if os.path.exists(path):raise ValueError(\"\"\"\"\"\"\"\")else:f = File(path)f.data = datareturn f", "docstring": "Initialize a new file that starts out with some data. Pass data\n as a list, dict, or JSON string.", "id": "f9441:c7:m1"} {"signature": "def get_fuel_prices(self) -> GetFuelPricesResponse:", "body": "response = requests.get(''.format(API_URL_BASE),headers=self._get_headers(),timeout=self._timeout,)if not response.ok:raise FuelCheckError.create(response)return GetFuelPricesResponse.deserialize(response.json())", "docstring": "Fetches fuel prices for all stations.", "id": "f9446:c0:m3"} {"signature": "def get_fuel_prices_for_station(self,station: int) -> List[Price]:", "body": "response = requests.get(''.format(API_URL_BASE, station),headers=self._get_headers(),timeout=self._timeout,)if not response.ok:raise FuelCheckError.create(response)data = response.json()return [Price.deserialize(data) for data in data['']]", "docstring": "Gets the fuel prices for a specific fuel station.", "id": "f9446:c0:m4"} {"signature": "def get_fuel_prices_within_radius(self, latitude: float, longitude: float, radius: int,fuel_type: str, brands: Optional[List[str]] = None) -> List[StationPrice]:", "body": "if brands is None:brands = []response = requests.post(''.format(API_URL_BASE),json={'': fuel_type,'': latitude,'': longitude,'': radius,'': brands,},headers=self._get_headers(),timeout=self._timeout,)if not response.ok:raise FuelCheckError.create(response)data = response.json()stations = {station['']: Station.deserialize(station)for station in data['']}station_prices = [] for serialized_price in data['']:price = Price.deserialize(serialized_price)station_prices.append(StationPrice(price=price,station=stations[price.station_code]))return station_prices", "docstring": "Gets all the fuel prices within the specified radius.", "id": "f9446:c0:m5"} {"signature": "def get_fuel_price_trends(self, latitude: float, longitude: float,fuel_types: List[str]) -> PriceTrends:", "body": "response = requests.post(''.format(API_URL_BASE),json={'': {'': latitude,'': longitude,},'': [{'': type} for type in fuel_types],},headers=self._get_headers(),timeout=self._timeout,)if not response.ok:raise FuelCheckError.create(response)data = response.json()return PriceTrends(variances=[Variance.deserialize(variance)for variance in data['']],average_prices=[AveragePrice.deserialize(avg_price)for avg_price in data['']])", "docstring": "Gets the fuel price trends for the given location and fuel types.", "id": "f9446:c0:m6"} {"signature": "def get_reference_data(self,modified_since: Optional[datetime.datetime] = None) -> GetReferenceDataResponse:", "body": "if modified_since is None:modified_since = 
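Illustrative note: the f9441 records (_initfile, the data property and setter, remove, with_data) describe a dict-like object that re-reads its backing JSON file on every access and rewrites it on every assignment, restoring the previous contents if serialization fails. A stripped-down sketch of that read/write cycle; the class and method names here are hypothetical and the caching and list-mode behaviour of the original are omitted.

import json
import os


class JsonFile:
    """A minimal dict-like object persisted to a JSON file on every write."""

    def __init__(self, path, default=None):
        self.path = path
        if not os.path.exists(path) or os.path.getsize(path) == 0:
            with open(path, "w") as f:
                json.dump(default if default is not None else {}, f)

    @property
    def data(self):
        with open(self.path, "r") as f:
            return json.load(f)

    @data.setter
    def data(self, value):
        with open(self.path, "r") as f:
            previous = f.read()  # keep the old contents for rollback
        with open(self.path, "w") as f:
            try:
                json.dump(value, f)
            except Exception:
                f.seek(0)
                f.truncate()
                f.write(previous)
                raise

    def __getitem__(self, key):
        return self.data[key]

    def __setitem__(self, key, value):
        d = self.data
        d[key] = value
        self.data = d


if __name__ == "__main__":
    f = JsonFile("demo.json")
    f["answer"] = 42
    print(f["answer"])
    os.remove(f.path)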
datetime.datetime(year=, month=, day=)response = requests.get(''.format(API_URL_BASE),headers={'': self._format_dt(modified_since),**self._get_headers(),},timeout=self._timeout,)if not response.ok:raise FuelCheckError.create(response)return GetReferenceDataResponse.deserialize(response.json())", "docstring": "Fetches API reference data.\n\n:param modified_since: The response will be empty if no\nchanges have been made to the reference data since this\ntimestamp, otherwise all reference data will be returned.", "id": "f9446:c0:m7"} {"signature": "def data(provider_method, first_param_name_suffix=False):", "body": "def test_func_decorator(test_func):test_func._provider_method = provider_methodtest_func._provider_name_suffix = first_param_name_suffixreturn test_funcreturn test_func_decorator", "docstring": "A method decorator for unittest.TestCase classes that configured a\nstatic method to be used to provide multiple sets of test data to a single\ntest\n\n:param provider_method:\n The name of the staticmethod of the class to use as the data provider\n\n:param first_param_name_suffix:\n If the first parameter for each set should be appended to the method\n name to generate the name of the test. Otherwise integers are used.\n\n:return:\n The decorated function", "id": "f9453:m0"} {"signature": "def data_decorator(cls):", "body": "def generate_test_func(name, original_function, num, params):if original_function._provider_name_suffix:data_name = params[]params = params[:]else:data_name = numexpanded_name = '' % (name, data_name)def generated_test_function(self):original_function(self, *params)setattr(cls, expanded_name, generated_test_function)for name in dir(cls):func = getattr(cls, name)if hasattr(func, ''):num = for params in getattr(cls, func._provider_method)():generate_test_func(name, func, num, params)num += return cls", "docstring": "A class decorator that works with the @provider decorator to generate test\nmethod from a data provider", "id": "f9453:m1"} {"signature": "def close(self):", "body": "if not self.socket:returnself.socket.close()self.socket = None", "docstring": "Closes any open connection", "id": "f9454:c2:m1"} {"signature": "def download(self, url, timeout):", "body": "self.setup_connection(url, timeout)tries = while tries < :tries += try:self.ensure_connected()req_headers = OrderedDict()req_headers[''] = self.url_info[]if self.url_info[] != :req_headers[''] += '' % self.url_info[]req_headers[''] = '' if self.keep_alive else ''req_headers[\"\"] = '' % version.__version__request = ''url_info = urlparse(url)path = '' if not url_info.path else url_info.pathif url_info.query:path += '' + url_info.queryrequest += path + ''self.write_request(request, req_headers)response = self.read_headers()if not response:self.close()continuev, code, message, resp_headers = responsedata = self.read_body(code, resp_headers, timeout)if code == :location = resp_headers.get('')if not isinstance(location, str_cls):raise HttpsClientError('')if not re.match(r'', location):if not location.startswith(''):location = os.path.dirname(url_info.path) + locationlocation = url_info.scheme + '' + url_info.netloc + locationreturn self.download(location, timeout)if code != :raise HttpsClientError('' % (code, url))else:return dataexcept (oscrypto_errors.TLSGracefulDisconnectError):self.close()continue", "docstring": "Downloads a URL and returns the contents\n\n:param url:\n The URL to download\n\n:param timeout:\n The int number of seconds to set the timeout to\n\n:return:\n The string contents of the URL", "id": 
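Illustrative note: the data/data_decorator pair (f9453:m0, f9453:m1) turns one test method into many by calling a static provider method and generating a test per parameter set. The sketch below reproduces that pattern with hypothetical names; unlike the original it always numbers the generated tests rather than optionally suffixing the first parameter.

import unittest


def data(provider_name):
    """Mark a test method so the class decorator expands it once per provider row."""
    def mark(func):
        func._provider = provider_name
        return func
    return mark


def data_decorator(cls):
    """Generate test_<name>_<n> methods from every @data-marked method's provider rows."""
    for name in list(dir(cls)):
        func = getattr(cls, name)
        provider = getattr(func, "_provider", None)
        if provider is None:
            continue
        for i, params in enumerate(getattr(cls, provider)()):
            def make(f, args):
                def generated(self):
                    return f(self, *args)
                return generated
            setattr(cls, "test_%s_%d" % (name, i + 1), make(func, params))
    return cls


@data_decorator
class AdditionTests(unittest.TestCase):
    @staticmethod
    def add_cases():
        return [(1, 2, 3), (2, 2, 4)]

    @data("add_cases")
    def check_add(self, a, b, expected):
        self.assertEqual(a + b, expected)


if __name__ == "__main__":
    unittest.main()  # runs test_check_add_1 and test_check_add_2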
"f9454:c2:m2"} {"signature": "def setup_connection(self, url, timeout):", "body": "url_info = urlparse(url)if url_info.scheme == '':raise HttpsClientException('')hostname = url_info.hostnameport = url_info.portif not port:port = if self.socket and self.url_info != (hostname, port):self.close()self.timeout = timeoutself.url_info = (hostname, port)return self.ensure_connected()", "docstring": ":param url:\n The URL to download\n\n:param timeout:\n The int number of seconds to set the timeout to\n\n:return:\n A boolean indicating if the connection was reused", "id": "f9454:c2:m3"} {"signature": "def ensure_connected(self):", "body": "if self.socket:return Truehost, port = self.url_infosession = tls.TLSSession()self.socket = tls.TLSSocket(host, port, timeout=self.timeout, session=session)return False", "docstring": "Make sure a valid tls.TLSSocket() is open to the server\n\n:return:\n A boolean indicating if the connection was reused", "id": "f9454:c2:m4"} {"signature": "def write_request(self, request, headers):", "body": "lines = [request]for header, value in headers.items():lines.append('' % (header, value))lines.extend(['', ''])request = ''.join(lines).encode('')self.socket.write(request)", "docstring": ":param request:\n A unicode string of the first line of the HTTP request\n\n:param headers:\n An OrderedDict of the request headers", "id": "f9454:c2:m5"} {"signature": "def read_headers(self):", "body": "version = Nonecode = Nonetext = Noneheaders = OrderedDict()data = self.socket.read_until(b'')string = data.decode('')first = Falsefor line in string.split(''):line = line.strip()if first is False:if line == '':continuematch = re.match(r'', line)if not match:return Noneversion = tuple(map(int, match.group().split('')))code = int(match.group())text = match.group()first = Trueelse:if not len(line):continueparts = line.split('', )if len(parts) == :name = parts[].strip().lower()value = parts[].strip()if name in headers:if isinstance(headers[name], tuple):headers[name] = headers[name] + (value,)else:headers[name] = (headers[name], value)else:headers[name] = valuereturn (version, code, text, headers)", "docstring": "Reads the HTTP response headers from the socket\n\n:return:\n On error, None, otherwise a 4-element tuple:\n 0: A 2-element tuple of integers representing the HTTP version\n 1: An integer representing the HTTP response code\n 2: A unicode string of the HTTP response code name\n 3: An OrderedDict of HTTP headers with lowercase unicode key and unicode values", "id": "f9454:c2:m6"} {"signature": "def parse_content_length(self, headers):", "body": "content_length = headers.get('')if isinstance(content_length, str_cls) and len(content_length) > :content_length = int(content_length)return content_length", "docstring": "Returns the content-length from a dict of headers\n\n:return:\n An integer of the content length", "id": "f9454:c2:m7"} {"signature": "def _assert_regexp_matches(self, text, expected_regexp, msg=None):", "body": "if isinstance(expected_regexp, str_cls):expected_regexp = re.compile(expected_regexp)if not expected_regexp.search(text):msg = msg or \"\"msg = '' % (msg, expected_regexp.pattern, text)self.fail(msg)", "docstring": "Fail the test unless the text matches the regular expression.", "id": "f9455:m8"} {"signature": "def local_oscrypto():", "body": "global _local_moduleif _local_module:return _local_modulemodule_name = ''src_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '')module_info = imp.find_module(module_name, [src_dir])_local_module = 
imp.load_module(module_name, *module_info)if os.environ.get(''):paths = os.environ.get('').split('')if len(paths) != :raise ValueError('')_local_module.use_openssl(*paths)elif os.environ.get(''):_local_module.use_winlegacy()return _local_module", "docstring": "Make sure oscrypto is initialized and the backend is selected via env vars\n\n:return:\n The oscrypto module", "id": "f9459:m0"} {"signature": "def make_suite():", "body": "loader = unittest.TestLoader()suite = unittest.TestSuite()for test_class in test_classes():tests = loader.loadTestsFromTestCase(test_class)suite.addTests(tests)return suite", "docstring": "Constructs a unittest.TestSuite() of all tests for the package. For use\nwith setuptools.\n\n:return:\n A unittest.TestSuite() object", "id": "f9459:m1"} {"signature": "def run():", "body": "setup_file = os.path.join(package_root, '')git_wc_proc = subprocess.Popen(['', '', '', ''],stdout=subprocess.PIPE,stderr=subprocess.STDOUT,cwd=package_root)git_wc_status, _ = git_wc_proc.communicate()if len(git_wc_status) > :print(git_wc_status.decode('').rstrip(), file=sys.stderr)print('', file=sys.stderr)return Falsegit_tag_proc = subprocess.Popen(['', '', '', '', ''],stdout=subprocess.PIPE,stderr=subprocess.PIPE,cwd=package_root)tag, tag_error = git_tag_proc.communicate()if len(tag_error) > :print(tag_error.decode('').rstrip(), file=sys.stderr)print('', file=sys.stderr)return Falseif len(tag) == :print('', file=sys.stderr)return Falsetag = tag.decode('').strip()setuptools.sandbox.run_setup(setup_file,['', '', ''])twine.cli.dispatch(['', '' % (package_name, tag)])setuptools.sandbox.run_setup(setup_file,[''])", "docstring": "Creates a sdist .tar.gz and a bdist_wheel --univeral .whl and uploads\nthem to pypi\n\n:return:\n A bool - if the packaging and upload process was successful", "id": "f9464:m0"} {"signature": "def _get_func_info(docstring, def_lineno, code_lines, prefix):", "body": "def_index = def_lineno - definition = code_lines[def_index]definition = definition.rstrip()while not definition.endswith(''):def_index += definition += '' + code_lines[def_index].rstrip()definition = textwrap.dedent(definition).rstrip('')definition = definition.replace('', '' + prefix)description = ''found_colon = Falseparams = ''for line in docstring.splitlines():if line and line[] == '':found_colon = Trueif not found_colon:if description:description += ''description += lineelse:if params:params += ''params += linedescription = description.strip()description_md = ''if description:description_md = \"\" % (prefix, description.replace('', '' + prefix))description_md = re.sub('', '', description_md)params = params.strip()if params:definition += ('' % (prefix, prefix))definition += params.replace('', '' % prefix)definition += ('' % prefix)definition = re.sub('', '', definition)for search, replace in definition_replacements.items():definition = definition.replace(search, replace)return (definition, description_md)", "docstring": "Extracts the function signature and description of a Python function\n\n:param docstring:\n A unicode string of the docstring for the function\n\n:param def_lineno:\n An integer line number that function was defined on\n\n:param code_lines:\n A list of unicode string lines from the source file the function was\n defined in\n\n:param prefix:\n A prefix to prepend to all output lines\n\n:return:\n A 2-element tuple:\n\n - [0] A unicode string of the function signature with a docstring of\n parameter info\n - [1] A markdown snippet of the function description", "id": "f9465:m0"} 
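Illustrative note: read_headers and parse_content_length (f9454:c2:m6, f9454:c2:m7) read the response head from a TLS socket and turn the status line and header lines into a (version, code, reason, headers) tuple, folding repeated header names into tuples. The sketch below covers only the parsing half and operates on an already-received byte string instead of a socket; it is a simplification, not the module's implementation.

import re
from collections import OrderedDict


def parse_response_head(raw: bytes):
    """Parse b'HTTP/1.1 200 OK\\r\\nName: value\\r\\n\\r\\n' into (version, code, reason, headers)."""
    text = raw.decode("iso-8859-1")
    lines = [line for line in text.split("\r\n") if line]
    match = re.match(r"^HTTP/(\d\.\d) (\d{3}) (.*)$", lines[0])
    if not match:
        return None
    version = tuple(int(part) for part in match.group(1).split("."))
    code = int(match.group(2))
    reason = match.group(3)
    headers = OrderedDict()
    for line in lines[1:]:
        name, _, value = line.partition(":")
        name, value = name.strip().lower(), value.strip()
        if name in headers:  # repeated headers are collected into a tuple
            existing = headers[name]
            headers[name] = (existing if isinstance(existing, tuple) else (existing,)) + (value,)
        else:
            headers[name] = value
    return version, code, reason, headers


if __name__ == "__main__":
    head = b"HTTP/1.1 200 OK\r\nContent-Type: text/html\r\nSet-Cookie: a=1\r\nSet-Cookie: b=2\r\n\r\n"
    print(parse_response_head(head))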
{"signature": "def _find_sections(md_ast, sections, last, last_class, total_lines=None):", "body": "def child_walker(node):for child, entering in node.walker():if child == node:continueyield child, enteringfor child, entering in child_walker(md_ast):if child.t == '':start_line = child.sourcepos[][]if child.level == :if last:sections[(last[''], last[''])] = (last[''], start_line - )last.clear()if child.level in set([, ]):heading_elements = []for heading_child, _ in child_walker(child):heading_elements.append(heading_child)if len(heading_elements) != :continuefirst = heading_elements[]second = heading_elements[]if first.t != '':continueif second.t != '':continuetype_name = second.literal.strip()identifier = first.literal.strip().replace('', '').lstrip('')if last:sections[(last[''], last[''])] = (last[''], start_line - )last.clear()if type_name == '':if child.level != :continueif type_name == '':if child.level != :continuelast_class.append(identifier)if type_name in set(['', '']):if child.level != :continueidentifier = last_class[-] + '' + identifierlast.update({'': type_name,'': identifier,'': start_line,})elif child.t == '':find_sections(child, sections, last, last_class)if last:sections[(last[''], last[''])] = (last[''], total_lines)", "docstring": "Walks through a CommonMark AST to find section headers that delineate\ncontent that should be updated by this script\n\n:param md_ast:\n The AST of the markdown document\n\n:param sections:\n A dict to store the start and end lines of a section. The key will be\n a two-element tuple of the section type (\"class\", \"function\",\n \"method\" or \"attribute\") and identifier. The values are a two-element\n tuple of the start and end line number in the markdown document of the\n section.\n\n:param last:\n A dict containing information about the last section header seen.\n Includes the keys \"type_name\", \"identifier\", \"start_line\".\n\n:param last_class:\n A unicode string of the name of the last class found - used when\n processing methods and attributes.\n\n:param total_lines:\n An integer of the total number of lines in the markdown document -\n used to work around a bug in the API of the Python port of CommonMark", "id": "f9465:m1"} {"signature": "def walk_ast(node, code_lines, sections, md_chunks):", "body": "if isinstance(node, _ast.FunctionDef):key = ('', node.name)if key not in sections:returndocstring = ast.get_docstring(node)def_lineno = node.lineno + len(node.decorator_list)definition, description_md = _get_func_info(docstring, def_lineno, code_lines, '')md_chunk = textwrap.dedent(\"\"\"\"\"\").strip() % (node.name,definition,description_md) + \"\"md_chunks[key] = md_chunk.replace('', '')elif isinstance(node, _ast.ClassDef):if ('', node.name) not in sections:returnfor subnode in node.body:if isinstance(subnode, _ast.FunctionDef):node_id = node.name + '' + subnode.namemethod_key = ('', node_id)is_method = method_key in sectionsattribute_key = ('', node_id)is_attribute = attribute_key in sectionsis_constructor = subnode.name == ''if not is_constructor and not is_attribute and not is_method:continuedocstring = ast.get_docstring(subnode)def_lineno = subnode.lineno + len(subnode.decorator_list)if not docstring:continueif is_method or is_constructor:definition, description_md = _get_func_info(docstring, def_lineno, code_lines, '')if is_constructor:key = ('', node.name)class_docstring = ast.get_docstring(node) or ''class_description = textwrap.dedent(class_docstring).strip()if class_description:class_description_md = \"\" % 
(class_description.replace(\"\", \"\"))else:class_description_md = ''md_chunk = textwrap.dedent(\"\"\"\"\"\").strip() % (node.name,class_description_md,definition,description_md)md_chunk = md_chunk.replace('', '')else:key = method_keymd_chunk = textwrap.dedent(\"\"\"\"\"\").strip() % (subnode.name,definition,description_md)if md_chunk[-:] == '':md_chunk = md_chunk[:-]else:key = attribute_keydescription = textwrap.dedent(docstring).strip()description_md = \"\" % (description.replace(\"\", \"\"))md_chunk = textwrap.dedent(\"\"\"\"\"\").strip() % (subnode.name,description_md)md_chunks[key] = re.sub('', '', md_chunk.rstrip())elif isinstance(node, _ast.If):for subast in node.body:walk_ast(subast, code_lines, sections, md_chunks)for subast in node.orelse:walk_ast(subast, code_lines, sections, md_chunks)", "docstring": "A callback used to walk the Python AST looking for classes, functions,\nmethods and attributes. Generates chunks of markdown markup to replace\nthe existing content.\n\n:param node:\n An _ast module node object\n\n:param code_lines:\n A list of unicode strings - the source lines of the Python file\n\n:param sections:\n A dict of markdown document sections that need to be updated. The key\n will be a two-element tuple of the section type (\"class\", \"function\",\n \"method\" or \"attribute\") and identifier. The values are a two-element\n tuple of the start and end line number in the markdown document of the\n section.\n\n:param md_chunks:\n A dict with keys from the sections param and the values being a unicode\n string containing a chunk of markdown markup.", "id": "f9465:m2"} {"signature": "def run():", "body": "print('')md_files = []for root, _, filenames in os.walk(os.path.join(package_root, '')):for filename in filenames:if not filename.endswith(''):continuemd_files.append(os.path.join(root, filename))parser = CommonMark.Parser()for md_file in md_files:md_file_relative = md_file[len(package_root) + :]if md_file_relative in md_source_map:py_files = md_source_map[md_file_relative]py_paths = [os.path.join(package_root, py_file) for py_file in py_files]else:py_files = [os.path.basename(md_file).replace('', '')]py_paths = [os.path.join(package_root, package_name, py_files[])]if not os.path.exists(py_paths[]):continuewith open(md_file, '') as f:markdown = f.read().decode('')original_markdown = markdownmd_lines = list(markdown.splitlines())md_ast = parser.parse(markdown)last_class = []last = {}sections = OrderedDict()find_sections(md_ast, sections, last, last_class, markdown.count(\"\") + )md_chunks = {}for index, py_file in enumerate(py_files):py_path = py_paths[index]with open(os.path.join(py_path), '') as f:code = f.read().decode('')module_ast = ast.parse(code, filename=py_file)code_lines = list(code.splitlines())for node in ast.iter_child_nodes(module_ast):walk_ast(node, code_lines, sections, md_chunks)added_lines = def _replace_md(key, sections, md_chunk, md_lines, added_lines):start, end = sections[key]start -= start += added_linesend += added_linesnew_lines = md_chunk.split('')added_lines += len(new_lines) - (end - start)if start > and md_lines[start][:] == '' and md_lines[start - ][:] == '>':added_lines += new_lines.insert(, '')md_lines[start:end] = new_linesreturn added_linesfor key in sections:if key not in md_chunks:raise ValueError('' % key[])added_lines = _replace_md(key, sections, md_chunks[key], md_lines, added_lines)markdown = ''.join(md_lines).strip() + ''if original_markdown != markdown:with open(md_file, '') as f:f.write(markdown.encode(''))", "docstring": "Looks 
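Illustrative note: _get_func_info and walk_ast (f9465:m0, f9465:m2) walk a module's AST to pair each class, function, and method with its docstring so the markdown documentation can be regenerated. A much smaller sketch of that AST walk, assuming only top-level functions and classes matter and ignoring the signature and markdown rewriting the originals perform:

import ast


def list_docstrings(source: str):
    """Yield (qualified name, first docstring line) for functions, classes, and methods."""
    module = ast.parse(source)

    def first_line(node):
        doc = ast.get_docstring(node)
        return doc.splitlines()[0] if doc else ""

    for node in ast.iter_child_nodes(module):
        if isinstance(node, ast.FunctionDef):
            yield node.name, first_line(node)
        elif isinstance(node, ast.ClassDef):
            yield node.name, first_line(node)
            for sub in node.body:
                if isinstance(sub, ast.FunctionDef):
                    yield "%s.%s" % (node.name, sub.name), first_line(sub)


if __name__ == "__main__":
    code = 'class A:\n    """A class."""\n    def m(self):\n        """A method."""\n\ndef f():\n    """A function."""\n'
    for name, doc in list_docstrings(code):
        print(name, "-", doc)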
through the docs/ dir and parses each markdown document, looking for\nsections to update from Python docstrings. Looks for section headers in\nthe format:\n\n - ### `ClassName()` class\n - ##### `.method_name()` method\n - ##### `.attribute_name` attribute\n - ### `function_name()` function\n\nThe markdown content following these section headers up until the next\nsection header will be replaced by new markdown generated from the Python\ndocstrings of the associated source files.\n\nBy default maps docs/{name}.md to {modulename}/{name}.py. Allows for\ncustom mapping via the md_source_map variable.", "id": "f9465:m3"} {"signature": "def _pep425_implementation():", "body": "return '' if hasattr(sys, '') else ''", "docstring": ":return:\n A 2 character unicode string of the implementation - 'cp' for cpython\n or 'pp' for PyPy", "id": "f9466:m0"} {"signature": "def _pep425_version():", "body": "if hasattr(sys, ''):return (sys.version_info[], sys.pypy_version_info.major,sys.pypy_version_info.minor)else:return (sys.version_info[], sys.version_info[])", "docstring": ":return:\n A tuple of integers representing the Python version number", "id": "f9466:m1"} {"signature": "def _pep425_supports_manylinux():", "body": "try:import _manylinuxreturn bool(_manylinux.manylinux1_compatible)except (ImportError, AttributeError):passtry:proc = ctypes.CDLL(None)gnu_get_libc_version = proc.gnu_get_libc_versiongnu_get_libc_version.restype = ctypes.c_char_pver = gnu_get_libc_version()if not isinstance(ver, str_cls):ver = ver.decode('')match = re.match(r'', ver)return match and match.group() == '' and int(match.group()) >= except (AttributeError):return False", "docstring": ":return:\n A boolean indicating if the machine can use manylinux1 packages", "id": "f9466:m2"} {"signature": "def _pep425_get_abi():", "body": "try:soabi = sysconfig.get_config_var('')if soabi:if soabi.startswith(''):return '' % soabi.split('')[]return soabi.replace('', '').replace('', '')except (IOError, NameError):passimpl = _pep425_implementation()suffix = ''if impl == '':suffix += ''if sys.maxunicode == and sys.version_info < (, ):suffix += ''return '' % (impl, ''.join(map(str_cls, _pep425_version())), suffix)", "docstring": ":return:\n A unicode string of the system abi. 
Will be something like: \"cp27m\",\n \"cp33m\", etc.", "id": "f9466:m3"} {"signature": "def _pep425tags():", "body": "tags = []versions = []version_info = _pep425_version()major = version_info[:-]for minor in range(version_info[-], -, -):versions.append(''.join(map(str, major + (minor,))))impl = _pep425_implementation()abis = []abi = _pep425_get_abi()if abi:abis.append(abi)abi3 = _pep425_implementation() == '' and sys.version_info >= (,)if abi3:abis.append('')abis.append('')if sys.platform == '':plat_ver = platform.mac_ver()ver_parts = plat_ver[].split('')minor = int(ver_parts[])arch = plat_ver[]if sys.maxsize == :arch = ''arches = []while minor > :arches.append('' % (minor, arch))arches.append('' % (minor,))arches.append('' % (minor,))minor -= else:if sys.platform == '':if '' in sys.version.lower():arches = ['']arches = [sys.platform]elif hasattr(os, ''):(plat, _, _, _, machine) = os.uname()plat = plat.lower().replace('', '')machine.replace('', '').replace('', '')if plat == '' and sys.maxsize == :machine = ''arch = '' % (plat, machine)if _pep425_supports_manylinux():arches = [arch.replace('', ''), arch]else:arches = [arch]for abi in abis:for arch in arches:tags.append(('' % (impl, versions[]), abi, arch))if abi3:for version in versions[:]:for arch in arches:tags.append(('' % (impl, version), '', arch))for arch in arches:tags.append(('' % (versions[][]), '', arch))tags.append(('' % (impl, versions[]), '', ''))tags.append(('' % (impl, versions[][]), '', ''))for i, version in enumerate(versions):tags.append(('' % (version,), '', ''))if i == :tags.append(('' % (version[]), '', ''))tags.append(('', '', ''))return tags", "docstring": ":return:\n A list of 3-element tuples with unicode strings or None:\n [0] implementation tag - cp33, pp27, cp26, py2, py2.py3\n [1] abi tag - cp26m, None\n [2] arch tag - linux_x86_64, macosx_10_10_x85_64, etc", "id": "f9466:m4"} {"signature": "def run():", "body": "print('' % flake8.__version__)flake8_style = get_style_guide(config_file=os.path.join(package_root, ''))paths = []for _dir in [package_name, '', '']:for root, _, filenames in os.walk(_dir):for filename in filenames:if not filename.endswith(''):continuepaths.append(os.path.join(root, filename))report = flake8_style.check_files(paths)success = report.total_errors == if success:print('')return success", "docstring": "Runs flake8 lint\n\n:return:\n A bool - if flake8 did not find any errors", "id": "f9467:m0"} {"signature": "def run(ci=False):", "body": "xml_report_path = os.path.join(package_root, '')if os.path.exists(xml_report_path):os.unlink(xml_report_path)cov = coverage.Coverage(include='' % package_name)cov.start()from .tests import run as run_testsresult = run_tests()print()if ci:suite = unittest.TestSuite()loader = unittest.TestLoader()for other_package in other_packages:for test_class in _load_package_tests(other_package):suite.addTest(loader.loadTestsFromTestCase(test_class))if suite.countTestCases() > :print('')sys.stdout.flush()runner_result = unittest.TextTestRunner(stream=sys.stdout, verbosity=).run(suite)result = runner_result.wasSuccessful() and resultprint()sys.stdout.flush()cov.stop()cov.save()cov.report(show_missing=False)print()sys.stdout.flush()if ci:cov.xml_report()if ci and result and os.path.exists(xml_report_path):_codecov_submit()print()return result", "docstring": "Runs the tests while measuring coverage\n\n:param ci:\n If coverage is being run in a CI environment - this triggers trying to\n run the tests for the rest of modularcrypto and uploading coverage data\n\n:return:\n A 
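Illustrative note: the _pep425* helpers (f9466) derive the implementation code ("cp" or "pp"), Python version digits, ABI, and platform arches used to choose a compatible wheel. The sketch below reduces that to the implementation and python-tag pieces only; the full tag expansion above also covers abi3, manylinux, and macOS/Windows arch handling, none of which is reproduced here.

import platform
import sys


def pep425_implementation() -> str:
    """'cp' for CPython, 'pp' for PyPy, mirroring the record above."""
    return "pp" if hasattr(sys, "pypy_version_info") else "cp"


def pep425_python_tag() -> str:
    """For example 'cp311' on CPython 3.11."""
    return "%s%d%d" % (pep425_implementation(), sys.version_info[0], sys.version_info[1])


if __name__ == "__main__":
    print(pep425_python_tag(), platform.machine())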
bool - if the tests ran successfully", "id": "f9468:m0"} {"signature": "def _git_command(params, cwd):", "body": "proc = subprocess.Popen([''] + params,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,cwd=cwd)stdout, stderr = proc.communicate()code = proc.wait()if code != :e = OSError('')e.stdout = stdoutraise ereturn stdout.decode('').strip()", "docstring": "Executes a git command, returning the output\n\n:param params:\n A list of the parameters to pass to git\n\n:param cwd:\n The working directory to execute git in\n\n:return:\n A 2-element tuple of (stdout, stderr)", "id": "f9468:m3"} {"signature": "def _parse_env_var_file(data):", "body": "output = {}for line in data.splitlines():line = line.strip()if not line or '' not in line:continueparts = line.split('')if len(parts) != :continuename = parts[]value = parts[]if len(value) > :if value[] == '' and value[-] == '':value = value[:-]output[name] = valuereturn output", "docstring": "Parses a basic VAR=\"value data\" file contents into a dict\n\n:param data:\n A unicode string of the file data\n\n:return:\n A dict of parsed name/value data", "id": "f9468:m4"} {"signature": "def _platform_name():", "body": "if sys.platform == '':version = _plat.mac_ver()[]_plat_ver_info = tuple(map(int, version.split('')))if _plat_ver_info < (, ):name = ''else:name = ''return '' % (name, version)elif sys.platform == '':_win_ver = sys.getwindowsversion()_plat_ver_info = (_win_ver[], _win_ver[])return '' % _plat.win32_ver()[]elif sys.platform in ['', '']:if os.path.exists(''):with open('', '', encoding='') as f:pairs = _parse_env_var_file(f.read())if '' in pairs and '' in pairs:return '' % (pairs[''], pairs[''])version = pairs['']elif '' in pairs:return pairs['']elif '' in pairs:return pairs['']else:raise ValueError('')elif os.path.exists(''):with open('', '', encoding='') as f:pairs = _parse_env_var_file(f.read())if '' in pairs:return pairs['']else:raise ValueError('')else:return ''else:return '' % (_plat.system(), _plat.release())", "docstring": "Returns information about the current operating system and version\n\n:return:\n A unicode string containing the OS name and version", "id": "f9468:m5"} {"signature": "def _list_files(root):", "body": "dir_patterns, file_patterns = _gitignore(root)paths = []prefix = os.path.abspath(root) + os.sepfor base, dirs, files in os.walk(root):for d in dirs:for dir_pattern in dir_patterns:if fnmatch(d, dir_pattern):dirs.remove(d)breakfor f in files:skip = Falsefor file_pattern in file_patterns:if fnmatch(f, file_pattern):skip = Truebreakif skip:continuefull_path = os.path.join(base, f)if full_path[:len(prefix)] == prefix:full_path = full_path[len(prefix):]paths.append(full_path)return sorted(paths)", "docstring": "Lists all of the files in a directory, taking into account any .gitignore\nfile that is present\n\n:param root:\n A unicode filesystem path\n\n:return:\n A list of unicode strings, containing paths of all files not ignored\n by .gitignore with root, using relative paths", "id": "f9468:m6"} {"signature": "def _gitignore(root):", "body": "gitignore_path = os.path.join(root, '')dir_patterns = ['']file_patterns = []if not os.path.exists(gitignore_path):return (dir_patterns, file_patterns)with open(gitignore_path, '', encoding='') as f:for line in f.readlines():line = line.strip()if not line:continueif line.startswith(''):continueif '' in line:raise NotImplementedError('')if line.startswith(''):raise NotImplementedError('')if line.startswith(''):raise NotImplementedError('')if line.startswith(''):line = '' + line[:]if 
line.startswith(''):line = '' + line[:]if line.endswith(''):dir_patterns.append(line[:-])else:file_patterns.append(line)return (dir_patterns, file_patterns)", "docstring": "Parses a .gitignore file and returns patterns to match dirs and files.\nOnly basic gitignore patterns are supported. Pattern negation, ** wildcards\nand anchored patterns are not currently implemented.\n\n:param root:\n A unicode string of the path to the git repository\n\n:return:\n A 2-element tuple:\n - 0: a list of unicode strings to match against dirs\n - 1: a list of unicode strings to match against dirs and files", "id": "f9468:m7"} {"signature": "def _do_request(method, url, headers, data=None, query_params=None, timeout=):", "body": "if query_params:url += '' + urlencode(query_params).replace('', '')if isinstance(data, dict):data_bytes = {}for key in data:data_bytes[key.encode('')] = data[key].encode('')data = urlencode(data_bytes)headers[''] = ''if isinstance(data, str_cls):raise TypeError('')try:tempfd, tempf_path = tempfile.mkstemp('')os.write(tempfd, data or b'')os.close(tempfd)if sys.platform == '':powershell_exe = os.path.join('')code = \"\"code += \"\"for key in headers:code += \"\" % (key, headers[key])code += \"\" % (url, method, tempf_path)code += \"\"code += \"\"stdout, stderr = _execute([powershell_exe, '', code], os.getcwd())if stdout[-:] == b'' and b'' in stdout:stdout = stdout[:-]parts = stdout.split(b'', )if len(parts) == :stdout = parts[] + b'' + codecs.decode(parts[].replace(b'', b''), '')else:args = ['','',method,'','','','','', '']for key in headers:args.append('')args.append(\"\" % (key, headers[key]))args.append('')args.append('' % tempf_path)args.append(url)stdout, stderr = _execute(args, os.getcwd())finally:if tempf_path and os.path.exists(tempf_path):os.remove(tempf_path)if len(stderr) > :raise URLError(\"\" % (method, url, stderr))parts = stdout.split(b'', )if len(parts) != :raise URLError(\"\" % (method, url, stdout))header_block, body = partscontent_type_header = Nonecontent_len_header = Nonefor hline in header_block.decode('').splitlines():hline_parts = hline.split('', )if len(hline_parts) != :continuename, val = hline_partsname = name.strip().lower()val = val.strip()if name == '':content_type_header = valif name == '':content_len_header = valif content_type_header is None and content_len_header != '':raise URLError(\"\" % (method, url, stdout))if content_type_header is None:content_type = ''encoding = ''else:content_type, params = cgi.parse_header(content_type_header)encoding = params.get('')return (content_type, encoding, body)", "docstring": "Performs an HTTP request\n\n:param method:\n A unicode string of 'POST' or 'PUT'\n\n:param url;\n A unicode string of the URL to request\n\n:param headers:\n A dict of unicode strings, where keys are header names and values are\n the header values.\n\n:param data:\n A dict of unicode strings (to be encoded as\n application/x-www-form-urlencoded), or a byte string of data.\n\n:param query_params:\n A dict of unicode keys and values to pass as query params\n\n:param timeout:\n An integer number of seconds to use as the timeout\n\n:return:\n A 3-element tuple:\n - 0: A unicode string of the response content-type\n - 1: A unicode string of the response encoding, or None\n - 2: A byte string of the response body", "id": "f9468:m8"} {"signature": "def _execute(params, cwd):", "body": "proc = subprocess.Popen(params,stdout=subprocess.PIPE,stderr=subprocess.PIPE,cwd=cwd)stdout, stderr = proc.communicate()code = proc.wait()if code != :e = OSError('' 
% (params, code, stderr))e.stdout = stdoute.stderr = stderrraise ereturn (stdout, stderr)", "docstring": "Executes a subprocess\n\n:param params:\n A list of the executable and arguments to pass to it\n\n:param cwd:\n The working directory to execute the command in\n\n:return:\n A 2-element tuple of (stdout, stderr)", "id": "f9468:m9"} {"signature": "def run():", "body": "print('' + sys.version.replace('', ''))try:oscrypto_tests_module_info = imp.find_module('', [os.path.join(build_root, '')])oscrypto_tests = imp.load_module('', *oscrypto_tests_module_info)oscrypto = oscrypto_tests.local_oscrypto()print('' % oscrypto.backend())except (ImportError):passif run_lint:print('')lint_result = run_lint()else:lint_result = Trueif run_coverage:print('')sys.stdout.flush()tests_result = run_coverage(ci=True)else:print('')sys.stdout.flush()tests_result = run_tests()sys.stdout.flush()return lint_result and tests_result", "docstring": "Runs the linter and tests\n\n:return:\n A bool - if the linter and tests ran successfully", "id": "f9470:m0"} {"signature": "def run():", "body": "deps_dir = os.path.join(build_root, '')if os.path.exists(deps_dir):shutil.rmtree(deps_dir, ignore_errors=True)os.mkdir(deps_dir)try:print(\"\")_stage_requirements(deps_dir, os.path.join(package_root, '', ''))print(\"\")for other_package in other_packages:pkg_url = '' % other_packagepkg_dir = os.path.join(build_root, other_package)if os.path.exists(pkg_dir):print(\"\" % other_package)continueprint(\"\" % pkg_url)_execute(['', '', pkg_url], build_root)print()except (Exception):if os.path.exists(deps_dir):shutil.rmtree(deps_dir, ignore_errors=True)raisereturn True", "docstring": "Installs required development dependencies. Uses git to checkout other\nmodularcrypto repos for more accurate coverage data.", "id": "f9471:m0"} {"signature": "def _download(url, dest):", "body": "print('' % url)filename = os.path.basename(url)dest_path = os.path.join(dest, filename)if sys.platform == '':powershell_exe = os.path.join('')code = \"\"code += \"\" % (url, dest_path)_execute([powershell_exe, '', code], dest)else:_execute(['', '', '', '', '', url], dest)return dest_path", "docstring": "Downloads a URL to a directory\n\n:param url:\n The URL to download\n\n:param dest:\n The path to the directory to save the file in\n\n:return:\n The filesystem path to the saved file", "id": "f9471:m1"} {"signature": "def _tuple_from_ver(version_string):", "body": "return tuple(map(int, version_string.split('')))", "docstring": ":param version_string:\n A unicode dotted version string\n\n:return:\n A tuple of integers", "id": "f9471:m2"} {"signature": "def _open_archive(path):", "body": "if path.endswith(''):return zipfile.ZipFile(path, '')return tarfile.open(path, '')", "docstring": ":param path:\n A unicode string of the filesystem path to the archive\n\n:return:\n An archive object", "id": "f9471:m3"} {"signature": "def _list_archive_members(archive):", "body": "if isinstance(archive, zipfile.ZipFile):return archive.infolist()return archive.getmembers()", "docstring": ":param archive:\n An archive from _open_archive()\n\n:return:\n A list of info objects to be used with _info_name() and _extract_info()", "id": "f9471:m4"} {"signature": "def _archive_single_dir(archive):", "body": "common_root = Nonefor info in _list_archive_members(archive):fn = _info_name(info)if fn in set(['', '']):continuesep = Noneif '' in fn:sep = ''elif '' in fn:sep = ''if sep is None:root_dir = fnelse:root_dir, _ = fn.split(sep, )if common_root is None:common_root = root_direlse:if 
common_root != root_dir:return Nonereturn common_root", "docstring": "Check if all members of the archive are in a single top-level directory\n\n:param archive:\n An archive from _open_archive()\n\n:return:\n None if not a single top level directory in archive, otherwise a\n unicode string of the top level directory name", "id": "f9471:m5"} {"signature": "def _info_name(info):", "body": "if isinstance(info, zipfile.ZipInfo):return info.filename.replace('', '')return info.name.replace('', '')", "docstring": "Returns a normalized file path for an archive info object\n\n:param info:\n An info object from _list_archive_members()\n\n:return:\n A unicode string with all directory separators normalized to \"/\"", "id": "f9471:m6"} {"signature": "def _extract_info(archive, info):", "body": "if isinstance(archive, zipfile.ZipFile):fn = info.filenameis_dir = fn.endswith('') or fn.endswith('')out = archive.read(info)if is_dir and out == b'':return Nonereturn outinfo_file = archive.extractfile(info)if info_file:return info_file.read()return None", "docstring": "Extracts the contents of an archive info object\n\n;param archive:\n An archive from _open_archive()\n\n:param info:\n An info object from _list_archive_members()\n\n:return:\n None, or a byte string of the file contents", "id": "f9471:m7"} {"signature": "def _extract_package(deps_dir, pkg_path):", "body": "if pkg_path.endswith(''):try:zf = Nonezf = zipfile.ZipFile(pkg_path, '')for zi in zf.infolist():if not zi.filename.startswith(''):continuedata = _extract_info(zf, zi)if data is not None:dst_path = os.path.join(deps_dir, zi.filename[:])dst_dir = os.path.dirname(dst_path)if not os.path.exists(dst_dir):os.makedirs(dst_dir)with open(dst_path, '') as f:f.write(data)finally:if zf:zf.close()returnif pkg_path.endswith(''):try:zf = Nonezf = zipfile.ZipFile(pkg_path, '')zf.extractall(deps_dir)finally:if zf:zf.close()returntry:ar = Nonear = _open_archive(pkg_path)pkg_name = Nonebase_path = _archive_single_dir(ar) or ''if len(base_path):if '' in base_path:pkg_name, _ = base_path.split('', )base_path += ''base_pkg_path = Noneif pkg_name is not None:base_pkg_path = base_path + pkg_name + ''src_path = base_path + ''members = []for info in _list_archive_members(ar):fn = _info_name(info)if base_pkg_path is not None and fn.startswith(base_pkg_path):dst_path = fn[len(base_pkg_path) - len(pkg_name) - :]members.append((info, dst_path))continueif fn.startswith(src_path):members.append((info, fn[len(src_path):]))continuefor info, path in members:info_data = _extract_info(ar, info)if info_data is not None:dst_path = os.path.join(deps_dir, path)dst_dir = os.path.dirname(dst_path)if not os.path.exists(dst_dir):os.makedirs(dst_dir)with open(dst_path, '') as f:f.write(info_data)finally:if ar:ar.close()", "docstring": "Extract a .whl, .zip, .tar.gz or .tar.bz2 into a package path to\nuse when running CI tasks\n\n:param deps_dir:\n A unicode string of the directory the package should be extracted to\n\n:param pkg_path:\n A unicode string of the path to the archive", "id": "f9471:m8"} {"signature": "def _stage_requirements(deps_dir, path):", "body": "valid_tags = _pep425tags()exe_suffix = Noneif sys.platform == '' and _pep425_implementation() == '':win_arch = '' if sys.maxsize == else ''version_info = sys.version_infoexe_suffix = '' % (win_arch, version_info[], version_info[])packages = _parse_requires(path)for p in packages:pkg = p['']if p[''] == '':if pkg.endswith('') or pkg.endswith('') or pkg.endswith('') or pkg.endswith(''):url = pkgelse:raise 
Exception('')else:pypi_json_url = '' % pkgjson_dest = _download(pypi_json_url, deps_dir)with open(json_dest, '') as f:pkg_info = json.loads(f.read().decode(''))if os.path.exists(json_dest):os.remove(json_dest)latest = pkg_info['']['']if p[''] == '':if _tuple_from_ver(p['']) > _tuple_from_ver(latest):raise Exception('' % (p[''], pkg, latest))version = latestelif p[''] == '':if p[''] not in pkg_info['']:raise Exception('' % (p[''], pkg))version = p['']else:version = latestwheels = {}whl = Nonetar_bz2 = Nonetar_gz = Noneexe = Nonefor download in pkg_info[''][version]:if exe_suffix and download[''].endswith(exe_suffix):exe = download['']if download[''].endswith(''):parts = os.path.basename(download['']).split('')tag_impl = parts[-]tag_abi = parts[-]tag_arch = parts[-].split('')[]wheels[(tag_impl, tag_abi, tag_arch)] = download['']if download[''].endswith(''):tar_bz2 = download['']if download[''].endswith(''):tar_gz = download['']for tag in valid_tags:if tag in wheels:whl = wheels[tag]breakif exe_suffix and exe:url = exeelif whl:url = whlelif tar_bz2:url = tar_bz2elif tar_gz:url = tar_gzelse:raise Exception('' % pkg)local_path = _download(url, deps_dir)_extract_package(deps_dir, local_path)os.remove(local_path)", "docstring": "Installs requirements without using Python to download, since\ndifferent services are limiting to TLS 1.2, and older versions of\nPython do not support that\n\n:param deps_dir:\n A unicode path to a temporary directory to use for downloads\n\n:param path:\n A unicode filesystem path to a requirements file", "id": "f9471:m9"} {"signature": "def _parse_requires(path):", "body": "python_version = ''.join(map(str_cls, sys.version_info[:]))sys_platform = sys.platformpackages = []with open(path, '') as f:contents = f.read().decode('')for line in re.split(r'', contents):line = line.strip()if not len(line):continueif re.match(r'', line):continueif '' in line:package, cond = line.split('', )package = package.strip()cond = cond.strip()cond = cond.replace('', repr(sys_platform))cond = cond.replace('', repr(python_version))if not eval(cond):continueelse:package = line.strip()if re.match(r'', package):sub_req_file = re.sub(r'', '', package)sub_req_file = os.path.abspath(os.path.join(os.path.dirname(path), sub_req_file))packages.extend(_parse_requires(sub_req_file))continueif re.match(r'', package):packages.append({'': '', '': package})continueif '' in package:parts = package.split('')package = parts[].strip()ver = parts[].strip()packages.append({'': '', '': package, '': ver})continueif '' in package:parts = package.split('')package = parts[].strip()ver = parts[].strip()packages.append({'': '', '': package, '': ver})continueif re.search(r'', package):raise Exception('' % package)packages.append({'': '', '': package})return packages", "docstring": "Does basic parsing of pip requirements files, to allow for\nusing something other than Python to do actual TLS requests\n\n:param path:\n A path to a requirements file\n\n:return:\n A list of dict objects containing the keys:\n - 'type' ('any', 'url', '==', '>=')\n - 'pkg'\n - 'ver' (if 'type' == '==' or 'type' == '>=')", "id": "f9471:m10"} {"signature": "def _execute(params, cwd):", "body": "proc = subprocess.Popen(params,stdout=subprocess.PIPE,stderr=subprocess.PIPE,cwd=cwd)stdout, stderr = proc.communicate()code = proc.wait()if code != :e = OSError('' % (params, code, stderr))e.stdout = stdoute.stderr = stderrraise ereturn (stdout, stderr)", "docstring": "Executes a subprocess\n\n:param params:\n A list of the executable and arguments to pass 
to it\n\n:param cwd:\n The working directory to execute the command in\n\n:return:\n A 2-element tuple of (stdout, stderr)", "id": "f9471:m11"} {"signature": "def run(matcher=None, repeat=):", "body": "loader = unittest.TestLoader()test_list = []for test_class in test_classes():if matcher:names = loader.getTestCaseNames(test_class)for name in names:if re.search(matcher, name):test_list.append(test_class(name))else:test_list.append(loader.loadTestsFromTestCase(test_class))stream = sys.stdoutverbosity = if matcher and repeat == :verbosity = elif repeat > :stream = StringIO()for _ in range(, repeat):suite = unittest.TestSuite()for test in test_list:suite.addTest(test)result = unittest.TextTestRunner(stream=stream, verbosity=verbosity).run(suite)if len(result.errors) > or len(result.failures) > :if repeat > :print(stream.getvalue())return Falseif repeat > :stream.truncate()return True", "docstring": "Runs the tests\n\n:param matcher:\n A unicode string containing a regular expression to use to filter test\n names by. A value of None will cause no filtering.\n\n:param repeat:\n An integer - the number of times to run the tests\n\n:return:\n A bool - if the tests succeeded", "id": "f9472:m0"} {"signature": "def handle_error(result):", "body": "if result:return_, error_string = get_error()if not isinstance(error_string, str_cls):error_string = _try_decode(error_string)raise OSError(error_string)", "docstring": "Extracts the last Windows error message into a python unicode string\n\n:param result:\n A function result, 0 or None indicates failure\n\n:return:\n A unicode string error message", "id": "f9474:m0"} {"signature": "def _try_decode(byte_string):", "body": "try:return str_cls(byte_string, _encoding)except (UnicodeDecodeError):for encoding in _fallback_encodings:try:return str_cls(byte_string, encoding, errors='')except (UnicodeDecodeError):passreturn str_cls(byte_string, errors='')", "docstring": "Tries decoding a byte string from the OS into a unicode string\n\n:param byte_string:\n A byte string\n\n:return:\n A unicode string", "id": "f9476:m0"} {"signature": "def handle_error(error_num):", "body": "if error_num == :returnmessages = {BcryptConst.STATUS_NOT_FOUND: '',BcryptConst.STATUS_INVALID_PARAMETER: '',BcryptConst.STATUS_NO_MEMORY: (''),BcryptConst.STATUS_INVALID_HANDLE: '',BcryptConst.STATUS_INVALID_SIGNATURE: '',BcryptConst.STATUS_NOT_SUPPORTED: '',BcryptConst.STATUS_BUFFER_TOO_SMALL: '',BcryptConst.STATUS_INVALID_BUFFER_SIZE: '',}output = '' % error_numif error_num is not None and error_num in messages:output += '' + messages[error_num]raise OSError(output)", "docstring": "Extracts the last Windows error message into a python unicode string\n\n:param error_num:\n The number to get the error string for\n\n:return:\n A unicode string error message", "id": "f9478:m2"} {"signature": "def extract_from_system(cert_callback=None, callback_only_on_failure=False):", "body": "certificates = {}processed = {}now = datetime.datetime.utcnow()for store in [\"\", \"\"]:store_handle = crypt32.CertOpenSystemStoreW(null(), store)handle_error(store_handle)context_pointer = null()while True:context_pointer = crypt32.CertEnumCertificatesInStore(store_handle, context_pointer)if is_null(context_pointer):breakcontext = unwrap(context_pointer)trust_all = Falsedata = Nonedigest = Noneif context.dwCertEncodingType != Crypt32Const.X509_ASN_ENCODING:continuedata = bytes_from_buffer(context.pbCertEncoded, int(context.cbCertEncoded))digest = hashlib.sha1(data).digest()if digest in 
processed:continueprocessed[digest] = Truecert_info = unwrap(context.pCertInfo)not_before_seconds = _convert_filetime_to_timestamp(cert_info.NotBefore)try:not_before = datetime.datetime.fromtimestamp(not_before_seconds)if not_before > now:if cert_callback:cert_callback(Certificate.load(data), '')continueexcept (ValueError, OSError):passnot_after_seconds = _convert_filetime_to_timestamp(cert_info.NotAfter)try:not_after = datetime.datetime.fromtimestamp(not_after_seconds)if not_after < now:if cert_callback:cert_callback(Certificate.load(data), '')continueexcept (ValueError, OSError) as e:if not_after_seconds < :message = e.args[] + '' + str_cls(not_after_seconds)e.args = (message,) + e.args[:]raise etrust_oids = set()reject_oids = set()to_read = new(crypt32, '', )res = crypt32.CertGetEnhancedKeyUsage(context_pointer,Crypt32Const.CERT_FIND_PROP_ONLY_ENHKEY_USAGE_FLAG,null(),to_read)error_code, _ = get_error()if not res and error_code != Crypt32Const.CRYPT_E_NOT_FOUND:handle_error(res)if error_code == Crypt32Const.CRYPT_E_NOT_FOUND:trust_all = Trueelse:usage_buffer = buffer_from_bytes(deref(to_read))res = crypt32.CertGetEnhancedKeyUsage(context_pointer,Crypt32Const.CERT_FIND_PROP_ONLY_ENHKEY_USAGE_FLAG,cast(crypt32, '', usage_buffer),to_read)handle_error(res)key_usage_pointer = struct_from_buffer(crypt32, '', usage_buffer)key_usage = unwrap(key_usage_pointer)if key_usage.cUsageIdentifier == :if cert_callback:cert_callback(Certificate.load(data), '')continueoids = array_from_pointer(crypt32,'',key_usage.rgpszUsageIdentifier,key_usage.cUsageIdentifier)for oid in oids:trust_oids.add(oid.decode(''))cert = Noneif not trust_all:cert = Certificate.load(data)if cert.extended_key_usage_value:for cert_oid in cert.extended_key_usage_value:oid = cert_oid.dottedif oid not in trust_oids:reject_oids.add(oid)if cert_callback and not callback_only_on_failure:if cert is None:cert = Certificate.load(data)cert_callback(cert, None)certificates[digest] = (data, trust_oids, reject_oids)result = crypt32.CertCloseStore(store_handle, )handle_error(result)store_handle = Nonereturn certificates.values()", "docstring": "Extracts trusted CA certificates from the Windows certificate store\n\n:param cert_callback:\n A callback that is called once for each certificate in the trust store.\n It should accept two parameters: an asn1crypto.x509.Certificate object,\n and a reason. The reason will be None if the certificate is being\n exported, otherwise it will be a unicode string of the reason it won't.\n\n:param callback_only_on_failure:\n A boolean - if the callback should only be called when a certificate is\n not exported.\n\n:raises:\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A list of 3-element tuples:\n - 0: a byte string of a DER-encoded certificate\n - 1: a set of unicode strings that are OIDs of purposes to trust the\n certificate for\n - 2: a set of unicode strings that are OIDs of purposes to reject the\n certificate for", "id": "f9479:m1"} {"signature": "def _convert_filetime_to_timestamp(filetime):", "body": "hundreds_nano_seconds = struct.unpack(b'',struct.pack(b'',filetime.dwHighDateTime,filetime.dwLowDateTime))[]seconds_since_1601 = hundreds_nano_seconds / return seconds_since_1601 - ", "docstring": "Windows returns times as 64-bit unsigned longs that are the number\nof hundreds of nanoseconds since Jan 1 1601. 
This converts it to\na datetime object.\n\n:param filetime:\n A FILETIME struct object\n\n:return:\n An integer unix timestamp", "id": "f9479:m2"} {"signature": "def __init__(self, protocol=None, manual_validation=False, extra_trust_roots=None):", "body": "if not isinstance(manual_validation, bool):raise TypeError(pretty_message('''''',type_name(manual_validation)))self._manual_validation = manual_validationif protocol is None:protocol = set(['', '', ''])if isinstance(protocol, str_cls):protocol = set([protocol])elif not isinstance(protocol, set):raise TypeError(pretty_message('''''',type_name(protocol)))unsupported_protocols = protocol - set(['', '', '', ''])if unsupported_protocols:raise ValueError(pretty_message('''''',repr(unsupported_protocols)))self._protocols = protocolself._extra_trust_roots = []if extra_trust_roots:for extra_trust_root in extra_trust_roots:if isinstance(extra_trust_root, Certificate):extra_trust_root = extra_trust_root.asn1elif isinstance(extra_trust_root, byte_cls):extra_trust_root = parse_certificate(extra_trust_root)elif isinstance(extra_trust_root, str_cls):with open(extra_trust_root, '') as f:extra_trust_root = parse_certificate(f.read())elif not isinstance(extra_trust_root, x509.Certificate):raise TypeError(pretty_message('''''',type_name(extra_trust_root)))self._extra_trust_roots.append(extra_trust_root)self._obtain_credentials()", "docstring": ":param protocol:\n A unicode string or set of unicode strings representing allowable\n protocols to negotiate with the server:\n\n - \"TLSv1.2\"\n - \"TLSv1.1\"\n - \"TLSv1\"\n - \"SSLv3\"\n\n Default is: {\"TLSv1\", \"TLSv1.1\", \"TLSv1.2\"}\n\n:param manual_validation:\n If certificate and certificate path validation should be skipped\n and left to the developer to implement\n\n:param extra_trust_roots:\n A list containing one or more certificates to be treated as trust\n roots, in one of the following formats:\n - A byte string of the DER encoded certificate\n - A unicode string of the certificate filename\n - An asn1crypto.x509.Certificate object\n - An oscrypto.asymmetric.Certificate object\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9481:c2:m0"} {"signature": "def _obtain_credentials(self):", "body": "protocol_values = {'': Secur32Const.SP_PROT_SSL3_CLIENT,'': Secur32Const.SP_PROT_TLS1_CLIENT,'': Secur32Const.SP_PROT_TLS1_1_CLIENT,'': Secur32Const.SP_PROT_TLS1_2_CLIENT,}protocol_bit_mask = for key, value in protocol_values.items():if key in self._protocols:protocol_bit_mask |= valuealgs = [Secur32Const.CALG_AES_128,Secur32Const.CALG_AES_256,Secur32Const.CALG_3DES,Secur32Const.CALG_SHA1,Secur32Const.CALG_ECDHE,Secur32Const.CALG_DH_EPHEM,Secur32Const.CALG_RSA_KEYX,Secur32Const.CALG_RSA_SIGN,Secur32Const.CALG_ECDSA,Secur32Const.CALG_DSS_SIGN,]if '' in self._protocols:algs.extend([Secur32Const.CALG_SHA512,Secur32Const.CALG_SHA384,Secur32Const.CALG_SHA256,])alg_array = new(secur32, '' % len(algs))for index, alg in enumerate(algs):alg_array[index] = algflags = Secur32Const.SCH_USE_STRONG_CRYPTO | Secur32Const.SCH_CRED_NO_DEFAULT_CREDSif not self._manual_validation and not self._extra_trust_roots:flags |= Secur32Const.SCH_CRED_AUTO_CRED_VALIDATIONelse:flags |= Secur32Const.SCH_CRED_MANUAL_CRED_VALIDATIONschannel_cred_pointer = struct(secur32, '')schannel_cred = unwrap(schannel_cred_pointer)schannel_cred.dwVersion = 
Secur32Const.SCHANNEL_CRED_VERSIONschannel_cred.cCreds = schannel_cred.paCred = null()schannel_cred.hRootStore = null()schannel_cred.cMappers = schannel_cred.aphMappers = null()schannel_cred.cSupportedAlgs = len(alg_array)schannel_cred.palgSupportedAlgs = alg_arrayschannel_cred.grbitEnabledProtocols = protocol_bit_maskschannel_cred.dwMinimumCipherStrength = schannel_cred.dwMaximumCipherStrength = schannel_cred.dwSessionLifespan = schannel_cred.dwFlags = flagsschannel_cred.dwCredFormat = cred_handle_pointer = new(secur32, '')result = secur32.AcquireCredentialsHandleW(null(),Secur32Const.UNISP_NAME,Secur32Const.SECPKG_CRED_OUTBOUND,null(),schannel_cred_pointer,null(),null(),cred_handle_pointer,null())handle_error(result)self._credentials_handle = cred_handle_pointer", "docstring": "Obtains a credentials handle from secur32.dll for use with SChannel", "id": "f9481:c2:m1"} {"signature": "@classmethoddef wrap(cls, socket, hostname, session=None):", "body": "if not isinstance(socket, socket_.socket):raise TypeError(pretty_message('''''',type_name(socket)))if not isinstance(hostname, str_cls):raise TypeError(pretty_message('''''',type_name(hostname)))if session is not None and not isinstance(session, TLSSession):raise TypeError(pretty_message('''''',type_name(session)))new_socket = cls(None, None, session=session)new_socket._socket = socketnew_socket._hostname = hostnametry:new_socket._handshake()except (_TLSDowngradeError) as e:new_e = TLSVerificationError(e.message, e.certificate)raise new_eexcept (_TLSRetryError) as e:new_e = TLSError(e.message)raise new_ereturn new_socket", "docstring": "Takes an existing socket and adds TLS\n\n:param socket:\n A socket.socket object to wrap with TLS\n\n:param hostname:\n A unicode string of the hostname or IP the socket is connected to\n\n:param session:\n An existing TLSSession object to allow for session reuse, specific\n protocol or manual certificate validation\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9481:c3:m0"} {"signature": "def __init__(self, address, port, timeout=, session=None):", "body": "self._received_bytes = b''self._decrypted_bytes = b''if address is None and port is None:self._socket = Noneelse:if not isinstance(address, str_cls):raise TypeError(pretty_message('''''',type_name(address)))if not isinstance(port, int_types):raise TypeError(pretty_message('''''',type_name(port)))if timeout is not None and not isinstance(timeout, numbers.Number):raise TypeError(pretty_message('''''',type_name(timeout)))self._socket = socket_.create_connection((address, port), timeout)self._socket.settimeout(timeout)if session is None:session = TLSSession()elif not isinstance(session, TLSSession):raise TypeError(pretty_message('''''',type_name(session)))self._session = sessionif self._socket:self._hostname = addresstry:self._handshake()except (_TLSDowngradeError):self.close()new_session = TLSSession(session._protocols - set(['']),session._manual_validation,session._extra_trust_roots)session.__del__()self._received_bytes = b''self._session = new_sessionself._socket = socket_.create_connection((address, port), timeout)self._socket.settimeout(timeout)self._handshake()except (_TLSRetryError):self._received_bytes = b''self._socket = socket_.create_connection((address, port), timeout)self._socket.settimeout(timeout)self._handshake()", "docstring": ":param address:\n A unicode string of the domain 
name or IP address to conenct to\n\n:param port:\n An integer of the port number to connect to\n\n:param timeout:\n An integer timeout to use for the socket\n\n:param session:\n An oscrypto.tls.TLSSession object to allow for session reuse and\n controlling the protocols and validation performed", "id": "f9481:c3:m1"} {"signature": "def _create_buffers(self, number):", "body": "buffers = new(secur32, '' % number)for index in range(, number):buffers[index].cbBuffer = buffers[index].BufferType = Secur32Const.SECBUFFER_EMPTYbuffers[index].pvBuffer = null()sec_buffer_desc_pointer = struct(secur32, '')sec_buffer_desc = unwrap(sec_buffer_desc_pointer)sec_buffer_desc.ulVersion = Secur32Const.SECBUFFER_VERSIONsec_buffer_desc.cBuffers = numbersec_buffer_desc.pBuffers = buffersreturn (sec_buffer_desc_pointer, buffers)", "docstring": "Creates a SecBufferDesc struct and contained SecBuffer structs\n\n:param number:\n The number of contains SecBuffer objects to create\n\n:return:\n A tuple of (SecBufferDesc pointer, SecBuffer array)", "id": "f9481:c3:m2"} {"signature": "def _extra_trust_root_validation(self):", "body": "store = Nonecert_chain_context_pointer = Nonetry:store = crypt32.CertOpenStore(Crypt32Const.CERT_STORE_PROV_MEMORY,Crypt32Const.X509_ASN_ENCODING,null(),,null())if is_null(store):handle_crypt32_error()cert_hashes = set()for cert in self._session._extra_trust_roots:cert_data = cert.dump()result = crypt32.CertAddEncodedCertificateToStore(store,Crypt32Const.X509_ASN_ENCODING,cert_data,len(cert_data),Crypt32Const.CERT_STORE_ADD_USE_EXISTING,null())if not result:handle_crypt32_error()cert_hashes.add(cert.sha256)cert_context_pointer_pointer = new(crypt32, '')result = secur32.QueryContextAttributesW(self._context_handle_pointer,Secur32Const.SECPKG_ATTR_REMOTE_CERT_CONTEXT,cert_context_pointer_pointer)handle_error(result)cert_context_pointer = unwrap(cert_context_pointer_pointer)cert_context_pointer = cast(crypt32, '', cert_context_pointer)orig_now_pointer = new(kernel32, '')kernel32.GetSystemTimeAsFileTime(orig_now_pointer)now_pointer = cast(crypt32, '', orig_now_pointer)usage_identifiers = new(crypt32, '')usage_identifiers[] = cast(crypt32, '', Crypt32Const.PKIX_KP_SERVER_AUTH)usage_identifiers[] = cast(crypt32, '', Crypt32Const.SERVER_GATED_CRYPTO)usage_identifiers[] = cast(crypt32, '', Crypt32Const.SGC_NETSCAPE)cert_enhkey_usage_pointer = struct(crypt32, '')cert_enhkey_usage = unwrap(cert_enhkey_usage_pointer)cert_enhkey_usage.cUsageIdentifier = cert_enhkey_usage.rgpszUsageIdentifier = cast(crypt32, '', usage_identifiers)cert_usage_match_pointer = struct(crypt32, '')cert_usage_match = unwrap(cert_usage_match_pointer)cert_usage_match.dwType = Crypt32Const.USAGE_MATCH_TYPE_ORcert_usage_match.Usage = cert_enhkey_usagecert_chain_para_pointer = struct(crypt32, '')cert_chain_para = unwrap(cert_chain_para_pointer)cert_chain_para.RequestedUsage = cert_usage_matchcert_chain_para_size = sizeof(crypt32, cert_chain_para)cert_chain_para.cbSize = cert_chain_para_sizecert_chain_context_pointer_pointer = new(crypt32, '')result = crypt32.CertGetCertificateChain(null(),cert_context_pointer,now_pointer,store,cert_chain_para_pointer,Crypt32Const.CERT_CHAIN_CACHE_END_CERT | Crypt32Const.CERT_CHAIN_REVOCATION_CHECK_CACHE_ONLY,null(),cert_chain_context_pointer_pointer)handle_crypt32_error(result)cert_chain_policy_para_flags = Crypt32Const.CERT_CHAIN_POLICY_IGNORE_ALL_REV_UNKNOWN_FLAGScert_chain_context_pointer = unwrap(cert_chain_context_pointer_pointer)cert_chain_context = 
unwrap(cert_chain_context_pointer)num_chains = native(int, cert_chain_context.cChain)if num_chains == :first_simple_chain_pointer = unwrap(cert_chain_context.rgpChain)first_simple_chain = unwrap(first_simple_chain_pointer)num_elements = native(int, first_simple_chain.cElement)last_element_pointer = first_simple_chain.rgpElement[num_elements - ]last_element = unwrap(last_element_pointer)last_element_cert = unwrap(last_element.pCertContext)last_element_cert_data = bytes_from_buffer(last_element_cert.pbCertEncoded,native(int, last_element_cert.cbCertEncoded))last_cert = x509.Certificate.load(last_element_cert_data)if last_cert.sha256 in cert_hashes:cert_chain_policy_para_flags |= Crypt32Const.CERT_CHAIN_POLICY_ALLOW_UNKNOWN_CA_FLAGssl_extra_cert_chain_policy_para_pointer = struct(crypt32, '')ssl_extra_cert_chain_policy_para = unwrap(ssl_extra_cert_chain_policy_para_pointer)ssl_extra_cert_chain_policy_para.cbSize = sizeof(crypt32, ssl_extra_cert_chain_policy_para)ssl_extra_cert_chain_policy_para.dwAuthType = Crypt32Const.AUTHTYPE_SERVERssl_extra_cert_chain_policy_para.fdwChecks = ssl_extra_cert_chain_policy_para.pwszServerName = cast(crypt32,'',buffer_from_unicode(self._hostname))cert_chain_policy_para_pointer = struct(crypt32, '')cert_chain_policy_para = unwrap(cert_chain_policy_para_pointer)cert_chain_policy_para.cbSize = sizeof(crypt32, cert_chain_policy_para)cert_chain_policy_para.dwFlags = cert_chain_policy_para_flagscert_chain_policy_para.pvExtraPolicyPara = cast(crypt32, '', ssl_extra_cert_chain_policy_para_pointer)cert_chain_policy_status_pointer = struct(crypt32, '')cert_chain_policy_status = unwrap(cert_chain_policy_status_pointer)cert_chain_policy_status.cbSize = sizeof(crypt32, cert_chain_policy_status)result = crypt32.CertVerifyCertificateChainPolicy(Crypt32Const.CERT_CHAIN_POLICY_SSL,cert_chain_context_pointer,cert_chain_policy_para_pointer,cert_chain_policy_status_pointer)handle_crypt32_error(result)cert_context = unwrap(cert_context_pointer)cert_data = bytes_from_buffer(cert_context.pbCertEncoded, native(int, cert_context.cbCertEncoded))cert = x509.Certificate.load(cert_data)error = cert_chain_policy_status.dwErrorif error:if error == Crypt32Const.CERT_E_EXPIRED:raise_expired_not_yet_valid(cert)if error == Crypt32Const.CERT_E_UNTRUSTEDROOT:oscrypto_cert = load_certificate(cert)if oscrypto_cert.self_signed:raise_self_signed(cert)else:raise_no_issuer(cert)if error == Crypt32Const.CERT_E_CN_NO_MATCH:raise_hostname(cert, self._hostname)if error == Crypt32Const.TRUST_E_CERT_SIGNATURE:raise_weak_signature(cert)if error == Crypt32Const.CRYPT_E_REVOKED:raise_revoked(cert)raise_verification(cert)if cert.hash_algo in set(['', '']):raise_weak_signature(cert)finally:if store:crypt32.CertCloseStore(store, )if cert_chain_context_pointer:crypt32.CertFreeCertificateChain(cert_chain_context_pointer)", "docstring": "Manually invoked windows certificate chain builder and verification\nstep when there are extra trust roots to include in the search process", "id": "f9481:c3:m3"} {"signature": "def _handshake(self, renegotiate=False):", "body": "in_buffers = Noneout_buffers = Nonenew_context_handle_pointer = Nonetry:if renegotiate:temp_context_handle_pointer = self._context_handle_pointerelse:new_context_handle_pointer = new(secur32, '')temp_context_handle_pointer = new_context_handle_pointerrequested_flags = {Secur32Const.ISC_REQ_REPLAY_DETECT: '',Secur32Const.ISC_REQ_SEQUENCE_DETECT: '',Secur32Const.ISC_REQ_CONFIDENTIALITY: '',Secur32Const.ISC_REQ_ALLOCATE_MEMORY: 
'',Secur32Const.ISC_REQ_INTEGRITY: '',Secur32Const.ISC_REQ_STREAM: '',Secur32Const.ISC_REQ_USE_SUPPLIED_CREDS: '',}self._context_flags = for flag in requested_flags:self._context_flags |= flagin_sec_buffer_desc_pointer, in_buffers = self._create_buffers()in_buffers[].BufferType = Secur32Const.SECBUFFER_TOKENout_sec_buffer_desc_pointer, out_buffers = self._create_buffers()out_buffers[].BufferType = Secur32Const.SECBUFFER_TOKENout_buffers[].BufferType = Secur32Const.SECBUFFER_ALERToutput_context_flags_pointer = new(secur32, '')if renegotiate:first_handle = temp_context_handle_pointersecond_handle = null()else:first_handle = null()second_handle = temp_context_handle_pointerresult = secur32.InitializeSecurityContextW(self._session._credentials_handle,first_handle,self._hostname,self._context_flags,,,null(),,second_handle,out_sec_buffer_desc_pointer,output_context_flags_pointer,null())if result not in set([Secur32Const.SEC_E_OK, Secur32Const.SEC_I_CONTINUE_NEEDED]):handle_error(result, TLSError)if not renegotiate:temp_context_handle_pointer = second_handleelse:temp_context_handle_pointer = first_handlehandshake_server_bytes = b''handshake_client_bytes = b''if out_buffers[].cbBuffer > :token = bytes_from_buffer(out_buffers[].pvBuffer, out_buffers[].cbBuffer)handshake_client_bytes += tokenself._socket.send(token)out_buffers[].cbBuffer = secur32.FreeContextBuffer(out_buffers[].pvBuffer)out_buffers[].pvBuffer = null()in_data_buffer = buffer_from_bytes()in_buffers[].pvBuffer = cast(secur32, '', in_data_buffer)bytes_read = b''while result != Secur32Const.SEC_E_OK:try:fail_late = Falsebytes_read = self._socket.recv()if bytes_read == b'':raise_disconnection()except (socket_error_cls):fail_late = Truehandshake_server_bytes += bytes_readself._received_bytes += bytes_readin_buffers[].cbBuffer = len(self._received_bytes)write_to_buffer(in_data_buffer, self._received_bytes)result = secur32.InitializeSecurityContextW(self._session._credentials_handle,temp_context_handle_pointer,self._hostname,self._context_flags,,,in_sec_buffer_desc_pointer,,null(),out_sec_buffer_desc_pointer,output_context_flags_pointer,null())if result == Secur32Const.SEC_E_INCOMPLETE_MESSAGE:in_buffers[].BufferType = Secur32Const.SECBUFFER_TOKENif in_buffers[].BufferType != Secur32Const.SECBUFFER_EMPTY:in_buffers[].BufferType = Secur32Const.SECBUFFER_EMPTYin_buffers[].cbBuffer = if not is_null(in_buffers[].pvBuffer):secur32.FreeContextBuffer(in_buffers[].pvBuffer)in_buffers[].pvBuffer = null()if fail_late:raise_disconnection()continueif result == Secur32Const.SEC_E_ILLEGAL_MESSAGE:if detect_client_auth_request(handshake_server_bytes):raise_client_auth()alert_info = parse_alert(handshake_server_bytes)if alert_info and alert_info == (, ):raise_protocol_version()raise_handshake()if result == Secur32Const.SEC_E_WRONG_PRINCIPAL:chain = extract_chain(handshake_server_bytes)raise_hostname(chain[], self._hostname)if result == Secur32Const.SEC_E_CERT_EXPIRED:chain = extract_chain(handshake_server_bytes)raise_expired_not_yet_valid(chain[])if result == Secur32Const.SEC_E_UNTRUSTED_ROOT:chain = extract_chain(handshake_server_bytes)cert = chain[]oscrypto_cert = load_certificate(cert)if not oscrypto_cert.self_signed:raise_no_issuer(cert)raise_self_signed(cert)if result == Secur32Const.SEC_E_INTERNAL_ERROR:if get_dh_params_length(handshake_server_bytes) < :raise_dh_params()if result == Secur32Const.SEC_I_INCOMPLETE_CREDENTIALS:raise_client_auth()if result == Crypt32Const.TRUST_E_CERT_SIGNATURE:raise_weak_signature(cert)if result == 
Secur32Const.SEC_E_INVALID_TOKEN:if out_buffers[].cbBuffer > :alert_bytes = bytes_from_buffer(out_buffers[].pvBuffer, out_buffers[].cbBuffer)handshake_client_bytes += alert_bytesalert_number = alert_bytes[:]if alert_number == b'' or alert_number == b'':if '' in self._session._protocols and len(self._session._protocols) > :chain = extract_chain(handshake_server_bytes)raise _TLSDowngradeError('',chain[])if detect_client_auth_request(handshake_server_bytes):raise_client_auth()if detect_other_protocol(handshake_server_bytes):raise_protocol_error(handshake_server_bytes)raise_handshake()if result == Secur32Const.SEC_E_BUFFER_TOO_SMALL or result == Secur32Const.SEC_E_MESSAGE_ALTERED:if '' in self._session._protocols:raise _TLSRetryError('')if fail_late:raise_disconnection()if result == Secur32Const.SEC_E_INVALID_PARAMETER:if get_dh_params_length(handshake_server_bytes) < :raise_dh_params()if result not in set([Secur32Const.SEC_E_OK, Secur32Const.SEC_I_CONTINUE_NEEDED]):handle_error(result, TLSError)if out_buffers[].cbBuffer > :token = bytes_from_buffer(out_buffers[].pvBuffer, out_buffers[].cbBuffer)handshake_client_bytes += tokenself._socket.send(token)out_buffers[].cbBuffer = secur32.FreeContextBuffer(out_buffers[].pvBuffer)out_buffers[].pvBuffer = null()if in_buffers[].BufferType == Secur32Const.SECBUFFER_EXTRA:extra_amount = in_buffers[].cbBufferself._received_bytes = self._received_bytes[-extra_amount:]in_buffers[].BufferType = Secur32Const.SECBUFFER_EMPTYin_buffers[].cbBuffer = secur32.FreeContextBuffer(in_buffers[].pvBuffer)in_buffers[].pvBuffer = null()if result == Secur32Const.SEC_E_OK:handshake_server_bytes = handshake_server_bytes[-extra_amount:]else:self._received_bytes = b''connection_info_pointer = struct(secur32, '')result = secur32.QueryContextAttributesW(temp_context_handle_pointer,Secur32Const.SECPKG_ATTR_CONNECTION_INFO,connection_info_pointer)handle_error(result, TLSError)connection_info = unwrap(connection_info_pointer)self._protocol = {Secur32Const.SP_PROT_SSL2_CLIENT: '',Secur32Const.SP_PROT_SSL3_CLIENT: '',Secur32Const.SP_PROT_TLS1_CLIENT: '',Secur32Const.SP_PROT_TLS1_1_CLIENT: '',Secur32Const.SP_PROT_TLS1_2_CLIENT: '',}.get(native(int, connection_info.dwProtocol), str_cls(connection_info.dwProtocol))if self._protocol in set(['', '', '', '']):session_info = parse_session_info(handshake_server_bytes, handshake_client_bytes)self._cipher_suite = session_info['']self._compression = session_info['']self._session_id = session_info['']self._session_ticket = session_info['']output_context_flags = deref(output_context_flags_pointer)for flag in requested_flags:if (flag | output_context_flags) == :raise OSError(pretty_message('''''',requested_flags[flag]))if not renegotiate:self._context_handle_pointer = temp_context_handle_pointernew_context_handle_pointer = Nonestream_sizes_pointer = struct(secur32, '')result = secur32.QueryContextAttributesW(self._context_handle_pointer,Secur32Const.SECPKG_ATTR_STREAM_SIZES,stream_sizes_pointer)handle_error(result)stream_sizes = unwrap(stream_sizes_pointer)self._header_size = native(int, stream_sizes.cbHeader)self._message_size = native(int, stream_sizes.cbMaximumMessage)self._trailer_size = native(int, stream_sizes.cbTrailer)self._buffer_size = self._header_size + self._message_size + self._trailer_sizeif self._session._extra_trust_roots:self._extra_trust_root_validation()except (OSError, socket_.error):self.close()raisefinally:if out_buffers:if not is_null(out_buffers[].pvBuffer):secur32.FreeContextBuffer(out_buffers[].pvBuffer)if not 
is_null(out_buffers[].pvBuffer):secur32.FreeContextBuffer(out_buffers[].pvBuffer)if new_context_handle_pointer:secur32.DeleteSecurityContext(new_context_handle_pointer)", "docstring": "Perform an initial TLS handshake, or a renegotiation\n\n:param renegotiate:\n If the handshake is for a renegotiation", "id": "f9481:c3:m4"} {"signature": "def read(self, max_length):", "body": "if not isinstance(max_length, int_types):raise TypeError(pretty_message('''''',type_name(max_length)))if self._context_handle_pointer is None:if self._decrypted_bytes != b'':output = self._decrypted_bytes[:max_length]self._decrypted_bytes = self._decrypted_bytes[max_length:]return outputself._raise_closed()if not self._decrypt_data_buffer:self._decrypt_data_buffer = buffer_from_bytes(self._buffer_size)self._decrypt_desc, self._decrypt_buffers = self._create_buffers()self._decrypt_buffers[].BufferType = Secur32Const.SECBUFFER_DATAself._decrypt_buffers[].pvBuffer = cast(secur32, '', self._decrypt_data_buffer)to_recv = max(max_length, self._buffer_size)null_value = null()buf0 = self._decrypt_buffers[]buf1 = self._decrypt_buffers[]buf2 = self._decrypt_buffers[]buf3 = self._decrypt_buffers[]def _reset_buffers():buf0.BufferType = Secur32Const.SECBUFFER_DATAbuf0.pvBuffer = cast(secur32, '', self._decrypt_data_buffer)buf0.cbBuffer = buf1.BufferType = Secur32Const.SECBUFFER_EMPTYbuf1.pvBuffer = null_valuebuf1.cbBuffer = buf2.BufferType = Secur32Const.SECBUFFER_EMPTYbuf2.pvBuffer = null_valuebuf2.cbBuffer = buf3.BufferType = Secur32Const.SECBUFFER_EMPTYbuf3.pvBuffer = null_valuebuf3.cbBuffer = output = self._decrypted_bytesoutput_len = len(output)self._decrypted_bytes = b''if output_len > and not self.select_read():self._decrypted_bytes = b''return outputdo_read = len(self._received_bytes) == while output_len < max_length:if do_read:self._received_bytes += self._socket.recv(to_recv)if len(self._received_bytes) == :raise_disconnection()data_len = min(len(self._received_bytes), self._buffer_size)if data_len == :breakself._decrypt_buffers[].cbBuffer = data_lenwrite_to_buffer(self._decrypt_data_buffer, self._received_bytes[:data_len])result = secur32.DecryptMessage(self._context_handle_pointer,self._decrypt_desc,,null())do_read = Falseif result == Secur32Const.SEC_E_INCOMPLETE_MESSAGE:_reset_buffers()do_read = Truecontinueelif result == Secur32Const.SEC_I_CONTEXT_EXPIRED:self._remote_closed = Trueself.shutdown()breakelif result == Secur32Const.SEC_I_RENEGOTIATE:self._handshake(renegotiate=True)return self.read(max_length)elif result != Secur32Const.SEC_E_OK:handle_error(result, TLSError)valid_buffer_types = set([Secur32Const.SECBUFFER_EMPTY,Secur32Const.SECBUFFER_STREAM_HEADER,Secur32Const.SECBUFFER_STREAM_TRAILER])extra_amount = Nonefor buf in (buf0, buf1, buf2, buf3):buffer_type = buf.BufferTypeif buffer_type == Secur32Const.SECBUFFER_DATA:output += bytes_from_buffer(buf.pvBuffer, buf.cbBuffer)output_len = len(output)elif buffer_type == Secur32Const.SECBUFFER_EXTRA:extra_amount = native(int, buf.cbBuffer)elif buffer_type not in valid_buffer_types:raise OSError(pretty_message('''''',buffer_type))if extra_amount:self._received_bytes = self._received_bytes[data_len - extra_amount:]else:self._received_bytes = self._received_bytes[data_len:]_reset_buffers()if self.select_read():do_read = Trueif not do_read and len(self._received_bytes) == :breakif len(output) > max_length:self._decrypted_bytes = output[max_length:]output = output[:max_length]return output", "docstring": "Reads data from the TLS-wrapped socket\n\n:param max_length:\n 
The number of bytes to read\n\n:raises:\n socket.socket - when a non-TLS socket error occurs\n oscrypto.errors.TLSError - when a TLS-related error occurs\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the data read", "id": "f9481:c3:m5"} {"signature": "def select_read(self, timeout=None):", "body": "if len(self._decrypted_bytes) > :return Trueread_ready, _, _ = select.select([self._socket], [], [], timeout)return len(read_ready) > ", "docstring": "Blocks until the socket is ready to be read from, or the timeout is hit\n\n:param timeout:\n A float - the period of time to wait for data to be read. None for\n no time limit.\n\n:return:\n A boolean - if data is ready to be read. Will only be False if\n timeout is not None.", "id": "f9481:c3:m6"} {"signature": "def read_until(self, marker):", "body": "if not isinstance(marker, byte_cls) and not isinstance(marker, Pattern):raise TypeError(pretty_message('''''',type_name(marker)))output = b''is_regex = isinstance(marker, Pattern)while True:if len(self._decrypted_bytes) > :chunk = self._decrypted_bytesself._decrypted_bytes = b''else:chunk = self.read()offset = len(output)output += chunkif is_regex:match = marker.search(output)if match is not None:end = match.end()breakelse:start = max(, offset - len(marker) - )match = output.find(marker, start)if match != -:end = match + len(marker)breakself._decrypted_bytes = output[end:] + self._decrypted_bytesreturn output[:end]", "docstring": "Reads data from the socket until a marker is found. Data read may\ninclude data beyond the marker.\n\n:param marker:\n A byte string or regex object from re.compile(). Used to determine\n when to stop reading. 
Regex objects are more inefficient since\n they must scan the entire byte string of read data each time data\n is read off the socket.\n\n:return:\n A byte string of the data read", "id": "f9481:c3:m7"} {"signature": "def read_line(self):", "body": "return self.read_until(_line_regex)", "docstring": "r\"\"\"\n Reads a line from the socket, including the line ending of \"\\r\\n\", \"\\r\",\n or \"\\n\"\n\n :return:\n A byte string of the next line from the socket", "id": "f9481:c3:m8"} {"signature": "def read_exactly(self, num_bytes):", "body": "output = b''remaining = num_byteswhile remaining > :output += self.read(remaining)remaining = num_bytes - len(output)return output", "docstring": "Reads exactly the specified number of bytes from the socket\n\n:param num_bytes:\n An integer - the exact number of bytes to read\n\n:return:\n A byte string of the data that was read", "id": "f9481:c3:m9"} {"signature": "def write(self, data):", "body": "if self._context_handle_pointer is None:self._raise_closed()if not self._encrypt_data_buffer:self._encrypt_data_buffer = buffer_from_bytes(self._header_size + self._message_size + self._trailer_size)self._encrypt_desc, self._encrypt_buffers = self._create_buffers()self._encrypt_buffers[].BufferType = Secur32Const.SECBUFFER_STREAM_HEADERself._encrypt_buffers[].cbBuffer = self._header_sizeself._encrypt_buffers[].pvBuffer = cast(secur32, '', self._encrypt_data_buffer)self._encrypt_buffers[].BufferType = Secur32Const.SECBUFFER_DATAself._encrypt_buffers[].pvBuffer = ref(self._encrypt_data_buffer, self._header_size)self._encrypt_buffers[].BufferType = Secur32Const.SECBUFFER_STREAM_TRAILERself._encrypt_buffers[].cbBuffer = self._trailer_sizeself._encrypt_buffers[].pvBuffer = ref(self._encrypt_data_buffer, self._header_size + self._message_size)while len(data) > :to_write = min(len(data), self._message_size)write_to_buffer(self._encrypt_data_buffer, data[:to_write], self._header_size)self._encrypt_buffers[].cbBuffer = to_writeself._encrypt_buffers[].pvBuffer = ref(self._encrypt_data_buffer, self._header_size + to_write)result = secur32.EncryptMessage(self._context_handle_pointer,,self._encrypt_desc,)if result != Secur32Const.SEC_E_OK:handle_error(result, TLSError)to_send = native(int, self._encrypt_buffers[].cbBuffer)to_send += native(int, self._encrypt_buffers[].cbBuffer)to_send += native(int, self._encrypt_buffers[].cbBuffer)try:self._socket.send(bytes_from_buffer(self._encrypt_data_buffer, to_send))except (socket_.error) as e:if e.errno == :raise_disconnection()raisedata = data[to_send:]", "docstring": "Writes data to the TLS-wrapped socket\n\n:param data:\n A byte string to write to the socket\n\n:raises:\n socket.socket - when a non-TLS socket error occurs\n oscrypto.errors.TLSError - when a TLS-related error occurs\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9481:c3:m10"} {"signature": "def select_write(self, timeout=None):", "body": "_, write_ready, _ = select.select([], [self._socket], [], timeout)return len(write_ready) > ", "docstring": "Blocks until the socket is ready to be written to, or the timeout is hit\n\n:param timeout:\n A float - the period of time to wait for the socket to be ready to\n written to. None for no time limit.\n\n:return:\n A boolean - if the socket is ready for writing. 
Will only be False\n if timeout is not None.", "id": "f9481:c3:m11"} {"signature": "def shutdown(self):", "body": "if self._context_handle_pointer is None:returnout_buffers = Nonetry:if _win_version_info >= (, ):buffers = new(secur32, '')buffers[].cbBuffer = buffers[].BufferType = Secur32Const.SECBUFFER_TOKENbuffers[].pvBuffer = cast(secur32, '', buffer_from_bytes(b''))sec_buffer_desc_pointer = struct(secur32, '')sec_buffer_desc = unwrap(sec_buffer_desc_pointer)sec_buffer_desc.ulVersion = Secur32Const.SECBUFFER_VERSIONsec_buffer_desc.cBuffers = sec_buffer_desc.pBuffers = buffersresult = secur32.ApplyControlToken(self._context_handle_pointer, sec_buffer_desc_pointer)handle_error(result, TLSError)out_sec_buffer_desc_pointer, out_buffers = self._create_buffers()out_buffers[].BufferType = Secur32Const.SECBUFFER_TOKENout_buffers[].BufferType = Secur32Const.SECBUFFER_ALERToutput_context_flags_pointer = new(secur32, '')result = secur32.InitializeSecurityContextW(self._session._credentials_handle,self._context_handle_pointer,self._hostname,self._context_flags,,,null(),,null(),out_sec_buffer_desc_pointer,output_context_flags_pointer,null())acceptable_results = set([Secur32Const.SEC_E_OK,Secur32Const.SEC_E_CONTEXT_EXPIRED,Secur32Const.SEC_I_CONTINUE_NEEDED])if result not in acceptable_results:handle_error(result, TLSError)token = bytes_from_buffer(out_buffers[].pvBuffer, out_buffers[].cbBuffer)try:self._socket.send(token)except (socket_.error):passfinally:if out_buffers:if not is_null(out_buffers[].pvBuffer):secur32.FreeContextBuffer(out_buffers[].pvBuffer)if not is_null(out_buffers[].pvBuffer):secur32.FreeContextBuffer(out_buffers[].pvBuffer)secur32.DeleteSecurityContext(self._context_handle_pointer)self._context_handle_pointer = Nonetry:self._socket.shutdown(socket_.SHUT_RDWR)except (socket_.error):pass", "docstring": "Shuts down the TLS session and then shuts down the underlying socket\n\n:raises:\n OSError - when an error is returned by the OS crypto library", "id": "f9481:c3:m12"} {"signature": "def close(self):", "body": "try:self.shutdown()finally:if self._socket:try:self._socket.close()except (socket_.error):passself._socket = None", "docstring": "Shuts down the TLS session and socket and forcibly closes it", "id": "f9481:c3:m13"} {"signature": "def _read_certificates(self):", "body": "cert_context_pointer_pointer = new(crypt32, '')result = secur32.QueryContextAttributesW(self._context_handle_pointer,Secur32Const.SECPKG_ATTR_REMOTE_CERT_CONTEXT,cert_context_pointer_pointer)handle_error(result, TLSError)cert_context_pointer = unwrap(cert_context_pointer_pointer)cert_context_pointer = cast(crypt32, '', cert_context_pointer)cert_context = unwrap(cert_context_pointer)cert_data = bytes_from_buffer(cert_context.pbCertEncoded, native(int, cert_context.cbCertEncoded))self._certificate = x509.Certificate.load(cert_data)self._intermediates = []store_handle = Nonetry:store_handle = cert_context.hCertStorecontext_pointer = crypt32.CertEnumCertificatesInStore(store_handle, null())while not is_null(context_pointer):context = unwrap(context_pointer)data = bytes_from_buffer(context.pbCertEncoded, native(int, context.cbCertEncoded))if data != cert_data:self._intermediates.append(x509.Certificate.load(data))context_pointer = crypt32.CertEnumCertificatesInStore(store_handle, context_pointer)finally:if store_handle:crypt32.CertCloseStore(store_handle, )", "docstring": "Reads end-entity and intermediate certificate information from the\nTLS session", "id": "f9481:c3:m14"} {"signature": "def 
_raise_closed(self):", "body": "if self._remote_closed:raise TLSGracefulDisconnectError('')else:raise TLSDisconnectError('')", "docstring": "Raises an exception describing if the local or remote end closed the\nconnection", "id": "f9481:c3:m15"} {"signature": "@propertydef certificate(self):", "body": "if self._context_handle_pointer is None:self._raise_closed()if self._certificate is None:self._read_certificates()return self._certificate", "docstring": "An asn1crypto.x509.Certificate object of the end-entity certificate\npresented by the server", "id": "f9481:c3:m16"} {"signature": "@propertydef intermediates(self):", "body": "if self._context_handle_pointer is None:self._raise_closed()if self._certificate is None:self._read_certificates()return self._intermediates", "docstring": "A list of asn1crypto.x509.Certificate objects that were presented as\nintermediates by the server", "id": "f9481:c3:m17"} {"signature": "@propertydef cipher_suite(self):", "body": "return self._cipher_suite", "docstring": "A unicode string of the IANA cipher suite name of the negotiated\ncipher suite", "id": "f9481:c3:m18"} {"signature": "@propertydef protocol(self):", "body": "return self._protocol", "docstring": "A unicode string of: \"TLSv1.2\", \"TLSv1.1\", \"TLSv1\", \"SSLv3\"", "id": "f9481:c3:m19"} {"signature": "@propertydef compression(self):", "body": "return self._compression", "docstring": "A boolean if compression is enabled", "id": "f9481:c3:m20"} {"signature": "@propertydef session_id(self):", "body": "return self._session_id", "docstring": "A unicode string of \"new\" or \"reused\" or None for no ticket", "id": "f9481:c3:m21"} {"signature": "@propertydef session_ticket(self):", "body": "return self._session_ticket", "docstring": "A unicode string of \"new\" or \"reused\" or None for no ticket", "id": "f9481:c3:m22"} {"signature": "@propertydef session(self):", "body": "return self._session", "docstring": "The oscrypto.tls.TLSSession object used for this connection", "id": "f9481:c3:m23"} {"signature": "@propertydef hostname(self):", "body": "return self._hostname", "docstring": "A unicode string of the TLS server domain name or IP address", "id": "f9481:c3:m24"} {"signature": "@propertydef port(self):", "body": "return self.socket.getpeername()[]", "docstring": "An integer of the port number the socket is connected to", "id": "f9481:c3:m25"} {"signature": "@propertydef socket(self):", "body": "if self._context_handle_pointer is None:self._raise_closed()return self._socket", "docstring": "The underlying socket.socket connection", "id": "f9481:c3:m26"} {"signature": "def handle_error(result):", "body": "if result:returncode, error_string = get_error()if code == Advapi32Const.NTE_BAD_SIGNATURE:raise SignatureError('')if not isinstance(error_string, str_cls):error_string = _try_decode(error_string)raise OSError(error_string)", "docstring": "Extracts the last Windows error message into a python unicode string\n\n:param result:\n A function result, 0 or None indicates failure\n\n:return:\n A unicode string error message", "id": "f9483:m2"} {"signature": "def handle_error(result, exception_class=None):", "body": "if result == :returnif result == Secur32Const.SEC_E_OUT_OF_SEQUENCE:raise TLSError('')if result == Secur32Const.SEC_E_MESSAGE_ALTERED:raise TLSError('')if result == Secur32Const.SEC_E_CONTEXT_EXPIRED:raise TLSError('')_, error_string = get_error()if not isinstance(error_string, str_cls):error_string = _try_decode(error_string)if exception_class is None:exception_class = OSErrorraise 
exception_class(('' % result) + error_string)", "docstring": "Extracts the last Windows error message into a python unicode string\n\n:param result:\n A function result, 0 or None indicates failure\n\n:param exception_class:\n The exception class to use for the exception if an error occurred\n\n:return:\n A unicode string error message", "id": "f9484:m0"} {"signature": "def generate_pair(algorithm, bit_size=None, curve=None):", "body": "if algorithm not in set(['', '', '']):raise ValueError(pretty_message('''''',repr(algorithm)))if algorithm == '':if bit_size not in set([, , , ]):raise ValueError(pretty_message('''''',repr(bit_size)))elif algorithm == '':if _win_version_info < (, ) or _backend == '':if bit_size != :raise ValueError(pretty_message('''''',repr(bit_size)))else:if bit_size not in set([, , ]):raise ValueError(pretty_message('''''',repr(bit_size)))elif algorithm == '':if curve not in set(['', '', '']):raise ValueError(pretty_message('''''',repr(curve)))if _backend == '':if algorithm == '':pub_info, priv_info = _pure_python_ec_generate_pair(curve)return (PublicKey(None, pub_info), PrivateKey(None, priv_info))return _advapi32_generate_pair(algorithm, bit_size)else:return _bcrypt_generate_pair(algorithm, bit_size, curve)", "docstring": "Generates a public/private key pair\n\n:param algorithm:\n The key algorithm - \"rsa\", \"dsa\" or \"ec\"\n\n:param bit_size:\n An integer - used for \"rsa\" and \"dsa\". For \"rsa\" the value maye be 1024,\n 2048, 3072 or 4096. For \"dsa\" the value may be 1024, plus 2048 or 3072\n if on Windows 8 or newer.\n\n:param curve:\n A unicode string - used for \"ec\" keys. Valid values include \"secp256r1\",\n \"secp384r1\" and \"secp521r1\".\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A 2-element tuple of (PublicKey, PrivateKey). 
The contents of each key\n may be saved by calling .asn1.dump().", "id": "f9489:m0"} {"signature": "def _advapi32_generate_pair(algorithm, bit_size=None):", "body": "if algorithm == '':provider = Advapi32Const.MS_ENH_RSA_AES_PROValgorithm_id = Advapi32Const.CALG_RSA_SIGNstruct_type = ''else:provider = Advapi32Const.MS_ENH_DSS_DH_PROValgorithm_id = Advapi32Const.CALG_DSS_SIGNstruct_type = ''context_handle = Nonekey_handle = Nonetry:context_handle = open_context_handle(provider, verify_only=False)key_handle_pointer = new(advapi32, '')flags = (bit_size << ) | Advapi32Const.CRYPT_EXPORTABLEres = advapi32.CryptGenKey(context_handle, algorithm_id, flags, key_handle_pointer)handle_error(res)key_handle = unwrap(key_handle_pointer)out_len = new(advapi32, '')res = advapi32.CryptExportKey(key_handle,null(),Advapi32Const.PRIVATEKEYBLOB,,null(),out_len)handle_error(res)buffer_length = deref(out_len)buffer_ = buffer_from_bytes(buffer_length)res = advapi32.CryptExportKey(key_handle,null(),Advapi32Const.PRIVATEKEYBLOB,,buffer_,out_len)handle_error(res)blob_struct_pointer = struct_from_buffer(advapi32, struct_type, buffer_)blob_struct = unwrap(blob_struct_pointer)struct_size = sizeof(advapi32, blob_struct)private_blob = bytes_from_buffer(buffer_, buffer_length)[struct_size:]if algorithm == '':public_info, private_info = _advapi32_interpret_rsa_key_blob(bit_size, blob_struct, private_blob)else:public_out_len = new(advapi32, '')res = advapi32.CryptExportKey(key_handle,null(),Advapi32Const.PUBLICKEYBLOB,,null(),public_out_len)handle_error(res)public_buffer_length = deref(public_out_len)public_buffer = buffer_from_bytes(public_buffer_length)res = advapi32.CryptExportKey(key_handle,null(),Advapi32Const.PUBLICKEYBLOB,,public_buffer,public_out_len)handle_error(res)public_blob = bytes_from_buffer(public_buffer, public_buffer_length)[struct_size:]public_info, private_info = _advapi32_interpret_dsa_key_blob(bit_size, public_blob, private_blob)return (load_public_key(public_info), load_private_key(private_info))finally:if context_handle:close_context_handle(context_handle)if key_handle:advapi32.CryptDestroyKey(key_handle)", "docstring": "Generates a public/private key pair using CryptoAPI\n\n:param algorithm:\n The key algorithm - \"rsa\" or \"dsa\"\n\n:param bit_size:\n An integer - used for \"rsa\" and \"dsa\". For \"rsa\" the value maye be 1024,\n 2048, 3072 or 4096. For \"dsa\" the value may be 1024.\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A 2-element tuple of (PublicKey, PrivateKey). 
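The generate_pair() record above is the key-generation entry point for this backend. A short sketch of generating and serializing a pair, assuming the same signature is re-exported as oscrypto.asymmetric.generate_pair (as the docstring's ".asn1.dump()" hint suggests); the output file names are illustrative.

```python
# Sketch: assumes the documented generate_pair() signature is available
# through oscrypto.asymmetric; file names below are illustrative.
from oscrypto import asymmetric

public_key, private_key = asymmetric.generate_pair('rsa', bit_size=2048)

# Each half wraps an asn1crypto object, so .asn1.dump() yields DER bytes
with open('example.key', 'wb') as f:
    f.write(private_key.asn1.dump())
with open('example.pub', 'wb') as f:
    f.write(public_key.asn1.dump())

# EC keys take a curve name instead of a bit size
ec_public, ec_private = asymmetric.generate_pair('ec', curve='secp256r1')
```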
The contents of each key\n may be saved by calling .asn1.dump().", "id": "f9489:m1"} {"signature": "def _bcrypt_generate_pair(algorithm, bit_size=None, curve=None):", "body": "if algorithm == '':alg_constant = BcryptConst.BCRYPT_RSA_ALGORITHMstruct_type = ''private_blob_type = BcryptConst.BCRYPT_RSAFULLPRIVATE_BLOBpublic_blob_type = BcryptConst.BCRYPT_RSAPUBLIC_BLOBelif algorithm == '':alg_constant = BcryptConst.BCRYPT_DSA_ALGORITHMif bit_size > :struct_type = ''else:struct_type = ''private_blob_type = BcryptConst.BCRYPT_DSA_PRIVATE_BLOBpublic_blob_type = BcryptConst.BCRYPT_DSA_PUBLIC_BLOBelse:alg_constant = {'': BcryptConst.BCRYPT_ECDSA_P256_ALGORITHM,'': BcryptConst.BCRYPT_ECDSA_P384_ALGORITHM,'': BcryptConst.BCRYPT_ECDSA_P521_ALGORITHM,}[curve]bit_size = {'': ,'': ,'': ,}[curve]struct_type = ''private_blob_type = BcryptConst.BCRYPT_ECCPRIVATE_BLOBpublic_blob_type = BcryptConst.BCRYPT_ECCPUBLIC_BLOBalg_handle = open_alg_handle(alg_constant)key_handle_pointer = new(bcrypt, '')res = bcrypt.BCryptGenerateKeyPair(alg_handle, key_handle_pointer, bit_size, )handle_error(res)key_handle = unwrap(key_handle_pointer)res = bcrypt.BCryptFinalizeKeyPair(key_handle, )handle_error(res)private_out_len = new(bcrypt, '')res = bcrypt.BCryptExportKey(key_handle, null(), private_blob_type, null(), , private_out_len, )handle_error(res)private_buffer_length = deref(private_out_len)private_buffer = buffer_from_bytes(private_buffer_length)res = bcrypt.BCryptExportKey(key_handle,null(),private_blob_type,private_buffer,private_buffer_length,private_out_len,)handle_error(res)private_blob_struct_pointer = struct_from_buffer(bcrypt, struct_type, private_buffer)private_blob_struct = unwrap(private_blob_struct_pointer)struct_size = sizeof(bcrypt, private_blob_struct)private_blob = bytes_from_buffer(private_buffer, private_buffer_length)[struct_size:]if algorithm == '':private_key = _bcrypt_interpret_rsa_key_blob('', private_blob_struct, private_blob)elif algorithm == '':if bit_size > :private_key = _bcrypt_interpret_dsa_key_blob('', , private_blob_struct, private_blob)else:private_key = _bcrypt_interpret_dsa_key_blob('', , private_blob_struct, private_blob)else:private_key = _bcrypt_interpret_ec_key_blob('', private_blob_struct, private_blob)public_out_len = new(bcrypt, '')res = bcrypt.BCryptExportKey(key_handle, null(), public_blob_type, null(), , public_out_len, )handle_error(res)public_buffer_length = deref(public_out_len)public_buffer = buffer_from_bytes(public_buffer_length)res = bcrypt.BCryptExportKey(key_handle,null(),public_blob_type,public_buffer,public_buffer_length,public_out_len,)handle_error(res)public_blob_struct_pointer = struct_from_buffer(bcrypt, struct_type, public_buffer)public_blob_struct = unwrap(public_blob_struct_pointer)struct_size = sizeof(bcrypt, public_blob_struct)public_blob = bytes_from_buffer(public_buffer, public_buffer_length)[struct_size:]if algorithm == '':public_key = _bcrypt_interpret_rsa_key_blob('', public_blob_struct, public_blob)elif algorithm == '':if bit_size > :public_key = _bcrypt_interpret_dsa_key_blob('', , public_blob_struct, public_blob)else:public_key = _bcrypt_interpret_dsa_key_blob('', , public_blob_struct, public_blob)else:public_key = _bcrypt_interpret_ec_key_blob('', public_blob_struct, public_blob)return (load_public_key(public_key), load_private_key(private_key))", "docstring": "Generates a public/private key pair using CNG\n\n:param algorithm:\n The key algorithm - \"rsa\", \"dsa\" or \"ec\"\n\n:param bit_size:\n An integer - used for \"rsa\" and \"dsa\". 
For \"rsa\" the value maye be 1024,\n 2048, 3072 or 4096. For \"dsa\" the value may be 1024, plus 2048 or 3072\n if on Windows 8 or newer.\n\n:param curve:\n A unicode string - used for \"ec\" keys. Valid values include \"secp256r1\",\n \"secp384r1\" and \"secp521r1\".\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A 2-element tuple of (PublicKey, PrivateKey). The contents of each key\n may be saved by calling .asn1.dump().", "id": "f9489:m2"} {"signature": "def generate_dh_parameters(bit_size):", "body": "if not isinstance(bit_size, int_types):raise TypeError(pretty_message('''''',type_name(bit_size)))if bit_size < :raise ValueError('')if bit_size > :raise ValueError('')if bit_size % != :raise ValueError('')alg_handle = Noneg = try:byte_size = bit_size // if _backend == '':alg_handle = open_alg_handle(BcryptConst.BCRYPT_RNG_ALGORITHM)buffer = buffer_from_bytes(byte_size)while True:if _backend == '':rb = os.urandom(byte_size)else:res = bcrypt.BCryptGenRandom(alg_handle, buffer, byte_size, )handle_error(res)rb = bytes_from_buffer(buffer)p = int_from_bytes(rb)if p % == :continueif g == :if p % != :continueelif g == :rem = p % if rem != and rem != :continuedivisible = Falsefor prime in _SMALL_PRIMES:if p % prime == :divisible = Truebreakif not divisible and _is_prime(bit_size, p):q = p // if _is_prime(bit_size, q):return algos.DHParameters({'': p, '': g})finally:if alg_handle:close_alg_handle(alg_handle)", "docstring": "Generates DH parameters for use with Diffie-Hellman key exchange. Returns\na structure in the format of DHParameter defined in PKCS#3, which is also\nused by the OpenSSL dhparam tool.\n\nTHIS CAN BE VERY TIME CONSUMING!\n\n:param bit_size:\n The integer bit size of the parameters to generate. Must be between 512\n and 4096, and divisible by 64. Recommended secure value as of early 2016\n is 2048, with an absolute minimum of 1024.\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n An asn1crypto.algos.DHParameters object. 
Use\n oscrypto.asymmetric.dump_dh_parameters() to save to disk for usage with\n web servers.", "id": "f9489:m3"} {"signature": "def _is_prime(bit_size, n):", "body": "r = s = n - while s % == :r += s //= if bit_size >= :k = elif bit_size >= :k = elif bit_size >= :k = elif bit_size >= :k = elif bit_size >= :k = for _ in range(k):a = random.randrange(, n - )x = pow(a, s, n)if x == or x == n - :continuefor _ in range(r - ):x = pow(x, , n)if x == n - :breakelse:return Falsereturn True", "docstring": "An implementation of Miller\u2013Rabin for checking if a number is prime.\n\n:param bit_size:\n An integer of the number of bits in the prime number\n\n:param n:\n An integer, the prime number\n\n:return:\n A boolean", "id": "f9489:m4"} {"signature": "def _advapi32_interpret_rsa_key_blob(bit_size, blob_struct, blob):", "body": "len1 = bit_size // len2 = bit_size // prime1_offset = len1prime2_offset = prime1_offset + len2exponent1_offset = prime2_offset + len2exponent2_offset = exponent1_offset + len2coefficient_offset = exponent2_offset + len2private_exponent_offset = coefficient_offset + len2public_exponent = blob_struct.rsapubkey.pubexpmodulus = int_from_bytes(blob[:prime1_offset][::-])prime1 = int_from_bytes(blob[prime1_offset:prime2_offset][::-])prime2 = int_from_bytes(blob[prime2_offset:exponent1_offset][::-])exponent1 = int_from_bytes(blob[exponent1_offset:exponent2_offset][::-])exponent2 = int_from_bytes(blob[exponent2_offset:coefficient_offset][::-])coefficient = int_from_bytes(blob[coefficient_offset:private_exponent_offset][::-])private_exponent = int_from_bytes(blob[private_exponent_offset:private_exponent_offset + len1][::-])public_key_info = keys.PublicKeyInfo({'': keys.PublicKeyAlgorithm({'': '',}),'': keys.RSAPublicKey({'': modulus,'': public_exponent,}),})rsa_private_key = keys.RSAPrivateKey({'': '','': modulus,'': public_exponent,'': private_exponent,'': prime1,'': prime2,'': exponent1,'': exponent2,'': coefficient,})private_key_info = keys.PrivateKeyInfo({'': ,'': keys.PrivateKeyAlgorithm({'': '',}),'': rsa_private_key,})return (public_key_info, private_key_info)", "docstring": "Takes a CryptoAPI RSA private key blob and converts it into the ASN.1\nstructures for the public and private keys\n\n:param bit_size:\n The integer bit size of the key\n\n:param blob_struct:\n An instance of the advapi32.RSAPUBKEY struct\n\n:param blob:\n A byte string of the binary data after the header\n\n:return:\n A 2-element tuple of (asn1crypto.keys.PublicKeyInfo,\n asn1crypto.keys.PrivateKeyInfo)", "id": "f9489:m5"} {"signature": "def _advapi32_interpret_dsa_key_blob(bit_size, public_blob, private_blob):", "body": "len1 = len2 = bit_size // q_offset = len2g_offset = q_offset + len1x_offset = g_offset + len2y_offset = x_offsetp = int_from_bytes(private_blob[:q_offset][::-])q = int_from_bytes(private_blob[q_offset:g_offset][::-])g = int_from_bytes(private_blob[g_offset:x_offset][::-])x = int_from_bytes(private_blob[x_offset:x_offset + len1][::-])y = int_from_bytes(public_blob[y_offset:y_offset + len2][::-])public_key_info = keys.PublicKeyInfo({'': keys.PublicKeyAlgorithm({'': '','': keys.DSAParams({'': p,'': q,'': g,})}),'': core.Integer(y),})private_key_info = keys.PrivateKeyInfo({'': ,'': keys.PrivateKeyAlgorithm({'': '','': keys.DSAParams({'': p,'': q,'': g,})}),'': core.Integer(x),})return (public_key_info, private_key_info)", "docstring": "Takes a CryptoAPI DSS private key blob and converts it into the ASN.1\nstructures for the public and private keys\n\n:param bit_size:\n The integer bit size 
of the key\n\n:param public_blob:\n A byte string of the binary data after the public key header\n\n:param private_blob:\n A byte string of the binary data after the private key header\n\n:return:\n A 2-element tuple of (asn1crypto.keys.PublicKeyInfo,\n asn1crypto.keys.PrivateKeyInfo)", "id": "f9489:m6"} {"signature": "def _bcrypt_interpret_rsa_key_blob(key_type, blob_struct, blob):", "body": "public_exponent_byte_length = native(int, blob_struct.cbPublicExp)modulus_byte_length = native(int, blob_struct.cbModulus)modulus_offset = public_exponent_byte_lengthpublic_exponent = int_from_bytes(blob[:modulus_offset])modulus = int_from_bytes(blob[modulus_offset:modulus_offset + modulus_byte_length])if key_type == '':return keys.PublicKeyInfo({'': keys.PublicKeyAlgorithm({'': '',}),'': keys.RSAPublicKey({'': modulus,'': public_exponent,}),})elif key_type == '':prime1_byte_length = native(int, blob_struct.cbPrime1)prime2_byte_length = native(int, blob_struct.cbPrime2)prime1_offset = modulus_offset + modulus_byte_lengthprime2_offset = prime1_offset + prime1_byte_lengthexponent1_offset = prime2_offset + prime2_byte_lengthexponent2_offset = exponent1_offset + prime2_byte_lengthcoefficient_offset = exponent2_offset + prime2_byte_lengthprivate_exponent_offset = coefficient_offset + prime1_byte_lengthprime1 = int_from_bytes(blob[prime1_offset:prime2_offset])prime2 = int_from_bytes(blob[prime2_offset:exponent1_offset])exponent1 = int_from_bytes(blob[exponent1_offset:exponent2_offset])exponent2 = int_from_bytes(blob[exponent2_offset:coefficient_offset])coefficient = int_from_bytes(blob[coefficient_offset:private_exponent_offset])private_exponent = int_from_bytes(blob[private_exponent_offset:private_exponent_offset + modulus_byte_length])rsa_private_key = keys.RSAPrivateKey({'': '','': modulus,'': public_exponent,'': private_exponent,'': prime1,'': prime2,'': exponent1,'': exponent2,'': coefficient,})return keys.PrivateKeyInfo({'': ,'': keys.PrivateKeyAlgorithm({'': '',}),'': rsa_private_key,})else:raise ValueError(pretty_message('''''',repr(key_type)))", "docstring": "Take a CNG BCRYPT_RSAFULLPRIVATE_BLOB and converts it into an ASN.1\nstructure\n\n:param key_type:\n A unicode string of \"private\" or \"public\"\n\n:param blob_struct:\n An instance of BCRYPT_RSAKEY_BLOB\n\n:param blob:\n A byte string of the binary data contained after the struct\n\n:return:\n An asn1crypto.keys.PrivateKeyInfo or asn1crypto.keys.PublicKeyInfo\n object, based on the key_type param", "id": "f9489:m7"} {"signature": "def _bcrypt_interpret_dsa_key_blob(key_type, version, blob_struct, blob):", "body": "key_byte_length = native(int, blob_struct.cbKey)if version == :q = int_from_bytes(native(byte_cls, blob_struct.q))g_offset = key_byte_lengthpublic_offset = g_offset + key_byte_lengthprivate_offset = public_offset + key_byte_lengthp = int_from_bytes(blob[:g_offset])g = int_from_bytes(blob[g_offset:public_offset])elif version == :seed_byte_length = native(int, blob_struct.cbSeedLength)group_byte_length = native(int, blob_struct.cbGroupSize)q_offset = seed_byte_lengthp_offset = q_offset + group_byte_lengthg_offset = p_offset + key_byte_lengthpublic_offset = g_offset + key_byte_lengthprivate_offset = public_offset + key_byte_lengthq = int_from_bytes(blob[q_offset:p_offset])p = int_from_bytes(blob[p_offset:g_offset])g = int_from_bytes(blob[g_offset:public_offset])else:raise ValueError('' % repr(version))if key_type == '':public = int_from_bytes(blob[public_offset:private_offset])return keys.PublicKeyInfo({'': 
keys.PublicKeyAlgorithm({'': '','': keys.DSAParams({'': p,'': q,'': g,})}),'': core.Integer(public),})elif key_type == '':private = int_from_bytes(blob[private_offset:private_offset + key_byte_length])return keys.PrivateKeyInfo({'': ,'': keys.PrivateKeyAlgorithm({'': '','': keys.DSAParams({'': p,'': q,'': g,})}),'': core.Integer(private),})else:raise ValueError(pretty_message('''''',repr(key_type)))", "docstring": "Take a CNG BCRYPT_DSA_KEY_BLOB or BCRYPT_DSA_KEY_BLOB_V2 and converts it\ninto an ASN.1 structure\n\n:param key_type:\n A unicode string of \"private\" or \"public\"\n\n:param version:\n An integer - 1 or 2, indicating the blob is BCRYPT_DSA_KEY_BLOB or\n BCRYPT_DSA_KEY_BLOB_V2\n\n:param blob_struct:\n An instance of BCRYPT_DSA_KEY_BLOB or BCRYPT_DSA_KEY_BLOB_V2\n\n:param blob:\n A byte string of the binary data contained after the struct\n\n:return:\n An asn1crypto.keys.PrivateKeyInfo or asn1crypto.keys.PublicKeyInfo\n object, based on the key_type param", "id": "f9489:m8"} {"signature": "def _bcrypt_interpret_ec_key_blob(key_type, blob_struct, blob):", "body": "magic = native(int, blob_struct.dwMagic)key_byte_length = native(int, blob_struct.cbKey)curve = {BcryptConst.BCRYPT_ECDSA_PRIVATE_P256_MAGIC: '',BcryptConst.BCRYPT_ECDSA_PRIVATE_P384_MAGIC: '',BcryptConst.BCRYPT_ECDSA_PRIVATE_P521_MAGIC: '',BcryptConst.BCRYPT_ECDSA_PUBLIC_P256_MAGIC: '',BcryptConst.BCRYPT_ECDSA_PUBLIC_P384_MAGIC: '',BcryptConst.BCRYPT_ECDSA_PUBLIC_P521_MAGIC: '',}[magic]public = b'' + blob[:key_byte_length * ]if key_type == '':return keys.PublicKeyInfo({'': keys.PublicKeyAlgorithm({'': '','': keys.ECDomainParameters(name='',value=curve)}),'': public,})elif key_type == '':private = int_from_bytes(blob[key_byte_length * :key_byte_length * ])return keys.PrivateKeyInfo({'': ,'': keys.PrivateKeyAlgorithm({'': '','': keys.ECDomainParameters(name='',value=curve)}),'': keys.ECPrivateKey({'': '','': private,'': public,}),})else:raise ValueError(pretty_message('''''',repr(key_type)))", "docstring": "Take a CNG BCRYPT_ECCKEY_BLOB and converts it into an ASN.1 structure\n\n:param key_type:\n A unicode string of \"private\" or \"public\"\n\n:param blob_struct:\n An instance of BCRYPT_ECCKEY_BLOB\n\n:param blob:\n A byte string of the binary data contained after the struct\n\n:return:\n An asn1crypto.keys.PrivateKeyInfo or asn1crypto.keys.PublicKeyInfo\n object, based on the key_type param", "id": "f9489:m9"} {"signature": "def load_certificate(source):", "body": "if isinstance(source, x509.Certificate):certificate = sourceelif isinstance(source, byte_cls):certificate = parse_certificate(source)elif isinstance(source, str_cls):with open(source, '') as f:certificate = parse_certificate(f.read())else:raise TypeError(pretty_message('''''',type_name(source)))return _load_key(certificate, Certificate)", "docstring": "Loads an x509 certificate into a Certificate object\n\n:param source:\n A byte string of file contents or a unicode string filename\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A Certificate object", "id": "f9489:m10"} {"signature": "def _load_key(key_object, container):", "body": "key_info = key_objectif isinstance(key_object, x509.Certificate):key_info = key_object['']['']algo = key_info.algorithmcurve_name = Noneif algo == '':curve_type, curve_name = key_info.curveif curve_type != '':raise AsymmetricKeyError(pretty_message(''''''))if 
curve_name not in set(['', '', '']):raise AsymmetricKeyError(pretty_message(''''''))elif algo == '':if key_info.hash_algo is None:raise IncompleteAsymmetricKeyError(pretty_message(''''''))elif key_info.bit_size > and (_win_version_info < (, ) or _backend == ''):raise AsymmetricKeyError(pretty_message('''''',key_info.hash_algo.upper(),key_info.bit_size))elif key_info.bit_size == and key_info.hash_algo == '':raise AsymmetricKeyError(pretty_message(''''''))if _backend == '':if algo == '':return container(None, key_object)return _advapi32_load_key(key_object, key_info, container)return _bcrypt_load_key(key_object, key_info, container, curve_name)", "docstring": "Loads a certificate, public key or private key into a Certificate,\nPublicKey or PrivateKey object\n\n:param key_object:\n An asn1crypto.x509.Certificate, asn1crypto.keys.PublicKeyInfo or\n asn1crypto.keys.PrivateKeyInfo object\n\n:param container:\n The class of the object to hold the key_handle\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n oscrypto.errors.AsymmetricKeyError - when the key is incompatible with the OS crypto library\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A PrivateKey, PublicKey or Certificate object, based on container", "id": "f9489:m11"} {"signature": "def _advapi32_load_key(key_object, key_info, container):", "body": "key_type = '' if isinstance(key_info, keys.PublicKeyInfo) else ''algo = key_info.algorithmif algo == '':provider = Advapi32Const.MS_ENH_RSA_AES_PROVelse:provider = Advapi32Const.MS_ENH_DSS_DH_PROVcontext_handle = Nonekey_handle = Nonetry:context_handle = open_context_handle(provider, verify_only=key_type == '')blob = _advapi32_create_blob(key_info, key_type, algo)buffer_ = buffer_from_bytes(blob)key_handle_pointer = new(advapi32, '')res = advapi32.CryptImportKey(context_handle,buffer_,len(blob),null(),,key_handle_pointer)handle_error(res)key_handle = unwrap(key_handle_pointer)output = container(key_handle, key_object)output.context_handle = context_handleif algo == '':ex_blob = _advapi32_create_blob(key_info, key_type, algo, signing=False)ex_buffer = buffer_from_bytes(ex_blob)ex_key_handle_pointer = new(advapi32, '')res = advapi32.CryptImportKey(context_handle,ex_buffer,len(ex_blob),null(),,ex_key_handle_pointer)handle_error(res)output.ex_key_handle = unwrap(ex_key_handle_pointer)return outputexcept (Exception):if key_handle:advapi32.CryptDestroyKey(key_handle)if context_handle:close_context_handle(context_handle)raise", "docstring": "Loads a certificate, public key or private key into a Certificate,\nPublicKey or PrivateKey object via CryptoAPI\n\n:param key_object:\n An asn1crypto.x509.Certificate, asn1crypto.keys.PublicKeyInfo or\n asn1crypto.keys.PrivateKeyInfo object\n\n:param key_info:\n An asn1crypto.keys.PublicKeyInfo or asn1crypto.keys.PrivateKeyInfo\n object\n\n:param container:\n The class of the object to hold the key_handle\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n oscrypto.errors.AsymmetricKeyError - when the key is incompatible with the OS crypto library\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A PrivateKey, PublicKey or Certificate object, based on container", "id": "f9489:m12"} {"signature": "def _advapi32_create_blob(key_info, key_type, algo, signing=True):", "body": "if key_type == '':blob_type = 
Advapi32Const.PUBLICKEYBLOBelse:blob_type = Advapi32Const.PRIVATEKEYBLOBif algo == '':struct_type = ''if signing:algorithm_id = Advapi32Const.CALG_RSA_SIGNelse:algorithm_id = Advapi32Const.CALG_RSA_KEYXelse:struct_type = ''algorithm_id = Advapi32Const.CALG_DSS_SIGNblob_header_pointer = struct(advapi32, '')blob_header = unwrap(blob_header_pointer)blob_header.bType = blob_typeblob_header.bVersion = Advapi32Const.CUR_BLOB_VERSIONblob_header.reserved = blob_header.aiKeyAlg = algorithm_idblob_struct_pointer = struct(advapi32, struct_type)blob_struct = unwrap(blob_struct_pointer)blob_struct.publickeystruc = blob_headerbit_size = key_info.bit_sizelen1 = bit_size // len2 = bit_size // if algo == '':pubkey_pointer = struct(advapi32, '')pubkey = unwrap(pubkey_pointer)pubkey.bitlen = bit_sizeif key_type == '':parsed_key_info = key_info[''].parsedpubkey.magic = Advapi32Const.RSA1pubkey.pubexp = parsed_key_info[''].nativeblob_data = int_to_bytes(parsed_key_info[''].native, signed=False, width=len1)[::-]else:parsed_key_info = key_info[''].parsedpubkey.magic = Advapi32Const.RSA2pubkey.pubexp = parsed_key_info[''].nativeblob_data = int_to_bytes(parsed_key_info[''].native, signed=False, width=len1)[::-]blob_data += int_to_bytes(parsed_key_info[''].native, signed=False, width=len2)[::-]blob_data += int_to_bytes(parsed_key_info[''].native, signed=False, width=len2)[::-]blob_data += int_to_bytes(parsed_key_info[''].native, signed=False, width=len2)[::-]blob_data += int_to_bytes(parsed_key_info[''].native, signed=False, width=len2)[::-]blob_data += int_to_bytes(parsed_key_info[''].native, signed=False, width=len2)[::-]blob_data += int_to_bytes(parsed_key_info[''].native, signed=False, width=len1)[::-]blob_struct.rsapubkey = pubkeyelse:pubkey_pointer = struct(advapi32, '')pubkey = unwrap(pubkey_pointer)pubkey.bitlen = bit_sizeif key_type == '':pubkey.magic = Advapi32Const.DSS1params = key_info[''][''].nativekey_data = int_to_bytes(key_info[''].parsed.native, signed=False, width=len1)[::-]else:pubkey.magic = Advapi32Const.DSS2params = key_info[''][''].nativekey_data = int_to_bytes(key_info[''].parsed.native, signed=False, width=)[::-]blob_struct.dsspubkey = pubkeyblob_data = int_to_bytes(params[''], signed=False, width=len1)[::-]blob_data += int_to_bytes(params[''], signed=False, width=)[::-]blob_data += int_to_bytes(params[''], signed=False, width=len1)[::-]blob_data += key_datadssseed_pointer = struct(advapi32, '')dssseed = unwrap(dssseed_pointer)dssseed.counter = blob_data += struct_bytes(dssseed_pointer)return struct_bytes(blob_struct_pointer) + blob_data", "docstring": "Generates a blob for importing a key to CryptoAPI\n\n:param key_info:\n An asn1crypto.keys.PublicKeyInfo or asn1crypto.keys.PrivateKeyInfo\n object\n\n:param key_type:\n A unicode string of \"public\" or \"private\"\n\n:param algo:\n A unicode string of \"rsa\" or \"dsa\"\n\n:param signing:\n If the key handle is for signing - may only be False for rsa keys\n\n:return:\n A byte string of a blob to pass to advapi32.CryptImportKey()", "id": "f9489:m13"} {"signature": "def _bcrypt_load_key(key_object, key_info, container, curve_name):", "body": "alg_handle = Nonekey_handle = Nonekey_type = '' if isinstance(key_info, keys.PublicKeyInfo) else ''algo = key_info.algorithmtry:alg_selector = key_info.curve[] if algo == '' else algoalg_constant = {'': BcryptConst.BCRYPT_RSA_ALGORITHM,'': BcryptConst.BCRYPT_DSA_ALGORITHM,'': BcryptConst.BCRYPT_ECDSA_P256_ALGORITHM,'': BcryptConst.BCRYPT_ECDSA_P384_ALGORITHM,'': 
BcryptConst.BCRYPT_ECDSA_P521_ALGORITHM,}[alg_selector]alg_handle = open_alg_handle(alg_constant)if algo == '':if key_type == '':blob_type = BcryptConst.BCRYPT_RSAPUBLIC_BLOBmagic = BcryptConst.BCRYPT_RSAPUBLIC_MAGICparsed_key = key_info[''].parsedprime1_size = prime2_size = else:blob_type = BcryptConst.BCRYPT_RSAFULLPRIVATE_BLOBmagic = BcryptConst.BCRYPT_RSAFULLPRIVATE_MAGICparsed_key = key_info[''].parsedprime1 = int_to_bytes(parsed_key[''].native)prime2 = int_to_bytes(parsed_key[''].native)exponent1 = int_to_bytes(parsed_key[''].native)exponent2 = int_to_bytes(parsed_key[''].native)coefficient = int_to_bytes(parsed_key[''].native)private_exponent = int_to_bytes(parsed_key[''].native)prime1_size = len(prime1)prime2_size = len(prime2)public_exponent = int_to_bytes(parsed_key[''].native)modulus = int_to_bytes(parsed_key[''].native)blob_struct_pointer = struct(bcrypt, '')blob_struct = unwrap(blob_struct_pointer)blob_struct.Magic = magicblob_struct.BitLength = key_info.bit_sizeblob_struct.cbPublicExp = len(public_exponent)blob_struct.cbModulus = len(modulus)blob_struct.cbPrime1 = prime1_sizeblob_struct.cbPrime2 = prime2_sizeblob = struct_bytes(blob_struct_pointer) + public_exponent + modulusif key_type == '':blob += prime1 + prime2blob += fill_width(exponent1, prime1_size)blob += fill_width(exponent2, prime2_size)blob += fill_width(coefficient, prime1_size)blob += fill_width(private_exponent, len(modulus))elif algo == '':if key_type == '':blob_type = BcryptConst.BCRYPT_DSA_PUBLIC_BLOBpublic_key = key_info[''].parsed.nativeparams = key_info['']['']else:blob_type = BcryptConst.BCRYPT_DSA_PRIVATE_BLOBpublic_key = key_info.public_key.nativeprivate_bytes = int_to_bytes(key_info[''].parsed.native)params = key_info['']['']public_bytes = int_to_bytes(public_key)p = int_to_bytes(params[''].native)g = int_to_bytes(params[''].native)q = int_to_bytes(params[''].native)if key_info.bit_size > :q_len = len(q)else:q_len = key_width = max(len(public_bytes), len(g), len(p))public_bytes = fill_width(public_bytes, key_width)p = fill_width(p, key_width)g = fill_width(g, key_width)q = fill_width(q, q_len)count = b'' * seed = b'' * q_lenif key_info.bit_size > :if key_type == '':magic = BcryptConst.BCRYPT_DSA_PUBLIC_MAGIC_V2else:magic = BcryptConst.BCRYPT_DSA_PRIVATE_MAGIC_V2blob_struct_pointer = struct(bcrypt, '')blob_struct = unwrap(blob_struct_pointer)blob_struct.dwMagic = magicblob_struct.cbKey = key_widthblob_struct.hashAlgorithm = BcryptConst.DSA_HASH_ALGORITHM_SHA256blob_struct.standardVersion = BcryptConst.DSA_FIPS186_3blob_struct.cbSeedLength = q_lenblob_struct.cbGroupSize = q_lenblob_struct.Count = byte_array(count)blob = struct_bytes(blob_struct_pointer)blob += seed + q + p + g + public_bytesif key_type == '':blob += fill_width(private_bytes, q_len)else:if key_type == '':magic = BcryptConst.BCRYPT_DSA_PUBLIC_MAGICelse:magic = BcryptConst.BCRYPT_DSA_PRIVATE_MAGICblob_struct_pointer = struct(bcrypt, '')blob_struct = unwrap(blob_struct_pointer)blob_struct.dwMagic = magicblob_struct.cbKey = key_widthblob_struct.Count = byte_array(count)blob_struct.Seed = byte_array(seed)blob_struct.q = byte_array(q)blob = struct_bytes(blob_struct_pointer) + p + g + public_bytesif key_type == '':blob += fill_width(private_bytes, q_len)elif algo == '':if key_type == '':blob_type = BcryptConst.BCRYPT_ECCPUBLIC_BLOBpublic_key = key_info['']else:blob_type = BcryptConst.BCRYPT_ECCPRIVATE_BLOBpublic_key = key_info.public_keyprivate_bytes = int_to_bytes(key_info[''].parsed[''].native)blob_struct_pointer = struct(bcrypt, 
'')blob_struct = unwrap(blob_struct_pointer)magic = {('', ''): BcryptConst.BCRYPT_ECDSA_PUBLIC_P256_MAGIC,('', ''): BcryptConst.BCRYPT_ECDSA_PUBLIC_P384_MAGIC,('', ''): BcryptConst.BCRYPT_ECDSA_PUBLIC_P521_MAGIC,('', ''): BcryptConst.BCRYPT_ECDSA_PRIVATE_P256_MAGIC,('', ''): BcryptConst.BCRYPT_ECDSA_PRIVATE_P384_MAGIC,('', ''): BcryptConst.BCRYPT_ECDSA_PRIVATE_P521_MAGIC,}[(key_type, curve_name)]key_width = {'': ,'': ,'': }[curve_name]x, y = public_key.to_coords()x_bytes = int_to_bytes(x)y_bytes = int_to_bytes(y)x_bytes = fill_width(x_bytes, key_width)y_bytes = fill_width(y_bytes, key_width)blob_struct.dwMagic = magicblob_struct.cbKey = key_widthblob = struct_bytes(blob_struct_pointer) + x_bytes + y_bytesif key_type == '':blob += fill_width(private_bytes, key_width)key_handle_pointer = new(bcrypt, '')res = bcrypt.BCryptImportKeyPair(alg_handle,null(),blob_type,key_handle_pointer,blob,len(blob),BcryptConst.BCRYPT_NO_KEY_VALIDATION)handle_error(res)key_handle = unwrap(key_handle_pointer)return container(key_handle, key_object)finally:if alg_handle:close_alg_handle(alg_handle)", "docstring": "Loads a certificate, public key or private key into a Certificate,\nPublicKey or PrivateKey object via CNG\n\n:param key_object:\n An asn1crypto.x509.Certificate, asn1crypto.keys.PublicKeyInfo or\n asn1crypto.keys.PrivateKeyInfo object\n\n:param key_info:\n An asn1crypto.keys.PublicKeyInfo or asn1crypto.keys.PrivateKeyInfo\n object\n\n:param container:\n The class of the object to hold the key_handle\n\n:param curve_name:\n None or a unicode string of the curve name for an EC key\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n oscrypto.errors.AsymmetricKeyError - when the key is incompatible with the OS crypto library\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A PrivateKey, PublicKey or Certificate object, based on container", "id": "f9489:m14"} {"signature": "def load_private_key(source, password=None):", "body": "if isinstance(source, keys.PrivateKeyInfo):private_object = sourceelse:if password is not None:if isinstance(password, str_cls):password = password.encode('')if not isinstance(password, byte_cls):raise TypeError(pretty_message('''''',type_name(password)))if isinstance(source, str_cls):with open(source, '') as f:source = f.read()elif not isinstance(source, byte_cls):raise TypeError(pretty_message('''''',type_name(source)))private_object = parse_private(source, password)return _load_key(private_object, PrivateKey)", "docstring": "Loads a private key into a PrivateKey object\n\n:param source:\n A byte string of file contents, a unicode string filename or an\n asn1crypto.keys.PrivateKeyInfo object\n\n:param password:\n A byte or unicode string to decrypt the private key file. Unicode\n strings will be encoded using UTF-8. 
Not used is the source is a\n PrivateKeyInfo object.\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n oscrypto.errors.AsymmetricKeyError - when the private key is incompatible with the OS crypto library\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A PrivateKey object", "id": "f9489:m15"} {"signature": "def load_public_key(source):", "body": "if isinstance(source, keys.PublicKeyInfo):public_key = sourceelif isinstance(source, byte_cls):public_key = parse_public(source)elif isinstance(source, str_cls):with open(source, '') as f:public_key = parse_public(f.read())else:raise TypeError(pretty_message('''''',type_name(public_key)))return _load_key(public_key, PublicKey)", "docstring": "Loads a public key into a PublicKey object\n\n:param source:\n A byte string of file contents, a unicode string filename or an\n asn1crypto.keys.PublicKeyInfo object\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n oscrypto.errors.AsymmetricKeyError - when the public key is incompatible with the OS crypto library\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A PublicKey object", "id": "f9489:m16"} {"signature": "def load_pkcs12(source, password=None):", "body": "if password is not None:if isinstance(password, str_cls):password = password.encode('')if not isinstance(password, byte_cls):raise TypeError(pretty_message('''''',type_name(password)))if isinstance(source, str_cls):with open(source, '') as f:source = f.read()elif not isinstance(source, byte_cls):raise TypeError(pretty_message('''''',type_name(source)))key_info, cert_info, extra_certs_info = parse_pkcs12(source, password)key = Nonecert = Noneif key_info:key = _load_key(key_info, PrivateKey)if cert_info:cert = _load_key(cert_info.public_key, Certificate)extra_certs = [_load_key(info.public_key, Certificate) for info in extra_certs_info]return (key, cert, extra_certs)", "docstring": "Loads a .p12 or .pfx file into a PrivateKey object and one or more\nCertificates objects\n\n:param source:\n A byte string of file contents or a unicode string filename\n\n:param password:\n A byte or unicode string to decrypt the PKCS12 file. Unicode strings\n will be encoded using UTF-8.\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n oscrypto.errors.AsymmetricKeyError - when a contained key is incompatible with the OS crypto library\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A three-element tuple containing (PrivateKey, Certificate, [Certificate, ...])", "id": "f9489:m17"} {"signature": "def rsa_pkcs1v15_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if certificate_or_public_key.algorithm != '':raise ValueError('')return _verify(certificate_or_public_key, signature, data, hash_algorithm)", "docstring": "Verifies an RSASSA-PKCS-v1.5 signature.\n\nWhen the hash_algorithm is \"raw\", the operation is identical to RSA\npublic key decryption. 
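The loading records above (load_certificate, load_private_key, load_public_key, load_pkcs12) all accept file contents, a filename, or a pre-parsed asn1crypto object. A brief sketch, assuming the equivalent public oscrypto.asymmetric wrappers; the paths and passwords are placeholders.

```python
# Sketch: paths and passwords are placeholders; the wrappers are assumed to
# mirror the backend signatures documented above.
from oscrypto import asymmetric

# A filename, a byte string of file contents, or an asn1crypto object works
cert = asymmetric.load_certificate('server.crt')
private_key = asymmetric.load_private_key('server.key', password='hunter2')

# A .p12/.pfx bundle unpacks into (private key, end-entity cert, extra certs)
key, cert2, extra_certs = asymmetric.load_pkcs12('bundle.p12', password='hunter2')
```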
That is: the data is not hashed and no ASN.1\nstructure with an algorithm identifier of the hash algorithm is placed in\nthe encrypted byte string.\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n A byte string of the signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\" or \"raw\"\n\n:raises:\n oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9489:m18"} {"signature": "def rsa_pss_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if certificate_or_public_key.algorithm != '':raise ValueError('')return _verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=True)", "docstring": "Verifies an RSASSA-PSS signature. For the PSS padding the mask gen algorithm\nwill be mgf1 using the same hash algorithm as the signature. The salt length\nwith be the length of the hash algorithm, and the trailer field with be the\nstandard 0xBC byte.\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n A byte string of the signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9489:m19"} {"signature": "def dsa_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if certificate_or_public_key.algorithm != '':raise ValueError('')return _verify(certificate_or_public_key, signature, data, hash_algorithm)", "docstring": "Verifies a DSA signature\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n A byte string of the signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9489:m20"} {"signature": "def ecdsa_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if certificate_or_public_key.algorithm != '':raise ValueError('')return _verify(certificate_or_public_key, signature, data, hash_algorithm)", "docstring": "Verifies an ECDSA signature\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n A byte string of the signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n 
oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9489:m21"} {"signature": "def _verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=False):", "body": "if not isinstance(certificate_or_public_key, (Certificate, PublicKey)):raise TypeError(pretty_message('''''',type_name(certificate_or_public_key)))if not isinstance(signature, byte_cls):raise TypeError(pretty_message('''''',type_name(signature)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))valid_hash_algorithms = set(['', '', '', '', ''])if certificate_or_public_key.algorithm == '' and not rsa_pss_padding:valid_hash_algorithms |= set([''])if hash_algorithm not in valid_hash_algorithms:valid_hash_algorithms_error = ''if certificate_or_public_key.algorithm == '' and not rsa_pss_padding:valid_hash_algorithms_error += ''raise ValueError(pretty_message('''''',valid_hash_algorithms_error,repr(hash_algorithm)))if certificate_or_public_key.algorithm != '' and rsa_pss_padding is not False:raise ValueError(pretty_message('''''',certificate_or_public_key.algorithm.upper()))if hash_algorithm == '':if len(data) > certificate_or_public_key.byte_size - :raise ValueError(pretty_message('''''',certificate_or_public_key.byte_size,len(data)))if _backend == '':if certificate_or_public_key.algorithm == '':return _pure_python_ecdsa_verify(certificate_or_public_key, signature, data, hash_algorithm)return _advapi32_verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding)return _bcrypt_verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding)", "docstring": "Verifies an RSA, DSA or ECDSA signature\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n A byte string of the signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\" or \"raw\"\n\n:param rsa_pss_padding:\n If PSS padding should be used for RSA keys\n\n:raises:\n oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9489:m22"} {"signature": "def _advapi32_verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=False):", "body": "algo = certificate_or_public_key.algorithmif algo == '' and rsa_pss_padding:hash_length = {'': ,'': ,'': ,'': ,'': }.get(hash_algorithm, )decrypted_signature = raw_rsa_public_crypt(certificate_or_public_key, signature)key_size = certificate_or_public_key.bit_sizeif not verify_pss_padding(hash_algorithm, hash_length, key_size, data, decrypted_signature):raise SignatureError('')returnif algo == '' and hash_algorithm == '':padded_plaintext = raw_rsa_public_crypt(certificate_or_public_key, signature)try:plaintext = remove_pkcs1v15_signature_padding(certificate_or_public_key.byte_size, padded_plaintext)if not constant_compare(plaintext, data):raise ValueError()except (ValueError):raise SignatureError('')returnhash_handle = Nonetry:alg_id = {'': Advapi32Const.CALG_MD5,'': 
Advapi32Const.CALG_SHA1,'': Advapi32Const.CALG_SHA_256,'': Advapi32Const.CALG_SHA_384,'': Advapi32Const.CALG_SHA_512,}[hash_algorithm]hash_handle_pointer = new(advapi32, '')res = advapi32.CryptCreateHash(certificate_or_public_key.context_handle,alg_id,null(),,hash_handle_pointer)handle_error(res)hash_handle = unwrap(hash_handle_pointer)res = advapi32.CryptHashData(hash_handle, data, len(data), )handle_error(res)if algo == '':try:signature = algos.DSASignature.load(signature).to_p1363()half_len = len(signature) // signature = signature[half_len:] + signature[:half_len]except (ValueError, OverflowError, TypeError):raise SignatureError('')reversed_signature = signature[::-]res = advapi32.CryptVerifySignatureW(hash_handle,reversed_signature,len(signature),certificate_or_public_key.key_handle,null(),)handle_error(res)finally:if hash_handle:advapi32.CryptDestroyHash(hash_handle)", "docstring": "Verifies an RSA, DSA or ECDSA signature via CryptoAPI\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n A byte string of the signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\" or \"raw\"\n\n:param rsa_pss_padding:\n If PSS padding should be used for RSA keys\n\n:raises:\n oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9489:m23"} {"signature": "def _bcrypt_verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=False):", "body": "if hash_algorithm == '':digest = dataelse:hash_constant = {'': BcryptConst.BCRYPT_MD5_ALGORITHM,'': BcryptConst.BCRYPT_SHA1_ALGORITHM,'': BcryptConst.BCRYPT_SHA256_ALGORITHM,'': BcryptConst.BCRYPT_SHA384_ALGORITHM,'': BcryptConst.BCRYPT_SHA512_ALGORITHM}[hash_algorithm]digest = getattr(hashlib, hash_algorithm)(data).digest()padding_info = null()flags = if certificate_or_public_key.algorithm == '':if rsa_pss_padding:flags = BcryptConst.BCRYPT_PAD_PSSpadding_info_struct_pointer = struct(bcrypt, '')padding_info_struct = unwrap(padding_info_struct_pointer)hash_buffer = buffer_from_unicode(hash_constant)padding_info_struct.pszAlgId = cast(bcrypt, '', hash_buffer)padding_info_struct.cbSalt = len(digest)else:flags = BcryptConst.BCRYPT_PAD_PKCS1padding_info_struct_pointer = struct(bcrypt, '')padding_info_struct = unwrap(padding_info_struct_pointer)if hash_algorithm == '':padding_info_struct.pszAlgId = null()else:hash_buffer = buffer_from_unicode(hash_constant)padding_info_struct.pszAlgId = cast(bcrypt, '', hash_buffer)padding_info = cast(bcrypt, '', padding_info_struct_pointer)else:try:signature = algos.DSASignature.load(signature).to_p1363()except (ValueError, OverflowError, TypeError):raise SignatureError('')res = bcrypt.BCryptVerifySignature(certificate_or_public_key.key_handle,padding_info,digest,len(digest),signature,len(signature),flags)failure = res == BcryptConst.STATUS_INVALID_SIGNATUREfailure = failure or res == BcryptConst.STATUS_INVALID_PARAMETERif failure:raise SignatureError('')handle_error(res)", "docstring": "Verifies an RSA, DSA or ECDSA signature via CNG\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n A byte string of the 
signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\" or \"raw\"\n\n:param rsa_pss_padding:\n If PSS padding should be used for RSA keys\n\n:raises:\n oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9489:m24"} {"signature": "def rsa_pkcs1v15_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '':raise ValueError('')return _sign(private_key, data, hash_algorithm)", "docstring": "Generates an RSASSA-PKCS-v1.5 signature.\n\nWhen the hash_algorithm is \"raw\", the operation is identical to RSA\nprivate key encryption. That is: the data is not hashed and no ASN.1\nstructure with an algorithm identifier of the hash algorithm is placed in\nthe encrypted byte string.\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\" or \"raw\"\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9489:m25"} {"signature": "def rsa_pss_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '':raise ValueError('')return _sign(private_key, data, hash_algorithm, rsa_pss_padding=True)", "docstring": "Generates an RSASSA-PSS signature. For the PSS padding the mask gen\nalgorithm will be mgf1 using the same hash algorithm as the signature. 
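The verify records above pair with the sign records that follow, covering RSASSA-PKCS#1 v1.5, RSASSA-PSS, DSA and ECDSA over the listed hash algorithms. A round-trip sketch, assuming the oscrypto.asymmetric wrappers of the same names; the message bytes are illustrative.

```python
# Sketch of a PKCS#1 v1.5 sign/verify round trip with the functions named in
# the records above, assumed to be re-exported by oscrypto.asymmetric.
from oscrypto import asymmetric
from oscrypto.errors import SignatureError

public_key, private_key = asymmetric.generate_pair('rsa', bit_size=2048)
data = b'message to authenticate'

signature = asymmetric.rsa_pkcs1v15_sign(private_key, data, 'sha256')

try:
    # verification returns None on success and raises SignatureError on failure
    asymmetric.rsa_pkcs1v15_verify(public_key, signature, data, 'sha256')
    print('signature ok')
except SignatureError:
    print('signature invalid')
```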
The\nsalt length with be the length of the hash algorithm, and the trailer field\nwith be the standard 0xBC byte.\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9489:m26"} {"signature": "def dsa_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '':raise ValueError('')return _sign(private_key, data, hash_algorithm)", "docstring": "Generates a DSA signature\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9489:m27"} {"signature": "def ecdsa_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '':raise ValueError('')return _sign(private_key, data, hash_algorithm)", "docstring": "Generates an ECDSA signature\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9489:m28"} {"signature": "def _sign(private_key, data, hash_algorithm, rsa_pss_padding=False):", "body": "if not isinstance(private_key, PrivateKey):raise TypeError(pretty_message('''''',type_name(private_key)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))valid_hash_algorithms = set(['', '', '', '', ''])if private_key.algorithm == '' and not rsa_pss_padding:valid_hash_algorithms |= set([''])if hash_algorithm not in valid_hash_algorithms:valid_hash_algorithms_error = ''if private_key.algorithm == '' and not rsa_pss_padding:valid_hash_algorithms_error += ''raise ValueError(pretty_message('''''',valid_hash_algorithms_error,repr(hash_algorithm)))if private_key.algorithm != '' and rsa_pss_padding is not False:raise ValueError(pretty_message('''''',private_key.algorithm.upper()))if hash_algorithm == '':if len(data) > private_key.byte_size - :raise ValueError(pretty_message('''''',private_key.byte_size,len(data)))if _backend == '':if private_key.algorithm == '':return _pure_python_ecdsa_sign(private_key, data, hash_algorithm)return _advapi32_sign(private_key, data, hash_algorithm, rsa_pss_padding)return _bcrypt_sign(private_key, data, hash_algorithm, rsa_pss_padding)", "docstring": "Generates an RSA, DSA or ECDSA signature\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", 
\"sha1\", \"sha256\", \"sha384\", \"sha512\" or \"raw\"\n\n:param rsa_pss_padding:\n If PSS padding should be used for RSA keys\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9489:m29"} {"signature": "def _advapi32_sign(private_key, data, hash_algorithm, rsa_pss_padding=False):", "body": "algo = private_key.algorithmif algo == '' and hash_algorithm == '':padded_data = add_pkcs1v15_signature_padding(private_key.byte_size, data)return raw_rsa_private_crypt(private_key, padded_data)if algo == '' and rsa_pss_padding:hash_length = {'': ,'': ,'': ,'': ,'': }.get(hash_algorithm, )padded_data = add_pss_padding(hash_algorithm, hash_length, private_key.bit_size, data)return raw_rsa_private_crypt(private_key, padded_data)if private_key.algorithm == '' and hash_algorithm == '':raise ValueError(pretty_message(''''''))hash_handle = Nonetry:alg_id = {'': Advapi32Const.CALG_MD5,'': Advapi32Const.CALG_SHA1,'': Advapi32Const.CALG_SHA_256,'': Advapi32Const.CALG_SHA_384,'': Advapi32Const.CALG_SHA_512,}[hash_algorithm]hash_handle_pointer = new(advapi32, '')res = advapi32.CryptCreateHash(private_key.context_handle,alg_id,null(),,hash_handle_pointer)handle_error(res)hash_handle = unwrap(hash_handle_pointer)res = advapi32.CryptHashData(hash_handle, data, len(data), )handle_error(res)out_len = new(advapi32, '')res = advapi32.CryptSignHashW(hash_handle,Advapi32Const.AT_SIGNATURE,null(),,null(),out_len)handle_error(res)buffer_length = deref(out_len)buffer_ = buffer_from_bytes(buffer_length)res = advapi32.CryptSignHashW(hash_handle,Advapi32Const.AT_SIGNATURE,null(),,buffer_,out_len)handle_error(res)output = bytes_from_buffer(buffer_, deref(out_len))output = output[::-]if algo == '':half_len = len(output) // output = output[half_len:] + output[:half_len]output = algos.DSASignature.from_p1363(output).dump()return outputfinally:if hash_handle:advapi32.CryptDestroyHash(hash_handle)", "docstring": "Generates an RSA, DSA or ECDSA signature via CryptoAPI\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\" or \"raw\"\n\n:param rsa_pss_padding:\n If PSS padding should be used for RSA keys\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9489:m30"} {"signature": "def _bcrypt_sign(private_key, data, hash_algorithm, rsa_pss_padding=False):", "body": "if hash_algorithm == '':digest = dataelse:hash_constant = {'': BcryptConst.BCRYPT_MD5_ALGORITHM,'': BcryptConst.BCRYPT_SHA1_ALGORITHM,'': BcryptConst.BCRYPT_SHA256_ALGORITHM,'': BcryptConst.BCRYPT_SHA384_ALGORITHM,'': BcryptConst.BCRYPT_SHA512_ALGORITHM}[hash_algorithm]digest = getattr(hashlib, hash_algorithm)(data).digest()padding_info = null()flags = if private_key.algorithm == '':if rsa_pss_padding:hash_length = {'': ,'': ,'': ,'': ,'': }[hash_algorithm]flags = BcryptConst.BCRYPT_PAD_PSSpadding_info_struct_pointer = struct(bcrypt, '')padding_info_struct = unwrap(padding_info_struct_pointer)hash_buffer = buffer_from_unicode(hash_constant)padding_info_struct.pszAlgId = cast(bcrypt, 
'', hash_buffer)padding_info_struct.cbSalt = hash_lengthelse:flags = BcryptConst.BCRYPT_PAD_PKCS1padding_info_struct_pointer = struct(bcrypt, '')padding_info_struct = unwrap(padding_info_struct_pointer)if hash_algorithm == '':padding_info_struct.pszAlgId = null()else:hash_buffer = buffer_from_unicode(hash_constant)padding_info_struct.pszAlgId = cast(bcrypt, '', hash_buffer)padding_info = cast(bcrypt, '', padding_info_struct_pointer)if private_key.algorithm == '' and private_key.bit_size > and hash_algorithm in set(['', '']):raise ValueError(pretty_message(''''''))out_len = new(bcrypt, '')res = bcrypt.BCryptSignHash(private_key.key_handle,padding_info,digest,len(digest),null(),,out_len,flags)handle_error(res)buffer_len = deref(out_len)buffer = buffer_from_bytes(buffer_len)if private_key.algorithm == '':padding_info = cast(bcrypt, '', padding_info_struct_pointer)res = bcrypt.BCryptSignHash(private_key.key_handle,padding_info,digest,len(digest),buffer,buffer_len,out_len,flags)handle_error(res)signature = bytes_from_buffer(buffer, deref(out_len))if private_key.algorithm != '':signature = algos.DSASignature.from_p1363(signature).dump()return signature", "docstring": "Generates an RSA, DSA or ECDSA signature via CNG\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\" or \"raw\"\n\n:param rsa_pss_padding:\n If PSS padding should be used for RSA keys\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9489:m31"} {"signature": "def _encrypt(certificate_or_public_key, data, rsa_oaep_padding=False):", "body": "if not isinstance(certificate_or_public_key, (Certificate, PublicKey)):raise TypeError(pretty_message('''''',type_name(certificate_or_public_key)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))if not isinstance(rsa_oaep_padding, bool):raise TypeError(pretty_message('''''',type_name(rsa_oaep_padding)))if _backend == '':return _advapi32_encrypt(certificate_or_public_key, data, rsa_oaep_padding)return _bcrypt_encrypt(certificate_or_public_key, data, rsa_oaep_padding)", "docstring": "Encrypts a value using an RSA public key\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to encrypt with\n\n:param data:\n A byte string of the data to encrypt\n\n:param rsa_oaep_padding:\n If OAEP padding should be used instead of PKCS#1 v1.5\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the ciphertext", "id": "f9489:m32"} {"signature": "def _advapi32_encrypt(certificate_or_public_key, data, rsa_oaep_padding=False):", "body": "flags = if rsa_oaep_padding:flags = Advapi32Const.CRYPT_OAEPout_len = new(advapi32, '', len(data))res = advapi32.CryptEncrypt(certificate_or_public_key.ex_key_handle,null(),True,flags,null(),out_len,)handle_error(res)buffer_len = deref(out_len)buffer = buffer_from_bytes(buffer_len)write_to_buffer(buffer, data)pointer_set(out_len, len(data))res = 
advapi32.CryptEncrypt(certificate_or_public_key.ex_key_handle,null(),True,flags,buffer,out_len,buffer_len)handle_error(res)return bytes_from_buffer(buffer, deref(out_len))[::-]", "docstring": "Encrypts a value using an RSA public key via CryptoAPI\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to encrypt with\n\n:param data:\n A byte string of the data to encrypt\n\n:param rsa_oaep_padding:\n If OAEP padding should be used instead of PKCS#1 v1.5\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the ciphertext", "id": "f9489:m33"} {"signature": "def _bcrypt_encrypt(certificate_or_public_key, data, rsa_oaep_padding=False):", "body": "flags = BcryptConst.BCRYPT_PAD_PKCS1if rsa_oaep_padding is True:flags = BcryptConst.BCRYPT_PAD_OAEPpadding_info_struct_pointer = struct(bcrypt, '')padding_info_struct = unwrap(padding_info_struct_pointer)hash_buffer = buffer_from_unicode(BcryptConst.BCRYPT_SHA1_ALGORITHM)padding_info_struct.pszAlgId = cast(bcrypt, '', hash_buffer)padding_info_struct.pbLabel = null()padding_info_struct.cbLabel = padding_info = cast(bcrypt, '', padding_info_struct_pointer)else:padding_info = null()out_len = new(bcrypt, '')res = bcrypt.BCryptEncrypt(certificate_or_public_key.key_handle,data,len(data),padding_info,null(),,null(),,out_len,flags)handle_error(res)buffer_len = deref(out_len)buffer = buffer_from_bytes(buffer_len)res = bcrypt.BCryptEncrypt(certificate_or_public_key.key_handle,data,len(data),padding_info,null(),,buffer,buffer_len,out_len,flags)handle_error(res)return bytes_from_buffer(buffer, deref(out_len))", "docstring": "Encrypts a value using an RSA public key via CNG\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to encrypt with\n\n:param data:\n A byte string of the data to encrypt\n\n:param rsa_oaep_padding:\n If OAEP padding should be used instead of PKCS#1 v1.5\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the ciphertext", "id": "f9489:m34"} {"signature": "def _decrypt(private_key, ciphertext, rsa_oaep_padding=False):", "body": "if not isinstance(private_key, PrivateKey):raise TypeError(pretty_message('''''',type_name(private_key)))if not isinstance(ciphertext, byte_cls):raise TypeError(pretty_message('''''',type_name(ciphertext)))if not isinstance(rsa_oaep_padding, bool):raise TypeError(pretty_message('''''',type_name(rsa_oaep_padding)))if _backend == '':return _advapi32_decrypt(private_key, ciphertext, rsa_oaep_padding)return _bcrypt_decrypt(private_key, ciphertext, rsa_oaep_padding)", "docstring": "Encrypts a value using an RSA private key\n\n:param private_key:\n A PrivateKey instance to decrypt with\n\n:param ciphertext:\n A byte string of the data to decrypt\n\n:param rsa_oaep_padding:\n If OAEP padding should be used instead of PKCS#1 v1.5\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9489:m35"} {"signature": "def _advapi32_decrypt(private_key, ciphertext, rsa_oaep_padding=False):", "body": "flags = if 
rsa_oaep_padding:flags = Advapi32Const.CRYPT_OAEPciphertext = ciphertext[::-]buffer = buffer_from_bytes(ciphertext)out_len = new(advapi32, '', len(ciphertext))res = advapi32.CryptDecrypt(private_key.ex_key_handle,null(),True,flags,buffer,out_len)handle_error(res)return bytes_from_buffer(buffer, deref(out_len))", "docstring": "Encrypts a value using an RSA private key via CryptoAPI\n\n:param private_key:\n A PrivateKey instance to decrypt with\n\n:param ciphertext:\n A byte string of the data to decrypt\n\n:param rsa_oaep_padding:\n If OAEP padding should be used instead of PKCS#1 v1.5\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9489:m36"} {"signature": "def _bcrypt_decrypt(private_key, ciphertext, rsa_oaep_padding=False):", "body": "flags = BcryptConst.BCRYPT_PAD_PKCS1if rsa_oaep_padding is True:flags = BcryptConst.BCRYPT_PAD_OAEPpadding_info_struct_pointer = struct(bcrypt, '')padding_info_struct = unwrap(padding_info_struct_pointer)hash_buffer = buffer_from_unicode(BcryptConst.BCRYPT_SHA1_ALGORITHM)padding_info_struct.pszAlgId = cast(bcrypt, '', hash_buffer)padding_info_struct.pbLabel = null()padding_info_struct.cbLabel = padding_info = cast(bcrypt, '', padding_info_struct_pointer)else:padding_info = null()out_len = new(bcrypt, '')res = bcrypt.BCryptDecrypt(private_key.key_handle,ciphertext,len(ciphertext),padding_info,null(),,null(),,out_len,flags)handle_error(res)buffer_len = deref(out_len)buffer = buffer_from_bytes(buffer_len)res = bcrypt.BCryptDecrypt(private_key.key_handle,ciphertext,len(ciphertext),padding_info,null(),,buffer,buffer_len,out_len,flags)handle_error(res)return bytes_from_buffer(buffer, deref(out_len))", "docstring": "Encrypts a value using an RSA private key via CNG\n\n:param private_key:\n A PrivateKey instance to decrypt with\n\n:param ciphertext:\n A byte string of the data to decrypt\n\n:param rsa_oaep_padding:\n If OAEP padding should be used instead of PKCS#1 v1.5\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9489:m37"} {"signature": "def rsa_pkcs1v15_encrypt(certificate_or_public_key, data):", "body": "return _encrypt(certificate_or_public_key, data)", "docstring": "Encrypts a byte string using an RSA public key or certificate. Uses PKCS#1\nv1.5 padding.\n\n:param certificate_or_public_key:\n A PublicKey or Certificate object\n\n:param data:\n A byte string, with a maximum length 11 bytes less than the key length\n (in bytes)\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the encrypted data", "id": "f9489:m38"} {"signature": "def rsa_pkcs1v15_decrypt(private_key, ciphertext):", "body": "return _decrypt(private_key, ciphertext)", "docstring": "Decrypts a byte string using an RSA private key. 
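A round-trip sketch for the PKCS#1 v1.5 encryption helpers above, again assuming the public oscrypto.asymmetric wrappers; note the plaintext must be at least 11 bytes shorter than the key size in bytes:

from oscrypto import asymmetric

public_key, private_key = asymmetric.generate_pair('rsa', bit_size=2048)

# a 2048-bit key is 256 bytes, so the plaintext here may be at most 245 bytes
secret = b'a short session key'
ciphertext = asymmetric.rsa_pkcs1v15_encrypt(public_key, secret)
assert asymmetric.rsa_pkcs1v15_decrypt(private_key, ciphertext) == secret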
Uses PKCS#1 v1.5 padding.\n\n:param private_key:\n A PrivateKey object\n\n:param ciphertext:\n A byte string of the encrypted data\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the original plaintext", "id": "f9489:m39"} {"signature": "def rsa_oaep_encrypt(certificate_or_public_key, data):", "body": "return _encrypt(certificate_or_public_key, data, rsa_oaep_padding=True)", "docstring": "Encrypts a byte string using an RSA public key or certificate. Uses PKCS#1\nOAEP padding with SHA1.\n\n:param certificate_or_public_key:\n A PublicKey or Certificate object\n\n:param data:\n A byte string, with a maximum length 41 bytes (or more) less than the\n key length (in bytes)\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the encrypted data", "id": "f9489:m40"} {"signature": "def rsa_oaep_decrypt(private_key, ciphertext):", "body": "return _decrypt(private_key, ciphertext, rsa_oaep_padding=True)", "docstring": "Decrypts a byte string using an RSA private key. Uses PKCS#1 OAEP padding\nwith SHA1.\n\n:param private_key:\n A PrivateKey object\n\n:param ciphertext:\n A byte string of the encrypted data\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the original plaintext", "id": "f9489:m41"} {"signature": "def __init__(self, key_handle, asn1):", "body": "self.key_handle = key_handleself.asn1 = asn1if _backend == '':self._lib = advapi32else:self._lib = bcrypt", "docstring": ":param key_handle:\n A CNG BCRYPT_KEY_HANDLE value (Vista and newer) or an HCRYPTKEY\n (XP and 2003) from loading/importing the key\n\n:param asn1:\n An asn1crypto.keys.PrivateKeyInfo object", "id": "f9489:c0:m0"} {"signature": "@propertydef algorithm(self):", "body": "return self.asn1.algorithm", "docstring": ":return:\n A unicode string of \"rsa\", \"dsa\" or \"ec\"", "id": "f9489:c0:m1"} {"signature": "@propertydef curve(self):", "body": "return self.asn1.curve[]", "docstring": ":return:\n A unicode string of EC curve name", "id": "f9489:c0:m2"} {"signature": "@propertydef bit_size(self):", "body": "return self.asn1.bit_size", "docstring": ":return:\n The number of bits in the key, as an integer", "id": "f9489:c0:m3"} {"signature": "@propertydef byte_size(self):", "body": "return self.asn1.byte_size", "docstring": ":return:\n The number of bytes in the key, as an integer", "id": "f9489:c0:m4"} {"signature": "def __init__(self, key_handle, asn1):", "body": "PrivateKey.__init__(self, key_handle, asn1)", "docstring": ":param key_handle:\n A CNG BCRYPT_KEY_HANDLE value (Vista and newer) or an HCRYPTKEY\n (XP and 2003) from loading/importing the key\n\n:param asn1:\n An asn1crypto.keys.PublicKeyInfo object", "id": "f9489:c1:m0"} {"signature": "def __init__(self, key_handle, asn1):", "body": "PublicKey.__init__(self, key_handle, asn1)", "docstring": ":param key_handle:\n A CNG BCRYPT_KEY_HANDLE value (Vista and newer) or an HCRYPTKEY\n (XP and 2003) from loading/importing the certificate\n\n:param asn1:\n An asn1crypto.x509.Certificate object", "id": "f9489:c2:m0"} {"signature": 
"@propertydef algorithm(self):", "body": "return self.asn1.public_key.algorithm", "docstring": ":return:\n A unicode string of \"rsa\", \"dsa\" or \"ec\"", "id": "f9489:c2:m1"} {"signature": "@propertydef curve(self):", "body": "return self.asn1.public_key.curve[]", "docstring": ":return:\n A unicode string of EC curve name", "id": "f9489:c2:m2"} {"signature": "@propertydef bit_size(self):", "body": "return self.asn1.public_key.bit_size", "docstring": ":return:\n The number of bits in the key, as an integer", "id": "f9489:c2:m3"} {"signature": "@propertydef byte_size(self):", "body": "return self.asn1.public_key.byte_size", "docstring": ":return:\n The number of bytes in the key, as an integer", "id": "f9489:c2:m4"} {"signature": "@propertydef self_signed(self):", "body": "if self._self_signed is None:self._self_signed = Falseif self.asn1.self_signed in set(['', '']):signature_algo = self.asn1[''].signature_algohash_algo = self.asn1[''].hash_algoif signature_algo == '':verify_func = rsa_pkcs1v15_verifyelif signature_algo == '':verify_func = dsa_verifyelif signature_algo == '':verify_func = ecdsa_verifyelse:raise OSError(pretty_message('''''',signature_algo))try:verify_func(self,self.asn1[''].native,self.asn1[''].dump(),hash_algo)self._self_signed = Trueexcept (SignatureError):passreturn self._self_signed", "docstring": ":return:\n A boolean - if the certificate is self-signed", "id": "f9489:c2:m5"} {"signature": "def handle_error(result):", "body": "if result:return_, error_string = get_error()if not isinstance(error_string, str_cls):error_string = _try_decode(error_string)raise OSError(error_string)", "docstring": "Extracts the last Windows error message into a python unicode string\n\n:param result:\n A function result, 0 or None indicates failure\n\n:return:\n A unicode string error message", "id": "f9492:m0"} {"signature": "def aes_cbc_no_padding_encrypt(key, data, iv):", "body": "if len(key) not in [, , ]:raise ValueError(pretty_message('''''',len(key)))if not iv:iv = rand_bytes()elif len(iv) != :raise ValueError(pretty_message('''''',len(iv)))if len(data) % != :raise ValueError(pretty_message('''''',len(data)))return (iv, _encrypt('', key, data, iv, False))", "docstring": "Encrypts plaintext using AES in CBC mode with a 128, 192 or 256 bit key and\nno padding. 
This means the ciphertext must be an exact multiple of 16 bytes\nlong.\n\n:param key:\n The encryption key - a byte string either 16, 24 or 32 bytes long\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The initialization vector - either a byte string 16-bytes long or None\n to generate an IV\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A tuple of two byte strings (iv, ciphertext)", "id": "f9494:m0"} {"signature": "def aes_cbc_no_padding_decrypt(key, data, iv):", "body": "if len(key) not in [, , ]:raise ValueError(pretty_message('''''',len(key)))if len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return _decrypt('', key, data, iv, False)", "docstring": "Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key and no\npadding.\n\n:param key:\n The encryption key - a byte string either 16, 24 or 32 bytes long\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector - a byte string 16-bytes long\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9494:m1"} {"signature": "def aes_cbc_pkcs7_encrypt(key, data, iv):", "body": "if len(key) not in [, , ]:raise ValueError(pretty_message('''''',len(key)))if not iv:iv = rand_bytes()elif len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return (iv, _encrypt('', key, data, iv, True))", "docstring": "Encrypts plaintext using AES in CBC mode with a 128, 192 or 256 bit key and\nPKCS#7 padding.\n\n:param key:\n The encryption key - a byte string either 16, 24 or 32 bytes long\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The initialization vector - either a byte string 16-bytes long or None\n to generate an IV\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A tuple of two byte strings (iv, ciphertext)", "id": "f9494:m2"} {"signature": "def aes_cbc_pkcs7_decrypt(key, data, iv):", "body": "if len(key) not in [, , ]:raise ValueError(pretty_message('''''',len(key)))if len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return _decrypt('', key, data, iv, True)", "docstring": "Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key\n\n:param key:\n The encryption key - a byte string either 16, 24 or 32 bytes long\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector - a byte string 16-bytes long\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9494:m3"} {"signature": "def rc4_encrypt(key, data):", "body": "if len(key) < or len(key) > :raise ValueError(pretty_message('''''',len(key)))return _encrypt('', key, data, None, None)", "docstring": "Encrypts plaintext using RC4 with a 40-128 bit key\n\n:param key:\n The encryption key - a byte string 5-16 bytes long\n\n:param data:\n The plaintext - a byte string\n\n:raises:\n ValueError - when any of the parameters 
contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the ciphertext", "id": "f9494:m4"} {"signature": "def rc4_decrypt(key, data):", "body": "if len(key) < or len(key) > :raise ValueError(pretty_message('''''',len(key)))return _decrypt('', key, data, None, None)", "docstring": "Decrypts RC4 ciphertext using a 40-128 bit key\n\n:param key:\n The encryption key - a byte string 5-16 bytes long\n\n:param data:\n The ciphertext - a byte string\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9494:m5"} {"signature": "def rc2_cbc_pkcs5_encrypt(key, data, iv):", "body": "if len(key) < or len(key) > :raise ValueError(pretty_message('''''',len(key)))if not iv:iv = rand_bytes()elif len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return (iv, _encrypt('', key, data, iv, True))", "docstring": "Encrypts plaintext using RC2 with a 64 bit key\n\n:param key:\n The encryption key - a byte string 8 bytes long\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The 8-byte initialization vector to use - a byte string - set as None\n to generate an appropriate one\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A tuple of two byte strings (iv, ciphertext)", "id": "f9494:m6"} {"signature": "def rc2_cbc_pkcs5_decrypt(key, data, iv):", "body": "if len(key) < or len(key) > :raise ValueError(pretty_message('''''',len(key)))if len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return _decrypt('', key, data, iv, True)", "docstring": "Decrypts RC2 ciphertext using a 64 bit key\n\n:param key:\n The encryption key - a byte string 8 bytes long\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector used for encryption - a byte string\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9494:m7"} {"signature": "def tripledes_cbc_pkcs5_encrypt(key, data, iv):", "body": "if len(key) != and len(key) != :raise ValueError(pretty_message('''''',len(key)))if not iv:iv = rand_bytes()elif len(iv) != :raise ValueError(pretty_message('''''',len(iv)))cipher = ''if len(key) == :cipher = ''return (iv, _encrypt(cipher, key, data, iv, True))", "docstring": "Encrypts plaintext using 3DES in either 2 or 3 key mode\n\n:param key:\n The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The 8-byte initialization vector to use - a byte string - set as None\n to generate an appropriate one\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A tuple of two byte strings (iv, ciphertext)", "id": "f9494:m8"} {"signature": "def tripledes_cbc_pkcs5_decrypt(key, data, iv):", "body": "if len(key) != and len(key) != :raise 
ValueError(pretty_message('''''',len(key)))if len(iv) != :raise ValueError(pretty_message('''''',len(iv)))cipher = ''if len(key) == :cipher = ''return _decrypt(cipher, key, data, iv, True)", "docstring": "Decrypts 3DES ciphertext in either 2 or 3 key mode\n\n:param key:\n The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector used for encryption - a byte string\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9494:m9"} {"signature": "def des_cbc_pkcs5_encrypt(key, data, iv):", "body": "if len(key) != :raise ValueError(pretty_message('''''',len(key)))if not iv:iv = rand_bytes()elif len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return (iv, _encrypt('', key, data, iv, True))", "docstring": "Encrypts plaintext using DES with a 56 bit key\n\n:param key:\n The encryption key - a byte string 8 bytes long (includes error\n correction bits)\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The 8-byte initialization vector to use - a byte string - set as None\n to generate an appropriate one\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A tuple of two byte strings (iv, ciphertext)", "id": "f9494:m10"} {"signature": "def des_cbc_pkcs5_decrypt(key, data, iv):", "body": "if len(key) != :raise ValueError(pretty_message('''''',len(key)))if len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return _decrypt('', key, data, iv, True)", "docstring": "Decrypts DES ciphertext using a 56 bit key\n\n:param key:\n The encryption key - a byte string 8 bytes long (includes error\n correction bits)\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector used for encryption - a byte string\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9494:m11"} {"signature": "def _advapi32_create_handles(cipher, key, iv):", "body": "context_handle = Noneif cipher == '':algorithm_id = {: Advapi32Const.CALG_AES_128,: Advapi32Const.CALG_AES_192,: Advapi32Const.CALG_AES_256,}[len(key)]else:algorithm_id = {'': Advapi32Const.CALG_DES,'': Advapi32Const.CALG_3DES_112,'': Advapi32Const.CALG_3DES,'': Advapi32Const.CALG_RC2,'': Advapi32Const.CALG_RC4,}[cipher]provider = Advapi32Const.MS_ENH_RSA_AES_PROVcontext_handle = open_context_handle(provider, verify_only=False)blob_header_pointer = struct(advapi32, '')blob_header = unwrap(blob_header_pointer)blob_header.bType = Advapi32Const.PLAINTEXTKEYBLOBblob_header.bVersion = Advapi32Const.CUR_BLOB_VERSIONblob_header.reserved = blob_header.aiKeyAlg = algorithm_idblob_struct_pointer = struct(advapi32, '')blob_struct = unwrap(blob_struct_pointer)blob_struct.hdr = blob_headerblob_struct.dwKeySize = len(key)blob = struct_bytes(blob_struct_pointer) + keyflags = if cipher in set(['', '']) and len(key) == :flags = Advapi32Const.CRYPT_NO_SALTkey_handle_pointer = new(advapi32, '')res = 
advapi32.CryptImportKey(context_handle,blob,len(blob),null(),flags,key_handle_pointer)handle_error(res)key_handle = unwrap(key_handle_pointer)if cipher == '':buf = new(advapi32, '', len(key) * )res = advapi32.CryptSetKeyParam(key_handle,Advapi32Const.KP_EFFECTIVE_KEYLEN,buf,)handle_error(res)if cipher != '':res = advapi32.CryptSetKeyParam(key_handle,Advapi32Const.KP_IV,iv,)handle_error(res)buf = new(advapi32, '', Advapi32Const.CRYPT_MODE_CBC)res = advapi32.CryptSetKeyParam(key_handle,Advapi32Const.KP_MODE,buf,)handle_error(res)buf = new(advapi32, '', Advapi32Const.PKCS5_PADDING)res = advapi32.CryptSetKeyParam(key_handle,Advapi32Const.KP_PADDING,buf,)handle_error(res)return (context_handle, key_handle)", "docstring": "Creates an HCRYPTPROV and HCRYPTKEY for symmetric encryption/decryption. The\nHCRYPTPROV must be released by close_context_handle() and the\nHCRYPTKEY must be released by advapi32.CryptDestroyKey() when done.\n\n:param cipher:\n A unicode string of \"aes\", \"des\", \"tripledes_2key\", \"tripledes_3key\",\n \"rc2\", \"rc4\"\n\n:param key:\n A byte string of the symmetric key\n\n:param iv:\n The initialization vector - a byte string - unused for RC4\n\n:return:\n A tuple of (HCRYPTPROV, HCRYPTKEY)", "id": "f9494:m12"} {"signature": "def _bcrypt_create_key_handle(cipher, key):", "body": "alg_handle = Nonealg_constant = {'': BcryptConst.BCRYPT_AES_ALGORITHM,'': BcryptConst.BCRYPT_DES_ALGORITHM,'': BcryptConst.BCRYPT_3DES_112_ALGORITHM,'': BcryptConst.BCRYPT_3DES_ALGORITHM,'': BcryptConst.BCRYPT_RC2_ALGORITHM,'': BcryptConst.BCRYPT_RC4_ALGORITHM,}[cipher]try:alg_handle = open_alg_handle(alg_constant)blob_type = BcryptConst.BCRYPT_KEY_DATA_BLOBblob_struct_pointer = struct(bcrypt, '')blob_struct = unwrap(blob_struct_pointer)blob_struct.dwMagic = BcryptConst.BCRYPT_KEY_DATA_BLOB_MAGICblob_struct.dwVersion = BcryptConst.BCRYPT_KEY_DATA_BLOB_VERSION1blob_struct.cbKeyData = len(key)blob = struct_bytes(blob_struct_pointer) + keyif cipher == '':buf = new(bcrypt, '', len(key) * )res = bcrypt.BCryptSetProperty(alg_handle,BcryptConst.BCRYPT_EFFECTIVE_KEY_LENGTH,buf,,)handle_error(res)key_handle_pointer = new(bcrypt, '')res = bcrypt.BCryptImportKey(alg_handle,null(),blob_type,key_handle_pointer,null(),,blob,len(blob),)handle_error(res)return unwrap(key_handle_pointer)finally:if alg_handle:close_alg_handle(alg_handle)", "docstring": "Creates a BCRYPT_KEY_HANDLE for symmetric encryption/decryption. 
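To make the key-import step concrete, this sketch reproduces the PLAINTEXTKEYBLOB layout that _advapi32_create_handles() hands to CryptImportKey(); the constants are taken from wincrypt.h and should be checked against the SDK headers:

import struct

PLAINTEXTKEYBLOB = 0x08   # bType for a raw symmetric key blob
CUR_BLOB_VERSION = 0x02
CALG_AES_256 = 0x00006610

def build_plaintext_key_blob(key, alg_id=CALG_AES_256):
    # BLOBHEADER: BYTE bType, BYTE bVersion, WORD reserved, ALG_ID aiKeyAlg
    header = struct.pack('<BBHI', PLAINTEXTKEYBLOB, CUR_BLOB_VERSION, 0, alg_id)
    # PLAINTEXTKEYBLOB appends a DWORD key length followed by the raw key bytes
    return header + struct.pack('<I', len(key)) + key

blob = build_plaintext_key_blob(b'\x00' * 32)
assert len(blob) == 8 + 4 + 32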
The\nhandle must be released by bcrypt.BCryptDestroyKey() when done.\n\n:param cipher:\n A unicode string of \"aes\", \"des\", \"tripledes_2key\", \"tripledes_3key\",\n \"rc2\", \"rc4\"\n\n:param key:\n A byte string of the symmetric key\n\n:return:\n A BCRYPT_KEY_HANDLE", "id": "f9494:m13"} {"signature": "def _encrypt(cipher, key, data, iv, padding):", "body": "if not isinstance(key, byte_cls):raise TypeError(pretty_message('''''',type_name(key)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))if cipher != '' and not isinstance(iv, byte_cls):raise TypeError(pretty_message('''''',type_name(iv)))if cipher != '' and not padding:raise ValueError('')if _backend == '':return _advapi32_encrypt(cipher, key, data, iv, padding)return _bcrypt_encrypt(cipher, key, data, iv, padding)", "docstring": "Encrypts plaintext\n\n:param cipher:\n A unicode string of \"aes\", \"des\", \"tripledes_2key\", \"tripledes_3key\",\n \"rc2\", \"rc4\"\n\n:param key:\n The encryption key - a byte string 5-16 bytes long\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The initialization vector - a byte string - unused for RC4\n\n:param padding:\n Boolean, if padding should be used - unused for RC4\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the ciphertext", "id": "f9494:m14"} {"signature": "def _advapi32_encrypt(cipher, key, data, iv, padding):", "body": "context_handle = Nonekey_handle = Nonetry:context_handle, key_handle = _advapi32_create_handles(cipher, key, iv)out_len = new(advapi32, '', len(data))res = advapi32.CryptEncrypt(key_handle,null(),True,,null(),out_len,)handle_error(res)buffer_len = deref(out_len)buffer = buffer_from_bytes(buffer_len)write_to_buffer(buffer, data)pointer_set(out_len, len(data))res = advapi32.CryptEncrypt(key_handle,null(),True,,buffer,out_len,buffer_len)handle_error(res)output = bytes_from_buffer(buffer, deref(out_len))if cipher == '' and not padding:if output[-:] != (b'' * ):raise ValueError('')output = output[:-]return outputfinally:if key_handle:advapi32.CryptDestroyKey(key_handle)if context_handle:close_context_handle(context_handle)", "docstring": "Encrypts plaintext via CryptoAPI\n\n:param cipher:\n A unicode string of \"aes\", \"des\", \"tripledes_2key\", \"tripledes_3key\",\n \"rc2\", \"rc4\"\n\n:param key:\n The encryption key - a byte string 5-16 bytes long\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The initialization vector - a byte string - unused for RC4\n\n:param padding:\n Boolean, if padding should be used - unused for RC4\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the ciphertext", "id": "f9494:m15"} {"signature": "def _bcrypt_encrypt(cipher, key, data, iv, padding):", "body": "key_handle = Nonetry:key_handle = _bcrypt_create_key_handle(cipher, key)if iv is None:iv_len = else:iv_len = len(iv)flags = if padding is True:flags = BcryptConst.BCRYPT_BLOCK_PADDINGout_len = new(bcrypt, '')res = bcrypt.BCryptEncrypt(key_handle,data,len(data),null(),null(),,null(),,out_len,flags)handle_error(res)buffer_len = deref(out_len)buffer = buffer_from_bytes(buffer_len)iv_buffer = buffer_from_bytes(iv) if iv else null()res = 
bcrypt.BCryptEncrypt(key_handle,data,len(data),null(),iv_buffer,iv_len,buffer,buffer_len,out_len,flags)handle_error(res)return bytes_from_buffer(buffer, deref(out_len))finally:if key_handle:bcrypt.BCryptDestroyKey(key_handle)", "docstring": "Encrypts plaintext via CNG\n\n:param cipher:\n A unicode string of \"aes\", \"des\", \"tripledes_2key\", \"tripledes_3key\",\n \"rc2\", \"rc4\"\n\n:param key:\n The encryption key - a byte string 5-16 bytes long\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The initialization vector - a byte string - unused for RC4\n\n:param padding:\n Boolean, if padding should be used - unused for RC4\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the ciphertext", "id": "f9494:m16"} {"signature": "def _decrypt(cipher, key, data, iv, padding):", "body": "if not isinstance(key, byte_cls):raise TypeError(pretty_message('''''',type_name(key)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))if cipher != '' and not isinstance(iv, byte_cls):raise TypeError(pretty_message('''''',type_name(iv)))if cipher != '' and padding is None:raise ValueError('')if _backend == '':return _advapi32_decrypt(cipher, key, data, iv, padding)return _bcrypt_decrypt(cipher, key, data, iv, padding)", "docstring": "Decrypts AES/RC4/RC2/3DES/DES ciphertext\n\n:param cipher:\n A unicode string of \"aes\", \"des\", \"tripledes_2key\", \"tripledes_3key\",\n \"rc2\", \"rc4\"\n\n:param key:\n The encryption key - a byte string 5-16 bytes long\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector - a byte string - unused for RC4\n\n:param padding:\n Boolean, if padding should be used - unused for RC4\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9494:m17"} {"signature": "def _advapi32_decrypt(cipher, key, data, iv, padding):", "body": "context_handle = Nonekey_handle = Nonetry:context_handle, key_handle = _advapi32_create_handles(cipher, key, iv)if cipher == '' and not padding:data += (b'' * )buffer = buffer_from_bytes(data)out_len = new(advapi32, '', len(data))res = advapi32.CryptDecrypt(key_handle,null(),True,,buffer,out_len)handle_error(res)return bytes_from_buffer(buffer, deref(out_len))finally:if key_handle:advapi32.CryptDestroyKey(key_handle)if context_handle:close_context_handle(context_handle)", "docstring": "Decrypts AES/RC4/RC2/3DES/DES ciphertext via CryptoAPI\n\n:param cipher:\n A unicode string of \"aes\", \"des\", \"tripledes_2key\", \"tripledes_3key\",\n \"rc2\", \"rc4\"\n\n:param key:\n The encryption key - a byte string 5-16 bytes long\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector - a byte string - unused for RC4\n\n:param padding:\n Boolean, if padding should be used - unused for RC4\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9494:m18"} {"signature": "def _bcrypt_decrypt(cipher, key, data, iv, padding):", "body": "key_handle = 
Nonetry:key_handle = _bcrypt_create_key_handle(cipher, key)if iv is None:iv_len = else:iv_len = len(iv)flags = if padding is True:flags = BcryptConst.BCRYPT_BLOCK_PADDINGout_len = new(bcrypt, '')res = bcrypt.BCryptDecrypt(key_handle,data,len(data),null(),null(),,null(),,out_len,flags)handle_error(res)buffer_len = deref(out_len)buffer = buffer_from_bytes(buffer_len)iv_buffer = buffer_from_bytes(iv) if iv else null()res = bcrypt.BCryptDecrypt(key_handle,data,len(data),null(),iv_buffer,iv_len,buffer,buffer_len,out_len,flags)handle_error(res)return bytes_from_buffer(buffer, deref(out_len))finally:if key_handle:bcrypt.BCryptDestroyKey(key_handle)", "docstring": "Decrypts AES/RC4/RC2/3DES/DES ciphertext via CNG\n\n:param cipher:\n A unicode string of \"aes\", \"des\", \"tripledes_2key\", \"tripledes_3key\",\n \"rc2\", \"rc4\"\n\n:param key:\n The encryption key - a byte string 5-16 bytes long\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector - a byte string - unused for RC4\n\n:param padding:\n Boolean, if padding should be used - unused for RC4\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9494:m19"} {"signature": "def parse_public(data):", "body": "if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))key_type = Noneif data[:] == b'':key_type, algo, data = _unarmor_pem(data)if key_type == '':raise ValueError(pretty_message(''''''))if algo == '':return keys.PublicKeyInfo.wrap(data, '')if key_type is None or key_type == '':try:pki = keys.PublicKeyInfo.load(data)pki.nativereturn pkiexcept (ValueError):pass try:rpk = keys.RSAPublicKey.load(data)rpk.nativereturn keys.PublicKeyInfo.wrap(rpk, '')except (ValueError):pass if key_type is None or key_type == '':try:parsed_cert = x509.Certificate.load(data)key_info = parsed_cert['']['']return key_infoexcept (ValueError):pass raise ValueError('')", "docstring": "Loads a public key from a DER or PEM-formatted file. Supports RSA, DSA and\nEC public keys. For RSA keys, both the old RSAPublicKey and\nSubjectPublicKeyInfo structures are supported. Also allows extracting a\npublic key from an X.509 certificate.\n\n:param data:\n A byte string to load the public key from\n\n:raises:\n ValueError - when the data does not appear to contain a public key\n\n:return:\n An asn1crypto.keys.PublicKeyInfo object", "id": "f9495:m0"} {"signature": "def parse_certificate(data):", "body": "if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))key_type = Noneif data[:] == b'':key_type, _, data = _unarmor_pem(data)if key_type == '':raise ValueError(pretty_message(''''''))if key_type == '':raise ValueError(pretty_message(''''''))if key_type is None or key_type == '':try:return x509.Certificate.load(data)except (ValueError):pass raise ValueError(pretty_message(''''''))", "docstring": "Loads a certificate from a DER or PEM-formatted file. 
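A usage sketch for the parsers above, assuming they are exposed as oscrypto.keys; the certificate file name is a placeholder and either PEM or DER bytes are accepted:

from oscrypto import keys

with open('server.crt', 'rb') as f:        # hypothetical certificate file
    cert = keys.parse_certificate(f.read())

# parse_public() accepts the same bytes and extracts just the public key
with open('server.crt', 'rb') as f:
    public_key_info = keys.parse_public(f.read())

print(cert.subject.native)                 # asn1crypto.x509.Certificate
print(public_key_info.algorithm)           # 'rsa', 'dsa' or 'ec'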
Supports X.509\ncertificates only.\n\n:param data:\n A byte string to load the certificate from\n\n:raises:\n ValueError - when the data does not appear to contain a certificate\n\n:return:\n An asn1crypto.x509.Certificate object", "id": "f9495:m1"} {"signature": "def parse_private(data, password=None):", "body": "if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))if password is not None:if not isinstance(password, byte_cls):raise TypeError(pretty_message('''''',type_name(password)))else:password = b''if data[:] == b'':key_type, _, data = _unarmor_pem(data, password)if key_type == '':raise ValueError(pretty_message(''''''))if key_type == '':raise ValueError(pretty_message(''''''))try:pki = keys.PrivateKeyInfo.load(data)pki.nativereturn pkiexcept (ValueError):pass try:parsed_wrapper = keys.EncryptedPrivateKeyInfo.load(data)encryption_algorithm_info = parsed_wrapper['']encrypted_data = parsed_wrapper[''].nativedecrypted_data = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_data, password)pki = keys.PrivateKeyInfo.load(decrypted_data)pki.nativereturn pkiexcept (ValueError):pass try:parsed = keys.RSAPrivateKey.load(data)parsed.nativereturn keys.PrivateKeyInfo.wrap(parsed, '')except (ValueError):pass try:parsed = keys.DSAPrivateKey.load(data)parsed.nativereturn keys.PrivateKeyInfo.wrap(parsed, '')except (ValueError):pass try:parsed = keys.ECPrivateKey.load(data)parsed.nativereturn keys.PrivateKeyInfo.wrap(parsed, '')except (ValueError):pass raise ValueError(pretty_message(''''''))", "docstring": "Loads a private key from a DER or PEM-formatted file. Supports RSA, DSA and\nEC private keys. Works with the follow formats:\n\n - RSAPrivateKey (PKCS#1)\n - ECPrivateKey (SECG SEC1 V2)\n - DSAPrivateKey (OpenSSL)\n - PrivateKeyInfo (RSA/DSA/EC - PKCS#8)\n - EncryptedPrivateKeyInfo (RSA/DSA/EC - PKCS#8)\n - Encrypted RSAPrivateKey (PEM only, OpenSSL)\n - Encrypted DSAPrivateKey (PEM only, OpenSSL)\n - Encrypted ECPrivateKey (PEM only, OpenSSL)\n\n:param data:\n A byte string to load the private key from\n\n:param password:\n The password to unencrypt the private key\n\n:raises:\n ValueError - when the data does not appear to contain a private key, or the password is invalid\n\n:return:\n An asn1crypto.keys.PrivateKeyInfo object", "id": "f9495:m2"} {"signature": "def _unarmor_pem(data, password=None):", "body": "object_type, headers, der_bytes = pem.unarmor(data)type_regex = ''armor_type = re.match(type_regex, object_type)if not armor_type:raise ValueError(pretty_message(''''''))pem_header = armor_type.group()data = data.strip()if pem_header in set(['', '', '']):algo = armor_type.group().lower()return ('', algo, _unarmor_pem_openssl_private(headers, der_bytes, password))key_type = pem_header.lower()algo = Noneif key_type == '':key_type = ''elif key_type == '':key_type = ''algo = ''return (key_type, algo, der_bytes)", "docstring": "Removes PEM-encoding from a public key, private key or certificate. If the\nprivate key is encrypted, the password will be used to decrypt it.\n\n:param data:\n A byte string of the PEM-encoded data\n\n:param password:\n A byte string of the encryption password, or None\n\n:return:\n A 3-element tuple in the format: (key_type, algorithm, der_bytes). The\n key_type will be a unicode string of \"public key\", \"private key\" or\n \"certificate\". 
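And a sketch for loading an encrypted private key with parse_private(); the path is a placeholder and the password must be a byte string:

from oscrypto import keys

with open('encrypted.key', 'rb') as f:     # hypothetical PKCS#8 or OpenSSL-style PEM
    private_key_info = keys.parse_private(f.read(), password=b'correct horse')

print(private_key_info.algorithm)          # 'rsa', 'dsa' or 'ec'
print(private_key_info.bit_size)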
The algorithm will be a unicode string of \"rsa\", \"dsa\"\n or \"ec\".", "id": "f9495:m3"} {"signature": "def _unarmor_pem_openssl_private(headers, data, password):", "body": "enc_algo = Noneenc_iv_hex = Noneenc_iv = Noneif '' in headers:params = headers['']if params.find('') != -:enc_algo, enc_iv_hex = params.strip().split('')else:enc_algo = ''if not enc_algo:return dataif enc_iv_hex:enc_iv = binascii.unhexlify(enc_iv_hex.encode(''))enc_algo = enc_algo.lower()enc_key_length = {'': ,'': ,'': ,'': ,'': ,'': ,'': ,'': ,'': ,'': ,'': ,'': ,'': ,'': ,'': ,'': ,'': ,'': ,'': ,}[enc_algo]enc_key = hashlib.md5(password + enc_iv[:]).digest()while enc_key_length > len(enc_key):enc_key += hashlib.md5(enc_key + password + enc_iv[:]).digest()enc_key = enc_key[:enc_key_length]enc_algo_name = {'': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '','': '',}[enc_algo]decrypt_func = crypto_funcs[enc_algo_name]if enc_algo_name == '':return decrypt_func(enc_key, data)return decrypt_func(enc_key, data, enc_iv)", "docstring": "Parses a PKCS#1 private key, or encrypted private key\n\n:param headers:\n A dict of \"Name: Value\" lines from right after the PEM header\n\n:param data:\n A byte string of the DER-encoded PKCS#1 private key\n\n:param password:\n A byte string of the password to use if the private key is encrypted\n\n:return:\n A byte string of the DER-encoded private key", "id": "f9495:m4"} {"signature": "def parse_pkcs12(data, password=None):", "body": "if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))if password is not None:if not isinstance(password, byte_cls):raise TypeError(pretty_message('''''',type_name(password)))else:password = b''certs = {}private_keys = {}pfx = pkcs12.Pfx.load(data)auth_safe = pfx['']if auth_safe[''].native != '':raise ValueError(pretty_message(''''''))authenticated_safe = pfx.authenticated_safemac_data = pfx['']if mac_data:mac_algo = mac_data[''][''][''].nativekey_length = {'': ,'': ,'': ,'': ,'': ,'': ,'': ,}[mac_algo]mac_key = pkcs12_kdf(mac_algo,password,mac_data[''].native,mac_data[''].native,key_length, )hash_mod = getattr(hashlib, mac_algo)computed_hmac = hmac.new(mac_key, auth_safe[''].contents, hash_mod).digest()stored_hmac = mac_data[''][''].nativeif not constant_compare(computed_hmac, stored_hmac):raise ValueError('')for content_info in authenticated_safe:content = content_info['']if isinstance(content, core.OctetString):_parse_safe_contents(content.native, certs, private_keys, password)elif isinstance(content, cms.EncryptedData):encrypted_content_info = content['']encryption_algorithm_info = encrypted_content_info['']encrypted_content = encrypted_content_info[''].nativedecrypted_content = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password)_parse_safe_contents(decrypted_content, certs, private_keys, password)else:raise ValueError(pretty_message(''''''))key_fingerprints = set(private_keys.keys())cert_fingerprints = set(certs.keys())common_fingerprints = sorted(list(key_fingerprints & cert_fingerprints))key = Nonecert = Noneother_certs = []if len(common_fingerprints) >= :fingerprint = common_fingerprints[]key = private_keys[fingerprint]cert = certs[fingerprint]other_certs = [certs[f] for f in certs if f != fingerprint]return (key, cert, other_certs)if len(private_keys) > :first_key = sorted(list(private_keys.keys()))[]key = private_keys[first_key]if len(certs) > :first_key = sorted(list(certs.keys()))[]cert = certs[first_key]del 
certs[first_key]if len(certs) > :other_certs = sorted(list(certs.values()))return (key, cert, other_certs)", "docstring": "Parses a PKCS#12 ANS.1 DER-encoded structure and extracts certs and keys\n\n:param data:\n A byte string of a DER-encoded PKCS#12 file\n\n:param password:\n A byte string of the password to any encrypted data\n\n:raises:\n ValueError - when any of the parameters are of the wrong type or value\n OSError - when an error is returned by one of the OS decryption functions\n\n:return:\n A three-element tuple of:\n 1. An asn1crypto.keys.PrivateKeyInfo object\n 2. An asn1crypto.x509.Certificate object\n 3. A list of zero or more asn1crypto.x509.Certificate objects that are\n \"extra\" certificates, possibly intermediates from the cert chain", "id": "f9495:m5"} {"signature": "def _parse_safe_contents(safe_contents, certs, private_keys, password):", "body": "if isinstance(safe_contents, byte_cls):safe_contents = pkcs12.SafeContents.load(safe_contents)for safe_bag in safe_contents:bag_value = safe_bag['']if isinstance(bag_value, pkcs12.CertBag):if bag_value[''].native == '':cert = bag_value[''].parsedpublic_key_info = cert['']['']certs[public_key_info.fingerprint] = bag_value[''].parsedelif isinstance(bag_value, keys.PrivateKeyInfo):private_keys[bag_value.fingerprint] = bag_valueelif isinstance(bag_value, keys.EncryptedPrivateKeyInfo):encryption_algorithm_info = bag_value['']encrypted_key_bytes = bag_value[''].nativedecrypted_key_bytes = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_key_bytes, password)private_key = keys.PrivateKeyInfo.load(decrypted_key_bytes)private_keys[private_key.fingerprint] = private_keyelif isinstance(bag_value, pkcs12.SafeContents):_parse_safe_contents(bag_value, certs, private_keys, password)else:pass", "docstring": "Parses a SafeContents PKCS#12 ANS.1 structure and extracts certs and keys\n\n:param safe_contents:\n A byte string of ber-encoded SafeContents, or a asn1crypto.pkcs12.SafeContents\n parsed object\n\n:param certs:\n A dict to store certificates in\n\n:param keys:\n A dict to store keys in\n\n:param password:\n A byte string of the password to any encrypted data", "id": "f9495:m6"} {"signature": "def _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password):", "body": "decrypt_func = crypto_funcs[encryption_algorithm_info.encryption_cipher]if encryption_algorithm_info.kdf == '':if encryption_algorithm_info.encryption_cipher == '':raise ValueError(pretty_message(''''''))enc_key = pbkdf2(encryption_algorithm_info.kdf_hmac,password,encryption_algorithm_info.kdf_salt,encryption_algorithm_info.kdf_iterations,encryption_algorithm_info.key_length)enc_iv = encryption_algorithm_info.encryption_ivplaintext = decrypt_func(enc_key, encrypted_content, enc_iv)elif encryption_algorithm_info.kdf == '':derived_output = pbkdf1(encryption_algorithm_info.kdf_hmac,password,encryption_algorithm_info.kdf_salt,encryption_algorithm_info.kdf_iterations,encryption_algorithm_info.key_length + )enc_key = derived_output[:]enc_iv = derived_output[:]plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)elif encryption_algorithm_info.kdf == '':enc_key = pkcs12_kdf(encryption_algorithm_info.kdf_hmac,password,encryption_algorithm_info.kdf_salt,encryption_algorithm_info.kdf_iterations,encryption_algorithm_info.key_length, )if encryption_algorithm_info.encryption_cipher == '':plaintext = decrypt_func(enc_key, encrypted_content)else:enc_iv = 
pkcs12_kdf(encryption_algorithm_info.kdf_hmac,password,encryption_algorithm_info.kdf_salt,encryption_algorithm_info.kdf_iterations,encryption_algorithm_info.encryption_block_size, )plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)return plaintext", "docstring": "Decrypts encrypted ASN.1 data\n\n:param encryption_algorithm_info:\n An instance of asn1crypto.pkcs5.Pkcs5EncryptionAlgorithm\n\n:param encrypted_content:\n A byte string of the encrypted content\n\n:param password:\n A byte string of the encrypted content's password\n\n:return:\n A byte string of the decrypted plaintext", "id": "f9495:m7"} {"signature": "def constant_compare(a, b):", "body": "if not isinstance(a, byte_cls):raise TypeError(pretty_message('''''',type_name(a)))if not isinstance(b, byte_cls):raise TypeError(pretty_message('''''',type_name(b)))if len(a) != len(b):return Falseif sys.version_info < (,):a = [ord(char) for char in a]b = [ord(char) for char in b]result = for x, y in zip(a, b):result |= x ^ yreturn result == ", "docstring": "Compares two byte strings in constant time to see if they are equal\n\n:param a:\n The first byte string\n\n:param b:\n The second byte string\n\n:return:\n A boolean if the two byte strings are equal", "id": "f9496:m0"} {"signature": "def get_path(temp_dir=None, cache_length=, cert_callback=None):", "body": "ca_path, temp = _ca_path(temp_dir)if temp and _cached_path_needs_update(ca_path, cache_length):empty_set = set()any_purpose = ''apple_ssl = ''win_server_auth = ''with path_lock:if _cached_path_needs_update(ca_path, cache_length):with open(ca_path, '') as f:for cert, trust_oids, reject_oids in extract_from_system(cert_callback, True):if sys.platform == '':if trust_oids != empty_set and any_purpose not in trust_oidsand apple_ssl not in trust_oids:if cert_callback:cert_callback(Certificate.load(cert), '')continueif reject_oids != empty_set and (apple_ssl in reject_oidsor any_purpose in reject_oids):if cert_callback:cert_callback(Certificate.load(cert), '')continueelif sys.platform == '':if trust_oids != empty_set and any_purpose not in trust_oidsand win_server_auth not in trust_oids:if cert_callback:cert_callback(Certificate.load(cert), '')continueif reject_oids != empty_set and (win_server_auth in reject_oidsor any_purpose in reject_oids):if cert_callback:cert_callback(Certificate.load(cert), '')continueif cert_callback:cert_callback(Certificate.load(cert), None)f.write(armor('', cert))if not ca_path:raise CACertsError('')return ca_path", "docstring": "Get the filesystem path to a file that contains OpenSSL-compatible CA certs.\n\nOn OS X and Windows, there are extracted from the system certificate store\nand cached in a file on the filesystem. This path should not be writable\nby other users, otherwise they could inject CA certs into the trust list.\n\n:param temp_dir:\n The temporary directory to cache the CA certs in on OS X and Windows.\n Needs to have secure permissions so other users can not modify the\n contents.\n\n:param cache_length:\n The number of hours to cache the CA certs on OS X and Windows\n\n:param cert_callback:\n A callback that is called once for each certificate in the trust store.\n It should accept two parameters: an asn1crypto.x509.Certificate object,\n and a reason. 
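For reference, the constant-time comparison above in plain Python 3 form; the standard library's hmac.compare_digest() gives the same guarantee and can be used instead:

import hmac

def constant_compare(a, b):
    # XOR every byte pair and accumulate the differences so the running time
    # does not depend on where the first mismatch occurs
    if len(a) != len(b):
        return False
    result = 0
    for x, y in zip(a, b):
        result |= x ^ y
    return result == 0

assert constant_compare(b'abc', b'abc')
assert not constant_compare(b'abc', b'abd')
assert hmac.compare_digest(b'abc', b'abc')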
The reason will be None if the certificate is being\n exported, otherwise it will be a unicode string of the reason it won't.\n This is only called on Windows and OS X when passed to this function.\n\n:raises:\n oscrypto.errors.CACertsError - when an error occurs exporting/locating certs\n\n:return:\n The full filesystem path to a CA certs file", "id": "f9498:m0"} {"signature": "def get_list(cache_length=, map_vendor_oids=True, cert_callback=None):", "body": "if not _in_memory_up_to_date(cache_length):with memory_lock:if not _in_memory_up_to_date(cache_length):certs = []for cert_bytes, trust_oids, reject_oids in extract_from_system(cert_callback):if map_vendor_oids:trust_oids = _map_oids(trust_oids)reject_oids = _map_oids(reject_oids)certs.append((Certificate.load(cert_bytes), trust_oids, reject_oids))_module_values[''] = certs_module_values[''] = time.time()return list(_module_values[''])", "docstring": "Retrieves (and caches in memory) the list of CA certs from the OS. Includes\ntrust information from the OS - purposes the certificate should be trusted\nor rejected for.\n\nTrust information is encoded via object identifiers (OIDs) that are sourced\nfrom various RFCs and vendors (Apple and Microsoft). This trust information\naugments what is in the certificate itself. Any OID that is in the set of\ntrusted purposes indicates the certificate has been explicitly trusted for\na purpose beyond the extended key purpose extension. Any OID in the reject\nset is a purpose that the certificate should not be trusted for, even if\npresent in the extended key purpose extension.\n\n*A list of common trust OIDs can be found as part of the `KeyPurposeId()`\nclass in the `asn1crypto.x509` module of the `asn1crypto` package.*\n\n:param cache_length:\n The number of hours to cache the CA certs in memory before they are\n refreshed\n\n:param map_vendor_oids:\n A bool indicating if the following mapping of OIDs should happen for\n trust information from the OS trust list:\n - 1.2.840.113635.100.1.3 (apple_ssl) -> 1.3.6.1.5.5.7.3.1 (server_auth)\n - 1.2.840.113635.100.1.3 (apple_ssl) -> 1.3.6.1.5.5.7.3.2 (client_auth)\n - 1.2.840.113635.100.1.8 (apple_smime) -> 1.3.6.1.5.5.7.3.4 (email_protection)\n - 1.2.840.113635.100.1.9 (apple_eap) -> 1.3.6.1.5.5.7.3.13 (eap_over_ppp)\n - 1.2.840.113635.100.1.9 (apple_eap) -> 1.3.6.1.5.5.7.3.14 (eap_over_lan)\n - 1.2.840.113635.100.1.11 (apple_ipsec) -> 1.3.6.1.5.5.7.3.5 (ipsec_end_system)\n - 1.2.840.113635.100.1.11 (apple_ipsec) -> 1.3.6.1.5.5.7.3.6 (ipsec_tunnel)\n - 1.2.840.113635.100.1.11 (apple_ipsec) -> 1.3.6.1.5.5.7.3.7 (ipsec_user)\n - 1.2.840.113635.100.1.11 (apple_ipsec) -> 1.3.6.1.5.5.7.3.17 (ipsec_ike)\n - 1.2.840.113635.100.1.16 (apple_code_signing) -> 1.3.6.1.5.5.7.3.3 (code_signing)\n - 1.2.840.113635.100.1.20 (apple_time_stamping) -> 1.3.6.1.5.5.7.3.8 (time_stamping)\n - 1.3.6.1.4.1.311.10.3.2 (microsoft_time_stamp_signing) -> 1.3.6.1.5.5.7.3.8 (time_stamping)\n\n:param cert_callback:\n A callback that is called once for each certificate in the trust store.\n It should accept two parameters: an asn1crypto.x509.Certificate object,\n and a reason. 
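A usage sketch for get_path() and get_list(), assuming they are exposed as oscrypto.trust_list; the OID shown is the server_auth extended key usage:

from oscrypto import trust_list

# get_path() returns an OpenSSL-compatible PEM bundle, cached on OS X/Windows
ca_bundle_path = trust_list.get_path()

for cert, trust_oids, reject_oids in trust_list.get_list():
    if '1.3.6.1.5.5.7.3.1' in reject_oids:   # explicitly rejected for server_auth
        continue
    print(cert.subject.native.get('common_name', '<no CN>'))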
The reason will be None if the certificate is being\n exported, otherwise it will be a unicode string of the reason it won't.\n\n:raises:\n oscrypto.errors.CACertsError - when an error occurs exporting/locating certs\n\n:return:\n A (copied) list of 3-element tuples containing CA certs from the OS\n trust ilst:\n - 0: an asn1crypto.x509.Certificate object\n - 1: a set of unicode strings of OIDs of trusted purposes\n - 2: a set of unicode strings of OIDs of rejected purposes", "id": "f9498:m1"} {"signature": "def clear_cache(temp_dir=None):", "body": "with memory_lock:_module_values[''] = None_module_values[''] = Noneca_path, temp = _ca_path(temp_dir)if temp:with path_lock:if os.path.exists(ca_path):os.remove(ca_path)", "docstring": "Clears any cached info that was exported from the OS trust store. This will\nensure the latest changes are returned from calls to get_list() and\nget_path(), but at the expense of re-exporting and parsing all certificates.\n\n:param temp_dir:\n The temporary directory to cache the CA certs in on OS X and Windows.\n Needs to have secure permissions so other users can not modify the\n contents. Must be the same value passed to get_path().", "id": "f9498:m2"} {"signature": "def _ca_path(temp_dir=None):", "body": "ca_path = system_path()if ca_path is None:if temp_dir is None:temp_dir = tempfile.gettempdir()if not os.path.isdir(temp_dir):raise CACertsError(pretty_message('''''',temp_dir))ca_path = os.path.join(temp_dir, '')return (ca_path, True)return (ca_path, False)", "docstring": "Returns the file path to the CA certs file\n\n:param temp_dir:\n The temporary directory to cache the CA certs in on OS X and Windows.\n Needs to have secure permissions so other users can not modify the\n contents.\n\n:return:\n A 2-element tuple:\n - 0: A unicode string of the file path\n - 1: A bool if the file is a temporary file", "id": "f9498:m3"} {"signature": "def _map_oids(oids):", "body": "new_oids = set()for oid in oids:if oid in _oid_map:new_oids |= _oid_map[oid]return oids | new_oids", "docstring": "Takes a set of unicode string OIDs and converts vendor-specific OIDs into\ngenerics OIDs from RFCs.\n\n - 1.2.840.113635.100.1.3 (apple_ssl) -> 1.3.6.1.5.5.7.3.1 (server_auth)\n - 1.2.840.113635.100.1.3 (apple_ssl) -> 1.3.6.1.5.5.7.3.2 (client_auth)\n - 1.2.840.113635.100.1.8 (apple_smime) -> 1.3.6.1.5.5.7.3.4 (email_protection)\n - 1.2.840.113635.100.1.9 (apple_eap) -> 1.3.6.1.5.5.7.3.13 (eap_over_ppp)\n - 1.2.840.113635.100.1.9 (apple_eap) -> 1.3.6.1.5.5.7.3.14 (eap_over_lan)\n - 1.2.840.113635.100.1.11 (apple_ipsec) -> 1.3.6.1.5.5.7.3.5 (ipsec_end_system)\n - 1.2.840.113635.100.1.11 (apple_ipsec) -> 1.3.6.1.5.5.7.3.6 (ipsec_tunnel)\n - 1.2.840.113635.100.1.11 (apple_ipsec) -> 1.3.6.1.5.5.7.3.7 (ipsec_user)\n - 1.2.840.113635.100.1.11 (apple_ipsec) -> 1.3.6.1.5.5.7.3.17 (ipsec_ike)\n - 1.2.840.113635.100.1.16 (apple_code_signing) -> 1.3.6.1.5.5.7.3.3 (code_signing)\n - 1.2.840.113635.100.1.20 (apple_time_stamping) -> 1.3.6.1.5.5.7.3.8 (time_stamping)\n - 1.3.6.1.4.1.311.10.3.2 (microsoft_time_stamp_signing) -> 1.3.6.1.5.5.7.3.8 (time_stamping)\n\n:param oids:\n A set of unicode strings\n\n:return:\n The original set of OIDs with any mapped OIDs added", "id": "f9498:m4"} {"signature": "def _cached_path_needs_update(ca_path, cache_length):", "body": "exists = os.path.exists(ca_path)if not exists:return Truestats = os.stat(ca_path)if stats.st_mtime < time.time() - cache_length * * :return Trueif stats.st_size == :return Truereturn False", "docstring": "Checks to see if a cache 
file needs to be refreshed\n\n:param ca_path:\n A unicode string of the path to the cache file\n\n:param cache_length:\n An integer representing the number of hours the cache is valid for\n\n:return:\n A boolean - True if the cache needs to be updated, False if the file\n is up-to-date", "id": "f9498:m5"} {"signature": "def _in_memory_up_to_date(cache_length):", "body": "return (_module_values[''] and_module_values[''] and_module_values[''] > time.time() - (cache_length * * ))", "docstring": "Checks to see if the in-memory cache of certificates is fresh\n\n:param cache_length:\n An integer representing the number of hours the cache is valid for\n\n:return:\n A boolean - True if the cache is up-to-date, False if it needs to be\n refreshed", "id": "f9498:m6"} {"signature": "def system_path():", "body": "ca_path = Nonepaths = ['','','','','','','']if '' in os.environ:paths.insert(, os.environ[''])for path in paths:if os.path.exists(path) and os.path.getsize(path) > :ca_path = pathbreakif not ca_path:raise OSError(pretty_message(''''''))return ca_path", "docstring": "Tries to find a CA certs bundle in common locations\n\n:raises:\n OSError - when no valid CA certs bundle was found on the filesystem\n\n:return:\n The full filesystem path to a CA certs bundle file", "id": "f9501:m0"} {"signature": "def extract_from_system(cert_callback=None, callback_only_on_failure=False):", "body": "all_purposes = ''ca_path = system_path()output = []with open(ca_path, '') as f:for armor_type, _, cert_bytes in unarmor(f.read(), multiple=True):if armor_type == '':if cert_callback:cert_callback(Certificate.load(cert_bytes), None)output.append((cert_bytes, set(), set()))elif armor_type == '':cert, aux = TrustedCertificate.load(cert_bytes)reject_all = Falsetrust_oids = set()reject_oids = set()for purpose in aux['']:if purpose.dotted == all_purposes:trust_oids = set([purpose.dotted])breaktrust_oids.add(purpose.dotted)for purpose in aux['']:if purpose.dotted == all_purposes:reject_all = Truebreakreject_oids.add(purpose.dotted)if reject_all:if cert_callback:cert_callback(cert, '')continueif cert_callback and not callback_only_on_failure:cert_callback(cert, None)output.append((cert.dump(), trust_oids, reject_oids))return output", "docstring": "Extracts trusted CA certs from the system CA cert bundle\n\n:param cert_callback:\n A callback that is called once for each certificate in the trust store.\n It should accept two parameters: an asn1crypto.x509.Certificate object,\n and a reason. 
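The two cache helpers above (_cached_path_needs_update and _in_memory_up_to_date) have their numeric constants elided in these records. A minimal sketch of the file-based check they describe follows, assuming the usual hours-to-seconds conversion of 60 * 60; the function name is illustrative.

# Sketch of an mtime/size based staleness check like _cached_path_needs_update.
# The 60 * 60 factor converting cache_length hours to seconds is an assumption;
# the actual constant is elided in the record above.
import os
import time

def cache_file_is_stale(ca_path, cache_length):
    if not os.path.exists(ca_path):
        return True
    stats = os.stat(ca_path)
    if stats.st_size == 0:
        return True
    return stats.st_mtime < time.time() - cache_length * 60 * 60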
The reason will be None if the certificate is being\n exported, otherwise it will be a unicode string of the reason it won't.\n\n:param callback_only_on_failure:\n A boolean - if the callback should only be called when a certificate is\n not exported.\n\n:return:\n A list of 3-element tuples:\n - 0: a byte string of a DER-encoded certificate\n - 1: a set of unicode strings that are OIDs of purposes to trust the\n certificate for\n - 2: a set of unicode strings that are OIDs of purposes to reject the\n certificate for", "id": "f9501:m1"} {"signature": "def _extract_error():", "body": "error_num = errno()try:error_string = os.strerror(error_num)except (ValueError):return str_cls(error_num)if isinstance(error_string, str_cls):return error_stringreturn _try_decode(error_string)", "docstring": "Extracts the last OS error message into a python unicode string\n\n:return:\n A unicode string error message", "id": "f9502:m1"} {"signature": "def pbkdf2(hash_algorithm, password, salt, iterations, key_length):", "body": "if not isinstance(password, byte_cls):raise TypeError(pretty_message('''''',type_name(password)))if not isinstance(salt, byte_cls):raise TypeError(pretty_message('''''',type_name(salt)))if not isinstance(iterations, int_types):raise TypeError(pretty_message('''''',type_name(iterations)))if iterations < :raise ValueError('')if not isinstance(key_length, int_types):raise TypeError(pretty_message('''''',type_name(key_length)))if key_length < :raise ValueError('')if hash_algorithm not in set(['', '', '', '', '']):raise ValueError(pretty_message('''''',repr(hash_algorithm)))algo = {'': CommonCryptoConst.kCCPRFHmacAlgSHA1,'': CommonCryptoConst.kCCPRFHmacAlgSHA224,'': CommonCryptoConst.kCCPRFHmacAlgSHA256,'': CommonCryptoConst.kCCPRFHmacAlgSHA384,'': CommonCryptoConst.kCCPRFHmacAlgSHA512}[hash_algorithm]output_buffer = buffer_from_bytes(key_length)result = CommonCrypto.CCKeyDerivationPBKDF(CommonCryptoConst.kCCPBKDF2,password,len(password),salt,len(salt),algo,iterations,output_buffer,key_length)if result != :raise OSError(_extract_error())return bytes_from_buffer(output_buffer)", "docstring": "PBKDF2 from PKCS#5\n\n:param hash_algorithm:\n The string name of the hash algorithm to use: \"sha1\", \"sha224\", \"sha256\", \"sha384\", \"sha512\"\n\n:param password:\n A byte string of the password to use an input to the KDF\n\n:param salt:\n A cryptographic random byte string\n\n:param iterations:\n The numbers of iterations to use when deriving the key\n\n:param key_length:\n The length of the desired key in bytes\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n The derived key as a byte string", "id": "f9502:m2"} {"signature": "def rand_bytes(length):", "body": "if not isinstance(length, int_types):raise TypeError(pretty_message('''''',type_name(length)))if length < :raise ValueError('')if length > :raise ValueError('')buffer = buffer_from_bytes(length)result = Security.SecRandomCopyBytes(Security.kSecRandomDefault, length, buffer)if result != :raise OSError(_extract_error())return bytes_from_buffer(buffer)", "docstring": "Returns a number of random bytes suitable for cryptographic purposes\n\n:param length:\n The desired number of bytes\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS 
crypto library\n\n:return:\n A byte string", "id": "f9502:m3"} {"signature": "def _cast_pointer_p(value):", "body": "return cast(value, pointer_p)", "docstring": "Casts a value to a pointer of a pointer\n\n:param value:\n A ctypes object\n\n:return:\n A POINTER(c_void_p) object", "id": "f9504:m0"} {"signature": "@classmethoddef register_native_mapping(cls, type_id, callback):", "body": "cls._native_map[int(type_id)] = callback", "docstring": "Register a function to convert a core foundation data type into its\nequivalent in python\n\n:param type_id:\n The CFTypeId for the type\n\n:param callback:\n A callback to pass the CFType object to", "id": "f9504:c0:m0"} {"signature": "@staticmethoddef cf_number_to_number(value):", "body": "type_ = CoreFoundation.CFNumberGetType(_cast_pointer_p(value))c_type = {: c_byte, : ctypes.c_short, : ctypes.c_int32, : ctypes.c_int64, : ctypes.c_float, : ctypes.c_double, : c_byte, : ctypes.c_short, : ctypes.c_int, : c_long, : ctypes.c_longlong, : ctypes.c_float, : ctypes.c_double, : c_long, : ctypes.c_int, : ctypes.c_double, }[type_]output = c_type()CoreFoundation.CFNumberGetValue(_cast_pointer_p(value), type_, byref(output))return output.value", "docstring": "Converts a CFNumber object to a python float or integer\n\n:param value:\n The CFNumber object\n\n:return:\n A python number (float or integer)", "id": "f9504:c0:m1"} {"signature": "@staticmethoddef cf_dictionary_to_dict(dictionary):", "body": "dict_length = CoreFoundation.CFDictionaryGetCount(dictionary)keys = (CFTypeRef * dict_length)()values = (CFTypeRef * dict_length)()CoreFoundation.CFDictionaryGetKeysAndValues(dictionary,_cast_pointer_p(keys),_cast_pointer_p(values))output = {}for index in range(, dict_length):output[CFHelpers.native(keys[index])] = CFHelpers.native(values[index])return output", "docstring": "Converts a CFDictionary object into a python dictionary\n\n:param dictionary:\n The CFDictionary to convert\n\n:return:\n A python dict", "id": "f9504:c0:m2"} {"signature": "@classmethoddef native(cls, value):", "body": "type_id = CoreFoundation.CFGetTypeID(value)if type_id in cls._native_map:return cls._native_map[type_id](value)else:return value", "docstring": "Converts a CF* object into its python equivalent\n\n:param value:\n The CF* object to convert\n\n:return:\n The native python object", "id": "f9504:c0:m3"} {"signature": "@staticmethoddef cf_string_to_unicode(value):", "body": "string = CoreFoundation.CFStringGetCStringPtr(_cast_pointer_p(value),kCFStringEncodingUTF8)if string is None:buffer = buffer_from_bytes()result = CoreFoundation.CFStringGetCString(_cast_pointer_p(value),buffer,,kCFStringEncodingUTF8)if not result:raise OSError('')string = byte_string_from_buffer(buffer)if string is not None:string = string.decode('')return string", "docstring": "Creates a python unicode string from a CFString object\n\n:param value:\n The CFString to convert\n\n:return:\n A python unicode string", "id": "f9504:c0:m4"} {"signature": "@staticmethoddef cf_string_from_unicode(string):", "body": "return CoreFoundation.CFStringCreateWithCString(CoreFoundation.kCFAllocatorDefault,string.encode(''),kCFStringEncodingUTF8)", "docstring": "Creates a CFStringRef object from a unicode string\n\n:param string:\n The unicode string to create the CFString object from\n\n:return:\n A CFStringRef", "id": "f9504:c0:m5"} {"signature": "@staticmethoddef cf_data_to_bytes(value):", "body": "start = CoreFoundation.CFDataGetBytePtr(value)num_bytes = CoreFoundation.CFDataGetLength(value)return string_at(start, 
num_bytes)", "docstring": "Extracts a bytestring from a CFData object\n\n:param value:\n A CFData object\n\n:return:\n A byte string", "id": "f9504:c0:m6"} {"signature": "@staticmethoddef cf_data_from_bytes(bytes_):", "body": "return CoreFoundation.CFDataCreate(CoreFoundation.kCFAllocatorDefault,bytes_,len(bytes_))", "docstring": "Creates a CFDataRef object from a byte string\n\n:param bytes_:\n The data to create the CFData object from\n\n:return:\n A CFDataRef", "id": "f9504:c0:m7"} {"signature": "@staticmethoddef cf_dictionary_from_pairs(pairs):", "body": "length = len(pairs)keys = []values = []for pair in pairs:key, value = pairkeys.append(key)values.append(value)keys = (CFStringRef * length)(*keys)values = (CFTypeRef * length)(*values)return CoreFoundation.CFDictionaryCreate(CoreFoundation.kCFAllocatorDefault,_cast_pointer_p(byref(keys)),_cast_pointer_p(byref(values)),length,kCFTypeDictionaryKeyCallBacks,kCFTypeDictionaryValueCallBacks)", "docstring": "Creates a CFDictionaryRef object from a list of 2-element tuples\nrepresenting the key and value. Each key should be a CFStringRef and each\nvalue some sort of CF* type.\n\n:param pairs:\n A list of 2-element tuples\n\n:return:\n A CFDictionaryRef", "id": "f9504:c0:m8"} {"signature": "@staticmethoddef cf_array_from_list(values):", "body": "length = len(values)values = (CFTypeRef * length)(*values)return CoreFoundation.CFArrayCreate(CoreFoundation.kCFAllocatorDefault,_cast_pointer_p(byref(values)),length,kCFTypeArrayCallBacks)", "docstring": "Creates a CFArrayRef object from a list of CF* type objects.\n\n:param values:\n A list of CF* type object\n\n:return:\n A CFArrayRef", "id": "f9504:c0:m9"} {"signature": "@staticmethoddef cf_number_from_integer(integer):", "body": "integer_as_long = c_long(integer)return CoreFoundation.CFNumberCreate(CoreFoundation.kCFAllocatorDefault,kCFNumberCFIndexType,byref(integer_as_long))", "docstring": "Creates a CFNumber object from an integer\n\n:param integer:\n The integer to create the CFNumber for\n\n:return:\n A CFNumber", "id": "f9504:c0:m10"} {"signature": "def extract_from_system(cert_callback=None, callback_only_on_failure=False):", "body": "certs_pointer_pointer = new(CoreFoundation, '')res = Security.SecTrustCopyAnchorCertificates(certs_pointer_pointer)handle_sec_error(res)certs_pointer = unwrap(certs_pointer_pointer)certificates = {}trust_info = {}all_purposes = ''default_trust = (set(), set())length = CoreFoundation.CFArrayGetCount(certs_pointer)for index in range(, length):cert_pointer = CoreFoundation.CFArrayGetValueAtIndex(certs_pointer, index)der_cert, cert_hash = _cert_details(cert_pointer)certificates[cert_hash] = der_certCoreFoundation.CFRelease(certs_pointer)for domain in [SecurityConst.kSecTrustSettingsDomainUser, SecurityConst.kSecTrustSettingsDomainAdmin]:cert_trust_settings_pointer_pointer = new(CoreFoundation, '')res = Security.SecTrustSettingsCopyCertificates(domain, cert_trust_settings_pointer_pointer)if res == SecurityConst.errSecNoTrustSettings:continuehandle_sec_error(res)cert_trust_settings_pointer = unwrap(cert_trust_settings_pointer_pointer)length = CoreFoundation.CFArrayGetCount(cert_trust_settings_pointer)for index in range(, length):cert_pointer = CoreFoundation.CFArrayGetValueAtIndex(cert_trust_settings_pointer, index)trust_settings_pointer_pointer = new(CoreFoundation, '')res = Security.SecTrustSettingsCopyTrustSettings(cert_pointer, domain, trust_settings_pointer_pointer)if res == SecurityConst.errSecItemNotFound:continueif res == 
SecurityConst.errSecInvalidTrustSettings:der_cert, cert_hash = _cert_details(cert_pointer)if cert_hash in certificates:_cert_callback(cert_callback,certificates[cert_hash],'')del certificates[cert_hash]continuehandle_sec_error(res)trust_settings_pointer = unwrap(trust_settings_pointer_pointer)trust_oids = set()reject_oids = set()settings_length = CoreFoundation.CFArrayGetCount(trust_settings_pointer)for settings_index in range(, settings_length):settings_dict_entry = CoreFoundation.CFArrayGetValueAtIndex(trust_settings_pointer, settings_index)settings_dict = CFHelpers.cf_dictionary_to_dict(settings_dict_entry)policy_oid = settings_dict.get('', {}).get('', all_purposes)trust_result = settings_dict.get('', )should_trust = trust_result != and trust_result != if should_trust:trust_oids.add(policy_oid)else:reject_oids.add(policy_oid)der_cert, cert_hash = _cert_details(cert_pointer)if all_purposes in reject_oids:if cert_hash in certificates:_cert_callback(cert_callback,certificates[cert_hash],'')del certificates[cert_hash]else:if all_purposes in trust_oids:trust_oids = set([all_purposes])trust_info[cert_hash] = (trust_oids, reject_oids)CoreFoundation.CFRelease(trust_settings_pointer)CoreFoundation.CFRelease(cert_trust_settings_pointer)output = []for cert_hash in certificates:if not callback_only_on_failure:_cert_callback(cert_callback, certificates[cert_hash], None)cert_trust_info = trust_info.get(cert_hash, default_trust)output.append((certificates[cert_hash], cert_trust_info[], cert_trust_info[]))return output", "docstring": "Extracts trusted CA certificates from the OS X trusted root keychain.\n\n:param cert_callback:\n A callback that is called once for each certificate in the trust store.\n It should accept two parameters: an asn1crypto.x509.Certificate object,\n and a reason. 
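For reference, the DER certificates yielded by these extract_from_system() implementations can be re-armored into an OpenSSL-style bundle with asn1crypto, much as get_path() does earlier in this document. The import path below is an assumption (the platform-specific implementation is selected at runtime), and writing every certificate unconditionally is a simplification that ignores the trust and reject OID sets.

# Illustrative only: re-armoring extracted roots into a PEM bundle.
# The import path is assumed, not necessarily the public API surface.
from asn1crypto import pem
from oscrypto.trust_list import extract_from_system

with open('ca-bundle.pem', 'wb') as f:
    for der_cert, trust_oids, reject_oids in extract_from_system():
        # Trust/reject purpose OIDs are ignored here for brevity
        f.write(pem.armor('CERTIFICATE', der_cert))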
The reason will be None if the certificate is being\n exported, otherwise it will be a unicode string of the reason it won't.\n\n:param callback_only_on_failure:\n A boolean - if the callback should only be called when a certificate is\n not exported.\n\n:raises:\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A list of 3-element tuples:\n - 0: a byte string of a DER-encoded certificate\n - 1: a set of unicode strings that are OIDs of purposes to trust the\n certificate for\n - 2: a set of unicode strings that are OIDs of purposes to reject the\n certificate for", "id": "f9505:m1"} {"signature": "def _cert_callback(callback, der_cert, reason):", "body": "if not callback:returncallback(x509.Certificate.load(der_cert), reason)", "docstring": "Constructs an asn1crypto.x509.Certificate object and calls the export\ncallback\n\n:param callback:\n The callback to call\n\n:param der_cert:\n A byte string of the DER-encoded certificate\n\n:param reason:\n None if cert is being exported, or a unicode string of the reason it\n is not being exported", "id": "f9505:m2"} {"signature": "def _cert_details(cert_pointer):", "body": "data_pointer = Nonetry:data_pointer = Security.SecCertificateCopyData(cert_pointer)der_cert = CFHelpers.cf_data_to_bytes(data_pointer)cert_hash = hashlib.sha1(der_cert).digest()return (der_cert, cert_hash)finally:if data_pointer is not None:CoreFoundation.CFRelease(data_pointer)", "docstring": "Return the certificate and a hash of it\n\n:param cert_pointer:\n A SecCertificateRef\n\n:return:\n A 2-element tuple:\n - [0]: A byte string of the SHA1 hash of the cert\n - [1]: A byte string of the DER-encoded contents of the cert", "id": "f9505:m3"} {"signature": "def handle_cf_error(error_pointer):", "body": "if is_null(error_pointer):returnerror = unwrap(error_pointer)if is_null(error):returncf_string_domain = CoreFoundation.CFErrorGetDomain(error)domain = CFHelpers.cf_string_to_unicode(cf_string_domain)CoreFoundation.CFRelease(cf_string_domain)num = CoreFoundation.CFErrorGetCode(error)cf_string_ref = CoreFoundation.CFErrorCopyDescription(error)output = CFHelpers.cf_string_to_unicode(cf_string_ref)CoreFoundation.CFRelease(cf_string_ref)if output is None:if domain == '':code_map = {-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',-: '',}if num in code_map:output = code_map[num]if not output:output = '' % (domain, num)raise OSError(output)", "docstring": "Checks a CFErrorRef and throws an exception if there is an error to report\n\n:param error_pointer:\n A CFErrorRef\n\n:raises:\n OSError - when the CFErrorRef contains an error", "id": "f9506:m0"} {"signature": "def _read_callback(connection_id, data_buffer, 
data_length_pointer):", "body": "self = Nonetry:self = _connection_refs.get(connection_id)if not self:socket = _socket_refs.get(connection_id)else:socket = self._socketif not self and not socket:return bytes_requested = deref(data_length_pointer)timeout = socket.gettimeout()error = Nonedata = b''try:while len(data) < bytes_requested:if timeout is not None and timeout > :read_ready, _, _ = select.select([socket], [], [], timeout)if len(read_ready) == :raise socket_.error(errno.EAGAIN, '')chunk = socket.recv(bytes_requested - len(data))data += chunkif chunk == b'':if len(data) == :if timeout is None:return SecurityConst.errSSLClosedNoNotifyreturn SecurityConst.errSSLClosedAbortbreakexcept (socket_.error) as e:error = e.errnoif error is not None and error != errno.EAGAIN:if error == errno.ECONNRESET or error == errno.EPIPE:return SecurityConst.errSSLClosedNoNotifyreturn SecurityConst.errSSLClosedAbortif self and not self._done_handshake:if len(data) >= and len(self._server_hello) == :valid_record_type = data[:] in set([b'', b''])valid_protocol_version = data[:] in set([b'',b'',b'',b'',b''])if not valid_record_type or not valid_protocol_version:self._server_hello += data + _read_remaining(socket)return SecurityConst.errSSLProtocolself._server_hello += datawrite_to_buffer(data_buffer, data)pointer_set(data_length_pointer, len(data))if len(data) != bytes_requested:return SecurityConst.errSSLWouldBlockreturn except (KeyboardInterrupt) as e:if self:self._exception = ereturn SecurityConst.errSSLClosedAbort", "docstring": "Callback called by Secure Transport to actually read the socket\n\n:param connection_id:\n An integer identifing the connection\n\n:param data_buffer:\n A char pointer FFI type to write the data to\n\n:param data_length_pointer:\n A size_t pointer FFI type of the amount of data to read. 
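Stripped of the Secure Transport pointer plumbing, the read side of this callback is a fairly standard select()-based receive loop. The following is a simplified sketch of that loop only, with illustrative names; the FFI buffer handling, status-code translation, and handshake capture are omitted.

# Simplified sketch of the blocking read loop performed by the callback above,
# without the FFI buffers or Secure Transport error codes.
import select
import socket as socket_

def recv_up_to(sock, bytes_requested, timeout=None):
    data = b''
    while len(data) < bytes_requested:
        if timeout is not None and timeout > 0:
            read_ready, _, _ = select.select([sock], [], [], timeout)
            if not read_ready:
                raise socket_.error('read timed out')
        chunk = sock.recv(bytes_requested - len(data))
        if chunk == b'':
            break  # peer closed the connection
        data += chunk
    return data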
Will be\n overwritten with the amount of data read on return.\n\n:return:\n An integer status code of the result - 0 for success", "id": "f9507:m0"} {"signature": "def _read_remaining(socket):", "body": "output = b''old_timeout = socket.gettimeout()try:socket.settimeout()output += socket.recv()except (socket_.error):passfinally:socket.settimeout(old_timeout)return output", "docstring": "Reads everything available from the socket - used for debugging when there\nis a protocol error\n\n:param socket:\n The socket to read from\n\n:return:\n A byte string of the remaining data", "id": "f9507:m1"} {"signature": "def _write_callback(connection_id, data_buffer, data_length_pointer):", "body": "try:self = _connection_refs.get(connection_id)if not self:socket = _socket_refs.get(connection_id)else:socket = self._socketif not self and not socket:return data_length = deref(data_length_pointer)data = bytes_from_buffer(data_buffer, data_length)if self and not self._done_handshake:self._client_hello += dataerror = Nonetry:sent = socket.send(data)except (socket_.error) as e:error = e.errnoif error is not None and error != errno.EAGAIN:if error == errno.ECONNRESET or error == errno.EPIPE:return SecurityConst.errSSLClosedNoNotifyreturn SecurityConst.errSSLClosedAbortif sent != data_length:pointer_set(data_length_pointer, sent)return SecurityConst.errSSLWouldBlockreturn except (KeyboardInterrupt) as e:self._exception = ereturn SecurityConst.errSSLPeerUserCancelled", "docstring": "Callback called by Secure Transport to actually write to the socket\n\n:param connection_id:\n An integer identifing the connection\n\n:param data_buffer:\n A char pointer FFI type containing the data to write\n\n:param data_length_pointer:\n A size_t pointer FFI type of the amount of data to write. 
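The write callback's handling of short writes (recording how many bytes were actually sent and returning a would-block status) is the FFI counterpart of the usual send-all loop. A plain-socket sketch of that loop, with an illustrative name, is:

# Simplified send-all loop corresponding to the partial-write handling above;
# socket.send() may accept fewer bytes than requested, so the remainder is retried.
def send_all(sock, data):
    while data:
        sent = sock.send(data)
        data = data[sent:]

Secure Transport instead reports the partial count through the length pointer and lets the caller retry, which is why the callback returns a would-block status rather than looping itself.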
Will be\n overwritten with the amount of data actually written on return.\n\n:return:\n An integer status code of the result - 0 for success", "id": "f9507:m2"} {"signature": "def __init__(self, protocol=None, manual_validation=False, extra_trust_roots=None):", "body": "if not isinstance(manual_validation, bool):raise TypeError(pretty_message('''''',type_name(manual_validation)))self._manual_validation = manual_validationif protocol is None:protocol = set(['', '', ''])if isinstance(protocol, str_cls):protocol = set([protocol])elif not isinstance(protocol, set):raise TypeError(pretty_message('''''',type_name(protocol)))unsupported_protocols = protocol - set(['', '', '', ''])if unsupported_protocols:raise ValueError(pretty_message('''''',repr(unsupported_protocols)))self._protocols = protocolself._extra_trust_roots = []if extra_trust_roots:for extra_trust_root in extra_trust_roots:if isinstance(extra_trust_root, Certificate):extra_trust_root = extra_trust_root.asn1elif isinstance(extra_trust_root, byte_cls):extra_trust_root = parse_certificate(extra_trust_root)elif isinstance(extra_trust_root, str_cls):with open(extra_trust_root, '') as f:extra_trust_root = parse_certificate(f.read())elif not isinstance(extra_trust_root, x509.Certificate):raise TypeError(pretty_message('''''',type_name(extra_trust_root)))self._extra_trust_roots.append(extra_trust_root)self._peer_id = rand_bytes()", "docstring": ":param protocol:\n A unicode string or set of unicode strings representing allowable\n protocols to negotiate with the server:\n\n - \"TLSv1.2\"\n - \"TLSv1.1\"\n - \"TLSv1\"\n - \"SSLv3\"\n\n Default is: {\"TLSv1\", \"TLSv1.1\", \"TLSv1.2\"}\n\n:param manual_validation:\n If certificate and certificate path validation should be skipped\n and left to the developer to implement\n\n:param extra_trust_roots:\n A list containing one or more certificates to be treated as trust\n roots, in one of the following formats:\n - A byte string of the DER encoded certificate\n - A unicode string of the certificate filename\n - An asn1crypto.x509.Certificate object\n - An oscrypto.asymmetric.Certificate object\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9507:c0:m0"} {"signature": "@classmethoddef wrap(cls, socket, hostname, session=None):", "body": "if not isinstance(socket, socket_.socket):raise TypeError(pretty_message('''''',type_name(socket)))if not isinstance(hostname, str_cls):raise TypeError(pretty_message('''''',type_name(hostname)))if session is not None and not isinstance(session, TLSSession):raise TypeError(pretty_message('''''',type_name(session)))new_socket = cls(None, None, session=session)new_socket._socket = socketnew_socket._hostname = hostnamenew_socket._handshake()return new_socket", "docstring": "Takes an existing socket and adds TLS\n\n:param socket:\n A socket.socket object to wrap with TLS\n\n:param hostname:\n A unicode string of the hostname or IP the socket is connected to\n\n:param session:\n An existing TLSSession object to allow for session reuse, specific\n protocol or manual certificate validation\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9507:c1:m0"} {"signature": "def __init__(self, address, port, timeout=, session=None):", "body": 
"self._done_handshake = Falseself._server_hello = b''self._client_hello = b''self._decrypted_bytes = b''if address is None and port is None:self._socket = Noneelse:if not isinstance(address, str_cls):raise TypeError(pretty_message('''''',type_name(address)))if not isinstance(port, int_types):raise TypeError(pretty_message('''''',type_name(port)))if timeout is not None and not isinstance(timeout, numbers.Number):raise TypeError(pretty_message('''''',type_name(timeout)))self._socket = socket_.create_connection((address, port), timeout)self._socket.settimeout(timeout)if session is None:session = TLSSession()elif not isinstance(session, TLSSession):raise TypeError(pretty_message('''''',type_name(session)))self._session = sessionif self._socket:self._hostname = addressself._handshake()", "docstring": ":param address:\n A unicode string of the domain name or IP address to conenct to\n\n:param port:\n An integer of the port number to connect to\n\n:param timeout:\n An integer timeout to use for the socket\n\n:param session:\n An oscrypto.tls.TLSSession object to allow for session reuse and\n controlling the protocols and validation performed", "id": "f9507:c1:m1"} {"signature": "def _handshake(self):", "body": "session_context = Nonessl_policy_ref = Nonecrl_search_ref = Nonecrl_policy_ref = Noneocsp_search_ref = Noneocsp_policy_ref = Nonepolicy_array_ref = Nonetry:if osx_version_info < (, ):session_context_pointer = new(Security, '')result = Security.SSLNewContext(False, session_context_pointer)handle_sec_error(result)session_context = unwrap(session_context_pointer)else:session_context = Security.SSLCreateContext(null(),SecurityConst.kSSLClientSide,SecurityConst.kSSLStreamType)result = Security.SSLSetIOFuncs(session_context,_read_callback_pointer,_write_callback_pointer)handle_sec_error(result)self._connection_id = id(self) % _connection_refs[self._connection_id] = self_socket_refs[self._connection_id] = self._socketresult = Security.SSLSetConnection(session_context, self._connection_id)handle_sec_error(result)utf8_domain = self._hostname.encode('')result = Security.SSLSetPeerDomainName(session_context,utf8_domain,len(utf8_domain))handle_sec_error(result)if osx_version_info >= (, ):disable_auto_validation = self._session._manual_validation or self._session._extra_trust_rootsexplicit_validation = (not self._session._manual_validation) and self._session._extra_trust_rootselse:disable_auto_validation = Trueexplicit_validation = not self._session._manual_validationif osx_version_info < (, ):for protocol in ['', '', '']:protocol_const = _PROTOCOL_STRING_CONST_MAP[protocol]enabled = protocol in self._session._protocolsresult = Security.SSLSetProtocolVersionEnabled(session_context,protocol_const,enabled)handle_sec_error(result)if disable_auto_validation:result = Security.SSLSetEnableCertVerify(session_context, False)handle_sec_error(result)else:protocol_consts = [_PROTOCOL_STRING_CONST_MAP[protocol] for protocol in self._session._protocols]min_protocol = min(protocol_consts)max_protocol = max(protocol_consts)result = Security.SSLSetProtocolVersionMin(session_context,min_protocol)handle_sec_error(result)result = Security.SSLSetProtocolVersionMax(session_context,max_protocol)handle_sec_error(result)if disable_auto_validation:result = Security.SSLSetSessionOption(session_context,SecurityConst.kSSLSessionOptionBreakOnServerAuth,True)handle_sec_error(result)supported_ciphers_pointer = new(Security, '')result = Security.SSLGetNumberSupportedCiphers(session_context, 
supported_ciphers_pointer)handle_sec_error(result)supported_ciphers = deref(supported_ciphers_pointer)cipher_buffer = buffer_from_bytes(supported_ciphers * )supported_cipher_suites_pointer = cast(Security, '', cipher_buffer)result = Security.SSLGetSupportedCiphers(session_context,supported_cipher_suites_pointer,supported_ciphers_pointer)handle_sec_error(result)supported_ciphers = deref(supported_ciphers_pointer)supported_cipher_suites = array_from_pointer(Security,'',supported_cipher_suites_pointer,supported_ciphers)good_ciphers = []for supported_cipher_suite in supported_cipher_suites:cipher_suite = int_to_bytes(supported_cipher_suite, width=)cipher_suite_name = CIPHER_SUITE_MAP.get(cipher_suite, cipher_suite)good_cipher = _cipher_blacklist_regex.search(cipher_suite_name) is Noneif good_cipher:good_ciphers.append(supported_cipher_suite)num_good_ciphers = len(good_ciphers)good_ciphers_array = new(Security, '', num_good_ciphers)array_set(good_ciphers_array, good_ciphers)good_ciphers_pointer = cast(Security, '', good_ciphers_array)result = Security.SSLSetEnabledCiphers(session_context,good_ciphers_pointer,num_good_ciphers)handle_sec_error(result)peer_id = self._session._peer_id + self._hostname.encode('')result = Security.SSLSetPeerID(session_context, peer_id, len(peer_id))handle_sec_error(result)handshake_result = Security.SSLHandshake(session_context)if self._exception is not None:exception = self._exceptionself._exception = Noneraise exceptionwhile handshake_result == SecurityConst.errSSLWouldBlock:handshake_result = Security.SSLHandshake(session_context)if self._exception is not None:exception = self._exceptionself._exception = Noneraise exceptionif osx_version_info < (, ) and osx_version_info >= (, ):do_validation = explicit_validation and handshake_result == else:do_validation = explicit_validation and handshake_result == SecurityConst.errSSLServerAuthCompletedif do_validation:trust_ref_pointer = new(Security, '')result = Security.SSLCopyPeerTrust(session_context,trust_ref_pointer)handle_sec_error(result)trust_ref = unwrap(trust_ref_pointer)cf_string_hostname = CFHelpers.cf_string_from_unicode(self._hostname)ssl_policy_ref = Security.SecPolicyCreateSSL(True, cf_string_hostname)result = CoreFoundation.CFRelease(cf_string_hostname)handle_cf_error(result)ocsp_oid_pointer = struct(Security, '')ocsp_oid = unwrap(ocsp_oid_pointer)ocsp_oid.Length = len(SecurityConst.APPLE_TP_REVOCATION_OCSP)ocsp_oid_buffer = buffer_from_bytes(SecurityConst.APPLE_TP_REVOCATION_OCSP)ocsp_oid.Data = cast(Security, '', ocsp_oid_buffer)ocsp_search_ref_pointer = new(Security, '')result = Security.SecPolicySearchCreate(SecurityConst.CSSM_CERT_X_509v3,ocsp_oid_pointer,null(),ocsp_search_ref_pointer)handle_sec_error(result)ocsp_search_ref = unwrap(ocsp_search_ref_pointer)ocsp_policy_ref_pointer = new(Security, '')result = Security.SecPolicySearchCopyNext(ocsp_search_ref, ocsp_policy_ref_pointer)handle_sec_error(result)ocsp_policy_ref = unwrap(ocsp_policy_ref_pointer)ocsp_struct_pointer = struct(Security, '')ocsp_struct = unwrap(ocsp_struct_pointer)ocsp_struct.Version = SecurityConst.CSSM_APPLE_TP_OCSP_OPTS_VERSIONocsp_struct.Flags = (SecurityConst.CSSM_TP_ACTION_OCSP_DISABLE_NET |SecurityConst.CSSM_TP_ACTION_OCSP_CACHE_READ_DISABLE)ocsp_struct_bytes = struct_bytes(ocsp_struct_pointer)cssm_data_pointer = struct(Security, '')cssm_data = unwrap(cssm_data_pointer)cssm_data.Length = len(ocsp_struct_bytes)ocsp_struct_buffer = buffer_from_bytes(ocsp_struct_bytes)cssm_data.Data = cast(Security, '', ocsp_struct_buffer)result = 
Security.SecPolicySetValue(ocsp_policy_ref, cssm_data_pointer)handle_sec_error(result)crl_oid_pointer = struct(Security, '')crl_oid = unwrap(crl_oid_pointer)crl_oid.Length = len(SecurityConst.APPLE_TP_REVOCATION_CRL)crl_oid_buffer = buffer_from_bytes(SecurityConst.APPLE_TP_REVOCATION_CRL)crl_oid.Data = cast(Security, '', crl_oid_buffer)crl_search_ref_pointer = new(Security, '')result = Security.SecPolicySearchCreate(SecurityConst.CSSM_CERT_X_509v3,crl_oid_pointer,null(),crl_search_ref_pointer)handle_sec_error(result)crl_search_ref = unwrap(crl_search_ref_pointer)crl_policy_ref_pointer = new(Security, '')result = Security.SecPolicySearchCopyNext(crl_search_ref, crl_policy_ref_pointer)handle_sec_error(result)crl_policy_ref = unwrap(crl_policy_ref_pointer)crl_struct_pointer = struct(Security, '')crl_struct = unwrap(crl_struct_pointer)crl_struct.Version = SecurityConst.CSSM_APPLE_TP_CRL_OPTS_VERSIONcrl_struct.CrlFlags = crl_struct_bytes = struct_bytes(crl_struct_pointer)cssm_data_pointer = struct(Security, '')cssm_data = unwrap(cssm_data_pointer)cssm_data.Length = len(crl_struct_bytes)crl_struct_buffer = buffer_from_bytes(crl_struct_bytes)cssm_data.Data = cast(Security, '', crl_struct_buffer)result = Security.SecPolicySetValue(crl_policy_ref, cssm_data_pointer)handle_sec_error(result)policy_array_ref = CFHelpers.cf_array_from_list([ssl_policy_ref,crl_policy_ref,ocsp_policy_ref])result = Security.SecTrustSetPolicies(trust_ref, policy_array_ref)handle_sec_error(result)if self._session._extra_trust_roots:ca_cert_refs = []ca_certs = []for cert in self._session._extra_trust_roots:ca_cert = load_certificate(cert)ca_certs.append(ca_cert)ca_cert_refs.append(ca_cert.sec_certificate_ref)result = Security.SecTrustSetAnchorCertificatesOnly(trust_ref, False)handle_sec_error(result)array_ref = CFHelpers.cf_array_from_list(ca_cert_refs)result = Security.SecTrustSetAnchorCertificates(trust_ref, array_ref)handle_sec_error(result)result_pointer = new(Security, '')result = Security.SecTrustEvaluate(trust_ref, result_pointer)handle_sec_error(result)trust_result_code = deref(result_pointer)invalid_chain_error_codes = set([SecurityConst.kSecTrustResultProceed,SecurityConst.kSecTrustResultUnspecified])if trust_result_code not in invalid_chain_error_codes:handshake_result = SecurityConst.errSSLXCertChainInvalidelse:handshake_result = Security.SSLHandshake(session_context)while handshake_result == SecurityConst.errSSLWouldBlock:handshake_result = Security.SSLHandshake(session_context)self._done_handshake = Truehandshake_error_codes = set([SecurityConst.errSSLXCertChainInvalid,SecurityConst.errSSLCertExpired,SecurityConst.errSSLCertNotYetValid,SecurityConst.errSSLUnknownRootCert,SecurityConst.errSSLNoRootCert,SecurityConst.errSSLHostNameMismatch,SecurityConst.errSSLInternal,])if handshake_result in handshake_error_codes:trust_ref_pointer = new(Security, '')result = Security.SSLCopyPeerTrust(session_context,trust_ref_pointer)handle_sec_error(result)trust_ref = unwrap(trust_ref_pointer)result_code_pointer = new(Security, '')result = Security.SecTrustGetCssmResultCode(trust_ref, result_code_pointer)result_code = deref(result_code_pointer)chain = extract_chain(self._server_hello)self_signed = Falserevoked = Falseexpired = Falsenot_yet_valid = Falseno_issuer = Falsecert = Nonebad_hostname = Falseif chain:cert = chain[]oscrypto_cert = load_certificate(cert)self_signed = oscrypto_cert.self_signedrevoked = result_code == SecurityConst.CSSMERR_TP_CERT_REVOKEDno_issuer = not self_signed and result_code == 
SecurityConst.CSSMERR_TP_NOT_TRUSTEDexpired = result_code == SecurityConst.CSSMERR_TP_CERT_EXPIREDnot_yet_valid = result_code == SecurityConst.CSSMERR_TP_CERT_NOT_VALID_YETbad_hostname = result_code == SecurityConst.CSSMERR_APPLETP_HOSTNAME_MISMATCHif osx_version_info >= (, ):validity = cert['']['']not_before = validity[''].chosen.nativenot_after = validity[''].chosen.nativeutcnow = datetime.datetime.now(timezone.utc)expired = not_after < utcnownot_yet_valid = not_before > utcnowif chain and chain[].hash_algo in set(['', '']):raise_weak_signature(chain[])if revoked:raise_revoked(cert)if bad_hostname:raise_hostname(cert, self._hostname)elif expired or not_yet_valid:raise_expired_not_yet_valid(cert)elif no_issuer:raise_no_issuer(cert)elif self_signed:raise_self_signed(cert)if detect_client_auth_request(self._server_hello):raise_client_auth()raise_verification(cert)if handshake_result == SecurityConst.errSSLPeerHandshakeFail:if detect_client_auth_request(self._server_hello):raise_client_auth()raise_handshake()if handshake_result == SecurityConst.errSSLWeakPeerEphemeralDHKey:raise_dh_params()if handshake_result == SecurityConst.errSSLPeerProtocolVersion:raise_protocol_version()if handshake_result in set([SecurityConst.errSSLRecordOverflow, SecurityConst.errSSLProtocol]):self._server_hello += _read_remaining(self._socket)raise_protocol_error(self._server_hello)if handshake_result in set([SecurityConst.errSSLClosedNoNotify, SecurityConst.errSSLClosedAbort]):if not self._done_handshake:self._server_hello += _read_remaining(self._socket)if detect_other_protocol(self._server_hello):raise_protocol_error(self._server_hello)raise_disconnection()if osx_version_info < (, ):dh_params_length = get_dh_params_length(self._server_hello)if dh_params_length is not None and dh_params_length < :raise_dh_params()would_block = handshake_result == SecurityConst.errSSLWouldBlockserver_auth_complete = handshake_result == SecurityConst.errSSLServerAuthCompletedmanual_validation = self._session._manual_validation and server_auth_completeif not would_block and not manual_validation:handle_sec_error(handshake_result, TLSError)self._session_context = session_contextprotocol_const_pointer = new(Security, '')result = Security.SSLGetNegotiatedProtocolVersion(session_context,protocol_const_pointer)handle_sec_error(result)protocol_const = deref(protocol_const_pointer)self._protocol = _PROTOCOL_CONST_STRING_MAP[protocol_const]cipher_int_pointer = new(Security, '')result = Security.SSLGetNegotiatedCipher(session_context,cipher_int_pointer)handle_sec_error(result)cipher_int = deref(cipher_int_pointer)cipher_bytes = int_to_bytes(cipher_int, width=)self._cipher_suite = CIPHER_SUITE_MAP.get(cipher_bytes, cipher_bytes)session_info = parse_session_info(self._server_hello,self._client_hello)self._compression = session_info['']self._session_id = session_info['']self._session_ticket = session_info['']except (OSError, socket_.error):if session_context:if osx_version_info < (, ):result = Security.SSLDisposeContext(session_context)handle_sec_error(result)else:result = CoreFoundation.CFRelease(session_context)handle_cf_error(result)self._session_context = Noneself.close()raisefinally:if ssl_policy_ref:result = CoreFoundation.CFRelease(ssl_policy_ref)handle_cf_error(result)ssl_policy_ref = Noneif crl_policy_ref:result = CoreFoundation.CFRelease(crl_policy_ref)handle_cf_error(result)crl_policy_ref = Noneif ocsp_policy_ref:result = CoreFoundation.CFRelease(ocsp_policy_ref)handle_cf_error(result)ocsp_policy_ref = Noneif policy_array_ref:result = 
CoreFoundation.CFRelease(policy_array_ref)handle_cf_error(result)policy_array_ref = None", "docstring": "Perform an initial TLS handshake", "id": "f9507:c1:m2"} {"signature": "def read(self, max_length):", "body": "if not isinstance(max_length, int_types):raise TypeError(pretty_message('''''',type_name(max_length)))if self._session_context is None:if self._decrypted_bytes != b'':output = self._decrypted_bytesself._decrypted_bytes = b''return outputself._raise_closed()buffered_length = len(self._decrypted_bytes)if buffered_length >= max_length:output = self._decrypted_bytes[:max_length]self._decrypted_bytes = self._decrypted_bytes[max_length:]return outputif buffered_length > and not self.select_read():output = self._decrypted_bytesself._decrypted_bytes = b''return outputto_read = max_length - len(self._decrypted_bytes)read_buffer = buffer_from_bytes(to_read)processed_pointer = new(Security, '')result = Security.SSLRead(self._session_context,read_buffer,to_read,processed_pointer)if self._exception is not None:exception = self._exceptionself._exception = Noneraise exceptionif result and result not in set([SecurityConst.errSSLWouldBlock, SecurityConst.errSSLClosedGraceful]):handle_sec_error(result, TLSError)if result and result == SecurityConst.errSSLClosedGraceful:self._gracefully_closed = Trueself._shutdown(False)self._raise_closed()bytes_read = deref(processed_pointer)output = self._decrypted_bytes + bytes_from_buffer(read_buffer, bytes_read)self._decrypted_bytes = output[max_length:]return output[:max_length]", "docstring": "Reads data from the TLS-wrapped socket\n\n:param max_length:\n The number of bytes to read - output may be less than this\n\n:raises:\n socket.socket - when a non-TLS socket error occurs\n oscrypto.errors.TLSError - when a TLS-related error occurs\n oscrypto.errors.TLSDisconnectError - when the connection disconnects\n oscrypto.errors.TLSGracefulDisconnectError - when the remote end gracefully closed the connection\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the data read", "id": "f9507:c1:m3"} {"signature": "def select_read(self, timeout=None):", "body": "if len(self._decrypted_bytes) > :return Trueread_ready, _, _ = select.select([self._socket], [], [], timeout)return len(read_ready) > ", "docstring": "Blocks until the socket is ready to be read from, or the timeout is hit\n\n:param timeout:\n A float - the period of time to wait for data to be read. None for\n no time limit.\n\n:return:\n A boolean - if data is ready to be read. Will only be False if\n timeout is not None.", "id": "f9507:c1:m4"} {"signature": "def read_until(self, marker):", "body": "if not isinstance(marker, byte_cls) and not isinstance(marker, Pattern):raise TypeError(pretty_message('''''',type_name(marker)))output = b''is_regex = isinstance(marker, Pattern)while True:if len(self._decrypted_bytes) > :chunk = self._decrypted_bytesself._decrypted_bytes = b''else:to_read = self._os_buffered_size() or chunk = self.read(to_read)offset = len(output)output += chunkif is_regex:match = marker.search(output)if match is not None:end = match.end()breakelse:start = max(, offset - len(marker) - )match = output.find(marker, start)if match != -:end = match + len(marker)breakself._decrypted_bytes = output[end:] + self._decrypted_bytesreturn output[:end]", "docstring": "Reads data from the socket until a marker is found. 
Data read includes\nthe marker.\n\n:param marker:\n A byte string or regex object from re.compile(). Used to determine\n when to stop reading. Regex objects are more inefficient since\n they must scan the entire byte string of read data each time data\n is read off the socket.\n\n:return:\n A byte string of the data read, including the marker", "id": "f9507:c1:m5"} {"signature": "def _os_buffered_size(self):", "body": "num_bytes_pointer = new(Security, '')result = Security.SSLGetBufferedReadSize(self._session_context,num_bytes_pointer)handle_sec_error(result)return deref(num_bytes_pointer)", "docstring": "Returns the number of bytes of decrypted data stored in the Secure\nTransport read buffer. This amount of data can be read from SSLRead()\nwithout calling self._socket.recv().\n\n:return:\n An integer - the number of available bytes", "id": "f9507:c1:m6"} {"signature": "def read_line(self):", "body": "return self.read_until(_line_regex)", "docstring": "r\"\"\"\n Reads a line from the socket, including the line ending of \"\\r\\n\", \"\\r\",\n or \"\\n\"\n\n :return:\n A byte string of the next line from the socket", "id": "f9507:c1:m7"} {"signature": "def read_exactly(self, num_bytes):", "body": "output = b''remaining = num_byteswhile remaining > :output += self.read(remaining)remaining = num_bytes - len(output)return output", "docstring": "Reads exactly the specified number of bytes from the socket\n\n:param num_bytes:\n An integer - the exact number of bytes to read\n\n:return:\n A byte string of the data that was read", "id": "f9507:c1:m8"} {"signature": "def write(self, data):", "body": "if self._session_context is None:self._raise_closed()processed_pointer = new(Security, '')data_len = len(data)while data_len:write_buffer = buffer_from_bytes(data)result = Security.SSLWrite(self._session_context,write_buffer,data_len,processed_pointer)if self._exception is not None:exception = self._exceptionself._exception = Noneraise exceptionhandle_sec_error(result, TLSError)bytes_written = deref(processed_pointer)data = data[bytes_written:]data_len = len(data)if data_len > :self.select_write()", "docstring": "Writes data to the TLS-wrapped socket\n\n:param data:\n A byte string to write to the socket\n\n:raises:\n socket.socket - when a non-TLS socket error occurs\n oscrypto.errors.TLSError - when a TLS-related error occurs\n oscrypto.errors.TLSDisconnectError - when the connection disconnects\n oscrypto.errors.TLSGracefulDisconnectError - when the remote end gracefully closed the connection\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9507:c1:m9"} {"signature": "def select_write(self, timeout=None):", "body": "_, write_ready, _ = select.select([], [self._socket], [], timeout)return len(write_ready) > ", "docstring": "Blocks until the socket is ready to be written to, or the timeout is hit\n\n:param timeout:\n A float - the period of time to wait for the socket to be ready to\n written to. None for no time limit.\n\n:return:\n A boolean - if the socket is ready for writing. 
Will only be False\n if timeout is not None.", "id": "f9507:c1:m10"} {"signature": "def _shutdown(self, manual):", "body": "if self._session_context is None:returnresult = Security.SSLClose(self._session_context)if osx_version_info < (, ):result = Security.SSLDisposeContext(self._session_context)handle_sec_error(result)else:result = CoreFoundation.CFRelease(self._session_context)handle_cf_error(result)self._session_context = Noneif manual:self._local_closed = Truetry:self._socket.shutdown(socket_.SHUT_RDWR)except (socket_.error):pass", "docstring": "Shuts down the TLS session and then shuts down the underlying socket\n\n:param manual:\n A boolean if the connection was manually shutdown", "id": "f9507:c1:m11"} {"signature": "def shutdown(self):", "body": "self._shutdown(True)", "docstring": "Shuts down the TLS session and then shuts down the underlying socket", "id": "f9507:c1:m12"} {"signature": "def close(self):", "body": "try:self.shutdown()finally:if self._socket:try:self._socket.close()except (socket_.error):passself._socket = Noneif self._connection_id in _socket_refs:del _socket_refs[self._connection_id]", "docstring": "Shuts down the TLS session and socket and forcibly closes it", "id": "f9507:c1:m13"} {"signature": "def _read_certificates(self):", "body": "trust_ref = Nonecf_data_ref = Noneresult = Nonetry:trust_ref_pointer = new(Security, '')result = Security.SSLCopyPeerTrust(self._session_context,trust_ref_pointer)handle_sec_error(result)trust_ref = unwrap(trust_ref_pointer)number_certs = Security.SecTrustGetCertificateCount(trust_ref)self._intermediates = []for index in range(, number_certs):sec_certificate_ref = Security.SecTrustGetCertificateAtIndex(trust_ref,index)cf_data_ref = Security.SecCertificateCopyData(sec_certificate_ref)cert_data = CFHelpers.cf_data_to_bytes(cf_data_ref)result = CoreFoundation.CFRelease(cf_data_ref)handle_cf_error(result)cf_data_ref = Nonecert = x509.Certificate.load(cert_data)if index == :self._certificate = certelse:self._intermediates.append(cert)finally:if trust_ref:result = CoreFoundation.CFRelease(trust_ref)handle_cf_error(result)if cf_data_ref:result = CoreFoundation.CFRelease(cf_data_ref)handle_cf_error(result)", "docstring": "Reads end-entity and intermediate certificate information from the\nTLS session", "id": "f9507:c1:m14"} {"signature": "def _raise_closed(self):", "body": "if self._local_closed:raise TLSDisconnectError('')elif self._gracefully_closed:raise TLSGracefulDisconnectError('')else:raise TLSDisconnectError('')", "docstring": "Raises an exception describing if the local or remote end closed the\nconnection", "id": "f9507:c1:m15"} {"signature": "@propertydef certificate(self):", "body": "if self._session_context is None:self._raise_closed()if self._certificate is None:self._read_certificates()return self._certificate", "docstring": "An asn1crypto.x509.Certificate object of the end-entity certificate\npresented by the server", "id": "f9507:c1:m16"} {"signature": "@propertydef intermediates(self):", "body": "if self._session_context is None:self._raise_closed()if self._certificate is None:self._read_certificates()return self._intermediates", "docstring": "A list of asn1crypto.x509.Certificate objects that were presented as\nintermediates by the server", "id": "f9507:c1:m17"} {"signature": "@propertydef cipher_suite(self):", "body": "return self._cipher_suite", "docstring": "A unicode string of the IANA cipher suite name of the negotiated\ncipher suite", "id": "f9507:c1:m18"} {"signature": "@propertydef protocol(self):", "body": 
"return self._protocol", "docstring": "A unicode string of: \"TLSv1.2\", \"TLSv1.1\", \"TLSv1\", \"SSLv3\"", "id": "f9507:c1:m19"} {"signature": "@propertydef compression(self):", "body": "return self._compression", "docstring": "A boolean if compression is enabled", "id": "f9507:c1:m20"} {"signature": "@propertydef session_id(self):", "body": "return self._session_id", "docstring": "A unicode string of \"new\" or \"reused\" or None for no ticket", "id": "f9507:c1:m21"} {"signature": "@propertydef session_ticket(self):", "body": "return self._session_ticket", "docstring": "A unicode string of \"new\" or \"reused\" or None for no ticket", "id": "f9507:c1:m22"} {"signature": "@propertydef session(self):", "body": "return self._session", "docstring": "The oscrypto.tls.TLSSession object used for this connection", "id": "f9507:c1:m23"} {"signature": "@propertydef hostname(self):", "body": "return self._hostname", "docstring": "A unicode string of the TLS server domain name or IP address", "id": "f9507:c1:m24"} {"signature": "@propertydef port(self):", "body": "return self.socket.getpeername()[]", "docstring": "An integer of the port number the socket is connected to", "id": "f9507:c1:m25"} {"signature": "@propertydef socket(self):", "body": "if self._session_context is None:self._raise_closed()return self._socket", "docstring": "The underlying socket.socket connection", "id": "f9507:c1:m26"} {"signature": "def handle_sec_error(error, exception_class=None):", "body": "if error == :returnif error in set([SecurityConst.errSSLClosedNoNotify, SecurityConst.errSSLClosedAbort]):raise TLSDisconnectError('')if error == SecurityConst.errSSLClosedGraceful:raise TLSGracefulDisconnectError('')cf_error_string = Security.SecCopyErrorMessageString(error, null())output = CFHelpers.cf_string_to_unicode(cf_error_string)CoreFoundation.CFRelease(cf_error_string)if output is None or output == '':output = '' % errorif exception_class is None:exception_class = OSErrorraise exception_class(output)", "docstring": "Checks a Security OSStatus error code and throws an exception if there is an\nerror to report\n\n:param error:\n An OSStatus\n\n:param exception_class:\n The exception class to use for the exception if an error occurred\n\n:raises:\n OSError - when the OSStatus contains an error", "id": "f9508:m0"} {"signature": "@classmethoddef register_native_mapping(cls, type_id, callback):", "body": "cls._native_map[int(type_id)] = callback", "docstring": "Register a function to convert a core foundation data type into its\nequivalent in python\n\n:param type_id:\n The CFTypeId for the type\n\n:param callback:\n A callback to pass the CFType object to", "id": "f9510:c0:m0"} {"signature": "@staticmethoddef cf_number_to_number(value):", "body": "type_ = CoreFoundation.CFNumberGetType(value)type_name_ = {: '', : '', : '', : '', : '', : '', : '', : '', : '', : '', : '', : '', : '', : '', : '', : '', }[type_]output = new(CoreFoundation, type_name_ + '')CoreFoundation.CFNumberGetValue(value, type_, output)return deref(output)", "docstring": "Converts a CFNumber object to a python float or integer\n\n:param value:\n The CFNumber object\n\n:return:\n A python number (float or integer)", "id": "f9510:c0:m1"} {"signature": "@staticmethoddef cf_dictionary_to_dict(dictionary):", "body": "dict_length = CoreFoundation.CFDictionaryGetCount(dictionary)keys = new(CoreFoundation, '' % dict_length)values = new(CoreFoundation, '' % dict_length)CoreFoundation.CFDictionaryGetKeysAndValues(dictionary,keys,values)output = {}for index in range(, 
dict_length):output[CFHelpers.native(keys[index])] = CFHelpers.native(values[index])return output", "docstring": "Converts a CFDictionary object into a python dictionary\n\n:param dictionary:\n The CFDictionary to convert\n\n:return:\n A python dict", "id": "f9510:c0:m2"} {"signature": "@classmethoddef native(cls, value):", "body": "type_id = CoreFoundation.CFGetTypeID(value)if type_id in cls._native_map:return cls._native_map[type_id](value)else:return value", "docstring": "Converts a CF* object into its python equivalent\n\n:param value:\n The CF* object to convert\n\n:return:\n The native python object", "id": "f9510:c0:m3"} {"signature": "@staticmethoddef cf_string_to_unicode(value):", "body": "string_ptr = CoreFoundation.CFStringGetCStringPtr(value,kCFStringEncodingUTF8)string = None if is_null(string_ptr) else ffi.string(string_ptr)if string is None:buffer = buffer_from_bytes()result = CoreFoundation.CFStringGetCString(value,buffer,,kCFStringEncodingUTF8)if not result:raise OSError('')string = byte_string_from_buffer(buffer)if string is not None:string = string.decode('')return string", "docstring": "Creates a python unicode string from a CFString object\n\n:param value:\n The CFString to convert\n\n:return:\n A python unicode string", "id": "f9510:c0:m4"} {"signature": "@staticmethoddef cf_string_from_unicode(string):", "body": "return CoreFoundation.CFStringCreateWithCString(CoreFoundation.kCFAllocatorDefault,string.encode(''),kCFStringEncodingUTF8)", "docstring": "Creates a CFStringRef object from a unicode string\n\n:param string:\n The unicode string to create the CFString object from\n\n:return:\n A CFStringRef", "id": "f9510:c0:m5"} {"signature": "@staticmethoddef cf_data_to_bytes(value):", "body": "start = CoreFoundation.CFDataGetBytePtr(value)num_bytes = CoreFoundation.CFDataGetLength(value)return ffi.buffer(start, num_bytes)[:]", "docstring": "Extracts a bytestring from a CFData object\n\n:param value:\n A CFData object\n\n:return:\n A byte string", "id": "f9510:c0:m6"} {"signature": "@staticmethoddef cf_data_from_bytes(bytes_):", "body": "return CoreFoundation.CFDataCreate(CoreFoundation.kCFAllocatorDefault,bytes_,len(bytes_))", "docstring": "Creates a CFDataRef object from a byte string\n\n:param bytes_:\n The data to create the CFData object from\n\n:return:\n A CFDataRef", "id": "f9510:c0:m7"} {"signature": "@staticmethoddef cf_dictionary_from_pairs(pairs):", "body": "length = len(pairs)keys = []values = []for pair in pairs:key, value = pairkeys.append(key)values.append(value)return CoreFoundation.CFDictionaryCreate(CoreFoundation.kCFAllocatorDefault,keys,values,length,ffi.addressof(CoreFoundation.kCFTypeDictionaryKeyCallBacks),ffi.addressof(CoreFoundation.kCFTypeDictionaryValueCallBacks))", "docstring": "Creates a CFDictionaryRef object from a list of 2-element tuples\nrepresenting the key and value. 
Each key should be a CFStringRef and each\nvalue some sort of CF* type.\n\n:param pairs:\n A list of 2-element tuples\n\n:return:\n A CFDictionaryRef", "id": "f9510:c0:m8"} {"signature": "@staticmethoddef cf_array_from_list(values):", "body": "length = len(values)return CoreFoundation.CFArrayCreate(CoreFoundation.kCFAllocatorDefault,values,length,ffi.addressof(CoreFoundation.kCFTypeArrayCallBacks))", "docstring": "Creates a CFArrayRef object from a list of CF* type objects.\n\n:param values:\n A list of CF* type object\n\n:return:\n A CFArrayRef", "id": "f9510:c0:m9"} {"signature": "@staticmethoddef cf_number_from_integer(integer):", "body": "integer_as_long = ffi.new('', integer)return CoreFoundation.CFNumberCreate(CoreFoundation.kCFAllocatorDefault,kCFNumberCFIndexType,integer_as_long)", "docstring": "Creates a CFNumber object from an integer\n\n:param integer:\n The integer to create the CFNumber for\n\n:return:\n A CFNumber", "id": "f9510:c0:m10"} {"signature": "def generate_pair(algorithm, bit_size=None, curve=None):", "body": "if algorithm not in set(['', '', '']):raise ValueError(pretty_message('''''',repr(algorithm)))if algorithm == '':if bit_size not in set([, , , ]):raise ValueError(pretty_message('''''',repr(bit_size)))elif algorithm == '':if bit_size not in set([]):raise ValueError(pretty_message('''''',repr(bit_size)))elif algorithm == '':if curve not in set(['', '', '']):raise ValueError(pretty_message('''''',repr(curve)))cf_dict = Nonepublic_key_ref = Noneprivate_key_ref = Nonecf_data_public = Nonecf_data_private = Nonecf_string = Nonesec_access_ref = Nonetry:key_type = {'': Security.kSecAttrKeyTypeDSA,'': Security.kSecAttrKeyTypeECDSA,'': Security.kSecAttrKeyTypeRSA,}[algorithm]if algorithm == '':key_size = {'': ,'': ,'': ,}[curve]else:key_size = bit_sizeprivate_key_pointer = new(Security, '')public_key_pointer = new(Security, '')cf_string = CFHelpers.cf_string_from_unicode(\"\")if algorithm == '':sec_access_ref_pointer = new(Security, '')result = Security.SecAccessCreate(cf_string, null(), sec_access_ref_pointer)sec_access_ref = unwrap(sec_access_ref_pointer)result = Security.SecKeyCreatePair(null(),SecurityConst.CSSM_ALGID_DSA,key_size,,SecurityConst.CSSM_KEYUSE_VERIFY,SecurityConst.CSSM_KEYATTR_EXTRACTABLE | SecurityConst.CSSM_KEYATTR_PERMANENT,SecurityConst.CSSM_KEYUSE_SIGN,SecurityConst.CSSM_KEYATTR_EXTRACTABLE | SecurityConst.CSSM_KEYATTR_PERMANENT,sec_access_ref,public_key_pointer,private_key_pointer)handle_sec_error(result)else:cf_dict = CFHelpers.cf_dictionary_from_pairs([(Security.kSecAttrKeyType, key_type),(Security.kSecAttrKeySizeInBits, CFHelpers.cf_number_from_integer(key_size)),(Security.kSecAttrLabel, cf_string)])result = Security.SecKeyGeneratePair(cf_dict, public_key_pointer, private_key_pointer)handle_sec_error(result)public_key_ref = unwrap(public_key_pointer)private_key_ref = unwrap(private_key_pointer)cf_data_public_pointer = new(CoreFoundation, '')result = Security.SecItemExport(public_key_ref, , , null(), cf_data_public_pointer)handle_sec_error(result)cf_data_public = unwrap(cf_data_public_pointer)public_key_bytes = CFHelpers.cf_data_to_bytes(cf_data_public)cf_data_private_pointer = new(CoreFoundation, '')result = Security.SecItemExport(private_key_ref, , , null(), cf_data_private_pointer)handle_sec_error(result)cf_data_private = unwrap(cf_data_private_pointer)private_key_bytes = CFHelpers.cf_data_to_bytes(cf_data_private)result = Security.SecKeychainItemDelete(public_key_ref)handle_sec_error(result)result = 
Security.SecKeychainItemDelete(private_key_ref)handle_sec_error(result)finally:if cf_dict:CoreFoundation.CFRelease(cf_dict)if public_key_ref:CoreFoundation.CFRelease(public_key_ref)if private_key_ref:CoreFoundation.CFRelease(private_key_ref)if cf_data_public:CoreFoundation.CFRelease(cf_data_public)if cf_data_private:CoreFoundation.CFRelease(cf_data_private)if cf_string:CoreFoundation.CFRelease(cf_string)if sec_access_ref:CoreFoundation.CFRelease(sec_access_ref)return (load_public_key(public_key_bytes), load_private_key(private_key_bytes))", "docstring": "Generates a public/private key pair\n\n:param algorithm:\n The key algorithm - \"rsa\", \"dsa\" or \"ec\"\n\n:param bit_size:\n An integer - used for \"rsa\" and \"dsa\". For \"rsa\" the value may be 1024,\n 2048, 3072 or 4096. For \"dsa\" the value may be 1024.\n\n:param curve:\n A unicode string - used for \"ec\" keys. Valid values include \"secp256r1\",\n \"secp384r1\" and \"secp521r1\".\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A 2-element tuple of (PublicKey, PrivateKey). The contents of each key\n may be saved by calling .asn1.dump().", "id": "f9511:m0"} {"signature": "def generate_dh_parameters(bit_size):", "body": "if not isinstance(bit_size, int_types):raise TypeError(pretty_message('''''',type_name(bit_size)))if bit_size < :raise ValueError('')if bit_size > :raise ValueError('')if bit_size % != :raise ValueError('')public_key_ref = Noneprivate_key_ref = Nonecf_data_public = Nonecf_data_private = Nonecf_string = Nonesec_access_ref = Nonetry:public_key_pointer = new(Security, '')private_key_pointer = new(Security, '')cf_string = CFHelpers.cf_string_from_unicode(\"\")sec_access_ref_pointer = new(Security, '')result = Security.SecAccessCreate(cf_string, null(), sec_access_ref_pointer)sec_access_ref = unwrap(sec_access_ref_pointer)result = Security.SecKeyCreatePair(null(),SecurityConst.CSSM_ALGID_DH,bit_size,,,SecurityConst.CSSM_KEYATTR_EXTRACTABLE | SecurityConst.CSSM_KEYATTR_PERMANENT,,SecurityConst.CSSM_KEYATTR_EXTRACTABLE | SecurityConst.CSSM_KEYATTR_PERMANENT,sec_access_ref,public_key_pointer,private_key_pointer)handle_sec_error(result)public_key_ref = unwrap(public_key_pointer)private_key_ref = unwrap(private_key_pointer)cf_data_private_pointer = new(CoreFoundation, '')result = Security.SecItemExport(private_key_ref, , , null(), cf_data_private_pointer)handle_sec_error(result)cf_data_private = unwrap(cf_data_private_pointer)private_key_bytes = CFHelpers.cf_data_to_bytes(cf_data_private)result = Security.SecKeychainItemDelete(public_key_ref)handle_sec_error(result)result = Security.SecKeychainItemDelete(private_key_ref)handle_sec_error(result)return algos.KeyExchangeAlgorithm.load(private_key_bytes)['']finally:if public_key_ref:CoreFoundation.CFRelease(public_key_ref)if private_key_ref:CoreFoundation.CFRelease(private_key_ref)if cf_data_public:CoreFoundation.CFRelease(cf_data_public)if cf_data_private:CoreFoundation.CFRelease(cf_data_private)if cf_string:CoreFoundation.CFRelease(cf_string)if sec_access_ref:CoreFoundation.CFRelease(sec_access_ref)", "docstring": "Generates DH parameters for use with Diffie-Hellman key exchange. Returns\na structure in the format of DHParameter defined in PKCS#3, which is also\nused by the OpenSSL dhparam tool.\n\nTHIS CAN BE VERY TIME CONSUMING!\n\n:param bit_size:\n The integer bit size of the parameters to generate. 
Must be between 512\n and 4096, and divisible by 64. Recommended secure value as of early 2016\n is 2048, with an absolute minimum of 1024.\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n An asn1crypto.algos.DHParameters object. Use\n oscrypto.asymmetric.dump_dh_parameters() to save to disk for usage with\n web servers.", "id": "f9511:m1"} {"signature": "def load_certificate(source):", "body": "if isinstance(source, x509.Certificate):certificate = sourceelif isinstance(source, byte_cls):certificate = parse_certificate(source)elif isinstance(source, str_cls):with open(source, '') as f:certificate = parse_certificate(f.read())else:raise TypeError(pretty_message('''''',type_name(source)))return _load_x509(certificate)", "docstring": "Loads an x509 certificate into a Certificate object\n\n:param source:\n A byte string of file contents, a unicode string filename or an\n asn1crypto.x509.Certificate object\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A Certificate object", "id": "f9511:m2"} {"signature": "def _load_x509(certificate):", "body": "source = certificate.dump()cf_source = Nonetry:cf_source = CFHelpers.cf_data_from_bytes(source)sec_key_ref = Security.SecCertificateCreateWithData(CoreFoundation.kCFAllocatorDefault, cf_source)return Certificate(sec_key_ref, certificate)finally:if cf_source:CoreFoundation.CFRelease(cf_source)", "docstring": "Loads an ASN.1 object of an x509 certificate into a Certificate object\n\n:param certificate:\n An asn1crypto.x509.Certificate object\n\n:return:\n A Certificate object", "id": "f9511:m3"} {"signature": "def load_private_key(source, password=None):", "body": "if isinstance(source, keys.PrivateKeyInfo):private_object = sourceelse:if password is not None:if isinstance(password, str_cls):password = password.encode('')if not isinstance(password, byte_cls):raise TypeError(pretty_message('''''',type_name(password)))if isinstance(source, str_cls):with open(source, '') as f:source = f.read()elif not isinstance(source, byte_cls):raise TypeError(pretty_message('''''',type_name(source)))private_object = parse_private(source, password)return _load_key(private_object)", "docstring": "Loads a private key into a PrivateKey object\n\n:param source:\n A byte string of file contents, a unicode string filename or an\n asn1crypto.keys.PrivateKeyInfo object\n\n:param password:\n A byte or unicode string to decrypt the private key file. Unicode\n strings will be encoded using UTF-8. 
Not used if the source is a\n PrivateKeyInfo object.\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n oscrypto.errors.AsymmetricKeyError - when the private key is incompatible with the OS crypto library\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A PrivateKey object", "id": "f9511:m4"} {"signature": "def load_public_key(source):", "body": "if isinstance(source, keys.PublicKeyInfo):public_key = sourceelif isinstance(source, byte_cls):public_key = parse_public(source)elif isinstance(source, str_cls):with open(source, '') as f:public_key = parse_public(f.read())else:raise TypeError(pretty_message('''''',type_name(source)))return _load_key(public_key)", "docstring": "Loads a public key into a PublicKey object\n\n:param source:\n A byte string of file contents, a unicode string filename or an\n asn1crypto.keys.PublicKeyInfo object\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n oscrypto.errors.AsymmetricKeyError - when the public key is incompatible with the OS crypto library\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A PublicKey object", "id": "f9511:m5"} {"signature": "def _load_key(key_object):", "body": "if key_object.algorithm == '':curve_type, details = key_object.curveif curve_type != '':raise AsymmetricKeyError('')if details not in set(['', '', '']):raise AsymmetricKeyError(pretty_message(''''''))elif key_object.algorithm == '' and key_object.hash_algo == '':raise AsymmetricKeyError(pretty_message('''''',key_object.bit_size))elif key_object.algorithm == '' and key_object.hash_algo is None:raise IncompleteAsymmetricKeyError(pretty_message(''''''))if isinstance(key_object, keys.PublicKeyInfo):source = key_object.dump()key_class = Security.kSecAttrKeyClassPublicelse:source = key_object.unwrap().dump()key_class = Security.kSecAttrKeyClassPrivatecf_source = Nonecf_dict = Nonecf_output = Nonetry:cf_source = CFHelpers.cf_data_from_bytes(source)key_type = {'': Security.kSecAttrKeyTypeDSA,'': Security.kSecAttrKeyTypeECDSA,'': Security.kSecAttrKeyTypeRSA,}[key_object.algorithm]cf_dict = CFHelpers.cf_dictionary_from_pairs([(Security.kSecAttrKeyType, key_type),(Security.kSecAttrKeyClass, key_class),(Security.kSecAttrCanSign, CoreFoundation.kCFBooleanTrue),(Security.kSecAttrCanVerify, CoreFoundation.kCFBooleanTrue),])error_pointer = new(CoreFoundation, '')sec_key_ref = Security.SecKeyCreateFromData(cf_dict, cf_source, error_pointer)handle_cf_error(error_pointer)if key_class == Security.kSecAttrKeyClassPublic:return PublicKey(sec_key_ref, key_object)if key_class == Security.kSecAttrKeyClassPrivate:return PrivateKey(sec_key_ref, key_object)finally:if cf_source:CoreFoundation.CFRelease(cf_source)if cf_dict:CoreFoundation.CFRelease(cf_dict)if cf_output:CoreFoundation.CFRelease(cf_output)", "docstring": "Common code to load public and private keys into PublicKey and PrivateKey\nobjects\n\n:param key_object:\n An asn1crypto.keys.PublicKeyInfo or asn1crypto.keys.PrivateKeyInfo\n object\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n oscrypto.errors.AsymmetricKeyError - when the key is incompatible with the OS crypto library\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A PublicKey or PrivateKey object", "id": "f9511:m6"} 
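The asymmetric records above (f9511:m0 generate_pair, f9511:m4 load_private_key, and the PKCS#1 v1.5 sign/verify helpers that follow) describe oscrypto's macOS Security.framework backend. As a rough illustration of how these functions fit together at a call site — assuming the usual public `oscrypto.asymmetric` module re-exports this backend, which is not shown in this extract — a minimal sketch:

```python
# Illustrative sketch only; not part of the extracted dataset.
# Assumes the public oscrypto.asymmetric module dispatches to the backend
# functions documented above (generate_pair, rsa_pkcs1v15_sign/verify).
from oscrypto import asymmetric
from oscrypto.errors import SignatureError

# Generate a 2048-bit RSA key pair (returns PublicKey and PrivateKey wrappers).
public_key, private_key = asymmetric.generate_pair('rsa', bit_size=2048)

message = b'data to be signed'

# RSASSA-PKCS#1 v1.5 signature over a SHA-256 digest of the message.
signature = asymmetric.rsa_pkcs1v15_sign(private_key, message, 'sha256')

# Per the docstrings above, verification returns nothing on success and
# raises oscrypto.errors.SignatureError when the signature is invalid.
try:
    asymmetric.rsa_pkcs1v15_verify(public_key, signature, message, 'sha256')
    print('signature valid')
except SignatureError:
    print('signature invalid')
```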
{"signature": "def load_pkcs12(source, password=None):", "body": "if password is not None:if isinstance(password, str_cls):password = password.encode('')if not isinstance(password, byte_cls):raise TypeError(pretty_message('''''',type_name(password)))if isinstance(source, str_cls):with open(source, '') as f:source = f.read()elif not isinstance(source, byte_cls):raise TypeError(pretty_message('''''',type_name(source)))key_info, cert_info, extra_certs_info = parse_pkcs12(source, password)key = Nonecert = Noneif key_info:key = _load_key(key_info)if cert_info:cert = _load_x509(cert_info)extra_certs = [_load_x509(info) for info in extra_certs_info]return (key, cert, extra_certs)", "docstring": "Loads a .p12 or .pfx file into a PrivateKey object and one or more\nCertificates objects\n\n:param source:\n A byte string of file contents or a unicode string filename\n\n:param password:\n A byte or unicode string to decrypt the PKCS12 file. Unicode strings\n will be encoded using UTF-8.\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n oscrypto.errors.AsymmetricKeyError - when a contained key is incompatible with the OS crypto library\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A three-element tuple containing (PrivateKey, Certificate, [Certificate, ...])", "id": "f9511:m7"} {"signature": "def rsa_pkcs1v15_encrypt(certificate_or_public_key, data):", "body": "if not isinstance(certificate_or_public_key, (Certificate, PublicKey)):raise TypeError(pretty_message('''''',type_name(certificate_or_public_key)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))key_length = certificate_or_public_key.byte_sizebuffer = buffer_from_bytes(key_length)output_length = new(Security, '', key_length)result = Security.SecKeyEncrypt(certificate_or_public_key.sec_key_ref,SecurityConst.kSecPaddingPKCS1,data,len(data),buffer,output_length)handle_sec_error(result)return bytes_from_buffer(buffer, deref(output_length))", "docstring": "Encrypts a byte string using an RSA public key or certificate. Uses PKCS#1\nv1.5 padding.\n\n:param certificate_or_public_key:\n A PublicKey or Certificate object\n\n:param data:\n A byte string, with a maximum length 11 bytes less than the key length\n (in bytes)\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the encrypted data", "id": "f9511:m8"} {"signature": "def rsa_pkcs1v15_decrypt(private_key, ciphertext):", "body": "if not isinstance(private_key, PrivateKey):raise TypeError(pretty_message('''''',type_name(private_key)))if not isinstance(ciphertext, byte_cls):raise TypeError(pretty_message('''''',type_name(ciphertext)))key_length = private_key.byte_sizebuffer = buffer_from_bytes(key_length)output_length = new(Security, '', key_length)if osx_version_info < (, ):padding = SecurityConst.kSecPaddingNoneelse:padding = SecurityConst.kSecPaddingPKCS1result = Security.SecKeyDecrypt(private_key.sec_key_ref,padding,ciphertext,len(ciphertext),buffer,output_length)handle_sec_error(result)output = bytes_from_buffer(buffer, deref(output_length))if osx_version_info < (, ):output = remove_pkcs1v15_encryption_padding(key_length, output)return output", "docstring": "Decrypts a byte string using an RSA private key. 
Uses PKCS#1 v1.5 padding.\n\n:param private_key:\n A PrivateKey object\n\n:param ciphertext:\n A byte string of the encrypted data\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the original plaintext", "id": "f9511:m9"} {"signature": "def rsa_oaep_encrypt(certificate_or_public_key, data):", "body": "return _encrypt(certificate_or_public_key, data, Security.kSecPaddingOAEPKey)", "docstring": "Encrypts a byte string using an RSA public key or certificate. Uses PKCS#1\nOAEP padding with SHA1.\n\n:param certificate_or_public_key:\n A PublicKey or Certificate object\n\n:param data:\n A byte string, with a maximum length 41 bytes (or more) less than the\n key length (in bytes)\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the encrypted data", "id": "f9511:m10"} {"signature": "def rsa_oaep_decrypt(private_key, ciphertext):", "body": "return _decrypt(private_key, ciphertext, Security.kSecPaddingOAEPKey)", "docstring": "Decrypts a byte string using an RSA private key. Uses PKCS#1 OAEP padding\nwith SHA1.\n\n:param private_key:\n A PrivateKey object\n\n:param ciphertext:\n A byte string of the encrypted data\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the original plaintext", "id": "f9511:m11"} {"signature": "def _encrypt(certificate_or_public_key, data, padding):", "body": "if not isinstance(certificate_or_public_key, (Certificate, PublicKey)):raise TypeError(pretty_message('''''',type_name(certificate_or_public_key)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))if not padding:raise ValueError('')cf_data = Nonesec_transform = Nonetry:cf_data = CFHelpers.cf_data_from_bytes(data)error_pointer = new(CoreFoundation, '')sec_transform = Security.SecEncryptTransformCreate(certificate_or_public_key.sec_key_ref,error_pointer)handle_cf_error(error_pointer)if padding:Security.SecTransformSetAttribute(sec_transform,Security.kSecPaddingKey,padding,error_pointer)handle_cf_error(error_pointer)Security.SecTransformSetAttribute(sec_transform,Security.kSecTransformInputAttributeName,cf_data,error_pointer)handle_cf_error(error_pointer)ciphertext = Security.SecTransformExecute(sec_transform, error_pointer)handle_cf_error(error_pointer)return CFHelpers.cf_data_to_bytes(ciphertext)finally:if cf_data:CoreFoundation.CFRelease(cf_data)if sec_transform:CoreFoundation.CFRelease(sec_transform)", "docstring": "Encrypts plaintext using an RSA public key or certificate\n\n:param certificate_or_public_key:\n A Certificate or PublicKey object\n\n:param data:\n The plaintext - a byte string\n\n:param padding:\n The padding mode to use, specified as a kSecPadding*Key value\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the ciphertext", "id": "f9511:m12"} {"signature": "def _decrypt(private_key, ciphertext, padding):", "body": "if not 
isinstance(private_key, PrivateKey):raise TypeError(pretty_message('''''',type_name(private_key)))if not isinstance(ciphertext, byte_cls):raise TypeError(pretty_message('''''',type_name(ciphertext)))if not padding:raise ValueError('')cf_data = Nonesec_transform = Nonetry:cf_data = CFHelpers.cf_data_from_bytes(ciphertext)error_pointer = new(CoreFoundation, '')sec_transform = Security.SecDecryptTransformCreate(private_key.sec_key_ref,error_pointer)handle_cf_error(error_pointer)Security.SecTransformSetAttribute(sec_transform,Security.kSecPaddingKey,padding,error_pointer)handle_cf_error(error_pointer)Security.SecTransformSetAttribute(sec_transform,Security.kSecTransformInputAttributeName,cf_data,error_pointer)handle_cf_error(error_pointer)plaintext = Security.SecTransformExecute(sec_transform, error_pointer)handle_cf_error(error_pointer)return CFHelpers.cf_data_to_bytes(plaintext)finally:if cf_data:CoreFoundation.CFRelease(cf_data)if sec_transform:CoreFoundation.CFRelease(sec_transform)", "docstring": "Decrypts RSA ciphertext using a private key\n\n:param private_key:\n A PrivateKey object\n\n:param ciphertext:\n The ciphertext - a byte string\n\n:param padding:\n The padding mode to use, specified as a kSecPadding*Key value\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9511:m13"} {"signature": "def rsa_pkcs1v15_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if certificate_or_public_key.algorithm != '':raise ValueError('')return _verify(certificate_or_public_key, signature, data, hash_algorithm)", "docstring": "Verifies an RSASSA-PKCS-v1.5 signature.\n\nWhen the hash_algorithm is \"raw\", the operation is identical to RSA\npublic key decryption. 
That is: the data is not hashed and no ASN.1\nstructure with an algorithm identifier of the hash algorithm is placed in\nthe encrypted byte string.\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n A byte string of the signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\", \"sha512\" or \"raw\"\n\n:raises:\n oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9511:m14"} {"signature": "def rsa_pss_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if not isinstance(certificate_or_public_key, (Certificate, PublicKey)):raise TypeError(pretty_message('''''',type_name(certificate_or_public_key)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))if certificate_or_public_key.algorithm != '':raise ValueError('')hash_length = {'': ,'': ,'': ,'': ,'': }.get(hash_algorithm, )key_length = certificate_or_public_key.byte_sizebuffer = buffer_from_bytes(key_length)output_length = new(Security, '', key_length)result = Security.SecKeyEncrypt(certificate_or_public_key.sec_key_ref,SecurityConst.kSecPaddingNone,signature,len(signature),buffer,output_length)handle_sec_error(result)plaintext = bytes_from_buffer(buffer, deref(output_length))if not verify_pss_padding(hash_algorithm, hash_length, certificate_or_public_key.bit_size, data, plaintext):raise SignatureError('')", "docstring": "Verifies an RSASSA-PSS signature. For the PSS padding the mask gen algorithm\nwill be mgf1 using the same hash algorithm as the signature. 
The salt length\nwill be the length of the hash algorithm, and the trailer field will be the\nstandard 0xBC byte.\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n A byte string of the signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9511:m15"} {"signature": "def dsa_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if certificate_or_public_key.algorithm != '':raise ValueError('')return _verify(certificate_or_public_key, signature, data, hash_algorithm)", "docstring": "Verifies a DSA signature\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n A byte string of the signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9511:m16"} {"signature": "def ecdsa_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if certificate_or_public_key.algorithm != '':raise ValueError('')return _verify(certificate_or_public_key, signature, data, hash_algorithm)", "docstring": "Verifies an ECDSA signature\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n A byte string of the signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9511:m17"} {"signature": "def _verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if not isinstance(certificate_or_public_key, (Certificate, PublicKey)):raise TypeError(pretty_message('''''',type_name(certificate_or_public_key)))if not isinstance(signature, byte_cls):raise TypeError(pretty_message('''''',type_name(signature)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))valid_hash_algorithms = set(['', '', '', '', '', ''])if certificate_or_public_key.algorithm == '':valid_hash_algorithms |= set([''])if hash_algorithm not in valid_hash_algorithms:valid_hash_algorithms_error = ''if certificate_or_public_key.algorithm == '':valid_hash_algorithms_error += ''raise ValueError(pretty_message('''''',valid_hash_algorithms_error,repr(hash_algorithm)))if certificate_or_public_key.algorithm == '' and hash_algorithm == '':if len(data) > 
certificate_or_public_key.byte_size - :raise ValueError(pretty_message('''''',certificate_or_public_key.byte_size,len(data)))result = Security.SecKeyRawVerify(certificate_or_public_key.sec_key_ref,SecurityConst.kSecPaddingPKCS1,data,len(data),signature,len(signature))if result == SecurityConst.errSecVerifyFailed or result == SecurityConst.errSSLCrypto:raise SignatureError('')handle_sec_error(result)returncf_signature = Nonecf_data = Nonecf_hash_length = Nonesec_transform = Nonetry:error_pointer = new(CoreFoundation, '')cf_signature = CFHelpers.cf_data_from_bytes(signature)sec_transform = Security.SecVerifyTransformCreate(certificate_or_public_key.sec_key_ref,cf_signature,error_pointer)handle_cf_error(error_pointer)hash_constant = {'': Security.kSecDigestMD5,'': Security.kSecDigestSHA1,'': Security.kSecDigestSHA2,'': Security.kSecDigestSHA2,'': Security.kSecDigestSHA2,'': Security.kSecDigestSHA2}[hash_algorithm]Security.SecTransformSetAttribute(sec_transform,Security.kSecDigestTypeAttribute,hash_constant,error_pointer)handle_cf_error(error_pointer)if hash_algorithm in set(['', '', '', '']):hash_length = {'': ,'': ,'': ,'': }[hash_algorithm]cf_hash_length = CFHelpers.cf_number_from_integer(hash_length)Security.SecTransformSetAttribute(sec_transform,Security.kSecDigestLengthAttribute,cf_hash_length,error_pointer)handle_cf_error(error_pointer)if certificate_or_public_key.algorithm == '':Security.SecTransformSetAttribute(sec_transform,Security.kSecPaddingKey,Security.kSecPaddingPKCS1Key,error_pointer)handle_cf_error(error_pointer)cf_data = CFHelpers.cf_data_from_bytes(data)Security.SecTransformSetAttribute(sec_transform,Security.kSecTransformInputAttributeName,cf_data,error_pointer)handle_cf_error(error_pointer)res = Security.SecTransformExecute(sec_transform, error_pointer)if not is_null(error_pointer):error = unwrap(error_pointer)if not is_null(error):raise SignatureError('')res = bool(CoreFoundation.CFBooleanGetValue(res))if not res:raise SignatureError('')finally:if sec_transform:CoreFoundation.CFRelease(sec_transform)if cf_signature:CoreFoundation.CFRelease(cf_signature)if cf_data:CoreFoundation.CFRelease(cf_data)if cf_hash_length:CoreFoundation.CFRelease(cf_hash_length)", "docstring": "Verifies an RSA, DSA or ECDSA signature\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n A byte string of the signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9511:m18"} {"signature": "def rsa_pkcs1v15_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '':raise ValueError('')return _sign(private_key, data, hash_algorithm)", "docstring": "Generates an RSASSA-PKCS-v1.5 signature.\n\nWhen the hash_algorithm is \"raw\", the operation is identical to RSA\nprivate key encryption. 
That is: the data is not hashed and no ASN.1\nstructure with an algorithm identifier of the hash algorithm is placed in\nthe encrypted byte string.\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\",\n \"sha512\" or \"raw\"\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9511:m19"} {"signature": "def rsa_pss_sign(private_key, data, hash_algorithm):", "body": "if not isinstance(private_key, PrivateKey):raise TypeError(pretty_message('''''',type_name(private_key)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))if private_key.algorithm != '':raise ValueError('')hash_length = {'': ,'': ,'': ,'': ,'': }.get(hash_algorithm, )encoded_data = add_pss_padding(hash_algorithm, hash_length, private_key.bit_size, data)key_length = private_key.byte_sizebuffer = buffer_from_bytes(key_length)output_length = new(Security, '', key_length)result = Security.SecKeyDecrypt(private_key.sec_key_ref,SecurityConst.kSecPaddingNone,encoded_data,len(encoded_data),buffer,output_length)handle_sec_error(result)return bytes_from_buffer(buffer, deref(output_length))", "docstring": "Generates an RSASSA-PSS signature. For the PSS padding the mask gen\nalgorithm will be mgf1 using the same hash algorithm as the signature. The\nsalt length will be the length of the hash algorithm, and the trailer field\nwill be the standard 0xBC byte.\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or\n \"sha512\"\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9511:m20"} {"signature": "def dsa_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '':raise ValueError('')return _sign(private_key, data, hash_algorithm)", "docstring": "Generates a DSA signature\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or\n \"sha512\"\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9511:m21"} {"signature": "def ecdsa_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '':raise ValueError('')return _sign(private_key, data, hash_algorithm)", "docstring": "Generates an ECDSA signature\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or\n \"sha512\"\n\n:raises:\n ValueError - when any of the parameters 
contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9511:m22"} {"signature": "def _sign(private_key, data, hash_algorithm):", "body": "if not isinstance(private_key, PrivateKey):raise TypeError(pretty_message('''''',type_name(private_key)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))valid_hash_algorithms = set(['', '', '', '', '', ''])if private_key.algorithm == '':valid_hash_algorithms |= set([''])if hash_algorithm not in valid_hash_algorithms:valid_hash_algorithms_error = ''if private_key.algorithm == '':valid_hash_algorithms_error += ''raise ValueError(pretty_message('''''',valid_hash_algorithms_error,repr(hash_algorithm)))if private_key.algorithm == '' and hash_algorithm == '':if len(data) > private_key.byte_size - :raise ValueError(pretty_message('''''',private_key.byte_size,len(data)))key_length = private_key.byte_sizebuffer = buffer_from_bytes(key_length)output_length = new(Security, '', key_length)result = Security.SecKeyRawSign(private_key.sec_key_ref,SecurityConst.kSecPaddingPKCS1,data,len(data),buffer,output_length)handle_sec_error(result)return bytes_from_buffer(buffer, deref(output_length))cf_signature = Nonecf_data = Nonecf_hash_length = Nonesec_transform = Nonetry:error_pointer = new(CoreFoundation, '')sec_transform = Security.SecSignTransformCreate(private_key.sec_key_ref, error_pointer)handle_cf_error(error_pointer)hash_constant = {'': Security.kSecDigestMD5,'': Security.kSecDigestSHA1,'': Security.kSecDigestSHA2,'': Security.kSecDigestSHA2,'': Security.kSecDigestSHA2,'': Security.kSecDigestSHA2}[hash_algorithm]Security.SecTransformSetAttribute(sec_transform,Security.kSecDigestTypeAttribute,hash_constant,error_pointer)handle_cf_error(error_pointer)if hash_algorithm in set(['', '', '', '']):hash_length = {'': ,'': ,'': ,'': }[hash_algorithm]cf_hash_length = CFHelpers.cf_number_from_integer(hash_length)Security.SecTransformSetAttribute(sec_transform,Security.kSecDigestLengthAttribute,cf_hash_length,error_pointer)handle_cf_error(error_pointer)if private_key.algorithm == '':Security.SecTransformSetAttribute(sec_transform,Security.kSecPaddingKey,Security.kSecPaddingPKCS1Key,error_pointer)handle_cf_error(error_pointer)cf_data = CFHelpers.cf_data_from_bytes(data)Security.SecTransformSetAttribute(sec_transform,Security.kSecTransformInputAttributeName,cf_data,error_pointer)handle_cf_error(error_pointer)cf_signature = Security.SecTransformExecute(sec_transform, error_pointer)handle_cf_error(error_pointer)return CFHelpers.cf_data_to_bytes(cf_signature)finally:if sec_transform:CoreFoundation.CFRelease(sec_transform)if cf_signature:CoreFoundation.CFRelease(cf_signature)if cf_data:CoreFoundation.CFRelease(cf_data)if cf_hash_length:CoreFoundation.CFRelease(cf_hash_length)", "docstring": "Generates an RSA, DSA or ECDSA signature\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or\n \"sha512\"\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9511:m23"} {"signature": "def __init__(self, sec_key_ref, 
asn1):", "body": "self.sec_key_ref = sec_key_refself.asn1 = asn1self._lib = CoreFoundation", "docstring": ":param sec_key_ref:\n A Security framework SecKeyRef value from loading/importing the\n key\n\n:param asn1:\n An asn1crypto.keys.PrivateKeyInfo object", "id": "f9511:c0:m0"} {"signature": "@propertydef algorithm(self):", "body": "return self.asn1.algorithm", "docstring": ":return:\n A unicode string of \"rsa\", \"dsa\" or \"ec\"", "id": "f9511:c0:m1"} {"signature": "@propertydef curve(self):", "body": "return self.asn1.curve[]", "docstring": ":return:\n A unicode string of EC curve name", "id": "f9511:c0:m2"} {"signature": "@propertydef bit_size(self):", "body": "return self.asn1.bit_size", "docstring": ":return:\n The number of bits in the key, as an integer", "id": "f9511:c0:m3"} {"signature": "@propertydef byte_size(self):", "body": "return self.asn1.byte_size", "docstring": ":return:\n The number of bytes in the key, as an integer", "id": "f9511:c0:m4"} {"signature": "def __init__(self, sec_key_ref, asn1):", "body": "PrivateKey.__init__(self, sec_key_ref, asn1)", "docstring": ":param sec_key_ref:\n A Security framework SecKeyRef value from loading/importing the\n key\n\n:param asn1:\n An asn1crypto.keys.PublicKeyInfo object", "id": "f9511:c1:m0"} {"signature": "def __init__(self, sec_certificate_ref, asn1):", "body": "self.sec_certificate_ref = sec_certificate_refself.asn1 = asn1", "docstring": ":param sec_certificate_ref:\n A Security framework SecCertificateRef value from loading/importing\n the certificate\n\n:param asn1:\n An asn1crypto.x509.Certificate object", "id": "f9511:c2:m0"} {"signature": "@propertydef algorithm(self):", "body": "return self.public_key.algorithm", "docstring": ":return:\n A unicode string of \"rsa\", \"dsa\" or \"ec\"", "id": "f9511:c2:m1"} {"signature": "@propertydef curve(self):", "body": "return self.public_key.curve", "docstring": ":return:\n A unicode string of EC curve name", "id": "f9511:c2:m2"} {"signature": "@propertydef bit_size(self):", "body": "return self.public_key.bit_size", "docstring": ":return:\n The number of bits in the public key, as an integer", "id": "f9511:c2:m3"} {"signature": "@propertydef byte_size(self):", "body": "return self.public_key.byte_size", "docstring": ":return:\n The number of bytes in the public key, as an integer", "id": "f9511:c2:m4"} {"signature": "@propertydef sec_key_ref(self):", "body": "return self.public_key.sec_key_ref", "docstring": ":return:\n The SecKeyRef of the public key", "id": "f9511:c2:m5"} {"signature": "@propertydef public_key(self):", "body": "if not self._public_key and self.sec_certificate_ref:sec_public_key_ref_pointer = new(Security, '')res = Security.SecCertificateCopyPublicKey(self.sec_certificate_ref, sec_public_key_ref_pointer)handle_sec_error(res)sec_public_key_ref = unwrap(sec_public_key_ref_pointer)self._public_key = PublicKey(sec_public_key_ref, self.asn1[''][''])return self._public_key", "docstring": ":return:\n The PublicKey object for the public key this certificate contains", "id": "f9511:c2:m6"} {"signature": "@propertydef self_signed(self):", "body": "if self._self_signed is None:self._self_signed = Falseif self.asn1.self_signed in set(['', '']):signature_algo = self.asn1[''].signature_algohash_algo = self.asn1[''].hash_algoif signature_algo == '':verify_func = rsa_pkcs1v15_verifyelif signature_algo == '':verify_func = dsa_verifyelif signature_algo == '':verify_func = ecdsa_verifyelse:raise 
OSError(pretty_message('''''',signature_algo))try:verify_func(self.public_key,self.asn1[''].native,self.asn1[''].dump(),hash_algo)self._self_signed = Trueexcept (SignatureError):passreturn self._self_signed", "docstring": ":return:\n A boolean - if the certificate is self-signed", "id": "f9511:c2:m7"} {"signature": "def aes_cbc_no_padding_encrypt(key, data, iv):", "body": "if len(key) not in [, , ]:raise ValueError(pretty_message('''''',len(key)))if not iv:iv = rand_bytes()elif len(iv) != :raise ValueError(pretty_message('''''',len(iv)))if len(data) % != :raise ValueError(pretty_message('''''',len(data)))return (iv, _encrypt(Security.kSecAttrKeyTypeAES, key, data, iv, Security.kSecPaddingNoneKey))", "docstring": "Encrypts plaintext using AES in CBC mode with a 128, 192 or 256 bit key and\nno padding. This means the ciphertext must be an exact multiple of 16 bytes\nlong.\n\n:param key:\n The encryption key - a byte string either 16, 24 or 32 bytes long\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The initialization vector - either a byte string 16-bytes long or None\n to generate an IV\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A tuple of two byte strings (iv, ciphertext)", "id": "f9513:m0"} {"signature": "def aes_cbc_no_padding_decrypt(key, data, iv):", "body": "if len(key) not in [, , ]:raise ValueError(pretty_message('''''',len(key)))if len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return _decrypt(Security.kSecAttrKeyTypeAES, key, data, iv, Security.kSecPaddingNoneKey)", "docstring": "Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key and no\npadding.\n\n:param key:\n The encryption key - a byte string either 16, 24 or 32 bytes long\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector - a byte string 16-bytes long\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9513:m1"} {"signature": "def aes_cbc_pkcs7_encrypt(key, data, iv):", "body": "if len(key) not in [, , ]:raise ValueError(pretty_message('''''',len(key)))if not iv:iv = rand_bytes()elif len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return (iv, _encrypt(Security.kSecAttrKeyTypeAES, key, data, iv, Security.kSecPaddingPKCS7Key))", "docstring": "Encrypts plaintext using AES in CBC mode with a 128, 192 or 256 bit key and\nPKCS#7 padding.\n\n:param key:\n The encryption key - a byte string either 16, 24 or 32 bytes long\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The initialization vector - either a byte string 16-bytes long or None\n to generate an IV\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A tuple of two byte strings (iv, ciphertext)", "id": "f9513:m2"} {"signature": "def aes_cbc_pkcs7_decrypt(key, data, iv):", "body": "if len(key) not in [, , ]:raise ValueError(pretty_message('''''',len(key)))if len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return _decrypt(Security.kSecAttrKeyTypeAES, key, data, iv, Security.kSecPaddingPKCS7Key)", "docstring": 
"Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key\n\n:param key:\n The encryption key - a byte string either 16, 24 or 32 bytes long\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector - a byte string 16-bytes long\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9513:m3"} {"signature": "def rc4_encrypt(key, data):", "body": "if len(key) < or len(key) > :raise ValueError(pretty_message('''''',len(key)))return _encrypt(Security.kSecAttrKeyTypeRC4, key, data, None, None)", "docstring": "Encrypts plaintext using RC4 with a 40-128 bit key\n\n:param key:\n The encryption key - a byte string 5-16 bytes long\n\n:param data:\n The plaintext - a byte string\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the ciphertext", "id": "f9513:m4"} {"signature": "def rc4_decrypt(key, data):", "body": "if len(key) < or len(key) > :raise ValueError(pretty_message('''''',len(key)))return _decrypt(Security.kSecAttrKeyTypeRC4, key, data, None, None)", "docstring": "Decrypts RC4 ciphertext using a 40-128 bit key\n\n:param key:\n The encryption key - a byte string 5-16 bytes long\n\n:param data:\n The ciphertext - a byte string\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9513:m5"} {"signature": "def rc2_cbc_pkcs5_encrypt(key, data, iv):", "body": "if len(key) < or len(key) > :raise ValueError(pretty_message('''''',len(key)))if not iv:iv = rand_bytes()elif len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return (iv, _encrypt(Security.kSecAttrKeyTypeRC2, key, data, iv, Security.kSecPaddingPKCS5Key))", "docstring": "Encrypts plaintext using RC2 with a 64 bit key\n\n:param key:\n The encryption key - a byte string 8 bytes long\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The 8-byte initialization vector to use - a byte string - set as None\n to generate an appropriate one\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A tuple of two byte strings (iv, ciphertext)", "id": "f9513:m6"} {"signature": "def rc2_cbc_pkcs5_decrypt(key, data, iv):", "body": "if len(key) < or len(key) > :raise ValueError(pretty_message('''''',len(key)))if len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return _decrypt(Security.kSecAttrKeyTypeRC2, key, data, iv, Security.kSecPaddingPKCS5Key)", "docstring": "Decrypts RC2 ciphertext using a 64 bit key\n\n:param key:\n The encryption key - a byte string 8 bytes long\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector used for encryption - a byte string\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the 
plaintext", "id": "f9513:m7"} {"signature": "def tripledes_cbc_pkcs5_encrypt(key, data, iv):", "body": "if len(key) != and len(key) != :raise ValueError(pretty_message('''''',len(key)))if not iv:iv = rand_bytes()elif len(iv) != :raise ValueError(pretty_message('''''',len(iv)))if len(key) == :key = key + key[:]return (iv, _encrypt(Security.kSecAttrKeyType3DES, key, data, iv, Security.kSecPaddingPKCS5Key))", "docstring": "Encrypts plaintext using 3DES in either 2 or 3 key mode\n\n:param key:\n The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The 8-byte initialization vector to use - a byte string - set as None\n to generate an appropriate one\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A tuple of two byte strings (iv, ciphertext)", "id": "f9513:m8"} {"signature": "def tripledes_cbc_pkcs5_decrypt(key, data, iv):", "body": "if len(key) != and len(key) != :raise ValueError(pretty_message('''''',len(key)))if len(iv) != :raise ValueError(pretty_message('''''',len(iv)))if len(key) == :key = key + key[:]return _decrypt(Security.kSecAttrKeyType3DES, key, data, iv, Security.kSecPaddingPKCS5Key)", "docstring": "Decrypts 3DES ciphertext in either 2 or 3 key mode\n\n:param key:\n The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector used for encryption - a byte string\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9513:m9"} {"signature": "def des_cbc_pkcs5_encrypt(key, data, iv):", "body": "if len(key) != :raise ValueError(pretty_message('''''',len(key)))if not iv:iv = rand_bytes()elif len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return (iv, _encrypt(Security.kSecAttrKeyTypeDES, key, data, iv, Security.kSecPaddingPKCS5Key))", "docstring": "Encrypts plaintext using DES with a 56 bit key\n\n:param key:\n The encryption key - a byte string 8 bytes long (includes error correction bits)\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The 8-byte initialization vector to use - a byte string - set as None\n to generate an appropriate one\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A tuple of two byte strings (iv, ciphertext)", "id": "f9513:m10"} {"signature": "def des_cbc_pkcs5_decrypt(key, data, iv):", "body": "if len(key) != :raise ValueError(pretty_message('''''',len(key)))if len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return _decrypt(Security.kSecAttrKeyTypeDES, key, data, iv, Security.kSecPaddingPKCS5Key)", "docstring": "Decrypts DES ciphertext using a 56 bit key\n\n:param key:\n The encryption key - a byte string 8 bytes long (includes error correction bits)\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector used for encryption - a byte string\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the 
wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9513:m11"} {"signature": "def _encrypt(cipher, key, data, iv, padding):", "body": "if not isinstance(key, byte_cls):raise TypeError(pretty_message('''''',type_name(key)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))if cipher != Security.kSecAttrKeyTypeRC4 and not isinstance(iv, byte_cls):raise TypeError(pretty_message('''''',type_name(iv)))if cipher != Security.kSecAttrKeyTypeRC4 and not padding:raise ValueError('')cf_dict = Nonecf_key = Nonecf_data = Nonecf_iv = Nonesec_key = Nonesec_transform = Nonetry:cf_dict = CFHelpers.cf_dictionary_from_pairs([(Security.kSecAttrKeyType, cipher)])cf_key = CFHelpers.cf_data_from_bytes(key)cf_data = CFHelpers.cf_data_from_bytes(data)error_pointer = new(CoreFoundation, '')sec_key = Security.SecKeyCreateFromData(cf_dict, cf_key, error_pointer)handle_cf_error(error_pointer)sec_transform = Security.SecEncryptTransformCreate(sec_key, error_pointer)handle_cf_error(error_pointer)if cipher != Security.kSecAttrKeyTypeRC4:Security.SecTransformSetAttribute(sec_transform, Security.kSecModeCBCKey, null(), error_pointer)handle_cf_error(error_pointer)Security.SecTransformSetAttribute(sec_transform, Security.kSecPaddingKey, padding, error_pointer)handle_cf_error(error_pointer)cf_iv = CFHelpers.cf_data_from_bytes(iv)Security.SecTransformSetAttribute(sec_transform, Security.kSecIVKey, cf_iv, error_pointer)handle_cf_error(error_pointer)Security.SecTransformSetAttribute(sec_transform,Security.kSecTransformInputAttributeName,cf_data,error_pointer)handle_cf_error(error_pointer)ciphertext = Security.SecTransformExecute(sec_transform, error_pointer)handle_cf_error(error_pointer)return CFHelpers.cf_data_to_bytes(ciphertext)finally:if cf_dict:CoreFoundation.CFRelease(cf_dict)if cf_key:CoreFoundation.CFRelease(cf_key)if cf_data:CoreFoundation.CFRelease(cf_data)if cf_iv:CoreFoundation.CFRelease(cf_iv)if sec_key:CoreFoundation.CFRelease(sec_key)if sec_transform:CoreFoundation.CFRelease(sec_transform)", "docstring": "Encrypts plaintext\n\n:param cipher:\n A kSecAttrKeyType* value that specifies the cipher to use\n\n:param key:\n The encryption key - a byte string 5-16 bytes long\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The initialization vector - a byte string - unused for RC4\n\n:param padding:\n The padding mode to use, specified as a kSecPadding*Key value - unused for RC4\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the ciphertext", "id": "f9513:m12"} {"signature": "def _decrypt(cipher, key, data, iv, padding):", "body": "if not isinstance(key, byte_cls):raise TypeError(pretty_message('''''',type_name(key)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))if cipher != Security.kSecAttrKeyTypeRC4 and not isinstance(iv, byte_cls):raise TypeError(pretty_message('''''',type_name(iv)))if cipher != Security.kSecAttrKeyTypeRC4 and not padding:raise ValueError('')cf_dict = Nonecf_key = Nonecf_data = Nonecf_iv = Nonesec_key = Nonesec_transform = Nonetry:cf_dict = CFHelpers.cf_dictionary_from_pairs([(Security.kSecAttrKeyType, cipher)])cf_key = CFHelpers.cf_data_from_bytes(key)cf_data = CFHelpers.cf_data_from_bytes(data)error_pointer = new(CoreFoundation, '')sec_key = 
Security.SecKeyCreateFromData(cf_dict, cf_key, error_pointer)handle_cf_error(error_pointer)sec_transform = Security.SecDecryptTransformCreate(sec_key, error_pointer)handle_cf_error(error_pointer)if cipher != Security.kSecAttrKeyTypeRC4:Security.SecTransformSetAttribute(sec_transform, Security.kSecModeCBCKey, null(), error_pointer)handle_cf_error(error_pointer)Security.SecTransformSetAttribute(sec_transform, Security.kSecPaddingKey, padding, error_pointer)handle_cf_error(error_pointer)cf_iv = CFHelpers.cf_data_from_bytes(iv)Security.SecTransformSetAttribute(sec_transform, Security.kSecIVKey, cf_iv, error_pointer)handle_cf_error(error_pointer)Security.SecTransformSetAttribute(sec_transform,Security.kSecTransformInputAttributeName,cf_data,error_pointer)handle_cf_error(error_pointer)plaintext = Security.SecTransformExecute(sec_transform, error_pointer)handle_cf_error(error_pointer)return CFHelpers.cf_data_to_bytes(plaintext)finally:if cf_dict:CoreFoundation.CFRelease(cf_dict)if cf_key:CoreFoundation.CFRelease(cf_key)if cf_data:CoreFoundation.CFRelease(cf_data)if cf_iv:CoreFoundation.CFRelease(cf_iv)if sec_key:CoreFoundation.CFRelease(sec_key)if sec_transform:CoreFoundation.CFRelease(sec_transform)", "docstring": "Decrypts AES/RC4/RC2/3DES/DES ciphertext\n\n:param cipher:\n A kSecAttrKeyType* value that specifies the cipher to use\n\n:param key:\n The encryption key - a byte string 5-16 bytes long\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector - a byte string - unused for RC4\n\n:param padding:\n The padding mode to use, specified as a kSecPadding*Key value - unused for RC4\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9513:m13"} {"signature": "def ec_generate_pair(curve):", "body": "if curve not in set(['', '', '']):raise ValueError(pretty_message('''''',repr(curve)))curve_num_bytes = CURVE_BYTES[curve]curve_base_point = {'': SECP256R1_BASE_POINT,'': SECP384R1_BASE_POINT,'': SECP521R1_BASE_POINT,}[curve]while True:private_key_bytes = rand_bytes(curve_num_bytes)private_key_int = int_from_bytes(private_key_bytes, signed=False)if private_key_int > and private_key_int < curve_base_point.order:breakprivate_key_info = keys.PrivateKeyInfo({'': ,'': keys.PrivateKeyAlgorithm({'': '','': keys.ECDomainParameters(name='',value=curve)}),'': keys.ECPrivateKey({'': '','': private_key_int}),})private_key_info[''].parsed[''] = private_key_info.public_keypublic_key_info = private_key_info.public_key_inforeturn (public_key_info, private_key_info)", "docstring": "Generates a EC public/private key pair\n\n:param curve:\n A unicode string. 
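The DES and 3DES helpers above hand the block padding off to the OS transform via kSecPaddingPKCS5Key: PKCS#5 padding is simply PKCS#7 padding with an 8-byte block. A minimal pure-Python sketch of that scheme, for illustration only (pkcs7_pad and pkcs7_unpad are names made up here, not part of this module):

```python
# Illustrative sketch of PKCS#7 padding (PKCS#5 is the 8-byte block case).
# The Security.framework transforms above apply this internally when
# kSecPaddingPKCS5Key is set; these helpers are not part of this module.

def pkcs7_pad(data, block_size=8):
    """Appends N bytes of value N so the result is a multiple of block_size."""
    pad_len = block_size - (len(data) % block_size)
    return data + bytes([pad_len]) * pad_len

def pkcs7_unpad(data, block_size=8):
    """Removes and validates PKCS#7 padding."""
    pad_len = data[-1]
    if pad_len < 1 or pad_len > block_size or data[-pad_len:] != bytes([pad_len]) * pad_len:
        raise ValueError('Invalid PKCS#7 padding')
    return data[:-pad_len]

assert pkcs7_unpad(pkcs7_pad(b'plaintext')) == b'plaintext'
```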
Valid values include \"secp256r1\", \"secp384r1\" and\n \"secp521r1\".\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n\n:return:\n A 2-element tuple of (asn1crypto.keys.PublicKeyInfo,\n asn1crypto.keys.PrivateKeyInfo)", "id": "f9516:m0"} {"signature": "def ecdsa_sign(private_key, data, hash_algorithm):", "body": "if not hasattr(private_key, '') or not isinstance(private_key.asn1, keys.PrivateKeyInfo):raise TypeError(pretty_message('''''',type_name(private_key)))curve_name = private_key.curveif curve_name not in set(['', '', '']):raise ValueError(pretty_message(''''''))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))if hash_algorithm not in set(['', '', '', '', '']):raise ValueError(pretty_message('''''',repr(hash_algorithm)))hash_func = getattr(hashlib, hash_algorithm)ec_private_key = private_key.asn1[''].parsedprivate_key_bytes = ec_private_key[''].contentsprivate_key_int = ec_private_key[''].nativecurve_num_bytes = CURVE_BYTES[curve_name]curve_base_point = {'': SECP256R1_BASE_POINT,'': SECP384R1_BASE_POINT,'': SECP521R1_BASE_POINT,}[curve_name]n = curve_base_point.orderdigest = hash_func(data).digest()hash_length = len(digest)h = int_from_bytes(digest, signed=False) % nV = b'' * hash_lengthK = b'' * hash_lengthK = hmac.new(K, V + b'' + private_key_bytes + digest, hash_func).digest()V = hmac.new(K, V, hash_func).digest()K = hmac.new(K, V + b'' + private_key_bytes + digest, hash_func).digest()V = hmac.new(K, V, hash_func).digest()r = s = while True:T = b''while len(T) < curve_num_bytes:V = hmac.new(K, V, hash_func).digest()T += Vk = int_from_bytes(T[:curve_num_bytes], signed=False)if k == or k >= n:continuer = (curve_base_point * k).x % nif r == :continues = (inverse_mod(k, n) * (h + (private_key_int * r) % n)) % nif s == :continuebreakreturn DSASignature({'': r, '': s}).dump()", "docstring": "Generates an ECDSA signature in pure Python (thus slow)\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"sha1\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9516:m1"} {"signature": "def ecdsa_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "has_asn1 = hasattr(certificate_or_public_key, '')if not has_asn1 or not isinstance(certificate_or_public_key.asn1, (keys.PublicKeyInfo, Certificate)):raise TypeError(pretty_message('''''',type_name(certificate_or_public_key)))curve_name = certificate_or_public_key.curveif curve_name not in set(['', '', '']):raise ValueError(pretty_message(''''''))if not isinstance(signature, byte_cls):raise TypeError(pretty_message('''''',type_name(signature)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))if hash_algorithm not in set(['', '', '', '', '']):raise ValueError(pretty_message('''''',repr(hash_algorithm)))asn1 = certificate_or_public_key.asn1if isinstance(asn1, Certificate):asn1 = asn1.public_keycurve_base_point = {'': SECP256R1_BASE_POINT,'': SECP384R1_BASE_POINT,'': SECP521R1_BASE_POINT,}[curve_name]x, y = asn1[''].to_coords()n = curve_base_point.orderpublic_key_point = 
PrimePoint(curve_base_point.curve, x, y, n)try:signature = DSASignature.load(signature)r = signature[''].natives = signature[''].nativeexcept (ValueError):raise SignatureError('')invalid = invalid |= r < invalid |= r >= ninvalid |= s < invalid |= s >= nif invalid:raise SignatureError('')hash_func = getattr(hashlib, hash_algorithm)digest = hash_func(data).digest()z = int_from_bytes(digest, signed=False) % nw = inverse_mod(s, n)u1 = (z * w) % nu2 = (r * w) % nhash_point = (curve_base_point * u1) + (public_key_point * u2)if r != (hash_point.x % n):raise SignatureError('')", "docstring": "Verifies an ECDSA signature in pure Python (thus slow)\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n A byte string of the signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9516:m2"} {"signature": "def pbkdf2_iteration_calculator(hash_algorithm, key_length, target_ms=, quiet=False):", "body": "if hash_algorithm not in set(['', '', '', '', '']):raise ValueError(pretty_message('''''',repr(hash_algorithm)))if not isinstance(key_length, int_types):raise TypeError(pretty_message('''''',type_name(key_length)))if key_length < :raise ValueError(pretty_message('''''',repr(key_length)))if not isinstance(target_ms, int_types):raise TypeError(pretty_message('''''',type_name(target_ms)))if target_ms < :raise ValueError(pretty_message('''''',repr(target_ms)))if pbkdf2.pure_python:raise OSError(pretty_message(''''''))iterations = password = ''.encode('')salt = rand_bytes(key_length)def _measure():start = _get_start()pbkdf2(hash_algorithm, password, salt, iterations, key_length)observed_ms = _get_elapsed(start)if not quiet:print('' % (iterations, observed_ms))return / target_ms * observed_msfraction = _measure()iterations = int(iterations / fraction / )fraction = _measure()iterations = iterations / fractionround_factor = - if iterations < else -result = int(round(iterations, round_factor))if result > :result = (result // ) * return result", "docstring": "Runs pbkdf2() twice to determine the approximate number of iterations to\nuse to hit a desired time per run. 
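Both pure-Python ECDSA routines above rely on a modular inverse: inverse_mod(k, n) when computing s during signing, and inverse_mod(s, n) when recovering w during verification. The helper itself is defined elsewhere in the package; the sketch below is an independent extended-Euclid version shown only to make that step concrete:

```python
# Extended Euclidean algorithm; returns x such that (a * x) % m == 1.
# Sketch only - the package's own inverse_mod helper is what the code above uses.

def inverse_mod(a, m):
    old_r, r = a % m, m
    old_s, s = 1, 0
    while r != 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_s, s = s, old_s - q * s
    if old_r != 1:
        raise ValueError('a has no inverse modulo m')
    return old_s % m

assert (7 * inverse_mod(7, 31)) % 31 == 1
```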
Use this on a production machine to\ndynamically adjust the number of iterations as high as you can.\n\n:param hash_algorithm:\n The string name of the hash algorithm to use: \"md5\", \"sha1\", \"sha224\",\n \"sha256\", \"sha384\", \"sha512\"\n\n:param key_length:\n The length of the desired key in bytes\n\n:param target_ms:\n The number of milliseconds the derivation should take\n\n:param quiet:\n If no output should be printed as attempts are made\n\n:return:\n An integer number of iterations of PBKDF2 using the specified hash\n that will take at least target_ms", "id": "f9517:m0"} {"signature": "def pbkdf1(hash_algorithm, password, salt, iterations, key_length):", "body": "if not isinstance(password, byte_cls):raise TypeError(pretty_message('''''',(type_name(password))))if not isinstance(salt, byte_cls):raise TypeError(pretty_message('''''',(type_name(salt))))if not isinstance(iterations, int_types):raise TypeError(pretty_message('''''',(type_name(iterations))))if iterations < :raise ValueError(pretty_message('''''',repr(iterations)))if not isinstance(key_length, int_types):raise TypeError(pretty_message('''''',(type_name(key_length))))if key_length < :raise ValueError(pretty_message('''''',repr(key_length)))if hash_algorithm not in set(['', '', '']):raise ValueError(pretty_message('''''',repr(hash_algorithm)))if key_length > and hash_algorithm in set(['', '']):raise ValueError(pretty_message('''''',(hash_algorithm, repr(key_length))))if key_length > and hash_algorithm == '':raise ValueError(pretty_message('''''',repr(key_length)))algo = getattr(hashlib, hash_algorithm)output = algo(password + salt).digest()for _ in range(, iterations + ):output = algo(output).digest()return output[:key_length]", "docstring": "An implementation of PBKDF1 - should only be used for interop with legacy\nsystems, not new architectures\n\n:param hash_algorithm:\n The string name of the hash algorithm to use: \"md2\", \"md5\", \"sha1\"\n\n:param password:\n A byte string of the password to use an input to the KDF\n\n:param salt:\n A cryptographic random byte string\n\n:param iterations:\n The numbers of iterations to use when deriving the key\n\n:param key_length:\n The length of the desired key in bytes\n\n:return:\n The derived key as a byte string", "id": "f9517:m1"} {"signature": "def _is_osx_107():", "body": "if sys.platform != '':return Falseversion = platform.mac_ver()[]return tuple(map(int, version.split('')))[:] == (, )", "docstring": ":return:\n A bool if the current machine is running OS X 10.7", "id": "f9518:m0"} {"signature": "def add_pss_padding(hash_algorithm, salt_length, key_length, message):", "body": "if _backend != '' and sys.platform != '':raise SystemError(pretty_message(''''''))if not isinstance(message, byte_cls):raise TypeError(pretty_message('''''',type_name(message)))if not isinstance(salt_length, int_types):raise TypeError(pretty_message('''''',type_name(salt_length)))if salt_length < :raise ValueError(pretty_message('''''',repr(salt_length)))if not isinstance(key_length, int_types):raise TypeError(pretty_message('''''',type_name(key_length)))if key_length < :raise ValueError(pretty_message('''''',repr(key_length)))if hash_algorithm not in set(['', '', '', '', '']):raise ValueError(pretty_message('''''',repr(hash_algorithm)))hash_func = getattr(hashlib, hash_algorithm)em_bits = key_length - em_len = int(math.ceil(em_bits / ))message_digest = hash_func(message).digest()hash_length = len(message_digest)if em_len < hash_length + salt_length + :raise 
ValueError(pretty_message(''''''))if salt_length > :salt = os.urandom(salt_length)else:salt = b''m_prime = (b'' * ) + message_digest + saltm_prime_digest = hash_func(m_prime).digest()padding = b'' * (em_len - salt_length - hash_length - )db = padding + b'' + saltdb_mask = _mgf1(hash_algorithm, m_prime_digest, em_len - hash_length - )masked_db = int_to_bytes(int_from_bytes(db) ^ int_from_bytes(db_mask))masked_db = fill_width(masked_db, len(db_mask))zero_bits = ( * em_len) - em_bitsleft_bit_mask = ('' * zero_bits) + ('' * ( - zero_bits))left_int_mask = int(left_bit_mask, )if left_int_mask != :masked_db = chr_cls(left_int_mask & ord(masked_db[:])) + masked_db[:]return masked_db + m_prime_digest + b''", "docstring": "Pads a byte string using the EMSA-PSS-Encode operation described in PKCS#1\nv2.2.\n\n:param hash_algorithm:\n The string name of the hash algorithm to use: \"sha1\", \"sha224\",\n \"sha256\", \"sha384\", \"sha512\"\n\n:param salt_length:\n The length of the salt as an integer - typically the same as the length\n of the output from the hash_algorithm\n\n:param key_length:\n The length of the RSA key, in bits\n\n:param message:\n A byte string of the message to pad\n\n:return:\n The encoded (passed) message", "id": "f9518:m1"} {"signature": "def verify_pss_padding(hash_algorithm, salt_length, key_length, message, signature):", "body": "if _backend != '' and sys.platform != '':raise SystemError(pretty_message(''''''))if not isinstance(message, byte_cls):raise TypeError(pretty_message('''''',type_name(message)))if not isinstance(signature, byte_cls):raise TypeError(pretty_message('''''',type_name(signature)))if not isinstance(salt_length, int_types):raise TypeError(pretty_message('''''',type_name(salt_length)))if salt_length < :raise ValueError(pretty_message('''''',repr(salt_length)))if hash_algorithm not in set(['', '', '', '', '']):raise ValueError(pretty_message('''''',repr(hash_algorithm)))hash_func = getattr(hashlib, hash_algorithm)em_bits = key_length - em_len = int(math.ceil(em_bits / ))message_digest = hash_func(message).digest()hash_length = len(message_digest)if em_len < hash_length + salt_length + :return Falseif signature[-:] != b'':return Falsezero_bits = ( * em_len) - em_bitsmasked_db_length = em_len - hash_length - masked_db = signature[:masked_db_length]first_byte = ord(masked_db[:])bits_that_should_be_zero = first_byte >> ( - zero_bits)if bits_that_should_be_zero != :return Falsem_prime_digest = signature[masked_db_length:masked_db_length + hash_length]db_mask = _mgf1(hash_algorithm, m_prime_digest, em_len - hash_length - )left_bit_mask = ('' * zero_bits) + ('' * ( - zero_bits))left_int_mask = int(left_bit_mask, )if left_int_mask != :db_mask = chr_cls(left_int_mask & ord(db_mask[:])) + db_mask[:]db = int_to_bytes(int_from_bytes(masked_db) ^ int_from_bytes(db_mask))if len(db) < len(masked_db):db = (b'' * (len(masked_db) - len(db))) + dbzero_length = em_len - hash_length - salt_length - zero_string = b'' * zero_lengthif not constant_compare(db[:zero_length], zero_string):return Falseif db[zero_length:zero_length + ] != b'':return Falsesalt = db[ - salt_length:]m_prime = (b'' * ) + message_digest + salth_prime = hash_func(m_prime).digest()return constant_compare(m_prime_digest, h_prime)", "docstring": "Verifies the PSS padding on an encoded message\n\n:param hash_algorithm:\n The string name of the hash algorithm to use: \"sha1\", \"sha224\",\n \"sha256\", \"sha384\", \"sha512\"\n\n:param salt_length:\n The length of the salt as an integer - typically the same as the 
length\n of the output from the hash_algorithm\n\n:param key_length:\n The length of the RSA key, in bits\n\n:param message:\n A byte string of the message to pad\n\n:param signature:\n The signature to verify\n\n:return:\n A boolean indicating if the signature is invalid", "id": "f9518:m2"} {"signature": "def _mgf1(hash_algorithm, seed, mask_length):", "body": "if not isinstance(seed, byte_cls):raise TypeError(pretty_message('''''',type_name(seed)))if not isinstance(mask_length, int_types):raise TypeError(pretty_message('''''',type_name(mask_length)))if mask_length < :raise ValueError(pretty_message('''''',repr(mask_length)))if hash_algorithm not in set(['', '', '', '', '']):raise ValueError(pretty_message('''''',repr(hash_algorithm)))output = b''hash_length = {'': ,'': ,'': ,'': ,'': }[hash_algorithm]iterations = int(math.ceil(mask_length / hash_length))pack = struct.Struct(b'').packhash_func = getattr(hashlib, hash_algorithm)for counter in range(, iterations):b = pack(counter)output += hash_func(seed + b).digest()return output[:mask_length]", "docstring": "The PKCS#1 MGF1 mask generation algorithm\n\n:param hash_algorithm:\n The string name of the hash algorithm to use: \"sha1\", \"sha224\",\n \"sha256\", \"sha384\", \"sha512\"\n\n:param seed:\n A byte string to use as the seed for the mask\n\n:param mask_length:\n The desired mask length, as an integer\n\n:return:\n A byte string of the mask", "id": "f9518:m3"} {"signature": "def add_pkcs1v15_signature_padding(key_length, data):", "body": "if _backend != '':raise SystemError(pretty_message(''''''))return _add_pkcs1v15_padding(key_length, data, '')", "docstring": "Adds PKCS#1 v1.5 padding to a message to be signed\n\n:param key_length:\n An integer of the number of bytes in the key\n\n:param data:\n A byte string to pad\n\n:return:\n The padded data as a byte string", "id": "f9518:m4"} {"signature": "def remove_pkcs1v15_signature_padding(key_length, data):", "body": "if _backend != '':raise SystemError(pretty_message(''''''))return _remove_pkcs1v15_padding(key_length, data, '')", "docstring": "Removes PKCS#1 v1.5 padding from a signed message using constant time\noperations\n\n:param key_length:\n An integer of the number of bytes in the key\n\n:param data:\n A byte string to unpad\n\n:return:\n The unpadded data as a byte string", "id": "f9518:m5"} {"signature": "def remove_pkcs1v15_encryption_padding(key_length, data):", "body": "if not _is_osx_107():raise SystemError(pretty_message(''''''))return _remove_pkcs1v15_padding(key_length, data, '')", "docstring": "Removes PKCS#1 v1.5 padding from a decrypted message using constant time\noperations\n\n:param key_length:\n An integer of the number of bytes in the key\n\n:param data:\n A byte string to unpad\n\n:return:\n The unpadded data as a byte string", "id": "f9518:m6"} {"signature": "def _add_pkcs1v15_padding(key_length, data, operation):", "body": "if operation == '':second_byte = b''else:second_byte = b''if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))if not isinstance(key_length, int_types):raise TypeError(pretty_message('''''',type_name(key_length)))if key_length < :raise ValueError(pretty_message('''''',repr(key_length)))if len(data) > key_length - :raise ValueError(pretty_message('''''',key_length - ,len(data)))required_bytes = key_length - - len(data)padding = b''while required_bytes > :temp_padding = rand_bytes(required_bytes)temp_padding = b''.join(temp_padding.split(b''))padding += temp_paddingrequired_bytes -= len(temp_padding)return 
b'' + second_byte + padding + b'' + data", "docstring": "Adds PKCS#1 v1.5 padding to a message\n\n:param key_length:\n An integer of the number of bytes in the key\n\n:param data:\n A byte string to unpad\n\n:param operation:\n A unicode string of \"encrypting\" or \"signing\"\n\n:return:\n The padded data as a byte string", "id": "f9518:m7"} {"signature": "def _remove_pkcs1v15_padding(key_length, data, operation):", "body": "if operation == '':second_byte = else:second_byte = if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))if not isinstance(key_length, int_types):raise TypeError(pretty_message('''''',type_name(key_length)))if key_length < :raise ValueError(pretty_message('''''',repr(key_length)))if len(data) != key_length:raise ValueError('' % operation)error = trash = padding_end = for i in range(, len(data)):byte = data[i:i + ]byte_num = ord(byte)if i == :error |= byte_numelif i == :error |= int((byte_num | second_byte) != second_byte)elif i < :error |= int((byte_num ^ ) == )else:non_zero = byte_num | if padding_end == :if non_zero:trash |= ielse:padding_end |= ielse:if non_zero:trash |= ielse:trash |= iif error != :raise ValueError('' % operation)return data[padding_end + :]", "docstring": "Removes PKCS#1 v1.5 padding from a message using constant time operations\n\n:param key_length:\n An integer of the number of bytes in the key\n\n:param data:\n A byte string to unpad\n\n:param operation:\n A unicode string of \"decrypting\" or \"verifying\"\n\n:return:\n The unpadded data as a byte string", "id": "f9518:m8"} {"signature": "def raw_rsa_private_crypt(private_key, data):", "body": "if _backend != '':raise SystemError('')if not hasattr(private_key, '') or not isinstance(private_key.asn1, PrivateKeyInfo):raise TypeError(pretty_message('''''',type_name(private_key)))algo = private_key.asn1[''][''].nativeif algo != '':raise ValueError(pretty_message('''''',algo.upper()))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))rsa_private_key = private_key.asn1[''].parsedtransformed_int = pow(int_from_bytes(data),rsa_private_key[''].native,rsa_private_key[''].native)return int_to_bytes(transformed_int, width=private_key.asn1.byte_size)", "docstring": "Performs a raw RSA algorithm in a byte string using a private key.\nThis is a low-level primitive and is prone to disastrous results if used\nincorrectly.\n\n:param private_key:\n An oscrypto.asymmetric.PrivateKey object\n\n:param data:\n A byte string of the plaintext to be signed or ciphertext to be\n decrypted. Must be less than or equal to the length of the private key.\n In the case of signing, padding must already be applied. 
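Both add_pss_padding and verify_pss_padding stretch the hash of M' into a DB mask using MGF1. A self-contained sketch of the MGF1 construction from PKCS#1 (RFC 8017, appendix B.2.1), written here with hashlib directly rather than the module's own helper:

```python
import hashlib
import struct

# Sketch of MGF1 (PKCS#1 / RFC 8017, appendix B.2.1): hash the seed with an
# incrementing 4-byte big-endian counter until mask_length bytes are produced.
def mgf1(hash_algorithm, seed, mask_length):
    hash_func = getattr(hashlib, hash_algorithm)
    output = b''
    counter = 0
    while len(output) < mask_length:
        output += hash_func(seed + struct.pack('>I', counter)).digest()
        counter += 1
    return output[:mask_length]

# 20-byte mask from a SHA-1 based MGF1
assert len(mgf1('sha1', b'seed', 20)) == 20
```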
In the case of\n decryption, padding must be removed afterward.\n\n:return:\n A byte string of the transformed data", "id": "f9518:m9"} {"signature": "def raw_rsa_public_crypt(certificate_or_public_key, data):", "body": "if _backend != '':raise SystemError('')has_asn1 = hasattr(certificate_or_public_key, '')valid_types = (PublicKeyInfo, Certificate)if not has_asn1 or not isinstance(certificate_or_public_key.asn1, valid_types):raise TypeError(pretty_message('''''',type_name(certificate_or_public_key)))algo = certificate_or_public_key.asn1[''][''].nativeif algo != '':raise ValueError(pretty_message('''''',algo.upper()))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))rsa_public_key = certificate_or_public_key.asn1[''].parsedtransformed_int = pow(int_from_bytes(data),rsa_public_key[''].native,rsa_public_key[''].native)return int_to_bytes(transformed_int,width=certificate_or_public_key.asn1.byte_size)", "docstring": "Performs a raw RSA algorithm in a byte string using a certificate or\npublic key. This is a low-level primitive and is prone to disastrous results\nif used incorrectly.\n\n:param certificate_or_public_key:\n An oscrypto.asymmetric.PublicKey or oscrypto.asymmetric.Certificate\n object\n\n:param data:\n A byte string of the signature when verifying, or padded plaintext when\n encrypting. Must be less than or equal to the length of the public key.\n When verifying, padding will need to be removed afterwards. When\n encrypting, padding must be applied before.\n\n:return:\n A byte string of the transformed data", "id": "f9518:m10"} {"signature": "def type_name(value):", "body": "if inspect.isclass(value):cls = valueelse:cls = value.__class__if cls.__module__ in set(['', '']):return cls.__name__return '' % (cls.__module__, cls.__name__)", "docstring": "Returns a user-readable name for the type of an object\n\n:param value:\n A value to get the type name of\n\n:return:\n A unicode string of the object's type name", "id": "f9519:m0"} {"signature": "def rand_bytes(length):", "body": "if not isinstance(length, int_types):raise TypeError(pretty_message('''''',type_name(length)))if length < :raise ValueError('')if length > :raise ValueError('')return os.urandom(length)", "docstring": "Returns a number of random bytes suitable for cryptographic purposes\n\n:param length:\n The desired number of bytes\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by OpenSSL\n\n:return:\n A byte string", "id": "f9520:m0"} {"signature": "def pbkdf2(hash_algorithm, password, salt, iterations, key_length):", "body": "if not isinstance(password, byte_cls):raise TypeError(pretty_message('''''',type_name(password)))if not isinstance(salt, byte_cls):raise TypeError(pretty_message('''''',type_name(salt)))if not isinstance(iterations, int_types):raise TypeError(pretty_message('''''',type_name(iterations)))if iterations < :raise ValueError(pretty_message('''''',repr(iterations)))if not isinstance(key_length, int_types):raise TypeError(pretty_message('''''',type_name(key_length)))if key_length < :raise ValueError(pretty_message('''''',repr(key_length)))if hash_algorithm not in set(['', '', '', '', '', '']):raise ValueError(pretty_message('''''',repr(hash_algorithm)))algo = getattr(hashlib, hash_algorithm)hash_length = {'': ,'': ,'': ,'': ,'': ,'': }[hash_algorithm]blocks = int(math.ceil(key_length / hash_length))original_hmac = hmac.new(password, None, 
algo)int_pack = struct.Struct(b'').packoutput = b''for block in range(, blocks + ):prf = original_hmac.copy()prf.update(salt + int_pack(block))last = prf.digest()u = int_from_bytes(last)for _ in range(, iterations + ):prf = original_hmac.copy()prf.update(last)last = prf.digest()u ^= int_from_bytes(last)t = int_to_bytes(u)output += treturn output[:key_length]", "docstring": "Implements PBKDF2 from PKCS#5 v2.2 in pure Python\n\n:param hash_algorithm:\n The string name of the hash algorithm to use: \"md5\", \"sha1\", \"sha224\",\n \"sha256\", \"sha384\", \"sha512\"\n\n:param password:\n A byte string of the password to use an input to the KDF\n\n:param salt:\n A cryptographic random byte string\n\n:param iterations:\n The numbers of iterations to use when deriving the key\n\n:param key_length:\n The length of the desired key in bytes\n\n:return:\n The derived key as a byte string", "id": "f9521:m0"} {"signature": "def pretty_message(string, *params):", "body": "output = textwrap.dedent(string)if output.find('') != -:output = re.sub('', '', output)if params:output = output % paramsoutput = output.strip()return output", "docstring": "Takes a multi-line string and does the following:\n\n - dedents\n - converts newlines with text before and after into a single line\n - strips leading and trailing whitespace\n\n:param string:\n The string to format\n\n:param *params:\n Params to interpolate into the string\n\n:return:\n The formatted string", "id": "f9523:m0"} {"signature": "def extract_chain(server_handshake_bytes):", "body": "output = []chain_bytes = Nonefor record_type, _, record_data in parse_tls_records(server_handshake_bytes):if record_type != b'':continuefor message_type, message_data in parse_handshake_messages(record_data):if message_type == b'':chain_bytes = message_databreakif chain_bytes:breakif chain_bytes:pointer = while pointer < len(chain_bytes):cert_length = int_from_bytes(chain_bytes[pointer:pointer + ])cert_start = pointer + cert_end = cert_start + cert_lengthpointer = cert_endcert_bytes = chain_bytes[cert_start:cert_end]output.append(Certificate.load(cert_bytes))return output", "docstring": "Extracts the X.509 certificates from the server handshake bytes for use\nwhen debugging\n\n:param server_handshake_bytes:\n A byte string of the handshake data received from the server\n\n:return:\n A list of asn1crypto.x509.Certificate objects", "id": "f9524:m0"} {"signature": "def detect_client_auth_request(server_handshake_bytes):", "body": "for record_type, _, record_data in parse_tls_records(server_handshake_bytes):if record_type != b'':continuefor message_type, message_data in parse_handshake_messages(record_data):if message_type == b'':return Truereturn False", "docstring": "Determines if a CertificateRequest message is sent from the server asking\nthe client for a certificate\n\n:param server_handshake_bytes:\n A byte string of the handshake data received from the server\n\n:return:\n A boolean - if a client certificate request was found", "id": "f9524:m1"} {"signature": "def get_dh_params_length(server_handshake_bytes):", "body": "output = Nonedh_params_bytes = Nonefor record_type, _, record_data in parse_tls_records(server_handshake_bytes):if record_type != b'':continuefor message_type, message_data in parse_handshake_messages(record_data):if message_type == b'':dh_params_bytes = message_databreakif dh_params_bytes:breakif dh_params_bytes:output = int_from_bytes(dh_params_bytes[:]) * return output", "docstring": "Determines the length of the DH params from the 
ServerKeyExchange\n\n:param server_handshake_bytes:\n A byte string of the handshake data received from the server\n\n:return:\n None or an integer of the bit size of the DH parameters", "id": "f9524:m2"} {"signature": "def parse_alert(server_handshake_bytes):", "body": "for record_type, _, record_data in parse_tls_records(server_handshake_bytes):if record_type != b'':continueif len(record_data) != :return Nonereturn (int_from_bytes(record_data[:]), int_from_bytes(record_data[:]))return None", "docstring": "Parses the handshake for protocol alerts\n\n:param server_handshake_bytes:\n A byte string of the handshake data received from the server\n\n:return:\n None or an 2-element tuple of integers:\n 0: 1 (warning) or 2 (fatal)\n 1: The alert description (see https://tools.ietf.org/html/rfc5246#section-7.2)", "id": "f9524:m3"} {"signature": "def parse_session_info(server_handshake_bytes, client_handshake_bytes):", "body": "protocol = Nonecipher_suite = Nonecompression = Falsesession_id = Nonesession_ticket = Noneserver_session_id = Noneclient_session_id = Nonefor record_type, _, record_data in parse_tls_records(server_handshake_bytes):if record_type != b'':continuefor message_type, message_data in parse_handshake_messages(record_data):if message_type != b'':continueprotocol = {b'': \"\",b'': \"\",b'': \"\",b'': \"\",b'': \"\",}[message_data[:]]session_id_length = int_from_bytes(message_data[:])if session_id_length > :server_session_id = message_data[: + session_id_length]cipher_suite_start = + session_id_lengthcipher_suite_bytes = message_data[cipher_suite_start:cipher_suite_start + ]cipher_suite = CIPHER_SUITE_MAP[cipher_suite_bytes]compression_start = cipher_suite_start + compression = message_data[compression_start:compression_start + ] != b''extensions_length_start = compression_start + extensions_data = message_data[extensions_length_start:]for extension_type, extension_data in _parse_hello_extensions(extensions_data):if extension_type == :session_ticket = \"\"breakbreakfor record_type, _, record_data in parse_tls_records(client_handshake_bytes):if record_type != b'':continuefor message_type, message_data in parse_handshake_messages(record_data):if message_type != b'':continuesession_id_length = int_from_bytes(message_data[:])if session_id_length > :client_session_id = message_data[: + session_id_length]cipher_suite_start = + session_id_lengthcipher_suite_length = int_from_bytes(message_data[cipher_suite_start:cipher_suite_start + ])compression_start = cipher_suite_start + + cipher_suite_lengthcompression_length = int_from_bytes(message_data[compression_start:compression_start + ])if server_session_id is None and session_ticket is None:extensions_length_start = compression_start + + compression_lengthextensions_data = message_data[extensions_length_start:]for extension_type, extension_data in _parse_hello_extensions(extensions_data):if extension_type == :session_ticket = \"\"breakbreakif server_session_id is not None:if client_session_id is None:session_id = \"\"else:if client_session_id != server_session_id:session_id = \"\"else:session_id = \"\"return {\"\": protocol,\"\": cipher_suite,\"\": compression,\"\": session_id,\"\": session_ticket,}", "docstring": "Parse the TLS handshake from the client to the server to extract information\nincluding the cipher suite selected, if compression is enabled, the\nsession id and if a new or reused session ticket exists.\n\n:param server_handshake_bytes:\n A byte string of the handshake data received from the server\n\n:param 
client_handshake_bytes:\n A byte string of the handshake data sent to the server\n\n:return:\n A dict with the following keys:\n - \"protocol\": unicode string\n - \"cipher_suite\": unicode string\n - \"compression\": boolean\n - \"session_id\": \"new\", \"reused\" or None\n - \"session_ticket: \"new\", \"reused\" or None", "id": "f9524:m4"} {"signature": "def parse_tls_records(data):", "body": "pointer = data_len = len(data)while pointer < data_len:if data[pointer:pointer + ] == b'':breaklength = int_from_bytes(data[pointer + :pointer + ])yield (data[pointer:pointer + ],data[pointer + :pointer + ],data[pointer + :pointer + + length])pointer += + length", "docstring": "Creates a generator returning tuples of information about each record\nin a byte string of data from a TLS client or server. Stops as soon as it\nfind a ChangeCipherSpec message since all data from then on is encrypted.\n\n:param data:\n A byte string of TLS records\n\n:return:\n A generator that yields 3-element tuples:\n [0] Byte string of record type\n [1] Byte string of protocol version\n [2] Byte string of record data", "id": "f9524:m5"} {"signature": "def parse_handshake_messages(data):", "body": "pointer = data_len = len(data)while pointer < data_len:length = int_from_bytes(data[pointer + :pointer + ])yield (data[pointer:pointer + ],data[pointer + :pointer + + length])pointer += + length", "docstring": "Creates a generator returning tuples of information about each message in\na byte string of data from a TLS handshake record\n\n:param data:\n A byte string of a TLS handshake record data\n\n:return:\n A generator that yields 2-element tuples:\n [0] Byte string of message type\n [1] Byte string of message data", "id": "f9524:m6"} {"signature": "def _parse_hello_extensions(data):", "body": "if data == b'':returnextentions_length = int_from_bytes(data[:])extensions_start = extensions_end = + extentions_lengthpointer = extensions_startwhile pointer < extensions_end:extension_type = int_from_bytes(data[pointer:pointer + ])extension_length = int_from_bytes(data[pointer + :pointer + ])yield (extension_type,data[pointer + :pointer + + extension_length])pointer += + extension_length", "docstring": "Creates a generator returning tuples of information about each extension\nfrom a byte string of extension data contained in a ServerHello ores\nClientHello message\n\n:param data:\n A byte string of a extension data from a TLS ServerHello or ClientHello\n message\n\n:return:\n A generator that yields 2-element tuples:\n [0] Byte string of extension type\n [1] Byte string of extension data", "id": "f9524:m7"} {"signature": "def raise_hostname(certificate, hostname):", "body": "is_ip = re.match('', hostname) or hostname.find('') != -if is_ip:hostname_type = '' % hostnameelse:hostname_type = '' % hostnamemessage = '' % hostname_typevalid_ips = ''.join(certificate.valid_ips)valid_domains = ''.join(certificate.valid_domains)if valid_domains:message += '' % valid_domainsif valid_domains and valid_ips:message += ''if valid_ips:message += '' % valid_ipsraise TLSVerificationError(message, certificate)", "docstring": "Raises a TLSVerificationError due to a hostname mismatch\n\n:param certificate:\n An asn1crypto.x509.Certificate object\n\n:raises:\n TLSVerificationError", "id": "f9524:m8"} {"signature": "def raise_verification(certificate):", "body": "message = ''raise TLSVerificationError(message, certificate)", "docstring": "Raises a generic TLSVerificationError\n\n:param certificate:\n An asn1crypto.x509.Certificate object\n\n:raises:\n 
TLSVerificationError", "id": "f9524:m9"} {"signature": "def raise_weak_signature(certificate):", "body": "message = ''raise TLSVerificationError(message, certificate)", "docstring": "Raises a TLSVerificationError when a certificate uses a weak signature\nalgorithm\n\n:param certificate:\n An asn1crypto.x509.Certificate object\n\n:raises:\n TLSVerificationError", "id": "f9524:m10"} {"signature": "def raise_client_auth():", "body": "message = ''raise TLSError(message)", "docstring": "Raises a TLSError indicating client authentication is required\n\n:raises:\n TLSError", "id": "f9524:m11"} {"signature": "def raise_revoked(certificate):", "body": "message = ''raise TLSVerificationError(message, certificate)", "docstring": "Raises a TLSVerificationError due to the certificate being revoked\n\n:param certificate:\n An asn1crypto.x509.Certificate object\n\n:raises:\n TLSVerificationError", "id": "f9524:m12"} {"signature": "def raise_no_issuer(certificate):", "body": "message = ''raise TLSVerificationError(message, certificate)", "docstring": "Raises a TLSVerificationError due to no issuer certificate found in trust\nroots\n\n:param certificate:\n An asn1crypto.x509.Certificate object\n\n:raises:\n TLSVerificationError", "id": "f9524:m13"} {"signature": "def raise_self_signed(certificate):", "body": "message = ''raise TLSVerificationError(message, certificate)", "docstring": "Raises a TLSVerificationError due to a self-signed certificate\nroots\n\n:param certificate:\n An asn1crypto.x509.Certificate object\n\n:raises:\n TLSVerificationError", "id": "f9524:m14"} {"signature": "def raise_expired_not_yet_valid(certificate):", "body": "validity = certificate['']['']not_after = validity[''].nativenot_before = validity[''].nativenow = datetime.now(timezone.utc)if not_before > now:formatted_before = not_before.strftime('')message = '' % formatted_beforeelif not_after < now:formatted_after = not_after.strftime('')message = '' % formatted_afterraise TLSVerificationError(message, certificate)", "docstring": "Raises a TLSVerificationError due to certificate being expired, or not yet\nbeing valid\n\n:param certificate:\n An asn1crypto.x509.Certificate object\n\n:raises:\n TLSVerificationError", "id": "f9524:m15"} {"signature": "def raise_disconnection():", "body": "raise TLSDisconnectError('')", "docstring": "Raises a TLSDisconnectError due to a disconnection\n\n:raises:\n TLSDisconnectError", "id": "f9524:m16"} {"signature": "def raise_protocol_error(server_handshake_bytes):", "body": "other_protocol = detect_other_protocol(server_handshake_bytes)if other_protocol:raise TLSError('' % other_protocol)raise TLSError('')", "docstring": "Raises a TLSError due to a protocol error\n\n:param server_handshake_bytes:\n A byte string of the handshake data received from the server\n\n:raises:\n TLSError", "id": "f9524:m17"} {"signature": "def raise_handshake():", "body": "raise TLSError('')", "docstring": "Raises a TLSError due to a handshake error\n\n:raises:\n TLSError", "id": "f9524:m18"} {"signature": "def raise_protocol_version():", "body": "raise TLSError('')", "docstring": "Raises a TLSError due to a TLS version incompatibility\n\n:raises:\n TLSError", "id": "f9524:m19"} {"signature": "def raise_dh_params():", "body": "raise TLSError('')", "docstring": "Raises a TLSError due to weak DH params\n\n:raises:\n TLSError", "id": "f9524:m20"} {"signature": "def detect_other_protocol(server_handshake_bytes):", "body": "if server_handshake_bytes[:] == b'':return ''if server_handshake_bytes[:] == b'':if re.match(b'', 
server_handshake_bytes, re.I):return ''else:return ''if server_handshake_bytes[:] == b'':return ''if server_handshake_bytes[:] == b'':return ''if server_handshake_bytes[:] == b'' or server_handshake_bytes[:] == b'':return ''return None", "docstring": "Looks at the server handshake bytes to try and detect a different protocol\n\n:param server_handshake_bytes:\n A byte string of the handshake data received from the server\n\n:return:\n None, or a unicode string of \"ftp\", \"http\", \"imap\", \"pop3\", \"smtp\"", "id": "f9524:m21"} {"signature": "def dump_dh_parameters(dh_parameters, encoding=''):", "body": "if encoding not in set(['', '']):raise ValueError(pretty_message('''''',repr(encoding)))if not isinstance(dh_parameters, algos.DHParameters):raise TypeError(pretty_message('''''',type_name(dh_parameters)))output = dh_parameters.dump()if encoding == '':output = pem.armor('', output)return output", "docstring": "Serializes an asn1crypto.algos.DHParameters object into a byte string\n\n:param dh_parameters:\n An asn1crypto.algos.DHParameters object\n\n:param encoding:\n A unicode string of \"pem\" or \"der\"\n\n:return:\n A byte string of the encoded DH parameters", "id": "f9525:m0"} {"signature": "def dump_public_key(public_key, encoding=''):", "body": "if encoding not in set(['', '']):raise ValueError(pretty_message('''''',repr(encoding)))is_oscrypto = isinstance(public_key, PublicKey)if not isinstance(public_key, keys.PublicKeyInfo) and not is_oscrypto:raise TypeError(pretty_message('''''',type_name(public_key)))if is_oscrypto:public_key = public_key.asn1output = public_key.dump()if encoding == '':output = pem.armor('', output)return output", "docstring": "Serializes a public key object into a byte string\n\n:param public_key:\n An oscrypto.asymmetric.PublicKey or asn1crypto.keys.PublicKeyInfo object\n\n:param encoding:\n A unicode string of \"pem\" or \"der\"\n\n:return:\n A byte string of the encoded public key", "id": "f9525:m1"} {"signature": "def dump_certificate(certificate, encoding=''):", "body": "if encoding not in set(['', '']):raise ValueError(pretty_message('''''',repr(encoding)))is_oscrypto = isinstance(certificate, Certificate)if not isinstance(certificate, x509.Certificate) and not is_oscrypto:raise TypeError(pretty_message('''''',type_name(certificate)))if is_oscrypto:certificate = certificate.asn1output = certificate.dump()if encoding == '':output = pem.armor('', output)return output", "docstring": "Serializes a certificate object into a byte string\n\n:param certificate:\n An oscrypto.asymmetric.Certificate or asn1crypto.x509.Certificate object\n\n:param encoding:\n A unicode string of \"pem\" or \"der\"\n\n:return:\n A byte string of the encoded certificate", "id": "f9525:m2"} {"signature": "def dump_private_key(private_key, passphrase, encoding='', target_ms=):", "body": "if encoding not in set(['', '']):raise ValueError(pretty_message('''''',repr(encoding)))if passphrase is not None:if not isinstance(passphrase, str_cls):raise TypeError(pretty_message('''''',type_name(passphrase)))if passphrase == '':raise ValueError(pretty_message(''''''))is_oscrypto = isinstance(private_key, PrivateKey)if not isinstance(private_key, keys.PrivateKeyInfo) and not is_oscrypto:raise TypeError(pretty_message('''''',type_name(private_key)))if is_oscrypto:private_key = private_key.asn1output = private_key.dump()if passphrase is not None:cipher = ''key_length = kdf_hmac = ''kdf_salt = rand_bytes(key_length)iterations = pbkdf2_iteration_calculator(kdf_hmac, key_length, target_ms=target_ms, 
quiet=True)if iterations < :iterations = passphrase_bytes = passphrase.encode('')key = pbkdf2(kdf_hmac, passphrase_bytes, kdf_salt, iterations, key_length)iv, ciphertext = aes_cbc_pkcs7_encrypt(key, output, None)output = keys.EncryptedPrivateKeyInfo({'': {'': '','': {'': {'': '','': {'': algos.Pbkdf2Salt(name='',value=kdf_salt),'': iterations,'': {'': kdf_hmac,'': core.Null()}}},'': {'': cipher,'': iv}}},'': ciphertext}).dump()if encoding == '':if passphrase is None:object_type = ''else:object_type = ''output = pem.armor(object_type, output)return output", "docstring": "Serializes a private key object into a byte string of the PKCS#8 format\n\n:param private_key:\n An oscrypto.asymmetric.PrivateKey or asn1crypto.keys.PrivateKeyInfo\n object\n\n:param passphrase:\n A unicode string of the passphrase to encrypt the private key with.\n A passphrase of None will result in no encryption. A blank string will\n result in a ValueError to help ensure that the lack of passphrase is\n intentional.\n\n:param encoding:\n A unicode string of \"pem\" or \"der\"\n\n:param target_ms:\n Use PBKDF2 with the number of iterations that takes about this many\n milliseconds on the current machine.\n\n:raises:\n ValueError - when a blank string is provided for the passphrase\n\n:return:\n A byte string of the encoded and encrypted public key", "id": "f9525:m3"} {"signature": "def dump_openssl_private_key(private_key, passphrase):", "body": "if passphrase is not None:if not isinstance(passphrase, str_cls):raise TypeError(pretty_message('''''',type_name(passphrase)))if passphrase == '':raise ValueError(pretty_message(''''''))is_oscrypto = isinstance(private_key, PrivateKey)if not isinstance(private_key, keys.PrivateKeyInfo) and not is_oscrypto:raise TypeError(pretty_message('''''',type_name(private_key)))if is_oscrypto:private_key = private_key.asn1output = private_key.unwrap().dump()headers = Noneif passphrase is not None:iv = rand_bytes()headers = OrderedDict()headers[''] = ''headers[''] = '' % binascii.hexlify(iv).decode('')key_length = passphrase_bytes = passphrase.encode('')key = hashlib.md5(passphrase_bytes + iv[:]).digest()while key_length > len(key):key += hashlib.md5(key + passphrase_bytes + iv[:]).digest()key = key[:key_length]iv, output = aes_cbc_pkcs7_encrypt(key, output, iv)if private_key.algorithm == '':object_type = ''elif private_key.algorithm == '':object_type = ''elif private_key.algorithm == '':object_type = ''return pem.armor(object_type, output, headers=headers)", "docstring": "Serializes a private key object into a byte string of the PEM formats used\nby OpenSSL. The format chosen will depend on the type of private key - RSA,\nDSA or EC.\n\nDo not use this method unless you really must interact with a system that\ndoes not support PKCS#8 private keys. The encryption provided by PKCS#8 is\nfar superior to the OpenSSL formats. This is due to the fact that the\nOpenSSL formats don't stretch the passphrase, making it very easy to\nbrute-force.\n\n:param private_key:\n An oscrypto.asymmetric.PrivateKey or asn1crypto.keys.PrivateKeyInfo\n object\n\n:param passphrase:\n A unicode string of the passphrase to encrypt the private key with.\n A passphrase of None will result in no encryption. 
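The reason dump_openssl_private_key warns against the traditional OpenSSL format is visible in its body: the encryption key is derived by chaining plain MD5 over the passphrase and the first 8 bytes of the IV, with no iteration count at all. A sketch of that derivation, assuming a 16-byte key as used with AES-128-CBC (the exact cipher choice is an assumption here):

```python
import hashlib

# Sketch of the legacy OpenSSL EVP_BytesToKey-style derivation (MD5, a single
# round) used for traditional PEM key encryption. The 16-byte key length is an
# assumption matching AES-128-CBC; the salt is the first 8 bytes of the IV.
def legacy_openssl_key(passphrase_bytes, iv, key_length=16):
    key = hashlib.md5(passphrase_bytes + iv[:8]).digest()
    while len(key) < key_length:
        key += hashlib.md5(key + passphrase_bytes + iv[:8]).digest()
    return key[:key_length]

# One MD5 pass per block is why this format is far easier to brute-force than
# the PBKDF2-protected PKCS#8 output of dump_private_key().
```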
A blank string will\n result in a ValueError to help ensure that the lack of passphrase is\n intentional.\n\n:raises:\n ValueError - when a blank string is provided for the passphrase\n\n:return:\n A byte string of the encoded and encrypted public key", "id": "f9525:m4"} {"signature": "def backend():", "body": "if _module_values[''] is not None:return _module_values['']with _backend_lock:if _module_values[''] is not None:return _module_values['']if sys.platform == '':if sys.getwindowsversion()[] < :_module_values[''] = ''else:_module_values[''] = ''elif sys.platform == '':_module_values[''] = ''else:_module_values[''] = ''return _module_values['']", "docstring": ":return:\n A unicode string of the backend being used: \"openssl\", \"osx\", \"win\",\n \"winlegacy\"", "id": "f9526:m0"} {"signature": "def _backend_config():", "body": "if backend() != '':return {}if _module_values[''] is not None:return _module_values['']with _backend_lock:if _module_values[''] is not None:return _module_values['']_module_values[''] = {}return _module_values['']", "docstring": ":return:\n A dict of config info for the backend. Only currently used by \"openssl\",\n it may contains zero or more of the following keys:\n - \"libcrypto_path\"\n - \"libssl_path\"", "id": "f9526:m1"} {"signature": "def use_openssl(libcrypto_path, libssl_path, trust_list_path=None):", "body": "if not isinstance(libcrypto_path, str_cls):raise ValueError('' % type_name(libcrypto_path))if not isinstance(libssl_path, str_cls):raise ValueError('' % type_name(libssl_path))if not os.path.exists(libcrypto_path):raise LibraryNotFoundError('' % libcrypto_path)if not os.path.exists(libssl_path):raise LibraryNotFoundError('' % libssl_path)if trust_list_path is not None:if not isinstance(trust_list_path, str_cls):raise ValueError('' % type_name(trust_list_path))if not os.path.exists(trust_list_path):raise OSError('' % trust_list_path)with _backend_lock:if _module_values[''] is not None:raise RuntimeError('')_module_values[''] = ''_module_values[''] = {'': libcrypto_path,'': libssl_path,'': trust_list_path,}", "docstring": "Forces using OpenSSL dynamic libraries on OS X (.dylib) or Windows (.dll),\nor using a specific dynamic library on Linux/BSD (.so).\n\nThis can also be used to configure oscrypto to use LibreSSL dynamic\nlibraries.\n\nThis method must be called before any oscrypto submodules are imported.\n\n:param libcrypto_path:\n A unicode string of the file path to the OpenSSL/LibreSSL libcrypto\n dynamic library.\n\n:param libssl_path:\n A unicode string of the file path to the OpenSSL/LibreSSL libssl\n dynamic library.\n\n:param trust_list_path:\n An optional unicode string of the path to a file containing\n OpenSSL-compatible CA certificates in PEM format. 
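Since backend() caches its decision and use_openssl() refuses to run once any other part of oscrypto has been imported, the override has to happen at program start. A usage sketch; the library paths are placeholders, not defaults shipped with the package:

```python
import oscrypto

# Must run before importing oscrypto.asymmetric, oscrypto.tls, etc.
# The paths below are illustrative placeholders for a pinned OpenSSL/LibreSSL build.
oscrypto.use_openssl(
    libcrypto_path='/opt/openssl/lib/libcrypto.so.1.1',
    libssl_path='/opt/openssl/lib/libssl.so.1.1',
)

from oscrypto import tls  # noqa: E402 - imported after configuring the backend
```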
If this is not\n provided and the platform is OS X or Windows, the system trust roots\n will be exported from the OS and used for all TLS connections.\n\n:raises:\n ValueError - when one of the paths is not a unicode string\n OSError - when the trust_list_path does not exist on the filesystem\n oscrypto.errors.LibraryNotFoundError - when one of the path does not exist on the filesystem\n RuntimeError - when this function is called after another part of oscrypto has been imported", "id": "f9526:m2"} {"signature": "def use_winlegacy():", "body": "if sys.platform != '':plat = platform.system() or sys.platformif plat == '':plat = ''raise EnvironmentError('' % plat)with _backend_lock:if _module_values[''] is not None:raise RuntimeError('')_module_values[''] = ''", "docstring": "Forces use of the legacy Windows CryptoAPI. This should only be used on\nWindows XP or for testing. It is less full-featured than the Cryptography\nNext Generation (CNG) API, and as a result the elliptic curve and PSS\npadding features are implemented in pure Python. This isn't ideal, but it\na shim for end-user client code. No one is going to run a server on Windows\nXP anyway, right?!\n\n:raises:\n EnvironmentError - when this function is called on an operating system other than Windows\n RuntimeError - when this function is called after another part of oscrypto has been imported", "id": "f9526:m3"} {"signature": "def pkcs12_kdf(hash_algorithm, password, salt, iterations, key_length, id_):", "body": "if not isinstance(password, byte_cls):raise TypeError(pretty_message('''''',type_name(password)))if not isinstance(salt, byte_cls):raise TypeError(pretty_message('''''',type_name(salt)))if not isinstance(iterations, int_types):raise TypeError(pretty_message('''''',type_name(iterations)))if iterations < :raise ValueError(pretty_message('''''',repr(iterations)))if not isinstance(key_length, int_types):raise TypeError(pretty_message('''''',type_name(key_length)))if key_length < :raise ValueError(pretty_message('''''',repr(key_length)))if hash_algorithm not in set(['', '', '', '', '', '']):raise ValueError(pretty_message('''''',repr(hash_algorithm)))if id_ not in set([, , ]):raise ValueError(pretty_message('''''',repr(id_)))utf16_password = password.decode('').encode('') + b''algo = getattr(hashlib, hash_algorithm)u = {'': ,'': ,'': ,'': ,'': ,'': }[hash_algorithm]if hash_algorithm in ['', '']:v = else:v = d = chr_cls(id_) * vs = b''if salt != b'':s_len = v * int(math.ceil(float(len(salt)) / v))while len(s) < s_len:s += salts = s[:s_len]p = b''if utf16_password != b'':p_len = v * int(math.ceil(float(len(utf16_password)) / v))while len(p) < p_len:p += utf16_passwordp = p[:p_len]i = s + pc = int(math.ceil(float(key_length) / u))a = b'' * (c * u)for num in range(, c + ):a2 = algo(d + i).digest()for _ in range(, iterations + ):a2 = algo(a2).digest()if num < c:b = b''while len(b) < v:b += a2b = int_from_bytes(b[:v]) + for num2 in range(, len(i) // v):start = num2 * vend = (num2 + ) * vi_num2 = i[start:end]i_num2 = int_to_bytes(int_from_bytes(i_num2) + b)i_num2_l = len(i_num2)if i_num2_l > v:i_num2 = i_num2[i_num2_l - v:]i = i[:start] + i_num2 + i[end:]begin = (num - ) * uto_copy = min(key_length, u)a = a[:begin] + a2[:to_copy] + a[begin + to_copy:]return a[:key_length]", "docstring": "KDF from RFC7292 appendix b.2 - https://tools.ietf.org/html/rfc7292#page-19\n\n:param hash_algorithm:\n The string name of the hash algorithm to use: \"md5\", \"sha1\", \"sha224\",\n \"sha256\", \"sha384\", \"sha512\"\n\n:param password:\n A byte 
string of the password to use an input to the KDF\n\n:param salt:\n A cryptographic random byte string\n\n:param iterations:\n The numbers of iterations to use when deriving the key\n\n:param key_length:\n The length of the desired key in bytes\n\n:param id_:\n The ID of the usage - 1 for key, 2 for iv, 3 for mac\n\n:return:\n The derived key as a byte string", "id": "f9527:m0"} {"signature": "def fill_width(bytes_, width):", "body": "while len(bytes_) < width:bytes_ = b'' + bytes_return bytes_", "docstring": "Ensure a byte string representing a positive integer is a specific width\n(in bytes)\n\n:param bytes_:\n The integer byte string\n\n:param width:\n The desired width as an integer\n\n:return:\n A byte string of the width specified", "id": "f9528:m0"} {"signature": "def pkcs12_kdf(hash_algorithm, password, salt, iterations, key_length, id_):", "body": "if not isinstance(password, byte_cls):raise TypeError(pretty_message('''''',type_name(password)))if not isinstance(salt, byte_cls):raise TypeError(pretty_message('''''',type_name(salt)))if not isinstance(iterations, int_types):raise TypeError(pretty_message('''''',type_name(iterations)))if iterations < :raise ValueError(pretty_message('''''',repr(iterations)))if not isinstance(key_length, int_types):raise TypeError(pretty_message('''''',type_name(key_length)))if key_length < :raise ValueError(pretty_message('''''',repr(key_length)))if hash_algorithm not in set(['', '', '', '', '', '']):raise ValueError(pretty_message('''''',repr(hash_algorithm)))if id_ not in set([, , ]):raise ValueError(pretty_message('''''',repr(id_)))utf16_password = password.decode('').encode('') + b''digest_type = {'': libcrypto.EVP_md5,'': libcrypto.EVP_sha1,'': libcrypto.EVP_sha224,'': libcrypto.EVP_sha256,'': libcrypto.EVP_sha384,'': libcrypto.EVP_sha512,}[hash_algorithm]()output_buffer = buffer_from_bytes(key_length)result = libcrypto.PKCS12_key_gen_uni(utf16_password,len(utf16_password),salt,len(salt),id_,iterations,key_length,output_buffer,digest_type)handle_openssl_error(result)return bytes_from_buffer(output_buffer)", "docstring": "KDF from RFC7292 appendix B.2 - https://tools.ietf.org/html/rfc7292#page-19\n\n:param hash_algorithm:\n The string name of the hash algorithm to use: \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\", \"sha512\"\n\n:param password:\n A byte string of the password to use an input to the KDF\n\n:param salt:\n A cryptographic random byte string\n\n:param iterations:\n The numbers of iterations to use when deriving the key\n\n:param key_length:\n The length of the desired key in bytes\n\n:param id_:\n The ID of the usage - 1 for key, 2 for iv, 3 for mac\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n\n:return:\n The derived key as a byte string", "id": "f9531:m0"} {"signature": "def __init__(self, protocol=None, manual_validation=False, extra_trust_roots=None):", "body": "if not isinstance(manual_validation, bool):raise TypeError(pretty_message('''''',type_name(manual_validation)))self._manual_validation = manual_validationif protocol is None:protocol = set(['', '', ''])if isinstance(protocol, str_cls):protocol = set([protocol])elif not isinstance(protocol, set):raise TypeError(pretty_message('''''',type_name(protocol)))valid_protocols = set(['', '', '', ''])unsupported_protocols = protocol - valid_protocolsif unsupported_protocols:raise ValueError(pretty_message('''''',repr(unsupported_protocols)))self._protocols = 
protocolself._extra_trust_roots = []if extra_trust_roots:for extra_trust_root in extra_trust_roots:if isinstance(extra_trust_root, Certificate):extra_trust_root = extra_trust_root.asn1elif isinstance(extra_trust_root, byte_cls):extra_trust_root = parse_certificate(extra_trust_root)elif isinstance(extra_trust_root, str_cls):with open(extra_trust_root, '') as f:extra_trust_root = parse_certificate(f.read())elif not isinstance(extra_trust_root, x509.Certificate):raise TypeError(pretty_message('''''',type_name(extra_trust_root)))self._extra_trust_roots.append(extra_trust_root)ssl_ctx = Nonetry:if libcrypto_version_info < (, ):method = libssl.SSLv23_method()else:method = libssl.TLS_method()ssl_ctx = libssl.SSL_CTX_new(method)if is_null(ssl_ctx):handle_openssl_error()self._ssl_ctx = ssl_ctxlibssl.SSL_CTX_set_timeout(ssl_ctx, )libssl.SSL_CTX_ctrl(ssl_ctx,LibsslConst.SSL_CTRL_SET_SESS_CACHE_MODE,LibsslConst.SSL_SESS_CACHE_CLIENT,null())if sys.platform in set(['', '']):trust_list_path = _trust_list_pathif trust_list_path is None:trust_list_path = get_path()if sys.platform == '':path_encoding = ''else:path_encoding = ''result = libssl.SSL_CTX_load_verify_locations(ssl_ctx,trust_list_path.encode(path_encoding),null())else:result = libssl.SSL_CTX_set_default_verify_paths(ssl_ctx)handle_openssl_error(result)verify_mode = LibsslConst.SSL_VERIFY_NONE if manual_validation else LibsslConst.SSL_VERIFY_PEERlibssl.SSL_CTX_set_verify(ssl_ctx, verify_mode, null())result = libssl.SSL_CTX_set_cipher_list(ssl_ctx,(b''b''b''b''b''b''b''b''b''b''b''b''))handle_openssl_error(result)disabled_protocols = set([''])disabled_protocols |= (valid_protocols - self._protocols)for disabled_protocol in disabled_protocols:libssl.SSL_CTX_ctrl(ssl_ctx,LibsslConst.SSL_CTRL_OPTIONS,_PROTOCOL_MAP[disabled_protocol],null())if self._extra_trust_roots:x509_store = libssl.SSL_CTX_get_cert_store(ssl_ctx)for cert in self._extra_trust_roots:oscrypto_cert = load_certificate(cert)result = libssl.X509_STORE_add_cert(x509_store,oscrypto_cert.x509)handle_openssl_error(result)except (Exception):if ssl_ctx:libssl.SSL_CTX_free(ssl_ctx)self._ssl_ctx = Noneraise", "docstring": ":param protocol:\n A unicode string or set of unicode strings representing allowable\n protocols to negotiate with the server:\n\n - \"TLSv1.2\"\n - \"TLSv1.1\"\n - \"TLSv1\"\n - \"SSLv3\"\n\n Default is: {\"TLSv1\", \"TLSv1.1\", \"TLSv1.2\"}\n\n:param manual_validation:\n If certificate and certificate path validation should be skipped\n and left to the developer to implement\n\n:param extra_trust_roots:\n A list containing one or more certificates to be treated as trust\n roots, in one of the following formats:\n - A byte string of the DER encoded certificate\n - A unicode string of the certificate filename\n - An asn1crypto.x509.Certificate object\n - An oscrypto.asymmetric.Certificate object\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9532:c0:m0"} {"signature": "@classmethoddef wrap(cls, socket, hostname, session=None):", "body": "if not isinstance(socket, socket_.socket):raise TypeError(pretty_message('''''',type_name(socket)))if not isinstance(hostname, str_cls):raise TypeError(pretty_message('''''',type_name(hostname)))if session is not None and not isinstance(session, TLSSession):raise TypeError(pretty_message('''''',type_name(session)))new_socket = cls(None, None, session=session)new_socket._socket = 
socketnew_socket._hostname = hostnamenew_socket._handshake()return new_socket", "docstring": "Takes an existing socket and adds TLS\n\n:param socket:\n A socket.socket object to wrap with TLS\n\n:param hostname:\n A unicode string of the hostname or IP the socket is connected to\n\n:param session:\n An existing TLSSession object to allow for session reuse, specific\n protocol or manual certificate validation\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9532:c1:m0"} {"signature": "def __init__(self, address, port, timeout=, session=None):", "body": "self._raw_bytes = b''self._decrypted_bytes = b''if address is None and port is None:self._socket = Noneelse:if not isinstance(address, str_cls):raise TypeError(pretty_message('''''',type_name(address)))if not isinstance(port, int_types):raise TypeError(pretty_message('''''',type_name(port)))if timeout is not None and not isinstance(timeout, numbers.Number):raise TypeError(pretty_message('''''',type_name(timeout)))self._socket = socket_.create_connection((address, port), timeout)self._socket.settimeout(timeout)if session is None:session = TLSSession()elif not isinstance(session, TLSSession):raise TypeError(pretty_message('''''',type_name(session)))self._session = sessionif self._socket:self._hostname = addressself._handshake()", "docstring": ":param address:\n A unicode string of the domain name or IP address to conenct to\n\n:param port:\n An integer of the port number to connect to\n\n:param timeout:\n An integer timeout to use for the socket\n\n:param session:\n An oscrypto.tls.TLSSession object to allow for session reuse and\n controlling the protocols and validation performed", "id": "f9532:c1:m1"} {"signature": "def _handshake(self):", "body": "self._ssl = Noneself._rbio = Noneself._wbio = Nonetry:self._ssl = libssl.SSL_new(self._session._ssl_ctx)if is_null(self._ssl):self._ssl = Nonehandle_openssl_error()mem_bio = libssl.BIO_s_mem()self._rbio = libssl.BIO_new(mem_bio)if is_null(self._rbio):handle_openssl_error()self._wbio = libssl.BIO_new(mem_bio)if is_null(self._wbio):handle_openssl_error()libssl.SSL_set_bio(self._ssl, self._rbio, self._wbio)utf8_domain = self._hostname.encode('')libssl.SSL_ctrl(self._ssl,LibsslConst.SSL_CTRL_SET_TLSEXT_HOSTNAME,LibsslConst.TLSEXT_NAMETYPE_host_name,utf8_domain)libssl.SSL_set_connect_state(self._ssl)if self._session._ssl_session:libssl.SSL_set_session(self._ssl, self._session._ssl_session)self._bio_write_buffer = buffer_from_bytes(self._buffer_size)self._read_buffer = buffer_from_bytes(self._buffer_size)handshake_server_bytes = b''handshake_client_bytes = b''while True:result = libssl.SSL_do_handshake(self._ssl)handshake_client_bytes += self._raw_write()if result == :breakerror = libssl.SSL_get_error(self._ssl, result)if error == LibsslConst.SSL_ERROR_WANT_READ:chunk = self._raw_read()if chunk == b'':if handshake_server_bytes == b'':raise_disconnection()if detect_client_auth_request(handshake_server_bytes):raise_client_auth()raise_protocol_error(handshake_server_bytes)handshake_server_bytes += chunkelif error == LibsslConst.SSL_ERROR_WANT_WRITE:handshake_client_bytes += self._raw_write()elif error == LibsslConst.SSL_ERROR_ZERO_RETURN:self._gracefully_closed = Trueself._shutdown(False)self._raise_closed()else:info = peek_openssl_error()if libcrypto_version_info < (, ):dh_key_info = 
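A hedged sketch of TLSSocket.wrap() from the record above: an already-connected plain socket is upgraded to TLS, after which the usual read/write helpers apply. The hostname is a placeholder and the public oscrypto.tls module is assumed to expose this class.

    import socket
    from oscrypto import tls

    plain = socket.create_connection(('example.org', 443), timeout=10)   # placeholder host
    sock = tls.TLSSocket.wrap(plain, 'example.org')                      # handshake happens here
    sock.write(b'GET / HTTP/1.1\r\nHost: example.org\r\nConnection: close\r\n\r\n')
    print(sock.read_line())
    sock.close()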
(,LibsslConst.SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,LibsslConst.SSL_R_DH_KEY_TOO_SMALL)else:dh_key_info = (,LibsslConst.SSL_F_TLS_PROCESS_SKE_DHE,LibsslConst.SSL_R_DH_KEY_TOO_SMALL)if info == dh_key_info:raise_dh_params()if libcrypto_version_info < (, ):unknown_protocol_info = (,LibsslConst.SSL_F_SSL23_GET_SERVER_HELLO,LibsslConst.SSL_R_UNKNOWN_PROTOCOL)else:unknown_protocol_info = (,LibsslConst.SSL_F_SSL3_GET_RECORD,LibsslConst.SSL_R_WRONG_VERSION_NUMBER)if info == unknown_protocol_info:raise_protocol_error(handshake_server_bytes)tls_version_info_error = (,LibsslConst.SSL_F_SSL23_GET_SERVER_HELLO,LibsslConst.SSL_R_TLSV1_ALERT_PROTOCOL_VERSION)if info == tls_version_info_error:raise_protocol_version()handshake_error_info = (,LibsslConst.SSL_F_SSL23_GET_SERVER_HELLO,LibsslConst.SSL_R_SSLV3_ALERT_HANDSHAKE_FAILURE)if info == handshake_error_info:raise_handshake()handshake_failure_info = (,LibsslConst.SSL_F_SSL3_READ_BYTES,LibsslConst.SSL_R_SSLV3_ALERT_HANDSHAKE_FAILURE)if info == handshake_failure_info:raise_client_auth()if libcrypto_version_info < (, ):cert_verify_failed_info = (,LibsslConst.SSL_F_SSL3_GET_SERVER_CERTIFICATE,LibsslConst.SSL_R_CERTIFICATE_VERIFY_FAILED)else:cert_verify_failed_info = (,LibsslConst.SSL_F_TLS_PROCESS_SERVER_CERTIFICATE,LibsslConst.SSL_R_CERTIFICATE_VERIFY_FAILED)if info == cert_verify_failed_info:verify_result = libssl.SSL_get_verify_result(self._ssl)chain = extract_chain(handshake_server_bytes)self_signed = Falsetime_invalid = Falseno_issuer = Falsecert = Noneoscrypto_cert = Noneif chain:cert = chain[]oscrypto_cert = load_certificate(cert)self_signed = oscrypto_cert.self_signedissuer_error_codes = set([LibsslConst.X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT,LibsslConst.X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN,LibsslConst.X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY])if verify_result in issuer_error_codes:no_issuer = not self_signedtime_error_codes = set([LibsslConst.X509_V_ERR_CERT_HAS_EXPIRED,LibsslConst.X509_V_ERR_CERT_NOT_YET_VALID])time_invalid = verify_result in time_error_codesif time_invalid:raise_expired_not_yet_valid(cert)if no_issuer:raise_no_issuer(cert)if self_signed:raise_self_signed(cert)if oscrypto_cert and oscrypto_cert.asn1.hash_algo in set(['', '']):raise_weak_signature(oscrypto_cert)raise_verification(cert)handle_openssl_error(, TLSError)session_info = parse_session_info(handshake_server_bytes,handshake_client_bytes)self._protocol = session_info['']self._cipher_suite = session_info['']self._compression = session_info['']self._session_id = session_info['']self._session_ticket = session_info['']if self._cipher_suite.find('') != -:dh_params_length = get_dh_params_length(handshake_server_bytes)if dh_params_length < :self.close()raise_dh_params()if self._session_id == '' or self._session_ticket == '':if self._session._ssl_session:libssl.SSL_SESSION_free(self._session._ssl_session)self._session._ssl_session = libssl.SSL_get1_session(self._ssl)if not self._session._manual_validation:if self.certificate.hash_algo in set(['', '']):raise_weak_signature(self.certificate)if not self.certificate.is_valid_domain_ip(self._hostname):raise_hostname(self.certificate, self._hostname)except (OSError, socket_.error):if self._ssl:libssl.SSL_free(self._ssl)self._ssl = Noneself._rbio = Noneself._wbio = Noneelse:if self._rbio:libssl.BIO_free(self._rbio)self._rbio = Noneif self._wbio:libssl.BIO_free(self._wbio)self._wbio = Noneself.close()raise", "docstring": "Perform an initial TLS handshake", "id": "f9532:c1:m2"} {"signature": "def _raw_read(self):", "body": "data = 
self._raw_bytestry:data += self._socket.recv()except (socket_.error):passoutput = datawritten = libssl.BIO_write(self._rbio, data, len(data))self._raw_bytes = data[written:]return output", "docstring": "Reads data from the socket and writes it to the memory bio\nused by libssl to decrypt the data. Returns the unencrypted\ndata for the purpose of debugging handshakes.\n\n:return:\n A byte string of ciphertext from the socket. Used for\n debugging the handshake only.", "id": "f9532:c1:m3"} {"signature": "def _raw_write(self):", "body": "data_available = libssl.BIO_ctrl_pending(self._wbio)if data_available == :return b''to_read = min(self._buffer_size, data_available)read = libssl.BIO_read(self._wbio, self._bio_write_buffer, to_read)to_write = bytes_from_buffer(self._bio_write_buffer, read)output = to_writewhile len(to_write):raise_disconnect = Falsetry:sent = self._socket.send(to_write)except (socket_.error) as e:if e.errno == or e.errno == :raise_disconnect = Trueelse:raiseif raise_disconnect:raise_disconnection()to_write = to_write[sent:]if len(to_write):self.select_write()return output", "docstring": "Takes ciphertext from the memory bio and writes it to the\nsocket.\n\n:return:\n A byte string of ciphertext going to the socket. Used\n for debugging the handshake only.", "id": "f9532:c1:m4"} {"signature": "def read(self, max_length):", "body": "if not isinstance(max_length, int_types):raise TypeError(pretty_message('''''',type_name(max_length)))buffered_length = len(self._decrypted_bytes)if buffered_length >= max_length:output = self._decrypted_bytes[:max_length]self._decrypted_bytes = self._decrypted_bytes[max_length:]return outputif self._ssl is None:self._raise_closed()if buffered_length > and not self.select_read():output = self._decrypted_bytesself._decrypted_bytes = b''return outputto_read = min(self._buffer_size, max_length - buffered_length)output = self._decrypted_bytesagain = Truewhile again:again = Falseresult = libssl.SSL_read(self._ssl, self._read_buffer, to_read)self._raw_write()if result <= :error = libssl.SSL_get_error(self._ssl, result)if error == LibsslConst.SSL_ERROR_WANT_READ:if self._raw_read() != b'':again = Truecontinueraise_disconnection()elif error == LibsslConst.SSL_ERROR_WANT_WRITE:self._raw_write()again = Truecontinueelif error == LibsslConst.SSL_ERROR_ZERO_RETURN:self._gracefully_closed = Trueself._shutdown(False)breakelse:handle_openssl_error(, TLSError)output += bytes_from_buffer(self._read_buffer, result)if self._gracefully_closed and len(output) == :self._raise_closed()self._decrypted_bytes = output[max_length:]return output[:max_length]", "docstring": "Reads data from the TLS-wrapped socket\n\n:param max_length:\n The number of bytes to read - output may be less than this\n\n:raises:\n socket.socket - when a non-TLS socket error occurs\n oscrypto.errors.TLSError - when a TLS-related error occurs\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the data read", "id": "f9532:c1:m5"} {"signature": "def select_read(self, timeout=None):", "body": "if len(self._decrypted_bytes) > :return Trueread_ready, _, _ = select.select([self._socket], [], [], timeout)return len(read_ready) > ", "docstring": "Blocks until the socket is ready to be read from, or the timeout is hit\n\n:param timeout:\n A float - the period of time to wait for data to be read. 
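A hedged sketch of the read()/select_read() pattern documented above: select_read() blocks until data is available to read (returning False only on timeout) and read() returns at most the requested number of bytes. The chunk size, timeout value, and the use of TLSGracefulDisconnectError to detect a clean close are illustrative assumptions.

    from oscrypto import tls
    from oscrypto.errors import TLSGracefulDisconnectError

    sock = tls.TLSSocket('example.org', 443, timeout=10)   # placeholder host
    sock.write(b'GET / HTTP/1.1\r\nHost: example.org\r\nConnection: close\r\n\r\n')

    response = b''
    try:
        while sock.select_read(timeout=5.0):   # False only when the timeout elapses
            response += sock.read(8192)        # may return fewer bytes than requested
    except TLSGracefulDisconnectError:
        pass                                   # server sent close_notify; response is complete
    sock.close()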
None for\n no time limit.\n\n:return:\n A boolean - if data is ready to be read. Will only be False if\n timeout is not None.", "id": "f9532:c1:m6"} {"signature": "def read_until(self, marker):", "body": "if not isinstance(marker, byte_cls) and not isinstance(marker, Pattern):raise TypeError(pretty_message('''''',type_name(marker)))output = b''is_regex = isinstance(marker, Pattern)while True:if len(self._decrypted_bytes) > :chunk = self._decrypted_bytesself._decrypted_bytes = b''else:if self._ssl is None:self._raise_closed()to_read = libssl.SSL_pending(self._ssl) or chunk = self.read(to_read)offset = len(output)output += chunkif is_regex:match = marker.search(output)if match is not None:end = match.end()breakelse:start = max(, offset - len(marker) - )match = output.find(marker, start)if match != -:end = match + len(marker)breakself._decrypted_bytes = output[end:] + self._decrypted_bytesreturn output[:end]", "docstring": "Reads data from the socket until a marker is found. Data read includes\nthe marker.\n\n:param marker:\n A byte string or regex object from re.compile(). Used to determine\n when to stop reading. Regex objects are more inefficient since\n they must scan the entire byte string of read data each time data\n is read off the socket.\n\n:return:\n A byte string of the data read, including the marker", "id": "f9532:c1:m7"} {"signature": "def read_line(self):", "body": "return self.read_until(_line_regex)", "docstring": "r\"\"\"\n Reads a line from the socket, including the line ending of \"\\r\\n\", \"\\r\",\n or \"\\n\"\n\n :return:\n A byte string of the next line from the socket", "id": "f9532:c1:m8"} {"signature": "def read_exactly(self, num_bytes):", "body": "output = b''remaining = num_byteswhile remaining > :output += self.read(remaining)remaining = num_bytes - len(output)return output", "docstring": "Reads exactly the specified number of bytes from the socket\n\n:param num_bytes:\n An integer - the exact number of bytes to read\n\n:return:\n A byte string of the data that was read", "id": "f9532:c1:m9"} {"signature": "def write(self, data):", "body": "data_len = len(data)while data_len:if self._ssl is None:self._raise_closed()result = libssl.SSL_write(self._ssl, data, data_len)self._raw_write()if result <= :error = libssl.SSL_get_error(self._ssl, result)if error == LibsslConst.SSL_ERROR_WANT_READ:if self._raw_read() != b'':continueraise_disconnection()elif error == LibsslConst.SSL_ERROR_WANT_WRITE:self._raw_write()continueelif error == LibsslConst.SSL_ERROR_ZERO_RETURN:self._gracefully_closed = Trueself._shutdown(False)self._raise_closed()else:handle_openssl_error(, TLSError)data = data[result:]data_len = len(data)", "docstring": "Writes data to the TLS-wrapped socket\n\n:param data:\n A byte string to write to the socket\n\n:raises:\n socket.socket - when a non-TLS socket error occurs\n oscrypto.errors.TLSError - when a TLS-related error occurs\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9532:c1:m10"} {"signature": "def select_write(self, timeout=None):", "body": "_, write_ready, _ = select.select([], [self._socket], [], timeout)return len(write_ready) > ", "docstring": "Blocks until the socket is ready to be written to, or the timeout is hit\n\n:param timeout:\n A float - the period of time to wait for the socket to be ready to\n written to. 
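Hedged examples of the delimiter-based readers listed above. read_until() accepts a byte string or a compiled regex and returns the data including the marker; the HTTP-style request and hostname are placeholders.

    import re
    from oscrypto import tls

    sock = tls.TLSSocket('example.org', 443, timeout=10)   # placeholder host
    sock.write(b'HEAD / HTTP/1.1\r\nHost: example.org\r\nConnection: close\r\n\r\n')

    status = sock.read_line()                           # one line, line ending included
    headers = sock.read_until(re.compile(b'\r\n\r\n'))  # everything up to and including the blank line
    # sock.read_exactly(n) would block until exactly n bytes arrive, e.g. for a fixed-length body
    sock.close()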
None for no time limit.\n\n:return:\n A boolean - if the socket is ready for writing. Will only be False\n if timeout is not None.", "id": "f9532:c1:m11"} {"signature": "def _shutdown(self, manual):", "body": "if self._ssl is None:returnwhile True:result = libssl.SSL_shutdown(self._ssl)try:self._raw_write()except (TLSDisconnectError):passif result >= :breakif result < :error = libssl.SSL_get_error(self._ssl, result)if error == LibsslConst.SSL_ERROR_WANT_READ:if self._raw_read() != b'':continueelse:breakelif error == LibsslConst.SSL_ERROR_WANT_WRITE:self._raw_write()continueelse:handle_openssl_error(, TLSError)if manual:self._local_closed = Truelibssl.SSL_free(self._ssl)self._ssl = Noneself._rbio = Noneself._wbio = Nonetry:self._socket.shutdown(socket_.SHUT_RDWR)except (socket_.error):pass", "docstring": "Shuts down the TLS session and then shuts down the underlying socket\n\n:param manual:\n A boolean if the connection was manually shutdown", "id": "f9532:c1:m12"} {"signature": "def shutdown(self):", "body": "self._shutdown(True)", "docstring": "Shuts down the TLS session and then shuts down the underlying socket", "id": "f9532:c1:m13"} {"signature": "def close(self):", "body": "try:self.shutdown()finally:if self._socket:try:self._socket.close()except (socket_.error):passself._socket = None", "docstring": "Shuts down the TLS session and socket and forcibly closes it", "id": "f9532:c1:m14"} {"signature": "def _read_certificates(self):", "body": "stack_pointer = libssl.SSL_get_peer_cert_chain(self._ssl)if is_null(stack_pointer):handle_openssl_error(, TLSError)if libcrypto_version_info < (, ):number_certs = libssl.sk_num(stack_pointer)else:number_certs = libssl.OPENSSL_sk_num(stack_pointer)self._intermediates = []for index in range(, number_certs):if libcrypto_version_info < (, ):x509_ = libssl.sk_value(stack_pointer, index)else:x509_ = libssl.OPENSSL_sk_value(stack_pointer, index)buffer_size = libcrypto.i2d_X509(x509_, null())cert_buffer = buffer_from_bytes(buffer_size)cert_pointer = buffer_pointer(cert_buffer)cert_length = libcrypto.i2d_X509(x509_, cert_pointer)handle_openssl_error(cert_length)cert_data = bytes_from_buffer(cert_buffer, cert_length)cert = x509.Certificate.load(cert_data)if index == :self._certificate = certelse:self._intermediates.append(cert)", "docstring": "Reads end-entity and intermediate certificate information from the\nTLS session", "id": "f9532:c1:m15"} {"signature": "def _raise_closed(self):", "body": "if self._local_closed:raise TLSDisconnectError('')elif self._gracefully_closed:raise TLSGracefulDisconnectError('')else:raise TLSDisconnectError('')", "docstring": "Raises an exception describing if the local or remote end closed the\nconnection", "id": "f9532:c1:m16"} {"signature": "@propertydef certificate(self):", "body": "if self._ssl is None:self._raise_closed()if self._certificate is None:self._read_certificates()return self._certificate", "docstring": "An asn1crypto.x509.Certificate object of the end-entity certificate\npresented by the server", "id": "f9532:c1:m17"} {"signature": "@propertydef intermediates(self):", "body": "if self._ssl is None:self._raise_closed()if self._certificate is None:self._read_certificates()return self._intermediates", "docstring": "A list of asn1crypto.x509.Certificate objects that were presented as\nintermediates by the server", "id": "f9532:c1:m18"} {"signature": "@propertydef cipher_suite(self):", "body": "return self._cipher_suite", "docstring": "A unicode string of the IANA cipher suite name of the negotiated\ncipher suite", 
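A hedged sketch of inspecting the peer's certificate chain and closing the connection cleanly with the properties and methods recorded above. The subject.human_friendly accessor is an assumption about the asn1crypto API, not part of this module.

    from oscrypto import tls

    sock = tls.TLSSocket('example.org', 443, timeout=10)   # placeholder host
    cert = sock.certificate                                # asn1crypto.x509.Certificate of the end entity
    chain = sock.intermediates                             # intermediates presented by the server
    print(cert.subject.human_friendly)                     # assumed asn1crypto helper for a readable subject
    print(len(chain), 'intermediate certificate(s)')
    sock.shutdown()                                        # close_notify, then the raw socket is shut down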
"id": "f9532:c1:m19"} {"signature": "@propertydef protocol(self):", "body": "return self._protocol", "docstring": "A unicode string of: \"TLSv1.2\", \"TLSv1.1\", \"TLSv1\", \"SSLv3\"", "id": "f9532:c1:m20"} {"signature": "@propertydef compression(self):", "body": "return self._compression", "docstring": "A boolean if compression is enabled", "id": "f9532:c1:m21"} {"signature": "@propertydef session_id(self):", "body": "return self._session_id", "docstring": "A unicode string of \"new\" or \"reused\" or None for no ticket", "id": "f9532:c1:m22"} {"signature": "@propertydef session_ticket(self):", "body": "return self._session_ticket", "docstring": "A unicode string of \"new\" or \"reused\" or None for no ticket", "id": "f9532:c1:m23"} {"signature": "@propertydef session(self):", "body": "return self._session", "docstring": "The oscrypto.tls.TLSSession object used for this connection", "id": "f9532:c1:m24"} {"signature": "@propertydef hostname(self):", "body": "return self._hostname", "docstring": "A unicode string of the TLS server domain name or IP address", "id": "f9532:c1:m25"} {"signature": "@propertydef port(self):", "body": "return self.socket.getpeername()[]", "docstring": "An integer of the port number the socket is connected to", "id": "f9532:c1:m26"} {"signature": "@propertydef socket(self):", "body": "if self._ssl is None:self._raise_closed()return self._socket", "docstring": "The underlying socket.socket connection", "id": "f9532:c1:m27"} {"signature": "def generate_pair(algorithm, bit_size=None, curve=None):", "body": "if algorithm not in set(['', '', '']):raise ValueError(pretty_message('''''',repr(algorithm)))if algorithm == '':if bit_size not in set([, , , ]):raise ValueError(pretty_message('''''',repr(bit_size)))elif algorithm == '':if libcrypto_version_info < (,):if bit_size != :raise ValueError(pretty_message('''''',repr(bit_size)))else:if bit_size not in set([, , ]):raise ValueError(pretty_message('''''',repr(bit_size)))elif algorithm == '':if curve not in set(['', '', '']):raise ValueError(pretty_message('''''',repr(curve)))if algorithm == '':rsa = Noneexponent = Nonetry:rsa = libcrypto.RSA_new()if is_null(rsa):handle_openssl_error()exponent_pointer = new(libcrypto, '')result = libcrypto.BN_dec2bn(exponent_pointer, b'')handle_openssl_error(result)exponent = unwrap(exponent_pointer)result = libcrypto.RSA_generate_key_ex(rsa, bit_size, exponent, null())handle_openssl_error(result)buffer_length = libcrypto.i2d_RSAPublicKey(rsa, null())if buffer_length < :handle_openssl_error(buffer_length)buffer = buffer_from_bytes(buffer_length)result = libcrypto.i2d_RSAPublicKey(rsa, buffer_pointer(buffer))if result < :handle_openssl_error(result)public_key_bytes = bytes_from_buffer(buffer, buffer_length)buffer_length = libcrypto.i2d_RSAPrivateKey(rsa, null())if buffer_length < :handle_openssl_error(buffer_length)buffer = buffer_from_bytes(buffer_length)result = libcrypto.i2d_RSAPrivateKey(rsa, buffer_pointer(buffer))if result < :handle_openssl_error(result)private_key_bytes = bytes_from_buffer(buffer, buffer_length)finally:if rsa:libcrypto.RSA_free(rsa)if exponent:libcrypto.BN_free(exponent)elif algorithm == '':dsa = Nonetry:dsa = libcrypto.DSA_new()if is_null(dsa):handle_openssl_error()result = libcrypto.DSA_generate_parameters_ex(dsa, bit_size, null(), , null(), null(), null())handle_openssl_error(result)result = libcrypto.DSA_generate_key(dsa)handle_openssl_error(result)buffer_length = libcrypto.i2d_DSA_PUBKEY(dsa, null())if buffer_length < :handle_openssl_error(buffer_length)buffer 
= buffer_from_bytes(buffer_length)result = libcrypto.i2d_DSA_PUBKEY(dsa, buffer_pointer(buffer))if result < :handle_openssl_error(result)public_key_bytes = bytes_from_buffer(buffer, buffer_length)buffer_length = libcrypto.i2d_DSAPrivateKey(dsa, null())if buffer_length < :handle_openssl_error(buffer_length)buffer = buffer_from_bytes(buffer_length)result = libcrypto.i2d_DSAPrivateKey(dsa, buffer_pointer(buffer))if result < :handle_openssl_error(result)private_key_bytes = bytes_from_buffer(buffer, buffer_length)finally:if dsa:libcrypto.DSA_free(dsa)elif algorithm == '':ec_key = Nonetry:curve_id = {'': LibcryptoConst.NID_X9_62_prime256v1,'': LibcryptoConst.NID_secp384r1,'': LibcryptoConst.NID_secp521r1,}[curve]ec_key = libcrypto.EC_KEY_new_by_curve_name(curve_id)if is_null(ec_key):handle_openssl_error()result = libcrypto.EC_KEY_generate_key(ec_key)handle_openssl_error(result)libcrypto.EC_KEY_set_asn1_flag(ec_key, LibcryptoConst.OPENSSL_EC_NAMED_CURVE)buffer_length = libcrypto.i2o_ECPublicKey(ec_key, null())if buffer_length < :handle_openssl_error(buffer_length)buffer = buffer_from_bytes(buffer_length)result = libcrypto.i2o_ECPublicKey(ec_key, buffer_pointer(buffer))if result < :handle_openssl_error(result)public_key_point_bytes = bytes_from_buffer(buffer, buffer_length)public_key = keys.PublicKeyInfo({'': keys.PublicKeyAlgorithm({'': '','': keys.ECDomainParameters(name='',value=curve)}),'': public_key_point_bytes})public_key_bytes = public_key.dump()buffer_length = libcrypto.i2d_ECPrivateKey(ec_key, null())if buffer_length < :handle_openssl_error(buffer_length)buffer = buffer_from_bytes(buffer_length)result = libcrypto.i2d_ECPrivateKey(ec_key, buffer_pointer(buffer))if result < :handle_openssl_error(result)private_key_bytes = bytes_from_buffer(buffer, buffer_length)finally:if ec_key:libcrypto.EC_KEY_free(ec_key)return (load_public_key(public_key_bytes), load_private_key(private_key_bytes))", "docstring": "Generates a public/private key pair\n\n:param algorithm:\n The key algorithm - \"rsa\", \"dsa\" or \"ec\"\n\n:param bit_size:\n An integer - used for \"rsa\" and \"dsa\". For \"rsa\" the value maye be 1024,\n 2048, 3072 or 4096. For \"dsa\" the value may be 1024, plus 2048 or 3072\n if OpenSSL 1.0.0 or newer is available.\n\n:param curve:\n A unicode string - used for \"ec\" keys. Valid values include \"secp256r1\",\n \"secp384r1\" and \"secp521r1\".\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A 2-element tuple of (PublicKey, PrivateKey). 
The contents of each key\n may be saved by calling .asn1.dump().", "id": "f9533:m0"} {"signature": "def generate_dh_parameters(bit_size):", "body": "if not isinstance(bit_size, int_types):raise TypeError(pretty_message('''''',type_name(bit_size)))if bit_size < :raise ValueError('')if bit_size > :raise ValueError('')if bit_size % != :raise ValueError('')dh = Nonetry:dh = libcrypto.DH_new()if is_null(dh):handle_openssl_error()result = libcrypto.DH_generate_parameters_ex(dh, bit_size, LibcryptoConst.DH_GENERATOR_2, null())handle_openssl_error(result)buffer_length = libcrypto.i2d_DHparams(dh, null())if buffer_length < :handle_openssl_error(buffer_length)buffer = buffer_from_bytes(buffer_length)result = libcrypto.i2d_DHparams(dh, buffer_pointer(buffer))if result < :handle_openssl_error(result)dh_params_bytes = bytes_from_buffer(buffer, buffer_length)return algos.DHParameters.load(dh_params_bytes)finally:if dh:libcrypto.DH_free(dh)", "docstring": "Generates DH parameters for use with Diffie-Hellman key exchange. Returns\na structure in the format of DHParameter defined in PKCS#3, which is also\nused by the OpenSSL dhparam tool.\n\nTHIS CAN BE VERY TIME CONSUMING!\n\n:param bit_size:\n The integer bit size of the parameters to generate. Must be between 512\n and 4096, and divisible by 64. Recommended secure value as of early 2016\n is 2048, with an absolute minimum of 1024.\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n An asn1crypto.algos.DHParameters object. Use\n oscrypto.asymmetric.dump_dh_parameters() to save to disk for usage with\n web servers.", "id": "f9533:m1"} {"signature": "def load_certificate(source):", "body": "if isinstance(source, asn1x509.Certificate):certificate = sourceelif isinstance(source, byte_cls):certificate = parse_certificate(source)elif isinstance(source, str_cls):with open(source, '') as f:certificate = parse_certificate(f.read())else:raise TypeError(pretty_message('''''',type_name(source)))return _load_x509(certificate)", "docstring": "Loads an x509 certificate into a Certificate object\n\n:param source:\n A byte string of file contents, a unicode string filename or an\n asn1crypto.x509.Certificate object\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A Certificate object", "id": "f9533:m2"} {"signature": "def _load_x509(certificate):", "body": "source = certificate.dump()buffer = buffer_from_bytes(source)evp_pkey = libcrypto.d2i_X509(null(), buffer_pointer(buffer), len(source))if is_null(evp_pkey):handle_openssl_error()return Certificate(evp_pkey, certificate)", "docstring": "Loads an ASN.1 object of an x509 certificate into a Certificate object\n\n:param certificate:\n An asn1crypto.x509.Certificate object\n\n:return:\n A Certificate object", "id": "f9533:m3"} {"signature": "def load_private_key(source, password=None):", "body": "if isinstance(source, keys.PrivateKeyInfo):private_object = sourceelse:if password is not None:if isinstance(password, str_cls):password = password.encode('')if not isinstance(password, byte_cls):raise TypeError(pretty_message('''''',type_name(password)))if isinstance(source, str_cls):with open(source, '') as f:source = f.read()elif not isinstance(source, byte_cls):raise 
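A hedged usage sketch for generate_pair() and generate_dh_parameters() as documented above. The oscrypto.asymmetric serialization helpers (dump_private_key, dump_dh_parameters) and their acceptance of None for an unencrypted export are assumptions about the public API.

    from oscrypto import asymmetric

    public_key, private_key = asymmetric.generate_pair('ec', curve='secp256r1')
    pem_private = asymmetric.dump_private_key(private_key, None)   # assumed unencrypted PEM export
    der_public = public_key.asn1.dump()                            # raw DER, per the docstring above

    # per the warning above, this can take a long time for larger sizes
    dh_params = asymmetric.generate_dh_parameters(2048)
    pem_params = asymmetric.dump_dh_parameters(dh_params)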
TypeError(pretty_message('''''',type_name(source)))private_object = parse_private(source, password)return _load_key(private_object)", "docstring": "Loads a private key into a PrivateKey object\n\n:param source:\n A byte string of file contents, a unicode string filename or an\n asn1crypto.keys.PrivateKeyInfo object\n\n:param password:\n A byte or unicode string to decrypt the private key file. Unicode\n strings will be encoded using UTF-8. Not used is the source is a\n PrivateKeyInfo object.\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n oscrypto.errors.AsymmetricKeyError - when the private key is incompatible with the OS crypto library\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A PrivateKey object", "id": "f9533:m4"} {"signature": "def load_public_key(source):", "body": "if isinstance(source, keys.PublicKeyInfo):public_key = sourceelif isinstance(source, byte_cls):public_key = parse_public(source)elif isinstance(source, str_cls):with open(source, '') as f:public_key = parse_public(f.read())else:raise TypeError(pretty_message('''''',type_name(public_key)))if public_key.algorithm == '':if libcrypto_version_info < (,) and public_key.hash_algo == '':raise AsymmetricKeyError(pretty_message('''''',public_key.bit_size))elif public_key.hash_algo is None:raise IncompleteAsymmetricKeyError(pretty_message(''''''))data = public_key.dump()buffer = buffer_from_bytes(data)evp_pkey = libcrypto.d2i_PUBKEY(null(), buffer_pointer(buffer), len(data))if is_null(evp_pkey):handle_openssl_error()return PublicKey(evp_pkey, public_key)", "docstring": "Loads a public key into a PublicKey object\n\n:param source:\n A byte string of file contents, a unicode string filename or an\n asn1crypto.keys.PublicKeyInfo object\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n oscrypto.errors.AsymmetricKeyError - when the public key is incompatible with the OS crypto library\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A PublicKey object", "id": "f9533:m5"} {"signature": "def _load_key(private_object):", "body": "if libcrypto_version_info < (,) and private_object.algorithm == '' and private_object.hash_algo == '':raise AsymmetricKeyError(pretty_message('''''',private_object.bit_size))source = private_object.unwrap().dump()buffer = buffer_from_bytes(source)evp_pkey = libcrypto.d2i_AutoPrivateKey(null(), buffer_pointer(buffer), len(source))if is_null(evp_pkey):handle_openssl_error()return PrivateKey(evp_pkey, private_object)", "docstring": "Loads a private key into a PrivateKey object\n\n:param private_object:\n An asn1crypto.keys.PrivateKeyInfo object\n\n:return:\n A PrivateKey object", "id": "f9533:m6"} {"signature": "def load_pkcs12(source, password=None):", "body": "if password is not None:if isinstance(password, str_cls):password = password.encode('')if not isinstance(password, byte_cls):raise TypeError(pretty_message('''''',type_name(password)))if isinstance(source, str_cls):with open(source, '') as f:source = f.read()elif not isinstance(source, byte_cls):raise TypeError(pretty_message('''''',type_name(source)))key_info, cert_info, extra_certs_info = parse_pkcs12(source, password)key = Nonecert = Noneif key_info:key = _load_key(key_info)if cert_info:cert = _load_x509(cert_info)extra_certs = [_load_x509(info) for info in extra_certs_info]return (key, cert, extra_certs)", 
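Hedged examples of the loaders documented above, assuming they are re-exported by oscrypto.asymmetric; every file path and passphrase below is a placeholder.

    from oscrypto import asymmetric

    cert = asymmetric.load_certificate('/path/to/server.crt')                    # placeholder path
    public_key = asymmetric.load_public_key('/path/to/server-public.pem')        # placeholder path
    private_key = asymmetric.load_private_key('/path/to/server.key', 'secret')   # placeholder passphrase
    key, cert2, extra_certs = asymmetric.load_pkcs12('/path/to/bundle.p12', 'secret')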
"docstring": "Loads a .p12 or .pfx file into a PrivateKey object and one or more\nCertificates objects\n\n:param source:\n A byte string of file contents or a unicode string filename\n\n:param password:\n A byte or unicode string to decrypt the PKCS12 file. Unicode strings\n will be encoded using UTF-8.\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n oscrypto.errors.AsymmetricKeyError - when a contained key is incompatible with the OS crypto library\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A three-element tuple containing (PrivateKey, Certificate, [Certificate, ...])", "id": "f9533:m7"} {"signature": "def rsa_pkcs1v15_encrypt(certificate_or_public_key, data):", "body": "return _encrypt(certificate_or_public_key, data, LibcryptoConst.RSA_PKCS1_PADDING)", "docstring": "Encrypts a byte string using an RSA public key or certificate. Uses PKCS#1\nv1.5 padding.\n\n:param certificate_or_public_key:\n A PublicKey or Certificate object\n\n:param data:\n A byte string, with a maximum length 11 bytes less than the key length\n (in bytes)\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the encrypted data", "id": "f9533:m8"} {"signature": "def rsa_pkcs1v15_decrypt(private_key, ciphertext):", "body": "return _decrypt(private_key, ciphertext, LibcryptoConst.RSA_PKCS1_PADDING)", "docstring": "Decrypts a byte string using an RSA private key. Uses PKCS#1 v1.5 padding.\n\n:param private_key:\n A PrivateKey object\n\n:param ciphertext:\n A byte string of the encrypted data\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the original plaintext", "id": "f9533:m9"} {"signature": "def rsa_oaep_encrypt(certificate_or_public_key, data):", "body": "return _encrypt(certificate_or_public_key, data, LibcryptoConst.RSA_PKCS1_OAEP_PADDING)", "docstring": "Encrypts a byte string using an RSA public key or certificate. Uses PKCS#1\nOAEP padding with SHA1.\n\n:param certificate_or_public_key:\n A PublicKey or Certificate object\n\n:param data:\n A byte string, with a maximum length 41 bytes (or more) less than the\n key length (in bytes)\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the encrypted data", "id": "f9533:m10"} {"signature": "def rsa_oaep_decrypt(private_key, ciphertext):", "body": "return _decrypt(private_key, ciphertext, LibcryptoConst.RSA_PKCS1_OAEP_PADDING)", "docstring": "Decrypts a byte string using an RSA private key. 
Uses PKCS#1 OAEP padding\nwith SHA1.\n\n:param private_key:\n A PrivateKey object\n\n:param ciphertext:\n A byte string of the encrypted data\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the original plaintext", "id": "f9533:m11"} {"signature": "def _encrypt(certificate_or_public_key, data, padding):", "body": "if not isinstance(certificate_or_public_key, (Certificate, PublicKey)):raise TypeError(pretty_message('''''',type_name(certificate_or_public_key)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))rsa = Nonetry:buffer_size = libcrypto.EVP_PKEY_size(certificate_or_public_key.evp_pkey)buffer = buffer_from_bytes(buffer_size)rsa = libcrypto.EVP_PKEY_get1_RSA(certificate_or_public_key.evp_pkey)res = libcrypto.RSA_public_encrypt(len(data), data, buffer, rsa, padding)handle_openssl_error(res)return bytes_from_buffer(buffer, res)finally:if rsa:libcrypto.RSA_free(rsa)", "docstring": "Encrypts plaintext using an RSA public key or certificate\n\n:param certificate_or_public_key:\n A PublicKey, Certificate or PrivateKey object\n\n:param data:\n The byte string to encrypt\n\n:param padding:\n The padding mode to use\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the encrypted data", "id": "f9533:m12"} {"signature": "def _decrypt(private_key, ciphertext, padding):", "body": "if not isinstance(private_key, PrivateKey):raise TypeError(pretty_message('''''',type_name(private_key)))if not isinstance(ciphertext, byte_cls):raise TypeError(pretty_message('''''',type_name(ciphertext)))rsa = Nonetry:buffer_size = libcrypto.EVP_PKEY_size(private_key.evp_pkey)buffer = buffer_from_bytes(buffer_size)rsa = libcrypto.EVP_PKEY_get1_RSA(private_key.evp_pkey)res = libcrypto.RSA_private_decrypt(len(ciphertext), ciphertext, buffer, rsa, padding)handle_openssl_error(res)return bytes_from_buffer(buffer, res)finally:if rsa:libcrypto.RSA_free(rsa)", "docstring": "Decrypts RSA ciphertext using a private key\n\n:param private_key:\n A PrivateKey object\n\n:param ciphertext:\n The ciphertext - a byte string\n\n:param padding:\n The padding mode to use\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the plaintext", "id": "f9533:m13"} {"signature": "def rsa_pkcs1v15_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if certificate_or_public_key.algorithm != '':raise ValueError(pretty_message('''''',certificate_or_public_key.algorithm.upper()))return _verify(certificate_or_public_key, signature, data, hash_algorithm)", "docstring": "Verifies an RSASSA-PKCS-v1.5 signature.\n\nWhen the hash_algorithm is \"raw\", the operation is identical to RSA\npublic key decryption. 
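A hedged round-trip sketch for the RSA encryption wrappers above (OAEP and PKCS#1 v1.5), assuming they are exposed via oscrypto.asymmetric; the plaintext is illustrative and must stay within the size limits given in the docstrings.

    from oscrypto import asymmetric

    public_key, private_key = asymmetric.generate_pair('rsa', bit_size=2048)
    plaintext = b'illustrative payload'

    ciphertext = asymmetric.rsa_oaep_encrypt(public_key, plaintext)       # PKCS#1 OAEP with SHA1
    assert asymmetric.rsa_oaep_decrypt(private_key, ciphertext) == plaintext

    ciphertext = asymmetric.rsa_pkcs1v15_encrypt(public_key, plaintext)   # PKCS#1 v1.5 padding
    assert asymmetric.rsa_pkcs1v15_decrypt(private_key, ciphertext) == plaintext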
That is: the data is not hashed and no ASN.1\nstructure with an algorithm identifier of the hash algorithm is placed in\nthe encrypted byte string.\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n A byte string of the signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\",\n \"sha512\" or \"raw\"\n\n:raises:\n oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9533:m14"} {"signature": "def rsa_pss_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if certificate_or_public_key.algorithm != '':raise ValueError(pretty_message('''''',certificate_or_public_key.algorithm.upper()))return _verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=True)", "docstring": "Verifies an RSASSA-PSS signature. For the PSS padding the mask gen algorithm\nwill be mgf1 using the same hash algorithm as the signature. The salt length\nwith be the length of the hash algorithm, and the trailer field with be the\nstandard 0xBC byte.\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n A byte string of the signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9533:m15"} {"signature": "def dsa_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if certificate_or_public_key.algorithm != '':raise ValueError(pretty_message('''''',certificate_or_public_key.algorithm.upper()))return _verify(certificate_or_public_key, signature, data, hash_algorithm)", "docstring": "Verifies a DSA signature\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n A byte string of the signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9533:m16"} {"signature": "def ecdsa_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if certificate_or_public_key.algorithm != '':raise ValueError(pretty_message('''''',certificate_or_public_key.algorithm.upper()))return _verify(certificate_or_public_key, signature, data, hash_algorithm)", "docstring": "Verifies an ECDSA signature\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param 
signature:\n A byte string of the signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9533:m17"} {"signature": "def _verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=False):", "body": "if not isinstance(certificate_or_public_key, (Certificate, PublicKey)):raise TypeError(pretty_message('''''',type_name(certificate_or_public_key)))if not isinstance(signature, byte_cls):raise TypeError(pretty_message('''''',type_name(signature)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))valid_hash_algorithms = set(['', '', '', '', '', ''])if certificate_or_public_key.algorithm == '' and not rsa_pss_padding:valid_hash_algorithms |= set([''])if hash_algorithm not in valid_hash_algorithms:valid_hash_algorithms_error = ''if certificate_or_public_key.algorithm == '' and not rsa_pss_padding:valid_hash_algorithms_error += ''raise ValueError(pretty_message('''''',valid_hash_algorithms_error,repr(hash_algorithm)))if certificate_or_public_key.algorithm != '' and rsa_pss_padding:raise ValueError(pretty_message('''''',certificate_or_public_key.algorithm.upper()))if certificate_or_public_key.algorithm == '' and hash_algorithm == '':if len(data) > certificate_or_public_key.byte_size - :raise ValueError(pretty_message('''''',certificate_or_public_key.byte_size,len(data)))rsa = Nonetry:rsa = libcrypto.EVP_PKEY_get1_RSA(certificate_or_public_key.evp_pkey)if is_null(rsa):handle_openssl_error()buffer_size = libcrypto.EVP_PKEY_size(certificate_or_public_key.evp_pkey)decrypted_buffer = buffer_from_bytes(buffer_size)decrypted_length = libcrypto.RSA_public_decrypt(len(signature),signature,decrypted_buffer,rsa,LibcryptoConst.RSA_PKCS1_PADDING)handle_openssl_error(decrypted_length)decrypted_bytes = bytes_from_buffer(decrypted_buffer, decrypted_length)if not constant_compare(data, decrypted_bytes):raise SignatureError('')returnfinally:if rsa:libcrypto.RSA_free(rsa)evp_md_ctx = Nonersa = Nonedsa = Nonedsa_sig = Noneec_key = Noneecdsa_sig = Nonetry:if libcrypto_version_info < (, ):evp_md_ctx = libcrypto.EVP_MD_CTX_create()else:evp_md_ctx = libcrypto.EVP_MD_CTX_new()evp_md = {'': libcrypto.EVP_md5,'': libcrypto.EVP_sha1,'': libcrypto.EVP_sha224,'': libcrypto.EVP_sha256,'': libcrypto.EVP_sha384,'': libcrypto.EVP_sha512}[hash_algorithm]()if libcrypto_version_info < (,):if certificate_or_public_key.algorithm == '' and rsa_pss_padding:digest = getattr(hashlib, hash_algorithm)(data).digest()rsa = libcrypto.EVP_PKEY_get1_RSA(certificate_or_public_key.evp_pkey)if is_null(rsa):handle_openssl_error()buffer_size = libcrypto.EVP_PKEY_size(certificate_or_public_key.evp_pkey)decoded_buffer = buffer_from_bytes(buffer_size)decoded_length = libcrypto.RSA_public_decrypt(len(signature),signature,decoded_buffer,rsa,LibcryptoConst.RSA_NO_PADDING)handle_openssl_error(decoded_length)res = libcrypto.RSA_verify_PKCS1_PSS(rsa,digest,evp_md,decoded_buffer,LibcryptoConst.EVP_MD_CTX_FLAG_PSS_MDLEN)elif certificate_or_public_key.algorithm == '':res = libcrypto.EVP_DigestInit_ex(evp_md_ctx, evp_md, null())handle_openssl_error(res)res = 
libcrypto.EVP_DigestUpdate(evp_md_ctx, data, len(data))handle_openssl_error(res)res = libcrypto.EVP_VerifyFinal(evp_md_ctx,signature,len(signature),certificate_or_public_key.evp_pkey)elif certificate_or_public_key.algorithm == '':digest = getattr(hashlib, hash_algorithm)(data).digest()signature_buffer = buffer_from_bytes(signature)signature_pointer = buffer_pointer(signature_buffer)dsa_sig = libcrypto.d2i_DSA_SIG(null(), signature_pointer, len(signature))if is_null(dsa_sig):raise SignatureError('')dsa = libcrypto.EVP_PKEY_get1_DSA(certificate_or_public_key.evp_pkey)if is_null(dsa):handle_openssl_error()res = libcrypto.DSA_do_verify(digest, len(digest), dsa_sig, dsa)elif certificate_or_public_key.algorithm == '':digest = getattr(hashlib, hash_algorithm)(data).digest()signature_buffer = buffer_from_bytes(signature)signature_pointer = buffer_pointer(signature_buffer)ecdsa_sig = libcrypto.d2i_ECDSA_SIG(null(), signature_pointer, len(signature))if is_null(ecdsa_sig):raise SignatureError('')ec_key = libcrypto.EVP_PKEY_get1_EC_KEY(certificate_or_public_key.evp_pkey)if is_null(ec_key):handle_openssl_error()res = libcrypto.ECDSA_do_verify(digest, len(digest), ecdsa_sig, ec_key)else:evp_pkey_ctx_pointer_pointer = new(libcrypto, '')res = libcrypto.EVP_DigestVerifyInit(evp_md_ctx,evp_pkey_ctx_pointer_pointer,evp_md,null(),certificate_or_public_key.evp_pkey)handle_openssl_error(res)evp_pkey_ctx_pointer = unwrap(evp_pkey_ctx_pointer_pointer)if rsa_pss_padding:res = libcrypto.EVP_PKEY_CTX_ctrl(evp_pkey_ctx_pointer,LibcryptoConst.EVP_PKEY_RSA,-, LibcryptoConst.EVP_PKEY_CTRL_RSA_PADDING,LibcryptoConst.RSA_PKCS1_PSS_PADDING,null())handle_openssl_error(res)res = libcrypto.EVP_PKEY_CTX_ctrl(evp_pkey_ctx_pointer,LibcryptoConst.EVP_PKEY_RSA,LibcryptoConst.EVP_PKEY_OP_SIGN | LibcryptoConst.EVP_PKEY_OP_VERIFY,LibcryptoConst.EVP_PKEY_CTRL_RSA_PSS_SALTLEN,-,null())handle_openssl_error(res)res = libcrypto.EVP_DigestUpdate(evp_md_ctx, data, len(data))handle_openssl_error(res)res = libcrypto.EVP_DigestVerifyFinal(evp_md_ctx, signature, len(signature))if res < :raise SignatureError('')handle_openssl_error(res)finally:if evp_md_ctx:if libcrypto_version_info < (, ):libcrypto.EVP_MD_CTX_destroy(evp_md_ctx)else:libcrypto.EVP_MD_CTX_free(evp_md_ctx)if rsa:libcrypto.RSA_free(rsa)if dsa:libcrypto.DSA_free(dsa)if dsa_sig:libcrypto.DSA_SIG_free(dsa_sig)if ec_key:libcrypto.EC_KEY_free(ec_key)if ecdsa_sig:libcrypto.ECDSA_SIG_free(ecdsa_sig)", "docstring": "Verifies an RSA, DSA or ECDSA signature\n\n:param certificate_or_public_key:\n A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n A byte string of the signature to verify\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:param rsa_pss_padding:\n If the certificate_or_public_key is an RSA key, this enables PSS padding\n\n:raises:\n oscrypto.errors.SignatureError - when the signature is determined to be invalid\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library", "id": "f9533:m18"} {"signature": "def rsa_pkcs1v15_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '':raise ValueError(pretty_message('''''',private_key.algorithm.upper()))return _sign(private_key, data, hash_algorithm)", "docstring": "Generates an RSASSA-PKCS-v1.5 
signature.\n\nWhen the hash_algorithm is \"raw\", the operation is identical to RSA\nprivate key encryption. That is: the data is not hashed and no ASN.1\nstructure with an algorithm identifier of the hash algorithm is placed in\nthe encrypted byte string.\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\",\n \"sha512\" or \"raw\"\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9533:m19"} {"signature": "def rsa_pss_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '':raise ValueError(pretty_message('''''',private_key.algorithm.upper()))return _sign(private_key, data, hash_algorithm, rsa_pss_padding=True)", "docstring": "Generates an RSASSA-PSS signature. For the PSS padding the mask gen\nalgorithm will be mgf1 using the same hash algorithm as the signature. The\nsalt length with be the length of the hash algorithm, and the trailer field\nwith be the standard 0xBC byte.\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9533:m20"} {"signature": "def dsa_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '':raise ValueError(pretty_message('''''',private_key.algorithm.upper()))return _sign(private_key, data, hash_algorithm)", "docstring": "Generates a DSA signature\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9533:m21"} {"signature": "def ecdsa_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '':raise ValueError(pretty_message('''''',private_key.algorithm.upper()))return _sign(private_key, data, hash_algorithm)", "docstring": "Generates an ECDSA signature\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9533:m22"} {"signature": "def _sign(private_key, data, hash_algorithm, rsa_pss_padding=False):", "body": "if not isinstance(private_key, 
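A hedged sign-and-verify round trip pairing the signing wrappers above with the matching verify functions recorded earlier; it assumes both are exposed via oscrypto.asymmetric and that SignatureError lives in oscrypto.errors.

    from oscrypto import asymmetric
    from oscrypto.errors import SignatureError

    public_key, private_key = asymmetric.generate_pair('rsa', bit_size=2048)
    message = b'message to authenticate'

    signature = asymmetric.rsa_pss_sign(private_key, message, 'sha256')
    try:
        asymmetric.rsa_pss_verify(public_key, signature, message, 'sha256')   # returns None on success
    except SignatureError:
        print('signature rejected')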
PrivateKey):raise TypeError(pretty_message('''''',type_name(private_key)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))valid_hash_algorithms = set(['', '', '', '', '', ''])if private_key.algorithm == '' and not rsa_pss_padding:valid_hash_algorithms |= set([''])if hash_algorithm not in valid_hash_algorithms:valid_hash_algorithms_error = ''if private_key.algorithm == '' and not rsa_pss_padding:valid_hash_algorithms_error += ''raise ValueError(pretty_message('''''',valid_hash_algorithms_error,repr(hash_algorithm)))if private_key.algorithm != '' and rsa_pss_padding:raise ValueError(pretty_message('''''',private_key.algorithm.upper()))if private_key.algorithm == '' and hash_algorithm == '':if len(data) > private_key.byte_size - :raise ValueError(pretty_message('''''',private_key.byte_size,len(data)))rsa = Nonetry:rsa = libcrypto.EVP_PKEY_get1_RSA(private_key.evp_pkey)if is_null(rsa):handle_openssl_error()buffer_size = libcrypto.EVP_PKEY_size(private_key.evp_pkey)signature_buffer = buffer_from_bytes(buffer_size)signature_length = libcrypto.RSA_private_encrypt(len(data),data,signature_buffer,rsa,LibcryptoConst.RSA_PKCS1_PADDING)handle_openssl_error(signature_length)return bytes_from_buffer(signature_buffer, signature_length)finally:if rsa:libcrypto.RSA_free(rsa)evp_md_ctx = Nonersa = Nonedsa = Nonedsa_sig = Noneec_key = Noneecdsa_sig = Nonetry:if libcrypto_version_info < (, ):evp_md_ctx = libcrypto.EVP_MD_CTX_create()else:evp_md_ctx = libcrypto.EVP_MD_CTX_new()evp_md = {'': libcrypto.EVP_md5,'': libcrypto.EVP_sha1,'': libcrypto.EVP_sha224,'': libcrypto.EVP_sha256,'': libcrypto.EVP_sha384,'': libcrypto.EVP_sha512}[hash_algorithm]()if libcrypto_version_info < (,):if private_key.algorithm == '' and rsa_pss_padding:digest = getattr(hashlib, hash_algorithm)(data).digest()rsa = libcrypto.EVP_PKEY_get1_RSA(private_key.evp_pkey)if is_null(rsa):handle_openssl_error()buffer_size = libcrypto.EVP_PKEY_size(private_key.evp_pkey)em_buffer = buffer_from_bytes(buffer_size)res = libcrypto.RSA_padding_add_PKCS1_PSS(rsa,em_buffer,digest,evp_md,LibcryptoConst.EVP_MD_CTX_FLAG_PSS_MDLEN)handle_openssl_error(res)signature_buffer = buffer_from_bytes(buffer_size)signature_length = libcrypto.RSA_private_encrypt(buffer_size,em_buffer,signature_buffer,rsa,LibcryptoConst.RSA_NO_PADDING)handle_openssl_error(signature_length)elif private_key.algorithm == '':buffer_size = libcrypto.EVP_PKEY_size(private_key.evp_pkey)signature_buffer = buffer_from_bytes(buffer_size)signature_length = new(libcrypto, '')res = libcrypto.EVP_DigestInit_ex(evp_md_ctx, evp_md, null())handle_openssl_error(res)res = libcrypto.EVP_DigestUpdate(evp_md_ctx, data, len(data))handle_openssl_error(res)res = libcrypto.EVP_SignFinal(evp_md_ctx,signature_buffer,signature_length,private_key.evp_pkey)handle_openssl_error(res)signature_length = deref(signature_length)elif private_key.algorithm == '':digest = getattr(hashlib, hash_algorithm)(data).digest()dsa = libcrypto.EVP_PKEY_get1_DSA(private_key.evp_pkey)if is_null(dsa):handle_openssl_error()dsa_sig = libcrypto.DSA_do_sign(digest, len(digest), dsa)if is_null(dsa_sig):handle_openssl_error()buffer_size = libcrypto.i2d_DSA_SIG(dsa_sig, null())signature_buffer = buffer_from_bytes(buffer_size)signature_pointer = buffer_pointer(signature_buffer)signature_length = libcrypto.i2d_DSA_SIG(dsa_sig, signature_pointer)handle_openssl_error(signature_length)elif private_key.algorithm == '':digest = getattr(hashlib, hash_algorithm)(data).digest()ec_key = 
libcrypto.EVP_PKEY_get1_EC_KEY(private_key.evp_pkey)if is_null(ec_key):handle_openssl_error()ecdsa_sig = libcrypto.ECDSA_do_sign(digest, len(digest), ec_key)if is_null(ecdsa_sig):handle_openssl_error()buffer_size = libcrypto.i2d_ECDSA_SIG(ecdsa_sig, null())signature_buffer = buffer_from_bytes(buffer_size)signature_pointer = buffer_pointer(signature_buffer)signature_length = libcrypto.i2d_ECDSA_SIG(ecdsa_sig, signature_pointer)handle_openssl_error(signature_length)else:buffer_size = libcrypto.EVP_PKEY_size(private_key.evp_pkey)signature_buffer = buffer_from_bytes(buffer_size)signature_length = new(libcrypto, '', buffer_size)evp_pkey_ctx_pointer_pointer = new(libcrypto, '')res = libcrypto.EVP_DigestSignInit(evp_md_ctx,evp_pkey_ctx_pointer_pointer,evp_md,null(),private_key.evp_pkey)handle_openssl_error(res)evp_pkey_ctx_pointer = unwrap(evp_pkey_ctx_pointer_pointer)if rsa_pss_padding:res = libcrypto.EVP_PKEY_CTX_ctrl(evp_pkey_ctx_pointer,LibcryptoConst.EVP_PKEY_RSA,-, LibcryptoConst.EVP_PKEY_CTRL_RSA_PADDING,LibcryptoConst.RSA_PKCS1_PSS_PADDING,null())handle_openssl_error(res)res = libcrypto.EVP_PKEY_CTX_ctrl(evp_pkey_ctx_pointer,LibcryptoConst.EVP_PKEY_RSA,LibcryptoConst.EVP_PKEY_OP_SIGN | LibcryptoConst.EVP_PKEY_OP_VERIFY,LibcryptoConst.EVP_PKEY_CTRL_RSA_PSS_SALTLEN,-,null())handle_openssl_error(res)res = libcrypto.EVP_DigestUpdate(evp_md_ctx, data, len(data))handle_openssl_error(res)res = libcrypto.EVP_DigestSignFinal(evp_md_ctx, signature_buffer, signature_length)handle_openssl_error(res)signature_length = deref(signature_length)return bytes_from_buffer(signature_buffer, signature_length)finally:if evp_md_ctx:if libcrypto_version_info < (, ):libcrypto.EVP_MD_CTX_destroy(evp_md_ctx)else:libcrypto.EVP_MD_CTX_free(evp_md_ctx)if rsa:libcrypto.RSA_free(rsa)if dsa:libcrypto.DSA_free(dsa)if dsa_sig:libcrypto.DSA_SIG_free(dsa_sig)if ec_key:libcrypto.EC_KEY_free(ec_key)if ecdsa_sig:libcrypto.ECDSA_SIG_free(ecdsa_sig)", "docstring": "Generates an RSA, DSA or ECDSA signature\n\n:param private_key:\n The PrivateKey to generate the signature with\n\n:param data:\n A byte string of the data the signature is for\n\n:param hash_algorithm:\n A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:param rsa_pss_padding:\n If the private_key is an RSA key, this enables PSS padding\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by the OS crypto library\n\n:return:\n A byte string of the signature", "id": "f9533:m23"} {"signature": "def __init__(self, evp_pkey, asn1):", "body": "self.evp_pkey = evp_pkeyself.asn1 = asn1self._lib = libcrypto", "docstring": ":param evp_pkey:\n An OpenSSL EVP_PKEY value from loading/importing the key\n\n:param asn1:\n An asn1crypto.keys.PrivateKeyInfo object", "id": "f9533:c0:m0"} {"signature": "@propertydef algorithm(self):", "body": "return self.asn1.algorithm", "docstring": ":return:\n A unicode string of \"rsa\", \"dsa\" or \"ec\"", "id": "f9533:c0:m1"} {"signature": "@propertydef curve(self):", "body": "return self.asn1.curve[]", "docstring": ":return:\n A unicode string of EC curve name", "id": "f9533:c0:m2"} {"signature": "@propertydef bit_size(self):", "body": "return self.asn1.bit_size", "docstring": ":return:\n The number of bits in the key, as an integer", "id": "f9533:c0:m3"} {"signature": "@propertydef byte_size(self):", "body": "return self.asn1.byte_size", "docstring": ":return:\n The number of bytes 
in the key, as an integer", "id": "f9533:c0:m4"} {"signature": "def __init__(self, evp_pkey, asn1):", "body": "PrivateKey.__init__(self, evp_pkey, asn1)", "docstring": ":param evp_pkey:\n An OpenSSL EVP_PKEY value from loading/importing the key\n\n:param asn1:\n An asn1crypto.keys.PublicKeyInfo object", "id": "f9533:c1:m0"} {"signature": "def __init__(self, x509, asn1):", "body": "self.x509 = x509self.asn1 = asn1self._lib = libcrypto", "docstring": ":param x509:\n An OpenSSL X509 value from loading/importing the certificate\n\n:param asn1:\n An asn1crypto.x509.Certificate object", "id": "f9533:c2:m0"} {"signature": "@propertydef algorithm(self):", "body": "return self.public_key.algorithm", "docstring": ":return:\n A unicode string of \"rsa\", \"dsa\" or \"ec\"", "id": "f9533:c2:m1"} {"signature": "@propertydef curve(self):", "body": "return self.public_key.curve", "docstring": ":return:\n A unicode string of EC curve name", "id": "f9533:c2:m2"} {"signature": "@propertydef bit_size(self):", "body": "return self.public_key.bit_size", "docstring": ":return:\n The number of bits in the public key, as an integer", "id": "f9533:c2:m3"} {"signature": "@propertydef byte_size(self):", "body": "return self.public_key.byte_size", "docstring": ":return:\n The number of bytes in the public key, as an integer", "id": "f9533:c2:m4"} {"signature": "@propertydef evp_pkey(self):", "body": "return self.public_key.evp_pkey", "docstring": ":return:\n The EVP_PKEY of the public key this certificate contains", "id": "f9533:c2:m5"} {"signature": "@propertydef public_key(self):", "body": "if not self._public_key and self.x509:evp_pkey = libcrypto.X509_get_pubkey(self.x509)self._public_key = PublicKey(evp_pkey, self.asn1[''][''])return self._public_key", "docstring": ":return:\n The PublicKey object for the public key this certificate contains", "id": "f9533:c2:m6"} {"signature": "@propertydef self_signed(self):", "body": "if self._self_signed is None:self._self_signed = Falseif self.asn1.self_signed in set(['', '']):signature_algo = self.asn1[''].signature_algohash_algo = self.asn1[''].hash_algoif signature_algo == '':verify_func = rsa_pkcs1v15_verifyelif signature_algo == '':verify_func = dsa_verifyelif signature_algo == '':verify_func = ecdsa_verifyelse:raise OSError(pretty_message('''''',signature_algo))try:verify_func(self.public_key,self.asn1[''].native,self.asn1[''].dump(),hash_algo)self._self_signed = Trueexcept (SignatureError):passreturn self._self_signed", "docstring": ":return:\n A boolean - if the certificate is self-signed", "id": "f9533:c2:m7"} {"signature": "def handle_openssl_error(result, exception_class=None):", "body": "if result > :returnif exception_class is None:exception_class = OSErrorerror_num = libcrypto.ERR_get_error()buffer = buffer_from_bytes()libcrypto.ERR_error_string(error_num, buffer)error_string = byte_string_from_buffer(buffer)raise exception_class(_try_decode(error_string))", "docstring": "Checks if an error occured, and if so throws an OSError containing the\nlast OpenSSL error message\n\n:param result:\n An integer result code - 1 or greater indicates success\n\n:param exception_class:\n The exception class to use for the exception if an error occurred\n\n:raises:\n OSError - when an OpenSSL error occurs", "id": "f9535:m1"} {"signature": "def peek_openssl_error():", "body": "error = libcrypto.ERR_peek_error()lib = int((error >> ) & )func = int((error >> ) & )reason = int(error & )return (lib, func, reason)", "docstring": "Peeks into the error stack and pulls out the lib, func 
and reason\n\n:return:\n A three-element tuple of integers (lib, func, reason)", "id": "f9535:m2"} {"signature": "def aes_cbc_no_padding_encrypt(key, data, iv):", "body": "cipher = _calculate_aes_cipher(key)if not iv:iv = rand_bytes()elif len(iv) != :raise ValueError(pretty_message('''''',len(iv)))if len(data) % != :raise ValueError(pretty_message('''''',len(data)))return (iv, _encrypt(cipher, key, data, iv, False))", "docstring": "Encrypts plaintext using AES in CBC mode with a 128, 192 or 256 bit key and\nno padding. This means the ciphertext must be an exact multiple of 16 bytes\nlong.\n\n:param key:\n The encryption key - a byte string either 16, 24 or 32 bytes long\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The initialization vector - either a byte string 16-bytes long or None\n to generate an IV\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by OpenSSL\n\n:return:\n A tuple of two byte strings (iv, ciphertext)", "id": "f9537:m0"} {"signature": "def aes_cbc_no_padding_decrypt(key, data, iv):", "body": "cipher = _calculate_aes_cipher(key)if len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return _decrypt(cipher, key, data, iv, False)", "docstring": "Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key and no\npadding.\n\n:param key:\n The encryption key - a byte string either 16, 24 or 32 bytes long\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector - a byte string 16-bytes long\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by OpenSSL\n\n:return:\n A byte string of the plaintext", "id": "f9537:m1"} {"signature": "def aes_cbc_pkcs7_encrypt(key, data, iv):", "body": "cipher = _calculate_aes_cipher(key)if not iv:iv = rand_bytes()elif len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return (iv, _encrypt(cipher, key, data, iv, True))", "docstring": "Encrypts plaintext using AES in CBC mode with a 128, 192 or 256 bit key and\nPKCS#7 padding.\n\n:param key:\n The encryption key - a byte string either 16, 24 or 32 bytes long\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The initialization vector - either a byte string 16-bytes long or None\n to generate an IV\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by OpenSSL\n\n:return:\n A tuple of two byte strings (iv, ciphertext)", "id": "f9537:m2"} {"signature": "def aes_cbc_pkcs7_decrypt(key, data, iv):", "body": "cipher = _calculate_aes_cipher(key)if len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return _decrypt(cipher, key, data, iv, True)", "docstring": "Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key\n\n:param key:\n The encryption key - a byte string either 16, 24 or 32 bytes long\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector - a byte string 16-bytes long\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by OpenSSL\n\n:return:\n A byte string of the plaintext", "id": "f9537:m3"} {"signature": "def _calculate_aes_cipher(key):", 
"body": "if len(key) not in [, , ]:raise ValueError(pretty_message('''''',len(key)))if len(key) == :cipher = ''elif len(key) == :cipher = ''elif len(key) == :cipher = ''return cipher", "docstring": "Determines if the key is a valid AES 128, 192 or 256 key\n\n:param key:\n A byte string of the key to use\n\n:raises:\n ValueError - when an invalid key is provided\n\n:return:\n A unicode string of the AES variation - \"aes128\", \"aes192\" or \"aes256\"", "id": "f9537:m4"} {"signature": "def rc4_encrypt(key, data):", "body": "if len(key) < or len(key) > :raise ValueError(pretty_message('''''',len(key)))return _encrypt('', key, data, None, None)", "docstring": "Encrypts plaintext using RC4 with a 40-128 bit key\n\n:param key:\n The encryption key - a byte string 5-16 bytes long\n\n:param data:\n The plaintext - a byte string\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by OpenSSL\n\n:return:\n A byte string of the ciphertext", "id": "f9537:m5"} {"signature": "def rc4_decrypt(key, data):", "body": "if len(key) < or len(key) > :raise ValueError(pretty_message('''''',len(key)))return _decrypt('', key, data, None, None)", "docstring": "Decrypts RC4 ciphertext using a 40-128 bit key\n\n:param key:\n The encryption key - a byte string 5-16 bytes long\n\n:param data:\n The ciphertext - a byte string\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by OpenSSL\n\n:return:\n A byte string of the plaintext", "id": "f9537:m6"} {"signature": "def rc2_cbc_pkcs5_encrypt(key, data, iv):", "body": "if len(key) < or len(key) > :raise ValueError(pretty_message('''''',len(key)))if not iv:iv = rand_bytes()elif len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return (iv, _encrypt('', key, data, iv, True))", "docstring": "Encrypts plaintext using RC2 in CBC mode with a 40-128 bit key and PKCS#5\npadding.\n\n:param key:\n The encryption key - a byte string 8 bytes long\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The initialization vector - a byte string 8-bytes long or None\n to generate an IV\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by OpenSSL\n\n:return:\n A tuple of two byte strings (iv, ciphertext)", "id": "f9537:m7"} {"signature": "def rc2_cbc_pkcs5_decrypt(key, data, iv):", "body": "if len(key) < or len(key) > :raise ValueError(pretty_message('''''',len(key)))if len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return _decrypt('', key, data, iv, True)", "docstring": "Decrypts RC2 ciphertext ib CBC mode using a 40-128 bit key and PKCS#5\npadding.\n\n:param key:\n The encryption key - a byte string 8 bytes long\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector - a byte string 8 bytes long\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by OpenSSL\n\n:return:\n A byte string of the plaintext", "id": "f9537:m8"} {"signature": "def tripledes_cbc_pkcs5_encrypt(key, data, iv):", "body": "if len(key) != and len(key) != :raise ValueError(pretty_message('''''',len(key)))if not iv:iv = rand_bytes()elif len(iv) != 
:raise ValueError(pretty_message('''''',len(iv)))cipher = ''if len(key) == :key = key + key[:]cipher = ''return (iv, _encrypt(cipher, key, data, iv, True))", "docstring": "Encrypts plaintext using 3DES in CBC mode using either the 2 or 3 key\nvariant (16 or 24 byte long key) and PKCS#5 padding.\n\n:param key:\n The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The initialization vector - a byte string 8-bytes long or None\n to generate an IV\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by OpenSSL\n\n:return:\n A tuple of two byte strings (iv, ciphertext)", "id": "f9537:m9"} {"signature": "def tripledes_cbc_pkcs5_decrypt(key, data, iv):", "body": "if len(key) != and len(key) != :raise ValueError(pretty_message('''''',len(key)))if len(iv) != :raise ValueError(pretty_message('''''',len(iv)))cipher = ''if len(key) == :key = key + key[:]cipher = ''return _decrypt(cipher, key, data, iv, True)", "docstring": "Decrypts 3DES ciphertext in CBC mode using either the 2 or 3 key variant\n(16 or 24 byte long key) and PKCS#5 padding.\n\n:param key:\n The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector - a byte string 8-bytes long\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by OpenSSL\n\n:return:\n A byte string of the plaintext", "id": "f9537:m10"} {"signature": "def des_cbc_pkcs5_encrypt(key, data, iv):", "body": "if len(key) != :raise ValueError(pretty_message('''''',len(key)))if not iv:iv = rand_bytes()elif len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return (iv, _encrypt('', key, data, iv, True))", "docstring": "Encrypts plaintext using DES in CBC mode with a 56 bit key and PKCS#5\npadding.\n\n:param key:\n The encryption key - a byte string 8 bytes long (includes error correction bits)\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The initialization vector - a byte string 8-bytes long or None\n to generate an IV\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by OpenSSL\n\n:return:\n A tuple of two byte strings (iv, ciphertext)", "id": "f9537:m11"} {"signature": "def des_cbc_pkcs5_decrypt(key, data, iv):", "body": "if len(key) != :raise ValueError(pretty_message('''''',len(key)))if len(iv) != :raise ValueError(pretty_message('''''',len(iv)))return _decrypt('', key, data, iv, True)", "docstring": "Decrypts DES ciphertext in CBC mode using a 56 bit key and PKCS#5 padding.\n\n:param key:\n The encryption key - a byte string 8 bytes long (includes error correction bits)\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector - a byte string 8-bytes long\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by OpenSSL\n\n:return:\n A byte string of the plaintext", "id": "f9537:m12"} {"signature": "def _encrypt(cipher, key, data, iv, padding):", "body": "if not isinstance(key, byte_cls):raise 
TypeError(pretty_message('''''',type_name(key)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))if cipher != '' and not isinstance(iv, byte_cls):raise TypeError(pretty_message('''''',type_name(iv)))if cipher != '' and not padding:raise ValueError('')evp_cipher_ctx = Nonetry:evp_cipher_ctx = libcrypto.EVP_CIPHER_CTX_new()if is_null(evp_cipher_ctx):handle_openssl_error()evp_cipher, buffer_size = _setup_evp_encrypt_decrypt(cipher, data)if iv is None:iv = null()if cipher in set(['', '']):res = libcrypto.EVP_EncryptInit_ex(evp_cipher_ctx, evp_cipher, null(), null(), null())handle_openssl_error(res)res = libcrypto.EVP_CIPHER_CTX_set_key_length(evp_cipher_ctx, len(key))handle_openssl_error(res)if cipher == '':res = libcrypto.EVP_CIPHER_CTX_ctrl(evp_cipher_ctx,LibcryptoConst.EVP_CTRL_SET_RC2_KEY_BITS,len(key) * ,null())handle_openssl_error(res)evp_cipher = null()res = libcrypto.EVP_EncryptInit_ex(evp_cipher_ctx, evp_cipher, null(), key, iv)handle_openssl_error(res)if padding is not None:res = libcrypto.EVP_CIPHER_CTX_set_padding(evp_cipher_ctx, int(padding))handle_openssl_error(res)buffer = buffer_from_bytes(buffer_size)output_length = new(libcrypto, '')res = libcrypto.EVP_EncryptUpdate(evp_cipher_ctx, buffer, output_length, data, len(data))handle_openssl_error(res)output = bytes_from_buffer(buffer, deref(output_length))res = libcrypto.EVP_EncryptFinal_ex(evp_cipher_ctx, buffer, output_length)handle_openssl_error(res)output += bytes_from_buffer(buffer, deref(output_length))return outputfinally:if evp_cipher_ctx:libcrypto.EVP_CIPHER_CTX_free(evp_cipher_ctx)", "docstring": "Encrypts plaintext\n\n:param cipher:\n A unicode string of \"aes128\", \"aes192\", \"aes256\", \"des\",\n \"tripledes_2key\", \"tripledes_3key\", \"rc2\", \"rc4\"\n\n:param key:\n The encryption key - a byte string 5-32 bytes long\n\n:param data:\n The plaintext - a byte string\n\n:param iv:\n The initialization vector - a byte string - unused for RC4\n\n:param padding:\n Boolean, if padding should be used - unused for RC4\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by OpenSSL\n\n:return:\n A byte string of the ciphertext", "id": "f9537:m13"} {"signature": "def _decrypt(cipher, key, data, iv, padding):", "body": "if not isinstance(key, byte_cls):raise TypeError(pretty_message('''''',type_name(key)))if not isinstance(data, byte_cls):raise TypeError(pretty_message('''''',type_name(data)))if cipher != '' and not isinstance(iv, byte_cls):raise TypeError(pretty_message('''''',type_name(iv)))if cipher != '' and padding is None:raise ValueError('')evp_cipher_ctx = Nonetry:evp_cipher_ctx = libcrypto.EVP_CIPHER_CTX_new()if is_null(evp_cipher_ctx):handle_openssl_error()evp_cipher, buffer_size = _setup_evp_encrypt_decrypt(cipher, data)if iv is None:iv = null()if cipher in set(['', '']):res = libcrypto.EVP_DecryptInit_ex(evp_cipher_ctx, evp_cipher, null(), null(), null())handle_openssl_error(res)res = libcrypto.EVP_CIPHER_CTX_set_key_length(evp_cipher_ctx, len(key))handle_openssl_error(res)if cipher == '':res = libcrypto.EVP_CIPHER_CTX_ctrl(evp_cipher_ctx,LibcryptoConst.EVP_CTRL_SET_RC2_KEY_BITS,len(key) * ,null())handle_openssl_error(res)evp_cipher = null()res = libcrypto.EVP_DecryptInit_ex(evp_cipher_ctx, evp_cipher, null(), key, iv)handle_openssl_error(res)if padding is not None:res = libcrypto.EVP_CIPHER_CTX_set_padding(evp_cipher_ctx, 
int(padding))handle_openssl_error(res)buffer = buffer_from_bytes(buffer_size)output_length = new(libcrypto, '')res = libcrypto.EVP_DecryptUpdate(evp_cipher_ctx, buffer, output_length, data, len(data))handle_openssl_error(res)output = bytes_from_buffer(buffer, deref(output_length))res = libcrypto.EVP_DecryptFinal_ex(evp_cipher_ctx, buffer, output_length)handle_openssl_error(res)output += bytes_from_buffer(buffer, deref(output_length))return outputfinally:if evp_cipher_ctx:libcrypto.EVP_CIPHER_CTX_free(evp_cipher_ctx)", "docstring": "Decrypts AES/RC4/RC2/3DES/DES ciphertext\n\n:param cipher:\n A unicode string of \"aes128\", \"aes192\", \"aes256\", \"des\",\n \"tripledes_2key\", \"tripledes_3key\", \"rc2\", \"rc4\"\n\n:param key:\n The encryption key - a byte string 5-32 bytes long\n\n:param data:\n The ciphertext - a byte string\n\n:param iv:\n The initialization vector - a byte string - unused for RC4\n\n:param padding:\n Boolean, if padding should be used - unused for RC4\n\n:raises:\n ValueError - when any of the parameters contain an invalid value\n TypeError - when any of the parameters are of the wrong type\n OSError - when an error is returned by OpenSSL\n\n:return:\n A byte string of the plaintext", "id": "f9537:m14"} {"signature": "def _setup_evp_encrypt_decrypt(cipher, data):", "body": "evp_cipher = {'': libcrypto.EVP_aes_128_cbc,'': libcrypto.EVP_aes_192_cbc,'': libcrypto.EVP_aes_256_cbc,'': libcrypto.EVP_rc2_cbc,'': libcrypto.EVP_rc4,'': libcrypto.EVP_des_cbc,'': libcrypto.EVP_des_ede_cbc,'': libcrypto.EVP_des_ede3_cbc,}[cipher]()if cipher == '':buffer_size = len(data)else:block_size = {'': ,'': ,'': ,'': ,'': ,'': ,'': ,}[cipher]buffer_size = block_size * int(math.ceil(len(data) / block_size))return (evp_cipher, buffer_size)", "docstring": "Creates an EVP_CIPHER pointer object and determines the buffer size\nnecessary for the parameter specified.\n\n:param evp_cipher_ctx:\n An EVP_CIPHER_CTX pointer\n\n:param cipher:\n A unicode string of \"aes128\", \"aes192\", \"aes256\", \"des\",\n \"tripledes_2key\", \"tripledes_3key\", \"rc2\", \"rc4\"\n\n:param key:\n The key byte string\n\n:param data:\n The plaintext or ciphertext as a byte string\n\n:param padding:\n If padding is to be used\n\n:return:\n A 2-element tuple with the first element being an EVP_CIPHER pointer\n and the second being an integer that is the required buffer size", "id": "f9537:m15"} {"signature": "def __init__(self, color=True, datefmt=None):", "body": "logging.Formatter.__init__(self, datefmt=datefmt)self._colors = {}if color and _stderr_supports_color():fg_color = (curses.tigetstr(\"\") orcurses.tigetstr(\"\") or \"\")if (, ) < sys.version_info < (, , ):fg_color = str(fg_color, \"\")for levelno, code in self.DEFAULT_COLORS.items():self._colors[levelno] = str(curses.tparm(fg_color, code), \"\")self._normal = str(curses.tigetstr(\"\"), \"\")scr = curses.initscr()self.termwidth = scr.getmaxyx()[]curses.endwin()else:self._normal = ''self.termwidth = ", "docstring": "r\"\"\"\n :arg bool color: Enables color support.\n :arg string fmt: Log message format.\n It will be applied to the attributes dict of log records. The\n text between ``%(color)s`` and ``%(end_color)s`` will be colored\n depending on the level if color support is on.\n :arg dict colors: color mappings from logging level to terminal color\n code\n :arg string datefmt: Datetime format.\n Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.\n .. 
versionchanged:: 3.2\n Added ``fmt`` and ``datefmt`` arguments.", "id": "f9550:c0:m0"} {"signature": "def mkdir_p(*args, **kwargs):", "body": "try:return os.mkdir(*args, **kwargs)except OSError as exc:if exc.errno != errno.EEXIST:raise", "docstring": "Like `mkdir`, but does not raise an exception if the\n directory already exists.", "id": "f9552:m2"} {"signature": "def default_subprocess_runner(cmd, cwd=None, extra_environ=None):", "body": "env = os.environ.copy()if extra_environ:env.update(extra_environ)check_call(cmd, cwd=cwd, env=env)", "docstring": "The default method of calling the wrapper subprocess.", "id": "f9553:m1"} {"signature": "def norm_and_check(source_tree, requested):", "body": "if os.path.isabs(requested):raise ValueError(\"\")abs_source = os.path.abspath(source_tree)abs_requested = os.path.normpath(os.path.join(abs_source, requested))norm_source = os.path.normcase(abs_source)norm_requested = os.path.normcase(abs_requested)if os.path.commonprefix([norm_source, norm_requested]) != norm_source:raise ValueError(\"\")return abs_requested", "docstring": "Normalise and check a backend path.\n\n Ensure that the requested backend path is specified as a relative path,\n and resolves to a location under the given source tree.\n\n Return an absolute version of the requested path.", "id": "f9553:m2"} {"signature": "def get_requires_for_build_wheel(self, config_settings=None):", "body": "return self._call_hook('', {'': config_settings})", "docstring": "Identify packages required for building a wheel\n\n Returns a list of dependency specifications, e.g.:\n [\"wheel >= 0.25\", \"setuptools\"]\n\n This does not include requirements specified in pyproject.toml.\n It returns the result of calling the equivalently named hook in a\n subprocess.", "id": "f9553:c3:m2"} {"signature": "def prepare_metadata_for_build_wheel(self, metadata_directory, config_settings=None):", "body": "return self._call_hook('', {'': abspath(metadata_directory),'': config_settings,})", "docstring": "Prepare a *.dist-info folder with metadata for this project.\n\n Returns the name of the newly created folder.\n\n If the build backend defines a hook with this name, it will be called\n in a subprocess. 
If not, the backend will be asked to build a wheel,\n and the dist-info extracted from that.", "id": "f9553:c3:m3"} {"signature": "def build_wheel(self, wheel_directory, config_settings=None,metadata_directory=None):", "body": "if metadata_directory is not None:metadata_directory = abspath(metadata_directory)return self._call_hook('', {'': abspath(wheel_directory),'': config_settings,'': metadata_directory,})", "docstring": "Build a wheel from this project.\n\n Returns the name of the newly created file.\n\n In general, this will call the 'build_wheel' hook in the backend.\n However, if that was previously called by\n 'prepare_metadata_for_build_wheel', and the same metadata_directory is\n used, the previously built wheel will be copied to wheel_directory.", "id": "f9553:c3:m4"} {"signature": "def get_requires_for_build_sdist(self, config_settings=None):", "body": "return self._call_hook('', {'': config_settings})", "docstring": "Identify packages required for building a wheel\n\n Returns a list of dependency specifications, e.g.:\n [\"setuptools >= 26\"]\n\n This does not include requirements specified in pyproject.toml.\n It returns the result of calling the equivalently named hook in a\n subprocess.", "id": "f9553:c3:m5"} {"signature": "def build_sdist(self, sdist_directory, config_settings=None):", "body": "return self._call_hook('', {'': abspath(sdist_directory),'': config_settings,})", "docstring": "Build an sdist from this project.\n\n Returns the name of the newly created file.\n\n This calls the 'build_sdist' backend hook in a subprocess.", "id": "f9553:c3:m6"} {"signature": "def build_wheel(source_dir, wheel_dir, config_settings=None):", "body": "if config_settings is None:config_settings = {}requires, backend = _load_pyproject(source_dir)hooks = Pep517HookCaller(source_dir, backend)with BuildEnvironment() as env:env.pip_install(requires)reqs = hooks.get_requires_for_build_wheel(config_settings)env.pip_install(reqs)return hooks.build_wheel(wheel_dir, config_settings)", "docstring": "Build a wheel from a source directory using PEP 517 hooks.\n\n :param str source_dir: Source directory containing pyproject.toml\n :param str wheel_dir: Target directory to create wheel in\n :param dict config_settings: Options to pass to build backend\n\n This is a blocking function which will run pip in a subprocess to install\n build requirements.", "id": "f9554:m1"} {"signature": "def build_sdist(source_dir, sdist_dir, config_settings=None):", "body": "if config_settings is None:config_settings = {}requires, backend = _load_pyproject(source_dir)hooks = Pep517HookCaller(source_dir, backend)with BuildEnvironment() as env:env.pip_install(requires)reqs = hooks.get_requires_for_build_sdist(config_settings)env.pip_install(reqs)return hooks.build_sdist(sdist_dir, config_settings)", "docstring": "Build an sdist from a source directory using PEP 517 hooks.\n\n :param str source_dir: Source directory containing pyproject.toml\n :param str sdist_dir: Target directory to place sdist in\n :param dict config_settings: Options to pass to build backend\n\n This is a blocking function which will run pip in a subprocess to install\n build requirements.", "id": "f9554:m2"} {"signature": "def pip_install(self, reqs):", "body": "if not reqs:returnlog.info('', reqs)check_call([sys.executable, '', '', '', '','', self.path] + list(reqs))", "docstring": "Install dependencies into this env by calling pip in a subprocess", "id": "f9554:c0:m2"} {"signature": "def contained_in(filename, directory):", "body": "filename = 
os.path.normcase(os.path.abspath(filename))directory = os.path.normcase(os.path.abspath(directory))return os.path.commonprefix([filename, directory]) == directory", "docstring": "Test if a file is located within the given directory.", "id": "f9557:m0"} {"signature": "def _build_backend():", "body": "backend_path = os.environ.get('')if backend_path:extra_pathitems = backend_path.split(os.pathsep)sys.path[:] = extra_pathitemsep = os.environ['']mod_path, _, obj_path = ep.partition('')try:obj = import_module(mod_path)except ImportError:raise BackendUnavailable(traceback.format_exc())if backend_path:if not any(contained_in(obj.__file__, path)for path in extra_pathitems):raise BackendInvalid(\"\")if obj_path:for path_part in obj_path.split(''):obj = getattr(obj, path_part)return obj", "docstring": "Find and load the build backend", "id": "f9557:m1"} {"signature": "def get_requires_for_build_wheel(config_settings):", "body": "backend = _build_backend()try:hook = backend.get_requires_for_build_wheelexcept AttributeError:return []else:return hook(config_settings)", "docstring": "Invoke the optional get_requires_for_build_wheel hook\n\n Returns [] if the hook is not defined.", "id": "f9557:m2"} {"signature": "def prepare_metadata_for_build_wheel(metadata_directory, config_settings):", "body": "backend = _build_backend()try:hook = backend.prepare_metadata_for_build_wheelexcept AttributeError:return _get_wheel_metadata_from_wheel(backend, metadata_directory,config_settings)else:return hook(metadata_directory, config_settings)", "docstring": "Invoke optional prepare_metadata_for_build_wheel\n\n Implements a fallback by building a wheel if the hook isn't defined.", "id": "f9557:m3"} {"signature": "def _dist_info_files(whl_zip):", "body": "res = []for path in whl_zip.namelist():m = re.match(r'', path)if m:res.append(path)if res:return resraise Exception(\"\")", "docstring": "Identify the .dist-info folder inside a wheel ZipFile.", "id": "f9557:m4"} {"signature": "def _get_wheel_metadata_from_wheel(backend, metadata_directory, config_settings):", "body": "from zipfile import ZipFilewhl_basename = backend.build_wheel(metadata_directory, config_settings)with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), ''):pass whl_file = os.path.join(metadata_directory, whl_basename)with ZipFile(whl_file) as zipf:dist_info = _dist_info_files(zipf)zipf.extractall(path=metadata_directory, members=dist_info)return dist_info[].split('')[]", "docstring": "Build a wheel and extract the metadata from it.\n\n Fallback for when the build backend does not\n define the 'get_wheel_metadata' hook.", "id": "f9557:m5"} {"signature": "def _find_already_built_wheel(metadata_directory):", "body": "if not metadata_directory:return Nonemetadata_parent = os.path.dirname(metadata_directory)if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)):return Nonewhl_files = glob(os.path.join(metadata_parent, ''))if not whl_files:print('')return Noneif len(whl_files) > :print('''')return Nonereturn whl_files[]", "docstring": "Check for a wheel already built during the get_wheel_metadata hook.", "id": "f9557:m6"} {"signature": "def build_wheel(wheel_directory, config_settings, metadata_directory=None):", "body": "prebuilt_whl = _find_already_built_wheel(metadata_directory)if prebuilt_whl:shutil.copy2(prebuilt_whl, wheel_directory)return os.path.basename(prebuilt_whl)return _build_backend().build_wheel(wheel_directory, config_settings,metadata_directory)", "docstring": "Invoke the mandatory build_wheel hook.\n\n If a wheel was 
already built in the\n prepare_metadata_for_build_wheel fallback, this\n will copy it rather than rebuilding the wheel.", "id": "f9557:m7"} {"signature": "def get_requires_for_build_sdist(config_settings):", "body": "backend = _build_backend()try:hook = backend.get_requires_for_build_sdistexcept AttributeError:return []else:return hook(config_settings)", "docstring": "Invoke the optional get_requires_for_build_sdist hook\n\n Returns [] if the hook is not defined.", "id": "f9557:m8"} {"signature": "def build_sdist(sdist_directory, config_settings):", "body": "backend = _build_backend()try:return backend.build_sdist(sdist_directory, config_settings)except getattr(backend, '', _DummyException):raise GotUnsupportedOperation(traceback.format_exc())", "docstring": "Invoke the mandatory build_sdist hook.", "id": "f9557:m9"} {"signature": "def get_position(self, position_id):", "body": "url = \"\" % position_idreturn self.position_from_json(self._get_resource(url)[\"\"])", "docstring": "Returns position data.\n\nhttp://dev.wheniwork.com/#get-existing-position", "id": "f9558:c0:m0"} {"signature": "def get_positions(self):", "body": "url = \"\"data = self._get_resource(url)positions = []for entry in data['']:positions.append(self.position_from_json(entry))return positions", "docstring": "Returns a list of positions.\n\nhttp://dev.wheniwork.com/#listing-positions", "id": "f9558:c0:m1"} {"signature": "def create_position(self, params={}):", "body": "url = \"\"body = paramsdata = self._post_resource(url, body)return self.position_from_json(data[\"\"])", "docstring": "Creates a position\n\nhttp://dev.wheniwork.com/#create-update-position", "id": "f9558:c0:m2"} {"signature": "def get_requests(self, params={}):", "body": "if \"\" in params:params[''] = ''.join(map(str, params['']))requests = []users = {}messages = {}params[''] = while True:param_list = [(k, params[k]) for k in sorted(params)]url = \"\" % urlencode(param_list)data = self._get_resource(url)for entry in data[\"\"]:user = Users.user_from_json(entry)users[user.user_id] = userfor entry in data[\"\"]:request = self.request_from_json(entry)requests.append(request)for entry in data[\"\"]:message = Messages.message_from_json(entry)if message.request_id not in messages:messages[message.request_id] = []messages[message.request_id].append(message)if not data['']:breakparams[''] += for request in requests:request.user = users.get(request.user_id, None)request.messages = messages.get(request.request_id, [])return requests", "docstring": "List requests\n\nhttp://dev.wheniwork.com/#listing-requests", "id": "f9560:c0:m0"} {"signature": "def get_message(self, message_id):", "body": "url = \"\" % message_idreturn self.message_from_json(self._get_resource(url)[\"\"])", "docstring": "Get Existing Message\n\nhttp://dev.wheniwork.com/#get-existing-message", "id": "f9570:c0:m0"} {"signature": "def get_messages(self, params={}):", "body": "param_list = [(k, params[k]) for k in sorted(params)]url = \"\" % urlencode(param_list)data = self._get_resource(url)messages = []for entry in data[\"\"]:messages.append(self.message_from_json(entry))return messages", "docstring": "List messages\n\nhttp://dev.wheniwork.com/#listing-messages", "id": "f9570:c0:m1"} {"signature": "def create_message(self, params={}):", "body": "url = \"\"body = paramsdata = self._post_resource(url, body)return self.message_from_json(data[\"\"])", "docstring": "Creates a message\n\nhttp://dev.wheniwork.com/#create/update-message", "id": "f9570:c0:m2"} {"signature": "def update_message(self, 
message):", "body": "url = \"\" % message.message_iddata = self._put_resource(url, message.json_data())return self.message_from_json(data)", "docstring": "Modify an existing message.\n\nhttp://dev.wheniwork.com/#create/update-message", "id": "f9570:c0:m3"} {"signature": "def delete_messages(self, messages):", "body": "url = \"\" % urlencode([('', \"\".join(messages))])data = self._delete_resource(url)return data", "docstring": "Delete existing messages.\n\nhttp://dev.wheniwork.com/#delete-existing-message", "id": "f9570:c0:m4"} {"signature": "def get_location(self, location_id):", "body": "url = \"\" % location_idreturn self.location_from_json(self._get_resource(url)[\"\"])", "docstring": "Returns location data.\n\nhttp://dev.wheniwork.com/#get-existing-location", "id": "f9571:c0:m0"} {"signature": "def get_locations(self):", "body": "url = \"\"data = self._get_resource(url)locations = []for entry in data['']:locations.append(self.location_from_json(entry))return locations", "docstring": "Returns a list of locations.\n\nhttp://dev.wheniwork.com/#listing-locations", "id": "f9571:c0:m1"} {"signature": "def get_site(self, site_id):", "body": "url = \"\" % site_idreturn self.site_from_json(self._get_resource(url)[\"\"])", "docstring": "Returns site data.\n\nhttp://dev.wheniwork.com/#get-existing-site", "id": "f9572:c0:m0"} {"signature": "def get_sites(self):", "body": "url = \"\"data = self._get_resource(url)sites = []for entry in data['']:sites.append(self.site_from_json(entry))return sites", "docstring": "Returns a list of sites.\n\nhttp://dev.wheniwork.com/#listing-sites", "id": "f9572:c0:m1"} {"signature": "def create_site(self, params={}):", "body": "url = \"\"body = paramsdata = self._post_resource(url, body)return self.site_from_json(data[\"\"])", "docstring": "Creates a site\n\nhttp://dev.wheniwork.com/#create-update-site", "id": "f9572:c0:m2"} {"signature": "def _get_resource(self, url, data_key=None):", "body": "headers = {\"\": \"\"}if self.token:headers[\"\"] = \"\" % self.tokenresponse = WhenIWork_DAO().getURL(url, headers)if response.status != :raise DataFailureException(url, response.status, response.data)return json.loads(response.data)", "docstring": "When I Work GET method. 
Return representation of the requested\nresource.", "id": "f9573:c0:m2"} {"signature": "def _put_resource(self, url, body):", "body": "headers = {\"\": \"\",\"\": \"\"}if self.token:headers[\"\"] = \"\" % self.tokenresponse = WhenIWork_DAO().putURL(url, headers, json.dumps(body))if not (response.status == or response.status == orresponse.status == ):raise DataFailureException(url, response.status, response.data)return json.loads(response.data)", "docstring": "When I Work PUT method.", "id": "f9573:c0:m3"} {"signature": "def _post_resource(self, url, body):", "body": "headers = {\"\": \"\",\"\": \"\"}if self.token:headers[\"\"] = \"\" % self.tokenresponse = WhenIWork_DAO().postURL(url, headers, json.dumps(body))if not (response.status == or response.status == ):raise DataFailureException(url, response.status, response.data)return json.loads(response.data)", "docstring": "When I Work POST method.", "id": "f9573:c0:m4"} {"signature": "def _delete_resource(self, url):", "body": "headers = {\"\": \"\",\"\": \"\"}if self.token:headers[\"\"] = \"\" % self.tokenresponse = WhenIWork_DAO().deleteURL(url, headers)if not (response.status == or response.status == orresponse.status == ):raise DataFailureException(url, response.status, response.data)return json.loads(response.data)", "docstring": "When I Work DELETE method.", "id": "f9573:c0:m5"} {"signature": "def get_account(self):", "body": "url = \"\"return self.account_from_json(self._get_resource(url)[\"\"])", "docstring": "Get Existing Account\n\nhttp://dev.wheniwork.com/#get-existing-account", "id": "f9574:c0:m0"} {"signature": "def get_user(self, user_id):", "body": "url = \"\" % user_idreturn self.user_from_json(self._get_resource(url)[\"\"])", "docstring": "Returns user profile data.\n\nhttp://dev.wheniwork.com/#get-existing-user", "id": "f9576:c0:m0"} {"signature": "def get_users(self, params={}):", "body": "param_list = [(k, params[k]) for k in sorted(params)]url = \"\" % urlencode(param_list)data = self._get_resource(url)users = []for entry in data[\"\"]:users.append(self.user_from_json(entry))return users", "docstring": "Returns a list of users.\n\nhttp://dev.wheniwork.com/#listing-users", "id": "f9576:c0:m1"} {"signature": "def get_shifts(self, params={}):", "body": "param_list = [(k, params[k]) for k in sorted(params)]url = \"\" % urlencode(param_list)data = self._get_resource(url)shifts = []locations = {}sites = {}positions = {}users = {}for entry in data.get(\"\", []):location = Locations.location_from_json(entry)locations[location.location_id] = locationfor entry in data.get(\"\", []):site = Sites.site_from_json(entry)sites[site.site_id] = sitefor entry in data.get(\"\", []):position = Positions.position_from_json(entry)positions[position.position_id] = positionfor entry in data.get(\"\", []):user = Users.user_from_json(entry)users[user.user_id] = userfor entry in data[\"\"]:shift = self.shift_from_json(entry)shifts.append(shift)for shift in shifts:shift.location = locations.get(shift.location_id, None)shift.site = sites.get(shift.site_id, None)shift.position = positions.get(shift.position_id, None)shift.user = users.get(shift.user_id, None)return shifts", "docstring": "List shifts\n\nhttp://dev.wheniwork.com/#listing-shifts", "id": "f9577:c0:m0"} {"signature": "def create_shift(self, params={}):", "body": "url = \"\"body = paramsdata = self._post_resource(url, body)shift = self.shift_from_json(data[\"\"])return shift", "docstring": "Creates a shift\n\nhttp://dev.wheniwork.com/#create/update-shift", "id": "f9577:c0:m1"} {"signature": 
"def delete_shifts(self, shifts):", "body": "url = \"\" % urlencode({'': \"\".join(str(s) for s in shifts)})data = self._delete_resource(url)return data", "docstring": "Delete existing shifts.\n\nhttp://dev.wheniwork.com/#delete-shift", "id": "f9577:c0:m2"} {"signature": "def __init__(self):", "body": "self._is_scanning = Falseself._powered_on = threading.Event()self._powered_off = threading.Event()", "docstring": "Create an instance of the bluetooth adapter from the provided bluez\n DBus object.", "id": "f9580:c0:m0"} {"signature": "def _state_changed(self, state):", "body": "logger.debug(''.format(state))if state == :self._powered_off.clear()self._powered_on.set()elif state == :self._powered_on.clear()self._powered_off.set()", "docstring": "Called when the power state changes.", "id": "f9580:c0:m1"} {"signature": "@propertydef name(self):", "body": "return \"\"", "docstring": "Return the name of this BLE network adapter.", "id": "f9580:c0:m2"} {"signature": "def start_scan(self, timeout_sec=TIMEOUT_SEC):", "body": "get_provider()._central_manager.scanForPeripheralsWithServices_options_(None, None)self._is_scanning = True", "docstring": "Start scanning for BLE devices.", "id": "f9580:c0:m3"} {"signature": "def stop_scan(self, timeout_sec=TIMEOUT_SEC):", "body": "get_provider()._central_manager.stopScan()self._is_scanning = False", "docstring": "Stop scanning for BLE devices.", "id": "f9580:c0:m4"} {"signature": "@propertydef is_scanning(self):", "body": "return self._is_scanning", "docstring": "Return True if the BLE adapter is scanning for devices, otherwise\n return False.", "id": "f9580:c0:m5"} {"signature": "def power_on(self, timeout_sec=TIMEOUT_SEC):", "body": "self._powered_on.clear()IOBluetoothPreferenceSetControllerPowerState()if not self._powered_on.wait(timeout_sec):raise RuntimeError('')", "docstring": "Power on Bluetooth.", "id": "f9580:c0:m6"} {"signature": "def power_off(self, timeout_sec=TIMEOUT_SEC):", "body": "self._powered_off.clear()IOBluetoothPreferenceSetControllerPowerState()if not self._powered_off.wait(timeout_sec):raise RuntimeError('')", "docstring": "Power off Bluetooth.", "id": "f9580:c0:m7"} {"signature": "@propertydef is_powered(self):", "body": "return IOBluetoothPreferenceGetControllerPowerState() == ", "docstring": "Return True if the BLE adapter is powered up, otherwise return False.", "id": "f9580:c0:m8"} {"signature": "def centralManagerDidUpdateState_(self, manager):", "body": "logger.debug('')get_provider()._adapter._state_changed(manager.state())", "docstring": "Called when the BLE adapter is powered on and ready to scan/connect\n to devices.", "id": "f9581:c0:m0"} {"signature": "def centralManager_didDiscoverPeripheral_advertisementData_RSSI_(self, manager, peripheral, data, rssi):", "body": "logger.debug('')device = device_list().get(peripheral)if device is None:device = device_list().add(peripheral, CoreBluetoothDevice(peripheral))device._update_advertised(data)", "docstring": "Called when the BLE adapter found a device while scanning, or has\n new advertisement data for a device.", "id": "f9581:c0:m1"} {"signature": "def centralManager_didConnectPeripheral_(self, manager, peripheral):", "body": "logger.debug('')peripheral.setDelegate_(self)peripheral.discoverServices_(None)device = device_list().get(peripheral)if device is not None:device._set_connected()", "docstring": "Called when a device is connected.", "id": "f9581:c0:m2"} {"signature": "def centralManager_didDisconnectPeripheral_error_(self, manager, peripheral, error):", "body": 
"logger.debug('')device = device_list().get(peripheral)if device is not None:device._set_disconnected()device_list().remove(peripheral)", "docstring": "Called when a device is disconnected.", "id": "f9581:c0:m4"} {"signature": "def peripheral_didDiscoverServices_(self, peripheral, services):", "body": "logger.debug('')for service in peripheral.services():if service_list().get(service) is None:service_list().add(service, CoreBluetoothGattService(service))peripheral.discoverCharacteristics_forService_(None, service)", "docstring": "Called when services are discovered for a device.", "id": "f9581:c0:m5"} {"signature": "def peripheral_didDiscoverCharacteristicsForService_error_(self, peripheral, service, error):", "body": "logger.debug('')if error is not None:returnfor char in service.characteristics():if characteristic_list().get(char) is None:characteristic_list().add(char, CoreBluetoothGattCharacteristic(char))peripheral.discoverDescriptorsForCharacteristic_(char)device = device_list().get(peripheral)if device is not None:device._characteristics_discovered(service)", "docstring": "Called when characteristics are discovered for a service.", "id": "f9581:c0:m6"} {"signature": "def peripheral_didDiscoverDescriptorsForCharacteristic_error_(self, peripheral, characteristic, error):", "body": "logger.debug('')if error is not None:returnfor desc in characteristic.descriptors():if descriptor_list().get(desc) is None:descriptor_list().add(desc, CoreBluetoothGattDescriptor(desc))", "docstring": "Called when characteristics are discovered for a service.", "id": "f9581:c0:m7"} {"signature": "def peripheral_didUpdateValueForCharacteristic_error_(self, peripheral, characteristic, error):", "body": "logger.debug('')if error is not None:returndevice = device_list().get(peripheral)if device is not None:device._characteristic_changed(characteristic)", "docstring": "Called when characteristic value was read or updated.", "id": "f9581:c0:m10"} {"signature": "def peripheral_didUpdateValueForDescriptor_error_(self, peripheral, descriptor, error):", "body": "logger.debug('')if error is not None:returndevice = device_list().get(peripheral)if device is not None:device._descriptor_changed(descriptor)", "docstring": "Called when descriptor value was read or updated.", "id": "f9581:c0:m11"} {"signature": "def peripheral_didReadRSSI_error_(self, peripheral, rssi, error):", "body": "logger.debug('')if error is not None:returndevice = device_list().get(peripheral)if device is not None:device._rssi_changed(rssi)", "docstring": "Called when a new RSSI value for the peripheral is available.", "id": "f9581:c0:m12"} {"signature": "def initialize(self):", "body": "self._central_manager = CBCentralManager.alloc()self._central_manager.initWithDelegate_queue_options_(self._central_delegate,None, None)", "docstring": "Initialize the BLE provider. Must be called once before any other\n calls are made to the provider.", "id": "f9581:c1:m1"} {"signature": "def run_mainloop_with(self, target):", "body": "self._user_thread = threading.Thread(target=self._user_thread_main,args=(target,))self._user_thread.daemon = Trueself._user_thread.start()try:AppHelper.runConsoleEventLoop(installInterrupt=True)except KeyboardInterrupt:AppHelper.stopEventLoop()sys.exit()", "docstring": "Start the OS's main loop to process asyncronous BLE events and then\n run the specified target function in a background thread. Target\n function should be a function that takes no parameters and optionally\n return an integer response code. 
When the target function stops\n executing or returns with value then the main loop will be stopped and\n the program will exit with the returned code.\n\n Note that an OS main loop is required to process asyncronous BLE events\n and this function is provided as a convenience for writing simple tools\n and scripts that don't need to be full-blown GUI applications. If you\n are writing a GUI application that has a main loop (a GTK glib main loop\n on Linux, or a Cocoa main loop on OSX) then you don't need to call this\n function.", "id": "f9581:c1:m2"} {"signature": "def _user_thread_main(self, target):", "body": "try:return_code = target()if return_code is None:return_code = AppHelper.callAfter(lambda: sys.exit(return_code))except Exception as ex:AppHelper.callAfter(self._raise_error, sys.exc_info())", "docstring": "Main entry point for the thread that will run user's code.", "id": "f9581:c1:m3"} {"signature": "def _raise_error(self, exec_info):", "body": "raise_(exec_info[], None, exec_info[])", "docstring": "Raise an exception from the provided exception info. Used to cause\n the main thread to stop with an error.", "id": "f9581:c1:m4"} {"signature": "def list_adapters(self):", "body": "return [self._adapter]", "docstring": "Return a list of BLE adapter objects connected to the system.", "id": "f9581:c1:m5"} {"signature": "def list_devices(self):", "body": "return self._devices.list()", "docstring": "Return a list of BLE devices known to the system.", "id": "f9581:c1:m6"} {"signature": "def clear_cached_data(self):", "body": "if self._adapter.is_powered:self._adapter.power_off()with open(os.devnull, '') as devnull:subprocess.call('',shell=True, stdout=devnull, stderr=subprocess.STDOUT)subprocess.call('',shell=True, stdout=devnull, stderr=subprocess.STDOUT)", "docstring": "Clear the internal bluetooth device cache. This is useful if a device\n changes its state like name and it can't be detected with the new state\n anymore. 
WARNING: This will delete some files underneath the running user's\n ~/Library/Preferences/ folder!\n\n See this Stackoverflow question for information on what the function does:\n http://stackoverflow.com/questions/20553957/how-can-i-clear-the-corebluetooth-cache-on-macos", "id": "f9581:c1:m7"} {"signature": "def disconnect_devices(self, service_uuids):", "body": "cbuuids = map(uuid_to_cbuuid, service_uuids)for device in self._central_manager.retrieveConnectedPeripheralsWithServices_(cbuuids):self._central_manager.cancelPeripheralConnection_(device)", "docstring": "Disconnect any connected devices that have any of the specified\n service UUIDs.", "id": "f9581:c1:m8"} {"signature": "def cbuuid_to_uuid(cbuuid):", "body": "data = cbuuid.data().bytes()template = '' if len(data) <= else ''value = template.format(hexlify(data.tobytes()[:]).decode(''))return uuid.UUID(hex=value)", "docstring": "Convert Objective-C CBUUID type to native Python UUID type.", "id": "f9582:m0"} {"signature": "def uuid_to_cbuuid(uuid):", "body": "return CBUUID.UUIDWithString_(str(uuid))", "docstring": "Convert native Python UUID type to Objective-C CBUUID type.", "id": "f9582:m1"} {"signature": "def nsuuid_to_uuid(nsuuid):", "body": "return uuid.UUID(nsuuid.UUIDString())", "docstring": "Convert Objective-C NSUUID type to native Python UUID type.", "id": "f9582:m2"} {"signature": "def list(self):", "body": "with self._lock:return self._metadata.values()", "docstring": "Return list of all metadata objects.", "id": "f9583:c0:m1"} {"signature": "def get(self, cbobject):", "body": "with self._lock:return self._metadata.get(cbobject, None)", "docstring": "Retrieve the metadata associated with the specified CoreBluetooth\n object.", "id": "f9583:c0:m2"} {"signature": "def get_all(self, cbobjects):", "body": "try:with self._lock:return [self._metadata[x] for x in cbobjects]except KeyError:raise RuntimeError('')", "docstring": "Retrieve a list of metadata objects associated with the specified\n list of CoreBluetooth objects. If an object cannot be found then an\n exception is thrown.", "id": "f9583:c0:m3"} {"signature": "def add(self, cbobject, metadata):", "body": "with self._lock:if cbobject not in self._metadata:self._metadata[cbobject] = metadatareturn self._metadata[cbobject]", "docstring": "Add the specified CoreBluetooth item with the associated metadata if\n it doesn't already exist. 
Returns the newly created or preexisting\n metadata item.", "id": "f9583:c0:m4"} {"signature": "def remove(self, cbobject):", "body": "with self._lock:if cbobject in self._metadata:del self._metadata[cbobject]", "docstring": "Remove any metadata associated with the provided CoreBluetooth object.", "id": "f9583:c0:m5"} {"signature": "def __init__(self, service):", "body": "self._service = service", "docstring": "Create an instance of the GATT service from the provided CoreBluetooth\n CBService instance.", "id": "f9584:c0:m0"} {"signature": "@propertydef uuid(self):", "body": "return cbuuid_to_uuid(self._service.UUID())", "docstring": "Return the UUID of this GATT service.", "id": "f9584:c0:m1"} {"signature": "def list_characteristics(self):", "body": "return characteristic_list().get_all(self._service.characteristics())", "docstring": "Return list of GATT characteristics that have been discovered for this\n service.", "id": "f9584:c0:m2"} {"signature": "def __init__(self, characteristic):", "body": "self._characteristic = characteristicself._value_read = threading.Event()", "docstring": "Create an instance of the GATT characteristic from the provided\n CoreBluetooth CBCharacteristic instance.", "id": "f9584:c1:m0"} {"signature": "@propertydef _device(self):", "body": "return device_list().get(self._characteristic.service().peripheral())", "docstring": "Return the parent CoreBluetoothDevice object that owns this\n characteristic.", "id": "f9584:c1:m1"} {"signature": "@propertydef uuid(self):", "body": "return cbuuid_to_uuid(self._characteristic.UUID())", "docstring": "Return the UUID of this GATT characteristic.", "id": "f9584:c1:m2"} {"signature": "def read_value(self, timeout_sec=TIMEOUT_SEC):", "body": "self._value_read.clear()self._device._peripheral.readValueForCharacteristic_(self._characteristic)if not self._value_read.wait(timeout_sec):raise RuntimeError('')return self._characteristic.value()", "docstring": "Read the value of this characteristic.", "id": "f9584:c1:m3"} {"signature": "def write_value(self, value, write_type=):", "body": "data = NSData.dataWithBytes_length_(value, len(value))self._device._peripheral.writeValue_forCharacteristic_type_(data,self._characteristic,write_type)", "docstring": "Write the specified value to this characteristic.", "id": "f9584:c1:m4"} {"signature": "def start_notify(self, on_change):", "body": "self._device._notify_characteristic(self._characteristic, on_change)self._device._peripheral.setNotifyValue_forCharacteristic_(True,self._characteristic)", "docstring": "Enable notification of changes for this characteristic on the\n specified on_change callback. 
on_change should be a function that takes\n one parameter which is the value (as a string of bytes) of the changed\n characteristic value.", "id": "f9584:c1:m5"} {"signature": "def stop_notify(self):", "body": "self._device._peripheral.setNotifyValue_forCharacteristic_(False,self._characteristic)", "docstring": "Disable notification of changes for this characteristic.", "id": "f9584:c1:m6"} {"signature": "def list_descriptors(self):", "body": "return descriptor_list().get_all(self._characteristic.descriptors())", "docstring": "Return list of GATT descriptors that have been discovered for this\n characteristic.", "id": "f9584:c1:m7"} {"signature": "def __init__(self, descriptor):", "body": "self._descriptor = descriptorself._value_read = threading.Event()", "docstring": "Create an instance of the GATT descriptor from the provided\n CoreBluetooth CBDescriptor value.", "id": "f9584:c2:m0"} {"signature": "@propertydef _device(self):", "body": "return device_list().get(self._descriptor.characteristic().service().peripheral())", "docstring": "Return the parent CoreBluetoothDevice object that owns this\n characteristic.", "id": "f9584:c2:m1"} {"signature": "@propertydef uuid(self):", "body": "return cbuuid_to_uuid(self._descriptor.UUID())", "docstring": "Return the UUID of this GATT descriptor.", "id": "f9584:c2:m2"} {"signature": "def read_value(self):", "body": "passself._value_read.clear()self._device._peripheral.readValueForDescriptor(self._descriptor)if not self._value_read.wait(timeout_sec):raise RuntimeError('')return self._value", "docstring": "Read the value of this descriptor.", "id": "f9584:c2:m3"} {"signature": "def __init__(self, peripheral):", "body": "self._peripheral = peripheralself._advertised = []self._discovered_services = set()self._char_on_changed = {}self._rssi = Noneself._connected = threading.Event()self._disconnected = threading.Event()self._discovered = threading.Event()self._rssi_read = threading.Event()", "docstring": "Create an instance of the CoreBluetooth device from the provided\n CBPeripheral instance.", "id": "f9585:c0:m0"} {"signature": "def connect(self, timeout_sec=TIMEOUT_SEC):", "body": "self._central_manager.connectPeripheral_options_(self._peripheral, None)if not self._connected.wait(timeout_sec):raise RuntimeError('')", "docstring": "Connect to the device. If not connected within the specified timeout\n then an exception is thrown.", "id": "f9585:c0:m2"} {"signature": "def disconnect(self, timeout_sec=TIMEOUT_SEC):", "body": "for service in self.list_services():for char in service.list_characteristics():for desc in char.list_descriptors():descriptor_list().remove(desc)characteristic_list().remove(char)service_list().remove(service)self._central_manager.cancelPeripheralConnection_(self._peripheral)if not self._disconnected.wait(timeout_sec):raise RuntimeError('')", "docstring": "Disconnect from the device. 
If not disconnected within the specified\n timeout then an exception is thrown.", "id": "f9585:c0:m3"} {"signature": "def _set_connected(self):", "body": "self._disconnected.clear()self._connected.set()", "docstring": "Set the connected event.", "id": "f9585:c0:m4"} {"signature": "def _set_disconnected(self):", "body": "self._connected.clear()self._disconnected.set()", "docstring": "Set the disconnected event.", "id": "f9585:c0:m5"} {"signature": "def _update_advertised(self, advertised):", "body": "if '' in advertised:self._advertised = self._advertised + map(cbuuid_to_uuid, advertised[''])", "docstring": "Called when advertisement data is received.", "id": "f9585:c0:m6"} {"signature": "def _characteristics_discovered(self, service):", "body": "self._discovered_services.add(service)if self._discovered_services >= set(self._peripheral.services()):self._discovered.set()", "docstring": "Called when GATT characteristics have been discovered.", "id": "f9585:c0:m7"} {"signature": "def _notify_characteristic(self, characteristic, on_change):", "body": "self._char_on_changed[characteristic] = on_change", "docstring": "Call the specified on_change callback when this characteristic\n changes.", "id": "f9585:c0:m8"} {"signature": "def _characteristic_changed(self, characteristic):", "body": "on_changed = self._char_on_changed.get(characteristic, None)if on_changed is not None:on_changed(characteristic.value().bytes().tobytes())char = characteristic_list().get(characteristic)if char is not None:char._value_read.set()", "docstring": "Called when the specified characteristic has changed its value.", "id": "f9585:c0:m9"} {"signature": "def _descriptor_changed(self, descriptor):", "body": "desc = descriptor_list().get(descriptor)if desc is not None:desc._value_read.set()", "docstring": "Called when the specified descriptor has changed its value.", "id": "f9585:c0:m10"} {"signature": "def _rssi_changed(self, rssi):", "body": "self._rssi = rssiself._rssi_read.set()", "docstring": "Called when the RSSI signal strength has been read.", "id": "f9585:c0:m11"} {"signature": "def list_services(self):", "body": "return service_list().get_all(self._peripheral.services())", "docstring": "Return a list of GattService objects that have been discovered for\n this device.", "id": "f9585:c0:m12"} {"signature": "def discover(self, service_uuids, char_uuids, timeout_sec=TIMEOUT_SEC):", "body": "if not self._discovered.wait(timeout_sec):raise RuntimeError('')", "docstring": "Wait up to timeout_sec for the specified services and characteristics\n to be discovered on the device. If the timeout is exceeded without\n discovering the services and characteristics then an exception is thrown.", "id": "f9585:c0:m13"} {"signature": "@propertydef advertised(self):", "body": "return self._advertised", "docstring": "Return a list of UUIDs for services that are advertised by this\n device.", "id": "f9585:c0:m14"} {"signature": "@propertydef id(self):", "body": "return nsuuid_to_uuid(self._peripheral.identifier())", "docstring": "Return a unique identifier for this device. 
On supported platforms\n this will be the MAC address of the device, however on unsupported\n platforms (Mac OSX) it will be a unique ID like a UUID.", "id": "f9585:c0:m15"} {"signature": "@propertydef name(self):", "body": "return self._peripheral.name()", "docstring": "Return the name of this device.", "id": "f9585:c0:m16"} {"signature": "@propertydef is_connected(self):", "body": "return self._connected.is_set()", "docstring": "Return True if the device is connected to the system, otherwise False.", "id": "f9585:c0:m17"} {"signature": "@propertydef rssi(self, timeout_sec=TIMEOUT_SEC):", "body": "self._rssi_read.clear()self._peripheral.readRSSI()if not self._rssi_read.wait(timeout_sec):raise RuntimeError('')return self._rssi", "docstring": "Return the RSSI signal strength in decibels.", "id": "f9585:c0:m18"} {"signature": "@abc.abstractpropertydef name(self):", "body": "raise NotImplementedError", "docstring": "Return the name of this BLE network adapter.", "id": "f9586:c0:m0"} {"signature": "@abc.abstractmethoddef start_scan(self, timeout_sec):", "body": "raise NotImplementedError", "docstring": "Start scanning for BLE devices with this adapter.", "id": "f9586:c0:m1"} {"signature": "@abc.abstractmethoddef stop_scan(self, timeout_sec):", "body": "raise NotImplementedError", "docstring": "Stop scanning for BLE devices with this adapter.", "id": "f9586:c0:m2"} {"signature": "@abc.abstractpropertydef is_scanning(self):", "body": "raise NotImplementedError", "docstring": "Return True if the BLE adapter is scanning for devices, otherwise\n return False.", "id": "f9586:c0:m3"} {"signature": "@abc.abstractmethoddef power_on(self):", "body": "raise NotImplementedError", "docstring": "Power on this BLE adapter.", "id": "f9586:c0:m4"} {"signature": "@abc.abstractmethoddef power_off(self):", "body": "raise NotImplementedError", "docstring": "Power off this BLE adapter.", "id": "f9586:c0:m5"} {"signature": "@abc.abstractpropertydef is_powered(self):", "body": "raise NotImplementedError", "docstring": "Return True if the BLE adapter is powered up, otherwise return False.", "id": "f9586:c0:m6"} {"signature": "@abc.abstractmethoddef initialize(self):", "body": "raise NotImplementedError", "docstring": "Initialize the BLE provider. Must be called once before any other\n calls are made to the provider.", "id": "f9587:c0:m0"} {"signature": "@abc.abstractmethoddef run_mainloop_with(self, target):", "body": "raise NotImplementedError", "docstring": "Start the OS's main loop to process asyncronous BLE events and then\n run the specified target function in a background thread. Target\n function should be a function that takes no parameters and optionally\n return an integer response code. When the target function stops\n executing or returns with value then the main loop will be stopped and\n the program will exit with the returned code.\n\n Note that an OS main loop is required to process asyncronous BLE events\n and this function is provided as a convenience for writing simple tools\n and scripts that don't need to be full-blown GUI applications. 
If you\n are writing a GUI application that has a main loop (a GTK glib main loop\n on Linux, or a Cocoa main loop on OSX) then you don't need to call this\n function.", "id": "f9587:c0:m1"} {"signature": "@abc.abstractmethoddef list_adapters(self):", "body": "raise NotImplementedError", "docstring": "Return a list of BLE adapter objects connected to the system.", "id": "f9587:c0:m2"} {"signature": "@abc.abstractmethoddef list_devices(self):", "body": "raise NotImplementedError", "docstring": "Return a list of BLE devices known to the system.", "id": "f9587:c0:m3"} {"signature": "@abc.abstractmethoddef clear_cached_data(self):", "body": "raise NotImplementedError", "docstring": "Clear any internally cached BLE device data. Necessary in some cases\n to prevent issues with stale device data getting cached by the OS.", "id": "f9587:c0:m4"} {"signature": "@abc.abstractmethoddef disconnect_devices(self, service_uuids):", "body": "raise NotImplementedError", "docstring": "Disconnect any connected devices that have any of the specified\n service UUIDs.", "id": "f9587:c0:m5"} {"signature": "def get_default_adapter(self):", "body": "adapters = self.list_adapters()if len(adapters) > :return adapters[]else:return None", "docstring": "Return the first BLE adapter found, or None if no adapters are\n available.", "id": "f9587:c0:m6"} {"signature": "def find_devices(self, service_uuids=[], name=None):", "body": "expected = set(service_uuids)devices = self.list_devices()found = []for device in devices:if name is not None:if device.name == name:found.append(device)else:actual = set(device.advertised)if actual >= expected:found.append(device)return found", "docstring": "Return devices that advertise the specified service UUIDs and/or have\n the specified name. Service_uuids should be a list of Python uuid.UUID\n objects and is optional. Name is a string device name to look for and is\n also optional. Will not block, instead it returns immediately with a\n list of found devices (which might be empty).", "id": "f9587:c0:m7"} {"signature": "def find_device(self, service_uuids=[], name=None, timeout_sec=TIMEOUT_SEC):", "body": "start = time.time()while True:found = self.find_devices(service_uuids, name)if len(found) > :return found[]if time.time()-start >= timeout_sec:return Nonetime.sleep()", "docstring": "Return the first device that advertises the specified service UUIDs or\n has the specified name. Will wait up to timeout_sec seconds for the device\n to be found, and if the timeout is zero then it will not wait at all and\n immediately return a result. When no device is found a value of None is\n returned.", "id": "f9587:c0:m8"} {"signature": "@abc.abstractpropertydef uuid(self):", "body": "raise NotImplementedError", "docstring": "Return the UUID of this GATT service.", "id": "f9589:c0:m0"} {"signature": "@abc.abstractmethoddef list_characteristics(self):", "body": "raise NotImplementedError", "docstring": "Return list of GATT characteristics that have been discovered for this\n service.", "id": "f9589:c0:m1"} {"signature": "def find_characteristic(self, uuid):", "body": "for char in self.list_characteristics():if char.uuid == uuid:return charreturn None", "docstring": "Return the first child characteristic found that has the specified\n UUID. 
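The find_devices() and find_device() records above reduce to two pieces: a set-superset test on advertised service UUIDs (or an exact name match), and a poll-until-timeout loop around it. A hedged sketch of both, with the provider's real device objects left out and the sleep interval assumed:

import time

def matches(advertised_uuids, wanted_uuids, name=None, wanted_name=None):
    # A device qualifies if its name matches, or if it advertises at least
    # the requested service UUIDs (set superset test, as in find_devices()).
    if wanted_name is not None:
        return name == wanted_name
    return set(advertised_uuids) >= set(wanted_uuids)

def poll_for(find_once, timeout_sec):
    # Poll-until-timeout loop, as in find_device(): first hit or None.
    start = time.time()
    while True:
        found = find_once()
        if found:
            return found
        if time.time() - start >= timeout_sec:
            return None
        time.sleep(1)  # sleep interval assumed; the value is stripped in the record above

print(matches(['uuid-a', 'uuid-b'], ['uuid-a']))  # True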
Will return None if no characteristic that matches is found.", "id": "f9589:c0:m2"} {"signature": "@abc.abstractpropertydef uuid(self):", "body": "raise NotImplementedError", "docstring": "Return the UUID of this GATT characteristic.", "id": "f9589:c1:m0"} {"signature": "@abc.abstractmethoddef read_value(self):", "body": "raise NotImplementedError", "docstring": "Read the value of this characteristic.", "id": "f9589:c1:m1"} {"signature": "@abc.abstractmethoddef write_value(self, value):", "body": "raise NotImplementedError", "docstring": "Write the specified value to this characteristic.", "id": "f9589:c1:m2"} {"signature": "@abc.abstractmethoddef start_notify(self, on_change):", "body": "raise NotImplementedError", "docstring": "Enable notification of changes for this characteristic on the\n specified on_change callback. on_change should be a function that takes\n one parameter which is the value (as a string of bytes) of the changed\n characteristic value.", "id": "f9589:c1:m3"} {"signature": "@abc.abstractmethoddef stop_notify(self):", "body": "raise NotImplementedError", "docstring": "Disable notification of changes for this characteristic.", "id": "f9589:c1:m4"} {"signature": "@abc.abstractmethoddef list_descriptors(self):", "body": "raise NotImplementedError", "docstring": "Return list of GATT descriptors that have been discovered for this\n characteristic.", "id": "f9589:c1:m5"} {"signature": "def find_descriptor(self, uuid):", "body": "for desc in self.list_descriptors():if desc.uuid == uuid:return descreturn None", "docstring": "Return the first child descriptor found that has the specified\n UUID. Will return None if no descriptor that matches is found.", "id": "f9589:c1:m6"} {"signature": "@abc.abstractpropertydef uuid(self):", "body": "raise NotImplementedError", "docstring": "Return the UUID of this GATT descriptor.", "id": "f9589:c2:m0"} {"signature": "@abc.abstractmethoddef read_value(self):", "body": "raise NotImplementedError", "docstring": "Read the value of this descriptor.", "id": "f9589:c2:m1"} {"signature": "@abc.abstractmethoddef connect(self, timeout_sec):", "body": "raise NotImplementedError", "docstring": "Connect to the BLE device.", "id": "f9590:c0:m0"} {"signature": "@abc.abstractmethoddef disconnect(self, timeout_sec):", "body": "raise NotImplementedError", "docstring": "Disconnect from the BLE device.", "id": "f9590:c0:m1"} {"signature": "@abc.abstractmethoddef list_services(self):", "body": "raise NotImplementedError", "docstring": "Return a list of GattService objects that have been discovered for\n this device.", "id": "f9590:c0:m2"} {"signature": "@abc.abstractpropertydef discover(self, service_uuids, char_uuids, timeout_sec=):", "body": "raise NotImplementedError", "docstring": "Wait up to timeout_sec for the specified services and characteristics\n to be discovered on the device. If the timeout is exceeded without\n discovering the services and characteristics then an exception is thrown.", "id": "f9590:c0:m3"} {"signature": "@abc.abstractpropertydef advertised(self):", "body": "raise NotImplementedError", "docstring": "Return a list of UUIDs for services that are advertised by this\n device.", "id": "f9590:c0:m4"} {"signature": "@abc.abstractpropertydef id(self):", "body": "raise NotImplementedError", "docstring": "Return a unique identifier for this device. 
On supported platforms\n this will be the MAC address of the device, however on unsupported\n platforms (Mac OSX) it will be a unique ID like a UUID.", "id": "f9590:c0:m5"} {"signature": "@abc.abstractpropertydef name(self):", "body": "raise NotImplementedError", "docstring": "Return the name of this device.", "id": "f9590:c0:m6"} {"signature": "@abc.abstractpropertydef is_connected(self):", "body": "raise NotImplementedError", "docstring": "Return True if the device is connected to the system, otherwise False.", "id": "f9590:c0:m7"} {"signature": "@abc.abstractpropertydef rssi(self):", "body": "raise NotImplementedError", "docstring": "Return the RSSI signal strength in decibels.", "id": "f9590:c0:m8"} {"signature": "def find_service(self, uuid):", "body": "for service in self.list_services():if service.uuid == uuid:return servicereturn None", "docstring": "Return the first child service found that has the specified\n UUID. Will return None if no service that matches is found.", "id": "f9590:c0:m9"} {"signature": "def __eq__(self, other):", "body": "return self.id == other.id", "docstring": "Test if this device is the same as the provided device.", "id": "f9590:c0:m10"} {"signature": "def __ne__(self, other):", "body": "return self.id != other.id", "docstring": "Test if this device is not the same as the provided device.", "id": "f9590:c0:m11"} {"signature": "def __hash__(self):", "body": "return hash(self.id)", "docstring": "Hash function implementation that allows device instances to be put\n inside dictionaries and other containers.", "id": "f9590:c0:m12"} {"signature": "def get_provider():", "body": "global _providerif _provider is None:if sys.platform.startswith(''):from .bluez_dbus.provider import BluezProvider_provider = BluezProvider()elif sys.platform == '':from .corebluetooth.provider import CoreBluetoothProvider_provider = CoreBluetoothProvider()else:raise RuntimeError(''.format(sys.platform))return _provider", "docstring": "Return an instance of the BLE provider for the current platform.", "id": "f9592:m0"} {"signature": "def __init__(self, device):", "body": "self._uart = device.find_service(UART_SERVICE_UUID)if self._uart is None:raise RuntimeError('')self._tx = self._uart.find_characteristic(TX_CHAR_UUID)self._rx = self._uart.find_characteristic(RX_CHAR_UUID)if self._tx is None or self._rx is None:raise RuntimeError('')self._queue = queue.Queue()self._rx.start_notify(self._rx_received)", "docstring": "Initialize UART from provided bluez device.", "id": "f9593:c0:m0"} {"signature": "def write(self, data):", "body": "self._tx.write_value(data)", "docstring": "Write a string of data to the UART device.", "id": "f9593:c0:m2"} {"signature": "def read(self, timeout_sec=None):", "body": "try:return self._queue.get(timeout=timeout_sec)except queue.Empty:return None", "docstring": "Block until data is available to read from the UART. Will return a\n string of data that has been received. Timeout_sec specifies how many\n seconds to wait for data to be available and will block forever if None\n (the default). 
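The __eq__, __ne__ and __hash__ records above define device identity purely by the id property, which is what lets device instances live in sets and dictionaries regardless of other attributes. A small self-contained illustration (the MAC-style id and names are made up):

class Device:
    # Identity model from the records above: compare and hash by id only.
    def __init__(self, dev_id, name=''):
        self.id = dev_id
        self.name = name

    def __eq__(self, other):
        return self.id == other.id

    def __ne__(self, other):
        return self.id != other.id

    def __hash__(self):
        return hash(self.id)

known = {Device('aa:bb:cc:dd:ee:ff', 'UART')}
print(Device('aa:bb:cc:dd:ee:ff', 'renamed') in known)  # True: same id, same device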
If the timeout is exceeded and no data is found then\n None is returned.", "id": "f9593:c0:m3"} {"signature": "def __init__(self, device):", "body": "self._colorific = device.find_service(COLOR_SERVICE_UUID)self._color = self._colorific.find_characteristic(COLOR_CHAR_UUID)", "docstring": "Initialize device information from provided bluez device.", "id": "f9594:c0:m0"} {"signature": "def set_color(self, r, g, b):", "body": "command = ''.format(chr(r & ),chr(g & ),chr(b & ))self._color.write_value(command)", "docstring": "Set the red, green, blue color of the bulb.", "id": "f9594:c0:m1"} {"signature": "def __init__(self, device):", "body": "self._dis = device.find_service(DIS_SERVICE_UUID)self._manufacturer = self._dis.find_characteristic(MANUFACTURER_CHAR_UUID)self._model = self._dis.find_characteristic(MODEL_CHAR_UUID)self._serial = self._dis.find_characteristic(SERIAL_CHAR_UUID)self._hw_revision = self._dis.find_characteristic(HW_REVISION_CHAR_UUID)self._sw_revision = self._dis.find_characteristic(SW_REVISION_CHAR_UUID)self._fw_revision = self._dis.find_characteristic(FW_REVISION_CHAR_UUID)self._sys_id = self._dis.find_characteristic(SYS_ID_CHAR_UUID)self._reg_cert = self._dis.find_characteristic(REG_CERT_CHAR_UUID)self._pnp_id = self._dis.find_characteristic(PNP_ID_CHAR_UUID)", "docstring": "Initialize device information from provided bluez device.", "id": "f9595:c0:m0"} {"signature": "@classmethoddef find_device(cls, timeout_sec=TIMEOUT_SEC):", "body": "return get_provider().find_device(service_uuids=cls.ADVERTISED, timeout_sec=timeout_sec)", "docstring": "Find the first available device that supports this service and return\n it, or None if no device is found. Will wait for up to timeout_sec\n seconds to find the device.", "id": "f9597:c0:m0"} {"signature": "@classmethoddef find_devices(cls):", "body": "return get_provider().find_devices(cls.ADVERTISED)", "docstring": "Find all the available devices that support this service and\n returns a list of them. Does not poll and will return immediately.", "id": "f9597:c0:m1"} {"signature": "@classmethoddef disconnect_devices(cls):", "body": "return get_provider().disconnect_devices(service_uuids=cls.ADVERTISED)", "docstring": "Disconnect any currently connected devices that implement this\n service.", "id": "f9597:c0:m2"} {"signature": "@classmethoddef discover(cls, device, timeout_sec=TIMEOUT_SEC):", "body": "device.discover(cls.SERVICES, cls.CHARACTERISTICS, timeout_sec)", "docstring": "Wait until the specified device has discovered the expected services\n and characteristics for this service. Should be called once before other\n calls are made on the service. 
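The UART helper assembled in the records above (find the UART service, grab its TX/RX characteristics, subscribe to RX notifications) funnels received data through a queue.Queue, so read() is just a blocking get with an optional timeout. A sketch of that receive path with the BLE plumbing left out:

import queue

class UartBuffer:
    # Receive path assumed by read(): the RX notification callback enqueues
    # incoming data, read() dequeues it with an optional timeout.
    def __init__(self):
        self._queue = queue.Queue()

    def _rx_received(self, data):
        # Registered via start_notify() on the RX characteristic in the real code.
        self._queue.put(data)

    def read(self, timeout_sec=None):
        try:
            return self._queue.get(timeout=timeout_sec)
        except queue.Empty:
            return None

uart = UartBuffer()
uart._rx_received(b'hello')
print(uart.read(timeout_sec=1))  # b'hello'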
Returns true if the service has been\n discovered in the specified timeout, or false if not discovered.", "id": "f9597:c0:m3"} {"signature": "def __init__(self, dbus_obj):", "body": "self._adapter = dbus.Interface(dbus_obj, _INTERFACE)self._props = dbus.Interface(dbus_obj, '')self._scan_started = threading.Event()self._scan_stopped = threading.Event()self._props.connect_to_signal('', self._prop_changed)", "docstring": "Create an instance of the bluetooth adapter from the provided bluez\n DBus object.", "id": "f9598:c0:m0"} {"signature": "@propertydef name(self):", "body": "return self._props.Get(_INTERFACE, '')", "docstring": "Return the name of this BLE network adapter.", "id": "f9598:c0:m2"} {"signature": "def start_scan(self, timeout_sec=TIMEOUT_SEC):", "body": "self._scan_started.clear()self._adapter.StartDiscovery()if not self._scan_started.wait(timeout_sec):raise RuntimeError('')", "docstring": "Start scanning for BLE devices with this adapter.", "id": "f9598:c0:m3"} {"signature": "def stop_scan(self, timeout_sec=TIMEOUT_SEC):", "body": "self._scan_stopped.clear()self._adapter.StopDiscovery()if not self._scan_stopped.wait(timeout_sec):raise RuntimeError('')", "docstring": "Stop scanning for BLE devices with this adapter.", "id": "f9598:c0:m4"} {"signature": "@propertydef is_scanning(self):", "body": "return self._props.Get(_INTERFACE, '')", "docstring": "Return True if the BLE adapter is scanning for devices, otherwise\n return False.", "id": "f9598:c0:m5"} {"signature": "def power_on(self):", "body": "return self._props.Set(_INTERFACE, '', True)", "docstring": "Power on this BLE adapter.", "id": "f9598:c0:m6"} {"signature": "def power_off(self):", "body": "return self._props.Set(_INTERFACE, '', False)", "docstring": "Power off this BLE adapter.", "id": "f9598:c0:m7"} {"signature": "@propertydef is_powered(self):", "body": "return self._props.Get(_INTERFACE, '')", "docstring": "Return True if the BLE adapter is powered up, otherwise return False.", "id": "f9598:c0:m8"} {"signature": "def initialize(self):", "body": "GObject.threads_init()dbus.mainloop.glib.threads_init()self._mainloop = dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)self._bus = dbus.SystemBus()self._bluez = dbus.Interface(self._bus.get_object('', ''),'')", "docstring": "Initialize bluez DBus communication. Must be called before any other\n calls are made!", "id": "f9599:c0:m1"} {"signature": "def run_mainloop_with(self, target):", "body": "self._user_thread = threading.Thread(target=self._user_thread_main, args=(target,))self._user_thread.daemon = True self._user_thread.start()self._gobject_mainloop = GObject.MainLoop()try:self._gobject_mainloop.run() except KeyboardInterrupt:self._gobject_mainloop.quit()sys.exit()if self._exception is not None:raise_(self._exception[], None, self._exception[])else:sys.exit(self._return_code)", "docstring": "Start the OS's main loop to process asyncronous BLE events and then\n run the specified target function in a background thread. Target\n function should be a function that takes no parameters and optionally\n return an integer response code. When the target function stops\n executing or returns with value then the main loop will be stopped and\n the program will exit with the returned code.\n\n Note that an OS main loop is required to process asyncronous BLE events\n and this function is provided as a convenience for writing simple tools\n and scripts that don't need to be full-blown GUI applications. 
If you\n are writing a GUI application that has a main loop (a GTK glib main loop\n on Linux, or a Cocoa main loop on OSX) then you don't need to call this\n function.", "id": "f9599:c0:m2"} {"signature": "def _user_thread_main(self, target):", "body": "try:while True:if self._gobject_mainloop is not None and self._gobject_mainloop.is_running():breaktime.sleep()self._return_code = target()if self._return_code is None:self._return_code = self._gobject_mainloop.quit()except Exception as ex:self._exception = sys.exc_info()self._gobject_mainloop.quit()", "docstring": "Main entry point for the thread that will run user's code.", "id": "f9599:c0:m3"} {"signature": "def clear_cached_data(self):", "body": "for device in self.list_devices():if device.is_connected:continueadapter = dbus.Interface(self._bus.get_object('', device._adapter),_ADAPTER_INTERFACE)adapter.RemoveDevice(device._device.object_path)", "docstring": "Clear any internally cached BLE device data. Necessary in some cases\n to prevent issues with stale device data getting cached by the OS.", "id": "f9599:c0:m4"} {"signature": "def disconnect_devices(self, service_uuids=[]):", "body": "service_uuids = set(service_uuids)for device in self.list_devices():if not device.is_connected:continuedevice_uuids = set(map(lambda x: x.uuid, device.list_services()))if device_uuids >= service_uuids:device.disconnect()", "docstring": "Disconnect any connected devices that have the specified list of\n service UUIDs. The default is an empty list which means all devices\n are disconnected.", "id": "f9599:c0:m5"} {"signature": "def list_adapters(self):", "body": "return map(BluezAdapter, self._get_objects(''))", "docstring": "Return a list of BLE adapter objects connected to the system.", "id": "f9599:c0:m6"} {"signature": "def list_devices(self):", "body": "return map(BluezDevice, self._get_objects(''))", "docstring": "Return a list of BLE devices known to the system.", "id": "f9599:c0:m7"} {"signature": "def _get_objects(self, interface, parent_path=''):", "body": "parent_path = parent_path.lower()objects = []for opath, interfaces in iteritems(self._bluez.GetManagedObjects()):if interface in interfaces.keys() and opath.lower().startswith(parent_path):objects.append(self._bus.get_object('', opath))return objects", "docstring": "Return a list of all bluez DBus objects that implement the requested\n interface name and are under the specified path. 
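The run_mainloop_with() and _user_thread_main() records above implement a pattern worth spelling out: the platform event loop (GLib here, Cocoa on OS X) owns the main thread, the user's code runs in a daemon thread, and when the user function returns the loop is stopped and its return value becomes the process exit code. A platform-free sketch of that arrangement, where loop_run/loop_quit stand in for the real main loop:

import sys
import threading

def run_mainloop_with(target, loop_run, loop_quit):
    # loop_run/loop_quit are stand-ins for GObject.MainLoop().run()/.quit().
    state = {'code': 0}

    def user_thread():
        # Mirrors _user_thread_main(); the real code also waits until the loop
        # is actually running before calling target().
        rc = target()
        state['code'] = 0 if rc is None else rc
        loop_quit()

    worker = threading.Thread(target=user_thread)
    worker.daemon = True  # don't block interpreter exit
    worker.start()
    loop_run()            # blocks until loop_quit() is called
    sys.exit(state['code'])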
The default is to\n search devices under the root of all bluez objects.", "id": "f9599:c0:m8"} {"signature": "def _get_objects_by_path(self, paths):", "body": "return map(lambda x: self._bus.get_object('', x), paths)", "docstring": "Return a list of all bluez DBus objects from the provided list of paths.", "id": "f9599:c0:m9"} {"signature": "def _print_tree(self):", "body": "objects = self._bluez.GetManagedObjects()for path in objects.keys():print(\"\" % (path))interfaces = objects[path]for interface in interfaces.keys():if interface in [\"\",\"\"]:continueprint(\"\" % (interface))properties = interfaces[interface]for key in properties.keys():print(\"\" % (key, properties[key]))", "docstring": "Print tree of all bluez objects, useful for debugging.", "id": "f9599:c0:m10"} {"signature": "def __init__(self, dbus_obj):", "body": "self._props = dbus.Interface(dbus_obj, '')", "docstring": "Create an instance of the GATT service from the provided bluez\n DBus object.", "id": "f9600:c0:m0"} {"signature": "@propertydef uuid(self):", "body": "return uuid.UUID(str(self._props.Get(_SERVICE_INTERFACE, '')))", "docstring": "Return the UUID of this GATT service.", "id": "f9600:c0:m1"} {"signature": "def list_characteristics(self):", "body": "paths = self._props.Get(_SERVICE_INTERFACE, '')return map(BluezGattCharacteristic,get_provider()._get_objects_by_path(paths))", "docstring": "Return list of GATT characteristics that have been discovered for this\n service.", "id": "f9600:c0:m2"} {"signature": "def __init__(self, dbus_obj):", "body": "self._characteristic = dbus.Interface(dbus_obj, _CHARACTERISTIC_INTERFACE)self._props = dbus.Interface(dbus_obj, '')", "docstring": "Create an instance of the GATT characteristic from the provided bluez\n DBus object.", "id": "f9600:c1:m0"} {"signature": "@propertydef uuid(self):", "body": "return uuid.UUID(str(self._props.Get(_CHARACTERISTIC_INTERFACE, '')))", "docstring": "Return the UUID of this GATT characteristic.", "id": "f9600:c1:m1"} {"signature": "def read_value(self):", "body": "return self._characteristic.ReadValue()", "docstring": "Read the value of this characteristic.", "id": "f9600:c1:m2"} {"signature": "def write_value(self, value):", "body": "self._characteristic.WriteValue(value)", "docstring": "Write the specified value to this characteristic.", "id": "f9600:c1:m3"} {"signature": "def start_notify(self, on_change):", "body": "def characteristic_changed(iface, changed_props, invalidated_props):if iface != _CHARACTERISTIC_INTERFACE:returnif '' not in changed_props:returnon_change(''.join(map(chr, changed_props[''])))self._props.connect_to_signal('', characteristic_changed)self._characteristic.StartNotify()", "docstring": "Enable notification of changes for this characteristic on the\n specified on_change callback. 
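In the bluez start_notify() record above, the PropertiesChanged signal delivers the new characteristic value as a DBus array of byte values; the callback converts it to a Python string with chr() before handing it to on_change. That conversion in isolation:

def dbus_bytes_to_str(values):
    # Same conversion as the notification callback: one chr() per byte value.
    return ''.join(map(chr, values))

print(dbus_bytes_to_str([72, 101, 108, 108, 111]))  # 'Hello'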
on_change should be a function that takes\n one parameter which is the value (as a string of bytes) of the changed\n characteristic value.", "id": "f9600:c1:m4"} {"signature": "def stop_notify(self):", "body": "self._characteristic.StopNotify()", "docstring": "Disable notification of changes for this characteristic.", "id": "f9600:c1:m5"} {"signature": "def list_descriptors(self):", "body": "paths = self._props.Get(_CHARACTERISTIC_INTERFACE, '')return map(BluezGattDescriptor,get_provider()._get_objects_by_path(paths))", "docstring": "Return list of GATT descriptors that have been discovered for this\n characteristic.", "id": "f9600:c1:m6"} {"signature": "def __init__(self, dbus_obj):", "body": "self._descriptor = dbus.Interface(dbus_obj, _DESCRIPTOR_INTERFACE)self._props = dbus.Interface(dbus_obj, '')", "docstring": "Create an instance of the GATT descriptor from the provided bluez\n DBus object.", "id": "f9600:c2:m0"} {"signature": "@propertydef uuid(self):", "body": "return uuid.UUID(str(self._props.Get(_DESCRIPTOR_INTERFACE, '')))", "docstring": "Return the UUID of this GATT descriptor.", "id": "f9600:c2:m1"} {"signature": "def read_value(self):", "body": "return self._descriptor.ReadValue()", "docstring": "Read the value of this descriptor.", "id": "f9600:c2:m2"} {"signature": "def __init__(self, dbus_obj):", "body": "self._device = dbus.Interface(dbus_obj, _INTERFACE)self._props = dbus.Interface(dbus_obj, '')self._connected = threading.Event()self._disconnected = threading.Event()self._props.connect_to_signal('', self._prop_changed)", "docstring": "Create an instance of the bluetooth device from the provided bluez\n DBus object.", "id": "f9601:c0:m0"} {"signature": "def connect(self, timeout_sec=TIMEOUT_SEC):", "body": "self._connected.clear()self._device.Connect()if not self._connected.wait(timeout_sec):raise RuntimeError('')", "docstring": "Connect to the device. If not connected within the specified timeout\n then an exception is thrown.", "id": "f9601:c0:m2"} {"signature": "def disconnect(self, timeout_sec=TIMEOUT_SEC):", "body": "self._disconnected.clear()self._device.Disconnect()if not self._disconnected.wait(timeout_sec):raise RuntimeError('')", "docstring": "Disconnect from the device. If not disconnected within the specified\n timeout then an exception is thrown.", "id": "f9601:c0:m3"} {"signature": "def list_services(self):", "body": "return map(BluezGattService,get_provider()._get_objects(_SERVICE_INTERFACE,self._device.object_path))", "docstring": "Return a list of GattService objects that have been discovered for\n this device.", "id": "f9601:c0:m4"} {"signature": "def discover(self, service_uuids, char_uuids, timeout_sec=TIMEOUT_SEC):", "body": "expected_services = set(service_uuids)expected_chars = set(char_uuids)start = time.time()while True:actual_services = set(self.advertised)chars = map(BluezGattCharacteristic,get_provider()._get_objects(_CHARACTERISTIC_INTERFACE,self._device.object_path))actual_chars = set(map(lambda x: x.uuid, chars))if actual_services >= expected_services and actual_chars >= expected_chars:return Trueif time.time()-start >= timeout_sec:return Falsetime.sleep()", "docstring": "Wait up to timeout_sec for the specified services and characteristics\n to be discovered on the device. 
If the timeout is exceeded without\n discovering the services and characteristics then an exception is thrown.", "id": "f9601:c0:m5"} {"signature": "@propertydef advertised(self):", "body": "uuids = []try:uuids = self._props.Get(_INTERFACE, '')except dbus.exceptions.DBusException as ex:if ex.get_dbus_name() != '':raise exreturn [uuid.UUID(str(x)) for x in uuids]", "docstring": "Return a list of UUIDs for services that are advertised by this\n device.", "id": "f9601:c0:m6"} {"signature": "@propertydef id(self):", "body": "return self._props.Get(_INTERFACE, '')", "docstring": "Return a unique identifier for this device. On supported platforms\n this will be the MAC address of the device, however on unsupported\n platforms (Mac OSX) it will be a unique ID like a UUID.", "id": "f9601:c0:m7"} {"signature": "@propertydef name(self):", "body": "return self._props.Get(_INTERFACE, '')", "docstring": "Return the name of this device.", "id": "f9601:c0:m8"} {"signature": "@propertydef is_connected(self):", "body": "return self._props.Get(_INTERFACE, '')", "docstring": "Return True if the device is connected to the system, otherwise False.", "id": "f9601:c0:m9"} {"signature": "@propertydef rssi(self):", "body": "return self._props.Get(_INTERFACE, '')", "docstring": "Return the RSSI signal strength in decibels.", "id": "f9601:c0:m10"} {"signature": "@propertydef _adapter(self):", "body": "return self._props.Get(_INTERFACE, '')", "docstring": "Return the DBus path to the adapter that owns this device.", "id": "f9601:c0:m11"} {"signature": "def init_app(self, app):", "body": "self._jobs = []if not hasattr(app, ''):app.extensions = {}app.extensions[''] = selfapp.restpoints_instance = selfapp.add_url_rule('', '', ping)app.add_url_rule('', '', time)app.add_url_rule('', '', status(self._jobs))", "docstring": "Initialize a :class:`~flask.Flask` application for use with\n this extension.", "id": "f9608:c0:m1"} {"signature": "def add_status_job(self, job_func, name=None, timeout=):", "body": "job_name = job_func.__name__ if name is None else namejob = (job_name, timeout, job_func)self._jobs.append(job)", "docstring": "Adds a job to be included during calls to the `/status` endpoint.\n\n :param job_func: the status function.\n :param name: the name used in the JSON response for the given status\n function. 
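add_status_job() above stores each job as a (name, timeout, func) tuple; the handler behind the /status endpoint then runs every tuple in a thread pool, timing it with timeit(number=1) and reporting "timeout exceeded" or the error string on failure, as its docstrings describe. A self-contained sketch of that execution step; the pool size is an assumption, while the result keys follow the example JSON in the status() docstring:

import concurrent.futures
from timeit import timeit

def run_status_jobs(jobs):
    # jobs: list of (name, timeout, func) tuples, as built by add_status_job().
    executor = concurrent.futures.ThreadPoolExecutor(max_workers=5)  # size assumed
    futures = [(job, executor.submit(timeit, job[2], number=1)) for job in jobs]
    results = []
    for (name, timeout, _), future in futures:
        entry = {'endpoint': name}
        try:
            entry['duration'] = future.result(timeout=timeout)
        except concurrent.futures.TimeoutError:
            entry['error'] = 'timeout exceeded'
        except Exception as ex:
            entry['error'] = str(ex)
        results.append(entry)
    executor.shutdown(wait=False)
    return results

print(run_status_jobs([('always-ok', 1, lambda: None)]))
# [{'endpoint': 'always-ok', 'duration': <some small float>}]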
The name of the function is the default.\n :param timeout: the time limit before the job status is set to\n \"timeout exceeded\".", "id": "f9608:c0:m2"} {"signature": "def status_job(self, fn=None, name=None, timeout=):", "body": "if fn is None:def decorator(fn):self.add_status_job(fn, name, timeout)return decoratorelse:self.add_status_job(fn, name, timeout)", "docstring": "Decorator that invokes `add_status_job`.\n\n ::\n\n @app.status_job\n def postgresql():\n # query/ping postgres\n\n @app.status_job(name=\"Active Directory\")\n def active_directory():\n # query active directory\n\n @app.status_job(timeout=5)\n def paypal():\n # query paypal, timeout after 5 seconds", "id": "f9608:c0:m3"} {"signature": "def status(jobs):", "body": "def status_handler():endpoints = []stats = {\"\": None}executor = concurrent.futures.ThreadPoolExecutor(max_workers=)for job, future in [(job, executor.submit(timeit, job[], number=)) for job in jobs]:name, timeout, _ = jobendpoint = {\"\": name}try:data = future.result(timeout=timeout)endpoint[\"\"] = dataexcept concurrent.futures.TimeoutError:endpoint[\"\"] = \"\"except Exception as ex:endpoint[\"\"] = str(ex)endpoints.append(endpoint)if len(endpoints) > :stats[\"\"] = endpointsexecutor.shutdown(wait=False)return jsonify(**stats)return status_handler", "docstring": "Handler that calls each status job in a worker pool, attempting to timeout.\n The resulting durations/errors are written to the response\n as JSON.\n\n eg.\n\n `{\n \"endpoints\": [\n { \"endpoint\": \"Jenny's Database\", \"duration\": 1.002556324005127 },\n { \"endpoint\": \"Hotmail\", \"duration\": -1, \"error\": \"Host is down\" },\n ]\n }`", "id": "f9610:m0"} {"signature": "def ping():", "body": "return \"\"", "docstring": "Handler that simply returns `pong` from a GET.", "id": "f9610:m1"} {"signature": "def main(argd):", "body": "green_exe = get_green_exe()if argd[''] or argd['']:return list_tests(full=argd[''])green_args = parse_test_names(argd['']) or ['']if argd['']:return print_test_names(green_args)cmd = [green_exe, '']if not argd['']:cmd.append('')cmd.extend(green_args)print_header(cmd)return subprocess.run(cmd).returncode", "docstring": "Main entry point, expects doctopt arg dict as argd.", "id": "f9612:m0"} {"signature": "def get_green_exe():", "body": "paths = set(s for s in os.environ.get('', '').split('')if s and os.path.isdir(s))pyver = ''.format(v=sys.version_info)greenmajorexe = ''.format(sys.version_info.major)greenexe = ''.format(pyver)for trypath in paths:greenpath = os.path.join(trypath, greenexe)greenmajorpath = os.path.join(trypath, greenmajorexe)if os.path.exists(greenpath):return greenpathelif os.path.exists(greenmajorpath):return greenmajorpathraise MissingDependency('')", "docstring": "Get the green executable for this Python version.", "id": "f9612:m1"} {"signature": "def print_err(*args, **kwargs):", "body": "if kwargs.get('', None) is None:kwargs[''] = sys.stderrprint(*args, **kwargs)", "docstring": "A wrapper for print() that uses stderr by default.", "id": "f9612:m10"} {"signature": "def print_header(cmd):", "body": "textcolors = {'': ''}libcolors = {'': '', '': ''}vercolors = {'': ''}execolors = {'': '', '': ''}argcolors = {'': ''}def fmt_app_info(name, ver):\"\"\"\"\"\"return C('', **textcolors).join(C(name, **libcolors),C(ver, **vercolors))def fmt_cmd_args(cmdargs):\"\"\"\"\"\"return C('').join(C(cmdargs[], **execolors),C('').join(C(s, **argcolors) for s in cmdargs[:]),).join('', '', style='')print(''.format(C('').join(C('', 
**textcolors),fmt_app_info(APPNAME, APPVERSION),C('', **textcolors),fmt_app_info('', green_version),fmt_cmd_args(cmd),)))print(C('').join(C('', ''),C(os.getcwd(), '', style=''),),)", "docstring": "Print some info about the Colr and Green versions being used.", "id": "f9612:m11"} {"signature": "def main():", "body": "global DEBUGargd = docopt(USAGESTR, version=VERSIONSTR, script=SCRIPT)DEBUG = argd['']width = parse_int(argd[''] or DEFAULT_WIDTH) or indent = parse_int(argd[''] or (argd[''] or ))prepend = '' * (indent * )if prepend and argd['']:width -= len(prepend)userprepend = argd[''] or (argd[''] or '')prepend = ''.join((prepend, userprepend))if argd['']:width -= len(userprepend)userappend = argd[''] or (argd[''] or '')if argd['']:width -= len(userappend)if argd['']:argd[''] = ((try_read_file(w) if len(w) < else w)for w in argd[''])words = ''.join((w for w in argd[''] if w))else:words = read_stdin()block = FormatBlock(words).iter_format_block(chars=argd[''],fill=argd[''],prepend=prepend,strip_first=argd[''],append=userappend,strip_last=argd[''],width=width,newlines=argd[''],lstrip=argd[''],)for i, line in enumerate(block):if argd['']:print(''.format(i + , line))else:print(line)return ", "docstring": "Main entry point, expects doctopt arg dict as argd.", "id": "f9614:m0"} {"signature": "def debug(*args, **kwargs):", "body": "if not (DEBUG and args):return Noneparent = kwargs.get('', None)with suppress(KeyError):kwargs.pop('')backlevel = kwargs.get('', )with suppress(KeyError):kwargs.pop('')frame = inspect.currentframe()while backlevel > :frame = frame.f_backbacklevel -= fname = os.path.split(frame.f_code.co_filename)[-]lineno = frame.f_linenoif parent:func = ''.format(parent.__class__.__name__, frame.f_code.co_name)else:func = frame.f_code.co_namelineinfo = ''.format(C(fname, ''),C(str(lineno).ljust(), ''),C().join(C(func, ''), '').ljust())pargs = list(C(a, '').str() for a in args)pargs[] = ''.join((lineinfo, pargs[]))print_err(*pargs, **kwargs)", "docstring": "Print a message only if DEBUG is truthy.", "id": "f9614:m1"} {"signature": "def parse_int(s):", "body": "try:val = int(s)except ValueError:print_err(''.format(s))sys.exit()return val", "docstring": "Parse a string as an integer.\n Exit with a message on failure.", "id": "f9614:m2"} {"signature": "def print_err(*args, **kwargs):", "body": "if kwargs.get('', None) is None:kwargs[''] = sys.stderrprint(*args, **kwargs)", "docstring": "Print to stderr by default.", "id": "f9614:m3"} {"signature": "def read_stdin():", "body": "if sys.stdin.isatty() and sys.stdout.isatty():print('')return sys.stdin.read()", "docstring": "Read from stdin, but print a helpful message if it's a tty.", "id": "f9614:m4"} {"signature": "def try_read_file(s):", "body": "try:with open(s, '') as f:data = f.read()except FileNotFoundError:return sexcept EnvironmentError as ex:print_err(''.format(s, ex))return Nonereturn data", "docstring": "If `s` is a file name, read the file and return it's content.\n Otherwise, return the original string.\n Returns None if the file was opened, but errored during reading.", "id": "f9614:m5"} {"signature": "def expand_words(self, line, width=):", "body": "if not line.strip():return linewordi = while len(strip_codes(line)) < width:wordendi = self.find_word_end(line, wordi)if wordendi < :wordi = wordendi = self.find_word_end(line, wordi)if wordendi < :line = ''.join(('', line))else:line = ''.join((line[:wordendi], line[wordendi:]))wordi += if '' not in strip_codes(line).strip():return line.replace('', '')return line", "docstring": 
"Insert spaces between words until it is wide enough for `width`.", "id": "f9616:c0:m1"} {"signature": "@staticmethoddef find_word_end(text, count=):", "body": "if not text:return -elif '' not in text:return elif not text.strip():return -count = count or found = foundindex = -inword = Falseindices = get_indices(str(text))sortedindices = sorted(indices)for i in sortedindices:c = indices[i]if inword and c.isspace():inword = Falsefoundindex = ifound += testindex = iwhile testindex > :testindex -= s = indices.get(testindex, None)if s is None:continueif len(s) == :foundindex = testindex + breakif found == count:return foundindexelif not c.isspace():inword = Truelastindex = sortedindices[-]if len(indices[lastindex]) > :while lastindex > :lastindex -= s = indices.get(lastindex, None)if s is None:continueif len(s) == :return lastindex + return - if inword else foundindex", "docstring": "This is a helper method for self.expand_words().\n Finds the index of word endings (default is first word).\n The last word doesn't count.\n If there are no words, or there are no spaces in the word, it\n returns -1.\n\n This method ignores escape codes.\n Example:\n s = 'this is a test'\n i = find_word_end(s, count=1)\n print('-'.join((s[:i], s[i:])))\n # 'this- is a test'\n i = find_word_end(s, count=2)\n print('-'.join((s[:i], s[i:])))\n # 'this is- a test'", "id": "f9616:c0:m2"} {"signature": "def format(self, text=None,width=, chars=False, fill=False, newlines=False,prepend=None, append=None, strip_first=False, strip_last=False,lstrip=False):", "body": "return ''.join(self.iter_format_block((self.text if text is None else text) or '',prepend=prepend,append=append,strip_first=strip_first,strip_last=strip_last,width=width,chars=chars,fill=fill,newlines=newlines,lstrip=lstrip))", "docstring": "Format a long string into a block of newline seperated text.\n Arguments:\n See iter_format_block().", "id": "f9616:c0:m3"} {"signature": "def iter_add_text(self, lines, prepend=None, append=None):", "body": "if (prepend is None) and (append is None):yield from lineselse:fmtpcs = [''] if prepend else []fmtpcs.append('')if append:fmtpcs.append('')fmtstr = ''.join(fmtpcs)yield from (fmtstr.format(prepend=prepend, line=line, append=append)for line in lines)", "docstring": "Prepend or append text to lines. Yields each line.", "id": "f9616:c0:m4"} {"signature": "def iter_block(self, text=None,width=, chars=False, newlines=False, lstrip=False):", "body": "text = (self.text if text is None else text) or ''if width < :width = fmtline = str.lstrip if lstrip else strif chars and (not newlines):yield from self.iter_char_block(text,width=width,fmtfunc=fmtline)elif newlines:for line in text.split(''):yield from self.iter_block(line,width=width,chars=chars,lstrip=lstrip,newlines=False,)else:yield from self.iter_space_block(text,width=width,fmtfunc=fmtline,)", "docstring": "Iterator that turns a long string into lines no greater than\n 'width' in length.\n It can wrap on spaces or characters. 
It only does basic blocks.\n For prepending see `iter_format_block()`.\n\n Arguments:\n text : String to format.\n width : Maximum width for each line.\n Default: 60\n chars : Wrap on characters if true, otherwise on spaces.\n Default: False\n newlines : Preserve newlines when True.\n Default: False\n lstrip : Whether to remove leading spaces from each line.\n Default: False", "id": "f9616:c0:m5"} {"signature": "def iter_char_block(self, text=None, width=, fmtfunc=str):", "body": "if width < :width = text = (self.text if text is None else text) or ''text = ''.join(text.split(''))escapecodes = get_codes(text)if not escapecodes:yield from (fmtfunc(text[i:i + width])for i in range(, len(text), width))else:blockwidth = block = []for i, s in enumerate(get_indices_list(text)):block.append(s)if len(s) == :blockwidth += if blockwidth == width:yield ''.join(block)block = []blockwidth = if block:yield ''.join(block)", "docstring": "Format block by splitting on individual characters.", "id": "f9616:c0:m6"} {"signature": "def iter_format_block(self, text=None,width=, chars=False, fill=False, newlines=False,append=None, prepend=None, strip_first=False, strip_last=False,lstrip=False):", "body": "if fill:chars = Falseiterlines = self.iter_block((self.text if text is None else text) or '',width=width,chars=chars,newlines=newlines,lstrip=lstrip,)if not (prepend or append):if fill:yield from (self.expand_words(l, width=width) for l in iterlines)else:yield from iterlineselse:if prepend:prependlen = len(prepend)else:strip_first = Falseprependlen = if append:lines = list(iterlines)lasti = len(lines) - iterlines = (l for l in lines)appendlen = len(append)else:strip_last = Falseappendlen = lasti = -for i, l in enumerate(self.iter_add_text(iterlines,prepend=prepend,append=append)):if strip_first and (i == ):l = l[prependlen:]elif strip_last and (i == lasti):l = l[:-appendlen]if fill:yield self.expand_words(l, width=width)else:yield l", "docstring": "Iterate over lines in a formatted block of text.\n This iterator allows you to prepend to each line.\n For basic blocks see iter_block().\n\n\n Arguments:\n text : String to format.\n\n width : Maximum width for each line. The prepend string\n is not included in this calculation.\n Default: 60\n\n chars : Whether to wrap on characters instead of spaces.\n Default: False\n fill : Insert spaces between words so that each line is\n the same width. 
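iter_block() above chooses between wrapping on characters and wrapping on spaces; the space-based variant (iter_space_block, further on in these records) accumulates words into a line until adding the next word would push the visible length past the width, ignoring escape codes. A stripped-down sketch without the escape-code accounting, assuming a single-space joiner:

def wrap_on_spaces(text, width=60):
    # Accumulate words until the next one would exceed `width`, then start a new line.
    curline = ''
    for word in text.split():
        candidate = ' '.join((curline, word)) if curline else word
        if len(candidate) > width:
            yield curline
            curline = word
        else:
            curline = candidate
    if curline:
        yield curline

print(list(wrap_on_spaces('this is a simple wrapping test', width=10)))
# ['this is a', 'simple', 'wrapping', 'test']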
This overrides `chars`.\n Default: False\n\n newlines : Whether to preserve newlines in the original\n string.\n Default: False\n\n append : String to append after each line.\n\n prepend : String to prepend before each line.\n\n strip_first : Whether to omit the prepend string for the first\n line.\n Default: False\n\n Example (when using prepend='$'):\n Without strip_first -> '$this', '$that'\n With strip_first -> 'this', '$that'\n\n strip_last : Whether to omit the append string for the last\n line (like strip_first does for prepend).\n Default: False\n\n lstrip : Whether to remove leading spaces from each line.\n This doesn't include any spaces in `prepend`.\n Default: False", "id": "f9616:c0:m7"} {"signature": "def iter_space_block(self, text=None, width=, fmtfunc=str):", "body": "if width < :width = curline = ''text = (self.text if text is None else text) or ''for word in text.split():possibleline = ''.join((curline, word)) if curline else wordcodelen = sum(len(s) for s in get_codes(possibleline))reallen = len(possibleline) - codelenif reallen > width:yield fmtfunc(curline)curline = wordelse:curline = possiblelineif curline:yield fmtfunc(curline)", "docstring": "Format block by wrapping on spaces.", "id": "f9616:c0:m8"} {"signature": "@staticmethoddef squeeze_words(line, width=):", "body": "while ('' in line) and (len(line) > width):head, _, tail = line.rpartition('')line = ''.join((head, tail))return line", "docstring": "Remove spaces in between words until it is small enough for\n `width`.\n This will always leave at least one space between words,\n so it may not be able to get below `width` characters.", "id": "f9616:c0:m9"} {"signature": "def get_codes(s: Any) -> List[str]:", "body": "return codegrabpat.findall(str(s))", "docstring": "Grab all escape codes from a string.\n Returns a list of all escape codes.", "id": "f9617:m0"} {"signature": "def get_code_indices(s: Any) -> Dict[int, str]:", "body": "indices = {}i = codes = get_codes(s)for code in codes:codeindex = s.index(code)realindex = i + codeindexindices[realindex] = codecodelen = len(code)i = realindex + codelens = s[codeindex + codelen:]return indices", "docstring": "Retrieve a dict of {index: escape_code} for a given string.\n If no escape codes are found, an empty dict is returned.", "id": "f9617:m1"} {"signature": "def get_indices(s: Any) -> Dict[int, str]:", "body": "codes = get_code_indices(s)if not codes:return {i: c for i, c in enumerate(s)}indices = {}for codeindex in sorted(codes):code = codes[codeindex]if codeindex == :indices[codeindex] = codecontinuestart = max(indices or {: ''}, key=int)startcode = indices.get(start, '')startlen = start + len(startcode)indices.update({i: s[i] for i in range(startlen, codeindex)})indices[codeindex] = codeif not indices:return {i: c for i, c in enumerate(s)}lastindex = max(indices, key=int)lastitem = indices[lastindex]start = lastindex + len(lastitem)textlen = len(s)if start < (textlen - ):indices.update({i: s[i] for i in range(start, textlen)})return indices", "docstring": "Retrieve a dict of characters and escape codes with their real index\n into the string as the key.", "id": "f9617:m2"} {"signature": "def get_indices_list(s: Any) -> List[str]:", "body": "indices = get_indices(s)return [indices[i] for i in sorted(indices, key=int)]", "docstring": "Retrieve a list of characters and escape codes where each escape\n code uses only one index. 
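get_codes(), get_code_indices() and the other escape-code helpers above all lean on module-level regexes (codegrabpat / codepat) whose actual patterns are not shown in these records. A hedged sketch with a simple SGR-only pattern standing in for them, covering both finding and stripping codes (strip_codes itself appears just after this point):

import re

# Assumed pattern: plain SGR color codes only; the module's own regex is not shown.
codepat = re.compile('\033\\[[0-9;]*m')

def get_codes(s):
    # Grab all escape codes from a string, as get_codes() does above.
    return codepat.findall(str(s))

def strip_codes(s):
    # Remove all escape codes; falsey input (other than 0) becomes ''.
    return codepat.sub('', str(s) if (s or (s == 0)) else '')

colored = '\033[31mred\033[0m and plain'
print(get_codes(colored))    # ['\x1b[31m', '\x1b[0m']
print(strip_codes(colored))  # 'red and plain'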
The indexes will not match up with the\n indexes in the original string.", "id": "f9617:m3"} {"signature": "def is_escape_code(s: Any) -> bool:", "body": "return codepat.match(str(s)) is not None", "docstring": "Returns True if `s` appears to be any kind of escape code.", "id": "f9617:m4"} {"signature": "def strip_codes(s: Any) -> str:", "body": "return codepat.sub('', str(s) if (s or (s == )) else '')", "docstring": "Strip all color codes from a string.\n Returns empty string for \"falsey\" inputs.", "id": "f9617:m5"} {"signature": "def get_settings_path(settings_module):", "body": "cwd = os.getcwd()settings_filename = '' % (settings_module.split('')[-])while cwd:if settings_filename in os.listdir(cwd):breakcwd = os.path.split(cwd)[]if os.name == '' and NT_ROOT.match(cwd):return Noneelif cwd == '':return Nonereturn cwd", "docstring": "Hunt down the settings.py module by going up the FS path", "id": "f9619:m0"} {"signature": "def _dummy(*args, **kwargs):", "body": "return", "docstring": "Dummy function that replaces the transaction functions", "id": "f9619:m1"} {"signature": "def begin(self):", "body": "for plugin in self.nose_config.plugins.plugins:if getattr(plugin, '', False):self.django_plugins.append(plugin)os.environ[''] = self.settings_moduleif self.conf.addPaths:map(add_path, self.conf.where)try:__import__(self.settings_module)self.settings_path = self.settings_moduleexcept ImportError:self.settings_path = get_settings_path(self.settings_module)if not self.settings_path:raise RuntimeError(\"\")add_path(self.settings_path)sys.path.append(self.settings_path)from django.conf import settingssettings.DEBUG = Falseself.call_plugins_method('', settings)from django.core import managementfrom django.test.utils import setup_test_environmentif hasattr(settings, ''):self.old_db = settings.DATABASES['']['']else:self.old_db = settings.DATABASE_NAMEfrom django.db import connectionsself._monkeypatch_test_classes()for connection in connections.all():self.call_plugins_method('', settings, setup_test_environment,connection)try:setup_test_environment()except RuntimeError: passimport djangoif hasattr(django, ''):django.setup()self.call_plugins_method('', settings)management.get_commands()if self.django_version < self.DJANGO_1_7:management._commands[''] = ''for connection in connections.all():self.call_plugins_method('', settings, connection, management)connection.creation.create_test_db(verbosity=self.verbosity,autoclobber=True,)logger.debug(\"\")self._num_syncdb_calls += self.call_plugins_method('', settings, connection)self.store_original_transaction_methods()", "docstring": "Create the test database and schema, if needed, and switch the\nconnection over to that database. 
Then call install() to install\nall apps listed in the loaded settings module.", "id": "f9619:c1:m17"} {"signature": "def finalize(self, result=None):", "body": "if not self.settings_path:returnfrom django.test.utils import teardown_test_environmentfrom django.db import connectionfrom django.conf import settingsself.call_plugins_method('', settings, connection)try:connection.creation.destroy_test_db(self.old_db,verbosity=self.verbosity,)except Exception:passself.call_plugins_method('', settings, connection)self.call_plugins_method('', settings, teardown_test_environment)teardown_test_environment()self.call_plugins_method('', settings)", "docstring": "Clean up any created database and schema.", "id": "f9619:c1:m24"} {"signature": "def get_unique_token(self):", "body": "if self._unique_token is None:self._unique_token = self._random_token()return self._unique_token", "docstring": "Get a unique token for usage in differentiating test runs that need to\nrun in parallel.", "id": "f9621:c0:m0"} {"signature": "def _random_token(self, bits=):", "body": "alphabet = string.ascii_letters + string.digits + ''num_letters = int(math.ceil(bits / ))return ''.join(random.choice(alphabet) for i in range(num_letters))", "docstring": "Generates a random token, using the url-safe base64 alphabet.\nThe \"bits\" argument specifies the bits of randomness to use.", "id": "f9621:c0:m1"} {"signature": "def afterRollback(self, settings):", "body": "self.clear_test_media()", "docstring": "After every test, we want to empty the media directory so that media\nleft over from one test doesn't affect a later test.", "id": "f9622:c0:m1"} {"signature": "def options(self, parser, env=None):", "body": "if env is None:env = os.environparser.add_option('',help='',)super(SphinxSearchPlugin, self).options(parser, env)", "docstring": "Sphinx config file that can optionally take the following python\ntemplate string arguments:\n\n``database_name``\n``database_password``\n``database_username``\n``database_host``\n``database_port``\n``sphinx_search_data_dir``\n``searchd_log_dir``", "id": "f9626:c0:m1"} {"signature": "def _wait_for_connection(self, port):", "body": "connected = Falsemax_tries = num_tries = wait_time = while not connected or num_tries >= max_tries:time.sleep(wait_time)try:af = socket.AF_INETaddr = ('', port)sock = socket.socket(af, socket.SOCK_STREAM)sock.connect(addr)except socket.error:if sock:sock.close()num_tries += continueconnected = Trueif not connected:print(\"\", file=sys.stderr)", "docstring": "Wait until we can make a socket connection to sphinx.", "id": "f9626:c0:m8"} {"signature": "def func():", "body": "pass", "docstring": "Function-level test\n >>> 1+3\n 4", "id": "f9640:m0"} {"signature": "def __str__(self):", "body": "return self.name", "docstring": "Function in class test\n>>> 1 + 2\n3", "id": "f9640:c0:m0"} {"signature": "def is_valid_identifier(name):", "body": "if not isinstance(name, str):return Falseif '' in name:return Falseif name.strip() != name:return Falsetry:code = compile(''.format(name), filename='', mode='')exec(code)return Trueexcept SyntaxError:return False", "docstring": "Pedantic yet imperfect. 
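The _random_token() record above sizes its output from the requested bits of randomness: with a url-safe base64 alphabet each character carries 6 bits, so the token needs ceil(bits / 6) characters. A sketch, assuming the two non-alphanumeric characters are the url-safe pair '-' and '_':

import math
import random
import string

def random_token(bits=128):
    # url-safe base64 alphabet: letters, digits, '-' and '_' (pair assumed).
    alphabet = string.ascii_letters + string.digits + '-_'
    num_letters = int(math.ceil(bits / 6.0))  # 6 bits of entropy per character
    return ''.join(random.choice(alphabet) for _ in range(num_letters))

token = random_token(128)
print(len(token))  # 22, since ceil(128 / 6) == 22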
Test to see if \"name\" is a valid python identifier", "id": "f9667:m0"} {"signature": "def make_body(self, data):", "body": "", "docstring": "please implement", "id": "f9670:c1:m1"} {"signature": "def callback(self):", "body": "", "docstring": "please implement", "id": "f9670:c1:m4"} {"signature": "@classmethoddef from_config(cls, cfg,default_fg=DEFAULT_FG_16, default_bg=DEFAULT_BG_16,default_fg_hi=DEFAULT_FG_256, default_bg_hi=DEFAULT_BG_256,max_colors=**):", "body": "e = PaletteEntry(mono = default_fg,foreground=default_fg,background=default_bg,foreground_high=default_fg_hi,background_high=default_bg_hi)if isinstance(cfg, str):e.foreground_high = cfgif e.allowed(cfg, ):e.foreground = cfgelse:rgb = AttrSpec(fg=cfg, bg=\"\", colors=max_colors).get_rgb_values()[:]e.foreground = nearest_basic_color(rgb)elif isinstance(cfg, dict):bg = cfg.get(\"\", None)if isinstance(bg, str):e.background_high = bgif e.allowed(bg, ):e.background = bgelse:rgb = AttrSpec(fg=bg, bg=\"\", colors=max_colors).get_rgb_values()[:]e.background = nearest_basic_color(rgb)elif isinstance(bg, dict):e.background_high = bg.get(\"\", default_bg_hi)if \"\" in bg:if e.allowed(bg[\"\"], ):e.background = bg[\"\"]else:rgb = AttrSpec(fg=bg[\"\"], bg=\"\", colors=max_colors).get_rgb_values()[:]e.background = nearest_basic_color(rgb)fg = cfg.get(\"\", cfg)if isinstance(fg, str):e.foreground_high = fgif e.allowed(fg, ):e.foreground = fgelse:rgb = AttrSpec(fg=fg, bg=\"\", colors=max_colors).get_rgb_values()[:]e.foreground = nearest_basic_color(rgb)elif isinstance(fg, dict):e.foreground_high = fg.get(\"\", default_fg_hi)if \"\" in fg:if e.allowed(fg[\"\"], ):e.foreground = fg[\"\"]else:rgb = AttrSpec(fg=fg[\"\"], bg=\"\", colors=max_colors).get_rgb_values()[:]e.foreground = nearest_basic_color(rgb)return e", "docstring": "Build a palette definition from either a simple string or a dictionary,\nfilling in defaults for items not specified.\n\ne.g.:\n \"dark green\"\n dark green foreground, black background\n\n {lo: dark gray, hi: \"#666\"}\n dark gray on 16-color terminals, #666 for 256+ color", "id": "f9671:c0:m1"} {"signature": "def check_import_dashboard_stackexchange(elastic_url, import_file, es_index=None,data_sources=None, add_vis_studies=False, strict=False):", "body": "if \"\" in data_sources and \"\" not in data_sources:raise RuntimeError('')", "docstring": "Check that stackexchange data sources adds also stackoverflow\n data source which is the name used in panels", "id": "f9677:m0"} {"signature": "def __init__(self, conf_file, conf_list=[]):", "body": "self.conf_list = [conf_file] + conf_listself.raw_conf = Noneself.__read_conf_files()", "docstring": "Initialize object.\n\n The object can be initialized with a configuration file,\n and, optionally, with a list of other configuration files.\n If the list of other configuration files exist, it will\n be read, in order, after the configuration file.\n Values set in a file read later will overwrite values set\n in files read earlier. 
Values not set by any file will\n be set to the default values, when possible.\n\n :param conf_file; configuration file name\n :param conf_list: list of other configuration files (default: empty)", "id": "f9685:c0:m0"} {"signature": "@classmethoddef general_params(cls):", "body": "params = {}params_general = {\"\": {\"\": {\"\": True,\"\": ,\"\": int,\"\": \"\"},\"\": {\"\": False,\"\": False,\"\": bool,\"\": \"\"},\"\": {\"\": False,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": False,\"\": True,\"\": bool,\"\": \"\"},\"\": {\"\": False,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": , \"\": int,\"\": \"\"},\"\": {\"\": True,\"\": ,\"\": int,\"\": \"\"},\"\": {\"\": True,\"\": ,\"\": int,\"\": \"\"},\"\": {\"\": True,\"\": ,\"\": int,\"\": \"\"},\"\": {\"\": True,\"\": ALIASES_JSON,\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": MENU_YAML,\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": None,\"\": int,\"\": \"\"}}}params_projects = {\"\": {\"\": {\"\": True,\"\": PROJECTS_JSON,\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": None,\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": False,\"\": bool,\"\": \"\"}}}params_phases = {\"\": {\"\": {\"\": False,\"\": True,\"\": bool,\"\": \"\"},\"\": {\"\": False,\"\": True,\"\": bool,\"\": \"\"},\"\": {\"\": False,\"\": True,\"\": bool,\"\": \"\"},\"\": {\"\": False,\"\": True,\"\": bool,\"\": \"\"},\"\": {\"\": True,\"\": False,\"\": bool,\"\": \"\"},\"\": {\"\": True,\"\": False,\"\": bool,\"\": \"\"}}}general_config_params = [params_general, params_projects, params_phases]for section_params in general_config_params:params.update(section_params)params_collection = {\"\": {\"\": {\"\": True,\"\": None,\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": None,\"\": str,\"\": \"\"},\"\": {\"\": False,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": False,\"\": bool,\"\": \"\"},\"\": {\"\": True,\"\": None,\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": None,\"\": str,\"\": \"\"}}}params_enrichment = {\"\": {\"\": {\"\": False,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": True,\"\": bool,\"\": \"\"},\"\": {\"\": True,\"\": ,\"\": int,\"\": \"\"},\"\": {\"\": True,\"\": None,\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": None,\"\": str,\"\": \"\"}}}params_panels = {\"\": {\"\": {\"\": True,\"\": True,\"\": bool,\"\": \"\"},\"\": {\"\": True,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": False,\"\": None,\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": None,\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": True,\"\": bool,\"\": \"\"},\"\": {\"\": True,\"\": False,\"\": bool,\"\": \"\"},\"\": {\"\": True,\"\": False,\"\": bool,\"\": \"\"},\"\": {\"\": True,\"\": False,\"\": bool,\"\": \"\"},\"\": {\"\": True,\"\": False,\"\": bool,\"\": \"\"},\"\": {\"\": True,\"\": False,\"\": bool,\"\": \"\"}}}params_report = {\"\": {\"\": {\"\": False,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": False,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": False,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": False,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": False,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": [],\"\": list,\"\": \"\"},\"\": {\"\": True,\"\": None,\"\": str,\"\": \"\"}}}params_sortinghat = {\"\": {\"\": {\"\": False,\"\": \"\",\"\": bool,\"\": \"\"},\"\": {\"\": False,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": False,\"\": [\"\"],\"\": list,\"\": \"\"},\"\": {\"\": False,\"\": ,\"\": int,\"\": \"\"},\"\": {\"\": False,\"\": \"\",\"\": str,\"\": 
\"\"},\"\": {\"\": False,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": False,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": False,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": False,\"\": [\"\", \"\", \"\"],\"\": list,\"\": \"\"},\"\": {\"\": True,\"\": False,\"\": bool,\"\": \"\",\"\": \"\"},\"\": {\"\": True,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": True,\"\": bool,\"\": \"\"\"\"},\"\": {\"\": True,\"\": False,\"\": bool,\"\": \"\"},\"\": {\"\": True,\"\": None,\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": [],\"\": list,\"\": \"\"},\"\": {\"\": True,\"\": None,\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": None,\"\": str,\"\": \"\"},\"\": {\"\": True,\"\": [],\"\": list,\"\": \"\"},\"\": {\"\": True,\"\": [],\"\": list,\"\": \"\"},\"\": {\"\": True,\"\": False,\"\": bool,\"\": \"\"}}}params_track_items = {\"\": {\"\": {\"\": False,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": False,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": False,\"\": \"\",\"\": str,\"\": \"\"},\"\": {\"\": False,\"\": \"\",\"\": str,\"\": \"\"}}}tasks_config_params = [params_collection, params_enrichment, params_panels,params_report, params_sortinghat, params_track_items]for section_params in tasks_config_params:params.update(section_params)return params", "docstring": "Define all the possible config params", "id": "f9685:c0:m2"} {"signature": "def set_param(self, section, param, value):", "body": "if section not in self.conf or param not in self.conf[section]:logger.error('', section, param)else:self.conf[section][param] = value", "docstring": "Change a param in the config", "id": "f9685:c0:m5"} {"signature": "def __add_types(self, raw_conf):", "body": "typed_conf = {}for s in raw_conf.keys():typed_conf[s] = {}for option in raw_conf[s]:val = raw_conf[s][option]if len(val) > and (val[] == '' and val[-] == ''):typed_conf[s][option] = val[:-]elif len(val) > and (val[] == '' and val[-] == ''):typed_conf[s][option] = val[:-].replace('', '').split('')elif val.lower() in ['', '']:typed_conf[s][option] = True if val.lower() == '' else Falseelif val.lower() == '':typed_conf[s][option] = ''else:try:typed_conf[s][option] = int(val)except ValueError:typed_conf[s][option] = valreturn typed_conf", "docstring": "Convert to int, boolean, list, None types config items", "id": "f9685:c0:m11"} {"signature": "def _add_to_conf(self, new_conf):", "body": "for section in new_conf:if section not in self.conf:self.conf[section] = new_conf[section]else:for param in new_conf[section]:self.conf[section][param] = new_conf[section][param]", "docstring": "Add new configuration to self.conf.\n\n Adds configuration parameters in new_con to self.conf.\n If they already existed in conf, overwrite them.\n\n :param new_conf: new configuration, to add", "id": "f9685:c0:m12"} {"signature": "@classmethoddef sha_github_file(cls, config, repo_file, repository_api, repository_branch):", "body": "repo_file_sha = Nonecfg = config.get_conf()github_token = cfg['']['']headers = {\"\": \"\" + github_token}url_dir = repository_api + \"\" + repository_branchlogger.debug(\"\", url_dir)raw_repo_file_info = requests.get(url_dir, headers=headers)raw_repo_file_info.raise_for_status()for rfile in raw_repo_file_info.json()['']:if rfile[''] == repo_file:logger.debug(\"\", rfile[\"\"])repo_file_sha = rfile[\"\"]breakreturn repo_file_sha", "docstring": "Return the GitHub SHA for a file in the repository", "id": "f9686:c3:m2"} {"signature": "def __get_uuids_from_profile_name(self, profile_name):", "body": "uuids = []with self.db.connect() as session:query = 
session.query(Profile).filter(Profile.name == profile_name)profiles = query.all()if profiles:for p in profiles:uuids.append(p.uuid)return uuids", "docstring": "Get the uuid for a profile name", "id": "f9686:c4:m2"} {"signature": "@classmethoddef measure_memory(cls, obj, seen=None):", "body": "size = sys.getsizeof(obj)if seen is None:seen = set()obj_id = id(obj)if obj_id in seen:return seen.add(obj_id)if isinstance(obj, dict):size += sum([cls.measure_memory(v, seen) for v in obj.values()])size += sum([cls.measure_memory(k, seen) for k in obj.keys()])elif hasattr(obj, ''):size += cls.measure_memory(obj.__dict__, seen)elif hasattr(obj, '') and not isinstance(obj, (str, bytes, bytearray)):size += sum([cls.measure_memory(i, seen) for i in obj])return size", "docstring": "Recursively finds size of objects", "id": "f9687:c1:m1"} {"signature": "def __feed_arthur(self):", "body": "with self.ARTHUR_FEED_LOCK:if (time.time() - self.ARTHUR_LAST_MEMORY_CHECK) > * self.ARTHUR_LAST_MEMORY_CHECK_TIME:self.ARTHUR_LAST_MEMORY_CHECK = time.time()logger.debug(\"\")try:memory_size = self.measure_memory(self.arthur_items) / ( * )except RuntimeError as ex:logger.warning(\"\", ex)memory_size = self.ARTHUR_LAST_MEMORY_SIZEself.ARTHUR_LAST_MEMORY_CHECK_TIME = time.time() - self.ARTHUR_LAST_MEMORY_CHECKlogger.debug(\"\",memory_size, self.ARTHUR_LAST_MEMORY_CHECK_TIME)self.ARTHUR_LAST_MEMORY_SIZE = memory_sizeif self.ARTHUR_LAST_MEMORY_SIZE > self.ARTHUR_MAX_MEMORY_SIZE:logger.debug(\"\")returnlogger.info(\"\")db_url = self.config.get_conf()['']['']conn = redis.StrictRedis.from_url(db_url)logger.debug(\"\", db_url)pipe = conn.pipeline()pipe.lrange(Q_STORAGE_ITEMS, , self.ARTHUR_REDIS_ITEMS - )pipe.ltrim(Q_STORAGE_ITEMS, self.ARTHUR_REDIS_ITEMS, -)items = pipe.execute()[]for item in items:arthur_item = pickle.loads(item)if arthur_item[''] not in self.arthur_items:self.arthur_items[arthur_item['']] = []self.arthur_items[arthur_item['']].append(arthur_item)for tag in self.arthur_items:if self.arthur_items[tag]:logger.debug(\"\", tag, len(self.arthur_items[tag]))", "docstring": "Feed Ocean with backend data collected from arthur redis queue", "id": "f9687:c1:m2"} {"signature": "def __feed_backend_arthur(self, repo):", "body": "self.__feed_arthur()tag = self.backend_tag(repo)logger.debug(\"\", self.arthur_items.keys())logger.debug(\"\", tag)if tag in self.arthur_items:logger.debug(\"\", tag)while self.arthur_items[tag]:yield self.arthur_items[tag].pop()", "docstring": "Feed Ocean with backend data collected from arthur redis queue", "id": "f9687:c1:m4"} {"signature": "def __create_arthur_json(self, repo, backend_args):", "body": "backend_args = self._compose_arthur_params(self.backend_section, repo)if self.backend_section == '':backend_args[''] = os.path.join(self.REPOSITORY_DIR, repo)backend_args[''] = self.backend_tag(repo)ajson = {\"\": [{}]}ajson[\"\"][][''] = self.backend_tag(repo)ajson[\"\"][][''] = self.backend_section.split(\"\")[]ajson[\"\"][][''] = backend_argsajson[\"\"][][''] = backend_args['']ajson[\"\"][][''] = {}ajson[\"\"][][''] = {\"\": self.ARTHUR_TASK_DELAY}es_col_url = self._get_collection_url()es_index = self.conf[self.backend_section]['']es = ElasticSearch(es_col_url, es_index)connector = get_connector_from_name(self.backend_section)klass = connector[] signature = inspect.signature(klass.fetch)last_activity = Nonefilter_ = {\"\": \"\", \"\": backend_args['']}if '' in signature.parameters:last_activity = es.get_last_item_field('', [filter_])if last_activity:ajson[\"\"][][''][''] = 
last_activity.isoformat()elif '' in signature.parameters:last_activity = es.get_last_item_field('', [filter_])if last_activity:ajson[\"\"][][''][''] = last_activityif last_activity:logging.info(\"\", last_activity)return(ajson)", "docstring": "Create the JSON for configuring arthur to collect data\n\n https://github.com/grimoirelab/arthur#adding-tasks\n Sample for git:\n\n {\n \"tasks\": [\n {\n \"task_id\": \"arthur.git\",\n \"backend\": \"git\",\n \"backend_args\": {\n \"gitpath\": \"/tmp/arthur_git/\",\n \"uri\": \"https://github.com/grimoirelab/arthur.git\"\n },\n \"category\": \"commit\",\n \"archive_args\": {\n \"archive_path\": '/tmp/test_archives',\n \"fetch_from_archive\": false,\n \"archive_after\": None\n },\n \"scheduler_args\": {\n \"delay\": 10\n }\n }\n ]\n }", "id": "f9687:c1:m5"} {"signature": "def __check_looks_like_uri(self, uri):", "body": "if uri.split('')[] == '':return Trueelif uri.split('')[] == '':if uri.split('')[] == '':return Trueelse:raise GithubFileNotFound('' % uri)", "docstring": "Checks the URI looks like a RAW uri in github:\n\n - 'https://raw.githubusercontent.com/github/hubot/master/README.md'\n - 'https://github.com/github/hubot/raw/master/README.md'\n\n :param uri: uri of the file", "id": "f9688:c0:m1"} {"signature": "def read_file_from_uri(self, uri):", "body": "logger.debug(\"\" % (uri))self.__check_looks_like_uri(uri)try:req = urllib.request.Request(uri)req.add_header('', '' % self.token)r = urllib.request.urlopen(req)except urllib.error.HTTPError as err:if err.code == :raise GithubFileNotFound('' % uri)else:raisereturn r.read().decode(\"\")", "docstring": "Reads the file from Github\n\n :param uri: URI of the Github raw File\n\n :returns: UTF-8 text with the content", "id": "f9688:c0:m2"} {"signature": "def is_backend_task(self):", "body": "return True", "docstring": "Returns True if the Task is executed per backend.\ni.e. SortingHat unify is not executed per backend.", "id": "f9689:c0:m3"} {"signature": "def execute(self):", "body": "logger.debug(\"\")", "docstring": "Execute the Task", "id": "f9689:c0:m4"} {"signature": "def es_version(self, url):", "body": "try:res = self.grimoire_con.get(url)res.raise_for_status()major = res.json()[''][''].split(\"\")[]except Exception:logger.error(\"\" + url)raisereturn major", "docstring": "Get Elasticsearch version.\n\n Get the version of Elasticsearch. 
This is useful because\n Elasticsearch and Kibiter are paired (same major version for 5, 6).\n\n :param url: Elasticseearch url hosting Kibiter indices\n :returns: major version, as string", "id": "f9689:c0:m13"} {"signature": "def __init__(self, config):", "body": "self.config = configself.conf = config.get_conf()self.grimoire_con = grimoire_con(conn_retries=)", "docstring": "config is a Config object", "id": "f9690:c0:m0"} {"signature": "def execute_tasks(self, tasks_cls):", "body": "self.execute_batch_tasks(tasks_cls)", "docstring": "Just a wrapper to the execute_batch_tasks method", "id": "f9690:c0:m6"} {"signature": "def execute_nonstop_tasks(self, tasks_cls):", "body": "self.execute_batch_tasks(tasks_cls,self.conf[''][''],self.conf[''][''], False)", "docstring": "Just a wrapper to the execute_batch_tasks method", "id": "f9690:c0:m7"} {"signature": "def execute_batch_tasks(self, tasks_cls, big_delay=, small_delay=, wait_for_threads=True):", "body": "def _split_tasks(tasks_cls):\"\"\"\"\"\"backend_t = []global_t = []for t in tasks_cls:if t.is_backend_task(t):backend_t.append(t)else:global_t.append(t)return backend_t, global_tbackend_tasks, global_tasks = _split_tasks(tasks_cls)logger.debug('' % (backend_tasks))logger.debug('' % (global_tasks))threads = []stopper = threading.Event()if len(backend_tasks) > :repos_backend = self._get_repos_by_backend()for backend in repos_backend:t = TasksManager(backend_tasks, backend, stopper, self.config, small_delay)threads.append(t)t.start()if len(global_tasks) > :gt = TasksManager(global_tasks, \"\", stopper, self.config, big_delay)threads.append(gt)gt.start()if big_delay > :when = datetime.now() + timedelta(seconds=big_delay)when_str = when.strftime('')logger.info(\"\" % (global_tasks, when_str))if wait_for_threads:time.sleep() stopper.set() for t in threads:t.join()self.__check_queue_for_errors()logger.debug(\"\")", "docstring": "Start a task manager per backend to complete the tasks.\n\n:param task_cls: list of tasks classes to be executed\n:param big_delay: seconds before global tasks are executed, should be days usually\n:param small_delay: seconds before backend tasks are executed, should be minutes\n:param wait_for_threads: boolean to set when threads are infinite or\n should be synchronized in a meeting point", "id": "f9690:c0:m8"} {"signature": "def __execute_initial_load(self):", "body": "if self.conf['']['']:tasks_cls = [TaskPanels, TaskPanelsMenu]self.execute_tasks(tasks_cls)if self.conf['']['']:tasks_cls = [TaskInitSortingHat]self.execute_tasks(tasks_cls)logger.info(\"\")tasks_cls = [TaskProjects]self.execute_tasks(tasks_cls)logger.info(\"\")return", "docstring": "Tasks that should be done just one time", "id": "f9690:c0:m10"} {"signature": "def start(self):", "body": "logger.info(\"\")logger.info(\"\")logger.info(\"\")logger.info(\"\")if not self.check_es_access():print('')sys.exit()if self.conf['']['']:if not self.check_redis_access():print('')sys.exit()if not self.check_arthur_access():print('')sys.exit()if self.conf['']['']:if not self.check_bestiary_access():print('')sys.exit()self.__execute_initial_load()all_tasks_cls = []all_tasks_cls.append(TaskProjects) if self.conf['']['']:if not self.conf['']['']:all_tasks_cls.append(TaskRawDataCollection)else:all_tasks_cls.append(TaskRawDataArthurCollection)if self.conf['']['']:all_tasks_cls.append(TaskIdentitiesLoad)all_tasks_cls.append(TaskIdentitiesMerge)all_tasks_cls.append(TaskIdentitiesExport)if self.conf['']['']:all_tasks_cls.append(TaskEnrich)if 
self.conf['']['']:all_tasks_cls.append(TaskTrackItems)if self.conf['']['']:all_tasks_cls.append(TaskReport)while True:if not all_tasks_cls:logger.warning(\"\")breaktry:if not self.conf['']['']:self.execute_batch_tasks(all_tasks_cls,self.conf[''][''],self.conf[''][''])self.execute_batch_tasks(all_tasks_cls,self.conf[''][''],self.conf[''][''])breakelse:self.execute_nonstop_tasks(all_tasks_cls)except DataCollectionError as e:logger.error(str(e))var = traceback.format_exc()logger.error(var)except DataEnrichmentError as e:logger.error(str(e))var = traceback.format_exc()logger.error(var)logger.info(\"\")", "docstring": "This method defines the workflow of SirMordred. So it calls to:\n- initialize the databases\n- execute the different phases for the first iteration\n (collection, identities, enrichment)\n- start the collection and enrichment in parallel by data source\n- start also the Sorting Hat merge", "id": "f9690:c0:m11"} {"signature": "def compose_mbox(projects):", "body": "mbox_archives = ''mailing_lists_projects = [project for project in projects if '' in projects[project]]for mailing_lists in mailing_lists_projects:projects[mailing_lists][''] = []for mailing_list in projects[mailing_lists]['']:if '' in mailing_list:name = mailing_list.split('')[]elif '' in mailing_list:name = mailing_list.split('')[]else:name = mailing_list.split('')[]list_new = \"\" % (name, mbox_archives, name, name)projects[mailing_lists][''].append(list_new)return projects", "docstring": "Compose projects.json only for mbox, but using the mailing_lists lists\n\n change: 'https://dev.eclipse.org/mailman/listinfo/emft-dev'\n to: 'emfg-dev /home/bitergia/mboxes/emft-dev.mbox/emft-dev.mbox\n\n :param projects: projects.json\n :return: projects.json with mbox", "id": "f9692:m0"} {"signature": "def compose_gerrit(projects):", "body": "git_projects = [project for project in projects if '' in projects[project]]for project in git_projects:repos = [repo for repo in projects[project][''] if '' in repo]if len(repos) > :projects[project][''] = []for repo in repos:gerrit_project = repo.replace(\"\", \"\")gerrit_project = gerrit_project.replace(\"\", \"\")projects[project][''].append(\"\" + gerrit_project)return projects", "docstring": "Compose projects.json for gerrit, but using the git lists\n\n change: 'http://git.eclipse.org/gitroot/xwt/org.eclipse.xwt.git'\n to: 'git.eclipse.org_xwt/org.eclipse.xwt\n\n :param projects: projects.json\n :return: projects.json with gerrit", "id": "f9692:m1"} {"signature": "def compose_git(projects, data):", "body": "for p in [project for project in data if len(data[project]['']) > ]:repos = []for url in data[p]['']:if len(url[''].split()) > : repo = url[''].split()[].replace('', '')else:repo = url[''].replace('', '')if repo not in repos:repos.append(repo)projects[p][''] = reposreturn projects", "docstring": "Compose projects.json for git\n\n We need to replace '/c/' by '/gitroot/' for instance\n\n change: 'http://git.eclipse.org/c/xwt/org.eclipse.xwt.git'\n to: 'http://git.eclipse.org/gitroot/xwt/org.eclipse.xwt.git'\n\n :param projects: projects.json\n :param data: eclipse JSON\n :return: projects.json with git", "id": "f9692:m2"} {"signature": "def compose_mailing_lists(projects, data):", "body": "for p in [project for project in data if len(data[project]['']) > ]:if '' not in projects[p]:projects[p][''] = []urls = [url[''].replace('', '') for url in data[p][''] ifurl[''] not in projects[p]['']]projects[p][''] += urlsfor p in [project for project in data if len(data[project]['']) > ]:if '' not 
in projects[p]:projects[p][''] = []mailing_list = data[p][''][''].replace('', '')projects[p][''].append(mailing_list)return projects", "docstring": "Compose projects.json for mailing lists\n\n At upstream has two different key for mailing list: 'mailings_lists' and 'dev_list'\n The key 'mailing_lists' is an array with mailing lists\n The key 'dev_list' is a dict with only one mailing list\n\n :param projects: projects.json\n :param data: eclipse JSON\n :return: projects.json with mailing_lists", "id": "f9692:m3"} {"signature": "def compose_github(projects, data):", "body": "for p in [project for project in data if len(data[project]['']) > ]:if '' not in projects[p]:projects[p][''] = []urls = [url[''] for url in data[p][''] ifurl[''] not in projects[p]['']]projects[p][''] += urlsreturn projects", "docstring": "Compose projects.json for github\n\n :param projects: projects.json\n :param data: eclipse JSON\n :return: projects.json with github", "id": "f9692:m4"} {"signature": "def compose_bugzilla(projects, data):", "body": "for p in [project for project in data if len(data[project]['']) > ]:if '' not in projects[p]:projects[p][''] = []urls = [url[''] for url in data[p][''] ifurl[''] not in projects[p]['']]projects[p][''] += urlsreturn projects", "docstring": "Compose projects.json for bugzilla\n\n :param projects: projects.json\n :param data: eclipse JSON\n :return: projects.json with bugzilla", "id": "f9692:m5"} {"signature": "def compose_title(projects, data):", "body": "for project in data:projects[project] = {'': {'': data[project]['']}}return projects", "docstring": "Compose the projects JSON file only with the projects name\n\n :param projects: projects.json\n :param data: eclipse JSON with the origin format\n :return: projects.json with titles", "id": "f9692:m6"} {"signature": "def compose_projects_json(projects, data):", "body": "projects = compose_git(projects, data)projects = compose_mailing_lists(projects, data)projects = compose_bugzilla(projects, data)projects = compose_github(projects, data)projects = compose_gerrit(projects)projects = compose_mbox(projects)return projects", "docstring": "Compose projects.json with all data sources\n\n :param projects: projects.json\n :param data: eclipse JSON\n :return: projects.json with all data sources", "id": "f9692:m7"} {"signature": "@classmethoddef get_repos_by_backend_section(cls, backend_section, raw=True):", "body": "repos = []projects = TaskProjects.get_projects()for pro in projects:if backend_section in projects[pro]:if cls.GLOBAL_PROJECT not in projects:repos += projects[pro][backend_section]else:if raw:if pro != cls.GLOBAL_PROJECT:if backend_section not in projects[cls.GLOBAL_PROJECT]:repos += projects[pro][backend_section]elif backend_section in projects[pro] and backend_section in projects[cls.GLOBAL_PROJECT]:repos += projects[cls.GLOBAL_PROJECT][backend_section]else:not_in_unknown = [projects[pro] for pro in projects if pro != cls.GLOBAL_PROJECT][]if backend_section not in not_in_unknown:repos += projects[cls.GLOBAL_PROJECT][backend_section]else:if pro != cls.GLOBAL_PROJECT:if backend_section not in projects[cls.GLOBAL_PROJECT]:repos += projects[pro][backend_section]elif backend_section in projects[pro] and backend_section in projects[cls.GLOBAL_PROJECT]:repos += projects[pro][backend_section]else:not_in_unknown_prj = [projects[prj] for prj in projects if prj != cls.GLOBAL_PROJECT]not_in_unknown_sections = list(set([section for prj in not_in_unknown_prjfor section in list(prj.keys())]))if backend_section not in 
not_in_unknown_sections:repos += projects[pro][backend_section]logger.debug(\"\", backend_section, repos, raw)repos = list(set(repos))return repos", "docstring": "return list with the repositories for a backend_section", "id": "f9693:c0:m4"} {"signature": "def convert_from_eclipse(self, eclipse_projects):", "body": "projects = {}projects[''] = {\"\": [\"\"],\"\": [\"\"]}projects = compose_title(projects, eclipse_projects)projects = compose_projects_json(projects, eclipse_projects)return projects", "docstring": "Convert from eclipse projects format to grimoire projects json format", "id": "f9693:c0:m8"} {"signature": "def __init__(self, tasks_cls, backend_section, stopper, config, timer=):", "body": "super().__init__(name=backend_section) self.config = configself.tasks_cls = tasks_cls self.tasks = [] self.backend_section = backend_sectionself.stopper = stopper self.timer = timerself.thread_id = None", "docstring": ":tasks_cls : tasks classes to be executed using the backend\n:backend_section: perceval backend section name\n:config: config object for the manager", "id": "f9695:c0:m0"} {"signature": "def __kibiter_version(self):", "body": "version = Nonees_url = self.conf['']['']config_url = ''url = urijoin(es_url, config_url)version = Nonetry:res = self.grimoire_con.get(url)res.raise_for_status()version = res.json()[''][''][]['']logger.debug(\"\", version)except requests.exceptions.HTTPError:logger.warning(\"\")return version", "docstring": "Get the kibiter vesion.\n\n :param major: major Elasticsearch version", "id": "f9696:c0:m2"} {"signature": "def create_dashboard(self, panel_file, data_sources=None, strict=True):", "body": "es_enrich = self.conf['']['']kibana_url = self.conf['']['']mboxes_sources = set(['', '', '', ''])if data_sources and any(x in data_sources for x in mboxes_sources):data_sources = list(data_sources)data_sources.append('')if data_sources and ('' in data_sources):data_sources = list(data_sources)data_sources.append('')if data_sources and '' in data_sources:data_sources = list(data_sources)data_sources.append('')if data_sources and '' in data_sources:data_sources = list(data_sources)data_sources.append('')if data_sources and '' in data_sources:data_sources = list(data_sources)data_sources.append('')try:import_dashboard(es_enrich, kibana_url, panel_file, data_sources=data_sources, strict=strict)except ValueError:logger.error(\"\", panel_file)except RuntimeError:logger.error(\"\", panel_file)", "docstring": "Upload a panel to Elasticsearch if it does not exist yet.\n\n If a list of data sources is specified, upload only those\n elements (visualizations, searches) that match that data source.\n\n :param panel_file: file name of panel (dashobard) to upload\n :param data_sources: list of data sources\n :param strict: only upload a dashboard if it is newer than the one already existing", "id": "f9696:c0:m6"} {"signature": "def __upload_title(self, kibiter_major):", "body": "if kibiter_major == \"\":resource = \"\"data = {\"\": {\"\": self.project_name}}mapping_resource = \"\"mapping = {\"\": \"\"}url = urijoin(self.conf[''][''], resource)mapping_url = urijoin(self.conf[''][''],mapping_resource)logger.debug(\"\")res = self.grimoire_con.put(mapping_url, data=json.dumps(mapping),headers=ES6_HEADER)try:res.raise_for_status()except requests.exceptions.HTTPError:logger.error(\"\")logger.error(res.json())logger.debug(\"\")res = self.grimoire_con.post(url, data=json.dumps(data),headers=ES6_HEADER)try:res.raise_for_status()except 
requests.exceptions.HTTPError:logger.error(\"\")logger.error(res.json())", "docstring": "Upload to Kibiter the title for the dashboard.\n\n The title is shown on top of the dashboard menu, and is Usually\n the name of the project being dashboarded.\n This is done only for Kibiter 6.x.\n\n :param kibiter_major: major version of kibiter", "id": "f9696:c1:m3"} {"signature": "def __create_dashboard_menu(self, dash_menu, kibiter_major):", "body": "logger.info(\"\")if kibiter_major == \"\":menu_resource = \"\"mapping_resource = \"\"mapping = {\"\": \"\"}menu = {'': dash_menu}else:menu_resource = \"\"mapping_resource = \"\"mapping = {\"\": \"\"}menu = dash_menumenu_url = urijoin(self.conf[''][''],menu_resource)mapping_url = urijoin(self.conf[''][''],mapping_resource)logger.debug(\"\")res = self.grimoire_con.put(mapping_url, data=json.dumps(mapping),headers=ES6_HEADER)try:res.raise_for_status()except requests.exceptions.HTTPError:logger.error(\"\")res = self.grimoire_con.post(menu_url, data=json.dumps(menu),headers=ES6_HEADER)try:res.raise_for_status()except requests.exceptions.HTTPError:logger.error(\"\")logger.error(res.json())raise", "docstring": "Create the menu definition to access the panels in a dashboard.\n\n :param menu: dashboard menu to upload\n :param kibiter_major: major version of kibiter", "id": "f9696:c1:m4"} {"signature": "def __remove_dashboard_menu(self, kibiter_major):", "body": "logger.info(\"\")if kibiter_major == \"\":metadashboard = \"\"else:metadashboard = \"\"menu_url = urijoin(self.conf[''][''], metadashboard)self.grimoire_con.delete(menu_url)", "docstring": "Remove existing menu for dashboard, if any.\n\n Usually, we remove the menu before creating a new one.\n\n :param kibiter_major: major version of kibiter", "id": "f9696:c1:m5"} {"signature": "def __get_menu_entries(self, kibiter_major):", "body": "menu_entries = []for entry in self.panels_menu:if entry[''] not in self.data_sources:continueparent_menu_item = {'': entry[''],'': entry[''],'': \"\",'': \"\",'': []}for subentry in entry['']:try:dash_name = get_dashboard_name(subentry[''])except FileNotFoundError:logging.error(\"\", subentry[''])continuechild_item = {\"\": subentry[''],\"\": subentry[''],\"\": \"\",\"\": \"\",\"\": dash_name}parent_menu_item[''].append(child_item)menu_entries.append(parent_menu_item)return menu_entries", "docstring": "Get the menu entries from the panel definition", "id": "f9696:c1:m6"} {"signature": "def __get_dash_menu(self, kibiter_major):", "body": "omenu = []omenu.append(self.menu_panels_common[''])ds_menu = self.__get_menu_entries(kibiter_major)kafka_menu = Nonecommunity_menu = Nonefound_kafka = [pos for pos, menu in enumerate(ds_menu) if menu[''] == KAFKA_NAME]if found_kafka:kafka_menu = ds_menu.pop(found_kafka[])found_community = [pos for pos, menu in enumerate(ds_menu) if menu[''] == COMMUNITY_NAME]if found_community:community_menu = ds_menu.pop(found_community[])ds_menu.sort(key=operator.itemgetter(''))omenu += ds_menuif kafka_menu:omenu.append(kafka_menu)if community_menu:omenu.append(community_menu)omenu.append(self.menu_panels_common[''])omenu.append(self.menu_panels_common[''])logger.debug(\"\", json.dumps(ds_menu, indent=))return omenu", "docstring": "Order the dashboard menu", "id": "f9696:c1:m7"} {"signature": "def __autorefresh_studies(self, cfg):", "body": "if '' not in self.conf[self.backend_section] or'' not in self.conf[self.backend_section]['']:logger.debug(\"\")returnaoc_index = self.conf[''].get('', GitEnrich.GIT_AOC_ENRICHED)if not aoc_index:aoc_index = 
GitEnrich.GIT_AOC_ENRICHEDlogger.debug(\"\", aoc_index)es = Elasticsearch([self.conf['']['']], timeout=,verify_certs=self._get_enrich_backend().elastic.requests.verify)if not es.indices.exists(index=aoc_index):logger.debug(\"\")returnlogger.debug(\"\")aoc_backend = GitEnrich(self.db_sh, None, cfg[''][''],self.db_user, self.db_password, self.db_host)aoc_backend.mapping = Noneaoc_backend.roles = ['']elastic_enrich = get_elastic(self.conf[''][''],aoc_index, clean=False, backend=aoc_backend)aoc_backend.set_elastic(elastic_enrich)self.__autorefresh(aoc_backend, studies=True)", "docstring": "Execute autorefresh for areas of code study if configured", "id": "f9700:c0:m6"} {"signature": "def __studies(self, retention_time):", "body": "cfg = self.config.get_conf()if '' not in cfg[self.backend_section] or notcfg[self.backend_section]['']:logger.debug('' % self.backend_section)returnstudies = [study for study in cfg[self.backend_section][''] if study.strip() != \"\"]if not studies:logger.debug('' % self.backend_section)returnlogger.debug(\"\" % (self.backend_section, studies))time.sleep() enrich_backend = self._get_enrich_backend()ocean_backend = self._get_ocean_backend(enrich_backend)active_studies = []all_studies = enrich_backend.studiesall_studies_names = [study.__name__ for study in enrich_backend.studies]logger.debug(\"\", self.backend_section, all_studies_names)logger.debug(\"\", studies)cfg_studies_types = [study.split(\"\")[] for study in studies]if not set(cfg_studies_types).issubset(set(all_studies_names)):logger.error('', self.backend_section, studies)raise RuntimeError('', self.backend_section, studies)for study in enrich_backend.studies:if study.__name__ in cfg_studies_types:active_studies.append(study)enrich_backend.studies = active_studiesprint(\"\" % (self.backend_section,[study for study in studies]))studies_args = self.__load_studies()do_studies(ocean_backend, enrich_backend, studies_args, retention_time=retention_time)enrich_backend.studies = all_studies", "docstring": "Execute the studies configured for the current backend", "id": "f9700:c0:m7"} {"signature": "def retain_identities(self, retention_time):", "body": "enrich_es = self.conf['']['']sortinghat_db = self.dbcurrent_data_source = self.get_backend(self.backend_section)active_data_sources = self.config.get_active_data_sources()if retention_time is None:logger.debug(\"\")returnif retention_time <= :logger.debug(\"\")returnretain_identities(retention_time, enrich_es, sortinghat_db, current_data_source, active_data_sources)", "docstring": "Retain the identities in SortingHat based on the `retention_time`\n value declared in the setup.cfg.\n\n :param retention_time: maximum number of minutes wrt the current date to retain the SortingHat data", "id": "f9700:c0:m8"} {"signature": "def micro_mordred(cfg_path, backend_sections, raw, arthur, identities, enrich, panels):", "body": "config = Config(cfg_path)if raw:for backend in backend_sections:get_raw(config, backend, arthur)if identities:get_identities(config)if enrich:for backend in backend_sections:get_enrich(config, backend)if panels:get_panels(config)", "docstring": "Execute the raw and/or the enrich phases of a given backend section defined in a Mordred configuration file.\n\n :param cfg_path: the path of a Mordred configuration file\n :param backend_sections: the backend sections where the raw and/or enrich phases will be executed\n :param raw: if true, it activates the collection of raw data\n :param arthur: if true, it enables Arthur to collect the raw data\n :param 
identities: if true, it activates the identities merge in SortingHat\n :param enrich: if true, it activates the collection of enrich data\n :param panels: if true, it activates the upload of panels", "id": "f9702:m0"} {"signature": "def get_raw(config, backend_section, arthur):", "body": "if arthur:task = TaskRawDataArthurCollection(config, backend_section=backend_section)else:task = TaskRawDataCollection(config, backend_section=backend_section)TaskProjects(config).execute()try:task.execute()logging.info(\"\")except Exception as e:logging.error(str(e))sys.exit(-)", "docstring": "Execute the raw phase for a given backend section, optionally using Arthur\n\n :param config: a Mordred config object\n :param backend_section: the backend section where the raw phase is executed\n :param arthur: if true, it enables Arthur to collect the raw data", "id": "f9702:m1"} {"signature": "def get_identities(config):", "body": "TaskProjects(config).execute()task = TaskIdentitiesMerge(config)task.execute()logging.info(\"\")", "docstring": "Execute the merge identities phase\n\n :param config: a Mordred config object", "id": "f9702:m2"} {"signature": "def get_enrich(config, backend_section):", "body": "TaskProjects(config).execute()task = TaskEnrich(config, backend_section=backend_section)try:task.execute()logging.info(\"\")except Exception as e:logging.error(str(e))sys.exit(-)", "docstring": "Execute the enrich phase for a given backend section\n\n :param config: a Mordred config object\n :param backend_section: the backend section where the enrich phase is executed", "id": "f9702:m3"} {"signature": "def get_panels(config):", "body": "task = TaskPanels(config)task.execute()task = TaskPanelsMenu(config)task.execute()logging.info(\"\")", "docstring": "Execute the panels phase\n\n :param config: a Mordred config object", "id": "f9702:m4"} {"signature": "def config_logging(debug):", "body": "if debug:logging.basicConfig(level=logging.DEBUG, format='')logging.debug(\"\")else:logging.basicConfig(level=logging.INFO, format='')", "docstring": "Config logging level output output", "id": "f9702:m5"} {"signature": "def get_params_parser():", "body": "parser = argparse.ArgumentParser(add_help=False)parser.add_argument('', '', dest='',action='',help=argparse.SUPPRESS)parser.add_argument(\"\", action='', dest='',help=\"\")parser.add_argument(\"\", action='', dest='',help=\"\")parser.add_argument(\"\", action='', dest='',help=\"\")parser.add_argument(\"\", action='', dest='',help=\"\")parser.add_argument(\"\", action='', dest='',help=\"\")parser.add_argument(\"\", dest='',help=\"\")parser.add_argument(\"\", dest='', default=[],nargs='', help=\"\")if len(sys.argv) == :parser.print_help()sys.exit()return parser", "docstring": "Parse command line arguments", "id": "f9702:m6"} {"signature": "def get_params():", "body": "parser = get_params_parser()args = parser.parse_args()if not args.raw and not args.enrich and not args.identities and not args.panels:print(\"\")sys.exit()return args", "docstring": "Get params to execute the micro-mordred", "id": "f9702:m7"} {"signature": "def random_rgb():", "body": "return random.randint(, ), random.randint(, ), random.randint(, )", "docstring": "Generate a uniformly random RGB value.\n\n:return: A tuple of three integers with values between 0 and 255 inclusive", "id": "f9713:m0"} {"signature": "def random_hex():", "body": "return rgb_to_hex(random_rgb())", "docstring": "Generate a uniformly random HEX value.\n\n:return: A string representing a random HEX value between 000000 and FFFFFF 
inclusive", "id": "f9713:m1"} {"signature": "def random_web():", "body": "return rgb_to_web(random_rgb())", "docstring": "Generate a uniformly random WEB value.\n\n:return:", "id": "f9713:m2"} {"signature": "def offset_random_rgb(seed, amount=):", "body": "r, g, b = seedresults = []for _ in range(amount):base_val = ((r + g + b) / ) + new_val = base_val + (random.random() * rgb_max_val / ) ratio = new_val / base_valresults.append((min(int(r*ratio), rgb_max_val), min(int(g*ratio), rgb_max_val), min(int(b*ratio), rgb_max_val)))return results[] if len(results) > else results", "docstring": "Given a seed color, generate a specified number of random colors (1 color by default) determined by a randomized\noffset from the seed.\n\n:param seed:\n:param amount:\n:return:", "id": "f9713:m3"} {"signature": "def offset_random_hex(seed, amount=):", "body": "return rgb_to_hex(offset_random_rgb(seed, amount))", "docstring": "Given a seed color, generate a specified number of random colors (1 color by default) determined by a randomized\noffset from the seed.\n\n:param seed:\n:param amount:\n:return:", "id": "f9713:m4"} {"signature": "def offset_random_web(seed, amount=):", "body": "return rgb_to_web(offset_random_rgb(seed, amount))", "docstring": "Given a seed color, generate a specified number of random colors (1 color by default) determined by a randomized\noffset from the seed.\n\n:param seed:\n:param amount:\n:return:", "id": "f9713:m5"} {"signature": "def color_run(start_color, end_color, step_count, inclusive=True, to_color=True):", "body": "if isinstance(start_color, Color):start_color = start_color.rgbif isinstance(end_color, Color):end_color = end_color.rgbstep = tuple((end_color[i] - start_color[i])/step_count for i in range())add = lambda x, y: tuple(sum(z) for z in zip(x, y))mult = lambda x, y: tuple(y * z for z in x)run = [add(start_color, mult(step, i)) for i in range(, step_count)]if inclusive:run = [start_color] + run + [end_color]return run if not to_color else [Color(c) for c in run]", "docstring": "Given a start color, end color, and a number of steps, returns a list of colors which represent a 'scale' between\nthe start and end color.\n\n:param start_color: The color starting the run\n:param end_color: The color ending the run\n:param step_count: The number of colors to have between the start and end color\n:param inclusive: Flag determining whether to include start and end values in run (default True)\n:param to_color: Flag indicating return values should be Color objects (default True)\n:return: List of colors between the start and end color\n:rtype: list", "id": "f9713:m6"} {"signature": "def text_color(background, dark_color=rgb_min, light_color=rgb_max):", "body": "max_y = rgb_to_yiq(rgb_max)[]return light_color if rgb_to_yiq(background)[] <= max_y / else dark_color", "docstring": "Given a background color in the form of an RGB 3-tuple, returns the color the text should be (defaulting to white\nand black) for best readability. The light (white) and dark (black) defaults can be overridden to return preferred\nvalues.\n\n:param background:\n:param dark_color:\n:param light_color:\n:return:", "id": "f9713:m7"} {"signature": "def minify_hex(_hex):", "body": "size = len(_hex.strip(''))if size == :return _hexelif size == :if _hex[] == _hex[] and _hex[] == _hex[] and _hex[] == _hex[]:return _hex[::]else:return _hexelse:raise ColorException(''.format(size))", "docstring": "Given a HEX value, tries to reduce it from a 6 character hex (e.g. #ffffff) to a 3 character hex (e.g. 
#fff).\nIf the HEX value is unable to be minified, returns the 6 character HEX representation.\n\n:param _hex:\n:return:", "id": "f9713:m8"} {"signature": "def __init__(self, color=None, **kwargs):", "body": "self.equality_fn = RGB_eqself.arithmetic = ArithmeticModel.LIGHTif isinstance(color, Color):self._color = color._colorelse:self._color = color if color else rgb_minfor k, v in kwargs.items():setattr(self, k, v)", "docstring": "Initialization", "id": "f9713:c2:m0"} {"signature": "def __eq__(self, other):", "body": "if isinstance(other, Color):return self.equality_fn(self, other)return False", "docstring": "Equals", "id": "f9713:c2:m1"} {"signature": "def __ne__(self, other):", "body": "return not self.__eq__(other)", "docstring": "Not Equals", "id": "f9713:c2:m2"} {"signature": "def __add__(self, other):", "body": "if isinstance(other, Color):r1, g1, b1 = self.rgbr2, g2, b2 = other.rgbelif isinstance(other, tuple) and len(other) == :r1, g1, b1 = self.rgbr2, g2, b2 = otherelse:raise TypeError(\"\".format(type(self), type(other)))if self.arithmetic is ArithmeticModel.LIGHT:return Color((min(r1 + r2, rgb_max_val), min(g1 + g2, rgb_max_val), min(b1 + b2, rgb_max_val)))else:return Color(((r1 + r2 // ), (g1 + g2 // ), (b1 + b2 // )))", "docstring": "Addition", "id": "f9713:c2:m3"} {"signature": "def __sub__(self, other):", "body": "if isinstance(other, Color):r1, g1, b1 = self.rgbr2, g2, b2 = other.rgbelif isinstance(other, tuple) and len(other) == :r1, g1, b1 = self.rgbr2, g2, b2 = otherelse:raise TypeError(\"\".format(type(self), type(other)))return Color((max(r1 - r2, rgb_min_val), max(g1 - g2, rgb_min_val), max(b1 - b2, rgb_min_val)))", "docstring": "Subtraction", "id": "f9713:c2:m4"} {"signature": "def __iter__(self):", "body": "return iter(self._color)", "docstring": "Iterator", "id": "f9713:c2:m5"} {"signature": "def __str__(self):", "body": "return \"\".format(self._color)", "docstring": "String representation", "id": "f9713:c2:m6"} {"signature": "def __repr__(self):", "body": "return \"\".format(self._color)", "docstring": "General representation", "id": "f9713:c2:m7"} {"signature": "@propertydef red(self):", "body": "return self._color[]", "docstring": "The red component of the RGB color representation.", "id": "f9713:c2:m8"} {"signature": "@propertydef green(self):", "body": "return self._color[]", "docstring": "The green component of the RGB color representation.", "id": "f9713:c2:m10"} {"signature": "@propertydef blue(self):", "body": "return self._color[]", "docstring": "The blue component of the RGB color representation.", "id": "f9713:c2:m12"} {"signature": "@propertydef rgb(self):", "body": "return self._color", "docstring": "An RGB representation of the color.", "id": "f9713:c2:m14"} {"signature": "@propertydef hex(self):", "body": "return rgb_to_hex(self.rgb)", "docstring": "A 6-char HEX representation of the color, with a prepended octothorpe.", "id": "f9713:c2:m16"} {"signature": "@propertydef shorthex(self):", "body": "return minify_hex(self.hex)", "docstring": "The same as Color.hex, however, HEX values that can be minified to 3-char are returned as such.", "id": "f9713:c2:m18"} {"signature": "@propertydef web(self):", "body": "return rgb_to_web(self.rgb)", "docstring": "A WEB representation of the color.", "id": "f9713:c2:m19"} {"signature": "@propertydef yiq(self):", "body": "return rgb_to_yiq(self.rgb)", "docstring": "A YIQ representation of the color.", "id": "f9713:c2:m21"} {"signature": "@propertydef hsv(self):", "body": "return rgb_to_hsv(self.rgb)", 
"docstring": "An HSV representation of the color", "id": "f9713:c2:m23"} {"signature": "def rgb_to_hex(rgb):", "body": "r, g, b = rgbreturn \"\".format(hex(int(r))[:].zfill(), hex(int(g))[:].zfill(), hex(int(b))[:].zfill())", "docstring": "Convert an RGB color representation to a HEX color representation.\n\n(r, g, b) :: r -> [0, 255]\n g -> [0, 255]\n b -> [0, 255]\n\n:param rgb: A tuple of three numeric values corresponding to the red, green, and blue value.\n:return: HEX representation of the input RGB value.\n:rtype: str", "id": "f9719:m0"} {"signature": "def rgb_to_web(rgb):", "body": "try:return web_colors[rgb]except KeyError:return rgb_to_hex(rgb)", "docstring": "Convert an RGB color representation to a WEB color representation.\n\n(r, g, b) :: r -> [0, 255]\n g -> [0, 255]\n b -> [0, 255]\n\n:param rgb: A tuple of three numeric values corresponding to the red, green, and blue value.\n:return: WEB representation of the input RGB value.\n:rtype: str", "id": "f9719:m1"} {"signature": "def rgb_to_yiq(rgb):", "body": "r, g, b = rgb[] / , rgb[] / , rgb[] / y = ( * r) + ( * g) + ( * b)i = ( * r) - ( * g) - ( * b)q = ( * r) - ( * g) + ( * b)return round(y, ), round(i, ), round(q, )", "docstring": "Convert an RGB color representation to a YIQ color representation.\n\n(r, g, b) :: r -> [0, 255]\n g -> [0, 255]\n b -> [0, 255]\n\n:param rgb: A tuple of three numeric values corresponding to the red, green, and blue value.\n:return: YIQ representation of the input RGB value.\n:rtype: tuple", "id": "f9719:m2"} {"signature": "def rgb_to_hsv(rgb):", "body": "r, g, b = rgb[] / , rgb[] / , rgb[] / _min = min(r, g, b)_max = max(r, g, b)v = _maxdelta = _max - _minif _max == :return , , vs = delta / _maxif delta == :delta = if r == _max:h = * (((g - b) / delta) % )elif g == _max:h = * (((b - r) / delta) + )else:h = * (((r - g) / delta) + )return round(h, ), round(s, ), round(v, )", "docstring": "Convert an RGB color representation to an HSV color representation.\n\n(r, g, b) :: r -> [0, 255]\n g -> [0, 255]\n b -> [0, 255]\n\n:param rgb: A tuple of three numeric values corresponding to the red, green, and blue value.\n:return: HSV representation of the input RGB value.\n:rtype: tuple", "id": "f9719:m3"} {"signature": "def hex_to_rgb(_hex):", "body": "_hex = _hex.strip('')n = len(_hex) // if len(_hex) == :r = int(_hex[:n] * , )g = int(_hex[n: * n] * , )b = int(_hex[ * n: * n] * , )else:r = int(_hex[:n], )g = int(_hex[n: * n], )b = int(_hex[ * n: * n], )return r, g, b", "docstring": "Convert a HEX color representation to an RGB color representation.\n\nhex :: hex -> [000000, FFFFFF]\n\n:param _hex: The 3- or 6-char hexadecimal string representing the color value.\n:return: RGB representation of the input HEX value.\n:rtype: tuple", "id": "f9719:m4"} {"signature": "def hex_to_web(_hex):", "body": "try:return web_colors[hex_to_rgb(_hex)]except KeyError:return _hex", "docstring": "Convert a HEX color representation to a WEB color representation.\n\nhex :: hex -> [000000, FFFFFF]\n\n:param _hex: The 3- or 6-char hexadecimal string representing the color value.\n:return: WEB representation of the input HEX value.\n:rtype: str", "id": "f9719:m5"} {"signature": "def hex_to_yiq(_hex):", "body": "return rgb_to_yiq(hex_to_rgb(_hex))", "docstring": "Convert a HEX color representation to a YIQ color representation.\n\nhex :: hex -> [000000, FFFFFF]\n\n:param _hex: The 3- or 6-char hexadecimal string representing the color value.\n:return: YIQ representation of the input HEX value.\n:rtype: tuple", "id": "f9719:m6"} 
{"signature": "def hex_to_hsv(_hex):", "body": "return rgb_to_hsv(hex_to_rgb(_hex))", "docstring": "Convert a HEX color representation to an HSV color representation.\n\nhex :: hex -> [000000, FFFFFF]\n\n:param _hex: The 3- or 6-char hexadecimal string representing the color value.\n:return: HSV representation of the input HEX value.\n:rtype: tuple", "id": "f9719:m7"} {"signature": "def web_to_rgb(web):", "body": "try:return web_colors[web.lower()]except KeyError:return hex_to_rgb(web)", "docstring": "Convert a WEB color representation to an RGB color representation.\n\nweb :: web -> [000000, FFFFFF]\n | in static.web_colors\n\n:param web: The WEB string representation of a color.\n:return: RGB representation of the input WEB value.\n:rtype: tuple", "id": "f9719:m8"} {"signature": "def web_to_hex(web):", "body": "try:return rgb_to_hex(web_colors[web])except KeyError:return web", "docstring": "Convert a WEB color representation to a HEX color representation.\n\nweb :: web -> [000000, FFFFFF]\n | in static.web_colors\n\n:param web: The WEB string representation of a color.\n:return: HEX representation of the input WEB value.\n:rtype: str", "id": "f9719:m9"} {"signature": "def web_to_yiq(web):", "body": "return rgb_to_yiq(web_to_rgb(web))", "docstring": "Convert a WEB color representation to a YIQ color representation.\n\nweb :: web -> [000000, FFFFFF]\n | in static.web_colors\n\n:param web: The WEB string representation of a color.\n:return: YIQ representation of the input WEB value.\n:rtype: tuple", "id": "f9719:m10"} {"signature": "def web_to_hsv(web):", "body": "return rgb_to_hsv(web_to_rgb(web))", "docstring": "Convert a WEB color representation to an HSV color representation.\n\nweb :: web -> [000000, FFFFFF]\n | in static.web_colors\n\n:param web: The WEB string representation of a color.\n:return: HSV representation of the input WEB value.\n:rtype: tuple", "id": "f9719:m11"} {"signature": "def yiq_to_rgb(yiq):", "body": "y, i, q = yiqr = y + ( * i) + ( * q)g = y - ( * i) - ( * q)b = y - ( * i) + ( * q)r = if r > else max(, r)g = if g > else max(, g)b = if b > else max(, b)return round(r * , ), round(g * , ), round(b * , )", "docstring": "Convert a YIQ color representation to an RGB color representation.\n\n(y, i, q) :: y -> [0, 1]\n i -> [-0.5957, 0.5957]\n q -> [-0.5226, 0.5226]\n\n:param yiq: A tuple of three numeric values corresponding to the luma and chrominance.\n:return: RGB representation of the input YIQ value.\n:rtype: tuple", "id": "f9719:m12"} {"signature": "def yiq_to_hex(yiq):", "body": "return rgb_to_hex(yiq_to_rgb(yiq))", "docstring": "Convert a YIQ color representation to a HEX color representation.\n\n(y, i, q) :: y -> [0, 1]\n i -> [-0.5957, 0.5957]\n q -> [-0.5226, 0.5226]\n\n:param yiq: A tuple of three numeric values corresponding to the luma and chrominance.\n:return: HEX representation of the input YIQ value.\n:rtype: str", "id": "f9719:m13"} {"signature": "def yiq_to_web(yiq):", "body": "return rgb_to_web(yiq_to_rgb(yiq))", "docstring": "Convert a YIQ color representation to a WEB color representation.\n\n(y, i, q) :: y -> [0, 1]\n i -> [-0.5957, 0.5957]\n q -> [-0.5226, 0.5226]\n\n:param yiq: A tuple of three numeric values corresponding to the luma and chrominance.\n:return: WEB representation of the input YIQ value.\n:rtype: str", "id": "f9719:m14"} {"signature": "def yiq_to_hsv(yiq):", "body": "return rgb_to_hsv(yiq_to_rgb(yiq))", "docstring": "Convert a YIQ color representation to an HSV color representation.\n\n(y, i, q) :: y -> [0, 1]\n i -> [-0.5957, 
0.5957]\n q -> [-0.5226, 0.5226]\n\n:param yiq: A tuple of three numeric values corresponding to the luma and chrominance.\n:return: HSV representation of the input YIQ value.\n:rtype: tuple", "id": "f9719:m15"} {"signature": "def hsv_to_rgb(hsv):", "body": "h, s, v = hsvc = v * sh /= x = c * ( - abs((h % ) - ))m = v - cif h < :res = (c, x, )elif h < :res = (x, c, )elif h < :res = (, c, x)elif h < :res = (, x, c)elif h < :res = (x, , c)elif h < :res = (c, , x)else:raise ColorException(\"\")r, g, b = resreturn round((r + m)*, ), round((g + m)*, ), round((b + m)*, )", "docstring": "Convert an HSV color representation to an RGB color representation.\n\n(h, s, v) :: h -> [0, 360)\n s -> [0, 1]\n v -> [0, 1]\n\n:param hsv: A tuple of three numeric values corresponding to the hue, saturation, and value.\n:return: RGB representation of the input HSV value.\n:rtype: tuple", "id": "f9719:m16"} {"signature": "def hsv_to_hex(hsv):", "body": "return rgb_to_hex(hsv_to_rgb(hsv))", "docstring": "Convert an HSV color representation to a HEX color representation.\n\n(h, s, v) :: h -> [0, 360)\n s -> [0, 1]\n v -> [0, 1]\n\n:param hsv: A tuple of three numeric values corresponding to the hue, saturation, and value.\n:return: HEX representation of the input HSV value.\n:rtype: str", "id": "f9719:m17"} {"signature": "def hsv_to_web(hsv):", "body": "return rgb_to_web(hsv_to_rgb(hsv))", "docstring": "Convert an HSV color representation to a WEB color representation.\n\n(h, s, v) :: h -> [0, 360)\n s -> [0, 1]\n v -> [0, 1]\n\n:param hsv: A tuple of three numeric values corresponding to the hue, saturation, and value.\n:return: WEB representation of the input HSV value.\n:rtype: str", "id": "f9719:m18"} {"signature": "def hsv_to_yiq(hsv):", "body": "return rgb_to_yiq(hsv_to_rgb(hsv))", "docstring": "Convert an HSV color representation to a YIQ color representation.\n\n(h, s, v) :: h -> [0, 360)\n s -> [0, 1]\n v -> [0, 1]\n\n:param hsv: A tuple of three numeric values corresponding to the hue, saturation, and value.\n:return: YIQ representation of the input HSV value.\n:rtype: tuple", "id": "f9719:m19"} {"signature": "def __init__(self, chain_order=, vk_object=None, *file_paths):", "body": "if chain_order < :raise MarkovTextExcept(\"\".format(chain_order))self.chain_order = chain_orderself.tokens_array = ()self.base_dict = {}self.start_arrays = ()self.vk_object = vk_objectself.vocabulars = {}self.temp_folder = abspath(os_join(expanduser(\"\"), \"\"))if not isdir(self.temp_folder):makedirs(self.temp_folder)for _path in frozenset(filter(isfile, map(abspath, file_paths))):self.update(_path)", "docstring": ":chain_order: \u041a\u043e\u043b\u0438\u0447\u0435\u0441\u0442\u0432\u043e \u0437\u0432\u0435\u043d\u044c\u0435\u0432 \u0446\u0435\u043f\u0438, \u0434\u043b\u044f \u043f\u0440\u0438\u043d\u044f\u0442\u0438\u044f \u0440\u0435\u0448\u0435\u043d\u0438\u044f \u043e \u0441\u043b\u0435\u0434\u0443\u044e\u0449\u0435\u043c.\n:vk_object: \u041e\u0431\u044a\u0435\u043a\u0442 \u043a\u043b\u0430\u0441\u0441\u0430 \u0412\u043b\u0430\u0434\u044f-\u0431\u043e\u0442\u0430, \u0434\u043b\u044f \u0438\u043d\u0442\u0435\u0433\u0440\u0430\u0446\u0438\u0438. 
\u041d\u0435 \u043e\u0431\u044f\u0437\u0430\u0442\u0435\u043b\u0435\u043d.\n:file_paths: \u041f\u0443\u0442\u0438 \u043a \u0442\u0435\u043a\u0441\u0442\u043e\u0432\u044b\u043c \u0444\u0430\u0439\u043b\u0430\u043c, \u0434\u043b\u044f \u043e\u0431\u0443\u0447\u0435\u043d\u0438\u044f \u043c\u043e\u0434\u0435\u043b\u0438.", "id": "f9726:c1:m0"} {"signature": "def token_is_correct(self, token):", "body": "if self.is_rus_word(token):return Trueelif self.ONLY_MARKS.search(token):return Trueelif self.END_TOKENS.search(token):return Trueelif token in \"\":return Truereturn False", "docstring": "\u041f\u043e\u0434\u0445\u043e\u0434\u0438\u0442 \u043b\u0438 \u0442\u043e\u043a\u0435\u043d, \u0434\u043b\u044f \u0433\u0435\u043d\u0435\u0440\u0430\u0446\u0438\u0438 \u0442\u0435\u043a\u0441\u0442\u0430.\n\u0414\u043e\u043f\u0443\u0441\u043a\u0430\u044e\u0442\u0441\u044f \u0440\u0443\u0441\u0441\u043a\u0438\u0435 \u0441\u043b\u043e\u0432\u0430, \u0437\u043d\u0430\u043a\u0438 \u043f\u0440\u0435\u043f\u0438\u043d\u0430\u043d\u0438\u044f \u0438 \u0441\u0438\u043c\u0432\u043e\u043b\u044b \u043d\u0430\u0447\u0430\u043b\u0430 \u0438 \u043a\u043e\u043d\u0446\u0430.", "id": "f9726:c1:m2"} {"signature": "def get_optimal_variant(self, variants, start_words, **kwargs):", "body": "if not start_words:return (choice(variants), {})_variants = []_weights = []for tok in frozenset(variants):if not self.token_is_correct(tok):continueweight = variants.count(tok)for word in start_words:for token in self.ONLY_WORDS.finditer(word.strip().lower()):if token.group() == tok:weight <<= _variants.append(tok)_weights.append(weight)if not _variants:return (choice(variants), {})return (choices(_variants, weights=_weights, k=)[], {})", "docstring": "\u0412\u043e\u0437\u0432\u0440\u0430\u0449\u0430\u0435\u0442 \u043e\u043f\u0442\u0438\u043c\u0430\u043b\u044c\u043d\u044b\u0439 \u0432\u0430\u0440\u0438\u0430\u043d\u0442, \u0438\u0437 \u0432\u044b\u0431\u043e\u0440\u043a\u0438.", "id": "f9726:c1:m4"} {"signature": "def start_generation(self, *start_words, **kwargs):", "body": "out_text = \"\"_need_capialize = Truefor token in self._get_generate_tokens(*start_words, **kwargs):if token in \"\":_need_capialize = Truecontinueif self.ONLY_WORDS.search(token):out_text += \"\"if _need_capialize:_need_capialize = Falsetoken = token.title()out_text += tokenreturn out_text.strip()", "docstring": "\u0413\u0435\u043d\u0435\u0440\u0438\u0440\u0443\u0435\u0442 \u043f\u0440\u0435\u0434\u043b\u043e\u0436\u0435\u043d\u0438\u0435.\n:start_words: \u041f\u043e\u043f\u044b\u0442\u0430\u0442\u044c\u0441\u044f \u043d\u0430\u0447\u0430\u0442\u044c \u043f\u0440\u0435\u0434\u043b\u043e\u0436\u0435\u043d\u0438\u0435 \u0441 \u044d\u0442\u0438\u0445 \u0441\u043b\u043e\u0432.", "id": "f9726:c1:m6"} {"signature": "def get_start_array(self, *start_words, **kwargs):", "body": "if not self.start_arrays:raise MarkovTextExcept(\"\")if not start_words:return choice(self.start_arrays)_variants = []_weights = []for tokens in self.start_arrays:weight = for word in start_words:word = word.strip().lower()for token in self.ONLY_WORDS.finditer(word):if token.group() in tokens:weight <<= if weight > :_variants.append(tokens)_weights.append(weight)if not _variants:return choice(self.start_arrays)return choices(_variants, weights=_weights, k=)[]", "docstring": "\u0413\u0435\u043d\u0435\u0440\u0438\u0440\u0443\u0435\u0442 \u043d\u0430\u0447\u0430\u043b\u043e \u043f\u0440\u0435\u0434\u043b\u043e\u0436\u0435\u043d\u0438\u044f.\n:start_words: 
\u041f\u043e\u043f\u044b\u0442\u0430\u0442\u044c\u0441\u044f \u043d\u0430\u0447\u0430\u0442\u044c \u043f\u0440\u0435\u0434\u043b\u043e\u0436\u0435\u043d\u0438\u0435 \u0441 \u044d\u0442\u0438\u0445 \u0441\u043b\u043e\u0432.", "id": "f9726:c1:m7"} {"signature": "def create_base(self):", "body": "self.base_dict = {}_start_arrays = []for tokens, word in self.chain_generator():self.base_dict.setdefault(tokens, []).append(word)if tokens[] == \"\": _start_arrays.append(tokens)self.start_arrays = tuple(frozenset(self.get_corrected_arrays(_start_arrays)))", "docstring": "\u041c\u0435\u0442\u043e\u0434 \u0441\u043e\u0437\u0434\u0430\u0451\u0442 \u0431\u0430\u0437\u043e\u0432\u044b\u0439 \u0441\u043b\u043e\u0432\u0430\u0440\u044c, \u043d\u0430 \u043e\u0441\u043d\u043e\u0432\u0435 \u043c\u0430\u0441\u0441\u0438\u0432\u0430 \u0442\u043e\u043a\u0435\u043d\u043e\u0432.\n\u0412\u044b\u0437\u044b\u0432\u0430\u0435\u0442\u0441\u044f \u0438\u0437 \u043c\u0435\u0442\u043e\u0434\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f.", "id": "f9726:c1:m8"} {"signature": "def chain_generator(self):", "body": "n_chain = self.chain_orderif n_chain < :raise MarkovTextExcept(\"\".format(n_chain))n_chain += changing_array = deque(maxlen=n_chain)for token in self.tokens_array:changing_array.append(token)if len(changing_array) < n_chain:continue yield (tuple(changing_array)[:-], changing_array[-])", "docstring": "\u0412\u043e\u0437\u0432\u0440\u0430\u0449\u0430\u0435\u0442 \u0433\u0435\u043d\u0435\u0440\u0430\u0442\u043e\u0440, \u0444\u043e\u0440\u043c\u0430\u0442\u0430:\n ((\"\u0442\u043e\u043a\u0435\u043d\", ...), \"\u0432\u0430\u0440\u0438\u0430\u043d\u0442\")\n \u0413\u0434\u0435 \u043a\u043e\u043b\u0438\u0447\u0435\u0441\u0442\u0432\u043e \u0442\u043e\u043a\u0435\u043d\u043e\u0432 \u043e\u043f\u0440\u0435\u0434\u0435\u043b\u044f\u0435\u0442 \u043f\u0435\u0440\u0435\u043c\u0435\u043d\u043d\u0430\u044f \u043e\u0431\u044a\u0435\u043a\u0442\u0430 chain_order.", "id": "f9726:c1:m9"} {"signature": "def set_vocabulary(self, peer_id, from_dialogue=None, update=False):", "body": "self.tokens_array = self.get_vocabulary(peer_id,from_dialogue,update)self.create_base()", "docstring": "\u041f\u043e\u043b\u0443\u0447\u0430\u0435\u0442 \u0432\u043e\u043a\u0430\u0431\u0443\u043b\u0430\u0440 \u0438\u0437 \u0444\u0443\u043d\u043a\u0446\u0438\u0438 get_vocabulary \u0438 \u0434\u0435\u043b\u0430\u0435\u0442 \u0435\u0433\u043e \u0430\u043a\u0442\u0438\u0432\u043d\u044b\u043c.", "id": "f9726:c1:m10"} {"signature": "def create_dump(self, name=None):", "body": "name = name or \"\"backup_dump_file = os_join(self.temp_folder,\"\".format(name))dump_file = os_join(self.temp_folder,\"\".format(name))with open(backup_dump_file, \"\", encoding=\"\") as js_file:json.dump(self.tokens_array, js_file, ensure_ascii=False)copy2(backup_dump_file, dump_file)remove(backup_dump_file)", "docstring": "\u0421\u043e\u0445\u0440\u0430\u043d\u044f\u0435\u0442 \u0442\u0435\u043a\u0443\u0449\u0443\u044e \u0431\u0430\u0437\u0443 \u043d\u0430 \u0436\u0451\u0441\u0442\u043a\u0438\u0439 \u0434\u0438\u0441\u043a.\n:name: \u0418\u043c\u044f \u0444\u0430\u0439\u043b\u0430, \u0431\u0435\u0437 \u0440\u0430\u0441\u0448\u0438\u0440\u0435\u043d\u0438\u044f.", "id": "f9726:c1:m11"} {"signature": "def load_dump(self, name=None):", "body": "name = name or \"\"dump_file = os_join(self.temp_folder,\"\".format(name))if not isfile(dump_file):raise MarkovTextExcept(\"\".format(dump_file))with open(dump_file, \"\") as js_file:self.tokens_array = 
tuple(json.load(js_file))self.create_base()", "docstring": "Loads the base from disk.\nThe current base is replaced.\n:name: File name, without extension.", "id": "f9726:c1:m12"} {"signature": "def get_vocabulary(self, target, user=None, update=False):", "body": "if not self.vk_object:raise MarkovTextExcept(\"\")json_name = \"\".format(target.__class__.__name__,target.id,user.id)json_file = os_join(self.temp_folder, \"\".format(json_name))if not update:result = self.vocabulars.get(json_name, None)if result:return resultelif isfile(json_file):with open(json_file, \"\") as js_file:result = self.vocabulars[json_name] = tuple(json.load(js_file))return result_tokens_array = tuple(self.__parse_from_vk_dialogue(target, user))backup_file = \"\".format(splitext(json_file)[])with open(backup_file, \"\", encoding=\"\") as js_file:json.dump(_tokens_array, js_file, ensure_ascii=False)copy2(backup_file, json_file)remove(backup_file)self.vocabulars[json_name] = _tokens_arrayreturn _tokens_array", "docstring": "Returns the word stock based on VK conversations.\nUsed to imitate a specific person's speech.\nWorks only when the \"Vladya-bot\" object is imported.\n\n:target:\n The interlocutor object. Source of the conversation.\n:user:\n The user object whose speech is imitated.\n If None, the whole conversation is used.\n:update:\n Do not use the backup. 
Force an update.", "id": "f9726:c1:m13"} {"signature": "def update(self, data, fromfile=True):", "body": "func = (self._parse_from_file if fromfile else self._parse_from_text)new_data = tuple(func(data))if new_data:self.tokens_array += new_dataself.create_base()", "docstring": "Accepts text or a path to a file and updates the existing base.", "id": "f9726:c1:m14"} {"signature": "def _parse_from_text(self, text):", "body": "if not isinstance(text, str):raise MarkovTextExcept(\"\")text = text.strip().lower()need_start_token = Truetoken = \"\" for token in self.WORD_OR_MARKS.finditer(text):token = token.group()if need_start_token:need_start_token = Falseyield \"\"yield tokenif self.END_TOKENS.search(token):need_start_token = Trueyield \"\"if token != \"\":yield \"\"", "docstring": "Returns a generator of tokens from the text.", "id": "f9726:c1:m16"} {"signature": "def _parse_from_file(self, file_path):", "body": "file_path = abspath(file_path)if not isfile(file_path):raise MarkovTextExcept(\"\")with open(file_path, \"\") as txt_file:for line in txt_file:text = line.decode(\"\", \"\").strip()if not text:continueyield from self._parse_from_text(text)", "docstring": "See the description of _parse_from_text.\nThe only difference is that the input is a file path rather than text.", "id": "f9726:c1:m17"} {"signature": "def lines_hash(lines):", "body": "x = xxh32()for i in lines:x.update(i.encode())return x.digest()", "docstring": "Creates a unique binary id for the given lines\nArgs:\n lines (list): List of strings that should be collectively hashed\nReturns:\n bytearray: Binary hash", "id": "f9728:m0"} {"signature": "def tokenize(sentence):", "body": "tokens = []class Vars:start_pos = -last_type = ''def update(c, i):if c.isalpha() or c in '':t = ''elif c.isdigit() or c == '':t = ''elif c.isspace():t = ''else:t = ''if t != Vars.last_type or t == '':if Vars.start_pos >= :token = sentence[Vars.start_pos:i].lower()if token not in '':tokens.append(token)Vars.start_pos = - if t == '' else iVars.last_type = tfor i, char in enumerate(sentence):update(char, i)update('', len(sentence))return tokens", "docstring": "Converts a single sentence into a list of individual significant units\nArgs:\n sentence (str): Input string ie. 'This is a sentence.'\nReturns:\n list: List of tokens ie. 
['this', 'is', 'a', 'sentence']", "id": "f9728:m1"} {"signature": "def expand_parentheses(sent):", "body": "return SentenceTreeParser(sent).expand_parentheses()", "docstring": "['1', '(', '2', '|', '3, ')'] -> [['1', '2'], ['1', '3']]\nFor example:\n\nWill it (rain|pour) (today|tomorrow|)?\n\n---->\n\nWill it rain today?\nWill it rain tomorrow?\nWill it rain?\nWill it pour today?\nWill it pour tomorrow?\nWill it pour?\n\nArgs:\n sent (list): List of tokens in sentence\nReturns:\n list>: Multiple possible sentences from original", "id": "f9728:m2"} {"signature": "def resolve_conflicts(inputs, outputs):", "body": "data = {}for inp, out in zip(inputs, outputs):tup = tuple(inp)if tup in data:data[tup].append(out)else:data[tup] = [out]inputs, outputs = [], []for inp, outs in data.items():inputs.append(list(inp))combined = [] * len(outs[])for i in range(len(combined)):combined[i] = max(j[i] for j in outs)outputs.append(combined)return inputs, outputs", "docstring": "Checks for duplicate inputs and if there are any,\nremove one and set the output to the max of the two outputs\nArgs:\n inputs (list>): Array of input vectors\n outputs (list>): Array of output vectors\nReturns:\n tuple: The modified inputs and outputs", "id": "f9728:m4"} {"signature": "@staticmethoddef wrap_name(name):", "body": "if '' in name:parts = name.split('')intent_name, ent_name = parts[], parts[:]return intent_name + '' + ''.join(ent_name) + ''else:return '' + name + ''", "docstring": "Wraps SkillName:entity into SkillName:{entity}", "id": "f9731:c0:m2"} {"signature": "def __init__(self, tree):", "body": "self._tree = tree", "docstring": "Construct a sentence tree fragment which is merely a wrapper for\na list of Strings\n\nArgs:\n tree (?): Base tree for the sentence fragment, type depends on\n subclass, refer to those subclasses", "id": "f9733:c0:m0"} {"signature": "def tree(self):", "body": "return self._tree", "docstring": "Return the represented sentence tree as raw data.", "id": "f9733:c0:m1"} {"signature": "def expand(self):", "body": "return [[]]", "docstring": "Expanded version of the fragment. 
In this case an empty sentence.\n\nReturns:\n List>: A list with an empty sentence (= token/string list)", "id": "f9733:c0:m2"} {"signature": "def expand(self):", "body": "return [[self._tree]]", "docstring": "Creates one sentence that contains exactly that word.\n\nReturns:\n List>: A list with the given string as sentence\n (= token/string list)", "id": "f9733:c1:m0"} {"signature": "def expand(self):", "body": "old_expanded = [[]]for sub in self._tree:sub_expanded = sub.expand()new_expanded = []while len(old_expanded) > :sentence = old_expanded.pop()for new in sub_expanded:new_expanded.append(sentence + new)old_expanded = new_expandedreturn old_expanded", "docstring": "Creates a combination of all sub-sentences.\n\nReturns:\n List>: A list with all subsentence expansions combined in\n every possible way", "id": "f9733:c2:m0"} {"signature": "def expand(self):", "body": "options = []for option in self._tree:options.extend(option.expand())return options", "docstring": "Returns all of its options as seperated sub-sentences.\n\nReturns:\n List>: A list containing the sentences created by all\n expansions of its sub-sentences", "id": "f9733:c3:m0"} {"signature": "def _parse(self):", "body": "self._current_position = return self._parse_expr()", "docstring": "Generate sentence token trees\n['1', '(', '2', '|', '3, ')'] -> ['1', ['2', '3']]", "id": "f9733:c4:m1"} {"signature": "def _parse_expr(self):", "body": "sentence_list = []cur_sentence = []sentence_list.append(Sentence(cur_sentence))while self._current_position < len(self.tokens):cur = self.tokens[self._current_position]self._current_position += if cur == '':subexpr = self._parse_expr()normal_brackets = Falseif len(subexpr.tree()) == :normal_brackets = Truecur_sentence.append(Word(''))cur_sentence.append(subexpr)if normal_brackets:cur_sentence.append(Word(''))elif cur == '':cur_sentence = []sentence_list.append(Sentence(cur_sentence))elif cur == '':breakelse:cur_sentence.append(Word(cur))return Options(sentence_list)", "docstring": "Generate sentence token trees from the current position to\nthe next closing parentheses / end of the list and return it\n['1', '(', '2', '|', '3, ')'] -> ['1', [['2'], ['3']]]\n['2', '|', '3'] -> [['2'], ['3']]", "id": "f9733:c4:m2"} {"signature": "def _expand_tree(self, tree):", "body": "return tree.expand()", "docstring": "Expand a list of sub sentences to all combinated sentences.\n['1', ['2', '3']] -> [['1', '2'], ['1', '3']]", "id": "f9733:c4:m3"} {"signature": "@_save_argsdef add_intent(self, name, lines, reload_cache=False):", "body": "self.intents.add(name, lines, reload_cache)self.padaos.add_intent(name, lines)self.must_train = True", "docstring": "Creates a new intent, optionally checking the cache first\n\nArgs:\n name (str): The associated name of the intent\n lines (list): All the sentences that should activate the intent\n reload_cache: Whether to ignore cached intent if exists", "id": "f9734:c0:m2"} {"signature": "@_save_argsdef add_entity(self, name, lines, reload_cache=False):", "body": "Entity.verify_name(name)self.entities.add(Entity.wrap_name(name), lines, reload_cache)self.padaos.add_entity(name, lines)self.must_train = True", "docstring": "Adds an entity that matches the given lines.\n\nExample:\n self.add_intent('weather', ['will it rain on {weekday}?'])\n self.add_entity('{weekday}', ['monday', 'tuesday', 'wednesday']) # ...\n\nArgs:\n name (str): The name of the entity\n lines (list): Lines of example extracted entities\n reload_cache (bool): Whether to refresh all of cache", "id": 
"f9734:c0:m3"} {"signature": "@_save_argsdef load_entity(self, name, file_name, reload_cache=False):", "body": "Entity.verify_name(name)self.entities.load(Entity.wrap_name(name), file_name, reload_cache)with open(file_name) as f:self.padaos.add_entity(name, f.read().split(''))self.must_train = True", "docstring": "Loads an entity, optionally checking the cache first\n\nArgs:\n name (str): The associated name of the entity\n file_name (str): The location of the entity file\n reload_cache (bool): Whether to refresh all of cache", "id": "f9734:c0:m4"} {"signature": "@_save_argsdef load_file(self, *args, **kwargs):", "body": "self.load_intent(*args, **kwargs)", "docstring": "Legacy. Use load_intent instead", "id": "f9734:c0:m5"} {"signature": "@_save_argsdef load_intent(self, name, file_name, reload_cache=False):", "body": "self.intents.load(name, file_name, reload_cache)with open(file_name) as f:self.padaos.add_intent(name, f.read().split(''))self.must_train = True", "docstring": "Loads an intent, optionally checking the cache first\n\nArgs:\n name (str): The associated name of the intent\n file_name (str): The location of the intent file\n reload_cache (bool): Whether to refresh all of cache", "id": "f9734:c0:m6"} {"signature": "@_save_argsdef remove_intent(self, name):", "body": "self.intents.remove(name)self.padaos.remove_intent(name)self.must_train = True", "docstring": "Unload an intent", "id": "f9734:c0:m7"} {"signature": "@_save_argsdef remove_entity(self, name):", "body": "self.entities.remove(name)self.padaos.remove_entity(name)", "docstring": "Unload an entity", "id": "f9734:c0:m8"} {"signature": "def train(self, debug=True, force=False, single_thread=False, timeout=):", "body": "if not self.must_train and not force:returnself.padaos.compile()self.train_thread = Thread(target=self._train, kwargs=dict(debug=debug,single_thread=single_thread,timeout=timeout), daemon=True)self.train_thread.start()self.train_thread.join(timeout)self.must_train = Falsereturn not self.train_thread.is_alive()", "docstring": "Trains all the loaded intents that need to be updated\nIf a cache file exists with the same hash as the intent file,\nthe intent will not be trained and just loaded from file\n\nArgs:\n debug (bool): Whether to print a message to stdout each time a new intent is trained\n force (bool): Whether to force training if already finished\n single_thread (bool): Whether to force running in a single thread\n timeout (float): Seconds before cancelling training\nReturns:\n bool: True if training succeeded without timeout", "id": "f9734:c0:m10"} {"signature": "def train_subprocess(self, *args, **kwargs):", "body": "ret = call([sys.executable, '', '', '', self.cache_dir,'', json.dumps(self.serialized_args),'', json.dumps(args),'', json.dumps(kwargs),])if ret == :raise TypeError(''.format(args, kwargs))data = self.serialized_argsself.clear()self.apply_training_args(data)self.padaos.compile()if ret == :self.must_train = Falsereturn Trueelif ret == : return Falseelse:raise ValueError(''.format(ret))", "docstring": "Trains in a subprocess which provides a timeout guarantees everything shuts down properly\n\nArgs:\n See \nReturns:\n bool: True for success, False if timed out", "id": "f9734:c0:m11"} {"signature": "def calc_intents(self, query):", "body": "if self.must_train:self.train()intents = {} if self.train_thread and self.train_thread.is_alive() else {i.name: i for i in self.intents.calc_intents(query, self.entities)}sent = tokenize(query)for perfect_match in self.padaos.calc_intents(query):name = 
perfect_match['']intents[name] = MatchData(name, sent, matches=perfect_match[''], conf=)return list(intents.values())", "docstring": "Tests all the intents against the query and returns\ndata on how well each one matched against the query\n\nArgs:\n query (str): Input sentence to test against intents\nReturns:\n list: List of intent matches\nSee calc_intent() for a description of the returned MatchData", "id": "f9734:c0:m12"} {"signature": "def calc_intent(self, query):", "body": "matches = self.calc_intents(query)if len(matches) == :return MatchData('', '')best_match = max(matches, key=lambda x: x.conf)best_matches = (match for match in matches if match.conf == best_match.conf)return min(best_matches, key=lambda x: sum(map(len, x.matches.values())))", "docstring": "Tests all the intents against the query and returns\nmatch data of the best intent\n\nArgs:\n query (str): Input sentence to test against intents\nReturns:\n MatchData: Best intent match", "id": "f9734:c0:m13"} {"signature": "def _train_and_save(obj, cache, data, print_updates):", "body": "obj.train(data)if print_updates:print('' + obj.name + '')obj.save(cache)", "docstring": "Internal pickleable function used to train objects in another process", "id": "f9740:m0"} {"signature": "def randomseed(func):", "body": "def wrapper(*args, **kwargs):np.random.seed()return func(*args, **kwargs)return wrapper", "docstring": "Sets the seed of numpy's random number generator", "id": "f9757:m0"} {"signature": "def generate_sparse_system(n=, m=, p=, eta=, seed=):", "body": "global x_true, x_obs, Anp.random.seed(seed)x_true = * np.random.randn(n) * (np.random.rand(n) < p)A = np.random.randn(m, n)y = A.dot(x_true)xls = np.linalg.lstsq(A, y)[]ls_error = np.linalg.norm(xls - x_true, )return A, y, x_true, xls, ls_error", "docstring": "Generate a sparse, noisy system (Ax = y)\n\n Parameters\n ----------\n n : int\n Number of variables\n\n m : int\n Number of observations\n\n p : float\n Probability of non-zero variable\n\n eta : float\n Noise level (standard deviation of additive Gaussian noise)\n\n seed : int\n Random seed\n\n Returns\n -------\n A : array_like\n Sensing matrix (m x n)\n\n y : array_like\n Observations (m,)\n\n x_true : array_like\n True sparse signal (n,)\n\n xls : array_like\n Least squares solution (n,)\n\n ls_error : float\n Error (2-norm) of the least squares solution", "id": "f9758:m0"} {"signature": "def generate_lowrank_matrix(n=, m=, k=, eta=, seed=):", "body": "print(\"\")global Xtrue, Xobsnp.random.seed(seed)Xtrue = np.sin(np.linspace(, * np.pi, n)).reshape(-, ).dot(np.cos(np.linspace(, * np.pi, m)).reshape(, -))Xobs = Xtrue + eta * np.random.randn(n, m)return Xobs, Xtrue", "docstring": "Generate an n-by-m noisy low-rank matrix", "id": "f9760:m0"} {"signature": "def gradient_optimizer(coro):", "body": "class GradientOptimizer(Optimizer):@wraps(coro)def __init__(self, *args, **kwargs):self.algorithm = coro(*args, **kwargs)self.algorithm.send(None)self.operators = []def set_transform(self, func):self.transform = compose(destruct, func, self.restruct)def minimize(self, f_df, x0, display=sys.stdout, maxiter=):self.display = displayself.theta = x0xk = self.algorithm.send(destruct(x0).copy())store = defaultdict(list)runtimes = []if len(self.operators) == :self.operators = [proxops.identity()]obj, grad = wrap(f_df, x0)transform = compose(destruct, *reversed(self.operators), self.restruct)self.optional_print(tp.header(['', '', '', '']))try:for k in count():tstart = perf_counter()f = obj(xk)df = grad(xk)xk = 
transform(self.algorithm.send(df))runtimes.append(perf_counter() - tstart)store[''].append(f)self.optional_print(tp.row([k,f,np.linalg.norm(destruct(df)),tp.humantime(runtimes[-])]))if k >= maxiter:breakexcept KeyboardInterrupt:passself.optional_print(tp.bottom())self.optional_print(u''.format(store[''][-]))self.optional_print(u''.format(tp.humantime(sum(runtimes))))self.optional_print(u''.format(tp.humantime(np.mean(runtimes)),tp.humantime(np.std(runtimes)),))return OptimizeResult({'': self.restruct(xk),'': f,'': self.restruct(df),'': k,'': np.array(store['']),})return GradientOptimizer", "docstring": "Turns a coroutine into a gradient based optimizer.", "id": "f9764:m0"} {"signature": "def add(self, operator, *args):", "body": "if isinstance(operator, str):op = getattr(proxops, operator)(*args)elif isinstance(operator, proxops.ProximalOperatorBaseClass):op = operatorelse:raise ValueError(\"\")self.operators.append(op)return self", "docstring": "Adds a proximal operator to the list of operators", "id": "f9764:c0:m2"} {"signature": "def __init__(self, tau=(, , ), tol=(, )):", "body": "self.operators = []self.tau = namedtuple('', ('', '', ''))(*tau)self.tol = namedtuple('', ('', ''))(*tol)", "docstring": "Proximal Consensus (ADMM)\n\nParameters\n----------\ntau : (float, float, float)\n ADMM scheduling. The augmented Lagrangian quadratic penalty parameter,\n rho, is initialized to tau[0]. Depending on the primal and dual residuals,\n the parameter is increased by a factor of tau[1] or decreased by a factor\n of tau[2] at every iteration. (See Boyd et. al. 2011 for details)\n\ntol : (float, float)\n Primal and Dual residual tolerances", "id": "f9764:c1:m0"} {"signature": "@gradient_optimizerdef sgd(lr=, mom=):", "body": "xk = yieldvk = np.zeros_like(xk)for k in count():grad = yield xkvk = mom * vk - lr * gradxk += vk", "docstring": "Stochastic gradient descent (SGD)\n\nParameters\n----------\nlr : float, optional\n Learning rate (Default: 1e-3)\n\nmom : float, optional\n Momentum (Default: 0.0)", "id": "f9765:m0"} {"signature": "@gradient_optimizerdef nag(lr=):", "body": "xk = yieldyk = xk.copy()for k in count():grad = yield ykxprev = xk.copy()xk = yk - lr * gradyk = xk + (k / (k + )) * (xk - xprev)", "docstring": "Nesterov's accelerated gradient (NAG)\n\nParameters\n----------\nlr : float, optional\n Learning rate (Default: 1e-3)", "id": "f9765:m1"} {"signature": "@gradient_optimizerdef rmsprop(lr=, damping=, decay=):", "body": "xk = yieldrms = np.zeros_like(xk)for k in count():grad = yield xkrms *= decayrms += ( - decay) * grad**xk -= lr * grad / (damping + np.sqrt(rms))", "docstring": "RMSProp\n\nParameters\n----------\nlr : float, optional\n Learning rate (Default: 1e-3)\n\ndamping : float, optional\n Damping term (Default: 1e-12)\n\ndecay : float, optional\n Decay of the learning rate (Default: 0)", "id": "f9765:m2"} {"signature": "@gradient_optimizerdef sag(nterms=, lr=):", "body": "xk = yieldgradients = deque([], nterms)for k in count():grad = yield xkgradients.append(grad)xk -= lr * np.mean(gradients, axis=)", "docstring": "Stochastic Average Gradient (SAG)\n\nParameters\n----------\nnterms : int, optional\n Number of gradient evaluations to use in the average (Default: 10)\n\nlr : float, optional\n (Default: 1e-3)", "id": "f9765:m3"} {"signature": "@gradient_optimizerdef smorms(lr=, epsilon=):", "body": "xk = yieldmem = np.ones_like(xk)g = np.zeros_like(xk)g2 = np.zeros_like(xk)for k in count():grad = yield xkr = / (mem + )r_1 = - rg *= r_1g += r * gradg2 *= r_1g2 += r * grad ** glr 
= g ** / (g2 + epsilon)mem = + mem * ( - glr)xk -= grad * np.minimum(lr, glr) / (np.sqrt(g2) + epsilon)", "docstring": "Squared mean over root mean squared cubed (SMORMS3)\n\nNotes\n-----\nby Simon Funk\nhttp://sifter.org/~simon/journal/20150420.html\n\nParameters\n----------\nlr : float, optional\n (Default: 1e-3)\n\nepsilon : float, optional\n (Default: 1e-8)", "id": "f9765:m4"} {"signature": "@gradient_optimizerdef adam(lr=, beta=(, ), epsilon=):", "body": "xk = yieldmk = np.zeros_like(xk)vk = np.zeros_like(xk)b1, b2 = betafor k in count(start=):grad = yield xkmk *= b1mk += ( - b1) * gradvk *= b2vk += ( - b2) * (grad ** )momentum_norm = mk / ( - b1 ** k)velocity_norm = np.sqrt(vk / ( - b2 ** k))xk -= lr * momentum_norm / (epsilon + velocity_norm)", "docstring": "ADAM\n\nParameters\n----------\nlr : float, optional\n Learnin rate (Default: 1e-3)\n\nbeta : (float, float)\n (Default: (0.9, 0.999))\n\nepsilon : float\n (Default: 1e-8)", "id": "f9765:m5"} {"signature": "@proxifydef nucnorm(x, rho, penalty, newshape=None):", "body": "orig_shape = x.shapeif newshape is not None:x = x.reshape(newshape)u, s, v = np.linalg.svd(x, full_matrices=False)sthr = np.maximum(s - (penalty / rho), )return np.linalg.multi_dot((u, np.diag(sthr), v)).reshape(orig_shape)", "docstring": "Nuclear norm\n\nParameters\n----------\npenalty : float\n nuclear norm penalty hyperparameter\n\nnewshape : tuple, optional\n Desired shape of the parameters to apply the nuclear norm to. The given\n parameters are reshaped to an array with this shape, or not reshaped if\n the value of newshape is None. (Default: None)", "id": "f9766:m1"} {"signature": "@proxifydef sparse(x, rho, penalty):", "body": "lmbda = penalty / rhoreturn (x - lmbda) * (x >= lmbda) + (x + lmbda) * (x <= -lmbda)", "docstring": "Proximal operator for the l1-norm: soft thresholding\n\nParameters\n----------\npenalty : float\n Strength or weight on the l1-norm", "id": "f9766:m2"} {"signature": "@proxifydef squared_error(x, rho, x_obs):", "body": "return (x + x_obs / rho) / ( + / rho)", "docstring": "Proximal operator for squared error (l2 or Fro. 
norm)\n\nsquared_error(x_obs)\n\nParameters\n----------\nx_obs : array_like\n Observed array or matrix that you want to stay close to", "id": "f9766:m3"} {"signature": "@proxifydef lbfgs(x, rho, f_df, maxiter=):", "body": "def f_df_augmented(theta):f, df = f_df(theta)obj = f + (rho / ) * np.linalg.norm(theta - x) ** grad = df + rho * (theta - x)return obj, gradres = scipy_minimize(f_df_augmented, x, jac=True, method='',options={'': maxiter, '': False})return res.x", "docstring": "Minimize the proximal operator of a given objective using L-BFGS\n\nParameters\n----------\nf_df : function\n Returns the objective and gradient of the function to minimize\n\nmaxiter : int\n Maximum number of L-BFGS iterations", "id": "f9766:m4"} {"signature": "@proxifydef tvd(x, rho, penalty):", "body": "return denoise_tv_bregman(x, rho / penalty)", "docstring": "Total variation denoising proximal operator\n\nParameters\n----------\npenalty : float", "id": "f9766:m5"} {"signature": "@proxifydef nonneg(x, rho):", "body": "return np.maximum(x, )", "docstring": "Projection onto the non-negative orthant", "id": "f9766:m6"} {"signature": "@proxifydef smooth(x, rho, penalty, axis=, newshape=None):", "body": "orig_shape = x.shapeif newshape is not None:x = x.reshape(newshape)n = x.shape[axis]lap_op = spdiags([( + rho / penalty) * np.ones(n),- * np.ones(n), - * np.ones(n)],[, -, ], n, n, format='')A = penalty * lap_opb = rho * np.rollaxis(x, axis, )return np.rollaxis(spsolve(A, b), axis, ).reshape(orig_shape)", "docstring": "Applies a smoothing operator along one dimension\n\ncurrently only accepts a matrix as input\n\nParameters\n----------\npenalty : float\n\naxis : int, optional\n Axis along which to apply the smoothing (Default: 0)\n\nnewshape : tuple, optional\n Desired shape of the parameters to apply the nuclear norm to. The given\n parameters are reshaped to an array with this shape, or not reshaped if\n the value of newshape is None. 
(Default: None)", "id": "f9766:m7"} {"signature": "@proxifydef sdcone(x, rho):", "body": "U, V = np.linalg.eigh(x)return V.dot(np.diag(np.maximum(U, )).dot(V.T))", "docstring": "Projection onto the semidefinite cone", "id": "f9766:m8"} {"signature": "@proxifydef linear(x, rho, weights):", "body": "return x - weights / rho", "docstring": "Proximal operator for a linear function w^T x", "id": "f9766:m9"} {"signature": "@proxifydef simplex(x, rho):", "body": "u = np.flipud(np.sort(x.ravel()))lambdas = ( - np.cumsum(u)) / ( + np.arange(u.size))ix = np.where(u + lambdas > )[].max()return np.maximum(x + lambdas[ix], )", "docstring": "Projection onto the probability simplex\n\nhttp://arxiv.org/pdf/1309.1541v1.pdf", "id": "f9766:m10"} {"signature": "@proxifydef columns(x, rho, proxop):", "body": "xnext = np.zeros_like(x)for ix in range(x.shape[]):xnext[:, ix] = proxop(x[:, ix], rho)return xnext", "docstring": "Applies a proximal operator to the columns of a matrix", "id": "f9766:m11"} {"signature": "@proxifydef identity(x, rho=None):", "body": "return x", "docstring": "Identity operator", "id": "f9766:m12"} {"signature": "@proxifydef fantope(x, rho, dim, tol=):", "body": "U, V = np.linalg.eigh(x)minval, maxval = np.maximum(U.min(), ), np.maximum(U.max(), * dim)while True:theta = * (maxval + minval)thr_eigvals = np.minimum(np.maximum((U - theta), ), )constraint = np.sum(thr_eigvals)if np.abs(constraint - dim) <= tol:breakelif constraint < dim:maxval = thetaelif constraint > dim:minval = thetaelse:breakreturn np.linalg.multi_dot((V, np.diag(thr_eigvals), V.T))", "docstring": "Projection onto the fantope [1]_\n\n.. [1] Vu, Vincent Q., et al. \"Fantope projection and selection: A\n near-optimal convex relaxation of sparse PCA.\" Advances in\n neural information processing systems. 2013.", "id": "f9766:m13"} {"signature": "def __init__(self, A, b):", "body": "self.P = A.T.dot(A)self.q = A.T.dot(b)self.n = self.q.size", "docstring": "Proximal operator for solving a linear least squares system, Ax = b\n\nParameters\n----------\nA : array_like\n Sensing matrix (Ax = b)\n\nb : array_like\n Responses (Ax = b)", "id": "f9766:c1:m0"} {"signature": "def objective(param_scales=(, ), xstar=None, seed=None):", "body": "ndim = len(param_scales)def decorator(func):@wraps(func)def wrapper(theta):return func(theta)def param_init():np.random.seed(seed)return np.random.randn(ndim,) * np.array(param_scales)wrapper.ndim = ndimwrapper.param_init = param_initwrapper.xstar = xstarreturn wrapperreturn decorator", "docstring": "Gives objective functions a number of dimensions and parameter range\n\n Parameters\n ----------\n param_scales : (int, int)\n Scale (std. dev.) 
for choosing each parameter\n\n xstar : array_like\n Optimal parameters", "id": "f9768:m0"} {"signature": "@objective(xstar=, param_scales=(,))def doublewell(theta):", "body": "k0, k1, depth = , , shallow = * k0 * theta ** + depthdeep = * k1 * theta ** obj = float(np.minimum(shallow, deep))grad = np.where(deep < shallow, k1 * theta, k0 * theta)return obj, grad", "docstring": "Pointwise minimum of two quadratic bowls", "id": "f9768:m1"} {"signature": "@objective(xstar=(, ))def rosenbrock(theta):", "body": "x, y = thetaobj = ( - x)** + * (y - x**)**grad = np.zeros()grad[] = * x - * (x * y - x**) - grad[] = * (y - x**)return obj, grad", "docstring": "Objective and gradient for the rosenbrock function", "id": "f9768:m2"} {"signature": "@objective(xstar=(, ))def sphere(theta):", "body": "return * np.linalg.norm(theta)**, theta", "docstring": "l2-norm of the parameters", "id": "f9768:m3"} {"signature": "@objective(xstar=(, ))def matyas(theta):", "body": "x, y = thetaobj = * (x ** + y ** ) - * x * ygrad = np.array([ * x - * y, * y - * x])return obj, grad", "docstring": "Matyas function", "id": "f9768:m4"} {"signature": "@objective(xstar=(, ))def beale(theta):", "body": "x, y = thetaA = - x + x * yB = - x + x * y**C = - x + x * y**obj = A ** + B ** + C ** grad = np.array([ * A * (y - ) + * B * (y ** - ) + * C * (y ** - ), * A * x + * B * x * y + * C * x * y ** ])return obj, grad", "docstring": "Beale's function", "id": "f9768:m5"} {"signature": "@objective(xstar=(, ))def booth(theta):", "body": "x, y = thetaA = x + * y - B = * x + y - obj = A** + B**grad = np.array([ * A + * B, * A + * B])return obj, grad", "docstring": "Booth's function", "id": "f9768:m6"} {"signature": "@objective(xstar=(-, -))def mccormick(theta):", "body": "x, y = thetaobj = np.sin(x + y) + (x - y)** - * x + * y + grad = np.array([np.cos(x + y) + * (x - y) - ,np.cos(x + y) - * (x - y) + ])return obj, grad", "docstring": "McCormick function", "id": "f9768:m7"} {"signature": "@objective(xstar=(, ))def camel(theta):", "body": "x, y = thetaobj = * x ** - * x ** + x ** / + x * y + y ** grad = np.array([ * x - * x ** + x ** + y,x + * y])return obj, grad", "docstring": "Three-hump camel function", "id": "f9768:m8"} {"signature": "@objective(xstar=(, ))def michalewicz(theta):", "body": "x, y = thetaobj = - np.sin(x) * np.sin(x ** / np.pi) ** -np.sin(y) * np.sin( * y ** / np.pi) ** grad = np.array([- np.cos(x) * np.sin(x ** / np.pi) ** - ( / np.pi) * x *np.sin(x) * np.sin(x ** / np.pi) ** * np.cos(x ** / np.pi),- np.cos(y) * np.sin( * y ** / np.pi) ** - ( / np.pi) * y * np.sin(y) *np.sin( * y ** / np.pi) ** * np.cos( * y ** / np.pi),])return obj, grad", "docstring": "Michalewicz function", "id": "f9768:m9"} {"signature": "@objective(xstar=(, ))def bohachevsky1(theta):", "body": "x, y = thetaobj = x ** + * y ** - * np.cos( * np.pi * x) - * np.cos( * np.pi * y) + grad = np.array([ * x + * np.sin( * np.pi * x) * * np.pi, * y + * np.sin( * np.pi * y) * * np.pi,])return obj, grad", "docstring": "One of the Bohachevsky functions", "id": "f9768:m10"} {"signature": "@objective(xstar=(, ))def zakharov(theta):", "body": "x, y = thetaobj = x ** + y ** + ( * x + y) ** + ( * x + y) ** grad = np.array([ * x + y + * ( * x + y) ** , * y + x + * ( * x + y) ** ,])return obj, grad", "docstring": "Zakharov function", "id": "f9768:m11"} {"signature": "@objective(xstar=(, / np.sqrt()))def dixon_price(theta):", "body": "x, y = thetaobj = (x - ) ** + * ( * y ** - x) ** grad = np.array([ * x - - * ( * y ** - x), * ( * y ** - x) * y,])return obj, grad", 
"docstring": "Dixon-Price function", "id": "f9768:m12"} {"signature": "@objective(xstar=(, -))def goldstein_price(theta):", "body": "x, y = thetaobj = ( + (x + y + ) ** * ( - * x + * x ** - * y + * x * y + * y ** )) *( + ( * x - * y) ** *( - * x + * x ** + * y - * x * y + * x ** ))grad = np.array([(( * x - * y)** * ( * x - * y - ) + ( * x - * y) *( * x** - * x * y - * x + * y + )) *((x + y + )** * ( * x** + * x * y - * x + * y** - * y + ) + ) +(( * x - * y)** * ( * x** - * x * y - * x + * y + ) + ) *((x + y + )** *( * x + * y - ) + ( * x + * y + ) *( * x** + * x * y - * x + * y** - * y + )),((- * x + ) * ( * x - * y)** + (- * x + * y) *( * x** - * x * y - * x + * y + )) *((x + y + )** * ( * x** + * x * y - * x + * y** - * y + ) + ) +(( * x - * y)** * ( * x** - * x * y - * x + * y + ) + ) *((x + y + )** * ( * x + * y - ) + ( * x + * y + ) *( * x** + * x * y - * x + * y** - * y + )),])return obj, grad", "docstring": "Goldstein-Price function", "id": "f9768:m13"} {"signature": "@objective(xstar=(-, -))def styblinski_tang(theta):", "body": "x, y = thetaobj = * (x ** - * x ** + * x + y ** - * y ** + * y)grad = np.array([ * x ** - * x + , * y ** - * y + ,])return obj, grad", "docstring": "Styblinski-Tang function", "id": "f9768:m14"} {"signature": "def wrap(f_df, xref, size=):", "body": "memoized_f_df = lrucache(lambda x: f_df(restruct(x, xref)), size)objective = compose(first, memoized_f_df)gradient = compose(destruct, second, memoized_f_df)return objective, gradient", "docstring": "Memoizes an objective + gradient function, and splits it into\ntwo functions that return just the objective and gradient, respectively.\n\nParameters\n----------\nf_df : function\n Must be unary (takes a single argument)\n\nxref : list, dict, or array_like\n The form of the parameters\n\nsize : int, optional\n Size of the cache (Default=1)", "id": "f9770:m0"} {"signature": "def docstring(docstr):", "body": "def decorator(func):@wraps(func)def wrapper(*args, **kwargs):return func(*args, **kwargs)wrapper.__doc__ = docstrreturn wrapperreturn decorator", "docstring": "Decorates a function with the given docstring\n\nParameters\n----------\ndocstr : string", "id": "f9770:m1"} {"signature": "def lrucache(func, size):", "body": "if size == :return funcelif size < :raise ValueError(\"\")if not is_arity(, func):raise ValueError(\"\")cache = OrderedDict()def wrapper(x):if not(type(x) is np.ndarray):raise ValueError(\"\")if x.size <= :key = hash(x.tostring())else:key = hash(repr(x))if key not in cache:if len(cache) >= size:cache.popitem(last=False)cache[key] = func(x)return cache[key]return wrapper", "docstring": "A simple implementation of a least recently used (LRU) cache.\nMemoizes the recent calls of a computationally intensive function.\n\nParameters\n----------\nfunc : function\n Must be unary (takes a single argument)\n\nsize : int\n The size of the cache (number of previous calls to store)", "id": "f9770:m2"} {"signature": "def check_grad(f_df, xref, stepsize=, tol=, width=, style='', out=sys.stdout):", "body": "CORRECT = u''INCORRECT = u''obj, grad = wrap(f_df, xref, size=)x0 = destruct(xref)df = grad(x0)out.write(tp.header([\"\", \"\", \"\"], width=width, style=style) + \"\")out.flush()def parse_error(number):failure = \"\"passing = \"\"warning = \"\"end = \"\"base = \"\"if error < * tol:return base.format(passing, error, end)elif error < tol:return base.format(warning, error, end)else:return base.format(failure, error, end)num_errors = for j in range(x0.size):dx = np.zeros(x0.size)dx[j] = stepsizedf_approx = (obj(x0 
+ dx) - obj(x0 - dx)) / ( * stepsize)df_analytic = df[j]abs_error = np.linalg.norm(df_approx - df_analytic)error = abs_error if np.allclose(abs_error, ) else abs_error /(np.linalg.norm(df_analytic) + np.linalg.norm(df_approx))num_errors += error >= tolerrstr = CORRECT if error < tol else INCORRECTout.write(tp.row([df_approx, df_analytic, parse_error(error) + '' + errstr],width=width, style=style) + \"\")out.flush()out.write(tp.bottom(, width=width, style=style) + \"\")return num_errors", "docstring": "Compares the numerical gradient to the analytic gradient\n\nParameters\n----------\nf_df : function\n The analytic objective and gradient function to check\n\nx0 : array_like\n Parameter values to check the gradient at\n\nstepsize : float, optional\n Stepsize for the numerical gradient. Too big and this will poorly estimate the gradient.\n Too small and you will run into precision issues (default: 1e-6)\n\ntol : float, optional\n Tolerance to use when coloring correct/incorrect gradients (default: 1e-5)\n\nwidth : int, optional\n Width of the table columns (default: 15)\n\nstyle : string, optional\n Style of the printed table, see tableprint for a list of styles (default: 'round')", "id": "f9770:m3"} {"signature": "def create_db():", "body": "try:return psycopg2.connect(**db_state[''])except psycopg2.OperationalError as exc:nosuch_db = '' % db_state['']['']if nosuch_db in str(exc):try:master = psycopg2.connect(database='')master.rollback()master.autocommit = Truecursor = master.cursor()cursor.execute('' % db_state[''][''])cursor.close()master.close()except psycopg2.Error as exc:message = (''+ db_state['']['']+ '' % exc)raise RuntimeError(message)else:conn = psycopg2.connect(**db_state[''])db_state[''] = Truereturn conn", "docstring": "connect to test db", "id": "f9777:m1"} {"signature": "def drop_db():", "body": "if not db_state['']:returnget_conn().close()master = psycopg2.connect(database='')master.rollback()master.autocommit = Truecursor = master.cursor()cursor.execute('' % db_state[''][''])cursor.close()master.close()", "docstring": "Drop test db", "id": "f9777:m2"} {"signature": "def __init__(self, connection, table, xform):", "body": "super(RenameReplace, self).__init__(connection, table)self.xform = xform", "docstring": "xform must be a function which translates old\nnames to new ones, used on tables & pk constraints", "id": "f9780:c1:m0"} {"signature": "def timestamp(_, dt):", "body": "dt = util.to_utc(dt)unix_timestamp = calendar.timegm(dt.timetuple())val = ((unix_timestamp - psql_epoch) * ) + dt.microsecondreturn ('', (, val))", "docstring": "get microseconds since 2000-01-01 00:00", "id": "f9784:m3"} {"signature": "def numeric(_, n):", "body": "try:nt = n.as_tuple()except AttributeError:raise TypeError('' % n)digits = []if isinstance(nt.exponent, str):ndigits = weight = sign = dscale = else:decdigits = list(reversed(nt.digits + (nt.exponent % ) * (,)))weight = while decdigits:if any(decdigits[:]):breakweight += del decdigits[:]while decdigits:digits.insert(, ndig(decdigits[:]))del decdigits[:]ndigits = len(digits)weight += nt.exponent // + ndigits - sign = nt.sign * dscale = -min(, nt.exponent)data = [ndigits, weight, sign, dscale] + digitsreturn ('' % ndigits, [ * len(data)] + data)", "docstring": "NBASE = 1000\nndigits = total number of base-NBASE digits\nweight = base-NBASE weight of first digit\nsign = 0x0000 if positive, 0x4000 if negative, 0xC000 if nan\ndscale = decimal digits after decimal place", "id": "f9784:m5"} {"signature": "def _is_url_arg(p):", "body": "return 
p.startswith('')", "docstring": "Is an argument of the URL.\n\n>>> _is_url_arg('[idAction]')\nTrue\n>>> _is_url_arg('actions')\nFalse", "id": "f9786:m0"} {"signature": "def _is_api_definition(line):", "body": "return line.split('', )[] in HTTP_METHODS", "docstring": "Is a definition of a Trello endpoint.\n\n>>> _is_api_definition('GET /1/actions/[idAction]')\nTrue\n>>> _is_api_definition('action')\nFalse", "id": "f9786:m1"} {"signature": "def _camelcase_to_underscore(url):", "body": "def upper2underscore(text):for char in text:if char.islower():yield charelse:yield ''if char.isalpha():yield char.lower()return ''.join(upper2underscore(url))", "docstring": "Translate camelCase into underscore format.\n\n>>> _camelcase_to_underscore('minutesBetweenSummaries')\n'minutes_between_summaries'", "id": "f9786:m2"} {"signature": "def create_tree(endpoints):", "body": "tree = {}for method, url, doc in endpoints:path = [p for p in url.strip('').split('')]here = treeversion = path[]here.setdefault(version, {})here = here[version]for p in path[:]:part = _camelcase_to_underscore(p)here.setdefault(part, {})here = here[part]if not '' in here:here[''] = [[method, doc]]else:if not method in here['']:here[''].append([method, doc])return tree", "docstring": "Creates the Trello endpoint tree.\n\n>>> r = {'1': { \\\n 'actions': {'METHODS': {'GET'}}, \\\n 'boards': { \\\n 'members': {'METHODS': {'DELETE'}}}} \\\n }\n>>> r == create_tree([ \\\n 'GET /1/actions/[idAction]', \\\n 'DELETE /1/boards/[board_id]/members/[idMember]'])\nTrue", "id": "f9786:m3"} {"signature": "def main():", "body": "ep = requests.get(TRELLO_API_DOC).contentroot = html.fromstring(ep)links = root.xpath('')pages = [requests.get(TRELLO_API_DOC + u)for u in links if u.endswith('')]endpoints = []for page in pages:root = html.fromstring(page.content)sections = root.xpath('')for sec in sections:ep_html = etree.tostring(sec).decode('')ep_text = html2text(ep_html).splitlines()match = EP_DESC_REGEX.match(ep_text[])if not match:continueep_method, ep_url = match.groups()ep_text[] = ''.join([ep_method, ep_url])ep_doc = b64encode(gzip.compress(''.join(ep_text).encode('')))endpoints.append((ep_method, ep_url, ep_doc))print(yaml.dump(create_tree(endpoints)))", "docstring": "Prints the complete YAML.", "id": "f9786:m4"} {"signature": "def generate_api(version):", "body": "def get_partial_api(key, token=None):return TrelloAPI(ENDPOINTS[version], version, key, token=token)get_partial_api.__doc__ =\"\"\"\"\"\".format(version)return get_partial_api", "docstring": "Generates a factory function to instantiate the API with the given\nversion.", "id": "f9787:m0"} {"signature": "@propertydef _url(self):", "body": "if self._api_arg:mypart = str(self._api_arg)else:mypart = self._nameif self._parent:return ''.join(filter(None, [self._parent._url, mypart]))else:return mypart", "docstring": "Resolve the URL to this point.\n\n>>> trello = TrelloAPIV1('APIKEY')\n>>> trello.batch._url\n'1/batch'\n>>> trello.boards(board_id='BOARD_ID')._url\n'1/boards/BOARD_ID'\n>>> trello.boards(board_id='BOARD_ID')(field='FIELD')._url\n'1/boards/BOARD_ID/FIELD'\n>>> trello.boards(board_id='BOARD_ID').cards(filter='FILTER')._url\n'1/boards/BOARD_ID/cards/FILTER'", "id": "f9787:c0:m2"} {"signature": "def _api_call(self, method_name, *args, **kwargs):", "body": "params = kwargs.setdefault('', {})params.update({'': self._apikey})if self._token is not None:params.update({'': self._token})http_method = getattr(requests, method_name)return http_method(TRELLO_URL + self._url, *args, **kwargs)", 
"docstring": "Makes the HTTP request.", "id": "f9787:c0:m3"} {"signature": "def __call__(self, **kwargs):", "body": "if not kwargs:raise ValueError(\"\".format(''.join(self._allowed_args)))elif len(kwargs) > :raise ValueError(\"\")elif set(kwargs.keys()) <= set(self._allowed_args):_name, _api_arg = list(kwargs.items())[]name = '' + _name + ''return TrelloAPI(endpoints=self._endpoints[name],name=name,apikey=self._apikey,parent=self,api_arg=_api_arg,token=self._token)else:raise ValueError(\"\".format(kwargs.keys()))", "docstring": "Adds a variable parameter to the API URL.", "id": "f9787:c0:m4"} {"signature": "def findall(text):", "body": "results = TIMESTRING_RE.findall(text)dates = []for date in results:if re.compile('', re.I).match(date[]):dates.append((date[].strip(), Range(date[])))else:dates.append((date[].strip(), Date(date[])))return dates", "docstring": "Find all the timestrings within a block of text.\n\n >>> timestring.findall(\"once upon a time, about 3 weeks ago, there was a boy whom was born on august 15th at 7:20 am. epic.\")\n [\n ('3 weeks ago,', ),\n ('august 15th at 7:20 am', )\n ]", "id": "f9791:m0"} {"signature": "def __init__(self, start, end=None, offset=None, start_of_week=, tz=None, verbose=False):", "body": "self._dates = []pgoffset = Noneif start is None:raise TimestringInvalid(\"\")if not isinstance(start, (Date, datetime)):start = str(start)if end and not isinstance(end, (Date, datetime)):end = str(end)if start and end:\"\"\"\"\"\"self._dates = (Date(start, tz=tz), Date(end, tz=tz))elif start == '':self._dates = (Date(''), Date(''))elif re.search(r'', start):\"\"\"\"\"\"start = re.sub('', '', start.lower())r = tuple(re.split(r'', start.strip()))self._dates = (Date(r[], tz=tz), Date(r[-], tz=tz))elif re.match(r\"\", start):\"\"\"\"\"\"start, end = tuple(re.sub('', '', start).split(''))self._dates = (Date(start), Date(end))else:now = datetime.now()if re.search(r\"\", start):pgoffset = re.search(r\"\", start).group() + \"\"if tz:now = now.replace(tzinfo=pytz.timezone(str(tz)))res = TIMESTRING_RE.search(start)if res:group = res.groupdict()if verbose:print(dict(map(lambda a: (a, group.get(a)), filter(lambda a: group.get(a), group))))if (group.get('') or group.get('')) is not None:delta = (group.get('') or group.get('')).lower()start = Date(\"\", offset=offset, tz=tz)di = \"\" % (str(int(group[''] or )), delta)if group[''] == '':if delta.startswith(''):start = Date(datetime(now.year, , ), offset=offset, tz=tz)elif delta.startswith(''):start = Date(datetime(now.year, now.month, ), offset=offset, tz=tz)elif delta.startswith(''):start = Date(\"\", offset=offset, tz=tz) - (str(Date(\"\", tz=tz).date.weekday())+'')elif delta.startswith(''):start = Date(\"\", offset=offset, tz=tz)elif delta.startswith(''):start = Date(\"\", offset=dict(hour=now.hour+), tz=tz)elif delta.startswith('') or delta.startswith(''):start = Date(\"\", tz=tz)else:raise TimestringInvalid(\"\")end = start + dielif group[''] == '':if int(group[''] or ) > :di = \"\" % (str(int(group[''] or ) - ), delta)end = start + dielif group.get('') or group[''] == '' and int(group[''] or ) == :end = start - dielse:if not (delta.startswith('') or delta.startswith('') or delta.startswith('')):start = Range('', offset=offset, tz=tz).endend = start - di elif group.get(''):start = Date(start, offset=offset, tz=tz)start = start.replace(day=)end = start + ''elif group.get(''):start = Date(start, offset=offset, tz=tz)start = start.replace(day=, month=)end = start + ''else:start = Date(start, offset=offset, tz=tz)end = start 
+ ''else:raise TimestringInvalid(\"\")if end is None:end = start + ''if start > end:start, end = copy(end), copy(start)if pgoffset:start = start - pgoffsetif end != '':end = end - pgoffsetself._dates = (start, end)if self._dates[] > self._dates[]:self._dates = (self._dates[], self._dates[] + '')", "docstring": "`start` can be type or ", "id": "f9792:c0:m0"} {"signature": "def __len__(self):", "body": "return abs(int(self[].to_unixtime() - self[].to_unixtime()))", "docstring": "Returns how many `seconds` the `Range` lasts.", "id": "f9792:c0:m11"} {"signature": "def cmp(self, other):", "body": "if isinstance(other, Range):start = self.start.replace(tzinfo=other.start.tz) if other.start.tz and self.start.tz is None else self.startend = self.end.replace(tzinfo=other.end.tz) if other.end.tz and self.end.tz is None else self.endif start == other.start and end == other.end:return elif start < other.start:return -else:return elif isinstance(other, Date):if other.tz and self.start.tz is None:return if other == self.start.replace(tzinfo=other.tz) else - if other > self.start.replace(tzinfo=other.start.tz) else return if other == self.start else - if other > self.start else else:return self.cmp(Range(other, tz=self.start.tz))", "docstring": "*Note: checks Range.start() only*\n Key: self = [], other = {}\n * [ {----]----} => -1\n * {---[---} ] => 1\n * [---] {---} => -1\n * [---] same as {---} => 0\n * [--{-}--] => -1", "id": "f9792:c0:m15"} {"signature": "def __contains__(self, other):", "body": "if isinstance(other, Date):if self.start == '' and self.end >= other:return Trueelif self.end == '' and self.start <= other:return Trueelif other == '':return self.start == '' or self.end == ''elif other.tz and self.start.tz is None:return self.start.replace(tzinfo=other.tz).to_unixtime() <= other.to_unixtime() <= self.end.replace(tzinfo=other.tz).to_unixtime()return self.start <= other <= self.endelif isinstance(other, Range):if self.start == '':return other.end <= self.endelif self.end == '':return self.start <= other.startelif other.start.tz and self.start.tz is None:return self.start.replace(tzinfo=other.start.tz).to_unixtime() <= other.start.to_unixtime() <= self.end.replace(tzinfo=other.start.tz).to_unixtime()and self.start.replace(tzinfo=other.start.tz).to_unixtime() <= other.end.to_unixtime() <= self.end.replace(tzinfo=other.start.tz).to_unixtime()return self.start <= other.start <= self.end and self.start <= other.end <= self.endelse:return self.__contains__(Range(other, tz=self.start.tz))", "docstring": "*Note: checks Range.start() only*\n Key: self = [], other = {}\n * [---{-}---] => True else False", "id": "f9792:c0:m16"} {"signature": "def cut(self, by, from_start=True):", "body": "s, e = copy(self.start), copy(self.end)if from_start:e = s + byelse:s = e - byreturn Range(s, e)", "docstring": "Cuts this object from_start to the number requestd\n returns new instance", "id": "f9792:c0:m17"} {"signature": "def next(self, times=):", "body": "return Range(copy(self.end),self.end + self.elapse, tz=self.start.tz)", "docstring": "Returns a new instance of self\n times is not supported yet.", "id": "f9792:c0:m19"} {"signature": "def prev(self, times=):", "body": "return Range(self.start - self.elapse,copy(self.start), tz=self.start.tz)", "docstring": "Returns a new instance of self\n times is not supported yet.", "id": "f9792:c0:m20"} {"signature": "def replace(self, **k):", "body": "if self.date != '':return Date(self.date.replace(**k))else:return Date('')", "docstring": "Note returns a new Date obj", 
"id": "f9794:c0:m17"} {"signature": "def adjust(self, to):", "body": "if self.date == '':returnnew = copy(self)if type(to) in (str, unicode):to = to.lower()res = TIMESTRING_RE.search(to)if res:rgroup = res.groupdict()if (rgroup.get('') or rgroup.get('')):i = int(text2num(rgroup.get('', ''))) * (- if to.startswith('') else )delta = (rgroup.get('') or rgroup.get('')).lower()if delta.startswith(''):try:new.date = new.date.replace(year=(new.date.year + i))except ValueError:new.date = new.date + timedelta(days=( * i))elif delta.startswith(''):if (new.date.month + i) > :new.date = new.date.replace(month=(i - (i / )),year=(new.date.year + + (i / )))elif (new.date.month + i) < :new.date = new.date.replace(month=, year=(new.date.year - ))else:new.date = new.date.replace(month=(new.date.month + i))elif delta.startswith(''):passelif delta.startswith(''):new.date = new.date + timedelta(days=( * i))elif delta.startswith(''):new.date = new.date + timedelta(seconds=i)else:new.date = new.date + timedelta(**{('' if delta.startswith('') else '' if delta.startswith('') else '' if delta.startswith('') else ''): i})return newelse:new.date = new.date + timedelta(seconds=int(to))return newraise TimestringInvalid('')", "docstring": "Adjusts the time from kwargs to timedelta\n**Will change this object**\n\nreturn new copy of self", "id": "f9794:c0:m18"} {"signature": "def __str__(self):", "body": "return str(self.date)", "docstring": "Returns date in representation of `%x %X` ie `2013-02-17 00:00:00`", "id": "f9794:c0:m23"} {"signature": "def iteritems(dct):", "body": "if sys.version_info[] == :return dct.iteritems()return dct.items()", "docstring": "Get iterator for dict items", "id": "f9796:m0"} {"signature": "def epoll_poller(timeout=, map=None):", "body": "if map is None:map = asyncore.socket_mappollster = select.epoll()if map:for fd, obj in iteritems(map):flags = if obj.readable():flags |= select.POLLIN | select.POLLPRIif obj.writable():flags |= select.POLLOUTif flags:flags |= select.POLLERR | select.POLLHUP | select.POLLNVALpollster.register(fd, flags)try:r = pollster.poll(timeout)except select.error as err:if err.args[] != EINTR:raiser = []for fd, flags in r:obj = map.get(fd)if obj is None:continueasyncore.readwrite(obj, flags)", "docstring": "A poller which uses epoll(), supported on Linux 2.5.44 and newer\n\nBorrowed from here:\nhttps://github.com/m13253/python-asyncore-epoll/blob/master/asyncore_epoll.py#L200", "id": "f9796:m1"} {"signature": "def get_poll_func():", "body": "if hasattr(select, ''):poll_func = epoll_pollerelif hasattr(select, ''):poll_func = asyncore.poll2else:poll_func = asyncore.pollreturn poll_func", "docstring": "Get the best available socket poll function\n\n :return: poller function", "id": "f9796:m2"} {"signature": "def make_server(host, port, app=None,server_class=AsyncWsgiServer,handler_class=AsyncWsgiHandler,ws_handler_class=None,ws_path=''):", "body": "handler_class.ws_handler_class = ws_handler_classhandler_class.ws_path = ws_pathhttpd = server_class((host, port), RequestHandlerClass=handler_class)httpd.set_app(app)return httpd", "docstring": "Create server instance with an optional WebSocket handler\n\n For pure WebSocket server ``app`` may be ``None`` but an attempt to access\n any path other than ``ws_path`` will cause server error.\n\n :param host: hostname or IP\n :type host: str\n :param port: server port\n :type port: int\n :param app: WSGI application\n :param server_class: WSGI server class, defaults to AsyncWsgiServer\n :param handler_class: WSGI handler class, 
defaults to AsyncWsgiHandler\n :param ws_handler_class: WebSocket hanlder class, defaults to ``None``\n :param ws_path: WebSocket path on the server, defaults to '/ws'\n :type ws_path: str, optional\n :return: initialized server instance", "id": "f9796:m3"} {"signature": "def finish_response(self):", "body": "self.iterator = iter(self.result)", "docstring": "Get WSGI response iterator for sending in handle_write", "id": "f9796:c0:m2"} {"signature": "def poll_once(self, timeout=):", "body": "if self._map:self._poll_func(timeout, self._map)", "docstring": "Poll active sockets once\n\nThis method can be used to allow aborting server polling loop\non some condition.\n\n:param timeout: polling timeout", "id": "f9796:c2:m4"} {"signature": "def handle_request(self):", "body": "self.poll_once()", "docstring": "Call :meth:`poll_once`", "id": "f9796:c2:m5"} {"signature": "def serve_forever(self, poll_interval=):", "body": "logger.info(''.format(self.server_name, self.server_port))while True:try:self.poll_once(poll_interval)except (KeyboardInterrupt, SystemExit):breakself.handle_close()logger.info('')", "docstring": "Start serving HTTP requests\n\nThis method blocks the current thread.\n\n:param poll_interval: polling timeout\n:return:", "id": "f9796:c2:m6"} {"signature": "def handleMessage(self):", "body": "pass", "docstring": "Called when websocket frame is received.\nTo access the frame data call self.data.\n\nIf the frame is Text then self.data is a unicode object.\nIf the frame is Binary then self.data is a bytearray object.", "id": "f9797:c2:m1"} {"signature": "def handleConnected(self):", "body": "pass", "docstring": "Called when a websocket client connects to the server.", "id": "f9797:c2:m2"} {"signature": "def handleClose(self):", "body": "pass", "docstring": "Called when a websocket server gets a Close frame from a client.", "id": "f9797:c2:m3"} {"signature": "def close(self, status=, reason=u''):", "body": "try:if self.closed is False:close_msg = bytearray()close_msg.extend(struct.pack(\"\", status))if _check_unicode(reason):close_msg.extend(reason.encode(''))else:close_msg.extend(reason)self._sendMessage(False, CLOSE, close_msg)finally:self.closed = True", "docstring": "Send Close frame to the client. 
The underlying socket is only closed\nwhen the client acknowledges the Close frame.\n\nstatus is the closing identifier.\nreason is the reason for the close.", "id": "f9797:c2:m6"} {"signature": "def sendFragmentStart(self, data):", "body": "opcode = BINARYif _check_unicode(data):opcode = TEXTself._sendMessage(True, opcode, data)", "docstring": "Send the start of a data fragment stream to a websocket client.\nSubsequent data should be sent using sendFragment().\nA fragment stream is completed when sendFragmentEnd() is called.\n\nIf data is a unicode object then the frame is sent as Text.\nIf the data is a bytearray object then the frame is sent as Binary.", "id": "f9797:c2:m8"} {"signature": "def sendFragment(self, data):", "body": "self._sendMessage(True, STREAM, data)", "docstring": "see sendFragmentStart()\n\nIf data is a unicode object then the frame is sent as Text.\nIf the data is a bytearray object then the frame is sent as Binary.", "id": "f9797:c2:m9"} {"signature": "def sendFragmentEnd(self, data):", "body": "self._sendMessage(False, STREAM, data)", "docstring": "see sendFragmentEnd()\n\nIf data is a unicode object then the frame is sent as Text.\nIf the data is a bytearray object then the frame is sent as Binary.", "id": "f9797:c2:m10"} {"signature": "def sendMessage(self, data):", "body": "opcode = BINARYif _check_unicode(data):opcode = TEXTself._sendMessage(False, opcode, data)", "docstring": "Send websocket data frame to the client.\n\nIf data is a unicode object then the frame is sent as Text.\nIf the data is a bytearray object then the frame is sent as Binary.", "id": "f9797:c2:m11"} {"signature": "@classmethoddef get(cls, update_dict=None, remove_fields=None):", "body": "cls.demo_course_count += course_copy = copy.deepcopy(cls.DEMO_COURSE)if update_dict:if \"\" in update_dict:course_copy[\"\"].update(update_dict[\"\"])del update_dict[\"\"]course_copy.update(update_dict)course_copy.update({\"\": \"\".format(course_copy[\"\"], cls.demo_course_count)})if remove_fields:for remove_field in remove_fields:if remove_field in course_copy:del course_copy[remove_field]return course_copy", "docstring": "get a new demo course", "id": "f9806:c0:m0"} {"signature": "@classmethoddef reset_count(cls):", "body": "cls.demo_course_count = ", "docstring": "go back to zero", "id": "f9806:c0:m1"} {"signature": "@staticmethoddef index(searcher, course_info):", "body": "searcher.index(doc_type=\"\", sources=course_info)", "docstring": "Adds course info dictionary to the index", "id": "f9806:c0:m2"} {"signature": "@classmethoddef get_and_index(cls, searcher, update_dict=None, remove_fields=None):", "body": "cls.index(searcher, [cls.get(update_dict, remove_fields)])", "docstring": "Adds course info dictionary to the index", "id": "f9806:c0:m3"} {"signature": "@propertydef _is_elastic(self):", "body": "return isinstance(self.searcher, ElasticSearchEngine)", "docstring": "check search engine implementation, to manage cleanup differently", "id": "f9806:c1:m0"} {"signature": "def _reset_mocked_tracker(self):", "body": "self.mock_tracker.reset_mock()", "docstring": "reset mocked tracker and clear logged emits", "id": "f9807:c0:m0"} {"signature": "def assert_no_events_were_emitted(self):", "body": "self.assertFalse(self.mock_tracker.emit.called)", "docstring": "Ensures no events were emitted since the last event related assertion", "id": "f9807:c0:m3"} {"signature": "def assert_search_initiated_event(self, search_term, size, page):", "body": "initiated_search_call = self.mock_tracker.emit.mock_calls[] 
expected_result = call('', {\"\": six.text_type(search_term),\"\": size,\"\": page,})self.assertEqual(expected_result, initiated_search_call)", "docstring": "Ensures a search initiated event was emitted", "id": "f9807:c0:m4"} {"signature": "def assert_results_returned_event(self, search_term, size, page, total):", "body": "returned_results_call = self.mock_tracker.emit.mock_calls[] expected_result = call('', {\"\": six.text_type(search_term),\"\": size,\"\": page,\"\": total,})self.assertEqual(expected_result, returned_results_call)", "docstring": "Ensures a results returned event was emitted", "id": "f9807:c0:m5"} {"signature": "def assert_initiated_return_events(self, search_term, size, page, total):", "body": "self.assertEqual(self.mock_tracker.emit.call_count, ) self.assert_search_initiated_event(search_term, size, page)self.assert_results_returned_event(search_term, size, page, total)", "docstring": "Asserts search initiated and results returned events were emitted", "id": "f9807:c0:m6"} {"signature": "@propertydef additional_property(self):", "body": "return \"\"", "docstring": "additional property that should appear within processed results", "id": "f9810:c1:m0"} {"signature": "@propertydef url(self):", "body": "if \"\" not in self._results_fields or \"\" not in self._results_fields:raise ValueError(\"\")return u\"\".format(course_id=self._results_fields[\"\"],location=self._results_fields[\"\"],)", "docstring": "Property to display the url for the given location, useful for allowing navigation", "id": "f9810:c1:m1"} {"signature": "def should_remove(self, user):", "body": "return \"\" in self._results_fields", "docstring": "remove items when url is None", "id": "f9810:c1:m2"} {"signature": "def post_request(body, course_id=None):", "body": "address = '' if course_id is None else ''.format(course_id)response = Client().post(address, body)return getattr(response, \"\", ), json.loads(getattr(response, \"\", None))", "docstring": "Helper method to post the request and process the response", "id": "f9813:m0"} {"signature": "def post_discovery_request(body):", "body": "address = ''response = Client().post(address, body)return getattr(response, \"\", ), json.loads(getattr(response, \"\", None))", "docstring": "Helper method to post the request and process the response", "id": "f9813:m1"} {"signature": "@propertydef searcher(self):", "body": "if self._searcher is None:self._searcher = SearchEngine.get_search_engine(TEST_INDEX_NAME)return self._searcher", "docstring": "cached instance of search engine", "id": "f9813:c0:m0"} {"signature": "def search(self, **kwargs): ", "body": "raise exceptions.ElasticsearchException(\"\")", "docstring": "this will definitely fail", "id": "f9813:c4:m0"} {"signature": "def json_date_to_datetime(json_date_string_value):", "body": "if \"\" in json_date_string_value:if \"\" in json_date_string_value:format_string = \"\"else:format_string = \"\"if json_date_string_value.endswith(\"\"):format_string += \"\"else:format_string = \"\"return datetime.strptime(json_date_string_value,format_string)", "docstring": "converts json date string to date object", "id": "f9814:m0"} {"signature": "def _find_field(doc, field_name):", "body": "if not isinstance(doc, dict):raise ValueError('')if not isinstance(field_name, six.string_types):raise ValueError('')immediate_field, remaining_path = field_name.split('', ) if '' in field_name else (field_name, None)field_value = doc.get(immediate_field)if isinstance(field_value, dict):return _find_field(field_value, remaining_path)return 
field_value", "docstring": "find the dictionary field corresponding to the . limited name", "id": "f9814:m1"} {"signature": "def _filter_intersection(documents_to_search, dictionary_object, include_blanks=False):", "body": "if not dictionary_object:return documents_to_searchdef value_matches(doc, field_name, field_value):\"\"\"\"\"\"compare_value = _find_field(doc, field_name)if compare_value is None:return include_blanksif isinstance(field_value, (DateRange, datetime)):if isinstance(compare_value, six.string_types):compare_value = json_date_to_datetime(compare_value)field_has_tz_info = Falseif isinstance(field_value, DateRange):field_has_tz_info = ((field_value.lower and field_value.lower.tzinfo is not None) or(field_value.upper and field_value.upper.tzinfo is not None))else:field_has_tz_info = field_value.tzinfo is not Noneif not field_has_tz_info:compare_value = compare_value.replace(tzinfo=None)elif compare_value.tzinfo is None:compare_value = compare_value.replace(tzinfo=pytz.UTC)if isinstance(field_value, ValueRange):return ((field_value.lower is None or compare_value >= field_value.lower) and(field_value.upper is None or compare_value <= field_value.upper))elif _is_iterable(compare_value) and not _is_iterable(field_value):return any((item == field_value for item in compare_value))elif _is_iterable(field_value) and not _is_iterable(compare_value):return any((item == compare_value for item in field_value))elif _is_iterable(compare_value) and _is_iterable(field_value):return any((six.text_type(item) in field_value for item in compare_value))return compare_value == field_valuefiltered_documents = documents_to_searchfor field_name, field_value in dictionary_object.items():filtered_documents = [d for d in filtered_documents if value_matches(d, field_name, field_value)]return filtered_documents", "docstring": "Filters out documents that do not match all of the field values within the dictionary_object\nIf include_blanks is True, then the document is considered a match if the field is not present", "id": "f9814:m2"} {"signature": "def _process_query_string(documents_to_search, query_string):", "body": "def _encode_string(string):\"\"\"\"\"\"if six.PY2:string = string.encode('').translate(None, RESERVED_CHARACTERS)else:string = string.translate(string.maketrans('', '', RESERVED_CHARACTERS))return stringdef has_string(dictionary_object, search_string):\"\"\"\"\"\"for name in dictionary_object:if isinstance(dictionary_object[name], dict):return has_string(dictionary_object[name], search_string)elif dictionary_object[name]:if search_string.lower() in _encode_string(dictionary_object[name].lower()):return Truereturn Falsesearch_strings = _encode_string(query_string).split(\"\")documents_to_keep = []for search_string in search_strings:documents_to_keep.extend([d for d in documents_to_search if \"\" in d and has_string(d[\"\"], search_string)])return documents_to_keep", "docstring": "keep the documents that contain at least one of the search strings provided", "id": "f9814:m3"} {"signature": "def _process_exclude_dictionary(documents_to_search, exclude_dictionary):", "body": "for exclude_property in exclude_dictionary:exclude_values = exclude_dictionary[exclude_property]if not isinstance(exclude_values, list):exclude_values = [exclude_values]documents_to_search = [documentfor document in documents_to_searchif document.get(exclude_property) not in exclude_values]return documents_to_search", "docstring": "remove results that have fields that match in the exclude_dictionary", "id": "f9814:m4"} 
{"signature": "def _count_facet_values(documents, facet_terms):", "body": "facets = {}def process_facet(facet):\"\"\"\"\"\"faceted_documents = [facet_document for facet_document in documents if facet in facet_document]terms = {}def add_facet_value(facet_value):\"\"\"\"\"\"if isinstance(facet_value, list):for individual_value in facet_value:add_facet_value(individual_value)else:if facet_value not in terms:terms[facet_value] = terms[facet_value] += for document in faceted_documents:add_facet_value(document[facet])total = sum([terms[term] for term in terms])return total, termsfor facet in facet_terms:total, terms = process_facet(facet)facets[facet] = {\"\": total,\"\": terms,}return facets", "docstring": "Calculate the counts for the facets provided:\n\nFor each facet, count up the number of hits for each facet value, so\nthat we can report back the breakdown of how many of each value there\nexist. Notice that the total is the total number of facet matches that\nwe receive - a single document will get counted multiple times in the\ntotal if the facet field is multi-valued:\n\n e.g. a course may have a value for modes as [\"honor\", \"validated\"], and\n so the document will count as 1 towards the honor count, 1 towards the\n validated count, and 2 towards the total. (This may be a little\n surprising but matches the behaviour that elasticsearch presents)", "id": "f9814:m5"} {"signature": "@classmethoddef _backing_file(cls, create_if_missing=False):", "body": "backing_file_name = getattr(settings, \"\", None)if cls._file_name_override:backing_file_name = cls._file_name_overrideif not backing_file_name:cls._disabled = Falsereturn Noneif create_if_missing or os.path.exists(backing_file_name):cls._disabled = Falsereturn backing_file_namecls._disabled = Truereturn None", "docstring": "return path to test file to use for backing purposes", "id": "f9814:c0:m2"} {"signature": "@classmethoddef _write_to_file(cls, create_if_missing=False):", "body": "file_name = cls._backing_file(create_if_missing)if file_name:with open(file_name, \"\") as dict_file:json.dump(cls._mock_elastic, dict_file, cls=DjangoJSONEncoder)", "docstring": "write the index dict to the backing file", "id": "f9814:c0:m3"} {"signature": "@classmethoddef _load_from_file(cls):", "body": "file_name = cls._backing_file()if file_name and os.path.exists(file_name):with open(file_name, \"\") as dict_file:cls._mock_elastic = json.load(dict_file)", "docstring": "load the index dict from the contents of the backing file", "id": "f9814:c0:m4"} {"signature": "@staticmethoddef _paginate_results(size, from_, raw_results):", "body": "results = raw_resultsif size:start = if from_ is not None:start = from_results = raw_results[start:start + size]return results", "docstring": "Give the correct page of results", "id": "f9814:c0:m5"} {"signature": "@classmethoddef load_index(cls, index_name):", "body": "cls._load_from_file()if index_name not in cls._mock_elastic:cls._mock_elastic[index_name] = {}cls._write_to_file()return cls._mock_elastic[index_name]", "docstring": "load the index, if necessary from the backed file", "id": "f9814:c0:m6"} {"signature": "@classmethoddef load_doc_type(cls, index_name, doc_type):", "body": "index = cls.load_index(index_name)if doc_type not in index:index[doc_type] = []cls._write_to_file()return index[doc_type]", "docstring": "load the documents of type doc_type, if necessary loading from the backed file", "id": "f9814:c0:m7"} {"signature": "@classmethoddef add_documents(cls, index_name, doc_type, sources):", "body": 
"cls.load_doc_type(index_name, doc_type).extend(sources)cls._write_to_file()", "docstring": "add documents of specific type to index", "id": "f9814:c0:m8"} {"signature": "@classmethoddef remove_documents(cls, index_name, doc_type, doc_ids):", "body": "index = cls.load_index(index_name)if doc_type not in index:returnindex[doc_type] = [d for d in index[doc_type] if \"\" not in d or d[\"\"] not in doc_ids]cls._write_to_file()", "docstring": "remove documents by id of specific type to index", "id": "f9814:c0:m9"} {"signature": "@classmethoddef destroy(cls):", "body": "cls._mock_elastic = {}cls._write_to_file()", "docstring": "Clean out the dictionary for test resets", "id": "f9814:c0:m10"} {"signature": "def index(self, doc_type, sources): ", "body": "if not MockSearchEngine._disabled:doc_ids = [s[\"\"] for s in sources if \"\" in s]MockSearchEngine.remove_documents(self.index_name, doc_type, doc_ids)MockSearchEngine.add_documents(self.index_name, doc_type, sources)", "docstring": "Add/update documents of given type to the index", "id": "f9814:c0:m12"} {"signature": "def remove(self, doc_type, doc_ids): ", "body": "if not MockSearchEngine._disabled:MockSearchEngine.remove_documents(self.index_name, doc_type, doc_ids)", "docstring": "Remove documents of type with given ids from the index", "id": "f9814:c0:m13"} {"signature": "def search(self,query_string=None,field_dictionary=None,filter_dictionary=None,exclude_dictionary=None,facet_terms=None,**kwargs): ", "body": "if MockSearchEngine._disabled:return {\"\": ,\"\": ,\"\": ,\"\": []}documents_to_search = []if \"\" in kwargs:documents_to_search = MockSearchEngine.load_doc_type(self.index_name, kwargs[\"\"])else:index = MockSearchEngine.load_index(self.index_name)for doc_type in index:documents_to_search.extend(index[doc_type])if field_dictionary:documents_to_search = _filter_intersection(documents_to_search, field_dictionary)if filter_dictionary:documents_to_search = _filter_intersection(documents_to_search, filter_dictionary, True)if query_string:documents_to_search = _process_query_string(documents_to_search, query_string)if \"\" in kwargs:if not exclude_dictionary:exclude_dictionary = {}if \"\" not in exclude_dictionary:exclude_dictionary[\"\"] = []exclude_dictionary[\"\"].extend(kwargs[\"\"])if exclude_dictionary:documents_to_search = _process_exclude_dictionary(documents_to_search, exclude_dictionary)def score_documents(documents_to_search):\"\"\"\"\"\"search_results = []max_score = while documents_to_search:current_doc = documents_to_search[]score = len([d for d in documents_to_search if d == current_doc])if score > max_score:max_score = scoredocuments_to_search = [d for d in documents_to_search if d != current_doc]data = copy.copy(current_doc)search_results.append({\"\": score,\"\": data,})return search_results, max_scoresearch_results, max_score = score_documents(documents_to_search)results = MockSearchEngine._paginate_results(kwargs[\"\"] if \"\" in kwargs else None,kwargs[\"\"] if \"\" in kwargs else None,sorted(search_results, key=lambda k: k[\"\"]))response = {\"\": ,\"\": len(search_results),\"\": max_score,\"\": results}if facet_terms:response[\"\"] = _count_facet_values(documents_to_search, facet_terms)return response", "docstring": "Perform search upon documents within index", "id": "f9814:c0:m14"} {"signature": "@propertydef _is_elastic(self):", "body": "return isinstance(self.searcher, ElasticSearchEngine)", "docstring": "check search engine implementation, to manage cleanup differently", "id": "f9815:c0:m0"} {"signature": "def 
_index_for_facets(self):", "body": "self.searcher.index(\"\", [{\"\": \"\", \"\": \"\", \"\": \"\"}])self.searcher.index(\"\", [{\"\": \"\", \"\": \"\", \"\": \"\"}])self.searcher.index(\"\", [{\"\": \"\", \"\": \"\", \"\": \"\"}])self.searcher.index(\"\", [{\"\": \"\", \"\": \"\", \"\": \"\"}])self.searcher.index(\"\", [{\"\": \"\", \"\": \"\", \"\": \"\"}])self.searcher.index(\"\", [{\"\": \"\", \"\": \"\", \"\": \"\"}])self.searcher.index(\"\", [{\"\": \"\", \"\": \"\", \"\": \"\"}])", "docstring": "Prepare index for facet tests", "id": "f9815:c0:m28"} {"signature": "def initialize(self, **kwargs):", "body": "pass", "docstring": "empty base implementation", "id": "f9816:c0:m0"} {"signature": "@classmethoddef set_search_enviroment(cls, **kwargs):", "body": "initializer = _load_class(getattr(settings, \"\", None), cls)()return initializer.initialize(**kwargs)", "docstring": "Called from within search handler\nFinds desired subclass and calls initialize method", "id": "f9816:c0:m1"} {"signature": "def _process_pagination_values(request):", "body": "size = page = from_ = if \"\" in request.POST:size = int(request.POST[\"\"])max_page_size = getattr(settings, \"\", )if not ( < size <= max_page_size): raise ValueError(_('').format(page_size=size))if \"\" in request.POST:page = int(request.POST[\"\"])from_ = page * sizereturn size, from_, page", "docstring": "process pagination requests from request parameter", "id": "f9817:m0"} {"signature": "def _process_field_values(request):", "body": "return {field_key: request.POST[field_key]for field_key in request.POSTif field_key in course_discovery_filter_fields()}", "docstring": "Create separate dictionary of supported filter values provided", "id": "f9817:m1"} {"signature": "@require_POSTdef do_search(request, course_id=None):", "body": "SearchInitializer.set_search_enviroment(request=request, course_id=course_id)results = {\"\": _(\"\")}status_code = search_term = request.POST.get(\"\", None)try:if not search_term:raise ValueError(_(''))size, from_, page = _process_pagination_values(request)track.emit('',{\"\": search_term,\"\": size,\"\": page,})results = perform_search(search_term,user=request.user,size=size,from_=from_,course_id=course_id)status_code = track.emit('',{\"\": search_term,\"\": size,\"\": page,\"\": results[\"\"],})except ValueError as invalid_err:results = {\"\": six.text_type(invalid_err)}log.debug(six.text_type(invalid_err))except QueryParseError:results = {\"\": _('')}except Exception as err: results = {\"\": _('').format(search_string=search_term)}log.exception('',search_term,request.user.id,err)return JsonResponse(results, status=status_code)", "docstring": "Search view for http requests\n\nArgs:\n request (required) - django request object\n course_id (optional) - course_id within which to restrict search\n\nReturns:\n http json response with the following fields\n \"took\" - how many seconds the operation took\n \"total\" - how many results were found\n \"max_score\" - maximum score from these results\n \"results\" - json array of result documents\n\n or\n\n \"error\" - displayable information about an error that occurred on the server\n\nPOST Params:\n \"search_string\" (required) - text upon which to search\n \"page_size\" (optional) - how many results to return per page (defaults to 20, with maximum cutoff at 100)\n \"page_index\" (optional) - for which page (zero-indexed) to include results (defaults to 0)", "id": "f9817:m2"} {"signature": "@require_POSTdef course_discovery(request):", "body": "results = {\"\": 
_(\"\")}status_code = search_term = request.POST.get(\"\", None)try:size, from_, page = _process_pagination_values(request)field_dictionary = _process_field_values(request)track.emit('',{\"\": search_term,\"\": size,\"\": page,})results = course_discovery_search(search_term=search_term,size=size,from_=from_,field_dictionary=field_dictionary,)track.emit('',{\"\": search_term,\"\": size,\"\": page,\"\": results[\"\"],})status_code = except ValueError as invalid_err:results = {\"\": six.text_type(invalid_err)}log.debug(six.text_type(invalid_err))except QueryParseError:results = {\"\": _('')}except Exception as err: results = {\"\": _('').format(search_string=search_term)}log.exception('',search_term,request.user.id,err)return JsonResponse(results, status=status_code)", "docstring": "Search for courses\n\nArgs:\n request (required) - django request object\n\nReturns:\n http json response with the following fields\n \"took\" - how many seconds the operation took\n \"total\" - how many results were found\n \"max_score\" - maximum score from these resutls\n \"results\" - json array of result documents\n\n or\n\n \"error\" - displayable information about an error that occured on the server\n\nPOST Params:\n \"search_string\" (optional) - text with which to search for courses\n \"page_size\" (optional)- how many results to return per page (defaults to 20, with maximum cutoff at 100)\n \"page_index\" (optional) - for which page (zero-indexed) to include results (defaults to 0)", "id": "f9817:m3"} {"signature": "def filter_dictionary(self, **kwargs):", "body": "return {\"\": DateRange(None, datetime.utcnow())}", "docstring": "base implementation which filters via start_date", "id": "f9818:c0:m0"} {"signature": "def field_dictionary(self, **kwargs):", "body": "field_dictionary = {}if \"\" in kwargs and kwargs[\"\"]:field_dictionary[\"\"] = kwargs[\"\"]return field_dictionary", "docstring": "base implementation which add course if provided", "id": "f9818:c0:m1"} {"signature": "def exclude_dictionary(self, **kwargs):", "body": "return {}", "docstring": "base implementation which excludes nothing", "id": "f9818:c0:m2"} {"signature": "@classmethoddef generate_field_filters(cls, **kwargs):", "body": "generator = _load_class(getattr(settings, \"\", None), cls)()return (generator.field_dictionary(**kwargs),generator.filter_dictionary(**kwargs),generator.exclude_dictionary(**kwargs),)", "docstring": "Called from within search handler\nFinds desired subclass and adds filter information based upon user information", "id": "f9818:c0:m3"} {"signature": "def _translate_hits(es_response):", "body": "def translate_result(result):\"\"\"\"\"\"translated_result = copy.copy(result)data = translated_result.pop(\"\")translated_result.update({\"\": data,\"\": translated_result[\"\"]})return translated_resultdef translate_facet(result):\"\"\"\"\"\"terms = {term[\"\"]: term[\"\"] for term in result[\"\"]}return {\"\": terms,\"\": result[\"\"],\"\": result[\"\"],}results = [translate_result(hit) for hit in es_response[\"\"][\"\"]]response = {\"\": es_response[\"\"],\"\": es_response[\"\"][\"\"],\"\": es_response[\"\"][\"\"],\"\": results,}if \"\" in es_response:response[\"\"] = {facet: translate_facet(es_response[\"\"][facet]) for facet in es_response[\"\"]}return response", "docstring": "Provide resultset in our desired format from elasticsearch results", "id": "f9819:m0"} {"signature": "def _get_filter_field(field_name, field_value):", "body": "filter_field = Noneif isinstance(field_value, ValueRange):range_values = {}if 
field_value.lower:range_values.update({\"\": field_value.lower_string})if field_value.upper:range_values.update({\"\": field_value.upper_string})filter_field = {\"\": {field_name: range_values}}elif _is_iterable(field_value):filter_field = {\"\": {field_name: field_value}}else:filter_field = {\"\": {field_name: field_value}}return filter_field", "docstring": "Return field to apply into filter, if an array then use a range, otherwise look for a term match", "id": "f9819:m1"} {"signature": "def _process_field_queries(field_dictionary):", "body": "def field_item(field):\"\"\"\"\"\"return {\"\": {field: field_dictionary[field]}}return [field_item(field) for field in field_dictionary]", "docstring": "We have a field_dictionary - we want to match the values for an elasticsearch \"match\" query\nThis is only potentially useful when trying to tune certain search operations", "id": "f9819:m2"} {"signature": "def _process_field_filters(field_dictionary):", "body": "return [_get_filter_field(field, field_value) for field, field_value in field_dictionary.items()]", "docstring": "We have a field_dictionary - we match the values using a \"term\" filter in elasticsearch", "id": "f9819:m3"} {"signature": "def _process_filters(filter_dictionary):", "body": "def filter_item(field):\"\"\"\"\"\"if filter_dictionary[field] is not None:return {\"\": [_get_filter_field(field, filter_dictionary[field]),{\"\": {\"\": field}}]}return {\"\": {\"\": field}}return [filter_item(field) for field in filter_dictionary]", "docstring": "We have a filter_dictionary - this means that if the field is included\nand matches, then we can include, OR if the field is undefined, then we\nassume it is safe to include", "id": "f9819:m4"} {"signature": "def _process_exclude_dictionary(exclude_dictionary):", "body": "not_properties = []for exclude_property in exclude_dictionary:exclude_values = exclude_dictionary[exclude_property]if not isinstance(exclude_values, list):exclude_values = [exclude_values]not_properties.extend([{\"\": {exclude_property: exclude_value}} for exclude_value in exclude_values])if not not_properties:return {}return {\"\": {\"\": {\"\": not_properties}}}", "docstring": "Based on values in the exclude_dictionary generate a list of term queries that\nwill filter out unwanted results.", "id": "f9819:m5"} {"signature": "def _process_facet_terms(facet_terms):", "body": "elastic_facets = {}for facet in facet_terms:facet_term = {\"\": facet}if facet_terms[facet]:for facet_option in facet_terms[facet]:facet_term[facet_option] = facet_terms[facet][facet_option]elastic_facets[facet] = {\"\": facet_term}return elastic_facets", "docstring": "We have a list of terms with which we return facets", "id": "f9819:m6"} {"signature": "@staticmethoddef get_cache_item_name(index_name, doc_type):", "body": "return \"\".format(index_name,doc_type)", "docstring": "name-formatter for cache_item_name", "id": "f9819:c0:m0"} {"signature": "@classmethoddef get_mappings(cls, index_name, doc_type):", "body": "return cache.get(cls.get_cache_item_name(index_name, doc_type), {})", "docstring": "fetch mapped-items structure from cache", "id": "f9819:c0:m1"} {"signature": "@classmethoddef set_mappings(cls, index_name, doc_type, mappings):", "body": "cache.set(cls.get_cache_item_name(index_name, doc_type), mappings)", "docstring": "set new mapped-items structure into cache", "id": "f9819:c0:m2"} {"signature": "@classmethoddef log_indexing_error(cls, indexing_errors):", "body": "indexing_errors_log = []for indexing_error in 
indexing_errors:indexing_errors_log.append(str(indexing_error))raise exceptions.ElasticsearchException(''.join(indexing_errors_log))", "docstring": "Logs indexing errors and raises a general ElasticSearch Exception", "id": "f9819:c0:m3"} {"signature": "def _get_mappings(self, doc_type):", "body": "mapping = ElasticSearchEngine.get_mappings(self.index_name, doc_type)if not mapping:mapping = self._es.indices.get_mapping(index=self.index_name,doc_type=doc_type,).get(self.index_name, {}).get('', {}).get(doc_type, {})if mapping:ElasticSearchEngine.set_mappings(self.index_name,doc_type,mapping)return mapping", "docstring": "Interfaces with the elasticsearch mappings for the index\nprevents multiple loading of the same mappings from ES when called more than once\n\nMappings format in elasticsearch is as follows:\n{\n \"doc_type\": {\n \"properties\": {\n \"nested_property\": {\n \"properties\": {\n \"an_analysed_property\": {\n \"type\": \"string\"\n },\n \"another_analysed_property\": {\n \"type\": \"string\"\n }\n }\n },\n \"a_not_analysed_property\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n },\n \"a_date_property\": {\n \"type\": \"date\"\n }\n }\n }\n}\n\nWe cache the properties of each doc_type, if they are not available, we'll load them again from Elasticsearch", "id": "f9819:c0:m4"} {"signature": "def _clear_mapping(self, doc_type):", "body": "ElasticSearchEngine.set_mappings(self.index_name, doc_type, {})", "docstring": "Remove the cached mappings, so that they get loaded from ES next time they are requested", "id": "f9819:c0:m5"} {"signature": "def _check_mappings(self, doc_type, body):", "body": "exclude_fields = [\"\"]field_properties = getattr(settings, \"\", {})def field_property(field_name, field_value):\"\"\"\"\"\"prop_val = Noneif field_name in field_properties:prop_val = field_properties[field_name]elif isinstance(field_value, dict):props = {fn: field_property(fn, field_value[fn]) for fn in field_value}prop_val = {\"\": props}else:prop_val = {\"\": \"\",\"\": \"\",}return prop_valnew_properties = {field: field_property(field, value)for field, value in body.items()if (field not in exclude_fields) and (field not in self._get_mappings(doc_type).get('', {}))}if new_properties:self._es.indices.put_mapping(index=self.index_name,doc_type=doc_type,body={doc_type: {\"\": new_properties,}})self._clear_mapping(doc_type)", "docstring": "We desire to index content so that anything we want to be textually searchable(and therefore needing to be\nanalysed), but the other fields are designed to be filters, and only require an exact match. 
So, we want to\nset up the mappings for these fields as \"not_analyzed\" - this will allow our filters to work faster because\nthey only have to work off exact matches", "id": "f9819:c0:m7"} {"signature": "def index(self, doc_type, sources, **kwargs):", "body": "try:actions = []for source in sources:self._check_mappings(doc_type, source)id_ = source[''] if '' in source else Nonelog.debug(\"\", doc_type, id_)action = {\"\": self.index_name,\"\": doc_type,\"\": id_,\"\": source}actions.append(action)_, indexing_errors = bulk(self._es,actions,**kwargs)if indexing_errors:ElasticSearchEngine.log_indexing_error(indexing_errors)except Exception as ex:log.exception(\"\", str(ex))raise", "docstring": "Implements call to add documents to the ES index\nNote the call to _check_mappings which will setup fields with the desired mappings", "id": "f9819:c0:m8"} {"signature": "def remove(self, doc_type, doc_ids, **kwargs):", "body": "try:actions = []for doc_id in doc_ids:log.debug(\"\", doc_type, doc_id)action = {'': '',\"\": self.index_name,\"\": doc_type,\"\": doc_id}actions.append(action)bulk(self._es, actions, **kwargs)except BulkIndexError as ex:valid_errors = [error for error in ex.errors if error[''][''] != ]if valid_errors:log.exception(\"\")raise", "docstring": "Implements call to remove the documents from the index", "id": "f9819:c0:m9"} {"signature": "def search(self,query_string=None,field_dictionary=None,filter_dictionary=None,exclude_dictionary=None,facet_terms=None,exclude_ids=None,use_field_match=False,**kwargs): ", "body": "log.debug(\"\", query_string)elastic_queries = []elastic_filters = []if query_string:if six.PY2:query_string = query_string.encode('').translate(None, RESERVED_CHARACTERS)else:query_string = query_string.translate(query_string.maketrans('', '', RESERVED_CHARACTERS))elastic_queries.append({\"\": {\"\": [\"\"],\"\": query_string}})if field_dictionary:if use_field_match:elastic_queries.extend(_process_field_queries(field_dictionary))else:elastic_filters.extend(_process_field_filters(field_dictionary))if filter_dictionary:elastic_filters.extend(_process_filters(filter_dictionary))if exclude_ids:if not exclude_dictionary:exclude_dictionary = {}if \"\" not in exclude_dictionary:exclude_dictionary[\"\"] = []exclude_dictionary[\"\"].extend(exclude_ids)if exclude_dictionary:elastic_filters.append(_process_exclude_dictionary(exclude_dictionary))query_segment = {\"\": {}}if elastic_queries:query_segment = {\"\": {\"\": elastic_queries}}query = query_segmentif elastic_filters:filter_segment = {\"\": {\"\": elastic_filters}}query = {\"\": {\"\": query_segment,\"\": filter_segment,}}body = {\"\": query}if facet_terms:facet_query = _process_facet_terms(facet_terms)if facet_query:body[\"\"] = facet_querytry:es_response = self._es.search(index=self.index_name,body=body,**kwargs)except exceptions.ElasticsearchException as ex:message = six.text_type(ex)if '' in message:log.exception(\"\", message)raise QueryParseError('')else:log.exception(\"\", str(message))raisereturn _translate_hits(es_response)", "docstring": "Implements call to search the index for the desired content.\n\nArgs:\n query_string (str): the string of values upon which to search within the\n content of the objects within the index\n\n field_dictionary (dict): dictionary of values which _must_ exist and\n _must_ match in order for the documents to be included in the results\n\n filter_dictionary (dict): dictionary of values which _must_ match if the\n field exists in order for the documents to be included in the results;\n 
documents for which the field does not exist may be included in the\n results if they are not otherwise filtered out\n\n exclude_dictionary(dict): dictionary of values all of which must\n not match in order for the documents to be included in the results;\n documents which have any of these fields and for which the value matches\n one of the specified values shall be filtered out of the result set\n\n facet_terms (dict): dictionary of terms to include within search\n facets list - key is the term desired to facet upon, and the value is a\n dictionary of extended information to include. Supported right now is a\n size specification for a cap upon how many facet results to return (can\n be an empty dictionary to use default size for underlying engine):\n\n e.g.\n {\n \"org\": {\"size\": 10}, # only show top 10 organizations\n \"modes\": {}\n }\n\n use_field_match (bool): flag to indicate whether to use elastic\n filtering or elastic matching for field matches - this is nothing but a\n potential performance tune for certain queries\n\n (deprecated) exclude_ids (list): list of id values to exclude from the results -\n useful for finding matches that aren't \"one of these\"\n\nReturns:\n dict object with results in the desired format\n {\n \"took\": 3,\n \"total\": 4,\n \"max_score\": 2.0123,\n \"results\": [\n {\n \"score\": 2.0123,\n \"data\": {\n ...\n }\n },\n {\n \"score\": 0.0983,\n \"data\": {\n ...\n }\n }\n ],\n \"facets\": {\n \"org\": {\n \"total\": total_count,\n \"other\": 1,\n \"terms\": {\n \"MITx\": 25,\n \"HarvardX\": 18\n }\n },\n \"modes\": {\n \"total\": modes_count,\n \"other\": 15,\n \"terms\": {\n \"honor\": 58,\n \"verified\": 44,\n }\n }\n }\n }\n\nRaises:\n ElasticsearchException when there is a problem with the response from elasticsearch\n\nExample usage:\n .search(\n \"find the words within this string\",\n {\n \"must_have_field\": \"must_have_value for must_have_field\"\n },\n {\n\n }\n )", "id": "f9819:c0:m10"} {"signature": "def _load_class(class_path, default):", "body": "if class_path is None:return defaultcomponent = class_path.rsplit('', )result_processor = getattr(importlib.import_module(component[]),component[],default) if len(component) > else defaultreturn result_processor", "docstring": "Loads the class from the class_path string", "id": "f9822:m0"} {"signature": "def _is_iterable(item):", "body": "return isinstance(item, collections.Iterable) and not isinstance(item, six.string_types)", "docstring": "Checks if an item is iterable (list, tuple, generator), but not string", "id": "f9822:m1"} {"signature": "@propertydef upper(self):", "body": "return self._upper", "docstring": "return class member _upper as a property value", "id": "f9822:c0:m1"} {"signature": "@propertydef lower(self):", "body": "return self._lower", "docstring": "return class member _lower as a property value", "id": "f9822:c0:m2"} {"signature": "@propertydef upper_string(self):", "body": "return str(self._upper)", "docstring": "return string representation of _upper as a property value", "id": "f9822:c0:m3"} {"signature": "@propertydef lower_string(self):", "body": "return str(self._lower)", "docstring": "return string representation of _lower as a property value", "id": "f9822:c0:m4"} {"signature": "@propertydef upper_string(self):", "body": "return self._upper.isoformat()", "docstring": "use isoformat for _upper date's string format", "id": "f9822:c1:m0"} {"signature": "@propertydef lower_string(self):", "body": "return self._lower.isoformat()", "docstring": "use isoformat for _lower 
date's string format", "id": "f9822:c1:m1"} {"signature": "def course_discovery_filter_fields():", "body": "return getattr(settings, \"\", DEFAULT_FILTER_FIELDS)", "docstring": "look up the desired list of course discovery filter fields", "id": "f9823:m0"} {"signature": "def course_discovery_facets():", "body": "return getattr(settings, \"\", {field: {} for field in course_discovery_filter_fields()})", "docstring": "Discovery facets to include, by default we specify each filter field with unspecified size attribute", "id": "f9823:m1"} {"signature": "def perform_search(search_term,user=None,size=,from_=,course_id=None):", "body": "(field_dictionary, filter_dictionary, exclude_dictionary) = SearchFilterGenerator.generate_field_filters(user=user,course_id=course_id)searcher = SearchEngine.get_search_engine(getattr(settings, \"\", \"\"))if not searcher:raise NoSearchEngineError(\"\")results = searcher.search_string(search_term,field_dictionary=field_dictionary,filter_dictionary=filter_dictionary,exclude_dictionary=exclude_dictionary,size=size,from_=from_,doc_type=\"\",)for result in results[\"\"]:result[\"\"] = SearchResultProcessor.process_result(result[\"\"], search_term, user)results[\"\"] = len([r for r in results[\"\"] if r[\"\"] is None])results[\"\"] = [r for r in results[\"\"] if r[\"\"] is not None]return results", "docstring": "Call the search engine with the appropriate parameters", "id": "f9823:m2"} {"signature": "def course_discovery_search(search_term=None, size=, from_=, field_dictionary=None):", "body": "use_search_fields = [\"\"](search_fields, _, exclude_dictionary) = SearchFilterGenerator.generate_field_filters()use_field_dictionary = {}use_field_dictionary.update({field: search_fields[field] for field in search_fields if field in use_search_fields})if field_dictionary:use_field_dictionary.update(field_dictionary)if not getattr(settings, \"\", False):use_field_dictionary[\"\"] = DateRange(None, datetime.utcnow())searcher = SearchEngine.get_search_engine(getattr(settings, \"\", \"\"))if not searcher:raise NoSearchEngineError(\"\")results = searcher.search(query_string=search_term,doc_type=\"\",size=size,from_=from_,field_dictionary=use_field_dictionary,filter_dictionary={\"\": DateRange(datetime.utcnow(), None)},exclude_dictionary=exclude_dictionary,facet_terms=course_discovery_facets(),)return results", "docstring": "Course Discovery activities against the search engine index of course details", "id": "f9823:m3"} {"signature": "def index(self, doc_type, sources, **kwargs):", "body": "raise NotImplementedError", "docstring": "This operation is called to add documents of given type to the search index", "id": "f9824:c0:m1"} {"signature": "def remove(self, doc_type, doc_ids, **kwargs):", "body": "raise NotImplementedError", "docstring": "This operation is called to remove documents of given type from the search index", "id": "f9824:c0:m2"} {"signature": "def search(self,query_string=None,field_dictionary=None,filter_dictionary=None,exclude_dictionary=None,facet_terms=None,**kwargs): ", "body": "raise NotImplementedError", "docstring": "This operation is called to search for matching documents within the search index", "id": "f9824:c0:m3"} {"signature": "def search_string(self, query_string, **kwargs):", "body": "return self.search(query_string=query_string, **kwargs)", "docstring": "Helper function when primary search is for a query string", "id": "f9824:c0:m4"} {"signature": "def search_fields(self, field_dictionary, **kwargs):", "body": "return 
self.search(field_dictionary=field_dictionary, **kwargs)", "docstring": "Helper function when primary search is for a set of matching fields", "id": "f9824:c0:m5"} {"signature": "@staticmethoddef get_search_engine(index=None):", "body": "search_engine_class = _load_class(getattr(settings, \"\", None), None)return search_engine_class(index=index) if search_engine_class else None", "docstring": "Returns the desired implementor (defined in settings)", "id": "f9824:c0:m6"} {"signature": "@staticmethoddef strings_in_dictionary(dictionary):", "body": "strings = [value for value in six.itervalues(dictionary) if not isinstance(value, dict)]for child_dict in [dv for dv in six.itervalues(dictionary) if isinstance(dv, dict)]:strings.extend(SearchResultProcessor.strings_in_dictionary(child_dict))return strings", "docstring": "Used by default implementation for finding excerpt", "id": "f9825:c0:m1"} {"signature": "@staticmethoddef find_matches(strings, words, length_hoped):", "body": "lower_words = [w.lower() for w in words]def has_match(string):\"\"\"\"\"\"lower_string = string.lower()for test_word in lower_words:if test_word in lower_string:return Truereturn Falseshortened_strings = [textwrap.wrap(s) for s in strings]short_string_list = list(chain.from_iterable(shortened_strings))matches = [ms for ms in short_string_list if has_match(ms)]cumulative_len = break_at = Nonefor idx, match in enumerate(matches):cumulative_len += len(match)if cumulative_len >= length_hoped:break_at = idxbreakreturn matches[:break_at]", "docstring": "Used by default property excerpt", "id": "f9825:c0:m2"} {"signature": "@staticmethoddef decorate_matches(match_in, match_word):", "body": "matches = re.finditer(match_word, match_in, re.IGNORECASE)for matched_string in set([match.group() for match in matches]):match_in = match_in.replace(matched_string,getattr(settings, \"\", u\"\").format(matched_string))return match_in", "docstring": "decorate the matches within the excerpt", "id": "f9825:c0:m3"} {"signature": "def should_remove(self, user): ", "body": "return False", "docstring": "Override this in a class in order to add in last-chance access checks to the search process\nYour application will want to make this decision", "id": "f9825:c0:m4"} {"signature": "def add_properties(self):", "body": "for property_name in [p[] for p in inspect.getmembers(self.__class__) if isinstance(p[], property)]:self._results_fields[property_name] = getattr(self, property_name, None)", "docstring": "Called during post processing of result\nAny properties defined in your subclass will get exposed as members of the result json from the search", "id": "f9825:c0:m5"} {"signature": "@classmethoddef process_result(cls, dictionary, match_phrase, user):", "body": "result_processor = _load_class(getattr(settings, \"\", None), cls)srp = result_processor(dictionary, match_phrase)if srp.should_remove(user):return Nonetry:srp.add_properties()except Exception as ex: log.exception(\"\",json.dumps(dictionary, cls=DjangoJSONEncoder), str(ex))return Nonereturn dictionary", "docstring": "Called from within search handler. 
Finds desired subclass and decides if the\nresult should be removed and adds properties derived from the result information", "id": "f9825:c0:m6"} {"signature": "@propertydef excerpt(self):", "body": "if \"\" not in self._results_fields:return Nonematch_phrases = [self._match_phrase]if six.PY2:separate_phrases = [phrase.decode('')for phrase in shlex.split(self._match_phrase.encode(''))]else:separate_phrases = [phrasefor phrase in shlex.split(self._match_phrase)]if len(separate_phrases) > :match_phrases.extend(separate_phrases)else:match_phrases = separate_phrasesmatches = SearchResultProcessor.find_matches(SearchResultProcessor.strings_in_dictionary(self._results_fields[\"\"]),match_phrases,DESIRED_EXCERPT_LENGTH)excerpt_text = ELLIPSIS.join(matches)for match_word in match_phrases:excerpt_text = SearchResultProcessor.decorate_matches(excerpt_text, match_word)return excerpt_text", "docstring": "Property to display a useful excerpt representing the matches within the results", "id": "f9825:c0:m7"} {"signature": "def _parseAccept(headers):", "body": "def sort(value):return float(value[].get('', ))return OrderedDict(sorted(_splitHeaders(headers), key=sort, reverse=True))", "docstring": "Parse and sort an ``Accept`` header.\n\nThe header is sorted according to the ``q`` parameter for each header value.\n\n@rtype: `OrderedDict` mapping `bytes` to `dict`\n@return: Mapping of media types to header parameters.", "id": "f9827:m0"} {"signature": "def _splitHeaders(headers):", "body": "return [cgi.parse_header(value)for value in chain.from_iterable(s.split('') for s in headersif s)]", "docstring": "Split an HTTP header whose components are separated with commas.\n\nEach component is then split on semicolons and the component arguments\nconverted into a `dict`.\n\n@return: `list` of 2-`tuple` of `bytes`, `dict`\n@return: List of header arguments and mapping of component argument names\n to values.", "id": "f9827:m1"} {"signature": "def contentEncoding(requestHeaders, encoding=None):", "body": "if encoding is None:encoding = b''headers = _splitHeaders(requestHeaders.getRawHeaders(b'', []))if headers:return headers[][].get(b'', encoding)return encoding", "docstring": "Extract an encoding from a ``Content-Type`` header.\n\n@type requestHeaders: `twisted.web.http_headers.Headers`\n@param requestHeaders: Request headers.\n\n@type encoding: `bytes`\n@param encoding: Default encoding to assume if the ``Content-Type``\n header is lacking one. 
Defaults to ``UTF-8``.\n\n@rtype: `bytes`\n@return: Content encoding.", "id": "f9827:m2"} {"signature": "def maybe(f, default=None):", "body": "@wraps(f)def _maybe(x, *a, **kw):if x is None:return defaultreturn f(x, *a, **kw)return _maybe", "docstring": "Create a nil-safe callable decorator.\n\nIf the wrapped callable receives ``None`` as its argument, it will return\n``None`` immediately.", "id": "f9827:m3"} {"signature": "def _renderResource(resource, request):", "body": "meth = getattr(resource, '' + nativeString(request.method), None)if meth is None:try:allowedMethods = resource.allowedMethodsexcept AttributeError:allowedMethods = _computeAllowedMethods(resource)raise UnsupportedMethod(allowedMethods)return meth(request)", "docstring": "Render a given resource.\n\nSee `IResource.render `.", "id": "f9828:m0"} {"signature": "def __init__(self, wrappedResource):", "body": "self._wrappedResource = wrappedResourceResource.__init__(self)", "docstring": ":type wrappedResource: `ISpinneretResource`\n:param wrappedResource: Spinneret resource to wrap in an `IResource`.", "id": "f9828:c3:m0"} {"signature": "def _adaptToResource(self, result):", "body": "if result is None:return NotFound()spinneretResource = ISpinneretResource(result, None)if spinneretResource is not None:return SpinneretResource(spinneretResource)renderable = IRenderable(result, None)if renderable is not None:return _RenderableResource(renderable)resource = IResource(result, None)if resource is not None:return resourceif isinstance(result, URLPath):return Redirect(str(result))return result", "docstring": "Adapt a result to `IResource`.\n\nSeveral adaptions are tried they are, in order: ``None``,\n`IRenderable `, `IResource\n`, and `URLPath\n`. Anything else is returned as\nis.\n\nA `URLPath ` is treated as\na redirect.", "id": "f9828:c3:m1"} {"signature": "def _handleRenderResult(self, request, result):", "body": "def _requestFinished(result, cancel):cancel()return resultif not isinstance(result, Deferred):result = succeed(result)def _whenDone(result):render = getattr(result, '', lambda request: result)renderResult = render(request)if renderResult != NOT_DONE_YET:request.write(renderResult)request.finish()return resultrequest.notifyFinish().addBoth(_requestFinished, result.cancel)result.addCallback(self._adaptToResource)result.addCallback(_whenDone)result.addErrback(request.processingFailed)return NOT_DONE_YET", "docstring": "Handle the result from `IResource.render`.\n\nIf the result is a `Deferred` then return `NOT_DONE_YET` and add\na callback to write the result to the request when it arrives.", "id": "f9828:c3:m3"} {"signature": "def __init__(self, handlers, fallback=False):", "body": "Resource.__init__(self)self._handlers = list(handlers)self._fallback = fallbackself._acceptHandlers = {}for handler in self._handlers:for acceptType in handler.acceptTypes:if acceptType in self._acceptHandlers:raise ValueError('' % (acceptType,))self._acceptHandlers[acceptType] = handler", "docstring": ":type handlers: ``iterable`` of `INegotiableResource` and either\n `IResource` or `ISpinneretResource`.\n:param handlers: Iterable of negotiable resources, either\n `ISpinneretResource` or `IResource`, to use as handlers for\n negotiation.\n\n:type fallback: `bool`\n:param fallback: Fall back to the first handler in the case where\n negotiation fails?", "id": "f9828:c4:m0"} {"signature": "def _negotiateHandler(self, request):", "body": "accept = _parseAccept(request.requestHeaders.getRawHeaders(''))for contentType in accept.keys():handler = 
self._acceptHandlers.get(contentType.lower())if handler is not None:return handler, handler.contentTypeif self._fallback:handler = self._handlers[]return handler, handler.contentTypereturn NotAcceptable(), None", "docstring": "Negotiate a handler based on the content types acceptable to the\nclient.\n\n:rtype: 2-`tuple` of `twisted.web.iweb.IResource` and `bytes`\n:return: Pair of a resource and the content type.", "id": "f9828:c4:m1"} {"signature": "def locateChild(request, segments):", "body": "", "docstring": "Locate another object which can be adapted to `IResource`.\n\n:type request: `IRequest `\n:param request: Request.\n\n:type segments: ``sequence`` of `bytes`\n:param segments: Sequence of strings giving the remaining query\n segments to resolve.\n\n:rtype: 2-`tuple` of `IResource`, `IRenderable` or `URLPath` and\n a ``sequence`` of `bytes`\n:return: Pair of an `IResource`, `IRenderable` or `URLPath` and\n a sequence of the remaining path segments to be process, or\n a `Deferred` containing the aforementioned result.", "id": "f9829:c0:m0"} {"signature": "def MatchesException(exc_type, matcher):", "body": "return Raises(AfterPreprocessing(lambda x: x[],MatchesAll(IsInstance(exc_type), matcher)))", "docstring": "Match an exception type and a user-provided matcher against the exception\ninstance.", "id": "f9831:m0"} {"signature": "def renderRoute(resource, segments):", "body": "request = InMemoryRequest(segments)child = getChildForRequest(resource, request)request.render(child)return request", "docstring": "Locate and render a child resource.\n\n@type resource: `IResource`\n@param resource: Resource to locate the child resource on.\n\n@type segments: `list` of `bytes`\n@param segments: Path segments.\n\n@return: Request.", "id": "f9834:m0"} {"signature": "def _isSequenceTypeNotText(x):", "body": "return isSequenceType(x) and not isinstance(x, (bytes, unicode))", "docstring": "Is this a ``sequence`` type that isn't also a ``string`` type?", "id": "f9836:m0"} {"signature": "def one(func, n=):", "body": "def _one(result):if _isSequenceTypeNotText(result) and len(result) > n:return func(result[n])return Nonereturn maybe(_one)", "docstring": "Create a callable that applies ``func`` to a value in a sequence.\n\nIf the value is not a sequence or is an empty sequence then ``None`` is\nreturned.\n\n:type func: `callable`\n:param func: Callable to be applied to each result.\n\n:type n: `int`\n:param n: Index of the value to apply ``func`` to.", "id": "f9836:m1"} {"signature": "def many(func):", "body": "def _many(result):if _isSequenceTypeNotText(result):return map(func, result)return []return maybe(_many, default=[])", "docstring": "Create a callable that applies ``func`` to every value in a sequence.\n\nIf the value is not a sequence then an empty list is returned.\n\n:type func: `callable`\n:param func: Callable to be applied to the first result.", "id": "f9836:m2"} {"signature": "def Text(value, encoding=None):", "body": "if encoding is None:encoding = ''if isinstance(value, bytes):return value.decode(encoding)elif isinstance(value, unicode):return valuereturn None", "docstring": "Parse a value as text.\n\n:type value: `unicode` or `bytes`\n:param value: Text value to parse\n\n:type encoding: `bytes`\n:param encoding: Encoding to treat ``bytes`` values as, defaults to\n ``utf-8``.\n\n:rtype: `unicode`\n:return: Parsed text or ``None`` if ``value`` is neither `bytes` nor\n `unicode`.", "id": "f9836:m3"} {"signature": "def Integer(value, base=, encoding=None):", "body": "try:return 
int(Text(value, encoding), base)except (TypeError, ValueError):return None", "docstring": "Parse a value as an integer.\n\n:type value: `unicode` or `bytes`\n:param value: Text value to parse\n\n:type base: `unicode` or `bytes`\n:param base: Base to assume ``value`` is specified in.\n\n:type encoding: `bytes`\n:param encoding: Encoding to treat ``bytes`` values as, defaults to\n ``utf-8``.\n\n:rtype: `int`\n:return: Parsed integer or ``None`` if ``value`` could not be parsed as an\n integer.", "id": "f9836:m4"} {"signature": "def Float(value, encoding=None):", "body": "try:return float(Text(value, encoding))except (TypeError, ValueError):return None", "docstring": "Parse a value as a floating point number.\n\n:type value: `unicode` or `bytes`\n:param value: Text value to parse.\n\n:type encoding: `bytes`\n:param encoding: Encoding to treat `bytes` values as, defaults to\n ``utf-8``.\n\n:rtype: `float`\n:return: Parsed float or ``None`` if ``value`` could not be parsed as a\n float.", "id": "f9836:m5"} {"signature": "def Boolean(value, true=(u'', u'', u''), false=(u'', u'', u''),encoding=None):", "body": "value = Text(value, encoding)if value is not None:value = value.lower().strip()if value in true:return Trueelif value in false:return Falsereturn None", "docstring": "Parse a value as a boolean.\n\n:type value: `unicode` or `bytes`\n:param value: Text value to parse.\n\n:type true: `tuple` of `unicode`\n:param true: Values to compare, ignoring case, for ``True`` values.\n\n:type false: `tuple` of `unicode`\n:param false: Values to compare, ignoring case, for ``False`` values.\n\n:type encoding: `bytes`\n:param encoding: Encoding to treat `bytes` values as, defaults to\n ``utf-8``.\n\n:rtype: `bool`\n:return: Parsed boolean or ``None`` if ``value`` did not match ``true`` or\n ``false`` values.", "id": "f9836:m6"} {"signature": "def Delimited(value, parser=Text, delimiter=u'', encoding=None):", "body": "value = Text(value, encoding)if value is None or value == u'':return []return map(parser, value.split(delimiter))", "docstring": "Parse a value as a delimited list.\n\n:type value: `unicode` or `bytes`\n:param value: Text value to parse.\n\n:type parser: `callable` taking a `unicode` parameter\n:param parser: Callable to map over the delimited text values.\n\n:type delimiter: `unicode`\n:param delimiter: Delimiter text.\n\n:type encoding: `bytes`\n:param encoding: Encoding to treat `bytes` values as, defaults to\n ``utf-8``.\n\n:rtype: `list`\n:return: List of parsed values.", "id": "f9836:m7"} {"signature": "def Timestamp(value, _divisor=, tz=UTC, encoding=None):", "body": "value = Float(value, encoding)if value is not None:value = value / _divisorreturn datetime.fromtimestamp(value, tz)return None", "docstring": "Parse a value as a POSIX timestamp in seconds.\n\n:type value: `unicode` or `bytes`\n:param value: Text value to parse, which should be the number of seconds\n since the epoch.\n\n:type _divisor: `float`\n:param _divisor: Number to divide the value by.\n\n:type tz: `tzinfo`\n:param tz: Timezone, defaults to UTC.\n\n:type encoding: `bytes`\n:param encoding: Encoding to treat `bytes` values as, defaults to\n ``utf-8``.\n\n:rtype: `datetime.datetime`\n:return: Parsed datetime or ``None`` if ``value`` could not be parsed.", "id": "f9836:m8"} {"signature": "def TimestampMs(value, encoding=None):", "body": "return Timestamp(value, _divisor=, encoding=encoding)", "docstring": "Parse a value as a POSIX timestamp in milliseconds.\n\n:type value: `unicode` or `bytes`\n:param value: Text 
value to parse, which should be the number of\n milliseconds since the epoch.\n\n:type encoding: `bytes`\n:param encoding: Encoding to treat `bytes` values as, defaults to\n ``utf-8``.\n\n:rtype: `datetime.datetime`\n:return: Parsed datetime or ``None`` if ``value`` could not be parsed.", "id": "f9836:m9"} {"signature": "def parse(expected, query):", "body": "return dict((key, parser(query.get(key, [])))for key, parser in expected.items())", "docstring": "Parse query parameters.\n\n:type expected: `dict` mapping `bytes` to `callable`\n:param expected: Mapping of query argument names to argument parsing\n callables.\n\n:type query: `dict` mapping `bytes` to `list` of `bytes`\n:param query: Mapping of query argument names to lists of argument values,\n this is the form that Twisted Web's `IRequest.args\n ` value takes.\n\n:rtype: `dict` mapping `bytes` to `object`\n:return: Mapping of query argument names to parsed argument values.", "id": "f9836:m10"} {"signature": "def Text(name, encoding=None):", "body": "def _match(request, value):return name, query.Text(value,encoding=contentEncoding(request.requestHeaders, encoding))return _match", "docstring": "Match a route parameter.\n\n`Any` is a synonym for `Text`.\n\n:type name: `bytes`\n:param name: Route parameter name.\n\n:type encoding: `bytes`\n:param encoding: Default encoding to assume if the ``Content-Type``\n header is lacking one.\n\n:return: ``callable`` suitable for use with `route` or `subroute`.", "id": "f9838:m0"} {"signature": "def Integer(name, base=, encoding=None):", "body": "def _match(request, value):return name, query.Integer(value,base=base,encoding=contentEncoding(request.requestHeaders, encoding))return _match", "docstring": "Match an integer route parameter.\n\n:type name: `bytes`\n:param name: Route parameter name.\n\n:type base: `int`\n:param base: Base to interpret the value in.\n\n:type encoding: `bytes`\n:param encoding: Default encoding to assume if the ``Content-Type``\n header is lacking one.\n\n:return: ``callable`` suitable for use with `route` or `subroute`.", "id": "f9838:m1"} {"signature": "def _matchRoute(components, request, segments, partialMatching):", "body": "if len(components) == and isinstance(components[], bytes):components = components[]if components[:] == '':components = components[:]components = components.split('')results = OrderedDict()NO_MATCH = None, segmentsremaining = list(segments)if len(segments) == len(components) == :return results, remainingfor us, them in izip_longest(components, segments):if us is None:if partialMatching:breakelse:return NO_MATCHelif them is None:return NO_MATCHif callable(us):name, match = us(request, them)if match is None:return NO_MATCHresults[name] = matchelif us != them:return NO_MATCHremaining.pop()return results, remaining", "docstring": "Match a request path against our path components.\n\nThe path components are always matched relative to their parent is in the\nresource hierarchy, in other words it is only possible to match URIs nested\nmore deeply than the parent resource.\n\n:type components: ``iterable`` of `bytes` or `callable`\n:param components: Iterable of path components, to match against the\n request, either static strings or dynamic parameters. As a convenience,\n a single `bytes` component containing ``/`` may be given instead of\n manually separating the components. 
If no components are given the null\n route is matched, this is the case where ``segments`` is empty.\n\n:type segments: ``sequence`` of `bytes`\n:param segments: Sequence of path segments, from the request, to match\n against.\n\n:type partialMatching: `bool`\n:param partialMatching: Allow partial matching against the request path?\n\n:rtype: 2-`tuple` of `dict` keyed on `bytes` and `list` of `bytes`\n:return: Pair of parameter results, mapping parameter names to processed\n values, and a list of the remaining request path segments. If there is\n no route match the result will be ``None`` and the original request path\n segments.", "id": "f9838:m2"} {"signature": "def route(*components):", "body": "return partial(_matchRoute, components, partialMatching=False)", "docstring": "Match a request path exactly.\n\nThe path components are always matched relative to their parent is in the\nresource hierarchy, in other words it is only possible to match URIs nested\nmore deeply than the parent resource.\n\n:type components: ``iterable`` of `bytes` or `callable`\n:param components: Iterable of path components, to match against the\n request, either static strings or dynamic parameters. As a convenience,\n a single `bytes` component containing ``/`` may be given instead of\n manually separating the components. If no components are given the null\n route is matched, this is the case where ``segments`` is empty.\n\n:rtype: 2-`tuple` of `dict` keyed on `bytes` and `list` of `bytes`\n:return: Pair of parameter results, mapping parameter names to processed\n values, and a list of the remaining request path segments. If there is\n no route match the result will be ``None`` and the original request\n path segments.", "id": "f9838:m3"} {"signature": "def subroute(*components):", "body": "return partial(_matchRoute, components, partialMatching=True)", "docstring": "Partially match a request path exactly.\n\nThe path components are always matched relative to their parent is in the\nresource hierarchy, in other words it is only possible to match URIs nested\nmore deeply than the parent resource.\n\nIf there are more request path segments than components the match may still\nbe successful, the remaining path segments are returned in the second part\nof the result.\n\n:type components: ``iterable`` of `bytes` or `callable`\n:param components: Iterable of path components, to match against the\n request, either static strings or dynamic parameters. As a convenience,\n a single `bytes` component containing ``/`` may be given instead of\n manually separating the components. If no components are given the null\n route is matched, this is the case where ``segments`` is empty.\n\n:rtype: 2-`tuple` of `dict` keyed on `bytes` and `list` of `bytes`\n:return: Pair of parameter results, mapping parameter names to processed\n values, and a list of the remaining request path segments. If there is\n no route match the result will be ``None`` and the original request\n path segments.", "id": "f9838:m4"} {"signature": "def routedResource(f, routerAttribute=''):", "body": "return wraps(f)(lambda *a, **kw: getattr(f(*a, **kw), routerAttribute).resource())", "docstring": "Decorate a router-producing callable to instead produce a resource.\n\nThis simply produces a new callable that invokes the original callable, and\ncalls ``resource`` on the ``routerAttribute``.\n\nIf the router producer has multiple routers the attribute can be altered to\nchoose the appropriate one, for example:\n\n.. 
code-block:: python\n\n class _ComplexRouter(object):\n router = Router()\n privateRouter = Router()\n\n @router.route('/')\n def publicRoot(self, request, params):\n return SomethingPublic(...)\n\n @privateRouter.route('/')\n def privateRoot(self, request, params):\n return SomethingPrivate(...)\n\n PublicResource = routedResource(_ComplexRouter)\n PrivateResource = routedResource(_ComplexRouter, 'privateRouter')\n\n:type f: ``callable``\n:param f: Callable producing an object with a `Router` attribute, for\n example, a type.\n\n:type routerAttribute: `str`\n:param routerAttribute: Name of the `Router` attribute on the result of\n calling ``f``.\n\n:rtype: `callable`\n:return: Callable producing an `IResource`.", "id": "f9838:m5"} {"signature": "def __init__(self, obj, routes):", "body": "self._obj = objself._routes = routes", "docstring": ":param obj: Parent object containing the route handler.\n\n:type routes: `list` of 3-`tuple` containing `bytes`, `callable`,\n `callable`\n:param routes: List of 3-tuple containing the route handler name, the\n route handler function and the matcher function.", "id": "f9838:c0:m0"} {"signature": "def _matchRoute(self, request, segments):", "body": "for name, meth, route in self._routes:matches, remaining = route(request, segments)if matches is not None:return meth(self._obj, request, matches), remainingreturn None, segments", "docstring": "Find a route handler that matches the request path and invoke it.", "id": "f9838:c0:m1"} {"signature": "def _forObject(self, obj):", "body": "router = type(self)()router._routes = list(self._routes)router._self = objreturn router", "docstring": "Create a new `Router` instance, with its own set of routes, for\n``obj``.", "id": "f9838:c1:m1"} {"signature": "def _addRoute(self, f, matcher):", "body": "self._routes.append((f.func_name, f, matcher))", "docstring": "Add a route handler and matcher to the collection of possible routes.", "id": "f9838:c1:m3"} {"signature": "def resource(self):", "body": "return SpinneretResource(_RouterResource(self._self, self._routes))", "docstring": "Create an `IResource ` that\nwill perform URL routing.", "id": "f9838:c1:m4"} {"signature": "def route(self, *components):", "body": "def _factory(f):self._addRoute(f, route(*components))return freturn _factory", "docstring": "See `txspinneret.route.route`.\n\nThis decorator can be stacked with itself to specify multiple routes\nwith a single handler.", "id": "f9838:c1:m5"} {"signature": "def subroute(self, *components):", "body": "def _factory(f):self._addRoute(f, subroute(*components))return freturn _factory", "docstring": "See `txspinneret.route.subroute`.\n\nThis decorator can be stacked with itself to specify multiple routes\nwith a single handler.", "id": "f9838:c1:m6"} {"signature": "def get_version():", "body": "version_module_path = os.path.join(os.path.dirname(__file__), \"\", \"\")with open(version_module_path) as version_module:exec(version_module.read())return locals()[\"\"]", "docstring": "Get the version from version module without importing more than\nnecessary.", "id": "f9839:m0"} {"signature": "def read(path):", "body": "with open(path) as f:return f.read()", "docstring": "Read the contents of a file.", "id": "f9839:m1"} {"signature": "def _tailCallback(f, uid):", "body": "def t(*args):raise _TailCall(f, args, uid)t.C = freturn t", "docstring": "This is the \"callable\" version of the continuation, which should only\nbe accessible from the inside of the function to be continued. 
An\nattribute called \"C\" can be used in order to get back the public\nversion of the continuation (for passing the continuation to another\nfunction).", "id": "f9844:m0"} {"signature": "def with_continuations(**c):", "body": "if len(c): keys, k = zip(*c.items())else: keys, k = tuple([]), tuple([])def d(f):return C(lambda kself, *conts:lambda *args:f(*args, self=kself, **dict(zip(keys, conts)))) (*k)return d", "docstring": "A decorator for defining tail-call optimized functions.\n\nExample\n-------\n\n @with_continuations()\n def factorial(n, k, self=None):\n return self(n-1, k*n) if n > 1 else k\n\n @with_continuations()\n def identity(x, self=None):\n return x\n\n @with_continuations(out=identity)\n def factorial2(n, k, self=None, out=None):\n return self(n-1, k*n) if n > 1 else out(k)\n\n print(factorial(7,1))\n print(factorial2(7,1))", "id": "f9844:m1"} {"signature": "@http_connection()def make_get_request(url, params, headers, connection):", "body": "timeout = getattr(connection, '')response = connection.get(url, params=params, headers=headers, timeout=timeout)if response.ok or response.status_code == :return response.json() if response.content else Noneelse:response.raise_for_status()", "docstring": "Helper function that makes an HTTP GET request to the given firebase\nendpoint. Timeout is 60 seconds.\n`url`: The full URL of the firebase endpoint (DSN appended.)\n`params`: Python dict that is appended to the URL like a querystring.\n`headers`: Python dict. HTTP request headers.\n`connection`: Predefined HTTP connection instance. If not given, it\nis supplied by the `decorators.http_connection` function.\n\nThe returning value is a Python dict deserialized by the JSON decoder. However,\nif the status code is not 2x or 403, a requests.HTTPError is raised.\n\nconnection = connection_pool.get_available_connection()\nresponse = make_get_request('http://firebase.localhost/users', {'print': 'silent'},\n {'X_FIREBASE_SOMETHING': 'Hi'}, connection)\nresponse => {'1': 'John Doe', '2': 'Jane Doe'}", "id": "f9848:m0"} {"signature": "@http_connection()def make_put_request(url, data, params, headers, connection):", "body": "timeout = getattr(connection, '')response = connection.put(url, data=data, params=params, headers=headers,timeout=timeout)if response.ok or response.status_code == :return response.json() if response.content else Noneelse:response.raise_for_status()", "docstring": "Helper function that makes an HTTP PUT request to the given firebase\nendpoint. Timeout is 60 seconds.\n`url`: The full URL of the firebase endpoint (DSN appended.)\n`data`: JSON serializable dict that will be stored in the remote storage.\n`params`: Python dict that is appended to the URL like a querystring.\n`headers`: Python dict. HTTP request headers.\n`connection`: Predefined HTTP connection instance. If not given, it\nis supplied by the `decorators.http_connection` function.\n\nThe returning value is a Python dict deserialized by the JSON decoder. 
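`_tailCallback` and `with_continuations` above implement tail-call optimization by raising a private exception (`_TailCall`) that an outer driver catches and re-dispatches, so deep recursion never grows the Python stack. A stripped-down sketch of that trampoline idea under assumed names (not the library's actual decorator machinery):

```python
# Sketch of trampoline-style tail-call elimination; all names here are assumptions.
class _TailCall(Exception):
    def __init__(self, func, args):
        self.func, self.args = func, args

def tail_call(func):
    """Wrap a call so it 'bounces' back to the trampoline instead of recursing."""
    def bounce(*args):
        raise _TailCall(func, args)
    return bounce

def trampoline(func, *args):
    """Re-dispatch bounced calls in a loop, keeping the stack depth constant."""
    while True:
        try:
            return func(*args)
        except _TailCall as call:
            func, args = call.func, call.args

def factorial(n, acc=1):
    return acc if n <= 1 else tail_call(factorial)(n - 1, acc * n)

print(trampoline(factorial, 5))                    # 120
assert trampoline(factorial, 10000) > 0            # deep recursion, but no RecursionError
```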
However,\nif the status code is not 2x or 403, an requests.HTTPError is raised.\n\nconnection = connection_pool.get_available_connection()\nresponse = make_put_request('http://firebase.localhost/users',\n '{\"1\": \"Ozgur Vatansever\"}',\n {'X_FIREBASE_SOMETHING': 'Hi'}, connection)\nresponse => {'1': 'Ozgur Vatansever'} or {'error': 'Permission denied.'}", "id": "f9848:m1"} {"signature": "@http_connection()def make_post_request(url, data, params, headers, connection):", "body": "timeout = getattr(connection, '')response = connection.post(url, data=data, params=params, headers=headers,timeout=timeout)if response.ok or response.status_code == :return response.json() if response.content else Noneelse:response.raise_for_status()", "docstring": "Helper function that makes an HTTP POST request to the given firebase\nendpoint. Timeout is 60 seconds.\n`url`: The full URL of the firebase endpoint (DSN appended.)\n`data`: JSON serializable dict that will be stored in the remote storage.\n`params`: Python dict that is appended to the URL like a querystring.\n`headers`: Python dict. HTTP request headers.\n`connection`: Predefined HTTP connection instance. If not given, it\nis supplied by the `decorators.http_connection` function.\n\nThe returning value is a Python dict deserialized by the JSON decoder. However,\nif the status code is not 2x or 403, an requests.HTTPError is raised.\n\nconnection = connection_pool.get_available_connection()\nresponse = make_put_request('http://firebase.localhost/users/',\n '{\"Ozgur Vatansever\"}', {'X_FIREBASE_SOMETHING': 'Hi'}, connection)\nresponse => {u'name': u'-Inw6zol_2f5ThHwVcSe'} or {'error': 'Permission denied.'}", "id": "f9848:m2"} {"signature": "@http_connection()def make_patch_request(url, data, params, headers, connection):", "body": "timeout = getattr(connection, '')response = connection.patch(url, data=data, params=params, headers=headers,timeout=timeout)if response.ok or response.status_code == :return response.json() if response.content else Noneelse:response.raise_for_status()", "docstring": "Helper function that makes an HTTP PATCH request to the given firebase\nendpoint. Timeout is 60 seconds.\n`url`: The full URL of the firebase endpoint (DSN appended.)\n`data`: JSON serializable dict that will be stored in the remote storage.\n`params`: Python dict that is appended to the URL like a querystring.\n`headers`: Python dict. HTTP request headers.\n`connection`: Predefined HTTP connection instance. If not given, it\nis supplied by the `decorators.http_connection` function.\n\nThe returning value is a Python dict deserialized by the JSON decoder. However,\nif the status code is not 2x or 403, an requests.HTTPError is raised.\n\nconnection = connection_pool.get_available_connection()\nresponse = make_put_request('http://firebase.localhost/users/1',\n '{\"Ozgur Vatansever\"}', {'X_FIREBASE_SOMETHING': 'Hi'}, connection)\nresponse => {'Ozgur Vatansever'} or {'error': 'Permission denied.'}", "id": "f9848:m3"} {"signature": "@http_connection()def make_delete_request(url, params, headers, connection):", "body": "timeout = getattr(connection, '')response = connection.delete(url, params=params, headers=headers, timeout=timeout)if response.ok or response.status_code == :return response.json() if response.content else Noneelse:response.raise_for_status()", "docstring": "Helper function that makes an HTTP DELETE request to the given firebase\nendpoint. 
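The firebase request helpers documented above all follow the same shape: send the request with the connection's 60-second timeout, decode the JSON body when the status is 2xx (or 403, which carries an error payload), and raise for anything else. A simplified sketch of that shape using a plain `requests` call, without the `@http_connection` decorator or the injected connection:

```python
import requests

def simple_get(url, params=None, headers=None, timeout=60):
    """Sketch of the GET helper described above (simplified: no injected connection)."""
    response = requests.get(url, params=params, headers=headers, timeout=timeout)
    if response.ok or response.status_code == 403:
        # An empty body (e.g. from a silent write) decodes to None rather than raising.
        return response.json() if response.content else None
    response.raise_for_status()
```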
Timeout is 60 seconds.\n`url`: The full URL of the firebase endpoint (DSN appended.)\n`params`: Python dict that is appended to the URL like a querystring.\n`headers`: Python dict. HTTP request headers.\n`connection`: Predefined HTTP connection instance. If not given, it\nis supplied by the `decorators.http_connection` function.\n\nThe returning value is NULL. However, if the status code is not 2x or 403,\nan requests.HTTPError is raised.\n\nconnection = connection_pool.get_available_connection()\nresponse = make_put_request('http://firebase.localhost/users/1',\n {'X_FIREBASE_SOMETHING': 'Hi'}, connection)\nresponse => NULL or {'error': 'Permission denied.'}", "id": "f9848:m4"} {"signature": "def get_user(self):", "body": "token = self.authenticator.create_token(self.extra)user_id = self.extra.get('')return FirebaseUser(self.email, token, self.provider, user_id)", "docstring": "Method that gets the authenticated user. The returning user has\nthe token, email and the provider data.", "id": "f9848:c1:m1"} {"signature": "def _build_endpoint_url(self, url, name=None):", "body": "if not url.endswith(self.URL_SEPERATOR):url = url + self.URL_SEPERATORif name is None:name = ''return '' % (urlparse.urljoin(self.dsn, url), name,self.NAME_EXTENSION)", "docstring": "Method that constructs a full url with the given url and the\nsnapshot name.\n\nExample:\nfull_url = _build_endpoint_url('/users', '1')\nfull_url => 'http://firebase.localhost/users/1.json'", "id": "f9848:c2:m1"} {"signature": "def _authenticate(self, params, headers):", "body": "if self.authentication:user = self.authentication.get_user()params.update({'': user.firebase_auth_token})headers.update(self.authentication.authenticator.HEADERS)", "docstring": "Method that simply adjusts authentication credentials for the\nrequest.\n`params` is the querystring of the request.\n`headers` is the header of the request.\n\nIf auth instance is not provided to this class, this method simply\nreturns without doing anything.", "id": "f9848:c2:m2"} {"signature": "@http_connection()def get(self, url, name, params=None, headers=None, connection=None):", "body": "if name is None: name = ''params = params or {}headers = headers or {}endpoint = self._build_endpoint_url(url, name)self._authenticate(params, headers)return make_get_request(endpoint, params, headers, connection=connection)", "docstring": "Synchronous GET request.", "id": "f9848:c2:m3"} {"signature": "def get_async(self, url, name, callback=None, params=None, headers=None):", "body": "if name is None: name = ''params = params or {}headers = headers or {}endpoint = self._build_endpoint_url(url, name)self._authenticate(params, headers)process_pool.apply_async(make_get_request,args=(endpoint, params, headers), callback=callback)", "docstring": "Asynchronous GET request with the process pool.", "id": "f9848:c2:m4"} {"signature": "@http_connection()def put(self, url, name, data, params=None, headers=None, connection=None):", "body": "assert name, ''params = params or {}headers = headers or {}endpoint = self._build_endpoint_url(url, name)self._authenticate(params, headers)data = json.dumps(data, cls=JSONEncoder)return make_put_request(endpoint, data, params, headers,connection=connection)", "docstring": "Synchronous PUT request. There will be no returning output from\nthe server, because the request will be made with ``silent``\nparameter. 
``data`` must be a JSONable value.", "id": "f9848:c2:m5"} {"signature": "def put_async(self, url, name, data, callback=None, params=None, headers=None):", "body": "if name is None: name = ''params = params or {}headers = headers or {}endpoint = self._build_endpoint_url(url, name)self._authenticate(params, headers)data = json.dumps(data, cls=JSONEncoder)process_pool.apply_async(make_put_request,args=(endpoint, data, params, headers),callback=callback)", "docstring": "Asynchronous PUT request with the process pool.", "id": "f9848:c2:m6"} {"signature": "@http_connection()def post(self, url, data, params=None, headers=None, connection=None):", "body": "params = params or {}headers = headers or {}endpoint = self._build_endpoint_url(url, None)self._authenticate(params, headers)data = json.dumps(data, cls=JSONEncoder)return make_post_request(endpoint, data, params, headers,connection=connection)", "docstring": "Synchronous POST request. ``data`` must be a JSONable value.", "id": "f9848:c2:m7"} {"signature": "def post_async(self, url, data, callback=None, params=None, headers=None):", "body": "params = params or {}headers = headers or {}endpoint = self._build_endpoint_url(url, None)self._authenticate(params, headers)data = json.dumps(data, cls=JSONEncoder)process_pool.apply_async(make_post_request,args=(endpoint, data, params, headers),callback=callback)", "docstring": "Asynchronous POST request with the process pool.", "id": "f9848:c2:m8"} {"signature": "@http_connection()def patch(self, url, data, params=None, headers=None, connection=None):", "body": "params = params or {}headers = headers or {}endpoint = self._build_endpoint_url(url, None)self._authenticate(params, headers)data = json.dumps(data, cls=JSONEncoder)return make_patch_request(endpoint, data, params, headers,connection=connection)", "docstring": "Synchronous POST request. ``data`` must be a JSONable value.", "id": "f9848:c2:m9"} {"signature": "def patch_async(self, url, data, callback=None, params=None, headers=None):", "body": "params = params or {}headers = headers or {}endpoint = self._build_endpoint_url(url, None)self._authenticate(params, headers)data = json.dumps(data, cls=JSONEncoder)process_pool.apply_async(make_patch_request,args=(endpoint, data, params, headers),callback=callback)", "docstring": "Asynchronous PATCH request with the process pool.", "id": "f9848:c2:m10"} {"signature": "@http_connection()def delete(self, url, name, params=None, headers=None, connection=None):", "body": "if not name: name = ''params = params or {}headers = headers or {}endpoint = self._build_endpoint_url(url, name)self._authenticate(params, headers)return make_delete_request(endpoint, params, headers, connection=connection)", "docstring": "Synchronous DELETE request. 
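The `*_async` variants above push the same request helpers onto a process pool and hand the decoded result to a callback via `apply_async`. The standard-library pattern they rely on looks like this; the worker and callback below are placeholders, not the library's functions:

```python
import multiprocessing

def fetch(url):
    # Placeholder worker standing in for make_get_request(...).
    return {'url': url, 'ok': True}

def on_done(result):
    print('callback received:', result)

if __name__ == '__main__':
    process_pool = multiprocessing.Pool(processes=2)
    process_pool.apply_async(fetch, args=('http://firebase.localhost/users',), callback=on_done)
    process_pool.close()
    process_pool.join()   # the callback fires in the parent process once the worker returns
```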
``data`` must be a JSONable value.", "id": "f9848:c2:m11"} {"signature": "def delete_async(self, url, name, callback=None, params=None, headers=None):", "body": "if not name: name = ''params = params or {}headers = headers or {}endpoint = self._build_endpoint_url(url, name)self._authenticate(params, headers)process_pool.apply_async(make_delete_request,args=(endpoint, params, headers), callback=callback)", "docstring": "Asynchronous DELETE request with the process pool.", "id": "f9848:c2:m12"} {"signature": "def create_token(self, data, options=None):", "body": "if not options:options = {}options.update({'': self.admin, '': self.debug})claims = self._create_options_claims(options)claims[''] = self.TOKEN_VERSIONclaims[''] = int(time.mktime(time.gmtime()))claims[''] = datareturn self._encode_token(self.secret, claims)", "docstring": "Generates a secure authentication token.\n\nOur token format follows the JSON Web Token (JWT) standard:\nheader.claims.signature\n\nWhere:\n1) 'header' is a stringified, base64-encoded JSON object containing version and algorithm information.\n2) 'claims' is a stringified, base64-encoded JSON object containing a set of claims:\n Library-generated claims:\n 'iat' -> The issued at time in seconds since the epoch as a number\n 'd' -> The arbitrary JSON object supplied by the user.\n User-supplied claims (these are all optional):\n 'exp' (optional) -> The expiration time of this token, as a number of seconds since the epoch.\n 'nbf' (optional) -> The 'not before' time before which the token should be rejected (seconds since the epoch)\n 'admin' (optional) -> If set to true, this client will bypass all security rules (use this to authenticate servers)\n 'debug' (optional) -> 'set to true to make this client receive debug information about security rule execution.\n 'simulate' (optional, internal-only for now) -> Set to true to neuter all API operations (listens / puts\n will run security rules but not actually write or return data).\n3) A signature that proves the validity of this token (see: http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-07)\n\nFor base64-encoding we use URL-safe base64 encoding. This ensures that the entire token is URL-safe\nand could, for instance, be placed as a query argument without any encoding (and this is what the JWT spec requires).\n\nArgs:\n data - a json serializable object of data to be included in the token\n options - An optional dictionary of additional claims for the token. 
Possible keys include:\n a) 'expires' -- A timestamp (as a number of seconds since the epoch) denoting a time after which\n this token should no longer be valid.\n b) 'notBefore' -- A timestamp (as a number of seconds since the epoch) denoting a time before\n which this token should be rejected by the server.\n c) 'admin' -- Set to true to bypass all security rules (use this for your trusted servers).\n d) 'debug' -- Set to true to enable debug mode (so you can see the results of Rules API operations)\n e) 'simulate' -- (internal-only for now) Set to true to neuter all API operations (listens / puts\n will run security rules but not actually write or return data)\nReturns:\n A signed Firebase Authentication Token\nRaises:\n ValueError: if an invalid key is specified in options", "id": "f9849:c0:m1"} {"signature": "@atexit.registerdef close_process_pool():", "body": "process_pool.close()process_pool.join()process_pool.terminate()", "docstring": "Clean up function that closes and terminates the process pool\ndefined in the ``async`` file.", "id": "f9850:m0"} {"signature": "@classmethoddef _create_class_proxy(cls, theclass):", "body": "def make_method(name):def method(self, *args, **kw):return getattr(object.__getattribute__(self, \"\")(), name)(*args, **kw)return methodnamespace = {}for name in cls._special_names:if hasattr(theclass, name):namespace[name] = make_method(name)return type(\"\" % (cls.__name__, theclass.__name__), (cls,), namespace)", "docstring": "creates a proxy for the given class", "id": "f9853:c0:m9"} {"signature": "def __new__(cls, obj, *args, **kwargs):", "body": "try:cache = cls.__dict__[\"\"]except KeyError:cls._class_proxy_cache = cache = {}try:theclass = cache[obj.__class__]except KeyError:cache[obj.__class__] = theclass = cls._create_class_proxy(obj.__class__)ins = object.__new__(theclass)theclass.__init__(ins, obj, *args, **kwargs)return ins", "docstring": "creates a proxy instance referencing `obj`. 
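The `create_token` docstring above spells out the JWT layout it produces: URL-safe base64 `header.claims.signature`, with library-generated `iat` and `d` claims plus the optional flags. A generic sketch of that construction using only the standard library; the header fields and claim names beyond `iat`/`d`/`admin`/`debug` follow the usual JWT conventions and are not necessarily the library's exact `_encode_token` output:

```python
import base64, hashlib, hmac, json, time

def _b64url(raw: bytes) -> str:
    # URL-safe base64 without padding, so the token can sit in a query string untouched.
    return base64.urlsafe_b64encode(raw).rstrip(b'=').decode('ascii')

def create_token(secret: str, data: dict, admin: bool = False, debug: bool = False) -> str:
    header = {'typ': 'JWT', 'alg': 'HS256'}
    claims = {'iat': int(time.time()), 'd': data, 'admin': admin, 'debug': debug}
    signing_input = _b64url(json.dumps(header).encode()) + '.' + _b64url(json.dumps(claims).encode())
    signature = hmac.new(secret.encode(), signing_input.encode(), hashlib.sha256).digest()
    return signing_input + '.' + _b64url(signature)

print(create_token('my-firebase-secret', {'uid': '1'}))
```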
(obj, *args, **kwargs) are\npassed to this class' __init__, so deriving classes can define an\n__init__ method of their own.\nnote: _class_proxy_cache is unique per deriving class (each deriving\nclass must hold its own cache)", "id": "f9853:c0:m10"} {"signature": "def http_connection(timeout):", "body": "def wrapper(f):def wrapped(*args, **kwargs):if not ('' in kwargs) or not kwargs['']:connection = requests.Session()kwargs[''] = connectionelse:connection = kwargs['']if not getattr(connection, '', False):connection.timeout = timeoutconnection.headers.update({'': ''})return f(*args, **kwargs)return wraps(f)(wrapped)return wrapper", "docstring": "Decorator function that injects a requests.Session instance into\nthe decorated function's actual parameters if not given.", "id": "f9854:m0"} {"signature": "def get():", "body": "config = {}try:config = _load_config()except IOError:try:_create_default_config()config = _load_config()except IOError as e:raise ConfigError(_FILE_CREATION_ERROR.format(e.args[]))except SyntaxError as e:raise ConfigError(_JSON_SYNTAX_ERROR.format(e.args[]))except Exception:raise ConfigError(_JSON_SYNTAX_ERROR.format(''))try:_validate(config)except KeyError as e:raise ConfigError(_MANDATORY_KEY_ERROR.format(e.args[]))except SyntaxError as e:raise ConfigError(_INVALID_KEY_ERROR.format(e.args[]))except ValueError as e:raise ConfigError(_INVALID_VALUE_ERROR.format(e.args[]))config[''] = os.path.expanduser(config[''])_complete_config(config)return config", "docstring": "Only API function for the config module.\n\n :return: {dict} loaded validated configuration.", "id": "f9873:m0"} {"signature": "def _load_config():", "body": "config_path = _get_config_path()with open(config_path, '') as f:return yaml.safe_load(f)", "docstring": "Config loading\n Raises:\n IOError on missing config file\n SyntaxError on invalid json syntax\n :return: {dict} loaded but unvalidated config", "id": "f9873:m3"} {"signature": "def _validate(config):", "body": "for mandatory_key in _mandatory_keys:if mandatory_key not in config:raise KeyError(mandatory_key)for key in config.keys():if key not in _mandatory_keys and key not in _optional_keys:raise SyntaxError(key)if not isinstance(config[key], _default_config[key].__class__):raise ValueError(key)", "docstring": "Config validation\n Raises:\n KeyError on missing mandatory key\n SyntaxError on invalid key\n ValueError on invalid value for key\n :param config: {dict} config to validate\n :return: None", "id": "f9873:m4"} {"signature": "def _create_default_config():", "body": "config_path = _get_config_path()with open(config_path, '') as f:yaml.dump(_default_config, f, default_flow_style=False)", "docstring": "Writes the full default configuration to the appropriate place.\n Raises:\n IOError - on unsuccesful file write\n :return: None", "id": "f9873:m5"} {"signature": "def get_data_for_root(project_root):", "body": "raw_nodes = file_handler.get_node_list(project_root)command_tree = command_processor.generate_command_tree(raw_nodes)command_processor.flatten_commands(command_tree)command_processor.process_variables(command_tree)return command_tree", "docstring": "This is the only API function of the projectfile module. 
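`_validate` above enforces three rules on the loaded configuration: every mandatory key must be present (`KeyError`), no unknown keys may appear (`SyntaxError`), and each value must have the same type as its default (`ValueError`). A standalone sketch of that check; the key names and default values are invented here because the originals are elided:

```python
# Hypothetical defaults; the real key names/values are elided in the records above.
_default_config = {'projects-path': '~/projects', 'max-doc-width': 80}
_mandatory_keys = ['projects-path']
_optional_keys = ['max-doc-width']

def validate(config):
    """Raise KeyError / SyntaxError / ValueError as described in the _validate docstring."""
    for mandatory_key in _mandatory_keys:
        if mandatory_key not in config:
            raise KeyError(mandatory_key)           # missing mandatory key
    for key, value in config.items():
        if key not in _mandatory_keys and key not in _optional_keys:
            raise SyntaxError(key)                  # unknown key
        if not isinstance(value, _default_config[key].__class__):
            raise ValueError(key)                   # value has the wrong type
```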
It parses the Projectfiles\n from the given path and assembles the flattened command data structure.\n\n Returned data: {\n 'min-version': (1, 0, 0),\n 'description': 'Optional main description.',\n 'commands': {\n 'command_1': {\n 'description': 'Optional command level description for command_1.',\n 'script': [\n 'flattened',\n 'out command',\n 'list for',\n 'command_1',\n ...\n ]\n }\n ...\n }\n }\n\n Raises:\n ProjectfileError with descriptive error message in the format of:\n {\n 'path': 'Optional path for the corresponding Projectfile.',\n 'line': 'Optional line number for the error in the Projectfile.',\n 'error': 'Mandatory descriptive error message.'\n }\n\n\n :param project_root:\n :return: {dict} parsed and flattened commands with descriptions", "id": "f9875:m0"} {"signature": "def _data_integrity_check(data):", "body": "deps = []for command in data['']:if '' in data[''][command]:for d in data[''][command]['']:deps.append({'': d,'': command})for d in deps:if d[''] not in data['']:raise error.ProjectfileError({'': error.PROJECTFILE_INVALID_DEPENDENCY.format(d[''], d[''])})", "docstring": "Checks if all command dependencies refers to and existing command. If not, a ProjectfileError\n will be raised with the problematic dependency and it's command.\n\n :param data: parsed raw data set.\n :return: None", "id": "f9880:m3"} {"signature": "def __init__(self, config_path, prog_version=None,use_base_defaults=False, defaults=None):", "body": "self.config_path = config_pathself.config = self.read_config()self.defaults = defaults if defaultselse self.defaults if use_base_defaults else Noneself.prog_version = prog_version if prog_version else self.config.get(\"\", {}).get(\"\")", "docstring": ":param config_path:\n:param use_base_defaults:\n:param defaults:", "id": "f9890:c0:m0"} {"signature": "def apply_defaults(self, commands):", "body": "for command in commands:if '' in command and \"\" in command['']:command[''] = eval(\"\".format(command['']))if command[''][].startswith(''):if '' not in command:command[''] = False", "docstring": "apply default settings to commands\n not static, shadow \"self\" in eval", "id": "f9890:c0:m2"} {"signature": "def create_commands(self, commands, parser):", "body": "self.apply_defaults(commands)def create_single_command(command):keys = command['']del command['']kwargs = {}for item in command:kwargs[item] = command[item]parser.add_argument(*keys, **kwargs)if len(commands) > :for command in commands:create_single_command(command)else:create_single_command(commands[])", "docstring": "add commands to parser", "id": "f9890:c0:m3"} {"signature": "def create_subparsers(self, parser):", "body": "subparsers = parser.add_subparsers()for name in self.config['']:subparser = subparsers.add_parser(name)self.create_commands(self.config[''][name], subparser)", "docstring": "get config for subparser and create commands", "id": "f9890:c0:m4"} {"signature": "def show_version(self):", "body": "class ShowVersionAction(argparse.Action):def __init__(inner_self, nargs=, **kw):super(ShowVersionAction, inner_self).__init__(nargs=nargs, **kw)def __call__(inner_self, parser, args, value, option_string=None):print(\"\".format(parser_name=self.config.get(\"\", {}).get(\"\"),version=self.prog_version))return ShowVersionAction", "docstring": "custom command line action to show version", "id": "f9890:c0:m7"} {"signature": "def check_path_action(self):", "body": "class CheckPathAction(argparse.Action):def __call__(self, parser, args, value, option_string=None):if type(value) is list:value = 
value[]user_value = valueif option_string == '':if not os.path.isdir(value):_current_user = os.path.expanduser(\"\")if not value.startswith(_current_user)and not value.startswith(os.getcwd()):if os.path.isdir(os.path.join(_current_user, value)):value = os.path.join(_current_user, value)elif os.path.isdir(os.path.join(os.getcwd(), value)):value = os.path.join(os.getcwd(), value)else:value = Noneelse:value = Noneelif option_string == '':if not os.path.isdir(value):if not os.path.isdir(os.path.join(args.target, value)):value = Noneif not value:logger.error(\"\"\"\",user_value, option_string)exit()setattr(args, self.dest, value)return CheckPathAction", "docstring": "custom command line action to check file exist", "id": "f9890:c0:m8"} {"signature": "def status_printer():", "body": "last_len = []def p(s):s = next(spinner) + '' + slen_s = len(s)output = '' + s + ('' * max(last_len[] - len_s, ))sys.stdout.write(output)sys.stdout.flush()last_len[] = len_sreturn p", "docstring": "Manage the printing and in-place updating of a line of characters\n\n .. note::\n If the string is longer than a line, then in-place updating may not\n work (it will print a new line at each refresh).", "id": "f9897:m1"} {"signature": "def get_or_guess_paths_to_mutate(paths_to_mutate):", "body": "if paths_to_mutate is None:this_dir = os.getcwd().split(os.sep)[-]if isdir(''):return ''elif isdir(''):return ''elif isdir(this_dir):return this_direlif isdir(this_dir.replace('', '')):return this_dir.replace('', '')elif isdir(this_dir.replace('', '')):return this_dir.replace('', '')elif isdir(this_dir.replace('', '')):return this_dir.replace('', '')elif isdir(this_dir.replace('', '')):return this_dir.replace('', '')else:raise FileNotFoundError('''''')else:return paths_to_mutate", "docstring": ":type paths_to_mutate: str or None\n:rtype: str", "id": "f9897:m2"} {"signature": "def do_apply(mutation_pk, dict_synonyms, backup):", "body": "filename, mutation_id = filename_and_mutation_id_from_pk(int(mutation_pk))update_line_numbers(filename)context = Context(mutation_id=mutation_id,filename=filename,dict_synonyms=dict_synonyms,)mutate_file(backup=backup,context=context,)if context.number_of_performed_mutations == :raise RuntimeError('')", "docstring": "Apply a specified mutant to the source code\n\n :param mutation_pk: mutmut cache primary key of the mutant to apply\n :type mutation_pk: str\n\n :param dict_synonyms: list of synonym keywords for a python dictionary\n :type dict_synonyms: list[str]\n\n :param backup: if :obj:`True` create a backup of the source file\n before applying the mutation\n :type backup: bool", "id": "f9897:m3"} {"signature": "def popen_streaming_output(cmd, callback, timeout=None):", "body": "if os.name == '': process = subprocess.Popen(shlex.split(cmd),stdout=subprocess.PIPE,stderr=subprocess.PIPE)stdout = process.stdoutelse:master, slave = os.openpty()process = subprocess.Popen(shlex.split(cmd, posix=True),stdout=slave,stderr=slave)stdout = os.fdopen(master)os.close(slave)def kill(process_):\"\"\"\"\"\"try:process_.kill()except OSError:passtimer = Timer(timeout, kill, [process])timer.setDaemon(True)timer.start()while process.returncode is None:try:if os.name == '': line = stdout.readline()line = line.decode(\"\")if line: callback(line.rstrip())else:while True:line = stdout.readline()if not line:breakcallback(line.rstrip())except (IOError, OSError):passif not timer.is_alive():raise TimeoutError(\"\".format(cmd, timeout))process.poll()timer.cancel()return process.returncode", "docstring": "Open a subprocess and 
stream its output without hard-blocking.\n\n :param cmd: the command to execute within the subprocess\n :type cmd: str\n\n :param callback: function that intakes the subprocess' stdout line by line.\n It is called for each line received from the subprocess' stdout stream.\n :type callback: Callable[[Context], bool]\n\n :param timeout: the timeout time of the subprocess\n :type timeout: float\n\n :raises TimeoutError: if the subprocess' execution time exceeds\n the timeout time\n\n :return: the return code of the executed subprocess\n :rtype: int", "id": "f9897:m6"} {"signature": "def run_mutation(config, filename, mutation_id):", "body": "context = Context(mutation_id=mutation_id,filename=filename,exclude=config.exclude_callback,dict_synonyms=config.dict_synonyms,config=config,)cached_status = cached_mutation_status(filename, mutation_id, config.hash_of_tests)if cached_status == BAD_SURVIVED:config.surviving_mutants += elif cached_status == BAD_TIMEOUT:config.surviving_mutants_timeout += elif cached_status == OK_KILLED:config.killed_mutants += elif cached_status == OK_SUSPICIOUS:config.suspicious_mutants += else:assert cached_status == UNTESTED, cached_statusconfig.print_progress()if cached_status != UNTESTED:return cached_statusif config.pre_mutation:result = subprocess.check_output(config.pre_mutation, shell=True).decode().strip()if result:print(result)try:number_of_mutations_performed = mutate_file(backup=True,context=context)assert number_of_mutations_performedstart = time()try:survived = tests_pass(config)except TimeoutError:context.config.surviving_mutants_timeout += return BAD_TIMEOUTtime_elapsed = time() - startif time_elapsed > config.test_time_base + (config.baseline_time_elapsed * config.test_time_multipler):config.suspicious_mutants += return OK_SUSPICIOUSif survived:context.config.surviving_mutants += return BAD_SURVIVEDelse:context.config.killed_mutants += return OK_KILLEDfinally:move(filename + '', filename)if config.post_mutation:result = subprocess.check_output(config.post_mutation, shell=True).decode().strip()if result:print(result)", "docstring": ":type config: Config\n:type filename: str\n:type mutation_id: MutationID\n:return: (computed or cached) status of the tested mutant\n:rtype: str", "id": "f9897:m8"} {"signature": "def read_coverage_data():", "body": "print('')from coverage import Coveragecov = Coverage('')cov.load()return cov.get_data()", "docstring": ":rtype: CoverageData or None", "id": "f9897:m11"} {"signature": "def add_mutations_by_file(mutations_by_file, filename, exclude, dict_synonyms):", "body": "with open(filename) as f:source = f.read()context = Context(source=source,filename=filename,exclude=exclude,dict_synonyms=dict_synonyms,)try:mutations_by_file[filename] = list_mutations(context)register_mutants(mutations_by_file)except Exception as e:raise RuntimeError('' % (context.filename, context.current_source_line), e)", "docstring": ":type mutations_by_file: dict[str, list[MutationID]]\n:type filename: str\n:type exclude: Callable[[Context], bool]\n:type dict_synonyms: list[str]", "id": "f9897:m14"} {"signature": "def compute_exit_code(config, exception=None):", "body": "code = if exception is not None:code = code | if config.surviving_mutants > :code = code | if config.surviving_mutants_timeout > :code = code | if config.suspicious_mutants > :code = code | return code", "docstring": "Compute an exit code for mutmut mutation testing\n\n The following exit codes are available for mutmut:\n * 0 if all mutants were killed (OK_KILLED)\n * 1 if a fatal error 
occurred\n * 2 if one or more mutants survived (BAD_SURVIVED)\n * 4 if one or more mutants timed out (BAD_TIMEOUT)\n * 8 if one or more mutants caused tests to take twice as long (OK_SUSPICIOUS)\n\n Exit codes 1 to 8 will be bit-ORed so that it is possible to know what\n different mutant statuses occurred during mutation testing.\n\n :param exception:\n :type exception: Exception\n :param config:\n :type config: Config\n\n :return: integer noting the exit code of the mutation tests.\n :rtype: int", "id": "f9897:m16"} {"signature": "def argument_mutation(children, context, **_):", "body": "if len(context.stack) >= and context.stack[-].type in ('', ''):stack_pos_of_power_node = -elif len(context.stack) >= and context.stack[-].type in ('', ''):stack_pos_of_power_node = -else:returnpower_node = context.stack[stack_pos_of_power_node]if power_node.children[].type == '' and power_node.children[].value in context.dict_synonyms:c = children[]if c.type == '':children = children[:]children[] = Name(c.value + '', start_pos=c.start_pos, prefix=c.prefix)return children", "docstring": ":type context: Context", "id": "f9898:m5"} {"signature": "def mutate(context):", "body": "try:result = parse(context.source, error_recovery=False)except Exception:print('' % context.filename)print('')raisemutate_list_of_nodes(result, context=context)mutated_source = result.get_code().replace('', '')if context.remove_newline_at_end:assert mutated_source[-] == ''mutated_source = mutated_source[:-]if context.number_of_performed_mutations:assert context.source != mutated_sourcecontext.mutated_source = mutated_sourcereturn mutated_source, context.number_of_performed_mutations", "docstring": ":type context: Context\n:return: tuple: mutated source code, number of mutations performed\n:rtype: tuple[str, int]", "id": "f9898:m12"} {"signature": "def mutate_node(node, context):", "body": "context.stack.append(node)try:if node.type in ('', '', ''):returnif node.start_pos[] - != context.current_line_index:context.current_line_index = node.start_pos[] - context.index = if hasattr(node, ''):mutate_list_of_nodes(node, context=context)if context.number_of_performed_mutations and context.mutation_id != ALL:returnmutation = mutations_by_type.get(node.type)if mutation is None:returnfor key, value in sorted(mutation.items()):old = getattr(node, key)if context.exclude_line():continuenew = evaluate(value,context=context,node=node,value=getattr(node, '', None),children=getattr(node, '', None),)assert not callable(new)if new is not None and new != old:if context.should_mutate():context.number_of_performed_mutations += context.performed_mutation_ids.append(context.mutation_id_of_current_index)setattr(node, key, new)context.index += if context.number_of_performed_mutations and context.mutation_id != ALL:returnfinally:context.stack.pop()", "docstring": ":type context: Context", "id": "f9898:m13"} {"signature": "def mutate_list_of_nodes(node, context):", "body": "return_annotation_started = Falsefor child_node in node.children:if child_node.type == '' and child_node.value == '':return_annotation_started = Trueif return_annotation_started and child_node.type == '' and child_node.value == '':return_annotation_started = Falseif return_annotation_started:continuemutate_node(child_node, context=context)if context.number_of_performed_mutations and context.mutation_id != ALL:return", "docstring": ":type context: Context", "id": "f9898:m14"} {"signature": "def count_mutations(context):", "body": "assert context.mutation_id == ALLmutate(context)return 
context.number_of_performed_mutations", "docstring": ":type context: Context", "id": "f9898:m15"} {"signature": "def list_mutations(context):", "body": "assert context.mutation_id == ALLmutate(context)return context.performed_mutation_ids", "docstring": ":type context: Context", "id": "f9898:m16"} {"signature": "def mutate_file(backup, context):", "body": "with open(context.filename) as f:code = f.read()context.source = codeif backup:with open(context.filename + '', '') as f:f.write(code)result, number_of_mutations_performed = mutate(context)with open(context.filename, '') as f:f.write(result)return number_of_mutations_performed", "docstring": ":type backup: bool\n:type context: Context", "id": "f9898:m17"} {"signature": "@patch(\"\")def server(self, args, run=None):", "body": "args = [sys.argv[]] + argsresult = StringIO()with patch(\"\", result):with mock.patch('', args):nemo, app = Server.cmd()result_string = result.getvalue()return nemo, app, result_string, run", "docstring": "Run args (Splitted list of command line)\n ..note:: See https://wrongsideofmemphis.wordpress.com/2010/03/01/store-standard-output-on-a-variable-in-python/\n :param args: List of commandline arguments\n :return: Sys stdout, status", "id": "f9921:c0:m0"} {"signature": "def resolve(self, uri):", "body": "for r in self.__retrievers__:if r.match(uri):return rraise UnresolvableURIError()", "docstring": "Resolve a Resource identified by URI\n :param uri: The URI of the resource to be resolved\n :type uri: str\n :return: the contents of the resource as a string\n :rtype: str", "id": "f9924:c1:m1"} {"signature": "def match(self, uri):", "body": "return False", "docstring": "Check to see if this URI is retrievable by this Retriever implementation\n :param uri: the URI of the resource to be retrieved\n :type uri: str\n :return: True if it can be, False if not\n :rtype: bool", "id": "f9924:c2:m0"} {"signature": "def read(self, uri):", "body": "return None, \"\"", "docstring": "Retrieve the contents of the resource\n :param uri: the URI of the resource to be retrieved\n :type uri: str\n :return: the contents of the resource and it's mime type in a tuple\n :rtype: str, str", "id": "f9924:c2:m1"} {"signature": "def match(self, uri):", "body": "return HTTPRetriever.__reg_exp__.match(uri) is not None", "docstring": "Check to see if this URI is retrievable by this Retriever implementation\n\n :param uri: the URI of the resource to be retrieved\n :type uri: str\n :return: True if it can be, False if not\n :rtype: bool", "id": "f9924:c3:m0"} {"signature": "def read(self, uri):", "body": "req = request(\"\", uri)return req.content, req.headers['']", "docstring": "Retrieve the contents of the resource\n\n :param uri: the URI of the resource to be retrieved\n :type uri: str\n :return: the contents of the resource\n :rtype: str", "id": "f9924:c3:m1"} {"signature": "def __absolute__(self, uri):", "body": "return op.abspath(op.join(self.__path__, uri))", "docstring": "Get the absolute uri for a file\n\n :param uri: URI of the resource to be retrieved\n :return: Absolute Path", "id": "f9924:c4:m1"} {"signature": "def match(self, uri):", "body": "absolute_uri = self.__absolute__(uri)return absolute_uri.startswith(self.__path__) and op.exists(absolute_uri)", "docstring": "Check to see if this URI is retrievable by this Retriever implementation\n\n :param uri: the URI of the resource to be retrieved\n :type uri: str\n :return: True if it can be, False if not\n :rtype: bool", "id": "f9924:c4:m2"} {"signature": "def read(self, uri):", "body": 
"uri = self.__absolute__(uri)mime, _ = guess_type(uri)if \"\" in mime:return send_file(uri), mimeelse:with open(uri, \"\") as f:file = f.read()return file, mime", "docstring": "Retrieve the contents of the resource\n\n :param uri: the URI of the resource to be retrieved\n :type uri: str\n :return: the contents of the resource\n :rtype: str", "id": "f9924:c4:m3"} {"signature": "@staticmethoddef match(uri):", "body": "return CTSRetriever.__reg_exp__.match(uri) is not None", "docstring": "Check to see if this URI is retrievable by this Retriever implementation\n\n :param uri: the URI of the resource to be retrieved\n :type uri: str\n :return: True if it can be, False if not\n :rtype: bool", "id": "f9924:c5:m1"} {"signature": "def read(self, uri):", "body": "return self.__resolver__.getTextualNode(uri).export(Mimetypes.XML.TEI), \"\"", "docstring": "Retrieve the contents of the resource\n\n :param uri: the URI of the resource to be retrieved\n :type uri: str\n :return: the contents of the resource\n :rtype: str", "id": "f9924:c5:m2"} {"signature": "def process(self, nemo):", "body": "self.__nemo__ = nemofor annotation in self.__annotations__:annotation.target.expanded = frozenset(self.__getinnerreffs__(objectId=annotation.target.objectId,subreference=annotation.target.subreference))", "docstring": "Register nemo and parses annotations\n\n .. note:: Process parses the annotation and extends informations about the target URNs by retrieving resource in range\n\n :param nemo: Nemo", "id": "f9925:c0:m2"} {"signature": "def __get_resource_metadata__(self, objectId):", "body": "return self.textResolver.getMetadata(objectId)", "docstring": "Return a metadata text object\n\n :param objectId: objectId of the text\n :return: Text", "id": "f9925:c0:m3"} {"signature": "def __getinnerreffs__(self, objectId, subreference):", "body": "level = yield subreferencewhile level > -:reffs = self.__nemo__.resolver.getReffs(objectId,subreference=subreference,level=level)if len(reffs) == :breakelse:for r in reffs:yield rlevel += ", "docstring": "Resolve the list of urns between in a range\n\n :param text_metadata: Resource Metadata\n :param objectId: ID of the Text\n :type objectId: str\n :param subreference: ID of the Text\n :type subreference: str\n :return: References in the span", "id": "f9925:c0:m7"} {"signature": "def to_json(self):", "body": "if self.subreference is not None:return {\"\": self.objectId,\"\": {\"\": \"\",\"\": \"\",\"\": self.subreference}}else:return {\"\": self.objectId}", "docstring": "Method to call to get a serializable object for json.dump or jsonify based on the target\n\n :return: dict", "id": "f9926:c0:m3"} {"signature": "def read(self):", "body": "if not self.__content__:self.__retriever__ = self.__resolver__.resolve(self.uri)self.__content__, self.__mimetype__ = self.__retriever__.read(self.uri)return self.__content__", "docstring": "Read the contents of the Annotation Resource\n\n :return: the contents of the resource\n :rtype: str or bytes or flask.response", "id": "f9926:c1:m8"} {"signature": "def expand(self):", "body": "return []", "docstring": "Expand the contents of the Annotation if it is expandable (i.e. 
if it references multiple resources)\n\n :return: the list of expanded resources\n :rtype: list(AnnotationResource)", "id": "f9926:c1:m9"} {"signature": "def getAnnotations(self, targets, wildcard=\"\", include=None, exclude=None, limit=None, start=, expand=False,**kwargs):", "body": "return , []", "docstring": "Retrieve annotations from the query provider\n\n :param targets: The CTS URN(s) to query as the target of annotations\n :type targets: [MyCapytain.common.reference.URN], URN or None\n :param wildcard: Wildcard specifier for how to match the URN\n :type wildcard: str\n :param include: URI(s) of Annotation types to include in the results\n :type include: list(str)\n :param exclude: URI(s) of Annotation types to include in the results\n :type exclude: list(str)\n :param limit: The max number of results to return (Default is None for no limit)\n :type limit: int\n :param start: the starting record to return (Default is 1)\n :type start: int \n :param expand: Flag to state whether Annotations are expanded (Default is False)\n :type expand: bool\n\n :return: Tuple representing the query results. The first element\n The first element is the number of total Annotations found\n The second element is the list of Annotations\n :rtype: (int, list(Annotation)\n\n .. note::\n\n Wildcard should be one of the following value\n\n - '.' to match exact,\n - '.%' to match exact plus lower in the hierarchy\n - '%.' to match exact + higher in the hierarchy\n - '-' to match in the range\n - '%.%' to match all", "id": "f9927:c0:m1"} {"signature": "def getResource(self, sha):", "body": "return None", "docstring": "Retrieve a single annotation resource by sha\n\n :param sha: The sha of the resource\n :type sha: str\n :return: the requested annotation resource\n :rtype: AnnotationResource", "id": "f9927:c0:m2"} {"signature": "def f_slugify(string):", "body": "return slugify(string)", "docstring": "Slugify a string\n\n :param string: String to slugify\n :return: Slugified string", "id": "f9929:m0"} {"signature": "def f_formatting_passage_reference(string):", "body": "return string.split(\"\")[]", "docstring": "Get the first part only of a two parts reference\n\n :param string: A urn reference part\n :type string: str\n :return: First part only of the two parts reference\n :rtype: str", "id": "f9929:m1"} {"signature": "def f_i18n_iso(isocode, lang=\"\"):", "body": "if lang not in flask_nemo._data.AVAILABLE_TRANSLATIONS:lang = \"\"try:return flask_nemo._data.ISOCODES[isocode][lang]except KeyError:return \"\"", "docstring": "Replace isocode by its language equivalent\n\n :param isocode: Three character long language code\n :param lang: Lang in which to return the language name\n :return: Full Text Language Name", "id": "f9929:m2"} {"signature": "def f_order_resource_by_lang(versions_list):", "body": "return sorted(versions_list, key=itemgetter(\"\"))", "docstring": "Takes a list of versions and put translations after editions\n\n :param versions_list: List of text versions\n :type versions_list: [Text]\n :return: List where first members will be editions\n :rtype: [Text]", "id": "f9929:m3"} {"signature": "def f_hierarchical_passages(reffs, citation):", "body": "d = OrderedDict()levels = [x for x in citation]for cit, name in reffs:ref = cit.split('')[]levs = [''.format(levels[i].name, v) for i, v in enumerate(ref.split(''))]getFromDict(d, levs[:-])[name] = citreturn d", "docstring": "A function to construct a hierarchical dictionary representing the different citation layers of a text\n\n :param reffs: passage 
references with human-readable equivalent\n :type reffs: [(str, str)]\n :param citation: Main Citation\n :type citation: Citation\n :return: nested dictionary representing where keys represent the names of the levels and the final values represent the passage reference\n :rtype: OrderedDict", "id": "f9929:m4"} {"signature": "def f_is_str(value):", "body": "return isinstance(value, str)", "docstring": "Check if object is a string\n\n :param value: object to check against\n :return: Return if value is a string", "id": "f9929:m5"} {"signature": "def f_i18n_citation_type(string, lang=\"\"):", "body": "s = \"\".join(string.strip(\"\").split(\"\"))return s.capitalize()", "docstring": "Take a string of form %citation_type|passage% and format it for human\n\n :param string: String of formation %citation_type|passage%\n :param lang: Language to translate to\n :return: Human Readable string\n\n .. note :: To Do : Use i18n tools and provide real i18n", "id": "f9929:m6"} {"signature": "def f_annotation_filter(annotations, type_uri, number):", "body": "filtered = [annotationfor annotation in annotationsif annotation.type_uri == type_uri]number = min([len(filtered), number])if number == :return Noneelse:return filtered[number-]", "docstring": "Annotation filtering filter\n\n :param annotations: List of annotations\n :type annotations: [AnnotationResource]\n :param type_uri: URI Type on which to filter\n :type type_uri: str\n :param number: Number of the annotation to return\n :type number: int\n :return: Annotation(s) matching the request\n :rtype: [AnnotationResource] or AnnotationResource", "id": "f9929:m7"} {"signature": "def render(self, **kwargs):", "body": "breadcrumbs = []breadcrumbs = []if \"\" in kwargs:breadcrumbs = [{\"\": \"\",\"\": \"\",\"\": {}}]if \"\" in kwargs[\"\"]:breadcrumbs += [{\"\": parent[\"\"],\"\": \"\",\"\": {\"\": parent[\"\"],\"\": f_slugify(parent[\"\"]),},}for parent in kwargs[\"\"][\"\"]][::-]if \"\" in kwargs[\"\"]:breadcrumbs.append({\"\": kwargs[\"\"][\"\"][\"\"],\"\": None,\"\": {}})if len(breadcrumbs) > :breadcrumbs[-][\"\"] = Nonereturn {\"\": breadcrumbs}", "docstring": "Make breadcrumbs for a route\n\n :param kwargs: dictionary of named arguments used to construct the view\n :type kwargs: dict\n :return: List of dict items the view can use to construct the link.\n :rtype: {str: list({ \"link\": str, \"title\", str, \"args\", dict})}", "id": "f9930:c0:m0"} {"signature": "def r_annotations(self):", "body": "target = request.args.get(\"\", None)wildcard = request.args.get(\"\", \"\", type=str)include = request.args.get(\"\")exclude = request.args.get(\"\")limit = request.args.get(\"\", None, type=int)start = request.args.get(\"\", , type=int)expand = request.args.get(\"\", False, type=bool)if target:try:urn = MyCapytain.common.reference.URN(target)except ValueError:return \"\", count, annotations = self.__queryinterface__.getAnnotations(urn, wildcard=wildcard, include=include,exclude=exclude, limit=limit, start=start,expand=expand)else:count, annotations = self.__queryinterface__.getAnnotations(None, limit=limit, start=start, expand=expand)mapped = []response = {\"\": type(self).JSONLD_CONTEXT,\"\": url_for(\"\", start=start, limit=limit),\"\": \"\",\"\": start,\"\": [],\"\": count}for a in annotations:mapped.append({\"\": url_for(\"\", sha=a.sha),\"\": url_for(\"\", sha=a.sha),\"\": \"\",\"\": a.target.to_json(),\"\": a.type_uri,\"\": [a.uri],\"\": a.slug})response[\"\"] = mappedresponse = jsonify(response)return response", "docstring": "Route to retrieve 
annotations by target\n\n :param target_urn: The CTS URN for which to retrieve annotations \n :type target_urn: str\n :return: a JSON string containing count and list of resources\n :rtype: {str: Any}", "id": "f9931:c0:m1"} {"signature": "def r_annotation(self, sha):", "body": "annotation = self.__queryinterface__.getResource(sha)if not annotation:return \"\", response = {\"\": type(self).JSONLD_CONTEXT,\"\": url_for(\"\", sha=annotation.sha),\"\": url_for(\"\", sha=annotation.sha),\"\": \"\",\"\": annotation.target.to_json(),\"\": [annotation.uri],\"\": annotation.type_uri,\"\": annotation.slug}return jsonify(response)", "docstring": "Route to retrieve contents of an annotation resource\n\n :param uri: The uri of the annotation resource\n :type uri: str\n :return: annotation contents\n :rtype: {str: Any}", "id": "f9931:c0:m2"} {"signature": "def r_annotation_body(self, sha):", "body": "annotation = self.__queryinterface__.getResource(sha)if not annotation:return \"\", content = annotation.read()if isinstance(content, Response):return contentheaders = {\"\": annotation.mimetype}return Response(content, headers=headers)", "docstring": "Route to retrieve contents of an annotation resource\n\n :param uri: The uri of the annotation resource\n :type uri: str\n :return: annotation contents\n :rtype: {str: Any}", "id": "f9931:c0:m3"} {"signature": "def resource_qualifier(resource):", "body": "if resource.startswith(\"\") or resource.startswith(\"\"):return resource, Noneelse:return reversed(op.split(resource))", "docstring": "Split a resource in (filename, directory) tuple with taking care of external resources\n\n :param resource: A file path or a URI\n :return: (Filename, Directory) for files, (URI, None) for URI", "id": "f9932:m0"} {"signature": "def join_or_single(start, end):", "body": "if start == end:return startelse:return \"\".format(start,end)", "docstring": "Join passages range. 
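`join_or_single` above collapses a passage range into a single reference when both ends are identical, and otherwise joins the two ends; the grouping chunkers further down use it to label spans. A tiny sketch of the idea, under the assumption that the elided format string joins the ends with a dash:

```python
def join_or_single(start, end):
    # Single reference when the range collapses; "start-end" otherwise (separator assumed).
    return start if start == end else '{}-{}'.format(start, end)

print(join_or_single('1.1', '1.1'))   # 1.1
print(join_or_single('1.1', '1.20'))  # 1.1-1.20
```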
If they are the same, return a single part of the range\n\n :param start: Start of the passage range\n :param end: End of the passage range\n :return: Finale Passage Chunk Identifier", "id": "f9932:m1"} {"signature": "def getFromDict(dataDict, keyList):", "body": "return reduce(create_hierarchy, keyList, dataDict)", "docstring": "Retrieves and creates when necessary a dictionary in nested dictionaries\n\n :param dataDict: a dictionary\n :param keyList: list of keys\n :return: target dictionary", "id": "f9932:m2"} {"signature": "def create_hierarchy(hierarchy, level):", "body": "if level not in hierarchy:hierarchy[level] = OrderedDict()return hierarchy[level]", "docstring": "Create an OrderedDict\n\n :param hierarchy: a dictionary\n :param level: single key\n :return: deeper dictionary", "id": "f9932:m3"} {"signature": "def default_chunker(text, getreffs):", "body": "level = len(text.citation)return [tuple([reff.split(\"\")[-]]*) for reff in getreffs(level=level)]", "docstring": "This is the default chunker which will resolve the reference giving a callback (getreffs) and a text object with its metadata\n\n :param text: Text Object representing either an edition or a translation\n :type text: MyCapytains.resources.inventory.Text\n :param getreffs: callback function which retrieves a list of references\n :type getreffs: function\n\n :return: List of urn references with their human readable version\n :rtype: [(str, str)]", "id": "f9933:m0"} {"signature": "def scheme_chunker(text, getreffs):", "body": "level = len(text.citation)types = [citation.name for citation in text.citation]if types == [\"\", \"\", \"\"]:level = elif types == [\"\", \"\"]:return line_chunker(text, getreffs)return [tuple([reff.split(\"\")[-]]*) for reff in getreffs(level=level)]", "docstring": "This is the scheme chunker which will resolve the reference giving a callback (getreffs) and a text object with its metadata\n\n :param text: Text Object representing either an edition or a translation\n :type text: MyCapytains.resources.inventory.Text\n :param getreffs: callback function which retrieves a list of references\n :type getreffs: function\n\n :return: List of urn references with their human readable version\n :rtype: [(str, str)]", "id": "f9933:m1"} {"signature": "def line_chunker(text, getreffs, lines=):", "body": "level = len(text.citation)source_reffs = [reff.split(\"\")[-] for reff in getreffs(level=level)]reffs = []i = while i + lines - < len(source_reffs):reffs.append(tuple([source_reffs[i]+\"\"+source_reffs[i+lines-], source_reffs[i]]))i += linesif i < len(source_reffs):reffs.append(tuple([source_reffs[i]+\"\"+source_reffs[len(source_reffs)-], source_reffs[i]]))return reffs", "docstring": "Groups line reference together\n\n :param text: Text object\n :type text: MyCapytains.resources.text.api\n :param getreffs: Callback function to retrieve text\n :type getreffs: function(level)\n :param lines: Number of lines to use by group\n :type lines: int\n :return: List of grouped urn references with their human readable version\n :rtype: [(str, str)]", "id": "f9933:m2"} {"signature": "def level_chunker(text, getreffs, level=):", "body": "references = getreffs(level=level)return [(ref.split(\"\")[-], ref.split(\"\")[-]) for ref in references]", "docstring": "Chunk a text at the passage level\n\n :param text: Text object\n :type text: MyCapytains.resources.text.api\n :param getreffs: Callback function to retrieve text\n :type getreffs: function(level)\n :return: List of urn references with their human readable version\n 
:rtype: [(str, str)]", "id": "f9933:m3"} {"signature": "def level_grouper(text, getreffs, level=None, groupby=):", "body": "if level is None or level > len(text.citation):level = len(text.citation)references = [ref.split(\"\")[-] for ref in getreffs(level=level)]_refs = OrderedDict()for key in references:k = \"\".join(key.split(\"\")[:level-])if k not in _refs:_refs[k] = []_refs[k].append(key)del kreturn [(join_or_single(ref[], ref[-]),join_or_single(ref[], ref[-]))for sublist in _refs.values()for ref in [sublist[i:i+groupby]for i in range(, len(sublist), groupby)]]", "docstring": "Alternative to level_chunker: groups levels together at the latest level\n\n :param text: Text object\n :param getreffs: GetValidReff query callback\n :param level: Level of citation to retrieve\n :param groupby: Number of level to groupby\n :return: Automatically curated references", "id": "f9933:m4"} {"signature": "def _plugin_endpoint_rename(fn_name, instance):", "body": "if instance and instance.namespaced:fn_name = \"\".format(instance.name, fn_name[:])return fn_name", "docstring": "Rename endpoint function name to avoid conflict when namespacing is set to true\n\n :param fn_name: Name of the route function\n :param instance: Instance bound to the function\n :return: Name of the new namespaced function name", "id": "f9934:m0"} {"signature": "@propertydef plugins(self):", "body": "return self.__plugins__", "docstring": "Dictionary of registered plugins\n\n :rtype: dict", "id": "f9934:c0:m1"} {"signature": "@propertydef assets(self):", "body": "return self.__assets__", "docstring": "Dictionary of assets (First level : type, second level resource)\n\n :rtype: dict", "id": "f9934:c0:m2"} {"signature": "@propertydef inventory(self):", "body": "return self.get_inventory()", "docstring": "Root collection of the application\n\n :rtype: Collection", "id": "f9934:c0:m3"} {"signature": "def init_app(self, app=None):", "body": "if app:self.app = appself.register()", "docstring": "Initiate the application\n\n :param app: Flask application on which to add the extension\n :type app: flask.Flask", "id": "f9934:c0:m4"} {"signature": "def get_locale(self):", "body": "best_match = request.accept_languages.best_match(['', '', '', ''])if best_match is None:if len(request.accept_languages) > :best_match = request.accept_languages[][][:]else:return self.__default_lang__lang = self.__default_lang__if best_match == \"\":lang = \"\"elif best_match == \"\":lang = \"\"elif best_match == \"\":lang = \"\"elif best_match == \"\":lang = \"\"return lang", "docstring": "Retrieve the best matching locale using request headers\n\n .. note:: Probably one of the thing to enhance quickly.\n\n :rtype: str", "id": "f9934:c0:m5"} {"signature": "def transform(self, work, xml, objectId, subreference=None):", "body": "if str(objectId) in self._transform:func = self._transform[str(objectId)]else:func = self._transform[\"\"]if isinstance(func, str):with open(func) as f:xslt = etree.XSLT(etree.parse(f))return etree.tostring(xslt(xml),encoding=str, method=\"\",xml_declaration=None, pretty_print=False, with_tail=True, standalone=None)elif isinstance(func, Callable):return func(work, xml, objectId, subreference)elif func is None:return etree.tostring(xml, encoding=str)", "docstring": "Transform input according to potentially registered XSLT\n\n .. note:: Since 1.0.0, transform takes an objectId parameter which represent the passage which is called\n\n .. note:: Due to XSLT not being able to be used twice, we rexsltise the xml at every call of xslt\n\n .. 
warning:: Until a C libxslt error is fixed ( https://bugzilla.gnome.org/show_bug.cgi?id=620102 ), \\\n it is not possible to use strip tags in the xslt given to this application\n\n :param work: Work object containing metadata about the xml\n :type work: MyCapytains.resources.inventory.Text\n :param xml: XML to transform\n :type xml: etree._Element\n :param objectId: Object Identifier\n :type objectId: str\n :param subreference: Subreference\n :type subreference: str\n :return: String representation of transformed resource\n :rtype: str", "id": "f9934:c0:m6"} {"signature": "def get_inventory(self):", "body": "if self._inventory is not None:return self._inventoryself._inventory = self.resolver.getMetadata()return self._inventory", "docstring": "Request the api endpoint to retrieve information about the inventory\n\n :return: Main Collection\n :rtype: Collection", "id": "f9934:c0:m7"} {"signature": "def get_collection(self, objectId):", "body": "return self.inventory[objectId]", "docstring": "Retrieve a collection in the inventory\n\n :param objectId: Collection Identifier\n :type objectId: str\n :return: Requested collection\n :rtype: Collection", "id": "f9934:c0:m8"} {"signature": "def get_reffs(self, objectId, subreference=None, collection=None, export_collection=False):", "body": "if collection is not None:text = collectionelse:text = self.get_collection(objectId)reffs = self.chunk(text,lambda level: self.resolver.getReffs(objectId, level=level, subreference=subreference))if export_collection is True:return text, reffsreturn reffs", "docstring": "Retrieve and transform a list of references.\n\n Returns the inventory collection object with its metadata and a callback function taking a level parameter \\\n and returning a list of strings.\n\n :param objectId: Collection Identifier\n :type objectId: str\n :param subreference: Subreference from which to retrieve children\n :type subreference: str\n :param collection: Collection object bearing metadata\n :type collection: Collection\n :param export_collection: Return collection metadata\n :type export_collection: bool\n :return: Returns either the list of references, or the text collection object with its references as tuple\n :rtype: (Collection, [str]) or [str]", "id": "f9934:c0:m9"} {"signature": "def get_passage(self, objectId, subreference):", "body": "passage = self.resolver.getTextualNode(textId=objectId,subreference=subreference,metadata=True)return passage", "docstring": "Retrieve the passage identified by the parameters\n\n :param objectId: Collection Identifier\n :type objectId: str\n :param subreference: Subreference of the passage\n :type subreference: str\n :return: An object bearing metadata and its text\n :rtype: InteractiveTextualNode", "id": "f9934:c0:m10"} {"signature": "def get_siblings(self, objectId, subreference, passage):", "body": "reffs = [reff for reff, _ in self.get_reffs(objectId)]if subreference in reffs:index = reffs.index(subreference)if < index < len(reffs) - :return reffs[index-], reffs[index+]elif index == and index < len(reffs) - :return None, reffs[]elif index > and index == len(reffs) - :return reffs[index-], Noneelse:return None, Noneelse:return passage.siblingsId", "docstring": "Get siblings of a browsed subreference\n\n .. note:: Since 1.0.0c, there is no more prevnext dict. 
Nemo uses the list of original\\\n chunked references to retrieve next and previous, or simply relies on the resolver to get siblings\\\n when the subreference is not found in given original chunks.\n\n :param objectId: Id of the object\n :param subreference: Subreference of the object\n :param passage: Current Passage\n :return: Previous and next references\n :rtype: (str, str)", "id": "f9934:c0:m11"} {"signature": "def semantic(self, collection, parent=None):", "body": "if parent is not None:collections = parent.parents[::-] + [parent, collection]else:collections = collection.parents[::-] + [collection]return filters.slugify(\"\".join([item.get_label() for item in collections if item.get_label()]))", "docstring": "Generates a SEO friendly string for given collection\n\n :param collection: Collection object to generate string for\n :param parent: Current collection parent\n :return: SEO/URL Friendly string", "id": "f9934:c0:m12"} {"signature": "def make_coins(self, collection, text, subreference=\"\", lang=None):", "body": "if lang is None:lang = self.__default_lang__return \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\".format(title=quote(str(text.get_title(lang))), author=quote(str(text.get_creator(lang))),cid=url_for(\"\", objectId=collection.id, _external=True),language=collection.lang, pages=quote(subreference), edition=quote(str(text.get_description(lang))))", "docstring": "Creates a CoINS Title string from information\n\n :param collection: Collection to create coins from\n :param text: Text/Passage object\n :param subreference: Subreference\n :param lang: Locale information\n :return: Coins HTML title value", "id": "f9934:c0:m13"} {"signature": "def expose_ancestors_or_children(self, member, collection, lang=None):", "body": "x = {\"\": member.id,\"\": str(member.get_label(lang)),\"\": str(member.model),\"\": str(member.type),\"\": member.size,\"\": self.semantic(member, parent=collection)}if isinstance(member, ResourceCollection):x[\"\"] = str(member.lang)return x", "docstring": "Build an ancestor or descendant dict view based on selected information\n\n :param member: Current Member to build for\n :param collection: Collection from which we retrieved it\n :param lang: Language to express data in\n :return:", "id": "f9934:c0:m14"} {"signature": "def make_members(self, collection, lang=None):", "body": "objects = sorted([self.expose_ancestors_or_children(member, collection, lang=lang)for member in collection.membersif member.get_label()],key=itemgetter(\"\"))return objects", "docstring": "Build member list for given collection\n\n :param collection: Collection to build dict view of for its members\n :param lang: Language to express data in\n :return: List of basic objects", "id": "f9934:c0:m15"} {"signature": "def make_parents(self, collection, lang=None):", "body": "return [{\"\": member.id,\"\": str(member.get_label(lang)),\"\": str(member.model),\"\": str(member.type),\"\": member.size}for member in collection.parentsif member.get_label()]", "docstring": "Build parents list for given collection\n\n :param collection: Collection to build dict view of for its members\n :param lang: Language to express data in\n :return: List of basic objects", "id": "f9934:c0:m16"} {"signature": "def r_index(self):", "body": "return {\"\": \"\"}", "docstring": "Homepage route function\n\n :return: Template to use for Home page\n :rtype: {str: str}", "id": "f9934:c0:m17"} {"signature": "def r_collections(self, lang=None):", "body": "collection = self.resolver.getMetadata()return {\"\": \"\",\"\": 
collection.get_label(lang),\"\": {\"\": self.make_members(collection, lang=lang)}}", "docstring": "Retrieve the top collections of the inventory\n\n :param lang: Lang in which to express main data\n :type lang: str\n :return: Collections information and template\n :rtype: {str: Any}", "id": "f9934:c0:m18"} {"signature": "def r_collection(self, objectId, lang=None):", "body": "collection = self.resolver.getMetadata(objectId)return {\"\": \"\",\"\": {\"\": {\"\": str(collection.get_label(lang)),\"\": collection.id,\"\": str(collection.model),\"\": str(collection.type),},\"\": self.make_members(collection, lang=lang),\"\": self.make_parents(collection, lang=lang)},}", "docstring": "Collection content browsing route function\n\n :param objectId: Collection identifier\n :type objectId: str\n :param lang: Lang in which to express main data\n :type lang: str\n :return: Template and collections contained in given collection\n :rtype: {str: Any}", "id": "f9934:c0:m19"} {"signature": "def r_references(self, objectId, lang=None):", "body": "collection, reffs = self.get_reffs(objectId=objectId, export_collection=True)return {\"\": \"\",\"\": objectId,\"\": collection.citation,\"\": {\"\": {\"\": collection.get_label(lang),\"\": collection.id,\"\": str(collection.model),\"\": str(collection.type),},\"\": self.make_parents(collection, lang=lang)},\"\": reffs}", "docstring": "Text exemplar references browsing route function\n\n :param objectId: Collection identifier\n :type objectId: str\n :param lang: Lang in which to express main data\n :type lang: str\n :return: Template and required information about text with its references", "id": "f9934:c0:m20"} {"signature": "def r_first_passage(self, objectId):", "body": "collection, reffs = self.get_reffs(objectId=objectId, export_collection=True)first, _ = reffs[]return redirect(url_for(\"\", objectId=objectId, subreference=first, semantic=self.semantic(collection)))", "docstring": "Provides a redirect to the first passage of given objectId\n\n :param objectId: Collection identifier\n :type objectId: str\n :return: Redirection to the first passage of given text", "id": "f9934:c0:m21"} {"signature": "def r_passage(self, objectId, subreference, lang=None):", "body": "collection = self.get_collection(objectId)if isinstance(collection, CtsWorkMetadata):editions = [t for t in collection.children.values() if isinstance(t, CtsEditionMetadata)]if len(editions) == :raise UnknownCollection(\"\")return redirect(url_for(\"\", objectId=str(editions[].id), subreference=subreference))text = self.get_passage(objectId=objectId, subreference=subreference)passage = self.transform(text, text.export(Mimetypes.PYTHON.ETREE), objectId)prev, next = self.get_siblings(objectId, subreference, text)return {\"\": \"\",\"\": objectId,\"\": subreference,\"\": {\"\": {\"\": collection.get_label(lang),\"\": collection.id,\"\": str(collection.model),\"\": str(collection.type),\"\": text.get_creator(lang),\"\": text.get_title(lang),\"\": text.get_description(lang),\"\": collection.citation,\"\": self.make_coins(collection, text, subreference, lang=lang)},\"\": self.make_parents(collection, lang=lang)},\"\": Markup(passage),\"\": prev,\"\": next}", "docstring": "Retrieve the text of the passage\n\n :param objectId: Collection identifier\n :type objectId: str\n :param lang: Lang in which to express main data\n :type lang: str\n :param subreference: Reference identifier\n :type subreference: str\n :return: Template, collections metadata and Markup object representing the text\n :rtype: {str: 
Any}", "id": "f9934:c0:m22"} {"signature": "def r_assets(self, filetype, asset):", "body": "if filetype in self.assets and asset in self.assets[filetype] and self.assets[filetype][asset]:return send_from_directory(directory=self.assets[filetype][asset],filename=asset)abort()", "docstring": "Route for specific assets.\n\n :param filetype: Asset Type\n :param asset: Filename of an asset\n :return: Response", "id": "f9934:c0:m23"} {"signature": "def register_assets(self):", "body": "self.blueprint.add_url_rule(\"\".format(self.static_url_path),view_func=self.r_assets,endpoint=\"\",methods=[\"\"])", "docstring": "Merge and register assets, both as routes and dictionary\n\n :return: None", "id": "f9934:c0:m24"} {"signature": "def create_blueprint(self):", "body": "self.register_plugins()self.blueprint = Blueprint(self.name,\"\",url_prefix=self.prefix,template_folder=self.template_folder,static_folder=self.static_folder,static_url_path=self.static_url_path)for url, name, methods, instance in self._urls:self.blueprint.add_url_rule(url,view_func=self.view_maker(name, instance),endpoint=_plugin_endpoint_rename(name, instance),methods=methods)for url, name, methods, instance in self._semantic_url:self.blueprint.add_url_rule(url,view_func=self.view_maker(name, instance),endpoint=_plugin_endpoint_rename(name, instance)+\"\",methods=methods)self.register_assets()self.register_filters()self.__templates_namespaces__.extend(self.__instance_templates__)for namespace, directory in self.__templates_namespaces__[::-]:if namespace not in self.__template_loader__:self.__template_loader__[namespace] = []self.__template_loader__[namespace].append(jinja2.FileSystemLoader(op.abspath(directory)))self.blueprint.jinja_loader = jinja2.PrefixLoader({namespace: jinja2.ChoiceLoader(paths) for namespace, paths in self.__template_loader__.items()},\"\")if self.cache is not None:for func, instance in self.cached:setattr(instance, func.__name__, self.cache.memoize()(func))return self.blueprint", "docstring": "Create blueprint and register rules\n\n :return: Blueprint of the current nemo app\n :rtype: flask.Blueprint", "id": "f9934:c0:m25"} {"signature": "def view_maker(self, name, instance=None):", "body": "if instance is None:instance = selfsig = \"\" in [parameter.namefor parameter in inspect.signature(getattr(instance, name)).parameters.values()]def route(**kwargs):if sig and \"\" not in kwargs:kwargs[\"\"] = self.get_locale()if \"\" in kwargs:del kwargs[\"\"]return self.route(getattr(instance, name), **kwargs)return route", "docstring": "Create a view\n\n :param name: Name of the route function to use for the view.\n :type name: str\n :return: Route function which makes use of Nemo context (such as menu informations)\n :rtype: function", "id": "f9934:c0:m26"} {"signature": "def main_collections(self, lang=None):", "body": "return sorted([{\"\": member.id,\"\": str(member.get_label(lang=lang)),\"\": str(member.model),\"\": str(member.type),\"\": member.size}for member in self.resolver.getMetadata().members], key=itemgetter(\"\"))", "docstring": "Retrieve main parent collections of a repository\n\n :param lang: Language to retrieve information in\n :return: Sorted collections representations", "id": "f9934:c0:m27"} {"signature": "def make_cache_keys(self, endpoint, kwargs):", "body": "keys = sorted(kwargs.keys())i18n_cache_key = endpoint+\"\"+\"\".join([kwargs[k] for k in keys])if \"\" in keys:cache_key = endpoint+\"\" + \"\".join([kwargs[k] for k in keys if k != \"\"])else:cache_key = i18n_cache_keyreturn i18n_cache_key, 
cache_key", "docstring": "This function is built to provide cache keys for templates\n\n :param endpoint: Current endpoint\n :param kwargs: Keyword Arguments\n :return: tuple of i18n dependant cache key and i18n ignoring cache key\n :rtype: tuple(str)", "id": "f9934:c0:m28"} {"signature": "def render(self, template, **kwargs):", "body": "kwargs[\"\"] = \"\" % kwargs[\"\"].values()kwargs[\"\"] = self.get_locale()kwargs[\"\"] = self.assetskwargs[\"\"] = self.main_collections(kwargs[\"\"])kwargs[\"\"] = self.cache is not Nonekwargs[\"\"] = kwargs[\"\"], kwargs[\"\"] = self.make_cache_keys(request.endpoint, kwargs[\"\"])kwargs[\"\"] = templatefor plugin in self.__plugins_render_views__:kwargs.update(plugin.render(**kwargs))return render_template(kwargs[\"\"], **kwargs)", "docstring": "Render a route template and adds information to this route.\n\n :param template: Template name.\n :type template: str\n :param kwargs: dictionary of named arguments used to be passed to the template\n :type kwargs: dict\n :return: Http Response with rendered template\n :rtype: flask.Response", "id": "f9934:c0:m29"} {"signature": "def route(self, fn, **kwargs):", "body": "new_kwargs = fn(**kwargs)if not isinstance(new_kwargs, dict):return new_kwargsnew_kwargs[\"\"] = kwargsreturn self.render(**new_kwargs)", "docstring": "Route helper : apply fn function but keep the calling object, *ie* kwargs, for other functions\n\n :param fn: Function to run the route with\n :type fn: function\n :param kwargs: Parsed url arguments\n :type kwargs: dict\n :return: HTTP Response with rendered template\n :rtype: flask.Response", "id": "f9934:c0:m30"} {"signature": "def register(self):", "body": "if self.app is not None:if not self.blueprint:self.blueprint = self.create_blueprint()self.app.register_blueprint(self.blueprint)if self.cache is None:setattr(self.app.jinja_env, \"\", self)self.app.jinja_env.add_extension(FakeCacheExtension)return self.blueprintreturn None", "docstring": "Register the app using Blueprint\n\n :return: Nemo blueprint\n :rtype: flask.Blueprint", "id": "f9934:c0:m31"} {"signature": "def register_filters(self):", "body": "for _filter, instance in self._filters:if not instance:self.app.jinja_env.filters[_filter.replace(\"\", \"\")] = getattr(flask_nemo.filters, _filter)else:self.app.jinja_env.filters[_filter.replace(\"\", \"\")] = getattr(instance, _filter.replace(\"\".format(instance.name), \"\"))", "docstring": "Register filters for Jinja to use\n\n .. 
note:: Extends the dictionary filters of jinja_env using self._filters list", "id": "f9934:c0:m32"} {"signature": "def register_plugins(self):", "body": "if len([plugin for plugin in self.__plugins__.values() if plugin.clear_routes]) > : self._urls = list()self.cached = list()clear_assets = [plugin for plugin in self.__plugins__.values() if plugin.clear_assets]if len(clear_assets) > and not self.prevent_plugin_clearing_assets: self.__assets__ = copy(type(self).ASSETS)static_path = [plugin.static_folder for plugin in clear_assets if plugin.static_folder]if len(static_path) > :self.static_folder = static_path[-]for plugin in self.__plugins__.values():self._urls.extend([(url, function, methods, plugin) for url, function, methods in plugin.routes])self._filters.extend([(filt, plugin) for filt in plugin.filters])self.__templates_namespaces__.extend([(namespace, directory) for namespace, directory in plugin.templates.items()])for asset_type in self.__assets__:for key, value in plugin.assets[asset_type].items():self.__assets__[asset_type][key] = valueif plugin.augment:self.__plugins_render_views__.append(plugin)if hasattr(plugin, \"\"):for func in plugin.CACHED:self.cached.append((getattr(plugin, func), plugin))plugin.register_nemo(self)", "docstring": "Register plugins in Nemo instance\n\n - Clear routes first if asked by one plugin\n - Clear assets if asked by one plugin and replace by the last plugin registered static_folder\n - Register each plugin\n - Append plugin routes to registered routes\n - Append plugin filters to registered filters\n - Append templates directory to given namespaces\n - Append assets (CSS, JS, statics) to given resources \n - Append render view (if exists) to Nemo.render stack", "id": "f9934:c0:m33"} {"signature": "def chunk(self, text, reffs):", "body": "if str(text.id) in self.chunker:return self.chunker[str(text.id)](text, reffs)return self.chunker[\"\"](text, reffs)", "docstring": "Handle a list of references depending on the text identifier using the chunker dictionary.\n\n :param text: Text object from which comes the references\n :type text: MyCapytains.resources.texts.api.Text\n :param reffs: List of references to transform\n :type reffs: References\n :return: Transformed list of references\n :rtype: [str]", "id": "f9934:c0:m34"} {"signature": "def register_nemo(self, nemo=None):", "body": "self.__nemo__ = nemo", "docstring": "Register Nemo on to the plugin instance\n\n :param nemo: Instance of Nemo", "id": "f9937:c0:m1"} {"signature": "def render(self, **kwargs):", "body": "return kwargs", "docstring": "View Rendering function that gets triggered before nemo renders the resources and adds informations to \\\n pass to the templates\n\n :param kwargs: Dictionary of arguments to pass to the template\n :return: Dictionary of arguments to pass to the template", "id": "f9937:c0:m13"} {"signature": "def _req(self, url, method='', **kw):", "body": "send = requests.post if method == '' else requests.gettry:r = send(url,headers=self._token_header(),timeout=self.settings[''],**kw)except requests.exceptions.Timeout:raise ApiError('' % self.settings[''])try:json = r.json()except ValueError:raise ApiError('')if json.get('') != '':raise ApiError('' % json)return json", "docstring": "Make request and convert JSON response to python objects", "id": "f9942:c2:m2"} {"signature": "def get_active_bets(self, project_id=None):", "body": "url = urljoin(self.settings[''],'')if project_id is not None:url += ''.format(project_id)bets = []has_next_page = Truewhile has_next_page:res = 
self._req(url)bets.extend(res[''][''])url = res[''].get('')has_next_page = bool(url)return bets", "docstring": "Returns all active bets", "id": "f9942:c2:m3"} {"signature": "def get_bets(self, type=None, order_by=None, state=None, project_id=None,page=None, page_size=None):", "body": "if page is None:page = if page_size is None:page_size = if state == '':_states = [] elif state == '':_states = self.CLOSED_STATESelse:_states = self.ACTIVE_STATESurl = urljoin(self.settings[''],''.format(page, page_size))url += ''.format(''.join(_states))if type is not None:url += ''.format(type)if order_by in ['', '']:url += ''.format(order_by)if project_id is not None:url += ''.format(project_id)res = self._req(url)return res['']['']", "docstring": "Return bets with given filters and ordering.\n\n :param type: return bets only with this type.\n Use None to include all (default).\n :param order_by: '-last_stake' or 'last_stake' to sort by stake's\n created date or None for default ordering.\n :param state: one of 'active', 'closed', 'all' (default 'active').\n :param project_id: return bets associated with given project id in kava\n :param page: default 1.\n :param page_site: page size (default 100).", "id": "f9942:c2:m4"} {"signature": "def get_project_slug(self, bet):", "body": "if bet.get(''):params = json.loads(bet[''])return params.get('')return None", "docstring": "Return slug of a project that given bet is associated with\n or None if bet is not associated with any project.", "id": "f9942:c2:m5"} {"signature": "def stakes_in(self, bet):", "body": "return self._stakes_by_side(bet, self.SIDE_IN)", "docstring": "Return all stakes on 'in' side for given bet.", "id": "f9942:c2:m8"} {"signature": "def stakes_out(self, bet):", "body": "return self._stakes_by_side(bet, self.SIDE_OUT)", "docstring": "Return all stakes on 'out' side for given bet.", "id": "f9942:c2:m9"} {"signature": "def set_callback(self, event, callback):", "body": "self._callbacks[event] = callback", "docstring": "Set callback for event.\n\n Supported events: see `Event` class.\n\n Callback must take one parameter, which is a bet that changed.\n\n If callback is already set, it will be reset to a new value.", "id": "f9942:c2:m23"} {"signature": "def subscribe(self, event, bet_ids):", "body": "if not self._subscriptions.get(event):self._subscriptions[event] = set()self._subscriptions[event] = self._subscriptions[event].union(bet_ids)", "docstring": "Subscribe to event for given bet ids.", "id": "f9942:c2:m24"} {"signature": "def event_loop(self):", "body": "return [gevent.spawn(self._poll_bet_executed)]", "docstring": "Look for changes in bets, that user subscribed to by self.subscribe\n and trigger corresponding callbacks.", "id": "f9942:c2:m25"} {"signature": "def is_configured(self, project, **kwargs):", "body": "params = self.get_optionreturn bool(params('', project) and params('', project))", "docstring": "Check if plugin is configured.", "id": "f9947:c0:m0"} {"signature": "def post_process(self, group, event, is_new, is_sample, **kwargs):", "body": "if not self.is_configured(group.project):returnhost = self.get_option('', group.project)port = int(self.get_option('', group.project))prefix = self.get_option('', group.project)hostname = self.get_option('', group.project) or socket.gethostname()resolve_age = group.project.get_option('', None)now = int(time.time())template = '' % (prefix, group.project.slug)level = group.get_level_display()label = template % levelgroups = group.project.group_set.filter(status=STATUS_UNRESOLVED)if 
resolve_age:oldest = timezone.now() - timedelta(hours=int(resolve_age))groups = groups.filter(last_seen__gt=oldest)num_errors = groups.filter(level=group.level).count()metric = Metric(hostname, label, num_errors, now)log.info('', label, num_errors)send_to_zabbix([metric], host, port)", "docstring": "Process error.", "id": "f9947:c0:m1"} {"signature": "def isA(instance, typeList):", "body": "return any(map(lambda iType: isinstance(instance,iType), typeList))", "docstring": "Return true if ``instance`` is an instance of any the Directive\ntypes in ``typeList``", "id": "f9951:m0"} {"signature": "def _substituteCheckPattern(self, inputString, lineNumber, lastLineNumber, checkFileName, isForRegex):", "body": "assert isinstance(inputString, str)assert isinstance(lineNumber, int)assert isinstance(lastLineNumber, int)assert isinstance(checkFileName, str)\"\"\"\"\"\"sPattern = r''matcher = re.compile(sPattern)result = \"\"loop = Truestart = end = len(inputString) while loop:m = matcher.search(inputString, start, end)if not m:_logger.debug(''.format(result))result += inputString[start:end]break else:prevIndex = max(, m.start() -)_logger.debug(''.format(index=prevIndex, char=inputString[prevIndex]))if inputString[prevIndex] == \"\":_logger.debug('')_logger.debug(''.format(result))result += inputString[start:prevIndex] _logger.debug(''.format(result))result += inputString[(prevIndex+):m.end()] start = min(m.end(), end)_logger.debug(''.format(result))_logger.debug(''.format(start=start, end=end, ss=inputString[start:end]))else:_logger.debug(''.format(result))_logger.debug(''.format(begin=m.start(),end=m.end(), ss=inputString[m.start():m.end()]))result += inputString[start:m.start()] if m.groupdict()[''] == None:_logger.debug('')result += str(lineNumber)else:offset = if m.groupdict()[''] == '' else -offset *= int(m.groupdict()[''])_logger.debug(''.format(offset))requestedLineNumber = lineNumber + offset_logger.debug(''.format(requestedLineNumber))if requestedLineNumber <= :raise ParsingException(''.format(file=checkFileName, line=lineNumber, col=m.start()))elif requestedLineNumber > lastLineNumber:raise ParsingException(''.format(file=checkFileName, line=lineNumber, col=m.start()))result += str(requestedLineNumber)start = min(m.end(),end)_logger.debug(''.format(start=start, end=end, ss=inputString[start:end]))\"\"\"\"\"\"basenameCheckFileName = os.path.basename(checkFileName)assert basenameCheckFileName.count('') == result = self._simpleSubstitution(\"\", basenameCheckFileName, result)abspathCheckFileName = os.path.abspath(checkFileName)if isForRegex:abspathCheckFileName = abspathCheckFileName.replace('', '')result = self._simpleSubstitution(\"\", abspathCheckFileName, result)assert len(result) != return result", "docstring": "Do various ${} substitutions", "id": "f9952:c2:m3"} {"signature": "def match(self, subsetLines, subsetOffset, fileName):", "body": "raise NotImplementedError()", "docstring": "Search through lines for match.\nWhat is returned is defined by implementations", "id": "f9953:c1:m2"} {"signature": "def match(self, subsetLines, offsetOfSubset, fileName):", "body": "for (offset,l) in enumerate(subsetLines):m = self.regex.search(l)if m != None:truePosition = offset + offsetOfSubset_logger.debug(''.format(str(truePosition+ )))_logger.debug(''.format(l))self.matchLocation = CheckFileParser.FileLocation(fileName, truePosition +) return truePositionself.failed = Trueraise DirectiveException(self)", "docstring": "Search through lines for match.\nRaise an Exception if fail to match\nIf match is 
successful, return the position where the match was found", "id": "f9953:c4:m1"} {"signature": "def match(self, subsetLines, offsetOfSubset, fileName):", "body": "for (offset,l) in enumerate(subsetLines):column = l.find(self.literal)if column != -:truePosition = offset + offsetOfSubset_logger.debug(''.format(str(truePosition+ ), column))_logger.debug(''.format(l))self.matchLocation = CheckFileParser.FileLocation(fileName, truePosition +)return truePositionself.failed = Trueraise DirectiveException(self)", "docstring": "Search through lines for a match.\nRaise an Exception on failure to match.\nIf the match is successful, return the position where the match was found", "id": "f9953:c5:m1"} {"signature": "def match(self, subsetLines, offsetOfSubset, fileName):", "body": "for (offset,l) in enumerate(subsetLines):for t in self.regex:m = t.Regex.search(l)if m != None:truePosition = offset + offsetOfSubset_logger.debug(''.format(str(truePosition+ )))_logger.debug(''.format(l))self.failed = Trueself.matchLocation = CheckFileParser.FileLocation(fileName, truePosition +)raise DirectiveException(self)", "docstring": "Search through lines for a match.\nRaise an Exception if a match is found", "id": "f9953:c8:m3"} {"signature": "def match(self, subsetLines, offsetOfSubset, fileName):", "body": "for (offset,l) in enumerate(subsetLines):for literal in self.literals:column = l.find(literal.Literal)if column != -:truePosition = offset + offsetOfSubset_logger.debug(''.format(line=str(truePosition+ ), col=column))_logger.debug(''.format(l))self.failed = Trueself.matchLocation = CheckFileParser.FileLocation(fileName, truePosition +)raise DirectiveException(self)", "docstring": "Search through lines for a match.\nRaise an Exception if a match is found", "id": "f9953:c9:m3"} {"signature": "def scriptEntryPoint():", "body": "import sysreturn main(sys.argv)", "docstring": "This provides an entry point for distutils", "id": "f9955:m2"} {"signature": "def add_file(self, file, **kwargs):", "body": "if os.access(file, os.F_OK):if file in self.f_repository:raise DuplicationError(\"\")self.f_repository.append(file)else:raise IOError(\"\")", "docstring": "Append a file to the file repository.\n\n For file monitoring, the monitor instance needs files.\n Please pass the name of the file as the `file` argument.\n\n :param file: the name of the file you want to monitor.", "id": "f9958:c3:m1"} {"signature": "def add_files(self, filelist, **kwargs):", "body": "if not isinstance(filelist, list):raise TypeError(\"\")for file in filelist:self.add_file(file)", "docstring": "Append files to the file repository.\n\n ModificationMonitor can append files to the repository using this.\n Please pass the list of file names as the `filelist` argument.\n\n :param filelist: the list of file names", "id": "f9958:c3:m2"} {"signature": "def monitor(self, sleep=):", "body": "manager = FileModificationObjectManager()timestamps = {}filebodies = {}for file in self.f_repository:timestamps[file] = self._get_mtime(file)filebodies[file] = open(file).read()while True:for file in self.f_repository:mtime = timestamps[file]fbody = filebodies[file]modified = self._check_modify(file, mtime, fbody)if not modified:continuenew_mtime = self._get_mtime(file)new_fbody = open(file).read()obj = FileModificationObject(file,(mtime, new_mtime),(fbody, new_fbody) )timestamps[file] = new_mtimefilebodies[file] = new_fbodymanager.add_object(obj)yield objtime.sleep(sleep)", "docstring": "Run the file modification monitor.\n\n The monitor detects file modifications using timestamps and file bodies. \n Before entering the while loop, the monitor records the original timestamp \n
and file body data for each file. In the while loop, the monitor \n reads the new timestamp and file body, and then compares them\n to the original values. If the new timestamp and file body differ from the originals,\n the monitor regards these changes as a `modification`. The monitor then creates an\n instance of FileModificationObjectManager and a FileModificationObject,\n inserts the FileModificationObject into the FileModificationObjectManager,\n and yields the object.\n\n :param sleep: How long to sleep on each iteration of the while loop.", "id": "f9958:c3:m3"} {"signature": "def is_colour(value):", "body": "global PREDEFINED, HEX_MATCH, RGB_MATCH, RGBA_MATCH, HSL_MATCH, HSLA_MATCHvalue = value.strip()if HEX_MATCH.match(value) or RGB_MATCH.match(value) orRGBA_MATCH.match(value) or HSL_MATCH.match(value) orHSLA_MATCH.match(value) or value in PREDEFINED:return Truereturn False", "docstring": "Returns True if the value given is a valid CSS colour, i.e. matches one\n of the regular expressions in the module or is in the list of\n predefined values by the browser.", "id": "f9962:m0"} {"signature": "@register.filterdef getitem(dictionary, keyvar):", "body": "try:return dictionary[keyvar]except KeyError:return ''", "docstring": "Custom django template filter that allows access to an item of a\n dictionary through the key contained in a template variable. Example:\n\n .. code-block:: python\n\n context_data = {\n 'data':{\n 'foo':'bar',\n },\n 'key':'foo',\n }\n\n template = Template('{% load awltags %}{{data|getitem:key}}')\n context = Context(context_data)\n result = template.render(context)\n\n >>> result\n 'bar'\n\n .. note::\n Any KeyErrors are ignored and return an empty string", "id": "f9980:m0"} {"signature": "@register.tagdef accessor(parser, token):", "body": "contents = token.split_contents()tag = contents[]if len(contents) < :raise template.TemplateSyntaxError(('''') % tag)as_var = Noneif len(contents) >= :if contents[-] == '':as_var = contents[-]contents = contents[:-]return AccessorNode(contents[], contents[:], as_var)", "docstring": "This template tag is used to do complex nested attribute accessing of\n an object. The first parameter is the object being accessed, subsequent\n parameters are one of: \n\n * a variable in the template context\n * a literal in the template context\n * either of the above surrounded in square brackets\n\n For each variable or literal parameter given, a `getattr` is called on the\n object, chaining to the next parameter. For any square bracket enclosed\n items the access is done through a dictionary lookup.\n\n Example::\n\n {% accessor car where 'front_seat' [position] ['fabric'] %}\n\n The above would result in the following chain of commands:\n\n .. 
code-block:: python\n\n ref = getattr(car, where)\n ref = getattr(ref, 'front_seat')\n ref = ref[position]\n return ref['fabric']\n\n This tag also supports \"as\" syntax, putting the results into a template\n variable::\n\n {% accessor car 'interior' as foo %}", "id": "f9980:m1"} {"signature": "def django_logging_dict(log_dir, handlers=[''], filename=''):", "body": "d = default_logging_dict(log_dir, handlers, filename)d[''].update({'':{'':'','':'',}})d[''].update({'': { '': ['', ''],'': '','': False,},'': {'': ['', ''],'': '','': False,},})return d", "docstring": "Extends :func:`logthing.utils.default_logging_dict` with django\n specific values.", "id": "f9982:m0"} {"signature": "def extra_context(request):", "body": "host = os.environ.get('', None)or request.get_host()d = {'':request,'':host,'':request.path.startswith(''),}return d", "docstring": "Adds useful global items to the context for use in templates.\n\n * *request*: the request object\n * *HOST*: host name of server\n * *IN_ADMIN*: True if you are in the django admin area", "id": "f9984:m0"} {"signature": "def create_admin(username='', email='', password=''):", "body": "admin = User.objects.create_user(username, email, password)admin.is_staff = Trueadmin.is_superuser = Trueadmin.save()return admin", "docstring": "Create and save an admin user.\n\n :param username:\n Admin account's username. Defaults to 'admin'\n :param email:\n Admin account's email address. Defaults to 'admin@admin.com'\n :param password:\n Admin account's password. Defaults to 'admin'\n :returns:\n Django user with staff and superuser privileges", "id": "f9985:m0"} {"signature": "def messages_from_response(response):", "body": "messages = []if hasattr(response, '') and response.context and'' in response.context:messages = response.context['']elif hasattr(response, ''):morsel = response.cookies.get('')if not morsel:return []from django.contrib.messages.storage.cookie import CookieStoragestore = CookieStorage(FakeRequest())messages = store._decode(morsel.value)else:return []return [(m.message, m.level) for m in messages]", "docstring": "Returns a list of the messages from the django MessageMiddleware\n package contained within the given response. This is to be used during\n unit testing when trying to see if a message was set properly in a view.\n\n :param response: HttpResponse object, likely obtained through a\n test client.get() or client.post() call\n\n :returns: a list of tuples (message_string, message_level), one for each\n message in the response context", "id": "f9985:m1"} {"signature": "def __init__(self, user=None, method='', cookies={}, data={}):", "body": "super(FakeRequest, self).__init__()self.method = methodself.COOKIES = cookiesif user:self.user = userif method == '':self.GET = dataelse:self.POST = dataself.path = ''", "docstring": "Constructor\n\n :param user:\n Django User object to include in the request. Defaults to None.\n If none is given then the parameter is not set at all\n :param method:\n Request method. Defaults to 'GET'\n :param cookies:\n Dict containing cookies for the request. Defaults to empty\n :param data:\n Dict for get or post fields. Defaults to empty", "id": "f9985:c0:m0"} {"signature": "def initiate(self):", "body": "self.site = admin.sites.AdminSite()self.admin_user = create_admin(self.USERNAME, self.EMAIL, self.PASSWORD)self.authed = False", "docstring": "Sets up the :class:`AdminSite` and creates a user with the\n appropriate privileges. 
This should be called from the inheritor's\n :class:`TestCase.setUp` method.", "id": "f9985:c1:m0"} {"signature": "def authorize(self):", "body": "response = self.client.login(username=self.USERNAME, password=self.PASSWORD)self.assertTrue(response)self.authed = True", "docstring": "Authenticates the superuser account via the web login.", "id": "f9985:c1:m1"} {"signature": "def authed_get(self, url, response_code=, headers={}, follow=False):", "body": "if not self.authed:self.authorize()response = self.client.get(url, follow=follow, **headers)self.assertEqual(response_code, response.status_code)return response", "docstring": "Does a django test client ``get`` against the given url after\n logging in the admin first.\n\n :param url:\n URL to fetch\n :param response_code:\n Expected response code from the URL fetch. This value is\n asserted. Defaults to 200\n :param headers:\n Optional dictionary of headers to send in the request\n :param follow:\n When True, the get call will follow any redirect requests.\n Defaults to False.\n :returns:\n Django testing ``Response`` object", "id": "f9985:c1:m2"} {"signature": "def authed_post(self, url, data, response_code=, follow=False,headers={}):", "body": "if not self.authed:self.authorize()response = self.client.post(url, data, follow=follow, **headers)self.assertEqual(response_code, response.status_code)return response", "docstring": "Does a django test client ``post`` against the given url after\n logging in the admin first.\n\n :param url:\n URL to fetch\n :param data:\n Dictionary to form contents to post\n :param response_code:\n Expected response code from the URL fetch. This value is\n asserted. Defaults to 200\n :param headers:\n Optional dictionary of headers to send in with the request\n :returns:\n Django testing ``Response`` object", "id": "f9985:c1:m3"} {"signature": "def visit_admin_link(self, admin_model, instance, field_name,response_code=, headers={}):", "body": "html = self.field_value(admin_model, instance, field_name)url, text = parse_link(html)if not url:raise AttributeError('' % html)return self.authed_get(url, response_code=response_code,headers=headers)", "docstring": "This method is used for testing links that are in the change list\n view of the django admin. For the given instance and field name, the\n HTML link tags in the column are parsed for a URL and then invoked\n with :class:`AdminToolsMixin.authed_get`.\n\n :param admin_model:\n Instance of a :class:`admin.ModelAdmin` object that is responsible\n for displaying the change list\n :param instance:\n Object instance that is the row in the admin change list\n :param field_name:\n Name of the field/column to containing the HTML link to get a URL\n from to visit\n :param response_code:\n Expected HTTP status code resulting from the call. The value of\n this is asserted. 
Defaults to 200.\n :param headers:\n Optional dictionary of headers to send in the request\n :returns:\n Django test ``Response`` object\n :raises AttributeError:\n If the column does not contain a URL that can be parsed", "id": "f9985:c1:m4"} {"signature": "def field_value(self, admin_model, instance, field_name):", "body": "_, _, value = lookup_field(field_name, instance, admin_model)return value", "docstring": "Returns the value displayed in the column on the web interface for\n a given instance.\n\n :param admin_model:\n Instance of a :class:`admin.ModelAdmin` object that is responsible\n for displaying the change list\n :param instance:\n Object instance that is the row in the admin change list\n :field_name:\n Name of the field/column to fetch", "id": "f9985:c1:m5"} {"signature": "def field_names(self, admin_model):", "body": "request = FakeRequest(user=self.admin_user)return admin_model.get_list_display(request)", "docstring": "Returns the names of the fields/columns used by the given admin\n model.\n\n :param admin_model:\n Instance of a :class:`admin.ModelAdmin` object that is responsible\n for displaying the change list\n :returns:\n List of field names", "id": "f9985:c1:m6"} {"signature": "def render_page(request, page_name, data={}):", "body": "return render(request, page_name, data)", "docstring": ".. deprecated:: 0.12\n Use ``django.shortcuts.render`` instead\n\nThis function was a wrapper for ``render_to_response`` that handled\nrequest context. The ``django.shortcuts.render`` method does the same\nthing, so this just wraps that now.", "id": "f9990:m0"} {"signature": "def render_page_to_string(request, page_name, data={}):", "body": "return render_to_string(page_name, data, request=request)", "docstring": "A shortcut for using ``render_to_string`` with a\n :class:`RequestContext` automatically.", "id": "f9990:m1"} {"signature": "def refetch(obj):", "body": "return obj.__class__.objects.get(id=obj.id)", "docstring": "Queries the database for the same object that is passed in, refetching\n its contents in case they are stale.\n\n :param obj:\n Object to refetch\n\n :returns:\n Refreshed version of the object", "id": "f9990:m2"} {"signature": "def refetch_for_update(obj):", "body": "return obj.__class__.objects.select_for_update().get(id=obj.id)", "docstring": "Queries the database for the same object that is passed in, refetching\n its contents and runs ``select_for_update()`` to lock the corresponding\n row until the next commit.\n\n :param obj:\n Object to refetch\n :returns:\n Refreshed version of the object", "id": "f9990:m3"} {"signature": "def get_field_names(obj, ignore_auto=True, ignore_relations=True, exclude=[]):", "body": "from django.db.models import (AutoField, ForeignKey, ManyToManyField, ManyToOneRel, OneToOneField, OneToOneRel)for field in obj._meta.get_fields():if ignore_auto and isinstance(field, AutoField):continueif ignore_relations and (isinstance(field, ForeignKey) orisinstance(field, ManyToManyField) orisinstance(field, ManyToOneRel) orisinstance(field, OneToOneRel) orisinstance(field, OneToOneField)):a = ; acontinueif field.name in exclude:continueyield field.name", "docstring": "Returns the field names of a Django model object.\n\n :param obj: the Django model class or object instance to get the fields\n from\n :param ignore_auto: ignore any fields of type AutoField. 
Defaults to True\n :param ignore_relations: ignore any fields that involve relations such as\n the ForeignKey or ManyToManyField\n :param exclude: exclude anything in this list from the results\n\n :returns: generator of found field names", "id": "f9990:m4"} {"signature": "def get_obj_attr(obj, attr):", "body": "fields = attr.split('')field_obj = getattr(obj, fields[])for field in fields[:]:field_obj = getattr(field_obj, field)return field_obj", "docstring": "Works like getattr() but supports django's double underscore object\n dereference notation.\n\n Example usage:\n\n .. code-block:: python\n\n >>> get_obj_attr(book, 'writer__age')\n 42\n >>> get_obj_attr(book, 'publisher__address')\n
\n\n :param obj: \n Object to start the derference from\n\n :param attr:\n String name of attribute to return\n\n :returns:\n Derferenced object \n\n :raises:\n AttributeError in the attribute in question does not exist", "id": "f9990:m5"} {"signature": "def as_list(self):", "body": "result = []for child in self.children:self._depth_traversal(child, result)return result", "docstring": "Returns a list of strings describing the full paths and patterns\n along with the name of the urls. Example:\n\n .. code-block::python\n >>> u = URLTree()\n >>> u.as_list()\n [\n 'admin/',\n 'admin/$, name=index',\n 'admin/login/$, name=login',\n ]", "id": "f9990:c0:m3"} {"signature": "def print_tree(self):", "body": "for line in self.as_list():print(line)", "docstring": "Convenience method for printing the results of\n :class:`URLTree.as_list` to STDOUT", "id": "f9990:c0:m4"} {"signature": "@classmethoddef increment(cls, name):", "body": "with transaction.atomic():counter = Counter.objects.select_for_update().get(name=name)counter.value += counter.save()return counter.value", "docstring": "Call this method to increment the named counter. This is atomic on\n the database.\n\n :param name:\n Name for a previously created ``Counter`` object", "id": "f9991:c0:m0"} {"signature": "@classmethoddef lock_until_commit(cls, name):", "body": "Lock.objects.select_for_update().get(name=name)", "docstring": "Grabs this lock and holds it (using ``select_for_update()``) until\n the next commit is done.\n\n :param name:\n Name for a previously created ``Lock`` object", "id": "f9991:c1:m0"} {"signature": "def count(self):", "body": "return sum(qs.count() for qs in self.querysets)", "docstring": "Performs a .count() for all subquerysets and returns the number of\nrecords as an integer.", "id": "f9991:c4:m1"} {"signature": "def _clone(self):", "body": "return self.__class__(*self.querysets)", "docstring": "Returns a clone of this queryset chain", "id": "f9991:c4:m2"} {"signature": "def _all(self):", "body": "return chain(*self.querysets)", "docstring": "Iterates records in all subquerysets", "id": "f9991:c4:m3"} {"signature": "def __getitem__(self, index):", "body": "if type(index) is slice:return list(islice(self._all(), index.start, index.stop, index.step or ))else:return next(islice(self._all(), index, index+))", "docstring": "Retrieves an item or slice from the chained set of results from all\nsubquerysets.", "id": "f9991:c4:m4"} {"signature": "def post_required(method_or_options=[]):", "body": "def decorator(method):expected_fields = []if not callable(method_or_options):expected_fields = method_or_options@wraps(method)def wrapper(*args, **kwargs):request = args[]if request.method != '':logger.error('')raise Http404('')missing = []for field in expected_fields:if field not in request.POST:missing.append(field)if missing:s = '' % missinglogger.error(s)raise Http404(s)return method(*args, **kwargs)return wrapperif callable(method_or_options):return decorator(method_or_options)return decorator", "docstring": "View decorator that enforces that the method was called using POST.\n This decorator can be called with or without parameters. As it is\n expected to wrap a view, the first argument of the method being wrapped is\n expected to be a ``request`` object.\n\n .. 
code-block:: python\n\n @post_required\n def some_view(request):\n pass\n\n\n @post_required(['firstname', 'lastname'])\n def some_view(request):\n pass\n\n The optional parameter contains a single list which specifies the names of\n the expected fields in the POST dictionary. The list is not exclusive,\n you can pass in fields that are not checked by the decorator.\n\n :param options:\n List of the names of expected POST keys.", "id": "f9992:m0"} {"signature": "def json_post_required(*decorator_args):", "body": "def decorator(method):@wraps(method)def wrapper(*args, **kwargs):field = decorator_args[]if len(decorator_args) == :request_name = decorator_args[]else:request_name = fieldrequest = args[]if request.method != '':logger.error('')raise Http404('')if field not in request.POST:s = '' % fieldlogger.error(s)raise Http404(s)setattr(request, request_name, json.loads(request.POST[field]))return method(*args, **kwargs)return wrapperreturn decorator", "docstring": "View decorator that enforces that the method was called using POST and\n contains a field containing a JSON dictionary. This method should\n only be used to wrap views and assumes the first argument of the method\n being wrapped is a ``request`` object.\n\n .. code-block:: python\n\n @json_post_required('data', 'json_data')\n def some_view(request):\n username = request.json_data['username']\n\n :param field:\n The name of the POST field that contains a JSON dictionary\n :param request_name:\n [optional] Name of the parameter on the request to put the\n deserialized JSON data. If not given the field name is used", "id": "f9992:m1"} {"signature": "@staff_member_requireddef move(request, content_type_id, obj_id, rank):", "body": "content_type = ContentType.objects.get_for_id(content_type_id)obj = get_object_or_404(content_type.model_class(), id=obj_id)obj.rank = int(rank)obj.save()return HttpResponseRedirect(request.META[''])", "docstring": "View to be used in the django admin for changing a :class:`RankedModel`\n object's rank. See :func:`admin_link_move_up` and\n :func:`admin_link_move_down` for helper functions to incoroprate in your\n admin models.\n\n Upon completion this view sends the caller back to the referring page.\n\n :param content_type_id:\n ``ContentType`` id of object being moved\n :param obj_id:\n ID of object being moved\n :param rank:\n New rank of the object", "id": "f9993:m0"} {"signature": "@transaction.atomicdef save(self, *args, **kwargs):", "body": "rerank = kwargs.pop('', True)if rerank:if not self.id:self._process_new_rank_obj()elif self.rank == self._rank_at_load:passelse:self._process_moved_rank_obj()super(RankedModel, self).save(*args, **kwargs)", "docstring": "Overridden method that handles that re-ranking of objects and the\n integrity of the ``rank`` field.\n\n :param rerank:\n Added parameter, if True will rerank other objects based on the\n change in this save. Defaults to True.", "id": "f9995:c0:m3"} {"signature": "def grouped_filter(self):", "body": "return self.__class__.objects.all()", "docstring": "This method should be overridden in order to allow groupings of\n ``RankModel`` objects. The default is there is a single group which\n are all instances of the inheriting class. 
\n\n An example with a grouped model would be::\n\n class Grouped(RankedModel):\n group_number = models.IntegerField()\n\n def grouped_filter(self):\n return Grouped.objects.filter(\n group_number=self.group_number)\n\n :returns:\n :class:`QuerySet` of ``RankedModel`` objects that are in the same\n group.", "id": "f9995:c0:m4"} {"signature": "def repack(self):", "body": "items = self.grouped_filter().order_by('').select_for_update()for count, item in enumerate(items):item.rank = count + item.save(rerank=False)", "docstring": "Removes any blank ranks in the order.", "id": "f9995:c0:m5"} {"signature": "def admin_link_move_up(obj, link_text=''):", "body": "if obj.rank == :return ''content_type = ContentType.objects.get_for_model(obj)link = reverse('', args=(content_type.id, obj.id, obj.rank - ))return '' % (link, link_text)", "docstring": "Returns a link to a view that moves the passed in object up in rank.\n\n :param obj:\n Object to move\n :param link_text:\n Text to display in the link. Defaults to \"up\"\n :returns:\n HTML link code to view for moving the object", "id": "f9996:m0"} {"signature": "def admin_link_move_down(obj, link_text=''):", "body": "if obj.rank == obj.grouped_filter().count():return ''content_type = ContentType.objects.get_for_model(obj)link = reverse('', args=(content_type.id, obj.id, obj.rank + ))return '' % (link, link_text)", "docstring": "Returns a link to a view that moves the passed in object down in rank.\n\n :param obj:\n Object to move\n :param link_text:\n Text to display in the link. Defaults to \"down\"\n :returns:\n HTML link code to view for moving the object", "id": "f9996:m1"} {"signature": "def admin_obj_link(obj, display=''):", "body": "url = reverse('' % (obj._meta.app_label,obj._meta.model_name))url += '' % obj.idtext = str(obj)if display:text = displayreturn format_html('', url, text)", "docstring": "Returns a link to the django admin change list with a filter set to\n only the object given.\n\n :param obj:\n Object to create the admin change list display link for\n :param display:\n Text to display in the link. 
Defaults to the string representation of the object\n :returns:\n Text containing HTML for a link", "id": "f9998:m0"} {"signature": "def admin_obj_attr(obj, attr):", "body": "try:field_obj = get_obj_attr(obj, attr)if not field_obj:return ''except AttributeError:return ''return field_obj", "docstring": "A safe version of :func:`utils.get_obj_attr` that returns an empty\n string in the case of an exception or an empty object", "id": "f9998:m1"} {"signature": "def _obj_display(obj, display=''):", "body": "result = ''if not display:result = str(obj)else:template = Template(display)context = Context({'':obj})result = template.render(context)return result", "docstring": "Returns string representation of an object, either the default or based\n on the display template passed in.", "id": "f9998:m2"} {"signature": "def make_admin_obj_mixin(name):", "body": "@classmethoddef add_obj_link(cls, funcname, attr, title='', display=''):if not title:title = attr.capitalize()_display = displaydef _link(self, obj):field_obj = admin_obj_attr(obj, attr)if not field_obj:return ''text = _obj_display(field_obj, _display)return admin_obj_link(field_obj, text)_link.short_description = title_link.allow_tags = True_link.admin_order_field = attrsetattr(cls, funcname, _link)@classmethoddef add_obj_ref(cls, funcname, attr, title='', display=''):if not title:title = attr.capitalize()_display = displaydef _ref(self, obj):field_obj = admin_obj_attr(obj, attr)if not field_obj:return ''return _obj_display(field_obj, _display)_ref.short_description = title_ref.allow_tags = True_ref.admin_order_field = attrsetattr(cls, funcname, _ref)klass = type(name, (), {})klass.add_obj_link = add_obj_linkklass.add_obj_ref = add_obj_refreturn klass", "docstring": "This method dynamically creates a mixin to be used with your \n :class:`ModelAdmin` classes. The mixin provides utility methods that can\n be referenced inside of the admin object's ``list_display`` and other\n similar attributes.\n\n :param name:\n Each usage of the mixin must be given a unique name for the mixin class\n being created\n :returns:\n Dynamically created mixin class\n\n The created class supports the following methods:\n\n .. code-block:: python\n\n add_obj_ref(funcname, attr, [title, display])\n\n\n Django admin ``list_display`` does not support the double underscore\n semantics of object references. This method adds a function to the mixin\n that returns the ``str(obj)`` value from object relations.\n\n :param funcname:\n Name of the function to be added to the mixin. In the admin class\n object that includes the mixin, this name is used in the\n ``list_display`` tuple.\n :param attr:\n Name of the attribute to dereference from the corresponding object,\n i.e. what will be dereferenced. This name supports double underscore\n object link referencing for ``models.ForeignKey`` members.\n :param title:\n Title for the column of the django admin table. If not given it\n defaults to a capitalized version of ``attr``\n :param display:\n What to display as the text in the column. If not given it defaults\n to the string representation of the object for the row: ``str(obj)`` .\n This parameter supports django templating, the context for which\n contains a dictionary key named \"obj\" with the value being the object\n for the row.\n\n .. 
code-block:: python\n\n add_obj_link(funcname, attr, [title, display])\n\n\n This method adds a function to the mixin that returns a link to a django\n admin change list page for the member attribute of the object being\n displayed.\n\n :param funcname:\n Name of the function to be added to the mixin. In the admin class\n object that includes the mixin, this name is used in the\n ``list_display`` tuple.\n :param attr:\n Name of the attribute to dereference from the corresponding object,\n i.e. what will be lined to. This name supports double underscore\n object link referencing for ``models.ForeignKey`` members.\n :param title:\n Title for the column of the django admin table. If not given it\n defaults to a capitalized version of ``attr``\n :param display:\n What to display as the text for the link being shown. If not given it\n defaults to the string representation of the object for the row: \n ``str(obj)`` . This parameter supports django templating, the context\n for which contains a dictionary key named \"obj\" with the value being\n the object for the row.\n\n Example usage:\n\n .. code-block:: python\n\n # ---- models.py file ----\n class Author(models.Model):\n name = models.CharField(max_length=100)\n\n\n class Book(models.Model):\n title = models.CharField(max_length=100)\n author = models.ForeignKey(Author, on_delete=models.CASCADE)\n\n\n .. code-block:: python\n\n # ---- admin.py file ----\n @admin.register(Author)\n class Author(admin.ModelAdmin):\n list_display = ('name', )\n\n\n mixin = make_admin_obj_mixin('BookMixin')\n mixin.add_obj_link('show_author', 'Author', 'Our Authors',\n '{{obj.name}} (id={{obj.id}})')\n\n @admin.register(Book)\n class BookAdmin(admin.ModelAdmin, mixin):\n list_display = ('name', 'show_author')\n\n\n A sample django admin page for \"Book\" would have the table:\n\n +---------------------------------+------------------------+\n | Name | Our Authors |\n +=================================+========================+\n | Hitchhikers Guide To The Galaxy | *Douglas Adams (id=1)* |\n +---------------------------------+------------------------+\n | War and Peace | *Tolstoy (id=2)* |\n +---------------------------------+------------------------+\n | Dirk Gently | *Douglas Adams (id=1)* |\n +---------------------------------+------------------------+\n\n\n Each of the *items* in the \"Our Authors\" column would be a link to the\n django admin change list for the \"Author\" object with a filter set to show\n just the object that was clicked. For example, if you clicked \"Douglas\n Adams (id=1)\" you would be taken to the Author change list page filtered\n just for Douglas Adams books.\n\n The ``add_obj_ref`` method is similar to the above, but instead of showing\n links, it just shows text and so can be used for view-only attributes of\n dereferenced objects.", "id": "f9998:m3"} {"signature": "def fancy_modeladmin(*args):", "body": "global klass_countklass_count += name = '' % klass_countklass = type(name, (FancyModelAdmin,), {})klass.list_display = []if len(args) > :klass.add_displays(*args)return klass", "docstring": "Returns a new copy of a :class:`FancyModelAdmin` class (a class, not\n an instance!). This can then be inherited from when declaring a model\n admin class. 
The :class:`FancyModelAdmin` class has additional methods\n for managing the ``list_display`` attribute.\n\n :param ``*args``: [optional] any arguments given will be added to the\n ``list_display`` property using regular django ``list_display``\n functionality.\n\n This function is meant as a replacement for :func:`make_admin_obj_mixin`,\n it does everything the old one does with fewer bookkeeping needs for the\n user as well as adding functionality.\n\n Example usage:\n\n .. code-block:: python\n\n # ---- models.py file ----\n class Author(models.Model):\n name = models.CharField(max_length=100)\n\n\n class Book(models.Model):\n title = models.CharField(max_length=100)\n author = models.ForeignKey(Author, on_delete=models.CASCADE)\n\n\n .. code-block:: python\n\n # ---- admin.py file ----\n @admin.register(Author)\n class Author(admin.ModelAdmin):\n list_display = ('name', )\n\n\n base = fany_list_display_modeladmin()\n base.add_displays('id', 'name')\n base.add_obj_link('author', 'Our Authors',\n '{{obj.name}} (id={{obj.id}})')\n\n @admin.register(Book)\n class BookAdmin(base):\n list_display = ('name', 'show_author')\n\n\n A sample django admin page for \"Book\" would have the table:\n\n +----+---------------------------------+------------------------+\n | ID | Name | Our Authors |\n +====+=================================+========================+\n | 1 | Hitchhikers Guide To The Galaxy | *Douglas Adams (id=1)* |\n +----+---------------------------------+------------------------+\n | 2 | War and Peace | *Tolstoy (id=2)* |\n +----+---------------------------------+------------------------+\n | 3 | Dirk Gently | *Douglas Adams (id=1)* |\n +----+---------------------------------+------------------------+\n\n\n See :class:`FancyModelAdmin` for a full list of functionality\n provided by the returned base class.", "id": "f9998:m4"} {"signature": "@classmethoddef add_displays(cls, *args):", "body": "for arg in args:cls.list_display.append(arg)", "docstring": "Each arg is added to the ``list_display`` property without any\n extra wrappers, using only the regular django functionality", "id": "f9998:c0:m0"} {"signature": "@classmethoddef add_display(cls, attr, title=''):", "body": "global klass_countklass_count += fn_name = '' % klass_countcls.list_display.append(fn_name)if not title:title = attr.capitalize()def _ref(self, obj):_, _, value = lookup_field(attr, obj, cls)return value_ref.short_description = title_ref.allow_tags = True_ref.admin_order_field = attrsetattr(cls, fn_name, _ref)", "docstring": "Adds a ``list_display`` property without any extra wrappers,\n similar to :func:`add_displays`, but can also change the title.\n\n :param attr:\n Name of the attribute to add to the display\n\n :param title:\n Title for the column of the django admin table. If not given it\n defaults to a capitalized version of ``attr``", "id": "f9998:c0:m1"} {"signature": "@classmethoddef add_link(cls, attr, title='', display=''):", "body": "global klass_countklass_count += fn_name = '' % klass_countcls.list_display.append(fn_name)if not title:title = attr.capitalize()_display = displaydef _link(self, obj):field_obj = admin_obj_attr(obj, attr)if not field_obj:return ''text = _obj_display(field_obj, _display)return admin_obj_link(field_obj, text)_link.short_description = title_link.allow_tags = True_link.admin_order_field = attrsetattr(cls, fn_name, _link)", "docstring": "Adds a ``list_display`` attribute that appears as a link to the\n django admin change page for the type of object being shown. 
Supports\n double underscore attribute name dereferencing.\n\n :param attr:\n Name of the attribute to dereference from the corresponding\n object, i.e. what will be linked to. This name supports double\n underscore object link referencing for ``models.ForeignKey``\n members.\n\n :param title:\n Title for the column of the django admin table. If not given it\n defaults to a capitalized version of ``attr``\n\n :param display:\n What to display as the text for the link being shown. If not\n given it defaults to the string representation of the object for\n the row: ``str(obj)`` . This parameter supports django\n templating, the context for which contains a dictionary key named\n \"obj\" with the value being the object for the row.\n\n Example usage:\n\n .. code-block:: python\n\n # ---- admin.py file ----\n\n base = fancy_modeladmin('id')\n base.add_link('author', 'Our Authors',\n '{{obj.name}} (id={{obj.id}})')\n\n @admin.register(Book)\n class BookAdmin(base):\n pass\n\n The django admin change page for the Book class would have a column\n for \"id\" and another titled \"Our Authors\". The \"Our Authors\" column\n would have a link for each Author object referenced by \"book.author\".\n The link would go to the Author django admin change listing. The\n display of the link would be the name of the author with the id in\n brackets, e.g. \"Douglas Adams (id=42)\"", "id": "f9998:c0:m2"} {"signature": "@classmethoddef add_object(cls, attr, title='', display=''):", "body": "global klass_countklass_count += fn_name = '' % klass_countcls.list_display.append(fn_name)if not title:title = attr.capitalize()_display = displaydef _ref(self, obj):field_obj = admin_obj_attr(obj, attr)if not field_obj:return ''return _obj_display(field_obj, _display)_ref.short_description = title_ref.allow_tags = True_ref.admin_order_field = attrsetattr(cls, fn_name, _ref)", "docstring": "Adds a ``list_display`` attribute showing an object. Supports\n double underscore attribute name dereferencing.\n\n :param attr:\n Name of the attribute to dereference from the corresponding\n object, i.e. what will be linked to. This name supports double\n underscore object link referencing for ``models.ForeignKey``\n members.\n\n :param title:\n Title for the column of the django admin table. If not given it\n defaults to a capitalized version of ``attr``\n\n :param display:\n What to display as the text for the link being shown. If not\n given it defaults to the string representation of the object for\n the row: ``str(obj)``. This parameter supports django templating,\n the context for which contains a dictionary key named \"obj\" with\n the value being the object for the row.", "id": "f9998:c0:m3"} {"signature": "@classmethoddef add_formatted_field(cls, field, format_string, title=''):", "body": "global klass_countklass_count += fn_name = '' % klass_countcls.list_display.append(fn_name)if not title:title = field.capitalize()_format_string = format_stringdef _ref(self, obj):return _format_string % getattr(obj, field)_ref.short_description = title_ref.allow_tags = True_ref.admin_order_field = fieldsetattr(cls, fn_name, _ref)", "docstring": "Adds a ``list_display`` attribute showing a field in the object\n using a python %-formatted string.\n\n :param field:\n Name of the field in the object.\n\n :param format_string:\n An old-style (to remain python 2.x compatible) % string formatter\n with a single variable reference. The named ``field`` attribute\n will be passed to the formatter using the \"%\" operator. 
\n\n :param title:\n Title for the column of the django admin table. If not given it\n defaults to a capitalized version of ``field``", "id": "f9998:c0:m4"} {"signature": "def create_validator():", "body": "field_names = ('', '', '', '', '','')validator = CSVValidator(field_names)validator.add_header_check('', '')validator.add_record_length_check('', '')validator.add_value_check('', int, '', '')validator.add_value_check('', int, '', '')validator.add_value_check('', enumeration('', ''), '', '')validator.add_value_check('', number_range_inclusive(, , int), '', '')validator.add_value_check('', datetime_string(''),'', '')def check_age_variables(r):age_years = int(r[''])age_months = int(r[''])valid = (age_months >= age_years * and age_months % age_years < )if not valid:raise RecordError('', '')validator.add_record_check(check_age_variables)return validator", "docstring": "Create an example CSV validator for patient demographic data.", "id": "f10011:m0"} {"signature": "def main():", "body": "description = ''parser = argparse.ArgumentParser(description=description)parser.add_argument('', metavar='', help='')parser.add_argument('', '',dest='',type=int,action='',default=,help='')parser.add_argument('', '',dest='',action='',default=False,help='')parser.add_argument('', '',dest='',action='',default=False,help='')args = parser.parse_args()if not os.path.isfile(args.file):print('' % args.file)sys.exit()with open(args.file, '') as f:data = csv.reader(f, delimiter='')validator = create_validator()problems = validator.validate(data, summarize=args.summarize,report_unexpected_exceptions=args.report_unexpected_exceptions,context={'': args.file})write_problems(problems, sys.stdout, summarize=args.summarize, limit=args.limit)if problems: sys.exit()else:sys.exit()", "docstring": "Main function.", "id": "f10011:m1"} {"signature": "def enumeration(*args):", "body": "assert len(args) > , ''if len(args) == :members = args[]else:members = argsdef checker(value):if value not in members:raise ValueError(value)return checker", "docstring": "Return a value check function which raises a value error if the value is not\nin a pre-defined enumeration of values.\n\nIf you pass in a list, tuple or set as the single argument, it is assumed\nthat the list/tuple/set defines the membership of the enumeration.\n\nIf you pass in more than on argument, it is assumed the arguments themselves\ndefine the enumeration.", "id": "f10012:m0"} {"signature": "def match_pattern(regex):", "body": "prog = re.compile(regex)def checker(v):result = prog.match(v)if result is None:raise ValueError(v)return checker", "docstring": "Return a value check function which raises a ValueError if the value does\nnot match the supplied regular expression, see also `re.match`.", "id": "f10012:m1"} {"signature": "def search_pattern(regex):", "body": "prog = re.compile(regex)def checker(v):result = prog.search(v)if result is None:raise ValueError(v)return checker", "docstring": "Return a value check function which raises a ValueError if the supplied\nregular expression does not match anywhere in the value, see also\n`re.search`.", "id": "f10012:m2"} {"signature": "def number_range_inclusive(min, max, type=float):", "body": "def checker(v):if type(v) < min or type(v) > max:raise ValueError(v)return checker", "docstring": "Return a value check function which raises a ValueError if the supplied\nvalue when cast as `type` is less than `min` or greater than `max`.", "id": "f10012:m3"} {"signature": "def number_range_exclusive(min, max, type=float):", "body": "def 
checker(v):if type(v) <= min or type(v) >= max:raise ValueError(v)return checker", "docstring": "Return a value check function which raises a ValueError if the supplied\nvalue when cast as `type` is less than or equal to `min` or greater than\nor equal to `max`.", "id": "f10012:m4"} {"signature": "def datetime_string(format):", "body": "def checker(v):datetime.strptime(v, format)return checker", "docstring": "Return a value check function which raises a ValueError if the supplied\nvalue cannot be converted to a datetime using the supplied format string.\n\nSee also `datetime.strptime`.", "id": "f10012:m5"} {"signature": "def datetime_range_inclusive(min, max, format):", "body": "dmin = datetime.strptime(min, format)dmax = datetime.strptime(max, format)def checker(v):dv = datetime.strptime(v, format)if dv < dmin or dv > dmax:raise ValueError(v)return checker", "docstring": "Return a value check function which raises a ValueError if the supplied\nvalue when converted to a datetime using the supplied `format` string is\nless than `min` or greater than `max`.", "id": "f10012:m6"} {"signature": "def datetime_range_exclusive(min, max, format):", "body": "dmin = datetime.strptime(min, format)dmax = datetime.strptime(max, format)def checker(v):dv = datetime.strptime(v, format)if dv <= dmin or dv >= dmax:raise ValueError(v)return checker", "docstring": "Return a value check function which raises a ValueError if the supplied\nvalue when converted to a datetime using the supplied `format` string is\nless than or equal to `min` or greater than or equal to `max`.", "id": "f10012:m7"} {"signature": "def write_problems(problems, file, summarize=False, limit=):", "body": "w = file.write w(\"\"\"\"\"\"lems====total += code = p['']if code in counts:counts[code] += else:counts[code] = if not summarize:ptitle = '' % (p[''], p[''])w(ptitle)underline = ''for i in range(len(ptitle.strip())):underline += ''underline += ''w(underline)for k in sorted(p.viewkeys() - set(['', '', ''])):w('' % (k, p[k]))if '' in p:c = p['']for k in sorted(c.viewkeys()):w('' % (k, c[k]))w(", "docstring": "Write problems as restructured text to a file (or stdout/stderr).", "id": "f10012:m8"} {"signature": "def __init__(self, field_names):", "body": "self._field_names = tuple(field_names)self._value_checks = []self._header_checks = []self._record_length_checks = []self._value_predicates = []self._record_checks = []self._record_predicates = []self._unique_checks = []self._skips = []", "docstring": "Instantiate a `CSVValidator`, supplying expected `field_names` as a\nsequence of strings.", "id": "f10012:c1:m0"} {"signature": "def add_header_check(self,code=HEADER_CHECK_FAILED,message=MESSAGES[HEADER_CHECK_FAILED]):", "body": "t = code, messageself._header_checks.append(t)", "docstring": "Add a header check, i.e., check whether the header record is consistent\nwith the expected field names.\n\nArguments\n---------\n\n`code` - problem code to report if the header record is not valid,\ndefaults to `HEADER_CHECK_FAILED`\n\n`message` - problem message to report if a value is not valid", "id": "f10012:c1:m1"} {"signature": "def add_record_length_check(self,code=RECORD_LENGTH_CHECK_FAILED,message=MESSAGES[RECORD_LENGTH_CHECK_FAILED],modulus=):", "body": "t = code, message, modulusself._record_length_checks.append(t)", "docstring": "Add a record length check, i.e., check whether the length of a record is\nconsistent with the number of expected fields.\n\nArguments\n---------\n\n`code` - problem code to report if a record is not valid, defaults 
to\n`RECORD_LENGTH_CHECK_FAILED`\n\n`message` - problem message to report if a record is not valid\n\n`modulus` - apply the check to every nth record, defaults to 1 (check\nevery record)", "id": "f10012:c1:m2"} {"signature": "def add_value_check(self, field_name, value_check,code=VALUE_CHECK_FAILED,message=MESSAGES[VALUE_CHECK_FAILED],modulus=):", "body": "assert field_name in self._field_names, '' % field_nameassert callable(value_check), ''t = field_name, value_check, code, message, modulusself._value_checks.append(t)", "docstring": "Add a value check function for the specified field.\n\nArguments\n---------\n\n`field_name` - the name of the field to attach the value check function\nto\n\n`value_check` - a function that accepts a single argument (a value) and\nraises a `ValueError` if the value is not valid\n\n`code` - problem code to report if a value is not valid, defaults to\n`VALUE_CHECK_FAILED`\n\n`message` - problem message to report if a value is not valid\n\n`modulus` - apply the check to every nth record, defaults to 1 (check\nevery record)", "id": "f10012:c1:m3"} {"signature": "def add_value_predicate(self, field_name, value_predicate,code=VALUE_PREDICATE_FALSE,message=MESSAGES[VALUE_PREDICATE_FALSE],modulus=):", "body": "assert field_name in self._field_names, '' % field_nameassert callable(value_predicate), ''t = field_name, value_predicate, code, message, modulusself._value_predicates.append(t)", "docstring": "Add a value predicate function for the specified field.\n\nN.B., everything you can do with value predicates can also be done with\nvalue check functions, whether you use one or the other is a matter of\nstyle.\n\nArguments\n---------\n\n`field_name` - the name of the field to attach the value predicate\nfunction to\n\n`value_predicate` - a function that accepts a single argument (a value)\nand returns False if the value is not valid\n\n`code` - problem code to report if a value is not valid, defaults to\n`VALUE_PREDICATE_FALSE`\n\n`message` - problem message to report if a value is not valid\n\n`modulus` - apply the check to every nth record, defaults to 1 (check\nevery record)", "id": "f10012:c1:m4"} {"signature": "def add_record_check(self, record_check, modulus=):", "body": "assert callable(record_check), ''t = record_check, modulusself._record_checks.append(t)", "docstring": "Add a record check function.\n\nArguments\n---------\n\n`record_check` - a function that accepts a single argument (a record as\na dictionary of values indexed by field name) and raises a\n`RecordError` if the record is not valid\n\n`modulus` - apply the check to every nth record, defaults to 1 (check\nevery record)", "id": "f10012:c1:m5"} {"signature": "def add_record_predicate(self, record_predicate,code=RECORD_PREDICATE_FALSE,message=MESSAGES[RECORD_PREDICATE_FALSE],modulus=):", "body": "assert callable(record_predicate), ''t = record_predicate, code, message, modulusself._record_predicates.append(t)", "docstring": "Add a record predicate function.\n\nN.B., everything you can do with record predicates can also be done with\nrecord check functions, whether you use one or the other is a matter of\nstyle.\n\nArguments\n---------\n\n`record_predicate` - a function that accepts a single argument (a record\nas a dictionary of values indexed by field name) and returns False if\nthe value is not valid\n\n`code` - problem code to report if a record is not valid, defaults to\n`RECORD_PREDICATE_FALSE`\n\n`message` - problem message to report if a record is not valid\n\n`modulus` - apply the check to 
every nth record, defaults to 1 (check\nevery record)", "id": "f10012:c1:m6"} {"signature": "def add_unique_check(self, key,code=UNIQUE_CHECK_FAILED,message=MESSAGES[UNIQUE_CHECK_FAILED]):", "body": "if isinstance(key, basestring):assert key in self._field_names, '' % keyelse:for f in key:assert f in self._field_names, '' % keyt = key, code, messageself._unique_checks.append(t)", "docstring": "Add a unique check on a single column or combination of columns.\n\nArguments\n---------\n\n`key` - a single field name (string) specifying a field in which all\nvalues are expected to be unique, or a sequence of field names (tuple\nor list of strings) specifying a compound key\n\n`code` - problem code to report if a record is not valid, defaults to\n`UNIQUE_CHECK_FAILED`\n\n`message` - problem message to report if a record is not valid", "id": "f10012:c1:m7"} {"signature": "def add_skip(self, skip):", "body": "assert callable(skip), ''self._skips.append(skip)", "docstring": "Add a `skip` function which accepts a single argument (a record as a\nsequence of values) and returns True if all checks on the record should\nbe skipped.", "id": "f10012:c1:m8"} {"signature": "def validate(self, data,expect_header_row=True,ignore_lines=,summarize=False,limit=,context=None,report_unexpected_exceptions=True):", "body": "problems = list()problem_generator = self.ivalidate(data, expect_header_row,ignore_lines, summarize, context,report_unexpected_exceptions)for i, p in enumerate(problem_generator):if not limit or i < limit:problems.append(p)return problems", "docstring": "Validate `data` and return a list of validation problems found.\n\nArguments\n---------\n\n`data` - any source of row-oriented data, e.g., as provided by a\n`csv.reader`, or a list of lists of strings, or ...\n\n`expect_header_row` - does the data contain a header row (i.e., the\nfirst record is a list of field names)? Defaults to True.\n\n`ignore_lines` - ignore n lines (rows) at the beginning of the data\n\n`summarize` - only report problem codes, no other details\n\n`limit` - report at most n problems\n\n`context` - a dictionary of any additional information to be added to\nany problems found - useful if problems are being aggregated from\nmultiple validators\n\n`report_unexpected_exceptions` - value check function, value predicates,\nrecord check functions, record predicates, and other user-supplied\nvalidation functions may raise unexpected exceptions. 
If this argument\nis true, any unexpected exceptions will be reported as validation\nproblems; if False, unexpected exceptions will be handled silently.", "id": "f10012:c1:m9"} {"signature": "def ivalidate(self, data,expect_header_row=True,ignore_lines=,summarize=False,context=None,report_unexpected_exceptions=True):", "body": "unique_sets = self._init_unique_sets() for i, r in enumerate(data):if expect_header_row and i == ignore_lines:for p in self._apply_header_checks(i, r, summarize, context):yield pelif i >= ignore_lines:skip = Falsefor p in self._apply_skips(i, r, summarize,report_unexpected_exceptions,context):if p is True:skip = Trueelse:yield pif not skip:for p in self._apply_each_methods(i, r, summarize,report_unexpected_exceptions,context):yield p for p in self._apply_value_checks(i, r, summarize,report_unexpected_exceptions,context):yield pfor p in self._apply_record_length_checks(i, r, summarize,context):yield pfor p in self._apply_value_predicates(i, r, summarize,report_unexpected_exceptions,context):yield pfor p in self._apply_record_checks(i, r, summarize,report_unexpected_exceptions,context):yield pfor p in self._apply_record_predicates(i, r, summarize,report_unexpected_exceptions,context):yield pfor p in self._apply_unique_checks(i, r, unique_sets, summarize):yield pfor p in self._apply_check_methods(i, r, summarize,report_unexpected_exceptions,context):yield pfor p in self._apply_assert_methods(i, r, summarize,report_unexpected_exceptions,context):yield pfor p in self._apply_finally_assert_methods(summarize,report_unexpected_exceptions,context):yield p", "docstring": "Validate `data` and return a iterator over problems found.\n\nUse this function rather than validate() if you expect a large number\nof problems.\n\nArguments\n---------\n\n`data` - any source of row-oriented data, e.g., as provided by a\n`csv.reader`, or a list of lists of strings, or ...\n\n`expect_header_row` - does the data contain a header row (i.e., the\nfirst record is a list of field names)? Defaults to True.\n\n`ignore_lines` - ignore n lines (rows) at the beginning of the data\n\n`summarize` - only report problem codes, no other details\n\n`context` - a dictionary of any additional information to be added to\nany problems found - useful if problems are being aggregated from\nmultiple validators\n\n`report_unexpected_exceptions` - value check function, value predicates,\nrecord check functions, record predicates, and other user-supplied\nvalidation functions may raise unexpected exceptions. 
If this argument\nis true, any unexpected exceptions will be reported as validation\nproblems; if False, unexpected exceptions will be handled silently.", "id": "f10012:c1:m10"} {"signature": "def _init_unique_sets(self):", "body": "ks = dict()for t in self._unique_checks:key = t[]ks[key] = set() return ks", "docstring": "Initialise sets used for uniqueness checking.", "id": "f10012:c1:m11"} {"signature": "def _apply_value_checks(self, i, r,summarize=False,report_unexpected_exceptions=True,context=None):", "body": "for field_name, check, code, message, modulus in self._value_checks:if i % modulus == : fi = self._field_names.index(field_name)if fi < len(r): value = r[fi]try:check(value)except ValueError:p = {'': code}if not summarize:p[''] = messagep[''] = i + p[''] = fi + p[''] = field_namep[''] = valuep[''] = rif context is not None: p[''] = contextyield pexcept Exception as e:if report_unexpected_exceptions:p = {'': UNEXPECTED_EXCEPTION}if not summarize:p[''] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e)p[''] = i + p[''] = fi + p[''] = field_namep[''] = valuep[''] = rp[''] = ep[''] = '' % (check.__name__,check.__doc__)if context is not None: p[''] = contextyield p", "docstring": "Apply value check functions on the given record `r`.", "id": "f10012:c1:m12"} {"signature": "def _apply_header_checks(self, i, r, summarize=False, context=None):", "body": "for code, message in self._header_checks:if tuple(r) != self._field_names:p = {'': code}if not summarize:p[''] = messagep[''] = i + p[''] = tuple(r)p[''] = set(self._field_names) - set(r)p[''] = set(r) - set(self._field_names)if context is not None: p[''] = contextyield p", "docstring": "Apply header checks on the given record `r`.", "id": "f10012:c1:m13"} {"signature": "def _apply_record_length_checks(self, i, r, summarize=False, context=None):", "body": "for code, message, modulus in self._record_length_checks:if i % modulus == : if len(r) != len(self._field_names):p = {'': code}if not summarize:p[''] = messagep[''] = i + p[''] = rp[''] = len(r)if context is not None: p[''] = contextyield p", "docstring": "Apply record length checks on the given record `r`.", "id": "f10012:c1:m14"} {"signature": "def _apply_value_predicates(self, i, r,summarize=False,report_unexpected_exceptions=True,context=None):", "body": "for field_name, predicate, code, message, modulus in self._value_predicates:if i % modulus == : fi = self._field_names.index(field_name)if fi < len(r): value = r[fi]try:valid = predicate(value)if not valid:p = {'': code}if not summarize:p[''] = messagep[''] = i + p[''] = fi + p[''] = field_namep[''] = valuep[''] = rif context is not None: p[''] = contextyield pexcept Exception as e:if report_unexpected_exceptions:p = {'': UNEXPECTED_EXCEPTION}if not summarize:p[''] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e)p[''] = i + p[''] = fi + p[''] = field_namep[''] = valuep[''] = rp[''] = ep[''] = '' % (predicate.__name__,predicate.__doc__)if context is not None: p[''] = contextyield p", "docstring": "Apply value predicates on the given record `r`.", "id": "f10012:c1:m15"} {"signature": "def _apply_record_checks(self, i, r,summarize=False,report_unexpected_exceptions=True,context=None):", "body": "for check, modulus in self._record_checks:if i % modulus == : rdict = self._as_dict(r)try:check(rdict)except RecordError as e:code = e.code if e.code is not None else RECORD_CHECK_FAILEDp = {'': code}if not summarize:message = e.message if e.message is not None else MESSAGES[RECORD_CHECK_FAILED]p[''] = messagep[''] = i + 
p[''] = rif context is not None: p[''] = contextif e.details is not None: p[''] = e.detailsyield pexcept Exception as e:if report_unexpected_exceptions:p = {'': UNEXPECTED_EXCEPTION}if not summarize:p[''] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e)p[''] = i + p[''] = rp[''] = ep[''] = '' % (check.__name__,check.__doc__)if context is not None: p[''] = contextyield p", "docstring": "Apply record checks on `r`.", "id": "f10012:c1:m16"} {"signature": "def _apply_record_predicates(self, i, r,summarize=False,report_unexpected_exceptions=True,context=None):", "body": "for predicate, code, message, modulus in self._record_predicates:if i % modulus == : rdict = self._as_dict(r)try:valid = predicate(rdict)if not valid:p = {'': code}if not summarize:p[''] = messagep[''] = i + p[''] = rif context is not None: p[''] = contextyield pexcept Exception as e:if report_unexpected_exceptions:p = {'': UNEXPECTED_EXCEPTION}if not summarize:p[''] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e)p[''] = i + p[''] = rp[''] = ep[''] = '' % (predicate.__name__,predicate.__doc__)if context is not None: p[''] = contextyield p", "docstring": "Apply record predicates on `r`.", "id": "f10012:c1:m17"} {"signature": "def _apply_unique_checks(self, i, r, unique_sets,summarize=False,context=None):", "body": "for key, code, message in self._unique_checks:value = Nonevalues = unique_sets[key]if isinstance(key, basestring): fi = self._field_names.index(key)if fi >= len(r):continuevalue = r[fi]else: value = []for f in key:fi = self._field_names.index(f)if fi >= len(r):breakvalue.append(r[fi])value = tuple(value) if value in values:p = {'': code}if not summarize:p[''] = messagep[''] = i + p[''] = rp[''] = keyp[''] = valueif context is not None: p[''] = contextyield pvalues.add(value)", "docstring": "Apply unique checks on `r`.", "id": "f10012:c1:m18"} {"signature": "def _apply_each_methods(self, i, r,summarize=False,report_unexpected_exceptions=True,context=None):", "body": "for a in dir(self):if a.startswith(''):rdict = self._as_dict(r)f = getattr(self, a)try:f(rdict)except Exception as e:if report_unexpected_exceptions:p = {'': UNEXPECTED_EXCEPTION}if not summarize:p[''] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e)p[''] = i + p[''] = rp[''] = ep[''] = '' % (f.__name__,f.__doc__)if context is not None: p[''] = contextyield p", "docstring": "Invoke 'each' methods on `r`.", "id": "f10012:c1:m19"} {"signature": "def _apply_assert_methods(self, i, r,summarize=False,report_unexpected_exceptions=True,context=None):", "body": "for a in dir(self):if a.startswith(''):rdict = self._as_dict(r)f = getattr(self, a)try:f(rdict)except AssertionError as e:code = ASSERT_CHECK_FAILEDmessage = MESSAGES[ASSERT_CHECK_FAILED]if len(e.args) > :custom = e.args[]if isinstance(custom, (list, tuple)):if len(custom) > :code = custom[]if len(custom) > :message = custom[]else:code = customp = {'': code}if not summarize:p[''] = messagep[''] = i + p[''] = rif context is not None: p[''] = contextyield pexcept Exception as e:if report_unexpected_exceptions:p = {'': UNEXPECTED_EXCEPTION}if not summarize:p[''] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e)p[''] = i + p[''] = rp[''] = ep[''] = '' % (f.__name__,f.__doc__)if context is not None: p[''] = contextyield p", "docstring": "Apply 'assert' methods on `r`.", "id": "f10012:c1:m20"} {"signature": "def _apply_check_methods(self, i, r,summarize=False,report_unexpected_exceptions=True,context=None):", "body": "for a in dir(self):if a.startswith(''):rdict = 
self._as_dict(r)f = getattr(self, a)try:f(rdict)except RecordError as e:code = e.code if e.code is not None else RECORD_CHECK_FAILEDp = {'': code}if not summarize:message = e.message if e.message is not None else MESSAGES[RECORD_CHECK_FAILED]p[''] = messagep[''] = i + p[''] = rif context is not None: p[''] = contextif e.details is not None: p[''] = e.detailsyield pexcept Exception as e:if report_unexpected_exceptions:p = {'': UNEXPECTED_EXCEPTION}if not summarize:p[''] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e)p[''] = i + p[''] = rp[''] = ep[''] = '' % (f.__name__,f.__doc__)if context is not None: p[''] = contextyield p", "docstring": "Apply 'check' methods on `r`.", "id": "f10012:c1:m21"} {"signature": "def _apply_finally_assert_methods(self,summarize=False,report_unexpected_exceptions=True,context=None):", "body": "for a in dir(self):if a.startswith(''):f = getattr(self, a)try:f()except AssertionError as e:code = ASSERT_CHECK_FAILEDmessage = MESSAGES[ASSERT_CHECK_FAILED]if len(e.args) > :custom = e.args[]if isinstance(custom, (list, tuple)):if len(custom) > :code = custom[]if len(custom) > :message = custom[]else:code = customp = {'': code}if not summarize:p[''] = messageif context is not None: p[''] = contextyield pexcept Exception as e:if report_unexpected_exceptions:p = {'': UNEXPECTED_EXCEPTION}if not summarize:p[''] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e)p[''] = ep[''] = '' % (f.__name__,f.__doc__)if context is not None: p[''] = contextyield p", "docstring": "Apply 'finally_assert' methods.", "id": "f10012:c1:m22"} {"signature": "def _apply_skips(self, i, r,summarize=False,report_unexpected_exceptions=True,context=None):", "body": "for skip in self._skips:try:result = skip(r)if result is True:yield Trueexcept Exception as e:if report_unexpected_exceptions:p = {'': UNEXPECTED_EXCEPTION}if not summarize:p[''] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e)p[''] = i + p[''] = rp[''] = ep[''] = '' % (skip.__name__,skip.__doc__)if context is not None: p[''] = contextyield p", "docstring": "Apply skip functions on `r`.", "id": "f10012:c1:m23"} {"signature": "def _as_dict(self, r):", "body": "d = dict()for i, f in enumerate(self._field_names):d[f] = r[i] if i < len(r) else Nonereturn d", "docstring": "Convert the record to a dictionary using field names as keys.", "id": "f10012:c1:m24"} {"signature": "def _decode_field(message, field, value):", "body": "if field.type == FieldDescriptor.TYPE_MESSAGE:decode(getattr(message, field.name), value)else:try:if field.type == FieldDescriptor.TYPE_BYTES:value = base64.b64decode(value)setattr(message, field.name, value)except (ValueError, TypeError) as e:logger.warning('',message.__class__.__name__, field.name, e)", "docstring": "Decode optional or required field.", "id": "f10015:m0"} {"signature": "def _decode_repeated_field(message, field, value_list):", "body": "if field.type == FieldDescriptor.TYPE_MESSAGE:for value in value_list:decode(getattr(message, field.name).add(), value)else:try:for value in value_list:if field.type == FieldDescriptor.TYPE_BYTES:value = base64.b64decode(value)getattr(message, field.name).append(value)except (ValueError, TypeError) as e:logger.warning('',message.__class__.__name__, field.name, e)message.ClearField(field.name)", "docstring": "Decode repeated field.", "id": "f10015:m1"} {"signature": "def decode(message, pblite, ignore_first_item=False):", "body": "if not isinstance(pblite, list):logger.warning('',type(pblite))returnif ignore_first_item:pblite = pblite[:]if 
pblite and isinstance(pblite[-], dict):extra_fields = {int(field_number): value for field_number, valuein pblite[-].items()}pblite = pblite[:-]else:extra_fields = {}fields_values = itertools.chain(enumerate(pblite, start=),extra_fields.items())for field_number, value in fields_values:if value is None:continuetry:field = message.DESCRIPTOR.fields_by_number[field_number]except KeyError:if value not in [[], '', ]:logger.debug('''', message.__class__.__name__, field_number,value)continueif field.label == FieldDescriptor.LABEL_REPEATED:_decode_repeated_field(message, field, value)else:_decode_field(message, field, value)", "docstring": "Decode pblite to Protocol Buffer message.\n\n This method is permissive of decoding errors and will log them as warnings\n and continue decoding where possible.\n\n The first element of the outer pblite list must often be ignored using the\n ignore_first_item parameter because it contains an abbreviation of the name\n of the protobuf message (eg. cscmrp for ClientSendChatMessageResponseP)\n that's not part of the protobuf.\n\n Args:\n message: protocol buffer message instance to decode into.\n pblite: list representing a pblite-serialized message.\n ignore_first_item: If True, ignore the item at index 0 in the pblite\n list, making the item at index 1 correspond to field 1 in the\n message.", "id": "f10015:m2"} {"signature": "def markdown(tag):", "body": "return (MARKDOWN_START.format(tag=tag), MARKDOWN_END.format(tag=tag))", "docstring": "Return start and end regex pattern sequences for simple Markdown tag.", "id": "f10017:m0"} {"signature": "def html(tag):", "body": "return (HTML_START.format(tag=tag), HTML_END.format(tag=tag))", "docstring": "Return sequence of start and end regex patterns for simple HTML tag", "id": "f10017:m1"} {"signature": "def url_complete(url):", "body": "return url if url_proto_regex.search(url) else '' + url", "docstring": "If URL doesn't start with protocol, prepend it with http://", "id": "f10017:m2"} {"signature": "def preprocess(self, text):", "body": "return text.replace('', '')", "docstring": "Preprocess text before parsing", "id": "f10017:c1:m1"} {"signature": "def postprocess(self, text):", "body": "return markdown_unescape_regex.sub(r'', text)", "docstring": "Postprocess text after parsing", "id": "f10017:c1:m2"} {"signature": "def _best_effort_decode(data_bytes):", "body": "decoder = Utf8IncrementalDecoder()return decoder.decode(data_bytes)", "docstring": "Decode as much of data_bytes as possible as UTF-8.", "id": "f10018:m0"} {"signature": "def _parse_sid_response(res):", "body": "res = json.loads(list(ChunkParser().get_chunks(res))[])sid = res[][][]gsessionid = res[][][]['']return (sid, gsessionid)", "docstring": "Parse response format for request for new channel SID.\n\n Example format (after parsing JS):\n [ [0,[\"c\",\"SID_HERE\",\"\",8]],\n [1,[{\"gsid\":\"GSESSIONID_HERE\"}]]]\n\n Returns (SID, gsessionid) tuple.", "id": "f10018:m1"} {"signature": "def get_chunks(self, new_data_bytes):", "body": "self._buf += new_data_byteswhile True:buf_decoded = _best_effort_decode(self._buf)buf_utf16 = buf_decoded.encode('')[:]length_str_match = LEN_REGEX.match(buf_decoded)if length_str_match is None:breakelse:length_str = length_str_match.group()length = int(length_str) * length_length = len((length_str + '').encode('')[:])if len(buf_utf16) - length_length < length:breaksubmission = buf_utf16[length_length:length_length + length]yield submission.decode('')drop_length = (len((length_str + '').encode()) 
+len(submission.decode('').encode()))self._buf = self._buf[drop_length:]", "docstring": "Yield chunks generated from received data.\n\n The buffer may not be decodable as UTF-8 if there's a split multi-byte\n character at the end. To handle this, do a \"best effort\" decode of the\n buffer to decode as much of it as possible.\n\n The length is actually the length of the string as reported by\n JavaScript. JavaScript's string length function returns the number of\n code units in the string, represented in UTF-16. We can emulate this by\n encoding everything in UTF-16 and multiplying the reported length by 2.\n\n Note that when encoding a string in UTF-16, Python will prepend a\n byte-order character, so we need to remove the first two bytes.", "id": "f10018:c1:m1"} {"signature": "def __init__(self, session, max_retries, retry_backoff_base):", "body": "self.on_connect = event.Event('')self.on_reconnect = event.Event('')self.on_disconnect = event.Event('')self.on_receive_array = event.Event('')self._max_retries = max_retriesself._retry_backoff_base = retry_backoff_baseself._is_connected = Falseself._on_connect_called = Falseself._chunk_parser = Noneself._session = sessionself._sid_param = Noneself._gsessionid_param = None", "docstring": "Create a new channel.\n\n Args:\n session (http_utils.Session): Request session.\n max_retries (int): Number of retries for long-polling request.\n retry_backoff_base (int): The base term for the long-polling\n exponential backoff.", "id": "f10018:c2:m0"} {"signature": "@propertydef is_connected(self):", "body": "return self._is_connected", "docstring": "Whether the channel is currently connected.", "id": "f10018:c2:m1"} {"signature": "async def listen(self):", "body": "retries = need_new_sid = True while retries <= self._max_retries:if retries > :backoff_seconds = self._retry_backoff_base ** retrieslogger.info('', backoff_seconds)await asyncio.sleep(backoff_seconds)if need_new_sid:await self._fetch_channel_sid()need_new_sid = Falseself._chunk_parser = ChunkParser()try:await self._longpoll_request()except ChannelSessionError as err:logger.warning('', err)need_new_sid = Trueexcept exceptions.NetworkError as err:logger.warning('', err)else:retries = continueretries += logger.info('', retries)if self._is_connected:self._is_connected = Falseawait self.on_disconnect.fire()logger.error('')", "docstring": "Listen for messages on the backwards channel.\n\n This method only returns when the connection has been closed due to an\n error.", "id": "f10018:c2:m2"} {"signature": "async def send_maps(self, map_list):", "body": "params = {'': , '': , '': '', }if self._gsessionid_param is not None:params[''] = self._gsessionid_paramif self._sid_param is not None:params[''] = self._sid_paramdata_dict = dict(count=len(map_list), ofs=)for map_num, map_ in enumerate(map_list):for map_key, map_val in map_.items():data_dict[''.format(map_num, map_key)] = map_valres = await self._session.fetch('', CHANNEL_URL, params=params, data=data_dict)return res", "docstring": "Sends a request to the server containing maps (dicts).", "id": "f10018:c2:m3"} {"signature": "async def _fetch_channel_sid(self):", "body": "logger.info('')self._sid_param = Noneself._gsessionid_param = Noneres = await self.send_maps([])self._sid_param, self._gsessionid_param = _parse_sid_response(res.body)logger.info(''.format(self._sid_param))logger.info(''.format(self._gsessionid_param))", "docstring": "Creates a new channel for receiving push data.\n\n Sending an empty forward channel request will create a new channel 
on\n the server.\n\n There's a separate API to get the gsessionid alone that Hangouts for\n Chrome uses, but if we don't send a gsessionid with this request, it\n will return a gsessionid as well as the SID.\n\n Raises hangups.NetworkError if the channel can not be created.", "id": "f10018:c2:m4"} {"signature": "async def _longpoll_request(self):", "body": "params = {'': , '': self._gsessionid_param,'': '', '': , '': self._sid_param, '': , '': '', '': '', }logger.info('')try:async with self._session.fetch_raw('', CHANNEL_URL,params=params) as res:if res.status != :if res.status == and res.reason == '':raise ChannelSessionError('')raise exceptions.NetworkError(''.format(res.status, res.reason))while True:async with async_timeout.timeout(PUSH_TIMEOUT):chunk = await res.content.read(MAX_READ_BYTES)if not chunk:breakawait self._on_push_data(chunk)except asyncio.TimeoutError:raise exceptions.NetworkError('')except aiohttp.ServerDisconnectedError as err:raise exceptions.NetworkError('' % err)except aiohttp.ClientPayloadError:raise ChannelSessionError('')except aiohttp.ClientError as err:raise exceptions.NetworkError('' % err)", "docstring": "Open a long-polling request and receive arrays.\n\n This method uses keep-alive to make re-opening the request faster, but\n the remote server will set the \"Connection: close\" header once an hour.\n\n Raises hangups.NetworkError or ChannelSessionError.", "id": "f10018:c2:m5"} {"signature": "async def _on_push_data(self, data_bytes):", "body": "logger.debug(''.format(data_bytes))for chunk in self._chunk_parser.get_chunks(data_bytes):if not self._is_connected:if self._on_connect_called:self._is_connected = Trueawait self.on_reconnect.fire()else:self._on_connect_called = Trueself._is_connected = Trueawait self.on_connect.fire()container_array = json.loads(chunk)for inner_array in container_array:array_id, data_array = inner_arraylogger.debug('',array_id, data_array)await self.on_receive_array.fire(data_array)", "docstring": "Parse push data and trigger events.", "id": "f10018:c2:m6"} {"signature": "async def connect(self):", "body": "proxy = os.environ.get('')self._session = http_utils.Session(self._cookies, proxy=proxy)try:self._channel = channel.Channel(self._session, self._max_retries, self._retry_backoff_base)self._channel.on_connect.add_observer(self.on_connect.fire)self._channel.on_reconnect.add_observer(self.on_reconnect.fire)self._channel.on_disconnect.add_observer(self.on_disconnect.fire)self._channel.on_receive_array.add_observer(self._on_receive_array)self._listen_future = asyncio.ensure_future(self._channel.listen())try:await self._listen_futureexcept asyncio.CancelledError:self._listen_future.cancel()logger.info('')finally:await self._session.close()", "docstring": "Establish a connection to the chat server.\n\n Returns when an error has occurred, or :func:`disconnect` has been\n called.", "id": "f10019:c0:m1"} {"signature": "async def disconnect(self):", "body": "logger.info('')self._listen_future.cancel()", "docstring": "Gracefully disconnect from the server.\n\n When disconnection is complete, :func:`connect` will return.", "id": "f10019:c0:m2"} {"signature": "def get_request_header(self):", "body": "if self._client_id is not None:self._request_header.client_identifier.resource = self._client_idreturn self._request_header", "docstring": "Return ``request_header`` for use when constructing requests.\n\n Returns:\n Populated request header.", "id": "f10019:c0:m3"} {"signature": "@staticmethoddef get_client_generated_id():", "body": "return 
random.randint(, **)", "docstring": "Return ``client_generated_id`` for use when constructing requests.\n\n Returns:\n Client generated ID.", "id": "f10019:c0:m4"} {"signature": "async def set_active(self):", "body": "is_active = (self._active_client_state ==hangouts_pb2.ACTIVE_CLIENT_STATE_IS_ACTIVE)timed_out = (time.time() - self._last_active_secs >SETACTIVECLIENT_LIMIT_SECS)if not is_active or timed_out:self._active_client_state = (hangouts_pb2.ACTIVE_CLIENT_STATE_IS_ACTIVE)self._last_active_secs = time.time()if self._email is None:try:get_self_info_request = hangouts_pb2.GetSelfInfoRequest(request_header=self.get_request_header(),)get_self_info_response = await self.get_self_info(get_self_info_request)except exceptions.NetworkError as e:logger.warning(''.format(e))returnself._email = (get_self_info_response.self_entity.properties.email[])if self._client_id is None:logger.info('')returntry:set_active_request = hangouts_pb2.SetActiveClientRequest(request_header=self.get_request_header(),is_active=True,full_jid=\"\".format(self._email, self._client_id),timeout_secs=ACTIVE_TIMEOUT_SECS,)await self.set_active_client(set_active_request)except exceptions.NetworkError as e:logger.warning(''.format(e))else:logger.info(''.format(ACTIVE_TIMEOUT_SECS))", "docstring": "Set this client as active.\n\n While a client is active, no other clients will raise notifications.\n Call this method whenever there is an indication the user is\n interacting with this client. This method may be called very\n frequently, and it will only make a request when necessary.", "id": "f10019:c0:m5"} {"signature": "async def upload_image(self, image_file, filename=None, *,return_uploaded_image=False):", "body": "image_filename = filename or os.path.basename(image_file.name)image_data = image_file.read()res = await self._base_request(IMAGE_UPLOAD_URL,'', '',json.dumps({\"\": \"\",\"\": {\"\": [{\"\": {\"\": \"\",\"\": image_filename,\"\": {},\"\": len(image_data)}}]}}))try:upload_url = self._get_upload_session_status(res)[''][]['']['']except KeyError:raise exceptions.NetworkError('')res = await self._base_request(upload_url, '', '', image_data)try:raw_info = (self._get_upload_session_status(res)[''][''][''][''])image_id = raw_info['']url = raw_info['']except KeyError:raise exceptions.NetworkError('')result = UploadedImage(image_id=image_id, url=url)return result if return_uploaded_image else result.image_id", "docstring": "Upload an image that can be later attached to a chat message.\n\n Args:\n image_file: A file-like object containing an image.\n filename (str): (optional) Custom name for the uploaded file.\n return_uploaded_image (bool): (optional) If True, return\n :class:`.UploadedImage` instead of image ID. 
Defaults to False.\n\n Raises:\n hangups.NetworkError: If the upload request failed.\n\n Returns:\n :class:`.UploadedImage` instance, or ID of the uploaded image.", "id": "f10019:c0:m6"} {"signature": "@staticmethoddef _get_upload_session_status(res):", "body": "response = json.loads(res.body.decode())if '' not in response:try:info = (response[''][''][''][''][''])reason = ''.format(info[''], info[''])except KeyError:reason = ''raise exceptions.NetworkError(''.format(reason))return response['']", "docstring": "Parse the image upload response to obtain status.\n\n Args:\n res: http_utils.FetchResponse instance, the upload response\n\n Returns:\n dict, sessionStatus of the response\n\n Raises:\n hangups.NetworkError: If the upload request failed.", "id": "f10019:c0:m7"} {"signature": "async def _on_receive_array(self, array):", "body": "if array[] == '':pass else:wrapper = json.loads(array[][''])if '' in wrapper:self._client_id = wrapper['']['']logger.info('', self._client_id)await self._add_channel_services()if '' in wrapper:pblite_message = json.loads(wrapper[''][''])if pblite_message[] == '':batch_update = hangouts_pb2.BatchUpdate()pblite.decode(batch_update, pblite_message,ignore_first_item=True)for state_update in batch_update.state_update:logger.debug('', state_update)header = state_update.state_update_headerself._active_client_state = header.active_client_stateawait self.on_state_update.fire(state_update)else:logger.info('', pblite_message[])", "docstring": "Parse channel array and call the appropriate events.", "id": "f10019:c0:m8"} {"signature": "async def _add_channel_services(self):", "body": "logger.info('')services = [\"\", \"\"]map_list = [dict(p=json.dumps({\"\": {\"\": {\"\": service}}}))for service in services]await self._channel.send_maps(map_list)logger.info('')", "docstring": "Add services to the channel.\n\n The services we add to the channel determine what kind of data we will\n receive on it.\n\n The \"babel\" service includes what we need for Hangouts. If this fails\n for some reason, hangups will never receive any events. 
The\n \"babel_presence_last_seen\" service is also required to receive presence\n notifications.\n\n This needs to be re-called whenever we open a new channel (when there's\n a new SID and client_id).", "id": "f10019:c0:m9"} {"signature": "async def _pb_request(self, endpoint, request_pb, response_pb):", "body": "logger.debug('', endpoint,request_pb)res = await self._base_request(''.format(endpoint),'', '', request_pb.SerializeToString())try:response_pb.ParseFromString(base64.b64decode(res.body))except binascii.Error as e:raise exceptions.NetworkError(''.format(e))except google.protobuf.message.DecodeError as e:raise exceptions.NetworkError(''.format(e))logger.debug('', response_pb)status = response_pb.response_header.statusif status != hangouts_pb2.RESPONSE_STATUS_OK:description = response_pb.response_header.error_descriptionraise exceptions.NetworkError(''.format(status, description))", "docstring": "Send a Protocol Buffer formatted chat API request.\n\n Args:\n endpoint (str): The chat API endpoint to use.\n request_pb: The request body as a Protocol Buffer message.\n response_pb: The response body as a Protocol Buffer message.\n\n Raises:\n NetworkError: If the request fails.", "id": "f10019:c0:m10"} {"signature": "async def _base_request(self, url, content_type, response_type, data):", "body": "headers = {'': content_type,'': '',}params = {'': response_type,'': API_KEY,}res = await self._session.fetch('', url, headers=headers, params=params, data=data,)return res", "docstring": "Send a generic authenticated POST request.\n\n Args:\n url (str): URL of request.\n content_type (str): Request content type.\n response_type (str): The desired response format. Valid options\n are: 'json' (JSON), 'protojson' (pblite), and 'proto' (binary\n Protocol Buffer). 
'proto' requires manually setting an extra\n header 'X-Goog-Encode-Response-If-Executable: base64'.\n data (str): Request body data.\n\n Returns:\n FetchResponse: Response containing HTTP code, cookies, and body.\n\n Raises:\n NetworkError: If the request fails.", "id": "f10019:c0:m11"} {"signature": "async def add_user(self, add_user_request):", "body": "response = hangouts_pb2.AddUserResponse()await self._pb_request('',add_user_request, response)return response", "docstring": "Invite users to join an existing group conversation.", "id": "f10019:c0:m12"} {"signature": "async def create_conversation(self, create_conversation_request):", "body": "response = hangouts_pb2.CreateConversationResponse()await self._pb_request('',create_conversation_request, response)return response", "docstring": "Create a new conversation.", "id": "f10019:c0:m13"} {"signature": "async def delete_conversation(self, delete_conversation_request):", "body": "response = hangouts_pb2.DeleteConversationResponse()await self._pb_request('',delete_conversation_request, response)return response", "docstring": "Leave a one-to-one conversation.\n\n One-to-one conversations are \"sticky\"; they can't actually be deleted.\n This API clears the event history of the specified conversation up to\n ``delete_upper_bound_timestamp``, hiding it if no events remain.", "id": "f10019:c0:m14"} {"signature": "async def easter_egg(self, easter_egg_request):", "body": "response = hangouts_pb2.EasterEggResponse()await self._pb_request('',easter_egg_request, response)return response", "docstring": "Send an easter egg event to a conversation.", "id": "f10019:c0:m15"} {"signature": "async def get_conversation(self, get_conversation_request):", "body": "response = hangouts_pb2.GetConversationResponse()await self._pb_request('',get_conversation_request, response)return response", "docstring": "Return conversation info and recent events.", "id": "f10019:c0:m16"} {"signature": "async def get_entity_by_id(self, get_entity_by_id_request):", "body": "response = hangouts_pb2.GetEntityByIdResponse()await self._pb_request('',get_entity_by_id_request, response)return response", "docstring": "Return one or more user entities.\n\n Searching by phone number only finds entities when their phone number\n is in your contacts (and not always even then), and can't be used to\n find Google Voice contacts.", "id": "f10019:c0:m17"} {"signature": "async def get_group_conversation_url(self,get_group_conversation_url_request):", "body": "response = hangouts_pb2.GetGroupConversationUrlResponse()await self._pb_request('',get_group_conversation_url_request,response)return response", "docstring": "Get URL to allow others to join a group conversation.", "id": "f10019:c0:m18"} {"signature": "async def get_self_info(self, get_self_info_request):", "body": "response = hangouts_pb2.GetSelfInfoResponse()await self._pb_request('',get_self_info_request, response)return response", "docstring": "Return info about the current user.", "id": "f10019:c0:m19"} {"signature": "async def get_suggested_entities(self, get_suggested_entities_request):", "body": "response = hangouts_pb2.GetSuggestedEntitiesResponse()await self._pb_request('',get_suggested_entities_request, response)return response", "docstring": "Return suggested contacts.", "id": "f10019:c0:m20"} {"signature": "async def query_presence(self, query_presence_request):", "body": "response = hangouts_pb2.QueryPresenceResponse()await self._pb_request('',query_presence_request, response)return response", "docstring": "Return 
presence status for a list of users.", "id": "f10019:c0:m21"} {"signature": "async def remove_user(self, remove_user_request):", "body": "response = hangouts_pb2.RemoveUserResponse()await self._pb_request('',remove_user_request, response)return response", "docstring": "Remove a participant from a group conversation.", "id": "f10019:c0:m22"} {"signature": "async def rename_conversation(self, rename_conversation_request):", "body": "response = hangouts_pb2.RenameConversationResponse()await self._pb_request('',rename_conversation_request, response)return response", "docstring": "Rename a conversation.\n\n Both group and one-to-one conversations may be renamed, but the\n official Hangouts clients have mixed support for one-to-one\n conversations with custom names.", "id": "f10019:c0:m23"} {"signature": "async def search_entities(self, search_entities_request):", "body": "response = hangouts_pb2.SearchEntitiesResponse()await self._pb_request('',search_entities_request, response)return response", "docstring": "Return user entities based on a query.", "id": "f10019:c0:m24"} {"signature": "async def send_chat_message(self, send_chat_message_request):", "body": "response = hangouts_pb2.SendChatMessageResponse()await self._pb_request('',send_chat_message_request, response)return response", "docstring": "Send a chat message to a conversation.", "id": "f10019:c0:m25"} {"signature": "async def modify_otr_status(self, modify_otr_status_request):", "body": "response = hangouts_pb2.ModifyOTRStatusResponse()await self._pb_request('',modify_otr_status_request, response)return response", "docstring": "Enable or disable message history in a conversation.", "id": "f10019:c0:m26"} {"signature": "async def send_offnetwork_invitation(self, send_offnetwork_invitation_request):", "body": "response = hangouts_pb2.SendOffnetworkInvitationResponse()await self._pb_request('',send_offnetwork_invitation_request,response)return response", "docstring": "Send an email to invite a non-Google contact to Hangouts.", "id": "f10019:c0:m27"} {"signature": "async def set_active_client(self, set_active_client_request):", "body": "response = hangouts_pb2.SetActiveClientResponse()await self._pb_request('',set_active_client_request, response)return response", "docstring": "Set the active client.", "id": "f10019:c0:m28"} {"signature": "async def set_conversation_notification_level(self, set_conversation_notification_level_request):", "body": "response = hangouts_pb2.SetConversationNotificationLevelResponse()await self._pb_request('',set_conversation_notification_level_request, response)return response", "docstring": "Set the notification level of a conversation.", "id": "f10019:c0:m29"} {"signature": "async def set_focus(self, set_focus_request):", "body": "response = hangouts_pb2.SetFocusResponse()await self._pb_request('',set_focus_request, response)return response", "docstring": "Set focus to a conversation.", "id": "f10019:c0:m30"} {"signature": "async def set_group_link_sharing_enabled(self, set_group_link_sharing_enabled_request):", "body": "response = hangouts_pb2.SetGroupLinkSharingEnabledResponse()await self._pb_request('',set_group_link_sharing_enabled_request,response)return response", "docstring": "Set whether group link sharing is enabled for a conversation.", "id": "f10019:c0:m31"} {"signature": "async def set_presence(self, set_presence_request):", "body": "response = hangouts_pb2.SetPresenceResponse()await self._pb_request('',set_presence_request, response)return response", "docstring": "Set the presence status.", "id": 
"f10019:c0:m32"} {"signature": "async def set_typing(self, set_typing_request):", "body": "response = hangouts_pb2.SetTypingResponse()await self._pb_request('',set_typing_request, response)return response", "docstring": "Set the typing status of a conversation.", "id": "f10019:c0:m33"} {"signature": "async def sync_all_new_events(self, sync_all_new_events_request):", "body": "response = hangouts_pb2.SyncAllNewEventsResponse()await self._pb_request('',sync_all_new_events_request, response)return response", "docstring": "List all events occurring at or after a timestamp.", "id": "f10019:c0:m34"} {"signature": "async def sync_recent_conversations(self, sync_recent_conversations_request):", "body": "response = hangouts_pb2.SyncRecentConversationsResponse()await self._pb_request('',sync_recent_conversations_request,response)return response", "docstring": "Return info on recent conversations and their events.", "id": "f10019:c0:m35"} {"signature": "async def update_watermark(self, update_watermark_request):", "body": "response = hangouts_pb2.UpdateWatermarkResponse()await self._pb_request('',update_watermark_request, response)return response", "docstring": "Update the watermark (read timestamp) of a conversation.", "id": "f10019:c0:m36"} {"signature": "async def build_user_conversation_list(client):", "body": "conv_states, sync_timestamp = await _sync_all_conversations(client)required_user_ids = set()for conv_state in conv_states:required_user_ids |= {user.UserID(chat_id=part.id.chat_id, gaia_id=part.id.gaia_id)for part in conv_state.conversation.participant_data}required_entities = []if required_user_ids:logger.debug(''.format(required_user_ids))try:response = await client.get_entity_by_id(hangouts_pb2.GetEntityByIdRequest(request_header=client.get_request_header(),batch_lookup_spec=[hangouts_pb2.EntityLookupSpec(gaia_id=user_id.gaia_id,create_offnetwork_gaia=True,)for user_id in required_user_ids],))for entity_result in response.entity_result:required_entities.extend(entity_result.entity)except exceptions.NetworkError as e:logger.warning(''.format(e))conv_part_list = []for conv_state in conv_states:conv_part_list.extend(conv_state.conversation.participant_data)get_self_info_response = await client.get_self_info(hangouts_pb2.GetSelfInfoRequest(request_header=client.get_request_header(),))self_entity = get_self_info_response.self_entityuser_list = user.UserList(client, self_entity, required_entities,conv_part_list)conversation_list = ConversationList(client, conv_states,user_list, sync_timestamp)return (user_list, conversation_list)", "docstring": "Build :class:`.UserList` and :class:`.ConversationList`.\n\n This method requests data necessary to build the list of conversations and\n users. 
Users that are not in the contact list but are participating in a\n conversation will also be retrieved.\n\n Args:\n client (Client): Connected client.\n\n Returns:\n (:class:`.UserList`, :class:`.ConversationList`):\n Tuple of built objects.", "id": "f10020:m0"} {"signature": "async def _sync_all_conversations(client):", "body": "conv_states = []sync_timestamp = Nonerequest = hangouts_pb2.SyncRecentConversationsRequest(request_header=client.get_request_header(),max_conversations=CONVERSATIONS_PER_REQUEST,max_events_per_conversation=,sync_filter=[hangouts_pb2.SYNC_FILTER_INBOX,hangouts_pb2.SYNC_FILTER_ARCHIVED,])for _ in range(MAX_CONVERSATION_PAGES):logger.info('', request.last_event_timestamp)response = await client.sync_recent_conversations(request)conv_states = list(response.conversation_state) + conv_statessync_timestamp = parsers.from_timestamp(response.response_header.current_server_time)if response.continuation_end_timestamp == :logger.info('')breakelse:request.last_event_timestamp = response.continuation_end_timestampelse:logger.warning('')logger.info('', len(conv_states))return conv_states, sync_timestamp", "docstring": "Sync all conversations by making paginated requests.\n\n Conversations are ordered by ascending sort timestamp.\n\n Args:\n client (Client): Connected client.\n\n Raises:\n NetworkError: If the requests fail.\n\n Returns:\n tuple of list of ``ConversationState`` messages and sync timestamp", "id": "f10020:m1"} {"signature": "@propertydef id_(self):", "body": "return self._conversation.conversation_id.id", "docstring": "The conversation's ID (:class:`str`).", "id": "f10020:c0:m1"} {"signature": "@propertydef users(self):", "body": "return [self._user_list.get_user(user.UserID(chat_id=part.id.chat_id,gaia_id=part.id.gaia_id))for part in self._conversation.participant_data]", "docstring": "List of conversation participants (:class:`~hangups.user.User`).", "id": "f10020:c0:m2"} {"signature": "@propertydef name(self):", "body": "custom_name = self._conversation.namereturn None if custom_name == '' else custom_name", "docstring": "The conversation's custom name (:class:`str`)\n\n May be ``None`` if conversation has no custom name.", "id": "f10020:c0:m3"} {"signature": "@propertydef last_modified(self):", "body": "timestamp = self._conversation.self_conversation_state.sort_timestampif timestamp is None:timestamp = return parsers.from_timestamp(timestamp)", "docstring": "When conversation was last modified (:class:`datetime.datetime`).", "id": "f10020:c0:m4"} {"signature": "@propertydef events(self):", "body": "return list(self._events)", "docstring": "Loaded events sorted oldest to newest.\n\n (list of :class:`.ConversationEvent`).", "id": "f10020:c0:m6"} {"signature": "@propertydef watermarks(self):", "body": "return self._watermarks.copy()", "docstring": "Participant watermarks.\n\n (dict of :class:`.UserID`, :class:`datetime.datetime`).", "id": "f10020:c0:m7"} {"signature": "@propertydef unread_events(self):", "body": "return [conv_event for conv_event in self._eventsif conv_event.timestamp > self.latest_read_timestamp]", "docstring": "Loaded events which are unread sorted oldest to newest.\n\n Some Hangouts clients don't update the read timestamp for certain event\n types, such as membership changes, so this may return more unread\n events than these clients will show. 
There's also a delay between\n sending a message and the user's own message being considered read.\n\n (list of :class:`.ConversationEvent`).", "id": "f10020:c0:m8"} {"signature": "@propertydef is_archived(self):", "body": "return (hangouts_pb2.CONVERSATION_VIEW_ARCHIVED inself._conversation.self_conversation_state.view)", "docstring": "``True`` if this conversation has been archived.", "id": "f10020:c0:m9"} {"signature": "@propertydef is_quiet(self):", "body": "level = self._conversation.self_conversation_state.notification_levelreturn level == hangouts_pb2.NOTIFICATION_LEVEL_QUIET", "docstring": "``True`` if notification level for this conversation is quiet.", "id": "f10020:c0:m10"} {"signature": "@propertydef is_off_the_record(self):", "body": "status = self._conversation.otr_statusreturn status == hangouts_pb2.OFF_THE_RECORD_STATUS_OFF_THE_RECORD", "docstring": "``True`` if conversation is off the record (history is disabled).", "id": "f10020:c0:m11"} {"signature": "def _on_watermark_notification(self, notif):", "body": "if self.get_user(notif.user_id).is_self:logger.info(''.format(self.id_, notif.read_timestamp))self_conversation_state = (self._conversation.self_conversation_state)self_conversation_state.self_read_state.latest_read_timestamp = (parsers.to_timestamp(notif.read_timestamp))previous_timestamp = self._watermarks.get(notif.user_id,datetime.datetime.min.replace(tzinfo=datetime.timezone.utc))if notif.read_timestamp > previous_timestamp:logger.info(('' +'').format(self.id_,notif.user_id.chat_id,notif.read_timestamp))self._watermarks[notif.user_id] = notif.read_timestamp", "docstring": "Handle a watermark notification.", "id": "f10020:c0:m12"} {"signature": "def update_conversation(self, conversation):", "body": "new_state = conversation.self_conversation_stateold_state = self._conversation.self_conversation_stateself._conversation = conversationif not new_state.delivery_medium_option:new_state.delivery_medium_option.extend(old_state.delivery_medium_option)old_timestamp = old_state.self_read_state.latest_read_timestampnew_timestamp = new_state.self_read_state.latest_read_timestampif new_timestamp == :new_state.self_read_state.latest_read_timestamp = old_timestampfor new_entry in conversation.read_state:tstamp = parsers.from_timestamp(new_entry.latest_read_timestamp)if tstamp == :continueuid = parsers.from_participantid(new_entry.participant_id)if uid not in self._watermarks or self._watermarks[uid] < tstamp:self._watermarks[uid] = tstamp", "docstring": "Update the internal state of the conversation.\n\n This method is used by :class:`.ConversationList` to maintain this\n instance.\n\n Args:\n conversation: ``Conversation`` message.", "id": "f10020:c0:m13"} {"signature": "@staticmethoddef _wrap_event(event_):", "body": "cls = conversation_event.ConversationEventif event_.HasField(''):cls = conversation_event.ChatMessageEventelif event_.HasField(''):cls = conversation_event.OTREventelif event_.HasField(''):cls = conversation_event.RenameEventelif event_.HasField(''):cls = conversation_event.MembershipChangeEventelif event_.HasField(''):cls = conversation_event.HangoutEventelif event_.HasField(''):cls = conversation_event.GroupLinkSharingModificationEventreturn cls(event_)", "docstring": "Wrap hangouts_pb2.Event in ConversationEvent subclass.", "id": "f10020:c0:m14"} {"signature": "def add_event(self, event_):", "body": "conv_event = self._wrap_event(event_)if conv_event.id_ not in self._events_dict:self._events.append(conv_event)self._events_dict[conv_event.id_] = 
conv_eventelse:logger.info('',self.id_, conv_event.id_)return Nonereturn conv_event", "docstring": "Add an event to the conversation.\n\n This method is used by :class:`.ConversationList` to maintain this\n instance.\n\n Args:\n event_: ``Event`` message.\n\n Returns:\n :class:`.ConversationEvent` representing the event.", "id": "f10020:c0:m15"} {"signature": "def get_user(self, user_id):", "body": "return self._user_list.get_user(user_id)", "docstring": "Get user by its ID.\n\n Args:\n user_id (~hangups.user.UserID): ID of user to return.\n\n Raises:\n KeyError: If the user ID is not found.\n\n Returns:\n :class:`~hangups.user.User` with matching ID.", "id": "f10020:c0:m16"} {"signature": "def _get_default_delivery_medium(self):", "body": "medium_options = (self._conversation.self_conversation_state.delivery_medium_option)try:default_medium = medium_options[].delivery_mediumexcept IndexError:logger.warning('', self.id_)default_medium = hangouts_pb2.DeliveryMedium(medium_type=hangouts_pb2.DELIVERY_MEDIUM_BABEL)for medium_option in medium_options:if medium_option.current_default:default_medium = medium_option.delivery_mediumreturn default_medium", "docstring": "Return default DeliveryMedium to use for sending messages.\n\n Use the first option, or an option that's marked as the current\n default.", "id": "f10020:c0:m17"} {"signature": "def _get_event_request_header(self):", "body": "otr_status = (hangouts_pb2.OFF_THE_RECORD_STATUS_OFF_THE_RECORDif self.is_off_the_record elsehangouts_pb2.OFF_THE_RECORD_STATUS_ON_THE_RECORD)return hangouts_pb2.EventRequestHeader(conversation_id=hangouts_pb2.ConversationId(id=self.id_),client_generated_id=self._client.get_client_generated_id(),expected_otr=otr_status,delivery_medium=self._get_default_delivery_medium(),)", "docstring": "Return EventRequestHeader for conversation.", "id": "f10020:c0:m18"} {"signature": "async def send_message(self, segments, image_file=None, image_id=None,image_user_id=None):", "body": "async with self._send_message_lock:if image_file:try:uploaded_image = await self._client.upload_image(image_file, return_uploaded_image=True)except exceptions.NetworkError as e:logger.warning(''.format(e))raiseimage_id = uploaded_image.image_idtry:request = hangouts_pb2.SendChatMessageRequest(request_header=self._client.get_request_header(),event_request_header=self._get_event_request_header(),message_content=hangouts_pb2.MessageContent(segment=[seg.serialize() for seg in segments],),)if image_id is not None:request.existing_media.photo.photo_id = image_idif image_user_id is not None:request.existing_media.photo.user_id = image_user_idrequest.existing_media.photo.is_custom_user_id = Trueawait self._client.send_chat_message(request)except exceptions.NetworkError as e:logger.warning(''.format(e))raise", "docstring": "Send a message to this conversation.\n\n A per-conversation lock is acquired to ensure that messages are sent in\n the correct order when this method is called multiple times\n asynchronously.\n\n Args:\n segments: List of :class:`.ChatMessageSegment` objects to include\n in the message.\n image_file: (optional) File-like object containing an image to be\n attached to the message.\n image_id: (optional) ID of a Picasa photo to be attached to the\n message. 
If you specify both ``image_file`` and ``image_id``\n together, ``image_file`` takes precedence and ``image_id`` will\n be ignored.\n image_user_id: (optional) Picasa user ID, required only if\n ``image_id`` refers to an image from a different Picasa user,\n such as Google's sticker user.\n\n Raises:\n .NetworkError: If the message cannot be sent.", "id": "f10020:c0:m19"} {"signature": "async def leave(self):", "body": "is_group_conversation = (self._conversation.type ==hangouts_pb2.CONVERSATION_TYPE_GROUP)try:if is_group_conversation:await self._client.remove_user(hangouts_pb2.RemoveUserRequest(request_header=self._client.get_request_header(),event_request_header=self._get_event_request_header(),))else:await self._client.delete_conversation(hangouts_pb2.DeleteConversationRequest(request_header=self._client.get_request_header(),conversation_id=hangouts_pb2.ConversationId(id=self.id_),delete_upper_bound_timestamp=parsers.to_timestamp(datetime.datetime.now(tz=datetime.timezone.utc))))except exceptions.NetworkError as e:logger.warning(''.format(e))raise", "docstring": "Leave this conversation.\n\n Raises:\n .NetworkError: If conversation cannot be left.", "id": "f10020:c0:m20"} {"signature": "async def rename(self, name):", "body": "await self._client.rename_conversation(hangouts_pb2.RenameConversationRequest(request_header=self._client.get_request_header(),new_name=name,event_request_header=self._get_event_request_header(),))", "docstring": "Rename this conversation.\n\n Hangouts only officially supports renaming group conversations, so\n custom names for one-to-one conversations may or may not appear in all\n first party clients.\n\n Args:\n name (str): New name.\n\n Raises:\n .NetworkError: If conversation cannot be renamed.", "id": "f10020:c0:m21"} {"signature": "async def set_notification_level(self, level):", "body": "await self._client.set_conversation_notification_level(hangouts_pb2.SetConversationNotificationLevelRequest(request_header=self._client.get_request_header(),conversation_id=hangouts_pb2.ConversationId(id=self.id_),level=level,))", "docstring": "Set the notification level of this conversation.\n\n Args:\n level: ``NOTIFICATION_LEVEL_QUIET`` to disable notifications, or\n ``NOTIFICATION_LEVEL_RING`` to enable them.\n\n Raises:\n .NetworkError: If the request fails.", "id": "f10020:c0:m22"} {"signature": "async def set_typing(self, typing=hangouts_pb2.TYPING_TYPE_STARTED):", "body": "try:await self._client.set_typing(hangouts_pb2.SetTypingRequest(request_header=self._client.get_request_header(),conversation_id=hangouts_pb2.ConversationId(id=self.id_),type=typing,))except exceptions.NetworkError as e:logger.warning(''.format(e))raise", "docstring": "Set your typing status in this conversation.\n\n Args:\n typing: (optional) ``TYPING_TYPE_STARTED``, ``TYPING_TYPE_PAUSED``,\n or ``TYPING_TYPE_STOPPED`` to start, pause, or stop typing,\n respectively. 
Defaults to ``TYPING_TYPE_STARTED``.\n\n Raises:\n .NetworkError: If typing status cannot be set.", "id": "f10020:c0:m23"} {"signature": "async def update_read_timestamp(self, read_timestamp=None):", "body": "if read_timestamp is None:read_timestamp = (self.events[-].timestamp if self.events elsedatetime.datetime.now(datetime.timezone.utc))if read_timestamp > self.latest_read_timestamp:logger.info(''.format(self.id_, self.latest_read_timestamp, read_timestamp))state = self._conversation.self_conversation_statestate.self_read_state.latest_read_timestamp = (parsers.to_timestamp(read_timestamp))try:await self._client.update_watermark(hangouts_pb2.UpdateWatermarkRequest(request_header=self._client.get_request_header(),conversation_id=hangouts_pb2.ConversationId(id=self.id_),last_read_timestamp=parsers.to_timestamp(read_timestamp),))except exceptions.NetworkError as e:logger.warning(''.format(e))raise", "docstring": "Update the timestamp of the latest event which has been read.\n\n This method will avoid making an API request if it will have no effect.\n\n Args:\n read_timestamp (datetime.datetime): (optional) Timestamp to set.\n Defaults to the timestamp of the newest event.\n\n Raises:\n .NetworkError: If the timestamp cannot be updated.", "id": "f10020:c0:m24"} {"signature": "async def get_events(self, event_id=None, max_events=):", "body": "if event_id is None:conv_events = self._events[- * max_events:]else:conv_event = self.get_event(event_id)if self._events[].id_ != event_id:conv_events = self._events[self._events.index(conv_event) + :]else:logger.info(''.format(self.id_, conv_event.timestamp))res = await self._client.get_conversation(hangouts_pb2.GetConversationRequest(request_header=self._client.get_request_header(),conversation_spec=hangouts_pb2.ConversationSpec(conversation_id=hangouts_pb2.ConversationId(id=self.id_)),include_event=True,max_events_per_conversation=max_events,event_continuation_token=self._event_cont_token))if res.conversation_state.HasField(''):self.update_conversation(res.conversation_state.conversation)self._event_cont_token = (res.conversation_state.event_continuation_token)conv_events = [self._wrap_event(event) for eventin res.conversation_state.event]logger.info(''.format(len(conv_events), self.id_))for conv_event in reversed(conv_events):if conv_event.id_ not in self._events_dict:self._events.insert(, conv_event)self._events_dict[conv_event.id_] = conv_eventelse:logger.info('',self.id_, conv_event.id_)return conv_events", "docstring": "Get events from this conversation.\n\n Makes a request to load historical events if necessary.\n\n Args:\n event_id (str): (optional) If provided, return events preceding\n this event, otherwise return the newest events.\n max_events (int): Maximum number of events to return. Defaults to\n 50.\n\n Returns:\n List of :class:`.ConversationEvent` instances, ordered\n newest-first.\n\n Raises:\n KeyError: If ``event_id`` does not correspond to a known event.\n .NetworkError: If the events could not be requested.", "id": "f10020:c0:m25"} {"signature": "def next_event(self, event_id, prev=False):", "body": "i = self.events.index(self._events_dict[event_id])if prev and i > :return self.events[i - ]elif not prev and i + < len(self.events):return self.events[i + ]else:return None", "docstring": "Get the event following another event in this conversation.\n\n Args:\n event_id (str): ID of the event.\n prev (bool): If ``True``, return the previous event rather than the\n next event. 
Defaults to ``False``.\n\n Raises:\n KeyError: If no such :class:`.ConversationEvent` is known.\n\n Returns:\n :class:`.ConversationEvent` or ``None`` if there is no following\n event.", "id": "f10020:c0:m26"} {"signature": "def get_event(self, event_id):", "body": "return self._events_dict[event_id]", "docstring": "Get an event in this conversation by its ID.\n\n Args:\n event_id (str): ID of the event.\n\n Raises:\n KeyError: If no such :class:`.ConversationEvent` is known.\n\n Returns:\n :class:`.ConversationEvent` with the given ID.", "id": "f10020:c0:m27"} {"signature": "def get_all(self, include_archived=False):", "body": "return [conv for conv in self._conv_dict.values()if not conv.is_archived or include_archived]", "docstring": "Get all the conversations.\n\n Args:\n include_archived (bool): (optional) Whether to include archived\n conversations. Defaults to ``False``.\n\n Returns:\n List of all :class:`.Conversation` objects.", "id": "f10020:c1:m1"} {"signature": "def get(self, conv_id):", "body": "return self._conv_dict[conv_id]", "docstring": "Get a conversation by its ID.\n\n Args:\n conv_id (str): ID of conversation to return.\n\n Raises:\n KeyError: If the conversation ID is not found.\n\n Returns:\n :class:`.Conversation` with matching ID.", "id": "f10020:c1:m2"} {"signature": "async def leave_conversation(self, conv_id):", "body": "logger.info(''.format(conv_id))await self._conv_dict[conv_id].leave()del self._conv_dict[conv_id]", "docstring": "Leave a conversation.\n\n Args:\n conv_id (str): ID of conversation to leave.", "id": "f10020:c1:m3"} {"signature": "def _add_conversation(self, conversation, events=[],event_cont_token=None):", "body": "conv_id = conversation.conversation_id.idlogger.debug(''.format(conv_id))conv = Conversation(self._client, self._user_list, conversation,events, event_cont_token)self._conv_dict[conv_id] = convreturn conv", "docstring": "Add new conversation from hangouts_pb2.Conversation", "id": "f10020:c1:m4"} {"signature": "async def _on_state_update(self, state_update):", "body": "notification_type = state_update.WhichOneof('')if state_update.HasField(''):try:await self._handle_conversation_delta(state_update.conversation)except exceptions.NetworkError:logger.warning('',notification_type.replace('', ''),state_update.conversation.conversation_id.id)returnif notification_type == '':await self._handle_set_typing_notification(state_update.typing_notification)elif notification_type == '':await self._handle_watermark_notification(state_update.watermark_notification)elif notification_type == '':await self._on_event(state_update.event_notification.event)", "docstring": "Receive a StateUpdate and fan out to Conversations.\n\n Args:\n state_update: hangouts_pb2.StateUpdate instance", "id": "f10020:c1:m5"} {"signature": "async def _get_or_fetch_conversation(self, conv_id):", "body": "conv = self._conv_dict.get(conv_id, None)if conv is None:logger.info('', conv_id)res = await self._client.get_conversation(hangouts_pb2.GetConversationRequest(request_header=self._client.get_request_header(),conversation_spec=hangouts_pb2.ConversationSpec(conversation_id=hangouts_pb2.ConversationId(id=conv_id)), include_event=False))conv_state = res.conversation_stateevent_cont_token = Noneif conv_state.HasField(''):event_cont_token = conv_state.event_continuation_tokenreturn self._add_conversation(conv_state.conversation,event_cont_token=event_cont_token)else:return conv", "docstring": "Get a cached conversation or fetch a missing conversation.\n\n Args:\n conv_id: string, 
conversation identifier\n\n Raises:\n NetworkError: If the request to fetch the conversation fails.\n\n Returns:\n :class:`.Conversation` with matching ID.", "id": "f10020:c1:m6"} {"signature": "async def _on_event(self, event_):", "body": "conv_id = event_.conversation_id.idtry:conv = await self._get_or_fetch_conversation(conv_id)except exceptions.NetworkError:logger.warning('',conv_id)else:self._sync_timestamp = parsers.from_timestamp(event_.timestamp)conv_event = conv.add_event(event_)if conv_event is not None:await self.on_event.fire(conv_event)await conv.on_event.fire(conv_event)", "docstring": "Receive a hangouts_pb2.Event and fan out to Conversations.\n\n Args:\n event_: hangouts_pb2.Event instance", "id": "f10020:c1:m7"} {"signature": "async def _handle_conversation_delta(self, conversation):", "body": "conv_id = conversation.conversation_id.idconv = self._conv_dict.get(conv_id, None)if conv is None:await self._get_or_fetch_conversation(conv_id)else:conv.update_conversation(conversation)", "docstring": "Receive Conversation delta and create or update the conversation.\n\n Args:\n conversation: hangouts_pb2.Conversation instance\n\n Raises:\n NetworkError: A request to fetch the complete conversation failed.", "id": "f10020:c1:m8"} {"signature": "async def _handle_set_typing_notification(self, set_typing_notification):", "body": "conv_id = set_typing_notification.conversation_id.idres = parsers.parse_typing_status_message(set_typing_notification)await self.on_typing.fire(res)try:conv = await self._get_or_fetch_conversation(conv_id)except exceptions.NetworkError:logger.warning('',conv_id)else:await conv.on_typing.fire(res)", "docstring": "Receive SetTypingNotification and update the conversation.\n\n Args:\n set_typing_notification: hangouts_pb2.SetTypingNotification\n instance", "id": "f10020:c1:m9"} {"signature": "async def _handle_watermark_notification(self, watermark_notification):", "body": "conv_id = watermark_notification.conversation_id.idres = parsers.parse_watermark_notification(watermark_notification)await self.on_watermark_notification.fire(res)try:conv = await self._get_or_fetch_conversation(conv_id)except exceptions.NetworkError:logger.warning('',conv_id)else:await conv.on_watermark_notification.fire(res)", "docstring": "Receive WatermarkNotification and update the conversation.\n\n Args:\n watermark_notification: hangouts_pb2.WatermarkNotification instance", "id": "f10020:c1:m10"} {"signature": "async def _sync(self):", "body": "logger.info(''.format(self._sync_timestamp))try:res = await self._client.sync_all_new_events(hangouts_pb2.SyncAllNewEventsRequest(request_header=self._client.get_request_header(),last_sync_timestamp=parsers.to_timestamp(self._sync_timestamp),max_response_size_bytes=, ))except exceptions.NetworkError as e:logger.warning(''.format(e))else:for conv_state in res.conversation_state:conv_id = conv_state.conversation_id.idconv = self._conv_dict.get(conv_id, None)if conv is not None:conv.update_conversation(conv_state.conversation)for event_ in conv_state.event:timestamp = parsers.from_timestamp(event_.timestamp)if timestamp > self._sync_timestamp:await self._on_event(event_)else:self._add_conversation(conv_state.conversation,conv_state.event,conv_state.event_continuation_token)", "docstring": "Sync conversation state and events that could have been missed.", "id": "f10020:c1:m11"} {"signature": "def get_auth(credentials_prompt, refresh_token_cache, manual_login=False):", "body": "with requests.Session() as session:session.headers = {'': 
USER_AGENT}try:logger.info('')refresh_token = refresh_token_cache.get()if refresh_token is None:raise GoogleAuthError(\"\")access_token = _auth_with_refresh_token(session, refresh_token)except GoogleAuthError as e:logger.info('', e)logger.info('')if manual_login:authorization_code = (credentials_prompt.get_authorization_code())else:authorization_code = _get_authorization_code(session, credentials_prompt)access_token, refresh_token = _auth_with_code(session, authorization_code)refresh_token_cache.set(refresh_token)logger.info('')return _get_session_cookies(session, access_token)", "docstring": "Authenticate with Google.\n\n Args:\n refresh_token_cache (RefreshTokenCache): Cache to use so subsequent\n logins may not require credentials.\n credentials_prompt (CredentialsPrompt): Prompt to use if credentials\n are required to log in.\n manual_login (bool): If true, prompt user to log in through a browser\n and enter authorization code manually. Defaults to false.\n\n Returns:\n dict: Google session cookies.\n\n Raises:\n GoogleAuthError: If authentication with Google fails.", "id": "f10021:m0"} {"signature": "def get_auth_stdin(refresh_token_filename, manual_login=False):", "body": "refresh_token_cache = RefreshTokenCache(refresh_token_filename)return get_auth(CredentialsPrompt(), refresh_token_cache, manual_login=manual_login)", "docstring": "Simple wrapper for :func:`get_auth` that prompts the user using stdin.\n\n Args:\n refresh_token_filename (str): Path to file where refresh token will be\n cached.\n manual_login (bool): If true, prompt user to log in through a browser\n and enter authorization code manually. Defaults to false.\n\n Raises:\n GoogleAuthError: If authentication with Google fails.", "id": "f10021:m1"} {"signature": "def _get_authorization_code(session, credentials_prompt):", "body": "browser = Browser(session, OAUTH2_LOGIN_URL)email = credentials_prompt.get_email()browser.submit_form(FORM_SELECTOR, {EMAIL_SELECTOR: email})password = credentials_prompt.get_password()browser.submit_form(FORM_SELECTOR, {PASSWORD_SELECTOR: password})if browser.has_selector(TOTP_CHALLENGE_SELECTOR):browser.submit_form(TOTP_CHALLENGE_SELECTOR, {})elif browser.has_selector(PHONE_CHALLENGE_SELECTOR):browser.submit_form(PHONE_CHALLENGE_SELECTOR, {})if browser.has_selector(VERIFICATION_FORM_SELECTOR):if browser.has_selector(TOTP_CODE_SELECTOR):input_selector = TOTP_CODE_SELECTORelif browser.has_selector(PHONE_CODE_SELECTOR):input_selector = PHONE_CODE_SELECTORelse:raise GoogleAuthError('')verification_code = credentials_prompt.get_verification_code()browser.submit_form(VERIFICATION_FORM_SELECTOR, {input_selector: verification_code})try:return browser.get_cookie('')except KeyError:raise GoogleAuthError('')", "docstring": "Get authorization code using Google account credentials.\n\n Because hangups can't use a real embedded browser, it has to use the\n Browser class to enter the user's credentials and retrieve the\n authorization code, which is placed in a cookie. 
This is the most fragile\n part of the authentication process, because a change to a login form or an\n unexpected prompt could break it.\n\n Raises GoogleAuthError if authentication fails.\n\n Returns authorization code string.", "id": "f10021:m2"} {"signature": "def _auth_with_refresh_token(session, refresh_token):", "body": "token_request_data = {'': OAUTH2_CLIENT_ID,'': OAUTH2_CLIENT_SECRET,'': '','': refresh_token,}res = _make_token_request(session, token_request_data)return res['']", "docstring": "Authenticate using OAuth refresh token.\n\n Raises GoogleAuthError if authentication fails.\n\n Returns access token string.", "id": "f10021:m3"} {"signature": "def _auth_with_code(session, authorization_code):", "body": "token_request_data = {'': OAUTH2_CLIENT_ID,'': OAUTH2_CLIENT_SECRET,'': authorization_code,'': '','': '',}res = _make_token_request(session, token_request_data)return res[''], res['']", "docstring": "Authenticate using OAuth authorization code.\n\n Raises GoogleAuthError if authentication fails.\n\n Returns access token string and refresh token string.", "id": "f10021:m4"} {"signature": "def _make_token_request(session, token_request_data):", "body": "try:r = session.post(OAUTH2_TOKEN_REQUEST_URL, data=token_request_data)r.raise_for_status()except requests.RequestException as e:raise GoogleAuthError(''.format(e))else:res = r.json()if '' in res:raise GoogleAuthError(''.format(res['']))return res", "docstring": "Make OAuth token request.\n\n Raises GoogleAuthError if authentication fails.\n\n Returns dict response.", "id": "f10021:m5"} {"signature": "def _get_session_cookies(session, access_token):", "body": "headers = {'': ''.format(access_token)}try:r = session.get((''''), headers=headers)r.raise_for_status()except requests.RequestException as e:raise GoogleAuthError(''.format(e))uberauth = r.texttry:r = session.get(('''''').format(uberauth), headers=headers)r.raise_for_status()except requests.RequestException as e:raise GoogleAuthError(''.format(e))cookies = session.cookies.get_dict(domain='')if cookies == {}:raise GoogleAuthError('')return cookies", "docstring": "Use the access token to get session cookies.\n\n Raises GoogleAuthError if session cookies could not be loaded.\n\n Returns dict of cookies.", "id": "f10021:m6"} {"signature": "@staticmethoddef get_email():", "body": "print('')return input('')", "docstring": "Prompt for email.\n\n Returns:\n str: Google account email address.", "id": "f10021:c1:m0"} {"signature": "@staticmethoddef get_password():", "body": "return getpass.getpass()", "docstring": "Prompt for password.\n\n Returns:\n str: Google account password.", "id": "f10021:c1:m1"} {"signature": "@staticmethoddef get_verification_code():", "body": "return input('')", "docstring": "Prompt for verification code.\n\n Returns:\n str: Google account verification code.", "id": "f10021:c1:m2"} {"signature": "@staticmethoddef get_authorization_code():", "body": "print(MANUAL_LOGIN_INSTRUCTIONS)return input('')", "docstring": "Prompt for authorization code.\n\n Returns:\n str: Google account authorization code.", "id": "f10021:c1:m3"} {"signature": "def get(self):", "body": "logger.info('', repr(self._filename))try:with open(self._filename) as f:return f.read()except IOError as e:logger.info('', e)", "docstring": "Get cached refresh token.\n\n Returns:\n Cached refresh token, or ``None`` on failure.", "id": "f10021:c2:m1"} {"signature": "def set(self, refresh_token):", "body": "logger.info('', repr(self._filename))try:with open(self._filename, '') as 
f:f.write(refresh_token)except IOError as e:logger.warning('', e)", "docstring": "Cache a refresh token, ignoring any failure.\n\n Args:\n refresh_token (str): Refresh token to cache.", "id": "f10021:c2:m2"} {"signature": "def has_selector(self, selector):", "body": "return len(self._page.soup.select(selector)) > ", "docstring": "Return True if selector matches an element on the current page.", "id": "f10021:c3:m1"} {"signature": "def submit_form(self, form_selector, input_dict):", "body": "logger.info('', self._page.url.split('')[])logger.info('',[elem.get('') for elem in self._page.soup.select('')])try:form = self._page.soup.select(form_selector)[]except IndexError:raise GoogleAuthError(''.format(form_selector))logger.info('',[elem.get('') for elem in form.select('')])for selector, value in input_dict.items():try:form.select(selector)[][''] = valueexcept IndexError:raise GoogleAuthError(''.format(selector))try:self._page = self._browser.submit(form, self._page.url)self._page.raise_for_status()except requests.RequestException as e:raise GoogleAuthError(''.format(e))", "docstring": "Populate and submit a form on the current page.\n\n Raises GoogleAuthError if form can not be submitted.", "id": "f10021:c3:m2"} {"signature": "def get_cookie(self, name):", "body": "return self._session.cookies[name]", "docstring": "Return cookie value from the browser session.\n\n Raises KeyError if cookie is not found.", "id": "f10021:c3:m3"} {"signature": "def mock_google(verification_input_id=None):", "body": "httpretty.HTTPretty.allow_net_connect = Falsehttpretty.register_uri(httpretty.GET,'',body=get_form(auth.FORM_SELECTOR[:], '', auth.EMAIL_SELECTOR[:]), content_type='')next_action = ('' if verification_input_id is not None else '')httpretty.register_uri(httpretty.GET, '',body=get_form(auth.FORM_SELECTOR[:], next_action, auth.PASSWORD_SELECTOR[:]), content_type='')httpretty.register_uri(httpretty.GET, '',body=get_form(auth.VERIFICATION_FORM_SELECTOR[:], '',verification_input_id), content_type='')httpretty.register_uri(httpretty.GET, '',body='', content_type='', set_cookie='')httpretty.register_uri(httpretty.POST, '',body=json.dumps(dict(access_token='', refresh_token='')),content_type='')httpretty.register_uri(httpretty.GET, '',body='', content_type='')httpretty.register_uri(httpretty.GET, '',body='', content_type='',set_cookie='')", "docstring": "Set up httpretty to mock authentication requests.\n\n This simplifies the sequence of redirects and doesn't make any assertions\n about the requests.", "id": "f10029:m3"} {"signature": "def from_timestamp(microsecond_timestamp):", "body": "return datetime.datetime.fromtimestamp(microsecond_timestamp // , datetime.timezone.utc).replace(microsecond=(microsecond_timestamp % ))", "docstring": "Convert a microsecond timestamp to a UTC datetime instance.", "id": "f10032:m0"} {"signature": "def to_timestamp(datetime_timestamp):", "body": "return int(datetime_timestamp.timestamp() * )", "docstring": "Convert UTC datetime to microsecond timestamp used by Hangouts.", "id": "f10032:m1"} {"signature": "def from_participantid(participant_id):", "body": "return user.UserID(chat_id=participant_id.chat_id,gaia_id=participant_id.gaia_id)", "docstring": "Convert hangouts_pb2.ParticipantId to UserID.", "id": "f10032:m2"} {"signature": "def to_participantid(user_id):", "body": "return hangouts_pb2.ParticipantId(chat_id=user_id.chat_id,gaia_id=user_id.gaia_id)", "docstring": "Convert UserID to hangouts_pb2.ParticipantId.", "id": "f10032:m3"} {"signature": "def 
parse_typing_status_message(p):", "body": "return TypingStatusMessage(conv_id=p.conversation_id.id,user_id=from_participantid(p.sender_id),timestamp=from_timestamp(p.timestamp),status=p.type,)", "docstring": "Return TypingStatusMessage from hangouts_pb2.SetTypingNotification.\n\n The same status may be sent multiple times consecutively, and when a\n message is sent the typing status will not change to stopped.", "id": "f10032:m4"} {"signature": "def parse_watermark_notification(p):", "body": "return WatermarkNotification(conv_id=p.conversation_id.id,user_id=from_participantid(p.sender_id),read_timestamp=from_timestamp(p.latest_read_timestamp),)", "docstring": "Return WatermarkNotification from hangouts_pb2.WatermarkNotification.", "id": "f10032:m5"} {"signature": "def upgrade_name(self, user_):", "body": "if user_.name_type > self.name_type:self.full_name = user_.full_nameself.first_name = user_.first_nameself.name_type = user_.name_typelogger.debug('',self.name_type.name.lower(), self.full_name, self)", "docstring": "Upgrade name type of this user.\n\n Google Voice participants often first appear with no name at all, and\n then get upgraded unpredictably to numbers (\"+12125551212\") or names.\n\n Args:\n user_ (~hangups.user.User): User to upgrade with.", "id": "f10033:c0:m1"} {"signature": "@staticmethoddef from_entity(entity, self_user_id):", "body": "user_id = UserID(chat_id=entity.id.chat_id,gaia_id=entity.id.gaia_id)return User(user_id, entity.properties.display_name,entity.properties.first_name,entity.properties.photo_url,entity.properties.email,(self_user_id == user_id) or (self_user_id is None))", "docstring": "Construct user from ``Entity`` message.\n\n Args:\n entity: ``Entity`` message.\n self_user_id (~hangups.user.UserID or None): The ID of the current\n user. If ``None``, assume ``entity`` is the current user.\n\n Returns:\n :class:`~hangups.user.User` object.", "id": "f10033:c0:m2"} {"signature": "@staticmethoddef from_conv_part_data(conv_part_data, self_user_id):", "body": "user_id = UserID(chat_id=conv_part_data.id.chat_id,gaia_id=conv_part_data.id.gaia_id)return User(user_id, conv_part_data.fallback_name, None, None, [],(self_user_id == user_id) or (self_user_id is None))", "docstring": "Construct user from ``ConversationParticipantData`` message.\n\n Args:\n conv_part_id: ``ConversationParticipantData`` message.\n self_user_id (~hangups.user.UserID or None): The ID of the current\n user. 
If ``None``, assume ``conv_part_id`` is the current user.\n\n Returns:\n :class:`~hangups.user.User` object.", "id": "f10033:c0:m3"} {"signature": "def get_user(self, user_id):", "body": "try:return self._user_dict[user_id]except KeyError:logger.warning('',user_id)return User(user_id, None, None, None, [], False)", "docstring": "Get a user by its ID.\n\n Args:\n user_id (~hangups.user.UserID): The ID of the user.\n\n Raises:\n KeyError: If no such user is known.\n\n Returns:\n :class:`~hangups.user.User` with the given ID.", "id": "f10033:c1:m1"} {"signature": "def get_all(self):", "body": "return self._user_dict.values()", "docstring": "Get all known users.\n\n Returns:\n List of :class:`~hangups.user.User` instances.", "id": "f10033:c1:m2"} {"signature": "def _add_user_from_conv_part(self, conv_part):", "body": "user_ = User.from_conv_part_data(conv_part, self._self_user.id_)existing = self._user_dict.get(user_.id_)if existing is None:logger.warning('',user_.name_type.name.lower(), user_.full_name)self._user_dict[user_.id_] = user_return user_else:existing.upgrade_name(user_)return existing", "docstring": "Add or upgrade User from ConversationParticipantData.", "id": "f10033:c1:m3"} {"signature": "def _on_state_update(self, state_update):", "body": "if state_update.HasField(''):self._handle_conversation(state_update.conversation)", "docstring": "Receive a StateUpdate", "id": "f10033:c1:m4"} {"signature": "def _handle_conversation(self, conversation):", "body": "for participant in conversation.participant_data:self._add_user_from_conv_part(participant)", "docstring": "Receive Conversation and update list of users", "id": "f10033:c1:m5"} {"signature": "def replace_emoticons(string):", "body": "return _replace_words(HANGOUTS_EMOTICONS_TO_EMOJI, string)", "docstring": "Replace emoticon words in string with corresponding emoji.", "id": "f10034:m0"} {"signature": "def _replace_words(replacements, string):", "body": "output_lines = []for line in string.split(''):output_words = []for word in line.split(''):new_word = replacements.get(word, word)output_words.append(new_word)output_lines.append(output_words)return ''.join(''.join(output_words) for output_words in output_lines)", "docstring": "Replace words with corresponding values in replacements dict.\n\n Words must be separated by spaces or newlines.", "id": "f10034:m1"} {"signature": "def send(self, notification):", "body": "pass", "docstring": "Send a notification.", "id": "f10035:c0:m0"} {"signature": "def set_terminal_title(title):", "body": "sys.stdout.write(\"\".format(title))", "docstring": "Use an xterm escape sequence to set the terminal title.", "id": "f10036:m0"} {"signature": "@contextlib.contextmanagerdef bracketed_paste_mode():", "body": "sys.stdout.write('')try:yieldfinally:sys.stdout.write('')", "docstring": "Context manager for enabling/disabling bracketed paste mode.", "id": "f10036:m1"} {"signature": "def dir_maker(path):", "body": "directory = os.path.dirname(path)if directory != '' and not os.path.isdir(directory):try:os.makedirs(directory)except OSError as e:sys.exit(''.format(e))", "docstring": "Create a directory if it does not exist.", "id": "f10036:m2"} {"signature": "def main():", "body": "dirs = appdirs.AppDirs('', '')default_log_path = os.path.join(dirs.user_log_dir, '')default_token_path = os.path.join(dirs.user_cache_dir, '')default_config_path = ''user_config_path = os.path.join(dirs.user_config_dir, '')dir_maker(user_config_path)if not os.path.isfile(user_config_path):with open(user_config_path, '') as 
cfg:cfg.write(\"\")parser = configargparse.ArgumentParser(prog='', default_config_files=[default_config_path,user_config_path],formatter_class=configargparse.ArgumentDefaultsHelpFormatter,add_help=False, )general_group = parser.add_argument_group('')general_group.add('', '', action='',help='')general_group.add('', default=default_token_path,help='')general_group.add('', default='',help='')general_group.add('', default='',help='')general_group.add('', '', help='',is_config_file=True, default=user_config_path)general_group.add('', '', action='',version=''.format(hangups.__version__))general_group.add('', '', action='',help='')general_group.add('', action='',help='')general_group.add('', default=default_log_path, help='')key_group = parser.add_argument_group('')key_group.add('', default='',help='')key_group.add('', default='',help='')key_group.add('', default='',help='')key_group.add('', default='',help='')key_group.add('', default='',help='')key_group.add('', default='',help='')key_group.add('', default='',help='')key_group.add('', default='',help='')key_group.add('', default='',help='')notification_group = parser.add_argument_group('')notification_group.add('', '',action='',help=configargparse.SUPPRESS)notification_group.add('', '',action='',help='')notification_group.add('',choices=sorted(NOTIFIER_TYPES.keys()),default='',help='')col_group = parser.add_argument_group('')col_group.add('', choices=COL_SCHEMES.keys(),default='', help='')col_group.add('', choices=('', '', ''),default=, help='')for name in COL_SCHEME_NAMES:col_group.add('' + name.replace('', '') + '',help=name + '')col_group.add('' + name.replace('', '') + '',help=name + '')args = parser.parse_args()for path in [args.log, args.token_path]:dir_maker(path)logging.basicConfig(filename=args.log,level=logging.DEBUG if args.debug else logging.WARNING,format=LOG_FORMAT)logging.getLogger('').setLevel(logging.WARNING)datetimefmt = {'': args.date_format,'': args.time_format}palette_colors = int(args.col_palette_colors)col_scheme = COL_SCHEMES[args.col_scheme]for name in COL_SCHEME_NAMES:col_scheme = add_color_to_scheme(col_scheme, name,getattr(args, '' + name + ''),getattr(args, '' + name + ''),palette_colors)keybindings = {'': args.key_next_tab,'': args.key_prev_tab,'': args.key_close_tab,'': args.key_quit,'': args.key_menu,'': args.key_up,'': args.key_down,'': args.key_page_up,'': args.key_page_down,}notifier_ = get_notifier(args.notification_type, args.disable_notifications)try:ChatUI(args.token_path, keybindings, col_scheme, palette_colors,datetimefmt, notifier_, args.discreet_notifications,args.manual_login)except KeyboardInterrupt:sys.exit('')", "docstring": "Main entry point.", "id": "f10036:m4"} {"signature": "def __init__(self, refresh_token_path, keybindings, palette,palette_colors, datetimefmt, notifier_,discreet_notifications, manual_login):", "body": "self._keys = keybindingsself._datetimefmt = datetimefmtself._notifier = notifier_self._discreet_notifications = discreet_notificationsset_terminal_title('')self._conv_widgets = {} self._tabbed_window = None self._conv_list = None self._user_list = None self._coroutine_queue = CoroutineQueue()self._exception = Nonetry:cookies = hangups.auth.get_auth_stdin(refresh_token_path, manual_login)except hangups.GoogleAuthError as e:sys.exit(''.format(e))self._client = hangups.Client(cookies)self._client.on_connect.add_observer(self._on_connect)loop = asyncio.get_event_loop()loop.set_exception_handler(self._exception_handler)try:self._urwid_loop = urwid.MainLoop(LoadingWidget(), palette, 
handle_mouse=False,input_filter=self._input_filter,event_loop=urwid.AsyncioEventLoop(loop=loop))except urwid.AttrSpecError as e:sys.exit(e)self._urwid_loop.screen.set_terminal_properties(colors=palette_colors)self._urwid_loop.start()coros = [self._connect(), self._coroutine_queue.consume()]with bracketed_paste_mode():try:loop.run_until_complete(asyncio.gather(*coros))except HangupsDisconnected:passfinally:self._urwid_loop.stop()task = asyncio.gather(*coros, return_exceptions=True)task.cancel()try:loop.run_until_complete(task)except asyncio.CancelledError:passloop.close()if self._exception:raise self._exception", "docstring": "Start the user interface.", "id": "f10036:c1:m0"} {"signature": "def _exception_handler(self, _loop, context):", "body": "self._coroutine_queue.put(self._client.disconnect())default_exception = Exception(context.get(''))self._exception = context.get('', default_exception)", "docstring": "Handle exceptions from the asyncio loop.", "id": "f10036:c1:m2"} {"signature": "def _input_filter(self, keys, _):", "body": "if keys == [self._keys['']]:if self._urwid_loop.widget == self._tabbed_window:self._show_menu()else:self._hide_menu()elif keys == [self._keys['']]:self._coroutine_queue.put(self._client.disconnect())else:return keys", "docstring": "Handle global keybindings.", "id": "f10036:c1:m3"} {"signature": "def _show_menu(self):", "body": "current_widget = self._tabbed_window.get_current_widget()if hasattr(current_widget, ''):menu_widget = current_widget.get_menu_widget(self._hide_menu)overlay = urwid.Overlay(menu_widget, self._tabbed_window,align='', width=('', ),valign='', height=('', ))self._urwid_loop.widget = overlay", "docstring": "Show the overlay menu.", "id": "f10036:c1:m4"} {"signature": "def _hide_menu(self):", "body": "self._urwid_loop.widget = self._tabbed_window", "docstring": "Hide the overlay menu.", "id": "f10036:c1:m5"} {"signature": "def get_conv_widget(self, conv_id):", "body": "if conv_id not in self._conv_widgets:set_title_cb = (lambda widget, title:self._tabbed_window.set_tab(widget, title=title))widget = ConversationWidget(self._client, self._coroutine_queue,self._conv_list.get(conv_id), set_title_cb, self._keys,self._datetimefmt)self._conv_widgets[conv_id] = widgetreturn self._conv_widgets[conv_id]", "docstring": "Return an existing or new ConversationWidget.", "id": "f10036:c1:m6"} {"signature": "def add_conversation_tab(self, conv_id, switch=False):", "body": "conv_widget = self.get_conv_widget(conv_id)self._tabbed_window.set_tab(conv_widget, switch=switch,title=conv_widget.title)", "docstring": "Add conversation tab if not present, and optionally switch to it.", "id": "f10036:c1:m7"} {"signature": "def on_select_conversation(self, conv_id):", "body": "self.add_conversation_tab(conv_id, switch=True)", "docstring": "Called when the user selects a new conversation to listen to.", "id": "f10036:c1:m8"} {"signature": "async def _on_connect(self):", "body": "self._user_list, self._conv_list = (await hangups.build_user_conversation_list(self._client))self._conv_list.on_event.add_observer(self._on_event)conv_picker = ConversationPickerWidget(self._conv_list,self.on_select_conversation,self._keys)self._tabbed_window = TabbedWindowWidget(self._keys)self._tabbed_window.set_tab(conv_picker, switch=True,title='')self._urwid_loop.widget = self._tabbed_window", "docstring": "Handle connecting for the first time.", "id": "f10036:c1:m9"} {"signature": "def _on_event(self, conv_event):", "body": "conv = self._conv_list.get(conv_event.conversation_id)user = 
conv.get_user(conv_event.user_id)show_notification = all((isinstance(conv_event, hangups.ChatMessageEvent),not user.is_self,not conv.is_quiet,))if show_notification:self.add_conversation_tab(conv_event.conversation_id)if self._discreet_notifications:notification = DISCREET_NOTIFICATIONelse:notification = notifier.Notification(user.full_name, get_conv_name(conv), conv_event.text)self._notifier.send(notification)", "docstring": "Open conversation tab for new messages & pass events to notifier.", "id": "f10036:c1:m10"} {"signature": "def put(self, coro):", "body": "assert asyncio.iscoroutine(coro)self._queue.put_nowait(coro)", "docstring": "Put a coroutine in the queue to be executed.", "id": "f10036:c2:m1"} {"signature": "async def consume(self):", "body": "while True:coro = await self._queue.get()assert asyncio.iscoroutine(coro)await coro", "docstring": "Consume coroutines from the queue by executing them.", "id": "f10036:c2:m2"} {"signature": "def keypress(self, size, key):", "body": "return super().keypress(size, key)", "docstring": "forward the call", "id": "f10036:c3:m0"} {"signature": "def _rename(self, name, callback):", "body": "self._coroutine_queue.put(self._conversation.rename(name))callback()", "docstring": "Rename conversation and call callback.", "id": "f10036:c5:m1"} {"signature": "def _get_label(self):", "body": "return get_conv_name(self._conversation, show_unread=True)", "docstring": "Return the button's label generated from the conversation.", "id": "f10036:c7:m1"} {"signature": "def _on_event(self, _):", "body": "self._button.set_label(self._get_label())", "docstring": "Update the button's label when an event occurs.", "id": "f10036:c7:m2"} {"signature": "@propertydef last_modified(self):", "body": "return self._conversation.last_modified", "docstring": "Last modified date of conversation, used for sorting.", "id": "f10036:c7:m3"} {"signature": "def _on_event(self, _):", "body": "self.sort(key=lambda conv_button: conv_button.last_modified,reverse=True)", "docstring": "Re-order the conversations when an event occurs.", "id": "f10036:c8:m1"} {"signature": "def show_message(self, message_str):", "body": "if self._message_handle is not None:self._message_handle.cancel()self._message_handle = asyncio.get_event_loop().call_later(self._MESSAGE_DELAY_SECS, self._clear_message)self._message = message_strself._update()", "docstring": "Show a temporary message.", "id": "f10036:c12:m1"} {"signature": "def _clear_message(self):", "body": "self._message = Noneself._message_handle = Noneself._update()", "docstring": "Clear the temporary message.", "id": "f10036:c12:m2"} {"signature": "def _on_disconnect(self):", "body": "self._is_connected = Falseself._update()", "docstring": "Show reconnecting message when disconnected.", "id": "f10036:c12:m3"} {"signature": "def _on_reconnect(self):", "body": "self._is_connected = Trueself._update()", "docstring": "Hide reconnecting message when reconnected.", "id": "f10036:c12:m4"} {"signature": "def _on_event(self, conv_event):", "body": "if isinstance(conv_event, hangups.ChatMessageEvent):self._typing_statuses[conv_event.user_id] = (hangups.TYPING_TYPE_STOPPED)self._update()", "docstring": "Make users stop typing when they send a message.", "id": "f10036:c12:m5"} {"signature": "def _on_typing(self, typing_message):", "body": "self._typing_statuses[typing_message.user_id] = typing_message.statusself._update()", "docstring": "Handle typing updates.", "id": "f10036:c12:m6"} {"signature": "def _update(self):", "body": "typing_users = 
[self._conversation.get_user(user_id)for user_id, status in self._typing_statuses.items()if status == hangups.TYPING_TYPE_STARTED]displayed_names = [user.first_name for user in typing_usersif not user.is_self]if displayed_names:typing_message = ''.format(''.join(sorted(displayed_names)),'' if len(displayed_names) == else '')else:typing_message = ''if not self._is_connected:self._widget.set_text(\"\")elif self._message is not None:self._widget.set_text(self._message)else:self._widget.set_text(typing_message)", "docstring": "Update status text.", "id": "f10036:c12:m7"} {"signature": "@staticmethoddef _get_date_str(timestamp, datetimefmt, show_date=False):", "body": "fmt = ''if show_date:fmt += ''+datetimefmt.get('', '')+''fmt += datetimefmt.get('', '')return timestamp.astimezone(tz=None).strftime(fmt)", "docstring": "Convert UTC datetime into user interface string.", "id": "f10036:c13:m1"} {"signature": "@staticmethoddef from_conversation_event(conversation, conv_event, prev_conv_event,datetimefmt, watermark_users=None):", "body": "user = conversation.get_user(conv_event.user_id)if prev_conv_event is not None:is_new_day = (conv_event.timestamp.astimezone(tz=None).date() !=prev_conv_event.timestamp.astimezone(tz=None).date())else:is_new_day = Falseif isinstance(conv_event, hangups.ChatMessageEvent):return MessageWidget(conv_event.timestamp, conv_event.text,datetimefmt, user, show_date=is_new_day,watermark_users=watermark_users)elif isinstance(conv_event, hangups.RenameEvent):if conv_event.new_name == '':text = (''.format(user.first_name))else:text = (''.format(user.first_name, conv_event.new_name))return MessageWidget(conv_event.timestamp, text, datetimefmt,show_date=is_new_day,watermark_users=watermark_users)elif isinstance(conv_event, hangups.MembershipChangeEvent):event_users = [conversation.get_user(user_id) for user_idin conv_event.participant_ids]names = ''.join([user.full_name for user in event_users])if conv_event.type_ == hangups.MEMBERSHIP_CHANGE_TYPE_JOIN:text = (''.format(user.first_name, names))else: text = (''.format(names))return MessageWidget(conv_event.timestamp, text, datetimefmt,show_date=is_new_day,watermark_users=watermark_users)elif isinstance(conv_event, hangups.HangoutEvent):text = {hangups.HANGOUT_EVENT_TYPE_START: (''),hangups.HANGOUT_EVENT_TYPE_END: (''),hangups.HANGOUT_EVENT_TYPE_ONGOING: (''),}.get(conv_event.event_type, '')return MessageWidget(conv_event.timestamp, text, datetimefmt,show_date=is_new_day,watermark_users=watermark_users)elif isinstance(conv_event, hangups.GroupLinkSharingModificationEvent):status_on = hangups.GROUP_LINK_SHARING_STATUS_ONstatus_text = ('' if conv_event.new_status == status_onelse '')text = ''.format(user.first_name,status_text)return MessageWidget(conv_event.timestamp, text, datetimefmt,show_date=is_new_day,watermark_users=watermark_users)else:text = ''return MessageWidget(conv_event.timestamp, text, datetimefmt,show_date=is_new_day,watermark_users=watermark_users)", "docstring": "Return MessageWidget representing a ConversationEvent.\n\n Returns None if the ConversationEvent does not have a widget\n representation.", "id": "f10036:c13:m3"} {"signature": "def _handle_event(self, conv_event):", "body": "if not self._is_scrolling:self.set_focus(conv_event.id_)else:self._modified()", "docstring": "Handle updating and scrolling when a new event is added.\n\n Automatically scroll down to show the new text if the bottom is\n showing. 
This allows the user to scroll up to read previous messages\n while new messages are arriving.", "id": "f10036:c14:m1"} {"signature": "async def _load(self):", "body": "try:conv_events = await self._conversation.get_events(self._conversation.events[].id_)except (IndexError, hangups.NetworkError):conv_events = []if not conv_events:self._first_loaded = Trueif self._focus_position == self.POSITION_LOADING and conv_events:self.set_focus(conv_events[-].id_)else:self._modified()self._refresh_watermarked_events()self._is_loading = False", "docstring": "Load more events for this conversation.", "id": "f10036:c14:m2"} {"signature": "def __getitem__(self, position):", "body": "if position == self.POSITION_LOADING:if self._first_loaded:return urwid.Text('', align='')else:if not self._is_loading and not self._first_loaded:self._is_loading = Trueself._coroutine_queue.put(self._load())return urwid.Text('', align='')try:prev_position = self._get_position(position, prev=True)if prev_position == self.POSITION_LOADING:prev_event = Noneelse:prev_event = self._conversation.get_event(prev_position)return MessageWidget.from_conversation_event(self._conversation, self._conversation.get_event(position),prev_event, self._datetimefmt,watermark_users=self._watermarked_events.get(position, None))except KeyError:raise IndexError(''.format(position))", "docstring": "Return widget at position or raise IndexError.", "id": "f10036:c14:m3"} {"signature": "def _on_watermark_notification(self, _):", "body": "self._refresh_watermarked_events()self._modified()", "docstring": "Update watermarks for this conversation.", "id": "f10036:c14:m6"} {"signature": "def _get_position(self, position, prev=False):", "body": "if position == self.POSITION_LOADING:if prev:raise IndexError('')else:return self._conversation.events[].id_else:ev = self._conversation.next_event(position, prev=prev)if ev is None:if prev:return self.POSITION_LOADINGelse:raise IndexError('')else:return ev.id_", "docstring": "Return the next/previous position or raise IndexError.", "id": "f10036:c14:m7"} {"signature": "def next_position(self, position):", "body": "return self._get_position(position)", "docstring": "Return the position below position or raise IndexError.", "id": "f10036:c14:m8"} {"signature": "def prev_position(self, position):", "body": "return self._get_position(position, prev=True)", "docstring": "Return the position above position or raise IndexError.", "id": "f10036:c14:m9"} {"signature": "def set_focus(self, position):", "body": "self._focus_position = positionself._modified()try:self.next_position(position)except IndexError:self._is_scrolling = Falseelse:self._is_scrolling = True", "docstring": "Set the focus to position or raise IndexError.", "id": "f10036:c14:m10"} {"signature": "def get_focus(self):", "body": "return (self[self._focus_position], self._focus_position)", "docstring": "Return (widget, position) tuple.", "id": "f10036:c14:m11"} {"signature": "def get_menu_widget(self, close_callback):", "body": "return ConversationMenu(self._coroutine_queue, self._conversation, close_callback,self._keys)", "docstring": "Return the menu widget associated with this widget.", "id": "f10036:c15:m1"} {"signature": "def keypress(self, size, key):", "body": "self._coroutine_queue.put(self._client.set_active())self._coroutine_queue.put(self._conversation.update_read_timestamp())return super().keypress(size, key)", "docstring": "Handle marking messages as read and keeping client active.", "id": "f10036:c15:m2"} {"signature": "def _set_title(self):", 
"body": "self.title = get_conv_name(self._conversation, show_unread=True,truncate=True)self._set_title_cb(self, self.title)", "docstring": "Update this conversation's tab title.", "id": "f10036:c15:m3"} {"signature": "def _on_return(self, text):", "body": "if not text:returnelif text.startswith('') and len(text.split('')) == :filename = text.split('')[]image_file = open(filename, '')text = ''else:image_file = Nonetext = replace_emoticons(text)segments = hangups.ChatMessageSegment.from_str(text)self._coroutine_queue.put(self._handle_send_message(self._conversation.send_message(segments, image_file=image_file)))", "docstring": "Called when the user presses return on the send message widget.", "id": "f10036:c15:m4"} {"signature": "async def _handle_send_message(self, coro):", "body": "try:await coroexcept hangups.NetworkError:self._status_widget.show_message('')", "docstring": "Handle showing an error if a message fails to send.", "id": "f10036:c15:m5"} {"signature": "def _on_watermark_notification(self, _):", "body": "self._set_title()", "docstring": "Handle watermark changes for this conversation.", "id": "f10036:c15:m6"} {"signature": "def _on_event(self, _):", "body": "self._set_title()", "docstring": "Display a new conversation message.", "id": "f10036:c15:m7"} {"signature": "def get_current_widget(self):", "body": "return self._widgets[self._tab_index]", "docstring": "Return the widget in the current tab.", "id": "f10036:c16:m1"} {"signature": "def _update_tabs(self):", "body": "text = []for num, widget in enumerate(self._widgets):palette = ('' if num == self._tab_indexelse '')text += [(palette, ''.format(self._widget_title[widget])),('', ''),]self._tabs.set_text(text)self._frame.contents[''] = (self._widgets[self._tab_index], None)", "docstring": "Update tab display.", "id": "f10036:c16:m2"} {"signature": "def keypress(self, size, key):", "body": "key = super().keypress(size, key)num_tabs = len(self._widgets)if key == self._keys['']:self._tab_index = (self._tab_index - ) % num_tabsself._update_tabs()elif key == self._keys['']:self._tab_index = (self._tab_index + ) % num_tabsself._update_tabs()elif key == self._keys['']:if self._tab_index > :curr_tab = self._widgets[self._tab_index]self._widgets.remove(curr_tab)del self._widget_title[curr_tab]self._tab_index -= self._update_tabs()else:return key", "docstring": "Handle keypresses for changing tabs.", "id": "f10036:c16:m3"} {"signature": "def set_tab(self, widget, switch=False, title=None):", "body": "if widget not in self._widgets:self._widgets.append(widget)self._widget_title[widget] = ''if switch:self._tab_index = self._widgets.index(widget)if title:self._widget_title[widget] = titleself._update_tabs()", "docstring": "Add or modify a tab.\n\n If widget is not a tab, it will be added. If switch is True, switch to\n this tab. 
If title is given, set the tab's title.", "id": "f10036:c16:m4"} {"signature": "def get_conv_name(conv, truncate=False, show_unread=False):", "body": "num_unread = len([conv_event for conv_event in conv.unread_events ifisinstance(conv_event, hangups.ChatMessageEvent) andnot conv.get_user(conv_event.user_id).is_self])if show_unread and num_unread > :postfix = ''.format(num_unread)else:postfix = ''if conv.name is not None:return conv.name + postfixelse:participants = sorted((user for user in conv.users if not user.is_self),key=lambda user: user.id_)names = [user.first_name for user in participants]if not participants:return \"\" + postfixif len(participants) == :return participants[].full_name + postfixelif truncate and len(participants) > :return (''.join(names[:] + [''.format(len(names) - )]) +postfix)else:return ''.join(names) + postfix", "docstring": "Return a readable name for a conversation.\n\n If the conversation has a custom name, use the custom name. Otherwise, for\n one-to-one conversations, the name is the full name of the other user. For\n group conversations, the name is a comma-separated list of first names. If\n the group conversation is empty, the name is \"Empty Conversation\".\n\n If truncate is true, only show up to two names in a group conversation.\n\n If show_unread is True, if there are unread chat messages, show the number\n of unread chat messages in parentheses after the conversation name.", "id": "f10037:m0"} {"signature": "def add_color_to_scheme(scheme, name, foreground, background, palette_colors):", "body": "if foreground is None and background is None:return schemenew_scheme = []for item in scheme:if item[] == name:if foreground is None:foreground = item[]if background is None:background = item[]if palette_colors > :new_scheme.append((name, '', '', '', foreground, background))else:new_scheme.append((name, foreground, background))else:new_scheme.append(item)return new_scheme", "docstring": "Add foreground and background colours to a color scheme", "id": "f10037:m1"} {"signature": "def add_observer(self, callback):", "body": "if callback in self._observers:raise ValueError(''.format(callback, self))self._observers.append(callback)", "docstring": "Add an observer to this event.\n\n Args:\n callback: A function or coroutine callback to call when the event\n is fired.\n\n Raises:\n ValueError: If the callback has already been added.", "id": "f10038:c0:m1"} {"signature": "def remove_observer(self, callback):", "body": "if callback not in self._observers:raise ValueError(''.format(callback, self))self._observers.remove(callback)", "docstring": "Remove an observer from this event.\n\n Args:\n callback: A function or coroutine callback to remove from this\n event.\n\n Raises:\n ValueError: If the callback is not an observer of this event.", "id": "f10038:c0:m2"} {"signature": "async def fire(self, *args, **kwargs):", "body": "logger.debug(''.format(self))for observer in self._observers:gen = observer(*args, **kwargs)if asyncio.iscoroutinefunction(observer):await gen", "docstring": "Fire this event, calling all observers with the same arguments.", "id": "f10038:c0:m3"} {"signature": "@propertydef timestamp(self):", "body": "return parsers.from_timestamp(self._event.timestamp)", "docstring": "When the event occurred (:class:`datetime.datetime`).", "id": "f10039:c0:m1"} {"signature": "@propertydef user_id(self):", "body": "return user.UserID(chat_id=self._event.sender_id.chat_id,gaia_id=self._event.sender_id.gaia_id)", "docstring": "Who created the event 
(:class:`~hangups.user.UserID`).", "id": "f10039:c0:m2"} {"signature": "@propertydef conversation_id(self):", "body": "return self._event.conversation_id.id", "docstring": "ID of the conversation containing the event (:class:`str`).", "id": "f10039:c0:m3"} {"signature": "@propertydef id_(self):", "body": "return self._event.event_id", "docstring": "ID of this event (:class:`str`).", "id": "f10039:c0:m4"} {"signature": "def __init__(self, text, segment_type=None,is_bold=False, is_italic=False, is_strikethrough=False,is_underline=False, link_target=None):", "body": "if segment_type is not None:self.type_ = segment_typeelif link_target is not None:self.type_ = hangouts_pb2.SEGMENT_TYPE_LINKelse:self.type_ = hangouts_pb2.SEGMENT_TYPE_TEXTself.text = textself.is_bold = is_boldself.is_italic = is_italicself.is_strikethrough = is_strikethroughself.is_underline = is_underlineself.link_target = link_target", "docstring": "Create a new chat message segment.", "id": "f10039:c1:m0"} {"signature": "@staticmethoddef from_str(text):", "body": "segment_list = chat_message_parser.parse(text)return [ChatMessageSegment(segment.text, **segment.params)for segment in segment_list]", "docstring": "Construct :class:`ChatMessageSegment` list parsed from a string.\n\n Args:\n text (str): Text to parse. May contain line breaks, URLs and\n formatting markup (simplified Markdown and HTML) to be\n converted into equivalent segments.\n\n Returns:\n List of :class:`ChatMessageSegment` objects.", "id": "f10039:c1:m1"} {"signature": "@staticmethoddef deserialize(segment):", "body": "link_target = segment.link_data.link_targetreturn ChatMessageSegment(segment.text, segment_type=segment.type,is_bold=segment.formatting.bold,is_italic=segment.formatting.italic,is_strikethrough=segment.formatting.strikethrough,is_underline=segment.formatting.underline,link_target=None if link_target == '' else link_target)", "docstring": "Construct :class:`ChatMessageSegment` from ``Segment`` message.\n\n Args:\n segment: ``Segment`` message to parse.\n\n Returns:\n :class:`ChatMessageSegment` object.", "id": "f10039:c1:m2"} {"signature": "def serialize(self):", "body": "segment = hangouts_pb2.Segment(type=self.type_,text=self.text,formatting=hangouts_pb2.Formatting(bold=self.is_bold,italic=self.is_italic,strikethrough=self.is_strikethrough,underline=self.is_underline,),)if self.link_target is not None:segment.link_data.link_target = self.link_targetreturn segment", "docstring": "Serialize this segment to a ``Segment`` message.\n\n Returns:\n ``Segment`` message.", "id": "f10039:c1:m3"} {"signature": "@propertydef text(self):", "body": "lines = ['']for segment in self.segments:if segment.type_ == hangouts_pb2.SEGMENT_TYPE_TEXT:lines[-] += segment.textelif segment.type_ == hangouts_pb2.SEGMENT_TYPE_LINK:lines[-] += segment.textelif segment.type_ == hangouts_pb2.SEGMENT_TYPE_LINE_BREAK:lines.append('')else:logger.warning(''.format(segment.type_))lines.extend(self.attachments)return ''.join(lines)", "docstring": "Text of the message without formatting (:class:`str`).", "id": "f10039:c2:m0"} {"signature": "@propertydef segments(self):", "body": "seg_list = self._event.chat_message.message_content.segmentreturn [ChatMessageSegment.deserialize(seg) for seg in seg_list]", "docstring": "List of :class:`ChatMessageSegment` in message (:class:`list`).", "id": "f10039:c2:m1"} {"signature": "@propertydef attachments(self):", "body": "raw_attachments = self._event.chat_message.message_content.attachmentif raw_attachments is None:raw_attachments = 
[]attachments = []for attachment in raw_attachments:for embed_item_type in attachment.embed_item.type:known_types = [hangouts_pb2.ITEM_TYPE_PLUS_PHOTO,hangouts_pb2.ITEM_TYPE_PLACE_V2,hangouts_pb2.ITEM_TYPE_PLACE,hangouts_pb2.ITEM_TYPE_THING,]if embed_item_type not in known_types:logger.warning('''', embed_item_type)if attachment.embed_item.HasField(''):attachments.append(attachment.embed_item.plus_photo.thumbnail.image_url)return attachments", "docstring": "List of attachments in the message (:class:`list`).", "id": "f10039:c2:m2"} {"signature": "@propertydef new_otr_status(self):", "body": "return self._event.otr_modification.new_otr_status", "docstring": "The conversation's new OTR status.\n\n May be either ``OFF_THE_RECORD_STATUS_OFF_THE_RECORD`` or\n ``OFF_THE_RECORD_STATUS_ON_THE_RECORD``.", "id": "f10039:c3:m0"} {"signature": "@propertydef old_otr_status(self):", "body": "return self._event.otr_modification.old_otr_status", "docstring": "The conversation's old OTR status.\n\n May be either ``OFF_THE_RECORD_STATUS_OFF_THE_RECORD`` or\n ``OFF_THE_RECORD_STATUS_ON_THE_RECORD``.", "id": "f10039:c3:m1"} {"signature": "@propertydef new_name(self):", "body": "return self._event.conversation_rename.new_name", "docstring": "The conversation's new name (:class:`str`).\n\n May be an empty string if the conversation's name was cleared.", "id": "f10039:c4:m0"} {"signature": "@propertydef old_name(self):", "body": "return self._event.conversation_rename.old_name", "docstring": "The conversation's old name (:class:`str`).\n\n May be an empty string if the conversation had no previous name.", "id": "f10039:c4:m1"} {"signature": "@propertydef type_(self):", "body": "return self._event.membership_change.type", "docstring": "The type of membership change.\n\n May be either ``MEMBERSHIP_CHANGE_TYPE_JOIN`` or\n ``MEMBERSHIP_CHANGE_TYPE_LEAVE``.", "id": "f10039:c5:m0"} {"signature": "@propertydef participant_ids(self):", "body": "return [user.UserID(chat_id=id_.chat_id, gaia_id=id_.gaia_id)for id_ in self._event.membership_change.participant_ids]", "docstring": ":class:`~hangups.user.UserID` of users involved (:class:`list`).", "id": "f10039:c5:m1"} {"signature": "@propertydef event_type(self):", "body": "return self._event.hangout_event.event_type", "docstring": "The Hangout event type.\n\n May be one of ``HANGOUT_EVENT_TYPE_START``, ``HANGOUT_EVENT_TYPE_END``,\n ``HANGOUT_EVENT_TYPE_JOIN``, ``HANGOUT_EVENT_TYPE_LEAVE``,\n ``HANGOUT_EVENT_TYPE_COMING_SOON``, or ``HANGOUT_EVENT_TYPE_ONGOING``.", "id": "f10039:c6:m0"} {"signature": "@propertydef new_status(self):", "body": "return self._event.group_link_sharing_modification.new_status", "docstring": "The new group link sharing status.\n\n May be either ``GROUP_LINK_SHARING_STATUS_ON`` or\n ``GROUP_LINK_SHARING_STATUS_OFF``.", "id": "f10039:c7:m0"} {"signature": "def _get_authorization_headers(sapisid_cookie):", "body": "time_msec = int(time.time() * )auth_string = ''.format(time_msec, sapisid_cookie, ORIGIN_URL)auth_hash = hashlib.sha1(auth_string.encode()).hexdigest()sapisidhash = ''.format(time_msec, auth_hash)return {'': sapisidhash,'': ORIGIN_URL,'': '',}", "docstring": "Return authorization headers for API request.", "id": "f10041:m0"} {"signature": "async def fetch(self, method, url, params=None, headers=None, data=None):", "body": "logger.debug('', method, url, data)for retry_num in range(MAX_RETRIES):try:async with self.fetch_raw(method, url, params=params,headers=headers, data=data) as res:async with async_timeout.timeout(REQUEST_TIMEOUT):body = 
await res.read()logger.debug('',res.status, res.reason, body)except asyncio.TimeoutError:error_msg = ''except aiohttp.ServerDisconnectedError as err:error_msg = ''.format(err)except (aiohttp.ClientError, ValueError) as err:error_msg = ''.format(err)else:breaklogger.info('', retry_num, error_msg)else:logger.info('', MAX_RETRIES)raise exceptions.NetworkError(error_msg)if res.status != :logger.info('',res.status, res.reason)raise exceptions.NetworkError(''.format(res.status, res.reason))return FetchResponse(res.status, body)", "docstring": "Make an HTTP request.\n\n Automatically uses configured HTTP proxy, and adds Google authorization\n header and cookies.\n\n Failures will be retried MAX_RETRIES times before raising NetworkError.\n\n Args:\n method (str): Request method.\n url (str): Request URL.\n params (dict): (optional) Request query string parameters.\n headers (dict): (optional) Request headers.\n data: (str): (optional) Request body data.\n\n Returns:\n FetchResponse: Response data.\n\n Raises:\n NetworkError: If the request fails.", "id": "f10041:c0:m1"} {"signature": "def fetch_raw(self, method, url, params=None, headers=None, data=None):", "body": "if not urllib.parse.urlparse(url).hostname.endswith(''):raise Exception('')headers = headers or {}headers.update(self._authorization_headers)return self._session.request(method, url, params=params, headers=headers, data=data,proxy=self._proxy)", "docstring": "Make an HTTP request using aiohttp directly.\n\n Automatically uses configured HTTP proxy, and adds Google authorization\n header and cookies.\n\n Args:\n method (str): Request method.\n url (str): Request URL.\n params (dict): (optional) Request query string parameters.\n headers (dict): (optional) Request headers.\n data: (str): (optional) Request body data.\n\n Returns:\n aiohttp._RequestContextManager: ContextManager for a HTTP response.\n\n Raises:\n See ``aiohttp.ClientSession.request``.", "id": "f10041:c0:m2"} {"signature": "async def close(self):", "body": "await self._session.close()", "docstring": "Close the underlying aiohttp.ClientSession.", "id": "f10041:c0:m3"} {"signature": "async def lookup_entities(client, args):", "body": "lookup_spec = _get_lookup_spec(args.entity_identifier)request = hangups.hangouts_pb2.GetEntityByIdRequest(request_header=client.get_request_header(),batch_lookup_spec=[lookup_spec],)res = await client.get_entity_by_id(request)for entity_result in res.entity_result:for entity in entity_result.entity:print(entity)", "docstring": "Search for entities by phone number, email, or gaia_id.", "id": "f10045:m0"} {"signature": "def _get_lookup_spec(identifier):", "body": "if identifier.startswith(''):return hangups.hangouts_pb2.EntityLookupSpec(phone=identifier, create_offnetwork_gaia=True)elif '' in identifier:return hangups.hangouts_pb2.EntityLookupSpec(email=identifier, create_offnetwork_gaia=True)else:return hangups.hangouts_pb2.EntityLookupSpec(gaia_id=identifier)", "docstring": "Return EntityLookupSpec from phone number, email address, or gaia ID.", "id": "f10045:m1"} {"signature": "def run_example(example_coroutine, *extra_args):", "body": "args = _get_parser(extra_args).parse_args()logging.basicConfig(level=logging.DEBUG if args.debug else logging.WARNING)cookies = hangups.auth.get_auth_stdin(args.token_path)client = hangups.Client(cookies)loop = asyncio.get_event_loop()task = asyncio.ensure_future(_async_main(example_coroutine, client, args),loop=loop)try:loop.run_until_complete(task)except 
KeyboardInterrupt:task.cancel()loop.run_until_complete(task)finally:loop.close()", "docstring": "Run a hangups example coroutine.\n\n Args:\n example_coroutine (coroutine): Coroutine to run with a connected\n hangups client and arguments namespace as arguments.\n extra_args (str): Any extra command line arguments required by the\n example.", "id": "f10052:m0"} {"signature": "def _get_parser(extra_args):", "body": "parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,)dirs = appdirs.AppDirs('', '')default_token_path = os.path.join(dirs.user_cache_dir, '')parser.add_argument('', default=default_token_path,help='')parser.add_argument('', '', action='',help='')for extra_arg in extra_args:parser.add_argument(extra_arg, required=True)return parser", "docstring": "Return ArgumentParser with any extra arguments.", "id": "f10052:m1"} {"signature": "async def _async_main(example_coroutine, client, args):", "body": "task = asyncio.ensure_future(client.connect())on_connect = asyncio.Future()client.on_connect.add_observer(lambda: on_connect.set_result(None))done, _ = await asyncio.wait((on_connect, task), return_when=asyncio.FIRST_COMPLETED)await asyncio.gather(*done)try:await example_coroutine(client, args)except asyncio.CancelledError:passfinally:await client.disconnect()await task", "docstring": "Run the example coroutine.", "id": "f10052:m2"} {"signature": "def print_table(col_tuple, row_tuples):", "body": "col_widths = [max(len(str(row[col])) for row in [col_tuple] + row_tuples)for col in range(len(col_tuple))]format_str = ''.join(''.format(col_width)for col_width in col_widths)header_border = ''.join('' * col_width for col_width in col_widths)print(header_border)print(format_str.format(*col_tuple))print(header_border)for row_tuple in row_tuples:print(format_str.format(*row_tuple))print(header_border)print()", "docstring": "Print column headers and rows as a reStructuredText table.\n\n Args:\n col_tuple: Tuple of column name strings.\n row_tuples: List of tuples containing row data.", "id": "f10057:m0"} {"signature": "def make_subsection(text):", "body": "return ''.format(text, '' * len(text))", "docstring": "Format text as reStructuredText subsection.\n\n Args:\n text: Text string to format.\n\n Returns:\n Formatted text string.", "id": "f10057:m1"} {"signature": "def make_link(text):", "body": "return ''.format(text)", "docstring": "Format text as reStructuredText link.\n\n Args:\n text: Text string to format.\n\n Returns:\n Formatted text string.", "id": "f10057:m2"} {"signature": "def make_code(text):", "body": "return ''.format(text)", "docstring": "Format text as reStructuredText code.\n\n Args:\n text: Text string to format.\n\n Returns:\n Formatted text string.", "id": "f10057:m3"} {"signature": "def make_comment(text):", "body": "return ''.format(text)", "docstring": "Format text as reStructuredText comment.\n\n Args:\n text: Text string to format.\n\n Returns:\n Formatted text string.", "id": "f10057:m4"} {"signature": "def get_comment_from_location(location):", "body": "return textwrap.dedent(location.leading_comments orlocation.trailing_comments)", "docstring": "Return comment text from location.\n\n Args:\n location: descriptor_pb2.SourceCodeInfo.Location instance to get\n comment from.\n\n Returns:\n Comment as string.", "id": "f10057:m5"} {"signature": "def generate_enum_doc(enum_descriptor, locations, path, name_prefix=''):", "body": "print(make_subsection(name_prefix + enum_descriptor.name))location = locations[path]if 
location.HasField(''):print(textwrap.dedent(location.leading_comments))row_tuples = []for value_index, value in enumerate(enum_descriptor.value):field_location = locations[path + (, value_index)]row_tuples.append((make_code(value.name),value.number,textwrap.fill(get_comment_from_location(field_location), INFINITY),))print_table(('', '', ''), row_tuples)", "docstring": "Generate doc for an enum.\n\n Args:\n enum_descriptor: descriptor_pb2.EnumDescriptorProto instance for enum\n to generate docs for.\n locations: Dictionary of location paths tuples to\n descriptor_pb2.SourceCodeInfo.Location instances.\n path: Path tuple to the enum definition.\n name_prefix: Optional prefix for this enum's name.", "id": "f10057:m6"} {"signature": "def generate_message_doc(message_descriptor, locations, path, name_prefix=''):", "body": "prefixed_name = name_prefix + message_descriptor.nameprint(make_subsection(prefixed_name))location = locations[path]if location.HasField(''):print(textwrap.dedent(location.leading_comments))row_tuples = []for field_index, field in enumerate(message_descriptor.field):field_location = locations[path + (, field_index)]if field.type not in [, ]:type_str = TYPE_TO_STR[field.type]else:type_str = make_link(field.type_name.lstrip(''))row_tuples.append((make_code(field.name),field.number,type_str,LABEL_TO_STR[field.label],textwrap.fill(get_comment_from_location(field_location), INFINITY),))print_table(('', '', '', '', ''),row_tuples)nested_types = enumerate(message_descriptor.nested_type)for index, nested_message_desc in nested_types:generate_message_doc(nested_message_desc, locations,path + (, index),name_prefix=prefixed_name + '')for index, nested_enum_desc in enumerate(message_descriptor.enum_type):generate_enum_doc(nested_enum_desc, locations, path + (, index),name_prefix=prefixed_name + '')", "docstring": "Generate docs for message and nested messages and enums.\n\n Args:\n message_descriptor: descriptor_pb2.DescriptorProto instance for message\n to generate docs for.\n locations: Dictionary of location paths tuples to\n descriptor_pb2.SourceCodeInfo.Location instances.\n path: Path tuple to the message definition.\n name_prefix: Optional prefix for this message's name.", "id": "f10057:m7"} {"signature": "def compile_protofile(proto_file_path):", "body": "out_file = tempfile.mkstemp()[]try:subprocess.check_output(['', '','', out_file,proto_file_path])except subprocess.CalledProcessError as e:sys.exit(''.format(e.returncode))return out_file", "docstring": "Compile proto file to descriptor set.\n\n Args:\n proto_file_path: Path to proto file to compile.\n\n Returns:\n Path to file containing compiled descriptor set.\n\n Raises:\n SystemExit if the compilation fails.", "id": "f10057:m8"} {"signature": "def main():", "body": "parser = argparse.ArgumentParser()parser.add_argument('')args = parser.parse_args()out_file = compile_protofile(args.protofilepath)with open(out_file, '') as proto_file:file_descriptor_set = descriptor_pb2.FileDescriptorSet.FromString(proto_file.read())for file_descriptor in file_descriptor_set.file:locations = {}for location in file_descriptor.source_code_info.location:locations[tuple(location.path)] = locationprint(make_comment(''''.format(args.protofilepath)))for index, message_desc in enumerate(file_descriptor.message_type):generate_message_doc(message_desc, locations, (, index))for index, enum_desc in enumerate(file_descriptor.enum_type):generate_enum_doc(enum_desc, locations, (, index))", "docstring": "Parse arguments and print generated documentation to 
stdout.", "id": "f10057:m9"} {"signature": "@fixturedef post_token_dropbox(dropbox_container, config, post_token):", "body": "from briefkasten import parse_post_tokenreturn dropbox_container.get_dropbox(parse_post_token(post_token,secret=config.registry.settings['']))", "docstring": "returns a dropbox instance matching the given post_token", "id": "f10063:m8"} {"signature": "@view_config(route_name='',request_method='',renderer='')def dropbox_form(request):", "body": "from briefkasten import generate_post_tokentoken = generate_post_token(secret=request.registry.settings[''])return dict(action=request.route_url('', token=token),fileupload_url=request.route_url('', token=token),**defaults(request))", "docstring": "generates a dropbox uid and renders the submission form with a signed version of that id", "id": "f10069:m1"} {"signature": "@view_config(route_name='',accept='',renderer='',request_method='')def dropbox_fileupload(dropbox, request):", "body": "attachment = request.POST['']attached = dropbox.add_attachment(attachment)return dict(files=[dict(name=attached,type=attachment.type,)])", "docstring": "accepts a single file upload and adds it to the dropbox as attachment", "id": "f10069:m2"} {"signature": "@view_config(route_name='',request_method='')def dropbox_submission(dropbox, request):", "body": "try:data = dropbox_schema.deserialize(request.POST)except Exception:return HTTPFound(location=request.route_url(''))dropbox.message = data.get('')if '' in dropbox.settings:dropbox.from_watchdog = is_equal(unicode(dropbox.settings['']),data.pop('', u''))if data.get('') is not None:dropbox.add_attachment(data[''])dropbox.submit()drop_url = request.route_url('', drop_id=dropbox.drop_id)print(\"\" % drop_url)return HTTPFound(location=drop_url)", "docstring": "handles the form submission, redirects to the dropbox's status page.", "id": "f10069:m3"} {"signature": "def sanitize_filename(filename):", "body": "token = generate_drop_id()name, extension = splitext(filename)if extension:return '' % (token, extension)else:return token", "docstring": "preserve the file ending, but replace the name with a random token", "id": "f10070:m1"} {"signature": "def get_dropbox(self, drop_id):", "body": "return Dropbox(self, drop_id=drop_id)", "docstring": "returns the dropbox with the given id, if it does not exist an empty dropbox\n will be created and returned", "id": "f10070:c0:m3"} {"signature": "def __init__(self, container, drop_id, message=None, attachments=None, from_watchdog=False):", "body": "self.drop_id = drop_idself.container = containerself.paths_created = []self.send_attachments = Falseself.fs_path = fs_dropbox_path = join(container.fs_path, drop_id)self.fs_attachment_container = join(self.fs_path, '')self.fs_cleansed_attachment_container = join(self.fs_path, '')self.fs_replies_path = join(self.fs_path, '')self.gpg_context = self.container.gpg_contextself.admins = self.settings['']if not exists(fs_dropbox_path):mkdir(fs_dropbox_path)chmod(fs_dropbox_path, )self.paths_created.append(fs_dropbox_path)self.status = ''self.editor_token = editor_token = generate_drop_id()self._write_message(fs_dropbox_path, '', editor_token)self.from_watchdog = from_watchdogelse:self.editor_token = open(join(self.fs_path, '')).readline()if self.from_watchdog:self.editors = [self.settings['']]else:self.editors = self.settings['']if message is not None:self._write_message(fs_dropbox_path, '', message)if attachments is not None:for attachment in attachments:if attachment is None:continueself.add_attachment(attachment)", 
"docstring": "the attachments are expected to conform to what the webob library uses for file uploads,\nnamely an instance of `cgi.FieldStorage` with the following attributes:\n - a file handle under the key `file`\n - the name of the file under `filename`", "id": "f10070:c1:m0"} {"signature": "def process(self):", "body": "if self.num_attachments > :self.status = ''fs_dirty_archive = self._create_backup()self._process_attachments()if self.status_int < and not self.send_attachments:self._create_archive()if self.status_int >= and self.status_int < :if '' in self.settings:shutil.move(fs_dirty_archive,'' % (self.container.fs_archive_dirty, self.drop_id))self.status = ''if self.status_int == :if '' in self.settings:shutil.move(fs_dirty_archive,'' % (self.container.fs_archive_dirty, self.drop_id))if self.status_int < or self.status_int == :try:if self._notify_editors() > :if self.status_int < :self.status = ''else:self.status = ''except Exception:import tracebacktb = traceback.format_exc()self.status = '' % tbself.cleanup()return self.status", "docstring": "Calls the external cleanser scripts to (optionally) purge the meta data and then\n send the contents of the dropbox via email.", "id": "f10070:c1:m3"} {"signature": "def cleanup(self):", "body": "try:remove(join(self.fs_path, ''))remove(join(self.fs_path, ''))except OSError:passshutil.rmtree(join(self.fs_path, ''), ignore_errors=True)shutil.rmtree(join(self.fs_path, ''), ignore_errors=True)", "docstring": "ensures that no data leaks from drop after processing by\n removing all data except the status file", "id": "f10070:c1:m4"} {"signature": "def add_reply(self, reply):", "body": "self._write_message(self.fs_replies_path, '', dumps(reply))", "docstring": "Add an editorial reply to the drop box.\n\n :param reply: the message, must conform to :class:`views.DropboxReplySchema`", "id": "f10070:c1:m5"} {"signature": "def _create_encrypted_zip(self, source='', fs_target_dir=None):", "body": "backup_recipients = [r for r in self.editors if checkRecipient(self.gpg_context, r)]if not backup_recipients:self.status = ''return self.statusfs_backup = join(self.fs_path, '' % source)if fs_target_dir is None:fs_backup_pgp = join(self.fs_path, '' % source)else:fs_backup_pgp = join(fs_target_dir, '' % self.drop_id)fs_source = dict(dirty=self.fs_dirty_attachments,clean=self.fs_cleansed_attachments)with ZipFile(fs_backup, '', ZIP_STORED) as backup:if exists(join(self.fs_path, '')):backup.write(join(self.fs_path, ''), arcname='')for fs_attachment in fs_source[source]:backup.write(fs_attachment, arcname=split(fs_attachment)[-])with open(fs_backup, \"\") as backup:self.gpg_context.encrypt_file(backup,backup_recipients,always_trust=True,output=fs_backup_pgp)remove(fs_backup)return fs_backup_pgp", "docstring": "creates a zip file from the drop and encrypts it to the editors.\n the encrypted archive is created inside fs_target_dir", "id": "f10070:c1:m6"} {"signature": "def _create_archive(self):", "body": "self.status = ''return self._create_encrypted_zip(source='', fs_target_dir=self.container.fs_archive_cleansed)", "docstring": "creates an encrypted archive of the dropbox outside of the drop directory.", "id": "f10070:c1:m9"} {"signature": "@propertydef num_attachments(self):", "body": "if exists(self.fs_attachment_container):return len(listdir(self.fs_attachment_container))else:return ", "docstring": "returns the current number of uploaded attachments in the filesystem", "id": "f10070:c1:m11"} {"signature": "@propertydef size_attachments(self):", "body": 
"total_size = for attachment in self.fs_cleansed_attachments:total_size += stat(attachment).st_sizereturn total_size", "docstring": "returns the number of bytes that the cleansed attachments take up on disk", "id": "f10070:c1:m12"} {"signature": "@propertydef replies(self):", "body": "fs_reply_path = join(self.fs_replies_path, '')if exists(fs_reply_path):return [load(open(fs_reply_path, ''))]else:return []", "docstring": "returns a list of strings", "id": "f10070:c1:m13"} {"signature": "@propertydef message(self):", "body": "try:with open(join(self.fs_path, '')) as message_file:return ''.join([line.decode('') for line in message_file.readlines()])except IOError:return ''", "docstring": "returns the user submitted text", "id": "f10070:c1:m14"} {"signature": "@message.setterdef message(self, newtext):", "body": "self._write_message(self.fs_path, '', newtext)", "docstring": "overwrite the message text. this also updates the corresponding file.", "id": "f10070:c1:m15"} {"signature": "@propertydef status_int(self):", "body": "return int(self.status.split()[])", "docstring": "returns the status as integer, so it can be used in comparisons", "id": "f10070:c1:m19"} {"signature": "@propertydef fs_dirty_attachments(self):", "body": "if exists(self.fs_attachment_container):return [join(self.fs_attachment_container, attachment)for attachment in listdir(self.fs_attachment_container)]else:return []", "docstring": "returns a list of absolute paths to the attachements", "id": "f10070:c1:m24"} {"signature": "@propertydef fs_cleansed_attachments(self):", "body": "if exists(self.fs_cleansed_attachment_container):return [join(self.fs_cleansed_attachment_container, attachment)for attachment in listdir(self.fs_cleansed_attachment_container)]else:return []", "docstring": "returns a list of absolute paths to the cleansed attachements", "id": "f10070:c1:m25"} {"signature": "@fixture()def config(request, settings):", "body": "config = setUp(settings=settings)request.addfinalizer(tearDown)return config", "docstring": "Sets up a Pyramid `Configurator` instance suitable for testing.", "id": "f10071:m4"} {"signature": "@fixturedef app(config):", "body": "from . import configurereturn configure({}, **config.registry.settings).make_wsgi_app()", "docstring": "Returns WSGI application wrapped in WebTest's testing interface.", "id": "f10071:m5"} {"signature": "def setup_smtp_factory(**settings):", "body": "return CustomSMTP(host=settings.get('', ''),port=int(settings.get('', )),user=settings.get(''),password=settings.get(''),timeout=float(settings.get('', )),)", "docstring": "expects a dictionary with 'mail.' 
keys to create an appropriate smtplib.SMTP instance", "id": "f10072:m0"} {"signature": "def sendMultiPart(smtp, gpg_context, sender, recipients, subject, text, attachments):", "body": "sent = for to in recipients:if not to.startswith(''):uid = '' % toelse:uid = toif not checkRecipient(gpg_context, uid):continuemsg = MIMEMultipart()msg[''] = sendermsg[''] = tomsg[''] = subjectmsg[\"\"] = formatdate(localtime=True)msg.preamble = u''attach = MIMEText(str(gpg_context.encrypt(text.encode(''), uid, always_trust=True)))attach.set_charset('')msg.attach(attach)for attachment in attachments:with open(attachment, '') as fp:attach = MIMEBase('', '')attach.set_payload(str(gpg_context.encrypt_file(fp, uid, always_trust=True)))attach.add_header('', '', filename=basename('' % attachment))msg.attach(attach)smtp.begin()smtp.sendmail(sender, to, msg.as_string())smtp.quit()sent += return sent", "docstring": "a helper method that composes and sends an email with attachments\n requires a pre-configured smtplib.SMTP instance", "id": "f10072:m2"} {"signature": "def begin(self):", "body": "self.connect(self.host, self.port)if self.user:self.starttls()self.login(self.user, self.password)", "docstring": "connects and optionally authenticates a connection.", "id": "f10072:c0:m1"} {"signature": "def generate_post_token(secret):", "body": "return URLSafeTimedSerializer(secret, salt=u'').dumps(generate_drop_id())", "docstring": "returns a URL safe, signed token that contains a UUID", "id": "f10073:m0"} {"signature": "def dropbox_post_factory(request):", "body": "try:max_age = int(request.registry.settings.get(''))except Exception:max_age = try:drop_id = parse_post_token(token=request.matchdict[''],secret=request.registry.settings[''],max_age=max_age)except SignatureExpired:raise HTTPGone('')except Exception: raise HTTPNotFound('')dropbox = request.registry.settings[''].get_dropbox(drop_id)if dropbox.status_int >= :raise HTTPGone('')return dropbox", "docstring": "receives a UUID via the request and returns either a fresh or an existing dropbox\n for it", "id": "f10073:m2"} {"signature": "def dropbox_factory(request):", "body": "try:return request.registry.settings[''].get_dropbox(request.matchdict[''])except KeyError:raise HTTPNotFound('')", "docstring": "expects the id of an existing dropbox and returns its instance", "id": "f10073:m3"} {"signature": "def is_equal(a, b):", "body": "if len(a) != len(b):return Falseresult = for x, y in zip(a, b):result |= ord(x) ^ ord(y)return result == ", "docstring": "a constant time comparison implementation taken from\n http://codahale.com/a-lesson-in-timing-attacks/ and\n Django's `util` module https://github.com/django/django/blob/master/django/utils/crypto.py#L82", "id": "f10073:m4"} {"signature": "def dropbox_editor_factory(request):", "body": "dropbox = dropbox_factory(request)if is_equal(dropbox.editor_token, request.matchdict[''].encode('')):return dropboxelse:raise HTTPNotFound('')", "docstring": "this factory also requires the editor token", "id": "f10073:m5"} {"signature": "def german_locale(request):", "body": "return ''", "docstring": "a 'negotiator' that always returns german", "id": "f10073:m6"} {"signature": "def main(global_config, **settings):", "body": "return configure(global_config, **settings).make_wsgi_app()", "docstring": "Configure and create the main application.", "id": "f10073:m8"} {"signature": "@fab.taskdef download_poudriere_assets():", "body": "download_distfiles()download_packages()", "docstring": "download ports tree, distfiles, and packages from 
poudriere", "id": "f10074:m6"} {"signature": "@fab.taskdef upload_poudriere_assets():", "body": "upload_distfiles()upload_packages()", "docstring": "upload local ports tree, distfiles, and packages from poudriere", "id": "f10074:m7"} {"signature": "@taskdef upload_theme():", "body": "get_vars()with fab.settings():local_theme_path = path.abspath(path.join(fab.env[''],fab.env.instance.config['']))rsync('','','' % local_theme_path,''.format(**AV))briefkasten_ctl('')", "docstring": "upload and/or update the theme with the current git state", "id": "f10075:m1"} {"signature": "@taskdef upload_pgp_keys():", "body": "get_vars()upload_target = ''with fab.settings(fab.hide('')):fab.run('' % upload_target)fab.run('' % upload_target)local_key_path = path.join(fab.env[''], fab.env.instance.config[''])remote_key_path = ''.format(**AV)rsync('', local_key_path, '' % upload_target)fab.run('' % (AV[''], remote_key_path))fab.run('' % remote_key_path)with fab.shell_env(GNUPGHOME=remote_key_path):fab.sudo('''''' % upload_target,user=AV[''], shell_escape=False)fab.run('' % upload_target)", "docstring": "upload and/or update the PGP keys for editors, import them into PGP", "id": "f10075:m2"} {"signature": "@taskdef upload_backend(index='', user=None):", "body": "get_vars()use_devpi(index=index)with fab.lcd(''):fab.local('')", "docstring": "Build the backend and upload it to the remote server at the given index", "id": "f10075:m3"} {"signature": "@taskdef update_backend(use_pypi=False, index='', build=True, user=None, version=None):", "body": "get_vars()if value_asbool(build):upload_backend(index=index, user=user)with fab.cd(''.format(**AV)):if value_asbool(use_pypi):command = ''else:command = ''.format(index=index,user=user,**AV)if version:command = '' % (command, version)fab.sudo(command)briefkasten_ctl('')", "docstring": "Install the backend from the given devpi index at the given version on the target host and restart the service.\n\nIf version is None, it defaults to the latest version\n\nOptionally, build and upload the application first from local sources. 
This requires a\nfull backend development environment on the machine running this command (pyramid etc.)", "id": "f10075:m5"} {"signature": "@taskdef reset_cleansers(confirm=True):", "body": "if value_asbool(confirm) and not yesno(\"\"\"\"\"\"):exit(\"\")get_vars()cleanser_count = AV['']fab.run('')for cleanser_index in range(cleanser_count):cindex = ''.format(cleanser_index + )fab.run(''.format(cindex=cindex))with fab.warn_only():fab.run(''.format(cindex=cindex))fab.run(''.format(cindex=cindex))fab.run(''.format(cindex=cindex))fab.run(''.format(cindex=cindex))with fab.warn_only():fab.run('')fab.run('')fab.run('')fab.run('')", "docstring": "destroys all cleanser slaves and their rollback snapshots, as well as the initial master\n snapshot - this allows re-running the jailhost deployment to recreate fresh cleansers.", "id": "f10077:m2"} {"signature": "@taskdef reset_jails(confirm=True, keep_cleanser_master=True):", "body": "if value_asbool(confirm) and not yesno(\"\"\"\"\"\"):exit(\"\")reset_cleansers(confirm=False)jails = ['', '', '']if not value_asbool(keep_cleanser_master):jails.append('')with fab.warn_only():for jail in jails:fab.run(''.format(jail=jail))fab.run('')", "docstring": "stops, deletes and re-creates all jails.\n since the cleanser master is rather large, that one is omitted by default.", "id": "f10077:m3"} {"signature": "def parse_command_line(self, argv=None):", "body": "conflicting_flags = set(['', '', ''])if len(conflicting_flags.intersection(set(argv))) > :raise serverextensions.ArgumentConflict('')return super(ToggleJupyterTensorboardApp,self).parse_command_line(argv)", "docstring": "Overridden to check for conflicting flags\nSince notebook version doesn't do it well (or, indeed, at all)", "id": "f10083:c0:m0"} {"signature": "def start(self):", "body": "if self.extra_args:sys.exit(''.format(self.name))else:if self._toggle_value:nbextensions.install_nbextension_python(_pkg_name, overwrite=True, symlink=False,user=self.user, sys_prefix=self.sys_prefix, prefix=None,nbextensions_dir=None, logger=None)else:nbextensions.uninstall_nbextension_python(_pkg_name, user=self.user, sys_prefix=self.sys_prefix,prefix=None, nbextensions_dir=None, logger=None)self.toggle_nbextension_python(_pkg_name)self.toggle_server_extension_python(_pkg_name)", "docstring": "Perform the App's actions as configured.", "id": "f10083:c0:m3"} {"signature": "def start(self):", "body": "super(JupyterTensorboardApp, self).start()subcmds = \"\".join(sorted(self.subcommands))sys.exit(\"\" % subcmds)", "docstring": "Perform the App's actions as configured", "id": "f10083:c3:m0"} {"signature": "def set(key, val):", "body": "global _opts_opts[key] = val", "docstring": "Set key to value.", "id": "f10088:m0"} {"signature": "def _pybossa_req(method, domain, id=None, payload=None, params={},headers={'': ''},files=None):", "body": "url = _opts[''] + '' + domainif id is not None:url += '' + str(id)if '' in _opts:params[''] = _opts['']if method == '':r = requests.get(url, params=params)elif method == '':if files is None and headers[''] == '':r = requests.post(url, params=params, headers=headers,data=json.dumps(payload))else:r = requests.post(url, params=params, files=files, data=payload)elif method == '':r = requests.put(url, params=params, headers=headers,data=json.dumps(payload))elif method == '':r = requests.delete(url, params=params, headers=headers,data=json.dumps(payload))if r.status_code // == :if r.text and r.text != '':return json.loads(r.text)else:return Trueelse:return json.loads(r.text)", "docstring": "Send a 
JSON request.\n\nReturns True if everything went well, otherwise it returns the status\ncode of the response.", "id": "f10088:m1"} {"signature": "def get_projects(limit=, offset=, last_id=None):", "body": "if last_id is not None:params = dict(limit=limit, last_id=last_id)else:print(OFFSET_WARNING)params = dict(limit=limit, offset=offset)try:res = _pybossa_req('', '',params=params)if type(res).__name__ == '':return [Project(project) for project in res]else:raise TypeErrorexcept: raise", "docstring": "Return a list of registered projects.\n\n :param limit: Number of returned items, default 100\n :type limit: integer\n :param offset: Offset for the query, default 0\n :type offset: integer\n :param last_id: id of the last project, used for pagination. If provided, offset is ignored\n :type last_id: integer\n :rtype: list\n :returns: A list of PYBOSSA Projects", "id": "f10088:m2"} {"signature": "def get_project(project_id):", "body": "try:res = _pybossa_req('', '', project_id)if res.get(''):return Project(res)else:return resexcept: raise", "docstring": "Return a PYBOSSA Project for the project_id.\n\n :param project_id: PYBOSSA Project ID\n :type project_id: integer\n :rtype: PYBOSSA Project\n :returns: A PYBOSSA Project object", "id": "f10088:m3"} {"signature": "def find_project(**kwargs):", "body": "try:res = _pybossa_req('', '', params=kwargs)if type(res).__name__ == '':return [Project(project) for project in res]else:return resexcept: raise", "docstring": "Return a list with matching project arguments.\n\n :param kwargs: PYBOSSA Project members\n :rtype: list\n :returns: A list of projects that match the kwargs", "id": "f10088:m4"} {"signature": "def create_project(name, short_name, description):", "body": "try:project = dict(name=name, short_name=short_name,description=description)res = _pybossa_req('', '', payload=project)if res.get(''):return Project(res)else:return resexcept: raise", "docstring": "Create a project.\n\n :param name: PYBOSSA Project Name\n :type name: string\n :param short_name: PYBOSSA Project short name or slug\n :type short_name: string\n :param description: PYBOSSA Project description\n :type description: string\n :returns: True -- the response status code", "id": "f10088:m5"} {"signature": "def update_project(project):", "body": "try:project_id = project.idproject = _forbidden_attributes(project)res = _pybossa_req('', '', project_id, payload=project.data)if res.get(''):return Project(res)else:return resexcept: raise", "docstring": "Update a project instance.\n\n :param project: PYBOSSA project\n :type project: PYBOSSA Project\n :returns: True -- the response status code", "id": "f10088:m6"} {"signature": "def delete_project(project_id):", "body": "try:res = _pybossa_req('', '', project_id)if type(res).__name__ == '':return Trueelse:return resexcept: raise", "docstring": "Delete a Project with id = project_id.\n\n :param project_id: PYBOSSA Project ID\n :type project_id: integer\n :returns: True -- the response status code", "id": "f10088:m7"} {"signature": "def get_categories(limit=, offset=, last_id=None):", "body": "if last_id is not None:params = dict(limit=limit, last_id=last_id)else:params = dict(limit=limit, offset=offset)print(OFFSET_WARNING)try:res = _pybossa_req('', '',params=params)if type(res).__name__ == '':return [Category(category) for category in res]else:raise TypeErrorexcept:raise", "docstring": "Return a list of registered categories.\n\n :param limit: Number of returned items, default 20\n :type limit: integer\n :param offset: Offset for the query, 
default 0\n :type offset: integer\n :param last_id: id of the last category, used for pagination. If provided, offset is ignored\n :type last_id: integer\n :rtype: list\n :returns: A list of PYBOSSA Categories", "id": "f10088:m8"} {"signature": "def get_category(category_id):", "body": "try:res = _pybossa_req('', '', category_id)if res.get(''):return Category(res)else:return resexcept: raise", "docstring": "Return a PYBOSSA Category for the category_id.\n\n :param category_id: PYBOSSA Category ID\n :type category_id: integer\n :rtype: PYBOSSA Category\n :returns: A PYBOSSA Category object", "id": "f10088:m9"} {"signature": "def find_category(**kwargs):", "body": "try:res = _pybossa_req('', '', params=kwargs)if type(res).__name__ == '':return [Category(category) for category in res]else:return resexcept: raise", "docstring": "Return a list with matching Category arguments.\n\n :param kwargs: PYBOSSA Category members\n :rtype: list\n :returns: A list of project that match the kwargs", "id": "f10088:m10"} {"signature": "def create_category(name, description):", "body": "try:category = dict(name=name, short_name=name.lower().replace(\"\", \"\"),description=description)res = _pybossa_req('', '', payload=category)if res.get(''):return Category(res)else:return resexcept: raise", "docstring": "Create a Category.\n\n :param name: PYBOSSA Category Name\n :type name: string\n :param description: PYBOSSA Category description\n :type decription: string\n :returns: True -- the response status code", "id": "f10088:m11"} {"signature": "def update_category(category):", "body": "try:res = _pybossa_req('', '',category.id, payload=category.data)if res.get(''):return Category(res)else:return resexcept: raise", "docstring": "Update a Category instance.\n\n :param category: PYBOSSA Category\n :type category: PYBOSSA Category\n :returns: True -- the response status code", "id": "f10088:m12"} {"signature": "def delete_category(category_id):", "body": "try:res = _pybossa_req('', '', category_id)if type(res).__name__ == '':return Trueelse:return resexcept: raise", "docstring": "Delete a Category with id = category_id.\n\n :param category_id: PYBOSSA Category ID\n :type category_id: integer\n :returns: True -- the response status code", "id": "f10088:m13"} {"signature": "def get_tasks(project_id, limit=, offset=, last_id=None):", "body": "if last_id is not None:params = dict(limit=limit, last_id=last_id)else:params = dict(limit=limit, offset=offset)print(OFFSET_WARNING)params[''] = project_idtry:res = _pybossa_req('', '',params=params)if type(res).__name__ == '':return [Task(task) for task in res]else:return resexcept: raise", "docstring": "Return a list of tasks for a given project ID.\n\n :param project_id: PYBOSSA Project ID\n :type project_id: integer\n :param limit: Number of returned items, default 100\n :type limit: integer\n :param offset: Offset for the query, default 0\n :param last_id: id of the last task, used for pagination. 
If provided, offset is ignored\n :type last_id: integer\n :type offset: integer\n :returns: True -- the response status code", "id": "f10088:m14"} {"signature": "def find_tasks(project_id, **kwargs):", "body": "try:kwargs[''] = project_idres = _pybossa_req('', '', params=kwargs)if type(res).__name__ == '':return [Task(task) for task in res]else:return resexcept: raise", "docstring": "Return a list of matched tasks for a given project ID.\n\n :param project_id: PYBOSSA Project ID\n :type project_id: integer\n :param kwargs: PYBOSSA Task members\n :type info: dict\n :rtype: list\n :returns: A list of tasks that match the kwargs", "id": "f10088:m15"} {"signature": "def create_task(project_id, info, n_answers=, priority_0=, quorum=):", "body": "try:task = dict(project_id=project_id,info=info,calibration=,priority_0=priority_0,n_answers=n_answers,quorum=quorum)res = _pybossa_req('', '', payload=task)if res.get(''):return Task(res)else:return resexcept: raise", "docstring": "Create a task for a given project ID.\n\n :param project_id: PYBOSSA Project ID\n :type project_id: integer\n :param info: PYBOSSA Project info JSON field\n :type info: dict\n :param n_answers: Number of answers or TaskRuns per task, default 30\n :type n_answers: integer\n :param priority_0: Value between 0 and 1 indicating priority of task within\n Project (higher = more important), default 0.0\n :type priority_0: float\n :param quorum: Number of times this task should be done by different users,\n default 0\n :type quorum: integer\n :returns: True -- the response status code", "id": "f10088:m16"} {"signature": "def update_task(task):", "body": "try:task_id = task.idtask = _forbidden_attributes(task)res = _pybossa_req('', '', task_id, payload=task.data)if res.get(''):return Task(res)else:return resexcept: raise", "docstring": "Update a task for a given task ID.\n\n :param task: PYBOSSA task", "id": "f10088:m17"} {"signature": "def delete_task(task_id):", "body": "try:res = _pybossa_req('', '', task_id)if type(res).__name__ == '':return Trueelse:return resexcept: raise", "docstring": "Delete a task for a given task ID.\n\n :param task: PYBOSSA task", "id": "f10088:m18"} {"signature": "def get_taskruns(project_id, limit=, offset=, last_id=None):", "body": "if last_id is not None:params = dict(limit=limit, last_id=last_id)else:params = dict(limit=limit, offset=offset)print(OFFSET_WARNING)params[''] = project_idtry:res = _pybossa_req('', '',params=params)if type(res).__name__ == '':return [TaskRun(taskrun) for taskrun in res]else:raise TypeErrorexcept:raise", "docstring": "Return a list of task runs for a given project ID.\n\n :param project_id: PYBOSSA Project ID\n :type project_id: integer\n :param limit: Number of returned items, default 100\n :type limit: integer\n :param offset: Offset for the query, default 0\n :type offset: integer\n :param last_id: id of the last taskrun, used for pagination. 
If provided, offset is ignored\n :type last_id: integer\n :rtype: list\n :returns: A list of task runs for the given project ID", "id": "f10088:m19"} {"signature": "def find_taskruns(project_id, **kwargs):", "body": "try:kwargs[''] = project_idres = _pybossa_req('', '', params=kwargs)if type(res).__name__ == '':return [TaskRun(taskrun) for taskrun in res]else:return resexcept: raise", "docstring": "Return a list of matched task runs for a given project ID.\n\n :param project_id: PYBOSSA Project ID\n :type project_id: integer\n :param kwargs: PYBOSSA Task Run members\n :rtype: list\n :returns: A List of task runs that match the query members", "id": "f10088:m20"} {"signature": "def delete_taskrun(taskrun_id):", "body": "try:res = _pybossa_req('', '', taskrun_id)if type(res).__name__ == '':return Trueelse:return resexcept: raise", "docstring": "Delete the given taskrun.\n\n :param task: PYBOSSA task", "id": "f10088:m21"} {"signature": "def get_results(project_id, limit=, offset=, last_id=None):", "body": "if last_id is not None:params = dict(limit=limit, last_id=last_id)else:params = dict(limit=limit, offset=offset)print(OFFSET_WARNING)params[''] = project_idtry:res = _pybossa_req('', '',params=params)if type(res).__name__ == '':return [Result(result) for result in res]else:return resexcept: raise", "docstring": "Return a list of results for a given project ID.\n\n :param project_id: PYBOSSA Project ID\n :type project_id: integer\n :param limit: Number of returned items, default 100\n :type limit: integer\n :param offset: Offset for the query, default 0\n :param last_id: id of the last result, used for pagination. If provided, offset is ignored\n :type last_id: integer\n :type offset: integer\n :returns: True -- the response status code", "id": "f10088:m22"} {"signature": "def find_results(project_id, **kwargs):", "body": "try:kwargs[''] = project_idres = _pybossa_req('', '', params=kwargs)if type(res).__name__ == '':return [Result(result) for result in res]else:return resexcept: raise", "docstring": "Return a list of matched results for a given project ID.\n\n :param project_id: PYBOSSA Project ID\n :type project_id: integer\n :param kwargs: PYBOSSA Results members\n :type info: dict\n :rtype: list\n :returns: A list of results that match the kwargs", "id": "f10088:m23"} {"signature": "def update_result(result):", "body": "try:result_id = result.idresult = _forbidden_attributes(result)res = _pybossa_req('', '', result_id, payload=result.data)if res.get(''):return Result(res)else:return resexcept: raise", "docstring": "Update a result for a given result ID.\n\n :param result: PYBOSSA result", "id": "f10088:m24"} {"signature": "def _forbidden_attributes(obj):", "body": "for key in list(obj.data.keys()):if key in list(obj.reserved_keys.keys()):obj.data.pop(key)return obj", "docstring": "Return the object without the forbidden attributes.", "id": "f10088:m25"} {"signature": "def create_helpingmaterial(project_id, info, media_url=None, file_path=None):", "body": "try:helping = dict(project_id=project_id,info=info,media_url=None,)if file_path:files = {'': open(file_path, '')}payload = {'': project_id}res = _pybossa_req('', '',payload=payload, files=files)else:res = _pybossa_req('', '', payload=helping)if res.get(''):return HelpingMaterial(res)else:return resexcept: raise", "docstring": "Create a helping material for a given project ID.\n\n :param project_id: PYBOSSA Project ID\n :type project_id: integer\n :param info: PYBOSSA Helping Material info JSON field\n :type info: dict\n :param 
media_url: URL for a media file (image, video or audio)\n :type media_url: string\n :param file_path: File path to the local image, video or sound to upload. \n :type file_path: string\n :returns: True -- the response status code", "id": "f10088:m26"} {"signature": "def get_helping_materials(project_id, limit=, offset=, last_id=None):", "body": "if last_id is not None:params = dict(limit=limit, last_id=last_id)else:params = dict(limit=limit, offset=offset)print(OFFSET_WARNING)params[''] = project_idtry:res = _pybossa_req('', '',params=params)if type(res).__name__ == '':return [HelpingMaterial(helping) for helping in res]else:return resexcept: raise", "docstring": "Return a list of helping materials for a given project ID.\n\n :param project_id: PYBOSSA Project ID\n :type project_id: integer\n :param limit: Number of returned items, default 100\n :type limit: integer\n :param offset: Offset for the query, default 0\n :param last_id: id of the last helping material, used for pagination. If provided, offset is ignored\n :type last_id: integer\n :type offset: integer\n :returns: True -- the response status code", "id": "f10088:m27"} {"signature": "def find_helping_materials(project_id, **kwargs):", "body": "try:kwargs[''] = project_idres = _pybossa_req('', '', params=kwargs)if type(res).__name__ == '':return [HelpingMaterial(helping) for helping in res]else:return resexcept: raise", "docstring": "Return a list of matched helping materials for a given project ID.\n\n :param project_id: PYBOSSA Project ID\n :type project_id: integer\n :param kwargs: PYBOSSA HelpingMaterial members\n :type info: dict\n :rtype: list\n :returns: A list of helping materials that match the kwargs", "id": "f10088:m28"} {"signature": "def update_helping_material(helpingmaterial):", "body": "try:helpingmaterial_id = helpingmaterial.idhelpingmaterial = _forbidden_attributes(helpingmaterial)res = _pybossa_req('', '',helpingmaterial_id, payload=helpingmaterial.data)if res.get(''):return HelpingMaterial(res)else:return resexcept: raise", "docstring": "Update a helping material for a given helping material ID.\n\n :param helpingmaterial: PYBOSSA helping material", "id": "f10088:m29"} {"signature": "def __init__(self, data):", "body": "self.__dict__[''] = data", "docstring": "Init method.", "id": "f10088:c0:m0"} {"signature": "def __getattr__(self, name):", "body": "data = self.__dict__['']if name == '':return dataif name in data:return data[name]raise AttributeError('' + name)", "docstring": "Get attribute.", "id": "f10088:c0:m1"} {"signature": "def __setattr__(self, name, value):", "body": "data = self.__dict__['']if name == '':self.__dict__[''] = valuereturn Trueif name in data:data[name] = valuereturn Trueraise AttributeError('' + name)", "docstring": "Set attribute.", "id": "f10088:c0:m2"} {"signature": "def __repr__(self): ", "body": "tmp = '' + self.short_name + '' + str(self.id) + ''return tmp", "docstring": "Return a representation.", "id": "f10088:c1:m0"} {"signature": "def __repr__(self): ", "body": "tmp = ('' + self.short_name + ''+ str(self.id) + '')return tmp", "docstring": "Return a representation.", "id": "f10088:c2:m0"} {"signature": "def __repr__(self): ", "body": "return '' + str(self.id) + ''", "docstring": "Return a represenation.", "id": "f10088:c3:m0"} {"signature": "def __repr__(self): ", "body": "return '' + str(self.id) + ''", "docstring": "Return representation.", "id": "f10088:c4:m0"} {"signature": "def __repr__(self): ", "body": "return '' + str(self.id) + ''", "docstring": "Return 
representation.", "id": "f10088:c5:m0"} {"signature": "def __repr__(self): ", "body": "return '' + str(self.id) + ''", "docstring": "Return representation.", "id": "f10088:c6:m0"} {"signature": "def get_keywords():", "body": "git_refnames = \"\"git_full = \"\"keywords = {\"\": git_refnames, \"\": git_full}return keywords", "docstring": "Get the keywords needed to look up the version information.", "id": "f10100:m0"} {"signature": "def get_config():", "body": "cfg = VersioneerConfig()cfg.VCS = \"\"cfg.style = \"\"cfg.tag_prefix = \"\"cfg.parentdir_prefix = \"\"cfg.versionfile_source = \"\"cfg.verbose = Falsereturn cfg", "docstring": "Create, populate and return the VersioneerConfig() object.", "id": "f10100:m1"} {"signature": "def register_vcs_handler(vcs, method): ", "body": "def decorate(f):\"\"\"\"\"\"if vcs not in HANDLERS:HANDLERS[vcs] = {}HANDLERS[vcs][method] = freturn freturn decorate", "docstring": "Decorator to mark a method as the handler for a particular VCS.", "id": "f10100:m2"} {"signature": "def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):", "body": "assert isinstance(commands, list)p = Nonefor c in commands:try:dispcmd = str([c] + args)p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,stderr=(subprocess.PIPE if hide_stderrelse None))breakexcept EnvironmentError:e = sys.exc_info()[]if e.errno == errno.ENOENT:continueif verbose:print(\"\" % dispcmd)print(e)return Noneelse:if verbose:print(\"\" % (commands,))return Nonestdout = p.communicate()[].strip()if sys.version_info[] >= :stdout = stdout.decode()if p.returncode != :if verbose:print(\"\" % dispcmd)return Nonereturn stdout", "docstring": "Call the given command(s).", "id": "f10100:m3"} {"signature": "def versions_from_parentdir(parentdir_prefix, root, verbose):", "body": "dirname = os.path.basename(root)if not dirname.startswith(parentdir_prefix):if verbose:print(\"\"\"\" % (root, dirname, parentdir_prefix))raise NotThisMethod(\"\")return {\"\": dirname[len(parentdir_prefix):],\"\": None,\"\": False, \"\": None}", "docstring": "Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes\n both the project name and a version string.", "id": "f10100:m4"} {"signature": "@register_vcs_handler(\"\", \"\")def git_get_keywords(versionfile_abs):", "body": "keywords = {}try:f = open(versionfile_abs, \"\")for line in f.readlines():if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()f.close()except EnvironmentError:passreturn keywords", "docstring": "Extract version information from the given file.", "id": "f10100:m5"} {"signature": "@register_vcs_handler(\"\", \"\")def git_versions_from_keywords(keywords, tag_prefix, verbose):", "body": "if not keywords:raise NotThisMethod(\"\")refnames = keywords[\"\"].strip()if refnames.startswith(\"\"):if verbose:print(\"\")raise NotThisMethod(\"\")refs = set([r.strip() for r in refnames.strip(\"\").split(\"\")])TAG = \"\"tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])if not tags:tags = set([r for r in refs if re.search(r'', r)])if verbose:print(\"\" % \"\".join(refs-tags))if verbose:print(\"\" % \"\".join(sorted(tags)))for ref in sorted(tags):if ref.startswith(tag_prefix):r = ref[len(tag_prefix):]if verbose:print(\"\" % r)return {\"\": r,\"\": keywords[\"\"].strip(),\"\": False, \"\": None}if verbose:print(\"\")return {\"\": \"\",\"\": 
keywords[\"\"].strip(),\"\": False, \"\": \"\"}", "docstring": "Get version information from git keywords.", "id": "f10100:m6"} {"signature": "@register_vcs_handler(\"\", \"\")def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):", "body": "if not os.path.exists(os.path.join(root, \"\")):if verbose:print(\"\" % root)raise NotThisMethod(\"\")GITS = [\"\"]if sys.platform == \"\":GITS = [\"\", \"\"]describe_out = run_command(GITS, [\"\", \"\", \"\",\"\", \"\",\"\", \"\" % tag_prefix],cwd=root)if describe_out is None:raise NotThisMethod(\"\")describe_out = describe_out.strip()full_out = run_command(GITS, [\"\", \"\"], cwd=root)if full_out is None:raise NotThisMethod(\"\")full_out = full_out.strip()pieces = {}pieces[\"\"] = full_outpieces[\"\"] = full_out[:] pieces[\"\"] = Nonegit_describe = describe_outdirty = git_describe.endswith(\"\")pieces[\"\"] = dirtyif dirty:git_describe = git_describe[:git_describe.rindex(\"\")]if \"\" in git_describe:mo = re.search(r'', git_describe)if not mo:pieces[\"\"] = (\"\"% describe_out)return piecesfull_tag = mo.group()if not full_tag.startswith(tag_prefix):if verbose:fmt = \"\"print(fmt % (full_tag, tag_prefix))pieces[\"\"] = (\"\"% (full_tag, tag_prefix))return piecespieces[\"\"] = full_tag[len(tag_prefix):]pieces[\"\"] = int(mo.group())pieces[\"\"] = mo.group()else:pieces[\"\"] = Nonecount_out = run_command(GITS, [\"\", \"\", \"\"],cwd=root)pieces[\"\"] = int(count_out) return pieces", "docstring": "Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source tree.", "id": "f10100:m7"} {"signature": "def plus_or_dot(pieces):", "body": "if \"\" in pieces.get(\"\", \"\"):return \"\"return \"\"", "docstring": "Return a + if we don't already have one, else return a .", "id": "f10100:m8"} {"signature": "def render_pep440(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += plus_or_dot(pieces)rendered += \"\" % (pieces[\"\"], pieces[\"\"])if pieces[\"\"]:rendered += \"\"else:rendered = \"\" % (pieces[\"\"],pieces[\"\"])if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]", "id": "f10100:m9"} {"signature": "def render_pep440_pre(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\" % pieces[\"\"]else:rendered = \"\" % pieces[\"\"]return rendered", "docstring": "TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE", "id": "f10100:m10"} {"signature": "def render_pep440_post(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"rendered += plus_or_dot(pieces)rendered += \"\" % pieces[\"\"]else:rendered = \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"rendered += \"\" % pieces[\"\"]return rendered", "docstring": "TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. 
Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 0.postDISTANCE[.dev0]", "id": "f10100:m11"} {"signature": "def render_pep440_old(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"else:rendered = \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Eexceptions:\n 1: no tags. 0.postDISTANCE[.dev0]", "id": "f10100:m12"} {"signature": "def render_git_describe(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\" % (pieces[\"\"], pieces[\"\"])else:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)", "id": "f10100:m13"} {"signature": "def render_git_describe_long(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]rendered += \"\" % (pieces[\"\"], pieces[\"\"])else:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always -long'.\n The distance/hash is unconditional.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)", "id": "f10100:m14"} {"signature": "def render(pieces, style):", "body": "if pieces[\"\"]:return {\"\": \"\",\"\": pieces.get(\"\"),\"\": None,\"\": pieces[\"\"]}if not style or style == \"\":style = \"\" if style == \"\":rendered = render_pep440(pieces)elif style == \"\":rendered = render_pep440_pre(pieces)elif style == \"\":rendered = render_pep440_post(pieces)elif style == \"\":rendered = render_pep440_old(pieces)elif style == \"\":rendered = render_git_describe(pieces)elif style == \"\":rendered = render_git_describe_long(pieces)else:raise ValueError(\"\" % style)return {\"\": rendered, \"\": pieces[\"\"],\"\": pieces[\"\"], \"\": None}", "docstring": "Render the given version pieces into the requested style.", "id": "f10100:m15"} {"signature": "def get_versions():", "body": "cfg = get_config()verbose = cfg.verbosetry:return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,verbose)except NotThisMethod:passtry:root = os.path.realpath(__file__)for i in cfg.versionfile_source.split(''):root = os.path.dirname(root)except NameError:return {\"\": \"\", \"\": None,\"\": None,\"\": \"\"}try:pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)return render(pieces, cfg.style)except NotThisMethod:passtry:if cfg.parentdir_prefix:return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)except NotThisMethod:passreturn {\"\": \"\", \"\": None,\"\": None,\"\": \"\"}", "docstring": "Get version information or return default if unable to do so.", "id": "f10100:m16"} {"signature": "def read_stream(schema, stream, *, buffer_size=io.DEFAULT_BUFFER_SIZE):", "body": "reader = _lancaster.Reader(schema)buf = stream.read(buffer_size)remainder = b''while len(buf) > :values, n = reader.read_seq(buf)yield from valuesremainder = buf[n:]buf = stream.read(buffer_size)if len(buf) > and len(remainder) > :ba = bytearray()ba.extend(remainder)ba.extend(buf)buf = memoryview(ba).tobytes()if len(remainder) > :raise EOFError(''''.format(len(remainder)))", "docstring": "Using a schema, deserialize a stream of consecutive 
Avro values.\n\n :param str schema: json string representing the Avro schema\n :param file-like stream: a buffered stream of binary input\n :param int buffer_size: size of bytes to read from the stream each time\n :return: yields a sequence of python data structures deserialized\n from the stream", "id": "f10101:m0"} {"signature": "def read_stream_tuples(schema, stream, *, buffer_size=io.DEFAULT_BUFFER_SIZE):", "body": "reader = _lancaster.Reader(schema, _get_datetime_flags(schema))buf = stream.read(buffer_size)remainder = b''while len(buf) > :values, n = reader.read_seq_tuples(buf)yield from valuesremainder = buf[n:]buf = stream.read(buffer_size)if len(buf) > and len(remainder) > :ba = bytearray()ba.extend(remainder)ba.extend(buf)buf = memoryview(ba).tobytes()if len(remainder) > :raise EOFError(''''.format(len(remainder)))", "docstring": "Using a schema, deserialize a stream of consecutive Avro values\n into tuples.\n\n This assumes the input is avro records of simple values (numbers,\n strings, etc.).\n\n :param str schema: json string representing the Avro schema, field\n names may include 'is_datetime' boolean fields to force\n decoding long values of epoch nanoseconds into datetime\n objects\n :param file-like stream: a buffered stream of binary input\n :param int buffer_size: size of bytes to read from the stream each time\n :return: yields a sequence of python tuples deserialized from the stream", "id": "f10101:m2"} {"signature": "def get_root():", "body": "root = os.path.realpath(os.path.abspath(os.getcwd()))setup_py = os.path.join(root, \"\")versioneer_py = os.path.join(root, \"\")if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[])))setup_py = os.path.join(root, \"\")versioneer_py = os.path.join(root, \"\")if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):err = (\"\"\"\"\"\"\"\"\"\")raise VersioneerBadRootError(err)try:me = os.path.realpath(os.path.abspath(__file__))if os.path.splitext(me)[] != os.path.splitext(versioneer_py)[]:print(\"\"% (os.path.dirname(me), versioneer_py))except NameError:passreturn root", "docstring": "Get the project root directory.\n\n We require that all commands are run from the project root, i.e. 
the\n directory that contains setup.py, setup.cfg, and versioneer.py .", "id": "f10103:m0"} {"signature": "def get_config_from_root(root):", "body": "setup_cfg = os.path.join(root, \"\")parser = configparser.SafeConfigParser()with open(setup_cfg, \"\") as f:parser.readfp(f)VCS = parser.get(\"\", \"\") def get(parser, name):if parser.has_option(\"\", name):return parser.get(\"\", name)return Nonecfg = VersioneerConfig()cfg.VCS = VCScfg.style = get(parser, \"\") or \"\"cfg.versionfile_source = get(parser, \"\")cfg.versionfile_build = get(parser, \"\")cfg.tag_prefix = get(parser, \"\")if cfg.tag_prefix in (\"\", ''):cfg.tag_prefix = \"\"cfg.parentdir_prefix = get(parser, \"\")cfg.verbose = get(parser, \"\")return cfg", "docstring": "Read the project setup.cfg file to determine Versioneer config.", "id": "f10103:m1"} {"signature": "def register_vcs_handler(vcs, method): ", "body": "def decorate(f):\"\"\"\"\"\"if vcs not in HANDLERS:HANDLERS[vcs] = {}HANDLERS[vcs][method] = freturn freturn decorate", "docstring": "Decorator to mark a method as the handler for a particular VCS.", "id": "f10103:m2"} {"signature": "def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):", "body": "assert isinstance(commands, list)p = Nonefor c in commands:try:dispcmd = str([c] + args)p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,stderr=(subprocess.PIPE if hide_stderrelse None))breakexcept EnvironmentError:e = sys.exc_info()[]if e.errno == errno.ENOENT:continueif verbose:print(\"\" % dispcmd)print(e)return Noneelse:if verbose:print(\"\" % (commands,))return Nonestdout = p.communicate()[].strip()if sys.version_info[] >= :stdout = stdout.decode()if p.returncode != :if verbose:print(\"\" % dispcmd)return Nonereturn stdout", "docstring": "Call the given command(s).", "id": "f10103:m3"} {"signature": "@register_vcs_handler(\"\", \"\")def git_get_keywords(versionfile_abs):", "body": "keywords = {}try:f = open(versionfile_abs, \"\")for line in f.readlines():if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()f.close()except EnvironmentError:passreturn keywords", "docstring": "Extract version information from the given file.", "id": "f10103:m4"} {"signature": "@register_vcs_handler(\"\", \"\")def git_versions_from_keywords(keywords, tag_prefix, verbose):", "body": "if not keywords:raise NotThisMethod(\"\")refnames = keywords[\"\"].strip()if refnames.startswith(\"\"):if verbose:print(\"\")raise NotThisMethod(\"\")refs = set([r.strip() for r in refnames.strip(\"\").split(\"\")])TAG = \"\"tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])if not tags:tags = set([r for r in refs if re.search(r'', r)])if verbose:print(\"\" % \"\".join(refs-tags))if verbose:print(\"\" % \"\".join(sorted(tags)))for ref in sorted(tags):if ref.startswith(tag_prefix):r = ref[len(tag_prefix):]if verbose:print(\"\" % r)return {\"\": r,\"\": keywords[\"\"].strip(),\"\": False, \"\": None}if verbose:print(\"\")return {\"\": \"\",\"\": keywords[\"\"].strip(),\"\": False, \"\": \"\"}", "docstring": "Get version information from git keywords.", "id": "f10103:m5"} {"signature": "@register_vcs_handler(\"\", \"\")def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):", "body": "if not os.path.exists(os.path.join(root, \"\")):if verbose:print(\"\" % root)raise NotThisMethod(\"\")GITS = [\"\"]if sys.platform == \"\":GITS = [\"\", \"\"]describe_out = 
run_command(GITS, [\"\", \"\", \"\",\"\", \"\",\"\", \"\" % tag_prefix],cwd=root)if describe_out is None:raise NotThisMethod(\"\")describe_out = describe_out.strip()full_out = run_command(GITS, [\"\", \"\"], cwd=root)if full_out is None:raise NotThisMethod(\"\")full_out = full_out.strip()pieces = {}pieces[\"\"] = full_outpieces[\"\"] = full_out[:] pieces[\"\"] = Nonegit_describe = describe_outdirty = git_describe.endswith(\"\")pieces[\"\"] = dirtyif dirty:git_describe = git_describe[:git_describe.rindex(\"\")]if \"\" in git_describe:mo = re.search(r'', git_describe)if not mo:pieces[\"\"] = (\"\"% describe_out)return piecesfull_tag = mo.group()if not full_tag.startswith(tag_prefix):if verbose:fmt = \"\"print(fmt % (full_tag, tag_prefix))pieces[\"\"] = (\"\"% (full_tag, tag_prefix))return piecespieces[\"\"] = full_tag[len(tag_prefix):]pieces[\"\"] = int(mo.group())pieces[\"\"] = mo.group()else:pieces[\"\"] = Nonecount_out = run_command(GITS, [\"\", \"\", \"\"],cwd=root)pieces[\"\"] = int(count_out) return pieces", "docstring": "Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source tree.", "id": "f10103:m6"} {"signature": "def do_vcs_install(manifest_in, versionfile_source, ipy):", "body": "GITS = [\"\"]if sys.platform == \"\":GITS = [\"\", \"\"]files = [manifest_in, versionfile_source]if ipy:files.append(ipy)try:me = __file__if me.endswith(\"\") or me.endswith(\"\"):me = os.path.splitext(me)[] + \"\"versioneer_file = os.path.relpath(me)except NameError:versioneer_file = \"\"files.append(versioneer_file)present = Falsetry:f = open(\"\", \"\")for line in f.readlines():if line.strip().startswith(versionfile_source):if \"\" in line.strip().split()[:]:present = Truef.close()except EnvironmentError:passif not present:f = open(\"\", \"\")f.write(\"\" % versionfile_source)f.close()files.append(\"\")run_command(GITS, [\"\", \"\"] + files)", "docstring": "Git-specific installation logic for Versioneer.\n\n For Git, this means creating/changing .gitattributes to mark _version.py\n for export-time keyword substitution.", "id": "f10103:m7"} {"signature": "def versions_from_parentdir(parentdir_prefix, root, verbose):", "body": "dirname = os.path.basename(root)if not dirname.startswith(parentdir_prefix):if verbose:print(\"\"\"\" % (root, dirname, parentdir_prefix))raise NotThisMethod(\"\")return {\"\": dirname[len(parentdir_prefix):],\"\": None,\"\": False, \"\": None}", "docstring": "Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes\n both the project name and a version string.", "id": "f10103:m8"} {"signature": "def versions_from_file(filename):", "body": "try:with open(filename) as f:contents = f.read()except EnvironmentError:raise NotThisMethod(\"\")mo = re.search(r\"\",contents, re.M | re.S)if not mo:raise NotThisMethod(\"\")return json.loads(mo.group())", "docstring": "Try to determine the version from _version.py if present.", "id": "f10103:m9"} {"signature": "def write_to_version_file(filename, versions):", "body": "os.unlink(filename)contents = json.dumps(versions, sort_keys=True,indent=, separators=(\"\", \"\"))with open(filename, \"\") as f:f.write(SHORT_VERSION_PY % contents)print(\"\" % (filename, versions[\"\"]))", "docstring": "Write the given version number to the given _version.py file.", "id": 
"f10103:m10"} {"signature": "def plus_or_dot(pieces):", "body": "if \"\" in pieces.get(\"\", \"\"):return \"\"return \"\"", "docstring": "Return a + if we don't already have one, else return a .", "id": "f10103:m11"} {"signature": "def render_pep440(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += plus_or_dot(pieces)rendered += \"\" % (pieces[\"\"], pieces[\"\"])if pieces[\"\"]:rendered += \"\"else:rendered = \"\" % (pieces[\"\"],pieces[\"\"])if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]", "id": "f10103:m12"} {"signature": "def render_pep440_pre(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\" % pieces[\"\"]else:rendered = \"\" % pieces[\"\"]return rendered", "docstring": "TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE", "id": "f10103:m13"} {"signature": "def render_pep440_post(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"rendered += plus_or_dot(pieces)rendered += \"\" % pieces[\"\"]else:rendered = \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"rendered += \"\" % pieces[\"\"]return rendered", "docstring": "TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 0.postDISTANCE[.dev0]", "id": "f10103:m14"} {"signature": "def render_pep440_old(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"else:rendered = \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Eexceptions:\n 1: no tags. 0.postDISTANCE[.dev0]", "id": "f10103:m15"} {"signature": "def render_git_describe(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\" % (pieces[\"\"], pieces[\"\"])else:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)", "id": "f10103:m16"} {"signature": "def render_git_describe_long(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]rendered += \"\" % (pieces[\"\"], pieces[\"\"])else:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always -long'.\n The distance/hash is unconditional.\n\n Exceptions:\n 1: no tags. 
HEX[-dirty] (note: no 'g' prefix)", "id": "f10103:m17"} {"signature": "def render(pieces, style):", "body": "if pieces[\"\"]:return {\"\": \"\",\"\": pieces.get(\"\"),\"\": None,\"\": pieces[\"\"]}if not style or style == \"\":style = \"\" if style == \"\":rendered = render_pep440(pieces)elif style == \"\":rendered = render_pep440_pre(pieces)elif style == \"\":rendered = render_pep440_post(pieces)elif style == \"\":rendered = render_pep440_old(pieces)elif style == \"\":rendered = render_git_describe(pieces)elif style == \"\":rendered = render_git_describe_long(pieces)else:raise ValueError(\"\" % style)return {\"\": rendered, \"\": pieces[\"\"],\"\": pieces[\"\"], \"\": None}", "docstring": "Render the given version pieces into the requested style.", "id": "f10103:m18"} {"signature": "def get_versions(verbose=False):", "body": "if \"\" in sys.modules:del sys.modules[\"\"]root = get_root()cfg = get_config_from_root(root)assert cfg.VCS is not None, \"\"handlers = HANDLERS.get(cfg.VCS)assert handlers, \"\" % cfg.VCSverbose = verbose or cfg.verboseassert cfg.versionfile_source is not None,\"\"assert cfg.tag_prefix is not None, \"\"versionfile_abs = os.path.join(root, cfg.versionfile_source)get_keywords_f = handlers.get(\"\")from_keywords_f = handlers.get(\"\")if get_keywords_f and from_keywords_f:try:keywords = get_keywords_f(versionfile_abs)ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)if verbose:print(\"\" % ver)return verexcept NotThisMethod:passtry:ver = versions_from_file(versionfile_abs)if verbose:print(\"\" % (versionfile_abs, ver))return verexcept NotThisMethod:passfrom_vcs_f = handlers.get(\"\")if from_vcs_f:try:pieces = from_vcs_f(cfg.tag_prefix, root, verbose)ver = render(pieces, cfg.style)if verbose:print(\"\" % ver)return verexcept NotThisMethod:passtry:if cfg.parentdir_prefix:ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)if verbose:print(\"\" % ver)return verexcept NotThisMethod:passif verbose:print(\"\")return {\"\": \"\", \"\": None,\"\": None, \"\": \"\"}", "docstring": "Get the project version from whatever source is available.\n\n Returns dict with two keys: 'version' and 'full'.", "id": "f10103:m19"} {"signature": "def get_version():", "body": "return get_versions()[\"\"]", "docstring": "Get the short version string for this project.", "id": "f10103:m20"} {"signature": "def get_cmdclass():", "body": "if \"\" in sys.modules:del sys.modules[\"\"]cmds = {}from distutils.core import Commandclass cmd_version(Command):description = \"\"user_options = []boolean_options = []def initialize_options(self):passdef finalize_options(self):passdef run(self):vers = get_versions(verbose=True)print(\"\" % vers[\"\"])print(\"\" % vers.get(\"\"))print(\"\" % vers.get(\"\"))if vers[\"\"]:print(\"\" % vers[\"\"])cmds[\"\"] = cmd_versionif \"\" in sys.modules:from setuptools.command.build_py import build_py as _build_pyelse:from distutils.command.build_py import build_py as _build_pyclass cmd_build_py(_build_py):def run(self):root = get_root()cfg = get_config_from_root(root)versions = get_versions()_build_py.run(self)if cfg.versionfile_build:target_versionfile = os.path.join(self.build_lib,cfg.versionfile_build)print(\"\" % target_versionfile)write_to_version_file(target_versionfile, versions)cmds[\"\"] = cmd_build_pyif \"\" in sys.modules: from cx_Freeze.dist import build_exe as _build_execlass cmd_build_exe(_build_exe):def run(self):root = get_root()cfg = get_config_from_root(root)versions = get_versions()target_versionfile = cfg.versionfile_sourceprint(\"\" % 
target_versionfile)write_to_version_file(target_versionfile, versions)_build_exe.run(self)os.unlink(target_versionfile)with open(cfg.versionfile_source, \"\") as f:LONG = LONG_VERSION_PY[cfg.VCS]f.write(LONG %{\"\": \"\",\"\": cfg.style,\"\": cfg.tag_prefix,\"\": cfg.parentdir_prefix,\"\": cfg.versionfile_source,})cmds[\"\"] = cmd_build_exedel cmds[\"\"]if \"\" in sys.modules:from setuptools.command.sdist import sdist as _sdistelse:from distutils.command.sdist import sdist as _sdistclass cmd_sdist(_sdist):def run(self):versions = get_versions()self._versioneer_generated_versions = versionsself.distribution.metadata.version = versions[\"\"]return _sdist.run(self)def make_release_tree(self, base_dir, files):root = get_root()cfg = get_config_from_root(root)_sdist.make_release_tree(self, base_dir, files)target_versionfile = os.path.join(base_dir, cfg.versionfile_source)print(\"\" % target_versionfile)write_to_version_file(target_versionfile,self._versioneer_generated_versions)cmds[\"\"] = cmd_sdistreturn cmds", "docstring": "Get the custom setuptools/distutils subclasses used by Versioneer.", "id": "f10103:m21"} {"signature": "def do_setup():", "body": "root = get_root()try:cfg = get_config_from_root(root)except (EnvironmentError, configparser.NoSectionError,configparser.NoOptionError) as e:if isinstance(e, (EnvironmentError, configparser.NoSectionError)):print(\"\",file=sys.stderr)with open(os.path.join(root, \"\"), \"\") as f:f.write(SAMPLE_CONFIG)print(CONFIG_ERROR, file=sys.stderr)return print(\"\" % cfg.versionfile_source)with open(cfg.versionfile_source, \"\") as f:LONG = LONG_VERSION_PY[cfg.VCS]f.write(LONG % {\"\": \"\",\"\": cfg.style,\"\": cfg.tag_prefix,\"\": cfg.parentdir_prefix,\"\": cfg.versionfile_source,})ipy = os.path.join(os.path.dirname(cfg.versionfile_source),\"\")if os.path.exists(ipy):try:with open(ipy, \"\") as f:old = f.read()except EnvironmentError:old = \"\"if INIT_PY_SNIPPET not in old:print(\"\" % ipy)with open(ipy, \"\") as f:f.write(INIT_PY_SNIPPET)else:print(\"\" % ipy)else:print(\"\" % ipy)ipy = Nonemanifest_in = os.path.join(root, \"\")simple_includes = set()try:with open(manifest_in, \"\") as f:for line in f:if line.startswith(\"\"):for include in line.split()[:]:simple_includes.add(include)except EnvironmentError:passif \"\" not in simple_includes:print(\"\")with open(manifest_in, \"\") as f:f.write(\"\")else:print(\"\")if cfg.versionfile_source not in simple_includes:print(\"\" %cfg.versionfile_source)with open(manifest_in, \"\") as f:f.write(\"\" % cfg.versionfile_source)else:print(\"\")do_vcs_install(manifest_in, cfg.versionfile_source, ipy)return ", "docstring": "Main VCS-independent setup function for installing Versioneer.", "id": "f10103:m22"} {"signature": "def scan_setup_py():", "body": "found = set()setters = Falseerrors = with open(\"\", \"\") as f:for line in f.readlines():if \"\" in line:found.add(\"\")if \"\" in line:found.add(\"\")if \"\" in line:found.add(\"\")if \"\" in line:setters = Trueif \"\" in line:setters = Trueif len(found) != :print(\"\")print(\"\")print(\"\")print(\"\")print(\"\")print(\"\")print(\"\")print(\"\")print(\"\")errors += if setters:print(\"\")print(\"\")print(\"\")print(\"\")errors += return errors", "docstring": "Validate the contents of setup.py against Versioneer's expectations.", "id": "f10103:m23"} {"signature": "def parse(self, text):", "body": "return parser.parse(text).parsed", "docstring": "Simple convenience function to unwrap the array of parameters.", "id": "f10106:c0:m0"} {"signature": "def 
_load_fixture(filename):", "body": "with open(filename, '') as stream:content = stream.read().decode('')data = json.loads(content)session = Session()for item in data:table = Base.metadata.tables[item[''].split('')[-]]item[''][''] = item['']session.connection().execute(table.insert().values(**item['']))session.commit()", "docstring": "Loads the passed fixture into the database following the\ndjango format.", "id": "f10110:m0"} {"signature": "def access_control_headers(self, header):", "body": "response, _ = self.client.options(self.right_path)assert response.status == http.client.OKassert header not in responseresponse, _ = self.client.options(self.right_path,headers={'': self.right_origin,'': self.default_method})assert response.status == http.client.OKassert header not in responseresponse, _ = self.client.options(self.right_path, headers={'': ''})assert response.status == http.client.OKassert header not in responseresponse, _ = self.client.options(self.left_path)assert response.status == http.client.OKassert header not in responseresponse, _ = self.client.options(self.left_path,headers={'': self.left_origin,'': self.default_method})assert response.status == http.client.OKassert header not in response", "docstring": "Test each Access-Control-Allow header here, since they\nall do the same thing.", "id": "f10115:c0:m2"} {"signature": "def parse(text, encoding=''):", "body": "if isinstance(text, six.binary_type):text = text.decode(encoding)return Query(text, split_segments(text))", "docstring": "Parse the querystring into a normalized form.", "id": "f10131:m0"} {"signature": "def split_segments(text, closing_paren=False):", "body": "buf = StringIO()segments = []combinators = []last_group = Falseiterator = iter(text)last_negation = Falsefor character in iterator:if character in COMBINATORS:if last_negation:buf.write(constants.OPERATOR_NEGATION)val = buf.getvalue()reset_stringio(buf)if not last_group and not len(val):raise ValueError('' % character)if len(val):segments.append(parse_segment(val))combinators.append(COMBINATORS[character])elif character == constants.GROUP_BEGIN:if buf.tell():raise ValueError('' % character)seg = split_segments(iterator, True)if last_negation:seg = UnarySegmentCombinator(seg)segments.append(seg)last_group = Truecontinueelif character == constants.GROUP_END:val = buf.getvalue()if not buf.tell() or not closing_paren:raise ValueError('' % character)segments.append(parse_segment(val))return combine(segments, combinators)elif character == constants.OPERATOR_NEGATION and not buf.tell():last_negation = Truecontinueelse:if last_negation:buf.write(constants.OPERATOR_NEGATION)if last_group:raise ValueError('' % character)buf.write(character)last_negation = Falselast_group = Falseelse:if closing_paren:raise ValueError('' % constants.GROUP_END)if not last_group:segments.append(parse_segment(buf.getvalue()))return combine(segments, combinators)", "docstring": "Return objects representing segments.", "id": "f10131:m2"} {"signature": "def parse_directive(key):", "body": "if constants.DIRECTIVE in key:return key.split(constants.DIRECTIVE, )else:return key, None", "docstring": "Takes a key of type (foo:bar) and returns either the key and the\ndirective, or the key and None (for no directive.)", "id": "f10131:m4"} {"signature": "def parse_segment(text):", "body": "if not len(text):return NoopQuerySegment()q = QuerySegment()equalities = zip(constants.OPERATOR_EQUALITIES, itertools.repeat(text))equalities = map(lambda x: (x[], x[].split(x[], )), equalities)equalities = 
list(filter(lambda x: len(x[]) > , equalities))key_len = len(min((x[][] for x in equalities), key=len))equalities = filter(lambda x: len(x[][]) == key_len, equalities)op, (key, value) = min(equalities, key=lambda x: len(x[][]))key, directive = parse_directive(key)if directive:op = constants.OPERATOR_EQUALITY_FALLBACKq.directive = directivepath = key.split(constants.SEP_PATH)last = path[-]if last.endswith(constants.OPERATOR_NEGATION):last = last[:-]q.negated = not q.negatedif last == constants.PATH_NEGATION:path.pop(-)q.negated = not q.negatedq.values = value.split(constants.SEP_VALUE)if path[-] in constants.OPERATOR_SUFFIXES:if op not in constants.OPERATOR_FALLBACK:raise ValueError('''')q.operator = constants.OPERATOR_SUFFIX_MAP[path[-]]path.pop(-)else:q.operator = constants.OPERATOR_EQUALITY_MAP[op]if not len(path):raise ValueError('')q.path = pathreturn q", "docstring": "we expect foo=bar", "id": "f10131:m5"} {"signature": "def __str__(self):", "body": "o = StringIO()o.write('')if self.negated:o.write('')o.write(''.join(self.path))if self.values:o.write('' % REVERSED_OPERATOR_SUFFIX_MAP[self.operator])o.write(''.join(map(lambda x: \"\".format(str(x)), self.values)))o.write('')return o.getvalue()", "docstring": "Format this query segment in a human-readable representation\nintended for debugging.", "id": "f10131:c1:m2"} {"signature": "def __init__(self, **kwargs):", "body": "self.__dict__.update(kwargs)", "docstring": "Initialize authentication protocol; establish parameters.", "id": "f10134:c0:m0"} {"signature": "def authenticate(self, request):", "body": "return None", "docstring": "Gets the a user if they are authenticated; else None.\n\n @retval False Unable to authenticate.\n @retval None Able to authenticate but failed.\n @retval User object representing the current user.", "id": "f10134:c0:m1"} {"signature": "def unauthenticated(self):", "body": "raise http.exceptions.Forbidden()", "docstring": "Callback that is invoked when after a user is determined to\nbe unauthenticated.", "id": "f10134:c0:m2"} {"signature": "def get_user(self, request, method, *args):", "body": "return None", "docstring": "Callback that is invoked when a user is attempting to be\nauthenticated with a set of credentials.", "id": "f10134:c1:m2"} {"signature": "def can_serialize(self, data=None):", "body": "try:self.serialize(data)return Trueexcept ValueError:return False", "docstring": "Tests this serializer to see if it can serialize.", "id": "f10135:c0:m1"} {"signature": "def serialize(self, data=None):", "body": "if data is not None and self.response is not None:self.response[''] = self.media_types[]self.response.write(data)return data", "docstring": "Transforms the object into an acceptable format for transmission.\n\n@throws ValueError\n To indicate this serializer does not support the encoding of the\n specified object.", "id": "f10135:c0:m2"} {"signature": "def is_accessible(self, user, method, resource):", "body": "return True", "docstring": "Determines the accessibility to a resource endpoint for a particular\nmethod. An inaccessible resource is indistinguishable from a\nnon-existant resource.\n\n@param[in] user\n The user in question that is being checked.\n\n@param[in] method\n The method in question that is being performed (eg. 
'GET').\n\n@param[in] resource\n The resource instance that is being authorized.\n\n@returns\n Returns true if the user can access the resource for\n the passed operation; otherwise, false.", "id": "f10140:c0:m0"} {"signature": "def inaccessible(self):", "body": "raise http.exceptions.Forbidden()", "docstring": "Informs the client that the resource is inaccessible.", "id": "f10140:c0:m1"} {"signature": "def is_authorized(self, user, operation, resource, item):", "body": "return True", "docstring": "Determines authroization to a specific resource object.\n\n @param[in] user\n The user in question that is being checked.\n\n @param[in] operation\n The operation in question that is being performed (eg. 'read').\n\n @param[in] resource\n The resource instance that is being authorized.\n\n @param[in] item\n The specific instance of an object returned by a `read` from\n the `resource`.\n\n @returns\n Returns true to indicate authorization or false to indicate\n otherwise.", "id": "f10140:c0:m2"} {"signature": "def unauthorized(self):", "body": "raise http.exceptions.Forbidden()", "docstring": "Informs the client that it is not authrozied for the resource.", "id": "f10140:c0:m3"} {"signature": "def filter(self, user, operation, resource, iterable):", "body": "return iterable", "docstring": "Filters an iterable to contain only the items for which the user\nis authorized to perform the operation on.\n\n@param[in] user\n The user in question that is being checked.\n\n@param[in] operation\n The operation in question that is being performed (eg. 'read').\n\n@param[in] resource\n The resource instance that is being authorized.\n\n@param[in] iterable\n The iterable of objects to be checked. This method is called\n from the model connector so the actual value of this parameter\n depends on the model connector (eg. 
it may be a django queryset).\n\n@returns\n Returns an iterable containing the remaining objects.", "id": "f10140:c0:m4"} {"signature": "def inaccessible(self):", "body": "raise http.exceptions.NotFound()", "docstring": "Informs the client that the resource is inaccessible.", "id": "f10140:c1:m0"} {"signature": "@staticmethoddef _Header(sequence, name):", "body": "return tuple(sequence._headers.get(name, '').split(''))", "docstring": "Returns the passed header as a tuple.\n\n Implements a facade so that the response headers can override\n this to provide a mutable sequence header.", "id": "f10141:c0:m0"} {"signature": "@staticmethoddef normalize(name):", "body": "return str(string.capwords(name, ''))", "docstring": "Normalizes the case of the passed name to be Http-Header-Case.", "id": "f10141:c0:m2"} {"signature": "@abc.abstractmethoddef __getitem__(self, name):", "body": "", "docstring": "Retrieves a header with the passed name.\n\n @param[in] name\n The case-insensitive name of the header to retrieve.", "id": "f10141:c0:m3"} {"signature": "@abc.abstractmethoddef __len__(self):", "body": "", "docstring": "Retrieves the number of headers in the request.", "id": "f10141:c0:m4"} {"signature": "@abc.abstractmethoddef __iter__(self):", "body": "", "docstring": "Returns an iterable for all headers in the request.", "id": "f10141:c0:m5"} {"signature": "@abc.abstractmethoddef __contains__(self, name):", "body": "", "docstring": "Tests if the passed header exists in the request.", "id": "f10141:c0:m6"} {"signature": "def index(self, name, value):", "body": "return self._sequence[name].index(value)", "docstring": "Return the index in the list of the first item whose value is x in\nthe values of the named header.", "id": "f10141:c0:m7"} {"signature": "def count(self, name, value):", "body": "return self._sequence[name].count(value)", "docstring": "Return the number of times a value appears in the list of the values\nof the named header.", "id": "f10141:c0:m8"} {"signature": "def getlist(self, name):", "body": "return self._sequence[name]", "docstring": "Retrieves the passed header as a tuple of its values.", "id": "f10141:c0:m9"} {"signature": "def bind(self, resource):", "body": "self._resource = weakref.proxy(resource)", "docstring": "Binds this to the passed resource object.\n\n This is used so that the request and response classes can access\n metadata and configuration on the resource handling this request\n so helper methods on the request and response like `serialize` work\n in full knowledge of configuration supplied to the resource.", "id": "f10141:c1:m1"} {"signature": "@propertydef protocol(self):", "body": "raise NotImplementedError()", "docstring": "Retrieves the upper-cased version of the protocol (eg. HTTP).", "id": "f10141:c1:m3"} {"signature": "@propertydef host(self):", "body": "return self.headers.get('') or ''", "docstring": "Retrieves the hostname, normally from the `Host` header.", "id": "f10141:c1:m4"} {"signature": "@propertydef mount_point(self):", "body": "raise NotImplementedError()", "docstring": "Retrieves the mount point portion of the path of this request.", "id": "f10141:c1:m5"} {"signature": "@propertydef query(self):", "body": "raise NotImplementedError()", "docstring": "Retrieves the text after the first ? 
in the path.", "id": "f10141:c1:m6"} {"signature": "@propertydef uri(self):", "body": "raise NotImplementedError()", "docstring": "Returns the complete URI of the request.", "id": "f10141:c1:m7"} {"signature": "@propertydef encoding(self):", "body": "content_type = self.headers.get('')if content_type:ptype, _, params = mimeparse.parse_mime_type(content_type)default = '' if ptype == '' else ''return params.get('', default)", "docstring": "The name of the encoding used to decode the stream\u2019s bytes\ninto strings, and to encode strings into bytes.\n\nReads the charset value from the `Content-Type` header, if available;\nelse, returns nothing.", "id": "f10141:c1:m8"} {"signature": "def _read(self):", "body": "return None", "docstring": "Read and return the request data.\n\n @note Connectors should override this method.", "id": "f10141:c1:m9"} {"signature": "def read(self, deserialize=False, format=None):", "body": "if deserialize:data, _ = self.deserialize(format=format)return datacontent = self._read()if not content:return ''if type(content) is six.binary_type:content = content.decode(self.encoding)return content", "docstring": "Read and return the request data.\n\n @param[in] deserialize\n True to deserialize the resultant text using a determiend format\n or the passed format.\n\n @param[in] format\n A specific format to deserialize in; if provided, no detection is\n done. If not provided, the content-type header is looked at to\n determine an appropriate deserializer.", "id": "f10141:c1:m10"} {"signature": "def deserialize(self, format=None):", "body": "return self._resource.deserialize(self, format=format)", "docstring": "Deserializes the request body using a determined deserializer.\n\n @param[in] format\n A specific format to deserialize in; if provided, no detection is\n done. 
If not provided, the content-type header is looked at to\n determine an appropriate deserializer.\n\n @returns\n A tuple of the deserialized data and an instance of the\n deserializer used.", "id": "f10141:c1:m11"} {"signature": "def __len__(self):", "body": "length = self.headers.get('')return int(length) if length else ", "docstring": "Returns the length of the request body, if known.", "id": "f10141:c1:m12"} {"signature": "def __getitem__(self, name):", "body": "return self.headers[name]", "docstring": "Retrieves a header with the passed name.", "id": "f10141:c1:m13"} {"signature": "def get(self, name, default=None):", "body": "return self.headers.get(name, default)", "docstring": "Retrieves a header with the passed name.", "id": "f10141:c1:m14"} {"signature": "def getlist(self, name):", "body": "return self.headers.getlist(name)", "docstring": "Retrieves a the multi-valued list of the header with\nthe passed name.", "id": "f10141:c1:m15"} {"signature": "def __contains__(self, name):", "body": "return name in self.headers", "docstring": "Tests if the passed header exists in the request.", "id": "f10141:c1:m16"} {"signature": "def keys(self):", "body": "return self.headers.keys()", "docstring": "Return a new view of the header names.", "id": "f10141:c1:m17"} {"signature": "def values(self):", "body": "return self.headers.values()", "docstring": "Return a new view of the header values.", "id": "f10141:c1:m18"} {"signature": "def items(self):", "body": "return self.headers.items()", "docstring": "Return a new view of the headers.", "id": "f10141:c1:m19"} {"signature": "@abc.abstractmethoddef __setitem__(self, name, value):", "body": "", "docstring": "Stores a header with the passed name.\n\n @param[in] name\n The name to store the header as. This is passed through\n `Headers.normalize` before storing on the response.\n\n @param[in] value\n The value to store for the header; for multi-valued headers,\n this can be a comma-separated list of values.", "id": "f10144:c0:m0"} {"signature": "@abc.abstractmethoddef __delitem__(self, name):", "body": "", "docstring": "Removes a header with the passed name.\n\n @param[in] name\n The case-insensitive name of the header to remove\n from the response.", "id": "f10144:c0:m1"} {"signature": "def append(self, name, value):", "body": "return self._sequence[name].append(value)", "docstring": "Add a value to the end of the list for the named header.", "id": "f10144:c0:m2"} {"signature": "def extend(self, name, values):", "body": "return self._sequence[name].extend(values)", "docstring": "Extend the list for the named header by appending all values.", "id": "f10144:c0:m3"} {"signature": "def insert(self, name, index, value):", "body": "return self._sequence[name].insert(index, value)", "docstring": "Insert a value at the passed index in the named header.", "id": "f10144:c0:m4"} {"signature": "def remove(self, name, value):", "body": "return self._sequence[name].remove(value)", "docstring": "Remove the first item with the passed value from the\nlist for the named header.", "id": "f10144:c0:m5"} {"signature": "def popvalue(self, name, index=None):", "body": "return self._sequence[name].pop(index)", "docstring": "Remove the item at the given position in the named header list.", "id": "f10144:c0:m6"} {"signature": "def sort(self, name):", "body": "return self._sequence[name].sort()", "docstring": "Sort the items of the list, in place.", "id": "f10144:c0:m7"} {"signature": "def reverse(self, name):", "body": "return self._sequence[name].reverse()", "docstring": 
"Reverse the elements of the list, in place.", "id": "f10144:c0:m8"} {"signature": "def require_not_closed(self):", "body": "if self.closed:raise exceptions.InvalidOperation('')", "docstring": "Raises an exception if the response is closed.", "id": "f10144:c1:m1"} {"signature": "def require_open(self):", "body": "self.require_not_closed()if self.streaming:raise exceptions.InvalidOperation('')", "docstring": "Raises an exception if the response is not open.", "id": "f10144:c1:m2"} {"signature": "@propertydef status(self):", "body": "raise NotImplementedError()", "docstring": "Gets the status code of the response.", "id": "f10144:c1:m3"} {"signature": "@status.setterdef status(self, value):", "body": "raise NotImplementedError()", "docstring": "Sets the status code of the response.", "id": "f10144:c1:m4"} {"signature": "@propertydef body(self):", "body": "return self._body", "docstring": "Returns the current value of the response body.", "id": "f10144:c1:m5"} {"signature": "@body.setterdef body(self, value):", "body": "self._body = value", "docstring": "Sets the response body to the passed value.\n\n @note\n During asynchronous or streaming responses, remember that\n the `body` property refers to the portion of the response *not*\n sent to the client.", "id": "f10144:c1:m6"} {"signature": "def bind(self, resource):", "body": "self._resource = weakref.proxy(resource)", "docstring": "Binds this to the passed resource object.\n\n @sa armet.http.request.Request.bind", "id": "f10144:c1:m7"} {"signature": "@abc.abstractmethoddef close(self):", "body": "self.require_not_closed()if not self.streaming or self.asynchronous:if '' not in self.headers:self.headers[''] = self.tell()self.flush()self._closed = True", "docstring": "Flush and close the stream.\n\n This is called automatically by the base resource on resources\n unless the resource is operating asynchronously; in that case,\n this method MUST be called in order to signal the end of the request.\n If not the request will simply hang as it is waiting for some\n thread to tell it to return to the client.", "id": "f10144:c1:m11"} {"signature": "@propertydef closed(self):", "body": "return self._closed", "docstring": "True if the stream is closed.", "id": "f10144:c1:m12"} {"signature": "def tell(self):", "body": "return self._length", "docstring": "Return the current stream position.", "id": "f10144:c1:m13"} {"signature": "def write(self, chunk, serialize=False, format=None):", "body": "self.require_not_closed()if chunk is None:returnif serialize or format is not None:self.serialize(chunk, format=format)return if type(chunk) is six.binary_type:self._length += len(chunk)self._stream.write(chunk)elif isinstance(chunk, six.string_types):encoding = self.encodingif encoding is not None:chunk = chunk.encode(encoding)else:raise exceptions.InvalidOperation('')self._length += len(chunk)self._stream.write(chunk)elif isinstance(chunk, collections.Iterable):for section in chunk:self.write(section)else:raise exceptions.InvalidOperation('')", "docstring": "Writes the given chunk to the output buffer.\n\n @param[in] chunk\n Either a byte array, a unicode string, or a generator. If `chunk`\n is a generator then calling `self.write()` is\n equivalent to:\n\n @code\n for x in :\n self.write(x)\n self.flush()\n @endcode\n\n @param[in] serialize\n True to serialize the lines in a determined serializer.\n\n @param[in] format\n A specific format to serialize in; if provided, no detection is\n done. 
If not provided, the accept header (as well as the URL\n extension) is looked at to determine an appropriate serializer.", "id": "f10144:c1:m14"} {"signature": "def serialize(self, data, format=None):", "body": "return self._resource.serialize(data, response=self, format=format)", "docstring": "Serializes the data into this response using a serializer.\n\n @param[in] data\n The data to be serialized.\n\n @param[in] format\n A specific format to serialize in; if provided, no detection is\n done. If not provided, the accept header (as well as the URL\n extension) is looked at to determine an appropriate serializer.\n\n @returns\n A tuple of the serialized text and an instance of the\n serializer used.", "id": "f10144:c1:m15"} {"signature": "def flush(self):", "body": "self.require_not_closed()chunk = self._stream.getvalue()self._stream.truncate()self._stream.seek()self.body = chunk if (self._body is None) else (self._body + chunk)if self.asynchronous:self.streaming = True", "docstring": "Flush the write buffers of the stream.\n\n This results in writing the current contents of the write buffer to\n the transport layer, initiating the HTTP/1.1 response. This initiates\n a streaming response. If the `Content-Length` header is not given\n then the chunked `Transfer-Encoding` is applied.", "id": "f10144:c1:m16"} {"signature": "def send(self, *args, **kwargs):", "body": "self.write(*args, **kwargs)self.flush()", "docstring": "Writes the passed chunk and flushes it to the client.", "id": "f10144:c1:m17"} {"signature": "def end(self, *args, **kwargs):", "body": "self.send(*args, **kwargs)self.close()", "docstring": "Writes the passed chunk, flushes it to the client,\nand terminates the connection.", "id": "f10144:c1:m18"} {"signature": "def __getitem__(self, name):", "body": "return self.headers[name]", "docstring": "Retrieves a header with the passed name.", "id": "f10144:c1:m19"} {"signature": "def __setitem__(self, name, value):", "body": "self.headers[name] = value", "docstring": "Stores a header with the passed name.", "id": "f10144:c1:m20"} {"signature": "def __delitem__(self, name):", "body": "del self.headers", "docstring": "Removes a header with the passed name.", "id": "f10144:c1:m21"} {"signature": "def __len__(self):", "body": "return self.tell()", "docstring": "Retrieves the actual length of the response.", "id": "f10144:c1:m22"} {"signature": "def __nonzero__(self):", "body": "return not self._closed", "docstring": "Test if the response is closed.", "id": "f10144:c1:m23"} {"signature": "def __bool__(self):", "body": "return not self._closed", "docstring": "Test if the response is closed.", "id": "f10144:c1:m24"} {"signature": "def __contains__(self, name):", "body": "return name in self.headers", "docstring": "Tests if the passed header exists in the response.", "id": "f10144:c1:m25"} {"signature": "def append(self, name, value):", "body": "return self.headers.append(name, value)", "docstring": "Add a value to the end of the list for the named header.", "id": "f10144:c1:m26"} {"signature": "def extend(self, name, values):", "body": "return self.headers.extend(name, values)", "docstring": "Extend the list for the named header by appending all values.", "id": "f10144:c1:m27"} {"signature": "def insert(self, name, index, value):", "body": "return self.headers.insert(index, value)", "docstring": "Insert a value at the passed index in the named header.", "id": "f10144:c1:m28"} {"signature": "def remove(self, name, value):", "body": "return self.headers.remove(name, value)", "docstring": 
"Remove the first item with the passed value from the\nlist for the named header.", "id": "f10144:c1:m29"} {"signature": "def popvalue(self, name, index=None):", "body": "return self.headers.popvalue(name, index)", "docstring": "Remove the item at the given position in the named header list.", "id": "f10144:c1:m30"} {"signature": "def index(self, name, value):", "body": "return self.headers.index(name, value)", "docstring": "Return the index in the list of the first item whose value is x in\nthe values of the named header.", "id": "f10144:c1:m31"} {"signature": "def count(self, name, value):", "body": "return self.headers.count(name, value)", "docstring": "Return the number of times a value appears in the list of the values\nof the named header.", "id": "f10144:c1:m32"} {"signature": "def sort(self, name):", "body": "return self.headers.sort(name)", "docstring": "Sort the items of the list, in place.", "id": "f10144:c1:m33"} {"signature": "def reverse(self, name):", "body": "return self.headers.reverse(name)", "docstring": "Reverse the elements of the list, in place.", "id": "f10144:c1:m34"} {"signature": "def getlist(self, name):", "body": "return self.headers.getlist(name)", "docstring": "Retrieves the passed header as a sequence of its values.", "id": "f10144:c1:m35"} {"signature": "def deserialize(self, request=None, text=None):", "body": "if text is None:text = request.read()return text", "docstring": "Parses the request content into a format consumable by python.\n\n @throws ValueError\n To indicate this deserializer cannot deserialize the\n passed text.", "id": "f10146:c0:m0"} {"signature": "def dasherize(value):", "body": "value = value.strip()value = re.sub(r'', r'', value)value = re.sub(r'', r'', value)value = re.sub(r'', r'', value)value = value.lower()return value", "docstring": "Dasherizes the passed value.", "id": "f10150:m0"} {"signature": "def import_module(name):", "body": "try:return importlib.import_module(name)except ImportError:return None", "docstring": "Attempt to import a module; returns None if unsuccessful.", "id": "f10152:m0"} {"signature": "def cons(collection, value):", "body": "if isinstance(value, collections.Mapping):if collection is None:collection = {}collection.update(**value)elif isinstance(value, six.string_types):if collection is None:collection = []collection.append(value)elif isinstance(value, collections.Iterable):if collection is None:collection = []collection.extend(value)else:if collection is None:collection = []collection.append(value)return collection", "docstring": "Extends a collection with a value.", "id": "f10154:m0"} {"signature": "def ilike_helper(default):", "body": "@functools.wraps(default)def wrapped(x, y):if isinstance(y, six.string_types) and not isinstance(x.type, sa.Enum):return x.ilike(\"\" + y + \"\")else:return default(x, y)return wrapped", "docstring": "Helper function that performs an `ilike` query if a string value\n is passed, otherwise the normal default operation.", "id": "f10157:m0"} {"signature": "def parse(specifiers):", "body": "specifiers = \"\".join(specifiers.split())for specifier in specifiers.split(''):if len(specifier) == :raise ValueError(\"\")count = specifier.count('')if (count and specifier[] == '') or not count:yield int(specifier), int(specifier)continuespecifier = list(map(int, specifier.split('')))if len(specifier) == :if specifier[] < or specifier[] < :raise ValueError(\"\"\"\")if specifier[] < specifier[]:raise ValueError(\"\")yield tuple(specifier)continueraise ValueError(\"\")", "docstring": 
"Consumes set specifiers as text and forms a generator to retrieve\nthe requested ranges.\n\n@param[in] specifiers\n Expected syntax is from the byte-range-specifier ABNF found in the\n [RFC 2616]; eg. 15-17,151,-16,26-278,15\n\n@returns\n Consecutive tuples that describe the requested range; eg. (1, 72) or\n (1, 1) [read as 1 to 72 or 1 to 1].", "id": "f10160:m0"} {"signature": "def paginate(request, response, items):", "body": "header = request.headers.get('')if not header:return itemsprefix = RANGE_SPECIFIER + ''if not header.find(prefix) == :raise exceptions.RequestedRangeNotSatisfiable()else:ranges = parse(header[len(prefix):])ranges = list(ranges)if len(ranges) > :raise exceptions.RequestedRangeNotSatisfiable('')start, end = ranges[]max_length = request.resource.count(items)end = min(end, max_length)response.status = client.PARTIAL_CONTENTresponse.headers[''] = '' % (start, end, max_length)response.headers[''] = RANGE_SPECIFIERitems = items[start:end + ]return items", "docstring": "Paginate an iterable during a request.\n\n Magically splicling an iterable in our supported ORMs allows LIMIT and\n OFFSET queries. We should probably delegate this to the ORM or something\n in the future.", "id": "f10160:m1"} {"signature": "def use(**kwargs):", "body": "config = dict(use.config)use.config.update(kwargs)return config", "docstring": "Updates the active resource configuration to the passed\nkeyword arguments.\n\nInvoking this method without passing arguments will just return the\nactive resource configuration.\n\n@returns\n The previous configuration.", "id": "f10162:m0"} {"signature": "def get(self, target):", "body": "return self._get[target](target)", "docstring": "Retrieve the value of this attribute from the passed object.", "id": "f10168:c0:m2"} {"signature": "def set(self, target, value):", "body": "if not self._set:returnif self.path is None:self.set = lambda *a: Nonereturn Noneif self._segments[target.__class__]:self.get(target)if self._segments[target.__class__]:returnparent_getter = compose(*self._getters[target.__class__][:-])target = parent_getter(target)func = self._make_setter(self.path.split('')[-], target.__class__)func(target, value)def setter(target, value):func(parent_getter(target), value)self.set = setter", "docstring": "Set the value of this attribute for the passed object.", "id": "f10168:c0:m3"} {"signature": "def prepare(self, value):", "body": "return value", "docstring": "Prepare the value for serialization and presentation to the client.", "id": "f10168:c0:m4"} {"signature": "def clean(self, value):", "body": "return value", "docstring": "Cleans the value from deserialization into consumption by python.", "id": "f10168:c0:m5"} {"signature": "def clone(self):", "body": "return self.__class__(**self.__dict__)", "docstring": "Construct an identical attribute.\n\n Used by the resource metaclass to ensure all attributes are unique\n instances. This is done so that when getters and setters are resolved\n the caches don't clobber base classes (for inherited attributes).", "id": "f10168:c0:m7"} {"signature": "def asynchronous(resource):", "body": "resource.meta.asynchronous = Truereturn resource", "docstring": "Instructs a decorated resource that it is to be asynchronous.\n\n An asynchronous resource means that `response.close()` must be called\n explicitly as returning from a method (eg. `get`) does not close\n the connection.\n\n @note\n This can also be configured by setting `asynchronous` to `True`\n on `.Meta`. 
The benefit of the decorator is that this\n can be applied to specific methods as well as the entire class\n body.", "id": "f10172:m0"} {"signature": "def resource(**kwargs):", "body": "def inner(function):name = kwargs.pop('', None)if name is None:name = utils.dasherize(function.__name__)methods = kwargs.pop('', None)if isinstance(methods, six.string_types):methods = methods,handler = (function, methods)if name not in _resources:_handlers[name] = []from armet import resourceskwargs[''] = nameclass LightweightResource(resources.Resource):Meta = type(str(''), (), kwargs)def route(self, request, response):for handler, methods in _handlers[name]:if methods is None or request.method in methods:return handler(request, response)resources.Resource.route(self)_resources[name] = LightweightResource_handlers[name].append(handler)return _resources[name]return inner", "docstring": "Wraps the decorated function in a lightweight resource.", "id": "f10172:m1"} {"signature": "def read(resource, url):", "body": "return resource._request_read(url)", "docstring": "Perform a `read` request in the passed `resource` context against\nthe given `url`.\n\nReturns what a `read` would return (the managed target item).", "id": "f10178:m0"} {"signature": "@propertydef allowed_operations(self):", "body": "if self.slug is not None:return self.meta.detail_allowed_operationsreturn self.meta.list_allowed_operations", "docstring": "Retrieves the allowed operations for this request.", "id": "f10180:c0:m2"} {"signature": "def assert_operations(self, *args):", "body": "if not set(args).issubset(self.allowed_operations):raise http.exceptions.Forbidden()", "docstring": "Asserts that the requested operations are allowed in this context.", "id": "f10180:c0:m3"} {"signature": "def make_response(self, data=None):", "body": "if data is not None:data = self.prepare(data)self.response.write(data, serialize=True)", "docstring": "Fills the response object from the passed data.", "id": "f10180:c0:m4"} {"signature": "def get(self, request, response):", "body": "self.assert_operations('')items = self.read()if not items:raise http.exceptions.NotFound()if (isinstance(items, Iterable)and not isinstance(items, six.string_types)) and items:items = pagination.paginate(self.request, self.response, items)self.make_response(items)", "docstring": "Processes a `GET` request.", "id": "f10180:c0:m13"} {"signature": "def post(self, request, response):", "body": "if self.slug is not None:raise http.exceptions.NotImplemented()self.assert_operations('')data = self._clean(None, self.request.read(deserialize=True))item = self.create(data)self.response.status = http.client.CREATEDself.make_response(item)", "docstring": "Processes a `POST` request.", "id": "f10180:c0:m14"} {"signature": "def put(self, request, response):", "body": "if self.slug is None:raise http.exceptions.NotImplemented()target = self.read()data = self._clean(target, self.request.read(deserialize=True))if target is not None:self.assert_operations('')try:self.update(target, data)except AttributeError:raise http.exceptions.NotImplemented()self.make_response(target)else:self.assert_operations('')target = self.create(data)self.response.status = http.client.CREATEDself.make_response(target)", "docstring": "Processes a `PUT` request.", "id": "f10180:c0:m15"} {"signature": "def delete(self, request, response):", "body": "if self.slug is None:raise http.exceptions.NotImplemented()self.assert_operations('')self.destroy()self.response.status = http.client.NO_CONTENTself.make_response()", "docstring": 
"Processes a `DELETE` request.", "id": "f10180:c0:m16"} {"signature": "def link(self, request, response):", "body": "from armet.resources.managed.request import readif self.slug is None:raise http.exceptions.NotImplemented()target = self.read()links = self._parse_link_headers(request[''])for link in links:self.relate(target, read(self, link['']))self.response.status = http.client.NO_CONTENTself.make_response()", "docstring": "Processes a `LINK` request.\n\n A `LINK` request is asking to create a relation from the currently\n represented URI to all of the `Link` request headers.", "id": "f10180:c0:m18"} {"signature": "def unlink(self, request, response):", "body": "from armet.resources.managed.request import readif self.slug is None:raise http.exceptions.NotImplemented()target = self.read()links = self._parse_link_headers(request[''])for link in links:self.unrelate(target, read(self, link['']))self.response.status = http.client.NO_CONTENTself.make_response()", "docstring": "Processes a `UNLINK` request.\n\n A `UNLINK` request is asking to revoke a relation from the currently\n represented URI to all of the `Link` request headers.", "id": "f10180:c0:m19"} {"signature": "def _merge(options, name, bases, default=None):", "body": "result = Nonefor base in bases:if base is None:continuevalue = getattr(base, name, None)if value is None:continueresult = utils.cons(result, value)value = options.get(name)if value is not None:result = utils.cons(result, value)return result or default", "docstring": "Merges a named option collection.", "id": "f10184:m0"} {"signature": "def __init__(self, meta, name, data, bases):", "body": "self.debug = meta.get('')if self.debug is None:self.debug = Falseself.abstract = data.get('')self.name = meta.get('')if self.name is None:dashed = utils.dasherize(name).strip()if dashed:self.name = re.sub(r'', '', dashed)else:self.name = nameelif callable(self.name):self.name = self.name(name)self.asynchronous = meta.get('', False)self.connectors = connectors = _merge(meta, '', bases, {})if not connectors.get('') and not self.abstract:raise ImproperlyConfigured('')for key in connectors:connector = connectors[key]if isinstance(connector, six.string_types):if connector in getattr(included_connectors, key):connectors[key] = ''.format(connector)self.options = options = _merge(meta, '', bases, {})for name in options:setattr(self, name, meta.get(name))self.patterns = meta.get('', [])for index, pattern in enumerate(self.patterns):if isinstance(pattern, six.string_types):pattern = (None, pattern)self.patterns[index] = (pattern[], re.compile(pattern[]))self.trailing_slash = meta.get('', True)self.http_allowed_methods = meta.get('')if self.http_allowed_methods is None:self.http_allowed_methods = ('','','','','','','','','')self.http_allowed_headers = meta.get('')if self.http_allowed_headers is None:self.http_allowed_headers = ('','','','')self.http_exposed_headers = meta.get('')if self.http_exposed_headers is None:self.http_exposed_headers = ('','','','')self.http_allowed_origins = meta.get('')if self.http_allowed_origins is None:self.http_allowed_origins = ()self.legacy_redirect = meta.get('', True)self.serializers = serializers = meta.get('')if not serializers:self.serializers = {'': '','': ''}for name, serializer in six.iteritems(self.serializers):if isinstance(serializer, six.string_types):segments = serializer.split('')module = ''.join(segments[:-])module = import_module(module)self.serializers[name] = getattr(module, segments[-])self.allowed_serializers = meta.get('')if not 
self.allowed_serializers:self.allowed_serializers = tuple(self.serializers.keys())for name in self.allowed_serializers:if name not in self.serializers:raise ImproperlyConfigured(''''.format(name))self.default_serializer = meta.get('')if not self.default_serializer:if '' in self.allowed_serializers:self.default_serializer = ''else:self.default_serializer = self.allowed_serializers[]if self.default_serializer not in self.allowed_serializers:raise ImproperlyConfigured(''''.format(self.default_serializer))self.deserializers = deserializers = meta.get('')if not deserializers:self.deserializers = {'': '','': ''}for name, deserializer in six.iteritems(self.deserializers):if isinstance(deserializer, six.string_types):segments = deserializer.split('')module = ''.join(segments[:-])module = import_module(module)self.deserializers[name] = getattr(module, segments[-])self.allowed_deserializers = meta.get('')if not self.allowed_deserializers:self.allowed_deserializers = tuple(self.deserializers.keys())for name in self.allowed_deserializers:if name not in self.deserializers:raise ImproperlyConfigured(''''.format(name))self.authentication = meta.get('')if self.authentication is None:self.authentication = (authentication.Authentication(),)self.authorization = meta.get('')if self.authorization is None:self.authorization = authorization.Authorization()", "docstring": "Initializes the options object and defaults configuration not\nspecified.\n\n@param[in] meta\n Dictionary of the merged meta attributes.\n\n@param[in] name\n Name of the resource class this is being instantiated for.", "id": "f10184:c0:m0"} {"signature": "@classmethoddef redirect(cls, request, response):", "body": "if cls.meta.legacy_redirect:if request.method in ('', '',):response.status = http.client.MOVED_PERMANENTLYelse:response.status = http.client.TEMPORARY_REDIRECTelse:response.status = http.client.PERMANENT_REDIRECTresponse.close()", "docstring": "Redirect to the canonical URI for this resource.", "id": "f10185:c0:m1"} {"signature": "@classmethoddef view(cls, request, response):", "body": "test = cls.meta.trailing_slashif test ^ request.path.endswith(''):path = request.path + '' if test else request.path[:-]response[''] = ''.format(request.protocol.lower(),request.host,request.mount_point,path,'' + request.query if request.query else '')return cls.redirect(request, response)try:obj = cls(request, response)request.bind(obj)response.bind(obj)obj._request = requestresult = obj.dispatch(request, response)if not response.asynchronous:if (isinstance(result, collections.Iterable) andnot isinstance(result, six.string_types)):return cls.stream(response, result)else:response.end(result)if response.body:return response.bodyexcept http.exceptions.BaseHTTPException as e:response.status = e.statusresponse.headers.update(e.headers)if e.content:response.send(e.content, serialize=True, format='')response.close()if response.body:return response.bodyexcept Exception:logger.exception('')if not response.streaming and not response.closed:response.status = http.client.INTERNAL_SERVER_ERRORresponse.headers.clear()response.close()", "docstring": "Entry-point of the request / response cycle; Handles resource creation\nand delegation.\n\n@param[in] request\n The HTTP request object; containing accessors for information\n about the request.\n\n@param[in] response\n The HTTP response object; contains accessors for modifying\n the information that will be sent to the client.", "id": "f10185:c0:m2"} {"signature": "@classmethoddef parse(cls, path):", "body": "for 
resource, pattern in cls.meta.patterns:match = re.match(pattern, path)if match is not None:return resource, match.groupdict(), match.string[match.end():]return None if not cls.meta.patterns else False", "docstring": "Parses out parameters and separates them out of the path.\n\n This uses one of the many defined patterns on the options class. But,\n it defaults to a no-op if there are no defined patterns.", "id": "f10185:c0:m3"} {"signature": "@classmethoddef traverse(cls, request, params=None):", "body": "result = cls.parse(request.path)if result is None:return cls, {}elif not result:raise http.exceptions.NotFound()resource, data, rest = resultif params:data.update(params)if resource is None:return cls, dataif data.get('') is not None:request.path = data.pop('')elif rest is not None:request.path = restresult = resource.traverse(request, params=data)return result", "docstring": "Traverses down the path and determines the accessed resource.\n\n This makes use of the patterns array to implement simple traversal.\n This defaults to a no-op if there are no defined patterns.", "id": "f10185:c0:m4"} {"signature": "@classmethoddef stream(cls, response, sequence):", "body": "iterator = iter(sequence)data = {'': next(iterator)}response.streaming = Truedef streamer():while True:if response.asynchronous:yield data['']else:response.send(data[''])yield response.bodyresponse.body = Nonetry:data[''] = next(iterator)except StopIteration:breakif not response.asynchronous:response.close()return streamer()", "docstring": "Helper method used in conjunction with the view handler to\nstream responses to the client.", "id": "f10185:c0:m5"} {"signature": "@utils.boundmethoddef deserialize(self, request=None, text=None, format=None):", "body": "if isinstance(self, Resource):if not request:request = self._requestDeserializer = Noneif format:Deserializer = self.meta.deserializers[format]if not Deserializer:media_ranges = request.get('')if media_ranges:media_types = six.iterkeys(self._deserializer_map)media_type = mimeparse.best_match(media_types, media_ranges)if media_type:format = self._deserializer_map[media_type]Deserializer = self.meta.deserializers[format]else:passif Deserializer:try:deserializer = Deserializer()data = deserializer.deserialize(request=request, text=text)return data, deserializerexcept ValueError:passraise http.exceptions.UnsupportedMediaType()", "docstring": "Deserializes the text using a determined deserializer.\n\n @param[in] request\n The request object to pull information from; normally used to\n determine the deserialization format (when `format` is\n not provided).\n\n @param[in] text\n The text to be deserialized. Can be left blank and the\n request will be read.\n\n @param[in] format\n A specific format to deserialize in; if provided, no detection is\n done. 
If not provided, the content-type header is looked at to\n determine an appropriate deserializer.\n\n @returns\n A tuple of the deserialized data and an instance of the\n deserializer used.", "id": "f10185:c0:m6"} {"signature": "@utils.boundmethoddef serialize(self, data, response=None, request=None, format=None):", "body": "if isinstance(self, Resource):if not request:request = self._requestSerializer = Noneif format:Serializer = self.meta.serializers[format]if not Serializer:media_ranges = (request.get('') or '').strip()if not media_ranges:media_ranges = ''if media_ranges != '':media_types = six.iterkeys(self._serializer_map)media_type = mimeparse.best_match(media_types, media_ranges)if media_type:format = self._serializer_map[media_type]Serializer = self.meta.serializers[format]else:default = self.meta.default_serializerSerializer = self.meta.serializers[default]if Serializer:try:serializer = Serializer(request, response)return serializer.serialize(data), serializerexcept ValueError:passavailable = {}for name in self.meta.allowed_serializers:Serializer = self.meta.serializers[name]instance = Serializer(request, None)if instance.can_serialize(data):available[name] = Serializer.media_types[]raise http.exceptions.NotAcceptable(available)", "docstring": "Serializes the data using a determined serializer.\n\n @param[in] data\n The data to be serialized.\n\n @param[in] response\n The response object to serialize the data to.\n If this method is invoked as an instance method, the response\n object can be omitted and it will be taken from the instance.\n\n @param[in] request\n The request object to pull information from; normally used to\n determine the serialization format (when `format` is not provided).\n May be used by some serializers as well to pull additional headers.\n If this method is invoked as an instance method, the request\n object can be omitted and it will be taken from the instance.\n\n @param[in] format\n A specific format to serialize in; if provided, no detection is\n done. 
If not provided, the accept header (as well as the URL\n extension) is looked at to determine an appropriate serializer.\n\n @returns\n A tuple of the serialized text and an instance of the\n serializer used.", "id": "f10185:c0:m7"} {"signature": "@classmethoddef _process_cross_domain_request(cls, request, response):", "body": "origin = request.get('')if not origin:returnif not (origin in cls.meta.http_allowed_origins or'' == cls.meta.http_allowed_origins):returnmethod = request.get('')if method and method not in cls.meta.http_allowed_methods:returnheaders = request.get('', ())if headers:headers = [h.strip() for h in headers.split('')]allowed_headers = [h.lower() for h in cls.meta.http_allowed_headers]if any(h.lower() not in allowed_headers for h in headers):returnresponse[''] = originresponse[''] = ''allowed_methods = ''.join(cls.meta.http_allowed_methods)response[''] = allowed_methodsallowed_headers = ''.join(cls.meta.http_allowed_headers)if allowed_headers:response[''] = allowed_headersexposed_headers = ''.join(cls.meta.http_exposed_headers)if exposed_headers:response[''] = exposed_headers", "docstring": "Facilitate Cross-Origin Requests (CORs).", "id": "f10185:c0:m8"} {"signature": "def dispatch(self, request, response):", "body": "self.require_authentication(request)self.require_accessibility(request.user, request.method)self._process_cross_domain_request(request, response)return self.route(request, response)", "docstring": "Entry-point of the dispatch cycle for this resource.\n\n Performs common work such as authentication, decoding, etc. before\n handing complete control of the result to a function with the\n same name as the request method.", "id": "f10185:c0:m10"} {"signature": "def require_authentication(self, request):", "body": "request.user = user = Noneif request.method == '':returnfor auth in self.meta.authentication:user = auth.authenticate(request)if user is False:continueif user is None and not auth.allow_anonymous:auth.unauthenticated()request.user = userreturnif not user and not auth.allow_anonymous:auth.unauthenticated()", "docstring": "Ensure we are authenticated.", "id": "f10185:c0:m11"} {"signature": "def require_accessibility(self, user, method):", "body": "if method == '':returnauthz = self.meta.authorizationif not authz.is_accessible(user, method, self):authz.unaccessible()", "docstring": "Ensure we are allowed to access this resource.", "id": "f10185:c0:m12"} {"signature": "def require_http_allowed_method(cls, request):", "body": "allowed = cls.meta.http_allowed_methodsif request.method not in allowed:raise http.exceptions.MethodNotAllowed(allowed)", "docstring": "Ensure that we're allowed to use this HTTP method.", "id": "f10185:c0:m13"} {"signature": "def route(self, request, response):", "body": "self.require_http_allowed_method(request)function = getattr(self, request.method.lower(), None)if function is None:raise http.exceptions.NotImplemented()return function(request, response)", "docstring": "Processes every request.\n\n Directs control flow to the appropriate HTTP/1.1 method.", "id": "f10185:c0:m14"} {"signature": "def options(self, request, response):", "body": "response[''] = ''.join(self.meta.http_allowed_methods)response.status = http.client.OK", "docstring": "Process an `OPTIONS` request.\n\n Used to initiate a cross-origin request. 
All handling specific to\n CORS requests is done on every request however this method also\n returns a list of available methods.", "id": "f10185:c0:m15"} {"signature": "def random_string(length):", "body": "str_list = [random.choice(string.digits + string.ascii_letters) for i in range(length)]return ''.join(str_list)", "docstring": "Generate random string with parameter length.\nExample:\n\n >>> from eggit.egg_string import random_string\n >>> random_string(8)\n 'q4f2eaT4'\n >>>", "id": "f10194:m0"} {"signature": "@staticmethoddef datetime_str_to_timestamp(datetime_str):", "body": "try:dtf = DTFormat()struct_time = time.strptime(datetime_str, dtf.datetime_format)return time.mktime(struct_time)except:return None", "docstring": "'2018-01-01 00:00:00' (str) --> 1514736000\n\n:param str datetime_str: datetime string\n:return: unix timestamp (int) or None\n:rtype: int or None", "id": "f10195:c1:m0"} {"signature": "@staticmethoddef get_datetime_object(datetime_str):", "body": "try:dft = DTFormat()return datetime.strptime(datetime_str, dft.datetime_format)except:return None", "docstring": "Get datetime object from datetime string\n\nexample:\n DateTimeUtils.get_datetime_object('2018-01-01 00:00:00')\n\n:param str string: datetime string\n:return: datetime object\n:rtype: datetime", "id": "f10195:c1:m1"} {"signature": "@staticmethoddef get_datetime_string(datetime_obj):", "body": "if isinstance(datetime_obj, datetime):dft = DTFormat()return datetime_obj.strftime(dft.datetime_format)return None", "docstring": "Get datetime string from datetime object\n\n:param datetime datetime_obj: datetime object\n:return: datetime string\n:rtype: str", "id": "f10195:c1:m2"} {"signature": "@staticmethoddef now_str():", "body": "dft = DTFormat()return datetime.now().strftime(dft.datetime_format)", "docstring": "Get now datetime str like '2018-01-01 00:00:00' (str)\n\n:return: datetime string\n:rtype: str", "id": "f10195:c1:m3"} {"signature": "@staticmethoddef timestamp_to_datetime(timestamp):", "body": "if isinstance(timestamp, (int, float, str)):try:timestamp = float(timestamp)if timestamp.is_integer():timestamp = int(timestamp)except:return Nonetemp = str(timestamp).split('')[]if len(temp) == :timestamp = timestamp / if len(temp) < :return Noneelse:return Nonereturn datetime.fromtimestamp(timestamp)", "docstring": "1514736000 --> datetime object\n\n:param int timestamp: unix timestamp (int)\n:return: datetime object or None\n:rtype: datetime or None", "id": "f10195:c1:m4"} {"signature": "@staticmethoddef timestamp_to_datetime_str(timestamp):", "body": "return DateTimeUtils.get_datetime_string(DateTimeUtils.timestamp_to_datetime(timestamp))", "docstring": "1514736000 --> '2018-01-01 00:00:00' (str)\n\n:param int timestamp: unix timestamp\n:return: datetime str\n:rtype: str", "id": "f10195:c1:m5"} {"signature": "@staticmethoddef str_to_int(str_val):", "body": "try:return int(str_val)except ValueError:return None", "docstring": "Convert string to int::\n\n >>> from eggit.converters import Converter\n >>> result = Converter.str_to_int('123')\n >>> print(result)\n 123\n >>> result = Converter.str_to_int('abc')\n >>> print(result)\n None\n >>> result = Converter.str_to_int('1.2')\n >>> print(result)\n None\n\n:param str str_val: the string value\n:return: the target integer value\n:rtype: int or None", "id": "f10196:c0:m0"} {"signature": "@staticmethoddef str_to_bool(str_val):", "body": "if (str_val.lower() == ''):return Truereturn False", "docstring": "Convert string to boolean::\n\n >>> from eggit.converters import 
Converter\n >>> Converter.str_to_bool('True') # or true\n True\n >>> Converter.str_to_bool('False') # or false\n False\n >>> Converter.str_to_bool('OtherString')\n False\n\n:param str_val: the source string value\n:return: the target boolean value\n:rtype: bool", "id": "f10196:c0:m1"} {"signature": "def __init__(self, current_page, total_page_count, items, total_item_count, page_size=):", "body": "self.current_page = current_pageself.total_page_count = total_page_countself.items = itemsself.total_item_count = total_item_countself.page_size = page_size", "docstring": ":param int current_page: Current page number\n:param int total_page_count: Total page count\n:param object items: Paging data\n:param int total_item_count: Total item count\n:param int page_size: How many items per page", "id": "f10200:c0:m0"} {"signature": "def get_dict(self):", "body": "return dict(current_page=self.current_page,total_page_count=self.total_page_count,items=self.items,total_item_count=self.total_item_count,page_size=self.page_size)", "docstring": "Convert Paginator instance to dict\n\n:return: Paging data\n:rtype: dict", "id": "f10200:c0:m1"} {"signature": "def format_cookies(path):", "body": "with open(path, '') as f:_cookies = {}for row in f.read().split(''):k, v = row.strip().split('', )_cookies[k] = vreturn _cookies", "docstring": "Convert a cookie string into a dictionary\n\n:param path: path to the cookies file\n:return: cookies dictionary", "id": "f10204:m0"} {"signature": "def delete_empty_dir(directory):", "body": "if os.path.exists(directory):if os.path.isdir(directory):for d in os.listdir(directory):path = os.path.join(directory, d)if os.path.isdir(path):delete_empty_dir(path)if not os.listdir(directory):os.rmdir(directory)print(\"\" + directory)else:print(\"\")", "docstring": "Delete empty directories\n\n:param directory: directory path", "id": "f10204:m1"} {"signature": "def _print(stats, limit, label):", "body": "print(\"\".format(label))for index, stat in enumerate(stats):if index < limit:print(stat)else:break", "docstring": "Limit the amount of output", "id": "f10205:m0"} {"signature": "def memoryit(group_by='', limit=):", "body": "def wrapper(func):@wraps(func)def inner(*args, **kwargs):tracemalloc.start()_start = tracemalloc.take_snapshot()_result = func(*args, **kwargs)_end = tracemalloc.take_snapshot()stats = _end.compare_to(_start, group_by)_print(stats, limit, func.__name__ + '')return _resultreturn innerreturn wrapper", "docstring": "Track a function's memory consumption\n\n:param group_by: statistics grouping; one of 'filename', 'lineno', 'traceback'\n:param limit: limit on the number of output lines", "id": "f10205:m1"} {"signature": "@contextmanagerdef memoryit_block(group_by='', limit=, label=''):", "body": "tracemalloc.start()_start = tracemalloc.take_snapshot()try:yieldfinally:_end = tracemalloc.take_snapshot()stats = _end.compare_to(_start, group_by)_print(stats, limit, label)", "docstring": "Track a code block's memory consumption\n\n:param group_by: statistics grouping; one of 'filename', 'lineno', 'traceback'\n:param limit: limit on the number of output lines\n:param label: label for the code block", "id": "f10205:m2"} {"signature": "def _green(string):", "body": "return ''.format(string)", "docstring": "Turn the text green", "id": "f10206:m0"} {"signature": "@contextmanagerdef 
timeit_block(unit='', label=\"\"):", "body": "start = time.time()try:yieldfinally:_format(unit, time.time() - start, label)", "docstring": "Measure the time taken by a code block\n\n:param unit: time unit; one of 's', 'm', 'h' (seconds, minutes, hours)\n:param label: label for the code block", "id": "f10206:m2"} {"signature": "def timeit(unit=''):", "body": "def wrapper(func):@wraps(func)def inner(*args, **kwargs):start = time.time()_result = func(*args, **kwargs)_format(unit, time.time() - start, func.__name__ + '')return _resultreturn innerreturn wrapper", "docstring": "Measure the time taken by a function\n\n:param unit: time unit; one of 's', 'm', 'h' (seconds, minutes, hours)", "id": "f10206:m3"} {"signature": "def profileit(field=''):", "body": "def wrapper(func):@wraps(func)def inner(*args, **kwargs):pro = Profile()pro.runcall(func, *args, **kwargs)stats = Stats(pro)stats.strip_dirs()stats.sort_stats(field)print(\"\".format(func.__name__))stats.print_stats()stats.print_callers()return innerreturn wrapper", "docstring": "Profile a function's runtime behaviour\n\n:param field: how the output is sorted.\n Valid options are \"stdname\", \"calls\", \"time\", \"cumulative\"", "id": "f10207:m0"} {"signature": "def dump(obj, fp, startindex=, separator=DEFAULT, index_separator=DEFAULT):", "body": "if startindex < :raise ValueError(''.format(startindex))try:firstkey = next(iter(obj.keys()))except StopIteration:returnif isinstance(firstkey, six.text_type):converter = six.uelse:converter = six.bdefault_separator = converter('')default_index_separator = converter('')newline = converter('')if separator is DEFAULT:separator = default_separatorif index_separator is DEFAULT:index_separator = default_index_separatorfor key, value in six.iteritems(obj):if isinstance(value, (list, tuple, set)):for index, item in enumerate(value, start=startindex):fp.write(key)fp.write(index_separator)fp.write(converter(str(index)))fp.write(separator)fp.write(item)fp.write(newline)else:fp.write(key)fp.write(separator)fp.write(value)fp.write(newline)", "docstring": "Dump an object in req format to the fp given.\n\n :param Mapping obj: The object to serialize. Must have a keys method.\n :param fp: A writable that can accept all the types given.\n :param separator: The separator between key and value. Defaults to u'|' or b'|', depending on the types.\n :param index_separator: The separator between key and index. Defaults to u'_' or b'_', depending on the types.", "id": "f10211:m0"} {"signature": "def dumps(obj, startindex=, separator=DEFAULT, index_separator=DEFAULT):", "body": "try:firstkey = next(iter(obj.keys()))except StopIteration:return str()if isinstance(firstkey, six.text_type):io = StringIO()else:io = BytesIO()dump(obj=obj,fp=io,startindex=startindex,separator=separator,index_separator=index_separator,)return io.getvalue()", "docstring": "Dump an object in req format to a string.\n\n :param Mapping obj: The object to serialize. Must have a keys method.\n :param separator: The separator between key and value. Defaults to u'|' or b'|', depending on the types.\n :param index_separator: The separator between key and index. 
Defaults to u'_' or b'_', depending on the types.", "id": "f10211:m1"} {"signature": "def load(fp, separator=DEFAULT, index_separator=DEFAULT, cls=dict, list_cls=list):", "body": "converter = Noneoutput = cls()arraykeys = set()for line in fp:if converter is None:if isinstance(line, six.text_type):converter = six.uelse:converter = six.bdefault_separator = converter('')default_index_separator = converter('')newline = converter('')if separator is DEFAULT:separator = default_separatorif index_separator is DEFAULT:index_separator = default_index_separatorkey, value = line.strip().split(separator, )keyparts = key.split(index_separator)try:index = int(keyparts[-])endwithint = Trueexcept ValueError:endwithint = Falseif len(keyparts) > and endwithint:basekey = key.rsplit(index_separator, )[]if basekey not in arraykeys:arraykeys.add(basekey)if basekey in output:if not isinstance(output[basekey], dict):output[basekey] = {-: output[basekey]}else:output[basekey] = {}output[basekey][index] = valueelse:if key in output and isinstance(output[key], dict):output[key][-] = valueelse:output[key] = valuefor key in arraykeys:output[key] = list_cls(pair[] for pair in sorted(six.iteritems(output[key])))return output", "docstring": "Load an object from the file pointer.\n\n :param fp: A readable filehandle.\n :param separator: The separator between key and value. Defaults to u'|' or b'|', depending on the types.\n :param index_separator: The separator between key and index. Defaults to u'_' or b'_', depending on the types.\n :param cls: A callable that returns a Mapping that is filled with pairs. The most common alternate option would be OrderedDict.\n :param list_cls: A callable that takes an iterable and returns a sequence.", "id": "f10211:m2"} {"signature": "def loads(s, separator=DEFAULT, index_separator=DEFAULT, cls=dict, list_cls=list):", "body": "if isinstance(s, six.text_type):io = StringIO(s)else:io = BytesIO(s)return load(fp=io,separator=separator,index_separator=index_separator,cls=cls,list_cls=list_cls,)", "docstring": "Loads an object from a string.\n\n :param s: An object to parse\n :type s: bytes or str\n :param separator: The separator between key and value. Defaults to u'|' or b'|', depending on the types.\n :param index_separator: The separator between key and index. Defaults to u'_' or b'_', depending on the types.\n :param cls: A callable that returns a Mapping that is filled with pairs. 
The most common alternate option would be OrderedDict.\n :param list_cls: A callable that takes an iterable and returns a sequence.", "id": "f10211:m3"} {"signature": "def b64_encode(data: bytes) -> bytes:", "body": "encoded = urlsafe_b64encode(data)return encoded.replace(b'', b'')", "docstring": ":param data: Data to encode.\n:type data: bytes\n:return: Base 64 encoded data with padding removed.\n:rtype: bytes", "id": "f10213:m0"} {"signature": "def b64_decode(data: bytes) -> bytes:", "body": "missing_padding = len(data) % if missing_padding != :data += b'' * ( - missing_padding)return urlsafe_b64decode(data)", "docstring": ":param data: Base 64 encoded data to decode.\n:type data: bytes\n:return: Base 64 decoded data.\n:rtype: bytes", "id": "f10213:m1"} {"signature": "def to_bytes(data: Union[str, bytes]) -> bytes:", "body": "if isinstance(data, bytes):return datareturn data.encode('')", "docstring": ":param data: Data to convert to bytes.\n:type data: Union[str, bytes]\n:return: `data` encoded to UTF8.\n:rtype: bytes", "id": "f10213:m2"} {"signature": "def from_bytes(data: Union[str, bytes]) -> str:", "body": "if isinstance(data, str):return datareturn str(data, '')", "docstring": ":param data: A UTF8 byte string.\n:type data: Union[str, bytes]\n:return: `data` decoded from UTF8.\n:rtype: str", "id": "f10213:m3"} {"signature": "def join(*args: bytes) -> bytes:", "body": "return b''.join(args)", "docstring": "Join any amount of byte strings with a `.`.\n:param args: Any amount of byte strings.\n:type args: bytes\n:return: All provided bytes concatenated with `.`.\n:rtype: bytes", "id": "f10213:m4"} {"signature": "def get_algorithm(alg: str) -> Callable:", "body": "if alg not in algorithms:raise ValueError(''.format(alg))return algorithms[alg]", "docstring": ":param alg: The name of the requested `JSON Web Algorithm `_. 
`RFC7518 `_ is related.\n:type alg: str\n:return: The requested algorithm.\n:rtype: Callable\n:raises: ValueError", "id": "f10214:m0"} {"signature": "def _hash(secret: bytes, data: bytes, alg: str) -> bytes:", "body": "algorithm = get_algorithm(alg)return hmac.new(secret, msg=data, digestmod=algorithm).digest()", "docstring": "Create a new HMAC hash.\n\n:param secret: The secret used when hashing data.\n:type secret: bytes\n:param data: The data to hash.\n:type data: bytes\n:param alg: The algorithm to use when hashing `data`.\n:type alg: str\n:return: New HMAC hash.\n:rtype: bytes", "id": "f10214:m1"} {"signature": "def encode(secret: Union[str, bytes], payload: dict = None,alg: str = default_alg, header: dict = None) -> str:", "body": "secret = util.to_bytes(secret)payload = payload or {}header = header or {}header_json = util.to_bytes(json.dumps(header))header_b64 = util.b64_encode(header_json)payload_json = util.to_bytes(json.dumps(payload))payload_b64 = util.b64_encode(payload_json)pre_signature = util.join(header_b64, payload_b64)signature = _hash(secret, pre_signature, alg)signature_b64 = util.b64_encode(signature)token = util.join(pre_signature, signature_b64)return util.from_bytes(token)", "docstring": ":param secret: The secret used to encode the token.\n:type secret: Union[str, bytes]\n:param payload: The payload to be encoded in the token.\n:type payload: dict\n:param alg: The algorithm used to hash the token.\n:type alg: str\n:param header: The header to be encoded in the token.\n:type header: dict\n:return: A new token\n:rtype: str", "id": "f10214:m2"} {"signature": "def decode(secret: Union[str, bytes], token: Union[str, bytes],alg: str = default_alg) -> Tuple[dict, dict]:", "body": "secret = util.to_bytes(secret)token = util.to_bytes(token)pre_signature, signature_segment = token.rsplit(b'', )header_b64, payload_b64 = pre_signature.split(b'')try:header_json = util.b64_decode(header_b64)header = json.loads(util.from_bytes(header_json))except (json.decoder.JSONDecodeError, UnicodeDecodeError, ValueError):raise InvalidHeaderError('')try:payload_json = util.b64_decode(payload_b64)payload = json.loads(util.from_bytes(payload_json))except (json.decoder.JSONDecodeError, UnicodeDecodeError, ValueError):raise InvalidPayloadError('')if not isinstance(header, dict):raise InvalidHeaderError(''.format(header))if not isinstance(payload, dict):raise InvalidPayloadError(''.format(payload))signature = util.b64_decode(signature_segment)calculated_signature = _hash(secret, pre_signature, alg)if not compare_signature(signature, calculated_signature):raise InvalidSignatureError('')return header, payload", "docstring": "Decodes the given token's header and payload and validates the signature.\n\n:param secret: The secret used to decode the token. Must match the\n secret used when creating the token.\n:type secret: Union[str, bytes]\n:param token: The token to decode.\n:type token: Union[str, bytes]\n:param alg: The algorithm used to decode the token. 
Must match the\n algorithm used when creating the token.\n:type alg: str\n:return: The decoded header and payload.\n:rtype: Tuple[dict, dict]", "id": "f10214:m3"} {"signature": "def compare_signature(expected: Union[str, bytes],actual: Union[str, bytes]) -> bool:", "body": "expected = util.to_bytes(expected)actual = util.to_bytes(actual)return hmac.compare_digest(expected, actual)", "docstring": "Compares the given signatures.\n\n:param expected: The expected signature.\n:type expected: Union[str, bytes]\n:param actual: The actual signature.\n:type actual: Union[str, bytes]\n:return: Do the signatures match?\n:rtype: bool", "id": "f10214:m4"} {"signature": "def compare_token(expected: Union[str, bytes],actual: Union[str, bytes]) -> bool:", "body": "expected = util.to_bytes(expected)actual = util.to_bytes(actual)_, expected_sig_seg = expected.rsplit(b'', )_, actual_sig_seg = actual.rsplit(b'', )expected_sig = util.b64_decode(expected_sig_seg)actual_sig = util.b64_decode(actual_sig_seg)return compare_signature(expected_sig, actual_sig)", "docstring": "Compares the given tokens.\n\n:param expected: The expected token.\n:type expected: Union[str, bytes]\n:param actual: The actual token.\n:type actual: Union[str, bytes]\n:return: Do the tokens match?\n:rtype: bool", "id": "f10214:m5"} {"signature": "def __init__(self, secret: Union[str, bytes], payload: dict = None,alg: str = default_alg, header: dict = None,issuer: str = None, subject: str = None, audience: str = None,valid_to: int = None, valid_from: int = None,issued_at: int = None, id: str = None):", "body": "self.secret = secretself.payload = payload or {}self.alg = algself._header = {}self.header = header or {}self.registered_claims = {}if issuer:self.issuer = issuerif subject:self.subject = subjectif audience:self.audience = audienceif valid_to:self.valid_to = valid_toif valid_from:self.valid_from = valid_fromif issued_at:self.issued_at = issued_atif id:self.id = idself._pop_claims_from_payload()", "docstring": ":param secret: The secret used to encode the token.\n:type secret: Union[str, bytes]\n:param payload: The payload to be encoded in the token.\n:type payload: dict\n:param alg: The algorithm used to hash the token.\n:type alg: str\n:param header: The header of the token.\n:type header: dict\n:param issuer: The issuer of the token.\n:type issuer: str\n:param subject: The subject of the token.\n:type subject: str\n:param audience: The audience of the token.\n:type audience: str\n:param valid_to: Date the token expires as a timestamp.\n:type valid_to: int\n:param valid_from: Date the token is valid from as timestamp.\n:type valid_from: int\n:param issued_at: Date the token was issued as a timestamp.\n:type issued_at: int\n:param id: The unique ID of the token.\n:type id: str", "id": "f10214:c0:m0"} {"signature": "@propertydef header(self) -> dict:", "body": "header = {}if isinstance(self._header, dict):header = self._header.copy()header.update(self._header)header.update({'': '','': self.alg})return header", "docstring": ":return: Token header.\n:rtype: dict", "id": "f10214:c0:m1"} {"signature": "@header.setterdef header(self, header: dict):", "body": "self._header = header", "docstring": "Sets the token header.\n\n:param header: New header\n:type header: dict", "id": "f10214:c0:m2"} {"signature": "@propertydef issuer(self) -> Union[str, None]:", "body": "return self.registered_claims.get('')", "docstring": ":return: Issuer (`iss`) claim from the token.\n:rtype: Union[str, None]", "id": "f10214:c0:m3"} {"signature": "@issuer.setterdef 
issuer(self, issuer: str):", "body": "self.registered_claims[''] = issuer", "docstring": "Sets the issuer (`iss`) claim in the token.\n\n:param issuer: New value.\n:type issuer: str", "id": "f10214:c0:m4"} {"signature": "@propertydef subject(self) -> Union[str, None]:", "body": "return self.registered_claims.get('')", "docstring": ":return: Subject (`sub`) claim from the token.\n:rtype: Union[str, None]", "id": "f10214:c0:m5"} {"signature": "@subject.setterdef subject(self, subject: str):", "body": "self.registered_claims[''] = subject", "docstring": "Sets the subject (`sub`) claim in the token.\n\n:param subject: New value.\n:type subject: str", "id": "f10214:c0:m6"} {"signature": "@propertydef audience(self) -> Union[str, None]:", "body": "return self.registered_claims.get('')", "docstring": ":return: Audience (`aud`) claim from the token.\n:rtype: Union[str, None]", "id": "f10214:c0:m7"} {"signature": "@audience.setterdef audience(self, audience: str):", "body": "self.registered_claims[''] = audience", "docstring": "Sets the audience (`aud`) claim in the token.\n\n:param audience: New value.\n:type audience: str", "id": "f10214:c0:m8"} {"signature": "@propertydef valid_to(self) -> Union[int, None]:", "body": "return self.registered_claims.get('')", "docstring": ":return: Expires (`exp`) claim from the token.\n:rtype: Union[int, None]", "id": "f10214:c0:m9"} {"signature": "@valid_to.setterdef valid_to(self, valid_to: int):", "body": "self.registered_claims[''] = valid_to", "docstring": "Sets the expires (`exp`) claim in the token.\n\n:param valid_to: New value.\n:type valid_to: int", "id": "f10214:c0:m10"} {"signature": "@propertydef valid_from(self) -> Union[int, None]:", "body": "return self.registered_claims.get('')", "docstring": ":return: Not before (`nbf`) claim from the token.\n:rtype: Union[int, None]", "id": "f10214:c0:m11"} {"signature": "@valid_from.setterdef valid_from(self, valid_from: int):", "body": "self.registered_claims[''] = valid_from", "docstring": "Sets the not before (`nbf`) claim in the token.\n\n:param valid_from: New value.\n:type valid_from: int", "id": "f10214:c0:m12"} {"signature": "@propertydef issued_at(self) -> Union[int, None]:", "body": "return self.registered_claims.get('')", "docstring": ":return: Issued at (`iat`) claim from the token.\n:rtype: Union[int, None]", "id": "f10214:c0:m13"} {"signature": "@issued_at.setterdef issued_at(self, issued_at: int):", "body": "self.registered_claims[''] = issued_at", "docstring": "Sets the issued at (`iat`) claim in the token.\n\n:param issued_at: New value.\n:type issued_at: int", "id": "f10214:c0:m14"} {"signature": "@propertydef id(self) -> Union[str, None]:", "body": "return self.registered_claims.get('')", "docstring": ":return: ID (`jti`) claim from the token.\n:rtype: Union[str, None]", "id": "f10214:c0:m15"} {"signature": "@id.setterdef id(self, id: str):", "body": "self.registered_claims[''] = id", "docstring": "Sets the ID (`jti`) claim in the token.\n\n:param id: New value.\n:type id: str", "id": "f10214:c0:m16"} {"signature": "def valid(self, time: int = None) -> bool:", "body": "if time is None:epoch = datetime(, , , , , )now = datetime.utcnow()time = int((now - epoch).total_seconds())if isinstance(self.valid_from, int) and time < self.valid_from:return Falseif isinstance(self.valid_to, int) and time > self.valid_to:return Falsereturn True", "docstring": "Is the token valid? 
This method only checks the timestamps within the\ntoken and compares them against the current time if none is provided.\n\n:param time: The timestamp to validate against\n:type time: Union[int, None]\n:return: The validity of the token.\n:rtype: bool", "id": "f10214:c0:m17"} {"signature": "def _pop_claims_from_payload(self):", "body": "claims_in_payload = [k for k in self.payload.keys() ifk in registered_claims.values()]for name in claims_in_payload:self.registered_claims[name] = self.payload.pop(name)", "docstring": "Check for registered claims in the payload and move them to the\nregistered_claims property, overwriting any extant claims.", "id": "f10214:c0:m18"} {"signature": "def encode(self) -> str:", "body": "payload = {}payload.update(self.registered_claims)payload.update(self.payload)return encode(self.secret, payload, self.alg, self.header)", "docstring": "Create a token based on the data held in the class.\n\n:return: A new token\n:rtype: str", "id": "f10214:c0:m19"} {"signature": "@staticmethoddef decode(secret: Union[str, bytes], token: Union[str, bytes],alg: str = default_alg) -> '':", "body": "header, payload = decode(secret, token, alg)return Jwt(secret, payload, alg, header)", "docstring": "Decodes the given token into an instance of `Jwt`.\n\n:param secret: The secret used to decode the token. Must match the\n secret used when creating the token.\n:type secret: Union[str, bytes]\n:param token: The token to decode.\n:type token: Union[str, bytes]\n:param alg: The algorithm used to decode the token. Must match the\n algorithm used when creating the token.\n:type alg: str\n:return: The decoded token.\n:rtype: `Jwt`", "id": "f10214:c0:m20"} {"signature": "def compare(self, jwt: '', compare_dates: bool = False) -> bool:", "body": "if self.secret != jwt.secret:return Falseif self.payload != jwt.payload:return Falseif self.alg != jwt.alg:return Falseif self.header != jwt.header:return Falseexpected_claims = self.registered_claimsactual_claims = jwt.registered_claimsif not compare_dates:strip = ['', '', '']expected_claims = {k: {v if k not in strip else None} for k, v inexpected_claims.items()}actual_claims = {k: {v if k not in strip else None} for k, v inactual_claims.items()}if expected_claims != actual_claims:return Falsereturn True", "docstring": "Compare against another `Jwt`.\n\n:param jwt: The token to compare against.\n:type jwt: Jwt\n:param compare_dates: Should the comparision take dates into account?\n:type compare_dates: bool\n:return: Are the two Jwt's the same?\n:rtype: bool", "id": "f10214:c0:m21"} {"signature": "def isvalid(code):", "body": "return isinstance(code, basestring) and re.match(PATTERN, code) is not None", "docstring": "``isvalid(code) -> bool``\n\n This function checks if the given fiscal code is syntactically valid.\n\n eg: isvalid('RCCMNL83S18D969H') -> True\n isvalid('RCCMNL83S18D969') -> False", "id": "f10221:m0"} {"signature": "def __common_triplet(input_string, consonants, vowels):", "body": "output = consonantswhile len(output) < :try:output += vowels.pop()except IndexError:output += ''return output[:]", "docstring": "__common_triplet(input_string, consonants, vowels) -> string", "id": "f10221:m1"} {"signature": "def __consonants_and_vowels(input_string):", "body": "input_string = input_string.upper().replace('', '')consonants = [ char for char in input_string if char in __CONSONANTS ]vowels = [ char for char in input_string if char in __VOWELS ]return \"\".join(consonants), vowels", "docstring": "__consonants_and_vowels(input_string) -> 
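The token records above (f10214) describe HMAC-signed tokens: base64url header/payload segments, a constant-time signature check via `hmac.compare_digest`, and `exp`/`nbf` timestamp validation. Because the string and numeric literals are stripped in this dump, here is a minimal stdlib-only sketch of the same HS256-style technique; the helper names `b64url` and `sign` are illustrative and are not the library's own `util` functions.

```python
import base64, hashlib, hmac, json, time

def b64url(data: bytes) -> bytes:
    # URL-safe base64 without padding, as used for JWT segments
    return base64.urlsafe_b64encode(data).rstrip(b"=")

def sign(secret: bytes, signing_input: bytes) -> bytes:
    return hmac.new(secret, signing_input, hashlib.sha256).digest()

def encode(secret: bytes, payload: dict) -> str:
    header = {"typ": "JWT", "alg": "HS256"}
    segments = [
        b64url(json.dumps(header, separators=(",", ":")).encode()),
        b64url(json.dumps(payload, separators=(",", ":")).encode()),
    ]
    signing_input = b".".join(segments)
    segments.append(b64url(sign(secret, signing_input)))
    return b".".join(segments).decode()

def verify(secret: bytes, token: str) -> bool:
    signing_input, _, sig_segment = token.encode().rpartition(b".")
    # constant-time comparison, as in compare_signature above
    return hmac.compare_digest(b64url(sign(secret, signing_input)), sig_segment)

now = int(time.time())
token = encode(b"s3cret", {"sub": "alice", "nbf": now, "exp": now + 300})
assert verify(b"s3cret", token)
```

A real verifier would also re-check the `exp`/`nbf` claims after the signature passes, which is what `Jwt.valid()` above does with its timestamp comparisons.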
(string, list)\n\n Get the consonants as a string and the vowels as a list.", "id": "f10221:m2"} {"signature": "def __surname_triplet(input_string):", "body": "consonants, vowels = __consonants_and_vowels(input_string)return __common_triplet(input_string, consonants, vowels)", "docstring": "__surname_triplet(input_string) -> string", "id": "f10221:m3"} {"signature": "def __name_triplet(input_string):", "body": "if input_string == '':return '' consonants, vowels = __consonants_and_vowels(input_string)if len(consonants) > :return \"\" % (consonants[], consonants[], consonants[])return __common_triplet(input_string, consonants, vowels)", "docstring": "__name_triplet(input_string) -> string", "id": "f10221:m4"} {"signature": "def control_code(input_string):", "body": "assert len(input_string) == even_controlcode = {}for idx, char in enumerate(string.digits):even_controlcode[char] = idxfor idx, char in enumerate(string.ascii_uppercase):even_controlcode[char] = idxvalues = [ , , , , , , , , , , , , , , , , , ,, , , , , , , ]odd_controlcode = {}for idx, char in enumerate(string.digits):odd_controlcode[char] = values[idx]for idx, char in enumerate(string.ascii_uppercase):odd_controlcode[char] = values[idx]code = for idx, char in enumerate(input_string):if idx % == :code += odd_controlcode[char]else:code += even_controlcode[char]return string.ascii_uppercase[code % ]", "docstring": "``control_code(input_string) -> int``\n\n Computes the control code for the given input_string string. The expected\n input_string is the first 15 characters of a fiscal code.\n\n eg: control_code('RCCMNL83S18D969') -> 'H'", "id": "f10221:m5"} {"signature": "def build(surname, name, birthday, sex, municipality):", "body": "output = __surname_triplet(surname) + __name_triplet(name)output += str(birthday.year)[:]output += MONTHSCODE[birthday.month - ]output += \"\" % (sex.upper() == '' and birthday.day or + birthday.day)output += municipalityoutput += control_code(output)assert isvalid(output)return output", "docstring": "``build(surname, name, birthday, sex, municipality) -> string``\n\n Computes the fiscal code for the given person data.\n\n eg: build('Rocca', 'Emanuele', datetime.datetime(1983, 11, 18), 'M', 'D969') \n -> RCCMNL83S18D969H", "id": "f10221:m6"} {"signature": "def get_birthday(code):", "body": "assert isvalid(code)day = int(code[:])day = day < and day or day - month = MONTHSCODE.index(code[]) + year = int(code[:])return \"\" % (day, month, year)", "docstring": "``get_birthday(code) -> string``\n\n Birthday of the person whose fiscal code is 'code', in the format DD-MM-YY. 
\n\n Unfortunately it's not possible to guess the four digit birth year, given\n that the Italian fiscal code uses only the last two digits (1983 -> 83).\n Therefore, this function returns a string and not a datetime object.\n\n eg: birthday('RCCMNL83S18D969H') -> 18-11-83", "id": "f10221:m7"} {"signature": "def get_sex(code):", "body": "assert isvalid(code)return int(code[:]) < and '' or ''", "docstring": "``get_sex(code) -> string``\n\n The sex of the person whose fiscal code is 'code'.\n\n eg: sex('RCCMNL83S18D969H') -> 'M'\n sex('CNTCHR83T41D969D') -> 'F'", "id": "f10221:m8"} {"signature": "def fit(self, X, y, model_filename=None):", "body": "train_file = \"\"X = [x.replace(\"\", \"\") for x in X]y = [_.replace(\"\", \"\") for _ in y]lines = [\"\".format(self.prefix, j, i) for i, j in zip(X, y)]content = \"\".join(lines)write(train_file, Text(content))if model_filename:self.estimator = ft.supervised(train_file, model_filename)else:self.estimator = ft.supervised(train_file, '')os.remove('')os.remove(train_file)", "docstring": "Fit FastText according to X, y\n\n Parameters\n ----------\n X : list of string\n each item is a raw text\n y : list of string\n each item is a label", "id": "f10241:c0:m1"} {"signature": "def predict(self, X):", "body": "x = Xif not isinstance(X, list):x = [X]y = self.estimator.predict(x)y = [item[] for item in y]y = [self._remove_prefix(label) for label in y]if not isinstance(X, list):y = y[]return y", "docstring": "In order to obtain the most likely label for a list of text\n\n Parameters\n ----------\n X : list of string\n Raw texts\n\n Returns\n -------\n C : list of string\n List labels", "id": "f10241:c0:m3"} {"signature": "def fit(self, X, y, coef_init=None, intercept_init=None,sample_weight=None):", "body": "super(SGDClassifier, self).fit(X, y, coef_init, intercept_init,sample_weight)", "docstring": "Fit linear model with Stochastic Gradient Descent.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data\n\n y : numpy array, shape (n_samples,)\n Target values\n\n coef_init : array, shape (n_classes, n_features)\n The initial coefficients to warm-start the optimization.\n\n intercept_init : array, shape (n_classes,)\n The initial intercept to warm-start the optimization.\n\n sample_weight : array-like, shape (n_samples,), optional\n Weights applied to individual samples.\n If not provided, uniform weights are assumed. These weights will\n be multiplied with class_weight (passed through the\n constructor) if class_weight is specified\n\n Returns\n -------\n self : returns an instance of self.", "id": "f10242:c0:m1"} {"signature": "def predict(self, X):", "body": "super(SGDClassifier, self).predict(X)", "docstring": "Predict class labels for samples in X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n Samples.\n\n Returns\n -------\n C : array, shape = [n_samples]\n Predicted class label per sample.", "id": "f10242:c0:m2"} {"signature": "def fit(self, X, y=None):", "body": "self.booster_ = NoneX = self._convert(X, y)if self.wl:wl = [(X, '')]for i, ent in enumerate(self.wl):ent, lbl = entwl.append((self.convert(ent, lbl), '' + str(i)))self.booster_ = xgb.train(self.param, X, self.n_iter, wl, verbose_eval=self.param[\"\"])else:self.booster_ = xgb.train(self.param, X, self.n_iter,[(X, '')], verbose_eval=self.param[\"\"])return self", "docstring": "Parameters\n----------\nX : {array-like, sparse matrix}\n Training data. 
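The codice fiscale records above (f10221) have their consonant/vowel sets, padding character and month codes stripped, so the bodies are not directly runnable. Below is a small sketch of just the surname/name triplet rule they describe, under the usual assumptions for this algorithm (which are not visible in the dump): vowels AEIOU, padding with 'X', and, for first names with four or more consonants, taking the 1st, 3rd and 4th.

```python
CONSONANTS = set("BCDFGHJKLMNPQRSTVWXYZ")
VOWELS = set("AEIOU")

def consonants_and_vowels(s: str):
    s = s.upper().replace(" ", "")
    return [c for c in s if c in CONSONANTS], [c for c in s if c in VOWELS]

def common_triplet(consonants, vowels) -> str:
    # consonants first, then vowels, then pad with 'X' up to three characters
    out = "".join(consonants)
    for v in vowels:
        if len(out) >= 3:
            break
        out += v
    return (out + "XXX")[:3]

def surname_triplet(surname: str) -> str:
    return common_triplet(*consonants_and_vowels(surname))

def name_triplet(name: str) -> str:
    cons, vows = consonants_and_vowels(name)
    if len(cons) > 3:
        # first names with four or more consonants use the 1st, 3rd and 4th
        return cons[0] + cons[2] + cons[3]
    return common_triplet(cons, vows)

assert surname_triplet("Rocca") == "RCC"
assert name_triplet("Emanuele") == "MNL"
```

The asserted values match the worked example in the `build` docstring above ('Rocca', 'Emanuele' -> RCCMNL83S18D969H).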
Shape (n_samples, n_features)\n\ny : numpy array\n Target values. Shape (n_samples,)\n\nReturns\n-------\nself : C\n returns an instance of self.", "id": "f10243:c0:m1"} {"signature": "def fit(self, X, y):", "body": "word_vector_transformer = WordVectorTransformer(padding='')X = word_vector_transformer.fit_transform(X)X = LongTensor(X)self.word_vector_transformer = word_vector_transformery_transformer = LabelEncoder()y = y_transformer.fit_transform(y)y = torch.from_numpy(y)self.y_transformer = y_transformerdataset = CategorizedDataset(X, y)dataloader = DataLoader(dataset,batch_size=self.batch_size,shuffle=True,num_workers=)KERNEL_SIZES = self.kernel_sizesNUM_KERNEL = self.num_kernelEMBEDDING_DIM = self.embedding_dimmodel = TextCNN(vocab_size=word_vector_transformer.get_vocab_size(),embedding_dim=EMBEDDING_DIM,output_size=len(self.y_transformer.classes_),kernel_sizes=KERNEL_SIZES,num_kernel=NUM_KERNEL)if USE_CUDA:model = model.cuda()EPOCH = self.epochLR = self.lrloss_function = nn.CrossEntropyLoss()optimizer = optim.Adam(model.parameters(), lr=LR)for epoch in range(EPOCH):losses = []for i, data in enumerate(dataloader):X, y = dataX, y = Variable(X), Variable(y)optimizer.zero_grad()model.train()output = model(X)loss = loss_function(output, y)losses.append(loss.data.tolist()[])loss.backward()optimizer.step()if i % == :print(\"\" % (epoch, EPOCH, np.mean(losses)))losses = []self.model = model", "docstring": "Fit KimCNNClassifier according to X, y\n\n Parameters\n ----------\n X : list of string\n each item is a raw text\n y : list of string\n each item is a label", "id": "f10244:c2:m1"} {"signature": "def predict(self, X):", "body": "x = self.word_vector_transformer.transform(X)x = Variable(LongTensor(x))y = self.model(x)y = torch.max(y, )[].data.numpy()y = self.y_transformer.inverse_transform(y)return y", "docstring": "Parameters\n----------\nX : list of string\n Raw texts\n\nReturns\n-------\nC : list of string\n List labels", "id": "f10244:c2:m2"} {"signature": "def fit(self, X, y):", "body": "trainer = pycrfsuite.Trainer(verbose=True)for xseq, yseq in zip(X, y):trainer.append(xseq, yseq)trainer.set_params(self.params)if self.filename:filename = self.filenameelse:filename = ''trainer.train(filename)tagger = pycrfsuite.Tagger()tagger.open(filename)self.estimator = tagger", "docstring": "Fit CRF according to X, y\n\n Parameters\n ----------\n X : list of text\n each item is a text\n y: list\n each item is either a label (in multi class problem) or list of\n labels (in multi label problem)", "id": "f10245:c0:m1"} {"signature": "def predict(self, X):", "body": "if isinstance(X[], list):return [self.estimator.tag(x) for x in X]return self.estimator.tag(X)", "docstring": "Predict class labels for samples in X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n Samples.", "id": "f10245:c0:m2"} {"signature": "@staticmethoddef log(model_folder, binary_file=\"\",log_folder=\"\"):", "body": "file = join(model_folder, binary_file)vectorizer = joblib.load(file)output = []for token in vectorizer.vocabulary_:index = vectorizer.vocabulary_[token]ngram = len(token.split(\"\"))output.append({\"\": token,\"\": ngram,\"\": vectorizer.idf_[index],\"\": vectorizer.period_[index].item(),\"\": vectorizer.df_[index],})output = sorted(output, key=lambda item: item[\"\"])content = json.dumps(output, ensure_ascii=False)write(join(log_folder, \"\"), content)", "docstring": "Parameters\n----------\nmodel_folder : string\n folder contains binaries file of model\nbinary_file : 
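The estimator records above (fastText, SGDClassifier, XGBoost, KimCNN, CRF wrappers) all expose the same contract: `fit` takes a list of raw texts plus a list of string labels, and `predict` returns labels. Since their stripped bodies do not run as shown, here is that contract illustrated with plain scikit-learn (assumed available; it stands in for, and is not, the corpus's own wrappers).

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline

# toy data: raw texts in, string labels out
X = ["the market fell sharply", "the team won the final", "shares rallied today"]
y = ["economy", "sports", "economy"]

model = Pipeline([
    ("tfidf", TfidfVectorizer(ngram_range=(1, 2))),
    ("clf", SGDClassifier(max_iter=1000)),
])
model.fit(X, y)
print(model.predict(["the striker scored twice"]))
```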
string\n file path to tfidf binary file\nlog_folder : string\n log folder", "id": "f10247:c0:m0"} {"signature": "@staticmethoddef log(model_folder, binary_file=\"\",log_folder=\"\"):", "body": "file = join(model_folder, binary_file)vectorizer = joblib.load(file)output = []for token in vectorizer.vocabulary_:index = vectorizer.vocabulary_[token]ngram = len(token.split(\"\"))output.append({\"\": token,\"\": ngram,\"\": vectorizer.period_[index].item(),\"\": vectorizer.df_[index],})output = sorted(output, key=lambda item: item[\"\"])content = json.dumps(output, ensure_ascii=False)write(join(log_folder, \"\"), content)", "docstring": "Parameters\n----------\nmodel_folder : string\n folder contains binaries file of model\nbinary_file : string\n file path to count transformer binary file\nlog_folder : string\n log folder", "id": "f10251:c0:m0"} {"signature": "def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None):", "body": "columnwidth = max([len(x) for x in labels] + []) empty_cell = \"\" * columnwidthprint(\"\" + empty_cell, end=\"\")for label in labels:print(\"\".format(columnwidth) % label, end=\"\")print()for i, label1 in enumerate(labels):print(\"\".format(columnwidth) % label1, end=\"\")for j in range(len(labels)):cell = \"\".format(columnwidth) % cm[i, j]if hide_zeroes:cell = cell if float(cm[i, j]) != else empty_cellif hide_diagonal:cell = cell if i != j else empty_cellif hide_threshold:cell = cell if cm[i, j] > hide_threshold else empty_cellprint(cell, end=\"\")print()", "docstring": "pretty print for confusion matrixes", "id": "f10253:m0"} {"signature": "def data(self, X=None, y=None, sentences=None):", "body": "self.X = Xself.y = yself.sentences = sentences", "docstring": "Add data to flow", "id": "f10255:c0:m1"} {"signature": "def transform(self, transformer):", "body": "self.transformers.append(transformer)from languageflow.transformer.tagged import TaggedTransformerif isinstance(transformer, TaggedTransformer):self.X, self.y = transformer.transform(self.sentences)if isinstance(transformer, TfidfVectorizer):self.X = transformer.fit_transform(self.X)if isinstance(transformer, CountVectorizer):self.X = transformer.fit_transform(self.X)if isinstance(transformer, NumberRemover):self.X = transformer.transform(self.X)if isinstance(transformer, MultiLabelBinarizer):self.y = transformer.fit_transform(self.y)", "docstring": "Add transformer to flow and apply transformer to data in flow\n\nParameters\n----------\ntransformer : Transformer\n a transformer to transform data", "id": "f10255:c0:m2"} {"signature": "def add_model(self, model):", "body": "self.models.append(model)", "docstring": "Add model to flow", "id": "f10255:c0:m3"} {"signature": "def train(self):", "body": "for i, model in enumerate(self.models):N = [int(i * len(self.y)) for i in self.lc_range]for n in N:X = self.X[:n]y = self.y[:n]e = Experiment(X, y, model.estimator, self.scores,self.validation_method)e.log_folder = self.log_foldere.train()", "docstring": "Train model with transformed data", "id": "f10255:c0:m7"} {"signature": "def export(self, model_name, export_folder):", "body": "for transformer in self.transformers:if isinstance(transformer, MultiLabelBinarizer):joblib.dump(transformer,join(export_folder, \"\"),protocol=)if isinstance(transformer, TfidfVectorizer):joblib.dump(transformer,join(export_folder, \"\"),protocol=)if isinstance(transformer, CountVectorizer):joblib.dump(transformer,join(export_folder, \"\"),protocol=)if isinstance(transformer, 
NumberRemover):joblib.dump(transformer,join(export_folder, \"\"),protocol=)model = [model for model in self.models if model.name == model_name][]e = Experiment(self.X, self.y, model.estimator, None)model_filename = join(export_folder, \"\")e.export(model_filename)", "docstring": "Export model and transformers to export_folder\n\nParameters\n----------\nmodel_name: string\n name of model to export\nexport_folder: string\n folder to store exported model and transformers", "id": "f10255:c0:m8"} {"signature": "@click.group()def main(args=None):", "body": "pass", "docstring": "Console script for languageflow", "id": "f10256:m0"} {"signature": "def __init__(self, filepath):", "body": "data_folder = join(dirname(dirname(__file__)), \"\")data_file = join(data_folder, filepath)self.data_file = data_fileself.words_data = None", "docstring": "load words from Ho Ngoc Duc's dictionary\n\n :param str filepath: filename of dictionary data\n :type filepath: str", "id": "f10259:c0:m0"} {"signature": "def analyze(self, output_folder=\"\", auto_remove=False):", "body": "if auto_remove:try:shutil.rmtree(output_folder)except:passtry:mkdir(output_folder)except:passtokens = [token for sublist in self.sentences for token in sublist]df = pd.DataFrame(tokens)log = u\"\"log += u\"\".format(len(self.sentences))n = df.shape[]log += self._analyze_first_token(df, , output_folder)for i in range(, n):log += self._analyze_field(df, i, output_folder)print(log)stat_file = join(output_folder, \"\")write(stat_file, log)", "docstring": ":type auto_remove: boolean\n:param boolean auto_remove: auto remove previous files in analyze folder", "id": "f10260:c0:m6"} {"signature": "def serve(self, port=):", "body": "from http.server import HTTPServer, CGIHTTPRequestHandleros.chdir(self.log_folder)httpd = HTTPServer(('', port), CGIHTTPRequestHandler)print(\"\" + str(httpd.server_port))webbrowser.open(''.format(port))httpd.serve_forever()", "docstring": "Start LanguageBoard web application\n\n Parameters\n ----------\n port: int\n port to serve web application", "id": "f10261:c0:m1"} {"signature": "def load_big_file(f):", "body": "logger.info(f'')with open(f, '') as f_in:bf = mmap.mmap(f_in.fileno(), )f_in.close()return bf", "docstring": "Workaround for loading a big pickle file. Files over 2GB cause pickle errors on certin Mac and Windows distributions.\n:param f:\n:return:", "id": "f10262:m0"} {"signature": "def url_to_filename(url: str, etag: str = None) -> str:", "body": "url_bytes = url.encode('')b64_bytes = base64.b64encode(url_bytes)decoded = b64_bytes.decode('')if etag:etag = etag.replace('', '')return f\"\"else:return decoded", "docstring": "Converts a url into a filename in a reversible way.\nIf `etag` is specified, add it on the end, separated by a period\n(which necessarily won't appear in the base64-encoded filename).\nGet rid of the quotes in the etag, since Windows doesn't like them.", "id": "f10262:m1"} {"signature": "def filename_to_url(filename: str) -> Tuple[str, str]:", "body": "try:decoded, etag = filename.split(\"\", )except ValueError:decoded, etag = filename, Nonefilename_bytes = decoded.encode('')url_bytes = base64.b64decode(filename_bytes)return url_bytes.decode(''), etag", "docstring": "Recovers the the url from the encoded filename. 
Returns it and the ETag\n(which may be ``None``)", "id": "f10262:m2"} {"signature": "def cached_path(url_or_filename: str, cache_dir: Path) -> Path:", "body": "dataset_cache = Path(CACHE_ROOT) / cache_dirparsed = urlparse(url_or_filename)if parsed.scheme in ('', ''):return get_from_cache(url_or_filename, dataset_cache)elif parsed.scheme == '' and Path(url_or_filename).exists():return Path(url_or_filename)elif parsed.scheme == '':raise FileNotFoundError(\"\".format(url_or_filename))else:raise ValueError(\"\".format(url_or_filename))", "docstring": "Given something that might be a URL (or might be a local path),\ndetermine which. If it's a URL, download the file and cache it, and\nreturn the path to the cached file. If it's already a local path,\nmake sure the file exists and then return the path.", "id": "f10262:m3"} {"signature": "def get_from_cache(url: str, cache_dir: Path = None) -> Path:", "body": "cache_dir.mkdir(parents=True, exist_ok=True)filename = re.sub(r'', '', url)cache_path = cache_dir / filenameif cache_path.exists():return cache_pathresponse = requests.head(url)if response.status_code != :if \"\" in url:passelse:raise IOError(\"\".format(url))if not cache_path.exists():fd, temp_filename = tempfile.mkstemp()logger.info(\"\", url, temp_filename)req = requests.get(url, stream=True)content_length = req.headers.get('')total = int(content_length) if content_length is not None else Noneprogress = Tqdm.tqdm(unit=\"\", total=total)with open(temp_filename, '') as temp_file:for chunk in req.iter_content(chunk_size=):if chunk: progress.update(len(chunk))temp_file.write(chunk)progress.close()logger.info(\"\", temp_filename, cache_path)shutil.copyfile(temp_filename, str(cache_path))logger.info(\"\", temp_filename)os.close(fd)os.remove(temp_filename)return cache_path", "docstring": "Given a URL, look for the corresponding dataset in the local cache.\nIf it's not there, download it. Then return the path to the cached file.", "id": "f10262:m4"} {"signature": "@staticmethoddef set_slower_interval(use_slower_interval: bool) -> None:", "body": "if use_slower_interval:Tqdm.default_mininterval = else:Tqdm.default_mininterval = ", "docstring": "If ``use_slower_interval`` is ``True``, we will dramatically slow down ``tqdm's`` default\noutput rate. ``tqdm's`` default output rate is great for interactively watching progress,\nbut it is not great for log files. 
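The caching helpers above (f10262) describe a reversible URL-to-filename scheme: base64-encode the URL and, when an ETag is known, append it after a period with its quotes removed. The literals are stripped in this dump, so the following stdlib sketch re-states that scheme; it mirrors the documented behaviour rather than reproducing the original code.

```python
import base64
from typing import Optional, Tuple

def url_to_filename(url: str, etag: Optional[str] = None) -> str:
    # base64 output contains no '.', so the first '.' always separates the ETag
    name = base64.b64encode(url.encode("utf-8")).decode("utf-8")
    if etag:
        name = name + "." + etag.replace('"', "")
    return name

def filename_to_url(filename: str) -> Tuple[str, Optional[str]]:
    try:
        encoded, etag = filename.split(".", 1)
    except ValueError:
        encoded, etag = filename, None
    url = base64.b64decode(encoded.encode("utf-8")).decode("utf-8")
    return url, etag

name = url_to_filename("https://example.org/model.bin", etag='"abc123"')
assert filename_to_url(name) == ("https://example.org/model.bin", "abc123")
```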
You might want to set this if you are primarily going\nto be looking at output through log files, not the terminal.", "id": "f10262:c0:m1"} {"signature": "def _flat(l):", "body": "return [item[] for item in l]", "docstring": ":type l: list of list", "id": "f10269:m0"} {"signature": "def fit_transform(self, raw_documents, y=None):", "body": "documents = super(TfidfVectorizer, self).fit_transform(raw_documents=raw_documents, y=y)count = CountVectorizer(encoding=self.encoding,decode_error=self.decode_error,strip_accents=self.strip_accents,lowercase=self.lowercase,preprocessor=self.preprocessor,tokenizer=self.tokenizer,stop_words=self.stop_words,token_pattern=self.token_pattern,ngram_range=self.ngram_range,analyzer=self.analyzer,max_df=self.max_df,min_df=self.min_df,max_features=self.max_features,vocabulary=self.vocabulary_,binary=self.binary,dtype=self.dtype)count.fit_transform(raw_documents=raw_documents, y=y)self.period_ = count.period_self.df_ = count.df_self.n = count.nreturn documents", "docstring": "Learn vocabulary and idf, return term-document matrix.\n This is equivalent to fit followed by transform, but more efficiently\n implemented.\n Parameters\n ----------\n raw_documents : iterable\n an iterable which yields either str, unicode or file objects\n Returns\n -------\n X : sparse matrix, [n_samples, n_features]\n Tf-idf-weighted document-term matrix.", "id": "f10270:c0:m1"} {"signature": "def transform(self, raw_documents):", "body": "return [self._remove(document) for document in raw_documents]", "docstring": "Remove number in each document\n\nParameters\n----------\nraw_documents : iterable\n An iterable which yields either str, unicode\n\nReturns\n-------\nX : iterable\n cleaned documents", "id": "f10273:c0:m2"} {"signature": "def Text(text):", "body": "if not is_unicode(text):text = text.decode(\"\")text = unicodedata.normalize(\"\", text)return text", "docstring": "provide a wrapper for python string\n map byte to str (python 3)\n map str to unicode (python 2)\n all string in utf-8 encoding\n normalize string to NFC", "id": "f10274:m0"} {"signature": "def fit_transform(self, raw_documents, y=None):", "body": "documents = super(CountVectorizer, self).fit_transform(raw_documents=raw_documents, y=y)self.n = len(raw_documents)m = (self.transform(raw_documents) > ).astype(int)m = m.sum(axis=).A1self.period_ = mself.df_ = m / self.nreturn documents", "docstring": "Learn the vocabulary dictionary and return term-document matrix.\n This is equivalent to fit followed by transform, but more efficiently\n implemented.\n\n Parameters\n ----------\n raw_documents : iterable\n An iterable which yields either str, unicode or file objects.\n\n Returns\n -------\n X : array, [n_samples, n_features]\n Document-term matrix.", "id": "f10276:c0:m1"} {"signature": "def load_key(pubkey):", "body": "try:return load_pem_public_key(pubkey.encode(), default_backend())except ValueError:pubkey = pubkey.replace('', '').replace('', '')return load_pem_public_key(pubkey.encode(), default_backend())", "docstring": "Load public RSA key.\n\n Work around keys with incorrect header/footer format.\n\n Read more about RSA encryption with cryptography:\n https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/", "id": "f10279:m0"} {"signature": "def encrypt(pubkey, password):", "body": "key = load_key(pubkey)encrypted_password = key.encrypt(password, PKCS1v15())return base64.b64encode(encrypted_password)", "docstring": "Encrypt password using given RSA public key and encode it with base64.\n\n The encrypted 
password can only be decrypted by someone with the\n private key (in this case, only Travis).", "id": "f10279:m1"} {"signature": "def fetch_public_key(repo):", "body": "keyurl = ''.format(repo)data = json.loads(urlopen(keyurl).read().decode())if '' not in data:errmsg = \"\".format(repo)errmsg += \"\"raise ValueError(errmsg)return data['']", "docstring": "Download RSA public key Travis will use for this repo.\n\n Travis API docs: http://docs.travis-ci.com/api/#repository-keys", "id": "f10279:m2"} {"signature": "def prepend_line(filepath, line):", "body": "with open(filepath) as f:lines = f.readlines()lines.insert(, line)with open(filepath, '') as f:f.writelines(lines)", "docstring": "Rewrite a file adding a line to its beginning.", "id": "f10279:m3"} {"signature": "def load_yaml_config(filepath):", "body": "with open(filepath) as f:return yaml.load(f)", "docstring": "Load yaml config file at the given path.", "id": "f10279:m4"} {"signature": "def save_yaml_config(filepath, config):", "body": "with open(filepath, '') as f:yaml.dump(config, f, default_flow_style=False)", "docstring": "Save yaml config file at the given path.", "id": "f10279:m5"} {"signature": "def update_travis_deploy_password(encrypted_password):", "body": "config = load_yaml_config(TRAVIS_CONFIG_FILE)config[''][''] = dict(secure=encrypted_password)save_yaml_config(TRAVIS_CONFIG_FILE, config)line = ('''')prepend_line(TRAVIS_CONFIG_FILE, line)", "docstring": "Put `encrypted_password` into the deploy section of .travis.yml.", "id": "f10279:m6"} {"signature": "def main(args):", "body": "public_key = fetch_public_key(args.repo)password = args.password or getpass('')update_travis_deploy_password(encrypt(public_key, password.encode()))print(\"\")", "docstring": "Add a PyPI password to .travis.yml so that Travis can deploy to PyPI.\n\n Fetch the Travis public key for the repo, and encrypt the PyPI password\n with it before adding, so that only Travis can decrypt and use the PyPI\n password.", "id": "f10279:m7"} {"signature": "def basicConfig(**kwargs):", "body": "logging.basicConfig(**kwargs)logging._acquireLock()try:stream = logging.root.handlers[]stream.setFormatter(ColoredFormatter(fmt=kwargs.get('', BASIC_FORMAT),datefmt=kwargs.get('', None)))finally:logging._releaseLock()", "docstring": "Call ``logging.basicConfig`` and override the formatter it creates.", "id": "f10288:m0"} {"signature": "def ensure_configured(func):", "body": "@functools.wraps(func)def wrapper(*args, **kwargs):if len(logging.root.handlers) == :basicConfig()return func(*args, **kwargs)return wrapper", "docstring": "Modify a function to call ``basicConfig`` first if no handlers exist.", "id": "f10288:m1"} {"signature": "def __init__(self, fmt=None, datefmt=None, style='',log_colors=None, reset=True,secondary_log_colors=None):", "body": "if fmt is None:if sys.version_info > (, ):fmt = default_formats[style]else:fmt = default_formats['']if sys.version_info > (, ):super(ColoredFormatter, self).__init__(fmt, datefmt, style)elif sys.version_info > (, ):super(ColoredFormatter, self).__init__(fmt, datefmt)else:logging.Formatter.__init__(self, fmt, datefmt)self.log_colors = (log_colors if log_colors is not None else default_log_colors)self.secondary_log_colors = secondary_log_colorsself.reset = reset", "docstring": "Set the format and colors the ColoredFormatter will use.\n\nThe ``fmt``, ``datefmt`` and ``style`` args are passed on to the\n``logging.Formatter`` constructor.\n\nThe ``secondary_log_colors`` argument can be used to create additional\n``log_color`` 
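The Travis helper records above (f10279) encrypt a PyPI password with the repository's RSA public key using PKCS#1 v1.5 padding and then base64-encode the result for .travis.yml. The sketch below shows the same steps with the `cryptography` package, assuming a recent release where the backend argument is optional; the locally generated key pair is only a stand-in for the PEM the Travis API would return.

```python
import base64
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
from cryptography.hazmat.primitives.serialization import (
    Encoding, PublicFormat, load_pem_public_key,
)

# stand-in key pair so the sketch is self-contained
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
pem = private_key.public_key().public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo)

def encrypt_password(pubkey_pem: bytes, password: bytes) -> bytes:
    # PKCS#1 v1.5 encrypt, then base64 so the value can be pasted into the config
    key = load_pem_public_key(pubkey_pem)
    return base64.b64encode(key.encrypt(password, PKCS1v15()))

print(encrypt_password(pem, b"my-pypi-password").decode())
```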
attributes. Each key in the dictionary will set\n``{key}_log_color``, using the value to select from a different\n``log_colors`` set.\n\n:Parameters:\n- fmt (str): The format string to use\n- datefmt (str): A format string for the date\n- log_colors (dict):\n A mapping of log level names to color names\n- reset (bool):\n Implictly append a color reset to all records unless False\n- style ('%' or '{' or '$'):\n The format style to use. (*No meaning prior to Python 3.2.*)\n- secondary_log_colors (dict):\n Map secondary ``log_color`` attributes. (*New in version 2.6.*)", "id": "f10289:c1:m0"} {"signature": "def color(self, log_colors, name):", "body": "return parse_colors(log_colors.get(name, \"\"))", "docstring": "Return escape codes from a ``log_colors`` dict.", "id": "f10289:c1:m1"} {"signature": "def format(self, record):", "body": "record = ColoredRecord(record)record.log_color = self.color(self.log_colors, record.levelname)if self.secondary_log_colors:for name, log_colors in list(self.secondary_log_colors.items()):color = self.color(log_colors, record.levelname)setattr(record, name + '', color)if sys.version_info > (, ):message = super(ColoredFormatter, self).format(record)else:message = logging.Formatter.format(self, record)if self.reset and not message.endswith(escape_codes['']):message += escape_codes['']return message", "docstring": "Format a message from a record object.", "id": "f10289:c1:m2"} {"signature": "def __init__(self, fmt=None, datefmt=None, style='',log_colors=None, reset=True,secondary_log_colors=None):", "body": "if sys.version_info > (, ):super(LevelFormatter, self).__init__(fmt=fmt, datefmt=datefmt, style=style, log_colors=log_colors,reset=reset, secondary_log_colors=secondary_log_colors)else:ColoredFormatter.__init__(self, fmt=fmt, datefmt=datefmt, style=style,log_colors=log_colors, reset=reset,secondary_log_colors=secondary_log_colors)self.style = styleself.fmt = fmt", "docstring": "Set the per-loglevel format that will be used.\n\nSupports fmt as a dict. All other args are passed on to the\n``colorlog.ColoredFormatter`` constructor.\n\n:Parameters:\n- fmt (dict):\n A mapping of log levels (represented as strings, e.g. 'WARNING') to\n different formatters. 
(*New in version 2.7.0)\n(All other parameters are the same as in colorlog.ColoredFormatter)\n\nExample:\n\nformatter = colorlog.LevelFormatter(fmt={\n 'DEBUG':'%(log_color)s%(msg)s (%(module)s:%(lineno)d)',\n 'INFO': '%(log_color)s%(msg)s',\n 'WARNING': '%(log_color)sWARN: %(msg)s (%(module)s:%(lineno)d)',\n 'ERROR': '%(log_color)sERROR: %(msg)s (%(module)s:%(lineno)d)',\n 'CRITICAL': '%(log_color)sCRIT: %(msg)s (%(module)s:%(lineno)d)',\n})", "id": "f10289:c2:m0"} {"signature": "def format(self, record):", "body": "if isinstance(self.fmt, dict):self._fmt = self.fmt[record.levelname]if sys.version_info > (, ):if self.style not in logging._STYLES:raise ValueError('' % ''.join(list(logging._STYLES.keys())))self._style = logging._STYLES[self.style][](self._fmt)if sys.version_info > (, ):message = super(LevelFormatter, self).format(record)else:message = ColoredFormatter.format(self, record)return message", "docstring": "Customize the message format based on the log level.", "id": "f10289:c2:m1"} {"signature": "def parse_colors(sequence):", "body": "return ''.join(escape_codes[n] for n in sequence.split('') if n)", "docstring": "Return escape codes from a color sequence.", "id": "f10290:m1"} {"signature": "def path(filename):", "body": "return os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)", "docstring": "Return an absolute path to a file in the current directory.", "id": "f10293:m0"} {"signature": "def assert_log_message(log_function, message, capsys):", "body": "log_function(message)out, err = capsys.readouterr()print(err, end='', file=sys.stderr)assert message in err, ''return err", "docstring": "Call a log function and check the message has been output.", "id": "f10294:m0"} {"signature": "@propertydef stdout(self):", "body": "if self._streaming:stdout = []while not self.__stdout.empty():try:line = self.__stdout.get_nowait()stdout.append(line)except:passelse:stdout = self.__stdoutreturn stdout", "docstring": "Converts stdout string to a list.", "id": "f10302:c0:m10"} {"signature": "@propertydef stderr(self):", "body": "if self._streaming:stderr = []while not self.__stderr.empty():try:line = self.__stderr.get_nowait()stderr.append(line)except:passelse:stderr = self.__stderrreturn stderr", "docstring": "Converts stderr string to a list.", "id": "f10302:c0:m11"} {"signature": "def stdin(self, line):", "body": "if self._streaming:self.__stdin.put(line)", "docstring": "Sends input to stdin.", "id": "f10302:c0:m12"} {"signature": "@propertydef traceback(self):", "body": "if self._exception:return traceback.format_exc().split(\"\")else:return []", "docstring": "Converts traceback string to a list.", "id": "f10302:c0:m13"} {"signature": "@propertydef is_success(self):", "body": "return self.is_complete and self.rc == ", "docstring": "Returns if the result of the command was a success.\nTrue for success, False for failure.", "id": "f10302:c0:m14"} {"signature": "@propertydef is_failure(self):", "body": "return self.is_complete and not self.rc == ", "docstring": "Returns if the result of the command was a failure.\nTrue for failure, False for succes.", "id": "f10302:c0:m15"} {"signature": "@propertydef has_exception(self):", "body": "return bool(self._exception)", "docstring": "Returns True if self._exception is not empty.", "id": "f10302:c0:m16"} {"signature": "def print_stdout(self, always_print=False):", "body": "if self.__stdout or always_print:self.__echo.info(\"\" + \"\" * )self.__format_lines_info(self.stdout)self.__echo.info(\"\" + \"\" * )", "docstring": "Prints the 
stdout to console - if there is any stdout, otherwise does nothing.\n:param always_print: print the stdout, even if there is nothing in the buffer (default: false)", "id": "f10302:c0:m17"} {"signature": "def print_stderr(self, always_print=False):", "body": "if self.__stderr or always_print:self.__echo.critical(\"\" + \"\" * )self.__format_lines_error(self.stderr)self.__echo.critical(\"\" + \"\" * )", "docstring": "Prints the stderr to console - if there is any stdout, otherwise does nothing.\n:param always_print: print the stderr, even if there is nothing in the buffer (default: false)", "id": "f10302:c0:m18"} {"signature": "def print_traceback(self, always_print=False):", "body": "if self._exception or always_print:self.__echo.critical(\"\" + \"\" * )self.__format_lines_error(self.traceback)self.__echo.critical(\"\" + \"\" * )", "docstring": "Prints the traceback to console - if there is any traceback, otherwise does nothing.\n:param always_print: print the traceback, even if there is nothing in the buffer (default: false)", "id": "f10302:c0:m19"} {"signature": "@propertydef current_context(self):", "body": "return self._context[-] if len(self._context) > else {}", "docstring": "Returns the context that Sultan is running on", "id": "f10303:c0:m2"} {"signature": "def __enter__(self):", "body": "if len(self._context) == :raise InvalidContextError(\"\")return self", "docstring": "Sultan can be used with context using `with` blocks, as such:\n\n```python\n\nwith Sultan.load(cwd=\"/tmp\") as s:\n s.ls(\"-lah\").run()\n```\n\nThis is easier to manage than doing the following::\n\n s = Sultan()\n s.cd(\"/tmp\").and_().ls(\"-lah\").run()\n\nThere are one-off times when running `s.cd(\"/tmp\").and_().ls(\"-lah\").run()` works better. However,\nif you have multiple commands to run in a given directory, using Sultan with context, allows your\ncode to be easy to manage.", "id": "f10303:c0:m3"} {"signature": "def __exit__(self, type, value, traceback):", "body": "if len(self._context) > :self._context.pop()", "docstring": "Restores the context to previous context.", "id": "f10303:c0:m4"} {"signature": "def run(self, halt_on_nonzero=True, quiet=False, q=False, streaming=False):", "body": "commands = str(self)if not (quiet or q):self._echo.cmd(commands)env = self._context[].get('', {}) if len(self._context) > else os.environexecutable = self.current_context.get('')try:process = subprocess.Popen(commands,bufsize=,shell=True,env=env,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE,executable=executable,universal_newlines=True)result = Result(process, commands, self._context, streaming, halt_on_nonzero=halt_on_nonzero)except Exception as e:result = Result(None, commands, self._context, exception=e)result.dump_exception()if halt_on_nonzero:raise efinally:self.clear()return result", "docstring": "After building your commands, call `run()` to have your code executed.", "id": "f10303:c0:m8"} {"signature": "def _add(self, command):", "body": "self.commands.append(command)return self", "docstring": "Private method that adds a custom command (see `pipe` and `and_`).\n\nNOT FOR PUBLIC USE", "id": "f10303:c0:m9"} {"signature": "def __str__(self):", "body": "context = self.current_contextSPECIAL_CASES = (Pipe, And, Redirect, Or)output = \"\"for i, cmd in enumerate(self.commands):if (i == ):separator = \"\"else:if isinstance(cmd, SPECIAL_CASES):separator = \"\"else:if isinstance(self.commands[i - ], SPECIAL_CASES):separator = \"\"else:separator = \"\"cmd_str = str(cmd)output += separator + 
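The Sultan `Result` records above wrap a `subprocess.Popen` and reduce it to a simple contract: a zero return code means `is_success`, anything else means `is_failure`, with buffered stdout/stderr available for printing. Below is a minimal sketch of that contract using `subprocess.run`; it is not Sultan's streaming implementation (which feeds queues from reader threads), and `run_command` is an illustrative name.

```python
import subprocess

def run_command(command: str, halt_on_nonzero: bool = True) -> subprocess.CompletedProcess:
    # shell=True mirrors how the builder above hands a single command string to Popen
    result = subprocess.run(command, shell=True, capture_output=True, text=True)
    if result.returncode != 0:
        print(result.stderr, end="")        # roughly Result.print_stderr
        if halt_on_nonzero:
            result.check_returncode()       # raises CalledProcessError
    return result

res = run_command("ls -lah /tmp", halt_on_nonzero=False)
print(res.returncode == 0)                  # roughly Result.is_success
print(res.stdout.splitlines()[:3])          # roughly Result.stdout
```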
cmd_stroutput = output.strip() + \"\"cwd = context.get('')if cwd:prepend = \"\" % (cwd)output = prepend + outputsrc = context.get('')if src:prepend = \"\" % (src)output = prepend + outputsudo = context.get('')user = context.get('')if sudo:if user != getpass.getuser():output = \"\" % (user, output)elif getpass.getuser() == '':output = \"\" % (user, output)else:output = \"\" % (output)ssh_config = context.get('')hostname = context.get('')if hostname:params = {'': user,'': hostname,'': output, '': '' % ssh_config if ssh_config else ''}output = \"\" % (params)return output", "docstring": "Returns the chained commands that were built as a string.", "id": "f10303:c0:m11"} {"signature": "def spit(self):", "body": "self._echo.log(str(self))", "docstring": "Logs to the logger the command.", "id": "f10303:c0:m12"} {"signature": "def pipe(self):", "body": "self._add(Pipe(self, ''))return self", "docstring": "Pipe commands in Sultan.\n\nUsage::\n\n # runs: 'cat /var/log/foobar.log | grep 192.168.1.1'\n s = Sultan()\n s.cat(\"/var/log/foobar.log\").pipe().grep(\"192.168.1.1\").run()", "id": "f10303:c0:m13"} {"signature": "def and_(self):", "body": "self._add(And(self, \"\"))return self", "docstring": "Combines multiple commands using `&&`.\n\nUsage::\n\n # runs: 'cd /tmp && touch foobar.txt'\n s = Sultan()\n s.cd(\"/tmp\").and_().touch(\"foobar.txt\").run()", "id": "f10303:c0:m14"} {"signature": "def or_(self):", "body": "self._add(Or(self, ''))return self", "docstring": "Combines multiple commands using `||`.\n\nUsage::\n\n # runs: 'touch /tmp/foobar || echo \"Step Completed\"'\n s = Sultan()\n s.touch('/tmp/foobar').or_().echo(\"Step Completed\").run()", "id": "f10303:c0:m15"} {"signature": "def validate_config(self):", "body": "for key, key_config in self.params_map.items():if key_config['']:if key not in self.config:raise ValueError(\"\")for key in self.config.keys():if key not in self.params_map:raise ValueError(\"\" % key)", "docstring": "Validates the provided config to make sure all the required fields are \nthere.", "id": "f10303:c7:m2"} {"signature": "def callAfter(func, *args, **kwargs):", "body": "pool = NSAutoreleasePool.alloc().init()obj = PyObjCAppHelperCaller_wrap.alloc().initWithArgs_((func, args, kwargs))obj.callAfter_(None)del objdel pool", "docstring": "call a function on the main thread (async)", "id": "f10308:m0"} {"signature": "def callLater(delay, func, *args, **kwargs):", "body": "pool = NSAutoreleasePool.alloc().init()obj = PyObjCAppHelperCaller_wrap.alloc().initWithArgs_((func, args, kwargs))obj.callLater_(delay)del objdel pool", "docstring": "call a function on the main thread after a delay (async)", "id": "f10308:m1"} {"signature": "def stopEventLoop():", "body": "stopper = PyObjCAppHelperRunLoopStopper_wrap.currentRunLoopStopper()if stopper is None:if NSApp() is not None:NSApp().terminate_(None)return Truereturn FalseNSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(,stopper,'',None,False)return True", "docstring": "Stop the current event loop if possible\nreturns True if it expects that it was successful, False otherwise", "id": "f10308:m2"} {"signature": "def endSheetMethod(meth):", "body": "return objc.selector(meth, signature=b'')", "docstring": "Return a selector that can be used as the delegate callback for\nsheet methods", "id": "f10308:m3"} {"signature": "def runEventLoop(argv=None, unexpectedErrorAlert=None, installInterrupt=None, pdb=None, main=NSApplicationMain):", "body": "if argv is None:argv = sys.argvif pdb is None:pdb = '' in 
os.environif pdb:from PyObjCTools import DebuggingDebugging.installVerboseExceptionHandler()activator = PyObjCAppHelperApplicationActivator_wrap.alloc().init()NSNotificationCenter.defaultCenter().addObserver_selector_name_object_(activator,'',NSApplicationDidFinishLaunchingNotification,None,)else:Debugging = Noneif installInterrupt is None and pdb:installInterrupt = Trueif unexpectedErrorAlert is None:unexpectedErrorAlert = unexpectedErrorAlertPdbrunLoop = NSRunLoop.currentRunLoop()stopper = PyObjCAppHelperRunLoopStopper_wrap.alloc().init()PyObjCAppHelperRunLoopStopper_wrap.addRunLoopStopper_toRunLoop_(stopper, runLoop)firstRun = NSApp() is Nonetry:while stopper.shouldRun():try:if firstRun:firstRun = Falseif installInterrupt:installMachInterrupt()main(argv)else:NSApp().run()except RAISETHESE:traceback.print_exc()breakexcept:exctype, e, tb = sys.exc_info()objc_exception = Falseif isinstance(e, objc.error):NSLog(\"\", str(e))elif not unexpectedErrorAlert():NSLog(\"\", \"\")traceback.print_exc()sys.exit()else:NSLog(\"\", \"\")traceback.print_exc()else:breakfinally:if Debugging is not None:Debugging.removeExceptionHandler()PyObjCAppHelperRunLoopStopper_wrap.removeRunLoopStopperFromRunLoop_(runLoop)", "docstring": "Run the event loop, ask the user if we should continue if an\n exception is caught. Use this function instead of NSApplicationMain().", "id": "f10308:m8"} {"signature": "def addloghandler(self, handler):", "body": "self.logger.addHandler(handler)return ", "docstring": "Add custom log handler\n@param handler: Handler instance\n@type handler: object\n\n@return: 1 on success and 0 on error\n@rtype: integer", "id": "f10311:c3:m3"} {"signature": "def removeloghandler(self, handler):", "body": "self.logger.removeHandler(handler)return ", "docstring": "Remove custom log handler\n@param handler: Handler instance\n@type handler: object\n\n@return: 1 on success and 0 on error\n@rtype: integer", "id": "f10311:c3:m4"} {"signature": "def log(self, message, level=logging.DEBUG):", "body": "if _ldtp_debug:print(message)self.logger.log(level, str(message))return ", "docstring": "Logs the message in the root logger with the log level\n@param message: Message to be logged\n@type message: string\n@param level: Log level, defaul DEBUG\n@type level: integer\n\n@return: 1 on success and 0 on error\n@rtype: integer", "id": "f10311:c3:m5"} {"signature": "def startlog(self, filename, overwrite=True):", "body": "if not filename:return if overwrite:_mode = ''else:_mode = ''self._file_logger = self.logging.FileHandler(os.path.expanduser(filename), _mode)_formatter = self.logging.Formatter('')self._file_logger.setFormatter(_formatter)self.logger.addHandler(_file_logger)if _ldtp_debug:self._file_logger.setLevel(logging.DEBUG)else:self._file_logger.setLevel(logging.ERROR)return ", "docstring": "@param filename: Start logging on the specified file\n@type filename: string\n@param overwrite: Overwrite or append\n False - Append log to an existing file\n True - Write log to a new file. 
If file already exist, \n then erase existing file content and start log\n@type overwrite: boolean\n\n@return: 1 on success and 0 on error\n@rtype: integer", "id": "f10311:c3:m6"} {"signature": "def stoplog(self):", "body": "if self._file_logger:self.logger.removeHandler(_file_logger)self._file_logger = Nonereturn ", "docstring": "Stop logging.\n\n @return: 1 on success and 0 on error\n @rtype: integer", "id": "f10311:c3:m7"} {"signature": "def imagecapture(self, window_name=None, out_file=None, x=, y=,width=None, height=None):", "body": "if not out_file:out_file = tempfile.mktemp('', '')else:out_file = os.path.expanduser(out_file)if _ldtp_windows_env:if width == None:width = -if height == None:height = -if window_name == None:window_name = ''data = self._remote_imagecapture(window_name, x, y, width, height)f = open(out_file, '')f.write(b64decode(data))f.close()return out_file", "docstring": "Captures screenshot of the whole desktop or given window\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param x: x co-ordinate value\n@type x: integer\n@param y: y co-ordinate value\n@type y: integer\n@param width: width co-ordinate value\n@type width: integer\n@param height: height co-ordinate value\n@type height: integer\n\n@return: screenshot filename\n@rtype: string", "id": "f10311:c3:m11"} {"signature": "def onwindowcreate(self, window_name, fn_name, *args):", "body": "self._pollEvents._callback[window_name] = [\"\", fn_name, args]return self._remote_onwindowcreate(window_name)", "docstring": "On window create, call the function with given arguments\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param fn_name: Callback function\n@type fn_name: function\n@param *args: arguments to be passed to the callback function\n@type *args: var args\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": "f10311:c3:m33"} {"signature": "def removecallback(self, window_name):", "body": "if window_name in self._pollEvents._callback:del self._pollEvents._callback[window_name]return self._remote_removecallback(window_name)", "docstring": "Remove registered callback on window create\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": "f10311:c3:m34"} {"signature": "def registerevent(self, event_name, fn_name, *args):", "body": "if not isinstance(event_name, str):raise ValueError(\"\")self._pollEvents._callback[event_name] = [event_name, fn_name, args]return self._remote_registerevent(event_name)", "docstring": "Register at-spi event\n\n@param event_name: Event name in at-spi format.\n@type event_name: string\n@param fn_name: Callback function\n@type fn_name: function\n@param *args: arguments to be passed to the callback function\n@type *args: var args\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": "f10311:c3:m35"} {"signature": "def deregisterevent(self, event_name):", "body": "if event_name in self._pollEvents._callback:del self._pollEvents._callback[event_name]return self._remote_deregisterevent(event_name)", "docstring": "Remove callback of registered event\n\n@param event_name: Event name in at-spi format.\n@type event_name: string\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": 
"f10311:c3:m36"} {"signature": "def registerkbevent(self, keys, modifiers, fn_name, *args):", "body": "event_name = \"\" % (keys, modifiers)self._pollEvents._callback[event_name] = [event_name, fn_name, args]return self._remote_registerkbevent(keys, modifiers)", "docstring": "Register keystroke events\n\n@param keys: key to listen\n@type keys: string\n@param modifiers: control / alt combination using gtk MODIFIERS\n@type modifiers: int\n@param fn_name: Callback function\n@type fn_name: function\n@param *args: arguments to be passed to the callback function\n@type *args: var args\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": "f10311:c3:m37"} {"signature": "def deregisterkbevent(self, keys, modifiers):", "body": "event_name = \"\" % (keys, modifiers)if event_name in _pollEvents._callback:del _pollEvents._callback[event_name]return self._remote_deregisterkbevent(keys, modifiers)", "docstring": "Remove callback of registered event\n\n@param keys: key to listen\n@type keys: string\n@param modifiers: control / alt combination using gtk MODIFIERS\n@type modifiers: int\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": "f10311:c3:m38"} {"signature": "def windowuptime(self, window_name):", "body": "tmp_time = self._remote_windowuptime(window_name)if tmp_time:tmp_time = tmp_time.split('')start_time = tmp_time[].split('')end_time = tmp_time[].split('')_start_time = datetime.datetime(int(start_time[]), int(start_time[]),int(start_time[]), int(start_time[]),int(start_time[]), int(start_time[]))_end_time = datetime.datetime(int(end_time[]), int(end_time[]),int(end_time[]), int(end_time[]),int(end_time[]), int(end_time[]))return _start_time, _end_timereturn None", "docstring": "Get window uptime\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n\n@return: \"starttime, endtime\" as datetime python object", "id": "f10311:c3:m39"} {"signature": "def __del__(self):", "body": "self._stop = True", "docstring": "Stop polling when destroying this class", "id": "f10311:c4:m1"} {"signature": "def __del__(self):", "body": "self._stop = True", "docstring": "Stop callback when destroying this class", "id": "f10311:c5:m1"} {"signature": "@classmethoddef paste(cls):", "body": "pb = AppKit.NSPasteboard.generalPasteboard()data = pb.stringForType_(cls.STRING)return data", "docstring": "Get the clipboard data ('Paste').\n\n Returns: Data (string) retrieved or None if empty. 
Exceptions from\n AppKit will be handled by caller.", "id": "f10313:c0:m0"} {"signature": "@classmethoddef copy(cls, data):", "body": "pp = pprint.PrettyPrinter()copy_data = ''logging.debug(copy_data % pp.pformat(data))cleared = cls.clearAll()if not cleared:logging.warning('')return Falseif not isinstance(data, types.ListType):data = [data]pb = AppKit.NSPasteboard.generalPasteboard()pb_set_ok = pb.writeObjects_(data)return bool(pb_set_ok)", "docstring": "Set the clipboard data ('Copy').\n\n Parameters: data to set (string)\n Optional: datatype if it's not a string\n Returns: True / False on successful copy, Any exception raised (like\n passes the NSPasteboardCommunicationError) should be caught\n by the caller.", "id": "f10313:c0:m1"} {"signature": "@classmethoddef clearContents(cls):", "body": "log_msg = ''logging.debug(log_msg)pb = AppKit.NSPasteboard.generalPasteboard()pb.clearContents()return True", "docstring": "Clear contents of general pasteboard.\n\n Future enhancement can include specifying which clipboard to clear\n Returns: True on success; caller should expect to catch exceptions,\n probably from AppKit (ValueError)", "id": "f10313:c0:m2"} {"signature": "@classmethoddef clearProperties(cls):", "body": "log_msg = ''logging.debug(log_msg)pb = AppKit.NSPasteboard.generalPasteboard()pb.clearProperties()return True", "docstring": "Clear properties of general pasteboard.\n\n Future enhancement can include specifying which clipboard's properties\n to clear\n Returns: True on success; caller should catch exceptions raised,\n e.g. from AppKit (ValueError)", "id": "f10313:c0:m3"} {"signature": "@classmethoddef clearAll(cls):", "body": "cls.clearContents()cls.clearProperties()return True", "docstring": "Clear contents and properties of general pasteboard.\n\n Future enhancement can include specifying which clipboard's properties\n to clear\n Returns: Boolean True on success; caller should handle exceptions", "id": "f10313:c0:m4"} {"signature": "@classmethoddef isEmpty(cls, datatype=None):", "body": "if not datatype:datatype = AppKit.NSStringif not isinstance(datatype, types.ListType):datatype = [datatype]pp = pprint.PrettyPrinter()logging.debug('' % pp.pformat(datatype))opt_dict = {}logging.debug('' % pp.pformat(opt_dict))try:log_msg = ''logging.debug(log_msg)pb = AppKit.NSPasteboard.generalPasteboard()its_empty = not bool(pb.canReadObjectForClasses_options_(datatype,opt_dict))except ValueError as error:logging.error(error)raisereturn bool(its_empty)", "docstring": "Method to test if the general pasteboard is empty or not with respect\n to the type of object you want.\n\n Parameters: datatype (defaults to strings)\n Returns: Boolean True (empty) / False (has contents); Raises\n exception (passes any raised up)", "id": "f10313:c0:m5"} {"signature": "def __init__(self, bundleID, bundlePath=None, defaultsPlistName=''):", "body": "self.__bundleID = bundleIDself.__bundlePath = bundlePathUserDict.__init__(self)self.__setup(defaultsPlistName)", "docstring": "bundleId: the application bundle identifier\nbundlePath: the full bundle path (useful to test a Debug build)\ndefaultsPlistName: the name of the plist that contains default values", "id": "f10314:c0:m0"} {"signature": "def verifyscrollbarvertical(self, window_name, object_name):", "body": "try:object_handle = self._get_object_handle(window_name, object_name)if object_handle.AXOrientation == \"\":return except:passreturn ", "docstring": "Verify scrollbar is vertical\n\n@param window_name: Window name to type in, either full name,\nLDTP's name 
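The Pasteboard records above call the pyobjc bridge for NSPasteboard: `generalPasteboard()`, `clearContents()`, `writeObjects_()` and `stringForType_()`. The stripped `cls.STRING` constant is assumed here to be `AppKit.NSPasteboardTypeString`; this macOS-only sketch only illustrates the copy/paste round trip those records describe.

```python
import AppKit  # pyobjc bridge; macOS only

def copy_text(text: str) -> bool:
    pb = AppKit.NSPasteboard.generalPasteboard()
    pb.clearContents()
    # a Python str is bridged to NSString, which supports NSPasteboardWriting
    return bool(pb.writeObjects_([text]))

def paste_text():
    pb = AppKit.NSPasteboard.generalPasteboard()
    return pb.stringForType_(AppKit.NSPasteboardTypeString)

copy_text("hello from pyobjc")
print(paste_text())
```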
convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m0"} {"signature": "def verifyscrollbarhorizontal(self, window_name, object_name):", "body": "try:object_handle = self._get_object_handle(window_name, object_name)if object_handle.AXOrientation == \"\":return except:passreturn ", "docstring": "Verify scrollbar is horizontal\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m1"} {"signature": "def setmax(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)object_handle.AXValue = return ", "docstring": "Set max value\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m2"} {"signature": "def setmin(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)object_handle.AXValue = return ", "docstring": "Set min value\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m3"} {"signature": "def scrollup(self, window_name, object_name):", "body": "if not self.verifyscrollbarvertical(window_name, object_name):raise LdtpServerException('')return self.setmin(window_name, object_name)", "docstring": "Scroll up\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m4"} {"signature": "def scrolldown(self, window_name, object_name):", "body": "if not self.verifyscrollbarvertical(window_name, object_name):raise LdtpServerException('')return self.setmax(window_name, object_name)", "docstring": "Scroll down\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m5"} {"signature": "def scrollleft(self, window_name, object_name):", "body": "if not self.verifyscrollbarhorizontal(window_name, object_name):raise LdtpServerException('')return self.setmin(window_name, object_name)", "docstring": "Scroll left\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: 
string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m6"} {"signature": "def scrollright(self, window_name, object_name):", "body": "if not self.verifyscrollbarhorizontal(window_name, object_name):raise LdtpServerException('')return self.setmax(window_name, object_name)", "docstring": "Scroll right\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m7"} {"signature": "def onedown(self, window_name, object_name, iterations):", "body": "if not self.verifyscrollbarvertical(window_name, object_name):raise LdtpServerException('')object_handle = self._get_object_handle(window_name, object_name)i = maxValue = / flag = Falsewhile i < iterations:if object_handle.AXValue >= :raise LdtpServerException('')object_handle.AXValue += maxValuetime.sleep( / )flag = Truei += if flag:return else:raise LdtpServerException('')", "docstring": "Press scrollbar down with number of iterations\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n@param interations: iterations to perform on slider increase\n@type iterations: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m8"} {"signature": "def oneup(self, window_name, object_name, iterations):", "body": "if not self.verifyscrollbarvertical(window_name, object_name):raise LdtpServerException('')object_handle = self._get_object_handle(window_name, object_name)i = minValue = / flag = Falsewhile i < iterations:if object_handle.AXValue <= :raise LdtpServerException('')object_handle.AXValue -= minValuetime.sleep( / )flag = Truei += if flag:return else:raise LdtpServerException('')", "docstring": "Press scrollbar up with number of iterations\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n@param interations: iterations to perform on slider increase\n@type iterations: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m9"} {"signature": "def oneright(self, window_name, object_name, iterations):", "body": "if not self.verifyscrollbarhorizontal(window_name, object_name):raise LdtpServerException('')object_handle = self._get_object_handle(window_name, object_name)i = maxValue = / flag = Falsewhile i < iterations:if object_handle.AXValue >= :raise LdtpServerException('')object_handle.AXValue += maxValuetime.sleep( / )flag = Truei += if flag:return else:raise LdtpServerException('')", "docstring": "Press scrollbar right with number of iterations\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n@param interations: iterations to perform on slider increase\n@type iterations: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m10"} {"signature": "def oneleft(self, window_name, object_name, iterations):", 
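The onedown/oneup/oneright bodies above step AXValue in a loop, but the step size, end stops and sleep interval are stripped. A hedged reconstruction of that loop; the 1/8 step and 1/100 second delay are assumptions:

import time

def one_down(handle, iterations, step=1.0 / 8):
    moved = False
    for _ in range(iterations):
        if handle.AXValue >= 1.0:
            raise RuntimeError('Scrollbar already at the maximum')
        handle.AXValue += step
        time.sleep(1.0 / 100)   # assumed settle delay between steps
        moved = True
    if not moved:
        raise RuntimeError('Unable to scroll further')
    return 1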
"body": "if not self.verifyscrollbarhorizontal(window_name, object_name):raise LdtpServerException('')object_handle = self._get_object_handle(window_name, object_name)i = minValue = / flag = Falsewhile i < iterations:if object_handle.AXValue <= :raise LdtpServerException('')object_handle.AXValue -= minValuetime.sleep( / )flag = Truei += if flag:return else:raise LdtpServerException('')", "docstring": "Press scrollbar left with number of iterations\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n@param interations: iterations to perform on slider increase\n@type iterations: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m11"} {"signature": "def imagecapture(self, window_name=None, x=, y=,width=None, height=None):", "body": "if x or y or (width and width != -) or (height and height != -):raise LdtpServerException(\"\")if window_name:handle, name, app = self._get_window_handle(window_name)try:self._grabfocus(handle)except:passrect = self._getobjectsize(handle)screenshot = CGWindowListCreateImage(NSMakeRect(rect[],rect[], rect[], rect[]), , , )else:screenshot = CGWindowListCreateImage(CGRectInfinite, , , )image = CIImage.imageWithCGImage_(screenshot)bitmapRep = NSBitmapImageRep.alloc().initWithCIImage_(image)blob = bitmapRep.representationUsingType_properties_(NSPNGFileType, None)tmpFile = tempfile.mktemp('', '')blob.writeToFile_atomically_(tmpFile, False)rv = b64encode(open(tmpFile).read())os.remove(tmpFile)return rv", "docstring": "Captures screenshot of the whole desktop or given window\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param x: x co-ordinate value\n@type x: int\n@param y: y co-ordinate value\n@type y: int\n@param width: width co-ordinate value\n@type width: int\n@param height: height co-ordinate value\n@type height: int\n\n@return: screenshot with base64 encoded for the client\n@rtype: string", "id": "f10316:c0:m0"} {"signature": "def getrowcount(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)return len(object_handle.AXRows)", "docstring": "Get count of rows in table object.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. 
Or menu heirarchy\n@type object_name: string\n\n@return: Number of rows.\n@rtype: integer", "id": "f10317:c0:m0"} {"signature": "def selectrow(self, window_name, object_name, row_text, partial_match=False):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)for cell in object_handle.AXRows:if re.match(row_text,cell.AXChildren[].AXValue):if not cell.AXSelected:object_handle.activate()cell.AXSelected = Trueelse:passreturn raise LdtpServerException(u\"\" % row_text)", "docstring": "Select row\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_text: Row text to select\n@type row_text: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10317:c0:m1"} {"signature": "def multiselect(self, window_name, object_name, row_text_list, partial_match=False):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)object_handle.activate()selected = Falsetry:window = self._get_front_most_window()except (IndexError,):window = self._get_any_window()for row_text in row_text_list:selected = Falsefor cell in object_handle.AXRows:parent_cell = cellcell = self._getfirstmatchingchild(cell, \"\")if not cell:continueif re.match(row_text, cell.AXValue):selected = Trueif not parent_cell.AXSelected:x, y, width, height = self._getobjectsize(parent_cell)window.clickMouseButtonLeftWithMods((x + width / ,y + height / ),[''])self.wait()else:passbreakif not selected:raise LdtpServerException(u\"\" % row_text)if not selected:raise LdtpServerException(u\"\")return ", "docstring": "Select multiple row\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_text_list: Row list with matching text to select\n@type row_text: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10317:c0:m2"} {"signature": "def multiremove(self, window_name, object_name, row_text_list, partial_match=False):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)object_handle.activate()unselected = Falsetry:window = self._get_front_most_window()except (IndexError,):window = self._get_any_window()for row_text in row_text_list:selected = Falsefor cell in object_handle.AXRows:parent_cell = cellcell = self._getfirstmatchingchild(cell, \"\")if not cell:continueif re.match(row_text, cell.AXValue):unselected = Trueif parent_cell.AXSelected:x, y, width, height = self._getobjectsize(parent_cell)window.clickMouseButtonLeftWithMods((x + width / ,y + height / ),[''])self.wait()else:passbreakif not unselected:raise LdtpServerException(u\"\" % row_text)if not unselected:raise LdtpServerException(u\"\")return ", "docstring": "Remove multiple row\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. 
\n@type object_name: string\n@param row_text_list: Row list with matching text to select\n@type row_text: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10317:c0:m3"} {"signature": "def selectrowpartialmatch(self, window_name, object_name, row_text):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)for cell in object_handle.AXRows:if re.search(row_text,cell.AXChildren[].AXValue):if not cell.AXSelected:object_handle.activate()cell.AXSelected = Trueelse:passreturn raise LdtpServerException(u\"\" % row_text)", "docstring": "Select row partial match\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_text: Row text to select\n@type row_text: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10317:c0:m4"} {"signature": "def selectrowindex(self, window_name, object_name, row_index):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)count = len(object_handle.AXRows)if row_index < or row_index > count:raise LdtpServerException('' % row_index)cell = object_handle.AXRows[row_index]if not cell.AXSelected:object_handle.activate()cell.AXSelected = Trueelse:passreturn ", "docstring": "Select row index\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_index: Row index to select\n@type row_index: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10317:c0:m5"} {"signature": "def selectlastrow(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)cell = object_handle.AXRows[-]if not cell.AXSelected:object_handle.activate()cell.AXSelected = Trueelse:passreturn ", "docstring": "Select last row\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10317:c0:m6"} {"signature": "def setcellvalue(self, window_name, object_name, row_index,column=, data=None):", "body": "raise LdtpServerException(\"\")", "docstring": "Set cell value\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. 
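selectrow and its relatives above match a regular expression against the first cell of every AXRow; the stripped child index is presumably 0. A runnable sketch with minimal stand-ins for the accessibility objects:

import re

class _Cell(object):
    def __init__(self, value):
        self.AXValue = value

class _Row(object):
    def __init__(self, value):
        self.AXChildren = [_Cell(value)]
        self.AXSelected = False

class _Table(object):
    def __init__(self, values):
        self.AXRows = [_Row(v) for v in values]
    def activate(self):
        pass

def select_row(table, row_text):
    # Mirrors the selectrow pattern: regex-match the first cell, then mark the row selected.
    for row in table.AXRows:
        if re.match(row_text, row.AXChildren[0].AXValue):
            if not row.AXSelected:
                table.activate()
                row.AXSelected = True
            return 1
    raise LookupError('Unable to select row: %s' % row_text)

print(select_row(_Table(['alpha', 'beta']), 'beta'))   # 1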
\n@type object_name: string\n@param row_index: Row index to get\n@type row_index: integer\n@param column: Column index to get, default value 0\n@type column: integer\n@param data: data, default value None\n None, used for toggle button\n@type data: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10317:c0:m7"} {"signature": "def getcellvalue(self, window_name, object_name, row_index, column=):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)count = len(object_handle.AXRows)if row_index < or row_index > count:raise LdtpServerException('' % row_index)cell = object_handle.AXRows[row_index]count = len(cell.AXChildren)if column < or column > count:raise LdtpServerException('' % column)obj = cell.AXChildren[column]if not re.search(\"\", obj.AXRole):obj = cell.AXChildren[column]return obj.AXValue", "docstring": "Get cell value\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_index: Row index to get\n@type row_index: integer\n@param column: Column index to get, default value 0\n@type column: integer\n\n@return: cell value on success.\n@rtype: string", "id": "f10317:c0:m8"} {"signature": "def getcellsize(self, window_name, object_name, row_index, column=):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)count = len(object_handle.AXRows)if row_index < or row_index > count:raise LdtpServerException('' % row_index)cell = object_handle.AXRows[row_index]count = len(cell.AXChildren)if column < or column > count:raise LdtpServerException('' % column)obj = cell.AXChildren[column]if not re.search(\"\", obj.AXRole):obj = cell.AXChildren[column]return self._getobjectsize(obj)", "docstring": "Get cell size\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_index: Row index to get\n@type row_index: integer\n@param column: Column index to get, default value 0\n@type column: integer\n\n@return: cell coordinates on success.\n@rtype: list", "id": "f10317:c0:m9"} {"signature": "def rightclick(self, window_name, object_name, row_text):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)object_handle.activate()self.wait()for cell in object_handle.AXRows:cell = self._getfirstmatchingchild(cell, \"\")if not cell:continueif re.match(row_text, cell.AXValue):x, y, width, height = self._getobjectsize(cell)cell.clickMouseButtonRight((x + width / , y + height / ))return raise LdtpServerException(u'' % row_text)", "docstring": "Right click on table cell\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. 
\n@type object_name: string\n@param row_text: Row text to right click\n@type row_text: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10317:c0:m10"} {"signature": "def checkrow(self, window_name, object_name, row_index, column=):", "body": "raise LdtpServerException(\"\")", "docstring": "Check row\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_index: Row index to get\n@type row_index: integer\n@param column: Column index to get, default value 0\n@type column: integer\n\n@return: cell value on success.\n@rtype: string", "id": "f10317:c0:m11"} {"signature": "def expandtablecell(self, window_name, object_name, row_index, column=):", "body": "raise LdtpServerException(\"\")", "docstring": "Expand or contract table cell\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_index: Row index to get\n@type row_index: integer\n@param column: Column index to get, default value 0\n@type column: integer\n\n@return: cell value on success.\n@rtype: string", "id": "f10317:c0:m12"} {"signature": "def uncheckrow(self, window_name, object_name, row_index, column=):", "body": "raise LdtpServerException(\"\")", "docstring": "Check row\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_index: Row index to get\n@type row_index: integer\n@param column: Column index to get, default value 0\n@type column: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10317:c0:m13"} {"signature": "def gettablerowindex(self, window_name, object_name, row_text):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)index = for cell in object_handle.AXRows:if re.match(row_text,cell.AXChildren[].AXValue):return indexindex += raise LdtpServerException(u\"\" % row_text)", "docstring": "Get table row index matching given text\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. 
\n@type object_name: string\n@param row_text: Row text to select\n@type row_text: string\n\n@return: row index matching the text on success.\n@rtype: integer", "id": "f10317:c0:m14"} {"signature": "def singleclickrow(self, window_name, object_name, row_text):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)object_handle.activate()self.wait()for cell in object_handle.AXRows:cell = self._getfirstmatchingchild(cell, \"\")if not cell:continueif re.match(row_text, cell.AXValue):x, y, width, height = self._getobjectsize(cell)cell.clickMouseButtonLeft((x + width / , y + height / ))return raise LdtpServerException('' % row_text)", "docstring": "Single click row matching given text\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_text: Row text to select\n@type row_text: string\n\n@return: row index matching the text on success.\n@rtype: integer", "id": "f10317:c0:m15"} {"signature": "def doubleclickrow(self, window_name, object_name, row_text):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)object_handle.activate()self.wait()for cell in object_handle.AXRows:cell = self._getfirstmatchingchild(cell, \"\")if not cell:continueif re.match(row_text, cell.AXValue):x, y, width, height = self._getobjectsize(cell)cell.doubleClickMouse((x + width / , y + height / ))return raise LdtpServerException('' % row_text)", "docstring": "Double click row matching given text\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. 
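rightclick, singleclickrow and doubleclickrow above click at (x + width / ?, y + height / ?); the stripped divisor is presumably 2, i.e. the centre of the cell's bounding box:

def center(x, y, width, height):
    # Assumed reconstruction of the stripped click-point arithmetic.
    return (x + width / 2.0, y + height / 2.0)

print(center(10, 20, 100, 40))   # (60.0, 40.0)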
\n@type object_name: string\n@param row_text: Row text to select\n@type row_text: string\n\n@return: row index matching the text on success.\n@rtype: integer", "id": "f10317:c0:m16"} {"signature": "def doubleclickrowindex(self, window_name, object_name, row_index, col_index=):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)count = len(object_handle.AXRows)if row_index < or row_index > count:raise LdtpServerException('' % row_index)cell = object_handle.AXRows[row_index]self._grabfocus(cell)x, y, width, height = self._getobjectsize(cell)cell.doubleClickMouse((x + width / , y + height / ))return ", "docstring": "Double click row matching given text\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n@param row_index: Row index to click\n@type row_index: integer\n@param col_index: Column index to click\n@type col_index: integer\n\n@return: row index matching the text on success.\n@rtype: integer", "id": "f10317:c0:m17"} {"signature": "def verifytablecell(self, window_name, object_name, row_index,column_index, row_text):", "body": "try:value = getcellvalue(window_name, object_name, row_index, column_index)if re.match(row_text, value):return except LdtpServerException:passreturn ", "docstring": "Verify table cell value with given text\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_index: Row index to get\n@type row_index: integer\n@param column_index: Column index to get, default value 0\n@type column_index: integer\n@param row_text: Row text to match\n@type string\n\n@return: 1 on success 0 on failure.\n@rtype: integer", "id": "f10317:c0:m18"} {"signature": "def doesrowexist(self, window_name, object_name, row_text,partial_match=False):", "body": "try:object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:return for cell in object_handle.AXRows:if not partial_match and re.match(row_text,cell.AXChildren[].AXValue):return elif partial_match and re.search(row_text,cell.AXChildren[].AXValue):return except LdtpServerException:passreturn ", "docstring": "Verify table cell value with given text\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. 
\n@type object_name: string\n@param row_text: Row text to match\n@type string\n@param partial_match: Find partial match strings\n@type boolean\n\n@return: 1 on success 0 on failure.\n@rtype: integer", "id": "f10317:c0:m19"} {"signature": "def verifypartialtablecell(self, window_name, object_name, row_index,column_index, row_text):", "body": "try:value = getcellvalue(window_name, object_name, row_index, column_index)if re.searchmatch(row_text, value):return except LdtpServerException:passreturn ", "docstring": "Verify partial table cell value\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_index: Row index to get\n@type row_index: integer\n@param column_index: Column index to get, default value 0\n@type column_index: integer\n@param row_text: Row text to match\n@type string\n\n@return: 1 on success 0 on failure.\n@rtype: integer", "id": "f10317:c0:m20"} {"signature": "def selecttab(self, window_name, object_name, tab_name):", "body": "tab_handle = self._get_tab_handle(window_name, object_name, tab_name)tab_handle.Press()return ", "docstring": "Select tab based on name.\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param tab_name: tab to select\n@type data: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10318:c0:m2"} {"signature": "def selecttabindex(self, window_name, object_name, tab_index):", "body": "children = self._get_tab_children(window_name, object_name)length = len(children)if tab_index < or tab_index > length:raise LdtpServerException(u\"\" % tab_index)tab_handle = children[tab_index]if not tab_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)tab_handle.Press()return ", "docstring": "Select tab based on index.\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param tab_index: tab to select\n@type data: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10318:c0:m3"} {"signature": "def verifytabname(self, window_name, object_name, tab_name):", "body": "try:tab_handle = self._get_tab_handle(window_name, object_name, tab_name)if tab_handle.AXValue:return except LdtpServerException:passreturn ", "docstring": "Verify tab name.\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. 
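The verifytablecell and verifypartialtablecell bodies above appear to carry two defects: getcellvalue is called without self., and re.searchmatch is not a real function (re.search and re.match were presumably intended). A hedged corrected sketch of the partial-match variant; LdtpServerException is the server exception type used throughout these records:

import re

def verifypartialtablecell(self, window_name, object_name, row_index,
                           column_index, row_text):
    try:
        value = self.getcellvalue(window_name, object_name, row_index, column_index)
        if re.search(row_text, value):   # partial match, unlike verifytablecell's re.match
            return 1
    except LdtpServerException:          # defined elsewhere in this code base
        pass
    return 0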
\n@type object_name: string\n@param tab_name: tab to select\n@type data: string\n\n@return: 1 on success 0 on failure\n@rtype: integer", "id": "f10318:c0:m4"} {"signature": "def gettabcount(self, window_name, object_name):", "body": "children = self._get_tab_children(window_name, object_name)return len(children)", "docstring": "Get tab count.\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n\n@return: tab count on success.\n@rtype: integer", "id": "f10318:c0:m5"} {"signature": "def gettabname(self, window_name, object_name, tab_index):", "body": "children = self._get_tab_children(window_name, object_name)length = len(children)if tab_index < or tab_index > length:raise LdtpServerException(u\"\" % tab_index)tab_handle = children[tab_index]if not tab_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)return tab_handle.AXTitle", "docstring": "Get tab name\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param tab_index: Index of tab (zero based index)\n@type object_name: int\n\n@return: text on success.\n@rtype: string", "id": "f10318:c0:m6"} {"signature": "def mouseleftclick(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)self._grabfocus(object_handle)x, y, width, height = self._getobjectsize(object_handle)object_handle.clickMouseButtonLeft((x + width / , y + height / ))return ", "docstring": "Mouse left click on an object.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10319:c0:m0"} {"signature": "def mouserightclick(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)self._grabfocus(object_handle)x, y, width, height = self._getobjectsize(object_handle)object_handle.clickMouseButtonRight((x + width / , y + height / ))return ", "docstring": "Mouse right click on an object.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. 
Or menu heirarchy\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10319:c0:m1"} {"signature": "def generatemouseevent(self, x, y, eventType=\"\",drag_button_override=''):", "body": "if drag_button_override not in mouse_click_override:raise ValueError('' %drag_button_override)global drag_button_rememberedpoint = (x, y)button = centre click_type = Noneif eventType == \"\" or eventType == \"\":if drag_button_override is not '':events = [mouse_click_override[drag_button_override]]elif drag_button_remembered:events = [drag_button_remembered]else:events = [move]if eventType == \"\":point = CGEventGetLocation(CGEventCreate(None))point.x += xpoint.y += yelif eventType == \"\":events = [press_left]drag_button_remembered = drag_leftelif eventType == \"\":events = [release_left]drag_button_remembered = Noneelif eventType == \"\":events = [press_left, release_left]elif eventType == \"\":events = [press_left, release_left]click_type = double_clickelif eventType == \"\":events = [press_other]drag_button_remembered = drag_otherelif eventType == \"\":events = [release_other]drag_button_remembered = Noneelif eventType == \"\":events = [press_other, release_other]elif eventType == \"\":events = [press_other, release_other]click_type = double_clickelif eventType == \"\":events = [press_right]drag_button_remembered = drag_rightelif eventType == \"\":events = [release_right]drag_button_remembered = Noneelif eventType == \"\":events = [press_right, release_right]elif eventType == \"\":events = [press_right, release_right]click_type = double_clickelse:raise LdtpServerException(u\"\" % eventType)for event in events:CG_event = CGEventCreateMouseEvent(None, event, point, button)if click_type:CGEventSetIntegerValueField(CG_event, kCGMouseEventClickState, click_type)CGEventPost(kCGHIDEventTap, CG_event)time.sleep()return ", "docstring": "Generate mouse event on x, y co-ordinates.\n\n@param x: X co-ordinate\n@type x: int\n@param y: Y co-ordinate\n@type y: int\n@param eventType: Mouse click type\n@type eventType: str\n@param drag_button_override: Any drag_xxx value\n Only relevant for movements, i.e. |type| = \"abs\" or \"rel\"\n Quartz is not fully compatible with windows, so for drags\n the drag button must be explicitly defined. generatemouseevent\n will remember the last button pressed by default, and drag\n that button, use this argument to override that.\n@type drag_button_override: str\n\n@return: 1 on success.\n@rtype: integer", "id": "f10319:c0:m2"} {"signature": "def mousemove(self, window_name, object_name):", "body": "raise LdtpServerException(\"\")", "docstring": "Mouse move on an object.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. 
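generatemouseevent above drives Quartz directly. A hedged sketch of the underlying primitives it relies on: create mouse-down and mouse-up events at a point and post them to the HID event tap (the sleep interval is an assumption, the original literal is stripped):

import time
import Quartz

def left_click(x, y):
    point = (x, y)
    for ev_type in (Quartz.kCGEventLeftMouseDown, Quartz.kCGEventLeftMouseUp):
        event = Quartz.CGEventCreateMouseEvent(None, ev_type, point,
                                               Quartz.kCGMouseButtonLeft)
        Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
        time.sleep(0.01)   # assumed settle delay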
Or menu heirarchy\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10319:c0:m3"} {"signature": "def doubleclick(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)self._grabfocus(object_handle)x, y, width, height = self._getobjectsize(object_handle)window = self._get_front_most_window()window.doubleClickMouse((x + width / , y + height / ))return ", "docstring": "Double click on the object\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10319:c0:m4"} {"signature": "def simulatemousemove(self, source_x, source_y, dest_x, dest_y, delay=):", "body": "raise LdtpServerException(\"\")", "docstring": "@param source_x: Source X\n@type source_x: integer\n@param source_y: Source Y\n@type source_y: integer\n@param dest_x: Dest X\n@type dest_x: integer\n@param dest_y: Dest Y\n@type dest_y: integer\n@param delay: Sleep time between the mouse move\n@type delay: double\n\n@return: 1 if simulation was successful, 0 if not.\n@rtype: integer", "id": "f10319:c0:m5"} {"signature": "def selectitem(self, window_name, object_name, item_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)self._grabfocus(object_handle.AXWindow)try:object_handle.Press()except AttributeError:x, y, width, height = self._getobjectsize(object_handle)self.generatemouseevent(x + , y + , \"\")self.wait()handle = self._get_sub_menu_handle(object_handle, item_name)x, y, width, height = self._getobjectsize(handle)self.generatemouseevent(x + , y + , \"\")return self.wait()menu_list = re.split(\"\", item_name)try:menu_handle = self._internal_menu_handler(object_handle, menu_list,True)self.wait()if not menu_handle.AXEnabled:raise LdtpServerException(u\"\" %menu_list[-])menu_handle.Press()except LdtpServerException:object_handle.activate()object_handle.sendKey(AXKeyCodeConstants.ESCAPE)raisereturn ", "docstring": "Select combo box / layered pane item\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. 
\n@type object_name: string\n@param item_name: Item name to select\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10321:c0:m0"} {"signature": "def selectindex(self, window_name, object_name, item_index):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)self._grabfocus(object_handle.AXWindow)try:object_handle.Press()except AttributeError:x, y, width, height = self._getobjectsize(object_handle)self.generatemouseevent(x + , y + , \"\")self.wait()if not object_handle.AXChildren:raise LdtpServerException(u\"\")children = object_handle.AXChildren[]if not children:raise LdtpServerException(u\"\")children = children.AXChildrentmp_children = []for child in children:role, label = self._ldtpize_accessible(child)if label:tmp_children.append(child)children = tmp_childrenlength = len(children)try:if item_index < or item_index > length:raise LdtpServerException(u\"\" % item_index)menu_handle = children[item_index]if not menu_handle.AXEnabled:raise LdtpServerException(u\"\" % menu_list[-])self._grabfocus(menu_handle)x, y, width, height = self._getobjectsize(menu_handle)window = object_handle.AXWindow\"\"\"\"\"\"window.doubleClickMouse((x + , y + ))child = Nonefinally:if child:child.Cancel()return ", "docstring": "Select combo box item / layered pane based on index\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param item_index: Item index to select\n@type object_name: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10321:c0:m1"} {"signature": "def getallitem(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)object_handle.Press()self.wait()child = Nonetry:if not object_handle.AXChildren:raise LdtpServerException(u\"\")children = object_handle.AXChildren[]if not children:raise LdtpServerException(u\"\")children = children.AXChildrenitems = []for child in children:label = self._get_title(child)if label:items.append(label)finally:if child:child.Cancel()return items", "docstring": "Get all combo box item\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n\n@return: list of string on success.\n@rtype: list", "id": "f10321:c0:m2"} {"signature": "def showlist(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)object_handle.Press()return ", "docstring": "Show combo box list / menu\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. 
\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10321:c0:m3"} {"signature": "def hidelist(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)object_handle.activate()object_handle.sendKey(AXKeyCodeConstants.ESCAPE)return ", "docstring": "Hide combo box list / menu\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10321:c0:m4"} {"signature": "def verifydropdown(self, window_name, object_name):", "body": "try:object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled or not object_handle.AXChildren:return children = object_handle.AXChildren[]if children:return except LdtpServerException:passreturn ", "docstring": "Verify drop down list / menu poped up\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n\n@return: 1 on success 0 on failure.\n@rtype: integer", "id": "f10321:c0:m5"} {"signature": "def verifyshowlist(self, window_name, object_name):", "body": "return self.verifydropdown(window_name, object_name)", "docstring": "Verify drop down list / menu poped up\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n\n@return: 1 on success 0 on failure.\n@rtype: integer", "id": "f10321:c0:m6"} {"signature": "def verifyhidelist(self, window_name, object_name):", "body": "try:object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:return if not object_handle.AXChildren:return children = object_handle.AXChildren[]if not children:return return except LdtpServerException:passreturn ", "docstring": "Verify list / menu is hidden in combo box\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n\n@return: 1 on success 0 on failure.\n@rtype: integer", "id": "f10321:c0:m7"} {"signature": "def verifyselect(self, window_name, object_name, item_name):", "body": "try:object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:return role, label = self._ldtpize_accessible(object_handle)title = self._get_title(object_handle)if re.match(item_name, title, re.M | re.U | re.L) orre.match(item_name, label, re.M | re.U | re.L) orre.match(item_name, u\"\" % (role, label),re.M | re.U | re.L):return except LdtpServerException:passreturn ", "docstring": "Verify the item selected in combo box\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. 
\n@type object_name: string\n@param item_name: Item name to select\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10321:c0:m8"} {"signature": "def getcombovalue(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)return self._get_title(object_handle)", "docstring": "Get current selected combobox value\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n\n@return: selected item on success, else LdtpExecutionError on failure.\n@rtype: string", "id": "f10321:c0:m9"} {"signature": "def generatekeyevent(self, data):", "body": "KeyComboAction(data)return ", "docstring": "Generates key event to the system, this simulates the best user like\ninteraction via keyboard.\n\n@param data: data to type.\n@type data: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m0"} {"signature": "def keypress(self, data):", "body": "try:window = self._get_front_most_window()except (IndexError,):window = self._get_any_window()key_press_action = KeyPressAction(window, data)return ", "docstring": "Press key. NOTE: keyrelease should be called\n\n@param data: data to type.\n@type data: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m1"} {"signature": "def keyrelease(self, data):", "body": "try:window = self._get_front_most_window()except (IndexError,):window = self._get_any_window()key_release_action = KeyReleaseAction(window, data)return ", "docstring": "Release key. NOTE: keypress should be called before this\n\n@param data: data to type.\n@type data: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m2"} {"signature": "def enterstring(self, window_name, object_name='', data=''):", "body": "if not object_name and not data:return self.generatekeyevent(window_name)else:object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)self._grabfocus(object_handle)object_handle.sendKeys(data)return ", "docstring": "Type string sequence.\n\n@param window_name: Window name to focus on, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to focus on, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param data: data to type.\n@type data: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m3"} {"signature": "def settextvalue(self, window_name, object_name, data):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)object_handle.AXValue = datareturn ", "docstring": "Type string sequence.\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. 
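settextvalue and enterstring above differ in mechanism: the first writes the AXValue attribute directly, the second focuses the element and simulates key strokes with sendKeys. A sketch with handle standing in for the element returned by _get_object_handle:

def set_text(handle, data):
    handle.AXValue = data   # direct attribute write, no key events reach the app
    return 1

def type_text(handle, data):
    handle.sendKeys(data)   # per-character keyboard simulation
    return 1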
\n@type object_name: string\n@param data: data to type.\n@type data: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m4"} {"signature": "def gettextvalue(self, window_name, object_name, startPosition=, endPosition=):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)return object_handle.AXValue", "docstring": "Get text value\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param startPosition: Starting position of text to fetch\n@type: startPosition: int\n@param endPosition: Ending position of text to fetch\n@type: endPosition: int\n\n@return: text on success.\n@rtype: string", "id": "f10324:c0:m5"} {"signature": "def inserttext(self, window_name, object_name, position, data):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)existing_data = object_handle.AXValuesize = len(existing_data)if position < :position = if position > size:position = sizeobject_handle.AXValue = existing_data[:position] + data +existing_data[position:]return ", "docstring": "Insert string sequence in given position.\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param position: position where text has to be entered.\n@type data: int\n@param data: data to type.\n@type data: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m6"} {"signature": "def verifypartialmatch(self, window_name, object_name, partial_text):", "body": "try:if re.search(fnmatch.translate(partial_text),self.gettextvalue(window_name,object_name)):return except:passreturn ", "docstring": "Verify partial text\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param partial_text: Partial text to match\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m7"} {"signature": "def verifysettext(self, window_name, object_name, text):", "body": "try:return int(re.match(fnmatch.translate(text),self.gettextvalue(window_name,object_name)))except:return ", "docstring": "Verify text is set correctly\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. 
\n@type object_name: string\n@param text: text to match\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m8"} {"signature": "def istextstateenabled(self, window_name, object_name):", "body": "try:object_handle = self._get_object_handle(window_name, object_name)if object_handle.AXEnabled:return except LdtpServerException:passreturn ", "docstring": "Verifies text state enabled or not\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n\n@return: 1 on success 0 on failure.\n@rtype: integer", "id": "f10324:c0:m9"} {"signature": "def getcharcount(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)return object_handle.AXNumberOfCharacters", "docstring": "Get character count\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m10"} {"signature": "def appendtext(self, window_name, object_name, data):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)object_handle.AXValue += datareturn ", "docstring": "Append string sequence.\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param data: data to type.\n@type data: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m11"} {"signature": "def getcursorposition(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)return object_handle.AXSelectedTextRange.loc", "docstring": "Get cursor position\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n\n@return: Cursor position on success.\n@rtype: integer", "id": "f10324:c0:m12"} {"signature": "def setcursorposition(self, window_name, object_name, cursor_position):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)object_handle.AXSelectedTextRange.loc = cursor_positionreturn ", "docstring": "Set cursor position\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. 
\n@type object_name: string\n@param cursor_position: Cursor position to be set\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m13"} {"signature": "def cuttext(self, window_name, object_name, start_position, end_position=-):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)size = object_handle.AXNumberOfCharactersif end_position == - or end_position > size:end_position = sizeif start_position < :start_position = data = object_handle.AXValueClipboard.copy(data[start_position:end_position])object_handle.AXValue = data[:start_position] + data[end_position:]return ", "docstring": "cut text from start position to end position\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param start_position: Start position\n@type object_name: integer\n@param end_position: End position, default -1\nCut all the text from start position till end\n@type object_name: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m14"} {"signature": "def copytext(self, window_name, object_name, start_position, end_position=-):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)size = object_handle.AXNumberOfCharactersif end_position == - or end_position > size:end_position = sizeif start_position < :start_position = data = object_handle.AXValueClipboard.copy(data[start_position:end_position])return ", "docstring": "copy text from start position to end position\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param start_position: Start position\n@type object_name: integer\n@param end_position: End position, default -1\nCopy all the text from start position till end\n@type object_name: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m15"} {"signature": "def deletetext(self, window_name, object_name, start_position, end_position=-):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)size = object_handle.AXNumberOfCharactersif end_position == - or end_position > size:end_position = sizeif start_position < :start_position = data = object_handle.AXValueobject_handle.AXValue = data[:start_position] + data[end_position:]return ", "docstring": "delete text from start position to end position\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. 
\n@type object_name: string\n@param start_position: Start position\n@type object_name: integer\n@param end_position: End position, default -1\nDelete all the text from start position till end\n@type object_name: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m16"} {"signature": "def pastetext(self, window_name, object_name, position=):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)size = object_handle.AXNumberOfCharactersif position > size:position = sizeif position < :position = clipboard = Clipboard.paste()data = object_handle.AXValueobject_handle.AXValue = data[:position] + clipboard + data[position:]return ", "docstring": "paste text from start position to end position\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param position: Position to paste the text, default 0\n@type object_name: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m17"} {"signature": "def selectmenuitem(self, window_name, object_name):", "body": "menu_handle = self._get_menu_handle(window_name, object_name)if not menu_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)menu_handle.Press()return ", "docstring": "Select (click) a menu item.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10325:c0:m1"} {"signature": "def doesmenuitemexist(self, window_name, object_name):", "body": "try:menu_handle = self._get_menu_handle(window_name, object_name,False)return except LdtpServerException:return ", "docstring": "Check a menu item exist.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n@param strict_hierarchy: Mandate menu hierarchy if set to True\n@type object_name: boolean\n\n@return: 1 on success.\n@rtype: integer", "id": "f10325:c0:m2"} {"signature": "def menuitemenabled(self, window_name, object_name):", "body": "try:menu_handle = self._get_menu_handle(window_name, object_name,False)if menu_handle.AXEnabled:return except LdtpServerException:passreturn ", "docstring": "Verify a menu item is enabled\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. 
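cuttext, copytext, deletetext and pastetext above all share the same clamp-and-slice pattern; the stripped defaults are presumably 0 and -1 (meaning "to the end"). A runnable reconstruction of that pattern on a plain string:

def cut_range(text, start, end=-1):
    size = len(text)
    if end == -1 or end > size:
        end = size
    if start < 0:
        start = 0
    clipped = text[start:end]              # what cuttext/copytext push to the clipboard
    remaining = text[:start] + text[end:]  # what cuttext/deletetext leave behind
    return clipped, remaining

print(cut_range('hello world', 5))         # (' world', 'hello')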
Or menu heirarchy\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10325:c0:m3"} {"signature": "def listsubmenus(self, window_name, object_name):", "body": "menu_handle = self._get_menu_handle(window_name, object_name)role, label = self._ldtpize_accessible(menu_handle)menu_clicked = Falsetry:if not menu_handle.AXChildren:menu_clicked = Truetry:menu_handle.Press()self.wait()except atomac._a11y.ErrorCannotComplete:passif not menu_handle.AXChildren:raise LdtpServerException(u\"\" %label)children = menu_handle.AXChildren[]sub_menus = []for current_menu in children.AXChildren:role, label = self._ldtpize_accessible(current_menu)if not label:continuesub_menus.append(u\"\" % (role, label))finally:if menu_clicked:menu_handle.Cancel()return sub_menus", "docstring": "List children of menu item\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n\n@return: menu item in list on success.\n@rtype: list", "id": "f10325:c0:m4"} {"signature": "def verifymenucheck(self, window_name, object_name):", "body": "try:menu_handle = self._get_menu_handle(window_name, object_name,False)try:if menu_handle.AXMenuItemMarkChar:return except atomac._a11y.Error:passexcept LdtpServerException:passreturn ", "docstring": "Verify a menu item is checked\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10325:c0:m5"} {"signature": "def verifymenuuncheck(self, window_name, object_name):", "body": "try:menu_handle = self._get_menu_handle(window_name, object_name,False)try:if not menu_handle.AXMenuItemMarkChar:return except atomac._a11y.Error:return except LdtpServerException:passreturn ", "docstring": "Verify a menu item is un-checked\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10325:c0:m6"} {"signature": "def menucheck(self, window_name, object_name):", "body": "menu_handle = self._get_menu_handle(window_name, object_name)if not menu_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)try:if menu_handle.AXMenuItemMarkChar:return except atomac._a11y.Error:passmenu_handle.Press()return ", "docstring": "Check (click) a menu item.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. 
Or menu heirarchy\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10325:c0:m7"} {"signature": "def menuuncheck(self, window_name, object_name):", "body": "menu_handle = self._get_menu_handle(window_name, object_name)if not menu_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)try:if not menu_handle.AXMenuItemMarkChar:return except atomac._a11y.Error:return menu_handle.Press()return ", "docstring": "Uncheck (click) a menu item.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10325:c0:m8"} {"signature": "def main(port=, parentpid=None):", "body": "if \"\" in os.environ:_ldtp_debug = Trueelse:_ldtp_debug = False_ldtp_debug_file = os.environ.get('', None)if _ldtp_debug:print(\"\".format(int(parentpid)))if _ldtp_debug_file:with open(unicode(_ldtp_debug_file), \"\") as fp:fp.write(\"\".format(int(parentpid)))server = LDTPServer(('', port), allow_none=True, logRequests=_ldtp_debug,requestHandler=RequestHandler)server.register_introspection_functions()server.register_multicall_functions()ldtp_inst = core.Core()server.register_instance(ldtp_inst)if parentpid:thread.start_new_thread(notifyclient, (parentpid,))try:server.serve_forever()except KeyboardInterrupt:passexcept:if _ldtp_debug:print(traceback.format_exc())if _ldtp_debug_file:with open(_ldtp_debug_file, \"\") as fp:fp.write(traceback.format_exc())", "docstring": "Main entry point. Parse command line options and start up a server.", "id": "f10326:m1"} {"signature": "def server_bind(self, *args, **kwargs):", "body": "self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, )SimpleXMLRPCServer.server_bind(self, *args, **kwargs)", "docstring": "Server Bind. 
Forces reuse of port.", "id": "f10326:c1:m0"} {"signature": "def getapplist(self):", "body": "app_list = []self._update_apps()for gui in self._running_apps:name = gui.localizedName()try:name = unicode(name)except NameError:name = str(name)except UnicodeEncodeError:passapp_list.append(name)return list(set(app_list))", "docstring": "Get all accessibility application name that are currently running\n\n@return: list of appliction name of string type on success.\n@rtype: list", "id": "f10327:c0:m3"} {"signature": "def getwindowlist(self):", "body": "return self._get_windows(True).keys()", "docstring": "Get all accessibility window that are currently open\n\n@return: list of window names in LDTP format of string type on success.\n@rtype: list", "id": "f10327:c0:m4"} {"signature": "def isalive(self):", "body": "return True", "docstring": "Client will use this to verify whether the server instance is alive or not.\n\n@return: True on success.\n@rtype: boolean", "id": "f10327:c0:m5"} {"signature": "def poll_events(self):", "body": "if not self._callback_event:return ''return self._callback_event.pop()", "docstring": "Poll for any registered events or window create events\n\n@return: window name\n@rtype: string", "id": "f10327:c0:m6"} {"signature": "def getlastlog(self):", "body": "if not self._custom_logger.log_events:return ''return self._custom_logger.log_events.pop()", "docstring": "Returns one line of log at any time, if any available, else empty string\n\n@return: log as string\n@rtype: string", "id": "f10327:c0:m7"} {"signature": "def startprocessmonitor(self, process_name, interval=):", "body": "if process_name in self._process_stats:self._process_stats[process_name].stop()self._process_stats[process_name] = ProcessStats(process_name, interval)self._process_stats[process_name].start()return ", "docstring": "Start memory and CPU monitoring, with the time interval between\neach process scan\n\n@param process_name: Process name, ex: firefox-bin.\n@type process_name: string\n@param interval: Time interval between each process scan\n@type interval: double\n\n@return: 1 on success\n@rtype: integer", "id": "f10327:c0:m8"} {"signature": "def stopprocessmonitor(self, process_name):", "body": "if process_name in self._process_stats:self._process_stats[process_name].stop()return ", "docstring": "Stop memory and CPU monitoring\n\n@param process_name: Process name, ex: firefox-bin.\n@type process_name: string\n\n@return: 1 on success\n@rtype: integer", "id": "f10327:c0:m9"} {"signature": "def getcpustat(self, process_name):", "body": "_stat_inst = ProcessStats(process_name)_stat_list = []for p in _stat_inst.get_cpu_memory_stat():try:_stat_list.append(p.get_cpu_percent())except psutil.AccessDenied:passreturn _stat_list", "docstring": "get CPU stat for the give process name\n\n@param process_name: Process name, ex: firefox-bin.\n@type process_name: string\n\n@return: cpu stat list on success, else empty list\n If same process name, running multiple instance,\n get the stat of all the process CPU usage\n@rtype: list", "id": "f10327:c0:m10"} {"signature": "def getmemorystat(self, process_name):", "body": "_stat_inst = ProcessStats(process_name)_stat_list = []for p in _stat_inst.get_cpu_memory_stat():try:_stat_list.append(round(p.get_memory_percent(), ))except psutil.AccessDenied:passreturn _stat_list", "docstring": "get memory stat\n\n@param process_name: Process name, ex: firefox-bin.\n@type process_name: string\n\n@return: memory stat list on success, else empty list\n If same process name, running 
multiple instance,\n get the stat of all the process memory usage\n@rtype: list", "id": "f10327:c0:m11"} {"signature": "def getobjectlist(self, window_name):", "body": "try:window_handle, name, app = self._get_window_handle(window_name, True)object_list = self._get_appmap(window_handle, name, True)except atomac._a11y.ErrorInvalidUIElement:self._windows = {}window_handle, name, app = self._get_window_handle(window_name, True)object_list = self._get_appmap(window_handle, name, True)return object_list.keys()", "docstring": "Get list of items in given GUI.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n\n@return: list of items in LDTP naming convention.\n@rtype: list", "id": "f10327:c0:m12"} {"signature": "def getobjectinfo(self, window_name, object_name):", "body": "try:obj_info = self._get_object_map(window_name, object_name,wait_for_object=False)except atomac._a11y.ErrorInvalidUIElement:self._windows = {}obj_info = self._get_object_map(window_name, object_name,wait_for_object=False)props = []if obj_info:for obj_prop in obj_info.keys():if not obj_info[obj_prop] or obj_prop == \"\":continueprops.append(obj_prop)return props", "docstring": "Get object properties.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: list of properties\n@rtype: list", "id": "f10327:c0:m13"} {"signature": "def getobjectproperty(self, window_name, object_name, prop):", "body": "try:obj_info = self._get_object_map(window_name, object_name,wait_for_object=False)except atomac._a11y.ErrorInvalidUIElement:self._windows = {}obj_info = self._get_object_map(window_name, object_name,wait_for_object=False)if obj_info and prop != \"\" and prop in obj_info:if prop == \"\":return ldtp_class_type.get(obj_info[prop], obj_info[prop])else:return obj_info[prop]raise LdtpServerException('' %(prop, object_name))", "docstring": "Get object property value.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n@param prop: property name.\n@type prop: string\n\n@return: property\n@rtype: string", "id": "f10327:c0:m14"} {"signature": "def getchild(self, window_name, child_name='', role='', parent=''):", "body": "matches = []if role:role = re.sub('', '', role)self._windows = {}if parent and (child_name or role):_window_handle, _window_name =self._get_window_handle(window_name)[:]if not _window_handle:raise LdtpServerException('' %window_name)appmap = self._get_appmap(_window_handle, _window_name)obj = self._get_object_map(window_name, parent)def _get_all_children_under_obj(obj, child_list):if role and obj[''] == role:child_list.append(obj[''])elif child_name and self._match_name_to_appmap(child_name, obj):child_list.append(obj[''])if obj:children = obj['']if not children:return child_listfor child in children.split():return _get_all_children_under_obj(appmap[child],child_list)matches = _get_all_children_under_obj(obj, [])if not matches:if child_name:_name = '' % child_nameif role:_role = '' % roleif parent:_parent = '' % parentexception = '' % (_name, _role, _parent)raise LdtpServerException(exception)return 
matches_window_handle, _window_name =self._get_window_handle(window_name)[:]if not _window_handle:raise LdtpServerException('' %window_name)appmap = self._get_appmap(_window_handle, _window_name)for name in appmap.keys():obj = appmap[name]if role and not child_name and obj[''] == role:matches.append(name)if parent and child_name and not role andself._match_name_to_appmap(parent, obj):matches.append(name)if child_name and not role andself._match_name_to_appmap(child_name, obj):return namematches.append(name)if role and child_name and obj[''] == role andself._match_name_to_appmap(child_name, obj):matches.append(name)if not matches:_name = ''_role = ''_parent = ''if child_name:_name = '' % child_nameif role:_role = '' % roleif parent:_parent = '' % parentexception = '' % (_name, _role, _parent)raise LdtpServerException(exception)return matches", "docstring": "Gets the list of object available in the window, which matches\ncomponent name or role name or both.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param child_name: Child name to search for.\n@type child_name: string\n@param role: role name to search for, or an empty string for wildcard.\n@type role: string\n@param parent: parent name to search for, or an empty string for wildcard.\n@type role: string\n@return: list of matched children names\n@rtype: list", "id": "f10327:c0:m15"} {"signature": "def launchapp(self, cmd, args=[], delay=, env=, lang=\"\"):", "body": "try:atomac.NativeUIElement.launchAppByBundleId(cmd)return except RuntimeError:if atomac.NativeUIElement.launchAppByBundlePath(cmd, args):try:time.sleep(int(delay))except ValueError:time.sleep()return else:raise LdtpServerException(u\"\" % cmd)", "docstring": "Launch application.\n\n@param cmd: Command line string to execute.\n@type cmd: string\n@param args: Arguments to the application\n@type args: list\n@param delay: Delay after the application is launched\n@type delay: int\n@param env: GNOME accessibility environment to be set or not\n@type env: int\n@param lang: Application language to be used\n@type lang: string\n\n@return: 1 on success\n@rtype: integer\n\n@raise LdtpServerException: When command fails", "id": "f10327:c0:m16"} {"signature": "def wait(self, timeout=):", "body": "time.sleep(timeout)return ", "docstring": "Wait a given amount of seconds.\n\n@param timeout: Wait timeout in seconds\n@type timeout: double\n\n@return: 1\n@rtype: integer", "id": "f10327:c0:m17"} {"signature": "def closewindow(self, window_name):", "body": "return self._singleclick(window_name, \"\")", "docstring": "Close window.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m18"} {"signature": "def minimizewindow(self, window_name):", "body": "return self._singleclick(window_name, \"\")", "docstring": "Minimize window.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m19"} {"signature": "def maximizewindow(self, window_name):", "body": "return self._singleclick(window_name, \"\")", "docstring": "Maximize window.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m20"} 
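Note: the Core window/widget calls extracted in this section (getapplist, guiexist, waittillguiexist, activatewindow, click, closewindow, ...) are reachable over XML-RPC, since main() above registers a core.Core() instance on the LDTPServer/SimpleXMLRPCServer. The sketch below shows how a Python 3 test script might drive them through xmlrpc.client; it is a minimal illustration only, and the endpoint (localhost:4118) plus the window/object names (frmTextEdit, btnOK) are hypothetical placeholders rather than values taken from the extracted bodies.

# Sketch only: assumes the LDTP server started by main() is already listening on HOST:PORT.
import xmlrpc.client

HOST, PORT = "localhost", 4118   # placeholder endpoint; use whatever port the server was started with
ldtp = xmlrpc.client.ServerProxy("http://%s:%d" % (HOST, PORT), allow_none=True)

assert ldtp.isalive()            # Core.isalive() simply returns True when the server instance is reachable
print(ldtp.getapplist())         # names of the currently running accessible applications

# Wait for a (hypothetical) window, then drive a widget inside it.
if ldtp.waittillguiexist("frmTextEdit", "", 30):
    ldtp.activatewindow("frmTextEdit")           # brings the window forward via _grabfocus
    if ldtp.stateenabled("frmTextEdit", "btnOK"):
        ldtp.click("frmTextEdit", "btnOK")       # grabs focus, then synthesizes a click at the widget centre
    ldtp.closewindow("frmTextEdit")              # presses the window's close button via _singleclick

The same proxy reaches every registered Core, text, and menu method in this section; return values are the 1/0 integers and lists described in the corresponding docstrings.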
{"signature": "def activatewindow(self, window_name):", "body": "window_handle = self._get_window_handle(window_name)self._grabfocus(window_handle)return ", "docstring": "Activate window.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m21"} {"signature": "def click(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)size = self._getobjectsize(object_handle)self._grabfocus(object_handle)self.wait()self.generatemouseevent(size[] + size[] / , size[] + size[] / , \"\")return ", "docstring": "Click item.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m22"} {"signature": "def getallstates(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)_obj_states = []if object_handle.AXEnabled:_obj_states.append(\"\")if object_handle.AXFocused:_obj_states.append(\"\")else:try:if object_handle.AXFocused:_obj_states.append(\"\")except:passif re.match(\"\", object_handle.AXRole, re.M | re.U | re.L) orre.match(\"\", object_handle.AXRole,re.M | re.U | re.L):if object_handle.AXValue:_obj_states.append(\"\")return _obj_states", "docstring": "Get all states of given object\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: list of string on success.\n@rtype: list", "id": "f10327:c0:m23"} {"signature": "def hasstate(self, window_name, object_name, state, guiTimeOut=):", "body": "try:object_handle = self._get_object_handle(window_name, object_name)if state == \"\":return int(object_handle.AXEnabled)elif state == \"\":return int(object_handle.AXFocused)elif state == \"\":return int(object_handle.AXFocused)elif state == \"\":if re.match(\"\", object_handle.AXRole,re.M | re.U | re.L) orre.match(\"\", object_handle.AXRole,re.M | re.U | re.L):if object_handle.AXValue:return except:passreturn ", "docstring": "has state\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n@type window_name: string\n@param state: State of the current object.\n@type object_name: string\n@param guiTimeOut: Wait timeout in seconds\n@type guiTimeOut: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m24"} {"signature": "def getobjectsize(self, window_name, object_name=None):", "body": "if not object_name:handle, name, app = self._get_window_handle(window_name)else:handle = self._get_object_handle(window_name, object_name)return self._getobjectsize(handle)", "docstring": "Get object size\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full 
name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n\n@return: x, y, width, height on success.\n@rtype: list", "id": "f10327:c0:m25"} {"signature": "def getwindowsize(self, window_name):", "body": "return self.getobjectsize(window_name)", "docstring": "Get window size.\n\n@param window_name: Window name to get size of.\n@type window_name: string\n\n@return: list of dimensions [x, y, w, h]\n@rtype: list", "id": "f10327:c0:m26"} {"signature": "def grabfocus(self, window_name, object_name=None):", "body": "if not object_name:handle, name, app = self._get_window_handle(window_name)else:handle = self._get_object_handle(window_name, object_name)return self._grabfocus(handle)", "docstring": "Grab focus.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m27"} {"signature": "def guiexist(self, window_name, object_name=None):", "body": "try:self._windows = {}if not object_name:handle, name, app = self._get_window_handle(window_name, False)else:handle = self._get_object_handle(window_name, object_name,wait_for_object=False,force_remap=True)return except LdtpServerException:passreturn ", "docstring": "Checks whether a window or component exists.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m28"} {"signature": "def guitimeout(self, timeout):", "body": "self._window_timeout = timeoutreturn ", "docstring": "Change GUI timeout period, default 30 seconds.\n\n@param timeout: timeout in seconds\n@type timeout: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m29"} {"signature": "def objtimeout(self, timeout):", "body": "self._obj_timeout = timeoutreturn ", "docstring": "Change object timeout period, default 5 seconds.\n\n@param timeout: timeout in seconds\n@type timeout: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m30"} {"signature": "def waittillguiexist(self, window_name, object_name='',guiTimeOut=, state=''):", "body": "timeout = while timeout < guiTimeOut:if self.guiexist(window_name, object_name):return time.sleep()timeout += return ", "docstring": "Wait till a window or component exists.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n@param guiTimeOut: Wait timeout in seconds\n@type guiTimeOut: integer\n@param state: Object state used only when object_name is provided.\n@type object_name: string\n\n@return: 1 if GUI was found, 0 if not.\n@rtype: integer", "id": "f10327:c0:m31"} {"signature": "def waittillguinotexist(self, window_name, object_name='', guiTimeOut=):", "body": "timeout = while timeout < guiTimeOut:if not self.guiexist(window_name, object_name):return time.sleep()timeout += return ", "docstring": "Wait till a window does not exist.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix 
glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n@param guiTimeOut: Wait timeout in seconds\n@type guiTimeOut: integer\n\n@return: 1 if GUI has gone away, 0 if not.\n@rtype: integer", "id": "f10327:c0:m32"} {"signature": "def objectexist(self, window_name, object_name):", "body": "try:object_handle = self._get_object_handle(window_name, object_name)return except LdtpServerException:return ", "docstring": "Checks whether a window or component exists.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 if GUI was found, 0 if not.\n@rtype: integer", "id": "f10327:c0:m33"} {"signature": "def stateenabled(self, window_name, object_name):", "body": "try:object_handle = self._get_object_handle(window_name, object_name)if object_handle.AXEnabled:return except LdtpServerException:passreturn ", "docstring": "Check whether an object state is enabled or not\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success 0 on failure.\n@rtype: integer", "id": "f10327:c0:m34"} {"signature": "def check(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)if object_handle.AXValue == :return self._grabfocus(object_handle)x, y, width, height = self._getobjectsize(object_handle)self.generatemouseevent(x + width / , y + height / , \"\")return ", "docstring": "Check item.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m35"} {"signature": "def uncheck(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)if not object_handle.AXEnabled:raise LdtpServerException(u\"\" % object_name)if object_handle.AXValue == :return self._grabfocus(object_handle)x, y, width, height = self._getobjectsize(object_handle)self.generatemouseevent(x + width / , y + height / , \"\")return ", "docstring": "Uncheck item.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m36"} {"signature": "def verifycheck(self, window_name, object_name):", "body": "try:object_handle = self._get_object_handle(window_name, object_name,wait_for_object=False)if object_handle.AXValue == :return except LdtpServerException:passreturn ", "docstring": "Verify check item.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full 
name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success 0 on failure.\n@rtype: integer", "id": "f10327:c0:m37"} {"signature": "def verifyuncheck(self, window_name, object_name):", "body": "try:object_handle = self._get_object_handle(window_name, object_name,wait_for_object=False)if object_handle.AXValue == :return except LdtpServerException:passreturn ", "docstring": "Verify uncheck item.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success 0 on failure.\n@rtype: integer", "id": "f10327:c0:m38"} {"signature": "def getaccesskey(self, window_name, object_name):", "body": "menu_handle = self._get_menu_handle(window_name, object_name)key = menu_handle.AXMenuItemCmdCharmodifiers = menu_handle.AXMenuItemCmdModifiersglpyh = menu_handle.AXMenuItemCmdGlyphvirtual_key = menu_handle.AXMenuItemCmdVirtualKeymodifiers_type = \"\"if modifiers == :modifiers_type = \"\"elif modifiers == :modifiers_type = \"\"elif modifiers == :modifiers_type = \"\"elif modifiers == :modifiers_type = \"\"elif modifiers == :modifiers_type = \"\"elif modifiers == :modifiers_type = \"\"if virtual_key == and glpyh == :modifiers = \"\"key = \"\"elif virtual_key == and glpyh == :modifiers = \"\"key = \"\"elif virtual_key == and glpyh == :modifiers = \"\"key = \"\"elif virtual_key == and glpyh == :modifiers = \"\"key = \"\"elif virtual_key == and glpyh == :key = \"\"elif virtual_key == and glpyh == :key = \"\"elif virtual_key == and glpyh == :key = \"\"elif virtual_key == and glpyh == :key = \"\"elif virtual_key == and glpyh == :key = \"\"if not key:raise LdtpServerException(\"\")return modifiers_type + key", "docstring": "Get access key of given object\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. 
Or menu heirarchy\n@type object_name: string\n\n@return: access key in string format on success, else LdtpExecutionError on failure.\n@rtype: string", "id": "f10327:c0:m39"} {"signature": "def __init__(self, appname, interval=):", "body": "if not importPsUtil:raise LdtpServerException('')threading.Thread.__init__(self)self._appname = appnameself._interval = intervalself._stop = Falseself.running = True", "docstring": "Start memory and CPU monitoring, with the time interval between\neach process scan\n\n@param appname: Process name, ex: firefox-bin.\n@type appname: string\n@param interval: Time interval between each process scan\n@type interval: float", "id": "f10328:c1:m0"} {"signature": "def _ldtpize_accessible(self, acc):", "body": "actual_role = self._get_role(acc)label = self._get_title(acc)if re.match(\"\", actual_role, re.M | re.U | re.L):strip = r\"\"else:strip = r\"\"if label:label = re.sub(strip, u\"\", label)role = abbreviated_roles.get(actual_role, \"\")if self._ldtp_debug and role == \"\":print(actual_role, acc)return role, label", "docstring": "Get LDTP format accessibile name\n\n@param acc: Accessible handle\n@type acc: object\n\n@return: object type, stripped object name (associated / direct),\n associated label\n@rtype: tuple", "id": "f10328:c2:m8"} {"signature": "def _glob_match(self, pattern, string):", "body": "return bool(re.match(fnmatch.translate(pattern), string,re.M | re.U | re.L))", "docstring": "Match given string, by escaping regex characters", "id": "f10328:c2:m9"} {"signature": "def __init__(self, data):", "body": "self._data = dataself._dummy_window = NativeUIElement()_keyOp = KeyboardOp()self._keyvalId = _keyOp.get_keyval_id(data)if not self._keyvalId:raise LdtpServerException(\"\")self._doCombo()", "docstring": "@param data: data to type\n@type data: string", "id": "f10329:c2:m0"} {"signature": "def loadKeyboard():", "body": "keyboard_layout = DEFAULT_KEYBOARDkeyboard_layout.update(specialKeys)return keyboard_layout", "docstring": "Load a given keyboard mapping (of characters to virtual key codes).\n\n Default is US keyboard\n Parameters: None (relies on the internationalization settings)\n Returns: A dictionary representing the current keyboard mapping (of\n characters to keycodes)", "id": "f10330:m0"} {"signature": "@classmethoddef _getRunningApps(cls):", "body": "def runLoopAndExit():AppHelper.stopEventLoop()AppHelper.callLater(, runLoopAndExit)AppHelper.runConsoleEventLoop()ws = AppKit.NSWorkspace.sharedWorkspace()apps = ws.runningApplications()return apps", "docstring": "Get a list of the running applications.", "id": "f10331:c0:m0"} {"signature": "@classmethoddef getAppRefByPid(cls, pid):", "body": "return _a11y.getAppRefByPid(cls, pid)", "docstring": "Get the top level element for the application specified by pid.", "id": "f10331:c0:m1"} {"signature": "@classmethoddef getAppRefByBundleId(cls, bundleId):", "body": "ra = AppKit.NSRunningApplicationapps = ra.runningApplicationsWithBundleIdentifier_(bundleId)if len(apps) == :raise ValueError(('''' % bundleId))pid = apps[].processIdentifier()return cls.getAppRefByPid(pid)", "docstring": "Get the top level element for the application with the specified\nbundle ID, such as com.vmware.fusion.", "id": "f10331:c0:m2"} {"signature": "@classmethoddef getAppRefByLocalizedName(cls, name):", "body": "apps = cls._getRunningApps()for app in apps:if fnmatch.fnmatch(app.localizedName(), name):pid = app.processIdentifier()return cls.getAppRefByPid(pid)raise ValueError('')", "docstring": "Get the top level element for the 
application with the specified\n localized name, such as VMware Fusion.\n\n Wildcards are also allowed.", "id": "f10331:c0:m3"} {"signature": "@classmethoddef getFrontmostApp(cls):", "body": "apps = cls._getRunningApps()for app in apps:pid = app.processIdentifier()ref = cls.getAppRefByPid(pid)try:if ref.AXFrontmost:return refexcept (_a11y.ErrorUnsupported,_a11y.ErrorCannotComplete,_a11y.ErrorAPIDisabled,_a11y.ErrorNotImplemented):passraise ValueError('')", "docstring": "Get the current frontmost application.\n\n Raise a ValueError exception if no GUI applications are found.", "id": "f10331:c0:m4"} {"signature": "@classmethoddef getAnyAppWithWindow(cls):", "body": "apps = cls._getRunningApps()for app in apps:pid = app.processIdentifier()ref = cls.getAppRefByPid(pid)if hasattr(ref, '') and len(ref.windows()) > :return refraise ValueError('')", "docstring": "Get a random app that has windows.\n\n Raise a ValueError exception if no GUI applications are found.", "id": "f10331:c0:m5"} {"signature": "@classmethoddef getSystemObject(cls):", "body": "return _a11y.getSystemObject(cls)", "docstring": "Get the top level system accessibility object.", "id": "f10331:c0:m6"} {"signature": "@classmethoddef setSystemWideTimeout(cls, timeout=):", "body": "return cls.getSystemObject().setTimeout(timeout)", "docstring": "Set the system-wide accessibility timeout.\n\n Optional: timeout (non-negative float; defaults to 0)\n A value of 0 will reset the timeout to the system default.\n Returns: None.", "id": "f10331:c0:m7"} {"signature": "@staticmethoddef launchAppByBundleId(bundleID):", "body": "ws = AppKit.NSWorkspace.sharedWorkspace()r = ws.launchAppWithBundleIdentifier_options_additionalEventParamDescriptor_launchIdentifier_(bundleID,AppKit.NSWorkspaceLaunchAllowingClassicStartup,AppKit.NSAppleEventDescriptor.nullDescriptor(),None)if not r[]:raise RuntimeError('')", "docstring": "Launch the application with the specified bundle ID", "id": "f10331:c0:m8"} {"signature": "@staticmethoddef launchAppByBundlePath(bundlePath, arguments=None):", "body": "if arguments is None:arguments = []bundleUrl = NSURL.fileURLWithPath_(bundlePath)workspace = AppKit.NSWorkspace.sharedWorkspace()arguments_strings = list(map(lambda a: NSString.stringWithString_(str(a)),arguments))arguments = NSDictionary.dictionaryWithDictionary_({AppKit.NSWorkspaceLaunchConfigurationArguments: NSArray.arrayWithArray_(arguments_strings)})return workspace.launchApplicationAtURL_options_configuration_error_(bundleUrl,AppKit.NSWorkspaceLaunchAllowingClassicStartup,arguments,None)", "docstring": "Launch app with a given bundle path.\n\n Return True if succeed.", "id": "f10331:c0:m9"} {"signature": "@staticmethoddef terminateAppByBundleId(bundleID):", "body": "ra = AppKit.NSRunningApplicationif getattr(ra, \"\"):appList = ra.runningApplicationsWithBundleIdentifier_(bundleID)if appList and len(appList) > :app = appList[]return app and app.terminate() and True or Falsereturn False", "docstring": "Terminate app with a given bundle ID.\n Requires 10.6.\n\n Return True if succeed.", "id": "f10331:c0:m10"} {"signature": "def setTimeout(self, timeout=):", "body": "self._setTimeout(timeout)", "docstring": "Set the accessibiltiy API timeout on the given reference.\n\n Optional: timeout (non-negative float; defaults to 0)\n A value of 0 will reset the timeout to the system-wide\n value\n Returns: None", "id": "f10331:c0:m11"} {"signature": "def _postQueuedEvents(self, interval=):", "body": "while len(self.eventList) > :(nextEvent, args) = 
self.eventList.popleft()nextEvent(*args)time.sleep(interval)", "docstring": "Private method to post queued events (e.g. Quartz events).\n\n Each event in queue is a tuple (event call, args to event call).", "id": "f10331:c0:m12"} {"signature": "def _clearEventQueue(self):", "body": "if hasattr(self, ''):self.eventList.clear()", "docstring": "Clear the event queue.", "id": "f10331:c0:m13"} {"signature": "def _queueEvent(self, event, args):", "body": "if not hasattr(self, ''):self.eventList = deque([(event, args)])returnself.eventList.append((event, args))", "docstring": "Private method to queue events to run.\n\n Each event in queue is a tuple (event call, args to event call).", "id": "f10331:c0:m14"} {"signature": "def _addKeyToQueue(self, keychr, modFlags=, globally=False):", "body": "if not keychr:returnif not hasattr(self, ''):self.keyboard = AXKeyboard.loadKeyboard()if keychr in self.keyboard[''] and not modFlags:self._sendKeyWithModifiers(keychr,[AXKeyCodeConstants.SHIFT],globally)returnif keychr.isupper() and not modFlags:self._sendKeyWithModifiers(keychr.lower(),[AXKeyCodeConstants.SHIFT],globally)returnif keychr not in self.keyboard:self._clearEventQueue()raise ValueError('' % keychr)keyDown = Quartz.CGEventCreateKeyboardEvent(None,self.keyboard[keychr],True)keyUp = Quartz.CGEventCreateKeyboardEvent(None,self.keyboard[keychr],False)Quartz.CGEventSetFlags(keyDown, modFlags)Quartz.CGEventSetFlags(keyUp, modFlags)if not globally:macVer, _, _ = platform.mac_ver()macVer = int(macVer.split('')[])if macVer > :appPid = self._getPid()self._queueEvent(Quartz.CGEventPostToPid, (appPid, keyDown))self._queueEvent(Quartz.CGEventPostToPid, (appPid, keyUp))else:appPsn = self._getPsnForPid(self._getPid())self._queueEvent(Quartz.CGEventPostToPSN, (appPsn, keyDown))self._queueEvent(Quartz.CGEventPostToPSN, (appPsn, keyUp))else:self._queueEvent(Quartz.CGEventPost, (, keyDown))self._queueEvent(Quartz.CGEventPost, (, keyUp))", "docstring": "Add keypress to queue.\n\n Parameters: key character or constant referring to a non-alpha-numeric\n key (e.g. RETURN or TAB)\n modifiers\n global or app specific\n Returns: None or raise ValueError exception.", "id": "f10331:c0:m15"} {"signature": "def _sendKey(self, keychr, modFlags=, globally=False):", "body": "escapedChrs = {'': AXKeyCodeConstants.RETURN,'': AXKeyCodeConstants.RETURN,'': AXKeyCodeConstants.TAB,}if keychr in escapedChrs:keychr = escapedChrs[keychr]self._addKeyToQueue(keychr, modFlags, globally=globally)self._postQueuedEvents()", "docstring": "Send one character with no modifiers.\n\n Parameters: key character or constant referring to a non-alpha-numeric\n key (e.g. 
RETURN or TAB)\n modifier flags,\n global or app specific\n Returns: None or raise ValueError exception", "id": "f10331:c0:m16"} {"signature": "def _sendKeys(self, keystr):", "body": "for nextChr in keystr:self._sendKey(nextChr)", "docstring": "Send a series of characters with no modifiers.\n\n Parameters: keystr\n Returns: None or raise ValueError exception", "id": "f10331:c0:m17"} {"signature": "def _pressModifiers(self, modifiers, pressed=True, globally=False):", "body": "if not isinstance(modifiers, list):raise TypeError('')if not hasattr(self, ''):self.keyboard = AXKeyboard.loadKeyboard()modFlags = for nextMod in modifiers:if nextMod not in self.keyboard:errStr = ''self._clearEventQueue()raise ValueError(errStr % self.keyboard[nextMod])modEvent = Quartz.CGEventCreateKeyboardEvent(Quartz.CGEventSourceCreate(),self.keyboard[nextMod],pressed)if not pressed:Quartz.CGEventSetFlags(modEvent, )if globally:self._queueEvent(Quartz.CGEventPost, (, modEvent))else:macVer, _, _ = platform.mac_ver()macVer = int(macVer.split('')[])if macVer > :appPid = self._getPid()self._queueEvent(Quartz.CGEventPostToPid, (appPid, modEvent))else:appPsn = self._getPsnForPid(self._getPid())self._queueEvent(Quartz.CGEventPostToPSN, (appPsn, modEvent))modFlags += AXKeyboard.modKeyFlagConstants[nextMod]return modFlags", "docstring": "Press given modifiers (provided in list form).\n\n Parameters: modifiers list, global or app specific\n Optional: keypressed state (default is True (down))\n Returns: Unsigned int representing flags to set", "id": "f10331:c0:m18"} {"signature": "def _holdModifierKeys(self, modifiers):", "body": "modFlags = self._pressModifiers(modifiers)self._postQueuedEvents()return modFlags", "docstring": "Hold given modifier keys (provided in list form).\n\n Parameters: modifiers list\n Returns: Unsigned int representing flags to set", "id": "f10331:c0:m19"} {"signature": "def _releaseModifiers(self, modifiers, globally=False):", "body": "modifiers.reverse()modFlags = self._pressModifiers(modifiers, pressed=False,globally=globally)return modFlags", "docstring": "Release given modifiers (provided in list form).\n\n Parameters: modifiers list\n Returns: None", "id": "f10331:c0:m20"} {"signature": "def _releaseModifierKeys(self, modifiers):", "body": "modFlags = self._releaseModifiers(modifiers)self._postQueuedEvents()return modFlags", "docstring": "Release given modifier keys (provided in list form).\n\n Parameters: modifiers list\n Returns: Unsigned int representing flags to set", "id": "f10331:c0:m21"} {"signature": "@staticmethoddef _isSingleCharacter(keychr):", "body": "if not keychr:return Falseif len(keychr) == :return Truereturn keychr.count('') == and keychr.count('>') == andkeychr[] == '' and keychr[-] == '>'", "docstring": "Check whether given keyboard character is a single character.\n\n Parameters: key character which will be checked.\n Returns: True when given key character is a single character.", "id": "f10331:c0:m22"} {"signature": "def _sendKeyWithModifiers(self, keychr, modifiers, globally=False):", "body": "if not self._isSingleCharacter(keychr):raise ValueError('')if not hasattr(self, ''):self.keyboard = AXKeyboard.loadKeyboard()modFlags = self._pressModifiers(modifiers, globally=globally)self._sendKey(keychr, modFlags, globally=globally)self._releaseModifiers(modifiers, globally=globally)self._postQueuedEvents()", "docstring": "Send one character with the given modifiers pressed.\n\n Parameters: key character, list of modifiers, global or app specific\n Returns: None or raise 
ValueError exception", "id": "f10331:c0:m23"} {"signature": "def _queueMouseButton(self, coord, mouseButton, modFlags, clickCount=,dest_coord=None):", "body": "mouseButtons = {Quartz.kCGMouseButtonLeft: '',Quartz.kCGMouseButtonRight: '',}if mouseButton not in mouseButtons:raise ValueError('')eventButtonDown = getattr(Quartz,'' % mouseButtons[mouseButton])eventButtonUp = getattr(Quartz,'' % mouseButtons[mouseButton])eventButtonDragged = getattr(Quartz,'' % mouseButtons[mouseButton])buttonDown = Quartz.CGEventCreateMouseEvent(None,eventButtonDown,coord,mouseButton)Quartz.CGEventSetFlags(buttonDown, modFlags)Quartz.CGEventSetIntegerValueField(buttonDown,Quartz.kCGMouseEventClickState,int(clickCount))if dest_coord:buttonDragged = Quartz.CGEventCreateMouseEvent(None,eventButtonDragged,dest_coord,mouseButton)Quartz.CGEventSetFlags(buttonDragged, modFlags)buttonUp = Quartz.CGEventCreateMouseEvent(None,eventButtonUp,dest_coord,mouseButton)else:buttonUp = Quartz.CGEventCreateMouseEvent(None,eventButtonUp,coord,mouseButton)Quartz.CGEventSetFlags(buttonUp, modFlags)Quartz.CGEventSetIntegerValueField(buttonUp,Quartz.kCGMouseEventClickState,int(clickCount))self._queueEvent(Quartz.CGEventPost,(Quartz.kCGSessionEventTap, buttonDown))if dest_coord:self._queueEvent(Quartz.CGEventPost,(Quartz.kCGHIDEventTap, buttonDragged))self._queueEvent(Quartz.CGEventPost,(Quartz.kCGSessionEventTap, buttonUp))", "docstring": "Private method to handle generic mouse button clicking.\n\n Parameters: coord (x, y) to click, mouseButton (e.g.,\n kCGMouseButtonLeft), modFlags set (int)\n Optional: clickCount (default 1; set to 2 for double-click; 3 for\n triple-click on host)\n Returns: None", "id": "f10331:c0:m24"} {"signature": "def _leftMouseDragged(self, stopCoord, strCoord, speed):", "body": "appPid = self._getPid()if strCoord == (, ):loc = AppKit.NSEvent.mouseLocation()strCoord = (loc.x, Quartz.CGDisplayPixelsHigh() - loc.y)appPid = self._getPid()pressLeftButton = Quartz.CGEventCreateMouseEvent(None,Quartz.kCGEventLeftMouseDown,strCoord,Quartz.kCGMouseButtonLeft)Quartz.CGEventPost(Quartz.CoreGraphics.kCGHIDEventTap, pressLeftButton)time.sleep()speed = round( / float(speed), )xmoved = stopCoord[] - strCoord[]ymoved = stopCoord[] - strCoord[]if ymoved == :raise ValueError('')else:k = abs(ymoved / xmoved)if xmoved != :for xpos in range(int(abs(xmoved))):if xmoved > and ymoved > :currcoord = (strCoord[] + xpos, strCoord[] + xpos * k)elif xmoved > and ymoved < :currcoord = (strCoord[] + xpos, strCoord[] - xpos * k)elif xmoved < and ymoved < :currcoord = (strCoord[] - xpos, strCoord[] - xpos * k)elif xmoved < and ymoved > :currcoord = (strCoord[] - xpos, strCoord[] + xpos * k)dragLeftButton = Quartz.CGEventCreateMouseEvent(None,Quartz.kCGEventLeftMouseDragged,currcoord,Quartz.kCGMouseButtonLeft)Quartz.CGEventPost(Quartz.CoreGraphics.kCGHIDEventTap,dragLeftButton)time.sleep(speed)else:raise ValueError('')upLeftButton = Quartz.CGEventCreateMouseEvent(None,Quartz.kCGEventLeftMouseUp,stopCoord,Quartz.kCGMouseButtonLeft)time.sleep()Quartz.CGEventPost(Quartz.CoreGraphics.kCGHIDEventTap, upLeftButton)", "docstring": "Private method to handle generic mouse left button dragging and\n dropping.\n\n Parameters: stopCoord(x,y) drop point\n Optional: strCoord (x, y) drag point, default (0,0) get current\n mouse position\n speed (int) 1 to unlimit, simulate mouse moving\n action from some special requirement\n Returns: None", "id": "f10331:c0:m25"} {"signature": "def _waitFor(self, timeout, notification, **kwargs):", "body": "callback = 
self._matchOtherretelem = NonecallbackArgs = NonecallbackKwargs = Noneif '' in kwargs:callback = kwargs['']del kwargs['']if '' in kwargs:if not isinstance(kwargs[''], tuple):errStr = ''raise TypeError(errStr)callbackArgs = kwargs['']del kwargs['']if '' in kwargs:if not isinstance(kwargs[''], dict):errStr = ''raise TypeError(errStr)callbackKwargs = kwargs['']del kwargs['']if kwargs:if callbackKwargs:callbackKwargs.update(kwargs)else:callbackKwargs = kwargselse:callbackArgs = (retelem,)callbackKwargs = kwargsreturn self._setNotification(timeout, notification, callback,callbackArgs,callbackKwargs)", "docstring": "Wait for a particular UI event to occur; this can be built\n upon in NativeUIElement for specific convenience methods.", "id": "f10331:c0:m26"} {"signature": "def waitForFocusToMatchCriteria(self, timeout=, **kwargs):", "body": "def _matchFocused(retelem, **kwargs):return retelem if retelem._match(**kwargs) else Noneretelem = Nonereturn self._waitFor(timeout, '',callback=_matchFocused,args=(retelem,),**kwargs)", "docstring": "Convenience method to wait for focused element to change\n (to element matching kwargs criteria).\n\n Returns: Element or None", "id": "f10331:c0:m27"} {"signature": "def _getActions(self):", "body": "actions = _a11y.AXUIElement._getActions(self)return [action[:] for action in actions]", "docstring": "Retrieve a list of actions supported by the object.", "id": "f10331:c0:m28"} {"signature": "def _performAction(self, action):", "body": "try:_a11y.AXUIElement._performAction(self, '' % action)except _a11y.ErrorUnsupported as e:sierra_ver = ''if mac_ver()[] < sierra_ver:raise eelse:pass", "docstring": "Perform the specified action.", "id": "f10331:c0:m29"} {"signature": "def _generateChildren(self):", "body": "try:children = self.AXChildrenexcept _a11y.Error:returnif children:for child in children:yield child", "docstring": "Generator which yields all AXChildren of the object.", "id": "f10331:c0:m30"} {"signature": "def _generateChildrenR(self, target=None):", "body": "if target is None:target = selftry:children = target.AXChildrenexcept _a11y.Error:returnif children:for child in children:yield childfor c in self._generateChildrenR(child):yield c", "docstring": "Generator which recursively yields all AXChildren of the object.", "id": "f10331:c0:m31"} {"signature": "def _match(self, **kwargs):", "body": "for k in kwargs.keys():try:val = getattr(self, k)except _a11y.Error:return Falseif sys.version_info[:] <= (, ):if isinstance(val, basestring):if not fnmatch.fnmatch(unicode(val), kwargs[k]):return Falseelse:if val != kwargs[k]:return Falseelif sys.version_info[] == :if isinstance(val, str):if not fnmatch.fnmatch(val, str(kwargs[k])):return Falseelse:if val != kwargs[k]:return Falseelse:if isinstance(val, str) or isinstance(val, unicode):if not fnmatch.fnmatch(val, kwargs[k]):return Falseelse:if val != kwargs[k]:return Falsereturn True", "docstring": "Method which indicates if the object matches specified criteria.\n\n Match accepts criteria as kwargs and looks them up on attributes.\n Actual matching is performed with fnmatch, so shell-like wildcards\n work within match strings. 
Examples:\n\n obj._match(AXTitle='Terminal*')\n obj._match(AXRole='TextField', AXRoleDescription='search text field')", "id": "f10331:c0:m32"} {"signature": "def _matchOther(self, obj, **kwargs):", "body": "if obj is not None:if self._findFirstR(**kwargs):return obj._match(**kwargs)return False", "docstring": "Perform _match but on another object, not self.", "id": "f10331:c0:m33"} {"signature": "def _generateFind(self, **kwargs):", "body": "for needle in self._generateChildren():if needle._match(**kwargs):yield needle", "docstring": "Generator which yields matches on AXChildren.", "id": "f10331:c0:m34"} {"signature": "def _generateFindR(self, **kwargs):", "body": "for needle in self._generateChildrenR():if needle._match(**kwargs):yield needle", "docstring": "Generator which yields matches on AXChildren and their children.", "id": "f10331:c0:m35"} {"signature": "def _findAll(self, **kwargs):", "body": "result = []for item in self._generateFind(**kwargs):result.append(item)return result", "docstring": "Return a list of all children that match the specified criteria.", "id": "f10331:c0:m36"} {"signature": "def _findAllR(self, **kwargs):", "body": "result = []for item in self._generateFindR(**kwargs):result.append(item)return result", "docstring": "Return a list of all children (recursively) that match the specified\n criteria.", "id": "f10331:c0:m37"} {"signature": "def _findFirst(self, **kwargs):", "body": "for item in self._generateFind(**kwargs):return item", "docstring": "Return the first object that matches the criteria.", "id": "f10331:c0:m38"} {"signature": "def _findFirstR(self, **kwargs):", "body": "for item in self._generateFindR(**kwargs):return item", "docstring": "Search recursively for the first object that matches the criteria.", "id": "f10331:c0:m39"} {"signature": "def _getApplication(self):", "body": "app = selfwhile True:try:app = app.AXParentexcept _a11y.ErrorUnsupported:breakreturn app", "docstring": "Get the base application UIElement.\n\n If the UIElement is a child of the application, it will try\n to get the AXParent until it reaches the top application level\n element.", "id": "f10331:c0:m40"} {"signature": "def _menuItem(self, menuitem, *args):", "body": "self._activate()for item in args:if menuitem.AXChildren[].AXRole == '':menuitem = menuitem.AXChildren[]role = ''try:menuitem = menuitem.AXChildren[int(item)]except ValueError:menuitem = menuitem.findFirst(AXRole='',AXTitle=item)return menuitem", "docstring": "Return the specified menu item.\n\n Example - refer to items by name:\n\n app._menuItem(app.AXMenuBar, 'File', 'New').Press()\n app._menuItem(app.AXMenuBar, 'Edit', 'Insert', 'Line Break').Press()\n\n Refer to items by index:\n\n app._menuitem(app.AXMenuBar, 1, 0).Press()\n\n Refer to items by mix-n-match:\n\n app._menuitem(app.AXMenuBar, 1, 'About TextEdit').Press()", "id": "f10331:c0:m41"} {"signature": "def _activate(self):", "body": "ra = AppKit.NSRunningApplicationapp = ra.runningApplicationWithProcessIdentifier_(self._getPid())app.activateWithOptions_()", "docstring": "Activate the application (bringing menus and windows forward).", "id": "f10331:c0:m42"} {"signature": "def _getBundleId(self):", "body": "ra = AppKit.NSRunningApplicationapp = ra.runningApplicationWithProcessIdentifier_(self._getPid())return app.bundleIdentifier()", "docstring": "Return the bundle ID of the application.", "id": "f10331:c0:m43"} {"signature": "def _getLocalizedName(self):", "body": "return self._getApplication().AXTitle", "docstring": "Return the localized name of the 
application.", "id": "f10331:c0:m44"} {"signature": "def __getattr__(self, name):", "body": "if name.startswith(''):try:attr = self._getAttribute(name)return attrexcept AttributeError:passactions = []try:actions = self._getActions()except Exception:passif name.startswith('') and (name[:] in actions):errStr = ''''raise AttributeError(errStr)if name in actions:def performSpecifiedAction():self._activate()return self._performAction(name)return performSpecifiedActionelse:raise AttributeError('' % (self, name))", "docstring": "Handle attribute requests in several ways:\n\n 1. If it starts with AX, it is probably an a11y attribute. Pass\n it to the handler in _a11y which will determine that for sure.\n 2. See if the attribute is an action which can be invoked on the\n UIElement. If so, return a function that will invoke the attribute.", "id": "f10331:c0:m45"} {"signature": "def __setattr__(self, name, value):", "body": "if name.startswith(''):return self._setAttribute(name, value)else:_a11y.AXUIElement.__setattr__(self, name, value)", "docstring": "Set attributes on the object.", "id": "f10331:c0:m46"} {"signature": "def __repr__(self):", "body": "title = repr('')role = ''c = repr(self.__class__).partition('')[-].rpartition('')[]try:title = repr(self.AXTitle)except Exception:try:title = repr(self.AXValue)except Exception:try:title = repr(self.AXRoleDescription)except Exception:passtry:role = self.AXRoleexcept Exception:passif len(title) > :title = title[:] + ''return '' % (c, role, title)", "docstring": "Build a descriptive string for UIElements.", "id": "f10331:c0:m47"} {"signature": "def getAttributes(self):", "body": "return self._getAttributes()", "docstring": "Get a list of the attributes available on the element.", "id": "f10331:c1:m0"} {"signature": "def getActions(self):", "body": "return self._getActions()", "docstring": "Return a list of the actions available on the element.", "id": "f10331:c1:m1"} {"signature": "def setString(self, attribute, string):", "body": "return self._setString(attribute, string)", "docstring": "Set the specified attribute to the specified string.", "id": "f10331:c1:m2"} {"signature": "def findFirst(self, **kwargs):", "body": "return self._findFirst(**kwargs)", "docstring": "Return the first object that matches the criteria.", "id": "f10331:c1:m3"} {"signature": "def findFirstR(self, **kwargs):", "body": "return self._findFirstR(**kwargs)", "docstring": "Search recursively for the first object that matches the\n criteria.", "id": "f10331:c1:m4"} {"signature": "def findAll(self, **kwargs):", "body": "return self._findAll(**kwargs)", "docstring": "Return a list of all children that match the specified criteria.", "id": "f10331:c1:m5"} {"signature": "def findAllR(self, **kwargs):", "body": "return self._findAllR(**kwargs)", "docstring": "Return a list of all children (recursively) that match\n the specified criteria.", "id": "f10331:c1:m6"} {"signature": "def getElementAtPosition(self, coord):", "body": "return self._getElementAtPosition(float(coord[]), float(coord[]))", "docstring": "Return the AXUIElement at the given coordinates.\n\n If self is behind other windows, this function will return self.", "id": "f10331:c1:m7"} {"signature": "def activate(self):", "body": "return self._activate()", "docstring": "Activate the application (bringing menus and windows forward)", "id": "f10331:c1:m8"} {"signature": "def getApplication(self):", "body": "return self._getApplication()", "docstring": "Get the base application UIElement.\n\n If the UIElement is a child of the 
application, it will try\n to get the AXParent until it reaches the top application level\n element.", "id": "f10331:c1:m9"} {"signature": "def menuItem(self, *args):", "body": "menuitem = self._getApplication().AXMenuBarreturn self._menuItem(menuitem, *args)", "docstring": "Return the specified menu item.\n\n Example - refer to items by name:\n\n app.menuItem('File', 'New').Press()\n app.menuItem('Edit', 'Insert', 'Line Break').Press()\n\n Refer to items by index:\n\n app.menuitem(1, 0).Press()\n\n Refer to items by mix-n-match:\n\n app.menuitem(1, 'About TextEdit').Press()", "id": "f10331:c1:m10"} {"signature": "def popUpItem(self, *args):", "body": "self.Press()time.sleep()return self._menuItem(self, *args)", "docstring": "Return the specified item in a pop up menu.", "id": "f10331:c1:m11"} {"signature": "def getBundleId(self):", "body": "return self._getBundleId()", "docstring": "Return the bundle ID of the application.", "id": "f10331:c1:m12"} {"signature": "def getLocalizedName(self):", "body": "return self._getLocalizedName()", "docstring": "Return the localized name of the application.", "id": "f10331:c1:m13"} {"signature": "def sendKey(self, keychr):", "body": "return self._sendKey(keychr)", "docstring": "Send one character with no modifiers.", "id": "f10331:c1:m14"} {"signature": "def sendGlobalKey(self, keychr):", "body": "return self._sendKey(keychr, globally=True)", "docstring": "Send one character without modifiers to the system.\n\n It will not send an event directly to the application, system will\n dispatch it to the window which has keyboard focus.\n\n Parameters: keychr - Single keyboard character which will be sent.", "id": "f10331:c1:m15"} {"signature": "def sendKeys(self, keystr):", "body": "return self._sendKeys(keystr)", "docstring": "Send a series of characters with no modifiers.", "id": "f10331:c1:m16"} {"signature": "def pressModifiers(self, modifiers):", "body": "return self._holdModifierKeys(modifiers)", "docstring": "Hold modifier keys (e.g. [Option]).", "id": "f10331:c1:m17"} {"signature": "def releaseModifiers(self, modifiers):", "body": "return self._releaseModifierKeys(modifiers)", "docstring": "Release modifier keys (e.g. [Option]).", "id": "f10331:c1:m18"} {"signature": "def sendKeyWithModifiers(self, keychr, modifiers):", "body": "return self._sendKeyWithModifiers(keychr, modifiers, False)", "docstring": "Send one character with modifiers pressed\n\n Parameters: key character, modifiers (list) (e.g. 
[SHIFT] or\n [COMMAND, SHIFT] (assuming you've first used\n from pyatom.AXKeyCodeConstants import *))", "id": "f10331:c1:m19"} {"signature": "def sendGlobalKeyWithModifiers(self, keychr, modifiers):", "body": "return self._sendKeyWithModifiers(keychr, modifiers, True)", "docstring": "Global send one character with modifiers pressed.\n\n See sendKeyWithModifiers", "id": "f10331:c1:m20"} {"signature": "def dragMouseButtonLeft(self, coord, dest_coord, interval=):", "body": "modFlags = self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags,dest_coord=dest_coord)self._postQueuedEvents(interval=interval)", "docstring": "Drag the left mouse button without modifiers pressed.\n\n Parameters: coordinates to click on screen (tuple (x, y))\n dest coordinates to drag to (tuple (x, y))\n interval to send event of btn down, drag and up\n Returns: None", "id": "f10331:c1:m21"} {"signature": "def doubleClickDragMouseButtonLeft(self, coord, dest_coord, interval=):", "body": "modFlags = self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags,dest_coord=dest_coord)self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags,dest_coord=dest_coord,clickCount=)self._postQueuedEvents(interval=interval)", "docstring": "Double-click and drag the left mouse button without modifiers\n pressed.\n\n Parameters: coordinates to double-click on screen (tuple (x, y))\n dest coordinates to drag to (tuple (x, y))\n interval to send event of btn down, drag and up\n Returns: None", "id": "f10331:c1:m22"} {"signature": "def clickMouseButtonLeft(self, coord, interval=None):", "body": "modFlags = self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags)if interval:self._postQueuedEvents(interval=interval)else:self._postQueuedEvents()", "docstring": "Click the left mouse button without modifiers pressed.\n\n Parameters: coordinates to click on screen (tuple (x, y))\n Returns: None", "id": "f10331:c1:m23"} {"signature": "def clickMouseButtonRight(self, coord):", "body": "modFlags = self._queueMouseButton(coord, Quartz.kCGMouseButtonRight, modFlags)self._postQueuedEvents()", "docstring": "Click the right mouse button without modifiers pressed.\n\n Parameters: coordinates to click on scren (tuple (x, y))\n Returns: None", "id": "f10331:c1:m24"} {"signature": "def clickMouseButtonLeftWithMods(self, coord, modifiers, interval=None):", "body": "modFlags = self._pressModifiers(modifiers)self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags)self._releaseModifiers(modifiers)if interval:self._postQueuedEvents(interval=interval)else:self._postQueuedEvents()", "docstring": "Click the left mouse button with modifiers pressed.\n\n Parameters: coordinates to click; modifiers (list) (e.g. 
[SHIFT] or\n [COMMAND, SHIFT] (assuming you've first used\n from pyatom.AXKeyCodeConstants import *))\n Returns: None", "id": "f10331:c1:m25"} {"signature": "def clickMouseButtonRightWithMods(self, coord, modifiers):", "body": "modFlags = self._pressModifiers(modifiers)self._queueMouseButton(coord, Quartz.kCGMouseButtonRight, modFlags)self._releaseModifiers(modifiers)self._postQueuedEvents()", "docstring": "Click the right mouse button with modifiers pressed.\n\n Parameters: coordinates to click; modifiers (list)\n Returns: None", "id": "f10331:c1:m26"} {"signature": "def leftMouseDragged(self, stopCoord, strCoord=(, ), speed=):", "body": "self._leftMouseDragged(stopCoord, strCoord, speed)", "docstring": "Click the left mouse button and drag object.\n\n Parameters: stopCoord, the position of dragging stopped\n strCoord, the position of dragging started\n (0,0) will get current position\n speed is mouse moving speed, 0 to unlimited\n Returns: None", "id": "f10331:c1:m27"} {"signature": "def doubleClickMouse(self, coord):", "body": "modFlags = self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags)self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags,clickCount=)self._postQueuedEvents()", "docstring": "Double-click primary mouse button.\n\n Parameters: coordinates to click (assume primary is left button)\n Returns: None", "id": "f10331:c1:m28"} {"signature": "def doubleMouseButtonLeftWithMods(self, coord, modifiers):", "body": "modFlags = self._pressModifiers(modifiers)self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags)self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags,clickCount=)self._releaseModifiers(modifiers)self._postQueuedEvents()", "docstring": "Click the left mouse button with modifiers pressed.\n\n Parameters: coordinates to click; modifiers (list)\n Returns: None", "id": "f10331:c1:m29"} {"signature": "def tripleClickMouse(self, coord):", "body": "modFlags = for i in range():self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags)self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags,clickCount=)self._postQueuedEvents()", "docstring": "Triple-click primary mouse button.\n\n Parameters: coordinates to click (assume primary is left button)\n Returns: None", "id": "f10331:c1:m30"} {"signature": "def waitFor(self, timeout, notification, **kwargs):", "body": "return self._waitFor(timeout, notification, **kwargs)", "docstring": "Generic wait for a UI event that matches the specified\n criteria to occur.\n\n For customization of the callback, use keyword args labeled\n 'callback', 'args', and 'kwargs' for the callback fn, callback args,\n and callback kwargs, respectively. Also note that on return,\n the observer-returned UI element will be included in the first\n argument if 'args' are given. 
Note also that if the UI element is\n destroyed, callback should not use it, otherwise the function will\n hang.", "id": "f10331:c1:m31"} {"signature": "def waitForCreation(self, timeout=, notification=''):", "body": "callback = AXCallbacks.returnElemCallbackretelem = Noneargs = (retelem,)return self.waitFor(timeout, notification, callback=callback,args=args)", "docstring": "Convenience method to wait for creation of some UI element.\n\n Returns: The element created", "id": "f10331:c1:m32"} {"signature": "def waitForWindowToAppear(self, winName, timeout=):", "body": "return self.waitFor(timeout, '', AXTitle=winName)", "docstring": "Convenience method to wait for a window with the given name to\n appear.\n\n Returns: Boolean", "id": "f10331:c1:m33"} {"signature": "def waitForWindowToDisappear(self, winName, timeout=):", "body": "callback = AXCallbacks.elemDisappearedCallbackretelem = Noneargs = (retelem, self)win = self.findFirst(AXRole='', AXTitle=winName)return self.waitFor(timeout, '',callback=callback, args=args,AXRole='', AXTitle=winName)", "docstring": "Convenience method to wait for a window with the given name to\n disappear.\n\n Returns: Boolean", "id": "f10331:c1:m34"} {"signature": "def waitForSheetToAppear(self, timeout=):", "body": "return self.waitForCreation(timeout, '')", "docstring": "Convenience method to wait for a sheet to appear.\n\n Returns: the sheet that appeared (element) or None", "id": "f10331:c1:m35"} {"signature": "def waitForValueToChange(self, timeout=):", "body": "callback = AXCallbacks.returnElemCallbackretelem = Nonereturn self.waitFor(timeout, '', callback=callback,args=(retelem,))", "docstring": "Convenience method to wait for value attribute of given element to\n change.\n\n Some types of elements (e.g. menu items) have their titles change,\n so this will not work for those. 
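The UIElement records above (the AX attribute/action handling in __getattr__, menu access, keyboard and mouse synthesis, and the waitFor* helpers) together form the high-level automation API of this accessibility package. A minimal usage sketch follows; the atomac import name, getAppRefByBundleId, the TextEdit bundle id, and the window/button titles are assumptions rather than anything stated in the records.

import atomac  # assumed import name for the accessibility package described above

# Attach to an already-running application; getAppRefByBundleId and the
# TextEdit bundle id are assumptions, not taken from the records above.
app = atomac.getAppRefByBundleId('com.apple.TextEdit')
app.activate()                                   # bring windows and menus forward

# Menu items are addressed by name, exactly as in the menuItem docstring.
app.menuItem('File', 'New').Press()
app.waitForWindowToAppear('Untitled', timeout=10)

# Type into the new document, then look recursively for a button by AX role.
window = app.findFirstR(AXRole='AXWindow', AXTitle='Untitled')
window.sendKeys('hello world')
ok_button = window.findFirstR(AXRole='AXButton', AXTitle='OK')
if ok_button is not None:
    ok_button.Press()                            # actions resolve via __getattr__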
This seems to work best if you set\n the notification at the application level.\n\n Returns: Element or None", "id": "f10331:c1:m36"} {"signature": "def waitForFocusToChange(self, newFocusedElem, timeout=):", "body": "return self.waitFor(timeout, '',AXRole=newFocusedElem.AXRole,AXPosition=newFocusedElem.AXPosition)", "docstring": "Convenience method to wait for focused element to change (to new\n element given).\n\n Returns: Boolean", "id": "f10331:c1:m37"} {"signature": "def waitForFocusedWindowToChange(self, nextWinName, timeout=):", "body": "callback = AXCallbacks.returnElemCallbackretelem = Nonereturn self.waitFor(timeout, '',AXTitle=nextWinName)", "docstring": "Convenience method to wait for focused window to change\n\n Returns: Boolean", "id": "f10331:c1:m38"} {"signature": "def _convenienceMatch(self, role, attr, match):", "body": "kwargs = {}if match:kwargs[attr] = matchreturn self.findAll(AXRole=role, **kwargs)", "docstring": "Method used by role based convenience functions to find a match", "id": "f10331:c1:m39"} {"signature": "def _convenienceMatchR(self, role, attr, match):", "body": "kwargs = {}if match:kwargs[attr] = matchreturn self.findAllR(AXRole=role, **kwargs)", "docstring": "Method used by role based convenience functions to find a match", "id": "f10331:c1:m40"} {"signature": "def textAreas(self, match=None):", "body": "return self._convenienceMatch('', '', match)", "docstring": "Return a list of text areas with an optional match parameter.", "id": "f10331:c1:m41"} {"signature": "def textAreasR(self, match=None):", "body": "return self._convenienceMatchR('', '', match)", "docstring": "Return a list of text areas with an optional match parameter.", "id": "f10331:c1:m42"} {"signature": "def textFields(self, match=None):", "body": "return self._convenienceMatch('', '', match)", "docstring": "Return a list of textfields with an optional match parameter.", "id": "f10331:c1:m43"} {"signature": "def textFieldsR(self, match=None):", "body": "return self._convenienceMatchR('', '',match)", "docstring": "Return a list of textfields with an optional match parameter.", "id": "f10331:c1:m44"} {"signature": "def buttons(self, match=None):", "body": "return self._convenienceMatch('', '', match)", "docstring": "Return a list of buttons with an optional match parameter.", "id": "f10331:c1:m45"} {"signature": "def buttonsR(self, match=None):", "body": "return self._convenienceMatchR('', '', match)", "docstring": "Return a list of buttons with an optional match parameter.", "id": "f10331:c1:m46"} {"signature": "def windows(self, match=None):", "body": "return self._convenienceMatch('', '', match)", "docstring": "Return a list of windows with an optional match parameter.", "id": "f10331:c1:m47"} {"signature": "def windowsR(self, match=None):", "body": "return self._convenienceMatchR('', '', match)", "docstring": "Return a list of windows with an optional match parameter.", "id": "f10331:c1:m48"} {"signature": "def sheets(self, match=None):", "body": "return self._convenienceMatch('', '', match)", "docstring": "Return a list of sheets with an optional match parameter.", "id": "f10331:c1:m49"} {"signature": "def sheetsR(self, match=None):", "body": "return self._convenienceMatchR('', '', match)", "docstring": "Return a list of sheets with an optional match parameter.", "id": "f10331:c1:m50"} {"signature": "def staticTexts(self, match=None):", "body": "return self._convenienceMatch('', '', match)", "docstring": "Return a list of statictexts with an optional match parameter.", "id": 
"f10331:c1:m51"} {"signature": "def staticTextsR(self, match=None):", "body": "return self._convenienceMatchR('', '', match)", "docstring": "Return a list of statictexts with an optional match parameter", "id": "f10331:c1:m52"} {"signature": "def genericElements(self, match=None):", "body": "return self._convenienceMatch('', '', match)", "docstring": "Return a list of genericelements with an optional match parameter.", "id": "f10331:c1:m53"} {"signature": "def genericElementsR(self, match=None):", "body": "return self._convenienceMatchR('', '', match)", "docstring": "Return a list of genericelements with an optional match parameter.", "id": "f10331:c1:m54"} {"signature": "def groups(self, match=None):", "body": "return self._convenienceMatch('', '', match)", "docstring": "Return a list of groups with an optional match parameter.", "id": "f10331:c1:m55"} {"signature": "def groupsR(self, match=None):", "body": "return self._convenienceMatchR('', '', match)", "docstring": "Return a list of groups with an optional match parameter.", "id": "f10331:c1:m56"} {"signature": "def radioButtons(self, match=None):", "body": "return self._convenienceMatch('', '', match)", "docstring": "Return a list of radio buttons with an optional match parameter.", "id": "f10331:c1:m57"} {"signature": "def radioButtonsR(self, match=None):", "body": "return self._convenienceMatchR('', '', match)", "docstring": "Return a list of radio buttons with an optional match parameter.", "id": "f10331:c1:m58"} {"signature": "def popUpButtons(self, match=None):", "body": "return self._convenienceMatch('', '', match)", "docstring": "Return a list of popup menus with an optional match parameter.", "id": "f10331:c1:m59"} {"signature": "def popUpButtonsR(self, match=None):", "body": "return self._convenienceMatchR('', '', match)", "docstring": "Return a list of popup menus with an optional match parameter.", "id": "f10331:c1:m60"} {"signature": "def rows(self, match=None):", "body": "return self._convenienceMatch('', '', match)", "docstring": "Return a list of rows with an optional match parameter.", "id": "f10331:c1:m61"} {"signature": "def rowsR(self, match=None):", "body": "return self._convenienceMatchR('', '', match)", "docstring": "Return a list of rows with an optional match parameter.", "id": "f10331:c1:m62"} {"signature": "def sliders(self, match=None):", "body": "return self._convenienceMatch('', '', match)", "docstring": "Return a list of sliders with an optional match parameter.", "id": "f10331:c1:m63"} {"signature": "def slidersR(self, match=None):", "body": "return self._convenienceMatchR('', '', match)", "docstring": "Return a list of sliders with an optional match parameter.", "id": "f10331:c1:m64"} {"signature": "def addloghandler(handler):", "body": "logger.addHandler(handler)return ", "docstring": "Add custom log handler\n@param handler: Handler instance\n@type handler: object\n\n@return: 1 on success and 0 on error\n@rtype: integer", "id": "f10334:m2"} {"signature": "def removeloghandler(handler):", "body": "logger.removeHandler(handler)return ", "docstring": "Remove custom log handler\n@param handler: Handler instance\n@type handler: object\n\n@return: 1 on success and 0 on error\n@rtype: integer", "id": "f10334:m3"} {"signature": "def log(message, level=logging.DEBUG):", "body": "if _ldtp_debug:print(message)logger.log(level, str(message))return ", "docstring": "Logs the message in the root logger with the log level\n@param message: Message to be logged\n@type message: string\n@param level: Log level, defaul 
DEBUG\n@type level: integer\n\n@return: 1 on success and 0 on error\n@rtype: integer", "id": "f10334:m4"} {"signature": "def startlog(filename, overwrite=True):", "body": "if not filename:return if overwrite:_mode = ''else:_mode = ''global _file_logger_file_logger = logging.FileHandler(os.path.expanduser(filename), _mode)_formatter = logging.Formatter('')_file_logger.setFormatter(_formatter)logger.addHandler(_file_logger)if _ldtp_debug:_file_logger.setLevel(logging.DEBUG)else:_file_logger.setLevel(logging.ERROR)return ", "docstring": "@param filename: Start logging on the specified file\n@type filename: string\n@param overwrite: Overwrite or append\n False - Append log to an existing file\n True - Write log to a new file. If file already exist, \n then erase existing file content and start log\n@type overwrite: boolean\n\n@return: 1 on success and 0 on error\n@rtype: integer", "id": "f10334:m5"} {"signature": "def stoplog():", "body": "global _file_loggerif _file_logger:logger.removeHandler(_file_logger)_file_logger = Nonereturn ", "docstring": "Stop logging.\n\n @return: 1 on success and 0 on error\n @rtype: integer", "id": "f10334:m6"} {"signature": "def imagecapture(window_name=None, out_file=None, x=, y=,width=None, height=None):", "body": "if not out_file:out_file = tempfile.mktemp('', '')else:out_file = os.path.expanduser(out_file)if _ldtp_windows_env:if width == None:width = -if height == None:height = -if window_name == None:window_name = ''data = _remote_imagecapture(window_name, x, y, width, height)f = open(out_file, '')f.write(b64decode(data))f.close()return out_file", "docstring": "Captures screenshot of the whole desktop or given window\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param x: x co-ordinate value\n@type x: integer\n@param y: y co-ordinate value\n@type y: integer\n@param width: width co-ordinate value\n@type width: integer\n@param height: height co-ordinate value\n@type height: integer\n\n@return: screenshot filename\n@rtype: string", "id": "f10334:m9"} {"signature": "def onwindowcreate(window_name, fn_name, *args):", "body": "_pollEvents._callback[window_name] = [\"\", fn_name, args]return _remote_onwindowcreate(window_name)", "docstring": "On window create, call the function with given arguments\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param fn_name: Callback function\n@type fn_name: function\n@param *args: arguments to be passed to the callback function\n@type *args: var args\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": "f10334:m33"} {"signature": "def removecallback(window_name):", "body": "if window_name in _pollEvents._callback:del _pollEvents._callback[window_name]return _remote_removecallback(window_name)", "docstring": "Remove registered callback on window create\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": "f10334:m34"} {"signature": "def registerevent(event_name, fn_name, *args):", "body": "if not isinstance(event_name, str):raise ValueError(\"\")_pollEvents._callback[event_name] = [event_name, fn_name, args]return _remote_registerevent(event_name)", "docstring": "Register at-spi event\n\n@param event_name: Event name in at-spi format.\n@type event_name: string\n@param 
fn_name: Callback function\n@type fn_name: function\n@param *args: arguments to be passed to the callback function\n@type *args: var args\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": "f10334:m35"} {"signature": "def deregisterevent(event_name):", "body": "if event_name in _pollEvents._callback:del _pollEvents._callback[event_name]return _remote_deregisterevent(event_name)", "docstring": "Remove callback of registered event\n\n@param event_name: Event name in at-spi format.\n@type event_name: string\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": "f10334:m36"} {"signature": "def registerkbevent(keys, modifiers, fn_name, *args):", "body": "event_name = \"\" % (keys, modifiers)_pollEvents._callback[event_name] = [event_name, fn_name, args]return _remote_registerkbevent(keys, modifiers)", "docstring": "Register keystroke events\n\n@param keys: key to listen\n@type keys: string\n@param modifiers: control / alt combination using gtk MODIFIERS\n@type modifiers: int\n@param fn_name: Callback function\n@type fn_name: function\n@param *args: arguments to be passed to the callback function\n@type *args: var args\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": "f10334:m37"} {"signature": "def deregisterkbevent(keys, modifiers):", "body": "event_name = \"\" % (keys, modifiers)if event_name in _pollEvents._callback:del _pollEvents._callback[event_name]return _remote_deregisterkbevent(keys, modifiers)", "docstring": "Remove callback of registered event\n\n@param keys: key to listen\n@type keys: string\n@param modifiers: control / alt combination using gtk MODIFIERS\n@type modifiers: int\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": "f10334:m38"} {"signature": "def windowuptime(window_name):", "body": "tmp_time = _remote_windowuptime(window_name)if tmp_time:tmp_time = tmp_time.split('')start_time = tmp_time[].split('')end_time = tmp_time[].split('')_start_time = datetime.datetime(int(start_time[]), int(start_time[]),int(start_time[]), int(start_time[]),int(start_time[]), int(start_time[]))_end_time = datetime.datetime(int(end_time[]), int(end_time[]),int(end_time[]), int(end_time[]),int(end_time[]), int(end_time[]))return _start_time, _end_timereturn None", "docstring": "Get window uptime\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n\n@return: \"starttime, endtime\" as datetime python object", "id": "f10334:m39"} {"signature": "def __del__(self):", "body": "try:self.alive = Falseexcept:pass", "docstring": "Stop polling when destroying this class", "id": "f10334:c0:m1"} {"signature": "def stop(self):", "body": "try:self.alive = Falseself.join(self.sleep_time)except:pass", "docstring": "Stop the thread", "id": "f10334:c0:m2"} {"signature": "def __del__(self):", "body": "try:self.alive = Falseexcept:pass", "docstring": "Stop callback when destroying this class", "id": "f10334:c1:m1"} {"signature": "def stop(self):", "body": "try:self.alive = Falseself.join(self.sleep_time)except:pass", "docstring": "Stop the thread", "id": "f10334:c1:m2"} {"signature": "def elemDisappearedCallback(retelem, obj, **kwargs):", "body": "return not obj.findFirstR(**kwargs)", "docstring": "Callback for checking if a UI element is no longer onscreen.\n\n kwargs should contains some unique set of identifier (e.g. 
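The f10334 records above are LDTP-style helpers for logging, desktop screenshots, and window/event callbacks. A short sketch, assuming these functions are importable from an ldtp module; the import path, log file name, window-name glob, and handler are all assumptions.

import ldtp  # assumed import path for the f10334 helpers above

# Start a fresh log file (expanduser is applied), write one entry, stop logging.
ldtp.startlog('~/automation.log', overwrite=True)
ldtp.log('smoke test started')                 # level defaults to DEBUG

# Capture the whole desktop; without out_file a temporary path is returned.
shot = ldtp.imagecapture()
ldtp.log('screenshot saved to %s' % shot)

# Register, then remove, a window-create callback (glob and handler are made up).
def on_prefs_window(*args):
    ldtp.log('preferences window appeared')

ldtp.onwindowcreate('*Preferences*', on_prefs_window)
ldtp.removecallback('*Preferences*')

ldtp.stoplog()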
title/value, role)\n Returns: Boolean", "id": "f10336:m0"} {"signature": "def returnElemCallback(retelem):", "body": "return retelem", "docstring": "Callback for when a sheet appears.\n\n Returns: element returned by observer callback", "id": "f10336:m1"} {"signature": "def read(fname):", "body": "return open(os.path.join(os.path.dirname(__file__), fname)).read()", "docstring": "Returns the contents of the specified file located in the same dir as\nthe script", "id": "f10338:m0"} {"signature": "@classmethoddef Async(cls, token, session=None, **options):", "body": "return cls(token, session=session, is_async=True, **options)", "docstring": "Returns the client in async mode.", "id": "f10347:c0:m2"} {"signature": "def get_version(self):", "body": "return self._get_model(self.api.VERSION)", "docstring": "Gets the version of RoyaleAPI. Returns a string", "id": "f10347:c0:m16"} {"signature": "def get_endpoints(self):", "body": "return self._get_model(self.api.ENDPOINTS)", "docstring": "Gets a list of endpoints available in RoyaleAPI", "id": "f10347:c0:m17"} {"signature": "@typecasteddef get_constants(self, **params: keys):", "body": "url = self.api.CONSTANTSreturn self._get_model(url, **params)", "docstring": "Get the CR Constants\n\n Parameters\n ----------\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m18"} {"signature": "@typecasteddef get_player(self, *tags: crtag, **params: keys):", "body": "url = self.api.PLAYER + '' + ''.join(tags)return self._get_model(url, FullPlayer, **params)", "docstring": "Get a player information\n\n Parameters\n ----------\n \\*tags: str\n Valid player tags. Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m19"} {"signature": "@typecasteddef get_player_verify(self, tag: crtag, apikey: str, **params: keys):", "body": "url = self.api.PLAYER + '' + tag + ''params.update({'': apikey})return self._get_model(url, FullPlayer, **params)", "docstring": "Check the API Key of a player.\n This endpoint has been **restricted** to\n certain members of the community\n\n Parameters\n ----------\n tag: str\n A valid tournament tag. Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n apikey: str\n The API Key in the player's settings\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m20"} {"signature": "@typecasteddef get_player_battles(self, *tags: crtag, **params: keys):", "body": "url = self.api.PLAYER + '' + ''.join(tags) + ''return self._get_model(url, **params)", "docstring": "Get a player's battle log\n\n Parameters\n ----------\n \\*tags: str\n Valid player tags. 
Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m21"} {"signature": "@typecasteddef get_player_chests(self, *tags: crtag, **params: keys):", "body": "url = self.api.PLAYER + '' + ''.join(tags) + ''return self._get_model(url, **params)", "docstring": "Get information about a player's chest cycle\n\n Parameters\n ----------\n \\*tags: str\n Valid player tags. Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m22"} {"signature": "@typecasteddef get_clan(self, *tags: crtag, **params: keys):", "body": "url = self.api.CLAN + '' + ''.join(tags)return self._get_model(url, FullClan, **params)", "docstring": "Get a clan information\n\n Parameters\n ----------\n \\*tags: str\n Valid clan tags. Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m23"} {"signature": "@typecasted def search_clans(self, **params: clansearch):", "body": "url = self.api.CLAN + ''return self._get_model(url, PartialClan, **params)", "docstring": "Search for a clan. 
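The f10347 client above wraps the community RoyaleAPI endpoints; most methods take one or more tags plus the shared keys/exclude/max/page/timeout keywords. A usage sketch, assuming the client is exposed as clashroyale.RoyaleAPI and using a placeholder token and example tags.

import clashroyale  # assumed package name for the client defined above

client = clashroyale.RoyaleAPI('my-api-token')      # token placeholder

# Player profile, trimmed to a couple of fields via the keys filter.
player = client.get_player('2P0LYQ', keys=['name', 'trophies'])

# Chest cycle and a paginated battle log for the same example tag.
chests = client.get_player_chests('2P0LYQ')
battles = client.get_player_battles('2P0LYQ', max=10, page=0)

# Clan lookup by tag (tag value is illustrative only).
clan = client.get_clan('2CCCP')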
At least one\n of the filters must be present\n\n Parameters\n ----------\n name: Optional[str]\n The name of a clan\n minMembers: Optional[int]\n The minimum member count\n of a clan\n maxMembers: Optional[int]\n The maximum member count\n of a clan\n score: Optional[int]\n The minimum trophy score of\n a clan\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m24"} {"signature": "def get_tracking_clans(self, **params: keys):", "body": "url = self.api.CLAN + ''return self._get_model(url, **params)", "docstring": "Get a list of clans that are being\n tracked by having either cr-api.com or\n royaleapi.com in the description\n\n Parameters\n ----------\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m25"} {"signature": "@typecasteddef get_clan_tracking(self, *tags: crtag, **params: keys):", "body": "url = self.api.CLAN + '' + ''.join(tags) + ''return self._get_model(url, **params)", "docstring": "Returns if the clan is currently being tracked\n by the API by having either cr-api.com or royaleapi.com\n in the clan description\n\n Parameters\n ----------\n \\*tags: str\n Valid clan tags. Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m26"} {"signature": "@typecasteddef get_clan_battles(self, *tags: crtag, **params: keys):", "body": "url = self.api.CLAN + '' + ''.join(tags) + ''return self._get_model(url, **params)", "docstring": "Get the battle log from everyone in the clan\n\n Parameters\n ----------\n \\*tags: str\n Valid player tags. Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n \\*\\*type: str\n Filters what kind of battles. Pick from:\n :all:, :war:, :clanMate:\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m27"} {"signature": "@typecasteddef get_clan_history(self, *tags: crtag, **params: keys):", "body": "url = self.api.CLAN + '' + ''.join(tags) + ''return self._get_model(url, **params)", "docstring": "Get the clan history. 
Only works if the clan is being tracked\n by having either cr-api.com or royaleapi.com in the clan's\n description\n\n Parameters\n ----------\n \\*tags: str\n Valid clan tags. Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m28"} {"signature": "@typecasteddef get_clan_war(self, tag: crtag, **params: keys):", "body": "url = self.api.CLAN + '' + tag + ''return self._get_model(url, **params)", "docstring": "Get inforamtion about a clan's current clan war\n\n Parameters\n ----------\n *tag: str\n A valid clan tag. Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m29"} {"signature": "@typecasteddef get_clan_war_log(self, tag: crtag, **params: keys):", "body": "url = self.api.CLAN + '' + tag + ''return self._get_model(url, **params)", "docstring": "Get a clan's war log\n\n Parameters\n ----------\n \\*tags: str\n Valid clan tags. Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m30"} {"signature": "@typecasteddef get_tournament(self, tag: crtag, **params: keys):", "body": "url = self.api.TOURNAMENT + '' + tagreturn self._get_model(url, **params)", "docstring": "Get a tournament information\n\n Parameters\n ----------\n tag: str\n A valid tournament tag. 
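The clan endpoints above add search filters, tracking checks, and war data on top of the same keyword conventions. Continuing the previous sketch with the same assumed client and example tags:

# At least one search filter must be supplied; values are illustrative.
results = client.search_clans(name='Reddit', minMembers=40, max=5)

# Current clan war, recent war log, and war-only battles for one example clan.
war = client.get_clan_war('2CCCP')
war_log = client.get_clan_war_log('2CCCP', max=3)
war_battles = client.get_clan_battles('2CCCP', type='war')

# Whether the clan is tracked (cr-api.com / royaleapi.com in its description).
tracked = client.get_clan_tracking('2CCCP')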
Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m31"} {"signature": "@typecasteddef search_tournaments(self, **params: keys):", "body": "url = self.api.TOURNAMENT + ''return self._get_model(url, PartialClan, **params)", "docstring": "Search for a tournament\n\n Parameters\n ----------\n name: str\n The name of the tournament\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m32"} {"signature": "@typecasteddef get_top_clans(self, country_key='', **params: keys):", "body": "url = self.api.TOP + '' + str(country_key)return self._get_model(url, PartialClan, **params)", "docstring": "Get a list of top clans by trophy\n\n location_id: Optional[str] = ''\n A location ID or '' (global)\n See https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json\n for a list of acceptable location IDs\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m33"} {"signature": "@typecasteddef get_top_war_clans(self, country_key='', **params: keys):", "body": "url = self.api.TOP + '' + str(country_key)return self._get_model(url, PartialClan, **params)", "docstring": "Get a list of top clans by war\n\n location_id: Optional[str] = ''\n A location ID or '' (global)\n See https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json\n for a list of acceptable location IDs\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m34"} {"signature": "@typecasteddef get_top_players(self, country_key='', **params: keys):", "body": "url = self.api.TOP + '' + str(country_key)return self._get_model(url, PartialPlayerClan, **params)", "docstring": "Get a list of top players\n\n location_id: Optional[str] = ''\n A location ID or '' (global)\n See https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json\n for a list of acceptable location IDs\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n 
Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m35"} {"signature": "@typecasteddef get_popular_clans(self, **params: keys):", "body": "url = self.api.POPULAR + ''return self._get_model(url, PartialClan, **params)", "docstring": "Get a list of most queried clans\n\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m36"} {"signature": "@typecasteddef get_popular_players(self, **params: keys):", "body": "url = self.api.POPULAR + ''return self._get_model(url, PartialPlayerClan, **params)", "docstring": "Get a list of most queried players\n\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m37"} {"signature": "@typecasteddef get_popular_tournaments(self, **params: keys):", "body": "url = self.api.POPULAR + ''return self._get_model(url, PartialTournament, **params)", "docstring": "Get a list of most queried tournaments\n\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m38"} {"signature": "@typecasteddef get_popular_decks(self, **params: keys):", "body": "url = self.api.POPULAR + ''return self._get_model(url, **params)", "docstring": "Get a list of most queried decks\n\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m39"} {"signature": "@typecasteddef get_known_tournaments(self, **params: tournamentfilter):", "body": "url = self.api.TOURNAMENT + ''return self._get_model(url, PartialTournament, **params)", "docstring": "Get a list of queried tournaments\n\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: 
Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m40"} {"signature": "@typecasteddef get_open_tournaments(self, **params: tournamentfilter):", "body": "url = self.api.TOURNAMENT + ''return self._get_model(url, PartialTournament, **params)", "docstring": "Get a list of open tournaments\n\n \\*\\*1k: Optional[int] = 0\n Set to 1 to filter tournaments that have\n at least 1000 max players\n \\*\\*full: Optional[int] = 0\n Set to 1 to filter tournaments that are\n full\n \\*\\*inprep: Optional[int] = 0\n Set to 1 to filter tournaments that are\n in preperation\n \\*\\*joinable: Optional[int] = 0\n Set to 1 to filter tournaments that are\n joinable\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m41"} {"signature": "@typecasteddef get_1k_tournaments(self, **params: tournamentfilter):", "body": "url = self.api.TOURNAMENT + ''return self._get_model(url, PartialTournament, **params)", "docstring": "Get a list of tournaments that have at least 1000\n max players\n\n \\*\\*open: Optional[int] = 0\n Set to 1 to filter tournaments that are\n open\n \\*\\*full: Optional[int] = 0\n Set to 1 to filter tournaments that are\n full\n \\*\\*inprep: Optional[int] = 0\n Set to 1 to filter tournaments that are\n in preperation\n \\*\\*joinable: Optional[int] = 0\n Set to 1 to filter tournaments that are\n joinable\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m42"} {"signature": "@typecasteddef get_prep_tournaments(self, **params: tournamentfilter):", "body": "url = self.api.TOURNAMENT + ''return self._get_model(url, PartialTournament, **params)", "docstring": "Get a list of tournaments that are in preperation\n\n \\*\\*1k: Optional[int] = 0\n Set to 1 to filter tournaments that have\n at least 1000 max players\n \\*\\*open: Optional[int] = 0\n Set to 1 to filter tournaments that are\n open\n \\*\\*full: Optional[int] = 0\n Set to 1 to filter tournaments that are\n full\n \\*\\*joinable: Optional[int] = 0\n Set to 1 to filter tournaments that are\n joinable\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m43"} {"signature": "@typecasteddef get_joinable_tournaments(self, **params: tournamentfilter):", "body": "url = self.api.TOURNAMENT + 
''return self._get_model(url, PartialTournament, **params)", "docstring": "Get a list of tournaments that are joinable\n\n \\*\\*1k: Optional[int] = 0\n Set to 1 to filter tournaments that have\n at least 1000 max players\n \\*\\*open: Optional[int] = 0\n Set to 1 to filter tournaments that are\n open\n \\*\\*full: Optional[int] = 0\n Set to 1 to filter tournaments that are\n full\n \\*\\*inprep: Optional[int] = 0\n Set to 1 to filter tournaments that are\n in preperation\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m44"} {"signature": "@typecasteddef get_full_tournaments(self, **params: tournamentfilter):", "body": "url = self.api.TOURNAMENT + ''return self._get_model(url, PartialTournament, **params)", "docstring": "Get a list of tournaments that are full\n\n \\*\\*1k: Optional[int] = 0\n Set to 1 to filter tournaments that have\n at least 1000 max players\n \\*\\*open: Optional[int] = 0\n Set to 1 to filter tournaments that are\n open\n \\*\\*inprep: Optional[int] = 0\n Set to 1 to filter tournaments that are\n in preperation\n \\*\\*joinable: Optional[int] = 0\n Set to 1 to filter tournaments that are\n joinable\n \\*\\*keys: Optional[list] = None\n Filter which keys should be included in the\n response\n \\*\\*exclude: Optional[list] = None\n Filter which keys should be excluded from the\n response\n \\*\\*max: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*page: Optional[int] = None\n Works with max, the zero-based page of the\n items\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m45"} {"signature": "def typecasted(func):", "body": "signature = inspect.signature(func).parameters.items()@wraps(func)def wrapper(*args, **kwargs):args = list(args)new_args = []new_kwargs = {}for _, param in signature:converter = param.annotationif converter is inspect._empty:converter = lambda a: a if param.kind is param.POSITIONAL_OR_KEYWORD:if args:to_conv = args.pop()new_args.append(converter(to_conv))elif param.kind is param.VAR_POSITIONAL:for a in args:new_args.append(converter(a))else:for k, v in kwargs.items():nk, nv = converter(k, v)new_kwargs[nk] = nvreturn func(*new_args, **new_kwargs)return wrapper", "docstring": "Decorator that converts arguments via annotations.", "id": "f10349:m0"} {"signature": "def refresh(self):", "body": "if self.client.is_async:return self._arefresh()data, cached, ts, response = self.client.request(self.url, timeout=None, refresh=True)return self.from_data(data, cached, ts, response)", "docstring": "(a)sync refresh the data.", "id": "f10350:c1:m0"} {"signature": "def get_clan(self):", "body": "try:return self.client.get_clan(self.clan.tag)except AttributeError:try:return self.client.get_clan(self.tag)except AttributeError:raise ValueError('')", "docstring": "(a)sync function to return clan.", "id": "f10350:c3:m0"} {"signature": "def get_player(self):", "body": "return self.client.get_player(self.tag)", "docstring": "(a)sync function to return player.", "id": "f10350:c4:m0"} {"signature": "@classmethoddef Async(cls, token, session=None, **options):", "body": 
"return cls(token, session=session, is_async=True, **options)", "docstring": "Returns the client in async mode.", "id": "f10352:c0:m2"} {"signature": "@typecasteddef get_player(self, tag: crtag, timeout=None):", "body": "url = self.api.PLAYER + '' + tagreturn self._get_model(url, FullPlayer, timeout=timeout)", "docstring": "Get information about a player\n\n Parameters\n ----------\n tag: str\n A valid tournament tag. Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m16"} {"signature": "@typecasteddef get_player_verify(self, tag: crtag, apikey: str, timeout=None):", "body": "url = self.api.PLAYER + '' + tag + ''return self._get_model(url, FullPlayer, timeout=timeout, method='', json={'': apikey})", "docstring": "Check the API Key of a player.\n This endpoint has been **restricted** to\n certain members of the community\n\n Raises BadRequest if the apikey is invalid\n\n Parameters\n ----------\n tag: str\n A valid tournament tag. Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n apikey: str\n The API Key in the player's settings\n timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m17"} {"signature": "@typecasteddef get_player_battles(self, tag: crtag, **params: keys):", "body": "url = self.api.PLAYER + '' + tag + ''return self._get_model(url, **params)", "docstring": "Get a player's battle log\n\n Parameters\n ----------\n tag: str\n A valid tournament tag. Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n \\*\\*limit: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m18"} {"signature": "@typecasteddef get_player_chests(self, tag: crtag, timeout: int=None):", "body": "url = self.api.PLAYER + '' + tag + ''return self._get_model(url, timeout=timeout)", "docstring": "Get information about a player's chest cycle\n\n Parameters\n ----------\n tag: str\n A valid tournament tag. Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m19"} {"signature": "@typecasteddef get_clan(self, tag: crtag, timeout: int=None):", "body": "url = self.api.CLAN + '' + tagreturn self._get_model(url, FullClan, timeout=timeout)", "docstring": "Get inforamtion about a clan\n\n Parameters\n ----------\n tag: str\n A valid tournament tag. Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m20"} {"signature": "@typecasteddef search_clans(self, **params: clansearch):", "body": "url = self.api.CLANreturn self._get_model(url, PartialClan, **params)", "docstring": "Search for a clan. 
At least one\n of the filters must be present\n\n Parameters\n ----------\n name: Optional[str]\n The name of a clan\n (has to be at least 3 characters long)\n locationId: Optional[int]\n A location ID\n minMembers: Optional[int]\n The minimum member count\n of a clan\n maxMembers: Optional[int]\n The maximum member count\n of a clan\n minScore: Optional[int]\n The minimum trophy score of\n a clan\n \\*\\*limit: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m21"} {"signature": "@typecasteddef get_clan_war(self, tag: crtag, timeout: int=None):", "body": "url = self.api.CLAN + '' + tag + ''return self._get_model(url, timeout=timeout)", "docstring": "Get inforamtion about a clan's current clan war\n\n Parameters\n ----------\n tag: str\n A valid tournament tag. Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m22"} {"signature": "@typecasteddef get_clan_members(self, tag: crtag, **params: keys):", "body": "url = self.api.CLAN + '' + tag + ''return self._get_model(url, **params)", "docstring": "Get the clan's members\n\n Parameters\n ----------\n tag: str\n A valid tournament tag. Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n \\*\\*limit: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m23"} {"signature": "@typecasteddef get_clan_war_log(self, tag: crtag, **params: keys):", "body": "url = self.api.CLAN + '' + tag + ''return self._get_model(url, **params)", "docstring": "Get a clan's war log\n\n Parameters\n ----------\n tag: str\n A valid tournament tag. Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n \\*\\*limit: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m24"} {"signature": "@typecasteddef get_tournament(self, tag: crtag, timeout=):", "body": "url = self.api.TOURNAMENT + '' + tagreturn self._get_model(url, PartialTournament, timeout=timeout)", "docstring": "Get a tournament information\n\n Parameters\n ----------\n tag: str\n A valid tournament tag. 
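The official-API clan methods above take a single tag with limit/timeout keywords. A synchronous counterpart to the previous sketch, under the same naming assumptions and with illustrative tags and filter values:

client = clashroyale.OfficialAPI('my-api-token')    # assumed constructor name

clan = client.get_clan('2CCCP')                     # example tag
members = client.get_clan_members('2CCCP', limit=10)
war_log = client.get_clan_war_log('2CCCP', limit=3)

# search_clans needs at least one filter; the name must be at least 3 characters.
hits = client.search_clans(name='Reddit', minMembers=40, limit=5)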
Minimum length: 3\n Valid characters: 0289PYLQGRJCUV\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m25"} {"signature": "@typecasteddef search_tournaments(self, name: str, **params: keys):", "body": "url = self.api.TOURNAMENTparams[''] = namereturn self._get_model(url, PartialTournament, **params)", "docstring": "Search for a tournament by its name\n\n Parameters\n ----------\n name: str\n The name of a tournament\n \\*\\*limit: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m26"} {"signature": "@typecasteddef get_all_cards(self, timeout: int=None):", "body": "url = self.api.CARDSreturn self._get_model(url, timeout=timeout)", "docstring": "Get a list of all the cards in the game\n\n Parameters\n ----------\n timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m27"} {"signature": "@typecasteddef get_all_locations(self, timeout: int=None):", "body": "url = self.api.LOCATIONSreturn self._get_model(url, timeout=timeout)", "docstring": "Get a list of all locations\n\n Parameters\n ----------\n timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m29"} {"signature": "@typecasteddef get_location(self, location_id: int, timeout: int=None):", "body": "url = self.api.LOCATIONS + '' + str(location_id)return self._get_model(url, timeout=timeout)", "docstring": "Get a location information\n\n Parameters\n ----------\n location_id: int\n A location ID\n See https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json\n for a list of acceptable location IDs\n timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m30"} {"signature": "@typecasteddef get_top_clans(self, location_id='', **params: keys):", "body": "url = self.api.LOCATIONS + '' + str(location_id) + ''return self._get_model(url, PartialClan, **params)", "docstring": "Get a list of top clans by trophy\n\n Parameters\n ----------\n location_id: Optional[str] = 'global'\n A location ID or global\n See https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json\n for a list of acceptable location IDs\n \\*\\*limit: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m31"} {"signature": "@typecasteddef get_top_clanwar_clans(self, location_id='', **params: keys):", "body": "url = self.api.LOCATIONS + '' + str(location_id) + ''return self._get_model(url, PartialClan, **params)", "docstring": "Get a list of top clan war clans\n\n Parameters\n ----------\n location_id: Optional[str] = 'global'\n A location ID or global\n See https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json\n for a list of acceptable location IDs\n \\*\\*limit: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m32"} {"signature": "@typecasteddef get_top_players(self, location_id='', **params: keys):", "body": "url = self.api.LOCATIONS + '' + str(location_id) + ''return self._get_model(url, PartialPlayerClan, **params)", "docstring": "Get a list of top players\n\n Parameters\n ----------\n location_id: Optional[str] = 'global'\n A location ID or global\n See 
https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json\n for a list of acceptable location IDs\n \\*\\*limit: Optional[int] = None\n Limit the number of items returned in the response\n \\*\\*timeout: Optional[int] = None\n Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m33"} {"signature": "def get_clan_image(self, obj: BaseAttrDict):", "body": "try:badge_id = obj.clan.badge_idexcept AttributeError:try:badge_id = obj.badge_idexcept AttributeError:return ''if badge_id is None:return ''for i in self.constants.alliance_badges:if i.id == badge_id:return '' + i.name + ''", "docstring": "Get the clan badge image URL\n\n Parameters\n ---------\n obj: official_api.models.BaseAttrDict\n An object that has the clan badge ID either in ``.clan.badge_id`` or ``.badge_id``\n Can be a clan or a profile for example.\n\n Returns str", "id": "f10352:c0:m34"} {"signature": "def get_arena_image(self, obj: BaseAttrDict):", "body": "badge_id = obj.arena.idfor i in self.constants.arenas:if i.id == badge_id:return ''.format(i.arena_id)", "docstring": "Get the arena image URL\n\n Parameters\n ---------\n obj: official_api.models.BaseAttrDict\n An object that has the arena ID in ``.arena.id``\n Can be ``Profile`` for example.\n\n Returns None or str", "id": "f10352:c0:m35"} {"signature": "def get_card_info(self, card_name: str):", "body": "for c in self.constants.cards:if c.name == card_name:return c", "docstring": "Returns card info from constants\n\n Parameters\n ---------\n card_name: str\n A card name\n\n Returns None or Constants", "id": "f10352:c0:m36"} {"signature": "def get_rarity_info(self, rarity: str):", "body": "for c in self.constants.rarities:if c.name == rarity:return c", "docstring": "Returns card info from constants\n\n Parameters\n ---------\n rarity: str\n A rarity name\n\n Returns None or Constants", "id": "f10352:c0:m37"} {"signature": "def get_deck_link(self, deck: BaseAttrDict):", "body": "deck_link = ''for i in deck:card = self.get_card_info(i.name)deck_link += ''.format(card)return deck_link", "docstring": "Form a deck link\n\n Parameters\n ---------\n deck: official_api.models.BaseAttrDict\n An object is a deck. Can be retrieved from ``Player.current_deck``\n\n Returns str", "id": "f10352:c0:m38"} {"signature": "def get_datetime(self, timestamp: str, unix=True):", "body": "time = datetime.strptime(timestamp, '')if unix:return int(time.timestamp())else:return time", "docstring": "Converts a %Y%m%dT%H%M%S.%fZ to a UNIX timestamp\n or a datetime.datetime object\n\n Parameters\n ---------\n timestamp: str\n A timstamp in the %Y%m%dT%H%M%S.%fZ format, usually returned by the API\n in the ``created_time`` field for example (eg. 
20180718T145906.000Z)\n unix: Optional[bool] = True\n Whether to return a POSIX timestamp (seconds since epoch) or not\n\n Returns int or datetime.datetime", "id": "f10352:c0:m39"} {"signature": "def typecasted(func):", "body": "signature = inspect.signature(func).parameters.items()@wraps(func)def wrapper(*args, **kwargs):args = list(args)new_args = []new_kwargs = {}for _, param in signature:converter = param.annotationif converter is inspect._empty:converter = lambda a: a if param.kind is param.POSITIONAL_OR_KEYWORD:if args:to_conv = args.pop()new_args.append(converter(to_conv))elif param.kind is param.VAR_POSITIONAL:for a in args:new_args.append(converter(a))else:for k, v in kwargs.items():nk, nv = converter(k, v)new_kwargs[nk] = nvreturn func(*new_args, **new_kwargs)return wrapper", "docstring": "Decorator that converts arguments via annotations.", "id": "f10353:m0"} {"signature": "def update_data(self):", "body": "if self.client.is_async:return self._aupdate_data()if self.cursor['']:data, cached, ts, response = self.client._request(self.response.url, timeout=None, after=self.cursor[''])self.cursor = {'': data[''][''].get(''), '': data[''][''].get('')}self.raw_data += [self.model(self.client, d, response, cached=cached, ts=ts) for d in data['']]return Truereturn False", "docstring": "Adds the NEXT data in the raw_data dictionary.\n Returns True if data is added.\n Returns False if data is not added", "id": "f10354:c1:m8"} {"signature": "def all_data(self):", "body": "if self.client.is_async:return self._aall_data()while self.update_data():pass", "docstring": "Loops through and adds all data to the raw_data\n\n This has a chance to get 429 RatelimitError", "id": "f10354:c1:m10"} {"signature": "def refresh(self):", "body": "if self.client.is_async:return self._arefresh()data, cached, ts, response = self.client._request(self.response.url, timeout=None, refresh=True)return self.from_data(data, cached, ts, response)", "docstring": "(a)sync refresh the data.", "id": "f10354:c2:m0"} {"signature": "def get_clan(self):", "body": "try:return self.client.get_clan(self.clan.tag)except AttributeError:try:return self.client.get_clan(self.tag)except AttributeError:raise ValueError('')", "docstring": "(a)sync function to return clan.", "id": "f10354:c3:m0"} {"signature": "def get_player(self):", "body": "return self.client.get_player(self.tag)", "docstring": "(a)sync function to return player.", "id": "f10354:c5:m0"} {"signature": "def models_preparing(app):", "body": "def wrapper(resource, parent):if isinstance(resource, DeclarativeMeta):resource = ListResource(resource)if not getattr(resource, '', None):resource.__parent__ = parentreturn resourceresources_preparing_factory(app, wrapper)", "docstring": "Wrap all sqlalchemy model in settings.", "id": "f10359:m0"} {"signature": "def paginate_link_tag(item):", "body": "a_tag = Page.default_link_tag(item)if item[''] == '':return make_html_tag('', a_tag, **{'': ''})return make_html_tag('', a_tag)", "docstring": "Create an A-HREF tag that points to another page usable in paginate.", "id": "f10360:m0"} {"signature": "def mock_iter_entry_points_factory(data, mocked_group):", "body": "from pkg_resources import iter_entry_pointsdef entrypoints(group, name=None):if group == mocked_group:for entrypoint in data:yield entrypointelse:for x in iter_entry_points(group=group, name=name):yield xreturn entrypoints", "docstring": "Create a mock iter_entry_points function.", "id": "f10384:m0"} {"signature": "def date_range(start_date, end_date):", "body": "if start_date >= 
end_date:for n in range((start_date - end_date).days + ):yield end_date + datetime.timedelta(n)else:for n in range((end_date - start_date).days + ):yield start_date + datetime.timedelta(n)", "docstring": "Get all dates in a given range.", "id": "f10384:m4"} {"signature": "@contextmanagerdef user_set(app, user):", "body": "def handler(sender, **kwargs):g.user = userwith appcontext_pushed.connected_to(handler, app):yield", "docstring": "User set.", "id": "f10384:m13"} {"signature": "def generate_events(app, file_number=, event_number=, robot_event_number=,start_date=datetime.date(, , ),end_date=datetime.date(, , )):", "body": "current_queues.declare()for t in current_search.put_templates(ignore=[]):passdef _unique_ts_gen():ts = while True:ts += yield tsdef generator_list():unique_ts = _unique_ts_gen()for file_idx in range(file_number):for entry_date in date_range(start_date, end_date):file_id = ''.format(file_idx + )bucket_id = ''.format(file_idx + )def build_event(is_robot=False):ts = next(unique_ts)return dict(timestamp=datetime.datetime.combine(entry_date,datetime.time(minute=ts % ,second=ts % )).isoformat(),bucket_id=bucket_id,file_id=file_id,file_key='',size=,visitor_id=,is_robot=is_robot)for event_idx in range(event_number):yield build_event()for event_idx in range(robot_event_number):yield build_event(True)mock_queue = Mock()mock_queue.consume.return_value = generator_list()mock_queue.routing_key = ''EventsIndexer(mock_queue,preprocessors=[build_file_unique_id],double_click_window=).run()current_search_client.indices.refresh(index='')", "docstring": "Queued events for processing tests.", "id": "f10384:m25"} {"signature": "def get_deleted_docs(index):", "body": "return current_search_client.indices.stats()[''][index]['']['']['']", "docstring": "Get all deleted docs from an ES index.", "id": "f10384:m29"} {"signature": "def _create_record_view_event(timestamp,record_id='',pid_type='',pid_value='',visitor_id=,user_id=None):", "body": "doc = dict(timestamp=datetime.datetime(*timestamp).isoformat(),record_id=record_id,pid_type=pid_type,pid_value=pid_value,visitor_id=visitor_id,user_id=user_id,)return build_record_unique_id(doc)", "docstring": "Create a file_download event content.", "id": "f10384:m31"} {"signature": "def __init__(self, *args, **kwargs):", "body": "pass", "docstring": "Mock constructor to accept the query_config parameters.", "id": "f10384:c0:m0"} {"signature": "def run(self, *args, **kwargs):", "body": "return dict(bucket_id='',value=)", "docstring": "Sample response.", "id": "f10384:c0:m1"} {"signature": "def get_queue_size(queue_name):", "body": "queue = current_queues.queues[queue_name]_, size, _ = queue.queue.queue_declare(passive=True)return size", "docstring": "Get the current number of messages in a queue.", "id": "f10385:m0"} {"signature": "def myfunc():", "body": "pass", "docstring": "Example function.", "id": "f10387:m0"} {"signature": "def declare_queues():", "body": "return [dict(name=''.format(event['']),exchange=current_stats.exchange)for event in current_stats._events_config.values()]", "docstring": "Index statistics events.", "id": "f10390:m0"} {"signature": "def file_download_event_builder(event, sender_app, obj=None, **kwargs):", "body": "event.update(dict(timestamp=datetime.datetime.utcnow().isoformat(),bucket_id=str(obj.bucket_id),file_id=str(obj.file_id),file_key=obj.key,size=obj.file.size,referrer=request.referrer,**get_user()))return event", "docstring": "Build a file-download event.", "id": "f10392:m0"} {"signature": "def 
build_file_unique_id(doc):", "body": "doc[''] = ''.format(doc[''], doc[''])return doc", "docstring": "Build file unique identifier.", "id": "f10392:m1"} {"signature": "def build_record_unique_id(doc):", "body": "doc[''] = ''.format(doc[''], doc[''])return doc", "docstring": "Build record unique identifier.", "id": "f10392:m2"} {"signature": "def record_view_event_builder(event, sender_app, pid=None, record=None,**kwargs):", "body": "event.update(dict(timestamp=datetime.datetime.utcnow().isoformat(),record_id=str(record.id),pid_type=pid.pid_type,pid_value=str(pid.pid_value),referrer=request.referrer,**get_user()))return event", "docstring": "Build a record-view event.", "id": "f10392:m3"} {"signature": "def register_events():", "body": "return [dict(event_type='',templates='',processor_class=EventsIndexer,processor_config=dict(preprocessors=[flag_robots,anonymize_user,build_file_unique_id])),dict(event_type='',templates='',processor_class=EventsIndexer,processor_config=dict(preprocessors=[flag_robots,anonymize_user,build_record_unique_id]))]", "docstring": "Register sample events.", "id": "f10394:m0"} {"signature": "def register_aggregations():", "body": "return [dict(aggregation_name='',templates='',aggregator_class=StatAggregator,aggregator_config=dict(client=current_search_client,event='',aggregation_field='',aggregation_interval='',copy_fields=dict(file_key='',bucket_id='',file_id='',),metric_aggregation_fields={'': ('', '',{'': }),'': ('', '', {}),},)), dict(aggregation_name='',templates='',aggregator_class=StatAggregator,aggregator_config=dict(client=current_search_client,event='',aggregation_field='',aggregation_interval='',copy_fields=dict(record_id='',pid_type='',pid_value='',),metric_aggregation_fields={'': ('', '',{'': }),},))]", "docstring": "Register sample aggregations.", "id": "f10394:m1"} {"signature": "def register_queries():", "body": "return [dict(query_name='',query_class=ESDateHistogramQuery,query_config=dict(index='',doc_type='',copy_fields=dict(bucket_id='',file_key='',),required_filters=dict(bucket_id='',file_key='',))),dict(query_name='',query_class=ESTermsQuery,query_config=dict(index='',doc_type='',copy_fields=dict(),required_filters=dict(bucket_id='',),aggregated_fields=[''])),]", "docstring": "Register queries.", "id": "f10394:m2"} {"signature": "def register_templates():", "body": "event_templates = [current_stats._events_config[e]['']for e incurrent_stats._events_config]aggregation_templates = [current_stats._aggregations_config[a]['']for a incurrent_stats._aggregations_config]return event_templates + aggregation_templates", "docstring": "Register elasticsearch templates for events.", "id": "f10399:m0"} {"signature": "def __init__(self, description, **kwargs):", "body": "super(RESTException, self).__init__(**kwargs)self.description = description", "docstring": "Initialize exception.", "id": "f10401:c6:m0"} {"signature": "def __init__(self, query_name):", "body": "super(RESTException, self).__init__()self.query_name = query_nameself.description = ''.format(query_name)", "docstring": "Constructor.\n\n :param query_name: name of the unknown query.", "id": "f10401:c7:m0"} {"signature": "def filter_robots(query):", "body": "return query.filter('', is_robot=False)", "docstring": "Modify an elasticsearch query so that robot events are filtered out.", "id": "f10402:m0"} {"signature": "def __init__(self, name, event, client=None,aggregation_field=None,metric_aggregation_fields=None,copy_fields=None,query_modifiers=None,aggregation_interval='',index_interval='', 
batch_size=):", "body": "self.name = nameself.client = client or current_search_clientself.event = eventself.aggregation_alias = ''.format(self.event)self.aggregation_field = aggregation_fieldself.metric_aggregation_fields = metric_aggregation_fields or {}self.allowed_metrics = {'', '', '', '', '', '','', '', ''}if any(v not in self.allowed_metricsfor k, (v, _, _) in (metric_aggregation_fields or {}).items()):raise(ValueError(''.format(''.join(self.allowed_metrics))))self.copy_fields = copy_fields or {}self.aggregation_interval = aggregation_intervalself.index_interval = index_intervalself.query_modifiers = (query_modifiers if query_modifiers is not Noneelse [filter_robots])self.supported_intervals = OrderedDict([('', ''),('', ''),('', ''),('', '')])self.dt_rounding_map = {'': '', '': '', '': '', '': ''}if list(self.supported_intervals.keys()).index(aggregation_interval)>list(self.supported_intervals.keys()).index(index_interval):raise(ValueError(''''))self.index_name_suffix = self.supported_intervals[index_interval]self.doc_id_suffix = self.supported_intervals[aggregation_interval]self.batch_size = batch_sizeself.event_index = ''.format(self.event)", "docstring": "Construct aggregator instance.\n\n :param event: aggregated event.\n :param client: elasticsearch client.\n :param aggregation_field: field on which the aggregation will be done.\n :param metric_aggregation_fields: dictionary of fields on which a\n metric aggregation will be computed. The format of the dictionary\n is \"destination field\" ->\n tuple(\"metric type\", \"source field\", \"metric_options\").\n :param copy_fields: list of fields which are copied from the raw events\n into the aggregation.\n :param query_modifiers: list of functions modifying the raw events\n query. By default the query_modifiers are [filter_robots].\n :param aggregation_interval: aggregation time window. default: month.\n :param index_interval: time window of the elasticsearch indices which\n will contain the resulting aggregations.\n :param batch_size: max number of days for which raw events are being\n fetched in one query. 
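The metric_aggregation_fields mapping described in this constructor pairs each destination field with a ("metric type", "source field", metric_options) tuple, while copy_fields carries values from the top raw event into every aggregation document. A minimal configuration sketch following those conventions (the event name, field names, and option values below are illustrative assumptions, not taken from this code; the metric types are standard Elasticsearch metric aggregation names):

# Illustrative sketch only: names and values are assumed for the example.
example_aggregator_config = dict(
    event='file-download',            # raw event type to aggregate
    aggregation_field='unique_id',    # one terms bucket per aggregated entity
    aggregation_interval='day',       # one aggregation document per day
    index_interval='month',           # one aggregation index per month
    copy_fields=dict(                 # copied from the top raw event hit
        file_key='file_key',
        bucket_id='bucket_id',
    ),
    metric_aggregation_fields={
        # destination field -> ("metric type", "source field", metric_options)
        'unique_count': ('cardinality', 'unique_session_id',
                         {'precision_threshold': 1000}),
        'volume': ('sum', 'size', {}),
    },
)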
This number has to be coherent with the\n aggregation_interval.", "id": "f10402:c0:m0"} {"signature": "@propertydef bookmark_doc_type(self):", "body": "return ''.format(self.name)", "docstring": "Get document type for the aggregation's bookmark.", "id": "f10402:c0:m1"} {"signature": "@propertydef aggregation_doc_type(self):", "body": "return ''.format(self.event, self.aggregation_interval)", "docstring": "Get document type for the aggregation.", "id": "f10402:c0:m2"} {"signature": "def _get_oldest_event_timestamp(self):", "body": "query_events = Search(using=self.client,index=self.event_index)[:].sort({'': {'': ''}})result = query_events.execute()if len(result) == :return Nonereturn parser.parse(result[][''])", "docstring": "Search for the oldest event timestamp.", "id": "f10402:c0:m3"} {"signature": "def get_bookmark(self):", "body": "if not Index(self.aggregation_alias,using=self.client).exists():if not Index(self.event_index,using=self.client).exists():return datetime.date.today()return self._get_oldest_event_timestamp()query_bookmark = Search(using=self.client,index=self.aggregation_alias,doc_type=self.bookmark_doc_type)[:].sort({'': {'': ''}})bookmarks = query_bookmark.execute()if len(bookmarks) == :return self._get_oldest_event_timestamp()bookmark = datetime.datetime.strptime(bookmarks[].date,self.doc_id_suffix)return bookmark", "docstring": "Get last aggregation date.", "id": "f10402:c0:m4"} {"signature": "def set_bookmark(self):", "body": "def _success_date():bookmark = {'': self.new_bookmark or datetime.datetime.utcnow().strftime(self.doc_id_suffix)}yield dict(_index=self.last_index_written,_type=self.bookmark_doc_type,_source=bookmark)if self.last_index_written:bulk(self.client,_success_date(),stats_only=True)", "docstring": "Set bookmark for starting next aggregation.", "id": "f10402:c0:m5"} {"signature": "def _format_range_dt(self, d):", "body": "if not isinstance(d, six.string_types):d = d.isoformat()return ''.format(d, self.dt_rounding_map[self.aggregation_interval])", "docstring": "Format range filter datetime to the closest aggregation interval.", "id": "f10402:c0:m6"} {"signature": "def agg_iter(self, lower_limit=None, upper_limit=None):", "body": "lower_limit = lower_limit or self.get_bookmark().isoformat()upper_limit = upper_limit or (datetime.datetime.utcnow().replace(microsecond=).isoformat())aggregation_data = {}self.agg_query = Search(using=self.client,index=self.event_index).filter('', timestamp={'': self._format_range_dt(lower_limit),'': self._format_range_dt(upper_limit)})for modifier in self.query_modifiers:self.agg_query = modifier(self.agg_query)hist = self.agg_query.aggs.bucket('','',field='',interval=self.aggregation_interval)terms = hist.bucket('', '', field=self.aggregation_field, size=)top = terms.metric('', '', size=, sort={'': ''})for dst, (metric, src, opts) in self.metric_aggregation_fields.items():terms.metric(dst, metric, field=src, **opts)results = self.agg_query.execute()index_name = Nonefor interval in results.aggregations[''].buckets:interval_date = datetime.datetime.strptime(interval[''], '')for aggregation in interval[''].buckets:aggregation_data[''] = interval_date.isoformat()aggregation_data[self.aggregation_field] = aggregation['']aggregation_data[''] = aggregation['']if self.metric_aggregation_fields:for f in self.metric_aggregation_fields:aggregation_data[f] = aggregation[f]['']doc = aggregation.top_hit.hits.hits[]['']for destination, source in self.copy_fields.items():if isinstance(source, six.string_types):aggregation_data[destination] = 
doc[source]else:aggregation_data[destination] = source(doc,aggregation_data)index_name = ''.format(self.event,interval_date.strftime(self.index_name_suffix))self.indices.add(index_name)yield dict(_id=''.format(aggregation[''],interval_date.strftime(self.doc_id_suffix)),_index=index_name,_type=self.aggregation_doc_type,_source=aggregation_data)self.last_index_written = index_name", "docstring": "Aggregate and return dictionary to be indexed in ES.", "id": "f10402:c0:m7"} {"signature": "def run(self, start_date=None, end_date=None, update_bookmark=True):", "body": "if not Index(self.event_index, using=self.client).exists():returnlower_limit = start_date or self.get_bookmark()if lower_limit is None:returnupper_limit = min(end_date or datetime.datetime.max, datetime.datetime.utcnow().replace(microsecond=),datetime.datetime.combine(lower_limit + datetime.timedelta(self.batch_size),datetime.datetime.min.time()))while upper_limit <= datetime.datetime.utcnow():self.indices = set()self.new_bookmark = upper_limit.strftime(self.doc_id_suffix)bulk(self.client,self.agg_iter(lower_limit, upper_limit),stats_only=True,chunk_size=)current_search_client.indices.flush(index=''.join(self.indices),wait_if_ongoing=True)if update_bookmark:self.set_bookmark()self.indices = set()lower_limit = lower_limit + datetime.timedelta(self.batch_size)upper_limit = min(end_date or datetime.datetime.max, datetime.datetime.utcnow().replace(microsecond=),lower_limit + datetime.timedelta(self.batch_size))if lower_limit > upper_limit:break", "docstring": "Calculate statistics aggregations.", "id": "f10402:c0:m8"} {"signature": "def list_bookmarks(self, start_date=None, end_date=None, limit=None):", "body": "query = Search(using=self.client,index=self.aggregation_alias,doc_type=self.bookmark_doc_type).sort({'': {'': ''}})range_args = {}if start_date:range_args[''] = self._format_range_dt(start_date.replace(microsecond=))if end_date:range_args[''] = self._format_range_dt(end_date.replace(microsecond=))if range_args:query = query.filter('', date=range_args)return query[:limit].execute() if limit else query.scan()", "docstring": "List the aggregation's bookmarks.", "id": "f10402:c0:m9"} {"signature": "def delete(self, start_date=None, end_date=None):", "body": "aggs_query = Search(using=self.client,index=self.aggregation_alias,doc_type=self.aggregation_doc_type).extra(_source=False)range_args = {}if start_date:range_args[''] = self._format_range_dt(start_date.replace(microsecond=))if end_date:range_args[''] = self._format_range_dt(end_date.replace(microsecond=))if range_args:aggs_query = aggs_query.filter('', timestamp=range_args)bookmarks_query = Search(using=self.client,index=self.aggregation_alias,doc_type=self.bookmark_doc_type).sort({'': {'': ''}})if range_args:bookmarks_query = bookmarks_query.filter('', date=range_args)def _delete_actions():for query in (aggs_query, bookmarks_query):affected_indices = set()for doc in query.scan():affected_indices.add(doc.meta.index)yield dict(_index=doc.meta.index,_op_type='',_id=doc.meta.id,_type=doc.meta.doc_type)current_search_client.indices.flush(index=''.join(affected_indices), wait_if_ongoing=True)bulk(self.client, _delete_actions(), refresh=True)", "docstring": "Delete aggregation documents.", "id": "f10402:c0:m10"} {"signature": "def __init__(self, **kwargs):", "body": "super(StatsQueryResource, self).__init__(serializers={'':lambda data, *args, **kwargs: jsonify(data),},default_method_media_type={'': '',},default_media_type='',**kwargs)", "docstring": "Constructor.", "id": 
"f10403:c0:m0"} {"signature": "def post(self, **kwargs):", "body": "data = request.get_json(force=False)if data is None:data = {}result = {}for query_name, config in data.items():if config is None or not isinstance(config, dict)or (set(config.keys()) != {'', ''} andset(config.keys()) != {''}):raise InvalidRequestInputError('''''')stat = config['']params = config.get('', {})try:query_cfg = current_stats.queries[stat]except KeyError:raise UnknownQueryError(stat)permission = current_stats.permission_factory(stat, params)if permission is not None and not permission.can():message = (''''''.format(stat))if current_user.is_authenticated:abort(, message)abort(, message)try:query = query_cfg.query_class(**query_cfg.query_config)result[query_name] = query.run(**params)except ValueError as e:raise InvalidRequestInputError(e.args[])except NotFoundError as e:return Nonereturn self.make_response(result)", "docstring": "Get statistics.", "id": "f10403:c0:m1"} {"signature": "def lazy_result(f):", "body": "@wraps(f)def decorated(ctx, param, value):return LocalProxy(lambda: f(ctx, param, value))return decorated", "docstring": "Decorate function to return LazyProxy.", "id": "f10404:m0"} {"signature": "@click.group()def stats():", "body": "", "docstring": "Statistics commands.", "id": "f10404:m5"} {"signature": "@stats.group()def events():", "body": "", "docstring": "Event management commands.", "id": "f10404:m6"} {"signature": "@events.command('')@click.argument('', nargs=-, callback=_validate_event_type)@click.option('', '', is_flag=True)@with_appcontextdef _events_process(event_types=None, eager=False):", "body": "event_types = event_types or list(current_stats.enabled_events)if eager:process_events.apply((event_types,), throw=True)click.secho('', fg='')else:process_events.delay(event_types)click.secho('', fg='')", "docstring": "Process stats events.", "id": "f10404:m7"} {"signature": "@stats.group()def aggregations():", "body": "", "docstring": "Aggregation management commands.", "id": "f10404:m8"} {"signature": "@aggregations.command('')@aggr_arg@click.option('', callback=_verify_date)@click.option('', callback=_verify_date)@click.option('', '', is_flag=True)@click.option('', '', is_flag=True)@with_appcontextdef _aggregations_process(aggregation_types=None,start_date=None, end_date=None,update_bookmark=False, eager=False):", "body": "aggregation_types = (aggregation_types orlist(current_stats.enabled_aggregations))if eager:aggregate_events.apply((aggregation_types,),dict(start_date=start_date, end_date=end_date,update_bookmark=update_bookmark),throw=True)click.secho('', fg='')else:aggregate_events.delay(aggregation_types, start_date=start_date, end_date=end_date)click.secho('', fg='')", "docstring": "Process stats aggregations.", "id": "f10404:m9"} {"signature": "@aggregations.command('')@aggr_arg@click.option('', callback=_parse_date)@click.option('', callback=_parse_date)@click.confirmation_option(prompt='')@with_appcontextdef _aggregations_delete(aggregation_types=None,start_date=None, end_date=None):", "body": "aggregation_types = (aggregation_types orlist(current_stats.enabled_aggregations))for a in aggregation_types:aggr_cfg = current_stats.aggregations[a]aggregator = aggr_cfg.aggregator_class(name=aggr_cfg.name, **aggr_cfg.aggregator_config)aggregator.delete(start_date, end_date)", "docstring": "Delete computed aggregations.", "id": "f10404:m10"} {"signature": "@aggregations.command('')@aggr_arg@click.option('', callback=_parse_date)@click.option('', callback=_parse_date)@click.option('', '', 
default=)@with_appcontextdef _aggregations_list_bookmarks(aggregation_types=None,start_date=None, end_date=None, limit=None):", "body": "aggregation_types = (aggregation_types orlist(current_stats.enabled_aggregations))for a in aggregation_types:aggr_cfg = current_stats.aggregations[a]aggregator = aggr_cfg.aggregator_class(name=aggr_cfg.name, **aggr_cfg.aggregator_config)bookmarks = aggregator.list_bookmarks(start_date, end_date, limit)click.echo(''.format(a))for b in bookmarks:click.echo(''.format(b.date))", "docstring": "List aggregation bookmarks.", "id": "f10404:m11"} {"signature": "@cached_propertydef _events_config(self):", "body": "result = {}for ep in iter_entry_points(group=self.entry_point_group_events):for cfg in ep.load()():if cfg[''] not in self.enabled_events:continueelif cfg[''] in result:raise DuplicateEventError(''''.format(cfg[''], ep.name))cfg.update(self.enabled_events[cfg['']] or {})result[cfg['']] = cfgreturn result", "docstring": "Load events configuration.", "id": "f10405:c0:m1"} {"signature": "@cached_propertydef _aggregations_config(self):", "body": "result = {}for ep in iter_entry_points(group=self.entry_point_group_aggs):for cfg in ep.load()():if cfg[''] not in self.enabled_aggregations:continueelif cfg[''] in result:raise DuplicateAggregationError(''''.format(cfg[''], ep.name))cfg.update(self.enabled_aggregations[cfg['']] or {})result[cfg['']] = cfgreturn result", "docstring": "Load aggregation configurations.", "id": "f10405:c0:m3"} {"signature": "@cached_propertydef _queries_config(self):", "body": "result = {}for ep in iter_entry_points(group=self.entry_point_group_queries):for cfg in ep.load()():if cfg[''] not in self.enabled_queries:continueelif cfg[''] in result:raise DuplicateQueryError(''''.format(cfg[''], ep.name))cfg.update(self.enabled_queries[cfg['']] or {})result[cfg['']] = cfgreturn result", "docstring": "Load queries configuration.", "id": "f10405:c0:m5"} {"signature": "@cached_propertydef permission_factory(self):", "body": "return load_or_import_from_config('', app=self.app)", "docstring": "Load the default permission factory.", "id": "f10405:c0:m7"} {"signature": "def publish(self, event_type, events):", "body": "assert event_type in self.eventscurrent_queues.queues[''.format(event_type)].publish(events)", "docstring": "Publish events.", "id": "f10405:c0:m8"} {"signature": "def consume(self, event_type, no_ack=True, payload=True):", "body": "assert event_type in self.eventsreturn current_queues.queues[''.format(event_type)].consume(payload=payload)", "docstring": "Consume all pending events.", "id": "f10405:c0:m9"} {"signature": "def __init__(self, app=None, **kwargs):", "body": "if app:self.init_app(app, **kwargs)", "docstring": "Extension initialization.", "id": "f10405:c1:m0"} {"signature": "def init_app(self, app,entry_point_group_events='',entry_point_group_aggs='',entry_point_group_queries=''):", "body": "self.init_config(app)state = _InvenioStatsState(app,entry_point_group_events=entry_point_group_events,entry_point_group_aggs=entry_point_group_aggs,entry_point_group_queries=entry_point_group_queries)self._state = app.extensions[''] = stateif app.config['']:signal_receivers = {key: value for key, value inapp.config.get('', {}).items()if '' in value}register_receivers(app, signal_receivers)return state", "docstring": "Flask application initialization.", "id": "f10405:c1:m1"} {"signature": "def init_config(self, app):", "body": "for k in dir(config):if k.startswith(''):app.config.setdefault(k, getattr(config, k))", 
"docstring": "Initialize configuration.", "id": "f10405:c1:m2"} {"signature": "def __getattr__(self, name):", "body": "return getattr(self._state, name, None)", "docstring": "Proxy to state object.", "id": "f10405:c1:m3"} {"signature": "def __init__(self, query_name, doc_type, index, client=None,*args, **kwargs):", "body": "super(ESQuery, self).__init__()self.index = indexself.client = client or current_search_clientself.query_name = query_nameself.doc_type = doc_type", "docstring": "Constructor.\n\n :param doc_type: queried document type.\n :param index: queried index.\n :param client: elasticsearch client used to query.", "id": "f10407:c0:m0"} {"signature": "def extract_date(self, date):", "body": "if isinstance(date, six.string_types):try:date = dateutil.parser.parse(date)except ValueError:raise ValueError('').format(self.query_name)if not isinstance(date, datetime):raise TypeError('').format(self.query_name)return date", "docstring": "Extract date from string if necessary.\n\n :returns: the extracted date.", "id": "f10407:c0:m1"} {"signature": "def run(self, *args, **kwargs):", "body": "raise NotImplementedError()", "docstring": "Run the query.", "id": "f10407:c0:m2"} {"signature": "def __init__(self, time_field='', copy_fields=None,query_modifiers=None, required_filters=None,metric_fields=None, *args, **kwargs):", "body": "super(ESDateHistogramQuery, self).__init__(*args, **kwargs)self.time_field = time_fieldself.copy_fields = copy_fields or {}self.query_modifiers = query_modifiers or []self.required_filters = required_filters or {}self.metric_fields = metric_fields or {'': ('', '', {})}self.allowed_metrics = {'', '', '', '', '', '','', '', ''}if any(v not in self.allowed_metricsfor k, (v, _, _) in (self.metric_fields or {}).items()):raise(ValueError(''.format(''.join(self.allowed_metrics))))", "docstring": "Constructor.\n\n :param time_field: name of the timestamp field.\n :param copy_fields: list of fields to copy from the top hit document\n into the resulting aggregation.\n :param query_modifiers: List of functions accepting a ``query`` and\n ``**kwargs`` (same as provided to the ``run`` method), that will\n be applied to the aggregation query.\n :param required_filters: Dict of \"mandatory query parameter\" ->\n \"filtered field\".\n :param metric_fields: Dict of \"destination field\" ->\n tuple(\"metric type\", \"source field\", \"metric_options\").", "id": "f10407:c1:m0"} {"signature": "def validate_arguments(self, interval, start_date, end_date, **kwargs):", "body": "if interval not in self.allowed_intervals:raise InvalidRequestInputError('').format(self.query_name)if set(kwargs) < set(self.required_filters):raise InvalidRequestInputError(''''.format(set(self.required_filters.keys()),self.query_name))", "docstring": "Validate query arguments.", "id": "f10407:c1:m1"} {"signature": "def build_query(self, interval, start_date, end_date, **kwargs):", "body": "agg_query = Search(using=self.client,index=self.index,doc_type=self.doc_type)[:]if start_date is not None or end_date is not None:time_range = {}if start_date is not None:time_range[''] = start_date.isoformat()if end_date is not None:time_range[''] = end_date.isoformat()agg_query = agg_query.filter('',**{self.time_field: time_range})for modifier in self.query_modifiers:agg_query = modifier(agg_query, **kwargs)base_agg = agg_query.aggs.bucket('','',field=self.time_field,interval=interval)for destination, (metric, field, opts) in self.metric_fields.items():base_agg.metric(destination, metric, field=field, **opts)if 
self.copy_fields:base_agg.metric('', '', size=, sort={'': ''})for query_param, filtered_field in self.required_filters.items():if query_param in kwargs:agg_query = agg_query.filter('', **{filtered_field: kwargs[query_param]})return agg_query", "docstring": "Build the elasticsearch query.", "id": "f10407:c1:m2"} {"signature": "def process_query_result(self, query_result, interval,start_date, end_date):", "body": "def build_buckets(agg):\"\"\"\"\"\"bucket_result = dict(key=agg[''],date=agg[''],)for metric in self.metric_fields:bucket_result[metric] = agg[metric]['']if self.copy_fields and agg['']['']['']:doc = agg[''][''][''][]['']for destination, source in self.copy_fields.items():if isinstance(source, six.string_types):bucket_result[destination] = doc[source]else:bucket_result[destination] = source(bucket_result, doc)return bucket_resultbuckets = query_result['']['']['']return dict(interval=interval,key_type='',start_date=start_date.isoformat() if start_date else None,end_date=end_date.isoformat() if end_date else None,buckets=[build_buckets(b) for b in buckets])", "docstring": "Build the result using the query result.", "id": "f10407:c1:m3"} {"signature": "def run(self, interval='', start_date=None,end_date=None, **kwargs):", "body": "start_date = self.extract_date(start_date) if start_date else Noneend_date = self.extract_date(end_date) if end_date else Noneself.validate_arguments(interval, start_date, end_date, **kwargs)agg_query = self.build_query(interval, start_date,end_date, **kwargs)query_result = agg_query.execute().to_dict()res = self.process_query_result(query_result, interval,start_date, end_date)return res", "docstring": "Run the query.", "id": "f10407:c1:m4"} {"signature": "def __init__(self, time_field='', copy_fields=None,query_modifiers=None, required_filters=None,aggregated_fields=None, metric_fields=None, *args, **kwargs):", "body": "super(ESTermsQuery, self).__init__(*args, **kwargs)self.time_field = time_fieldself.copy_fields = copy_fields or {}self.query_modifiers = query_modifiers or []self.required_filters = required_filters or {}self.aggregated_fields = aggregated_fields or []self.metric_fields = metric_fields or {'': ('', '', {})}", "docstring": "Constructor.\n\n :param time_field: name of the timestamp field.\n :param copy_fields: list of fields to copy from the top hit document\n into the resulting aggregation.\n :param query_modifiers: List of functions accepting a ``query`` and\n ``**kwargs`` (same as provided to the ``run`` method), that will\n be applied to the aggregation query.\n :param required_filters: Dict of \"mandatory query parameter\" ->\n \"filtered field\".\n :param aggregated_fields: List of fields which will be used in the\n terms aggregations.\n :param metric_fields: Dict of \"destination field\" ->\n tuple(\"metric type\", \"source field\").", "id": "f10407:c2:m0"} {"signature": "def validate_arguments(self, start_date, end_date, **kwargs):", "body": "if set(kwargs) < set(self.required_filters):raise InvalidRequestInputError(''''.format(set(self.required_filters.keys()),self.query_name))", "docstring": "Validate query arguments.", "id": "f10407:c2:m1"} {"signature": "def build_query(self, start_date, end_date, **kwargs):", "body": "agg_query = Search(using=self.client,index=self.index,doc_type=self.doc_type)[:]if start_date is not None or end_date is not None:time_range = {}if start_date is not None:time_range[''] = start_date.isoformat()if end_date is not None:time_range[''] = end_date.isoformat()agg_query = 
agg_query.filter('',**{self.time_field: time_range})for modifier in self.query_modifiers:agg_query = modifier(agg_query, **kwargs)base_agg = agg_query.aggsdef _apply_metric_aggs(agg):for dst, (metric, field, opts) in self.metric_fields.items():agg.metric(dst, metric, field=field, **opts)_apply_metric_aggs(base_agg)if self.aggregated_fields:cur_agg = base_aggfor term in self.aggregated_fields:cur_agg = cur_agg.bucket(term, '', field=term, size=)_apply_metric_aggs(cur_agg)if self.copy_fields:base_agg.metric('', '', size=, sort={'': ''})for query_param, filtered_field in self.required_filters.items():if query_param in kwargs:agg_query = agg_query.filter('', **{filtered_field: kwargs[query_param]})return agg_query", "docstring": "Build the elasticsearch query.", "id": "f10407:c2:m2"} {"signature": "def process_query_result(self, query_result, start_date, end_date):", "body": "def build_buckets(agg, fields, bucket_result):\"\"\"\"\"\"for metric in self.metric_fields:bucket_result[metric] = agg[metric]['']if fields:current_level = fields[]bucket_result.update(dict(type='',field=current_level,key_type='',buckets=[build_buckets(b, fields[:], dict(key=b['']))for b in agg[current_level]['']]))return bucket_resultaggs = query_result['']result = dict(start_date=start_date.isoformat() if start_date else None,end_date=end_date.isoformat() if end_date else None,)if self.copy_fields and aggs['']['']['']:doc = aggs[''][''][''][]['']for destination, source in self.copy_fields.items():if isinstance(source, six.string_types):result[destination] = doc[source]else:result[destination] = source(result, doc)return build_buckets(aggs, self.aggregated_fields, result)", "docstring": "Build the result using the query result.", "id": "f10407:c2:m3"} {"signature": "def run(self, start_date=None, end_date=None, **kwargs):", "body": "start_date = self.extract_date(start_date) if start_date else Noneend_date = self.extract_date(end_date) if end_date else Noneself.validate_arguments(start_date, end_date, **kwargs)agg_query = self.build_query(start_date, end_date, **kwargs)query_result = agg_query.execute().to_dict()res = self.process_query_result(query_result, start_date, end_date)return res", "docstring": "Run the query.", "id": "f10407:c2:m4"} {"signature": "def anonymize_user(doc):", "body": "ip = doc.pop('', None)if ip:doc.update({'': get_geoip(ip)})user_id = doc.pop('', '')session_id = doc.pop('', '')user_agent = doc.pop('', '')timestamp = arrow.get(doc.get(''))timeslice = timestamp.strftime('')salt = get_anonymization_salt(timestamp)visitor_id = hashlib.sha224(salt.encode(''))if user_id:visitor_id.update(user_id.encode(''))elif session_id:visitor_id.update(session_id.encode(''))elif ip and user_agent:vid = ''.format(ip, user_agent, timeslice)visitor_id.update(vid.encode(''))else:passunique_session_id = hashlib.sha224(salt.encode(''))if user_id:sid = ''.format(user_id, timeslice)unique_session_id.update(sid.encode(''))elif session_id:sid = ''.format(session_id, timeslice)unique_session_id.update(sid.encode(''))elif ip and user_agent:sid = ''.format(ip, user_agent, timeslice)unique_session_id.update(sid.encode(''))doc.update(dict(visitor_id=visitor_id.hexdigest(),unique_session_id=unique_session_id.hexdigest()))return doc", "docstring": "Preprocess an event by anonymizing user information.\n\n The anonymization is done by removing fields that can uniquely identify a\n user, such as the user's ID, session ID, IP address and User Agent, and\n hashing them to produce a ``visitor_id`` and ``unique_session_id``. 
To\n further secure the method, a randomly generated 32-byte salt is used that\n expires after 24 hours and is discarded. The salt values are stored in\n Redis (or whichever backend Invenio-Cache uses). The ``unique_session_id``\n is calculated in the same way as the ``visitor_id``, with the only\n difference that it also takes into account the hour of the event. All of\n these rules effectively mean that a user can have a unique ``visitor_id``\n for each day and a unique ``unique_session_id`` for each hour of a day.\n\n This session ID generation process was designed according to the `Project\n COUNTER Code of Practice `_.\n\n In addition to that, the country of the user is extracted from the IP\n address as an ISO 3166-1 alpha-2 two-letter country code (e.g. \"CH\" for\n Switzerland).", "id": "f10409:m0"} {"signature": "def flag_robots(doc):", "body": "doc[''] = '' in doc and is_robot(doc[''])return doc", "docstring": "Flag events which are created by robots.\n\n The list of robots is defined by the `COUNTER-robots Python package\n `_, which follows the\n `list defined by Project COUNTER\n `_ that was later split\n into robots and machines by `the Make Data Count project\n `_.", "id": "f10409:m1"} {"signature": "def flag_machines(doc):", "body": "doc[''] = '' in doc and is_machine(doc[''])return doc", "docstring": "Flag events which are created by machines.\n\n The list of machines is defined by the `COUNTER-robots Python package\n `_, which follows the\n `list defined by Project COUNTER\n `_ that was later split\n into robots and machines by `the Make Data Count project\n `_.", "id": "f10409:m2"} {"signature": "def hash_id(iso_timestamp, msg):", "body": "return ''.format(iso_timestamp,hashlib.sha1(msg.get('').encode('') +str(msg.get('')).encode('')).hexdigest())", "docstring": "Generate event id, optimized for ES.", "id": "f10409:m3"} {"signature": "def __init__(self, queue, prefix='', suffix='', client=None,preprocessors=None, double_click_window=):", "body": "self.queue = queueself.client = client or current_search_clientself.doctype = queue.routing_keyself.index = ''.format(prefix, self.queue.routing_key)self.suffix = suffixself.preprocessors = [obj_or_import_string(preproc) for preproc in preprocessors] if preprocessors is not None else self.default_preprocessorsself.double_click_window = double_click_window", "docstring": "Initialize indexer.\n\n :param prefix: prefix appended to elasticsearch indices' name.\n :param suffix: suffix appended to elasticsearch indices' name.\n :param double_click_window: time window during which similar events are\n deduplicated (counted as one occurrence).\n :param client: elasticsearch client.\n :param preprocessors: a list of functions which are called on every\n event before it is indexed. Each function should return the\n processed event. 
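The anonymization described above always hashes a short-lived salt together with the strongest identifier available: the user id first, then the session id, then the IP address combined with the User-Agent. A minimal sketch of that derivation, assuming UTF-8 encoding and an hour-granular timeslice string (both assumptions made for illustration, not taken from this code):

# Minimal sketch of the visitor-id derivation, not the verbatim implementation.
import hashlib

def sketch_visitor_id(salt, user_id=None, session_id=None,
                      ip=None, user_agent=None, timeslice='2018071814'):
    """Hash the best available identifier with the (daily) salt."""
    h = hashlib.sha224(salt.encode('utf-8'))
    if user_id:
        h.update(str(user_id).encode('utf-8'))
    elif session_id:
        h.update(session_id.encode('utf-8'))
    elif ip and user_agent:
        # Fallback: IP + User-Agent, scoped to an (assumed) hour timeslice.
        h.update('{}|{}|{}'.format(ip, user_agent, timeslice).encode('utf-8'))
    return h.hexdigest()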
If it returns None, the event is filtered and\n won't be indexed.", "id": "f10409:c0:m0"} {"signature": "def actionsiter(self):", "body": "for msg in self.queue.consume():try:for preproc in self.preprocessors:msg = preproc(msg)if msg is None:breakif msg is None:continuesuffix = arrow.get(msg.get('')).strftime(self.suffix)ts = parser.parse(msg.get(''))ts = ts.replace(microsecond=)msg[''] = ts.isoformat()if self.double_click_window > :timestamp = mktime(utc.localize(ts).utctimetuple())ts = ts.fromtimestamp(timestamp // self.double_click_window *self.double_click_window)yield dict(_id=hash_id(ts.isoformat(), msg),_op_type='',_index=''.format(self.index, suffix),_type=self.doctype,_source=msg,)except Exception:current_app.logger.exception(u'')", "docstring": "Iterator.", "id": "f10409:c0:m1"} {"signature": "def run(self):", "body": "return elasticsearch.helpers.bulk(self.client,self.actionsiter(),stats_only=True,chunk_size=)", "docstring": "Process events queue.", "id": "f10409:c0:m2"} {"signature": "def register_receivers(app, config):", "body": "for event_name, event_config in config.items():event_builders = [obj_or_import_string(func)for func in event_config.get('', [])]signal = obj_or_import_string(event_config[''])signal.connect(EventEmmiter(event_name, event_builders), sender=app, weak=False)", "docstring": "Register signal receivers which send events.", "id": "f10410:m0"} {"signature": "def __init__(self, name, builders):", "body": "self.name = nameself.builders = builders", "docstring": "Constructor.", "id": "f10410:c0:m0"} {"signature": "def __call__(self, *args, **kwargs):", "body": "try:if self.name in current_stats.events:event = {}for builder in self.builders:event = builder(event, *args, **kwargs)if event is None:returncurrent_stats.publish(self.name, [event])except Exception:current_app.logger.exception(u'')", "docstring": "Receive a signal and send an event.", "id": "f10410:c0:m1"} {"signature": "@shared_taskdef process_events(event_types):", "body": "results = []for e in event_types:processor = current_stats.events[e].processor_class(**current_stats.events[e].processor_config)results.append((e, processor.run()))return results", "docstring": "Index statistics events.", "id": "f10411:m0"} {"signature": "@shared_taskdef aggregate_events(aggregations, start_date=None, end_date=None,update_bookmark=True):", "body": "start_date = dateutil_parse(start_date) if start_date else Noneend_date = dateutil_parse(end_date) if end_date else Noneresults = []for a in aggregations:aggr_cfg = current_stats.aggregations[a]aggregator = aggr_cfg.aggregator_class(name=aggr_cfg.name, **aggr_cfg.aggregator_config)results.append(aggregator.run(start_date, end_date, update_bookmark))return results", "docstring": "Aggregate indexed events.", "id": "f10411:m1"} {"signature": "def get_anonymization_salt(ts):", "body": "salt_key = ''.format(ts.date().isoformat())salt = current_cache.get(salt_key)if not salt:salt_bytes = os.urandom()salt = b64encode(salt_bytes).decode('')current_cache.set(salt_key, salt, timeout= * * )return salt", "docstring": "Get the anonymization salt based on the event timestamp's day.", "id": "f10412:m0"} {"signature": "def get_geoip(ip):", "body": "reader = geolite2.reader()ip_data = reader.get(ip) or {}return ip_data.get('', {}).get('')", "docstring": "Lookup country for IP address.", "id": "f10412:m1"} {"signature": "def get_user():", "body": "return dict(ip_address=request.remote_addr,user_agent=request.user_agent.string,user_id=(current_user.get_id() if current_user.is_authenticated 
else None),session_id=session.get(''))", "docstring": "User information.\n\n .. note::\n\n **Privacy note** A user's IP address, user agent string, and user id\n (if logged in) are sent to a message queue, where they are stored for about\n 5 minutes. The information is used to:\n\n - Detect robot visits from the user agent string.\n - Generate an anonymized visitor id (using a random salt per day).\n - Detect the user's host country based on the IP address.\n\n The information is then discarded.", "id": "f10412:m2"} {"signature": "def obj_or_import_string(value, default=None):", "body": "if isinstance(value, six.string_types):return import_string(value)elif value:return valuereturn default", "docstring": "Import string or return object.\n\n :param value: Import path or class object to instantiate.\n :param default: Default object to return if the import fails.\n :returns: The imported object.", "id": "f10412:m3"} {"signature": "def load_or_import_from_config(key, app=None, default=None):", "body": "app = app or current_appimp = app.config.get(key)return obj_or_import_string(imp, default=default)", "docstring": "Load or import value from config.\n\n :returns: The loaded value.", "id": "f10412:m4"} {"signature": "def default_permission_factory(query_name, params):", "body": "from invenio_stats import current_statsif current_stats.queries[query_name].permission_factory is None:return AllowAllPermissionelse:return current_stats.queries[query_name].permission_factory(query_name, params)", "docstring": "Default permission factory.\n\n By default it allows access to the statistics if they don't have a dedicated\n permission factory.", "id": "f10412:m5"} {"signature": "@app.cli.group()def fixtures():", "body": "", "docstring": "Command for working with test data.", "id": "f10413:m0"} {"signature": "@classmethoddef parse_byteranges(cls, environ):", "body": "r = []s = environ.get(cls.header_range, '').replace('','').lower()if s:l = s.split('')if len(l) == :unit, vals = tuple(l)if unit == '' and vals:gen_rng = ( tuple(rng.split('')) for rng in vals.split('') if '' in rng )for start, end in gen_rng:if start or end:r.append( (int(start) if start else None, int(end) if end else None) )return r", "docstring": "Outputs a list of tuples with ranges or the empty list\nAccording to the RFC, start or end values can be omitted", "id": "f10417:c1:m1"} {"signature": "@classmethoddef check_ranges(cls, ranges, length):", "body": "result = []for start, end in ranges:if isinstance(start, int) or isinstance(end, int):if isinstance(start, int) and not ( <= start < length):continueelif isinstance(start, int) and isinstance(end, int) and not (start <= end):continueelif start is None and end == :continueresult.append( (start,end) )return result", "docstring": "Removes errored ranges", "id": "f10417:c1:m2"} {"signature": "@classmethoddef convert_ranges(cls, ranges, length):", "body": "result = []for start, end in ranges:if end is None:result.append( (start, length-) )elif start is None:s = length - endresult.append( ( if s < else s, length-) )else:result.append( (start, end if end < length else length-) )return result", "docstring": "Converts to valid byte ranges", "id": "f10417:c1:m3"} {"signature": "@classmethoddef condense_ranges(cls, ranges):", "body": "result = []if ranges:ranges.sort(key=lambda tup: tup[])result.append(ranges[])for i in range(, len(ranges)):if result[-][] + >= ranges[i][]:result[-] = (result[-][], max(result[-][], ranges[i][]))else:result.append(ranges[i])return result", "docstring": "Sorts and removes overlaps", 
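Taken together, the four classmethods above form a pipeline over the HTTP Range header: parse_byteranges extracts (start, end) tuples, check_ranges drops invalid ones, convert_ranges resolves open ends against the resource length, and condense_ranges sorts and merges overlapping ranges. A worked example for a 1000-byte resource and the header value bytes=0-99,50-149,900-,-50 (numbers chosen purely for illustration):

# Worked example of the byte-range pipeline for a resource of length 1000.
parsed    = [(0, 99), (50, 149), (900, None), (None, 50)]  # parse_byteranges
checked   = [(0, 99), (50, 149), (900, None), (None, 50)]  # check_ranges: all valid
converted = [(0, 99), (50, 149), (900, 999), (950, 999)]   # convert_ranges(length=1000)
condensed = [(0, 149), (900, 999)]                         # condense_ranges: sort + merge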
"id": "f10417:c1:m4"} {"signature": "def main():", "body": "parser = ArgumentParser()parser.add_argument(\"\", \"\", dest=\"\",help=\"\",metavar=\"\")parser.add_argument(\"\", \"\", dest=\"\",help=\"\" +\"\" +\"\",metavar=\"\")parser.add_argument(\"\", \"\", dest=\"\",help=\"\" +\"\",metavar=\"\")parser.add_argument(\"\", \"\", dest=\"\", default='',help=\"\" +\"\" +\"\" +\"\" +\"\" +\"\",metavar=\"\")parser.add_argument(\"\", \"\", dest=\"\",help=\"\",metavar=\"\")parser.add_argument(\"\", \"\", dest=\"\",help=\"\",metavar=\"\")options = parser.parse_args()version = os.popen(\"\").read().strip()if options.inputfile:if options.inputfile.endswith(''):input_genome_file = open(options.inputfile)elif options.inputfile.endswith(''):input_genome_file = gzip.open(options.inputfile)elif options.inputfile.endswith(''):input_genome_file = bz2.BZ2File(options.inputfile)else:raise IOError(\"\" +\"\")elif not sys.stdin.isatty():input_genome_file = sys.stdinelse:sys.stderr.write(\"\")parser.print_help()sys.exit()if options.build and options.build in ['', '']:build = options.buildelse:raise IOError(\"\")if (not (options.clinvarfile or options.clinvardir) or(options.clinvarfile and options.clinvardir)):sys.stderr.write(\"\")parser.print_help()sys.exit()if options.clinvarfile:clinvarfilename = options.clinvarfileelif options.clinvardir:clinvarfilename = get_latest_vcf_file(target_dir=options.clinvardir,build=build)if clinvarfilename.endswith(''):input_clinvar_file = open(options.clinvarfile)elif clinvarfilename.endswith(''):input_clinvar_file = gzip.open(clinvarfilename)elif clinvarfilename.endswith(''):input_clinvar_file = bz2.BZ2File(clinvarfilename)else:raise IOError(\"\" +\"\")if options.type not in ['', '']:raise IOError(\"\")if options.type == \"\":csv_report(input_genome_file=input_genome_file,input_clinvar_file=input_clinvar_file,build=build,version=version)elif options.type == \"\":notes_json = {}if options.notes:notes_json[\"\"] = options.notestry:notes_json = json.loads(options.notes)except:sys.stderr.write(\"\")json_report(input_genome_file=input_genome_file,input_clinvar_file=input_clinvar_file,build=build,notes=notes_json,version=version)", "docstring": "Parse command line argument and\noutput appropriate file type (csv or JSON)", "id": "f10418:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "(self.clnalleleid, self.hgvs, self.clnsig,self.clndn, self.clndisdb, self.clnvi) = [kwargs[x] for x in['', '', '', '', '', '']]super(ClinVarAllele, self).__init__(*args, **kwargs)", "docstring": "Initialize ClinVarAllele object\n\nA ClinVarAllele is an allele for a genomic position that has data\nfrom ClinVar associated with it.\n\nRequired arguments:\nsequence: String of DNA letters (A, C, G, or T) for the allele;\n may be empty (to represent a deletion)\nfrequency: Preferred allele frequency\nalleleid: ClinVar Allele ID\nclnhgvs: HGVS nomenclature for this allele\nclnsig: ClinVar clinical significance\nclndn: ClinVar disease name\nclndisdb: Database IDs of disease database entries (tag-value pairs)\nclnvi: Database IDs of clinical sources (tag-value pairs)", "id": "f10419:c0:m0"} {"signature": "def as_dict(self, *args, **kwargs):", "body": "self_as_dict = super(ClinVarAllele, self).as_dict(*args, **kwargs)self_as_dict[''] = self.hgvsself_as_dict[''] = self.clnalleleidself_as_dict[''] = self.clnsigself_as_dict[''] = self.clndnself_as_dict[''] = self.clndisdbself_as_dict[''] = self.clnvireturn self_as_dict", "docstring": "Return ClinVarAllele data as dict object.", "id": 
"f10419:c0:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "kwargs[''] = Falsesuper(ClinVarVCFLine, self).__init__(self, *args, **kwargs)", "docstring": "Initialize ClinVarVCFLine with VCF line", "id": "f10419:c1:m0"} {"signature": "def as_dict(self):", "body": "return {'': self.chrom,'': self.start,'': self.ref_allele,'': self.alt_alleles,'': self.info,'': [x.as_dict() for x in self.alleles]}", "docstring": "Dict representation of parsed ClinVar VCF line", "id": "f10419:c1:m1"} {"signature": "def _parse_frequencies(self):", "body": "frequencies = OrderedDict([('', ''),('', ''),('', '')])pref_freq = ''for source in frequencies.keys():freq_key = '' + sourceif freq_key in self.info:frequencies[source] = self.info[freq_key]if pref_freq == '':pref_freq = frequencies[source]return pref_freq, frequencies", "docstring": "Parse frequency data in ClinVar VCF", "id": "f10419:c1:m2"} {"signature": "def _parse_allele_data(self):", "body": "pref_freq, frequencies = self._parse_frequencies()info_clnvar_single_tags = ['', '', '']cln_data = {x.lower(): self.info[x] if x in self.info else Nonefor x in info_clnvar_single_tags}cln_data.update({'': [x.split('') for x inself.info[''].split('')]if '' in self.info else []})cln_data.update({'': self.info[''].split('') if'' in self.info else []})cln_data.update({'': self.info[''].split('')if '' in self.info else []})try:sequence = self.alt_alleles[]except IndexError:sequence = self.ref_alleleallele = ClinVarAllele(frequency=pref_freq, sequence=sequence,**cln_data)if not cln_data['']:return []return [allele]", "docstring": "Parse alleles for ClinVar VCF, overrides parent method.", "id": "f10419:c1:m3"} {"signature": "def __init__(self, *args, **kwargs):", "body": "sequence = kwargs['']if '' in kwargs:frequency = kwargs['']else:frequency = ''if not (re.match(r'', sequence) orre.match(r'', sequence)):raise ValueError(\"\")self.sequence = sequenceif frequency:try:if (float(frequency) < orfloat(frequency) > ):raise ValueError('')except ValueError:if not frequency == '':raise ValueError('' +'' +'')self.frequency = frequency", "docstring": "Initialize Allele object\n\nRequired arguments:\nsequence: Short string of DNA letters (ACGT) for the allele.\n May be empty (to represent a deletion).\n\nOptional arguments:\nfrequency: a string representation of a float between 0 and 1", "id": "f10420:c0:m0"} {"signature": "def __unicode__(self):", "body": "return self.as_json()", "docstring": "Print Allele object as dict object data.", "id": "f10420:c0:m1"} {"signature": "def __str__(self):", "body": "return self.as_json()", "docstring": "Print Allele object as dict object data.", "id": "f10420:c0:m2"} {"signature": "def as_dict(self):", "body": "self_as_dict = dict()self_as_dict[''] = self.sequenceif hasattr(self, ''):self_as_dict[''] = self.frequencyreturn self_as_dict", "docstring": "Return Allele data as dict object.", "id": "f10420:c0:m3"} {"signature": "def as_json(self):", "body": "return json.dumps(self.as_dict())", "docstring": "Print Allele object as JSON.", "id": "f10420:c0:m4"} {"signature": "def __init__(self, *args, **kwargs):", "body": "vcf_line = kwargs['']skip_info = ('' in kwargs and kwargs[''])vcf_fields = vcf_line.strip().split('')self.chrom = vcf_fields[]self.start = int(vcf_fields[])self.ref_allele = vcf_fields[]if vcf_fields[] == '':self.alt_alleles = []else:self.alt_alleles = vcf_fields[].split('')if not skip_info:self.info = self._parse_info(vcf_fields[])self.alleles = self._parse_allele_data()", "docstring": "Store data from a VCF 
line.", "id": "f10420:c1:m0"} {"signature": "def _parse_allele_data(self):", "body": "return [Allele(sequence=x) for x in[self.ref_allele] + self.alt_alleles]", "docstring": "Create list of Alleles from VCF line data", "id": "f10420:c1:m1"} {"signature": "def _parse_info(self, info_field):", "body": "info = dict()for item in info_field.split(''):info_item_data = item.split('')if len(info_item_data) == :info[info_item_data[]] = Trueelif len(info_item_data) == :info[info_item_data[]] = info_item_data[]return info", "docstring": "Parse the VCF info field", "id": "f10420:c1:m2"} {"signature": "def __str__(self):", "body": "return json.dumps(self.as_dict(), ensure_ascii=True)", "docstring": "String representation of parsed VCF data", "id": "f10420:c1:m3"} {"signature": "def as_dict(self):", "body": "self_as_dict = {'': self.chrom,'': self.start,'': self.ref_allele,'': self.alt_alleles,'': [x.as_dict() for x in self.alleles]}try:self_as_dict[''] = self.infoexcept AttributeError:passreturn self_as_dict", "docstring": "Dict representation of parsed VCF data", "id": "f10420:c1:m4"} {"signature": "@staticmethoddef get_pos(vcf_line):", "body": "if not vcf_line:return Nonevcf_data = vcf_line.strip().split('')return_data = dict()return_data[''] = CHROM_INDEX[vcf_data[]]return_data[''] = int(vcf_data[])return return_data", "docstring": "Very lightweight parsing of a vcf line to get position.\n\nReturns a dict containing:\n'chrom': index of chromosome (int), indicates sort order\n'pos': position on chromosome (int)", "id": "f10420:c1:m5"} {"signature": "def match_to_clinvar(genome_file, clin_file):", "body": "clin_curr_line = _next_line(clin_file)genome_curr_line = _next_line(genome_file)while clin_curr_line.startswith(''):clin_curr_line = _next_line(clin_file)while genome_curr_line.startswith(''):genome_curr_line = _next_line(genome_file)while clin_curr_line and genome_curr_line:clin_curr_pos = VCFLine.get_pos(clin_curr_line)genome_curr_pos = VCFLine.get_pos(genome_curr_line)try:if clin_curr_pos[''] > genome_curr_pos['']:genome_curr_line = _next_line(genome_file)continueelif clin_curr_pos[''] < genome_curr_pos['']:clin_curr_line = _next_line(clin_file)continueif clin_curr_pos[''] > genome_curr_pos['']:genome_curr_line = _next_line(genome_file)continueelif clin_curr_pos[''] < genome_curr_pos['']:clin_curr_line = _next_line(clin_file)continueexcept StopIteration:breakgenome_vcf_line = GenomeVCFLine(vcf_line=genome_curr_line,skip_info=True)if not genome_vcf_line.genotype_allele_indexes:genome_curr_line = _next_line(genome_file)continueclinvar_vcf_line = ClinVarVCFLine(vcf_line=clin_curr_line)if not genome_vcf_line.ref_allele == clinvar_vcf_line.ref_allele:try:genome_curr_line = _next_line(genome_file)clin_curr_line = _next_line(clin_file)continueexcept StopIteration:breakgenotype_allele_indexes = genome_vcf_line.genotype_allele_indexesgenome_alleles = [genome_vcf_line.alleles[x] forx in genotype_allele_indexes]if len(genome_alleles) == :zygosity = ''elif len(genome_alleles) == :if genome_alleles[].sequence == genome_alleles[].sequence:zygosity = ''genome_alleles = [genome_alleles[]]else:zygosity = ''else:raise ValueError('' +'' +'' +str(genome_vcf_line))for genome_allele in genome_alleles:for allele in clinvar_vcf_line.alleles:if genome_allele.sequence == allele.sequence:if hasattr(allele, ''):yield (genome_vcf_line, allele, zygosity)try:genome_curr_line = _next_line(genome_file)clin_curr_line = _next_line(clin_file)except StopIteration:break", "docstring": "Match a genome VCF to variants in the ClinVar 
VCF file\n\nActs as a generator, yielding tuples of:\n(ClinVarVCFLine, ClinVarAllele, zygosity)\n\n'zygosity' is a string and corresponds to the genome's zygosity for that\nClinVarAllele. It can be either: 'Het' (heterozygous), 'Hom' (homozygous),\nor 'Hem' (hemizygous, e.g. X chromosome in XY individuals).", "id": "f10421:m1"} {"signature": "def nav_to_vcf_dir(ftp, build):", "body": "if build == '':ftp.cwd(DIR_CLINVAR_VCF_B37)elif build == '':ftp.cwd(DIR_CLINVAR_VCF_B38)else:raise IOError(\"\")", "docstring": "Navigate an open ftplib.FTP to appropriate directory for ClinVar VCF files.\n\nArgs:\n ftp: (type: ftplib.FTP) an open connection to ftp.ncbi.nlm.nih.gov\n build: (type: string) genome build, either 'b37' or 'b38'", "id": "f10422:m0"} {"signature": "def _parse_genotype(self, vcf_fields):", "body": "format_col = vcf_fields[].split('')genome_data = vcf_fields[].split('')try:gt_idx = format_col.index('')except ValueError:return []return [int(x) for x in re.split(r'', genome_data[gt_idx]) ifx != '']", "docstring": "Parse genotype from VCF line data", "id": "f10423:c0:m2"} {"signature": "def itertable(table):", "body": "for item in table:res = {k.lower(): nfd(v) if isinstance(v, text_type) else v for k, v in item.items()}for extra in res.pop('', []):k, _, v = extra.partition('')res[k.strip()] = v.strip()yield res", "docstring": "Auxiliary function for iterating over a data table.", "id": "f10437:m3"} {"signature": "def __getitem__(self, sound):", "body": "return self.resolve_sound(sound)", "docstring": "Return a Sound instance matching the specification.", "id": "f10437:c0:m2"} {"signature": "@command()def _make_package(args): ", "body": "from lingpy.sequence.sound_classes import token2classfrom lingpy.data import Modelcolumns = ['', '', '', '', '', '']bipa = TranscriptionSystem('')for src, rows in args.repos.iter_sources(type=''):args.log.info(''.format(src['']))uritemplate = URITemplate(src['']) if src[''] else Noneout = [['', '', '', '','', ''] + columns]graphemes = set()for row in rows:if row[''] in graphemes:args.log.warn(''.format(row['']))continuegraphemes.add(row[''])if not row['']:bipa_sound = bipa[row['']]explicit = ''else:bipa_sound = bipa[row['']]explicit = ''generated = '' if bipa_sound.generated else ''if is_valid_sound(bipa_sound, bipa):bipa_grapheme = bipa_sound.sbipa_name = bipa_sound.nameelse:bipa_grapheme, bipa_name = '', ''url = uritemplate.expand(**row) if uritemplate else row.get('', '')out.append([bipa_grapheme, bipa_name, generated, explicit, row[''],url] + [row.get(c, '') for c in columns])found = len([o for o in out if o[] != ''])args.log.info(''.format(found, len(out), found / len(out) * ))with UnicodeWriter(pkg_path('', ''.format(src[''])), delimiter='') as writer:writer.writerows(out)count = with UnicodeWriter(pkg_path('', ''), delimiter='') as writer:writer.writerow(['', ''] + SOUNDCLASS_SYSTEMS)for grapheme, sound in sorted(bipa.sounds.items()):if not sound.alias:writer.writerow([sound.name, grapheme] + [token2class(grapheme, Model(cls)) for cls in SOUNDCLASS_SYSTEMS])count += args.log.info(''.format(count))", "docstring": "Prepare transcriptiondata from the transcription sources.", "id": "f10438:m1"} {"signature": "def resolve_sound(self, sound):", "body": "sound = sound if isinstance(sound, Sound) else self.system[sound]if sound.name in self.data:return ''.join([x[''] for x in self.data[sound.name]])raise KeyError(\"\")", "docstring": "Function tries to identify a sound in the data.\n\n Notes\n -----\n The function tries to resolve sounds to take a sound 
with less complex\n features in order to yield the next approximate sound class, if the\n transcription data are sound classes.", "id": "f10439:c0:m1"} {"signature": "def __init__(self, id_):", "body": "if hasattr(self, ''):returnassert id_system = pkg_path('', id_)if not (system.exists() and system.is_dir()):raise ValueError(''.format(id_))self.system = TableGroup.from_file(pkg_path('', ''))self.system._fname = system / ''self.features = {'': {}, '': {}, '': {}}self._feature_values = {}features = jsonlib.load(pkg_path('', ''))self.diacritics = dict(consonant={}, vowel={}, click={}, diphthong={}, tone={}, cluster={})for dia in itertable(self.system.tabledict['']):if not dia[''] and not dia['']:self.features[dia['']][dia['']] = dia['']self._feature_values[dia['']] = dia['']self.diacritics[dia['']][dia['']] = dia['']self.sound_classes = {}self.columns = {} self.sounds = {} self._covered = {}aliases = []for cls in [Consonant, Vowel, Tone, Marker]: type_ = cls.__name__.lower()self.sound_classes[type_] = clsself.columns[type_] = [c[''].lower() for c inself.system.tabledict[''.format(type_)].asdict()['']['']]for l, item in enumerate(itertable(self.system.tabledict[''.format(type_)])):if item[''] in self.sounds:raise ValueError(''.format(type_ + '', l + , item['']))sound = cls(ts=self, **item)for key, value in item.items():if key not in {'', '', ''} andvalue and value not in self._feature_values:self._feature_values[value] = keyif type_ != '' and value not in features[type_][key]:raise ValueError(\"\".format(key, value, l + ))self.sounds[item['']] = soundif not sound.alias:if sound.featureset in self.features:raise ValueError(''.format(type_ + '', l + , sound.name))self.features[sound.featureset] = soundelse:aliases += [(l, sound.type, sound.featureset)]if [x for x in aliases if x[] not in self.features]: error = ''.join(text_type(x[] + ) + '' + text_type(x[])for x in aliases if x[] not in self.features)raise ValueError(''.format(error))self._regex = Noneself._update_regex()self._normalize = {norm(r['']): norm(r[''])for r in itertable(self.system.tabledict[''])}", "docstring": ":param system: The name of a transcription system or a directory containing one.", "id": "f10441:c0:m0"} {"signature": "def _norm(self, string):", "body": "nstring = norm(string)if \"\" in string:s, t = string.split('')nstring = treturn self.normalize(nstring)", "docstring": "Extended normalization: normalize by list of norm-characers, split\n by character \"/\".", "id": "f10441:c0:m2"} {"signature": "def normalize(self, string):", "body": "return ''.join([self._normalize.get(x, x) for x in nfd(string)])", "docstring": "Normalize the string according to normalization list", "id": "f10441:c0:m3"} {"signature": "def _from_name(self, string):", "body": "components = string.split('')if frozenset(components) in self.features:return self.features[frozenset(components)]rest, sound_class = components[:-], components[-]if sound_class in ['', '']:if string.startswith('') and '' in string:extension = {'': '', '': ''}[sound_class]string_ = ''.join(string.split('')[:-])from_, to_ = string_.split('')v1, v2 = frozenset(from_.split('') + [extension]), frozenset(to_.split('') + [extension])if v1 in self.features and v2 in self.features:s1, s2 = (self.features[v1], self.features[v2])if sound_class == '':return Diphthong.from_sounds(s1 + s2, s1, s2, self) else:return Cluster.from_sounds(s1 + s2, s1, s2, self) else:s1, s2 = self._from_name(from_ + '' + extension), self._from_name(to_ + '' + extension)if not (isinstance(s1, UnknownSound) or 
isinstance(s2, UnknownSound)): if sound_class == '':return Diphthong.from_sounds( s1 + s2, s1, s2, self)return Cluster.from_sounds(s1 + s2, s1, s2, self) raise ValueError('')else:raise ValueError('')if sound_class not in self.sound_classes:raise ValueError('')args = {self._feature_values.get(comp, ''): comp for comp in rest}if '' in args:raise ValueError('')args[''] = ''args[''] = selfsound = self.sound_classes[sound_class](**args)if sound.featureset not in self.features:sound.generated = Truereturn soundreturn self.features[sound.featureset]", "docstring": "Parse a sound from its name", "id": "f10441:c0:m4"} {"signature": "def _parse(self, string):", "body": "nstring = self._norm(string)if nstring in self.sounds:sound = self.sounds[nstring]sound.normalized = nstring != stringsound.source = stringreturn soundmatch = list(self._regex.finditer(nstring))if len(match) == :sound1 = self._parse(nstring[:match[].start()])sound2 = self._parse(nstring[match[].start():])if '' not in (sound1.type, sound2.type) andsound1.type == sound2.type:if sound1.type == '':return Diphthong.from_sounds( string, sound1, sound2, self)elif sound1.type == '' andsound1.manner in ('', '', '', '') andsound2.manner in ('', '', '', ''):return Cluster.from_sounds( string, sound1, sound2, self)return UnknownSound(grapheme=nstring, source=string, ts=self) if len(match) != :return UnknownSound(grapheme=nstring, source=string, ts=self) pre, mid, post = nstring.partition(nstring[match[].start():match[].end()])base_sound = self.sounds[mid]if isinstance(base_sound, Marker): assert pre or postreturn UnknownSound(grapheme=nstring, source=string, ts=self) features = attr.asdict(base_sound)features.update(source=string,generated=True,normalized=nstring != string,base=base_sound.grapheme)grapheme, sound = '', ''for dia in [p + EMPTY for p in pre]:feature = self.diacritics[base_sound.type].get(dia, {})if not feature:return UnknownSound( grapheme=nstring, source=string, ts=self)features[self._feature_values[feature]] = featuregrapheme += dia[]sound += self.features[base_sound.type][feature][]grapheme += base_sound.graphemesound += base_sound.sfor dia in [EMPTY + p for p in post]:feature = self.diacritics[base_sound.type].get(dia, {})if not feature:return UnknownSound( grapheme=nstring, source=string, ts=self)features[self._feature_values[feature]] = featuregrapheme += dia[]sound += self.features[base_sound.type][feature][]features[''] = soundnew_sound = self.sound_classes[base_sound.type](**features)if text_type(new_sound) != sound:new_sound.alias = Trueif grapheme != sound:new_sound.alias = Truenew_sound.grapheme = graphemereturn new_sound", "docstring": "Parse a string and return its features.\n\n :param string: A one-symbol string in NFD\n\n Notes\n -----\n Strategy is rather simple: we determine the base part of a string and\n then search left and right of this part for the additional features as\n expressed by the diacritics. 
Fails if a segment has more than one basic\n part.", "id": "f10441:c0:m5"} {"signature": "def is_valid_sound(sound, ts):", "body": "if isinstance(sound, (Marker, UnknownSound)):return Falses1 = ts[sound.name]s2 = ts[sound.s]return s1.name == s2.name and s1.s == s2.s", "docstring": "Check the consistency of a given transcription system conversino", "id": "f10442:m0"} {"signature": "def __eq__(self, other):", "body": "return self.ts.id == other.ts.id and self.grapheme == other.grapheme", "docstring": "In the absence of features, we consider symbols equal, if they belong to the same\nsystem and are represented by the same grapheme.", "id": "f10442:c0:m2"} {"signature": "@propertydef uname(self):", "body": "try:return ''.join(unicodedata.name(ss) for ss in self.__unicode__())except TypeError:return ''except ValueError:return ''", "docstring": "Return unicode name(s) for a character set.", "id": "f10442:c0:m4"} {"signature": "@propertydef codepoints(self):", "body": "return ''.join('' + ('' + hex(ord(x))[:])[-:] for x in self.__unicode__())", "docstring": "Return unicode codepoint(s) for a grapheme.", "id": "f10442:c0:m5"} {"signature": "def __unicode__(self):", "body": "if not self.generated:if not self.alias and self.grapheme in self.ts.sounds:return self.graphemeelif self.alias and self.featureset in self.ts.features:return text_type(self.ts.features[self.featureset])raise ValueError(''.format(self.grapheme)) elements = [f for f in self._features() if f not in EXCLUDE_FEATURES] + [self.type]base_str = self.base or ''base_graphemes = []while elements:base = self.ts.features.get(frozenset(elements))if base:base_graphemes.append(base.grapheme)elements.pop()base_str = base_graphemes[-] if base_graphemes else base_str or ''base_vals = {self.ts._feature_values[elm] for elm inself.ts.sounds[base_str].name.split('')[:-]} ifbase_str != '' else {}out = []for p in self._write_order['']:if p not in base_vals and getattr(self, p, '') in self._features():out.append(norm(self.ts.features[self.type].get(getattr(self, p, ''), '')))out.append(base_str)for p in self._write_order['']:if p not in base_vals and getattr(self, p, '') in self._features():out.append(norm(self.ts.features[self.type].get(getattr(self, p, ''), '')))return ''.join(out)", "docstring": "Return the reference representation of the sound.\n\nNote\n----\nWe first try to return the non-alias value in our data. 
If this fails,\nwe create the sound based on it's feature representation.", "id": "f10442:c2:m9"} {"signature": "@propertydef table(self):", "body": "tbl = []features = [f for f in self._name_order if f not in self.ts.columns[self.type]]if self.generated and self.s != self.source:tbl += [self.__unicode__() + '' + self.source]else:tbl += [self.__unicode__()]for name in self.ts.columns[self.type][:]:if name != '' and name != '':tbl += [getattr(self, name) or '']elif name == '':tbl += ['' if getattr(self, name) else '']else:bundle = []for f in features:val = getattr(self, f)if val:bundle += [''.format(f, val)]tbl += [''.join(bundle)]return tbl", "docstring": "Returns the tabular representation of the sound as given in our data", "id": "f10442:c2:m11"} {"signature": "@propertydef table(self):", "body": "return [self.grapheme, self.from_sound.name, self.to_sound.name]", "docstring": "Overwrite the table attribute for complex sounds", "id": "f10442:c5:m3"} {"signature": "def resolve_sound(self, sound):", "body": "sound = sound if isinstance(sound, Symbol) else self.system[sound]if sound.name in self.data:return self.data[sound.name]['']if not sound.type == '':if sound.type in ['', '']:return self.resolve_sound(sound.from_sound)name = [s for s in sound.name.split('') ifself.system._feature_values.get(s, '') not in['', '', '']]while len(name) >= :sound = self.system.get(''.join(name))if sound and sound.name in self.data:return self.resolve_sound(sound)name.pop()raise KeyError(\"\")", "docstring": "Function tries to identify a sound in the data.\n\n Notes\n -----\n The function tries to resolve sounds to take a sound with less complex\n features in order to yield the next approximate sound class, if the\n transcription data are sound classes.", "id": "f10444:c0:m1"} {"signature": "def _normalize_csp_header(header):", "body": "return {p.strip() for p in (header or '').split('')}", "docstring": "Normalize a CSP header for consistent comparisons.", "id": "f10450:m2"} {"signature": "def __init__(self, app=None, **kwargs):", "body": "self.limiter = Noneself.talisman = Noneif app:self.init_app(app, **kwargs)", "docstring": "r\"\"\"Extension initialization.\n\n :param app: An instance of :class:`~flask.Flask`.\n :param \\**kwargs: Keyword arguments are passed to ``init_app`` method.", "id": "f10457:c0:m0"} {"signature": "def init_app(self, app, **kwargs):", "body": "self.init_config(app)self.limiter = Limiter(app, key_func=get_ipaddr)if app.config['']:self.talisman = Talisman(app, **app.config.get('', {}))if app.config['']:blueprint = Blueprint('', __name__)@blueprint.route('')def ping():\"\"\"\"\"\"return ''ping.talisman_view_options = {'': False}app.register_blueprint(blueprint)requestid_header = app.config.get('')if requestid_header:@app.before_requestdef set_request_id():\"\"\"\"\"\"request_id = request.headers.get(requestid_header)if request_id:g.request_id = request_id[:]try:from flask_debugtoolbar import DebugToolbarExtensionapp.extensions[''] = DebugToolbarExtension(app)except ImportError:app.logger.debug('')app.extensions[''] = self", "docstring": "Initialize application object.\n\n :param app: An instance of :class:`~flask.Flask`.", "id": "f10457:c0:m1"} {"signature": "def init_config(self, app):", "body": "config_apps = ['', '']flask_talisman_debug_mode = [\"\"]for k in dir(config):if any([k.startswith(prefix) for prefix in config_apps]):app.config.setdefault(k, getattr(config, k))if app.config['']:app.config.setdefault('', {})headers = app.config['']if headers.get('') != 
{}:headers.setdefault('', {})csp = headers['']if csp.get('') != []:csp.setdefault('', [])csp[''] += flask_talisman_debug_mode", "docstring": "Initialize configuration.\n\n :param app: An instance of :class:`~flask.Flask`.", "id": "f10457:c0:m2"} {"signature": "@propertydef trusted_hosts(self):", "body": "if current_app:return current_app.config.get('', None)", "docstring": "Get list of trusted hosts.", "id": "f10461:c0:m0"} {"signature": "def config_loader(app, **kwargs_config):", "body": "local_templates_path = os.path.join(app.instance_path, '')if os.path.exists(local_templates_path):app.jinja_loader = ChoiceLoader([FileSystemLoader(local_templates_path),app.jinja_loader,])app.jinja_options = dict(app.jinja_options,cache_size=,bytecode_cache=BytecodeCache(app))invenio_config_loader(app, **kwargs_config)", "docstring": "Configuration loader.\n\n Adds support for loading templates from the Flask application's instance\n folder (``/templates``).", "id": "f10462:m0"} {"signature": "def app_class():", "body": "try:pkg_resources.get_distribution('')from invenio_files_rest.app import Flask as FlaskBaseexcept pkg_resources.DistributionNotFound:from flask import Flask as FlaskBaseclass Request(TrustedHostsMixin, FlaskBase.request_class):passclass Flask(FlaskBase):request_class = Requestreturn Flask", "docstring": "Create Flask application class.\n\n Invenio-Files-REST needs to patch the Werkzeug form parsing in order to\n support streaming large file uploads. This is done by subclassing the Flask\n application class.", "id": "f10462:m1"} {"signature": "def lower(option,value):", "body": "if type(option) is str:option=option.lower()if type(value) is str:value=value.lower()return (option,value)", "docstring": "Enforces lower case options and option values where appropriate", "id": "f10465:m0"} {"signature": "def to_string(option,value):", "body": "try:value=value.__str__()except AttributeError:passreturn (option,value)", "docstring": "Converts any values to strings when appropriate", "id": "f10465:m1"} {"signature": "def to_float(option,value):", "body": "if type(value) is str:try:value=float(value)except ValueError:passreturn (option,value)", "docstring": "Converts string values to floats when appropriate", "id": "f10465:m2"} {"signature": "def to_bool(option,value):", "body": "if type(value) is str:if value.lower() == '':value=Trueelif value.lower() == '':value=Falsereturn (option,value)", "docstring": "Converts string values to booleans when appropriate", "id": "f10465:m3"} {"signature": "def __init__(self,config=None,rules=None):", "body": "self._rules = rules or []self._dict = config or {}self.enforce_rules()", "docstring": ":param config: Old configuration\n:type config: behaving like dict or Config", "id": "f10465:c0:m0"} {"signature": "def set_defaults(self,defaults):", "body": "self._set_defaults(self,defaults)", "docstring": "Set options but only if they haven't already been defined\n\n:param defaults: Options and values to be set\n:type defaults: behaving like _dict or Config", "id": "f10465:c0:m12"} {"signature": "def fork(self,name):", "body": "fork=deepcopy(self)self[name]=forkreturn fork", "docstring": "Create fork and store it in current instance", "id": "f10465:c0:m14"} {"signature": "def sub_config(self,name):", "body": "sub = Config()self[name]=subreturn sub", "docstring": "Return empty Config object and store this object in current Config", "id": "f10465:c0:m15"} {"signature": "def black_scholes(times,r,sigma,S0,d,M,dW=None):", "body": "N=len(times)times = times.flatten()p0 = 
np.log(S0)if dW is None:dW=np.sqrt(times[:]-times[:-])[None,:,None]*np.random.normal(size=(M,N-,d))if np.squeeze(sigma).ndim<=:dF = sigma*dWito_correction = np.squeeze(sigma**/)else:dF = np.einsum('',sigma,dW)ito_correction = np.sum(sigma**,)/drift = (r-ito_correction)*times[None,:,None]diffusion = integral(dF=dF,axis=,cumulative = True)return np.exp(p0 + drift + diffusion)", "docstring": "Return M Euler-Maruyama sample paths with N time steps of S_t, where \n dS_t = S_t*r*dt+S_t*sigma*dW_t\n S(0)=S0\n\n:rtype: M x N x d array", "id": "f10466:m0"} {"signature": "def heston(times,mu,rho,kappa,theta,xi,S0,nu0,d,M,nu_1d=True):", "body": "d_nu = if nu_1d else dnu = np.zeros((M,len(times),d_nu))S = np.zeros((M,len(times),d))nu[:,,:] = nu0S[:,,:] = S0if *kappa*theta<=xi**:raise ValueError('')test = np.std(np.diff(times.flatten())) if test>:raise ValueErrordt = times[]-times[]N = len(times)if d == :if np.array(rho).size ==:rho = np.array([[,rho],[rho,]])chol = np.linalg.cholesky(rho)dW = np.sqrt(dt)*np.einsum('',chol,np.random.normal(size=(M,N-,d+d_nu)))for i in range(,N):dt = times[i]-times[i-]nu[:,i,:] = np.abs(nu[:,i-,:] + kappa*(theta-nu[:,i-,:])*dt+xi*np.sqrt(nu[:,i-,:])*dW[:,i-,d:])S = S0*np.exp(integral(np.sqrt(nu),dF = dW[:,:,:d],axis=,cumulative = True)+integral(mu - *nu,F = times,axis=,trapez=False,cumulative = True))return np.concatenate((S,nu),axis=-)", "docstring": "Return M Euler-Maruyama sample paths with N time steps of (S_t,v_t), where\n (S_t,v_t) follows the Heston model of mathematical finance\n\n:rtype: M x N x d array", "id": "f10466:m1"} {"signature": "def fBrown(H,T,N,M,dW = None,cholesky = False):", "body": "alpha = -Htimes = np.linspace(, T, N)dt = T/(N-)if cholesky:if dW is not None:raise ValueError('')times = times[:]tdt = times/np.reshape(times,(-,))tdt[np.tril_indices(N-,-)]=cov = np.reshape(times,(-,))**(-*alpha)*(/(-alpha))*(tdt-)**(-alpha)*scipy.special.hyp2f1(alpha,-alpha,-alpha,/(-tdt))cov[,:] = np.fill_diagonal(cov,times**(-*alpha)/(-*alpha))cov[np.tril_indices(N-,-)] = cov.T[np.tril_indices(N-,-)]L = scipy.linalg.cholesky(cov)return np.concatenate((np.zeros((,M)),L.T@np.random.normal(size=(N-,M))))if dW is None:dW = np.sqrt(dt)*np.random.normal(size=(N-,M))if H == :return integral(dF = dW,cumulative = True) a = /dt/(-alpha)*((T-times[N-::-])**(-alpha)-(T-times[::-])**(-alpha))out = toeplitz_multiplication(a,np.zeros_like(a),dW[::-])[::-]out -=a[]*dWcov = np.array([[ dt**(-*alpha)/(-*alpha),dt**(-alpha)/(-alpha)],[dt**(-alpha)/(-alpha),dt]])var = cov[,]-cov[,]**/cov[,]out += cov[,]/cov[,]*dW out += np.sqrt(var)*np.random.normal(size = (N-,M))out = np.concatenate((np.zeros((,M)),out))return out", "docstring": "Sample fractional Brownian motion with differentiability index H \non interval [0,T] (H=1/2 yields standard Brownian motion)\n\n:param H: Differentiability, larger than 0\n:param T: Final time\n:param N: Number of time steps\n:param M: Number of samples\n:param dW: Driving noise, optional", "id": "f10466:m2"} {"signature": "def r_bergomi(H,T,eta,xi,rho,S0,r,N,M,dW=None,dW_orth=None,cholesky = False,return_v=False):", "body": "times = np.linspace(, T, N)dt = T/(N-)times = np.reshape(times,(-,))if dW is None:dW = np.sqrt(dt)*np.random.normal(size=(N-,M))if dW_orth is None:dW_orth = np.sqrt(dt)*np.random.normal(size=(N-,M))dZ = rho*dW+np.sqrt(-rho**)*dW_orthY = eta*np.sqrt(*H)*fBrown(H,T,N,M,dW =dW,cholesky = cholesky)v = xi*np.exp(Y-*(eta**)*times**(*H))S = S0*np.exp(integral(np.sqrt(v),dF = dZ,axis=,cumulative = True)+integral(r - *v,F = 
times,axis=,trapez=False,cumulative = True))if return_v:return np.array([S,v]).Telse:return np.array([S]).T", "docstring": "Return M Euler-Maruyama sample paths with N time steps of (S_t,v_t), where\n (S_t,v_t) follows the rBergomi model of mathematical finance\n\n:rtype: M x N x d array", "id": "f10466:m3"} {"signature": "def path_from_keywords(keywords,into=''):", "body": "subdirs = []def prepare_string(s):s = str(s)s = re.sub(''+f\"\",'',s)if into=='':s = s.replace('', '')if '' in s:s = s.title()s = s.replace('','')return sif isinstance(keywords,set):keywords_list = sorted(keywords)for property in keywords_list:subdirs.append(prepare_string(property))else:keywords_list = sorted(keywords.items())for property,value in keywords_list: if Bool.valid(value):subdirs.append(('' if value else ('' if into=='' else ''))+prepare_string(property))elif (Float|Integer).valid(value):subdirs.append(''.format(prepare_string(property),prepare_string(value)))else:subdirs.append(''.format(prepare_string(property),'' if into == '' else '',prepare_string(value)))if into == '':out = os.path.join(*subdirs)else:out = ''.join(subdirs)return out", "docstring": "turns keyword pairs into path or filename \n\nif `into=='path'`, then keywords are separted by underscores, else keywords are used to create a directory hierarchy", "id": "f10481:m0"} {"signature": "def find_files(pattern, path=None,match_name=False):", "body": "if not path:path = os.getcwd()result = []for root, __, files in os.walk(path):for name in files:if fnmatch.fnmatch(name if match_name else os.path.join(root,name),pattern):result.append(os.path.join(root, name))return result", "docstring": "https://stackoverflow.com/questions/1724693/find-a-file-in-python\n\nWARNING: pattern is by default matched to entire path not to file names", "id": "f10481:m2"} {"signature": "def find_directories(pattern, path=None,match_name=False):", "body": "if not path:path = os.getcwd()result = []for root, __, __ in os.walk(path):match_against = os.path.basename(root) if match_name else roottry:does_match = pattern.match(match_against)except AttributeError:does_match = fnmatch.fnmatch(match_against,pattern)if does_match:result.append(root)return result", "docstring": "WARNING: pattern is matched to entire path, not directory names, unless\nmatch_name = True", "id": "f10481:m3"} {"signature": "def zip_dir(zip_name, source_dir,rename_source_dir=False):", "body": "src_path = Path(source_dir).expanduser().resolve()with ZipFile(zip_name, '', ZIP_DEFLATED) as zf:for file in src_path.rglob(''):path_in_zip = str(file.relative_to(src_path.parent))if rename_source_dir != False:_,tail = path_in_zip.split(os.sep,)path_in_zip=os.sep.join([rename_source_dir,tail])zf.write(str(file.resolve()), path_in_zip)", "docstring": "https://stackoverflow.com/questions/1855095/how-to-create-a-zip-archive-of-a-directory", "id": "f10481:m4"} {"signature": "def unique(seq):", "body": "has = []return [x for x in seq if not (x in has or has.append(x))]", "docstring": "https://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-whilst-preserving-order", "id": "f10482:m0"} {"signature": "def __init__(self, init_dict=None):", "body": "dict.__init__(self)if init_dict:for key in init_dict:self[key] = init_dict[key]", "docstring": ":param init_dict: Initial state of function\n:type init_dict: Dictionary whos values support addition and scalar multiplication", "id": "f10482:c0:m0"} {"signature": "def expand_domain(self, X):", "body": "for x in X:self[x] = None", "docstring": "Expand 
domain\n\n:param X: New elements of domain\n:type X: Iterable", "id": "f10482:c0:m1"} {"signature": "def __add__(self, other):", "body": "F = RFunction()for key in self.keys():F[key] = self[key]for key in other.keys():if key in F.keys():F[key] += other[key]else:F[key] = other[key]return F", "docstring": "Vector space operation: Add two real-valued functions", "id": "f10482:c0:m2"} {"signature": "def __radd__(self, other):", "body": "if other == :return selfelse: return self.__add__(other)", "docstring": "When iterables of functions are added, the first function is added to 0\nusing __radd__", "id": "f10482:c0:m3"} {"signature": "def __rmul__(self, other):", "body": "F = RFunction()for key in self.keys():F[key] = other * self[key]return F", "docstring": "Vector space operation: Multiply real-valued function with real", "id": "f10482:c0:m5"} {"signature": "def __init__(self, default):", "body": "self.default = defaultsuper().__init__()", "docstring": ":param default: Default values for unknown keys\n:type default: Function", "id": "f10482:c3:m0"} {"signature": "def validate(arg, spec):", "body": "rejection_subreason = Noneif spec is None:return argtry:return spec._validate(arg)except Exception as e:rejection_subreason = etry:lenience = spec.lenienceexcept AttributeError:passelse:for level in range(, lenience + ):temp = Nonetry:temp = spec.forgive(arg=arg, level=level)except Exception:pass if temp is not None and temp is not arg:arg = temptry:return spec._validate(arg)except Exception as e:rejection_subreason = erejection_reason = ''.format(arg, spec)rejection_subreason = ''.format(rejection_subreason.__class__.__name__, rejection_subreason) if rejection_subreason is not None else ''raise ValidationError(rejection_reason + rejection_subreason)", "docstring": "Make sure `arg` adheres to specification\n\n:param arg: Anything\n:param spec: Specification\n:type spec: Specification\n\n:return: Validated object", "id": "f10483:m0"} {"signature": "def _validate_many(args, specs, defaults,passed_conditions,value_conditions,allow_unknowns,unknowns_spec):", "body": "validated_args = builtins.dict() passed_but_not_specified = set(args.keys()) - set(specs.keys())if passed_but_not_specified:if not allow_unknowns:raise ValueError(('' + ''.format(passed_but_not_specified)))else:for arg in passed_but_not_specified:if unknowns_spec is not None:specs[arg] = unknowns_specif passed_conditions:validate(args, Dict(passed_conditions=passed_conditions))for arg in specs:if (not arg in args) or NotPassed(args[arg]):if arg in defaults:if isinstance(defaults[arg],DefaultGenerator):validated_args[arg] = defaults[arg]()else:validated_args[arg] = defaults[arg]else:validated_args[arg] = NotPassedelse:validated_args[arg] = validate(args[arg], specs[arg])if value_conditions:validated_args = validate(validated_args, value_conditions)return validated_args", "docstring": "Similar to validate but validates multiple objects at once, each with their own specification. 
\n\nFill objects that were specified but not provided with NotPassed or default values\nApply `value_condition` to object dictionary as a whole", "id": "f10483:m1"} {"signature": "def integral(A=None,dF=None,F=None,axis = ,trapez = False,cumulative = False):", "body": "ndim = max(v.ndim for v in (A,dF,F) if v is not None)def broadcast(x):new_shape = []*ndimnew_shape[axis] = -return np.reshape(x,new_shape)if F is not None:assert(dF is None)if F.ndimF = broadcast(F)N = F.shape[axis]dF = F.take(indices = range(,N),axis = axis)-F.take(indices = range(N-),axis = axis)elif dF is not None:if dF.ndimdF = broadcast(dF)N = dF.shape[axis]+else:if A.ndimA = broadcast(A)N = A.shape[axis]if A is not None:if trapez:midA = (A.take(indices = range(,N),axis = axis)+A.take(indices = range(N-),axis = axis))/else:midA = A.take(indices=range(N-),axis=axis)if dF is not None:dY = midA*dFelse:dY = midAelse:dY = dFpad_shape = list(dY.shape)pad_shape[axis] = pad = np.zeros(pad_shape)if cumulative:return np.concatenate((pad,np.cumsum(dY,axis = axis)),axis = axis)else:return np.sum(dY,axis = axis)", "docstring": "Turns an array A of length N (the function values in N points)\nand an array dF of length N-1 (the masses of the N-1 intervals)\ninto an array of length N (the integral \\int A dF at N points, with first entry 0)\n\n:param A: Integrand (optional, default ones, length N)\n:param dF: Integrator (optional, default ones, length N-1)\n:param F: Alternative to dF (optional, length N)\n:param trapez: Use trapezoidal rule (else left point)", "id": "f10484:m6"} {"signature": "def toeplitz_multiplication(a,b,v):", "body": "a = np.reshape(a,(-))b = np.reshape(b,(-))n = len(a)c = np.concatenate((a[[]],b[:],np.zeros(),a[-::-]))p = ifft(fft(c)*fft(v.T,n=*n)).Tif np.all(np.isreal(a)) and np.all(np.isreal(b)) and np.all(np.isreal(v)):return np.real(p[:n])else:return p[:n]", "docstring": "Multiply Toeplitz matrix with first row a and first column b with vector v\n\nNormal matrix multiplication would require storage and runtime O(n^2);\nembedding into a circulant matrix and using FFT yields O(log(n)n)", "id": "f10484:m7"} {"signature": "def grid_evaluation(X, Y, f,vectorized=True):", "body": "XX = np.reshape(np.concatenate([X[..., None], Y[..., None]], axis=), (X.size, ), order='')if vectorized:ZZ = f(XX)else:ZZ = np.array([f(x) for x in XX])return np.reshape(ZZ, X.shape, order='')", "docstring": "Evaluate function on given grid and return values in grid format\n\nAssume X and Y are 2-dimensional arrays containing x and y coordinates, \nrespectively, of a two-dimensional grid, and f is a function that takes\n1-d arrays with two entries. This function evaluates f on the grid points\ndescribed by X and Y and returns another 2-dimensional array of the shape \nof X and Y that contains the values of f.\n\n:param X: 2-dimensional array of x-coordinates\n:param Y: 2-dimensional array of y-coordinates\n:param f: function to be evaluated on grid\n:param vectorized: `f` can handle arrays of inputs\n:return: 2-dimensional array of values of f", "id": "f10484:m8"} {"signature": "def orthonormal_complement_basis(v:NDim()):", "body": "_, _, V = np.linalg.svd(np.array([v]))return V[:]", "docstring": "Return orthonormal basis of complement of vector.\n\n:param v: 1-dimensional numpy array \n:return: Matrix whose .dot() computes coefficients w.r.t. an orthonormal basis of the complement of v \n (i.e. 
whose row vectors form an orthonormal basis of the complement of v)", "id": "f10484:m10"} {"signature": "def weighted_median(values, weights):", "body": "if len(values) == :return values[]if len(values) == :raise ValueError('')values = [float(value) for value in values]indices_sorted = np.argsort(values)values = [values[ind] for ind in indices_sorted]weights = [weights[ind] for ind in indices_sorted]total_weight = sum(weights)below_weight = i = -while below_weight < total_weight / :i += below_weight += weights[i]return values[i]", "docstring": "Returns element such that sum of weights below and above are (roughly) equal\n\n:param values: Values whose median is sought\n:type values: List of reals\n:param weights: Weights of each value\n:type weights: List of positive reals\n:return: value of weighted median\n:rtype: Real", "id": "f10484:m11"} {"signature": "def __init__(self, wrapped, tight=False):", "body": "self.__tight = tightself.__wrapped = array(wrapped) if tight else wrappedself.__hash = int(sha1(wrapped.view(uint8)).hexdigest(), )", "docstring": "r'''Creates a new hashable object encapsulating an ndarray.\n\n wrapped\n The wrapped ndarray.\n\n tight\n Optional. If True, a copy of the input ndaray is created.\n Defaults to False.", "id": "f10484:c0:m0"} {"signature": "def unwrap(self):", "body": "if self.__tight:return array(self.__wrapped)return self.__wrapped", "docstring": "r'''Returns the encapsulated ndarray.\n\n If the wrapper is \"tight\", a copy of the encapsulated ndarray is\n returned. Otherwise, the encapsulated ndarray itself is returned.", "id": "f10484:c0:m3"} {"signature": "def snooze(value):", "body": "for i in range(int( * value)):_ = ** (i / value)", "docstring": "time.sleep() substitute\nKeep busy for some time (very roughly and depending on machine, `value` is in ms)\n\n:param value: Time\n:type value: Number", "id": "f10486:m0"} {"signature": "@validate_args(warnings=False)def EasyHPC(backend:In('', '')|Function='',n_tasks:In('', '', '', '')='',n_results:In('', '')='',aux_output:Bool=True, reduce:Function=None,split_job=NotPassed,parallel = True,method = None,pool = None):", "body": "self = argparse.Namespace()direct_call = (~String&Function).valid(backend)if direct_call:f = backendbackend = ''if backend == '': self.processor = _MPI_processorself.finalizer = _MPI_finalizerif backend == '':self.processor = _MP_processorself.finalizer = Noneself.info = argparse.Namespace()self.info.n_tasks = n_tasksself.info.n_results = n_resultsself.info.parallel = parallelself.info.reduce = reduceself.info.wrap_MPI = Falseself.info.aux_output = aux_output self.info.method = methodself.info.pool = pool or Pool()self.info.split_job = split_jobif self.info.n_tasks == '':if self.info.n_results == '':raise ValueError('')if NotPassed(self.info.split_job):raise ValueError('')if direct_call:def _lam(*args,**kwargs):return _MultiProcessorWrapper_call(args,kwargs,f,self.processor,self.finalizer,self.info)return _lamreturn lambda f: _easy_hpc_call(f,self)", "docstring": ":param n_tasks: How many tasks does the decorated function handle? 
\n:param n_results: If the decorated function handles many tasks at once, are the results reduced (n_results = 'one') or not (as many results as tasks)?\n:param reduce: Function that reduces multiple outputs to a single output\n:param splitjob: Function that converts an input (to the decorated function) that represents one large job to two smaller jobs\n\nNOTE: don't turn this into a class, you'll run into strange pickling errors", "id": "f10489:m1"} {"signature": "def plot_indices(mis, dims=None, weights=None, groups=,legend = True,index_labels=None, colors = None,axis_labels = None,size_exponent=,ax=None):", "body": "if weights is None:weights = {mi: for mi in mis}if Function.valid(weights):weights = {mi:weights(mi) for mi in mis}values = list(weights.values())if Integer.valid(groups):N_g = groupsgroups = [[mi for mi in mis if (weights[mi] > np.percentile(values, /groups*g) or g==) and weights[mi] <= np.percentile(values, /groups*(g+))] for g in range(N_g)]group_names = [''.format(/N_g*(N_g-i-),/N_g*(N_g-i)) for i in reversed(range(N_g))]else:if Function.valid(groups):groups = {mi:groups(mi) for mi in mis}group_names = unique(list(groups.values()))groups = [[mi for mi in mis if groups[mi]==name] for name in group_names]N_g = len(group_names)if colors is None: colors = matplotlib.cm.rainbow(np.linspace(, , N_g)) if Dict.valid(mis):if index_labels is None or weights is None:temp = list(mis.keys())if (List|Tuple).valid(temp[]):if not (index_labels is None and weights is None):raise ValueError('')weights = {mi:mis[mi][] for mi in mis}index_labels= {mi:mis[mi][] for mi in mis}else:if weights is None:weights = miselse:index_labels = mismis = tempelse:raise ValueError('')if dims is None:try:dims = len(mis[])except TypeError:dims = sorted(list(set.union(*(set(mi.active_dims()) for mi in mis)))) if len(dims) > :raise ValueError('')if len(dims) < :warnings.warn('')returnif ax is None:fig = plt.figure() if len(dims) == :ax = fig.gca(projection='')else:ax = fig.gca()size_function = lambda mi: sum([weights[mi2] for mi2 in mis if mi.equal_mod(mi2, lambda dim: dim not in dims)]) sizes = {mi: np.power(size_function(mi), size_exponent) for mi in mis}for i,plot_indices in enumerate(groups):X = np.array([mi[dims[]] for mi in plot_indices])if len(dims) > :Y = np.array([mi[dims[]] for mi in plot_indices])else:Y = np.array([ for mi in plot_indices])if len(dims) > :Z = np.array([mi[dims[]] for mi in plot_indices])else:Z = np.array([ for mi in plot_indices]) sizes_plot = np.array([sizes[mi] for mi in plot_indices])if weights:if len(dims) == :ax.scatter(X, Y, Z, s = * sizes_plot / max(sizes.values()), color=colors[i], alpha=) else:ax.scatter(X, Y, s = * sizes_plot / max(sizes.values()), color=colors[i], alpha=)else:if len(dims) == :ax.scatter(X, Y, Z,color = colors[i],alpha=)else:ax.scatter(X, Y,color=colors[i],alpha=)if True:if len(dims)==:axs=''else:axs=''extents = np.array([getattr(ax, ''.format(dim))() for dim in axs])sz = extents[:,] - extents[:,]maxsize = max(abs(sz))for dim in axs:getattr(ax, ''.format(dim))(, maxsize)if axis_labels is not None:ax.set_xlabel(axis_labels[])if len(dims)>:ax.set_ylabel(axis_labels[])if len(dims)>:ax.set_zlabel(axis_labels[])else:ax.set_xlabel('' + str(dims[])+'',size=)if len(dims) > :ax.set_ylabel('' + str(dims[])+'',size=)if len(dims) > :ax.set_zlabel('' + str(dims[])+'',size=)plt.grid()x_coordinates = [mi[dims[]] for mi in mis]xticks=list(range(min(x_coordinates),max(x_coordinates)+))ax.set_xticks(xticks)if len(dims)>:y_coordinates = [mi[dims[]] for mi in 
mis]ax.set_yticks(list(range(min(y_coordinates),max(y_coordinates)+)))if len(dims)>:z_coordinates = [mi[dims[]] for mi in mis]ax.set_zticks(list(range(min(z_coordinates),max(z_coordinates)+)))if index_labels:for mi in index_labels:ax.annotate(''.format(index_labels[mi]),xy=(mi[],mi[]))if legend and len(group_names)>:ax.legend([patches.Patch(color=color) for color in np.flipud(colors)],group_names)return ax", "docstring": "Plot multi-index set\n\n:param mis: Multi-index set\n:type mis: Iterable of SparseIndices\n:param dims: Which dimensions to use for plotting\n:type dims: List of integers.\n:param weights: Weights associated with each multi-index\n:type weights: Dictionary\n:param quantiles: Number of groups plotted in different colors\n:type quantiles: Integer>=1 or list of colors\n\nTODO: exchange index_labels and dims, exchange quantiles and dims", "id": "f10490:m1"} {"signature": "def ezplot(f,xlim,ylim=None,ax = None,vectorized=True,N=None,contour = False,args=None,kwargs=None,dry_run=False,show=None,include_endpoints=False):", "body": "kwargs = kwargs or {}args = args or []d = if ylim is None else if ax is None:fig = plt.figure()show = show if show is not None else Trueax = fig.gca() if (d== or contour) else fig.gca(projection='')if d == :if N is None:N = if include_endpoints:X = np.linspace(xlim[],xlim[],N)else:L = xlim[] - xlim[]X = np.linspace(xlim[] + L / N, xlim[] - L / N, N)X = X.reshape((-, ))if vectorized:Z = f(X)else:Z = np.array([f(x) for x in X])if not dry_run:C = ax.plot(X, Z,*args,**kwargs)elif d == :if N is None:N = T = np.zeros((N, ))if include_endpoints:T[:,]=np.linspace(xlim[],xlim[],N)T[:,]=np.linspace(ylim[],ylim[],N)else:L = xlim[] - xlim[]T[:, ] = np.linspace(xlim[] + L / N, xlim[] - L / N, N) L = ylim[] - ylim[]T[:, ] = np.linspace(ylim[] + L / N, ylim[] - L / N, N) X, Y = meshgrid(T[:, ], T[:, ])Z = grid_evaluation(X, Y, f,vectorized=vectorized)if contour:if not dry_run:N=colors=np.concatenate((np.ones((N,)),np.tile(np.linspace(,,N).reshape(-,),(,))),axis=)colors = [ [,,],*colors,[,,]]print('',np.max(Z[:]))C = ax.contourf(X,Y,Z,levels = [-np.inf,*np.linspace(-,,N),np.inf],colors=colors)else:if not dry_run:C = ax.plot_surface(X, Y, Z)if show:plt.show()return ax,C,Z", "docstring": "Plot polynomial approximation.\n\n:param vectorized: `f` can handle an array of inputs", "id": "f10490:m2"} {"signature": "def plot3D(X, Y, Z):", "body": "fig = plt.figure()ax = Axes3D(fig)light = LightSource(, )illuminated_surface = light.shade(Z, cmap=cm.coolwarm) Xmin = np.amin(X)Xmax = np.amax(X)Ymin = np.amin(Y)Ymax = np.amax(Y)Zmin = np.amin(Z)Zmax = np.amax(Z)ax.contourf(X, Y, Z, zdir='', offset=Xmin - * (Xmax - Xmin), cmap=cm.coolwarm, alpha=) ax.contourf(X, Y, Z, zdir='', offset=Ymax + * (Ymax - Ymin), cmap=cm.coolwarm, alpha=) ax.contourf(X, Y, Z, zdir='', offset=Zmin - * (Zmax - Zmin), cmap=cm.coolwarm, alpha=) ax.plot_surface(X, Y, Z, cstride=, rstride=, facecolors=illuminated_surface, alpha=)plt.show()", "docstring": "Surface plot.\n\nGenerate X and Y using, for example\n X,Y = np.mgrid[0:1:50j, 0:1:50j]\n or\n X,Y= np.meshgrid([0,1,2],[1,2,3]).\n\n:param X: 2D-Array of x-coordinates\n:param Y: 2D-Array of y-coordinates\n:param Z: 2D-Array of z-coordinates", "id": "f10490:m3"} {"signature": "def plot_convergence(times, values, name=None, title=None, reference='', convergence_type='', expect_residuals=None,expect_times=None, plot_rate='', base = np.exp(),xlabel = '', p=, preasymptotics=True, stagnation=False, marker='',legend='',relative = False,ax = None):", "body": "name 
= name or ''self_reference = (isinstance(reference,str) and reference=='') ax = ax or plt.gca()color = next(ax._get_lines.prop_cycler)['']ax.tick_params(labeltop=False, labelright=True, right=True, which='')ax.yaxis.grid(which=\"\", linestyle='', alpha=)ax.yaxis.grid(which=\"\", linestyle='', alpha=)c_ticks = ACCEPT_MISFIT = values, times = np.squeeze(values), np.squeeze(times)assert(times.ndim == )assert(len(times) == len(values))sorting = np.argsort(times)times = times[sorting]values = values[sorting]if plot_rate == True:plot_rate = ''if plot_rate !='':plot_rate = plot_rate*np.log(base)if self_reference:if len(times) <= :raise ValueError('')limit = values[-]limit_time = times[-]times = times[:-]values = values[:-]else:limit = np.squeeze(reference)limit_time = np.Infresiduals = np.zeros(len(times))N = limit.sizefor L in range(len(times)):if p < np.Inf:residuals[L] = np.power(np.sum(np.power(np.abs(values[L] - limit), p) / N), / p) else:residuals[L] = np.amax(np.abs(values[L] - limit))if relative:if presiduals /= np.power(np.sum(np.power(np.abs(limit),p)/N),/p)else:residuals /= np.amax(np.abs(limit))try:remove = np.isnan(times) | np.isinf(times) | np.isnan(residuals) | np.isinf(residuals) | (residuals == ) | ((times == ) & (convergence_type == ''))except TypeError:print(times,residuals)times = times[~remove]if sum(~remove) < ( if self_reference else ):raise ValueError('')residuals = residuals[~remove]if convergence_type == '':x = np.log(times)limit_x = np.log(limit_time)else:x = timeslimit_x = limit_timemax_x = max(x)y = np.log(residuals)try:rate, offset, min_x_fit, max_x_fit = _fit_rate(x, y, stagnation, preasymptotics, limit_x, have_rate=False if (plot_rate == '' or plot_rate is None) else plot_rate)except FitError as e:warnings.warn(str(e))plot_rate = Falserate = Noneif self_reference:if rate >= :warnings.warn('')else:real_rate = _real_rate(rate, l_bound=min_x_fit, r_bound=max_x_fit, reference_x=limit_x)if (real_rate is None or abs((real_rate - rate) / rate) >= ACCEPT_MISFIT):warnings.warn(('')+ (''.format(real_rate) if real_rate else '')+ (''.format(rate) if rate else '')) if plot_rate:name += '' if plot_rate == '' else ''if convergence_type == '':name+=''.format(rate) else:base_rate = rate/np.log(base)base_rate_str = f''if base_rate_str=='':base_rate_str=''if base_rate_str =='':base_rate_str = ''name+=f''if convergence_type == '':X = np.linspace(np.exp(min_x_fit), np.exp(max_x_fit), c_ticks)ax.loglog(X, np.exp(offset) * X ** rate, '', color=color)else:X = np.linspace(min_x_fit, max_x_fit, c_ticks)ax.semilogy(X, np.exp(offset + rate * X), '', color=color)max_x_data = max_xkeep_1 = (x <= max_x_data)if convergence_type == '':ax.loglog(np.array(times)[keep_1], np.array(residuals)[keep_1], label=name, marker=marker, color=color)ax.loglog(np.array(times), np.array(residuals), marker=marker, color=color, alpha=)else:ax.semilogy(np.array(times)[keep_1], np.array(residuals)[keep_1], label=name, marker=marker, color=color)ax.semilogy(np.array(times), np.array(residuals), marker=marker, color=color, alpha=)if expect_times is not None and expect_residuals is not None:ax.loglog(expect_times, expect_residuals, '', marker=marker, color=color) if name:ax.legend(loc=legend)if title:ax.set_title(title)return rate", "docstring": "Show loglog or semilogy convergence plot.\n\nSpecify :code:`reference` if exact limit is known. 
Otherwise limit is \ntaken to be last entry of :code:`values`.\n\nDistance to limit is computed as RMSE (or analogous p-norm if p is specified)\n\nSpecify either :code:`plot_rate`(pass number or 'fit') or \n:code:`expect_residuals` and :code:`expect_times` to add a second plot with\nthe expected convergence.\n\n:param times: Runtimes\n:type times: List of positive numbers\n:param values: Outputs\n:type values: List of arrays\n:param reference: Exact solution, or 'self' if not available\n:type reference: Array or 'self'\n:param convergence_type: Convergence type\n:type convergence_type: 'algebraic' or 'exponential'\n:param expect_residuals: Expected residuals\n:type expect_residuals: List of positive numbers\n:param expect_times: Expected runtimes\n:type expect_times: List of positive numbers\n:param plot_rate: Expected convergence order\n:type plot_rate: Real or 'fit'\n:param preasymptotics: Ignore initial entries for rate fitting\n:type preasymptotics: Boolean\n:param stagnation: Ignore final entries from rate fitting\n:type stagnation: Boolean\n:param marker: Marker for data points\n:type marker: Matplotlib marker string\n:return: fitted convergence order", "id": "f10490:m5"} {"signature": "def log_calls(function):", "body": "def wrapper(self,*args,**kwargs): self.log.log(group=function.__name__,message='') function(self,*args,**kwargs)self.log.log(group=function.__name__,message='') return wrapper", "docstring": "Decorator that logs function calls in their self.log", "id": "f10492:m0"} {"signature": "def add_runtime(function):", "body": "def wrapper(*args,**kwargs): pr=cProfile.Profile()pr.enable()output = function(*args,**kwargs)pr.disable()return pr,outputreturn wrapper", "docstring": "Decorator that adds a runtime profile object to the output", "id": "f10492:m1"} {"signature": "def print_memory(function):", "body": "import memory_profilerdef wrapper(*args,**kwargs):m = StringIO()temp_func = memory_profiler.profile(func = function,stream=m,precision=)output = temp_func(*args,**kwargs)print(m.getvalue())m.close()return outputreturn wrapper", "docstring": "Decorator that prints memory information at each call of the function", "id": "f10492:m2"} {"signature": "def print_profile(function):", "body": "import memory_profilerdef wrapper(*args,**kwargs):m=StringIO()pr=cProfile.Profile()pr.enable()temp_func = memory_profiler.profile(func=function,stream=m,precision=)output = temp_func(*args,**kwargs)print(m.getvalue())pr.disable()ps = pstats.Stats(pr)ps.sort_stats('').print_stats('',)m.close()return outputreturn wrapper", "docstring": "Decorator that prints memory and runtime information at each call of the function", "id": "f10492:m3"} {"signature": "def declaration(function):", "body": "function,name=_strip_function(function)if not function.__code__.co_code in [empty_function.__code__.co_code, doc_string_only_function.__code__.co_code]: raise ValueError('')def not_implemented_function(*args,**kwargs):raise ValueError(''.format(args[],name))not_implemented_function.__qualname__=not_implemented_function.__name__ return default(not_implemented_function,name=name)", "docstring": "Declare abstract function. 
\n\nRequires function to be empty except for docstring describing semantics.\nTo apply function, first argument must come with implementation of semantics.", "id": "f10492:m8"} {"signature": "def print_runtime(function):", "body": "def wrapper(*args,**kwargs):pr=cProfile.Profile()pr.enable()output = function(*args,**kwargs)pr.disable()ps = pstats.Stats(pr)ps.sort_stats('').print_stats()return outputreturn wrapper", "docstring": "Decorator that prints running time information at each call of the function", "id": "f10492:m9"} {"signature": "def print_peak_memory(func,stream = None):", "body": "import timeimport psutilimport osmemory_denominator=**memory_usage_refresh=def wrapper(*args,**kwargs):from multiprocessing.pool import ThreadPoolpool = ThreadPool(processes=)process = psutil.Process(os.getpid())start_mem = process.memory_info().rssdelta_mem = max_memory = async_result = pool.apply_async(func, args,kwargs)while(not async_result.ready()):current_mem = process.memory_info().rssdelta_mem = current_mem - start_memif delta_mem > max_memory:max_memory = delta_memtime.sleep(memory_usage_refresh)return_val = async_result.get() max_memory /= memory_denominatorif stream is not None:stream.write(str(max_memory))return return_valreturn wrapper", "docstring": "Print peak memory usage (in MB) of a function call\n\n:param func: Function to be called\n:param stream: Stream to write peak memory usage (defaults to stdout) \n\nhttps://stackoverflow.com/questions/9850995/tracking-maximum-memory-usage-by-a-python-function", "id": "f10492:m10"} {"signature": "def smart_range(*args):", "body": "if len(args)==:string_input = Truestring = args[].replace('','')original_args=string.split('')args = []for arg in original_args:try:args.append(ast.literal_eval(arg))except (ValueError,SyntaxError):try:args.append(eval(arg,{'':{}}))except (NameError,SyntaxError):args.append(arg)else:string_input = Falsearg_start = args[]if len(args)>:arg_step = args[]if len(args)>:raise ValueError('')else:arg_step = Nonearg_end = args[-]if String.valid(arg_start) and len(arg_start)==:range_type = ''elif all(Integer.valid(arg) for arg in args):range_type = ''else: if string_input and original_args[][] in ['','']:range_type = ''else:range_type = ''if range_type == '':start = ord(arg_start)step = (ord(arg_step)- start) if arg_step else end = ord(arg_end)out = [chr(i) for i in range(start,end+step,step)]if np.sign(step)*(ord(out[-])-end)>:del out[-]return outelif range_type == '':if string_input:if len(args)== and all('' in oa for oa in original_args):bases,exponents = zip(*[oa.split('') for oa in original_args])if len(set(bases))==:return [int(bases[])**exponent for exponent in smart_range(''.join(exponents))]start = arg_startstep = (arg_step - arg_start) if arg_step is not None else end = arg_endout = list(range(start,end+step,step))if np.sign(step)*(out[-]-end)>:del out[-]return outelif range_type == '':if len(args)== and all('' in oa for oa in original_args):bases,exponents = zip(*[oa.split('') for oa in original_args])if len(set(bases))==:return [float(bases[])**exponent for exponent in smart_range(''.join(exponents)) ]if len(args) == :raise ValueError()start = arg_startstep = arg_step - arg_startend = arg_endout = list(np.arange(start,end+*step,step))return outelif range_type == '':lopen,start = (original_args[][]==''),float(original_args[][:])end,N = original_args[].split('')end,ropen = float(end[:-]),(end[-]=='')N = ast.literal_eval(N)+lopen +ropenpoints = np.linspace(start,end,num=N)return points[lopen:len(points)-ropen]", 
"docstring": "smart_range(1,3,9)==[1,3,5,7,9]", "id": "f10493:m0"} {"signature": "def ld_to_dl(ld):", "body": "if ld:keys = list(ld[])dl = {key:[d[key] for d in ld] for key in keys}return dlelse:return {}", "docstring": "Convert list of dictionaries to dictionary of lists", "id": "f10493:m1"} {"signature": "def chain(*fs):", "body": "def chained(x):for f in reversed(fs):if f:x=f(x)return xreturn chained", "docstring": "Concatenate functions", "id": "f10493:m3"} {"signature": "def cmd_exists(cmd):", "body": "return shutil.which(cmd) is not None", "docstring": "Check whether given command is available on system", "id": "f10493:m6"} {"signature": "def split_list(l,N):", "body": "npmode = isinstance(l,np.ndarray)if npmode:l=list(l)g=np.concatenate((np.array([]),np.cumsum(split_integer(len(l),length=N))))s=[l[g[i]:g[i+]] for i in range(N)]if npmode:s=[np.array(sl) for sl in s]return s", "docstring": "Subdivide list into N lists", "id": "f10493:m8"} {"signature": "def random_string(length):", "body": "return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))", "docstring": "Generate alphanumerical string. Hint: Check whether module tempfile has what you want, especially when you are concerned about race conditions", "id": "f10493:m9"} {"signature": "def random_word(length,dictionary = False):", "body": "if dictionary:try:with open('') as fp:words = [word.lower()[:-] for word in fp.readlines() if re.match(''.format(''+str(length)+''),word)]return random.choice(words)except FileNotFoundError:passvowels = list('')consonants = list('')pairs = [(random.choice(consonants),random.choice(vowels)) for _ in range(length//+)] return ''.join([l for p in pairs for l in p])[:length]", "docstring": "Creates random lowercase words from dictionary or by alternating vowels and consonants\n\nThe second method chooses from 85**length words.\nThe dictionary method chooses from 3000--12000 words for 3<=length<=12\n(though this of course depends on the available dictionary)\n\n:param length: word length\n:param dictionary: Try reading from dictionary, else fall back to artificial words", "id": "f10493:m10"} {"signature": "def string_from_seconds(seconds):", "body": "td = str(timedelta(seconds = seconds))parts = td.split('')if len(parts) == :td = td+''elif len(parts) == :td = ''.join([parts[],parts[][:]])return td", "docstring": "Converts seconds into elapsed time string of form \n\n(X days(s)?,)? 
HH:MM:SS.YY", "id": "f10493:m11"} {"signature": "def input_with_prefill(prompt, text):", "body": "def hook():readline.insert_text(text)readline.redisplay()try:readline.set_pre_input_hook(hook)except Exception:passresult = input(prompt)try:readline.set_pre_input_hook()except Exception:passreturn result", "docstring": "https://stackoverflow.com/questions/8505163/is-it-possible-to-prefill-a-input-in-python-3s-command-line-interface", "id": "f10493:m12"} {"signature": "def is_identifier(s):", "body": "return s.isidentifier() and not keyword.iskeyword(s)", "docstring": "Check if string is valid variable name", "id": "f10493:m14"} {"signature": "def __call__(self, limits):", "body": "if any(np.isinf(limits)):return []if limits[] == limits[]:return np.array([limits[]])return self.locator.tick_values(limits[], limits[])", "docstring": "Compute breaks\n\nParameters\n----------\nlimits : tuple\n Minimum and maximum values\n\nReturns\n-------\nout : array_like\n Sequence of breaks points", "id": "f10499:c1:m1"} {"signature": "def __call__(self, limits):", "body": "if any(np.isinf(limits)):return []n = self.nbase = self.baserng = np.log(limits)/np.log(base)_min = int(np.floor(rng[]))_max = int(np.ceil(rng[]))dtype = float if _min < or _max < else intif _max == _min:return base ** _minby = int(np.floor((_max-_min)/n)) + for step in range(by, , -):breaks = base ** np.arange(_min, _max+, step=step, dtype=dtype)relevant_breaks = ((limits[] <= breaks) &(breaks <= limits[]))if np.sum(relevant_breaks) >= n-:return breaksreturn _log_sub_breaks(n=n, base=base)(limits)", "docstring": "Compute breaks\n\nParameters\n----------\nlimits : tuple\n Minimum and maximum values\n\nReturns\n-------\nout : array_like\n Sequence of breaks points", "id": "f10499:c2:m1"} {"signature": "def __call__(self, major, limits=None, n=None):", "body": "if len(major) < :return np.array([])if limits is None:limits = min_max(major)if n is None:n = self.ndiff = np.diff(major)step = diff[]if len(diff) > and all(diff == step):major = np.hstack([major[]-step,major,major[-]+step])mbreaks = []factors = np.arange(, n+)for lhs, rhs in zip(major[:-], major[:]):sep = (rhs - lhs)/(n+)mbreaks.append(lhs + factors * sep)minor = np.hstack(mbreaks)minor = minor.compress((limits[] <= minor) &(minor <= limits[]))return minor", "docstring": "Minor breaks\n\nParameters\n----------\nmajor : array_like\n Major breaks\nlimits : array_like | None\n Limits of the scale. If *array_like*, must be\n of size 2. If **None**, then the minimum and\n maximum of the major breaks are used.\nn : int\n Number of minor breaks between the major\n breaks. If **None**, then *self.n* is used.\n\nReturns\n-------\nout : array_like\n Minor beraks", "id": "f10499:c4:m1"} {"signature": "def __call__(self, major, limits=None, n=None):", "body": "if not self.trans.dataspace_is_numerical:raise TypeError(\"\"\"\")if limits is None:limits = min_max(major)if n is None:n = self.nmajor = self._extend_breaks(major)major = self.trans.inverse(major)limits = self.trans.inverse(limits)minor = minor_breaks(n)(major, limits)return self.trans.transform(minor)", "docstring": "Minor breaks for transformed scales\n\nParameters\n----------\nmajor : array_like\n Major breaks\nlimits : array_like | None\n Limits of the scale. If *array_like*, must be\n of size 2. If **None**, then the minimum and\n maximum of the major breaks are used.\nn : int\n Number of minor breaks between the major\n breaks. 
If **None**, then *self.n* is used.\n\nReturns\n-------\nout : array_like\n Minor breaks", "id": "f10499:c5:m1"} {"signature": "def _extend_breaks(self, major):", "body": "trans = self.transtrans = trans if isinstance(trans, type) else trans.__class__is_log = trans.__name__.startswith('')diff = np.diff(major)step = diff[]if is_log and all(diff == step):major = np.hstack([major[]-step, major, major[-]+step])return major", "docstring": "Append 2 extra breaks at either end of major\n\nIf breaks of transform space are non-equidistant,\n:func:`minor_breaks` add minor breaks beyond the first\nand last major breaks. The solutions is to extend those\nbreaks (in transformed space) before the minor break call\nis made. How the breaks depends on the type of transform.", "id": "f10499:c5:m2"} {"signature": "def __call__(self, limits):", "body": "if any(pd.isnull(x) for x in limits):return []ret = self.locator.tick_values(*limits)return [num2date(val) for val in ret]", "docstring": "Compute breaks\n\nParameters\n----------\nlimits : tuple\n Minimum and maximum :class:`datetime.datetime` values.\n\nReturns\n-------\nout : array_like\n Sequence of break points.", "id": "f10499:c6:m1"} {"signature": "def __call__(self, limits):", "body": "if any(pd.isnull(x) for x in limits):return []helper = timedelta_helper(limits)scaled_limits = helper.scaled_limits()scaled_breaks = self._breaks_func(scaled_limits)breaks = helper.numeric_to_timedelta(scaled_breaks)return breaks", "docstring": "Compute breaks\n\nParameters\n----------\nlimits : tuple\n Minimum and maximum :class:`datetime.timedelta` values.\n\nReturns\n-------\nout : array_like\n Sequence of break points.", "id": "f10499:c7:m1"} {"signature": "def best_units(self, sequence):", "body": "ts_range = self.value(max(sequence)) - self.value(min(sequence))package = self.determine_package(sequence[])if package == '':cuts = [(, ''),(, ''),(, ''),(, ''),(, ''),(, ''),(, ''),(, ''),(, '')]denomination = NANOSECONDSbase_units = ''else:cuts = [(, ''),(, ''),(, ''),(, ''),(, ''),(, ''),(, '')]denomination = SECONDSbase_units = ''for size, units in reversed(cuts):if ts_range >= size*denomination[units]:return unitsreturn base_units", "docstring": "Determine good units for representing a sequence of timedeltas", "id": "f10499:c8:m3"} {"signature": "def value(self, td):", "body": "if self.package == '':return td.valueelse:return td.total_seconds()", "docstring": "Return the numeric value representation on a timedelta", "id": "f10499:c8:m4"} {"signature": "def scaled_limits(self):", "body": "_min = self.limits[]/self.factor_max = self.limits[]/self.factorreturn _min, _max", "docstring": "Minimum and Maximum to use for computing breaks", "id": "f10499:c8:m5"} {"signature": "def timedelta_to_numeric(self, timedeltas):", "body": "return [self.to_numeric(td) for td in timedeltas]", "docstring": "Convert sequence of timedelta to numerics", "id": "f10499:c8:m6"} {"signature": "def numeric_to_timedelta(self, numerics):", "body": "if self.package == '':return [self.type(int(x*self.factor), units='')for x in numerics]else:return [self.type(seconds=x*self.factor)for x in numerics]", "docstring": "Convert sequence of numerics to timedelta", "id": "f10499:c8:m7"} {"signature": "def to_numeric(self, td):", "body": "if self.package == '':return td.value/NANOSECONDS[self.units]else:return td.total_seconds()/SECONDS[self.units]", "docstring": "Convert timedelta to a number corresponding to the\nappropriate units. 
The appropriate units are those\ndetermined when the object is initialised.", "id": "f10499:c8:m9"} {"signature": "def __call__(self, limits):", "body": "Q = self.Qw = self.wonly_inside = self.only_insidesimplicity_max = self.simplicity_maxdensity_max = self.density_maxcoverage_max = self.coverage_maxsimplicity = self.simplicitycoverage = self.coveragedensity = self.densitylegibility = self.legibilitylog10 = np.log10ceil = np.ceilfloor = np.floordmin, dmax = limitsif dmin > dmax:dmin, dmax = dmax, dminelif dmin == dmax:return np.array([dmin])best_score = -j = while j < float(''):for q in Q:sm = simplicity_max(q, j)if w[]*sm + w[] + w[] + w[] < best_score:j = float('')breakk = while k < float(''):dm = density_max(k)if w[]*sm + w[] + w[]*dm + w[] < best_score:breakdelta = (dmax-dmin)/(k+)/j/qz = ceil(log10(delta))while z < float(''):step = j*q*(**z)cm = coverage_max(dmin, dmax, step*(k-))if w[]*sm + w[]*cm + w[]*dm + w[] < best_score:breakmin_start = int(floor(dmax/step)*j - (k-)*j)max_start = int(ceil(dmin/step)*j)if min_start > max_start:z = z+breakfor start in range(min_start, max_start+):lmin = start * (step/j)lmax = lmin + step*(k-)lstep = steps = simplicity(q, j, lmin, lmax, lstep)c = coverage(dmin, dmax, lmin, lmax)d = density(k, dmin, dmax, lmin, lmax)l = legibility(lmin, lmax, lstep)score = w[]*s + w[]*c + w[]*d + w[]*lif (score > best_score and(not only_inside or(lmin >= dmin and lmax <= dmax))):best_score = scorebest = (lmin, lmax, lstep, q, k)z = z+k = k+j = j+try:locs = best[] + np.arange(best[])*best[]except UnboundLocalError:locs = []return locs", "docstring": "Calculate the breaks\n\nParameters\n----------\nlimits : array\n    Minimum and maximum values.\n\nReturns\n-------\nout : array_like\n    Sequence of break points.", "id": "f10499:c9:m8"} {"signature": "def rescale(x, to=(, ), _from=None):", "body": "if _from is None:_from = np.min(x), np.max(x)return np.interp(x, _from, to)", "docstring": "Rescale numeric vector to have specified minimum and maximum.\n\nParameters\n----------\nx : array_like | numeric\n    1D vector of values to manipulate.\nto : tuple\n    output range (numeric vector of length two)\n_from : tuple\n    input range (numeric vector of length two).\n    If not given, is calculated from the range of x\n\nReturns\n-------\nout : array_like\n    Rescaled values\n\nExamples\n--------\n>>> x = [0, 2, 4, 6, 8, 10]\n>>> rescale(x)\narray([0. , 0.2, 0.4, 0.6, 0.8, 1. ])\n>>> rescale(x, to=(0, 2))\narray([0. , 0.4, 0.8, 1.2, 1.6, 2. ])\n>>> rescale(x, to=(0, 2), _from=(0, 20))\narray([0. , 0.2, 0.4, 0.6, 0.8, 1. 
])", "id": "f10500:m0"} {"signature": "def rescale_mid(x, to=(, ), _from=None, mid=):", "body": "array_like = Truetry:len(x)except TypeError:array_like = Falsex = [x]if not hasattr(x, ''):x = np.asarray(x)if _from is None:_from = np.array([np.min(x), np.max(x)])else:_from = np.asarray(_from)if (zero_range(_from) or zero_range(to)):out = np.repeat(np.mean(to), len(x))else:extent = * np.max(np.abs(_from - mid))out = (x - mid) / extent * np.diff(to) + np.mean(to)if not array_like:out = out[]return out", "docstring": "Rescale numeric vector to have specified minimum, midpoint,\nand maximum.\n\nParameters\n----------\nx : array_like | numeric\n 1D vector of values to manipulate.\nto : tuple\n output range (numeric vector of length two)\n_from : tuple\n input range (numeric vector of length two).\n If not given, is calculated from the range of x\nmid\t: numeric\n mid-point of input range\n\nReturns\n-------\nout : array_like\n Rescaled values\n\nExamples\n--------\n>>> rescale_mid([1, 2, 3], mid=1)\narray([0.5 , 0.75, 1. ])\n>>> rescale_mid([1, 2, 3], mid=2)\narray([0. , 0.5, 1. ])", "id": "f10500:m1"} {"signature": "def rescale_max(x, to=(, ), _from=None):", "body": "array_like = Truetry:len(x)except TypeError:array_like = Falsex = [x]if not hasattr(x, ''):x = np.asarray(x)if _from is None:_from = np.array([np.min(x), np.max(x)])out = x/_from[] * to[]if not array_like:out = out[]return out", "docstring": "Rescale numeric vector to have specified maximum.\n\nParameters\n----------\nx : array_like | numeric\n 1D vector of values to manipulate.\nto : tuple\n output range (numeric vector of length two)\n_from : tuple\n input range (numeric vector of length two).\n If not given, is calculated from the range of x.\n Only the 2nd (max) element is essential to the\n output.\n\nReturns\n-------\nout : array_like\n Rescaled values\n\nExamples\n--------\n>>> x = [0, 2, 4, 6, 8, 10]\n>>> rescale_max(x, (0, 3))\narray([0. , 0.6, 1.2, 1.8, 2.4, 3. ])\n\nOnly the 2nd (max) element of the parameters ``to``\nand ``_from`` are essential to the output.\n\n>>> rescale_max(x, (1, 3))\narray([0. , 0.6, 1.2, 1.8, 2.4, 3. 
])\n>>> rescale_max(x, (0, 20))\narray([ 0., 4., 8., 12., 16., 20.])\n\nIf :python:`max(x) < _from[1]` then values will be\nscaled beyond the requested (:python:`to[1]`) maximum.\n\n>>> rescale_max(x, to=(1, 3), _from=(-1, 6))\narray([0., 1., 2., 3., 4., 5.])", "id": "f10500:m2"} {"signature": "def squish_infinite(x, range=(, )):", "body": "xtype = type(x)if not hasattr(x, ''):x = np.asarray(x)x[x == -np.inf] = range[]x[x == np.inf] = range[]if not isinstance(x, xtype):x = xtype(x)return x", "docstring": "Truncate infinite values to a range.\n\nParameters\n----------\nx : array_like\n Values that should have infinities squished.\nrange : tuple\n The range onto which to squish the infinites.\n Must be of size 2.\n\nReturns\n-------\nout : array_like\n Values with infinites squished.\n\nExamples\n--------\n>>> squish_infinite([0, .5, .25, np.inf, .44])\n[0.0, 0.5, 0.25, 1.0, 0.44]\n>>> squish_infinite([0, -np.inf, .5, .25, np.inf], (-10, 9))\n[0.0, -10.0, 0.5, 0.25, 9.0]", "id": "f10500:m3"} {"signature": "def squish(x, range=(, ), only_finite=True):", "body": "xtype = type(x)if not hasattr(x, ''):x = np.asarray(x)finite = np.isfinite(x) if only_finite else Truex[np.logical_and(x < range[], finite)] = range[]x[np.logical_and(x > range[], finite)] = range[]if not isinstance(x, xtype):x = xtype(x)return x", "docstring": "Squish values into range.\n\nParameters\n----------\nx : array_like\n Values that should have out of range values squished.\nrange : tuple\n The range onto which to squish the values.\nonly_finite: boolean\n When true, only squishes finite values.\n\nReturns\n-------\nout : array_like\n Values with out of range values squished.\n\nExamples\n--------\n>>> squish([-1.5, 0.2, 0.5, 0.8, 1.0, 1.2])\n[0.0, 0.2, 0.5, 0.8, 1.0, 1.0]\n\n>>> squish([-np.inf, -1.5, 0.2, 0.5, 0.8, 1.0, np.inf], only_finite=False)\n[0.0, 0.0, 0.2, 0.5, 0.8, 1.0, 1.0]", "id": "f10500:m4"} {"signature": "def censor(x, range=(, ), only_finite=True):", "body": "if not len(x):return xpy_time_types = (datetime.datetime, datetime.timedelta)np_pd_time_types = (pd.Timestamp, pd.Timedelta,np.datetime64, np.timedelta64)x0 = first_element(x)if type(x0) in py_time_types:return _censor_with(x, range, '')if not hasattr(x, '') and isinstance(x0, np_pd_time_types):return _censor_with(x, range, type(x0)(''))x_array = np.asarray(x)if pdtypes.is_number(x0) and not isinstance(x0, np.timedelta64):null = float('')elif com.is_datetime_arraylike(x_array):null = pd.Timestamp('')elif pdtypes.is_datetime64_dtype(x_array):null = np.datetime64('')elif isinstance(x0, pd.Timedelta):null = pd.Timedelta('')elif pdtypes.is_timedelta64_dtype(x_array):null = np.timedelta64('')else:raise ValueError(\"\"\"\".format(type(x0)))if only_finite:try:finite = np.isfinite(x)except TypeError:finite = np.repeat(True, len(x))else:finite = np.repeat(True, len(x))if hasattr(x, ''):outside = (x < range[]) | (x > range[])bool_idx = finite & outsidex = x.copy()x[bool_idx] = nullelse:x = [null if not range[] <= val <= range[] and f else valfor val, f in zip(x, finite)]return x", "docstring": "Convert any values outside of range to a **NULL** type object.\n\nParameters\n----------\nx : array_like\n Values to manipulate\nrange : tuple\n (min, max) giving desired output range\nonly_finite : bool\n If True (the default), will only modify\n finite values.\n\nReturns\n-------\nx : array_like\n Censored array\n\nExamples\n--------\n>>> a = [1, 2, np.inf, 3, 4, -np.inf, 5]\n>>> censor(a, (0, 10))\n[1, 2, inf, 3, 4, -inf, 5]\n>>> censor(a, (0, 10), False)\n[1, 2, nan, 
3, 4, nan, 5]\n>>> censor(a, (2, 4))\n[nan, 2, inf, 3, 4, -inf, nan]\n\nNotes\n-----\nAll values in ``x`` should be of the same type. ``only_finite`` parameter\nis not considered for Datetime and Timedelta types.\n\nThe **NULL** type object depends on the type of values in **x**.\n\n- :class:`float` - :py:`float('nan')`\n- :class:`int` - :py:`float('nan')`\n- :class:`datetime.datetime` : :py:`np.datetime64(NaT)`\n- :class:`datetime.timedelta` : :py:`np.timedelta64(NaT)`", "id": "f10500:m5"} {"signature": "def _censor_with(x, range, value=None):", "body": "return [val if range[] <= val <= range[] else valuefor val in x]", "docstring": "Censor any values outside of range with ``None``", "id": "f10500:m6"} {"signature": "def zero_range(x, tol=np.finfo(float).eps * ):", "body": "try:if len(x) == :return Trueexcept TypeError:return Trueif len(x) != :raise ValueError('')x = tuple(x)if isinstance(x[], (pd.Timestamp, datetime.datetime)):x = date2num(x)elif isinstance(x[], np.datetime64):return x[] == x[]elif isinstance(x[], (pd.Timedelta, datetime.timedelta)):x = x[].total_seconds(), x[].total_seconds()elif isinstance(x[], np.timedelta64):return x[] == x[]elif not isinstance(x[], (float, int, np.number)):raise TypeError(\"\"\"\".format(type(x[])))if any(np.isnan(x)):return np.nanif x[] == x[]:return Trueif all(np.isinf(x)):return Falsem = np.abs(x).min()if m == :return Falsereturn np.abs((x[] - x[]) / m) < tol", "docstring": "Determine if range of vector is close to zero.\n\nParameters\n----------\nx : array_like | numeric\n Value(s) to check. If it is an array_like, it\n should be of length 2.\ntol : float\n Tolerance. Default tolerance is the `machine epsilon`_\n times :math:`10^2`.\n\nReturns\n-------\nout : bool\n Whether ``x`` has zero range.\n\nExamples\n--------\n>>> zero_range([1, 1])\nTrue\n>>> zero_range([1, 2])\nFalse\n>>> zero_range([1, 2], tol=2)\nTrue\n\n.. _machine epsilon: https://en.wikipedia.org/wiki/Machine_epsilon", "id": "f10500:m7"} {"signature": "def expand_range(range, mul=, add=, zero_width=):", "body": "x = rangetry:x[]except TypeError:x = (x, x)if zero_range(x):new = x[]-zero_width/, x[]+zero_width/else:dx = (x[] - x[]) * mul + addnew = x[]-dx, x[]+dxreturn new", "docstring": "Expand a range with a multiplicative or additive constant\n\nParameters\n----------\nrange : tuple\n Range of data. Size 2.\nmul : int | float\n Multiplicative constant\nadd : int | float | timedelta\n Additive constant\nzero_width : int | float | timedelta\n Distance to use if range has zero width\n\nReturns\n-------\nout : tuple\n Expanded range\n\nExamples\n--------\n>>> expand_range((3, 8))\n(3, 8)\n>>> expand_range((0, 10), mul=0.1)\n(-1.0, 11.0)\n>>> expand_range((0, 10), add=2)\n(-2, 12)\n>>> expand_range((0, 10), mul=.1, add=2)\n(-3.0, 13.0)\n>>> expand_range((0, 1))\n(0, 1)\n\nWhen the range has zero width\n\n>>> expand_range((5, 5))\n(4.5, 5.5)\n\nNotes\n-----\nIf expanding *datetime* or *timedelta* types, **add** and\n**zero_width** must be suitable *timedeltas* i.e. 
You should\nnot mix types between **Numpy**, **Pandas** and the\n:mod:`datetime` module.\n\nIn Python 2, the multiplicative constant **mul** cannot be\na :class:`float`.", "id": "f10500:m8"} {"signature": "def expand_range_distinct(range, expand=(, , , ), zero_width=):", "body": "if len(expand) == :expand = tuple(expand) * lower = expand_range(range, expand[], expand[], zero_width)[]upper = expand_range(range, expand[], expand[], zero_width)[]return (lower, upper)", "docstring": "Expand a range with a multiplicative or additive constants\n\nSimilar to :func:`expand_range` but both sides of the range\nexpanded using different constants\n\nParameters\n----------\nrange : tuple\n    Range of data. Size 2\nexpand : tuple\n    Length 2 or 4. If length is 2, then the same constants\n    are used for both sides. If length is 4 then the first\n    two are the Multiplicative (*mul*) and Additive (*add*)\n    constants for the lower limit, and the second two are\n    the constants for the upper limit.\nzero_width : int | float | timedelta\n    Distance to use if range has zero width\n\nReturns\n-------\nout : tuple\n    Expanded range\n\nExamples\n--------\n>>> expand_range_distinct((3, 8))\n(3, 8)\n>>> expand_range_distinct((0, 10), (0.1, 0))\n(-1.0, 11.0)\n>>> expand_range_distinct((0, 10), (0.1, 0, 0.1, 0))\n(-1.0, 11.0)\n>>> expand_range_distinct((0, 10), (0.1, 0, 0, 0))\n(-1.0, 10)\n>>> expand_range_distinct((0, 10), (0, 2))\n(-2, 12)\n>>> expand_range_distinct((0, 10), (0, 2, 0, 2))\n(-2, 12)\n>>> expand_range_distinct((0, 10), (0, 0, 0, 2))\n(0, 12)\n>>> expand_range_distinct((0, 10), (.1, 2))\n(-3.0, 13.0)\n>>> expand_range_distinct((0, 10), (.1, 2, .1, 2))\n(-3.0, 13.0)\n>>> expand_range_distinct((0, 10), (0, 0, .1, 2))\n(0, 13.0)", "id": "f10500:m9"} {"signature": "def trans_new(name, transform, inverse, breaks=None,minor_breaks=None, _format=None,domain=(-np.inf, np.inf), doc='', **kwargs):", "body": "def _get(func):if isinstance(func, (classmethod, staticmethod, MethodType)):return funcelse:return staticmethod(func)klass_name = ''.format(name)d = {'': _get(transform),'': _get(inverse),'': domain,'': doc,**kwargs}if breaks:d[''] = _get(breaks)if minor_breaks:d[''] = _get(minor_breaks)if _format:d[''] = _get(_format)return type(klass_name, (trans,), d)", "docstring": "Create a transformation class object\n\nParameters\n----------\nname : str\n    Name of the transformation\ntransform : callable ``f(x)``\n    A function (preferably a `ufunc`) that computes\n    the transformation.\ninverse : callable ``f(x)``\n    A function (preferably a `ufunc`) that computes\n    the inverse of the transformation.\nbreaks : callable ``f(limits)``\n    Function to compute the breaks for this transform.\n    If None, then a default good enough for a linear\n    domain is used.\nminor_breaks : callable ``f(major, limits)``\n    Function to compute the minor breaks for this\n    transform. 
If None, then a default good enough for\n    a linear domain is used.\n_format : callable ``f(breaks)``\n    Function to format the generated breaks.\ndomain : array_like\n    Domain over which the transformation is valid.\n    It should be of length 2.\ndoc : str\n    Docstring for the class.\n**kwargs : dict\n    Attributes of the transform, e.g. if base is passed\n    in kwargs, then `t.base` would be a valid attribute.\n\nReturns\n-------\nout : trans\n    Transform class", "id": "f10509:m0"} {"signature": "def log_trans(base=None, **kwargs):", "body": "if base is None:name = ''base = np.exp()transform = np.logelif base == :name = ''transform = np.log10elif base == :name = ''transform = np.log2else:name = ''.format(base)def transform(x):return np.log(x)/np.log(base)def inverse(x):try:return base ** xexcept TypeError:return [base**val for val in x]if '' not in kwargs:kwargs[''] = (sys.float_info.min, np.inf)if '' not in kwargs:kwargs[''] = log_breaks(base=base)kwargs[''] = basekwargs[''] = log_format(base)_trans = trans_new(name, transform, inverse, **kwargs)if '' not in kwargs:n = int(base) - _trans.minor_breaks = trans_minor_breaks(_trans, n=n)return _trans", "docstring": "Create a log transform class for *base*\n\nParameters\n----------\nbase : float\n    Base for the logarithm. If None, then\n    the natural log is used.\nkwargs : dict\n    Keyword arguments passed onto\n    :func:`trans_new`. Should not include\n    the `transform` or `inverse`.\n\nReturns\n-------\nout : type\n    Log transform class", "id": "f10509:m1"} {"signature": "def exp_trans(base=None, **kwargs):", "body": "if base is None:name = ''base = np.exp()else:name = ''.format(base)def transform(x):return base ** xdef inverse(x):return np.log(x)/np.log(base)kwargs[''] = basereturn trans_new(name, transform, inverse, **kwargs)", "docstring": "Create an exponential transform class for *base*\n\nThis is the inverse of the log transform.\n\nParameters\n----------\nbase : float\n    Base of the logarithm\nkwargs : dict\n    Keyword arguments passed onto\n    :func:`trans_new`. Should not include\n    the `transform` or `inverse`.\n\nReturns\n-------\nout : type\n    Exponential transform class", "id": "f10509:m2"} {"signature": "def boxcox_trans(p, **kwargs):", "body": "if np.abs(p) < :return log_trans()def transform(x):return (x**p - ) / (p * np.sign(x-))def inverse(x):return (np.abs(x) * p + np.sign(x)) ** ( / p)kwargs[''] = pkwargs[''] = kwargs.get('', ''.format(p))kwargs[''] = transformkwargs[''] = inversereturn trans_new(**kwargs)", "docstring": "Boxcox Transformation\n\nParameters\n----------\np : float\n    Power parameter, commonly denoted by\n    lower-case lambda in formulae\nkwargs : dict\n    Keyword arguments passed onto\n    :func:`trans_new`. Should not include\n    the `transform` or `inverse`.", "id": "f10509:m3"} {"signature": "def probability_trans(distribution, *args, **kwargs):", "body": "import scipy.stats as statscdists = {k for k in dir(stats)if hasattr(getattr(stats, k), '')}if distribution not in cdists:msg = \"\"raise ValueError(msg.format(distribution))try:doc = kwargs.pop('')except KeyError:doc = ''try:name = kwargs.pop('')except KeyError:name = ''.format(distribution)def transform(x):return getattr(stats, distribution).cdf(x, *args, **kwargs)def inverse(x):return getattr(stats, distribution).ppf(x, *args, **kwargs)return trans_new(name,transform, inverse, domain=(, ),doc=doc)", "docstring": "Probability Transformation\n\nParameters\n----------\ndistribution : str\n    Name of the distribution. Valid distributions are\n    listed at :mod:`scipy.stats`. 
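(Aside, not part of the original record: a minimal sketch of how probability_trans might be used. The 'norm' distribution name is only an illustration; per the body above, any scipy.stats distribution exposing a cdf should work.)
# Sketch: a probit-style transform built from the normal CDF and PPF.
norm_trans = probability_trans('norm')              # trans_new returns a class
forward = norm_trans.transform([-1.0, 0.0, 1.0])    # CDF values in (0, 1)
back = norm_trans.inverse(forward)                  # PPF recovers the inputs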
Any of the continuous\n or discrete distributions.\nargs : tuple\n Arguments passed to the distribution functions.\nkwargs : dict\n Keyword arguments passed to the distribution functions.\n\nNotes\n-----\nMake sure that the distribution is a good enough\napproximation for the data. When this is not the case,\ncomputations may run into errors. Absence of any errors\ndoes not imply that the distribution fits the data.", "id": "f10509:m4"} {"signature": "def gettrans(t):", "body": "obj = tif isinstance(obj, str):name = ''.format(obj)obj = globals()[name]()if callable(obj):obj = obj()if isinstance(obj, type):obj = obj()if not isinstance(obj, trans):raise ValueError(\"\")return obj", "docstring": "Return a trans object\n\nParameters\n----------\nt : str | callable | type | trans\n name of transformation function\n\nReturns\n-------\nout : trans", "id": "f10509:m5"} {"signature": "@staticmethoddef transform(x):", "body": "return x", "docstring": "Transform of x", "id": "f10509:c0:m1"} {"signature": "@staticmethoddef inverse(x):", "body": "return x", "docstring": "Inverse of x", "id": "f10509:c0:m2"} {"signature": "def breaks(self, limits):", "body": "vmin = np.max([self.domain[], limits[]])vmax = np.min([self.domain[], limits[]])breaks = np.asarray(self.breaks_([vmin, vmax]))breaks = breaks.compress((breaks >= self.domain[]) &(breaks <= self.domain[]))return breaks", "docstring": "Calculate breaks in data space and return them\nin transformed space.\n\nExpects limits to be in *transform space*, this\nis the same space as that where the domain is\nspecified.\n\nThis method wraps around :meth:`breaks_` to ensure\nthat the calculated breaks are within the domain\nthe transform. This is helpful in cases where an\naesthetic requests breaks with limits expanded for\nsome padding, yet the expansion goes beyond the\ndomain of the transform. e.g for a probability\ntransform the breaks will be in the domain\n``[0, 1]`` despite any outward limits.\n\nParameters\n----------\nlimits : tuple\n The scale limits. 
Size 2.\n\nReturns\n-------\nout : array_like\n Major breaks", "id": "f10509:c0:m3"} {"signature": "@staticmethoddef transform(x):", "body": "try:x = date2num(x)except AttributeError:x = [pd.Timestamp(item) for item in x]x = date2num(x)return x", "docstring": "Transform from date to a numerical format", "id": "f10509:c7:m0"} {"signature": "@staticmethoddef inverse(x):", "body": "return num2date(x)", "docstring": "Transform to date from numerical format", "id": "f10509:c7:m1"} {"signature": "@staticmethoddef transform(x):", "body": "try:x = np.array([_x.total_seconds()*** for _x in x])except TypeError:x = x.total_seconds()***return x", "docstring": "Transform from Timeddelta to numerical format", "id": "f10509:c8:m0"} {"signature": "@staticmethoddef inverse(x):", "body": "try:x = [datetime.timedelta(microseconds=i) for i in x]except TypeError:x = datetime.timedelta(microseconds=x)return x", "docstring": "Transform to Timedelta from numerical format", "id": "f10509:c8:m1"} {"signature": "@staticmethoddef transform(x):", "body": "try:x = np.array([_x.value for _x in x])except TypeError:x = x.valuereturn x", "docstring": "Transform from Timeddelta to numerical format", "id": "f10509:c9:m0"} {"signature": "@staticmethoddef inverse(x):", "body": "try:x = [pd.Timedelta(int(i)) for i in x]except TypeError:x = pd.Timedelta(int(x))return x", "docstring": "Transform to Timedelta from numerical format", "id": "f10509:c9:m1"} {"signature": "@classmethoddef apply(cls, x, palette, na_value=None, trans=None):", "body": "if trans is not None:x = trans.transform(x)limits = cls.train(x)return cls.map(x, palette, limits, na_value)", "docstring": "Scale data continuously\n\nParameters\n----------\nx : array_like\n Continuous values to scale\npalette : callable ``f(x)``\n Palette to use\nna_value : object\n Value to use for missing values.\ntrans : trans\n How to transform the data before scaling. If\n ``None``, no transformation is done.\n\nReturns\n-------\nout : array_like\n Scaled values", "id": "f10510:c0:m0"} {"signature": "@classmethoddef train(cls, new_data, old=None):", "body": "if not len(new_data):return oldif not hasattr(new_data, ''):new_data = np.asarray(new_data)if new_data.dtype.kind not in CONTINUOUS_KINDS:raise TypeError(\"\")if old is not None:new_data = np.hstack([new_data, old])return min_max(new_data, na_rm=True, finite=True)", "docstring": "Train a continuous scale\n\nParameters\n----------\nnew_data : array_like\n New values\nold : array_like\n Old range. 
Most likely a tuple of length 2.\n\nReturns\n-------\nout : tuple\n Limits(range) of the scale", "id": "f10510:c0:m1"} {"signature": "@classmethoddef map(cls, x, palette, limits, na_value=None, oob=censor):", "body": "x = oob(rescale(x, _from=limits))pal = palette(x)try:pal[pd.isnull(x)] = na_valueexcept TypeError:pal = [v if not pd.isnull(v) else na_value for v in pal]return pal", "docstring": "Map values to a continuous palette\n\nParameters\n----------\nx : array_like\n Continuous values to scale\npalette : callable ``f(x)``\n palette to use\nna_value : object\n Value to use for missing values.\noob : callable ``f(x)``\n Function to deal with values that are\n beyond the limits\n\nReturns\n-------\nout : array_like\n Values mapped onto a palette", "id": "f10510:c0:m2"} {"signature": "@classmethoddef apply(cls, x, palette, na_value=None):", "body": "limits = cls.train(x)return cls.map(x, palette, limits, na_value)", "docstring": "Scale data discretely\n\nParameters\n----------\nx : array_like\n Discrete values to scale\npalette : callable ``f(x)``\n Palette to use\nna_value : object\n Value to use for missing values.\n\nReturns\n-------\nout : array_like\n Scaled values", "id": "f10510:c1:m0"} {"signature": "@classmethoddef train(cls, new_data, old=None, drop=False, na_rm=False):", "body": "if not len(new_data):return oldif old is None:old = []nan_bool_idx = pd.isnull(new_data)has_na = np.any(nan_bool_idx)if not hasattr(new_data, ''):new_data = np.asarray(new_data)new_data = new_data[~nan_bool_idx]if new_data.dtype.kind not in DISCRETE_KINDS:raise TypeError(\"\")if pdtypes.is_categorical_dtype(new_data):try:new = list(new_data.cat.categories) except AttributeError:new = list(new_data.categories) if drop:present = set(new_data.drop_duplicates())new = [i for i in new if i in present]else:try:new = np.unique(new_data)new.sort()except TypeError:new = list(set(new_data))new = multitype_sort(new)if has_na and not na_rm:new = np.hstack([new, np.nan])old_set = set(old)return list(old) + [i for i in new if (i not in old_set)]", "docstring": "Train a continuous scale\n\nParameters\n----------\nnew_data : array_like\n New values\nold : array_like\n Old range. List of values known to the scale.\ndrop : bool\n Whether to drop(not include) unused categories\nna_rm : bool\n If ``True``, remove missing values. 
Missing values\n are either ``NaN`` or ``None``.\n\nReturns\n-------\nout : list\n Values covered by the scale", "id": "f10510:c1:m1"} {"signature": "@classmethoddef map(cls, x, palette, limits, na_value=None):", "body": "n = len(limits)pal = palette(n)[match(x, limits)]try:pal[pd.isnull(x)] = na_valueexcept TypeError:pal = [v if not pd.isnull(v) else na_value for v in pal]return pal", "docstring": "Map values to a discrete palette\n\nParameters\n----------\npalette : callable ``f(x)``\n palette to use\nx : array_like\n Continuous values to scale\nna_value : object\n Value to use for missing values.\n\nReturns\n-------\nout : array_like\n Values mapped onto a palette", "id": "f10510:c1:m2"} {"signature": "def get_keywords():", "body": "git_refnames = \"\"git_full = \"\"git_date = \"\"keywords = {\"\": git_refnames, \"\": git_full, \"\": git_date}return keywords", "docstring": "Get the keywords needed to look up the version information.", "id": "f10511:m0"} {"signature": "def get_config():", "body": "cfg = VersioneerConfig()cfg.VCS = \"\"cfg.style = \"\"cfg.tag_prefix = \"\"cfg.parentdir_prefix = \"\"cfg.versionfile_source = \"\"cfg.verbose = Falsereturn cfg", "docstring": "Create, populate and return the VersioneerConfig() object.", "id": "f10511:m1"} {"signature": "def register_vcs_handler(vcs, method): ", "body": "def decorate(f):\"\"\"\"\"\"if vcs not in HANDLERS:HANDLERS[vcs] = {}HANDLERS[vcs][method] = freturn freturn decorate", "docstring": "Decorator to mark a method as the handler for a particular VCS.", "id": "f10511:m2"} {"signature": "def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,env=None):", "body": "assert isinstance(commands, list)p = Nonefor c in commands:try:dispcmd = str([c] + args)p = subprocess.Popen([c] + args, cwd=cwd, env=env,stdout=subprocess.PIPE,stderr=(subprocess.PIPE if hide_stderrelse None))breakexcept EnvironmentError:e = sys.exc_info()[]if e.errno == errno.ENOENT:continueif verbose:print(\"\" % dispcmd)print(e)return None, Noneelse:if verbose:print(\"\" % (commands,))return None, Nonestdout = p.communicate()[].strip()if sys.version_info[] >= :stdout = stdout.decode()if p.returncode != :if verbose:print(\"\" % dispcmd)print(\"\" % stdout)return None, p.returncodereturn stdout, p.returncode", "docstring": "Call the given command(s).", "id": "f10511:m3"} {"signature": "def versions_from_parentdir(parentdir_prefix, root, verbose):", "body": "rootdirs = []for i in range():dirname = os.path.basename(root)if dirname.startswith(parentdir_prefix):return {\"\": dirname[len(parentdir_prefix):],\"\": None,\"\": False, \"\": None, \"\": None}else:rootdirs.append(root)root = os.path.dirname(root) if verbose:print(\"\" %(str(rootdirs), parentdir_prefix))raise NotThisMethod(\"\")", "docstring": "Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes both\n the project name and a version string. 
We will also support searching up\n two directory levels for an appropriately named parent directory", "id": "f10511:m4"} {"signature": "@register_vcs_handler(\"\", \"\")def git_get_keywords(versionfile_abs):", "body": "keywords = {}try:f = open(versionfile_abs, \"\")for line in f.readlines():if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()f.close()except EnvironmentError:passreturn keywords", "docstring": "Extract version information from the given file.", "id": "f10511:m5"} {"signature": "@register_vcs_handler(\"\", \"\")def git_versions_from_keywords(keywords, tag_prefix, verbose):", "body": "if not keywords:raise NotThisMethod(\"\")date = keywords.get(\"\")if date is not None:date = date.strip().replace(\"\", \"\", ).replace(\"\", \"\", )refnames = keywords[\"\"].strip()if refnames.startswith(\"\"):if verbose:print(\"\")raise NotThisMethod(\"\")refs = set([r.strip() for r in refnames.strip(\"\").split(\"\")])TAG = \"\"tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])if not tags:tags = set([r for r in refs if re.search(r'', r)])if verbose:print(\"\" % \"\".join(refs - tags))if verbose:print(\"\" % \"\".join(sorted(tags)))for ref in sorted(tags):if ref.startswith(tag_prefix):r = ref[len(tag_prefix):]if verbose:print(\"\" % r)return {\"\": r,\"\": keywords[\"\"].strip(),\"\": False, \"\": None,\"\": date}if verbose:print(\"\")return {\"\": \"\",\"\": keywords[\"\"].strip(),\"\": False, \"\": \"\", \"\": None}", "docstring": "Get version information from git keywords.", "id": "f10511:m6"} {"signature": "@register_vcs_handler(\"\", \"\")def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):", "body": "GITS = [\"\"]if sys.platform == \"\":GITS = [\"\", \"\"]out, rc = run_command(GITS, [\"\", \"\"], cwd=root,hide_stderr=True)if rc != :if verbose:print(\"\" % root)raise NotThisMethod(\"\")describe_out, rc = run_command(GITS, [\"\", \"\", \"\",\"\", \"\",\"\", \"\" % tag_prefix],cwd=root)if describe_out is None:raise NotThisMethod(\"\")describe_out = describe_out.strip()full_out, rc = run_command(GITS, [\"\", \"\"], cwd=root)if full_out is None:raise NotThisMethod(\"\")full_out = full_out.strip()pieces = {}pieces[\"\"] = full_outpieces[\"\"] = full_out[:] pieces[\"\"] = Nonegit_describe = describe_outdirty = git_describe.endswith(\"\")pieces[\"\"] = dirtyif dirty:git_describe = git_describe[:git_describe.rindex(\"\")]if \"\" in git_describe:mo = re.search(r'', git_describe)if not mo:pieces[\"\"] = (\"\"% describe_out)return piecesfull_tag = mo.group()if not full_tag.startswith(tag_prefix):if verbose:fmt = \"\"print(fmt % (full_tag, tag_prefix))pieces[\"\"] = (\"\"% (full_tag, tag_prefix))return piecespieces[\"\"] = full_tag[len(tag_prefix):]pieces[\"\"] = int(mo.group())pieces[\"\"] = mo.group()else:pieces[\"\"] = Nonecount_out, rc = run_command(GITS, [\"\", \"\", \"\"],cwd=root)pieces[\"\"] = int(count_out) date = run_command(GITS, [\"\", \"\", \"\", \"\"],cwd=root)[].strip()pieces[\"\"] = date.strip().replace(\"\", \"\", ).replace(\"\", \"\", )return pieces", "docstring": "Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source 
tree.", "id": "f10511:m7"} {"signature": "def plus_or_dot(pieces):", "body": "if \"\" in pieces.get(\"\", \"\"):return \"\"return \"\"", "docstring": "Return a + if we don't already have one, else return a .", "id": "f10511:m8"} {"signature": "def render_pep440(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += plus_or_dot(pieces)rendered += \"\" % (pieces[\"\"], pieces[\"\"])if pieces[\"\"]:rendered += \"\"else:rendered = \"\" % (pieces[\"\"],pieces[\"\"])if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]", "id": "f10511:m9"} {"signature": "def render_pep440_pre(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\" % pieces[\"\"]else:rendered = \"\" % pieces[\"\"]return rendered", "docstring": "TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE", "id": "f10511:m10"} {"signature": "def render_pep440_post(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"rendered += plus_or_dot(pieces)rendered += \"\" % pieces[\"\"]else:rendered = \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"rendered += \"\" % pieces[\"\"]return rendered", "docstring": "TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 0.postDISTANCE[.dev0]", "id": "f10511:m11"} {"signature": "def render_pep440_old(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"else:rendered = \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Eexceptions:\n 1: no tags. 0.postDISTANCE[.dev0]", "id": "f10511:m12"} {"signature": "def render_git_describe(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\" % (pieces[\"\"], pieces[\"\"])else:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)", "id": "f10511:m13"} {"signature": "def render_git_describe_long(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]rendered += \"\" % (pieces[\"\"], pieces[\"\"])else:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always -long'.\n The distance/hash is unconditional.\n\n Exceptions:\n 1: no tags. 
HEX[-dirty] (note: no 'g' prefix)", "id": "f10511:m14"} {"signature": "def render(pieces, style):", "body": "if pieces[\"\"]:return {\"\": \"\",\"\": pieces.get(\"\"),\"\": None,\"\": pieces[\"\"],\"\": None}if not style or style == \"\":style = \"\" if style == \"\":rendered = render_pep440(pieces)elif style == \"\":rendered = render_pep440_pre(pieces)elif style == \"\":rendered = render_pep440_post(pieces)elif style == \"\":rendered = render_pep440_old(pieces)elif style == \"\":rendered = render_git_describe(pieces)elif style == \"\":rendered = render_git_describe_long(pieces)else:raise ValueError(\"\" % style)return {\"\": rendered, \"\": pieces[\"\"],\"\": pieces[\"\"], \"\": None,\"\": pieces.get(\"\")}", "docstring": "Render the given version pieces into the requested style.", "id": "f10511:m15"} {"signature": "def get_versions():", "body": "cfg = get_config()verbose = cfg.verbosetry:return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,verbose)except NotThisMethod:passtry:root = os.path.realpath(__file__)for i in cfg.versionfile_source.split(''):root = os.path.dirname(root)except NameError:return {\"\": \"\", \"\": None,\"\": None,\"\": \"\",\"\": None}try:pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)return render(pieces, cfg.style)except NotThisMethod:passtry:if cfg.parentdir_prefix:return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)except NotThisMethod:passreturn {\"\": \"\", \"\": None,\"\": None,\"\": \"\", \"\": None}", "docstring": "Get version information or return default if unable to do so.", "id": "f10511:m16"} {"signature": "def _format(formatter, x):", "body": "formatter.create_dummy_axis()formatter.set_locs([val for val in x if ~np.isnan(val)])try:oom = int(formatter.orderOfMagnitude)except AttributeError:oom = labels = [formatter(tick) for tick in x]pattern = re.compile(r'')for i, label in enumerate(labels):match = pattern.search(label)if match:labels[i] = pattern.sub('', label)if oom:labels = [''.format(s, oom) if s != '' else sfor s in labels]return labels", "docstring": "Helper to format and tidy up", "id": "f10513:m0"} {"signature": "def __call__(self, x):", "body": "if self.style == '':return [self.fmt.format(val) for val in x]elif self.style == '':return [self.fmt % val for val in x]else:raise ValueError(\"\")", "docstring": "Format a sequence of inputs\n\nParameters\n----------\nx : array\n Input\n\nReturns\n-------\nout : list\n List of strings.", "id": "f10513:c0:m1"} {"signature": "def __call__(self, x):", "body": "big_mark = self.big_markcomma = '' if big_mark else ''tpl = ''.join((self.prefix, '', comma, '',str(self.digits), '', self.suffix))labels = [tpl.format(val) for val in x]if big_mark and big_mark != '':labels = [val.replace('', big_mark) for val in labels]return labels", "docstring": "Format a sequence of inputs\n\nParameters\n----------\nx : array\n Input\n\nReturns\n-------\nout : list\n List of strings.", "id": "f10513:c1:m1"} {"signature": "def __call__(self, x):", "body": "return self.formatter(x)", "docstring": "Format a sequence of inputs\n\nParameters\n----------\nx : array\n Input\n\nReturns\n-------\nout : list\n List of strings.", "id": "f10513:c2:m1"} {"signature": "def __call__(self, x):", "body": "if len(x) == :return []_precision = precision(x)x = round_any(x, _precision / ) * if _precision > :digits = else:digits = abs(int(np.log10(_precision)))formatter = currency_format(prefix='',suffix='',digits=digits,big_mark=self.big_mark)labels = formatter(x)pattern = re.compile(r'')if 
all(pattern.search(val) for val in labels):labels = [pattern.sub('', val) for val in labels]return labels", "docstring": "Format a sequence of inputs\n\nParameters\n----------\nx : array\n Input\n\nReturns\n-------\nout : list\n List of strings.", "id": "f10513:c3:m1"} {"signature": "def __call__(self, x):", "body": "return _format(self.formatter, x)", "docstring": "Format a sequence of inputs\n\nParameters\n----------\nx : array\n Input\n\nReturns\n-------\nout : list\n List of strings.", "id": "f10513:c5:m1"} {"signature": "def _tidyup_labels(self, labels):", "body": "def remove_zeroes(s):\"\"\"\"\"\"tup = s.split('')if len(tup) == :mantissa = tup[].rstrip('').rstrip('')exponent = int(tup[])if exponent:s = '' % (mantissa, exponent)else:s = mantissareturn sdef as_exp(s):\"\"\"\"\"\"return s if '' in s else ''.format(float(s))has_e = np.array(['' in x for x in labels])if not np.all(has_e) and not np.all(~has_e):labels = [as_exp(x) for x in labels]labels = [remove_zeroes(x) for x in labels]return labels", "docstring": "Make all labels uniform in format and remove redundant zeros\nfor labels in exponential format.\n\nParameters\n----------\nlabels : list-like\n Labels to be tidied.\n\nReturns\n-------\nout : list-like\n Labels", "id": "f10513:c6:m1"} {"signature": "def __call__(self, x):", "body": "if len(x) == :return []if self.base == :xmin = int(np.floor(np.log10(np.min(x))))xmax = int(np.ceil(np.log10(np.max(x))))emin, emax = self.exponent_limitsall_multiples = np.all([np.log10(num).is_integer() for num in x])if same_log10_order_of_magnitude(x):f = mpl_format()f.formatter.set_powerlimits((emin, emax))return f(x)elif all_multiples and (xmin <= emin or xmax >= emax):fmt = ''else:fmt = ''else:fmt = ''labels = [fmt.format(num) for num in x]return self._tidyup_labels(labels)", "docstring": "Format a sequence of inputs\n\nParameters\n----------\nx : array\n Input\n\nReturns\n-------\nout : list\n List of strings.", "id": "f10513:c6:m2"} {"signature": "def __call__(self, x):", "body": "if self.tz is None and len(x):tz = self.formatter.tz = x[].tzinfoif not all(value.tzinfo == tz for value in x):msg = (\"\"\"\"\"\"\"\")warn(msg.format(tz.zone))x = [date2num(val) for val in x]return _format(self.formatter, x)", "docstring": "Format a sequence of inputs\n\nParameters\n----------\nx : array\n Input\n\nReturns\n-------\nout : list\n List of strings.", "id": "f10513:c7:m1"} {"signature": "def round_any(x, accuracy, f=np.round):", "body": "if not hasattr(x, ''):x = np.asarray(x)return f(x / accuracy) * accuracy", "docstring": "Round to multiple of any number.", "id": "f10514:m0"} {"signature": "def min_max(x, na_rm=False, finite=True):", "body": "if not hasattr(x, ''):x = np.asarray(x)if na_rm and finite:x = x[np.isfinite(x)]elif not na_rm and np.any(np.isnan(x)):return np.nan, np.nanelif na_rm:x = x[~np.isnan(x)]elif finite:x = x[~np.isinf(x)]if (len(x)):return np.min(x), np.max(x)else:return float(''), float('')", "docstring": "Return the minimum and maximum of x\n\nParameters\n----------\nx : array_like\n Sequence\nna_rm : bool\n Whether to remove ``nan`` values.\nfinite : bool\n Whether to consider only finite values.\n\nReturns\n-------\nout : tuple\n (minimum, maximum) of x", "id": "f10514:m1"} {"signature": "def match(v1, v2, nomatch=-, incomparables=None, start=):", "body": "v2_indices = {}for i, x in enumerate(v2):if x not in v2_indices:v2_indices[x] = iv1_to_v2_map = [nomatch] * len(v1)skip = set(incomparables) if incomparables else set()for i, x in enumerate(v1):if x in 
skip:continuetry:v1_to_v2_map[i] = v2_indices[x] + startexcept KeyError:passreturn v1_to_v2_map", "docstring": "Return a vector of the positions of (first)\nmatches of its first argument in its second.\n\nParameters\n----------\nv1: array_like\n Values to be matched\n\nv2: array_like\n Values to be matched against\n\nnomatch: int\n Value to be returned in the case when\n no match is found.\n\nincomparables: array_like\n Values that cannot be matched. Any value in ``v1``\n matching a value in this list is assigned the nomatch\n value.\nstart: int\n Type of indexing to use. Most likely 0 or 1", "id": "f10514:m2"} {"signature": "def precision(x):", "body": "from .bounds import zero_rangerng = min_max(x, na_rm=True)if zero_range(rng):span = np.abs(rng[])else:span = np.diff(rng)[]if span == :return else:return ** int(np.floor(np.log10(span)))", "docstring": "Return the precision of x\n\nParameters\n----------\nx : array_like | numeric\n Value(s) whose for which to compute the precision.\n\nReturns\n-------\nout : numeric\n The precision of ``x`` or that the values in ``x``.\n\nNotes\n-----\nThe precision is computed in base 10.\n\nExamples\n--------\n>>> precision(0.08)\n0.01\n>>> precision(9)\n1\n>>> precision(16)\n10", "id": "f10514:m3"} {"signature": "def first_element(obj):", "body": "if isinstance(obj, Iterator):raise RuntimeError(\"\")return next(iter(obj))", "docstring": "Return the first element of `obj`\n\nParameters\n----------\nobj : iterable\n Should not be an iterator\n\nReturns\n-------\nout : object\n First element of `obj`. Raise a class:`StopIteration`\n exception if `obj` is empty.", "id": "f10514:m4"} {"signature": "def multitype_sort(a):", "body": "types = defaultdict(list)numbers = {int, float, complex}for x in a:t = type(x)if t in numbers:types[''].append(x)else:types[t].append(x)for t in types:types[t] = np.sort(types[t])return list(chain(*(types[t] for t in types)))", "docstring": "Sort elements of multiple types\n\nx is assumed to contain elements of different types, such that\nplain sort would raise a `TypeError`.\n\nParameters\n----------\na : array-like\n Array of items to be sorted\n\nReturns\n-------\nout : list\n Items sorted within their type groups.", "id": "f10514:m5"} {"signature": "def nearest_int(x):", "body": "if x == :return np.int64()elif x > :return np.int64(x + )else:return np.int64(x - )", "docstring": "Return nearest long integer to x", "id": "f10514:m6"} {"signature": "def is_close_to_int(x):", "body": "if not np.isfinite(x):return Falsereturn abs(x - nearest_int(x)) < ", "docstring": "Check if value is close to an integer\n\nParameters\n----------\nx : float\n Numeric value to check\n\nReturns\n-------\nout : bool", "id": "f10514:m7"} {"signature": "def same_log10_order_of_magnitude(x, delta=):", "body": "dmin = np.log10(np.min(x)*(-delta))dmax = np.log10(np.max(x)*(+delta))return np.floor(dmin) == np.floor(dmax)", "docstring": "Return true if range is approximately in same order of magnitude\n\nFor example these sequences are in the same order of magnitude:\n\n - [1, 8, 5] # [1, 10)\n - [35, 20, 80] # [10 100)\n - [232, 730] # [100, 1000)\n\nParameters\n----------\nx : array-like\n Values in base 10. Must be size 2 and\n ``rng[0] <= rng[1]``.\ndelta : float\n Fuzz factor for approximation. 
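(Aside, not part of the original record: a quick illustration of the behaviour described here, reusing the ranges already listed in this docstring.)
# [35, 20, 80] all fall in [10, 100), so they share an order of magnitude.
same_log10_order_of_magnitude([35, 20, 80])   # True
# [5, 50] straddles [1, 10) and [10, 100).
same_log10_order_of_magnitude([5, 50])        # False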
It is multiplicative.", "id": "f10514:m8"} {"signature": "def identity(*args):", "body": "return args if len(args) > else args[]", "docstring": "Return whatever is passed in", "id": "f10514:m9"} {"signature": "def hls_palette(n_colors=, h=, l=, s=):", "body": "hues = np.linspace(, , n_colors + )[:-]hues += hhues %= hues -= hues.astype(int)palette = [colorsys.hls_to_rgb(h_i, l, s) for h_i in hues]return palette", "docstring": "Get a set of evenly spaced colors in HLS hue space.\n\nh, l, and s should be between 0 and 1\n\nParameters\n----------\n\nn_colors : int\n number of colors in the palette\nh : float\n first hue\nl : float\n lightness\ns : float\n saturation\n\nReturns\n-------\npalette : list\n List of colors as RGB hex strings.\n\nSee Also\n--------\nhusl_palette : Make a palette using evenly spaced circular\n hues in the HUSL system.\n\nExamples\n--------\n>>> len(hls_palette(2))\n2\n>>> len(hls_palette(9))\n9", "id": "f10515:m0"} {"signature": "def husl_palette(n_colors=, h=, s=, l=):", "body": "hues = np.linspace(, , n_colors + )[:-]hues += hhues %= hues *= s *= l *= palette = [husl.husl_to_rgb(h_i, s, l) for h_i in hues]return palette", "docstring": "Get a set of evenly spaced colors in HUSL hue space.\n\nh, s, and l should be between 0 and 1\n\nParameters\n----------\n\nn_colors : int\n number of colors in the palette\nh : float\n first hue\ns : float\n saturation\nl : float\n lightness\n\nReturns\n-------\npalette : list\n List of colors as RGB hex strings.\n\nSee Also\n--------\nhls_palette : Make a palette using evenly spaced circular\n hues in the HSL system.\n\nExamples\n--------\n>>> len(husl_palette(3))\n3\n>>> len(husl_palette(11))\n11", "id": "f10515:m1"} {"signature": "def rescale_pal(range=(, )):", "body": "def _rescale(x):return rescale(x, range, _from=(, ))return _rescale", "docstring": "Rescale the input to the specific output range.\n\nUseful for alpha, size, and continuous position.\n\nParameters\n----------\nrange : tuple\n Range of the scale\n\nReturns\n-------\nout : function\n Palette function that takes a sequence of values\n in the range ``[0, 1]`` and returns values in\n the specified range.\n\nExamples\n--------\n>>> palette = rescale_pal()\n>>> palette([0, .2, .4, .6, .8, 1])\narray([0.1 , 0.28, 0.46, 0.64, 0.82, 1. ])\n\nThe returned palette expects inputs in the ``[0, 1]``\nrange. Any value outside those limits is clipped to\n``range[0]`` or ``range[1]``.\n\n>>> palette([-2, -1, 0.2, .4, .8, 2, 3])\narray([0.1 , 0.1 , 0.28, 0.46, 0.82, 1. , 1. ])", "id": "f10515:m2"} {"signature": "def area_pal(range=(, )):", "body": "def area_palette(x):return rescale(np.sqrt(x), to=range, _from=(, ))return area_palette", "docstring": "Point area palette (continuous).\n\nParameters\n----------\nrange : tuple\n Numeric vector of length two, giving range of possible sizes.\n Should be greater than 0.\n\nReturns\n-------\nout : function\n Palette function that takes a sequence of values\n in the range ``[0, 1]`` and returns values in\n the specified range.\n\nExamples\n--------\n>>> x = np.arange(0, .6, .1)**2\n>>> palette = area_pal()\n>>> palette(x)\narray([1. , 1.5, 2. , 2.5, 3. 
, 3.5])\n\nThe results are equidistant because the input ``x`` is in\narea space, i.e. it is squared.", "id": "f10515:m3"} {"signature": "def abs_area(max):", "body": "def abs_area_palette(x):return rescale(np.sqrt(np.abs(x)), to=(, max), _from=(, ))return abs_area_palette", "docstring": "Point area palette (continuous), with area proportional to value.\n\nParameters\n----------\nmax : float\n    A number representing the maximum size\n\nReturns\n-------\nout : function\n    Palette function that takes a sequence of values\n    in the range ``[0, 1]`` and returns values in the range\n    ``[0, max]``.\n\nExamples\n--------\n>>> x = np.arange(0, .8, .1)**2\n>>> palette = abs_area(5)\n>>> palette(x)\narray([0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5])\n\nCompared to :func:`area_pal`, :func:`abs_area` will handle values\nin the range ``[-1, 0]`` without returning ``np.nan``. And values\nwhose absolute value is greater than 1 will be clipped to the\nmaximum.", "id": "f10515:m4"} {"signature": "def grey_pal(start=, end=):", "body": "gamma = ends = ((, start, start), (, end, end))cdict = {'': ends, '': ends, '': ends}grey_cmap = mcolors.LinearSegmentedColormap('', cdict)def continuous_grey_palette(n):colors = []for x in np.linspace(start**gamma, end**gamma, n):x = (x ** (/gamma) - start) / (end - start)colors.append(mcolors.rgb2hex(grey_cmap(x)))return colorsreturn continuous_grey_palette", "docstring": "Utility for creating continuous grey scale palette\n\nParameters\n----------\nstart : float\n    grey value at low end of palette\nend : float\n    grey value at high end of palette\n\nReturns\n-------\nout : function\n    Continuous color palette that takes a single\n    :class:`int` parameter ``n`` and returns ``n``\n    equally spaced colors.\n\nExamples\n--------\n>>> palette = grey_pal()\n>>> palette(5)\n['#333333', '#737373', '#989898', '#b5b5b5', '#cccccc']", "id": "f10515:m5"} {"signature": "def hue_pal(h=, l=, s=, color_space=''):", "body": "if not all([ <= val <=  for val in (h, l, s)]):msg = (\"\"\"\".format(h, l, s))raise ValueError(msg)if color_space not in ('', ''):msg = \"\"raise ValueError(msg)name = ''.format(color_space)palette = globals()[name]def _hue_pal(n):colors = palette(n, h=h, l=l, s=s)return [mcolors.rgb2hex(c) for c in colors]return _hue_pal", "docstring": "Utility for making hue palettes for color schemes.\n\nParameters\n----------\nh : float\n    first hue. In the [0, 1] range\nl : float\n    lightness. In the [0, 1] range\ns : float\n    saturation. In the [0, 1] range\ncolor_space : 'hls' | 'husl'\n    Color space to use for the palette\n\nReturns\n-------\nout : function\n    A discrete color palette that takes a single\n    :class:`int` parameter ``n`` and returns ``n``\n    equally spaced colors. Though the palette\n    is continuous, since it varies the hue it\n    is good for categorical data. 
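(Aside, not part of the original record: a small sketch of varying the hue_pal parameters documented above. The h, l, s values below are illustrative, not the stripped defaults.)
# Sketch: a lighter, partly desaturated categorical palette with 4 levels.
pal = hue_pal(h=0.1, l=0.7, s=0.5)
colors = pal(4)   # four hex strings, evenly spaced around the hue wheel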
However if ``n``\n is large enough the colors show continuity.\n\nExamples\n--------\n>>> hue_pal()(5)\n['#db5f57', '#b9db57', '#57db94', '#5784db', '#c957db']\n>>> hue_pal(color_space='husl')(5)\n['#e0697e', '#9b9054', '#569d79', '#5b98ab', '#b675d7']", "id": "f10515:m6"} {"signature": "def brewer_pal(type='', palette=):", "body": "def full_type_name(text):abbrevs = {'': '','': '','': ''}text = abbrevs.get(text, text)return text.title()def number_to_palette_name(ctype, n):\"\"\"\"\"\"n -= palettes = sorted(colorbrewer.COLOR_MAPS[ctype].keys())if n < len(palettes):return palettes[n]raise ValueError(\"\"\"\".format(len(palettes),ctype, n+))def max_palette_colors(type, palette_name):\"\"\"\"\"\"if type == '':return elif type == '':return else:qlimit = {'': , '': , '': ,'': , '': , '': ,'': , '': }return qlimit[palette_name]type = full_type_name(type)if isinstance(palette, int):palette_name = number_to_palette_name(type, palette)else:palette_name = palettenmax = max_palette_colors(type, palette_name)def _brewer_pal(n):_n = n if n <= nmax else nmaxtry:bmap = colorbrewer.get_map(palette_name, type, _n)except ValueError as err:if <= _n < :bmap = colorbrewer.get_map(palette_name, type, )else:raise errhex_colors = bmap.hex_colors[:n]if n > nmax:msg = (\"\"\"\"\"\"\"\".format(palette_name, nmax))warnings.warn(msg)hex_colors = hex_colors + [None] * (n - nmax)return hex_colorsreturn _brewer_pal", "docstring": "Utility for making a brewer palette\n\nParameters\n----------\ntype : 'sequential' | 'qualitative' | 'diverging'\n Type of palette. Sequential, Qualitative or\n Diverging. The following abbreviations may\n be used, ``seq``, ``qual`` or ``div``.\n\npalette : int | str\n Which palette to choose from. If is an integer,\n it must be in the range ``[0, m]``, where ``m``\n depends on the number sequential, qualitative or\n diverging palettes. If it is a string, then it\n is the name of the palette.\n\nReturns\n-------\nout : function\n A color palette that takes a single\n :class:`int` parameter ``n`` and returns ``n``\n colors. 
The maximum value of ``n`` varies\n depending on the parameters.\n\nExamples\n--------\n>>> brewer_pal()(5)\n['#EFF3FF', '#BDD7E7', '#6BAED6', '#3182BD', '#08519C']\n>>> brewer_pal('qual')(5)\n['#7FC97F', '#BEAED4', '#FDC086', '#FFFF99', '#386CB0']\n>>> brewer_pal('qual', 2)(5)\n['#1B9E77', '#D95F02', '#7570B3', '#E7298A', '#66A61E']\n>>> brewer_pal('seq', 'PuBuGn')(5)\n['#F6EFF7', '#BDC9E1', '#67A9CF', '#1C9099', '#016C59']\n\nThe available color names for each palette type can be\nobtained using the following code::\n\n import palettable.colorbrewer as brewer\n\n print([k for k in brewer.COLOR_MAPS['Sequential'].keys()])\n print([k for k in brewer.COLOR_MAPS['Qualitative'].keys()])\n print([k for k in brewer.COLOR_MAPS['Diverging'].keys()])", "id": "f10515:m7"} {"signature": "def ratios_to_colors(values, colormap):", "body": "iterable = Truetry:iter(values)except TypeError:iterable = Falsevalues = [values]color_tuples = colormap(values)try:hex_colors = [mcolors.rgb2hex(t) for t in color_tuples]except IndexError:hex_colors = mcolors.rgb2hex(color_tuples)return hex_colors if iterable else hex_colors[]", "docstring": "Map values in the range [0, 1] onto colors\n\nParameters\n----------\nvalues : array_like | float\n Numeric(s) in the range [0, 1]\ncolormap : cmap\n Matplotlib colormap to use for the mapping\n\nReturns\n-------\nout : list | float\n Color(s) corresponding to the values", "id": "f10515:m8"} {"signature": "def gradient_n_pal(colors, values=None, name=''):", "body": "if values is None:colormap = mcolors.LinearSegmentedColormap.from_list(name, colors)else:colormap = mcolors.LinearSegmentedColormap.from_list(name, list(zip(values, colors)))def _gradient_n_pal(vals):return ratios_to_colors(vals, colormap)return _gradient_n_pal", "docstring": "Create a n color gradient palette\n\nParameters\n----------\ncolors : list\n list of colors\nvalues : list, optional\n list of points in the range [0, 1] at which to\n place each color. Must be the same size as\n `colors`. Default to evenly space the colors\nname : str\n Name to call the resultant MPL colormap\n\nReturns\n-------\nout : function\n Continuous color palette that takes a single\n parameter either a :class:`float` or a sequence\n of floats maps those value(s) onto the palette\n and returns color(s). The float(s) must be\n in the range [0, 1].\n\nExamples\n--------\n>>> palette = gradient_n_pal(['red', 'blue'])\n>>> palette([0, .25, .5, .75, 1])\n['#ff0000', '#bf0040', '#7f0080', '#3f00c0', '#0000ff']", "id": "f10515:m9"} {"signature": "def cmap_pal(name=None, lut=None):", "body": "colormap = get_cmap(name, lut)def _cmap_pal(vals):return ratios_to_colors(vals, colormap)return _cmap_pal", "docstring": "Create a continuous palette using an MPL colormap\n\nParameters\n----------\nname : str\n Name of colormap\nlut : None | int\n This is the number of entries desired in the lookup table.\n Default is ``None``, leave it up Matplotlib.\n\nReturns\n-------\nout : function\n Continuous color palette that takes a single\n parameter either a :class:`float` or a sequence\n of floats maps those value(s) onto the palette\n and returns color(s). 
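(Aside, not part of the original record: the `values` argument of gradient_n_pal, defined just above, lets you skew where each color sits along the palette. The numbers below are illustrative only.)
# Sketch: place the middle color at 0.25 instead of the default midpoint.
pal = gradient_n_pal(['red', 'white', 'blue'], values=[0, 0.25, 1])
pal([0.0, 0.25, 1.0])   # endpoints map to pure red and blue, 0.25 to white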
The float(s) must be\n in the range [0, 1].\n\nExamples\n--------\n>>> palette = cmap_pal('viridis')\n>>> palette([.1, .2, .3, .4, .5])\n['#482475', '#414487', '#355f8d', '#2a788e', '#21918c']", "id": "f10515:m10"} {"signature": "def cmap_d_pal(name=None, lut=None):", "body": "colormap = get_cmap(name, lut)if not isinstance(colormap, mcolors.ListedColormap):raise ValueError(\"\"\"\")ncolors = len(colormap.colors)def _cmap_d_pal(n):if n > ncolors:raise ValueError(\"\"\"\".format(name, ncolors, n))if ncolors < :return [mcolors.rgb2hex(c) for c in colormap.colors[:n]]else:idx = np.linspace(, ncolors-, n).round().astype(int)return [mcolors.rgb2hex(colormap.colors[i]) for i in idx]return _cmap_d_pal", "docstring": "Create a discrete palette using an MPL Listed colormap\n\nParameters\n----------\nname : str\n Name of colormap\nlut : None | int\n This is the number of entries desired in the lookup table.\n Default is ``None``, leave it up to Matplotlib.\n\nReturns\n-------\nout : function\n A discrete color palette that takes a single\n :class:`int` parameter ``n`` and returns ``n``\n colors. The maximum value of ``n`` varies\n depending on the parameters.\n\nExamples\n--------\n>>> palette = cmap_d_pal('viridis')\n>>> palette(5)\n['#440154', '#3b528b', '#21918c', '#5cc863', '#fde725']", "id": "f10515:m11"} {"signature": "def desaturate_pal(color, prop, reverse=False):", "body": "if not <= prop <= :raise ValueError(\"\")rgb = mcolors.colorConverter.to_rgb(color)h, l, s = colorsys.rgb_to_hls(*rgb)s *= propdesaturated_color = colorsys.hls_to_rgb(h, l, s)colors = [color, desaturated_color]if reverse:colors = colors[::-]return gradient_n_pal(colors, name='')", "docstring": "Create a palette that desaturates a color by some proportion\n\nParameters\n----------\ncolor : matplotlib color\n hex, rgb-tuple, or html color name\nprop : float\n saturation channel of color will be multiplied by\n this value\nreverse : bool\n Whether to reverse the palette.\n\nReturns\n-------\nout : function\n Continuous color palette that takes a single\n parameter either a :class:`float` or a sequence\n of floats maps those value(s) onto the palette\n and returns color(s). 
The float(s) must be\n in the range [0, 1].\n\nExamples\n--------\n>>> palette = desaturate_pal('red', .1)\n>>> palette([0, .25, .5, .75, 1])\n['#ff0000', '#e21d1d', '#c53a3a', '#a95656', '#8c7373']", "id": "f10515:m12"} {"signature": "def manual_pal(values):", "body": "max_n = len(values)def _manual_pal(n):if n > max_n:msg = (\"\"\"\")warnings.warn(msg.format(max_n, n))return values[:n]return _manual_pal", "docstring": "Create a palette from a list of values\n\nParameters\n----------\nvalues : sequence\n Values that will be returned by the palette function.\n\nReturns\n-------\nout : function\n A function palette that takes a single\n :class:`int` parameter ``n`` and returns ``n`` values.\n\nExamples\n--------\n>>> palette = manual_pal(['a', 'b', 'c', 'd', 'e'])\n>>> palette(3)\n['a', 'b', 'c']", "id": "f10515:m13"} {"signature": "def xkcd_palette(colors):", "body": "return [xkcd_rgb[name] for name in colors]", "docstring": "Make a palette with color names from the xkcd color survey.\n\nSee xkcd for the full list of colors: http://xkcd.com/color/rgb/\n\nParameters\n----------\ncolors : list of strings\n List of keys in the ``mizani.external.xkcd_rgb`` dictionary.\n\nReturns\n-------\npalette : list\n List of colors as RGB hex strings.\n\nExamples\n--------\n>>> palette = xkcd_palette(['red', 'green', 'blue'])\n>>> palette\n['#e50000', '#15b01a', '#0343df']\n\n>>> from mizani.external import xkcd_rgb\n>>> list(sorted(xkcd_rgb.keys()))[:5]\n['acid green', 'adobe', 'algae', 'algae green', 'almost black']", "id": "f10515:m14"} {"signature": "def crayon_palette(colors):", "body": "return [crayon_rgb[name] for name in colors]", "docstring": "Make a palette with color names from Crayola crayons.\n\nThe colors come from\nhttp://en.wikipedia.org/wiki/List_of_Crayola_crayon_colors\n\nParameters\n----------\ncolors : list of strings\n List of keys in the ``mizani.external.crayon_rgb`` dictionary.\n\nReturns\n-------\npalette : list\n List of colors as RGB hex strings.\n\nExamples\n--------\n>>> palette = crayon_palette(['almond', 'silver', 'yellow'])\n>>> palette\n['#eed9c4', '#c9c0bb', '#fbe870']\n\n>>> from mizani.external import crayon_rgb\n>>> list(sorted(crayon_rgb.keys()))[:5]\n['almond', 'antique brass', 'apricot', 'aquamarine', 'asparagus']", "id": "f10515:m15"} {"signature": "def cubehelix_pal(start=, rot=, gamma=, hue=,light=, dark=, reverse=False):", "body": "cdict = mpl._cm.cubehelix(gamma, start, rot, hue)cubehelix_cmap = mpl.colors.LinearSegmentedColormap('', cdict)def cubehelix_palette(n):values = np.linspace(light, dark, n)return [mcolors.rgb2hex(cubehelix_cmap(x)) for x in values]return cubehelix_palette", "docstring": "Utility for creating a continuous palette from the cubehelix system.\n\nThis produces a colormap with linearly-decreasing (or increasing)\nbrightness. 
That means that information will be preserved if printed to\nblack and white or viewed by someone who is colorblind.\n\nParameters\n----------\nstart : float (0 <= start <= 3)\n The hue at the start of the helix.\nrot : float\n Rotations around the hue wheel over the range of the palette.\ngamma : float (0 <= gamma)\n Gamma factor to emphasize darker (gamma < 1) or lighter (gamma > 1)\n colors.\nhue : float (0 <= hue <= 1)\n Saturation of the colors.\ndark : float (0 <= dark <= 1)\n Intensity of the darkest color in the palette.\nlight : float (0 <= light <= 1)\n Intensity of the lightest color in the palette.\nreverse : bool\n If True, the palette will go from dark to light.\n\nReturns\n-------\nout : function\n Continuous color palette that takes a single\n :class:`int` parameter ``n`` and returns ``n``\n equally spaced colors.\n\n\nReferences\n----------\nGreen, D. A. (2011). \"A colour scheme for the display of astronomical\nintensity images\". Bulletin of the Astromical Society of India, Vol. 39,\np. 289-295.\n\nExamples\n--------\n>>> palette = cubehelix_pal()\n>>> palette(5)\n['#edd1cb', '#d499a7', '#aa688f', '#6e4071', '#2d1e3e']", "id": "f10515:m16"} {"signature": "def identity_pal():", "body": "return identity", "docstring": "Create palette that maps values onto themselves\n\nReturns\n-------\nout : function\n Palette function that takes a value or sequence of values\n and returns the same values.\n\nExamples\n--------\n>>> palette = identity_pal()\n>>> palette(9)\n9\n>>> palette([2, 4, 6])\n[2, 4, 6]", "id": "f10515:m17"} {"signature": "def check_dependencies():", "body": "pass", "docstring": "Check for system level dependencies", "id": "f10518:m0"} {"signature": "def get_required_packages():", "body": "install_requires = ['','','','']return install_requires", "docstring": "Return required packages\n\nPlus any version tests and warnings", "id": "f10518:m1"} {"signature": "def get_package_data():", "body": "return {}", "docstring": "Return package data\n\nFor example:\n\n {'': ['*.txt', '*.rst'],\n 'hello': ['*.msg']}\n\nmeans:\n - If any package contains *.txt or *.rst files,\n include them\n - And include any *.msg files found in\n the 'hello' package, too:", "id": "f10518:m2"} {"signature": "def add_path(path):", "body": "drivers.add_path(path)", "docstring": "Adds a directory to the list of folders where to load drivers from", "id": "f10519:m0"} {"signature": "def remove_path(path):", "body": "drivers.remove_path(path)", "docstring": "Removes a directory from the list of folders where to load drivers from", "id": "f10519:m1"} {"signature": "def clear_path():", "body": "drivers.clear_path()", "docstring": "Clears the list of folders where to load drivers from", "id": "f10519:m2"} {"signature": "def list_available_drivers():", "body": "return drivers.available_drivers()", "docstring": "Returns a list of string with the names of available drivers.\n\n Available means that the driver is installed and can be used. 
For example,\n it will not contain \"Raspberry\" if you're not running on a Raspberry Pi,\n even if the raspberry.py script is present in the drivers directory.\n\n @returns a list of strings that can be fed to `ahio.new_driver` to get an\n instance of the desired driver.", "id": "f10519:m3"} {"signature": "def driver_info(name):", "body": "return drivers.driver_info(name)", "docstring": "Returns driver metadata.\n\n Returns a class which static properties contains metadata from the\n driver, such as name and availability.\n\n @returns a subclass from `ahio.abstract_driver.AbstractahioDriverInfo` with\n metadata from the driver.", "id": "f10519:m4"} {"signature": "def new_driver(name):", "body": "return drivers.new_driver_object(name)", "docstring": "Instantiates a new object of the named driver.\n\n The API used by the returned object can be seen in\n `ahio.abstract_driver.AbstractDriver`\n\n @returns a Driver object from the required type or None if it's not\n available", "id": "f10519:m5"} {"signature": "def available_pins(self):", "body": "raise NotImplementedMethod()", "docstring": "Returns available pins.\n\n Returns a list of dictionaries indicating the available pins and it's\n capabilities. It should follow this format:\n \\verbatim\n [ {\n 'id': 1, # some value that represents this pin in your\n # implementation.\n # prefer numbers and Enums. This value will be used\n # in `map_pin(a,p)`\n 'name': 'Pin 1', # a name that can be shown to the user, if needed\n 'analog': {\n 'input': True, # if analog input is available\n 'output': False, # if analog output is available\n 'read_range': (0, 1023), # if input is supported, what is the\n # valid range (both inclusive)\n 'write_range': (0, 5) # if output is supported, what is the\n #valid range (both inclusive)\n },\n 'digital': {\n 'input': True, # if digital input is available\n 'output': True, # if digital output is available\n 'pwm': True # if pwm generation is available\n }\n }]\n \\endverbatim\n\n If you're developing a driver, you should override this function.\n\n @returns a list of dictionaries", "id": "f10520:c1:m0"} {"signature": "def map_pin(self, abstract_pin_id, physical_pin_id):", "body": "if physical_pin_id:self._pin_mapping[abstract_pin_id] = physical_pin_idelse:self._pin_mapping.pop(abstract_pin_id, None)", "docstring": "Maps a pin number to a physical device pin.\n\n To make it easy to change drivers without having to refactor a lot of\n code, this library does not use the names set by the driver to identify\n a pin. This function will map a number, that will be used by other\n functions, to a physical pin represented by the drivers pin id. That\n way, if you need to use another pin or change the underlying driver\n completly, you only need to redo the mapping.\n\n If you're developing a driver, keep in mind that your driver will not\n know about this. The other functions will translate the mapped pin to\n your id before calling your function.\n\n @arg abstract_pin_id the id that will identify this pin in the\n other function calls. You can choose what you want.\n\n @arg physical_pin_id the id returned in the driver.\n See `AbstractDriver.available_pins`. 
Setting it to None removes the\n mapping.", "id": "f10520:c1:m1"} {"signature": "def mapped_pins(self):", "body": "return self._pin_mapping", "docstring": "Returns a dictionary containing the mapped pins.\n\n Each key of the dictionary is the ID you set with map_pin, and each\n value is the driver-specific ID.\n\n @returns a dictionary of mapped pins", "id": "f10520:c1:m2"} {"signature": "def set_pin_interpolation(self,pin,read_min,read_max,write_min,write_max):", "body": "if type(pin) is list:args = (read_min, read_max, write_min, write_max)for p in pin:self.set_pin_interpolation(p, *args)returnvalid_read = (read_min is not None and read_max is not None)valid_write = (write_min is not None and write_max is not None)if not valid_read and not valid_write:self._pin_lin.pop(pin, None)returnpin_id = self._pin_mapping.get(pin, None)pins = [pin for pin in self.available_pins() if pin_id == pin['']]read = pins[]['']['']write = pins[]['']['']valid_read = valid_read and readvalid_write = valid_write and writeself._pin_lin[pin] = {'': (*read, read_min, read_max) if valid_read else None,'': (write_min, write_max, *write) if valid_write else None}", "docstring": "Interpolates input and output values for `pin`.\n\n Changes the output and input of `AbstractDriver.read` and\n `AbstractDriver.write` functions to use a value in range\n (`read_min`, `read_max`) or (`write_min`, `write_max`) instead of the\n values returned by `available_pins` (analog only). The conversion is\n done using linear interpolation. If `read_min`, `read_max`, `write_min`\n and `write_max` are all None or don't form valid pairs (like, read_min\n has a value but read_max is None), the pin is deregistered. If you pass\n a pair but leave the other with None values, only one direction is\n registered.\n\n @arg pin pin id you've set using `AbstractDriver.map_pin`\n @arg read_min the min value for the linear interpolation of\n `AbstractDriver.read`.\n @arg read_max the max value for the linear interpolation of\n `AbstractDriver.read`.\n @arg write_min the min value for the linear interpolation of\n `AbstractDriver.write`.\n @arg write_max the max value for the linear interpolation of\n `AbstractDriver.write`.", "id": "f10520:c1:m4"} {"signature": "def set_pin_direction(self, pin, direction):", "body": "if type(pin) is list:for p in pin:self.set_pin_direction(p, direction)returnpin_id = self._pin_mapping.get(pin, None)if pin_id and type(direction) is ahio.Direction:self._set_pin_direction(pin_id, direction)else:raise KeyError('' % pin)", "docstring": "Sets pin `pin` to `direction`.\n\n The pin should support the requested mode. Calling this function\n on a unmapped pin does nothing. Calling it with a unsupported direction\n throws RuntimeError.\n\n If you're developing a driver, you should implement\n _set_pin_direction(self, pin, direction) where `pin` will be one of\n your internal IDs. 
If a pin is set to OUTPUT, put it on LOW state.\n\n @arg pin pin id you've set using `AbstractDriver.map_pin`\n @arg mode a value from `AbstractDriver.Direction`\n\n @throw KeyError if pin isn't mapped.\n @throw RuntimeError if direction is not supported by pin.", "id": "f10520:c1:m5"} {"signature": "def pin_direction(self, pin):", "body": "if type(pin) is list:return [self.pin_direction(p) for p in pin]pin_id = self._pin_mapping.get(pin, None)if pin_id:return self._pin_direction(pin_id)else:raise KeyError('' % pin)", "docstring": "Gets the `ahio.Direction` this pin was set to.\n\n If you're developing a driver, implement _pin_direction(self, pin)\n\n @arg pin the pin you want to see the mode\n @returns the `ahio.Direction` the pin is set to\n\n @throw KeyError if pin isn't mapped.", "id": "f10520:c1:m6"} {"signature": "def set_pin_type(self, pin, ptype):", "body": "if type(pin) is list:for p in pin:self.set_pin_type(p, ptype)returnpin_id = self._pin_mapping.get(pin, None)if type(ptype) is not ahio.PortType:raise KeyError('')elif pin_id:self._set_pin_type(pin_id, ptype)else:raise KeyError('' % pin)", "docstring": "Sets pin `pin` to `type`.\n\n The pin should support the requested mode. Calling this function\n on a unmapped pin does nothing. Calling it with a unsupported mode\n throws RuntimeError.\n\n If you're developing a driver, you should implement\n _set_pin_type(self, pin, ptype) where `pin` will be one of your\n internal IDs. If a pin is set to OUTPUT, put it on LOW state.\n\n @arg pin pin id you've set using `AbstractDriver.map_pin`\n @arg mode a value from `AbstractDriver.PortType`\n\n @throw KeyError if pin isn't mapped.\n @throw RuntimeError if type is not supported by pin.", "id": "f10520:c1:m7"} {"signature": "def pin_type(self, pin):", "body": "if type(pin) is list:return [self.pin_type(p) for p in pin]pin_id = self._pin_mapping.get(pin, None)if pin_id:return self._pin_type(pin_id)else:raise KeyError('' % pin)", "docstring": "Gets the `ahio.PortType` this pin was set to.\n\n If you're developing a driver, implement _pin_type(self, pin)\n\n @arg pin the pin you want to see the mode\n @returns the `ahio.PortType` the pin is set to\n\n @throw KeyError if pin isn't mapped.", "id": "f10520:c1:m8"} {"signature": "def write(self, pin, value, pwm=False):", "body": "if type(pin) is list:for p in pin:self.write(p, value, pwm)returnif pwm and type(value) is not int and type(value) is not float:raise TypeError('')pin_id = self._pin_mapping.get(pin, None)if pin_id:lpin = self._pin_lin.get(pin, None)if lpin and type(lpin['']) is tuple:write_range = lpin['']value = self._linear_interpolation(value, *write_range)self._write(pin_id, value, pwm)else:raise KeyError('' % pin)", "docstring": "Sets the output to the given value.\n\n Sets `pin` output to given value. If the pin is in INPUT mode, do\n nothing. If it's an analog pin, value should be in write_range.\n If it's not in the allowed range, it will be clamped. If pin is in\n digital mode, value can be `ahio.LogicValue` if `pwm` = False, or a\n number between 0 and 1 if `pwm` = True. If PWM is False, the pin will\n be set to HIGH or LOW, if `pwm` is True, a PWM wave with the given\n cycle will be created. If the pin does not support PWM and `pwm` is\n True, raise RuntimeError. The `pwm` argument should be ignored in case\n the pin is analog. 
If value is not valid for the given\n pwm/analog|digital combination, raise TypeError.\n\n If you're developing a driver, implement _write(self, pin, value, pwm)\n\n @arg pin the pin to write to\n @arg value the value to write on the pin\n @arg pwm wether the output should be a pwm wave\n\n @throw RuntimeError if the pin does not support PWM and `pwm` is True.\n @throw TypeError if value is not valid for this pin's mode and pwm\n value.\n @throw KeyError if pin isn't mapped.", "id": "f10520:c1:m9"} {"signature": "def read(self, pin):", "body": "if type(pin) is list:return [self.read(p) for p in pin]pin_id = self._pin_mapping.get(pin, None)if pin_id:value = self._read(pin_id)lpin = self._pin_lin.get(pin, None)if lpin and type(lpin['']) is tuple:read_range = lpin['']value = self._linear_interpolation(value, *read_range)return valueelse:raise KeyError('' % pin)", "docstring": "Reads value from pin `pin`.\n\n Returns the value read from pin `pin`. If it's an analog pin, returns\n a number in analog.input_range. If it's digital, returns\n `ahio.LogicValue`.\n\n If you're developing a driver, implement _read(self, pin)\n\n @arg pin the pin to read from\n @returns the value read from the pin\n\n @throw KeyError if pin isn't mapped.", "id": "f10520:c1:m10"} {"signature": "def analog_references(self):", "body": "raise NotImplementedMethod()", "docstring": "Possible values for analog reference.\n\n If you're developing a driver, override this function.\n\n @returns a list of values that can be passed to set_analog_reference or\n returned from analog_reference(). Very driver specific.", "id": "f10520:c1:m11"} {"signature": "def set_analog_reference(self, reference, pin=None):", "body": "if pin is None:self._set_analog_reference(reference, None)else:pin_id = self._pin_mapping.get(pin, None)if pin_id:self._set_analog_reference(reference, pin_id)else:raise KeyError('' % pin)", "docstring": "Sets the analog reference to `reference`\n\n If the driver supports per pin reference setting, set pin to the\n desired reference. If not, passing None means set to all, which is the\n default in most hardware. If only per pin reference is supported and\n pin is None, raise RuntimeError.\n\n If you're developing a driver, implement\n _set_analog_reference(self, reference, pin). Raise RuntimeError if pin\n was set but is not supported by the platform.\n\n @arg reference the value that describes the analog reference. See\n `AbstractDriver.analog_references`\n @arg pin if the the driver supports it, the pin that will use\n `reference` as reference. None for all.\n\n @throw RuntimeError if pin is None on a per pin only hardware, or if\n it's a valid pin on a global only analog reference hardware.\n @throw KeyError if pin isn't mapped.", "id": "f10520:c1:m12"} {"signature": "def analog_reference(self, pin=None):", "body": "if pin is None:return self._analog_reference(None)else:pin_id = self._pin_mapping.get(pin, None)if pin_id:return self._analog_reference(pin_id)else:raise KeyError('' % pin)", "docstring": "Returns the analog reference.\n\n If the driver supports per pin analog reference setting, returns the\n reference for pin `pin`. If pin is None, returns the global analog\n reference. If only per pin reference is supported and pin is None,\n raise RuntimeError.\n\n If you're developing a driver, implement _analog_reference(self, pin)\n\n @arg pin if the the driver supports it, the pin that will use\n `reference` as reference. 
None for all.\n\n @returns the reference used for pin\n\n @throw RuntimeError if pin is None on a per pin only hardware, or if\n it's a valid pin on a global only analog reference hardware.\n @throw KeyError if pin isn't mapped.", "id": "f10520:c1:m13"} {"signature": "def set_pwm_frequency(self, frequency, pin=None):", "body": "if pin is None:self._set_pwm_frequency(frequency, None)else:pin_id = self._pin_mapping.get(pin, None)if pin_id:self._set_pwm_frequency(frequency, pin_id)else:raise KeyError('' % pin)", "docstring": "Sets PWM frequency, if supported by hardware\n\n If the driver supports per pin frequency setting, set pin to the\n desired frequency. If not, passing None means set to all. If only per\n pin frequency is supported and pin is None, raise RuntimeError.\n\n If you're developing a driver, implement\n _set_pwm_frequency(self, frequency, pin). Raise RuntimeError if pin\n was set but is not supported by the platform.\n\n @arg frequency pwm frequency to be set, in Hz\n @arg pin if the the driver supports it, the pin that will use\n `frequency` as pwm frequency. None for all/global.\n\n @throw RuntimeError if pin is None on a per pin only hardware, or if\n it's a valid pin on a global only hardware.\n @throw KeyError if pin isn't mapped.", "id": "f10520:c1:m14"} {"signature": "def setup(self, port):", "body": "port = str(port)self._serial = serial.Serial(port, , timeout=)time.sleep() if not self._serial.is_open:raise RuntimeError('')self._serial.write(b'')if self._serial.read() != b'':raise RuntimeError('')ps = [p for p in self.available_pins() if p['']['']]for pin in ps:self._set_pin_direction(pin[''], ahio.Direction.Output)", "docstring": "Connects to an Arduino UNO on serial port `port`.\n\n @throw RuntimeError can't connect to Arduino", "id": "f10522:c1:m2"} {"signature": "def pi_version():", "body": "if not os.path.isfile(''):return Nonewith open('', '') as infile:cpuinfo = infile.read()match = re.search('', cpuinfo,flags=re.MULTILINE | re.IGNORECASE)if not match:return Noneif match.group() == '':return elif match.group() == '':return elif match.group() == '':return else:return None", "docstring": "Detect the version of the Raspberry Pi. Returns either 1, 2 or\n None depending on if it's a Raspberry Pi 1 (model A, B, A+, B+),\n Raspberry Pi 2 (model B+), or not a Raspberry Pi.\n https://github.com/adafruit/Adafruit_Python_GPIO/blob/master/Adafruit_GPIO/Platform.py", "id": "f10523:m0"} {"signature": "def setup(self, address, rack=, slot=, port=):", "body": "rack = int(rack)slot = int(slot)port = int(port)address = str(address)self._client = snap7.client.Client()self._client.connect(address, rack, slot, port)", "docstring": "Connects to a Siemens S7 PLC.\n\n Connects to a Siemens S7 using the Snap7 library.\n See [the snap7 documentation](http://snap7.sourceforge.net/) for\n supported models and more details.\n\n It's not currently possible to query the device for available pins,\n so `available_pins()` returns an empty list. Instead, you should use\n `map_pin()` to map to a Merker, Input or Output in the PLC. The\n internal id you should use is a string following this format:\n '[DMQI][XBWD][0-9]+.?[0-9]*' where:\n\n * [DMQI]: D for DB, M for Merker, Q for Output, I for Input\n * [XBWD]: X for bit, B for byte, W for word, D for dword\n * [0-9]+: Address of the resource\n * [0-9]*: Bit of the address (type X only, ignored in others)\n\n For example: 'IB100' will read a byte from an input at address 100 and\n 'MX50.2' will read/write bit 2 of the Merker at address 50. 
It's not\n allowed to write to inputs (I), but you can read/write Outputs, DBs and\n Merkers. If it's disallowed by the PLC, an exception will be thrown by the\n python-snap7 library.\n\n For this library to work, it might be necessary to change some settings\n in the PLC itself. See\n [the snap7 documentation](http://snap7.sourceforge.net/) for more\n information. You also need to put the PLC in RUN mode. Note however that\n having a Ladder program downloaded, running and modifying variables\n will probably interfere with inputs and outputs, so put it in RUN mode,\n but preferably without a downloaded program.\n\n @arg address IP address of the module.\n @arg rack rack where the module is installed.\n @arg slot slot in the rack where the module is installed.\n @arg port port the PLC is listening to.\n\n @throw RuntimeError if something went wrong\n @throw any exception thrown by `snap7`'s methods.", "id": "f10524:c1:m2"} {"signature": "def available_drivers():", "body": "global __modulesglobal __availableif type(__modules) is not list:__modules = list(__modules)if not __available:__available = [d.ahioDriverInfo.NAMEfor d in __modulesif d.ahioDriverInfo.AVAILABLE]return __available", "docstring": "Returns a list of available driver names.", "id": "f10526:m3"} {"signature": "def driver_info(name):", "body": "driver = __locate_driver_named(name)return driver.ahioDriverInfo if driver else None", "docstring": "Returns driver metadata.\n\n Returns a class whose static properties contain metadata from the\n driver, such as name and availability.\n\n @returns a subclass from `ahio.abstract_driver.AbstractahioDriverInfo` with\n metadata from the driver.", "id": "f10526:m4"} {"signature": "def new_driver_object(name):", "body": "driver = __locate_driver_named(name)return driver.Driver() if driver else None", "docstring": "Instantiates a new object of the named driver.\n\n The API used by the returned object can be seen in\n `ahio.abstract_driver.AbstractDriver`\n\n @returns a Driver object from the required type or None if it's not\n available", "id": "f10526:m5"} {"signature": "def __load_driver(name):", "body": "global __counttry:dname = os.path.basename(name).replace('', '')mod_name = '' % (dname, __count)loader = importlib.machinery.SourceFileLoader(mod_name, name)driver = loader.load_module()__count += return driver if hasattr(driver, '') else Falseexcept Exception:return False", "docstring": "Tries to load the driver named @arg name.\n\n A driver is considered valid if it has an ahioDriverInfo object. 
It should\n however implement all APIs described in `ahio.abstract_driver`, as they'll\n be needed to use the driver.\n\n @returns the driver package, or False if it failed.", "id": "f10526:m6"} {"signature": "def __locate_driver_named(name):", "body": "global __modulesif type(__modules) is not list:__modules = list(__modules)ms = [d for d in __modules if d.ahioDriverInfo.NAME == name]if not ms:return Nonereturn ms[]", "docstring": "Searches __modules for a driver named @arg name.\n\n @returns the package for driver @arg name or None if one can't be found.", "id": "f10526:m7"} {"signature": "def setup(self, address, port):", "body": "address = str(address)port = int(port)self._socket = socket.socket()self._socket.connect((address, port))self._socket.send(b'')with self._socket.makefile() as f:if f.readline().strip() != '':raise RuntimeError('')", "docstring": "Connects to server at `address`:`port`.\n\n Connects to a TCP server listening at `address`:`port` that implements\n the protocol described in the file \"Generic TCP I:O Protocol.md\"\n\n @arg address IP or address to connect to.\n @arg port port to connect to.\n\n @throw RuntimeError if connection was successful but protocol isn't\n supported.\n @throw any exception thrown by `socket.socket`'s methods.", "id": "f10527:c1:m2"} {"signature": "def setup(self,configuration=\"\"):", "body": "from pymodbus3.client.sync import ModbusSerialClient, ModbusUdpClient, ModbusTcpClientself._client = eval(configuration)self._client.connect()", "docstring": "Start a Modbus server.\n\n The following classes are available with their respective named\n parameters:\n\n ModbusTcpClient\n host: The host to connect to (default 127.0.0.1)\n port: The modbus port to connect to (default 502)\n source_address: The source address tuple to bind to (default ('', 0))\n timeout: The timeout to use for this socket (default Defaults.Timeout)\n\n ModbusUdpClient\n host: The host to connect to (default 127.0.0.1)\n port: The modbus port to connect to (default 502)\n timeout: The timeout to use for this socket (default None)\n\n ModbusSerialClient\n method: The method to use for connection (ascii, rtu, binary)\n port: The serial port to attach to\n stopbits: The number of stop bits to use (default 1)\n bytesize: The bytesize of the serial messages (default 8 bits)\n parity: Which kind of parity to use (default None)\n baudrate: The baud rate to use for the serial device\n timeout: The timeout between serial requests (default 3s)\n\n When configuring the ports, the following convention should be\n respected:\n\n portname: C1:13 -> Coil on device 1, address 13\n\n The letters can be:\n\n C = Coil\n I = Input\n R = Register\n H = Holding\n\n @arg configuration a string that instantiates one of those classes.\n\n @throw RuntimeError can't connect to the device", "id": "f10528:c1:m2"} {"signature": "def get(self, url, params={}):", "body": "params.update({'': self.api_key})try:response = requests.get(self.host + url, params=params)except RequestException as e:response = e.argsreturn self.json_parse(response.content)", "docstring": "Issues a GET request against the API, properly formatting the params\n\n:param url: a string, the url you are requesting\n:param params: a dict, the key-value of all the parameters needed\n in the request\n:returns: a dict parsed from the JSON response", "id": "f10530:c0:m1"} {"signature": "def post(self, url, params={}, files=None):", "body": "params.update({'': self.api_key})try:response = requests.post(self.host + url, data=params, files=files)return 
self.json_parse(response.content)except RequestException as e:return self.json_parse(e.args)", "docstring": "Issues a POST request against the API, allows for multipart data uploads\n\n:param url: a string, the url you are requesting\n:param params: a dict, the key-value of all the parameters needed\n in the request\n:param files: a list, the list of tuples of files\n\n:returns: a dict parsed of the JSON response", "id": "f10530:c0:m2"} {"signature": "def json_parse(self, content):", "body": "try:data = json.loads(content)except ValueError as e:return {'': { '': , '': ''}, '': {\"\": \"\"}}if '' in data:return {'': { '': , '': ''}, '': {\"\": data['']}}elif '' in data:return data['']else:return {}", "docstring": "Wraps and abstracts content validation and JSON parsing\nto make sure the user gets the correct response.\n\n:param content: The content returned from the web request to be parsed as json\n\n:returns: a dict of the json response", "id": "f10530:c0:m3"} {"signature": "def __init__(self, api_key):", "body": "self.ApiResourceMixin.set_api_key(api_key)self.products = self.ProductsResource()self.quote = self.QuoteResource()self.status = self.StatusResource()self.webhooks = self.WebhooksResource()", "docstring": "Initializes the ShirtsIOClient object, creating the ShirtsIORequest\nobject which deals with all request formatting.\n\n:param api_key: a string, the user specific secret, received\n from the /access_token endpoint\n\n:returns: None", "id": "f10531:c0:m0"} {"signature": "def validate_params(required, optional, params):", "body": "missing_fields = [x for x in required if x not in params]if missing_fields:field_strings = \"\".join(missing_fields)raise Exception(\"\" % field_strings)disallowed_fields = [x for x in params if x not in optional and x not in required]if disallowed_fields:field_strings = \"\".join(disallowed_fields)raise Exception(\"\" % field_strings)", "docstring": "Helps us validate the parameters for the request\n\n:param valid_options: a list of strings of valid options for the\n api request\n:param params: a dict, the key-value store which we really only care about\n the key which has tells us what the user is using for the\n API request\n\n:returns: None or throws an exception if the validation fails", "id": "f10532:m0"} {"signature": "def new_user(yaml_path):", "body": "print('')api_key = input('')tokens = {'': api_key,}yaml_file = open(yaml_path, '')yaml.dump(tokens, yaml_file, indent=)yaml_file.close()return tokens", "docstring": "Return the consumer and oauth tokens with three-legged OAuth process and\nsave in a yaml file in the user's home directory.", "id": "f10533:m0"} {"signature": "def build_index_and_mapping(triples):", "body": "ents = bidict()rels = bidict()ent_id = rel_id = collected = []for t in triples:for e in (t.head, t.tail):if e not in ents:ents[e] = ent_ident_id += if t.relation not in rels:rels[t.relation] = rel_idrel_id += collected.append(kgedata.TripleIndex(ents[t.head], rels[t.relation], ents[t.tail]))return collected, ents, rels", "docstring": "index all triples into indexes and return their mappings", "id": "f10537:m0"} {"signature": "def recover_triples_from_mapping(indexes, ents: bidict, rels: bidict):", "body": "triples = []for t in indexes:triples.append(kgedata.Triple(ents.inverse[t.head], rels.inverse[t.relation], ents.inverse[t.tail]))return triples", "docstring": "recover triples from mapping.", "id": "f10537:m1"} {"signature": "def unpack(triple):", "body": "return triple.head, triple.relation, triple.tail", "docstring": 
"unpacks triple into (h, r, t). Can take Triple or TripleIndex.", "id": "f10537:m2"} {"signature": "def shuffle_triples(triples):", "body": "return np.random.permutation(triples)", "docstring": "Shuffle triples.", "id": "f10537:m3"} {"signature": "def _transform_triple_numpy(x):", "body": "return np.array([x.head, x.relation, x.tail], dtype=np.int64)", "docstring": "Transform triple index into a 1-D numpy array.", "id": "f10537:m5"} {"signature": "def pack_triples_numpy(triples):", "body": "if len(triples) == :return np.array([], dtype=np.int64)return np.stack(list(map(_transform_triple_numpy, triples)), axis=)", "docstring": "Packs a list of triple indexes into a 2D numpy array.", "id": "f10537:m6"} {"signature": "def remove_near_duplicate_relation(triples, threshold=):", "body": "logging.debug(\"\")_assert_threshold(threshold)duplicate_rel_counter = defaultdict(list)relations = set()for t in triples:duplicate_rel_counter[t.relation].append(f\"\")relations.add(t.relation)relations = list(relations)num_triples = len(triples)removal_relation_set = set()for rel, values in duplicate_rel_counter.items():duplicate_rel_counter[rel] = Superminhash(values)for i in relations:for j in relations:if i == j or i in removal_relation_set or j in removal_relation_set: continueclose_relations = [i]if _set_close_to(duplicate_rel_counter[i], duplicate_rel_counter[j], threshold):close_relations.append(j)if len(close_relations) > :close_relations.pop(np.random.randint(len(close_relations)))removal_relation_set |= set(close_relations)logging.info(\"\".format(len(removal_relation_set), str(removal_relation_set)))return list(filterfalse(lambda x: x.relation in removal_relation_set, triples))", "docstring": "If entity pairs in a relation is as close as another relations, only keep one relation of such set.", "id": "f10537:m10"} {"signature": "def remove_inverse_relation(triples, threshold=):", "body": "logging.debug(\"\")_assert_threshold(threshold)rel_counter = defaultdict(list)inverse_rel_counter = defaultdict(list)relations = set()for t in triples:rel_counter[t.relation].append(f\"\")inverse_rel_counter[t.relation].append(f\"\")relations.add(t.relation)relations = list(relations)for rel, values in rel_counter.items():rel_counter[rel] = Superminhash(values)for rel, values in inverse_rel_counter.items():inverse_rel_counter[rel] = Superminhash(values)num_triples = len(triples)removal_relation_set = set()for i in relations:if i in removal_relation_set: continueclose_relations = [i]for j in relations:if i == j or j in removal_relation_set: continueif _set_close_to(rel_counter[i], inverse_rel_counter[j], threshold):close_relations.append(j)if len(close_relations) > :close_relations.pop(np.random.randint(len(close_relations)))removal_relation_set |= set(close_relations)logging.info(\"\".format(len(removal_relation_set), str(removal_relation_set)))return list(filterfalse(lambda x: x.relation in removal_relation_set, triples))", "docstring": "if entity pairs in a relation overlaps its reverse pairs in another relation, only keep one relation of such set.", "id": "f10537:m11"} {"signature": "def shrink_indexes_in_place(self, triples):", "body": "_ent_roots = self.UnionFind(self._ent_id)_rel_roots = self.UnionFind(self._rel_id)for t in triples:_ent_roots.add(t.head)_ent_roots.add(t.tail)_rel_roots.add(t.relation)for i, t in enumerate(triples):h = _ent_roots.find(t.head)r = _rel_roots.find(t.relation)t = _ent_roots.find(t.tail)triples[i] = kgedata.TripleIndex(h, r, t)ents = bidict()available_ent_idx = for previous_idx, 
ent_exist in enumerate(_ent_roots.roots()):if not ent_exist:self._ents.inverse.pop(previous_idx)else:ents[self._ents.inverse[previous_idx]] = available_ent_idxavailable_ent_idx += rels = bidict()available_rel_idx = for previous_idx, rel_exist in enumerate(_rel_roots.roots()):if not rel_exist:self._rels.inverse.pop(previous_idx)else:rels[self._rels.inverse[previous_idx]] = available_rel_idxavailable_rel_idx += self._ents = entsself._rels = relsself._ent_id = available_ent_idxself._rel_id = available_rel_idx", "docstring": "Uses a union find to find segment.", "id": "f10537:c0:m3"} {"signature": "def read_labels(filename, delimiter=DEFAULT_DELIMITER):", "body": "_assert_good_file(filename)with open(filename) as f:labels = [_label_processing(l, delimiter) for l in f]return labels", "docstring": "read label files. Format: ent label", "id": "f10541:m6"} {"signature": "def write_index_translation(translation_filename, entity_ids, relation_ids):", "body": "translation = triple_pb.Translation()entities = []for name, index in entity_ids.items():translation.entities.add(element=name, index=index)relations = []for name, index in relation_ids.items():translation.relations.add(element=name, index=index)with open(translation_filename, \"\") as f:f.write(translation.SerializeToString())", "docstring": "write triples into a translation file.", "id": "f10541:m7"} {"signature": "def write_triples(filename, triples, delimiter=DEFAULT_DELIMITER, triple_order=\"\"):", "body": "with open(filename, '') as f:for t in triples:line = t.serialize(delimiter, triple_order)f.write(line + \"\")", "docstring": "write triples to file.", "id": "f10541:m8"} {"signature": "def read_translation(filename):", "body": "translation = triple_pb.Translation()with open(filename, \"\") as f:translation.ParseFromString(f.read())def unwrap_translation_units(units):for u in units: yield u.element, u.indexreturn (list(unwrap_translation_units(translation.entities)),list(unwrap_translation_units(translation.relations)))", "docstring": "Returns protobuf mapcontainer. Read from translation file.", "id": "f10541:m9"} {"signature": "def read_openke_translation(filename, delimiter='', entity_first=True):", "body": "result = {}with open(filename, \"\") as f:_ = next(f) for line in f:line_slice = line.rstrip().split(delimiter)if not entity_first:line_slice = list(reversed(line_slice))result[line_slice[]] = line_slice[]return result", "docstring": "Returns map with entity or relations from plain text.", "id": "f10541:m10"} {"signature": "def strip_labels(filename):", "body": "labels = []with open(filename) as f, open('', '') as f1:for l in f:if l.startswith(''):nextl = l.replace(\"\", '')l = l.replace(\"\", '')l = l.replace(\"\", '')l = l.replace(\"\", '')f1.write(l)", "docstring": "Strips labels.", "id": "f10551:m0"} {"signature": "def logon(self, username, password):", "body": "if self._token:self.logoff()try:response = self.__makerequest('', email=username, password=password)except FogBugzAPIError:e = sys.exc_info()[]raise FogBugzLogonError(e)self._token = response.token.stringif type(self._token) == CData:self._token = self._token.encode('')", "docstring": "Logs the user on to FogBugz.\n\nReturns None for a successful login.", "id": "f10557:c4:m1"} {"signature": "def logoff(self):", "body": "self.__makerequest('')self._token = None", "docstring": "Logs off the current user.", "id": "f10557:c4:m2"} {"signature": "def token(self, token):", "body": "self._token = token", "docstring": "Set the token without actually logging on. 
More secure.", "id": "f10557:c4:m3"} {"signature": "def __encode_multipart_formdata(self, fields, files):", "body": "BOUNDARY = _make_boundary()if len(files) > :fields[''] = str(len(files))crlf = ''buf = BytesIO()for k, v in fields.items():if DEBUG:print(\"\" % (repr(k), repr(v)))lines = ['' + BOUNDARY,'' % k,'',str(v),'',]buf.write(crlf.join(lines).encode(''))n = for f, h in files.items():n += lines = ['' + BOUNDARY,'''' % (n, f),'',]buf.write(crlf.join(lines).encode(''))lines = ['','','',]buf.write(crlf.join(lines).encode(''))buf.write(h.read())buf.write(crlf.encode(''))buf.write(('' + BOUNDARY + '' + crlf).encode(''))content_type = \"\" % BOUNDARYreturn content_type, buf.getvalue()", "docstring": "fields is a sequence of (key, value) elements for regular form fields.\nfiles is a sequence of (filename, filehandle) files to be uploaded\nreturns (content_type, body)", "id": "f10557:c4:m4"} {"signature": "def __getattr__(self, name):", "body": "if name.startswith(\"\"):raise AttributeError(\"\" % name)if name not in self.__handlerCache:def handler(**kwargs):return self.__makerequest(name, **kwargs)self.__handlerCache[name] = handlerreturn self.__handlerCache[name]", "docstring": "Handle all FogBugz API calls. Example::\n\n fb.logon(email@example.com, password)\n response = fb.search(q=\"assignedto:email\")", "id": "f10557:c4:m6"} {"signature": "def get_transition_viewset_method(transition_name, **kwargs):", "body": "@detail_route(methods=[''], **kwargs)def inner_func(self, request, pk=None, **kwargs):object = self.get_object()transition_method = getattr(object, transition_name)transition_method(by=self.request.user)if self.save_after_transition:object.save()serializer = self.get_serializer(object)return Response(serializer.data)return inner_func", "docstring": "Create a viewset method for the provided `transition_name`", "id": "f10559:m0"} {"signature": "def get_viewset_transition_action_mixin(model, **kwargs):", "body": "instance = model()class Mixin(object):save_after_transition = Truetransitions = instance.get_all_status_transitions()transition_names = set(x.name for x in transitions)for transition_name in transition_names:setattr(Mixin,transition_name,get_transition_viewset_method(transition_name, **kwargs))return Mixin", "docstring": "Find all transitions defined on `model`, then create a corresponding\nviewset action method for each and apply it to `Mixin`. 
Finally, return\n`Mixin`", "id": "f10559:m1"} {"signature": "def process_macros(self, content: str) -> str:", "body": "def _sub(macro):name = macro.group('')params = self.get_options(macro.group(''))return self.options[''].get(name, '').format_map(params)return self.pattern.sub(_sub, content)", "docstring": "Replace macros with content defined in the config.\n\n :param content: Markdown content\n\n :returns: Markdown content without macros", "id": "f10561:c0:m0"} {"signature": "def authenticate(self, request):", "body": "try:oauth_request = oauth_provider.utils.get_oauth_request(request)except oauth.Error as err:raise exceptions.AuthenticationFailed(err.message)if not oauth_request:return Noneoauth_params = oauth_provider.consts.OAUTH_PARAMETERS_NAMESfound = any(param for param in oauth_params if param in oauth_request)missing = list(param for param in oauth_params if param not in oauth_request)if not found:return Noneif missing:msg = '' % (''.join(missing))raise exceptions.AuthenticationFailed(msg)if not self.check_nonce(request, oauth_request):msg = ''raise exceptions.AuthenticationFailed(msg)try:consumer_key = oauth_request.get_parameter('')consumer = oauth_provider_store.get_consumer(request, oauth_request, consumer_key)except oauth_provider.store.InvalidConsumerError:msg = '' % oauth_request.get_parameter('')raise exceptions.AuthenticationFailed(msg)if consumer.status != oauth_provider.consts.ACCEPTED:msg = '' % consumer.get_status_display()raise exceptions.AuthenticationFailed(msg)try:token_param = oauth_request.get_parameter('')token = oauth_provider_store.get_access_token(request, oauth_request, consumer, token_param)except oauth_provider.store.InvalidTokenError:msg = '' % oauth_request.get_parameter('')raise exceptions.AuthenticationFailed(msg)try:self.validate_token(request, consumer, token)except oauth.Error as err:raise exceptions.AuthenticationFailed(err.message)user = token.userif not user.is_active:msg = '' % user.usernameraise exceptions.AuthenticationFailed(msg)return (token.user, token)", "docstring": "Returns two-tuple of (user, token) if authentication succeeds,\nor None otherwise.", "id": "f10563:c0:m1"} {"signature": "def authenticate_header(self, request):", "body": "return '' % self.www_authenticate_realm", "docstring": "If permission is denied, return a '401 Unauthorized' response,\nwith an appropraite 'WWW-Authenticate' header.", "id": "f10563:c0:m2"} {"signature": "def validate_token(self, request, consumer, token):", "body": "oauth_server, oauth_request = oauth_provider.utils.initialize_server_request(request)oauth_server.verify_request(oauth_request, consumer, token)", "docstring": "Check the token and raise an `oauth.Error` exception if invalid.", "id": "f10563:c0:m3"} {"signature": "def check_nonce(self, request, oauth_request):", "body": "oauth_nonce = oauth_request['']oauth_timestamp = oauth_request['']return check_nonce(request, oauth_request, oauth_nonce, oauth_timestamp)", "docstring": "Checks nonce of request, and return True if valid.", "id": "f10563:c0:m4"} {"signature": "def authenticate(self, request):", "body": "auth = get_authorization_header(request).split()if len(auth) == :msg = ''raise exceptions.AuthenticationFailed(msg)elif len(auth) > :msg = ''raise exceptions.AuthenticationFailed(msg)if auth and auth[].lower() == b'':access_token = auth[]elif '' in request.POST:access_token = request.POST['']elif '' in request.GET and self.allow_query_params_token:access_token = request.GET['']else:return Nonereturn self.authenticate_credentials(request, 
access_token)", "docstring": "Returns two-tuple of (user, token) if authentication succeeds,\nor None otherwise.", "id": "f10563:c1:m1"} {"signature": "def authenticate_credentials(self, request, access_token):", "body": "try:token = oauth2_provider.oauth2.models.AccessToken.objects.select_related('')token = token.get(token=access_token, expires__gt=provider_now())except oauth2_provider.oauth2.models.AccessToken.DoesNotExist:raise exceptions.AuthenticationFailed('')user = token.userif not user.is_active:msg = '' % user.get_username()raise exceptions.AuthenticationFailed(msg)return (user, token)", "docstring": "Authenticate the request, given the access token.", "id": "f10563:c1:m2"} {"signature": "def authenticate_header(self, request):", "body": "return '' % self.www_authenticate_realm", "docstring": "Bearer is the only finalized type currently\nCheck details on the `OAuth2Authentication.authenticate` method", "id": "f10563:c1:m3"} {"signature": "def get_version(package):", "body": "init_py = open(os.path.join(package, '')).read()return re.search(\"\",init_py, re.MULTILINE).group()", "docstring": "Return package version as listed in `__version__` in `init.py`.", "id": "f10569:m0"} {"signature": "def get_packages(package):", "body": "return [dirpathfor dirpath, dirnames, filenames in os.walk(package)if os.path.exists(os.path.join(dirpath, ''))]", "docstring": "Return root package and all sub-packages.", "id": "f10569:m1"} {"signature": "def get_package_data(package):", "body": "walk = [(dirpath.replace(package + os.sep, '', ), filenames)for dirpath, dirnames, filenames in os.walk(package)if not os.path.exists(os.path.join(dirpath, ''))]filepaths = []for base, filenames in walk:filepaths.extend([os.path.join(base, filename)for filename in filenames])return {package: filepaths}", "docstring": "Return all files under the root package, that are not in a\npackage themselves.", "id": "f10569:m2"} {"signature": "def move_dot(self):", "body": "return self.__class__(self.production, self.pos + , self.lookahead)", "docstring": "Returns the DottedRule that results from moving the dot.", "id": "f10574:c1:m7"} {"signature": "def first(self, symbols):", "body": "ret = set()if EPSILON in symbols:return set([EPSILON])for symbol in symbols:ret |= self._first[symbol] - set([EPSILON])if EPSILON not in self._first[symbol]:breakelse:ret.add(EPSILON)return ret", "docstring": "Computes the intermediate FIRST set using symbols.", "id": "f10574:c2:m1"} {"signature": "def _compute_first(self):", "body": "for terminal in self.terminals:self._first[terminal].add(terminal)self._first[END_OF_INPUT].add(END_OF_INPUT)while True:changed = Falsefor nonterminal, productions in self.nonterminals.items():for production in productions:new_first = self.first(production.rhs)if new_first - self._first[nonterminal]:self._first[nonterminal] |= new_firstchanged = Trueif not changed:break", "docstring": "Computes the FIRST set for every symbol in the grammar.\n\n Tenatively based on _compute_first in PLY.", "id": "f10574:c2:m2"} {"signature": "def _compute_follow(self):", "body": "self._follow[self.start_symbol].add(END_OF_INPUT)while True:changed = Falsefor nonterminal, productions in self.nonterminals.items():for production in productions:for i, symbol in enumerate(production.rhs):if symbol not in self.nonterminals:continuefirst = self.first(production.rhs[i + :])new_follow = first - set([EPSILON])if EPSILON in first or i == (len(production.rhs) - ):new_follow |= self._follow[nonterminal]if new_follow - 
self._follow[symbol]:self._follow[symbol] |= new_followchanged = Trueif not changed:break", "docstring": "Computes the FOLLOW set for every non-terminal in the grammar.\n\n Tenatively based on _compute_follow in PLY.", "id": "f10574:c2:m3"} {"signature": "def initial_closure(self):", "body": "first_rule = DottedRule(self.start, , END_OF_INPUT)return self.closure([first_rule])", "docstring": "Computes the initial closure using the START_foo production.", "id": "f10574:c2:m4"} {"signature": "def goto(self, rules, symbol):", "body": "return self.closure({rule.move_dot() for rule in rulesif not rule.at_end and rule.rhs[rule.pos] == symbol},)", "docstring": "Computes the next closure for rules based on the symbol we got.\n\n Args:\n rules - an iterable of DottedRules\n symbol - a string denoting the symbol we've just seen\n\n Returns: frozenset of DottedRules", "id": "f10574:c2:m5"} {"signature": "def closure(self, rules):", "body": "closure = set()todo = set(rules)while todo:rule = todo.pop()closure.add(rule)if rule.at_end:continuesymbol = rule.rhs[rule.pos]for production in self.nonterminals[symbol]:for first in self.first(rule.rest):if EPSILON in production.rhs:new_rule = DottedRule(production, , first)else:new_rule = DottedRule(production, , first)if new_rule not in closure:todo.add(new_rule)return frozenset(closure)", "docstring": "Fills out the entire closure based on some initial dotted rules.\n\n Args:\n rules - an iterable of DottedRules\n\n Returns: frozenset of DottedRules", "id": "f10574:c2:m6"} {"signature": "def closures(self):", "body": "initial = self.initial_closure()closures = collections.OrderedDict()goto = collections.defaultdict(dict)todo = set([initial])while todo:closure = todo.pop()closures[closure] = closuresymbols = {rule.rhs[rule.pos] for rule in closureif not rule.at_end}for symbol in symbols:next_closure = self.goto(closure, symbol)if next_closure in closures or next_closure in todo:next_closure = (closures.get(next_closure)or todo.get(next_closure))else:closures[next_closure] = next_closuretodo.add(next_closure)goto[closure][symbol] = next_closurereturn initial, closures, goto", "docstring": "Computes all LR(1) closure sets for the grammar.", "id": "f10574:c2:m7"} {"signature": "@staticmethoddef compute_precedence(terminals, productions, precedence_levels):", "body": "precedence = collections.OrderedDict()for terminal in terminals:precedence[terminal] = DEFAULT_PREClevel_precs = range(len(precedence_levels), , -)for i, level in zip(level_precs, precedence_levels):assoc = level[]for symbol in level[:]:precedence[symbol] = (assoc, i)for production, prec_symbol in productions:if prec_symbol is None:prod_terminals = [symbol for symbol in production.rhsif symbol in terminals] or [None]precedence[production] = precedence.get(prod_terminals[-],DEFAULT_PREC)else:precedence[production] = precedence.get(prec_symbol,DEFAULT_PREC)return precedence", "docstring": "Computes the precedence of terminal and production.\n\n The precedence of a terminal is it's level in the PRECEDENCE tuple. 
For\n a production, the precedence is the right-most terminal (if it exists).\n The default precedence is DEFAULT_PREC - (LEFT, 0).\n\n Returns:\n precedence - dict[terminal | production] = (assoc, level)", "id": "f10580:c0:m1"} {"signature": "@staticmethoddef make_tables(grammar, precedence):", "body": "ACTION = {}GOTO = {}labels = {}def get_label(closure):if closure not in labels:labels[closure] = len(labels)return labels[closure]def resolve_shift_reduce(lookahead, s_action, r_action):s_assoc, s_level = precedence[lookahead]r_assoc, r_level = precedence[r_action[]]if s_level < r_level:return r_actionelif s_level == r_level and r_assoc == LEFT:return r_actionelse:return s_actioninitial, closures, goto = grammar.closures()for closure in closures:label = get_label(closure)for rule in closure:new_action, lookahead = None, rule.lookaheadif not rule.at_end:symbol = rule.rhs[rule.pos]is_terminal = symbol in grammar.terminalshas_goto = symbol in goto[closure]if is_terminal and has_goto:next_state = get_label(goto[closure][symbol])new_action, lookahead = ('', next_state), symbolelif rule.production == grammar.start and rule.at_end:new_action = ('',)elif rule.at_end:new_action = ('', rule.production)if new_action is None:continueprev_action = ACTION.get((label, lookahead))if prev_action is None or prev_action == new_action:ACTION[label, lookahead] = new_actionelse:types = (prev_action[], new_action[])if types == ('', ''):chosen = resolve_shift_reduce(lookahead,prev_action,new_action)elif types == ('', ''):chosen = resolve_shift_reduce(lookahead,new_action,prev_action)else:raise TableConflictError(prev_action, new_action)ACTION[label, lookahead] = chosenfor symbol in grammar.nonterminals:if symbol in goto[closure]:GOTO[label, symbol] = get_label(goto[closure][symbol])return get_label(initial), ACTION, GOTO", "docstring": "Generates the ACTION and GOTO tables for the grammar.\n\n Returns:\n action - dict[state][lookahead] = (action, ...)\n goto - dict[state][just_reduced] = new_state", "id": "f10580:c0:m2"} {"signature": "def parse(self, raw):", "body": "lexer = self.LEXER(raw)tokens = iter(itertools.chain(lexer, [END_OF_INPUT_TOKEN]))stack = [(self.INITIAL_STATE, '', '')]token = next(tokens)while stack:state, _, _ = stack[-]action = self.ACTION.get((state, token.name))if action is None:raise StartSymbolNotReducedError(self.START)if action[] == '':production = action[]if len(production):args = (item[] for item in stack[-len(production):])del stack[-len(production):]else:args = []prev_state, _, _ = stack[-]new_state = self.GOTO[prev_state, production.lhs]stack.append((new_state,production.lhs,production.func(self, *args),))elif action[] == '':stack.append((action[], token.name, token.value))token = next(tokens)elif action[] == '':return stack[-][]", "docstring": "Parses an input string and applies the parser's grammar.", "id": "f10580:c1:m0"} {"signature": "@document_func_view(serializer_class=BookSerializer, response_serializer_class=LibrarySerializer,doc_format_args=('',))@api_view(['', '', ''])def hello_world(request):", "body": "return Response('')", "docstring": "Works for `functional` views too!\nYeah, that thing rocks!\nAnd allows formatting {}", "id": "f10592:m0"} {"signature": "def document_func_view(serializer_class=None,response_serializer_class=None,filter_backends=None,permission_classes=None,authentication_classes=None,doc_format_args=list(),doc_format_kwargs=dict()):", "body": "def decorator(func):if serializer_class:func.cls.serializer_class = func.view_class.serializer_class = 
serializer_classif response_serializer_class:func.cls.response_serializer_class = func.view_class.response_serializer_class = response_serializer_classif filter_backends:func.cls.filter_backends = func.view_class.filter_backends = filter_backendsif permission_classes:func.cls.permission_classes = func.view_class.permission_classes = permission_classesif authentication_classes:func.cls.authentication_classes = func.view_class.authentication_classes = authentication_classesif doc_format_args or doc_format_kwargs:func.cls.__doc__ = func.view_class.__doc__ = getdoc(func).format(*doc_format_args, **doc_format_kwargs)return funcreturn decorator", "docstring": "Decorator to make functional view documentable via drf-autodocs", "id": "f10612:m0"} {"signature": "def format_docstring(*args, **kwargs):", "body": "def decorator(func):func.__doc__ = getdoc(func).format(*args, **kwargs)return funcreturn decorator", "docstring": "Decorator for clean docstring formatting", "id": "f10612:m1"} {"signature": "def copy(src, dst):", "body": "(szip, dzip) = (src.endswith(\"\"), dst.endswith(\"\"))logging.info(\"\"%(src, dst))if szip and dzip:shutil.copy2(src, dst)elif szip:with zipfile.ZipFile(src, mode='') as z:tmpdir = tempfile.mkdtemp()try:z.extractall(tmpdir)if len(z.namelist()) != :raise RuntimeError(\"\"\"\"%src)tmpfile = join(tmpdir,z.namelist()[])try:os.remove(dst)except OSError:passshutil.move(tmpfile, dst)finally:shutil.rmtree(tmpdir, ignore_errors=True)elif dzip:with zipfile.ZipFile(dst, mode='', compression=ZIP_DEFLATED) as z:z.write(src, arcname=basename(src))else:shutil.copy2(src, dst)", "docstring": "File copy that support compress and decompress of zip files", "id": "f10614:m0"} {"signature": "def remove(path):", "body": "try:if isfile(path):os.remove(path)else:shutil.rmtree(path, ignore_errors=True)except OSError:pass", "docstring": "Remove file or dir if exist", "id": "f10614:m1"} {"signature": "def apply_changesets(args, changesets, catalog):", "body": "tmpdir = tempfile.mkdtemp()tmp_patch = join(tmpdir, \"\")tmp_lcat = join(tmpdir, \"\")for node in changesets:remove(tmp_patch)copy(node.mfile[''][''], tmp_patch)logging.info(\"\"%(catalog, tmp_lcat))shutil.move(catalog, tmp_lcat)cmd = args.patch_cmd.replace(\"\", tmp_lcat).replace(\"\", tmp_patch).replace(\"\", catalog)logging.info(\"\"%cmd)subprocess.check_call(cmd, shell=True)shutil.rmtree(tmpdir, ignore_errors=True)", "docstring": "Apply to the 'catalog' the changesets in the metafile list 'changesets", "id": "f10614:m2"} {"signature": "def lock_file(filename):", "body": "lockfile = \"\"%filenameif isfile(lockfile):return Falseelse:with open(lockfile, \"\"):passreturn True", "docstring": "Locks the file by writing a '.lock' file.\n Returns True when the file is locked and\n False when the file was locked already", "id": "f10617:m0"} {"signature": "def unlock_file(filename):", "body": "lockfile = \"\"%filenameif isfile(lockfile):os.remove(lockfile)return Trueelse:return False", "docstring": "Unlocks the file by remove a '.lock' file.\n Returns True when the file is unlocked and\n False when the file was unlocked already", "id": "f10617:m1"} {"signature": "def copy_smart_previews(local_catalog, cloud_catalog, local2cloud=True):", "body": "lcat_noext = local_catalog[:local_catalog.rfind(\"\")]ccat_noext = cloud_catalog[:cloud_catalog.rfind(\"\")]lsmart = join(dirname(local_catalog),\"\"%basename(lcat_noext))csmart = join(dirname(cloud_catalog),\"\"%basename(ccat_noext))if local2cloud and os.path.isdir(lsmart):logging.info(\"\"%(lsmart, 
csmart))distutils.dir_util.copy_tree(lsmart,csmart, update=)elif os.path.isdir(csmart):logging.info(\"\"%(csmart, lsmart))distutils.dir_util.copy_tree(csmart,lsmart, update=)", "docstring": "Copy Smart Previews from local to cloud or\n vice versa when 'local2cloud==False'\n NB: nothing happens if source dir doesn't exist", "id": "f10617:m2"} {"signature": "def hashsum(filename):", "body": "with open(filename, mode='') as f:d = hashlib.sha1()for buf in iter(partial(f.read, **), b''):d.update(buf)return d.hexdigest()", "docstring": "Return a hash of the file From ", "id": "f10617:m3"} {"signature": "def cmd_init_push_to_cloud(args):", "body": "(lcat, ccat) = (args.local_catalog, args.cloud_catalog)logging.info(\"\"%(lcat, ccat))if not isfile(lcat):args.error(\"\"%lcat)if isfile(ccat):args.error(\"\"%ccat)(lmeta, cmeta) = (\"\"%lcat, \"\"%ccat)if isfile(lmeta):args.error(\"\"%lmeta)if isfile(cmeta):args.error(\"\"%cmeta)logging.info(\"\"%(lcat))if not lock_file(lcat):raise RuntimeError(\"\"%lcat)util.copy(lcat, ccat)mfile = MetaFile(lmeta)utcnow = datetime.utcnow().strftime(DATETIME_FORMAT)[:-]mfile[''][''] = hashsum(lcat)mfile[''][''] = utcnowmfile[''][''] = lcatmfile[''][''] = ccatmfile[''][''] = hashsum(lcat)mfile[''][''] = utcnowmfile.flush()mfile = MetaFile(cmeta)mfile[''][''] = Truemfile[''][''] = hashsum(lcat)mfile[''][''] = utcnowmfile[''][''] = basename(ccat)mfile.flush()if not args.no_smart_previews:copy_smart_previews(lcat, ccat, local2cloud=True)logging.info(\"\"%(lcat))unlock_file(lcat)logging.info(\"\")", "docstring": "Initiate the local catalog and push it to the cloud", "id": "f10617:m4"} {"signature": "def cmd_init_pull_from_cloud(args):", "body": "(lcat, ccat) = (args.local_catalog, args.cloud_catalog)logging.info(\"\"%(ccat, lcat))if isfile(lcat):args.error(\"\"%lcat)if not isfile(ccat):args.error(\"\"%ccat)(lmeta, cmeta) = (\"\"%lcat, \"\"%ccat)if isfile(lmeta):args.error(\"\"%lmeta)if not isfile(cmeta):args.error(\"\"%cmeta)logging.info(\"\"%(lcat))if not lock_file(lcat):raise RuntimeError(\"\"%lcat)util.copy(ccat, lcat)cloudDAG = ChangesetDAG(ccat)path = cloudDAG.path(cloudDAG.root.hash, cloudDAG.leafs[].hash)util.apply_changesets(args, path, lcat)mfile = MetaFile(lmeta)utcnow = datetime.utcnow().strftime(DATETIME_FORMAT)[:-]mfile[''][''] = hashsum(lcat)mfile[''][''] = utcnowmfile[''][''] = lcatmfile[''][''] = cloudDAG.leafs[].mfile['']['']mfile[''][''] = cloudDAG.leafs[].mfile['']['']mfile[''][''] = cloudDAG.leafs[].mfile['']['']mfile.flush()if not args.no_smart_previews:copy_smart_previews(lcat, ccat, local2cloud=False)logging.info(\"\"%(lcat))unlock_file(lcat)logging.info(\"\")", "docstring": "Initiate the local catalog by downloading the cloud catalog", "id": "f10617:m5"} {"signature": "def cmd_normal(args):", "body": "logging.info(\"\")(lcat, ccat) = (args.local_catalog, args.cloud_catalog)(lmeta, cmeta) = (\"\"%lcat, \"\"%ccat)if not isfile(lcat):args.error(\"\"%lcat)if not isfile(ccat):args.error(\"\"%ccat)logging.info(\"\"%(lcat))if not lock_file(lcat):raise RuntimeError(\"\"%lcat)logging.info(\"\"%lcat)util.remove(\"\"%lcat)util.copy(lcat, \"\"%lcat)lmfile = MetaFile(lmeta)cmfile = MetaFile(cmeta)cloudDAG = ChangesetDAG(ccat)path = cloudDAG.path(lmfile[''][''], cloudDAG.leafs[].hash)util.apply_changesets(args, path, lcat)if not args.no_smart_previews:copy_smart_previews(lcat, ccat, local2cloud=False)logging.info(\"\"%lcat)util.remove(\"\"%lcat)util.copy(lcat, \"\"%lcat)logging.info(\"\"%(lcat))unlock_file(lcat)if 
args.lightroom_exec_debug:logging.info(\"\"%(args.lightroom_exec_debug, lcat))with open(lcat, \"\") as f:f.write(\"\"%args.lightroom_exec_debug)elif args.lightroom_exec:logging.info(\"\"%(args.lightroom_exec, lcat))subprocess.call([args.lightroom_exec, lcat])tmpdir = tempfile.mkdtemp()tmp_patch = join(tmpdir, \"\")diff_cmd = args.diff_cmd.replace(\"\", \"\"%lcat).replace(\"\", lcat).replace(\"\", tmp_patch)logging.info(\"\"%diff_cmd)subprocess.call(diff_cmd, shell=True)patch = \"\"%(ccat, hashsum(tmp_patch))util.copy(tmp_patch, patch)mfile = MetaFile(\"\"%patch)utcnow = datetime.utcnow().strftime(DATETIME_FORMAT)[:-]mfile[''][''] = Falsemfile[''][''] = hashsum(tmp_patch)mfile[''][''] = utcnowmfile[''][''] = basename(patch)mfile[''][''] = cloudDAG.leafs[].mfile['']['']mfile[''][''] = cloudDAG.leafs[].mfile['']['']mfile[''][''] = cloudDAG.leafs[].mfile['']['']mfile[''][''] = basename(cloudDAG.leafs[].mfile[''][''])mfile.flush()mfile = MetaFile(lmeta)mfile[''][''] = hashsum(lcat)mfile[''][''] = utcnowmfile[''][''] = patchmfile[''][''] = hashsum(tmp_patch)mfile[''][''] = utcnowmfile.flush()shutil.rmtree(tmpdir, ignore_errors=True)if not args.no_smart_previews:copy_smart_previews(lcat, ccat, local2cloud=True)logging.info(\"\"%(lcat))unlock_file(lcat)", "docstring": "Normal procedure:\n * Pull from cloud (if necessary)\n * Run Lightroom\n * Push to cloud", "id": "f10617:m6"} {"signature": "def parse_arguments(argv=None):", "body": "def default_config_path():\"\"\"\"\"\"if os.name == \"\":return join(os.getenv(''), \"\")else:return join(os.path.expanduser(\"\"), \"\")parser = argparse.ArgumentParser(description='',formatter_class=argparse.ArgumentDefaultsHelpFormatter)cmd_group = parser.add_mutually_exclusive_group()cmd_group.add_argument('',help='',action=\"\")cmd_group.add_argument('',help='',action=\"\")parser.add_argument('',help='',type=lambda x: os.path.expanduser(x))parser.add_argument('',help='',type=lambda x: os.path.expanduser(x))lr_exec = parser.add_mutually_exclusive_group()lr_exec.add_argument('',help='',type=str)lr_exec.add_argument('',help='',type=str)parser.add_argument('', '',help='',action=\"\")parser.add_argument('',help=\"\",action=\"\")parser.add_argument('',help=\"\",type=str,default=default_config_path())parser.add_argument('',help=\"\"\"\",type=str,)parser.add_argument('',help=\"\"\"\",type=str,)args = parser.parse_args(args=argv)args.error = parser.errorif args.config_file in ['', '', '', \"\", '']:args.config_file = Noneif args.verbose:logging.basicConfig(level=logging.INFO)config_parser.read(args)(lcat, ccat) = (args.local_catalog, args.cloud_catalog)if lcat is None:parser.error(\"\")if ccat is None:parser.error(\"\")return args", "docstring": "Return arguments", "id": "f10617:m7"} {"signature": "def path(self, a_hash, b_hash):", "body": "def _path(a, b):if a is b:return [a]else:assert len(a.children) == return [a] + _path(a.children[], b)a = self.nodes[a_hash]b = self.nodes[b_hash]return _path(a, b)[:]", "docstring": "Return nodes in the path between 'a' and 'b' going from\n parent to child NOT including 'a", "id": "f10617:c1:m2"} {"signature": "def read(args):", "body": "if args.config_file is None or not isfile(args.config_file):returnlogging.info(\"\"%args.config_file)config = cparser.ConfigParser()config.read(args.config_file)if not config.has_section(''):raise RuntimeError(\"\")for (name, value) in config.items(''):if value == \"\":value = Trueelif value == \"\":value = Falseif getattr(args, name) is None:setattr(args, name, value)", "docstring": "Reading the 
configure file and adds non-existing attributes to 'args", "id": "f10619:m0"} {"signature": "def write(args):", "body": "logging.info(\"\"%args.config_file)if args.config_file is None:returnconfig = cparser.ConfigParser()config.add_section(\"\")for p in [x for x in dir(args) if not x.startswith(\"\")]:if p in IGNORE_ARGS:continuevalue = getattr(args, p)if value is not None:config.set('', p, str(value))with open(args.config_file, '') as f:config.write(f)", "docstring": "Writing the configure file with the attributes in 'args", "id": "f10619:m1"} {"signature": "def serialize_rules(self, rules):", "body": "serialized = []for rule in rules:direction = rule[\"\"]source = ''destination = ''if rule.get(\"\"):prefix = rule[\"\"]if direction == \"\":source = self._convert_remote_network(prefix)else:if (Capabilities.EGRESS not inCONF.QUARK.environment_capabilities):raise q_exc.EgressSecurityGroupRulesNotEnabled()else:destination = self._convert_remote_network(prefix)optional_fields = {}protocol_map = protocols.PROTOCOL_MAP[rule[\"\"]]if rule[\"\"] == protocol_map[\"\"]:optional_fields[\"\"] = rule[\"\"]optional_fields[\"\"] = rule[\"\"]else:optional_fields[\"\"] = rule[\"\"]optional_fields[\"\"] = rule[\"\"]payload = {\"\": rule[\"\"],\"\": rule[\"\"],\"\": source,\"\": destination,\"\": \"\",\"\": direction}payload.update(optional_fields)serialized.append(payload)return serialized", "docstring": "Creates a payload for the redis server.", "id": "f10622:c0:m1"} {"signature": "def serialize_groups(self, groups):", "body": "rules = []for group in groups:rules.extend(self.serialize_rules(group.rules))return rules", "docstring": "Creates a payload for the redis server\n\n The rule schema is the following:\n\n REDIS KEY - port_device_id.port_mac_address/sg\n REDIS VALUE - A JSON dump of the following:\n\n port_mac_address must be lower-cased and stripped of non-alphanumeric\n characters\n\n {\"id\": \"\",\n \"rules\": [\n {\"ethertype\": ,\n \"protocol\": ,\n \"port start\": , # optional\n \"port end\": , # optional\n \"icmp type\": , # optional\n \"icmp code\": , # optional\n \"source network\": ,\n \"destination network\": ,\n \"action\": ,\n \"direction\": },\n ],\n \"security groups ack\": \n }\n\n Example:\n {\"id\": \"004c6369-9f3d-4d33-b8f5-9416bf3567dd\",\n \"rules\": [\n {\"ethertype\": 0x800,\n \"protocol\": \"tcp\",\n \"port start\": 1000,\n \"port end\": 1999,\n \"source network\": \"10.10.10.0/24\",\n \"destination network\": \"\",\n \"action\": \"allow\",\n \"direction\": \"ingress\"},\n ],\n \"security groups ack\": \"true\"\n }\n\n port start/end and icmp type/code are mutually exclusive pairs.", "id": "f10622:c0:m2"} {"signature": "def apply_rules(self, device_id, mac_address, rules):", "body": "LOG.info(\"\" %(device_id, mac_address))rule_dict = {SECURITY_GROUP_RULE_KEY: rules}redis_key = self.vif_key(device_id, mac_address)self.set_field(redis_key, SECURITY_GROUP_HASH_ATTR, rule_dict)self.set_field_raw(redis_key, SECURITY_GROUP_ACK, False)", "docstring": "Writes a series of security group rules to a redis server.", "id": "f10622:c0:m4"} {"signature": "@utils.retry_loop()def get_security_group_states(self, interfaces):", "body": "LOG.debug(\"\".format(interfaces))interfaces = tuple(interfaces)vif_keys = [self.vif_key(vif.device_id, vif.mac_address)for vif in interfaces]sec_grp_all = self.get_fields_all(vif_keys)ret = {}for vif, group in zip(interfaces, sec_grp_all):if group:ret[vif] = {SECURITY_GROUP_ACK: None,SECURITY_GROUP_HASH_ATTR: []}temp_ack = 
group[SECURITY_GROUP_ACK].lower()temp_rules = group[SECURITY_GROUP_HASH_ATTR]if temp_rules:temp_rules = json.loads(temp_rules)ret[vif][SECURITY_GROUP_HASH_ATTR] = temp_rules[\"\"]if \"\" in temp_ack:ret[vif][SECURITY_GROUP_ACK] = Trueelif \"\" in temp_ack:ret[vif][SECURITY_GROUP_ACK] = Falseelse:ret.pop(vif, None)LOG.debug(\"\" % temp_ack)return ret", "docstring": "Gets security groups for interfaces from Redis\n\n Returns a dictionary of xapi.VIFs with values of the current\n acknowledged status in Redis.\n\n States not explicitly handled:\n * ack key, no rules - This is the same as just tagging the VIF,\n the instance will be inaccessible\n * rules key, no ack - Nothing will happen, the VIF will\n not be tagged.", "id": "f10622:c0:m7"} {"signature": "@utils.retry_loop()def update_group_states_for_vifs(self, vifs, ack):", "body": "vif_keys = [self.vif_key(vif.device_id, vif.mac_address)for vif in vifs]self.set_fields(vif_keys, SECURITY_GROUP_ACK, ack)", "docstring": "Updates security groups by setting the ack field", "id": "f10622:c0:m8"} {"signature": "def _make_job_dict(job):", "body": "body = {\"\": job.get(''),\"\": job.get(''),\"\": job.get(''),\"\": job.get(''),\"\": job.get(''),\"\": job.get(''),\"\": job.get('', None)}if not body['']:body[''] = job.get('')completed = for sub in job.subtransactions:if sub.get(''):completed += pct = if job.get('') else if len(job.subtransactions) > :pct = float(completed) / len(job.subtransactions) * body[''] = int(pct)body[''] = completedbody[''] = len(job.subtransactions)return body", "docstring": "Creates the view for a job while calculating progress.\n\n Since a root job does not have a transaction id (TID) it will return its\n id as the TID.", "id": "f10625:m21"} {"signature": "def filter_factory(global_conf, **local_conf):", "body": "conf = global_conf.copy()conf.update(local_conf)def wrapper(app):return ResponseAsyncIdAdder(app, conf)return wrapper", "docstring": "Returns a WSGI filter app for use with paste.deploy.", "id": "f10626:m0"} {"signature": "@webob.dec.wsgify(RequestClass=wsgi.Request)def __call__(self, req):", "body": "resp = self._get_resp(req)context = self._get_ctx(req)if hasattr(context, ''):try:json_body = json.loads(resp.body)json_body[''] = context.async_job['']['']resp.body = json.dumps(json_body)resp.headers[''] = context.async_job['']['']except ValueError: return respexcept Exception as e: LOG.error(\"\" % e)return resp", "docstring": "Attempts to put the job_id into the response body and header.", "id": "f10626:c0:m3"} {"signature": "def delete_locks(context, network_ids, addresses):", "body": "addresses_no_longer_null_routed = _find_addresses_to_be_unlocked(context, network_ids, addresses)LOG.info(\"\",len(addresses_no_longer_null_routed),[addr.id for addr in addresses_no_longer_null_routed])for address in addresses_no_longer_null_routed:lock_holder = Nonetry:lock_holder = db_api.lock_holder_find(context, lock_id=address.lock_id, name=LOCK_NAME,scope=db_api.ONE)if lock_holder:db_api.lock_holder_delete(context, address, lock_holder)except Exception:LOG.exception(\"\", lock_holder)continuecontext.session.flush()", "docstring": "Deletes locks for each IP address that is no longer null-routed.", "id": "f10627:m6"} {"signature": "def create_locks(context, network_ids, addresses):", "body": "for address in addresses:address_model = Nonetry:address_model = _find_or_create_address(context, network_ids, address)lock_holder = Noneif address_model.lock_id:lock_holder = db_api.lock_holder_find(context,lock_id=address_model.lock_id, 
name=LOCK_NAME,scope=db_api.ONE)if not lock_holder:LOG.info(\"\",address_model.address_readable,address_model.id)db_api.lock_holder_create(context, address_model, name=LOCK_NAME, type=\"\")except Exception:LOG.exception(\"\",address_model)continuecontext.session.flush()", "docstring": "Creates locks for each IP address that is null-routed.\n\n The function creates the IP address if it is not present in the database.", "id": "f10627:m8"} {"signature": "def make_case2(context):", "body": "query = context.session.query(models.IPAddress)period_start, period_end = billing.calc_periods()ip_list = billing.build_full_day_ips(query, period_start, period_end)import randomind = random.randint(, len(ip_list) - )address = ip_list[ind]address.allocated_at = datetime.datetime.utcnow() -datetime.timedelta(days=)context.session.add(address)context.session.flush()", "docstring": "This is a helper method for testing.\n\n When run with the current context, it will create a case 2 entries\n in the database. See top of file for what case 2 is.", "id": "f10628:m0"} {"signature": "@click.command()@click.option('', is_flag=True,help='')@click.option('', default=,help='')@click.option('', default=,help='')def main(notify, hour, minute):", "body": "config_opts = ['', '']config.init(config_opts)network_strategy.STRATEGY.load()billing.PUBLIC_NETWORK_ID = network_strategy.STRATEGY.get_public_net_id()config.setup_logging()context = neutron_context.get_admin_context()query = context.session.query(models.IPAddress)(period_start, period_end) = billing.calc_periods(hour, minute)full_day_ips = billing.build_full_day_ips(query,period_start,period_end)partial_day_ips = billing.build_partial_day_ips(query,period_start,period_end)if notify:for ipaddress in full_day_ips:click.echo(''.format(period_start, period_end))payload = billing.build_payload(ipaddress,billing.IP_EXISTS,start_time=period_start,end_time=period_end)billing.do_notify(context,billing.IP_EXISTS,payload)for ipaddress in partial_day_ips:click.echo(''.format(period_start, period_end))payload = billing.build_payload(ipaddress,billing.IP_EXISTS,start_time=ipaddress.allocated_at,end_time=period_end)billing.do_notify(context,billing.IP_EXISTS,payload)else:click.echo(''.format(len(full_day_ips)))for ipaddress in full_day_ips:pp(billing.build_payload(ipaddress,billing.IP_EXISTS,start_time=period_start,end_time=period_end))click.echo('')click.echo(''.format(len(partial_day_ips)))for ipaddress in partial_day_ips:pp(billing.build_payload(ipaddress,billing.IP_EXISTS,start_time=ipaddress.allocated_at,end_time=period_end))", "docstring": "Runs billing report. 
Optionally sends notifications to billing", "id": "f10628:m1"} {"signature": "def _load_worker_plugin_with_module(self, module, version):", "body": "classes = inspect.getmembers(module, inspect.isclass)loaded = for cls_name, cls in classes:if hasattr(cls, ''):if version not in cls.versions:continueelse:continueif issubclass(cls, base_worker.QuarkAsyncPluginBase):LOG.debug(\"\" % cls_name)plugin = cls()self.plugins.append(plugin)loaded += LOG.debug(\"\" %(len(classes), loaded))", "docstring": "Instantiates worker plugins that have requisite properties.\n\n The required properties are:\n * must have PLUGIN_EP entrypoint registered (or it wouldn't be in the\n list)\n * must have class attribute versions (list) of supported RPC versions\n * must subclass QuarkAsyncPluginBase", "id": "f10629:c0:m1"} {"signature": "def _discover_via_entrypoints(self):", "body": "emgr = extension.ExtensionManager(PLUGIN_EP, invoke_on_load=False)return ((ext.name, ext.plugin) for ext in emgr)", "docstring": "Looks for modules with matching entry points.", "id": "f10629:c0:m2"} {"signature": "def _register_extensions(self, version):", "body": "for name, module in itertools.chain(self._discover_via_entrypoints()):self._load_worker_plugin_with_module(module, version)", "docstring": "Loads plugins that match the PLUGIN_EP entrypoint.", "id": "f10629:c0:m3"} {"signature": "def serve_rpc(self):", "body": "if cfg.CONF.QUARK_ASYNC.rpc_workers < :cfg.CONF.set_override('', , \"\")try:rpc = service.RpcWorker(self.plugins)launcher = common_service.ProcessLauncher(CONF, wait_interval=)launcher.launch_service(rpc, workers=CONF.QUARK_ASYNC.rpc_workers)return launcherexcept Exception:with excutils.save_and_reraise_exception():LOG.exception(_LE(''''))", "docstring": "Launches configured # of workers per loaded plugin.", "id": "f10629:c0:m4"} {"signature": "def start_api_and_rpc_workers(self):", "body": "pool = eventlet.GreenPool()quark_rpc = self.serve_rpc()pool.spawn(quark_rpc.wait)pool.waitall()", "docstring": "Initializes eventlet and starts waiting for workers to exit.\n\n Spawns the workers returned from serve_rpc", "id": "f10629:c0:m5"} {"signature": "def run(self):", "body": "self.start_api_and_rpc_workers()", "docstring": "Start of async worker process.", "id": "f10629:c0:m6"} {"signature": "def _populate_segment_allocation_range(self, sa_range):", "body": "id_range = xrange(sa_range[''],sa_range[''] + )sa_dicts = []total = for i in id_range:sa_dicts.append({'': sa_range[''],'': sa_range[''],'': i,'': sa_range[''],'': True})total = total + db_api.segment_allocation_range_populate_bulk(self.context, sa_dicts)self.context.session.flush()allocs = db_api.segment_allocation_find(self.context, segment_allocation_range_id=sa_range['']).all()self.assertEqual(len(allocs), len(id_range))", "docstring": "Populate a given segment range.", "id": "f10649:c0:m2"} {"signature": "def _create_segment_allocation_range(self, **kwargs):", "body": "sa_dict = self._make_segment_allocation_range_dict(**kwargs)sa_range = db_api.segment_allocation_range_create(self.context, **sa_dict)self.context.session.flush()self._populate_segment_allocation_range(sa_range)return sa_range", "docstring": "Create a segment allocation range in the database.", "id": "f10649:c0:m3"} {"signature": "def _allocate_segment(self, sa_range, count=):", "body": "allocs = []for i in xrange(sa_range[''], sa_range[''] + count):filters = {'': sa_range[''],'': True}alloc = db_api.segment_allocation_find(self.context, **filters).first()if not alloc:raise Exception(\"\")update = {'': 
False}allocs.append(db_api.segment_allocation_update(self.context, alloc, **update))self.context.session.flush()self.assertEqual(len(allocs), count)return allocs", "docstring": "Populate a given segment range.", "id": "f10649:c0:m4"} {"signature": "def _sa_range_to_dict(self, sa_range, allocations=None):", "body": "size = (sa_range[''] + ) - sa_range['']sa_range_dict = dict(sa_range)sa_range_dict.pop('')sa_range_dict[''] = sizeif allocations is not None:sa_range_dict[''] = size - allocationsreturn sa_range_dict", "docstring": "Helper to turn a model into a dict for assertions.", "id": "f10649:c0:m5"} {"signature": "def _assert_tags(self, model, tags=None):", "body": "tags = tags if tags else []expected_tags = (self.existing_tags + tags)self.assertEqual(sorted(model.tags),sorted(expected_tags))", "docstring": "Assert given tags and already existing tags are present.", "id": "f10674:c3:m2"} {"signature": "def get_used_ips(session, **kwargs):", "body": "LOG.debug(\"\")with session.begin():query = session.query(models.Subnet.segment_id,func.count(models.IPAddress.address))query = query.group_by(models.Subnet.segment_id)query = _filter(query, **kwargs)reuse_window = timeutils.utcnow() - datetime.timedelta(seconds=cfg.CONF.QUARK.ipam_reuse_after)query = query.outerjoin(models.IPAddress,and_(models.Subnet.id == models.IPAddress.subnet_id,or_(not_(models.IPAddress.lock_id.is_(None)),models.IPAddress._deallocated.is_(None),models.IPAddress._deallocated == ,models.IPAddress.deallocated_at > reuse_window)))query = query.outerjoin(models.IPPolicyCIDR,and_(models.Subnet.ip_policy_id == models.IPPolicyCIDR.ip_policy_id,models.IPAddress.address >= models.IPPolicyCIDR.first_ip,models.IPAddress.address <= models.IPPolicyCIDR.last_ip))query = query.filter(or_(models.IPAddress._deallocated.is_(None),models.IPAddress._deallocated == ,models.IPPolicyCIDR.id.is_(None)))ret = ((segment_id, address_count)for segment_id, address_count in query.all())return dict(ret)", "docstring": "Returns dictionary with keys segment_id and value used IPs count.\n\n Used IP address count is determined by:\n - allocated IPs\n - deallocated IPs whose `deallocated_at` is within the `reuse_after`\n window compared to the present time, excluding IPs that are accounted for\n in the current IP policy (because IP policy is mutable and deallocated IPs\n are not checked nor deleted on IP policy creation, thus deallocated IPs\n that don't fit the current IP policy can exist in the neutron database).", "id": "f10695:m4"} {"signature": "def get_unused_ips(session, used_ips_counts, **kwargs):", "body": "LOG.debug(\"\")with session.begin():query = session.query(models.Subnet.segment_id,models.Subnet)query = _filter(query, **kwargs)query = query.group_by(models.Subnet.segment_id, models.Subnet.id)ret = defaultdict(int)for segment_id, subnet in query.all():net_size = netaddr.IPNetwork(subnet._cidr).sizeip_policy = subnet[\"\"] or {\"\": }ret[segment_id] += net_size - ip_policy[\"\"]for segment_id in used_ips_counts:ret[segment_id] -= used_ips_counts[segment_id]return ret", "docstring": "Returns dictionary with key segment_id, and value unused IPs count.\n\n Unused IP address count is determined by:\n - adding subnet's cidr's size\n - subtracting IP policy exclusions on subnet\n - subtracting used ips per segment", "id": "f10695:m5"} {"signature": "def get_inspector():", "body": "global _INSPECTORif _INSPECTOR:return _INSPECTORelse:bind = op.get_bind()_INSPECTOR = sa.engine.reflection.Inspector.from_engine(bind)return _INSPECTOR", "docstring": 
"Reuse inspector", "id": "f10710:m0"} {"signature": "def get_columns(table):", "body": "inspector = get_inspector()return inspector.get_columns(table)", "docstring": "Returns list of columns for given table.", "id": "f10710:m2"} {"signature": "def get_data():", "body": "output = []tables = get_tables()for table in tables:try:columns = get_columns(table)except sa.exc.NoSuchTableError:continuefor column in columns:if column[''] == '':output.append((table, column))return output", "docstring": "Returns combined list of tuples: [(table, column)].\n\n List is built, based on retrieved tables, where column with name\n ``tenant_id`` exists.", "id": "f10710:m3"} {"signature": "def downgrade():", "body": "with op.batch_alter_table(t2_name) as batch_op:batch_op.drop_column('')with op.batch_alter_table(t1_name) as batch_op:batch_op.drop_column('')", "docstring": "alexm: i believe this method is never called", "id": "f10718:m1"} {"signature": "def run_migrations_offline():", "body": "context.configure(url=neutron_config.database.connection)with context.begin_transaction():context.run_migrations()", "docstring": "Run migrations in 'offline' mode.\n\n This configures the context with just a URL\n and not an Engine, though an Engine is acceptable\n here as well. By skipping the Engine creation\n we don't even need a DBAPI to be available.\n\n Calls to context.execute() here emit the given string to the\n script output.", "id": "f10740:m0"} {"signature": "def run_migrations_online():", "body": "engine = create_engine(neutron_config.database.connection,poolclass=pool.NullPool)connection = engine.connect()context.configure(connection=connection,target_metadata=target_metadata)try:with context.begin_transaction():context.run_migrations()finally:connection.close()", "docstring": "Run migrations in 'online' mode.\n\n In this scenario we need to create an Engine\n and associate a connection with the context.", "id": "f10740:m1"} {"signature": "@propertydef parent(self):", "body": "return getattr(self, \"\" % self.discriminator)", "docstring": "Return the parent object.", "id": "f10741:c1:m1"} {"signature": "def sg_gather_associated_ports(context, group):", "body": "if not group:return Noneif not hasattr(group, \"\") or len(group.ports) <= :return []return group.ports", "docstring": "Gather all ports associated to security group.\n\n Returns:\n * list, or None", "id": "f10742:m62"} {"signature": "def security_group_rule_update(context, rule, **kwargs):", "body": "rule.update(kwargs)context.session.add(rule)return rule", "docstring": "Updates a security group rule.\n\n NOTE(alexm) this is non-standard functionality.", "id": "f10742:m72"} {"signature": "def segment_allocation_find(context, lock_mode=False, **filters):", "body": "range_ids = filters.pop(\"\", None)query = context.session.query(models.SegmentAllocation)if lock_mode:query = query.with_lockmode(\"\")query = query.filter_by(**filters)if range_ids:query.filter(models.SegmentAllocation.segment_allocation_range_id.in_(range_ids))return query", "docstring": "Query for segment allocations.", "id": "f10742:m89"} {"signature": "def segment_allocation_range_populate_bulk(context, sa_dicts):", "body": "context.session.bulk_insert_mappings(models.SegmentAllocation,sa_dicts)", "docstring": "Bulk-insert deallocated segment allocations.\n\n NOTE(morgabra): This is quite performant when populating large ranges,\n but you don't get any ORM conveniences or protections here.", "id": "f10742:m91"} {"signature": "@classmethoddef get_name(cls):", "body": "if not 
hasattr(cls, ''):raise NotImplementedError()return cls.NAME", "docstring": "API name of the tag.", "id": "f10743:c1:m0"} {"signature": "@classmethoddef get_prefix(cls):", "body": "return \"\" % cls.get_name().upper()", "docstring": "Tag 'key', saved in the database as :", "id": "f10743:c1:m1"} {"signature": "def set(self, model, value):", "body": "self.validate(value)self._pop(model)value = self.serialize(value)model.tags.append(value)", "docstring": "Set tag on model object.", "id": "f10743:c1:m5"} {"signature": "def get(self, model):", "body": "for tag in model.tags:if self.is_tag(tag):value = self.deserialize(tag)try:self.validate(value)return valueexcept TagValidationError:continuereturn None", "docstring": "Get a matching valid tag off the model.", "id": "f10743:c1:m6"} {"signature": "def _pop(self, model):", "body": "tags = []for tag in model.tags:if self.is_tag(tag):tags.append(tag)if tags:for tag in tags:model.tags.remove(tag)return tags", "docstring": "Pop all matching tags off the model and return them.", "id": "f10743:c1:m7"} {"signature": "def pop(self, model):", "body": "tags = self._pop(model)if tags:for tag in tags:value = self.deserialize(tag)try:self.validate(value)return valueexcept TagValidationError:continue", "docstring": "Pop all matching tags off the port, return a valid one.", "id": "f10743:c1:m8"} {"signature": "def is_tag(self, tag):", "body": "return tag[:len(self.get_prefix())] == self.get_prefix()", "docstring": "Is a given tag this type?", "id": "f10743:c1:m9"} {"signature": "def has_tag(self, model):", "body": "for tag in model.tags:if self.is_tag(tag):return Truereturn False", "docstring": "Does the given port have this tag?", "id": "f10743:c1:m10"} {"signature": "def validate(self, value):", "body": "try:vlan_id_int = int(value)assert vlan_id_int >= self.MIN_VLAN_IDassert vlan_id_int <= self.MAX_VLAN_IDexcept Exception:msg = (\"\"\"\"\"\" % {'': value,'': self.MIN_VLAN_ID,'': self.MAX_VLAN_ID})raise TagValidationError(value, msg)return True", "docstring": "Validates a VLAN ID.\n\n :param value: The VLAN ID to validate against.\n :raises TagValidationError: Raised if the VLAN ID is invalid.", "id": "f10743:c2:m0"} {"signature": "def get_all(self, model):", "body": "tags = {}for name, tag in self.tags.items():for mtag in model.tags:if tag.is_tag(mtag):tags[name] = tag.get(model)return tags", "docstring": "Get all known tags from a model.\n\n Returns a dict of {:}.", "id": "f10743:c3:m0"} {"signature": "def set_all(self, model, **tags):", "body": "for name, tag in self.tags.items():if name in tags:value = tags.pop(name)if value:try:tag.set(model, value)except TagValidationError as e:raise n_exc.BadRequest(resource=\"\",msg=\"\" % (e.message))", "docstring": "Validate and set all known tags on a port.", "id": "f10743:c3:m1"} {"signature": "def update_sg(self, context, sg, rule_id, action):", "body": "db_sg = db_api.security_group_find(context, id=sg, scope=db_api.ONE)if not db_sg:return Nonewith context.session.begin():job_body = dict(action=\"\" % (action, rule_id),resource_id=rule_id,tenant_id=db_sg[''])job_body = dict(job=job_body)job = job_api.create_job(context.elevated(), job_body)rpc_client = QuarkSGAsyncProducerClient()try:rpc_client.populate_subtasks(context, sg, job[''])except om_exc.MessagingTimeout:LOG.error(\"\")return Nonereturn {\"\": job['']}", "docstring": "Begins the async update process.", "id": "f10744:c0:m0"} {"signature": "def populate_subtasks(self, context, sg, parent_job_id):", "body": "db_sg = db_api.security_group_find(context, id=sg, 
scope=db_api.ONE)if not db_sg:return Noneports = db_api.sg_gather_associated_ports(context, db_sg)if len(ports) == :return {\"\": }for port in ports:job_body = dict(action=\"\" % port[''],tenant_id=db_sg[''],resource_id=port[''],parent_id=parent_job_id)job_body = dict(job=job_body)job = job_api.create_job(context.elevated(), job_body)rpc_consumer = QuarkSGAsyncConsumerClient()try:rpc_consumer.update_port(context, port[''], job[''])except om_exc.MessagingTimeout:LOG.error(\"\")return None", "docstring": "Produces a list of ports to be updated async.", "id": "f10744:c3:m0"} {"signature": "def update_ports_for_sg(self, context, portid, jobid):", "body": "port = db_api.port_find(context, id=portid, scope=db_api.ONE)if not port:LOG.warning(\"\")returnnet_driver = port_api._get_net_driver(port.network, port=port)base_net_driver = port_api._get_net_driver(port.network)sg_list = [sg for sg in port.security_groups]success = Falseerror = Noneretries = retry_delay = for retry in xrange(retries):try:net_driver.update_port(context, port_id=port[\"\"],mac_address=port[\"\"],device_id=port[\"\"],base_net_driver=base_net_driver,security_groups=sg_list)success = Trueerror = Nonebreakexcept Exception as error:LOG.warning(\"\")time.sleep(retry_delay)status_str = \"\"if not success:status_str = \"\" % (portid, retries, error)update_body = dict(completed=True, status=status_str)update_body = dict(job=update_body)job_api.update_job(context.elevated(), jobid, update_body)", "docstring": "Updates the ports through redis.", "id": "f10744:c6:m0"} {"signature": "def _setup_rpc(self):", "body": "self.endpoints.extend(self.callbacks)", "docstring": "Registers callbacks to RPC assigned by subclasses.\n\n This does nothing if the subclasses do not set callbacks value.", "id": "f10746:c0:m1"} {"signature": "def start_rpc_listeners(self):", "body": "self._setup_rpc()if not self.endpoints:return []self.conn = n_rpc.create_connection()self.conn.create_consumer(self.topic, self.endpoints,fanout=False)return self.conn.consume_in_threads()", "docstring": "Configure all listeners here", "id": "f10746:c0:m2"} {"signature": "@propertydef context(self):", "body": "if not self._context:self._context = context.get_admin_context()return self._context", "docstring": "Provides an admin context for workers.", "id": "f10746:c0:m3"} {"signature": "def do_notify(context, event_type, payload):", "body": "LOG.debug(''.format(payload))notifier = n_rpc.get_notifier('')notifier.info(context, event_type, payload)", "docstring": "Generic Notifier.\n\n Parameters:\n - `context`: session context\n - `event_type`: the event type to report, i.e. 
ip.usage\n - `payload`: dict containing the payload to send", "id": "f10747:m0"} {"signature": "@env.has_capability(env.Capabilities.IP_BILLING)def notify(context, event_type, ipaddress, send_usage=False, *args, **kwargs):", "body": "if (event_type == IP_ADD and not CONF.QUARK.notify_ip_add) or(event_type == IP_DEL and not CONF.QUARK.notify_ip_delete) or(event_type == IP_ASSOC and not CONF.QUARK.notify_flip_associate) or(event_type == IP_DISASSOC and not CONF.QUARK.notify_flip_disassociate)or (event_type == IP_EXISTS and not CONF.QUARK.notify_ip_exists):LOG.debug(''.format(event_type))returnif '' in kwargs and kwargs['']:LOG.debug('')returnts = ipaddress.allocated_at if event_type == IP_ADD else _now()payload = build_payload(ipaddress, event_type, event_time=ts)do_notify(context, event_type, payload)if send_usage:if ipaddress.allocated_at is not None andipaddress.allocated_at >= _midnight_today():start_time = ipaddress.allocated_atelse:start_time = _midnight_today()payload = build_payload(ipaddress,IP_EXISTS,start_time=start_time,end_time=ts)do_notify(context, IP_EXISTS, payload)", "docstring": "Method to send notifications.\n\n We must send USAGE when a public IPv4 address is deallocated or a FLIP is\n associated.\n Parameters:\n - `context`: the context for notifier\n - `event_type`: the event type for IP allocate, deallocate, associate,\n disassociate\n - `ipaddress`: the ipaddress object to notify about\n Returns:\n nothing\n Notes: this may live in the billing module", "id": "f10747:m1"} {"signature": "def build_payload(ipaddress,event_type,event_time=None,start_time=None,end_time=None):", "body": "payload = {'': str(event_type),'': str(ipaddress.used_by_tenant_id),'': str(ipaddress.address_readable),'': int(ipaddress.version),'': str(ipaddress.address_type),'': str(ipaddress.id)}if event_type == IP_EXISTS:if start_time is None or end_time is None:raise ValueError(''.format(event_type))payload.update({'': str(convert_timestamp(start_time)),'': str(convert_timestamp(end_time))})elif event_type in [IP_ADD, IP_DEL, IP_ASSOC, IP_DISASSOC]:if event_time is None:raise ValueError(''.format(event_type))payload.update({'': str(convert_timestamp(event_time)),'': str(ipaddress.subnet_id),'': str(ipaddress.network_id),'': True if ipaddress.network_id == PUBLIC_NETWORK_IDelse False,})else:raise ValueError(''.format(event_type))return payload", "docstring": "Method builds a payload out of the passed arguments.\n\n Parameters:\n `ipaddress`: the models.IPAddress object\n `event_type`: USAGE,CREATE,DELETE,SUSPEND,or UNSUSPEND\n `start_time`: startTime for cloudfeeds\n `end_time`: endTime for cloudfeeds\n Returns a dictionary suitable to notify billing.\n Message types mapping to cloud feeds for references:\n ip.exists - USAGE\n ip.add - CREATE\n ip.delete - DELETE\n ip.associate - UP\n ip.disassociate - DOWN\n Refer to: http://rax.io/cf-api for more details.", "id": "f10747:m2"} {"signature": "def build_full_day_ips(query, period_start, period_end):", "body": "ip_list = query.filter(models.IPAddress.version == ).filter(models.IPAddress.network_id == PUBLIC_NETWORK_ID).filter(models.IPAddress.used_by_tenant_id is not None).filter(models.IPAddress.allocated_at != null()).filter(models.IPAddress.allocated_at < period_start).filter(or_(models.IPAddress._deallocated is False,models.IPAddress.deallocated_at == null(),models.IPAddress.deallocated_at >= period_end)).all()return ip_list", "docstring": "Method to build an IP list for the case 1\n\n when the IP was allocated before the period start\n and is 
still allocated after the period end.\n This method only looks at public IPv4 addresses.", "id": "f10747:m3"} {"signature": "def build_partial_day_ips(query, period_start, period_end):", "body": "ip_list = query.filter(models.IPAddress.version == ).filter(models.IPAddress.network_id == PUBLIC_NETWORK_ID).filter(models.IPAddress.used_by_tenant_id is not None).filter(and_(models.IPAddress.allocated_at != null(),models.IPAddress.allocated_at >= period_start,models.IPAddress.allocated_at < period_end)).filter(or_(models.IPAddress._deallocated is False,models.IPAddress.deallocated_at == null(),models.IPAddress.deallocated_at >= period_end)).all()return ip_list", "docstring": "Method to build an IP list for the case 2\n\n when the IP was allocated after the period start and\n is still allocated after the period end.\n This method only looks at public IPv4 addresses.", "id": "f10747:m4"} {"signature": "def calc_periods(hour=, minute=):", "body": "period_end = datetime.datetime.utcnow().replace(hour=hour,minute=minute,second=,microsecond=)period_start = period_end - datetime.timedelta(days=)period_end -= datetime.timedelta(seconds=)return (period_start, period_end)", "docstring": "Returns a tuple of start_period and end_period.\n\n Assumes that the period is 24-hrs.\n Parameters:\n - `hour`: the hour from 0 to 23 when the period ends\n - `minute`: the minute from 0 to 59 when the period ends\n This method will calculate the end of the period as the closest hour/minute\n going backwards.\n It will also calculate the start of the period as the passed hour/minute\n but 24 hrs ago.\n Example, if we pass 0, 0 - we will get the events from 0:00 midnight of the\n day before yesterday until today's midnight.\n If we pass 2,0 - we will get the start time as 2am of the previous morning\n till 2am of today's morning.\n By default it's midnight.", "id": "f10747:m5"} {"signature": "def convert_timestamp(ts):", "body": "return ts.replace(microsecond=).isoformat() + ''", "docstring": "Converts the timestamp to a format suitable for Billing.\n\n Examples of a good timestamp for startTime, endTime, and eventTime:\n '2016-05-20T00:00:00Z'\n We must drop microseconds so that Yagi does not get upset.\n Note the trailing 'Z'. 
Python does not add the 'Z' so we tack it on\n ourselves.", "id": "f10747:m7"} {"signature": "def _now():", "body": "return datetime.datetime.utcnow().replace(microsecond=)", "docstring": "Method to get the utcnow without microseconds", "id": "f10747:m8"} {"signature": "def __init__(self, device_id, record, ref):", "body": "self.device_id = device_idself.record = recordself.ref = refself.success = False", "docstring": "Constructs VIF\n\n `device_id` and `mac_address` should be strings if they will later be\n compared to decoded VIF instances (via from_string).\n\n `ref` is the OpaqueRef string for the vif as returned from xenapi.", "id": "f10748:c0:m0"} {"signature": "def get_instances(self, session):", "body": "LOG.debug(\"\")recs = session.xenapi.VM.get_all_records()is_inst = lambda r: (r[''].lower() == '' andnot r[''] andnot r[''] and('' in r[''] orr[''].startswith('')))instances = dict()for vm_ref, rec in recs.iteritems():if not is_inst(rec):continueinstances[vm_ref] = VM(ref=vm_ref,uuid=rec[\"\"][\"\"],vifs=rec[\"\"],dom_id=rec[\"\"])return instances", "docstring": "Returns a dict of `VM OpaqueRef` (str) -> `xapi.VM`.", "id": "f10748:c1:m3"} {"signature": "def get_interfaces(self):", "body": "LOG.debug(\"\")with self.sessioned() as session:instances = self.get_instances(session)recs = session.xenapi.VIF.get_all_records()interfaces = set()for vif_ref, rec in recs.iteritems():vm = instances.get(rec[\"\"])if not vm:continuedevice_id = vm.uuidinterfaces.add(VIF(device_id, rec, vif_ref))return interfaces", "docstring": "Returns a set of VIFs from `get_instances` return value.", "id": "f10748:c1:m4"} {"signature": "def update_interfaces(self, added_sg, updated_sg, removed_sg):", "body": "if not (added_sg or updated_sg or removed_sg):returnwith self.sessioned() as session:self._set_security_groups(session, added_sg)self._unset_security_groups(session, removed_sg)combined = added_sg + updated_sg + removed_sgself._refresh_interfaces(session, combined)", "docstring": "Handles changes to interfaces' security groups\n\n Calls refresh_interfaces on argument VIFs. Set security groups on\n added_sg's VIFs. Unsets security groups on removed_sg's VIFs.", "id": "f10748:c1:m8"} {"signature": "def is_isonet_vif(vif):", "body": "nicira_iface_id = vif.record.get('').get('')if nicira_iface_id:return Truereturn False", "docstring": "Determine if a vif is on isonet\n\n Returns True if a vif belongs to an isolated network by checking\n for a nicira interface id.", "id": "f10749:m1"} {"signature": "def partition_vifs(xapi_client, interfaces, security_group_states):", "body": "added = []updated = []removed = []for vif in interfaces:if ('' in CONF.QUARK.environment_capabilities andis_isonet_vif(vif)):continuevif_has_groups = vif in security_group_statesif vif.tagged and vif_has_groups andsecurity_group_states[vif][sg_cli.SECURITY_GROUP_ACK]:continueif vif.tagged:if vif_has_groups:updated.append(vif)else:removed.append(vif)else:if vif_has_groups:added.append(vif)return added, updated, removed", "docstring": "Splits VIFs into three explicit categories and one implicit\n\n Added - Groups exist in Redis that have not been ack'd and the VIF\n is not tagged.\n Action: Tag the VIF and apply flows\n Updated - Groups exist in Redis that have not been ack'd and the VIF\n is already tagged\n Action: Do not tag the VIF, do apply flows\n Removed - Groups do NOT exist in Redis but the VIF is tagged\n Action: Untag the VIF, apply default flows\n Self-Heal - Groups are ack'd in Redis but the VIF is untagged. 
We treat\n this case as if it were an \"added\" group.\n Action: Tag the VIF and apply flows\n NOOP - The VIF is not tagged and there are no matching groups in Redis.\n This is our implicit category\n Action: Do nothing", "id": "f10749:m2"} {"signature": "def get_groups_to_ack(groups_to_ack, init_sg_states, curr_sg_states):", "body": "security_groups_changed = []for vif in groups_to_ack:initial_state = init_sg_states[vif][sg_cli.SECURITY_GROUP_HASH_ATTR]current_state = curr_sg_states[vif][sg_cli.SECURITY_GROUP_HASH_ATTR]bad_match_msg = ('''''' % vif)if len(initial_state) != len(current_state):security_groups_changed.append(vif)LOG.info(bad_match_msg)elif len(initial_state) > :for rule in current_state:if rule not in initial_state:security_groups_changed.append(vif)LOG.info(bad_match_msg)breakret = [group for group in groups_to_ackif group not in security_groups_changed]return ret", "docstring": "Compares initial security group rules with current sg rules.\n\n Given the groups that were successfully returned from\n xapi_client.update_interfaces call, compare initial and current\n security group rules to determine if an update occurred during\n the window that the xapi_client.update_interfaces was executing.\n Return a list of vifs whose security group rules have not changed.", "id": "f10749:m4"} {"signature": "def run():", "body": "groups_client = sg_cli.SecurityGroupsClient()xapi_client = xapi.XapiClient()interfaces = set()while True:try:interfaces = xapi_client.get_interfaces()except Exception:LOG.exception(\"\")_sleep()continuetry:sg_states = groups_client.get_security_group_states(interfaces)new_sg, updated_sg, removed_sg = partition_vifs(xapi_client,interfaces,sg_states)xapi_client.update_interfaces(new_sg, updated_sg, removed_sg)groups_to_ack = [v for v in new_sg + updated_sg if v.success]sg_sts_curr = groups_client.get_security_group_states(interfaces)groups_to_ack = get_groups_to_ack(groups_to_ack, sg_states,sg_sts_curr)ack_groups(groups_client, groups_to_ack)except Exception:LOG.exception(\"\"\"\")_sleep()continue_sleep()", "docstring": "Fetches changes and applies them to VIFs periodically\n\n Process as of RM11449:\n * Get all groups from redis\n * Fetch ALL VIFs from Xen\n * Walk ALL VIFs and partition them into added, updated and removed\n * Walk the final \"modified\" VIFs list and apply flows to each", "id": "f10749:m5"} {"signature": "@staticmethoddef delete_tenant_quota(context, tenant_id):", "body": "tenant_quotas = context.session.query(Quota)tenant_quotas = tenant_quotas.filter_by(tenant_id=tenant_id)tenant_quotas.delete()", "docstring": "Delete the quota entries for a given tenant_id.\n\n After deletion, this tenant will use default quota values in conf.", "id": "f10750:c0:m0"} {"signature": "def _chunks(self, iterable, chunk_size):", "body": "iterator = iter(iterable)chunk = list(itertools.islice(iterator, , chunk_size))while chunk:yield chunkchunk = list(itertools.islice(iterator, , chunk_size))", "docstring": "Chunks data into chunks with size<=chunk_size.", "id": "f10751:c0:m1"} {"signature": "def _check_collisions(self, new_range, existing_ranges):", "body": "def _contains(num, r1):return (num >= r1[] andnum <= r1[])def _is_overlap(r1, r2):return (_contains(r1[], r2) or_contains(r1[], r2) or_contains(r2[], r1) or_contains(r2[], r1))for existing_range in existing_ranges:if _is_overlap(new_range, existing_range):return Truereturn False", "docstring": "Check for overlapping ranges.", "id": "f10751:c0:m2"} {"signature": "def _try_allocate(self, context, segment_id, 
network_id):", "body": "LOG.info(\"\"\"\"% (network_id, segment_id, self.segment_type))filter_dict = {\"\": segment_id,\"\": self.segment_type,\"\": False}available_ranges = db_api.segment_allocation_range_find(context, scope=db_api.ALL, **filter_dict)available_range_ids = [r[\"\"] for r in available_ranges]try:with context.session.begin(subtransactions=True):filter_dict = {\"\": True,\"\": segment_id,\"\": self.segment_type,\"\": available_range_ids}allocations = db_api.segment_allocation_find(context, lock_mode=True, **filter_dict).limit().all()if allocations:allocation = random.choice(allocations)update_dict = {\"\": False,\"\": None,\"\": network_id}allocation = db_api.segment_allocation_update(context, allocation, **update_dict)LOG.info(\"\"\"\"% (allocation[\"\"], network_id, segment_id,self.segment_type))return allocationexcept Exception:LOG.exception(\"\")LOG.info(\"\"\"\"% (network_id, segment_id, self.segment_type))", "docstring": "Find a deallocated network segment id and reallocate it.\n\n NOTE(morgabra) This locks the segment table, but only the rows\n in use by the segment, which is pretty handy if we ever have\n more than 1 segment or segment type.", "id": "f10751:c0:m8"} {"signature": "def execute():", "body": "", "docstring": "Process providernets request", "id": "f10752:c0:m0"} {"signature": "def _create_flip(context, flip, port_fixed_ips):", "body": "if port_fixed_ips:context.session.begin()try:ports = [val[''] for val in port_fixed_ips.values()]flip = db_api.port_associate_ip(context, ports, flip,port_fixed_ips.keys())for port_id in port_fixed_ips:fixed_ip = port_fixed_ips[port_id]['']flip = db_api.floating_ip_associate_fixed_ip(context, flip,fixed_ip)flip_driver = registry.DRIVER_REGISTRY.get_driver()flip_driver.register_floating_ip(flip, port_fixed_ips)context.session.commit()except Exception:context.session.rollback()raisebilling.notify(context, billing.IP_ASSOC, flip)", "docstring": "Associates the flip with ports and creates it with the flip driver\n\n :param context: neutron api request context.\n :param flip: quark.db.models.IPAddress object representing a floating IP\n :param port_fixed_ips: dictionary of the structure:\n {\"\": {\"port\": ,\n \"fixed_ip\": \"\"}}\n :return: None", "id": "f10753:m6"} {"signature": "def _update_flip(context, flip_id, ip_type, requested_ports):", "body": "notifications = {billing.IP_ASSOC: set(),billing.IP_DISASSOC: set()}context.session.begin()try:flip = db_api.floating_ip_find(context, id=flip_id, scope=db_api.ONE)if not flip:if ip_type == ip_types.SCALING:raise q_exc.ScalingIpNotFound(id=flip_id)raise q_exc.FloatingIpNotFound(id=flip_id)current_ports = flip.portsreq_port_ids = [request_port.get('')for request_port in requested_ports]curr_port_ids = [curr_port.id for curr_port in current_ports]added_port_ids = [port_id for port_id in req_port_idsif port_id and port_id not in curr_port_ids]removed_port_ids = [port_id for port_id in curr_port_idsif port_id not in req_port_ids]remaining_port_ids = set(curr_port_ids) - set(removed_port_ids)if (ip_type == ip_types.FLOATING and curr_port_ids andcurr_port_ids == req_port_ids):d = dict(flip_id=flip_id, port_id=curr_port_ids[])raise q_exc.PortAlreadyAssociatedToFloatingIp(**d)if (ip_type == ip_types.FLOATING andnot curr_port_ids and not req_port_ids):raise q_exc.FloatingIpUpdateNoPortIdSupplied()flip_subnet = v._make_subnet_dict(flip.subnet)for added_port_id in added_port_ids:port = _get_port(context, added_port_id)nw = port.networknw_ports = v._make_ports_list(nw.ports)fixed_ips = 
[ip.get('') for p in nw_portsfor ip in p.get('')]gw_ip = flip_subnet.get('')if gw_ip in fixed_ips:port_with_gateway_ip = Nonefor port in nw_ports:for ip in port.get(''):if gw_ip in ip.get(''):port_with_gateway_ip = portbreakport_id = port_with_gateway_ip.get('')network_id = port_with_gateway_ip.get('')raise q_exc.FixedIpAllocatedToGatewayIp(port_id=port_id,network_id=network_id)port_fixed_ips = {}for port_id in remaining_port_ids:port = db_api.port_find(context, id=port_id, scope=db_api.ONE)fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)port_fixed_ips[port_id] = {'': port, '': fixed_ip}for port_id in removed_port_ids:port = db_api.port_find(context, id=port_id, scope=db_api.ONE)flip = db_api.port_disassociate_ip(context, [port], flip)notifications[billing.IP_DISASSOC].add(flip)fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)if fixed_ip:flip = db_api.floating_ip_disassociate_fixed_ip(context, flip, fixed_ip)for port_id in added_port_ids:port = db_api.port_find(context, id=port_id, scope=db_api.ONE)if not port:raise n_exc.PortNotFound(port_id=port_id)if any(ip for ip in port.ip_addressesif (ip.get('') == ip_types.FLOATING)):raise q_exc.PortAlreadyContainsFloatingIp(port_id=port_id)if any(ip for ip in port.ip_addressesif (ip.get('') == ip_types.SCALING)):raise q_exc.PortAlreadyContainsScalingIp(port_id=port_id)fixed_ip = _get_next_available_fixed_ip(port)LOG.info('' % fixed_ip)if not fixed_ip:raise q_exc.NoAvailableFixedIpsForPort(port_id=port_id)port_fixed_ips[port_id] = {'': port, '': fixed_ip}flip = db_api.port_associate_ip(context, [port], flip, [port_id])notifications[billing.IP_ASSOC].add(flip)flip = db_api.floating_ip_associate_fixed_ip(context, flip,fixed_ip)flip_driver = registry.DRIVER_REGISTRY.get_driver()if not remaining_port_ids and not added_port_ids:flip_driver.remove_floating_ip(flip)elif added_port_ids and not curr_port_ids:flip_driver.register_floating_ip(flip, port_fixed_ips)else:flip_driver.update_floating_ip(flip, port_fixed_ips)context.session.commit()except Exception:context.session.rollback()raisefor notif_type, flip_set in notifications.iteritems():for flip in flip_set:billing.notify(context, notif_type, flip)context.session.refresh(flip)return flip", "docstring": "Update a flip based IPAddress\n\n :param context: neutron api request context.\n :param flip_id: id of the flip or scip\n :param ip_type: ip_types.FLOATING | ip_types.SCALING\n :param requested_ports: dictionary of the structure:\n {\"port_id\": \"\", \"fixed_ip\": \"\"}\n :return: quark.models.IPAddress", "id": "f10753:m8"} {"signature": "def create_floatingip(context, content):", "body": "LOG.info('' %(id, context.tenant_id, content))network_id = content.get('')if not network_id:raise n_exc.BadRequest(resource='',msg='')fixed_ip_address = content.get('')ip_address = content.get('')port_id = content.get('')port = Noneport_fixed_ip = {}network = _get_network(context, network_id)if port_id:port = _get_port(context, port_id)fixed_ip = _get_fixed_ip(context, fixed_ip_address, port)port_fixed_ip = {port.id: {'': port, '': fixed_ip}}flip = _allocate_ip(context, network, port, ip_address, ip_types.FLOATING)_create_flip(context, flip, port_fixed_ip)return v._make_floating_ip_dict(flip, port_id)", "docstring": "Allocate or reallocate a floating IP.\n\n :param context: neutron api request context.\n :param content: dictionary describing the floating ip, with keys\n as listed in the RESOURCE_ATTRIBUTE_MAP object in\n neutron/api/v2/attributes.py. 
All keys will be populated.\n\n :returns: Dictionary containing details for the new floating IP. If values\n are declared in the fields parameter, then only those keys will be\n present.", "id": "f10753:m10"} {"signature": "def update_floatingip(context, id, content):", "body": "LOG.info('' %(id, context.tenant_id, content))if '' not in content:raise n_exc.BadRequest(resource='',msg='')requested_ports = []if content.get(''):requested_ports = [{'': content.get('')}]flip = _update_flip(context, id, ip_types.FLOATING, requested_ports)return v._make_floating_ip_dict(flip)", "docstring": "Update an existing floating IP.\n\n :param context: neutron api request context.\n :param id: id of the floating ip\n :param content: dictionary with keys indicating fields to update.\n valid keys are those that have a value of True for 'allow_put'\n as listed in the RESOURCE_ATTRIBUTE_MAP object in\n neutron/api/v2/attributes.py.\n\n :returns: Dictionary containing details for the new floating IP. If values\n are declared in the fields parameter, then only those keys will be\n present.", "id": "f10753:m11"} {"signature": "def delete_floatingip(context, id):", "body": "LOG.info('' % (id, context.tenant_id))_delete_flip(context, id, ip_types.FLOATING)", "docstring": "deallocate a floating IP.\n\n :param context: neutron api request context.\n :param id: id of the floating ip", "id": "f10753:m12"} {"signature": "def get_floatingip(context, id, fields=None):", "body": "LOG.info('' % (id, context.tenant_id))filters = {'': ip_types.FLOATING, '': False}floating_ip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE,**filters)if not floating_ip:raise q_exc.FloatingIpNotFound(id=id)return v._make_floating_ip_dict(floating_ip)", "docstring": "Retrieve a floating IP.\n\n :param context: neutron api request context.\n :param id: The UUID of the floating IP.\n :param fields: a list of strings that are valid keys in a\n floating IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n object in neutron/api/v2/attributes.py. Only these fields\n will be returned.\n\n :returns: Dictionary containing details for the floating IP. If values\n are declared in the fields parameter, then only those keys will be\n present.", "id": "f10753:m13"} {"signature": "def get_floatingips(context, filters=None, fields=None, sorts=[''],limit=None, marker=None, page_reverse=False):", "body": "LOG.info('' %(context.tenant_id, filters, fields))floating_ips = _get_ips_by_type(context, ip_types.FLOATING,filters=filters, fields=fields)return [v._make_floating_ip_dict(flip) for flip in floating_ips]", "docstring": "Retrieve a list of floating ips.\n\n :param context: neutron api request context.\n :param filters: a dictionary with keys that are valid keys for\n a floating ip as listed in the RESOURCE_ATTRIBUTE_MAP object\n in neutron/api/v2/attributes.py. Values in this dictionary\n are an iterable containing values that will be used for an exact\n match comparison for that value. Each result returned by this\n function will have matched one of the values for each key in\n filters.\n :param fields: a list of strings that are valid keys in a\n floating IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n object in neutron/api/v2/attributes.py. 
Only these fields\n will be returned.\n\n :returns: List of floating IPs that are accessible to the tenant who\n submits the request (as indicated by the tenant id of the context)\n as well as any filters.", "id": "f10753:m14"} {"signature": "def get_floatingips_count(context, filters=None):", "body": "LOG.info('' %(context.tenant_id, filters))if filters is None:filters = {}filters[''] = Falsefilters[''] = ip_types.FLOATINGcount = db_api.ip_address_count_all(context, filters)LOG.info('' % (count,context.tenant_id))return count", "docstring": "Return the number of floating IPs.\n\n :param context: neutron api request context\n :param filters: a dictionary with keys that are valid keys for\n a floating IP as listed in the RESOURCE_ATTRIBUTE_MAP object\n in neutron/api/v2/attributes.py. Values in this dictionary\n are an iterable containing values that will be used for an exact\n match comparison for that value. Each result returned by this\n function will have matched one of the values for each key in\n filters.\n\n :returns: The number of floating IPs that are accessible to the tenant who\n submits the request (as indicated by the tenant id of the context)\n as well as any filters.\n\n NOTE: this method is optional, as it was not part of the originally\n defined plugin API.", "id": "f10753:m15"} {"signature": "def create_scalingip(context, content):", "body": "LOG.info('',context.tenant_id, content)network_id = content.get('')ip_address = content.get('')requested_ports = content.get('', [])network = _get_network(context, network_id)port_fixed_ips = {}for req_port in requested_ports:port = _get_port(context, req_port[''])fixed_ip = _get_fixed_ip(context, req_port.get(''),port)port_fixed_ips[port.id] = {\"\": port, \"\": fixed_ip}scip = _allocate_ip(context, network, None, ip_address, ip_types.SCALING)_create_flip(context, scip, port_fixed_ips)return v._make_scaling_ip_dict(scip)", "docstring": "Allocate or reallocate a scaling IP.\n\n :param context: neutron api request context.\n :param content: dictionary describing the scaling ip, with keys\n as listed in the RESOURCE_ATTRIBUTE_MAP object in\n neutron/api/v2/attributes.py. All keys will be populated.\n\n :returns: Dictionary containing details for the new scaling IP. If values\n are declared in the fields parameter, then only those keys will be\n present.", "id": "f10753:m16"} {"signature": "def update_scalingip(context, id, content):", "body": "LOG.info('' %(id, context.tenant_id, content))requested_ports = content.get('', [])flip = _update_flip(context, id, ip_types.SCALING, requested_ports)return v._make_scaling_ip_dict(flip)", "docstring": "Update an existing scaling IP.\n\n :param context: neutron api request context.\n :param id: id of the scaling ip\n :param content: dictionary with keys indicating fields to update.\n valid keys are those that have a value of True for 'allow_put'\n as listed in the RESOURCE_ATTRIBUTE_MAP object in\n neutron/api/v2/attributes.py.\n\n :returns: Dictionary containing details for the new scaling IP. 
If values\n are declared in the fields parameter, then only those keys will be\n present.", "id": "f10753:m17"} {"signature": "def delete_scalingip(context, id):", "body": "LOG.info('' % (id, context.tenant_id))_delete_flip(context, id, ip_types.SCALING)", "docstring": "Deallocate a scaling IP.\n\n :param context: neutron api request context.\n :param id: id of the scaling ip", "id": "f10753:m18"} {"signature": "def get_scalingip(context, id, fields=None):", "body": "LOG.info('' % (id, context.tenant_id))filters = {'': ip_types.SCALING, '': False}scaling_ip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE,**filters)if not scaling_ip:raise q_exc.ScalingIpNotFound(id=id)return v._make_scaling_ip_dict(scaling_ip)", "docstring": "Retrieve a scaling IP.\n\n :param context: neutron api request context.\n :param id: The UUID of the scaling IP.\n :param fields: a list of strings that are valid keys in a\n scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n object in neutron/api/v2/attributes.py. Only these fields\n will be returned.\n\n :returns: Dictionary containing details for the scaling IP. If values\n are declared in the fields parameter, then only those keys will be\n present.", "id": "f10753:m19"} {"signature": "def get_scalingips(context, filters=None, fields=None, sorts=[''],limit=None, marker=None, page_reverse=False):", "body": "LOG.info('' %(context.tenant_id, filters, fields))scaling_ips = _get_ips_by_type(context, ip_types.SCALING,filters=filters, fields=fields)return [v._make_scaling_ip_dict(scip) for scip in scaling_ips]", "docstring": "Retrieve a list of scaling ips.\n\n :param context: neutron api request context.\n :param filters: a dictionary with keys that are valid keys for\n a scaling ip as listed in the RESOURCE_ATTRIBUTE_MAP object\n in neutron/api/v2/attributes.py. Values in this dictionary\n are an iterable containing values that will be used for an exact\n match comparison for that value. Each result returned by this\n function will have matched one of the values for each key in\n filters.\n :param fields: a list of strings that are valid keys in a\n scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n object in neutron/api/v2/attributes.py. 
Only these fields\n will be returned.\n\n :returns: List of scaling IPs that are accessible to the tenant who\n submits the request (as indicated by the tenant id of the context)\n as well as any filters.", "id": "f10753:m20"} {"signature": "def _validate_subnet_cidr(context, network_id, new_subnet_cidr):", "body": "if neutron_cfg.cfg.CONF.allow_overlapping_ips:returntry:new_subnet_ipset = netaddr.IPSet([new_subnet_cidr])except TypeError:LOG.exception(\"\" % new_subnet_cidr)raise n_exc.BadRequest(resource=\"\",msg=\"\")filters = {'': network_id,'': [False]}subnet_list = db_api.subnet_find(context=context.elevated(), **filters)for subnet in subnet_list:if (netaddr.IPSet([subnet.cidr]) & new_subnet_ipset):err_msg = (_(\"\"\"\"\"\") %{'': new_subnet_cidr,'': network_id})LOG.error(_(\"\"\"\"\"\"),{'': new_subnet_cidr,'': subnet.id,'': subnet.cidr})raise n_exc.InvalidInput(error_message=err_msg)", "docstring": "Validate the CIDR for a subnet.\n\n Verifies the specified CIDR does not overlap with the ones defined\n for the other subnets specified for this network, or with any other\n CIDR if overlapping IPs are disabled.", "id": "f10754:m0"} {"signature": "def create_subnet(context, subnet):", "body": "LOG.info(\"\" % context.tenant_id)net_id = subnet[\"\"][\"\"]with context.session.begin():net = db_api.network_find(context=context, limit=None, sorts=[''],marker=None, page_reverse=False, fields=None,id=net_id, scope=db_api.ONE)if not net:raise n_exc.NetworkNotFound(net_id=net_id)sub_attrs = subnet[\"\"]always_pop = [\"\", \"\", \"\", \"\",\"\"]admin_only = [\"\", \"\", \"\",\"\"]utils.filter_body(context, sub_attrs, admin_only, always_pop)_validate_subnet_cidr(context, net_id, sub_attrs[\"\"])cidr = netaddr.IPNetwork(sub_attrs[\"\"])err_vals = {'': sub_attrs[\"\"], '': net_id}err = _(\"\"\"\"\"\")if cidr.version == and cidr.prefixlen > :err_vals[\"\"] = err_msg = err % err_valsraise n_exc.InvalidInput(error_message=err_msg)elif cidr.version == and cidr.prefixlen > :err_vals[\"\"] = err_msg = err % err_valsraise n_exc.InvalidInput(error_message=err_msg)net_subnets = get_subnets(context,filters=dict(network_id=net_id))if not context.is_admin:v4_count, v6_count = , for subnet in net_subnets:if netaddr.IPNetwork(subnet['']).version == :v6_count += else:v4_count += if cidr.version == :tenant_quota_v6 = context.session.query(qdv.Quota).filter_by(tenant_id=context.tenant_id,resource='').first()if tenant_quota_v6 != -:quota.QUOTAS.limit_check(context, context.tenant_id,v6_subnets_per_network=v6_count + )else:tenant_quota_v4 = context.session.query(qdv.Quota).filter_by(tenant_id=context.tenant_id,resource='').first()if tenant_quota_v4 != -:quota.QUOTAS.limit_check(context, context.tenant_id,v4_subnets_per_network=v4_count + )gateway_ip = utils.pop_param(sub_attrs, \"\")dns_ips = utils.pop_param(sub_attrs, \"\", [])host_routes = utils.pop_param(sub_attrs, \"\", [])allocation_pools = utils.pop_param(sub_attrs, \"\", None)sub_attrs[\"\"] = netnew_subnet = db_api.subnet_create(context, **sub_attrs)cidrs = []alloc_pools = allocation_pool.AllocationPools(sub_attrs[\"\"],allocation_pools)if isinstance(allocation_pools, list):cidrs = alloc_pools.get_policy_cidrs()quota.QUOTAS.limit_check(context,context.tenant_id,alloc_pools_per_subnet=len(alloc_pools))ip_policies.ensure_default_policy(cidrs, [new_subnet])new_subnet[\"\"] = db_api.ip_policy_create(context,exclude=cidrs)quota.QUOTAS.limit_check(context, context.tenant_id,routes_per_subnet=len(host_routes))default_route = Nonefor route in host_routes:netaddr_route 
= netaddr.IPNetwork(route[\"\"])if netaddr_route.value == routes.DEFAULT_ROUTE.value:if default_route:raise q_exc.DuplicateRouteConflict(subnet_id=new_subnet[\"\"])default_route = routegateway_ip = default_route[\"\"]alloc_pools.validate_gateway_excluded(gateway_ip)new_subnet[\"\"].append(db_api.route_create(context, cidr=route[\"\"], gateway=route[\"\"]))quota.QUOTAS.limit_check(context, context.tenant_id,dns_nameservers_per_subnet=len(dns_ips))for dns_ip in dns_ips:new_subnet[\"\"].append(db_api.dns_create(context, ip=netaddr.IPAddress(dns_ip)))if gateway_ip and default_route is None:alloc_pools.validate_gateway_excluded(gateway_ip)new_subnet[\"\"].append(db_api.route_create(context, cidr=str(routes.DEFAULT_ROUTE), gateway=gateway_ip))subnet_dict = v._make_subnet_dict(new_subnet)subnet_dict[\"\"] = gateway_ipreturn subnet_dict", "docstring": "Create a subnet.\n\n Create a subnet which represents a range of IP addresses\n that can be allocated to devices\n\n : param context: neutron api request context\n : param subnet: dictionary describing the subnet, with keys\n as listed in the RESOURCE_ATTRIBUTE_MAP object in\n neutron/api/v2/attributes.py. All keys will be populated.", "id": "f10754:m1"} {"signature": "def update_subnet(context, id, subnet):", "body": "LOG.info(\"\" %(id, context.tenant_id))with context.session.begin():subnet_db = db_api.subnet_find(context=context, limit=None,page_reverse=False, sorts=[''],marker_obj=None, fields=None,id=id, scope=db_api.ONE)if not subnet_db:raise n_exc.SubnetNotFound(subnet_id=id)s = subnet[\"\"]always_pop = [\"\", \"\", \"\", \"\", \"\",\"\", \"\"]admin_only = [\"\", \"\", \"\",\"\", \"\"]utils.filter_body(context, s, admin_only, always_pop)dns_ips = utils.pop_param(s, \"\", [])host_routes = utils.pop_param(s, \"\", [])gateway_ip = utils.pop_param(s, \"\", None)allocation_pools = utils.pop_param(s, \"\", None)if not CONF.QUARK.allow_allocation_pool_update:if allocation_pools:raise n_exc.BadRequest(resource=\"\",msg=\"\")if subnet_db[\"\"] is not None:ip_policy_cidrs = subnet_db[\"\"].get_cidrs_ip_set()else:ip_policy_cidrs = netaddr.IPSet([])alloc_pools = allocation_pool.AllocationPools(subnet_db[\"\"],policies=ip_policy_cidrs)else:alloc_pools = allocation_pool.AllocationPools(subnet_db[\"\"],allocation_pools)original_pools = subnet_db.allocation_poolsori_pools = allocation_pool.AllocationPools(subnet_db[\"\"],original_pools)is_growing = _pool_is_growing(ori_pools, alloc_pools)if not CONF.QUARK.allow_allocation_pool_growth and is_growing:raise n_exc.BadRequest(resource=\"\",msg=\"\"\"\")quota.QUOTAS.limit_check(context,context.tenant_id,alloc_pools_per_subnet=len(alloc_pools))if gateway_ip:alloc_pools.validate_gateway_excluded(gateway_ip)default_route = Nonefor route in host_routes:netaddr_route = netaddr.IPNetwork(route[\"\"])if netaddr_route.value == routes.DEFAULT_ROUTE.value:default_route = routebreakif default_route is None:route_model = db_api.route_find(context, cidr=str(routes.DEFAULT_ROUTE), subnet_id=id,scope=db_api.ONE)if route_model:db_api.route_update(context, route_model,gateway=gateway_ip)else:db_api.route_create(context,cidr=str(routes.DEFAULT_ROUTE),gateway=gateway_ip, subnet_id=id)if dns_ips:subnet_db[\"\"] = []quota.QUOTAS.limit_check(context, context.tenant_id,dns_nameservers_per_subnet=len(dns_ips))for dns_ip in dns_ips:subnet_db[\"\"].append(db_api.dns_create(context,ip=netaddr.IPAddress(dns_ip)))if host_routes:subnet_db[\"\"] = []quota.QUOTAS.limit_check(context, context.tenant_id,routes_per_subnet=len(host_routes))for route 
in host_routes:subnet_db[\"\"].append(db_api.route_create(context, cidr=route[\"\"], gateway=route[\"\"]))if CONF.QUARK.allow_allocation_pool_update:if isinstance(allocation_pools, list):cidrs = alloc_pools.get_policy_cidrs()ip_policies.ensure_default_policy(cidrs, [subnet_db])subnet_db[\"\"] = db_api.ip_policy_update(context, subnet_db[\"\"], exclude=cidrs)db_api.subnet_update_set_alloc_pool_cache(context, subnet_db)subnet = db_api.subnet_update(context, subnet_db, **s)return v._make_subnet_dict(subnet)", "docstring": "Update values of a subnet.\n\n : param context: neutron api request context\n : param id: UUID representing the subnet to update.\n : param subnet: dictionary with keys indicating fields to update.\n valid keys are those that have a value of True for 'allow_put'\n as listed in the RESOURCE_ATTRIBUTE_MAP object in\n neutron/api/v2/attributes.py.", "id": "f10754:m3"} {"signature": "def get_subnet(context, id, fields=None):", "body": "LOG.info(\"\" %(id, context.tenant_id, fields))subnet = db_api.subnet_find(context=context, limit=None,page_reverse=False, sorts=[''],marker_obj=None, fields=None, id=id,join_dns=True, join_routes=True,scope=db_api.ONE)if not subnet:raise n_exc.SubnetNotFound(subnet_id=id)cache = subnet.get(\"\")if not cache:new_cache = subnet.allocation_poolsdb_api.subnet_update_set_alloc_pool_cache(context, subnet, new_cache)return v._make_subnet_dict(subnet)", "docstring": "Retrieve a subnet.\n\n : param context: neutron api request context\n : param id: UUID representing the subnet to fetch.\n : param fields: a list of strings that are valid keys in a\n subnet dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n object in neutron/api/v2/attributes.py. Only these fields\n will be returned.", "id": "f10754:m4"} {"signature": "def get_subnets(context, limit=None, page_reverse=False, sorts=[''],marker=None, filters=None, fields=None):", "body": "LOG.info(\"\" %(context.tenant_id, filters, fields))filters = filters or {}subnets = db_api.subnet_find(context, limit=limit,page_reverse=page_reverse, sorts=sorts,marker_obj=marker, join_dns=True,join_routes=True, join_pool=True, **filters)for subnet in subnets:cache = subnet.get(\"\")if not cache:db_api.subnet_update_set_alloc_pool_cache(context, subnet, subnet.allocation_pools)return v._make_subnets_list(subnets, fields=fields)", "docstring": "Retrieve a list of subnets.\n\n The contents of the list depends on the identity of the user\n making the request (as indicated by the context) as well as any\n filters.\n : param context: neutron api request context\n : param filters: a dictionary with keys that are valid keys for\n a subnet as listed in the RESOURCE_ATTRIBUTE_MAP object\n in neutron/api/v2/attributes.py. Values in this dictiontary\n are an iterable containing values that will be used for an exact\n match comparison for that value. Each result returned by this\n function will have matched one of the values for each key in\n filters.\n : param fields: a list of strings that are valid keys in a\n subnet dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n object in neutron/api/v2/attributes.py. 
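The _validate_subnet_cidr entry earlier in this section rejects a new subnet whose CIDR intersects any existing subnet on the network (unless overlapping IPs are enabled). A minimal netaddr sketch of that intersection test, with illustrative CIDRs rather than values from the corpus:

```python
import netaddr

new_subnet = netaddr.IPSet(["192.168.1.0/24"])     # candidate subnet CIDR
existing = ["192.168.0.0/16", "10.0.0.0/8"]        # CIDRs already on the network

for cidr in existing:
    if netaddr.IPSet([cidr]) & new_subnet:         # non-empty intersection
        print("overlaps existing subnet", cidr)    # fires for 192.168.0.0/16
```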
Only these fields\n will be returned.", "id": "f10754:m5"} {"signature": "def get_subnets_count(context, filters=None):", "body": "LOG.info(\"\" %(context.tenant_id, filters))return db_api.subnet_count_all(context, **filters)", "docstring": "Return the number of subnets.\n\n The result depends on the identity of the user making the request\n (as indicated by the context) as well as any filters.\n : param context: neutron api request context\n : param filters: a dictionary with keys that are valid keys for\n a network as listed in the RESOURCE_ATTRIBUTE_MAP object\n in neutron/api/v2/attributes.py. Values in this dictionary\n are an iterable containing values that will be used for an exact\n match comparison for that value. Each result returned by this\n function will have matched one of the values for each key in\n filters.\n\n NOTE: this method is optional, as it was not part of the originally\n defined plugin API.", "id": "f10754:m6"} {"signature": "def delete_subnet(context, id):", "body": "LOG.info(\"\" % (id, context.tenant_id))with context.session.begin():subnet = db_api.subnet_find(context, id=id, scope=db_api.ONE)if not subnet:raise n_exc.SubnetNotFound(subnet_id=id)if not context.is_admin:if STRATEGY.is_provider_network(subnet.network_id):if subnet.tenant_id == context.tenant_id:raise n_exc.NotAuthorized(subnet_id=id)else:raise n_exc.SubnetNotFound(subnet_id=id)_delete_subnet(context, subnet)", "docstring": "Delete a subnet.\n\n : param context: neutron api request context\n : param id: UUID representing the subnet to delete.", "id": "f10754:m8"} {"signature": "def _filter_update_security_group_rule(rule):", "body": "allowed = ['', '']filtered = {}for k, val in rule.iteritems():if k in allowed:if isinstance(val, basestring) andlen(val) <= GROUP_NAME_MAX_LENGTH:filtered[k] = valreturn filtered", "docstring": "Only two fields are allowed for modification:\n\n external_service and external_service_id", "id": "f10756:m1"} {"signature": "@env.has_capability(env.Capabilities.SG_UPDATE_ASYNC)def _perform_async_update_rule(context, id, db_sg_group, rule_id, action):", "body": "rpc_reply = Nonesg_rpc = sg_rpc_api.QuarkSGAsyncProcessClient()ports = db_api.sg_gather_associated_ports(context, db_sg_group)if len(ports) > :rpc_reply = sg_rpc.start_update(context, id, rule_id, action)if rpc_reply:job_id = rpc_reply['']job_api.add_job_to_context(context, job_id)else:LOG.error(\"\")", "docstring": "Updates a SG rule async and returns the job information.\n\n Only happens if the security group has associated ports. 
If the async\n connection fails the update continues (legacy mode).", "id": "f10756:m8"} {"signature": "def create_security_group_rule(context, security_group_rule):", "body": "LOG.info(\"\" %(context.tenant_id))with context.session.begin():rule = _validate_security_group_rule(context, security_group_rule[\"\"])rule[\"\"] = uuidutils.generate_uuid()group_id = rule[\"\"]group = db_api.security_group_find(context, id=group_id,scope=db_api.ONE)if not group:raise sg_ext.SecurityGroupNotFound(id=group_id)quota.QUOTAS.limit_check(context, context.tenant_id,security_rules_per_group=len(group.get(\"\", [])) + )new_rule = db_api.security_group_rule_create(context, **rule)if group:_perform_async_update_rule(context, group_id, group, new_rule.id,RULE_CREATE)return v._make_security_group_rule_dict(new_rule)", "docstring": "Creates a rule and updates the ports (async) if enabled.", "id": "f10756:m9"} {"signature": "def update_security_group_rule(context, id, security_group_rule):", "body": "LOG.info(\"\" %(context.tenant_id))new_rule = security_group_rule[\"\"]new_rule = _filter_update_security_group_rule(new_rule)with context.session.begin():rule = db_api.security_group_rule_find(context, id=id,scope=db_api.ONE)if not rule:raise sg_ext.SecurityGroupRuleNotFound(id=id)db_rule = db_api.security_group_rule_update(context, rule, **new_rule)group_id = db_rule.group_idgroup = db_api.security_group_find(context, id=group_id,scope=db_api.ONE)if not group:raise sg_ext.SecurityGroupNotFound(id=group_id)if group:_perform_async_update_rule(context, group_id, group, rule.id,RULE_UPDATE)return v._make_security_group_rule_dict(db_rule)", "docstring": "Updates a rule and updates the ports", "id": "f10756:m10"} {"signature": "def delete_security_group_rule(context, id):", "body": "LOG.info(\"\" %(id, context.tenant_id))with context.session.begin():rule = db_api.security_group_rule_find(context, id=id,scope=db_api.ONE)if not rule:raise sg_ext.SecurityGroupRuleNotFound(id=id)group = db_api.security_group_find(context, id=rule[\"\"],scope=db_api.ONE)if not group:raise sg_ext.SecurityGroupNotFound(id=id)rule[\"\"] = iddb_api.security_group_rule_delete(context, rule)if group:_perform_async_update_rule(context, group.id, group, id, RULE_DELETE)", "docstring": "Deletes a rule and updates the ports (async) if enabled.", "id": "f10756:m12"} {"signature": "@utils.exc_wrapper(internal=True)def create_port(context, port):", "body": "LOG.info(\"\" % context.tenant_id)port_attrs = port[\"\"]admin_only = [\"\", \"\", \"\", \"\",\"\", \"\",\"\"]utils.filter_body(context, port_attrs, admin_only=admin_only)port_attrs = port[\"\"]mac_address = utils.pop_param(port_attrs, \"\", None)use_forbidden_mac_range = utils.pop_param(port_attrs,\"\", False)segment_id = utils.pop_param(port_attrs, \"\")fixed_ips = utils.pop_param(port_attrs, \"\")if \"\" not in port_attrs:port_attrs[''] = \"\"device_id = port_attrs['']if \"\" not in port_attrs:port_attrs[''] = \"\"instance_node_id = port_attrs['']net_id = port_attrs[\"\"]port_id = uuidutils.generate_uuid()net = db_api.network_find(context=context, limit=None, sorts=[''],marker=None, page_reverse=False, fields=None,id=net_id, scope=db_api.ONE)if not net:raise n_exc.NetworkNotFound(net_id=net_id)_raise_if_unauthorized(context, net)if device_id:existing_ports = db_api.port_find(context,network_id=net_id,device_id=device_id,scope=db_api.ONE)if existing_ports:raise n_exc.BadRequest(resource=\"\", msg=\"\"\"\")if fixed_ips:quota.QUOTAS.limit_check(context, 
context.tenant_id,fixed_ips_per_port=len(fixed_ips))if not STRATEGY.is_provider_network(net_id):segment_id = Noneport_count = db_api.port_count_all(context, network_id=[net_id],tenant_id=[context.tenant_id])quota.QUOTAS.limit_check(context, context.tenant_id,ports_per_network=port_count + )else:if not segment_id:raise q_exc.AmbiguousNetworkId(net_id=net_id)network_plugin = utils.pop_param(port_attrs, \"\")if not network_plugin:network_plugin = net[\"\"]port_attrs[\"\"] = network_pluginipam_driver = _get_ipam_driver(net, port=port_attrs)net_driver = _get_net_driver(net, port=port_attrs)base_net_driver = _get_net_driver(net)security_groups = utils.pop_param(port_attrs, \"\")if security_groups is not None:raise q_exc.SecurityGroupsNotImplemented()group_ids, security_groups = _make_security_group_list(context,security_groups)quota.QUOTAS.limit_check(context, context.tenant_id,security_groups_per_port=len(group_ids))addresses = []backend_port = Nonewith utils.CommandManager().execute() as cmd_mgr:@cmd_mgr.dodef _allocate_ips(fixed_ips, net, port_id, segment_id, mac,**kwargs):if fixed_ips:if (STRATEGY.is_provider_network(net_id) andnot context.is_admin):raise n_exc.NotAuthorized()ips, subnets = split_and_validate_requested_subnets(context,net_id,segment_id,fixed_ips)kwargs[\"\"] = ipskwargs[\"\"] = subnetsipam_driver.allocate_ip_address(context, addresses, net[\"\"], port_id,CONF.QUARK.ipam_reuse_after, segment_id=segment_id,mac_address=mac, **kwargs)@cmd_mgr.undodef _allocate_ips_undo(addr, **kwargs):LOG.info(\"\")if addresses:for address in addresses:try:with context.session.begin():ipam_driver.deallocate_ip_address(context, address,**kwargs)except Exception:LOG.exception(\"\" % address)@cmd_mgr.dodef _allocate_mac(net, port_id, mac_address,use_forbidden_mac_range=False,**kwargs):mac = ipam_driver.allocate_mac_address(context, net[\"\"], port_id, CONF.QUARK.ipam_reuse_after,mac_address=mac_address,use_forbidden_mac_range=use_forbidden_mac_range, **kwargs)return mac@cmd_mgr.undodef _allocate_mac_undo(mac, **kwargs):LOG.info(\"\")if mac:try:with context.session.begin():ipam_driver.deallocate_mac_address(context,mac[\"\"])except Exception:LOG.exception(\"\" % mac)@cmd_mgr.dodef _allocate_backend_port(mac, addresses, net, port_id, **kwargs):backend_port = net_driver.create_port(context, net[\"\"],port_id=port_id,security_groups=group_ids,device_id=device_id,instance_node_id=instance_node_id,mac_address=mac,addresses=addresses,base_net_driver=base_net_driver)_filter_backend_port(backend_port)return backend_port@cmd_mgr.undodef _allocate_back_port_undo(backend_port,**kwargs):LOG.info(\"\")try:backend_port_uuid = Noneif backend_port:backend_port_uuid = backend_port.get(\"\")net_driver.delete_port(context, backend_port_uuid)except Exception:LOG.exception(\"\" % backend_port)@cmd_mgr.dodef _allocate_db_port(port_attrs, backend_port, addresses, mac,**kwargs):port_attrs[\"\"] = net[\"\"]port_attrs[\"\"] = port_idport_attrs[\"\"] = security_groupsLOG.info(\"\" % backend_port)port_attrs.update(backend_port)with context.session.begin():new_port = db_api.port_create(context, addresses=addresses, mac_address=mac[\"\"],backend_key=backend_port[\"\"], **port_attrs)return new_port@cmd_mgr.undodef _allocate_db_port_undo(new_port,**kwargs):LOG.info(\"\")if not new_port:returntry:with context.session.begin():db_api.port_delete(context, new_port)except Exception:LOG.exception(\"\" % backend_port)mac = _allocate_mac(net, port_id, mac_address,use_forbidden_mac_range=use_forbidden_mac_range)_allocate_ips(fixed_ips, 
net, port_id, segment_id, mac)backend_port = _allocate_backend_port(mac, addresses, net, port_id)new_port = _allocate_db_port(port_attrs, backend_port, addresses, mac)return v._make_port_dict(new_port)", "docstring": "Create a port\n\n Create a port which is a connection point of a device (e.g., a VM\n NIC) to attach to a L2 Neutron network.\n : param context: neutron api request context\n : param port: dictionary describing the port, with keys\n as listed in the RESOURCE_ATTRIBUTE_MAP object in\n neutron/api/v2/attributes.py. All keys will be populated.", "id": "f10757:m5"} {"signature": "@utils.exc_wrapper(internal=True)def update_port(context, id, port):", "body": "LOG.info(\"\" % (id, context.tenant_id))port_db = db_api.port_find(context, id=id, scope=db_api.ONE)if not port_db:raise n_exc.PortNotFound(port_id=id)port_dict = port[\"\"]fixed_ips = port_dict.pop(\"\", None)admin_only = [\"\", \"\", \"\", \"\",\"\"]always_filter = [\"\", \"\", \"\"]utils.filter_body(context, port_dict, admin_only=admin_only,always_filter=always_filter)if fixed_ips:quota.QUOTAS.limit_check(context, context.tenant_id,fixed_ips_per_port=len(fixed_ips))new_security_groups = utils.pop_param(port_dict, \"\")if new_security_groups is not None:if (Capabilities.TENANT_NETWORK_SG not inCONF.QUARK.environment_capabilities):if not STRATEGY.is_provider_network(port_db[\"\"]):raise q_exc.TenantNetworkSecurityGroupRulesNotEnabled()if new_security_groups is not None and not port_db[\"\"]:raise q_exc.SecurityGroupsRequireDevice()group_ids, security_group_mods = _make_security_group_list(context, new_security_groups)quota.QUOTAS.limit_check(context, context.tenant_id,security_groups_per_port=len(group_ids))if fixed_ips is not None:ipam_driver = ipam.IPAM_REGISTRY.get_strategy(ipam.QuarkIpamANY.get_name())addresses, subnet_ids = [], []ip_addresses = {}for fixed_ip in fixed_ips:subnet_id = fixed_ip.get(\"\")ip_address = fixed_ip.get(\"\")if not (subnet_id or ip_address):raise n_exc.BadRequest(resource=\"\",msg=\"\")if ip_address and not subnet_id:raise n_exc.BadRequest(resource=\"\",msg=\"\")if subnet_id and ip_address:ip_netaddr = Nonetry:ip_netaddr = netaddr.IPAddress(ip_address).ipv6()except netaddr.AddrFormatError:raise n_exc.InvalidInput(error_message=\"\")ip_addresses[ip_netaddr] = subnet_idelse:subnet_ids.append(subnet_id)port_ips = set([netaddr.IPAddress(int(a[\"\"]))for a in port_db[\"\"]])new_ips = set([a for a in ip_addresses.keys()])ips_to_allocate = list(new_ips - port_ips)ips_to_deallocate = list(port_ips - new_ips)for ip in ips_to_allocate:if ip in ip_addresses:allocated = []ipam_driver.allocate_ip_address(context, allocated, port_db[\"\"],port_db[\"\"], reuse_after=None, ip_addresses=[ip],subnets=[ip_addresses[ip]])addresses.extend(allocated)for ip in ips_to_deallocate:ipam_driver.deallocate_ips_by_port(context, port_db, ip_address=ip)for subnet_id in subnet_ids:ipam_driver.allocate_ip_address(context, addresses, port_db[\"\"], port_db[\"\"],reuse_after=CONF.QUARK.ipam_reuse_after,subnets=[subnet_id])if addresses:port_dict[\"\"] = port_db[\"\"]port_dict[\"\"].extend(addresses)net_driver = _get_net_driver(port_db.network, port=port_db)base_net_driver = _get_net_driver(port_db.network)kwargs = {}if new_security_groups is not None:kwargs[\"\"] = security_group_modsnet_driver.update_port(context, port_id=port_db[\"\"],mac_address=port_db[\"\"],device_id=port_db[\"\"],base_net_driver=base_net_driver,**kwargs)port_dict[\"\"] = security_group_modswith context.session.begin():port = db_api.port_update(context, 
port_db, **port_dict)if port_db in context.session:context.session.expunge(port_db)port_db = db_api.port_find(context, id=id, scope=db_api.ONE)return v._make_port_dict(port_db)", "docstring": "Update values of a port.\n\n : param context: neutron api request context\n : param id: UUID representing the port to update.\n : param port: dictionary with keys indicating fields to update.\n valid keys are those that have a value of True for 'allow_put'\n as listed in the RESOURCE_ATTRIBUTE_MAP object in\n neutron/api/v2/attributes.py.", "id": "f10757:m6"} {"signature": "@utils.exc_wrapper(internal=True)def get_port(context, id, fields=None):", "body": "LOG.info(\"\" %(id, context.tenant_id, fields))results = db_api.port_find(context, id=id, fields=fields,scope=db_api.ONE)if not results:raise n_exc.PortNotFound(port_id=id)return v._make_port_dict(results)", "docstring": "Retrieve a port.\n\n : param context: neutron api request context\n : param id: UUID representing the port to fetch.\n : param fields: a list of strings that are valid keys in a\n port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n object in neutron/api/v2/attributes.py. Only these fields\n will be returned.", "id": "f10757:m7"} {"signature": "@utils.exc_wrapper(internal=True)def get_ports(context, limit=None, sorts=[''], marker=None,page_reverse=False, filters=None, fields=None):", "body": "LOG.info(\"\" %(context.tenant_id, filters, fields))if filters is None:filters = {}if \"\" in filters:if not context.is_admin:raise n_exc.NotAuthorized()ips = []try:ips = [netaddr.IPAddress(ip) for ip in filters.pop(\"\")]except netaddr.AddrFormatError:raise n_exc.InvalidInput(error_message=\"\")query = db_api.port_find_by_ip_address(context, ip_address=ips,scope=db_api.ALL, **filters)ports = []for ip in query:ports.extend(ip.ports)else:ports = db_api.port_find(context, limit, sorts, marker,fields=fields, join_security_groups=True,**filters)return v._make_ports_list(ports, fields)", "docstring": "Retrieve a list of ports.\n\n The contents of the list depends on the identity of the user\n making the request (as indicated by the context) as well as any\n filters.\n : param context: neutron api request context\n : param filters: a dictionary with keys that are valid keys for\n a port as listed in the RESOURCE_ATTRIBUTE_MAP object\n in neutron/api/v2/attributes.py. Values in this dictionary\n are an iterable containing values that will be used for an exact\n match comparison for that value. Each result returned by this\n function will have matched one of the values for each key in\n filters.\n : param fields: a list of strings that are valid keys in a\n port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n object in neutron/api/v2/attributes.py. Only these fields\n will be returned.", "id": "f10757:m8"} {"signature": "@utils.exc_wrapper(internal=True)def get_ports_count(context, filters=None):", "body": "LOG.info(\"\" %(context.tenant_id, filters))return db_api.port_count_all(context, join_security_groups=True, **filters)", "docstring": "Return the number of ports.\n\n The result depends on the identity of the user making the request\n (as indicated by the context) as well as any filters.\n : param context: neutron api request context\n : param filters: a dictionary with keys that are valid keys for\n a port as listed in the RESOURCE_ATTRIBUTE_MAP object\n in neutron/api/v2/attributes.py. Values in this dictionary\n are an iterable containing values that will be used for an exact\n match comparison for that value. 
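get_ports above only honors an IP-address filter for admins and parses each requested value with netaddr, rejecting malformed input. A small sketch of that parsing step with example filter values:

```python
import netaddr

raw_filters = ["10.0.0.5", "not-an-ip"]            # example ip_address filter values

parsed = []
for raw in raw_filters:
    try:
        parsed.append(netaddr.IPAddress(raw))
    except netaddr.AddrFormatError:
        print("rejecting invalid ip_address filter:", raw)
```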
Each result returned by this\n function will have matched one of the values for each key in\n filters.\n\n NOTE: this method is optional, as it was not part of the originally\n defined plugin API.", "id": "f10757:m9"} {"signature": "@utils.exc_wrapper(internal=True)def delete_port(context, id):", "body": "LOG.info(\"\" % (id, context.tenant_id))port = db_api.port_find(context, id=id, scope=db_api.ONE)if not port:raise n_exc.PortNotFound(port_id=id)if '' in port: LOG.info(\"\" %(id, context.tenant_id, port['']))backend_key = port[\"\"]mac_address = netaddr.EUI(port[\"\"]).valueipam_driver = _get_ipam_driver(port[\"\"], port=port)ipam_driver.deallocate_mac_address(context, mac_address)ipam_driver.deallocate_ips_by_port(context, port, ipam_reuse_after=CONF.QUARK.ipam_reuse_after)net_driver = _get_net_driver(port[\"\"], port=port)base_net_driver = _get_net_driver(port[\"\"])net_driver.delete_port(context, backend_key, device_id=port[\"\"],mac_address=port[\"\"],base_net_driver=base_net_driver)with context.session.begin():db_api.port_delete(context, port)", "docstring": "Delete a port.\n\n : param context: neutron api request context\n : param id: UUID representing the port to delete.", "id": "f10757:m10"} {"signature": "def _get_deallocated_override():", "body": "return ''", "docstring": "This function exists to mock and for future requirements if needed.", "id": "f10758:m11"} {"signature": "def update_ip_address(context, id, ip_address):", "body": "LOG.info(\"\" % (id, context.tenant_id))ports = []if '' not in ip_address:raise n_exc.BadRequest(resource=\"\",msg=\"\")with context.session.begin():db_address = db_api.ip_address_find(context, id=id, scope=db_api.ONE)if not db_address:raise q_exc.IpAddressNotFound(addr_id=id)iptype = db_address.address_typeif iptype == ip_types.FIXED and not CONF.QUARK.ipaddr_allow_fixed_ip:raise n_exc.BadRequest(resource=\"\",msg=\"\")reset = ip_address[''].get('', False)if reset and db_address[''] == :if context.is_admin:LOG.info(\"\")db_address[''] = _get_deallocated_override()else:msg = \"\"raise webob.exc.HTTPForbidden(detail=msg)port_ids = ip_address[''].get('', None)if port_ids is not None and not port_ids:raise n_exc.BadRequest(resource=\"\",msg=\"\")if iptype == ip_types.SHARED:has_owner = db_address.has_any_shared_owner()if port_ids:if iptype == ip_types.FIXED and len(port_ids) > :raise n_exc.BadRequest(resource=\"\",msg=\"\")_raise_if_shared_and_enabled(ip_address, db_address)ports = db_api.port_find(context, tenant_id=context.tenant_id,id=port_ids, scope=db_api.ALL)if len(ports) != len(port_ids):raise n_exc.PortNotFound(port_id=port_ids)validate_and_fetch_segment(ports, db_address[\"\"])validate_port_ip_quotas(context, db_address.network_id, ports)if iptype == ip_types.SHARED and has_owner:for assoc in db_address.associations:pid = assoc.port_idif pid not in port_ids and '' != assoc.service:raise q_exc.PortRequiresDisassociation()LOG.info(\"\"\"\" % (db_address.address_readable,[p.id for p in ports]))new_address = db_api.update_port_associations_for_ip(context,ports,db_address)elif iptype == ip_types.SHARED and has_owner:raise q_exc.PortRequiresDisassociation()elif '' in ip_address['']and context.is_admin:if len(db_address.associations) != :exc_msg = (\"\"\"\"% (db_address[''],db_address.associations))raise q_exc.ActionNotAuthorized(msg=exc_msg)if ip_address[''][''] == '':db_address[''] = Falseelse:db_address[''] = Truereturn v._make_ip_dict(db_address, context.is_admin)else:ipam_driver.deallocate_ip_address(context, db_address)return 
v._make_ip_dict(db_address, context.is_admin)return v._make_ip_dict(new_address, context.is_admin)", "docstring": "Due to NCP-1592 ensure that address_type cannot change after update.", "id": "f10758:m13"} {"signature": "def delete_ip_address(context, id):", "body": "LOG.info(\"\" % (id, context.tenant_id))with context.session.begin():ip_address = db_api.ip_address_find(context, id=id, scope=db_api.ONE)if not ip_address or ip_address.deallocated:raise q_exc.IpAddressNotFound(addr_id=id)iptype = ip_address.address_typeif iptype == ip_types.FIXED and not CONF.QUARK.ipaddr_allow_fixed_ip:raise n_exc.BadRequest(resource=\"\",msg=\"\")if ip_address.has_any_shared_owner():raise q_exc.PortRequiresDisassociation()db_api.update_port_associations_for_ip(context, [], ip_address)ipam_driver.deallocate_ip_address(context, ip_address)", "docstring": "Delete an ip address.\n\n : param context: neutron api request context\n : param id: UUID representing the ip address to delete.", "id": "f10758:m14"} {"signature": "def get_ports_for_ip_address(context, ip_id, limit=None, sorts=[''],marker=None, page_reverse=False, filters=None,fields=None):", "body": "LOG.info(\"\" %(context.tenant_id, filters, fields))addr = db_api.ip_address_find(context, id=ip_id, scope=db_api.ONE)if not addr:raise q_exc.IpAddressNotFound(addr_id=ip_id)if filters is None:filters = {}filters[''] = [ip_id]ports = db_api.port_find(context, limit, sorts, marker,fields=fields, join_security_groups=True,**filters)return v._make_ip_ports_list(addr, ports, fields)", "docstring": "Retrieve a list of ports.\n\n The contents of the list depends on the identity of the user\n making the request (as indicated by the context) as well as any\n filters.\n : param context: neutron api request context\n : param filters: a dictionary with keys that are valid keys for\n a port as listed in the RESOURCE_ATTRIBUTE_MAP object\n in neutron/api/v2/attributes.py. Values in this dictionary\n are an iterable containing values that will be used for an exact\n match comparison for that value. Each result returned by this\n function will have matched one of the values for each key in\n filters.\n : param fields: a list of strings that are valid keys in a\n port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n object in neutron/api/v2/attributes.py. Only these fields\n will be returned.", "id": "f10758:m15"} {"signature": "def get_port_for_ip_address(context, ip_id, id, fields=None):", "body": "LOG.info(\"\" %(id, context.tenant_id, fields))addr = db_api.ip_address_find(context, id=ip_id, scope=db_api.ONE)if not addr:raise q_exc.IpAddressNotFound(addr_id=ip_id)filters = {'': [ip_id]}results = db_api.port_find(context, id=id, fields=fields,scope=db_api.ONE, **filters)if not results:raise n_exc.PortNotFound(port_id=id)return v._make_port_for_ip_dict(addr, results)", "docstring": "Retrieve a port.\n\n : param context: neutron api request context\n : param id: UUID representing the port to fetch.\n : param fields: a list of strings that are valid keys in a\n port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n object in neutron/api/v2/attributes.py. 
Only these fields\n will be returned.", "id": "f10758:m16"} {"signature": "def update_port_for_ip_address(context, ip_id, id, port):", "body": "LOG.info(\"\" % (id, context.tenant_id))sanitize_list = ['']with context.session.begin():addr = db_api.ip_address_find(context, id=ip_id, scope=db_api.ONE)if not addr:raise q_exc.IpAddressNotFound(addr_id=ip_id)port_db = db_api.port_find(context, id=id, scope=db_api.ONE)if not port_db:raise q_exc.PortNotFound(port_id=id)port_dict = {k: port[''][k] for k in sanitize_list}require_da = Falseservice = port_dict.get('')if require_da and _shared_ip_and_active(addr, except_port=id):raise q_exc.PortRequiresDisassociation()addr.set_service_for_port(port_db, service)context.session.add(addr)return v._make_port_for_ip_dict(addr, port_db)", "docstring": "Update values of a port.\n\n : param context: neutron api request context\n : param ip_id: UUID representing the ip associated with port to update\n : param id: UUID representing the port to update.\n : param port: dictionary with keys indicating fields to update.\n valid keys are those that have a value of True for 'allow_put'\n as listed in the RESOURCE_ATTRIBUTE_MAP object in\n neutron/api/v2/attributes.py.", "id": "f10758:m17"} {"signature": "def add_job_to_context(context, job_id):", "body": "db_job = db_api.async_transaction_find(context, id=job_id, scope=db_api.ONE)if not db_job:returncontext.async_job = {\"\": v._make_job_dict(db_job)}", "docstring": "Adds job to neutron context for use later.", "id": "f10759:m0"} {"signature": "def create_job(context, body):", "body": "LOG.info(\"\" % context.tenant_id)if not context.is_admin:raise n_exc.NotAuthorized()job = body.get('')if '' in job:parent_id = job['']if not parent_id:raise q_exc.JobNotFound(job_id=parent_id)parent_job = db_api.async_transaction_find(context, id=parent_id, scope=db_api.ONE)if not parent_job:raise q_exc.JobNotFound(job_id=parent_id)tid = parent_idif parent_job.get(''):tid = parent_job.get('')job[''] = tidif not job:raise n_exc.BadRequest(resource=\"\", msg=\"\")with context.session.begin(subtransactions=True):new_job = db_api.async_transaction_create(context, **job)return v._make_job_dict(new_job)", "docstring": "Creates a job with support for subjobs.\n\n If parent_id is not in the body:\n * the job is considered a parent job\n * it will have a NULL transaction id\n * its transaction id == its id\n * all subjobs will use its transaction id as theirs\n\n Else:\n * the job is a sub job\n * the parent id is the id passed in\n * the transaction id is the root of the job tree", "id": "f10759:m3"} {"signature": "def delete_job(context, id, **filters):", "body": "LOG.info(\"\" % (id, context.tenant_id))if not context.is_admin:raise n_exc.NotAuthorized()with context.session.begin():job = db_api.async_transaction_find(context, id=id, scope=db_api.ONE,**filters)if not job:raise q_exc.JobNotFound(job_id=id)db_api.async_transaction_delete(context, job)", "docstring": "Delete an ip address.\n\n : param context: neutron api request context\n : param id: UUID representing the ip address to delete.", "id": "f10759:m5"} {"signature": "def delete_segment_allocation_range(context, sa_id):", "body": "LOG.info(\"\" %(sa_id, context.tenant_id))if not context.is_admin:raise n_exc.NotAuthorized()with context.session.begin():sa_range = db_api.segment_allocation_range_find(context, id=sa_id, scope=db_api.ONE)if not sa_range:raise q_exc.SegmentAllocationRangeNotFound(segment_allocation_range_id=sa_id)_delete_segment_allocation_range(context, sa_range)", 
"docstring": "Delete a segment_allocation_range.\n\n : param context: neutron api request context\n : param id: UUID representing the segment_allocation_range to delete.", "id": "f10760:m4"} {"signature": "def create_network(context, network):", "body": "LOG.info(\"\" % context.tenant_id)with context.session.begin():net_attrs = network[\"\"]subs = net_attrs.pop(\"\", [])if not context.is_admin:if len(subs) > :v4_count, v6_count = , for s in subs:version = netaddr.IPNetwork(s['']['']).versionif version == :v6_count += else:v4_count += if v4_count > :tenant_q_v4 = context.session.query(qdv.Quota).filter_by(tenant_id=context.tenant_id,resource='').first()if tenant_q_v4 != -:quota.QUOTAS.limit_check(context,context.tenant_id,v4_subnets_per_network=v4_count)if v6_count > :tenant_q_v6 = context.session.query(qdv.Quota).filter_by(tenant_id=context.tenant_id,resource='').first()if tenant_q_v6 != -:quota.QUOTAS.limit_check(context,context.tenant_id,v6_subnets_per_network=v6_count)net_uuid = utils.pop_param(net_attrs, \"\", None)net_type = Noneif net_uuid and context.is_admin:net = db_api.network_find(context=context, limit=None,sorts=[''], marker=None,page_reverse=False, id=net_uuid,scope=db_api.ONE)net_type = utils.pop_param(net_attrs, \"\", None)if net:raise q_exc.NetworkAlreadyExists(id=net_uuid)else:net_uuid = uuidutils.generate_uuid()pnet_type, phys_net, seg_id = _adapt_provider_nets(context, network)ipam_strategy = utils.pop_param(net_attrs, \"\", None)if not ipam_strategy or not context.is_admin:ipam_strategy = CONF.QUARK.default_ipam_strategyif not ipam.IPAM_REGISTRY.is_valid_strategy(ipam_strategy):raise q_exc.InvalidIpamStrategy(strat=ipam_strategy)net_attrs[\"\"] = ipam_strategydefault_net_type = net_type or CONF.QUARK.default_network_typenet_driver = registry.DRIVER_REGISTRY.get_driver(default_net_type)net_driver.create_network(context, net_attrs[\"\"],network_id=net_uuid, phys_type=pnet_type,phys_net=phys_net, segment_id=seg_id)net_attrs[\"\"] = net_uuidnet_attrs[\"\"] = context.tenant_idnet_attrs[\"\"] = default_net_typenew_net = db_api.network_create(context, **net_attrs)new_subnets = []for sub in subs:sub[\"\"][\"\"] = new_net[\"\"]sub[\"\"][\"\"] = context.tenant_ids = db_api.subnet_create(context, **sub[\"\"])new_subnets.append(s)new_net[\"\"] = new_subnetsreturn v._make_network_dict(new_net)", "docstring": "Create a network.\n\n Create a network which represents an L2 network segment which\n can have a set of subnets and ports associated with it.\n : param context: neutron api request context\n : param network: dictionary describing the network, with keys\n as listed in the RESOURCE_ATTRIBUTE_MAP object in\n neutron/api/v2/attributes.py. 
All keys will be populated.", "id": "f10763:m1"} {"signature": "def update_network(context, id, network):", "body": "LOG.info(\"\" %(id, context.tenant_id))with context.session.begin():net = db_api.network_find(context, id=id, scope=db_api.ONE)if not net:raise n_exc.NetworkNotFound(net_id=id)net_dict = network[\"\"]utils.pop_param(net_dict, \"\")if not context.is_admin and \"\" in net_dict:utils.pop_param(net_dict, \"\")net = db_api.network_update(context, net, **net_dict)return v._make_network_dict(net)", "docstring": "Update values of a network.\n\n : param context: neutron api request context\n : param id: UUID representing the network to update.\n : param network: dictionary with keys indicating fields to update.\n valid keys are those that have a value of True for 'allow_put'\n as listed in the RESOURCE_ATTRIBUTE_MAP object in\n neutron/api/v2/attributes.py.", "id": "f10763:m2"} {"signature": "def get_network(context, id, fields=None):", "body": "LOG.info(\"\" %(id, context.tenant_id, fields))network = db_api.network_find(context=context, limit=None, sorts=[''],marker=None, page_reverse=False,id=id, join_subnets=True, scope=db_api.ONE)if not network:raise n_exc.NetworkNotFound(net_id=id)return v._make_network_dict(network, fields=fields)", "docstring": "Retrieve a network.\n\n : param context: neutron api request context\n : param id: UUID representing the network to fetch.\n : param fields: a list of strings that are valid keys in a\n network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n object in neutron/api/v2/attributes.py. Only these fields\n will be returned.", "id": "f10763:m3"} {"signature": "def get_networks(context, limit=None, sorts=[''], marker=None,page_reverse=False, filters=None, fields=None):", "body": "LOG.info(\"\" %(context.tenant_id, filters, fields))filters = filters or {}nets = db_api.network_find(context, limit, sorts, marker, page_reverse,join_subnets=True, **filters) or []nets = [v._make_network_dict(net, fields=fields) for net in nets]return nets", "docstring": "Retrieve a list of networks.\n\n The contents of the list depends on the identity of the user\n making the request (as indicated by the context) as well as any\n filters.\n : param context: neutron api request context\n : param filters: a dictionary with keys that are valid keys for\n a network as listed in the RESOURCE_ATTRIBUTE_MAP object\n in neutron/api/v2/attributes.py. Values in this dictiontary\n are an iterable containing values that will be used for an exact\n match comparison for that value. Each result returned by this\n function will have matched one of the values for each key in\n filters.\n : param fields: a list of strings that are valid keys in a\n network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n object in neutron/api/v2/attributes.py. Only these fields\n will be returned.", "id": "f10763:m4"} {"signature": "def get_networks_count(context, filters=None):", "body": "LOG.info(\"\" %(context.tenant_id, filters))return db_api.network_count_all(context)", "docstring": "Return the number of networks.\n\n The result depends on the identity of the user making the request\n (as indicated by the context) as well as any filters.\n : param context: neutron api request context\n : param filters: a dictionary with keys that are valid keys for\n a network as listed in the RESOURCE_ATTRIBUTE_MAP object\n in neutron/api/v2/attributes.py. Values in this dictiontary\n are an iterable containing values that will be used for an exact\n match comparison for that value. 
Each result returned by this\n function will have matched one of the values for each key in\n filters.\n\n NOTE: this method is optional, as it was not part of the originally\n defined plugin API.", "id": "f10763:m5"} {"signature": "def delete_network(context, id):", "body": "LOG.info(\"\" % (id, context.tenant_id))with context.session.begin():net = db_api.network_find(context=context, limit=None, sorts=[''],marker=None, page_reverse=False, id=id,scope=db_api.ONE)if not net:raise n_exc.NetworkNotFound(net_id=id)if not context.is_admin:if STRATEGY.is_provider_network(net.id):raise n_exc.NotAuthorized(net_id=id)if net.ports:raise n_exc.NetworkInUse(net_id=id)net_driver = registry.DRIVER_REGISTRY.get_driver(net[\"\"])net_driver.delete_network(context, id)for subnet in net[\"\"]:subnets._delete_subnet(context, subnet)db_api.network_delete(context, net)", "docstring": "Delete a network.\n\n : param context: neutron api request context\n : param id: UUID representing the network to delete.", "id": "f10763:m6"} {"signature": "def get_mac_address_range(context, id, fields=None):", "body": "LOG.info(\"\" %(id, context.tenant_id, fields))if not context.is_admin:raise n_exc.NotAuthorized()mac_address_range = db_api.mac_address_range_find(context, id=id, scope=db_api.ONE)if not mac_address_range:raise q_exc.MacAddressRangeNotFound(mac_address_range_id=id)return v._make_mac_range_dict(mac_address_range)", "docstring": "Retrieve a mac_address_range.\n\n : param context: neutron api request context\n : param id: UUID representing the network to fetch.\n : param fields: a list of strings that are valid keys in a\n network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n object in neutron/api/v2/attributes.py. Only these fields\n will be returned.", "id": "f10764:m1"} {"signature": "def delete_mac_address_range(context, id):", "body": "LOG.info(\"\" %(id, context.tenant_id))if not context.is_admin:raise n_exc.NotAuthorized()with context.session.begin():mar = db_api.mac_address_range_find(context, id=id, scope=db_api.ONE)if not mar:raise q_exc.MacAddressRangeNotFound(mac_address_range_id=id)_delete_mac_address_range(context, mar)", "docstring": "Delete a mac_address_range.\n\n : param context: neutron api request context\n : param id: UUID representing the mac_address_range to delete.", "id": "f10764:m5"} {"signature": "@classmethoddef get_resources(cls):", "body": "plugin = directory.get_plugin()controller = IPPoliciesController(plugin)return [extensions.ResourceExtension(Ip_policies.get_alias(),controller)]", "docstring": "Returns Ext Resources.", "id": "f10767:c1:m6"} {"signature": "@classmethoddef get_resources(cls):", "body": "plugin = directory.get_plugin()controller = IPAvailabilityController(plugin)return [extensions.ResourceExtension(Ip_availability.get_alias(),controller)]", "docstring": "Returns Ext Resources.", "id": "f10768:c1:m6"} {"signature": "@classmethoddef get_resources(cls):", "body": "ip_controller = IpAddressesController(directory.get_plugin())ip_port_controller = IpAddressPortController(directory.get_plugin())resources = []resources.append(extensions.ResourceExtension(Ip_addresses.get_alias(),ip_controller))parent = {'': '','': ''}resources.append(extensions.ResourceExtension('', ip_port_controller, parent=parent))return resources", "docstring": "Returns Ext Resources.", "id": "f10771:c2:m6"} {"signature": "@classmethoddef get_resources(cls):", "body": "job_controller = JobsController(directory.get_plugin())resources = 
[]resources.append(extensions.ResourceExtension(Jobs.get_alias(),job_controller))return resources", "docstring": "Returns Ext Resources.", "id": "f10772:c1:m6"} {"signature": "@classmethoddef get_resources(cls):", "body": "plugin = directory.get_plugin()controller = SegmentAllocationRangesController(plugin)return [extensions.ResourceExtension(Segment_allocation_ranges.get_alias(),controller)]", "docstring": "Returns Ext Resources.", "id": "f10774:c1:m6"} {"signature": "@classmethoddef get_resources(cls):", "body": "plural_mappings = resource_helper.build_plural_mappings({}, RESOURCE_ATTRIBUTE_MAP)return resource_helper.build_resource_info(plural_mappings,RESOURCE_ATTRIBUTE_MAP,None,register_quota=True)", "docstring": "Returns Ext Resources.", "id": "f10775:c0:m5"} {"signature": "@classmethoddef get_resources(cls):", "body": "controller = RoutesController(directory.get_plugin())return [extensions.ResourceExtension(Routes.get_alias(),controller)]", "docstring": "Returns Ext Resources.", "id": "f10776:c1:m6"} {"signature": "@classmethoddef get_resources(cls):", "body": "plugin = directory.get_plugin()controller = MacAddressRangesController(plugin)return [extensions.ResourceExtension(Mac_address_ranges.get_alias(),controller)]", "docstring": "Returns Ext Resources.", "id": "f10777:c1:m6"} {"signature": "def _allocate_from_v6_subnet(self, context, net_id, subnet,port_id, reuse_after, ip_address=None,**kwargs):", "body": "LOG.info(\"\".format(utils.pretty_kwargs(network_id=net_id, subnet=subnet,port_id=port_id, ip_address=ip_address)))if ip_address:LOG.info(\"\"\"\" % ip_address)return self._allocate_from_subnet(context, net_id=net_id,subnet=subnet, port_id=port_id,reuse_after=reuse_after,ip_address=ip_address, **kwargs)else:mac = kwargs.get(\"\")if mac:mac = kwargs[\"\"].get(\"\")if subnet and subnet[\"\"]:ip_policy_cidrs = subnet[\"\"].get_cidrs_ip_set()else:ip_policy_cidrs = netaddr.IPSet([])for tries, ip_address in enumerate(generate_v6(mac, port_id, subnet[\"\"])):LOG.info(\"\".format(tries + , CONF.QUARK.v6_allocation_attempts))if tries > CONF.QUARK.v6_allocation_attempts - :LOG.info(\"\")raise ip_address_failure(net_id)ip_address = netaddr.IPAddress(ip_address).ipv6()LOG.info(\"\".format(str(ip_address)))if (ip_policy_cidrs is not None andip_address in ip_policy_cidrs):LOG.info(\"\".format(str(ip_address)))continuetry:with context.session.begin():address = db_api.ip_address_create(context, address=ip_address,subnet_id=subnet[\"\"],version=subnet[\"\"], network_id=net_id,address_type=kwargs.get('',ip_types.FIXED))return addressexcept db_exception.DBDuplicateEntry:LOG.info(\"\"\"\".format(str(ip_address)))LOG.debug(\"\"\"\", subnet[\"\"], ip_address)", "docstring": "This attempts to allocate v6 addresses as per RFC2462 and RFC3041.\n\n To accomodate this, we effectively treat all v6 assignment as a\n first time allocation utilizing the MAC address of the VIF. Because\n we recycle MACs, we will eventually attempt to recreate a previously\n generated v6 address. 
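The _allocate_from_v6_subnet entry above (docstring continues on the next line) derives v6 addresses from the port's MAC via a generate_v6 helper that is not shown in this corpus. A rough sketch of the EUI-64 half of that idea, assuming a made-up MAC and prefix; the real driver also generates RFC 3041-style addresses and retries on policy or duplicate-entry collisions:

```python
import netaddr

mac = "02:16:3e:aa:bb:cc"                            # hypothetical VIF MAC
subnet = netaddr.IPNetwork("fd00:abcd::/64")         # hypothetical v6 subnet

octets = [int(b, 16) for b in mac.split(":")]
eui64 = octets[:3] + [0xFF, 0xFE] + octets[3:]       # insert ff:fe between the OUI and NIC halves
eui64[0] ^= 0x02                                     # flip the universal/local bit

iid = 0
for octet in eui64:                                  # pack the 8 bytes into a 64-bit interface id
    iid = (iid << 8) | octet

address = netaddr.IPAddress(int(subnet.network) + iid)
print(address)                                       # fd00:abcd::16:3eff:feaa:bbcc
```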
Instead of failing, we've opted to handle\n reallocating that address in this method.\n\n This should provide a performance boost over attempting to check\n each and every subnet in the existing reallocate logic, as we'd\n have to iterate over each and every subnet returned", "id": "f10784:c2:m4"} {"signature": "def opt_args_decorator(func):", "body": "@wraps(func)def wrapped_dec(*args, **kwargs):if len(args) == and len(kwargs) == and callable(args[]):return func(args[])else:return lambda realf: func(realf, *args, **kwargs)return wrapped_dec", "docstring": "A decorator to be used on another decorator\n\n This is done to allow separate handling on the basis of argument values", "id": "f10785:m5"} {"signature": "def append_quark_extensions(conf):", "body": "if '' in conf:conf.set_override('', \"\".join(extensions.__path__))", "docstring": "Adds the Quark API Extensions to the extension path.\n\n Pulled out for test coveage.", "id": "f10786:m0"} {"signature": "def _fix_missing_tenant_id(self, context, body, key):", "body": "if not body:raise n_exc.BadRequest(resource=key,msg=\"\")resource = body.get(key)if not resource:raise n_exc.BadRequest(resource=key,msg=\"\")if context.tenant_id is None:context.tenant_id = resource.get(\"\")if context.tenant_id is None:msg = _(\"\"\"\")raise n_exc.BadRequest(resource=key, msg=msg)", "docstring": "Will add the tenant_id to the context from body.\n\n It is assumed that the body must have a tenant_id because neutron\n core could never have gotten here otherwise.", "id": "f10786:c0:m1"} {"signature": "def get_public_net_id(self):", "body": "for id, net_params in self.strategy.iteritems():if id == CONF.QUARK.public_net_id:return idreturn None", "docstring": "Returns the public net id", "id": "f10787:c0:m14"} {"signature": "def _validate_allocation_pools(self):", "body": "ip_pools = self._alloc_poolssubnet_cidr = self._subnet_cidrLOG.debug(_(\"\"))ip_sets = []for ip_pool in ip_pools:try:start_ip = netaddr.IPAddress(ip_pool[''])end_ip = netaddr.IPAddress(ip_pool[''])except netaddr.AddrFormatError:LOG.info(_(\"\"\"\"),{'': ip_pool[''],'': ip_pool['']})raise n_exc_ext.InvalidAllocationPool(pool=ip_pool)if (start_ip.version != self._subnet_cidr.version orend_ip.version != self._subnet_cidr.version):LOG.info(_(\"\"\"\"))raise n_exc_ext.InvalidAllocationPool(pool=ip_pool)if end_ip < start_ip:LOG.info(_(\"\"\"\"),{'': ip_pool[''], '': ip_pool['']})raise n_exc_ext.InvalidAllocationPool(pool=ip_pool)if (start_ip < self._subnet_first_ip orend_ip > self._subnet_last_ip):LOG.info(_(\"\"\"\"),{'': ip_pool[''],'': ip_pool['']})raise n_exc_ext.OutOfBoundsAllocationPool(pool=ip_pool,subnet_cidr=subnet_cidr)ip_sets.append(netaddr.IPSet(netaddr.IPRange(ip_pool[''],ip_pool['']).cidrs()))LOG.debug(_(\"\"\"\"))ip_ranges = ip_pools[:]for l_cursor in xrange(len(ip_sets)):for r_cursor in xrange(l_cursor + , len(ip_sets)):if ip_sets[l_cursor] & ip_sets[r_cursor]:l_range = ip_ranges[l_cursor]r_range = ip_ranges[r_cursor]LOG.info(_(\"\"\"\"),{'': l_range, '': r_range})raise n_exc_ext.OverlappingAllocationPools(pool_1=l_range,pool_2=r_range,subnet_cidr=subnet_cidr)", "docstring": "Validate IP allocation pools.\n\n Verify start and end address for each allocation pool are valid,\n ie: constituted by valid and appropriately ordered IP addresses.\n Also, verify pools do not overlap among themselves.\n Finally, verify that each range fall within the subnet's CIDR.", "id": "f10788:c0:m2"} {"signature": "def _lswitch_select_open(self, context, switches=None, **kwargs):", "body": "if switches 
is not None:for res in switches[\"\"]:count = res[\"\"][\"\"][\"\"]if (self.limits[''] == orcount < self.limits['']):return res[\"\"]return None", "docstring": "Selects an open lswitch for a network.\n\n Note that it does not select the most full switch, but merely one with\n ports available.", "id": "f10791:c2:m25"} {"signature": "def _add_default_tz_bindings(self, context, switch, network_id):", "body": "default_tz = CONF.NVP.default_tzif not default_tz:LOG.warn(\"\"\"\"\"\")returnif not network_id:LOG.warn(\"\"\"\")returnfor net_type in CONF.NVP.additional_default_tz_types:if net_type in TZ_BINDINGS:binding = TZ_BINDINGS[net_type]binding.add(context, switch, default_tz, network_id)else:LOG.warn(\"\" % (net_type))", "docstring": "Configure any additional default transport zone bindings.", "id": "f10791:c2:m28"} {"signature": "def _remove_default_tz_bindings(self, context, network_id):", "body": "default_tz = CONF.NVP.default_tzif not default_tz:LOG.warn(\"\"\"\"\"\")returnif not network_id:LOG.warn(\"\"\"\")returnfor net_type in CONF.NVP.additional_default_tz_types:if net_type in TZ_BINDINGS:binding = TZ_BINDINGS[net_type]binding.remove(context, default_tz, network_id)else:LOG.warn(\"\" % (net_type))", "docstring": "Deconfigure any additional default transport zone bindings.", "id": "f10791:c2:m29"} {"signature": "def get_lswitch_ids_for_network(self, context, network_id):", "body": "lswitches = self._lswitches_for_network(context, network_id).results()return [s[''] for s in lswitches[\"\"]]", "docstring": "Public interface for fetching lswitch ids for a given network.\n\n NOTE(morgabra) This is here because calling private methods\n from outside the class feels wrong, and we need to be able to\n fetch lswitch ids for use in other drivers.", "id": "f10791:c2:m31"} {"signature": "def _lswitch_status_query(self, context, network_id):", "body": "pass", "docstring": "Child implementation of lswitch_status_query.\n\n Deliberately empty as we rely on _get_network_details to be more\n efficient than we can be here.", "id": "f10794:c0:m14"} {"signature": "def get_lswitch_ids_for_network(self, context, network_id):", "body": "lswitches = self._lswitches_for_network(context, network_id)return [s[''] for s in lswitches]", "docstring": "Public interface for fetching lswitch ids for a given network.\n\n NOTE(morgabra) This is here because calling private methods\n from outside the class feels wrong, and we need to be able to\n fetch lswitch ids for use in other drivers.", "id": "f10794:c0:m19"} {"signature": "def select_ipam_strategy(self, network_id, network_strategy, **kwargs):", "body": "LOG.info(\"\"\"\" % (network_id, network_strategy))net_type = \"\"if STRATEGY.is_provider_network(network_id):net_type = \"\"strategy = self._ipam_strategies.get(net_type, {})default = strategy.get(\"\")overrides = strategy.get(\"\", {})if network_strategy in overrides:LOG.info(\"\"% (overrides[network_strategy]))return overrides[network_strategy]if default:LOG.info(\"\"\"\" % (default))return defaultLOG.info(\"\"\"\" % (network_strategy))return network_strategy", "docstring": "Return relevant IPAM strategy name.\n\n :param network_id: neutron network id.\n :param network_strategy: default strategy for the network.\n\n NOTE(morgabra) This feels like a hack but I can't think of a better\n idea. 
The root problem is we can now attach ports to networks with\n a different backend driver/ipam strategy than the network specifies.\n\n We handle the backend driver part by allowing network_plugin to\n be specified for port objects. This works pretty well because nova or\n whatever knows when we are hooking up an Ironic node so it can pass\n along that key during port_create().\n\n IPAM is a little trickier, especially in Ironic's case, because we\n *must* use a specific IPAM for provider networks. There isn't really\n much of an option other than to involve the backend driver when selecting\n the IPAM strategy.", "id": "f10795:c2:m5"} {"signature": "def _get_base_network_info(self, context, network_id, base_net_driver):", "body": "driver_name = base_net_driver.get_name()net_info = {\"\": driver_name}LOG.debug(''% (driver_name, network_id))if driver_name == '':LOG.debug(''% (network_id))lswitch_ids = base_net_driver.get_lswitch_ids_for_network(context, network_id)if not lswitch_ids or len(lswitch_ids) > :msg = (''% (len(lswitch_ids)))LOG.error(msg)raise IronicException(msg)lswitch_id = lswitch_ids.pop()LOG.info(''% (network_id, lswitch_id))net_info[''] = lswitch_idLOG.debug(''% (driver_name, network_id, net_info))return net_info", "docstring": "Return a dict of extra network information.\n\n :param context: neutron request context.\n :param network_id: neutron network id.\n :param net_driver: network driver associated with network_id.\n :raises IronicException: Any unexpected data fetching failures will\n be logged and IronicException raised.\n\n This driver can attach to networks managed by other drivers. We may\n need some information from these drivers, or otherwise inform\n downstream about the type of network we are attaching to. We can\n make these decisions here.", "id": "f10795:c2:m9"} {"signature": "def create_port(self, context, network_id, port_id, **kwargs):", "body": "LOG.info(\"\" % (context.tenant_id, network_id,port_id))if not kwargs.get(''):raise IronicException(msg='')base_net_driver = kwargs['']if not kwargs.get(''):raise IronicException(msg='')device_id = kwargs['']if not kwargs.get(''):raise IronicException(msg='')instance_node_id = kwargs['']if not kwargs.get(''):raise IronicException(msg='')mac_address = str(netaddr.EUI(kwargs[\"\"][\"\"]))mac_address = mac_address.replace('', '')if kwargs.get(''):msg = ''raise IronicException(msg=msg)fixed_ips = []addresses = kwargs.get('')if not isinstance(addresses, list):addresses = [addresses]for address in addresses:fixed_ips.append(self._make_fixed_ip_dict(context, address))body = {\"\": port_id,\"\": network_id,\"\": device_id,\"\": kwargs.get('', ''),\"\": context.tenant_id or \"\",\"\": context.roles,\"\": mac_address,\"\": fixed_ips,\"\": instance_node_id,\"\": not STRATEGY.is_provider_network(network_id)}net_info = self._get_base_network_info(context, network_id, base_net_driver)body.update(net_info)try:LOG.info(\"\" % (body))port = self._create_port(context, body)LOG.info(\"\" % (port))return {\"\": port[''][''],\"\": port['']['']}except Exception as e:msg = \"\" % (e)raise IronicException(msg=msg)", "docstring": "Create a port.\n\n :param context: neutron api request context.\n :param network_id: neutron network id.\n :param port_id: neutron port id.\n :param kwargs:\n required keys - device_id: neutron port device_id (instance_id)\n instance_node_id: nova hypervisor host id\n mac_address: neutron port mac address\n base_net_driver: the base network driver\n optional keys - addresses: list of allocated IPAddress 
models\n security_groups: list of associated security groups\n :raises IronicException: If the client is unable to create the\n downstream port for any reason, the exception will be logged\n and IronicException raised.", "id": "f10795:c2:m10"} {"signature": "def update_port(self, context, port_id, **kwargs):", "body": "LOG.info(\"\" % (context.tenant_id, port_id))if kwargs.get(\"\"):msg = ''raise IronicException(msg=msg)return {\"\": port_id}", "docstring": "Update a port.\n\n :param context: neutron api request context.\n :param port_id: neutron port id.\n :param kwargs: optional kwargs.\n :raises IronicException: If the client is unable to update the\n downstream port for any reason, the exception will be logged\n and IronicException raised.\n\n TODO(morgabra) It does not really make sense in the context of Ironic\n to allow updating ports. fixed_ips and mac_address are burned in the\n configdrive on the host, and we otherwise cannot migrate a port between\n instances. Eventually we will need to support security groups, but for\n now it's a no-op on port data changes, and we need to rely on the\n API/Nova to not allow updating data on active ports.", "id": "f10795:c2:m11"} {"signature": "def delete_port(self, context, port_id, **kwargs):", "body": "LOG.info(\"\" % (context.tenant_id, port_id))try:self._delete_port(context, port_id)LOG.info(\"\" % (port_id))except Exception:LOG.error(\"\"\"\" % (port_id))", "docstring": "Delete a port.\n\n :param context: neutron api request context.\n :param port_id: neutron port id.\n :param kwargs: optional kwargs.\n :raises IronicException: If the client is unable to delete the\n downstream port for any reason, the exception will be logged\n and IronicException raised.", "id": "f10795:c2:m13"} {"signature": "def diag_port(self, context, port_id, **kwargs):", "body": "LOG.info(\"\" % port_id)try:port = self._client.show_port(port_id)except Exception as e:msg = \"\" % (str(e))LOG.exception(msg)raise IronicException(msg=msg)return {\"\": port}", "docstring": "Diagnose a port.\n\n :param context: neutron api request context.\n :param port_id: neutron port id.\n :param kwargs: optional kwargs.\n :raises IronicException: If the client is unable to fetch the\n downstream port for any reason, the exception will be\n logged and IronicException raised.", "id": "f10795:c2:m14"} {"signature": "def create_network(self, *args, **kwargs):", "body": "raise NotImplementedError('''')", "docstring": "Create a network.\n\n :raises NotImplementedError: This driver does not manage networks.\n\n NOTE: This is a no-op in the base driver, but this raises here as to\n explicitly disallow network operations in case of a misconfiguration.", "id": "f10795:c2:m15"} {"signature": "def delete_network(self, *args, **kwargs):", "body": "raise NotImplementedError('''')", "docstring": "Delete a network.\n\n :raises NotImplementedError: This driver does not manage networks.\n\n NOTE: This is a no-op in the base driver, but this raises here as to\n explicitly disallow network operations in case of a misconfiguration.", "id": "f10795:c2:m16"} {"signature": "def diag_network(self, *args, **kwargs):", "body": "raise NotImplementedError('''')", "docstring": "Diagnose a network.\n\n :raises NotImplementedError: This driver does not manage networks.\n\n NOTE: This is a no-op in the base driver, but this raises here as to\n explicitly disallow network operations in case of a misconfiguration.", "id": "f10795:c2:m17"} {"signature": "def create_security_group(self, context, group_name, **group):", 
"body": "raise NotImplementedError('''')", "docstring": "Create a security group.\n\n :raises NotImplementedError: This driver does not implement security\n groups.\n\n NOTE: Security groups will be supported in the future, but for now\n they are explicitly disallowed.", "id": "f10795:c2:m18"} {"signature": "def delete_security_group(self, context, group_id, **kwargs):", "body": "raise NotImplementedError('''')", "docstring": "Delete a security group.\n\n :raises NotImplementedError: This driver does not implement security\n groups.\n\n NOTE: Security groups will be supported in the future, but for now\n they are explicitly disallowed.", "id": "f10795:c2:m19"} {"signature": "def update_security_group(self, context, group_id, **group):", "body": "raise NotImplementedError('''')", "docstring": "Update a security group.\n\n :raises NotImplementedError: This driver does not implement security\n groups.\n\n NOTE: Security groups will be supported in the future, but for now\n they are explicitly disallowed.", "id": "f10795:c2:m20"} {"signature": "def create_security_group_rule(self, context, group_id, rule):", "body": "raise NotImplementedError('''')", "docstring": "Create a security group rule.\n\n :raises NotImplementedError: This driver does not implement security\n groups.\n\n NOTE: Security groups will be supported in the future, but for now\n they are explicitly disallowed.", "id": "f10795:c2:m21"} {"signature": "def delete_security_group_rule(self, context, group_id, rule):", "body": "raise NotImplementedError('''')", "docstring": "Delete a security group rule.\n\n :raises NotImplementedError: This driver does not implement security\n groups.\n\n NOTE: Security groups will be supported in the future, but for now\n they are explicitly disallowed.", "id": "f10795:c2:m22"} {"signature": "def register_floating_ip(self, floating_ip, port_fixed_ips):", "body": "url = CONF.QUARK.floating_ip_base_urltimeout = CONF.QUARK.unicorn_api_timeout_secondsreq = self._build_request_body(floating_ip, port_fixed_ips)try:LOG.info(\"\"% (url, req))r = requests.post(url, data=json.dumps(req), timeout=timeout)except Exception as e:LOG.error(\"\"\"\"% (floating_ip.id, e.message))raise ex.RegisterFloatingIpFailure(id=floating_ip.id)if r.status_code != and r.status_code != :msg = \"\"\"\" % (r.status_code, r.json())LOG.error(\"\" % msg)raise ex.RegisterFloatingIpFailure(id=floating_ip.id)", "docstring": "Register a floating ip with Unicorn\n\n :param floating_ip: The quark.db.models.IPAddress to register\n :param port_fixed_ips: A dictionary containing the port and fixed ips\n to associate the floating IP with. 
Has the structure of:\n {\"\": {\"port\": ,\n \"fixed_ip\": \"\"}}\n :return: None", "id": "f10798:c0:m2"} {"signature": "def update_floating_ip(self, floating_ip, port_fixed_ips):", "body": "url = \"\" % (CONF.QUARK.floating_ip_base_url,floating_ip[\"\"])timeout = CONF.QUARK.unicorn_api_timeout_secondsreq = self._build_request_body(floating_ip, port_fixed_ips)try:LOG.info(\"\"% (url, req))r = requests.put(url, data=json.dumps(req), timeout=timeout)except Exception as e:LOG.error(\"\"\"\"% (floating_ip.id, e.message))raise ex.RegisterFloatingIpFailure(id=floating_ip.id)if r.status_code != and r.status_code != :msg = \"\"\"\" % (r.status_code, r.json())LOG.error(\"\" % msg)raise ex.RegisterFloatingIpFailure(id=floating_ip.id)", "docstring": "Update an existing floating ip with Unicorn\n\n :param floating_ip: The quark.db.models.IPAddress to update\n :param port_fixed_ips: A dictionary containing the port and fixed ips\n to associate the floating IP with. Has the structure of:\n {\"\": {\"port\": ,\n \"fixed_ip\": \"\"}}\n :return: None", "id": "f10798:c0:m3"} {"signature": "def remove_floating_ip(self, floating_ip):", "body": "url = \"\" % (CONF.QUARK.floating_ip_base_url,floating_ip.address_readable)timeout = CONF.QUARK.unicorn_api_timeout_secondstry:LOG.info(\"\" % url)r = requests.delete(url, timeout=timeout)except Exception as e:LOG.error(\"\"\"\"% (floating_ip.id, e.message))raise ex.RemoveFloatingIpFailure(id=floating_ip.id)if r.status_code == :LOG.warn(\"\"% floating_ip.address_readable)elif r.status_code != :msg = \"\"\"\" % (r.status_code, r.json())LOG.error(\"\" % msg)raise ex.RemoveFloatingIpFailure(id=floating_ip.id)", "docstring": "Register a floating ip with Unicorn\n\n :param floating_ip: The quark.db.models.IPAddress to remove\n :return: None", "id": "f10798:c0:m4"} {"signature": "def decode_exactly(geohash):", "body": "lat_interval, lon_interval = (-, ), (-, )lat_err, lon_err = , is_even = Truefor c in geohash:cd = __decodemap[c]for mask in [, , , , ]:if is_even: lon_err /= if cd & mask:lon_interval = ((lon_interval[]+lon_interval[])/, lon_interval[])else:lon_interval = (lon_interval[], (lon_interval[]+lon_interval[])/)else: lat_err /= if cd & mask:lat_interval = ((lat_interval[]+lat_interval[])/, lat_interval[])else:lat_interval = (lat_interval[], (lat_interval[]+lat_interval[])/)is_even = not is_evenlat = (lat_interval[] + lat_interval[]) / lon = (lon_interval[] + lon_interval[]) / return lat, lon, lat_err, lon_err", "docstring": "Decode the geohash to its exact values, including the error\nmargins of the result. 
Returns four float values: latitude,\nlongitude, the plus/minus error for latitude (as a positive\nnumber) and the plus/minus error for longitude (as a positive\nnumber).", "id": "f10801:m0"} {"signature": "def decode(geohash):", "body": "lat, lon, lat_err, lon_err = decode_exactly(geohash)lats = \"\" % (max(, int(round(-log10(lat_err)))) - , lat)lons = \"\" % (max(, int(round(-log10(lon_err)))) - , lon)if '' in lats: lats = lats.rstrip('')if '' in lons: lons = lons.rstrip('')return lats, lons", "docstring": "Decode geohash, returning two strings with latitude and longitude\ncontaining only relevant digits and with trailing zeroes removed.", "id": "f10801:m1"} {"signature": "def encode(latitude, longitude, precision=):", "body": "lat_interval, lon_interval = (-, ), (-, )geohash = []bits = [ , , , , ]bit = ch = even = Truewhile len(geohash) < precision:if even:mid = (lon_interval[] + lon_interval[]) / if longitude > mid:ch |= bits[bit]lon_interval = (mid, lon_interval[])else:lon_interval = (lon_interval[], mid)else:mid = (lat_interval[] + lat_interval[]) / if latitude > mid:ch |= bits[bit]lat_interval = (mid, lat_interval[])else:lat_interval = (lat_interval[], mid)even = not evenif bit < :bit += else:geohash += __base32[ch]bit = ch = return ''.join(geohash)", "docstring": "Encode a position given in float arguments latitude, longitude to\na geohash which will have the character count precision.", "id": "f10801:m2"} {"signature": "@rate_limited()def boto3_cached_conn(service, service_type='', future_expiration_minutes=, account_number=None,assume_role=None, session_name='', region='', return_credentials=False,external_id=None, arn_partition=''):", "body": "key = (account_number,assume_role,session_name,external_id,region,service_type,service,arn_partition)if key in CACHE:retval = _get_cached_creds(key, service, service_type, region, future_expiration_minutes, return_credentials)if retval:return retvalrole = Noneif assume_role:sts = boto3.session.Session().client('')if not all([account_number, assume_role]):raise ValueError(\"\")arn = ''.format(account_number,assume_role,partition=arn_partition)assume_role_kwargs = {'': arn,'': session_name}if external_id:assume_role_kwargs[''] = external_idrole = sts.assume_role(**assume_role_kwargs)if service_type == '':conn = _client(service, region, role)elif service_type == '':conn = _resource(service, region, role)if role:CACHE[key] = roleif return_credentials:return conn, role['']return conn", "docstring": "Used to obtain a boto3 client or resource connection.\nFor cross account, provide both account_number and assume_role.\n\n:usage:\n\n# Same Account:\nclient = boto3_cached_conn('iam')\nresource = boto3_cached_conn('iam', service_type='resource')\n\n# Cross Account Client:\nclient = boto3_cached_conn('iam', account_number='000000000000', assume_role='role_name')\n\n# Cross Account Resource:\nresource = boto3_cached_conn('iam', service_type='resource', account_number='000000000000', assume_role='role_name')\n\n:param service: AWS service (i.e. 'iam', 'ec2', 'kms')\n:param service_type: 'client' or 'resource'\n:param future_expiration_minutes: Connections will expire from the cache\n when their expiration is within this many minutes of the present time. [Default 15]\n:param account_number: Required if assume_role is provided.\n:param assume_role: Name of the role to assume into for account described by account_number.\n:param session_name: Session name to attach to requests. [Default 'cloudaux']\n:param region: Region name for connection. 
[Default us-east-1]\n:param return_credentials: Indicates if the STS credentials should be returned with the client [Default False]\n:param external_id: Optional external id to pass to sts:AssumeRole.\n See https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html\n:param arn_partition: Optional parameter to specify other aws partitions such as aws-us-gov for aws govcloud\n:return: boto3 client or resource connection", "id": "f10811:m4"} {"signature": "def sts_conn(service, service_type='', future_expiration_minutes=):", "body": "def decorator(f):@wraps(f)def decorated_function(*args, **kwargs):if kwargs.get(\"\"):kwargs[service_type] = kwargs.pop(\"\")kwargs.pop(\"\", None)kwargs.pop(\"\", None)else:kwargs[service_type] = boto3_cached_conn(service,service_type=service_type,future_expiration_minutes=future_expiration_minutes,account_number=kwargs.pop('', None),assume_role=kwargs.pop('', None),session_name=kwargs.pop('', ''),external_id=kwargs.pop('', None),region=kwargs.pop('', ''),arn_partition=kwargs.pop('', ''))return f(*args, **kwargs)return decorated_functionreturn decorator", "docstring": "This will wrap all calls with an STS AssumeRole if the required parameters are sent over.\nNamely, it requires the following in the kwargs:\n- Service Type (Required)\n- Account Number (Required for Assume Role)\n- IAM Role Name (Required for Assume Role)\n- Region (Optional, but recommended)\n- AWS Partition (Optional, defaults to 'aws' if none specified)\n- IAM Session Name (Optional, but recommended to appear in CloudTrail)\n\nIf `force_client` is set to a boto3 client, then this will simply pass that in as the client.\n`force_client` is mostly useful for mocks and tests.\n:param service:\n:param service_type:\n:param future_expiration_minutes:\n:return:", "id": "f10811:m5"} {"signature": "@paginated('', response_pagination_marker='')@sts_conn('')@rate_limited()def describe_load_balancers(arns=None, names=None, client=None):", "body": "kwargs = dict()if arns:kwargs.update(dict(LoadBalancerArns=arns))if names:kwargs.update(dict(Names=names))return client.describe_load_balancers(**kwargs)", "docstring": "Permission: elasticloadbalancing:DescribeLoadBalancers", "id": "f10813:m0"} {"signature": "@paginated('', response_pagination_marker='')@sts_conn('')@rate_limited()def describe_listeners(load_balancer_arn=None, listener_arns=None, client=None):", "body": "kwargs = dict()if load_balancer_arn:kwargs.update(dict(LoadBalancerArn=load_balancer_arn))if listener_arns:kwargs.update(dict(ListenerArns=listener_arns))return client.describe_listeners(**kwargs)", "docstring": "Permission: elasticloadbalancing:DescribeListeners", "id": "f10813:m1"} {"signature": "@sts_conn('')@rate_limited()def describe_load_balancer_attributes(arn, client=None):", "body": "return client.describe_load_balancer_attributes(LoadBalancerArn=arn)['']", "docstring": "Permission: elasticloadbalancing:DescribeLoadBalancerAttributes", "id": "f10813:m2"} {"signature": "@sts_conn('')@rate_limited()def describe_rules(listener_arn=None, rule_arns=None, client=None):", "body": "kwargs = dict()if listener_arn:kwargs.update(dict(ListenerArn=listener_arn))if rule_arns:kwargs.update(dict(RuleArns=rule_arns))return client.describe_rules(**kwargs)['']", "docstring": "Permission: elasticloadbalancing:DescribeRules", "id": "f10813:m3"} {"signature": "@sts_conn('')@rate_limited()def describe_tags(arns, client=None):", "body": "return client.describe_tags(ResourceArns=arns)['']", "docstring": "Permission: 
elasticloadbalancing:DescribeTags", "id": "f10813:m5"} {"signature": "@sts_conn('')@rate_limited()def describe_target_group_attributes(arn, client=None):", "body": "return client.describe_target_group_attributes(TargetGroupArn=arn)['']", "docstring": "Permission: elasticloadbalancing:DescribeTargetGroupAttributes", "id": "f10813:m6"} {"signature": "@paginated('', response_pagination_marker='')@sts_conn('')@rate_limited()def describe_target_groups(load_balancer_arn=None, target_group_arns=None, names=None, client=None):", "body": "kwargs = dict()if load_balancer_arn:kwargs.update(LoadBalancerArn=load_balancer_arn)if target_group_arns:kwargs.update(TargetGroupArns=target_group_arns)if names:kwargs.update(Names=names)return client.describe_target_groups(**kwargs)", "docstring": "Permission: elasticloadbalancing:DescribeTargetGroups", "id": "f10813:m7"} {"signature": "@sts_conn('')@rate_limited()def describe_target_health(target_group_arn, targets=None, client=None):", "body": "kwargs = dict(TargetGroupArn=target_group_arn)if targets:kwargs.update(Targets=targets)return client.describe_target_health(**kwargs)['']", "docstring": "Permission: elasticloadbalancing:DescribeTargetHealth", "id": "f10813:m8"} {"signature": "@sts_conn('')@rate_limited()def get_bucket_location(client=None, **kwargs):", "body": "return client.get_bucket_location(**kwargs)", "docstring": "Bucket='string'", "id": "f10817:m1"} {"signature": "@sts_conn('')@rate_limited()def get_bucket_acl(client=None, **kwargs):", "body": "return client.get_bucket_acl(**kwargs)", "docstring": "Bucket='string'", "id": "f10817:m2"} {"signature": "@sts_conn('')@rate_limited()def get_bucket_policy(client=None, **kwargs):", "body": "return client.get_bucket_policy(**kwargs)", "docstring": "Bucket='string'", "id": "f10817:m3"} {"signature": "@sts_conn('')@rate_limited()def get_bucket_tagging(client=None, **kwargs):", "body": "return client.get_bucket_tagging(**kwargs)", "docstring": "Bucket='string'", "id": "f10817:m4"} {"signature": "@sts_conn('')@rate_limited()def get_bucket_versioning(client=None, **kwargs):", "body": "return client.get_bucket_versioning(**kwargs)", "docstring": "Bucket='string'", "id": "f10817:m5"} {"signature": "@sts_conn('')@rate_limited()def get_bucket_lifecycle_configuration(client=None, **kwargs):", "body": "return client.get_bucket_lifecycle_configuration(**kwargs)", "docstring": "Bucket='string'", "id": "f10817:m6"} {"signature": "@sts_conn('')@rate_limited()def get_bucket_logging(client=None, **kwargs):", "body": "return client.get_bucket_logging(**kwargs)", "docstring": "Bucket='string'", "id": "f10817:m7"} {"signature": "@sts_conn('')@rate_limited()def get_bucket_website(client=None, **kwargs):", "body": "return client.get_bucket_website(**kwargs)", "docstring": "Bucket='string'", "id": "f10817:m8"} {"signature": "@sts_conn('')@rate_limited()def get_bucket_cors(client=None, **kwargs):", "body": "return client.get_bucket_cors(**kwargs)", "docstring": "Bucket='string'", "id": "f10817:m9"} {"signature": "@sts_conn('')@rate_limited()def get_bucket_notification_configuration(client=None, **kwargs):", "body": "return client.get_bucket_notification_configuration(**kwargs)", "docstring": "Bucket='string'", "id": "f10817:m10"} {"signature": "@sts_conn('')@rate_limited()def get_bucket_accelerate_configuration(client=None, **kwargs):", "body": "return client.get_bucket_accelerate_configuration(**kwargs)", "docstring": "Bucket='string'", "id": "f10817:m11"} {"signature": "@sts_conn('')@rate_limited()def 
get_bucket_replication(client=None, **kwargs):", "body": "return client.get_bucket_replication(**kwargs)", "docstring": "Bucket='string'", "id": "f10817:m12"} {"signature": "@sts_conn('')@paginated('', request_pagination_marker=\"\",response_pagination_marker=\"\")@rate_limited()def list_bucket_analytics_configurations(client=None, **kwargs):", "body": "result = client.list_bucket_analytics_configurations(**kwargs)if not result.get(\"\"):result.update({\"\": []})return result", "docstring": "Bucket='string'", "id": "f10817:m13"} {"signature": "@sts_conn('')@paginated('', request_pagination_marker=\"\",response_pagination_marker=\"\")@rate_limited()def list_bucket_metrics_configurations(client=None, **kwargs):", "body": "result = client.list_bucket_metrics_configurations(**kwargs)if not result.get(\"\"):result.update({\"\": []})return result", "docstring": "Bucket='string'", "id": "f10817:m14"} {"signature": "@sts_conn('')@paginated('', request_pagination_marker=\"\",response_pagination_marker=\"\")@rate_limited()def list_bucket_inventory_configurations(client=None, **kwargs):", "body": "result = client.list_bucket_inventory_configurations(**kwargs)if not result.get(\"\"):result.update({\"\": []})return result", "docstring": "Bucket='string'", "id": "f10817:m15"} {"signature": "@sts_conn('')@paginated('', request_pagination_marker=\"\",response_pagination_marker=\"\")@rate_limited()def list_rules(client=None, **kwargs):", "body": "result = client.list_rules(**kwargs)if not result.get(\"\"):result.update({\"\": []})return result", "docstring": "NamePrefix='string'", "id": "f10820:m0"} {"signature": "@sts_conn('')@rate_limited()def describe_rule(client=None, **kwargs):", "body": "return client.describe_rule(**kwargs)", "docstring": "Name='string'", "id": "f10820:m1"} {"signature": "@sts_conn('')@paginated('', request_pagination_marker=\"\",response_pagination_marker=\"\")@rate_limited()def list_targets_by_rule(client=None, **kwargs):", "body": "result = client.list_targets_by_rule(**kwargs)if not result.get(\"\"):result.update({\"\": []})return result", "docstring": "Rule='string'", "id": "f10820:m2"} {"signature": "@sts_conn('', service_type='')@rate_limited()def get_role_managed_policy_documents(role, client=None, **kwargs):", "body": "policies = get_role_managed_policies(role, force_client=client)policy_names = (policy[''] for policy in policies)delayed_gmpd_calls = (delayed(get_managed_policy_document)(policy[''], force_client=client) for policyin policies)policy_documents = Parallel(n_jobs=, backend=\"\")(delayed_gmpd_calls)return dict(zip(policy_names, policy_documents))", "docstring": "Retrieve the currently active policy version document for every managed policy that is attached to the role.", "id": "f10821:m12"} {"signature": "@sts_conn('', service_type='')@rate_limited()def get_managed_policy_document(policy_arn, policy_metadata=None, client=None, **kwargs):", "body": "if not policy_metadata:policy_metadata = client.get_policy(PolicyArn=policy_arn)policy_document = client.get_policy_version(PolicyArn=policy_arn,VersionId=policy_metadata[''][''])return policy_document['']['']", "docstring": "Retrieve the currently active (i.e. 
'default') policy version document for a policy.\n\n :param policy_arn:\n :param policy_metadata: This is a previously fetched managed policy response from boto/cloudaux.\n This is used to prevent unnecessary API calls to get the initial policy default version id.\n :param client:\n :param kwargs:\n :return:", "id": "f10821:m13"} {"signature": "@sts_conn('', service_type='')@rate_limited()def get_policy(policy_arn, client=None, **kwargs):", "body": "return client.get_policy(PolicyArn=policy_arn, **kwargs)", "docstring": "Retrieve the IAM Managed Policy.", "id": "f10821:m14"} {"signature": "@paginated('')@rate_limited()def _get_users_for_group(client, **kwargs):", "body": "return client.get_group(**kwargs)", "docstring": "Fetch the paginated users attached to the group.", "id": "f10821:m34"} {"signature": "@sts_conn('', service_type='')@rate_limited()def get_group(group_name, users=True, client=None, **kwargs):", "body": "result = client.get_group(GroupName=group_name, **kwargs)if users:if result.get(''):kwargs_to_send = {'': group_name}kwargs_to_send.update(kwargs)user_list = result['']kwargs_to_send[''] = result['']result[''] = user_list + _get_users_for_group(client, **kwargs_to_send)else:result.pop('', None)result.pop('', None)result.pop('', None)return result", "docstring": "Gets the IAM Group details.\n\n :param group_name:\n :param users: Optional -- will return the IAM users that the group is attached to if desired (paginated).\n :param client:\n :param kwargs:\n :return:", "id": "f10821:m35"} {"signature": "@sts_conn('', service_type='')@paginated('')@rate_limited()def list_group_policies(group_name, client=None, **kwargs):", "body": "return client.list_group_policies(GroupName=group_name, **kwargs)", "docstring": "Lists the IAM group inline policies for a given group.", "id": "f10821:m36"} {"signature": "@sts_conn('', service_type='')@rate_limited()def get_group_policy_document(group_name, policy_name, client=None, **kwargs):", "body": "return client.get_group_policy(GroupName=group_name, PolicyName=policy_name, **kwargs)['']", "docstring": "Fetches the specific IAM group inline-policy document.", "id": "f10821:m37"} {"signature": "@sts_conn('', service_type='')@paginated('')@rate_limited()def list_attached_group_managed_policies(group_name, client=None, **kwargs):", "body": "return client.list_attached_group_policies(GroupName=group_name, **kwargs)", "docstring": "Lists the attached IAM managed policies for a given IAM group.", "id": "f10821:m38"} {"signature": "@sts_conn('', service_type='')@paginated('')@rate_limited()def list_groups_for_user(user_name, client=None, **kwargs):", "body": "return client.list_groups_for_user(UserName=user_name, **kwargs)", "docstring": "Lists the IAM groups that are attached to a given IAM user.", "id": "f10821:m39"} {"signature": "@sts_conn('', service_type='')@paginated('')@rate_limited()def list_server_certificates(client=None, **kwargs):", "body": "return client.list_server_certificates(**kwargs)", "docstring": "Lists the IAM Server Certificates (IAM SSL) for a given AWS account.", "id": "f10821:m40"} {"signature": "@openstack_conn()def list_items(conn=None, **kwargs):", "body": "return [x for x in getattr( getattr( conn, kwargs.pop('') ),kwargs.pop(''))(**kwargs)]", "docstring": ":rtype: ``list``", "id": "f10835:m0"} {"signature": "@registry.register(flag=FLAGS.POLICY, depends_on=FLAGS.VERSIONS, key='')def _get_policy(lambda_function, **conn):", "body": "policies = dict(Versions=dict(), Aliases=dict(), DEFAULT=dict())for version in [v[''] for v 
in lambda_function['']]:try:policies[''][version] = get_policy(FunctionName=lambda_function[''], Qualifier=version, **conn)policies[''][version] = json.loads(policies[''][version])except Exception as e:passfor alias in [v[''] for v in lambda_function['']]:try:policies[''][alias] = get_policy(FunctionName=lambda_function[''], Qualifier=alias, **conn)policies[''][alias] = json.loads(policies[''][alias])except Exception as e:passtry:policies[''] = get_policy(FunctionName=lambda_function[''], **conn)policies[''] = json.loads(policies[''])except Exception as e:passreturn policies", "docstring": "Get LambdaFunction Policies. (there can be many of these!)\n\n Lambda Function Policies are overly complicated. They can be attached to a label,\n a version, and there is also a default policy.\n\n This method attempts to gather all three types.\n\n AWS returns an exception if the policy requested does not exist. We catch and ignore these exceptions.", "id": "f10837:m0"} {"signature": "@modify_outputdef get_lambda_function(lambda_function, flags=FLAGS.ALL, **conn):", "body": "try:basestringexcept NameError as _:basestring = strif isinstance(lambda_function, basestring):lambda_function_arn = ARN(lambda_function)if lambda_function_arn.error:lambda_function = dict(FunctionName=lambda_function)else:lambda_function = dict(FunctionName=lambda_function_arn.name, FunctionArn=lambda_function)if '' in lambda_function:lambda_function_arn = ARN(lambda_function[''])if not lambda_function_arn.error:if lambda_function_arn.account_number:conn[''] = lambda_function_arn.account_numberif lambda_function_arn.region:conn[''] = lambda_function_arn.regionreturn registry.build_out(flags, start_with=lambda_function, pass_datastructure=True, **conn)", "docstring": "Fully describes a lambda function.\n\n Args:\n lambda_function: Name, ARN, or dictionary of lambda function. If dictionary, should likely be the return value from list_functions. At a minimum, must contain a key titled 'FunctionName'.\n flags: Flags describing which sections should be included in the return value. 
Default ALL\n\n Returns:\n dictionary describing the requested lambda function.", "id": "f10837:m6"} {"signature": "@registry.register(flag=FLAGS.FLOW_LOGS, depends_on=FLAGS.BASE, key=\"\")def get_vpc_flow_logs(vpc, **conn):", "body": "fl_result = describe_flow_logs(Filters=[{\"\": \"\", \"\": [vpc[\"\"]]}], **conn)fl_ids = []for fl in fl_result:fl_ids.append(fl[\"\"])return fl_ids", "docstring": "Gets the VPC Flow Logs for a VPC", "id": "f10838:m0"} {"signature": "@registry.register(flag=FLAGS.CLASSIC_LINK, depends_on=FLAGS.BASE, key=\"\")def get_classic_link(vpc, **conn):", "body": "result = {}try:cl_result = describe_vpc_classic_link(VpcIds=[vpc[\"\"]], **conn)[]result[\"\"] = cl_result[\"\"]dns_result = describe_vpc_classic_link_dns_support(VpcIds=[vpc[\"\"]], **conn)[]result[\"\"] = dns_result[\"\"]except ClientError as e:if '' not in str(e):raise ereturn result", "docstring": "Gets the Classic Link details about a VPC", "id": "f10838:m1"} {"signature": "@registry.register(flag=FLAGS.INTERNET_GATEWAY, depends_on=FLAGS.BASE, key=\"\")def get_internet_gateway(vpc, **conn):", "body": "result = {}ig_result = describe_internet_gateways(Filters=[{\"\": \"\", \"\": [vpc[\"\"]]}], **conn)if ig_result:result.update({\"\": ig_result[][\"\"][][\"\"],\"\": ig_result[][\"\"],\"\": ig_result[].get(\"\", [])})return result", "docstring": "Gets the Internet Gateway details about a VPC", "id": "f10838:m2"} {"signature": "@registry.register(flag=FLAGS.VPC_PEERING_CONNECTIONS, depends_on=FLAGS.BASE, key=\"\")def get_vpc_peering_connections(vpc, **conn):", "body": "accepter_result = describe_vpc_peering_connections(Filters=[{\"\": \"\",\"\": [vpc[\"\"]]}], **conn)requester_result = describe_vpc_peering_connections(Filters=[{\"\": \"\",\"\": [vpc[\"\"]]}], **conn)peer_ids = []for peering in accepter_result + requester_result:peer_ids.append(peering[\"\"])return peer_ids", "docstring": "Gets the Internet Gateway details about a VPC", "id": "f10838:m3"} {"signature": "@registry.register(flag=FLAGS.SUBNETS, depends_on=FLAGS.BASE, key=\"\")def get_subnets(vpc, **conn):", "body": "subnets = describe_subnets(Filters=[{\"\": \"\", \"\": [vpc[\"\"]]}], **conn)s_ids = []for s in subnets:s_ids.append(s[\"\"])return s_ids", "docstring": "Gets the VPC Subnets", "id": "f10838:m4"} {"signature": "@registry.register(flag=FLAGS.ROUTE_TABLES, depends_on=FLAGS.BASE, key=\"\")def get_route_tables(vpc, **conn):", "body": "route_tables = describe_route_tables(Filters=[{\"\": \"\", \"\": [vpc[\"\"]]}], **conn)rt_ids = []for r in route_tables:rt_ids.append(r[\"\"])return rt_ids", "docstring": "Gets the VPC Route Tables", "id": "f10838:m5"} {"signature": "@registry.register(flag=FLAGS.NETWORK_ACLS, depends_on=FLAGS.BASE, key=\"\")def get_network_acls(vpc, **conn):", "body": "route_tables = describe_network_acls(Filters=[{\"\": \"\", \"\": [vpc[\"\"]]}], **conn)nacl_ids = []for r in route_tables:nacl_ids.append(r[\"\"])return nacl_ids", "docstring": "Gets the VPC Network ACLs", "id": "f10838:m6"} {"signature": "@registry.register(flag=FLAGS.BASE)def get_base(vpc, **conn):", "body": "base_result = describe_vpcs(VpcIds=[vpc[\"\"]], **conn)[]vpc_name = Nonefor t in base_result.get(\"\", []):if t[\"\"] == \"\":vpc_name = t[\"\"]dhcp_opts = Noneif base_result.get(\"\"):dhcp_opts = describe_dhcp_options(DhcpOptionsIds=[base_result[\"\"]], **conn)[][\"\"]attributes = {}attr_vals = [(\"\", \"\"),(\"\", \"\")]for attr, query in attr_vals:attributes[attr] = describe_vpc_attribute(VpcId=vpc[\"\"], Attribute=query, 
**conn)[attr]vpc.update({'': vpc_name,'': conn[\"\"],'': base_result.get(\"\", []),'': base_result[\"\"],'': base_result[\"\"],'': dhcp_opts,'': base_result[\"\"],'': base_result.get(\"\", []),'': base_result.get(\"\", []),'': attributes,'': })return vpc", "docstring": "The base will return:\n- ARN\n- Region\n- Name\n- Id\n- Tags\n- IsDefault\n- InstanceTenancy\n- CidrBlock\n- CidrBlockAssociationSet\n- Ipv6CidrBlockAssociationSet\n- DhcpOptionsId\n- Attributes\n- _version\n\n:param bucket_name:\n:param conn:\n:return:", "id": "f10838:m7"} {"signature": "@modify_outputdef get_vpc(vpc_id, flags=FLAGS.ALL, **conn):", "body": "if not conn.get(\"\"):raise CloudAuxException({\"\": \"\"\"\",\"\": vpc_id})if not conn.get(\"\"):raise CloudAuxException({\"\": \"\"\"\",\"\": vpc_id})start = {'': \"\".format(region=conn[\"\"],account=conn[\"\"],vpc_id=vpc_id),'': vpc_id}return registry.build_out(flags, start_with=start, pass_datastructure=True, **conn)", "docstring": "Orchestrates all the calls required to fully fetch details about a VPC:\n\n{\n \"Arn\": ...,\n \"Region\": ...,\n \"Name\": ...,\n \"Id\": ...,\n \"Tags: ...,\n \"VpcPeeringConnections\": ...,\n \"ClassicLink\": ...,\n \"DhcpOptionsId\": ...,\n \"InternetGateway\": ...,\n \"IsDefault\": ...,\n \"CidrBlock\": ...,\n \"CidrBlockAssociationSet\": ...,\n \"Ipv6CidrBlockAssociationSet\": ...,\n \"InstanceTenancy\": ...,\n \"RouteTables\": ...,\n \"NetworkAcls\": ...,\n \"FlowLogs\": ...,\n \"Subnets\": ...,\n \"Attributes\": ...,\n \"FlowLogs\": ...,\n \"_version\": 1\n}\n\n:param vpc_id: The ID of the VPC\n:param flags:\n:param conn:\n:return:", "id": "f10838:m8"} {"signature": "def _reformat_policy(policy):", "body": "policy_name = policy['']ret = {}ret[''] = policy['']attrs = policy['']if ret[''] != '':return policy_name, retattributes = dict()for attr in attrs:attributes[attr['']] = attr['']ret[''] = dict()ret[''][''] = bool(attributes.get(''))ret[''][''] = bool(attributes.get(''))ret[''][''] = bool(attributes.get(''))ret[''][''] = bool(attributes.get(''))ret[''][''] = bool(attributes.get(''))ret[''] = bool(attributes.get(''))ret[''] = attributes.get('', None)non_ciphers = ['','','','','','','']ciphers = []for cipher in attributes:if attributes[cipher] == '' and cipher not in non_ciphers:ciphers.append(cipher)ciphers.sort()ret[''] = ciphersreturn policy_name, ret", "docstring": "Policies returned from boto3 are massive, ugly, and difficult to read.\nThis method flattens and reformats the policy.\n\n:param policy: Result from invoking describe_load_balancer_policies(...)\n:return: Returns a tuple containing policy_name and the reformatted policy dict.", "id": "f10839:m0"} {"signature": "def _flatten_listener(listener):", "body": "result = dict()if set(listener.keys()) == set(['', '']):result.update(listener[''])result[''] = listener['']else:result = dict(listener)return result", "docstring": "from\n\n{\n \"Listener\": {\n \"InstancePort\": 80,\n \"LoadBalancerPort\": 80,\n \"Protocol\": \"HTTP\",\n \"InstanceProtocol\": \"HTTP\"\n },\n \"PolicyNames\": []\n},\n\nto\n\n{\n \"InstancePort\": 80,\n \"LoadBalancerPort\": 80,\n \"Protocol\": \"HTTP\",\n \"InstanceProtocol\": \"HTTP\",\n \"PolicyNames\": []\n}", "id": "f10839:m1"} {"signature": "@modify_outputdef get_load_balancer(load_balancer, flags=FLAGS.ALL ^ FLAGS.POLICY_TYPES, **conn):", "body": "try:basestringexcept NameError as _:basestring = strif isinstance(load_balancer, basestring):load_balancer = dict(LoadBalancerName=load_balancer)return registry.build_out(flags, 
start_with=load_balancer, pass_datastructure=True, **conn)", "docstring": "Fully describes an ELB.\n\n:param loadbalancer: Could be an ELB Name or a dictionary. Likely the return value from a previous call to describe_load_balancers. At a minimum, must contain a key titled 'LoadBalancerName'.\n:param flags: Flags describing which sections should be included in the return value. Default is FLAGS.ALL minus FLAGS.POLICY_TYPES.\n:return: Returns a dictionary describing the ELB with the fields described in the flags parameter.", "id": "f10839:m7"} {"signature": "@modify_outputdef get_vault(vault_obj, flags=FLAGS.ALL, **conn):", "body": "if isinstance(vault_obj, string_types):vault_arn = ARN(vault_obj)if vault_arn.error:vault_obj = {'': vault_obj}else:vault_obj = {'': vault_arn.parsed_name}return registry.build_out(flags, vault_obj, **conn)", "docstring": "Orchestrates calls to build a Glacier Vault in the following format:\n\n{\n \"VaultARN\": ...,\n \"VaultName\": ...,\n \"CreationDate\" ...,\n \"LastInventoryDate\" ...,\n \"NumberOfArchives\" ...,\n \"SizeInBytes\" ...,\n \"Policy\" ...,\n \"Tags\" ...\n}\nArgs:\n vault_obj: name, ARN, or dict of Glacier Vault\n flags: Flags describing which sections should be included in the return value. Default ALL\n\nReturns:\n dictionary describing the requested Vault", "id": "f10840:m3"} {"signature": "@modify_outputdef get_image(image_id, flags=FLAGS.ALL, **conn):", "body": "image = dict(ImageId=image_id)conn[''] = conn.get('', '')return registry.build_out(flags, image, **conn)", "docstring": "Orchestrates all the calls required to fully build out an EC2 Image (AMI, AKI, ARI)\n\n{\n \"Architecture\": \"x86_64\", \n \"Arn\": \"arn:aws:ec2:us-east-1::image/ami-11111111\", \n \"BlockDeviceMappings\": [], \n \"CreationDate\": \"2013-07-11T16:04:06.000Z\", \n \"Description\": \"...\", \n \"Hypervisor\": \"xen\", \n \"ImageId\": \"ami-11111111\", \n \"ImageLocation\": \"111111111111/...\", \n \"ImageType\": \"machine\", \n \"KernelId\": \"aki-88888888\", \n \"LaunchPermissions\": [], \n \"Name\": \"...\", \n \"OwnerId\": \"111111111111\", \n \"ProductCodes\": [], \n \"Public\": false, \n \"RamdiskId\": {}, \n \"RootDeviceName\": \"/dev/sda1\", \n \"RootDeviceType\": \"ebs\", \n \"SriovNetSupport\": \"simple\",\n \"State\": \"available\", \n \"Tags\": [], \n \"VirtualizationType\": \"hvm\", \n \"_version\": 1\n}\n\n:param image_id: str ami id\n:param flags: By default, set to ALL fields\n:param conn: dict containing enough information to make a connection to the desired account.\nMust at least have 'assume_role' key.\n:return: dict containing a fully built out image.", "id": "f10841:m5"} {"signature": "@modify_outputdef get_elbv2(alb, flags=FLAGS.ALL, **conn):", "body": "try:basestringexcept NameError as _:basestring = strif isinstance(alb, basestring):from cloudaux.orchestration.aws.arn import ARNalb_arn = ARN(alb)if alb_arn.error:alb = dict(LoadBalancerName=alb)else:alb = dict(LoadBalancerArn=alb)return registry.build_out(flags, start_with=alb, pass_datastructure=True, **conn)", "docstring": "Fully describes an ALB (ELBv2).\n\n:param alb: Could be an ALB Name, ALB ARN, or a dictionary. Likely the return value from a previous call to describe_load_balancers. At a minimum, must contain a key titled 'LoadBalancerArn'.\n:param flags: Flags describing which sections should be included in the return value. 
Default is FLAGS.ALL.\n:return: Returns a dictionary describing the ALB with the fields described in the flags parameter.", "id": "f10842:m8"} {"signature": "@modify_outputdef get_security_group(sg_obj, flags=FLAGS.ALL, **conn):", "body": "if isinstance(sg_obj, string_types):group_arn = ARN(sg_obj)if group_arn.error:sg_obj = {'': sg_obj}else:sg_obj = {'': group_arn.parsed_name}return registry.build_out(flags, sg_obj, **conn)", "docstring": "Orchestrates calls to build a Security Group in the following format:\n\n{\n \"Description\": ...,\n \"GroupName\": ...,\n \"IpPermissions\" ...,\n \"OwnerId\" ...,\n \"GroupId\" ...,\n \"IpPermissionsEgress\" ...,\n \"VpcId\" ...\n}\nArgs:\n sg_obj: name, ARN, or dict of Security Group\n flags: Flags describing which sections should be included in the return value. Default ALL\n\nReturns:\n dictionary describing the requested Security Group", "id": "f10843:m1"} {"signature": "def _conn_from_arn(arn):", "body": "arn = ARN(arn)if arn.error:raise CloudAuxException(''.format(arn=arn))return dict(account_number=arn.account_number,)", "docstring": "Extracts the account number from an ARN.\n:param arn: Amazon ARN containing account number.\n:return: dictionary with a single account_number key that can be merged with an existing\nconnection dictionary containing fields such as assume_role, session_name, region.", "id": "f10845:m1"} {"signature": "def _get_name_from_structure(item, default):", "body": "if item.get(default):return item.get(default)if item.get(''):arn = item.get('')item_arn = ARN(arn)if item_arn.error:raise CloudAuxException(''.format(arn=arn))return item_arn.parsed_nameraise MissingFieldException(''.format(input=item))", "docstring": "Given a possibly sparsely populated item dictionary, try to retrieve the item name.\nFirst try the default field. If that doesn't exist, try to parse the from the ARN.\n:param item: dict containing (at the very least) item_name and/or arn\n:return: item name", "id": "f10845:m2"} {"signature": "@modify_outputdef get_queue(queue, flags=FLAGS.ALL, **conn):", "body": "if queue.startswith(\"\") or queue.startswith(\"\"):queue_name = queueelse:queue_name = get_queue_url(QueueName=queue, **conn)sqs_queue = {\"\": queue_name}return registry.build_out(flags, sqs_queue, **conn)", "docstring": "Orchestrates all the calls required to fully fetch details about an SQS Queue:\n\n{\n \"Arn\": ...,\n \"Region\": ...,\n \"Name\": ...,\n \"Url\": ...,\n \"Attributes\": ...,\n \"Tags\": ...,\n \"DeadLetterSourceQueues\": ...,\n \"_version\": 1\n}\n\n:param queue: Either the queue name OR the queue url\n:param flags: By default, set to ALL fields.\n:param conn: dict containing enough information to make a connection to the desired account. 
Must at least have\n 'assume_role' key.\n:return: dict containing a fully built out SQS queue.", "id": "f10846:m3"} {"signature": "@modify_outputdef get_bucket(bucket_name, include_created=None, flags=FLAGS.ALL ^ FLAGS.CREATED_DATE, **conn):", "body": "if type(include_created) is bool:if include_created:flags = flags | FLAGS.CREATED_DATEelse:flags = flags & ~FLAGS.CREATED_DATEregion = get_bucket_region(Bucket=bucket_name, **conn)if not region:return dict(Error='')conn[''] = regionreturn registry.build_out(flags, bucket_name, **conn)", "docstring": "Orchestrates all the calls required to fully build out an S3 bucket in the following format:\n\n{\n \"Arn\": ...,\n \"Name\": ...,\n \"Region\": ...,\n \"Owner\": ...,\n \"Grants\": ...,\n \"GrantReferences\": ...,\n \"LifecycleRules\": ...,\n \"Logging\": ...,\n \"Policy\": ...,\n \"Tags\": ...,\n \"Versioning\": ...,\n \"Website\": ...,\n \"Cors\": ...,\n \"Notifications\": ...,\n \"Acceleration\": ...,\n \"Replication\": ...,\n \"CreationDate\": ...,\n \"AnalyticsConfigurations\": ...,\n \"MetricsConfigurations\": ...,\n \"InventoryConfigurations\": ...,\n \"_version\": 9\n}\n\nNOTE: \"GrantReferences\" is an ephemeral field that is not guaranteed to be consistent -- do not base logic off of it\n\n:param include_created: legacy param moved to FLAGS.\n:param bucket_name: str bucket name\n:param flags: By default, set to ALL fields except for FLAGS.CREATED_DATE as obtaining that information is a slow\n and expensive process.\n:param conn: dict containing enough information to make a connection to the desired account. Must at least have\n 'assume_role' key.\n:return: dict containing a fully built out bucket.", "id": "f10847:m16"} {"signature": "@registry.register(flag=FLAGS.INLINE_POLICIES, key='')def get_inline_policies(group, **conn):", "body": "policy_list = list_group_policies(group[''])policy_documents = {}for policy in policy_list:policy_documents[policy] = get_group_policy_document(group[''], policy, **conn)return policy_documents", "docstring": "Get the inline policies for the group.", "id": "f10848:m0"} {"signature": "@registry.register(flag=FLAGS.MANAGED_POLICIES, key='')def get_managed_policies(group, **conn):", "body": "managed_policies = list_attached_group_managed_policies(group[''], **conn)managed_policy_names = []for policy in managed_policies:managed_policy_names.append(policy[''])return managed_policy_names", "docstring": "Get a list of the managed policy names that are attached to the group.", "id": "f10848:m1"} {"signature": "@registry.register(flag=FLAGS.USERS, key='')def get_users(group, **conn):", "body": "group_details = get_group_api(group[''], **conn)user_list = []for user in group_details.get('', []):user_list.append(user[''])return user_list", "docstring": "Gets a list of the usernames that are a part of this group.", "id": "f10848:m2"} {"signature": "@registry.register(flag=FLAGS.BASE)def _get_base(group, **conn):", "body": "group[''] = group.update(get_group_api(group[''], users=False, **conn)[''])group[''] = get_iso_string(group[''])return group", "docstring": "Fetch the base IAM Group.", "id": "f10848:m3"} {"signature": "@modify_outputdef get_group(group, flags=FLAGS.BASE | FLAGS.INLINE_POLICIES | FLAGS.MANAGED_POLICIES, **conn):", "body": "if not group.get(''):raise MissingFieldException('')group = modify(group, output='')_conn_from_args(group, conn)return registry.build_out(flags, start_with=group, pass_datastructure=True, **conn)", "docstring": "Orchestrates all the calls required to fully build out an IAM 
Group in the following format:\n\n{\n \"Arn\": ...,\n \"GroupName\": ...,\n \"Path\": ...,\n \"GroupId\": ...,\n \"CreateDate\": ..., # str\n \"InlinePolicies\": ...,\n \"ManagedPolicies\": ..., # These are just the names of the Managed Policies.\n \"Users\": ..., # False by default -- these are just the names of the users.\n \"_version\": 1\n}\n\n:param flags: By default, Users is disabled. This is somewhat expensive as it has to call the `get_group` call\n multiple times.\n:param group: dict MUST contain the GroupName and also a combination of either the ARN or the account_number.\n:param output: Determines whether keys should be returned camelized or underscored.\n:param conn: dict containing enough information to make a connection to the desired account.\n Must at least have 'assume_role' key.\n:return: dict containing fully built out Group.", "id": "f10848:m4"} {"signature": "@registry.register(flag=FLAGS.BASE)def _get_base(role, **conn):", "body": "base_fields = frozenset(['', '', '', '', '', ''])needs_base = Falsefor field in base_fields:if field not in role:needs_base = Truebreakif needs_base:role_name = _get_name_from_structure(role, '')role = CloudAux.go('', RoleName=role_name, **conn)role = role['']role.update(dict(CreateDate=get_iso_string(role[''])))role[''] = return role", "docstring": "Determine whether the boto get_role call needs to be made or if we already have all that data\nin the role object.\n:param role: dict containing (at the very least) role_name and/or arn.\n:param conn: dict containing enough information to make a connection to the desired account.\n:return: Camelized dict describing role containing all all base_fields.", "id": "f10849:m4"} {"signature": "@modify_outputdef get_role(role, flags=FLAGS.ALL, **conn):", "body": "role = modify(role, output='')_conn_from_args(role, conn)return registry.build_out(flags, start_with=role, pass_datastructure=True, **conn)", "docstring": "Orchestrates all the calls required to fully build out an IAM Role in the following format:\n\n{\n \"Arn\": ...,\n \"AssumeRolePolicyDocument\": ...,\n \"CreateDate\": ..., # str\n \"InlinePolicies\": ...,\n \"InstanceProfiles\": ...,\n \"ManagedPolicies\": ...,\n \"Path\": ...,\n \"RoleId\": ...,\n \"RoleName\": ...,\n \"Tags\": {},\n \"_version\": 3\n}\n\n:param role: dict containing (at the very least) role_name and/or arn.\n:param output: Determines whether keys should be returned camelized or underscored.\n:param conn: dict containing enough information to make a connection to the desired account.\nMust at least have 'assume_role' key.\n:return: dict containing a fully built out role.", "id": "f10849:m5"} {"signature": "def get_all_roles(**conn):", "body": "roles = []account_roles = get_account_authorization_details('', **conn)for role in account_roles:roles.append({'': role[''],'': role[''],'': get_iso_string(role['']),'': role[''],'': [{'': ip[''],'': ip[''],'': get_iso_string(ip['']),'': ip[''],'': ip['']} for ip in role['']],'': [{\"\": x[''],\"\": x['']} for x in role['']],'': role[''],'': role[''],'': role['']})return roles", "docstring": "Returns a List of Roles represented as the dictionary below:\n\n{\n \"Arn\": ...,\n \"AssumeRolePolicyDocument\": ...,\n \"CreateDate\": ..., # str\n \"InlinePolicies\": ...,\n \"InstanceProfiles\": ...,\n \"ManagedPolicies\": ...,\n \"Path\": ...,\n \"RoleId\": ...,\n \"RoleName\": ...,\n}\n\n:param conn: dict containing enough information to make a connection to the desired account.\n:return: list containing dicts or fully built out roles", 
"id": "f10849:m6"} {"signature": "@modify_outputdef get_user(user, flags=FLAGS.ALL, **conn):", "body": "user = modify(user, output='')_conn_from_args(user, conn)return registry.build_out(flags, start_with=user, pass_datastructure=True, **conn)", "docstring": "Orchestrates all the calls required to fully build out an IAM User in the following format:\n\n{\n \"Arn\": ...,\n \"AccessKeys\": ...,\n \"CreateDate\": ..., # str\n \"InlinePolicies\": ...,\n \"ManagedPolicies\": ...,\n \"MFADevices\": ...,\n \"Path\": ...,\n \"UserId\": ...,\n \"UserName\": ...,\n \"SigningCerts\": ...\n}\n\n:param user: dict MUST contain the UserName and also a combination of either the ARN or the account_number\n:param output: Determines whether keys should be returned camelized or underscored.\n:param conn: dict containing enough information to make a connection to the desired account.\nMust at least have 'assume_role' key.\n:return: dict containing fully built out user.", "id": "f10852:m7"} {"signature": "def get_all_users(flags=FLAGS.ACCESS_KEYS | FLAGS.MFA_DEVICES | FLAGS.LOGIN_PROFILE | FLAGS.SIGNING_CERTIFICATES,**conn):", "body": "users = []account_users = get_account_authorization_details('', **conn)for user in account_users:temp_user = {'': user[''],'': get_iso_string(user['']),'': user[''],'': user[''],'': [{\"\": x[''],\"\": x['']} for x in user['']],'': user[''],'': user[''],'': user['']}user = modify(temp_user, output='')_conn_from_args(user, conn)users.append(registry.build_out(flags, start_with=user, pass_datastructure=True, **conn))return users", "docstring": "Returns a list of Users represented as dictionary below:\n\n{\n \"Arn\": ...,\n \"AccessKeys\": ...,\n \"CreateDate\": ..., # str\n \"InlinePolicies\": ...,\n \"ManagedPolicies\": ...,\n \"MFADevices\": ...,\n \"Path\": ...,\n \"UserId\": ...,\n \"UserName\": ...,\n \"SigningCerts\": ...\n}\n\n:param flags:\n:param conn: dict containing enough information to make a connection to the desired account.\n:return: list of dicts containing fully built out user.", "id": "f10852:m8"} {"signature": "@registry.register(flag=FLAGS.BASE)def get_base(managed_policy, **conn):", "body": "managed_policy[''] = arn = _get_name_from_structure(managed_policy, '')policy = get_policy(arn, **conn)document = get_managed_policy_document(arn, policy_metadata=policy, **conn)managed_policy.update(policy[''])managed_policy[''] = documentmanaged_policy[''] = get_iso_string(managed_policy[''])managed_policy[''] = get_iso_string(managed_policy[''])return managed_policy", "docstring": "Fetch the base Managed Policy.\n\n This includes the base policy and the latest version document.\n\n :param managed_policy:\n :param conn:\n :return:", "id": "f10853:m0"} {"signature": "@modify_outputdef get_managed_policy(managed_policy, flags=FLAGS.ALL, **conn):", "body": "_conn_from_args(managed_policy, conn)return registry.build_out(flags, start_with=managed_policy, pass_datastructure=True, **conn)", "docstring": "Orchestrates all of the calls required to fully build out an IAM Managed Policy in the following format:\n\n{\n \"Arn\": \"...\",\n \"PolicyName\": \"...\",\n \"PolicyId\": \"...\",\n \"Path\": \"...\",\n \"DefaultVersionId\": \"...\",\n \"AttachmentCount\": 123,\n \"PermissionsBoundaryUsageCount\": 123,\n \"IsAttachable\": ...,\n \"Description\": \"...\",\n \"CreateDate\": \"...\",\n \"UpdateDate\": \"...\",\n \"Document\": \"...\",\n \"_version\": 1\n}\n\n:param managed_policy: dict MUST contain the ARN.\n:param flags:\n:param conn:\n:return:", "id": "f10853:m1"} 
{"signature": "@registry.register(flag=FLAGS.BASE)def _get_base(server_certificate, **conn):", "body": "server_certificate[''] = cert_details = get_server_certificate_api(server_certificate[''], **conn)if cert_details:server_certificate.update(cert_details[''])server_certificate[''] = cert_details['']server_certificate[''] = cert_details.get('', None)server_certificate[''] = get_iso_string(server_certificate[''])server_certificate[''] = get_iso_string(server_certificate[''])return server_certificate", "docstring": "Fetch the base IAM Server Certificate.", "id": "f10854:m0"} {"signature": "@modify_outputdef get_server_certificate(server_certificate, flags=FLAGS.BASE, **conn):", "body": "if not server_certificate.get(''):raise MissingFieldException('')server_certificate = modify(server_certificate, output='')_conn_from_args(server_certificate, conn)return registry.build_out(flags, start_with=server_certificate, pass_datastructure=True, **conn)", "docstring": "Orchestrates all the calls required to fully build out an IAM User in the following format:\n\n{\n \"Arn\": ...,\n \"ServerCertificateName\": ...,\n \"Path\": ...,\n \"ServerCertificateId\": ...,\n \"UploadDate\": ..., # str\n \"Expiration\": ..., # str\n \"CertificateBody\": ...,\n \"CertificateChain\": ...,\n \"_version\": 1\n}\n\n:param flags: By default, Users is disabled. This is somewhat expensive as it has to call the\n `get_server_certificate` call multiple times.\n:param server_certificate: dict MUST contain the ServerCertificateName and also a combination of\n either the ARN or the account_number.\n:param output: Determines whether keys should be returned camelized or underscored.\n:param conn: dict containing enough information to make a connection to the desired account.\n Must at least have 'assume_role' key.\n:return: dict containing fully built out Server Certificate.", "id": "f10854:m1"} {"signature": "@modify_outputdef get_event(rule, flags=FLAGS.ALL, **conn):", "body": "try:basestringexcept NameError as _:basestring = strif isinstance(rule, basestring):rule_arn = ARN(rule)if rule_arn.error:rule_name = ruleelse:rule_name = rule_arn.namerule = describe_rule(Name=rule_name, **conn)return registry.build_out(flags, rule, **conn)", "docstring": "Orchestrates all the calls required to fully build out a CloudWatch Event Rule in the following format:\n\n{\n \"Arn\": ...,\n \"Name\": ...,\n \"Region\": ...,\n \"Description\": ...,\n \"State\": ...,\n \"Rule\": ...,\n \"Targets\" ...,\n \"_version\": 1\n}\n\n:param rule: str cloudwatch event name\n:param flags: By default, set to ALL fields\n:param conn: dict containing enough information to make a connection to the desired account.\nMust at least have 'assume_role' key.\n:return: dict containing a fully built out event rule with targets.", "id": "f10855:m3"} {"signature": "@registry.register(flag=FLAGS.RULES)def get_rules(security_group, **kwargs):", "body": "rules = security_group.pop('',[])for rule in rules:rule[''] = rule.pop('')rule[''] = rule.pop('')rule[''] = rule.pop('')rule[''] = rule.pop('')rule[''] = rule.pop('')security_group[''] = sorted(rules)return security_group", "docstring": "format the rule fields to match AWS to support auditor reuse,\n will need to remap back if we want to orchestrate from our stored items", "id": "f10856:m1"} {"signature": "def get_item(item, **kwargs):", "body": "_item = {}for k,v in inspect.getmembers(item, lambda a:not(inspect.isroutine(a))):if not k.startswith('') and not k in ignore_list:_item[k] = vreturn sub_dict(_item)", "docstring": 
"API versioning for each OpenStack service is independent. Generically capture\n the public members (non-routine and non-private) of the OpenStack SDK objects.\n\nNote the lack of the modify_output decorator. Preserving the field naming allows\n us to reconstruct objects and orchestrate from stored items.", "id": "f10857:m0"} {"signature": "def sub_list(l):", "body": "r = []for i in l:if type(i) in prims:r.append(i)elif type(i) is list:r.append(sub_list(i))elif type(i) is dict:r.append(sub_dict(i))else:r.append(str(i))r = sorted(r)return r", "docstring": "Recursively walk a data-structure sorting any lists along the way.\nAny unknown types get mapped to string representation\n\n:param l: list\n:return: sorted list, where any child lists are also sorted.", "id": "f10857:m1"} {"signature": "def sub_dict(d):", "body": "r = {}for k in d:if type(d[k]) in prims:r[k] = d[k]elif type(d[k]) is list:r[k] = sub_list(d[k])elif type(d[k]) is dict:r[k] = sub_dict(d[k])else:r[k] = str(d[k])return r", "docstring": "Recursively walk a data-structure sorting any lists along the way.\nAny unknown types get mapped to string representation\n\n:param d: dict\n:return: dict where any lists, even those buried deep in the structure, have been sorted.", "id": "f10857:m2"} {"signature": "def _modify(item, func):", "body": "result = dict()for key in item:result[func(key)] = item[key]return result", "docstring": "Modifies each item.keys() string based on the func passed in.\nOften used with inflection's camelize or underscore methods.\n\n:param item: dictionary representing item to be modified\n:param func: function to run on each key string\n:return: dictionary where each key has been modified by func.", "id": "f10858:m0"} {"signature": "def modify(item, output=''):", "body": "if output == '':return _modify(item, camelize)elif output == '':return _modify(item, underscore)", "docstring": "Calls _modify and either passes the inflection.camelize method or the inflection.underscore method.\n\n:param item: dictionary representing item to be modified\n:param output: string 'camelized' or 'underscored'\n:return:", "id": "f10858:m1"} {"signature": "def get_iso_string(input):", "body": "return input.replace(tzinfo=None, microsecond=).isoformat() + ''", "docstring": "Strips out the microseconds from datetime objects, and returns a proper ISO-format UTC string.\n\n :param input: Datetime object.\n :returns string: A datetime ISO format string with", "id": "f10862:m0"} {"signature": "def __init__(self, **kwargs):", "body": "self.conn_details.update(kwargs)", "docstring": "cloudaux = CloudAux(\n **{'account_number': '000000000000',\n 'assume_role': 'role_name',\n })", "id": "f10862:c0:m0"} {"signature": "def call(self, function_expr, **kwargs):", "body": "if '' in function_expr:tech, service_type, function_name = function_expr.split('')else:tech = self.conn_details.get('')service_type = self.conn_details.get('', '')function_name = function_expr@sts_conn(tech, service_type=service_type)def wrapped_method(function_name, **nargs):service_type = nargs.pop(nargs.pop('', ''))return getattr(service_type, function_name)(**nargs)kwargs.update(self.conn_details)if '' in kwargs:del kwargs['']return wrapped_method(function_name, **kwargs)", "docstring": "cloudaux = CloudAux(\n **{'account_number': '000000000000',\n 'assume_role': 'role_name',\n 'session_name': 'testing',\n 'region': 'us-east-1',\n 'tech': 'kms',\n 'service_type': 'client'\n })\n\ncloudaux.call(\"list_aliases\")\ncloudaux.call(\"kms.client.list_aliases\")", "id": 
"f10862:c0:m1"} {"signature": "@staticmethoddef go(function_expr, **kwargs):", "body": "if '' in function_expr:tech, service_type, function_name = function_expr.split('')else:tech = kwargs.pop('')service_type = kwargs.get('')function_name = function_expr@sts_conn(tech, service_type=service_type)def wrapped_method(function_name, **nargs):service_type = nargs.pop(nargs.pop('', ''))return getattr(service_type, function_name)(**nargs)return wrapped_method(function_name, **kwargs)", "docstring": "CloudAux.go(\n 'list_aliases',\n **{\n 'account_number': '000000000000',\n 'assume_role': 'role_name',\n 'session_name': 'cloudaux',\n 'region': 'us-east-1',\n 'tech': 'kms',\n 'service_type': 'client'\n })\n\nCloudAux.go(\n 'kms.client.list_aliases',\n **{\n 'account_number': '000000000000',\n 'assume_role': 'role_name',\n 'session_name': 'cloudaux',\n 'region': 'us-east-1'\n })", "id": "f10862:c0:m2"} {"signature": "def get(self, key, delete_if_expired=True):", "body": "self._update_cache_stats(key, None)if key in self._CACHE:(expiration, obj) = self._CACHE[key]if expiration > self._now():self._update_cache_stats(key, '')return objelse:if delete_if_expired:self.delete(key)self._update_cache_stats(key, '')return Noneself._update_cache_stats(key, '')return None", "docstring": "Retrieve key from Cache.\n\n:param key: key to look up in cache.\n:type key: ``object``\n\n:param delete_if_expired: remove value from cache if it is expired.\n Default is True.\n:type delete_if_expired: ``bool``\n\n:returns: value from cache or None\n:rtype: varies or None", "id": "f10864:c0:m1"} {"signature": "def insert(self, key, obj, future_expiration_minutes=):", "body": "expiration_time = self._calculate_expiration(future_expiration_minutes)self._CACHE[key] = (expiration_time, obj)return True", "docstring": "Insert item into cache.\n\n:param key: key to look up in cache.\n:type key: ``object``\n\n:param obj: item to store in cache.\n:type obj: varies\n\n:param future_expiration_minutes: number of minutes item is valid\n:type param: ``int``\n\n:returns: True\n:rtype: ``bool``", "id": "f10864:c0:m2"} {"signature": "def _update_cache_stats(self, key, result):", "body": "if result is None:self._CACHE_STATS[''].setdefault(key,{'': , '': , '': })else:self._CACHE_STATS[''][key][result] +=", "docstring": "Update the cache stats.\n\nIf no cache-result is specified, we iniitialize the key.\nOtherwise, we increment the correct cache-result.\n\nNote the behavior for expired. 
A client can be expired and the key\nstill exists.", "id": "f10864:c0:m6"} {"signature": "def get_access_details(self, key=None):", "body": "if key in self._CACHE_STATS:return self._CACHE_STATS[''][key]else:return self._CACHE_STATS['']", "docstring": "Get access details in cache.", "id": "f10864:c0:m7"} {"signature": "def get_stats(self):", "body": "expired = sum([x[''] for _, x inself._CACHE_STATS[''].items()])miss = sum([x[''] for _, x inself._CACHE_STATS[''].items()])hit = sum([x[''] for _, x inself._CACHE_STATS[''].items()])return {'': {'': len(self._CACHE_STATS['']),'': expired,'': miss,'': hit,}}", "docstring": "Get general stats for the cache.", "id": "f10864:c0:m8"} {"signature": "@gcp_stats()@gcp_cache(future_expiration_minutes=)def get_client(service, service_type='', **conn_args):", "body": "client_details = choose_client(service)user_agent = get_user_agent(**conn_args)if client_details:if client_details[''] == '':client = get_gcp_client(mod_name=client_details[''],pkg_name=conn_args.get('', ''),key_file=conn_args.get('', None),project=conn_args[''], user_agent=user_agent)else:client = get_google_client(mod_name=client_details[''],key_file=conn_args.get('', None),user_agent=user_agent, api_version=conn_args.get('', ''))else:try:client = get_google_client(mod_name=service, key_file=conn_args.get('', None),user_agent=user_agent, api_version=conn_args.get('', ''))except Exception as e:raise ereturn client_details, client", "docstring": "User function to get the correct client.\n\nBased on the GOOGLE_CLIENT_MAP dictionary, the return will be a cloud or general\nclient that can interact with the desired service.\n\n:param service: GCP service to connect to. E.g. 'gce', 'iam'\n:type service: ``str``\n\n:param conn_args: Dictionary of connection arguments. 'project' is required.\n 'user_agent' can be specified and will be set in the client\n returned.\n:type conn_args: ``dict``\n\n:return: client_details, client\n:rtype: ``tuple`` of ``dict``, ``object``", "id": "f10865:m0"} {"signature": "def choose_client(service):", "body": "client_options = get_available_clients(service)if client_options:return client_options[]return None", "docstring": "Logic to choose the appropriate client.\n\n:param service: Google Cloud service name. Examples: 'iam', 'gce'.\n:type service: ``str``\n\n:return: specific dictionary recommended for a particular service.\n:rtype: ``dict``", "id": "f10865:m1"} {"signature": "def get_available_clients(service):", "body": "details = GOOGLE_CLIENT_MAP.get(service, None)if details:return [details]else:return None", "docstring": "Return clients available for this service.\n\n:param service: Google Cloud service name. 
Examples: 'iam', 'gce'.\n:type service: ``str``\n\n:return: list of dictionaries describing the clients available.\n:rtype: ``list``", "id": "f10865:m2"}
{"signature": "def get_gcp_client(**kwargs):", "body": "return _gcp_client(project=kwargs[''], mod_name=kwargs[''],pkg_name=kwargs.get('', ''),key_file=kwargs.get('', None),http_auth=kwargs.get('', None),user_agent=kwargs.get('', None))", "docstring": "Public GCP client builder.", "id": "f10865:m3"}
{"signature": "def _gcp_client(project, mod_name, pkg_name, key_file=None, http_auth=None,user_agent=None):", "body": "client = Noneif http_auth is None:http_auth = _googleauth(key_file=key_file, user_agent=user_agent)try:google_module = importlib.import_module('' + mod_name,package=pkg_name)client = google_module.Client(use_GAX=USE_GAX, project=project,http=http_auth)except ImportError as ie:import_err = '' % (pkg_name, mod_name)raise ImportError(import_err)except TypeError:client = google_module.Client(project=project, http=http_auth)if user_agent and hasattr(client, ''):client.user_agent = user_agentreturn client", "docstring": "Private GCP client builder.\n\n:param project: Google Cloud project string.\n:type project: ``str``\n\n:param mod_name: Module name to load. Should be found in sys.path.\n:type mod_name: ``str``\n\n:param pkg_name: package name that mod_name is part of. Default is 'google.cloud' .\n:type pkg_name: ``str``\n\n:param key_file: Default is None.\n:type key_file: ``str`` or None\n\n:param http_auth: httplib2 authorized client. Default is None.\n:type http_auth: :class: `HTTPLib2`\n\n:param user_agent: User Agent string to use in requests. Default is None.\n:type user_agent: ``str`` or None\n\n:return: GCP client\n:rtype: ``object``", "id": "f10865:m4"}
{"signature": "def _googleauth(key_file=None, scopes=[], user_agent=None):", "body": "if key_file:if not scopes:scopes = DEFAULT_SCOPEScreds = ServiceAccountCredentials.from_json_keyfile_name(key_file,scopes=scopes)else:creds = GoogleCredentials.get_application_default()http = Http()if user_agent:http = set_user_agent(http, user_agent)http_auth = creds.authorize(http)return http_auth", "docstring": "Google http_auth helper.\n\nIf key_file is not specified, default credentials will be used.\n\nIf scopes is specified (and key_file), will be used instead of DEFAULT_SCOPES\n\n:param key_file: path to key file to use. Default is None\n:type key_file: ``str``\n\n:param scopes: scopes to set. Default is DEFAULT_SCOPES\n:type scopes: ``list``\n\n:param user_agent: User Agent string to use in requests. Default is None.\n:type user_agent: ``str`` or None\n\n:return: HTTPLib2 authorized client.\n:rtype: :class: `HTTPLib2`", "id": "f10865:m7"}
{"signature": "def _build_google_client(service, api_version, http_auth):", "body": "client = build(service, api_version, http=http_auth)return client", "docstring": "Google build client helper.\n\n:param service: service to build client for\n:type service: ``str``\n\n:param api_version: API version to use.\n:type api_version: ``str``\n\n:param http_auth: Initialized HTTP client to use.\n:type http_auth: ``object``\n\n:return: google-python-api client initialized to use 'service'\n:rtype: ``object``", "id": "f10865:m8"}
{"signature": "@gcp_conn('')def list_firewall_rules(client=None, **kwargs):", "body": "return gce_list(service=client.firewalls(),**kwargs)", "docstring": ":rtype: ``list``", "id": "f10866:m0"}
{"signature": "@gcp_conn('')def list_networks(client=None, **kwargs):", "body": "return gce_list(service=client.networks(),**kwargs)", "docstring": ":rtype: ``list``", "id": "f10867:m0"}
{"signature": "@gcp_conn('')def list_subnetworks(client=None, **kwargs):", "body": "return gce_list_aggregated(service=client.subnetworks(),key_name='', **kwargs)", "docstring": ":rtype: ``list``", "id": "f10867:m1"}
{"signature": "@gcp_conn('')def list_buckets(client=None, **kwargs):", "body": "buckets = client.list_buckets(**kwargs)return [b.__dict__ for b in buckets]", "docstring": "List buckets for a project.\n\n:param client: client object to use.\n:type client: Google Cloud Storage client\n\n:returns: list of dictionary representation of Bucket\n:rtype: ``list`` of ``dict``", "id": "f10868:m0"}
{"signature": "@gcp_conn('')def get_bucket(client=None, **kwargs):", "body": "bucket = client.lookup_bucket(kwargs[''])return bucket", "docstring": "Get bucket object.\n\n:param client: client object to use.\n:type client: Google Cloud Storage client\n\n:returns: Bucket object\n:rtype: ``object``", "id": "f10868:m1"}
{"signature": "def get_bucket_field(**kwargs):", "body": "bucket = get_bucket(**kwargs)if bucket:return getattr(bucket, kwargs[''], None)else:return None", "docstring": "Get value from member field of bucket object.\n\n:param Field: name of member of Bucket object to return.\n:type Field: ``str``\n\n:returns: value contained by the specified member field.\n:rtype: varies", "id": "f10868:m2"}
{"signature": "def list_objects_in_bucket(**kwargs):", "body": "bucket = get_bucket(**kwargs)if bucket:return [o for o in bucket.list_blobs()]else:return None", "docstring": "List objects in bucket.\n\n:param Bucket: name of bucket\n:type Bucket: ``str``\n\n:returns: list of objects in bucket\n:rtype: ``list``", "id": "f10868:m3"}
{"signature": "def get_object_in_bucket(**kwargs):", "body": "bucket = get_bucket(**kwargs)if bucket:return bucket.get_blob(kwargs[''])else:return None", "docstring": "Retrieve object from Bucket.\n\n:param Bucket: name of bucket\n:type Bucket: ``str``\n\n:returns: object from bucket or None\n:rtype: ``object`` or None", "id": "f10868:m4"}
{"signature": "def get_creds_from_kwargs(kwargs):", "body": "creds = {'': kwargs.pop('', None),'': kwargs.pop('', None),'': kwargs.get('', None),'': kwargs.pop('', None),'': kwargs.pop('', '')}return (creds, kwargs)", "docstring": "Helper to get creds out of kwargs.", "id": "f10869:m1"}
{"signature": "def rewrite_kwargs(conn_type, kwargs, module_name=None):", "body": "if conn_type != '' and module_name != '':if '' in kwargs:kwargs[''] = '' % kwargs.pop('')if conn_type == '' and module_name == '':if '' in kwargs:del kwargs['']return kwargs", "docstring": "Manipulate connection keywords.\n\nModifies keywords based on connection type.\n\nThere is an assumption here that the client has\nalready been created and that these keywords are being\npassed into methods for interacting with various services.\n\nCurrent modifications:\n- if conn_type is not cloud and module is 'compute', \n then rewrite project as name.\n- if conn_type is cloud and module is 'storage',\n then remove 'project' from dict.\n\n:param conn_type: E.g. 'cloud' or 'general'\n:type conn_type: ``str``\n\n:param kwargs: Dictionary of keywords sent in by user.\n:type kwargs: ``dict``\n\n:param module_name: Name of specific module that will be loaded.\n Default is None.\n:type module_name: ``str`` or None\n\n:returns: kwargs with client and module specific changes\n:rtype: ``dict``", "id": "f10869:m2"}
{"signature": "def gce_list_aggregated(service=None, key_name='', **kwargs):", "body": "resp_list = []req = service.aggregatedList(**kwargs)while req is not None:resp = req.execute()for location, item in resp[''].items():if key_name in item:resp_list.extend(item[key_name])req = service.aggregatedList_next(previous_request=req,previous_response=resp)return resp_list", "docstring": "General aggregated list function for the GCE service.", "id": "f10869:m3"}
{"signature": "def gce_list(service=None, **kwargs):", "body": "resp_list = []req = service.list(**kwargs)while req is not None:resp = req.execute()for item in resp.get('', []):resp_list.append(item)req = service.list_next(previous_request=req, previous_response=resp)return resp_list", "docstring": "General list function for the GCE service.", "id": "f10869:m4"}
{"signature": "def service_list(service=None, key_name=None, **kwargs):", "body": "resp_list = []req = service.list(**kwargs)while req is not None:resp = req.execute()if key_name and key_name in resp:resp_list.extend(resp[key_name])else:resp_list.append(resp)if hasattr(service, ''):req = service.list_next(previous_request=req,previous_response=resp)else:req = Nonereturn resp_list", "docstring": "General list function for Google APIs.", "id": "f10869:m5"}
{"signature": "def get_cache_stats():", "body": "from cloudaux.gcp.decorators import _GCP_CACHEreturn _GCP_CACHE.get_stats()", "docstring": "Helper to retrieve cache stats.", "id": "f10869:m6"}
{"signature": "def get_cache_access_details(key=None):", "body": "from cloudaux.gcp.decorators import _GCP_CACHEreturn _GCP_CACHE.get_access_details(key=key)", "docstring": "Retrieve detailed cache information.", "id": "f10869:m7"}
{"signature": "def get_gcp_stats():", "body": "from cloudaux.gcp.decorators import _GCP_STATSreturn _GCP_STATS", "docstring": "Retrieve stats, such as function timings.", "id": "f10869:m8"}
{"signature": "def get_user_agent_default(pkg_name=''):", "body": "version = ''try:import pkg_resourcesversion = pkg_resources.get_distribution(pkg_name).versionexcept pkg_resources.DistributionNotFound:passexcept ImportError:passreturn '' % (version)", "docstring": "Get default User Agent String.\n\nTry to import pkg_name to get an accurate version number.\n\nreturn: string", "id": "f10869:m9"}
{"signature": "def get_user_agent(**kwargs):", "body": "user_agent = kwargs.get('', None)if not user_agent:return get_user_agent_default()return user_agent", "docstring": "If there is a useragent, find it.\n\nLook in the keywords for user_agent. 
If not found,\nreturn get_user_agent_default", "id": "f10869:m10"} {"signature": "def _build_key(func_name, args, kwargs):", "body": "return \"\" % (func_name, args, kwargs)", "docstring": "Builds key for cache and stats.", "id": "f10870:m0"} {"signature": "def gcp_conn(service, service_type='', future_expiration_minutes=):", "body": "def decorator(f):@wraps(f)def decorated_function(*args, **kwargs):from cloudaux.gcp.auth import get_client(conn_args, kwargs) = get_creds_from_kwargs(kwargs)client_details, client = get_client(service, service_type=service_type,future_expiration_minutes=, **conn_args)if client_details:kwargs = rewrite_kwargs(client_details[''], kwargs,client_details[''])kwargs[''] = clientreturn f(*args, **kwargs)return decorated_functionreturn decorator", "docstring": "service_type: not currently used.", "id": "f10870:m1"} {"signature": "def gcp_stats():", "body": "def decorator(f):@wraps(f)def decorated_function(*args, **kwargs):start_time = time.time()result = f(*args, **kwargs)end_time = time.time()strkey = _build_key(f.__name__, args, kwargs)_GCP_STATS.setdefault(strkey, []).append(end_time - start_time)return resultreturn decorated_functionreturn decorator", "docstring": "Collect stats\n\nSpecifically, time function calls\n:returns: function response\n:rtype: varies", "id": "f10870:m2"} {"signature": "def gcp_cache(future_expiration_minutes=):", "body": "def decorator(f):@wraps(f)def decorated_function(*args, **kwargs):strkey = _build_key(f.__name__, args, kwargs)cached_result = _GCP_CACHE.get(strkey)if cached_result:return cached_resultelse:result = f(*args, **kwargs)_GCP_CACHE.insert(strkey, result, future_expiration_minutes)return resultreturn decorated_functionreturn decorator", "docstring": "Cache function output\n:param future_expiration_minutes: Number of minutes in the future until item\n expires. Default is 15.\n:returns: function response, optionally from the cache\n:rtype: varies", "id": "f10870:m3"} {"signature": "def iter_project(projects, key_file=None):", "body": "def decorator(func):@wraps(func)def decorated_function(*args, **kwargs):item_list = []exception_map = {}for project in projects:if isinstance(project, string_types):kwargs[''] = projectif key_file:kwargs[''] = key_fileelif isinstance(project, dict):kwargs[''] = project['']kwargs[''] = project['']itm, exc = func(*args, **kwargs)item_list.extend(itm)exception_map.update(exc)return (item_list, exception_map)return decorated_functionreturn decorator", "docstring": "Call decorated function for each item in project list.\n\nNote: the function 'decorated' is expected to return a value plus a dictionary of exceptions.\n\nIf item in list is a dictionary, we look for a 'project' and 'key_file' entry, respectively.\nIf item in list is of type string_types, we assume it is the project string. Default credentials\nwill be used by the underlying client library.\n\n:param projects: list of project strings or list of dictionaries\n Example: {'project':..., 'keyfile':...}. 
Required.\n:type projects: ``list`` of ``str`` or ``list`` of ``dict``\n\n:param key_file: path on disk to keyfile, for use with all projects\n:type key_file: ``str``\n\n:returns: tuple containing a list of function output and an exceptions map\n:rtype: ``tuple of ``list``, ``dict``", "id": "f10870:m4"} {"signature": "@gcp_conn('')def get_serviceaccount(client=None, **kwargs):", "body": "service_account=kwargs.pop('')resp = client.projects().serviceAccounts().get(name=service_account).execute()return resp", "docstring": "service_account='string'", "id": "f10871:m1"} {"signature": "@gcp_conn('')def get_serviceaccount_keys(client=None, **kwargs):", "body": "service_account=kwargs.pop('')kwargs[''] = service_accountreturn service_list(client.projects().serviceAccounts().keys(),key_name='', **kwargs)", "docstring": "service_account='string'", "id": "f10871:m2"} {"signature": "@gcp_conn('')def get_iam_policy(client=None, **kwargs):", "body": "service_account=kwargs.pop('')resp = client.projects().serviceAccounts().getIamPolicy(resource=service_account).execute()if '' in resp:return resp['']else:return None", "docstring": "service_account='string'", "id": "f10871:m3"} {"signature": "def get_short_module_name(module_name, obj_name):", "body": "parts = module_name.split('')short_name = module_namefor i in range(len(parts) - , , -):short_name = ''.join(parts[:i])try:exec('' % (short_name, obj_name))except ImportError:short_name = ''.join(parts[:(i + )])breakreturn short_name", "docstring": "Get the shortest possible module name", "id": "f10905:m0"} {"signature": "def identify_names(code):", "body": "finder = NameFinder()finder.visit(ast.parse(code))example_code_obj = {}for name, full_name in finder.get_mapping():module, attribute = full_name.rsplit('', )module_short = get_short_module_name(module, attribute)cobj = {'': attribute, '': module,'': module_short}example_code_obj[name] = cobjreturn example_code_obj", "docstring": "Builds a codeobj summary by identifying and resolving used names\n\n >>> code = '''\n ... from a.b import c\n ... import d as e\n ... print(c)\n ... e.HelloWorld().f.g\n ... '''\n >>> for name, o in sorted(identify_names(code).items()):\n ... 
print(name, o['name'], o['module'], o['module_short'])\n c c a.b a.b\n e.HelloWorld HelloWorld d d", "id": "f10905:m1"} {"signature": "def scan_used_functions(example_file, gallery_conf):", "body": "example_code_obj = identify_names(open(example_file).read())if example_code_obj:codeobj_fname = example_file[:-] + ''with open(codeobj_fname, '') as fid:pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)backrefs = set(''.format(**entry)for entry in example_code_obj.values()if entry[''].startswith(gallery_conf['']))return backrefs", "docstring": "save variables so we can later add links to the documentation", "id": "f10905:m2"} {"signature": "def _thumbnail_div(full_dir, fname, snippet, is_backref=False):", "body": "thumb = os.path.join(full_dir, '', '','' % fname[:-])ref_name = os.path.join(full_dir, fname).replace(os.path.sep, '')template = BACKREF_THUMBNAIL_TEMPLATE if is_backref else THUMBNAIL_TEMPLATEreturn template.format(snippet=snippet, thumbnail=thumb, ref_name=ref_name)", "docstring": "Generates RST to place a thumbnail in a gallery", "id": "f10905:m3"} {"signature": "def write_backreferences(seen_backrefs, gallery_conf,target_dir, fname, snippet):", "body": "example_file = os.path.join(target_dir, fname)backrefs = scan_used_functions(example_file, gallery_conf)for backref in backrefs:include_path = os.path.join(gallery_conf[''],'' % backref)seen = backref in seen_backrefswith open(include_path, '' if seen else '') as ex_file:if not seen:heading = '' % backrefex_file.write(heading + '')ex_file.write('' * len(heading) + '')ex_file.write(_thumbnail_div(target_dir, fname, snippet,is_backref=True))seen_backrefs.add(backref)", "docstring": "Writes down back reference files, which include a thumbnail list\n of examples using a certain module", "id": "f10905:m4"} {"signature": "def get_docstring_and_rest(filename):", "body": "with open(filename) as f:content = f.read()node = ast.parse(content)if not isinstance(node, ast.Module):raise TypeError(\"\"\"\".format(node.__class__.__name__))if node.body and isinstance(node.body[], ast.Expr) andisinstance(node.body[].value, ast.Str):docstring_node = node.body[]docstring = docstring_node.value.srest = content.split('', docstring_node.lineno)[-]return docstring, restelse:raise ValueError(('''').format(filename))", "docstring": "Separate `filename` content between docstring and the rest\n\n Strongly inspired from ast.get_docstring.\n\n Returns\n -------\n docstring: str\n docstring of `filename`\n rest: str\n `filename` content without the docstring", "id": "f10906:m0"} {"signature": "def split_code_and_text_blocks(source_file):", "body": "docstring, rest_of_content = get_docstring_and_rest(source_file)blocks = [('', docstring)]pattern = re.compile(r'',flags=re.M)pos_so_far = for match in re.finditer(pattern, rest_of_content):match_start_pos, match_end_pos = match.span()code_block_content = rest_of_content[pos_so_far:match_start_pos]text_content = match.group('')sub_pat = re.compile('', flags=re.M)text_block_content = dedent(re.sub(sub_pat, '', text_content))if code_block_content.strip():blocks.append(('', code_block_content))if text_block_content.strip():blocks.append(('', text_block_content))pos_so_far = match_end_posremaining_content = rest_of_content[pos_so_far:]if remaining_content.strip():blocks.append(('', remaining_content))return blocks", "docstring": "Return list with source file separated into code and text blocks.\n\n Returns\n -------\n blocks : list of (label, content)\n List where each element is a tuple with the label ('text' or 
'code'),\n and content string of block.", "id": "f10906:m1"} {"signature": "def codestr2rst(codestr, lang=''):", "body": "code_directive = \"\".format(lang)indented_block = indent(codestr, '' * )return code_directive + indented_block", "docstring": "Return reStructuredText code block from code string", "id": "f10906:m2"} {"signature": "def text2string(content):", "body": "try:return ast.literal_eval(content) + ''except Exception:return content", "docstring": "Returns a string without the extra triple quotes", "id": "f10906:m3"} {"signature": "def extract_intro(filename):", "body": "docstring, _ = get_docstring_and_rest(filename)paragraphs = docstring.lstrip().split('')if len(paragraphs) > :first_paragraph = re.sub('', '', paragraphs[])first_paragraph = (first_paragraph[:] + ''if len(first_paragraph) > else first_paragraph)else:raise ValueError(\"\"\"\"\"\".format(filename))return first_paragraph", "docstring": "Extract the first paragraph of module-level docstring. max:95 char", "id": "f10906:m4"} {"signature": "def get_md5sum(src_file):", "body": "with open(src_file, '') as src_data:src_content = src_data.read()if sys.version_info[] == :src_content = src_content.encode('')src_md5 = hashlib.md5(src_content).hexdigest()return src_md5", "docstring": "Returns md5sum of file", "id": "f10906:m5"} {"signature": "def check_md5sum_change(src_file):", "body": "src_md5 = get_md5sum(src_file)src_md5_file = src_file + ''src_file_changed = Trueif os.path.exists(src_md5_file):with open(src_md5_file, '') as file_checksum:ref_md5 = file_checksum.read()if src_md5 == ref_md5:src_file_changed = Falseif src_file_changed:with open(src_md5_file, '') as file_checksum:file_checksum.write(src_md5)return src_file_changed", "docstring": "Returns True if src_file has a different md5sum", "id": "f10906:m6"} {"signature": "def _plots_are_current(src_file, image_file):", "body": "first_image_file = image_file.format()has_image = os.path.exists(first_image_file)src_file_changed = check_md5sum_change(src_file)return has_image and not src_file_changed", "docstring": "Test existence of image file and no change in md5sum of\n example", "id": "f10906:m7"} {"signature": "def save_figures(image_path, fig_count, gallery_conf):", "body": "figure_list = []fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()for fig_mngr in fig_managers:fig = plt.figure(fig_mngr.num)kwargs = {}to_rgba = matplotlib.colors.colorConverter.to_rgbafor attr in ['', '']:fig_attr = getattr(fig, '' + attr)()default_attr = matplotlib.rcParams['' + attr]if to_rgba(fig_attr) != to_rgba(default_attr):kwargs[attr] = fig_attrcurrent_fig = image_path.format(fig_count + fig_mngr.num)fig.savefig(current_fig, **kwargs)figure_list.append(current_fig)if gallery_conf.get('', False):from mayavi import mlabe = mlab.get_engine()last_matplotlib_fig_num = len(figure_list)total_fig_num = last_matplotlib_fig_num + len(e.scenes)mayavi_fig_nums = range(last_matplotlib_fig_num, total_fig_num)for scene, mayavi_fig_num in zip(e.scenes, mayavi_fig_nums):current_fig = image_path.format(mayavi_fig_num)mlab.savefig(current_fig, figure=scene)scale_image(current_fig, current_fig, , )figure_list.append(current_fig)mlab.close(all=True)return figure_list", "docstring": "Save all open matplotlib figures of the example code-block\n\n Parameters\n ----------\n image_path : str\n Path where plots are saved (format string which accepts figure number)\n fig_count : int\n Previous figure number count. 
Figure number add from this number\n\n Returns\n -------\n list of strings containing the full path to each figure", "id": "f10906:m8"} {"signature": "def scale_image(in_fname, out_fname, max_width, max_height):", "body": "try:from PIL import Imageexcept ImportError:import Imageimg = Image.open(in_fname)width_in, height_in = img.sizescale_w = max_width / float(width_in)scale_h = max_height / float(height_in)if height_in * scale_w <= max_height:scale = scale_welse:scale = scale_hif scale >= and in_fname == out_fname:returnwidth_sc = int(round(scale * width_in))height_sc = int(round(scale * height_in))img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)thumb = Image.new('', (max_width, max_height), (, , ))pos_insert = ((max_width - width_sc) // , (max_height - height_sc) // )thumb.paste(img, pos_insert)thumb.save(out_fname)if os.environ.get('', False):try:subprocess.call([\"\", \"\", \"\", \"\", out_fname])except Exception:warnings.warn('')", "docstring": "Scales an image with the same aspect ratio centered in an\n image with a given max_width and max_height\n if in_fname == out_fname the image can only be scaled down", "id": "f10906:m9"} {"signature": "def save_thumbnail(image_path, base_image_name, gallery_conf):", "body": "first_image_file = image_path.format()thumb_dir = os.path.join(os.path.dirname(first_image_file), '')if not os.path.exists(thumb_dir):os.makedirs(thumb_dir)thumb_file = os.path.join(thumb_dir,'' % base_image_name)if os.path.exists(first_image_file):scale_image(first_image_file, thumb_file, , )elif not os.path.exists(thumb_file):default_thumb_file = os.path.join(glr_path_static(), '')default_thumb_file = gallery_conf.get(\"\",default_thumb_file)scale_image(default_thumb_file, thumb_file, , )", "docstring": "Save the thumbnail image", "id": "f10906:m10"} {"signature": "def generate_dir_rst(src_dir, target_dir, gallery_conf, seen_backrefs):", "body": "if not os.path.exists(os.path.join(src_dir, '')):print( * '')print('' %src_dir)print('')print( * '')return \"\" fhindex = open(os.path.join(src_dir, '')).read()if not os.path.exists(target_dir):os.makedirs(target_dir)sorted_listdir = [fname for fname in sorted(os.listdir(src_dir))if fname.endswith('')]entries_text = []for fname in sorted_listdir:amount_of_code = generate_file_rst(fname, target_dir, src_dir,gallery_conf)new_fname = os.path.join(src_dir, fname)intro = extract_intro(new_fname)write_backreferences(seen_backrefs, gallery_conf,target_dir, fname, intro)this_entry = _thumbnail_div(target_dir, fname, intro) + \"\"\"\"\"\" % (target_dir, fname[:-])entries_text.append((amount_of_code, this_entry))entries_text.sort()for _, entry_text in entries_text:fhindex += entry_textfhindex += \"\"\"\"\"\"return fhindex", "docstring": "Generate the gallery reStructuredText for an example directory", "id": "f10906:m11"} {"signature": "def execute_script(code_block, example_globals, image_path, fig_count,src_file, gallery_conf):", "body": "time_elapsed = stdout = ''print('' % src_file)plt.close('')cwd = os.getcwd()orig_stdout = sys.stdouttry:os.chdir(os.path.dirname(src_file))my_buffer = StringIO()my_stdout = Tee(sys.stdout, my_buffer)sys.stdout = my_stdoutt_start = time()exec(code_block, example_globals)time_elapsed = time() - t_startsys.stdout = orig_stdoutmy_stdout = my_buffer.getvalue().strip().expandtabs()if my_stdout:stdout = CODE_OUTPUT.format(indent(my_stdout, '' * ))os.chdir(cwd)figure_list = save_figures(image_path, fig_count, gallery_conf)image_list = \"\"if len(figure_list) == :figure_name = figure_list[]image_list = 
SINGLE_IMAGE % figure_name.lstrip('')elif len(figure_list) > :image_list = HLIST_HEADERfor figure_name in figure_list:image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('')except Exception:formatted_exception = traceback.format_exc()print( * '')print('' % src_file)print(formatted_exception)print( * '')figure_list = []image_list = codestr2rst(formatted_exception, lang='')broken_img = os.path.join(glr_path_static(), '')shutil.copyfile(broken_img, os.path.join(cwd, image_path.format()))fig_count += if gallery_conf['']:raisefinally:os.chdir(cwd)sys.stdout = orig_stdoutprint(\"\" % time_elapsed)code_output = \"\".format(image_list, stdout)return code_output, time_elapsed, fig_count + len(figure_list)", "docstring": "Executes the code block of the example file", "id": "f10906:m12"}
{"signature": "def generate_file_rst(fname, target_dir, src_dir, gallery_conf):", "body": "src_file = os.path.join(src_dir, fname)example_file = os.path.join(target_dir, fname)shutil.copyfile(src_file, example_file)image_dir = os.path.join(target_dir, '')if not os.path.exists(image_dir):os.makedirs(image_dir)base_image_name = os.path.splitext(fname)[]image_fname = '' + base_image_name + ''image_path = os.path.join(image_dir, image_fname)script_blocks = split_code_and_text_blocks(example_file)amount_of_code = sum([len(bcontent)for blabel, bcontent in script_blocksif blabel == ''])if _plots_are_current(example_file, image_path):return amount_of_codetime_elapsed = ref_fname = example_file.replace(os.path.sep, '')example_rst = \"\"\"\"\"\".format(ref_fname)example_nb = Notebook(fname, target_dir)filename_pattern = gallery_conf.get('')if re.search(filename_pattern, src_file) and gallery_conf['']:example_globals = {'': ''}fig_count = is_example_notebook_like = len(script_blocks) > for blabel, bcontent in script_blocks:if blabel == '':code_output, rtime, fig_count = execute_script(bcontent,example_globals,image_path,fig_count,src_file,gallery_conf)time_elapsed += rtimeexample_nb.add_code_cell(bcontent)if is_example_notebook_like:example_rst += codestr2rst(bcontent) + ''example_rst += code_outputelse:example_rst += code_outputexample_rst += codestr2rst(bcontent) + ''else:example_rst += text2string(bcontent) + ''example_nb.add_markdown_cell(text2string(bcontent))else:for blabel, bcontent in script_blocks:if blabel == '':example_rst += codestr2rst(bcontent) + ''example_nb.add_code_cell(bcontent)else:example_rst += bcontent + ''example_nb.add_markdown_cell(text2string(bcontent))save_thumbnail(image_path, base_image_name, gallery_conf)time_m, time_s = divmod(time_elapsed, )example_nb.save_file()with open(os.path.join(target_dir, base_image_name + ''), '') as f:example_rst += CODE_DOWNLOAD.format(time_m, time_s, fname,example_nb.file_name)f.write(example_rst)return amount_of_code", "docstring": "Generate the rst file for a given example.\n\n Returns the amount of code (in characters) of the corresponding\n files.", "id": "f10906:m13"}
{"signature": "def glr_path_static():", "body": "return os.path.abspath(os.path.join(os.path.dirname(__file__), ''))", "docstring": "Returns path to packaged static files", "id": "f10907:m0"}
{"signature": "def generate_gallery_rst(app):", "body": "try:plot_gallery = eval(app.builder.config.plot_gallery)except TypeError:plot_gallery = bool(app.builder.config.plot_gallery)gallery_conf.update(app.config.sphinx_gallery_conf)gallery_conf.update(plot_gallery=plot_gallery)gallery_conf.update(abort_on_example_error=app.builder.config.abort_on_example_error)app.config.sphinx_gallery_conf = gallery_confapp.config.html_static_path.append(glr_path_static())clean_gallery_out(app.builder.outdir)examples_dirs = gallery_conf['']gallery_dirs = gallery_conf['']if not isinstance(examples_dirs, list):examples_dirs = [examples_dirs]if not isinstance(gallery_dirs, list):gallery_dirs = [gallery_dirs]mod_examples_dir = os.path.relpath(gallery_conf[''],app.builder.srcdir)seen_backrefs = set()for examples_dir, gallery_dir in zip(examples_dirs, gallery_dirs):examples_dir = os.path.relpath(examples_dir,app.builder.srcdir)gallery_dir = os.path.relpath(gallery_dir,app.builder.srcdir)for workdir in [examples_dir, gallery_dir, mod_examples_dir]:if not os.path.exists(workdir):os.makedirs(workdir)fhindex = open(os.path.join(gallery_dir, ''), '')fhindex.write(generate_dir_rst(examples_dir, gallery_dir, gallery_conf,seen_backrefs))for directory in sorted(os.listdir(examples_dir)):if os.path.isdir(os.path.join(examples_dir, directory)):src_dir = os.path.join(examples_dir, directory)target_dir = os.path.join(gallery_dir, directory)fhindex.write(generate_dir_rst(src_dir, target_dir,gallery_conf,seen_backrefs))fhindex.flush()", "docstring": "Generate the Main examples gallery reStructuredText\n\n Start the sphinx-gallery configuration and recursively scan the examples\n directories in order to populate the examples gallery", "id": "f10908:m1"}
{"signature": "def setup(app):", "body": "app.add_config_value('', True, '')app.add_config_value('', False, '')app.add_config_value('', gallery_conf, '')app.add_stylesheet('')app.connect('', generate_gallery_rst)app.connect('', embed_code_links)", "docstring": "Setup sphinx-gallery sphinx extension", "id": "f10908:m2"}
{"signature": "def ipy_notebook_skeleton():", "body": "py_version = sys.version_infonotebook_skeleton = {\"\": [],\"\": {\"\": {\"\": \"\" + str(py_version[]),\"\": \"\",\"\": \"\" + str(py_version[])},\"\": {\"\": {\"\": \"\",\"\": py_version[]},\"\": \"\",\"\": \"\",\"\": \"\",\"\": \"\",\"\": \"\" + str(py_version[]),\"\": ''.format(*sys.version_info[:])}},\"\": ,\"\": }return notebook_skeleton", "docstring": "Returns a dictionary with the elements of a Jupyter notebook", "id": "f10909:m0"}
{"signature": "def rst2md(text):", "body": "top_heading = re.compile(r'', flags=re.M)text = re.sub(top_heading, r'', text)math_eq = re.compile(r'', flags=re.M)text = re.sub(math_eq,lambda match: r''.format(match.group().strip()),text)inline_math = re.compile(r'')text = re.sub(inline_math, r'', text)return text", "docstring": "Converts the RST text from the examples docstrings and comments\n into markdown text for the IPython notebooks", "id": "f10909:m1"}
{"signature": "def __init__(self, file_name, target_dir):", "body": "self.file_name = file_name.replace('', '')self.write_file = os.path.join(target_dir, self.file_name)self.work_notebook = ipy_notebook_skeleton()self.add_code_cell(\"\")", "docstring": "Declare the skeleton of the notebook\n\n Parameters\n ----------\n file_name : str\n original script file name, .py extension will be renamed\n target_dir: str\n directory where notebook file is to be saved", "id": "f10909:c0:m0"}
{"signature": "def add_code_cell(self, code):", "body": "code_cell = {\"\": \"\",\"\": None,\"\": {\"\": False},\"\": [],\"\": [code.strip()]}self.work_notebook[\"\"].append(code_cell)", "docstring": "Add a code cell to the notebook\n\n Parameters\n ----------\n code : str\n Cell content", "id": "f10909:c0:m1"}
{"signature": "def add_markdown_cell(self, text):", "body": "markdown_cell = {\"\": \"\",\"\": {},\"\": 
[rst2md(text)]}self.work_notebook[\"\"].append(markdown_cell)", "docstring": "Add a markdown cell to the notebook\n\n Parameters\n ----------\n code : str\n Cell content", "id": "f10909:c0:m2"} {"signature": "def save_file(self):", "body": "with open(self.write_file, '') as out_nb:json.dump(self.work_notebook, out_nb, indent=)", "docstring": "Saves the notebook to a file", "id": "f10909:c0:m3"} {"signature": "def _get_data(url):", "body": "if url.startswith(''):try:resp = urllib.urlopen(url)encoding = resp.headers.dict.get('', '')except AttributeError:resp = urllib.request.urlopen(url)encoding = resp.headers.get('', '')data = resp.read()if encoding == '':passelif encoding == '':data = StringIO(data)data = gzip.GzipFile(fileobj=data).read()else:raise RuntimeError('')else:with open(url, '') as fid:data = fid.read()return data", "docstring": "Helper function to get data over http or from a local file", "id": "f10910:m0"} {"signature": "def get_data(url, gallery_dir):", "body": "if sys.version_info[] == and isinstance(url, unicode):url = url.encode('')cached_file = os.path.join(gallery_dir, '')search_index = shelve.open(cached_file)if url in search_index:data = search_index[url]else:data = _get_data(url)search_index[url] = datasearch_index.close()return data", "docstring": "Persistent dictionary usage to retrieve the search indexes", "id": "f10910:m1"} {"signature": "def _select_block(str_in, start_tag, end_tag):", "body": "start_pos = str_in.find(start_tag)if start_pos < :raise ValueError('')depth = for pos in range(start_pos, len(str_in)):if str_in[pos] == start_tag:depth += elif str_in[pos] == end_tag:depth -= if depth == :breaksel = str_in[start_pos + :pos]return sel", "docstring": "Select first block delimited by start_tag and end_tag", "id": "f10910:m2"} {"signature": "def _parse_dict_recursive(dict_str):", "body": "dict_out = dict()pos_last = pos = dict_str.find('')while pos >= :key = dict_str[pos_last:pos]if dict_str[pos + ] == '':pos_tmp = dict_str.find('', pos + )if pos_tmp < :raise RuntimeError('')value = dict_str[pos + : pos_tmp].split('')for i in range(len(value)):try:value[i] = int(value[i])except ValueError:passelif dict_str[pos + ] == '':subdict_str = _select_block(dict_str[pos:], '', '')value = _parse_dict_recursive(subdict_str)pos_tmp = pos + len(subdict_str)else:raise ValueError('')key = key.strip('')if len(key) > :dict_out[key] = valuepos_last = dict_str.find('', pos_tmp)if pos_last < :breakpos_last += pos = dict_str.find('', pos_last)return dict_out", "docstring": "Parse a dictionary from the search index", "id": "f10910:m3"} {"signature": "def parse_sphinx_searchindex(searchindex):", "body": "if hasattr(searchindex, ''):searchindex = searchindex.decode('')query = ''pos = searchindex.find(query)if pos < :raise ValueError('')sel = _select_block(searchindex[pos:], '', '')objects = _parse_dict_recursive(sel)query = ''pos = searchindex.find(query)if pos < :raise ValueError('')filenames = searchindex[pos + len(query) + :]filenames = filenames[:filenames.find('')]filenames = [f.strip('') for f in filenames.split('')]return filenames, objects", "docstring": "Parse a Sphinx search index\n\n Parameters\n ----------\n searchindex : str\n The Sphinx search index (contents of searchindex.js)\n\n Returns\n -------\n filenames : list of str\n The file names parsed from the search index.\n objects : dict\n The objects parsed from the search index.", "id": "f10910:m4"} {"signature": "def embed_code_links(app, exception):", "body": "if exception is not None:returnif not 
app.builder.config.plot_gallery:returnif app.builder.name not in ['', '']:returnprint('')gallery_conf = app.config.sphinx_gallery_confgallery_dirs = gallery_conf['']if not isinstance(gallery_dirs, list):gallery_dirs = [gallery_dirs]for gallery_dir in gallery_dirs:_embed_code_links(app, gallery_conf, gallery_dir)", "docstring": "Embed hyperlinks to documentation into example code", "id": "f10910:m6"}
{"signature": "def _get_link(self, cobj):", "body": "fname_idx = Nonefull_name = cobj[''] + '' + cobj['']if full_name in self._searchindex['']:value = self._searchindex[''][full_name]if isinstance(value, dict):value = value[next(iter(value.keys()))]fname_idx = value[]elif cobj[''] in self._searchindex['']:value = self._searchindex[''][cobj['']]if cobj[''] in value.keys():fname_idx = value[cobj['']][]if fname_idx is not None:fname = self._searchindex[''][fname_idx] + ''if self._is_windows:fname = fname.replace('', '')link = os.path.join(self.doc_url, fname)else:link = posixpath.join(self.doc_url, fname)if hasattr(link, ''):link = link.decode('', '')if link in self._page_cache:html = self._page_cache[link]else:html = get_data(link, self.gallery_dir)self._page_cache[link] = htmlcomb_names = [cobj[''] + '' + cobj['']]if self.extra_modules_test is not None:for mod in self.extra_modules_test:comb_names.append(mod + '' + cobj[''])url = Falseif hasattr(html, ''):html = html.decode('', '')for comb_name in comb_names:if hasattr(comb_name, ''):comb_name = comb_name.decode('', '')if comb_name in html:url = link + u'' + comb_namelink = urlelse:link = Falsereturn link", "docstring": "Get a valid link, False if not found", "id": "f10910:c0:m1"}
{"signature": "def resolve(self, cobj, this_url):", "body": "full_name = cobj[''] + '' + cobj['']link = self._link_cache.get(full_name, None)if link is None:link = self._get_link(cobj)self._link_cache[full_name] = linkif link is False or link is None:return Noneif self.relative:link = os.path.relpath(link, start=this_url)if self._is_windows:link = link.replace('', '')link = link[:]return link", "docstring": "Resolve the link to the documentation, returns None if not found\n\n Parameters\n ----------\n cobj : dict\n Dict with information about the \"code object\" for which we are\n resolving a link.\n cobj['name'] : function or class name (str)\n cobj['module_short'] : shortened module name (str)\n cobj['module'] : module name (str)\n this_url: str\n URL of the current page. Needed to construct relative URLs\n (only used if relative=True in constructor).\n\n Returns\n -------\n link : str | None\n The link (URL) to the documentation.", "id": "f10910:c0:m2"}
{"signature": "def pascal(n):", "body": "errors.is_positive_integer(n)result = numpy.zeros((n, n))for i in range(, n):result[i, ] = result[, i] = if n > :for i in range(, n):for j in range(, n):result[i, j] = result[i-, j] + result[i, j-]return result", "docstring": "Return Pascal matrix\n\n :param int n: size of the matrix\n\n .. doctest::\n\n >>> from spectrum import pascal\n >>> pascal(6)\n array([[ 1., 1., 1., 1., 1., 1.],\n [ 1., 2., 3., 4., 5., 6.],\n [ 1., 3., 6., 10., 15., 21.],\n [ 1., 4., 10., 20., 35., 56.],\n [ 1., 5., 15., 35., 70., 126.],\n [ 1., 6., 21., 56., 126., 252.]])\n\n .. todo:: use the symmetric property to improve computational time if needed", "id": "f10916:m0"}
{"signature": "def corrmtx(x_input, m, method=''):", "body": "valid_methods = ['', '', '','', '']if method not in valid_methods:raise ValueError(\"\" % valid_methods)from scipy.linalg import toeplitzN = len(x_input)if isinstance(x_input, list):x = numpy.array(x_input)else:x = x_input.copy()if x.dtype == complex:complex_type = Trueelse:complex_type = Falseif method in ['', '']:Lp = toeplitz(x[:m], []*(m+))Tp = toeplitz(x[m:N], x[m::-])if method in ['', '']:Up = toeplitz([]*(m+), numpy.insert(x[N:N-m-:-],,))if method == '':if complex_type == True:C = numpy.zeros((N+m, m+), dtype=complex)else:C = numpy.zeros((N+m, m+))for i in range(, m):C[i] = Lp[i]for i in range(m, N):C[i] = Tp[i-m]for i in range(N, N+m):C[i] = Up[i-N]elif method == '':if complex_type == True:C = numpy.zeros((N, m+), dtype=complex)else:C = numpy.zeros((N, m+))for i in range(, m):C[i] = Lp[i]for i in range(m, N):C[i] = Tp[i-m]elif method == '':if complex_type == True:C = numpy.zeros((N, m+), dtype=complex)else:C = numpy.zeros((N, m+))for i in range(, N-m):C[i] = Tp[i]for i in range(N-m, N):C[i] = Up[i-N+m]elif method == '':return Tpelif method == '':if complex_type == True:C = numpy.zeros((*(N-m), m+), dtype=complex)else:C = numpy.zeros((*(N-m), m+))for i in range(, N-m):C[i] = Tp[i]Tp = numpy.fliplr(Tp.conj())for i in range(N-m, *(N-m)):C[i] = Tp[i-N+m]return C", "docstring": "r\"\"\"Correlation matrix\n\n This function is used by PSD estimator functions. It generates\n the correlation matrix from a correlation data set and a maximum lag.\n\n :param array x: autocorrelation samples (1D)\n :param int m: the maximum lag\n\n Depending on the choice of the method, the correlation matrix has different\n sizes, but the number of rows is always m+1.\n\n Method can be :\n\n * 'autocorrelation': (default) X is the (n+m)-by-(m+1) rectangular Toeplitz\n matrix derived using prewindowed and postwindowed data.\n * 'prewindowed': X is the n-by-(m+1) rectangular Toeplitz matrix derived\n using prewindowed data only.\n * 'postwindowed': X is the n-by-(m+1) rectangular Toeplitz matrix that\n derived using postwindowed data only.\n * 'covariance': X is the (n-m)-by-(m+1) rectangular Toeplitz matrix\n derived using nonwindowed data.\n * 'modified': X is the 2(n-m)-by-(m+1) modified rectangular Toeplitz\n matrix that generates an autocorrelation estimate for the length n data\n vector x, derived using forward and backward prediction error estimates.\n\n\n :return:\n * the autocorrelation matrix\n * R, the (m+1)-by-(m+1) autocorrelation matrix estimate ``R= X'*X``.\n\n .. rubric:: Algorithm details:\n\n The **autocorrelation** matrix is a :math:`(N+p) \\times (p+1)` rectangular Toeplitz\n data matrix:\n\n .. math:: X_p = \\begin{pmatrix}L_p\\\\T_p\\\\Up\\end{pmatrix}\n\n where the lower triangular :math:`p \\times (p+1)` matrix :math:`L_p` is\n\n .. math:: L_p =\n \\begin{pmatrix}\n x[1] & \\cdots & 0 & 0 \\\\\n \\vdots & \\ddots & \\vdots & \\vdots \\\\\n x[p] & \\cdots & x[1] & 0\n \\end{pmatrix}\n\n where the rectangular :math:`(N-p) \\times (p+1)` matrix :math:`T_p` is\n\n .. math:: T_p =\n \\begin{pmatrix}\n x[p+1] & \\cdots & x[1] \\\\\n \\vdots & \\ddots & \\vdots \\\\\n x[N-p] & \\cdots & x[p+1] \\\\\n \\vdots & \\ddots & \\vdots \\\\\n x[N] & \\cdots & x[N-p]\n \\end{pmatrix}\n\n and where the upper triangular :math:`p \\times (p+1)` matrix :math:`U_p` is\n\n .. 
math:: U_p =\n \\begin{pmatrix}\n 0 & x[N] & \\cdots & x[N-p+1] \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n 0 & 0 & \\cdots & x[N]\n \\end{pmatrix}\n\n From this definition, the prewindowed matrix is\n\n .. math:: X_p = \\begin{pmatrix}L_p\\\\T_p\\end{pmatrix}\n\n the postwindowed matrix is\n\n .. math:: X_p = \\begin{pmatrix}T_p\\\\U_p\\end{pmatrix}\n\n the covariance matrix is:\n\n .. math:: X_p = \\begin{pmatrix}T_p\\end{pmatrix}\n\n and the modified covariance matrix is:\n\n .. math:: X_p = \\begin{pmatrix}T_p\\\\T_p^*\\end{pmatrix}", "id": "f10916:m1"} {"signature": "def csvd(A):", "body": "U, S, V = numpy.linalg.svd(A)return U, S, V", "docstring": "SVD decomposition using numpy.linalg.svd\n\n :param A: a M by N matrix\n\n :return:\n * U, a M by M matrix\n * S the N eigen values\n * V a N by N matrix\n\n See :func:`numpy.linalg.svd` for a detailed documentation.\n\n Should return the same as in [Marple]_ , CSVD routine.\n\n ::\n\n U, S, V = numpy.linalg.svd(A)\n U, S, V = cvsd(A)", "id": "f10916:m2"} {"signature": "def pylab_rms_flat(a):", "body": "return np.sqrt(np.mean(np.absolute(a) ** ))", "docstring": "Return the root mean square of all the elements of *a*, flattened out.\n(Copied 1:1 from matplotlib.mlab.)", "id": "f10917:m0"} {"signature": "def CORRELATION(x, y=None, maxlags=None, norm=''):", "body": "assert norm in ['','', '', None]x = np.array(x)if y is None:y = xelse:y = np.array(y)N = max(len(x), len(y))if len(x) < N:x = y.copy()x.resize(N)if len(y) < N:y = y.copy()y.resize(N)if maxlags is None:maxlags = N - assert maxlags < N, ''realdata = np.isrealobj(x) and np.isrealobj(y)if realdata == True:r = np.zeros(maxlags, dtype=float)else:r = np.zeros(maxlags, dtype=complex)if norm == '':rmsx = pylab_rms_flat(x)rmsy = pylab_rms_flat(y)for k in range(, maxlags+):nk = N - k - if realdata == True:sum = for j in range(, nk+):sum = sum + x[j+k] * y[j]else:sum = + for j in range(, nk+):sum = sum + x[j+k] * y[j].conjugate()if k == :if norm in ['', '']:r0 = sum/float(N)elif norm is None:r0 = sumelse:r0 = else:if norm == '':r[k-] = sum / float(N-k)elif norm == '':r[k-] = sum / float(N)elif norm is None:r[k-] = sumelif norm == '':r[k-] = sum/(rmsx*rmsy)/float(N)r = np.insert(r, , r0)return r", "docstring": "r\"\"\"Correlation function\n\n This function should give the same results as :func:`xcorr` but it\n returns the positive lags only. Moreover the algorithm does not use\n FFT as compared to other algorithms.\n\n :param array x: first data array of length N\n :param array y: second data array of length N. If not specified, computes the\n autocorrelation.\n :param int maxlags: compute cross correlation between [0:maxlags]\n when maxlags is not specified, the range of lags is [0:maxlags].\n :param str norm: normalisation in ['biased', 'unbiased', None, 'coeff']\n\n * *biased* correlation=raw/N,\n * *unbiased* correlation=raw/(N-`|lag|`)\n * *coeff* correlation=raw/(rms(x).rms(y))/N\n * None correlation=raw\n\n :return:\n * a numpy.array correlation sequence, r[1,N]\n * a float for the zero-lag correlation, r[0]\n\n The *unbiased* correlation has the form:\n\n .. math::\n\n \\hat{r}_{xx} = \\frac{1}{N-m}T \\sum_{n=0}^{N-m-1} x[n+m]x^*[n] T\n\n The *biased* correlation differs by the front factor only:\n\n .. math::\n\n \\check{r}_{xx} = \\frac{1}{N}T \\sum_{n=0}^{N-m-1} x[n+m]x^*[n] T\n\n with :math:`0\\leq m\\leq N-1`.\n\n .. doctest::\n\n >>> from spectrum import CORRELATION\n >>> x = [1,2,3,4,5]\n >>> res = CORRELATION(x,x, maxlags=0, norm='biased')\n >>> res[0]\n 11.0\n\n .. 
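A hedged cross-check (not part of the original records) between CORRELATION above and the xcorr function it defers to: CORRELATION returns only the non-negative lags, so with the same 'biased' normalisation its output should match the right half of the xcorr result.

import numpy as np
from spectrum import CORRELATION, xcorr

x = [1, 2, 3, 4, 5]
r_pos = CORRELATION(x, x, maxlags=4, norm='biased')    # lags 0..4
r_full, lags = xcorr(x, x, maxlags=4, norm='biased')   # lags -4..4
print(np.allclose(r_pos, r_full[lags >= 0]))           # expected: True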
note:: this function should be replaced by :func:`xcorr`.\n\n .. seealso:: :func:`xcorr`", "id": "f10917:m1"} {"signature": "def xcorr(x, y=None, maxlags=None, norm=''):", "body": "N = len(x)if y is None:y = xassert len(x) == len(y), ''if maxlags is None:maxlags = N-lags = np.arange(, *N-)else:assert maxlags <= N, ''lags = np.arange(N-maxlags-, N+maxlags)res = np.correlate(x, y, mode='')if norm == '':Nf = float(N)res = res[lags] / float(N) elif norm == '':res = res[lags] / (float(N)-abs(np.arange(-N+, N)))[lags]elif norm == '':Nf = float(N)rms = pylab_rms_flat(x) * pylab_rms_flat(y)res = res[lags] / rms / Nfelse:res = res[lags]lags = np.arange(-maxlags, maxlags+)return res, lags", "docstring": "Cross-correlation using numpy.correlate\n\n Estimates the cross-correlation (and autocorrelation) sequence of a random\n process of length N. By default, there is no normalisation and the output\n sequence of the cross-correlation has a length 2*N+1.\n\n :param array x: first data array of length N\n :param array y: second data array of length N. If not specified, computes the\n autocorrelation.\n :param int maxlags: compute cross correlation between [-maxlags:maxlags]\n when maxlags is not specified, the range of lags is [-N+1:N-1].\n :param str option: normalisation in ['biased', 'unbiased', None, 'coeff']\n\n The true cross-correlation sequence is\n\n .. math:: r_{xy}[m] = E(x[n+m].y^*[n]) = E(x[n].y^*[n-m])\n\n However, in practice, only a finite segment of one realization of the\n infinite-length random process is available.\n\n The correlation is estimated using numpy.correlate(x,y,'full').\n Normalisation is handled by this function using the following cases:\n\n * 'biased': Biased estimate of the cross-correlation function\n * 'unbiased': Unbiased estimate of the cross-correlation function\n * 'coeff': Normalizes the sequence so the autocorrelations at zero\n lag is 1.0.\n\n :return:\n * a numpy.array containing the cross-correlation sequence (length 2*N-1)\n * lags vector\n\n .. note:: If x and y are not the same length, the shorter vector is\n zero-padded to the length of the longer vector.\n\n .. rubric:: Examples\n\n .. doctest::\n\n >>> from spectrum import xcorr\n >>> x = [1,2,3,4,5]\n >>> c, l = xcorr(x,x, maxlags=0, norm='biased')\n >>> c\n array([ 11.])\n\n .. 
seealso:: :func:`CORRELATION`.", "id": "f10917:m2"} {"signature": "def _numpy_cholesky(A, B):", "body": "L = numpy.linalg.cholesky(A)y = numpy.linalg.solve(L,B)x = numpy.linalg.solve(L.transpose().conjugate(),y)return x, L", "docstring": "Solve Ax=B using numpy cholesky solver\n\n A = LU\n\n in the case where A is square and Hermitian, A = L.L* where L* is\n transpoed and conjugate matrix\n\n Ly = b\n\n where\n\n Ux=y\n\n so x = U^{-1} y\n where U = L*\n and y = L^{-1} B", "id": "f10918:m0"} {"signature": "def _numpy_solver(A, B):", "body": "x = numpy.linalg.solve(A, B)return x", "docstring": "This function solve Ax=B directly without taking care of the input\n matrix properties.", "id": "f10918:m1"} {"signature": "def CHOLESKY(A, B, method=''):", "body": "if method == '':X = _numpy_solver(A,B)return Xelif method == '':X, _L = _numpy_cholesky(A, B)return Xelif method == '':import scipy.linalgL = scipy.linalg.cholesky(A)X = scipy.linalg.cho_solve((L, False), B)else:raise ValueError('')return X", "docstring": "Solve linear system `AX=B` using CHOLESKY method.\n\n :param A: an input Hermitian matrix\n :param B: an array\n :param str method: a choice of method in [numpy, scipy, numpy_solver]\n\n * `numpy_solver` relies entirely on numpy.solver (no cholesky decomposition)\n * `numpy` relies on the numpy.linalg.cholesky for the decomposition and\n numpy.linalg.solve for the inversion.\n * `scipy` uses scipy.linalg.cholesky for the decomposition and\n scipy.linalg.cho_solve for the inversion.\n\n .. rubric:: Description\n\n When a matrix is square and Hermitian (symmetric with lower part being\n the complex conjugate of the upper one), then the usual triangular\n factorization takes on the special form:\n\n .. math:: A = R R^H\n\n where :math:`R` is a lower triangular matrix with nonzero real principal\n diagonal element. The input matrix can be made of complex data. Then, the\n inversion to find :math:`x` is made as follows:\n\n .. math:: Ry = B\n\n and\n\n .. math:: Rx = y\n\n .. doctest::\n\n >>> import numpy\n >>> from spectrum import CHOLESKY\n >>> A = numpy.array([[ 2.0+0.j , 0.5-0.5j, -0.2+0.1j],\n ... [ 0.5+0.5j, 1.0+0.j , 0.3-0.2j],\n ... 
[-0.2-0.1j, 0.3+0.2j, 0.5+0.j ]])\n >>> B = numpy.array([ 1.0+3.j , 2.0-1.j , 0.5+0.8j])\n >>> CHOLESKY(A, B)\n array([ 0.95945946+5.25675676j, 4.41891892-7.04054054j,\n -5.13513514+6.35135135j])", "id": "f10918:m2"} {"signature": "def modcovar_marple (X,IP):", "body": "Pv = []N = len(X)A = np.zeros(N, dtype=complex)D = np.zeros(N, dtype=complex)C = np.zeros(N, dtype=complex)R = np.zeros(N, dtype=complex)R1=for K in range(, N-):R1=R1 + *(X[K].real** + X[K].imag**)R2 = X[].real** + X[].imag**R3 = X[N-].real** + X[N-].imag**R4 = / (R1 + * (R2 + R3))P = R1 + R2 + R3DELTA = - R2 * R4GAMMA = - R3 * R4LAMBDA = (X[] * X[N-]).conjugate()*R4C[] = X[N-] * R4D[] = X[].conjugate() * R4M = if (IP ==):P = (*R1+R2+R3)/float(N)return [], P, []for M in range(, IP):SAVE1 = +for K in range(M+, N):SAVE1 = SAVE1 + X[K]*X[K-M-].conjugate()SAVE1 *= R[M] = SAVE1.conjugate()THETA = X[N-]*D[]PSI=X[N-]*C[]XI = X[].conjugate() * D[]if M==:passelse:for K in range(, M):THETA=THETA+X[N-K-]*D[K+] PSI = PSI + X[N-K-]*C[K+] XI = XI + X[K+].conjugate() * D[K+] R[K] = R[K]-X[N-M-] * X[N+-M+K-].conjugate() - X[M].conjugate() * X[M-K-] SAVE1=SAVE1+R[K].conjugate()*A[M-K-] C1 = -SAVE1/PA[M]=C1 P=P*(-C1.real**-C1.imag**) if M==:passelse:for K in range(, (M+)//):MK = M-K-SAVE1=A[K]A[K]=SAVE1+C1*A[MK].conjugate() if (K != MK):A[MK]=A[MK]+C1*(SAVE1.conjugate()) if M+ == IP:P=*P/float(N-M-)Pv.append(P)return A, P, Pvelse:Pv.append(*P/float(N-M-))R1=/(DELTA*GAMMA-(LAMBDA.real)**-(LAMBDA.imag)**)C1=(THETA*(LAMBDA.conjugate())+PSI*DELTA)*R1C2=(PSI*LAMBDA+THETA*GAMMA)*R1C3=(XI*(LAMBDA.conjugate())+THETA*DELTA)*R1C4=(THETA*LAMBDA+XI*GAMMA)*R1for K in range(, (M)//+):MK=M-KSAVE1=C[K].conjugate()SAVE2=D[K].conjugate()SAVE3=C[MK].conjugate()SAVE4=D[MK].conjugate()C[K]=C[K]+C1*SAVE3+C2*SAVE4 D[K]=D[K]+C3*SAVE3+C4*SAVE4 if K != MK:C[MK]=C[MK]+C1*SAVE1+C2*SAVE2 D[MK]=D[MK]+C3*SAVE1+C4*SAVE2 R2=PSI.real**+PSI.imag**R3=THETA.real**+THETA.imag**R4=XI.real**+XI.imag**R5=GAMMA-(R2*DELTA+R3*GAMMA+*np.real(PSI*LAMBDA*THETA.conjugate()))*R1R2=DELTA-(R3*DELTA+R4*GAMMA+*np.real(THETA*LAMBDA*XI.conjugate()))*R1GAMMA=R5 DELTA=R2 LAMBDA=LAMBDA+C3*PSI.conjugate()+C4*THETA.conjugate() if P <= :raise ValueError('')if (DELTA > and DELTA <= and GAMMA > and GAMMA <=):passelse:raise ValueError('')R1=/PR2=/(DELTA*GAMMA-LAMBDA.real**-LAMBDA.imag**) EF=X[M+]EB=X[N-M-]for K in range(, M+):EF=EF+A[K]*X[M-K] EB=EB+A[K].conjugate()*X[N-M+K-] C1=EB*R1 C2=EF.conjugate()*R1 C3=(EB.conjugate()*DELTA+EF*LAMBDA)*R2C4=(EF*GAMMA+(EB*LAMBDA).conjugate())*R2for K in range(M, -, -):SAVE1=A[K]A[K]=SAVE1+C3*C[K]+C4*D[K] C[K+]=C[K]+C1*SAVE1 D[K+]=D[K]+C2*SAVE1 C[]=C1D[]=C2R3=EB.real**+EB.imag**R4=EF.real**+EF.imag**P=P-(R3*DELTA+R4*GAMMA+*np.real(EF*EB*LAMBDA))*R2 DELTA=DELTA-R4*R1 GAMMA=GAMMA-R3*R1 LAMBDA=LAMBDA+(EF*EB).conjugate()*R1 if (P > ):passelse:raise ValueError(\"\")if (DELTA > and DELTA <= and GAMMA > and GAMMA <= ):passelse:raise ValueError(\"\")", "docstring": "Fast algorithm for the solution of the modified covariance least squares normal equations.\n\n This implementation is based on [Marple]_. 
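A hedged cross-check of the two modified-covariance implementations (the Marple recursion described here and the faster least-squares modcovar described next); per their validation notes the AR parameters should agree closely at moderate orders. marple_data and modcovar are importable from spectrum per the plot example; importing modcovar_marple the same way is an assumption.

import numpy as np
from spectrum import marple_data, modcovar
from spectrum import modcovar_marple   # assumed to be exported alongside modcovar

order = 8
a_fast, e_fast = modcovar(marple_data, order)          # least-squares implementation
a_rec, e_rec, _ = modcovar_marple(marple_data, order)  # Marple recursion
print(np.ravel(a_fast)[:3])
print(np.asarray(a_rec)[:3])   # leading coefficients should agree closely at this order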
This code is far more\n complicated and slower than :func:`modcovar` function, which is now the official version.\n See :func:`modcovar` for a detailed description of Modified Covariance method.\n\n :param X: - Array of complex data samples X(1) through X(N)\n :param int IP: - Order of linear prediction model (integer)\n\n :return:\n * P - Real linear prediction variance at order IP\n * A - Array of complex linear prediction coefficients\n * ISTAT - Integer status indicator at time of exit\n 0. for normal exit (no numerical ill-conditioning)\n 1. if P is not a positive value\n 2. if DELTA' and GAMMA' do not lie in the range 0 to 1\n 3. if P' is not a positive value\n 4. if DELTA and GAMMA do not lie in the range 0 to 1\n\n\n :validation: the AR parameters are the same as those returned by\n a completely different function :func:`modcovar`.\n\n .. note:: validation. results similar to test example in Marple but\n starts to differ for ip~8. with ratio of 0.975 for ip=15 probably\n due to precision.\n\n\n :References: [Marple]_", "id": "f10919:m0"} {"signature": "def modcovar(x, order):", "body": "from spectrum import corrmtximport scipy.linalgX = corrmtx(x, order, '')Xc = np.matrix(X[:,:])X1 = np.array(X[:,])a, residues, rank, singular_values = scipy.linalg.lstsq(-Xc, X1)Cz = np.dot(X1.conj().transpose(), Xc)e = np.dot(X1.conj().transpose(), X1) + np.dot(Cz, a)assert e.imag < , ''e = float(e.real) return a, e", "docstring": "Simple and fast implementation of the covariance AR estimate\n\n This code is 10 times faster than :func:`modcovar_marple` and more importantly\n only 10 lines of code, compared to a 200 loc for :func:`modcovar_marple`\n\n :param X: Array of complex data samples\n :param int order: Order of linear prediction model\n\n :return:\n * P - Real linear prediction variance at order IP\n * A - Array of complex linear prediction coefficients\n\n\n .. plot::\n :include-source:\n :width: 80%\n\n from spectrum import modcovar, marple_data, arma2psd, cshift\n from pylab import log10, linspace, axis, plot \n\n a, p = modcovar(marple_data, 15)\n PSD = arma2psd(a)\n PSD = cshift(PSD, len(PSD)/2) # switch positive and negative freq\n plot(linspace(-0.5, 0.5, 4096), 10*log10(PSD/max(PSD)))\n axis([-0.5,0.5,-60,0])\n\n .. seealso:: :class:`~spectrum.modcovar.pmodcovar`\n\n :validation: the AR parameters are the same as those returned by\n a completely different function :func:`modcovar_marple`.\n\n\n :References: Mathworks", "id": "f10919:m1"} {"signature": "def __init__(self, data, order, NFFT=None, sampling=,scale_by_freq=False):", "body": "super(pmodcovar, self).__init__(data, ar_order=order,NFFT=NFFT, sampling=sampling,scale_by_freq=scale_by_freq)", "docstring": "**Constructor**\n\n For a detailled description of the parameters, see :func:`modcovar`.\n\n :param array data: input data (list or numpy.array)\n :param int order:\n :param int NFFT: total length of the final data sets (padded with\n zero if needed; default is 4096)\n :param float sampling: sampling frequency of the input :attr:`data`.", "id": "f10919:c0:m0"} {"signature": "def morlet(lb, ub, n):", "body": "if n <= :raise ValueError(\"\")x = numpy.linspace(lb, ub, n)psi = numpy.cos(*x) * numpy.exp(-x**/)return psi", "docstring": "r\"\"\"Generate the Morlet waveform\n\n\n The Morlet waveform is defined as follows:\n\n .. math:: w[x] = \\cos{5x} \\exp^{-x^2/2}\n\n :param lb: lower bound\n :param ub: upper bound\n :param int n: waveform data samples\n\n\n .. 
plot::\n :include-source:\n :width: 80%\n\n from spectrum import morlet\n from pylab import plot\n plot(morlet(0,10,100))", "id": "f10920:m0"} {"signature": "def chirp(t, f0=, t1=, f1=, form='', phase=):", "body": "valid_forms = ['', '', '']if form not in valid_forms:raise ValueError(\"\"% valid_forms)t = numpy.array(t)phase = * pi * phase / if form == \"\":a = pi * (f1 - f0)/t1b = * pi * f0y = numpy.cos(a * t** + b*t + phase)elif form == \"\":a = (/ * pi * (f1-f0)/t1/t1)b = * pi * f0y = numpy.cos(a*t** + b * t + phase)elif form == \"\":a = * pi * t1/numpy.log(f1-f0)b = * pi * f0x = (f1-f0)**(/t1)y = numpy.cos(a * x**t + b * t + phase)return y", "docstring": "r\"\"\"Evaluate a chirp signal at time t.\n\n A chirp signal is a frequency swept cosine wave.\n\n .. math:: a = \\pi (f_1 - f_0) / t_1\n .. math:: b = 2 \\pi f_0\n .. math:: y = \\cos\\left( \\pi\\frac{f_1-f_0}{t_1} t^2 + 2\\pi f_0 t + \\rm{phase} \\right)\n\n :param array t: times at which to evaluate the chirp signal\n :param float f0: frequency at time t=0 (Hz)\n :param float t1: time t1\n :param float f1: frequency at time t=t1 (Hz)\n :param str form: shape of frequency sweep in ['linear', 'quadratic', 'logarithmic']\n :param float phase: phase shift at t=0\n\n The parameter **form** can be:\n\n * 'linear' :math:`f(t) = (f_1-f_0)(t/t_1) + f_0`\n * 'quadratic' :math:`f(t) = (f_1-f_0)(t/t_1)^2 + f_0`\n * 'logarithmic' :math:`f(t) = (f_1-f_0)^{(t/t_1)} + f_0`\n\n Example:\n\n .. plot::\n :include-source:\n :width: 80%\n\n from spectrum import chirp\n from pylab import linspace, plot\n t = linspace(0, 1, 1000)\n y = chirp(t, form='linear')\n plot(y)\n y = chirp(t, form='quadratic')\n plot(y, 'r')", "id": "f10920:m1"} {"signature": "def mexican(lb, ub, n):", "body": "if n <= :raise ValueError(\"\")x = numpy.linspace(lb, ub, n)psi = (-x**) * (/(numpy.sqrt()*pi**)) * numpy.exp(-x**/)return psi", "docstring": "r\"\"\"Generate the mexican hat wavelet\n\n The Mexican wavelet is:\n\n .. math:: w[x] = \\cos{5x} \\exp^{-x^2/2}\n\n :param lb: lower bound\n :param ub: upper bound\n :param int n: waveform data samples\n :return: the waveform\n\n .. plot::\n :include-source:\n :width: 80%\n\n from spectrum import mexican\n from pylab import plot\n plot(mexican(0, 10, 100))", "id": "f10920:m2"} {"signature": "def meyeraux(x):", "body": "return *x**-*x**+*x**-*x**", "docstring": "r\"\"\"Compute the Meyer auxiliary function\n\n The Meyer function is\n\n .. math:: y = 35 x^4-84 x^5+70 x^6-20 x^7\n\n :param array x:\n :return: the waveform\n\n .. plot::\n :include-source:\n :width: 80%\n\n from spectrum import meyeraux\n from pylab import linspace, plot\n t = linspace(0, 1, 1000)\n plot(t, meyeraux(t))", "id": "f10920:m3"} {"signature": "def tf2zp(b,a):", "body": "from numpy import rootsassert len(b) == len(a), \"\"g = b[] / a[]z = roots(b)p = roots(a)return z, p, g", "docstring": "Convert transfer function filter parameters to zero-pole-gain form\n\n Find the zeros, poles, and gains of this continuous-time system:\n\n .. warning:: b and a must have the same length.\n\n ::\n\n\n from spectrum import tf2zp\n b = [2,3,0]\n a = [1, 0.4, 1]\n [z,p,k] = tf2zp(b,a) % Obtain zero-pole-gain form\n z =\n 1.5\n 0\n p =\n -0.2000 + 0.9798i\n -0.2000 - 0.9798i\n k =\n 2\n\n :param b: numerator\n :param a: denominator\n :param fill: If True, check that the length of a and b are the same. 
If not, create a copy of the shortest element and append zeros to it.\n :return: z (zeros), p (poles), g (gain)\n\n\n Convert transfer function f(x)=sum(b*x^n)/sum(a*x^n) to\n zero-pole-gain form f(x)=g*prod(1-z*x)/prod(1-p*x)\n\n .. todo:: See if tf2ss followed by ss2zp gives better results. These\n are available from the control system toolbox. Note that\n the control systems toolbox doesn't bother, but instead uses\n\n .. seealso:: scipy.signal.tf2zpk, which gives the same results but uses a different\n algorithm (z^-1 instead of z).", "id": "f10921:m0"} {"signature": "def eqtflength(b,a):", "body": "d = abs(len(b)-len(a))if d != :if len(a) > len(b):try:b.extend([]*d)except:b = np.append(b, []*d)elif len(b)>len(a):try:a.extend([]*d)except:a = np.append(a, []*d)return b,aelse:return b,a", "docstring": "Given two list or arrays, pad with zeros the shortest array\n\n :param b: list or array\n :param a: list or array\n\n\n .. doctest::\n\n >>> from spectrum.transfer import eqtflength\n >>> a = [1,2]\n >>> b = [1,2,3,4]\n >>> a, b, = eqtflength(a,b)", "id": "f10921:m2"} {"signature": "def tf2zpk(b, a):", "body": "import scipy.signalz,p,k = scipy.signal.tf2zpk(b, a)return z,p,k", "docstring": "Return zero, pole, gain (z,p,k) representation from a numerator,\n denominator representation of a linear filter.\n\n Convert zero-pole-gain filter parameters to transfer function form\n\n :param ndarray b: numerator polynomial.\n :param ndarray a: numerator and denominator polynomials.\n\n :return:\n * z : ndarray Zeros of the transfer function.\n * p : ndarray Poles of the transfer function.\n * k : float System gain.\n\n If some values of b are too close to 0, they are removed. In that case, a\n BadCoefficients warning is emitted.\n\n .. doctest::\n\n >>> import scipy.signal\n >>> from spectrum.transfer import tf2zpk\n >>> [b, a] = scipy.signal.butter(3.,.4)\n >>> z, p ,k = tf2zpk(b,a)\n\n .. seealso:: :func:`zpk2tf`\n .. note:: wrapper of scipy function tf2zpk", "id": "f10921:m7"} {"signature": "def ss2zpk(a,b,c,d, input=):", "body": "import scipy.signalz, p, k = scipy.signal.ss2zpk(a, b, c, d, input=input)return z, p, k", "docstring": "State-space representation to zero-pole-gain representation.\n\n :param A: ndarray State-space representation of linear system.\n :param B: ndarray State-space representation of linear system.\n :param C: ndarray State-space representation of linear system.\n :param D: ndarray State-space representation of linear system.\n :param int input: optional For multiple-input systems, the input to use.\n\n :return:\n * z, p : sequence Zeros and poles.\n * k : float System gain.\n\n .. note:: wrapper of scipy function ss2zpk", "id": "f10921:m8"} {"signature": "def zpk2tf(z, p, k):", "body": "import scipy.signalb, a = scipy.signal.zpk2tf(z, p, k)return b, a", "docstring": "r\"\"\"Return polynomial transfer function representation from zeros and poles\n\n :param ndarray z: Zeros of the transfer function.\n :param ndarray p: Poles of the transfer function.\n :param float k: System gain.\n\n :return:\n b : ndarray Numerator polynomial.\n a : ndarray Numerator and denominator polynomials.\n\n :func:`zpk2tf` forms transfer function polynomials from the zeros, poles, and gains\n of a system in factored form.\n\n zpk2tf(z,p,k) finds a rational transfer function\n\n .. math:: \\frac{B(s)}{A(s)} = \\frac{b_1 s^{n-1}+\\dots b_{n-1}s+b_n}{a_1 s^{m-1}+\\dots a_{m-1}s+a_m}\n\n given a system in factored transfer function form\n\n .. 
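A short round-trip sketch for the zero-pole-gain wrappers (tf2zpk above and zpk2tf here). Hedged: it assumes both functions live in spectrum.transfer, as the tf2zpk doctest suggests, and that they simply wrap the scipy.signal routines named in their docstrings.

import numpy as np
import scipy.signal
from spectrum.transfer import tf2zpk, zpk2tf

b, a = scipy.signal.butter(3, 0.4)
z, p, k = tf2zpk(b, a)
b2, a2 = zpk2tf(z, p, k)   # rebuild the polynomials from zeros, poles and gain
print(np.allclose(b, b2), np.allclose(a, a2))   # expected: True True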
math:: H(s) = \\frac{Z(s)}{P(s)} = k \\frac{(s-z_1)(s-z_2)\\dots(s-z_m)}{(s-p_1)(s-p_2)\\dots(s-p_n)}\n\n\n with p being the pole locations, and z the zero locations, with as many.\n The gains for each numerator transfer function are in vector k.\n The zeros and poles must be real or come in complex conjugate pairs.\n The polynomial denominator coefficients are returned in row vector a and\n the polynomial numerator coefficients are returned in matrix b, which has\n as many rows as there are columns of z.\n\n Inf values can be used as place holders in z if some columns have fewer zeros than others.\n\n .. note:: wrapper of scipy function zpk2tf", "id": "f10921:m9"} {"signature": "def zpk2ss(z, p, k):", "body": "import scipy.signalreturn scipy.signal.zpk2ss(z,p,k)", "docstring": "Zero-pole-gain representation to state-space representation\n\n :param sequence z,p: Zeros and poles.\n :param float k: System gain.\n\n :return:\n * A, B, C, D : ndarray State-space matrices.\n\n .. note:: wrapper of scipy function zpk2ss", "id": "f10921:m10"} {"signature": "def MINEIGVAL(T0, T, TOL):", "body": "M = len(T)eigval = eigvalold = eigvec = numpy.zeros(M+, dtype=complex)for k in range(,M+):eigvec[k] = +it=maxit = while abs(eigvalold-eigval)>TOL*eigvalold and itit=it+eigvalold = eigvaleig = toeplitz.HERMTOEP(T0, T, eigvec)SUM = save =+for k in range(, M+):SUM = SUM + eig[k].real**+eig[k].imag**save = save +eig[k]*eigvec[k].conjugate()SUM=/SUMeigval = save.real*SUMfor k in range(,M+):eigvec[k] = SUM * eig[k]if it==maxit:print('' % maxit)return eigval, eigvec", "docstring": "Finds the minimum eigenvalue of a Hermitian Toeplitz matrix\n\n The classical power method is used together with a fast Toeplitz\n equation solution routine. The eigenvector is normalized to unit length.\n\n :param T0: Scalar corresponding to real matrix element t(0)\n :param T: Array of M complex matrix elements t(1),...,t(M) C from the left column of the Toeplitz matrix\n :param TOL: Real scalar tolerance; routine exits when [ EVAL(k) - EVAL(k-1) ]/EVAL(k-1) < TOL , where the index k denotes the iteration number.\n\n :return:\n * EVAL - Real scalar denoting the minimum eigenvalue of matrix\n * EVEC - Array of M complex eigenvector elements associated\n\n\n .. note::\n * External array T must be dimensioned >= M\n * array EVEC must be >= M+1\n * Internal array E must be dimensioned >= M+1 . 
\n\n * **dependencies**\n * :meth:`spectrum.toeplitz.HERMTOEP`", "id": "f10922:m0"} {"signature": "def create_window(N, name=None, **kargs):", "body": "if name is None:name = ''name = name.lower()assert name in list(window_names.keys()),\"\"\"\"\"\"% (name, window_names)f = eval(window_names[name])windows_with_parameters ={'': {'': eval(window_names['']).__defaults__[]},'': {'': eval(window_names['']).__defaults__[]},'': {'': eval(window_names['']).__defaults__[]},'': {'': eval(window_names['']).__defaults__[]},'': {'': eval(window_names['']).__defaults__[]},'': {'':eval(window_names['']).__defaults__[]},'': {'':eval(window_names['']).__defaults__[]},'': {'': eval(window_names['']).__defaults__[]},'': {'':eval(window_names['']).__defaults__[]},'': {'': eval(window_names['']).__defaults__[],'': eval(window_names['']).__defaults__[]}, }if name not in list(windows_with_parameters.keys()):if len(kargs) == :w = f(N)else:raise ValueError(\"\"\"\"\"\")elif name in list(windows_with_parameters.keys()):dargs = {}for arg in list(kargs.keys()):try:default = windows_with_parameters[name][arg]except:raise ValueError(\"\"\"\"\"\" %(arg, name, list(windows_with_parameters[name].keys())))dargs[arg] = kargs.get(arg, default)w = f(N, **dargs)return w", "docstring": "r\"\"\"Returns the N-point window given a valid name\n\n :param int N: window size\n :param str name: window name (default is *rectangular*). Valid names\n are stored in :func:`~spectrum.window.window_names`.\n :param kargs: optional arguments are:\n\n * *beta*: argument of the :func:`window_kaiser` function (default is 8.6)\n * *attenuation*: argument of the :func:`window_chebwin` function (default is 50dB)\n * *alpha*: argument of the\n 1. :func:`window_gaussian` function (default is 2.5)\n 2. :func:`window_blackman` function (default is 0.16)\n 3. :func:`window_poisson` function (default is 2)\n 4. :func:`window_cauchy` function (default is 3)\n * *mode*: argument :func:`window_flattop` function (default is *symmetric*, can be *periodic*)\n * *r*: argument of the :func:`window_tukey` function (default is 0.5).\n\n The following windows have been simply wrapped from existing librairies like\n NumPy:\n\n * **Rectangular**: :func:`window_rectangle`,\n * **Bartlett** or Triangular: see :func:`window_bartlett`,\n * **Hanning** or Hann: see :func:`window_hann`,\n * **Hamming**: see :func:`window_hamming`,\n * **Kaiser**: see :func:`window_kaiser`,\n * **chebwin**: see :func:`window_chebwin`.\n\n The following windows have been implemented from scratch:\n\n * **Blackman**: See :func:`window_blackman`\n * **Bartlett-Hann** : see :func:`window_bartlett_hann`\n * **cosine or sine**: see :func:`window_cosine`\n * **gaussian**: see :func:`window_gaussian`\n * **Bohman**: see :func:`window_bohman`\n * **Lanczos or sinc**: see :func:`window_lanczos`\n * **Blackman Harris**: see :func:`window_blackman_harris`\n * **Blackman Nuttall**: see :func:`window_blackman_nuttall`\n * **Nuttall**: see :func:`window_nuttall`\n * **Tukey**: see :func:`window_tukey`\n * **Parzen**: see :func:`window_parzen`\n * **Flattop**: see :func:`window_flattop`\n * **Riesz**: see :func:`window_riesz`\n * **Riemann**: see :func:`window_riemann`\n * **Poisson**: see :func:`window_poisson`\n * **Poisson-Hanning**: see :func:`window_poisson_hanning`\n\n .. todo:: on request taylor, potter, Bessel, expo,\n rife-vincent, Kaiser-Bessel derived (KBD)\n\n .. 
plot::\n :width: 80%\n :include-source:\n\n from pylab import plot, legend\n from spectrum import create_window\n\n data = create_window(51, 'hamming')\n plot(data, label='hamming')\n data = create_window(51, 'kaiser')\n plot(data, label='kaiser')\n legend()\n\n .. plot::\n :width: 80%\n :include-source:\n\n from pylab import plot, log10, linspace, fft, clip\n from spectrum import create_window, fftshift\n\n A = fft(create_window(51, 'hamming'), 2048) / 25.5\n mag = abs(fftshift(A))\n freq = linspace(-0.5,0.5,len(A))\n response = 20 * log10(mag)\n mindB = -60\n response = clip(response,mindB,100)\n plot(freq, response)\n\n .. seealso:: :func:`window_visu`, :func:`Window`, :mod:`spectrum.dpss`", "id": "f10924:m0"} {"signature": "def enbw(data):", "body": "N = len(data)return N * np.sum(data**) / np.sum(data)**", "docstring": "r\"\"\"Computes the equivalent noise bandwidth\n\n .. math:: ENBW = N \\frac{\\sum_{n=1}^{N} w_n^2}{\\left(\\sum_{n=1}^{N} w_n \\right)^2}\n\n .. doctest::\n\n >>> from spectrum import create_window, enbw\n >>> w = create_window(64, 'rectangular')\n >>> enbw(w)\n 1.0\n\n The following table contains the ENBW values for some of the\n implemented windows in this module (with N=16384). They have been\n double checked against litterature (Source: [Harris]_, [Marple]_).\n\n If not present, it means that it has not been checked.\n\n =================== ============ =============\n name ENBW litterature\n =================== ============ =============\n rectangular 1. 1.\n triangle 1.3334 1.33\n Hann 1.5001 1.5\n Hamming 1.3629 1.36\n blackman 1.7268 1.73\n kaiser 1.7\n blackmanharris,4 2.004 2.\n riesz 1.2000 1.2\n riemann 1.32 1.3\n parzen 1.917 1.92\n tukey 0.25 1.102 1.1\n bohman 1.7858 1.79\n poisson 2 1.3130 1.3\n hanningpoisson 0.5 1.609 1.61\n cauchy 1.489 1.48\n lanczos 1.3\n =================== ============ =============", "id": "f10924:m1"} {"signature": "def _kaiser(n, beta):", "body": "from scipy.special import iv as besselIm = n - k = arange(, m)k = * beta / m * sqrt (k * (m - k))w = besselI (, k) / besselI (, beta)return w", "docstring": "Independant Kaiser window\n\n For the definition of the Kaiser window, see A. V. Oppenheim & R. W. Schafer, \"Discrete-Time Signal Processing\".\n\n The continuous version of width n centered about x=0 is:\n\n .. note:: 2 times slower than scipy.kaiser", "id": "f10924:m2"} {"signature": "def window_visu(N=, name='', **kargs):", "body": "mindB = kargs.pop('', -)maxdB = kargs.pop('', None)norm = kargs.pop('', True)w = Window(N, name, **kargs)w.plot_time_freq(mindB=mindB, maxdB=maxdB, norm=norm)", "docstring": "A Window visualisation tool\n\n :param N: length of the window\n :param name: name of the window\n :param NFFT: padding used by the FFT\n :param mindB: the minimum frequency power in dB\n :param maxdB: the maximum frequency power in dB\n :param kargs: optional arguments passed to :func:`create_window`\n\n This function plot the window shape and its equivalent in the Fourier domain.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'kaiser', beta=8.)", "id": "f10924:m3"} {"signature": "def window_rectangle(N):", "body": "return ones(N)", "docstring": "r\"\"\"Kaiser window\n\n :param N: window length\n\n .. 
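A hedged sketch reproducing a few entries of the ENBW table above with create_window and enbw; the table quotes N=16384, and the printed values should land close to 1.0, 1.3629, 1.5001 and 1.7268 respectively.

from spectrum import create_window, enbw

for name in ['rectangular', 'hamming', 'hanning', 'blackman']:
    w = create_window(16384, name)
    print(name, round(enbw(w), 4))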
plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'rectangle')", "id": "f10924:m4"} {"signature": "def window_kaiser(N, beta=, method=''):", "body": "if N == :return ones()if method == '':from numpy import kaiserreturn kaiser(N, beta)else:return _kaiser(N, beta)", "docstring": "r\"\"\"Kaiser window\n\n :param N: window length\n :param beta: kaiser parameter (default is 8.6)\n\n To obtain a Kaiser window that designs an FIR filter with\n sidelobe attenuation of :math:`\\alpha` dB, use the following :math:`\\beta` where\n :math:`\\beta = \\pi \\alpha`.\n\n .. math::\n\n w_n = \\frac{I_0\\left(\\pi\\alpha\\sqrt{1-\\left(\\frac{2n}{M}-1\\right)^2}\\right)} {I_0(\\pi \\alpha)}\n\n where\n\n * :math:`I_0` is the zeroth order Modified Bessel function of the first kind.\n * :math:`\\alpha` is a real number that determines the shape of the \n window. It determines the trade-off between main-lobe width and side \n lobe level.\n * the length of the sequence is N=M+1.\n\n The Kaiser window can approximate many other windows by varying \n the :math:`\\beta` parameter:\n\n ===== ========================\n beta Window shape\n ===== ========================\n 0 Rectangular\n 5 Similar to a Hamming\n 6 Similar to a Hanning\n 8.6 Similar to a Blackman\n ===== ========================\n\n .. plot::\n :width: 80%\n :include-source:\n\n from pylab import plot, legend, xlim\n from spectrum import window_kaiser\n N = 64\n for beta in [1,2,4,8,16]:\n plot(window_kaiser(N, beta), label='beta='+str(beta))\n xlim(0,N)\n legend()\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'kaiser', beta=8.)\n\n .. seealso:: numpy.kaiser, :func:`spectrum.window.create_window`", "id": "f10924:m5"} {"signature": "def window_blackman(N, alpha=):", "body": "a0 = ( - alpha)/a1 = a2 = alpha/if (N == ):win = array([])else:k = arange(, N)/float(N-)win = a0 - a1 * cos ( * pi * k) + a2 * cos ( * pi * k)return win", "docstring": "r\"\"\"Blackman window\n\n :param N: window length\n\n .. math:: a_0 - a_1 \\cos(\\frac{2\\pi n}{N-1}) +a_2 \\cos(\\frac{4\\pi n }{N-1})\n\n with\n\n .. math::\n\n a_0 = (1-\\alpha)/2, a_1=0.5, a_2=\\alpha/2 \\rm{\\;and\\; \\alpha}=0.16\n\n When :math:`\\alpha=0.16`, this is the unqualified Blackman window with\n :math:`a_0=0.48` and :math:`a_2=0.08`.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'blackman')\n\n .. note:: Although Numpy implements a blackman window for :math:`\\alpha=0.16`,\n this implementation is valid for any :math:`\\alpha`.\n\n .. seealso:: numpy.blackman, :func:`create_window`, :class:`Window`", "id": "f10924:m6"} {"signature": "def window_bartlett(N):", "body": "from numpy import bartlettreturn bartlett(N)", "docstring": "r\"\"\"Bartlett window (wrapping of numpy.bartlett) also known as Fejer\n\n :param int N: window length\n\n The Bartlett window is defined as\n\n .. math:: w(n) = \\frac{2}{N-1} \\left(\n \\frac{N-1}{2} - \\left|n - \\frac{N-1}{2}\\right|\n \\right)\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'bartlett')\n\n .. seealso:: numpy.bartlett, :func:`create_window`, :class:`Window`.", "id": "f10924:m7"} {"signature": "def window_hamming(N):", "body": "from numpy import hammingreturn hamming(N)", "docstring": "r\"\"\"Hamming window\n\n :param N: window length\n\n\n The Hamming window is defined as\n\n .. 
math:: 0.54 -0.46 \\cos\\left(\\frac{2\\pi n}{N-1}\\right)\n \\qquad 0 \\leq n \\leq M-1\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'hamming')\n\n .. seealso:: numpy.hamming, :func:`create_window`, :class:`Window`.", "id": "f10924:m8"} {"signature": "def window_hann(N):", "body": "from numpy import hanningreturn hanning(N)", "docstring": "r\"\"\"Hann window (or Hanning). (wrapping of numpy.bartlett)\n\n :param int N: window length\n\n The Hanning window is also known as the Cosine Bell. Usually, it is called\n Hann window, to avoid confusion with the Hamming window.\n\n .. math:: w(n) = 0.5\\left(1- \\cos\\left(\\frac{2\\pi n}{N-1}\\right)\\right)\n \\qquad 0 \\leq n \\leq M-1\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'hanning')\n\n .. seealso:: numpy.hanning, :func:`create_window`, :class:`Window`.", "id": "f10924:m9"} {"signature": "def window_gaussian(N, alpha=):", "body": "t = linspace(-(N-)/, (N-)/, N)w = exp(-*(alpha * t/(N/))**)return w", "docstring": "r\"\"\"Gaussian window\n\n :param N: window length\n\n .. math:: \\exp^{-0.5 \\left( \\sigma\\frac{n}{N/2} \\right)^2}\n\n with :math:`\\frac{N-1}{2}\\leq n \\leq \\frac{N-1}{2}`.\n\n .. note:: N-1 is used to be in agreement with octave convention. The ENBW of\n 1.4 is also in agreement with [Harris]_\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'gaussian', alpha=2.5)\n\n\n\n .. seealso:: scipy.signal.gaussian, :func:`create_window`", "id": "f10924:m10"} {"signature": "def window_chebwin(N, attenuation=):", "body": "import scipy.signalreturn scipy.signal.chebwin(N, attenuation)", "docstring": "Cheb window\n\n :param N: window length\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'chebwin', attenuation=50)\n\n .. seealso:: scipy.signal.chebwin, :func:`create_window`, :class:`Window`", "id": "f10924:m11"} {"signature": "def window_cosine(N):", "body": "if N ==:return ones()n = arange(, N)win = sin(pi*n/(N-))return win", "docstring": "r\"\"\"Cosine tapering window also known as sine window.\n\n :param N: window length\n\n .. math:: w(n) = \\cos\\left(\\frac{\\pi n}{N-1} - \\frac{\\pi}{2}\\right) = \\sin \\left(\\frac{\\pi n}{N-1}\\right)\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'cosine')\n\n .. seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m12"} {"signature": "def window_lanczos(N):", "body": "if N ==:return ones()n = linspace(-N/, N/, N)win = sinc(*n/(N-))return win", "docstring": "r\"\"\"Lanczos window also known as sinc window.\n\n :param N: window length\n\n .. math:: w(n) = sinc \\left( \\frac{2n}{N-1} - 1 \\right)\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'lanczos')\n\n .. seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m13"} {"signature": "def window_bartlett_hann(N):", "body": "if N == :return ones()n = arange(, N)a0 = a1 = a2 = win = a0 - a1 *abs(n/(N-)-) -a2 * cos(*pi*n/(N-))return win", "docstring": "r\"\"\"Bartlett-Hann window\n\n :param N: window length\n\n .. math:: w(n) = a_0 + a_1 \\left| \\frac{n}{N-1} -\\frac{1}{2}\\right| - a_2 \\cos \\left( \\frac{2\\pi n}{N-1} \\right)\n\n with :math:`a_0 = 0.62`, :math:`a_1 = 0.48` and :math:`a_2=0.38`\n\n .. 
plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'bartlett_hann')\n\n .. seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m14"} {"signature": "def _coeff4(N, a0, a1, a2, a3):", "body": "if N == :return ones()n = arange(, N)N1 = N - w = a0 -a1*cos(*pi*n / N1) + a2*cos(*pi*n / N1) - a3*cos(*pi*n / N1)return w", "docstring": "a common internal function to some window functions with 4 coeffs\n\n\n For the blackmna harris for instance, the results are identical to octave if N is odd\n but not for even values...if n =0 whatever N is, the w(0) must be equal to a0-a1+a2-a3, which\n is the case here, but not in octave...", "id": "f10924:m15"} {"signature": "def window_nuttall(N):", "body": "a0 = a1 = a2 = a3 = return _coeff4(N, a0, a1, a2, a3)", "docstring": "r\"\"\"Nuttall tapering window\n\n :param N: window length\n\n .. math:: w(n) = a_0 - a_1 \\cos\\left(\\frac{2\\pi n}{N-1}\\right)+ a_2 \\cos\\left(\\frac{4\\pi n}{N-1}\\right)- a_3 \\cos\\left(\\frac{6\\pi n}{N-1}\\right)\n\n with :math:`a_0 = 0.355768`, :math:`a_1 = 0.487396`, :math:`a_2=0.144232` and :math:`a_3=0.012604`\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'nuttall', mindB=-80)\n\n\n .. seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m16"} {"signature": "def window_blackman_nuttall(N):", "body": "a0 = a1 = a2 = a3 = return _coeff4(N, a0, a1, a2, a3)", "docstring": "r\"\"\"Blackman Nuttall window\n\n returns a minimum, 4-term Blackman-Harris window. The window is minimum in the sense that its maximum sidelobes are minimized.\n The coefficients for this window differ from the Blackman-Harris window coefficients and produce slightly lower sidelobes.\n\n :param N: window length\n\n .. math:: w(n) = a_0 - a_1 \\cos\\left(\\frac{2\\pi n}{N-1}\\right)+ a_2 \\cos\\left(\\frac{4\\pi n}{N-1}\\right)- a_3 \\cos\\left(\\frac{6\\pi n}{N-1}\\right)\n\n with :math:`a_0 = 0.3635819`, :math:`a_1 = 0.4891775`, :math:`a_2=0.1365995` and :math:`0_3=.0106411`\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'blackman_nuttall', mindB=-80)\n\n .. seealso:: :func:`spectrum.window.create_window`\n .. seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m17"} {"signature": "def window_blackman_harris(N):", "body": "a0 = a1 = a2 = a3 = return _coeff4(N, a0, a1, a2, a3)", "docstring": "r\"\"\"Blackman Harris window\n\n :param N: window length\n\n .. math:: w(n) = a_0 - a_1 \\cos\\left(\\frac{2\\pi n}{N-1}\\right)+ a_2 \\cos\\left(\\frac{4\\pi n}{N-1}\\right)- a_3 \\cos\\left(\\frac{6\\pi n}{N-1}\\right)\n\n =============== =========\n coeff value\n =============== =========\n :math:`a_0` 0.35875\n :math:`a_1` 0.48829\n :math:`a_2` 0.14128\n :math:`a_3` 0.01168\n =============== =========\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'blackman_harris', mindB=-80)\n\n .. seealso:: :func:`spectrum.window.create_window`\n .. seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m18"} {"signature": "def window_bohman(N):", "body": "x = linspace(-, , N)w = (-abs(x)) * cos(pi*abs(x)) + /pi * sin(pi*abs(x))return w", "docstring": "r\"\"\"Bohman tapering window\n\n :param N: window length\n\n .. math:: w(n) = (1-|x|) \\cos (\\pi |x|) + \\frac{1}{\\pi} \\sin(\\pi |x|)\n\n where x is a length N vector of linearly spaced values between\n -1 and 1.\n\n .. 
plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'bohman')\n\n .. seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m19"} {"signature": "def window_tukey(N, r=):", "body": "assert r>= and r<= , \"\"if N==:return ones()if r == :return ones(N)elif r == :return window_hann(N)else:from numpy import flipud, concatenate, wherex = linspace(, , N)x1 = where(x)w = *(+cos(*pi/r*(x[x1[]]-r/)))w = concatenate((w, ones(N-len(w)*), flipud(w)))return w", "docstring": "Tukey tapering window (or cosine-tapered window)\n\n :param N: window length\n :param r: defines the ratio between the constant section and the cosine\n section. It has to be between 0 and 1.\n\n The function returns a Hanning window for `r=0` and a full box for `r=1`.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'tukey')\n window_visu(64, 'tukey', r=1)\n\n .. math:: 0.5 (1+cos(2pi/r (x-r/2))) for 0<=x=1-r/2\n\n\n .. seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m20"} {"signature": "def window_parzen(N):", "body": "from numpy import where, concatenaten = linspace(-(N-)/, (N-)/, N)n1 = n[where(abs(n)<=(N-)/)[]]n2 = n[where(n>(N-)/)[]]n3 = n[where(n<-(N-)/)[]]w1 = -*(abs(n1)/(N/))** + *(abs(n1)/(N/))**w2 = *(-abs(n2)/(N/))**w3 = *(-abs(n3)/(N/))**w = concatenate((w3, w1, w2))return w", "docstring": "r\"\"\"Parsen tapering window (also known as de la Valle-Poussin)\n\n :param N: window length\n\n Parzen windows are piecewise cubic approximations\n of Gaussian windows. Parzen window sidelobes fall off as :math:`1/\\omega^4`.\n\n if :math:`0\\leq|x|\\leq (N-1)/4`:\n\n .. math:: w(n) = 1-6 \\left( \\frac{|n|}{N/2} \\right)^2 +6 \\left( \\frac{|n|}{N/2}\\right)^3\n\n if :math:`(N-1)/4\\leq|x|\\leq (N-1)/2`\n\n .. math:: w(n) = 2 \\left(1- \\frac{|n|}{N/2}\\right)^3\n\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'parzen')\n\n\n .. seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m21"} {"signature": "def window_flattop(N, mode='',precision=None):", "body": "assert mode in ['', '']t = arange(, N)if mode == '':x = *pi*t/float(N)else:if N ==:return ones()x = *pi*t/float(N-)a0 = a1 = a2 = a3 = a4 = if precision == '':d = a0 = /da1 = /da2 = /da3 = /da4 = /dw = a0-a1*cos(x)+a2*cos(*x)-a3*cos(*x)+a4*cos(*x)return w", "docstring": "r\"\"\"Flat-top tapering window\n\n Returns symmetric or periodic flat top window.\n\n :param N: window length\n :param mode: way the data are normalised. If mode is *symmetric*, then\n divide n by N-1. IF mode is *periodic*, divide by N,\n to be consistent with octave code.\n\n When using windows for filter design, the *symmetric* mode\n should be used (default). When using windows for spectral analysis, the *periodic*\n mode should be used. The mathematical form of the flat-top window in the symmetric\n case is:\n\n .. math:: w(n) = a_0\n - a_1 \\cos\\left(\\frac{2\\pi n}{N-1}\\right)\n + a_2 \\cos\\left(\\frac{4\\pi n}{N-1}\\right)\n - a_3 \\cos\\left(\\frac{6\\pi n}{N-1}\\right)\n + a_4 \\cos\\left(\\frac{8\\pi n}{N-1}\\right)\n\n ===== =============\n coeff value\n ===== =============\n a0 0.21557895\n a1 0.41663158\n a2 0.277263158\n a3 0.083578947\n a4 0.006947368\n ===== =============\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'bohman')\n\n\n .. 
seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m22"} {"signature": "def window_taylor(N, nbar=, sll=-):", "body": "B = **(-sll/)A = log(B + sqrt(B** - ))/pis2 = nbar** / (A** + (nbar - )**)ma = arange(,nbar)def calc_Fm(m):numer = (-)**(m+) * prod(-m**/s2/(A** + (ma - )**))denom = * prod([ -m**/j** for j in ma if j != m])return numer/denomFm = array([calc_Fm(m) for m in ma])def W(n):return * np.sum(Fm * cos(*pi*ma*(n-N/ + /)/N)) + w = array([W(n) for n in range(N)])scale = W((N-)/)w /= scalereturn w", "docstring": "Taylor tapering window\n\n Taylor windows allows you to make tradeoffs between the\n mainlobe width and sidelobe level (sll).\n\n Implemented as described by Carrara, Goodman, and Majewski \n in 'Spotlight Synthetic Aperture Radar: Signal Processing Algorithms'\n Pages 512-513\n\n :param N: window length\n :param float nbar:\n :param float sll:\n\n The default values gives equal height\n sidelobes (nbar) and maximum sidelobe level (sll).\n\n .. warning:: not implemented\n\n .. seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m23"} {"signature": "def window_riesz(N):", "body": "n = linspace(-N/, (N)/, N)w = - abs(n/(N/))**return w", "docstring": "r\"\"\"Riesz tapering window\n\n :param N: window length\n\n .. math:: w(n) = 1 - \\left| \\frac{n}{N/2} \\right|^2\n\n with :math:`-N/2 \\leq n \\leq N/2`.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'riesz')\n\n .. seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m24"} {"signature": "def window_riemann(N):", "body": "n = linspace(-N/, (N)/, N)w = sin(n/float(N)**pi) / (n / float(N)**pi)return w", "docstring": "r\"\"\"Riemann tapering window\n\n :param int N: window length\n\n .. math:: w(n) = 1 - \\left| \\frac{n}{N/2} \\right|^2\n\n with :math:`-N/2 \\leq n \\leq N/2`.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'riesz')\n\n .. seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m25"} {"signature": "def window_poisson(N, alpha=):", "body": "n = linspace(-N/, (N)/, N)w = exp(-alpha * abs(n)/(N/))return w", "docstring": "r\"\"\"Poisson tapering window\n\n :param int N: window length\n\n .. math:: w(n) = \\exp^{-\\alpha \\frac{|n|}{N/2} }\n\n with :math:`-N/2 \\leq n \\leq N/2`.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'poisson')\n window_visu(64, 'poisson', alpha=3)\n window_visu(64, 'poisson', alpha=4)\n\n .. seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m26"} {"signature": "def window_poisson_hanning(N, alpha=):", "body": "w1 = window_hann(N)w2 = window_poisson(N, alpha=alpha)return w1*w2", "docstring": "r\"\"\"Hann-Poisson tapering window\n\n This window is constructed as the product of the Hanning and Poisson\n windows. The parameter **alpha** is the Poisson parameter.\n\n :param int N: window length\n :param float alpha: parameter of the poisson window\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'poisson_hanning', alpha=0.5)\n window_visu(64, 'poisson_hanning', alpha=1)\n window_visu(64, 'poisson_hanning')\n\n .. 
seealso:: :func:`window_poisson`, :func:`window_hann`", "id": "f10924:m27"} {"signature": "def window_cauchy(N, alpha=):", "body": "n = linspace(-N/, (N)/, N)w = /(+ (alpha*n/(N/))**)return w", "docstring": "r\"\"\"Cauchy tapering window\n\n :param int N: window length\n :param float alpha: parameter of the poisson window\n\n .. math:: w(n) = \\frac{1}{1+\\left(\\frac{\\alpha*n}{N/2}\\right)**2}\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import window_visu\n window_visu(64, 'cauchy', alpha=3)\n window_visu(64, 'cauchy', alpha=4)\n window_visu(64, 'cauchy', alpha=5)\n\n\n .. seealso:: :func:`window_poisson`, :func:`window_hann`", "id": "f10924:m28"} {"signature": "def __init__(self, N, name=None, norm=True, **kargs):", "body": "assert N> , \"\"if name is None or name not in list(window_names.keys()):raise ValueError(\"\" %list(window_names.keys()))self.__N = Nself.__name = nameself.__norm = normself.__data = create_window(N, name, **kargs)self.__frequencies = Noneself.__response = Noneself.__enbw = enbw(self.data)", "docstring": "Create a tapering window object\n\n :param N: the window length\n :param name: the type of window, e.g., 'Hann'\n :param norm: normalise the window in frequency domain (for plotting)\n :param kargs: any of :func:`create_window` valid optional arguments.\n\n\n .. rubric:: Attributes:\n\n * data: time series data\n * frequencies: getter to the frequency series\n * response: getter to the PSD\n * enbw: getter to the Equivalent noise band width.", "id": "f10924:c0:m0"} {"signature": "def compute_response(self, **kargs):", "body": "from numpy.fft import fft, fftshiftnorm = kargs.get('', self.norm)NFFT = kargs.get('', )if NFFT < len(self.data):NFFT = self.data.size * A = fft(self.data, NFFT)mag = abs(fftshift(A))if norm is True:mag = mag / max(mag)response = * stools.log10(mag) self.__response = response", "docstring": "Compute the window data frequency response\n\n :param norm: True by default. normalised the frequency data.\n :param int NFFT: total length of the final data sets( 2048 by default. \n if less than data length, then NFFT is set to the data length*2).\n\n The response is stored in :attr:`response`.\n\n .. note:: Units are dB (20 log10) since we plot the frequency response)", "id": "f10924:c0:m9"} {"signature": "def plot_frequencies(self, mindB=None, maxdB=None, norm=True):", "body": "from pylab import plot, title, xlim, grid, ylim, xlabel, ylabelself.compute_response(norm=norm)plot(self.frequencies, self.response)title(\"\" % (self.enbw))ylabel('')xlabel('')xlim(-, )y0, y1 = ylim()if mindB:y0 = mindBif maxdB is not None:y1 = maxdBelse:y1 = max(self.response)ylim(y0, y1)grid(True)", "docstring": "Plot the window in the frequency domain\n\n :param mindB: change the default lower y bound\n :param maxdB: change the default upper lower bound\n :param bool norm: if True, normalise the frequency response.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum.window import Window\n w = Window(64, name='hamming')\n w.plot_frequencies()", "id": "f10924:c0:m10"} {"signature": "def plot_window(self):", "body": "from pylab import plot, xlim, grid, title, ylabel, axisx = linspace(, , self.N)xlim(, )plot(x, self.data)grid(True)title('' % (self.name.capitalize(), self.N))ylabel('')axis([, , , ])", "docstring": "Plot the window in the time domain\n\n .. 
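A non-plotting usage sketch for the Window object documented just above (hedged: it relies only on the attributes listed in the constructor docstring — data, response, enbw — and on compute_response accepting an NFFT keyword as its docstring states).

from spectrum.window import Window

w = Window(64, name='hamming')
w.compute_response(NFFT=2048)          # fill the frequency response (in dB)
print(round(w.enbw, 3))                # equivalent noise bandwidth of the taper
print(len(w.data), len(w.response))    # 64 time samples, 2048 response samples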
plot::\n :width: 80%\n :include-source:\n\n from spectrum.window import Window\n w = Window(64, name='hamming')\n w.plot_window()", "id": "f10924:c0:m11"} {"signature": "def plot_time_freq(self, mindB=-, maxdB=None, norm=True,yaxis_label_position=\"\"):", "body": "from pylab import subplot, gcasubplot(, , )self.plot_window()subplot(, , )self.plot_frequencies(mindB=mindB, maxdB=maxdB, norm=norm)if yaxis_label_position==\"\":try: tight_layout()except: passelse:ax = gca()ax.yaxis.set_label_position(\"\")", "docstring": "Plotting method to plot both time and frequency domain results.\n\n See :meth:`plot_frequencies` for the optional arguments.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum.window import Window\n w = Window(64, name='hamming')\n w.plot_time_freq()", "id": "f10924:c0:m12"} {"signature": "def info(self):", "body": "print(self)", "docstring": "Print object information such as length and name", "id": "f10924:c0:m13"} {"signature": "def ac2poly(data):", "body": "a, e, _c = LEVINSON(data)a = numpy.insert(a, , )return a, e", "docstring": "Convert autocorrelation sequence to prediction polynomial\n\n :param array data: input data (list or numpy.array)\n :return:\n * AR parameters\n * noise variance\n\n This is an alias to::\n\n a, e, c = LEVINSON(data)\n\n :Example:\n\n .. doctest::\n\n >>> from spectrum import ac2poly\n >>> from numpy import array\n >>> r = [5, -2, 1.01]\n >>> ar, e = ac2poly(r)\n >>> ar\n array([ 1. , 0.38, -0.05])\n >>> e\n 4.1895000000000007", "id": "f10925:m0"} {"signature": "def ac2rc(data):", "body": "a, e, _c = LEVINSON(data)return a, data[]", "docstring": "Convert autocorrelation sequence to reflection coefficients\n\n :param data: an autorrelation vector\n :return: the reflection coefficient and data[0]\n\n This is an alias to::\n\n a, e, c = LEVINSON(data)\n c, data[0]", "id": "f10925:m1"} {"signature": "def poly2ac(poly, efinal):", "body": "results = rlevinson(poly, efinal)return results[]", "docstring": "Convert prediction filter polynomial to autocorrelation sequence\n\n :param array poly: the AR parameters\n :param efinal: an estimate of the final error\n :return: the autocorrelation sequence in complex format.\n\n .. doctest::\n\n >>> from numpy import array\n >>> from spectrum import poly2ac\n >>> poly = [ 1. 
, 0.38 , -0.05]\n >>> efinal = 4.1895\n >>> poly2ac(poly, efinal)\n array([ 5.00+0.j, -2.00+0.j, 1.01-0.j])", "id": "f10925:m2"} {"signature": "def ar2rc(ar):", "body": "raise NotImplementedError", "docstring": "Convert autoregressive parameters into reflection coefficients", "id": "f10925:m3"} {"signature": "def poly2rc(a, efinal):", "body": "results = rlevinson(a, efinal)return results[]", "docstring": "Convert prediction filter polynomial to reflection coefficients\n\n :param a: AR parameters\n :param efinal:", "id": "f10925:m4"} {"signature": "def rc2poly(kr, r0=None):", "body": "from .levinson import levupp = len(kr) a = numpy.array([, kr[]]) e = numpy.zeros(len(kr))if r0 is None:e0 = else:e0 = r0e[] = e0 * ( - numpy.conj(numpy.conjugate(kr[])*kr[]))for k in range(, p):[a, e[k]] = levup(a, kr[k], e[k-])efinal = e[-]return a, efinal", "docstring": "convert reflection coefficients to prediction filter polynomial\n\n :param k: reflection coefficients", "id": "f10925:m5"} {"signature": "def rc2ac(k, R0):", "body": "[a,efinal] = rc2poly(k, R0)R, u, kr, e = rlevinson(a, efinal)return R", "docstring": "Convert reflection coefficients to autocorrelation sequence.\n\n :param k: reflection coefficients\n :param R0: zero-lag autocorrelation\n :returns: the autocorrelation sequence\n\n .. seealso:: :func:`ac2rc`, :func:`poly2rc`, :func:`ac2poly`, :func:`poly2rc`, :func:`rc2poly`.", "id": "f10925:m6"} {"signature": "def is2rc(inv_sin):", "body": "return numpy.sin(numpy.array(inv_sin)*numpy.pi/)", "docstring": "Convert inverse sine parameters to reflection coefficients.\n\n :param inv_sin: inverse sine parameters\n :return: reflection coefficients\n\n .. seealso:: :func:`rc2is`, :func:`poly2rc`, :func:`ac2rc`, :func:`lar2rc`.\n\n :Reference: J.R. Deller, J.G. Proakis, J.H.L. Hansen, \n \"Discrete-Time Processing of Speech Signals\", Prentice Hall, Section 7.4.5.", "id": "f10925:m7"} {"signature": "def rc2is(k):", "body": "assert numpy.isrealobj(k), ''if max(numpy.abs(k)) >= :raise ValueError('')return (/numpy.pi)*numpy.arcsin(k)", "docstring": "Convert reflection coefficients to inverse sine parameters.\n\n :param k: reflection coefficients\n :return: inverse sine parameters\n\n .. seealso:: :func:`is2rc`, :func:`rc2poly`, :func:`rc2acC`, :func:`rc2lar`.\n\n Reference: J.R. Deller, J.G. Proakis, J.H.L. Hansen, \"Discrete-Time\n Processing of Speech Signals\", Prentice Hall, Section 7.4.5.", "id": "f10925:m8"} {"signature": "def rc2lar(k):", "body": "assert numpy.isrealobj(k), ''if max(numpy.abs(k)) >= :raise ValueError('')return - * numpy.arctanh(-numpy.array(k))", "docstring": "Convert reflection coefficients to log area ratios.\n\n :param k: reflection coefficients\n :return: inverse sine parameters\n\n The log area ratio is defined by G = log((1+k)/(1-k)) , where the K\n parameter is the reflection coefficient.\n\n .. seealso:: :func:`lar2rc`, :func:`rc2poly`, :func:`rc2ac`, :func:`rc2ic`.\n\n :References:\n [1] J. Makhoul, \"Linear Prediction: A Tutorial Review,\" Proc. IEEE, Vol.63, No.4, pp.561-580, Apr 1975.", "id": "f10925:m9"} {"signature": "def lar2rc(g):", "body": "assert numpy.isrealobj(g), ''return -numpy.tanh(-numpy.array(g)/)", "docstring": "Convert log area ratios to reflection coefficients.\n\n :param g: log area ratios\n :returns: the reflection coefficients\n\n .. seealso: :func:`rc2lar`, :func:`poly2rc`, :func:`ac2rc`, :func:`is2rc`.\n\n :References:\n [1] J. Makhoul, \"Linear Prediction: A Tutorial Review,\" Proc. 
IEEE, Vol.63, No.4, pp.561-580, Apr 1975.", "id": "f10925:m10"} {"signature": "def lsf2poly(lsf):", "body": "lsf = numpy.array(lsf)if max(lsf) > numpy.pi or min(lsf) < :raise ValueError('')p = len(lsf) z = numpy.exp( * lsf)rQ = z[::]rP = z[::]rQ = numpy.concatenate((rQ, rQ.conjugate()))rP = numpy.concatenate((rP, rP.conjugate()))Q = numpy.poly(rQ);P = numpy.poly(rP);if p%:P1 = numpy.convolve(P, [, , -])Q1 = Qelse:P1 = numpy.convolve(P, [, -])Q1 = numpy.convolve(Q, [, ])a = * (P1+Q1)return a[:-:]", "docstring": "Convert line spectral frequencies to prediction filter coefficients\n\n returns a vector a containing the prediction filter coefficients from a vector lsf of line spectral frequencies.\n\n .. doctest::\n\n >>> from spectrum import lsf2poly\n >>> lsf = [0.7842 , 1.5605 , 1.8776 , 1.8984, 2.3593]\n >>> a = lsf2poly(lsf)\n\n # array([ 1.00000000e+00, 6.14837835e-01, 9.89884967e-01,\n # 9.31594056e-05, 3.13713832e-03, -8.12002261e-03 ])\n\n .. seealso:: poly2lsf, rc2poly, ac2poly, rc2is", "id": "f10925:m11"} {"signature": "def poly2lsf(a):", "body": "a = numpy.array(a)if a[] != :a/=a[]if max(numpy.abs(numpy.roots(a))) >= :error('');p = len(a)- a1 = numpy.concatenate((a, numpy.array([])))a2 = a1[-::-]P1 = a1 - a2 Q1 = a1 + a2 if p%: P, r = deconvolve(P1,[, ,-])Q = Q1else: P, r = deconvolve(P1, [, -])Q, r = deconvolve(Q1, [, ])rP = numpy.roots(P)rQ = numpy.roots(Q)aP = numpy.angle(rP[::])aQ = numpy.angle(rQ[::])lsf = sorted(numpy.concatenate((-aP,-aQ)))return lsf", "docstring": "Prediction polynomial to line spectral frequencies.\n\n converts the prediction polynomial specified by A,\n into the corresponding line spectral frequencies, LSF.\n normalizes the prediction polynomial by A(1).\n\n .. doctest::\n\n >>> from spectrum import poly2lsf\n >>> a = [1.0000, 0.6149, 0.9899, 0.0000 ,0.0031, -0.0082]\n >>> lsf = poly2lsf(a)\n >>> lsf = array([0.7842, 1.5605, 1.8776, 1.8984, 2.3593])\n\n .. seealso:: lsf2poly, poly2rc, poly2qc, rc2is", "id": "f10925:m12"} {"signature": "def AIC(N, rho, k):", "body": "from numpy import log, arrayres = N * log(array(rho)) + * (array(k)+)return res", "docstring": "r\"\"\"Akaike Information Criterion\n\n :param rho: rho at order k\n :param N: sample size\n :param k: AR order.\n\n If k is the AR order and N the size of the sample, then Akaike criterion is\n\n .. math:: AIC(k) = \\log(\\rho_k) + 2\\frac{k+1}{N}\n\n ::\n\n AIC(64, [0.5,0.3,0.2], [1,2,3])\n\n :validation: double checked versus octave.", "id": "f10926:m0"} {"signature": "def AICc(N, rho, k, norm=True):", "body": "from numpy import log, arrayp = k res = log(rho) + * (p+) / (N-p-)return res", "docstring": "r\"\"\"corrected Akaike information criterion\n\n .. math:: AICc(k) = log(\\rho_k) + 2 \\frac{k+1}{N-k-2}\n\n\n :validation: double checked versus octave.", "id": "f10926:m1"} {"signature": "def KIC(N, rho, k):", "body": "from numpy import log, arrayres = log(rho) + * (k+) /float(N)return res", "docstring": "r\"\"\"Kullback information criterion\n\n .. math:: KIC(k) = log(\\rho_k) + 3 \\frac{k+1}{N}\n\n :validation: double checked versus octave.", "id": "f10926:m2"} {"signature": "def AKICc(N, rho, k):", "body": "from numpy import log, arrayp = kres = log(rho) + p/N/(N-p) + (-(p+)/N) * (p+) / (N-p-)return res", "docstring": "r\"\"\"approximate corrected Kullback information\n\n .. 
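A round-trip sketch for the line-spectral-frequency conversions documented above, reusing the polynomial from their doctests (hedged; the imports follow the doctests).

import numpy as np
from spectrum import poly2lsf, lsf2poly

a = [1.0000, 0.6149, 0.9899, 0.0000, 0.0031, -0.0082]
lsf = poly2lsf(a)          # prediction polynomial -> line spectral frequencies
a_back = lsf2poly(lsf)     # and back again
print(np.allclose(a, a_back, atol=1e-3))   # expected: True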
math:: AKICc(k) = log(rho_k) + \\frac{p}{N*(N-k)} + (3-\\frac{k+2}{N})*\\frac{k+1}{N-k-2}", "id": "f10926:m3"} {"signature": "def FPE(N,rho, k=None):", "body": "fpe = rho * (N + k + ) / (N- k -)return fpe", "docstring": "r\"\"\"Final prediction error criterion\n\n .. math:: FPE(k) = \\frac{N + k + 1}{N - k - 1} \\rho_k\n\n :validation: double checked versus octave.", "id": "f10926:m4"} {"signature": "def MDL(N, rho, k):", "body": "from numpy import logmdl = N* log(rho) + k * log(N)return mdl", "docstring": "r\"\"\"Minimum Description Length\n\n .. math:: MDL(k) = N log \\rho_k + p \\log N\n\n :validation: results", "id": "f10926:m5"} {"signature": "def CAT(N, rho, k):", "body": "from numpy import zeros, arangecat = zeros(len(rho))for p in arange(, len(rho)+):rho_p = float(N)/(N-p)*rho[p-]s = for j in range(, p+):rho_j = float(N)/(N-j)*rho[j-]s = s + /rho_jcat[p-] = s/float(N) - /rho_preturn cat", "docstring": "r\"\"\"Criterion Autoregressive Transfer Function :\n\n .. math:: CAT(k) = \\frac{1}{N} \\sum_{i=1}^k \\frac{1}{\\rho_i} - \\frac{\\rho_i}{\\rho_k}\n\n .. todo:: validation", "id": "f10926:m6"} {"signature": "def aic_eigen(s, N):", "body": "import numpy as npkaic = []n = len(s)for k in range(, n-):ak = /(n-k) * np.sum(s[k+:])gk = np.prod(s[k+:]**(/(n-k)))kaic.append( -*(n-k)*N * np.log(gk/ak) + *k*(*n-k))return kaic", "docstring": "r\"\"\"AIC order-selection using eigen values\n\n :param s: a list of `p` sorted eigen values\n :param N: the size of the input data. To be defined precisely.\n\n :return:\n * an array containing the AIC values\n\n Given :math:`n` sorted eigen values :math:`\\lambda_i` with\n :math:`0 <= i < n`, the proposed criterion from Wax and Kailath (1985)\n is:\n\n .. math:: AIC(k) = -2(n-k)N \\ln \\frac{g(k)}{a(k)} + 2k(2n-k)\n\n where the arithmetic sum :math:`a(k)` is:\n\n .. math:: a(k) = \\sum_{i=k+1}^{n}\\lambda_i\n\n and the geometric sum :math:`g(k)` is:\n\n .. math:: g(k) = \\prod_{i=k+1}^{n} \\lambda_i^{-(n-k)}\n\n The number of relevant sinusoids in the signal subspace is determined by\n selecting the minimum of `AIC`.\n\n .. seealso:: :func:`~spectrum.eigenfreq.eigen`\n .. todo:: define precisely the input parameter N. Should be the input\n data length but when using correlation matrix (SVD), I suspect it\n should be the length of the correlation matrix rather than the\n original data.\n\n :References:\n * [Marple]_ Chap 13,\n * [Wax]_", "id": "f10926:m7"} {"signature": "def mdl_eigen(s, N):", "body": "import numpy as npkmdl = []n = len(s)for k in range(, n-):ak = /(n-k) * np.sum(s[k+:])gk = np.prod(s[k+:]**(/(n-k)))kmdl.append( -(n-k)*N * np.log(gk/ak) + *k*(*n-k)*np.log(N))return kmdl", "docstring": "r\"\"\"MDL order-selection using eigen values\n\n :param s: a list of `p` sorted eigen values\n :param N: the size of the input data. To be defined precisely.\n\n :return:\n * an array containing the AIC values\n\n .. math:: MDL(k) = (n-k)N \\ln \\frac{g(k)}{a(k)} + 0.5k(2n-k) log(N)\n\n .. 
seealso:: :func:`aic_eigen` for details\n\n :References:\n * [Marple]_ Chap 13,\n * [Wax]_", "id": "f10926:m8"} {"signature": "def __init__(self, name, N):", "body": "self.__name = Noneself.name = nameself.__N = Nself.__rho = self.__k = Noneself.__old_data = Noneself.__data = Noneself.__norm = True", "docstring": "Create a criteria object\n\n :param name: a string or list of strings containing valid criteria\n method's name\n :param int N: size of the data sample.", "id": "f10926:c0:m0"} {"signature": "def __call__(self, rho=None, k=None, N=None, norm=True):", "body": "self.__norm = normif N is not None:self.N = Nif rho is not None:self.rho = rhoif k is not None:self.__k = kself.__norm = normf = eval(self.name)self.data = f(self.N, self.rho, self.k)if self.old_data is not None and self.data is not None:if self.data > self.old_data:return Falseelse:return Truereturn True", "docstring": "Call the criteria function correspondign to :attr:`name`.", "id": "f10926:c0:m11"} {"signature": "def __init__(self, N, sampling=):", "body": "self.__N = Nself.__sampling = samplingself.__df = Noneself._setN(N)self._setsampling(sampling)", "docstring": ".. rubric:: **Constructor**\n\n :param int N: the data length\n :param float sampling: sampling frequency of the input :attr:`data`.\n\n\n .. rubric:: Attributes:\n\n From the input parameters, read/write attributes are set:\n\n * :attr:`N`, the data length,\n * :attr:`sampling`, the sampling frequency.\n\n Additionally, the following read-only attribute is available:\n\n * :attr:`df`, the frequency step computed from :attr:`N` and\n :attr:`sampling`.", "id": "f10927:c0:m0"} {"signature": "def centerdc_gen(self):", "body": "for a in range(, self.N):yield (a-self.N/) * self.df", "docstring": "Return the centered frequency range as a generator.\n\n ::\n\n >>> print(list(Range(8).centerdc_gen()))\n [-0.5, -0.375, -0.25, -0.125, 0.0, 0.125, 0.25, 0.375]", "id": "f10927:c0:m6"} {"signature": "def twosided_gen(self):", "body": "for a in range(, self.N):yield a * self.df", "docstring": "Returns the twosided frequency range as a generator\n\n ::\n\n >>> print(list(Range(8).centerdc_gen()))\n [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875]", "id": "f10927:c0:m7"} {"signature": "def onesided_gen(self):", "body": "if self.N % == :for n in range(, self.N// + ):yield n * self.dfelse:for n in range(, (self.N+)//):yield n * self.df", "docstring": "Return the one-sided frequency range as a generator.\n\n If :attr:`N` is even, the length is N/2 + 1.\n If :attr:`N` is odd, the length is (N+1)/2.\n\n ::\n\n >>> print(list(Range(8).onesided()))\n [0.0, 0.125, 0.25, 0.375, 0.5]\n >>> print(list(Range(9).onesided()))\n [0.0, 0.1111, 0.2222, 0.3333, 0.4444]", "id": "f10927:c0:m8"} {"signature": "def onesided(self):", "body": "return list(self.onesided_gen())", "docstring": "Return the one-sided frequency range as a list (see\n :meth:`onesided_gen` for details).", "id": "f10927:c0:m9"} {"signature": "def twosided(self):", "body": "return list(self.twosided_gen())", "docstring": "Return the two-sided frequency range as a list (see\n :meth:`twosided_gen` for details).", "id": "f10927:c0:m10"} {"signature": "def centerdc(self):", "body": "return list(self.centerdc_gen())", "docstring": "Return the two-sided frequency range as a list (see\n :meth:`centerdc_gen` for details).", "id": "f10927:c0:m11"} {"signature": "def __init__(self, data, data_y=None, sampling=,detrend=None, scale_by_freq=True, NFFT=None):", "body": "self.__data = Noneself.__data_y = Noneself.__sampling = 
Noneself.__detrend = Noneself.__scale_by_freq = Noneself.__sides = Noneself.__N = Noneself.__NFFT = Noneself.__df = Noneself.__datatype = Noneself.__psd = Noneself.__method = Noneself.data = dataif data_y is not None:self.data_y = data_yself.sampling = samplingself.sides = ''self._range = Range(self.__data.size, sampling) self.modified = Trueself.sampling = samplingself.scale_by_freq = scale_by_freqself.NFFT = NFFTself.method = self.__class__", "docstring": "**Constructor**\n\n .. rubric:: Attributes:\n\n From the input parameters, the following attributes are set:\n\n * :attr:`data` (updates :attr:`N`, :attr:`df`, :attr:`datatype`)\n * :attr:`data_y` used for cross PSD only (correlogram)\n * :attr:`detrend`\n * :attr:`sampling` (updates :attr:`df`)\n * :attr:`scale_by_freq`\n * :attr:`NFFT` (reset :attr:`sides`, :attr:`df`)\n\n The following read-only attributes are set during the initialisation:\n\n * :attr:`datatype`\n * :attr:`df`\n * :attr:`N`\n\n And finally, additional read-write attributes are available:\n\n * :attr:`psd`: used to store the PSD data array, which size depends\n on :attr:`sides` i.e., one-sided for real data and two-sided for\n the complex data.\n * :attr:`sides`: if set, changed the :attr:`psd`.", "id": "f10927:c1:m0"} {"signature": "def frequencies(self, sides=None):", "body": "if sides is None:sides = self.sidesif sides not in self._sides_choices:raise errors.SpectrumChoiceError(sides, self._sides_choices)if sides == '':return self._range.onesided()if sides == '':return self._range.twosided()if sides == '':return self._range.centerdc()", "docstring": "Return the frequency vector according to :attr:`sides`", "id": "f10927:c1:m26"} {"signature": "def get_converted_psd(self, sides):", "body": "if sides == self.sides:return self.__psdif self.datatype == '':assert sides != '',\"\"if self.sides == '':logging.debug('')if sides == '':logging.debug('')newpsd = numpy.concatenate((self.psd[:-]/, list(reversed(self.psd[:-]/))))newpsd[-] = self.psd[-]newpsd[] *= elif sides == '':logging.debug('')P0 = self.psd[]P1 = self.psd[-]newpsd = numpy.concatenate((self.psd[-::-]/, self.psd[:-]/))newpsd[] = P1elif self.sides == '':logging.debug('')if sides == '':logging.debug('')midN = (len(self.psd)-) / newpsd = numpy.array(self.psd[:int(midN)+]*)newpsd[] /= newpsd[-] = self.psd[-]elif sides == '':newpsd = stools.twosided_2_centerdc(self.psd)elif self.sides == '': logging.debug('')if sides == '':logging.debug('')midN = int(len(self.psd) / )P1 = self.psd[]newpsd = numpy.append(self.psd[midN:]*, P1)elif sides == '':newpsd = stools.centerdc_2_twosided(self.psd)else:raise ValueError(\"\")return newpsd", "docstring": "This function returns the PSD in the **sides** format\n\n :param str sides: the PSD format in ['onesided', 'twosided', 'centerdc']\n :return: the expected PSD.\n\n .. doctest::\n\n from spectrum import *\n p = pcovar(marple_data, 15)\n centerdc_psd = p.get_converted_psd('centerdc')\n\n .. note:: this function does not change the object, in particular, it\n does not change the :attr:`psd` attribute. 
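(Added illustration, not part of the original docstring: a minimal numpy sketch of the one-sided/two-sided bookkeeping this conversion performs, assuming a real signal so the two-sided PSD is symmetric; the numbers mirror the twosided_2_onesided example given later in this file.)

    import numpy
    two = numpy.array([10., 2., 3., 3., 2., 8.])   # hypothetical two-sided PSD, N = 6
    one = two[:len(two)//2 + 1] * 2                 # keep DC..Nyquist, double the interior bins
    one[0] /= 2                                     # the DC bin is not duplicated in the two-sided form
    one[-1] = two[-1]                               # neither is the Nyquist bin
    # one -> array([10., 4., 6., 8.]); the total power (28) is preserved
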
If you want to change\n the psd on the fly, change the attribute :attr:`sides`.", "id": "f10927:c1:m27"} {"signature": "def plot(self, filename=None, norm=False, ylim=None,sides=None, **kargs):", "body": "import pylabfrom pylab import ylim as plt_ylim_ = self.psdif sides is not None:if sides not in self._sides_choices:raise errors.SpectrumChoiceError(sides, self._sides_choices)if sides is None or sides == self.sides:frequencies = self.frequencies()psd = self.psdsides = self.sideselif sides is not None:if self.datatype == '':if sides == '':raise ValueError(\"\")logging.debug(\"\")frequencies = self.frequencies(sides=sides)psd = self.get_converted_psd(sides)if len(psd) != len(frequencies):raise ValueError(\"\" % (len(psd), len(frequencies)))if '' in list(kargs.keys()):save_ax = pylab.gca()pylab.sca(kargs[''])rollback = Truedel kargs['']else:rollback = Falseif norm:pylab.plot(frequencies, * stools.log10(psd/max(psd)), **kargs)else:pylab.plot(frequencies, * stools.log10(psd),**kargs)pylab.xlabel('')pylab.ylabel('')pylab.grid(True)if ylim:plt_ylim(ylim)if sides == '':pylab.xlim(, self.sampling/)elif sides == '':pylab.xlim(, self.sampling)elif sides == '':pylab.xlim(-self.sampling/, self.sampling/)if filename:pylab.savefig(filename)if rollback:pylab.sca(save_ax)del psd, frequencies", "docstring": "a simple plotting routine to plot the PSD versus frequency.\n\n :param str filename: save the figure into a file\n :param norm: False by default. If True, the PSD is normalised.\n :param ylim: readjust the y range .\n :param sides: if not provided, :attr:`sides` is used. See :attr:`sides`\n for details.\n :param kargs: any optional argument accepted by :func:`pylab.plot`.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import *\n p = Periodogram(marple_data)\n p.plot(norm=True, marker='o')", "id": "f10927:c1:m28"} {"signature": "def power(self):", "body": "if self.scale_by_freq == False:return sum(self.psd) * len(self.psd)else:return sum(self.psd) * self.df/(*numpy.pi)", "docstring": "r\"\"\"Return the power contained in the PSD\n\n if scale_by_freq is False, the power is:\n\n .. math:: P = N \\sum_{k=1}^{N} P_{xx}(k)\n\n else, it is\n\n .. math:: P = \\sum_{k=1}^{N} P_{xx}(k) \\frac{df}{2\\pi}\n\n .. todo:: check these equations", "id": "f10927:c1:m29"} {"signature": "def __init__(self, data, sampling=, ar_order=None, ma_order=None,lag=-, NFFT=None, detrend=None, scale_by_freq=True):", "body": "super(ParametricSpectrum, self).__init__(data, sampling=sampling,NFFT=NFFT,scale_by_freq=scale_by_freq,detrend=detrend)if ar_order is None and ma_order is None:raise errors.SpectrumARMAErrorself.__ar_order = ar_orderself.__ma_order = ma_orderself.ar_order = ar_orderself.ma_order = ma_orderself.lag = lagself.__ar = Noneself.__ma = Noneself.__reflection = Noneself.__rho = None", "docstring": "**Constructor**\n\n See the class documentation for the parameters.\n\n .. rubric:: Additional attributes to those inherited\n from :class:`Spectrum`:\n\n * :attr:`ar_order`, the ar order of the PSD estimates\n * :attr:`ma_order`, the ar order of the PSD estimates", "id": "f10927:c2:m0"} {"signature": "def __init__(self, data, sampling=,window='', NFFT=None, detrend=None,scale_by_freq=True, lag=-):", "body": "super(FourierSpectrum, self).__init__(data,sampling=sampling, detrend=detrend,scale_by_freq=scale_by_freq, NFFT=NFFT)self.__window = Noneself.__lag = Noneself.lag = lagself.window = window", "docstring": "**Constructor**\n\n See the class documentation for the parameters.\n\n .. 
rubric:: Additional attributes to those inherited from\n\n :class:`Spectrum` are:\n\n * :attr:`lag`, a lag used to compute the autocorrelation\n * :attr:`window`, the tapering window to be used", "id": "f10927:c3:m0"} {"signature": "def periodogram(self):", "body": "from .periodogram import speriodogrampsd = speriodogram(self.data, window=self.window, sampling=self.sampling,NFFT=self.NFFT, scale_by_freq=self.scale_by_freq,detrend=self.detrend)self.psd = psd", "docstring": "An alias to :class:`~spectrum.periodogram.Periodogram`\n\n The parameters are extracted from the attributes. Relevant attributes\n ares :attr:`window`, attr:`sampling`, attr:`NFFT`, attr:`scale_by_freq`,\n :attr:`detrend`.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import datasets\n from spectrum import FourierSpectrum\n s = FourierSpectrum(datasets.data_cosine(), sampling=1024, NFFT=512)\n s.periodogram()\n s.plot()", "id": "f10927:c3:m6"} {"signature": "def arcovar_marple(x, order):", "body": "assert len(x) >= order, \"\"x = np.array(x)N = len(x)r0 = sum(abs(x)**)r1 = abs(x[])**rN = abs(x[N-])**pf = r0 - r1pb = r0 - rNdelta = - r1 / r0gamma = - rN / r0c = np.zeros(N, dtype=complex)d = np.zeros(N, dtype=complex)r = np.zeros(N, dtype=complex)af = np.zeros(N, dtype=complex)ab = np.zeros(N, dtype=complex)c[] = x[N-].conjugate() / r0d[] = x[].conjugate() / r0if order == :pf = r0 / float(N)pb = pfreturn af, pf, ab, pb, pbv = []for m in range(, order+):logging.debug('', m)logging.debug(c[:])logging.debug(d[:])r1 = /pfr2 = /pbr3 = /deltar4 = /gammatemp = +for k in range(m+, N):temp = temp + x[k]*x[k-m-].conjugate()r[m] = temp.conjugate()theta = x[] * c[m]if m == :passelse:for k in range(, m):theta = theta + x[m-k] * c[k] r[k] = r[k] - x[N-m-] * x[N-m+k].conjugate() temp = temp + af[m-k-] * r[k].conjugate()\"\"\"\"\"\"c1 = -temp * r2c2 = -r1 * temp.conjugate()c3 = theta * r3c4 = r4 *theta.conjugate()af[m] = c1 ab[m] = c2 save = c[m]c[m] = save + c3*d[m]d[m] = d[m] + c4*saveif m == :passelse:for k in range(, m):save = af[k]af[k] = save + c1 * ab[m-k-] ab[m-k-] = ab[m-k-] + c2 * save save = c[k]c[k] = save + c3*d[k] d[k] = d[k] + c4*save r5 = temp.real** + temp.imag**pf = pf - r5*r2 pb = pb - r5*r1 r5 = theta.real** + theta.imag**delta = delta - r5*r4 gamma = gamma - r5*r3 if m != order-:passelse:pf = pf / float(N-m-)pb = pb / float(N-m-)breakif pf > and pb > :passelse:ValueError(\"\")if (delta > and delta <= and gamma > and gamma <=):passelse:ValueError(\"\")r1 = /pfr2 = /pbr3 = /deltar4 = /gammaef = x[m+]eb = x[(N-)-m-]for k in range(,m+):ef = ef + af[k] * x[m-k] eb = eb + ab[k] * x[N-m+k-] c1 = ef*r3c2 = eb*r4c3 = eb.conjugate() * r2c4 = ef.conjugate() * r1for k in range(m, -, -):save = af[k]af[k] = save + c1 * d[k] d[k+] = d[k] + c4 * save save = ab[k]ab[k] = save + c2 * c[m-k] c[m-k] = c[m-k] + c3 * save c[m+] = c3d[] = c4r5 = ef.real** + ef.imag**pf = pf - r5 * r3 delta = delta-r5 * r1 r5 = eb.real** + eb.imag**pb = pb - r5 * r4 gamma = gamma-r5*r2 pbv.append(pb)if (pf > and pb > ):passelse:ValueError(\"\")if (delta > and delta <= ) and (gamma > and gamma <= ):passelse:ValueError(\"\")return af, pf, ab, pb, pbv", "docstring": "r\"\"\"Estimate AR model parameters using covariance method\n\n This implementation is based on [Marple]_. 
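(Added usage sketch, not from the original text; the package-level import and the synthetic AR(2) test signal are assumptions.)

    import numpy
    from scipy.signal import lfilter
    from spectrum import arcovar_marple   # assumed to be exposed at the package level

    numpy.random.seed(0)
    noise = numpy.random.randn(1024)
    x = lfilter([1], [1, -1.2, 0.5], noise)      # synthetic AR(2) process (stable poles)
    af, pf, ab, pb, pbv = arcovar_marple(x, 4)   # low order (<= 4), where this variant is recommended
    # the leading entries of af should be close to [-1.2, 0.5, 0, 0];
    # pf and pb are the forward/backward prediction error powers
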
This code is far more\n complicated and slower than :func:`arcovar` function, which is now the official version.\n See :func:`arcovar` for a detailed description of Covariance method.\n\n This function should be used in place of arcovar only if order<=4, for\n which :func:`arcovar` does not work.\n\n Fast algorithm for the solution of the covariance least squares normal\n equations from Marple.\n\n :param array X: Array of complex data samples\n :param int oder: Order of linear prediction model\n\n :return:\n * AF - Array of complex forward linear prediction coefficients\n * PF - Real forward linear prediction variance at order IP\n * AB - Array of complex backward linear prediction coefficients\n * PB - Real backward linear prediction variance at order IP\n * PV - store linear prediction coefficients\n\n .. note:: this code and the original code in Marple diverge for ip>10.\n it seems that this is related to single precision used with\n complex type in fortran whereas numpy uses double precision for\n complex type.\n\n :validation: the AR parameters are the same as those returned by\n a completely different function :func:`arcovar`.\n\n :References: [Marple]_", "id": "f10928:m0"} {"signature": "def arcovar(x, order):", "body": "from spectrum import corrmtximport scipy.linalgX = corrmtx(x, order, '')Xc = np.matrix(X[:, :])X1 = np.array(X[:, ])a, _residues, _rank, _singular_values = scipy.linalg.lstsq(-Xc, X1)Cz = np.dot(X1.conj().transpose(), Xc)e = np.dot(X1.conj().transpose(), X1) + np.dot(Cz, a)assert e.imag < , ''e = float(e.real) return a, e", "docstring": "r\"\"\"Simple and fast implementation of the covariance AR estimate\n\n This code is 10 times faster than :func:`arcovar_marple` and more importantly\n only 10 lines of code, compared to a 200 loc for :func:`arcovar_marple`\n\n\n :param array X: Array of complex data samples\n :param int oder: Order of linear prediction model\n\n :return:\n * a - Array of complex forward linear prediction coefficients\n * e - error\n\n The covariance method fits a Pth order autoregressive (AR) model to the\n input signal, which is assumed to be the output of\n an AR system driven by white noise. This method minimizes the forward\n prediction error in the least-squares sense. The output vector\n contains the normalized estimate of the AR system parameters\n\n The white noise input variance estimate is also returned.\n\n If is the power spectral density of y(n), then:\n\n .. math:: \\frac{e}{\\left| A(e^{jw}) \\right|^2} = \\frac{e}{\\left| 1+\\sum_{k-1}^P a(k)e^{-jwk}\\right|^2}\n\n Because the method characterizes the input data using an all-pole model,\n the correct choice of the model order p is important.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import arcovar, marple_data, arma2psd\n from pylab import plot, log10, linspace, axis\n\n ar_values, error = arcovar(marple_data, 15)\n psd = arma2psd(ar_values, sides='centerdc')\n plot(linspace(-0.5, 0.5, len(psd)), 10*log10(psd/max(psd)))\n axis([-0.5, 0.5, -60, 0])\n\n .. 
seealso:: :class:`pcovar`\n\n :validation: the AR parameters are the same as those returned by\n a completely different function :func:`arcovar_marple`.\n\n :References: [Mathworks]_", "id": "f10928:m1"} {"signature": "def __init__(self, data, order, NFFT=None, sampling=,scale_by_freq=False):", "body": "super(pcovar, self).__init__(data, ar_order=order,NFFT=NFFT, sampling=sampling,scale_by_freq=scale_by_freq)", "docstring": "**Constructor**\n\n For a detailled description of the parameters, see :func:`arcovar`.\n\n :param array data: input data (list or numpy.array)\n :param int order:\n :param int NFFT: total length of the final data sets (padded\n with zero if needed; default is 4096)\n :param float sampling: sampling frequency of the input :attr:`data`.", "id": "f10928:c0:m0"} {"signature": "def speriodogram(x, NFFT=None, detrend=True, sampling=,scale_by_freq=True, window='', axis=):", "body": "x = np.array(x)if x.ndim == :axis = r = x.shape[]w = Window(r, window) w = w.dataelif x.ndim == :logging.debug('')[r, c] = x.shapew = np.array([Window(r, window).data for this in range(c)]).reshape(r,c) if NFFT is None:NFFT = len(x)isreal = np.isrealobj(x)if detrend == True:m = np.mean(x, axis=axis)else:m = if isreal == True:if x.ndim == :res = (abs (rfft (x*w - m, NFFT, axis=))) ** / relse:res = (abs (rfft (x*w - m, NFFT, axis=-))) ** / relse:if x.ndim == :res = (abs (fft (x*w - m, NFFT, axis=))) ** / relse:res = (abs (fft (x*w - m, NFFT, axis=-))) ** / rif scale_by_freq is True:df = sampling / float(NFFT)res*= * np.pi / dfif x.ndim == :return res.transpose()else:return res", "docstring": "Simple periodogram, but matrices accepted.\n\n :param x: an array or matrix of data samples.\n :param NFFT: length of the data before FFT is computed (zero padding)\n :param bool detrend: detrend the data before co,puteing the FFT\n :param float sampling: sampling frequency of the input :attr:`data`.\n\n :param scale_by_freq:\n :param str window:\n\n :return: 2-sided PSD if complex data, 1-sided if real.\n\n if a matrix is provided (using numpy.matrix), then a periodogram\n is computed for each row. The returned matrix has the same shape as the input\n matrix.\n\n The mean of the input data is also removed from the data before computing\n the psd.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from pylab import grid, semilogy\n from spectrum import data_cosine, speriodogram\n data = data_cosine(N=1024, A=0.1, sampling=1024, freq=200)\n semilogy(speriodogram(data, detrend=False, sampling=1024), marker='o')\n grid(True)\n\n\n .. plot::\n :width: 80%\n :include-source:\n\n import numpy\n from spectrum import speriodogram, data_cosine\n from pylab import figure, semilogy, figure ,imshow\n # create N data sets and make the frequency dependent on the time\n N = 100\n m = numpy.concatenate([data_cosine(N=1024, A=0.1, sampling=1024, freq=x) \n for x in range(1, N)]);\n m.resize(N, 1024)\n res = speriodogram(m)\n figure(1)\n semilogy(res)\n figure(2)\n imshow(res.transpose(), aspect='auto')\n\n .. 
todo:: a proper spectrogram class/function that takes care of normalisation", "id": "f10929:m0"} {"signature": "def WelchPeriodogram(data, NFFT=None, sampling=, **kargs):", "body": "from pylab import psdspectrum = Spectrum(data, sampling=)P = psd(data, NFFT, Fs=sampling, **kargs)spectrum.psd = P[]return P, spectrum", "docstring": "r\"\"\"Simple periodogram wrapper of numpy.psd function.\n\n :param A: the input data\n :param int NFFT: total length of the final data sets (padded \n with zero if needed; default is 4096)\n :param str window:\n\n :Technical documentation:\n\n When we calculate the periodogram of a set of data we get an estimation\n of the spectral density. In fact as we use a Fourier transform and a\n truncated segments the spectrum is the convolution of the data with a\n rectangular window which Fourier transform is\n\n .. math::\n\n W(s)= \\frac{1}{N^2} \\left[ \\frac{\\sin(\\pi s)}{\\sin(\\pi s/N)} \\right]^2\n\n Thus oscillations and sidelobes appears around the main frequency. One aim of t he tapering is to reduced this effects. We multiply data by a window whose sidelobes are much smaller than the main lobe. Classical window is hanning window. But other windows are available. However we must take into account this energy and divide the spectrum by energy of taper used. Thus periodogram becomes :\n\n .. math::\n\n D_k \\equiv \\sum_{j=0}^{N-1}c_jw_j \\; e^{2\\pi ijk/N} \\qquad k=0,...,N-1\n\n .. math::\n\n P(0)=P(f_0)=\\frac{1}{2\\pi W_{ss}}\\arrowvert{D_0}\\arrowvert^2\n\n .. math::\n\n P(f_k)=\\frac{1}{2\\pi W_{ss}} \\left[\\arrowvert{D_k}\\arrowvert^2+\\arrowvert{D_{N-k}}\\arrowvert^2\\right] \\qquad k=0,1,..., \\left( \\frac{1}{2}-1 \\right)\n\n .. math::\n\n P(f_c)=P(f_{N/2})= \\frac{1}{2\\pi W_{ss}} \\arrowvert{D_{N/2}}\\arrowvert^2\n\n with\n\n .. math::\n\n {W_{ss}} \\equiv N\\sum_{j=0}^{N-1}w_j^2\n\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import WelchPeriodogram, marple_data\n psd = WelchPeriodogram(marple_data, 256)", "id": "f10929:m1"} {"signature": "def DaniellPeriodogram(data, P, NFFT=None, detrend='', sampling=,scale_by_freq=True, window=''):", "body": "psd = speriodogram(data, NFFT=NFFT, detrend=detrend, sampling=sampling,scale_by_freq=scale_by_freq, window=window)if len(psd) % == :datatype = ''else:datatype = ''N = len(psd)_slice = * P + if datatype == '': newN = np.ceil(psd.size/float(_slice))if newN % == :newN = psd.size/_sliceelse:newN = np.ceil(psd.size/float(_slice))if newN % == :newN = psd.size/_slicenewpsd = np.zeros(int(newN)) for i in range(, newpsd.size):count = for n in range(i*_slice-P, i*_slice+P+): if n > and ncount += newpsd[i] += psd[n]newpsd[i] /= float(count)if datatype == '':freq = np.linspace(, sampling, len(newpsd))else:df = / samplingfreq = np.linspace(,sampling/, len(newpsd))return newpsd, freq", "docstring": "r\"\"\"Return Daniell's periodogram.\n\n To reduce fast fluctuations of the spectrum one idea proposed by daniell\n is to average each value with points in its neighboorhood. It's like\n a low filter.\n\n .. math:: \\hat{P}_D[f_i]= \\frac{1}{2P+1} \\sum_{n=i-P}^{i+P} \\tilde{P}_{xx}[f_n]\n\n where P is the number of points to average.\n\n Daniell's periodogram is the convolution of the spectrum with a low filter:\n\n .. 
math:: \\hat{P}_D(f)= \\hat{P}_{xx}(f)*H(f)\n\n Example::\n\n >>> DaniellPeriodogram(data, 8)\n\n if N/P is not integer, the final values of the original PSD are not used.\n\n using DaniellPeriodogram(data, 0) should give the original PSD.", "id": "f10929:m2"} {"signature": "def __init__(self, data, sampling=,window='', NFFT=None, scale_by_freq=False,detrend=None):", "body": "super(Periodogram, self).__init__(data,window=window,sampling=sampling,NFFT=NFFT,scale_by_freq=scale_by_freq,detrend=detrend)", "docstring": "**Periodogram Constructor**\n\n :param array data: input data (list or numpy.array)\n :param float sampling: sampling frequency of the input :attr:`data`.\n :param str window: a tapering window. See :class:`~spectrum.window.Window`.\n :param int NFFT: total length of the final data sets (padded with zero\n if needed; default is 4096)\n :param bool scale_by_freq:\n :param str detrend:", "id": "f10929:c0:m0"} {"signature": "def __init__(self, data, P, sampling=,window='', NFFT=None, scale_by_freq=True,detrend=None):", "body": "super(pdaniell, self).__init__(data,window=window,sampling=sampling,NFFT=NFFT,scale_by_freq=scale_by_freq,detrend=detrend)self.P = P", "docstring": "**pdaniell Constructor**\n\n :param array data: input data (list or numpy.array)\n :param int P: number of neighbours to average over.\n :param float sampling: sampling frequency of the input :attr:`data`.\n :param str window: a tapering window. See :class:`~spectrum.window.Window`.\n :param int NFFT: total length of the final data sets (padded with \n zero if needed; default is 4096)\n :param bool scale_by_freq:\n :param str detrend:", "id": "f10929:c1:m0"} {"signature": "def minvar(X, order, sampling=, NFFT=default_NFFT):", "body": "errors.is_positive_integer(order)errors.is_positive_integer(NFFT)psi = np.zeros(NFFT, dtype=complex)A, P, k = arburg (X, order - )A = np.insert(A, , +)\"\"\"\"\"\"for K in range(, order):SUM = MK = order-Kfor I in range(, order - K):SUM = SUM + float(MK-*I) * A[I].conjugate()*A[I+K] SUM = SUM/Pif K != :psi[NFFT-K] = SUM.conjugate()psi[K] = SUMpsi = fft(psi, NFFT)PSD = sampling / np.real(psi)return PSD, A, k", "docstring": "r\"\"\"Minimum Variance Spectral Estimation (MV)\n\n This function computes the minimum variance spectral estimate using\n the Musicus procedure. The Burg algorithm from :func:`~spectrum.burg.arburg`\n is used for the estimation of the autoregressive parameters.\n The MV spectral estimator is given by:\n\n .. math:: P_{MV}(f) = \\frac{T}{e^H(f) R^{-1}_p e(f)}\n\n\n where :math:`R^{-1}_p` is the inverse of the estimated autocorrelation\n matrix (Toeplitz) and :math:`e(f)` is the complex sinusoid vector.\n\n :param X: Array of complex or real data samples (length N)\n :param int order: Dimension of correlation matrix (AR order = order - 1 )\n :param float T: Sample interval (PSD scaling)\n :param int NFFT: length of the final PSD\n\n :return:\n * PSD - Power spectral density values (two-sided)\n * AR - AR coefficients (Burg algorithm)\n * k - Reflection coefficients (Burg algorithm)\n\n .. note:: The MV spectral estimator is not a true PSD function because the\n area under the MV estimate does not represent the total power in the\n measured process. MV minimises the variance of the output of a narrowband\n filter and adpats itself to the spectral content of the input data\n at each frequency.\n\n :Example: The following example computes a PSD estimate using :func:`minvar`\n The output PSD is transformed to a ``centerdc`` PSD and plotted.\n\n .. 
plot::\n :width: 80%\n :include-source:\n\n from spectrum import *\n from pylab import plot, log10, linspace, xlim\n psd, A, k = minvar(marple_data, 15)\n psd = twosided_2_centerdc(psd) # switch positive and negative freq\n f = linspace(-0.5, 0.5, len(psd))\n plot(f, 10 * log10(psd/max(psd)))\n xlim(-0.5, 0.5 )\n\n .. seealso::\n\n * External functions used are :meth:`~spectrum.burg.arburg`\n and numpy.fft.fft\n * :class:`pminvar`, a Class dedicated to MV method.\n\n :Reference: [Marple]_", "id": "f10930:m0"} {"signature": "def __init__(self, data, order, NFFT=None, sampling=, scale_by_freq=False):", "body": "super(pminvar, self).__init__(data, ar_order=order, sampling=sampling,NFFT=NFFT, scale_by_freq=scale_by_freq)", "docstring": "**Constructor**\n\n For a detailled description of the parameters, see :func:`minvar`.\n\n :param array data: input data (list or numpy.array)\n :param int order:\n :param int NFFT: total length of the final data sets (padded with\n zero if needed; default is 4096)\n :param float sampling: sampling frequency of the input :attr:`data`.", "id": "f10930:c0:m0"} {"signature": "def LEVINSON(r, order=None, allow_singularity=False):", "body": "T0 = numpy.real(r[])T = r[:]M = len(T)if order is None:M = len(T)else:assert order <= M, ''M = orderrealdata = numpy.isrealobj(r)if realdata is True:A = numpy.zeros(M, dtype=float)ref = numpy.zeros(M, dtype=float)else:A = numpy.zeros(M, dtype=complex)ref = numpy.zeros(M, dtype=complex)P = T0for k in range(, M):save = T[k]if k == :temp = -save / Pelse:for j in range(, k):save = save + A[j] * T[k-j-]temp = -save / Pif realdata:P = P * ( - temp**)else:P = P * ( - (temp.real**+temp.imag**))if P <= and allow_singularity==False:raise ValueError(\"\")A[k] = tempref[k] = temp if k == :continuekhalf = (k+)//if realdata is True:for j in range(, khalf):kj = k-j-save = A[j]A[j] = save + temp * A[kj]if j != kj:A[kj] += temp*saveelse:for j in range(, khalf):kj = k-j-save = A[j]A[j] = save + temp * A[kj].conjugate()if j != kj:A[kj] = A[kj] + temp * save.conjugate()return A, P, ref", "docstring": "r\"\"\"Levinson-Durbin recursion.\n\n Find the coefficients of a length(r)-1 order autoregressive linear process\n\n :param r: autocorrelation sequence of length N + 1 (first element being the zero-lag autocorrelation)\n :param order: requested order of the autoregressive coefficients. default is N.\n :param allow_singularity: false by default. Other implementations may be True (e.g., octave)\n\n :return:\n * the `N+1` autoregressive coefficients :math:`A=(1, a_1...a_N)`\n * the prediction errors\n * the `N` reflections coefficients values\n\n This algorithm solves the set of complex linear simultaneous equations\n using Levinson algorithm.\n\n .. math::\n\n \\bold{T}_M \\left( \\begin{array}{c} 1 \\\\ \\bold{a}_M \\end{array} \\right) =\n \\left( \\begin{array}{c} \\rho_M \\\\ \\bold{0}_M \\end{array} \\right)\n\n where :math:`\\bold{T}_M` is a Hermitian Toeplitz matrix with elements\n :math:`T_0, T_1, \\dots ,T_M`.\n\n .. note:: Solving this equations by Gaussian elimination would\n require :math:`M^3` operations whereas the levinson algorithm\n requires :math:`M^2+M` additions and :math:`M^2+M` multiplications.\n\n This is equivalent to solve the following symmetric Toeplitz system of\n linear equations\n\n .. 
math::\n\n \\left( \\begin{array}{cccc}\n r_1 & r_2^* & \\dots & r_{n}^*\\\\\n r_2 & r_1^* & \\dots & r_{n-1}^*\\\\\n \\dots & \\dots & \\dots & \\dots\\\\\n r_n & \\dots & r_2 & r_1 \\end{array} \\right)\n \\left( \\begin{array}{cccc}\n a_2\\\\\n a_3 \\\\\n \\dots \\\\\n a_{N+1} \\end{array} \\right)\n =\n \\left( \\begin{array}{cccc}\n -r_2\\\\\n -r_3 \\\\\n \\dots \\\\\n -r_{N+1} \\end{array} \\right)\n\n where :math:`r = (r_1 ... r_{N+1})` is the input autocorrelation vector, and\n :math:`r_i^*` denotes the complex conjugate of :math:`r_i`. The input r is typically\n a vector of autocorrelation coefficients where lag 0 is the first\n element :math:`r_1`.\n\n\n .. doctest::\n\n >>> import numpy; from spectrum import LEVINSON\n >>> T = numpy.array([3., -2+0.5j, .7-1j])\n >>> a, e, k = LEVINSON(T)", "id": "f10931:m0"} {"signature": "def rlevinson(a, efinal):", "body": "a = numpy.array(a)realdata = numpy.isrealobj(a)assert a[] == , ''p = len(a)if p < :raise ValueError('')if realdata == True:U = numpy.zeros((p, p)) else:U = numpy.zeros((p, p), dtype=complex)U[:, p-] = numpy.conj(a[-::-]) p = p -e = numpy.zeros(p)e[-] = efinal for k in range(p-, , -):[a, e[k-]] = levdown(a, e[k])U[:, k] = numpy.concatenate((numpy.conj(a[-::-].transpose()) ,[]*(p-k) ))e0 = e[]/(-abs(a[]**)) U[,] = kr = numpy.conj(U[,:]) kr = kr.transpose() R = numpy.zeros(, dtype=complex)k = R0 = e0 R[] = -numpy.conj(U[,])*R0 for k in range(,p):r = -sum(numpy.conj(U[k-::-,k])*R[-::-]) - kr[k]*e[k-]R = numpy.insert(R, len(R), r)R = numpy.insert(R, , e0)return R, U, kr, e", "docstring": "computes the autocorrelation coefficients, R based\n on the prediction polynomial A and the final prediction error Efinal,\n using the stepdown algorithm.\n\n Works for real or complex data\n\n :param a:\n :param efinal:\n\n :return:\n * R, the autocorrelation\n * U prediction coefficient\n * kr reflection coefficients\n * e errors\n\n A should be a minimum phase polynomial and A(1) is assumed to be unity.\n\n :returns: (P+1) by (P+1) upper triangular matrix, U,\n that holds the i'th order prediction polynomials\n Ai, i=1:P, where P is the order of the input\n polynomial, A.\n\n\n\n [ 1 a1(1)* a2(2)* ..... aP(P) * ]\n [ 0 1 a2(1)* ..... aP(P-1)* ]\n U = [ .................................]\n [ 0 0 0 ..... 1 ]\n\n from which the i'th order prediction polynomial can be extracted\n using Ai=U(i+1:-1:1,i+1)'. The first row of U contains the\n conjugates of the reflection coefficients, and the K's may be\n extracted using, K=conj(U(1,2:end)).\n\n .. todo:: remove the conjugate when data is real data, clean up the code\n test and doc.", "id": "f10931:m1"} {"signature": "def levdown(anxt, enxt=None):", "body": "if anxt[] != :raise ValueError('')anxt = anxt[:] knxt = anxt[-]if knxt == :raise ValueError('')acur = (anxt[:-]-knxt*numpy.conj(anxt[-::-]))/(-abs(knxt)**)ecur = Noneif enxt is not None:ecur = enxt/(-numpy.dot(knxt.conj().transpose(),knxt))acur = numpy.insert(acur, , )return acur, ecur", "docstring": "One step backward Levinson recursion\n\n :param anxt:\n :param enxt:\n :return:\n * acur the P'th order prediction polynomial based on the P+1'th order prediction polynomial, anxt.\n * ecur the the P'th order prediction error based on the P+1'th order prediction error, enxt.\n\n .. 
* knxt the P+1'th order reflection coefficient.", "id": "f10931:m2"} {"signature": "def levup(acur, knxt, ecur=None):", "body": "if acur[] != :raise ValueError('')acur = acur[:] anxt = numpy.concatenate((acur, [])) + knxt * numpy.concatenate((numpy.conj(acur[-::-]), []))enxt = Noneif ecur is not None:enxt = ( - numpy.dot(numpy.conj(knxt), knxt)) * ecuranxt = numpy.insert(anxt, , )return anxt, enxt", "docstring": "LEVUP One step forward Levinson recursion\n\n :param acur:\n :param knxt:\n :return:\n * anxt the P+1'th order prediction polynomial based on the P'th order prediction polynomial, acur, and the\n P+1'th order reflection coefficient, Knxt.\n * enxt the P+1'th order prediction prediction error, based on the P'th order prediction error, ecur.\n\n\n :References: P. Stoica R. Moses, Introduction to Spectral Analysis Prentice Hall, N.J., 1997, Chapter 3.", "id": "f10931:m3"} {"signature": "def arma2psd(A=None, B=None, rho=, T=, NFFT=, sides='',norm=False):", "body": "if NFFT is None:NFFT = if A is None and B is None:raise ValueError(\"\")psd = np.zeros(NFFT, dtype=complex)if A is not None:ip = len(A)den = np.zeros(NFFT, dtype=complex)den[] = +for k in range(, ip):den[k+] = A[k]denf = fft(den, NFFT)if B is not None:iq = len(B)num = np.zeros(NFFT, dtype=complex)num[] = +for k in range(, iq):num[k+] = B[k]numf = fft(num, NFFT)if A is not None and B is not None:psd = rho / T * abs(numf)** / abs(denf)**elif A is not None:psd = rho / T / abs(denf)**elif B is not None:psd = rho / T * abs(numf)**psd = np.real(psd)if sides != '':from . import toolsassert sides in ['']if sides == '':psd = tools.twosided_2_centerdc(psd)if norm == True:psd /= max(psd)return psd", "docstring": "r\"\"\"Computes power spectral density given ARMA values.\n\n This function computes the power spectral density values\n given the ARMA parameters of an ARMA model. It assumes that\n the driving sequence is a white noise process of zero mean and\n variance :math:`\\rho_w`. The sampling frequency and noise variance are\n used to scale the PSD output, which length is set by the user with the\n `NFFT` parameter.\n\n :param array A: Array of AR parameters (complex or real)\n :param array B: Array of MA parameters (complex or real)\n :param float rho: White noise variance to scale the returned PSD\n :param float T: Sample interval in seconds to scale the returned PSD\n :param int NFFT: Final size of the PSD\n :param str sides: Default PSD is two-sided, but sides can be set to centerdc.\n\n .. warning:: By convention, the AR or MA arrays does not contain the\n A0=1 value.\n\n If :attr:`B` is None, the model is a pure AR model. If :attr:`A` is None,\n the model is a pure MA model.\n\n :return: two-sided PSD\n\n .. rubric:: Details:\n\n AR case: the power spectral density is:\n\n .. math:: P_{ARMA}(f) = T \\rho_w \\left|\\frac{B(f)}{A(f)}\\right|^2\n\n where:\n\n .. math:: A(f) = 1 + \\sum_{k=1}^q b(k) e^{-j2\\pi fkT}\n .. math:: B(f) = 1 + \\sum_{k=1}^p a(k) e^{-j2\\pi fkT}\n\n .. rubric:: **Example:**\n\n .. 
plot::\n :width: 80%\n :include-source:\n\n import spectrum.arma\n from pylab import plot, log10, legend\n plot(10*log10(spectrum.arma.arma2psd([1,0.5],[0.5,0.5])), label='ARMA(2,2)')\n plot(10*log10(spectrum.arma.arma2psd([1,0.5],None)), label='AR(2)')\n plot(10*log10(spectrum.arma.arma2psd(None,[0.5,0.5])), label='MA(2)')\n legend()\n\n :References: [Marple]_", "id": "f10932:m0"} {"signature": "def arma_estimate(X, P, Q, lag):", "body": "R = CORRELATION(X, maxlags=lag, norm='')R0 = R[]MPQ = lag - Q + PN = len(X)Y = np.zeros(N-P, dtype=complex)for K in range(, MPQ):KPQ = K + Q - P+if KPQ < :Y[K] = R[-KPQ].conjugate()if KPQ == :Y[K] = R0if KPQ > :Y[K] = R[KPQ]Y.resize(lag)if P <= :res = arcovar_marple(Y.copy(), P) ar_params = res[]else:res = arcovar(Y.copy(), P) ar_params = res[]Y.resize(N-P)for k in range(P, N):SUM = X[k]for j in range(, P):SUM = SUM + ar_params[j] * X[k-j-] Y[k-P] = SUMma_params, rho = ma(Y, Q, *Q) return ar_params, ma_params, rho", "docstring": "Autoregressive and moving average estimators.\n\n This function provides an estimate of the autoregressive\n parameters, the moving average parameters, and the driving\n white noise variance of an ARMA(P,Q) for a complex or real data sequence.\n\n The parameters are estimated using three steps:\n\n * Estimate the AR parameters from the original data based on a least\n squares modified Yule-Walker technique,\n * Produce a residual time sequence by filtering the original data\n with a filter based on the AR parameters,\n * Estimate the MA parameters from the residual time sequence.\n\n :param array X: Array of data samples (length N)\n :param int P: Desired number of AR parameters\n :param int Q: Desired number of MA parameters\n :param int lag: Maximum lag to use for autocorrelation estimates\n\n :return:\n * A - Array of complex P AR parameter estimates\n * B - Array of complex Q MA parameter estimates\n * RHO - White noise variance estimate\n\n .. note::\n * lag must be >= Q (MA order)\n\n **dependencies**:\n * :meth:`spectrum.correlation.CORRELATION`\n * :meth:`spectrum.covar.arcovar`\n * :meth:`spectrum.arma.ma`\n\n .. 
plot::\n :width: 80%\n :include-source:\n\n from spectrum import arma_estimate, arma2psd, marple_data\n import pylab\n\n a,b, rho = arma_estimate(marple_data, 15, 15, 30)\n psd = arma2psd(A=a, B=b, rho=rho, sides='centerdc', norm=True)\n pylab.plot(10 * pylab.log10(psd))\n pylab.ylim([-50,0])\n\n :reference: [Marple]_", "id": "f10932:m1"} {"signature": "def ma(X, Q, M):", "body": "if Q <= or Q >= M:raise ValueError('')a, rho, _c = yulewalker.aryule(X, M, '') a = np.insert(a, , )ma_params, _p, _c = yulewalker.aryule(a, Q, '') return ma_params, rho", "docstring": "Moving average estimator.\n\n This program provides an estimate of the moving average parameters\n and driving noise variance for a data sequence based on a\n long AR model and a least squares fit.\n\n :param array X: The input data array\n :param int Q: Desired MA model order (must be >0 and ,scale_by_freq=False):", "body": "super(parma, self).__init__(data, ma_order=Q, ar_order=P, lag=lag,NFFT=NFFT, sampling=sampling,scale_by_freq=scale_by_freq)self.lag = lag", "docstring": "**Constructor:**\n\n For a detailed description of the parameters, see :func:`arma_estimate`.\n\n :param array data: input data (list or numpy.array)\n :param int P:\n :param int Q:\n :param int lag:\n :param int NFFT: total length of the final data sets (padded with\n zero if needed; default is 4096)\n :param float sampling: sampling frequency of the input :attr:`data`.", "id": "f10932:c0:m0"} {"signature": "def __init__(self, data, Q, M, NFFT=None, sampling=,scale_by_freq=False):", "body": "super(pma, self).__init__(data, ma_order=Q, ar_order=M,NFFT=NFFT, sampling=sampling,scale_by_freq=scale_by_freq)", "docstring": "**Constructor:**\n\n For a detailed description of the parameters, see :func:`ma`.\n\n :param array data: input data (list or numpy.array)\n :param int Q: MA order\n :param int M: AR model used to estimate the MA parameters\n :param int NFFT: total length of the final data sets (padded with zero \n if needed; default is 4096)\n :param float sampling: sampling frequency of the input :attr:`data`.", "id": "f10932:c1:m0"} {"signature": "def _arburg2(X, order):", "body": "x = np.array(X)N = len(x)if order <= :raise ValueError(\"\")rho = sum(abs(x)**) / N den = rho * * Nef = np.zeros(N, dtype=complex)eb = np.zeros(N, dtype=complex)for j in range(, N): ef[j] = x[j]eb[j] = x[j]a = np.zeros(, dtype=complex)a[] = ref = np.zeros(order, dtype=complex)temp = E = np.zeros(order+)E[] = rhofor m in range(, order):efp = ef[:]ebp = eb[:-]num = -* np.dot(ebp.conj().transpose(), efp)den = np.dot(efp.conj().transpose(), efp)den += np.dot(ebp, ebp.conj().transpose())ref[m] = num / denef = efp + ref[m] * ebpeb = ebp + ref[m].conj().transpose() * efpa.resize(len(a)+)a = a + ref[m] * np.flipud(a).conjugate()E[m+] = ( - ref[m].conj().transpose()*ref[m]) * E[m]return a, E[-], ref", "docstring": "This version is 10 times faster than arburg, but the output rho is not correct.\n\n\n returns [1 a0,a1, an-1]", "id": "f10933:m0"} {"signature": "def arburg(X, order, criteria=None):", "body": "if order <= :raise ValueError(\"\")if order > len(X):raise ValueError(\"\")x = np.array(X)N = len(x)rho = sum(abs(x)**) / float(N) den = rho * * Nif criteria:from spectrum import Criteriacrit = Criteria(name=criteria, N=N)crit.data = rhologging.debug(''.format(, crit.old_data, crit.data, rho))a = np.zeros(, dtype=complex)ref = np.zeros(, dtype=complex)ef = x.astype(complex)eb = x.astype(complex)temp = for k in range(, order):num = sum([ef[j]*eb[j-].conjugate() for j in range(k+, N)])den = 
temp * den - abs(ef[k])** - abs(eb[N-])**kp = - * num / den temp = - abs(kp)**new_rho = temp * rhoif criteria:logging.debug(''.format(k+, crit.old_data, crit.data, new_rho))status = crit(rho=temp*rho, k=k+)if status is False:logging.debug('' % (crit.data, crit.old_data))breakrho = new_rhoif rho <= :raise ValueError(\"\" % rho)a.resize(a.size+)a[k] = kpif k == :for j in range(N-, k, -):save2 = ef[j]ef[j] = save2 + kp * eb[j-] eb[j] = eb[j-] + kp.conjugate() * save2else:khalf = (k+)// for j in range(, khalf):ap = a[j] a[j] = ap + kp * a[k-j-].conjugate() if j != k-j-:a[k-j-] = a[k-j-] + kp * ap.conjugate() for j in range(N-, k, -):save2 = ef[j]ef[j] = save2 + kp * eb[j-] eb[j] = eb[j-] + kp.conjugate() * save2ref.resize(ref.size+)ref[k] = kpreturn a, rho, ref", "docstring": "r\"\"\"Estimate the complex autoregressive parameters by the Burg algorithm.\n\n .. math:: x(n) = \\sqrt{(v}) e(n) + \\sum_{k=1}^{P+1} a(k) x(n-k)\n\n :param x: Array of complex data samples (length N)\n :param order: Order of autoregressive process (0,scale_by_freq=False):", "body": "super(pburg, self).__init__(data, ar_order=order,sampling=sampling, NFFT=NFFT,scale_by_freq=scale_by_freq)self.criteria = criteria", "docstring": "**Constructor**\n\n For a detailled description of the parameters, see :func:`burg`.\n\n :param array data: input data (list or np.array)\n :param int order:\n :param str criteria:\n :param int NFFT: total length of the final data sets (padded with zero if \n needed; default is 4096)\n\n :param float sampling: sampling frequency of the input :attr:`data`.", "id": "f10933:c0:m0"} {"signature": "def lpc(x, N=None):", "body": "m = len(x)if N is None:N = m - elif N > m-:x.resize(N+)X = fft(x, **nextpow2(*len(x)-))R = real(ifft(abs(X)**))R = R/(m-) a, e, ref = LEVINSON(R, N)return a, e", "docstring": "Linear Predictor Coefficients.\n\n :param x:\n :param int N: default is length(X) - 1\n\n :Details:\n\n Finds the coefficients :math:`A=(1, a(2), \\dots a(N+1))`, of an Nth order\n forward linear predictor that predicts the current value value of the\n real-valued time series x based on past samples:\n\n .. math:: \\hat{x}(n) = -a(2)*x(n-1) - a(3)*x(n-2) - ... - a(N+1)*x(n-N)\n\n such that the sum of the squares of the errors\n\n .. math:: err(n) = X(n) - Xp(n)\n\n is minimized. This function uses the Levinson-Durbin recursion to\n solve the normal equations that arise from the least-squares formulation.\n\n .. seealso:: :func:`levinson`, :func:`aryule`, :func:`prony`, :func:`stmcb`\n\n .. todo:: matrix case, references\n\n :Example:\n\n ::\n\n from scipy.signal import lfilter\n noise = randn(50000,1); % Normalized white Gaussian noise\n x = filter([1], [1 1/2 1/3 1/4], noise)\n x = x[45904:50000]\n x.reshape(4096, 1)\n x = x[0]\n\n Compute the predictor coefficients, estimated signal, prediction error, and autocorrelation sequence of the prediction error:\n\n\n 1.00000 + 0.00000i 0.51711 - 0.00000i 0.33908 - 0.00000i 0.24410 - 0.00000i\n\n ::\n\n a = lpc(x, 3)\n est_x = lfilter([0 -a(2:end)],1,x); % Estimated signal\n e = x - est_x; % Prediction error\n [acs,lags] = xcorr(e,'coeff'); % ACS of prediction error", "id": "f10934:m0"} {"signature": "def music(X, IP, NSIG=None, NFFT=default_NFFT, threshold=None, criteria='',verbose=False):", "body": "return eigen(X, IP, NSIG=NSIG, method='', NFFT=NFFT,threshold=threshold, criteria=criteria, verbose=verbose)", "docstring": "Eigen value pseudo spectrum estimate. 
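(Added sketch, not in the original one-line docstring; the call pattern follows the eigen() example shown further below, applied here through the music wrapper.)

    from spectrum import music, marple_data
    # 15 eigenvalues are computed; the 11 largest are kept as the signal subspace
    psd, s = music(marple_data, 15, NSIG=11)
    # psd holds the pseudo-spectrum values, s the eigenvalues used for the subspace split
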
See :func:`eigenfre`", "id": "f10936:m0"} {"signature": "def ev(X, IP, NSIG=None, NFFT=default_NFFT, threshold=None, criteria='',verbose=False):", "body": "return eigen(X, IP, NSIG=NSIG, method='', NFFT=NFFT,threshold=threshold, criteria=criteria, verbose=verbose)", "docstring": "Eigen value pseudo spectrum estimate. See :func:`eigenfre`", "id": "f10936:m1"} {"signature": "def eigen(X, P, NSIG=None, method='', threshold=None, NFFT=default_NFFT,criteria='', verbose=False):", "body": "if method not in ['', '']:raise ValueError(\"\")if NSIG != None and threshold != None:raise ValueError(\"\")if NSIG is not None:if NSIG < :raise ValueError('')if NSIG >= P:raise ValueError(\"\")N = len(X)NP = N - Passert * NP > P-, ''if NP > :NP = FB = np.zeros((*NP, P), dtype=complex)Z = np.zeros(NFFT, dtype=complex)PSD = np.zeros(NFFT)for I in range(, NP):for K in range(, P):FB[I, K] = X[I-K+P-]FB[I+NP, K] = X[I+K+].conjugate()_U, S, V = svd (FB)V = -V.transpose()NSIG = _get_signal_space(S, *NP,verbose=verbose, threshold=threshold,NSIG=NSIG, criteria=criteria)for I in range(NSIG, P):Z[:P] = V[:P, I]Z[P:NFFT] = Z = fft(Z, NFFT)if method == '':PSD = PSD + abs(Z)**elif method == '' :PSD = PSD + abs(Z)** / S[I]PSD = /PSDnby2 = int(NFFT/)newpsd = np.append(PSD[nby2::-], PSD[nby2*-:nby2-:-])return newpsd, S", "docstring": "r\"\"\"Pseudo spectrum using eigenvector method (EV or Music)\n\n This function computes either the Music or EigenValue (EV) noise\n subspace frequency estimator.\n\n First, an autocorrelation matrix of order `P` is computed from\n the data. Second, this matrix is separated into vector subspaces,\n one a signal subspace and the other a noise\n subspace using a SVD method to obtain the eigen values and vectors.\n From the eigen values :math:`\\lambda_i`, and eigen vectors :math:`v_k`,\n the **pseudo spectrum** (see note below) is computed as follows:\n\n .. math:: P_{ev}(f) = \\frac{1}{e^H(f)\\left(\\sum\\limits_{k=M+1}^{p} \\frac{1}{\\lambda_k}v_kv_k^H\\right)e(f)}\n\n The separation of the noise and signal subspaces requires expertise\n of the signal. However, AIC and MDL criteria may be used to automatically\n perform this task.\n\n You still need to provide the parameter `P` to indicate the maximum number\n of eigen values to be computed. The criteria will just select a subset\n to estimate the pseudo spectrum (see :func:`~spectrum.criteria.aic_eigen`\n and :func:`~spectrum.criteria.mdl_eigen` for details.\n\n .. note:: **pseudo spectrum**. func:`eigen` does not compute a PSD estimate.\n Indeed, the method does not preserve the measured process power.\n\n :param X: Array data samples\n :param int P: maximum number of eigen values to compute. NSIG (if\n specified) must therefore be less than P.\n :param str method: 'music' or 'ev'.\n :param int NSIG: If specified, the signal sub space uses NSIG eigen values.\n :param float threshold: If specified, the signal sub space is made of the\n eigen values larger than :math:`\\rm{threshold} \\times \\lambda_{min}`,\n where :math:`\\lambda_{min}` is the minimum eigen values.\n :param int NFFT: total length of the final data sets (padded with zero \n if needed; default is 4096)\n\n :return:\n * PSD: Array of real frequency estimator values (two sided for\n complex data and one sided for real data)\n * S, the eigen values\n\n .. 
plot::\n :width: 80%\n :include-source:\n\n from spectrum import eigen, marple_data\n from pylab import plot, log10, linspace, legend, axis\n\n psd, ev = eigen(marple_data, 15, NSIG=11)\n f = linspace(-0.5, 0.5, len(psd))\n plot(f, 10 * log10(psd/max(psd)), label='User defined')\n\n psd, ev = eigen(marple_data, 15, threshold=2)\n plot(f, 10 * log10(psd/max(psd)), label='threshold method (100)')\n\n psd, ev = eigen(marple_data, 15)\n plot(f, 10 * log10(psd/max(psd)), label='AIC method (8)')\n\n legend()\n axis([-0.5, 0.5, -120, 0])\n\n .. seealso::\n :func:`pev`,\n :func:`pmusic`,\n :func:`~spectrum.criteria.aic_eigen`\n\n :References: [Marple]_, Chap 13\n\n .. todo:: for developers:\n\n * what should be the second argument of the criteria N, N-P, P...?\n * what should be the max value of NP", "id": "f10936:m2"} {"signature": "def _get_signal_space(S, NP, verbose=False, threshold=None, NSIG=None,criteria=''):", "body": "from .criteria import aic_eigen, mdl_eigenif NSIG is None:if threshold is None:logging.debug('')if criteria == '':aic = aic_eigen(S, NP*)elif criteria == '':aic = mdl_eigen(S, NP*)NSIG = np.argmin(aic) + logging.debug('', NSIG, '')else:logging.debug('')m = threshold * min(S)new_s = S[np.where(S>m)]NSIG = len(new_s)logging.debug('', NSIG)if NSIG == :NSIG = return NSIG", "docstring": "todo", "id": "f10936:m3"} {"signature": "def __init__(self, data, IP, NSIG=None, NFFT=None, sampling=,threshold=None, criteria=\"\", verbose=False, scale_by_freq=False):", "body": "super(pmusic, self).__init__(data, ar_order=IP,scale_by_freq=scale_by_freq, NFFT=NFFT, sampling=sampling)self.NSIG = NSIGself.threshold = thresholdself.criteria = criteriaself.verbose = verbose", "docstring": "**Constructor:**\n\n For a detailed description of the parameters, see :func:`arma_estimate`.\n\n :param array data: input data (list or numpy.array)\n :param int P: maximum number of eigen values to compute. NSIG (if\n specified) must therefore be less than P.\n :param int NSIG: If specified, the signal sub space uses NSIG eigen values.\n :param int NFFT: total length of the final data sets (padded with zero if needed; default is 4096)\n :param float sampling: sampling frequency of the input :attr:`data`.", "id": "f10936:c0:m0"} {"signature": "def __init__(self, data, IP, NSIG=None, NFFT=None, sampling=,scale_by_freq=False,threshold=None, criteria=\"\", verbose=False):", "body": "super(pev, self).__init__(data, ar_order=IP,scale_by_freq=scale_by_freq, NFFT=NFFT, sampling=sampling)self.NSIG = NSIGself.threshold = thresholdself.criteria = criteriaself.verbose = verbose", "docstring": "**Constructor:**\n\n For a detailed description of the parameters, see :func:`arma_estimate`.\n\n :param array data: input data (list or numpy.array)\n :param int P: maximum number of eigen values to compute. NSIG (if\n specified) must therefore be less than P.\n :param int NSIG: If specified, the signal sub space uses NSIG eigen values.\n :param int NFFT: total length of the final data sets (padded with\n zero if needed; default is 4096)\n :param float sampling: sampling frequency of the input :attr:`data`.", "id": "f10936:c1:m0"} {"signature": "def fftshift(x):", "body": "return np.fft.fftshift(x)", "docstring": "wrapper to numpy.fft.fftshift\n\n .. 
doctest::\n\n >>> from spectrum import fftshift\n >>> x = [100, 2, 3, 4, 5]\n >>> fftshift(x)\n array([ 4, 5, 100, 2, 3])", "id": "f10937:m0"} {"signature": "def _swapsides(data):", "body": "N = len(data)return np.concatenate((data[N//+:], data[:N//]))", "docstring": "todo is it really useful ?\n\n Swap sides\n\n .. doctest::\n\n >>> from spectrum import swapsides\n >>> x = [-2, -1, 1, 2]\n >>> swapsides(x)\n array([ 2, -2, -1])", "id": "f10937:m1"} {"signature": "def twosided_2_onesided(data):", "body": "assert len(data) % == N = len(data)psd = np.array(data[:N//+]) * psd[] /= psd[-] = data[-]return psd", "docstring": "Convert a one-sided PSD to a twosided PSD\n\n In order to keep the power in the onesided PSD the same\n as in the twosided version, the onesided values are twice\n as much as in the input data (except for the zero-lag value).\n\n ::\n\n >>> twosided_2_onesided([10, 2,3,3,2,8])\n array([ 10., 4., 6., 8.])", "id": "f10937:m2"} {"signature": "def onesided_2_twosided(data):", "body": "psd = np.concatenate((data[:-], cshift(data[-::-], -)))/psd[] *= psd[-] *= return psd", "docstring": "Convert a two-sided PSD to a one-sided PSD\n\n In order to keep the power in the twosided PSD the same\n as in the onesided version, the twosided values are 2 times\n lower than the input data (except for the zero-lag and N-lag\n values).\n\n ::\n\n >>> twosided_2_onesided([10, 4, 6, 8])\n array([ 10., 2., 3., 3., 2., 8.])", "id": "f10937:m3"} {"signature": "def twosided_2_centerdc(data):", "body": "N = len(data)newpsd = np.concatenate((cshift(data[N//:], ), data[:N//]))newpsd[] = data[-]return newpsd", "docstring": "Convert a two-sided PSD to a center-dc PSD", "id": "f10937:m4"} {"signature": "def centerdc_2_twosided(data):", "body": "N = len(data)newpsd = np.concatenate((data[N//:], (cshift(data[:N//], -))))return newpsd", "docstring": "Convert a center-dc PSD to a twosided PSD", "id": "f10937:m5"} {"signature": "def twosided(data):", "body": "twosided = np.concatenate((data[::-], data[:]))return twosided", "docstring": "return a twosided vector with non-duplication of the first element\n\n .. doctest::\n\n >>> from spectrum import twosided\n >>> a = [1,2,3]\n >>> twosided(a)\n array([3, 2, 1, 2, 3])", "id": "f10937:m6"} {"signature": "def _twosided_zerolag(data, zerolag):", "body": "res = twosided(np.insert(data, , zerolag))return res", "docstring": "Build a symmetric vector out of stricly positive lag vector and zero-lag\n\n .. doctest::\n\n >>> data = [3,2,1]\n >>> zerolag = 4\n >>> twosided_zerolag(data, zerolag)\n array([1, 2, 3, 4, 3, 2, 1])\n\n .. seealso:: Same behaviour as :func:`twosided_zerolag`", "id": "f10937:m7"} {"signature": "def cshift(data, offset):", "body": "if isinstance(offset, float):offset = int(offset)a = deque(data)a.rotate(offset)return np.array(a)", "docstring": "Circular shift to the right (within an array) by a given offset\n\n :param array data: input data (list or numpy.array)\n :param int offset: shift the array with the offset\n\n .. doctest::\n\n >>> from spectrum import cshift\n >>> cshift([0, 1, 2, 3, -2, -1], 2)\n array([-2, -1, 0, 1, 2, 3])", "id": "f10937:m8"} {"signature": "def pow2db(x):", "body": "return * log10(x)", "docstring": "returns the corresponding decibel (dB) value for a power value x.\n\n The relationship between power and decibels is:\n\n .. math:: X_{dB} = 10 * \\log_{10}(x)\n\n .. 
doctest::\n\n >>> from spectrum import pow2db\n >>> x = pow2db(0.1)\n >>> x\n -10.0", "id": "f10937:m9"} {"signature": "def db2pow(xdb):", "body": "return **(xdb/)", "docstring": "Convert decibels (dB) to power\n\n .. doctest::\n\n >>> from spectrum import db2pow\n >>> p = db2pow(-10)\n >>> p\n 0.1\n\n .. seealso:: :func:`pow2db`", "id": "f10937:m10"} {"signature": "def nextpow2(x):", "body": "res = ceil(log2(x))return res.astype('')", "docstring": "returns the smallest power of two that is greater than or equal to the\n absolute value of x.\n\n This function is useful for optimizing FFT operations, which are\n most efficient when sequence length is an exact power of two.\n\n :Example:\n\n .. doctest::\n\n >>> from spectrum import nextpow2\n >>> x = [255, 256, 257]\n >>> nextpow2(x)\n array([8, 8, 9])", "id": "f10937:m11"} {"signature": "def db2mag(xdb):", "body": "return **(xdb/)", "docstring": "Convert decibels (dB) to magnitude\n\n .. doctest::\n\n >>> from spectrum import db2mag\n >>> db2mag(-20)\n 0.1\n\n .. seealso:: :func:`pow2db`", "id": "f10937:m12"} {"signature": "def mag2db(x):", "body": "return * log10(x)", "docstring": "Convert magnitude to decibels (dB)\n\n The relationship between magnitude and decibels is:\n\n .. math:: X_{dB} = 20 * \\log_{10}(x)\n\n .. doctest::\n\n >>> from spectrum import mag2db\n >>> mag2db(0.1)\n -20.0\n\n .. seealso:: :func:`db2mag`", "id": "f10937:m13"} {"signature": "def aryule(X, order, norm='', allow_singularity=True):", "body": "assert norm in ['', '']r = CORRELATION(X, maxlags=order, norm=norm)A, P, k = LEVINSON(r, allow_singularity=allow_singularity)return A, P, k", "docstring": "r\"\"\"Compute AR coefficients using Yule-Walker method\n\n :param X: Array of complex data values, X(1) to X(N)\n :param int order: Order of autoregressive process to be fitted (integer)\n :param str norm: Use a biased or unbiased correlation.\n :param bool allow_singularity:\n\n :return:\n * AR coefficients (complex)\n * variance of white noise (Real)\n * reflection coefficients for use in lattice filter\n\n .. rubric:: Description:\n\n The Yule-Walker method returns the polynomial A corresponding to the\n AR parametric signal model estimate of vector X using the Yule-Walker\n (autocorrelation) method. The autocorrelation may be computed using a\n **biased** or **unbiased** estimation. In practice, the biased estimate of\n the autocorrelation is used for the unknown true autocorrelation. Indeed,\n an unbiased estimate may result in nonpositive-definite autocorrelation\n matrix.\n So, a biased estimate leads to a stable AR filter.\n The following matrix form represents the Yule-Walker equations. The are\n solved by means of the Levinson-Durbin recursion:\n\n .. math::\n\n \\left( \\begin{array}{cccc}\n r(1) & r(2)^* & \\dots & r(n)^*\\\\\n r(2) & r(1)^* & \\dots & r(n-1)^*\\\\\n \\dots & \\dots & \\dots & \\dots\\\\\n r(n) & \\dots & r(2) & r(1) \\end{array} \\right)\n \\left( \\begin{array}{cccc}\n a(2)\\\\\n a(3) \\\\\n \\dots \\\\\n a(n+1) \\end{array} \\right)\n =\n \\left( \\begin{array}{cccc}\n -r(2)\\\\\n -r(3) \\\\\n \\dots \\\\\n -r(n+1) \\end{array} \\right)\n\n The outputs consists of the AR coefficients, the estimated variance of the\n white noise process, and the reflection coefficients. These outputs can be\n used to estimate the optimal order by using :mod:`~spectrum.criteria`.\n\n .. rubric:: Examples:\n\n From a known AR process or order 4, we estimate those AR parameters using\n the aryule function.\n\n .. 
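The dB helpers above come in inverse pairs (10*log10 for power, 20*log10 for magnitude), and nextpow2 is typically used to pick an efficient FFT length. A short NumPy check of those relationships, assuming nothing beyond the formulas quoted in the docstrings:

    import numpy as np

    power = 0.1
    assert np.isclose(10 * np.log10(power), -10.0)       # pow2db
    assert np.isclose(10 ** (-10.0 / 10), power)         # db2pow
    magnitude = 0.1
    assert np.isclose(20 * np.log10(magnitude), -20.0)   # mag2db
    assert np.isclose(10 ** (-20.0 / 20), magnitude)     # db2mag
    # nextpow2: smallest power of two >= |x|, handy for FFT sizing
    n = int(np.ceil(np.log2(1000)))
    assert n == 10 and 2 ** n == 1024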
doctest::\n\n >>> from scipy.signal import lfilter\n >>> from spectrum import *\n >>> from numpy.random import randn\n >>> A =[1, -2.7607, 3.8106, -2.6535, 0.9238]\n >>> noise = randn(1, 1024)\n >>> y = lfilter([1], A, noise);\n >>> #filter a white noise input to create AR(4) process\n >>> [ar, var, reflec] = aryule(y[0], 4)\n >>> # ar should contains values similar to A\n\n The PSD estimate of a data samples is computed and plotted as follows:\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import *\n from pylab import *\n\n ar, P, k = aryule(marple_data, 15, norm='biased')\n psd = arma2psd(ar)\n plot(linspace(-0.5, 0.5, 4096), 10 * log10(psd/max(psd)))\n axis([-0.5, 0.5, -60, 0])\n\n .. note:: The outputs have been double checked against (1) octave outputs\n (octave has norm='biased' by default) and (2) Marple test code.\n\n .. seealso:: This function uses :func:`~spectrum.levinson.LEVINSON` and\n :func:`~spectrum.correlation.CORRELATION`. See the :mod:`~spectrum.criteria`\n module for criteria to automatically select the AR order.\n\n :References: [Marple]_", "id": "f10938:m0"} {"signature": "def __init__(self, data, order, norm='', NFFT=None, sampling=,scale_by_freq=True):", "body": "super(pyule, self).__init__(data, ar_order=order, NFFT=NFFT,scale_by_freq=scale_by_freq,sampling=sampling)self.sampling = samplingself._norm_aryule = norm", "docstring": "**Constructor**\n\n For a detailled description of the parameters, see :func:`aryule`.\n\n :param array data: input data (list or numpy.array)\n :param int order:\n :param int NFFT: total length of the final data sets (padded with\n zero if needed; default is 4096)\n :param float sampling: sampling frequency of the input :attr:`data`\n :param str norm: don't change if you do not know", "id": "f10938:c0:m0"} {"signature": "def pmtm(x, NW=None, k=None, NFFT=None, e=None, v=None, method='', show=False):", "body": "assert method in ['','','']N = len(x)if e is None and v is None:if NW is not None:[tapers, eigenvalues] = dpss(N, NW, k=k)else:raise ValueError(\"\")elif e is not None and v is not None:eigenvalues = e[:]tapers = v[:]else:raise ValueError(\"\")nwin = len(eigenvalues) if NFFT==None:NFFT = max(, **nextpow2(N))Sk_complex = np.fft.fft(np.multiply(tapers.transpose(), x), NFFT)Sk = abs(Sk_complex)**if method in ['', '']:if method == '':weights = np.ones((nwin, ))elif method == '':weights = np.array([_x/float(i+) for i,_x in enumerate(eigenvalues)])weights = weights.reshape(nwin,)elif method == '':sig2 = np.dot(x, x) / float(N)Sk = abs(np.fft.fft(np.multiply(tapers.transpose(), x), NFFT))**Sk = Sk.transpose()S = (Sk[:,] + Sk[:,]) / S = S.reshape(NFFT, )Stemp = np.zeros((NFFT,))S1 = np.zeros((NFFT,))tol = * sig2 / float(NFFT)i = a = sig2 * ( - eigenvalues)while sum(np.abs(S-S1))/NFFT > tol and i<:i = i + b1 = np.multiply(S, np.ones((,nwin)))b2 = np.multiply(S,eigenvalues.transpose()) + np.ones((NFFT,))*a.transpose()b = b1/b2wk=(b**)*(np.ones((NFFT,))*eigenvalues.transpose())S1 = sum(wk.transpose()*Sk.transpose())/ sum(wk.transpose())S1 = S1.reshape(NFFT, )Stemp = S1S1 = SS = Stemp weights=wkif show is True:from pylab import semilogyif method == \"\":Sk = np.mean(Sk * weights, axis=)else:Sk = np.mean(Sk * weights, axis=)semilogy(Sk)return Sk_complex, weights, eigenvalues", "docstring": "Multitapering spectral estimation\n\n :param array x: the data\n :param float NW: The time half bandwidth parameter (typical values are\n 2.5,3,3.5,4). 
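For readers who want to see the Yule-Walker system above solved directly, here is a hedged real-valued sketch using SciPy's Levinson-based Toeplitz solver. It is a cross-check of the idea, not the library's aryule implementation:

    import numpy as np
    from scipy.linalg import solve_toeplitz
    from scipy.signal import lfilter

    def yule_walker_ar(x, order):
        # biased autocorrelation estimates r[0..order]
        x = np.asarray(x, dtype=float)
        N = len(x)
        r = np.array([np.dot(x[:N - k], x[k:]) / N for k in range(order + 1)])
        # Yule-Walker normal equations: R a = -r[1:], R Toeplitz with first column r[:order]
        a = solve_toeplitz(r[:order], -r[1:])
        rho = r[0] + np.dot(a, r[1:])      # white-noise variance estimate
        return a, rho

    # recover the coefficients of a known AR(4) process (up to estimation error)
    A = [1, -2.7607, 3.8106, -2.6535, 0.9238]
    y = lfilter([1], A, np.random.randn(4096))
    a, rho = yule_walker_ar(y, 4)          # a should be close to A[1:]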
Must be provided, otherwise the tapering windows and\n eigen values (outputs of dpss) must be provided\n :param int k: uses the first k Slepian sequences. If *k* is not provided,\n *k* is set to *NW*2*.\n :param NW:\n :param e: the window concentrations (eigenvalues)\n :param v: the matrix containing the tapering windows\n :param str method: set how the eigenvalues are used. Must be\n in ['unity', 'adapt', 'eigen']\n :param bool show: plot results\n :return: Sk (complex), weights, eigenvalues\n\n In spectral estimation, the usual means of reducing bias is to use a tapering\n window. In order to reduce variance we need to average several spectra.\n The problem is that we have only one set of data. Thus we need to\n decompose the data set into several segments. Such methods are well known: the simple\n Daniell periodogram, Welch's method and so on. The drawback of such\n methods is a loss of resolution since the segments used to compute the\n spectrum are smaller than the data set.\n The interest of the multitapering method is to keep a good resolution while\n reducing bias and variance.\n\n How does it work? First we compute several simple periodograms with the\n whole data set (to keep good resolution), but each periodogram is computed\n with a different tapering window. Then, we average all these spectra.\n To avoid redundancy and bias due to the tapers, MTM uses special tapers.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import data_cosine, dpss, pmtm\n\n data = data_cosine(N=2048, A=0.1, sampling=1024, freq=200)\n # If you already have the DPSS windows\n [tapers, eigen] = dpss(2048, 2.5, 4)\n res = pmtm(data, e=eigen, v=tapers, show=False)\n # You do not need to compute the DPSS beforehand\n res = pmtm(data, NW=2.5, show=False)\n res = pmtm(data, NW=2.5, k=4, show=True)\n\n\n .. versionchanged:: 0.6.2\n\n APN modified method to return each Sk as complex values, the eigenvalues\n and the weights", "id": "f10939:m0"} {"signature": "def dpss(N, NW=None, k=None):", "body": "assert NW < N/ , \"\".format(NW, N)if k is None:k = min(round(*NW),N)k = int(max(k,))from numpy import dot, zeros, arange, sqrtmtspeclib.multitap.restype = Nonelam = zeros(k, dtype=float)tapers = zeros(k*N, dtype=float)tapsum = zeros(k, dtype=float)res = mtspeclib.multitap(c_int(N),c_int(k),lam.ctypes.data_as(c_void_p),c_float(NW),tapers.ctypes.data_as(c_void_p),tapsum.ctypes.data_as(c_void_p),)tapers = tapers.reshape(k,N).transpose() / sqrt(N)for i in range(k):if i% == :if tapsum[i]<:tapsum[i] *= -tapers[:,i] *= -else:if tapers[,i] < :tapsum[i] *= -tapers[:,i] *= -acvs = _autocov(tapers.transpose(), debias=False) * Nnidx = arange(N)W = float(NW)/Nr = *W*np.sinc(*W*nidx)r[] = *Weigvals = dot(acvs, r)return [tapers, eigvals]", "docstring": "r\"\"\"Discrete prolate spheroidal (Slepian) sequences\n\n Calculation of the Discrete Prolate Spheroidal Sequences also known as the\n Slepian sequences, and the corresponding eigenvalues.\n\n :param int N: desired window length\n :param float NW: The time half bandwidth parameter (typical values are\n 2.5,3,3.5,4).\n :param int k: returns the first k Slepian sequences. If *k* is not\n provided, *k* is set to *NW*2*.\n :return:\n * tapers, a matrix of tapering windows. Matrix is a N by *k* (k\n is the number of windows)\n * eigen, a vector of eigenvalues of length *k*\n\n The discrete prolate spheroidal or Slepian sequences derive from the following\n time-frequency concentration problem. 
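To make the multitaper idea concrete, the following hedged sketch averages the eigenspectra obtained with SciPy's DPSS tapers, weighting each by its concentration ratio. It illustrates the principle only and is not the pmtm implementation:

    import numpy as np
    from scipy.signal import windows

    def simple_mtm(x, NW=2.5, k=4, nfft=4096):
        x = np.asarray(x, dtype=float)
        N = len(x)
        # k Slepian tapers and their spectral concentration ratios
        tapers, ratios = windows.dpss(N, NW, Kmax=k, return_ratios=True)
        # one periodogram per taper, all using the full data set
        Sk = np.abs(np.fft.fft(tapers * x, nfft, axis=-1)) ** 2
        # concentration-weighted average of the k eigenspectra
        w = ratios / ratios.sum()
        return (w[:, None] * Sk).sum(axis=0)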
For all finite-energy sequences index\n limited to some set , which sequence maximizes the following ratio:\n\n .. math::\n\n \\lambda = \\frac{\\int_{-W}^{W}\\left| X(f) \\right|^2 df}\n {\\int_{-F_s/2}^{F_s/2}\\left| X(f) \\right|^2 df}\n\n where :math:`F_s` is the sampling frequency and :math:`|W| < F_s/2`.\n This ratio determines which index-limited sequence has the largest proportion of its\n energy in the band :math:`[-W,W]` with :math:`0 < \\lambda < 1`.\n The sequence maximizing the ratio is the first\n discrete prolate spheroidal or Slepian sequence. The second Slepian sequence\n maximizes the ratio and is orthogonal to the first Slepian sequence. The third\n Slepian sequence maximizes the ratio of integrals and is orthogonal to both\n the first and second Slepian sequences and so on.\n\n .. note:: Note about the implementation. Since the slepian generation is\n computationally expensive, we use a C implementation based on the C\n code written by Lees as published in:\n\n Lees, J. M. and J. Park (1995): Multiple-taper spectral analysis: A stand-alone\n C-subroutine: Computers & Geology: 21, 199-236.\n\n However, the original C code has been trimmed. Indeed, we only require the\n multitap function (that depends on jtridib, jtinvit functions only).\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import *\n from pylab import *\n N = 512\n [w, eigens] = dpss(N, 2.5, 4)\n plot(w)\n title('Slepian Sequences N=%s, NW=2.5' % N)\n axis([0, N, -0.15, 0.15])\n legend(['1st window','2nd window','3rd window','4th window'])\n\n Windows are normalised:\n\n .. math:: \\sum_k h_k h_k = 1\n\n :references: [Percival]_\n\n Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and\n uncertainty V: The discrete case. Bell System Technical Journal,\n Volume 57 (1978), 1371430\n\n .. note:: the C code to create the slepian windows is extracted from original C code\n from Lees and Park (1995) and uses the conventions of Percival and Walden (1993).\n Functions that are not used here were removed.", "id": "f10939:m1"} {"signature": "def _other_dpss_method(N, NW, Kmax):", "body": "from scipy import linalg as laKmax = int(Kmax)W = float(NW)/Nab = np.zeros((,N), '')nidx = np.arange(N)ab[,:] = nidx[:]*(N-nidx[:])/ab[] = ((N--*nidx)/)** * np.cos(*np.pi*W)l,v = la.eig_banded(ab, select='', select_range=(N-Kmax, N-))dpss = v.transpose()[::-]fix_symmetric = (dpss[::].sum(axis=) < )for i, f in enumerate(fix_symmetric):if f:dpss[*i] *= -fix_skew = (dpss[::,] < )for i, f in enumerate(fix_skew):if f:dpss[*i+] *= -acvs = _autocov(dpss, debias=False) * Nr = *W*np.sinc(*W*nidx)r[] = *Weigvals = np.dot(acvs, r)return dpss, eigvals", "docstring": "Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]\n for a given frequency-spacing multiple NW and sequence length N.\n\n See dpss function that is the official version. This version is indepedant\n of the C code and relies on Scipy function. 
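The SciPy-only route mentioned here reduces to an eigenproblem on a symmetric tridiagonal matrix (the standard Percival & Walden formulation). A hedged sketch of that computation, offered as an illustration rather than a drop-in replacement for the function above:

    import numpy as np
    from scipy.linalg import eig_banded

    def dpss_tridiag(N, NW, k):
        W = float(NW) / N
        n = np.arange(N)
        ab = np.zeros((2, N))
        ab[0, 1:] = n[1:] * (N - n[1:]) / 2.0                           # off-diagonal
        ab[1] = ((N - 1 - 2 * n) / 2.0) ** 2 * np.cos(2 * np.pi * W)    # diagonal
        # the k eigenvectors with the largest eigenvalues are the tapers
        _, v = eig_banded(ab, select='i', select_range=(N - k, N - 1))
        return v[:, ::-1].T   # shape (k, N), best-concentrated taper first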
However, it is slower by a factor 3\n\n Tridiagonal form of DPSS calculation from:", "id": "f10939:m2"} {"signature": "def _autocov(s, **kwargs):", "body": "debias = kwargs.pop('', True)axis = kwargs.get('', -)if debias:s = _remove_bias(s, axis)kwargs[''] = Falsereturn _crosscov(s, s, **kwargs)", "docstring": "Returns the autocovariance of signal s at all lags.\n\n Adheres to the definition\n sxx[k] = E{S[n]S[n+k]} = cov{S[n],S[n+k]}\n where E{} is the expectation operator, and S is a zero mean process", "id": "f10939:m3"} {"signature": "def _crosscov(x, y, axis=-, all_lags=False, debias=True):", "body": "if x.shape[axis] != y.shape[axis]:raise ValueError('')if debias:x = _remove_bias(x, axis)y = _remove_bias(y, axis)slicing = [slice(d) for d in x.shape]slicing[axis] = slice(None,None,-)sxy = _fftconvolve(x, y[tuple(slicing)], axis=axis, mode='')N = x.shape[axis]sxy /= Nif all_lags:return sxyslicing[axis] = slice(N-,*N-)return sxy[tuple(slicing)]", "docstring": "Returns the crosscovariance sequence between two ndarrays.\n This is performed by calling fftconvolve on x, y[::-1]\n\n Parameters\n\n\n x: ndarray\n y: ndarray\n axis: time axis\n\n all_lags: {True/False}\n whether to return all nonzero lags, or to clip the length of s_xy\n to be the length of x and y. If False, then the zero lag covariance\n is at index 0. Otherwise, it is found at (len(x) + len(y) - 1)/2\n\n debias: {True/False}\n Always removes an estimate of the mean along the axis, unless\n told not to.\n\n\n cross covariance is defined as\n sxy[k] := E{X[t]*Y[t+k]}, where X,Y are zero mean random processes", "id": "f10939:m4"} {"signature": "def _crosscorr(x, y, **kwargs):", "body": "sxy = _crosscov(x, y, **kwargs)sx = np.std(x)sy = np.std(y)return sxy/(sx*sy)", "docstring": "Returns the crosscorrelation sequence between two ndarrays.\nThis is performed by calling fftconvolve on x, y[::-1]\n\nParameters\n\n\nx: ndarray\ny: ndarray\naxis: time axis\nall_lags: {True/False}\nwhether to return all nonzero lags, or to clip the length of r_xy\nto be the length of x and y. If False, then the zero lag correlation\nis at index 0. Otherwise, it is found at (len(x) + len(y) - 1)/2\n\nNotes\n\n\ncross correlation is defined as\nrxy[k] := E{X[t]*Y[t+k]}/(E{X*X}E{Y*Y})**.5,\nwhere X,Y are zero mean random processes. 
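A hedged brute-force check of the cross-covariance definition quoted above, sxy[k] = E{X[t] Y[t+k]} for zero-mean processes; handy for validating an FFT-based implementation on small inputs:

    import numpy as np

    def crosscov_direct(x, y, maxlag):
        # demean, then estimate E{X[t] * Y[t+k]} for k = 0..maxlag
        x = np.asarray(x, dtype=float) - np.mean(x)
        y = np.asarray(y, dtype=float) - np.mean(y)
        N = len(x)
        return np.array([np.dot(x[:N - k], y[k:]) / N for k in range(maxlag + 1)])

    x = np.random.randn(256)
    sxx = crosscov_direct(x, x, 3)      # zero-lag term equals the variance of x
    assert np.isclose(sxx[0], x.var())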
It is the noramlized cross\ncovariance.", "id": "f10939:m5"} {"signature": "def _remove_bias(x, axis):", "body": "padded_slice = [slice(d) for d in x.shape]padded_slice[axis] = np.newaxismn = np.mean(x, axis=axis)return x - mn[tuple(padded_slice)]", "docstring": "Subtracts an estimate of the mean from signal x at axis", "id": "f10939:m6"} {"signature": "def _fftconvolve(in1, in2, mode=\"\", axis=None):", "body": "from scipy.fftpack import fftn, fft, ifftn, ifftfrom scipy.signal.signaltools import _centeredfrom numpy import array, products1 = array(in1.shape)s2 = array(in2.shape)complex_result = (np.issubdtype(in1.dtype, np.complexfloating) ornp.issubdtype(in2.dtype, np.complexfloating))if axis is None:size = s1+s2-fslice = tuple([slice(, int(sz)) for sz in size])else:equal_shapes = s1==s2equal_shapes[axis] = Trueassert equal_shapes.all(), ''size = s1[axis]+s2[axis]-fslice = [slice(l) for l in s1]fslice[axis] = slice(, int(size))fslice = tuple(fslice)fsize = (**np.ceil(np.log2(size))).astype(np.int64)if axis is None:IN1 = fftn(in1,fsize)IN1 *= fftn(in2,fsize)ret = ifftn(IN1)[fslice].copy()else:IN1 = fft(in1,fsize,axis=axis)IN1 *= fft(in2,fsize,axis=axis)ret = ifft(IN1,axis=axis)[fslice].copy()if not complex_result:del IN1ret = ret.realif mode == \"\":return retelif mode == \"\":if product(s1,axis=) > product(s2,axis=):osize = s1else:osize = s2return _centered(ret,osize)elif mode == \"\":return _centered(ret,abs(s2-s1)+)", "docstring": "Convolve two N-dimensional arrays using FFT. See convolve.\n\n This is a fix of scipy.signal.fftconvolve, adding an axis argument and\n importing locally the stuff only needed for this function", "id": "f10939:m7"} {"signature": "def CORRELOGRAMPSD(X, Y=None, lag=-, window='',norm='', NFFT=, window_params={},correlation_method=''):", "body": "N = len(X)assert lag'assert correlation_method in ['', '']if Y is None:Y = numpy.array(X)crosscorrelation = Falseelse:crosscorrelation = Trueif NFFT is None:NFFT = Npsd = numpy.zeros(NFFT, dtype=complex)w = Window(*lag+, window, **window_params)w = w.data[lag+:]if correlation_method == '':rxy = CORRELATION (X, Y, maxlags=lag, norm=norm)elif correlation_method == '':rxy, _l = xcorr (X, Y, maxlags=lag, norm=norm)rxy = rxy[lag:]psd[] = rxy[]psd[:lag+] = rxy[:] * wif crosscorrelation is True:if correlation_method == '':ryx = CORRELATION(Y, X, maxlags=lag, norm=norm)elif correlation_method == '':ryx, _l = xcorr(Y, X, maxlags=lag, norm=norm)ryx = ryx[lag:]psd[-:NFFT-lag-:-] = ryx[:].conjugate() * welse: psd[-:NFFT-lag-:-] = rxy[:].conjugate() * wpsd = numpy.real(fft(psd))return psd", "docstring": "PSD estimate using correlogram method.\n\n\n :param array X: complex or real data samples X(1) to X(N)\n :param array Y: complex data samples Y(1) to Y(N). If provided, computes\n the cross PSD, otherwise the PSD is returned\n :param int lag: highest lag index to compute. Must be less than N\n :param str window_name: see :mod:`window` for list of valid names\n :param str norm: one of the valid normalisation of :func:`xcorr` (biased, \n unbiased, coeff, None)\n :param int NFFT: total length of the final data sets (padded with zero \n if needed; default is 4096)\n :param str correlation_method: either `xcorr` or `CORRELATION`.\n CORRELATION should be removed in the future.\n\n :return:\n * Array of real (cross) power spectral density estimate values. This is\n a two sided array with negative values following the positive ones\n whatever is the input data (real or complex).\n\n .. 
rubric:: Description:\n\n The exact power spectral density is the Fourier transform of the\n autocorrelation sequence:\n\n .. math:: P_{xx}(f) = T \\sum_{m=-\\infty}^{\\infty} r_{xx}[m] exp^{-j2\\pi fmT}\n\n The correlogram method of PSD estimation substitutes a finite sequence of\n autocorrelation estimates :math:`\\hat{r}_{xx}` in place of :math:`r_{xx}`.\n This estimation can be computed with :func:`xcorr` or :func:`CORRELATION` by\n choosing a proper lag `L`. The estimated PSD is then\n\n .. math:: \\hat{P}_{xx}(f) = T \\sum_{m=-L}^{L} \\hat{r}_{xx}[m] exp^{-j2\\pi fmT}\n\n The lag index must be less than the number of data samples `N`. Ideally, it\n should be around `L/10` [Marple]_ so as to avoid greater statistical\n variance associated with higher lags.\n\n To reduce the leakage of the implicit rectangular window and therefore to\n reduce the bias in the estimate, a tapering window is normally used and leads\n to the so-called Blackman and Tukey correlogram:\n\n .. math:: \\hat{P}_{BT}(f) = T \\sum_{m=-L}^{L} w[m] \\hat{r}_{xx}[m] exp^{-j2\\pi fmT}\n\n The correlogram for the cross power spectral estimate is\n\n .. math:: \\hat{P}_{xy}(f) = T \\sum_{m=-L}^{L} \\hat{r}_{xy}[m] exp^{-j2\\pi fmT}\n\n which is computed when :attr:`Y` is provided. When :attr:`Y` is not provided,\n :math:`r_{yx} = r_{xy}` so we compute the correlation only once.\n\n .. plot::\n :width: 80%\n :include-source:\n\n from spectrum import CORRELOGRAMPSD, marple_data\n from spectrum.tools import cshift\n from pylab import log10, axis, grid, plot, linspace\n\n psd = CORRELOGRAMPSD(marple_data, marple_data, lag=15)\n f = linspace(-0.5, 0.5, len(psd))\n psd = cshift(psd, len(psd)/2)\n plot(f, 10*log10(psd/max(psd)))\n axis([-0.5,0.5,-50,0])\n grid(True)\n\n .. seealso:: :func:`create_window`, :func:`CORRELATION`, :func:`xcorr`,\n :class:`pcorrelogram`.", "id": "f10940:m0"} {"signature": "def __init__(self, data, sampling=, lag=-,window='', NFFT=None, scale_by_freq=True,detrend=None):", "body": "super(pcorrelogram, self).__init__(data,window=window,sampling=sampling,NFFT=NFFT,scale_by_freq=scale_by_freq,lag=lag,detrend=detrend)", "docstring": "**Correlogram Constructor**\n\n :param array data: input data (list or numpy.array)\n :param float sampling: sampling frequency of the input :attr:`data`.\n :param int lag:\n :param str window: a tapering window. 
See :class:`~spectrum.window.Window`.\n :param int NFFT: total length of the final data sets (padded with \n zero if needed; default is 4096)\n :param bool scale_by_freq:\n :param str detrend:", "id": "f10940:c0:m0"} {"signature": "def TOEPLITZ(T0, TC, TR, Z):", "body": "assert len(TC)>assert len(TC)==len(TR)M = len(TC)X = numpy.zeros(M+,dtype=complex)A = numpy.zeros(M,dtype=complex)B = numpy.zeros(M,dtype=complex)P = T0if P == : raise ValueError(\"\")if P == : raise ValueError(\"\")X[] = Z[]/T0 for k in range(, M):save1 = TC[k]save2 = TR[k]beta = X[]*TC[k]if k == :temp1 = -save1 / Ptemp2 = -save2 / Pelse:for j in range(, k):save1 = save1 + A[j] * TC[k-j-]save2 = save2 + B[j] * TR[k-j-]beta = beta + X[j+] * TC[k-j-]temp1 = -save1 / Ptemp2 = -save2/PP = P * ( - (temp1*temp2))if P <= :raise ValueError(\"\")A[k] = temp1B[k] = temp2alpha = (Z[k+]-beta)/Pif k == : X[k+] = alphafor j in range(,k+):X[j] = X[j] + alpha * B[k-j]continuefor j in range(, k):kj = k-j-save1 = A[j]A[j] = save1 + temp1 * B[kj] B[kj] = B[kj] + temp2*save1X[k+] = alphafor j in range(,k+):X[j] = X[j] + alpha*B[k-j]return X", "docstring": "solve the general toeplitz linear equations\n\n Solve TX=Z\n\n :param T0: zero lag value\n :param TC: r1 to rN \n :param TR: r1 to rN\n\n returns X\n\n requires 3M^2+M operations instead of M^3 with gaussian elimination\n\n .. warning:: not used right now", "id": "f10942:m0"} {"signature": "def HERMTOEP(T0, T, Z):", "body": "assert len(T)>M = len(T)X = numpy.zeros(M+,dtype=complex)A = numpy.zeros(M,dtype=complex)P = T0if P == : raise ValueError(\"\")X[] = Z[]/T0 for k in range(, M):save = T[k]beta = X[]*T[k]if k == :temp = -save / Pelse:for j in range(, k):save = save + A[j] * T[k-j-]beta = beta + X[j+] * T[k-j-]temp = -save / PP = P * ( - (temp.real**+temp.imag**))if P <= :raise ValueError(\"\")A[k] = tempalpha = (Z[k+]-beta)/Pif k == :X[k+] = alphafor j in range(,k+):X[j] = X[j] + alpha * A[k-j].conjugate()continuekhalf = (k+)//for j in range(, khalf):kj = k-j-save=A[j]A[j] = save+temp*A[kj].conjugate() if j != kj:A[kj] = A[kj] + temp*save.conjugate()X[k+] = alphafor j in range(,k+):X[j] = X[j] + alpha * A[k-j].conjugate()return X", "docstring": "solve Tx=Z by a variation of Levinson algorithm where T \n is a complex hermitian toeplitz matrix\n\n :param T0: zero lag value\n :param T: r1 to rN\n\n :return: X\n\n used by eigen PSD method", "id": "f10942:m1"} {"signature": "def readwav(filename):", "body": "from scipy.io.wavfile import read as readwavsamplerate, signal = readwav(filename)return signal, samplerate", "docstring": "Read a WAV file and returns the data and sample rate\n\n ::\n\n from spectrum.io import readwav\n readwav()", "id": "f10943:m0"} {"signature": "def data_cosine(N=, A=, sampling=, freq=):", "body": "t = arange(, float(N)/sampling, /sampling)x = cos(*pi*t*freq) + A * randn(t.size)return x", "docstring": "r\"\"\"Return a noisy cosine at a given frequency.\n\n :param N: the final data size\n :param A: the strength of the noise\n :param float sampling: sampling frequency of the input :attr:`data`.\n :param float freq: the frequency :math:`f_0` of the cosine.\n\n .. math:: x[t] = cos(2\\pi t * f_0) + A w[t]\n\n where w[t] is a white noise of variance 1.\n\n .. 
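As a sanity check of the Hermitian-Toeplitz solver above, the same kind of system can be solved with SciPy's generic Levinson-based routine. A hedged sketch with arbitrary test values:

    import numpy as np
    from scipy.linalg import solve_toeplitz, toeplitz

    # cross-check of a Hermitian-Toeplitz solve (the job HERMTOEP does with a
    # Levinson-type recursion) using SciPy's generic Toeplitz solver
    T0 = 3.0
    T = np.array([1.0 + 0.5j, 0.2 - 0.1j])        # lags r1, r2
    Z = np.array([1.0, 2.0, 3.0], dtype=complex)
    col = np.r_[T0, T]                             # first column of the matrix
    row = np.r_[T0, T.conj()]                      # first row (Hermitian structure)
    X = solve_toeplitz((col, row), Z)
    assert np.allclose(toeplitz(col, row) @ X, Z)  # residual should be ~0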
doctest::\n\n >>> from spectrum import data_cosine\n >>> a = data_cosine(N=1024, sampling=1024, A=0.5, freq=100)", "id": "f10944:m0"} {"signature": "def data_two_freqs(N=):", "body": "nn = arange(N)xx = cos(*pi*nn) + sin(*pi*nn) + *randn(nn.size)return xx", "docstring": "A simple test example with two close frequencies", "id": "f10944:m1"} {"signature": "def spectrum_data(filename):", "body": "import osimport pkg_resourcesinfo = pkg_resources.get_distribution('')location = info.locationshare = os.sep.join([location, \"\", ''])filename2 = os.sep.join([share, filename])if os.path.exists(filename2):return filename2else:raise Exception('' % filename2)", "docstring": "Simple utilities to retrieve data sets from", "id": "f10944:m2"} {"signature": "def __init__(self, data, sampling=):", "body": "self.data = dataself.N = len(data)self.sampling = samplingself.dt = /sampling", "docstring": ":param array data: input data (list or numpy.array)\n:param sampling: the sampling frequency of the data (default 1Hz)", "id": "f10944:c0:m0"} {"signature": "def plot(self, **kargs):", "body": "from pylab import plot, linspace, xlabel, ylabel, gridtime = linspace(*self.dt, self.N*self.dt, self.N)plot(time, self.data, **kargs)xlabel('')ylabel('')grid(True)", "docstring": "Plot the data set, using the sampling information to set the x-axis\n correctly.", "id": "f10944:c0:m1"} {"signature": "@propertydef roles(self):", "body": "ret = {None}if self.state == POST_STATE.DEL:return retret.add('')return ret", "docstring": "BaseUser.roles \u7684\u5b9e\u73b0\uff0c\u8fd4\u56de\u7528\u6237\u53ef\u7528\u89d2\u8272\n:return:", "id": "f10963:c1:m0"} {"signature": "@classmethoddef gen_password_and_salt(cls, password_text):", "body": "salt = os.urandom()dk = hashlib.pbkdf2_hmac(config.PASSWORD_HASH_FUNC_NAME,password_text.encode(''),salt,config.PASSWORD_HASH_ITERATIONS,)return {'': dk, '': salt}", "docstring": "\u751f\u6210\u52a0\u5bc6\u540e\u7684\u5bc6\u7801\u548c\u76d0", "id": "f10963:c1:m1"} {"signature": "@classmethoddef gen_token(cls):", "body": "token = os.urandom()token_time = int(time.time())return {'': token, '': token_time}", "docstring": "\u751f\u6210 access_token", "id": "f10963:c1:m2"} {"signature": "@classmethoddef get_by_token(cls, token):", "body": "try:return cls.get(cls.token == token)except DoesNotExist:return None", "docstring": "\u6839\u636e access_token \u83b7\u53d6\u7528\u6237", "id": "f10963:c1:m4"} {"signature": "def set_password(self, new_password):", "body": "info = self.gen_password_and_salt(new_password)self.password = info['']self.salt = info['']self.save()", "docstring": "\u8bbe\u7f6e\u5bc6\u7801", "id": "f10963:c1:m5"} {"signature": "def _auth_base(self, password_text):", "body": "dk = hashlib.pbkdf2_hmac(config.PASSWORD_HASH_FUNC_NAME,password_text.encode(''),get_bytes_from_blob(self.salt),config.PASSWORD_HASH_ITERATIONS)if self.password == dk:return self", "docstring": "\u5df2\u83b7\u53d6\u4e86\u7528\u6237\u5bf9\u8c61\uff0c\u8fdb\u884c\u5bc6\u7801\u6821\u9a8c\n:param password_text:\n:return:", "id": "f10963:c1:m6"} {"signature": "@classmethoddef use(cls, name, method: [str, Set, List], url=None):", "body": "if not isinstance(method, (str, list, set, tuple)):raise BaseException('' % type(method).__name__)if isinstance(method, str):method = {method}cls._interface[name] = [{'': method, '': url}]", "docstring": "interface helper function", "id": "f10982:c0:m0"} {"signature": "@classmethoddef discard(cls, name):", "body": "cls._interface.pop(name, None)", "docstring": "interface helper function", 
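The user model above stores a salted PBKDF2 digest and re-derives it at login time. A hedged standard-library sketch of that flow; the hash name and iteration count are illustrative assumptions, not the project's configured values:

    import hashlib, hmac, os

    def hash_password(password, iterations=100_000):
        salt = os.urandom(32)
        dk = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), salt, iterations)
        return dk, salt

    def verify_password(password, dk, salt, iterations=100_000):
        candidate = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), salt, iterations)
        return hmac.compare_digest(candidate, dk)

    dk, salt = hash_password('s3cret')
    assert verify_password('s3cret', dk, salt)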
"id": "f10982:c0:m2"} {"signature": "async def get_ip(self) -> Union[IPv4Address, IPv6Address]:", "body": "xff = await self.get_x_forwarded_for()if xff: return xff[]ip_addr = self._request.transport.get_extra_info('')[]return ip_address(ip_addr)", "docstring": "get ip address of client\n:return:", "id": "f10982:c0:m13"} {"signature": "def finish(self, code, data=NotImplemented):", "body": "if data is NotImplemented:data = RETCODE.txt_cn.get(code, None)self.ret_val = {'': code, '': data} self.response = web.json_response(self.ret_val, dumps=json_ex_dumps)logger.debug('' % self.ret_val)self._finish_end()", "docstring": "Set response as {'code': xxx, 'data': xxx}\n:param code:\n:param data:\n:return:", "id": "f10982:c0:m19"} {"signature": "def finish_raw(self, body: bytes, status: int = , content_type: Optional[str] = None):", "body": "self.ret_val = bodyself.response = web.Response(body=body, status=status, content_type=content_type)logger.debug('' % len(body))self._finish_end()", "docstring": "Set raw response\n:param body:\n:param status:\n:param content_type:\n:return:", "id": "f10982:c0:m20"} {"signature": "@propertydef route_info(self):", "body": "return self._request.match_info", "docstring": "info matched by router\n:return:", "id": "f10982:c0:m30"} {"signature": "@classmethoddef _ready(cls):", "body": "sync_call(cls.ready)", "docstring": "private version of cls.ready()", "id": "f10982:c0:m31"} {"signature": "@classmethoddef ready(cls):", "body": "pass", "docstring": "All modules loaded, and ready to serve.\nEmitted after register routes and before loop start\n:return:", "id": "f10982:c0:m32"} {"signature": "@classmethoddef add_soft_foreign_key(cls, column, table_name, alias=None):", "body": "if column in cls.fields:table = SQLForeignKey(table_name, column, cls.fields[column], True)if alias:if alias in cls.foreign_keys_table_alias:logger.warning(\"\" %(cls.__name__, column, table_name))cls.foreign_keys_table_alias[alias] = tableif column not in cls.foreign_keys:cls.foreign_keys[column] = [table]else:if not alias:logger.warning(\"\" %(cls.__name__, column, table_name))cls.foreign_keys[column].append(table)return True", "docstring": "the column stores foreign table's primary key but isn't a foreign key (to avoid constraint)\nwarning: if the table not exists, will crash when query with loadfk\n:param column: table's column\n:param table_name: foreign table name\n:param alias: table name's alias. 
Default is as same as table name.\n:return: True, None", "id": "f10982:c3:m2"} {"signature": "@propertydef current_request_role(self) -> [int, str]:", "body": "role_val = self.headers.get('')return int(role_val) if role_val and role_val.isdigit() else role_val", "docstring": "Current role requested by client.\n:return:", "id": "f10982:c3:m6"} {"signature": "async def load_fk(self, info: SQLQueryInfo, records: Iterable[DataRecord]) -> Union[List, Iterable]:", "body": "async def check(data, records):for column, fkvalues_lst in data.items():for fkvalues in fkvalues_lst:pks = []all_ni = Truevcls = self.app.tables[fkvalues['']]for i in records:val = i.get(column, NotImplemented)if val != NotImplemented:all_ni = Falsepks.append(val)if all_ni:logger.debug(\"\" % column)continuev = vcls(self.app, self._request) await v._prepare()info2 = SQLQueryInfo()info2.set_select(ALL_COLUMNS)info2.add_condition(PRIMARY_KEY, SQL_OP.IN, pks)info2.bind(v)try:fk_records, count = await v._sql.select_page(info2, size=-)except RecordNotFound:continueawait v.check_records_permission(info2, fk_records)fk_dict = {}for i in fk_records:fk_dict[i[vcls.primary_key]] = icolumn_to_set = fkvalues.get('', column) or columnfor _, record in enumerate(records):k = record.get(column, NotImplemented)if k in fk_dict:record[column_to_set] = fk_dict[k]if fkvalues['']:await check(fkvalues[''], fk_records)await check(info.loadfk, records)return records", "docstring": ":param info:\n:param records: the data got from database and filtered from permission\n:return:", "id": "f10982:c3:m8"} {"signature": "async def _call_handle(self, func, *args):", "body": "await async_call(func, *args)if self.is_finished:raise FinishQuitException()", "docstring": "call and check result of handle_query/read/insert/update", "id": "f10982:c3:m9"} {"signature": "@staticmethod@abstractmethodasync def _fetch_fields(cls_or_self):", "body": "pass", "docstring": "4 values must be set up in this function:\n1. cls.table_name: str\n2. cls.primary_key: str\n3. cls.fields: Dict['column', SQL_TYPE]\n4. 
cls.foreign_keys: Dict['column', List[SQLForeignKey]]\n\n:param cls_or_self:\n:return:", "id": "f10982:c3:m17"} {"signature": "async def after_read(self, records: List[DataRecord]):", "body": "pass", "docstring": "\u4e00\u5bf9\u591a\uff0c\u5f53\u6709\u4e00\u4e2a\u6743\u9650\u68c0\u67e5\u5931\u8d25\u65f6\u5373\u8fd4\u56de\u5f02\u5e38\n:param records:\n:return:", "id": "f10982:c3:m19"} {"signature": "async def before_insert(self, raw_post: Dict, values: SQLValuesToWrite):", "body": "pass", "docstring": "\u4e00\u5bf9\u4e00\n:param raw_post:\n:param values:\n:return:", "id": "f10982:c3:m20"} {"signature": "async def after_insert(self, raw_post: Dict, values: SQLValuesToWrite, record: DataRecord):", "body": "pass", "docstring": "\u4e00\u5bf9\u4e00\nEmitted before finish\n:param raw_post:\n:param values:\n:param record:\n:return:", "id": "f10982:c3:m21"} {"signature": "async def before_update(self, raw_post: Dict, values: SQLValuesToWrite, records: List[DataRecord]):", "body": "pass", "docstring": "\u4e00\u5bf9\u591a\uff0c\u5f53\u6709\u4e00\u4e2a\u6743\u9650\u68c0\u67e5\u5931\u8d25\u65f6\u5373\u8fd4\u56de\u5f02\u5e38\nraw_post \u6743\u9650\u8fc7\u6ee4\u548c\u5217\u8fc7\u6ee4\u524d\uff0cvalues \u8fc7\u6ee4\u540e\n:param raw_post:\n:param values:\n:param records:\n:return:", "id": "f10982:c3:m22"} {"signature": "async def after_update(self, raw_post: Dict, values: SQLValuesToWrite,old_records: List[DataRecord], records: List[DataRecord]):", "body": "", "docstring": ":param old_records:\n:param raw_post:\n:param values:\n:param records:\n:return:", "id": "f10982:c3:m23"} {"signature": "async def before_delete(self, records: List[DataRecord]):", "body": "pass", "docstring": ":param records:\n:return:", "id": "f10982:c3:m24"} {"signature": "async def after_delete(self, deleted_records: List[DataRecord]):", "body": "pass", "docstring": ":param deleted_records:\n:return:", "id": "f10982:c3:m25"} {"signature": "@staticmethod@abstractmethodasync def permission_valid_check(cls):", "body": "pass", "docstring": "To make sure current permission settings can fit with sql tables.\n:param cls:\n:return:", "id": "f10982:c3:m26"} {"signature": "@abstractmethodasync def select_one(self, info: SQLQueryInfo) -> DataRecord:", "body": "raise NotImplementedError()", "docstring": "Select one record from database\n:param info:\n:return: record", "id": "f10983:c0:m1"} {"signature": "@abstractmethodasync def select_page(self, info: SQLQueryInfo, size=, page=) -> Tuple[Tuple[DataRecord, ...], int]:", "body": "raise NotImplementedError()", "docstring": "Select from database\n:param info:\n:param size: -1 means infinite\n:param page:\n:param need_count: if True, get count as second return value, otherwise -1\n:return: records. 
count", "id": "f10983:c0:m2"} {"signature": "@abstractmethodasync def update(self, records: Iterable[DataRecord], values: SQLValuesToWrite, returning=False) -> Union[int, Iterable[DataRecord]]:", "body": "raise NotImplementedError()", "docstring": ":param records:\n:param values:\n:param returning:\n:return: return count if returning is False, otherwise records", "id": "f10983:c0:m3"} {"signature": "@abstractmethodasync def insert(self, values_lst: Iterable[SQLValuesToWrite], returning=False) -> Union[int, List[DataRecord]]:", "body": "raise NotImplementedError()", "docstring": ":param values_lst:\n:param returning:\n:return: return count if returning is False, otherwise records", "id": "f10983:c0:m4"} {"signature": "@abstractmethoddef get_current_user(self):", "body": "pass", "docstring": "Override to determine the current user from, e.g., a cookie.", "id": "f10987:c1:m0"} {"signature": "@abstractmethoddef setup_user_key(self, key, expires=):", "body": "pass", "docstring": "setup user key for server", "id": "f10987:c1:m2"} {"signature": "@abstractmethoddef teardown_user_key(self):", "body": "pass", "docstring": "teardown user key for server, make the token invalid here", "id": "f10987:c1:m3"} {"signature": "@staticmethoddef parse_order(text):", "body": "orders = []for i in map(str.strip, text.split('')):items = i.split('', )if len(items) == : column, order = items[], ''elif len(items) == : column, order = itemselse: raise InvalidParams(\"\")order = order.lower()if order not in ('', '', ''):raise InvalidParams('' % order)if order != '':orders.append(SQLQueryOrder(column, order))return orders", "docstring": ":param text: order=id.desc, xxx.asc\n:return: [\n [, asc|desc|default],\n [, asc|desc|default],\n]", "id": "f10988:c7:m2"} {"signature": "@classmethoddef parse_select(cls, text: str) -> Set:", "body": "if text == '':return ALL_COLUMNS selected_columns = set(filter(lambda x: x, map(str.strip, text.split(''))))if not selected_columns:raise InvalidParams(\"\")return selected_columns", "docstring": "get columns from select text\n:param text: col1, col2\n:return: ALL_COLUMNS or ['col1', 'col2']", "id": "f10988:c7:m4"} {"signature": "@classmethoddef parse_load_fk(cls, data: Dict[str, List[Dict[str, object]]]) -> Dict[str, List[Dict[str, object]]]:", "body": "default_value_dict = {'': None, '': None, '': None, '': None}def value_normalize_dict(value):def check(k, v):if k == '': return isinstance(v, str)if k == '': return isinstance(v, str)if k == '': return isinstance(v, str)if k == '': return isinstance(v, dict)valid = {k: v for k, v in value.items() if check(k, v)}if not valid: return default_value_dict.copy()if '' in valid and valid['']:valid[''] = cls.parse_load_fk(valid[''])for k, v in default_value_dict.items():valid.setdefault(k, v)return validdef value_normalize(value, no_list=True):if value is None:return default_value_dict.copy()elif not no_list and isinstance(value, List):return list(map(value_normalize, value))elif isinstance(value, str):val = default_value_dict.copy()val[''] = valuereturn valelif isinstance(value, Dict):return value_normalize_dict(value)else:raise InvalidParams('' % value)new_data = {}if not isinstance(data, dict):raise InvalidParams('' % data)for k, v in data.items():nv = value_normalize(v, False)new_data[k] = nv if isinstance(nv, List) else [nv]return new_data", "docstring": ":param data:{\n : role,\n : role,\n : {\n 'role': role,\n 'loadfk': { ... },\n },\n:return: {\n : {\n 'role': role,\n },\n ...\n : {\n 'role': role,\n 'loadfk': { ... 
},\n },\n}", "id": "f10988:c7:m5"} {"signature": "def add_condition(self, field_name, op, value):", "body": "if not isinstance(op, SQL_OP):if op not in SQL_OP.txt2op:raise SQLOperatorInvalid(op)else:op = SQL_OP.txt2op.get(op)self.conditions.append([field_name, op, value])", "docstring": "Add a query condition and validate it.\nraise ParamsException if failed.\nself.view required\n:param field_name:\n:param op:\n:param value:\n:return: None", "id": "f10988:c7:m6"} {"signature": "def __init__(self, role: (str, int), data: dict = None, based_on=None):", "body": "self.role = roleif based_on:self.rules = copy.deepcopy(based_on.rules)else:self.rules = {}self.query_condition_params = {}self.query_condition_params_funcs = {}self.common_checks = []self.record_checks = []if data:def convert(val: str):if val == '': return ''val = val.upper()ret = []if '' in val: ret.append(A.QUERY)if '' in val: ret.append(A.WRITE)if '' in val: ret.append(A.READ)if '' in val: ret.append(A.CREATE)if '' in val: ret.append(A.DELETE)return retdef parse(v):ret = copy.deepcopy(v)if ret == str:ret = convert(ret)elif ret == dict:for k, v in ret.items():ret[k] = convert(v)return retfor k, v in data.items():if isinstance(v, dict):if k in self.rules and isinstance(self.rules[k], dict):self.rules[k].update(parse(v))continueself.rules[k] = parse(v)", "docstring": "{\n 'user': {\n 'username': ['query', 'read'],\n 'nickname': ['query', 'read'],\n 'password': ['query', 'read'],\n '*': ['write'],\n },\n 'topic': '*',\n 'test': ['query', 'read', 'write', 'create', 'delete'],\n}\n:param role: \n:param data: \n:param based_on:", "id": "f10990:c3:m0"} {"signature": "def add_common_check(self, actions, table, func):", "body": "self.common_checks.append([table, actions, func])\"\"\"\"\"\"", "docstring": "emitted before query\n:param actions:\n:param table:\n:param func:\n:return:", "id": "f10990:c3:m3"} {"signature": "def _parse_permission(self, obj):", "body": "if isinstance(obj, str):if obj == '':return A.ALLelif obj in A.ALL:return obj,else:logger.warning('', obj)elif isinstance(obj, (list, tuple)):for i in obj:if i not in A.ALL:logger.warning('', i)return objelif isinstance(obj, dict):return self._parse_permission(obj.get(''))", "docstring": "\u4ece obj \u4e2d\u53d6\u51fa\u6743\u9650\n:param obj:\n:return: [A.QUERY, A.WRITE, ...]", "id": "f10990:c3:m5"} {"signature": "def can_with_columns(self, user, action, table, columns):", "body": "global_data = self.rules.get('')global_actions = self._parse_permission(global_data)if global_actions and action in global_actions:available = list(columns)else:available = []table_data = self.rules.get(table)table_actions = self._parse_permission(table_data)if table_actions and action in table_actions:available = list(columns)if type(table_data) == dict:for column in columns:column_actions = self._parse_permission(table_data.get(column))if column_actions is not None:if action in column_actions:if column not in available:available.append(column)else:if column in available:available.remove(column)for check in self.common_checks:if check[] == table and action in check[]:ret = check[-](self, user, action, available)if isinstance(ret, (tuple, set, list)):available = list(ret)elif ret == '':available = list(columns)elif ret is False:available = []if not available: breakreturn available", "docstring": 
"\u6839\u636e\u6743\u9650\u8fdb\u884c\u5217\u8fc7\u6ee4\n\u6ce8\u610f\u4e00\u70b9\uff0c\u53ea\u8981\u6709\u4e00\u4e2a\u6761\u4ef6\u80fd\u591f\u901a\u8fc7\u6743\u9650\u68c0\u6d4b\uff0c\u90a3\u4e48\u8fc7\u6ee4\u540e\u8fd8\u4f1a\u6709\u5269\u4f59\u6761\u4ef6\uff0c\u6700\u7ec8\u5c31\u4e0d\u4f1a\u62a5\u9519\u3002\n\u5982\u679c\u5168\u90e8\u6761\u4ef6\u90fd\u4e0d\u80fd\u8fc7\u68c0\u6d4b\uff0c\u5c31\u4f1a\u7206\u51fa\u6743\u9650\u9519\u8bef\u4e86\u3002\n\n:param user:\n:param action: \u884c\u4e3a\n:param table: \u8868\u540d\n:param columns: \u5217\u540d\u5217\u8868\n:return: \u53ef\u7528\u5217\u7684\u5217\u8868", "id": "f10990:c3:m6"} {"signature": "def can_with_record(self, user, action, record: DataRecord, *, available=None):", "body": "assert action not in (A.QUERY, A.CREATE), \"\" % actionrules = []for rule in self.record_checks:if record.table == rule[] and action in rule[]:rules.append(rule)if available is None: available = self.can_with_columns(user, action, record.table, record.keys())else: available = list(available)bak = available.copy()for rule in rules:ret = rule[-](self, user, action, record, available)if isinstance(ret, (tuple, set, list)):available = list(ret)elif ret == '':available = list(bak)elif not ret:available = []return available", "docstring": "\u8fdb\u884c\u57fa\u4e8e Record \u7684\u6743\u9650\u5224\u5b9a\uff0c\u8fd4\u56de\u53ef\u7528\u5217\u3002\n:param user:\n:param action:\n:param record:\n:param available: \u9650\u5b9a\u68c0\u67e5\u8303\u56f4\n:return: \u53ef\u7528\u5217", "id": "f10990:c3:m7"} {"signature": "def view_bind(app, cls_url, view_cls: Type['']):", "body": "if view_cls._no_route: returncls_url = cls_url or view_cls.__class__.__name__.lower()def add_route(name, route_info, beacon_info):for method in route_info['']:async def beacon(request): passroute_key = route_info[''] if route_info[''] else nameapp._raw_app.router.add_route(method, urljoin('', cls_url, route_key), beacon)app.route._beacons[beacon] = beacon_infofor name, route_info_lst in view_cls._interface.items():for route_info in route_info_lst:real_handler = getattr(view_cls, name, None)if real_handler is None: continue assert real_handler is not None, \"\"handler_name = '' % (view_cls.__name__, real_handler.__name__)assert iscoroutinefunction(real_handler), \"\" % handler_namebeacon_info = {'': view_cls,'': name,'': real_handler,'': route_info}add_route(name, route_info, beacon_info)", "docstring": "\u5c06 API \u7ed1\u5b9a\u5230 web \u670d\u52a1\u4e0a\n:param view_cls:\n:param app:\n:param cls_url:\n:return:", "id": "f10991:m1"} {"signature": "def add_static(self, prefix, path, **kwargs):", "body": "self.statics.append((prefix, path, kwargs),)", "docstring": ":param prefix: URL prefix\n:param path: file directory\n:param kwargs:\n:return:", "id": "f10991:c0:m5"} {"signature": "@classmethodasync def get_session(cls, view):", "body": "session = cls(view)session.key = await session.get_key()session._data = await session.load() or {}return session", "docstring": "Every request have a session instance\n:param view:\n:return:", "id": "f10992:c0:m9"} {"signature": "def __init__(self, *, cookies_secret: bytes, log_level=logging.DEBUG, session_cls=CookieSession,permission: Optional[''], client_max_size= * * ,cors_options: Optional[Union[CORSOptions, List[CORSOptions]]] = None):", "body": "from .route import get_route_middleware, Routefrom .permission import Permissions, Ability, ALL_PERMISSION, EMPTY_PERMISSIONself.route = Route(self)if permission is ALL_PERMISSION:logger.warning('')self.permission = 
Permissions(self)self.permission.add(Ability(None, {'': ''})) elif permission is None or permission is EMPTY_PERMISSION:self.permission = Permissions(self) else:self.permission = permissionpermission.app = selfself.tables = SlimTables()self.table_permissions = SlimPermissions(self.permission)if log_level:log.enable(log_level)if isinstance(cors_options, CORSOptions):self.cors_options = [cors_options]else:self.cors_options = cors_optionsself.options = ApplicationOptions()self.options.cookies_secret = cookies_secretself.options.session_cls = session_clsself._raw_app = web.Application(middlewares=[get_route_middleware(self)], client_max_size=client_max_size)", "docstring": ":param cookies_secret:\n:param log_level:\n:param permission: `ALL_PERMISSION`, `EMPTY_PERMISSION` or a `Permissions` object\n:param session_cls:\n:param client_max_size: 2MB is default client_max_body_size of nginx", "id": "f10993:c4:m0"} {"signature": "def _pack2(obj, fp, **options):", "body": "global compatibilityext_handlers = options.get(\"\")if obj is None:_pack_nil(obj, fp, options)elif ext_handlers and obj.__class__ in ext_handlers:_pack_ext(ext_handlers[obj.__class__](obj), fp, options)elif isinstance(obj, bool):_pack_boolean(obj, fp, options)elif isinstance(obj, int) or isinstance(obj, long):_pack_integer(obj, fp, options)elif isinstance(obj, float):_pack_float(obj, fp, options)elif compatibility and isinstance(obj, unicode):_pack_oldspec_raw(bytes(obj), fp, options)elif compatibility and isinstance(obj, bytes):_pack_oldspec_raw(obj, fp, options)elif isinstance(obj, unicode):_pack_string(obj, fp, options)elif isinstance(obj, str):_pack_binary(obj, fp, options)elif isinstance(obj, list) or isinstance(obj, tuple):_pack_array(obj, fp, options)elif isinstance(obj, dict):_pack_map(obj, fp, options)elif isinstance(obj, datetime.datetime):_pack_ext_timestamp(obj, fp, options)elif isinstance(obj, Ext):_pack_ext(obj, fp, options)elif ext_handlers:t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None)if t:_pack_ext(ext_handlers[t](obj), fp, options)else:raise UnsupportedTypeException(\"\" % str(type(obj)))else:raise UnsupportedTypeException(\"\" % str(type(obj)))", "docstring": "Serialize a Python object into MessagePack bytes.\n\nArgs:\n obj: a Python object\n fp: a .write()-supporting file-like object\n\nKwargs:\n ext_handlers (dict): dictionary of Ext handlers, mapping a custom type\n to a callable that packs an instance of the type\n into an Ext object\n force_float_precision (str): \"single\" to force packing floats as\n IEEE-754 single-precision floats,\n \"double\" to force packing floats as\n IEEE-754 double-precision floats.\n\nReturns:\n None.\n\nRaises:\n UnsupportedType(PackException):\n Object type not supported for packing.\n\nExample:\n>>> f = open('test.bin', 'wb')\n>>> umsgpack.pack({u\"compact\": True, u\"schema\": 0}, f)\n>>>", "id": "f10997:m11"} {"signature": "def _pack3(obj, fp, **options):", "body": "global compatibilityext_handlers = options.get(\"\")if obj is None:_pack_nil(obj, fp, options)elif ext_handlers and obj.__class__ in ext_handlers:_pack_ext(ext_handlers[obj.__class__](obj), fp, options)elif isinstance(obj, bool):_pack_boolean(obj, fp, options)elif isinstance(obj, int):_pack_integer(obj, fp, options)elif isinstance(obj, float):_pack_float(obj, fp, options)elif compatibility and isinstance(obj, str):_pack_oldspec_raw(obj.encode(''), fp, options)elif compatibility and isinstance(obj, bytes):_pack_oldspec_raw(obj, fp, options)elif isinstance(obj, str):_pack_string(obj, fp, 
options)elif isinstance(obj, bytes):_pack_binary(obj, fp, options)elif isinstance(obj, list) or isinstance(obj, tuple):_pack_array(obj, fp, options)elif isinstance(obj, dict):_pack_map(obj, fp, options)elif isinstance(obj, datetime.datetime):_pack_ext_timestamp(obj, fp, options)elif isinstance(obj, Ext):_pack_ext(obj, fp, options)elif ext_handlers:t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None)if t:_pack_ext(ext_handlers[t](obj), fp, options)else:raise UnsupportedTypeException(\"\" % str(type(obj)))else:raise UnsupportedTypeException(\"\" % str(type(obj)))", "docstring": "Serialize a Python object into MessagePack bytes.\n\nArgs:\n obj: a Python object\n fp: a .write()-supporting file-like object\n\nKwargs:\n ext_handlers (dict): dictionary of Ext handlers, mapping a custom type\n to a callable that packs an instance of the type\n into an Ext object\n force_float_precision (str): \"single\" to force packing floats as\n IEEE-754 single-precision floats,\n \"double\" to force packing floats as\n IEEE-754 double-precision floats.\n\nReturns:\n None.\n\nRaises:\n UnsupportedType(PackException):\n Object type not supported for packing.\n\nExample:\n>>> f = open('test.bin', 'wb')\n>>> umsgpack.pack({u\"compact\": True, u\"schema\": 0}, f)\n>>>", "id": "f10997:m12"} {"signature": "def _packb2(obj, **options):", "body": "fp = io.BytesIO()_pack2(obj, fp, **options)return fp.getvalue()", "docstring": "Serialize a Python object into MessagePack bytes.\n\nArgs:\n obj: a Python object\n\nKwargs:\n ext_handlers (dict): dictionary of Ext handlers, mapping a custom type\n to a callable that packs an instance of the type\n into an Ext object\n force_float_precision (str): \"single\" to force packing floats as\n IEEE-754 single-precision floats,\n \"double\" to force packing floats as\n IEEE-754 double-precision floats.\n\nReturns:\n A 'str' containing serialized MessagePack bytes.\n\nRaises:\n UnsupportedType(PackException):\n Object type not supported for packing.\n\nExample:\n>>> umsgpack.packb({u\"compact\": True, u\"schema\": 0})\n'\\x82\\xa7compact\\xc3\\xa6schema\\x00'\n>>>", "id": "f10997:m13"} {"signature": "def _packb3(obj, **options):", "body": "fp = io.BytesIO()_pack3(obj, fp, **options)return fp.getvalue()", "docstring": "Serialize a Python object into MessagePack bytes.\n\nArgs:\n obj: a Python object\n\nKwargs:\n ext_handlers (dict): dictionary of Ext handlers, mapping a custom type\n to a callable that packs an instance of the type\n into an Ext object\n force_float_precision (str): \"single\" to force packing floats as\n IEEE-754 single-precision floats,\n \"double\" to force packing floats as\n IEEE-754 double-precision floats.\n\nReturns:\n A 'bytes' containing serialized MessagePack bytes.\n\nRaises:\n UnsupportedType(PackException):\n Object type not supported for packing.\n\nExample:\n>>> umsgpack.packb({u\"compact\": True, u\"schema\": 0})\nb'\\x82\\xa7compact\\xc3\\xa6schema\\x00'\n>>>", "id": "f10997:m14"} {"signature": "def _unpack2(fp, **options):", "body": "return _unpack(fp, options)", "docstring": "Deserialize MessagePack bytes into a Python object.\n\nArgs:\n fp: a .read()-supporting file-like object\n\nKwargs:\n ext_handlers (dict): dictionary of Ext handlers, mapping integer Ext\n type to a callable that unpacks an instance of\n Ext into an object\n use_ordered_dict (bool): unpack maps into OrderedDict, instead of\n unordered dict (default False)\n allow_invalid_utf8 (bool): unpack invalid strings into instances of\n InvalidString, for access to the 
bytes\n (default False)\n\nReturns:\n A Python object.\n\nRaises:\n InsufficientDataException(UnpackException):\n Insufficient data to unpack the serialized object.\n InvalidStringException(UnpackException):\n Invalid UTF-8 string encountered during unpacking.\n UnsupportedTimestampException(UnpackException):\n Unsupported timestamp format encountered during unpacking.\n ReservedCodeException(UnpackException):\n Reserved code encountered during unpacking.\n UnhashableKeyException(UnpackException):\n Unhashable key encountered during map unpacking.\n The serialized map cannot be deserialized into a Python dictionary.\n DuplicateKeyException(UnpackException):\n Duplicate key encountered during map unpacking.\n\nExample:\n>>> f = open('test.bin', 'rb')\n>>> umsgpack.unpackb(f)\n{u'compact': True, u'schema': 0}\n>>>", "id": "f10997:m29"} {"signature": "def _unpack3(fp, **options):", "body": "return _unpack(fp, options)", "docstring": "Deserialize MessagePack bytes into a Python object.\n\nArgs:\n fp: a .read()-supporting file-like object\n\nKwargs:\n ext_handlers (dict): dictionary of Ext handlers, mapping integer Ext\n type to a callable that unpacks an instance of\n Ext into an object\n use_ordered_dict (bool): unpack maps into OrderedDict, instead of\n unordered dict (default False)\n allow_invalid_utf8 (bool): unpack invalid strings into instances of\n InvalidString, for access to the bytes\n (default False)\n\nReturns:\n A Python object.\n\nRaises:\n InsufficientDataException(UnpackException):\n Insufficient data to unpack the serialized object.\n InvalidStringException(UnpackException):\n Invalid UTF-8 string encountered during unpacking.\n UnsupportedTimestampException(UnpackException):\n Unsupported timestamp format encountered during unpacking.\n ReservedCodeException(UnpackException):\n Reserved code encountered during unpacking.\n UnhashableKeyException(UnpackException):\n Unhashable key encountered during map unpacking.\n The serialized map cannot be deserialized into a Python dictionary.\n DuplicateKeyException(UnpackException):\n Duplicate key encountered during map unpacking.\n\nExample:\n>>> f = open('test.bin', 'rb')\n>>> umsgpack.unpackb(f)\n{'compact': True, 'schema': 0}\n>>>", "id": "f10997:m30"} {"signature": "def _unpackb2(s, **options):", "body": "if not isinstance(s, (str, bytearray)):raise TypeError(\"\")return _unpack(io.BytesIO(s), options)", "docstring": "Deserialize MessagePack bytes into a Python object.\n\nArgs:\n s: a 'str' or 'bytearray' containing serialized MessagePack bytes\n\nKwargs:\n ext_handlers (dict): dictionary of Ext handlers, mapping integer Ext\n type to a callable that unpacks an instance of\n Ext into an object\n use_ordered_dict (bool): unpack maps into OrderedDict, instead of\n unordered dict (default False)\n allow_invalid_utf8 (bool): unpack invalid strings into instances of\n InvalidString, for access to the bytes\n (default False)\n\nReturns:\n A Python object.\n\nRaises:\n TypeError:\n Packed data type is neither 'str' nor 'bytearray'.\n InsufficientDataException(UnpackException):\n Insufficient data to unpack the serialized object.\n InvalidStringException(UnpackException):\n Invalid UTF-8 string encountered during unpacking.\n UnsupportedTimestampException(UnpackException):\n Unsupported timestamp format encountered during unpacking.\n ReservedCodeException(UnpackException):\n Reserved code encountered during unpacking.\n UnhashableKeyException(UnpackException):\n Unhashable key encountered during map unpacking.\n The serialized map 
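The ext_handlers keyword documented above lets an application round-trip its own types through the Ext container. A hedged usage sketch; the type code 0x10 and the choice of a Python set are arbitrary examples:

    import umsgpack

    packed = umsgpack.packb(
        {'tags': {1, 2, 3}},
        ext_handlers={set: lambda s: umsgpack.Ext(0x10, umsgpack.packb(sorted(s)))})
    obj = umsgpack.unpackb(
        packed,
        ext_handlers={0x10: lambda ext: set(umsgpack.unpackb(ext.data))})
    assert obj == {'tags': {1, 2, 3}}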
cannot be deserialized into a Python dictionary.\n DuplicateKeyException(UnpackException):\n Duplicate key encountered during map unpacking.\n\nExample:\n>>> umsgpack.unpackb(b'\\x82\\xa7compact\\xc3\\xa6schema\\x00')\n{u'compact': True, u'schema': 0}\n>>>", "id": "f10997:m31"} {"signature": "def _unpackb3(s, **options):", "body": "if not isinstance(s, (bytes, bytearray)):raise TypeError(\"\")return _unpack(io.BytesIO(s), options)", "docstring": "Deserialize MessagePack bytes into a Python object.\n\nArgs:\n s: a 'bytes' or 'bytearray' containing serialized MessagePack bytes\n\nKwargs:\n ext_handlers (dict): dictionary of Ext handlers, mapping integer Ext\n type to a callable that unpacks an instance of\n Ext into an object\n use_ordered_dict (bool): unpack maps into OrderedDict, instead of\n unordered dict (default False)\n allow_invalid_utf8 (bool): unpack invalid strings into instances of\n InvalidString, for access to the bytes\n (default False)\n\nReturns:\n A Python object.\n\nRaises:\n TypeError:\n Packed data type is neither 'bytes' nor 'bytearray'.\n InsufficientDataException(UnpackException):\n Insufficient data to unpack the serialized object.\n InvalidStringException(UnpackException):\n Invalid UTF-8 string encountered during unpacking.\n UnsupportedTimestampException(UnpackException):\n Unsupported timestamp format encountered during unpacking.\n ReservedCodeException(UnpackException):\n Reserved code encountered during unpacking.\n UnhashableKeyException(UnpackException):\n Unhashable key encountered during map unpacking.\n The serialized map cannot be deserialized into a Python dictionary.\n DuplicateKeyException(UnpackException):\n Duplicate key encountered during map unpacking.\n\nExample:\n>>> umsgpack.unpackb(b'\\x82\\xa7compact\\xc3\\xa6schema\\x00')\n{'compact': True, 'schema': 0}\n>>>", "id": "f10997:m32"} {"signature": "def __init__(self, type, data):", "body": "if not isinstance(type, int):raise TypeError(\"\")elif sys.version_info[] == and not isinstance(data, bytes):raise TypeError(\"\")elif sys.version_info[] == and not isinstance(data, str):raise TypeError(\"\")self.type = typeself.data = data", "docstring": "Construct a new Ext object.\n\nArgs:\n type: application-defined type integer\n data: application-defined data byte array\n\nExample:\n>>> foo = umsgpack.Ext(0x05, b\"\\x01\\x02\\x03\")\n>>> umsgpack.packb({u\"special stuff\": foo, u\"awesome\": True})\n'\\x82\\xa7awesome\\xc3\\xadspecial stuff\\xc7\\x03\\x05\\x01\\x02\\x03'\n>>> bar = umsgpack.unpackb(_)\n>>> print(bar[\"special stuff\"])\nExt Object (Type: 0x05, Data: 01 02 03)\n>>>", "id": "f10997:c0:m0"} {"signature": "def __eq__(self, other):", "body": "return (isinstance(other, self.__class__) andself.type == other.type andself.data == other.data)", "docstring": "Compare this Ext object with another for equality.", "id": "f10997:c0:m1"} {"signature": "def __ne__(self, other):", "body": "return not self.__eq__(other)", "docstring": "Compare this Ext object with another for inequality.", "id": "f10997:c0:m2"} {"signature": "def __str__(self):", "body": "s = \"\" % self.types += \"\".join([\"\" % ord(self.data[i:i + ])for i in xrange(min(len(self.data), ))])if len(self.data) > :s += \"\"s += \"\"return s", "docstring": "String representation of this Ext object.", "id": "f10997:c0:m3"} {"signature": "def __hash__(self):", "body": "return hash((self.type, self.data))", "docstring": "Provide a hash of this Ext object.", "id": "f10997:c0:m4"} {"signature": "def pagination_calc(items_count, page_size, 
cur_page=, nearby=):", "body": "if type(cur_page) == str:cur_page = int(cur_page) if cur_page.isdigit() else elif type(cur_page) == int:if cur_page <= :cur_page = else:cur_page = page_count = if page_size == - else int(math.ceil(items_count / page_size))items_length = nearby * + first_page = Nonelast_page = Noneprev_page = cur_page - if cur_page != else Nonenext_page = cur_page + if cur_page != page_count else Noneif page_count <= items_length:items = range(, page_count + )elif cur_page <= nearby:items = range(, items_length + )last_page = Trueelif cur_page >= page_count - nearby:items = range(page_count - items_length + , page_count + )first_page = Trueelse:items = range(cur_page - nearby, cur_page + nearby + )first_page, last_page = True, Trueif first_page:first_page = if last_page:last_page = page_countreturn {'': cur_page,'': prev_page,'': next_page,'': first_page,'': last_page,'': list(items),'': {'': page_size,'': page_count,'': items_count,}}", "docstring": ":param nearby:\n:param items_count: count of all items\n:param page_size: size of one page\n:param cur_page: current page number, accept string digit\n:return: num of pages, an iterator", "id": "f11003:m0"} {"signature": "def get_bytes_from_blob(val) -> bytes:", "body": "if isinstance(val, bytes):return valelif isinstance(val, memoryview):return val.tobytes()else:raise TypeError('')", "docstring": "\u4e0d\u540c\u6570\u636e\u5e93\u4eceblob\u62ff\u51fa\u7684\u6570\u636e\u6709\u6240\u5dee\u522b\uff0c\u6709\u7684\u662fmemoryview\u6709\u7684\u662fbytes", "id": "f11004:m3"} {"signature": "def parse_query_by_json(data):", "body": "data = json.loads(data)for i in ('', '', ''):if i not in data:raise QueryException(\"\" % i)tables = data['']columns = data['']conditions = data['']def parse_stmt(s, expr_cls, all_op, multi_items_op):if len(s) == :return []if s[] in all_op:if s[] in multi_items_op:values = []for i in s[:]:values.append(parse_stmt(i, expr_cls, all_op, multi_items_op))return expr_cls(None, s[], None, values=values)else:if len(s) == :lhs = Column(s[], table=s[])rhs = Column(s[], table=s[])if (s[] not in tables) or (s[] not in tables):raise QueryException('')return expr_cls(lhs, s[], rhs)else:lhs = Column(s[], table=s[])if s[] not in tables:raise QueryException('')return expr_cls(lhs, s[], s[])else:raise QueryException('')query_op = ('', '', '', '')query_columns = []for i in columns:if len(i) == :query_columns.append(Column(i[], table=i[]))else:query_columns.append(parse_stmt(i, QueryExpression, query_op, query_op))wheres = parse_stmt(conditions, ConditionExpression, _operator_map, ('', '',))return {'': tables,'': query_columns,'': wheres,}", "docstring": "['and',\n ['==', 't1', 'col1', val1],\n ['!=', 't1', 'col2', 't2', 'col2'],\n ['and',\n ['==', 't1', 'col3', val3],\n ['!=', 't2', 'col4', val4],\n ]\n]\n:return:\n:param data: \n:return:", "id": "f11012:m3"} {"signature": "def ensure_remote_branch_is_tracked(branch):", "body": "if branch == MASTER_BRANCH:returnoutput = subprocess.check_output(['', '', ''])for line in output.split(''):if line.strip() == branch:breakelse:try:sys.stdout.write(subprocess.check_output(['', '', '', '' % branch]))except subprocess.CalledProcessError:raise SystemExit()", "docstring": "Track the specified remote branch if it is not already tracked.", "id": "f11015:m0"} {"signature": "def main(branch):", "body": "try:output = subprocess.check_output(['', '']).decode('')sys.stdout.write(output)except subprocess.CalledProcessError:returnensure_remote_branch_is_tracked(branch)subprocess.check_call(['', '', 
'', branch])subprocess.check_call(['', '', ''])subprocess.check_call(['', '', '', '' % branch])subprocess.check_call(['', '', '', '', ''])print('' % branch)", "docstring": "Checkout, update and branch from the specified branch.", "id": "f11015:m1"} {"signature": "def block_inception_a(inputs, scope=None, is_train=False):", "body": "with tf.variable_scope(name_or_scope=scope, default_name='', values=[inputs]):with tf.variable_scope(''):branch_0, _ = conv_module(inputs, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')with tf.variable_scope(''):branch_1, _ = conv_module(inputs, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_1, _ = conv_module(branch_1, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')with tf.variable_scope(''):branch_2, _ = conv_module(inputs, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_2, _ = conv_module(branch_2, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_2, _ = conv_module(branch_2, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')with tf.variable_scope(''):branch_3 = tl.layers.MeanPool2d(inputs, filter_size=(, ), strides=(, ), padding='', name='')branch_3, _ = conv_module(branch_3, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')return tl.layers.ConcatLayer([branch_0, branch_1, branch_2, branch_3], concat_dim=, name='')", "docstring": "Builds Inception-A block for Inception v4 network.", "id": "f11040:m0"} {"signature": "def block_reduction_a(inputs, scope=None, is_train=False):", "body": "with tf.variable_scope(scope, '', [inputs]):with tf.variable_scope(''):branch_0, _ = conv_module(inputs, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')with tf.variable_scope(''):branch_1, _ = conv_module(inputs, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_1, _ = conv_module(branch_1, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_1, _ = conv_module(branch_1, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')with tf.variable_scope(''):branch_2 = tl.layers.MaxPool2d(inputs, (, ), strides=(, ), padding='', name='')return tl.layers.ConcatLayer([branch_0, branch_1, branch_2], concat_dim=, name='')", "docstring": "Builds Reduction-A block for Inception v4 network.", "id": "f11040:m1"} {"signature": "def block_inception_b(inputs, scope=None, is_train=False):", "body": "with tf.variable_scope(scope, '', [inputs]):with tf.variable_scope(''):branch_0, _ = conv_module(inputs, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, 
activation_fn='', name='')with tf.variable_scope(''):branch_1, _ = conv_module(inputs, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_1, _ = conv_module(branch_1, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_1, _ = conv_module(branch_1, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')with tf.variable_scope(''):branch_2, _ = conv_module(inputs, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_2, _ = conv_module(branch_2, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_2, _ = conv_module(branch_2, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_2, _ = conv_module(branch_2, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_2, _ = conv_module(branch_2, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')with tf.variable_scope(''):branch_3 = tl.layers.MeanPool2d(inputs, filter_size=(, ), strides=(, ), padding='', name='')branch_3, _ = conv_module(branch_3, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')return tl.layers.ConcatLayer([branch_0, branch_1, branch_2, branch_3], concat_dim=, name='')", "docstring": "Builds Inception-B block for Inception v4 network.", "id": "f11040:m2"} {"signature": "def block_reduction_b(inputs, scope=None, is_train=False):", "body": "with tf.variable_scope(scope, '', [inputs]):with tf.variable_scope(''):branch_0, _ = conv_module(inputs, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_0, _ = conv_module(branch_0, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')with tf.variable_scope(''):branch_1, _ = conv_module(inputs, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_1, _ = conv_module(branch_1, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_1, _ = conv_module(branch_1, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_1, _ = conv_module(branch_1, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')with tf.variable_scope(''):branch_2 = tl.layers.MaxPool2d(inputs, (, ), strides=(, ), padding='', name='')return tl.layers.ConcatLayer([branch_0, branch_1, branch_2], concat_dim=, name='')", "docstring": "Builds Reduction-B block for Inception v4 network.", "id": 
"f11040:m3"} {"signature": "def block_inception_c(inputs, scope=None, is_train=False):", "body": "with tf.variable_scope(scope, '', [inputs]):with tf.variable_scope(''):branch_0, _ = conv_module(inputs, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')with tf.variable_scope(''):branch_1, _ = conv_module(inputs, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_1a, _ = conv_module(branch_1, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_1b, _ = conv_module(branch_1, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_1 = tl.layers.ConcatLayer([branch_1a, branch_1b], concat_dim=, name='')with tf.variable_scope(''):branch_2, _ = conv_module(inputs, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_2, _ = conv_module(branch_2, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_2, _ = conv_module(branch_2, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_2a, _ = conv_module(branch_2, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_2b, _ = conv_module(branch_2, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')branch_2 = tl.layers.ConcatLayer([branch_2a, branch_2b], concat_dim=, name='')with tf.variable_scope(''):branch_3 = tl.layers.MeanPool2d(inputs, filter_size=(, ), strides=(, ), padding='', name='')branch_3, _ = conv_module(branch_3, n_out_channel=, filter_size=(, ), strides=(, ), padding='', batch_norm_init=None,is_train=is_train, use_batchnorm=True, activation_fn='', name='')return tl.layers.ConcatLayer([branch_0, branch_1, branch_2, branch_3], concat_dim=, name='')", "docstring": "Builds Inception-C block for Inception v4 network.", "id": "f11040:m4"} {"signature": "def Vgg19(rgb):", "body": "start_time = time.time()print(\"\")rgb_scaled = rgb * red, green, blue = tf.split(rgb_scaled, , )if red.get_shape().as_list()[:] != [, , ]:raise Exception(\"\")if green.get_shape().as_list()[:] != [, , ]:raise Exception(\"\")if blue.get_shape().as_list()[:] != [, , ]:raise Exception(\"\")bgr = tf.concat([blue - VGG_MEAN[],green - VGG_MEAN[],red - VGG_MEAN[],], axis=)if bgr.get_shape().as_list()[:] != [, , ]:raise Exception(\"\")net_in = InputLayer(bgr, name='')net = Conv2dLayer(net_in, act=tf.nn.relu, shape=[, , , ], strides=[, , , ], padding='', name='')net = Conv2dLayer(net, act=tf.nn.relu, shape=[, , , ], strides=[, , , ], padding='', name='')net = PoolLayer(net, ksize=[, , , ], strides=[, , , ], padding='', pool=tf.nn.max_pool, name='')net = Conv2dLayer(net, act=tf.nn.relu, shape=[, , , ], strides=[, , , ], padding='', name='')net = Conv2dLayer(net, act=tf.nn.relu, shape=[, , , ], strides=[, , , ], padding='', name='')net = PoolLayer(net, ksize=[, , , ], strides=[, , , ], padding='', 
pool=tf.nn.max_pool, name='')net = Conv2dLayer(net, act=tf.nn.relu, shape=[, , , ], strides=[, , , ], padding='', name='')net = Conv2dLayer(net, act=tf.nn.relu, shape=[, , , ], strides=[, , , ], padding='', name='')net = Conv2dLayer(net, act=tf.nn.relu, shape=[, , , ], strides=[, , , ], padding='', name='')net = Conv2dLayer(net, act=tf.nn.relu, shape=[, , , ], strides=[, , , ], padding='', name='')net = PoolLayer(net, ksize=[, , , ], strides=[, , , ], padding='', pool=tf.nn.max_pool, name='')net = Conv2dLayer(net, act=tf.nn.relu, shape=[, , , ], strides=[, , , ], padding='', name='')net = Conv2dLayer(net, act=tf.nn.relu, shape=[, , , ], strides=[, , , ], padding='', name='')net = Conv2dLayer(net, act=tf.nn.relu, shape=[, , , ], strides=[, , , ], padding='', name='')net = Conv2dLayer(net, act=tf.nn.relu, shape=[, , , ], strides=[, , , ], padding='', name='')net = PoolLayer(net, ksize=[, , , ], strides=[, , , ], padding='', pool=tf.nn.max_pool, name='')net = Conv2dLayer(net, act=tf.nn.relu, shape=[, , , ], strides=[, , , ], padding='', name='')net = Conv2dLayer(net, act=tf.nn.relu, shape=[, , , ], strides=[, , , ], padding='', name='')net = Conv2dLayer(net, act=tf.nn.relu, shape=[, , , ], strides=[, , , ], padding='', name='')net = Conv2dLayer(net, act=tf.nn.relu, shape=[, , , ], strides=[, , , ], padding='', name='')net = PoolLayer(net, ksize=[, , , ], strides=[, , , ], padding='', pool=tf.nn.max_pool, name='')net = FlattenLayer(net, name='')net = DenseLayer(net, n_units=, act=tf.nn.relu, name='')net = DenseLayer(net, n_units=, act=tf.nn.relu, name='')net = DenseLayer(net, n_units=, act=None, name='')print(\"\" % (time.time() - start_time))return net", "docstring": "Build the VGG 19 Model\n\nParameters\n-----------\nrgb : rgb image placeholder [batch, height, width, 3] values scaled [0, 1]", "id": "f11062:m2"} {"signature": "def Vgg19_simple_api(rgb):", "body": "start_time = time.time()print(\"\")rgb_scaled = rgb * red, green, blue = tf.split(rgb_scaled, , )if red.get_shape().as_list()[:] != [, , ]:raise Exception(\"\")if green.get_shape().as_list()[:] != [, , ]:raise Exception(\"\")if blue.get_shape().as_list()[:] != [, , ]:raise Exception(\"\")bgr = tf.concat([blue - VGG_MEAN[],green - VGG_MEAN[],red - VGG_MEAN[],], axis=)if bgr.get_shape().as_list()[:] != [, , ]:raise Exception(\"\")net_in = InputLayer(bgr, name='')net = Conv2d(net_in, , filter_size=(, ), strides=(, ), act=tf.nn.relu, padding='', name='')net = Conv2d(net, n_filter=, filter_size=(, ), strides=(, ), act=tf.nn.relu, padding='', name='')net = MaxPool2d(net, filter_size=(, ), strides=(, ), padding='', name='')net = Conv2d(net, n_filter=, filter_size=(, ), strides=(, ), act=tf.nn.relu, padding='', name='')net = Conv2d(net, n_filter=, filter_size=(, ), strides=(, ), act=tf.nn.relu, padding='', name='')net = MaxPool2d(net, filter_size=(, ), strides=(, ), padding='', name='')net = Conv2d(net, n_filter=, filter_size=(, ), strides=(, ), act=tf.nn.relu, padding='', name='')net = Conv2d(net, n_filter=, filter_size=(, ), strides=(, ), act=tf.nn.relu, padding='', name='')net = Conv2d(net, n_filter=, filter_size=(, ), strides=(, ), act=tf.nn.relu, padding='', name='')net = Conv2d(net, n_filter=, filter_size=(, ), strides=(, ), act=tf.nn.relu, padding='', name='')net = MaxPool2d(net, filter_size=(, ), strides=(, ), padding='', name='')net = Conv2d(net, n_filter=, filter_size=(, ), strides=(, ), act=tf.nn.relu, padding='', name='')net = Conv2d(net, n_filter=, filter_size=(, ), strides=(, ), act=tf.nn.relu, padding='', name='')net = 
Conv2d(net, n_filter=, filter_size=(, ), strides=(, ), act=tf.nn.relu, padding='', name='')net = Conv2d(net, n_filter=, filter_size=(, ), strides=(, ), act=tf.nn.relu, padding='', name='')net = MaxPool2d(net, filter_size=(, ), strides=(, ), padding='', name='')net = Conv2d(net, n_filter=, filter_size=(, ), strides=(, ), act=tf.nn.relu, padding='', name='')net = Conv2d(net, n_filter=, filter_size=(, ), strides=(, ), act=tf.nn.relu, padding='', name='')net = Conv2d(net, n_filter=, filter_size=(, ), strides=(, ), act=tf.nn.relu, padding='', name='')net = Conv2d(net, n_filter=, filter_size=(, ), strides=(, ), act=tf.nn.relu, padding='', name='')net = MaxPool2d(net, filter_size=(, ), strides=(, ), padding='', name='')net = FlattenLayer(net, name='')net = DenseLayer(net, n_units=, act=tf.nn.relu, name='')net = DenseLayer(net, n_units=, act=tf.nn.relu, name='')net = DenseLayer(net, n_units=, act=None, name='')print(\"\" % (time.time() - start_time))return net", "docstring": "Build the VGG 19 Model\n\nParameters\n-----------\nrgb : rgb image placeholder [batch, height, width, 3] values scaled [0, 1]", "id": "f11062:m3"} {"signature": "def prepro(I):", "body": "I = I[:]I = I[::, ::, ]I[I == ] = I[I == ] = I[I != ] = return I.astype(np.float).ravel()", "docstring": "Prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector.", "id": "f11074:m0"} {"signature": "def augment_with_ngrams(unigrams, unigram_vocab_size, n_buckets, n=):", "body": "def get_ngrams(n):return list(zip(*[unigrams[i:] for i in range(n)]))def hash_ngram(ngram):bytes_ = array.array('', ngram).tobytes()hash_ = int(hashlib.sha256(bytes_).hexdigest(), )return unigram_vocab_size + hash_ % n_bucketsreturn unigrams + [hash_ngram(ngram) for i in range(, n + ) for ngram in get_ngrams(i)]", "docstring": "Augment unigram features with hashed n-gram features.", "id": "f11076:m0"} {"signature": "def load_and_preprocess_imdb_data(n_gram=None):", "body": "X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset(nb_words=VOCAB_SIZE)if n_gram is not None:X_train = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_train])X_test = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_test])return X_train, y_train, X_test, y_test", "docstring": "Load IMDb data and augment with hashed n-gram features.", "id": "f11076:m1"} {"signature": "def main(_):", "body": "if FLAGS.model == \"\":init_scale = learning_rate = max_grad_norm = num_steps = hidden_size = max_epoch = max_max_epoch = keep_prob = lr_decay = batch_size = vocab_size = elif FLAGS.model == \"\":init_scale = learning_rate = max_grad_norm = num_steps = hidden_size = max_epoch = max_max_epoch = keep_prob = lr_decay = batch_size = vocab_size = elif FLAGS.model == \"\":init_scale = learning_rate = max_grad_norm = num_steps = hidden_size = max_epoch = max_max_epoch = keep_prob = lr_decay = / batch_size = vocab_size = else:raise ValueError(\"\", FLAGS.model)train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset()print(''.format(len(train_data))) print(''.format(len(valid_data))) print(''.format(len(test_data))) print(''.format(vocab_size)) sess = tf.InteractiveSession()input_data = tf.placeholder(tf.int32, [batch_size, num_steps])targets = tf.placeholder(tf.int32, [batch_size, num_steps])input_data_test = tf.placeholder(tf.int32, [, ])targets_test = tf.placeholder(tf.int32, [, ])def inference(x, is_training, num_steps, reuse=None):\"\"\"\"\"\"print(\"\" % (num_steps, is_training, reuse))init = 
tf.random_uniform_initializer(-init_scale, init_scale)with tf.variable_scope(\"\", reuse=reuse):net = tl.layers.EmbeddingInputlayer(x, vocab_size, hidden_size, init, name='')net = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_training, name='')net = tl.layers.RNNLayer(net,cell_fn=tf.contrib.rnn.BasicLSTMCell, cell_init_args={'': ,'': True},n_hidden=hidden_size,initializer=init,n_steps=num_steps,return_last=False,name='')lstm1 = netnet = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_training, name='')net = tl.layers.RNNLayer(net,cell_fn=tf.contrib.rnn.BasicLSTMCell, cell_init_args={'': ,'': True},n_hidden=hidden_size,initializer=init,n_steps=num_steps,return_last=False,return_seq_2d=True,name='')lstm2 = netnet = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_training, name='')net = tl.layers.DenseLayer(net, vocab_size, W_init=init, b_init=init, act=None, name='')return net, lstm1, lstm2net, lstm1, lstm2 = inference(input_data, is_training=True, num_steps=num_steps, reuse=None)net_val, lstm1_val, lstm2_val = inference(input_data, is_training=False, num_steps=num_steps, reuse=True)net_test, lstm1_test, lstm2_test = inference(input_data_test, is_training=False, num_steps=, reuse=True)sess.run(tf.global_variables_initializer())def loss_fn(outputs, targets, batch_size):loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example([outputs], [tf.reshape(targets, [-])], [tf.ones_like(tf.reshape(targets, [-]), dtype=tf.float32)])cost = tf.reduce_sum(loss) / batch_sizereturn costcost = loss_fn(net.outputs, targets, batch_size)cost_val = loss_fn(net_val.outputs, targets, batch_size)cost_test = loss_fn(net_test.outputs, targets_test, )with tf.variable_scope(''):lr = tf.Variable(, trainable=False)tvars = tf.trainable_variables()grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), max_grad_norm)optimizer = tf.train.GradientDescentOptimizer(lr)train_op = optimizer.apply_gradients(zip(grads, tvars))sess.run(tf.global_variables_initializer())net.print_params()net.print_layers()tl.layers.print_all_variables()print(\"\")for i in range(max_max_epoch):new_lr_decay = lr_decay**max(i - max_epoch, )sess.run(tf.assign(lr, learning_rate * new_lr_decay))print(\"\" % (i + , max_max_epoch, sess.run(lr)))epoch_size = ((len(train_data) // batch_size) - ) // num_stepsstart_time = time.time()costs = iters = state1 = tl.layers.initialize_rnn_state(lstm1.initial_state)state2 = tl.layers.initialize_rnn_state(lstm2.initial_state)for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data, batch_size, num_steps)):feed_dict = {input_data: x,targets: y,lstm1.initial_state.c: state1[],lstm1.initial_state.h: state1[],lstm2.initial_state.c: state2[],lstm2.initial_state.h: state2[],}feed_dict.update(net.all_drop)_cost, state1_c, state1_h, state2_c, state2_h, _ = sess.run([cost, lstm1.final_state.c, lstm1.final_state.h, lstm2.final_state.c, lstm2.final_state.h, train_op],feed_dict=feed_dict)state1 = (state1_c, state1_h)state2 = (state2_c, state2_h)costs += _costiters += num_stepsif step % (epoch_size // ) == :print(\"\" %(step * / epoch_size, np.exp(costs / iters), iters * batch_size / (time.time() - start_time)))train_perplexity = np.exp(costs / iters)print(\"\" % (i + , max_max_epoch, train_perplexity))start_time = time.time()costs = iters = state1 = tl.layers.initialize_rnn_state(lstm1_val.initial_state)state2 = tl.layers.initialize_rnn_state(lstm2_val.initial_state)for step, (x, y) in enumerate(tl.iterate.ptb_iterator(valid_data, batch_size, 
num_steps)):feed_dict = {input_data: x,targets: y,lstm1_val.initial_state.c: state1[],lstm1_val.initial_state.h: state1[],lstm2_val.initial_state.c: state2[],lstm2_val.initial_state.h: state2[],}_cost, state1_c, state1_h, state2_c, state2_h, _ = sess.run([cost_val, lstm1_val.final_state.c, lstm1_val.final_state.h, lstm2_val.final_state.c,lstm2_val.final_state.h,tf.no_op()], feed_dict=feed_dict)state1 = (state1_c, state1_h)state2 = (state2_c, state2_h)costs += _costiters += num_stepsvalid_perplexity = np.exp(costs / iters)print(\"\" % (i + , max_max_epoch, valid_perplexity))print(\"\")start_time = time.time()costs = iters = state1 = tl.layers.initialize_rnn_state(lstm1_test.initial_state)state2 = tl.layers.initialize_rnn_state(lstm2_test.initial_state)for step, (x, y) in enumerate(tl.iterate.ptb_iterator(test_data, batch_size=, num_steps=)):feed_dict = {input_data_test: x,targets_test: y,lstm1_test.initial_state.c: state1[],lstm1_test.initial_state.h: state1[],lstm2_test.initial_state.c: state2[],lstm2_test.initial_state.h: state2[],}_cost, state1_c, state1_h, state2_c, state2_h = sess.run([cost_test,lstm1_test.final_state.c,lstm1_test.final_state.h,lstm2_test.final_state.c,lstm2_test.final_state.h,], feed_dict=feed_dict)state1 = (state1_c, state1_h)state2 = (state2_c, state2_h)costs += _costiters += test_perplexity = np.exp(costs / iters)print(\"\" % (test_perplexity, time.time() - start_time))print(\"\")", "docstring": "The core of the model consists of an LSTM cell that processes one word at\na time and computes probabilities of the possible continuations of the\nsentence. The memory state of the network is initialized with a vector\nof zeros and gets updated after reading each word. Also, for computational\nreasons, we will process data in mini-batches of size batch_size.", "id": "f11077:m0"} {"signature": "def main(_):", "body": "if FLAGS.model == \"\":init_scale = learning_rate = max_grad_norm = num_steps = hidden_size = max_epoch = max_max_epoch = keep_prob = lr_decay = batch_size = vocab_size = elif FLAGS.model == \"\":init_scale = learning_rate = max_grad_norm = num_steps = hidden_size = max_epoch = max_max_epoch = keep_prob = lr_decay = batch_size = vocab_size = elif FLAGS.model == \"\":init_scale = learning_rate = max_grad_norm = num_steps = hidden_size = max_epoch = max_max_epoch = keep_prob = lr_decay = / batch_size = vocab_size = else:raise ValueError(\"\", FLAGS.model)train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset()print(''.format(len(train_data))) print(''.format(len(valid_data))) print(''.format(len(test_data))) print(''.format(vocab_size)) sess = tf.InteractiveSession()input_data = tf.placeholder(tf.int32, [batch_size, num_steps])targets = tf.placeholder(tf.int32, [batch_size, num_steps])input_data_test = tf.placeholder(tf.int32, [, ])targets_test = tf.placeholder(tf.int32, [, ])def inference(x, is_training, num_steps, reuse=None):\"\"\"\"\"\"print(\"\" % (num_steps, is_training, reuse))init = tf.random_uniform_initializer(-init_scale, init_scale)with tf.variable_scope(\"\", reuse=reuse):net = tl.layers.EmbeddingInputlayer(x, vocab_size, hidden_size, init, name='')net = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_training, name='')net = tl.layers.RNNLayer(net,cell_fn=tf.contrib.rnn.BasicLSTMCell, cell_init_args={'': }, n_hidden=hidden_size,initializer=init,n_steps=num_steps,return_last=False,name='')lstm1 = netnet = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_training, name='')net = 
tl.layers.RNNLayer(net,cell_fn=tf.contrib.rnn.BasicLSTMCell, cell_init_args={'': }, n_hidden=hidden_size,initializer=init,n_steps=num_steps,return_last=False,return_seq_2d=True,name='')lstm2 = netnet = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_training, name='')net = tl.layers.DenseLayer(net, vocab_size, W_init=init, b_init=init, act=None, name='')return net, lstm1, lstm2net, lstm1, lstm2 = inference(input_data, is_training=True, num_steps=num_steps, reuse=None)net_val, lstm1_val, lstm2_val = inference(input_data, is_training=False, num_steps=num_steps, reuse=True)net_test, lstm1_test, lstm2_test = inference(input_data_test, is_training=False, num_steps=, reuse=True)sess.run(tf.global_variables_initializer())def loss_fn(outputs, targets): loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example([outputs], [tf.reshape(targets, [-])], [tf.ones_like(tf.reshape(targets, [-]), dtype=tf.float32)])cost = tf.reduce_sum(loss) / batch_sizereturn costcost = loss_fn(net.outputs, targets) cost_val = loss_fn(net_val.outputs, targets) cost_test = loss_fn(net_test.outputs, targets_test) with tf.variable_scope(''):lr = tf.Variable(, trainable=False)tvars = tf.trainable_variables()grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), max_grad_norm)optimizer = tf.train.GradientDescentOptimizer(lr)train_op = optimizer.apply_gradients(zip(grads, tvars))sess.run(tf.global_variables_initializer())net.print_params()net.print_layers()tl.layers.print_all_variables()print(\"\")for i in range(max_max_epoch):new_lr_decay = lr_decay**max(i - max_epoch, )sess.run(tf.assign(lr, learning_rate * new_lr_decay))print(\"\" % (i + , max_max_epoch, sess.run(lr)))epoch_size = ((len(train_data) // batch_size) - ) // num_stepsstart_time = time.time()costs = iters = state1 = tl.layers.initialize_rnn_state(lstm1.initial_state)state2 = tl.layers.initialize_rnn_state(lstm2.initial_state)for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data, batch_size, num_steps)):feed_dict = {input_data: x,targets: y,lstm1.initial_state: state1,lstm2.initial_state: state2,}feed_dict.update(net.all_drop)_cost, state1, state2, _ = sess.run([cost, lstm1.final_state, lstm2.final_state, train_op], feed_dict=feed_dict)costs += _costiters += num_stepsif step % (epoch_size // ) == :print(\"\" %(step * / epoch_size, np.exp(costs / iters), iters * batch_size / (time.time() - start_time)))train_perplexity = np.exp(costs / iters)print(\"\" % (i + , max_max_epoch, train_perplexity))start_time = time.time()costs = iters = state1 = tl.layers.initialize_rnn_state(lstm1_val.initial_state)state2 = tl.layers.initialize_rnn_state(lstm2_val.initial_state)for step, (x, y) in enumerate(tl.iterate.ptb_iterator(valid_data, batch_size, num_steps)):feed_dict = {input_data: x,targets: y,lstm1_val.initial_state: state1,lstm2_val.initial_state: state2,}_cost, state1, state2, _ = sess.run([cost_val, lstm1_val.final_state, lstm2_val.final_state,tf.no_op()], feed_dict=feed_dict)costs += _costiters += num_stepsvalid_perplexity = np.exp(costs / iters)print(\"\" % (i + , max_max_epoch, valid_perplexity))print(\"\")start_time = time.time()costs = iters = state1 = tl.layers.initialize_rnn_state(lstm1_test.initial_state)state2 = tl.layers.initialize_rnn_state(lstm2_test.initial_state)for step, (x, y) in enumerate(tl.iterate.ptb_iterator(test_data, batch_size=, num_steps=)):feed_dict = {input_data_test: x,targets_test: y,lstm1_test.initial_state: state1,lstm2_test.initial_state: state2,}_cost, state1, state2 = sess.run([cost_test, 
lstm1_test.final_state, lstm2_test.final_state], feed_dict=feed_dict)costs += _costiters += test_perplexity = np.exp(costs / iters)print(\"\" % (test_perplexity, time.time() - start_time))print(\"\")", "docstring": "The core of the model consists of an LSTM cell that processes one word at\na time and computes probabilities of the possible continuations of the\nsentence. The memory state of the network is initialized with a vector\nof zeros and gets updated after reading each word. Also, for computational\nreasons, we will process data in mini-batches of size batch_size.", "id": "f11078:m0"} {"signature": "def basic_clean_str(string):", "body": "string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string)string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string)string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string)string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string)string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string)string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string)string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string)string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string)string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string)string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) return string.strip().lower()", "docstring": "Tokenization/string cleaning for a datasets.", "id": "f11079:m0"} {"signature": "def customized_clean_str(string):", "body": "string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string)string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string)string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string)string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string)string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string)string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string)string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string)string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string)string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) string = re.sub(r\"\", \"\", string) return 
string.strip().lower()", "docstring": "Tokenization/string cleaning for a datasets.", "id": "f11079:m1"} {"signature": "def main_restore_embedding_layer():", "body": "vocabulary_size = embedding_size = model_file_name = \"\"batch_size = Noneprint(\"\")all_var = tl.files.load_npy_to_any(name=model_file_name + '')data = all_var['']count = all_var['']dictionary = all_var['']reverse_dictionary = all_var['']tl.nlp.save_vocab(count, name='' + model_file_name + '')del all_var, data, countload_params = tl.files.load_npz(name=model_file_name + '')x = tf.placeholder(tf.int32, shape=[batch_size])emb_net = tl.layers.EmbeddingInputlayer(x, vocabulary_size, embedding_size, name='')sess.run(tf.global_variables_initializer())tl.files.assign_params(sess, [load_params[]], emb_net)emb_net.print_params()emb_net.print_layers()word = b''word_id = dictionary[word]print('', word_id)words = [b'', b'', b'', b'']word_ids = tl.nlp.words_to_word_ids(words, dictionary, _UNK)context = tl.nlp.word_ids_to_words(word_ids, reverse_dictionary)print('', word_ids)print('', context)vector = sess.run(emb_net.outputs, feed_dict={x: [word_id]})print('', vector.shape)vectors = sess.run(emb_net.outputs, feed_dict={x: word_ids})print('', vectors.shape)", "docstring": "How to use Embedding layer, and how to convert IDs to vector,\n IDs to words, etc.", "id": "f11079:m3"} {"signature": "def main_lstm_generate_text():", "body": "init_scale = learning_rate = max_grad_norm = sequence_length = hidden_size = max_epoch = max_max_epoch = lr_decay = batch_size = top_k_list = [, , , ]print_length = model_file_name = \"\"words = customized_read_words(input_fpath=\"\")vocab = tl.nlp.create_vocab([words], word_counts_output_file='', min_word_count=)vocab = tl.nlp.Vocabulary('', unk_word=\"\")vocab_size = vocab.unk_id + train_data = [vocab.word_to_id(word) for word in words]seed = \"\"seed = nltk.tokenize.word_tokenize(seed)print('' % seed)sess = tf.InteractiveSession()input_data = tf.placeholder(tf.int32, [batch_size, sequence_length])targets = tf.placeholder(tf.int32, [batch_size, sequence_length])input_data_test = tf.placeholder(tf.int32, [, ])def inference(x, is_train, sequence_length, reuse=None):\"\"\"\"\"\"print(\"\" % (sequence_length, is_train, reuse))rnn_init = tf.random_uniform_initializer(-init_scale, init_scale)with tf.variable_scope(\"\", reuse=reuse):network = EmbeddingInputlayer(x, vocab_size, hidden_size, rnn_init, name='')network = RNNLayer(network, cell_fn=tf.contrib.rnn.BasicLSTMCell, cell_init_args={'': ,'': True}, n_hidden=hidden_size, initializer=rnn_init, n_steps=sequence_length, return_last=False,return_seq_2d=True, name='')lstm1 = networknetwork = DenseLayer(network, vocab_size, W_init=rnn_init, b_init=rnn_init, act=None, name='')return network, lstm1network, lstm1 = inference(input_data, is_train=True, sequence_length=sequence_length, reuse=None)network_test, lstm1_test = inference(input_data_test, is_train=False, sequence_length=, reuse=True)y_linear = network_test.outputsy_soft = tf.nn.softmax(y_linear)def loss_fn(outputs, targets, batch_size, sequence_length):loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example([outputs], [tf.reshape(targets, [-])], [tf.ones([batch_size * sequence_length])])cost = tf.reduce_sum(loss) / batch_sizereturn costcost = loss_fn(network.outputs, targets, batch_size, sequence_length)with tf.variable_scope(''):lr = tf.Variable(, trainable=False)tvars = network.all_paramsgrads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), max_grad_norm)optimizer = 
tf.train.GradientDescentOptimizer(lr)train_op = optimizer.apply_gradients(zip(grads, tvars))sess.run(tf.global_variables_initializer())print(\"\")for i in range(max_max_epoch):new_lr_decay = lr_decay**max(i - max_epoch, )sess.run(tf.assign(lr, learning_rate * new_lr_decay))print(\"\" % (i + , max_max_epoch, sess.run(lr)))epoch_size = ((len(train_data) // batch_size) - ) // sequence_lengthstart_time = time.time()costs = iters = state1 = tl.layers.initialize_rnn_state(lstm1.initial_state)for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data, batch_size, sequence_length)):_cost, state1, _ = sess.run([cost, lstm1.final_state, train_op], feed_dict={input_data: x,targets: y,lstm1.initial_state: state1})costs += _costiters += sequence_lengthif step % (epoch_size // ) == :print(\"\" %(step * / epoch_size, np.exp(costs / iters), iters * batch_size / (time.time() - start_time)))train_perplexity = np.exp(costs / iters)print(\"\" % (i + , max_max_epoch, train_perplexity))for top_k in top_k_list:state1 = tl.layers.initialize_rnn_state(lstm1_test.initial_state)outs_id = [vocab.word_to_id(w) for w in seed]for ids in outs_id[:-]:a_id = np.asarray(ids).reshape(, )state1 = sess.run([lstm1_test.final_state], feed_dict={input_data_test: a_id,lstm1_test.initial_state: state1})a_id = outs_id[-]for _ in range(print_length):a_id = np.asarray(a_id).reshape(, )out, state1 = sess.run([y_soft, lstm1_test.final_state], feed_dict={input_data_test: a_id,lstm1_test.initial_state: state1})a_id = tl.nlp.sample_top(out[], top_k=top_k)outs_id.append(a_id)sentence = [vocab.id_to_word(w) for w in outs_id]sentence = \"\".join(sentence)print(top_k, '', sentence)print(\"\")tl.files.save_npz(network_test.all_params, name=model_file_name)", "docstring": "Generate text by Synced sequence input and output.", "id": "f11079:m4"} {"signature": "def model_batch_norm(x, y_, reuse, is_train):", "body": "W_init = tf.truncated_normal_initializer(stddev=)W_init2 = tf.truncated_normal_initializer(stddev=)b_init2 = tf.constant_initializer(value=)with tf.variable_scope(\"\", reuse=reuse):net = InputLayer(x, name='')net = Conv2d(net, , (, ), (, ), padding='', W_init=W_init, b_init=None, name='')net = BatchNormLayer(net, decay=, is_train=is_train, act=tf.nn.relu, name='')net = MaxPool2d(net, (, ), (, ), padding='', name='')net = Conv2d(net, , (, ), (, ), padding='', W_init=W_init, b_init=None, name='')net = BatchNormLayer(net, decay=, is_train=is_train, act=tf.nn.relu, name='')net = MaxPool2d(net, (, ), (, ), padding='', name='')net = FlattenLayer(net, name='') net = DenseLayer(net, , act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='')net = DenseLayer(net, , act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='')net = DenseLayer(net, , act=None, W_init=W_init2, name='')y = net.outputsce = tl.cost.cross_entropy(y, y_, name='')L2 = for p in tl.layers.get_variables_with_name('', True, True):L2 += tf.contrib.layers.l2_regularizer()(p)cost = ce + L2correct_prediction = tf.equal(tf.argmax(y, ), y_)acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))return net, cost, acc", "docstring": "Batch normalization should be placed before rectifier.", "id": "f11084:m1"} {"signature": "def distort_fn(x, is_train=False):", "body": "x = tl.prepro.crop(x, , , is_random=is_train)if is_train:x = tl.prepro.flip_axis(x, axis=, is_random=True)x = tl.prepro.brightness(x, gamma=, gain=, is_random=True)x = (x - np.mean(x)) / max(np.std(x), ) return x", "docstring": "The images are processed as follows:\n.. 
They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.\n.. They are approximately whitened to make the model insensitive to dynamic range.\nFor training, we additionally apply a series of random distortions to\nartificially increase the data set size:\n.. Randomly flip the image from left to right.\n.. Randomly distort the image brightness.", "id": "f11084:m2"} {"signature": "def data_to_tfrecord(images, labels, filename):", "body": "if os.path.isfile(filename):print(\"\" % filename)returnprint(\"\" % filename)writer = tf.python_io.TFRecordWriter(filename)for index, img in enumerate(images):img_raw = img.tobytes()label = int(labels[index])example = tf.train.Example(features=tf.train.Features(feature={\"\": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),'': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),}))writer.write(example.SerializeToString()) writer.close()", "docstring": "Save data into TFRecord.", "id": "f11089:m0"} {"signature": "def read_and_decode(filename, is_train=None):", "body": "filename_queue = tf.train.string_input_producer([filename])reader = tf.TFRecordReader()_, serialized_example = reader.read(filename_queue)features = tf.parse_single_example(serialized_example, features={'': tf.FixedLenFeature([], tf.int64),'': tf.FixedLenFeature([], tf.string),})img = tf.decode_raw(features[''], tf.float32)img = tf.reshape(img, [, , ])if is_train ==True:img = tf.random_crop(img, [, , ])img = tf.image.random_flip_left_right(img)img = tf.image.random_brightness(img, max_delta=)img = tf.image.random_contrast(img, lower=, upper=)img = tf.image.per_image_standardization(img)elif is_train == False:img = tf.image.resize_image_with_crop_or_pad(img, , )img = tf.image.per_image_standardization(img)elif is_train == None:img = imglabel = tf.cast(features[''], tf.int32)return img, label", "docstring": "Return tensor to read from TFRecord.", "id": "f11089:m1"} {"signature": "def data_to_tfrecord(images, labels, filename):", "body": "if os.path.isfile(filename):print(\"\" % filename)returnprint(\"\" % filename)writer = tf.python_io.TFRecordWriter(filename)for index, img in enumerate(images):img_raw = img.tobytes()label = int(labels[index])example = tf.train.Example(features=tf.train.Features(feature={\"\": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),'': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),}))writer.write(example.SerializeToString()) writer.close()", "docstring": "Save data into TFRecord.", "id": "f11095:m0"} {"signature": "def read_and_decode(filename, is_train=None):", "body": "filename_queue = tf.train.string_input_producer([filename])reader = tf.TFRecordReader()_, serialized_example = reader.read(filename_queue)features = tf.parse_single_example(serialized_example, features={'': tf.FixedLenFeature([], tf.int64),'': tf.FixedLenFeature([], tf.string),})img = tf.decode_raw(features[''], tf.float32)img = tf.reshape(img, [, , ])if is_train ==True:img = tf.random_crop(img, [, , ])img = tf.image.random_flip_left_right(img)img = tf.image.random_brightness(img, max_delta=)img = tf.image.random_contrast(img, lower=, upper=)img = tf.image.per_image_standardization(img)elif is_train == False:img = tf.image.resize_image_with_crop_or_pad(img, , )img = tf.image.per_image_standardization(img)elif is_train == None:img = imglabel = tf.cast(features[''], tf.int32)return img, label", "docstring": "Return tensor to read from TFRecord.", "id": "f11095:m1"} {"signature": "def data_to_tfrecord(images, 
labels, filename):", "body": "if os.path.isfile(filename):print(\"\" % filename)returnprint(\"\" % filename)writer = tf.python_io.TFRecordWriter(filename)for index, img in enumerate(images):img_raw = img.tobytes()label = int(labels[index])example = tf.train.Example(features=tf.train.Features(feature={\"\": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),'': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),}))writer.write(example.SerializeToString()) writer.close()", "docstring": "Save data into TFRecord.", "id": "f11096:m0"} {"signature": "def read_and_decode(filename, is_train=None):", "body": "filename_queue = tf.train.string_input_producer([filename])reader = tf.TFRecordReader()_, serialized_example = reader.read(filename_queue)features = tf.parse_single_example(serialized_example, features={'': tf.FixedLenFeature([], tf.int64),'': tf.FixedLenFeature([], tf.string),})img = tf.decode_raw(features[''], tf.float32)img = tf.reshape(img, [, , ])if is_train ==True:img = tf.random_crop(img, [, , ])img = tf.image.random_flip_left_right(img)img = tf.image.random_brightness(img, max_delta=)img = tf.image.random_contrast(img, lower=, upper=)img = tf.image.per_image_standardization(img)elif is_train == False:img = tf.image.resize_image_with_crop_or_pad(img, , )img = tf.image.per_image_standardization(img)elif is_train == None:img = imglabel = tf.cast(features[''], tf.int32)return img, label", "docstring": "Return tensor to read from TFRecord.", "id": "f11096:m1"} {"signature": "def data_to_tfrecord(images, labels, filename):", "body": "if os.path.isfile(filename):print(\"\" % filename)returnprint(\"\" % filename)writer = tf.python_io.TFRecordWriter(filename)for index, img in enumerate(images):img_raw = img.tobytes()label = int(labels[index])example = tf.train.Example(features=tf.train.Features(feature={\"\": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),'': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),}))writer.write(example.SerializeToString()) writer.close()", "docstring": "Save data into TFRecord.", "id": "f11099:m0"} {"signature": "def read_and_decode(filename, is_train=None):", "body": "filename_queue = tf.train.string_input_producer([filename])reader = tf.TFRecordReader()_, serialized_example = reader.read(filename_queue)features = tf.parse_single_example(serialized_example, features={'': tf.FixedLenFeature([], tf.int64),'': tf.FixedLenFeature([], tf.string),})img = tf.decode_raw(features[''], tf.float32)img = tf.reshape(img, [, , ])if is_train ==True:img = tf.random_crop(img, [, , ])img = tf.image.random_flip_left_right(img)img = tf.image.random_brightness(img, max_delta=)img = tf.image.random_contrast(img, lower=, upper=)img = tf.image.per_image_standardization(img)elif is_train == False:img = tf.image.resize_image_with_crop_or_pad(img, , )img = tf.image.per_image_standardization(img)elif is_train == None:img = imglabel = tf.cast(features[''], tf.int32)return img, label", "docstring": "Return tensor to read from TFRecord.", "id": "f11099:m1"} {"signature": "def _int64_feature(value):", "body": "return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))", "docstring": "Wrapper for inserting an int64 Feature into a SequenceExample proto,\n e.g, An integer label.", "id": "f11102:m0"} {"signature": "def _bytes_feature(value):", "body": "return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))", "docstring": "Wrapper for inserting a bytes Feature into a SequenceExample proto,\n e.g, an image in byte", 
"id": "f11102:m1"} {"signature": "def _int64_feature_list(values):", "body": "return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])", "docstring": "Wrapper for inserting an int64 FeatureList into a SequenceExample proto,\n e.g, sentence in list of ints", "id": "f11102:m2"} {"signature": "def _bytes_feature_list(values):", "body": "return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values])", "docstring": "Wrapper for inserting a bytes FeatureList into a SequenceExample proto,\n e.g, sentence in list of bytes", "id": "f11102:m3"} {"signature": "def distort_image(image, thread_id):", "body": "with tf.name_scope(\"\"): image = tf.image.random_flip_left_right(image)color_ordering = thread_id % with tf.name_scope(\"\"): if color_ordering == :image = tf.image.random_brightness(image, max_delta= / )image = tf.image.random_saturation(image, lower=, upper=)image = tf.image.random_hue(image, max_delta=)image = tf.image.random_contrast(image, lower=, upper=)elif color_ordering == :image = tf.image.random_brightness(image, max_delta= / )image = tf.image.random_contrast(image, lower=, upper=)image = tf.image.random_saturation(image, lower=, upper=)image = tf.image.random_hue(image, max_delta=)image = tf.clip_by_value(image, , )return image", "docstring": "Perform random distortions on an image.\n Args:\n image: A float32 Tensor of shape [height, width, 3] with values in [0, 1).\n thread_id: Preprocessing thread id used to select the ordering of color\n distortions. There should be a multiple of 2 preprocessing threads.\n Returns:````\n distorted_image: A float32 Tensor of shape [height, width, 3] with values in\n [0, 1].", "id": "f11102:m4"} {"signature": "def prefetch_input_data(reader, file_pattern, is_training, batch_size, values_per_shard, input_queue_capacity_factor=,num_reader_threads=, shard_queue_name=\"\", value_queue_name=\"\"):", "body": "data_files = []for pattern in file_pattern.split(\"\"):data_files.extend(tf.gfile.Glob(pattern))if not data_files:tl.logging.fatal(\"\", file_pattern)else:tl.logging.info(\"\", len(data_files), file_pattern)if is_training:print(\"\")filename_queue = tf.train.string_input_producer(data_files, shuffle=True, capacity=, name=shard_queue_name)min_queue_examples = values_per_shard * input_queue_capacity_factorcapacity = min_queue_examples + * batch_sizevalues_queue = tf.RandomShuffleQueue(capacity=capacity, min_after_dequeue=min_queue_examples, dtypes=[tf.string],name=\"\" + value_queue_name)else:print(\"\")filename_queue = tf.train.string_input_producer(data_files, shuffle=False, capacity=, name=shard_queue_name)capacity = values_per_shard + * batch_sizevalues_queue = tf.FIFOQueue(capacity=capacity, dtypes=[tf.string], name=\"\" + value_queue_name)enqueue_ops = []for _ in range(num_reader_threads):_, value = reader.read(filename_queue)enqueue_ops.append(values_queue.enqueue([value]))tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(values_queue, enqueue_ops))tf.summary.scalar(\"\" % (values_queue.name, capacity),tf.cast(values_queue.size(), tf.float32) * ( / capacity))return values_queue", "docstring": "Prefetches string values from disk into an input queue.\n\n In training the capacity of the queue is important because a larger queue\n means better mixing of training examples between shards. 
The minimum number of\n values kept in the queue is values_per_shard * input_queue_capacity_factor,\n where input_queue_memory factor should be chosen to trade-off better mixing\n with memory usage.\n\n Args:\n reader: Instance of tf.ReaderBase.\n file_pattern: Comma-separated list of file patterns (e.g.\n /tmp/train_data-?????-of-00100).\n is_training: Boolean; whether prefetching for training or eval.\n batch_size: Model batch size used to determine queue capacity.\n values_per_shard: Approximate number of values per shard.\n input_queue_capacity_factor: Minimum number of values to keep in the queue\n in multiples of values_per_shard. See comments above.\n num_reader_threads: Number of reader threads to fill the queue.\n shard_queue_name: Name for the shards filename queue.\n value_queue_name: Name for the values input queue.\n\n Returns:\n A Queue containing prefetched string values.", "id": "f11102:m5"} {"signature": "def batch_with_dynamic_pad(images_and_captions, batch_size, queue_capacity, add_summaries=True):", "body": "enqueue_list = []for image, caption in images_and_captions:caption_length = tf.shape(caption)[]input_length = tf.expand_dims(tf.subtract(caption_length, ), )input_seq = tf.slice(caption, [], input_length)target_seq = tf.slice(caption, [], input_length)indicator = tf.ones(input_length, dtype=tf.int32)enqueue_list.append([image, input_seq, target_seq, indicator])images, input_seqs, target_seqs, mask = tf.train.batch_join(enqueue_list, batch_size=batch_size, capacity=queue_capacity, dynamic_pad=True, name=\"\")if add_summaries:lengths = tf.add(tf.reduce_sum(mask, ), )tf.summary.scalar(\"\", tf.reduce_min(lengths))tf.summary.scalar(\"\", tf.reduce_max(lengths))tf.summary.scalar(\"\", tf.reduce_mean(lengths))return images, input_seqs, target_seqs, mask", "docstring": "Batches input images and captions.\n\n This function splits the caption into an input sequence and a target sequence,\n where the target sequence is the input sequence right-shifted by 1. Input and\n target sequences are batched and padded up to the maximum length of sequences\n in the batch. A mask is created to distinguish real words from padding words.\n\n Example:\n Actual captions in the batch ('-' denotes padded character):\n [\n [ 1 2 5 4 5 ],\n [ 1 2 3 4 - ],\n [ 1 2 3 - - ],\n ]\n\n input_seqs:\n [\n [ 1 2 3 4 ],\n [ 1 2 3 - ],\n [ 1 2 - - ],\n ]\n\n target_seqs:\n [\n [ 2 3 4 5 ],\n [ 2 3 4 - ],\n [ 2 3 - - ],\n ]\n\n mask:\n [\n [ 1 1 1 1 ],\n [ 1 1 1 0 ],\n [ 1 1 0 0 ],\n ]\n\n Args:\n images_and_captions: A list of pairs [image, caption], where image is a\n Tensor of shape [height, width, channels] and caption is a 1-D Tensor of\n any length. 
Each pair will be processed and added to the queue in a\n separate thread.\n batch_size: Batch size.\n queue_capacity: Queue capacity.\n add_summaries: If true, add caption length summaries.\n\n Returns:\n images: A Tensor of shape [batch_size, height, width, channels].\n input_seqs: An int32 Tensor of shape [batch_size, padded_length].\n target_seqs: An int32 Tensor of shape [batch_size, padded_length].\n mask: An int32 0/1 Tensor of shape [batch_size, padded_length].", "id": "f11102:m6"} {"signature": "def example1():", "body": "st = time.time()for _ in range(): xx = tl.prepro.rotation(image, rg=-, is_random=False)xx = tl.prepro.flip_axis(xx, axis=, is_random=False)xx = tl.prepro.shear2(xx, shear=(, -), is_random=False)xx = tl.prepro.zoom(xx, zoom_range= / )xx = tl.prepro.shift(xx, wrg=-, hrg=, is_random=False)print(\"\" % ((time.time() - st) / ))tl.vis.save_image(xx, '')", "docstring": "Example 1: Applying transformation one-by-one is very SLOW !", "id": "f11105:m1"} {"signature": "def example2():", "body": "st = time.time()for _ in range(): transform_matrix = create_transformation_matrix()result = tl.prepro.affine_transform_cv2(image, transform_matrix) print(\"\" % ((time.time() - st) / )) tl.vis.save_image(result, '')", "docstring": "Example 2: Applying all transforms in one is very FAST !", "id": "f11105:m2"} {"signature": "def example3():", "body": "n_data = imgs_file_list = [''] * n_datatrain_targets = [np.ones()] * n_datadef generator():if len(imgs_file_list) != len(train_targets):raise RuntimeError('')for _input, _target in zip(imgs_file_list, train_targets):yield _input, _targetdef _data_aug_fn(image):transform_matrix = create_transformation_matrix()result = tl.prepro.affine_transform_cv2(image, transform_matrix) return resultdef _map_fn(image_path, target):image = tf.read_file(image_path)image = tf.image.decode_jpeg(image, channels=) image = tf.image.convert_image_dtype(image, dtype=tf.float32)image = tf.py_func(_data_aug_fn, [image], [tf.float32])target = tf.reshape(target, ())return image, targetn_epoch = batch_size = dataset = tf.data.Dataset().from_generator(generator, output_types=(tf.string, tf.int64))dataset = dataset.shuffle(buffer_size=) dataset = dataset.repeat(n_epoch)dataset = dataset.map(_map_fn, num_parallel_calls=multiprocessing.cpu_count())dataset = dataset.batch(batch_size) dataset = dataset.prefetch() iterator = dataset.make_one_shot_iterator()one_element = iterator.get_next()sess = tf.Session()n_step = round(n_epoch * n_data / batch_size)st = time.time()for _ in range(n_step):_images, _targets = sess.run(one_element)print(\"\" % ((time.time() - st) / batch_size / n_step))", "docstring": "Example 3: Using TF dataset API to load and process image for training", "id": "f11105:m3"} {"signature": "def example4():", "body": "transform_matrix = create_transformation_matrix()result = tl.prepro.affine_transform_cv2(image, transform_matrix) coords = [[(, ), (, ), (, ), (, )], [(, ), (, ), (, )]]coords_result = tl.prepro.affine_transform_keypoints(coords, transform_matrix)def imwrite(image, coords_list, name):coords_list_ = []for coords in coords_list:coords = np.array(coords, np.int32)coords = coords.reshape((-, , ))coords_list_.append(coords)image = cv2.polylines(image, coords_list_, True, (, , ), )cv2.imwrite(name, image[..., ::-])imwrite(image, coords, '')imwrite(result, coords_result, '')", "docstring": "Example 4: Transforming coordinates using affine matrix.", "id": "f11105:m4"} {"signature": "def generate_graph_and_checkpoint(graph_output_path, 
checkpoint_output_path):", "body": "X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-, , , ))sess = tf.InteractiveSession()batch_size = x = tf.placeholder(tf.float32, shape=[batch_size, , , ]) y_ = tf.placeholder(tf.int64, shape=[batch_size])net = tl.layers.InputLayer(x, name='')net = tl.layers.Conv2d(net, , (, ), (, ), act=tf.nn.relu, padding='', name='')net = tl.layers.MaxPool2d(net, (, ), (, ), padding='', name='')net = tl.layers.Conv2d(net, , (, ), (, ), act=tf.nn.relu, padding='', name='')net = tl.layers.MaxPool2d(net, (, ), (, ), padding='', name='')net = tl.layers.FlattenLayer(net, name='')net = tl.layers.DropoutLayer(net, keep=, name='')net = tl.layers.DenseLayer(net, , act=tf.nn.relu, name='')net = tl.layers.DropoutLayer(net, keep=, name='')net = tl.layers.DenseLayer(net, , act=None, name='')y = net.outputsprint([n.name for n in tf.get_default_graph().as_graph_def().node])with open(graph_output_path, \"\") as file:graph = tf.get_default_graph().as_graph_def(add_shapes=True)file.write(graph.SerializeToString())cost = tl.cost.cross_entropy(y, y_, '')correct_prediction = tf.equal(tf.argmax(y, ), y_)acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))n_epoch = learning_rate = print_freq = train_params = net.all_paramstrain_op = tf.train.AdamOptimizer(learning_rate).minimize(cost, var_list=train_params)tl.layers.initialize_global_variables(sess)net.print_params()net.print_layers()print('' % learning_rate)print('' % batch_size)for epoch in range(n_epoch):start_time = time.time()for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):feed_dict = {x: X_train_a, y_: y_train_a}feed_dict.update(net.all_drop) sess.run(train_op, feed_dict=feed_dict)if epoch % == :tl.files.save_ckpt(sess, mode_name='', save_dir=checkpoint_output_path, printable=True)if epoch + == or (epoch + ) % print_freq == :print(\"\" % (epoch + , n_epoch, time.time() - start_time))train_loss, train_acc, n_batch = , , for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):dp_dict = tl.utils.dict_to_one(net.all_drop) feed_dict = {x: X_train_a, y_: y_train_a}feed_dict.update(dp_dict)err, ac = sess.run([cost, acc], feed_dict=feed_dict)train_loss += errtrain_acc += acn_batch += print(\"\" % (train_loss / n_batch))print(\"\" % (train_acc / n_batch))val_loss, val_acc, n_batch = , , for X_val_a, y_val_a in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=True):dp_dict = tl.utils.dict_to_one(net.all_drop) feed_dict = {x: X_val_a, y_: y_val_a}feed_dict.update(dp_dict)err, ac = sess.run([cost, acc], feed_dict=feed_dict)val_loss += errval_acc += acn_batch += print(\"\" % (val_loss / n_batch))print(\"\" % (val_acc / n_batch))print('')test_loss, test_acc, n_batch = , , for X_test_a, y_test_a in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=True):dp_dict = tl.utils.dict_to_one(net.all_drop) feed_dict = {x: X_test_a, y_: y_test_a}feed_dict.update(dp_dict)err, ac = sess.run([cost, acc], feed_dict=feed_dict)test_loss += errtest_acc += acn_batch += print(\"\" % (test_loss / n_batch))print(\"\" % (test_acc / n_batch))", "docstring": "Reimplementation of the TensorFlow official MNIST CNN tutorials and generate the graph and checkpoint for this model:\n- https://www.tensorflow.org/versions/r0.8/tutorials/mnist/pros/index.html\n- https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/image/mnist/convolutional.py\n\n- For simplified CNN layer see \"Convolutional layer 
(Simplified)\"\n\nParameters\n-----------\ngraph_output_path : string\n the path of the graph where you want to save.\ncheckpoint_output_path : string\n the path of the checkpoint where you want to save.\n\nReferences\n-----------\n- `onnx-tf exporting tutorial `__", "id": "f11112:m0"} {"signature": "def freeze_graph(graph_path, checkpoint_path, output_path, end_node_names, is_binary_graph):", "body": "_freeze_graph(input_graph=graph_path, input_saver='', input_binary=is_binary_graph, input_checkpoint=checkpoint_path,output_graph=output_path, output_node_names=end_node_names, restore_op_name='',filename_tensor_name='', clear_devices=True, initializer_nodes=None)", "docstring": "Reimplementation of the TensorFlow official freeze_graph function to freeze the graph and checkpoint together:\n\n Parameters\n -----------\n graph_path : string\n the path where your graph file save.\n checkpoint_output_path : string\n the path where your checkpoint save.\n output_path : string\n the path where you want to save the output proto buff\n end_node_names : string\n the name of the end node in your graph you want to get in your proto buff\n is_binary_graph : boolean\n declare your file whether is a binary graph\n\n References\n ----------\n - `onnx-tf exporting tutorial `__\n - `tensorflow freeze_graph `", "id": "f11112:m1"} {"signature": "def convert_model_to_onnx(frozen_graph_path, end_node_names, onnx_output_path):", "body": "with tf.gfile.GFile(frozen_graph_path, \"\") as f:graph_def = tf.GraphDef()graph_def.ParseFromString(f.read())onnx_model = tensorflow_graph_to_onnx_model(graph_def, end_node_names, opset=)file = open(onnx_output_path, \"\")file.write(onnx_model.SerializeToString())file.close()", "docstring": "Reimplementation of the TensorFlow-onnx official tutorial convert the proto buff to onnx file:\n\n Parameters\n -----------\n frozen_graph_path : string\n the path where your frozen graph file save.\n end_node_names : string\n the name of the end node in your graph you want to get in your proto buff\n onnx_output_path : string\n the path where you want to save the onnx file.\n\n References\n -----------\n - `onnx-tf exporting tutorial `", "id": "f11112:m2"} {"signature": "def convert_onnx_to_model(onnx_input_path):", "body": "model = onnx.load(onnx_input_path)tf_rep = prepare(model)img = np.load(\"\")output = tf_rep.run(img.reshape([, ]))print(\"\", np.argmax(output))", "docstring": "Reimplementation of the TensorFlow-onnx official tutorial convert the onnx file to specific: model\n\n Parameters\n -----------\n onnx_input_path : string\n the path where you save the onnx file.\n\n References\n -----------\n - `onnx-tf exporting tutorial `__", "id": "f11112:m3"} {"signature": "def _GetNextLogCountPerToken(token):", "body": "global _log_counter_per_token _log_counter_per_token[token] = + _log_counter_per_token.get(token, -)return _log_counter_per_token[token]", "docstring": "Wrapper for _log_counter_per_token.\n\n Args:\n token: The token for which to look up the count.\n\n Returns:\n The number of times this function has been called with\n *token* as an argument (starting at 0)", "id": "f11116:m11"} {"signature": "def log_every_n(level, msg, n, *args):", "body": "count = _GetNextLogCountPerToken(_GetFileAndLine())log_if(level, msg, not (count % n), *args)", "docstring": "Log 'msg % args' at level 'level' once per 'n' times.\n\n Logs the 1st call, (N+1)st call, (2N+1)st call, etc.\n Not threadsafe.\n\n Args:\n level: The level at which to log.\n msg: The message to be logged.\n n: The number of 
times this should be called before it is logged.\n *args: The args to be substituted into the msg.", "id": "f11116:m12"} {"signature": "def log_first_n(level, msg, n, *args): ", "body": "count = _GetNextLogCountPerToken(_GetFileAndLine())log_if(level, msg, count < n, *args)", "docstring": "Log 'msg % args' at level 'level' only first 'n' times.\n\n Not threadsafe.\n\n Args:\n level: The level at which to log.\n msg: The message to be logged.\n n: The number of times this should be called before it is logged.\n *args: The args to be substituted into the msg.", "id": "f11116:m13"} {"signature": "def log_if(level, msg, condition, *args):", "body": "if condition:vlog(level, msg, *args)", "docstring": "Log 'msg % args' at level 'level' only if condition is fulfilled.", "id": "f11116:m14"} {"signature": "def _GetFileAndLine():", "body": "f = _sys._getframe()our_file = f.f_code.co_filenamef = f.f_backwhile f:code = f.f_codeif code.co_filename != our_file:return (code.co_filename, f.f_lineno)f = f.f_backreturn ('', )", "docstring": "Returns (filename, linenumber) for the stack frame.", "id": "f11116:m15"} {"signature": "def google2_log_prefix(level, timestamp=None, file_and_line=None):", "body": "global _level_namesnow = timestamp or _time.time()now_tuple = _time.localtime(now)now_microsecond = int( * (now % ))(filename, line) = file_and_line or _GetFileAndLine()basename = _os.path.basename(filename)severity = ''if level in _level_names:severity = _level_names[level][]s = '' % (severity,now_tuple[], now_tuple[], now_tuple[], now_tuple[], now_tuple[], now_microsecond,_get_thread_id(),basename,line)return s", "docstring": "Assemble a logline prefix using the google2 format.", "id": "f11116:m16"} {"signature": "def get_verbosity():", "body": "return _get_logger().getEffectiveLevel()", "docstring": "Return how much logging output will be produced.", "id": "f11116:m17"} {"signature": "def set_verbosity(v):", "body": "_get_logger().setLevel(v)", "docstring": "Sets the threshold for what messages will be logged.", "id": "f11116:m18"} {"signature": "def _get_thread_id():", "body": "thread_id = six.moves._thread.get_ident()return thread_id & _THREAD_ID_MASK", "docstring": "Get id of current thread, suitable for logging as an unsigned quantity.", "id": "f11116:m19"} {"signature": "def deconv2d_bilinear_upsampling_initializer(shape):", "body": "if shape[] != shape[]:raise Exception('')if shape[] < shape[]:raise Exception('')filter_size = shape[]num_out_channels = shape[]num_in_channels = shape[]bilinear_kernel = np.zeros([filter_size, filter_size], dtype=np.float32)scale_factor = (filter_size + ) // if filter_size % == :center = scale_factor - else:center = scale_factor - for x in range(filter_size):for y in range(filter_size):bilinear_kernel[x, y] = ( - abs(x - center) / scale_factor) * ( - abs(y - center) / scale_factor)weights = np.zeros((filter_size, filter_size, num_out_channels, num_in_channels))for i in range(num_out_channels):weights[:, :, i, i] = bilinear_kernelreturn tf.constant_initializer(value=weights, dtype=LayersConfig.tf_dtype)", "docstring": "Returns the initializer that can be passed to DeConv2dLayer for initializing the\n weights in correspondence to channel-wise bilinear up-sampling.\n Used in segmentation approaches such as [FCN](https://arxiv.org/abs/1605.06211)\n\n Parameters\n ----------\n shape : tuple of int\n The shape of the filters, [height, width, output_channels, in_channels].\n It must match the shape passed to DeConv2dLayer.\n\n Returns\n -------\n 
``tf.constant_initializer``\n A constant initializer with weights set to correspond to per channel bilinear upsampling\n when passed as W_int in DeConv2dLayer\n\n Examples\n --------\n - Upsampling by a factor of 2, ie e.g 100->200\n >>> import tensorflow as tf\n >>> import tensorlayer as tl\n >>> rescale_factor = 2\n >>> imsize = 128\n >>> num_channels = 3\n >>> filter_shape = (5, 5)\n >>> filter_size = (2 * rescale_factor - rescale_factor % 2) #Corresponding bilinear filter size\n >>> num_in_channels = 3\n >>> num_out_channels = 3\n >>> deconv_filter_shape = (filter_size, filter_size, num_out_channels, num_in_channels)\n >>> x = tf.placeholder(tf.float32, (1, imsize, imsize, num_channels))\n >>> net = tl.layers.InputLayer(x, name='input_layer')\n >>> bilinear_init = deconv2d_bilinear_upsampling_initializer(shape=filter_shape)\n >>> net = tl.layers.DeConv2dLayer(net,\n ... shape=filter_shape,\n ... output_shape=(1, imsize*rescale_factor, imsize*rescale_factor, num_out_channels),\n ... strides=(1, rescale_factor, rescale_factor, 1),\n ... W_init=bilinear_init,\n ... padding='SAME',\n ... act=None, name='g/h1/decon2d')", "id": "f11119:m0"} {"signature": "def load_cifar10_dataset(shape=(-, , , ), path='', plotable=False):", "body": "path = os.path.join(path, '')logging.info(\"\".format(path))def unpickle(file):fp = open(file, '')if sys.version_info.major == :data = pickle.load(fp)elif sys.version_info.major == :data = pickle.load(fp, encoding='')else:raise RuntimeError(\"\")fp.close()return datafilename = ''url = ''maybe_download_and_extract(filename, path, url, extract=True)X_train = Noney_train = []for i in range(, ):data_dic = unpickle(os.path.join(path, '', \"\".format(i)))if i == :X_train = data_dic['']else:X_train = np.vstack((X_train, data_dic['']))y_train += data_dic['']test_data_dic = unpickle(os.path.join(path, '', \"\"))X_test = test_data_dic['']y_test = np.array(test_data_dic[''])if shape == (-, , , ):X_test = X_test.reshape(shape)X_train = X_train.reshape(shape)elif shape == (-, , , ):X_test = X_test.reshape(shape, order='')X_train = X_train.reshape(shape, order='')X_test = np.transpose(X_test, (, , , ))X_train = np.transpose(X_train, (, , , ))else:X_test = X_test.reshape(shape)X_train = X_train.reshape(shape)y_train = np.array(y_train)if plotable:logging.info('')import matplotlib.pyplot as pltfig = plt.figure()logging.info('' % X_train[].shape)plt.ion() count = for _ in range(): for _ in range(): _ = fig.add_subplot(, , count)if shape == (-, , , ):plt.imshow(np.transpose(X_train[count - ], (, , )), interpolation='')elif shape == (-, , , ):plt.imshow(X_train[count - ], interpolation='')else:raise Exception(\"\")plt.gca().xaxis.set_major_locator(plt.NullLocator())plt.gca().yaxis.set_major_locator(plt.NullLocator())count = count + plt.draw() plt.pause() logging.info(\"\" % X_train.shape)logging.info(\"\" % y_train.shape)logging.info(\"\" % X_test.shape)logging.info(\"\" % y_test.shape)X_train = np.asarray(X_train, dtype=np.float32)X_test = np.asarray(X_test, dtype=np.float32)y_train = np.asarray(y_train, dtype=np.int32)y_test = np.asarray(y_test, dtype=np.int32)return X_train, y_train, X_test, y_test", "docstring": "Load CIFAR-10 dataset.\n\n It consists of 60000 32x32 colour images in 10 classes, with\n 6000 images per class. There are 50000 training images and 10000 test images.\n\n The dataset is divided into five training batches and one test batch, each with\n 10000 images. The test batch contains exactly 1000 randomly-selected images from\n each class. 
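A small usage sketch (the batch size of 128 is arbitrary) combining this loader with the ``tl.iterate.minibatches`` iterator used elsewhere in this corpus:
>>> import tensorlayer as tl
>>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3))
>>> for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, 128, shuffle=True):
...     pass  # feed X_batch and y_batch to a training step here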
The training batches contain the remaining images in random order,\n but some training batches may contain more images from one class than another.\n Between them, the training batches contain exactly 5000 images from each class.\n\n Parameters\n ----------\n shape : tupe\n The shape of digit images e.g. (-1, 3, 32, 32) and (-1, 32, 32, 3).\n path : str\n The path that the data is downloaded to, defaults is ``data/cifar10/``.\n plotable : boolean\n Whether to plot some image examples, False as default.\n\n Examples\n --------\n >>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3))\n\n References\n ----------\n - `CIFAR website `__\n - `Data download link `__\n - ``__", "id": "f11120:m0"} {"signature": "def load_ptb_dataset(path=''):", "body": "path = os.path.join(path, '')logging.info(\"\".format(path))filename = ''url = ''maybe_download_and_extract(filename, path, url, extract=True)data_path = os.path.join(path, '', '')train_path = os.path.join(data_path, \"\")valid_path = os.path.join(data_path, \"\")test_path = os.path.join(data_path, \"\")word_to_id = nlp.build_vocab(nlp.read_words(train_path))train_data = nlp.words_to_word_ids(nlp.read_words(train_path), word_to_id)valid_data = nlp.words_to_word_ids(nlp.read_words(valid_path), word_to_id)test_data = nlp.words_to_word_ids(nlp.read_words(test_path), word_to_id)vocab_size = len(word_to_id)return train_data, valid_data, test_data, vocab_size", "docstring": "Load Penn TreeBank (PTB) dataset.\n\n It is used in many LANGUAGE MODELING papers,\n including \"Empirical Evaluation and Combination of Advanced Language\n Modeling Techniques\", \"Recurrent Neural Network Regularization\".\n It consists of 929k training words, 73k validation words, and 82k test\n words. It has 10k words in its vocabulary.\n\n Parameters\n ----------\n path : str\n The path that the data is downloaded to, defaults is ``data/ptb/``.\n\n Returns\n --------\n train_data, valid_data, test_data : list of int\n The training, validating and testing data in integer format.\n vocab_size : int\n The vocabulary size.\n\n Examples\n --------\n >>> train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset()\n\n References\n ---------------\n - ``tensorflow.models.rnn.ptb import reader``\n - `Manual download `__\n\n Notes\n ------\n - If you want to get the raw data, see the source code.", "id": "f11121:m0"} {"signature": "def load_flickr1M_dataset(tag='', size=, path=\"\", n_threads=, printable=False):", "body": "import shutilpath = os.path.join(path, '')logging.info(\"\".format(size * , size * ))images_zip = ['', '', '', '', '', '', '','', '', '']tag_zip = ''url = ''for image_zip in images_zip[:size]:image_folder = image_zip.split(\"\")[]if folder_exists(os.path.join(path, image_folder)) is False:logging.info(\"\".format(image_folder, path))maybe_download_and_extract(image_zip, path, url, extract=True)del_file(os.path.join(path, image_zip))shutil.move(os.path.join(path, ''), os.path.join(path, image_folder))else:logging.info(\"\".format(image_folder, path))if folder_exists(os.path.join(path, \"\")) is False:logging.info(\"\".format(path))maybe_download_and_extract(tag_zip, path, url, extract=True)del_file(os.path.join(path, tag_zip))else:logging.info(\"\".format(path))images_list = []images_folder_list = []for i in range(, size):images_folder_list += load_folder_list(path=os.path.join(path, '' % i))images_folder_list.sort(key=lambda s: int(s.split('')[-])) for folder in images_folder_list[:size * ]:tmp = 
load_file_list(path=folder, regx='', printable=False)tmp.sort(key=lambda s: int(s.split('')[-])) images_list.extend([os.path.join(folder, x) for x in tmp])tag_list = []tag_folder_list = load_folder_list(os.path.join(path, \"\"))tag_folder_list.sort(key=lambda s: int(os.path.basename(s)))for folder in tag_folder_list[:size * ]:tmp = load_file_list(path=folder, regx='', printable=False)tmp.sort(key=lambda s: int(s.split('')[-])) tmp = [os.path.join(folder, s) for s in tmp]tag_list += tmplogging.info(\"\".format(tag))select_images_list = []for idx, _val in enumerate(tag_list):tags = read_file(tag_list[idx]).split('')if tag in tags:select_images_list.append(images_list[idx])logging.info(\"\".format(tag))images = visualize.read_images(select_images_list, '', n_threads=n_threads, printable=printable)return images", "docstring": "Load Flick1M dataset.\n\n Returns a list of images by a given tag from Flickr1M dataset,\n it will download Flickr1M from `the official website `__\n at the first time you use it.\n\n Parameters\n ------------\n tag : str or None\n What images to return.\n - If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search `__.\n - If you want to get all images, set to ``None``.\n\n size : int\n integer between 1 to 10. 1 means 100k images ... 5 means 500k images, 10 means all 1 million images. Default is 10.\n path : str\n The path that the data is downloaded to, defaults is ``data/flickr25k/``.\n n_threads : int\n The number of thread to read image.\n printable : boolean\n Whether to print infomation when reading images, default is ``False``.\n\n Examples\n ----------\n Use 200k images\n\n >>> images = tl.files.load_flickr1M_dataset(tag='zebra', size=2)\n\n Use 1 Million images\n\n >>> images = tl.files.load_flickr1M_dataset(tag='zebra')", "id": "f11122:m0"} {"signature": "def load_cyclegan_dataset(filename='', path=''):", "body": "path = os.path.join(path, '')url = ''if folder_exists(os.path.join(path, filename)) is False:logging.info(\"\".format(filename, path))maybe_download_and_extract(filename + '', path, url, extract=True)del_file(os.path.join(path, filename + ''))def load_image_from_folder(path):path_imgs = load_file_list(path=path, regx='', printable=False)return visualize.read_images(path_imgs, path=path, n_threads=, printable=False)im_train_A = load_image_from_folder(os.path.join(path, filename, \"\"))im_train_B = load_image_from_folder(os.path.join(path, filename, \"\"))im_test_A = load_image_from_folder(os.path.join(path, filename, \"\"))im_test_B = load_image_from_folder(os.path.join(path, filename, \"\"))def if_2d_to_3d(images): for i, _v in enumerate(images):if len(images[i].shape) == :images[i] = images[i][:, :, np.newaxis]images[i] = np.tile(images[i], (, , ))return imagesim_train_A = if_2d_to_3d(im_train_A)im_train_B = if_2d_to_3d(im_train_B)im_test_A = if_2d_to_3d(im_test_A)im_test_B = if_2d_to_3d(im_test_B)return im_train_A, im_train_B, im_test_A, im_test_B", "docstring": "Load images from CycleGAN's database, see `this link `__.\n\n Parameters\n ------------\n filename : str\n The dataset you want, see `this link `__.\n path : str\n The path that the data is downloaded to, defaults is `data/cyclegan`\n\n Examples\n ---------\n >>> im_train_A, im_train_B, im_test_A, im_test_B = load_cyclegan_dataset(filename='summer2winter_yosemite')", "id": "f11123:m0"} {"signature": "def load_mnist_dataset(shape=(-, ), path=''):", "body": "return _load_mnist_dataset(shape, path, name='', url='')", "docstring": "Load the original mnist.\n\n 
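A quick sanity-check sketch for the splits described below (the ``path`` argument is arbitrary):
>>> import tensorlayer as tl
>>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1), path='data')
>>> assert X_train.shape == (50000, 28, 28, 1)            # 50000 training digits
>>> assert X_val.shape[0] == 10000 and X_test.shape[0] == 10000
>>> assert y_train.dtype.name == 'int32'                  # labels are returned as int32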
Automatically download MNIST dataset and return the training, validation and test set with 50000, 10000 and 10000 digit images respectively.\n\n Parameters\n ----------\n shape : tuple\n The shape of digit images (the default is (-1, 784), alternatively (-1, 28, 28, 1)).\n path : str\n The path that the data is downloaded to.\n\n Returns\n -------\n X_train, y_train, X_val, y_val, X_test, y_test: tuple\n Return splitted training/validation/test set respectively.\n\n Examples\n --------\n >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1,784), path='datasets')\n >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))", "id": "f11124:m0"} {"signature": "def load_matt_mahoney_text8_dataset(path=''):", "body": "path = os.path.join(path, '')logging.info(\"\".format(path))filename = ''url = ''maybe_download_and_extract(filename, path, url, expected_bytes=)with zipfile.ZipFile(os.path.join(path, filename)) as f:word_list = f.read(f.namelist()[]).split()for idx, _ in enumerate(word_list):word_list[idx] = word_list[idx].decode()return word_list", "docstring": "Load Matt Mahoney's dataset.\n\n Download a text file from Matt Mahoney's website\n if not present, and make sure it's the right size.\n Extract the first file enclosed in a zip file as a list of words.\n This dataset can be used for Word Embedding.\n\n Parameters\n ----------\n path : str\n The path that the data is downloaded to, defaults is ``data/mm_test8/``.\n\n Returns\n --------\n list of str\n The raw text data e.g. [.... 'their', 'families', 'who', 'were', 'expelled', 'from', 'jerusalem', ...]\n\n Examples\n --------\n >>> words = tl.files.load_matt_mahoney_text8_dataset()\n >>> print('Data size', len(words))", "id": "f11125:m0"} {"signature": "def load_celebA_dataset(path=''):", "body": "data_dir = ''filename, drive_id = \"\", \"\"save_path = os.path.join(path, filename)image_path = os.path.join(path, data_dir)if os.path.exists(image_path):logging.info(''.format(save_path))else:exists_or_mkdir(path)download_file_from_google_drive(drive_id, save_path)zip_dir = ''with zipfile.ZipFile(save_path) as zf:zip_dir = zf.namelist()[]zf.extractall(path)os.remove(save_path)os.rename(os.path.join(path, zip_dir), image_path)data_files = load_file_list(path=image_path, regx='', printable=False)for i, _v in enumerate(data_files):data_files[i] = os.path.join(image_path, data_files[i])return data_files", "docstring": "Load CelebA dataset\n\n Return a list of image path.\n\n Parameters\n -----------\n path : str\n The path that the data is downloaded to, defaults is ``data/celebA/``.", "id": "f11126:m0"} {"signature": "def load_fashion_mnist_dataset(shape=(-, ), path=''):", "body": "return _load_mnist_dataset(shape, path, name='', url='')", "docstring": "Load the fashion mnist.\n\n Automatically download fashion-MNIST dataset and return the training, validation and test set with 50000, 10000 and 10000 fashion images respectively, `examples `__.\n\n Parameters\n ----------\n shape : tuple\n The shape of digit images (the default is (-1, 784), alternatively (-1, 28, 28, 1)).\n path : str\n The path that the data is downloaded to.\n\n Returns\n -------\n X_train, y_train, X_val, y_val, X_test, y_test: tuple\n Return splitted training/validation/test set respectively.\n\n Examples\n --------\n >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_fashion_mnist_dataset(shape=(-1,784), path='datasets')\n >>> X_train, y_train, X_val, y_val, X_test, y_test = 
tl.files.load_fashion_mnist_dataset(shape=(-1, 28, 28, 1))", "id": "f11127:m0"} {"signature": "def load_wmt_en_fr_dataset(path=''):", "body": "path = os.path.join(path, '')_WMT_ENFR_TRAIN_URL = \"\"_WMT_ENFR_DEV_URL = \"\"def gunzip_file(gz_path, new_path):\"\"\"\"\"\"logging.info(\"\" % (gz_path, new_path))with gzip.open(gz_path, \"\") as gz_file:with open(new_path, \"\") as new_file:for line in gz_file:new_file.write(line)def get_wmt_enfr_train_set(path):\"\"\"\"\"\"filename = \"\"maybe_download_and_extract(filename, path, _WMT_ENFR_TRAIN_URL, extract=True)train_path = os.path.join(path, \"\")gunzip_file(train_path + \"\", train_path + \"\")gunzip_file(train_path + \"\", train_path + \"\")return train_pathdef get_wmt_enfr_dev_set(path):\"\"\"\"\"\"filename = \"\"dev_file = maybe_download_and_extract(filename, path, _WMT_ENFR_DEV_URL, extract=False)dev_name = \"\"dev_path = os.path.join(path, \"\")if not (gfile.Exists(dev_path + \"\") and gfile.Exists(dev_path + \"\")):logging.info(\"\" % dev_file)with tarfile.open(dev_file, \"\") as dev_tar:fr_dev_file = dev_tar.getmember(\"\" + dev_name + \"\")en_dev_file = dev_tar.getmember(\"\" + dev_name + \"\")fr_dev_file.name = dev_name + \"\" en_dev_file.name = dev_name + \"\"dev_tar.extract(fr_dev_file, path)dev_tar.extract(en_dev_file, path)return dev_pathlogging.info(\"\".format(path))train_path = get_wmt_enfr_train_set(path)dev_path = get_wmt_enfr_dev_set(path)return train_path, dev_path", "docstring": "Load WMT'15 English-to-French translation dataset.\n\n It will download the data from the WMT'15 Website (10^9-French-English corpus), and the 2013 news test from the same site as development set.\n Returns the directories of training data and test data.\n\n Parameters\n ----------\n path : str\n The path that the data is downloaded to, defaults is ``data/wmt_en_fr/``.\n\n References\n ----------\n - Code modified from /tensorflow/models/rnn/translation/data_utils.py\n\n Notes\n -----\n Usually, it will take a long time to download this dataset.", "id": "f11128:m0"} {"signature": "def load_mpii_pose_dataset(path='', is_16_pos_only=False):", "body": "path = os.path.join(path, '')logging.info(\"\".format(path))url = \"\"tar_filename = \"\"extracted_filename = \"\"if folder_exists(os.path.join(path, extracted_filename)) is False:logging.info(\"\".format(extracted_filename, path))maybe_download_and_extract(tar_filename, path, url, extract=True)del_file(os.path.join(path, tar_filename))url = \"\"tar_filename = \"\"extracted_filename2 = \"\"if folder_exists(os.path.join(path, extracted_filename2)) is False:logging.info(\"\".format(extracted_filename, path))maybe_download_and_extract(tar_filename, path, url, extract=True)del_file(os.path.join(path, tar_filename))import scipy.io as siologging.info(\"\")ann_train_list = []ann_test_list = []img_train_list = []img_test_list = []def save_joints():mat = sio.loadmat(os.path.join(path, extracted_filename, \"\"))for _, (anno, train_flag) in enumerate( zip(mat[''][''][, ][], mat[''][''][, ][])):img_fn = anno[''][''][, ][]train_flag = int(train_flag)if train_flag:img_train_list.append(img_fn)ann_train_list.append([])else:img_test_list.append(img_fn)ann_test_list.append([])head_rect = []if '' in str(anno[''].dtype):head_rect = zip([x1[, ] for x1 in anno[''][''][]], [y1[, ] for y1 in anno[''][''][]],[x2[, ] for x2 in anno[''][''][]], [y2[, ] for y2 in anno[''][''][]])else:head_rect = [] if '' in str(anno[''].dtype):annopoints = anno[''][''][]head_x1s = anno[''][''][]head_y1s = anno[''][''][]head_x2s = 
anno[''][''][]head_y2s = anno[''][''][]for annopoint, head_x1, head_y1, head_x2, head_y2 in zip(annopoints, head_x1s, head_y1s, head_x2s,head_y2s):if annopoint.size:head_rect = [float(head_x1[, ]),float(head_y1[, ]),float(head_x2[, ]),float(head_y2[, ])]annopoint = annopoint[''][, ]j_id = [str(j_i[, ]) for j_i in annopoint[''][]]x = [x[, ] for x in annopoint[''][]]y = [y[, ] for y in annopoint[''][]]joint_pos = {}for _j_id, (_x, _y) in zip(j_id, zip(x, y)):joint_pos[int(_j_id)] = [float(_x), float(_y)]if '' in str(annopoint.dtype):vis = [v[] if v.size > else [] for v in annopoint[''][]]vis = dict([(k, int(v[])) if len(v) > else v for k, v in zip(j_id, vis)])else:vis = Noneif ((is_16_pos_only ==True) and (len(joint_pos) == )) or (is_16_pos_only == False):data = {'': img_fn,'': train_flag,'': head_rect,'': vis,'': joint_pos}if train_flag:ann_train_list[-].append(data)else:ann_test_list[-].append(data)save_joints()logging.info(\"\")img_dir = os.path.join(path, extracted_filename2)_img_list = load_file_list(path=os.path.join(path, extracted_filename2), regx='', printable=False)for i, im in enumerate(img_train_list):if im not in _img_list:print(''.format(im, img_dir))del img_train_list[i]del ann_train_list[i]for i, im in enumerate(img_test_list):if im not in _img_list:print(''.format(im, img_dir))del img_train_list[i]del ann_train_list[i]n_train_images = len(img_train_list)n_test_images = len(img_test_list)n_images = n_train_images + n_test_imageslogging.info(\"\".format(n_images, n_train_images, n_test_images))n_train_ann = len(ann_train_list)n_test_ann = len(ann_test_list)n_ann = n_train_ann + n_test_annlogging.info(\"\".format(n_ann, n_train_ann, n_test_ann))n_train_people = len(sum(ann_train_list, []))n_test_people = len(sum(ann_test_list, []))n_people = n_train_people + n_test_peoplelogging.info(\"\".format(n_people, n_train_people, n_test_people))for i, value in enumerate(img_train_list):img_train_list[i] = os.path.join(img_dir, value)for i, value in enumerate(img_test_list):img_test_list[i] = os.path.join(img_dir, value)return img_train_list, ann_train_list, img_test_list, ann_test_list", "docstring": "Load MPII Human Pose Dataset.\n\n Parameters\n -----------\n path : str\n The path that the data is downloaded to.\n is_16_pos_only : boolean\n If True, only return the peoples contain 16 pose keypoints. (Usually be used for single person pose estimation)\n\n Returns\n ----------\n img_train_list : list of str\n The image directories of training data.\n ann_train_list : list of dict\n The annotations of training data.\n img_test_list : list of str\n The image directories of testing data.\n ann_test_list : list of dict\n The annotations of testing data.\n\n Examples\n --------\n >>> import pprint\n >>> import tensorlayer as tl\n >>> img_train_list, ann_train_list, img_test_list, ann_test_list = tl.files.load_mpii_pose_dataset()\n >>> image = tl.vis.read_image(img_train_list[0])\n >>> tl.vis.draw_mpii_pose_to_image(image, ann_train_list[0], 'image.png')\n >>> pprint.pprint(ann_train_list[0])\n\n References\n -----------\n - `MPII Human Pose Dataset. CVPR 14 `__\n - `MPII Human Pose Models. 
CVPR 16 `__\n - `MPII Human Shape, Poselet Conditioned Pictorial Structures and etc `__\n - `MPII Keyponts and ID `__", "id": "f11130:m0"} {"signature": "def load_flickr25k_dataset(tag='', path=\"\", n_threads=, printable=False):", "body": "path = os.path.join(path, '')filename = ''url = ''if folder_exists(os.path.join(path, \"\")) is False:logging.info(\"\".format(path))maybe_download_and_extract(filename, path, url, extract=True)del_file(os.path.join(path, filename))folder_imgs = os.path.join(path, \"\")path_imgs = load_file_list(path=folder_imgs, regx='', printable=False)path_imgs.sort(key=natural_keys)folder_tags = os.path.join(path, \"\", \"\", \"\")path_tags = load_file_list(path=folder_tags, regx='', printable=False)path_tags.sort(key=natural_keys)if tag is None:logging.info(\"\")else:logging.info(\"\".format(tag))images_list = []for idx, _v in enumerate(path_tags):tags = read_file(os.path.join(folder_tags, path_tags[idx])).split('')if tag is None or tag in tags:images_list.append(path_imgs[idx])images = visualize.read_images(images_list, folder_imgs, n_threads=n_threads, printable=printable)return images", "docstring": "Load Flickr25K dataset.\n\n Returns a list of images by a given tag from Flick25k dataset,\n it will download Flickr25k from `the official website `__\n at the first time you use it.\n\n Parameters\n ------------\n tag : str or None\n What images to return.\n - If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search `__.\n - If you want to get all images, set to ``None``.\n\n path : str\n The path that the data is downloaded to, defaults is ``data/flickr25k/``.\n n_threads : int\n The number of thread to read image.\n printable : boolean\n Whether to print infomation when reading images, default is ``False``.\n\n Examples\n -----------\n Get images with tag of sky\n\n >>> images = tl.files.load_flickr25k_dataset(tag='sky')\n\n Get all images\n\n >>> images = tl.files.load_flickr25k_dataset(tag=None, n_threads=100, printable=True)", "id": "f11131:m0"} {"signature": "def load_nietzsche_dataset(path=''):", "body": "logging.info(\"\".format(path))path = os.path.join(path, '')filename = \"\"url = ''filepath = maybe_download_and_extract(filename, path, url)with open(filepath, \"\") as f:words = f.read()return words", "docstring": "Load Nietzsche dataset.\n\n Parameters\n ----------\n path : str\n The path that the data is downloaded to, defaults is ``data/nietzsche/``.\n\n Returns\n --------\n str\n The content.\n\n Examples\n --------\n >>> see tutorial_generate_text.py\n >>> words = tl.files.load_nietzsche_dataset()\n >>> words = basic_clean_str(words)\n >>> words = words.split()", "id": "f11133:m0"} {"signature": "def load_voc_dataset(path='', dataset='', contain_classes_in_person=False):", "body": "path = os.path.join(path, '')def _recursive_parse_xml_to_dict(xml):\"\"\"\"\"\"if xml is not None:return {xml.tag: xml.text}result = {}for child in xml:child_result = _recursive_parse_xml_to_dict(child)if child.tag != '':result[child.tag] = child_result[child.tag]else:if child.tag not in result:result[child.tag] = []result[child.tag].append(child_result[child.tag])return {xml.tag: result}import xml.etree.ElementTree as ETif dataset == \"\":url = \"\"tar_filename = \"\"extracted_filename = \"\" logging.info(\"\")elif dataset == \"\":extracted_filename = \"\" logging.info(\"\")logging.info(\"\")import timetime.sleep()if os.path.isdir(os.path.join(path, extracted_filename)) is False:logging.info(\"\")logging.info(\"\")logging.info(\"\" % 
path)exit()elif dataset == \"\":url = \"\"tar_filename = \"\"extracted_filename = \"\"logging.info(\"\")elif dataset == \"\":url = \"\"tar_filename = \"\"extracted_filename = \"\"logging.info(\"\")else:raise Exception(\"\")if dataset != \"\":from sys import platform as _platformif folder_exists(os.path.join(path, extracted_filename)) is False:logging.info(\"\".format(extracted_filename, path))maybe_download_and_extract(tar_filename, path, url, extract=True)del_file(os.path.join(path, tar_filename))if dataset == \"\":if _platform == \"\":os.system(\"\".format(path, path))else:os.system(\"\".format(path, path))elif dataset == \"\":if _platform == \"\":os.system(\"\".format(path, path))else:os.system(\"\".format(path, path))elif dataset == \"\":if _platform == \"\":os.system(\"\".format(path, path))else:os.system(\"\".format(path, path))del_folder(os.path.join(path, ''))classes = [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\",\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"]if contain_classes_in_person:classes_in_person = [\"\", \"\", \"\"]else:classes_in_person = []classes += classes_in_person classes_dict = utils.list_string_to_dict(classes)logging.info(\"\".format(classes_dict))folder_imgs = os.path.join(path, extracted_filename, \"\")imgs_file_list = load_file_list(path=folder_imgs, regx='', printable=False)logging.info(\"\".format(len(imgs_file_list)))imgs_file_list.sort(key=lambda s: int(s.replace('', '').replace('', '').split('')[-])) imgs_file_list = [os.path.join(folder_imgs, s) for s in imgs_file_list]if dataset != \"\":folder_semseg = os.path.join(path, extracted_filename, \"\")imgs_semseg_file_list = load_file_list(path=folder_semseg, regx='', printable=False)logging.info(\"\".format(len(imgs_semseg_file_list)))imgs_semseg_file_list.sort(key=lambda s: int(s.replace('', '').replace('', '').split('')[-])) imgs_semseg_file_list = [os.path.join(folder_semseg, s) for s in imgs_semseg_file_list]folder_insseg = os.path.join(path, extracted_filename, \"\")imgs_insseg_file_list = load_file_list(path=folder_insseg, regx='', printable=False)logging.info(\"\".format(len(imgs_semseg_file_list)))imgs_insseg_file_list.sort(key=lambda s: int(s.replace('', '').replace('', '').split('')[-])) imgs_insseg_file_list = [os.path.join(folder_insseg, s) for s in imgs_insseg_file_list]else:imgs_semseg_file_list = []imgs_insseg_file_list = []folder_ann = os.path.join(path, extracted_filename, \"\")imgs_ann_file_list = load_file_list(path=folder_ann, regx='', printable=False)logging.info(\"\".format(len(imgs_ann_file_list)))imgs_ann_file_list.sort(key=lambda s: int(s.replace('', '').replace('', '').split('')[-])) imgs_ann_file_list = [os.path.join(folder_ann, s) for s in imgs_ann_file_list]if dataset == \"\": imgs_file_list_new = []for ann in imgs_ann_file_list:ann = os.path.split(ann)[-].split('')[]for im in imgs_file_list:if ann in im:imgs_file_list_new.append(im)breakimgs_file_list = imgs_file_list_newlogging.info(\"\" % len(imgs_file_list_new))def convert(size, box):dw = / size[]dh = / size[]x = (box[] + box[]) / y = (box[] + box[]) / w = box[] - box[]h = box[] - box[]x = x * dww = w * dwy = y * dhh = h * dhreturn x, y, w, hdef convert_annotation(file_name):\"\"\"\"\"\"in_file = open(file_name)out_file = \"\"tree = ET.parse(in_file)root = tree.getroot()size = root.find('')w = int(size.find('').text)h = int(size.find('').text)n_objs = for obj in root.iter(''):if dataset != \"\":difficult = obj.find('').textcls = obj.find('').textif cls not in classes or int(difficult) == 
:continueelse:cls = obj.find('').textif cls not in classes:continuecls_id = classes.index(cls)xmlbox = obj.find('')b = (float(xmlbox.find('').text), float(xmlbox.find('').text), float(xmlbox.find('').text),float(xmlbox.find('').text))bb = convert((w, h), b)out_file += str(cls_id) + \"\" + \"\".join([str(a) for a in bb]) + ''n_objs += if cls in \"\":for part in obj.iter(''):cls = part.find('').textif cls not in classes_in_person:continuecls_id = classes.index(cls)xmlbox = part.find('')b = (float(xmlbox.find('').text), float(xmlbox.find('').text),float(xmlbox.find('').text), float(xmlbox.find('').text))bb = convert((w, h), b)out_file += str(cls_id) + \"\" + \"\".join([str(a) for a in bb]) + ''n_objs += in_file.close()return n_objs, out_filelogging.info(\"\")n_objs_list = []objs_info_list = [] objs_info_dicts = {}for idx, ann_file in enumerate(imgs_ann_file_list):n_objs, objs_info = convert_annotation(ann_file)n_objs_list.append(n_objs)objs_info_list.append(objs_info)with tf.gfile.GFile(ann_file, '') as fid:xml_str = fid.read()xml = etree.fromstring(xml_str)data = _recursive_parse_xml_to_dict(xml)['']objs_info_dicts.update({imgs_file_list[idx]: data})return imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list, classes, classes_in_person, classes_dict, n_objs_list, objs_info_list, objs_info_dicts", "docstring": "Pascal VOC 2007/2012 Dataset.\n\n It has 20 objects:\n aeroplane, bicycle, bird, boat, bottle, bus, car, cat, chair, cow, diningtable, dog, horse, motorbike, person, pottedplant, sheep, sofa, train, tvmonitor\n and additional 3 classes : head, hand, foot for person.\n\n Parameters\n -----------\n path : str\n The path that the data is downloaded to, defaults is ``data/VOC``.\n dataset : str\n The VOC dataset version, `2012`, `2007`, `2007test` or `2012test`. We usually train model on `2007+2012` and test it on `2007test`.\n contain_classes_in_person : boolean\n Whether include head, hand and foot annotation, default is False.\n\n Returns\n ---------\n imgs_file_list : list of str\n Full paths of all images.\n imgs_semseg_file_list : list of str\n Full paths of all maps for semantic segmentation. Note that not all images have this map!\n imgs_insseg_file_list : list of str\n Full paths of all maps for instance segmentation. Note that not all images have this map!\n imgs_ann_file_list : list of str\n Full paths of all annotations for bounding box and object class, all images have this annotations.\n classes : list of str\n Classes in order.\n classes_in_person : list of str\n Classes in person.\n classes_dict : dictionary\n Class label to integer.\n n_objs_list : list of int\n Number of objects in all images in ``imgs_file_list`` in order.\n objs_info_list : list of str\n Darknet format for the annotation of all images in ``imgs_file_list`` in order. 
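Each annotation line uses the ratio format spelled out just below; as a rough sketch (not the internal helper above, whose exact field order is elided), a VOC pixel box can be converted into that format like this:
>>> def to_ratio_box(xmin, ymin, xmax, ymax, img_w, img_h):
...     x_centre = (xmin + xmax) / 2.0 / img_w
...     y_centre = (ymin + ymax) / 2.0 / img_h
...     return x_centre, y_centre, (xmax - xmin) / img_w, (ymax - ymin) / img_h
>>> to_ratio_box(100, 120, 300, 360, 500, 400)
(0.4, 0.6, 0.4, 0.6)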
``[class_id x_centre y_centre width height]`` in ratio format.\n objs_info_dicts : dictionary\n The annotation of all images in ``imgs_file_list``, ``{imgs_file_list : dictionary for annotation}``,\n format from `TensorFlow/Models/object-detection `__.\n\n Examples\n ----------\n >>> imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list,\n >>> classes, classes_in_person, classes_dict,\n >>> n_objs_list, objs_info_list, objs_info_dicts = tl.files.load_voc_dataset(dataset=\"2012\", contain_classes_in_person=False)\n >>> idx = 26\n >>> print(classes)\n ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']\n >>> print(classes_dict)\n {'sheep': 16, 'horse': 12, 'bicycle': 1, 'bottle': 4, 'cow': 9, 'sofa': 17, 'car': 6, 'dog': 11, 'cat': 7, 'person': 14, 'train': 18, 'diningtable': 10, 'aeroplane': 0, 'bus': 5, 'pottedplant': 15, 'tvmonitor': 19, 'chair': 8, 'bird': 2, 'boat': 3, 'motorbike': 13}\n >>> print(imgs_file_list[idx])\n data/VOC/VOC2012/JPEGImages/2007_000423.jpg\n >>> print(n_objs_list[idx])\n 2\n >>> print(imgs_ann_file_list[idx])\n data/VOC/VOC2012/Annotations/2007_000423.xml\n >>> print(objs_info_list[idx])\n 14 0.173 0.461333333333 0.142 0.496\n 14 0.828 0.542666666667 0.188 0.594666666667\n >>> ann = tl.prepro.parse_darknet_ann_str_to_list(objs_info_list[idx])\n >>> print(ann)\n [[14, 0.173, 0.461333333333, 0.142, 0.496], [14, 0.828, 0.542666666667, 0.188, 0.594666666667]]\n >>> c, b = tl.prepro.parse_darknet_ann_list_to_cls_box(ann)\n >>> print(c, b)\n [14, 14] [[0.173, 0.461333333333, 0.142, 0.496], [0.828, 0.542666666667, 0.188, 0.594666666667]]\n\n References\n -------------\n - `Pascal VOC2012 Website `__.\n - `Pascal VOC2007 Website `__.", "id": "f11134:m0"} {"signature": "def _load_mnist_dataset(shape, path, name='', url=''):", "body": "path = os.path.join(path, name)def load_mnist_images(path, filename):filepath = maybe_download_and_extract(filename, path, url)logging.info(filepath)with gzip.open(filepath, '') as f:data = np.frombuffer(f.read(), np.uint8, offset=)data = data.reshape(shape)return data / np.float32()def load_mnist_labels(path, filename):filepath = maybe_download_and_extract(filename, path, url)with gzip.open(filepath, '') as f:data = np.frombuffer(f.read(), np.uint8, offset=)return datalogging.info(\"\".format(name.upper(), path))X_train = load_mnist_images(path, '')y_train = load_mnist_labels(path, '')X_test = load_mnist_images(path, '')y_test = load_mnist_labels(path, '')X_train, X_val = X_train[:-], X_train[-:]y_train, y_val = y_train[:-], y_train[-:]X_train = np.asarray(X_train, dtype=np.float32)y_train = np.asarray(y_train, dtype=np.int32)X_val = np.asarray(X_val, dtype=np.float32)y_val = np.asarray(y_val, dtype=np.int32)X_test = np.asarray(X_test, dtype=np.float32)y_test = np.asarray(y_test, dtype=np.int32)return X_train, y_train, X_val, y_val, X_test, y_test", "docstring": "A generic function to load mnist-like dataset.\n\n Parameters:\n ----------\n shape : tuple\n The shape of digit images.\n path : str\n The path that the data is downloaded to.\n name : str\n The dataset name you want to use(the default is 'mnist').\n url : str\n The url of dataset(the default is 'http://yann.lecun.com/exdb/mnist/').", "id": "f11135:m0"} {"signature": "def load_mnist_dataset(shape=(-, ), path=''):", "body": "return _load_mnist_dataset(shape, path, name='', url='')", "docstring": "Load the original 
mnist.\n\n Automatically download MNIST dataset and return the training, validation and test set with 50000, 10000 and 10000 digit images respectively.\n\n Parameters\n ----------\n shape : tuple\n The shape of digit images (the default is (-1, 784), alternatively (-1, 28, 28, 1)).\n path : str\n The path that the data is downloaded to.\n\n Returns\n -------\n X_train, y_train, X_val, y_val, X_test, y_test: tuple\n Return splitted training/validation/test set respectively.\n\n Examples\n --------\n >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1,784), path='datasets')\n >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))", "id": "f11137:m0"} {"signature": "def load_fashion_mnist_dataset(shape=(-, ), path=''):", "body": "return _load_mnist_dataset(shape, path, name='', url='')", "docstring": "Load the fashion mnist.\n\n Automatically download fashion-MNIST dataset and return the training, validation and test set with 50000, 10000 and 10000 fashion images respectively, `examples `__.\n\n Parameters\n ----------\n shape : tuple\n The shape of digit images (the default is (-1, 784), alternatively (-1, 28, 28, 1)).\n path : str\n The path that the data is downloaded to.\n\n Returns\n -------\n X_train, y_train, X_val, y_val, X_test, y_test: tuple\n Return splitted training/validation/test set respectively.\n\n Examples\n --------\n >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_fashion_mnist_dataset(shape=(-1,784), path='datasets')\n >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_fashion_mnist_dataset(shape=(-1, 28, 28, 1))", "id": "f11137:m1"} {"signature": "def _load_mnist_dataset(shape, path, name='', url=''):", "body": "path = os.path.join(path, name)def load_mnist_images(path, filename):filepath = maybe_download_and_extract(filename, path, url)logging.info(filepath)with gzip.open(filepath, '') as f:data = np.frombuffer(f.read(), np.uint8, offset=)data = data.reshape(shape)return data / np.float32()def load_mnist_labels(path, filename):filepath = maybe_download_and_extract(filename, path, url)with gzip.open(filepath, '') as f:data = np.frombuffer(f.read(), np.uint8, offset=)return datalogging.info(\"\".format(name.upper(), path))X_train = load_mnist_images(path, '')y_train = load_mnist_labels(path, '')X_test = load_mnist_images(path, '')y_test = load_mnist_labels(path, '')X_train, X_val = X_train[:-], X_train[-:]y_train, y_val = y_train[:-], y_train[-:]X_train = np.asarray(X_train, dtype=np.float32)y_train = np.asarray(y_train, dtype=np.int32)X_val = np.asarray(X_val, dtype=np.float32)y_val = np.asarray(y_val, dtype=np.int32)X_test = np.asarray(X_test, dtype=np.float32)y_test = np.asarray(y_test, dtype=np.int32)return X_train, y_train, X_val, y_val, X_test, y_test", "docstring": "A generic function to load mnist-like dataset.\n\n Parameters:\n ----------\n shape : tuple\n The shape of digit images.\n path : str\n The path that the data is downloaded to.\n name : str\n The dataset name you want to use(the default is 'mnist').\n url : str\n The url of dataset(the default is 'http://yann.lecun.com/exdb/mnist/').", "id": "f11137:m2"} {"signature": "def load_cifar10_dataset(shape=(-, , , ), path='', plotable=False):", "body": "path = os.path.join(path, '')logging.info(\"\".format(path))def unpickle(file):fp = open(file, '')if sys.version_info.major == :data = pickle.load(fp)elif sys.version_info.major == :data = pickle.load(fp, encoding='')fp.close()return 
datafilename = ''url = ''maybe_download_and_extract(filename, path, url, extract=True)X_train = Noney_train = []for i in range(, ):data_dic = unpickle(os.path.join(path, '', \"\".format(i)))if i == :X_train = data_dic['']else:X_train = np.vstack((X_train, data_dic['']))y_train += data_dic['']test_data_dic = unpickle(os.path.join(path, '', \"\"))X_test = test_data_dic['']y_test = np.array(test_data_dic[''])if shape == (-, , , ):X_test = X_test.reshape(shape)X_train = X_train.reshape(shape)elif shape == (-, , , ):X_test = X_test.reshape(shape, order='')X_train = X_train.reshape(shape, order='')X_test = np.transpose(X_test, (, , , ))X_train = np.transpose(X_train, (, , , ))else:X_test = X_test.reshape(shape)X_train = X_train.reshape(shape)y_train = np.array(y_train)if plotable:logging.info('')fig = plt.figure()logging.info('' % X_train[].shape)plt.ion() count = for _ in range(): for _ in range(): _ = fig.add_subplot(, , count)if shape == (-, , , ):plt.imshow(np.transpose(X_train[count - ], (, , )), interpolation='')elif shape == (-, , , ):plt.imshow(X_train[count - ], interpolation='')else:raise Exception(\"\")plt.gca().xaxis.set_major_locator(plt.NullLocator()) plt.gca().yaxis.set_major_locator(plt.NullLocator())count = count + plt.draw() plt.pause() logging.info(\"\" % X_train.shape)logging.info(\"\" % y_train.shape)logging.info(\"\" % X_test.shape)logging.info(\"\" % y_test.shape)X_train = np.asarray(X_train, dtype=np.float32)X_test = np.asarray(X_test, dtype=np.float32)y_train = np.asarray(y_train, dtype=np.int32)y_test = np.asarray(y_test, dtype=np.int32)return X_train, y_train, X_test, y_test", "docstring": "Load CIFAR-10 dataset.\n\n It consists of 60000 32x32 colour images in 10 classes, with\n 6000 images per class. There are 50000 training images and 10000 test images.\n\n The dataset is divided into five training batches and one test batch, each with\n 10000 images. The test batch contains exactly 1000 randomly-selected images from\n each class. The training batches contain the remaining images in random order,\n but some training batches may contain more images from one class than another.\n Between them, the training batches contain exactly 5000 images from each class.\n\n Parameters\n ----------\n shape : tupe\n The shape of digit images e.g. 
(-1, 3, 32, 32) and (-1, 32, 32, 3).\n path : str\n The path that the data is downloaded to, defaults is ``data/cifar10/``.\n plotable : boolean\n Whether to plot some image examples, False as default.\n\n Examples\n --------\n >>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3))\n\n References\n ----------\n - `CIFAR website `__\n - `Data download link `__\n - ``__", "id": "f11137:m3"} {"signature": "def load_cropped_svhn(path='', include_extra=True):", "body": "start_time = time.time()path = os.path.join(path, '')logging.info(\"\".format(path, include_extra))url = \"\"np_file = os.path.join(path, \"\")if file_exists(np_file) is False:filename = \"\"filepath = maybe_download_and_extract(filename, path, url)mat = sio.loadmat(filepath)X_train = mat[''] / X_train = np.transpose(X_train, (, , , ))y_train = np.squeeze(mat[''], axis=)y_train[y_train == ] = np.savez(np_file, X=X_train, y=y_train)del_file(filepath)else:v = np.load(np_file)X_train = v['']y_train = v['']logging.info(\"\".format(len(y_train)))np_file = os.path.join(path, \"\")if file_exists(np_file) is False:filename = \"\"filepath = maybe_download_and_extract(filename, path, url)mat = sio.loadmat(filepath)X_test = mat[''] / X_test = np.transpose(X_test, (, , , ))y_test = np.squeeze(mat[''], axis=)y_test[y_test == ] = np.savez(np_file, X=X_test, y=y_test)del_file(filepath)else:v = np.load(np_file)X_test = v['']y_test = v['']logging.info(\"\".format(len(y_test)))if include_extra:logging.info(\"\")np_file = os.path.join(path, \"\")if file_exists(np_file) is False:logging.info(\"\")filename = \"\"filepath = maybe_download_and_extract(filename, path, url)mat = sio.loadmat(filepath)X_extra = mat[''] / X_extra = np.transpose(X_extra, (, , , ))y_extra = np.squeeze(mat[''], axis=)y_extra[y_extra == ] = np.savez(np_file, X=X_extra, y=y_extra)del_file(filepath)else:v = np.load(np_file)X_extra = v['']y_extra = v['']logging.info(\"\".format(len(y_extra), len(y_train)))t = time.time()X_train = np.concatenate((X_train, X_extra), )y_train = np.concatenate((y_train, y_extra), )logging.info(\"\".format(len(y_extra), len(y_train), time.time() - t))else:logging.info(\"\")logging.info(\"\" % (str(X_train.shape[:]), len(y_train), len(y_test)))logging.info(\"\".format(int(time.time() - start_time)))return X_train, y_train, X_test, y_test", "docstring": "Load Cropped SVHN.\n\n The Cropped Street View House Numbers (SVHN) Dataset contains 32x32x3 RGB images.\n Digit '1' has label 1, '9' has label 9 and '0' has label 0 (the original dataset uses 10 to represent '0'), see `ufldl website `__.\n\n Parameters\n ----------\n path : str\n The path that the data is downloaded to.\n include_extra : boolean\n If True (default), add extra images to the training set.\n\n Returns\n -------\n X_train, y_train, X_test, y_test: tuple\n Return splitted training/test set respectively.\n\n Examples\n ---------\n >>> X_train, y_train, X_test, y_test = tl.files.load_cropped_svhn(include_extra=False)\n >>> tl.vis.save_images(X_train[0:100], [10, 10], 'svhn.png')", "id": "f11137:m4"} {"signature": "def load_ptb_dataset(path=''):", "body": "path = os.path.join(path, '')logging.info(\"\".format(path))filename = ''url = ''maybe_download_and_extract(filename, path, url, extract=True)data_path = os.path.join(path, '', '')train_path = os.path.join(data_path, \"\")valid_path = os.path.join(data_path, \"\")test_path = os.path.join(data_path, \"\")word_to_id = nlp.build_vocab(nlp.read_words(train_path))train_data = 
nlp.words_to_word_ids(nlp.read_words(train_path), word_to_id)valid_data = nlp.words_to_word_ids(nlp.read_words(valid_path), word_to_id)test_data = nlp.words_to_word_ids(nlp.read_words(test_path), word_to_id)vocab_size = len(word_to_id)return train_data, valid_data, test_data, vocab_size", "docstring": "Load Penn TreeBank (PTB) dataset.\n\n It is used in many LANGUAGE MODELING papers,\n including \"Empirical Evaluation and Combination of Advanced Language\n Modeling Techniques\", \"Recurrent Neural Network Regularization\".\n It consists of 929k training words, 73k validation words, and 82k test\n words. It has 10k words in its vocabulary.\n\n Parameters\n ----------\n path : str\n The path that the data is downloaded to, defaults is ``data/ptb/``.\n\n Returns\n --------\n train_data, valid_data, test_data : list of int\n The training, validating and testing data in integer format.\n vocab_size : int\n The vocabulary size.\n\n Examples\n --------\n >>> train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset()\n\n References\n ---------------\n - ``tensorflow.models.rnn.ptb import reader``\n - `Manual download `__\n\n Notes\n ------\n - If you want to get the raw data, see the source code.", "id": "f11137:m5"} {"signature": "def load_matt_mahoney_text8_dataset(path=''):", "body": "path = os.path.join(path, '')logging.info(\"\".format(path))filename = ''url = ''maybe_download_and_extract(filename, path, url, expected_bytes=)with zipfile.ZipFile(os.path.join(path, filename)) as f:word_list = f.read(f.namelist()[]).split()for idx, _ in enumerate(word_list):word_list[idx] = word_list[idx].decode()return word_list", "docstring": "Load Matt Mahoney's dataset.\n\n Download a text file from Matt Mahoney's website\n if not present, and make sure it's the right size.\n Extract the first file enclosed in a zip file as a list of words.\n This dataset can be used for Word Embedding.\n\n Parameters\n ----------\n path : str\n The path that the data is downloaded to, defaults is ``data/mm_test8/``.\n\n Returns\n --------\n list of str\n The raw text data e.g. [.... 
'their', 'families', 'who', 'were', 'expelled', 'from', 'jerusalem', ...]\n\n Examples\n --------\n >>> words = tl.files.load_matt_mahoney_text8_dataset()\n >>> print('Data size', len(words))", "id": "f11137:m6"} {"signature": "def load_nietzsche_dataset(path=''):", "body": "logging.info(\"\".format(path))path = os.path.join(path, '')filename = \"\"url = ''filepath = maybe_download_and_extract(filename, path, url)with open(filepath, \"\") as f:words = f.read()return words", "docstring": "Load Nietzsche dataset.\n\n Parameters\n ----------\n path : str\n The path that the data is downloaded to, defaults is ``data/nietzsche/``.\n\n Returns\n --------\n str\n The content.\n\n Examples\n --------\n >>> see tutorial_generate_text.py\n >>> words = tl.files.load_nietzsche_dataset()\n >>> words = basic_clean_str(words)\n >>> words = words.split()", "id": "f11137:m8"} {"signature": "def load_wmt_en_fr_dataset(path=''):", "body": "path = os.path.join(path, '')_WMT_ENFR_TRAIN_URL = \"\"_WMT_ENFR_DEV_URL = \"\"def gunzip_file(gz_path, new_path):\"\"\"\"\"\"logging.info(\"\" % (gz_path, new_path))with gzip.open(gz_path, \"\") as gz_file:with open(new_path, \"\") as new_file:for line in gz_file:new_file.write(line)def get_wmt_enfr_train_set(path):\"\"\"\"\"\"filename = \"\"maybe_download_and_extract(filename, path, _WMT_ENFR_TRAIN_URL, extract=True)train_path = os.path.join(path, \"\")gunzip_file(train_path + \"\", train_path + \"\")gunzip_file(train_path + \"\", train_path + \"\")return train_pathdef get_wmt_enfr_dev_set(path):\"\"\"\"\"\"filename = \"\"dev_file = maybe_download_and_extract(filename, path, _WMT_ENFR_DEV_URL, extract=False)dev_name = \"\"dev_path = os.path.join(path, \"\")if not (gfile.Exists(dev_path + \"\") and gfile.Exists(dev_path + \"\")):logging.info(\"\" % dev_file)with tarfile.open(dev_file, \"\") as dev_tar:fr_dev_file = dev_tar.getmember(\"\" + dev_name + \"\")en_dev_file = dev_tar.getmember(\"\" + dev_name + \"\")fr_dev_file.name = dev_name + \"\" en_dev_file.name = dev_name + \"\"dev_tar.extract(fr_dev_file, path)dev_tar.extract(en_dev_file, path)return dev_pathlogging.info(\"\".format(path))train_path = get_wmt_enfr_train_set(path)dev_path = get_wmt_enfr_dev_set(path)return train_path, dev_path", "docstring": "Load WMT'15 English-to-French translation dataset.\n\n It will download the data from the WMT'15 Website (10^9-French-English corpus), and the 2013 news test from the same site as development set.\n Returns the directories of training data and test data.\n\n Parameters\n ----------\n path : str\n The path that the data is downloaded to, defaults is ``data/wmt_en_fr/``.\n\n References\n ----------\n - Code modified from /tensorflow/models/rnn/translation/data_utils.py\n\n Notes\n -----\n Usually, it will take a long time to download this dataset.", "id": "f11137:m9"} {"signature": "def load_flickr25k_dataset(tag='', path=\"\", n_threads=, printable=False):", "body": "path = os.path.join(path, '')filename = ''url = ''if folder_exists(os.path.join(path, \"\")) is False:logging.info(\"\".format(path))maybe_download_and_extract(filename, path, url, extract=True)del_file(os.path.join(path, filename))folder_imgs = os.path.join(path, \"\")path_imgs = load_file_list(path=folder_imgs, regx='', printable=False)path_imgs.sort(key=natural_keys)folder_tags = os.path.join(path, \"\", \"\", \"\")path_tags = load_file_list(path=folder_tags, regx='', printable=False)path_tags.sort(key=natural_keys)if tag is None:logging.info(\"\")else:logging.info(\"\".format(tag))images_list = []for 
idx, _v in enumerate(path_tags):tags = read_file(os.path.join(folder_tags, path_tags[idx])).split('')if tag is None or tag in tags:images_list.append(path_imgs[idx])images = visualize.read_images(images_list, folder_imgs, n_threads=n_threads, printable=printable)return images", "docstring": "Load Flickr25K dataset.\n\n Returns a list of images by a given tag from Flick25k dataset,\n it will download Flickr25k from `the official website `__\n at the first time you use it.\n\n Parameters\n ------------\n tag : str or None\n What images to return.\n - If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search `__.\n - If you want to get all images, set to ``None``.\n\n path : str\n The path that the data is downloaded to, defaults is ``data/flickr25k/``.\n n_threads : int\n The number of thread to read image.\n printable : boolean\n Whether to print infomation when reading images, default is ``False``.\n\n Examples\n -----------\n Get images with tag of sky\n\n >>> images = tl.files.load_flickr25k_dataset(tag='sky')\n\n Get all images\n\n >>> images = tl.files.load_flickr25k_dataset(tag=None, n_threads=100, printable=True)", "id": "f11137:m10"} {"signature": "def load_flickr1M_dataset(tag='', size=, path=\"\", n_threads=, printable=False):", "body": "path = os.path.join(path, '')logging.info(\"\".format(size * , size * ))images_zip = ['', '', '', '', '', '', '','', '', '']tag_zip = ''url = ''for image_zip in images_zip[:size]:image_folder = image_zip.split(\"\")[]if folder_exists(os.path.join(path, image_folder)) is False:logging.info(\"\".format(image_folder, path))maybe_download_and_extract(image_zip, path, url, extract=True)del_file(os.path.join(path, image_zip))shutil.move(os.path.join(path, ''), os.path.join(path, image_folder))else:logging.info(\"\".format(image_folder, path))if folder_exists(os.path.join(path, \"\")) is False:logging.info(\"\".format(path))maybe_download_and_extract(tag_zip, path, url, extract=True)del_file(os.path.join(path, tag_zip))else:logging.info(\"\".format(path))images_list = []images_folder_list = []for i in range(, size):images_folder_list += load_folder_list(path=os.path.join(path, '' % i))images_folder_list.sort(key=lambda s: int(s.split('')[-])) for folder in images_folder_list[:size * ]:tmp = load_file_list(path=folder, regx='', printable=False)tmp.sort(key=lambda s: int(s.split('')[-])) images_list.extend([os.path.join(folder, x) for x in tmp])tag_list = []tag_folder_list = load_folder_list(os.path.join(path, \"\"))tag_folder_list.sort(key=lambda s: int(os.path.basename(s)))for folder in tag_folder_list[:size * ]:tmp = load_file_list(path=folder, regx='', printable=False)tmp.sort(key=lambda s: int(s.split('')[-])) tmp = [os.path.join(folder, s) for s in tmp]tag_list += tmplogging.info(\"\".format(tag))select_images_list = []for idx, _val in enumerate(tag_list):tags = read_file(tag_list[idx]).split('')if tag in tags:select_images_list.append(images_list[idx])logging.info(\"\".format(tag))images = visualize.read_images(select_images_list, '', n_threads=n_threads, printable=printable)return images", "docstring": "Load Flick1M dataset.\n\n Returns a list of images by a given tag from Flickr1M dataset,\n it will download Flickr1M from `the official website `__\n at the first time you use it.\n\n Parameters\n ------------\n tag : str or None\n What images to return.\n - If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search `__.\n - If you want to get all images, set to ``None``.\n\n size : int\n integer 
between 1 to 10. 1 means 100k images ... 5 means 500k images, 10 means all 1 million images. Default is 10.\n path : str\n The path that the data is downloaded to, defaults is ``data/flickr25k/``.\n n_threads : int\n The number of thread to read image.\n printable : boolean\n Whether to print infomation when reading images, default is ``False``.\n\n Examples\n ----------\n Use 200k images\n\n >>> images = tl.files.load_flickr1M_dataset(tag='zebra', size=2)\n\n Use 1 Million images\n\n >>> images = tl.files.load_flickr1M_dataset(tag='zebra')", "id": "f11137:m11"} {"signature": "def load_cyclegan_dataset(filename='', path=''):", "body": "path = os.path.join(path, '')url = ''if folder_exists(os.path.join(path, filename)) is False:logging.info(\"\".format(filename, path))maybe_download_and_extract(filename + '', path, url, extract=True)del_file(os.path.join(path, filename + ''))def load_image_from_folder(path):path_imgs = load_file_list(path=path, regx='', printable=False)return visualize.read_images(path_imgs, path=path, n_threads=, printable=False)im_train_A = load_image_from_folder(os.path.join(path, filename, \"\"))im_train_B = load_image_from_folder(os.path.join(path, filename, \"\"))im_test_A = load_image_from_folder(os.path.join(path, filename, \"\"))im_test_B = load_image_from_folder(os.path.join(path, filename, \"\"))def if_2d_to_3d(images): for i, _v in enumerate(images):if len(images[i].shape) == :images[i] = images[i][:, :, np.newaxis]images[i] = np.tile(images[i], (, , ))return imagesim_train_A = if_2d_to_3d(im_train_A)im_train_B = if_2d_to_3d(im_train_B)im_test_A = if_2d_to_3d(im_test_A)im_test_B = if_2d_to_3d(im_test_B)return im_train_A, im_train_B, im_test_A, im_test_B", "docstring": "Load images from CycleGAN's database, see `this link `__.\n\n Parameters\n ------------\n filename : str\n The dataset you want, see `this link `__.\n path : str\n The path that the data is downloaded to, defaults is `data/cyclegan`\n\n Examples\n ---------\n >>> im_train_A, im_train_B, im_test_A, im_test_B = load_cyclegan_dataset(filename='summer2winter_yosemite')", "id": "f11137:m12"} {"signature": "def download_file_from_google_drive(ID, destination):", "body": "def save_response_content(response, destination, chunk_size= * ):total_size = int(response.headers.get('', ))with open(destination, \"\") as f:for chunk in tqdm(response.iter_content(chunk_size), total=total_size, unit='', unit_scale=True,desc=destination):if chunk: f.write(chunk)def get_confirm_token(response):for key, value in response.cookies.items():if key.startswith(''):return valuereturn NoneURL = \"\"session = requests.Session()response = session.get(URL, params={'': ID}, stream=True)token = get_confirm_token(response)if token:params = {'': ID, '': token}response = session.get(URL, params=params, stream=True)save_response_content(response, destination)", "docstring": "Download file from Google Drive.\n\n See ``tl.files.load_celebA_dataset`` for example.\n\n Parameters\n --------------\n ID : str\n The driver ID.\n destination : str\n The destination for save file.", "id": "f11137:m13"} {"signature": "def load_celebA_dataset(path=''):", "body": "data_dir = ''filename, drive_id = \"\", \"\"save_path = os.path.join(path, filename)image_path = os.path.join(path, data_dir)if os.path.exists(image_path):logging.info(''.format(save_path))else:exists_or_mkdir(path)download_file_from_google_drive(drive_id, save_path)zip_dir = ''with zipfile.ZipFile(save_path) as zf:zip_dir = 
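`load_cyclegan_dataset` above promotes grayscale images to three channels before returning them (the `if_2d_to_3d` helper in its body). A standalone numpy sketch of that step; the tile factor of 3 is the conventional choice and is an assumption here, since the literal constants are not preserved in the body.

import numpy as np

def to_three_channels(img):
    # (H, W) grayscale -> (H, W, 1) by adding a channel axis,
    # then tile that single channel three times -> (H, W, 3).
    if img.ndim == 2:
        img = img[:, :, np.newaxis]
        img = np.tile(img, (1, 1, 3))
    return img

gray = np.random.rand(64, 64)           # hypothetical grayscale image
print(to_three_channels(gray).shape)    # (64, 64, 3)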
zf.namelist()[]zf.extractall(path)os.remove(save_path)os.rename(os.path.join(path, zip_dir), image_path)data_files = load_file_list(path=image_path, regx='', printable=False)for i, _v in enumerate(data_files):data_files[i] = os.path.join(image_path, data_files[i])return data_files", "docstring": "Load CelebA dataset\n\n Return a list of image path.\n\n Parameters\n -----------\n path : str\n The path that the data is downloaded to, defaults is ``data/celebA/``.", "id": "f11137:m14"} {"signature": "def load_voc_dataset(path='', dataset='', contain_classes_in_person=False):", "body": "path = os.path.join(path, '')def _recursive_parse_xml_to_dict(xml):\"\"\"\"\"\"if not xml:return {xml.tag: xml.text}result = {}for child in xml:child_result = _recursive_parse_xml_to_dict(child)if child.tag != '':result[child.tag] = child_result[child.tag]else:if child.tag not in result:result[child.tag] = []result[child.tag].append(child_result[child.tag])return {xml.tag: result}if dataset == \"\":url = \"\"tar_filename = \"\"extracted_filename = \"\" logging.info(\"\")elif dataset == \"\":extracted_filename = \"\" logging.info(\"\")logging.info(\"\")time.sleep()if os.path.isdir(os.path.join(path, extracted_filename)) is False:logging.info(\"\")logging.info(\"\")logging.info(\"\" % path)exit()elif dataset == \"\":url = \"\"tar_filename = \"\"extracted_filename = \"\"logging.info(\"\")elif dataset == \"\":url = \"\"tar_filename = \"\"extracted_filename = \"\"logging.info(\"\")else:raise Exception(\"\")if dataset != \"\":_platform = sys.platformif folder_exists(os.path.join(path, extracted_filename)) is False:logging.info(\"\".format(extracted_filename, path))maybe_download_and_extract(tar_filename, path, url, extract=True)del_file(os.path.join(path, tar_filename))if dataset == \"\":if _platform == \"\":os.system(\"\".format(path, path))else:os.system(\"\".format(path, path))elif dataset == \"\":if _platform == \"\":os.system(\"\".format(path, path))else:os.system(\"\".format(path, path))elif dataset == \"\":if _platform == \"\":os.system(\"\".format(path, path))else:os.system(\"\".format(path, path))del_folder(os.path.join(path, ''))classes = [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\",\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"]if contain_classes_in_person:classes_in_person = [\"\", \"\", \"\"]else:classes_in_person = []classes += classes_in_person classes_dict = utils.list_string_to_dict(classes)logging.info(\"\".format(classes_dict))folder_imgs = os.path.join(path, extracted_filename, \"\")imgs_file_list = load_file_list(path=folder_imgs, regx='', printable=False)logging.info(\"\".format(len(imgs_file_list)))imgs_file_list.sort(key=lambda s: int(s.replace('', '').replace('', '').split('')[-])) imgs_file_list = [os.path.join(folder_imgs, s) for s in imgs_file_list]if dataset != \"\":folder_semseg = os.path.join(path, extracted_filename, \"\")imgs_semseg_file_list = load_file_list(path=folder_semseg, regx='', printable=False)logging.info(\"\".format(len(imgs_semseg_file_list)))imgs_semseg_file_list.sort(key=lambda s: int(s.replace('', '').replace('', '').split('')[-])) imgs_semseg_file_list = [os.path.join(folder_semseg, s) for s in imgs_semseg_file_list]folder_insseg = os.path.join(path, extracted_filename, \"\")imgs_insseg_file_list = load_file_list(path=folder_insseg, regx='', printable=False)logging.info(\"\".format(len(imgs_semseg_file_list)))imgs_insseg_file_list.sort(key=lambda s: int(s.replace('', '').replace('', '').split('')[-])) imgs_insseg_file_list = 
[os.path.join(folder_insseg, s) for s in imgs_insseg_file_list]else:imgs_semseg_file_list = []imgs_insseg_file_list = []folder_ann = os.path.join(path, extracted_filename, \"\")imgs_ann_file_list = load_file_list(path=folder_ann, regx='', printable=False)logging.info(\"\".format(len(imgs_ann_file_list)))imgs_ann_file_list.sort(key=lambda s: int(s.replace('', '').replace('', '').split('')[-])) imgs_ann_file_list = [os.path.join(folder_ann, s) for s in imgs_ann_file_list]if dataset == \"\": imgs_file_list_new = []for ann in imgs_ann_file_list:ann = os.path.split(ann)[-].split('')[]for im in imgs_file_list:if ann in im:imgs_file_list_new.append(im)breakimgs_file_list = imgs_file_list_newlogging.info(\"\" % len(imgs_file_list_new))def convert(size, box):dw = / size[]dh = / size[]x = (box[] + box[]) / y = (box[] + box[]) / w = box[] - box[]h = box[] - box[]x = x * dww = w * dwy = y * dhh = h * dhreturn x, y, w, hdef convert_annotation(file_name):\"\"\"\"\"\"in_file = open(file_name)out_file = \"\"tree = ET.parse(in_file)root = tree.getroot()size = root.find('')w = int(size.find('').text)h = int(size.find('').text)n_objs = for obj in root.iter(''):if dataset != \"\":difficult = obj.find('').textcls = obj.find('').textif cls not in classes or int(difficult) == :continueelse:cls = obj.find('').textif cls not in classes:continuecls_id = classes.index(cls)xmlbox = obj.find('')b = (float(xmlbox.find('').text), float(xmlbox.find('').text), float(xmlbox.find('').text),float(xmlbox.find('').text))bb = convert((w, h), b)out_file += str(cls_id) + \"\" + \"\".join([str(a) for a in bb]) + ''n_objs += if cls in \"\":for part in obj.iter(''):cls = part.find('').textif cls not in classes_in_person:continuecls_id = classes.index(cls)xmlbox = part.find('')b = (float(xmlbox.find('').text), float(xmlbox.find('').text),float(xmlbox.find('').text), float(xmlbox.find('').text))bb = convert((w, h), b)out_file += str(cls_id) + \"\" + \"\".join([str(a) for a in bb]) + ''n_objs += in_file.close()return n_objs, out_filelogging.info(\"\")n_objs_list = []objs_info_list = [] objs_info_dicts = {}for idx, ann_file in enumerate(imgs_ann_file_list):n_objs, objs_info = convert_annotation(ann_file)n_objs_list.append(n_objs)objs_info_list.append(objs_info)with tf.gfile.GFile(ann_file, '') as fid:xml_str = fid.read()xml = etree.fromstring(xml_str)data = _recursive_parse_xml_to_dict(xml)['']objs_info_dicts.update({imgs_file_list[idx]: data})return imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list, classes, classes_in_person, classes_dict, n_objs_list, objs_info_list, objs_info_dicts", "docstring": "Pascal VOC 2007/2012 Dataset.\n\n It has 20 objects:\n aeroplane, bicycle, bird, boat, bottle, bus, car, cat, chair, cow, diningtable, dog, horse, motorbike, person, pottedplant, sheep, sofa, train, tvmonitor\n and additional 3 classes : head, hand, foot for person.\n\n Parameters\n -----------\n path : str\n The path that the data is downloaded to, defaults is ``data/VOC``.\n dataset : str\n The VOC dataset version, `2012`, `2007`, `2007test` or `2012test`. We usually train model on `2007+2012` and test it on `2007test`.\n contain_classes_in_person : boolean\n Whether include head, hand and foot annotation, default is False.\n\n Returns\n ---------\n imgs_file_list : list of str\n Full paths of all images.\n imgs_semseg_file_list : list of str\n Full paths of all maps for semantic segmentation. 
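The `convert` helper above turns each Pascal VOC pixel bounding box into the Darknet ratio format `[class_id x_centre y_centre width height]` that the docstring (continued below) describes. A small self-contained sketch of that conversion, assuming the box arrives as (xmin, xmax, ymin, ymax) in pixels, as in the original helper:

def voc_box_to_darknet(img_w, img_h, xmin, xmax, ymin, ymax):
    # Centre point and size in pixels, normalised by the image dimensions.
    x = (xmin + xmax) / 2.0 / img_w
    y = (ymin + ymax) / 2.0 / img_h
    w = (xmax - xmin) / float(img_w)
    h = (ymax - ymin) / float(img_h)
    return x, y, w, h

print(voc_box_to_darknet(500, 375, 100, 200, 50, 150))
# (0.3, 0.266..., 0.2, 0.266...)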
Note that not all images have this map!\n imgs_insseg_file_list : list of str\n Full paths of all maps for instance segmentation. Note that not all images have this map!\n imgs_ann_file_list : list of str\n Full paths of all annotations for bounding box and object class, all images have this annotations.\n classes : list of str\n Classes in order.\n classes_in_person : list of str\n Classes in person.\n classes_dict : dictionary\n Class label to integer.\n n_objs_list : list of int\n Number of objects in all images in ``imgs_file_list`` in order.\n objs_info_list : list of str\n Darknet format for the annotation of all images in ``imgs_file_list`` in order. ``[class_id x_centre y_centre width height]`` in ratio format.\n objs_info_dicts : dictionary\n The annotation of all images in ``imgs_file_list``, ``{imgs_file_list : dictionary for annotation}``,\n format from `TensorFlow/Models/object-detection `__.\n\n Examples\n ----------\n >>> imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list,\n >>> classes, classes_in_person, classes_dict,\n >>> n_objs_list, objs_info_list, objs_info_dicts = tl.files.load_voc_dataset(dataset=\"2012\", contain_classes_in_person=False)\n >>> idx = 26\n >>> print(classes)\n ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']\n >>> print(classes_dict)\n {'sheep': 16, 'horse': 12, 'bicycle': 1, 'bottle': 4, 'cow': 9, 'sofa': 17, 'car': 6, 'dog': 11, 'cat': 7, 'person': 14, 'train': 18, 'diningtable': 10, 'aeroplane': 0, 'bus': 5, 'pottedplant': 15, 'tvmonitor': 19, 'chair': 8, 'bird': 2, 'boat': 3, 'motorbike': 13}\n >>> print(imgs_file_list[idx])\n data/VOC/VOC2012/JPEGImages/2007_000423.jpg\n >>> print(n_objs_list[idx])\n 2\n >>> print(imgs_ann_file_list[idx])\n data/VOC/VOC2012/Annotations/2007_000423.xml\n >>> print(objs_info_list[idx])\n 14 0.173 0.461333333333 0.142 0.496\n 14 0.828 0.542666666667 0.188 0.594666666667\n >>> ann = tl.prepro.parse_darknet_ann_str_to_list(objs_info_list[idx])\n >>> print(ann)\n [[14, 0.173, 0.461333333333, 0.142, 0.496], [14, 0.828, 0.542666666667, 0.188, 0.594666666667]]\n >>> c, b = tl.prepro.parse_darknet_ann_list_to_cls_box(ann)\n >>> print(c, b)\n [14, 14] [[0.173, 0.461333333333, 0.142, 0.496], [0.828, 0.542666666667, 0.188, 0.594666666667]]\n\n References\n -------------\n - `Pascal VOC2012 Website `__.\n - `Pascal VOC2007 Website `__.", "id": "f11137:m15"} {"signature": "def load_mpii_pose_dataset(path='', is_16_pos_only=False):", "body": "path = os.path.join(path, '')logging.info(\"\".format(path))url = \"\"tar_filename = \"\"extracted_filename = \"\"if folder_exists(os.path.join(path, extracted_filename)) is False:logging.info(\"\".format(extracted_filename, path))maybe_download_and_extract(tar_filename, path, url, extract=True)del_file(os.path.join(path, tar_filename))url = \"\"tar_filename = \"\"extracted_filename2 = \"\"if folder_exists(os.path.join(path, extracted_filename2)) is False:logging.info(\"\".format(extracted_filename, path))maybe_download_and_extract(tar_filename, path, url, extract=True)del_file(os.path.join(path, tar_filename))logging.info(\"\")ann_train_list = []ann_test_list = []img_train_list = []img_test_list = []def save_joints():mat = sio.loadmat(os.path.join(path, extracted_filename, \"\"))for _, (anno, train_flag) in enumerate( zip(mat[''][''][, ][], mat[''][''][, ][])):img_fn = anno[''][''][, ][]train_flag = 
int(train_flag)if train_flag:img_train_list.append(img_fn)ann_train_list.append([])else:img_test_list.append(img_fn)ann_test_list.append([])head_rect = []if '' in str(anno[''].dtype):head_rect = zip([x1[, ] for x1 in anno[''][''][]], [y1[, ] for y1 in anno[''][''][]],[x2[, ] for x2 in anno[''][''][]], [y2[, ] for y2 in anno[''][''][]])else:head_rect = [] if '' in str(anno[''].dtype):annopoints = anno[''][''][]head_x1s = anno[''][''][]head_y1s = anno[''][''][]head_x2s = anno[''][''][]head_y2s = anno[''][''][]for annopoint, head_x1, head_y1, head_x2, head_y2 in zip(annopoints, head_x1s, head_y1s, head_x2s,head_y2s):if annopoint.size:head_rect = [float(head_x1[, ]),float(head_y1[, ]),float(head_x2[, ]),float(head_y2[, ])]annopoint = annopoint[''][, ]j_id = [str(j_i[, ]) for j_i in annopoint[''][]]x = [x[, ] for x in annopoint[''][]]y = [y[, ] for y in annopoint[''][]]joint_pos = {}for _j_id, (_x, _y) in zip(j_id, zip(x, y)):joint_pos[int(_j_id)] = [float(_x), float(_y)]if '' in str(annopoint.dtype):vis = [v[] if v.size > else [] for v in annopoint[''][]]vis = dict([(k, int(v[])) if len(v) > else v for k, v in zip(j_id, vis)])else:vis = Noneif ((is_16_pos_only ==True) and (len(joint_pos) == )) or (is_16_pos_only == False):data = {'': img_fn,'': train_flag,'': head_rect,'': vis,'': joint_pos}if train_flag:ann_train_list[-].append(data)else:ann_test_list[-].append(data)save_joints()logging.info(\"\")img_dir = os.path.join(path, extracted_filename2)_img_list = load_file_list(path=os.path.join(path, extracted_filename2), regx='', printable=False)for i, im in enumerate(img_train_list):if im not in _img_list:print(''.format(im, img_dir))del img_train_list[i]del ann_train_list[i]for i, im in enumerate(img_test_list):if im not in _img_list:print(''.format(im, img_dir))del img_train_list[i]del ann_train_list[i]n_train_images = len(img_train_list)n_test_images = len(img_test_list)n_images = n_train_images + n_test_imageslogging.info(\"\".format(n_images, n_train_images, n_test_images))n_train_ann = len(ann_train_list)n_test_ann = len(ann_test_list)n_ann = n_train_ann + n_test_annlogging.info(\"\".format(n_ann, n_train_ann, n_test_ann))n_train_people = len(sum(ann_train_list, []))n_test_people = len(sum(ann_test_list, []))n_people = n_train_people + n_test_peoplelogging.info(\"\".format(n_people, n_train_people, n_test_people))for i, value in enumerate(img_train_list):img_train_list[i] = os.path.join(img_dir, value)for i, value in enumerate(img_test_list):img_test_list[i] = os.path.join(img_dir, value)return img_train_list, ann_train_list, img_test_list, ann_test_list", "docstring": "Load MPII Human Pose Dataset.\n\n Parameters\n -----------\n path : str\n The path that the data is downloaded to.\n is_16_pos_only : boolean\n If True, only return the peoples contain 16 pose keypoints. 
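Each element of `ann_train_list` returned by `load_mpii_pose_dataset` is a list of per-person dictionaries whose joint-position entry maps joint ids to `[x, y]` coordinates, as built in the body above. A hedged sketch of walking that structure; the `'joint_pos'` key name follows the usual TensorLayer layout but should be treated as an assumption, since the literal keys are not preserved here.

import tensorlayer as tl

img_train_list, ann_train_list, img_test_list, ann_test_list = tl.files.load_mpii_pose_dataset()

for img_path, people in zip(img_train_list[:3], ann_train_list[:3]):
    print(img_path, 'contains', len(people), 'annotated people')
    for person in people:
        joints = person['joint_pos']            # assumed key: {joint_id: [x, y]}
        for joint_id, (x, y) in joints.items():
            print('  joint', joint_id, 'at', x, y)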
(Usually be used for single person pose estimation)\n\n Returns\n ----------\n img_train_list : list of str\n The image directories of training data.\n ann_train_list : list of dict\n The annotations of training data.\n img_test_list : list of str\n The image directories of testing data.\n ann_test_list : list of dict\n The annotations of testing data.\n\n Examples\n --------\n >>> import pprint\n >>> import tensorlayer as tl\n >>> img_train_list, ann_train_list, img_test_list, ann_test_list = tl.files.load_mpii_pose_dataset()\n >>> image = tl.vis.read_image(img_train_list[0])\n >>> tl.vis.draw_mpii_pose_to_image(image, ann_train_list[0], 'image.png')\n >>> pprint.pprint(ann_train_list[0])\n\n References\n -----------\n - `MPII Human Pose Dataset. CVPR 14 `__\n - `MPII Human Pose Models. CVPR 16 `__\n - `MPII Human Shape, Poselet Conditioned Pictorial Structures and etc `__\n - `MPII Keyponts and ID `__", "id": "f11137:m16"} {"signature": "def save_npz(save_list=None, name='', sess=None):", "body": "logging.info(\"\" % name)if save_list is None:save_list = []save_list_var = []if sess:save_list_var = sess.run(save_list)else:try:save_list_var.extend([v.eval() for v in save_list])except Exception:logging.info(\"\")np.savez(name, params=save_list_var)save_list_var = Nonedel save_list_varlogging.info(\"\")", "docstring": "Input parameters and the file name, save parameters into .npz file. Use tl.utils.load_npz() to restore.\n\n Parameters\n ----------\n save_list : list of tensor\n A list of parameters (tensor) to be saved.\n name : str\n The name of the `.npz` file.\n sess : None or Session\n Session may be required in some case.\n\n Examples\n --------\n Save model to npz\n\n >>> tl.files.save_npz(network.all_params, name='model.npz', sess=sess)\n\n Load model from npz (Method 1)\n\n >>> load_params = tl.files.load_npz(name='model.npz')\n >>> tl.files.assign_params(sess, load_params, network)\n\n Load model from npz (Method 2)\n\n >>> tl.files.load_and_assign_npz(sess=sess, name='model.npz', network=network)\n\n Notes\n -----\n If you got session issues, you can change the value.eval() to value.eval(session=sess)\n\n References\n ----------\n `Saving dictionary using numpy `__", "id": "f11137:m17"} {"signature": "def load_npz(path='', name=''):", "body": "d = np.load(os.path.join(path, name))return d['']", "docstring": "Load the parameters of a Model saved by tl.files.save_npz().\n\n Parameters\n ----------\n path : str\n Folder path to `.npz` file.\n name : str\n The name of the `.npz` file.\n\n Returns\n --------\n list of array\n A list of parameters in order.\n\n Examples\n --------\n - See ``tl.files.save_npz``\n\n References\n ----------\n - `Saving dictionary using numpy `__", "id": "f11137:m18"} {"signature": "def assign_params(sess, params, network):", "body": "ops = []for idx, param in enumerate(params):ops.append(network.all_params[idx].assign(param))if sess is not None:sess.run(ops)return ops", "docstring": "Assign the given parameters to the TensorLayer network.\n\n Parameters\n ----------\n sess : Session\n TensorFlow Session.\n params : list of array\n A list of parameters (array) in order.\n network : :class:`Layer`\n The network to be assigned.\n\n Returns\n --------\n list of operations\n A list of tf ops in order that assign params. 
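`save_npz`, `load_npz` and `assign_params` above form the usual order-based save/restore round trip for a TensorLayer network. A minimal sketch of that flow, assuming `network` is an already-built `tl.layers` model and `sess` an active TensorFlow session:

import tensorlayer as tl

# Save all parameters of `network` into one .npz archive.
tl.files.save_npz(network.all_params, name='model.npz', sess=sess)

# Later: load the arrays back and assign them to the network in order.
params = tl.files.load_npz(name='model.npz')
tl.files.assign_params(sess, params, network)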
Support sess.run(ops) manually.\n\n Examples\n --------\n - See ``tl.files.save_npz``\n\n References\n ----------\n - `Assign value to a TensorFlow variable `__", "id": "f11137:m19"} {"signature": "def load_and_assign_npz(sess=None, name=None, network=None):", "body": "if network is None:raise ValueError(\"\")if sess is None:raise ValueError(\"\")if not os.path.exists(name):logging.error(\"\".format(name))return Falseelse:params = load_npz(name=name)assign_params(sess, params, network)logging.info(\"\".format(name))return network", "docstring": "Load model from npz and assign to a network.\n\n Parameters\n -------------\n sess : Session\n TensorFlow Session.\n name : str\n The name of the `.npz` file.\n network : :class:`Layer`\n The network to be assigned.\n\n Returns\n --------\n False or network\n Returns False, if the model is not exist.\n\n Examples\n --------\n - See ``tl.files.save_npz``", "id": "f11137:m20"} {"signature": "def save_npz_dict(save_list=None, name='', sess=None):", "body": "if sess is None:raise ValueError(\"\")if save_list is None:save_list = []save_list_names = [tensor.name for tensor in save_list]save_list_var = sess.run(save_list)save_var_dict = {save_list_names[idx]: val for idx, val in enumerate(save_list_var)}np.savez(name, **save_var_dict)save_list_var = Nonesave_var_dict = Nonedel save_list_vardel save_var_dictlogging.info(\"\" % name)", "docstring": "Input parameters and the file name, save parameters as a dictionary into .npz file.\n\n Use ``tl.files.load_and_assign_npz_dict()`` to restore.\n\n Parameters\n ----------\n save_list : list of parameters\n A list of parameters (tensor) to be saved.\n name : str\n The name of the `.npz` file.\n sess : Session\n TensorFlow Session.", "id": "f11137:m21"} {"signature": "def load_and_assign_npz_dict(name='', sess=None):", "body": "if sess is None:raise ValueError(\"\")if not os.path.exists(name):logging.error(\"\".format(name))return Falseparams = np.load(name)if len(params.keys()) != len(set(params.keys())):raise Exception(\"\" % name)ops = list()for key in params.keys():try:varlist = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=key)if len(varlist) > :raise Exception(\"\" % key)elif len(varlist) == :raise KeyErrorelse:ops.append(varlist[].assign(params[key]))logging.info(\"\" % key)except KeyError:logging.info(\"\" % key)sess.run(ops)logging.info(\"\" % name)", "docstring": "Restore the parameters saved by ``tl.files.save_npz_dict()``.\n\n Parameters\n ----------\n name : str\n The name of the `.npz` file.\n sess : Session\n TensorFlow Session.", "id": "f11137:m22"} {"signature": "def save_ckpt(sess=None, mode_name='', save_dir='', var_list=None, global_step=None, printable=False):", "body": "if sess is None:raise ValueError(\"\")if var_list is None:var_list = []ckpt_file = os.path.join(save_dir, mode_name)if var_list == []:var_list = tf.global_variables()logging.info(\"\" % (ckpt_file, len(var_list)))if printable:for idx, v in enumerate(var_list):logging.info(\"\".format(idx, v.name, str(v.get_shape())))saver = tf.train.Saver(var_list)saver.save(sess, ckpt_file, global_step=global_step)", "docstring": "Save parameters into `ckpt` file.\n\n Parameters\n ------------\n sess : Session\n TensorFlow Session.\n mode_name : str\n The name of the model, default is ``model.ckpt``.\n save_dir : str\n The path / file directory to the `ckpt`, default is ``checkpoint``.\n var_list : list of tensor\n The parameters / variables (tensor) to be saved. 
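Unlike the order-based `save_npz`, the dictionary variants above key every array by its variable name, so restoring does not depend on parameter order. A short sketch of that round trip, again assuming an existing `network` and `sess`:

import tensorlayer as tl

# Save parameters keyed by tensor name ...
tl.files.save_npz_dict(network.all_params, name='model_dict.npz', sess=sess)

# ... and restore them by looking each name up in the current graph.
tl.files.load_and_assign_npz_dict(name='model_dict.npz', sess=sess)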
If empty, save all global variables (default).\n global_step : int or None\n Step number.\n printable : boolean\n Whether to print all parameters information.\n\n See Also\n --------\n load_ckpt", "id": "f11137:m23"} {"signature": "def save_any_to_npy(save_dict=None, name=''):", "body": "if save_dict is None:save_dict = {}np.save(name, save_dict)", "docstring": "Save variables to `.npy` file.\n\n Parameters\n ------------\n save_dict : directory\n The variables to be saved.\n name : str\n File name.\n\n Examples\n ---------\n >>> tl.files.save_any_to_npy(save_dict={'data': ['a','b']}, name='test.npy')\n >>> data = tl.files.load_npy_to_any(name='test.npy')\n >>> print(data)\n {'data': ['a','b']}", "id": "f11137:m25"} {"signature": "def load_npy_to_any(path='', name=''):", "body": "file_path = os.path.join(path, name)try:return np.load(file_path).item()except Exception:return np.load(file_path)raise Exception(\"\" % file_path)", "docstring": "Load `.npy` file.\n\n Parameters\n ------------\n path : str\n Path to the file (optional).\n name : str\n File name.\n\n Examples\n ---------\n - see tl.files.save_any_to_npy()", "id": "f11137:m26"} {"signature": "def file_exists(filepath):", "body": "return os.path.isfile(filepath)", "docstring": "Check whether a file exists by given file path.", "id": "f11137:m27"} {"signature": "def folder_exists(folderpath):", "body": "return os.path.isdir(folderpath)", "docstring": "Check whether a folder exists by given folder path.", "id": "f11137:m28"} {"signature": "def del_file(filepath):", "body": "os.remove(filepath)", "docstring": "Delete a file by given file path.", "id": "f11137:m29"} {"signature": "def del_folder(folderpath):", "body": "shutil.rmtree(folderpath)", "docstring": "Delete a folder by given folder path.", "id": "f11137:m30"} {"signature": "def read_file(filepath):", "body": "with open(filepath, '') as afile:return afile.read()", "docstring": "Read a file and return a string.\n\n Examples\n ---------\n >>> data = tl.files.read_file('data.txt')", "id": "f11137:m31"} {"signature": "def load_file_list(path=None, regx='', printable=True, keep_prefix=False):", "body": "if path is None:path = os.getcwd()file_list = os.listdir(path)return_list = []for _, f in enumerate(file_list):if re.search(regx, f):return_list.append(f)if keep_prefix:for i, f in enumerate(return_list):return_list[i] = os.path.join(path, f)if printable:logging.info('' % return_list)logging.info('' % len(return_list))return return_list", "docstring": "r\"\"\"Return a file list in a folder by given a path and regular expression.\n\n Parameters\n ----------\n path : str or None\n A folder path, if `None`, use the current directory.\n regx : str\n The regx of file name.\n printable : boolean\n Whether to print the files infomation.\n keep_prefix : boolean\n Whether to keep path in the file name.\n\n Examples\n ----------\n >>> file_list = tl.files.load_file_list(path=None, regx='w1pre_[0-9]+\\.(npz)')", "id": "f11137:m32"} {"signature": "def load_folder_list(path=\"\"):", "body": "return [os.path.join(path, o) for o in os.listdir(path) if os.path.isdir(os.path.join(path, o))]", "docstring": "Return a folder list in a folder by given a folder path.\n\n Parameters\n ----------\n path : str\n A folder path.", "id": "f11137:m33"} {"signature": "def exists_or_mkdir(path, verbose=True):", "body": "if not os.path.exists(path):if verbose:logging.info(\"\" % path)os.makedirs(path)return Falseelse:if verbose:logging.info(\"\" % path)return True", "docstring": "Check a folder by given name, if 
not exist, create the folder and return False,\n if directory exists, return True.\n\n Parameters\n ----------\n path : str\n A folder path.\n verbose : boolean\n If True (default), prints results.\n\n Returns\n --------\n boolean\n True if folder already exist, otherwise, returns False and create the folder.\n\n Examples\n --------\n >>> tl.files.exists_or_mkdir(\"checkpoints/train\")", "id": "f11137:m34"} {"signature": "def maybe_download_and_extract(filename, working_directory, url_source, extract=False, expected_bytes=None):", "body": "def _download(filename, working_directory, url_source):progress_bar = progressbar.ProgressBar()def _dlProgress(count, blockSize, totalSize, pbar=progress_bar):if (totalSize != ):if not pbar.max_value:totalBlocks = math.ceil(float(totalSize) / float(blockSize))pbar.max_value = int(totalBlocks)pbar.update(count, force=True)filepath = os.path.join(working_directory, filename)logging.info('' % filename)urlretrieve(url_source + filename, filepath, reporthook=_dlProgress)exists_or_mkdir(working_directory, verbose=False)filepath = os.path.join(working_directory, filename)if not os.path.exists(filepath):_download(filename, working_directory, url_source)statinfo = os.stat(filepath)logging.info('' % (filename, statinfo.st_size)) if (not (expected_bytes is None) and (expected_bytes != statinfo.st_size)):raise Exception('' + filename + '')if (extract):if tarfile.is_tarfile(filepath):logging.info('')tarfile.open(filepath, '').extractall(working_directory)logging.info('')elif zipfile.is_zipfile(filepath):logging.info('')with zipfile.ZipFile(filepath) as zf:zf.extractall(working_directory)logging.info('')else:logging.info(\"\")return filepath", "docstring": "Checks if file exists in working_directory otherwise tries to dowload the file,\n and optionally also tries to extract the file if format is \".zip\" or \".tar\"\n\n Parameters\n -----------\n filename : str\n The name of the (to be) dowloaded file.\n working_directory : str\n A folder path to search for the file in and dowload the file to\n url : str\n The URL to download the file from\n extract : boolean\n If True, tries to uncompress the dowloaded file is \".tar.gz/.tar.bz2\" or \".zip\" file, default is False.\n expected_bytes : int or None\n If set tries to verify that the downloaded file is of the specified size, otherwise raises an Exception, defaults is None which corresponds to no check being performed.\n\n Returns\n ----------\n str\n File path of the dowloaded (uncompressed) file.\n\n Examples\n --------\n >>> down_file = tl.files.maybe_download_and_extract(filename='train-images-idx3-ubyte.gz',\n ... working_directory='data/',\n ... url_source='http://yann.lecun.com/exdb/mnist/')\n >>> tl.files.maybe_download_and_extract(filename='ADEChallengeData2016.zip',\n ... working_directory='data/',\n ... url_source='http://sceneparsing.csail.mit.edu/data/',\n ... 
extract=True)", "id": "f11137:m35"} {"signature": "def natural_keys(text):", "body": "def atoi(text):return int(text) if text.isdigit() else textreturn [atoi(c) for c in re.split('', text)]", "docstring": "Sort list of string with number in human order.\n\n Examples\n ----------\n >>> l = ['im1.jpg', 'im31.jpg', 'im11.jpg', 'im21.jpg', 'im03.jpg', 'im05.jpg']\n >>> l.sort(key=tl.files.natural_keys)\n ['im1.jpg', 'im03.jpg', 'im05', 'im11.jpg', 'im21.jpg', 'im31.jpg']\n >>> l.sort() # that is what we dont want\n ['im03.jpg', 'im05', 'im1.jpg', 'im11.jpg', 'im21.jpg', 'im31.jpg']\n\n References\n ----------\n - `link `__", "id": "f11137:m36"} {"signature": "def npz_to_W_pdf(path=None, regx=''):", "body": "file_list = load_file_list(path=path, regx=regx)for f in file_list:W = load_npz(path, f)[]logging.info(\"\" % (f, f.split('')[] + ''))visualize.draw_weights(W, second=, saveable=True, name=f.split('')[], fig_idx=)", "docstring": "r\"\"\"Convert the first weight matrix of `.npz` file to `.pdf` by using `tl.visualize.W()`.\n\n Parameters\n ----------\n path : str\n A folder path to `npz` files.\n regx : str\n Regx for the file name.\n\n Examples\n ---------\n Convert the first weight matrix of w1_pre...npz file to w1_pre...pdf.\n\n >>> tl.files.npz_to_W_pdf(path='/Users/.../npz_file/', regx='w1pre_[0-9]+\\.(npz)')", "id": "f11137:m37"} {"signature": "def __init__(self, learning_rate=, beta1=, beta2=, epsilon=, use_locking=False, name=\"\"):", "body": "super(AMSGrad, self).__init__(use_locking, name)self._lr = learning_rateself._beta1 = beta1self._beta2 = beta2self._epsilon = epsilonself._lr_t = Noneself._beta1_t = Noneself._beta2_t = Noneself._epsilon_t = Noneself._beta1_power = Noneself._beta2_power = None", "docstring": "Construct a new Adam optimizer.", "id": "f11138:c0:m0"} {"signature": "def roi_pooling(input, rois, pool_height, pool_width):", "body": "out = roi_pooling_module.roi_pooling(input, rois, pool_height=pool_height, pool_width=pool_width)output, argmax_output = out[], out[]return output", "docstring": "returns a tensorflow operation for computing the Region of Interest Pooling\n\n@arg input: feature maps on which to perform the pooling operation\n@arg rois: list of regions of interest in the format (feature map index, upper left, bottom right)\n@arg pool_width: size of the pooling sections", "id": "f11142:m0"} {"signature": "@private_methoddef _to_bc_h_w(self, x, x_shape):", "body": "x = tf.transpose(x, [, , , ])x = tf.reshape(x, (-, x_shape[], x_shape[]))return x", "docstring": "(b, h, w, c) -> (b*c, h, w)", "id": "f11162:c0:m1"} {"signature": "@private_methoddef _to_b_h_w_n_c(self, x, x_shape):", "body": "x = tf.reshape(x, (-, x_shape[], x_shape[], x_shape[], x_shape[]))x = tf.transpose(x, [, , , , ])return x", "docstring": "(b*c, h, w, n) -> (b, h, w, n, c)", "id": "f11162:c0:m2"} {"signature": "@private_methoddef tf_flatten(self, a):", "body": "return tf.reshape(a, [-])", "docstring": "Flatten tensor", "id": "f11162:c0:m3"} {"signature": "@private_methoddef _tf_repeat(self, a, repeats):", "body": "if len(a.get_shape()) != :raise AssertionError(\"\")a = tf.expand_dims(a, -)a = tf.tile(a, [, repeats])a = self.tf_flatten(a)return a", "docstring": "Tensorflow version of np.repeat for 1D", "id": "f11162:c0:m5"} {"signature": "@private_methoddef _tf_batch_map_coordinates(self, inputs, coords):", "body": "input_shape = inputs.get_shape()coords_shape = coords.get_shape()batch_channel = tf.shape(inputs)[]input_h = int(input_shape[])input_w = int(input_shape[])kernel_n = 
int(coords_shape[])n_coords = input_h * input_w * kernel_ncoords_lt = tf.cast(tf.floor(coords), '')coords_rb = tf.cast(tf.ceil(coords), '')coords_lb = tf.stack([coords_lt[:, :, :, :, ], coords_rb[:, :, :, :, ]], axis=-)coords_rt = tf.stack([coords_rb[:, :, :, :, ], coords_lt[:, :, :, :, ]], axis=-)idx = self._tf_repeat(tf.range(batch_channel), n_coords)vals_lt = self._get_vals_by_coords(inputs, coords_lt, idx, (batch_channel, input_h, input_w, kernel_n))vals_rb = self._get_vals_by_coords(inputs, coords_rb, idx, (batch_channel, input_h, input_w, kernel_n))vals_lb = self._get_vals_by_coords(inputs, coords_lb, idx, (batch_channel, input_h, input_w, kernel_n))vals_rt = self._get_vals_by_coords(inputs, coords_rt, idx, (batch_channel, input_h, input_w, kernel_n))coords_offset_lt = coords - tf.cast(coords_lt, '')vals_t = vals_lt + (vals_rt - vals_lt) * coords_offset_lt[:, :, :, :, ]vals_b = vals_lb + (vals_rb - vals_lb) * coords_offset_lt[:, :, :, :, ]mapped_vals = vals_t + (vals_b - vals_t) * coords_offset_lt[:, :, :, :, ]return mapped_vals", "docstring": "Batch version of tf_map_coordinates\n\n Only supports 2D feature maps\n\n Parameters\n ----------\n inputs : ``tf.Tensor``\n shape = (b*c, h, w)\n coords : ``tf.Tensor``\n shape = (b*c, h, w, n, 2)\n\n Returns\n -------\n ``tf.Tensor``\n A Tensor with the shape as (b*c, h, w, n)", "id": "f11162:c0:m6"} {"signature": "@private_methoddef _tf_batch_map_offsets(self, inputs, offsets, grid_offset):", "body": "input_shape = inputs.get_shape()batch_size = tf.shape(inputs)[]kernel_n = int(int(offsets.get_shape()[]) / )input_h = input_shape[]input_w = input_shape[]channel = input_shape[]inputs = self._to_bc_h_w(inputs, input_shape)offsets = tf.reshape(offsets, (batch_size, input_h, input_w, kernel_n, ))coords = tf.expand_dims(grid_offset, ) coords = tf.tile(coords, [batch_size, , , , ]) + offsets coords = tf.stack([tf.clip_by_value(coords[:, :, :, :, ], , tf.cast(input_h - , '')),tf.clip_by_value(coords[:, :, :, :, ], , tf.cast(input_w - , ''))], axis=-)coords = tf.tile(coords, [channel, , , , ])mapped_vals = self._tf_batch_map_coordinates(inputs, coords)mapped_vals = self._to_b_h_w_n_c(mapped_vals, [batch_size, input_h, input_w, kernel_n, channel])return mapped_vals", "docstring": "Batch map offsets into input\n\n Parameters\n ------------\n inputs : ``tf.Tensor``\n shape = (b, h, w, c)\n offsets: ``tf.Tensor``\n shape = (b, h, w, 2*n)\n grid_offset: `tf.Tensor``\n Offset grids shape = (h, w, n, 2)\n\n Returns\n -------\n ``tf.Tensor``\n A Tensor with the shape as (b, h, w, c)", "id": "f11162:c0:m7"} {"signature": "@deprecated_alias(layer='', end_support_version=) def atrous_conv1d(prev_layer,n_filter=,filter_size=,stride=,dilation=,act=None,padding='',data_format='',W_init=tf.truncated_normal_initializer(stddev=),b_init=tf.constant_initializer(value=),W_init_args=None,b_init_args=None,name='',):", "body": "return Conv1dLayer(prev_layer=prev_layer,act=act,shape=(filter_size, int(prev_layer.outputs.get_shape()[-]), n_filter),stride=stride,padding=padding,dilation_rate=dilation,data_format=data_format,W_init=W_init,b_init=b_init,W_init_args=W_init_args,b_init_args=b_init_args,name=name,)", "docstring": "Simplified version of :class:`AtrousConv1dLayer`.\n\n Parameters\n ----------\n prev_layer : :class:`Layer`\n Previous layer.\n n_filter : int\n The number of filters.\n filter_size : int\n The filter size.\n stride : tuple of int\n The strides: (height, width).\n dilation : int\n The filter dilation size.\n act : activation function\n The activation 
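The deformable-convolution helpers above (`_tf_batch_map_coordinates` and `_tf_batch_map_offsets`) sample the feature map at fractional, offset coordinates by blending the four surrounding integer pixels. A plain numpy sketch of that bilinear interpolation for a single 2-D map; the variable names are illustrative, not the layer's own:

import numpy as np

def bilinear_sample(feature, y, x):
    # Integer corners around the fractional point (y, x), clipped to the map.
    y0, x0 = int(np.floor(y)), int(np.floor(x))
    y1 = min(y0 + 1, feature.shape[0] - 1)
    x1 = min(x0 + 1, feature.shape[1] - 1)
    dy, dx = y - y0, x - x0
    top = feature[y0, x0] * (1 - dx) + feature[y0, x1] * dx
    bottom = feature[y1, x0] * (1 - dx) + feature[y1, x1] * dx
    return top * (1 - dy) + bottom * dy

fmap = np.arange(16, dtype=np.float32).reshape(4, 4)
print(bilinear_sample(fmap, 1.5, 2.25))    # blends rows 1-2 and columns 2-3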
function of this layer.\n padding : str\n The padding algorithm type: \"SAME\" or \"VALID\".\n data_format : str\n Default is 'NWC' as it is a 1D CNN.\n W_init : initializer\n The initializer for the weight matrix.\n b_init : initializer or None\n The initializer for the bias vector. If None, skip biases.\n W_init_args : dictionary\n The arguments for the weight matrix initializer.\n b_init_args : dictionary\n The arguments for the bias vector initializer.\n name : str\n A unique layer name.\n\n Returns\n -------\n :class:`Layer`\n A :class:`AtrousConv1dLayer` object", "id": "f11167:m0"} {"signature": "def _to_channel_first_bias(b):", "body": "channel_size = int(b.shape[])new_shape = (channel_size, , )return tf.reshape(b, new_shape)", "docstring": "Reshape [c] to [c, 1, 1].", "id": "f11170:m0"} {"signature": "def _bias_scale(x, b, data_format):", "body": "if data_format == '':return x * belif data_format == '':return x * _to_channel_first_bias(b)else:raise ValueError('' % data_format)", "docstring": "The multiplication counter part of tf.nn.bias_add.", "id": "f11170:m1"} {"signature": "def _bias_add(x, b, data_format):", "body": "if data_format == '':return tf.add(x, b)elif data_format == '':return tf.add(x, _to_channel_first_bias(b))else:raise ValueError('' % data_format)", "docstring": "Alternative implementation of tf.nn.bias_add which is compatiable with tensorRT.", "id": "f11170:m2"} {"signature": "def batch_normalization(x, mean, variance, offset, scale, variance_epsilon, data_format, name=None):", "body": "with ops.name_scope(name, '', [x, mean, variance, scale, offset]):inv = math_ops.rsqrt(variance + variance_epsilon)if scale is not None:inv *= scalea = math_ops.cast(inv, x.dtype)b = math_ops.cast(offset - mean * inv if offset is not None else -mean * inv, x.dtype)df = {'': '', '': ''}return _bias_add(_bias_scale(x, a, df[data_format]), b, df[data_format])", "docstring": "Data Format aware version of tf.nn.batch_normalization.", "id": "f11170:m3"} {"signature": "def print_params(self, details=True, session=None):", "body": "for i, p in enumerate(self.all_params):if details:try:val = p.eval(session=session)logging.info(\"\".format(i, p.name, str(val.shape), p.dtype.name, val.mean(), np.median(val), val.std()))except Exception as e:logging.info(str(e))raise Exception(\"\"\"\")else:logging.info(\"\".format(i, p.name, str(p.get_shape()), p.dtype.name))logging.info(\"\" % self.count_params())", "docstring": "Print all info of parameters in the network", "id": "f11189:c1:m1"} {"signature": "def print_layers(self):", "body": "for i, layer in enumerate(self.all_layers):logging.info(\"\".format(i, layer.name, str(layer.get_shape()), layer.dtype.name))", "docstring": "Print all info of layers in the network.", "id": "f11189:c1:m2"} {"signature": "def count_params(self):", "body": "n_params = for _i, p in enumerate(self.all_params):n = for s in p.get_shape():try:s = int(s)except Exception:s = if s:n = n * sn_params = n_params + nreturn n_params", "docstring": "Returns the number of parameters in the network.", "id": "f11189:c1:m3"} {"signature": "def get_all_params(self, session=None):", "body": "_params = []for p in self.all_params:if session is None:_params.append(p.eval())else:_params.append(session.run(p))return _params", "docstring": "Return the parameters in a list of array.", "id": "f11189:c1:m4"} {"signature": "@protected_methoddef _get_init_args(self, skip=):", "body": "stack = inspect.stack()if len(stack) < skip + :raise ValueError(\"\")args, _, _, values = 
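The data-format aware `batch_normalization` above folds the usual normalisation into one scale-and-shift, `y = x * a + b`, where `a = scale / sqrt(var + eps)` and `b = offset - mean * a`, so inference only needs a bias-style multiply and add. A numpy check of that identity, with the statistics made up for illustration:

import numpy as np

x = np.random.randn(2, 8).astype(np.float32)
mean, var = x.mean(axis=0), x.var(axis=0)
scale, offset, eps = 1.5, 0.1, 1e-5

# Textbook form ...
y_ref = (x - mean) / np.sqrt(var + eps) * scale + offset

# ... and the folded scale/shift form used by the wrapper.
a = scale / np.sqrt(var + eps)
b = offset - mean * a
y_folded = x * a + b

print(np.allclose(y_ref, y_folded))    # True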
inspect.getargvalues(stack[skip][])params = {}for arg in args:if values[arg] is not None and arg not in ['', '', '']:val = values[arg]if inspect.isfunction(val):params[arg] = {\"\": val.__module__, \"\": val.__name__}elif arg.endswith(''):continueelse:params[arg] = valreturn params", "docstring": "Get all arguments of current layer for saving the graph.", "id": "f11189:c1:m11"} {"signature": "def transformer(U, theta, out_size, name=''):", "body": "def _repeat(x, n_repeats):with tf.variable_scope(''):rep = tf.transpose(tf.expand_dims(tf.ones(shape=tf.stack([n_repeats,])), ), [, ])rep = tf.cast(rep, '')x = tf.matmul(tf.reshape(x, (-, )), rep)return tf.reshape(x, [-])def _interpolate(im, x, y, out_size):with tf.variable_scope(''):num_batch = tf.shape(im)[]height = tf.shape(im)[]width = tf.shape(im)[]channels = tf.shape(im)[]x = tf.cast(x, '')y = tf.cast(y, '')height_f = tf.cast(height, '')width_f = tf.cast(width, '')out_height = out_size[]out_width = out_size[]zero = tf.zeros([], dtype='')max_y = tf.cast(tf.shape(im)[] - , '')max_x = tf.cast(tf.shape(im)[] - , '')x = (x + ) * (width_f) / y = (y + ) * (height_f) / x0 = tf.cast(tf.floor(x), '')x1 = x0 + y0 = tf.cast(tf.floor(y), '')y1 = y0 + x0 = tf.clip_by_value(x0, zero, max_x)x1 = tf.clip_by_value(x1, zero, max_x)y0 = tf.clip_by_value(y0, zero, max_y)y1 = tf.clip_by_value(y1, zero, max_y)dim2 = widthdim1 = width * heightbase = _repeat(tf.range(num_batch) * dim1, out_height * out_width)base_y0 = base + y0 * dim2base_y1 = base + y1 * dim2idx_a = base_y0 + x0idx_b = base_y1 + x0idx_c = base_y0 + x1idx_d = base_y1 + x1im_flat = tf.reshape(im, tf.stack([-, channels]))im_flat = tf.cast(im_flat, '')Ia = tf.gather(im_flat, idx_a)Ib = tf.gather(im_flat, idx_b)Ic = tf.gather(im_flat, idx_c)Id = tf.gather(im_flat, idx_d)x0_f = tf.cast(x0, '')x1_f = tf.cast(x1, '')y0_f = tf.cast(y0, '')y1_f = tf.cast(y1, '')wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), )wb = tf.expand_dims(((x1_f - x) * (y - y0_f)), )wc = tf.expand_dims(((x - x0_f) * (y1_f - y)), )wd = tf.expand_dims(((x - x0_f) * (y - y0_f)), )output = tf.add_n([wa * Ia, wb * Ib, wc * Ic, wd * Id])return outputdef _meshgrid(height, width):with tf.variable_scope(''):x_t = tf.matmul(tf.ones(shape=tf.stack([height, ])),tf.transpose(tf.expand_dims(tf.linspace(-, , width), ), [, ]))y_t = tf.matmul(tf.expand_dims(tf.linspace(-, , height), ), tf.ones(shape=tf.stack([, width])))x_t_flat = tf.reshape(x_t, (, -))y_t_flat = tf.reshape(y_t, (, -))ones = tf.ones_like(x_t_flat)grid = tf.concat(axis=, values=[x_t_flat, y_t_flat, ones])return griddef _transform(theta, input_dim, out_size):with tf.variable_scope(''):num_batch = tf.shape(input_dim)[]num_channels = tf.shape(input_dim)[]theta = tf.reshape(theta, (-, , ))theta = tf.cast(theta, '')out_height = out_size[]out_width = out_size[]grid = _meshgrid(out_height, out_width)grid = tf.expand_dims(grid, )grid = tf.reshape(grid, [-])grid = tf.tile(grid, tf.stack([num_batch]))grid = tf.reshape(grid, tf.stack([num_batch, , -]))T_g = tf.matmul(theta, grid)x_s = tf.slice(T_g, [, , ], [-, , -])y_s = tf.slice(T_g, [, , ], [-, , -])x_s_flat = tf.reshape(x_s, [-])y_s_flat = tf.reshape(y_s, [-])input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, out_size)output = tf.reshape(input_transformed, tf.stack([num_batch, out_height, out_width, num_channels]))return outputwith tf.variable_scope(name):output = _transform(theta, U, out_size)return output", "docstring": "Spatial Transformer Layer for `2D Affine Transformation `__\n , see :class:`SpatialTransformer2dAffineLayer` 
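The `transformer` op above builds a normalised [-1, 1] sampling grid (`_meshgrid`) and maps it through the 2x3 affine matrix `theta` (`_transform`) before bilinear sampling. A small numpy sketch of that grid transform, using the identity `theta` from the Notes in the docstring that continues below:

import numpy as np

out_h, out_w = 4, 4
ys, xs = np.meshgrid(np.linspace(-1, 1, out_h), np.linspace(-1, 1, out_w), indexing='ij')
grid = np.stack([xs.ravel(), ys.ravel(), np.ones(out_h * out_w)])   # (3, H*W) homogeneous coords

theta = np.array([[1., 0., 0.],
                  [0., 1., 0.]])           # identity transform
xy_source = theta.dot(grid)                # (2, H*W) source sampling coordinates

print(np.allclose(xy_source[0], xs.ravel()), np.allclose(xy_source[1], ys.ravel()))   # True True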
class.\n\n Parameters\n ----------\n U : list of float\n The output of a convolutional net should have the\n shape [num_batch, height, width, num_channels].\n theta: float\n The output of the localisation network should be [num_batch, 6], value range should be [0, 1] (via tanh).\n out_size: tuple of int\n The size of the output of the network (height, width)\n name: str\n Optional function name\n\n Returns\n -------\n Tensor\n The transformed tensor.\n\n References\n ----------\n - `Spatial Transformer Networks `__\n - `TensorFlow/Models `__\n\n Notes\n -----\n To initialize the network to the identity transform init.\n\n >>> import tensorflow as tf\n >>> # ``theta`` to\n >>> identity = np.array([[1., 0., 0.], [0., 1., 0.]])\n >>> identity = identity.flatten()\n >>> theta = tf.Variable(initial_value=identity)", "id": "f11193:m0"} {"signature": "def batch_transformer(U, thetas, out_size, name=''):", "body": "with tf.variable_scope(name):num_batch, num_transforms = map(int, thetas.get_shape().as_list()[:])indices = [[i] * num_transforms for i in xrange(num_batch)]input_repeated = tf.gather(U, tf.reshape(indices, [-]))return transformer(input_repeated, thetas, out_size)", "docstring": "Batch Spatial Transformer function for `2D Affine Transformation `__.\n\n Parameters\n ----------\n U : list of float\n tensor of inputs [batch, height, width, num_channels]\n thetas : list of float\n a set of transformations for each input [batch, num_transforms, 6]\n out_size : list of int\n the size of the output [out_height, out_width]\n name : str\n optional function name\n\n Returns\n ------\n float\n Tensor of size [batch * num_transforms, out_height, out_width, num_channels]", "id": "f11193:m1"} {"signature": "def compute_alpha(x):", "body": "threshold = _compute_threshold(x)alpha1_temp1 = tf.where(tf.greater(x, threshold), x, tf.zeros_like(x, tf.float32))alpha1_temp2 = tf.where(tf.less(x, -threshold), x, tf.zeros_like(x, tf.float32))alpha_array = tf.add(alpha1_temp1, alpha1_temp2, name=None)alpha_array_abs = tf.abs(alpha_array)alpha_array_abs1 = tf.where(tf.greater(alpha_array_abs, ), tf.ones_like(alpha_array_abs, tf.float32),tf.zeros_like(alpha_array_abs, tf.float32))alpha_sum = tf.reduce_sum(alpha_array_abs)n = tf.reduce_sum(alpha_array_abs1)alpha = tf.div(alpha_sum, n)return alpha", "docstring": "Computing the scale parameter.", "id": "f11194:m2"} {"signature": "def flatten_reshape(variable, name=''):", "body": "dim = for d in variable.get_shape()[:].as_list():dim *= dreturn tf.reshape(variable, shape=[-, dim], name=name)", "docstring": "Reshapes a high-dimension vector input.\n\n [batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row x mask_col x n_mask]\n\n Parameters\n ----------\n variable : TensorFlow variable or tensor\n The variable or tensor to be flatten.\n name : str\n A unique layer name.\n\n Returns\n -------\n Tensor\n Flatten Tensor\n\n Examples\n --------\n >>> import tensorflow as tf\n >>> import tensorlayer as tl\n >>> x = tf.placeholder(tf.float32, [None, 128, 128, 3])\n >>> # Convolution Layer with 32 filters and a kernel size of 5\n >>> network = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)\n >>> # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n >>> network = tf.layers.max_pooling2d(network, 2, 2)\n >>> print(network.get_shape()[:].as_list())\n >>> [None, 62, 62, 32]\n >>> network = tl.layers.flatten_reshape(network)\n >>> print(network.get_shape()[:].as_list()[1:])\n >>> [None, 123008]", "id": "f11194:m3"} {"signature": 
"@deprecated_alias(printable='', end_support_version=) def get_layers_with_name(net, name=\"\", verbose=False):", "body": "logging.info(\"\" % name)layers = []i = for layer in net.all_layers:if name in layer.name:layers.append(layer)if verbose:logging.info(\"\".format(i, layer.name, str(layer.get_shape())))i = i + return layers", "docstring": "Get a list of layers' output in a network by a given name scope.\n\n Parameters\n -----------\n net : :class:`Layer`\n The last layer of the network.\n name : str\n Get the layers' output that contain this name.\n verbose : boolean\n If True, print information of all the layers' output\n\n Returns\n --------\n list of Tensor\n A list of layers' output (TensorFlow tensor)\n\n Examples\n ---------\n >>> import tensorlayer as tl\n >>> layers = tl.layers.get_layers_with_name(net, \"CNN\", True)", "id": "f11194:m5"} {"signature": "@deprecated_alias(printable='', end_support_version=) def get_variables_with_name(name=None, train_only=True, verbose=False):", "body": "if name is None:raise Exception(\"\")logging.info(\"\" % name)if train_only:t_vars = tf.trainable_variables()else:t_vars = tf.global_variables()d_vars = [var for var in t_vars if name in var.name]if verbose:for idx, v in enumerate(d_vars):logging.info(\"\".format(idx, v.name, str(v.get_shape())))return d_vars", "docstring": "Get a list of TensorFlow variables by a given name scope.\n\n Parameters\n ----------\n name : str\n Get the variables that contain this name.\n train_only : boolean\n If Ture, only get the trainable variables.\n verbose : boolean\n If True, print the information of all variables.\n\n Returns\n -------\n list of Tensor\n A list of TensorFlow variables\n\n Examples\n --------\n >>> import tensorlayer as tl\n >>> dense_vars = tl.layers.get_variables_with_name('dense', True, True)", "id": "f11194:m6"} {"signature": "@deprecated(date=\"\", instructions=\"\")def initialize_global_variables(sess):", "body": "if sess is None:raise AssertionError('')sess.run(tf.global_variables_initializer())", "docstring": "Initialize the global variables of TensorFlow.\n\n Run ``sess.run(tf.global_variables_initializer())`` for TF 0.12+ or\n ``sess.run(tf.initialize_all_variables())`` for TF 0.11.\n\n Parameters\n ----------\n sess : Session\n TensorFlow session.", "id": "f11194:m7"} {"signature": "def initialize_rnn_state(state, feed_dict=None):", "body": "if isinstance(state, LSTMStateTuple):c = state.c.eval(feed_dict=feed_dict)h = state.h.eval(feed_dict=feed_dict)return c, helse:new_state = state.eval(feed_dict=feed_dict)return new_state", "docstring": "Returns the initialized RNN state.\n The inputs are `LSTMStateTuple` or `State` of `RNNCells`, and an optional `feed_dict`.\n\n Parameters\n ----------\n state : RNN state.\n The TensorFlow's RNN state.\n feed_dict : dictionary\n Initial RNN state; if None, returns zero state.\n\n Returns\n -------\n RNN state\n The TensorFlow's RNN state.", "id": "f11194:m8"} {"signature": "def list_remove_repeat(x):", "body": "y = []for i in x:if i not in y:y.append(i)return y", "docstring": "Remove the repeated items in a list, and return the processed list.\n You may need it to create merged layer like Concat, Elementwise and etc.\n\n Parameters\n ----------\n x : list\n Input\n\n Returns\n -------\n list\n A list that after removing it's repeated items\n\n Examples\n -------\n >>> l = [2, 3, 4, 2, 3]\n >>> l = list_remove_repeat(l)\n [2, 3, 4]", "id": "f11194:m9"} {"signature": "def merge_networks(layers=None):", "body": "if layers is None:raise 
Exception(\"\")layer = layers[]all_params = []all_layers = []all_drop = {}for l in layers:all_params.extend(l.all_params)all_layers.extend(l.all_layers)all_drop.update(l.all_drop)layer.all_params = list(all_params)layer.all_layers = list(all_layers)layer.all_drop = dict(all_drop)layer.all_layers = list_remove_repeat(layer.all_layers)layer.all_params = list_remove_repeat(layer.all_params)return layer", "docstring": "Merge all parameters, layers and dropout probabilities to a :class:`Layer`.\n The output of return network is the first network in the list.\n\n Parameters\n ----------\n layers : list of :class:`Layer`\n Merge all parameters, layers and dropout probabilities to the first layer in the list.\n\n Returns\n --------\n :class:`Layer`\n The network after merging all parameters, layers and dropout probabilities to the first network in the list.\n\n Examples\n ---------\n >>> import tensorlayer as tl\n >>> n1 = ...\n >>> n2 = ...\n >>> n1 = tl.layers.merge_networks([n1, n2])", "id": "f11194:m10"} {"signature": "def print_all_variables(train_only=False):", "body": "if train_only:t_vars = tf.trainable_variables()logging.info(\"\")else:t_vars = tf.global_variables()logging.info(\"\")for idx, v in enumerate(t_vars):logging.info(\"\".format(idx, str(v.get_shape()), v.name))", "docstring": "Print information of trainable or all variables,\n without ``tl.layers.initialize_global_variables(sess)``.\n\n Parameters\n ----------\n train_only : boolean\n Whether print trainable variables only.\n - If True, print the trainable variables.\n - If False, print all variables.", "id": "f11194:m11"} {"signature": "def ternary_operation(x):", "body": "g = tf.get_default_graph()with g.gradient_override_map({\"\": \"\"}):threshold = _compute_threshold(x)x = tf.sign(tf.add(tf.sign(tf.add(x, threshold)), tf.sign(tf.add(x, -threshold))))return x", "docstring": "Ternary operation use threshold computed with weights.", "id": "f11194:m18"} {"signature": "@tf.RegisterGradient(\"\")def _quantize_grad(op, grad):", "body": "return tf.clip_by_value(grad, -, )", "docstring": "Clip and binarize tensor using the straight through estimator (STE) for the gradient.", "id": "f11194:m19"} {"signature": "def _compute_threshold(x):", "body": "x_sum = tf.reduce_sum(tf.abs(x), reduction_indices=None, keepdims=False, name=None)threshold = tf.div(x_sum, tf.cast(tf.size(x), tf.float32), name=None)threshold = tf.multiply(, threshold, name=None)return threshold", "docstring": "ref: https://github.com/XJTUWYD/TWN\nComputing the threshold.", "id": "f11194:m22"} {"signature": "def _conv_linear(args, filter_size, num_features, bias, bias_start=, scope=None):", "body": "total_arg_size_depth = shapes = [a.get_shape().as_list() for a in args]for shape in shapes:if len(shape) != :raise ValueError(\"\" % str(shapes))if not shape[]:raise ValueError(\"\" % str(shapes))else:total_arg_size_depth += shape[]dtype = [a.dtype for a in args][]with tf.variable_scope(scope or \"\"):matrix = tf.get_variable(\"\", [filter_size[], filter_size[], total_arg_size_depth, num_features], dtype=dtype)if len(args) == :res = tf.nn.conv2d(args[], matrix, strides=[, , , ], padding='')else:res = tf.nn.conv2d(tf.concat(args, ), matrix, strides=[, , , ], padding='')if not bias:return resbias_term = tf.get_variable(\"\", [num_features], dtype=dtype, initializer=tf.constant_initializer(bias_start, dtype=dtype))return res + bias_term", "docstring": "convolution:\n\n Parameters\n ----------\n args : tensor\n 4D Tensor or a list of 4D, batch x n, Tensors.\n filter_size : 
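`_compute_threshold` and `ternary_operation` above implement the forward pass of ternary weight networks (the TWN reference cited in `_compute_threshold`'s docstring): the threshold is a fixed fraction of the mean absolute weight, every weight is mapped to {-1, 0, +1}, and `compute_alpha` recovers a scale from the surviving weights. A numpy sketch of that forward step; the 0.7 factor is the usual TWN choice and is an assumption here, because the constant is not preserved in the body above:

import numpy as np

w = np.random.randn(3, 3).astype(np.float32)       # hypothetical weight matrix

threshold = 0.7 * np.mean(np.abs(w))               # assumed TWN threshold factor
w_ternary = np.sign(np.sign(w + threshold) + np.sign(w - threshold))

# Scale factor: mean magnitude of the weights that survived the threshold.
mask = np.abs(w) > threshold
alpha = np.abs(w[mask]).mean() if mask.any() else 0.0

print(np.unique(w_ternary), alpha)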
tuple of int\n Filter height and width.\n num_features : int\n Nnumber of features.\n bias_start : float\n Starting value to initialize the bias; 0 by default.\n scope : VariableScope\n For the created subgraph; defaults to \"Linear\".\n\n Returns\n --------\n - A 4D Tensor with shape [batch h w num_features]\n\n Raises\n -------\n - ValueError : if some of the arguments has unspecified or wrong shape.", "id": "f11199:m0"} {"signature": "def advanced_indexing_op(inputs, index):", "body": "batch_size = tf.shape(inputs)[]max_length = tf.shape(inputs)[] dim_size = int(inputs.get_shape()[])index = tf.range(, batch_size) * max_length + (index - )flat = tf.reshape(inputs, [-, dim_size])relevant = tf.gather(flat, index)return relevant", "docstring": "Advanced Indexing for Sequences, returns the outputs by given sequence lengths.\n When return the last output :class:`DynamicRNNLayer` uses it to get the last outputs with the sequence lengths.\n\n Parameters\n -----------\n inputs : tensor for data\n With shape of [batch_size, n_step(max), n_features]\n index : tensor for indexing\n Sequence length in Dynamic RNN. [batch_size]\n\n Examples\n ---------\n >>> import numpy as np\n >>> import tensorflow as tf\n >>> import tensorlayer as tl\n >>> batch_size, max_length, n_features = 3, 5, 2\n >>> z = np.random.uniform(low=-1, high=1, size=[batch_size, max_length, n_features]).astype(np.float32)\n >>> b_z = tf.constant(z)\n >>> sl = tf.placeholder(dtype=tf.int32, shape=[batch_size])\n >>> o = advanced_indexing_op(b_z, sl)\n >>>\n >>> sess = tf.InteractiveSession()\n >>> tl.layers.initialize_global_variables(sess)\n >>>\n >>> order = np.asarray([1,1,2])\n >>> print(\"real\",z[0][order[0]-1], z[1][order[1]-1], z[2][order[2]-1])\n >>> y = sess.run([o], feed_dict={sl:order})\n >>> print(\"given\",order)\n >>> print(\"out\", y)\n real [-0.93021595 0.53820813] [-0.92548317 -0.77135968] [ 0.89952248 0.19149846]\n given [1 1 2]\n out [array([[-0.93021595, 0.53820813],\n [-0.92548317, -0.77135968],\n [ 0.89952248, 0.19149846]], dtype=float32)]\n\n References\n -----------\n - Modified from TFlearn (the original code is used for fixed length rnn), `references `__.", "id": "f11199:m1"} {"signature": "def retrieve_seq_length_op(data):", "body": "with tf.name_scope(''):used = tf.sign(tf.reduce_max(tf.abs(data), ))length = tf.reduce_sum(used, )return tf.cast(length, tf.int32)", "docstring": "An op to compute the length of a sequence from input shape of [batch_size, n_step(max), n_features],\n it can be used when the features of padding (on right hand side) are all zeros.\n\n Parameters\n -----------\n data : tensor\n [batch_size, n_step(max), n_features] with zero padding on right hand side.\n\n Examples\n ---------\n >>> data = [[[1],[2],[0],[0],[0]],\n ... [[1],[2],[3],[0],[0]],\n ... [[1],[2],[6],[1],[0]]]\n >>> data = np.asarray(data)\n >>> print(data.shape)\n (3, 5, 1)\n >>> data = tf.constant(data)\n >>> sl = retrieve_seq_length_op(data)\n >>> sess = tf.InteractiveSession()\n >>> tl.layers.initialize_global_variables(sess)\n >>> y = sl.eval()\n [2 3 4]\n\n Multiple features\n >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]],\n ... [[2,3],[2,4],[3,2],[0,0],[0,0]],\n ... 
[[3,3],[2,2],[5,3],[1,2],[0,0]]]\n >>> print(sl)\n [4 3 4]\n\n References\n ------------\n Borrow from `TFlearn `__.", "id": "f11199:m2"} {"signature": "def retrieve_seq_length_op2(data):", "body": "return tf.reduce_sum(tf.cast(tf.greater(data, tf.zeros_like(data)), tf.int32), )", "docstring": "An op to compute the length of a sequence, from input shape of [batch_size, n_step(max)],\n it can be used when the features of padding (on right hand side) are all zeros.\n\n Parameters\n -----------\n data : tensor\n [batch_size, n_step(max)] with zero padding on right hand side.\n\n Examples\n --------\n >>> data = [[1,2,0,0,0],\n ... [1,2,3,0,0],\n ... [1,2,6,1,0]]\n >>> o = retrieve_seq_length_op2(data)\n >>> sess = tf.InteractiveSession()\n >>> tl.layers.initialize_global_variables(sess)\n >>> print(o.eval())\n [2 3 4]", "id": "f11199:m3"} {"signature": "def retrieve_seq_length_op3(data, pad_val=): ", "body": "data_shape_size = data.get_shape().ndimsif data_shape_size == :return tf.reduce_sum(tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=), dtype=tf.int32), )elif data_shape_size == :return tf.reduce_sum(tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32), )elif data_shape_size == :raise ValueError(\"\")else:raise ValueError(\"\" % (data_shape_size))", "docstring": "Return tensor for sequence length, if input is ``tf.string``.", "id": "f11199:m4"} {"signature": "def target_mask_op(data, pad_val=): ", "body": "data_shape_size = data.get_shape().ndimsif data_shape_size == :return tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=), dtype=tf.int32)elif data_shape_size == :return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32)elif data_shape_size == :raise ValueError(\"\")else:raise ValueError(\"\" % (data_shape_size))", "docstring": "Return tensor for mask, if input is ``tf.string``.", "id": "f11199:m5"} {"signature": "def __call__(self, inputs, state, scope=None):", "body": "raise NotImplementedError(\"\")", "docstring": "Run this RNN cell on inputs, starting from the given state.", "id": "f11199:c2:m0"} {"signature": "@propertydef state_size(self):", "body": "raise NotImplementedError(\"\")", "docstring": "size(s) of state(s) used by this cell.", "id": "f11199:c2:m1"} {"signature": "@propertydef output_size(self):", "body": "raise NotImplementedError(\"\")", "docstring": "Integer or TensorShape: size of outputs produced by this cell.", "id": "f11199:c2:m2"} {"signature": "def zero_state(self, batch_size, dtype=LayersConfig.tf_dtype):", "body": "shape = self.shapenum_features = self.num_featureszeros = tf.zeros([batch_size, shape[], shape[], num_features * ], dtype=dtype)return zeros", "docstring": "Return zero-filled state tensor(s).\n Args:\n batch_size: int, float, or unit Tensor representing the batch size.\n Returns:\n tensor of shape '[batch_size x shape[0] x shape[1] x num_features]\n filled with zeros", "id": "f11199:c2:m3"} {"signature": "def __init__(self, shape, filter_size, num_features, forget_bias=, input_size=None, state_is_tuple=False,act=tf.nn.tanh):", "body": "if input_size is not None:logging.warn(\"\", self)self.shape = shapeself.filter_size = filter_sizeself.num_features = num_featuresself._forget_bias = forget_biasself._state_is_tuple = state_is_tupleself._activation = act", "docstring": "Initialize the basic Conv LSTM cell.", "id": "f11199:c3:m0"} {"signature": "@propertydef state_size(self):", "body": "return (LSTMStateTuple(self._num_units, self._num_units) if self._state_is_tuple else * self._num_units)", "docstring": "State size of the 
LSTMStateTuple.", "id": "f11199:c3:m1"} {"signature": "@propertydef output_size(self):", "body": "return self._num_units", "docstring": "Number of units in outputs.", "id": "f11199:c3:m2"} {"signature": "def __call__(self, inputs, state, scope=None):", "body": "with tf.variable_scope(scope or type(self).__name__): if self._state_is_tuple:c, h = stateelse:c, h = tf.split(state, , )concat = _conv_linear([inputs, h], self.filter_size, self.num_features * , True)i, j, f, o = tf.split(concat, , )new_c = (c * tf.nn.sigmoid(f + self._forget_bias) + tf.nn.sigmoid(i) * self._activation(j))new_h = self._activation(new_c) * tf.nn.sigmoid(o)if self._state_is_tuple:new_state = LSTMStateTuple(new_c, new_h)else:new_state = tf.concat([new_c, new_h], )return new_h, new_state", "docstring": "Long short-term memory cell (LSTM).", "id": "f11199:c3:m3"} {"signature": "def threading_data(data=None, fn=None, thread_count=None, **kwargs):", "body": "def apply_fn(results, i, data, kwargs):results[i] = fn(data, **kwargs)if thread_count is None:results = [None] * len(data)threads = []for i, d in enumerate(data):t = threading.Thread(name='', target=apply_fn, args=(results, i, d, kwargs))t.start()threads.append(t)else:divs = np.linspace(, len(data), thread_count + )divs = np.round(divs).astype(int)results = [None] * thread_countthreads = []for i in range(thread_count):t = threading.Thread(name='', target=apply_fn, args=(results, i, data[divs[i]:divs[i + ]], kwargs))t.start()threads.append(t)for t in threads:t.join()if thread_count is None:try:return np.asarray(results)except Exception:return resultselse:return np.concatenate(results)", "docstring": "Process a batch of data by given function by threading.\n\n Usually be used for data augmentation.\n\n Parameters\n -----------\n data : numpy.array or others\n The data to be processed.\n thread_count : int\n The number of threads to use.\n fn : function\n The function for data processing.\n more args : the args for `fn`\n Ssee Examples below.\n\n Examples\n --------\n Process images.\n\n >>> images, _, _, _ = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3))\n >>> images = tl.prepro.threading_data(images[0:32], tl.prepro.zoom, zoom_range=[0.5, 1])\n\n Customized image preprocessing function.\n\n >>> def distort_img(x):\n >>> x = tl.prepro.flip_axis(x, axis=0, is_random=True)\n >>> x = tl.prepro.flip_axis(x, axis=1, is_random=True)\n >>> x = tl.prepro.crop(x, 100, 100, is_random=True)\n >>> return x\n >>> images = tl.prepro.threading_data(images, distort_img)\n\n Process images and masks together (Usually be used for image segmentation).\n\n >>> X, Y --> [batch_size, row, col, 1]\n >>> data = tl.prepro.threading_data([_ for _ in zip(X, Y)], tl.prepro.zoom_multi, zoom_range=[0.5, 1], is_random=True)\n data --> [batch_size, 2, row, col, 1]\n >>> X_, Y_ = data.transpose((1,0,2,3,4))\n X_, Y_ --> [batch_size, row, col, 1]\n >>> tl.vis.save_image(X_, 'images.png')\n >>> tl.vis.save_image(Y_, 'masks.png')\n\n Process images and masks together by using ``thread_count``.\n\n >>> X, Y --> [batch_size, row, col, 1]\n >>> data = tl.prepro.threading_data(X, tl.prepro.zoom_multi, 8, zoom_range=[0.5, 1], is_random=True)\n data --> [batch_size, 2, row, col, 1]\n >>> X_, Y_ = data.transpose((1,0,2,3,4))\n X_, Y_ --> [batch_size, row, col, 1]\n >>> tl.vis.save_image(X_, 'after.png')\n >>> tl.vis.save_image(Y_, 'before.png')\n\n Customized function for processing images and masks together.\n\n >>> def distort_img(data):\n >>> x, y = data\n >>> x, y = tl.prepro.flip_axis_multi([x, y], 
axis=0, is_random=True)\n >>> x, y = tl.prepro.flip_axis_multi([x, y], axis=1, is_random=True)\n >>> x, y = tl.prepro.crop_multi([x, y], 100, 100, is_random=True)\n >>> return x, y\n\n >>> X, Y --> [batch_size, row, col, channel]\n >>> data = tl.prepro.threading_data([_ for _ in zip(X, Y)], distort_img)\n >>> X_, Y_ = data.transpose((1,0,2,3,4))\n\n Returns\n -------\n list or numpyarray\n The processed results.\n\n References\n ----------\n - `python queue `__\n - `run with limited queue `__", "id": "f11202:m0"} {"signature": "def affine_rotation_matrix(angle=(-, )):", "body": "if isinstance(angle, tuple):theta = np.pi / * np.random.uniform(angle[], angle[])else:theta = np.pi / * anglerotation_matrix = np.array([[np.cos(theta), np.sin(theta), ],[-np.sin(theta), np.cos(theta), ],[, , ]])return rotation_matrix", "docstring": "Create an affine transform matrix for image rotation.\n NOTE: In OpenCV, x is width and y is height.\n\n Parameters\n -----------\n angle : int/float or tuple of two int/float\n Degree to rotate, usually -180 ~ 180.\n - int/float, a fixed angle.\n - tuple of 2 floats/ints, randomly sample a value as the angle between these 2 values.\n\n Returns\n -------\n numpy.array\n An affine transform matrix.", "id": "f11202:m1"} {"signature": "def affine_horizontal_flip_matrix(prob=):", "body": "factor = np.random.uniform(, )if prob >= factor:filp_matrix = np.array([[ - , , ],[ , , ],[ , , ]])return filp_matrixelse:filp_matrix = np.array([[ , , ],[ , , ],[ , , ]])return filp_matrix", "docstring": "Create an affine transformation matrix for image horizontal flipping.\n NOTE: In OpenCV, x is width and y is height.\n\n Parameters\n ----------\n prob : float\n Probability to flip the image. 1.0 means always flip.\n\n Returns\n -------\n numpy.array\n An affine transform matrix.", "id": "f11202:m2"} {"signature": "def affine_vertical_flip_matrix(prob=):", "body": "factor = np.random.uniform(, )if prob >= factor:filp_matrix = np.array([[ , , ],[ , -, ],[ , , ]])return filp_matrixelse:filp_matrix = np.array([[ , , ],[ , , ],[ , , ]])return filp_matrix", "docstring": "Create an affine transformation for image vertical flipping.\n NOTE: In OpenCV, x is width and y is height.\n\n Parameters\n ----------\n prob : float\n Probability to flip the image. 
1.0 means always flip.\n\n Returns\n -------\n numpy.array\n An affine transform matrix.", "id": "f11202:m3"} {"signature": "def affine_shift_matrix(wrg=(-, ), hrg=(-, ), w=, h=):", "body": "if isinstance(wrg, tuple):tx = np.random.uniform(wrg[], wrg[]) * welse:tx = wrg * wif isinstance(hrg, tuple):ty = np.random.uniform(hrg[], hrg[]) * helse:ty = hrg * hshift_matrix = np.array([[, , tx],[, , ty],[, , ]])return shift_matrix", "docstring": "Create an affine transform matrix for image shifting.\n NOTE: In OpenCV, x is width and y is height.\n\n Parameters\n -----------\n wrg : float or tuple of floats\n Range to shift on width axis, -1 ~ 1.\n - float, a fixed distance.\n - tuple of 2 floats, randomly sample a value as the distance between these 2 values.\n hrg : float or tuple of floats\n Range to shift on height axis, -1 ~ 1.\n - float, a fixed distance.\n - tuple of 2 floats, randomly sample a value as the distance between these 2 values.\n w, h : int\n The width and height of the image.\n\n Returns\n -------\n numpy.array\n An affine transform matrix.", "id": "f11202:m4"} {"signature": "def affine_shear_matrix(x_shear=(-, ), y_shear=(-, )):", "body": "if isinstance(x_shear, tuple):x_shear = np.random.uniform(x_shear[], x_shear[])if isinstance(y_shear, tuple):y_shear = np.random.uniform(y_shear[], y_shear[])shear_matrix = np.array([[, x_shear, ],[y_shear, , ],[, , ]])return shear_matrix", "docstring": "Create affine transform matrix for image shearing.\n NOTE: In OpenCV, x is width and y is height.\n\n Parameters\n -----------\n shear : tuple of two floats\n Percentage of shears for width and height directions.\n\n Returns\n -------\n numpy.array\n An affine transform matrix.", "id": "f11202:m5"} {"signature": "def affine_zoom_matrix(zoom_range=(, )):", "body": "if isinstance(zoom_range, (float, int)):scale = zoom_rangeelif isinstance(zoom_range, tuple):scale = np.random.uniform(zoom_range[], zoom_range[])else:raise Exception(\"\")zoom_matrix = np.array([[scale, , ],[, scale, ],[, , ]])return zoom_matrix", "docstring": "Create an affine transform matrix for zooming/scaling an image's height and width.\n OpenCV format, x is width.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n zoom_range : float or tuple of 2 floats\n The zooming/scaling ratio, greater than 1 means larger.\n - float, a fixed ratio.\n - tuple of 2 floats, randomly sample a value as the ratio between these 2 values.\n\n Returns\n -------\n numpy.array\n An affine transform matrix.", "id": "f11202:m6"} {"signature": "def affine_respective_zoom_matrix(w_range=, h_range=):", "body": "if isinstance(h_range, (float, int)):zy = h_rangeelif isinstance(h_range, tuple):zy = np.random.uniform(h_range[], h_range[])else:raise Exception(\"\")if isinstance(w_range, (float, int)):zx = w_rangeelif isinstance(w_range, tuple):zx = np.random.uniform(w_range[], w_range[])else:raise Exception(\"\")zoom_matrix = np.array([[zx, , ],[, zy, ],[, , ]])return zoom_matrix", "docstring": "Get affine transform matrix for zooming/scaling that height and width are changed independently.\n OpenCV format, x is width.\n\n Parameters\n -----------\n w_range : float or tuple of 2 floats\n The zooming/scaling ratio of width, greater than 1 means larger.\n - float, a fixed ratio.\n - tuple of 2 floats, randomly sample a value as the ratio between 2 values.\n h_range : float or tuple of 2 floats\n The zooming/scaling ratio of height, greater than 1 means larger.\n - float, a fixed ratio.\n - tuple of 2 
floats, randomly sample a value as the ratio between 2 values.\n\n Returns\n -------\n numpy.array\n An affine transform matrix.", "id": "f11202:m7"} {"signature": "def transform_matrix_offset_center(matrix, y, x):", "body": "o_x = (x - ) / o_y = (y - ) / offset_matrix = np.array([[, , o_x], [, , o_y], [, , ]])reset_matrix = np.array([[, , -o_x], [, , -o_y], [, , ]])transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)return transform_matrix", "docstring": "Convert the matrix from Cartesian coordinates (the origin in the middle of image) to Image coordinates (the origin on the top-left of image).\n\n Parameters\n ----------\n matrix : numpy.array\n Transform matrix.\n x and y : 2 int\n Size of image.\n\n Returns\n -------\n numpy.array\n The transform matrix.\n\n Examples\n --------\n - See ``tl.prepro.rotation``, ``tl.prepro.shear``, ``tl.prepro.zoom``.", "id": "f11202:m8"} {"signature": "def affine_transform(x, transform_matrix, channel_index=, fill_mode='', cval=, order=):", "body": "x = np.rollaxis(x, channel_index, )final_affine_matrix = transform_matrix[:, :]final_offset = transform_matrix[:, ]channel_images = [ndi.interpolation.affine_transform(x_channel, final_affine_matrix, final_offset, order=order, mode=fill_mode, cval=cval)for x_channel in x]x = np.stack(channel_images, axis=)x = np.rollaxis(x, , channel_index + )return x", "docstring": "Return transformed images by given an affine matrix in Scipy format (x is height).\n\n Parameters\n ----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n transform_matrix : numpy.array\n Transform matrix (offset center), can be generated by ``transform_matrix_offset_center``\n channel_index : int\n Index of channel, default 2.\n fill_mode : str\n Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform `__\n cval : float\n Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0\n order : int\n The order of interpolation. The order has to be in the range 0-5:\n - 0 Nearest-neighbor\n - 1 Bi-linear (default)\n - 2 Bi-quadratic\n - 3 Bi-cubic\n - 4 Bi-quartic\n - 5 Bi-quintic\n - `scipy ndimage affine_transform `__\n\n Returns\n -------\n numpy.array\n A processed image.\n\n Examples\n --------\n >>> M_shear = tl.prepro.affine_shear_matrix(intensity=0.2, is_random=False)\n >>> M_zoom = tl.prepro.affine_zoom_matrix(zoom_range=0.8)\n >>> M_combined = M_shear.dot(M_zoom)\n >>> transform_matrix = tl.prepro.transform_matrix_offset_center(M_combined, h, w)\n >>> result = tl.prepro.affine_transform(image, transform_matrix)", "id": "f11202:m9"} {"signature": "def affine_transform_cv2(x, transform_matrix, flags=None, border_mode=''):", "body": "rows, cols = x.shape[], x.shape[]if flags is None:flags = cv2.INTER_AREAif border_mode is '':border_mode = cv2.BORDER_CONSTANTelif border_mode is '':border_mode = cv2.BORDER_REPLICATEelse:raise Exception(\"\")return cv2.warpAffine(x, transform_matrix[:,:],(cols,rows), flags=flags, borderMode=border_mode)", "docstring": "Return transformed images by given an affine matrix in OpenCV format (x is width). (Powered by OpenCV2, faster than ``tl.prepro.affine_transform``)\n\n Parameters\n ----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n transform_matrix : numpy.array\n A transform matrix, OpenCV format.\n border_mode : str\n - `constant`, pad the image with a constant value (i.e. 
black or 0)\n - `replicate`, the row or column at the very edge of the original is replicated to the extra border.\n\n Examples\n --------\n >>> M_shear = tl.prepro.affine_shear_matrix(intensity=0.2, is_random=False)\n >>> M_zoom = tl.prepro.affine_zoom_matrix(zoom_range=0.8)\n >>> M_combined = M_shear.dot(M_zoom)\n >>> result = tl.prepro.affine_transform_cv2(image, M_combined)", "id": "f11202:m10"} {"signature": "def affine_transform_keypoints(coords_list, transform_matrix):", "body": "coords_result_list = []for coords in coords_list:coords = np.asarray(coords)coords = coords.transpose([, ])coords = np.insert(coords, , , axis=)coords_result = np.matmul(transform_matrix, coords)coords_result = coords_result[:, :].transpose([, ])coords_result_list.append(coords_result)return coords_result_list", "docstring": "Transform keypoint coordinates according to a given affine transform matrix.\n OpenCV format, x is width.\n\n Note that, for pose estimation task, flipping requires maintaining the left and right body information.\n We should not flip the left and right body, so please use ``tl.prepro.keypoint_random_flip``.\n\n Parameters\n -----------\n coords_list : list of list of tuple/list\n The coordinates\n e.g., the keypoint coordinates of every person in an image.\n transform_matrix : numpy.array\n Transform matrix, OpenCV format.\n\n Examples\n ---------\n >>> # 1. get all affine transform matrices\n >>> M_rotate = tl.prepro.affine_rotation_matrix(angle=20)\n >>> M_flip = tl.prepro.affine_horizontal_flip_matrix(prob=1)\n >>> # 2. combine all affine transform matrices to one matrix\n >>> M_combined = dot(M_flip).dot(M_rotate)\n >>> # 3. transfrom the matrix from Cartesian coordinate (the origin in the middle of image)\n >>> # to Image coordinate (the origin on the top-left of image)\n >>> transform_matrix = tl.prepro.transform_matrix_offset_center(M_combined, x=w, y=h)\n >>> # 4. then we can transfrom the image once for all transformations\n >>> result = tl.prepro.affine_transform_cv2(image, transform_matrix) # 76 times faster\n >>> # 5. transform keypoint coordinates\n >>> coords = [[(50, 100), (100, 100), (100, 50), (200, 200)], [(250, 50), (200, 50), (200, 100)]]\n >>> coords_result = tl.prepro.affine_transform_keypoints(coords, transform_matrix)", "id": "f11202:m11"} {"signature": "def projective_transform_by_points(x, src, dst, map_args=None, output_shape=None, order=, mode='', cval=, clip=True,preserve_range=False):", "body": "if map_args is None:map_args = {}if isinstance(src, list): src = np.array(src)if isinstance(dst, list):dst = np.array(dst)if np.max(x) > : x = x / m = transform.ProjectiveTransform()m.estimate(dst, src)warped = transform.warp(x, m, map_args=map_args, output_shape=output_shape, order=order, mode=mode, cval=cval, clip=clip,preserve_range=preserve_range)return warped", "docstring": "Projective transform by given coordinates, usually 4 coordinates.\n\n see `scikit-image `__.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n src : list or numpy\n The original coordinates, usually 4 coordinates of (width, height).\n dst : list or numpy\n The coordinates after transformation, the number of coordinates is the same with src.\n map_args : dictionary or None\n Keyword arguments passed to inverse map.\n output_shape : tuple of 2 int\n Shape of the output image generated. By default the shape of the input image is preserved. 
Note that, even for multi-band images, only rows and columns need to be specified.\n order : int\n The order of interpolation. The order has to be in the range 0-5:\n - 0 Nearest-neighbor\n - 1 Bi-linear (default)\n - 2 Bi-quadratic\n - 3 Bi-cubic\n - 4 Bi-quartic\n - 5 Bi-quintic\n mode : str\n One of `constant` (default), `edge`, `symmetric`, `reflect` or `wrap`.\n Points outside the boundaries of the input are filled according to the given mode. Modes match the behaviour of numpy.pad.\n cval : float\n Used in conjunction with mode `constant`, the value outside the image boundaries.\n clip : boolean\n Whether to clip the output to the range of values of the input image. This is enabled by default, since higher order interpolation may produce values outside the given input range.\n preserve_range : boolean\n Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float.\n\n Returns\n -------\n numpy.array\n A processed image.\n\n Examples\n --------\n Assume X is an image from CIFAR-10, i.e. shape == (32, 32, 3)\n\n >>> src = [[0,0],[0,32],[32,0],[32,32]] # [w, h]\n >>> dst = [[10,10],[0,32],[32,0],[32,32]]\n >>> x = tl.prepro.projective_transform_by_points(X, src, dst)\n\n References\n -----------\n - `scikit-image : geometric transformations `__\n - `scikit-image : examples `__", "id": "f11202:m12"} {"signature": "def rotation(x, rg=, is_random=False, row_index=, col_index=, channel_index=, fill_mode='', cval=, order=):", "body": "if is_random:theta = np.pi / * np.random.uniform(-rg, rg)else:theta = np.pi / * rgrotation_matrix = np.array([[np.cos(theta), -np.sin(theta), ], [np.sin(theta), np.cos(theta), ], [, , ]])h, w = x.shape[row_index], x.shape[col_index]transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)x = affine_transform(x, transform_matrix, channel_index, fill_mode, cval, order)return x", "docstring": "Rotate an image randomly or non-randomly.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n rg : int or float\n Degree to rotate, usually 0 ~ 180.\n is_random : boolean\n If True, randomly rotate. Default is False\n row_index col_index and channel_index : int\n Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).\n fill_mode : str\n Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform `__\n cval : float\n Value used for points outside the boundaries of the input if mode=`constant`. Default is 0.0\n order : int\n The order of interpolation. The order has to be in the range 0-5. 
See ``tl.prepro.affine_transform`` and `scipy ndimage affine_transform `__\n\n Returns\n -------\n numpy.array\n A processed image.\n\n Examples\n ---------\n >>> x --> [row, col, 1]\n >>> x = tl.prepro.rotation(x, rg=40, is_random=False)\n >>> tl.vis.save_image(x, 'im.png')", "id": "f11202:m13"} {"signature": "def rotation_multi(x, rg=, is_random=False, row_index=, col_index=, channel_index=, fill_mode='', cval=, order=):", "body": "if is_random:theta = np.pi / * np.random.uniform(-rg, rg)else:theta = np.pi / * rgrotation_matrix = np.array([[np.cos(theta), -np.sin(theta), ], [np.sin(theta), np.cos(theta), ], [, , ]])h, w = x[].shape[row_index], x[].shape[col_index]transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)results = []for data in x:results.append(affine_transform(data, transform_matrix, channel_index, fill_mode, cval, order))return np.asarray(results)", "docstring": "Rotate multiple images with the same arguments, randomly or non-randomly.\n Usually be used for image segmentation which x=[X, Y], X and Y should be matched.\n\n Parameters\n -----------\n x : list of numpy.array\n List of images with dimension of [n_images, row, col, channel] (default).\n others : args\n See ``tl.prepro.rotation``.\n\n Returns\n -------\n numpy.array\n A list of processed images.\n\n Examples\n --------\n >>> x, y --> [row, col, 1] greyscale\n >>> x, y = tl.prepro.rotation_multi([x, y], rg=90, is_random=False)", "id": "f11202:m14"} {"signature": "def crop(x, wrg, hrg, is_random=False, row_index=, col_index=):", "body": "h, w = x.shape[row_index], x.shape[col_index]if (h < hrg) or (w < wrg):raise AssertionError(\"\")if is_random:h_offset = int(np.random.uniform(, h - hrg))w_offset = int(np.random.uniform(, w - wrg))return x[h_offset:hrg + h_offset, w_offset:wrg + w_offset]else: h_offset = int(np.floor((h - hrg) / ))w_offset = int(np.floor((w - wrg) / ))h_end = h_offset + hrgw_end = w_offset + wrgreturn x[h_offset:h_end, w_offset:w_end]", "docstring": "Randomly or centrally crop an image.\n\n Parameters\n ----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n wrg : int\n Size of width.\n hrg : int\n Size of height.\n is_random : boolean,\n If True, randomly crop, else central crop. 
Default is False.\n row_index: int\n index of row.\n col_index: int\n index of column.\n\n Returns\n -------\n numpy.array\n A processed image.", "id": "f11202:m15"} {"signature": "def crop_multi(x, wrg, hrg, is_random=False, row_index=, col_index=):", "body": "h, w = x[].shape[row_index], x[].shape[col_index]if (h < hrg) or (w < wrg):raise AssertionError(\"\")if is_random:h_offset = int(np.random.uniform(, h - hrg))w_offset = int(np.random.uniform(, w - wrg))results = []for data in x:results.append(data[h_offset:hrg + h_offset, w_offset:wrg + w_offset])return np.asarray(results)else:h_offset = (h - hrg) / w_offset = (w - wrg) / results = []for data in x:results.append(data[h_offset:h - h_offset, w_offset:w - w_offset])return np.asarray(results)", "docstring": "Randomly or centrally crop multiple images.\n\n Parameters\n ----------\n x : list of numpy.array\n List of images with dimension of [n_images, row, col, channel] (default).\n others : args\n See ``tl.prepro.crop``.\n\n Returns\n -------\n numpy.array\n A list of processed images.", "id": "f11202:m16"} {"signature": "def flip_axis(x, axis=, is_random=False):", "body": "if is_random:factor = np.random.uniform(-, )if factor > :x = np.asarray(x).swapaxes(axis, )x = x[::-, ...]x = x.swapaxes(, axis)return xelse:return xelse:x = np.asarray(x).swapaxes(axis, )x = x[::-, ...]x = x.swapaxes(, axis)return x", "docstring": "Flip the axis of an image, such as flip left and right, up and down, randomly or non-randomly,\n\n Parameters\n ----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n axis : int\n Which axis to flip.\n - 0, flip up and down\n - 1, flip left and right\n - 2, flip channel\n is_random : boolean\n If True, randomly flip. Default is False.\n\n Returns\n -------\n numpy.array\n A processed image.", "id": "f11202:m17"} {"signature": "def flip_axis_multi(x, axis, is_random=False):", "body": "if is_random:factor = np.random.uniform(-, )if factor > :results = []for data in x:data = np.asarray(data).swapaxes(axis, )data = data[::-, ...]data = data.swapaxes(, axis)results.append(data)return np.asarray(results)else:return np.asarray(x)else:results = []for data in x:data = np.asarray(data).swapaxes(axis, )data = data[::-, ...]data = data.swapaxes(, axis)results.append(data)return np.asarray(results)", "docstring": "Flip the axises of multiple images together, such as flip left and right, up and down, randomly or non-randomly,\n\n Parameters\n -----------\n x : list of numpy.array\n List of images with dimension of [n_images, row, col, channel] (default).\n others : args\n See ``tl.prepro.flip_axis``.\n\n Returns\n -------\n numpy.array\n A list of processed images.", "id": "f11202:m18"} {"signature": "def shift(x, wrg=, hrg=, is_random=False, row_index=, col_index=, channel_index=, fill_mode='', cval=,order=):", "body": "h, w = x.shape[row_index], x.shape[col_index]if is_random:tx = np.random.uniform(-hrg, hrg) * hty = np.random.uniform(-wrg, wrg) * welse:tx, ty = hrg * h, wrg * wtranslation_matrix = np.array([[, , tx], [, , ty], [, , ]])transform_matrix = translation_matrix x = affine_transform(x, transform_matrix, channel_index, fill_mode, cval, order)return x", "docstring": "Shift an image randomly or non-randomly.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n wrg : float\n Percentage of shift in axis x, usually -0.25 ~ 0.25.\n hrg : float\n Percentage of shift in axis y, usually -0.25 ~ 0.25.\n is_random : boolean\n If True, randomly 
shift. Default is False.\n row_index col_index and channel_index : int\n Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).\n fill_mode : str\n Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform `__\n cval : float\n Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0.\n order : int\n The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.affine_transform`` and `scipy ndimage affine_transform `__\n\n Returns\n -------\n numpy.array\n A processed image.", "id": "f11202:m19"} {"signature": "def shift_multi(x, wrg=, hrg=, is_random=False, row_index=, col_index=, channel_index=, fill_mode='', cval=,order=):", "body": "h, w = x[].shape[row_index], x[].shape[col_index]if is_random:tx = np.random.uniform(-hrg, hrg) * hty = np.random.uniform(-wrg, wrg) * welse:tx, ty = hrg * h, wrg * wtranslation_matrix = np.array([[, , tx], [, , ty], [, , ]])transform_matrix = translation_matrix results = []for data in x:results.append(affine_transform(data, transform_matrix, channel_index, fill_mode, cval, order))return np.asarray(results)", "docstring": "Shift images with the same arguments, randomly or non-randomly.\n Usually be used for image segmentation which x=[X, Y], X and Y should be matched.\n\n Parameters\n -----------\n x : list of numpy.array\n List of images with dimension of [n_images, row, col, channel] (default).\n others : args\n See ``tl.prepro.shift``.\n\n Returns\n -------\n numpy.array\n A list of processed images.", "id": "f11202:m20"} {"signature": "def shear(x, intensity=, is_random=False, row_index=, col_index=, channel_index=, fill_mode='', cval=,order=):", "body": "if is_random:shear = np.random.uniform(-intensity, intensity)else:shear = intensityshear_matrix = np.array([[, -np.sin(shear), ], [, np.cos(shear), ], [, , ]])h, w = x.shape[row_index], x.shape[col_index]transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)x = affine_transform(x, transform_matrix, channel_index, fill_mode, cval, order)return x", "docstring": "Shear an image randomly or non-randomly.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n intensity : float\n Percentage of shear, usually -0.5 ~ 0.5 (is_random==True), 0 ~ 0.5 (is_random==False),\n you can have a quick try by shear(X, 1).\n is_random : boolean\n If True, randomly shear. Default is False.\n row_index col_index and channel_index : int\n Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).\n fill_mode : str\n Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see and `scipy ndimage affine_transform `__\n cval : float\n Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0.\n order : int\n The order of interpolation. The order has to be in the range 0-5. 
See ``tl.prepro.affine_transform`` and `scipy ndimage affine_transform `__\n\n Returns\n -------\n numpy.array\n A processed image.\n\n References\n -----------\n - `Affine transformation `__", "id": "f11202:m21"} {"signature": "def shear_multi(x, intensity=, is_random=False, row_index=, col_index=, channel_index=, fill_mode='', cval=,order=):", "body": "if is_random:shear = np.random.uniform(-intensity, intensity)else:shear = intensityshear_matrix = np.array([[, -np.sin(shear), ], [, np.cos(shear), ], [, , ]])h, w = x[].shape[row_index], x[].shape[col_index]transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)results = []for data in x:results.append(affine_transform(data, transform_matrix, channel_index, fill_mode, cval, order))return np.asarray(results)", "docstring": "Shear images with the same arguments, randomly or non-randomly.\n Usually be used for image segmentation which x=[X, Y], X and Y should be matched.\n\n Parameters\n -----------\n x : list of numpy.array\n List of images with dimension of [n_images, row, col, channel] (default).\n others : args\n See ``tl.prepro.shear``.\n\n Returns\n -------\n numpy.array\n A list of processed images.", "id": "f11202:m22"} {"signature": "def shear2(x, shear=(, ), is_random=False, row_index=, col_index=, channel_index=, fill_mode='', cval=,order=):", "body": "if len(shear) != :raise AssertionError(\"\")if isinstance(shear, tuple):shear = list(shear)if is_random:shear[] = np.random.uniform(-shear[], shear[])shear[] = np.random.uniform(-shear[], shear[])shear_matrix = np.array([[, shear[], ],[shear[], , ],[, , ]])h, w = x.shape[row_index], x.shape[col_index]transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)x = affine_transform(x, transform_matrix, channel_index, fill_mode, cval, order)return x", "docstring": "Shear an image randomly or non-randomly.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n shear : tuple of two floats\n Percentage of shear for height and width direction (0, 1).\n is_random : boolean\n If True, randomly shear. Default is False.\n row_index col_index and channel_index : int\n Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).\n fill_mode : str\n Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform `__\n cval : float\n Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0.\n order : int\n The order of interpolation. The order has to be in the range 0-5. 
See ``tl.prepro.affine_transform`` and `scipy ndimage affine_transform `__\n\n Returns\n -------\n numpy.array\n A processed image.\n\n References\n -----------\n - `Affine transformation `__", "id": "f11202:m23"} {"signature": "def shear_multi2(x, shear=(, ), is_random=False, row_index=, col_index=, channel_index=, fill_mode='', cval=,order=):", "body": "if len(shear) != :raise AssertionError(\"\")if isinstance(shear, tuple):shear = list(shear)if is_random:shear[] = np.random.uniform(-shear[], shear[])shear[] = np.random.uniform(-shear[], shear[])shear_matrix = np.array([[, shear[], ], [shear[], , ], [, , ]])h, w = x[].shape[row_index], x[].shape[col_index]transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)results = []for data in x:results.append(affine_transform(data, transform_matrix, channel_index, fill_mode, cval, order))return np.asarray(results)", "docstring": "Shear images with the same arguments, randomly or non-randomly.\n Usually be used for image segmentation which x=[X, Y], X and Y should be matched.\n\n Parameters\n -----------\n x : list of numpy.array\n List of images with dimension of [n_images, row, col, channel] (default).\n others : args\n See ``tl.prepro.shear2``.\n\n Returns\n -------\n numpy.array\n A list of processed images.", "id": "f11202:m24"} {"signature": "def swirl(x, center=None, strength=, radius=, rotation=, output_shape=None, order=, mode='', cval=,clip=True, preserve_range=False, is_random=False):", "body": "if radius == :raise AssertionError(\"\")rotation = np.pi / * rotationif is_random:center_h = int(np.random.uniform(, x.shape[]))center_w = int(np.random.uniform(, x.shape[]))center = (center_h, center_w)strength = np.random.uniform(, strength)radius = np.random.uniform(, radius)rotation = np.random.uniform(-rotation, rotation)max_v = np.max(x)if max_v > : x = x / max_vswirled = skimage.transform.swirl(x, center=center, strength=strength, radius=radius, rotation=rotation, output_shape=output_shape, order=order,mode=mode, cval=cval, clip=clip, preserve_range=preserve_range)if max_v > :swirled = swirled * max_vreturn swirled", "docstring": "Swirl an image randomly or non-randomly, see `scikit-image swirl API `__\n and `example `__.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n center : tuple or 2 int or None\n Center coordinate of transformation (optional).\n strength : float\n The amount of swirling applied.\n radius : float\n The extent of the swirl in pixels. The effect dies out rapidly beyond radius.\n rotation : float\n Additional rotation applied to the image, usually [0, 360], relates to center.\n output_shape : tuple of 2 int or None\n Shape of the output image generated (height, width). By default the shape of the input image is preserved.\n order : int, optional\n The order of the spline interpolation, default is 1. The order has to be in the range 0-5. See skimage.transform.warp for detail.\n mode : str\n One of `constant` (default), `edge`, `symmetric` `reflect` and `wrap`.\n Points outside the boundaries of the input are filled according to the given mode, with `constant` used as the default. Modes match the behaviour of numpy.pad.\n cval : float\n Used in conjunction with mode `constant`, the value outside the image boundaries.\n clip : boolean\n Whether to clip the output to the range of values of the input image. 
This is enabled by default, since higher order interpolation may produce values outside the given input range.\n preserve_range : boolean\n Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float.\n is_random : boolean,\n If True, random swirl. Default is False.\n - random center = [(0 ~ x.shape[0]), (0 ~ x.shape[1])]\n - random strength = [0, strength]\n - random radius = [1e-10, radius]\n - random rotation = [-rotation, rotation]\n\n Returns\n -------\n numpy.array\n A processed image.\n\n Examples\n ---------\n >>> x --> [row, col, 1] greyscale\n >>> x = tl.prepro.swirl(x, strength=4, radius=100)", "id": "f11202:m25"} {"signature": "def swirl_multi(x, center=None, strength=, radius=, rotation=, output_shape=None, order=, mode='', cval=,clip=True, preserve_range=False, is_random=False):", "body": "if radius == :raise AssertionError(\"\")rotation = np.pi / * rotationif is_random:center_h = int(np.random.uniform(, x[].shape[]))center_w = int(np.random.uniform(, x[].shape[]))center = (center_h, center_w)strength = np.random.uniform(, strength)radius = np.random.uniform(, radius)rotation = np.random.uniform(-rotation, rotation)results = []for data in x:max_v = np.max(data)if max_v > : data = data / max_vswirled = skimage.transform.swirl(data, center=center, strength=strength, radius=radius, rotation=rotation, output_shape=output_shape,order=order, mode=mode, cval=cval, clip=clip, preserve_range=preserve_range)if max_v > :swirled = swirled * max_vresults.append(swirled)return np.asarray(results)", "docstring": "Swirl multiple images with the same arguments, randomly or non-randomly.\n Usually be used for image segmentation which x=[X, Y], X and Y should be matched.\n\n Parameters\n -----------\n x : list of numpy.array\n List of images with dimension of [n_images, row, col, channel] (default).\n others : args\n See ``tl.prepro.swirl``.\n\n Returns\n -------\n numpy.array\n A list of processed images.", "id": "f11202:m26"} {"signature": "def elastic_transform(x, alpha, sigma, mode=\"\", cval=, is_random=False):", "body": "if is_random is False:random_state = np.random.RandomState(None)else:random_state = np.random.RandomState(int(time.time()))is_3d = Falseif len(x.shape) == and x.shape[-] == :x = x[:, :, ]is_3d = Trueelif len(x.shape) == and x.shape[-] != :raise Exception(\"\")if len(x.shape) != :raise AssertionError(\"\")shape = x.shapedx = gaussian_filter((random_state.rand(*shape) * - ), sigma, mode=mode, cval=cval) * alphady = gaussian_filter((random_state.rand(*shape) * - ), sigma, mode=mode, cval=cval) * alphax_, y_ = np.meshgrid(np.arange(shape[]), np.arange(shape[]), indexing='')indices = np.reshape(x_ + dx, (-, )), np.reshape(y_ + dy, (-, ))if is_3d:return map_coordinates(x, indices, order=).reshape((shape[], shape[], ))else:return map_coordinates(x, indices, order=).reshape(shape)", "docstring": "Elastic transformation for image as described in `[Simard2003] `__.\n\n Parameters\n -----------\n x : numpy.array\n A greyscale image.\n alpha : float\n Alpha value for elastic transformation.\n sigma : float or sequence of float\n The smaller the sigma, the more transformation. Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes.\n mode : str\n See `scipy.ndimage.filters.gaussian_filter `__. 
Default is `constant`.\n cval : float,\n Used in conjunction with `mode` of `constant`, the value outside the image boundaries.\n is_random : boolean\n Default is False.\n\n Returns\n -------\n numpy.array\n A processed image.\n\n Examples\n ---------\n >>> x = tl.prepro.elastic_transform(x, alpha=x.shape[1]*3, sigma=x.shape[1]*0.07)\n\n References\n ------------\n - `Github `__.\n - `Kaggle `__", "id": "f11202:m27"} {"signature": "def elastic_transform_multi(x, alpha, sigma, mode=\"\", cval=, is_random=False):", "body": "if is_random is False:random_state = np.random.RandomState(None)else:random_state = np.random.RandomState(int(time.time()))shape = x[].shapeif len(shape) == :shape = (shape[], shape[])new_shape = random_state.rand(*shape)results = []for data in x:is_3d = Falseif len(data.shape) == and data.shape[-] == :data = data[:, :, ]is_3d = Trueelif len(data.shape) == and data.shape[-] != :raise Exception(\"\")if len(data.shape) != :raise AssertionError(\"\")dx = gaussian_filter((new_shape * - ), sigma, mode=mode, cval=cval) * alphady = gaussian_filter((new_shape * - ), sigma, mode=mode, cval=cval) * alphax_, y_ = np.meshgrid(np.arange(shape[]), np.arange(shape[]), indexing='')indices = np.reshape(x_ + dx, (-, )), np.reshape(y_ + dy, (-, ))if is_3d:results.append(map_coordinates(data, indices, order=).reshape((shape[], shape[], )))else:results.append(map_coordinates(data, indices, order=).reshape(shape))return np.asarray(results)", "docstring": "Elastic transformation for images as described in `[Simard2003] `__.\n\n Parameters\n -----------\n x : list of numpy.array\n List of greyscale images.\n others : args\n See ``tl.prepro.elastic_transform``.\n\n Returns\n -------\n numpy.array\n A list of processed images.", "id": "f11202:m28"} {"signature": "def zoom(x, zoom_range=(, ), flags=None, border_mode=''):", "body": "zoom_matrix = affine_zoom_matrix(zoom_range=zoom_range)h, w = x.shape[], x.shape[]transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)x = affine_transform_cv2(x, transform_matrix, flags=flags, border_mode=border_mode)return x", "docstring": "Zooming/Scaling a single image that height and width are changed together.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n zoom_range : float or tuple of 2 floats\n The zooming/scaling ratio, greater than 1 means larger.\n - float, a fixed ratio.\n - tuple of 2 floats, randomly sample a value as the ratio between 2 values.\n border_mode : str\n - `constant`, pad the image with a constant value (i.e. 
black or 0)\n - `replicate`, the row or column at the very edge of the original is replicated to the extra border.\n\n Returns\n -------\n numpy.array\n A processed image.", "id": "f11202:m29"} {"signature": "def respective_zoom(x, h_range=(, ), w_range=(, ), flags=None, border_mode=''):", "body": "zoom_matrix = affine_respective_zoom_matrix(h_range=h_range, w_range=w_range)h, w = x.shape[], x.shape[]transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)x = affine_transform_cv2(x, transform_matrix, flags=flags, border_mode=border_mode) return x", "docstring": "Zooming/Scaling a single image that height and width are changed independently.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n h_range : float or tuple of 2 floats\n The zooming/scaling ratio of height, greater than 1 means larger.\n - float, a fixed ratio.\n - tuple of 2 floats, randomly sample a value as the ratio between 2 values.\n w_range : float or tuple of 2 floats\n The zooming/scaling ratio of width, greater than 1 means larger.\n - float, a fixed ratio.\n - tuple of 2 floats, randomly sample a value as the ratio between 2 values.\n border_mode : str\n - `constant`, pad the image with a constant value (i.e. black or 0)\n - `replicate`, the row or column at the very edge of the original is replicated to the extra border.\n\n Returns\n -------\n numpy.array\n A processed image.", "id": "f11202:m30"} {"signature": "def zoom_multi(x, zoom_range=(, ), flags=None, border_mode=''):", "body": "zoom_matrix = affine_zoom_matrix(zoom_range=zoom_range)results = []for img in x:h, w = img.shape[], img.shape[]transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)results.append(affine_transform_cv2(img, transform_matrix, flags=flags, border_mode=border_mode))return results", "docstring": "Zoom in and out of images with the same arguments, randomly or non-randomly.\n Usually be used for image segmentation which x=[X, Y], X and Y should be matched.\n\n Parameters\n -----------\n x : list of numpy.array\n List of images with dimension of [n_images, row, col, channel] (default).\n others : args\n See ``tl.prepro.zoom``.\n\n Returns\n -------\n numpy.array\n A list of processed images.", "id": "f11202:m31"} {"signature": "def brightness(x, gamma=, gain=, is_random=False):", "body": "if is_random:gamma = np.random.uniform( - gamma, + gamma)x = exposure.adjust_gamma(x, gamma, gain)return x", "docstring": "Change the brightness of a single image, randomly or non-randomly.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n gamma : float\n Non-negative real number. Default value is 1.\n - Smaller than 1 means brighter.\n - If `is_random` is True, gamma in a range of (1-gamma, 1+gamma).\n gain : float\n The constant multiplier. Default value is 1.\n is_random : boolean\n If True, randomly change brightness. 
Default is False.\n\n Returns\n -------\n numpy.array\n A processed image.\n\n References\n -----------\n - `skimage.exposure.adjust_gamma `__\n - `chinese blog `__", "id": "f11202:m32"} {"signature": "def brightness_multi(x, gamma=, gain=, is_random=False):", "body": "if is_random:gamma = np.random.uniform( - gamma, + gamma)results = []for data in x:results.append(exposure.adjust_gamma(data, gamma, gain))return np.asarray(results)", "docstring": "Change the brightness of multiply images, randomly or non-randomly.\n Usually be used for image segmentation which x=[X, Y], X and Y should be matched.\n\n Parameters\n -----------\n x : list of numpyarray\n List of images with dimension of [n_images, row, col, channel] (default).\n others : args\n See ``tl.prepro.brightness``.\n\n Returns\n -------\n numpy.array\n A list of processed images.", "id": "f11202:m33"} {"signature": "def illumination(x, gamma=, contrast=, saturation=, is_random=False):", "body": "if is_random:if not (len(gamma) == len(contrast) == len(saturation) == ):raise AssertionError(\"\")illum_settings = np.random.randint(, ) if illum_settings == : gamma = np.random.uniform(gamma[], ) elif illum_settings == : gamma = np.random.uniform(, gamma[]) else:gamma = im_ = brightness(x, gamma=gamma, gain=, is_random=False)image = PIL.Image.fromarray(im_) contrast_adjust = PIL.ImageEnhance.Contrast(image)image = contrast_adjust.enhance(np.random.uniform(contrast[], contrast[])) saturation_adjust = PIL.ImageEnhance.Color(image)image = saturation_adjust.enhance(np.random.uniform(saturation[], saturation[])) im_ = np.array(image) else:im_ = brightness(x, gamma=gamma, gain=, is_random=False)image = PIL.Image.fromarray(im_) contrast_adjust = PIL.ImageEnhance.Contrast(image)image = contrast_adjust.enhance(contrast)saturation_adjust = PIL.ImageEnhance.Color(image)image = saturation_adjust.enhance(saturation)im_ = np.array(image) return np.asarray(im_)", "docstring": "Perform illumination augmentation for a single image, randomly or non-randomly.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n gamma : float\n Change brightness (the same with ``tl.prepro.brightness``)\n - if is_random=False, one float number, small than one means brighter, greater than one means darker.\n - if is_random=True, tuple of two float numbers, (min, max).\n contrast : float\n Change contrast.\n - if is_random=False, one float number, small than one means blur.\n - if is_random=True, tuple of two float numbers, (min, max).\n saturation : float\n Change saturation.\n - if is_random=False, one float number, small than one means unsaturation.\n - if is_random=True, tuple of two float numbers, (min, max).\n is_random : boolean\n If True, randomly change illumination. 
Default is False.\n\n Returns\n -------\n numpy.array\n A processed image.\n\n Examples\n ---------\n Random\n\n >>> x = tl.prepro.illumination(x, gamma=(0.5, 5.0), contrast=(0.3, 1.0), saturation=(0.7, 1.0), is_random=True)\n\n Non-random\n\n >>> x = tl.prepro.illumination(x, 0.5, 0.6, 0.8, is_random=False)", "id": "f11202:m34"} {"signature": "def rgb_to_hsv(rgb):", "body": "rgb = rgb.astype('')hsv = np.zeros_like(rgb)hsv[..., :] = rgb[..., :]r, g, b = rgb[..., ], rgb[..., ], rgb[..., ]maxc = np.max(rgb[..., :], axis=-)minc = np.min(rgb[..., :], axis=-)hsv[..., ] = maxcmask = maxc != minchsv[mask, ] = (maxc - minc)[mask] / maxc[mask]rc = np.zeros_like(r)gc = np.zeros_like(g)bc = np.zeros_like(b)rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask]gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask]bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask]hsv[..., ] = np.select([r == maxc, g == maxc], [bc - gc, + rc - bc], default= + gc - rc)hsv[..., ] = (hsv[..., ] / ) % return hsv", "docstring": "Input RGB image [0~255] return HSV image [0~1].\n\n Parameters\n ------------\n rgb : numpy.array\n An image with values between 0 and 255.\n\n Returns\n -------\n numpy.array\n A processed image.", "id": "f11202:m35"} {"signature": "def hsv_to_rgb(hsv):", "body": "rgb = np.empty_like(hsv)rgb[..., :] = hsv[..., :]h, s, v = hsv[..., ], hsv[..., ], hsv[..., ]i = (h * ).astype('')f = (h * ) - ip = v * ( - s)q = v * ( - s * f)t = v * ( - s * ( - f))i = i % conditions = [s == , i == , i == , i == , i == , i == ]rgb[..., ] = np.select(conditions, [v, q, p, p, t, v], default=v)rgb[..., ] = np.select(conditions, [v, v, v, q, p, p], default=t)rgb[..., ] = np.select(conditions, [v, p, t, v, v, q], default=p)return rgb.astype('')", "docstring": "Input HSV image [0~1] return RGB image [0~255].\n\n Parameters\n -------------\n hsv : numpy.array\n An image with values between 0.0 and 1.0\n\n Returns\n -------\n numpy.array\n A processed image.", "id": "f11202:m36"} {"signature": "def adjust_hue(im, hout=, is_offset=True, is_clip=True, is_random=False):", "body": "hsv = rgb_to_hsv(im)if is_random:hout = np.random.uniform(-hout, hout)if is_offset:hsv[..., ] += houtelse:hsv[..., ] = houtif is_clip:hsv[..., ] = np.clip(hsv[..., ], , np.inf) rgb = hsv_to_rgb(hsv)return rgb", "docstring": "Adjust hue of an RGB image.\n\n This is a convenience method that converts an RGB image to float representation, converts it to HSV, add an offset to the hue channel, converts back to RGB and then back to the original data type.\n For TF, see `tf.image.adjust_hue `__.and `tf.image.random_hue `__.\n\n Parameters\n -----------\n im : numpy.array\n An image with values between 0 and 255.\n hout : float\n The scale value for adjusting hue.\n - If is_offset is False, set all hue values to this value. 0 is red; 0.33 is green; 0.66 is blue.\n - If is_offset is True, add this value as the offset to the hue channel.\n is_offset : boolean\n Whether `hout` is added on HSV as offset or not. Default is True.\n is_clip : boolean\n If HSV value smaller than 0, set to 0. Default is True.\n is_random : boolean\n If True, randomly change hue. 
Default is False.\n\n Returns\n -------\n numpy.array\n A processed image.\n\n Examples\n ---------\n Random, add a random value between -0.2 and 0.2 as the offset to every hue values.\n\n >>> im_hue = tl.prepro.adjust_hue(image, hout=0.2, is_offset=True, is_random=False)\n\n Non-random, make all hue to green.\n\n >>> im_green = tl.prepro.adjust_hue(image, hout=0.66, is_offset=False, is_random=False)\n\n References\n -----------\n - `tf.image.random_hue `__.\n - `tf.image.adjust_hue `__.\n - `StackOverflow: Changing image hue with python PIL `__.", "id": "f11202:m37"} {"signature": "def imresize(x, size=None, interp='', mode=None):", "body": "if size is None:size = [, ]if x.shape[-] == :x = scipy.misc.imresize(x[:, :, ], size, interp=interp, mode=mode)return x[:, :, np.newaxis]else:return scipy.misc.imresize(x, size, interp=interp, mode=mode)", "docstring": "Resize an image by given output size and method.\n\n Warning, this function will rescale the value to [0, 255].\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n size : list of 2 int or None\n For height and width.\n interp : str\n Interpolation method for re-sizing (`nearest`, `lanczos`, `bilinear`, `bicubic` (default) or `cubic`).\n mode : str\n The PIL image mode (`P`, `L`, etc.) to convert image before resizing.\n\n Returns\n -------\n numpy.array\n A processed image.\n\n References\n ------------\n - `scipy.misc.imresize `__", "id": "f11202:m38"} {"signature": "def pixel_value_scale(im, val=, clip=None, is_random=False):", "body": "clip = clip if clip is not None else (-np.inf, np.inf)if is_random:scale = + np.random.uniform(-val, val)im = im * scaleelse:im = im * valif len(clip) == :im = np.clip(im, clip[], clip[])else:raise Exception(\"\")return im", "docstring": "Scales each value in the pixels of the image.\n\n Parameters\n -----------\n im : numpy.array\n An image.\n val : float\n The scale value for changing pixel value.\n - If is_random=False, multiply this value with all pixels.\n - If is_random=True, multiply a value between [1-val, 1+val] with all pixels.\n clip : tuple of 2 numbers\n The minimum and maximum value.\n is_random : boolean\n If True, see ``val``.\n\n Returns\n -------\n numpy.array\n A processed image.\n\n Examples\n ----------\n Random\n\n >>> im = pixel_value_scale(im, 0.1, [0, 255], is_random=True)\n\n Non-random\n\n >>> im = pixel_value_scale(im, 0.9, [0, 255], is_random=False)", "id": "f11202:m39"} {"signature": "def samplewise_norm(x, rescale=None, samplewise_center=False, samplewise_std_normalization=False, channel_index=, epsilon=):", "body": "if rescale:x *= rescaleif x.shape[channel_index] == :if samplewise_center:x = x - np.mean(x)if samplewise_std_normalization:x = x / np.std(x)return xelif x.shape[channel_index] == :if samplewise_center:x = x - np.mean(x, axis=channel_index, keepdims=True)if samplewise_std_normalization:x = x / (np.std(x, axis=channel_index, keepdims=True) + epsilon)return xelse:raise Exception(\"\" % x.shape[channel_index])", "docstring": "Normalize an image by rescale, samplewise centering and samplewise centering in order.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n rescale : float\n Rescaling factor. 
If None or 0, no rescaling is applied, otherwise we multiply the data by the value provided (before applying any other transformation)\n samplewise_center : boolean\n If True, set each sample mean to 0.\n samplewise_std_normalization : boolean\n If True, divide each input by its std.\n epsilon : float\n A small position value for dividing standard deviation.\n\n Returns\n -------\n numpy.array\n A processed image.\n\n Examples\n --------\n >>> x = samplewise_norm(x, samplewise_center=True, samplewise_std_normalization=True)\n >>> print(x.shape, np.mean(x), np.std(x))\n (160, 176, 1), 0.0, 1.0\n\n Notes\n ------\n When samplewise_center and samplewise_std_normalization are True.\n - For greyscale image, every pixels are subtracted and divided by the mean and std of whole image.\n - For RGB image, every pixels are subtracted and divided by the mean and std of this pixel i.e. the mean and std of a pixel is 0 and 1.", "id": "f11202:m40"} {"signature": "def featurewise_norm(x, mean=None, std=None, epsilon=):", "body": "if mean:x = x - meanif std:x = x / (std + epsilon)return x", "docstring": "Normalize every pixels by the same given mean and std, which are usually\n compute from all examples.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n mean : float\n Value for subtraction.\n std : float\n Value for division.\n epsilon : float\n A small position value for dividing standard deviation.\n\n Returns\n -------\n numpy.array\n A processed image.", "id": "f11202:m41"} {"signature": "def get_zca_whitening_principal_components_img(X):", "body": "flatX = np.reshape(X, (X.shape[], X.shape[] * X.shape[] * X.shape[]))tl.logging.info(\"\")sigma = np.dot(flatX.T, flatX) / flatX.shape[]tl.logging.info(\"\")U, S, _ = linalg.svd(sigma) tl.logging.info(\"\")principal_components = np.dot(np.dot(U, np.diag( / np.sqrt(S + ))), U.T)return principal_components", "docstring": "Return the ZCA whitening principal components matrix.\n\n Parameters\n -----------\n x : numpy.array\n Batch of images with dimension of [n_example, row, col, channel] (default).\n\n Returns\n -------\n numpy.array\n A processed image.", "id": "f11202:m42"} {"signature": "def zca_whitening(x, principal_components):", "body": "flatx = np.reshape(x, (x.size))whitex = np.dot(flatx, principal_components)x = np.reshape(whitex, (x.shape[], x.shape[], x.shape[]))return x", "docstring": "Apply ZCA whitening on an image by given principal components matrix.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n principal_components : matrix\n Matrix from ``get_zca_whitening_principal_components_img``.\n\n Returns\n -------\n numpy.array\n A processed image.", "id": "f11202:m43"} {"signature": "def channel_shift(x, intensity, is_random=False, channel_index=):", "body": "if is_random:factor = np.random.uniform(-intensity, intensity)else:factor = intensityx = np.rollaxis(x, channel_index, )min_x, max_x = np.min(x), np.max(x)channel_images = [np.clip(x_channel + factor, min_x, max_x) for x_channel in x]x = np.stack(channel_images, axis=)x = np.rollaxis(x, , channel_index + )return x", "docstring": "Shift the channels of an image, randomly or non-randomly, see `numpy.rollaxis `__.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] (default).\n intensity : float\n Intensity of shifting.\n is_random : boolean\n If True, randomly shift. Default is False.\n channel_index : int\n Index of channel. 
Default is 2.\n\n Returns\n -------\n numpy.array\n A processed image.", "id": "f11202:m44"} {"signature": "def channel_shift_multi(x, intensity, is_random=False, channel_index=):", "body": "if is_random:factor = np.random.uniform(-intensity, intensity)else:factor = intensityresults = []for data in x:data = np.rollaxis(data, channel_index, )min_x, max_x = np.min(data), np.max(data)channel_images = [np.clip(x_channel + factor, min_x, max_x) for x_channel in x]data = np.stack(channel_images, axis=)data = np.rollaxis(x, , channel_index + )results.append(data)return np.asarray(results)", "docstring": "Shift the channels of images with the same arguments, randomly or non-randomly, see `numpy.rollaxis `__.\n Usually be used for image segmentation which x=[X, Y], X and Y should be matched.\n\n Parameters\n -----------\n x : list of numpy.array\n List of images with dimension of [n_images, row, col, channel] (default).\n others : args\n See ``tl.prepro.channel_shift``.\n\n Returns\n -------\n numpy.array\n A list of processed images.", "id": "f11202:m45"} {"signature": "def drop(x, keep=):", "body": "if len(x.shape) == :if x.shape[-] == : img_size = x.shapemask = np.random.binomial(n=, p=keep, size=x.shape[:-])for i in range():x[:, :, i] = np.multiply(x[:, :, i], mask)elif x.shape[-] == : img_size = x.shapex = np.multiply(x, np.random.binomial(n=, p=keep, size=img_size))else:raise Exception(\"\".format(x.shape))elif len(x.shape) == or : img_size = x.shapex = np.multiply(x, np.random.binomial(n=, p=keep, size=img_size))else:raise Exception(\"\".format(x.shape))return x", "docstring": "Randomly set some pixels to zero by a given keeping probability.\n\n Parameters\n -----------\n x : numpy.array\n An image with dimension of [row, col, channel] or [row, col].\n keep : float\n The keeping probability (0, 1), the lower more values will be set to zero.\n\n Returns\n -------\n numpy.array\n A processed image.", "id": "f11202:m46"} {"signature": "def array_to_img(x, dim_ordering=(, , ), scale=True):", "body": "x = x.transpose(dim_ordering)if scale:x += max(-np.min(x), )x_max = np.max(x)if x_max != :x = x / x_maxx *= if x.shape[] == :return PIL.Image.fromarray(x.astype(''), '')elif x.shape[] == :return PIL.Image.fromarray(x[:, :, ].astype(''), '')else:raise Exception('', x.shape[])", "docstring": "Converts a numpy array to PIL image object (uint8 format).\n\n Parameters\n ----------\n x : numpy.array\n An image with dimension of 3 and channels of 1 or 3.\n dim_ordering : tuple of 3 int\n Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).\n scale : boolean\n If True, converts image to [0, 255] from any range of value like [-1, 2]. Default is True.\n\n Returns\n -------\n PIL.image\n An image.\n\n References\n -----------\n `PIL Image.fromarray `__", "id": "f11202:m47"} {"signature": "def find_contours(x, level=, fully_connected='', positive_orientation=''):", "body": "return skimage.measure.find_contours(x, level, fully_connected=fully_connected, positive_orientation=positive_orientation)", "docstring": "Find iso-valued contours in a 2D array for a given level value, returns list of (n, 2)-ndarrays\n see `skimage.measure.find_contours `__.\n\n Parameters\n ------------\n x : 2D ndarray of double.\n Input data in which to find contours.\n level : float\n Value along which to find contours in the array.\n fully_connected : str\n Either `low` or `high`. 
Indicates whether array elements below the given level value are to be considered fully-connected (and hence elements above the value will only be face connected), or vice-versa. (See notes below for details.)\n positive_orientation : str\n Either `low` or `high`. Indicates whether the output contours will produce positively-oriented polygons around islands of low- or high-valued elements. If `low` then contours will wind counter-clockwise around elements below the iso-value. Alternately, this means that low-valued elements are always on the left of the contour.\n\n Returns\n --------\n list of (n,2)-ndarrays\n Each contour is an ndarray of shape (n, 2), consisting of n (row, column) coordinates along the contour.", "id": "f11202:m48"} {"signature": "def pt2map(list_points=None, size=(, ), val=):", "body": "if list_points is None:raise Exception(\"\")i_m = np.zeros(size)if len(list_points) == :return i_mfor xx in list_points:for x in xx:i_m[int(np.round(x[]))][int(np.round(x[]))] = valreturn i_m", "docstring": "Inputs a list of points, return a 2D image.\n\n Parameters\n --------------\n list_points : list of 2 int\n [[x, y], [x, y]..] for point coordinates.\n size : tuple of 2 int\n (w, h) for output size.\n val : float or int\n For the contour value.\n\n Returns\n -------\n numpy.array\n An image.", "id": "f11202:m49"} {"signature": "def binary_dilation(x, radius=):", "body": "mask = disk(radius)x = _binary_dilation(x, selem=mask)return x", "docstring": "Return fast binary morphological dilation of an image.\n see `skimage.morphology.binary_dilation `__.\n\n Parameters\n -----------\n x : 2D array\n A binary image.\n radius : int\n For the radius of mask.\n\n Returns\n -------\n numpy.array\n A processed binary image.", "id": "f11202:m50"} {"signature": "def dilation(x, radius=):", "body": "mask = disk(radius)x = dilation(x, selem=mask)return x", "docstring": "Return greyscale morphological dilation of an image,\n see `skimage.morphology.dilation `__.\n\n Parameters\n -----------\n x : 2D array\n An greyscale image.\n radius : int\n For the radius of mask.\n\n Returns\n -------\n numpy.array\n A processed greyscale image.", "id": "f11202:m51"} {"signature": "def binary_erosion(x, radius=):", "body": "mask = disk(radius)x = _binary_erosion(x, selem=mask)return x", "docstring": "Return binary morphological erosion of an image,\n see `skimage.morphology.binary_erosion `__.\n\n Parameters\n -----------\n x : 2D array\n A binary image.\n radius : int\n For the radius of mask.\n\n Returns\n -------\n numpy.array\n A processed binary image.", "id": "f11202:m52"} {"signature": "def erosion(x, radius=):", "body": "mask = disk(radius)x = _erosion(x, selem=mask)return x", "docstring": "Return greyscale morphological erosion of an image,\n see `skimage.morphology.erosion `__.\n\n Parameters\n -----------\n x : 2D array\n A greyscale image.\n radius : int\n For the radius of mask.\n\n Returns\n -------\n numpy.array\n A processed greyscale image.", "id": "f11202:m53"} {"signature": "def obj_box_coords_rescale(coords=None, shape=None):", "body": "if coords is None:coords = []if shape is None:shape = [, ]imh, imw = shape[], shape[]imh = imh * imw = imw * coords_new = list()for coord in coords:if len(coord) != :raise AssertionError(\"\")x = coord[] / imwy = coord[] / imhw = coord[] / imwh = coord[] / imhcoords_new.append([x, y, w, h])return coords_new", "docstring": "Scale down a list of coordinates from pixel unit to the ratio of image size i.e. 
in the range of [0, 1].\n\n Parameters\n ------------\n coords : list of list of 4 ints or None\n For coordinates of more than one images .e.g.[[x, y, w, h], [x, y, w, h], ...].\n shape : list of 2 int or None\n \u3010height, width].\n\n Returns\n -------\n list of list of 4 numbers\n A list of new bounding boxes.\n\n\n Examples\n ---------\n >>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50], [10, 10, 20, 20]], shape=[100, 100])\n >>> print(coords)\n [[0.3, 0.4, 0.5, 0.5], [0.1, 0.1, 0.2, 0.2]]\n >>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[50, 100])\n >>> print(coords)\n [[0.3, 0.8, 0.5, 1.0]]\n >>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[100, 200])\n >>> print(coords)\n [[0.15, 0.4, 0.25, 0.5]]\n\n Returns\n -------\n list of 4 numbers\n New coordinates.", "id": "f11202:m54"} {"signature": "def obj_box_coord_rescale(coord=None, shape=None):", "body": "if coord is None:coord = []if shape is None:shape = [, ]return obj_box_coords_rescale(coords=[coord], shape=shape)[]", "docstring": "Scale down one coordinates from pixel unit to the ratio of image size i.e. in the range of [0, 1].\n It is the reverse process of ``obj_box_coord_scale_to_pixelunit``.\n\n Parameters\n ------------\n coords : list of 4 int or None\n One coordinates of one image e.g. [x, y, w, h].\n shape : list of 2 int or None\n For [height, width].\n\n Returns\n -------\n list of 4 numbers\n New bounding box.\n\n Examples\n ---------\n >>> coord = tl.prepro.obj_box_coord_rescale(coord=[30, 40, 50, 50], shape=[100, 100])\n [0.3, 0.4, 0.5, 0.5]", "id": "f11202:m55"} {"signature": "def obj_box_coord_scale_to_pixelunit(coord, shape=None):", "body": "if shape is None:shape = [, ]imh, imw = shape[:]x = int(coord[] * imw)x2 = int(coord[] * imw)y = int(coord[] * imh)y2 = int(coord[] * imh)return [x, y, x2, y2]", "docstring": "Convert one coordinate [x, y, w (or x2), h (or y2)] in ratio format to image coordinate format.\n It is the reverse process of ``obj_box_coord_rescale``.\n\n Parameters\n -----------\n coord : list of 4 float\n One coordinate of one image [x, y, w (or x2), h (or y2)] in ratio format, i.e value range [0~1].\n shape : tuple of 2 or None\n For [height, width].\n\n Returns\n -------\n list of 4 numbers\n New bounding box.\n\n Examples\n ---------\n >>> x, y, x2, y2 = tl.prepro.obj_box_coord_scale_to_pixelunit([0.2, 0.3, 0.5, 0.7], shape=(100, 200, 3))\n [40, 30, 100, 70]", "id": "f11202:m56"} {"signature": "def obj_box_coord_centroid_to_upleft_butright(coord, to_int=False):", "body": "if len(coord) != :raise AssertionError(\"\")x_center, y_center, w, h = coordx = x_center - w / y = y_center - h / x2 = x + wy2 = y + hif to_int:return [int(x), int(y), int(x2), int(y2)]else:return [x, y, x2, y2]", "docstring": "Convert one coordinate [x_center, y_center, w, h] to [x1, y1, x2, y2] in up-left and botton-right format.\n\n Parameters\n ------------\n coord : list of 4 int/float\n One coordinate.\n to_int : boolean\n Whether to convert output as integer.\n\n Returns\n -------\n list of 4 numbers\n New bounding box.\n\n Examples\n ---------\n >>> coord = obj_box_coord_centroid_to_upleft_butright([30, 40, 20, 20])\n [20, 30, 40, 50]", "id": "f11202:m57"} {"signature": "def obj_box_coord_upleft_butright_to_centroid(coord):", "body": "if len(coord) != :raise AssertionError(\"\")x1, y1, x2, y2 = coordw = x2 - x1h = y2 - y1x_c = x1 + w / y_c = y1 + h / return [x_c, y_c, w, h]", "docstring": "Convert one coordinate [x1, y1, x2, y2] to [x_center, y_center, w, h].\n It 
is the reverse process of ``obj_box_coord_centroid_to_upleft_butright``.\n\n Parameters\n ------------\n coord : list of 4 int/float\n One coordinate.\n\n Returns\n -------\n list of 4 numbers\n New bounding box.", "id": "f11202:m58"} {"signature": "def obj_box_coord_centroid_to_upleft(coord):", "body": "if len(coord) != :raise AssertionError(\"\")x_center, y_center, w, h = coordx = x_center - w / y = y_center - h / return [x, y, w, h]", "docstring": "Convert one coordinate [x_center, y_center, w, h] to [x, y, w, h].\n It is the reverse process of ``obj_box_coord_upleft_to_centroid``.\n\n Parameters\n ------------\n coord : list of 4 int/float\n One coordinate.\n\n Returns\n -------\n list of 4 numbers\n New bounding box.", "id": "f11202:m59"} {"signature": "def obj_box_coord_upleft_to_centroid(coord):", "body": "if len(coord) != :raise AssertionError(\"\")x, y, w, h = coordx_center = x + w / y_center = y + h / return [x_center, y_center, w, h]", "docstring": "Convert one coordinate [x, y, w, h] to [x_center, y_center, w, h].\n It is the reverse process of ``obj_box_coord_centroid_to_upleft``.\n\n Parameters\n ------------\n coord : list of 4 int/float\n One coordinate.\n\n Returns\n -------\n list of 4 numbers\n New bounding box.", "id": "f11202:m60"} {"signature": "def parse_darknet_ann_str_to_list(annotations):", "body": "annotations = annotations.split(\"\")ann = []for a in annotations:a = a.split()if len(a) == :for i, _v in enumerate(a):if i == :a[i] = int(a[i])else:a[i] = float(a[i])ann.append(a)return ann", "docstring": "r\"\"\"Input string format of class, x, y, w, h, return list of list format.\n\n Parameters\n -----------\n annotations : str\n The annotations in darkent format \"class, x, y, w, h ....\" seperated by \"\\\\n\".\n\n Returns\n -------\n list of list of 4 numbers\n List of bounding box.", "id": "f11202:m61"} {"signature": "def parse_darknet_ann_list_to_cls_box(annotations):", "body": "class_list = []bbox_list = []for ann in annotations:class_list.append(ann[])bbox_list.append(ann[:])return class_list, bbox_list", "docstring": "Parse darknet annotation format into two lists for class and bounding box.\n\n Input list of [[class, x, y, w, h], ...], return two list of [class ...] and [[x, y, w, h], ...].\n\n Parameters\n ------------\n annotations : list of list\n A list of class and bounding boxes of images e.g. 
[[class, x, y, w, h], ...]\n\n Returns\n -------\n list of int\n List of class labels.\n\n list of list of 4 numbers\n List of bounding box.", "id": "f11202:m62"} {"signature": "def obj_box_horizontal_flip(im, coords=None, is_rescale=False, is_center=False, is_random=False):", "body": "if coords is None:coords = []def _flip(im, coords):im = flip_axis(im, axis=, is_random=False)coords_new = list()for coord in coords:if len(coord) != :raise AssertionError(\"\")if is_rescale:if is_center:x = - coord[]else:x = - coord[] - coord[]else:if is_center:x = im.shape[] - coord[]else:x = im.shape[] - coord[] - coord[]coords_new.append([x, coord[], coord[], coord[]])return im, coords_newif is_random:factor = np.random.uniform(-, )if factor > :return _flip(im, coords)else:return im, coordselse:return _flip(im, coords)", "docstring": "Left-right flip the image and coordinates for object detection.\n\n Parameters\n ----------\n im : numpy.array\n An image with dimension of [row, col, channel] (default).\n coords : list of list of 4 int/float or None\n Coordinates [[x, y, w, h], [x, y, w, h], ...].\n is_rescale : boolean\n Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.\n is_center : boolean\n Set to True, if the x and y of coordinates are the centroid (i.e. darknet format). Default is False.\n is_random : boolean\n If True, randomly flip. Default is False.\n\n Returns\n -------\n numpy.array\n A processed image\n list of list of 4 numbers\n A list of new bounding boxes.\n\n Examples\n --------\n >>> im = np.zeros([80, 100]) # as an image with shape width=100, height=80\n >>> im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3], [0.1, 0.5, 0.2, 0.3]], is_rescale=True, is_center=True, is_random=False)\n >>> print(coords)\n [[0.8, 0.4, 0.3, 0.3], [0.9, 0.5, 0.2, 0.3]]\n >>> im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3]], is_rescale=True, is_center=False, is_random=False)\n >>> print(coords)\n [[0.5, 0.4, 0.3, 0.3]]\n >>> im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=True, is_random=False)\n >>> print(coords)\n [[80, 40, 30, 30]]\n >>> im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=False, is_random=False)\n >>> print(coords)\n [[50, 40, 30, 30]]", "id": "f11202:m63"} {"signature": "def obj_box_imresize(im, coords=None, size=None, interp='', mode=None, is_rescale=False):", "body": "if coords is None:coords = []if size is None:size = [, ]imh, imw = im.shape[:]imh = imh * imw = imw * im = imresize(im, size=size, interp=interp, mode=mode)if is_rescale is False:coords_new = list()for coord in coords:if len(coord) != :raise AssertionError(\"\")x = int(coord[] * (size[] / imw))y = int(coord[] * (size[] / imh))w = int(coord[] * (size[] / imw))h = int(coord[] * (size[] / imh))coords_new.append([x, y, w, h])return im, coords_newelse:return im, coords", "docstring": "Resize an image, and compute the new bounding box coordinates.\n\n Parameters\n -------------\n im : numpy.array\n An image with dimension of [row, col, channel] (default).\n coords : list of list of 4 int/float or None\n Coordinates [[x, y, w, h], [x, y, w, h], ...]\n size interp and mode : args\n See ``tl.prepro.imresize``.\n is_rescale : boolean\n Set to True, if the input coordinates are rescaled to [0, 1], then return the original coordinates. 
Default is False.\n\n Returns\n -------\n numpy.array\n A processed image\n list of list of 4 numbers\n A list of new bounding boxes.\n\n Examples\n --------\n >>> im = np.zeros([80, 100, 3]) # as an image with shape width=100, height=80\n >>> _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30], [10, 20, 20, 20]], size=[160, 200], is_rescale=False)\n >>> print(coords)\n [[40, 80, 60, 60], [20, 40, 40, 40]]\n >>> _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[40, 100], is_rescale=False)\n >>> print(coords)\n [[20, 20, 30, 15]]\n >>> _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[60, 150], is_rescale=False)\n >>> print(coords)\n [[30, 30, 45, 22]]\n >>> im2, coords = obj_box_imresize(im, coords=[[0.2, 0.4, 0.3, 0.3]], size=[160, 200], is_rescale=True)\n >>> print(coords, im2.shape)\n [[0.2, 0.4, 0.3, 0.3]] (160, 200, 3)", "id": "f11202:m64"} {"signature": "def obj_box_crop(im, classes=None, coords=None, wrg=, hrg=, is_rescale=False, is_center=False, is_random=False,thresh_wh=, thresh_wh2=):", "body": "if classes is None:classes = []if coords is None:coords = []h, w = im.shape[], im.shape[]if (h <= hrg) or (w <= wrg):raise AssertionError(\"\")if is_random:h_offset = int(np.random.uniform(, h - hrg) - )w_offset = int(np.random.uniform(, w - wrg) - )h_end = hrg + h_offsetw_end = wrg + w_offsetim_new = im[h_offset:h_end, w_offset:w_end]else: h_offset = int(np.floor((h - hrg) / ))w_offset = int(np.floor((w - wrg) / ))h_end = h_offset + hrgw_end = w_offset + wrgim_new = im[h_offset:h_end, w_offset:w_end]def _get_coord(coord):\"\"\"\"\"\"if is_center:coord = obj_box_coord_centroid_to_upleft(coord)x = coord[] - w_offsety = coord[] - h_offsetw = coord[]h = coord[]if x < :if x + w <= :return Nonew = w + xx = elif x > im_new.shape[]: return Noneif y < :if y + h <= :return Noneh = h + yy = elif y > im_new.shape[]: return Noneif (x is not None) and (x + w > im_new.shape[]): w = im_new.shape[] - xif (y is not None) and (y + h > im_new.shape[]): h = im_new.shape[] - yif (w / (h + ) > thresh_wh2) or (h / (w + ) > thresh_wh2): return Noneif (w / (im_new.shape[] * ) < thresh_wh) or (h / (im_new.shape[] * ) <thresh_wh): return Nonecoord = [x, y, w, h]if is_center:coord = obj_box_coord_upleft_to_centroid(coord)return coordcoords_new = list()classes_new = list()for i, _ in enumerate(coords):coord = coords[i]if len(coord) != :raise AssertionError(\"\")if is_rescale:coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)coord = _get_coord(coord)if coord is not None:coord = obj_box_coord_rescale(coord, im_new.shape)coords_new.append(coord)classes_new.append(classes[i])else:coord = _get_coord(coord)if coord is not None:coords_new.append(coord)classes_new.append(classes[i])return im_new, classes_new, coords_new", "docstring": "Randomly or centrally crop an image, and compute the new bounding box coordinates.\n Objects outside the cropped image will be removed.\n\n Parameters\n -----------\n im : numpy.array\n An image with dimension of [row, col, channel] (default).\n classes : list of int or None\n Class IDs.\n coords : list of list of 4 int/float or None\n Coordinates [[x, y, w, h], [x, y, w, h], ...]\n wrg hrg and is_random : args\n See ``tl.prepro.crop``.\n is_rescale : boolean\n Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.\n is_center : boolean, default False\n Set to True, if the x and y of coordinates are the centroid (i.e. darknet format). 
Default is False.\n thresh_wh : float\n Threshold, remove the box if its ratio of width(height) to image size less than the threshold.\n thresh_wh2 : float\n Threshold, remove the box if its ratio of width to height or vice verse higher than the threshold.\n\n Returns\n -------\n numpy.array\n A processed image\n list of int\n A list of classes\n list of list of 4 numbers\n A list of new bounding boxes.", "id": "f11202:m65"} {"signature": "def obj_box_shift(im, classes=None, coords=None, wrg=, hrg=, row_index=, col_index=, channel_index=, fill_mode='',cval=, order=, is_rescale=False, is_center=False, is_random=False, thresh_wh=, thresh_wh2=):", "body": "if classes is None:classes = []if coords is None:coords = []imh, imw = im.shape[row_index], im.shape[col_index]if (hrg >= ) and (hrg <= ) and (wrg >= ) and (wrg <= ):raise AssertionError(\"\")if is_random:tx = np.random.uniform(-hrg, hrg) * imhty = np.random.uniform(-wrg, wrg) * imwelse:tx, ty = hrg * imh, wrg * imwtranslation_matrix = np.array([[, , tx], [, , ty], [, , ]])transform_matrix = translation_matrix im_new = affine_transform(im, transform_matrix, channel_index, fill_mode, cval, order)def _get_coord(coord):\"\"\"\"\"\"if is_center:coord = obj_box_coord_centroid_to_upleft(coord)x = coord[] - ty y = coord[] - tx w = coord[]h = coord[]if x < :if x + w <= :return Nonew = w + xx = elif x > im_new.shape[]: return Noneif y < :if y + h <= :return Noneh = h + yy = elif y > im_new.shape[]: return Noneif (x is not None) and (x + w > im_new.shape[]): w = im_new.shape[] - xif (y is not None) and (y + h > im_new.shape[]): h = im_new.shape[] - yif (w / (h + ) > thresh_wh2) or (h / (w + ) > thresh_wh2): return Noneif (w / (im_new.shape[] * ) < thresh_wh) or (h / (im_new.shape[] * ) <thresh_wh): return Nonecoord = [x, y, w, h]if is_center:coord = obj_box_coord_upleft_to_centroid(coord)return coordcoords_new = list()classes_new = list()for i, _ in enumerate(coords):coord = coords[i]if len(coord) != :raise AssertionError(\"\")if is_rescale:coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)coord = _get_coord(coord)if coord is not None:coord = obj_box_coord_rescale(coord, im_new.shape)coords_new.append(coord)classes_new.append(classes[i])else:coord = _get_coord(coord)if coord is not None:coords_new.append(coord)classes_new.append(classes[i])return im_new, classes_new, coords_new", "docstring": "Shift an image randomly or non-randomly, and compute the new bounding box coordinates.\n Objects outside the cropped image will be removed.\n\n Parameters\n -----------\n im : numpy.array\n An image with dimension of [row, col, channel] (default).\n classes : list of int or None\n Class IDs.\n coords : list of list of 4 int/float or None\n Coordinates [[x, y, w, h], [x, y, w, h], ...]\n wrg, hrg row_index col_index channel_index is_random fill_mode cval and order : see ``tl.prepro.shift``.\n is_rescale : boolean\n Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.\n is_center : boolean\n Set to True, if the x and y of coordinates are the centroid (i.e. darknet format). 
Default is False.\n thresh_wh : float\n Threshold, remove the box if its ratio of width(height) to image size less than the threshold.\n thresh_wh2 : float\n Threshold, remove the box if its ratio of width to height or vice verse higher than the threshold.\n\n\n Returns\n -------\n numpy.array\n A processed image\n list of int\n A list of classes\n list of list of 4 numbers\n A list of new bounding boxes.", "id": "f11202:m66"} {"signature": "def obj_box_zoom(im, classes=None, coords=None, zoom_range=(,), row_index=, col_index=, channel_index=, fill_mode='',cval=, order=, is_rescale=False, is_center=False, is_random=False, thresh_wh=, thresh_wh2=):", "body": "if classes is None:classes = []if coords is None:coords = []if len(zoom_range) != :raise Exception('' '', zoom_range)if is_random:if zoom_range[] == and zoom_range[] == :zx, zy = , tl.logging.info(\"\")else:zx, zy = np.random.uniform(zoom_range[], zoom_range[], )else:zx, zy = zoom_rangezoom_matrix = np.array([[zx, , ], [, zy, ], [, , ]])h, w = im.shape[row_index], im.shape[col_index]transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)im_new = affine_transform(im, transform_matrix, channel_index, fill_mode, cval, order)def _get_coord(coord):\"\"\"\"\"\"if is_center:coord = obj_box_coord_centroid_to_upleft(coord)x = (coord[] - im.shape[] / ) / zy + im.shape[] / y = (coord[] - im.shape[] / ) / zx + im.shape[] / w = coord[] / zy h = coord[] / zx if x < :if x + w <= :return Nonew = w + xx = elif x > im_new.shape[]: return Noneif y < :if y + h <= :return Noneh = h + yy = elif y > im_new.shape[]: return Noneif (x is not None) and (x + w > im_new.shape[]): w = im_new.shape[] - xif (y is not None) and (y + h > im_new.shape[]): h = im_new.shape[] - yif (w / (h + ) > thresh_wh2) or (h / (w + ) > thresh_wh2): return Noneif (w / (im_new.shape[] * ) < thresh_wh) or (h / (im_new.shape[] * ) <thresh_wh): return Nonecoord = [x, y, w, h]if is_center:coord = obj_box_coord_upleft_to_centroid(coord)return coordcoords_new = list()classes_new = list()for i, _ in enumerate(coords):coord = coords[i]if len(coord) != :raise AssertionError(\"\")if is_rescale:coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)coord = _get_coord(coord)if coord is not None:coord = obj_box_coord_rescale(coord, im_new.shape)coords_new.append(coord)classes_new.append(classes[i])else:coord = _get_coord(coord)if coord is not None:coords_new.append(coord)classes_new.append(classes[i])return im_new, classes_new, coords_new", "docstring": "Zoom in and out of a single image, randomly or non-randomly, and compute the new bounding box coordinates.\n Objects outside the cropped image will be removed.\n\n Parameters\n -----------\n im : numpy.array\n An image with dimension of [row, col, channel] (default).\n classes : list of int or None\n Class IDs.\n coords : list of list of 4 int/float or None\n Coordinates [[x, y, w, h], [x, y, w, h], ...].\n zoom_range row_index col_index channel_index is_random fill_mode cval and order : see ``tl.prepro.zoom``.\n is_rescale : boolean\n Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.\n is_center : boolean\n Set to True, if the x and y of coordinates are the centroid. (i.e. darknet format). 
Default is False.\n thresh_wh : float\n Threshold, remove the box if its ratio of width(height) to image size less than the threshold.\n thresh_wh2 : float\n Threshold, remove the box if its ratio of width to height or vice verse higher than the threshold.\n\n Returns\n -------\n numpy.array\n A processed image\n list of int\n A list of classes\n list of list of 4 numbers\n A list of new bounding boxes.", "id": "f11202:m67"} {"signature": "def pad_sequences(sequences, maxlen=None, dtype='', padding='', truncating='', value=):", "body": "lengths = [len(s) for s in sequences]nb_samples = len(sequences)if maxlen is None:maxlen = np.max(lengths)sample_shape = tuple()for s in sequences:if len(s) > :sample_shape = np.asarray(s).shape[:]breakx = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype)for idx, s in enumerate(sequences):if len(s) == :continue if truncating == '':trunc = s[-maxlen:]elif truncating == '':trunc = s[:maxlen]else:raise ValueError('' % truncating)trunc = np.asarray(trunc, dtype=dtype)if trunc.shape[:] != sample_shape:raise ValueError('' %(trunc.shape[:], idx, sample_shape))if padding == '':x[idx, :len(trunc)] = truncelif padding == '':x[idx, -len(trunc):] = truncelse:raise ValueError('' % padding)return x.tolist()", "docstring": "Pads each sequence to the same length:\n the length of the longest sequence.\n If maxlen is provided, any sequence longer\n than maxlen is truncated to maxlen.\n Truncation happens off either the beginning (default) or\n the end of the sequence.\n Supports post-padding and pre-padding (default).\n\n Parameters\n ----------\n sequences : list of list of int\n All sequences where each row is a sequence.\n maxlen : int\n Maximum length.\n dtype : numpy.dtype or str\n Data type to cast the resulting sequence.\n padding : str\n Either 'pre' or 'post', pad either before or after each sequence.\n truncating : str\n Either 'pre' or 'post', remove values from sequences larger than maxlen either in the beginning or in the end of the sequence\n value : float\n Value to pad the sequences to the desired value.\n\n Returns\n ----------\n x : numpy.array\n With dimensions (number_of_sequences, maxlen)\n\n Examples\n ----------\n >>> sequences = [[1,1,1,1,1],[2,2,2],[3,3]]\n >>> sequences = pad_sequences(sequences, maxlen=None, dtype='int32',\n ... 
padding='post', truncating='pre', value=0.)\n [[1 1 1 1 1]\n [2 2 2 0 0]\n [3 3 0 0 0]]", "id": "f11202:m68"} {"signature": "def remove_pad_sequences(sequences, pad_id=):", "body": "sequences_out = copy.deepcopy(sequences)for i, _ in enumerate(sequences):for j in range(, len(sequences[i])):if sequences[i][-j] != pad_id:sequences_out[i] = sequences_out[i][:-j + ]breakreturn sequences_out", "docstring": "Remove padding.\n\n Parameters\n -----------\n sequences : list of list of int\n All sequences where each row is a sequence.\n pad_id : int\n The pad ID.\n\n Returns\n ----------\n list of list of int\n The processed sequences.\n\n Examples\n ----------\n >>> sequences = [[2,3,4,0,0], [5,1,2,3,4,0,0,0], [4,5,0,2,4,0,0,0]]\n >>> print(remove_pad_sequences(sequences, pad_id=0))\n [[2, 3, 4], [5, 1, 2, 3, 4], [4, 5, 0, 2, 4]]", "id": "f11202:m69"} {"signature": "def process_sequences(sequences, end_id=, pad_val=, is_shorten=True, remain_end_id=False):", "body": "max_length = for _, seq in enumerate(sequences):is_end = Falsefor i_w, n in enumerate(seq):if n == end_id and is_end == False: is_end = Trueif max_length < i_w:max_length = i_wif remain_end_id is False:seq[i_w] = pad_val elif is_end ==True:seq[i_w] = pad_valif remain_end_id is True:max_length += if is_shorten:for i, seq in enumerate(sequences):sequences[i] = seq[:max_length]return sequences", "docstring": "Set all tokens(ids) after END token to the padding value, and then shorten (option) it to the maximum sequence length in this batch.\n\n Parameters\n -----------\n sequences : list of list of int\n All sequences where each row is a sequence.\n end_id : int\n The special token for END.\n pad_val : int\n Replace the `end_id` and the IDs after `end_id` to this value.\n is_shorten : boolean\n Shorten the sequences. Default is True.\n remain_end_id : boolean\n Keep an `end_id` in the end. Default is False.\n\n Returns\n ----------\n list of list of int\n The processed sequences.\n\n Examples\n ---------\n >>> sentences_ids = [[4, 3, 5, 3, 2, 2, 2, 2], <-- end_id is 2\n ... [5, 3, 9, 4, 9, 2, 2, 3]] <-- end_id is 2\n >>> sentences_ids = precess_sequences(sentences_ids, end_id=vocab.end_id, pad_val=0, is_shorten=True)\n [[4, 3, 5, 3, 0], [5, 3, 9, 4, 9]]", "id": "f11202:m70"} {"signature": "def sequences_add_start_id(sequences, start_id=, remove_last=False):", "body": "sequences_out = [[] for _ in range(len(sequences))] for i, _ in enumerate(sequences):if remove_last:sequences_out[i] = [start_id] + sequences[i][:-]else:sequences_out[i] = [start_id] + sequences[i]return sequences_out", "docstring": "Add special start token(id) in the beginning of each sequence.\n\n Parameters\n ------------\n sequences : list of list of int\n All sequences where each row is a sequence.\n start_id : int\n The start ID.\n remove_last : boolean\n Remove the last value of each sequences. 
Usually be used for removing the end ID.\n\n Returns\n ----------\n list of list of int\n The processed sequences.\n\n Examples\n ---------\n >>> sentences_ids = [[4,3,5,3,2,2,2,2], [5,3,9,4,9,2,2,3]]\n >>> sentences_ids = sequences_add_start_id(sentences_ids, start_id=2)\n [[2, 4, 3, 5, 3, 2, 2, 2, 2], [2, 5, 3, 9, 4, 9, 2, 2, 3]]\n >>> sentences_ids = sequences_add_start_id(sentences_ids, start_id=2, remove_last=True)\n [[2, 4, 3, 5, 3, 2, 2, 2], [2, 5, 3, 9, 4, 9, 2, 2]]\n\n For Seq2seq\n\n >>> input = [a, b, c]\n >>> target = [x, y, z]\n >>> decode_seq = [start_id, a, b] <-- sequences_add_start_id(input, start_id, True)", "id": "f11202:m71"} {"signature": "def sequences_add_end_id(sequences, end_id=):", "body": "sequences_out = [[] for _ in range(len(sequences))] for i, _ in enumerate(sequences):sequences_out[i] = sequences[i] + [end_id]return sequences_out", "docstring": "Add special end token(id) in the end of each sequence.\n\n Parameters\n -----------\n sequences : list of list of int\n All sequences where each row is a sequence.\n end_id : int\n The end ID.\n\n Returns\n ----------\n list of list of int\n The processed sequences.\n\n Examples\n ---------\n >>> sequences = [[1,2,3],[4,5,6,7]]\n >>> print(sequences_add_end_id(sequences, end_id=999))\n [[1, 2, 3, 999], [4, 5, 6, 999]]", "id": "f11202:m72"} {"signature": "def sequences_add_end_id_after_pad(sequences, end_id=, pad_id=):", "body": "sequences_out = copy.deepcopy(sequences)for i, v in enumerate(sequences):for j, _v2 in enumerate(v):if sequences[i][j] == pad_id:sequences_out[i][j] = end_idbreakreturn sequences_out", "docstring": "Add special end token(id) in the end of each sequence.\n\n Parameters\n -----------\n sequences : list of list of int\n All sequences where each row is a sequence.\n end_id : int\n The end ID.\n pad_id : int\n The pad ID.\n\n Returns\n ----------\n list of list of int\n The processed sequences.\n\n Examples\n ---------\n >>> sequences = [[1,2,0,0], [1,2,3,0], [1,2,3,4]]\n >>> print(sequences_add_end_id_after_pad(sequences, end_id=99, pad_id=0))\n [[1, 2, 99, 0], [1, 2, 3, 99], [1, 2, 3, 4]]", "id": "f11202:m73"} {"signature": "def sequences_get_mask(sequences, pad_val=):", "body": "mask = np.ones_like(sequences)for i, seq in enumerate(sequences):for i_w in reversed(range(len(seq))):if seq[i_w] == pad_val:mask[i, i_w] = else:break return mask", "docstring": "Return mask for sequences.\n\n Parameters\n -----------\n sequences : list of list of int\n All sequences where each row is a sequence.\n pad_val : int\n The pad value.\n\n Returns\n ----------\n list of list of int\n The mask.\n\n Examples\n ---------\n >>> sentences_ids = [[4, 0, 5, 3, 0, 0],\n ... 
[5, 3, 9, 4, 9, 0]]\n >>> mask = sequences_get_mask(sentences_ids, pad_val=0)\n [[1 1 1 1 0 0]\n [1 1 1 1 1 0]]", "id": "f11202:m74"} {"signature": "def keypoint_random_crop(image, annos, mask=None, size=(, )):", "body": "_target_height = size[]_target_width = size[]target_size = (_target_width, _target_height)if len(np.shape(image)) == :image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)height, width, _ = np.shape(image)for _ in range():x = random.randrange(, width - target_size[]) if width > target_size[] else y = random.randrange(, height - target_size[]) if height > target_size[] else for joint in annos:if x <= joint[][] < x + target_size[] and y <= joint[][] < y + target_size[]:breakdef pose_crop(image, annos, mask, x, y, w, h): target_size = (w, h)img = imageresized = img[y:y + target_size[], x:x + target_size[], :]resized_mask = mask[y:y + target_size[], x:x + target_size[]]adjust_joint_list = []for joint in annos:adjust_joint = []for point in joint:if point[] < - or point[] < -:adjust_joint.append((-, -))continuenew_x, new_y = point[] - x, point[] - yif new_x > w - or new_y > h - :adjust_joint.append((-, -))continueadjust_joint.append((new_x, new_y))adjust_joint_list.append(adjust_joint)return resized, adjust_joint_list, resized_maskreturn pose_crop(image, annos, mask, x, y, target_size[], target_size[])", "docstring": "Randomly crop an image and corresponding keypoints without influence scales, given by ``keypoint_random_resize_shortestedge``.\n\n Parameters\n -----------\n image : 3 channel image\n The given image for augmentation.\n annos : list of list of floats\n The keypoints annotation of people.\n mask : single channel image or None\n The mask if available.\n size : tuple of int\n The size of returned image.\n\n Returns\n ----------\n preprocessed image, annotation, mask", "id": "f11202:m75"} {"signature": "def keypoint_resize_random_crop(image, annos, mask=None, size=(, )):", "body": "if len(np.shape(image)) == :image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)def resize_image(image, annos, mask, target_width, target_height):\"\"\"\"\"\"y, x, _ = np.shape(image)ratio_y = target_height / yratio_x = target_width / xnew_joints = []for people in annos:new_keypoints = []for keypoints in people:if keypoints[] < or keypoints[] < :new_keypoints.append((-, -))continuepts = (int(keypoints[] * ratio_x + ), int(keypoints[] * ratio_y + ))if pts[] > target_width - or pts[] > target_height - :new_keypoints.append((-, -))continuenew_keypoints.append(pts)new_joints.append(new_keypoints)annos = new_jointsnew_image = cv2.resize(image, (target_width, target_height), interpolation=cv2.INTER_AREA)if mask is not None:new_mask = cv2.resize(mask, (target_width, target_height), interpolation=cv2.INTER_AREA)return new_image, annos, new_maskelse:return new_image, annos, None_target_height = size[]_target_width = size[]if len(np.shape(image)) == :image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)height, width, _ = np.shape(image)if height <= width:ratio = _target_height / heightnew_width = int(ratio * width)if height == width:new_width = _target_heightimage, annos, mask = resize_image(image, annos, mask, new_width, _target_height)if new_width > _target_width:crop_range_x = np.random.randint(, new_width - _target_width)else:crop_range_x = image = image[:, crop_range_x:crop_range_x + _target_width, :]if mask is not None:mask = mask[:, crop_range_x:crop_range_x + _target_width]new_joints = []for people in annos:new_keypoints = []for keypoints in people:if keypoints[] < - or keypoints[] < 
-:new_keypoints.append((-, -))continuetop = crop_range_x + _target_width - if keypoints[] >= crop_range_x and keypoints[] <= top:pts = (int(keypoints[] - crop_range_x), int(keypoints[]))else:pts = (-, -)new_keypoints.append(pts)new_joints.append(new_keypoints)annos = new_jointsif height > width:ratio = _target_width / widthnew_height = int(ratio * height)image, annos, mask = resize_image(image, annos, mask, _target_width, new_height)if new_height > _target_height:crop_range_y = np.random.randint(, new_height - _target_height)else:crop_range_y = image = image[crop_range_y:crop_range_y + _target_width, :, :]if mask is not None:mask = mask[crop_range_y:crop_range_y + _target_width, :]new_joints = []for people in annos: new_keypoints = []for keypoints in people:if keypoints[] < or keypoints[] < :new_keypoints.append((-, -))continuebot = crop_range_y + _target_height - if keypoints[] >= crop_range_y and keypoints[] <= bot:pts = (int(keypoints[]), int(keypoints[] - crop_range_y))else:pts = (-, -)new_keypoints.append(pts)new_joints.append(new_keypoints)annos = new_jointsif mask is not None:return image, annos, maskelse:return image, annos, None", "docstring": "Reszie the image to make either its width or height equals to the given sizes.\n Then randomly crop image without influence scales.\n Resize the image match with the minimum size before cropping, this API will change the zoom scale of object.\n\n Parameters\n -----------\n image : 3 channel image\n The given image for augmentation.\n annos : list of list of floats\n The keypoints annotation of people.\n mask : single channel image or None\n The mask if available.\n size : tuple of int\n The size (height, width) of returned image.\n\n Returns\n ----------\n preprocessed image, annos, mask", "id": "f11202:m76"} {"signature": "def keypoint_random_rotate(image, annos, mask=None, rg=):", "body": "def _rotate_coord(shape, newxy, point, angle):angle = - * angle / * math.piox, oy = shapepx, py = pointox /= oy /= qx = math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)qy = math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)new_x, new_y = newxyqx += ox - new_xqy += oy - new_yreturn int(qx + ), int(qy + )def _largest_rotated_rect(w, h, angle):\"\"\"\"\"\"angle = angle / * math.piif w <= or h <= :return , width_is_longer = w >= hside_long, side_short = (w, h) if width_is_longer else (h, w)sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))if side_short <= * sin_a * cos_a * side_long:x = * side_shortwr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)else:cos_2a = cos_a * cos_a - sin_a * sin_awr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2areturn int(np.round(wr)), int(np.round(hr))img_shape = np.shape(image)height = img_shape[]width = img_shape[]deg = np.random.uniform(-rg, rg)img = imagecenter = (img.shape[] * , img.shape[] * ) rot_m = cv2.getRotationMatrix2D((int(center[]), int(center[])), deg, )ret = cv2.warpAffine(img, rot_m, img.shape[::-], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)if img.ndim == and ret.ndim == :ret = ret[:, :, np.newaxis]neww, newh = _largest_rotated_rect(ret.shape[], ret.shape[], deg)neww = min(neww, ret.shape[])newh = min(newh, ret.shape[])newx = int(center[] - neww * )newy = int(center[] - newh * )img = ret[newy:newy + newh, newx:newx + neww]adjust_joint_list = []for joint in annos: adjust_joint = []for point in joint:if point[] < - or point[] < -:adjust_joint.append((-, -))continuex, y = _rotate_coord((width, height), (newx, newy), point, 
deg)if x > neww - or y > newh - :adjust_joint.append((-, -))continueif x < or y < :adjust_joint.append((-, -))continueadjust_joint.append((x, y))adjust_joint_list.append(adjust_joint)joint_list = adjust_joint_listif mask is not None:msk = maskcenter = (msk.shape[] * , msk.shape[] * ) rot_m = cv2.getRotationMatrix2D((int(center[]), int(center[])), deg, )ret = cv2.warpAffine(msk, rot_m, msk.shape[::-], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)if msk.ndim == and msk.ndim == :ret = ret[:, :, np.newaxis]neww, newh = _largest_rotated_rect(ret.shape[], ret.shape[], deg)neww = min(neww, ret.shape[])newh = min(newh, ret.shape[])newx = int(center[] - neww * )newy = int(center[] - newh * )msk = ret[newy:newy + newh, newx:newx + neww]return img, joint_list, mskelse:return img, joint_list, None", "docstring": "Rotate an image and corresponding keypoints.\n\n Parameters\n -----------\n image : 3 channel image\n The given image for augmentation.\n annos : list of list of floats\n The keypoints annotation of people.\n mask : single channel image or None\n The mask if available.\n rg : int or float\n Degree to rotate, usually 0 ~ 180.\n\n Returns\n ----------\n preprocessed image, annos, mask", "id": "f11202:m77"} {"signature": "def keypoint_random_flip(image, annos, mask=None, prob=, flip_list=(, , , , , , , , , , , , , , , , , , )):", "body": "_prob = np.random.uniform(, )if _prob < prob:return image, annos, mask_, width, _ = np.shape(image)image = cv2.flip(image, )mask = cv2.flip(mask, )new_joints = []for people in annos: new_keypoints = []for k in flip_list:point = people[k]if point[] < or point[] < :new_keypoints.append((-, -))continueif point[] > image.shape[] - or point[] > image.shape[] - :new_keypoints.append((-, -))continueif (width - point[]) > image.shape[] - :new_keypoints.append((-, -))continuenew_keypoints.append((width - point[], point[]))new_joints.append(new_keypoints)annos = new_jointsreturn image, annos, mask", "docstring": "Flip an image and corresponding keypoints.\n\n Parameters\n -----------\n image : 3 channel image\n The given image for augmentation.\n annos : list of list of floats\n The keypoints annotation of people.\n mask : single channel image or None\n The mask if available.\n prob : float, 0 to 1\n The probability to flip the image, if 1, always flip the image.\n flip_list : tuple of int\n Denotes how the keypoints number be changed after flipping which is required for pose estimation task.\n The left and right body should be maintained rather than switch.\n (Default COCO format).\n Set to an empty tuple if you don't need to maintain left and right information.\n\n Returns\n ----------\n preprocessed image, annos, mask", "id": "f11202:m78"} {"signature": "def keypoint_random_resize(image, annos, mask=None, zoom_range=(, )):", "body": "height = image.shape[]width = image.shape[]_min, _max = zoom_rangescalew = np.random.uniform(_min, _max)scaleh = np.random.uniform(_min, _max)neww = int(width * scalew)newh = int(height * scaleh)dst = cv2.resize(image, (neww, newh), interpolation=cv2.INTER_AREA)if mask is not None:mask = cv2.resize(mask, (neww, newh), interpolation=cv2.INTER_AREA)adjust_joint_list = []for joint in annos: adjust_joint = []for point in joint:if point[] < - or point[] < -:adjust_joint.append((-, -))continueadjust_joint.append((int(point[] * scalew + ), int(point[] * scaleh + )))adjust_joint_list.append(adjust_joint)if mask is not None:return dst, adjust_joint_list, maskelse:return dst, adjust_joint_list, None", "docstring": "Randomly resize an image 
and corresponding keypoints.\n The height and width of image will be changed independently, so the scale will be changed.\n\n Parameters\n -----------\n image : 3 channel image\n The given image for augmentation.\n annos : list of list of floats\n The keypoints annotation of people.\n mask : single channel image or None\n The mask if available.\n zoom_range : tuple of two floats\n The minimum and maximum factor to zoom in or out, e.g (0.5, 1) means zoom out 1~2 times.\n\n Returns\n ----------\n preprocessed image, annos, mask", "id": "f11202:m79"} {"signature": "def __str__(self):", "body": "return self._s", "docstring": "Print information of databset.", "id": "f11203:c0:m1"} {"signature": "def _fill_project_info(self, args):", "body": "return args.update({'': self.project_name})", "docstring": "Fill in project_name for all studies, architectures and parameters.", "id": "f11203:c0:m2"} {"signature": "@staticmethoddef _serialization(ps):", "body": "return pickle.dumps(ps, protocol=pickle.HIGHEST_PROTOCOL)", "docstring": "Serialize data.", "id": "f11203:c0:m3"} {"signature": "@staticmethoddef _deserialization(ps):", "body": "return pickle.loads(ps)", "docstring": "Deseralize data.", "id": "f11203:c0:m4"} {"signature": "def save_model(self, network=None, model_name='', **kwargs):", "body": "kwargs.update({'': model_name})self._fill_project_info(kwargs) params = network.get_all_params()s = time.time()kwargs.update({'': network.all_graphs, '': datetime.utcnow()})try:params_id = self.model_fs.put(self._serialization(params))kwargs.update({'': params_id, '': datetime.utcnow()})self.db.Model.insert_one(kwargs)print(\"\".format(round(time.time() - s, )))return Trueexcept Exception as e:exc_type, exc_obj, exc_tb = sys.exc_info()fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[]logging.info(\"\".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))print(\"\")return False", "docstring": "Save model architecture and parameters into database, timestamp will be added automatically.\n\n Parameters\n ----------\n network : TensorLayer layer\n TensorLayer layer instance.\n model_name : str\n The name/key of model.\n kwargs : other events\n Other events, such as name, accuracy, loss, step number and etc (optinal).\n\n Examples\n ---------\n Save model architecture and parameters into database.\n >>> db.save_model(net, accuracy=0.8, loss=2.3, name='second_model')\n\n Load one model with parameters from database (run this in other script)\n >>> net = db.find_top_model(sess=sess, accuracy=0.8, loss=2.3)\n\n Find and load the latest model.\n >>> net = db.find_top_model(sess=sess, sort=[(\"time\", pymongo.DESCENDING)])\n >>> net = db.find_top_model(sess=sess, sort=[(\"time\", -1)])\n\n Find and load the oldest model.\n >>> net = db.find_top_model(sess=sess, sort=[(\"time\", pymongo.ASCENDING)])\n >>> net = db.find_top_model(sess=sess, sort=[(\"time\", 1)])\n\n Get model information\n >>> net._accuracy\n ... 
0.8\n\n Returns\n ---------\n boolean : True for success, False for fail.", "id": "f11203:c0:m5"} {"signature": "def find_top_model(self, sess, sort=None, model_name='', **kwargs):", "body": "kwargs.update({'': model_name})self._fill_project_info(kwargs)s = time.time()d = self.db.Model.find_one(filter=kwargs, sort=sort)_temp_file_name = ''if d is not None:params_id = d['']graphs = d['']_datetime = d['']exists_or_mkdir(_temp_file_name, False)with open(os.path.join(_temp_file_name, ''), '') as file:pickle.dump(graphs, file, protocol=pickle.HIGHEST_PROTOCOL)else:print(\"\".format(kwargs))return Falsetry:params = self._deserialization(self.model_fs.get(params_id).read())np.savez(os.path.join(_temp_file_name, ''), params=params)network = load_graph_and_params(name=_temp_file_name, sess=sess)del_folder(_temp_file_name)pc = self.db.Model.find(kwargs)print(\"\".format(kwargs, sort, _datetime, round(time.time() - s, )))for key in d:network.__dict__.update({\"\" % key: d[key]})params_id_list = pc.distinct('')n_params = len(params_id_list)if n_params != :print(\"\".format(n_params))return networkexcept Exception as e:exc_type, exc_obj, exc_tb = sys.exc_info()fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[]logging.info(\"\".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))return False", "docstring": "Finds and returns a model architecture and its parameters from the database which matches the requirement.\n\n Parameters\n ----------\n sess : Session\n TensorFlow session.\n sort : List of tuple\n PyMongo sort comment, search \"PyMongo find one sorting\" and `collection level operations `__ for more details.\n model_name : str or None\n The name/key of model.\n kwargs : other events\n Other events, such as name, accuracy, loss, step number and etc (optinal).\n\n Examples\n ---------\n - see ``save_model``.\n\n Returns\n ---------\n network : TensorLayer layer\n Note that, the returned network contains all information of the document (record), e.g. 
if you saved accuracy in the document, you can get the accuracy by using ``net._accuracy``.", "id": "f11203:c0:m6"} {"signature": "def delete_model(self, **kwargs):", "body": "self._fill_project_info(kwargs)self.db.Model.delete_many(kwargs)logging.info(\"\")", "docstring": "Delete model.\n\n Parameters\n -----------\n kwargs : logging information\n Find items to delete, leave it empty to delete all log.", "id": "f11203:c0:m7"} {"signature": "def save_dataset(self, dataset=None, dataset_name=None, **kwargs):", "body": "self._fill_project_info(kwargs)if dataset_name is None:raise Exception(\"\")kwargs.update({'': dataset_name})s = time.time()try:dataset_id = self.dataset_fs.put(self._serialization(dataset))kwargs.update({'': dataset_id, '': datetime.utcnow()})self.db.Dataset.insert_one(kwargs)print(\"\".format(round(time.time() - s, )))return Trueexcept Exception as e:exc_type, exc_obj, exc_tb = sys.exc_info()fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[]logging.info(\"\".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))print(\"\")return False", "docstring": "Saves one dataset into database, timestamp will be added automatically.\n\n Parameters\n ----------\n dataset : any type\n The dataset you want to store.\n dataset_name : str\n The name of dataset.\n kwargs : other events\n Other events, such as description, author and etc (optinal).\n\n Examples\n ----------\n Save dataset\n >>> db.save_dataset([X_train, y_train, X_test, y_test], 'mnist', description='this is a tutorial')\n\n Get dataset\n >>> dataset = db.find_top_dataset('mnist')\n\n Returns\n ---------\n boolean : Return True if save success, otherwise, return False.", "id": "f11203:c0:m8"} {"signature": "def find_top_dataset(self, dataset_name=None, sort=None, **kwargs):", "body": "self._fill_project_info(kwargs)if dataset_name is None:raise Exception(\"\")kwargs.update({'': dataset_name})s = time.time()d = self.db.Dataset.find_one(filter=kwargs, sort=sort)if d is not None:dataset_id = d['']else:print(\"\".format(kwargs))return Falsetry:dataset = self._deserialization(self.dataset_fs.get(dataset_id).read())pc = self.db.Dataset.find(kwargs)print(\"\".format(kwargs, round(time.time() - s, )))dataset_id_list = pc.distinct('')n_dataset = len(dataset_id_list)if n_dataset != :print(\"\".format(n_dataset))return datasetexcept Exception as e:exc_type, exc_obj, exc_tb = sys.exc_info()fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[]logging.info(\"\".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))return False", "docstring": "Finds and returns a dataset from the database which matches the requirement.\n\n Parameters\n ----------\n dataset_name : str\n The name of dataset.\n sort : List of tuple\n PyMongo sort comment, search \"PyMongo find one sorting\" and `collection level operations `__ for more details.\n kwargs : other events\n Other events, such as description, author and etc (optinal).\n\n Examples\n ---------\n Save dataset\n >>> db.save_dataset([X_train, y_train, X_test, y_test], 'mnist', description='this is a tutorial')\n\n Get dataset\n >>> dataset = db.find_top_dataset('mnist')\n >>> datasets = db.find_datasets('mnist')\n\n Returns\n --------\n dataset : the dataset or False\n Return False if nothing found.", "id": "f11203:c0:m9"} {"signature": "def find_datasets(self, dataset_name=None, **kwargs):", "body": "self._fill_project_info(kwargs)if dataset_name is None:raise Exception(\"\")kwargs.update({'': dataset_name})s = time.time()pc = self.db.Dataset.find(kwargs)if pc is not 
None:dataset_id_list = pc.distinct('')dataset_list = []for dataset_id in dataset_id_list: tmp = self.dataset_fs.get(dataset_id).read()dataset_list.append(self._deserialization(tmp))else:print(\"\".format(kwargs))return Falseprint(\"\".format(len(dataset_list), round(time.time() - s, )))return dataset_list", "docstring": "Finds and returns all datasets from the database which matches the requirement.\n In some case, the data in a dataset can be stored separately for better management.\n\n Parameters\n ----------\n dataset_name : str\n The name/key of dataset.\n kwargs : other events\n Other events, such as description, author and etc (optional).\n\n Returns\n --------\n params : the parameters, return False if nothing found.", "id": "f11203:c0:m10"} {"signature": "def delete_datasets(self, **kwargs):", "body": "self._fill_project_info(kwargs)self.db.Dataset.delete_many(kwargs)logging.info(\"\")", "docstring": "Delete datasets.\n\n Parameters\n -----------\n kwargs : logging information\n Find items to delete, leave it empty to delete all log.", "id": "f11203:c0:m11"} {"signature": "def save_training_log(self, **kwargs):", "body": "self._fill_project_info(kwargs)kwargs.update({'': datetime.utcnow()})_result = self.db.TrainLog.insert_one(kwargs)_log = self._print_dict(kwargs)logging.info(\"\" + _log)", "docstring": "Saves the training log, timestamp will be added automatically.\n\n Parameters\n -----------\n kwargs : logging information\n Events, such as accuracy, loss, step number and etc.\n\n Examples\n ---------\n >>> db.save_training_log(accuracy=0.33, loss=0.98)", "id": "f11203:c0:m12"} {"signature": "def save_validation_log(self, **kwargs):", "body": "self._fill_project_info(kwargs)kwargs.update({'': datetime.utcnow()})_result = self.db.ValidLog.insert_one(kwargs)_log = self._print_dict(kwargs)logging.info(\"\" + _log)", "docstring": "Saves the validation log, timestamp will be added automatically.\n\n Parameters\n -----------\n kwargs : logging information\n Events, such as accuracy, loss, step number and etc.\n\n Examples\n ---------\n >>> db.save_validation_log(accuracy=0.33, loss=0.98)", "id": "f11203:c0:m13"} {"signature": "def delete_training_log(self, **kwargs):", "body": "self._fill_project_info(kwargs)self.db.TrainLog.delete_many(kwargs)logging.info(\"\")", "docstring": "Deletes training log.\n\n Parameters\n -----------\n kwargs : logging information\n Find items to delete, leave it empty to delete all log.\n\n Examples\n ---------\n Save training log\n >>> db.save_training_log(accuracy=0.33)\n >>> db.save_training_log(accuracy=0.44)\n\n Delete logs that match the requirement\n >>> db.delete_training_log(accuracy=0.33)\n\n Delete all logs\n >>> db.delete_training_log()", "id": "f11203:c0:m15"} {"signature": "def delete_validation_log(self, **kwargs):", "body": "self._fill_project_info(kwargs)self.db.ValidLog.delete_many(kwargs)logging.info(\"\")", "docstring": "Deletes validation log.\n\n Parameters\n -----------\n kwargs : logging information\n Find items to delete, leave it empty to delete all log.\n\n Examples\n ---------\n - see ``save_training_log``.", "id": "f11203:c0:m16"} {"signature": "def create_task(self, task_name=None, script=None, hyper_parameters=None, saved_result_keys=None, **kwargs):", "body": "if not isinstance(task_name, str): raise Exception(\"\")if not isinstance(script, str): raise Exception(\"\")if hyper_parameters is None:hyper_parameters = {}if saved_result_keys is None:saved_result_keys = []self._fill_project_info(kwargs)kwargs.update({'': 
datetime.utcnow()})kwargs.update({'': hyper_parameters})kwargs.update({'': saved_result_keys})_script = open(script, '').read()kwargs.update({'': '', '': _script, '': {}})self.db.Task.insert_one(kwargs)logging.info(\"\".format(task_name, script))", "docstring": "Uploads a task to the database, timestamp will be added automatically.\n\n Parameters\n -----------\n task_name : str\n The task name.\n script : str\n File name of the python script.\n hyper_parameters : dictionary\n The hyper parameters pass into the script.\n saved_result_keys : list of str\n The keys of the task results to keep in the database when the task finishes.\n kwargs : other parameters\n Users customized parameters such as description, version number.\n\n Examples\n -----------\n Uploads a task\n >>> db.create_task(task_name='mnist', script='example/tutorial_mnist_simple.py', description='simple tutorial')\n\n Finds and runs the latest task\n >>> db.run_top_task(sess=sess, sort=[(\"time\", pymongo.DESCENDING)])\n >>> db.run_top_task(sess=sess, sort=[(\"time\", -1)])\n\n Finds and runs the oldest task\n >>> db.run_top_task(sess=sess, sort=[(\"time\", pymongo.ASCENDING)])\n >>> db.run_top_task(sess=sess, sort=[(\"time\", 1)])", "id": "f11203:c0:m18"} {"signature": "def run_top_task(self, task_name=None, sort=None, **kwargs):", "body": "if not isinstance(task_name, str): raise Exception(\"\")self._fill_project_info(kwargs)kwargs.update({'': ''})task = self.db.Task.find_one_and_update(kwargs, {'': {'': ''}}, sort=sort)try:if task is None:logging.info(\"\".format(task_name, sort))return Falseelse:logging.info(\"\".format(task_name, sort))_datetime = task['']_script = task['']_id = task['']_hyper_parameters = task['']_saved_result_keys = task['']logging.info(\"\")for key in _hyper_parameters:globals()[key] = _hyper_parameters[key]logging.info(\"\".format(key, _hyper_parameters[key]))s = time.time()logging.info(\"\".format(task_name, sort, _datetime))_script = _script.decode('')with tf.Graph().as_default(): exec(_script, globals())_ = self.db.Task.find_one_and_update({'': _id}, {'': {'': ''}})__result = {}for _key in _saved_result_keys:logging.info(\"\".format(_key, globals()[_key], type(globals()[_key])))__result.update({\"\" % _key: globals()[_key]})_ = self.db.Task.find_one_and_update({'': _id}, {'': {'': __result}}, return_document=pymongo.ReturnDocument.AFTER)logging.info(\"\".format(task_name, sort, _datetime,time.time() - s))return Trueexcept Exception as e:exc_type, exc_obj, exc_tb = sys.exc_info()fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[]logging.info(\"\".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))logging.info(\"\")_ = self.db.Task.find_one_and_update({'': _id}, {'': {'': ''}})return False", "docstring": "Finds and runs a pending task that in the first of the sorting list.\n\n Parameters\n -----------\n task_name : str\n The task name.\n sort : List of tuple\n PyMongo sort comment, search \"PyMongo find one sorting\" and `collection level operations `__ for more details.\n kwargs : other parameters\n Users customized parameters such as description, version number.\n\n Examples\n ---------\n Monitors the database and pull tasks to run\n >>> while True:\n >>> print(\"waiting task from distributor\")\n >>> db.run_top_task(task_name='mnist', sort=[(\"time\", -1)])\n >>> time.sleep(1)\n\n Returns\n --------\n boolean : True for success, False for fail.", "id": "f11203:c0:m19"} {"signature": "def delete_tasks(self, **kwargs):", "body": 
"self._fill_project_info(kwargs)self.db.Task.delete_many(kwargs)logging.info(\"\")", "docstring": "Delete tasks.\n\n Parameters\n -----------\n kwargs : logging information\n Find items to delete, leave it empty to delete all log.\n\n Examples\n ---------\n >>> db.delete_tasks()", "id": "f11203:c0:m20"} {"signature": "def check_unfinished_task(self, task_name=None, **kwargs):", "body": "if not isinstance(task_name, str): raise Exception(\"\")self._fill_project_info(kwargs)kwargs.update({'': [{'': ''}, {'': ''}]})task = self.db.Task.find(kwargs)task_id_list = task.distinct('')n_task = len(task_id_list)if n_task == :logging.info(\"\".format(task_name))return Falseelse:logging.info(\"\".format(n_task, task_name))return True", "docstring": "Finds and runs a pending task.\n\n Parameters\n -----------\n task_name : str\n The task name.\n kwargs : other parameters\n Users customized parameters such as description, version number.\n\n Examples\n ---------\n Wait until all tasks finish in user's local console\n\n >>> while not db.check_unfinished_task():\n >>> time.sleep(1)\n >>> print(\"all tasks finished\")\n >>> sess = tf.InteractiveSession()\n >>> net = db.find_top_model(sess=sess, sort=[(\"test_accuracy\", -1)])\n >>> print(\"the best accuracy {} is from model {}\".format(net._test_accuracy, net._name))\n\n Returns\n --------\n boolean : True for success, False for fail.", "id": "f11203:c0:m21"} {"signature": "def ramp(x, v_min=, v_max=, name=None):", "body": "return tf.clip_by_value(x, clip_value_min=v_min, clip_value_max=v_max, name=name)", "docstring": "Ramp activation function.\n\n Parameters\n ----------\n x : Tensor\n input.\n v_min : float\n cap input to v_min as a lower bound.\n v_max : float\n cap input to v_max as a upper bound.\n name : str\n The function name (optional).\n\n Returns\n -------\n Tensor\n A ``Tensor`` in the same type as ``x``.", "id": "f11204:m0"} {"signature": "@deprecated(date=\"\", instructions=\"\")def leaky_relu(x, alpha=, name=\"\"):", "body": "if not ( < alpha <= ):raise ValueError(\"\")with tf.name_scope(name, \"\") as name_scope:x = tf.convert_to_tensor(x, name=\"\")return tf.maximum(x, alpha * x, name=name_scope)", "docstring": "leaky_relu can be used through its shortcut: :func:`tl.act.lrelu`.\n\n This function is a modified version of ReLU, introducing a nonzero gradient for negative input. Introduced by the paper:\n `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__\n\n The function return the following results:\n - When x < 0: ``f(x) = alpha_low * x``.\n - When x >= 0: ``f(x) = x``.\n\n Parameters\n ----------\n x : Tensor\n Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.\n alpha : float\n Slope.\n name : str\n The function name (optional).\n\n Examples\n --------\n >>> import tensorlayer as tl\n >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.lrelu(x, 0.2), name='dense')\n\n Returns\n -------\n Tensor\n A ``Tensor`` in the same type as ``x``.\n\n References\n ----------\n - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. 
Maas et al., 2013] `__", "id": "f11204:m1"} {"signature": "def leaky_relu6(x, alpha=, name=\"\"):", "body": "if not isinstance(alpha, tf.Tensor) and not ( < alpha <= ):raise ValueError(\"\")with tf.name_scope(name, \"\") as name_scope:x = tf.convert_to_tensor(x, name=\"\")return tf.minimum(tf.maximum(x, alpha * x), , name=name_scope)", "docstring": ":func:`leaky_relu6` can be used through its shortcut: :func:`tl.act.lrelu6`.\n\n This activation function is a modified version :func:`leaky_relu` introduced by the following paper:\n `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__\n\n This activation function also follows the behaviour of the activation function :func:`tf.nn.relu6` introduced by the following paper:\n `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] `__\n\n The function return the following results:\n - When x < 0: ``f(x) = alpha_low * x``.\n - When x in [0, 6]: ``f(x) = x``.\n - When x > 6: ``f(x) = 6``.\n\n Parameters\n ----------\n x : Tensor\n Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.\n alpha : float\n Slope.\n name : str\n The function name (optional).\n\n Examples\n --------\n >>> import tensorlayer as tl\n >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.leaky_relu6(x, 0.2), name='dense')\n\n Returns\n -------\n Tensor\n A ``Tensor`` in the same type as ``x``.\n\n References\n ----------\n - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__\n - `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] `__", "id": "f11204:m2"} {"signature": "def leaky_twice_relu6(x, alpha_low=, alpha_high=, name=\"\"):", "body": "if not isinstance(alpha_high, tf.Tensor) and not ( < alpha_high <= ):raise ValueError(\"\")if not isinstance(alpha_low, tf.Tensor) and not ( < alpha_low <= ):raise ValueError(\"\")with tf.name_scope(name, \"\") as name_scope:x = tf.convert_to_tensor(x, name=\"\")x_is_above_0 = tf.minimum(x, * ( - alpha_high) + alpha_high * x)x_is_below_0 = tf.minimum(alpha_low * x, )return tf.maximum(x_is_above_0, x_is_below_0, name=name_scope)", "docstring": ":func:`leaky_twice_relu6` can be used through its shortcut: :func:`:func:`tl.act.ltrelu6`.\n\n This activation function is a modified version :func:`leaky_relu` introduced by the following paper:\n `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__\n\n This activation function also follows the behaviour of the activation function :func:`tf.nn.relu6` introduced by the following paper:\n `Convolutional Deep Belief Networks on CIFAR-10 [A. 
Krizhevsky, 2010] `__\n\n This function push further the logic by adding `leaky` behaviour both below zero and above six.\n\n The function return the following results:\n - When x < 0: ``f(x) = alpha_low * x``.\n - When x in [0, 6]: ``f(x) = x``.\n - When x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.\n\n Parameters\n ----------\n x : Tensor\n Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.\n alpha_low : float\n Slope for x < 0: ``f(x) = alpha_low * x``.\n alpha_high : float\n Slope for x < 6: ``f(x) = 6 (alpha_high * (x-6))``.\n name : str\n The function name (optional).\n\n Examples\n --------\n >>> import tensorlayer as tl\n >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.leaky_twice_relu6(x, 0.2, 0.2), name='dense')\n\n Returns\n -------\n Tensor\n A ``Tensor`` in the same type as ``x``.\n\n References\n ----------\n - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] `__\n - `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] `__", "id": "f11204:m3"} {"signature": "def swish(x, name=''):", "body": "with tf.name_scope(name):x = tf.nn.sigmoid(x) * xreturn x", "docstring": "Swish function.\n\n See `Swish: a Self-Gated Activation Function `__.\n\n Parameters\n ----------\n x : Tensor\n input.\n name: str\n function name (optional).\n\n Returns\n -------\n Tensor\n A ``Tensor`` in the same type as ``x``.", "id": "f11204:m4"} {"signature": "def sign(x):", "body": "with tf.get_default_graph().gradient_override_map({\"\": \"\"}):return tf.sign(x, name='')", "docstring": "Sign function.\n\n Clip and binarize tensor using the straight through estimator (STE) for the gradient, usually be used for\n quantizing values in `Binarized Neural Networks`: https://arxiv.org/abs/1602.02830.\n\n Parameters\n ----------\n x : Tensor\n input.\n\n Examples\n --------\n >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.lrelu(x, 0.2), name='dense')\n\n Returns\n -------\n Tensor\n A ``Tensor`` in the same type as ``x``.\n\n References\n ----------\n - `Rectifier Nonlinearities Improve Neural Network Acoustic Models, Maas et al. (2013)`\n http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf\n\n - `BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. 
(2016)`\n https://arxiv.org/abs/1602.02830", "id": "f11204:m6"} {"signature": "def hard_tanh(x, name=''):", "body": "return tf.clip_by_value(x, -, , name=name)", "docstring": "Hard tanh activation function.\n\n Which is a ramp function with low bound of -1 and upper bound of 1, shortcut is `htanh`.\n\n Parameters\n ----------\n x : Tensor\n input.\n name : str\n The function name (optional).\n\n Returns\n -------\n Tensor\n A ``Tensor`` in the same type as ``x``.", "id": "f11204:m7"} {"signature": "@deprecated(date=\"\", instructions=\"\")def pixel_wise_softmax(x, name=''):", "body": "with tf.name_scope(name):return tf.nn.softmax(x)", "docstring": "Return the softmax outputs of images, every pixels have multiple label, the sum of a pixel is 1.\n\n Usually be used for image segmentation.\n\n Parameters\n ----------\n x : Tensor\n input.\n - For 2d image, 4D tensor (batch_size, height, weight, channel), where channel >= 2.\n - For 3d image, 5D tensor (batch_size, depth, height, weight, channel), where channel >= 2.\n name : str\n function name (optional)\n\n Returns\n -------\n Tensor\n A ``Tensor`` in the same type as ``x``.\n\n Examples\n --------\n >>> outputs = pixel_wise_softmax(network.outputs)\n >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5)\n\n References\n ----------\n - `tf.reverse `__", "id": "f11204:m8"} {"signature": "def discount_episode_rewards(rewards=None, gamma=, mode=):", "body": "if rewards is None:raise Exception(\"\")discounted_r = np.zeros_like(rewards, dtype=np.float32)running_add = for t in reversed(xrange(, rewards.size)):if mode == :if rewards[t] != : running_add = running_add = running_add * gamma + rewards[t]discounted_r[t] = running_addreturn discounted_r", "docstring": "Take 1D float array of rewards and compute discounted rewards for an\n episode. When encount a non-zero value, consider as the end a of an episode.\n\n Parameters\n ----------\n rewards : list\n List of rewards\n gamma : float\n Discounted factor\n mode : int\n Mode for computing the discount rewards.\n - If mode == 0, reset the discount process when encount a non-zero reward (Ping-pong game).\n - If mode == 1, would not reset the discount process.\n\n Returns\n --------\n list of float\n The discounted rewards.\n\n Examples\n ----------\n >>> rewards = np.asarray([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1])\n >>> gamma = 0.9\n >>> discount_rewards = tl.rein.discount_episode_rewards(rewards, gamma)\n >>> print(discount_rewards)\n [ 0.72899997 0.81 0.89999998 1. 0.72899997 0.81\n 0.89999998 1. 0.72899997 0.81 0.89999998 1. ]\n >>> discount_rewards = tl.rein.discount_episode_rewards(rewards, gamma, mode=1)\n >>> print(discount_rewards)\n [ 1.52110755 1.69011939 1.87791049 2.08656716 1.20729685 1.34144104\n 1.49048996 1.65610003 0.72899997 0.81 0.89999998 1. ]", "id": "f11205:m0"} {"signature": "def cross_entropy_reward_loss(logits, actions, rewards, name=None):", "body": "cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=actions, logits=logits, name=name)return tf.reduce_sum(tf.multiply(cross_entropy, rewards))", "docstring": "Calculate the loss for Policy Gradient Network.\n\n Parameters\n ----------\n logits : tensor\n The network outputs without softmax. 
This function implements softmax inside.\n actions : tensor or placeholder\n The agent actions.\n rewards : tensor or placeholder\n The rewards.\n\n Returns\n --------\n Tensor\n The TensorFlow loss function.\n\n Examples\n ----------\n >>> states_batch_pl = tf.placeholder(tf.float32, shape=[None, D])\n >>> network = InputLayer(states_batch_pl, name='input')\n >>> network = DenseLayer(network, n_units=H, act=tf.nn.relu, name='relu1')\n >>> network = DenseLayer(network, n_units=3, name='out')\n >>> probs = network.outputs\n >>> sampling_prob = tf.nn.softmax(probs)\n >>> actions_batch_pl = tf.placeholder(tf.int32, shape=[None])\n >>> discount_rewards_batch_pl = tf.placeholder(tf.float32, shape=[None])\n >>> loss = tl.rein.cross_entropy_reward_loss(probs, actions_batch_pl, discount_rewards_batch_pl)\n >>> train_op = tf.train.RMSPropOptimizer(learning_rate, decay_rate).minimize(loss)", "id": "f11205:m1"} {"signature": "def log_weight(probs, weights, name=''):", "body": "with tf.variable_scope(name):exp_v = tf.reduce_mean(tf.log(probs) * weights)return exp_v", "docstring": "Log weight.\n\n Parameters\n -----------\n probs : tensor\n If it is a network output, usually we should scale it to [0, 1] via softmax.\n weights : tensor\n The weights.\n\n Returns\n --------\n Tensor\n The Tensor after appling the log weighted expression.", "id": "f11205:m2"} {"signature": "def choice_action_by_probs(probs=(, ), action_list=None):", "body": "if action_list is None:n_action = len(probs)action_list = np.arange(n_action)else:if len(action_list) != len(probs):raise Exception(\"\")return np.random.choice(action_list, p=probs)", "docstring": "Choice and return an an action by given the action probability distribution.\n\n Parameters\n ------------\n probs : list of float.\n The probability distribution of all actions.\n action_list : None or a list of int or others\n A list of action in integer, string or others. 
If None, returns an integer range between 0 and len(probs)-1.\n\n Returns\n --------\n float int or str\n The chosen action.\n\n Examples\n ----------\n >>> for _ in range(5):\n >>> a = choice_action_by_probs([0.2, 0.4, 0.4])\n >>> print(a)\n 0\n 1\n 1\n 2\n 1\n >>> for _ in range(3):\n >>> a = choice_action_by_probs([0.5, 0.5], ['a', 'b'])\n >>> print(a)\n a\n b\n b", "id": "f11205:m3"} {"signature": "def read_image(image, path=''):", "body": "return imageio.imread(os.path.join(path, image))", "docstring": "Read one image.\n\n Parameters\n -----------\n image : str\n The image file name.\n path : str\n The image folder path.\n\n Returns\n -------\n numpy.array\n The image.", "id": "f11206:m0"} {"signature": "def read_images(img_list, path='', n_threads=, printable=True):", "body": "imgs = []for idx in range(, len(img_list), n_threads):b_imgs_list = img_list[idx:idx + n_threads]b_imgs = tl.prepro.threading_data(b_imgs_list, fn=read_image, path=path)imgs.extend(b_imgs)if printable:tl.logging.info('' % (len(imgs), path))return imgs", "docstring": "Returns all images in list by given path and name of each image file.\n\n Parameters\n -------------\n img_list : list of str\n The image file names.\n path : str\n The image folder path.\n n_threads : int\n The number of threads to read image.\n printable : boolean\n Whether to print information when reading images.\n\n Returns\n -------\n list of numpy.array\n The images.", "id": "f11206:m1"} {"signature": "def save_image(image, image_path=''):", "body": "try: imageio.imwrite(image_path, image)except Exception: imageio.imwrite(image_path, image[:, :, ])", "docstring": "Save a image.\n\n Parameters\n -----------\n image : numpy array\n [w, h, c]\n image_path : str\n path", "id": "f11206:m2"} {"signature": "def save_images(images, size, image_path=''):", "body": "if len(images.shape) == : images = images[:, :, :, np.newaxis]def merge(images, size):h, w = images.shape[], images.shape[]img = np.zeros((h * size[], w * size[], ), dtype=images.dtype)for idx, image in enumerate(images):i = idx % size[]j = idx // size[]img[j * h:j * h + h, i * w:i * w + w, :] = imagereturn imgdef imsave(images, size, path):if np.max(images) <= and (- <= np.min(images) < ):images = ((images + ) * ).astype(np.uint8)elif np.max(images) <= and np.min(images) >= :images = (images * ).astype(np.uint8)return imageio.imwrite(path, merge(images, size))if len(images) > size[] * size[]:raise AssertionError(\"\".format(len(images)))return imsave(images, size, image_path)", "docstring": "Save multiple images into one single image.\n\n Parameters\n -----------\n images : numpy array\n (batch, w, h, c)\n size : list of 2 ints\n row and column number.\n number of images should be equal or less than size[0] * size[1]\n image_path : str\n save path\n\n Examples\n ---------\n >>> import numpy as np\n >>> import tensorlayer as tl\n >>> images = np.random.rand(64, 100, 100, 3)\n >>> tl.visualize.save_images(images, [8, 8], 'temp.png')", "id": "f11206:m3"} {"signature": "def draw_boxes_and_labels_to_image(image, classes, coords, scores, classes_list, is_center=True, is_rescale=True, save_name=None):", "body": "if len(coords) != len(classes):raise AssertionError(\"\")if len(scores) > and len(scores) != len(classes):raise AssertionError(\"\")image = image.copy()imh, imw = image.shape[:]thick = int((imh + imw) // )for i, _v in enumerate(coords):if is_center:x, y, x2, y2 = tl.prepro.obj_box_coord_centroid_to_upleft_butright(coords[i])else:x, y, x2, y2 = coords[i]if is_rescale: x, y, x2, y2 = 
tl.prepro.obj_box_coord_scale_to_pixelunit([x, y, x2, y2], (imh, imw))cv2.rectangle(image,(int(x), int(y)),(int(x2), int(y2)), [, , ],thick)cv2.putText(image,classes_list[classes[i]] + ((\"\" % (scores[i])) if (len(scores) != ) else \"\"),(int(x), int(y)), , * imh, [, , ], int(thick / ) + ) if save_name is not None:save_image(image, save_name)return image", "docstring": "Draw bboxes and class labels on image. Return or save the image with bboxes, example in the docs of ``tl.prepro``.\n\n Parameters\n -----------\n image : numpy.array\n The RGB image [height, width, channel].\n classes : list of int\n A list of class ID (int).\n coords : list of int\n A list of list for coordinates.\n - Should be [x, y, x2, y2] (up-left and botton-right format)\n - If [x_center, y_center, w, h] (set is_center to True).\n scores : list of float\n A list of score (float). (Optional)\n classes_list : list of str\n for converting ID to string on image.\n is_center : boolean\n Whether the coordinates is [x_center, y_center, w, h]\n - If coordinates are [x_center, y_center, w, h], set it to True for converting it to [x, y, x2, y2] (up-left and botton-right) internally.\n - If coordinates are [x1, x2, y1, y2], set it to False.\n is_rescale : boolean\n Whether to rescale the coordinates from pixel-unit format to ratio format.\n - If True, the input coordinates are the portion of width and high, this API will scale the coordinates to pixel unit internally.\n - If False, feed the coordinates with pixel unit format.\n save_name : None or str\n The name of image file (i.e. image.png), if None, not to save image.\n\n Returns\n -------\n numpy.array\n The saved image.\n\n References\n -----------\n - OpenCV rectangle and putText.\n - `scikit-image `__.", "id": "f11206:m4"} {"signature": "def draw_mpii_pose_to_image(image, poses, save_name=''):", "body": "image = image.copy()imh, imw = image.shape[:]thick = int((imh + imw) // )radius = int(thick * )if image.max() < :image = image * for people in poses:joint_pos = people['']lines = [[(, ), [, , ]],[(, ), [, , ]],[(, ), [, , ]], [(, ), [, , ]],[(, ), [, , ]],[(, ), [, , ]], [(, ), [, , ]],[(, ), [, , ]], [(, ), [, , ]], [(, ), [, , ]],[(, ), [, , ]],[(, ), [, , ]], [(, ), [, , ]],[(, ), [, , ]],[(, ), [, , ]] ]for line in lines:start, end = line[]if (start in joint_pos) and (end in joint_pos):cv2.line(image,(int(joint_pos[start][]), int(joint_pos[start][])),(int(joint_pos[end][]), int(joint_pos[end][])), line[],thick)for pos in joint_pos.items():_, pos_loc = pos pos_loc = (int(pos_loc[]), int(pos_loc[]))cv2.circle(image, center=pos_loc, radius=radius, color=(, , ), thickness=-)head_rect = people['']if head_rect: cv2.rectangle(image,(int(head_rect[]), int(head_rect[])),(int(head_rect[]), int(head_rect[])), [, , ],thick)if save_name is not None:save_image(image, save_name)return image", "docstring": "Draw people(s) into image using MPII dataset format as input, return or save the result image.\n\n This is an experimental API, can be changed in the future.\n\n Parameters\n -----------\n image : numpy.array\n The RGB image [height, width, channel].\n poses : list of dict\n The people(s) annotation in MPII format, see ``tl.files.load_mpii_pose_dataset``.\n save_name : None or str\n The name of image file (i.e. 
image.png), if None, not to save image.\n\n Returns\n --------\n numpy.array\n The saved image.\n\n Examples\n --------\n >>> import pprint\n >>> import tensorlayer as tl\n >>> img_train_list, ann_train_list, img_test_list, ann_test_list = tl.files.load_mpii_pose_dataset()\n >>> image = tl.vis.read_image(img_train_list[0])\n >>> tl.vis.draw_mpii_pose_to_image(image, ann_train_list[0], 'image.png')\n >>> pprint.pprint(ann_train_list[0])\n\n References\n -----------\n - `MPII Keyponts and ID `__", "id": "f11206:m5"} {"signature": "def frame(I=None, second=, saveable=True, name='', cmap=None, fig_idx=):", "body": "import matplotlib.pyplot as pltif saveable is False:plt.ion()plt.figure(fig_idx) if len(I.shape) and I.shape[-] == : I = I[:, :, ]plt.imshow(I, cmap)plt.title(name)if saveable:plt.savefig(name + '', format='')else:plt.draw()plt.pause(second)", "docstring": "Display a frame. Make sure OpenAI Gym render() is disable before using it.\n\n Parameters\n ----------\n I : numpy.array\n The image.\n second : int\n The display second(s) for the image(s), if saveable is False.\n saveable : boolean\n Save or plot the figure.\n name : str\n A name to save the image, if saveable is True.\n cmap : None or str\n 'gray' for greyscale, None for default, etc.\n fig_idx : int\n matplotlib figure index.\n\n Examples\n --------\n >>> env = gym.make(\"Pong-v0\")\n >>> observation = env.reset()\n >>> tl.visualize.frame(observation)", "id": "f11206:m6"} {"signature": "def CNN2d(CNN=None, second=, saveable=True, name='', fig_idx=):", "body": "import matplotlib.pyplot as pltn_mask = CNN.shape[]n_row = CNN.shape[]n_col = CNN.shape[]n_color = CNN.shape[]row = int(np.sqrt(n_mask))col = int(np.ceil(n_mask / row))plt.ion() fig = plt.figure(fig_idx)count = for _ir in range(, row + ):for _ic in range(, col + ):if count > n_mask:breakfig.add_subplot(col, row, count)if n_color == :plt.imshow(np.reshape(CNN[:, :, :, count - ], (n_row, n_col)), cmap='', interpolation=\"\")elif n_color == :plt.imshow(np.reshape(CNN[:, :, :, count - ], (n_row, n_col, n_color)), cmap='', interpolation=\"\")else:raise Exception(\"\")plt.gca().xaxis.set_major_locator(plt.NullLocator()) plt.gca().yaxis.set_major_locator(plt.NullLocator())count = count + if saveable:plt.savefig(name + '', format='')else:plt.draw()plt.pause(second)", "docstring": "Display a group of RGB or Greyscale CNN masks.\n\n Parameters\n ----------\n CNN : numpy.array\n The image. 
e.g: 64 5x5 RGB images can be (5, 5, 3, 64).\n second : int\n The display second(s) for the image(s), if saveable is False.\n saveable : boolean\n Save or plot the figure.\n name : str\n A name to save the image, if saveable is True.\n fig_idx : int\n The matplotlib figure index.\n\n Examples\n --------\n >>> tl.visualize.CNN2d(network.all_params[0].eval(), second=10, saveable=True, name='cnn1_mnist', fig_idx=2012)", "id": "f11206:m7"} {"signature": "def images2d(images=None, second=, saveable=True, name='', dtype=None, fig_idx=):", "body": "import matplotlib.pyplot as pltif dtype:images = np.asarray(images, dtype=dtype)n_mask = images.shape[]n_row = images.shape[]n_col = images.shape[]n_color = images.shape[]row = int(np.sqrt(n_mask))col = int(np.ceil(n_mask / row))plt.ion() fig = plt.figure(fig_idx)count = for _ir in range(, row + ):for _ic in range(, col + ):if count > n_mask:breakfig.add_subplot(col, row, count)if n_color == :plt.imshow(np.reshape(images[count - , :, :], (n_row, n_col)), cmap='', interpolation=\"\")elif n_color == :plt.imshow(images[count - , :, :], cmap='', interpolation=\"\")else:raise Exception(\"\")plt.gca().xaxis.set_major_locator(plt.NullLocator()) plt.gca().yaxis.set_major_locator(plt.NullLocator())count = count + if saveable:plt.savefig(name + '', format='')else:plt.draw()plt.pause(second)", "docstring": "Display a group of RGB or Greyscale images.\n\n Parameters\n ----------\n images : numpy.array\n The images.\n second : int\n The display second(s) for the image(s), if saveable is False.\n saveable : boolean\n Save or plot the figure.\n name : str\n A name to save the image, if saveable is True.\n dtype : None or numpy data type\n The data type for displaying the images.\n fig_idx : int\n matplotlib figure index.\n\n Examples\n --------\n >>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)\n >>> tl.visualize.images2d(X_train[0:100,:,:,:], second=10, saveable=False, name='cifar10', dtype=np.uint8, fig_idx=20212)", "id": "f11206:m8"} {"signature": "def tsne_embedding(embeddings, reverse_dictionary, plot_only=, second=, saveable=False, name='', fig_idx=):", "body": "import matplotlib.pyplot as pltdef plot_with_labels(low_dim_embs, labels, figsize=(, ), second=, saveable=True, name='', fig_idx=):if low_dim_embs.shape[] < len(labels):raise AssertionError(\"\")if saveable is False:plt.ion()plt.figure(fig_idx)plt.figure(figsize=figsize) for i, label in enumerate(labels):x, y = low_dim_embs[i, :]plt.scatter(x, y)plt.annotate(label, xy=(x, y), xytext=(, ), textcoords='', ha='', va='')if saveable:plt.savefig(name + '', format='')else:plt.draw()plt.pause(second)try:from sklearn.manifold import TSNEfrom six.moves import xrangetsne = TSNE(perplexity=, n_components=, init='', n_iter=)low_dim_embs = tsne.fit_transform(embeddings[:plot_only, :])labels = [reverse_dictionary[i] for i in xrange(plot_only)]plot_with_labels(low_dim_embs, labels, second=second, saveable=saveable, name=name, fig_idx=fig_idx)except ImportError:_err = \"\"tl.logging.error(_err)raise ImportError(_err)", "docstring": "Visualize the embeddings by using t-SNE.\n\n Parameters\n ----------\n embeddings : numpy.array\n The embedding matrix.\n reverse_dictionary : dictionary\n id_to_word, mapping id to unique word.\n plot_only : int\n The number of examples to plot, choice the most common words.\n second : int\n The display second(s) for the image(s), if saveable is False.\n saveable : boolean\n Save or plot the figure.\n name : str\n A name to save the 
image, if saveable is True.\n fig_idx : int\n matplotlib figure index.\n\n Examples\n --------\n >>> see 'tutorial_word2vec_basic.py'\n >>> final_embeddings = normalized_embeddings.eval()\n >>> tl.visualize.tsne_embedding(final_embeddings, labels, reverse_dictionary,\n ... plot_only=500, second=5, saveable=False, name='tsne')", "id": "f11206:m9"} {"signature": "def draw_weights(W=None, second=, saveable=True, shape=None, name='', fig_idx=):", "body": "if shape is None:shape = [, ]import matplotlib.pyplot as pltif saveable is False:plt.ion()fig = plt.figure(fig_idx) n_units = W.shape[]num_r = int(np.sqrt(n_units)) num_c = int(np.ceil(n_units / num_r))count = int()for _row in range(, num_r + ):for _col in range(, num_c + ):if count > n_units:breakfig.add_subplot(num_r, num_c, count)feature = W[:, count - ] / np.sqrt((W[:, count - ]**).sum())plt.imshow(np.reshape(feature, (shape[], shape[])), cmap='', interpolation=\"\") plt.gca().xaxis.set_major_locator(plt.NullLocator()) plt.gca().yaxis.set_major_locator(plt.NullLocator())count = count + if saveable:plt.savefig(name + '', format='')else:plt.draw()plt.pause(second)", "docstring": "Visualize every columns of the weight matrix to a group of Greyscale img.\n\n Parameters\n ----------\n W : numpy.array\n The weight matrix\n second : int\n The display second(s) for the image(s), if saveable is False.\n saveable : boolean\n Save or plot the figure.\n shape : a list with 2 int or None\n The shape of feature image, MNIST is [28, 80].\n name : a string\n A name to save the image, if saveable is True.\n fig_idx : int\n matplotlib figure index.\n\n Examples\n --------\n >>> tl.visualize.draw_weights(network.all_params[0].eval(), second=10, saveable=True, name='weight_of_1st_layer', fig_idx=2012)", "id": "f11206:m10"} {"signature": "def generate_skip_gram_batch(data, batch_size, num_skips, skip_window, data_index=):", "body": "if batch_size % num_skips != :raise Exception(\"\")if num_skips > * skip_window:raise Exception(\"\")batch = np.ndarray(shape=(batch_size), dtype=np.int32)labels = np.ndarray(shape=(batch_size, ), dtype=np.int32)span = * skip_window + buffer = collections.deque(maxlen=span)for _ in range(span):buffer.append(data[data_index])data_index = (data_index + ) % len(data)for i in range(batch_size // num_skips):target = skip_window targets_to_avoid = [skip_window]for j in range(num_skips):while target in targets_to_avoid:target = random.randint(, span - )targets_to_avoid.append(target)batch[i * num_skips + j] = buffer[skip_window]labels[i * num_skips + j, ] = buffer[target]buffer.append(data[data_index])data_index = (data_index + ) % len(data)return batch, labels, data_index", "docstring": "Generate a training batch for the Skip-Gram model.\n\n See `Word2Vec example `__.\n\n Parameters\n ----------\n data : list of data\n To present context, usually a list of integers.\n batch_size : int\n Batch size to return.\n num_skips : int\n How many times to reuse an input to generate a label.\n skip_window : int\n How many words to consider left and right.\n data_index : int\n Index of the context location. 
This code use `data_index` to instead of yield like ``tl.iterate``.\n\n Returns\n -------\n batch : list of data\n Inputs.\n labels : list of data\n Labels\n data_index : int\n Index of the context location.\n\n Examples\n --------\n Setting num_skips=2, skip_window=1, use the right and left words.\n In the same way, num_skips=4, skip_window=2 means use the nearby 4 words.\n\n >>> data = [1,2,3,4,5,6,7,8,9,10,11]\n >>> batch, labels, data_index = tl.nlp.generate_skip_gram_batch(data=data, batch_size=8, num_skips=2, skip_window=1, data_index=0)\n >>> print(batch)\n [2 2 3 3 4 4 5 5]\n >>> print(labels)\n [[3]\n [1]\n [4]\n [2]\n [5]\n [3]\n [4]\n [6]]", "id": "f11210:m0"} {"signature": "def sample(a=None, temperature=):", "body": "if a is None:raise Exception(\"\")b = np.copy(a)try:if temperature == :return np.argmax(np.random.multinomial(, a, ))if temperature is None:return np.argmax(a)else:a = np.log(a) / temperaturea = np.exp(a) / np.sum(np.exp(a))return np.argmax(np.random.multinomial(, a, ))except Exception:message = \"\"warnings.warn(message, Warning)return np.argmax(np.random.multinomial(, b, ))", "docstring": "Sample an index from a probability array.\n\n Parameters\n ----------\n a : list of float\n List of probabilities.\n temperature : float or None\n The higher the more uniform. When a = [0.1, 0.2, 0.7],\n - temperature = 0.7, the distribution will be sharpen [0.05048273, 0.13588945, 0.81362782]\n - temperature = 1.0, the distribution will be the same [0.1, 0.2, 0.7]\n - temperature = 1.5, the distribution will be filtered [0.16008435, 0.25411807, 0.58579758]\n - If None, it will be ``np.argmax(a)``\n\n Notes\n ------\n - No matter what is the temperature and input list, the sum of all probabilities will be one. Even if input list = [1, 100, 200], the sum of all probabilities will still be one.\n - For large vocabulary size, choice a higher temperature or ``tl.nlp.sample_top`` to avoid error.", "id": "f11210:m1"} {"signature": "def sample_top(a=None, top_k=):", "body": "if a is None:a = []idx = np.argpartition(a, -top_k)[-top_k:]probs = a[idx]probs = probs / np.sum(probs)choice = np.random.choice(idx, p=probs)return choice", "docstring": "Sample from ``top_k`` probabilities.\n\n Parameters\n ----------\n a : list of float\n List of probabilities.\n top_k : int\n Number of candidates to be considered.", "id": "f11210:m2"} {"signature": "def process_sentence(sentence, start_word=\"\", end_word=\"\"):", "body": "if start_word is not None:process_sentence = [start_word]else:process_sentence = []process_sentence.extend(nltk.tokenize.word_tokenize(sentence.lower()))if end_word is not None:process_sentence.append(end_word)return process_sentence", "docstring": "Seperate a sentence string into a list of string words, add start_word and end_word,\n see ``create_vocab()`` and ``tutorial_tfrecord3.py``.\n\n Parameters\n ----------\n sentence : str\n A sentence.\n start_word : str or None\n The start word. If None, no start word will be appended.\n end_word : str or None\n The end word. 
If None, no end word will be appended.\n\n Returns\n ---------\n list of str\n A list of strings that separated into words.\n\n Examples\n -----------\n >>> c = \"how are you?\"\n >>> c = tl.nlp.process_sentence(c)\n >>> print(c)\n ['', 'how', 'are', 'you', '?', '']\n\n Notes\n -------\n - You have to install the following package.\n - `Installing NLTK `__\n - `Installing NLTK data `__", "id": "f11210:m3"} {"signature": "def create_vocab(sentences, word_counts_output_file, min_word_count=):", "body": "tl.logging.info(\"\")counter = Counter()for c in sentences:counter.update(c)tl.logging.info(\"\" % len(counter))word_counts = [x for x in counter.items() if x[] >= min_word_count]word_counts.sort(key=lambda x: x[], reverse=True)word_counts = [(\"\", )] + word_counts tl.logging.info(\"\" % len(word_counts))with tf.gfile.FastGFile(word_counts_output_file, \"\") as f:f.write(\"\".join([\"\" % (w, c) for w, c in word_counts]))tl.logging.info(\"\" % word_counts_output_file)reverse_vocab = [x[] for x in word_counts]unk_id = len(reverse_vocab)vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])vocab = SimpleVocabulary(vocab_dict, unk_id)return vocab", "docstring": "Creates the vocabulary of word to word_id.\n\n See ``tutorial_tfrecord3.py``.\n\n The vocabulary is saved to disk in a text file of word counts. The id of each\n word in the file is its corresponding 0-based line number.\n\n Parameters\n ------------\n sentences : list of list of str\n All sentences for creating the vocabulary.\n word_counts_output_file : str\n The file name.\n min_word_count : int\n Minimum number of occurrences for a word.\n\n Returns\n --------\n :class:`SimpleVocabulary`\n The simple vocabulary object, see :class:`Vocabulary` for more.\n\n Examples\n --------\n Pre-process sentences\n\n >>> captions = [\"one two , three\", \"four five five\"]\n >>> processed_capts = []\n >>> for c in captions:\n >>> c = tl.nlp.process_sentence(c, start_word=\"\", end_word=\"\")\n >>> processed_capts.append(c)\n >>> print(processed_capts)\n ...[['', 'one', 'two', ',', 'three', ''], ['', 'four', 'five', 'five', '']]\n\n Create vocabulary\n\n >>> tl.nlp.create_vocab(processed_capts, word_counts_output_file='vocab.txt', min_word_count=1)\n Creating vocabulary.\n Total words: 8\n Words in vocabulary: 8\n Wrote vocabulary file: vocab.txt\n\n Get vocabulary object\n\n >>> vocab = tl.nlp.Vocabulary('vocab.txt', start_word=\"\", end_word=\"\", unk_word=\"\")\n INFO:tensorflow:Initializing vocabulary from file: vocab.txt\n [TL] Vocabulary from vocab.txt : \n vocabulary with 10 words (includes start_word, end_word, unk_word)\n start_id: 2\n end_id: 3\n unk_id: 9\n pad_id: 0", "id": "f11210:m4"} {"signature": "def simple_read_words(filename=\"\"):", "body": "with open(filename, \"\") as f:words = f.read()return words", "docstring": "Read context from file without any preprocessing.\n\n Parameters\n ----------\n filename : str\n A file path (like .txt file)\n\n Returns\n --------\n str\n The context in a string.", "id": "f11210:m5"} {"signature": "def read_words(filename=\"\", replace=None):", "body": "if replace is None:replace = ['', '']with tf.gfile.GFile(filename, \"\") as f:try: context_list = f.read().replace(*replace).split()except Exception: f.seek()replace = [x.encode('') for x in replace]context_list = f.read().replace(*replace).split()return context_list", "docstring": "Read list format context from a file.\n\n For customized read_words method, see ``tutorial_generate_text.py``.\n\n Parameters\n ----------\n filename : 
str\n a file path.\n replace : list of str\n replace original string by target string.\n\n Returns\n -------\n list of str\n The context in a list (split using space).", "id": "f11210:m6"} {"signature": "def read_analogies_file(eval_file='', word2id=None):", "body": "if word2id is None:word2id = {}questions = []questions_skipped = with open(eval_file, \"\") as analogy_f:for line in analogy_f:if line.startswith(b\"\"): continuewords = line.strip().lower().split(b\"\") ids = [word2id.get(w.strip()) for w in words]if None in ids or len(ids) != :questions_skipped += else:questions.append(np.array(ids))tl.logging.info(\"\" % eval_file)tl.logging.info(\"\", len(questions))tl.logging.info(\"\", questions_skipped)analogy_questions = np.array(questions, dtype=np.int32)return analogy_questions", "docstring": "Reads through an analogy question file, return its id format.\n\n Parameters\n ----------\n eval_file : str\n The file name.\n word2id : dictionary\n a dictionary that maps word to ID.\n\n Returns\n --------\n numpy.array\n A ``[n_examples, 4]`` numpy array containing the analogy question's word IDs.\n\n Examples\n ---------\n The file should be in this format\n\n >>> : capital-common-countries\n >>> Athens Greece Baghdad Iraq\n >>> Athens Greece Bangkok Thailand\n >>> Athens Greece Beijing China\n >>> Athens Greece Berlin Germany\n >>> Athens Greece Bern Switzerland\n >>> Athens Greece Cairo Egypt\n >>> Athens Greece Canberra Australia\n >>> Athens Greece Hanoi Vietnam\n >>> Athens Greece Havana Cuba\n\n Get the tokenized analogy question data\n\n >>> words = tl.files.load_matt_mahoney_text8_dataset()\n >>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True)\n >>> analogy_questions = tl.nlp.read_analogies_file(eval_file='questions-words.txt', word2id=dictionary)\n >>> print(analogy_questions)\n [[ 3068 1248 7161 1581]\n [ 3068 1248 28683 5642]\n [ 3068 1248 3878 486]\n ...,\n [ 1216 4309 19982 25506]\n [ 1216 4309 3194 8650]\n [ 1216 4309 140 312]]", "id": "f11210:m7"} {"signature": "def build_vocab(data):", "body": "counter = collections.Counter(data)count_pairs = sorted(counter.items(), key=lambda x: (-x[], x[]))words, _ = list(zip(*count_pairs))word_to_id = dict(zip(words, range(len(words))))return word_to_id", "docstring": "Build vocabulary.\n\n Given the context in list format.\n Return the vocabulary, which is a dictionary for word to id.\n e.g. {'campbell': 2587, 'atlantic': 2247, 'aoun': 6746 .... }\n\n Parameters\n ----------\n data : list of str\n The context in list format\n\n Returns\n --------\n dictionary\n that maps word to unique ID. e.g. {'campbell': 2587, 'atlantic': 2247, 'aoun': 6746 .... 
}\n\n References\n ---------------\n - `tensorflow.models.rnn.ptb.reader `_\n\n Examples\n --------\n >>> data_path = os.getcwd() + '/simple-examples/data'\n >>> train_path = os.path.join(data_path, \"ptb.train.txt\")\n >>> word_to_id = build_vocab(read_txt_words(train_path))", "id": "f11210:m8"} {"signature": "def build_reverse_dictionary(word_to_id):", "body": "reverse_dictionary = dict(zip(word_to_id.values(), word_to_id.keys()))return reverse_dictionary", "docstring": "Given a dictionary that maps word to integer id.\n Returns a reverse dictionary that maps a id to word.\n\n Parameters\n ----------\n word_to_id : dictionary\n that maps word to ID.\n\n Returns\n --------\n dictionary\n A dictionary that maps IDs to words.", "id": "f11210:m9"} {"signature": "def build_words_dataset(words=None, vocabulary_size=, printable=True, unk_key=''):", "body": "if words is None:raise Exception(\"\")count = [[unk_key, -]]count.extend(collections.Counter(words).most_common(vocabulary_size - ))dictionary = dict()for word, _ in count:dictionary[word] = len(dictionary)data = list()unk_count = for word in words:if word in dictionary:index = dictionary[word]else:index = unk_count += data.append(index)count[][] = unk_countreverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))if printable:tl.logging.info('' % len(collections.Counter(words).keys()))tl.logging.info(''.format(vocabulary_size))if len(collections.Counter(words).keys()) < vocabulary_size:raise Exception(\"\")return data, count, dictionary, reverse_dictionary", "docstring": "Build the words dictionary and replace rare words with 'UNK' token.\n The most common word has the smallest integer id.\n\n Parameters\n ----------\n words : list of str or byte\n The context in list format. You may need to do preprocessing on the words, such as lower case, remove marks etc.\n vocabulary_size : int\n The maximum vocabulary size, limiting the vocabulary size. Then the script replaces rare words with 'UNK' token.\n printable : boolean\n Whether to print the read vocabulary size of the given words.\n unk_key : str\n Represent the unknown words.\n\n Returns\n --------\n data : list of int\n The context in a list of ID.\n count : list of tuple and list\n Pair words and IDs.\n - count[0] is a list : the number of rare words\n - count[1:] are tuples : the number of occurrence of each word\n - e.g. 
[['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)]\n dictionary : dictionary\n It is `word_to_id` that maps word to ID.\n reverse_dictionary : a dictionary\n It is `id_to_word` that maps ID to word.\n\n Examples\n --------\n >>> words = tl.files.load_matt_mahoney_text8_dataset()\n >>> vocabulary_size = 50000\n >>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size)\n\n References\n -----------------\n - `tensorflow/examples/tutorials/word2vec/word2vec_basic.py `__", "id": "f11210:m10"} {"signature": "def words_to_word_ids(data=None, word_to_id=None, unk_key=''):", "body": "if data is None:raise Exception(\"\")if word_to_id is None:raise Exception(\"\")word_ids = []for word in data:if word_to_id.get(word) is not None:word_ids.append(word_to_id[word])else:word_ids.append(word_to_id[unk_key])return word_ids", "docstring": "Convert a list of string (words) to IDs.\n\n Parameters\n ----------\n data : list of string or byte\n The context in list format\n word_to_id : a dictionary\n that maps word to ID.\n unk_key : str\n Represent the unknown words.\n\n Returns\n --------\n list of int\n A list of IDs to represent the context.\n\n Examples\n --------\n >>> words = tl.files.load_matt_mahoney_text8_dataset()\n >>> vocabulary_size = 50000\n >>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True)\n >>> context = [b'hello', b'how', b'are', b'you']\n >>> ids = tl.nlp.words_to_word_ids(words, dictionary)\n >>> context = tl.nlp.word_ids_to_words(ids, reverse_dictionary)\n >>> print(ids)\n [6434, 311, 26, 207]\n >>> print(context)\n [b'hello', b'how', b'are', b'you']\n\n References\n ---------------\n - `tensorflow.models.rnn.ptb.reader `__", "id": "f11210:m11"} {"signature": "def word_ids_to_words(data, id_to_word):", "body": "return [id_to_word[i] for i in data]", "docstring": "Convert a list of integer to strings (words).\n\n Parameters\n ----------\n data : list of int\n The context in list format.\n id_to_word : dictionary\n a dictionary that maps ID to word.\n\n Returns\n --------\n list of str\n A list of string or byte to represent the context.\n\n Examples\n ---------\n >>> see ``tl.nlp.words_to_word_ids``", "id": "f11210:m12"} {"signature": "def save_vocab(count=None, name=''):", "body": "if count is None:count = []pwd = os.getcwd()vocabulary_size = len(count)with open(os.path.join(pwd, name), \"\") as f:for i in xrange(vocabulary_size):f.write(\"\" % (tf.compat.as_text(count[i][]), count[i][]))tl.logging.info(\"\" % (vocabulary_size, name, pwd))", "docstring": "Save the vocabulary to a file so the model can be reloaded.\n\n Parameters\n ----------\n count : a list of tuple and list\n count[0] is a list : the number of rare words,\n count[1:] are tuples : the number of occurrence of each word,\n e.g. 
[['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)]\n\n Examples\n ---------\n >>> words = tl.files.load_matt_mahoney_text8_dataset()\n >>> vocabulary_size = 50000\n >>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True)\n >>> tl.nlp.save_vocab(count, name='vocab_text8.txt')\n >>> vocab_text8.txt\n UNK 418391\n the 1061396\n of 593677\n and 416629\n one 411764\n in 372201\n a 325873\n to 316376", "id": "f11210:m13"} {"signature": "def basic_tokenizer(sentence, _WORD_SPLIT=re.compile(b\"\")):", "body": "words = []sentence = tf.compat.as_bytes(sentence)for space_separated_fragment in sentence.strip().split():words.extend(re.split(_WORD_SPLIT, space_separated_fragment))return [w for w in words if w]", "docstring": "Very basic tokenizer: split the sentence into a list of tokens.\n\n Parameters\n -----------\n sentence : tensorflow.python.platform.gfile.GFile Object\n _WORD_SPLIT : regular expression for word spliting.\n\n\n Examples\n --------\n >>> see create_vocabulary\n >>> from tensorflow.python.platform import gfile\n >>> train_path = \"wmt/giga-fren.release2\"\n >>> with gfile.GFile(train_path + \".en\", mode=\"rb\") as f:\n >>> for line in f:\n >>> tokens = tl.nlp.basic_tokenizer(line)\n >>> tl.logging.info(tokens)\n >>> exit()\n [b'Changing', b'Lives', b'|', b'Changing', b'Society', b'|', b'How',\n b'It', b'Works', b'|', b'Technology', b'Drives', b'Change', b'Home',\n b'|', b'Concepts', b'|', b'Teachers', b'|', b'Search', b'|', b'Overview',\n b'|', b'Credits', b'|', b'HHCC', b'Web', b'|', b'Reference', b'|',\n b'Feedback', b'Virtual', b'Museum', b'of', b'Canada', b'Home', b'Page']\n\n References\n ----------\n - Code from ``/tensorflow/models/rnn/translation/data_utils.py``", "id": "f11210:m14"} {"signature": "def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size, tokenizer=None, normalize_digits=True,_DIGIT_RE=re.compile(br\"\"), _START_VOCAB=None):", "body": "if _START_VOCAB is None:_START_VOCAB = [b\"\", b\"\", b\"\", b\"\"]if not gfile.Exists(vocabulary_path):tl.logging.info(\"\" % (vocabulary_path, data_path))vocab = {}with gfile.GFile(data_path, mode=\"\") as f:counter = for line in f:counter += if counter % == :tl.logging.info(\"\" % counter)tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)for w in tokens:word = re.sub(_DIGIT_RE, b\"\", w) if normalize_digits else wif word in vocab:vocab[word] += else:vocab[word] = vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)if len(vocab_list) > max_vocabulary_size:vocab_list = vocab_list[:max_vocabulary_size]with gfile.GFile(vocabulary_path, mode=\"\") as vocab_file:for w in vocab_list:vocab_file.write(w + b\"\")else:tl.logging.info(\"\" % (vocabulary_path, data_path))", "docstring": "r\"\"\"Create vocabulary file (if it does not exist yet) from data file.\n\n Data file is assumed to contain one sentence per line. 
Each sentence is\n tokenized and digits are normalized (if normalize_digits is set).\n Vocabulary contains the most-frequent tokens up to max_vocabulary_size.\n We write it to vocabulary_path in a one-token-per-line format, so that later\n token in the first line gets id=0, second line gets id=1, and so on.\n\n Parameters\n -----------\n vocabulary_path : str\n Path where the vocabulary will be created.\n data_path : str\n Data file that will be used to create vocabulary.\n max_vocabulary_size : int\n Limit on the size of the created vocabulary.\n tokenizer : function\n A function to use to tokenize each data sentence. If None, basic_tokenizer will be used.\n normalize_digits : boolean\n If true, all digits are replaced by `0`.\n _DIGIT_RE : regular expression function\n Default is ``re.compile(br\"\\d\")``.\n _START_VOCAB : list of str\n The pad, go, eos and unk token, default is ``[b\"_PAD\", b\"_GO\", b\"_EOS\", b\"_UNK\"]``.\n\n References\n ----------\n - Code from ``/tensorflow/models/rnn/translation/data_utils.py``", "id": "f11210:m15"} {"signature": "def initialize_vocabulary(vocabulary_path):", "body": "if gfile.Exists(vocabulary_path):rev_vocab = []with gfile.GFile(vocabulary_path, mode=\"\") as f:rev_vocab.extend(f.readlines())rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])return vocab, rev_vocabelse:raise ValueError(\"\", vocabulary_path)", "docstring": "Initialize vocabulary from file, return the `word_to_id` (dictionary)\n and `id_to_word` (list).\n\n We assume the vocabulary is stored one-item-per-line, so a file will result in a vocabulary {\"dog\": 0, \"cat\": 1}, and this function will also return the reversed-vocabulary [\"dog\", \"cat\"].\n\n Parameters\n -----------\n vocabulary_path : str\n Path to the file containing the vocabulary.\n\n Returns\n --------\n vocab : dictionary\n a dictionary that maps word to ID.\n rev_vocab : list of int\n a list that maps ID to word.\n\n Examples\n ---------\n >>> Assume 'test' contains\n dog\n cat\n bird\n >>> vocab, rev_vocab = tl.nlp.initialize_vocabulary(\"test\")\n >>> print(vocab)\n >>> {b'cat': 1, b'dog': 0, b'bird': 2}\n >>> print(rev_vocab)\n >>> [b'dog', b'cat', b'bird']\n\n Raises\n -------\n ValueError : if the provided vocabulary_path does not exist.", "id": "f11210:m16"} {"signature": "def sentence_to_token_ids(sentence, vocabulary, tokenizer=None, normalize_digits=True, UNK_ID=, _DIGIT_RE=re.compile(br\"\")):", "body": "if tokenizer:words = tokenizer(sentence)else:words = basic_tokenizer(sentence)if not normalize_digits:return [vocabulary.get(w, UNK_ID) for w in words]return [vocabulary.get(re.sub(_DIGIT_RE, b\"\", w), UNK_ID) for w in words]", "docstring": "Convert a string to list of integers representing token-ids.\n\n For example, a sentence \"I have a dog\" may become tokenized into\n [\"I\", \"have\", \"a\", \"dog\"] and with vocabulary {\"I\": 1, \"have\": 2,\n \"a\": 4, \"dog\": 7\"} this function will return [1, 2, 4, 7].\n\n Parameters\n -----------\n sentence : tensorflow.python.platform.gfile.GFile Object\n The sentence in bytes format to convert to token-ids, see ``basic_tokenizer()`` and ``data_to_token_ids()``.\n vocabulary : dictionary\n Mmapping tokens to integers.\n tokenizer : function\n A function to use to tokenize each sentence. 
If None, ``basic_tokenizer`` will be used.\n normalize_digits : boolean\n If true, all digits are replaced by 0.\n\n Returns\n --------\n list of int\n The token-ids for the sentence.", "id": "f11210:m17"} {"signature": "def data_to_token_ids(data_path, target_path, vocabulary_path, tokenizer=None, normalize_digits=True, UNK_ID=,_DIGIT_RE=re.compile(br\"\")):", "body": "if not gfile.Exists(target_path):tl.logging.info(\"\" % data_path)vocab, _ = initialize_vocabulary(vocabulary_path)with gfile.GFile(data_path, mode=\"\") as data_file:with gfile.GFile(target_path, mode=\"\") as tokens_file:counter = for line in data_file:counter += if counter % == :tl.logging.info(\"\" % counter)token_ids = sentence_to_token_ids(line, vocab, tokenizer, normalize_digits, UNK_ID=UNK_ID, _DIGIT_RE=_DIGIT_RE)tokens_file.write(\"\".join([str(tok) for tok in token_ids]) + \"\")else:tl.logging.info(\"\" % target_path)", "docstring": "Tokenize data file and turn into token-ids using given vocabulary file.\n\n This function loads data line-by-line from data_path, calls the above\n sentence_to_token_ids, and saves the result to target_path. See comment\n for sentence_to_token_ids on the details of token-ids format.\n\n Parameters\n -----------\n data_path : str\n Path to the data file in one-sentence-per-line format.\n target_path : str\n Path where the file with token-ids will be created.\n vocabulary_path : str\n Path to the vocabulary file.\n tokenizer : function\n A function to use to tokenize each sentence. If None, ``basic_tokenizer`` will be used.\n normalize_digits : boolean\n If true, all digits are replaced by 0.\n\n References\n ----------\n - Code from ``/tensorflow/models/rnn/translation/data_utils.py``", "id": "f11210:m18"} {"signature": "def moses_multi_bleu(hypotheses, references, lowercase=False):", "body": "if np.size(hypotheses) == :return np.float32()try:multi_bleu_path, _ = urllib.request.urlretrieve(\"\"\"\")os.chmod(multi_bleu_path, )except Exception: tl.logging.info(\"\")metrics_dir = os.path.dirname(os.path.realpath(__file__))bin_dir = os.path.abspath(os.path.join(metrics_dir, \"\", \"\", \"\"))multi_bleu_path = os.path.join(bin_dir, \"\")hypothesis_file = tempfile.NamedTemporaryFile()hypothesis_file.write(\"\".join(hypotheses).encode(\"\"))hypothesis_file.write(b\"\")hypothesis_file.flush()reference_file = tempfile.NamedTemporaryFile()reference_file.write(\"\".join(references).encode(\"\"))reference_file.write(b\"\")reference_file.flush()with open(hypothesis_file.name, \"\") as read_pred:bleu_cmd = [multi_bleu_path]if lowercase:bleu_cmd += [\"\"]bleu_cmd += [reference_file.name]try:bleu_out = subprocess.check_output(bleu_cmd, stdin=read_pred, stderr=subprocess.STDOUT)bleu_out = bleu_out.decode(\"\")bleu_score = re.search(r\"\", bleu_out).group()bleu_score = float(bleu_score)except subprocess.CalledProcessError as error:if error.output is not None:tl.logging.warning(\"\")tl.logging.warning(error.output)bleu_score = np.float32()hypothesis_file.close()reference_file.close()return np.float32(bleu_score)", "docstring": "Calculate the bleu score for hypotheses and references\n using the MOSES ulti-bleu.perl script.\n\n Parameters\n ------------\n hypotheses : numpy.array.string\n A numpy array of strings where each string is a single example.\n references : numpy.array.string\n A numpy array of strings where each string is a single example.\n lowercase : boolean\n If True, pass the \"-lc\" flag to the multi-bleu script\n\n Examples\n ---------\n >>> hypotheses = [\"a bird is flying on the sky\"]\n 
>>> references = [\"two birds are flying on the sky\", \"a bird is on the top of the tree\", \"an airplane is on the sky\",]\n >>> score = tl.nlp.moses_multi_bleu(hypotheses, references)\n\n Returns\n --------\n float\n The BLEU score\n\n References\n ----------\n - `Google/seq2seq/metric/bleu `__", "id": "f11210:m19"} {"signature": "def __init__(self, vocab, unk_id):", "body": "self._vocab = vocabself._unk_id = unk_id", "docstring": "Initialize the vocabulary.", "id": "f11210:c0:m0"} {"signature": "def word_to_id(self, word):", "body": "if word in self._vocab:return self._vocab[word]else:return self._unk_id", "docstring": "Returns the integer id of a word string.", "id": "f11210:c0:m1"} {"signature": "def word_to_id(self, word):", "body": "if word in self.vocab:return self.vocab[word]else:return self.unk_id", "docstring": "Returns the integer word id of a word string.", "id": "f11210:c1:m1"} {"signature": "def id_to_word(self, word_id):", "body": "if word_id >= len(self.reverse_vocab):return self.reverse_vocab[self.unk_id]else:return self.reverse_vocab[word_id]", "docstring": "Returns the word string of an integer word id.", "id": "f11210:c1:m2"} {"signature": "def private_method(func):", "body": "def func_wrapper(*args, **kwargs):\"\"\"\"\"\"outer_frame = inspect.stack()[][]if '' not in outer_frame.f_locals or outer_frame.f_locals[''] is not args[]:raise RuntimeError('' % (args[].__class__.__name__, func.__name__))return func(*args, **kwargs)return func_wrapper", "docstring": "Decorator for making an instance method private.", "id": "f11212:m0"} {"signature": "def protected_method(func):", "body": "def func_wrapper(*args, **kwargs):\"\"\"\"\"\"outer_frame = inspect.stack()[][]caller = inspect.getmro(outer_frame.f_locals[''].__class__)[:-]target = inspect.getmro(args[].__class__)[:-]share_subsclass = Falsefor cls_ in target:if issubclass(caller[], cls_) or caller[] is cls_:share_subsclass = Truebreakif ('' not in outer_frame.f_locals orouter_frame.f_locals[''] is not args[]) and (not share_subsclass):raise RuntimeError('' % (args[].__class__.__name__, func.__name__))return func(*args, **kwargs)return func_wrapper", "docstring": "Decorator for making an instance method private.", "id": "f11212:m1"} {"signature": "def _add_deprecated_function_notice_to_docstring(doc, date, instructions):", "body": "if instructions:deprecation_message = \"\"\"\"\"\" % (('' if date is None else ('' % date)), instructions)else:deprecation_message = \"\"\"\"\"\" % (('' if date is None else ('' % date)))main_text = [deprecation_message]return _add_notice_to_docstring(doc, '', main_text)", "docstring": "Adds a deprecation notice to a docstring for deprecated functions.", "id": "f11215:m3"} {"signature": "def _add_notice_to_docstring(doc, no_doc_str, notice):", "body": "if not doc:lines = [no_doc_str]else:lines = _normalize_docstring(doc).splitlines()notice = [''] + noticeif len(lines) > :if lines[].strip():notice.append('')lines[:] = noticeelse:lines += noticereturn ''.join(lines)", "docstring": "Adds a deprecation notice to a docstring.", "id": "f11215:m4"} {"signature": "def _normalize_docstring(docstring):", "body": "if not docstring:return ''lines = docstring.expandtabs().splitlines()indent = sys.maxsizefor line in lines[:]:stripped = line.lstrip()if stripped:indent = min(indent, len(line) - len(stripped))trimmed = [lines[].strip()]if indent < sys.maxsize:for line in lines[:]:trimmed.append(line[indent:].rstrip())while trimmed and not trimmed[-]:trimmed.pop()while trimmed and not trimmed[]:trimmed.pop()return 
''.join(trimmed)", "docstring": "Normalizes the docstring.\n\n Replaces tabs with spaces, removes leading and trailing blanks lines, and\n removes any indentation.\n\n Copied from PEP-257:\n https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation\n\n Args:\n docstring: the docstring to normalize\n\n Returns:\n The normalized docstring", "id": "f11215:m5"} {"signature": "def minibatches(inputs=None, targets=None, batch_size=None, allow_dynamic_batch_size=False, shuffle=False):", "body": "if len(inputs) != len(targets):raise AssertionError(\"\")if shuffle:indices = np.arange(len(inputs))np.random.shuffle(indices)for start_idx in range(, len(inputs), batch_size):end_idx = start_idx + batch_sizeif end_idx > len(inputs):if allow_dynamic_batch_size:end_idx = len(inputs)else:breakif shuffle:excerpt = indices[start_idx:end_idx]else:excerpt = slice(start_idx, end_idx)if (isinstance(inputs, list) or isinstance(targets, list)) and (shuffle ==True):yield [inputs[i] for i in excerpt], [targets[i] for i in excerpt]else:yield inputs[excerpt], targets[excerpt]", "docstring": "Generate a generator that input a group of example in numpy.array and\n their labels, return the examples and labels by the given batch size.\n\n Parameters\n ----------\n inputs : numpy.array\n The input features, every row is a example.\n targets : numpy.array\n The labels of inputs, every row is a example.\n batch_size : int\n The batch size.\n allow_dynamic_batch_size: boolean\n Allow the use of the last data batch in case the number of examples is not a multiple of batch_size, this may result in unexpected behaviour if other functions expect a fixed-sized batch-size.\n shuffle : boolean\n Indicating whether to use a shuffling queue, shuffle the dataset before return.\n\n Examples\n --------\n >>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']])\n >>> y = np.asarray([0,1,2,3,4,5])\n >>> for batch in tl.iterate.minibatches(inputs=X, targets=y, batch_size=2, shuffle=False):\n >>> print(batch)\n (array([['a', 'a'], ['b', 'b']], dtype='):", "body": "if len(inputs) != len(targets):raise AssertionError(\"\")n_loads = (batch_size * stride) + (seq_length - stride)for start_idx in range(, len(inputs) - n_loads + , (batch_size * stride)):seq_inputs = np.zeros((batch_size, seq_length) + inputs.shape[:], dtype=inputs.dtype)seq_targets = np.zeros((batch_size, seq_length) + targets.shape[:], dtype=targets.dtype)for b_idx in xrange(batch_size):start_seq_idx = start_idx + (b_idx * stride)end_seq_idx = start_seq_idx + seq_lengthseq_inputs[b_idx] = inputs[start_seq_idx:end_seq_idx]seq_targets[b_idx] = targets[start_seq_idx:end_seq_idx]flatten_inputs = seq_inputs.reshape((-, ) + inputs.shape[:])flatten_targets = seq_targets.reshape((-, ) + targets.shape[:])yield flatten_inputs, flatten_targets", "docstring": "Generate a generator that return a batch of sequence inputs and targets.\n If `batch_size=100` and `seq_length=5`, one return will have 500 rows (examples).\n\n Parameters\n ----------\n inputs : numpy.array\n The input features, every row is a example.\n targets : numpy.array\n The labels of inputs, every element is a example.\n batch_size : int\n The batch size.\n seq_length : int\n The sequence length.\n stride : int\n The stride step, default is 1.\n\n Examples\n --------\n Synced sequence input and output.\n\n >>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']])\n >>> y = np.asarray([0, 1, 2, 3, 4, 5])\n >>> for batch in 
tl.iterate.seq_minibatches(inputs=X, targets=y, batch_size=2, seq_length=2, stride=1):\n >>> print(batch)\n (array([['a', 'a'], ['b', 'b'], ['b', 'b'], ['c', 'c']], dtype='>> return_last = True\n >>> num_steps = 2\n >>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']])\n >>> Y = np.asarray([0,1,2,3,4,5])\n >>> for batch in tl.iterate.seq_minibatches(inputs=X, targets=Y, batch_size=2, seq_length=num_steps, stride=1):\n >>> x, y = batch\n >>> if return_last:\n >>> tmp_y = y.reshape((-1, num_steps) + y.shape[1:])\n >>> y = tmp_y[:, -1]\n >>> print(x, y)\n [['a' 'a']\n ['b' 'b']\n ['b' 'b']\n ['c' 'c']] [1 2]\n [['c' 'c']\n ['d' 'd']\n ['d' 'd']\n ['e' 'e']] [3 4]", "id": "f11216:m1"} {"signature": "def seq_minibatches2(inputs, targets, batch_size, num_steps):", "body": "if len(inputs) != len(targets):raise AssertionError(\"\")data_len = len(inputs)batch_len = data_len // batch_sizedata = np.zeros((batch_size, batch_len) + inputs.shape[:], dtype=inputs.dtype)data2 = np.zeros([batch_size, batch_len])for i in range(batch_size):data[i] = inputs[batch_len * i:batch_len * (i + )]data2[i] = targets[batch_len * i:batch_len * (i + )]epoch_size = (batch_len - ) // num_stepsif epoch_size == :raise ValueError(\"\")for i in range(epoch_size):x = data[:, i * num_steps:(i + ) * num_steps]x2 = data2[:, i * num_steps:(i + ) * num_steps]yield (x, x2)", "docstring": "Generate a generator that iterates on two list of words. Yields (Returns) the source contexts and\n the target context by the given batch_size and num_steps (sequence_length).\n In TensorFlow's tutorial, this generates the `batch_size` pointers into the raw PTB data, and allows minibatch iteration along these pointers.\n\n Parameters\n ----------\n inputs : list of data\n The context in list format; note that context usually be represented by splitting by space, and then convert to unique word IDs.\n targets : list of data\n The context in list format; note that context usually be represented by splitting by space, and then convert to unique word IDs.\n batch_size : int\n The batch size.\n num_steps : int\n The number of unrolls. i.e. sequence length\n\n Yields\n ------\n Pairs of the batched data, each a matrix of shape [batch_size, num_steps].\n\n Raises\n ------\n ValueError : if batch_size or num_steps are too high.\n\n Examples\n --------\n >>> X = [i for i in range(20)]\n >>> Y = [i for i in range(20,40)]\n >>> for batch in tl.iterate.seq_minibatches2(X, Y, batch_size=2, num_steps=3):\n ... x, y = batch\n ... print(x, y)\n\n [[ 0. 1. 2.]\n [ 10. 11. 12.]]\n [[ 20. 21. 22.]\n [ 30. 31. 32.]]\n\n [[ 3. 4. 5.]\n [ 13. 14. 15.]]\n [[ 23. 24. 25.]\n [ 33. 34. 35.]]\n\n [[ 6. 7. 8.]\n [ 16. 17. 18.]]\n [[ 26. 27. 28.]\n [ 36. 37. 
38.]]\n\n Notes\n -----\n - Hint, if the input data are images, you can modify the source code `data = np.zeros([batch_size, batch_len)` to `data = np.zeros([batch_size, batch_len, inputs.shape[1], inputs.shape[2], inputs.shape[3]])`.", "id": "f11216:m2"} {"signature": "def ptb_iterator(raw_data, batch_size, num_steps):", "body": "raw_data = np.array(raw_data, dtype=np.int32)data_len = len(raw_data)batch_len = data_len // batch_sizedata = np.zeros([batch_size, batch_len], dtype=np.int32)for i in range(batch_size):data[i] = raw_data[batch_len * i:batch_len * (i + )]epoch_size = (batch_len - ) // num_stepsif epoch_size == :raise ValueError(\"\")for i in range(epoch_size):x = data[:, i * num_steps:(i + ) * num_steps]y = data[:, i * num_steps + :(i + ) * num_steps + ]yield (x, y)", "docstring": "Generate a generator that iterates on a list of words, see `PTB example `__.\n Yields the source contexts and the target context by the given batch_size and num_steps (sequence_length).\n\n In TensorFlow's tutorial, this generates `batch_size` pointers into the raw\n PTB data, and allows minibatch iteration along these pointers.\n\n Parameters\n ----------\n raw_data : a list\n the context in list format; note that context usually be\n represented by splitting by space, and then convert to unique\n word IDs.\n batch_size : int\n the batch size.\n num_steps : int\n the number of unrolls. i.e. sequence_length\n\n Yields\n ------\n Pairs of the batched data, each a matrix of shape [batch_size, num_steps].\n The second element of the tuple is the same data time-shifted to the\n right by one.\n\n Raises\n ------\n ValueError : if batch_size or num_steps are too high.\n\n Examples\n --------\n >>> train_data = [i for i in range(20)]\n >>> for batch in tl.iterate.ptb_iterator(train_data, batch_size=2, num_steps=3):\n >>> x, y = batch\n >>> print(x, y)\n [[ 0 1 2] <---x 1st subset/ iteration\n [10 11 12]]\n [[ 1 2 3] <---y\n [11 12 13]]\n\n [[ 3 4 5] <--- 1st batch input 2nd subset/ iteration\n [13 14 15]] <--- 2nd batch input\n [[ 4 5 6] <--- 1st batch target\n [14 15 16]] <--- 2nd batch target\n\n [[ 6 7 8] 3rd subset/ iteration\n [16 17 18]]\n [[ 7 8 9]\n [17 18 19]]", "id": "f11216:m3"} {"signature": "def alphas(shape, alpha_value, name=None):", "body": "with ops.name_scope(name, \"\", [shape]) as name:alpha_tensor = convert_to_tensor(alpha_value)alpha_dtype = dtypes.as_dtype(alpha_tensor.dtype).base_dtypeif not isinstance(shape, ops.Tensor):try:shape = constant_op._tensor_shape_tensor_conversion_function(tensor_shape.TensorShape(shape))except (TypeError, ValueError):shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)if not shape._shape_tuple():shape = reshape(shape, [-]) try:output = constant(alpha_value, shape=shape, dtype=alpha_dtype, name=name)except (TypeError, ValueError):output = fill(shape, constant(alpha_value, dtype=alpha_dtype), name=name)if output.dtype.base_dtype != alpha_dtype:raise AssertionError(\"\" % (output.dtype.base_dtype, alpha_dtype))return output", "docstring": "Creates a tensor with all elements set to `alpha_value`.\n This operation returns a tensor of type `dtype` with shape `shape` and all\n elements set to alpha.\n\n Parameters\n ----------\n shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type `int32`.\n The shape of the desired tensor\n alpha_value: `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, int32`, `int64`\n The value used to fill the resulting `Tensor`.\n name: str\n A name for the operation (optional).\n\n Returns\n -------\n A 
`Tensor` with all elements set to alpha.\n\n Examples\n --------\n >>> tl.alphas([2, 3], tf.int32) # [[alpha, alpha, alpha], [alpha, alpha, alpha]]", "id": "f11217:m0"} {"signature": "def alphas_like(tensor, alpha_value, name=None, optimize=True):", "body": "with ops.name_scope(name, \"\", [tensor]) as name:tensor = ops.convert_to_tensor(tensor, name=\"\")if context.in_eager_mode(): ret = alphas(shape_internal(tensor, optimize=optimize), alpha_value=alpha_value, name=name)else: if (optimize and tensor.shape.is_fully_defined()):ret = alphas(tensor.shape, alpha_value=alpha_value, name=name)else:ret = alphas(shape_internal(tensor, optimize=optimize), alpha_value=alpha_value, name=name)ret.set_shape(tensor.get_shape())return ret", "docstring": "Creates a tensor with all elements set to `alpha_value`.\n Given a single tensor (`tensor`), this operation returns a tensor of the same\n type and shape as `tensor` with all elements set to `alpha_value`.\n\n Parameters\n ----------\n tensor: tf.Tensor\n The Tensorflow Tensor that will be used as a template.\n alpha_value: `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, int32`, `int64`\n The value used to fill the resulting `Tensor`.\n name: str\n A name for the operation (optional).\n optimize: bool\n if true, attempt to statically determine the shape of 'tensor' and encode it as a constant.\n\n Returns\n -------\n A `Tensor` with all elements set to `alpha_value`.\n\n Examples\n --------\n >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n >>> tl.alphas_like(tensor, 0.5) # [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]", "id": "f11217:m1"} {"signature": "def cross_entropy(output, target, name=None):", "body": "if name is None:raise Exception(\"\")return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output), name=name)", "docstring": "Softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy for two distributions,\n it implements softmax internally. 
See ``tf.nn.sparse_softmax_cross_entropy_with_logits``.\n\n Parameters\n ----------\n output : Tensor\n A batch of distribution with shape: [batch_size, num of classes].\n target : Tensor\n A batch of index with shape: [batch_size, ].\n name : string\n Name of this loss.\n\n Examples\n --------\n >>> ce = tl.cost.cross_entropy(y_logits, y_target_logits, 'my_loss')\n\n References\n -----------\n - About cross-entropy: ``__.\n - The code is borrowed from: ``__.", "id": "f11218:m0"} {"signature": "def sigmoid_cross_entropy(output, target, name=None):", "body": "return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output), name=name)", "docstring": "Sigmoid cross-entropy operation, see ``tf.nn.sigmoid_cross_entropy_with_logits``.\n\n Parameters\n ----------\n output : Tensor\n A batch of distribution with shape: [batch_size, num of classes].\n target : Tensor\n A batch of index with shape: [batch_size, ].\n name : string\n Name of this loss.", "id": "f11218:m1"} {"signature": "def binary_cross_entropy(output, target, epsilon=, name=''):", "body": "return tf.reduce_mean(tf.reduce_sum(-(target * tf.log(output + epsilon) + ( - target) * tf.log( - output + epsilon)), axis=),name=name)", "docstring": "Binary cross entropy operation.\n\n Parameters\n ----------\n output : Tensor\n Tensor with type of `float32` or `float64`.\n target : Tensor\n The target distribution, format the same with `output`.\n epsilon : float\n A small value to avoid output to be zero.\n name : str\n An optional name to attach to this function.\n\n References\n -----------\n - `ericjang-DRAW `__", "id": "f11218:m2"} {"signature": "def mean_squared_error(output, target, is_mean=False, name=\"\"):", "body": "if output.get_shape().ndims == : if is_mean:mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), ), name=name)else:mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), ), name=name)elif output.get_shape().ndims == : if is_mean:mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [, ]), name=name)else:mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [, ]), name=name)elif output.get_shape().ndims == : if is_mean:mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [, , ]), name=name)else:mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [, , ]), name=name)else:raise Exception(\"\")return mse", "docstring": "Return the TensorFlow expression of mean-square-error (L2) of two batch of data.\n\n Parameters\n ----------\n output : Tensor\n 2D, 3D or 4D tensor i.e. 
[batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].\n target : Tensor\n The target distribution, format the same with `output`.\n is_mean : boolean\n Whether compute the mean or sum for each example.\n - If True, use ``tf.reduce_mean`` to compute the loss between one target and predict data.\n - If False, use ``tf.reduce_sum`` (default).\n name : str\n An optional name to attach to this function.\n\n References\n ------------\n - `Wiki Mean Squared Error `__", "id": "f11218:m3"} {"signature": "def normalized_mean_square_error(output, target, name=\"\"):", "body": "if output.get_shape().ndims == : nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=))nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=))elif output.get_shape().ndims == : nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[, ]))nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[, ]))elif output.get_shape().ndims == : nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[, , ]))nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[, , ]))nmse = tf.reduce_mean(nmse_a / nmse_b, name=name)return nmse", "docstring": "Return the TensorFlow expression of normalized mean-square-error of two distributions.\n\n Parameters\n ----------\n output : Tensor\n 2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].\n target : Tensor\n The target distribution, format the same with `output`.\n name : str\n An optional name to attach to this function.", "id": "f11218:m4"} {"signature": "def absolute_difference_error(output, target, is_mean=False, name=\"\"):", "body": "if output.get_shape().ndims == : if is_mean:loss = tf.reduce_mean(tf.reduce_mean(tf.abs(output - target), ), name=name)else:loss = tf.reduce_mean(tf.reduce_sum(tf.abs(output - target), ), name=name)elif output.get_shape().ndims == : if is_mean:loss = tf.reduce_mean(tf.reduce_mean(tf.abs(output - target), [, ]), name=name)else:loss = tf.reduce_mean(tf.reduce_sum(tf.abs(output - target), [, ]), name=name)elif output.get_shape().ndims == : if is_mean:loss = tf.reduce_mean(tf.reduce_mean(tf.abs(output - target), [, , ]), name=name)else:loss = tf.reduce_mean(tf.reduce_sum(tf.abs(output - target), [, , ]), name=name)else:raise Exception(\"\")return loss", "docstring": "Return the TensorFlow expression of absolute difference error (L1) of two batch of data.\n\n Parameters\n ----------\n output : Tensor\n 2D, 3D or 4D tensor i.e. 
[batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].\n target : Tensor\n The target distribution, format the same with `output`.\n is_mean : boolean\n Whether compute the mean or sum for each example.\n - If True, use ``tf.reduce_mean`` to compute the loss between one target and predict data.\n - If False, use ``tf.reduce_sum`` (default).\n name : str\n An optional name to attach to this function.", "id": "f11218:m5"} {"signature": "def dice_coe(output, target, loss_type='', axis=(, , ), smooth=):", "body": "inse = tf.reduce_sum(output * target, axis=axis)if loss_type == '':l = tf.reduce_sum(output * output, axis=axis)r = tf.reduce_sum(target * target, axis=axis)elif loss_type == '':l = tf.reduce_sum(output, axis=axis)r = tf.reduce_sum(target, axis=axis)else:raise Exception(\"\")dice = ( * inse + smooth) / (l + r + smooth)dice = tf.reduce_mean(dice, name='')return dice", "docstring": "Soft dice (S\u00f8rensen or Jaccard) coefficient for comparing the similarity\n of two batch of data, usually be used for binary image segmentation\n i.e. labels are binary. The coefficient between 0 to 1, 1 means totally match.\n\n Parameters\n -----------\n output : Tensor\n A distribution with shape: [batch_size, ....], (any dimensions).\n target : Tensor\n The target distribution, format the same with `output`.\n loss_type : str\n ``jaccard`` or ``sorensen``, default is ``jaccard``.\n axis : tuple of int\n All dimensions are reduced, default ``[1,2,3]``.\n smooth : float\n This small value will be added to the numerator and denominator.\n - If both output and target are empty, it makes sure dice is 1.\n - If either output or target are empty (all pixels are background), dice = ```smooth/(small_value + smooth)``, then if smooth is very small, dice close to 0 (even the image values lower than the threshold), so in this case, higher smooth can have a higher dice.\n\n Examples\n ---------\n >>> outputs = tl.act.pixel_wise_softmax(network.outputs)\n >>> dice_loss = 1 - tl.cost.dice_coe(outputs, y_)\n\n References\n -----------\n - `Wiki-Dice `__", "id": "f11218:m6"} {"signature": "def dice_hard_coe(output, target, threshold=, axis=(, , ), smooth=):", "body": "output = tf.cast(output > threshold, dtype=tf.float32)target = tf.cast(target > threshold, dtype=tf.float32)inse = tf.reduce_sum(tf.multiply(output, target), axis=axis)l = tf.reduce_sum(output, axis=axis)r = tf.reduce_sum(target, axis=axis)hard_dice = ( * inse + smooth) / (l + r + smooth)hard_dice = tf.reduce_mean(hard_dice, name='')return hard_dice", "docstring": "Non-differentiable S\u00f8rensen\u2013Dice coefficient for comparing the similarity\n of two batch of data, usually be used for binary image segmentation i.e. 
labels are binary.\n The coefficient between 0 to 1, 1 if totally match.\n\n Parameters\n -----------\n output : tensor\n A distribution with shape: [batch_size, ....], (any dimensions).\n target : tensor\n The target distribution, format the same with `output`.\n threshold : float\n The threshold value to be true.\n axis : tuple of integer\n All dimensions are reduced, default ``(1,2,3)``.\n smooth : float\n This small value will be added to the numerator and denominator, see ``dice_coe``.\n\n References\n -----------\n - `Wiki-Dice `__", "id": "f11218:m7"} {"signature": "def iou_coe(output, target, threshold=, axis=(, , ), smooth=):", "body": "pre = tf.cast(output > threshold, dtype=tf.float32)truth = tf.cast(target > threshold, dtype=tf.float32)inse = tf.reduce_sum(tf.multiply(pre, truth), axis=axis) union = tf.reduce_sum(tf.cast(tf.add(pre, truth) >= , dtype=tf.float32), axis=axis) batch_iou = (inse + smooth) / (union + smooth)iou = tf.reduce_mean(batch_iou, name='')return iou", "docstring": "Non-differentiable Intersection over Union (IoU) for comparing the\n similarity of two batch of data, usually be used for evaluating binary image segmentation.\n The coefficient between 0 to 1, and 1 means totally match.\n\n Parameters\n -----------\n output : tensor\n A batch of distribution with shape: [batch_size, ....], (any dimensions).\n target : tensor\n The target distribution, format the same with `output`.\n threshold : float\n The threshold value to be true.\n axis : tuple of integer\n All dimensions are reduced, default ``(1,2,3)``.\n smooth : float\n This small value will be added to the numerator and denominator, see ``dice_coe``.\n\n Notes\n ------\n - IoU cannot be used as training loss, people usually use dice coefficient for training, IoU and hard-dice for evaluating.", "id": "f11218:m8"} {"signature": "def cross_entropy_seq(logits, target_seqs, batch_size=None): ", "body": "sequence_loss_by_example_fn = tf.contrib.legacy_seq2seq.sequence_loss_by_exampleloss = sequence_loss_by_example_fn([logits], [tf.reshape(target_seqs, [-])], [tf.ones_like(tf.reshape(target_seqs, [-]), dtype=tf.float32)])cost = tf.reduce_sum(loss) if batch_size is not None:cost = cost / batch_sizereturn cost", "docstring": "Returns the expression of cross-entropy of two sequences, implement\n softmax internally. 
Normally be used for fixed length RNN outputs, see `PTB example `__.\n\n Parameters\n ----------\n logits : Tensor\n 2D tensor with shape of `[batch_size * n_steps, n_classes]`.\n target_seqs : Tensor\n The target sequence, 2D tensor `[batch_size, n_steps]`, if the number of step is dynamic, please use ``tl.cost.cross_entropy_seq_with_mask`` instead.\n batch_size : None or int.\n Whether to divide the cost by batch size.\n - If integer, the return cost will be divided by `batch_size`.\n - If None (default), the return cost will not be divided by anything.\n\n Examples\n --------\n >>> see `PTB example `__.for more details\n >>> input_data = tf.placeholder(tf.int32, [batch_size, n_steps])\n >>> targets = tf.placeholder(tf.int32, [batch_size, n_steps])\n >>> # build the network\n >>> print(net.outputs)\n (batch_size * n_steps, n_classes)\n >>> cost = tl.cost.cross_entropy_seq(network.outputs, targets)", "id": "f11218:m9"} {"signature": "def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details=False, name=None):", "body": "targets = tf.reshape(target_seqs, [-]) weights = tf.to_float(tf.reshape(input_mask, [-])) losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name) * weightsloss = tf.divide(tf.reduce_sum(losses), tf.reduce_sum(weights),name=\"\")if return_details:return loss, losses, weights, targetselse:return loss", "docstring": "Returns the expression of cross-entropy of two sequences, implement\n softmax internally. Normally be used for Dynamic RNN with Synced sequence input and output.\n\n Parameters\n -----------\n logits : Tensor\n 2D tensor with shape of [batch_size * ?, n_classes], `?` means dynamic IDs for each example.\n - Can be get from `DynamicRNNLayer` by setting ``return_seq_2d`` to `True`.\n target_seqs : Tensor\n int of tensor, like word ID. [batch_size, ?], `?` means dynamic IDs for each example.\n input_mask : Tensor\n The mask to compute loss, it has the same size with `target_seqs`, normally 0 or 1.\n return_details : boolean\n Whether to return detailed losses.\n - If False (default), only returns the loss.\n - If True, returns the loss, losses, weights and targets (see source code).\n\n Examples\n --------\n >>> batch_size = 64\n >>> vocab_size = 10000\n >>> embedding_size = 256\n >>> input_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name=\"input\")\n >>> target_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name=\"target\")\n >>> input_mask = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name=\"mask\")\n >>> net = tl.layers.EmbeddingInputlayer(\n ... inputs = input_seqs,\n ... vocabulary_size = vocab_size,\n ... embedding_size = embedding_size,\n ... name = 'seq_embedding')\n >>> net = tl.layers.DynamicRNNLayer(net,\n ... cell_fn = tf.contrib.rnn.BasicLSTMCell,\n ... n_hidden = embedding_size,\n ... dropout = (0.7 if is_train else None),\n ... sequence_length = tl.layers.retrieve_seq_length_op2(input_seqs),\n ... return_seq_2d = True,\n ... 
name = 'dynamicrnn')\n >>> print(net.outputs)\n (?, 256)\n >>> net = tl.layers.DenseLayer(net, n_units=vocab_size, name=\"output\")\n >>> print(net.outputs)\n (?, 10000)\n >>> loss = tl.cost.cross_entropy_seq_with_mask(net.outputs, target_seqs, input_mask)", "id": "f11218:m10"} {"signature": "def cosine_similarity(v1, v2):", "body": "return tf.reduce_sum(tf.multiply(v1, v2), ) /(tf.sqrt(tf.reduce_sum(tf.multiply(v1, v1), )) *tf.sqrt(tf.reduce_sum(tf.multiply(v2, v2), )))", "docstring": "Cosine similarity [-1, 1].\n\n Parameters\n ----------\n v1, v2 : Tensor\n Tensor with the same shape [batch_size, n_feature].\n\n References\n ----------\n - `Wiki `__.", "id": "f11218:m11"} {"signature": "def li_regularizer(scale, scope=None):", "body": "if isinstance(scale, numbers.Integral):raise ValueError('' % scale)if isinstance(scale, numbers.Real):if scale < :raise ValueError('' % scale)if scale >= :raise ValueError('' % scale)if scale == :tl.logging.info('')return lambda _, name=None: Nonedef li(weights):\"\"\"\"\"\"with tf.name_scope('') as scope:my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='')standard_ops_fn = standard_ops.multiplyreturn standard_ops_fn(my_scale, standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), ))),name=scope)return li", "docstring": "Li regularization removes the neurons of previous layer. The `i` represents `inputs`.\n Returns a function that can be used to apply group li regularization to weights.\n The implementation follows `TensorFlow contrib `__.\n\n Parameters\n ----------\n scale : float\n A scalar multiplier `Tensor`. 0.0 disables the regularizer.\n scope: str\n An optional scope name for this function.\n\n Returns\n --------\n A function with signature `li(weights, name=None)` that apply Li regularization.\n\n Raises\n ------\n ValueError : if scale is outside of the range [0.0, 1.0] or if scale is not a float.", "id": "f11218:m12"} {"signature": "def lo_regularizer(scale):", "body": "if isinstance(scale, numbers.Integral):raise ValueError('' % scale)if isinstance(scale, numbers.Real):if scale < :raise ValueError('' % scale)if scale >= :raise ValueError('' % scale)if scale == :tl.logging.info('')return lambda _, name=None: Nonedef lo(weights, name=''):\"\"\"\"\"\"with tf.name_scope(name) as scope:my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='')standard_ops_fn = standard_ops.multiplyreturn standard_ops_fn(my_scale, standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), ))),name=scope)return lo", "docstring": "Lo regularization removes the neurons of current layer. The `o` represents `outputs`\n Returns a function that can be used to apply group lo regularization to weights.\n The implementation follows `TensorFlow contrib `__.\n\n Parameters\n ----------\n scale : float\n A scalar multiplier `Tensor`. 
0.0 disables the regularizer.\n\n Returns\n -------\n A function with signature `lo(weights, name=None)` that apply Lo regularization.\n\n Raises\n ------\n ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.", "id": "f11218:m13"} {"signature": "def maxnorm_regularizer(scale=):", "body": "if isinstance(scale, numbers.Integral):raise ValueError('' % scale)if isinstance(scale, numbers.Real):if scale < :raise ValueError('' % scale)if scale == :tl.logging.info('')return lambda _, name=None: Nonedef mn(weights, name=''):\"\"\"\"\"\"with tf.name_scope(name) as scope:my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='')standard_ops_fn = standard_ops.multiplyreturn standard_ops_fn(my_scale, standard_ops.reduce_max(standard_ops.abs(weights)), name=scope)return mn", "docstring": "Max-norm regularization returns a function that can be used to apply max-norm regularization to weights.\n\n More about max-norm, see `wiki-max norm `_.\n The implementation follows `TensorFlow contrib `__.\n\n Parameters\n ----------\n scale : float\n A scalar multiplier `Tensor`. 0.0 disables the regularizer.\n\n Returns\n ---------\n A function with signature `mn(weights, name=None)` that apply Lo regularization.\n\n Raises\n --------\n ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.", "id": "f11218:m14"} {"signature": "def maxnorm_o_regularizer(scale):", "body": "if isinstance(scale, numbers.Integral):raise ValueError('' % scale)if isinstance(scale, numbers.Real):if scale < :raise ValueError('' % scale)if scale == :tl.logging.info('')return lambda _, name=None: Nonedef mn_o(weights, name=''):\"\"\"\"\"\"with tf.name_scope(name) as scope:my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='')if tf.__version__ <= '':standard_ops_fn = standard_ops.mulelse:standard_ops_fn = standard_ops.multiplyreturn standard_ops_fn(my_scale, standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), )), name=scope)return mn_o", "docstring": "Max-norm output regularization removes the neurons of current layer.\n Returns a function that can be used to apply max-norm regularization to each column of weight matrix.\n The implementation follows `TensorFlow contrib `__.\n\n Parameters\n ----------\n scale : float\n A scalar multiplier `Tensor`. 
0.0 disables the regularizer.\n\n Returns\n ---------\n A function with signature `mn_o(weights, name=None)` that apply Lo regularization.\n\n Raises\n ---------\n ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.", "id": "f11218:m15"} {"signature": "def maxnorm_i_regularizer(scale):", "body": "if isinstance(scale, numbers.Integral):raise ValueError('' % scale)if isinstance(scale, numbers.Real):if scale < :raise ValueError('' % scale)if scale == :tl.logging.info('')return lambda _, name=None: Nonedef mn_i(weights, name=''):\"\"\"\"\"\"with tf.name_scope(name) as scope:my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='')if tf.__version__ <= '':standard_ops_fn = standard_ops.mulelse:standard_ops_fn = standard_ops.multiplyreturn standard_ops_fn(my_scale, standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), )), name=scope)return mn_i", "docstring": "Max-norm input regularization removes the neurons of previous layer.\n Returns a function that can be used to apply max-norm regularization to each row of weight matrix.\n The implementation follows `TensorFlow contrib `__.\n\n Parameters\n ----------\n scale : float\n A scalar multiplier `Tensor`. 0.0 disables the regularizer.\n\n Returns\n ---------\n A function with signature `mn_i(weights, name=None)` that apply Lo regularization.\n\n Raises\n ---------\n ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.", "id": "f11218:m16"} {"signature": "def fit(sess, network, train_op, cost, X_train, y_train, x, y_, acc=None, batch_size=, n_epoch=, print_freq=,X_val=None, y_val=None, eval_train=True, tensorboard_dir=None, tensorboard_epoch_freq=,tensorboard_weight_histograms=True, tensorboard_graph_vis=True):", "body": "if X_train.shape[] < batch_size:raise AssertionError(\"\")if tensorboard_dir is not None:tl.logging.info(\"\")tl.files.exists_or_mkdir(tensorboard_dir)if hasattr(tf, '') and hasattr(tf.summary, ''):if tensorboard_graph_vis:train_writer = tf.summary.FileWriter(tensorboard_dir + '', sess.graph)val_writer = tf.summary.FileWriter(tensorboard_dir + '', sess.graph)else:train_writer = tf.summary.FileWriter(tensorboard_dir + '')val_writer = tf.summary.FileWriter(tensorboard_dir + '')if (tensorboard_weight_histograms):for param in network.all_params:if hasattr(tf, '') and hasattr(tf.summary, ''):tl.logging.info('' % param.name)tf.summary.histogram(param.name, param)if hasattr(tf, '') and hasattr(tf.summary, ''):tf.summary.scalar('', cost)merged = tf.summary.merge_all()tl.layers.initialize_global_variables(sess)tl.logging.info(\"\" % tensorboard_dir)tl.logging.info(\"\")start_time_begin = time.time()tensorboard_train_index, tensorboard_val_index = , for epoch in range(n_epoch):start_time = time.time()loss_ep = n_step = for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):feed_dict = {x: X_train_a, y_: y_train_a}feed_dict.update(network.all_drop) loss, _ = sess.run([cost, train_op], feed_dict=feed_dict)loss_ep += lossn_step += loss_ep = loss_ep / n_stepif tensorboard_dir is not None and hasattr(tf, ''):if epoch + == or (epoch + ) % tensorboard_epoch_freq == :for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):dp_dict = dict_to_one(network.all_drop) feed_dict = {x: X_train_a, y_: y_train_a}feed_dict.update(dp_dict)result = sess.run(merged, feed_dict=feed_dict)train_writer.add_summary(result, tensorboard_train_index)tensorboard_train_index += if (X_val 
is not None) and (y_val is not None):for X_val_a, y_val_a in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=True):dp_dict = dict_to_one(network.all_drop) feed_dict = {x: X_val_a, y_: y_val_a}feed_dict.update(dp_dict)result = sess.run(merged, feed_dict=feed_dict)val_writer.add_summary(result, tensorboard_val_index)tensorboard_val_index += if epoch + == or (epoch + ) % print_freq == :if (X_val is not None) and (y_val is not None):tl.logging.info(\"\" % (epoch + , n_epoch, time.time() - start_time))if eval_train is True:train_loss, train_acc, n_batch = , , for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):dp_dict = dict_to_one(network.all_drop) feed_dict = {x: X_train_a, y_: y_train_a}feed_dict.update(dp_dict)if acc is not None:err, ac = sess.run([cost, acc], feed_dict=feed_dict)train_acc += acelse:err = sess.run(cost, feed_dict=feed_dict)train_loss += errn_batch += tl.logging.info(\"\" % (train_loss / n_batch))if acc is not None:tl.logging.info(\"\" % (train_acc / n_batch))val_loss, val_acc, n_batch = , , for X_val_a, y_val_a in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=True):dp_dict = dict_to_one(network.all_drop) feed_dict = {x: X_val_a, y_: y_val_a}feed_dict.update(dp_dict)if acc is not None:err, ac = sess.run([cost, acc], feed_dict=feed_dict)val_acc += acelse:err = sess.run(cost, feed_dict=feed_dict)val_loss += errn_batch += tl.logging.info(\"\" % (val_loss / n_batch))if acc is not None:tl.logging.info(\"\" % (val_acc / n_batch))else:tl.logging.info(\"\" % (epoch + , n_epoch, time.time() - start_time, loss_ep))tl.logging.info(\"\" % (time.time() - start_time_begin))", "docstring": "Training a given non time-series network by the given cost function, training data, batch_size, n_epoch etc.\n\n - MNIST example click `here `_.\n - In order to control the training details, the authors HIGHLY recommend ``tl.iterate`` see two MNIST examples `1 `_, `2 `_.\n\n Parameters\n ----------\n sess : Session\n TensorFlow Session.\n network : TensorLayer layer\n the network to be trained.\n train_op : TensorFlow optimizer\n The optimizer for training e.g. tf.train.AdamOptimizer.\n X_train : numpy.array\n The input of training data\n y_train : numpy.array\n The target of training data\n x : placeholder\n For inputs.\n y_ : placeholder\n For targets.\n acc : TensorFlow expression or None\n Metric for accuracy or others. If None, would not print the information.\n batch_size : int\n The batch size for training and evaluating.\n n_epoch : int\n The number of training epochs.\n print_freq : int\n Print the training information every ``print_freq`` epochs.\n X_val : numpy.array or None\n The input of validation data. If None, would not perform validation.\n y_val : numpy.array or None\n The target of validation data. If None, would not perform validation.\n eval_train : boolean\n Whether to evaluate the model during training.\n If X_val and y_val are not None, it reflects whether to evaluate the model on training data.\n tensorboard_dir : string\n path to log dir, if set, summary data will be stored to the tensorboard_dir/ directory for visualization with tensorboard. 
(default None)\n Also runs `tl.layers.initialize_global_variables(sess)` internally in fit() to setup the summary nodes.\n tensorboard_epoch_freq : int\n How many epochs between storing tensorboard checkpoint for visualization to log/ directory (default 5).\n tensorboard_weight_histograms : boolean\n If True updates tensorboard data in the logs/ directory for visualization\n of the weight histograms every tensorboard_epoch_freq epoch (default True).\n tensorboard_graph_vis : boolean\n If True stores the graph in the tensorboard summaries saved to log/ (default True).\n\n Examples\n --------\n See `tutorial_mnist_simple.py `_\n\n >>> tl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_,\n ... acc=acc, batch_size=500, n_epoch=200, print_freq=5,\n ... X_val=X_val, y_val=y_val, eval_train=False)\n >>> tl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_,\n ... acc=acc, batch_size=500, n_epoch=200, print_freq=5,\n ... X_val=X_val, y_val=y_val, eval_train=False,\n ... tensorboard=True, tensorboard_weight_histograms=True, tensorboard_graph_vis=True)\n\n Notes\n --------\n If tensorboard_dir not None, the `global_variables_initializer` will be run inside the fit function\n in order to initialize the automatically generated summary nodes used for tensorboard visualization,\n thus `tf.global_variables_initializer().run()` before the `fit()` call will be undefined.", "id": "f11219:m0"} {"signature": "def predict(sess, network, X, x, y_op, batch_size=None):", "body": "if batch_size is None:dp_dict = dict_to_one(network.all_drop) feed_dict = {x: X,}feed_dict.update(dp_dict)return sess.run(y_op, feed_dict=feed_dict)else:result = Nonefor X_a, _ in tl.iterate.minibatches(X, X, batch_size, shuffle=False):dp_dict = dict_to_one(network.all_drop)feed_dict = {x: X_a,}feed_dict.update(dp_dict)result_a = sess.run(y_op, feed_dict=feed_dict)if result is None:result = result_aelse:result = np.concatenate((result, result_a))if result is None:if len(X) % batch_size != :dp_dict = dict_to_one(network.all_drop)feed_dict = {x: X[-(len(X) % batch_size):, :],}feed_dict.update(dp_dict)result_a = sess.run(y_op, feed_dict=feed_dict)result = result_aelse:if len(X) != len(result) and len(X) % batch_size != :dp_dict = dict_to_one(network.all_drop)feed_dict = {x: X[-(len(X) % batch_size):, :],}feed_dict.update(dp_dict)result_a = sess.run(y_op, feed_dict=feed_dict)result = np.concatenate((result, result_a))return result", "docstring": "Return the predict results of given non time-series network.\n\nParameters\n----------\nsess : Session\n TensorFlow Session.\nnetwork : TensorLayer layer\n The network.\nX : numpy.array\n The inputs.\nx : placeholder\n For inputs.\ny_op : placeholder\n The argmax expression of softmax outputs.\nbatch_size : int or None\n The batch size for prediction, when dataset is large, we should use minibatche for prediction;\n if dataset is small, we can set it to None.\n\nExamples\n--------\nSee `tutorial_mnist_simple.py `_\n\n>>> y = network.outputs\n>>> y_op = tf.argmax(tf.nn.softmax(y), 1)\n>>> print(tl.utils.predict(sess, network, X_test, x, y_op))", "id": "f11219:m2"} {"signature": "def dict_to_one(dp_dict):", "body": "return {x: for x in dp_dict}", "docstring": "Input a dictionary, return a dictionary that all items are set to one.\n\n Used for disable dropout, dropconnect layer and so on.\n\n Parameters\n ----------\n dp_dict : dictionary\n The dictionary contains key and number, e.g. 
keeping probabilities.\n\n Examples\n --------\n >>> dp_dict = dict_to_one( network.all_drop )\n >>> dp_dict = dict_to_one( network.all_drop )\n >>> feed_dict.update(dp_dict)", "id": "f11219:m4"} {"signature": "def flatten_list(list_of_list):", "body": "return sum(list_of_list, [])", "docstring": "Input a list of list, return a list that all items are in a list.\n\n Parameters\n ----------\n list_of_list : a list of list\n\n Examples\n --------\n >>> tl.utils.flatten_list([[1, 2, 3],[4, 5],[6]])\n [1, 2, 3, 4, 5, 6]", "id": "f11219:m5"} {"signature": "def class_balancing_oversample(X_train=None, y_train=None, printable=True):", "body": "if printable:tl.logging.info(\"\")c = Counter(y_train)if printable:tl.logging.info('' % c.most_common())tl.logging.info('' % c.most_common()[-])tl.logging.info('' % c.most_common()[])most_num = c.most_common()[][]if printable:tl.logging.info('' % most_num)locations = {}number = {}for lab, num in c.most_common(): number[lab] = numlocations[lab] = np.where(np.array(y_train) == lab)[]if printable:tl.logging.info('')X = {} for lab, num in number.items():X[lab] = X_train[locations[lab]]if printable:tl.logging.info('')for key in X:temp = X[key]while True:if len(X[key]) >= most_num:breakX[key] = np.vstack((X[key], temp))if printable:tl.logging.info('' % len(X[][]))tl.logging.info('')for key in X:tl.logging.info(\"\" % (key, len(X[key])))if printable:tl.logging.info('')for key in X:X[key] = X[key][:most_num, :]tl.logging.info(\"\" % (key, len(X[key])))if printable:tl.logging.info('')y_train = []X_train = np.empty(shape=(, len(X[][])))for key in X:X_train = np.vstack((X_train, X[key]))y_train.extend([key for i in range(len(X[key]))])c = Counter(y_train)if printable:tl.logging.info('' % c.most_common())return X_train, y_train", "docstring": "Input the features and labels, return the features and labels after oversampling.\n\n Parameters\n ----------\n X_train : numpy.array\n The inputs.\n y_train : numpy.array\n The targets.\n\n Examples\n --------\n One X\n\n >>> X_train, y_train = class_balancing_oversample(X_train, y_train, printable=True)\n\n Two X\n\n >>> X, y = tl.utils.class_balancing_oversample(X_train=np.hstack((X1, X2)), y_train=y, printable=False)\n >>> X1 = X[:, 0:5]\n >>> X2 = X[:, 5:]", "id": "f11219:m6"} {"signature": "def get_random_int(min_v=, max_v=, number=, seed=None):", "body": "rnd = random.Random()if seed:rnd = random.Random(seed)return [rnd.randint(min_v, max_v) for p in range(, number)]", "docstring": "Return a list of random integer by the given range and quantity.\n\n Parameters\n -----------\n min_v : number\n The minimum value.\n max_v : number\n The maximum value.\n number : int\n Number of value.\n seed : int or None\n The seed for random.\n\n Examples\n ---------\n >>> r = get_random_int(min_v=0, max_v=10, number=5)\n [10, 2, 3, 3, 7]", "id": "f11219:m7"} {"signature": "def list_string_to_dict(string):", "body": "dictionary = {}for idx, c in enumerate(string):dictionary.update({c: idx})return dictionary", "docstring": "Inputs ``['a', 'b', 'c']``, returns ``{'a': 0, 'b': 1, 'c': 2}``.", "id": "f11219:m8"} {"signature": "def exit_tensorflow(sess=None, port=):", "body": "text = \"\"text2 = \"\"if sess is not None:sess.close()if _platform == \"\" or _platform == \"\":tl.logging.info('' % text)os.system('')os.system('' + port + '') os.system(\"\") _exit()elif _platform == \"\":tl.logging.info('' % text)subprocess.Popen(\"\" + str(port) + \"\", shell=True) elif _platform == \"\":raise NotImplementedError(\"\")else:tl.logging.info(text2 + 
_platform)", "docstring": "Close TensorFlow session, TensorBoard and Nvidia-process if available.\n\n Parameters\n ----------\n sess : Session\n TensorFlow Session.\n tb_port : int\n TensorBoard port you want to close, `6006` as default.", "id": "f11219:m9"} {"signature": "def open_tensorboard(log_dir='', port=):", "body": "text = \"\" + str(port) + \"\"text2 = \"\"if not tl.files.exists_or_mkdir(log_dir, verbose=False):tl.logging.info(\"\" % log_dir)if _platform == \"\" or _platform == \"\":raise NotImplementedError()elif _platform == \"\":tl.logging.info('' % text)subprocess.Popen(sys.prefix + \"\" + log_dir + \"\" + str(port), shell=True) elif _platform == \"\":raise NotImplementedError(\"\")else:tl.logging.info(_platform + text2)", "docstring": "Open Tensorboard.\n\n Parameters\n ----------\n log_dir : str\n Directory where your tensorboard logs are saved\n port : int\n TensorBoard port you want to open, 6006 is tensorboard default", "id": "f11219:m10"} {"signature": "def clear_all_placeholder_variables(printable=True):", "body": "tl.logging.info('')gl = globals().copy()for var in gl:if var[] == '': continueif '' in str(globals()[var]): continueif '' in str(globals()[var]): continueif '' in str(globals()[var]): continueif printable:tl.logging.info(\"\" % str(globals()[var]))del globals()[var]", "docstring": "Clears all the placeholder variables of keep prob,\n including keeping probabilities of all dropout, denoising, dropconnect etc.\n\n Parameters\n ----------\n printable : boolean\n If True, print all deleted variables.", "id": "f11219:m11"} {"signature": "def set_gpu_fraction(gpu_fraction=):", "body": "tl.logging.info(\"\" % gpu_fraction)gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))return sess", "docstring": "Set the GPU memory fraction for the application.\n\n Parameters\n ----------\n gpu_fraction : float\n Fraction of GPU memory, (0 ~ 1]\n\n References\n ----------\n - `TensorFlow using GPU `__", "id": "f11219:m12"} {"signature": "@deprecated(date=\"\", instructions=\"\")def create_task_spec_def():", "body": "if '' in os.environ:env = json.loads(os.environ.get('', ''))task_data = env.get('', None) or {'': '', '': }cluster_data = env.get('', None) or {'': None, '': None, '': None}return TaskSpecDef(task_type=task_data[''], index=task_data[''],trial=task_data[''] if '' in task_data else None, ps_hosts=cluster_data[''],worker_hosts=cluster_data[''], master=cluster_data[''] if '' in cluster_data else None)elif '' in os.environ:return TaskSpecDef(task_type=os.environ[''], index=os.environ[''], ps_hosts=os.environ.get('', None),worker_hosts=os.environ.get('', None), master=os.environ.get('', None))else:raise Exception('')", "docstring": "Returns the a :class:`TaskSpecDef` based on the environment variables for distributed training.\n\n References\n ----------\n - `ML-engine trainer considerations `__\n - `TensorPort Distributed Computing `__", "id": "f11220:m0"} {"signature": "@deprecated(date=\"\", instructions=\"\")def create_distributed_session(task_spec=None, checkpoint_dir=None, scaffold=None, hooks=None, chief_only_hooks=None, save_checkpoint_secs=,save_summaries_steps=object(), save_summaries_secs=object(), config=None, stop_grace_period_secs=,log_step_count_steps=):", "body": "target = task_spec.target() if task_spec is not None else Noneis_chief = task_spec.is_master() if task_spec is not None else Truereturn tf.train.MonitoredTrainingSession(master=target, is_chief=is_chief, 
checkpoint_dir=checkpoint_dir, scaffold=scaffold,save_checkpoint_secs=save_checkpoint_secs, save_summaries_steps=save_summaries_steps,save_summaries_secs=save_summaries_secs, log_step_count_steps=log_step_count_steps,stop_grace_period_secs=stop_grace_period_secs, config=config, hooks=hooks, chief_only_hooks=chief_only_hooks)", "docstring": "Creates a distributed session.\n\n It calls `MonitoredTrainingSession` to create a :class:`MonitoredSession` for distributed training.\n\n Parameters\n ----------\n task_spec : :class:`TaskSpecDef`.\n The task spec definition from create_task_spec_def()\n checkpoint_dir : str.\n Optional path to a directory where to restore variables.\n scaffold : ``Scaffold``\n A `Scaffold` used for gathering or building supportive ops.\n If not specified, a default one is created. It's used to finalize the graph.\n hooks : list of ``SessionRunHook`` objects.\n Optional\n chief_only_hooks : list of ``SessionRunHook`` objects.\n Activate these hooks if `is_chief==True`, ignore otherwise.\n save_checkpoint_secs : int\n The frequency, in seconds, that a checkpoint is saved\n using a default checkpoint saver. If `save_checkpoint_secs` is set to\n `None`, then the default checkpoint saver isn't used.\n save_summaries_steps : int\n The frequency, in number of global steps, that the\n summaries are written to disk using a default summary saver. If both\n `save_summaries_steps` and `save_summaries_secs` are set to `None`, then\n the default summary saver isn't used. Default 100.\n save_summaries_secs : int\n The frequency, in secs, that the summaries are written\n to disk using a default summary saver. If both `save_summaries_steps` and\n `save_summaries_secs` are set to `None`, then the default summary saver\n isn't used. Default not enabled.\n config : ``tf.ConfigProto``\n an instance of `tf.ConfigProto` proto used to configure the session.\n It's the `config` argument of constructor of `tf.Session`.\n stop_grace_period_secs : int\n Number of seconds given to threads to stop after\n `close()` has been called.\n log_step_count_steps : int\n The frequency, in number of global steps, that the\n global step/sec is logged.\n\n Examples\n --------\n A simple example for distributed training where all the workers use the same dataset:\n\n >>> task_spec = TaskSpec()\n >>> with tf.device(task_spec.device_fn()):\n >>> tensors = create_graph()\n >>> with tl.DistributedSession(task_spec=task_spec,\n ... checkpoint_dir='/tmp/ckpt') as session:\n >>> while not session.should_stop():\n >>> session.run(tensors)\n\n An example where the dataset is shared among the workers\n (see https://www.tensorflow.org/programmers_guide/datasets):\n\n >>> task_spec = TaskSpec()\n >>> # dataset is a :class:`tf.data.Dataset` with the raw data\n >>> dataset = create_dataset()\n >>> if task_spec is not None:\n >>> dataset = dataset.shard(task_spec.num_workers, task_spec.shard_index)\n >>> # shuffle or apply a map function to the new sharded dataset, for example:\n >>> dataset = dataset.shuffle(buffer_size=10000)\n >>> dataset = dataset.batch(batch_size)\n >>> dataset = dataset.repeat(num_epochs)\n >>> # create the iterator for the dataset and the input tensor\n >>> iterator = dataset.make_one_shot_iterator()\n >>> next_element = iterator.get_next()\n >>> with tf.device(task_spec.device_fn()):\n >>> # next_element is the input for the graph\n >>> tensors = create_graph(next_element)\n >>> with tl.DistributedSession(task_spec=task_spec,\n ... 
checkpoint_dir='/tmp/ckpt') as session:\n >>> while not session.should_stop():\n >>> session.run(tensors)\n\n References\n ----------\n - `MonitoredTrainingSession `__", "id": "f11220:m1"} {"signature": "@propertydef validation_metrics(self):", "body": "if (self._validation_iterator is None) or (self._validation_metrics is None):raise AttributeError('')n = metric_sums = [] * len(self._validation_metrics)self._sess.run(self._validation_iterator.initializer)while True:try:metrics = self._sess.run(self._validation_metrics)for i, m in enumerate(metrics):metric_sums[i] += mn += except tf.errors.OutOfRangeError:breakfor i, m in enumerate(metric_sums):metric_sums[i] = metric_sums[i] / nreturn zip(self._validation_metrics, metric_sums)", "docstring": "A helper function to compute validation related metrics", "id": "f11220:c0:m4"} {"signature": "def train_on_batch(self):", "body": "self._sess.run(self._train_op)", "docstring": "Train a mini-batch.", "id": "f11220:c0:m5"} {"signature": "def train_and_validate_to_end(self, validate_step_size=):", "body": "while not self._sess.should_stop():self.train_on_batch() if self.global_step % validate_step_size == :log_str = '' % self.global_stepfor n, m in self.validation_metrics:log_str += '' % (n.name, m)logging.info(log_str)", "docstring": "A helper function that shows how to train and validate a model at the same time.\n\n Parameters\n ----------\n validate_step_size : int\n Validate the training network every N steps.", "id": "f11220:c0:m6"} {"signature": "def is_ps(self):", "body": "return self.type == ''", "docstring": "Returns true if this server is a parameter server", "id": "f11220:c1:m1"} {"signature": "def is_worker(self):", "body": "return self.type == ''", "docstring": "Returns true if this server is a worker server", "id": "f11220:c1:m2"} {"signature": "def is_master(self):", "body": "return self._master", "docstring": "Returns true if this server is the master server", "id": "f11220:c1:m3"} {"signature": "def is_evaluator(self):", "body": "return self.type == '' and self.num_workers == self._index", "docstring": "Returns true if this server is the evaluator server", "id": "f11220:c1:m4"} {"signature": "def device_fn(self):", "body": "current_device = ''.format(self.type, self._index)ps_devices = ''return tf.train.replica_device_setter(ps_device=ps_devices, worker_device=current_device, cluster=self._cluster_spec)", "docstring": "Returns the function with the specification to create the graph in this server", "id": "f11220:c1:m5"} {"signature": "def use_last_worker_as_evaluator(self):", "body": "if self.num_workers <= :raise Exception('')return TaskSpecDef(task_type=self.type, index=self._index, trial=self.trial, ps_hosts=self.ps_hosts,worker_hosts=self.worker_hosts[:-], master=self.master)", "docstring": "Returns a new :class:`TaskSpecDef` where the last worker has been removed from\n the list of worker_hosts, so it is not used for training anymore. 
You can call\n is_evaluator to know whether this server is the evaluator one or not.\n In case there is only one server for training this method raises an exception, as\n you cannot use any server for evaluation.", "id": "f11220:c1:m8"} {"signature": "def clean_warning_registry():", "body": "warnings.resetwarnings()reg = \"\"for mod_name, mod in list(sys.modules.items()):if '' in mod_name:continueif hasattr(mod, reg):getattr(mod, reg).clear()", "docstring": "Safe way to reset warnings", "id": "f11227:m0"} {"signature": "def import_lab(namespace, filename, infer_duration=True, **parse_options):", "body": "annotation = core.Annotation(namespace)parse_options.setdefault('', r'')parse_options.setdefault('', '')parse_options.setdefault('', None)parse_options.setdefault('', False)parse_options.setdefault('', range())data = pd.read_csv(filename, **parse_options)data = data.dropna(how='', axis=)if len(data.columns) == :data.insert(, '', )if infer_duration:data[''][:-] = data.loc[:, ].diff()[:].valueselse:if infer_duration:data.loc[:, ] -= data[]for row in data.itertuples():time, duration = row[:]value = [x for x in row[:] if x is not None][-]annotation.append(time=time,duration=duration,confidence=,value=value)return annotation", "docstring": "r'''Load a .lab file as an Annotation object.\n\n .lab files are assumed to have the following format:\n\n ``TIME_START\\tTIME_END\\tANNOTATION``\n\n By default, .lab files are assumed to have columns separated by one\n or more white-space characters, and have no header or index column\n information.\n\n If the .lab file contains only two columns, then an empty duration\n field is inferred.\n\n If the .lab file contains more than three columns, each row's\n annotation value is assigned the contents of last non-empty column.\n\n\n Parameters\n ----------\n namespace : str\n The namespace for the new annotation\n\n filename : str\n Path to the .lab file\n\n infer_duration : bool\n If `True`, interval durations are inferred from `(start, end)` columns,\n or difference between successive times.\n\n If `False`, interval durations are assumed to be explicitly coded as\n `(start, duration)` columns. 
If only one time column is given, then\n durations are set to 0.\n\n For instantaneous event annotations (e.g., beats or onsets), this\n should be set to `False`.\n\n parse_options : additional keyword arguments\n Passed to ``pandas.DataFrame.read_csv``\n\n Returns\n -------\n annotation : Annotation\n The newly constructed annotation object\n\n See Also\n --------\n pandas.DataFrame.read_csv", "id": "f11232:m0"} {"signature": "def expand_filepaths(base_dir, rel_paths):", "body": "return [os.path.join(base_dir, os.path.normpath(rp)) for rp in rel_paths]", "docstring": "Expand a list of relative paths to a give base directory.\n\n Parameters\n ----------\n base_dir : str\n The target base directory\n\n rel_paths : list (or list-like)\n Collection of relative path strings\n\n Returns\n -------\n expanded_paths : list\n `rel_paths` rooted at `base_dir`\n\n Examples\n --------\n >>> jams.util.expand_filepaths('/data', ['audio', 'beat', 'seglab'])\n ['/data/audio', '/data/beat', '/data/seglab']", "id": "f11232:m1"} {"signature": "def smkdirs(dpath, mode=):", "body": "if not os.path.exists(dpath):os.makedirs(dpath, mode=mode)", "docstring": "Safely make a full directory path if it doesn't exist.\n\n Parameters\n ----------\n dpath : str\n Path of directory/directories to create\n\n mode : int [default=0777]\n Permissions for the new directories\n\n See also\n --------\n os.makedirs", "id": "f11232:m2"} {"signature": "def filebase(filepath):", "body": "return os.path.splitext(os.path.basename(filepath))[]", "docstring": "Return the extension-less basename of a file path.\n\n Parameters\n ----------\n filepath : str\n Path to a file\n\n Returns\n -------\n base : str\n The name of the file, with directory and extension removed\n\n Examples\n --------\n >>> jams.util.filebase('my_song.mp3')\n 'my_song'", "id": "f11232:m3"} {"signature": "def find_with_extension(in_dir, ext, depth=, sort=True):", "body": "assert depth >= ext = ext.strip(os.extsep)match = list()for n in range(, depth+):wildcard = os.path.sep.join([\"\"]*n)search_path = os.path.join(in_dir, os.extsep.join([wildcard, ext]))match += glob.glob(search_path)if sort:match.sort()return match", "docstring": "Naive depth-search into a directory for files with a given extension.\n\n Parameters\n ----------\n in_dir : str\n Path to search.\n ext : str\n File extension to match.\n depth : int\n Depth of directories to search.\n sort : bool\n Sort the list alphabetically\n\n Returns\n -------\n matched : list\n Collection of matching file paths.\n\n Examples\n --------\n >>> jams.util.find_with_extension('Audio', 'wav')\n ['Audio/LizNelson_Rainfall/LizNelson_Rainfall_MIX.wav',\n 'Audio/LizNelson_Rainfall/LizNelson_Rainfall_RAW/LizNelson_Rainfall_RAW_01_01.wav',\n 'Audio/LizNelson_Rainfall/LizNelson_Rainfall_RAW/LizNelson_Rainfall_RAW_02_01.wav',\n ...\n 'Audio/Phoenix_ScotchMorris/Phoenix_ScotchMorris_STEMS/Phoenix_ScotchMorris_STEM_02.wav',\n 'Audio/Phoenix_ScotchMorris/Phoenix_ScotchMorris_STEMS/Phoenix_ScotchMorris_STEM_03.wav',\n 'Audio/Phoenix_ScotchMorris/Phoenix_ScotchMorris_STEMS/Phoenix_ScotchMorris_STEM_04.wav']", "id": "f11232:m4"} {"signature": "def add_namespace(filename):", "body": "with open(filename, mode='') as fileobj:__NAMESPACE__.update(json.load(fileobj))", "docstring": "Add a namespace definition to our working set.\n\n Namespace files consist of partial JSON schemas defining the behavior\n of the `value` and `confidence` fields of an Annotation.\n\n Parameters\n ----------\n filename : str\n Path to json file defining the 
namespace object", "id": "f11234:m0"} {"signature": "def namespace(ns_key):", "body": "if ns_key not in __NAMESPACE__:raise NamespaceError(''.format(ns_key))sch = copy.deepcopy(JAMS_SCHEMA[''][''])for key in ['', '']:try:sch[''][key] = __NAMESPACE__[ns_key][key]except KeyError:passreturn sch", "docstring": "Construct a validation schema for a given namespace.\n\n Parameters\n ----------\n ns_key : str\n Namespace key identifier (eg, 'beat' or 'segment_tut')\n\n Returns\n -------\n schema : dict\n JSON schema of `namespace`", "id": "f11234:m1"} {"signature": "def namespace_array(ns_key):", "body": "obs_sch = namespace(ns_key)obs_sch[''] = ''sch = copy.deepcopy(JAMS_SCHEMA[''][''])sch[''] = obs_schreturn sch", "docstring": "Construct a validation schema for arrays of a given namespace.\n\n Parameters\n ----------\n ns_key : str\n Namespace key identifier\n\n Returns\n -------\n schema : dict\n JSON schema of `namespace` observation arrays", "id": "f11234:m2"} {"signature": "def is_dense(ns_key):", "body": "if ns_key not in __NAMESPACE__:raise NamespaceError(''.format(ns_key))return __NAMESPACE__[ns_key]['']", "docstring": "Determine whether a namespace has dense formatting.\n\n Parameters\n ----------\n ns_key : str\n Namespace key identifier\n\n Returns\n -------\n dense : bool\n True if `ns_key` has a dense packing\n False otherwise.", "id": "f11234:m3"} {"signature": "def values(ns_key):", "body": "if ns_key not in __NAMESPACE__:raise NamespaceError(''.format(ns_key))if '' not in __NAMESPACE__[ns_key]['']:raise NamespaceError(''.format(ns_key))return copy.copy(__NAMESPACE__[ns_key][''][''])", "docstring": "Return the allowed values for an enumerated namespace.\n\n Parameters\n ----------\n ns_key : str\n Namespace key identifier\n\n Returns\n -------\n values : list\n\n Raises\n ------\n NamespaceError\n If `ns_key` is not found, or does not have enumerated values\n\n Examples\n --------\n >>> jams.schema.values('tag_gtzan')\n ['blues', 'classical', 'country', 'disco', 'hip-hop', 'jazz',\n 'metal', 'pop', 'reggae', 'rock']", "id": "f11234:m4"} {"signature": "def get_dtypes(ns_key):", "body": "if ns_key not in __NAMESPACE__:raise NamespaceError(''.format(ns_key))value_dtype = __get_dtype(__NAMESPACE__[ns_key].get('', {}))confidence_dtype = __get_dtype(__NAMESPACE__[ns_key].get('', {}))return value_dtype, confidence_dtype", "docstring": "Get the dtypes associated with the value and confidence fields\n for a given namespace.\n\n Parameters\n ----------\n ns_key : str\n The namespace key in question\n\n Returns\n -------\n value_dtype, confidence_dtype : numpy.dtype\n Type identifiers for value and confidence fields.", "id": "f11234:m5"} {"signature": "def list_namespaces():", "body": "print(''.format('', ''))print('' * )for sch in sorted(__NAMESPACE__):desc = __NAMESPACE__[sch]['']desc = (desc[:] + '') if len(desc) > else descprint(''.format(sch, desc))", "docstring": "Print out a listing of available namespaces", "id": "f11234:m6"} {"signature": "def __get_dtype(typespec):", "body": "if '' in typespec:return __TYPE_MAP__.get(typespec[''], np.object_)elif '' in typespec:return np.object_elif '' in typespec:types = [__get_dtype(v) for v in typespec['']]if all([t == types[] for t in types]):return types[]return np.object_", "docstring": "Get the dtype associated with a jsonschema type definition\n\n Parameters\n ----------\n typespec : dict\n The schema definition\n\n Returns\n -------\n dtype : numpy.dtype\n The associated dtype", "id": "f11234:m7"} {"signature": "def __load_jams_schema():", 
"body": "schema_file = os.path.join(SCHEMA_DIR, '')jams_schema = Nonewith open(resource_filename(__name__, schema_file), mode='') as fdesc:jams_schema = json.load(fdesc)if jams_schema is None:raise JamsError('')return jams_schema", "docstring": "Load the schema file from the package.", "id": "f11234:m8"} {"signature": "def pprint_jobject(obj, **kwargs):", "body": "obj_simple = {k: v for k, v in six.iteritems(obj.__json__) if v}string = json.dumps(obj_simple, **kwargs)string = re.sub(r'', '', string)string = re.sub(r'', '', string)string = re.sub(r'', '', string)return string", "docstring": "Pretty-print a jobject.\n\n Parameters\n ----------\n obj : jams.JObject\n\n kwargs\n additional parameters to `json.dumps`\n\n Returns\n -------\n string\n A simplified display of `obj` contents.", "id": "f11235:m0"} {"signature": "def intervals(annotation, **kwargs):", "body": "times, labels = annotation.to_interval_values()return mir_eval.display.labeled_intervals(times, labels, **kwargs)", "docstring": "Plotting wrapper for labeled intervals", "id": "f11235:m1"} {"signature": "def hierarchy(annotation, **kwargs):", "body": "htimes, hlabels = hierarchy_flatten(annotation)htimes = [np.asarray(_) for _ in htimes]return mir_eval.display.hierarchy(htimes, hlabels, **kwargs)", "docstring": "Plotting wrapper for hierarchical segmentations", "id": "f11235:m2"} {"signature": "def pitch_contour(annotation, **kwargs):", "body": "ax = kwargs.pop('', None)ax = mir_eval.display.__get_axes(ax=ax)[]times, values = annotation.to_interval_values()indices = np.unique([v[''] for v in values])for idx in indices:rows = [i for (i, v) in enumerate(values) if v[''] == idx]freqs = np.asarray([values[r][''] for r in rows])unvoiced = ~np.asarray([values[r][''] for r in rows])freqs[unvoiced] *= -ax = mir_eval.display.pitch(times[rows, ], freqs, unvoiced=True,ax=ax,**kwargs)return ax", "docstring": "Plotting wrapper for pitch contours", "id": "f11235:m3"} {"signature": "def event(annotation, **kwargs):", "body": "times, values = annotation.to_interval_values()if any(values):labels = valueselse:labels = Nonereturn mir_eval.display.events(times, labels=labels, **kwargs)", "docstring": "Plotting wrapper for events", "id": "f11235:m4"} {"signature": "def beat_position(annotation, **kwargs):", "body": "times, values = annotation.to_interval_values()labels = [_[''] for _ in values]return mir_eval.display.events(times, labels=labels, **kwargs)", "docstring": "Plotting wrapper for beat-position data", "id": "f11235:m5"} {"signature": "def piano_roll(annotation, **kwargs):", "body": "times, midi = annotation.to_interval_values()return mir_eval.display.piano_roll(times, midi=midi, **kwargs)", "docstring": "Plotting wrapper for piano rolls", "id": "f11235:m6"} {"signature": "def display(annotation, meta=True, **kwargs):", "body": "for namespace, func in six.iteritems(VIZ_MAPPING):try:ann = coerce_annotation(annotation, namespace)axes = func(ann, **kwargs)axes.set_title(annotation.namespace)if meta:description = pprint_jobject(annotation.annotation_metadata, indent=)anchored_box = AnchoredText(description.strip(''),loc=,frameon=True,bbox_to_anchor=(, ),bbox_transform=axes.transAxes,borderpad=)axes.add_artist(anchored_box)axes.figure.subplots_adjust(right=)return axesexcept NamespaceError:passraise NamespaceError(''.format(annotation.namespace))", "docstring": "Visualize a jams annotation through mir_eval\n\n Parameters\n ----------\n annotation : jams.Annotation\n The annotation to display\n\n meta : bool\n If `True`, include annotation 
metadata in the figure\n\n kwargs\n Additional keyword arguments to mir_eval.display functions\n\n Returns\n -------\n ax\n Axis handles for the new display\n\n Raises\n ------\n NamespaceError\n If the annotation cannot be visualized", "id": "f11235:m7"} {"signature": "def display_multi(annotations, fig_kw=None, meta=True, **kwargs):", "body": "if fig_kw is None:fig_kw = dict()fig_kw.setdefault('', True)fig_kw.setdefault('', True)display_annotations = []for ann in annotations:for namespace in VIZ_MAPPING:if can_convert(ann, namespace):display_annotations.append(ann)breakif not len(display_annotations):raise ParameterError('')fig, axs = plt.subplots(nrows=len(display_annotations), ncols=, **fig_kw)if len(display_annotations) == :axs = [axs]for ann, ax in zip(display_annotations, axs):kwargs[''] = axdisplay(ann, meta=meta, **kwargs)return fig, axs", "docstring": "Display multiple annotations with shared axes\n\n Parameters\n ----------\n annotations : jams.AnnotationArray\n A collection of annotations to display\n\n fig_kw : dict\n Keyword arguments to `plt.figure`\n\n meta : bool\n If `True`, display annotation metadata for each annotation\n\n kwargs\n Additional keyword arguments to the `mir_eval.display` routines\n\n Returns\n -------\n fig\n The created figure\n axs\n List of subplot axes corresponding to each displayed annotation", "id": "f11235:m8"} {"signature": "def coerce_annotation(ann, namespace):", "body": "ann = convert(ann, namespace)ann.validate(strict=True)return ann", "docstring": "Validate that the annotation has the correct namespace,\n and is well-formed.\n\n If the annotation is not of the correct namespace, automatic conversion\n is attempted.\n\n Parameters\n ----------\n ann : jams.Annotation\n The annotation object in question\n\n namespace : str\n The namespace pattern to match `ann` against\n\n Returns\n -------\n ann_coerced: jams.Annotation\n The annotation coerced to the target namespace\n\n Raises\n ------\n NamespaceError\n If `ann` does not match the proper namespace\n\n SchemaError\n If `ann` fails schema validation\n\n See Also\n --------\n jams.nsconvert.convert", "id": "f11236:m0"} {"signature": "def beat(ref, est, **kwargs):", "body": "namespace = ''ref = coerce_annotation(ref, namespace)est = coerce_annotation(est, namespace)ref_times, _ = ref.to_event_values()est_times, _ = est.to_event_values()return mir_eval.beat.evaluate(ref_times, est_times, **kwargs)", "docstring": "r'''Beat tracking evaluation\n\n Parameters\n ----------\n ref : jams.Annotation\n Reference annotation object\n est : jams.Annotation\n Estimated annotation object\n kwargs\n Additional keyword arguments\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n See Also\n --------\n mir_eval.beat.evaluate\n\n Examples\n --------\n >>> # Load in the JAMS objects\n >>> ref_jam = jams.load('reference.jams')\n >>> est_jam = jams.load('estimated.jams')\n >>> # Select the first relevant annotations\n >>> ref_ann = ref_jam.search(namespace='beat')[0]\n >>> est_ann = est_jam.search(namespace='beat')[0]\n >>> scores = jams.eval.beat(ref_ann, est_ann)", "id": "f11236:m1"} {"signature": "def onset(ref, est, **kwargs):", "body": "namespace = ''ref = coerce_annotation(ref, namespace)est = coerce_annotation(est, namespace)ref_times, _ = ref.to_event_values()est_times, _ = est.to_event_values()return mir_eval.onset.evaluate(ref_times, est_times, **kwargs)", "docstring": "r'''Onset evaluation\n\n 
Parameters\n ----------\n ref : jams.Annotation\n Reference annotation object\n est : jams.Annotation\n Estimated annotation object\n kwargs\n Additional keyword arguments\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n See Also\n --------\n mir_eval.onset.evaluate\n\n Examples\n --------\n >>> # Load in the JAMS objects\n >>> ref_jam = jams.load('reference.jams')\n >>> est_jam = jams.load('estimated.jams')\n >>> # Select the first relevant annotations\n >>> ref_ann = ref_jam.search(namespace='onset')[0]\n >>> est_ann = est_jam.search(namespace='onset')[0]\n >>> scores = jams.eval.onset(ref_ann, est_ann)", "id": "f11236:m2"} {"signature": "def chord(ref, est, **kwargs):", "body": "namespace = ''ref = coerce_annotation(ref, namespace)est = coerce_annotation(est, namespace)ref_interval, ref_value = ref.to_interval_values()est_interval, est_value = est.to_interval_values()return mir_eval.chord.evaluate(ref_interval, ref_value,est_interval, est_value, **kwargs)", "docstring": "r'''Chord evaluation\n\n Parameters\n ----------\n ref : jams.Annotation\n Reference annotation object\n est : jams.Annotation\n Estimated annotation object\n kwargs\n Additional keyword arguments\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n See Also\n --------\n mir_eval.chord.evaluate\n\n Examples\n --------\n >>> # Load in the JAMS objects\n >>> ref_jam = jams.load('reference.jams')\n >>> est_jam = jams.load('estimated.jams')\n >>> # Select the first relevant annotations\n >>> ref_ann = ref_jam.search(namespace='chord')[0]\n >>> est_ann = est_jam.search(namespace='chord')[0]\n >>> scores = jams.eval.chord(ref_ann, est_ann)", "id": "f11236:m3"} {"signature": "def segment(ref, est, **kwargs):", "body": "namespace = ''ref = coerce_annotation(ref, namespace)est = coerce_annotation(est, namespace)ref_interval, ref_value = ref.to_interval_values()est_interval, est_value = est.to_interval_values()return mir_eval.segment.evaluate(ref_interval, ref_value,est_interval, est_value, **kwargs)", "docstring": "r'''Segment evaluation\n\n Parameters\n ----------\n ref : jams.Annotation\n Reference annotation object\n est : jams.Annotation\n Estimated annotation object\n kwargs\n Additional keyword arguments\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n See Also\n --------\n mir_eval.segment.evaluate\n\n Examples\n --------\n >>> # Load in the JAMS objects\n >>> ref_jam = jams.load('reference.jams')\n >>> est_jam = jams.load('estimated.jams')\n >>> # Select the first relevant annotations\n >>> ref_ann = ref_jam.search(namespace='segment_.*')[0]\n >>> est_ann = est_jam.search(namespace='segment_.*')[0]\n >>> scores = jams.eval.segment(ref_ann, est_ann)", "id": "f11236:m4"} {"signature": "def hierarchy_flatten(annotation):", "body": "intervals, values = annotation.to_interval_values()ordering = dict()for interval, value in zip(intervals, values):level = value['']if level not in ordering:ordering[level] = dict(intervals=list(), labels=list())ordering[level][''].append(interval)ordering[level][''].append(value[''])levels = sorted(list(ordering.keys()))hier_intervals = [ordering[level][''] for level in levels]hier_labels = [ordering[level][''] for level in levels]return hier_intervals, hier_labels", "docstring": "Flatten a 
multi_segment annotation into mir_eval style.\n\n Parameters\n ----------\n annotation : jams.Annotation\n An annotation in the `multi_segment` namespace\n\n Returns\n -------\n hier_intervalss : list\n A list of lists of intervals, ordered by increasing specificity.\n\n hier_labels : list\n A list of lists of labels, ordered by increasing specificity.", "id": "f11236:m5"} {"signature": "def hierarchy(ref, est, **kwargs):", "body": "namespace = ''ref = coerce_annotation(ref, namespace)est = coerce_annotation(est, namespace)ref_hier, ref_hier_lab = hierarchy_flatten(ref)est_hier, est_hier_lab = hierarchy_flatten(est)return mir_eval.hierarchy.evaluate(ref_hier, ref_hier_lab,est_hier, est_hier_lab,**kwargs)", "docstring": "r'''Multi-level segmentation evaluation\n\n Parameters\n ----------\n ref : jams.Annotation\n Reference annotation object\n est : jams.Annotation\n Estimated annotation object\n kwargs\n Additional keyword arguments\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n See Also\n --------\n mir_eval.hierarchy.evaluate\n\n Examples\n --------\n >>> # Load in the JAMS objects\n >>> ref_jam = jams.load('reference.jams')\n >>> est_jam = jams.load('estimated.jams')\n >>> # Select the first relevant annotations\n >>> ref_ann = ref_jam.search(namespace='multi_segment')[0]\n >>> est_ann = est_jam.search(namespace='multi_segment')[0]\n >>> scores = jams.eval.hierarchy(ref_ann, est_ann)", "id": "f11236:m6"} {"signature": "def tempo(ref, est, **kwargs):", "body": "ref = coerce_annotation(ref, '')est = coerce_annotation(est, '')ref_tempi = np.asarray([o.value for o in ref])ref_weight = ref.data[].confidenceest_tempi = np.asarray([o.value for o in est])return mir_eval.tempo.evaluate(ref_tempi, ref_weight, est_tempi, **kwargs)", "docstring": "r'''Tempo evaluation\n\n Parameters\n ----------\n ref : jams.Annotation\n Reference annotation object\n est : jams.Annotation\n Estimated annotation object\n kwargs\n Additional keyword arguments\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n See Also\n --------\n mir_eval.tempo.evaluate\n\n Examples\n --------\n >>> # Load in the JAMS objects\n >>> ref_jam = jams.load('reference.jams')\n >>> est_jam = jams.load('estimated.jams')\n >>> # Select the first relevant annotations\n >>> ref_ann = ref_jam.search(namespace='tempo')[0]\n >>> est_ann = est_jam.search(namespace='tempo')[0]\n >>> scores = jams.eval.tempo(ref_ann, est_ann)", "id": "f11236:m7"} {"signature": "def melody(ref, est, **kwargs):", "body": "namespace = ''ref = coerce_annotation(ref, namespace)est = coerce_annotation(est, namespace)ref_times, ref_p = ref.to_event_values()est_times, est_p = est.to_event_values()ref_freq = np.asarray([p[''] * (-)**(~p['']) for p in ref_p])est_freq = np.asarray([p[''] * (-)**(~p['']) for p in est_p])return mir_eval.melody.evaluate(ref_times, ref_freq,est_times, est_freq,**kwargs)", "docstring": "r'''Melody extraction evaluation\n\n Parameters\n ----------\n ref : jams.Annotation\n Reference annotation object\n est : jams.Annotation\n Estimated annotation object\n kwargs\n Additional keyword arguments\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n See Also\n --------\n mir_eval.melody.evaluate\n\n Examples\n --------\n >>> # Load in the JAMS objects\n 
>>> ref_jam = jams.load('reference.jams')\n >>> est_jam = jams.load('estimated.jams')\n >>> # Select the first relevant annotations\n >>> ref_ann = ref_jam.search(namespace='pitch_contour')[0]\n >>> est_ann = est_jam.search(namespace='pitch_contour')[0]\n >>> scores = jams.eval.melody(ref_ann, est_ann)", "id": "f11236:m8"} {"signature": "def pattern_to_mireval(ann):", "body": "patterns = defaultdict(lambda: defaultdict(list))for time, observation in zip(*ann.to_event_values()):pattern_id = observation['']occurrence_id = observation['']obs = (time, observation[''])patterns[pattern_id][occurrence_id].append(obs)return [list(_.values()) for _ in six.itervalues(patterns)]", "docstring": "Convert a pattern_jku annotation object to mir_eval format.\n\n Parameters\n ----------\n ann : jams.Annotation\n Must have `namespace='pattern_jku'`\n\n Returns\n -------\n patterns : list of list of tuples\n - `patterns[x]` is a list containing all occurrences of pattern x\n\n - `patterns[x][y]` is a list containing all notes for\n occurrence y of pattern x\n\n - `patterns[x][y][z]` contains a time-note tuple\n `(time, midi note)`", "id": "f11236:m9"} {"signature": "def pattern(ref, est, **kwargs):", "body": "namespace = ''ref = coerce_annotation(ref, namespace)est = coerce_annotation(est, namespace)ref_patterns = pattern_to_mireval(ref)est_patterns = pattern_to_mireval(est)return mir_eval.pattern.evaluate(ref_patterns, est_patterns, **kwargs)", "docstring": "r'''Pattern detection evaluation\n\n Parameters\n ----------\n ref : jams.Annotation\n Reference annotation object\n est : jams.Annotation\n Estimated annotation object\n kwargs\n Additional keyword arguments\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n See Also\n --------\n mir_eval.pattern.evaluate\n\n Examples\n --------\n >>> # Load in the JAMS objects\n >>> ref_jam = jams.load('reference.jams')\n >>> est_jam = jams.load('estimated.jams')\n >>> # Select the first relevant annotations\n >>> ref_ann = ref_jam.search(namespace='pattern_jku')[0]\n >>> est_ann = est_jam.search(namespace='pattern_jku')[0]\n >>> scores = jams.eval.pattern(ref_ann, est_ann)", "id": "f11236:m10"} {"signature": "def transcription(ref, est, **kwargs):", "body": "namespace = ''ref = coerce_annotation(ref, namespace)est = coerce_annotation(est, namespace)ref_intervals, ref_p = ref.to_interval_values()est_intervals, est_p = est.to_interval_values()ref_pitches = np.asarray([p[''] * (-)**(~p['']) for p in ref_p])est_pitches = np.asarray([p[''] * (-)**(~p['']) for p in est_p])return mir_eval.transcription.evaluate(ref_intervals, ref_pitches, est_intervals, est_pitches, **kwargs)", "docstring": "r'''Note transcription evaluation\n\n Parameters\n ----------\n ref : jams.Annotation\n Reference annotation object\n est : jams.Annotation\n Estimated annotation object\n kwargs\n Additional keyword arguments\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved.\n\n See Also\n --------\n mir_eval.transcription.evaluate\n\n Examples\n --------\n >>> # Load in the JAMS objects\n >>> ref_jam = jams.load('reference.jams')\n >>> est_jam = jams.load('estimated.jams')\n >>> # Select the first relevant annotations. 
You can use any annotation\n >>> # type that can be converted to pitch_contour (such as pitch_midi)\n >>> ref_ann = ref_jam.search(namespace='pitch_contour')[0]\n >>> est_ann = est_jam.search(namespace='note_hz')[0]\n >>> scores = jams.eval.transcription(ref_ann, est_ann)", "id": "f11236:m11"} {"signature": "def _conversion(target, source):", "body": "def register(func):''''''__CONVERSION__[target][source] = funcreturn funcreturn register", "docstring": "A decorator to register namespace conversions.\n\n Usage\n -----\n >>> @conversion('tag_open', 'tag_.*')\n ... def tag_to_open(annotation):\n ... annotation.namespace = 'tag_open'\n ... return annotation", "id": "f11238:m0"} {"signature": "def convert(annotation, target_namespace):", "body": "annotation.validate(strict=True)if annotation.namespace == target_namespace:return annotationif target_namespace in __CONVERSION__:annotation = deepcopy(annotation)for source in __CONVERSION__[target_namespace]:if annotation.search(namespace=source):return __CONVERSION__[target_namespace][source](annotation)raise NamespaceError(''''.format(annotation.namespace,target_namespace))", "docstring": "Convert a given annotation to the target namespace.\n\n Parameters\n ----------\n annotation : jams.Annotation\n An annotation object\n\n target_namespace : str\n The target namespace\n\n Returns\n -------\n mapped_annotation : jams.Annotation\n if `annotation` already belongs to `target_namespace`, then\n it is returned directly.\n\n otherwise, `annotation` is copied and automatically converted\n to the target namespace.\n\n Raises\n ------\n SchemaError\n if the input annotation fails to validate\n\n NamespaceError\n if no conversion is possible\n\n Examples\n --------\n Convert frequency measurements in Hz to MIDI\n\n >>> ann_midi = jams.convert(ann_hz, 'note_midi')\n\n And back to Hz\n\n >>> ann_hz2 = jams.convert(ann_midi, 'note_hz')", "id": "f11238:m1"} {"signature": "def can_convert(annotation, target_namespace):", "body": "if annotation.namespace == target_namespace:return Trueif target_namespace in __CONVERSION__:for source in __CONVERSION__[target_namespace]:if annotation.search(namespace=source):return Truereturn False", "docstring": "Test if an annotation can be mapped to a target namespace\n\n Parameters\n ----------\n annotation : jams.Annotation\n An annotation object\n\n target_namespace : str\n The target namespace\n\n Returns\n -------\n True\n if `annotation` can be automatically converted to\n `target_namespace`\n\n False\n otherwise", "id": "f11238:m2"} {"signature": "@_conversion('', '')def pitch_hz_to_contour(annotation):", "body": "annotation.namespace = ''data = annotation.pop_data()for obs in data:annotation.append(time=obs.time, duration=obs.duration,confidence=obs.confidence,value=dict(index=,frequency=np.abs(obs.value),voiced=obs.value > ))return annotation", "docstring": "Convert a pitch_hz annotation to a contour", "id": "f11238:m3"} {"signature": "@_conversion('', '')def pitch_midi_to_contour(annotation):", "body": "annotation = pitch_midi_to_hz(annotation)return pitch_hz_to_contour(annotation)", "docstring": "Convert a pitch_hz annotation to a contour", "id": "f11238:m4"} {"signature": "@_conversion('', '')def note_midi_to_hz(annotation):", "body": "annotation.namespace = ''data = annotation.pop_data()for obs in data:annotation.append(time=obs.time, duration=obs.duration,confidence=obs.confidence,value= * (**((obs.value - )/)))return annotation", "docstring": "Convert a pitch_midi annotation to pitch_hz", "id": "f11238:m5"} 
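The pitch-conversion records above and below (note_midi_to_hz, note_hz_to_midi, pitch_midi_to_hz, pitch_hz_to_midi) all apply the same MIDI/Hz mapping, but the numeric constants are stripped from the stored bodies. The following is a minimal sketch of that mapping, assuming the conventional A440 tuning reference (MIDI note 69 = 440 Hz, 12 semitones per octave); the constant choice is an assumption drawn from standard practice, not something the stripped records themselves state.

    import numpy as np

    def midi_to_hz(midi):
        # Assumed A440 reference: MIDI note 69 corresponds to 440 Hz,
        # with 12 semitones per octave.
        return 440.0 * (2.0 ** ((np.asarray(midi, dtype=float) - 69.0) / 12.0))

    def hz_to_midi(hz):
        # Inverse of midi_to_hz under the same assumed reference.
        return 12.0 * (np.log2(np.asarray(hz, dtype=float)) - np.log2(440.0)) + 69.0

    # Round-trip check: A4 maps to 440 Hz and back to MIDI 69.
    assert abs(midi_to_hz(69) - 440.0) < 1e-9
    assert abs(hz_to_midi(440.0) - 69.0) < 1e-9

Under this assumption, a value converted by note_midi_to_hz and passed back through note_hz_to_midi round-trips to the original MIDI number up to floating-point error, which is the behavior the paired conversion records rely on.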
{"signature": "@_conversion('', '')def note_hz_to_midi(annotation):", "body": "annotation.namespace = ''data = annotation.pop_data()for obs in data:annotation.append(time=obs.time, duration=obs.duration,confidence=obs.confidence,value= * (np.log2(obs.value) - np.log2()) + )return annotation", "docstring": "Convert a pitch_hz annotation to pitch_midi", "id": "f11238:m6"} {"signature": "@_conversion('', '')def pitch_midi_to_hz(annotation):", "body": "annotation.namespace = ''data = annotation.pop_data()for obs in data:annotation.append(time=obs.time, duration=obs.duration,confidence=obs.confidence,value= * (**((obs.value - )/)))return annotation", "docstring": "Convert a pitch_midi annotation to pitch_hz", "id": "f11238:m7"} {"signature": "@_conversion('', '')def pitch_hz_to_midi(annotation):", "body": "annotation.namespace = ''data = annotation.pop_data()for obs in data:annotation.append(time=obs.time, duration=obs.duration,confidence=obs.confidence,value= * (np.log2(obs.value) - np.log2()) + )return annotation", "docstring": "Convert a pitch_hz annotation to pitch_midi", "id": "f11238:m8"} {"signature": "@_conversion('', '')def segment_to_open(annotation):", "body": "annotation.namespace = ''return annotation", "docstring": "Convert any segmentation to open label space", "id": "f11238:m9"} {"signature": "@_conversion('', '')def tag_to_open(annotation):", "body": "annotation.namespace = ''return annotation", "docstring": "Convert any tag annotation to open label space", "id": "f11238:m10"} {"signature": "@_conversion('', '')def scaper_to_tag(annotation):", "body": "annotation.namespace = ''data = annotation.pop_data()for obs in data:annotation.append(time=obs.time, duration=obs.duration,confidence=obs.confidence, value=obs.value[''])return annotation", "docstring": "Convert scaper annotations to tag_open", "id": "f11238:m11"} {"signature": "@_conversion('', '')def beat_position(annotation):", "body": "annotation.namespace = ''data = annotation.pop_data()for obs in data:annotation.append(time=obs.time, duration=obs.duration,confidence=obs.confidence,value=obs.value[''])return annotation", "docstring": "Convert beat_position to beat", "id": "f11238:m12"} {"signature": "@_conversion('', '')def chordh_to_chord(annotation):", "body": "annotation.namespace = ''return annotation", "docstring": "Convert Harte annotation to chord", "id": "f11238:m13"} {"signature": "def deprecated(version, version_removed):", "body": "def __wrapper(func, *args, **kwargs):''''''code = six.get_function_code(func)warnings.warn_explicit(\"\"\"\".format(func.__module__, func.__name__,version, version_removed),category=DeprecationWarning,filename=code.co_filename,lineno=code.co_firstlineno + )return func(*args, **kwargs)return decorator(__wrapper)", "docstring": "This is a decorator which can be used to mark functions\n as deprecated.\n\n It will result in a warning being emitted when the function is used.", "id": "f11239:m0"} {"signature": "@contextlib.contextmanagerdef _open(name_or_fdesc, mode='', fmt=''):", "body": "open_map = {'': open,'': open,'': gzip.open,'': gzip.open}if hasattr(name_or_fdesc, '') or hasattr(name_or_fdesc, ''):yield name_or_fdescelif isinstance(name_or_fdesc, six.string_types):if fmt == '':_, ext = os.path.splitext(name_or_fdesc)ext = ext[:]else:ext = fmttry:ext = ext.lower()if ext in ['', ''] and '' not in mode:mode = ''.format(mode)with open_map[ext](name_or_fdesc, mode=mode) as fdesc:yield fdescexcept KeyError:raise ParameterError(''''.format(ext))else:raise 
ParameterError(''''.format(name_or_fdesc))", "docstring": "An intelligent wrapper for ``open``.\n\n Parameters\n ----------\n name_or_fdesc : string-type or open file descriptor\n If a string type, refers to the path to a file on disk.\n\n If an open file descriptor, it is returned as-is.\n\n mode : string\n The mode with which to open the file.\n See ``open`` for details.\n\n fmt : string ['auto', 'jams', 'json', 'jamz']\n The encoding for the input/output stream.\n\n If `auto`, the format is inferred from the filename extension.\n\n Otherwise, use the specified coding.\n\n\n See Also\n --------\n open\n gzip.open", "id": "f11239:m1"} {"signature": "def load(path_or_file, validate=True, strict=True, fmt=''):", "body": "with _open(path_or_file, mode='', fmt=fmt) as fdesc:jam = JAMS(**json.load(fdesc))if validate:jam.validate(strict=strict)return jam", "docstring": "r\"\"\"Load a JAMS Annotation from a file.\n\n\n Parameters\n ----------\n path_or_file : str or file-like\n Path to the JAMS file to load\n OR\n An open file handle to load from.\n\n validate : bool\n Attempt to validate the JAMS object\n\n strict : bool\n if `validate == True`, enforce strict schema validation\n\n fmt : str ['auto', 'jams', 'jamz']\n The encoding format of the input\n\n If `auto`, encoding is inferred from the file name.\n\n If the input is an open file handle, `jams` encoding\n is used.\n\n\n Returns\n -------\n jam : JAMS\n The loaded JAMS object\n\n\n Raises\n ------\n SchemaError\n if `validate == True`, `strict==True`, and validation fails\n\n\n See also\n --------\n JAMS.validate\n JAMS.save\n\n\n Examples\n --------\n >>> # Load a jams object from a file name\n >>> J = jams.load('data.jams')\n >>> # Or from an open file descriptor\n >>> with open('data.jams', 'r') as fdesc:\n ... 
J = jams.load(fdesc)\n >>> # Non-strict validation\n >>> J = jams.load('data.jams', strict=False)\n >>> # No validation at all\n >>> J = jams.load('data.jams', validate=False)", "id": "f11239:m2"} {"signature": "def query_pop(query, prefix, sep=''):", "body": "terms = query.split(sep)if terms[] == prefix:terms = terms[:]return sep.join(terms)", "docstring": "Pop a prefix from a query string.\n\n\n Parameters\n ----------\n query : str\n The query string\n\n prefix : str\n The prefix string to pop, if it exists\n\n sep : str\n The string to separate fields\n\n Returns\n -------\n popped : str\n `query` with a `prefix` removed from the front (if found)\n or `query` if the prefix was not found\n\n Examples\n --------\n >>> query_pop('Annotation.namespace', 'Annotation')\n 'namespace'\n >>> query_pop('namespace', 'Annotation')\n 'namespace'", "id": "f11239:m3"} {"signature": "def match_query(string, query):", "body": "if six.callable(query):return query(string)elif (isinstance(query, six.string_types) andisinstance(string, six.string_types)):return re.match(query, string) is not Noneelse:return query == string", "docstring": "Test if a string matches a query.\n\n Parameters\n ----------\n string : str\n The string to test\n\n query : string, callable, or object\n Either a regular expression, callable function, or object.\n\n Returns\n -------\n match : bool\n `True` if:\n - `query` is a callable and `query(string) == True`\n - `query` is a regular expression and `re.match(query, string)`\n - or `string == query` for any other query\n\n `False` otherwise", "id": "f11239:m4"} {"signature": "def serialize_obj(obj):", "body": "if isinstance(obj, np.integer):return int(obj)elif isinstance(obj, np.floating):return float(obj)elif isinstance(obj, np.ndarray):return obj.tolist()elif isinstance(obj, list):return [serialize_obj(x) for x in obj]elif isinstance(obj, Observation):return {k: serialize_obj(v) for k, v in six.iteritems(obj._asdict())}return obj", "docstring": "Custom serialization functionality for working with advanced data types.\n\n - numpy arrays are converted to lists\n - lists are recursively serialized element-wise", "id": "f11239:m5"} {"signature": "def summary(obj, indent=):", "body": "if hasattr(obj, ''):rep = obj.__summary__()elif isinstance(obj, SortedKeyList):rep = ''.format(len(obj))else:rep = repr(obj)return rep.replace('', '' + '' * indent)", "docstring": "Helper function to format repr strings for JObjects and friends.\n\n Parameters\n ----------\n obj\n The object to repr\n\n indent : int >= 0\n indent each new line by `indent` spaces\n\n Returns\n -------\n r : str\n If `obj` has a `__summary__` method, it is used.\n\n If `obj` is a `SortedKeyList`, then it returns a description\n of the length of the list.\n\n Otherwise, `repr(obj)`.", "id": "f11239:m6"} {"signature": "def _get_divid(obj):", "body": "global __DIVID_COUNT____DIVID_COUNT__ += return ''.format(id(obj), __DIVID_COUNT__)", "docstring": "Static function to get a unique id for an object.\n This is used in HTML rendering to ensure unique div ids for each call\n to display an object", "id": "f11239:m8"} {"signature": "def __init__(self, **kwargs):", "body": "super(JObject, self).__init__()for name, value in six.iteritems(kwargs):setattr(self, name, value)", "docstring": "Construct a new JObject\n\n Parameters\n ----------\n kwargs\n Each keyword argument becomes an attribute with the specified value\n\n Examples\n --------\n >>> J = jams.JObject(foo=5)\n >>> J.foo\n 5\n >>> dict(J)\n {'foo': 5}", "id": 
"f11239:c0:m0"} {"signature": "@propertydef __schema__(self):", "body": "return schema.JAMS_SCHEMA[''].get(self.type, None)", "docstring": "The schema definition for this JObject, if it exists.\n\n Returns\n -------\n schema : dict or None", "id": "f11239:c0:m1"} {"signature": "@propertydef __json__(self):", "body": "filtered_dict = dict()for k, item in six.iteritems(self.__dict__):if k.startswith(''):continueif hasattr(item, ''):filtered_dict[k] = item.__json__else:filtered_dict[k] = serialize_obj(item)return filtered_dict", "docstring": "r\"\"\"Return the JObject as a set of native data types for serialization.\n\n Note: attributes beginning with underscores are suppressed.", "id": "f11239:c0:m2"} {"signature": "@classmethoddef __json_init__(cls, **kwargs):", "body": "return cls(**kwargs)", "docstring": "Initialize the object from a dictionary of values", "id": "f11239:c0:m3"} {"signature": "def __getitem__(self, key):", "body": "return self.__dict__[key]", "docstring": "Dict-style interface", "id": "f11239:c0:m6"} {"signature": "def __repr__(self):", "body": "indent = len(self.type) + jstr = '' + '' * indentprops = self._display_properties()params = jstr.join(''.format(p, summary(self[p],indent=indent))for (p, dp) in props)return ''.format(self.type, params)", "docstring": "Render the object alongside its attributes.", "id": "f11239:c0:m10"} {"signature": "def _display_properties(self):", "body": "return sorted([(k, k) for k in self.__dict__])", "docstring": "Returns a list of tuples (key, display_name)\n for properties of this object", "id": "f11239:c0:m11"} {"signature": "def dumps(self, **kwargs):", "body": "return json.dumps(self.__json__, **kwargs)", "docstring": "Serialize the JObject to a string.\n\n Parameters\n ----------\n kwargs\n Keyword arguments to json.dumps\n\n Returns\n -------\n object_str : str\n Serialized JObject\n\n See Also\n --------\n json.dumps\n loads\n\n Examples\n --------\n >>> J = jams.JObject(foo=5, bar='baz')\n >>> J.dumps()\n '{\"foo\": 5, \"bar\": \"baz\"}'", "id": "f11239:c0:m15"} {"signature": "def keys(self):", "body": "return self.__dict__.keys()", "docstring": "Return a list of the attributes of the object.\n\n Returns\n -------\n keys : list\n The attributes of the object\n\n Examples\n --------\n >>> J = jams.JObject(foo=5, bar='baz')\n >>> J.keys()\n ['foo', 'bar']", "id": "f11239:c0:m16"} {"signature": "def update(self, **kwargs):", "body": "for name, value in six.iteritems(kwargs):setattr(self, name, value)", "docstring": "Update the attributes of a JObject.\n\n Parameters\n ----------\n kwargs\n Keyword arguments of the form `attribute=new_value`\n\n Examples\n --------\n >>> J = jams.JObject(foo=5)\n >>> J.dumps()\n '{\"foo\": 5}'\n >>> J.update(bar='baz')\n >>> J.dumps()\n '{\"foo\": 5, \"bar\": \"baz\"}'", "id": "f11239:c0:m17"} {"signature": "@propertydef type(self):", "body": "return self.__class__.__name__", "docstring": "The type (class name) of a derived JObject type", "id": "f11239:c0:m18"} {"signature": "@classmethoddef loads(cls, string):", "body": "return cls.__json_init__(**json.loads(string))", "docstring": "De-serialize a JObject\n\n Parameters\n ----------\n string : str\n A serialized (JSON string) JObject\n\n Returns\n -------\n J : JObject\n The input string reconstructed as a JObject\n\n See Also\n --------\n json.loads\n dumps\n\n Examples\n --------\n >>> J = jams.JObject(foo=5, bar='baz')\n >>> J.dumps()\n '{\"foo\": 5, \"bar\": \"baz\"}'\n >>> jams.JObject.loads(J.dumps())\n ", "id": "f11239:c0:m19"} {"signature": 
"def search(self, **kwargs):", "body": "match = Falser_query = {}myself = self.__class__.__name__for k, value in six.iteritems(kwargs):k_pop = query_pop(k, myself)if k_pop:r_query[k_pop] = valueif not r_query:return Falsefor key in r_query:if hasattr(self, key):match |= match_query(getattr(self, key), r_query[key])if not match:for attr in dir(self):obj = getattr(self, attr)if isinstance(obj, JObject):match |= obj.search(**r_query)return match", "docstring": "Query this object (and its descendants).\n\n Parameters\n ----------\n kwargs\n Each `(key, value)` pair encodes a search field in `key`\n and a target value in `value`.\n\n `key` must be a string, and should correspond to a property in\n the JAMS object hierarchy, e.g., 'Annotation.namespace` or `email`\n\n `value` must be either an object (tested for equality), a\n string describing a search pattern (regular expression), or a\n lambda function which evaluates to `True` if the candidate\n object matches the search criteria and `False` otherwise.\n\n Returns\n -------\n match : bool\n `True` if any of the search keys match the specified value,\n `False` otherwise, or if the search keys do not exist\n within the object.\n\n Examples\n --------\n >>> J = jams.JObject(foo=5, needle='quick brown fox')\n >>> J.search(needle='.*brown.*')\n True\n >>> J.search(needle='.*orange.*')\n False\n >>> J.search(badger='.*brown.*')\n False\n >>> J.search(foo=5)\n True\n >>> J.search(foo=10)\n False\n >>> J.search(foo=lambda x: x < 10)\n True\n >>> J.search(foo=lambda x: x > 10)\n False", "id": "f11239:c0:m20"} {"signature": "def validate(self, strict=True):", "body": "valid = Truetry:jsonschema.validate(self.__json__, self.__schema__)except jsonschema.ValidationError as invalid:if strict:raise SchemaError(str(invalid))else:warnings.warn(str(invalid))valid = Falsereturn valid", "docstring": "Validate a JObject against its schema\n\n Parameters\n ----------\n strict : bool\n Enforce strict schema validation\n\n Returns\n -------\n valid : bool\n True if the jam validates\n False if not, and `strict==False`\n\n Raises\n ------\n SchemaError\n If `strict==True` and `jam` fails validation", "id": "f11239:c0:m21"} {"signature": "def __init__(self, namespace, data=None, annotation_metadata=None,sandbox=None, time=, duration=None):", "body": "super(Annotation, self).__init__()if annotation_metadata is None:annotation_metadata = AnnotationMetadata()self.annotation_metadata = AnnotationMetadata(**annotation_metadata)self.namespace = namespaceself.data = SortedKeyList(key=self._key)if data is not None:if isinstance(data, dict):self.append_columns(data)else:self.append_records(data)if sandbox is None:sandbox = Sandbox()self.sandbox = Sandbox(**sandbox)self.time = timeself.duration = duration", "docstring": "Create an Annotation.\n\n Note that, if an argument is None, an empty Annotation is created in\n its place. Additionally, a dictionary matching the expected structure\n of the arguments will be parsed (i.e. 
instantiating from JSON).\n\n Parameters\n ----------\n namespace : str\n The namespace for this annotation\n\n data : dict of lists, list of dicts, or list of Observations\n Data for the new annotation\n\n annotation_metadata : AnnotationMetadata (or dict), default=None.\n Metadata corresponding to this Annotation.\n\n sandbox : Sandbox (dict), default=None\n Miscellaneous information; keep to native datatypes if possible.\n\n time : non-negative number\n The starting time for this annotation\n\n duration : non-negative number\n The duration of this annotation", "id": "f11239:c2:m0"} {"signature": "def append(self, time=None, duration=None, value=None, confidence=None):", "body": "self.data.add(Observation(time=float(time),duration=float(duration),value=value,confidence=confidence))", "docstring": "Append an observation to the data field\n\n Parameters\n ----------\n time : float >= 0\n duration : float >= 0\n The time and duration of the new observation, in seconds\n value\n confidence\n The value and confidence of the new observations.\n\n Types and values should conform to the namespace of the\n Annotation object.\n\n Examples\n --------\n >>> ann = jams.Annotation(namespace='chord')\n >>> ann.append(time=3, duration=2, value='E#')", "id": "f11239:c2:m2"} {"signature": "def append_records(self, records):", "body": "for obs in records:if isinstance(obs, Observation):self.append(**obs._asdict())else:self.append(**obs)", "docstring": "Add observations from row-major storage.\n\n This is primarily useful for deserializing sparsely packed data.\n\n Parameters\n ----------\n records : iterable of dicts or Observations\n Each element of `records` corresponds to one observation.", "id": "f11239:c2:m3"} {"signature": "def append_columns(self, columns):", "body": "self.append_records([dict(time=t, duration=d, value=v, confidence=c)for (t, d, v, c)in six.moves.zip(columns[''],columns[''],columns[''],columns[''])])", "docstring": "Add observations from column-major storage.\n\n This is primarily used for deserializing densely packed data.\n\n Parameters\n ----------\n columns : dict of lists\n Keys must be `time, duration, value, confidence`,\n and each much be a list of equal length.", "id": "f11239:c2:m4"} {"signature": "def validate(self, strict=True):", "body": "ann_schema = schema.namespace_array(self.namespace)valid = Truetry:jsonschema.validate(self.__json_light__(data=False),schema.JAMS_SCHEMA)data_ser = [serialize_obj(obs) for obs in self.data]jsonschema.validate(data_ser, ann_schema)except jsonschema.ValidationError as invalid:if strict:raise SchemaError(str(invalid))else:warnings.warn(str(invalid))valid = Falsereturn valid", "docstring": "Validate this annotation object against the JAMS schema,\n and its data against the namespace schema.\n\n Parameters\n ----------\n strict : bool\n If `True`, then schema violations will cause an Exception.\n If `False`, then schema violations will issue a warning.\n\n Returns\n -------\n valid : bool\n `True` if the object conforms to schema.\n `False` if the object fails to conform to schema,\n but `strict == False`.\n\n Raises\n ------\n SchemaError\n If `strict == True` and the object fails validation\n\n See Also\n --------\n JObject.validate", "id": "f11239:c2:m5"} {"signature": "def trim(self, start_time, end_time, strict=False):", "body": "if end_time <= start_time:raise ParameterError('')if self.duration is None:orig_time = start_timeorig_duration = end_time - start_timewarnings.warn(\"\"\"\"\"\")else:orig_time = self.timeorig_duration = 
self.durationif start_time > (orig_time + orig_duration) or (end_time < orig_time):warnings.warn('''''')trim_start = self.timetrim_end = trim_startelse:trim_start = max(orig_time, start_time)trim_end = min(orig_time + orig_duration, end_time)ann_trimmed = Annotation(self.namespace,data=None,annotation_metadata=self.annotation_metadata,sandbox=self.sandbox,time=trim_start,duration=trim_end - trim_start)for obs in self.data:obs_start = obs.timeobs_end = obs_start + obs.durationif obs_start < trim_end and obs_end > trim_start:new_start = max(obs_start, trim_start)new_end = min(obs_end, trim_end)new_duration = new_end - new_startif ((not strict) or(new_start == obs_start and new_end == obs_end)):ann_trimmed.append(time=new_start,duration=new_duration,value=obs.value,confidence=obs.confidence)if '' not in ann_trimmed.sandbox.keys():ann_trimmed.sandbox.update(trim=[{'': start_time, '': end_time,'': trim_start, '': trim_end}])else:ann_trimmed.sandbox.trim.append({'': start_time, '': end_time,'': trim_start, '': trim_end})return ann_trimmed", "docstring": "Trim the annotation and return as a new `Annotation` object.\n\nTrimming will result in the new annotation only containing observations\nthat occur in the intersection of the time range spanned by the\nannotation and the time range specified by the user. The new annotation\nwill span the time range ``[trim_start, trim_end]`` where\n``trim_start = max(self.time, start_time)`` and ``trim_end =\nmin(self.time + self.duration, end_time)``.\n\nIf ``strict=False`` (default) observations that start before\n``trim_start`` and end after it will be trimmed such that they start at\n``trim_start``, and similarly observations that start before\n``trim_end`` and end after it will be trimmed to end at ``trim_end``.\nIf ``strict=True`` such borderline observations will be discarded.\n\nThe new duration of the annotation will be ``trim_end - trim_start``.\n\nNote that if the range defined by ``[start_time, end_time]``\ndoesn't intersect with the original time range spanned by the\nannotation the resulting annotation will contain no observations, will\nhave the same start time as the original annotation and have duration\n0.\n\nThis function also copies over all the annotation metadata from the\noriginal annotation and documents the trim operation by adding a list\nof tuples to the annotation's sandbox keyed by\n``Annotation.sandbox.trim`` which documents each trim operation with a\ntuple ``(start_time, end_time, trim_start, trim_end)``.\n\nParameters\n----------\nstart_time : float\n The desired start time for the trimmed annotation in seconds.\nend_time\n The desired end time for the trimmed annotation in seconds. Must be\n greater than ``start_time``.\nstrict : bool\n When ``False`` (default) observations that lie at the boundaries of\n the trimming range (given by ``[trim_start, trim_end]`` as\n described above), i.e. observations that start before and end after\n either the trim start or end time, will have their time and/or\n duration adjusted such that only the part of the observation that\n lies within the trim range is kept. 
When ``True`` such observations\n are discarded and not included in the trimmed annotation.\n\nReturns\n-------\nann_trimmed : Annotation\n The trimmed annotation, returned as a new jams.Annotation object.\n If the trim range specified by ``[start_time, end_time]`` does not\n intersect at all with the original time range of the annotation a\n warning will be issued and the returned annotation will be empty.\n\nRaises\n------\nParameterError\n If ``end_time`` is not greater than ``start_time``.\n\nExamples\n--------\n>>> ann = jams.Annotation(namespace='tag_open', time=2, duration=8)\n>>> ann.append(time=2, duration=2, value='one')\n>>> ann.append(time=4, duration=2, value='two')\n>>> ann.append(time=6, duration=2, value='three')\n>>> ann.append(time=7, duration=2, value='four')\n>>> ann.append(time=8, duration=2, value='five')\n>>> ann_trim = ann.trim(5, 8, strict=False)\n>>> print(ann_trim.time, ann_trim.duration)\n(5, 3)\n>>> ann_trim.to_dataframe()\n time duration value confidence\n0 5 1 two None\n1 6 2 three None\n2 7 1 four None\n>>> ann_trim_strict = ann.trim(5, 8, strict=True)\n>>> print(ann_trim_strict.time, ann_trim_strict.duration)\n(5, 3)\n>>> ann_trim_strict.to_dataframe()\n time duration value confidence\n0 6 2 three None", "id": "f11239:c2:m6"} {"signature": "def slice(self, start_time, end_time, strict=False):", "body": "sliced_ann = self.trim(start_time, end_time, strict=strict)raw_data = sliced_ann.pop_data()for obs in raw_data:new_time = max(, obs.time - start_time)sliced_ann.append(time=new_time,duration=obs.duration,value=obs.value,confidence=obs.confidence)ref_time = sliced_ann.timeslice_start = ref_timeslice_end = ref_time + sliced_ann.durationif '' not in sliced_ann.sandbox.keys():sliced_ann.sandbox.update(slice=[{'': start_time, '': end_time,'': slice_start, '': slice_end}])else:sliced_ann.sandbox.slice.append({'': start_time, '': end_time,'': slice_start, '': slice_end})sliced_ann.time = max(, ref_time - start_time)return sliced_ann", "docstring": "Slice the annotation and return as a new `Annotation` object.\n\nSlicing has the same effect as trimming (see `Annotation.trim`) except\nthat while trimming does not modify the start time of the annotation or\nthe observations it contains, slicing will set the new annotation's\nstart time to ``max(0, trimmed_annotation.time - start_time)`` and the\nstart time of its observations will be set with respect to this new\nreference start time.\n\nThis function documents the slice operation by adding a list of tuples\nto the annotation's sandbox keyed by ``Annotation.sandbox.slice`` which\ndocuments each slice operation with a tuple\n``(start_time, end_time, slice_start, slice_end)``, where\n``slice_start`` and ``slice_end`` are given by ``trim_start`` and\n``trim_end`` (see `Annotation.trim`).\n\nSince slicing is implemented using trimming, the trimming operation\nwill also be documented in ``Annotation.sandbox.trim`` as described in\n`Annotation.trim`.\n\nThis function is useful for example when trimming an audio file,\nallowing the user to trim the annotation while ensuring all time\ninformation matches the new trimmed audio file.\n\nParameters\n----------\nstart_time : float\n The desired start time for slicing in seconds.\nend_time\n The desired end time for slicing in seconds. 
Must be greater than\n ``start_time``.\nstrict : bool\n When ``False`` (default) observations that lie at the boundaries of\n the slice (see `Annotation.trim` for details) will have their time\n and/or duration adjusted such that only the part of the observation\n that lies within the slice range is kept. When ``True`` such\n observations are discarded and not included in the sliced\n annotation.\n\nReturns\n-------\nsliced_ann : Annotation\n The sliced annotation.\n\nSee Also\n--------\nAnnotation.trim\n\nExamples\n--------\n>>> ann = jams.Annotation(namespace='tag_open', time=2, duration=8)\n>>> ann.append(time=2, duration=2, value='one')\n>>> ann.append(time=4, duration=2, value='two')\n>>> ann.append(time=6, duration=2, value='three')\n>>> ann.append(time=7, duration=2, value='four')\n>>> ann.append(time=8, duration=2, value='five')\n>>> ann_slice = ann.slice(5, 8, strict=False)\n>>> print(ann_slice.time, ann_slice.duration)\n(0, 3)\n>>> ann_slice.to_dataframe()\n time duration value confidence\n0 0.0 1.0 two None\n1 1.0 2.0 three None\n2 2.0 1.0 four None\n>>> ann_slice_strict = ann.slice(5, 8, strict=True)\n>>> print(ann_slice_strict.time, ann_slice_strict.duration)\n(0, 3)\n>>> ann_slice_strict.to_dataframe()\n time duration value confidence\n0 1.0 2.0 three None", "id": "f11239:c2:m7"} {"signature": "def pop_data(self):", "body": "data = self.dataself.data = SortedKeyList(key=self._key)return data", "docstring": "Replace this observation's data with a fresh container.\n\n Returns\n -------\n annotation_data : SortedKeyList\n The original annotation data container", "id": "f11239:c2:m8"} {"signature": "def to_interval_values(self):", "body": "ints, vals = [], []for obs in self.data:ints.append([obs.time, obs.time + obs.duration])vals.append(obs.value)if not ints:return np.empty(shape=(, ), dtype=float), []return np.array(ints), vals", "docstring": "Extract observation data in a `mir_eval`-friendly format.\n\n Returns\n -------\n intervals : np.ndarray [shape=(n, 2), dtype=float]\n Start- and end-times of all valued intervals\n\n `intervals[i, :] = [time[i], time[i] + duration[i]]`\n\n labels : list\n List view of value field.", "id": "f11239:c2:m9"} {"signature": "def to_event_values(self):", "body": "ints, vals = [], []for obs in self.data:ints.append(obs.time)vals.append(obs.value)return np.array(ints), vals", "docstring": "Extract observation data in a `mir_eval`-friendly format.\n\n Returns\n -------\n times : np.ndarray [shape=(n,), dtype=float]\n Start-time of all observations\n\n labels : list\n List view of value field.", "id": "f11239:c2:m10"} {"signature": "def to_dataframe(self):", "body": "return pd.DataFrame.from_records(list(self.data),columns=['', '','', ''])", "docstring": "Convert this annotation to a pandas dataframe.\n\n Returns\n -------\n df : pd.DataFrame\n Columns are `time, duration, value, confidence`.\n Each row is an observation, and rows are sorted by\n ascending `time`.", "id": "f11239:c2:m11"} {"signature": "def to_samples(self, times, confidence=False):", "body": "times = np.asarray(times)if times.ndim != or np.any(times < ):raise ParameterError('')idx = np.argsort(times)samples = times[idx]values = [list() for _ in samples]confidences = [list() for _ in samples]for obs in self.data:start = np.searchsorted(samples, obs.time)end = np.searchsorted(samples, obs.time + obs.duration, side='')for i in range(start, end):values[idx[i]].append(obs.value)confidences[idx[i]].append(obs.confidence)if confidence:return values, confidenceselse:return values", 
"docstring": "Sample the annotation at specified times.\n\n Parameters\n ----------\n times : np.ndarray, non-negative, ndim=1\n The times (in seconds) to sample the annotation\n\n confidence : bool\n If `True`, return both values and confidences.\n If `False` (default) only return values.\n\n Returns\n -------\n values : list\n `values[i]` is a list of observation values for intervals\n that cover `times[i]`.\n\n confidence : list (optional)\n `confidence` values corresponding to `values`", "id": "f11239:c2:m12"} {"signature": "def to_html(self, max_rows=None):", "body": "n = len(self.data)div_id = _get_divid(self)out = r''''''.format(div_id, self.namespace, n)out += r''''''.format(div_id)out += r''''''.format(self.annotation_metadata._repr_html_())out += r''''''.format(self.sandbox._repr_html_())out += r''''''.format(self.namespace, n)out += r''''''if max_rows is None or n <= max_rows:out += self._fmt_rows(, n)else:out += self._fmt_rows(, max_rows//)out += r''''''out += self._fmt_rows(n-max_rows//, n)out += r''''''out += r''''''out += r''''''return out", "docstring": "Render this annotation list in HTML\n\n Returns\n -------\n rendered : str\n An HTML table containing this annotation's data.", "id": "f11239:c2:m14"} {"signature": "def _repr_html_(self, max_rows=):", "body": "return self.to_html(max_rows=max_rows)", "docstring": "Render annotation as HTML. See also: `to_html()`", "id": "f11239:c2:m16"} {"signature": "def __json_light__(self, data=True):", "body": "filtered_dict = dict()for k, item in six.iteritems(self.__dict__):if k.startswith(''):continueelif k == '':if data:filtered_dict[k] = self.__json_data__else:filtered_dict[k] = []elif hasattr(item, ''):filtered_dict[k] = item.__json__else:filtered_dict[k] = itemreturn filtered_dict", "docstring": "r\"\"\"Return the JObject as a set of native data types for serialization.\n\n Note: attributes beginning with underscores are suppressed.", "id": "f11239:c2:m18"} {"signature": "@propertydef __json_data__(self):", "body": "if schema.is_dense(self.namespace):dense_records = dict()for field in Observation._fields:dense_records[field] = []for obs in self.data:for key, val in six.iteritems(obs._asdict()):dense_records[key].append(serialize_obj(val))return dense_recordselse:return [serialize_obj(_) for _ in self.data]", "docstring": "r\"\"\"JSON-serialize the observation sequence.", "id": "f11239:c2:m19"} {"signature": "@classmethoddef _key(cls, obs):", "body": "if not isinstance(obs, Observation):raise JamsError(''.format(obs))return obs.time", "docstring": "Provides sorting index for Observation objects", "id": "f11239:c2:m20"} {"signature": "def __init__(self, name='', email=''):", "body": "super(Curator, self).__init__()self.name = nameself.email = email", "docstring": "Create a Curator.\n\n Parameters\n ----------\n name: str, default=''\n Common name of the curator.\n\n email: str, default=''\n An email address corresponding to the curator.", "id": "f11239:c3:m0"} {"signature": "def __init__(self, curator=None, version='', corpus='', annotator=None,annotation_tools='', annotation_rules='', validation='',data_source=''):", "body": "super(AnnotationMetadata, self).__init__()if curator is None:curator = Curator()if annotator is None:annotator = JObject()self.curator = Curator(**curator)self.annotator = JObject(**annotator)self.version = versionself.corpus = corpusself.annotation_tools = annotation_toolsself.annotation_rules = annotation_rulesself.validation = validationself.data_source = data_source", "docstring": "Create an 
AnnotationMetadata object.\n\n Parameters\n ----------\n curator: Curator, default=None\n Object documenting a name and email address for the person of\n correspondence.\n\n version: string, default=''\n Version of this annotation.\n\n annotator: dict, default=None\n Sandbox for information about the specific annotator, such as\n musical experience, skill level, principal instrument, etc.\n\n corpus: str, default=''\n Collection assignment.\n\n annotation_tools: str, default=''\n Description of the tools used to create the annotation.\n\n annotation_rules: str, default=''\n Description of the rules provided to the annotator.\n\n validation: str, default=''\n Methods for validating the integrity of the data.\n\n data_source: str, default=''\n Description of where the data originated, e.g. 'Manual Annotation'.", "id": "f11239:c4:m0"} {"signature": "def __init__(self, title='', artist='', release='', duration=None,identifiers=None, jams_version=None):", "body": "super(FileMetadata, self).__init__()if jams_version is None:jams_version = __VERSION__if identifiers is None:identifiers = Sandbox()self.title = titleself.artist = artistself.release = releaseself.duration = durationself.identifiers = Sandbox(**identifiers)self.jams_version = jams_version", "docstring": "Create a file-level Metadata object.\n\n Parameters\n ----------\n title: str\n Name of the recording.\n\n artist: str\n Name of the artist / musician.\n\n release: str\n Name of the release\n\n duration: number >= 0\n Time duration of the file, in seconds.\n\n identifiers : jams.Sandbox\n Sandbox of identifier keys (eg, musicbrainz ids)\n\n jams_version: str\n Version of the JAMS Schema.", "id": "f11239:c5:m0"} {"signature": "def __init__(self, annotations=None):", "body": "super(AnnotationArray, self).__init__()if annotations is None:annotations = list()self.extend([Annotation(**obj) for obj in annotations])", "docstring": "Create an AnnotationArray.\n\n Parameters\n ----------\n annotations: list\n List of Annotations, or appropriately formated dicts\n is consistent with Annotation.", "id": "f11239:c6:m0"} {"signature": "def search(self, **kwargs):", "body": "results = AnnotationArray()for annotation in self:if annotation.search(**kwargs):results.append(annotation)return results", "docstring": "Filter the annotation array down to only those Annotation\n objects matching the query.\n\n\n Parameters\n ----------\n kwargs : search parameters\n See JObject.search\n\n Returns\n -------\n results : AnnotationArray\n An annotation array of the objects matching the query\n\n See Also\n --------\n JObject.search", "id": "f11239:c6:m1"} {"signature": "def __getitem__(self, idx):", "body": "if isinstance(idx, (int, slice)):return list.__getitem__(self, idx)elif isinstance(idx, six.string_types) or six.callable(idx):return self.search(namespace=idx)elif isinstance(idx, tuple):return self.search(namespace=idx[])[idx[]]raise IndexError(''.format(idx))", "docstring": "Overloaded getitem for syntactic search sugar", "id": "f11239:c6:m2"} {"signature": "def trim(self, start_time, end_time, strict=False):", "body": "trimmed_array = AnnotationArray()for ann in self:trimmed_array.append(ann.trim(start_time, end_time, strict=strict))return trimmed_array", "docstring": "Trim every annotation contained in the annotation array using\n`Annotation.trim` and return as a new `AnnotationArray`.\n\nSee `Annotation.trim` for details about trimming. 
This function does\nnot modify the annotations in the original annotation array.\n\n\nParameters\n----------\nstart_time : float\n The desired start time for the trimmed annotations in seconds.\nend_time\n The desired end time for trimmed annotations in seconds. Must be\n greater than ``start_time``.\nstrict : bool\n When ``False`` (default) observations that lie at the boundaries of\n the trimming range (see `Annotation.trim` for details) will have\n their time and/or duration adjusted such that only the part of the\n observation that lies within the trim range is kept. When ``True``\n such observations are discarded and not included in the trimmed\n annotation.\n\nReturns\n-------\ntrimmed_array : AnnotationArray\n An annotation array where every annotation has been trimmed.", "id": "f11239:c6:m4"} {"signature": "def slice(self, start_time, end_time, strict=False):", "body": "sliced_array = AnnotationArray()for ann in self:sliced_array.append(ann.slice(start_time, end_time, strict=strict))return sliced_array", "docstring": "Slice every annotation contained in the annotation array using\n`Annotation.slice`\nand return as a new AnnotationArray\n\nSee `Annotation.slice` for details about slicing. This function does\nnot modify the annotations in the original annotation array.\n\nParameters\n----------\nstart_time : float\n The desired start time for slicing in seconds.\nend_time\n The desired end time for slicing in seconds. Must be greater than\n ``start_time``.\nstrict : bool\n When ``False`` (default) observations that lie at the boundaries of\n the slicing range (see `Annotation.slice` for details) will have\n their time and/or duration adjusted such that only the part of the\n observation that lies within the trim range is kept. When ``True``\n such observations are discarded and not included in the sliced\n annotation.\n\nReturns\n-------\nsliced_array : AnnotationArray\n An annotation array where every annotation has been sliced.", "id": "f11239:c6:m5"} {"signature": "def __init__(self, annotations=None, file_metadata=None, sandbox=None):", "body": "super(JAMS, self).__init__()if file_metadata is None:file_metadata = FileMetadata()if sandbox is None:sandbox = Sandbox()self.annotations = AnnotationArray(annotations=annotations)self.file_metadata = FileMetadata(**file_metadata)self.sandbox = Sandbox(**sandbox)", "docstring": "Create a Jams object.\n\n Parameters\n ----------\n annotations : list of Annotations\n Zero or more Annotation objects\n\n file_metadata : FileMetadata (or dict), default=None\n Metadata corresponding to the audio file.\n\n sandbox : Sandbox (or dict), default=None\n Unconstrained global sandbox for additional information.", "id": "f11239:c7:m0"} {"signature": "def add(self, jam, on_conflict=''):", "body": "if on_conflict not in ['', '', '']:raise ParameterError(\"\"\"\".format(on_conflict))if not self.file_metadata == jam.file_metadata:if on_conflict == '':self.file_metadata = jam.file_metadataelif on_conflict == '':raise JamsError(\"\"\"\")self.annotations.extend(jam.annotations)self.sandbox.update(**jam.sandbox)", "docstring": "Add the contents of another jam to this object.\n\n Note that, by default, this method fails if file_metadata is not\n identical and raises a ValueError; either resolve this manually\n (because conflicts should almost never happen), force an 'overwrite',\n or tell the method to 'ignore' the metadata of the object being added.\n\n Parameters\n ----------\n jam: JAMS object\n Object to add to this jam\n\n on_conflict: str, 
default='fail'\n Strategy for resolving metadata conflicts; one of\n ['fail', 'overwrite', or 'ignore'].\n\n Raises\n ------\n ParameterError\n if `on_conflict` is an unknown value\n\n JamsError\n If a conflict is detected and `on_conflict='fail'`", "id": "f11239:c7:m3"} {"signature": "def search(self, **kwargs):", "body": "return self.annotations.search(**kwargs)", "docstring": "Search a JAMS object for matching objects.\n\n Parameters\n ----------\n kwargs : keyword arguments\n Keyword query\n\n Returns\n -------\n AnnotationArray\n All annotation objects in this JAMS which match the query\n\n See Also\n --------\n JObject.search\n AnnotationArray.search\n\n\n Examples\n --------\n A simple query to get all beat annotations\n\n >>> beats = my_jams.search(namespace='beat')", "id": "f11239:c7:m4"} {"signature": "def save(self, path_or_file, strict=True, fmt=''):", "body": "self.validate(strict=strict)with _open(path_or_file, mode='', fmt=fmt) as fdesc:json.dump(self.__json__, fdesc, indent=)", "docstring": "Serialize annotation as a JSON formatted stream to file.\n\n Parameters\n ----------\n path_or_file : str or file-like\n Path to save the JAMS object on disk\n OR\n An open file descriptor to write into\n\n strict : bool\n Force strict schema validation\n\n fmt : str ['auto', 'jams', 'jamz']\n The output encoding format.\n\n If `auto`, it is inferred from the file name.\n\n If the input is an open file handle, `jams` encoding\n is used.\n\n\n Raises\n ------\n SchemaError\n If `strict == True` and the JAMS object fails schema\n or namespace validation.\n\n See also\n --------\n validate", "id": "f11239:c7:m5"} {"signature": "def validate(self, strict=True):", "body": "valid = Truetry:jsonschema.validate(self.__json_light__, schema.JAMS_SCHEMA)for ann in self.annotations:if isinstance(ann, Annotation):valid &= ann.validate(strict=strict)else:msg = ''.format(ann)valid = Falseif strict:raise SchemaError(msg)else:warnings.warn(str(msg))except jsonschema.ValidationError as invalid:if strict:raise SchemaError(str(invalid))else:warnings.warn(str(invalid))valid = Falsereturn valid", "docstring": "Validate a JAMS object against the schema.\n\n Parameters\n ----------\n strict : bool\n If `True`, an exception will be raised on validation failure.\n If `False`, a warning will be raised on validation failure.\n\n Returns\n -------\n valid : bool\n `True` if the object passes schema validation.\n `False` otherwise.\n\n Raises\n ------\n SchemaError\n If `strict==True` and the JAMS object does not match the schema\n\n See Also\n --------\n jsonschema.validate", "id": "f11239:c7:m6"} {"signature": "def trim(self, start_time, end_time, strict=False):", "body": "if self.file_metadata.duration is None:raise JamsError('''')if not ( <= start_time <= end_time <= float(self.file_metadata.duration)):raise ParameterError(''''''.format(float(self.file_metadata.duration)))jam_trimmed = JAMS(annotations=None,file_metadata=self.file_metadata,sandbox=self.sandbox)jam_trimmed.annotations = self.annotations.trim(start_time, end_time, strict=strict)if '' not in jam_trimmed.sandbox.keys():jam_trimmed.sandbox.update(trim=[{'': start_time, '': end_time}])else:jam_trimmed.sandbox.trim.append({'': start_time, '': end_time})return jam_trimmed", "docstring": "Trim all the annotations inside the jam and return as a new `JAMS`\nobject.\n\nSee `Annotation.trim` for details about how the annotations\nare trimmed.\n\nThis operation is also documented in the jam-level sandbox\nwith a list keyed by ``JAMS.sandbox.trim`` containing 
a tuple for each\njam-level trim of the form ``(start_time, end_time)``.\n\nThis function also copies over all of the file metadata from the\noriginal jam.\n\nNote: trimming does not affect the duration of the jam, i.e. the value\nof ``JAMS.file_metadata.duration`` will be the same for the original\nand trimmed jams.\n\nParameters\n----------\nstart_time : float\n The desired start time for the trimmed annotations in seconds.\nend_time\n The desired end time for trimmed annotations in seconds. Must be\n greater than ``start_time``.\nstrict : bool\n When ``False`` (default) observations that lie at the boundaries of\n the trimming range (see `Annotation.trim` for details), will have\n their time and/or duration adjusted such that only the part of the\n observation that lies within the trim range is kept. When ``True``\n such observations are discarded and not included in the trimmed\n annotation.\n\nReturns\n-------\njam_trimmed : JAMS\n The trimmed jam with trimmed annotations, returned as a new JAMS\n object.", "id": "f11239:c7:m7"} {"signature": "def slice(self, start_time, end_time, strict=False):", "body": "if self.file_metadata.duration is None:raise JamsError('''')if (start_time < orstart_time > float(self.file_metadata.duration) orend_time < start_time orend_time > float(self.file_metadata.duration)):raise ParameterError(''''''.format(float(self.file_metadata.duration)))jam_sliced = JAMS(annotations=None,file_metadata=self.file_metadata,sandbox=self.sandbox)jam_sliced.annotations = self.annotations.slice(start_time, end_time, strict=strict)jam_sliced.file_metadata.duration = end_time - start_timeif '' not in jam_sliced.sandbox.keys():jam_sliced.sandbox.update(slice=[{'': start_time, '': end_time}])else:jam_sliced.sandbox.slice.append({'': start_time, '': end_time})return jam_sliced", "docstring": "Slice all the annotations inside the jam and return as a new `JAMS`\nobject.\n\nSee `Annotation.slice` for details about how the annotations\nare sliced.\n\nThis operation is also documented in the jam-level sandbox\nwith a list keyed by ``JAMS.sandbox.slice`` containing a tuple for each\njam-level slice of the form ``(start_time, end_time)``.\n\nSince slicing is implemented using trimming, the operation will also be\ndocumented in ``JAMS.sandbox.trim`` as described in `JAMS.trim`.\n\nThis function also copies over all of the file metadata from the\noriginal jam.\n\nNote: slicing will affect the duration of the jam, i.e. the new value\nof ``JAMS.file_metadata.duration`` will be ``end_time - start_time``.\n\nParameters\n----------\nstart_time : float\n The desired start time for slicing in seconds.\nend_time\n The desired end time for slicing in seconds. Must be greater than\n ``start_time``.\nstrict : bool\n When ``False`` (default) observations that lie at the boundaries of\n the slicing range (see `Annotation.slice` for details), will have\n their time and/or duration adjusted such that only the part of the\n observation that lies within the slice range is kept. 
When ``True``\n such observations are discarded and not included in the sliced\n annotation.\n\nReturns\n-------\njam_sliced: JAMS\n The sliced jam with sliced annotations, returned as a new\n JAMS object.", "id": "f11239:c7:m8"} {"signature": "@propertydef __json_light__(self):", "body": "filtered_dict = dict()for k, item in six.iteritems(self.__dict__):if k.startswith('') or k == '':continueif hasattr(item, ''):filtered_dict[k] = item.__json__else:filtered_dict[k] = serialize_obj(item)return filtered_dict", "docstring": "r\"\"\"Return the JObject as a set of native data types for serialization.\n\n Note: attributes beginning with underscores are suppressed.\n\n This also skips the `annotations` field, which will be validated separately.", "id": "f11239:c7:m9"} {"signature": "def process_arguments(args):", "body": "parser = argparse.ArgumentParser(description='')parser.add_argument('',action='',help='')parser.add_argument('',action='',nargs='',help='')return vars(parser.parse_args(args))", "docstring": "Argument parser", "id": "f11240:m0"} {"signature": "def load_json(filename):", "body": "with open(filename, '') as fdesc:return json.load(fdesc)", "docstring": "Load a json file", "id": "f11240:m1"} {"signature": "def validate(schema_file=None, jams_files=None):", "body": "schema = load_json(schema_file)for jams_file in jams_files:try:jams = load_json(jams_file)jsonschema.validate(jams, schema)print(''.format(jams_file))except jsonschema.ValidationError as exc:print(''.format(jams_file))print(exc)", "docstring": "Validate a jams file against a schema", "id": "f11240:m2"} {"signature": "def mkclick(freq, sr=, duration=):", "body": "times = np.arange(int(sr * duration))click = np.sin( * np.pi * times * freq / float(sr))click *= np.exp(- times / ( * sr))return click", "docstring": "Generate a click sample.\n\n This replicates functionality from mir_eval.sonify.clicks,\n but exposes the target frequency and duration.", "id": "f11242:m0"} {"signature": "def clicks(annotation, sr=, length=None, **kwargs):", "body": "interval, _ = annotation.to_interval_values()return filter_kwargs(mir_eval.sonify.clicks, interval[:, ],fs=sr, length=length, **kwargs)", "docstring": "Sonify events with clicks.\n\n This uses mir_eval.sonify.clicks, and is appropriate for instantaneous\n events such as beats or segment boundaries.", "id": "f11242:m1"} {"signature": "def downbeat(annotation, sr=, length=None, **kwargs):", "body": "beat_click = mkclick( * , sr=sr)downbeat_click = mkclick( * , sr=sr)intervals, values = annotation.to_interval_values()beats, downbeats = [], []for time, value in zip(intervals[:, ], values):if value[''] == :downbeats.append(time)else:beats.append(time)if length is None:length = int(sr * np.max(intervals)) + len(beat_click) + y = filter_kwargs(mir_eval.sonify.clicks,np.asarray(beats),fs=sr, length=length, click=beat_click)y += filter_kwargs(mir_eval.sonify.clicks,np.asarray(downbeats),fs=sr, length=length, click=downbeat_click)return y", "docstring": "Sonify beats and downbeats together.", "id": "f11242:m2"} {"signature": "def multi_segment(annotation, sr=, length=None, **kwargs):", "body": "PENT = [, /, /, /, /]DURATION = h_int, _ = hierarchy_flatten(annotation)if length is None:length = int(sr * (max(np.max(_) for _ in h_int) + / DURATION) + )y = for ints, (oc, scale) in zip(h_int, product(range(, + len(h_int)),PENT)):click = mkclick( * scale * oc, sr=sr, duration=DURATION)y = y + filter_kwargs(mir_eval.sonify.clicks,np.unique(ints),fs=sr, length=length,click=click)return y", "docstring": 
"Sonify multi-level segmentations", "id": "f11242:m3"} {"signature": "def chord(annotation, sr=, length=None, **kwargs):", "body": "intervals, chords = annotation.to_interval_values()return filter_kwargs(mir_eval.sonify.chords,chords, intervals,fs=sr, length=length,**kwargs)", "docstring": "Sonify chords\n\n This uses mir_eval.sonify.chords.", "id": "f11242:m4"} {"signature": "def pitch_contour(annotation, sr=, length=None, **kwargs):", "body": "times = defaultdict(list)freqs = defaultdict(list)for obs in annotation:times[obs.value['']].append(obs.time)freqs[obs.value['']].append(obs.value[''] *(-)**(~obs.value['']))y_out = for ix in times:y_out = y_out + filter_kwargs(mir_eval.sonify.pitch_contour,np.asarray(times[ix]),np.asarray(freqs[ix]),fs=sr, length=length,**kwargs)if length is None:length = len(y_out)return y_out", "docstring": "Sonify pitch contours.\n\n This uses mir_eval.sonify.pitch_contour, and should only be applied\n to pitch annotations using the pitch_contour namespace.\n\n Each contour is sonified independently, and the resulting waveforms\n are summed together.", "id": "f11242:m5"} {"signature": "def piano_roll(annotation, sr=, length=None, **kwargs):", "body": "intervals, pitches = annotation.to_interval_values()pitch_map = {f: idx for idx, f in enumerate(np.unique(pitches))}gram = np.zeros((len(pitch_map), len(intervals)))for col, f in enumerate(pitches):gram[pitch_map[f], col] = return filter_kwargs(mir_eval.sonify.time_frequency,gram, pitches, intervals,sr, length=length, **kwargs)", "docstring": "Sonify a piano-roll\n\n This uses mir_eval.sonify.time_frequency, and is appropriate\n for sparse transcription data, e.g., annotations in the `note_midi`\n namespace.", "id": "f11242:m6"} {"signature": "def sonify(annotation, sr=, duration=None, **kwargs):", "body": "length = Noneif duration is None:duration = annotation.durationif duration is not None:length = int(duration * sr)if annotation.namespace in SONIFY_MAPPING:ann = coerce_annotation(annotation, annotation.namespace)return SONIFY_MAPPING[annotation.namespace](ann,sr=sr,length=length,**kwargs)for namespace, func in six.iteritems(SONIFY_MAPPING):try:ann = coerce_annotation(annotation, namespace)return func(ann, sr=sr, length=length, **kwargs)except NamespaceError:passraise NamespaceError(''.format(annotation.namespace))", "docstring": "Sonify a jams annotation through mir_eval\n\n Parameters\n ----------\n annotation : jams.Annotation\n The annotation to sonify\n\n sr = : positive number\n The sampling rate of the output waveform\n\n duration : float (optional)\n Optional length (in seconds) of the output waveform\n\n kwargs\n Additional keyword arguments to mir_eval.sonify functions\n\n Returns\n -------\n y_sonified : np.ndarray\n The waveform of the sonified annotation\n\n Raises\n ------\n NamespaceError\n If the annotation has an un-sonifiable namespace", "id": "f11242:m7"} {"signature": "def get_output_name(output_prefix, namespace, index):", "body": "return ''.format(output_prefix, namespace, index)", "docstring": "Get the output name (prefix)\n\n Parameters\n ----------\n output_prefix : str\n The path prefix of the target filename\n\n namespace : str\n The namespace of the annotation in question\n\n index : int\n The index number of this annotation within the namespace\n\n Returns\n -------\n output_name : str\n \"output_prefix__namespace__index\"", "id": "f11244:m0"} {"signature": "def get_comments(jam, ann):", "body": "jam_comments = jam.file_metadata.__json__ann_comments = 
ann.annotation_metadata.__json__return json.dumps({'': jam_comments,'': ann_comments},indent=)", "docstring": "Get the metadata from a jam and an annotation, combined as a string.\n\n Parameters\n ----------\n jam : JAMS\n The jams object\n\n ann : Annotation\n An annotation object\n\n Returns\n -------\n comments : str\n The jam.file_metadata and ann.annotation_metadata, combined and serialized", "id": "f11244:m1"} {"signature": "def lab_dump(ann, comment, filename, sep, comment_char):", "body": "intervals, values = ann.to_interval_values()frame = pd.DataFrame(columns=['', '', ''],data={'': intervals[:, ],'': intervals[:, ],'': values})with open(filename, '') as fdesc:for line in comment.split(''):fdesc.write(''.format(comment_char, line))frame.to_csv(path_or_buf=fdesc, index=False, sep=sep)", "docstring": "Save an annotation as a lab/csv.\n\n Parameters\n ----------\n ann : Annotation\n The annotation object\n\n comment : str\n The comment string header\n\n filename : str\n The output filename\n\n sep : str\n The separator string for output\n\n comment_char : str\n The character used to denote comments", "id": "f11244:m2"} {"signature": "def convert_jams(jams_file, output_prefix, csv=False, comment_char='', namespaces=None):", "body": "if namespaces is None:raise ValueError('')jam = jams.load(jams_file)counter = collections.Counter()annotations = []for query in namespaces:annotations.extend(jam.search(namespace=query))if csv:suffix = ''sep = ''else:suffix = ''sep = ''for ann in annotations:index = counter[ann.namespace]counter[ann.namespace] += filename = os.path.extsep.join([get_output_name(output_prefix,ann.namespace,index),suffix])comment = get_comments(jam, ann)lab_dump(ann, comment, filename, sep, comment_char)", "docstring": "Convert jams to labs.\n\n Parameters\n ----------\n jams_file : str\n The path on disk to the jams file in question\n\n output_prefix : str\n The file path prefix of the outputs\n\n csv : bool\n Whether to output in csv (True) or lab (False) format\n\n comment_char : str\n The character used to denote comments\n\n namespaces : list-like\n The set of namespace patterns to match for output", "id": "f11244:m3"} {"signature": "def parse_arguments(args):", "body": "parser = argparse.ArgumentParser(description='')parser.add_argument('','',dest='',action='',default=False,help='')parser.add_argument('', dest='', type=str, default='',help='')parser.add_argument('','',dest='',nargs='',default=[''],help='')parser.add_argument('',help='')parser.add_argument('', help='')return vars(parser.parse_args(args))", "docstring": "Parse arguments from the command line", "id": "f11244:m4"} {"signature": "def pprint(arr, columns=('', ''),names=('', ''),max_rows=, precision=):", "body": "if max_rows is True:pd.set_option('', )elif type(max_rows) is int:pd.set_option('', max_rows)pd.set_option('', precision)df = pd.DataFrame(arr.flatten(), index=arr[''].flatten(),columns=columns)df.columns = namesreturn df.style.format({names[]: '',names[]: ''})", "docstring": "Create a pandas DataFrame from a numpy ndarray.\n\nBy default use temp and lum with max rows of 32 and precision of 2.\n\narr - An numpy.ndarray.\ncolumns - The columns to include in the pandas DataFrame. Defaults to\n temperature and luminosity.\nnames - The column names for the pandas DataFrame. Defaults to\n Temperature and Luminosity.\nmax_rows - If max_rows is an integer then set the pandas\n display.max_rows option to that value. 
If max_rows\n is True then set display.max_rows option to 1000.\nprecision - An integer to set the pandas precision option.", "id": "f11253:m1"} {"signature": "def __init__(self, query=None, table=None):", "body": "if table is None:if query:super().__init__(query=query)else:super().__init__(self.query)else:super().__init__(table=table)", "docstring": "Initialize the data using the default query for Berkeley 20,\nor a provided query xor table.", "id": "f11253:c4:m0"} {"signature": "def __init__(self, file_like, data_types=None):", "body": "pass", "docstring": "A source of data.", "id": "f11254:c0:m0"} {"signature": "def remote_jupyter_proxy_url(port):", "body": "base_url = os.environ['']host = urllib.parse.urlparse(base_url).netlocif port is None:return hostservice_url_path = os.environ['']proxy_url_path = '' % portuser_url = urllib.parse.urljoin(base_url, service_url_path)full_url = urllib.parse.urljoin(user_url, proxy_url_path)return full_url", "docstring": "Callable to configure Bokeh's show method when a proxy must be\nconfigured.\n\nIf port is None we're asking about the URL\nfor the origin header.", "id": "f11260:m0"} {"signature": "def setup_notebook(debug=False):", "body": "output_notebook(INLINE, hide_banner=True)if debug:_setup_logging(logging.DEBUG)logging.debug('')else:_setup_logging(logging.WARNING)if '' not in os.environ:global jupyter_proxy_urljupyter_proxy_url = ''logging.info('')", "docstring": "Called at the start of notebook execution to setup the environment.\n\n This will configure bokeh, and setup the logging library to be\n reasonable.", "id": "f11260:m2"} {"signature": "def _diagram(plot_figure, source=None, color='', line_color='',xaxis_label='', yaxis_label='', name=None):", "body": "plot_figure.circle(x='', y='', source=source,size=, color=color, alpha=, name=name,line_color=line_color, line_width=)plot_figure.xaxis.axis_label = xaxis_labelplot_figure.yaxis.axis_label = yaxis_labelplot_figure.yaxis.formatter = NumeralTickFormatter()", "docstring": "Use a :class:`~bokeh.plotting.figure.Figure` and x and y collections\n to create an H-R diagram.", "id": "f11263:m1"} {"signature": "def cc_diagram(cluster_name):", "body": "x, y = get_hr_data(cluster_name)y_range = [max(y) + , min(y) - ]pf = figure(y_range=y_range, title=cluster_name)_diagram(x, y, pf)show_with_bokeh_server(pf)", "docstring": "Create a :class:`~bokeh.plotting.figure.Figure` to create an H-R\n diagram using the cluster_name; then show it.", "id": "f11263:m2"} {"signature": "def hr_diagram(cluster_name, output=None):", "body": "cluster = get_hr_data(cluster_name)pf = hr_diagram_figure(cluster)show_with_bokeh_server(pf)", "docstring": "Create a :class:`~bokeh.plotting.figure.Figure` to create an H-R\n diagram using the cluster_name; then show it.\n\n Re", "id": "f11263:m6"} {"signature": "def skyimage_figure(cluster):", "body": "pf_image = figure(x_range=(, ), y_range=(, ),title=''.format(cluster.name))pf_image.image_url(url=[cluster.image_path],x=, y=, w=, h=, anchor='')pf_image.toolbar_location = Nonepf_image.axis.visible = Falsereturn pf_image", "docstring": "Given a cluster create a Bokeh plot figure using the\ncluster's image.", "id": "f11263:m7"} {"signature": "def round_teff_luminosity(cluster):", "body": "temps = [round(t, -) for t in teff(cluster)]lums = [round(l, ) for l in luminosity(cluster)]return temps, lums", "docstring": "Returns rounded teff and luminosity lists.", "id": "f11263:m8"} {"signature": "def hr_diagram_figure(cluster):", "body": "temps, lums = round_teff_luminosity(cluster)x, y 
= temps, lumscolors, color_mapper = hr_diagram_color_helper(temps)x_range = [max(x) + max(x) * , min(x) - min(x) * ]source = ColumnDataSource(data=dict(x=x, y=y, color=colors))pf = figure(y_axis_type='', x_range=x_range, name='',tools='',title=''.format(cluster.name))pf.select(BoxSelectTool).select_every_mousemove = Falsepf.select(LassoSelectTool).select_every_mousemove = Falsehover = pf.select(HoverTool)[]hover.tooltips = [(\"\", \"\"),(\"\", \"\")]_diagram(source=source, plot_figure=pf, name='',color={'': '', '': color_mapper},xaxis_label='',yaxis_label='')return pf", "docstring": "Given a cluster create a Bokeh plot figure creating an\nH-R diagram.", "id": "f11263:m9"} {"signature": "def calculate_diagram_ranges(data):", "body": "data = round_arr_teff_luminosity(data)temps = data['']x_range = [ * np.amax(temps), * np.amin(temps)]lums = data['']y_range = [ * np.amin(lums), * np.amax(lums)]return (x_range, y_range)", "docstring": "Given a numpy array calculate what the ranges of the H-R\ndiagram should be.", "id": "f11263:m10"} {"signature": "def hr_diagram_from_data(data, x_range, y_range):", "body": "_, color_mapper = hr_diagram_color_helper([])data_dict = {'': list(data['']),'': list(data['']),'': list(data[''])}source = ColumnDataSource(data=data_dict)pf = figure(y_axis_type='', x_range=x_range, y_range=y_range)_diagram(source=source, plot_figure=pf,color={'': '', '': color_mapper},xaxis_label='',yaxis_label='')show_with_bokeh_server(pf)", "docstring": "Given a numpy array create a Bokeh plot figure creating an\nH-R diagram.", "id": "f11263:m11"} {"signature": "def cluster_text_input(cluster, title=None):", "body": "if not title:title = ''return TextInput(value=cluster.name, title=title)", "docstring": "Create an :class:`~bokeh.models.widgets.TextInput` using\nthe cluster.name as the default value and title.\n\nIf no title is provided use, 'Type in the name of your cluster\nand press Enter/Return:'.", "id": "f11263:m12"} {"signature": "def hr_diagram_selection(cluster_name):", "body": "cluster = get_hr_data(cluster_name)temps, lums = round_teff_luminosity(cluster)x, y = temps, lumscolors, color_mapper = hr_diagram_color_helper(temps)x_range = [max(x) + max(x) * , min(x) - min(x) * ]source = ColumnDataSource(data=dict(x=x, y=y, color=colors), name='')source_selected = ColumnDataSource(data=dict(x=[], y=[], color=[]),name='')pf = figure(y_axis_type='', x_range=x_range,tools='',title=''.format(cluster.name))_diagram(source=source, plot_figure=pf, name='', color={'':'', '': color_mapper},xaxis_label='',yaxis_label='')pf_selected = figure(y_axis_type='', y_range=pf.y_range,x_range=x_range,tools='',title=''.format(cluster.name))_diagram(source=source_selected, plot_figure=pf_selected, name='',color={'': '', '': color_mapper},xaxis_label='',yaxis_label='')source.callback = CustomJS(args=dict(source_selected=source_selected),code=\"\"\"\"\"\")show_with_bokeh_server(row(pf, pf_selected))", "docstring": "Given a cluster create two Bokeh plot based H-R diagrams.\nThe Selection in the left H-R diagram will show up on the\nright one.", "id": "f11263:m18"} {"signature": "def _filter_cluster_data(self):", "body": "min_temp = self.temperature_range_slider.value[]max_temp = self.temperature_range_slider.value[]temp_mask = np.logical_and(self.cluster.catalog[''] >= min_temp,self.cluster.catalog[''] <= max_temp)min_lum = self.luminosity_range_slider.value[]max_lum = self.luminosity_range_slider.value[]lum_mask = np.logical_and(self.cluster.catalog[''] >= min_lum,self.cluster.catalog[''] <= 
max_lum)selected_mask = np.isin(self.cluster.catalog[''], self.selection_ids)filter_mask = temp_mask & lum_mask & selected_maskself.filtered_data = self.cluster.catalog[filter_mask].dataself.source.data = {'': list(self.filtered_data['']),'': list(self.filtered_data['']),'': list(self.filtered_data['']),'': list(self.filtered_data[''])}logging.debug(\"\", self.filtered_data)", "docstring": "Filter the cluster data catalog into the filtered_data\ncatalog, which is what is shown in the H-R diagram.\n\nFilter on the values of the sliders, as well as the lasso\nselection in the skyviewer.", "id": "f11263:c0:m2"} {"signature": "def teff(cluster):", "body": "b_vs, _ = cluster.stars()teffs = []for b_v in b_vs:b_v -= cluster.eb_vif b_v > -:x = ( - b_v) / else:x = ( - math.sqrt( + * b_v)) / teffs.append(math.pow(, x))return teffs", "docstring": "Calculate Teff for main sequence stars ranging from Teff 3500K - 8000K. Use\n[Fe/H] of the cluster, if available.\n\nReturns a list of Teff values.", "id": "f11264:m2"} {"signature": "def bc(temp):", "body": "return (- + temp * - temp** * + temp** * )", "docstring": "Calculate Bolometric Correction Using the Teff from the previous equation.\nThis correction is for main sequence stars of the Teff range given above.", "id": "f11264:m4"} {"signature": "def color(teffs):", "body": "colors = []for t in teffs:if t >= :colors.append('') elif t >= :colors.append('') elif t >= :colors.append('') elif t >= :colors.append('') else:colors.append('') return colors", "docstring": "Conventional color descriptions of stars.\nSource: https://en.wikipedia.org/wiki/Stellar_classification", "id": "f11264:m5"} {"signature": "def table(cluster):", "body": "teffs = teff(cluster)lums = luminosity(cluster)arr = cluster.to_array()i = for row in arr:row[''][] = np.array([lums[i]], dtype='')row[''][] = np.array([teffs[i]], dtype='')i += arr = round_arr_teff_luminosity(arr)return arr", "docstring": "Create a numpy.ndarray with all observed fields and\ncomputed teff and luminosity values.", "id": "f11264:m6"} {"signature": "def round_arr_teff_luminosity(arr):", "body": "arr[''] = np.around(arr[''], -)arr[''] = np.around(arr[''], )return arr", "docstring": "Return the numpy array with rounded teff and luminosity columns.", "id": "f11264:m7"} {"signature": "def _real_time_thread(self):", "body": "while self.ws_client.connected():if self.die:breakif self.pause:sleep()continuemessage = self.ws_client.receive()if message is None:breakmessage_type = message['']if message_type == '':continueif message[''] <= self.sequence:continueif message_type == '':self._handle_open(message)elif message_type == '':self._handle_match(message)elif message_type == '':self._handle_done(message)elif message_type == '':self._handle_change(message)else:continueself.ws_client.disconnect()", "docstring": "Handles real-time updates to the order book.", "id": "f11266:c0:m7"} {"signature": "def end(self):", "body": "self.die = True", "docstring": "Makes sure the real-time thread dies and the WebSocket disconnects.", "id": "f11266:c0:m11"} {"signature": "def pause(self):", "body": "self.pause = True", "docstring": "Stops real-time updates until resume is called.", "id": "f11266:c0:m12"} {"signature": "def resume(self):", "body": "self.pause = False", "docstring": "Resumes real-time updates.", "id": "f11266:c0:m13"} {"signature": "def get_order_book(self):", "body": "return self.book", "docstring": "Returns the Real-Time Order Book.\n\n :returns: the Real-Time Order Book\n :rtype: dict", "id": "f11266:c0:m14"} 
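The color(teffs) record above (id f11264:m5) buckets effective temperatures into conventional color names, but its numeric thresholds and label strings are stripped in this listing. The sketch below only illustrates the same bucketing pattern; the cut-offs and names are assumptions drawn loosely from the spectral-class conventions cited in its docstring, not the original values.

def color_sketch(teffs):
    # Hypothetical thresholds and labels, loosely following conventional
    # spectral classes (B/A/F/G and cooler); NOT the stripped originals.
    colors = []
    for t in teffs:
        if t >= 10000:
            colors.append('blue-white')
        elif t >= 7500:
            colors.append('white')
        elif t >= 6000:
            colors.append('yellow-white')
        elif t >= 5200:
            colors.append('yellow')
        else:
            colors.append('orange-red')
    return colors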
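The real-time order book records at the end of the block above (ids f11266:c0:m11-m14) document end(), pause(), resume() and get_order_book(). A minimal usage sketch follows, under the assumption that pausing updates is how one reads a consistent snapshot; the client's class name and constructor are not given in the records, so the sketch takes an already-constructed client object, and settle_seconds is an illustrative parameter.

import time

def snapshot_order_book(book_client, settle_seconds=5):
    # book_client is assumed to expose the documented methods pause(),
    # resume(), end() and get_order_book(); its class is not named above.
    time.sleep(settle_seconds)      # let a few WebSocket updates arrive first
    book_client.pause()             # documented: stop applying real-time updates
    try:
        # documented: get_order_book() returns the real-time order book as a dict
        snapshot = dict(book_client.get_order_book())
    finally:
        book_client.resume()        # documented: resume real-time updates
    return snapshot

When the client is no longer needed, the end() record above indicates it stops the real-time thread and disconnects the WebSocket.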
{"signature": "def _create_api_uri(self, *parts):", "body": "return urljoin(self.API_URI, ''.join(map(quote, parts)))", "docstring": "Creates fully qualified endpoint URIs.\n\n :param parts: the string parts that form the request URI", "id": "f11267:c1:m1"} {"signature": "def _format_iso_time(self, time):", "body": "if isinstance(time, str):return timeelif isinstance(time, datetime):return time.strftime('')else:return None", "docstring": "Makes sure we have proper ISO 8601 time.\n\n :param time: either already ISO 8601 a string or datetime.datetime\n :returns: ISO 8601 time\n :rtype: str", "id": "f11267:c1:m2"} {"signature": "def _handle_response(self, response):", "body": "if not str(response.status_code).startswith(''):raise get_api_error(response)return response", "docstring": "Returns the given response or raises an APIError for non-2xx responses.\n\n :param requests.Response response: HTTP response\n :returns: requested data\n :rtype: requests.Response\n :raises APIError: for non-2xx responses", "id": "f11267:c1:m3"} {"signature": "def _request(self, method, *relative_path_parts, **kwargs):", "body": "raise NotImplementedError", "docstring": "Abstract method - must be overriden.", "id": "f11267:c1:m4"} {"signature": "def _get(self, *args, **kwargs):", "body": "return self._request('', *args, **kwargs)", "docstring": "Performs HTTP GET requests.\n\n :param args: arguments\n :param kwargs: argument keywords\n :returns: requested data\n :raises APIError: for non-2xx responses", "id": "f11267:c1:m5"} {"signature": "def _post(self, *args, **kwargs):", "body": "return self._request('', *args, **kwargs)", "docstring": "Performs HTTP POST requests.\n\n :param args: arguments\n :param kwargs: argument keywords\n :returns: requested data\n :raises APIError: for non-2xx responses", "id": "f11267:c1:m6"} {"signature": "def _delete(self, *args, **kwargs):", "body": "return self._request('', *args, **kwargs)", "docstring": "Performs HTTP POST requests.\n\n :param args: arguments\n :param kwargs: argument keywords\n :returns: requested data\n :raises APIError: for non-2xx responses", "id": "f11267:c1:m7"} {"signature": "def __next__(self):", "body": "next = self.endpoint()if next is not None:return nextraise StopIteration", "docstring": "Iterator function for Python 3.\n\n :returns: the next message in the sequence\n :raises StopIteration: if there are no more messages", "id": "f11267:c2:m2"} {"signature": "def _check_next(self):", "body": "if self.is_initial:return Trueif self.before:if self.before_cursor:return Trueelse:return Falseelse:if self.after_cursor:return Trueelse:return False", "docstring": "Checks if a next message is possible.\n\n :returns: True if a next message is possible, otherwise False\n :rtype: bool", "id": "f11267:c2:m4"} {"signature": "def is_before(self):", "body": "return self.before", "docstring": "Returns True if the direction is set to before.\n\n :returns: True if the direction is set to before, otherwise False\n :rtype: bool", "id": "f11267:c2:m5"} {"signature": "def is_after(self):", "body": "return not self.before", "docstring": "Returns True if the direction is set to after.\n\n :returns: True if the direction is set to after, otherwise False\n :rtype: bool", "id": "f11267:c2:m6"} {"signature": "def set_before(self):", "body": "self.before = True", "docstring": "Sets the direction to before.", "id": "f11267:c2:m7"} {"signature": "def set_after(self):", "body": "self.before = False", "docstring": "Sets the direction to after.", "id": "f11267:c2:m8"} {"signature": "def 
endpoint(self):", "body": "raise NotImplementedError", "docstring": "Abstract method - must be overriden. Performs the endpoint operation", "id": "f11267:c2:m9"} {"signature": "def get_before_cursor(self):", "body": "return self.before_cursor", "docstring": "Acquires the before cursor.\n\n :returns: the before cursor", "id": "f11267:c2:m10"} {"signature": "def get_after_cursor(self):", "body": "return self.after_cursor", "docstring": "Acquires the after cursor.\n\n :returns: the after cursor", "id": "f11267:c2:m11"} {"signature": "def _request(self, method, *relative_path_parts, **kwargs):", "body": "uri = self._create_api_uri(*relative_path_parts)if method == '':response = get(uri, auth=self.auth, params=kwargs.get('', None))elif method == '':response = post(uri, auth=self.auth, json=kwargs.get('', None))else:response = delete(uri, auth=self.auth, json=kwargs.get('', None))return self._handle_response(response).json()", "docstring": "Sends an HTTP request to the REST API and receives the requested data.\n\n :param str method: HTTP method name\n :param relative_path_parts: the relative paths for the request URI\n :param kwargs: argument keywords\n :returns: requested data\n :raises APIError: for non-2xx responses", "id": "f11268:c1:m1"} {"signature": "def list_accounts(self):", "body": "return self._get('')", "docstring": "``_", "id": "f11268:c1:m2"} {"signature": "def get_account(self, account_id):", "body": "return self._get('', account_id)", "docstring": "``_", "id": "f11268:c1:m3"} {"signature": "def get_account_history(self, account_id):", "body": "return self._get('', account_id, '')", "docstring": "``_", "id": "f11268:c1:m4"} {"signature": "def get_holds(self, account_id):", "body": "return self._get('', account_id, '')", "docstring": "``_", "id": "f11268:c1:m5"} {"signature": "def _place_order(self,side,product_id='',client_oid=None,type=None,stp=None,price=None,size=None,funds=None,time_in_force=None,cancel_after=None,post_only=None):", "body": "data = {'':side,'':product_id,'':client_oid,'':type,'':stp,'':price,'':size,'':funds,'':time_in_force,'':cancel_after,'':post_only}return self._post('', data=data)", "docstring": "``_", "id": "f11268:c1:m6"} {"signature": "def place_limit_order(self,side,price,size,product_id='',client_oid=None,stp=None,time_in_force=None,cancel_after=None,post_only=None):", "body": "return self._place_order(side,product_id=product_id,client_oid=client_oid,type='',stp=stp,price=price,size=size,time_in_force=time_in_force,cancel_after=cancel_after,post_only=post_only)", "docstring": "``_", "id": "f11268:c1:m7"} {"signature": "def place_market_order(self,side,product_id='',size=None,funds=None,client_oid=None,stp=None):", "body": "return self._place_order(type='',side=size,product_id=product_id,size=size,funds=funds,client_oid=client_oid,stp=stp)", "docstring": "``_", "id": "f11268:c1:m8"} {"signature": "def cancel_order(self, order_id):", "body": "return self._delete('', order_id)", "docstring": "``_", "id": "f11268:c1:m9"} {"signature": "def cancel_all(self, product_id=None):", "body": "return self._delete('', data={'':product_id})", "docstring": "``_", "id": "f11268:c1:m10"} {"signature": "def list_orders(self, status=None):", "body": "return self._get('', params={'':status})", "docstring": "``_", "id": "f11268:c1:m11"} {"signature": "def get_order(self, order_id):", "body": "return self._get('', order_id)", "docstring": "``_", "id": "f11268:c1:m12"} {"signature": "def list_fills(self):", "body": "return self._get('')", "docstring": "``_", "id": 
"f11268:c1:m13"} {"signature": "def _deposit_withdraw(self, type, amount, coinbase_account_id):", "body": "data = {'':type,'':amount,'':coinbase_account_id}return self._post('', data=data)", "docstring": "``_", "id": "f11268:c1:m14"} {"signature": "def deposit(self, amount, coinbase_account_id):", "body": "return self._deposit_withdraw('', amount, coinbase_account_id)", "docstring": "``_", "id": "f11268:c1:m15"} {"signature": "def withdraw(self, amount, coinbase_account_id):", "body": "return self._deposit_withdraw('', amount, coinbase_account_id)", "docstring": "``_", "id": "f11268:c1:m16"} {"signature": "def _new_report(self,type,start_date,end_date,product_id='',account_id=None,format=None,email=None):", "body": "data = {'':type,'':self._format_iso_time(start_date),'':self._format_iso_time(end_date),'':product_id,'':account_id,'':format,'':email}return self._post('', data=data)", "docstring": "``_", "id": "f11268:c1:m17"} {"signature": "def new_fills_report(self,start_date,end_date,account_id=None,product_id='',format=None,email=None):", "body": "return self._new_report(start_date,'',end_date,account_id,product_id,format,email)", "docstring": "``_", "id": "f11268:c1:m18"} {"signature": "def new_accounts_report(self,start_date,end_date,account_id,product_id='',format=None,email=None):", "body": "return self._new_report(start_date,'',end_date,product_id,account_id,format,email)", "docstring": "``_", "id": "f11268:c1:m19"} {"signature": "def get_report_status(self, report_id):", "body": "return self._get('', report_id)", "docstring": "``_", "id": "f11268:c1:m20"} {"signature": "def _request(self, method, *relative_path_parts, **kwargs):", "body": "uri = self._create_api_uri(*relative_path_parts)if method == '':response = get(uri, auth=self.auth, params=kwargs.get('', None))elif method == '':response = post(uri, auth=self.auth, json=kwargs.get('', None))else:response = delete(uri, auth=self.auth, json=kwargs.get('', None))self.is_initial = Falseself.before_cursor = response.headers.get('', None)self.after_cursor = response.headers.get('', None)return self._handle_response(response).json()", "docstring": "Sends an HTTP request to the REST API and receives the requested data.\n Additionally sets up pagination cursors.\n\n :param str method: HTTP method name\n :param relative_path_parts: the relative paths for the request URI\n :param kwargs: argument keywords\n :returns: requested data\n :raises APIError: for non-2xx responses", "id": "f11268:c2:m1"} {"signature": "def _request(self, method, *relative_path_parts, **kwargs):", "body": "uri = self._create_api_uri(*relative_path_parts)response = get(uri, params=kwargs.get('', None))return self._handle_response(response).json()", "docstring": "Sends an HTTP request to the REST API and receives the requested data.\n\n :param str method: HTTP method name\n :param relative_path_parts: the relative paths for the request URI\n :param kwargs: argument keywords\n :returns: requested data\n :raises APIError: for non-2xx responses", "id": "f11269:c0:m0"} {"signature": "def get_products(self):", "body": "return self._get('')", "docstring": "``_", "id": "f11269:c0:m1"} {"signature": "def get_product_order_book(self, level=None, product_id=''):", "body": "return self._get('', product_id, '', params={'':level})", "docstring": "``_", "id": "f11269:c0:m2"} {"signature": "def get_product_ticker(self, product_id=''):", "body": "return self._get('', product_id, '')", "docstring": "``_", "id": "f11269:c0:m3"} {"signature": "def get_trades(self, product_id=''):", 
"body": "return self._get('', product_id, '')", "docstring": "``_", "id": "f11269:c0:m4"} {"signature": "def get_historic_trades(self, start, end, granularity, product_id=''):", "body": "params = {'':self._format_iso_time(start),'':self._format_iso_time(end),'':granularity}return self._get('', product_id, '', params=params)", "docstring": "``_\n\n :param start: either datetime.datetime or str in ISO 8601\n :param end: either datetime.datetime or str in ISO 8601\n :pram int granularity: desired timeslice in seconds\n :returns: desired data", "id": "f11269:c0:m5"} {"signature": "def get_stats(self, product_id=''):", "body": "return self._get('', product_id, '')", "docstring": "``_", "id": "f11269:c0:m6"} {"signature": "def get_currencies(self):", "body": "return self._get('')", "docstring": "``_", "id": "f11269:c0:m7"} {"signature": "def get_time(self):", "body": "return self._get('')", "docstring": "``_", "id": "f11269:c0:m8"} {"signature": "def _request(self, method, *relative_path_parts, **kwargs):", "body": "uri = self._create_api_uri(*relative_path_parts)response = get(uri, params=self._get_params(**kwargs))self.is_initial = Falseself.before_cursor = response.headers.get('', None)self.after_cursor = response.headers.get('', None)return self._handle_response(response).json()", "docstring": "Sends an HTTP request to the REST API and receives the requested data.\n Additionally sets up pagination cursors.\n\n :param str method: HTTP method name\n :param relative_path_parts: the relative paths for the request URI\n :param kwargs: argument keywords\n :returns: requested data\n :raises APIError: for non-2xx responses", "id": "f11269:c1:m0"} {"signature": "def __next__(self):", "body": "next = self.receive()if next:return nextraise StopIteration", "docstring": "Iterator function for Python 3.\n\n :returns: the next message in the sequence\n :rtype: dict\n :raises StopIteration: if the WebSocket is not connected", "id": "f11270:c0:m4"} {"signature": "def _format_message(self, message):", "body": "return loads(message)", "docstring": "Makes sure messages are Pythonic.\n\n :param str message: raw message\n :returns: Pythonic message\n :rtype: dict", "id": "f11270:c0:m5"} {"signature": "def _keep_alive_thread(self):", "body": "while True:with self._lock:if self.connected():self._ws.ping()else:self.disconnect()self._thread = Nonereturnsleep()", "docstring": "Used exclusively as a thread which keeps the WebSocket alive.", "id": "f11270:c0:m6"} {"signature": "def connect(self):", "body": "if not self.connected():self._ws = create_connection(self.WS_URI)message = {'':self.WS_TYPE,'':self.WS_PRODUCT_ID}self._ws.send(dumps(message))with self._lock:if not self._thread:thread = Thread(target=self._keep_alive_thread, args=[])thread.start()", "docstring": "Connects and subscribes to the WebSocket Feed.", "id": "f11270:c0:m7"} {"signature": "def disconnect(self):", "body": "if self.connected():self._ws.close()self._ws = None", "docstring": "Disconnects from the WebSocket Feed.", "id": "f11270:c0:m8"} {"signature": "def receive(self):", "body": "if self.connected():return self._format_message(self._ws.recv())return None", "docstring": "Receive the next message in the sequence.\n\n :returns: the next message in the sequence, None if not connected\n :rtype: dict", "id": "f11270:c0:m9"} {"signature": "def connected(self):", "body": "if self._ws:return self._ws.connectedreturn False", "docstring": "Checks if we are connected to the WebSocket Feed.\n\n :returns: True if connected, otherwise False\n :rtype: bool", 
"id": "f11270:c0:m10"} {"signature": "def get_api_error(response):", "body": "error_class = _status_code_to_class.get(response.status_code, APIError)return error_class(response)", "docstring": "Acquires the correct error for a given response.\n\n :param requests.Response response: HTTP error response\n :returns: the appropriate error for a given response\n :rtype: APIError", "id": "f11271:m0"} {"signature": "@propertydef seed_url(self):", "body": "url = self.base_urlif self.URL_TEMPLATE is not None:url = urlparse.urljoin(self.base_url, self.URL_TEMPLATE.format(**self.url_kwargs))if not url:return Noneurl_parts = list(urlparse.urlparse(url))query = urlparse.parse_qsl(url_parts[])for k, v in self.url_kwargs.items():if v is None:continueif \"\".format(k) not in str(self.URL_TEMPLATE):for i in iterable(v):query.append((k, i))url_parts[] = urlencode(query)return urlparse.urlunparse(url_parts)", "docstring": "A URL that can be used to open the page.\n\n The URL is formatted from :py:attr:`URL_TEMPLATE`, which is then\n appended to :py:attr:`base_url` unless the template results in an\n absolute URL.\n\n :return: URL that can be used to open the page.\n :rtype: str", "id": "f11285:c0:m1"} {"signature": "def open(self):", "body": "if self.seed_url:self.driver_adapter.open(self.seed_url)self.wait_for_page_to_load()return selfraise UsageError(\"\")", "docstring": "Open the page.\n\n Navigates to :py:attr:`seed_url` and calls :py:func:`wait_for_page_to_load`.\n\n :return: The current page object.\n :rtype: :py:class:`Page`\n :raises: UsageError", "id": "f11285:c0:m2"} {"signature": "def wait_for_page_to_load(self):", "body": "self.wait.until(lambda _: self.loaded)self.pm.hook.pypom_after_wait_for_page_to_load(page=self)return self", "docstring": "Wait for the page to load.", "id": "f11285:c0:m3"} {"signature": "@propertydef loaded(self):", "body": "return True", "docstring": "Loaded state of the page.\n\n By default the driver will try to wait for any page loads to be\n complete, however it's not uncommon for it to return early. To address\n this you can override :py:attr:`loaded` to return ``True`` when the\n page has finished loading.\n\n :return: ``True`` if page is loaded, else ``False``.\n :rtype: bool\n\n Usage (Selenium)::\n\n from pypom import Page\n from selenium.webdriver.common.by import By\n\n class Mozilla(Page):\n\n @property\n def loaded(self):\n body = self.find_element(By.TAG_NAME, 'body')\n return 'loaded' in body.get_attribute('class')\n\n Usage (Splinter)::\n\n from pypom import Page\n\n class Mozilla(Page):\n\n def loaded(self):\n body = self.find_element('tag', 'body')\n return 'loaded' in body['class']\n\n Examples::\n\n # wait for the seed_url value to be in the current URL\n self.seed_url in self.selenium.current_url", "id": "f11285:c0:m4"} {"signature": "@propertydef selenium(self):", "body": "warn(\"\", DeprecationWarning, stacklevel=)return self.driver", "docstring": "Backwards compatibility attribute", "id": "f11286:c0:m1"} {"signature": "def find_elements(self, strategy, locator):", "body": "return self.driver_adapter.find_elements(strategy, locator)", "docstring": "Finds elements on the page.\n\n :param strategy: Location strategy to use. 
See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n :param locator: Location of target elements.\n :type strategy: str\n :type locator: str\n :return: List of :py:class:`~selenium.webdriver.remote.webelement.WebElement` or :py:class:`~splinter.element_list.ElementList`\n :rtype: list", "id": "f11286:c0:m3"} {"signature": "def is_element_present(self, strategy, locator):", "body": "return self.driver_adapter.is_element_present(strategy, locator)", "docstring": "Checks whether an element is present.\n\n :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n :param locator: Location of target element.\n :type strategy: str\n :type locator: str\n :return: ``True`` if element is present, else ``False``.\n :rtype: bool", "id": "f11286:c0:m4"} {"signature": "def is_element_displayed(self, strategy, locator):", "body": "return self.driver_adapter.is_element_displayed(strategy, locator)", "docstring": "Checks whether an element is displayed.\n\n :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n :param locator: Location of target element.\n :type strategy: str\n :type locator: str\n :return: ``True`` if element is displayed, else ``False``.\n :rtype: bool", "id": "f11286:c0:m5"} {"signature": "@propertydef root(self):", "body": "if self._root is None and self._root_locator is not None:return self.page.find_element(*self._root_locator)return self._root", "docstring": "Root element for the page region.\n\n Page regions should define a root element either by passing this on\n instantiation or by defining a :py:attr:`_root_locator` attribute. To\n reduce the chances of hitting :py:class:`~selenium.common.exceptions.StaleElementReferenceException`\n or similar you should use :py:attr:`_root_locator`, as this is looked up every\n time the :py:attr:`root` property is accessed.", "id": "f11287:c0:m1"} {"signature": "def wait_for_region_to_load(self):", "body": "self.wait.until(lambda _: self.loaded)self.pm.hook.pypom_after_wait_for_region_to_load(region=self)return self", "docstring": "Wait for the page region to load.", "id": "f11287:c0:m2"} {"signature": "def find_element(self, strategy, locator):", "body": "return self.driver_adapter.find_element(strategy, locator, root=self.root)", "docstring": "Finds an element on the page.\n\n :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n :param locator: Location of target element.\n :type strategy: str\n :type locator: str\n :return: An element.\n :rytpe: :py:class:`~selenium.webdriver.remote.webelement.WebElement` or :py:class:`~splinter.driver.webdriver.WebDriverElement`", "id": "f11287:c0:m3"} {"signature": "def find_elements(self, strategy, locator):", "body": "return self.driver_adapter.find_elements(strategy, locator, root=self.root)", "docstring": "Finds elements on the page.\n\n :param strategy: Location strategy to use. 
See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n :param locator: Location of target elements.\n :type strategy: str\n :type locator: str\n :return: List of :py:class:`~selenium.webdriver.remote.webelement.WebElement` or :py:class:`~splinter.element_list.ElementList`\n :rtype: list", "id": "f11287:c0:m4"} {"signature": "def is_element_present(self, strategy, locator):", "body": "return self.driver_adapter.is_element_present(strategy, locator, root=self.root)", "docstring": "Checks whether an element is present.\n\n :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n :param locator: Location of target element.\n :type strategy: str\n :type locator: str\n :return: ``True`` if element is present, else ``False``.\n :rtype: bool", "id": "f11287:c0:m5"} {"signature": "def is_element_displayed(self, strategy, locator):", "body": "return self.driver_adapter.is_element_displayed(strategy, locator, root=self.root)", "docstring": "Checks whether an element is displayed.\n\n :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n :param locator: Location of target element.\n :type strategy: str\n :type locator: str\n :return: ``True`` if element is displayed, else ``False``.\n :rtype: bool", "id": "f11287:c0:m6"} {"signature": "@propertydef loaded(self):", "body": "return True", "docstring": "Loaded state of the page region.\n\n You may need to initialise your page region before it's ready for you\n to interact with it. If this is the case, you can override\n :py:attr:`loaded` to return ``True`` when the region has finished\n loading.\n\n :return: ``True`` if page is loaded, else ``False``.\n :rtype: bool\n\n Usage (Selenium)::\n\n from pypom import Page, Region\n from selenium.webdriver.common.by import By\n\n class Mozilla(Page):\n URL_TEMPLATE = 'https://www.mozilla.org/'\n\n @property\n def newsletter(self):\n return Newsletter(self)\n\n class Newsletter(Region):\n _root_locator = (By.ID, 'newsletter-form')\n\n @property\n def loaded(self):\n return 'loaded' in self.root.get_attribute('class')\n\n Usage (Splinter)::\n\n from pypom import Page, Region\n\n class Mozilla(Page):\n URL_TEMPLATE = 'https://www.mozilla.org/'\n\n @property\n def newsletter(self):\n return Newsletter(self)\n\n class Newsletter(Region):\n _root_locator = ('id', 'newsletter-form')\n\n @property\n def loaded(self):\n return 'loaded' in self.root['class']", "id": "f11287:c0:m7"} {"signature": "@hookspecdef pypom_after_wait_for_page_to_load(page):", "body": "", "docstring": "Called after waiting for the page to load", "id": "f11288:m0"} {"signature": "@hookspecdef pypom_after_wait_for_region_to_load(region):", "body": "", "docstring": "Called after waiting for the region to load", "id": "f11288:m1"} {"signature": "def wait_factory(timeout):", "body": "", "docstring": "Returns a WebDriverWait like property for a given timeout.\n\n :param timeout: Timeout used by WebDriverWait like calls\n :type timeout: int", "id": "f11290:c1:m0"} {"signature": "def open(url):", "body": "", "docstring": "Open the page.\n Navigates to :py:attr:`url`", "id": "f11290:c1:m1"} {"signature": "def find_element(strategy, locator, root=None):", "body": "", "docstring": "Finds an element on the page.\n\n :param strategy: Location strategy to use (type depends on the driver implementation)\n :param locator: 
Location of target element.\n        :param root: (optional) root node.\n        :type strategy: str\n        :type locator: str\n        :type root: web element object or None.\n        :return: web element object\n        :rtype: it depends on the driver implementation", "id": "f11290:c1:m2"} {"signature": "def find_elements(strategy, locator, root=None):", "body": "", "docstring": "Finds elements on the page.\n\n        :param strategy: Location strategy to use (type depends on the driver implementation)\n        :param locator: Location of target elements.\n        :param root: (optional) root node.\n        :type strategy: str\n        :type locator: str\n        :type root: web element object or None.\n        :return: iterable of web element objects\n        :rtype: iterable (it depends on the driver implementation)", "id": "f11290:c1:m3"} {"signature": "def is_element_present(strategy, locator, root=None):", "body": "", "docstring": "Checks whether an element is present.\n\n        :param strategy: Location strategy to use (type depends on the driver implementation)\n        :param locator: Location of target element.\n        :param root: (optional) root node.\n        :type strategy: str\n        :type locator: str\n        :type root: web element object or None.\n        :return: ``True`` if element is present, else ``False``.\n        :rtype: bool", "id": "f11290:c1:m4"} {"signature": "def is_element_displayed(strategy, locator, root=None):", "body": "", "docstring": "Checks whether an element is displayed.\n\n        :param strategy: Location strategy to use (type depends on the driver implementation)\n        :param locator: Location of target element.\n        :param root: (optional) root node.\n        :type strategy: str\n        :type locator: str\n        :type root: web element object or None.\n        :return: ``True`` if element is displayed, else ``False``.\n        :rtype: bool", "id": "f11290:c1:m5"} {"signature": "def registerDriver(iface, driver, class_implements=[]):", "body": "for class_item in class_implements:classImplements(class_item, iface)component.provideAdapter(factory=driver, adapts=[iface], provides=IDriver)", "docstring": "Register driver adapter used by page object", "id": "f11292:m0"} {"signature": "def register():", "body": "registerDriver(ISelenium,Selenium,class_implements=[Firefox,Chrome,Ie,Edge,Opera,Safari,BlackBerry,PhantomJS,Android,Remote,EventFiringWebDriver,],)", "docstring": "Register the Selenium specific driver implementation.\n\n    This register call is performed by the init module if\n    selenium is available.", "id": "f11293:m0"} {"signature": "def wait_factory(self, timeout):", "body": "return WebDriverWait(self.driver, timeout)", "docstring": "Returns a WebDriverWait like property for a given timeout.\n\n        :param timeout: Timeout used by WebDriverWait calls\n        :type timeout: int", "id": "f11293:c1:m1"} {"signature": "def open(self, url):", "body": "self.driver.get(url)", "docstring": "Open the page.\n        Navigates to :py:attr:`url`", "id": "f11293:c1:m2"} {"signature": "def find_element(self, strategy, locator, root=None):", "body": "if root is not None:return root.find_element(strategy, locator)return self.driver.find_element(strategy, locator)", "docstring": "Finds an element on the page.\n\n        :param strategy: Location strategy to use. 
See :py:class:`~selenium.webdriver.common.by.By` for valid values.\n        :param locator: Location of target element.\n        :param root: (optional) root node.\n        :type strategy: str\n        :type locator: str\n        :type root: str :py:class:`~selenium.webdriver.remote.webelement.WebElement` object or None.\n        :return: :py:class:`~selenium.webdriver.remote.webelement.WebElement` object.\n        :rtype: selenium.webdriver.remote.webelement.WebElement", "id": "f11293:c1:m3"} {"signature": "def find_elements(self, strategy, locator, root=None):", "body": "if root is not None:return root.find_elements(strategy, locator)return self.driver.find_elements(strategy, locator)", "docstring": "Finds elements on the page.\n\n        :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` for valid values.\n        :param locator: Location of target elements.\n        :param root: (optional) root node.\n        :type strategy: str\n        :type locator: str\n        :type root: str :py:class:`~selenium.webdriver.remote.webelement.WebElement` object or None.\n        :return: List of :py:class:`~selenium.webdriver.remote.webelement.WebElement` objects.\n        :rtype: list", "id": "f11293:c1:m4"} {"signature": "def is_element_present(self, strategy, locator, root=None):", "body": "try:return self.find_element(strategy, locator, root=root)except NoSuchElementException:return False", "docstring": "Checks whether an element is present.\n\n        :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` for valid values.\n        :param locator: Location of target element.\n        :param root: (optional) root node.\n        :type strategy: str\n        :type locator: str\n        :type root: str :py:class:`~selenium.webdriver.remote.webelement.WebElement` object or None.\n        :return: ``True`` if element is present, else ``False``.\n        :rtype: bool", "id": "f11293:c1:m5"} {"signature": "def is_element_displayed(self, strategy, locator, root=None):", "body": "try:return self.find_element(strategy, locator, root=root).is_displayed()except NoSuchElementException:return False", "docstring": "Checks whether an element is displayed.\n\n        :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` for valid values.\n        :param locator: Location of target element.\n        :param root: (optional) root node.\n        :type strategy: str\n        :type locator: str\n        :type root: str :py:class:`~selenium.webdriver.remote.webelement.WebElement` object or None.\n        :return: ``True`` if element is displayed, else ``False``.\n        :rtype: bool", "id": "f11293:c1:m6"} {"signature": "def register():", "body": "registerDriver(ISplinter,Splinter,class_implements=[FirefoxWebDriver,ChromeWebDriver,RemoteWebDriver,],)", "docstring": "Register the Splinter specific driver implementation.\n\n    This register call is performed by the init module if\n    splinter is available.", "id": "f11295:m0"} {"signature": "def open(self, url):", "body": "self.driver.visit(url)", "docstring": "Open the page.\n        Navigates to :py:attr:`url`", "id": "f11295:c1:m1"} {"signature": "def find_element(self, strategy, locator, root=None):", "body": "elements = self.find_elements(strategy, locator, root=root)return elements and elements.first or None", "docstring": "Finds an element on the page.\n\n        :param strategy: Location strategy to use. 
See pypom.splinter_driver.ALLOWED_STRATEGIES for valid values.\n :param locator: Location of target element.\n :type strategy: str\n :type locator: str\n :return: :py:class:`~splinter.driver.webdriver.WebDriverElement`.\n :rtype: splinter.driver.webdriver.WebDriverElement", "id": "f11295:c1:m2"} {"signature": "def find_elements(self, strategy, locator, root=None):", "body": "node = root or self.driverif strategy in ALLOWED_STRATEGIES:return getattr(node, \"\" + strategy)(locator)raise UsageError(\"\")", "docstring": "Finds elements on the page.\n\n :param strategy: Location strategy to use. See pypom.splinter_driver.ALLOWED_STRATEGIES for valid values.\n :param locator: Location of target elements.\n :type strategy: str\n :type locator: str\n :return: List of :py:class:`~splinter.driver.webdriver.WebDriverElement`\n :rtype: :py:class:`splinter.element_list.ElementList`", "id": "f11295:c1:m3"} {"signature": "def is_element_present(self, strategy, locator, root=None):", "body": "return self.find_element(strategy, locator, root=root) and True or False", "docstring": "Checks whether an element is present.\n\n :param strategy: Location strategy to use. See pypom.splinter_driver.ALLOWED_STRATEGIES for valid values.\n :param locator: Location of target element.\n :type strategy: str\n :type locator: str\n :return: ``True`` if element is present, else ``False``.\n :rtype: bool", "id": "f11295:c1:m4"} {"signature": "def is_element_displayed(self, strategy, locator, root=None):", "body": "element = self.find_element(strategy, locator, root=root)return element and element.visible or False", "docstring": "Checks whether an element is displayed.\n\n :param strategy: Location strategy to use. See pypom.splinter_driver.ALLOWED_STRATEGIES for valid values.\n :param locator: Location of target element.\n :type strategy: str\n :type locator: str\n :return: ``True`` if element is displayed, else ``False``.\n :rtype: bool", "id": "f11295:c1:m5"} {"signature": "def __init__(self, south, west, north, east):", "body": "self.west = westself.south = southself.east = eastself.north = north", "docstring": "Initialize query with given bounding box.\n\n:param bbox Bounding box with limit values in format west, south,\neast, north.", "id": "f11302:c0:m0"} {"signature": "def __init__(self, query_parameters):", "body": "self.query_parameters = query_parameters", "docstring": "Initialize a query for a set of ways satisfying the given parameters.\n\n :param query_parameters Overpass QL query parameters", "id": "f11302:c1:m0"} {"signature": "def get(self, query, responseformat=\"\", verbosity=\"\", build=True):", "body": "if build:full_query = self._construct_ql_query(query, responseformat=responseformat, verbosity=verbosity)else:full_query = queryif self.debug:logging.getLogger().info(query)r = self._get_from_overpass(full_query)content_type = r.headers.get(\"\")if self.debug:print(content_type)if content_type == \"\":result = []reader = csv.reader(StringIO(r.text), delimiter=\"\")for row in reader:result.append(row)return resultelif content_type in (\"\", \"\", \"\"):return r.textelif content_type == \"\":response = json.loads(r.text)if not build:return responseif \"\" not in response:raise UnknownOverpassError(\"\")overpass_remark = response.get(\"\", None)if overpass_remark and overpass_remark.startswith(\"\"):raise ServerRuntimeError(overpass_remark)if responseformat is not \"\":return responsereturn self._as_geojson(response[\"\"])", "docstring": "Pass in an Overpass query in Overpass QL.", "id": "f11305:c0:m1"} 
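The two query-builder records (f11302) and the get() record (f11305:c0:m1) above describe a client for the Overpass API: queries are either raw Overpass QL strings or objects built from a bounding box given as south, west, north, east, and get() takes care of the response format. A minimal usage sketch follows, assuming these records come from the Python `overpass` client package; the `API` and `MapQuery` class names, the `timeout` keyword, and the "geojson" response format name are assumptions based on that package, not values taken from the records (whose literals are masked).

    import overpass  # assumed package name for the records above

    api = overpass.API(timeout=60)  # 'timeout' keyword is an assumption

    # Raw Overpass QL handed straight to get(); the response format name is assumed.
    named_nodes = api.get('node["name"="Salt Lake City"]', responseformat="geojson")

    # Bounding-box helper mirroring __init__(self, south, west, north, east) above;
    # the coordinates are placeholder values.
    bbox_query = overpass.MapQuery(50.746, 7.154, 50.748, 7.157)
    features = api.get(bbox_query)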
{"signature": "def search(self, feature_type, regex=False):", "body": "raise NotImplementedError()", "docstring": "Search for something.", "id": "f11305:c0:m2"} {"signature": "def readattr(path, name):", "body": "try:f = open(USB_SYS_PREFIX + path + \"\" + name)return f.readline().rstrip(\"\")except IOError:return None", "docstring": "Read attribute from sysfs and return as string", "id": "f11313:m0"} {"signature": "def find_ports(device):", "body": "bus_id = device.busdev_id = device.addressfor dirent in os.listdir(USB_SYS_PREFIX):matches = re.match(USB_PORTS_STR + '', dirent)if matches:bus_str = readattr(dirent, '')if bus_str:busnum = float(bus_str)else:busnum = Nonedev_str = readattr(dirent, '')if dev_str:devnum = float(dev_str)else:devnum = Noneif busnum == bus_id and devnum == dev_id:return str(matches.groups()[])", "docstring": "Find the port chain a device is plugged on.\n\nThis is done by searching sysfs for a device that matches the device\nbus/address combination.\n\nUseful when the underlying usb lib does not return device.port_number for\nwhatever reason.", "id": "f11313:m1"} {"signature": "def set_calibration_data(self, scale=None, offset=None):", "body": "if scale is not None and offset is not None:self._scale = scaleself._offset = offsetelif scale is None and offset is None:self._scale = self._offset = try:f = open('', '')except IOError:f = Noneif f:lines = f.read().split('')f.close()for line in lines:matches = re.match(CALIB_LINE_STR, line)if matches:bus = int(matches.groups()[])ports = matches.groups()[]scale = float(matches.groups()[])offset = float(matches.groups()[])if (str(ports) == str(self._ports)) and (str(bus) == str(self._bus)):self._scale = scaleself._offset = offsetelse:raise RuntimeError(\"\")", "docstring": "Set device calibration data based on settings in /etc/temper.conf.", "id": "f11313:c0:m1"} {"signature": "def lookup_offset(self, sensor):", "body": "if self._device.product == '':return return (sensor + ) * ", "docstring": "Lookup the number of sensors on the device by product name.", "id": "f11313:c0:m2"} {"signature": "def lookup_humidity_offset(self, sensor):", "body": "if self._device.product == '':return return None", "docstring": "Lookup the offset of the humidity data by product name.", "id": "f11313:c0:m3"} {"signature": "def lookup_sensor_count(self):", "body": "if (self._device.product == '') or(self._device.product == ''):return return ", "docstring": "Lookup the number of sensors on the device by product name.", "id": "f11313:c0:m4"} {"signature": "def get_sensor_count(self):", "body": "return self._sensor_count", "docstring": "Get number of sensors on the device.", "id": "f11313:c0:m5"} {"signature": "def set_sensor_count(self, count):", "body": "if count not in [, ,]:raise ValueError('')self._sensor_count = int(count)", "docstring": "Set number of sensors on the device.\n\nTo do: revamp /etc/temper.conf file to include this data.", "id": "f11313:c0:m6"} {"signature": "def get_ports(self):", "body": "if self._ports:return self._portsreturn ''", "docstring": "Get device USB ports.", "id": "f11313:c0:m7"} {"signature": "def get_bus(self):", "body": "if self._bus:return self._busreturn ''", "docstring": "Get device USB bus.", "id": "f11313:c0:m8"} {"signature": "def get_data(self, reset_device=False):", "body": "try:if reset_device:self._device.reset()for interface in [,]:if self._device.is_kernel_driver_active(interface):LOGGER.debug('''', interface, self._device, 
self._ports)self._device.detach_kernel_driver(interface)self._device.set_configuration()usb.util.claim_interface(self._device, INTERFACE)self._control_transfer(COMMANDS[''])self._interrupt_read()self._control_transfer(COMMANDS[''])temp_data = self._interrupt_read()if self._device.product == '':humidity_data = temp_dataelse:humidity_data = Nonedata = {'': temp_data, '': humidity_data}usb.util.dispose_resources(self._device)return dataexcept usb.USBError as err:if not reset_device:LOGGER.warning(\"\", err, self._device)return self.get_data(True)if \"\" in str(err):raise Exception(\"\"\"\")else:LOGGER.error(err)raise", "docstring": "Get data from the USB device.", "id": "f11313:c0:m9"} {"signature": "def get_temperature(self, format='', sensor=):", "body": "results = self.get_temperatures(sensors=[sensor,])if format == '':return results[sensor]['']elif format == '':return results[sensor]['']elif format == '':return results[sensor]['']else:raise ValueError(\"\")", "docstring": "Get device temperature reading.", "id": "f11313:c0:m10"} {"signature": "def get_temperatures(self, sensors=None):", "body": "_sensors = sensorsif _sensors is None:_sensors = list(range(, self._sensor_count))if not set(_sensors).issubset(list(range(, self._sensor_count))):raise ValueError('''' % (_sensors,self._sensor_count,list(range(, self._sensor_count)),))data = self.get_data()data = data['']results = {}for sensor in _sensors:offset = self.lookup_offset(sensor)celsius = struct.unpack_from('', data, offset)[] / celsius = celsius * self._scale + self._offsetresults[sensor] = {'': self.get_ports(),'': self.get_bus(),'': sensor,'': celsius * + ,'': celsius,'': celsius * ,'': celsius + ,}return results", "docstring": "Get device temperature reading.\n\nParams:\n- sensors: optional list of sensors to get a reading for, examples:\n [0,] - get reading for sensor 0\n [0, 1,] - get reading for sensors 0 and 1\n None - get readings for all sensors", "id": "f11313:c0:m11"} {"signature": "def get_humidity(self, sensors=None):", "body": "_sensors = sensorsif _sensors is None:_sensors = list(range(, self._sensor_count))if not set(_sensors).issubset(list(range(, self._sensor_count))):raise ValueError('''' % (_sensors,self._sensor_count,list(range(, self._sensor_count)),))data = self.get_data()data = data['']results = {}for sensor in _sensors:offset = self.lookup_humidity_offset(sensor)if offset is None:continuehumidity = (struct.unpack_from('', data, offset)[] * ) / results[sensor] = {'': self.get_ports(),'': self.get_bus(),'': sensor,'': humidity,}return results", "docstring": "Get device humidity reading.\n\nParams:\n- sensors: optional list of sensors to get a reading for, examples:\n [0,] - get reading for sensor 0\n [0, 1,] - get reading for sensors 0 and 1\n None - get readings for all sensors", "id": "f11313:c0:m12"} {"signature": "def _control_transfer(self, data):", "body": "LOGGER.debug('', data)self._device.ctrl_transfer(bmRequestType=, bRequest=,wValue=, wIndex=, data_or_wLength=data, timeout=TIMEOUT)", "docstring": "Send device a control request with standard parameters and as\npayload.", "id": "f11313:c0:m13"} {"signature": "def _interrupt_read(self):", "body": "data = self._device.read(ENDPOINT, REQ_INT_LEN, timeout=TIMEOUT)LOGGER.debug('', data)return data", "docstring": "Read data from device.", "id": "f11313:c0:m14"} {"signature": "def close(self):", "body": "pass", "docstring": "Does nothing in this device. 
Other device types may need to do cleanup here.", "id": "f11313:c0:m15"} {"signature": "def get_devices(self):", "body": "return self._devices", "docstring": "Get a list of all devices attached to this handler", "id": "f11313:c1:m1"} {"signature": "def hello_jp():", "body": "return u\"\"", "docstring": "Return 'Hello' in Japanese ('\u3053\u3093\u306b\u3061\u306f').", "id": "f11318:m0"} {"signature": "def get_version(fname=\"\"):", "body": "with open(fname) as f:for line in f:if line.startswith(\"\"):return eval(line.split(\"\")[-])", "docstring": "Parse our source code to get the current version number.", "id": "f11325:m0"} {"signature": "def code_mapping(level, msg, default=):", "body": "try:return code_mappings_by_level[level][msg]except KeyError:passif msg.count('') == and '' in msg and msg.endswith(''):txt = msg[: msg.index('')]return code_mappings_by_level[level].get(txt, default)return default", "docstring": "Return an error code between 0 and 99.", "id": "f11326:m0"} {"signature": "def trim(docstring):", "body": "if not docstring:return \"\"lines = docstring.expandtabs().splitlines()indent = sys.maxsizefor line in lines[:]:stripped = line.lstrip()if stripped:indent = min(indent, len(line) - len(stripped))trimmed = [lines[].strip()]if indent < sys.maxsize:for line in lines[:]:trimmed.append(line[indent:].rstrip())while trimmed and not trimmed[-]:trimmed.pop()while trimmed and not trimmed[]:trimmed.pop()return \"\".join(trimmed)", "docstring": "PEP257 docstring indentation trim function.", "id": "f11326:m1"} {"signature": "def dequote_docstring(text):", "body": "text = text.strip()if len(text) > and text[:] == text[-:] == '':return text[:-]if len(text) > and text[:] in ('', '') and text[-:] == '':return text[:-]if len(text) > and text[:] == text[-:] == \"\":return text[:-]if len(text) > and text[:] in (\"\", \"\") and text[-:] == \"\":return text[:-]if len(text) > and text[] == text[-] == '':return text[:-]if len(text) > and text[:] in ('', '') and text[-] == '':return text[:-]if len(text) > and text[] == text[-] == \"\":return text[:-]if len(text) > and text[:] in (\"\", \"\") and text[-] == \"\":return text[:-]raise ValueError(\"\")", "docstring": "Remove the quotes delimiting a docstring.", "id": "f11326:m2"} {"signature": "def humanize(string):", "body": "return re.compile(r\"\").sub(r\"\", string).lower()", "docstring": "Make a string human readable.", "id": "f11326:m3"} {"signature": "def __init__(self, *args):", "body": "if len(self._fields) != len(args):raise ValueError(\"\".format(len(args), len(self._fields), self.__class__.__name__, self._fields))vars(self).update(zip(self._fields, args))", "docstring": "Initialize.", "id": "f11326:c0:m0"} {"signature": "def __hash__(self):", "body": "return hash(repr(self))", "docstring": "Hash.", "id": "f11326:c0:m1"} {"signature": "def __eq__(self, other):", "body": "return other and vars(self) == vars(other)", "docstring": "Equality.", "id": "f11326:c0:m2"} {"signature": "def __repr__(self):", "body": "kwargs = \"\".join(\"\".format(field, getattr(self, field)) for field in self._fields)return \"\".format(self.__class__.__name__, kwargs)", "docstring": "Representation.", "id": "f11326:c0:m3"} {"signature": "def __iter__(self):", "body": "return chain([self], *self.children)", "docstring": "Iterate.", "id": "f11326:c1:m0"} {"signature": "@propertydef source(self):", "body": "full_src = self._source[self._slice]def is_empty_or_comment(line):return line.strip() == \"\" or line.strip().startswith(\"\")filtered_src = 
dropwhile(is_empty_or_comment, reversed(full_src))return \"\".join(reversed(list(filtered_src)))", "docstring": "Return the source code for the definition.", "id": "f11326:c1:m2"} {"signature": "def __str__(self):", "body": "out = \"\".format(self._publicity, self._human, self.name)if self.skipped_error_codes:out += \"\".format(self.skipped_error_codes)return out", "docstring": "Definition as a string.", "id": "f11326:c1:m3"} {"signature": "@propertydef is_public(self):", "body": "return not self.name.startswith(\"\") or self.name.startswith(\"\")", "docstring": "Is the module public.", "id": "f11326:c2:m0"} {"signature": "def __str__(self):", "body": "return \"\"", "docstring": "Definition as a string.", "id": "f11326:c2:m1"} {"signature": "@propertydef is_public(self):", "body": "if self.all is not None:return self.name in self.allelse:return not self.name.startswith(\"\")", "docstring": "Return True iff this function should be considered public.", "id": "f11326:c4:m0"} {"signature": "@propertydef is_magic(self):", "body": "return (self.name.startswith(\"\")and self.name.endswith(\"\")and self.name not in VARIADIC_MAGIC_METHODS)", "docstring": "Return True iff this method is a magic method (e.g., `__str__`).", "id": "f11326:c6:m0"} {"signature": "@propertydef is_public(self):", "body": "for decorator in self.decorators:if re.compile(r\"\".format(self.name)).match(decorator.name):return Falsename_is_public = (not self.name.startswith(\"\")or self.name in VARIADIC_MAGIC_METHODSor self.is_magic)return self.parent.is_public and name_is_public", "docstring": "Return True iff this method should be considered public.", "id": "f11326:c6:m1"} {"signature": "@propertydef is_public(self):", "body": "return (not self.name.startswith(\"\")and self.parent.is_classand self.parent.is_public)", "docstring": "Return True iff this class should be considered public.", "id": "f11326:c8:m0"} {"signature": "def __init__(self, message):", "body": "Exception.__init__(self,message+ textwrap.dedent(\"\"\"\"\"\"),)", "docstring": "Initialize the error with a more specific message.", "id": "f11326:c10:m0"} {"signature": "def __init__(self, filelike):", "body": "self._generator = tk.generate_tokens(filelike.readline)self.current = Token(*next(self._generator, None))self.line = self.current.start[]self.log = logself.got_logical_newline = True", "docstring": "Initialize.", "id": "f11326:c11:m0"} {"signature": "def move(self):", "body": "previous = self.currentcurrent = self._next_from_generator()self.current = None if current is None else Token(*current)self.line = self.current.start[] if self.current else self.lineself.got_logical_newline = previous.kind in self.LOGICAL_NEWLINESreturn previous", "docstring": "Move.", "id": "f11326:c11:m1"} {"signature": "def __iter__(self):", "body": "while True:if self.current is not None:yield self.currentelse:returnself.move()", "docstring": "Iterate.", "id": "f11326:c11:m3"} {"signature": "def __repr__(self):", "body": "return \"\".format(tk.tok_name[self])", "docstring": "Representation.", "id": "f11326:c12:m0"} {"signature": "def __init__(self, *args):", "body": "super(Token, self).__init__(*args)self.kind = TokenKind(self.kind)", "docstring": "Initialize.", "id": "f11326:c13:m0"} {"signature": "def parse(self, filelike, filename):", "body": "self.log = logself.source = filelike.readlines()src = \"\".join(self.source)compile(src, filename, \"\")self.stream = TokenStream(StringIO(src))self.filename = filenameself.all = Noneself.future_imports = set()self._accumulated_decorators = 
[]return self.parse_module()", "docstring": "Parse the given file-like object and return its Module object.", "id": "f11326:c14:m0"} {"signature": "def __call__(self, *args, **kwargs):", "body": "return self.parse(*args, **kwargs)", "docstring": "Call the parse method.", "id": "f11326:c14:m1"} {"signature": "def consume(self, kind):", "body": "next_token = self.stream.move()assert next_token.kind == kind", "docstring": "Consume one token and verify it is of the expected kind.", "id": "f11326:c14:m2"} {"signature": "def leapfrog(self, kind, value=None):", "body": "while self.current is not None:if self.current.kind == kind and (value is None or self.current.value == value):self.consume(kind)returnself.stream.move()", "docstring": "Skip tokens in the stream until a certain token kind is reached.\n\n If `value` is specified, tokens whose values are different will also\n be skipped.", "id": "f11326:c14:m3"} {"signature": "def parse_docstring(self):", "body": "self.log.debug(\"\", self.current.kind, self.current.value)while self.current.kind in (tk.COMMENT, tk.NEWLINE, tk.NL):self.stream.move()self.log.debug(\"\",self.current.kind,self.current.value,)if self.current.kind == tk.STRING:docstring = self.current.valueself.stream.move()return docstringreturn None", "docstring": "Parse a single docstring and return its value.", "id": "f11326:c14:m4"} {"signature": "def parse_decorators(self): ", "body": "name = []arguments = []at_arguments = Falsewhile self.current is not None:self.log.debug(\"\",self.current.kind,self.current.value,)if self.current.kind == tk.NAME and self.current.value in [\"\", \"\"]:breakelif self.current.kind == tk.OP and self.current.value == \"\":self._accumulated_decorators.append(Decorator(\"\".join(name), \"\".join(arguments)))name = []arguments = []at_arguments = Falseelif self.current.kind == tk.OP and self.current.value == \"\":at_arguments = Trueelif self.current.kind == tk.OP and self.current.value == \"\":passelif self.current.kind == tk.NEWLINE or self.current.kind == tk.NL:passelse:if not at_arguments:name.append(self.current.value)else:arguments.append(self.current.value)self.stream.move()self._accumulated_decorators.append(Decorator(\"\".join(name), \"\".join(arguments)))", "docstring": "Called after first @ is found.\n\n Parse decorators into self._accumulated_decorators.\n Continue to do so until encountering the 'def' or 'class' start token.", "id": "f11326:c14:m5"} {"signature": "def parse_definitions(self, class_, all=False):", "body": "while self.current is not None:self.log.debug(\"\",self.current.kind,self.current.value,)self.log.debug(\"\", self.stream.got_logical_newline)if all and self.current.value == \"\":self.parse_all()elif (self.current.kind == tk.OPand self.current.value == \"\"and self.stream.got_logical_newline):self.consume(tk.OP)self.parse_decorators()elif self.current.value in [\"\", \"\"]:yield self.parse_definition(class_._nest(self.current.value))elif self.current.kind == tk.INDENT:self.consume(tk.INDENT)for definition in self.parse_definitions(class_):yield definitionelif self.current.kind == tk.DEDENT:self.consume(tk.DEDENT)returnelif self.current.value == \"\":self.parse_from_import_statement()else:self.stream.move()", "docstring": "Parse multiple definitions and yield them.", "id": "f11326:c14:m6"} {"signature": "def parse_all(self):", "body": "assert self.current.value == \"\"self.consume(tk.NAME)if self.current.value != \"\":raise AllError(\"\")self.consume(tk.OP)if self.current.value not in \"\":raise 
AllError(\"\")self.consume(tk.OP)self.all = []all_content = \"\"while self.current.kind != tk.OP or self.current.value not in \"\":if self.current.kind in (tk.NL, tk.COMMENT):passelif self.current.kind == tk.STRING or self.current.value == \"\":all_content += self.current.valueelse:raise AllError(\"\".format(self.current.kind))self.stream.move()self.consume(tk.OP)all_content += \"\"try:self.all = eval(all_content, {})except BaseException as e:raise AllError(\"\"\"\".format(all_content, e))", "docstring": "Parse the __all__ definition in a module.", "id": "f11326:c14:m7"} {"signature": "def parse_module(self):", "body": "self.log.debug(\"\")start = self.linedocstring = self.parse_docstring()children = list(self.parse_definitions(Module, all=True))assert self.current is None, self.currentend = self.linecls = Moduleif self.filename.endswith(\"\"):cls = Packagemodule = cls(self.filename,self.source,start,end,[],docstring,children,None,self.all,None,\"\",)for child in module.children:child.parent = modulemodule.future_imports = self.future_importsself.log.debug(\"\")return module", "docstring": "Parse a module (and its children) and return a Module object.", "id": "f11326:c14:m8"} {"signature": "def parse_definition(self, class_):", "body": "start = self.lineself.consume(tk.NAME)name = self.current.valueself.log.debug(\"\", class_.__name__, name)self.stream.move()if self.current.kind == tk.OP and self.current.value == \"\":parenthesis_level = while True:if self.current.kind == tk.OP:if self.current.value == \"\":parenthesis_level += elif self.current.value == \"\":parenthesis_level -= if parenthesis_level == :breakself.stream.move()if self.current.kind != tk.OP or self.current.value != \"\":self.leapfrog(tk.OP, value=\"\")else:self.consume(tk.OP)if self.current.kind in (tk.NEWLINE, tk.COMMENT):skipped_error_codes = self.parse_skip_comment()self.leapfrog(tk.INDENT)assert self.current.kind != tk.INDENTdocstring = self.parse_docstring()decorators = self._accumulated_decoratorsself.log.debug(\"\", decorators)self._accumulated_decorators = []self.log.debug(\"\")children = list(self.parse_definitions(class_))self.log.debug(\"\", name)end = self.line - else: skipped_error_codes = \"\"docstring = self.parse_docstring()decorators = [] children = []end = self.lineself.leapfrog(tk.NEWLINE)definition = class_(name,self.source,start,end,decorators,docstring,children,None,skipped_error_codes,)for child in definition.children:child.parent = definitionself.log.debug(\"\",class_.__name__,name,self.current.kind,self.current.value,)return definition", "docstring": "Parse a definition and return its value in a `class_` object.", "id": "f11326:c14:m9"} {"signature": "def parse_skip_comment(self):", "body": "skipped_error_codes = \"\"if self.current.kind == tk.COMMENT:if \"\" in self.current.value:skipped_error_codes = \"\".join(self.current.value.split(\"\")[:])elif self.current.value.startswith(\"\"):skipped_error_codes = \"\"return skipped_error_codes", "docstring": "Parse a definition comment for noqa skips.", "id": "f11326:c14:m10"} {"signature": "def check_current(self, kind=None, value=None):", "body": "msg = textwrap.dedent(\"\"\"\"\"\".format(self=self))kind_valid = self.current.kind == kind if kind else Truevalue_valid = self.current.value == value if value else Trueassert kind_valid and value_valid, msg", "docstring": "Verify the current token is of type `kind` and equals `value`.", "id": "f11326:c14:m11"} {"signature": "def parse_from_import_statement(self):", "body": 
"self.log.debug(\"\")is_future_import = self._parse_from_import_source()self._parse_from_import_names(is_future_import)", "docstring": "Parse a 'from x import y' statement.\n\n The purpose is to find __future__ statements.", "id": "f11326:c14:m12"} {"signature": "def _parse_from_import_source(self):", "body": "assert self.current.value == \"\", self.current.valueself.stream.move()is_future_import = self.current.value == \"\"self.stream.move()while (self.current is not Noneand self.current.kind in (tk.DOT, tk.NAME, tk.OP)and self.current.value != \"\"):self.stream.move()if self.current is None or self.current.value != \"\":return Falseself.check_current(value=\"\")assert self.current.value == \"\", self.current.valueself.stream.move()return is_future_import", "docstring": "Parse the 'from x import' part in a 'from x import y' statement.\n\n Return true iff `x` is __future__.", "id": "f11326:c14:m13"} {"signature": "def _parse_from_import_names(self, is_future_import):", "body": "if self.current.value == \"\":self.consume(tk.OP)expected_end_kinds = (tk.OP,)else:expected_end_kinds = (tk.NEWLINE, tk.ENDMARKER)while self.current.kind not in expected_end_kinds and not (self.current.kind == tk.OP and self.current.value == \"\"):if self.current.kind != tk.NAME:self.stream.move()continueself.log.debug(\"\",self.current.kind,self.current.value,)if is_future_import:self.log.debug(\"\", self.current.value)self.future_imports.add(self.current.value)self.consume(tk.NAME)self.log.debug(\"\",self.current.kind,self.current.value,)if self.current.kind == tk.NAME and self.current.value == \"\":self.consume(tk.NAME) if self.current.kind == tk.NAME:self.consume(tk.NAME) if self.current.value == \"\":self.consume(tk.OP)self.log.debug(\"\",self.current.kind,self.current.value,)", "docstring": "Parse the 'y' part in a 'from x import y' statement.", "id": "f11326:c14:m14"} {"signature": "def __init__(self, tree, filename=\"\", builtins=None):", "body": "self.tree = treeself.filename = filenametry:self.load_source()self.err = Noneexcept Exception as err:self.source = Noneself.err = err", "docstring": "Initialise.", "id": "f11326:c15:m0"} {"signature": "def run(self):", "body": "if self.err is not None:assert self.source is Nonemsg = \"\" % (rst_prefix,rst_fail_load,\"\" % self.err,)yield , , msg, type(self)module = []try:module = parse(StringIO(self.source), self.filename)except SyntaxError as err:msg = \"\" % (rst_prefix,rst_fail_parse,\"\" % err,)yield , , msg, type(self)module = []except AllError:msg = \"\" % (rst_prefix,rst_fail_all,\"\",)yield , , msg, type(self)module = []for definition in module:if not definition.docstring:continuetry:unindented = trim(dequote_docstring(definition.docstring))rst_errors = list(rst_lint.lint(unindented))except Exception as err:msg = \"\" % (rst_prefix,rst_fail_lint,\"\" % (definition.name, err),)yield definition.start, , msg, type(self)continuefor rst_error in rst_errors:if rst_error.level <= :continuemsg = rst_error.message.split(\"\", )[]code = code_mapping(rst_error.level, msg)assert code < , codecode += * rst_error.levelmsg = \"\" % (rst_prefix, code, msg)yield definition.start + rst_error.line, , msg, type(self)", "docstring": "Use docutils to check docstrings are valid RST.", "id": "f11326:c15:m1"} {"signature": "def load_source(self):", "body": "if self.filename in self.STDIN_NAMES:self.filename = \"\"if sys.version_info[] < :self.source = sys.stdin.read()else:self.source = TextIOWrapper(sys.stdin.buffer, errors=\"\").read()else:handle = 
tokenize_open(self.filename)self.source = handle.read()handle.close()", "docstring": "Load the source for the specified file.", "id": "f11326:c15:m2"} {"signature": "def _nominal_metric(v1, v2, **_kwargs):", "body": "return v1 != v2", "docstring": "Metric for nominal data.", "id": "f11329:m0"} {"signature": "def _ordinal_metric(_v1, _v2, i1, i2, n_v):", "body": "if i1 > i2:i1, i2 = i2, i1return (np.sum(n_v[i1:(i2 + )]) - (n_v[i1] + n_v[i2]) / ) ** ", "docstring": "Metric for ordinal data.", "id": "f11329:m1"} {"signature": "def _interval_metric(v1, v2, **_kwargs):", "body": "return (v1 - v2) ** ", "docstring": "Metric for interval data.", "id": "f11329:m2"} {"signature": "def _ratio_metric(v1, v2, **_kwargs):", "body": "return (((v1 - v2) / (v1 + v2)) ** ) if v1 + v2 != else ", "docstring": "Metric for ratio data.", "id": "f11329:m3"} {"signature": "def _coincidences(value_counts, value_domain, dtype=np.float64):", "body": "value_counts_matrices = value_counts.reshape(value_counts.shape + (,))pairable = np.maximum(np.sum(value_counts, axis=), )diagonals = np.tile(np.eye(len(value_domain)), (len(value_counts), , ))* value_counts.reshape((value_counts.shape[], , value_counts.shape[]))unnormalized_coincidences = value_counts_matrices * value_counts_matrices.transpose((, , )) - diagonalsreturn np.sum(np.divide(unnormalized_coincidences, (pairable - ).reshape((-, , )), dtype=dtype), axis=)", "docstring": "Coincidence matrix.\n\n Parameters\n ----------\n value_counts : ndarray, with shape (N, V)\n Number of coders that assigned a certain value to a determined unit, where N is the number of units\n and V is the value count.\n\n value_domain : array_like, with shape (V,)\n Possible values V the units can take.\n If the level of measurement is not nominal, it must be ordered.\n\n dtype : data-type\n Result and computation data-type.\n\n Returns\n -------\n o : ndarray, with shape (V, V)\n Coincidence matrix.", "id": "f11329:m4"} {"signature": "def _random_coincidences(value_domain, n, n_v):", "body": "n_v_column = n_v.reshape(-, )return (n_v_column.dot(n_v_column.T) - np.eye(len(value_domain)) * n_v_column) / (n - )", "docstring": "Random coincidence matrix.\n\n Parameters\n ----------\n value_domain : array_like, with shape (V,)\n Possible values V the units can take.\n If the level of measurement is not nominal, it must be ordered.\n\n n : scalar\n Number of pairable values.\n\n n_v : ndarray, with shape (V,)\n Number of pairable elements for each value.\n\n Returns\n -------\n e : ndarray, with shape (V, V)\n Random coincidence matrix.", "id": "f11329:m5"} {"signature": "def _distances(value_domain, distance_metric, n_v):", "body": "return np.array([[distance_metric(v1, v2, i1=i1, i2=i2, n_v=n_v)for i2, v2 in enumerate(value_domain)]for i1, v1 in enumerate(value_domain)])", "docstring": "Distances of the different possible values.\n\n Parameters\n ----------\n value_domain : array_like, with shape (V,)\n Possible values V the units can take.\n If the level of measurement is not nominal, it must be ordered.\n\n distance_metric : callable\n Callable that return the distance of two given values.\n\n n_v : ndarray, with shape (V,)\n Number of pairable elements for each value.\n\n Returns\n -------\n d : ndarray, with shape (V, V)\n Distance matrix for each value pair.", "id": "f11329:m6"} {"signature": "def _distance_metric(level_of_measurement):", "body": "return {'': _nominal_metric,'': _ordinal_metric,'': _interval_metric,'': _ratio_metric,}.get(level_of_measurement, level_of_measurement)", 
"docstring": "Distance metric callable of the level of measurement.\n\n Parameters\n ----------\n level_of_measurement : string or callable\n Steven's level of measurement of the variable.\n It must be one of 'nominal', 'ordinal', 'interval', 'ratio' or a callable.\n\n Returns\n -------\n metric : callable\n Distance callable.", "id": "f11329:m7"} {"signature": "def _transpose_list(list_of_lists):", "body": "return list(map(list, zip(*list_of_lists)))", "docstring": "Transpose a list of lists.", "id": "f11329:m8"} {"signature": "def _reliability_data_to_value_counts(reliability_data, value_domain):", "body": "return np.array([[sum( for rate in unit if rate == v) for v in value_domain] for unit in reliability_data.T])", "docstring": "Return the value counts given the reliability data.\n\n Parameters\n ----------\n reliability_data : ndarray, with shape (M, N)\n Reliability data matrix which has the rate the i coder gave to the j unit, where M is the number of raters\n and N is the unit count.\n Missing rates are represented with `np.nan`.\n\n value_domain : array_like, with shape (V,)\n Possible values the units can take.\n\n Returns\n -------\n value_counts : ndarray, with shape (N, V)\n Number of coders that assigned a certain value to a determined unit, where N is the number of units\n and V is the value count.", "id": "f11329:m9"} {"signature": "def alpha(reliability_data=None, value_counts=None, value_domain=None, level_of_measurement='',dtype=np.float64):", "body": "if (reliability_data is None) == (value_counts is None):raise ValueError(\"\")if value_counts is None:if type(reliability_data) is not np.ndarray:reliability_data = np.array(reliability_data)value_domain = value_domain or np.unique(reliability_data[~np.isnan(reliability_data)])value_counts = _reliability_data_to_value_counts(reliability_data, value_domain)else: if value_domain:assert value_counts.shape[] == len(value_domain),\"\"else:value_domain = tuple(range(value_counts.shape[]))distance_metric = _distance_metric(level_of_measurement)o = _coincidences(value_counts, value_domain, dtype=dtype)n_v = np.sum(o, axis=)n = np.sum(n_v)e = _random_coincidences(value_domain, n, n_v)d = _distances(value_domain, distance_metric, n_v)return - np.sum(o * d) / np.sum(e * d)", "docstring": "Compute Krippendorff's alpha.\n\n See https://en.wikipedia.org/wiki/Krippendorff%27s_alpha for more information.\n\n Parameters\n ----------\n reliability_data : array_like, with shape (M, N)\n Reliability data matrix which has the rate the i coder gave to the j unit, where M is the number of raters\n and N is the unit count.\n Missing rates are represented with `np.nan`.\n If it's provided then `value_counts` must not be provided.\n\n value_counts : ndarray, with shape (N, V)\n Number of coders that assigned a certain value to a determined unit, where N is the number of units\n and V is the value count.\n If it's provided then `reliability_data` must not be provided.\n\n value_domain : array_like, with shape (V,)\n Possible values the units can take.\n If the level of measurement is not nominal, it must be ordered.\n If `reliability_data` is provided, then the default value is the ordered list of unique rates that appear.\n Else, the default value is `list(range(V))`.\n\n level_of_measurement : string or callable\n Steven's level of measurement of the variable.\n It must be one of 'nominal', 'ordinal', 'interval', 'ratio' or a callable.\n\n dtype : data-type\n Result and computation data-type.\n\n Returns\n -------\n alpha : `dtype`\n Scalar value 
of Krippendorff's alpha of type `dtype`.\n\n Examples\n --------\n >>> reliability_data = [[np.nan, np.nan, np.nan, np.nan, np.nan, 3, 4, 1, 2, 1, 1, 3, 3, np.nan, 3],\n ... [1, np.nan, 2, 1, 3, 3, 4, 3, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],\n ... [np.nan, np.nan, 2, 1, 3, 4, 4, np.nan, 2, 1, 1, 3, 3, np.nan, 4]]\n >>> print(round(alpha(reliability_data=reliability_data, level_of_measurement='nominal'), 6))\n 0.691358\n >>> print(round(alpha(reliability_data=reliability_data, level_of_measurement='interval'), 6))\n 0.810845\n >>> value_counts = np.array([[1, 0, 0, 0],\n ... [0, 0, 0, 0],\n ... [0, 2, 0, 0],\n ... [2, 0, 0, 0],\n ... [0, 0, 2, 0],\n ... [0, 0, 2, 1],\n ... [0, 0, 0, 3],\n ... [1, 0, 1, 0],\n ... [0, 2, 0, 0],\n ... [2, 0, 0, 0],\n ... [2, 0, 0, 0],\n ... [0, 0, 2, 0],\n ... [0, 0, 2, 0],\n ... [0, 0, 0, 0],\n ... [0, 0, 1, 1]])\n >>> print(round(alpha(value_counts=value_counts, level_of_measurement='nominal'), 6))\n 0.691358\n >>> # The following examples were extracted from\n >>> # https://www.statisticshowto.datasciencecentral.com/wp-content/uploads/2016/07/fulltext.pdf, page 8.\n >>> reliability_data = [[1, 2, 3, 3, 2, 1, 4, 1, 2, np.nan, np.nan, np.nan],\n ... [1, 2, 3, 3, 2, 2, 4, 1, 2, 5, np.nan, 3.],\n ... [np.nan, 3, 3, 3, 2, 3, 4, 2, 2, 5, 1, np.nan],\n ... [1, 2, 3, 3, 2, 4, 4, 1, 2, 5, 1, np.nan]]\n >>> print(round(alpha(reliability_data, level_of_measurement='ordinal'), 3))\n 0.815\n >>> print(round(alpha(reliability_data, level_of_measurement='ratio'), 3))\n 0.797", "id": "f11329:m10"} {"signature": "def isEverythingGood(verbose=False):", "body": "global ALLGOODswhlab.loglevel=swhlab.loglevel_SILENT if verbose:swhlab.loglevel=swhlab.loglevel_DEBUGtry:shutil.rmtree('') except:passtry:os.mkdir('')except:passif os.path.isdir(''):swhlab.plotting.core.IMAGE_SAVE=Trueswhlab.plotting.core.IMAGE_SHOW=False unittest.main()if ALLGOOD:return Trueelse:return Falseelse:print(\"\")return False", "docstring": "return True if all unit tests pass.", "id": "f11332:m0"} {"signature": "def readLog(fname=\"\",onlyAfter=datetime.datetime(year=,month=,day=)):", "body": "with open(fname) as f:raw=f.read().split(\"\")efforts=[] for line in raw[:]:line=line.strip().split(\"\")date=datetime.datetime.strptime(line[], \"\")if onlyAfter and datecontinueif len(line)<:continuefor project in line[:]:project=project.strip()if len(project):efforts.append([date,project])return efforts", "docstring": "return a list of [stamp, project] elements.", "id": "f11338:m0"} {"signature": "def fileModifiedTimestamp(fname):", "body": "modifiedTime=os.path.getmtime(fname)stamp=time.strftime('', time.localtime(modifiedTime))return stamp", "docstring": "return \"YYYY-MM-DD\" when the file was modified.", "id": "f11339:m0"} {"signature": "def loadResults(resultsFile):", "body": "with open(resultsFile) as f:raw=f.read().split(\"\")foldersByDay={}for line in raw:folder=line.split('')[]+\"\"line=[]+line.split('')[].split(\"\")for day in line[:]:if not day in foldersByDay:foldersByDay[day]=[]foldersByDay[day]=foldersByDay[day]+[folder]nActiveDays=len(foldersByDay)dayFirst=sorted(foldersByDay.keys())[]dayLast=sorted(foldersByDay.keys())[-]dayFirst=datetime.datetime.strptime(dayFirst, \"\" )dayLast=datetime.datetime.strptime(dayLast, \"\" )nDays = (dayLast - dayFirst).days + emptyDays=for deltaDays in range(nDays):day=dayFirst+datetime.timedelta(days=deltaDays)stamp=datetime.datetime.strftime(day, \"\" )if not stamp in 
foldersByDay:foldersByDay[stamp]=[]emptyDays+=percActive=nActiveDays/nDays*print(\"\"%(nActiveDays,nDays,percActive))return foldersByDay", "docstring": "returns a dict of active folders with days as keys.", "id": "f11339:m2"} {"signature": "def HTML_results(resultsFile):", "body": "foldersByDay=loadResults(resultsFile)for day in sorted(list(foldersByDay.keys())):if time.strptime(day,\"\")\",\"\"):del foldersByDay[day]html=\"\"html+=\"\"html+=\"\"html+=\"\"html=html.replace(\"\",(time.strftime('', time.localtime())))html+=\"\"*html+=\"\"html+=\"\"lastMonth=\"\"lastYear=\"\"for day in sorted(list(foldersByDay.keys())):month=day[:]year=day[:]if year!=lastYear:html+=\"\"%yearlastYear=yearif month!=lastMonth:html+=\"\"%monthlastMonth=monthhtml+=\"\"%(day,day[:])html+=\"\"*html=html.replace(\"\",\"\")html+=\"\"html+=\"\"for day in sorted(list(foldersByDay.keys())):dt=datetime.datetime.strptime(day, \"\" )classPrefix=\"\"if int(dt.weekday())>:classPrefix=\"\"html+=\"\"%(day,day)title=\"\"%(day,DAYSOFWEEK[dt.weekday()])html+=\"\"%(classPrefix,title)html+=\"\"%(classPrefix)for folder in foldersByDay[day]:if \"\" in folder:continueif \"\" in folder:continueif \"\" and \"\" in folder:continueif \"\" in folder:continueif \"\" in folder:continueif \"\" in folder:continuehtml+=\"\"%folderhtml+=\"\"fnameSave=resultsFile+\"\"html=html.replace(\"\",\"\")with open(fnameSave,'') as f:f.write(HTML_TEMPLATE.replace(\"\",\"\"+html))print(\"\",fnameSave)", "docstring": "generates HTML report of active folders/days.", "id": "f11339:m3"} {"signature": "def clampfit_rename(path,char):", "body": "assert len(char)== and type(char)==str, \"\"assert os.path.exists(path), \"\"files = sorted(os.listdir(path))files = [x for x in files if len(x)> and x[]+x[]+x[]=='']for fname in files:fname2 = list(fname)fname2[]=charfname2=\"\".join(fname2)if fname==fname2:print(fname, \"\", fname2)else:print(fname, \"\", fname2)fname=os.path.join(path,fname)fname2=os.path.join(path,fname2)if not os.path.exists(fname2):os.rename(fname,fname2)return", "docstring": "Given ABFs and TIFs formatted long style, rename each of them to prefix their number with a different number.\n\nExample: 2017_10_11_0011.abf\nBecomes: 2017_10_11_?011.abf\nwhere ? 
can be any character.", "id": "f11340:m0"} {"signature": "def frameAndSave(abf,tag=\"\",dataType=\"\",saveAsFname=False,closeWhenDone=True):", "body": "print(\"\",closeWhenDone)plt.tight_layout()plt.subplots_adjust(top=,bottom =)plt.annotate(tag,(,),xycoords='',ha='',va='',family='',size=,alpha=)msgBot=\"\"%(abf.ID,abf.protocomment)plt.annotate(msgBot,(,),xycoords='',ha='',va='',family='',size=,alpha=)fname=tag.lower().replace(\"\",'')+\"\"fname=dataType+\"\"+fnameplt.tight_layout()if IMAGE_SAVE:abf.log.info(\"\",fname)try:if saveAsFname:saveAs=os.path.abspath(saveAsFname)else:saveAs=os.path.abspath(abf.outPre+fname)if not os.path.exists(abf.outFolder):os.mkdir(abf.outFolder)plt.savefig(saveAs)except Exception as E:abf.log.error(\"\",fname)print(E)if IMAGE_SHOW==True:if closeWhenDone==False:print(\"\")else:abf.log.info(\"\",fname)plt.show()if closeWhenDone:print(\"\")plt.close('')", "docstring": "frame the current matplotlib plot with ABF info, and optionally save it.\nNote that this is entirely independent of the ABFplot class object.\nif saveImage is False, show it instead.\n\nDatatype should be:\n * plot\n * experiment", "id": "f11342:m0"} {"signature": "def __init__(self,abf):", "body": "self.log = logging.getLogger(\"\")self.log.setLevel(swhlab.loglevel)self.close(True) if type(abf) is str:self.log.debug(\"\")abf=ABF(abf)self.abf=abfself.figure_width=self.figure_height=self.figure_dpi=self.subplot=False self.gridAlpha=self.title=os.path.basename(abf.filename)self.traceColor=''self.kwargs={\"\":}self.rainbow=Trueself.colormap=\"\"self.marginX,self.marginY=,self.log.debug(\"\")", "docstring": "Load an ABF and get ready to plot stuff.", "id": "f11342:c0:m0"} {"signature": "def figure(self,forceNew=False):", "body": "if plt._pylab_helpers.Gcf.get_num_fig_managers()> and forceNew is False:self.log.debug(\"\")returnif self.subplot:self.log.debug(\"\")else:self.log.debug(\"\")plt.figure(figsize=(self.figure_width,self.figure_height))", "docstring": "make sure a figure is ready.", "id": "f11342:c0:m1"} {"signature": "def save(self,callit=\"\",closeToo=True,fullpath=False):", "body": "if fullpath is False:fname=self.abf.outPre+\"\"+callit+\"\"else:fname=callitif not os.path.exists(os.path.dirname(fname)):os.mkdir(os.path.dirname(fname))plt.savefig(fname)self.log.info(\"\",os.path.basename(fname))if closeToo:plt.close()", "docstring": "save the existing figure. does not close it.", "id": "f11342:c0:m4"} {"signature": "def comments(self,minutes=False):", "body": "if self.comments==:returnself.log.debug(\"\")for i,t in enumerate(self.abf.comment_times):if minutes:t/=plt.axvline(t,color='',ls='')X1,X2,Y1,Y2=plt.axis()Y2=Y2-abs(Y2-Y1)*plt.text(t,Y2,self.abf.comment_tags[i],color='',rotation='',ha='',va='',weight='',alpha=,size=,)", "docstring": "Add comment lines/text to an existing plot. 
Defaults to seconds.\nCall after a plot has been made, and after margins have been set.", "id": "f11342:c0:m7"} {"signature": "def figure_chronological(self):", "body": "self.log.debug(\"\")self.figure()for sweep in range(self.abf.sweeps):self.abf.setsweep(sweep)self.setColorBySweep()if self.abf.derivative:plt.plot(self.abf.sweepX,self.abf.sweepD,**self.kwargs)else:plt.plot(self.abf.sweepX,self.abf.sweepY,**self.kwargs)self.comments()self.decorate()", "docstring": "plot every sweep of an ABF file (with comments).", "id": "f11342:c0:m9"} {"signature": "def figure_sweeps(self, offsetX=, offsetY=):", "body": "self.log.debug(\"\")self.figure()for sweep in range(self.abf.sweeps):self.abf.setsweep(sweep)self.setColorBySweep()plt.plot(self.abf.sweepX2+sweep*offsetX,self.abf.sweepY+sweep*offsetY,**self.kwargs)if offsetX:self.marginX=self.decorate()", "docstring": "plot every sweep of an ABF file.", "id": "f11342:c0:m11"} {"signature": "def figure_protocol(self):", "body": "self.log.debug(\"\")self.figure()plt.plot(self.abf.protoX,self.abf.protoY,color='')self.marginX=self.decorate(protocol=True)", "docstring": "plot the current sweep protocol.", "id": "f11342:c0:m12"} {"signature": "def figure_protocols(self):", "body": "self.log.debug(\"\")self.figure()for sweep in range(self.abf.sweeps):self.abf.setsweep(sweep)plt.plot(self.abf.protoX,self.abf.protoY,color='')self.marginX=self.decorate(protocol=True)", "docstring": "plot the protocol of all sweeps.", "id": "f11342:c0:m13"} {"signature": "def doStuff(ABFfolder,analyze=False,convert=False,index=True,overwrite=True,launch=True):", "body": "IN=INDEX(ABFfolder)if analyze:IN.analyzeAll()if convert:IN.convertImages()", "docstring": "Inelegant for now, but lets you manually analyze every ABF in a folder.", "id": "f11343:m0"} {"signature": "def analyzeSingle(abfFname):", "body": "assert os.path.exists(abfFname) and abfFname.endswith(\"\")ABFfolder,ABFfname=os.path.split(abfFname)abfID=os.path.splitext(ABFfname)[]IN=INDEX(ABFfolder)IN.analyzeABF(abfID)IN.scan()IN.html_single_basic([abfID],overwrite=True)IN.html_single_plot([abfID],overwrite=True)IN.scan()IN.html_index()return", "docstring": "Reanalyze data for a single ABF. 
Also remakes child and parent html.", "id": "f11343:m1"} {"signature": "def __init__(self,ABFfolder):", "body": "logging.basicConfig(format=swhlab.logFormat, datefmt=swhlab.logDateFormat, level=swhlab.loglevel)self.log = logging.getLogger(\"\")self.log.setLevel(swhlab.loglevel)if not type(ABFfolder) is str or not os.path.isdir(ABFfolder):self.log.error(\"\",ABFfolder)returnelse:self.log.info(\"\",ABFfolder)self.folder1=os.path.abspath(ABFfolder) self.folder2=os.path.abspath(ABFfolder+\"\") if not os.path.isdir(self.folder2):self.log.debug(\"\",self.folder2)os.mkdir(self.folder2)self.scan()self.groups=cm.abfGroups(self.folder1) self.log.debug(\"\",len(self.groups))nChildren=[len(x) for x in self.groups.values()]self.log.debug(\"\",np.average(nChildren))self.groupFiles=cm.abfGroupFiles(self.groups,self.folder2)nChildrenFiles=[len(x) for x in self.groupFiles.values()]self.log.debug(\"\",np.average(nChildrenFiles))", "docstring": "The SWHLab INDEX class allows a web-browsable index of ABF data.\n\nThis is intended to allow:\n * Automatic analysis of ABF files and output of data images\n * Watching of directories and automatic analysis of new ABFs\n * Manipulation and enhancement of micrographs (even multichannel)\n * creation of individual HTML pages for ABFs, cells, and folders\n\nGeneral sequence:\n * convert folder1 TIFs to JPGs\n * analyze data for cells that need it\n * create their individual pages for cells that need it\n * create master index", "id": "f11343:c0:m0"} {"signature": "def scan(self):", "body": "t1=cm.timeit()self.files1=cm.list_to_lowercase(sorted(os.listdir(self.folder1)))self.files2=cm.list_to_lowercase(sorted(os.listdir(self.folder2)))self.files1abf=[x for x in self.files1 if x.endswith(\"\")]self.files1abf=cm.list_to_lowercase(cm.abfSort(self.files1abf))self.IDs=[x[:-] for x in self.files1abf]self.log.debug(\"\",len(self.files1))self.log.debug(\"\",len(self.files1abf))self.log.debug(\"\",len(self.files2))self.log.debug(\"\",cm.timeit(t1))", "docstring": "scan folder1 and folder2 into files1 and files2.\nsince we are on windows, simplify things by making them all lowercase.\nthis WILL cause problems on 'nix operating systems.If this is the case,\njust run a script to rename every file to all lowercase.", "id": "f11343:c0:m1"} {"signature": "def convertImages(self):", "body": "exts=['','']for fname in [x for x in self.files1 if cm.ext(x) in exts]:ID=\"\"if len(fname)> and fname[:] in self.IDs:ID=fname[:]fname2=ID+\"\"+fnameif not fname2 in self.files2:self.log.info(\"\"%fname2)shutil.copy(os.path.join(self.folder1,fname),os.path.join(self.folder2,fname2))if not fname[:]+\"\" in self.files1:self.log.error(\"\",fname)exts=['','']for fname in [x for x in self.files1 if cm.ext(x) in exts]:ID=\"\"if len(fname)> and fname[:] in self.IDs:ID=fname[:]fname2=ID+\"\"+fname+\"\"if not fname2 in self.files2:self.log.info(\"\"%fname2)imaging.TIF_to_jpg(os.path.join(self.folder1,fname),saveAs=os.path.join(self.folder2,fname2))if not fname[:]+\"\" in self.files1:self.log.error(\"\",fname)", "docstring": "run this to turn all folder1 TIFs and JPGs into folder2 data.\nTIFs will be treated as micrographs and converted to JPG with enhanced\ncontrast. 
JPGs will simply be copied over.", "id": "f11343:c0:m2"} {"signature": "def analyzeAll(self):", "body": "searchableData=str(self.files2)self.log.debug(\"\",len(self.IDs))for ID in self.IDs:if not ID+\"\" in searchableData:self.log.debug(\"\",ID)try:self.analyzeABF(ID)except:print(\"\"*)else:self.log.debug(\"\",ID)self.log.debug(\"\",len(self.IDs))", "docstring": "analyze every unanalyzed ABF in the folder.", "id": "f11343:c0:m3"} {"signature": "def analyzeABF(self,ID):", "body": "for fname in self.files2:if fname.startswith(ID+\"\"):self.log.debug(\"\",fname)os.remove(os.path.join(self.folder2,fname))self.log.info(\"\",ID)protocols.analyze(os.path.join(self.folder1,ID+\"\"))", "docstring": "Analye a single ABF: make data, index it.\nIf called directly, will delete all ID_data_ and recreate it.", "id": "f11343:c0:m4"} {"signature": "def htmlFor(self,fname):", "body": "if os.path.splitext(fname)[].lower() in ['','']:html=''%(fname,fname)if \"\" in fname:html=html.replace('','')if \"\" in fname:html=html.replace('','')if \"\" in fname:html=html.replace('','')elif os.path.splitext(fname)[].lower() in ['','']:html=''%fnameelse:html=''%fnamereturn html", "docstring": "return appropriate HTML determined by file extension.", "id": "f11343:c0:m5"} {"signature": "def html_single_basic(self,abfID,launch=False,overwrite=False):", "body": "if type(abfID) is str:abfID=[abfID]for thisABFid in cm.abfSort(abfID):parentID=cm.parent(self.groups,thisABFid)saveAs=os.path.abspath(\"\"%(self.folder2,parentID))if overwrite is False and os.path.basename(saveAs) in self.files2:continuefilesByType=cm.filesByType(self.groupFiles[parentID])html=\"\"html+=''html+=''%parentIDhtml+=''%os.path.abspath(self.folder1+\"\"+parentID+\"\")html+=''catOrder=[\"\",\"\",\"\",\"\"]categories=cm.list_order_by(filesByType.keys(),catOrder)for category in [x for x in categories if len(filesByType[x])]:if category=='':html+=\"\"elif category=='':html+=\"\"elif category=='':html+=\"\"elif category=='':html+=\"\"else:html+=\"\"for fname in filesByType[category]:html+=self.htmlFor(fname)html+=''*print(\"\",saveAs,'')style.save(html,saveAs,launch=launch)", "docstring": "generate a generic flat file html for an ABF parent. 
You could give\nthis a single ABF ID, its parent ID, or a list of ABF IDs.\nIf a child ABF is given, the parent will automatically be used.", "id": "f11343:c0:m6"} {"signature": "def html_single_plot(self,abfID,launch=False,overwrite=False):", "body": "if type(abfID) is str:abfID=[abfID]for thisABFid in cm.abfSort(abfID):parentID=cm.parent(self.groups,thisABFid)saveAs=os.path.abspath(\"\"%(self.folder2,parentID))if overwrite is False and os.path.basename(saveAs) in self.files2:continuefilesByType=cm.filesByType(self.groupFiles[parentID])html=\"\"html+=''html+=''%parentIDhtml+=''%os.path.abspath(self.folder1+\"\"+parentID+\"\")html+=''for fname in filesByType['']:html+=self.htmlFor(fname)print(\"\",saveAs,'')style.save(html,saveAs,launch=launch)", "docstring": "create ID_plot.html of just intrinsic properties.", "id": "f11343:c0:m7"} {"signature": "def filesByExtension(fnames):", "body": "byExt={\"\":[],\"\":[],\"\":[]} for fname in fnames:ext = os.path.splitext(fname)[].replace(\"\",'').lower()if not ext in byExt.keys():byExt[ext]=[]byExt[ext]=byExt[ext]+[fname]return byExt", "docstring": "given a list of files, return a dict organized by extension.", "id": "f11344:m0"} {"signature": "def findCells(fnames):", "body": "IDs=[]filesByExt = filesByExtension(fnames)for abfFname in filesByExt['']:ID=os.path.splitext(abfFname)[]for picFname in filesByExt['']+filesByExt['']:if picFname.startswith(ID):IDs.append(ID)breakreturn smartSort(IDs)", "docstring": "given a list of files, return a list of cells by their ID.\nA cell is indicated when an ABF name matches the start of another file.\n\nExample:\n 123456.abf\n 123456-whatever.tif", "id": "f11344:m1"} {"signature": "def filesByCell(fnames,cells):", "body": "byCell={}fnames=smartSort(fnames)days = list(set([elem[:] for elem in fnames if elem.endswith(\"\")])) for day in smartSort(days):parent=Nonefor i,fname in enumerate([elem for elem in fnames if elem.startswith(day) and elem.endswith(\"\")]):ID=os.path.splitext(fname)[]if len([x for x in fnames if x.startswith(ID)])-:parent=IDif not parent in byCell:byCell[parent]=[]byCell[parent]=byCell[parent]+[fname]return byCell", "docstring": "given files and cells, return a dict of files grouped by cell.", "id": "f11344:m2"} {"signature": "def __init__(self,abfFolder,loglevel=logging.DEBUG):", "body": "self.log = logging.getLogger(\"\")self.log.setLevel(loglevel)if not os.path.isdir(abfFolder):self.log.error(\"\",abfFolder)else:self.folderScan(abfFolder)", "docstring": "This class allows indexing of ABFs in directories.\n\nCell organization:\n Experiments are intended to place all their ABFs in the same \n directory, with a new cell designated by a file ending in .TIF \n matching the same name of a file ending in .abf.\n\nNaming convension:\n abfFolder (./) has the ABFs\n fnames - all the files in this folder\n abfnames - all the abf files in this folder\n cells - all the cell IDs\n abfFolder2 (./swhlab/) has data generated by these scripts\n fnames2 - all the files in this folder\n\nNotes:\n the term ID is used to define the name of a cell as the filename\n of its first abf without the abf extension. 
i.e., 16o01009", "id": "f11344:c0:m0"} {"signature": "def folderScan(self,abfFolder=None):", "body": "if abfFolder is None and '' in dir(self):abfFolder=self.abfFolderelse:self.abfFolder=abfFolderself.abfFolder=os.path.abspath(self.abfFolder)self.log.info(\"\",self.abfFolder)if not os.path.exists(self.abfFolder):self.log.error(\"\",abfFolder)returnself.abfFolder2=os.path.abspath(self.abfFolder+\"\")if not os.path.exists(self.abfFolder2):self.log.error(\"\") os.mkdir(self.abfFolder2)self.fnames=os.listdir(self.abfFolder)self.fnames2=os.listdir(self.abfFolder2)self.log.debug(\"\",len(self.fnames))self.log.debug(\"\",len(self.fnames2))self.fnamesByExt = filesByExtension(self.fnames)if not \"\" in self.fnamesByExt.keys():self.log.error(\"\")self.log.debug(\"\",len(self.fnamesByExt[\"\"]))self.cells=findCells(self.fnames) self.log.debug(\"\"%len(self.cells))self.fnamesByCell = filesByCell(self.fnames,self.cells) self.log.debug(\"\"%str([len(self.fnamesByCell[elem]) for elem in self.fnamesByCell]))", "docstring": "populate class properties relating to files in the folder.", "id": "f11344:c0:m1"} {"signature": "def html_index(self,launch=False,showChildren=False):", "body": "self.makePics() html=''%os.path.basename(self.abfFolder)for ID in smartSort(self.fnamesByCell.keys()):link=''if ID+\"\" in self.fnames2:link=''%IDhtml+=(''%(link,ID)) if showChildren:for fname in self.fnamesByCell[ID]:thisID=os.path.splitext(fname)[]files2=[x for x in self.fnames2 if x.startswith(thisID) and not x.endswith(\"\")]html+=''%thisID if len(files2):html+=''%len(files2) html+=''html+=\"\"style.save(html,self.abfFolder2+\"\")self.html_index_splash() style.frames(self.abfFolder2+\"\",launch=launch)", "docstring": "generate list of cells with links. keep this simple.\nautomatically generates splash page and regnerates frames.", "id": "f11344:c0:m2"} {"signature": "def html_index_splash(self):", "body": "html=\"\"\"\"\"\"%version.__version__for parent in smartSort(self.fnamesByCell.keys()):html+=''%(parent,parent)for child in self.fnamesByCell[parent]:fullpath=os.path.join(self.abfFolder,child)protocol = swhlab.swh_abf.abfProtocol(fullpath)html+=''%(fullpath,protocol)style.save(html,self.abfFolder2+\"\")return", "docstring": "generate landing page.", "id": "f11344:c0:m3"} {"signature": "def html_single_basic(self,ID):", "body": "if not ID in self.cells:self.log.error(\"\",ID)returnhtmlFname=os.path.abspath(self.abfFolder2+\"\"+ID+\"\")html=\"\"%IDnpics=for childID in [os.path.splitext(x)[] for x in self.fnamesByCell[ID]]:pics=[x for x in self.fnames2 if x.startswith(childID) and os.path.splitext(x)[].lower() in [\"\",\"\"]]html+=\"\"%(os.path.abspath(self.abfFolder+''+childID+\"\"))for i,pic in enumerate(pics):html+=''%(pic,pic)npics+=html+=\"\"style.save(html,htmlFname)self.log.info(\"\",htmlFname,npics)", "docstring": "generate ./swhlab/xxyxxzzz.html for a single given abf.\nInput can be an ABF file path of ABF ID.", "id": "f11344:c0:m4"} {"signature": "def html_single_fixed(self,ID):", "body": "return", "docstring": "Single page generator designed for easy gruop comparisons.", "id": "f11344:c0:m5"} {"signature": "def html_singleAll(self,template=\"\"):", "body": "for fname in smartSort(self.cells):if template==\"\":self.html_single_fixed(fname)else:self.html_single_basic(fname)", "docstring": "generate a data view for every ABF in the project folder.", "id": "f11344:c0:m6"} {"signature": "def makePics(self):", "body": "rescanNeeded=Falsefor fname in smartSort(self.fnames):if fname in 
self.fnames2:continueext=os.path.splitext(fname)[].lower()if ext in [\"\",\"\"]:if not fname in self.abfFolder2:self.log.debug(\"\",fname)shutil.copy(os.path.join(self.abfFolder,fname),os.path.join(self.abfFolder2,fname))rescanNeeded=Trueif ext in [\"\",\"\"]:if not fname+\"\" in self.fnames2:self.log.debug(\"\",fname)swhlab.swh_image.TIF_to_jpg(os.path.join(self.abfFolder,fname),saveAs=os.path.join(self.abfFolder2,fname+\"\"))rescanNeeded=Trueif rescanNeeded:self.log.debug(\"\")self.log.debug(\"\")self.folderScan()", "docstring": "convert every .image we find to a ./swhlab/ image", "id": "f11344:c0:m7"} {"signature": "def frames(fname=None,menuWidth=,launch=False):", "body": "html=\"\"\"\"\"\"%(menuWidth)with open(fname,'') as f:f.write(html)if launch:webbrowser.open(fname)", "docstring": "create and save a two column frames HTML file.", "id": "f11345:m0"} {"signature": "def save(html,fname=None,launch=False):", "body": "html=html_top+html+html_bothtml=html.replace(\"\",swhlab.common.datetimeToString())if fname is None:fname = tempfile.gettempdir()+\"\"launch=Truefname=os.path.abspath(fname)with open(fname,'') as f:f.write(html)global stylesheetSavedstylesheetPath=os.path.join(os.path.dirname(fname),\"\")if not os.path.exists(stylesheetPath) or stylesheetSaved is False:with open(stylesheetPath,'') as f:f.write(stylesheet)stylesheetSaved=Trueif launch:webbrowser.open(fname)", "docstring": "wrap HTML in a top and bottom (with css) and save to disk.", "id": "f11345:m1"} {"signature": "def TIF_to_jpg(fnameTiff, overwrite=False, saveAs=\"\"):", "body": "if saveAs == \"\":saveAs=fnameTiff+\"\"if overwrite is False and os.path.exists(saveAs):print(\"\")returnimg=pylab.imread(fnameTiff)img=img/np.max(img) hist1,bins1=np.histogram(img.ravel(),bins=, range=(,))if np.average(img)<:vmin=Nonevmax=Nonemsg=\"\"while np.average(img)<:img=np.sqrt(img)msg+=\"\"else:msg=\"\"percentile=vmin=np.percentile(img.ravel(),percentile)vmax=np.percentile(img.ravel(),-percentile)hist2,bins2=np.histogram(img.ravel(),bins=, range=(,))fig=pylab.figure(facecolor='')fig.gca().imshow(img,cmap=pylab.gray(),vmin=vmin,vmax=vmax)pylab.subplots_adjust(top=, bottom=, right=, left=, hspace=, wspace=)pylab.gca().xaxis.set_major_locator(pylab.NullLocator())pylab.gca().yaxis.set_major_locator(pylab.NullLocator())pylab.axis('')fig.set_size_inches(img.shape[]/, img.shape[]/)msg=\"\"%(os.path.basename(fnameTiff),datetime.datetime.fromtimestamp(os.path.getmtime(fnameTiff)))+msgcenter=pylab.text(center,center,\"\"%(msg),va=\"\",color='',size='',family='',weight='',bbox=dict(facecolor='', alpha=))scaleWidthPx=Falseif \"\" in fnameTiff:scaleWidthPx,scaleBarText=,\"\"if \"\" in fnameTiff:scaleWidthPx,scaleBarText=,\"\"if scaleWidthPx:scaleBarPadding=x2,y2=img.shape[]-scaleBarPadding,img.shape[]-scaleBarPaddingx1,y1=x2-scaleWidthPx,y2for offset,color,alpha in [[,'',],[,'',]]:pylab.plot([x1+offset,x2+offset],[y1+offset,y2+offset],'',color=color,lw=,alpha=alpha)pylab.text((x1+x2)/+offset,y1-+offset,scaleBarText,color=color,ha=\"\",weight=\"\",alpha=alpha,size=\"\",va=\"\",family=\"\")pylab.savefig(saveAs,dpi=)pylab.close()", "docstring": "given a TIF taken by our cameras, make it a pretty labeled JPG.\n\nif the filename contains \"f10\" or \"f20\", add appropraite scale bars.\n\nautomatic contrast adjustment is different depending on if its a DIC\nimage or fluorescent image (which is detected automatically).", "id": "f11346:m0"} {"signature": "def TIF_to_jpg_all(path):", "body": "for fname in 
sorted(glob.glob(path+\"\")):print(fname)TIF_to_jpg(fname)", "docstring": "run TIF_to_jpg() on every TIF of a folder.", "id": "f11346:m1"} {"signature": "def processArgs():", "body": "if len(sys.argv)<:print(\"\")print(\"\")print('')returnif sys.argv[]=='':print(\"\",\"\".join(sys.path))print()print(\"\",sys.version)print(\"\",__file__)print(\"\",swhlab.__version__)returnif sys.argv[]=='':abfFolder=swhlab.common.gui_getFolder()if not abfFolder or not os.path.isdir(abfFolder):print(\"\")returnfnames=sorted(glob.glob(abfFolder+\"\"))outFolder=tempfile.gettempdir()+\"\"if os.path.exists(outFolder):shutil.rmtree(outFolder)os.mkdir(outFolder)outFile=outFolder+\"\"out=''out+=''%abfFolderfor i,fname in enumerate(fnames):print(\"\"%(i,len(fnames)))saveAs=os.path.join(os.path.dirname(outFolder),os.path.basename(fname))+\"\"out+=''%os.path.abspath(fname)out+=''%(saveAs,saveAs)swhlab.analysis.glance.processAbf(fname,saveAs)out+=''with open(outFile,'') as f:f.write(out)webbrowser.open_new_tab(outFile)returnprint(\"\")print(sys.argv)", "docstring": "check out the arguments and figure out what to do.", "id": "f11347:m0"} {"signature": "def where_cross(data,threshold):", "body": "Is=np.where(data>threshold)[]Is=np.concatenate(([],Is))Ds=Is[:-]-Is[:]+return Is[np.where(Ds)[]+]", "docstring": "return a list of Is where the data first crosses above threshold.", "id": "f11348:m0"} {"signature": "def kernel_gaussian(size=, sigma=None, forwardOnly=False):", "body": "if sigma is None:sigma=size/size=int(size)points=np.exp(-np.power(np.arange(size)-size/,)/(*np.power(sigma,)))if forwardOnly:points[:int(len(points)/)]=return points/sum(points)", "docstring": "return a 1d gassuan array of a given size and sigma.\nIf sigma isn't given, it will be 1/10 of the size, which is usually good.\nNote that this is fully numpy, and doesn't use scipy.", "id": "f11348:m1"} {"signature": "def lowpass(data,filterSize=None):", "body": "if filterSize is None:filterSize=len(data)/kernel=kernel_gaussian(size=filterSize)data=convolve(data,kernel) return data", "docstring": "minimal complexity low-pass filtering.\nFilter size is how \"wide\" the filter will be.\nSigma will be 1/10 of this filter width.\nIf filter size isn't given, it will be 1/10 of the data size.", "id": "f11348:m2"} {"signature": "def convolve(signal,kernel):", "body": "pad=np.ones(len(kernel)/)signal=np.concatenate((pad*signal[],signal,pad*signal[-]))signal=np.convolve(signal,kernel,mode='')signal=signal[len(pad):-len(pad)]return signal", "docstring": "This applies a kernel to a signal through convolution and returns the result.\n\nSome magic is done at the edges so the result doesn't apprach zero:\n 1. extend the signal's edges with len(kernel)/2 duplicated values\n 2. perform the convolution ('same' mode)\n 3. slice-off the ends we added\n 4. return the same number of points as the original", "id": "f11348:m3"} {"signature": "def waitFor(sec=):", "body": "while sec:print(\"\",sec,\"\")sec-=time.sleep()", "docstring": "wait a given number of seconds until returning.", "id": "f11348:m4"} {"signature": "def pause():", "body": "input(\"\")", "docstring": "halt everything until user input. 
Use this sparingly.", "id": "f11348:m5"} {"signature": "def exceptionToString(e):", "body": "exc_type, exc_obj, exc_tb = sys.exc_info()s=\"\"s+=\"\"%os.path.split(exc_tb.tb_frame.f_code.co_filename)[]s+=\"\"%exc_tb.tb_linenos+=\"\"%exc_typereturn s", "docstring": "when you \"except Exception as e\", give me the e and I'll give you a string.", "id": "f11348:m6"} {"signature": "def isIpython():", "body": "try:if str(__IPYTHON__):return Trueexcept:return False", "docstring": "returns True if running in an Ipython interpreter.", "id": "f11348:m7"} {"signature": "def timeit(timer=None):", "body": "if timer is None:return time.time()else:took=time.time()-timerif took<:return \"\"%(took*)elif took<:return \"\"%(took)else:return \"\"%(took/)", "docstring": "simple timer. returns a time object, or a string.", "id": "f11348:m8"} {"signature": "def list_move_to_front(l,value=''):", "body": "l=list(l)if value in l:l.remove(value)l.insert(,value)return l", "docstring": "if the value is in the list, move it to the front and return it.", "id": "f11348:m12"} {"signature": "def list_move_to_back(l,value=''):", "body": "l=list(l)if value in l:l.remove(value)l.append(value)return l", "docstring": "if the value is in the list, move it to the back and return it.", "id": "f11348:m13"} {"signature": "def list_order_by(l,firstItems):", "body": "l=list(l)for item in firstItems[::-]: if item in l:l.remove(item)l.insert(,item)return l", "docstring": "given a list and a list of items to be first, return the list in the\n same order except that it begins with each of the first items.", "id": "f11348:m14"} {"signature": "def list_to_lowercase(l):", "body": "return [x.lower() for x in l if type(x) is str]", "docstring": "given a list of strings, make them all lowercase.", "id": "f11348:m15"} {"signature": "def ext(fname):", "body": "if \"\" in fname:return os.path.splitext(fname)[]return fname", "docstring": "return the extension of a filename.", "id": "f11348:m16"} {"signature": "def abfSort(IDs):", "body": "IDs=list(IDs)monO=[]monN=[]monD=[]good=[]for ID in IDs:if ID is None:continueif '' in ID:monO.append(ID)elif '' in ID:monN.append(ID)elif '' in ID:monD.append(ID)else:good.append(ID)return sorted(good)+sorted(monO)+sorted(monN)+sorted(monD)", "docstring": "given a list of goofy ABF names, return it sorted intelligently.\nThis places things like 16o01001 after 16901001.", "id": "f11348:m17"} {"signature": "def abfGroups(abfFolder):", "body": "files=Falseif type(abfFolder) is str and os.path.isdir(abfFolder):files=abfSort(os.listdir(abfFolder))elif type(abfFolder) is list:files=abfSort(abfFolder)assert type(files) is listfiles=list_to_lowercase(files)abfs, IDs, others, parents, days = [],[],[],[],[]for fname in files:if fname.endswith(\"\"):abfs.append(fname)IDs.append(fname[:-])days.append(fname[:])else:others.append(fname)for ID in IDs:for fname in others:if fname.startswith(ID):parents.append(ID)parents=abfSort(set(parents)) days=abfSort(set(days)) groups={}for day in days:parent=Nonefor fname in [x for x in abfs if x.startswith(day)]:ID=fname[:-]if ID in parents:parent=IDif not parent in groups.keys():groups[parent]=[]groups[parent].extend([ID])return groups", "docstring": "Given a folder path or list of files, return groups (dict) by cell.\n\nRules which define parents (cells):\n * assume each cell has one or several ABFs\n * that cell can be labeled by its \"ID\" or \"parent\" ABF (first abf)\n * the ID is just the filename of the first abf without .abf\n * if any file starts with an \"ID\", that ID becomes a 
parent.\n * examples could be 16o14044.TIF or 16o14044-cell1-stuff.jpg\n * usually this is done by saving a pic of the cell with same filename\n\nReturns a dict of \"parent IDs\" representing the \"children\"\n groups[\"16o14041\"] = [\"16o14041\",\"16o14042\",\"16o14043\"]\n\nFrom there, getting children files is trivial. Just find all files in\nthe same folder whose filenames begin with one of the children.", "id": "f11348:m18"} {"signature": "def abfGroupFiles(groups,folder):", "body": "assert os.path.exists(folder)files=os.listdir(folder)group2={}for parent in groups.keys():if not parent in group2.keys():group2[parent]=[]for ID in groups[parent]:for fname in [x.lower() for x in files if ID in x.lower()]:group2[parent].extend([fname])return group2", "docstring": "when given a dictionary where every key contains a list of IDs, replace\nthe keys with the list of files matching those IDs. This is how you get a\nlist of files belonging to each child for each parent.", "id": "f11348:m19"} {"signature": "def parent(groups,ID):", "body": "if ID in groups.keys():return ID if not ID in groups.keys():for actualParent in groups.keys():if ID in groups[actualParent]:return actualParent return None", "docstring": "given a groups dictionary and an ID, return its actual parent ID.", "id": "f11348:m20"} {"signature": "def filesByType(fileList):", "body": "features=[\"\",\"\",\"\",\"\",\"\"]files={}for feature in features:files[feature]=[]for fname in fileList:other=Truefor feature in features:if \"\"+feature+\"\" in fname:files[feature].extend([fname])other=Falseif other:files[''].extend([fname])return files", "docstring": "given a list of files, return them as a dict sorted by type:\n * plot, tif, data, other", "id": "f11348:m21"} {"signature": "def userFolder():", "body": "path=os.path.expanduser(\"\")+\"\" if not os.path.exists(path):print(\"\",path)os.mkdir(path)return os.path.abspath(path)", "docstring": "return the semi-temporary user folder", "id": "f11348:m22"} {"signature": "def abfFname_Load():", "body": "fname=userFolder()+\"\"if os.path.exists(fname):abfFname=open(fname).read().strip()if os.path.exists(abfFname) or abfFname.endswith(\"\"):return abfFnamereturn os.path.abspath(os.sep)", "docstring": "return the path of the last loaded ABF.", "id": "f11348:m23"} {"signature": "def abfFname_Save(abfFname):", "body": "fname=userFolder()+\"\"with open(fname,'') as f:f.write(os.path.abspath(abfFname))return", "docstring": "return the path of the last loaded ABF.", "id": "f11348:m24"} {"signature": "def gui_getFile():", "body": "import tkinter as tkfrom tkinter import filedialogroot = tk.Tk() root.withdraw() root.wm_attributes('', ) fname = filedialog.askopenfilename(title = \"\",filetypes=[('', '')],initialdir=os.path.dirname(abfFname_Load()))if fname.endswith(\"\"):abfFname_Save(fname)return fnameelse:print(\"\")return None", "docstring": "Launch an ABF file selection file dialog.\nThis is smart, and remembers (through reboots) where you last were.", "id": "f11348:m25"} {"signature": "def gui_getFolder():", "body": "import tkinter as tkfrom tkinter import filedialogroot = tk.Tk() root.withdraw() root.wm_attributes('', ) fname = filedialog.askdirectory(title = \"\",initialdir=os.path.dirname(abfFname_Load()))if len(fname)>:abfFname_Save(fname+\"\")return fnameelse:print(\"\")return None", "docstring": "Launch a folder selection dialog.\nThis is smart, and remembers (through reboots) where you last were.", "id": "f11348:m26"} {"signature": "def tryLoadingFrom(tryPath,moduleName=''):", "body": "if 
not '' in swhlab.__file__:print(\"\",os.path.dirname(swhlab.__file__))return while len(tryPath)>:sp=tryPath+\"\" if os.path.isdir(sp) and os.path.exists(sp+\"\"):if not os.path.dirname(tryPath) in sys.path:sys.path.insert(,os.path.dirname(tryPath))print(\"\"*)print(\"\")print(\"\"*)tryPath=os.path.dirname(tryPath)return", "docstring": "if the module is in this path, load it from the local folder.", "id": "f11349:m0"} {"signature": "def abfIDfromFname(fname):", "body": "fname=os.path.abspath(fname)basename=os.path.basename(fname)return os.path.splitext(basename)[]", "docstring": "given a filename, return the ABFs ID string.", "id": "f11350:m0"} {"signature": "def abfProtocol(fname):", "body": "f=open(fname,'')raw=f.read(*) f.close()raw=raw.decode(\"\",\"\")raw=raw.split(\"\")[].split(\"\")[]protocol = os.path.basename(raw) protocolID = protocol.split(\"\")[] return protocolID", "docstring": "Determine the protocol used to record an ABF file", "id": "f11350:m1"} {"signature": "def headerHTML(header,fname):", "body": "html=\"\"html+=\"\"%(fname)html+=pprint.pformat(header, indent=)html=html.replace(\"\",'').replace(\"\",\"\")html=html.replace(r\"\",\"\")html+=\"\"print(\"\",fname)f=open(fname,'')f.write(html)f.close()webbrowser.open(fname)", "docstring": "given the bytestring ABF header, make and launch HTML.", "id": "f11350:m2"} {"signature": "def __init__(self, fname, createFolder=False):", "body": "logging.basicConfig(format=swhlab.logFormat, datefmt=swhlab.logDateFormat, level=swhlab.loglevel)self.log = logging.getLogger(\"\")self.log.setLevel(swhlab.loglevel)if \"\" in str(fname):self.log.debug(\"\")for item in sorted(dir(fname)):try:setattr(self,item,getattr(fname,item))except:passreturnself.log.debug(\"\"*)self.log.info(\"\",swhlab.__version__,str(fname))if not os.path.exists(str(fname)):self.log.error(\"\")returnself.ABFreader = io.AxonIO(filename=fname)self.ABFblock = self.ABFreader.read_block(lazy=False, cascade=True)self.header=self.ABFreader.read_header()self.protocomment=abfProtocol(fname) self.ID=abfIDfromFname(fname) self.filename=os.path.abspath(fname) self.fileID=os.path.abspath(os.path.splitext(self.filename)[]) self.outFolder=os.path.abspath(os.path.dirname(fname)+\"\") self.outPre=os.path.join(self.outFolder,self.ID)+'' self.sweeps=self.ABFblock.size[\"\"] self.timestamp=self.ABFblock.rec_datetime self.holding = self.header[''][][''] self.derivative=False self.setsweep() self.comments_load() self.kernel=None if createFolder:self.output_touch() self.log.debug(\"\"%self.protocomment)", "docstring": "Load an ABF and makes its stats and sweeps easily available.\n\nArguments:\n fname - filename of an ABF object\n createFolder - if True, the ./swhlab/ folder will be created", "id": "f11350:c0:m0"} {"signature": "def setsweep(self, sweep=, channel=):", "body": "try:sweep=int(sweep)except:self.log.error(\"\",sweep)returnif sweep<:sweep=self.sweeps--sweep sweep=max(,min(sweep,self.sweeps-)) if '' in dir(self) and self.sweep == sweep and self.derivative is False:self.log.debug(\"\",sweep)returnself.channels=self.ABFblock.segments[sweep].size[\"\"]if self.channels> and sweep==:self.log.info(\"\") self.trace = self.ABFblock.segments[sweep].analogsignals[channel]self.sweep=sweep self.channel=channel self.rate = int(self.trace.sampling_rate) self.period = float(/self.rate) self.pointsPerSec = int(self.rate) self.pointsPerMs = int(self.rate/) self.sweepSize = len(self.trace) self.sweepInterval = self.trace.duration.magnitude self.sweepLength = float(self.trace.t_stop-self.trace.t_start) 
self.length = self.sweepLength*self.sweeps self.lengthMinutes = self.length/ if str(self.trace.dimensionality) == '':self.units,self.units2=\"\",\"\"self.unitsD,self.unitsD2=\"\",\"\"self.protoUnits,self.protoUnits2=\"\",\"\"elif str(self.trace.dimensionality) == '':self.units,self.units2=\"\",\"\"self.unitsD,self.unitsD2=\"\",\"\"self.protoUnits,self.protoUnits2=\"\",\"\"else:self.units,self.units2=\"\",\"\"self.unitsD,self.unitsD2=\"\",\"\"self.sweepY = self.trace.magnitude self.sweepT = self.trace.times.magnitude self.sweepStart = float(self.trace.t_start) self.sweepX2 = self.sweepT-self.trace.t_start.magnitude self.sweepX = self.sweepX2+sweep*self.sweepInterval if self.derivative:self.log.debug(\"\")self.sweepD=self.sweepY[:]-self.sweepY[:-] self.sweepD=np.insert(self.sweepD,,self.sweepD[]) self.sweepD/=(self.period*) else:self.sweepD=[] self.generate_protocol()", "docstring": "set the sweep and channel of an ABF. Both start at 0.", "id": "f11350:c0:m1"} {"signature": "def sweepList(self):", "body": "return range(self.sweeps)", "docstring": "return a list of sweep numbers.", "id": "f11350:c0:m2"} {"signature": "def setsweeps(self):", "body": "for sweep in range(self.sweeps):self.setsweep(sweep)yield self.sweep", "docstring": "iterate over every sweep", "id": "f11350:c0:m3"} {"signature": "def comments_load(self):", "body": "self.comment_times,self.comment_sweeps,self.comment_tags=[],[],[]self.comments= self.comment_text=\"\"try:self.comment_tags = list(self.ABFblock.segments[].eventarrays[].annotations[''])self.comment_times = list(self.ABFblock.segments[].eventarrays[].times/self.trace.itemsize)self.comment_sweeps = list(self.comment_times)except:for events in self.ABFblock.segments[].events: self.comment_tags = events.annotations[''].tolist()self.comment_times = np.array(events.times.magnitude/self.trace.itemsize)self.comment_sweeps = self.comment_times/self.sweepIntervalfor i,c in enumerate(self.comment_tags):self.comment_tags[i]=c.decode(\"\")", "docstring": "read the header and populate self with information about comments", "id": "f11350:c0:m4"} {"signature": "def generate_protocol(self):", "body": "self.offsetX = int(self.sweepSize/)if not len(self.header['']):self.log.debug(\"\")self.protoX,self.protoY=[,self.sweepX[-]],[self.holding,self.holding]self.protoSeqX,self.protoSeqY=[],[self.holding]returnproto=self.header[''][self.channel]self.protoX,self.protoY=[] ,[]self.protoX.append()self.protoY.append(self.holding) for step in proto:dX = proto[step]['']Y = proto[step]['']+proto[step]['']*self.sweepself.protoX.append(self.protoX[-])self.protoY.append(Y)self.protoX.append(self.protoX[-]+dX)self.protoY.append(Y)finalVal=self.holding if self.header[''][]['']:finalVal=self.protoY[-]self.protoX.append(self.protoX[-])self.protoY.append(finalVal)self.protoX.append(self.sweepSize)self.protoY.append(finalVal)for i in range(,len(self.protoX)-): self.protoX[i]=self.protoX[i]+self.offsetXself.protoSeqY=[self.protoY[]]self.protoSeqX=[self.protoX[]]for i in range(,len(self.protoY)):if not self.protoY[i]==self.protoY[i-]:self.protoSeqY.append(self.protoY[i])self.protoSeqX.append(self.protoX[i])if self.protoY[]!=self.protoY[]:self.protoY.insert(,self.protoY[])self.protoX.insert(,self.protoX[])self.protoY.insert(,self.protoY[])self.protoX.insert(,self.protoX[]+self.offsetX/)self.protoSeqY.append(finalVal)self.protoSeqX.append(self.sweepSize)self.protoX=np.array(self.protoX)/self.pointsPerSecself.protoY=np.array(self.protoY)", "docstring": "Recreate the command stimulus (protocol) for the current 
sweep.\nIt's not stored point by point (that's a waste of time and memory!)\nInstead it's stored as a few (x,y) points which can be easily graphed.\n\nTODO: THIS\nfor segment in abf.ABFreader.read_protocol():\n for analogsignal in segment.analogsignals:\n print(analogsignal)\n plt.plot(analogsignal)\n plt.show()\n plt.close('all')", "id": "f11350:c0:m5"} {"signature": "def get_protocol(self,sweep):", "body": "self.setsweep(sweep)return list(self.protoX),list(self.protoY)", "docstring": "given a sweep, return the protocol as [Xs,Ys].\nThis is good for plotting/recreating the protocol trace.\nThere may be duplicate numbers.", "id": "f11350:c0:m6"} {"signature": "def get_protocol_sequence(self,sweep):", "body": "self.setsweep(sweep)return list(self.protoSeqX),list(self.protoSeqY)", "docstring": "given a sweep, return the protocol as condensed sequence.\nThis is better for comparing similarities and determining steps.\nThere should be no duplicate numbers.", "id": "f11350:c0:m7"} {"signature": "def clamp_values(self,timePoint=):", "body": "print(\"\") return ", "docstring": "return an array of command values at a time point (in sec).\nUseful for things like generating I/V curves.", "id": "f11350:c0:m8"} {"signature": "def epochTimes(self,nEpoch=):", "body": "times=[]durations=[]for epoch in self.header[''][self.channel].values():print(epoch['']/self.pointsPerSec)times.append(sum(durations))durations.append(epoch['']/self.pointsPerSec)times.append(sum(durations))times=np.array(times)+self.offsetX/self.pointsPerSec if nEpoch:return times[nEpoch],times[nEpoch+]else:return times", "docstring": "alternative to the existing abf protocol stuff\nreturn the start/stop time of an epoch.\nEpoch start at zero.\nA=0, B=1, C=2, D=3, ...", "id": "f11350:c0:m9"} {"signature": "def average(self,t1=,t2=None,setsweep=False):", "body": "if setsweep:self.setsweep(setsweep)if t2 is None or t2>self.sweepLength:t2=self.sweepLengthself.log.debug(\"\",t2)t1=max(t1,)if t1>t2:self.log.error(\"\")return FalseI1,I2=int(t1*self.pointsPerSec),int(t2*self.pointsPerSec)if I1==I2:return np.nanreturn np.average(self.sweepY[I1:I2])", "docstring": "return the average of part of the current sweep.", "id": "f11350:c0:m10"} {"signature": "def averageSweep(self,sweepFirst=,sweepLast=None):", "body": "if sweepLast is None:sweepLast=self.sweeps-nSweeps=sweepLast-sweepFirst+runningSum=np.zeros(len(self.sweepY))self.log.debug(\"\",sweepFirst,sweepLast)for sweep in np.arange(nSweeps)+sweepFirst:self.setsweep(sweep)runningSum+=self.sweepY.flatten()average=runningSum/nSweepsreturn average", "docstring": "Return a sweep which is the average of multiple sweeps.\nFor now, standard deviation is lost.", "id": "f11350:c0:m11"} {"signature": "def kernel_gaussian(self, sizeMS, sigmaMS=None, forwardOnly=False):", "body": "sigmaMS=sizeMS/ if sigmaMS is None else sigmaMSsize,sigma=sizeMS*self.pointsPerMs,sigmaMS*self.pointsPerMsself.kernel=swhlab.common.kernel_gaussian(size,sigma,forwardOnly)return self.kernel", "docstring": "create kernel based on this ABF info.", "id": "f11350:c0:m12"} {"signature": "def sweepYfiltered(self):", "body": "assert self.kernel is not Nonereturn swhlab.common.convolve(self.sweepY,self.kernel)", "docstring": "Get the filtered sweepY of the current sweep.\nOnly works if self.kernel has been generated.", "id": "f11350:c0:m13"} {"signature": "def sweepYsmartbase(self):", "body": "return self.sweepY-self.sweepYfiltered()", "docstring": "return the sweep with sweepYfiltered subtracted from it.", "id": "f11350:c0:m14"} 
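The kernel_gaussian, convolve, sweepYfiltered and sweepYsmartbase records above describe a "smart baseline" scheme: smooth the sweep with a normalized gaussian kernel (sigma defaulting to 1/10 of the kernel size), pad the signal's edges with duplicated end values so the smoothed trace does not sag toward zero, then subtract that low-pass copy from the raw sweep. Below is a minimal, self-contained sketch of that idea; the 500-point kernel width and the synthetic sweep are illustrative assumptions, not values taken from the library.

    import numpy as np

    def kernel_gaussian(size=100, sigma=None, forward_only=False):
        # 1D gaussian kernel; sigma defaults to 1/10 of the kernel size
        if sigma is None:
            sigma = size / 10
        size = int(size)
        points = np.exp(-np.power(np.arange(size) - size / 2, 2) / (2 * sigma ** 2))
        if forward_only:
            points[:size // 2] = 0  # zero the "past" half for a causal filter
        return points / points.sum()  # normalize so the kernel sums to 1

    def convolve(signal, kernel):
        # pad both edges with duplicated end values so the result doesn't approach zero,
        # convolve in 'same' mode, then slice the padding back off
        pad = np.ones(len(kernel) // 2)
        padded = np.concatenate((pad * signal[0], signal, pad * signal[-1]))
        smoothed = np.convolve(padded, kernel, mode='same')
        return smoothed[len(pad):-len(pad)]

    # "smart baseline": subtract a heavily smoothed copy from the raw sweep
    sweepY = np.random.normal(0, 1, 20000)   # stand-in for abf.sweepY
    kernel = kernel_gaussian(size=500)       # assumed kernel width
    baseline = convolve(sweepY, kernel)      # role of sweepYfiltered()
    flattened = sweepY - baseline            # role of sweepYsmartbase()

Subtracting the smoothed copy leaves fast transients riding on a flat baseline, which is what the event-detection and phasic routines later in this corpus operate on.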
{"signature": "def phasicNet(self,biggestEvent=,m1=,m2=None):", "body": "m1= if m1 is None else self.pointsPerSec*m1m2=- if m2 is None else self.pointsPerSec*m2Y=self.sweepYsmartbase()[int(m1):int(m2)]nBins=hist,bins=np.histogram(Y,bins=nBins,range=[-biggestEvent,biggestEvent],density=True)histSmooth=swhlab.common.lowpass(hist)peakI=np.where(histSmooth==max(histSmooth))[][]hist=np.roll(hist,int(nBins/-peakI))histSmooth=np.roll(histSmooth,int(nBins/-peakI))downward,upward=np.split(histSmooth,)downward=downward[::-]diff=np.sum(upward-downward)diff=diff/(len(Y)/self.pointsPerSec)return diff", "docstring": "Calculates the net difference between positive/negative phasic events\nReturns return the phasic difference value of the current sweep.\n\nArguments:\n biggestEvent (int): the size of the largest event anticipated\n m1 (int, optional): the time (sec) to start analyzing\n m2 (int, optional): the time (sec) to end analyzing\n\nExample:\n abf=swhlab.ABF(abfFile)\n abf.kernel=abf.kernel_gaussian(sizeMS=500) # kernel for smart baseline\n diff=[]\n for sweep in abf.setsweeps():\n print(\"Sweep\",sweep)\n diff.append(analyzeSweep(abf,plot=True,label=\"sweep %d\"%sweep))\n print(diff)", "id": "f11350:c0:m15"} {"signature": "def output_touch(self):", "body": "if not os.path.exists(self.outFolder):self.log.debug(\"\",self.outFolder)os.mkdir(self.outFolder)", "docstring": "ensure the ./swhlab/ folder exists.", "id": "f11350:c0:m16"} {"signature": "def output_clean(self):", "body": "for fname in glob.glob(self.outPre+\"\"):print(\"\",fname)pass", "docstring": "delete all ./swhlab/ data related to this ABF.", "id": "f11350:c0:m17"} {"signature": "def inspect(self):", "body": "webinspect.blacklist=[] webinspect.launch(self.ABFblock.segments[].eventarrays[],'')webinspect.blacklist=[''] webinspect.launch(self.ABFblock.segments[].analogsignals[],'')webinspect.blacklist=['',''] webinspect.launch(self.ABFblock.segments[],'')webinspect.blacklist=[] webinspect.launch(self.ABFblock,'')webinspect.blacklist=[] webinspect.launch(self.ABFreader,'')headerFile=r\"\"headerHTML(self.header,headerFile)", "docstring": "Generate HTML containing information about NeoIO objects.\nThis is useful when trying to figure out how to extract data from ABFs.", "id": "f11350:c0:m18"} {"signature": "def proto_unknown(theABF):", "body": "abf=ABF(theABF)abf.log.info(\"\")plot=ABFplot(abf)plot.rainbow=Falseplot.title=Noneplot.figure_height,plot.figure_width=SQUARESIZE,SQUARESIZEplot.kwargs[\"\"]=plot.figure_chronological()plt.gca().set_axis_bgcolor('') frameAndSave(abf,\"\")", "docstring": "protocol: unknown.", "id": "f11351:m0"} {"signature": "def proto_0111(theABF):", "body": "abf=ABF(theABF)abf.log.info(\"\")ap=AP(abf)ap.detect()abf.derivative=Trueplt.figure(figsize=(SQUARESIZE,SQUARESIZE))ax1=plt.subplot()plt.ylabel(abf.units2)ax2=plt.subplot(,sharey=ax1)ax3=plt.subplot()plt.ylabel(abf.unitsD2)ax4=plt.subplot(,sharey=ax3)for sweep in range(abf.sweeps):abf.setsweep(sweep)ax1.plot(abf.sweepX,abf.sweepY,color='',lw=)ax2.plot(abf.sweepX,abf.sweepY,color='')ax3.plot(abf.sweepX,abf.sweepD,color='',lw=)ax4.plot(abf.sweepX,abf.sweepD,color='')for ax in [ax1,ax2,ax3,ax4]: ax.margins(,)ax.grid(alpha=)for ax in [ax3,ax4]: ax.axhline(-,color='',alpha=,ls=\"\",lw=)for ax in [ax2,ax4]: ax.get_yaxis().set_visible(False)if len(ap.APs):firstAP=ap.APs[][\"\"]ax2.axis([firstAP-,firstAP+,None,None])ax4.axis([firstAP-,firstAP+,None,None])if len(ap.APs):firstAP=ap.APs[]msg=\"\".join([\"\"%(x,str(firstAP[x])) for x in sorted(firstAP.keys()) if not \"\" in 
x[-:]])plt.subplot()plt.gca().text(, , msg, transform= plt.gca().transAxes, fontsize=, verticalalignment='', family='')plt.tight_layout()frameAndSave(abf,\"\")plt.close('')", "docstring": "protocol: IC ramp for AP shape analysis.", "id": "f11351:m2"} {"signature": "def proto_gain(theABF,stepSize=,startAt=-):", "body": "abf=ABF(theABF)abf.log.info(\"\")plot=ABFplot(abf)plot.kwargs[\"\"]=plot.title=\"\"currents=np.arange(abf.sweeps)*stepSize-startAtap=AP(abf)ap.detect_time1=ap.detect_time2=ap.detect()plt.figure(figsize=(SQUARESIZE,SQUARESIZE))ax1=plt.subplot()plot.figure_sweeps()ax2=plt.subplot()ax2.get_yaxis().set_visible(False)plot.figure_sweeps(offsetY=)for ax in [ax1,ax2]:for limit in [ap.detect_time1,ap.detect_time2]:ax.axvline(limit,color='',ls='',alpha=,lw=)ax4=plt.subplot()plt.ylabel(\"\")plt.ylabel(\"\")plt.grid(alpha=)freqs=ap.get_bySweep(\"\")times=ap.get_bySweep(\"\")for i in range(abf.sweeps):if len(freqs[i]):plt.plot(times[i][:-],freqs[i],'',alpha=,lw=,color=plot.getColor(i/abf.sweeps))ax4=plt.subplot()ax4.grid(alpha=)plt.plot(currents,ap.get_bySweep(\"\"),'',label=\"\")plt.plot(currents,ap.get_bySweep(\"\"),'',label=\"\")plt.xlabel(\"\")plt.legend(loc=,fontsize=)plt.axhline(,color='',alpha=,ls=\"\",lw=)plt.margins(,)plt.tight_layout()frameAndSave(abf,\"\"%(startAt,stepSize))plt.close('')plt.figure(figsize=(SQUARESIZE,SQUARESIZE))plt.grid(alpha=)plt.ylabel(\"\")plt.xlabel(\"\")for sweep in abf.setsweeps():plt.plot(abf.sweepX2,abf.sweepY,color='',alpha=)if np.max(abf.sweepY>):breakplt.tight_layout()plt.margins(,)plt.axis([,,None,None])plt.title(\"\"%stepSize)frameAndSave(abf,\"\",closeWhenDone=False)plt.axis([,,None,None])plt.title(\"\"%(stepSize,startAt))frameAndSave(abf,\"\",closeWhenDone=False)plt.close('')", "docstring": "protocol: gain function of some sort. 
step size and start at are pA.", "id": "f11351:m3"} {"signature": "def proto_0201(theABF):", "body": "abf=ABF(theABF)abf.log.info(\"\")plot=ABFplot(abf)plot.figure_height,plot.figure_width=SQUARESIZE/,SQUARESIZE/plot.figure_sweeps()plt.tight_layout()frameAndSave(abf,\"\")plt.close('')", "docstring": "protocol: membrane test.", "id": "f11351:m7"} {"signature": "def proto_0202(theABF):", "body": "abf=ABF(theABF)abf.log.info(\"\")plot=ABFplot(abf)plot.figure_height,plot.figure_width=SQUARESIZE,SQUARESIZEplot.title=\"\"plot.kwargs[\"\"]=plot.figure_sweeps()abf.setsweep()plt.axis([None,None,abf.average(,)-,None])abf.setsweep(-)plt.axis([None,None,None,abf.average(,)+])plt.tight_layout()frameAndSave(abf,\"\")plt.close('')", "docstring": "protocol: MTIV.", "id": "f11351:m8"} {"signature": "def proto_0203(theABF):", "body": "abf=ABF(theABF)abf.log.info(\"\")plot=ABFplot(abf)plot.title=\"\"m1,m2=,plt.figure(figsize=(SQUARESIZE,SQUARESIZE/))plt.subplot()plot.figure_sweeps()plt.axvspan(m1,m2,color='',ec=None,alpha=)plt.subplot()plt.grid(alpha=)Xs=np.arange(abf.sweeps)*-Ys=[]for sweep in range(abf.sweeps):abf.setsweep(sweep)Ys.append(abf.average(m1,m2))plt.plot(Xs,Ys,'',ms=)plt.axvline(-,color='',ls='',lw=,alpha=)plt.axhline(,color='',ls='',lw=,alpha=)plt.margins(,)plt.xlabel(\"\")plt.tight_layout()frameAndSave(abf,\"\")plt.close('')", "docstring": "protocol: vast IV.", "id": "f11351:m9"} {"signature": "def proto_0204(theABF):", "body": "abf=ABF(theABF)abf.log.info(\"\")plot=ABFplot(abf)plot.figure_height,plot.figure_width=SQUARESIZE/,SQUARESIZE/plot.figure_sweeps()plt.tight_layout()frameAndSave(abf,\"\")plt.close('')", "docstring": "protocol: Cm ramp.", "id": "f11351:m10"} {"signature": "def proto_0222(theABF):", "body": "abf=ABF(theABF)abf.log.info(\"\")plot=ABFplot(abf)plot.figure_height,plot.figure_width=SQUARESIZE/,SQUARESIZE/plot.figure_sweeps()plt.tight_layout()frameAndSave(abf,\"\")plt.close('')", "docstring": "protocol: VC sine sweep.", "id": "f11351:m11"} {"signature": "def proto_0303(theABF):", "body": "abf=ABF(theABF)abf.log.info(\"\")proto_avgRange(theABF,,)plt.close('')plt.figure(figsize=(,))for sweep in abf.setsweeps():color=''if sweep in np.array(abf.comment_sweeps,dtype=int):color=''plt.plot(abf.sweepX2,abf.sweepY+*sweep,color=color,alpha=)plt.margins(,)plt.tight_layout()frameAndSave(abf,\"\")plt.close('')ap=AP(abf)ap.detect_time1=ap.detect_time2=ap.detect()apCount=[]apSweepTimes=[]for sweepNumber,times in enumerate(ap.get_bySweep(\"\")):apCount.append(len(times))if len(times):apSweepTimes.append(times[])else:apSweepTimes.append()plt.figure(figsize=(,))ax1=plt.subplot()plt.grid(alpha=,ls='')plt.plot(np.arange(len(apCount))*abf.sweepLength/,apCount,'',ms=)comment_lines(abf)plt.ylabel(\"\")plt.subplot(,sharex=ax1)plt.grid(alpha=,ls='')plt.plot(np.arange(len(apCount))*abf.sweepLength/,apSweepTimes,'',ms=)comment_lines(abf)plt.ylabel(\"\")plt.xlabel(\"\")plt.tight_layout()frameAndSave(abf,\"\")plt.close('')", "docstring": "protocol: repeated IC ramps.", "id": "f11351:m13"} {"signature": "def proto_0304(theABF):", "body": "abf=ABF(theABF)abf.log.info(\"\")ap=AP(abf)avgVoltagePerSweep = [];times = []for sweep in abf.setsweeps():avgVoltagePerSweep.append(abf.average(,))times.append(abf.sweepStart/)M1,M2=,ap.detect_time1, ap.detect_time2 = M1,M2ap.detect()apsPerSweepCos=[len(x) for x in ap.get_bySweep()]M1,M2=,ap.detect_time1, ap.detect_time2 = M1,M2ap.detect()apsPerSweepRamp=[len(x) for x in 
ap.get_bySweep()]plt.figure(figsize=(,))plt.subplot()plt.grid(ls='',alpha=)plt.plot(times,avgVoltagePerSweep,'')plt.ylabel(\"\")comment_lines(abf)plt.subplot()plt.grid(ls='',alpha=)plt.plot(times,apsPerSweepCos,'')plt.ylabel(\"\")comment_lines(abf)plt.subplot()plt.grid(ls='',alpha=)plt.plot(times,apsPerSweepRamp,'')plt.ylabel(\"\")comment_lines(abf)plt.tight_layout()frameAndSave(abf,\"\")plt.close('')", "docstring": "protocol: repeated IC steps.", "id": "f11351:m14"} {"signature": "def proto_avgRange(theABF,m1=None,m2=None):", "body": "abf=ABF(theABF)abf.log.info(\"\")if m1 is None:m1=abf.sweepLengthif m2 is None:m2=abf.sweepLengthI1=int(abf.pointsPerSec*m1)I2=int(abf.pointsPerSec*m2)Ts=np.arange(abf.sweeps)*abf.sweepIntervalYav=np.empty(abf.sweeps)*np.nan Ysd=np.empty(abf.sweeps)*np.nan for sweep in abf.setsweeps():Yav[sweep]=np.average(abf.sweepY[I1:I2])Ysd[sweep]=np.std(abf.sweepY[I1:I2])plot=ABFplot(abf)plt.figure(figsize=(SQUARESIZE*,SQUARESIZE/))plt.subplot()plot.title=\"\"plot.figure_sweep()plt.title(\"\")plt.axvspan(m1,m2,color='',ec=None,alpha=)plt.subplot()plt.grid(alpha=)for i,t in enumerate(abf.comment_times):plt.axvline(t/,color='',alpha=,lw=,ls='')plt.plot(Ts/,Yav,'',alpha=)plt.title(\"\"%(\"\".join(abf.comment_tags)))plt.ylabel(abf.units2)plt.xlabel(\"\")plt.margins(,)plt.subplot()plt.grid(alpha=)for i,t in enumerate(abf.comment_times):plt.axvline(t/,color='',alpha=,lw=,ls='')plt.plot(Ts/,Ysd,'',alpha=,color='',ms=,mew=)plt.title(\"\"%(\"\".join(abf.comment_tags)))plt.ylabel(abf.units2)plt.xlabel(\"\")plt.margins(,)plt.axis([None,None,,np.percentile(Ysd,)*])plt.tight_layout()frameAndSave(abf,\"\",\"\")plt.close('')", "docstring": "experiment: generic VC time course experiment.", "id": "f11351:m27"} {"signature": "def analyze(fname=False,save=True,show=None):", "body": "if fname and os.path.exists(fname.replace(\"\",\"\")):print(\"\")returnswhlab.plotting.core.IMAGE_SAVE=saveif show is None:if cm.isIpython():swhlab.plotting.core.IMAGE_SHOW=Trueelse:swhlab.plotting.core.IMAGE_SHOW=Falseabf=ABF(fname) print(\"\",abf.protocomment)runFunction=\"\"if \"\"+abf.protocomment in globals():runFunction=\"\"+abf.protocommentabf.log.debug(\"\"%(runFunction))plt.close('') globals()[runFunction](abf) try:globals()[runFunction](abf) except:abf.log.error(\"\")abf.log.error(sys.exc_info()[])return \"\"plt.close('') return \"\"", "docstring": "given a filename or ABF object, try to analyze it.", "id": "f11351:m28"} {"signature": "def __init__(self,abf):", "body": "self.log = logging.getLogger(\"\")self.log.setLevel(swhlab.loglevel)if abf in [None,False,'']:self.log.error(\"\",str(abf))returnif type(abf) is str:self.log.debug(\"\")abf=ABF(abf)self.abf=abfself.detect_over = self.detect_time1 = self.detect_time2 = abf.sweepLength self.APs=False", "docstring": "Load an ABF and get ready to do AP detection.\nAfter detect(), all AP data is stored as a list of dicts in AP.APs", "id": "f11352:c0:m0"} {"signature": "def ensureDetection(self):", "body": "if self.APs==False:self.log.debug(\"\")self.detect()", "docstring": "run this before analysis. 
Checks if event detection occured.\nIf not, runs AP detection on all sweeps.", "id": "f11352:c0:m2"} {"signature": "def detect(self):", "body": "self.log.info(\"\")t1=cm.timeit()for sweep in range(self.abf.sweeps):self.detectSweep(sweep)self.log.info(\"\",self.abf.sweeps,len(self.APs),cm.timeit(t1))", "docstring": "runs AP detection on every sweep.", "id": "f11352:c0:m3"} {"signature": "def detectSweep(self,sweep=):", "body": "if self.APs is False: self.APs=[] for i,ap in enumerate(self.APs):if ap[\"\"]==sweep:self.APs[i]=Noneif self.APs.count(None):self.log.debug(\"\",self.APs.count(None))while None in self.APs:self.APs.remove(None)self.log.debug(\"\",len(self.APs))self.abf.derivative=Trueself.abf.setsweep(sweep)Is = cm.where_cross(self.abf.sweepD,self.detect_over)self.log.debug(\"\"%len(Is))for i,I in enumerate(Is):if np.min(self.abf.sweepD[I:I+*self.abf.pointsPerMs])>-:Is[i]=Is=Is[np.nonzero(Is)]self.log.debug(\"\"%len(Is))for i,I in enumerate(Is):stepBack=while(self.abf.sweepD[I-stepBack])> and stepBack/self.abf.pointsPerMs<: stepBack+=Is[i]-=stepBacksweepAPs=[]for i,I in enumerate(Is):try:timeInSweep=I/self.abf.pointsPerSecif timeInSweepself.detect_time2:continue ap={} ap[\"\"]=sweep ap[\"\"]=I ap[\"\"]=I/self.abf.pointsPerSec ap[\"\"]=ap[\"\"]+self.abf.sweepInterval*sweep ap[\"\"]=self.abf.sweepY[I] chunk=self.abf.sweepD[I:I+*self.abf.pointsPerMs] I_toNegTen=np.where(chunk<-)[][]chunk=self.abf.sweepD[I+I_toNegTen:I+I_toNegTen+*self.abf.pointsPerMs] if not max(chunk)>-:self.log.debug(\"\"%ap[\"\"])self.log.error(\"\")continue I_recover=np.where(chunk>-)[][]+I_toNegTen+I ap[\"\"]=[I,I_recover] ap[\"\"]=(I_recover-I)/self.abf.pointsPerMs chunk=self.abf.sweepD[ap[\"\"][]:ap[\"\"][]]ap[\"\"]=np.max(chunk)ap[\"\"]=np.where(chunk==ap[\"\"])[][]+Iap[\"\"]=np.min(chunk)ap[\"\"]=np.where(chunk==ap[\"\"])[][]+Iif ap[\"\"]< or ap[\"\"]>-:self.log.debug(\"\")self.log.error(\"\")continuechunkSize=self.abf.pointsPerMs* if len(Is)->i and Is[i+]<(I+chunkSize): chunkSize=Is[i+]-I if chunkSize<(self.abf.pointsPerMs*):continue ap[\"\"]=[I,I+chunkSize] chunk=self.abf.sweepY[I:I+chunkSize]ap[\"\"]=np.max(chunk)ap[\"\"]=np.where(chunk==ap[\"\"])[][]+IchunkForMin=np.copy(chunk) chunkForMin[:ap[\"\"]-I]=np.inf ap[\"\"]=np.min(chunkForMin) ap[\"\"]=np.where(chunkForMin==ap[\"\"])[][]+Iif ap[\"\"]\"]:self.log.error(\"\")self.log.error(\"\") self.log.error(\"\")if (len(chunk))-((I+len(chunk))-ap[\"\"])<:self.log.error(\"\")self.log.error(\"\")self.log.error(\"\")ap[\"\"]=(ap[\"\"]-I)/self.abf.pointsPerMs ap[\"\"]=(ap[\"\"]-ap[\"\"])/self.abf.pointsPerMs ap[\"\"]=np.average([ap[\"\"],ap[\"\"]]) ap[\"\"]=cm.where_cross(chunk,ap[\"\"])[]+I ap[\"\"]=cm.where_cross(-chunk,-ap[\"\"])[]+I ap[\"\"]=(ap[\"\"]-ap[\"\"])/self.abf.pointsPerMs sweepAPs.extend([ap])except Exception as e:self.log.error(\"\",i,len(Is))self.log.error(cm.exceptionToString(e))self.log.debug(\"\",len(sweepAPs))self.APs.extend(sweepAPs)self.abf.derivative=False", "docstring": "perform AP detection on current sweep.", "id": "f11352:c0:m4"} {"signature": "def get_times(self):", "body": "self.ensureDetection()times=[]for ap in self.APs:times.append(ap[\"\"])return np.array(sorted(times))", "docstring": "return an array of times (in sec) of all APs.", "id": "f11352:c0:m5"} {"signature": "def get_bySweep(self,feature=\"\"):", "body": "self.ensureDetection()bySweepTimes=[[]]*self.abf.sweepsfor sweep in range(self.abf.sweeps):sweepTimes=[]for ap in self.APs:if 
ap[\"\"]==sweep:sweepTimes.append(ap[\"\"])bySweepTimes[sweep]=sweepTimesbySweepFreqs=[[]]*self.abf.sweepsfor i,times in enumerate(bySweepTimes):if len(times)<:continuediffs=np.array(times[:])-np.array(times[:-])bySweepFreqs[i]=np.array(/diffs).tolist()if feature == \"\":return bySweepFreqselif feature == \"\":result=np.zeros(self.abf.sweeps) for i,freqs in enumerate(bySweepFreqs):if len(freqs):result[i]=freqs[]return resultelif feature == \"\":return bySweepTimeselif feature == \"\":result=np.zeros(self.abf.sweeps) for i,times in enumerate(bySweepTimes):result[i]=len(bySweepTimes[i])return resultelif feature == \"\":result=np.zeros(self.abf.sweeps) for i,freqs in enumerate(bySweepFreqs):if len(freqs):result[i]=np.nanmean(freqs)return resultelif feature == \"\":result=np.zeros(self.abf.sweeps) for i,freqs in enumerate(bySweepFreqs):if len(freqs):result[i]=np.nanmedian(freqs)return resultelse:self.log.error(\"\",feature)return None", "docstring": "returns AP info by sweep arranged as a list (by sweep).\n\nfeature:\n * \"freqs\" - list of instantaneous frequencies by sweep.\n * \"firsts\" - list of first instantaneous frequency by sweep.\n * \"times\" - list of times of each AP in the sweep.\n * \"count\" - numer of APs per sweep.\n * \"average\" - average instanteous frequency per sweep.\n * \"median\" - median instanteous frequency per sweep.", "id": "f11352:c0:m6"} {"signature": "def processFolder(abfFolder):", "body": "if not type(abfFolder) is str or not len(abfFolder)>:returnfiles=sorted(glob.glob(abfFolder+\"\"))for i,fname in enumerate(files):print(\"\".format(i,len(files)),os.path.basename(fname))processAbf(fname,show=False)plt.show()return", "docstring": "call processAbf() for every ABF in a folder.", "id": "f11353:m0"} {"signature": "def processAbf(abfFname,saveAs=False,dpi=,show=True):", "body": "if not type(abfFname) is str or not len(abfFname)>:returnabf=swhlab.ABF(abfFname)plot=swhlab.plotting.ABFplot(abf)plot.figure_height=plot.figure_width=plot.subplot=Falseplot.figure(True)if abf.get_protocol_sequence()==abf.get_protocol_sequence() or abf.sweeps<:if abf.lengthMinutes<:ax1=plt.subplot()plot.figure_sweeps()plt.title(\"\".format(abf.ID,abf.sweeps))plt.gca().get_xaxis().set_visible(False)plt.subplot(,sharex=ax1)plot.figure_protocol()plt.title(\"\")else:plot.figure_chronological()else:plots=[,] if abf.units=='': ap=swhlab.AP(abf) ap.detect() if len(ap.APs): plots=[,,,] ax1=plt.subplot(plots[])plot.figure_sweeps()plt.title(\"\".format(abf.ID,abf.sweeps))plt.gca().get_xaxis().set_visible(False)plt.subplot(plots[],sharex=ax1)plot.figure_protocols()plt.title(\"\")if len(plots)>:ax2=plt.subplot(plots[])plot.rainbow=Falseplot.kwargs[\"\"]=''plot.figure_chronological()plt.gca().get_xaxis().set_visible(False)plt.title(\"\")plt.subplot(plots[],sharex=ax2)plot.abf.derivative=Trueplot.rainbow=Falseplot.traceColor=''plot.figure_chronological()plt.axis([ap.APs[][\"\"]-,ap.APs[][\"\"]+,None,None])plt.title(\"\")if saveAs:print(\"\",os.path.abspath(saveAs))plt.savefig(os.path.abspath(saveAs),dpi=dpi)returnif show:plot.show()", "docstring": "automatically generate a single representative image for an ABF.\nIf saveAs is given (full path of a jpg of png file), the image will be saved.\nOtherwise, the image will pop up in a matplotlib window.", "id": "f11353:m1"} {"signature": "def selectFile():", "body": "plt.close(\"\") print(\"\")processAbf(swhlab.common.gui_getFile())", "docstring": "launch an ABF file selector to determine what to glance at.", "id": "f11353:m2"} {"signature": "def 
selectFolder():", "body": "plt.close(\"\") processFolder(swhlab.common.gui_getFolder())", "docstring": "launch a folder selection dialog to glance at every ABF in a folder.", "id": "f11353:m3"} {"signature": "def drawPhasePlot(abf,m1=,m2=None):", "body": "if not m2:m2 = abf.sweepLengthcm = plt.get_cmap('')Y = abf.sweepYY = Y[int(abf.pointsPerSec*m1):int(abf.pointsPerSec*m2)]dY = (Y[:]-Y[:-])*abf.rate/ dY = np.append(dY,dY[-])Xs = np.arange(len(dY))/abf.pointsPerSecXs = Xs + Xs[-]*abf.sweepplt.subplot()plt.grid(alpha=)plt.plot(Xs,Y,lw=,color=cm(abf.sweep/abf.sweeps))plt.title(\"\")plt.ylabel(\"\")plt.xlabel(\"\")plt.margins(,)plt.subplot()plt.grid(alpha=)plt.plot(Xs,dY,lw=,color=cm(abf.sweep/abf.sweeps))plt.title(\"\")plt.ylabel(\"\")plt.xlabel(\"\")plt.margins(,)plt.subplot()plt.grid(alpha=)plt.plot(Y,dY,alpha=,lw=,color=cm(abf.sweep/abf.sweeps))plt.title(\"\")plt.ylabel(\"\")plt.xlabel(\"\")plt.margins(,)plt.tight_layout()", "docstring": "Given an ABF object (SWHLab), draw its phase plot of the current sweep.\nm1 and m2 are optional marks (in seconds) for plotting only a range of data.\nAssume a matplotlib figure is already open and just draw on top if it.", "id": "f11356:m0"} {"signature": "def figureStimulus(abf,sweeps=[]):", "body": "stimuli=[, ]for sweep in sweeps:abf.setsweep(sweep)for stimulus in stimuli:S1=int(abf.pointsPerSec*stimulus)S2=int(abf.pointsPerSec*(stimulus+)) abf.sweepY[S1:S2]=np.nan I1=int(abf.pointsPerSec*) I2=int(abf.pointsPerSec*) baseline=np.average(abf.sweepY[int(abf.pointsPerSec*):int(abf.pointsPerSec*)])Ys=lowPassFilter(abf.sweepY[I1:I2])-baselineXs=abf.sweepX2[I1:I1+len(Ys)].flatten()plt.plot(Xs,Ys,alpha=,lw=)return", "docstring": "Create a plot of one area of interest of a single sweep.", "id": "f11358:m2"} {"signature": "def kernel_gaussian(size=, sigma=None, forwardOnly=False):", "body": "if sigma is None:sigma=size/points=np.exp(-np.power(np.arange(size)-size/,)/(*np.power(sigma,)))if forwardOnly:points[:int(len(points)/)]=return points/sum(points)", "docstring": "return a 1d gassuan array of a given size and sigma.\nIf sigma isn't given, it will be 1/10 of the size, which is usually good.", "id": "f11361:m0"} {"signature": "def analyzeSweep(abf,sweep,m1=None,m2=None,plotToo=False):", "body": "abf.setsweep(sweep)if m1 is None: m1=else: m1=m1*abf.pointsPerSecif m2 is None: m2=-else: m2=m2*abf.pointsPerSecYorig=abf.sweepY[int(m1):int(m2)]X=np.arange(len(Yorig))/abf.pointsPerSecYlpf=linear_gaussian(Yorig,sigmaSize=abf.pointsPerMs*,forwardOnly=False)Yflat=Yorig-YlpfEPSCs,IPSCs=[],[]if plotToo:plt.figure(figsize=(,))ax1=plt.subplot()plt.title(\"\"%(abf.ID,sweep))plt.grid()plt.plot(X,Yorig,alpha=)plt.plot(X,Ylpf,'',alpha=,lw=)plt.margins(,)plt.subplot(,sharex=ax1)plt.title(\"\")plt.grid()plt.plot(X,Yflat,alpha=)plt.axhline(,color='',lw=,alpha=)plt.tight_layout()plt.show()hist, bin_edges = np.histogram(Yflat, density=True, bins=)peakPa=bin_edges[np.where(hist==max(hist))[][]+]if plotToo:plt.figure()plt.grid()plt.plot(bin_edges[:],hist,alpha=)plt.axvline(,color='')plt.axvline(peakPa,color='',ls='',lw=,alpha=)plt.semilogy()plt.title(\"\")plt.ylabel(\"\")plt.xlabel(\"\")plt.show()return peakPa", "docstring": "m1 and m2, if given, are in seconds.\nreturns [# EPSCs, # IPSCs]", "id": "f11361:m10"} {"signature": "def kernel_gaussian(size=, sigma=None, forwardOnly=False):", "body": "if sigma is None:sigma=size/points=np.exp(-np.power(np.arange(size)-size/,)/(*np.power(sigma,)))if forwardOnly:points[:int(len(points)/)]=return points/sum(points)", "docstring": "return a 1d 
gassuan array of a given size and sigma.\nIf sigma isn't given, it will be 1/10 of the size, which is usually good.", "id": "f11366:m0"} {"signature": "def analyzeSweep(abf,sweep,m1=None,m2=None,plotToo=False):", "body": "abf.setsweep(sweep)if m1 is None: m1=else: m1=m1*abf.pointsPerSecif m2 is None: m2=-else: m2=m2*abf.pointsPerSecYorig=abf.sweepY[int(m1):int(m2)]X=np.arange(len(Yorig))/abf.pointsPerSecKlpf=kernel_gaussian(size=abf.pointsPerMs*,forwardOnly=True)Ylpf=np.convolve(Yorig,Klpf,mode='')Y=Ylpf Kmb=kernel_gaussian(size=abf.pointsPerMs*,forwardOnly=True)Ymb=np.convolve(Yorig,Kmb,mode='')Y=Yorig-Ymb thresh= hitPos=np.where(Y>thresh)[] hitNeg=np.where(Y<-thresh)[] hitPos=np.concatenate((hitPos,[len(Y)-])) hitNeg=np.concatenate((hitNeg,[len(Y)-])) hitsPos=hitPos[np.where(np.abs(np.diff(hitPos))>)[]] hitsNeg=hitNeg[np.where(np.abs(np.diff(hitNeg))>)[]] hitsNeg=hitsNeg[:] if plotToo:plt.figure(figsize=(,))ax1=plt.subplot()plt.title(\"\"%(sweep,len(hitsPos),len(hitsNeg)))plt.ylabel(\"\")plt.grid()plt.plot(X,Yorig,color='',alpha=)for hit in hitsPos:plt.plot(X[hit],Yorig[hit]+,'',ms=,alpha=)for hit in hitsNeg:plt.plot(X[hit],Yorig[hit]-,'',ms=,alpha=)plt.margins(,)plt.subplot(,sharex=ax1)plt.title(\"\")plt.ylabel(\"\")plt.grid()plt.axhline(thresh,color='',ls='',alpha=,lw=)plt.axhline(-thresh,color='',ls='',alpha=,lw=)plt.plot(X,Y,color='',alpha=)plt.axis([X[],X[-],-thresh*,thresh*])plt.tight_layout()if type(plotToo) is str and os.path.isdir(plotToo):print(''%(plotToo,sweep))plt.savefig(plotToo+\"\"%sweep)else:plt.show()plt.close('')return [len(hitsPos),len(hitsNeg)]", "docstring": "m1 and m2, if given, are in seconds.\nreturns [# EPSCs, # IPSCs]", "id": "f11366:m7"} {"signature": "def phasicTonic(self,m1=None,m2=None,chunkMs=,quietPercentile=,histResolution=):", "body": "m1= if m1 is None else m1*self.pointsPerSecm2=len(abf.sweepY) if m2 is None else m2*self.pointsPerSecm1,m2=int(m1),int(m2)padding= chunkPoints=int(chunkMs*self.pointsPerMs)histBins=int((padding*)/histResolution)Y=self.sweepY[m1:m2]hist,bins=np.histogram(Y,bins=*padding)Yoffset=bins[np.where(hist==max(hist))[][]]Y=Y-Yoffset nChunks=int(len(Y)/chunkPoints)hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))variances=np.ptp(chunks,axis=)percentiles=np.empty(len(variances))for i,variance in enumerate(variances):percentiles[i]=sorted(variances).index(variance)/len(variances)*blData=chunks[np.where(percentiles<=quietPercentile)[]].flatten()blHist,blBins=np.histogram(blData,bins=histBins,range=(-padding,padding))blHist=blHist/max(blHist)*max(hist)diff=hist-blHistreturn diff/abf.pointsPerSec", "docstring": "chunkMs should be ~50 ms or greater.\nbin sizes must be equal to or multiples of the data resolution.\ntransients smaller than the expected RMS will be silenced.", "id": "f11378:c0:m0"} {"signature": "def quietParts(data,percentile=):", "body": "nChunks=int(len(Y)/CHUNK_POINTS)chunks=np.reshape(Y[:nChunks*CHUNK_POINTS],(nChunks,CHUNK_POINTS))variances=np.var(chunks,axis=)percentiles=np.empty(len(variances))for i,variance in enumerate(variances):percentiles[i]=sorted(variances).index(variance)/len(variances)*selected=chunks[np.where(percentiles<=percentile)[]].flatten()return selected", "docstring": "Given some data (Y) break it into chunks and return just the quiet ones.\nReturns data where the variance for its chunk size is below the given percentile.\nCHUNK_POINTS should be adjusted so it's about 10ms of data.", "id": "f11379:m0"} {"signature": "def 
quietParts(data,percentile=):", "body": "nChunks=int(len(Y)/CHUNK_POINTS)chunks=np.reshape(Y[:nChunks*CHUNK_POINTS],(nChunks,CHUNK_POINTS))variances=np.var(chunks,axis=)percentiles=np.empty(len(variances))for i,variance in enumerate(variances):percentiles[i]=sorted(variances).index(variance)/len(variances)*selected=chunks[np.where(percentiles<=percentile)[]].flatten()return selected", "docstring": "Given some data (Y) break it into chunks and return just the quiet ones.\nReturns data where the variance for its chunk size is below the given percentile.\nCHUNK_POINTS should be adjusted so it's about 10ms of data.", "id": "f11381:m0"} {"signature": "def plot_shaded_data(X,Y,variances,varianceX):", "body": "plt.plot(X,Y,color='',lw=)nChunks=int(len(Y)/CHUNK_POINTS)for i in range(,,PERCENT_STEP):varLimitLow=np.percentile(variances,i)varLimitHigh=np.percentile(variances,i+PERCENT_STEP)varianceIsAboveMin=np.where(variances>=varLimitLow)[]varianceIsBelowMax=np.where(variances<=varLimitHigh)[]varianceIsRange=[chunkNumber for chunkNumber in range(nChunks)if chunkNumber in varianceIsAboveMinand chunkNumber in varianceIsBelowMax]for chunkNumber in varianceIsRange:t1=chunkNumber*CHUNK_POINTS/POINTS_PER_SECt2=t1+CHUNK_POINTS/POINTS_PER_SECplt.axvspan(t1,t2,alpha=,color=COLORMAP(i/),lw=)", "docstring": "plot X and Y data, then shade its background by variance.", "id": "f11382:m0"} {"signature": "def show_variances(Y,variances,varianceX,logScale=False):", "body": "plt.figure(,figsize=(,))plt.figure(,figsize=(,))varSorted=sorted(variances)plt.figure()plt.subplot()plt.grid()plt.title(\"\")plt.ylabel(\"\")plot_shaded_data(X,Y,variances,varianceX)plt.margins(,) plt.subplot()plt.ylabel(\"\"%str(logScale))plt.xlabel(\"\")plt.plot(varianceX,variances,'',lw=)plt.figure()plt.ylabel(\"\"%str(logScale))plt.xlabel(\"\")plt.title(\"\")plt.plot(varSorted,'',lw=)for i in range(,,PERCENT_STEP):varLimitLow=np.percentile(variances,i)varLimitHigh=np.percentile(variances,i+PERCENT_STEP)label=\"\"%(i,i++PERCENT_STEP)color=COLORMAP(i/)print(\"\"%(label,varLimitLow,varLimitHigh))plt.figure()plt.axhspan(varLimitLow,varLimitHigh,alpha=,lw=,color=color,label=label)plt.figure()chunkLow=np.where(varSorted>=varLimitLow)[][]chunkHigh=np.where(varSorted>=varLimitHigh)[][]plt.axvspan(chunkLow,chunkHigh,alpha=,lw=,color=color,label=label)for fignum in [,]:plt.figure(fignum)if logScale:plt.semilogy()plt.margins(,)plt.grid()if fignum is :plt.legend(fontsize=,loc='',shadow=True)plt.tight_layout()plt.savefig(''%(fignum,str(logScale)))plt.show()", "docstring": "create some fancy graphs to show color-coded variances.", "id": "f11382:m1"} {"signature": "def quietParts():", "body": "", "docstring": "Given some data (Y) break it into chunks and return just the quiet ones.", "id": "f11382:m3"} {"signature": "def phasicTonic(self,m1=None,m2=None,chunkMs=,quietPercentile=,histResolution=,plotToo=False,rmsExpected=):", "body": "m1= if m1 is None else m1*self.pointsPerSecm2=len(abf.sweepY) if m2 is None else m2*self.pointsPerSecm1,m2=int(m1),int(m2)padding= chunkPoints=int(chunkMs*self.pointsPerMs)histBins=int((padding*)/histResolution)Y=self.sweepY[m1:m2]hist,bins=np.histogram(Y,bins=*padding)Yoffset=bins[np.where(hist==max(hist))[][]]Y=Y-Yoffset nChunks=int(len(Y)/chunkPoints)hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))hist=hist/len(Y) Xs=bins[:]chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))variances=np.var(chunks,axis=)percentiles=np.empty(len(variances))for i,variance in 
enumerate(variances):percentiles[i]=sorted(variances).index(variance)/len(variances)*blData=chunks[np.where(percentiles<=quietPercentile)[]].flatten()sigma=np.sqrt(np.var(blData))center=np.average(blData)+histResolution/blCurve=mlab.normpdf(Xs,center,sigma)blCurve=blCurve*max(hist)/max(blCurve)diff=hist-blCurveignrCenter=len(Xs)/ignrPad=rmsExpected/histResolutionignr1,ignt2=int(ignrCenter-ignrPad),int(ignrCenter+ignrPad)diff[ignr1:ignt2]=return diff/len(Y)*abf.pointsPerSec", "docstring": "chunkMs should be ~50 ms or greater.\nbin sizes must be equal to or multiples of the data resolution.\ntransients smaller than the expected RMS will be silenced.", "id": "f11387:c0:m0"} {"signature": "def quietParts(data,percentile=):", "body": "nChunks=int(len(Y)/CHUNK_POINTS)chunks=np.reshape(Y[:nChunks*CHUNK_POINTS],(nChunks,CHUNK_POINTS))variances=np.var(chunks,axis=)percentiles=np.empty(len(variances))for i,variance in enumerate(variances):percentiles[i]=sorted(variances).index(variance)/len(variances)*selected=chunks[np.where(percentiles<=percentile)[]].flatten()return selected", "docstring": "Given some data (Y) break it into chunks and return just the quiet ones.\nReturns data where the variance for its chunk size is below the given percentile.\nCHUNK_POINTS should be adjusted so it's about 10ms of data.", "id": "f11388:m0"} {"signature": "def phasicTonic(self,m1=None,m2=None,chunkMs=,quietPercentile=,histResolution=,plotToo=False):", "body": "m1= if m1 is None else m1*self.pointsPerSecm2=len(abf.sweepY) if m2 is None else m2*self.pointsPerSecm1,m2=int(m1),int(m2)padding= chunkPoints=int(chunkMs*self.pointsPerMs)histBins=int((padding*)/histResolution)Y=self.sweepY[m1:m2]hist,bins=np.histogram(Y,bins=*padding)Yoffset=bins[np.where(hist==max(hist))[][]]Y=Y-Yoffset nChunks=int(len(Y)/chunkPoints)hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))Xs=bins[:]chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))variances=np.var(chunks,axis=)percentiles=np.empty(len(variances))for i,variance in enumerate(variances):percentiles[i]=sorted(variances).index(variance)/len(variances)*blData=chunks[np.where(percentiles<=quietPercentile)[]].flatten()sigma=np.sqrt(np.var(blData))center=np.average(blData)+histResolution/blCurve=mlab.normpdf(Xs,center,sigma)blCurve=blCurve*max(hist)/max(blCurve)diff=hist-blCurveif plotToo:plt.figure(figsize=(,))plt.plot(Y)plt.figure(figsize=(,))ax1=plt.subplot()plt.title(abf.ID+\"\")plt.ylabel(\"\")plt.plot(Xs,hist,'',alpha=,color='',lw=)plt.plot(Xs,blCurve,lw=,alpha=,color='')plt.margins(,)plt.subplot(,sharex=ax1)plt.title(\"\")plt.ylabel(\"\")plt.xlabel(\"\"%abf.units)plt.plot(Xs,diff,'',alpha=,color='',lw=)plt.axhline(,lw=,alpha=,color='')plt.axvline(,lw=,alpha=,color='')plt.margins(,)plt.axis([-,,None,None])plt.tight_layout()plt.show()return [Xs,diff]", "docstring": "let's keep the chunkMs as high as we reasonably can. 50ms is good.\nThings get flakey at lower numbers like 10ms.\n\nIMPORTANT! 
for this to work, prevent 0s from averaging in, so keep\nbin sizes well above the data resolution.", "id": "f11390:c0:m0"} {"signature": "def quietParts(data,percentile=):", "body": "nChunks=int(len(Y)/CHUNK_POINTS)chunks=np.reshape(Y[:nChunks*CHUNK_POINTS],(nChunks,CHUNK_POINTS))variances=np.var(chunks,axis=)percentiles=np.empty(len(variances))for i,variance in enumerate(variances):percentiles[i]=sorted(variances).index(variance)/len(variances)*selected=chunks[np.where(percentiles<=percentile)[]].flatten()return selected", "docstring": "Given some data (Y) break it into chunks and return just the quiet ones.\nReturns data where the variance for its chunk size is below the given percentile.\nCHUNK_POINTS should be adjusted so it's about 10ms of data.", "id": "f11391:m0"} {"signature": "def ndist(data,Xs):", "body": "sigma=np.sqrt(np.var(data))center=np.average(data)curve=mlab.normpdf(Xs,center,sigma)curve*=len(data)*HIST_RESOLUTIONreturn curve", "docstring": "given some data and a list of X posistions, return the normal\ndistribution curve as a Y point at each of those Xs.", "id": "f11391:m1"} {"signature": "def quietParts(data,percentile=):", "body": "nChunks=int(len(Y)/CHUNK_POINTS)chunks=np.reshape(Y[:nChunks*CHUNK_POINTS],(nChunks,CHUNK_POINTS))variances=np.var(chunks,axis=)percentiles=np.empty(len(variances))for i,variance in enumerate(variances):percentiles[i]=sorted(variances).index(variance)/len(variances)*selected=chunks[np.where(percentiles<=percentile)[]].flatten()return selected", "docstring": "Given some data (Y) break it into chunks and return just the quiet ones.\nReturns data where the variance for its chunk size is below the given percentile.\nCHUNK_POINTS should be adjusted so it's about 10ms of data.", "id": "f11392:m0"} {"signature": "def ndist(data,Xs):", "body": "sigma=np.sqrt(np.var(data))center=np.average(data)curve=mlab.normpdf(Xs,center,sigma)curve*=len(data)*HIST_RESOLUTIONreturn curve", "docstring": "given some data and a list of X posistions, return the normal\ndistribution curve as a Y point at each of those Xs.", "id": "f11392:m1"} {"signature": "def phasicTonic(self,m1=None,m2=None,chunkMs=,quietPercentile=,histResolution=):", "body": "m1= if m1 is None else m1*self.pointsPerSecm2=len(abf.sweepY) if m2 is None else m2*self.pointsPerSecm1,m2=int(m1),int(m2)padding= chunkPoints=int(chunkMs*self.pointsPerMs)histBins=int((padding*)/histResolution)Y=self.sweepYfilteredHisto()[m1:m2]hist,bins=np.histogram(Y,bins=*padding)Yoffset=bins[np.where(hist==max(hist))[][]]Y=Y-Yoffset nChunks=int(len(Y)/chunkPoints)hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))variances=np.ptp(chunks,axis=)percentiles=np.empty(len(variances))for i,variance in enumerate(variances):percentiles[i]=sorted(variances).index(variance)/len(variances)*blData=chunks[np.where(percentiles<=quietPercentile)[]].flatten()blHist,blBins=np.histogram(blData,bins=histBins,range=(-padding,padding))blHist=blHist/max(blHist)*max(hist)diff=hist-blHistreturn diff/abf.pointsPerSec", "docstring": "chunkMs should be ~50 ms or greater.\nbin sizes must be equal to or multiples of the data resolution.\ntransients smaller than the expected RMS will be silenced.", "id": "f11393:c0:m1"} {"signature": "def phasicTonic(self,m1=None,m2=None,chunkMs=,quietPercentile=,histResolution=,plotToo=False):", "body": "m1= if m1 is None else m1*self.pointsPerSecm2=len(abf.sweepY) if m2 is None else m2*self.pointsPerSecm1,m2=int(m1),int(m2)padding= 
chunkPoints=int(chunkMs*self.pointsPerMs)histBins=int((padding*)/histResolution)Y=self.sweepY[m1:m2]hist,bins=np.histogram(Y,bins=*padding)Yoffset=bins[np.where(hist==max(hist))[][]]Y=Y-Yoffset nChunks=int(len(Y)/chunkPoints)hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))hist=hist/len(Y) Xs=bins[:]chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))variances=np.var(chunks,axis=)percentiles=np.empty(len(variances))for i,variance in enumerate(variances):percentiles[i]=sorted(variances).index(variance)/len(variances)*blData=chunks[np.where(percentiles<=quietPercentile)[]].flatten()sigma=np.sqrt(np.var(blData))center=np.average(blData)+histResolution/blCurve=mlab.normpdf(Xs,center,sigma)blCurve=blCurve*max(hist)/max(blCurve)diff=histIGNORE_DISTANCE= ignrCenter=len(Xs)/ignrPad=IGNORE_DISTANCE/histResolutionignr1,ignt2=int(ignrCenter-ignrPad),int(ignrCenter+ignrPad)diff[ignr1:ignt2]=if plotToo:plt.figure(figsize=(,))plt.plot(Y)plt.figure(figsize=(,))ax1=plt.subplot()plt.title(abf.ID+\"\")plt.ylabel(\"\")plt.plot(Xs,hist,'',alpha=,color='',lw=)plt.plot(Xs,blCurve,lw=,alpha=,color='')plt.margins(,)plt.subplot(,sharex=ax1)plt.title(\"\")plt.ylabel(\"\")plt.xlabel(\"\"%abf.units)plt.plot(Xs,diff,'',alpha=,color='',lw=)plt.axhline(,lw=,alpha=,color='')plt.axvline(,lw=,alpha=,color='')plt.margins(,)plt.axis([-,,None,None])plt.tight_layout()plt.show()print(np.sum(np.split(diff,),))return diff/len(Y)*abf.pointsPerSec", "docstring": "let's keep the chunkMs as high as we reasonably can. 50ms is good.\nThings get flakey at lower numbers like 10ms.\n\nIMPORTANT! for this to work, prevent 0s from averaging in, so keep\nbin sizes well above the data resolution.", "id": "f11394:c0:m0"} {"signature": "def phasicTonic(self,m1=None,m2=None,chunkMs=,quietPercentile=,histResolution=):", "body": "m1= if m1 is None else m1*self.pointsPerSecm2=len(abf.sweepY) if m2 is None else m2*self.pointsPerSecm1,m2=int(m1),int(m2)padding= chunkPoints=int(chunkMs*self.pointsPerMs)histBins=int((padding*)/histResolution)Y=self.sweepYfilteredHisto()[m1:m2]hist,bins=np.histogram(Y,bins=*padding)nChunks=int(len(Y)/chunkPoints)hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))variances=np.ptp(chunks,axis=)percentiles=np.empty(len(variances))for i,variance in enumerate(variances):percentiles[i]=sorted(variances).index(variance)/len(variances)*blData=chunks[np.where(percentiles<=quietPercentile)[]].flatten()blHist,blBins=np.histogram(blData,bins=histBins,range=(-padding,padding))blHist=blHist/max(blHist)*max(hist)diff=hist-blHistreturn diff/abf.pointsPerSec", "docstring": "chunkMs should be ~50 ms or greater.\nbin sizes must be equal to or multiples of the data resolution.\ntransients smaller than the expected RMS will be silenced.", "id": "f11395:c0:m1"} {"signature": "def quietParts(data,percentile=):", "body": "nChunks=int(len(Y)/CHUNK_POINTS)chunks=np.reshape(Y[:nChunks*CHUNK_POINTS],(nChunks,CHUNK_POINTS))variances=np.var(chunks,axis=)percentiles=np.empty(len(variances))for i,variance in enumerate(variances):percentiles[i]=sorted(variances).index(variance)/len(variances)*selected=chunks[np.where(percentiles<=percentile)[]].flatten()return selected", "docstring": "Given some data (Y) break it into chunks and return just the quiet ones.\nReturns data where the variance for its chunk size is below the given percentile.\nCHUNK_POINTS should be adjusted so it's about 10ms of data.", "id": "f11396:m0"} {"signature": "def 
ndist(data,Xs):", "body": "sigma=np.sqrt(np.var(data))center=np.average(data)curve=mlab.normpdf(Xs,center,sigma)curve*=len(data)*HIST_RESOLUTIONreturn curve", "docstring": "given some data and a list of X posistions, return the normal\ndistribution curve as a Y point at each of those Xs.", "id": "f11396:m1"} {"signature": "def phasicTonic(self,m1=None,m2=None,chunkMs=,quietPercentile=,histResolution=,plotToo=False):", "body": "m1= if m1 is None else m1*self.pointsPerSecm2=len(abf.sweepY) if m2 is None else m2*self.pointsPerSecm1,m2=int(m1),int(m2)self.kernel=self.kernel_gaussian()Y=self.sweepYsmartbase()padding= chunkPoints=int(chunkMs*self.pointsPerMs)nChunks=int(len(Y)/chunkPoints)histBins=int((padding*)/histResolution)hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))hist=hist.astype(np.float)chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))variances=np.var(chunks,axis=)percentiles=np.empty(len(variances))for i,variance in enumerate(variances):percentiles[i]=sorted(variances).index(variance)/len(variances)*blData=chunks[np.where(percentiles<=quietPercentile)[]].flatten()sigma=np.sqrt(np.var(blData))center=np.average(blData)blCurve=mlab.normpdf(bins[:-],center,sigma)blCurve=blCurve/max(blCurve)blCurve=blCurve*max(hist)blankPoints=int(*sigma/histResolution)centerI=int(len(hist)/)for i in range(blankPoints):hist[centerI-i]=np.nanhist[centerI+i]=np.nandiff=hist-blCurveif plotToo:plt.figure(figsize=(,))plt.subplot()plt.title(abf.ID+\"\")plt.ylabel(\"\")plt.plot(bins[:-],hist,'',alpha=)plt.plot(bins[:-],blCurve,lw=,alpha=,color='')plt.margins(,)plt.subplot()plt.title(\"\")plt.ylabel(\"\")plt.xlabel(\"\"%abf.units)plt.plot(bins[:-],diff,'',alpha=,color='')plt.axhline(,lw=,alpha=,color='')plt.axvline(,lw=,alpha=,color='')plt.margins(,)plt.tight_layout()plt.show()", "docstring": "IMPORTANT: do this first!!\nself.kernel=self.kernel_gaussian(250)", "id": "f11397:c0:m0"} {"signature": "def plotAllSweeps(abfFile):", "body": "r = io.AxonIO(filename=abfFile)bl = r.read_block(lazy=False, cascade=True) print(abfFile+\"\"%len(bl.segments))plt.figure(figsize=(,))plt.title(abfFile)for sweep in range(len(bl.segments)):trace = bl.segments[sweep].analogsignals[]plt.plot(trace.times-trace.times[],trace.magnitude,alpha=) plt.ylabel(trace.dimensionality)plt.xlabel(\"\")plt.show()plt.close()", "docstring": "simple example how to load an ABF file and plot every sweep.", "id": "f11402:m0"} {"signature": "def fread(f,byteLocation,structFormat=None,nBytes=):", "body": "f.seek(byteLocation)if structFormat:val = struct.unpack(structFormat, f.read(struct.calcsize(structFormat)))val = val[] if len(val)== else list(val)return valelse:return f.read(nBytes)", "docstring": "Given an already-open (rb mode) file object, return a certain number of bytes at a specific location.\nIf a struct format is given, calculate the number of bytes required and return the object it represents.", "id": "f11403:m0"} {"signature": "def abf_read_header(fname, saveHeader=True):", "body": "header={} sections={} strings=[] protocol = {} tags=[] adcs=[] dacs=[] digitalOutputs=[] config={} f=open(fname,'')config[\"\"]=os.path.abspath(fname) config[\"\"]=os.path.basename(fname)[:-] for key, byte_location, fmt in headerDescriptionV2:header[key]=fread(f,byte_location,fmt)header['']=header[''].decode()for sectionNumber, sectionName in enumerate(sectionNames):uBlockIndex, uBytes, llNumEntries = fread(f,+sectionNumber*,\"\")sections[sectionName] = [uBlockIndex,uBytes,llNumEntries]config[\"\"]=header['']//byte_location = 
sections[''][]*BLOCKSIZEstring_size = sections[''][]strings_data = fread(f,byte_location,structFormat=None,nBytes=string_size)for key in [b'', b'', b'', b'', b'']:if key in strings_data:for line in strings_data.split(key)[].split(b'')[:-]:strings.append(line.decode())config[\"\"]=strings[]config[\"\"]=strings[]config[\"\"]=strings[::]config[\"\"]=strings[::]breakfor ADCsection in range(sections[''][]):thisADC={}byte_location=sections[''][]*BLOCKSIZE+sections[''][]*ADCsectionfor key, fmt in ADCInfoDescription:thisADC[key]=fread(f,byte_location,fmt)byte_location+=struct.calcsize(fmt)adcs.append(thisADC)byte_location=sections[''][]*BLOCKSIZEfor key, fmt in protocolInfoDescription:protocol[key]=fread(f,byte_location,fmt)byte_location+=struct.calcsize(fmt)protocol.pop('', None) byte_location=sections[''][]*BLOCKSIZEfor i in range(sections[''][]):thisTag=[]for key, fmt in TagInfoDescription:val=fread(f,byte_location,fmt)if type(val) is bytes:val=val.decode().strip()thisTag.append(val)byte_location+=struct.calcsize(fmt)tags.append(thisTag)for dacNumber in range(sections[''][]):thisDAC={}byte_location=sections[''][]*BLOCKSIZE+sections[''][]*dacNumber for key, fmt in DACInfoDescription:thisDAC[key]=fread(f,byte_location,fmt)byte_location+=struct.calcsize(fmt)thisDAC.pop('', None) if thisDAC['']==: continue dacs.append(thisDAC)epochs=[]for epochNumber in range(sections[''][]):thisEpoch={}byte_location=sections[''][]*BLOCKSIZE+sections[''][]*epochNumberfor key, fmt in EpochInfoPerDACDescription:thisEpoch[key]=fread(f,byte_location,fmt)byte_location+=struct.calcsize(fmt)thisEpoch.pop('', None) epochs.append(thisEpoch)byte_location=sections[''][]*BLOCKSIZEfor epochNumber in range(sections[''][]):if epochNumber>=len(epochs):break thisEpoch=epochs[epochNumber]for key, fmt in EpochSectionDescription:val=fread(f,byte_location,fmt)if key=='':val=format(val, '').rjust(,'') thisEpoch[key]=valbyte_location+=struct.calcsize(fmt)thisEpoch.pop('', None) epochs[epochNumber]=thisEpochf.close()config[\"\"]=float(\"\".join([str(x) for x in header['']]))/ config['']=config[''][:len(adcs)] config['']=config[''][:len(adcs)] config['']=[x[:] for x in tags]config['']=sections[''][]YY = int(header[''] / )MM = int((header[''] - YY * ) / )DD = int(header[''] - YY * - MM * )hh = int(header[''] / / )mm = int((header[''] / - hh * ) / )ss = header[''] / - hh * - mm * ms = int((ss%)*)ss = int(ss)config[''] = datetime.datetime(YY, MM, DD, hh, mm, ss, ms)", "docstring": "Practice pulling data straight out of an ABF's binary header. Support only ABF2 (ClampEx an ClampFit 10).\nUse only native python libraries. 
Strive for simplicity and readability (to promote language portability).\nThis was made by Scott Harden after a line-by-line analysis of axonrawio.py from the neo io library.\nUnlike NeoIO's format, I'm going to try to prevent nested dictionaries to keep things simple.", "id": "f11403:m1"} {"signature": "def detect(abf,sweep=None,threshold_upslope=,dT=,saveToo=True):", "body": "if type(sweep) is int:sweeps=[sweep]else:sweeps=list(range(abf.sweeps))timeStart=time.clock()abf.APs=[None]*abf.sweepsabf.SAP=[None]*abf.sweepsfor sweep in sweeps:abf.setSweep(sweep)Y=abf.dataYdI = int(dT/*abf.rate) dY = (Y[dI:]-Y[:-dI])*(abf.rate//dI) Is = cm.where_cross(dY,threshold_upslope) abf.APs[sweep]=[]for i in range(len(Is)): try:AP=analyzeAP(Y,dY,Is[i],abf.rate) if AP:AP[\"\"]=sweepAP[\"\"]=sweep*abf.sweepInterval*abf.rate*+AP[\"\"]AP[\"\"]=sweep*abf.sweepInterval+AP[\"\"]AP[\"\"]=np.nan if len(abf.APs[sweep]):AP[\"\"]=/(AP[\"\"]-abf.APs[sweep][-][\"\"])if AP[\"\"] is np.nan or AP[\"\"]<: abf.APs[sweep].append(AP)except:print(\"\"%(i+,len(Is)))analyzeAPgroup(abf) abf.APs=cm.matrixfromDicts(abf.APs)abf.SAP=cm.matrixfromDicts(abf.SAP)print(\"\"%(len(cm.dictFlat(abf.APs)),(time.clock()-timeStart)*))if saveToo:abf.saveThing(abf.APs,\"\")abf.saveThing(abf.SAP,\"\")", "docstring": "An AP will be detected by a upslope that exceeds 50V/s. Analyzed too.\n if type(sweep) is int, graph int(sweep)\n if sweep==None, process all sweeps sweep.", "id": "f11404:m0"} {"signature": "def analyzeAPgroup(abf=exampleABF,T1=None,T2=None,plotToo=False):", "body": "if T1 is None or T2 is None:if len(abf.protoSeqX)>:T1=abf.protoSeqX[]/abf.rateT2=abf.protoSeqX[]/abf.rateelse:T1=T2=abf.sweepLengths={} s[\"\"]=abf.currentSweeps[\"\"]=abf.protoSeqY[]APs=[]for key in ['','']:s[key]=for AP in abf.APs[abf.currentSweep]:if T1\"]APs.append(AP)s[\"\"]=len(APs) apTimes=cm.dictVals(APs,'')if len(APs)>: s[\"\"]=np.average(apTimes)-T1 s[\"\"]=s[\"\"]/(T2-T1)* s[\"\"]=np.average(apTimes)-APs[][\"\"] s[\"\"]=s[\"\"]/(APs[-][\"\"]-APs[][\"\"])* s[\"\"]=(APs[][\"\"]-T1)* s[\"\"]=APs[][''] s[\"\"]=cm.dictAvg(APs[:],'')[] s[\"\"]=APs[-][''] s[\"\"]=cm.dictAvg(APs,'')[] s[\"\"]=len(APs)/(T2-T1) s[\"\"]=cm.dictAvg(APs[-int(len(APs)*):],'')[] s[\"\"]=s[\"\"]/s[\"\"] s[\"\"]=s[\"\"]/s[\"\"] s[\"\"]=s[\"\"]/s[\"\"] s[\"\"]=s[\"\"]/s[\"\"] s[\"\"]=cm.dictAvg(APs,'')[]/cm.dictAvg(APs,'')[] s[\"\"]=T1s[\"\"]=T2abf.SAP[abf.currentSweep]=s", "docstring": "On the current (setSweep()) sweep, calculate things like accomodation.\nOnly call directly just for demonstrating how it works by making a graph.\nOr call this if you want really custom T1 and T2 (multiple per sweep)\n This is called by default with default T1 and T2.\n Manually call it again for custom.", "id": "f11404:m1"} {"signature": "def check_AP_group(abf=exampleABF,sweep=):", "body": "abf.setSweep(sweep)swhlab.plot.new(abf,title=\"\"%(abf.currentSweep,abf.protoSeqY[]))swhlab.plot.sweep(abf)SAP=cm.matrixToDicts(abf.SAP[sweep])if \"\" in SAP.keys():T1=SAP[\"\"]T2=SAP[\"\"]pylab.axvspan(T1/abf.rate,T2/abf.rate,color='',alpha=)else:T1=T2=abf.sweepLengthswhlab.plot.annotate(abf)pylab.tight_layout()pylab.subplots_adjust(right=)pylab.annotate(cm.msgDict(SAP),(,),ha='',va='',weight='',family='',xycoords='',size=,color='')pylab.axis([T1-,T2+,None,None])", "docstring": "after running detect() and abf.SAP is populated, this checks it.", "id": "f11404:m2"} {"signature": "def analyzeAP(Y,dY,I,rate,verbose=False):", "body": "Ims = int(rate/) IsToLook=*Ims upslope=np.max(dY[I:I+IsToLook]) 
upslopeI=np.where(dY[I:I+IsToLook]==upslope)[][]+II=upslopeI downslope=np.min(dY[I:I+IsToLook]) downslopeI=np.where(dY[I:I+IsToLook]==downslope)[][]+Ipeak=np.max(Y[I:I+IsToLook]) peakI=np.where(Y[I:I+IsToLook]==peak)[][]+I thresholdI=I-np.where(dY[I:I+IsToLook:--]<)[] if not len(thresholdI):return FalsethresholdI=thresholdI[]threshold=Y[thresholdI] height=peak-threshold halfwidthPoint=np.average((threshold,peak))halfwidth=np.where(Y[I-IsToLook:I+IsToLook]>halfwidthPoint)[]if not len(halfwidth):return False halfwidthI1=halfwidth[]+I-IsToLookhalfwidthI2=halfwidth[-]+I-IsToLookif Y[halfwidthI1-]>halfwidthPoint or Y[halfwidthI2+]>halfwidthPoint:return False halfwidth=len(halfwidth)/rate* riseTime=(peakI-thresholdI)*/rate IsToLook=*Ims AHPchunk=np.diff(Y[downslopeI:downslopeI+IsToLook]) AHPI=np.where(AHPchunk>)[]if len(AHPI)==:AHPI=np.nanelse:AHPI=AHPI[]+downslopeIAHPchunk=Y[AHPI:AHPI+IsToLook]if max(AHPchunk)>threshold: AHPchunk=AHPchunk[:np.where(AHPchunk>threshold)[][]]if len(AHPchunk):AHP=np.nanmin(AHPchunk)AHPI=np.where(AHPchunk==AHP)[][]+AHPIAHPheight=threshold-AHP IsToLook=*Ims AHPreturn=np.average((AHP,threshold)) AHPreturnI=np.where(Y[AHPI:AHPI+IsToLook]>AHPreturn)[]if len(AHPreturnI): AHPreturnI=AHPreturnI[]+AHPIAHPrisetime=(AHPreturnI-AHPI)*/rate* AHPupslope=AHPheight/AHPrisetime AHPreturnFullI=(AHPreturnI-AHPI)*+AHPIelse: AHPreturnI,AHPrisetime,AHPupslope=np.nan,np.nan,np.nandownslope=np.nansweepI,sweepT=I,I/rate del IsToLook,I, Y, dY, Ims, AHPchunk, verbose return locals()", "docstring": "given a sweep and a time point, return the AP array for that AP.\nAPs will be centered in time by their maximum upslope.", "id": "f11404:m3"} {"signature": "def check_sweep(abf,sweep=None,dT=):", "body": "if abf.APs is None:APs=[]else:APs=cm.matrixToDicts(abf.APs)if sweep is None or len(sweep)==: for sweepNum in range(abf.sweeps):foundInThisSweep=for AP in APs:if AP[\"\"]==sweepNum:foundInThisSweep+=if foundInThisSweep>=:breaksweep=sweepNumabf.setSweep(sweep)Y=abf.dataYdI = int(dT/*abf.rate) dY = (Y[dI:]-Y[:-dI])*(abf.rate//dI) pylab.figure(figsize=(,))ax=pylab.subplot()pylab.title(\"\"%abf.currentSweep)pylab.ylabel(\"\")pylab.plot(Y,'',alpha=)for AP in APs:if not AP[\"\"]==sweep:continuepylab.axvline(AP[\"\"],alpha=,color='')pylab.plot(AP[\"\"],AP[\"\"],'',alpha=,ms=,color='')pylab.plot(AP[\"\"],AP[\"\"],'',alpha=,ms=,color='')pylab.plot([AP[\"\"],AP[\"\"]],[AP[\"\"],AP[\"\"]],'',alpha=,ms=,color='',lw=)pylab.plot([AP[\"\"],AP[\"\"]],[AP[\"\"],AP[\"\"]],'',lw=,alpha=,color='')pylab.subplot(,sharex=ax)pylab.ylabel(\"\")pylab.xlabel(\"\"%(abf.rate/))pylab.plot(dY,'',alpha=)pylab.margins(,)for AP in APs:if not AP[\"\"]==sweep:continuepylab.axvline(AP[\"\"],alpha=,color='')pylab.plot(AP[\"\"],AP[\"\"],'',alpha=,ms=,color='')pylab.plot(AP[\"\"],AP[\"\"],'',alpha=,ms=,color='')pylab.axis([APs[][\"\"]-,APs[-][\"\"]+,None,None])", "docstring": "Plotting for an eyeball check of AP detection in the given sweep.", "id": "f11404:m4"} {"signature": "def get_AP_timepoints(abf):", "body": "col=abf.APs.dtype.names.index(\"\")timePoints=[]for i in range(len(abf.APs)):timePoints.append(abf.APs[i][col])return timePoints", "docstring": "return list of time points (sec) of all AP events in experiment.", "id": "f11404:m5"} {"signature": "def check_AP_raw(abf,n=):", "body": "timePoints=get_AP_timepoints(abf)[:n] if len(timePoints)==:returnswhlab.plot.new(abf,True,title=\"\"%n,xlabel=\"\")Ys=abf.get_data_around(timePoints,padding=)Xs=(np.arange(len(Ys[]))-len(Ys[])/)*/abf.ratefor i in 
range(,len(Ys)):pylab.plot(Xs,Ys[i],alpha=,color='')pylab.plot(Xs,Ys[],alpha=,color='',lw=)pylab.margins(,)msg=cm.msgDict(cm.dictFlat(abf.APs)[],cantEndWith=\"\")pylab.subplots_adjust(right=)pylab.annotate(msg,(,),ha='',va='',xycoords='',family='',size=)", "docstring": "X", "id": "f11404:m6"} {"signature": "def check_AP_deriv(abf,n=):", "body": "timePoints=get_AP_timepoints(abf)[:] if len(timePoints)==:returnswhlab.plot.new(abf,True,title=\"\"%n,xlabel=\"\",ylabel=\"\")pylab.axhline(-,color='',lw=,ls=\"\",alpha=)pylab.axhline(-,color='',lw=,ls=\"\",alpha=)Ys=abf.get_data_around(timePoints,msDeriv=,padding=)Xs=(np.arange(len(Ys[]))-len(Ys[])/)*/abf.ratefor i in range(,len(Ys)):pylab.plot(Xs,Ys[i],alpha=,color='')pylab.plot(Xs,Ys[],alpha=,color='',lw=)pylab.margins(,)", "docstring": "X", "id": "f11404:m7"} {"signature": "def check_AP_phase(abf,n=):", "body": "timePoints=get_AP_timepoints(abf)[:] if len(timePoints)==:returnswhlab.plot.new(abf,True,title=\"\"%n,xlabel=\"\",ylabel=\"\")Ys=abf.get_data_around(timePoints,msDeriv=,padding=)Xs=abf.get_data_around(timePoints,padding=)for i in range(,len(Ys)):pylab.plot(Xs[i],Ys[i],alpha=,color='')pylab.plot(Xs[],Ys[],alpha=,color='',lw=)pylab.margins(,)", "docstring": "X", "id": "f11404:m8"} {"signature": "def stats_first(abf):", "body": "msg=\"\"for sweep in range(abf.sweeps):for AP in abf.APs[sweep]:for key in sorted(AP.keys()):if key[-] is \"\" or key[-:] in [\"\",\"\"]:continuemsg+=\"\"%(key,AP[key])return msg", "docstring": "provide all stats on the first AP.", "id": "f11404:m9"} {"signature": "def get_values(abf,key=\"\",continuous=False):", "body": "Xs,Ys,Ss=[],[],[]for sweep in range(abf.sweeps):for AP in cm.matrixToDicts(abf.APs):if not AP[\"\"]==sweep:continueYs.append(AP[key])Ss.append(AP[\"\"])if continuous:Xs.append(AP[\"\"])else:Xs.append(AP[\"\"])return np.array(Xs),np.array(Ys),np.array(Ss)", "docstring": "returns Xs, Ys (the key), and sweep #s for every AP found.", "id": "f11404:m10"} {"signature": "def getAvgBySweep(abf,feature,T0=None,T1=None):", "body": "if T1 is None:T1=abf.sweepLengthif T0 is None:T0=data = [np.empty(())]*abf.sweepsfor AP in cm.dictFlat(cm.matrixToDicts(abf.APs)):if T0']val=AP[feature]data[int(AP[''])]=np.concatenate((data[int(AP[''])],[val]))for sweep in range(abf.sweeps):if len(data[sweep])> and np.any(data[sweep]):data[sweep]=np.nanmean(data[sweep])elif len(data[sweep])==:data[sweep]=data[sweep][]else:data[sweep]=np.nanreturn data", "docstring": "return average of a feature divided by sweep.", "id": "f11404:m12"} {"signature": "def values_above_sweep(abf,dataI,dataY,ylabel=\"\",useFigure=None):", "body": "xOffset = abf.currentSweep*abf.sweepIntervalif not useFigure: pylab.figure(figsize=(,))ax=pylab.subplot()pylab.grid(alpha=)if len(dataI):pylab.plot(abf.dataX[dataI],dataY,'',ms=,alpha=,color=abf.colormap[abf.currentSweep])pylab.margins(,)pylab.ylabel(ylabel)pylab.subplot(,sharex=ax)pylab.grid(alpha=)pylab.plot(abf.dataX,abf.dataY,color=abf.colormap[abf.currentSweep],alpha=)pylab.ylabel(\"\"%abf.units)ax2=pylab.subplot()pylab.grid(alpha=)if len(dataI):pylab.plot(abf.dataX[dataI]+xOffset,dataY,'',ms=,alpha=,color=abf.colormap[abf.currentSweep])pylab.margins(,)pylab.ylabel(ylabel)pylab.subplot(,sharex=ax2)pylab.grid(alpha=)pylab.plot(abf.dataX+xOffset,abf.dataY,color=abf.colormap[abf.currentSweep])pylab.ylabel(\"\"%abf.units)pylab.tight_layout()", "docstring": "To make plots like AP frequency over original trace.\ndataI=[i] #the i of the sweep\ndataY=[1.234] #something like inst freq", "id": "f11405:m1"} 
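
The detect() and where_cross() records in this dump describe finding action potentials by locating the points where the voltage derivative first rises above a threshold (on the order of 50 V/s). Below is a minimal sketch of that idea using only numpy; it is not the library's implementation, and the threshold, sampling rate, and derivative window are illustrative values.

import numpy as np

def where_cross(data, threshold):
    """Return indices where data first crosses above threshold (rising edges only)."""
    above = data > threshold
    # a rising edge is a point above threshold whose previous point is not
    return np.where(above[1:] & ~above[:-1])[0] + 1

def detect_ap_upstrokes(voltage_mV, rate_hz, dv_threshold=50.0, dT_ms=0.1):
    """Return sample indices where dV/dt (in V/s) first exceeds dv_threshold."""
    dI = max(1, int(dT_ms / 1000 * rate_hz))                 # points spanned by the derivative window
    dY = (voltage_mV[dI:] - voltage_mV[:-dI]) * (rate_hz / dI) / 1000.0  # mV/ms == V/s
    return where_cross(dY, dv_threshold)

if __name__ == "__main__":
    rate = 20000                                             # assumed 20 kHz sampling
    t = np.arange(0, 1, 1 / rate)
    trace = -70 + 100 * np.exp(-((t - 0.5) ** 2) / (2 * 0.0005 ** 2))  # one synthetic spike
    print(detect_ap_upstrokes(trace, rate))
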
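
Several of the quietParts() and phasicTonic() variants earlier in this dump share one recipe: cut the sweep into fixed-size chunks, rank the chunks by variance, treat the lowest-variance fraction as baseline, and compare the amplitude histogram of the whole sweep against a normal curve built from that baseline. The sketch below restates that recipe under stated assumptions (chunk size, percentile, and bin range are placeholders, and scipy.stats.norm stands in for the mlab.normpdf call used in the originals).

import numpy as np
from scipy.stats import norm

def quiet_chunks(data, chunk_points, percentile=10):
    """Return the concatenated chunks whose variance falls below the given percentile."""
    n_chunks = len(data) // chunk_points
    chunks = data[:n_chunks * chunk_points].reshape(n_chunks, chunk_points)
    variances = np.var(chunks, axis=1)
    cutoff = np.percentile(variances, percentile)
    return chunks[variances <= cutoff].flatten()

def phasic_histogram(data, chunk_points, bins, value_range, percentile=10):
    """Histogram of the data minus a Gaussian fitted to the quiet (baseline) chunks."""
    hist, edges = np.histogram(data, bins=bins, range=value_range)
    baseline = quiet_chunks(data, chunk_points, percentile)
    centers = (edges[:-1] + edges[1:]) / 2
    curve = norm.pdf(centers, loc=baseline.mean(), scale=baseline.std())
    curve *= hist.max() / curve.max()                        # scale the curve to the histogram peak
    return centers, hist - curve                             # positive residual = phasic (event) signal

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    trace = rng.normal(0, 2, 100000)                         # tonic noise
    trace[::5000] -= 30                                      # sparse downward "events"
    x, residual = phasic_histogram(trace, chunk_points=1000, bins=100, value_range=(-50, 50))
    print(residual[residual > 0].sum())
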
{"signature": "def gain(abf):", "body": "Ys=np.nan_to_num(swhlab.ap.getAvgBySweep(abf,''))Xs=abf.clampValues(abf.dataX[int(abf.protoSeqX[]+)])swhlab.plot.new(abf,title=\"\",xlabel=\"\",ylabel=\"\")pylab.plot(Xs,Ys,'',ms=,alpha=,color='')pylab.axhline(,alpha=,lw=,color='',ls=\"\")pylab.margins(,)", "docstring": "easy way to plot a gain function.", "id": "f11405:m2"} {"signature": "def IV(abf,T1,T2,plotToo=True,color=''):", "body": "rangeData=abf.average_data([[T1,T2]]) AV,SD=rangeData[:,,],rangeData[:,,] Xs=abf.clampValues(T1) if plotToo:new(abf) pylab.subplot()pylab.title(\"\")pylab.xlabel(\"\")pylab.ylabel(\"\"%abf.units)sweep(abf,'',protocol=False)pylab.axis([None,None,np.min(rangeData)-,np.max(rangeData)+])pylab.axvspan(T1,T2,alpha=,color=color) pylab.margins(,)pylab.subplot()pylab.title(\"\")pylab.xlabel(\"\")pylab.ylabel(\"\"%abf.units)sweep(abf,'',protocol=False)pylab.axis([T1-,T2+,np.min(rangeData)-,np.max(rangeData)+])pylab.axvspan(T1,T2,alpha=,color=color) pylab.margins(,)pylab.subplot()pylab.title(\"\")pylab.xlabel(\"\")pylab.ylabel(\"\"%abf.unitsCommand)sweep(abf,'',protocol=True)pylab.axvspan(T1,T2,alpha=,color=color) pylab.margins(,)pylab.subplot()pylab.grid(alpha=)pylab.title(\"\")pylab.xlabel(\"\"%abf.unitsCommand)pylab.ylabel(\"\"%abf.units)pylab.errorbar(Xs,AV,SD,capsize=,marker='',color=color)if abf.units==\"\":pylab.axhline(,alpha=,lw=,color='',ls=\"\")pylab.axvline(-,alpha=,lw=,color='',ls=\"\")else:pylab.axhline(-,alpha=,lw=,color='',ls=\"\")pylab.axvline(,alpha=,lw=,color='',ls=\"\")pylab.margins(,)annotate(abf)return AV,SD", "docstring": "Given two time points (seconds) return IV data.\nOptionally plots a fancy graph (with errorbars)\nReturns [[AV],[SD]] for the given range.", "id": "f11405:m3"} {"signature": "def comments(abf,minutes=False):", "body": "if not len(abf.commentTimes):returnfor i in range(len(abf.commentTimes)):t,c = abf.commentTimes[i],abf.commentTags[i]if minutes:t=t/pylab.axvline(t,lw=,color='',ls=\"\",alpha=)X1,X2,Y1,Y2=pylab.axis()Y2=Y2-abs(Y2-Y1)*pylab.text(t,Y2,c,size=,color='',rotation='',ha='',va='',weight='',alpha=)if minutes:pylab.xlabel(\"\")else:pylab.xlabel(\"\")", "docstring": "draw vertical lines at comment points. 
Defaults to seconds.", "id": "f11405:m4"} {"signature": "def dual(ABF):", "body": "new(ABF)pylab.subplot()pylab.title(\"\")ABF.channel=sweep(ABF)pylab.subplot()pylab.title(\"\")ABF.channel=sweep(ABF)", "docstring": "Plot two channels of current sweep (top/bottom).", "id": "f11405:m5"} {"signature": "def sweep(ABF,sweep=None,rainbow=True,alpha=None,protocol=False,color='',continuous=False,offsetX=,offsetY=,minutes=False,decimate=None,newFigure=False):", "body": "if len(pylab.get_fignums())== or newFigure:new(ABF,True)if offsetY>:pylab.grid(None)if sweep is None:sweeps=[ABF.currentSweep]if not ABF.currentSweep:sweeps=[]elif sweep==\"\":sweeps=range(,ABF.sweeps)elif type(sweep) in [int,float]:sweeps=[int(sweep)]elif type(sweep) is list:sweeps=sweepelse:print(\"\",type(sweep),sweep)if continuous:offsetX=ABF.sweepIntervalcolors=[color]*len(sweeps) if rainbow and len(sweeps)>:for i in range(len(sweeps)):colors[i]=ABF.colormap[i]if alpha is None and len(sweeps)==:alpha=if rainbow and alpha is None:alpha=if alpha is None:alpha=if minutes == False:minutes=else:minutes=pylab.xlabel(\"\")ABF.decimateMethod=decimatefor i in range(len(sweeps)):ABF.setSweep(sweeps[i])if protocol:pylab.plot((np.array(ABF.protoX)/ABF.rate+offsetX*i)/minutes,ABF.protoY+offsetY*i,alpha=alpha,color=colors[i])else:pylab.plot((ABF.dataX+offsetX*i)/minutes,ABF.dataY+offsetY*i,alpha=alpha,color=colors[i])ABF.decimateMethod=Nonepylab.margins(,)", "docstring": "Load a particular sweep then plot it.\nIf sweep is None or False, just plot current dataX/dataY.\nIf rainbow, it'll make it color coded prettily.", "id": "f11405:m6"} {"signature": "def annotate(abf):", "body": "msg=\"\"%str(swhlab.VERSION)msg+=\"\"%abf.IDmsg+=\"\"%abf.channelmsg+=\"\"%abf.protoCommentmsg+=\"\"%(abf.holding,abf.units)msg+=\"\"%''.format(datetime.datetime.now())pylab.annotate(msg,(,),xycoords='',ha='',va='',color='',family='',size=,weight='')if abf.nADC>:msg=\"\"%(abf.channel+,abf.nADC)pylab.annotate(msg,(,),xycoords='',ha='',va='',color='',family='',size=,weight='')", "docstring": "stamp the bottom with file info.", "id": "f11405:m7"} {"signature": "def new(ABF,forceNewFigure=False,title=None,xlabel=None,ylabel=None):", "body": "if len(pylab.get_fignums()) and forceNewFigure==False:returnpylab.figure(figsize=(,))pylab.grid(alpha=)pylab.title(ABF.ID)pylab.ylabel(ABF.units)pylab.xlabel(\"\")if xlabel:pylab.xlabel(xlabel)if ylabel:pylab.ylabel(ylabel)if title:pylab.title(title)annotate(ABF)", "docstring": "makes a new matplotlib figure with default dims and DPI.\nAlso labels it with pA or mV depending on ABF.", "id": "f11405:m8"} {"signature": "def show(abf):", "body": "save(abf)", "docstring": "showing is the same as saving without a filename.", "id": "f11405:m9"} {"signature": "def save(abf,fname=None,tag=None,width=,close=True,facecolor='',resize=True):", "body": "if len(pylab.gca().get_lines())==:print(\"\")returnif resize:pylab.tight_layout()pylab.subplots_adjust(bottom=)annotate(abf)if tag:fname = abf.outpath+abf.ID+\"\"+tag+\"\"inchesX,inchesY = pylab.gcf().get_size_inches()dpi=width/inchesXif fname:if not os.path.exists(abf.outpath):os.mkdir(abf.outpath)print(\"\"%(os.path.basename(fname),dpi,inchesX*dpi,inchesY*dpi))pylab.savefig(fname,dpi=dpi,facecolor=facecolor)else:pylab.show()if close:pylab.close()", "docstring": "Save the pylab figure somewhere.\nIf fname==False, show it instead.\nHeight force > dpi force\nif a tag is given instead of a filename, save it alongside the ABF", "id": "f11405:m10"} {"signature": "def getUnit(name):", "body": "for key 
in UNITS:if name in key:return UNITS[key]return \"\"", "docstring": "given a column name, return the best guess unit.", "id": "f11406:m0"} {"signature": "def dictFlat(l):", "body": "if type(l) is dict:return [l]if \"\" in str(type(l)):return ldicts=[]for item in l:if type(item)==dict:dicts.append(item)elif type(item)==list:for item2 in item:dicts.append(item2)return dicts", "docstring": "Given a list of list of dicts, return just the dicts.", "id": "f11406:m2"} {"signature": "def listCount(l):", "body": "for i in range(len(l)):l[i]=len(l[i])return l", "docstring": "returns len() of each item in a list, as a list.", "id": "f11406:m3"} {"signature": "def dictVals(l,key):", "body": "dicts=dictFlat(l)vals=np.empty(len(dicts))*np.nanfor i in range(len(dicts)):if key in dicts[i]:vals[i]=dicts[i][key]return vals", "docstring": "Return all 'key' from a list of dicts. (or list of list of dicts)", "id": "f11406:m4"} {"signature": "def dictAvg(listOfDicts,key,stdErr=False):", "body": "vals=dictVals(listOfDicts,key)if len(vals) and np.any(vals):av=np.nanmean(vals)er=np.nanstd(vals)if stdErr:er=er/np.sqrt(np.count_nonzero(~np.isnan(er)))else:av,er=np.nan,np.nanreturn av,er", "docstring": "Given a list (l) of dicts (d), return AV and SD.", "id": "f11406:m5"} {"signature": "def dummyListOfDicts(size=):", "body": "titles=\"\".split(\"\")ld=[] for i in range(size):d={}for t in titles:if int(np.random.random()*)>: d[t]=float(np.random.random()*) if t==\"\" and \"\" in d.keys():d[t]=int(d[t])ld.append(d)return ld", "docstring": "returns a list (of the given size) of dicts with fake data.\nsome dictionary keys are missing for some of the items.", "id": "f11406:m6"} {"signature": "def matrixValues(matrix,key):", "body": "assert key in matrix.dtype.namescol=matrix.dtype.names.index(key)values=np.empty(len(matrix))*np.nanfor i in range(len(matrix)):values[i]=matrix[i][col]return values", "docstring": "given a key, return a list of values from the matrix with that key.", "id": "f11406:m7"} {"signature": "def matrixToDicts(data):", "body": "if \"\" in str(type(data[])):d={}for x in range(len(data)):d[data.dtype.names[x]]=data[x]return dl=[]for y in range(len(data)):d={}for x in range(len(data[y])):d[data.dtype.names[x]]=data[y][x]l.append(d)return l", "docstring": "given a recarray, return it as a list of dicts.", "id": "f11406:m8"} {"signature": "def matrixfromDicts(dicts):", "body": "if '' in str(type(dicts)):return dicts names=set([])dicts=dictFlat(dicts)for item in dicts:names=names.union(list(item.keys()))names=sorted(list(names))data=np.empty((len(dicts),len(names)),dtype=float)*np.nanfor y in range(len(dicts)):for key in dicts[y].keys():for x in range(len(names)):if names[x] in dicts[y]:data[y,x]=dicts[y][names[x]]if len(dicts):data=np.core.records.fromarrays(data.transpose(),names=names)return data", "docstring": "Give a list of dicts (or list of list of dicts) return a structured array.\nHeadings will be sorted in alphabetical order.", "id": "f11406:m9"} {"signature": "def htmlListToTR(l,trClass=None,tdClass=None,td1Class=None):", "body": "html=\"\"for item in l:if '' in str(type(item)):item=item[] html+=\"\"%itemhtml+=\"\"if trClass:html=html.replace(\"\",''%trClass)if td1Class:html=html.replace(\"\",''%td1Class,)if tdClass:html=html.replace(\"\",''%tdClass)return html", "docstring": "turns a list into a something\ncall this when generating HTML tables dynamically.", "id": "f11406:m10"} {"signature": "def html_temp_launch(html):", "body": "fname = tempfile.gettempdir()+\"\"with open(fname,'') as 
f:f.write(html)webbrowser.open(fname)", "docstring": "given text, make it a temporary HTML file and launch it.", "id": "f11406:m11"} {"signature": "def checkOut(thing,html=True):", "body": "msg=\"\"for name in sorted(dir(thing)):if not \"\" in name:msg+=\"\"%nametry:msg+=\"\"%getattr(thing,name)()except:passif html:html=''+msg+''html=html.replace(\"\",\"\").replace(\"\",\"\")fname = tempfile.gettempdir()+\"\"with open(fname,'') as f:f.write(html)webbrowser.open(fname)print(msg.replace('','').replace('',''))", "docstring": "show everything we can about an object's projects and methods.", "id": "f11406:m12"} {"signature": "def matrixToWks(data,names=None,units=None,bookName=None,sheetName=\"\",xCol=None):", "body": "if type(data) is list:data=matrixfromDicts(data)if not names:names=[\"\"]*len(data[])if data.dtype.names:names=list(data.dtype.names)if not units:units=[\"\"]*len(data[])for i in range(len(units)):if names[i] in UNITS.keys():units[i]=UNITS[names[i]]if '' in str(type(data)): data=data.view(float).reshape(data.shape + (-,))if xCol and xCol in names:xCol=names.index(xCol)names.insert(,names[xCol])units.insert(,units[xCol])data=np.insert(data,,data[:,xCol],)if not bookName:bookName=\"\"if not sheetName:sheetName=\"\"+str(time.clock())[-:]try:import PyOriginPyOrigin.LT_execute(\"\") except:print(\"\")matrixToHTML(data,names,units,bookName,sheetName,xCol)returnnrows,ncols=len(data),len(data[])if '' in str(type(data)): data=np.array(data.view(),dtype=float).reshape((nrows,ncols))data=np.transpose(data) PyOrigin.LT_execute(\"\"%(bookName,sheetName))wks=PyOrigin.ActiveLayer()while wks.GetColCount() < ncols:wks.InsertCol(wks.GetColCount(),'')for i in range(ncols):col=wks.Columns(i)col.SetLongName(names[i])col.SetUnits(units[i])wks.SetData(data,,)PyOrigin.LT_execute(\"\")PyOrigin.LT_execute(\"\")", "docstring": "Put 2d numpy data into an Origin worksheet.\nIf bookname and sheetname are given try to load data into that book/sheet.\nIf the book/sheet doesn't exist, create it.", "id": "f11406:m13"} {"signature": "def matrixToHTML(data,names=None,units=None,bookName=None,sheetName=None,xCol=None):", "body": "if not names:names=[\"\"]*len(data[])if data.dtype.names:names=list(data.dtype.names)if not units:units=[\"\"]*len(data[])for i in range(len(units)):if names[i] in UNITS.keys():units[i]=UNITS[names[i]]if '' in str(type(data)): data=data.view(float).reshape(data.shape + (-,))if xCol and xCol in names:xCol=names.index(xCol)names.insert(,names[xCol])units.insert(,units[xCol])data=np.insert(data,,data[:,xCol],)htmlFname = tempfile.gettempdir()+\"\"%(bookName,sheetName)html=\"\"\"\"\"\"html+=\"\"if bookName or sheetName:html+=''%(bookName,sheetName)html+=\"\"colNames=['']for i in range(len(units)):label=\"\"%(chr(i+ord('')),i)colNames.append(label)html+=htmlListToTR(colNames,'','')html+=htmlListToTR(['']+list(names),'',td1Class='')html+=htmlListToTR(['']+list(units),'',td1Class='')cutOff=Falsefor y in range(len(data)):html+=htmlListToTR([y+]+list(data[y]),trClass=''%(y%),td1Class='')if y>=:cutOff=Truebreakhtml+=\"\"html=html.replace(\"\",\"\")html=html.replace(\"\",\"\")if cutOff:html+=\"\"%(y,len(data))html+=\"\"with open(htmlFname,'') as f:f.write(html)webbrowser.open(htmlFname)return", "docstring": "Put 2d numpy data into a temporary HTML file.", "id": "f11406:m14"} {"signature": "def XMLtoPython(xmlStr=r\"\"):", "body": "if os.path.exists(xmlStr):with open(xmlStr) as f:xmlStr=f.read()print(xmlStr)print(\"\")return", "docstring": "given a string or a path to an XML file, return an XML 
object.", "id": "f11406:m15"} {"signature": "def XMLfromPython(xmlObj,saveAs=False):", "body": "return", "docstring": "given a an XML object, return XML string.\noptionally, save it to disk.", "id": "f11406:m16"} {"signature": "def algo_exp(x, m, t, b):", "body": "return m*np.exp(-t*x)+b", "docstring": "mono-exponential curve.", "id": "f11406:m17"} {"signature": "def fit_exp(y,graphToo=False):", "body": "x=np.arange(len(y))try:params, cv = scipy.optimize.curve_fit(algo_exp, x, y, p0=(,,))except:print(\"\"%len(x))return np.nan,np.nan,np.nan,np.nan m,t,b=paramstau=/tif graphToo:pylab.figure(figsize=(,))pylab.grid()pylab.title(\"\")pylab.plot(x,y,'',mfc='',ms=)pylab.plot(x,algo_exp(x,m,t,b),'',lw=)pylab.show()return m,t,b,tau", "docstring": "Exponential fit. Returns [multiplier, t, offset, time constant]", "id": "f11406:m18"} {"signature": "def numpyAlignXY(data):", "body": "print(data)Xs=data.flatten()[::] Xs=Xs[~np.isnan(Xs)] Xs=sorted(list(set(Xs))) aligned=np.empty((len(Xs),int(len(data[])/+)))*np.nanaligned[:,]=Xsfor col in range(,len(data[]),):for row in range(len(data)):X=data[row,col]Y=data[row,col+]if np.isnan(X) or np.isnan(Y):continuealigned[Xs.index(X),int(col/+)]=Yreturn aligned", "docstring": "given a numpy array (XYXYXY columns), return it aligned.\ndata returned will be XYYY. NANs may be returned.", "id": "f11406:m19"} {"signature": "def filter_gaussian(Ys,sigma,plotToo=False):", "body": "timeA=time.time()window=scipy.signal.gaussian(len(Ys),sigma)window/=sum(window)Ys2=np.convolve(Ys,window,'')print(\"\",len(Ys2),len(Ys))timeB=time.time()print(\"\"%((timeB-timeA)*))if len(Ys2)!=len(Ys):print(\"\")if plotToo:pylab.plot(Ys,label='',alpha=)pylab.plot(Ys2,'',label='')pylab.legend()pylab.show()return Ys2", "docstring": "simple gaussian convolution. 
Returns same # of points as gotten.", "id": "f11406:m20"} {"signature": "def where_cross(data,threshold):", "body": "Is=np.where(data>threshold)[]Is=np.concatenate(([],Is))Ds=Is[:-]-Is[:]+return Is[np.where(Ds)[]+]", "docstring": "return a list of Is where the data first crosses above threshold.", "id": "f11406:m21"} {"signature": "def show(closeToo=False):", "body": "IPython.display.display(pylab.gcf())if closeToo:pylab.close('')", "docstring": "alternative to pylab.show() that updates IPython window.", "id": "f11406:m22"} {"signature": "def originFormat_listOfDicts(l):", "body": "titles=[]for d in l:for k in d.keys():if not k in titles:titles.append(k)titles.sort()data=np.empty((len(l),len(titles)))*np.nanfor y in range(len(l)):for x in range(len(titles)):if titles[x] in l[y].keys():data[y][x]=l[y][titles[x]]return titles,data", "docstring": "Return [{},{},{}] as a 2d matrix.", "id": "f11406:m23"} {"signature": "def originFormat(thing):", "body": "if type(thing) is list and type(thing[]) is dict:return originFormat_listOfDicts(thing)if type(thing) is list and type(thing[]) is list:return originFormat_listOfDicts(dictFlat(thing))else:print(\"\")print(thing)", "docstring": "Try to format anything as a 2D matrix with column names.", "id": "f11406:m24"} {"signature": "def pickle_load(fname):", "body": "thing = pickle.load(open(fname,\"\"))return thing", "docstring": "return the contents of a pickle file", "id": "f11406:m25"} {"signature": "def pickle_save(thing,fname):", "body": "pickle.dump(thing, open(fname,\"\"),pickle.HIGHEST_PROTOCOL)return thing", "docstring": "save something to a pickle file", "id": "f11406:m26"} {"signature": "def getPkl(fname): ", "body": "thing = pickle.load(open(fname,\"\"))return thing", "docstring": "return the contents of a pickle file", "id": "f11406:m27"} {"signature": "def msgDict(d,matching=None,sep1=\"\",sep2=\"\",sort=True,cantEndWith=None):", "body": "msg=\"\"if \"\" in str(type(d)):keys=d.dtype.nameselse:keys=d.keys()if sort:keys=sorted(keys)for key in keys:if key[]==\"\":continueif matching:if not key in matching:continueif cantEndWith and key[-len(cantEndWith)]==cantEndWith:continueif '' in str(type(d[key])):s=\"\"%d[key]else:s=str(d[key])if \"\" in s:s=''msg+=key+sep1+s+sep2return msg.strip()", "docstring": "convert a dictionary to a pretty formatted string.", "id": "f11406:m28"} {"signature": "def groupsFromKey(keyFile=''):", "body": "groups={}thisGroup=\"\"with open(keyFile) as f:raw=f.read().split(\"\")for line in raw:line=line.strip()if len(line)<:continueif \"\" in line:thisGroup=line.split(\"\")[]groups[thisGroup]=[]else:groups[thisGroup]=groups[thisGroup]+[line]return groups", "docstring": "given a groups file, return a dict of groups.\nExample:\n ### GROUP: TR\n 16602083\n 16608059\n ### GROUP: TU\n 16504000\n 16507011", "id": "f11406:m33"} {"signature": "def findRelevantData(fileList,abfs):", "body": "relevant=[]things={}for abf in abfs:for fname in fileList:if abf in fname and not fname in relevant:relevant.append(fname)for item in sorted(relevant):thing = os.path.basename(item)if \"\" in thing:continueif not \"\" in thing:continuething=thing.split(\"\")[-].split(\"\")[]if not thing in things.keys(): things[thing]=itemreturn things", "docstring": "return an abf of the *FIRST* of every type of thing.", "id": "f11406:m34"} {"signature": "def determineProtocol(fname):", "body": "f=open(fname,'')raw=f.read() f.close()protoComment=\"\"if b\"\" in raw:protoComment=raw.split(b\"\")[].split(b\"\",)[]elif b\"\" in 
raw:protoComment=raw.split(b\"\")[].split(b\"\",)[]else:protoComment=\"\"if not type(protoComment) is str:protoComment=protoComment.decode(\"\")return protoComment", "docstring": "determine the comment cooked in the protocol.", "id": "f11406:m35"} {"signature": "def forwardSlash(listOfFiles):", "body": "for i,fname in enumerate(listOfFiles):listOfFiles[i]=fname.replace(\"\",\"\")return listOfFiles", "docstring": "convert silly C:\\\\names\\\\like\\\\this.txt to c:/names/like/this.txt", "id": "f11406:m36"} {"signature": "def scanABFfolder(abfFolder):", "body": "assert os.path.isdir(abfFolder)filesABF=forwardSlash(sorted(glob.glob(abfFolder+\"\")))filesSWH=[]if os.path.exists(abfFolder+\"\"):filesSWH=forwardSlash(sorted(glob.glob(abfFolder+\"\")))groups=getABFgroups(filesABF)return filesABF,filesSWH,groups", "docstring": "scan an ABF directory and subdirectory. Try to do this just once.\nReturns ABF files, SWHLab files, and groups.", "id": "f11406:m37"} {"signature": "def getParent(abfFname):", "body": "child=os.path.abspath(abfFname)files=sorted(glob.glob(os.path.dirname(child)+\"\"))parentID=abfFname for fname in files:if fname.endswith(\"\") and fname.replace(\"\",\"\") in files:parentID=os.path.basename(fname).replace(\"\",\"\")if os.path.basename(child) in fname:breakreturn parentID", "docstring": "given an ABF file name, return the ABF of its parent.", "id": "f11406:m38"} {"signature": "def getParent2(abfFname,groups):", "body": "if \"\" in abfFname:abfFname=os.path.basename(abfFname).replace(\"\",\"\")for parentID in groups.keys():if abfFname in groups[parentID]:return parentIDreturn abfFname", "docstring": "given an ABF and the groups dict, return the ID of its parent.", "id": "f11406:m39"} {"signature": "def getNotesForABF(abfFile):", "body": "parent=getParent(abfFile)parent=os.path.basename(parent).replace(\"\",\"\")expFile=os.path.dirname(abfFile)+\"\"if not os.path.exists(expFile):return \"\"with open(expFile) as f:raw=f.readlines()for line in raw:if line[]=='':line=line[:].strip()if line.startswith(parent):while \"\" in line:line=line.replace(\"\",\"\")line=line.replace(\"\",\"\")return linereturn \"\"%parent", "docstring": "given an ABF, find the parent, return that line of experiments.txt", "id": "f11406:m40"} {"signature": "def getABFgroups(files):", "body": "children=[]groups={}for fname in sorted(files):if fname.endswith(\"\"):if fname.replace(\"\",\"\") in files: if len(children):groups[children[]]=childrenchildren=[os.path.basename(fname)[:-]]else:children.append(os.path.basename(fname)[:-])groups[children[]]=childrenreturn groups", "docstring": "given a list of ALL files (not just ABFs), return a dict[ID]=[ID,ID,ID].\nParents are determined if a .abf matches a .TIF.\nThis is made to assign children files to parent ABF IDs.", "id": "f11406:m41"} {"signature": "def getIDfileDict(files):", "body": "d={}orphans=[]for fname in files:if fname.endswith(\"\"):d[os.path.basename(fname)[:-]]=[]for fname in files:if fname.endswith(\"\") or fname.endswith(\"\"):continue if len(os.path.basename(fname).split(\"\")[])>=:ID = os.path.basename(fname)[:] else:ID = os.path.basename(fname).split(\"\")[] if ID in d.keys():d[ID]=d[ID]+[fname]else:orphans.append(os.path.basename(fname))if orphans:print(\"\"%len(orphans))return d", "docstring": "given a list of files, return a dict[ID]=[files].\nThis is made to assign children files to parent ABF IDs.", "id": "f11406:m42"} {"signature": "def getIDsFromFiles(files):", "body": "if type(files) is str:files=glob.glob(files+\"\")IDs=[]for fname in 
files:if fname[-:].lower()=='':ext=fname.split('')[-]IDs.append(os.path.basename(fname).replace(''+ext,''))return sorted(IDs)", "docstring": "given a path or list of files, return ABF IDs.", "id": "f11406:m43"} {"signature": "def inspectABF(abf=exampleABF,saveToo=False,justPlot=False):", "body": "pylab.close('')print(\"\")if type(abf) is str:abf=swhlab.ABF(abf)swhlab.plot.new(abf,forceNewFigure=True)if abf.sweepInterval*abf.sweeps<*: pylab.subplot()pylab.title(\"\"%(abf.ID,abf.protoComment))swhlab.plot.sweep(abf,'')pylab.subplot()swhlab.plot.sweep(abf,'',continuous=True)swhlab.plot.comments(abf)else:print(\"\")swhlab.plot.sweep(abf,'',continuous=True,minutes=True)swhlab.plot.comments(abf,minutes=True)pylab.title(\"\"%(abf.ID,abf.protoComment))swhlab.plot.annotate(abf)if justPlot:returnif saveToo:path=os.path.split(abf.fname)[]basename=os.path.basename(abf.fname)pylab.savefig(os.path.join(path,\"\"+basename.replace(\"\",\"\")))pylab.show()return", "docstring": "May be given an ABF object or filename.", "id": "f11406:m44"} {"signature": "def ftp_login(folder=None):", "body": "pwDir=os.path.realpath(__file__)for i in range():pwDir=os.path.dirname(pwDir)pwFile = os.path.join(pwDir,\"\")print(\"\"%pwFile)try:with open(pwFile) as f:lines=f.readlines()username=lines[].strip()password=lines[].strip()print(\"\")except:print(\"\")username=TK_askPassword(\"\",\"\")password=TK_askPassword(\"\",\"\"%username)if not username or not password:print(\"\")returnprint(\"\",username)print(\"\",\"\"*(len(password)))print(\"\")try:ftp = ftplib.FTP(\"\")ftp.login(username, password)if folder:ftp.cwd(folder)return ftpexcept:print(\"\")return False", "docstring": "return an \"FTP\" object after logging in.", "id": "f11406:m47"} {"signature": "def ftp_folder_match(ftp,localFolder,deleteStuff=True):", "body": "for fname in glob.glob(localFolder+\"\"):ftp_upload(ftp,fname)return", "docstring": "upload everything from localFolder into the current FTP folder.", "id": "f11406:m48"} {"signature": "def version_upload(fname,username=\"\"):", "body": "print(\"\")password=TK_askPassword(\"\",\"\"%username)if not password:returnprint(\"\",username)print(\"\",\"\"*(len(password)))print(\"\")ftp = ftplib.FTP(\"\")ftp.login(username, password)print(\"\")ftp.cwd(\"\") print(\"\",os.path.basename(fname))ftp.storbinary(\"\" + os.path.basename(fname), open(fname, \"\"), ) print(\"\")ftp.quit()", "docstring": "Only scott should do this. 
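
ftp_login() and version_upload() wrap the standard-library ftplib workflow: connect, log in, change directory, and push a file with a binary STOR command. A bare-bones sketch of that flow is below; the host, folder, and credentials shown are placeholders, not the project's real values.

import ftplib
import os

def upload_file(host, username, password, remote_folder, local_path):
    """Log in to an FTP server and upload one file in binary mode."""
    ftp = ftplib.FTP(host)
    ftp.login(username, password)
    ftp.cwd(remote_folder)
    with open(local_path, "rb") as fh:
        ftp.storbinary("STOR " + os.path.basename(local_path), fh)
    ftp.quit()

# upload_file("ftp.example.com", "user", "secret", "/software/", "swhlab.zip")  # placeholder values
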
Upload new version to site.", "id": "f11406:m50"} {"signature": "def TK_askPassword(title=\"\",msg=\"\"):", "body": "root = tkinter.Tk()root.withdraw() root.attributes(\"\", True) root.lift() value=tkinter.simpledialog.askstring(title,msg)root.destroy()return value", "docstring": "use the GUI to ask for a string.", "id": "f11406:m51"} {"signature": "def TK_message(title,msg):", "body": "root = tkinter.Tk()root.withdraw() root.attributes(\"\", True) root.lift() tkinter.messagebox.showwarning(title, msg)root.destroy()", "docstring": "use the GUI to pop up a message.", "id": "f11406:m52"} {"signature": "def TK_ask(title,msg):", "body": "root = tkinter.Tk()root.attributes(\"\", True) root.withdraw() result=tkinter.messagebox.askyesno(title,msg)root.destroy()return result", "docstring": "use the GUI to ask YES or NO.", "id": "f11406:m53"} {"signature": "def image_convert(fname,saveAs=True,showToo=False):", "body": "im=scipy.ndimage.imread(fname) im=np.array(im,dtype=float) cutoffLow=np.percentile(im,)cutoffHigh=np.percentile(im,)im[np.where(imim[np.where(im>cutoffHigh)]=cutoffHighim-=np.min(im) im/=np.max(im) im*= im = Image.fromarray(im)msg=\"\"%os.path.basename(fname)timestamp = datetime.datetime.fromtimestamp(os.path.getctime(fname))msg+=\"\"%timestamp.strftime('')d = ImageDraw.Draw(im)fnt = ImageFont.truetype(\"\", )d.text((,),msg,font=fnt,fill=)d.text((,),msg,font=fnt,fill=)if showToo:im.show()if saveAs is False:returnif saveAs is True:saveAs=fname+\"\"im.convert('').save(saveAs)", "docstring": "Convert weird TIF files into web-friendly versions.\nAuto contrast is applied (saturating lower and upper 0.1%).\n make saveAs True to save as .TIF.png\n make saveAs False and it won't save at all\n make saveAs \"someFile.jpg\" to save it as a different path/format", "id": "f11406:m54"} {"signature": "def __init__(self,ABFfname=None,debugLevel=,saveInfo=False):", "body": "self.valid=Falseif ABFfname is None:return if not type(ABFfname) is str:raise Exception('')if not ABFfname.lower().endswith(\"\"):raise Exception(''%ABFfname)ABFfname=os.path.abspath(ABFfname)if not os.path.exists(ABFfname):raise Exception(''%ABFfname)if saveInfo:print(\"\",os.path.basename(ABFfname))self.fname = os.path.abspath(ABFfname)self.ID = os.path.basename(self.fname).replace(\"\",\"\")self.outpath = os.path.join(os.path.split(self.fname)[],\"\")self.outpre = os.path.abspath(self.outpath+\"\"+self.ID+\"\") self.reader = neo.io.AxonIO(ABFfname)self.valid=Falsetry:self.header = self.reader.read_header()self.block = self.reader.read_block(lazy=False, cascade=True) except:print(\"\")returnself.valid=TruestartDay=time.strptime(str(self.header[\"\"]), '')self.timestamp=time.mktime(startDay)+self.header[\"\"]/ self.units = self.header[''][][''].decode('') self.unitsCommand = self.header[''][][''].decode('') self.holding = self.header[''][][''] self.rate = int(/self.header['']['']) self.timebase = self.header[''][''] self.nADC = self.header[''][''][''] self.sweeps = self.header['']self.gapFree=(self.sweeps==)if self.gapFree:self.sweeps=self.sweepSize = self.header['']['']/self.nADCself.sweepLength = self.sweepSize/self.rate self.sweepInterval = self.header[''][''] if self.sweepInterval==:self.sweepInterval=self.sweepLengthself.commentTags = self.block.segments[].eventarrays[].annotations['']self.commentTags = [x.decode('') for x in self.commentTags]self.commentTimes = np.array(self.block.segments[].eventarrays[].times)/self.nADCself.commentTimes = self.commentTimes/ self.commentSweeps = 
np.array(self.commentTimes/self.sweepInterval,dtype=int)self.baseline=[None,None] self.decimateMethod=None self.decimateBy= self.offsetX = int(self.sweepSize/) self.offsetY = self.dataY = None self.dataX = None self.dataStart = None self.currentSweep = None self.channel = self.generate_colormap()self.generate_protocol()self.protoComment=cm.determineProtocol(self.fname)if saveInfo:self.saveThing(self.abfinfo(returnDict=True),'',overwrite=False)", "docstring": "SWHLab4 ABF class.\n Basic usage:\n 1.) call this with an ABF filename\n 2.) select a sweep with setSweep()\n 3.) reference data by ABF.dataX and ABF.dataY", "id": "f11408:c0:m0"} {"signature": "def abfinfo(self,printToo=False,returnDict=False):", "body": "info=\"\"d={}for thingName in sorted(dir(self)):if thingName in ['','','','','','','']:continueif \"\" in thingName:continuething=getattr(self,thingName)if type(thing) is list and len(thing)>:continuethingType=str(type(thing)).split(\"\")[]if \"\" in thingType or \"\" in thingType:continueif thingName in [\"\",\"\"]:continueinfo+=\"\"%(thingName,thingType,thing)d[thingName]=thingif printToo:print()for line in info.split(\"\"):if len(line)<:continueprint(\"\",line)print()if returnDict:return dreturn info", "docstring": "show basic info about ABF class variables.", "id": "f11408:c0:m1"} {"signature": "def headerHTML(self,fname=None):", "body": "if fname is None:fname = self.fname.replace(\"\",\"\")html=\"\"html+=\"\"%self.IDhtml+=self.abfinfo().replace(\"\",\"\").replace(\">\",\"\").replace(\"\",\"\")html+=\"\"%self.IDhtml+=pprint.pformat(self.header, indent=)html=html.replace(\"\",'').replace(\"\",\"\")html=html.replace(r\"\",\"\")html+=\"\"print(\"\")print(fname)f=open(fname,'')f.write(html)f.close()", "docstring": "read the ABF header and save it HTML formatted.", "id": "f11408:c0:m2"} {"signature": "def generate_colormap(self,colormap=None,reverse=False):", "body": "if colormap is None:colormap = pylab.cm.Dark2self.cm=colormapself.colormap=[]for i in range(self.sweeps): self.colormap.append(colormap(i/self.sweeps))if reverse:self.colormap.reverse()", "docstring": "use 1 colormap for the whole abf. 
You can change it!.", "id": "f11408:c0:m3"} {"signature": "def setSweep(self,sweep=,force=False):", "body": "if sweep is None or sweep is False:sweep=if sweep<:sweep=self.sweeps-sweep if sweep<: sweep= if sweep>(self.sweeps-):print(\"\"%(sweep,self.sweeps-))sweep=self.sweeps-sweep=int(sweep)try:if self.currentSweep==sweep and force==False:returnself.currentSweep=sweepself.dataY = self.block.segments[sweep].analogsignals[self.channel]self.dataY = np.array(self.dataY)B1,B2=self.baselineif B1==None:B1=else:B1=B1*self.rateif B2==None:B2==self.sweepSizeelse:B2=B2*self.rateself.dataY-=np.average(self.dataY[self.baseline[]*self.rate:self.baseline[]*self.rate])self.sweep_genXs()self.sweep_decimate()self.generate_protocol(sweep=sweep)self.dataStart = self.sweepInterval*self.currentSweepexcept Exception:print(\"\"*,\"\",traceback.format_exc(),'',\"\"*)return self.dataX,self.dataY", "docstring": "Load X/Y data for a particular sweep.\n determines if forced reload is needed, updates currentSweep,\n regenerates dataX (if not None),decimates,returns X/Y.\n Note that setSweep() takes 0.17ms to complete, so go for it!", "id": "f11408:c0:m4"} {"signature": "def sweep_genXs(self):", "body": "if self.decimateMethod:self.dataX=np.arange(len(self.dataY))/self.rateself.dataX*=self.decimateByreturnif self.dataX is None or len(self.dataX)!=len(self.dataY):self.dataX=np.arange(len(self.dataY))/self.rate", "docstring": "generate sweepX (in seconds) to match sweepY", "id": "f11408:c0:m5"} {"signature": "def sweep_decimate(self):", "body": "if len(self.dataY)returnif self.decimateMethod:points = int(len(self.dataY)/self.decimateBy)self.dataY=self.dataY[:points*self.decimateBy]self.dataY = np.reshape(self.dataY,(points,self.decimateBy))if self.decimateMethod=='':self.dataY = np.average(self.dataY,)elif self.decimateMethod=='':self.dataY = np.max(self.dataY,)elif self.decimateMethod=='':self.dataY = np.min(self.dataY,)elif self.decimateMethod=='':self.dataY = self.dataY[:,]else:print(\"\",self.decimateMethod)self.dataX = np.arange(len(self.dataY))/self.rate*self.decimateBy", "docstring": "decimate data using one of the following methods:\n 'avg','max','min','fast'\nThey're self explainatory. 'fast' just plucks the n'th data point.", "id": "f11408:c0:m6"} {"signature": "def get_data_around(self,timePoints,thisSweep=False,padding=,msDeriv=):", "body": "if not np.array(timePoints).shape:timePoints=[float(timePoints)]data=Nonefor timePoint in timePoints:if thisSweep:sweep=self.currentSweepelse:sweep=int(timePoint/self.sweepInterval)timePoint=timePoint-sweep*self.sweepIntervalself.setSweep(sweep)if msDeriv:dx=int(msDeriv*self.rate/) newData=(self.dataY[dx:]-self.dataY[:-dx])*self.rate//dxelse:newData=self.dataYpadPoints=int(padding*self.rate)pad=np.empty(padPoints)*np.nanIc=timePoint*self.rate newData=np.concatenate((pad,pad,newData,pad,pad))Ic+=padPoints*newData=newData[Ic-padPoints:Ic+padPoints]newData=newData[:int(padPoints*)] if data is None:data=[newData]else:data=np.vstack((data,newData))return data", "docstring": "return self.dataY around a time point. All units are seconds.\nif thisSweep==False, the time point is considered to be experiment time\n and an appropriate sweep may be selected. 
i.e., with 10 second\n sweeps and timePint=35, will select the 5s mark of the third sweep", "id": "f11408:c0:m7"} {"signature": "def generate_protocol(self,sweep=None):", "body": "if sweep is None:sweep = self.currentSweepif sweep is None:sweep = if not self.channel in self.header[''].keys():self.protoX=[,self.sweepSize]self.protoY=[self.holding,self.holding]self.protoSeqX=self.protoXself.protoSeqY=self.protoYreturnproto=self.header[''][self.channel]self.protoX=[] self.protoY=[] self.protoX.append()self.protoY.append(self.holding)for step in proto:dX = proto[step]['']Y = proto[step]['']+proto[step]['']*sweepself.protoX.append(self.protoX[-])self.protoY.append(Y) self.protoX.append(self.protoX[-]+dX) self.protoY.append(Y) if self.header[''][]['']: finalVal=self.protoY[-] else:finalVal=self.holding self.protoX.append(self.protoX[-])self.protoY.append(finalVal)self.protoX.append(self.sweepSize)self.protoY.append(finalVal)for i in range(,len(self.protoX)-): self.protoX[i]=self.protoX[i]+self.offsetXself.protoSeqY=[self.protoY[]]self.protoSeqX=[self.protoX[]]for i in range(,len(self.protoY)):if not self.protoY[i]==self.protoY[i-]:self.protoSeqY.append(self.protoY[i])self.protoSeqX.append(self.protoX[i])if self.protoY[]!=self.protoY[]:self.protoY.insert(,self.protoY[])self.protoX.insert(,self.protoX[])self.protoY.insert(,self.protoY[])self.protoX.insert(,self.protoX[]+self.offsetX/)self.protoSeqY.append(finalVal)self.protoSeqX.append(self.sweepSize)self.protoX=np.array(self.protoX)self.protoY=np.array(self.protoY)", "docstring": "Create (x,y) points necessary to graph protocol for the current sweep.", "id": "f11408:c0:m8"} {"signature": "def clampValues(self,timePoint=):", "body": "Cs=np.zeros(self.sweeps)for i in range(self.sweeps):self.setSweep(i) for j in range(len(self.protoSeqX)):if self.protoSeqX[j]<=timePoint*self.rate:Cs[i]=self.protoSeqY[j]return Cs", "docstring": "return an array of command values at a time point (in sec).\nUseful for things like generating I/V curves.", "id": "f11408:c0:m9"} {"signature": "def guess_protocol(self):", "body": "clamp=\"\"if self.units==\"\":clamp=\"\"command=\"\"if self.sweeps>:self.setSweep()P0=str(self.protoX)+str(self.protoY)self.setSweep()P1=str(self.protoX)+str(self.protoY)if not P0==P1:command=\"\"tags=\"\"if len(self.commentSweeps):tags=\"\"ch=\"\"if self.nADC>:ch=\"\"guess=\"\".join([clamp,command,tags,ch])return guess", "docstring": "This just generates a string to define the nature of the ABF.\nThe ultimate goal is to use info about the abf to guess what to do with it.\n [vc/ic]-[steps/fixed]-[notag/drugs]-[2ch/1ch]\n This represents 2^4 (18) combinations, but is easily expanded.", "id": "f11408:c0:m10"} {"signature": "def average_sweep(self,T1=,T2=None,sweeps=None,stdErr=False):", "body": "T1=T1*self.rateif T2 is None:T2 = self.sweepSize-else:T2 = T2*self.rateif sweeps is None:sweeps = range(self.sweeps)Ys=np.empty((len(sweeps),(T2-T1)))for i in range(len(sweeps)):self.setSweep(sweeps[i])Ys[i]=self.dataY[T1:T2]Av = np.average(Ys,)Es = np.std(Ys,)Xs = self.dataX[T1:T2]if stdErr: Es = Es/np.sqrt(len(sweeps))return Xs,Av,Es", "docstring": "given an array of sweeps, return X,Y,Err average.\nThis returns *SWEEPS* of data, not just 1 data point.", "id": "f11408:c0:m11"} {"signature": "def average_data(self,ranges=[[None,None]],percentile=None):", "body": "ranges=copy.deepcopy(ranges) for i in range(len(ranges)):if ranges[i][] is None:ranges[i][] = else:ranges[i][] = int(ranges[i][]*self.rate)if ranges[i][] is None:ranges[i][] = -else:ranges[i][] = 
int(ranges[i][]*self.rate)datas=np.empty((self.sweeps,len(ranges),)) for iSweep in range(self.sweeps):self.setSweep(iSweep)for iRange in range(len(ranges)):I1=ranges[iRange][]I2=ranges[iRange][]if percentile:datas[iSweep][iRange][]=np.percentile(self.dataY[I1:I2],percentile)else:datas[iSweep][iRange][]=np.average(self.dataY[I1:I2])datas[iSweep][iRange][]=np.std(self.dataY[I1:I2])return datas", "docstring": "given a list of ranges, return single point averages for every sweep.\nUnits are in seconds. Expects something like:\n ranges=[[1,2],[4,5],[7,7.5]]\nNone values will be replaced with maximum/minimum bounds.\nFor baseline subtraction, make a range baseline then sub it yourself.\n returns datas[iSweep][iRange][AVorSD]\nif a percentile is given, return that percentile rather than average.\n percentile=50 is the median, but requires sorting, and is slower.", "id": "f11408:c0:m12"} {"signature": "def filter_gaussian(self,sigmaMs=,applyFiltered=False,applyBaseline=False):", "body": "if sigmaMs==:return self.dataYfiltered=cm.filter_gaussian(self.dataY,sigmaMs)if applyBaseline:self.dataY=self.dataY-filteredelif applyFiltered:self.dataY=filteredelse:return filtered", "docstring": "RETURNS filtered trace. Doesn't filter it in place.", "id": "f11408:c0:m13"} {"signature": "def saveThing(self,thing,fname,overwrite=True,ext=\"\"):", "body": "if not os.path.exists(os.path.dirname(self.outpre)):os.mkdir(os.path.dirname(self.outpre))if ext and not ext in fname:fname+=extfname=self.outpre+fnameif overwrite is False:if os.path.exists(fname):print(\"\"%os.path.basename(fname))returntime1=cm.timethis()pickle.dump(thing, open(fname,\"\"),pickle.HIGHEST_PROTOCOL)print(\"\"%(os.path.basename(fname),str(type(thing)),sys.getsizeof(pickle.dumps(thing, -))/,cm.timethis(time1)))", "docstring": "save any object as /swhlab4/ID_[fname].pkl", "id": "f11408:c0:m14"} {"signature": "def loadThing(self,fname,ext=\"\"):", "body": "if ext and not ext in fname:fname+=extfname=self.outpre+fnametime1=cm.timethis()thing = pickle.load(open(fname,\"\"))print(\"\"%(os.path.basename(fname),sys.getsizeof(pickle.dumps(thing, -))/,cm.timethis(time1)))return thing", "docstring": "load any object from /swhlab4/ID_[fname].pkl", "id": "f11408:c0:m15"} {"signature": "def deleteStuff(self,ext=\"\",spareInfo=True,spare=[\"\"]):", "body": "print(\"\"+ext)for fname in sorted(glob.glob(self.outpre+ext)):reallyDelete=Truefor item in spare:if item in fname:reallyDelete=Falseif reallyDelete:os.remove(fname)", "docstring": "delete /swhlab4/ID_*", "id": "f11408:c0:m16"} {"signature": "def plot_standard4(abf=exampleABF):", "body": "if abf.sweeps<:returnswhlab.plot.new(abf)Xs=np.arange(abf.sweeps)*abf.sweepInterval/subplots=[,,,]features=['','','','']units=['','','','']for subplot,feature,unit in zip(subplots,features,units):pylab.subplot(subplot)pylab.grid(alpha=)pylab.plot(Xs,cm.dictVals(abf.MTs,feature),'',alpha=)pylab.xlabel(None)pylab.ylabel(\"\"%(feature,unit))swhlab.plot.comments(abf,True)pylab.margins(,)", "docstring": "make a standard memtest plot showing Ih, Ra, etc. 
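The average_data record above (f11408:c0:m12) describes a reduction API worth a usage illustration. The following sketch is based only on that docstring; the filename, the window values, and the assumption that index 0 holds the average and index 1 the standard deviation are illustrative, not taken from the original code.

    import swhlab

    abf = swhlab.ABF("16d01001.abf")                      # hypothetical ABF filename
    # average each sweep over a baseline window and a response window (times in seconds)
    datas = abf.average_data(ranges=[[0.0, 0.5], [1.0, 1.5]])
    baseline_avg, baseline_sd = datas[0][0]               # sweep 0, range 0 -> (average, stdev), indices assumed
    response_avg, response_sd = datas[0][1]               # sweep 0, range 1
    print(response_avg - baseline_avg)                    # manual baseline subtraction, as the docstring suggests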
with time.", "id": "f11409:m3"} {"signature": "def checkSweepIC(abf=exampleABF,sweep=):", "body": "_keys = abf.MTs.dtype.namesfor key in _keys:globals()[key]=abf.MTs[key] fitted=cm.algo_exp(np.arange(TCB-TCA),m,t,b)swhlab.plot.new(abf,forceNewFigure=True)Xs,Ys,Er=abf.average_sweep()for subplot in [,]:pylab.subplot(subplot)pylab.axhline(,color='',lw=,alpha=,ls=\"\")pylab.axhline(M2,color='',lw=,alpha=,ls=\"\")swhlab.plot.sweep(abf,'',rainbow=False,color='',alpha=)pylab.plot(Xs,Ys,color='',alpha=)pylab.plot(Xs[T1A:T1B],Ys[T1A:T1B],color='',lw=)pylab.plot(Xs[T2A:T2B],Ys[T2A:T2B],color='',lw=)pylab.plot(abf.dataX[TCA:TCB],fitted,color='',lw=,ls='')pylab.axis([(TCA-)/abf.rate,(TCB+)/abf.rate,None,None])pylab.tight_layout()msg=\"\"%(tc/abf.rate*)msg+=\"\"%(Rm)msg+=\"\"%(Cm)pylab.annotate(msg,(,),ha='',va='',weight='',family='',xycoords='',size=,color='')swhlab.plot.annotate(abf)return", "docstring": "Produce an eyeball-ready indication how the MT was calculated in IC.", "id": "f11409:m4"} {"signature": "def checkSweep(abf=exampleABF,sweep=):", "body": "if abf.units==\"\":return checkSweepIC(abf,sweep)if abf.MTs[sweep] is None:return False _keys = abf.MTs[sweep].dtype.namesfor key in _keys:globals()[key]=abf.MTs[sweep][key] _msg2=\"\"%abf.sweeps_msg=\"\"for i in range(len(_keys)):_msg+=\"\"%(_keys[i],abf.MTs[sweep][i])if _keys[i] in ['','','','','','']:_msg2+=\"\"%(_keys[i],abf.MTs[sweep][i])fitted=cm.algo_exp(np.arange(TCB-TCA),fitM,fitT,fitB)pylab.figure(figsize=(,))for subplot in [,]:pylab.subplot(subplot)pylab.plot(abf.dataX[:TCA],abf.dataY[:TCA],alpha=,color='',lw=)pylab.plot(abf.dataX[TCB:],abf.dataY[TCB:],alpha=,color='',lw=)pylab.plot(abf.dataX[TCA:TCB],abf.dataY[TCA:TCB],'',alpha=,lw=,mfc='',mec='')pylab.plot(abf.dataX[T1A:T1B],abf.dataY[T1A:T1B],alpha=,color='')pylab.plot(abf.dataX[T2A:T2B],abf.dataY[T2A:T2B],alpha=,color='')pylab.plot(abf.dataX[TCA:TCB],fitted,color='',lw=,ls=\"\")for i in [TA, TB]:pylab.axvline(i/abf.rate,color='',ls='',alpha=)for i in [P1,P2]:pylab.axhline(i,color='',ls=\"\",alpha=)for i in [PCA,PCB,PP]:pylab.axhline(i,color='',ls=\"\",alpha=)pylab.tight_layout()pylab.subplots_adjust(right=)pylab.annotate(_msg,(,),ha='',va='',alpha=,xycoords='',family='',size=)pylab.annotate(_msg2,(,),ha='',va='',weight='',family='',xycoords='',size=,color='')pylab.subplot()pylab.axis([None,abf.dataX[T2B]+,None,None])pylab.subplot()pylab.axis([(TB-)/abf.rate,(TCB+)/abf.rate,P1-,PP+])swhlab.plot.annotate(abf)for key in _keys:del key return", "docstring": "Produce an eyeball-ready indication how the MT was calculated in VC.", "id": "f11409:m5"} {"signature": "def convert(fname,saveAs=True,showToo=False):", "body": "im=ndimage.imread(fname) im=np.array(im,dtype=float) cutoffLow=np.percentile(im,)cutoffHigh=np.percentile(im,)im[np.where(imim[np.where(im>cutoffHigh)]=cutoffHighim-=np.min(im) im/=np.max(im) im*= im = Image.fromarray(im)msg=\"\"%os.path.basename(fname)msg+=\"\"%cm.epochToString(os.path.getmtime(fname))d = ImageDraw.Draw(im)fnt = ImageFont.truetype(\"\", )d.text((,),msg,font=fnt,fill=)d.text((,),msg,font=fnt,fill=)if showToo:im.show()if saveAs is False:returnif saveAs is True:saveAs=fname+\"\"im.convert('').save(saveAs)return saveAs", "docstring": "Convert weird TIF files into web-friendly versions.\nAuto contrast is applied (saturating lower and upper 0.1%).\n make saveAs True to save as .TIF.png\n make saveAs False and it won't save at all\n make saveAs \"someFile.jpg\" to save it as a different path/format", "id": "f11411:m0"} {"signature": "def 
genPNGs(folder,files=None):", "body": "if files is None:files=glob.glob(folder+\"\")new=[]for fname in files:ext=os.path.basename(fname).split(\"\")[-].lower()if ext in ['','']:if not os.path.exists(fname+\"\"):print(\"\"%os.path.basename(fname))cm.image_convert(fname)new.append(fname) else:passreturn new", "docstring": "Convert each TIF to PNG. Return filenames of new PNGs.", "id": "f11412:m0"} {"signature": "def htmlABFcontent(ID,group,d):", "body": "html=\"\"files=[]for abfID in group:files.extend(d[abfID])files=sorted(files)html+=\"\"for fname in files:if \"\" in fname.lower() and not \"\" in fname:fname=\"\"+os.path.basename(fname)html+=''%(fname,fname)html+=\"\"lastID=''for fname in sorted(files):if not \"\" in fname:continueID=os.path.basename(fname).split(\"\")[]if not ID==lastID:lastID=IDhtml+=\"\"%os.path.basename(fname).split(\"\")[]if \"\" in fname.lower():fname=os.path.basename(fname)html+=''%(fname,fname)continuehtml+=\"\"for fname in files:if not \"\" in fname:continueif \"\" in fname:callit=os.path.basename(fname)thing=cm.getPkl(fname)if \"\" in fname:callit+=\"\"thing=cm.dictFlat(thing)if len(thing):thing=thing[]elif \"\" in fname:if type(thing) == dict:callit+=\"\"else:callit+=\"\"thing=thing[]elif \"\" in fname:continue elif \"\" in fname or \"\" in fname:pass else:print(\"\"%os.path.basename(fname))continueif type(thing) is dict:thing=cm.msgDict(thing)if type(thing) is list:out=''for item in thing:out+=str(item)+\"\"thing=outthing=str(thing) thing=\"\"%os.path.basename(fname)+thinghtml+=\"\"%(os.path.basename(fname),thing)return html", "docstring": "generate text to go inside for single ABF page.", "id": "f11412:m1"} {"signature": "def htmlABF(ID,group,d,folder,overwrite=False):", "body": "fname=folder+\"\"%IDif overwrite is False and os.path.exists(fname):returnhtml=TEMPLATES['']html=html.replace(\"\",ID)html=html.replace(\"\",htmlABFcontent(ID,group,d))print(\"\"%os.path.basename(fname))with open(fname,'') as f:f.write(html)return", "docstring": "given an ID and the dict of files, generate a static html for that abf.", "id": "f11412:m2"} {"signature": "def expMenu(groups,folder):", "body": "orphans = sorted(list(groups.keys()))menu=[]if os.path.exists(folder+''):with open(folder+'') as f:raw=f.read()else:raw=\"\"for line in raw.split(\"\"):item={}if len(line)==:continueif line.startswith(\"\"):line=line[:].split(\"\",)item[\"\"]=line[]item[\"\"]=''if len(line)>:item[\"\"]=line[]else:item[\"\"]=\"\"if len(line)> and len(line[]):item[\"\"]=line[]if item[\"\"][]==\"\":item[\"\"]=''else:item[\"\"]=''if item[\"\"] in orphans:orphans.remove(item[\"\"])elif line.startswith(\"\"):line=line[:].strip().split(\"\",)item[\"\"]=line[]item[\"\"]=''if len(line)>:if line[].startswith(\"\"):line[]=line[][:]item[\"\"]=line[]else:item[\"\"]=linemenu.append(item)menu.append({\"\":\"\",\"\":\"\"})for ophan in orphans:menu.append({\"\":ophan,\"\":ophan,\"\":'',\"\":'',\"\":''})return menu", "docstring": "read experiment.txt and return a dict with [firstOfNewExp, color, star, comments].", "id": "f11412:m4"} {"signature": "def genIndex(folder,forceIDs=[]):", "body": "if not os.path.exists(folder+\"\"):print(\"\")returntimestart=cm.timethis()files=glob.glob(folder+\"\") files.extend(glob.glob(folder+\"\"))print(\"\"%(cm.timethis(timestart)*))files.extend(genPNGs(folder,files))files=sorted(files)timestart=cm.timethis()d=cm.getIDfileDict(files) print(\"\",len(d))print(\"\"%(cm.timethis(timestart)*))groups=cm.getABFgroups(files)print(\"\",len(groups))for ID in 
sorted(list(groups.keys())):overwrite=Falsefor abfID in groups[ID]:if abfID in forceIDs:overwrite=Truetry:htmlABF(ID,groups[ID],d,folder,overwrite)except:print(\"\")menu=expMenu(groups,folder)makeSplash(menu,folder)makeMenu(menu,folder)htmlFrames(d,folder)makeMenu(menu,folder)makeSplash(menu,folder)", "docstring": "expects a folder of ABFs.", "id": "f11412:m8"} {"signature": "def proto_00_01_gf(abf=exampleABF):", "body": "standard_inspect(abf)", "docstring": "gap free recording", "id": "f11413:m3"} {"signature": "def proto_00_02_egf(abf=exampleABF):", "body": "standard_inspect(abf)", "docstring": "episodic with no epochs (virtually gap free)", "id": "f11413:m4"} {"signature": "def proto_01_01_HP010(abf=exampleABF):", "body": "swhlab.memtest.memtest(abf) swhlab.memtest.checkSweep(abf) swhlab.plot.save(abf,tag=\"\")", "docstring": "hyperpolarization step. Use to calculate tau and stuff.", "id": "f11413:m5"} {"signature": "def proto_01_11_rampStep(abf=exampleABF):", "body": "standard_inspect(abf)swhlab.ap.detect(abf)swhlab.ap.check_sweep(abf) swhlab.plot.save(abf,tag=\"\")swhlab.ap.check_AP_raw(abf) swhlab.plot.save(abf,tag=\"\",resize=False)swhlab.ap.check_AP_deriv(abf) swhlab.plot.save(abf,tag=\"\")swhlab.ap.check_AP_phase(abf) swhlab.plot.save(abf,tag=\"\")for feature in ['','']:swhlab.ap.plot_values(abf,feature,continuous=True) swhlab.plot.save(abf,tag=feature)", "docstring": "each sweep is a ramp (of set size) which builds on the last sweep.\n Used for detection of AP properties from first few APs.", "id": "f11413:m6"} {"signature": "def proto_01_12_steps025(abf=exampleABF):", "body": "swhlab.ap.detect(abf)standard_groupingForInj(abf,)for feature in ['','']:swhlab.ap.plot_values(abf,feature,continuous=False) swhlab.plot.save(abf,tag=''+feature)swhlab.plot.gain(abf) swhlab.plot.save(abf,tag='')", "docstring": "IC steps. Use to determine gain function.", "id": "f11413:m7"} {"signature": "def proto_01_13_steps025dual(abf=exampleABF):", "body": "swhlab.ap.detect(abf)standard_groupingForInj(abf,)for feature in ['','']:swhlab.ap.plot_values(abf,feature,continuous=False) swhlab.plot.save(abf,tag=''+feature)f1=swhlab.ap.getAvgBySweep(abf,'',None,)f2=swhlab.ap.getAvgBySweep(abf,'',,None)f1=np.nan_to_num(f1)f2=np.nan_to_num(f2)Xs=abf.clampValues(abf.dataX[int(abf.protoSeqX[]+)])swhlab.plot.new(abf,title=\"\",xlabel=\"\",ylabel=\"\")pylab.plot(Xs,f1,'',ms=,alpha=,label=\"\",color='')pylab.plot(Xs,f2,'',ms=,alpha=,label=\"\",color='')pylab.legend(loc='')pylab.axis([Xs[],Xs[-],None,None])swhlab.plot.save(abf,tag='')", "docstring": "IC steps. See how hyperpol. 
step affects things.", "id": "f11413:m9"} {"signature": "def proto_02_01_MT70(abf=exampleABF):", "body": "standard_overlayWithAverage(abf)swhlab.memtest.memtest(abf)swhlab.memtest.checkSweep(abf)swhlab.plot.save(abf,tag='',resize=False)", "docstring": "repeated membrane tests.", "id": "f11413:m10"} {"signature": "def proto_02_02_IVdual(abf=exampleABF):", "body": "av1,sd1=swhlab.plot.IV(abf,,,True,'')swhlab.plot.save(abf,tag='')a2v,sd2=swhlab.plot.IV(abf,,,True,'')swhlab.plot.save(abf,tag='')swhlab.plot.sweep(abf,'')pylab.axis([None,None,min(av1)-,max(av1)+])swhlab.plot.save(abf,tag='')", "docstring": "dual I/V steps in VC mode, one from -70 and one -50.", "id": "f11413:m11"} {"signature": "def proto_02_03_IVfast(abf=exampleABF):", "body": "av1,sd1=swhlab.plot.IV(abf,,,True)swhlab.plot.save(abf,tag='')Xs=abf.clampValues() abf.saveThing([Xs,av1],'')", "docstring": "fast sweeps, 1 step per sweep, for clean IV without fast currents.", "id": "f11413:m12"} {"signature": "def proto_03_01_0s2(abf=exampleABF):", "body": "standard_inspect(abf)", "docstring": "repeated membrane tests, likely with drug added. Maybe IPSCs.", "id": "f11413:m13"} {"signature": "def proto_04_01_MTmon70s2(abf=exampleABF):", "body": "standard_inspect(abf)swhlab.memtest.memtest(abf)swhlab.memtest.checkSweep(abf)swhlab.plot.save(abf,tag='',resize=False)swhlab.memtest.plot_standard4(abf)swhlab.plot.save(abf,tag='')", "docstring": "repeated membrane tests, likely with drug added. Maybe IPSCs.", "id": "f11413:m14"} {"signature": "def proto_VC_50_MT_IV(abf=exampleABF):", "body": "swhlab.memtest.memtest(abf) swhlab.memtest.checkSweep(abf) swhlab.plot.save(abf,tag='',resize=False)av1,sd1=swhlab.plot.IV(abf,,,True,'')swhlab.plot.save(abf,tag='')Xs=abf.clampValues() abf.saveThing([Xs,av1],'')", "docstring": "combination of membrane test and IV steps.", "id": "f11413:m15"} {"signature": "def proto_IC_ramp_gain(abf=exampleABF):", "body": "standard_inspect(abf)swhlab.ap.detect(abf)swhlab.ap.check_AP_raw(abf) swhlab.plot.save(abf,tag=\"\",resize=False)swhlab.ap.check_AP_deriv(abf) swhlab.plot.save(abf,tag=\"\")swhlab.ap.check_AP_phase(abf) swhlab.plot.save(abf,tag=\"\")swhlab.ap.plot_values(abf,'',continuous=True) pylab.subplot()pylab.axhline(,color='',lw=,ls=\"\",alpha=)swhlab.plot.save(abf,tag='')swhlab.ap.plot_values(abf,'',continuous=True) pylab.subplot()pylab.axhline(-,color='',lw=,ls=\"\",alpha=)swhlab.plot.save(abf,tag='')", "docstring": "increasing ramps in (?) pA steps.", "id": "f11413:m16"} {"signature": "def proto_SHIV4(abf=exampleABF):", "body": "standard_inspect(abf)swhlab.ap.detect(abf)standard_groupingForInj(abf,)swhlab.ap.check_AP_raw(abf) swhlab.plot.save(abf,tag=\"\",resize=False)swhlab.ap.check_AP_deriv(abf) swhlab.plot.save(abf,tag=\"\")swhlab.ap.check_AP_phase(abf) swhlab.plot.save(abf,tag=\"\")swhlab.ap.plot_values(abf,'',continuous=True) pylab.subplot()pylab.axhline(,color='',lw=,ls=\"\",alpha=)swhlab.plot.save(abf,tag='')swhlab.ap.plot_values(abf,'',continuous=False) pylab.subplot()pylab.axhline(,color='',lw=,ls=\"\",alpha=)swhlab.plot.save(abf,tag='')swhlab.plot.gain(abf) swhlab.plot.save(abf,tag='')", "docstring": "increasing ramps in (?) 
pA steps.", "id": "f11413:m17"} {"signature": "def proto_sputter(abf=exampleABF):", "body": "standard_inspect(abf)swhlab.ap.detect(abf)swhlab.ap.check_AP_raw(abf) swhlab.plot.save(abf,tag=\"\",resize=False)swhlab.ap.check_AP_deriv(abf) swhlab.plot.save(abf,tag=\"\")swhlab.ap.check_AP_phase(abf) swhlab.plot.save(abf,tag=\"\")swhlab.ap.plot_values(abf,'',continuous=True) pylab.subplot()pylab.axhline(-,color='',lw=,ls=\"\",alpha=)pylab.axhline(-,color='',lw=,ls=\"\",alpha=)swhlab.plot.save(abf,tag='')", "docstring": "increasing ramps in (?) pA steps.", "id": "f11413:m18"} {"signature": "def indexImages(folder,fname=\"\"):", "body": "png']:sename(item).basename(item)fname))", "docstring": "OBSOLETE WAY TO INDEX A FOLDER.", "id": "f11413:m22"} {"signature": "def waitTillCopied(fname):", "body": "lastSize=while True:thisSize=os.path.getsize(fname)print(\"\",thisSize)if lastSize==thisSize:print(\"\")returnelse:lastSize=thisSizetime.sleep()", "docstring": "sometimes a huge file takes several seconds to copy over.\nThis will hang until the file is copied (file size is stable).", "id": "f11414:m0"} {"signature": "def handleNewABF(fname):", "body": "waitTillCopied(fname)standard.autoABF(fname)", "docstring": "we see a brand new ABF. now what?", "id": "f11414:m1"} {"signature": "def lazygo(watchFolder='',reAnalyze=False,rebuildSite=False,keepGoing=True,matching=False):", "body": "abfsKnown=[]while True:print()pagesNeeded=[]for fname in glob.glob(watchFolder+\"\"):ID=os.path.basename(fname).replace(\"\",\"\")if not fname in abfsKnown:if os.path.exists(fname.replace(\"\",\"\")): continueif matching and not matching in fname:continueabfsKnown.append(fname)if os.path.exists(os.path.dirname(fname)+\"\"+os.path.basename(fname).replace(\"\",\"\")) and reAnalyze==False:print(\"\",os.path.basename(fname))if rebuildSite:pagesNeeded.append(ID)else:handleNewABF(fname)pagesNeeded.append(ID)if len(pagesNeeded):print(\"\")indexing.genIndex(os.path.dirname(fname),forceIDs=pagesNeeded)if not keepGoing:returnfor i in range():print('',end='')time.sleep()", "docstring": "continuously monitor a folder for new abfs and try to analyze them.\nThis is intended to watch only one folder, but can run multiple copies.", "id": "f11414:m2"} {"signature": "def updateVersion(fname):", "body": "fname=os.path.abspath(fname)if not os.path.exists(fname):print(\"\",fname)returnwith open(fname) as f:raw=f.read().split(\"\")for i,line in enumerate(raw):if line.startswith(\"\"):version=int(line.split(\"\")[])raw[i]=\"\"%(version+)with open(fname,'') as f:f.write(\"\".join(raw))print(\"\"%(version,version+))sys.path.insert(,os.path.dirname(fname))import versionprint(\"\",version.__version__)with open('','') as f:f.write(str(version.__version__))", "docstring": "given a filename to a file containing a __counter__ variable,\nopen it, read the count, add one, rewrite the file.\n\nThis:\n __counter__=123\nBecomes:\n __counter__=124", "id": "f11416:m0"} {"signature": "def newVersion():", "body": "version=Nonefname=''with open(fname) as f:raw=f.read().split(\"\")for i,line in enumerate(raw):if line.startswith(\"\"):if version is None:version = int(line.split(\"\")[])raw[i]=\"\"%(version+)with open(fname,'') as f:f.write(\"\".join(raw))print(\"\"%(version,version+))", "docstring": "increments version counter in swhlab/version.py", "id": "f11417:m0"} {"signature": "def dictFromXml(xmlString):", "body": "dom = minidom.parseString(xmlString)return nodeToDic(dom.childNodes[])", "docstring": "Returns the a dictionary from the XML string.", "id": 
"f11418:m0"} {"signature": "def getTextFromNode(node):", "body": "t = \"\"for n in node.childNodes:if n.nodeType == n.TEXT_NODE:t += n.nodeValueelse:raise NotTextNodeErrorreturn t", "docstring": "Scans through all children of node and gathers the\ntext. If node has non-text child-nodes then\nNotTextNodeError is raised.", "id": "f11418:m1"} {"signature": "def __init__(self, api_key, password):", "body": "self.api_key = api_keyself.password = password", "docstring": "Initialise the AmbientSMS class\n\nExpects:\n - api_key - your AmbientSMS Central username\n - password - your AmbientSMS Central password", "id": "f11418:c2:m0"} {"signature": "def getbalance(self, url=''):", "body": "postXMLList = []postXMLList.append(\"\" % self.api_key)postXMLList.append(\"\" % self.password)postXML = '' % \"\".join(postXMLList)result = self.curl(url, postXML)if result.get(\"\", None):return result[\"\"]else:raise AmbientSMSError(result[\"\"])", "docstring": "Get the number of credits remaining at AmbientSMS", "id": "f11418:c2:m1"} {"signature": "def sendmsg(self,message,recipient_mobiles=[],url='',concatenate_message=True,message_id=str(time()).replace(\"\", \"\"),reply_path=None,allow_duplicates=True,allow_invalid_numbers=True,):", "body": "if not recipient_mobiles or not(isinstance(recipient_mobiles, list)or isinstance(recipient_mobiles, tuple)):raise AmbientSMSError(\"\")if not message or not len(message):raise AmbientSMSError(\"\")postXMLList = []postXMLList.append(\"\" % self.api_key)postXMLList.append(\"\" % self.password)postXMLList.append(\"\" %\"\".join([\"\" %m for m in recipient_mobiles]))postXMLList.append(\"\" % message)postXMLList.append(\"\" %( if concatenate_message else ))postXMLList.append(\"\" % message_id)postXMLList.append(\"\" %( if allow_duplicates else ))postXMLList.append(\"\" %( if allow_invalid_numbers else ))if reply_path:postXMLList.append(\"\" % reply_path)postXML = '' % \"\".join(postXMLList)result = self.curl(url, postXML)status = result.get(\"\", None)if status and int(status) in [, , ]:return resultelse:raise AmbientSMSError(int(status))", "docstring": "Send a mesage via the AmbientSMS API server", "id": "f11418:c2:m2"} {"signature": "def curl(self, url, post):", "body": "try:req = urllib.request.Request(url)req.add_header(\"\", \"\")data = urllib.request.urlopen(req, post.encode('')).read()except urllib.error.URLError as v:raise AmbientSMSError(v)return dictFromXml(data)", "docstring": "Inteface for sending web requests to the AmbientSMS API Server", "id": "f11418:c2:m3"} {"signature": "def objectlist_flat(self, lt, replace):", "body": "d = {}for k, v in lt:if k in d.keys() and not replace:if type(d[k]) is list:d[k].append(v)else:d[k] = [d[k], v]else:if isinstance(v, dict):dd = d.setdefault(k, {})for kk, vv in iteritems(v):if type(dd) == list:dd.append({kk: vv})elif kk in dd.keys():if hasattr(vv, ''):for k2, v2 in iteritems(vv):dd[kk][k2] = v2else:d[k] = [dd, {kk: vv}]else:dd[kk] = vvelse:d[k] = vreturn d", "docstring": "Similar to the dict constructor, but handles dups\n\nHCL is unclear on what one should do when duplicate keys are\nencountered. 
These comments aren't clear either:\n\nfrom decoder.go: if we're at the root or we're directly within\n a list, decode into dicts, otherwise lists\n\nfrom object.go: there's a flattened list structure", "id": "f11423:c0:m0"} {"signature": "def p_top(self, p):", "body": "if DEBUG:self.print_p(p)p[] = self.objectlist_flat(p[], True)", "docstring": "top : objectlist", "id": "f11423:c0:m1"} {"signature": "def p_objectlist_0(self, p):", "body": "if DEBUG:self.print_p(p)p[] = [p[]]", "docstring": "objectlist : objectitem", "id": "f11423:c0:m2"} {"signature": "def p_objectlist_1(self, p):", "body": "if DEBUG:self.print_p(p)p[] = p[] + [p[]]", "docstring": "objectlist : objectlist objectitem", "id": "f11423:c0:m3"} {"signature": "def p_objectlist_2(self, p):", "body": "if DEBUG:self.print_p(p)p[] = p[] + [p[]]", "docstring": "objectlist : objectlist COMMA objectitem", "id": "f11423:c0:m4"} {"signature": "def p_object_0(self, p):", "body": "if DEBUG:self.print_p(p)p[] = self.objectlist_flat(p[], False)", "docstring": "object : LEFTBRACE objectlist RIGHTBRACE", "id": "f11423:c0:m5"} {"signature": "def p_object_1(self, p):", "body": "if DEBUG:self.print_p(p)p[] = self.objectlist_flat(p[], False)", "docstring": "object : LEFTBRACE objectlist COMMA RIGHTBRACE", "id": "f11423:c0:m6"} {"signature": "def p_object_2(self, p):", "body": "if DEBUG:self.print_p(p)p[] = {}", "docstring": "object : LEFTBRACE RIGHTBRACE", "id": "f11423:c0:m7"} {"signature": "def p_objectkey_0(self, p):", "body": "if DEBUG:self.print_p(p)p[] = p[]", "docstring": "objectkey : IDENTIFIER\n | STRING", "id": "f11423:c0:m8"} {"signature": "def p_objectitem_0(self, p):", "body": "if DEBUG:self.print_p(p)p[] = (p[], p[])", "docstring": "objectitem : objectkey EQUAL number\n | objectkey EQUAL BOOL\n | objectkey EQUAL STRING\n | objectkey EQUAL object\n | objectkey EQUAL list", "id": "f11423:c0:m9"} {"signature": "def p_objectitem_1(self, p):", "body": "if DEBUG:self.print_p(p)p[] = p[]", "docstring": "objectitem : block", "id": "f11423:c0:m10"} {"signature": "def p_block_0(self, p):", "body": "if DEBUG:self.print_p(p)p[] = (p[], p[])", "docstring": "block : blockId object", "id": "f11423:c0:m11"} {"signature": "def p_block_1(self, p):", "body": "if DEBUG:self.print_p(p)p[] = (p[], {p[][]: p[][]})", "docstring": "block : blockId block", "id": "f11423:c0:m12"} {"signature": "def p_blockId(self, p):", "body": "if DEBUG:self.print_p(p)p[] = p[]", "docstring": "blockId : IDENTIFIER\n | STRING", "id": "f11423:c0:m13"} {"signature": "def p_list_0(self, p):", "body": "if DEBUG:self.print_p(p)p[] = p[]", "docstring": "list : LEFTBRACKET listitems RIGHTBRACKET\n | LEFTBRACKET listitems COMMA RIGHTBRACKET", "id": "f11423:c0:m14"} {"signature": "def p_list_1(self, p):", "body": "if DEBUG:self.print_p(p)p[] = []", "docstring": "list : LEFTBRACKET RIGHTBRACKET", "id": "f11423:c0:m15"} {"signature": "def p_listitems_0(self, p):", "body": "if DEBUG:self.print_p(p)p[] = [p[]]", "docstring": "listitems : listitem", "id": "f11423:c0:m16"} {"signature": "def p_listitems_1(self, p):", "body": "if DEBUG:self.print_p(p)p[] = p[] + [p[]]", "docstring": "listitems : listitems COMMA listitem", "id": "f11423:c0:m17"} {"signature": "def p_listitem(self, p):", "body": "if DEBUG:self.print_p(p)p[] = p[]", "docstring": "listitem : number\n | object\n | STRING", "id": "f11423:c0:m18"} {"signature": "def p_number_0(self, p):", "body": "if DEBUG:self.print_p(p)p[] = p[]", "docstring": "number : int", "id": "f11423:c0:m19"} {"signature": "def p_number_1(self, p):", 
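The objectlist_flat record above (f11423:c0:m0) collapses (key, value) pairs while keeping duplicate keys. The sketch below reproduces only that duplicate-handling core (not the nested-dict merging the full method also performs); the function name is illustrative.

    def merge_pairs(pairs, replace=False):
        """Build a dict from (key, value) pairs; duplicates become lists unless replace=True."""
        out = {}
        for k, v in pairs:
            if k in out and not replace:
                if isinstance(out[k], list):
                    out[k].append(v)                      # third and later duplicates extend the list
                else:
                    out[k] = [out[k], v]                  # second occurrence turns the value into a list
            else:
                out[k] = v
        return out

    # merge_pairs([("a", 1), ("a", 2), ("b", 3)]) -> {"a": [1, 2], "b": 3}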
"body": "if DEBUG:self.print_p(p)p[] = float(p[])", "docstring": "number : float", "id": "f11423:c0:m20"} {"signature": "def p_number_2(self, p):", "body": "if DEBUG:self.print_p(p)p[] = float(\"\".format(p[], p[]))", "docstring": "number : int exp", "id": "f11423:c0:m21"} {"signature": "def p_number_3(self, p):", "body": "if DEBUG:self.print_p(p)p[] = float(\"\".format(p[], p[]))", "docstring": "number : float exp", "id": "f11423:c0:m22"} {"signature": "def p_int_0(self, p):", "body": "if DEBUG:self.print_p(p)p[] = -p[]", "docstring": "int : MINUS int", "id": "f11423:c0:m23"} {"signature": "def p_int_1(self, p):", "body": "if DEBUG:self.print_p(p)p[] = p[]", "docstring": "int : NUMBER", "id": "f11423:c0:m24"} {"signature": "def p_float_0(self, p):", "body": "p[] = p[] * -", "docstring": "float : MINUS float", "id": "f11423:c0:m25"} {"signature": "def p_float_1(self, p):", "body": "p[] = p[]", "docstring": "float : FLOAT", "id": "f11423:c0:m26"} {"signature": "def p_exp_0(self, p):", "body": "if DEBUG:self.print_p(p)p[] = \"\".format(p[])", "docstring": "exp : EPLUS NUMBER", "id": "f11423:c0:m27"} {"signature": "def p_exp_1(self, p):", "body": "if DEBUG:self.print_p(p)p[] = \"\".format(p[])", "docstring": "exp : EMINUS NUMBER", "id": "f11423:c0:m28"} {"signature": "def t_BOOL(self, t):", "body": "t.value = t.value == ''return t", "docstring": "r'(true)|(false)", "id": "f11424:c0:m0"} {"signature": "def t_EMINUS(self, t):", "body": "return t", "docstring": "r'(?<=\\d|\\.)[eE]-", "id": "f11424:c0:m1"} {"signature": "def t_EPLUS(self, t):", "body": "return t", "docstring": "r'(?<=\\d|\\.)[eE]\\+?", "id": "f11424:c0:m2"} {"signature": "def t_FLOAT(self, t):", "body": "t.value = float(t.value)return t", "docstring": "r'-?((\\d+\\.\\d*)|(\\d*\\.\\d+))", "id": "f11424:c0:m3"} {"signature": "def t_hexnumber(self, t):", "body": "t.value = int(t.value, base=)t.type = ''return t", "docstring": "r'-?0[xX][0-9a-fA-F]+", "id": "f11424:c0:m4"} {"signature": "def t_intnumber(self, t):", "body": "t.value = int(t.value)t.type = ''return t", "docstring": "r'-?\\d+", "id": "f11424:c0:m5"} {"signature": "def t_PERIOD(self, t):", "body": "return t", "docstring": "r'\\.", "id": "f11424:c0:m6"} {"signature": "def t_COMMA(self, t):", "body": "return t", "docstring": "r',", "id": "f11424:c0:m7"} {"signature": "def t_IDENTIFIER(self, t):", "body": "t.value = text_type(t.value)return t", "docstring": "r'[^\\W\\d][\\w.-]*", "id": "f11424:c0:m8"} {"signature": "def t_string(self, t):", "body": "t.lexer.abs_start = t.lexer.lexpost.lexer.rel_pos = t.lexer.lexpost.lexer.string_value = u''t.lexer.begin('')", "docstring": "r'\\", "id": "f11424:c0:m9"} {"signature": "def t_string_escapedchar(self, t):", "body": "t.lexer.string_value += (t.lexer.lexdata[t.lexer.rel_pos : t.lexer.lexpos - ] + t.value)t.lexer.rel_pos = t.lexer.lexpospass", "docstring": "r'(?<=\\\\)(\\\"|\\\\)", "id": "f11424:c0:m10"} {"signature": "def t_string_stringdollar(self, t):", "body": "t.lexer.braces = t.lexer.begin('')", "docstring": "r'(?<=\\$)\\{", "id": "f11424:c0:m11"} {"signature": "def t_string_ignoring(self, t):", "body": "pass", "docstring": "r'[^\\\"]", "id": "f11424:c0:m12"} {"signature": "def t_string_STRING(self, t):", "body": "t.value = (t.lexer.string_value + t.lexer.lexdata[t.lexer.rel_pos : t.lexer.lexpos - ])t.lexer.lineno += t.lexer.lexdata[t.lexer.abs_start : t.lexer.lexpos - ].count('')t.lexer.begin('')return t", "docstring": "r'\\", "id": "f11424:c0:m13"} {"signature": "def t_stringdollar_dontcare(self, t):", "body": "pass", 
"docstring": "r'[^\\{\\}]", "id": "f11424:c0:m15"} {"signature": "def t_stringdollar_lbrace(self, t):", "body": "t.lexer.braces += ", "docstring": "r'\\{", "id": "f11424:c0:m16"} {"signature": "def t_stringdollar_rbrace(self, t):", "body": "t.lexer.braces -= if t.lexer.braces == :t.lexer.begin('')", "docstring": "r'\\}", "id": "f11424:c0:m17"} {"signature": "def t_tabbedheredoc(self, t):", "body": "t.lexer.is_tabbed = Trueself._init_heredoc(t)t.lexer.begin('')", "docstring": "r'<<-\\S+\\r?\\n", "id": "f11424:c0:m20"} {"signature": "def t_heredoc(self, t):", "body": "t.lexer.is_tabbed = Falseself._init_heredoc(t)t.lexer.begin('')", "docstring": "r'<<\\S+\\r?\\n", "id": "f11424:c0:m21"} {"signature": "def t_tabbedheredoc_STRING(self, t):", "body": "return self._end_heredoc(t)", "docstring": "r'^\\t*.+?(?=\\r?$)", "id": "f11424:c0:m23"} {"signature": "def t_heredoc_STRING(self, t):", "body": "return self._end_heredoc(t)", "docstring": "r'^.+?(?=\\r?$)", "id": "f11424:c0:m24"} {"signature": "def t_heredoc_ignoring(self, t):", "body": "pass", "docstring": "r'.+|\\n", "id": "f11424:c0:m25"} {"signature": "def t_COMMENT(self, t):", "body": "pass", "docstring": "r'(\\#|(//)).*", "id": "f11424:c0:m27"} {"signature": "def t_MULTICOMMENT(self, t):", "body": "t.lexer.lineno += t.value.count('')pass", "docstring": "r'/\\*(.|\\n)*?(\\*/)", "id": "f11424:c0:m28"} {"signature": "def t_newline(self, t):", "body": "t.lexer.lineno += len(t.value)", "docstring": "r'\\n+", "id": "f11424:c0:m29"} {"signature": "def isHcl(s):", "body": "for c in s:if c.isspace():continueif c == '':return Falseelse:return Trueraise ValueError(\"\")", "docstring": "Detects whether a string is JSON or HCL\n\n:param s: String that may contain HCL or JSON\n\n:returns: True if HCL, False if JSON, raises ValueError\n if neither", "id": "f11426:m0"} {"signature": "def load(fp):", "body": "return loads(fp.read())", "docstring": "Deserializes a file-pointer like object into a python dictionary.\nThe contents of the file must either be JSON or HCL.\n\n:param fp: An object that has a read() function\n\n:returns: Dictionary", "id": "f11426:m1"} {"signature": "def loads(s):", "body": "s = u(s)if isHcl(s):return HclParser().parse(s)else:return json.loads(s)", "docstring": "Deserializes a string and converts it to a dictionary. 
The contents\nof the string must either be JSON or HCL.\n\n:returns: Dictionary", "id": "f11426:m2"} {"signature": "def dumps(*args, **kwargs):", "body": "return json.dumps(*args, **kwargs)", "docstring": "Turns a dictionary into JSON, passthru to json.dumps", "id": "f11426:m3"} {"signature": "def _pre_install():", "body": "dat = join(setup_dir, '', '', '')if exists(dat):os.unlink(dat)sys.path.insert(, join(setup_dir, ''))import hclfrom hcl.parser import HclParserparser = HclParser()", "docstring": "Initialize the parse table at install time", "id": "f11427:m0"} {"signature": "def all_info_files(self) :", "body": "try :for info_file in list_files_in_dir(self.info_dir):if not os.path.basename(info_file).endswith('') :self.on_non_trashinfo_found()else :yield info_fileexcept OSError: pass", "docstring": "Returns a generator of \"Path\"s", "id": "f11431:c0:m1"} {"signature": "def describe(path):", "body": "if os.path.islink(path):return ''elif os.path.isdir(path):if path == '':return ''elif path == '':return ''else:if os.path.basename(path) == '':return \"\"elif os.path.basename(path) == '':return \"\"else:return ''elif os.path.isfile(path):if os.path.getsize(path) == :return ''else:return ''elif not os.path.exists(path):return ''else:return ''", "docstring": "Return a textual description of the file pointed by this path.\nOptions:\n - \"symbolic link\"\n - \"directory\"\n - \"'.' directory\"\n - \"'..' directory\"\n - \"regular file\"\n - \"regular empty file\"\n - \"non existent\"\n - \"entry\"", "id": "f11438:m2"} {"signature": "def trash(self, file) :", "body": "if self._should_skipped_by_specs(file):self.reporter.unable_to_trash_dot_entries(file)returnvolume_of_file_to_be_trashed = self.volume_of_parent(file)self.reporter.volume_of_file(volume_of_file_to_be_trashed)candidates = self._possible_trash_directories_for(volume_of_file_to_be_trashed)self.try_trash_file_using_candidates(file,volume_of_file_to_be_trashed,candidates)", "docstring": "Trash a file in the appropriate trash directory.\nIf the file belong to the same volume of the trash home directory it\nwill be trashed in the home trash directory.\nOtherwise it will be trashed in one of the relevant volume trash\ndirectories.\n\nEach volume can have two trash directories, they are\n - $volume/.Trash/$uid\n - $volume/.Trash-$uid\n\nFirstly the software attempt to trash the file in the first directory\nthen try to trash in the second trash directory.", "id": "f11438:c0:m4"} {"signature": "def _format_text(self, text) :", "body": "return text", "docstring": "[Does not] format a text, return the text as it is.", "id": "f11438:c3:m0"} {"signature": "def persist_trash_info(self, basename, content, logger):", "body": "self.ensure_dir(self.info_dir, )index = while True :if index == :suffix = \"\"elif index < :suffix = \"\" % indexelse :import randomsuffix = \"\" % random.randint(, )base_id = basenametrash_id = base_id + suffixtrash_info_basename = trash_id+\"\"dest = os.path.join(self.info_dir, trash_info_basename)try :self.atomic_write(dest, content)logger.debug(\"\" % dest)return destexcept OSError:logger.debug(\"\" % dest)index += raise IOError()", "docstring": "Create a .trashinfo file in the $trash/info directory.\nreturns the created TrashInfoFile.", "id": "f11438:c6:m4"} {"signature": "def copytree(src, dst, symlinks=False, ignore=None):", "body": "if not os.path.exists(dst):os.makedirs(dst)shutil.copystat(src, dst)lst = os.listdir(src)if ignore:excl = ignore(src, lst)lst = [x for x in lst if x not in excl]for item in lst:s = 
os.path.join(src, item)d = os.path.join(dst, item)if symlinks and os.path.islink(s):if os.path.lexists(d):os.remove(d)os.symlink(os.readlink(s), d)try:st = os.lstat(s)mode = stat.S_IMODE(st.st_mode)os.lchmod(d, mode)except:pass elif os.path.isdir(s):copytree(s, d, symlinks, ignore)else:shutil.copy2(s, d)", "docstring": "copytree that works even if folder already exists", "id": "f11486:m1"} {"signature": "def get_source(self, doc):", "body": "start_iter = doc.get_start_iter()end_iter = doc.get_end_iter()source = doc.get_text(start_iter, end_iter, False)return source", "docstring": "Grab contents of 'doc' and return it\n\n:param doc: The active document\n:return:", "id": "f11487:c0:m9"} {"signature": "def _create_view(self, name=\"\"):", "body": "view = gtk.TextView()view.set_editable(False)fontdesc = pango.FontDescription(\"\")view.modify_font(fontdesc)view.set_name(name)buff = view.get_buffer()buff.create_tag('', foreground='')return view", "docstring": "Create the gtk.TextView used for shell output", "id": "f11487:c1:m1"} {"signature": "def copytree(src, dst, symlinks=False, ignore=None):", "body": "if not exists(dst):os.makedirs(dst)shutil.copystat(src, dst)lst = os.listdir(src)if ignore:excl = ignore(src, lst)lst = [x for x in lst if x not in excl]for item in lst:s = join(src, item)d = join(dst, item)if symlinks and islink(s):if lexists(d):os.remove(d)os.symlink(os.readlink(s), d)try:st = os.lstat(s)mode = stat.S_IMODE(st.st_mode)os.lchmod(d, mode)except:pass elif isdir(s):copytree(s, d, symlinks, ignore)else:shutil.copy2(s, d)", "docstring": "copytree that works even if folder already exists", "id": "f11489:m1"} {"signature": "def _create_view(self, name=\"\"):", "body": "text_view = Gtk.TextView()text_view.set_editable(False)fontdesc = Pango.FontDescription(\"\")text_view.modify_font(fontdesc)text_view.set_name(name)buff = text_view.get_buffer()buff.create_tag('', foreground='')container = Gtk.ScrolledWindow()container.add(text_view)container.show_all()return container, text_view", "docstring": "Create the gtk.TextView inside a Gtk.ScrolledWindow\n:return: container, text_view", "id": "f11490:c0:m2"} {"signature": "def get_source(self, doc):", "body": "start_iter = doc.get_start_iter()end_iter = doc.get_end_iter()source = doc.get_text(start_iter, end_iter, False)return source", "docstring": "Grab contents of 'doc' and return it\n\n:param doc: The active document\n:return:", "id": "f11490:c0:m9"} {"signature": "def copytree(src, dst, symlinks=False, ignore=None):", "body": "if not exists(dst):os.makedirs(dst)shutil.copystat(src, dst)lst = os.listdir(src)if ignore:excl = ignore(src, lst)lst = [x for x in lst if x not in excl]for item in lst:s = join(src, item)d = join(dst, item)if symlinks and islink(s):if lexists(d):os.remove(d)os.symlink(os.readlink(s), d)try:st = os.lstat(s)mode = stat.S_IMODE(st.st_mode)os.lchmod(d, mode)except:pass elif isdir(s):copytree(s, d, symlinks, ignore)else:shutil.copy2(s, d)", "docstring": "copytree that works even if folder already exists", "id": "f11491:m1"} {"signature": "def get_source(self, doc):", "body": "start_iter = doc.get_start_iter()end_iter = doc.get_end_iter()source = doc.get_text(start_iter, end_iter, False)return source", "docstring": "Grab contents of 'doc' and return it\n\n:param doc: The active document\n:return:", "id": "f11492:c0:m9"} {"signature": "def _create_view(self, name=\"\"):", "body": "view = Gtk.TextView()view.set_editable(False)fontdesc = Pango.FontDescription(\"\")view.modify_font(fontdesc)view.set_name(name)buff = 
view.get_buffer()buff.create_tag('', foreground='')return view", "docstring": "Create the gtk.TextView used for shell output", "id": "f11492:c1:m1"} {"signature": "def find_example_dir():", "body": "code_stub = textwrap.dedent(\"\"\"\"\"\")code = code_stub % ''cmd = [\"\", \"\", code]p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)output, errors = p.communicate()if errors:print('')print(''.format(errors.decode('')))return Noneelse:examples_dir = output.decode('').strip()if os.path.isdir(examples_dir):return examples_dircode = code_stub % ''cmd = [\"\", \"\", code]p = subprocess.Popen(cmd, stdout=subprocess.PIPE)output, errors = p.communicate()examples_dir = output.decode('').strip()if os.path.isdir(examples_dir):return examples_dirif examples_dir:print(''.format(examples_dir))else:print('')", "docstring": "Find examples dir .. a little bit ugly..", "id": "f11493:m1"} {"signature": "def make_readable_filename(fn):", "body": "return os.path.splitext(fn)[].replace('', '').capitalize()", "docstring": "Change filenames for display in the menu.", "id": "f11493:m2"} {"signature": "def run(self):", "body": "try:for line in iter(self._fd.readline, False):if line is not None:if self._althandler:if self._althandler(line):continueself._queue.put(line)if not line:time.sleep()except ValueError: if not self._fd.closed:raise", "docstring": "The body of the tread: read lines and put them on the queue.", "id": "f11493:c0:m1"} {"signature": "def eof(self):", "body": "return (not self.is_alive()) and self._queue.empty() or self._fd.closed", "docstring": "Check whether there is no more content to expect.", "id": "f11493:c0:m2"} {"signature": "def live_source_load(self, source):", "body": "source = source.rstrip('')if source != self.source:self.source = sourceb64_source = base64.b64encode(bytes(bytearray(source, \"\")))self.send_command(CMD_LOAD_BASE64, b64_source)", "docstring": "Send new source code to the bot\n\n:param source:\n:param good_cb: callback called if code was good\n:param bad_cb: callback called if code was bad (will get contents of exception)\n:return:", "id": "f11493:c2:m2"} {"signature": "def send_command(self, cmd, *args):", "body": "if True:cookie = str(uuid.uuid4())response = CommandResponse(cmd, cookie, None, info=[])self.responses[cookie] = responseargs = list(args) + [b'' + bytes(cookie, \"\")]if args:bytes_args = []for arg in args:if isinstance(arg, bytes):bytes_args.append(arg)else:bytes_args.append(bytearray(arg, \"\"))data = bytearray(cmd, \"\") + b'' + b''.join(bytes_args) + b''else:data = bytearray(cmd, \"\") + b''self.process.stdin.write(data)self.process.stdin.flush()", "docstring": ":param cmd:\n:param args:\n:return:", "id": "f11493:c2:m4"} {"signature": "def close(self):", "body": "self.process.stdout.close()self.process.stderr.close()self.running = False", "docstring": "Close outputs of process.", "id": "f11493:c2:m5"} {"signature": "def get_output(self):", "body": "if self.process.poll() is not None:self.close()yield None, Nonewhile not (self.stdout_queue.empty() and self.stderr_queue.empty()):if not self.stdout_queue.empty():line = self.stdout_queue.get().decode('')yield line, Noneif not self.stderr_queue.empty():line = self.stderr_queue.get().decode('')yield None, line", "docstring": ":yield: stdout_line, stderr_line, running\n\nGenerator that outputs lines captured from stdout and stderr\n\nThese can be consumed to output on a widget in an IDE", "id": "f11493:c2:m6"} {"signature": "def get_command_responses(self):", "body": "if not 
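The get_output record above (f11493:c2:m6) yields (stdout_line, stderr_line) pairs from the running bot; per its body, a (None, None) pair signals that the process has exited. A hedged consumer sketch, where proc is assumed to expose that generator and write_out/write_err are arbitrary callables (for example bound to IDE text buffers):

    def pump_output(proc, write_out, write_err):
        """Drain one batch of captured output lines; return False once the process has exited."""
        for stdout_line, stderr_line in proc.get_output():
            if stdout_line is None and stderr_line is None:
                return False                              # (None, None): process ended, streams closed
            if stdout_line is not None:
                write_out(stdout_line)
            if stderr_line is not None:
                write_err(stderr_line)
        return True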
self.response_queue.empty():yield Nonewhile not self.response_queue.empty():line = self.response_queue.get()if line is not None:yield line", "docstring": "Get responses to commands sent", "id": "f11493:c2:m7"} {"signature": "def examples_menu(root_dir=None, depth=):", "body": "examples_dir = ide_utils.get_example_dir()if not examples_dir:return \"\", [], []root_dir = root_dir or examples_dirfile_tmpl = ''dir_tmpl = ''file_actions = []submenu_actions = []xml = \"\"for fn in sorted(os.listdir(root_dir)):path = os.path.join(root_dir, fn)rel_path = path[len(examples_dir):]if os.path.isdir(path):action = ''.format(rel_path)label = fn.capitalize()sm_xml, sm_file_actions, sm_menu_actions = examples_menu(os.path.join(root_dir, fn), depth+)submenu_actions.extend(sm_menu_actions)file_actions.extend(sm_file_actions)submenu_actions.append((action, label))xml += dir_tmpl.format(name=fn, action=action, menu=sm_xml)elif os.path.splitext(path)[] in ['', ''] and not fn.startswith(''):action = ''.format(rel_path)label = ide_utils.make_readable_filename(fn)xml += file_tmpl.format(name=fn, action=action)file_actions.append((action, label))return xml, file_actions, submenu_actions", "docstring": ":return: xml for menu, [(bot_action, label), ...], [(menu_action, label), ...]", "id": "f11494:m0"} {"signature": "def mk_examples_menu(text, root_dir=None, depth=):", "body": "examples_dir = ide_utils.get_example_dir()if not examples_dir:return None, []root_dir = root_dir or examples_dir file_actions = []menu = Gio.Menu.new()base_item = Gio.MenuItem.new_submenu(text, menu)for fn in sorted(os.listdir(root_dir)):path = os.path.join(root_dir, fn)rel_path = path[len(examples_dir):]if os.path.isdir(path):label = fn.capitalize()item, sm_file_actions = mk_examples_menu(label, os.path.join(root_dir, fn))menu.append_item(item)file_actions.extend(sm_file_actions)elif os.path.splitext(path)[] in ['', ''] and not fn.startswith(''):label = ide_utils.make_readable_filename(fn)action_name = \"\" % encode_relpath(rel_path)menu.append(label, action_name)file_actions.append(rel_path)return base_item, file_actions", "docstring": ":return: base_item, rel_paths", "id": "f11494:m2"} {"signature": "def gedit3_menu(xml):", "body": "return MENU_UI.format(xml)", "docstring": "Build XML for GEDIT3 Menus.\n\nPass in the xml returned by example_menu", "id": "f11494:m3"} {"signature": "def get_child_by_name(parent, name):", "body": "def iterate_children(widget, name):if widget.get_name() == name:return widgettry:for w in widget.get_children():result = iterate_children(w, name)if result is not None:return resultelse:continueexcept AttributeError:passreturn iterate_children(parent, name)", "docstring": "Iterate through a gtk container, `parent`,\nand return the widget with the name `name`.", "id": "f11494:m4"} {"signature": "def venv_has_script(script):", "body": "def f(venv):path=os.path.join(venv, '', script)if os.path.isfile(path):return Truereturn f", "docstring": ":param script: script to look for in bin folder", "id": "f11494:m5"} {"signature": "def is_venv(directory, executable=''):", "body": "path=os.path.join(directory, '', executable)return os.path.isfile(path)", "docstring": ":param directory: base directory of python environment", "id": "f11494:m6"} {"signature": "def vw_envs(filter=None):", "body": "vw_root=os.path.abspath(os.path.expanduser(os.path.expandvars('')))venvs=[]for directory in os.listdir(vw_root):venv=os.path.join(vw_root, directory)if os.path.isdir(os.path.join(venv)):if filter and not 
filter(venv):continuevenvs.append(venv)return sorted(venvs)", "docstring": ":return: python environments in ~/.virtualenvs\n\n:param filter: if this returns False the venv will be ignored\n\n>>> vw_envs(filter=venv_has_script('pip'))", "id": "f11494:m7"} {"signature": "def sbot_executable():", "body": "gsettings=load_gsettings()venv = gsettings.get_string('')if venv == '':sbot = which('')elif venv == '':env_venv = os.environ.get('')if not env_venv:return which('')for p in os.environ[''].split(os.path.pathsep):sbot='' % pif not p.startswith(env_venv) and os.path.isfile(sbot):return sbotelse:sbot = os.path.join(venv, '')if not os.path.isfile(sbot):print('')sbot = which('')return os.path.realpath(sbot)", "docstring": "Find shoebot executable", "id": "f11494:m9"} {"signature": "def examples_menu(root_dir=None, depth=):", "body": "examples_dir = ide_utils.get_example_dir()if not examples_dir:return \"\", [], []root_dir = root_dir or examples_dirfile_tmpl = ''dir_tmpl = ''file_actions = []submenu_actions = []xml = \"\"for fn in sorted(os.listdir(root_dir)):path = os.path.join(root_dir, fn)rel_path = path[len(examples_dir):]if os.path.isdir(path):action = ''.format(rel_path)label = fn.capitalize()sm_xml, sm_file_actions, sm_menu_actions = examples_menu(os.path.join(root_dir, fn), depth+)submenu_actions.extend(sm_menu_actions)file_actions.extend(sm_file_actions)submenu_actions.append((action, label))xml += dir_tmpl.format(name=fn, action=action, menu=sm_xml)elif os.path.splitext(path)[] in ['', ''] and not fn.startswith(''):action = ''.format(rel_path)label = ide_utils.make_readable_filename(fn)xml += file_tmpl.format(name=fn, action=action)file_actions.append((action, label))return xml, file_actions, submenu_actions", "docstring": ":return: xml for menu, [(bot_action, label), ...], [(menu_action, label), ...]", "id": "f11495:m0"} {"signature": "def gedit2_menu(xml):", "body": "return MENU_UI.format(xml)", "docstring": "Build XML for GEDIT3 Menus.\n\nPass in the xml returned by example_menu", "id": "f11495:m1"} {"signature": "def get_child_by_name(parent, name):", "body": "def iterate_children(widget, name):if widget.get_name() == name:return widgettry:for w in widget.get_children():result = iterate_children(w, name)if result is not None:return resultelse:continueexcept AttributeError:passreturn iterate_children(parent, name)", "docstring": "Iterate through a gtk container, `parent`,\nand return the widget with the name `name`.", "id": "f11495:m2"} {"signature": "def create_listening_socket(host, port, handler):", "body": "sock = socket.socket()sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, )sock.bind((host, port))sock.listen()GObject.io_add_watch(sock, GObject.IO_IN, handler)return sock", "docstring": "Create socket and set listening options\n:param host:\n:param port:\n:param handler:\n:return:", "id": "f11496:m0"} {"signature": "def __init__(self, bot, host, port):", "body": "create_listening_socket(host, port, self.listener)self.shell = Noneself.bot = bot", "docstring": "Initialize server and start listening.", "id": "f11496:c0:m0"} {"signature": "def listener(self, sock, *args):", "body": "conn, addr = sock.accept()f = conn.makefile(conn)self.shell = ShoebotCmd(self.bot, stdin=f, stdout=f, intro=INTRO)print(_(\"\"))GObject.io_add_watch(conn, GObject.IO_IN, self.handler)if self.shell.intro:self.shell.stdout.write(str(self.shell.intro)+\"\")self.shell.stdout.flush()return True", "docstring": "Asynchronous connection listener. 
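The create_listening_socket record above (f11496:m0) binds a reusable socket and registers a handler with the GLib main loop via GObject.io_add_watch. A hedged sketch of an equivalent setup using the same calls; the host, port, and handler are illustrative.

    import socket
    from gi.repository import GObject

    def on_connection(sock, *args):
        conn, addr = sock.accept()                        # a real handler would now read from conn
        conn.close()
        return True                                       # keep the watch installed

    sock = socket.socket()
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(("localhost", 7777))                        # illustrative host/port
    sock.listen(1)
    GObject.io_add_watch(sock, GObject.IO_IN, on_connection)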
Starts a handler for each connection.", "id": "f11496:c0:m1"} {"signature": "def handler(self, conn, *args):", "body": "self.shell.stdout.write(self.shell.prompt)line = self.shell.stdin.readline()if not len(line):line = ''return Falseelse:line = line.rstrip('')line = self.shell.precmd(line)stop = self.shell.onecmd(line)stop = self.shell.postcmd(stop, line)self.shell.stdout.flush()self.shell.postloop()if stop:self.shell = Noneconn.close()return not stop", "docstring": "Asynchronous connection handler. Processes each line from the socket.", "id": "f11496:c0:m2"} {"signature": "def trusted_cmd(f):", "body": "def run_cmd(self, line):if self.trusted:f(self, line)else:print(\"\" % f.__name__[:])global trusted_cmdstrusted_cmds.add(f.__name__)run_cmd.__doc__ = f.__doc__return run_cmd", "docstring": "Trusted commands cannot be run remotely\n\n:param f:\n:return:", "id": "f11497:m0"} {"signature": "def __init__(self, bot, intro=None, trusted=False, **kwargs):", "body": "cmd.Cmd.__init__(self, **kwargs)self.bot = botself.pause_speed = Noneself.intro = intro or INTROself.prompt = PROMPTself.response_prompt = ''self.use_rawinput = Falseself.cookie = Noneself.escape_nl = Falseself.live_prefix = ''self.trusted = trusted", "docstring": ":param bot:\n:param intro:\n:param trusted: Only running from the commandline is trusted, not from sockets\n untrusted can only change variables\n:param kwargs:\n:return:", "id": "f11497:c0:m0"} {"signature": "def print_response(self, input='', keep=False, *args, **kwargs):", "body": "cookie = kwargs.get('')if cookie is None:cookie = self.cookie or ''status = kwargs.get('')lines = input.splitlines()if status and not lines:lines = ['']if cookie:output_template = ''else:output_template = ''for i, line in enumerate(lines):if i != len(lines) - or keep is True:cookie_char = '>'else:cookie_char = ''print(output_template.format(cookie_char=cookie_char,cookie=cookie,status=status or '',line=line.strip()), file=self.stdout)", "docstring": "print response, if cookie is set then print that each line\n:param args:\n:param keep: if True more output is to come\n:param cookie: set a custom cookie,\n if set to 'None' then self.cookie will be used.\n if set to 'False' disables cookie output entirely\n:return:", "id": "f11497:c0:m1"} {"signature": "def emptyline(self):", "body": "return \"\"", "docstring": "Kill the default behaviour of repeating the last line.\n\n:return:", "id": "f11497:c0:m2"} {"signature": "def do_escape_nl(self, arg):", "body": "if arg.lower() == '':self.escape_nl = Falseelse:self.escape_nl = True", "docstring": "Escape newlines in any responses", "id": "f11497:c0:m3"} {"signature": "def do_prompt(self, arg):", "body": "if arg.lower() == '':self.response_prompt = ''self.prompt = ''returnelif arg.lower() == '':self.prompt = PROMPTself.response_prompt = RESPONSE_PROMPTself.print_response('' % self.prompt, '', '' % self.response_prompt)", "docstring": "Enable or disable prompt\n:param arg: on|off\n:return:", "id": "f11497:c0:m4"} {"signature": "def do_title(self, title):", "body": "publish_event(SET_WINDOW_TITLE, data=title)", "docstring": "Change window title.", "id": "f11497:c0:m5"} {"signature": "def do_speed(self, speed):", "body": "if speed:try:self.bot._speed = float(speed)except Exception as e:self.print_response('' % speed)returnself.print_response('' % self.bot._speed)", "docstring": "rewind", "id": "f11497:c0:m6"} {"signature": "def do_restart(self, line):", "body": "self.bot._frame = 
self.bot._namespace.clear()self.bot._namespace.update(self.bot._initial_namespace)", "docstring": "Attempt to restart the bot.", "id": "f11497:c0:m7"} {"signature": "def do_pause(self, line):", "body": "if self.pause_speed is None:self.pause_speed = self.bot._speedself.bot._speed = self.print_response('')else:self.bot._speed = self.pause_speedself.pause_speed = Noneself.print_response('')", "docstring": "Toggle pause", "id": "f11497:c0:m8"} {"signature": "def do_play(self, line):", "body": "if self.pause_speed is None:self.bot._speed = self.pause_speedself.pause_speed = Noneself.print_response(\"\")", "docstring": "Resume playback if bot is paused", "id": "f11497:c0:m9"} {"signature": "def do_goto(self, line):", "body": "self.print_response(\"\" % line)self.bot._frame = int(line)", "docstring": "Go to specific frame\n:param line:\n:return:", "id": "f11497:c0:m10"} {"signature": "def do_rewind(self, line):", "body": "self.print_response(\"\" % self.bot._frame)self.bot._frame = ", "docstring": "rewind", "id": "f11497:c0:m11"} {"signature": "def do_vars(self, line):", "body": "if self.bot._vars:max_name_len = max([len(name) for name in self.bot._vars])for i, (name, v) in enumerate(self.bot._vars.items()):keep = i < len(self.bot._vars) - self.print_response(\"\" % (name.ljust(max_name_len), v.value), keep=keep)else:self.print_response(\"\")", "docstring": "List bot variables and values", "id": "f11497:c0:m12"} {"signature": "@trusted_cmddef do_load_base64(self, line):", "body": "cookie = self.cookieexecutor = self.bot._executordef source_good():self.print_response(status=RESPONSE_CODE_OK, cookie=cookie)executor.clear_callbacks()def source_bad(tb):if called_good:raise ValueError('')self.print_response(status=RESPONSE_REVERTED, keep=True, cookie=cookie)self.print_response(tb.replace('', ''), cookie=cookie)executor.clear_callbacks()called_good = Falsesource = str(base64.b64decode(line))publish_event(SOURCE_CHANGED_EVENT, data=source, extra_channels=\"\")self.bot._executor.load_edited_source(source, good_cb=source_good, bad_cb=source_bad)", "docstring": "load filename=(file)\nload base64=(base64 encoded)\n\nSend new code to shoebot.\n\nIf it does not run successfully shoebot will attempt to role back.\n\nEditors can enable livecoding by sending new code as it is edited.", "id": "f11497:c0:m13"} {"signature": "def do_bye(self, line):", "body": "return self.do_exit(line)", "docstring": "Exit shell and shoebot\n\nAlias for exit.", "id": "f11497:c0:m14"} {"signature": "def do_exit(self, line):", "body": "if self.trusted:publish_event(QUIT_EVENT)self.print_response('')return True", "docstring": "Exit shell and shoebot", "id": "f11497:c0:m15"} {"signature": "def do_quit(self, line):", "body": "return self.do_exit(line)", "docstring": "Exit shell and shoebot\n\nAlias for exit.", "id": "f11497:c0:m16"} {"signature": "def do_fullscreen(self, line):", "body": "self.bot.canvas.sink.trigger_fullscreen_action(True)print(self.response_prompt, file=self.stdout)", "docstring": "Make the current window fullscreen", "id": "f11497:c0:m17"} {"signature": "def do_windowed(self, line):", "body": "self.bot.canvas.sink.trigger_fullscreen_action(False)print(self.response_prompt, file=self.stdout)", "docstring": "Un-fullscreen the current window", "id": "f11497:c0:m18"} {"signature": "def do_EOF(self, line):", "body": "print(self.response_prompt, file=self.stdout)return self.do_exit(line)", "docstring": "Exit shell and shoebot\n\nAlias for exit.", "id": "f11497:c0:m19"} {"signature": "def do_help(self, arg):", "body": 
"print(self.response_prompt, file=self.stdout)return cmd.Cmd.do_help(self, arg)", "docstring": "Show help on all commands.", "id": "f11497:c0:m20"} {"signature": "def do_set(self, line):", "body": "try:name, value = [part.strip() for part in line.split('')]if name not in self.bot._vars:self.print_response('' % name)returnvariable = self.bot._vars[name]variable.value = variable.sanitize(value.strip(''))success, msg = self.bot.canvas.sink.var_changed(name, variable.value)if success:print(''.format(name, variable.value), file=self.stdout)else:print(''.format(msg), file=self.stdout)except Exception as e:print('', e)return", "docstring": "Set a variable.", "id": "f11497:c0:m21"} {"signature": "def precmd(self, line):", "body": "args = shlex.split(line or \"\")if args and '' in args[-]:cookie_index = line.index('')cookie = line[cookie_index + :]line = line[:cookie_index].strip()self.cookie = cookieif line.startswith(''):return ''elif '' in line:cmdname = line.partition(\"\")[]if hasattr(self, \"\" % cmdname):return lineif not line.startswith(\"\"):return \"\" + lineelse:return lineif len(args) and args[] in self.shortcuts:return \"\" % (self.shortcuts[args[]], \"\".join(args[:]))else:return line", "docstring": "Allow commands to have a last parameter of 'cookie=somevalue'\n\nTODO somevalue will be prepended onto any output lines so\nthat editors can distinguish output from certain kinds\nof events they have sent.\n\n:param line:\n:return:", "id": "f11497:c0:m22"} {"signature": "def postcmd(self, stop, line):", "body": "self.cookie = Nonereturn stop", "docstring": "Hook method executed just after a command dispatch is finished.", "id": "f11497:c0:m23"} {"signature": "def rect(self, x, y, width, height, roundness=, draw=True, **kwargs):", "body": "path = self.BezierPath(**kwargs)path.rect(x, y, width, height, roundness, self.rectmode)if draw:path.draw()return path", "docstring": "Draws a rectangle with top left corner at (x,y)\n\n The roundness variable sets rounded corners.", "id": "f11499:c0:m1"} {"signature": "def oval(self, x, y, width, height, draw=True, **kwargs):", "body": "path = self.BezierPath(**kwargs)path.ellipse(x, y, width, height)if draw:path.draw()return path", "docstring": "Draws an ellipse starting from (x,y) - ovals and ellipses are not the same", "id": "f11499:c0:m3"} {"signature": "def ellipse(self, x, y, width, height, draw=True, **kwargs):", "body": "path = self.BezierPath(**kwargs)path.ellipse(x,y,width,height)if draw:path.draw()return path", "docstring": "Draws an ellipse starting from (x,y)", "id": "f11499:c0:m4"} {"signature": "def line(self, x1, y1, x2, y2, draw=True):", "body": "p = self._pathself.newpath()self.moveto(x1,y1)self.lineto(x2,y2)self.endpath(draw=draw)self._path = preturn p", "docstring": "Draws a line from (x1,y1) to (x2,y2)", "id": "f11499:c0:m6"} {"signature": "def relmoveto(self, x, y):", "body": "if self._path is None:raise ShoebotError(_(\"\"))self._path.relmoveto(x,y)", "docstring": "Move relatively to the last point.", "id": "f11499:c0:m15"} {"signature": "def rellineto(self, x, y):", "body": "if self._path is None:raise ShoebotError(_(\"\"))self._path.rellineto(x,y)", "docstring": "Draw a line using relative coordinates.", "id": "f11499:c0:m16"} {"signature": "def relcurveto(self, h1x, h1y, h2x, h2y, x, y):", "body": "if self._path is None:raise ShoebotError(_(\"\"))self._path.relcurveto(x,y)", "docstring": "Draws a curve relatively to the last point.", "id": "f11499:c0:m17"} {"signature": "def image(self, path, x, y, width=None, height=None, 
alpha=, data=None, draw=True, **kwargs):", "body": "return self.Image(path, x, y, width, height, alpha, data, **kwargs)", "docstring": "Draws a image form path, in x,y and resize it to width, height dimensions.", "id": "f11499:c0:m19"} {"signature": "def colormode(self, mode=None, crange=None):", "body": "if mode is not None:if mode == \"\":self.color_mode = Bot.RGBelif mode == \"\":self.color_mode = Bot.HSBelse:raise NameError(_(\"\"))if crange is not None:self.color_range = crangereturn self.color_mode", "docstring": "Sets the current colormode (can be RGB or HSB) and eventually\n the color range.\n\n If called without arguments, it returns the current colormode.", "id": "f11499:c0:m29"} {"signature": "def fill(self,*args):", "body": "self._fillcolor = self.color(*args)return self._fillcolor", "docstring": "Sets a fill color, applying it to new paths.", "id": "f11499:c0:m31"} {"signature": "def nofill(self):", "body": "self._fillcolor = None", "docstring": "Stop applying fills to new paths.", "id": "f11499:c0:m32"} {"signature": "def stroke(self,*args):", "body": "self._strokecolor = self.color(*args)return self._strokecolor", "docstring": "Set a stroke color, applying it to new paths.", "id": "f11499:c0:m33"} {"signature": "def nostroke(self):", "body": "self._strokecolor = None", "docstring": "Stop applying strokes to new paths.", "id": "f11499:c0:m34"} {"signature": "def strokewidth(self, w=None):", "body": "if w is not None:self._strokewidth = welse:return self._strokewidth", "docstring": "Set the stroke width.", "id": "f11499:c0:m35"} {"signature": "def background(self,*args):", "body": "self._canvas.background = self.color(*args)", "docstring": "Set the background colour.", "id": "f11499:c0:m36"} {"signature": "def font(self, fontpath=None, fontsize=None):", "body": "if fontpath is not None:self._canvas.fontfile = fontpathelse:return self._canvas.fontfileif fontsize is not None:self._canvas.fontsize = fontsize", "docstring": "Set the font to be used with new text instances.\n\n Accepts any font Pango can recognize", "id": "f11499:c0:m37"} {"signature": "def text(self, txt, x, y, width=None, height=, outline=False, draw=True, **kwargs):", "body": "txt = self.Text(txt, x, y, width, height, outline=outline, ctx=None, **kwargs)if outline:path = txt.pathif draw:path.draw()return pathelse:return txt", "docstring": "Draws a string of text according to current font settings.", "id": "f11499:c0:m39"} {"signature": "def textpath(self, txt, x, y, width=None, height=, enableRendering=False, **kwargs):", "body": "txt = self.Text(txt, x, y, width, height, **kwargs)path = txt.pathif draw:path.draw()return path", "docstring": "Draws an outlined path of the input text", "id": "f11499:c0:m40"} {"signature": "def textmetrics(self, txt, width=None, height=None, **kwargs):", "body": "txt = self.Text(txt, , , width, height, enableRendering=False, **kwargs)return txt.metrics", "docstring": "Returns the width and height of a string of text as a tuple\n (according to current font settings).", "id": "f11499:c0:m41"} {"signature": "def textwidth(self, txt, width=None):", "body": "w = widthreturn self.textmetrics(txt, width=w)[]", "docstring": "Returns the width of a string of text according to the current\n font settings.", "id": "f11499:c0:m42"} {"signature": "def textheight(self, txt, width=None):", "body": "w = widthreturn self.textmetrics(txt, width=w)[]", "docstring": "Returns the height of a string of text according to the current\n font settings.", "id": "f11499:c0:m43"} {"signature": "def 
__init__(self, canvas, namespace=None, vars=None):", "body": "Grammar.__init__(self, canvas, namespace=namespace, vars=vars)canvas.set_bot(self)self._autoclosepath = Trueself._path = Noneif self._input_device:for key_name, value in self._input_device.get_key_map().items():self._namespace[key_name] = valuesetattr(self, key_name, value)self._canvas.size = Noneself._frame = self._set_initial_defaults()", "docstring": ":param canvas: Canvas implementation for output.\n:param namespace: Optionally specify a dict to inject as namespace\n:param vars: Optional dict containing initial values for variables", "id": "f11500:c0:m0"} {"signature": "def _set_initial_defaults(self):", "body": "DEFAULT_WIDTH, DEFAULT_HEIGHT = self._canvas.DEFAULT_SIZEself.WIDTH = self._namespace.get('', DEFAULT_WIDTH)self.HEIGHT = self._namespace.get('', DEFAULT_WIDTH)if '' in self._namespace or '' in self._namespace:self.size(w=self._namespace.get(''), h=self._namespace.get(''))self._transformmode = Bot.CENTERself._canvas.settings(fontfile=\"\",fontsize=,align=Bot.LEFT,lineheight=,fillcolor=self.color(),strokecolor=None,strokewidth=,background=self.color(, , ))", "docstring": "Set the default values. Called at __init__ and at the end of run(),\n do that new draw loop iterations don't take up values left over by the\n previous one.", "id": "f11500:c0:m1"} {"signature": "def _mouse_button_down(self, button):", "body": "self._namespace[''] = True", "docstring": "GUI callback for mouse button down", "id": "f11500:c0:m3"} {"signature": "def _mouse_button_up(self, button):", "body": "self._namespace[''] = self._input_device.mouse_down", "docstring": "GUI callback for mouse button up", "id": "f11500:c0:m4"} {"signature": "def _mouse_pointer_moved(self, x, y):", "body": "self._namespace[''] = xself._namespace[''] = y", "docstring": "GUI callback for mouse moved", "id": "f11500:c0:m5"} {"signature": "def _key_pressed(self, key, keycode):", "body": "self._namespace[''] = keyself._namespace[''] = keycodeself._namespace[''] = True", "docstring": "GUI callback for key pressed", "id": "f11500:c0:m6"} {"signature": "def _key_released(self, key, keycode):", "body": "self._namespace[''] = self._input_device.key_down", "docstring": "GUI callback for key released", "id": "f11500:c0:m7"} {"signature": "def setup(self):", "body": "pass", "docstring": "For override by user sketch", "id": "f11500:c0:m8"} {"signature": "def draw(self):", "body": "self._dynamic = False", "docstring": "For override by user sketch", "id": "f11500:c0:m9"} {"signature": "def _makeInstance(self, clazz, args, kwargs):", "body": "inst = clazz(self, *args, **kwargs)return inst", "docstring": "Creates an instance of a class defined in this document.\n This method sets the context of the object to the current context.", "id": "f11500:c0:m10"} {"signature": "def _makeColorableInstance(self, clazz, args, kwargs):", "body": "kwargs = dict(kwargs)fill = kwargs.get('', self._canvas.fillcolor)if not isinstance(fill, Color):fill = Color(fill, mode='', color_range=)kwargs[''] = fillstroke = kwargs.get('', self._canvas.strokecolor)if not isinstance(stroke, Color):stroke = Color(stroke, mode='', color_range=)kwargs[''] = strokekwargs[''] = kwargs.get('', self._canvas.strokewidth)inst = clazz(self, *args, **kwargs)return inst", "docstring": "Create an object, if fill, stroke or strokewidth\nis not specified, get them from the _canvas\n\n:param clazz:\n:param args:\n:param kwargs:\n:return:", "id": "f11500:c0:m11"} {"signature": "def color(self, *args):", "body": "return 
self.Color(mode=self.color_mode, color_range=self.color_range, *args)", "docstring": ":param args: color in a supported format.\n\n:return: Color object containing the color.", "id": "f11500:c0:m22"} {"signature": "def grid(self, cols, rows, colSize=, rowSize=, shuffled=False):", "body": "from random import shufflerowRange = range(int(rows))colRange = range(int(cols))if (shuffled):shuffle(rowRange)shuffle(colRange)for y in rowRange:for x in colRange:yield (x * colSize, y * rowSize)", "docstring": "Returns an iterator that contains coordinate tuples.\n The grid can be used to quickly create grid-like structures.\n A common way to use them is:\n for x, y in grid(10,10,12,12):\n rect(x,y, 10,10)", "id": "f11500:c0:m24"} {"signature": "def files(self, path=\"\"):", "body": "return glob(path)", "docstring": "Returns a list of files.\n You can use wildcards to specify which files to pick, e.g.\n f = files('*.gif')\n\n :param path: wildcard to use in file list.", "id": "f11500:c0:m25"} {"signature": "def snapshot(self, target=None, defer=None, autonumber=False):", "body": "if autonumber:file_number = self._frameelse:file_number = Noneif isinstance(target, cairo.Surface):if defer is None:self._canvas.snapshot(surface, defer)defer = Falsectx = cairo.Context(target)self._canvas._drawqueue.render(ctx)returnelif target is None:script_file = self._namespace.get('')if script_file:target = os.path.splitext(script_file)[] + ''file_number = Trueif target:if defer is None:defer = Trueself._canvas.snapshot(target, defer=defer, file_number=file_number)else:raise ShoebotError('')", "docstring": "Save the contents of current surface into a file or cairo surface/context\n\n :param filename: Can be a filename or a Cairo surface.\n :param defer: If true, buffering/threading may be employed however output will not be immediate.\n :param autonumber: If true then a number will be appended to the filename.", "id": "f11500:c0:m26"} {"signature": "def show(self, format='', as_data=False):", "body": "from io import BytesIOb = BytesIO()if format == '':from IPython.display import Imagesurface = cairo.ImageSurface(cairo.FORMAT_ARGB32, self.WIDTH, self.HEIGHT)self.snapshot(surface)surface.write_to_png(b)b.seek()data = b.read()if as_data:return dataelse:return Image(data)elif format == '':from IPython.display import SVGsurface = cairo.SVGSurface(b, self.WIDTH, self.HEIGHT)surface.finish()b.seek()data = b.read()if as_data:return dataelse:return SVG(data)", "docstring": "Returns an Image object of the current surface. Used for displaying\n output in Jupyter notebooks. 
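Illustrative aside: the grid() record above yields coordinate tuples for grid-like layouts. A self-contained sketch of that behaviour (the literal default cell sizes are elided in the record, so 1 is assumed here):

from random import shuffle

def grid(cols, rows, colSize=1, rowSize=1, shuffled=False):
    # Yield (x, y) tuples row by row, optionally in shuffled order.
    rowRange = list(range(int(rows)))
    colRange = list(range(int(cols)))
    if shuffled:
        shuffle(rowRange)
        shuffle(colRange)
    for y in rowRange:
        for x in colRange:
            yield (x * colSize, y * rowSize)

# Usage as in the docstring: for x, y in grid(10, 10, 12, 12): rect(x, y, 10, 10)
assert len(list(grid(10, 10, 12, 12))) == 100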
Adapted from the cairo-jupyter project.", "id": "f11500:c0:m27"} {"signature": "def ximport(self, libName):", "body": "lib = __import__(libName)self._namespace[libName] = liblib._ctx = selfreturn lib", "docstring": "Import Nodebox libraries.\n\nThe libraries get _ctx, which provides\nthem with the nodebox API.\n\n:param libName: Library name to import", "id": "f11500:c0:m28"} {"signature": "def size(self, w=None, h=None):", "body": "if not w:w = self._canvas.widthif not h:h = self._canvas.heightif not w and not h:return (self._canvas.width, self._canvas.height)w, h = self._canvas.set_size((w, h))self._namespace[''] = wself._namespace[''] = hself.WIDTH = w self.HEIGHT = h", "docstring": "Set the canvas size\n\n Only the first call will actually be effective.\n\n :param w: Width\n :param h: height", "id": "f11500:c0:m29"} {"signature": "def speed(self, framerate=None):", "body": "if framerate is not None:self._speed = framerateself._dynamic = Trueelse:return self._speed", "docstring": "Set animation framerate.\n\n :param framerate: Frames per second to run bot.\n :return: Current framerate of animation.", "id": "f11500:c0:m30"} {"signature": "@propertydef is_edited(self):", "body": "return self.edited_source is not None", "docstring": ":return: True if source has been edited", "id": "f11501:c0:m1"} {"signature": "def load_edited_source(self, source, good_cb=None, bad_cb=None, filename=None):", "body": "with LiveExecution.lock:self.good_cb = good_cbself.bad_cb = bad_cbtry:compile(source + '', filename or self.filename, \"\")self.edited_source = sourceexcept Exception as e:if bad_cb:self.edited_source = Nonetb = traceback.format_exc()self.call_bad_cb(tb)returnif filename is not None:self.filename = filename", "docstring": "Load changed code into the execution environment.\n\nUntil the code is executed correctly, it will be\nin the 'tenuous' state.", "id": "f11501:c0:m2"} {"signature": "def reload_functions(self):", "body": "with LiveExecution.lock:if self.edited_source:tree = ast.parse(self.edited_source)for f in [n for n in ast.walk(tree) if isinstance(n, ast.FunctionDef)]:self.ns[f.name].__code__ = meta.decompiler.compile_func(f, self.filename, self.ns).__code__", "docstring": "Replace functions in namespace with functions from edited_source.", "id": "f11501:c0:m3"} {"signature": "def do_exec(self, source, ns):", "body": "exec(source, ns)", "docstring": "Override if you want to do something other than exec in ns\n\ntenuous is True if the source has just been edited and may fail", "id": "f11501:c0:m4"} {"signature": "def run_tenuous(self):", "body": "with LiveExecution.lock:ns_snapshot = copy.copy(self.ns)try:source = self.edited_sourceself.edited_source = Noneself.do_exec(source, ns_snapshot)self.known_good = sourceself.call_good_cb()return True, Noneexcept Exception as ex:tb = traceback.format_exc()self.call_bad_cb(tb)self.ns.clear()self.ns.update(ns_snapshot)return False, ex", "docstring": "Run edited source, if no exceptions occur then it\ngraduates to known good.", "id": "f11501:c0:m5"} {"signature": "def run(self):", "body": "with LiveExecution.lock:if self.edited_source:success, ex = self.run_tenuous()if success:returnself.do_exec(self.known_good, self.ns)", "docstring": "Attempt to known good or tenuous source.", "id": "f11501:c0:m6"} {"signature": "def clear_callbacks(self):", "body": "with LiveExecution.lock:self.bad_cb = Noneself.good_cb = None", "docstring": "clear the good and bad callbacks", "id": "f11501:c0:m7"} {"signature": "def call_bad_cb(self, tb):", "body": "with 
LiveExecution.lock:if self.bad_cb and not self.bad_cb(tb):self.bad_cb = None", "docstring": "If bad_cb returns True then keep it\n:param tb: traceback that caused exception\n:return:", "id": "f11501:c0:m8"} {"signature": "def call_good_cb(self):", "body": "with LiveExecution.lock:if self.good_cb and not self.good_cb():self.good_cb = None", "docstring": "If good_cb returns True then keep it\n:return:", "id": "f11501:c0:m9"} {"signature": "@contextlib.contextmanagerdef run_context(self):", "body": "with LiveExecution.lock:if self.edited_source is None:yield True, self.known_good, self.nsreturnns_snapshot = copy.copy(self.ns)try:yield False, self.edited_source, self.nsself.known_good = self.edited_sourceself.edited_source = Noneself.call_good_cb()returnexcept Exception as ex:tb = traceback.format_exc()self.call_bad_cb(tb)self.edited_source = Noneself.ns.clear()self.ns.update(ns_snapshot)", "docstring": "Context in which the user can run the source in a custom manner.\n\nIf no exceptions occur then the source will move from 'tenuous'\nto 'known good'.\n\n>>> with run_context() as (known_good, source, ns):\n>>> ... exec source in ns\n>>> ... ns['draw']()", "id": "f11501:c0:m10"} {"signature": "def _load_namespace(self, namespace, filename=None):", "body": "from shoebot import datafor name in dir(data):namespace[name] = getattr(data, name)for name in dir(self):if name[] != '':namespace[name] = getattr(self, name)namespace[''] = self namespace[''] = filename", "docstring": "Initialise bot namespace with info in shoebot.data\n\n:param filename: Will be set to __file__ in the namespace", "id": "f11502:c0:m1"} {"signature": "def _should_run(self, iteration, max_iterations):", "body": "if iteration == :return Trueif max_iterations:if iteration < max_iterations:return Trueelif max_iterations is None:if self._dynamic:return Trueelse:return Falsereturn Trueif not self._dynamic:return Falsereturn False", "docstring": "Return False if bot should quit", "id": "f11502:c0:m2"} {"signature": "def _frame_limit(self, start_time):", "body": "if self._speed:completion_time = time()exc_time = completion_time - start_timesleep_for = ( / abs(self._speed)) - exc_timeif sleep_for > :sleep(sleep_for)", "docstring": "Limit to framerate, should be called after\nrendering has completed\n\n:param start_time: When execution started", "id": "f11502:c0:m3"} {"signature": "def _run_frame(self, executor, limit=False, iteration=):", "body": "start_time = time()if iteration != and self._speed != :self._canvas.reset_canvas()self._set_dynamic_vars()if iteration == :executor.run()executor.ns['']()executor.ns['']()self._canvas.flush(self._frame)else:if self._dynamic:if self._speed != : with executor.run_context() as (known_good, source, ns):if not known_good:executor.reload_functions()with VarListener.batch(self._vars, self._oldvars, ns):self._oldvars.clear()exec(source, ns)ns['']()self._canvas.flush(self._frame)else:with executor.run_context() as (known_good, source, ns):if not known_good:executor.reload_functions()with VarListener.batch(self._vars, self._oldvars, ns):self._oldvars.clear()exec(source, ns)else:exec(source, ns)self._canvas.flush(self._frame)if limit:self._frame_limit(start_time)if self._speed > :self._frame += elif self._speed < :self._frame -= ", "docstring": "Run single frame of the bot\n\n :param source_or_code: path to code to run, or actual code.\n :param limit: Time a frame should take to run (float - seconds)", "id": "f11502:c0:m4"} {"signature": "def run(self, inputcode, iterations=None, run_forever=False, 
frame_limiter=False, verbose=False,break_on_error=False):", "body": "source = Nonefilename = Noneif os.path.isfile(inputcode):source = open(inputcode).read()filename = inputcodeelif isinstance(inputcode, str):filename = ''source = inputcodeself._load_namespace(self._namespace, filename)self._executor = executor = LiveExecution(source, ns=self._namespace, filename=filename)try:if not iterations:if run_forever:iterations = Noneelse:iterations = iteration = event = Nonewhile iteration != iterations and not event_is(event, QUIT_EVENT):self._run_frame(executor, limit=frame_limiter, iteration=iteration)if iteration == :self._initial_namespace = copy.copy(self._namespace) iteration += while self._should_run(iteration, iterations) and event is None:iteration += self._run_frame(executor, limit=frame_limiter, iteration=iteration)event = next_event()if not event:self._canvas.sink.main_iteration() while run_forever:while event is None:self._canvas.sink.main_iteration()event = next_event(block=True, timeout=)if not event:self._canvas.sink.main_iteration() if event.type == QUIT_EVENT:breakelif event.type == SOURCE_CHANGED_EVENT:while event and event.type == SOURCE_CHANGED_EVENT:event = next_event(block=True, timeout=)elif event.type == SET_WINDOW_TITLE:self._canvas.sink.set_title(event.data)event = None breakexcept Exception as e:import sysif verbose:errmsg = traceback.format_exc()else:errmsg = simple_traceback(e, executor.known_good or '')print(errmsg, file=sys.stderr)if break_on_error:raise", "docstring": "Executes the contents of a Nodebox/Shoebot script\nin current surface's context.\n\n:param inputcode: Path to shoebot source or string containing source\n:param iterations: None or Maximum amount of frames to run\n:param run_forever: If True then run until user quits the bot\n:param frame_limiter: If True then sleep between frames to respect speed() command.", "id": "f11502:c0:m5"} {"signature": "def _addvar(self, v):", "body": "oldvar = self._oldvars.get(v.name)if oldvar is not None:if isinstance(oldvar, Variable):if oldvar.compliesTo(v):v.value = oldvar.valueelse:v.value = v.sanitize(oldvar)else:for listener in VarListener.listeners:listener.var_added(v)self._vars[v.name] = vself._namespace[v.name] = v.valueself._oldvars[v.name] = vreturn v", "docstring": "Sets a new accessible variable.\n\n :param v: Variable.", "id": "f11502:c0:m7"} {"signature": "def simple_traceback(ex, source):", "body": "exc_type, exc_value, exc_tb = sys.exc_info()exc = traceback.format_exception(exc_type, exc_value, exc_tb)source_arr = source.splitlines()exc_location = exc[-]for i, err in enumerate(exc):if '' in err:exc_location = exc[i + ]breakfn = exc_location.split('')[][:-]line_number = int(exc_location.split('')[].replace('', '').strip())err_msgs = []err_where = ''.join(exc[i - ].split('')[:]).strip() err_msgs.append('' % err_where)for i in xrange(max(, line_number - ), line_number):if fn == \"\":line = source_arr[i]else:line = linecache.getline(fn, i + )err_msgs.append('' % (i + , line.rstrip()))err_msgs.append('' % (len(str(i)) * '', exc[-].rstrip()))err_msgs.append('')err_msgs.append(exc[].rstrip())for err in exc[:]:err_msgs.append(err.rstrip())return ''.join(err_msgs)", "docstring": "Format traceback, showing line number and surrounding source.", "id": "f11504:m0"} {"signature": "def __init__(self, canvas=None, namespace=None, vars=None):", "body": "Bot.__init__(self, canvas, namespace=namespace, vars=vars)canvas.mode = CORNERself._ns = self._namespace", "docstring": "Nodebot grammar constructor\n\n:param canvas: 
Canvas implementation for output.\n:param namespace: Optionally specify a dict to inject as namespace\n:param vars: Optional dict containing initial values for variables", "id": "f11505:c0:m0"} {"signature": "def image(self, path, x, y, width=None, height=None, alpha=, data=None, draw=True, **kwargs):", "body": "return self.Image(path, x, y, width, height, alpha, data, **kwargs)", "docstring": "Draws a image form path, in x,y and resize it to width, height dimensions.", "id": "f11505:c0:m1"} {"signature": "def imagesize(self, path):", "body": "img = PILImage.open(path)return img.size", "docstring": ":param: path Path to image file.\n:return: image size as tuple (width, height)", "id": "f11505:c0:m2"} {"signature": "def rect(self, x, y, width, height, roundness=, draw=True, **kwargs):", "body": "path = self.BezierPath(**kwargs)path.rect(x, y, width, height, roundness, self.rectmode)if draw:path.draw()return path", "docstring": "Draw a rectangle from x, y of width, height.\n\n:param startx: top left x-coordinate\n:param starty: top left y-coordinate\n\n:param width: height Size of rectangle.\n:roundness: Corner roundness defaults to 0.0 (a right-angle).\n:draw: If True draws immediately.\n:fill: Optionally pass a fill color.\n\n:return: path representing the rectangle.", "id": "f11505:c0:m3"} {"signature": "def rectmode(self, mode=None):", "body": "if mode in (self.CORNER, self.CENTER, self.CORNERS):self.rectmode = modereturn self.rectmodeelif mode is None:return self.rectmodeelse:raise ShoebotError(_(\"\"))", "docstring": "Set the current rectmode.\n\n:param mode: CORNER, CENTER, CORNERS\n:return: rectmode if mode is None or valid.", "id": "f11505:c0:m4"} {"signature": "def ellipsemode(self, mode=None):", "body": "if mode in (self.CORNER, self.CENTER, self.CORNERS):self.ellipsemode = modereturn self.ellipsemodeelif mode is None:return self.ellipsemodeelse:raise ShoebotError(_(\"\"))", "docstring": "Set the current ellipse drawing mode.\n\n:param mode: CORNER, CENTER, CORNERS\n:return: ellipsemode if mode is None or valid.", "id": "f11505:c0:m5"} {"signature": "def oval(self, x, y, width, height, draw=True, **kwargs):", "body": "path = self.BezierPath(**kwargs)path.ellipse(x, y, width, height, self.ellipsemode)if draw:path.draw()return path", "docstring": "Draw an ellipse starting from (x,y) - ovals and ellipses are not the same", "id": "f11505:c0:m6"} {"signature": "def ellipse(self, x, y, width, height, draw=True, **kwargs):", "body": "path = self.BezierPath(**kwargs)path.ellipse(x, y, width, height, self.ellipsemode)if draw:path.draw()return path", "docstring": "Draw an ellipse starting from (x,y)", "id": "f11505:c0:m7"} {"signature": "def circle(self, x, y, diameter, draw=True, **kwargs):", "body": "return self.ellipse(x, y, diameter, diameter, draw, **kwargs)", "docstring": "Draw a circle\n :param x: x-coordinate of the top left corner\n :param y: y-coordinate of the top left corner\n :param diameter: Diameter of circle.\n :param draw: Draw immediately (defaults to True, set to False to inhibit drawing)\n :return: Path object representing circle", "id": "f11505:c0:m8"} {"signature": "def line(self, x1, y1, x2, y2, draw=True):", "body": "p = self._pathself.beginpath()self.moveto(x1, y1)self.lineto(x2, y2)self.endpath(draw=draw)self._path = preturn p", "docstring": "Draw a line from (x1,y1) to (x2,y2)\n :param x1: start x-coordinate\n :param y1: start y-coordinate\n :param x2: end x-coordinate\n :param y2: end y-coordinate", "id": "f11505:c0:m9"} {"signature": "def arrow(self, x, y, 
width, type=NORMAL, draw=True, **kwargs):", "body": "path = self.BezierPath(**kwargs)if type == self.NORMAL:head = width * tail = width * path.moveto(x, y)path.lineto(x - head, y + head)path.lineto(x - head, y + tail)path.lineto(x - width, y + tail)path.lineto(x - width, y - tail)path.lineto(x - head, y - tail)path.lineto(x - head, y - head)path.lineto(x, y)elif type == self.FORTYFIVE:head = tail = + headpath.moveto(x, y)path.lineto(x, y + width * ( - head))path.lineto(x - width * head, y + width)path.lineto(x - width * head, y + width * tail * )path.lineto(x - width * tail * , y + width)path.lineto(x - width, y + width * tail * )path.lineto(x - width * tail * , y + width * head)path.lineto(x - width, y + width * head)path.lineto(x - width * ( - head), y)path.lineto(x, y)else:raise NameError(_(\"\"))if draw:path.draw()return path", "docstring": "Draw an arrow.\n\n Arrows can be two types: NORMAL or FORTYFIVE.\n\n :param x: top left x-coordinate\n :param y: top left y-coordinate\n :param width: width of arrow\n :param type: NORMAL or FORTYFIVE\n :draw: If True draws arrow immediately\n\n :return: Path object representing the arrow.", "id": "f11505:c0:m10"} {"signature": "def star(self, startx, starty, points=, outer=, inner=, draw=True, **kwargs):", "body": "self.beginpath(**kwargs)self.moveto(startx, starty + outer)for i in range(, int( * points)):angle = i * pi / pointsx = sin(angle)y = cos(angle)if i % :radius = innerelse:radius = outerx = startx + radius * xy = starty + radius * yself.lineto(x, y)return self.endpath(draw)", "docstring": "Draws a star.", "id": "f11505:c0:m11"} {"signature": "def drawimage(self, image, x=None, y=None):", "body": "if x is None:x = image.xif y is None:y = image.yself.image(image.path, image.x, image.y, data=image.data)", "docstring": ":param image: Image to draw\n:param x: optional, x coordinate (default is image.x)\n:param y: optional, y coordinate (default is image.y)\n:return:", "id": "f11505:c0:m20"} {"signature": "def relmoveto(self, x, y):", "body": "if self._path is None:raise ShoebotError(_(\"\"))self._path.relmoveto(x, y)", "docstring": "Move relatively to the last point.", "id": "f11505:c0:m22"} {"signature": "def rellineto(self, x, y):", "body": "if self._path is None:raise ShoebotError(_(\"\"))self._path.rellineto(x, y)", "docstring": "Draw a line using relative coordinates.", "id": "f11505:c0:m23"} {"signature": "def relcurveto(self, h1x, h1y, h2x, h2y, x, y):", "body": "if self._path is None:raise ShoebotError(_(\"\"))self._path.relcurveto(h1x, h1y, h2x, h2y, x, y)", "docstring": "Draws a curve relatively to the last point.", "id": "f11505:c0:m24"} {"signature": "def findpath(self, points, curvature=):", "body": "for i, pt in enumerate(points):if type(pt) == TupleType:points[i] = Point(pt[], pt[])if len(points) == :return Noneif len(points) == :path = self.BezierPath(None)path.moveto(points[].x, points[].y)return pathif len(points) == :path = self.BezierPath(None)path.moveto(points[].x, points[].y)path.lineto(points[].x, points[].y)return pathcurvature = max(, min(, curvature))if curvature == :path = self.BezierPath(None)path.moveto(points[].x, points[].y)for i in range(len(points)):path.lineto(points[i].x, points[i].y)return pathcurvature = + ( - curvature) * dx = {: , len(points) - : }dy = {: , len(points) - : }bi = {: -}ax = {: (points[].x - points[].x - dx[]) / }ay = {: (points[].y - points[].y - dy[]) / }for i in range(, len(points) - ):bi[i] = - / (curvature + bi[i - ])ax[i] = -(points[i + ].x - points[i - ].x - ax[i - ]) * bi[i]ay[i] = 
-(points[i + ].y - points[i - ].y - ay[i - ]) * bi[i]r = range(, len(points) - )r.reverse()for i in r:dx[i] = ax[i] + dx[i + ] * bi[i]dy[i] = ay[i] + dy[i + ] * bi[i]path = self.BezierPath(None)path.moveto(points[].x, points[].y)for i in range(len(points) - ):path.curveto(points[i].x + dx[i],points[i].y + dy[i],points[i + ].x - dx[i + ],points[i + ].y - dy[i + ],points[i + ].x,points[i + ].y)return path", "docstring": "Constructs a path between the given list of points.\n\n Interpolates the list of points and determines\n a smooth bezier path betweem them.\n\n The curvature parameter offers some control on\n how separate segments are stitched together:\n from straight angles to smooth curves.\n Curvature is only useful if the path has more than three points.", "id": "f11505:c0:m25"} {"signature": "def transform(self, mode=None):", "body": "if mode:self._canvas.mode = modereturn self._canvas.mode", "docstring": "Set the current transform mode.\n\n:param mode: CENTER or CORNER", "id": "f11505:c0:m28"} {"signature": "def translate(self, xt, yt, mode=None):", "body": "self._canvas.translate(xt, yt)if mode:self._canvas.mode = mode", "docstring": "Translate the current position by (xt, yt) and\noptionally set the transform mode.\n\n:param xt: Amount to move horizontally\n:param yt: Amount to move vertically\n:mode: Set the transform mode to CENTER or CORNER", "id": "f11505:c0:m29"} {"signature": "def rotate(self, degrees=, radians=):", "body": "if radians:angle = radianselse:angle = deg2rad(degrees)self._canvas.rotate(-angle)", "docstring": "Set the current rotation in degrees or radians.\n\n:param degrees: Degrees to rotate\n:param radians: Radians to rotate", "id": "f11505:c0:m30"} {"signature": "def scale(self, x=, y=None):", "body": "if not y:y = xif x == :x = if y == :y = self._canvas.scale(x, y)", "docstring": "Set a scale at which to draw objects.\n\n1.0 draws objects at their natural size\n\n:param x: Scale on the horizontal plane\n:param y: Scale on the vertical plane", "id": "f11505:c0:m31"} {"signature": "def outputmode(self):", "body": "raise NotImplementedError(_(\"\"))", "docstring": "NOT IMPLEMENTED", "id": "f11505:c0:m36"} {"signature": "def colormode(self, mode=None, crange=None):", "body": "if mode is not None:if mode == \"\":self.color_mode = RGBelif mode == \"\":self.color_mode = HSBelse:raise NameError(_(\"\"))if crange is not None:self.color_range = crangereturn self.color_mode", "docstring": "Set the current colormode (can be RGB or HSB) and eventually\n the color range.\n\n If called without arguments, it returns the current colormode.\n\n :param mode: Color mode, either \"rgb\", or \"hsb\"\n :param crange: Maximum scale value for color, e.g. 1.0 or 255\n\n :return: Returns the current color mode.", "id": "f11505:c0:m37"} {"signature": "def colorrange(self, crange):", "body": "self.color_range = float(crange)", "docstring": "By default colors range from 0.0 - 1.0 using colorrange\n other defaults can be used, e.g. 
0.0 - 255.0\n\n :param crange: Color range of 0.0 - 255:\n >>> colorrange(256)", "id": "f11505:c0:m38"} {"signature": "def fill(self, *args):", "body": "if args is not None:self._canvas.fillcolor = self.color(*args)return self._canvas.fillcolor", "docstring": "Sets a fill color, applying it to new paths.\n\n :param args: color in supported format", "id": "f11505:c0:m39"} {"signature": "def nofill(self):", "body": "self._canvas.fillcolor = None", "docstring": "Stop applying fills to new paths.", "id": "f11505:c0:m40"} {"signature": "def stroke(self, *args):", "body": "if args is not None:self._canvas.strokecolor = self.color(*args)return self._canvas.strokecolor", "docstring": "Set a stroke color, applying it to new paths.\n\n :param args: color in supported format", "id": "f11505:c0:m41"} {"signature": "def nostroke(self):", "body": "c = self._canvas.strokecolorself._canvas.strokecolor = Nonereturn c", "docstring": "Stop applying strokes to new paths.\n\n :return: stroke color before nostroke was called.", "id": "f11505:c0:m42"} {"signature": "def strokewidth(self, w=None):", "body": "if w is not None:self._canvas.strokewidth = welse:return self._canvas.strokewidth", "docstring": "Set the stroke width.\n\n :param w: Stroke width.\n :return: If no width was specified then current width is returned.", "id": "f11505:c0:m43"} {"signature": "def background(self, *args):", "body": "self._canvas.background = self.color(*args)", "docstring": "Set the background color.\n\n :param color: See color() function for supported color formats.", "id": "f11505:c0:m44"} {"signature": "def font(self, fontpath=None, fontsize=None):", "body": "if fontpath is not None:self._canvas.fontfile = fontpathelse:return self._canvas.fontfileif fontsize is not None:self._canvas.fontsize = fontsize", "docstring": "Set the font to be used with new text instances.\n\n :param fontpath: path to truetype or opentype font.\n :param fontsize: size of font\n\n :return: current current fontpath (if fontpath param not set)\n Accepts TrueType and OpenType files. 
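Illustrative aside: a hypothetical minimal bot script combining the state commands documented in this block (it runs under Shoebot itself, not as plain Python; the colours and sizes are only examples):

size(300, 300)
colormode('rgb')     # documented modes are "rgb" and "hsb"
colorrange(255)      # use 0-255 colour values instead of 0.0-1.0
background(255)      # white canvas
stroke(0)            # black outlines
strokewidth(2)
for i in range(10):
    fill(255 - i * 25, 40, 120)
    rect(20 + i * 10, 20 + i * 10, 80, 80, roundness=0.2)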
Depends on FreeType being\n installed.", "id": "f11505:c0:m45"} {"signature": "def fontsize(self, fontsize=None):", "body": "if fontsize is not None:self._canvas.fontsize = fontsizeelse:return self._canvas.fontsize", "docstring": "Set or return size of current font.\n\n:param fontsize: Size of font.\n:return: Size of font (if fontsize was not specified)", "id": "f11505:c0:m46"} {"signature": "def text(self, txt, x, y, width=None, height=, outline=False, draw=True, **kwargs):", "body": "txt = self.Text(txt, x, y, width, height, outline=outline, ctx=None, **kwargs)if outline:path = txt.pathif draw:path.draw()return pathelse:return txt", "docstring": "Draws a string of text according to current font settings.\n\n:param txt: Text to output\n:param x: x-coordinate of the top left corner\n:param y: y-coordinate of the top left corner\n:param width: text width\n:param height: text height\n:param outline: If True draws outline text (defaults to False)\n:param draw: Set to False to inhibit immediate drawing (defaults to True)\n:return: Path object representing the text.", "id": "f11505:c0:m47"} {"signature": "def textpath(self, txt, x, y, width=None, height=, draw=False, **kwargs):", "body": "txt = self.Text(txt, x, y, width, height, enableRendering=False, **kwargs)path = txt.pathif draw:path.draw()return path", "docstring": "Generates an outlined path of the input text.\n\n:param txt: Text to output\n:param x: x-coordinate of the top left corner\n:param y: y-coordinate of the top left corner\n:param width: text width\n:param height: text height\n:param draw: Set to False to inhibit immediate drawing (defaults to False)\n:return: Path object representing the text.", "id": "f11505:c0:m48"} {"signature": "def textmetrics(self, txt, width=None, height=None, **kwargs):", "body": "txt = self.Text(txt, , , width, height, enableRendering=False, **kwargs)return txt.metrics", "docstring": ":return: the width and height of a string of text as a tuple\naccording to current font settings.", "id": "f11505:c0:m49"} {"signature": "def textwidth(self, txt, width=None):", "body": "w = widthreturn self.textmetrics(txt, width=w)[]", "docstring": ":return: the width of a string of text according to the current\nfont settings.", "id": "f11505:c0:m50"} {"signature": "def textheight(self, txt, width=None):", "body": "w = widthreturn self.textmetrics(txt, width=w)[]", "docstring": "Returns the height of a string of text according to the current\n font settings.\n\n :param txt: string to measure\n :param width: width of a line of text in a block", "id": "f11505:c0:m51"} {"signature": "def lineheight(self, height=None):", "body": "if height is not None:self._canvas.lineheight = height", "docstring": "Set text lineheight.\n\n :param height: line height.", "id": "f11505:c0:m52"} {"signature": "def align(self, align=LEFT):", "body": "self._canvas.align = align", "docstring": "Set text alignment\n\n:param align: Text alignment (LEFT, CENTER, RIGHT)", "id": "f11505:c0:m53"} {"signature": "@propertydef canvas(self):", "body": "return self._canvas", "docstring": "Not entirely sure compatible the Shoebot 'canvas' is with Nodebox\nbut there you go.\n:return:", "id": "f11505:c0:m56"} {"signature": "def openAnything(source, searchpaths=None):", "body": "if hasattr(source, \"\"):return sourceif source == \"\":import sysreturn sys.stdinimport urllib.request, urllib.parse, urllib.errortry:return urllib.request.urlopen(source)except (IOError, OSError):passfor path in searchpaths or ['']:try:return open(os.path.join(path, source))except 
(IOError, OSError):passimport ioreturn io.StringIO(str(source))", "docstring": "URI, filename, or string --> stream\n\n This function lets you define parsers that take any input source\n (URL, pathname to local or network file, or actual data as a string)\n and deal with it in a uniform manner. Returned object is guaranteed\n to have all the basic stdio read methods (read, readline, readlines).\n Just .close() the object when you're done with it.\n\n Examples:\n >>> from xml.dom import minidom\n >>> sock = openAnything(\"http://localhost/kant.xml\")\n >>> doc = minidom.parse(sock)\n >>> sock.close()\n >>> sock = openAnything(\"c:\\\\inetpub\\\\wwwroot\\\\kant.xml\")\n >>> doc = minidom.parse(sock)\n >>> sock.close()\n >>> sock = openAnything(\"andor\")\n >>> doc = minidom.parse(sock)\n >>> sock.close()", "id": "f11511:m0"} {"signature": "def _load(self, source, searchpaths=None):", "body": "sock = openAnything(source, searchpaths=searchpaths)xmldoc = minidom.parse(sock).documentElementsock.close()return xmldoc", "docstring": "load XML input source, return parsed XML document\n\n - a URL of a remote XML file (\"http://diveintopython.org/kant.xml\")\n - a filename of a local XML file (\"~/diveintopython/common/py/kant.xml\")\n - standard input (\"-\")\n - the actual XML document, as a string\n\n :param searchpaths: optional searchpaths if file is used.", "id": "f11511:c1:m1"} {"signature": "def loadGrammar(self, grammar, searchpaths=None):", "body": "self.grammar = self._load(grammar, searchpaths=searchpaths)self.refs = {}for ref in self.grammar.getElementsByTagName(\"\"):self.refs[ref.attributes[\"\"].value] = ref", "docstring": "load context-free grammar", "id": "f11511:c1:m2"} {"signature": "def loadSource(self, source, searchpaths=None):", "body": "self.source = self._load(source, searchpaths=searchpaths)", "docstring": "load source", "id": "f11511:c1:m3"} {"signature": "def getDefaultSource(self):", "body": "xrefs = {}for xref in self.grammar.getElementsByTagName(\"\"):xrefs[xref.attributes[\"\"].value] = xrefs = list(xrefs.keys())standaloneXrefs = [e for e in list(self.refs.keys()) if e not in xrefs]if not standaloneXrefs:raise NoSourceError(\"\")return '' % random.choice(standaloneXrefs)", "docstring": "guess default source of the current grammar\n\n The default source will be one of the s that is not\n cross-referenced. 
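Illustrative aside: the openAnything() record above tries a chain of input sources. A runnable sketch of that documented fallback order (stream, stdin via "-", URL, local file on the search paths, then literal string data); the elided string literals are filled with assumed values:

import io, os, sys
import urllib.request

def openAnything(source, searchpaths=None):
    if hasattr(source, 'read'):            # already a stream
        return source
    if source == '-':                      # standard input
        return sys.stdin
    try:                                   # remote URL
        return urllib.request.urlopen(source)
    except (IOError, OSError, ValueError): # ValueError: not a URL at all
        pass
    for path in searchpaths or ['.']:      # local file on the search paths
        try:
            return open(os.path.join(path, source))
        except (IOError, OSError):
            pass
    return io.StringIO(str(source))        # fall back to treating it as literal data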
This sounds complicated but it's not.\n Example: The default source for kant.xml is\n \"\", because 'section' is the one \n that is not 'd anywhere in the grammar.\n In most grammars, the default source will produce the\n longest (and most interesting) output.", "id": "f11511:c1:m4"} {"signature": "def reset(self):", "body": "self.pieces = []self.capitalizeNextWord = ", "docstring": "reset parser", "id": "f11511:c1:m5"} {"signature": "def refresh(self):", "body": "self.reset()self.parse(self.source)return self.output()", "docstring": "reset output buffer, re-parse entire source file, and return output\n\n Since parsing involves a good deal of randomness, this is an\n easy way to get new output without having to reload a grammar file\n each time.", "id": "f11511:c1:m6"} {"signature": "def output(self):", "body": "return \"\".join(self.pieces)", "docstring": "output generated text", "id": "f11511:c1:m7"} {"signature": "def randomChildElement(self, node):", "body": "choices = [e for e in node.childNodesif e.nodeType == e.ELEMENT_NODE]chosen = random.choice(choices)if _debug:sys.stderr.write('' %(len(choices), [e.toxml() for e in choices]))sys.stderr.write('' % chosen.toxml())return chosen", "docstring": "choose a random child element of a node\n\n This is a utility method used by do_xref and do_choice.", "id": "f11511:c1:m8"} {"signature": "def parse(self, node):", "body": "parseMethod = getattr(self, \"\" % node.__class__.__name__)parseMethod(node)", "docstring": "parse a single XML node\n\n A parsed XML document (from minidom.parse) is a tree of nodes\n of various types. Each node is represented by an instance of the\n corresponding Python class (Element for a tag, Text for\n text data, Document for the top-level document). The following\n statement constructs the name of a class method based on the type\n of node we're parsing (\"parse_Element\" for an Element node,\n \"parse_Text\" for a Text node, etc.) and then calls the method.", "id": "f11511:c1:m9"} {"signature": "def parse_Document(self, node):", "body": "self.parse(node.documentElement)", "docstring": "parse the document node\n\n The document node by itself isn't interesting (to us), but\n its only child, node.documentElement, is: it's the root node\n of the grammar.", "id": "f11511:c1:m10"} {"signature": "def parse_Text(self, node):", "body": "text = node.dataif self.capitalizeNextWord:self.pieces.append(text[].upper())self.pieces.append(text[:])self.capitalizeNextWord = else:self.pieces.append(text)", "docstring": "parse a text node\n\n The text of a text node is usually added to the output buffer\n verbatim. The one exception is that

sets\n a flag to capitalize the first letter of the next word. If\n that flag is set, we capitalize the text and reset the flag.", "id": "f11511:c1:m11"} {"signature": "def parse_Element(self, node):", "body": "handlerMethod = getattr(self, \"\" % node.tagName)handlerMethod(node)", "docstring": "parse an element\n\n An XML element corresponds to an actual tag in the source:\n ,

, , etc.\n Each element type is handled in its own method. Like we did in\n parse(), we construct a method name based on the name of the\n element (\"do_xref\" for an tag, etc.) and\n call the method.", "id": "f11511:c1:m12"} {"signature": "def parse_Comment(self, node):", "body": "pass", "docstring": "parse a comment\n\n The grammar can contain XML comments, but we ignore them", "id": "f11511:c1:m13"} {"signature": "def do_xref(self, node):", "body": "id = node.attributes[\"\"].valueself.parse(self.randomChildElement(self.refs[id]))", "docstring": "handle tag\n\n An tag is a cross-reference to a \n tag. evaluates to a randomly chosen child of\n .", "id": "f11511:c1:m14"} {"signature": "def do_p(self, node):", "body": "keys = list(node.attributes.keys())if \"\" in keys:if node.attributes[\"\"].value == \"\":self.capitalizeNextWord = if \"\" in keys:chance = int(node.attributes[\"\"].value)doit = (chance > random.randrange())else:doit = if doit:for child in node.childNodes: self.parse(child)", "docstring": "handle

tag\n\n The

tag is the core of the grammar. It can contain almost\n anything: freeform text, tags, tags, even other\n

tags. If a \"class='sentence'\" attribute is found, a flag\n is set and the next word will be capitalized. If a \"chance='X'\"\n attribute is found, there is an X% chance that the tag will be\n evaluated (and therefore a (100-X)% chance that it will be\n completely ignored)", "id": "f11511:c1:m15"} {"signature": "def do_choice(self, node):", "body": "self.parse(self.randomChildElement(node))", "docstring": "handle tag\n\n A tag contains one or more

tags. One

tag\n is chosen at random and evaluated; the rest are ignored.", "id": "f11511:c1:m16"} {"signature": "@staticmethod@contextmanagerdef disabled():", "body": "VarListener.active = FalseyieldVarListener.active = True", "docstring": "Context manager to temporarily disable all listeners\n\n>>> with VarListener.disabled()\n... pass", "id": "f11512:c0:m8"} {"signature": "@staticmethod@contextmanagerdef batch(vars, oldvars, ns):", "body": "snapshot_vars = dict(vars)with VarListener.disabled():yieldadded_vars = set(oldvars.keys()) - set(snapshot_vars.keys())deleted_vars = set(snapshot_vars.keys()) - set(oldvars.keys())existing_vars = set(vars.keys()) - added_vars - deleted_varsfor name in existing_vars:old_var = snapshot_vars[name]new_var = vars[name]if old_var.type != new_var.type or old_var.min != new_var.min or old_var.max != new_var.max:deleted_vars.add(name)added_vars.add(name)if old_var.type == new_var.type:new_var.value = old_var.valuefor listener in VarListener.listeners:for name in deleted_vars:listener.var_deleted(snapshot_vars[name])if ns.get(name) is snapshot_vars[name].value:del ns[name]for name in added_vars:listener.var_added(vars[name])", "docstring": "Context manager to only update listeners\nat the end, in the meantime it doesn't\nmatter what intermediate state the vars\nare in (they can be added and removed)\n\n>>> with VarListener.batch()\n... pass", "id": "f11512:c0:m9"} {"signature": "def set_callbacks(self, **kwargs):", "body": "for name in self.SUPPORTED_CALLBACKS:func = kwargs.get(name, getattr(self, name))setattr(self, name, func)", "docstring": "Set callbacks for input events", "id": "f11513:c0:m1"} {"signature": "def get_key_down(self):", "body": "return bool(self.keys_pressed)", "docstring": "Return True if any key is pressed", "id": "f11513:c0:m2"} {"signature": "def get_mouse_down(self):", "body": "return bool(self.mouse_buttons_down)", "docstring": "Return True if any mouse button is pressed", "id": "f11513:c0:m3"} {"signature": "def __init__(self, target=None, format=None, multifile=False, buff=None):", "body": "DrawQueueSink.__init__(self)if format is None:if target is not None and format is not '':format = os.path.splitext(target)[][:].lower()self.filename = targetelif buff is not None:raise AttributeError(\"\")else:self.file_root, self.file_ext = os.path.splitext(filename)self.buff = buffself.format = formatself.target = targetself.multifile = multifile", "docstring": ":param target: output filename (or cairo surface if format is 'surface')\n:param format: if filename is specified this is not needed. 
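Illustrative aside: the CairoImageSink records below pick a cairo surface class from the output format. A sketch of that extension-to-surface dispatch (the extension strings are assumptions consistent with the surface classes used in the bodies):

import cairo

def surface_for(filename, width, height):
    ext = filename.rsplit('.', 1)[-1].lower()
    if ext == 'pdf':
        return cairo.PDFSurface(filename, width, height)
    if ext in ('ps', 'eps'):
        return cairo.PSSurface(filename, width, height)
    if ext == 'svg':
        return cairo.SVGSurface(filename, width, height)
    # default: raster surface, written out later with write_to_png()
    return cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)

ctx = cairo.Context(surface_for('frame.png', 400, 400))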
Can be 'surface' for Cairo surfaces\n:param multifile: If used with filename, then numbered files will be output for each froam.\n:param buff: optionally a file like object can be used instead of a filename\n this is useful for streaming output.", "id": "f11514:c0:m0"} {"signature": "def _output_file(self, frame):", "body": "if self.buff:return self.buffelif self.multifile:return self.file_root + \"\" % frame + self.file_extelse:return self.filename", "docstring": "If filename was used output a filename, along with multifile\nnumbered filenames will be used.\n\nIf buff was specified it is returned.\n\n:return: Output buff or filename.", "id": "f11514:c0:m1"} {"signature": "def create_rcontext(self, size, frame):", "body": "if self.format == '':surface = cairo.PDFSurface(self._output_file(frame), *size)elif self.format in ('', ''):surface = cairo.PSSurface(self._output_file(frame), *size)elif self.format == '':surface = cairo.SVGSurface(self._output_file(frame), *size)elif self.format == '':surface = self.targetelse:surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, *size)return cairo.Context(surface)", "docstring": "Called when CairoCanvas needs a cairo context to draw on", "id": "f11514:c0:m2"} {"signature": "def rendering_finished(self, size, frame, cairo_ctx):", "body": "surface = cairo_ctx.get_target()if self.format == '':surface.write_to_png(self._output_file(frame))surface.finish()surface.flush()", "docstring": "Called when CairoCanvas has rendered a bot", "id": "f11514:c0:m3"} {"signature": "def initial_transform(self):", "body": "return cairo.Matrix()", "docstring": "Return an identity matrix", "id": "f11515:c0:m2"} {"signature": "def output_closure(self, target, file_number=None):", "body": "def output_context(ctx):target_ctx = targettarget_ctx.set_source_surface(ctx.get_target())target_ctx.paint()return target_ctxdef output_surface(ctx):target_ctx = cairo.Context(target)target_ctx.set_source_surface(ctx.get_target())target_ctx.paint()return target_ctxdef output_file(ctx):root, extension = os.path.splitext(target)if file_number:filename = '' % (root, file_number, extension)else:filename = targetextension = extension.lower()if extension == '':surface = ctx.get_target()surface.write_to_png(target)elif extension == '':target_ctx = cairo.Context(cairo.PDFSurface(filename, *self.size_or_default()))target_ctx.set_source_surface(ctx.get_target())target_ctx.paint()elif extension in ('', ''):target_ctx = cairo.Context(cairo.PSSurface(filename, *self.size_or_default()))if extension == '':target_ctx.set_eps(extension='')target_ctx.set_source_surface(ctx.get_target())target_ctx.paint()elif extension == '':target_ctx = cairo.Context(cairo.SVGSurface(filename, *self.size_or_default()))target_ctx.set_source_surface(ctx.get_target())target_ctx.paint()return filenameif isinstance(target, cairo.Context):return output_contextelif isinstance(target, cairo.Surface):return output_surfaceelse:return output_file", "docstring": "Function to output to a cairo surface\n\ntarget is a cairo Context or filename\nif file_number is set, then files will be numbered\n(this is usually set to the current frame number)", "id": "f11515:c0:m17"} {"signature": "def ctx_render_background(self, cairo_ctx):", "body": "cairo_ctx.set_source_rgba(*self.background)cairo_ctx.paint()", "docstring": "Draws the background colour of the bot", "id": "f11515:c0:m18"} {"signature": "def append_immediate(self, render_func):", "body": "raise NotImplementedError('')", "docstring": "In implementations of drawqueue that use buffering\nthis 
will run the whole queue up to this point", "id": "f11517:c0:m1"} {"signature": "def append(self, render_func):", "body": "self.render_funcs.append(render_func)", "docstring": "Add a render function to the queue.", "id": "f11517:c0:m2"} {"signature": "def render(self, r_context):", "body": "for render_func in self.render_funcs:render_func(r_context)return r_context", "docstring": "Call all the render functions with r_context\n\nr_context, is the render_context - Set of\nkeyword args that should make sense to the\ncanvas implementation", "id": "f11517:c0:m3"} {"signature": "def set_bot(self, bot):", "body": "self.bot = botself.sink.set_bot(bot)", "docstring": "Bot must be set before running", "id": "f11518:c0:m1"} {"signature": "def get_input_device(self):", "body": "return None", "docstring": "Overrides can return actual input device", "id": "f11518:c0:m2"} {"signature": "def initial_drawqueue(self):", "body": "return DrawQueue()", "docstring": "Override to create use special kinds of draw queue", "id": "f11518:c0:m3"} {"signature": "def initial_transform(self):", "body": "pass", "docstring": "Must be overriden to create initial transform matrix", "id": "f11518:c0:m4"} {"signature": "def settings(self, **kwargs):", "body": "for k, v in kwargs.items():setattr(self, k, v)", "docstring": "Pass a load of settings into the canvas", "id": "f11518:c0:m8"} {"signature": "def size_or_default(self):", "body": "if not self.size:self.size = self.DEFAULT_SIZEreturn self.size", "docstring": "If size is not set, otherwise set size to DEFAULT_SIZE\nand return it.\n\nThis means, only the first call to size() is valid.", "id": "f11518:c0:m9"} {"signature": "def set_size(self, size):", "body": "if self.size is None:self.size = sizereturn sizeelse:return self.size", "docstring": "Size is only set the first time it is called\n\nSize that is set is returned", "id": "f11518:c0:m10"} {"signature": "def snapshot(self, target, defer=True, file_number=None):", "body": "output_func = self.output_closure(target, file_number)if defer:self._drawqueue.append(output_func)else:self._drawqueue.append_immediate(output_func)", "docstring": "Ask the drawqueue to output to target.\n\ntarget can be anything supported by the combination\nof canvas implementation and drawqueue implmentation.\n\nIf target is not supported then an exception is thrown.", "id": "f11518:c0:m13"} {"signature": "def flush(self, frame):", "body": "self.sink.render(self.size_or_default(), frame, self._drawqueue)self.reset_drawqueue()", "docstring": "Passes the drawqueue to the sink for rendering", "id": "f11518:c0:m14"} {"signature": "def deferred_render(self, render_func):", "body": "self._drawqueue.append(render_func)", "docstring": "Add a render function to the queue for rendering later", "id": "f11518:c0:m15"} {"signature": "def render(self, size, frame, drawqueue):", "body": "r_context = self.create_rcontext(size, frame)drawqueue.render(r_context)self.rendering_finished(size, frame, r_context)return r_context", "docstring": "Calls implmentation to get a render context,\npasses it to the drawqueues render function\nthen calls self.rendering_finished", "id": "f11519:c0:m1"} {"signature": "def create_rcontext(self, size, frame):", "body": "pass", "docstring": "Returns a cairo context for drawing this\nframe of the bot", "id": "f11519:c0:m2"} {"signature": "def main_iteration(self):", "body": "pass", "docstring": "Called from main loop, if your sink needs to handle GUI events\ndo it here.\n\n:return:", "id": "f11519:c0:m4"} {"signature": "def 
next_event(block=False, timeout=None):", "body": "try:return channel.listen(block=block, timeout=timeout).next()['']except StopIteration:return None", "docstring": "Get the next available event or None\n\n:param block:\n:param timeout:\n:return: None or (event, data)", "id": "f11520:m1"} {"signature": "def event_is(event, event_t):", "body": "return event is not None and event.type == event_t", "docstring": "Check if event is of the given type\n:param event: event to compare\n:param event_t: event type\n:return: bool", "id": "f11520:m2"} {"signature": "def publish_event(event_t, data=None, extra_channels=None, wait=None):", "body": "event = Event(event_t, data)pubsub.publish(\"\", event)for channel_name in extra_channels or []:pubsub.publish(channel_name, event)if wait is not None:channel = pubsub.subscribe(wait)channel.listen(wait)", "docstring": "Publish an event to any subscribers.\n\n:param event_t: event type\n:param data: event data\n:param extra_channels:\n:param wait:\n:return:", "id": "f11520:m3"} {"signature": "def sort_by_preference(options, prefer):", "body": "if not prefer:return optionsreturn sorted(options, key=lambda x: (prefer + options).index(x))", "docstring": ":param options: List of options\n:param prefer: Preferred options\n:return:\n\nPass in a list of options, return options in 'prefer' first\n\n>>> sort_by_preference([\"cairo\", \"cairocffi\"], [\"cairocffi\"])\n[\"cairocffi\", \"cairo\"]", "id": "f11521:m0"} {"signature": "def get_driver_options():", "body": "options = os.environ.get(\"\")if not options:return {}try:return dict([kv.split('') for kv in options.split()])except ValueError:sys.stderr.write(\"\")sys.stderr.write(\"\")sys.stderr.write(\"\")sys.exit()", "docstring": "Interpret env var as key=value\n:return:", "id": "f11521:m1"} {"signature": "def import_libs(self, module_names, impl_name):", "body": "for name in module_names:try:module = __import__(name)has_module = Trueexcept ImportError:module = Nonehas_module = Falsesetattr(self, name, module)setattr(self, '' % name, has_module)for name in module_names:try:return name, __import__(name)except ImportError:passraise ImportError('' % (impl_name, ''.join(module_names)))", "docstring": "Loop through module_names,\nadd has_.... 
booleans to class\nset ..._impl to first successful import\n\n:param module_names: list of module names to try importing\n:param impl_name: used in error output if no modules succeed\n:return: name, module from first successful implementation", "id": "f11521:c0:m0"} {"signature": "def ensure_pycairo_context(self, ctx):", "body": "if self.cairocffi and isinstance(ctx, self.cairocffi.Context):from shoebot.util.cairocffi.cairocffi_to_pycairo import _UNSAFE_cairocffi_context_to_pycairoreturn _UNSAFE_cairocffi_context_to_pycairo(ctx)else:return ctx", "docstring": "If ctx is a cairocffi Context convert it to a PyCairo Context\notherwise return the original context\n\n:param ctx:\n:return:", "id": "f11521:c1:m3"} {"signature": "def create_canvas(src, format=None, outputfile=None, multifile=False, buff=None, window=False, title=None,fullscreen=None, show_vars=False):", "body": "from core import CairoCanvas, CairoImageSink if outputfile:sink = CairoImageSink(outputfile, format, multifile, buff)elif window or show_vars:from gui import ShoebotWindowif not title:if src and os.path.isfile(src):title = os.path.splitext(os.path.basename(src))[] + ''else:title = ''sink = ShoebotWindow(title, show_vars, fullscreen=fullscreen)else:if src and isinstance(src, cairo.Surface):outputfile = srcformat = ''elif src and os.path.isfile(src):outputfile = os.path.splitext(os.path.basename(src))[] + '' + (format or '')else:outputfile = ''sink = CairoImageSink(outputfile, format, multifile, buff)canvas = CairoCanvas(sink)return canvas", "docstring": "Create canvas and sink for attachment to a bot\n\ncanvas is what draws images, 'sink' is the final consumer of the images\n\n:param src: Defaults for title or outputfile if not specified.\n\n:param format: CairoImageSink image format, if using buff instead of outputfile\n:param buff: CairoImageSink buffer object to send output to\n\n:param outputfile: CairoImageSink output filename e.g. \"hello.svg\"\n:param multifile: CairoImageSink if True,\n\n:param title: ShoebotWindow - set window title\n:param fullscreen: ShoebotWindow - set window title\n:param show_vars: ShoebotWindow - display variable window\n\nTwo kinds of sink are provided: CairoImageSink and ShoebotWindow\n\nShoebotWindow\n\nDisplays a window to draw shoebot inside.\n\n\nCairoImageSink\n\nOutput to a filename (or files if multifile is set), or a buffer object.", "id": "f11525:m2"} {"signature": "def create_bot(src=None, grammar=NODEBOX, format=None, outputfile=None, iterations=, buff=None, window=False,title=None, fullscreen=None, server=False, port=, show_vars=False, vars=None, namespace=None):", "body": "canvas = create_canvas(src, format, outputfile, iterations > , buff, window, title, fullscreen=fullscreen,show_vars=show_vars)if grammar == DRAWBOT:from shoebot.grammar import DrawBotbot = DrawBot(canvas, namespace=namespace, vars=vars)else:from shoebot.grammar import NodeBotbot = NodeBot(canvas, namespace=namespace, vars=vars)if server:from shoebot.sbio import SocketServersocket_server = SocketServer(bot, \"\", port=port)return bot", "docstring": "Create a canvas and a bot with the same canvas attached to it\n\nbot parameters\n:param grammar: DRAWBOT or NODEBOX - decides what kind of bot is created\n:param vars: preset dictionary of vars from the called\n\ncanvas parameters:\n... 
everything else ...\n\nSee create_canvas for details on those parameters.", "id": "f11525:m3"} {"signature": "def run(src,grammar=NODEBOX,format=None,outputfile=None,iterations=,buff=None,window=True,title=None,fullscreen=None,close_window=False,server=False,port=,show_vars=False,vars=None,namespace=None,run_shell=False,args=[],verbose=False,background_thread=True):", "body": "sys.argv = [sys.argv[]] + args create_args = [src,grammar,format,outputfile,iterations,buff,window,title,fullscreen,server,port,show_vars]create_kwargs = dict(vars=vars, namespace=namespace)run_args = [src]run_kwargs = dict(iterations=iterations,frame_limiter=window,verbose=verbose,run_forever=window and not (close_window or bool(outputfile)),)if background_thread:sbot_thread = ShoebotThread(create_args=create_args,create_kwargs=create_kwargs,run_args=run_args,run_kwargs=run_kwargs,send_sigint=run_shell)sbot_thread.start()sbot = sbot_thread.sbotelse:print('')if run_shell:raise ValueError('')sbot_thread = Nonesbot = create_bot(*create_args, **create_kwargs)sbot.run(*run_args, **run_kwargs)if run_shell:import shoebot.sbio.shellshell = shoebot.sbio.shell.ShoebotCmd(sbot, trusted=True)try:shell.cmdloop()except KeyboardInterrupt as e:publish_event(QUIT_EVENT) if verbose:raiseelse:returnelif background_thread:try:while sbot_thread.is_alive():sleep()except KeyboardInterrupt:publish_event(QUIT_EVENT)if all((background_thread, sbot_thread)):sbot_thread.join()return sbot", "docstring": "Create and run a bot, the arguments all correspond to sanitized\ncommandline options.\n\n:param background_thread: If True then use a background thread.\n\n\nOther args are split into create_args and run_args\n\nSee create_bot for details on create_args\n\nrun_args are passed to bot.run - see Nodebot.run or Drawbot.run\n\n\n\nBackground thread:\n\nreadline in python is blocking, running the app in a background\nthread opens up the main thread for IO on stdin/stdout, which\ncan be used for communication with shoebot when livecoding is\nenabled.\n\nSee shoebot.io for implementation of the shell, and the gedit\nplugin for an example of using livecoding.", "id": "f11525:m4"} {"signature": "def __init__(self, create_args, create_kwargs,run_args, run_kwargs,send_sigint=False):", "body": "super(ShoebotThread, self).__init__()self.bot_ready = threading.Event()self.create_args = create_argsself.create_kwargs = create_kwargsself.run_args = run_argsself.run_kwargs = run_kwargsself.send_sigint = send_sigintself._sbot = None", "docstring": ":param create_args: passed to create_bot\n:param create_kwargs: passed to create_bot\n:param run_args: passed to bot.run\n:param run_kwargs: passed to bot.run\n:param send_sigint: if True then SIGINT will be sent on bot completion\n so the main thread can terminate", "id": "f11525:c1:m0"} {"signature": "@propertydef sbot(self):", "body": "self.bot_ready.wait()return self._sbot", "docstring": ":return: bot instance for communication", "id": "f11525:c1:m2"} {"signature": "def error(message):", "body": "global parserprint (_(\"\") + message)print ()parser.print_help()sys.exit()", "docstring": "Prints an error message, the help message and quits", "id": "f11526:m1"} {"signature": "def warn(message):", "body": "print (_(\"\") + message)", "docstring": "Print a warning message", "id": "f11526:m2"} {"signature": "def shoebot_example(**shoebot_kwargs):", "body": "def decorator(f):def run():from shoebot import ShoebotInstallError print(\"\" % f.__name__.replace(\"\", \"\"))try:import shoebotoutputfile = \"\" % f.__name__bot = 
shoebot.create_bot(outputfile=outputfile)f(bot)bot.finish()print('' % outputfile)print('')except ShoebotInstallError as e:print('', e.args[])print('')except Exception:print('')for line in traceback.format_exc().splitlines():print('' % line)print('')return runreturn decorator", "docstring": "Decorator to run some code in a bot instance.", "id": "f11527:m5"} {"signature": "def get_key_map(self):", "body": "kdict = {}for gdk_name in dir(Gdk):nb_name = gdk_name.upper()kdict[nb_name] = getattr(Gdk, gdk_name)return kdict", "docstring": "Return a dict in the form of\n\nSHOEBOT_KEY_NAME, GTK_VALUE\n\nShoebot key names look like KEY_LEFT, whereas Gdk uses KEY_Left\n- Shoebot key names are derived from Nodebox 1, which was a mac\n app.", "id": "f11528:c0:m8"} {"signature": "def scale_context_and_center(self, cr):", "body": "bot_width, bot_height = self.bot_sizeif self.width != bot_width or self.height != bot_height:if self.width < self.height:scale_x = float(self.width) / float(bot_width)scale_y = scale_xcr.translate(, (self.height - (bot_height * scale_y)) / )elif self.width > self.height:scale_y = float(self.height) / float(bot_height)scale_x = scale_ycr.translate((self.width - (bot_width * scale_x)) / , )else:scale_x = scale_y = cr.scale(scale_x, scale_y)self.input_device.scale_x = scale_yself.input_device.scale_y = scale_y", "docstring": "Scale context based on difference between bot size and widget", "id": "f11529:c1:m3"} {"signature": "def draw(self, widget, cr):", "body": "if self.bot_size is None:self.draw_default_image(cr)returncr = driver.ensure_pycairo_context(cr)surface = self.backing_store.surfacecr.set_source_surface(surface)cr.paint()", "docstring": "Draw just the exposed part of the backing store, scaled to fit", "id": "f11529:c1:m4"} {"signature": "def create_rcontext(self, size, frame):", "body": "self.frame = framewidth, height = sizemeta_surface = cairo.RecordingSurface(cairo.CONTENT_COLOR_ALPHA, (, , width, height))ctx = cairo.Context(meta_surface)return ctx", "docstring": "Creates a recording surface for the bot to draw on\n\n:param size: The width and height of bot", "id": "f11529:c1:m5"} {"signature": "def do_drawing(self, size, frame, cairo_ctx):", "body": "if self.get_window() and not self.bot_size:self.set_size_request(*size)self.bot_size = sizeself.backing_store = BackingStore.get_backingstore(self.width, self.height)cr = pycairo.Context(self.backing_store.surface)if self.scale_fit:self.scale_context_and_center(cr)cairo_ctx = driver.ensure_pycairo_context(cairo_ctx)cr.set_source_surface(cairo_ctx.get_target())cr.set_operator(cairo.OPERATOR_SOURCE)cr.paint()self.queue_draw()while Gtk.events_pending():Gtk.main_iteration_do(False)", "docstring": "Update the backing store from a cairo context and\nschedule a redraw (expose event)\n\n:param size: width, height in pixels of bot\n:param frame: frame # thar was drawn\n:param cairo_ctx: cairo context the bot was drawn on", "id": "f11529:c1:m6"} {"signature": "def gtk_mouse_button_down(self, widget, event):", "body": "if self.menu_enabled and event.button == :menu = self.uimanager.get_widget('')menu.popup(None, None, None, None, event.button, event.time)else:super(ShoebotWindow, self).gtk_mouse_button_down(widget, event)", "docstring": "Handle right mouse button clicks", "id": "f11530:c0:m1"} {"signature": "def create_rcontext(self, size, frame):", "body": "return self.sb_widget.create_rcontext(size, frame)", "docstring": "Delegates to the sb_widget", "id": "f11530:c0:m3"} {"signature": "def show_variables_window(self):", "body": 
"if self.var_window is None and self.bot._vars:self.var_window = VarWindow(self, self.bot, '' % (self.title or ''))self.var_window.window.connect(\"\", self.var_window_closed)", "docstring": "Show the variables window.", "id": "f11530:c0:m4"} {"signature": "def hide_variables_window(self):", "body": "if self.var_window is not None:self.var_window.window.destroy()self.var_window = None", "docstring": "Hide the variables window", "id": "f11530:c0:m5"} {"signature": "def var_window_closed(self, widget):", "body": "self.action_group.get_action('').set_active(False)self.show_vars = Falseself.var_window = None", "docstring": "Called if user clicked close button on var window\n:param widget:\n:return:", "id": "f11530:c0:m6"} {"signature": "def schedule_snapshot(self, format):", "body": "bot = self.botcanvas = self.bot.canvasscript = bot._namespace['']if script:filename = os.path.splitext(script)[] + '' + formatelse:filename = '' + formatf = canvas.output_closure(filename, self.bot._frame)self.scheduled_snapshots.append(f)", "docstring": "Tell the canvas to perform a snapshot when it's finished rendering\n:param format:\n:return:", "id": "f11530:c0:m8"} {"signature": "def snapshot_svg(self, widget):", "body": "self.schedule_snapshot('')", "docstring": "Save an SVG file after drawing is complete.", "id": "f11530:c0:m9"} {"signature": "def snapshot_ps(self, widget):", "body": "self.schedule_snapshot('')", "docstring": "Save an Postscript file after drawing is complete.", "id": "f11530:c0:m10"} {"signature": "def snapshot_pdf(self, widget):", "body": "self.schedule_snapshot('')", "docstring": "Save a PDF file after drawing is complete.", "id": "f11530:c0:m11"} {"signature": "def snapshot_png(self, widget):", "body": "self.schedule_snapshot('')", "docstring": "Save an PNG file after drawing is complete.", "id": "f11530:c0:m12"} {"signature": "def trigger_fullscreen_action(self, fullscreen):", "body": "action = self.action_group.get_action('')action.set_active(fullscreen)", "docstring": "Toggle fullscreen from outside the GUI,\ncauses the GUI to updated and run all its actions.", "id": "f11530:c0:m13"} {"signature": "def do_fullscreen(self, widget):", "body": "self.fullscreen()self.is_fullscreen = Truewhile Gtk.events_pending():Gtk.main_iteration()self.bot._screen_width = Gdk.Screen.width()self.bot._screen_height = Gdk.Screen.height()self.bot._screen_ratio = self.bot._screen_width / self.bot._screen_height", "docstring": "Widget Action to Make the window fullscreen and update the bot.", "id": "f11530:c0:m14"} {"signature": "def do_unfullscreen(self, widget):", "body": "self.unfullscreen()self.is_fullscreen = Falseself.bot._screen_ratio = None", "docstring": "Widget Action to set Windowed Mode.", "id": "f11530:c0:m15"} {"signature": "def do_window_close(self, widget, data=None):", "body": "publish_event(QUIT_EVENT)if self.has_server:self.sock.close()self.hide_variables_window()self.destroy()self.window_open = False", "docstring": "Widget Action to Close the window, triggering the quit event.", "id": "f11530:c0:m16"} {"signature": "def do_toggle_fullscreen(self, action):", "body": "is_fullscreen = action.get_active()if is_fullscreen:self.fullscreen()else:self.unfullscreen()", "docstring": "Widget Action to Toggle fullscreen from the GUI", "id": "f11530:c0:m17"} {"signature": "def do_toggle_play(self, action):", "body": "if self.pause_speed is None and not action.get_active():self.pause_speed = self.bot._speedself.bot._speed = else:self.bot._speed = self.pause_speedself.pause_speed = None", "docstring": 
"Widget Action to toggle play / pause.", "id": "f11530:c0:m18"} {"signature": "def do_toggle_variables(self, action):", "body": "self.show_vars = action.get_active()if self.show_vars:self.show_variables_window()else:self.hide_variables_window()", "docstring": "Widget Action to toggle showing the variables window.", "id": "f11530:c0:m19"} {"signature": "def main_iteration(self):", "body": "if self.show_vars:self.show_variables_window()else:self.hide_variables_window()for snapshot_f in self.scheduled_snapshots:fn = snapshot_f(self.last_draw_ctx)print(\"\" % fn)else:self.scheduled_snapshots = deque()while Gtk.events_pending():Gtk.main_iteration()", "docstring": "Called from main loop, if your sink needs to handle GUI events\ndo it here.\n\nCheck any GUI flags then call Gtk.main_iteration to update things.", "id": "f11530:c0:m20"} {"signature": "def add_variables(self):", "body": "for k, v in self.bot._vars.items():if not hasattr(v, ''):raise AttributeError('' % k)self.add_variable(v)", "docstring": "Add all widgets to specified vbox\n:param container:\n:return:", "id": "f11532:c0:m1"} {"signature": "def update_var(self, name, value):", "body": "widget = self.widgets.get(name)if widget is None:return False, ''.format(name)try:if isinstance(widget, Gtk.CheckButton):widget.set_active(value)return True, widget.get_active()elif isinstance(widget, Gtk.Entry):widget.set_text(value)return True, widget.get_text()else:widget.set_value(value)return True, widget.get_value()except Exception as e:return False, str(e)", "docstring": ":return: success, err_msg_if_failed", "id": "f11532:c0:m9"} {"signature": "def widget_changed(self, widget, v):", "body": "if v.type is NUMBER:self.bot._namespace[v.name] = widget.get_value()self.bot._vars[v.name].value = widget.get_value() publish_event(VARIABLE_UPDATED_EVENT, v) elif v.type is BOOLEAN:self.bot._namespace[v.name] = widget.get_active()self.bot._vars[v.name].value = widget.get_active() publish_event(VARIABLE_UPDATED_EVENT, v) elif v.type is TEXT:self.bot._namespace[v.name] = widget.get_text()self.bot._vars[v.name].value = widget.get_text() publish_event(VARIABLE_UPDATED_EVENT, v)", "docstring": "Called when a slider is adjusted.", "id": "f11532:c0:m10"} {"signature": "def var_added(self, v):", "body": "self.add_variable(v)self.window.set_size_request(, * len(self.widgets.keys()))self.window.show_all()", "docstring": "var was added in the bot while it ran, possibly\nby livecoding\n\n:param v:\n:return:", "id": "f11532:c0:m11"} {"signature": "def var_deleted(self, v):", "body": "widget = self.widgets[v.name]parent = widget.get_parent()self.container.remove(parent)del self.widgets[v.name]self.window.set_size_request(, * len(self.widgets.keys()))self.window.show_all()", "docstring": "var was added in the bot\n\n:param v:\n:return:", "id": "f11532:c0:m12"} {"signature": "def save_as(self):", "body": "chooser = ShoebotFileChooserDialog(_(''), None, Gtk.FileChooserAction.SAVE,(Gtk.STOCK_SAVE, Gtk.ResponseType.ACCEPT,Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL))chooser.set_do_overwrite_confirmation(True)chooser.set_transient_for(self)saved = chooser.run() == Gtk.ResponseType.ACCEPTif saved:old_filename = self.filenameself.source_buffer.filename = chooser.get_filename()if not self.save():self.filename = old_filenamechooser.destroy()return saved", "docstring": "Return True if the buffer was saved", "id": "f11533:c4:m29"} {"signature": "def pangocairo_create_context(cr):", "body": "try:return PangoCairo.create_context(cr)except KeyError as e:if e.args == ('',):raise 
ShoebotInstallError(\"\")else:raise", "docstring": "If python-gi-cairo is not installed, using PangoCairo.create_context\ndies with an unhelpful KeyError, check for that and output somethig\nuseful.", "id": "f11534:m0"} {"signature": "def _get_center(self):", "body": "w, h = self.layout.get_pixel_size()x = (self.x + w / )y = (self.y + h / )return x, y", "docstring": "Returns the center point of the path, disregarding transforms.", "id": "f11534:c0:m7"} {"signature": "def _set_mode(self, mode):", "body": "if mode == CENTER:self._call_transform_mode = self._center_transformelif mode == CORNER:self._call_transform_mode = self._corner_transformelse:raise ValueError('')", "docstring": "Sets call_transform_mode to point to the\ncenter_transform or corner_transform", "id": "f11535:c0:m1"} {"signature": "def _get_pathmode(self):", "body": "if self._pathmode is not None:return self._pathmodeelse:return self._canvas.pathmode", "docstring": "Return pathmode or get it from self._canvas", "id": "f11535:c0:m2"} {"signature": "def _get_center(self):", "body": "raise NotImplementedError()", "docstring": "Implementations must return the x, y of their center", "id": "f11535:c0:m3"} {"signature": "def _call_transform_mode(self):", "body": "raise NotImplementedError('')", "docstring": "This should never get called:\nset mode, changes the value of this to point\n\ncorner_transform or center_transform", "id": "f11535:c0:m4"} {"signature": "def _center_transform(self, transform):", "body": "dx, dy = self._get_center()t = cairo.Matrix()t.translate(dx, dy)t = transform * tt.translate(-dx, -dy)return t", "docstring": "Works like setupTransform of a version of java nodebox\nhttp://dev.nodebox.net/browser/nodebox-java/branches/rewrite/src/java/net/nodebox/graphics/Grob.java", "id": "f11535:c0:m5"} {"signature": "def _corner_transform(self, transform):", "body": "return transform", "docstring": "CORNER is the default, so we just return the transform", "id": "f11535:c0:m6"} {"signature": "def _deferred_render(self, render_func=None):", "body": "self._canvas.deferred_render(render_func or self._render)", "docstring": "Pass a function to the canvas for deferred rendering,\ndefaults to self._render", "id": "f11535:c0:m7"} {"signature": "def _render(self, ctx):", "body": "raise NotImplementedError()", "docstring": "For overriding by GRaphicOBjects", "id": "f11535:c0:m8"} {"signature": "def inheritFromContext(self, ignore=()):", "body": "for canvas_attr, grob_attr in STATES.items():if canvas_attr in ignore:continuesetattr(self, grob_attr, getattr(self._bot._canvas, canvas_attr))", "docstring": "Doesn't store exactly the same items as Nodebox for ease of implementation,\nit has enough to get the Nodebox Dentrite example working.", "id": "f11535:c0:m9"} {"signature": "def parse_color(v, color_range=):", "body": "while isinstance(v, (tuple, list)) and len(v) == :v = v[]if isinstance(v, (int, float)):red = green = blue = v / color_rangealpha = elif isinstance(v, data.Color):red, green, blue, alpha = velif isinstance(v, (tuple, list)):color = []for index in range(, len(v)):color.append(v[index] / color_range)if len(color) == :red = green = blue = alpha = color[]elif len(color) == :red = green = blue = color[]alpha = color[]elif len(color) == :red = color[]green = color[]blue = color[]alpha = elif len(color) == :red = color[]green = color[]blue = color[]alpha = color[]elif isinstance(v, str):v = v.strip('')if len(data) == :red = hex2dec(v[:]) / green = hex2dec(v[:]) / blue = hex2dec(v[:]) / alpha = elif len(v) == :red = 
hex2dec(v[:]) / green = hex2dec(v[:]) / blue = hex2dec(v[:]) / alpha = hex2dec(v[:]) / return red, green, blue, alpha", "docstring": "Receives a colour definition and returns a (r,g,b,a) tuple.\n\n Accepts:\n - v\n - (v)\n - (v,a)\n - (r,g,b)\n - (r,g,b,a)\n - #RRGGBB\n - RRGGBB\n - #RRGGBBAA\n - RRGGBBAA\n\n Returns a (red, green, blue, alpha) tuple, with values ranging from\n 0 to 1.\n\n The 'color_range' parameter sets the colour range in which the\n colour data values are specified (except in hexstrings).", "id": "f11536:m2"} {"signature": "def hex_to_rgb(hex):", "body": "hex = hex.lstrip(\"\")if len(hex) < :hex += hex[-] * ( - len(hex))if len(hex) == :r, g, b = hex[:], hex[:], hex[:]r, g, b = [int(n, ) / for n in (r, g, b)]a = elif len(hex) == :r, g, b, a = hex[:], hex[:], hex[:], hex[:]r, g, b, a = [int(n, ) / for n in (r, g, b, a)]return r, g, b, a", "docstring": "Returns RGB values for a hex color string.", "id": "f11536:m3"} {"signature": "def lab_to_rgb(l, a, b):", "body": "y = (l + ) / x = a / + yz = y - b / v = [x, y, z]for i in _range():if pow(v[i], ) > :v[i] = pow(v[i], )else:v[i] = (v[i] - / ) / x = v[] * / y = v[] * / z = v[] * / r = x * + y * - + z * -g = x * - + y * + z * b = x * + y * - + z * v = [r, g, b]for i in _range():if v[i] > :v[i] = * pow(v[i], / ) - else:v[i] = * v[i]r, g, b = v[], v[], v[]return r, g, b", "docstring": "Converts CIE Lab to RGB components.\n\n First we have to convert to XYZ color space.\n Conversion involves using a white point,\n in this case D65 which represents daylight illumination.\n\n Algorithm adopted from:\n http://www.easyrgb.com/math.php", "id": "f11536:m4"} {"signature": "def cmyk_to_rgb(c, m, y, k):", "body": "r = - min(, c + k)g = - min(, m + k)b = - min(, y + k)return r, g, b", "docstring": "Cyan, magenta, yellow, black to red, green, blue.\n ReportLab, http://www.koders.com/python/fid5C006F554616848C01AC7CB96C21426B69D2E5A9.aspx\n Results will differ from the way NSColor converts color spaces.", "id": "f11536:m5"} {"signature": "def hsv_to_rgb(h, s, v):", "body": "if s == : return v, v, vh = h / ( / )i = floor(h)f = h - ip = v * ( - s)q = v * ( - s * f)t = v * ( - s * ( - f))if i == :r, g, b = v, t, pelif i == :r, g, b = q, v, pelif i == :r, g, b = p, v, telif i == :r, g, b = p, q, velif i == :r, g, b = t, p, velse:r, g, b = v, p, qreturn r, g, b", "docstring": "Hue, saturation, brightness to red, green, blue.\n http://www.koders.com/python/fidB2FE963F658FE74D9BF74EB93EFD44DCAE45E10E.aspx\n Results will differ from the way NSColor converts color spaces.", "id": "f11536:m7"} {"signature": "def __getattr__(self, a):", "body": "if a in self.__dict__:return aelif a == \"\":return self.__dict__[\"\"]elif a == \"\":return self.__dict__[\"\"]elif a in [\"\", \"\",\"\", \"\", \"\", \"\", \"\", \"\",\"\", \"\", \"\", \"\",\"\", \"\", \"\", \"\", \"\", \"\", \"\"]:return self.__dict__[\"\" + a[]]elif a in [\"\", \"\",\"\", \"\", \"\", \"\", \"\", \"\",\"\", \"\", \"\", \"\"]:return self.__dict__[\"\" + a[]]raise AttributeError(\"\" + str(self.__class__) + \"\" + a + \"\")", "docstring": "Available properties:\n r, g, b, a or red, green, blue, alpha\n c, m, y, k or cyan, magenta, yellow, black,\n h, s or hue, saturation, brightness", "id": "f11536:c0:m16"} {"signature": "def angle(x0, y0, x1, y1):", "body": "return degrees(atan2(y1-y0, x1-x0))", "docstring": "Returns the angle between two points.", "id": "f11538:m0"} {"signature": "def distance(x0, y0, x1, y1):", "body": "return sqrt(pow(x1-x0, ) + pow(y1-y0, ))", "docstring": "Returns the 
distance between two points.", "id": "f11538:m1"} {"signature": "def coordinates(x0, y0, distance, angle):", "body": "return (x0 + cos(radians(angle)) * distance,y0 + sin(radians(angle)) * distance)", "docstring": "Returns the location of a point by rotating around origin (x0,y0).", "id": "f11538:m2"} {"signature": "def rotate(x, y, x0, y0, angle):", "body": "x, y = x - x0, y - y0a, b = cos(radians(angle)), sin(radians(angle))return (x * a - y * b + x0,y * a + x * b + y0)", "docstring": "Returns the coordinates of (x,y) rotated around origin (x0,y0).", "id": "f11538:m3"} {"signature": "def reflect(x, y, x0, y0, d=, a=):", "body": "return coordinates(x0, y0, d * distance(x0, y0, x, y),a + angle(x0, y0, x, y))", "docstring": "Returns the reflection of a point through origin (x0,y0).", "id": "f11538:m4"} {"signature": "def lerp(a, b, t):", "body": "if t < :return aif t > :return breturn a + (b - a) * t", "docstring": "Returns the linear interpolation between a and b for time t between 0.0-1.0.\n For example: lerp(100, 200, 0.5) => 150.", "id": "f11538:m5"} {"signature": "def smoothstep(a, b, x):", "body": "if x < a:return if x >= b:return x = float(x - a) / (b - a)return x * x * ( - * x)", "docstring": "Returns a smooth transition between 0.0 and 1.0 using Hermite interpolation (cubic spline),\n where x is a number between a and b. The return value will ease (slow down) as x nears a or b.\n For x smaller than a, returns 0.0. For x bigger than b, returns 1.0.", "id": "f11538:m6"} {"signature": "def line_line_intersection(x1, y1, x2, y2, x3, y3, x4, y4, infinite=False):", "body": "ua = (x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)ub = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)d = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)if d == :if ua == ub == :return []else:return []ua /= float(d)ub /= float(d)if not infinite and not ( <= ua <= and <= ub <= ):return None, Nonereturn [(x1 + ua * (x2 - x1),y1 + ua * (y2 - y1))]", "docstring": "Determines the intersection point of two lines, or two finite line segments if infinite=False.\n When the lines do not intersect, returns an empty list.", "id": "f11538:m8"} {"signature": "def circle_line_intersection(cx, cy, radius, x1, y1, x2, y2, infinite=False):", "body": "dx = x2 - x1dy = y2 - y1A = dx * dx + dy * dyB = * (dx * (x1 - cx) + dy * (y1 - cy))C = pow(x1 - cx, ) + pow(y1 - cy, ) - radius * radiusdet = B * B - * A * Cif A <= or det < :return []elif det == :t = -B / ( * A)return [(x1 + t * dx, y1 + t * dy)]else:points = []det2 = sqrt(det)t1 = (-B + det2) / ( * A)t2 = (-B - det2) / ( * A)if infinite or <= t1 <= :points.append((x1 + t1 * dx, y1 + t1 * dy))if infinite or <= t2 <= :points.append((x1 + t2 * dx, y1 + t2 * dy))return points", "docstring": "Returns a list of points where the circle and the line intersect.\n Returns an empty list when the circle and the line do not intersect.", "id": "f11538:m9"} {"signature": "def point_in_polygon(points, x, y):", "body": "odd = Falsen = len(points)for i in range(n):j = i < n - and i + or x0, y0 = points[i][], points[i][]x1, y1 = points[j][], points[j][]if (y0 < y and y1 >= y) or (y1 < y and y0 >= y):if x0 + (y - y0) / (y1 - y0) * (x1 - x0) < x:odd = not oddreturn odd", "docstring": "Ray casting algorithm.\n Determines how many times a horizontal ray starting from the point\n intersects with the sides of the polygon.\n If it is an even number of times, the point is outside, if odd, inside.\n The algorithm does not always report correctly when the point is very close to the boundary.\n The polygon is passed as 
a list of (x,y)-tuples.", "id": "f11538:m11"} {"signature": "def __init__(self, transform=None):", "body": "if isinstance(transform, AffineTransform):self.matrix = list(transform.matrix)else:self.matrix = self.identity", "docstring": "A geometric transformation in Euclidean space (i.e. 2D)\n that preserves collinearity and ratio of distance between points.\n Linear transformations include rotation, translation, scaling, shear.", "id": "f11538:c0:m0"} {"signature": "def _mmult(self, a, b):", "body": "return [a[] * b[] + a[] * b[],a[] * b[] + a[] * b[],,a[] * b[] + a[] * b[],a[] * b[] + a[] * b[],,a[] * b[] + a[] * b[] + b[],a[] * b[] + a[] * b[] + b[],]", "docstring": "Returns the 3x3 matrix multiplication of A and B.\n Note that scale(), translate(), rotate() work with premultiplication,\n e.g. the matrix A followed by B = BA and not AB.", "id": "f11538:c0:m4"} {"signature": "def invert(self):", "body": "m = self.matrixd = m[] * m[] - m[] * m[]self.matrix = [m[] / d, -m[] / d, ,-m[] / d, m[] / d, ,(m[] * m[] - m[] * m[]) / d,-(m[] * m[] - m[] * m[]) / d,]", "docstring": "Multiplying a matrix by its inverse produces the identity matrix.", "id": "f11538:c0:m5"} {"signature": "def transform_point(self, x, y):", "body": "m = self.matrixreturn (x*m[]+y*m[]+m[], x*m[]+y*m[]+m[])", "docstring": "Returns the new coordinates of (x,y) after transformation.", "id": "f11538:c0:m11"} {"signature": "def transform_path(self, path):", "body": "p = path.__class__() for pt in path:if pt.cmd == \"\":p.closepath()elif pt.cmd == \"\":p.moveto(*self.apply(pt.x, pt.y))elif pt.cmd == \"\":p.lineto(*self.apply(pt.x, pt.y))elif pt.cmd == \"\":vx1, vy1 = self.apply(pt.ctrl1.x, pt.ctrl1.y)vx2, vy2 = self.apply(pt.ctrl2.x, pt.ctrl2.y)x, y = self.apply(pt.x, pt.y)p.curveto(vx1, vy1, vx2, vy2, x, y)return p", "docstring": "Returns a BezierPath object with the transformation applied.", "id": "f11538:c0:m12"} {"signature": "def __init__(self, x, y, width, height):", "body": "if width == None: width = INFINITEif height == None: height = INFINITEif width < : x, width = x+width, -widthif height < : y, height = y+height, -heightself.x = xself.y = yself.width = widthself.height = height", "docstring": "Creates a bounding box.\n The bounding box is an untransformed rectangle that encompasses a shape or group of shapes.", "id": "f11538:c2:m0"} {"signature": "def __iter__(self):", "body": "return (self.x, self.y, self.width, self.height).__iter__()", "docstring": "You can conveniently unpack bounds: x,y,w,h = Bounds(0,0,100,100)", "id": "f11538:c2:m2"} {"signature": "def intersects(self, b):", "body": "return max(self.x, b.x) < min(self.x+self.width, b.x+b.width)and max(self.y, b.y) < min(self.y+self.height, b.y+b.height)", "docstring": "Return True if a part of the two bounds overlaps.", "id": "f11538:c2:m3"} {"signature": "def intersection(self, b):", "body": "if not self.intersects(b):return Nonemx, my = max(self.x, b.x), max(self.y, b.y)return Bounds(mx, my,min(self.x+self.width, b.x+b.width) - mx,min(self.y+self.height, b.y+b.height) - my)", "docstring": "Returns bounds that encompass the intersection of the two.\n If there is no overlap between the two, None is returned.", "id": "f11538:c2:m4"} {"signature": "def union(self, b):", "body": "mx, my = min(self.x, b.x), min(self.y, b.y)return Bounds(mx, my,max(self.x+self.width, b.x+b.width) - mx,max(self.y+self.height, b.y+b.height) - my)", "docstring": "Returns bounds that encompass the union of the two.", "id": "f11538:c2:m5"} {"signature": "def contains(self, *a):", "body": 
"if len(a) == : a = [Point(a[], a[])]if len(a) == :a = a[]if isinstance(a, Point):return a.x >= self.x and a.x <= self.x+self.widthand a.y >= self.y and a.y <= self.y+self.heightif isinstance(a, Bounds):return a.x >= self.x and a.x+a.width <= self.x+self.widthand a.y >= self.y and a.y+a.height <= self.y+self.height", "docstring": "Returns True if the given point or rectangle falls within the bounds.", "id": "f11538:c2:m6"} {"signature": "def get_matrix(self):", "body": "return self.get_matrix_with_center(,)", "docstring": "Returns this transform's matrix. Its centerpoint is presumed to be\n (0,0), which is the Cairo default.", "id": "f11539:c0:m12"} {"signature": "def __init__(self, name, type, **kwargs):", "body": "self.name = nameif not isinstance(name, basestring):raise AttributeError(\"\")if kwargs.get(\"\") and kwargs.get(\"\"):raise AttributeError(\"\") self.type = type or NUMBERself.min = Noneself.max = Noneself.step = None or kwargs.get(\"\")self.steps = kwargs.get(\"\", DEFAULT_STEPS)if self.type == NUMBER:self.min = kwargs.get(\"\", )self.max = kwargs.get(\"\", )if self.step is None:diff = max(self.min, self.max) - min(self.min, self.max)self.step = (diff / float(self.steps))self.default = kwargs.get(\"\")if self.default is None:self.default = self.minelif self.type == TEXT:self.default = kwargs.get(\"\", \"\")elif self.type == BOOLEAN:self.default = kwargs.get(\"\", True)elif self.type == BUTTON:self.default = kwargs.get(\"\", self.name)else:raise AttributeError(\"\")self.value = kwargs.get(\"\", self.default)if self.value is None and self.default is not None:self.value = self.default", "docstring": ":param name: Name of variable\n:param type: NUMBER | TEXT | BOOLEAN | BUTTON\n:param default: default value\n:param min: min value if number\n:param max: max value if number\n:param value: value\n:param step: step between values - cannot specify at same time as step\n:param steps: total steps\n:return:", "id": "f11541:c0:m0"} {"signature": "def sanitize(self, val):", "body": "if self.type == NUMBER:try:return clamp(self.min, self.max, float(val))except ValueError:return elif self.type == TEXT:try:return unicode(str(val), \"\", \"\")except:return \"\"elif self.type == BOOLEAN:if unicode(val).lower() in (\"\", \"\", \"\"):return Trueelse:return False", "docstring": "Given a Variable and a value, cleans it out", "id": "f11541:c0:m1"} {"signature": "def compliesTo(self, v):", "body": "if self.type == v.type:if self.type == NUMBER:if self.value < self.min or self.value > self.max:return Falsereturn Truereturn False", "docstring": "Return whether I am compatible with the given var:\n - Type should be the same\n - My value should be inside the given vars' min/max range.", "id": "f11541:c0:m2"} {"signature": "def _append_element(self, render_func, pe):", "body": "self._render_funcs.append(render_func)self._elements.append(pe)", "docstring": "Append a render function and the parameters to pass\nan equivilent PathElement, or the PathElement itself.", "id": "f11542:c0:m1"} {"signature": "def _traverse(self, cairo_ctx):", "body": "for render_func in self._render_funcs:render_func(cairo_ctx)", "docstring": "Traverse this path", "id": "f11542:c0:m13"} {"signature": "def _get_bounds(self):", "body": "if self._bounds:return self._boundsrecord_surface = cairo.RecordingSurface(cairo.CONTENT_COLOR_ALPHA, (-, -, , ))dummy_ctx = cairo.Context(record_surface)self._traverse(dummy_ctx)self._bounds = dummy_ctx.path_extents()return self._bounds", "docstring": "Return cached bounds of this Grob.\nIf bounds 
are not cached, render to a meta surface, and\nkeep the meta surface and bounds cached.", "id": "f11542:c0:m14"} {"signature": "def contains(self, x, y):", "body": "if self._bounds:return self._boundsrecord_surface = cairo.RecordingSurface(cairo.CONTENT_COLOR_ALPHA, (-, -, , ))dummy_ctx = cairo.Context(record_surface)self._traverse(dummy_ctx)in_fill = dummy_ctx.in_fill(x, y)return in_fill", "docstring": "Return cached bounds of this Grob.\nIf bounds are not cached, render to a meta surface, and\nkeep the meta surface and bounds cached.", "id": "f11542:c0:m16"} {"signature": "def _get_center(self):", "body": "if self._center:return self._center(x1, y1, x2, y2) = self._get_bounds()x = (x1 + x2) / y = (y1 + y2) / center = self._center = x, yreturn center", "docstring": "Return cached bounds of this Grob.\nIf bounds are not cached, render to a meta surface, and\nkeep the meta surface and bounds cached.", "id": "f11542:c0:m17"} {"signature": "def _render_closure(self):", "body": "fillcolor = self.fillstrokecolor = self.strokestrokewidth = self.strokewidthdef _render(cairo_ctx):''''''transform = self._call_transform_mode(self._transform)if fillcolor is None and strokecolor is None:returncairo_ctx.set_matrix(transform)self._traverse(cairo_ctx)cairo_ctx.set_matrix(cairo.Matrix())if fillcolor is not None and strokecolor is not None:if strokecolor[] < :cairo_ctx.push_group()cairo_ctx.set_source_rgba(*fillcolor)cairo_ctx.fill_preserve()e = cairo_ctx.stroke_extents()cairo_ctx.set_source_rgba(*strokecolor)cairo_ctx.set_operator(cairo.OPERATOR_SOURCE)cairo_ctx.set_line_width(strokewidth)cairo_ctx.stroke()cairo_ctx.pop_group_to_source()cairo_ctx.paint()else:cairo_ctx.set_source_rgba(*fillcolor)cairo_ctx.fill_preserve()cairo_ctx.set_source_rgba(*strokecolor)cairo_ctx.set_line_width(strokewidth)cairo_ctx.stroke()elif fillcolor is not None:cairo_ctx.set_source_rgba(*fillcolor)cairo_ctx.fill()elif strokecolor is not None:cairo_ctx.set_source_rgba(*strokecolor)cairo_ctx.set_line_width(strokewidth)cairo_ctx.stroke()return _render", "docstring": "Use a closure so that draw attributes can be saved", "id": "f11542:c0:m18"} {"signature": "def _get_contours(self):", "body": "contours = []current_contour = Noneempty = Truefor i, el in enumerate(self._get_elements()):if el.cmd == MOVETO:if not empty:contours.append(current_contour)current_contour = BezierPath(self._bot)current_contour.moveto(el.x, el.y)empty = Trueelif el.cmd == LINETO:empty = Falsecurrent_contour.lineto(el.x, el.y)elif el.cmd == CURVETO:empty = Falsecurrent_contour.curveto(el.c1x, el.c1y, el.c2x, el.c2y, el.x, el.y)elif el.cmd == CLOSE:current_contour.closepath()if not empty:contours.append(current_contour)return contours", "docstring": "Returns a list of contours in the path, as BezierPath objects.\nA contour is a sequence of lines and curves separated from the next contour by a MOVETO.\nFor example, the glyph \"o\" has two contours: the inner circle and the outer circle.", "id": "f11542:c0:m20"} {"signature": "def _locate(self, t, segments=None):", "body": "if segments is None:segments = self._segment_lengths(relative=True)if len(segments) == :raise PathError(\"\")for i, el in enumerate(self._get_elements()):if i == or el.cmd == MOVETO:closeto = Point(el.x, el.y)if t <= segments[i] or i == len(segments) - :breakelse:t -= segments[i]try:t /= segments[i]except ZeroDivisionError:passif i == len(segments) - and segments[i] == :i -= return (i, t, closeto)", "docstring": "Locates t on a specific segment in the path.\n Returns (index, t, PathElement)\n A 
path is a combination of lines and curves (segments).\n The returned index indicates the start of the segment that contains point t.\n The returned t is the absolute time on that segment,\n in contrast to the relative t on the whole of the path.\n The returned point is the last MOVETO, any subsequent CLOSETO after i closes to that point.\n When you supply the list of segment lengths yourself, as returned from length(path, segmented=True),\n point() works about thirty times faster in a for-loop since it doesn't need to recalculate\n the length during each iteration.", "id": "f11542:c0:m21"} {"signature": "def point(self, t, segments=None):", "body": "if len(self._elements) == :raise PathError(\"\")if self._segments is None:self._segments = self._get_length(segmented=True, precision=)i, t, closeto = self._locate(t, segments=self._segments)x0, y0 = self[i].x, self[i].yp1 = self[i + ]if p1.cmd == CLOSE:x, y = self._linepoint(t, x0, y0, closeto.x, closeto.y)return PathElement(LINETO, x, y)elif p1.cmd in (LINETO, MOVETO):x1, y1 = p1.x, p1.yx, y = self._linepoint(t, x0, y0, x1, y1)return PathElement(LINETO, x, y)elif p1.cmd == CURVETO:x3, y3, x1, y1, x2, y2 = p1.x, p1.y, p1.ctrl1.x, p1.ctrl1.y, p1.ctrl2.x, p1.ctrl2.yx, y, c1x, c1y, c2x, c2y = self._curvepoint(t, x0, y0, x1, y1, x2, y2, x3, y3)return PathElement(CURVETO, c1x, c1y, c2x, c2y, x, y)else:raise PathError(\"\" % (p1.cmd, p1))", "docstring": "Returns the PathElement at time t (0.0-1.0) on the path.\n\nReturns coordinates for point at t on the path.\nGets the length of the path, based on the length of each curve and line in the path.\nDetermines in what segment t falls. Gets the point on that segment.\nWhen you supply the list of segment lengths yourself, as returned from length(path, segmented=True),\npoint() works about thirty times faster in a for-loop since it doesn't need to recalculate\nthe length during each iteration.", "id": "f11542:c0:m22"} {"signature": "def points(self, amount=, start=, end=, segments=None):", "body": "if len(self._elements) == :raise PathError(\"\")n = end - startd = nif amount > :d = float(n) / (amount - )for i in range(int(amount)):yield self.point(start + d * i, segments)", "docstring": "Returns an iterator with a list of calculated points for the path.\n To omit the last point on closed paths: end=1-1.0/amount", "id": "f11542:c0:m23"} {"signature": "def _linepoint(self, t, x0, y0, x1, y1):", "body": "out_x = x0 + t * (x1 - x0)out_y = y0 + t * (y1 - y0)return (out_x, out_y)", "docstring": "Returns coordinates for point at t on the line.\n Calculates the coordinates of x and y for a point at t on a straight line.\n The t parameter is a number between 0.0 and 1.0,\n x0 and y0 define the starting point of the line,\n x1 and y1 the ending point of the line.", "id": "f11542:c0:m24"} {"signature": "def _linelength(self, x0, y0, x1, y1):", "body": "a = pow(abs(x0 - x1), )b = pow(abs(y0 - y1), )return sqrt(a + b)", "docstring": "Returns the length of the line.", "id": "f11542:c0:m25"} {"signature": "def _curvepoint(self, t, x0, y0, x1, y1, x2, y2, x3, y3, handles=False):", "body": "mint = - tx01 = x0 * mint + x1 * ty01 = y0 * mint + y1 * tx12 = x1 * mint + x2 * ty12 = y1 * mint + y2 * tx23 = x2 * mint + x3 * ty23 = y2 * mint + y3 * tout_c1x = x01 * mint + x12 * tout_c1y = y01 * mint + y12 * tout_c2x = x12 * mint + x23 * tout_c2y = y12 * mint + y23 * tout_x = out_c1x * mint + out_c2x * tout_y = out_c1y * mint + out_c2y * tif not handles:return (out_x, out_y, out_c1x, out_c1y, out_c2x, out_c2y)else:return (out_x, 
out_y, out_c1x, out_c1y, out_c2x, out_c2y, x01, y01, x23, y23)", "docstring": "Returns coordinates for point at t on the spline.\n Calculates the coordinates of x and y for a point at t on the cubic bezier spline,\n and its control points, based on the de Casteljau interpolation algorithm.\n The t parameter is a number between 0.0 and 1.0,\n x0 and y0 define the starting point of the spline,\n x1 and y1 its control point,\n x3 and y3 the ending point of the spline,\n x2 and y2 its control point.\n If the handles parameter is set, returns not only the point at t,\n but the modified control points of p0 and p3 should this point split the path as well.", "id": "f11542:c0:m26"} {"signature": "def _curvelength(self, x0, y0, x1, y1, x2, y2, x3, y3, n=):", "body": "length = xi = x0yi = y0for i in range(n):t = * (i + ) / npt_x, pt_y, pt_c1x, pt_c1y, pt_c2x, pt_c2y =self._curvepoint(t, x0, y0, x1, y1, x2, y2, x3, y3)c = sqrt(pow(abs(xi - pt_x), ) + pow(abs(yi - pt_y), ))length += cxi = pt_xyi = pt_yreturn length", "docstring": "Returns the length of the spline.\n Integrates the estimated length of the cubic bezier spline defined by x0, y0, ... x3, y3,\n by adding the lengths of lineair lines between points at t.\n The number of points is defined by n\n (n=10 would add the lengths of lines between 0.0 and 0.1, between 0.1 and 0.2, and so on).\n The default n=20 is fine for most cases, usually resulting in a deviation of less than 0.01.", "id": "f11542:c0:m27"} {"signature": "def _segment_lengths(self, relative=False, n=):", "body": "lengths = []first = Truefor el in self._get_elements():if first is True:close_x, close_y = el.x, el.yfirst = Falseelif el.cmd == MOVETO:close_x, close_y = el.x, el.ylengths.append()elif el.cmd == CLOSE:lengths.append(self._linelength(x0, y0, close_x, close_y))elif el.cmd == LINETO:lengths.append(self._linelength(x0, y0, el.x, el.y))elif el.cmd == CURVETO:x3, y3, x1, y1, x2, y2 = el.x, el.y, el.c1x, el.c1y, el.c2x, el.c2ylengths.append(self._curvelength(x0, y0, x1, y1, x2, y2, x3, y3, n))if el.cmd != CLOSE:x0 = el.xy0 = el.yif relative:length = sum(lengths)try:return [l / length for l in lengths]except ZeroDivisionError:return [] * len(lengths)else:return lengths", "docstring": "Returns a list with the lengths of each segment in the path.", "id": "f11542:c0:m28"} {"signature": "def _get_length(self, segmented=False, precision=):", "body": "if not segmented:return sum(self._segment_lengths(n=precision), )else:return self._segment_lengths(relative=True, n=precision)", "docstring": "Returns the length of the path.\n Calculates the length of each spline in the path, using n as a number of points to measure.\n When segmented is True, returns a list containing the individual length of each spline\n as values between 0.0 and 1.0, defining the relative length of each spline\n in relation to the total path length.", "id": "f11542:c0:m29"} {"signature": "def _get_elements(self):", "body": "for index, el in enumerate(self._elements):if isinstance(el, tuple):el = PathElement(*el)self._elements[index] = elyield el", "docstring": "Yields all elements as PathElements", "id": "f11542:c0:m30"} {"signature": "def __getitem__(self, item):", "body": "if isinstance(item, slice):indices = item.indices(len(self))return [self.__getitem__(i) for i in range(*indices)]else:el = self._elements[item]if isinstance(el, tuple):el = PathElement(*el)self._elements[item] = elreturn el", "docstring": "el is either a PathElement or the parameters to pass\nto one.\nIf el is a PathElement return it\nIf el is 
parameters, create a PathElement and return it", "id": "f11542:c0:m32"} {"signature": "def _get_center(self):", "body": "x = (self.x + self.width / )y = (self.y + self.height / )return (x, y)", "docstring": "Returns the center point of the path, disregarding transforms.", "id": "f11543:c1:m3"} {"signature": "def drawdaisy(x, y, color=''):", "body": "_ctx.push()_fill =_ctx.fill()_stroke = _ctx.stroke()sc = ( / _ctx.HEIGHT) * float(y * ) * _ctx.strokewidth(sc * )_ctx.stroke('')_ctx.line(x + (sin(x * ) * ), y + , x + sin(_ctx.FRAME * ), y)_ctx.translate(-, )_ctx.scale(sc)_ctx.fill(color)_ctx.nostroke()for angle in xrange(, , ):_ctx.rotate(degrees=)_ctx.rect(x, y, , , )_ctx.fill('')_ctx.ellipse(x + , y, , )_ctx.fill(_fill)_ctx.stroke(_stroke)_ctx.pop()", "docstring": "Draw a daisy at x, y", "id": "f11545:m0"} {"signature": "def fft_bandpassfilter(data, fs, lowcut, highcut):", "body": "fft = np.fft.fft(data)bp = fft.copy()bp *= fft.dot(fft) / bp.dot(bp)ibp = * np.fft.ifft(bp)return ibp", "docstring": "http://www.swharden.com/blog/2009-01-21-signal-filtering-with-python/#comment-16801", "id": "f11546:m0"} {"signature": "def flatten_fft(scale=):", "body": "_len = len(audio.spectrogram)for i, v in enumerate(audio.spectrogram):yield scale * (i * v) / _len", "docstring": "Produces a nicer graph, I'm not sure if this is correct", "id": "f11546:m1"} {"signature": "def scaled_fft(fft, scale=):", "body": "data = np.zeros(len(fft))for i, v in enumerate(fft):data[i] = scale * (i * v) / NUM_SAMPLESreturn data", "docstring": "Produces a nicer graph, I'm not sure if this is correct", "id": "f11546:m2"} {"signature": "def quit(self):", "body": "if self.running:self.running = Falseself.join()", "docstring": "Shutdown the audio thread", "id": "f11546:c1:m4"} {"signature": "def draw_cornu_flat(x0, y0, t0, t1, s0, c0, flip, cs, ss, cmd):", "body": "for j in range(, ):t = j * s, c = eval_cornu(t0 + t * (t1 - t0))s *= flips -= s0c -= c0x = c * cs - s * ssy = s * cs + c * ssprint_pt(x0 + x, y0 + y, cmd)cmd = ''return cmd", "docstring": "Raph Levien's code draws fast LINETO segments.", "id": "f11548:m11"} {"signature": "def draw_cornu_bezier(x0, y0, t0, t1, s0, c0, flip, cs, ss, cmd, scale, rot):", "body": "s = Nonefor j in range(, ):t = j * t2 = t+ curvetime = t0 + t * (t1 - t0)curvetime2 = t0 + t2 * (t1 - t0)Dt = (curvetime2 - curvetime) * scaleif not s:s, c = eval_cornu(curvetime)s *= flips -= s0c -= c0dx1 = cos(pow(curvetime, ) + (flip * rot)) dy1 = flip * sin(pow(curvetime, ) + (flip *rot))x = ((c * cs - s * ss) +x0)y = ((s * cs + c * ss) + y0)s2,c2 = eval_cornu(curvetime2) s2 *= flips2 -= s0c2 -= c0dx2 = cos(pow(curvetime2, ) + (flip * rot)) dy2 = flip * sin(pow(curvetime2, ) + (flip * rot))x3 = ((c2 * cs - s2 * ss)+x0)y3 = ((s2 * cs + c2 * ss)+y0)x1 = (x + ((Dt/) * dx1))y1 = (y + ((Dt/) * dy1)) x2 = (x3 - ((Dt/) * dx2))y2 = (y3 - ((Dt/) * dy2))if cmd == '':print_pt(x, y, cmd)cmd = ''print_crv(x1, y1, x2, y2, x3, y3)dx1, dy1 = dx2, dy2x,y = x3, y3return cmd", "docstring": "Mark Meyer's code draws elegant CURVETO segments.", "id": "f11548:m12"} {"signature": "def overlap(self, x1, y1, x2, y2, r=):", "body": "if abs(x2-x1) < r and abs(y2-y1) < r:return Trueelse:return False", "docstring": "Returns True when point 1 and point 2 overlap.\n\n There is an r treshold in which point 1 and point 2\n are considered to overlap.", "id": "f11549:c1:m1"} {"signature": "def reflect(self, x0, y0, x, y):", "body": "rx = x0 - (x-x0)ry = y0 - (y-y0)return rx, ry", "docstring": "Reflects the point x, y through origin x0, y0.", 
"id": "f11549:c1:m2"} {"signature": "def angle(self, x0, y0, x1, y1):", "body": "a = degrees( atan((y1-y0) / (x1-x0+)) ) + if x1-x0 < : a += return a", "docstring": "Calculates the angle between two points.", "id": "f11549:c1:m3"} {"signature": "def distance(self, x0, y0, x1, y1):", "body": "return sqrt(pow(x1-x0, ) + pow(y1-y0, ))", "docstring": "Calculates the distance between two points.", "id": "f11549:c1:m4"} {"signature": "def coordinates(self, x0, y0, distance, angle):", "body": "x = x0 + cos(radians(angle)) * distancey = y0 + sin(radians(angle)) * distancereturn Point(x, y)", "docstring": "Calculates the coordinates of a point from the origin.", "id": "f11549:c1:m5"} {"signature": "def contains_point(self, x, y, d=):", "body": "if self.path != None and len(self.path) > and self.path.contains(x, y):if not self.path.contains(x+d, y)or not self.path.contains(x, y+d)or not self.path.contains(x-d, y)or not self.path.contains(x, y-d)or not self.path.contains(x+d, y+d)or not self.path.contains(x-d, y-d)or not self.path.contains(x+d, y-d)or not self.path.contains(x-d, y+d):return Truereturn False", "docstring": "Returns true when x, y is on the path stroke outline.", "id": "f11549:c1:m6"} {"signature": "def insert_point(self, x, y):", "body": "try: bezier = _ctx.ximport(\"\")except:from nodebox.graphics import beziern = closest = Nonedx0 = float(\"\") dy0 = float(\"\")for i in range(n):t = float(i)/npt = self.path.point(t)dx = abs(pt.x-x)dy = abs(pt.y-y)if dx+dy <= dx0+dy0:dx0 = dxdy0 = dyclosest = tdecimals = [,]for d in decimals:d = /pow(,d)for i in range():t = closest-d + float(i)*d*if t < : t = +tif t > : t = t-pt = self.path.point(t)dx = abs(pt.x-x)dy = abs(pt.y-y)if dx <= dx0 and dy <= dy0:dx0 = dxdy0 = dyclosest_precise = tclosest = closest_precise p = bezier.insert_point(self.path, closest_precise)i, t, pt = bezier._locate(self.path, closest_precise)i += pt = PathElement()pt.cmd = p[i].cmdpt.x = p[i].xpt.y = p[i].ypt.ctrl1 = Point(p[i].ctrl1.x, p[i].ctrl1.y)pt.ctrl2 = Point(p[i].ctrl2.x, p[i].ctrl2.y)pt.freehand = Falseself._points.insert(i, pt)self._points[i-].ctrl1 = Point(p[i-].ctrl1.x, p[i-].ctrl1.y)self._points[i+].ctrl1 = Point(p[i+].ctrl1.x, p[i+].ctrl1.y)self._points[i+].ctrl2 = Point(p[i+].ctrl2.x, p[i+].ctrl2.y)", "docstring": "Inserts a point on the path at the mouse location.\n\n We first need to check if the mouse location is on the path.\n Inserting point is time intensive and experimental.", "id": "f11549:c1:m7"} {"signature": "def update(self):", "body": "x, y = mouse()if self.show_grid:x, y = self.grid.snap(x, y)if _ctx._ns[\"\"]and not self.freehand:self._dirty = Trueif self.edit != Noneand not self.drag_pointand not self.drag_handle1and not self.drag_handle2:pt = self._points[self.edit]dx = pt.x+self.btn_xdy = pt.y+self.btn_yif self.overlap(dx, dy, x, y, r=self.btn_r):self.delete = self.editreturndx += self.btn_r* + if self.edit == len(self._points) - andself.overlap(dx, dy, x, y, r=self.btn_r):self.moveto = self.editreturnif self.insert:self.inserting = Truereturnif not self.drag_point andnot self.drag_handle1 andnot self.drag_handle2:self.editing = Falseindices = range(len(self._points))indices.reverse()for i in indices:pt = self._points[i]if pt != self.newand self.overlap(x, y, pt.x, pt.y)and self.new == None:if self.edit == i+and self.overlap(self._points[i+].ctrl1.x,self._points[i+].ctrl1.y, x, y):continueelse:self.edit = iself.editing = Truebreakif not self.editing:if self.edit != None:pt = self._points[self.edit]if self.overlap(pt.ctrl1.x, pt.ctrl1.y, x, 
y) orself.overlap(pt.ctrl2.x, pt.ctrl2.y, x, y):self.editing = Trueelse:self.edit = Noneif self.edit == None:if self.new == None:self.new = PathElement()if self.moveto == Trueor len(self._points) == :cmd = MOVETOself.moveto = Noneself.last_moveto = self.newelse:cmd = CURVETOself.new.cmd = cmdself.new.x = xself.new.y = yself.new.ctrl1 = Point(x, y)self.new.ctrl2 = Point(x, y)self.new.freehand = Falseif len(self._points) > :prev = self._points[-]rx, ry = self.reflect(prev.x, prev.y, prev.ctrl2.x, prev.ctrl2.y)self.new.ctrl1 = Point(rx, ry)self._points.append(self.new)else:rx, ry = self.reflect(self.new.x, self.new.y, x, y)self.new.ctrl2 = Point(rx, ry)elif self.new == None:pt = self._points[self.edit]if self.overlap(pt.x, pt.y, x, y)and not self.drag_handle1and not self.drag_handle2and not self.new != None:self.drag_point = Trueself.drag_handle1 = Falseself.drag_handle2 = Falseif self.overlap(pt.ctrl1.x, pt.ctrl1.y, x, y)and pt.cmd == CURVETOand not self.drag_pointand not self.drag_handle2:self.drag_point = Falseself.drag_handle1 = Trueself.drag_handle2 = Falseif self.overlap(pt.ctrl2.x, pt.ctrl2.y, x, y)and pt.cmd == CURVETOand not self.drag_pointand not self.drag_handle1:self.drag_point = Falseself.drag_handle1 = Falseself.drag_handle2 = Trueif self.drag_point == True:dx = x - pt.xdy = y - pt.ypt.x = xpt.y = ypt.ctrl2.x += dxpt.ctrl2.y += dyif self.edit < len(self._points)-:rx, ry = self.reflect(pt.x, pt.y, x, y)next = self._points[self.edit+]next.ctrl1.x += dxnext.ctrl1.y += dyif self.drag_handle1 == True:pt.ctrl1 = Point(x, y)if self.edit > and self.last_key != \"\":prev = self._points[self.edit-]d = self.distance(prev.x, prev.y, prev.ctrl2.x, prev.ctrl2.y)a = self.angle(prev.x, prev.y, pt.ctrl1.x, pt.ctrl1.y)prev.ctrl2 = self.coordinates(prev.x, prev.y, d, a+) if self.drag_handle2 == True: pt.ctrl2 = Point(x, y)if self.edit < len(self._points)-and self.last_key != \"\":next = self._points[self.edit+]d = self.distance(pt.x, pt.y, next.ctrl1.x, next.ctrl1.y)a = self.angle(pt.x, pt.y, pt.ctrl2.x, pt.ctrl2.y)next.ctrl1 = self.coordinates(pt.x, pt.y, d, a+)elif not self.freehand:self.new = Noneself.drag_point = Falseself.drag_handle1 = Falseself.drag_handle2 = Falseif self.delete != None and len(self._points) > :i = self.deletecmd = self._points[i].cmddel self._points[i]if < i < len(self._points):prev = self._points[i-]rx, ry = self.reflect(prev.x, prev.y, prev.ctrl2.x, prev.ctrl2.y)self._points[i].ctrl1 = Point(rx, ry)start_i = iwhile i > :i -= pt = self._points[i]if pt.freehand:del self._points[i]elif i < start_i- and pt.freehand == False:if pt.cmd == MOVETO:del self._points[i]breakif len(self._points) > and (cmd == MOVETO or i == ):self.last_moveto = self._points[]for pt in self._points:if pt.cmd == MOVETO:self.last_moveto = ptself.delete = Noneself.edit = Noneelif isinstance(self.moveto, int):self.moveto = Trueself.edit = Noneelif self.edit == Noneand self.contains_point(x, y, d=):self.insert = Trueelse:self.insert = Falseif self.insertingand self.contains_point(x, y, d=): self.insert_point(x, y)self.insert = Falseself.inserting = Falseif self._dirty == True:self.export_svg()self._dirty = Falseif _ctx._ns[\"\"]:self.last_key = _ctx._ns[\"\"]self.last_keycode = _ctx._ns[\"\"]if not _ctx._ns[\"\"] and self.last_key != None:if self.last_keycode == KEY_TAB:self.show_grid = not self.show_gridif self.last_key == \"\":self.edit = Noneself.freehand = not self.freehandif self.freehand:self.msg = \"\"else:self.msg = \"\"if self.last_keycode == KEY_ESC:self.edit = Noneif self.last_keycode == 
_ctx.KEY_BACKSPACEand self.edit != None:self.delete = self.editself.last_key = Noneself.last_code = Noneif _ctx._ns[\"\"]:dx = dy = keycode = _ctx._ns[\"\"]if keycode == _ctx.KEY_LEFT:dx = -elif keycode == _ctx.KEY_RIGHT:dx = if keycode == _ctx.KEY_UP:dy = -elif keycode == _ctx.KEY_DOWN:dy = if dx != or dy != :for pt in self._points:pt.x += dxpt.y += dypt.ctrl1.x += dxpt.ctrl1.y += dypt.ctrl2.x += dxpt.ctrl2.y += dy", "docstring": "Update runs each frame to check for mouse interaction.\n\n Alters the path by allowing the user to add new points,\n drag point handles and move their location.\n Updates are automatically stored as SVG\n in the given filename.", "id": "f11549:c1:m8"} {"signature": "def draw(self):", "body": "self.update()x, y = mouse()if self.show_grid:self.grid.draw()x, y = self.grid.snap(x, y)_ctx.strokewidth(self.strokewidth)if self.freehand:self.draw_freehand()r = _ctx.nofill()if len(self._points) > :first = True for i in range(len(self._points)):pt = self._points[i]if first:_ctx.beginpath(pt.x, pt.y)first = Falseelse:if pt.cmd == CLOSE:_ctx.closepath()elif pt.cmd == MOVETO:_ctx.moveto(pt.x, pt.y)elif pt.cmd == LINETO:_ctx.lineto(pt.x, pt.y)elif pt.cmd == CURVETO:_ctx.curveto(pt.ctrl1.x, pt.ctrl1.y, pt.ctrl2.x, pt.ctrl2.y, pt.x, pt.y)if ((i == self.edit and self.new == None)or pt == self.new)and pt.cmd == CURVETOand not pt.freehand:_ctx.stroke(self.handle_color)_ctx.nofill()_ctx.oval(pt.x-r, pt.y-r, r*, r*)_ctx.stroke(self.handle_color)_ctx.line(pt.ctrl2.x, pt.ctrl2.y, pt.x, pt.y)_ctx.fill(self.handle_color)if pt == self.newand not pt.freehand:rx, ry = self.reflect(pt.x, pt.y, pt.ctrl2.x, pt.ctrl2.y)_ctx.stroke(self.handle_color)_ctx.line(rx, ry, pt.x, pt.y)_ctx.nostroke()_ctx.fill(self.handle_color)_ctx.oval(rx-r/, ry-r/, r, r)if i == self.editand self.new == Noneand pt.cmd == CURVETOand not pt.freehand:_ctx.oval(pt.ctrl2.x-r/, pt.ctrl2.y-r/, r, r)if i > :prev = self._points[i-]_ctx.line(pt.ctrl1.x, pt.ctrl1.y, prev.x, prev.y)_ctx.oval(pt.ctrl1.x-r/, pt.ctrl1.y-r/, r, r)if i > and self._points[i-].cmd != MOVETO:_ctx.line(prev.ctrl2.x, prev.ctrl2.y, prev.x, prev.y)if i < len(self._points)-:next = self._points[i+]if next.cmd == CURVETO:_ctx.line(next.ctrl1.x, next.ctrl1.y, pt.x, pt.y)elif self.overlap(x, y, pt.x, pt.y)and not pt.freehand:self.insert = False _ctx.nofill()_ctx.stroke(self.handle_color)_ctx.oval(pt.x-r, pt.y-r, r*, r*)_ctx.fontsize()_ctx.fill(self.handle_color)txt = \"\"+str(int(pt.x))+\"\"+str(int(pt.y))+\"\"if (i == self.edit and self.new == None)or pt == self.newand not pt.freehand:_ctx.text(txt, pt.x+r, pt.y+) elif self.overlap(x, y, pt.x, pt.y)and not pt.freehand:_ctx.text(txt, pt.x+r, pt.y+)if not pt.freehand:if pt.cmd != MOVETO:_ctx.fill(self.path_color)_ctx.nostroke()else:_ctx.stroke(self.path_color)_ctx.nofill()_ctx.oval(pt.x-r/, pt.y-r/, r, r)_ctx.stroke(self.path_color)_ctx.fill(self.path_fill)_ctx.autoclosepath(False) p = _ctx.endpath()self.path = pif self.insert:_ctx.stroke(self.handle_color)_ctx.nofill()_ctx.oval(x-r*, y-r*, r*, r*)if self.edit == Noneand self.new == Noneand self.moveto != Trueand not self.freehand:_ctx.nofill()_ctx.stroke(self.new_color)rx, ry = self.reflect(pt.x, pt.y, pt.ctrl2.x, pt.ctrl2.y)_ctx.beginpath(pt.x, pt.y)_ctx.curveto(rx, ry, x, y, x, y)_ctx.endpath()if self.last_moveto != None:start = self.last_movetoelse:start = self._points[]p = _ctx.line(x, y, start.x, start.y, draw=False)try: p._nsBezierPath.setLineDash_count_phase_([,], , )except:pass_ctx.drawpath(p)elif self.edit == Noneand self.new == Noneand self.moveto != 
None:_ctx.stroke(self.new_color)_ctx.nofill()_ctx.oval(x-r*, y-r*, r*, r*)if self.edit != None:pt = self._points[self.edit]x = pt.x + self.btn_xy = pt.y + self.btn_yr = self.btn_r_ctx.nostroke()_ctx.fill(,,,)_ctx.fill(self.handle_color)_ctx.oval(x-r, y-r, r*, r*)_ctx.fill()_ctx.rotate()_ctx.rect(x-r+, y-, r+, )_ctx.rotate(-)_ctx.rect(x-r+, y-, r+, )_ctx.reset()if self.edit == len(self._points)-:_ctx.fill(self.handle_color)_ctx.oval(x+r*+-r, y-r, r*, r*)_ctx.fill()_ctx.rect(x+r*+-, y-r+, , r-)_ctx.rect(x+r*++, y-r+, , r-)if self.msg != \"\":self.msg_alpha -= _ctx.nostroke()_ctx.fill(,,, self.msg_alpha)_ctx.fontsize()_ctx.lineheight()w = _ctx.textwidth(self.msg)_ctx.rect(_ctx.WIDTH/-w/-, _ctx.HEIGHT/-, w+, , roundness=)_ctx.fill(,,, )_ctx.align(CENTER) _ctx.text(self.msg, , _ctx.HEIGHT/, width=_ctx.WIDTH)if self.msg_alpha <= :self.msg = \"\"self.msg_alpha = ", "docstring": "Draws the editable path and interface elements.", "id": "f11549:c1:m9"} {"signature": "def draw_freehand(self):", "body": "if _ctx._ns[\"\"]:x, y = mouse()if self.show_grid:x, y = self.grid.snap(x, y)if self.freehand_move == True:cmd = MOVETOself.freehand_move = Falseelse:cmd = LINETOpt = PathElement()if cmd != MOVETO:pt.freehand = True else:pt.freehand = Falsept.cmd = cmdpt.x = xpt.y = ypt.ctrl1 = Point(x,y)pt.ctrl2 = Point(x,y)self._points.append(pt)r = _ctx.nofill()_ctx.stroke(self.handle_color)_ctx.oval(pt.x-r, pt.y-r, r*, r*)_ctx.fontsize()_ctx.fill(self.handle_color)_ctx.text(\"\"+str(int(pt.x))+\"\"+str(int(pt.y))+\"\", pt.x+r, pt.y)self._dirty = Trueelse:self.freehand_move = Trueif self._dirty:self._points[-].freehand = Falseself.export_svg()self._dirty = False", "docstring": "Freehand sketching.", "id": "f11549:c1:m10"} {"signature": "def export_svg(self):", "body": "d = \"\"if len(self._points) > :d += \"\"+str(self._points[].x)+\"\"+str(self._points[].y)+\"\"for pt in self._points:if pt.cmd == MOVETO:d += \"\"+str(pt.x)+\"\"+str(pt.y)+\"\"elif pt.cmd == LINETO:d += \"\"+str(pt.x)+\"\"+str(pt.y)+\"\"elif pt.cmd == CURVETO:d += \"\"d += str(pt.ctrl1.x)+\"\"+str(pt.ctrl1.y)+\"\"d += str(pt.ctrl2.x)+\"\"+str(pt.ctrl2.y)+\"\"d += str(pt.x)+\"\"+str(pt.y)+\"\"c = \"\"c += str(int(self.path_color.r*)) + \"\"c += str(int(self.path_color.g*)) + \"\"c += str(int(self.path_color.b*)) + \"\"s = ''s += ''+str(_ctx.WIDTH)+''+str(_ctx.HEIGHT)+''s += ''s += ''+d+''+c+''+str(self.strokewidth)+''s += ''s += ''f = open(self.file+\"\", \"\")f.write(s)f.close()", "docstring": "Exports the path as SVG.\n\n Uses the filename given when creating this object.\n The file is automatically updated to reflect\n changes to the path.", "id": "f11549:c1:m11"} {"signature": "def __init__(self, w, h):", "body": "self.interpolation = BILINEARself.layers = Layers()self.w = wself.h = himg = Image.new(\"\", (w,h), (,,,))self.layer(img, name=\"\")", "docstring": "Creates a new canvas.\n\n Creates the working area on which to blend layers.\n The canvas background is transparent,\n but a background color could be set using the fill() function.", "id": "f11554:c0:m0"} {"signature": "def layer(self, img, x=, y=, name=\"\"):", "body": "from types import StringTypeif isinstance(img, Image.Image):img = img.convert(\"\")self.layers.append(Layer(self, img, x, y, name))return len(self.layers)-if isinstance(img, Layer):img.canvas = selfself.layers.append(img)return len(self.layers)- if type(img) == StringType: img = Image.open(img)img = img.convert(\"\")self.layers.append(Layer(self, img, x, y, name))return len(self.layers)-", "docstring": "Creates a new layer 
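A hedged sketch of the path-to-SVG conversion that export_svg() performs, with the stripped string literals filled in using the standard SVG M/L/C path commands. The Node and Point namedtuples are hypothetical stand-ins for the editor's PathElement and Point classes.

from collections import namedtuple

Point = namedtuple("Point", "x y")
Node = namedtuple("Node", "cmd x y ctrl1 ctrl2")            # stand-in for PathElement
MOVETO, LINETO, CURVETO = "moveto", "lineto", "curveto"     # stand-ins for the editor's constants

def to_svg_path(points):
    # Builds the d attribute of an SVG <path> element from editor points.
    d = []
    for pt in points:
        if pt.cmd == MOVETO:
            d.append("M %.1f %.1f" % (pt.x, pt.y))
        elif pt.cmd == LINETO:
            d.append("L %.1f %.1f" % (pt.x, pt.y))
        elif pt.cmd == CURVETO:
            d.append("C %.1f %.1f %.1f %.1f %.1f %.1f" % (
                pt.ctrl1.x, pt.ctrl1.y, pt.ctrl2.x, pt.ctrl2.y, pt.x, pt.y))
    return " ".join(d)

print(to_svg_path([
    Node(MOVETO, 10, 10, None, None),
    Node(CURVETO, 90, 90, Point(30, 10), Point(70, 90)),
]))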
from file, Layer, PIL Image.\n\n If img is an image file or PIL Image object,\n Creates a new layer with the given image file.\n The image is positioned on the canvas at x, y.\n\n If img is a Layer,\n uses that layer's x and y position and name.", "id": "f11554:c0:m1"} {"signature": "def fill(self, rgb, x=, y=, w=None, h=None, name=\"\"):", "body": "if w == None: w = self.w - xif h == None: h = self.h - yimg = Image.new(\"\", (w,h), rgb)self.layer(img, x, y, name)", "docstring": "Creates a new fill layer.\n\n Creates a new layer filled with the given rgb color.\n For example, fill((255,0,0)) creates a red fill.\n The layers fills the entire canvas by default.", "id": "f11554:c0:m2"} {"signature": "def gradient(self, style=LINEAR, w=, h=, name=\"\"):", "body": "from types import FloatTypew0 = self.w h0 = self.hif type(w) == FloatType: w *= w0if type(h) == FloatType: h *= h0img = Image.new(\"\", (int(w),int(h)), )draw = ImageDraw.Draw(img)if style == LINEAR:for i in range(int(w)):k = * i/wdraw.rectangle((i, , i, h), fill=int(k))if style == RADIAL:r = min(w,h)/for i in range(int(r)):k = - * i/rdraw.ellipse((w/-r+i, h/-r+i, w/+r-i, h/+r-i), fill=int(k))if style == DIAMOND:r = max(w,h)for i in range(int(r)):x = int(i*w/r*)y = int(i*h/r*)k = * i/rdraw.rectangle((x, y, w-x, h-y), outline=int(k))img = img.convert(\"\")self.layer(img, , , name=\"\")", "docstring": "Creates a gradient layer.\n\n Creates a gradient layer, that is usually used\n together with the mask() function.\n\n All the image functions work on gradients,\n so they can easily be flipped, rotated, scaled, inverted,\n made brighter or darker, ...\n\n Styles for gradients are LINEAR, RADIAL and DIAMOND.", "id": "f11554:c0:m3"} {"signature": "def merge(self, layers):", "body": "layers.sort()if layers[] == : del layers[]self.flatten(layers)", "docstring": "Flattens the given layers on the canvas.\n\n Merges the given layers with the indices in the list\n on the bottom layer in the list.\n The other layers are discarded.", "id": "f11554:c0:m4"} {"signature": "def flatten(self, layers=[]):", "body": "if layers == []: layers = range(, len(self.layers))background = self.layers._get_bg()background.name = \"\"for i in layers:layer = self.layers[i]x = max(, layer.x)y = max(, layer.y)w = min(background.w, layer.x+layer.w)h = min(background.h, layer.y+layer.h)base = background.img.crop((x, y, w, h))x = max(, -layer.x)y = max(, -layer.y)w -= layer.xh -= layer.yblend = layer.img.crop((x, y, w, h))if layer.blend == NORMAL:buffer = blendif layer.blend == MULTIPLY:buffer = ImageChops.multiply(base, blend)if layer.blend == SCREEN:buffer = ImageChops.screen(base, blend)if layer.blend == OVERLAY:buffer = Blend().overlay(base, blend)if layer.blend == HUE:buffer = Blend().hue(base, blend)if layer.blend == COLOR:buffer = Blend().color(base, blend)alpha = buffer.split()[]if i == :buffer = Image.composite(base, buffer, base.split()[])else:buffer = Image.composite(buffer, base, alpha)alpha = ImageChops.lighter(alpha, base.split()[])buffer.putalpha(alpha)base = Image.blend(base, buffer, layer.alpha)x = max(, layer.x)y = max(, layer.y)background.img.paste(base, (x,y))layers.reverse()for i in layers: del self.layers[i]img = Image.new(\"\", (self.w,self.h), (,,,))self.layers._set_bg(Layer(self, img, , , name=\"\"))if len(self.layers) == :self.layers.append(background)else:self.layers.insert(layers[-], background)", "docstring": "Flattens all layers according to their blend modes.\n\n Merges all layers to the canvas,\n using the blend mode and opacity defined for 
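Since the literals in the gradient() record are stripped, this is a minimal sketch of the LINEAR and RADIAL styles it describes, assuming Pillow and an 8-bit (0-255) single-band ramp; the original builds the same kind of mask before converting it to RGBA for use as a layer.

from PIL import Image, ImageDraw

def linear_gradient(w, h):
    # Horizontal dark-to-light ramp as a single-band mask.
    img = Image.new("L", (w, h), 0)
    draw = ImageDraw.Draw(img)
    for i in range(w):
        draw.rectangle((i, 0, i, h), fill=int(255.0 * i / w))
    return img

def radial_gradient(w, h):
    # Concentric ellipses, dark at the edges and bright in the centre.
    img = Image.new("L", (w, h), 0)
    draw = ImageDraw.Draw(img)
    r = min(w, h) // 2
    for i in range(r):
        k = int(255.0 * i / r)
        draw.ellipse((w // 2 - r + i, h // 2 - r + i, w // 2 + r - i, h // 2 + r - i), fill=k)
    return img

linear_gradient(256, 256).save("linear.png")   # example output files
radial_gradient(256, 256).save("radial.png")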
each layer.\n Once flattened, the stack of layers is emptied except\n for the transparent background (bottom layer).", "id": "f11554:c0:m5"} {"signature": "def export(self, filename):", "body": "self.flatten()self.layers[].img.save(filename)return filename", "docstring": "Exports the flattened canvas.\n\n Flattens the canvas.\n PNG retains the alpha channel information.\n Other possibilities are JPEG and GIF.", "id": "f11554:c0:m6"} {"signature": "def draw(self, x, y):", "body": "try:from time import timeimport md5from os import unlinkm = md5.new()m.update(str(time()))filename = \"\" + str(m.hexdigest()) + \"\"self.export(filename)_ctx.image(filename, x, y)unlink(filename)except:pass", "docstring": "Places the flattened canvas in NodeBox.\n\n Exports to a temporary PNG file.\n Draws the PNG in NodeBox using the image() command.\n Removes the temporary file.", "id": "f11554:c0:m7"} {"signature": "def preferences(interpolation=BILINEAR):", "body": "self. interpolation = interpolation", "docstring": "Settings that influence image manipulation.\n\n Currently, only defines the image interpolation,\n which can be set to NEAREST, BICUBIC or BILINEAR.", "id": "f11554:c0:m8"} {"signature": "def index(self):", "body": "for i in range(len(self.canvas.layers)):if self.canvas.layers[i] == self: breakif self.canvas.layers[i] == self: return ielse:return None", "docstring": "Returns this layer's index in the canvas.layers[].\n\n Searches the position of this layer in the canvas'\n layers list, return None when not found.", "id": "f11554:c2:m1"} {"signature": "def copy(self):", "body": "layer = Layer(None, self.img.copy(), self.x, self.y, self.name)layer.w = self.wlayer.h = self.hlayer.alpha = self.alphalayer.blend = self.blendreturn layer", "docstring": "Returns a copy of the layer.\n\n This is different from the duplicate() method,\n which duplicates the layer as a new layer on the canvas.\n The copy() method returns a copy of the layer\n that can be added to a different canvas.", "id": "f11554:c2:m2"} {"signature": "def delete(self):", "body": "i = self.index()if i != None: del self.canvas.layers[i]", "docstring": "Removes this layer from the canvas.", "id": "f11554:c2:m3"} {"signature": "def up(self):", "body": "i = self.index()if i != None:del self.canvas.layers[i]i = min(len(self.canvas.layers), i+)self.canvas.layers.insert(i, self)", "docstring": "Moves the layer up in the stacking order.", "id": "f11554:c2:m4"} {"signature": "def down(self):", "body": "i = self.index()if i != None:del self.canvas.layers[i]i = max(, i-)self.canvas.layers.insert(i, self)", "docstring": "Moves the layer down in the stacking order.", "id": "f11554:c2:m5"} {"signature": "def bounds(self):", "body": "return self.img.size", "docstring": "Returns the size of the layer.\n\n This is the width and height of the bounding box,\n the invisible rectangle around the layer.", "id": "f11554:c2:m6"} {"signature": "def select(self, path, feather=True):", "body": "w, h = self.img.sizemask = Image.new(\"\", (w,h), )draw = ImageDraw.Draw(mask)draw = ImageDraw.Draw(mask)draw.polygon(path, fill=)if feather:mask = mask.filter(ImageFilter.SMOOTH_MORE)mask = mask.filter(ImageFilter.SMOOTH_MORE)mask = ImageChops.darker(mask, self.img.split()[])self.img.putalpha(mask)", "docstring": "Applies the polygonal lasso tool on a layer.\n\n The path paramater is a list of points,\n either [x1, y1, x2, y2, x3, y3, ...]\n or [(x1,y1), (x2,y2), (x3,y3), ...]\n\n The parts of the layer that fall outside\n this polygonal area are cut.\n\n The selection is 
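A small sketch of the polygonal lasso described in select(): draw the polygon into an "L" mask, optionally feather it with SMOOTH_MORE, and combine it with the existing alpha channel. The 255 fill value and the two smoothing passes are assumptions, since those literals are stripped in the record.

from PIL import Image, ImageChops, ImageDraw, ImageFilter

def lasso(img, path, feather=True):
    # Keep only the pixels inside the polygon; optionally soften the edge.
    img = img.convert("RGBA")
    mask = Image.new("L", img.size, 0)
    ImageDraw.Draw(mask).polygon(path, fill=255)
    if feather:
        mask = mask.filter(ImageFilter.SMOOTH_MORE)
        mask = mask.filter(ImageFilter.SMOOTH_MORE)
    mask = ImageChops.darker(mask, img.split()[3])   # respect existing transparency
    img.putalpha(mask)
    return img

out = lasso(Image.new("RGB", (200, 200), "white"), [(10, 10), (180, 30), (90, 170)])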
not anti-aliased,\n but the feather parameter creates soft edges.", "id": "f11554:c2:m7"} {"signature": "def mask(self):", "body": "if len(self.canvas.layers) < : returni = self.index()if i == : returnlayer = self.canvas.layers[i-]alpha = Image.new(\"\", layer.img.size, )mask = self.canvas.layers[i] flat = ImageChops.darker(mask.img.convert(\"\"), mask.img.split()[])alpha.paste(flat, (mask.x,mask.y))alpha = ImageChops.darker(alpha, layer.img.split()[])layer.img.putalpha(alpha)self.delete()", "docstring": "Masks the layer below with this layer.\n\n Commits the current layer to the alpha channel of \n the previous layer. Primarily, mask() is useful when \n using gradient layers as masks on images below. \n\n For example:\n canvas.layer(\"image.jpg\")\n canvas.gradient()\n canvas.layer(2).flip()\n canvas.layer(2).mask()\n\n Adds a white-to-black linear gradient to\n the alpha channel of image.jpg, \n making it evolve from opaque on \n the left to transparent on the right.", "id": "f11554:c2:m8"} {"signature": "def duplicate(self):", "body": "i = self.canvas.layer(self.img.copy(), self.x, self.y, self.name)clone = self.canvas.layers[i]clone.alpha = self.alphaclone.blend = self.blend", "docstring": "Creates a copy of the current layer.\n\n This copy becomes the top layer on the canvas.", "id": "f11554:c2:m9"} {"signature": "def brightness(self, value=):", "body": "b = ImageEnhance.Brightness(self.img) self.img = b.enhance(value)", "docstring": "Increases or decreases the brightness in the layer.\n\n The given value is a percentage to increase\n or decrease the image brightness,\n for example 0.8 means brightness at 80%.", "id": "f11554:c2:m16"} {"signature": "def contrast(self, value=):", "body": "c = ImageEnhance.Contrast(self.img) self.img = c.enhance(value)", "docstring": "Increases or decreases the contrast in the layer.\n\n The given value is a percentage to increase\n or decrease the image contrast,\n for example 1.2 means contrast at 120%.", "id": "f11554:c2:m17"} {"signature": "def desaturate(self):", "body": "alpha = self.img.split()[]self.img = self.img.convert(\"\")self.img = self.img.convert(\"\")self.img.putalpha(alpha)", "docstring": "Desaturates the layer, making it grayscale.\n\n Instantly removes all color information from the layer,\n while maintaing its alpha channel.", "id": "f11554:c2:m18"} {"signature": "def invert(self):", "body": "alpha = self.img.split()[]self.img = self.img.convert(\"\")self.img = ImageOps.invert(self.img)self.img = self.img.convert(\"\")self.img.putalpha(alpha)", "docstring": "Inverts the layer.", "id": "f11554:c2:m19"} {"signature": "def translate(self, x, y):", "body": "self.x = xself.y = y", "docstring": "Positions the layer at the given coordinates.\n\n The x and y parameters define where to position \n the top left corner of the layer,\n measured from the top left of the canvas.", "id": "f11554:c2:m20"} {"signature": "def scale(self, w=, h=):", "body": "from types import FloatTypew0, h0 = self.img.sizeif type(w) == FloatType: w = int(w*w0)if type(h) == FloatType: h = int(h*h0)self.img = self.img.resize((w,h), INTERPOLATION)self.w = wself.h = h", "docstring": "Resizes the layer to the given width and height.\n\n When width w or height h is a floating-point number,\n scales percentual, \n otherwise scales to the given size in pixels.", "id": "f11554:c2:m21"} {"signature": "def distort(self, x1=,y1=, x2=,y2=, x3=,y3=, x4=,y4=):", "body": "w, h = self.img.sizequad = (-x1,-y1, -x4,h-y4, w-x3,w-y3, w-x2,-y2)self.img = self.img.transform(self.img.size, 
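The brightness() and contrast() records wrap Pillow's ImageEnhance classes, so a minimal usage sketch follows, with an in-memory image standing in for a layer; ImageEnhance.Color at 0.0 is shown as an equivalent way to get the desaturate() effect.

from PIL import Image, ImageEnhance

img = Image.new("RGB", (64, 64), (200, 120, 40))   # stand-in for a layer's image
img = ImageEnhance.Brightness(img).enhance(0.8)    # 80% brightness
img = ImageEnhance.Contrast(img).enhance(1.2)      # 120% contrast
gray = ImageEnhance.Color(img).enhance(0.0)        # fully desaturated, like desaturate()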
Image.QUAD, quad, INTERPOLATION)", "docstring": "Distorts the layer.\n\n Distorts the layer by translating \n the four corners of its bounding box to the given coordinates:\n upper left (x1,y1), upper right(x2,y2),\n lower right (x3,y3) and lower left (x4,y4).", "id": "f11554:c2:m22"} {"signature": "def rotate(self, angle):", "body": "from math import sqrt, pow, sin, cos, degrees, radians, asinw0, h0 = self.img.sized = sqrt(pow(w0,) + pow(h0,))d_angle = degrees(asin((w0*) / (d*)))angle = angle % if angle > and angle <= : d_angle += w = sin(radians(d_angle + angle)) * dw = max(w, sin(radians(d_angle - angle)) * d)w = int(abs(w))h = cos(radians(d_angle + angle)) * dh = max(h, cos(radians(d_angle - angle)) * d)h = int(abs(h))dx = int((w-w0) / )dy = int((h-h0) / )d = int(d)bg = ImageStat.Stat(self.img).meanbg = (int(bg[]), int(bg[]), int(bg[]), )box = Image.new(\"\", (d,d), bg)box.paste(self.img, ((d-w0)/, (d-h0)/))box = box.rotate(angle, INTERPOLATION)box = box.crop(((d-w)/+, (d-h)/, d-(d-w)/, d-(d-h)/))self.img = boxself.x += (self.w-w)/self.y += (self.h-h)/self.w = wself.h = h", "docstring": "Rotates the layer.\n\n Rotates the layer by given angle.\n Positive numbers rotate counter-clockwise,\n negative numbers rotate clockwise.\n\n Rotate commands are executed instantly,\n so many subsequent rotates will distort the image.", "id": "f11554:c2:m23"} {"signature": "def flip(self, axis=HORIZONTAL):", "body": "if axis == HORIZONTAL:self.img = self.img.transpose(Image.FLIP_LEFT_RIGHT)if axis == VERTICAL:self.img = self.img.transpose(Image.FLIP_TOP_BOTTOM)", "docstring": "Flips the layer, either HORIZONTAL or VERTICAL.", "id": "f11554:c2:m24"} {"signature": "def blur(self):", "body": "self.img = self.img.filter(ImageFilter.BLUR)", "docstring": "Blurs the layer.", "id": "f11554:c2:m25"} {"signature": "def sharpen(self, value=):", "body": "s = ImageEnhance.Sharpness(self.img) self.img = s.enhance(value)", "docstring": "Increases or decreases the sharpness in the layer.\n\n The given value is a percentage to increase\n or decrease the image sharpness,\n for example 0.8 means sharpness at 80%.", "id": "f11554:c2:m26"} {"signature": "def levels(self):", "body": "h = self.img.histogram()r = h[:]g = h[:]b = h[:]a = h[:]return r, g, b, a", "docstring": "Returns a histogram for each RGBA channel.\n\n Returns a 4-tuple of lists, r, g, b, and a.\n Each list has 255 items, a count for each pixel value.", "id": "f11554:c2:m28"} {"signature": "def overlay(self, img1, img2):", "body": "p1 = list(img1.getdata())p2 = list(img2.getdata())for i in range(len(p1)):p3 = ()for j in range(len(p1[i])):a = p1[i][j] / b = p2[i][j] / if j == :d = min(a,b)elif a > : d = *(a+b-a*b)-else: d = *a*b p3 += (int(d*),)p1[i] = p3img = Image.new(\"\", img1.size, )img.putdata(p1)return img", "docstring": "Applies the overlay blend mode.\n\n Overlays image img2 on image img1.\n The overlay pixel combines multiply and screen:\n it multiplies dark pixels values and screen light values.\n Returns a composite image with the alpha channel retained.", "id": "f11554:c3:m0"} {"signature": "def hue(self, img1, img2):", "body": "import colorsysp1 = list(img1.getdata())p2 = list(img2.getdata())for i in range(len(p1)):r1, g1, b1, a1 = p1[i]r1 = r1 / g1 = g1 / b1 = b1 / h1, s1, v1 = colorsys.rgb_to_hsv(r1, g1, b1)r2, g2, b2, a2 = p2[i]r2 = r2 / g2 = g2 / b2 = b2 / h2, s2, v2 = colorsys.rgb_to_hsv(r2, g2, b2)r3, g3, b3 = colorsys.hsv_to_rgb(h2, s1, v1)r3 = int(r3*)g3 = int(g3*)b3 = int(b3*)p1[i] = (r3, g3, b3, a1)img = Image.new(\"\", img1.size, 
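A sketch of the per-channel histogram that levels() returns, assuming Pillow: Image.histogram() yields 256 counts per band, concatenated band after band, so the RGBA slices are 0-255, 256-511 and so on (the record's docstring says 255 items, but there are 256 possible pixel values).

from PIL import Image

def levels(img):
    h = img.convert("RGBA").histogram()
    return h[0:256], h[256:512], h[512:768], h[768:1024]

r, g, b, a = levels(Image.new("RGB", (32, 32), (10, 20, 30)))
print(r[10], g[20], b[30], a[255])   # all 1024 pixels land in one bin per channel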
)img.putdata(p1)return img", "docstring": "Applies the hue blend mode.\n\n Hues image img1 with image img2.\n The hue filter replaces the hues of pixels in img1\n with the hues of pixels in img2.\n Returns a composite image with the alpha channel retained.", "id": "f11554:c3:m1"} {"signature": "def color(self, img1, img2):", "body": "import colorsysp1 = list(img1.getdata())p2 = list(img2.getdata())for i in range(len(p1)):r1, g1, b1, a1 = p1[i]r1 = r1 / g1 = g1 / b1 = b1 / h1, s1, v1 = colorsys.rgb_to_hsv(r1, g1, b1)r2, g2, b2, a2 = p2[i]r2 = r2 / g2 = g2 / b2 = b2 / h2, s2, v2 = colorsys.rgb_to_hsv(r2, g2, b2)r3, g3, b3 = colorsys.hsv_to_rgb(h2, s2, v1)r3 = int(r3*)g3 = int(g3*)b3 = int(b3*)p1[i] = (r3, g3, b3, a1)img = Image.new(\"\", img1.size, )img.putdata(p1)return img", "docstring": "Applies the color blend mode.\n\n Colorize image img1 with image img2.\n The color filter replaces the hue and saturation of pixels in img1\n with the hue and saturation of pixels in img2.\n Returns a composite image with the alpha channel retained.", "id": "f11554:c3:m2"} {"signature": "def convolute(self, kernel, scale=None, offset=):", "body": "if len(kernel) == : size = (,)elif len(kernel) == : size = (,)else: returnif scale == None:scale = for x in kernel: scale += xif scale == : scale = f = ImageFilter.BuiltinFilter()f.filterargs = size, scale, offset, kernelself.layer.img = self.layer.img.filter(f)", "docstring": "A (3,3) or (5,5) convolution kernel.\n\n The kernel argument is a list with either 9 or 25 elements,\n the weight for each surrounding pixels to convolute.", "id": "f11554:c4:m6"} {"signature": "def connect(self, name):", "body": "self._name = name.rstrip(\"\")self._con = sqlite.connect(self._name + \"\")self._cur = self._con.cursor()self._tables = []self._cur.execute(\"\")for r in self._cur: self._tables.append(r[])self._indices = []self._cur.execute(\"\")for r in self._cur: self._indices.append(r[])for t in self._tables:self._cur.execute(\"\"+t+\"\")fields = []key = \"\"for r in self._cur:fields.append(r[])if r[] == \"\": key = r[]setattr(self, t, Table(self, t, key, fields))", "docstring": "Generic database.\n\n Opens the SQLite database with the given name.\n The .db extension is automatically appended to the name.\n For each table in the database an attribute is created,\n and assigned a Table object.\n\n You can do: database.table or database[table].", "id": "f11555:c0:m1"} {"signature": "def create(self, name, overwrite=True):", "body": "self._name = name.rstrip(\"\")from os import unlinkif overwrite: try: unlink(self._name + \"\")except: pass self._con = sqlite.connect(self._name + \"\")self._cur = self._con.cursor()", "docstring": "Creates an SQLite database file.\n\n Creates an SQLite database with the given name.\n The .box file extension is added automatically.\n Overwrites any existing database by default.", "id": "f11555:c0:m2"} {"signature": "def create_table(self, name, fields=[], key=\"\"):", "body": "for f in fields: if f == key: fields.remove(key)sql = \"\"+name+\"\"sql += \"\"+key+\"\"for f in fields: sql += \"\"+f+\"\"sql += \"\"self._cur.execute(sql)self._con.commit()self.index(name, key, unique=True)self.connect(self._name)", "docstring": "Creates a new table.\n\n Creates a table with the given name,\n containing the list of given fields.\n Since SQLite uses manifest typing, no data type need be supplied.\n The primary key is \"id\" by default,\n an integer that can be set or otherwise autoincrements.", "id": "f11555:c0:m6"} {"signature": "def create_index(self, 
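convolute() feeds its kernel to ImageFilter.BuiltinFilter directly; the same effect can be had through Pillow's public ImageFilter.Kernel, sketched here with an example 3x3 sharpening kernel. The kernel values are an assumption, and Kernel only accepts "L" and "RGB" images.

from PIL import Image, ImageFilter

kernel = [ 0, -1,  0,
          -1,  5, -1,
           0, -1,  0]                                  # example sharpening kernel
img = Image.new("RGB", (64, 64), (120, 120, 120))      # stand-in for a layer's image
scale = sum(kernel) or 1                               # as in convolute(): default scale is the kernel sum
img = img.filter(ImageFilter.Kernel((3, 3), kernel, scale=scale, offset=0))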
table, field, unique=False, ascending=True):", "body": "if unique: u = \"\"else: u = \"\"if ascending: a = \"\"else: a = \"\"sql = \"\"+u+\"\"+table+\"\"+field+\"\"sql += \"\"+table+\"\"+field+\"\"+a+\"\"self._cur.execute(sql)self._con.commit()", "docstring": "Creates a table index.\n\n Creates an index on the given table,\n on the given field with unique values enforced or not,\n in ascending or descending order.", "id": "f11555:c0:m7"} {"signature": "def commit(self, each=):", "body": "self._commit = eachself._con.commit()", "docstring": "Sets the commit frequency.\n\n Modifications to the database,\n e.g. row insertions are commited in batch,\n specified by the given number.\n A number that is reasonably high allows for faster transactions.\n Commits anything still pending.", "id": "f11555:c0:m8"} {"signature": "def close(self):", "body": "self._con.commit()self._cur.close()self._con.close()", "docstring": "Commits any pending transactions and closes the database.", "id": "f11555:c0:m9"} {"signature": "def sql(self, sql):", "body": "self._cur.execute(sql)if sql.lower().find(\"\") >= :matches = []for r in self._cur: matches.append(r)return matches", "docstring": "Executes a raw SQL statement on the database.", "id": "f11555:c0:m10"} {"signature": "def dump(self, ext=\"\"):", "body": "self._con.commit()if ext == \"\":return self._dump_xml()", "docstring": "Dumps the data in the tables into another format (like XML).", "id": "f11555:c0:m11"} {"signature": "def __init__(self, db, name, key, fields):", "body": "self._db = dbself._name = nameself._key = keyself._fields = fieldsfor f in self._fields:import newim = lambda t, q, operator=\"\", fields=\"\", _field=f: t.find(q, operator, fields, _field)setattr(self, f, new.instancemethod(im, self, None))", "docstring": "Generic table.\n\n Constructs a table with the given name, primary key and columns.\n Each of the column names becomes the name of a table method\n that fetches rows from the table.\n\n For example, a table with an id field has the following method:\n table.id(query, operator=\"=\")", "id": "f11555:c1:m0"} {"signature": "def __len__(self):", "body": "sql = \"\"+self._key+\"\"+self._nameself._db._cur.execute(sql)i = for r in self._db._cur: i += return i", "docstring": "The row count of the table. This should be optimized.", "id": "f11555:c1:m2"} {"signature": "def find(self, q, operator=\"\", fields=\"\", key=None):", "body": "if key == None: key = self._keyif fields != \"\": fields = \"\".join(fields)try: q = unicode(q)except: passif q != \"\" and (q[] == \"\" or q[-] == \"\"):if q[] == \"\": q = \"\"+q.lstrip(\"\")if q[-] == \"\": q = q.rstrip(\"\")+\"\"operator = \"\"if q != \"\":sql = \"\"+fields+\"\"+self._name+\"\"+key+\"\"+operator+\"\"self._db._cur.execute(sql, (q,))else:sql = \"\"+fields+\"\"+self._nameself._db._cur.execute(sql)matches = []for r in self._db._cur: matches.append(r)return matches", "docstring": "A simple SQL SELECT query.\n\n Retrieves all rows from the table \n where the given query value is found in the given column (primary key if None).\n A different comparison operator (e.g. 
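A self-contained sketch of the SELECT that Table.find() issues, assuming Python's sqlite3: a * in the query switches the comparison to LIKE with SQL's % wildcard, and the value itself is passed as a bound parameter. The people table and its rows are made up for the example.

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("create table people (id integer primary key, name)")
con.executemany("insert into people (name) values (?)", [("alice",), ("bob",)])

def find(cur, table, key, q, operator="="):
    # '*' maps to SQL's '%' wildcard and forces a LIKE comparison, as in find() above.
    if "*" in q:
        q, operator = q.replace("*", "%"), "like"
    cur.execute("select * from %s where %s %s ?" % (table, key, operator), (q,))
    return cur.fetchall()

print(find(con.cursor(), "people", "name", "al*"))   # [(1, 'alice')]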
>, <, like) can be set.\n The wildcard character is * and automatically sets the operator to \"like\".\n Optionally, the fields argument can be a list of column names to select.\n Returns a list of row tuples containing fields.", "id": "f11555:c1:m3"} {"signature": "def all(self):", "body": "return self.find(\"\")", "docstring": "Returns all the rows in the table.", "id": "f11555:c1:m4"} {"signature": "def fields(self):", "body": "return self._fields", "docstring": "Returns the column names.\n\n Returns the name of each column in the database,\n in the same order row fields are returned from find().", "id": "f11555:c1:m5"} {"signature": "def append(self, *args, **kw):", "body": "if args and kw: returnif args and type(args[]) == dict:fields = [k for k in args[]]v = [args[][k] for k in args[]]if kw:fields = [k for k in kw]v = [kw[k] for k in kw]q = \"\".join([\"\" for x in fields])sql = \"\"+self._name+\"\"+\"\".join(fields)+\"\"sql += \"\"+q+\"\"self._db._cur.execute(sql, v)self._db._i += if self._db._i >= self._db._commit:self._db._i = self._db._con.commit()", "docstring": "Adds a new row to a table.\n\n Adds a row to the given table.\n The column names and their corresponding values\n must either be supplied as a dictionary of {fields:values},\n or a series of keyword arguments of field=value style.", "id": "f11555:c1:m6"} {"signature": "def edit(self, id, *args, **kw):", "body": "if args and kw: returnif args and type(args[]) == dict:fields = [k for k in args[]]v = [args[][k] for k in args[]]if kw:fields = [k for k in kw]v = [kw[k] for k in kw]sql = \"\"+self._name+\"\"+\"\".join(fields)+\"\"+self._key+\"\"+unicode(id)self._db._cur.execute(sql, v)self._db._i += if self._db._i >= self._db._commit:self._db._i = self._db._con.commit()", "docstring": "Edits the row with given id.", "id": "f11555:c1:m7"} {"signature": "def remove(self, id, operator=\"\", key=None):", "body": "if key == None: key = self._keytry: id = unicode(id)except: pass sql = \"\"+self._name+\"\"+key+\"\"+operator+\"\"self._db._cur.execute(sql, (id,))", "docstring": "Deletes the row with given id.", "id": "f11555:c1:m8"} {"signature": "def _reset(self):", "body": "self._segments = self._duration = ", "docstring": "Resets the number of drawn segments and the duration.\n\n To calculate the number of segments or the total time needed,\n we need to recurse through LSystem._grow().\n Before that, the draw(), segments() and duration() command\n will reset both tally variables.", "id": "f11556:c0:m3"} {"signature": "def _grow(self, generation, rule, angle, length, time=maxint, draw=True):", "body": "if generation == :self._duration = + maxint-timeif length <= self.threshold:self._duration = + maxint-timereturnif rule in self.commands:self.commands[rule](self, generation, rule, angle, length, time)if draw:if rule == \"\":_ctx.translate(, -min(length, length*time))elif rule == \"\":_ctx.rotate(max(-angle, -angle*time))elif rule == \"\":_ctx.rotate(min(+angle, +angle*time))elif rule == \"\":_ctx.rotate()elif rule == \"\":_ctx.push()elif rule == \"\":_ctx.pop()if rule in self.rulesand generation > and time > :for cmd in self.rules[rule]:if cmd == \"\":time -= self.costelif cmd == \"\":angle = -angleelif cmd == \"\":angle *= elif cmd == \"\":angle *= elif cmd == \"\":length *= elif cmd == \">\":length *= self._grow(generation-,cmd,angle,length*self.decrease,time,draw)elif rule == \"\"or (rule in self.rules and self.rules[rule] == \"\"):self._segments += if draw and time > :length = min(length, length*time)if 
self._timed:self.segment(length, generation, time, id=self._segments)else:self.segment(length, generation, None, id=self._segments)_ctx.translate(, -length)", "docstring": "Recurse through the system.\n\n When a segment is drawn, the LSsytem.segment() method will be called.\n You can customize this method to create your own visualizations.\n It takes an optional time parameter. \n\n If you divide this parameter by LSsytem.duration() you get \n a number between 0.0 and 1.0 you can use as an alpha value for example.\n\n The method also has an id parameter which is a unique number \n between 0 and LSystem.segments.", "id": "f11556:c0:m4"} {"signature": "def draw(self, x, y, generation, time=None, ease=None):", "body": "angle = self.angleif time is not None and ease:angle = min(self.angle, self.angle * time / ease)self._timed = Trueif not time:self._timed = Falsetime = maxintmode = _ctx.transform()_ctx.transform(CORNER)_ctx.push()_ctx.translate(x, y)self._reset()self._grow(generation, self.root, angle, self.d, time, draw=True)_ctx.pop()_ctx.transform(mode)", "docstring": "Draws a number of generations at the given position.\n\n The time parameter can be used to progress the system in an animatiom.\n As time nears LSystem.duration(generation), more segments will be drawn.\n\n The ease parameter can be used to gradually increase the branching angle\n as more segments are drawn.", "id": "f11556:c0:m6"} {"signature": "def segments(self, generation, time=None):", "body": "if not time:time = maxint_ctx.push()self._reset()self._grow(generation, self.root, self.angle, self.d, time, draw=False)_ctx.pop()return self._segments", "docstring": "Returns the number of segments drawn for a number of generations.\n\n The number of segments that are drawn to the screen\n depends of the number of generations and the amount of time.\n Each F command has a cost that depletes time.\n Segments will stop being drawn if generation reaches 0,\n when there is no time left \n or when the segment length falls below LSystem.threshold.", "id": "f11556:c0:m7"} {"signature": "def duration(self, generation):", "body": "_ctx.push()self._reset()self._grow(generation, self.root, self.angle, self.d, draw=False)_ctx.pop()return max(self._duration, )", "docstring": "Returns the total draw time needed based on the current cost.\n\n In an animation, the system will expand as time progresses.\n Each F command that draws a segment has a cost that depletes time.\n To calculate the total amount of time for a number of generations,\n we need to recurse through the system.\n Time does not flow through the system linearly, \n it \"branches\" from generation to generation.", "id": "f11556:c0:m8"} {"signature": "def update(self):", "body": "raise NotImplementedError", "docstring": "The method which gets executed when a new state of the object was\nreceived.", "id": "f11557:c1:m2"} {"signature": "def _label(self):", "body": "raise NotImplementedError", "docstring": "The text that should be shown in the object reprentation.", "id": "f11557:c1:m3"} {"signature": "def open_socket(self):", "body": "self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, )self.socket.setblocking()self.socket.bind((self.host, self.port))", "docstring": "Opens the socket and binds to the given host and port. 
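The turtle-drawing recursion in _grow() is hard to show without a canvas, but the idea underneath it is plain string rewriting: each generation, every symbol with a rule is replaced by its expansion. A minimal sketch, with Koch-curve-style rules standing in for the stripped rule set.

def expand(axiom, rules, generations):
    s = axiom
    for _ in range(generations):
        s = "".join(rules.get(ch, ch) for ch in s)   # symbols without a rule pass through
    return s

print(expand("F", {"F": "F+F-F-F+F"}, 2))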
Uses\nSO_REUSEADDR to be as robust as possible.", "id": "f11560:c1:m1"} {"signature": "def close_socket(self):", "body": "self.socket.close()", "docstring": "Closes the socket connection", "id": "f11560:c1:m2"} {"signature": "def refreshed(self):", "body": "return self.current_frame >= self.last_frame", "docstring": "Returns True if there was a new frame", "id": "f11560:c1:m3"} {"signature": "def load_profiles(self):", "body": "_profiles = {}for name, klass in inspect.getmembers(profiles):if inspect.isclass(klass) and name.endswith('') and name != '':profile = klass()_profiles[profile.address] = profiletry:setattr(self, profile.list_label, profile.objs)except AttributeError:continueself.manager.add(self.callback, profile.address)return _profiles", "docstring": "Loads all possible TUIO profiles and returns a dictionary with the\nprofile addresses as keys and an instance of a profile as the value", "id": "f11560:c1:m4"} {"signature": "def get_profile(self, profile):", "body": "return self.profiles.get(profile, None)", "docstring": "Returns a specific profile from the profile list and otherwise None", "id": "f11560:c1:m5"} {"signature": "def get_helpers(self):", "body": "return list([profile.list_label for profile in self.profiles.values()])", "docstring": "Returns a list of helper functions that provide access to the\n objects of each profile.", "id": "f11560:c1:m6"} {"signature": "def update(self):", "body": "try:self.manager.handle(self.socket.recv())except socket.error:pass", "docstring": "Tells the connection manager to receive the next 1024 byte of messages\nto analyze.", "id": "f11560:c1:m7"} {"signature": "def callback(self, *incoming):", "body": "message = incoming[]if message:address, command = message[], message[]profile = self.get_profile(address)if profile is not None:try:getattr(profile, command)(self, message)except AttributeError:pass", "docstring": "Gets called by the CallbackManager if a new message was received", "id": "f11560:c1:m8"} {"signature": "def hexDump(bytes):", "body": "for i in range(len(bytes)):sys.stdout.write(\"\" % (ord(bytes[i])))if (i+) % == :print(repr(bytes[i-:i+]))if(len(bytes) % != ):print(string.rjust(\"\", ), repr(bytes[i-len(bytes)%:i+]))", "docstring": "Useful utility; prints the string in hexadecimal", "id": "f11561:m0"} {"signature": "def readLong(data):", "body": "high, low = struct.unpack(\"\", data[:])big = (int(high) << ) + lowrest = data[:]return (big, rest)", "docstring": "Tries to interpret the next 8 bytes of the data\n as a 64-bit signed integer.", "id": "f11561:m4"} {"signature": "def OSCBlob(next):", "body": "if type(next) == type(\"\"):length = len(next)padded = math.ceil((len(next)) / ) * binary = struct.pack(\"\" % (padded), length, next)tag = ''else:tag = ''binary = ''return (tag, binary)", "docstring": "Convert a string into an OSC Blob,\n returning a (typetag, data) tuple.", "id": "f11561:m6"} {"signature": "def OSCArgument(next):", "body": "if type(next) == type(\"\"):OSCstringLength = math.ceil((len(next)+) / ) * binary = struct.pack(\"\" % (OSCstringLength), next)tag = \"\"elif type(next) == type():binary = struct.pack(\"\", next)tag = \"\"elif type(next) == type():binary = struct.pack(\"\", next)tag = \"\"else:binary = \"\"tag = \"\"return (tag, binary)", "docstring": "Convert some Python types to their\n OSC binary representations, returning a\n (typetag, data) tuple.", "id": "f11561:m7"} {"signature": "def parseArgs(args):", "body": "parsed = []for arg in args:print(arg)arg = arg.strip()interpretation = 
Nonetry:interpretation = float(arg)if string.find(arg, \"\") == -:interpretation = int(interpretation)except:interpretation = argpassparsed.append(interpretation)return parsed", "docstring": "Given a list of strings, produces a list\n where those strings have been parsed (where\n possible) as floats or integers.", "id": "f11561:m8"} {"signature": "def decodeOSC(data):", "body": "table = {\"\":readInt, \"\":readFloat, \"\":readString, \"\":readBlob}decoded = []address, rest = readString(data)typetags = \"\"if address == \"\":time, rest = readLong(rest)decoded.append(address)decoded.append(time)while len(rest)>:length, rest = readInt(rest)decoded.append(decodeOSC(rest[:length]))rest = rest[length:]elif len(rest) > :typetags, rest = readString(rest)decoded.append(address)decoded.append(typetags)if typetags[] == \"\":for tag in typetags[:]:value, rest = table[tag](rest)decoded.append(value)else:print(\"\")return decoded", "docstring": "Converts a typetagged OSC message to a Python list.", "id": "f11561:m9"} {"signature": "def append(self, argument, typehint = None):", "body": "if typehint == '':binary = OSCBlob(argument)else:binary = OSCArgument(argument)self.typetags = self.typetags + binary[]self.rawAppend(binary[])", "docstring": "Appends data to the message,\n updating the typetags based on\n the argument's type.\n If the argument is a blob (counted string)\n pass in 'b' as typehint.", "id": "f11561:c0:m6"} {"signature": "def rawAppend(self, data):", "body": "self.message = self.message + data", "docstring": "Appends raw data to the message. Use append().", "id": "f11561:c0:m7"} {"signature": "def getBinary(self):", "body": "address = OSCArgument(self.address)[]typetags = OSCArgument(self.typetags)[]return address + typetags + self.message", "docstring": "Returns the binary message (so far) with typetags.", "id": "f11561:c0:m8"} {"signature": "def handle(self, data, source = None):", "body": "decoded = decodeOSC(data)self.dispatch(decoded, source)", "docstring": "Given OSC data, tries to call the callback with the\n right address.", "id": "f11561:c1:m1"} {"signature": "def dispatch(self, message, source = None):", "body": "msgtype = \"\"try:if type(message[]) == str:address = message[]self.callbacks[address](message)elif type(message[]) == list:for msg in message:self.dispatch(msg)except KeyError as key:print('' % (address, key, message))pprint.pprint(message)except IndexError as e:print('' % (e, message))passexcept None as e:print(\"\", address, \"\", e)return", "docstring": "Sends decoded OSC data to an appropriate calback", "id": "f11561:c1:m2"} {"signature": "def add(self, callback, name):", "body": "if callback == None:del self.callbacks[name]else:self.callbacks[name] = callback", "docstring": "Adds a callback to our set of callbacks,\n or removes the callback with name if callback\n is None.", "id": "f11561:c1:m3"} {"signature": "def unbundler(self, messages):", "body": "for message in messages[:]:self.dispatch(message)", "docstring": "Dispatch the messages in a decoded bundle.", "id": "f11561:c1:m4"} {"signature": "def set(self, client, message):", "body": "raise NotImplementedError", "docstring": "The state of each alive (but unchanged) fiducial is periodically\nresent with 'set' messages.", "id": "f11562:c0:m1"} {"signature": "def alive(self, client, message):", "body": "raise NotImplementedError", "docstring": "The 'alive' message contains the session ids of all alive fiducials\nknown to reacTIVision.", "id": "f11562:c0:m2"} {"signature": "def fseq(self, client, message):", 
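readString() and OSCArgument() rely on OSC's 4-byte alignment: strings are NUL-terminated and padded to a multiple of four bytes. A small round-trip sketch; the helper names and the example address are made up.

import math
import struct

def pack_osc_string(s):
    length = int(math.ceil((len(s) + 1) / 4.0) * 4)           # pad to a multiple of 4
    return struct.pack(">%ds" % length, s.encode("ascii"))

def unpack_osc_string(data):
    end = data.index(b"\x00")
    length = int(math.ceil((end + 1) / 4.0) * 4)
    return data[:end].decode("ascii"), data[length:]

packed = pack_osc_string("/tuio/2Dobj")
print(unpack_osc_string(packed))   # ('/tuio/2Dobj', b'')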
"body": "client.last_frame = client.current_frameclient.current_frame = message[]", "docstring": "fseq messages associate a unique frame id with a set of set\nand alive messages", "id": "f11562:c0:m3"} {"signature": "def objs(self):", "body": "for obj in self.objects.itervalues():if obj.sessionid in self.sessions:yield obj", "docstring": "Returns a generator list of tracked objects which are recognized with\nthis profile and are in the current session.", "id": "f11562:c0:m4"} {"signature": "def hex_to_rgb(hex):", "body": "hex = hex.lstrip(\"\")if len(hex) < :hex += hex[-] * ( - len(hex))r, g, b = hex[:], hex[:], hex[:]r, g, b = [int(n, ) / for n in (r, g, b)]return r, g, b", "docstring": "Returns RGB values for a hex color string.", "id": "f11564:m1"} {"signature": "def lab_to_rgb(l, a, b):", "body": "y = (l + ) / x = a / + yz = y - b / v = [x, y, z]for i in _range():if pow(v[i], ) > :v[i] = pow(v[i], )else:v[i] = (v[i] - / ) / x = v[] * / y = v[] * / z = v[] * / r = x * + y * - + z * -g = x * - + y * + z * b = x * + y * - + z * v = [r, g, b]for i in _range():if v[i] > :v[i] = * pow(v[i], / ) - else:v[i] = * v[i]r, g, b = v[], v[], v[]return r, g, b", "docstring": "Converts CIE Lab to RGB components.\n\n First we have to convert to XYZ color space.\n Conversion involves using a white point,\n in this case D65 which represents daylight illumination.\n\n Algorithm adopted from:\n http://www.easyrgb.com/math.php", "id": "f11564:m2"} {"signature": "def cmyk_to_rgb(c, m, y, k):", "body": "r = - min(, c + k)g = - min(, m + k)b = - min(, y + k)return r, g, b", "docstring": "Cyan, magenta, yellow, black to red, green, blue.\n ReportLab, http://www.koders.com/python/fid5C006F554616848C01AC7CB96C21426B69D2E5A9.aspx\n Results will differ from the way NSColor converts color spaces.", "id": "f11564:m3"} {"signature": "def hsv_to_rgb(h, s, v):", "body": "if s == : return v, v, vh = h / ( / )i = floor(h)f = h - ip = v * ( - s)q = v * ( - s * f)t = v * ( - s * ( - f))if i == :r = v;g = t;b = pelif i == :r = q;g = v;b = pelif i == :r = p;g = v;b = telif i == :r = p;g = q;b = velif i == :r = t;g = p;b = velse:r = v;g = p;b = qreturn r, g, b", "docstring": "Hue, saturation, brightness to red, green, blue.\n http://www.koders.com/python/fidB2FE963F658FE74D9BF74EB93EFD44DCAE45E10E.aspx\n Results will differ from the way NSColor converts color spaces.", "id": "f11564:m5"} {"signature": "def complement(clr):", "body": "clr = color(clr)colors = colorlist(clr)colors.append(clr.complement)return colors", "docstring": "Returns the color and its complement in a list.", "id": "f11564:m16"} {"signature": "def complementary(clr):", "body": "clr = color(clr)colors = colorlist(clr)c = clr.copy()if clr.brightness > :c.brightness = + c.brightness * else:c.brightness = - c.brightness * colors.append(c)c = clr.copy()c.brightness = + c.brightnessc.saturation = + c.saturation * colors.append(c)clr = clr.complementc = clr.copy()if clr.brightness > :c.brightness = + clr.brightness * else:c.brightness = - c.brightness * colors.append(c)colors.append(clr)c = clr.copy()c.brightness = + c.brightnessc.saturation = + c.saturation * colors.append(c)return colors", "docstring": "Returns a list of complementary colors.\n\nThe complement is the color 180 degrees across\nthe artistic RYB color wheel.\nThe list contains darker and softer contrasting\nand complementing colors.", "id": "f11564:m17"} {"signature": "def split_complementary(clr):", "body": "clr = color(clr)colors = colorlist(clr)clr = 
clr.complementcolors.append(clr.rotate_ryb(-).lighten())colors.append(clr.rotate_ryb().lighten())return colors", "docstring": "Returns a list with the split complement of the color.\n\nThe split complement are the two colors to the left and right\nof the color's complement.", "id": "f11564:m18"} {"signature": "def left_complement(clr):", "body": "left = split_complementary(clr)[]colors = complementary(clr)colors[].h = left.hcolors[].h = left.hcolors[].h = left.hcolors = colorlist(colors[], colors[], colors[], colors[], colors[], colors[])return colors", "docstring": "Returns the left half of the split complement.\n\nA list is returned with the same darker and softer colors\nas in the complementary list, but using the hue of the\nleft split complement instead of the complement itself.", "id": "f11564:m19"} {"signature": "def right_complement(clr):", "body": "right = split_complementary(clr)[]colors = complementary(clr)colors[].h = right.hcolors[].h = right.hcolors[].h = right.hcolors = colorlist(colors[], colors[], colors[], colors[], colors[], colors[])return colors", "docstring": "Returns the right half of the split complement.", "id": "f11564:m20"} {"signature": "def analogous(clr, angle=, contrast=):", "body": "contrast = max(, min(contrast, ))clr = color(clr)colors = colorlist(clr)for i, j in [(, ), (, ), (-, -), (-, )]:c = clr.rotate_ryb(angle * i)t = - j * if clr.brightness - contrast * j < t:c.brightness = telse:c.brightness = clr.brightness - contrast * jc.saturation -= colors.append(c)return colors", "docstring": "Returns colors that are next to each other on the wheel.\n\nThese yield natural color schemes (like shades of water or sky).\nThe angle determines how far the colors are apart,\nmaking it bigger will introduce more variation.\nThe contrast determines the darkness/lightness of\nthe analogue colors in respect to the given colors.", "id": "f11564:m21"} {"signature": "def monochrome(clr):", "body": "def _wrap(x, min, threshold, plus):if x - min < threshold:return x + pluselse:return x - mincolors = colorlist(clr)c = clr.copy()c.brightness = _wrap(clr.brightness, , , )c.saturation = _wrap(clr.saturation, , , )colors.append(c)c = clr.copy()c.brightness = _wrap(clr.brightness, , , )colors.append(c)c = clr.copy()c.brightness = max(, clr.brightness + ( - clr.brightness) * )c.saturation = _wrap(clr.saturation, , , )colors.append(c)c = clr.copy()c.brightness = _wrap(clr.brightness, , , )colors.append(c)return colors", "docstring": "Returns colors in the same hue with varying brightness/saturation.", "id": "f11564:m22"} {"signature": "def triad(clr, angle=):", "body": "clr = color(clr)colors = colorlist(clr)colors.append(clr.rotate_ryb(angle).lighten())colors.append(clr.rotate_ryb(-angle).lighten())return colors", "docstring": "Returns a triad of colors.\n\nThe triad is made up of this color and two other colors\nthat together make up an equilateral triangle on\nthe artistic color wheel.", "id": "f11564:m23"} {"signature": "def tetrad(clr, angle=):", "body": "clr = color(clr)colors = colorlist(clr)c = clr.rotate_ryb(angle)if clr.brightness < :c.brightness += else:c.brightness -= -colors.append(c)c = clr.rotate_ryb(angle * )if clr.brightness < :c.brightness += else:c.brightness -= -colors.append(c)colors.append(clr.rotate_ryb(angle * ).lighten())return colors", "docstring": "Returns a tetrad of colors.\n\nThe tetrad is made up of this color and three other colors\nthat together make up a cross on the artistic color wheel.", "id": "f11564:m24"} {"signature": "def compound(clr, 
flip=False):", "body": "def _wrap(x, min, threshold, plus):if x - min < threshold:return x + pluselse:return x - mind = if flip: d = -clr = color(clr)colors = colorlist(clr)c = clr.rotate_ryb( * d)c.brightness = _wrap(clr.brightness, , , )colors.append(c)c = clr.rotate_ryb( * d)c.saturation = _wrap(clr.saturation, , , )c.brightness = _wrap(clr.brightness, , , )colors.append(c)c = clr.rotate_ryb( * d)c.saturation = _wrap(clr.saturation, , , )c.brightness = max(, clr.brightness)colors.append(c)c = clr.rotate_ryb( * d)c.saturation = _wrap(clr.saturation, , , )c.brightness = _wrap(clr.brightness, , , )colors.append(c)c = clr.rotate_ryb( * d)c.saturation = _wrap(clr.saturation, , , )c.brightness = _wrap(clr.brightness, , , )return colors", "docstring": "Roughly the complement and some far analogs.", "id": "f11564:m25"} {"signature": "def outline(path, colors, precision=, continuous=True):", "body": "def _point_count(path, precision):return max(int(path.length * precision * ), )n = sum([_point_count(contour, precision) for contour in path.contours])contour_i = contour_n = len(path.contours) - if contour_n == : continuous = Falsei = for contour in path.contours:if not continuous: i = j = _point_count(contour, precision)first = Truefor pt in contour.points(j):if first:first = Falseelse:if not continuous:clr = float(i) / j * len(colors)else:clr = float(i) / n * len(colors) - * contour_i / contour_n_ctx.stroke(colors[int(clr)])_ctx.line(x0, y0, pt.x, pt.y)x0 = pt.xy0 = pt.yi += pt = contour.point() _ctx.line(x0, y0, pt.x, pt.y)contour_i += ", "docstring": "Outlines each contour in a path with the colors in the list.\n\nEach contour starts with the first color in the list,\nand ends with the last color in the list.\n\nBecause each line segment is drawn separately,\nworks only with corner-mode transforms.", "id": "f11564:m28"} {"signature": "def guess_name(clr):", "body": "clr = Color(clr)if clr.is_transparent: return \"\"if clr.is_black: return \"\"if clr.is_white: return \"\"if clr.is_black: return \"\"for name in named_colors:try:r, g, b = named_colors[name]except:continueif r == clr.r and g == clr.g and b == clr.b:return namefor shade in shades:if clr in shade:return shade.name + \"\" + clr.nearest_hue()breakreturn clr.nearest_hue()", "docstring": "Guesses the shade and hue name of a color.\n\nIf the given color is named in the named_colors list, return that name.\nOtherwise guess its nearest hue and shade range.", "id": "f11564:m32"} {"signature": "def shader(x, y, dx, dy, radius=, angle=, spread=):", "body": "if angle != None:radius *= d = sqrt((dx - x) ** + (dy - y) ** )a = degrees(atan2(dy - y, dx - x)) + if d <= radius:d1 = * d / radiuselse:d1 = if angle is None:return - d1angle = - angle % spread = max(, min(spread, ))if spread == :return d = abs(a - angle)if d <= spread / :d2 = d / spread + d1else:d2 = if - angle <= spread / :d = abs( - angle + a)if d <= spread / :d2 = d / spread + d1if angle < spread / :d = abs( + angle - a)if d <= spread / :d2 = d / spread + d1return - max(, min(d2, ))", "docstring": "Returns a 0.0 - 1.0 brightness adjusted to a light source.\n\nThe light source is positioned at dx, dy.\nThe returned float is calculated for x, y position\n(e.g. 
an oval at x, y should have this brightness).\n\nThe radius influences the strength of the light,\nangle and spread control the direction of the light.", "id": "f11564:m33"} {"signature": "def aggregated(cache=DEFAULT_CACHE):", "body": "global _aggregated_name, _aggregated_dictif _aggregated_name != cache:_aggregated_name = cache_aggregated_dict = {}for path in glob(os.path.join(cache, \"\")):if os.path.isdir(path):p = os.path.basename(path)_aggregated_dict[p] = glob(os.path.join(path, \"\"))_aggregated_dict[p] = [os.path.basename(f)[:-] for f in _aggregated_dict[p]]return _aggregated_dict", "docstring": "A dictionary of all aggregated words.\n\nThe keys in the dictionary correspond to subfolders in the aggregated cache.\nEach key has a list of words. Each of these words is the name of an XML-file\nin the subfolder. The XML-file contains color information harvested from the web\n(or handmade).", "id": "f11564:m34"} {"signature": "def search_engine(query, top=, service=\"\", license=None,cache=os.path.join(DEFAULT_CACHE, \"\")):", "body": "try:a = theme(query, cache=cache)return aexcept:passif service == \"\":from web import googlesearch_engine = googleif service == \"\":from web import yahoosearch_engine = yahooif license:yahoo.license_key = licensesorted_colors = search_engine.sort([h for h in primary_hues] + [\"\", \"\"],context=query, strict=True, cached=True)sorted_shades = search_engine.sort([str(s) for s in shades],context=query, strict=True, cached=True)f = lambda x: x.strip(\"\").split()[]n2 = sum([w for h, w in sorted_colors[:top]])sorted_colors = [(color(f(h)), w / n2) for h, w in sorted_colors[:top]]n2 = sum([w for s, w in sorted_shades[:]])sorted_shades = [(shade(f(s)), w / n2) for s, w in sorted_shades[:]]a = theme(cache=cache)a.name = queryfor clr, w1 in sorted_colors:for rng, w2 in sorted_shades:a.add_range(rng, clr, w1 * w2)a._save()return a", "docstring": "Return a color aggregate from colors and ranges parsed from the web.\nT. 
De Smedt, http://nodebox.net/code/index.php/Prism", "id": "f11564:m36"} {"signature": "def morguefile(query, n=, top=):", "body": "from web import morguefileimages = morguefile.search(query)[:top]path = choice(images).download(thumbnail=True, wait=)return ColorList(path, n, name=query)", "docstring": "Returns a list of colors drawn from a morgueFile image.\n\nWith the Web library installed,\ndownloads a thumbnail from morgueFile and retrieves pixel colors.", "id": "f11564:m39"} {"signature": "def __init__(self, *args, **kwargs):", "body": "self.name = \"\"RGB = Bot.RGBHSB = Bot.HSBmode, range = _ctx.color_mode, _ctx.color_rangemodes = [RGB, HSB]if len(args) == and isinstance(args[], (str, unicode)):if args[].startswith(\"\"):r, g, b = hex_to_rgb(args[])a = else:v = self.str_to_rgb(args[])if len(v) == :r, g, b = va = else:r, g, b, a = vself.name = args[]_ctx.colormode(RGB, )BaseColor.__init__(self, r, g, b, a, mode='', color_range=)elif len(args) == and isinstance(args[], BaseColor):_ctx.colormode(RGB, )BaseColor.__init__(self, args[].r, args[].g, args[].b, args[].a, mode='', color_range=)elif kwargs.has_key(\"\")and kwargs[\"\"].lower() == \"\":if kwargs.has_key(\"\") andkwargs.has_key(\"\") andkwargs.has_key(\"\"):r, g, b = lab_to_rgb(kwargs[\"\"], kwargs[\"\"], kwargs[\"\"])else:r, g, b = lab_to_rgb(*args)_ctx.colormode(RGB, )BaseColor.__init__(self, r, g, b, , mode='', color_range=)elif (kwargs.has_key(\"\")and kwargs[\"\"].lower() in modes)or mode in modes:m, ra = mode, rangeif kwargs.has_key(\"\"): m = kwargs[\"\"]if kwargs.has_key(\"\"):ra = kwargs[\"\"]else:ra = if m == \"\":if kwargs.has_key(\"\") andkwargs.has_key(\"\") andkwargs.has_key(\"\"):r, g, b = hsv_to_rgb(kwargs[\"\"], kwargs[\"\"], kwargs[\"\"])else:h, s, l, a = (args)r, g, b = hsv_to_rgb(h, s, l)elif m == \"\":if kwargs.has_key(\"\") andkwargs.has_key(\"\") andkwargs.has_key(\"\"):r, g, b = kwargs[\"\"], kwargs[\"\"], kwargs[\"\"]else:if len(args) == :r, g, b = argsa = else:r, g, b, a = (args)_ctx.colormode(RGB, ra)BaseColor.__init__(self, r, g, b, a, mode='', color_range=ra)if kwargs.has_key(\"\") and kwargs[\"\"] != \"\":self.name = kwargs[\"\"]elif self.name == \"\":self.name = self.nearest_hue()_ctx.colormode(mode, range)", "docstring": "Color object with string input and adjustment methods.\n\n Accepts strings that are (or resemble) named colors, hex strings,\n RGB, CMYK, HSB and Lab values.\n\n You can supply a \"name\" parameter.\n Other valid named parameters are \"mode\" and \"range\",\n which default to the state colormode and color range.", "id": "f11564:c1:m0"} {"signature": "def str_to_rgb(self, str):", "body": "str = str.lower()for ch in \"\":str = str.replace(ch, \"\")if named_colors.has_key(str):return named_colors[str]for suffix in [\"\", \"\", \"\", \"\"]:str = re.sub(\"\" + suffix + \"\", \"\", str)str = re.sub(\"\", \"\", str)matches = []for name in named_colors:if name in str or str in name:matches.append(named_colors[name])if len(matches) > :return choice(matches)return named_colors[\"\"]", "docstring": "Returns RGB values based on a descriptive string.\n\n If the given str is a named color, return its RGB values.\n Otherwise, return a random named color that has str\n in its name, or a random named color which name appears in str.\n\n Specific suffixes (-ish, -ed, -y and -like) are recognised\n as well, for example, if you need a random variation of \"red\"\n you can use reddish (or greenish, yellowy, etc.)", "id": "f11564:c1:m1"} {"signature": "def rotate_ryb(self, angle=):", "body": "h = self.h 
* angle = angle % wheel = [(, ), (, ),(, ), (, ),(, ), (, ),(, ), (, ),(, ), (, ),(, ), (, ),(, ), (, ),(, ), (, ),(, ), (, ),(, ), (, ),(, ), (, ),(, ), (, ),(, )]for i in _range(len(wheel) - ):x0, y0 = wheel[i]x1, y1 = wheel[i + ]if y1 < y0:y1 += if y0 <= h <= y1:a = * x0 + (x1 - x0) * (h - y0) / (y1 - y0)breaka = (a + angle) % for i in _range(len(wheel) - ):x0, y0 = wheel[i]x1, y1 = wheel[i + ]if y1 < y0:y1 += if x0 <= a <= x1:h = * y0 + (y1 - y0) * (a - x0) / (x1 - x0)breakh = h % return Color(h / , self.s, self.brightness, self.a, mode=\"\", name=\"\")", "docstring": "Returns a color rotated on the artistic RYB color wheel.\n\n An artistic color wheel has slightly different opposites\n (e.g. purple-yellow instead of purple-lime).\n It is mathematically incorrect but generally assumed\n to provide better complementary colors.\n\n http://en.wikipedia.org/wiki/RYB_color_model", "id": "f11564:c1:m17"} {"signature": "def nearest_hue(self, primary=False):", "body": "if self.is_black:return \"\"elif self.is_white:return \"\"elif self.is_grey:return \"\"if primary:hues = primary_hueselse:hues = named_hues.keys()nearest, d = \"\", for hue in hues:if abs(self.hue - named_hues[hue]) % < d:nearest, d = hue, abs(self.hue - named_hues[hue]) % return nearest", "docstring": "Returns the name of the nearest named hue.\n\n For example,\n if you supply an indigo color (a color between blue and violet),\n the return value is \"violet\". If primary is set to True,\n the return value is \"purple\".\n\n Primary colors leave out the fuzzy lime, teal,\n cyan, azure and violet hues.", "id": "f11564:c1:m20"} {"signature": "def blend(self, clr, factor=):", "body": "r = self.r * ( - factor) + clr.r * factorg = self.g * ( - factor) + clr.g * factorb = self.b * ( - factor) + clr.b * factora = self.a * ( - factor) + clr.a * factorreturn Color(r, g, b, a, mode=\"\")", "docstring": "Returns a mix of two colors.", "id": "f11564:c1:m21"} {"signature": "def distance(self, clr):", "body": "coord = lambda a, d: (cos(radians(a)) * d, sin(radians(a)) * d)x0, y0 = coord(self.h * , self.s)x1, y1 = coord(clr.h * , clr.s)z0 = self.brightnessz1 = clr.brightnessd = sqrt((x1 - x0) ** + (y1 - y0) ** + (z1 - z0) ** )return d", "docstring": "Returns the Euclidean distance between two colors (0.0-1.0).\n\nConsider colors arranged on the color wheel:\n- hue is the angle of a color along the center\n- saturation is the distance of a color from the center\n- brightness is the elevation of a color from the center\n (i.e. 
we're on color a sphere)", "id": "f11564:c1:m22"} {"signature": "def swatch(self, x, y, w=, h=, roundness=):", "body": "_ctx.fill(self)_ctx.rect(x, y, w, h, roundness)", "docstring": "Rectangle swatch for this color.", "id": "f11564:c1:m23"} {"signature": "def __init__(self, *args, **kwargs):", "body": "_list.__init__(self)self.name = \"\"self.tags = []for arg in args:if arg.__class__ == Color:self.append(arg)if arg.__class__ == BaseColor:self.append(color(arg.r, arg.g, arg.b, mode=\"\"))try:self.name = arg.labelfor r, g, b in arg:self.append(color(r, g, b, mode=\"\"))except:passif isinstance(arg, _list)or isinstance(arg, tuple):for clr in arg:if clr.__class__ == Color:self.append(clr)if clr.__class__ == BaseColor:self.append(color(clr))if isinstance(arg, (str, unicode)):if os.path.exists(arg):n = if \"\" in kwargs.keys(): n = kwargs[\"\"]self.image_to_rgb(arg, n)else:clr = Color(arg)if not clr.is_transparent:self.append(clr)self.name = argelse:self.extend(self.context_to_rgb(arg))self.tags = argif \"\" in kwargs.keys():self.name = kwargs[\"\"]if \"\" in kwargs.keys():self.tags = kwargs[\"\"]", "docstring": "Construct a list of colors.\n\nColors can be supplied as individual arguments,\nor in a list or tuple:\nColorList(clr1, clr2)\nColorList([clr1, clr2])\nColorList((clr1, clr2))\n\nYou can also supply an object from inside a\nweb.kuler.search() or web.colr.search() list.\n\nOr a string with a named color, a descriptive feel,\nor the pathname of an image.\n\nColorList furthermore takes two named parameters,\na name and a list of tags.", "id": "f11564:c2:m0"} {"signature": "def image_to_rgb(self, path, n=):", "body": "from PIL import Imageimg = Image.open(path)p = img.getdata()f = lambda p: choice(p)for i in _range(n):rgba = f(p)rgba = _list(rgba)if len(rgba) == :rgba.append()r, g, b, a = [v / for v in rgba]clr = color(r, g, b, a, mode=\"\")self.append(clr)", "docstring": "Returns a list of colors based on pixel values in the image.\n\nThe Core Image library must be present to determine pixel colors.\nF. 
Albers: http://nodebox.net/code/index.php/shared_2007-06-11-11-37-05", "id": "f11564:c2:m1"} {"signature": "def context_to_rgb(self, str):", "body": "matches = []for clr in context:tags = context[clr]for tag in tags:if tag.startswith(str)or str.startswith(tag):matches.append(clr)breakmatches = [color(name) for name in matches]return matches", "docstring": "Returns the colors that have the given word in their context.\n\n For example, the word \"anger\" appears\n in black, orange and red contexts,\n so the list will contain those three colors.", "id": "f11564:c2:m2"} {"signature": "def _context(self):", "body": "tags1 = Nonefor clr in self:overlap = []if clr.is_black:name = \"\"elif clr.is_white:name = \"\"elif clr.is_grey:name = \"\"else:name = clr.nearest_hue(primary=True)if name == \"\" and clr.brightness < :name = \"\"tags2 = context[name]if tags1 is None:tags1 = tags2else:for tag in tags2:if tag in tags1:if tag not in overlap:overlap.append(tag)tags1 = overlapoverlap.sort()return overlap", "docstring": "Returns the intersection of each color's context.\n\nGet the nearest named hue of each color,\nand finds overlapping tags in each hue's colors.\nFor example, a list containing yellow, deeppink and olive\nyields: femininity, friendship, happiness, joy.", "id": "f11564:c2:m3"} {"signature": "def copy(self):", "body": "return ColorList([color(clr.r, clr.g, clr.b, clr.a, mode=\"\") for clr in self],name=self.name,tags=self.tags)", "docstring": "Returns a deep copy of the list.", "id": "f11564:c2:m4"} {"signature": "def _darkest(self):", "body": "min, n = (, , ), for clr in self:if clr.r + clr.g + clr.b < n:min, n = clr, clr.r + clr.g + clr.breturn min", "docstring": "Returns the darkest color from the list.\n\nKnowing the contrast between a light and a dark swatch\ncan help us decide how to display readable typography.", "id": "f11564:c2:m5"} {"signature": "def _average(self):", "body": "r, g, b, a = , , , for clr in self:r += clr.rg += clr.gb += clr.ba += clr.alphar /= len(self)g /= len(self)b /= len(self)a /= len(self)return color(r, g, b, a, mode=\"\")", "docstring": "Returns one average color for the colors in the list.", "id": "f11564:c2:m7"} {"signature": "def sort_by_distance(self, reversed=False):", "body": "if len(self) == : return ColorList()root = self[]for clr in self[:]:if clr.brightness < root.brightness:root = clrstack = [clr for clr in self]stack.remove(root)sorted = [root]while len(stack) > :closest, distance = stack[], stack[].distance(sorted[-])for clr in stack[:]:d = clr.distance(sorted[-])if d < distance:closest, distance = clr, dstack.remove(closest)sorted.append(closest)sorted.append(stack[])if reversed: _list.reverse(sorted)return ColorList(sorted)", "docstring": "Returns a list with the smallest distance between two neighboring colors.\nThe algorithm has a factorial complexity so it may run slow.", "id": "f11564:c2:m9"} {"signature": "def _sorted_copy(self, comparison, reversed=False):", "body": "sorted = self.copy()_list.sort(sorted, comparison)if reversed:_list.reverse(sorted)return sorted", "docstring": "Returns a sorted copy with the colors arranged according to the given comparison.", "id": "f11564:c2:m10"} {"signature": "def sort(self, comparison=\"\", reversed=False):", "body": "return getattr(self, \"\" + comparison)(reversed)", "docstring": "Return a copy sorted by a given color attribute.\n\n Note that there is no \"universal solution to sorting a list of colors,\n since colors need to be represented in 2 or 3 dimensions.", "id": "f11564:c2:m22"} 
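The bodies of distance (f11564:c1:m22) and sort_by_distance (f11564:c2:m9) above arrive with their numeric literals stripped by extraction. The following standalone Python sketch shows what the documented behaviour amounts to, working on plain (hue, saturation, brightness) tuples rather than the library's Color objects; the 360-degree hue scaling, the squared terms and the greedy nearest-neighbour loop are assumptions inferred from the docstrings, not the library's verbatim code.

from math import cos, sin, radians, sqrt

def hsb_distance(c1, c2):
    # c1, c2 are (hue, saturation, brightness) tuples, all components in 0.0-1.0.
    # Hue is treated as an angle on the colour wheel, saturation as the distance
    # from the centre and brightness as the elevation, so the result is the plain
    # Euclidean distance between two points on that cylinder.
    coord = lambda a, d: (cos(radians(a)) * d, sin(radians(a)) * d)
    x0, y0 = coord(c1[0] * 360, c1[1])   # 360 is assumed: hue expressed in degrees
    x1, y1 = coord(c2[0] * 360, c2[1])
    z0, z1 = c1[2], c2[2]
    return sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2 + (z1 - z0) ** 2)

def sort_by_distance(colors):
    # Greedy nearest-neighbour ordering that starts from the darkest colour,
    # mirroring the behaviour the f11564:c2:m9 docstring describes.
    if not colors:
        return []
    stack = list(colors)
    root = min(stack, key=lambda c: c[2])          # darkest = lowest brightness
    stack.remove(root)
    ordered = [root]
    while stack:
        nearest = min(stack, key=lambda c: hsb_distance(c, ordered[-1]))
        stack.remove(nearest)
        ordered.append(nearest)
    return ordered

# Example: chain three HSB colours starting from the darkest one.
print(sort_by_distance([(0.0, 1.0, 0.9), (0.6, 0.8, 0.2), (0.33, 0.5, 0.5)]))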
{"signature": "def cluster_sort(self, cmp1=\"\", cmp2=\"\", reversed=False, n=):", "body": "sorted = self.sort(cmp1)clusters = ColorList()d = i = for j in _range(len(sorted)):if getattr(sorted[j], cmp1) < d:clusters.extend(sorted[i:j].sort(cmp2))d -= / ni = jclusters.extend(sorted[i:].sort(cmp2))if reversed: _list.reverse(clusters)return clusters", "docstring": "Sorts the list by cmp1, then cuts it into n pieces which are sorted by cmp2.\n\nIf you want to cluster by hue, use n=12 (since there are 12 primary/secondary hues).\nThe resulting list will not contain n even slices:\nn is used rather to slice up the cmp1 property of the colors,\ne.g. cmp1=brightness and n=3 will cluster colors by brightness >= 0.66, 0.33, 0.0", "id": "f11564:c2:m23"} {"signature": "def reverse(self):", "body": "colors = ColorList.copy(self)_list.reverse(colors)return colors", "docstring": "Returns a reversed copy of the list.", "id": "f11564:c2:m24"} {"signature": "def repeat(self, n=, oscillate=False, callback=None):", "body": "colorlist = ColorList()colors = ColorList.copy(self)for i in _range(n):colorlist.extend(colors)if oscillate: colors = colors.reverse()if callback: colors = callback(colors)return colorlist", "docstring": "Returns a list that is a repetition of the given list.\n\nWhen oscillate is True,\nmoves from the end back to the beginning,\nand then from the beginning to the end, and so on.", "id": "f11564:c2:m25"} {"signature": "def __contains__(self, clr):", "body": "for clr2 in self:if clr.r == clr2.r andclr.g == clr2.g andclr.b == clr2.b:return Truereturn False", "docstring": "Returns True if clr's RGB values match a color in the list.", "id": "f11564:c2:m26"} {"signature": "def swatch(self, x, y, w=, h=, padding=, roundness=):", "body": "for clr in self:clr.swatch(x, y, w, h, roundness)y += h + padding", "docstring": "Rectangle swatches for all the colors in the list.", "id": "f11564:c2:m37"} {"signature": "def swarm(self, x, y, r=):", "body": "sc = _ctx.stroke(, , , )sw = _ctx.strokewidth()_ctx.push()_ctx.transform(_ctx.CORNER)_ctx.translate(x, y)for i in _range(r * ):clr = choice(self).copy()clr.alpha -= * random()_ctx.fill(clr)clr = choice(self)_ctx.stroke(clr)_ctx.strokewidth( * random())_ctx.rotate( * random())r2 = r * * random()_ctx.oval(r * random(), , r2, r2)_ctx.pop()_ctx.strokewidth(sw)if sc is None:_ctx.nostroke()else:_ctx.stroke(sc)", "docstring": "Fancy random ovals for all the colors in the list.", "id": "f11564:c2:m38"} {"signature": "def __init__(self, *colors, **kwargs):", "body": "if len(colors) == :if isinstance(colors[], _list)or isinstance(colors[], tuple):self._colors = _list(colors[])else:self._colors = [colors[]]else:self._colors = _list(colors)self._colors = [color(clr) for clr in self._colors]self._steps = if kwargs.has_key(\"\"):self._steps = kwargs[\"\"]if kwargs.has_key(\"\"):self._steps = kwargs[\"\"]self._spread = if kwargs.has_key(\"\"):self._spread = kwargs[\"\"]self._cache()", "docstring": "Creates a list of gradient colors based on a few given base colors.\n\nThe colors can be supplied as a list or tuple of colors,\nor simply an enumeration of color parameters.\n\nThe steps named parameter defining how many colors are in the list.\nThe spread named parameter controls the midpoint of the gradient", "id": "f11564:c3:m0"} {"signature": "def _interpolate(self, colors, n=):", "body": "gradient = []for i in _range(n):l = len(colors) - x = int( * i / n * l)x = min(x + , l)y = min(x + , l)base = * n / l * xd = (i - base) / ( * n / l)r = colors[x].r * ( - d) + 
colors[y].r * dg = colors[x].g * ( - d) + colors[y].g * db = colors[x].b * ( - d) + colors[y].b * da = colors[x].a * ( - d) + colors[y].a * dgradient.append(color(r, g, b, a, mode=\"\"))gradient.append(colors[-])return gradient", "docstring": "Returns intermediary colors for given list of colors.", "id": "f11564:c3:m5"} {"signature": "def _cache(self):", "body": "n = self.stepsif len(self._colors) == :ColorList.__init__(self, [self._colors[] for i in _range(n)])returncolors = self._interpolate(self._colors, )left = colors[:len(colors) / ]right = colors[len(colors) / :]left.append(right[])right.insert(, left[-])gradient = self._interpolate(left, int(n * self.spread))[:-]gradient.extend(self._interpolate(right, n - int(n * self.spread))[:])if self.spread > : gradient = gradient[:n]if self.spread < : gradient = gradient[-n:]ColorList.__init__(self, gradient)", "docstring": "Populates the list with a number of gradient colors.\n\nThe list has Gradient.steps colors that interpolate between\nthe fixed base Gradient.colors.\n\nThe spread parameter controls the midpoint of the gradient,\nyou can shift it right and left. A separate gradient is\ncalculated for each half and then glued together.", "id": "f11564:c3:m6"} {"signature": "def __getattr__(self, q):", "body": "if q is None:return selfcandidate = Noneif _favorites.data.has_key(q):candidate = qfor name, (tags, colors) in _favorites.data.iteritems():if q in tags:candidate = nameif candidate:tags, colors = _favorites.data[candidate]colors = ColorList([color(r, g, b, a) for r, g, b, a in colors], name=candidate)colors.tags = tags.split(\"\")return colorsreturn None", "docstring": "Returns the favorite colors list which name/tags matches q.", "id": "f11564:c4:m0"} {"signature": "def __init__(self, h=(, ), s=(, ), b=(, ), a=(, ),grayscale=False, name=\"\", length=):", "body": "ColorList.__init__(self)self.name = nameself.h = hself.s = sself.b = bself.a = aself.grayscale = grayscaleif not grayscale:self.black = ColorRange((, ), , , , True, name)self.white = ColorRange((, ), , , , True, name)self.length = length", "docstring": "A stateless list of colors whose HSB values are confined to a range.\n\nHue, saturation and brightness are confined to a (min, max) tuple,\nor a list of (min, max) tuples for discontinuous ranges, or to a single value.\nThis way you can describe concepts such as \"light\", \"dark\", etc.\n\nWith stateless we mean that you are never sure which colors are\nin the ColorRange, different colors that fall within the ranges\nare returned each time when calling color() or colors().\n\nColorRange has all the ColorList transformations (such as darken()),\nthese return ColorList objects. 
It's like a snapshot of the original\nstateless ColorRange.", "id": "f11564:c5:m0"} {"signature": "def copy(self, clr=None, d=):", "body": "cr = ColorRange()cr.name = self.namecr.h = deepcopy(self.h)cr.s = deepcopy(self.s)cr.b = deepcopy(self.b)cr.a = deepcopy(self.a)cr.grayscale = self.grayscaleif not self.grayscale:cr.black = self.black.copy()cr.white = self.white.copy()if clr != None:cr.h, cr.a = clr.h + d * (random() * - ), clr.areturn cr", "docstring": "Returns a copy of the range.\n\nOptionally, supply a color to get a range copy\nlimited to the hue of that color.", "id": "f11564:c5:m5"} {"signature": "def color(self, clr=None, d=):", "body": "if clr != None and not isinstance(clr, Color):clr = color(clr)if clr != None and not self.grayscale:if clr.is_black: return self.black.color(clr, d)if clr.is_white: return self.white.color(clr, d)if clr.is_grey: return choice((self.black.color(clr, d), self.white.color(clr, d)))h, s, b, a = self.h, self.s, self.b, self.aif clr != None:h, a = clr.h + d * (random() * - ), clr.ahsba = []for v in [h, s, b, a]:if isinstance(v, _list):min, max = choice(v)elif isinstance(v, tuple):min, max = velse:min, max = v, vhsba.append(min + (max - min) * random())h, s, b, a = hsbareturn color(h, s, b, a, mode=\"\")", "docstring": "Returns a color with random values in the defined h, s b, a ranges.\n\nIf a color is given, use that color's hue and alpha,\nand generate its saturation and brightness from the shade.\nThe hue is varied with the given d.\n\nIn this way you could have a \"warm\" color range\nthat returns all kinds of warm colors.\nWhen a red color is given as parameter it would generate\nall kinds of warm red colors.", "id": "f11564:c5:m6"} {"signature": "def contains(self, clr):", "body": "if not isinstance(clr, Color):return Falseif not isinstance(clr, _list):clr = [clr]for clr in clr:if clr.is_grey and not self.grayscale:return (self.black.contains(clr) orself.white.contains(clr))for r, v in [(self.h, clr.h), (self.s, clr.s), (self.b, clr.brightness), (self.a, clr.a)]:if isinstance(r, _list):passelif isinstance(r, tuple):r = [r]else:r = [(r, r)]for min, max in r:if not (min <= v <= max):return Falsereturn True", "docstring": "Returns True if the given color is part of this color range.\n\nCheck whether each h, s, b, a component of the color\nfalls within the defined range for that component.\n\nIf the given color is grayscale,\nchecks against the definitions for black and white.", "id": "f11564:c5:m8"} {"signature": "def __add__(self, colorrange):", "body": "if isinstance(colorrange, Color):colorrange = ColorList(colorrange)if isinstance(colorrange, ColorList)and not isinstance(colorrange, ColorRange):colorrange = ColorRange([(clr.h, clr.h) for clr in colorrange], [], [])hsba = [[], [], [], []]for r in [self, colorrange]:for i in _range():v = [r.h, r.s, r.b, r.a][i]if isinstance(v, _list):hsba[i].extend(v)elif isinstance(v, tuple):hsba[i].append(v)else:hsba[i].append((v, v))r = ColorRange(*hsba)return r", "docstring": "Combines two ColorRange objects into one.\n\nFor example, if you merge a dark green range and a light red range,\nyou get a range returning dark and light variations of green and red.", "id": "f11564:c5:m9"} {"signature": "def __init__(self, name=\"\", ranges=[], top=, cache=DEFAULT_CACHE, blue=\"\", guess=False, length=):", "body": "self.name = nameself.ranges = []self.cache = cacheself.top = topself.tags = []self.blue = blueself.guess = Falseself.length = self.group_swatches = Falsepath = os.path.join(self.cache, self.name + \"\")if 
os.path.exists(path):self._load(self.top, self.blue)elif name and zipfile.is_zipfile(os.path.join(self.cache + '')):archive = self.cache + ''zf = zipfile.ZipFile(archive, '')zpath = os.path.join(os.path.basename(self.cache), self.name + \"\")try:zi = zf.getinfo(zpath)self.cache = ziself._load(self.top, self.blue, archive=zf, member=zi.filename)except KeyError:for fn in zf.namelist():if os.path.basename(fn) == self.name + '':zi = zf.getinfo(fn)self.cache = ziself._load(self.top, self.blue, archive=zf, member=zi.filename)breakelse:a = aggregated(self.cache)for key in a:if self.name != \"\" and self.name in a[key]:self.cache = os.path.join(self.cache, key)self._load(self.top, self.blue)self.tags.append(key.replace(\"\", \"\"))self.group_swatches = Truebreakif len(ranges) > :self.ranges = rangesif len(self.ranges) == and guess:a = aggregated(self.cache)for key in a:m = difflib.get_close_matches(self.name, a[key], cutoff=)if len(m) > :self.name = m[]self.cache = os.path.join(self.cache, key)self._load(top, blue)self.tags.append(key.replace(\"\", \"\"))self.group_swatches = Trueself.guess = Truebreakif self.name != \"\" and len(self.ranges) == :raise ColorThemeNotFound", "docstring": "A set of weighted ranges linked to colors.\n\nA ColorTheme is a set of allowed colors (e.g. red, black)\nand ranges (e.g. dark, intense) for these colors.\nThese are supplied as lists of (color, range, weight) tuples.\nRanges with a greater weight will occur more in the combined range.\n\nA ColorTheme is expected to have a name,\nso it can be stored and retrieved in the XML cache.\n\nThe blue parameter denotes a color correction.\nSince most web aggregated results will yield \"blue\" instead of \"azure\" or \"cyan\",\nwe may never see these colors (e.g. azure beach will not propagate).\nSo instead of true blue we pass \"dodgerblue\", which will yield more all-round shades of blue.\nTo ignore this, set blue=\"blue\".", "id": "f11564:c7:m0"} {"signature": "def _weight_by_hue(self):", "body": "grouped = {}weights = []for clr, rng, weight in self.ranges:h = clr.nearest_hue(primary=False)if grouped.has_key(h):ranges, total_weight = grouped[h]ranges.append((clr, rng, weight))total_weight += weightgrouped[h] = (ranges, total_weight)else:grouped[h] = ([(clr, rng, weight)], weight)s = * sum([w for r, w in grouped.values()])grouped = [(grouped[h][], grouped[h][] / s, h, grouped[h][]) for h in grouped]grouped.sort()grouped.reverse()return grouped", "docstring": "Returns a list of (hue, ranges, total weight, normalized total weight)-tuples.\n\nColorTheme is made up out of (color, range, weight) tuples.\nFor consistency with XML-output in the old Prism format\n(i.e. s made up of s) we need a group\nweight per different hue.\n\nThe same is true for the swatch() draw method.\nHues are grouped as a single unit (e.g. 
dark red, intense red, weak red)\nafter which the dimensions (rows/columns) is determined.", "id": "f11564:c7:m3"} {"signature": "def _xml(self):", "body": "grouped = self._weight_by_hue()xml = \"\" + self.name + \"\" + \"\".join(self.tags) + \"\"for total_weight, normalized_weight, hue, ranges in grouped:if hue == self.blue: hue = \"\"clr = color(hue)xml += \"\" + clr.name + \"\" + str(normalized_weight) + \"\"xml += \"\" + str(clr.r) + \"\" + str(clr.g) + \"\"xml += \"\" + str(clr.b) + \"\" + str(clr.a) + \"\"for clr, rng, wgt in ranges:xml += \"\" + str(rng) + \"\" + str(wgt / total_weight) + \"\"xml = xml.rstrip(\"\") + \"\"xml += \"\"return xml", "docstring": "Returns the color information as XML.\n\nThe XML has the following structure:\n\n \n \n \n \n\n\nNotice that ranges are stored by name and retrieved in the _load()\nmethod with the shade() command - and are thus expected to be\nshades (e.g. intense, warm, ...) unless the shade() command would\nreturn any custom ranges as well. This can be done by appending custom\nranges to the shades list.", "id": "f11564:c7:m4"} {"signature": "def _save(self):", "body": "if not os.path.exists(self.cache):os.makedirs(self.cache)path = os.path.join(self.cache, self.name + \"\")f = open(path, \"\")f.write(self.xml)f.close()", "docstring": "Saves the color information in the cache as XML.", "id": "f11564:c7:m5"} {"signature": "def _load(self, top=, blue=\"\", archive=None, member=None):", "body": "if archive is None:path = os.path.join(self.cache, self.name + \"\")xml = open(path).read()else:assert member is not Nonexml = archive.read(member)dom = parseString(xml).documentElementattr = lambda e, a: e.attributes[a].valuefor e in dom.getElementsByTagName(\"\")[:top]:w = float(attr(e, \"\"))try:rgb = e.getElementsByTagName(\"\")[]clr = color(float(attr(rgb, \"\")),float(attr(rgb, \"\")),float(attr(rgb, \"\")),float(attr(rgb, \"\")),mode=\"\")try:clr.name = attr(e, \"\")if clr.name == \"\": clr = color(blue)except:passexcept:name = attr(e, \"\")if name == \"\": name = blueclr = color(name)for s in e.getElementsByTagName(\"\"):self.ranges.append((clr,shade(attr(s, \"\")),w * float(attr(s, \"\"))))", "docstring": "Loads a theme from aggregated web data.\n\nThe data must be old-style Prism XML: s consisting of s.\nColors named \"blue\" will be overridden with the blue parameter.\n\narchive can be a file like object (e.g. 
a ZipFile)\nand will be used along with 'member' if specified.", "id": "f11564:c7:m6"} {"signature": "def color(self, d=):", "body": "s = sum([w for clr, rng, w in self.ranges])r = random()for clr, rng, weight in self.ranges:if weight / s >= r: breakr -= weight / sreturn rng(clr, d)", "docstring": "Returns a random color within the theme.\n\nFetches a random range (the weight is taken into account,\nso ranges with a bigger weight have a higher chance of propagating)\nand hues it with the associated color.", "id": "f11564:c7:m7"} {"signature": "def colors(self, n=, d=):", "body": "s = sum([w for clr, rng, w in self.ranges])colors = colorlist()for i in _range(n):r = random()for clr, rng, weight in self.ranges:if weight / s >= r: breakr -= weight / scolors.append(rng(clr, d))return colors", "docstring": "Returns a number of random colors from the theme.", "id": "f11564:c7:m8"} {"signature": "def recombine(self, other, d=):", "body": "a, b = self, otherd1 = max(, min(d, ))d2 = d1c = ColorTheme(name=a.name[:int(len(a.name) * d1)] +b.name[int(len(b.name) * d2):],ranges=a.ranges[:int(len(a.ranges) * d1)] +b.ranges[int(len(b.ranges) * d2):],top=a.top,cache=os.path.join(DEFAULT_CACHE, \"\"),blue=a.blue,length=a.length * d1 + b.length * d2)c.tags = a.tags[:int(len(a.tags) * d1)]c.tags += b.tags[int(len(b.tags) * d2):]return c", "docstring": "Genetic recombination of two themes using cut and splice technique.", "id": "f11564:c7:m20"} {"signature": "def swatch(self, x, y, w=, h=, padding=, roundness=, n=, d=, grouped=None):", "body": "if grouped is None: grouped = self.group_swatchesif not grouped:s = sum([wgt for clr, rng, wgt in self.ranges])for clr, rng, wgt in self.ranges:cols = max(, int(wgt / s * n))for i in _range(cols):rng.colors(clr, n=n, d=d).swatch(x, y, w, h, padding=padding, roundness=roundness)x += w + paddingreturn x, y + n * (h + padding)grouped = self._weight_by_hue()for total_weight, normalized_weight, hue, ranges in grouped:dy = yrc = for clr, rng, weight in ranges:dx = xcols = int(normalized_weight * n)cols = max(, min(cols, n - len(grouped)))if clr.name == \"\": rng = rng.blackif clr.name == \"\": rng = rng.whitefor i in _range(cols):rows = int(weight / total_weight * n)rows = max(, rows)if (clr, rng, weight) == ranges[-] and rc + rows < n: rows += rng.colors(clr, n=rows, d=d).swatch(dx, dy, w, h, padding=padding, roundness=roundness)dx += w + paddingdy += (w + padding) * rows rc = rowsx += (w + padding) * cols + paddingreturn x, dy", "docstring": "Draws a weighted swatch with approximately n columns and rows.\n\nWhen the grouped parameter is True, colors are grouped in blocks of the same hue\n(also see the _weight_by_hue() method).", "id": "f11564:c7:m21"} {"signature": "def __init__(self, dx=, dy=, alpha=, blur=, clr=None):", "body": "Grob.__init__(self, _ctx)if clr is None:clr = color(, , , alpha, mode=\"\")self.dx = dxself.dy = dyself.blur = blurself.clr = clr.copy()self.clr.alpha = alphaglobal _shadow_shadow = self", "docstring": "Sets the dropshadow for all onscreen elements.\n\nBoth the fill and stroke of a path get a dropshadow.\n\nTODO - Implement shadow, could work as a postprocessing effect", "id": "f11564:c8:m0"} {"signature": "def __init__(self, path, clr1, clr2, type=\"\", dx=, dy=, spread=, angle=, alpha=):", "body": "raise NotImplementedError(\"\")self.path = pathself.path.inheritFromContext()self.path.fillcolor = colorlist(clr1, clr2).averageself.path.fillcolor.alpha *= alphaself.clr1 = clr1self.clr2 = clr2self.type = typeself.dx = dxself.dy = dyself.spread = 
spreadself.angle = angle_ctx.canvas.append(self)if _shadow:self._shadow_alpha = _shadow.clr.alphaelse:self._shadow_alpha = ", "docstring": "Fills a path with a smooth gradient between two colors.\n\nCreates a Core Image gradient and clips it to the given path.\nThe type can be radial or linear.\nThe spread is the distance between the two colors (0.0-1.0 or absolute).\nThe angle is useful for linear gradients, setting it to 90 degrees\ncreates a horizontal instead of a vertical gradient.\n\nThe gradient is in RGB color.\nIf shadows are being used, it is rendered in a clipping area\nwith a background that is the average of the two gradient colors\n(we need a fill to render a shadow).\nYou can tweak this background's opacity with the alpha parameter.", "id": "f11564:c9:m0"} {"signature": "def parse(svg, cached=False, _copy=True):", "body": "if not cached:dom = parser.parseString(svg)paths = parse_node(dom, [])else:id = _cache.id(svg)if not _cache.has_key(id):dom = parser.parseString(svg)_cache.save(id, parse_node(dom, []))paths = _cache.load(id, _copy)return paths", "docstring": "Returns cached copies unless otherwise specified.", "id": "f11566:m0"} {"signature": "def get_attribute(element, attribute, default=):", "body": "a = element.getAttribute(attribute)if a == \"\": return defaultreturn a", "docstring": "Returns XML element's attribute, or default if none.", "id": "f11566:m2"} {"signature": "def parse_node(node, paths=[], ignore=[\"\"]):", "body": "if node.nodeType == node.ELEMENT_NODE and node.tagName in ignore: return []if node.hasChildNodes():for child in node.childNodes:paths = parse_node(child, paths)if node.nodeType == node.ELEMENT_NODE:if node.tagName == \"\":paths.append(parse_line(node))elif node.tagName == \"\":paths.append(parse_rect(node))elif node.tagName == \"\":paths.append(parse_circle(node))elif node.tagName == \"\":paths.append(parse_oval(node))elif node.tagName == \"\":paths.append(parse_polygon(node))elif node.tagName == \"\":paths.append(parse_polygon(node))elif node.tagName == \"\":paths.append(parse_path(node))if node.tagName in (\"\", \"\", \"\", \"\", \"\", \"\", \"\"):paths[-] = parse_transform(node, paths[-])paths[-] = add_color_info(node, paths[-])return paths", "docstring": "Recurse the node tree and find drawable tags.\n\n Recures all the children in the node.\n If a child is something we can draw,\n a line, rect, oval or path,\n parse it to a PathElement drawable with drawpath()", "id": "f11566:m3"} {"signature": "def parse_transform(e, path):", "body": "t = get_attribute(e, \"\", default=\"\")for mode in (\"\", \"\"):if t.startswith(mode):v = t.replace(mode, \"\").lstrip(\"\").rstrip(\"\")v = v.replace(\"\", \"\").replace(\"\", \"\")v = [float(x) for x in v.split(\"\")]from nodebox.graphics import Transformt = Transform() if mode == \"\":t._set_matrix(v)elif mode == \"\":t.translate(*v)path = t.transformBezierPath(path)breake = e.parentNodeif e and e.tagName == \"\":path = parse_transform(e, path)return path", "docstring": "Transform the path according to a defined matrix.\n\n Attempts to extract a transform=\"matrix()|translate()\" attribute.\n Transforms the path accordingly.", "id": "f11566:m10"} {"signature": "def add_color_info(e, path):", "body": "_ctx.colormode(RGB, )def _color(hex, alpha=):if hex == \"\": return Nonen = int(hex[:],)r = (n>>)&g = (n>>)&b = n&return _ctx.color(r/, g/, b/, alpha)path.fill = (,,,)path.stroke = (,,,)path.strokewidth = alpha = get_attribute(e, \"\", default=\"\")if alpha == \"\":alpha = else:alpha = float(alpha)try: 
path.fill = _color(get_attribute(e, \"\", default=\"\"), alpha)except: passtry: path.stroke = _color(get_attribute(e, \"\", default=\"\"), alpha)except: passtry: path.strokewidth = float(get_attribute(e, \"\", default=\"\"))except: passstyle = get_attribute(e, \"\", default=\"\").split(\"\")for s in style:try:if s.startswith(\"\"):path.fill = _color(s.replace(\"\", \"\"))elif s.startswith(\"\"):path.stroke = _color(s.replace(\"\", \"\"))elif s.startswith(\"\"):path.strokewidth = float(s.replace(\"\", \"\"))except:pass path.closed = Falseif path[].x == path[len(path)-].x andpath[].y == path[len(path)-].y: path.closed = Truefor i in range(,-):if path[i].cmd == MOVETO:path.closed = Falsereturn path", "docstring": "Expand the path with color information.\n\n Attempts to extract fill and stroke colors\n from the element and adds it to path attributes.", "id": "f11566:m11"} {"signature": "def bezier_arc(x1, y1, x2, y2, start_angle=, extent=):", "body": "x1,y1, x2,y2 = min(x1,x2), max(y1,y2), max(x1,x2), min(y1,y2)if abs(extent) <= :frag_angle = float(extent)nfrag = else:nfrag = int(ceil(abs(extent)/))if nfrag == :warnings.warn('' % extent)return []frag_angle = float(extent) / nfragx_cen = (x1+x2)/y_cen = (y1+y2)/rx = (x2-x1)/ry = (y2-y1)/half_angle = radians(frag_angle) / kappa = abs( / * ( - cos(half_angle)) / sin(half_angle))if frag_angle < :sign = -else:sign = point_list = []for i in range(nfrag):theta0 = radians(start_angle + i*frag_angle)theta1 = radians(start_angle + (i+)*frag_angle)c0 = cos(theta0)c1 = cos(theta1)s0 = sin(theta0)s1 = sin(theta1)if frag_angle > :signed_kappa = -kappaelse:signed_kappa = kappapoint_list.append((x_cen + rx * c0,y_cen - ry * s0,x_cen + rx * (c0 + signed_kappa * s0),y_cen - ry * (s0 - signed_kappa * c0),x_cen + rx * (c1 - signed_kappa * s1),y_cen - ry * (s1 + signed_kappa * c1),x_cen + rx * c1,y_cen - ry * s1))return point_list", "docstring": "Compute a cubic Bezier approximation of an elliptical arc.\n\n (x1, y1) and (x2, y2) are the corners of the enclosing rectangle.\n The coordinate system has coordinates that increase to the right and down.\n Angles, measured in degress, start with 0 to the right (the positive X axis) \n and increase counter-clockwise.\n The arc extends from start_angle to start_angle+extent.\n I.e. 
start_angle=0 and extent=180 yields an openside-down semi-circle.\n\n The resulting coordinates are of the form (x1,y1, x2,y2, x3,y3, x4,y4)\n such that the curve goes from (x1, y1) to (x4, y4) with (x2, y2) and\n (x3, y3) as their respective Bezier control points.", "id": "f11567:m0"} {"signature": "def angle(x1, y1, x2, y2):", "body": "sign = usign = (x1*y2 - y1*x2)if usign < :sign = -num = x1*x2 + y1*y2den = hypot(x1,y1) * hypot(x2,y2)ratio = min(max(num/den, -), )return sign * degrees(acos(ratio))", "docstring": "The angle in degrees between two vectors.", "id": "f11567:m1"} {"signature": "def transform_from_local(xp, yp, cphi, sphi, mx, my):", "body": "x = xp * cphi - yp * sphi + mxy = xp * sphi + yp * cphi + myreturn (x,y)", "docstring": "Transform from the local frame to absolute space.", "id": "f11567:m2"} {"signature": "def elliptical_arc_to(x1, y1, rx, ry, phi, large_arc_flag, sweep_flag, x2, y2):", "body": "rx = abs(rx)ry = abs(ry)phi = phi % if x1==x2 and y1==y2:return []if rx == or ry == :return [(x2,y2)]rphi = radians(phi)cphi = cos(rphi)sphi = sin(rphi)dx = *(x1 - x2)dy = *(y1 - y2)x1p = cphi * dx + sphi * dyy1p = -sphi * dx + cphi * dylam = (x1p/rx)** + (y1p/ry)**if lam > :scale = sqrt(lam)rx *= scalery *= scalenum = max((rx*ry)** - (rx*y1p)** - (ry*x1p)**, )den = ((rx*y1p)** + (ry*x1p)**)a = sqrt(num / den)cxp = a * rx*y1p/rycyp = -a * ry*x1p/rxif large_arc_flag == sweep_flag:cxp = -cxpcyp = -cypmx = *(x1+x2)my = *(y1+y2)dx = (x1p-cxp) / rxdy = (y1p-cyp) / rydx2 = (-x1p-cxp) / rxdy2 = (-y1p-cyp) / rytheta1 = angle(,,dx,dy)dtheta = angle(dx,dy,dx2,dy2)if not sweep_flag and dtheta > :dtheta -= elif sweep_flag and dtheta < :dtheta += p = []control_points = bezier_arc(cxp-rx,cyp-ry,cxp+rx,cyp+ry, theta1, dtheta)for x1p,y1p, x2p,y2p, x3p,y3p, x4p,y4p in control_points:p.append((transform_from_local(x2p,y2p,cphi,sphi,mx,my) +transform_from_local(x3p,y3p,cphi,sphi,mx,my) +transform_from_local(x4p,y4p,cphi,sphi,mx,my)))return p", "docstring": "An elliptical arc approximated with Bezier curves or a line segment.\nAlgorithm taken from the SVG 1.1 Implementation Notes:\nhttp://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes", "id": "f11567:m3"} {"signature": "def cohesion(self, d=):", "body": "vx = vy = vz = for b in self.boids:if b != self:vx, vy, vz = vx+b.x, vy+b.y, vz+b.zn = len(self.boids)-vx, vy, vz = vx/n, vy/n, vz/nreturn (vx-self.x)/d, (vy-self.y)/d, (vz-self.z)/d", "docstring": "Boids move towards the flock's centre of mass.\n\n The centre of mass is the average position of all boids,\n not including itself (the \"perceived centre\").", "id": "f11568:c0:m2"} {"signature": "def separation(self, r=):", "body": "vx = vy = vz = for b in self.boids:if b != self:if abs(self.x-b.x) < r: vx += (self.x-b.x)if abs(self.y-b.y) < r: vy += (self.y-b.y)if abs(self.z-b.z) < r: vz += (self.z-b.z)return vx, vy, vz", "docstring": "Boids keep a small distance from other boids.\n\n Ensures that boids don't collide into each other,\n in a smoothly accelerated motion.", "id": "f11568:c0:m3"} {"signature": "def alignment(self, d=):", "body": "vx = vy = vz = for b in self.boids:if b != self:vx, vy, vz = vx+b.vx, vy+b.vy, vz+b.vzn = len(self.boids)-vx, vy, vz = vx/n, vy/n, vz/nreturn (vx-self.vx)/d, (vy-self.vy)/d, (vz-self.vz)/d", "docstring": "Boids match velocity with other boids.", "id": "f11568:c0:m4"} {"signature": "def limit(self, max=):", "body": "if abs(self.vx) > max: self.vx = self.vx/abs(self.vx)*maxif abs(self.vy) > max: self.vy = self.vy/abs(self.vy)*maxif abs(self.vz) > max: 
self.vz = self.vz/abs(self.vz)*max", "docstring": "The speed limit for a boid.\n\n Boids can momentarily go very fast,\n something that is impossible for real animals.", "id": "f11568:c0:m5"} {"signature": "def _angle(self):", "body": "from math import atan, pi, degreesa = degrees(atan(self.vy/self.vx)) + if self.vx < : a += return a", "docstring": "Returns the angle towards which the boid is steering.", "id": "f11568:c0:m6"} {"signature": "def goal(self, x, y, z, d=):", "body": "return (x-self.x)/d, (y-self.y)/d, (z-self.z)/d", "docstring": "Tendency towards a particular place.", "id": "f11568:c0:m7"} {"signature": "def constrain(self):", "body": "dx = self.w * dy = self.h * for b in self:if b.x < self.x-dx: b.vx += _ctx.random(dx)if b.y < self.y-dy: b.vy += _ctx.random(dy)if b.x > self.x+self.w+dx: b.vx -= _ctx.random(dx)if b.y > self.y+self.h+dy: b.vy -= _ctx.random(dy)if b.z < : b.vz += if b.z > : b.vz -= if b.y > self._perch_y and _ctx.random() < self._perch:b.y = self._perch_yb.vy = -abs(b.vy) * b.is_perching = Truetry:b._perch_t = self._perch_t()except:b._perch_t = self._perch_t", "docstring": "Cages the flock inside the x, y, w, h area.\n\n The actual cage is a bit larger,\n so boids don't seem to bounce of invisible walls\n (they are rather \"encouraged\" to stay in the area).\n\n If a boid touches the ground level,\n it may decide to perch there for a while.", "id": "f11568:c1:m8"} {"signature": "def update(self, shuffled=True, cohesion=, separation=, alignment=, goal=,limit=):", "body": "from random import shuffleif shuffled: shuffle(self)m1 = m2 = m3 = m4 = if not self.scattered and _ctx.random() < self._scatter:self.scattered = Trueif self.scattered:m1 = -m1m3 *= self._scatter_i += if self._scatter_i >= self._scatter_t:self.scattered = Falseself._scatter_i = if not self.has_goal:m4 = if self.flee:m4 = -m4for b in self:if b.is_perching:if b._perch_t > :b._perch_t -= continueelse:b.is_perching = Falsevx1, vy1, vz1 = b.cohesion(cohesion)vx2, vy2, vz2 = b.separation(separation)vx3, vy3, vz3 = b.alignment(alignment)vx4, vy4, vz4 = b.goal(self._gx, self._gy, self._gz, goal)b.vx += m1*vx1 + m2*vx2 + m3*vx3 + m4*vx4b.vy += m1*vy1 + m2*vy2 + m3*vy3 + m4*vy4b.vz += m1*vz1 + m2*vz2 + m3*vz3 + m4*vz4b.limit(limit)b.x += b.vxb.y += b.vyb.z += b.vzself.constrain()", "docstring": "Calculates the next motion frame for the flock.", "id": "f11568:c1:m9"} {"signature": "def _title(self):", "body": "return self.find(\"\").string", "docstring": "Returns the page title.", "id": "f11569:c2:m2"} {"signature": "def _description(self):", "body": "meta = self.find(\"\", {\"\":\"\"})if isinstance(meta, dict) andmeta.has_key(\"\"):return meta[\"\"]else:return u\"\"", "docstring": "Returns the meta description in the page.", "id": "f11569:c2:m3"} {"signature": "def _keywords(self):", "body": "meta = self.find(\"\", {\"\":\"\"})if isinstance(meta, dict) andmeta.has_key(\"\"):keywords = [k.strip() for k in meta[\"\"].split(\"\")]else:keywords = []return keywords", "docstring": "Returns the meta keywords in the page.", "id": "f11569:c2:m4"} {"signature": "def links(self, external=True):", "body": "domain = URLParser(self.url).domainlinks = []for a in self(\"\"):for attribute, value in a.attrs:if attribute == \"\":if not externalor (value.startswith(\"\") and value.find(\"\"+domain) < ):links.append(value)return links", "docstring": "Retrieves links in the page.\n\n Returns a list of URL's.\n By default, only external URL's are returned.\n External URL's starts with http:// and point to another\n domain than 
the domain the page is on.", "id": "f11569:c2:m5"} {"signature": "def __init__(self, name, type=\"\"):", "body": "self.path = os.path.join(CACHE_PATH, name)self.type = typeif not os.path.exists(self.path):os.makedirs(self.path)", "docstring": "The cache can be used to store data downloads.\n\n All of the data is stored in subfolders of the CACHE_PATH.\n Each filename is hashed to a unique md5 string.", "id": "f11570:c0:m0"} {"signature": "def hash(self, id):", "body": "h = md5(id).hexdigest()return os.path.join(self.path, h+self.type)", "docstring": "Creates a unique filename in the cache for the id.", "id": "f11570:c0:m1"} {"signature": "def age(self, id):", "body": "path = self.hash(id)if os.path.exists(path):modified = datetime.datetime.fromtimestamp(os.stat(path)[])age = datetime.datetime.today() - modifiedreturn age.dayselse:return ", "docstring": "Returns the age of the cache entry, in days.", "id": "f11570:c0:m5"} {"signature": "def format_data(s):", "body": "return s.encode(\"\")", "docstring": "Gogole library returns Unicode strings.", "id": "f11571:m2"} {"signature": "def search(q, start=, wait=, asynchronous=False, cached=False):", "body": "service = GOOGLE_SEARCHreturn GoogleSearch(q, start, service, \"\", wait, asynchronous, cached)", "docstring": "Returns a Google web query formatted as a GoogleSearch list object.", "id": "f11571:m4"} {"signature": "def search_images(q, start=, size=\"\", wait=, asynchronous=False, cached=False):", "body": "service = GOOGLE_IMAGESreturn GoogleSearch(q, start, service, size, wait, asynchronous, cached)", "docstring": "Returns a Google images query formatted as a GoogleSearch list object.", "id": "f11571:m5"} {"signature": "def search_news(q, start=, wait=, asynchronous=False, cached=False):", "body": "service = GOOGLE_NEWSreturn GoogleSearch(q, start, service, \"\", wait, asynchronous, cached)", "docstring": "Returns a Google news query formatted as a GoogleSearch list object.", "id": "f11571:m6"} {"signature": "def search_blogs(q, start=, wait=, asynchronous=False, cached=False):", "body": "service = GOOGLE_BLOGSreturn GoogleSearch(q, start, service, \"\", wait, asynchronous, cached)", "docstring": "Returns a Google blogs query formatted as a GoogleSearch list object.", "id": "f11571:m7"} {"signature": "def sort(words, context=\"\", strict=True, relative=True, service=GOOGLE_SEARCH,wait=, asynchronous=False, cached=False):", "body": "results = []for word in words:q = word + \"\" + contextq.strip()if strict: q = \"\"+q+\"\"r = GoogleSearch(q, , service, \"\", wait, asynchronous, cached)results.append(r)results.sort(GoogleResults.__cmp__)results.reverse()if relative and len(results) > :sum = for r in results: sum += r.totalfor r in results: r.total /= float(sum)results = [(r.query, r.total) for r in results]return results", "docstring": "Performs a Google sort on the given list.\n\n Sorts the items in the list according to \n the result count Google yields on an item.\n\n Setting a context sorts the items according\n to their relation to this context;\n for example sorting [red, green, blue] by \"love\"\n yields red as the highest results,\n likely because red is the color commonly associated with love.", "id": "f11571:m8"} {"signature": "def _parse(self, str):", "body": "str = replace_entities(str)str = strip_tags(str)str = collapse_spaces(str)return str", "docstring": "Parses the text data from an XML element defined by tag.", "id": "f11571:c2:m1"} {"signature": "def __cmp__(self, other):", "body": "if self.total > other.total:return elif 
self.total < other.total: return -else:return ", "docstring": "Compares with another GoogleSearch based on the number of results.", "id": "f11571:c2:m2"} {"signature": "def __init__(self, q, start=, service=GOOGLE_SEARCH, size=\"\",wait=, asynchronous=False, cached=True):", "body": "self.query = qself.service = serviceif cached:cache = \"\"else:cache = Noneurl = \"\"url = \"\"if service == GOOGLE_SEARCH : url += \"\"if service == GOOGLE_IMAGES : url += \"\"if service == GOOGLE_NEWS : url += \"\"if service == GOOGLE_BLOGS : url += \"\"arg = urllib.urlencode(((\"\", ),(\"\", q),(\"\", start),(\"\", \"\"),(\"\", GOOGLE_ID),(\"\", disambiguate_size(size))))url += argURLAccumulator.__init__(self, url, wait, asynchronous, cache, \"\")", "docstring": "Searches Google for the given query.\n\n By default, return cached results whenever possible.\n Otherwise, go online and update the local cache.\n The number of results is limited to 8 and starts at the given index.\n You can only return up to 32 results.\n\n The returned results depend on the service used: \n web pages, images, news or blogs.", "id": "f11571:c3:m0"} {"signature": "def format_data(s):", "body": "return s.encode(\"\")", "docstring": "Yahoo library returns Unicode strings.", "id": "f11572:m2"} {"signature": "def search(q, start=, count=, context=None, wait=, asynchronous=False, cached=False):", "body": "service = YAHOO_SEARCHreturn YahooSearch(q, start, count, service, context, wait, asynchronous, cached)", "docstring": "Returns a Yahoo web query formatted as a YahooSearch list object.", "id": "f11572:m3"} {"signature": "def search_images(q, start=, count=, wait=, asynchronous=False, cached=False):", "body": "service = YAHOO_IMAGESreturn YahooSearch(q, start, count, service, None, wait, asynchronous, cached)", "docstring": "Returns a Yahoo images query formatted as a YahooSearch list object.", "id": "f11572:m4"} {"signature": "def search_news(q, start=, count=, wait=, asynchronous=False, cached=False):", "body": "service = YAHOO_NEWSreturn YahooSearch(q, start, count, service, None, wait, asynchronous, cached)", "docstring": "Returns a Yahoo news query formatted as a YahooSearch list object.", "id": "f11572:m5"} {"signature": "def suggest_spelling(q, wait=, asynchronous=False, cached=False):", "body": "return YahooSpelling(q, wait, asynchronous, cached)", "docstring": "Returns list of suggested spelling corrections for the given query.", "id": "f11572:m6"} {"signature": "def sort(words, context=\"\", strict=True, relative=True, service=YAHOO_SEARCH,wait=, asynchronous=False, cached=False):", "body": "results = []for word in words:q = word + \"\" + contextq.strip()if strict: q = \"\"+q+\"\"r = YahooSearch(q, , , service, context, wait, asynchronous, cached)results.append(r)results.sort(YahooResults.__cmp__)results.reverse()if relative and len(results) > :sum = for r in results: sum += r.totalfor r in results: r.total /= float(sum)results = [(r.query, r.total) for r in results]return results", "docstring": "Performs a Yahoo sort on the given list.\n\n Sorts the items in the list according to \n the result count Yahoo yields on an item.\n\n Setting a context sorts the items according\n to their relation to this context;\n for example sorting [red, green, blue] by \"love\"\n yields red as the highest results,\n likely because red is the color commonly associated with love.", "id": "f11572:m7"} {"signature": "def _parse(self, e, tag):", "body": "tags = e.getElementsByTagName(tag)children = tags[].childNodesif len(children) != : return 
Noneassert children[].nodeType == xml.dom.minidom.Element.TEXT_NODEs = children[].nodeValues = format_data(s)s = replace_entities(s)return s", "docstring": "Parses the text data from an XML element defined by tag.", "id": "f11572:c3:m1"} {"signature": "def __cmp__(self, other):", "body": "if self.total > other.total:return elif self.total < other.total: return -else:return ", "docstring": "Compares with another YahooSearch based on the number of results.", "id": "f11572:c3:m2"} {"signature": "def __init__(self, q, start=, count=, service=YAHOO_SEARCH, context=None, wait=, asynchronous=False, cached=True):", "body": "self.query = qself.service = serviceif cached:cache = \"\"else:cache = Noneurl = \"\"if service == YAHOO_SEARCH and context == None : url += \"\"if service == YAHOO_SEARCH and context != None : url += \"\"if service == YAHOO_IMAGES : url += \"\"if service == YAHOO_NEWS : url += \"\"if service == YAHOO_SPELLING : url += \"\"arg = urllib.urlencode(((\"\", YAHOO_ID), (\"\", q),(\"\", start),(\"\", count),(\"\", unicode(context))))url += argURLAccumulator.__init__(self, url, wait, asynchronous, cache, \"\")", "docstring": "Searches Yahoo for the given query.\n\n By default, return cached results whenever possible.\n Otherwise, go online and update the local cache.\n The number of results is limited to count and starts at the given index.\n\n The returned results depend on the service used: \n web pages, images, news, spelling suggestion or contextual links.", "id": "f11572:c4:m0"} {"signature": "def download(self, size=SIZE_LARGE, thumbnail=False, wait=, asynchronous=False):", "body": "if thumbnail == True: size = SIZE_THUMBNAIL self._size = disambiguate_size(size)if self._size == SIZE_THUMBNAIL:url = self.url.replace(\"\", \"\")else:url = self.urlcache = \"\"extension = os.path.splitext(url)[]URLAccumulator.__init__(self, url, wait, asynchronous, cache, extension, )if not asynchronous:return self.path", "docstring": "Downloads this image to cache.\n\n Calling the download() method instantiates an asynchronous URLAccumulator.\n Once it is done downloading, this image will have its path property\n set to an image file in the cache.", "id": "f11573:c0:m2"} {"signature": "def encode_basestring(s):", "body": "def replace(match):return ESCAPE_DCT[match.group()]return '' + ESCAPE.sub(replace, s) + ''", "docstring": "Return a JSON representation of a Python string", "id": "f11589:m1"} {"signature": "def __init__(self, skipkeys=False, ensure_ascii=True,check_circular=True, allow_nan=True, sort_keys=False,indent=None, separators=None, encoding='', default=None):", "body": "self.skipkeys = skipkeysself.ensure_ascii = ensure_asciiself.check_circular = check_circularself.allow_nan = allow_nanself.sort_keys = sort_keysself.indent = indentself.current_indent_level = if separators is not None:self.item_separator, self.key_separator = separatorsif default is not None:self.default = defaultself.encoding = encoding", "docstring": "Constructor for JSONEncoder, with sensible defaults.\n\nIf skipkeys is False, then it is a TypeError to attempt\nencoding of keys that are not str, int, long, float or None. If\nskipkeys is True, such items are simply skipped.\n\nIf ensure_ascii is True, the output is guaranteed to be str\nobjects with all incoming unicode characters escaped. 
If\nensure_ascii is false, the output will be unicode object.\n\nIf check_circular is True, then lists, dicts, and custom encoded\nobjects will be checked for circular references during encoding to\nprevent an infinite recursion (which would cause an OverflowError).\nOtherwise, no such check takes place.\n\nIf allow_nan is True, then NaN, Infinity, and -Infinity will be\nencoded as such. This behavior is not JSON specification compliant,\nbut is consistent with most JavaScript based encoders and decoders.\nOtherwise, it will be a ValueError to encode such floats.\n\nIf sort_keys is True, then the output of dictionaries will be\nsorted by key; this is useful for regression tests to ensure\nthat JSON serializations can be compared on a day-to-day basis.\n\nIf indent is a non-negative integer, then JSON array\nelements and object members will be pretty-printed with that\nindent level. An indent level of 0 will only insert newlines.\nNone is the most compact representation.\n\nIf specified, separators should be a (item_separator, key_separator)\ntuple. The default is (', ', ': '). To get the most compact JSON\nrepresentation you should specify (',', ':') to eliminate whitespace.\n\nIf specified, default is a function that gets called for objects\nthat can't otherwise be serialized. It should return a JSON encodable\nversion of the object or raise a ``TypeError``.\n\nIf encoding is not None, then all input strings will be\ntransformed into unicode using that encoding prior to JSON-encoding.\nThe default is UTF-8.", "id": "f11589:c0:m0"} {"signature": "def default(self, o):", "body": "raise TypeError(\"\" % (o,))", "docstring": "Implement this method in a subclass such that it returns\na serializable object for ``o``, or calls the base implementation\n(to raise a ``TypeError``).\n\nFor example, to support arbitrary iterators, you could\nimplement default like this::\n\n def default(self, o):\n try:\n iterable = iter(o)\n except TypeError:\n pass\n else:\n return list(iterable)\n return JSONEncoder.default(self, o)", "id": "f11589:c0:m6"} {"signature": "def encode(self, o):", "body": "if isinstance(o, basestring):if isinstance(o, str):_encoding = self.encodingif (_encoding is not None and not (_encoding == '')):o = o.decode(_encoding)if self.ensure_ascii:return encode_basestring_ascii(o)else:return encode_basestring(o)chunks = list(self.iterencode(o))return ''.join(chunks)", "docstring": "Return a JSON string representation of a Python data structure.\n\n>>> JSONEncoder().encode({\"foo\": [\"bar\", \"baz\"]})\n'{\"foo\": [\"bar\", \"baz\"]}'", "id": "f11589:c0:m7"} {"signature": "def iterencode(self, o):", "body": "if self.check_circular:markers = {}else:markers = Nonereturn self._iterencode(o, markers)", "docstring": "Encode the given object and yield each string\nrepresentation as available.\n\nFor example::\n\n for chunk in JSONEncoder().iterencode(bigobject):\n mysocket.write(chunk)", "id": "f11589:c0:m8"} {"signature": "def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,allow_nan=True, cls=None, indent=None, separators=None,encoding='', default=None, **kw):", "body": "if (skipkeys is False and ensure_ascii is True andcheck_circular is True and allow_nan is True andcls is None and indent is None and separators is None andencoding == '' and default is None and not kw):iterable = _default_encoder.iterencode(obj)else:if cls is None:cls = JSONEncoderiterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,check_circular=check_circular, allow_nan=allow_nan, 
indent=indent,separators=separators, encoding=encoding,default=default, **kw).iterencode(obj)for chunk in iterable:fp.write(chunk)", "docstring": "Serialize ``obj`` as a JSON formatted stream to ``fp`` (a\n``.write()``-supporting file-like object).\n\nIf ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types\n(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) \nwill be skipped instead of raising a ``TypeError``.\n\nIf ``ensure_ascii`` is ``False``, then the some chunks written to ``fp``\nmay be ``unicode`` instances, subject to normal Python ``str`` to\n``unicode`` coercion rules. Unless ``fp.write()`` explicitly\nunderstands ``unicode`` (as in ``codecs.getwriter()``) this is likely\nto cause an error.\n\nIf ``check_circular`` is ``False``, then the circular reference check\nfor container types will be skipped and a circular reference will\nresult in an ``OverflowError`` (or worse).\n\nIf ``allow_nan`` is ``False``, then it will be a ``ValueError`` to\nserialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)\nin strict compliance of the JSON specification, instead of using the\nJavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).\n\nIf ``indent`` is a non-negative integer, then JSON array elements and object\nmembers will be pretty-printed with that indent level. An indent level\nof 0 will only insert newlines. ``None`` is the most compact representation.\n\nIf ``separators`` is an ``(item_separator, dict_separator)`` tuple\nthen it will be used instead of the default ``(', ', ': ')`` separators.\n``(',', ':')`` is the most compact JSON representation.\n\n``encoding`` is the character encoding for str instances, default is UTF-8.\n\n``default(obj)`` is a function that should return a serializable version\nof obj or raise TypeError. The default simply raises TypeError.\n\nTo use a custom ``JSONEncoder`` subclass (e.g. 
one that overrides the\n``.default()`` method to serialize additional types), specify it with\nthe ``cls`` kwarg.", "id": "f11590:m0"} {"signature": "def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,allow_nan=True, cls=None, indent=None, separators=None,encoding='', default=None, **kw):", "body": "if (skipkeys is False and ensure_ascii is True andcheck_circular is True and allow_nan is True andcls is None and indent is None and separators is None andencoding == '' and default is None and not kw):return _default_encoder.encode(obj)if cls is None:cls = JSONEncoderreturn cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,check_circular=check_circular, allow_nan=allow_nan, indent=indent,separators=separators, encoding=encoding, default=default,**kw).encode(obj)", "docstring": "Serialize ``obj`` to a JSON formatted ``str``.\n\nIf ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types\n(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) \nwill be skipped instead of raising a ``TypeError``.\n\nIf ``ensure_ascii`` is ``False``, then the return value will be a\n``unicode`` instance subject to normal Python ``str`` to ``unicode``\ncoercion rules instead of being escaped to an ASCII ``str``.\n\nIf ``check_circular`` is ``False``, then the circular reference check\nfor container types will be skipped and a circular reference will\nresult in an ``OverflowError`` (or worse).\n\nIf ``allow_nan`` is ``False``, then it will be a ``ValueError`` to\nserialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in\nstrict compliance of the JSON specification, instead of using the\nJavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).\n\nIf ``indent`` is a non-negative integer, then JSON array elements and\nobject members will be pretty-printed with that indent level. An indent\nlevel of 0 will only insert newlines. ``None`` is the most compact\nrepresentation.\n\nIf ``separators`` is an ``(item_separator, dict_separator)`` tuple\nthen it will be used instead of the default ``(', ', ': ')`` separators.\n``(',', ':')`` is the most compact JSON representation.\n\n``encoding`` is the character encoding for str instances, default is UTF-8.\n\n``default(obj)`` is a function that should return a serializable version\nof obj or raise TypeError. The default simply raises TypeError.\n\nTo use a custom ``JSONEncoder`` subclass (e.g. one that overrides the\n``.default()`` method to serialize additional types), specify it with\nthe ``cls`` kwarg.", "id": "f11590:m1"} {"signature": "def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,parse_int=None, parse_constant=None, **kw):", "body": "return loads(fp.read(),encoding=encoding, cls=cls, object_hook=object_hook,parse_float=parse_float, parse_int=parse_int,parse_constant=parse_constant, **kw)", "docstring": "Deserialize ``fp`` (a ``.read()``-supporting file-like object containing\na JSON document) to a Python object.\n\nIf the contents of ``fp`` is encoded with an ASCII based encoding other\nthan utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must\nbe specified. Encodings that are not ASCII based (such as UCS-2) are\nnot allowed, and should be wrapped with\n``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``\nobject and passed to ``loads()``\n\n``object_hook`` is an optional function that will be called with the\nresult of any object literal decode (a ``dict``). The return value of\n``object_hook`` will be used instead of the ``dict``. 
This feature\ncan be used to implement custom decoders (e.g. JSON-RPC class hinting).\n\nTo use a custom ``JSONDecoder`` subclass, specify it with the ``cls``\nkwarg.", "id": "f11590:m2"} {"signature": "def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,parse_int=None, parse_constant=None, **kw):", "body": "if (cls is None and encoding is None and object_hook is None andparse_int is None and parse_float is None andparse_constant is None and not kw):return _default_decoder.decode(s)if cls is None:cls = JSONDecoderif object_hook is not None:kw[''] = object_hookif parse_float is not None:kw[''] = parse_floatif parse_int is not None:kw[''] = parse_intif parse_constant is not None:kw[''] = parse_constantreturn cls(encoding=encoding, **kw).decode(s)", "docstring": "Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON\ndocument) to a Python object.\n\nIf ``s`` is a ``str`` instance and is encoded with an ASCII based encoding\nother than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name\nmust be specified. Encodings that are not ASCII based (such as UCS-2)\nare not allowed and should be decoded to ``unicode`` first.\n\n``object_hook`` is an optional function that will be called with the\nresult of any object literal decode (a ``dict``). The return value of\n``object_hook`` will be used instead of the ``dict``. This feature\ncan be used to implement custom decoders (e.g. JSON-RPC class hinting).\n\n``parse_float``, if specified, will be called with the string\nof every JSON float to be decoded. By default this is equivalent to\nfloat(num_str). This can be used to use another datatype or parser\nfor JSON floats (e.g. decimal.Decimal).\n\n``parse_int``, if specified, will be called with the string\nof every JSON int to be decoded. By default this is equivalent to\nint(num_str). This can be used to use another datatype or parser\nfor JSON integers (e.g. float).\n\n``parse_constant``, if specified, will be called with one of the\nfollowing strings: -Infinity, Infinity, NaN, null, true, false.\nThis can be used to raise an exception if invalid JSON numbers\nare encountered.\n\nTo use a custom ``JSONDecoder`` subclass, specify it with the ``cls``\nkwarg.", "id": "f11590:m3"} {"signature": "def decode(s):", "body": "import warningswarnings.warn(\"\",DeprecationWarning)return loads(s)", "docstring": "demjson, python-cjson API compatibility hook. Use loads(s) instead.", "id": "f11590:m4"} {"signature": "def encode(obj):", "body": "import warningswarnings.warn(\"\",DeprecationWarning)return dumps(obj)", "docstring": "demjson, python-cjson compatibility hook. 
Use dumps(s) instead.", "id": "f11590:m5"} {"signature": "def read(s):", "body": "import warningswarnings.warn(\"\",DeprecationWarning)return loads(s)", "docstring": "jsonlib, JsonUtils, python-json, json-py API compatibility hook.\nUse loads(s) instead.", "id": "f11590:m6"} {"signature": "def write(obj):", "body": "import warningswarnings.warn(\"\",DeprecationWarning)return dumps(obj)", "docstring": "jsonlib, JsonUtils, python-json, json-py API compatibility hook.\nUse dumps(s) instead.", "id": "f11590:m7"} {"signature": "def __init__(self, encoding=None, object_hook=None, parse_float=None,parse_int=None, parse_constant=None, strict=True):", "body": "self.encoding = encodingself.object_hook = object_hookself.parse_float = parse_floatself.parse_int = parse_intself.parse_constant = parse_constantself.strict = strict", "docstring": "``encoding`` determines the encoding used to interpret any ``str``\nobjects decoded by this instance (utf-8 by default). It has no\neffect when decoding ``unicode`` objects.\n\nNote that currently only encodings that are a superset of ASCII work,\nstrings of other encodings should be passed in as ``unicode``.\n\n``object_hook``, if specified, will be called with the result\nof every JSON object decoded and its return value will be used in\nplace of the given ``dict``. This can be used to provide custom\ndeserializations (e.g. to support JSON-RPC class hinting).\n\n``parse_float``, if specified, will be called with the string\nof every JSON float to be decoded. By default this is equivalent to\nfloat(num_str). This can be used to use another datatype or parser\nfor JSON floats (e.g. decimal.Decimal).\n\n``parse_int``, if specified, will be called with the string\nof every JSON int to be decoded. By default this is equivalent to\nint(num_str). This can be used to use another datatype or parser\nfor JSON integers (e.g. 
float).\n\n``parse_constant``, if specified, will be called with one of the\nfollowing strings: -Infinity, Infinity, NaN, null, true, false.\nThis can be used to raise an exception if invalid JSON numbers\nare encountered.", "id": "f11591:c0:m0"} {"signature": "def decode(self, s, _w=WHITESPACE.match):", "body": "obj, end = self.raw_decode(s, idx=_w(s, ).end())end = _w(s, end).end()if end != len(s):raise ValueError(errmsg(\"\", s, end, len(s)))return obj", "docstring": "Return the Python representation of ``s`` (a ``str`` or ``unicode``\ninstance containing a JSON document)", "id": "f11591:c0:m1"} {"signature": "def raw_decode(self, s, **kw):", "body": "kw.setdefault('', self)try:obj, end = self._scanner.iterscan(s, **kw).next()except StopIteration:raise ValueError(\"\")return obj, end", "docstring": "Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning\nwith a JSON document) and return a 2-tuple of the Python\nrepresentation and the index in ``s`` where the document ended.\n\nThis can be used to decode a JSON document from a string that may\nhave extraneous data at the end.", "id": "f11591:c0:m2"} {"signature": "def iterscan(self, string, idx=, context=None):", "body": "match = self.scanner.scanner(string, idx).matchactions = self.actionslastend = idxend = len(string)while True:m = match()if m is None:breakmatchbegin, matchend = m.span()if lastend == matchend:breakaction = actions[m.lastindex]if action is not None:rval, next_pos = action(m, context)if next_pos is not None and next_pos != matchend:matchend = next_posmatch = self.scanner.scanner(string, matchend).matchyield rval, matchendlastend = matchend", "docstring": "Yield match, end_idx for each match", "id": "f11592:c0:m1"} {"signature": "def _parse(self):", "body": "p1 = \"\"p2 = \"\"self.links = []for p in (p1,p2):for link in re.findall(p, self.description):self.links.append(link)self.description = re.sub(p, \"\", self.description)self.description = self.description.strip()", "docstring": "Strips links from the definition and gathers them in a links property.", "id": "f11593:c0:m1"} {"signature": "def is_preformatted(str):", "body": "for chunk in str.split(\"\"):if not chunk.startswith(\"\"):return Falsereturn True", "docstring": "Determines if an item in a paragraph is preformatted.\n\n If all of the lines in the markup start with a \" \"\n this indicates preformatted text.\n Preformatted is usually used for programming code.", "id": "f11594:m1"} {"signature": "def is_list(str):", "body": "for chunk in str.split(\"\"):chunk = chunk.replace(\"\", \"\")if not chunk.lstrip().startswith(\"\")and not re.search(r\"\", chunk.lstrip()):return Falsereturn True", "docstring": "Determines if an item in a paragraph is a list.\n\n If all of the lines in the markup start with a \"*\" or \"1.\" \n this indicates a list as parsed by parse_paragraphs().\n It can be drawn with draw_list().", "id": "f11594:m2"} {"signature": "def is_math(str):", "body": "str = str.strip()if str.startswith(\"\") and str.endswith(\"\"):return Trueelse:return False", "docstring": "Determines if an item in a paragraph is a LaTeX math equation.\n\n Math equations are wrapped in tags.\n They can be drawn as an image using draw_math().", "id": "f11594:m3"} {"signature": "def draw_math(str, x, y, alpha=):", "body": "try: from web import _ctxexcept: passstr = re.sub(\"\", \"\", str.strip())img = mimetex.gif(str)w, h = _ctx.imagesize(img)_ctx.image(img, x, y, alpha=alpha)return w, h", "docstring": "Uses mimetex to generate a GIF-image from the LaTeX 
equation.", "id": "f11594:m4"} {"signature": "def textwidth(str):", "body": "try: from web import _ctxexcept: passl = _ctx.lineheight()_ctx.lineheight()w = _ctx.textwidth(str)_ctx.lineheight(l)return w", "docstring": "textwidth() reports incorrectly when lineheight() is smaller than 1.0", "id": "f11594:m5"} {"signature": "def draw_list(markup, x, y, w, padding=, callback=None):", "body": "try: from web import _ctxexcept: passi = for chunk in markup.split(\"\"):if callback != None: callback(chunk, i)m = re.search(\"\", chunk.lstrip())if m:indent = re.search(\"\", chunk).start()*padding*bullet = m.group()dx = textwidth(\"\")chunk = chunk.lstrip(m.group()+\"\")if chunk.lstrip().startswith(\"\"):indent = chunk.find(\"\")*padding*bullet = u\"\"dx = textwidth(\"\")chunk = chunk.lstrip(\"\")_ctx.text(bullet, x+indent, y)dx += padding + indent_ctx.text(chunk, x+dx, y, width=w-dx)y += _ctx.textheight(chunk, width=w-dx)y += _ctx.textheight(\"\") * i += ", "docstring": "Draws list markup with indentation in NodeBox.\n\n Draw list markup at x, y coordinates\n using indented bullets or numbers.\n The callback is a command that takes a str and an int.", "id": "f11594:m6"} {"signature": "def draw_table(table, x, y, w, padding=):", "body": "try: from web import _ctxexcept: passf = _ctx.fill()_ctx.stroke(f)h = _ctx.textheight(\"\") + padding*row_y = yif table.title != \"\":_ctx.fill(f)_ctx.rect(x, row_y, w, h)_ctx.fill()_ctx.text(table.title, x+padding, row_y+_ctx.fontsize()+ padding)row_y += hrowspans = [ for i in range()]previous_cell_w = for row in table:cell_x = xcell_w = * wcell_w -= previous_cell_w * len([n for n in rowspans if n > ])cell_w /= len(row)cell_h = for cell in row:this_h = _ctx.textheight(cell, width=cell_w-padding*) + padding*cell_h = max(cell_h, this_h)i = for cell in row:if rowspans[i] > :rowspans[i] -= cell_x += previous_cell_wi += m = re.search(\"\", cell.properties)if m:rowspan = int(m.group())rowspans[i] = rowspanelse:rowspan = _ctx.fill(f)_ctx.text(cell, cell_x+padding, row_y+_ctx.fontsize()+padding, cell_w-padding*)_ctx.line(cell_x, row_y, cell_x+cell_w, row_y)if cell_x > x:_ctx.nofill()_ctx.line(cell_x, row_y, cell_x, row_y+cell_h)cell_x += cell_wi += row_y += cell_hprevious_cell_w = cell_w_ctx.nofill()_ctx.rect(x, y, w, row_y-y)", "docstring": "This is a very poor algorithm to draw Wikipedia tables in NodeBox.", "id": "f11594:m7"} {"signature": "def __init__(self, title, markup, light=False, full_strip=True):", "body": "self.title = titleself.markup = markupself.full_strip = full_stripself.disambiguation = []self.categories = []self.links = []self.paragraphs = []self.images = []self.tables = []self.references = []self.translations = {}self.important = []self.re = {\"\" : r\"\",\"\" : r\"\",\"\" : r\"\",\"\" : re.compile(r\"\", re.I),\"\" : re.compile(\"\", re.DOTALL),\"\" : re.compile(r\"\", re.DOTALL),\"\" : re.compile(r\"\", re.DOTALL),\"\" : re.compile(r\"\", re.DOTALL),\"\" : re.compile(r\"\", re.DOTALL),\"\" : r\"\", \"\" : re.compile(r\"\", re.DOTALL),\"\" : r\"\",\"\" : r\"\",\"\" : re.compile(r\"\", re.DOTALL), }self.ref = \"\"self.parse(light)", "docstring": "Wikipedia page parser.\n\n The expected markup is the stuff in Wikipedia's edit textarea.\n With light=True, it will onlt parse links to other articles (which is faster).\n With full_strip=False, it will preserve some HTML markup (links, bold, italic).", "id": "f11594:c8:m0"} {"signature": "def parse(self, light=False):", "body": "markup = self.markupself.disambiguation = 
self.parse_disambiguation(markup)self.categories = self.parse_categories(markup)self.links = self.parse_links(markup)if not light:markup = self.convert_pre(markup)markup = self.convert_li(markup)markup = self.convert_table(markup)markup = replace_entities(markup)markup = markup.replace(\"\", \"\")markup = re.sub(\"\", \"\", markup)self.references, markup = self.parse_references(markup)markup = re.sub(\"\", \"\", markup)self.images, markup = self.parse_images(markup)self.images.extend(self.parse_gallery_images(markup))self.paragraphs = self.parse_paragraphs(markup)self.tables = self.parse_tables(markup)self.translations = self.parse_translations(markup)self.important = self.parse_important(markup)", "docstring": "Parses data from Wikipedia page markup.\n\n The markup comes from Wikipedia's edit page.\n We parse it here into objects containing plain text.\n The light version parses only links to other articles, it's faster than a full parse.", "id": "f11594:c8:m4"} {"signature": "def plain(self, markup):", "body": "if self.full_strip:markup = markup.replace(\"\", \"\")markup = markup.replace(\"\", \"\")else:markup = re.sub(\"\", \"\", markup)markup = re.sub(\"\", \"\", markup)markup = re.sub(self.re[\"\"], \"\", markup)markup = re.sub(self.re[\"\"], \"\", markup)markup = markup.replace(\"\", \"\")markup = markup.replace(\"\", \"\")if self.full_strip:markup = re.sub(r\"\", \"\", markup)else:markup = re.sub(r\"\", '', markup)markup = re.sub(r\"\", '', markup) markup = re.sub(self.re[\"\"], \"\", markup)markup = markup.replace(\"\", \"\")markup = markup.replace(\"\", \"\")markup = markup.replace(\"\", \"\")markup = markup.replace(\"\", \"\")markup = re.sub(\"\", \"\", markup)markup = re.sub(\"\", \"\", markup)markup = markup.replace(\"\", \"\")markup = markup.replace(\"\", \"\")markup = markup.replace(\"\", \"\")markup = markup.replace(\"\", \"\")markup = re.sub(self.re[\"\"], \"\", markup) markup = re.sub(\"\", \"\", markup) markup = re.sub(\"\", \"\", markup) markup = re.sub(self.ref+\"\", \"\", markup) markup = re.sub(\"\", \"\", markup) markup = re.sub(self.re[\"\"], \"\", markup) markup = re.sub(\"\", \"\", markup) markup = re.sub(\"\", \"\", markup) markup = re.sub(\"\", \"\", markup) markup = re.sub(re.compile(\"\", re.DOTALL), \"\", markup) markup = markup.replace(\"\", \"\")markup = re.sub(\"\", \"\", markup)markup = markup.split(\"\")for i in range(len(markup)):if not markup[i].startswith(\"\"):markup[i] = re.sub(r\"\", \"\", markup[i])markup = \"\".join(markup)markup = markup.replace(\"\", \"\")if self.full_strip:markup = strip_tags(markup, exclude=[\"\"], linebreaks=True)markup = markup.strip()return markup", "docstring": "Strips Wikipedia markup from given text.\n\n This creates a \"plain\" version of the markup,\n stripping images and references and the like.\n Does some commonsense maintenance as well,\n like collapsing multiple spaces.\n If you specified full_strip=False for WikipediaPage instance,\n some markup is preserved as HTML (links, bold, italic).", "id": "f11594:c8:m5"} {"signature": "def convert_pre(self, markup):", "body": "for m in re.findall(self.re[\"\"], markup):markup = markup.replace(m, m.replace(\"\", \"\"))markup = re.sub(\"\", \"\", markup)markup = re.sub(\"\", \"\", markup)return markup", "docstring": "Substitutes
<pre> to Wikipedia markup by adding a space at the start of a line.", "id": "f11594:c8:m6"}
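Note on f11594:c8:m6 above: the stored body has its string literals stripped, so the actual substitution is not visible in this corpus. As a purely illustrative sketch (the regex and helper name are assumptions, not the library's code), converting an HTML <pre> block to MediaWiki's leading-space convention could look like:

    import re

    def convert_pre(markup):
        # MediaWiki treats a line that starts with a single space as preformatted,
        # so an HTML <pre>...</pre> block can be approximated by prefixing each
        # enclosed line with " " and dropping the tags.
        def repl(match):
            body = match.group(1)
            return "\n".join(" " + line for line in body.splitlines())
        return re.sub(r"<pre>(.*?)</pre>", repl, markup, flags=re.DOTALL)

    # convert_pre("<pre>x = 1\ny = 2</pre>")  ->  " x = 1\n y = 2"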
{"signature": "def convert_li(self, markup):", "body": "for li in re.findall(\"\", markup):markup = re.sub(li, \"\", markup)markup = markup.replace(\"\", \"\")return markup", "docstring": "Subtitutes 
  • content to Wikipedia markup.", "id": "f11594:c8:m7"} {"signature": "def convert_table(self, markup):", "body": "for table in re.findall(self.re[\"\"], markup):wiki = tablewiki = re.sub(r\"\", \"\", wiki)wiki = re.sub(r\"\", \"\", wiki)wiki = re.sub(r\"\", \"\", wiki)wiki = wiki.replace(\"\", \"\")wiki = wiki.replace(\"\", \"\")wiki = wiki.replace(\"\", \"\")markup = markup.replace(table, wiki)return markup", "docstring": "Subtitutes content to Wikipedia markup.", "id": "f11594:c8:m8"} {"signature": "def parse_links(self, markup):", "body": "links = []m = re.findall(self.re[\"\"], markup)for link in m:if link.find(\"\") >= :link = re.sub(\"\", \"\", link)link = link.replace(\"\", \"\")link = link.replace(\"\", \"\") link = link.split(\"\")link[] = link[].split(\"\")page = link[][].strip()if not page in links:links.append(page)links.sort()return links", "docstring": "Returns a list of internal Wikipedia links in the markup.\n\n # A Wikipedia link looks like:\n # [[List of operating systems#Embedded | List of embedded operating systems]]\n # It does not contain a colon, this indicates images, users, languages, etc.\n\n The return value is a list containing the first part of the link,\n without the anchor.", "id": "f11594:c8:m9"} {"signature": "def parse_images(self, markup, treshold=):", "body": "images = []m = re.findall(self.re[\"\"], markup)for p in m:p = self.parse_balanced_image(p)img = p.split(\"\")path = img[].replace(\"\", \"\").strip()description = u\"\"links = {}properties = []if len(img) > :img = \"\".join(img[:])links = self.parse_links(img)properties = self.plain(img).split(\"\")description = u\"\"if len(properties[-]) > treshold:description = properties[-]properties = properties[:-]img = WikipediaImage(path, description, links, properties)images.append(img)markup = markup.replace(p, \"\")return images, markup.strip()", "docstring": "Returns a list of images found in the markup.\n\n An image has a pathname, a description in plain text\n and a list of properties Wikipedia uses to size and place images.\n\n # A Wikipedia image looks like:\n # [[Image:Columbia Supercomputer - NASA Advanced Supercomputing Facility.jpg|right|thumb|\n # The [[NASA]] [[Columbia (supercomputer)|Columbia Supercomputer]].]]\n # Parts are separated by \"|\".\n # The first part is the image file, the last part can be a description.\n # In between are display properties, like \"right\" or \"thumb\".", "id": "f11594:c8:m10"} {"signature": "def parse_balanced_image(self, markup):", "body": "opened = closed = for i in range(len(markup)):if markup[i] == \"\": opened += if markup[i] == \"\": closed += if opened == closed:return markup[:i+]return markup", "docstring": "Corrects Wikipedia image markup.\n\n Images have a description inside their link markup that \n can contain link markup itself, make sure the outer \"[\" and \"]\" brackets \n delimiting the image are balanced correctly (e.g. 
no [[ ]] ]]).\n\n Called from parse_images().", "id": "f11594:c8:m11"} {"signature": "def parse_gallery_images(self, markup):", "body": "gallery = re.search(self.re[\"\"], markup)if gallery:gallery = gallery.group()gallery = gallery.replace(\"\", \"\")gallery = gallery.replace(\"\", \"\")images, markup = self.parse_images(gallery)return imagesreturn []", "docstring": "Parses images from the section.\n\n Images inside tags do not have outer \"[[\" brackets.\n Add these and then parse again.", "id": "f11594:c8:m12"} {"signature": "def parse_paragraph(self, markup):", "body": "s = self.plain(markup)s = re.sub(re.compile(\"\", re.DOTALL), \"\", s)s = re.sub(\"\", \"\", s)chunks = []ch = \"\"i = for chunk in s.split(\"\"):if chunk.startswith(\"\"):chunk = chunk.lstrip(\"\")if len(chunk.strip()) > :if not chunk.startswith(\"\"):ch += chunk + \"\"if ch.strip() != \"\":if not re.search(\"\", chunk):ch = self.parse_paragraph_list(ch)chunks.append(ch.rstrip())ch = \"\"if ch.strip() != \"\":ch = self.parse_paragraph_list(ch)chunks.append(ch.strip())return chunks", "docstring": "Creates a list from lines of text in a paragraph.\n\n Each line of text is a new item in the list,\n except lists and preformatted chunks (
<li> and <pre>
    ),\n        these are kept together as a single chunk.\n\n        Lists are formatted using parse_paragraph_list().\n\n        Empty lines are stripped from the output.\n        Indentation (i.e. lines starting with \":\") is ignored.\n\n        Called from parse_paragraphs() method.", "id": "f11594:c8:m13"}
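To make the chunking rule in f11594:c8:m13 concrete, here is a small hypothetical sketch (names and details are assumptions, not the library's stripped body): ordinary lines each become their own chunk, while consecutive list or preformatted lines are kept together as one chunk.

    def chunk_paragraph(text):
        chunks, block = [], []
        def flush():
            # close the current list/preformatted block, if any
            if block:
                chunks.append("\n".join(block))
                del block[:]
        for line in text.split("\n"):
            if not line.strip():
                flush()                      # empty lines are dropped
            elif line.startswith(("*", "#", " ")):
                block.append(line)           # list item or preformatted line: group it
            else:
                flush()
                chunks.append(line.strip())  # plain line: a chunk of its own
        flush()
        return chunks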
    {"signature": "def parse_paragraph_list(self, markup, indent=\"\"):", "body": "def lastleft(ch, str):n = while n < len(str) and str[n] == ch: n += return n        tally = [ for i in range()]chunks = markup.split(\"\")for i in range(len(chunks)):if chunks[i].startswith(\"\"):j = min(lastleft(\"\", chunks[i]), len(tally)-)chunks[i] = indent*(j-) + str(tally[j])+\"\" + chunks[i][j:]chunks[i] = chunks[i].replace(\"\", \"\")tally[j] += for k in range(j+, len(tally)): tally[k] = if chunks[i].startswith(\"\"):chunks[i] = \"\" + chunks[i][:]if chunks[i].startswith(\"\"):j = lastleft(\"\", chunks[i])  chunks[i] = indent*(j-) + \"\" + chunks[i][j:]chunks[i] = chunks[i].replace(\"\", \"\")return \"\".join(chunks)", "docstring": "Formats bullets and numbering of Wikipedia lists.\n\n        List items are marked by \"*\", \"#\" or \";\" at the start of a line.\n        We treat \";\" the same as \"*\",\n        and replace \"#\" with real numbering (e.g. \"2.\").\n        Sublists (e.g. *** and ###) get indented by tabs.\n\n        Called from parse_paragraphs() method.", "id": "f11594:c8:m14"}
    {"signature": "def parse_paragraph_heading_depth(self, markup):", "body": "return markup.count(\"\")/ - ", "docstring": "Returns the depth of a heading.\n\n        The depth determines parent and child relations,\n        which headings (and hence which paragraphs) are a child to a heading higher up.\n        Returns 0 for 

    =, 1 for

    ==, etc.\n\n Called from parse_paragraphs() method.", "id": "f11594:c8:m15"} {"signature": "def connect_paragraph(self, paragraph, paragraphs):", "body": "if paragraph.depth > :n = range(len(paragraphs))n.reverse()for i in n:if paragraphs[i].depth == paragraph.depth-:paragraph.parent = paragraphs[i]paragraphs[i].children.append(paragraph)breakreturn paragraph", "docstring": "Create parent/child links to other paragraphs.\n\n The paragraphs parameters is a list of all the paragraphs\n parsed up till now.\n\n The parent is the previous paragraph whose depth is less.\n The parent's children include this paragraph.\n\n Called from parse_paragraphs() method.", "id": "f11594:c8:m16"} {"signature": "def parse_paragraph_references(self, markup):", "body": "for chunk in markup.split(\"\"):m = re.search(self.ref+\"\", chunk)if m:chunk = chunk.strip(\"\")chunk = chunk.replace(m.group(), \"\")chunk = self.plain(chunk)i = int(m.group())if chunk != \"\":self.references[i-].note = chunkelif chunk.strip().startswith(\"\")and chunk.find(\"\") < :chunk = chunk.strip(\"\")chunk = self.plain(chunk)if chunk != \"\":r = WikipediaReference()r.note = chunkself.references.append(r)", "docstring": "Updates references with content from specific paragraphs.\n\n The \"references\", \"notes\", \"external links\" paragraphs \n are double-checked for references. Not all items in the list\n might have been referenced inside the article, or the item\n might contain more info than we initially parsed from it.\n\n Called from parse_paragraphs() method.", "id": "f11594:c8:m17"} {"signature": "def parse_paragraphs(self, markup):", "body": "refs = [\"\", \"\", \"\", \"\", \"\"]exclude = [\"\", \"\", \"\", \"\", \"\", \"\", \"\"]exclude.extend(refs)paragraphs = []paragraph = WikipediaParagraph(self.title)paragraph_data = \"\"for chunk in markup.split(\"\"):if not chunk.startswith(\"\"):chunk = chunk.strip()if chunk.startswith(\"\"):if paragraph.title.lower() in refsor (paragraph.parent and paragraph.parent.title.lower() in refs):self.parse_paragraph_references(paragraph_data)paragraph.extend(self.parse_paragraph(paragraph_data))paragraphs.append(paragraph)title = chunk.strip().strip(\"\")title = self.plain(title)paragraph = WikipediaParagraph(title)paragraph.depth = self.parse_paragraph_heading_depth(chunk)if paragraph.title.lower() not in exclude:paragraph = self.connect_paragraph(paragraph, paragraphs)paragraph_data = \"\"elif re.search(re.compile(\"\", re.I), chunk):paragraph.main = [link.strip(\"\") for link in chunk.split(\"\")[:]]paragraph.main = [re.sub(re.compile(\"\", re.I), \"\", link) for link in paragraph.main]elif re.search(re.compile(\"\", re.I), chunk):paragraph.related = [link.strip(\"\") for link in chunk.split(\"\")[:]]else:paragraph_data += chunk +\"\"if paragraph.title.lower() in refsor (paragraph.parent and paragraph.parent.title.lower() in refs):self.parse_paragraph_references(paragraph_data)paragraph.extend(self.parse_paragraph(paragraph_data))paragraphs.append(paragraph)paragraphs_exclude = []for paragraph in paragraphs:if paragraph.title.lower() not in excludeand not (paragraph.parent and paragraph.parent.title.lower() in exclude):paragraphs_exclude.append(paragraph)if len(paragraphs_exclude) == andlen(paragraphs_exclude[]) == :return []return paragraphs_exclude", "docstring": "Returns a list of paragraphs in the markup.\n\n A paragraph has a title and multiple lines of plain text.\n A paragraph might have parent and child paragraphs,\n denoting subtitles or bigger chapters.\n\n A paragraph might 
have links to additional articles.\n\n Formats numbered lists by replacing # by 1.\n Formats bulleted sublists like ** or *** with indentation.", "id": "f11594:c8:m18"} {"signature": "def parse_table_row(self, markup, row):", "body": "if row == None:row = WikipediaTableRow()markup = markup.replace(\"\", \"\")for cell in markup.lstrip(\"\").split(\"\"):i = cell.find(\"\")j = cell.find(\"\")if i> and (j< or idata = self.plain(cell[i+:])properties = cell[:i].strip()else:data = self.plain(cell)properties = u\"\"cell = WikipediaTableCell(data)cell.properties = propertiesrow.append(cell)return row", "docstring": "Parses a row of cells in a Wikipedia table.\n\n Cells in the row are separated by \"||\".\n A \"!\" indicates a row of heading columns.\n Each cell can contain properties before a \"|\",\n # e.g. align=\"right\" | Cell 2 (right aligned).", "id": "f11594:c8:m19"} {"signature": "def connect_table(self, table, chunk, markup):", "body": "k = markup.find(chunk)i = markup.rfind(\"\", , k)j = markup.find(\"\", i+)paragraph_title = markup[i:j].strip().strip(\"\")for paragraph in self.paragraphs:if paragraph.title == paragraph_title:paragraph.tables.append(table)table.paragraph = paragraph", "docstring": "Creates a link from the table to paragraph and vice versa.\n\n Finds the first heading above the table in the markup.\n This is the title of the paragraph the table belongs to.", "id": "f11594:c8:m20"} {"signature": "def parse_tables(self, markup):", "body": "tables = []m = re.findall(self.re[\"\"], markup)for chunk in m:table = WikipediaTable()table.properties = chunk.split(\"\")[].strip(\"\").strip()self.connect_table(table, chunk, markup)row = Nonefor chunk in chunk.split(\"\"):chunk = chunk.strip()if chunk.startswith(\"\"):title = self.plain(chunk.strip(\"\"))table.title = titleelif chunk.startswith(\"\"):if row: row.properties = chunk.strip(\"\").strip()table.append(row)row = Noneelif chunk.startswith(\"\"):passelif chunk.startswith(\"\")or chunk.startswith(\"\"):row = self.parse_table_row(chunk, row)if row: table.append(row)if len(table) > :tables.append(table)return tables", "docstring": "Returns a list of tables in the markup.\n\n A Wikipedia table looks like:\n {| border=\"1\"\n |-\n |Cell 1 (no modifier - not aligned)\n |-\n |align=\"right\" |Cell 2 (right aligned)\n |-\n |}", "id": "f11594:c8:m21"} {"signature": "def parse_references(self, markup):", "body": "references = []m = re.findall(self.re[\"\"], markup)for reference in m:reference = re.sub(\"\", \"\", reference)if not reference.strip().startswith(\"\") andnot re.search(\"\", reference):r = WikipediaReference()r.note = self.plain(re.sub(\"\", \"\", reference))if r.note != \"\":references.append(r)p = \"\"+self.ref+\"\"+str(len(references))+\"\"markup = markup.replace(reference, p, )else:passm = re.findall(self.re[\"\"], markup)for citation in m:c = citation.replace(\"\", \"\")r = WikipediaReference()for key in r.__dict__.keys():value = re.search(\"\"+key+\"\", c)if value:value = value.group()value = value.replace(\"\", \"\")value = value.strip().strip(\"\")value = self.plain(value)setattr(r, key, value)if r.first != \"\" and r.last != \"\":r.author = r.first + \"\" + r.lastreferences.append(r)p = \"\"+self.ref+\"\"+str(len(references))+\"\"markup = markup.replace(citation, p, )m = re.findall(self.re[\"\"], markup)for url in m:r = WikipediaReference()i = url.find(\"\")if i > :r.url = url[:i].strip()r.note = self.plain(url[i:])else:r.url = url.strip()references.append(r)p = 
r.note+\"\"+self.ref+\"\"+str(len(references))+\"\"markup = markup.replace(\"\"+url+\"\", p, )sorted = []m = re.findall(self.ref+\"\", markup)for i in m:sorted.append(references[int(i)-])markup = markup.replace(self.ref+\"\"+i+\"\", self.ref+\"\"+str(len(sorted))+\"\")markup = markup.replace(self.ref+\"\", self.ref)for r in references:if r not in sorted:sorted.append(r)references = sortedreturn references, markup.strip()", "docstring": "Returns a list of references found in the markup.\n\n References appear inline as footnotes, \n http:// external links, or {{cite}} citations.\n We replace it with (1)-style footnotes.\n Additional references data is gathered in\n parse_paragraph_references() when we parse paragraphs.\n\n References can also appear in image descriptions,\n tables and taxoboxes, so they might not always pop up in a paragraph.\n\n The plain() method finally replaces (1) by [1].", "id": "f11594:c8:m22"} {"signature": "def parse_categories(self, markup):", "body": "categories = []m = re.findall(self.re[\"\"], markup)for category in m:category = category.split(\"\")page = category[].strip()display = u\"\"if len(category) > : display = category[].strip()if not page in categories:categories.append(page)return categories", "docstring": "Returns a list of categories the page belongs to.\n\n # A Wikipedia category link looks like:\n # [[Category:Computing]]\n # This indicates the page is included in the given category.\n # If \"Category\" is preceded by \":\" this indicates a link to a category.", "id": "f11594:c8:m23"} {"signature": "def parse_translations(self, markup):", "body": "global languagestranslations = {}m = re.findall(self.re[\"\"], markup)for language, translation in m:if language in languages:translations[language] = translationreturn translations", "docstring": "Returns a dictionary of translations for the page title.\n\n A Wikipedia language link looks like: [[af:Rekenaar]].\n The parser will also fetch links like \"user:\" and \"media:\"\n but these are stripped against the dictionary of\n Wikipedia languages.\n\n You can get a translated page by searching Wikipedia\n with the appropriate language code and supplying\n the translated title as query.", "id": "f11594:c8:m24"} {"signature": "def parse_disambiguation(self, markup):", "body": "m = re.search(self.re[\"\"], markup)if m:return self.parse_links(m.group())else:return []", "docstring": "Gets the Wikipedia disambiguation page for this article.\n\n A Wikipedia disambiguation link refers to other pages\n with the same title but of smaller significance,\n e.g. 
{{dablink|For the IEEE magazine see [[Computer (magazine)]].}}", "id": "f11594:c8:m25"} {"signature": "def parse_important(self, markup):", "body": "important = []table_titles = [table.title for table in self.tables]m = re.findall(self.re[\"\"], markup)for bold in m:bold = self.plain(bold)if not bold in table_titles:important.append(bold.lower())return important", "docstring": "Returns a list of words that appear in bold in the article.\n\n Things like table titles are not added to the list,\n these are probably bold because it makes the layout nice,\n not necessarily because they are important.", "id": "f11594:c8:m26"} {"signature": "def __init__(self, q, language=\"\", light=False, wait=, asynchronous=False, cached=True,case_sensitive=False, full_strip=True):", "body": "self._light = lightself._full_strip = full_stripif cached: cache = \"\"else:cache = Noneif not case_sensitive:q = str(q.lower())q = q.replace(\"\", \"\")url = self._api_request(q, language)URLAccumulator.__init__(self, url, wait, asynchronous, cache, type=\"\", throttle=)", "docstring": "A download manager for Wikipedia pages.\n\n WikipediaSearch is a combination of\n URLAccumulator that handles asynchronous and cached web downloads and\n WikipediaPage that parses XML retrieved from the Wikipedia API.\n\n Retrieves the latest revision.\n Redirects are handled by the Wikipedia server.", "id": "f11594:c9:m1"} {"signature": "def isList(l):", "body": "return hasattr(l, '')or (type(l) in (list, tuple))", "docstring": "Convenience method that works with all 2.x versions of Python\n to determine whether or not something is listlike.", "id": "f11596:m0"} {"signature": "def isString(s):", "body": "try:return isinstance(s, str) or isinstance(s, str)except NameError:return isinstance(s, str)", "docstring": "Convenience method that works with all 2.x versions of Python\n to determine whether or not something is stringlike.", "id": "f11596:m1"} {"signature": "def buildTagMap(default, *args):", "body": "built = {}for portion in args:if hasattr(portion, ''):for k,v in list(portion.items()):built[k] = velif isList(portion):for k in portion:built[k] = defaultelse:built[portion] = defaultreturn built", "docstring": "Turns a list of maps, lists, or scalars into a single map.\n Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and\n NESTING_RESET_TAGS maps out of lists and partial maps.", "id": "f11596:m2"} {"signature": "def setup(self, parent=None, previous=None):", "body": "self.parent = parentself.previous = previousself.next = Noneself.previousSibling = Noneself.nextSibling = Noneif self.parent and self.parent.contents:self.previousSibling = self.parent.contents[-]self.previousSibling.nextSibling = self", "docstring": "Sets up the initial relations between this element and\n other elements.", "id": "f11596:c0:m0"} {"signature": "def extract(self):", "body": "if self.parent:try:self.parent.contents.remove(self)except ValueError:passlastChild = self._lastRecursiveChild()nextElement = lastChild.__next__if self.previous:self.previous.next = nextElementif nextElement:nextElement.previous = self.previousself.previous = NonelastChild.next = Noneself.parent = Noneif self.previousSibling:self.previousSibling.nextSibling = self.nextSiblingif self.nextSibling:self.nextSibling.previousSibling = self.previousSiblingself.previousSibling = self.nextSibling = Nonereturn self", "docstring": "Destructively rips this element out of the tree.", "id": "f11596:c0:m2"} {"signature": "def _lastRecursiveChild(self):", "body": "lastChild = selfwhile 
hasattr(lastChild, '') and lastChild.contents:lastChild = lastChild.contents[-]return lastChild", "docstring": "Finds the last element beneath this object to be parsed.", "id": "f11596:c0:m3"} {"signature": "def append(self, tag):", "body": "self.insert(len(self.contents), tag)", "docstring": "Appends the given tag to the contents of this tag.", "id": "f11596:c0:m5"} {"signature": "def findNext(self, name=None, attrs={}, text=None, **kwargs):", "body": "return self._findOne(self.findAllNext, name, attrs, text, **kwargs)", "docstring": "Returns the first item that matches the given criteria and\n appears after this Tag in the document.", "id": "f11596:c0:m6"} {"signature": "def findAllNext(self, name=None, attrs={}, text=None, limit=None,**kwargs):", "body": "return self._findAll(name, attrs, text, limit, self.nextGenerator,**kwargs)", "docstring": "Returns all items that match the given criteria and appear\n after this Tag in the document.", "id": "f11596:c0:m7"} {"signature": "def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):", "body": "return self._findOne(self.findNextSiblings, name, attrs, text,**kwargs)", "docstring": "Returns the closest sibling to this Tag that matches the\n given criteria and appears after this Tag in the document.", "id": "f11596:c0:m8"} {"signature": "def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,**kwargs):", "body": "return self._findAll(name, attrs, text, limit,self.nextSiblingGenerator, **kwargs)", "docstring": "Returns the siblings of this Tag that match the given\n criteria and appear after this Tag in the document.", "id": "f11596:c0:m9"} {"signature": "def findPrevious(self, name=None, attrs={}, text=None, **kwargs):", "body": "return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)", "docstring": "Returns the first item that matches the given criteria and\n appears before this Tag in the document.", "id": "f11596:c0:m10"} {"signature": "def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,**kwargs):", "body": "return self._findAll(name, attrs, text, limit, self.previousGenerator,**kwargs)", "docstring": "Returns all items that match the given criteria and appear\n before this Tag in the document.", "id": "f11596:c0:m11"} {"signature": "def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):", "body": "return self._findOne(self.findPreviousSiblings, name, attrs, text,**kwargs)", "docstring": "Returns the closest sibling to this Tag that matches the\n given criteria and appears before this Tag in the document.", "id": "f11596:c0:m12"} {"signature": "def findPreviousSiblings(self, name=None, attrs={}, text=None,limit=None, **kwargs):", "body": "return self._findAll(name, attrs, text, limit,self.previousSiblingGenerator, **kwargs)", "docstring": "Returns the siblings of this Tag that match the given\n criteria and appear before this Tag in the document.", "id": "f11596:c0:m13"} {"signature": "def findParent(self, name=None, attrs={}, **kwargs):", "body": "r = Nonel = self.findParents(name, attrs, )if l:r = l[]return r", "docstring": "Returns the closest parent of this Tag that matches the given\n criteria.", "id": "f11596:c0:m14"} {"signature": "def findParents(self, name=None, attrs={}, limit=None, **kwargs):", "body": "return self._findAll(name, attrs, None, limit, self.parentGenerator,**kwargs)", "docstring": "Returns the parents of this Tag that match the given\n criteria.", "id": "f11596:c0:m15"} {"signature": "def _findAll(self, name, attrs, text, limit, 
generator, **kwargs):", "body": "if isinstance(name, SoupStrainer):strainer = nameelse:strainer = SoupStrainer(name, attrs, text, **kwargs)results = ResultSet(strainer)g = generator()while True:try:i = next(g)except StopIteration:breakif i:found = strainer.search(i)if found:results.append(found)if limit and len(results) >= limit:breakreturn results", "docstring": "Iterates over a generator looking for things that match.", "id": "f11596:c0:m17"} {"signature": "def toEncoding(self, s, encoding=None):", "body": "if isinstance(s, str):if encoding:s = s.encode(encoding)elif isinstance(s, str):if encoding:s = s.encode(encoding)else:s = str(s)else:if encoding:s = self.toEncoding(str(s), encoding)else:s = str(s)return s", "docstring": "Encodes an object to a string in some encoding, or to Unicode.\n .", "id": "f11596:c0:m24"} {"signature": "def __new__(cls, value):", "body": "if isinstance(value, str):return str.__new__(cls, value)return str.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)", "docstring": "Create a new NavigableString.\n\n When unpickling a NavigableString, this method is called with\n the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be\n passed in to the superclass's __new__ or the superclass won't know\n how to handle non-ASCII characters.", "id": "f11596:c1:m0"} {"signature": "def __getattr__(self, attr):", "body": "if attr == '':return selfelse:raise AttributeError(\"\" % (self.__class__.__name__, attr))", "docstring": "text.string gives you text. This is for backwards\n compatibility for Navigable*String, but for CData* it lets you\n get the string without the CData wrapper.", "id": "f11596:c1:m2"} {"signature": "def _invert(h):", "body": "i = {}for k,v in list(h.items()):i[v] = kreturn i", "docstring": "Cheap function to invert a hash.", "id": "f11596:c6:m0"} {"signature": "def _convertEntities(self, match):", "body": "x = match.group()if self.convertHTMLEntities and x in name2codepoint:return chr(name2codepoint[x])elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:if self.convertXMLEntities:return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]else:return '' % xelif len(x) > and x[] == '':if len(x) > and x[] == '':return chr(int(x[:], ))else:return chr(int(x[:]))elif self.escapeUnrecognizedEntities:return '' % xelse:return '' % x", "docstring": "Used in a call to re.sub to replace HTML, XML, and numeric\n entities with the appropriate Unicode characters. 
If HTML\n entities are being converted, any unrecognized entities are\n escaped.", "id": "f11596:c6:m1"} {"signature": "def __init__(self, parser, name, attrs=None, parent=None,previous=None):", "body": "self.parserClass = parser.__class__self.isSelfClosing = parser.isSelfClosingTag(name)self.name = nameif attrs == None:attrs = []self.attrs = attrsself.contents = []self.setup(parent, previous)self.hidden = Falseself.containsSubstitutions = Falseself.convertHTMLEntities = parser.convertHTMLEntitiesself.convertXMLEntities = parser.convertXMLEntitiesself.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntitiesconvert = lambda k_val: (k_val[],re.sub(\"\",self._convertEntities,k_val[]))self.attrs = list(map(convert, self.attrs))", "docstring": "Basic constructor.", "id": "f11596:c6:m2"} {"signature": "def get(self, key, default=None):", "body": "return self._getAttrMap().get(key, default)", "docstring": "Returns the value of the 'key' attribute for the tag, or\n the value given for 'default' if it doesn't have that\n attribute.", "id": "f11596:c6:m3"} {"signature": "def __getitem__(self, key):", "body": "return self._getAttrMap()[key]", "docstring": "tag[key] returns the value of the 'key' attribute for the tag,\n and throws an exception if it's not there.", "id": "f11596:c6:m5"} {"signature": "def __iter__(self):", "body": "return iter(self.contents)", "docstring": "Iterating over a tag iterates over its contents.", "id": "f11596:c6:m6"} {"signature": "def __len__(self):", "body": "return len(self.contents)", "docstring": "The length of a tag is the length of its list of contents.", "id": "f11596:c6:m7"} {"signature": "def __bool__(self):", "body": "return True", "docstring": "A tag is non-None even if it has no contents.", "id": "f11596:c6:m9"} {"signature": "def __setitem__(self, key, value):", "body": "self._getAttrMap()self.attrMap[key] = valuefound = Falsefor i in range(, len(self.attrs)):if self.attrs[i][] == key:self.attrs[i] = (key, value)found = Trueif not found:self.attrs.append((key, value))self._getAttrMap()[key] = value", "docstring": "Setting tag[key] sets the value of the 'key' attribute for the\n tag.", "id": "f11596:c6:m10"} {"signature": "def __delitem__(self, key):", "body": "for item in self.attrs:if item[] == key:self.attrs.remove(item)self._getAttrMap()if key in self.attrMap:del self.attrMap[key]", "docstring": "Deleting tag[key] deletes all 'key' attributes for the tag.", "id": "f11596:c6:m11"} {"signature": "def __call__(self, *args, **kwargs):", "body": "return self.findAll(*args, **kwargs)", "docstring": "Calling a tag like a function is the same as calling its\n findAll() method. Eg. tag('a') returns a list of all the A tags\n found within this tag.", "id": "f11596:c6:m12"} {"signature": "def __eq__(self, other):", "body": "if not hasattr(other, '') or not hasattr(other, '') or not hasattr(other, '') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):return Falsefor i in range(, len(self.contents)):if self.contents[i] != other.contents[i]:return Falsereturn True", "docstring": "Returns true iff this tag has the same name, the same attributes,\n and the same contents (recursively) as the given tag.\n\n NOTE: right now this will return false if two tags have the\n same attributes in a different order. 
Should this be fixed?", "id": "f11596:c6:m14"} {"signature": "def __ne__(self, other):", "body": "return not self == other", "docstring": "Returns true iff this tag is not identical to the other tag,\n as defined in __eq__.", "id": "f11596:c6:m15"} {"signature": "def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):", "body": "return self.__str__(encoding)", "docstring": "Renders this tag as a string.", "id": "f11596:c6:m16"} {"signature": "def _sub_entity(self, x):", "body": "return \"\" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group()[]] + \"\"", "docstring": "Used with a regular expression to substitute the\n appropriate XML entity for an XML special character.", "id": "f11596:c6:m18"} {"signature": "def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,prettyPrint=False, indentLevel=):", "body": "encodedName = self.toEncoding(self.name, encoding)attrs = []if self.attrs:for key, val in self.attrs:fmt = ''if isString(val):if self.containsSubstitutions and '' in val:val = self.substituteEncoding(val, encoding)if '' in val:fmt = \"\"if \"\" in val:val = val.replace(\"\", \"\")val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)attrs.append(fmt % (self.toEncoding(key, encoding),self.toEncoding(val, encoding)))close = ''closeTag = ''if self.isSelfClosing:close = ''else:closeTag = '' % encodedNameindentTag, indentContents = , if prettyPrint:indentTag = indentLevelspace = ('' * (indentTag-))indentContents = indentTag + contents = self.renderContents(encoding, prettyPrint, indentContents)if self.hidden:s = contentselse:s = []attributeString = ''if attrs:attributeString = '' + ''.join(attrs)if prettyPrint:s.append(space)s.append('' % (encodedName, attributeString, close))if prettyPrint:s.append(\"\")s.append(contents)if prettyPrint and contents and contents[-] != \"\":s.append(\"\")if prettyPrint and closeTag:s.append(space)s.append(closeTag)if prettyPrint and closeTag and self.nextSibling:s.append(\"\")s = ''.join(s)return s", "docstring": "Returns a string or Unicode representation of this tag and\n its contents. To get Unicode, pass None for encoding.\n\n NOTE: since Python's HTML parser consumes whitespace, this\n method is not certain to reproduce the whitespace present in\n the original string.", "id": "f11596:c6:m19"} {"signature": "def decompose(self):", "body": "contents = [i for i in self.contents]for i in contents:if isinstance(i, Tag):i.decompose()else:i.extract()self.extract()", "docstring": "Recursively destroys the contents of this tree.", "id": "f11596:c6:m20"} {"signature": "def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,prettyPrint=False, indentLevel=):", "body": "s=[]for c in self:text = Noneif isinstance(c, NavigableString):text = c.__str__(encoding)elif isinstance(c, Tag):s.append(c.__str__(encoding, prettyPrint, indentLevel))if text and prettyPrint:text = text.strip()if text:if prettyPrint:s.append(\"\" * (indentLevel-))s.append(text)if prettyPrint:s.append(\"\")return ''.join(s)", "docstring": "Renders the contents of this tag as a string in the given\n encoding. 
If encoding is None, returns a Unicode string..", "id": "f11596:c6:m22"} {"signature": "def find(self, name=None, attrs={}, recursive=True, text=None,**kwargs):", "body": "r = Nonel = self.findAll(name, attrs, recursive, text, , **kwargs)if l:r = l[]return r", "docstring": "Return only the first child of this Tag matching the given\n criteria.", "id": "f11596:c6:m23"} {"signature": "def findAll(self, name=None, attrs={}, recursive=True, text=None,limit=None, **kwargs):", "body": "generator = self.recursiveChildGeneratorif not recursive:generator = self.childGeneratorreturn self._findAll(name, attrs, text, limit, generator, **kwargs)", "docstring": "Extracts a list of Tag objects that match the given\n criteria. You can specify the name of the Tag and any\n attributes you want the Tag to have.\n\n The value of a key-value pair in the 'attrs' map can be a\n string, a list of strings, a regular expression object, or a\n callable that takes a string and returns whether or not the\n string matches for some custom definition of 'matches'. The\n same is true of the tag name.", "id": "f11596:c6:m24"} {"signature": "def _getAttrMap(self):", "body": "if not getattr(self, ''):self.attrMap = {}for (key, value) in self.attrs:self.attrMap[key] = valuereturn self.attrMap", "docstring": "Initializes a map representation of this tag's attributes,\n if not already initialized.", "id": "f11596:c6:m27"} {"signature": "def __init__(self, markup=\"\", parseOnlyThese=None, fromEncoding=None,markupMassage=True, smartQuotesTo=XML_ENTITIES,convertEntities=None, selfClosingTags=None, isHTML=False):", "body": "self.parseOnlyThese = parseOnlyTheseself.fromEncoding = fromEncodingself.smartQuotesTo = smartQuotesToself.convertEntities = convertEntitiesif self.convertEntities:self.smartQuotesTo = Noneif convertEntities == self.HTML_ENTITIES:self.convertXMLEntities = Falseself.convertHTMLEntities = Trueself.escapeUnrecognizedEntities = Trueelif convertEntities == self.XHTML_ENTITIES:self.convertXMLEntities = Trueself.convertHTMLEntities = Trueself.escapeUnrecognizedEntities = Falseelif convertEntities == self.XML_ENTITIES:self.convertXMLEntities = Trueself.convertHTMLEntities = Falseself.escapeUnrecognizedEntities = Falseelse:self.convertXMLEntities = Falseself.convertHTMLEntities = Falseself.escapeUnrecognizedEntities = Falseself.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)SGMLParser.__init__(self)if hasattr(markup, ''): markup = markup.read()self.markup = markupself.markupMassage = markupMassagetry:self._feed(isHTML=isHTML)except StopParsing:passself.markup = None ", "docstring": "The Soup object is initialized as the 'root tag', and the\n provided markup (which can be a string or a file-like object)\n is fed into the underlying parser.\n\n sgmllib will process most bad HTML, and the BeautifulSoup\n class has some tricks for dealing with some HTML that kills\n sgmllib, but Beautiful Soup can nonetheless choke or lose data\n if your data uses self-closing tags or declarations\n incorrectly.\n\n By default, Beautiful Soup uses regexes to sanitize input,\n avoiding the vast majority of these problems. If the problems\n don't apply to you, pass in False for markupMassage, and\n you'll get better performance.\n\n The default parser massage techniques fix the two most common\n instances of invalid HTML that choke sgmllib:\n\n
    (No space between name of closing tag and tag close)\n (Extraneous whitespace in declaration)\n\n You can pass in a custom list of (RE object, replace method)\n tuples to get Beautiful Soup to scrub your input the way you\n want.", "id": "f11596:c9:m0"} {"signature": "def convert_charref(self, name):", "body": "try:n = int(name)except ValueError:returnif not <= n <= : returnreturn self.convert_codepoint(n)", "docstring": "This method fixes a bug in Python's SGMLParser.", "id": "f11596:c9:m1"} {"signature": "def __getattr__(self, methodName):", "body": "if methodName.find('') == or methodName.find('') == or methodName.find('') == :return SGMLParser.__getattr__(self, methodName)elif methodName.find('') != :return Tag.__getattr__(self, methodName)else:raise AttributeError", "docstring": "This method routes method call requests to either the SGMLParser\n superclass or the Tag superclass, depending on the method name.", "id": "f11596:c9:m3"} {"signature": "def isSelfClosingTag(self, name):", "body": "return name in self.SELF_CLOSING_TAGSor name in self.instanceSelfClosingTags", "docstring": "Returns true iff the given string is the name of a\n self-closing tag according to this parser.", "id": "f11596:c9:m4"} {"signature": "def _popToTag(self, name, inclusivePop=True):", "body": "if name == self.ROOT_TAG_NAME:returnnumPops = mostRecentTag = Nonefor i in range(len(self.tagStack)-, , -):if name == self.tagStack[i].name:numPops = len(self.tagStack)-ibreakif not inclusivePop:numPops = numPops - for i in range(, numPops):mostRecentTag = self.popTag()return mostRecentTag", "docstring": "Pops the tag stack up to and including the most recent\n instance of the given tag. If inclusivePop is false, pops the tag\n stack up to but *not* including the most recent instqance of\n the given tag.", "id": "f11596:c9:m9"} {"signature": "def _smartPop(self, name):", "body": "nestingResetTriggers = self.NESTABLE_TAGS.get(name)isNestable = nestingResetTriggers != NoneisResetNesting = name in self.RESET_NESTING_TAGSpopTo = Noneinclusive = Truefor i in range(len(self.tagStack)-, , -):p = self.tagStack[i]if (not p or p.name == name) and not isNestable:popTo = namebreakif (nestingResetTriggers != Noneand p.name in nestingResetTriggers)or (nestingResetTriggers == None and isResetNestingand p.name in self.RESET_NESTING_TAGS):popTo = p.nameinclusive = Falsebreakp = p.parentif popTo:self._popToTag(popTo, inclusive)", "docstring": "We need to pop up to the previous tag of this type, unless\n one of this tag's nesting reset triggers comes between this\n tag and the previous tag of this type, OR unless this tag is a\n generic nesting trigger and another generic nesting trigger\n comes between this tag and the previous tag of this type.\n\n Examples:\n

FooBar * should pop to 'p', not 'b'.\n Foo Bar * should pop to 'table', not 'p'.\n Foo Bar * should pop to 'tr', not 'p'.\n * should pop to 'ul', not the first 'li'.\n ** should pop to 'table', not the first 'tr'\n ** should pop to 'tr', not the first 'td'", "id": "f11596:c9:m10"}
{"signature": "def table_cell(self, content, **flags):", "body": "return '' + content + ''", "docstring": "Rendering a table cell. Like ````.\n\n :param content: content of current table row.", "id": "f14379:c4:m11"}
{"signature": "def table_cell(self, content, **flags):", "body": "if flags['']:tag = ''else:tag = ''align = flags['']if not align:return '' % (tag, content, tag)return '' % (tag, align, content, tag)", "docstring": "Rendering a table cell. Like ````.\n\n :param content: content of current table row.", "id": "f14381:c4:m11"}
{"signature": "def _toStringSubclass(self, text, subclass):", "body": "self.endData()self.handle_data(text)self.endData(subclass)", "docstring": "Adds a certain piece of text to the tree as a NavigableString\n subclass.", "id": "f11596:c9:m14"} {"signature": "def handle_pi(self, text):", "body": "if text[:] == \"\":text = \"\"self._toStringSubclass(text, ProcessingInstruction)", "docstring": "Handle a processing instruction as a ProcessingInstruction\n object, possibly one with a %SOUP-ENCODING% slot into which an\n encoding will be plugged later.", "id": "f11596:c9:m15"} {"signature": "def handle_comment(self, text):", "body": "self._toStringSubclass(text, Comment)", "docstring": "Handle comments as Comment objects.", "id": "f11596:c9:m16"} {"signature": "def handle_charref(self, ref):", "body": "if self.convertEntities:data = chr(int(ref))else:data = '' % refself.handle_data(data)", "docstring": "Handle character references as data.", "id": "f11596:c9:m17"} {"signature": "def handle_entityref(self, ref):", "body": "data = Noneif self.convertHTMLEntities:try:data = chr(name2codepoint[ref])except KeyError:passif not data and self.convertXMLEntities:data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)if not data and self.convertHTMLEntities andnot self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):data = \"\" % refif not data:data = \"\" % refself.handle_data(data)", "docstring": "Handle entity references as data, possibly converting known\n HTML and/or XML entity references to the corresponding Unicode\n characters.", "id": "f11596:c9:m18"} {"signature": "def handle_decl(self, data):", "body": "self._toStringSubclass(data, Declaration)", "docstring": "Handle DOCTYPEs and the like as Declaration objects.", "id": "f11596:c9:m19"} {"signature": "def parse_declaration(self, i):", "body": "j = Noneif self.rawdata[i:i+] == '':k = self.rawdata.find('', i)if k == -:k = len(self.rawdata)data = self.rawdata[i+:k]j = k+self._toStringSubclass(data, CData)else:try:j = SGMLParser.parse_declaration(self, i)except SGMLParseError:toHandle = self.rawdata[i:]self.handle_data(toHandle)j = i + len(toHandle)return j", "docstring": "Treat a bogus SGML declaration as raw data.
Treat a CDATA\n declaration as a CData object.", "id": "f11596:c9:m20"} {"signature": "def start_meta(self, attrs):", "body": "httpEquiv = NonecontentType = NonecontentTypeIndex = NonetagNeedsEncodingSubstitution = Falsefor i in range(, len(attrs)):key, value = attrs[i]key = key.lower()if key == '':httpEquiv = valueelif key == '':contentType = valuecontentTypeIndex = iif httpEquiv and contentType: match = self.CHARSET_RE.search(contentType)if match:if (self.declaredHTMLEncoding is not None orself.originalEncoding == self.fromEncoding):def rewrite(match):return match.group() + \"\"newAttr = self.CHARSET_RE.sub(rewrite, contentType)attrs[contentTypeIndex] = (attrs[contentTypeIndex][],newAttr)tagNeedsEncodingSubstitution = Trueelse:newCharset = match.group()if newCharset and newCharset != self.originalEncoding:self.declaredHTMLEncoding = newCharsetself._feed(self.declaredHTMLEncoding)raise StopParsingpasstag = self.unknown_starttag(\"\", attrs)if tag and tagNeedsEncodingSubstitution:tag.containsSubstitutions = True", "docstring": "Beautiful Soup can detect a charset included in a META tag,\n try to convert the document to that charset, and re-parse the\n document from the beginning.", "id": "f11596:c10:m1"} {"signature": "def _subMSChar(self, orig):", "body": "sub = self.MS_CHARS.get(orig)if type(sub) == tuple:if self.smartQuotesTo == '':sub = '' % sub[]else:sub = '' % sub[]return sub", "docstring": "Changes a MS smart quote character to an XML or HTML\n entity.", "id": "f11596:c20:m1"} {"signature": "def _toUnicode(self, data, encoding):", "body": "if (len(data) >= ) and (data[:] == '')and (data[:] != ''):encoding = ''data = data[:]elif (len(data) >= ) and (data[:] == '')and (data[:] != ''):encoding = ''data = data[:]elif data[:] == '':encoding = ''data = data[:]elif data[:] == '':encoding = ''data = data[:]elif data[:] == '':encoding = ''data = data[:]newdata = str(data, encoding)return newdata", "docstring": "Given a string and its encoding, decodes the string into Unicode.\n %encoding is a string recognized by encodings.aliases", "id": "f11596:c20:m3"} {"signature": "def _detectEncoding(self, xml_data, isHTML=False):", "body": "xml_encoding = sniffed_xml_encoding = Nonetry:if xml_data[:] == '':xml_data = self._ebcdic_to_ascii(xml_data)elif xml_data[:] == '':sniffed_xml_encoding = ''xml_data = str(xml_data, '').encode('')elif (len(xml_data) >= ) and (xml_data[:] == '')and (xml_data[:] != ''):sniffed_xml_encoding = ''xml_data = str(xml_data[:], '').encode('')elif xml_data[:] == '':sniffed_xml_encoding = ''xml_data = str(xml_data, '').encode('')elif (len(xml_data) >= ) and (xml_data[:] == '') and(xml_data[:] != ''):sniffed_xml_encoding = ''xml_data = str(xml_data[:], '').encode('')elif xml_data[:] == '':sniffed_xml_encoding = ''xml_data = str(xml_data, '').encode('')elif xml_data[:] == '':sniffed_xml_encoding = ''xml_data = str(xml_data, '').encode('')elif xml_data[:] == '':sniffed_xml_encoding = ''xml_data = str(xml_data[:], '').encode('')elif xml_data[:] == '':sniffed_xml_encoding = ''xml_data = str(xml_data[:], '').encode('')elif xml_data[:] == '':sniffed_xml_encoding = ''xml_data = str(xml_data[:], '').encode('')else:sniffed_xml_encoding = ''passexcept:xml_encoding_match = Nonexml_encoding_match = re.compile('').match(xml_data)if not xml_encoding_match and isHTML:regexp = re.compile('', re.I)xml_encoding_match = regexp.search(xml_data)if xml_encoding_match is not None:xml_encoding = xml_encoding_match.groups()[].lower()if isHTML:self.declaredHTMLEncoding = xml_encodingif 
sniffed_xml_encoding and(xml_encoding in ('', '', '','', '', '','', '', '', '','', '')):xml_encoding = sniffed_xml_encodingreturn xml_data, xml_encoding, sniffed_xml_encoding", "docstring": "Given a document, tries to detect its XML encoding.", "id": "f11596:c20:m4"} {"signature": "def replace_entities(ustring, placeholder=\"\"):", "body": "def _repl_func(match):try:if match.group(): return unichr( int(match.group()) ) else:try: return cp1252[ unichr(int(match.group())) ].strip()except: return unichr( name2codepoint[match.group()] )except:return placeholderif not isinstance(ustring, unicode):ustring = UnicodeDammit(ustring).unicodeustring = ustring.replace(\"\", \"\")_entity_re = re.compile(r'') return _entity_re.sub(_repl_func, ustring)", "docstring": "Replaces HTML special characters by readable characters.\n\n As taken from Leif K-Brooks algorithm on:\n http://groups-beta.google.com/group/comp.lang.python", "id": "f11598:m1"} {"signature": "def _darkest(self):", "body": "rgb, n = (, , ), for r,g,b in self:if r+g+b < n:rgb, n = (r,g,b), r+g+breturn rgb", "docstring": "Returns the darkest swatch.\n\n Knowing the contract between a light and a dark swatch\n can help us decide how to display readable typography.", "id": "f11600:c0:m1"} {"signature": "def __init__(self, q, page=, wait=, asynchronous=False, cached=True):", "body": "if cached: cache = \"\"else:cache = Noneurl = \"\"if isinstance(q, int):url += \"\" + str(q) elif q in [\"\", \"\"]:url += \"\" + qelse:url += \"\" + quote(q)if q == \"\":if cached and Cache(cache).age(url) > :Cache(cache).remove(url)if q == \"\":Cache(cache).remove(url)URLAccumulator.__init__(self, url, wait, asynchronous, cache, type=\"\", throttle=)", "docstring": "Parses color themes from Adobe Kuler.\n\n Valid queries are \"popular\", \"rating\", \n a theme id as an integer, or a search string.", "id": "f11600:c1:m0"} {"signature": "def lab_to_rgb(l, a, b):", "body": "y = (l+) / x = a/ + yz = y - b/v = [x,y,z]for i in range():if pow(v[i],) > : v[i] = pow(v[i],)else: v[i] = (v[i]-/) / x = v[] * /y = v[] * /z = v[] * /r = x * + y *- + z *-g = x *- + y * + z * b = x * + y *- + z * v = [r,g,b]for i in range():if v[i] > :v[i] = * pow(v[i], /) - else:v[i] = * v[i]r, g, b = v[], v[], v[]return r, g, b", "docstring": "Converts CIE Lab to RGB components.\n\n First we have to convert to XYZ color space.\n Conversion involves using a white point,\n in this case D65 which represents daylight illumination.\n\n Algorithms adopted from:\n http://www.easyrgb.com/math.php", "id": "f11601:m3"} {"signature": "def _darkest(self):", "body": "rgb, n = (, , ), for r,g,b in self:if r+g+b < n:rgb, n = (r,g,b), r+g+breturn rgb", "docstring": "Returns the darkest swatch.\n\n Knowing the contract between a light and a dark swatch\n can help us decide how to display readable typography.", "id": "f11601:c0:m1"} {"signature": "def __init__(self, q, page=, wait=, asynchronous=False, cached=True):", "body": "if cached: cache = \"\"else:cache = Noneurl = \"\"self.id_string = url + \"\"if isinstance(q, int):url = self.id_string + str(q) elif q in [\"\", \"\"]:url += \"\"+qurl += \"\"+str(page*)+\"\"else:url += \"\"+quote(q)url += \"\"+str(page*)+\"\"if q in [\"\", \"\"]:if cached and Cache(cache).age(url) > :Cache(cache).remove(url)URLAccumulator.__init__(self, url, wait, asynchronous, cache, type=\"\", throttle=)", "docstring": "Parses color themes from Adobe Kuler.\n\n Valid queries are \"popular\", \"rating\", \n a theme id as an integer, or a search string.", "id": "f11601:c1:m0"} 
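Note: the `_darkest` records above (f11600:c0:m1 and f11601:c0:m1) describe picking the swatch whose r+g+b sum is smallest, but the numeric literals are stripped in this dump. Below is a minimal sketch of that selection logic, assuming a plain list of (r, g, b) tuples with 0-255 channels; the function and variable names are illustrative, not the library's API.

    def darkest(swatches):
        """Return the (r, g, b) swatch with the lowest channel sum, or None."""
        best, best_sum = None, float("inf")
        for r, g, b in swatches:
            if r + g + b < best_sum:
                best, best_sum = (r, g, b), r + g + b
        return best

    # usage sketch
    theme = [(240, 240, 230), (30, 40, 50), (120, 10, 200)]
    print(darkest(theme))  # -> (30, 40, 50)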
{"signature": "def parse_theme(self, xml):", "body": "kt = KulerTheme() kt.author = xml.getElementsByTagName(\"\")[]kt.author = kt.author.childNodes[].childNodes[].nodeValuekt.id = int(self.parse_tag(xml, \"\"))kt.label = self.parse_tag(xml, \"\")mode = self.parse_tag(xml, \"\")for swatch in xml.getElementsByTagName(\"\"):c1 = float(self.parse_tag(swatch, \"\"))c2 = float(self.parse_tag(swatch, \"\"))c3 = float(self.parse_tag(swatch, \"\"))c4 = float(self.parse_tag(swatch, \"\"))if mode == \"\":kt.append((c1,c2,c3))if mode == \"\": kt.append(cmyk_to_rgb(c1,c2,c3,c4))if mode == \"\":kt.append(colorsys.hsv_to_rgb(c1,c2,c3))if mode == \"\":kt.append(hex_to_rgb(c1))if mode == \"\":kt.append(lab_to_rgb(c1,c2,c3))if self._cache.exists(self.id_string + str(kt.id)):xml = self._cache.read(self.id_string + str(kt.id))xml = minidom.parseString(xml)for tags in xml.getElementsByTagName(\"\"):tags = self.parse_tag(tags, \"\")tags = tags.split(\"\")kt.tags.extend(tags)return kt", "docstring": "Parses a theme from XML returned by Kuler.\n\n Gets the theme's id, label and swatches.\n All of the swatches are converted to RGB.\n If we have a full description for a theme id in cache,\n parse that to get tags associated with the theme.", "id": "f11601:c1:m3"} {"signature": "def download(self, size=SIZE_XLARGE, thumbnail=False, wait=, asynchronous=False):", "body": "if thumbnail == True: size = SIZE_THUMBNAIL self._size = disambiguate_size(size)self._wait = waitself._asynchronous = asynchronousurl = \"\"url += \"\" + self.idurl += \"\" + API_KEYURLAccumulator.__init__(self, url, wait, asynchronous, \"\", \"\", )if not asynchronous:return self.path", "docstring": "Downloads this image to cache.\n\n Calling the download() method instantiates an asynchronous URLAccumulator\n that will fetch the image's URL from Flickr.\n A second process then downloads the file at the retrieved URL.\n\n Once it is done downloading, this image will have its path property\n set to an image file in the cache.", "id": "f11602:c0:m3"} {"signature": "def open(url, wait=):", "body": "post = Noneif isinstance(url, URLParser) and url.method == \"\":post = urllib.parse.urlencode(url.query)url = str(url)if os.path.exists(url):return urllib.request.urlopen(url)else:socket.setdefaulttimeout(wait)try:request = urllib.request.Request(url, post, {\"\": USER_AGENT, \"\": REFERER})if PROXY:p = urllib.request.ProxyHandler({PROXY[]: PROXY[]})o = urllib.request.build_opener(p, urllib.request.HTTPHandler)urllib.request.install_opener(o)connection = urllib.request.urlopen(request)except urllib.error.HTTPError as e:if e.code == : raise HTTP401Authenticationif e.code == : raise HTTP403Forbiddenif e.code == : raise HTTP404NotFoundraise HTTPErrorexcept urllib.error.URLError as e:if e.reason[] == : raise URLTimeoutraise URLErrorreturn connection", "docstring": "Returns a connection to a url which you can read().\n\n When the wait amount is exceeded, raises a URLTimeout.\n When an error occurs, raises a URLError.\n 404 errors specifically return a HTTP404NotFound.", "id": "f11604:m3"} {"signature": "def is_url(url, wait=):", "body": "try: connection = open(url, wait)except:return Falsereturn True", "docstring": "Returns False when no connection can be opened to the url.", "id": "f11604:m4"} {"signature": "def not_found(url, wait=):", "body": "try: connection = open(url, wait)except HTTP404NotFound:return Trueexcept:return Falsereturn False", "docstring": "Returns True when the url generates a \"404 Not Found\" error.", "id": "f11604:m5"} {"signature": "def 
is_type(url, types=[], wait=):", "body": "if isinstance(types, str):types = [types]try: connection = open(url, wait)except:return Falsetype = connection.info()[\"\"]for t in types:if type.startswith(t): return Truereturn False", "docstring": "Determine the MIME-type of the document behind the url.\n\n MIME is more reliable than simply checking the document extension.\n Returns True when the MIME-type starts with anything in the list of types.", "id": "f11604:m6"} {"signature": "def __init__(self, url=\"\", method=\"\"):", "body": "is_post_urlparser = Falseif isinstance(url, URLParser) and url.method == \"\":is_post_urlparser = Trueurl.method = \"\"urlstr = str(url)if is_post_urlparser: url.method = \"\"url = urlstrurl = urllib.parse.urlsplit(url)self.protocol = url[]self.domain = url[]self.username = \"\"self.password = \"\"if self.domain.find(\"\") >= :login = self.domain.split(\"\")[]if login.find(\"\") >= :self.username = login.split(\"\")[]self.password = login.split(\"\")[]self.domain = self.domain.split(\"\")[]self.port = \"\"if self.domain.find(\"\") >= :p = self.domain.split(\"\")if p[].isdigit():self.port = p[]self.domain = p[]self.path = url[]self.page = \"\"if not self.path.endswith(\"\"):if self.path.find(\"\") >= :self.page = self.path.split(\"\")[-]self.path = self.path[:-len(self.page)]else:self.page = self.pathself.path = \"\"self.filename = self.pageself.query = {}self.method = methodif url[] != \"\":self.method = \"\"if is_post_urlparser:self.method = \"\"for param in url[].split(\"\"):key, value = \"\", \"\"if param.find(\"\") >= :try: (key, value) = param.split(\"\")except:key = paramelse:key = paramif key != \"\":self.query[key] = valueself.anchor = url[]", "docstring": "Splits an url string into different parts.\n\n The parts are:\n protocol, domain, login, username, password, port, path, page, query, anchor.\n\n The method defaults to get when the url has a query part.\n Setting it to post will submit the query by POST\n when opening the url.", "id": "f11604:c6:m0"} {"signature": "def __str__(self):", "body": "url = \"\"if self.protocol != \"\" : url += self.protocol + \"\"if self.username != \"\" : url += self.username + \"\" + self.password + \"\"if self.domain != \"\" : url += self.domainif self.port != \"\" : url += \"\" + self.portif self.path != \"\" : url += self.pathif self.page != \"\" : url += self.pageif self.method == \"\" andlen(self.query) > : url += \"\" + urllib.parse.urlencode(self.query)if self.anchor != \"\" : url += \"\" + self.anchorreturn url", "docstring": "Reforms a url string from the different parts.", "id": "f11604:c6:m1"} {"signature": "def __init__(self, url, wait=, asynchronous=False, cache=None, type=\"\", throttle=):", "body": "self.url = urlself.data = Noneself.redirect = Noneself.error = Noneif cache != None:self.cached = Trueself._cache = Cache(cache, type)else:self.cached = Falseself._cache = Noneself._domain = URLParser(self.url).domainself._throttle = throttleglobal urlaccumulator_throttleif not self._domain in urlaccumulator_throttle:urlaccumulator_throttle[self._domain] = time.time() - self._throttleself._start = time.time()self._wait = waitself._busy = Trueself._loaded = False_thread.start_new_thread(self._retrieve, (self.url,))if not asynchronous:while not self._done():time.sleep()", "docstring": "Creates a threaded connection to a url and reads data.\n\n URLAccumulator can run asynchronously which is useful for animations.\n The done property is set to True when downloading is complete.\n The error attribute contains a 
URLError exception when no data is found.\n\n URLAccumulator data can be cached.\n Downloads that resulted in an error will write an empty file to the cache,\n the data property will be an empty string but no error is logged\n when the data is read from the cache in later calls.\n\n URLAccumulator can be throttled.\n This ensures only a certain amount of requests to a domain\n will happen in a given period of time.\n\n URLAccumulator data is loaded.\n It has a load() method that is called once when done.", "id": "f11604:c7:m0"} {"signature": "def load(self, data):", "body": "pass", "docstring": "Override this method in subclasses to process downloaded data.", "id": "f11604:c7:m4"} {"signature": "def graph_background(s):", "body": "if s.background == None:s._ctx.background(None)else:s._ctx.background(s.background)if s.depth:try:clr = colors.color(s.background).darker()p = s._ctx.rect(, , s._ctx.WIDTH, s._ctx.HEIGHT, draw=False)colors.gradientfill(p, clr, clr.lighter())colors.shadow(dx=, dy=, blur=, alpha=, clr=s.background)except:pass", "docstring": "Graph background color.", "id": "f11607:m0"} {"signature": "def graph_traffic(s, node, alpha=):", "body": "r = node.__class__(None).rr += (node.weight+) * r * s._ctx.nostroke()if s.traffic:s._ctx.fill(s.traffic.r,s.traffic.g,s.traffic.b,s.traffic.a * alpha)s._ctx.oval(node.x-r, node.y-r, r*, r*)", "docstring": "Visualization of traffic-intensive nodes (based on their centrality).", "id": "f11607:m1"} {"signature": "def node(s, node, alpha=):", "body": "if s.depth:try:colors.shadow(dx=, dy=, blur=, alpha=*alpha)except:passs._ctx.nofill()s._ctx.nostroke()if s.fill:s._ctx.fill(s.fill.r,s.fill.g,s.fill.b,s.fill.a * alpha)if s.stroke:s._ctx.strokewidth(s.strokewidth)s._ctx.stroke(s.stroke.r,s.stroke.g,s.stroke.b,s.stroke.a * alpha * )r = node.rs._ctx.oval(node.x-r, node.y-r, r*, r*)", "docstring": "Visualization of a default node.", "id": "f11607:m2"} {"signature": "def node_label(s, node, alpha=):", "body": "if s.text:s._ctx.font(s.font)s._ctx.fontsize(s.fontsize)s._ctx.nostroke()s._ctx.fill(s.text.r,s.text.g,s.text.b,s.text.a * alpha)try:p = node._textpathexcept:txt = node.labeltry:txt = str(txt)except:try:txt = txt.decode(\"\")except:passdx, dy = , if s.align == : dx = -s._ctx.textwidth(txt, s.textwidth) / dy = s._ctx.textheight(txt) / node._textpath = s._ctx.textpath(txt, dx, dy, width=s.textwidth)p = node._textpathif s.depth:try:__colors.shadow(dx=, dy=, blur=, alpha=*alpha)except:passs._ctx.push()s._ctx.translate(node.x, node.y)s._ctx.scale(alpha)s._ctx.drawpath(p.copy())s._ctx.pop()", "docstring": "Visualization of a node's id.", "id": "f11607:m3"} {"signature": "def edges(s, edges, alpha=, weighted=False, directed=False):", "body": "p = s._ctx.BezierPath()if directed and s.stroke:pd = s._ctx.BezierPath()if weighted and s.fill:pw = [s._ctx.BezierPath() for i in range()]if len(edges) == :returnfor e in edges:try:s2 = e.node1.graph.styles[e.node1.style]except:s2 = sif s2.edge:s2.edge(s2, p, e, alpha)if directed and s.stroke:s2.edge_arrow(s2, pd, e, radius=)if weighted and s.fill:s2.edge(s2, pw[int(e.weight*)], e, alpha)s._ctx.autoclosepath(False)s._ctx.nofill()s._ctx.nostroke()if weighted and s.fill:r = e.node1.__class__(None).rs._ctx.stroke(s.fill.r,s.fill.g,s.fill.b,s.fill.a * * alpha)for w in range(, len(pw)):s._ctx.strokewidth(r*w*)s._ctx.drawpath(pw[w].copy())if s.stroke:s._ctx.strokewidth(s.strokewidth)s._ctx.stroke(s.stroke.r,s.stroke.g,s.stroke.b,s.stroke.a * * alpha)s._ctx.drawpath(p.copy())if directed and s.stroke:clr = 
s._ctx.color(s.stroke.r,s.stroke.g,s.stroke.b,s.stroke.a * * alpha)clr.a *= s._ctx.stroke(clr)s._ctx.drawpath(pd.copy())for e in edges:try:s2 = self.styles[e.node1.style]except:s2 = sif s2.edge_label:s2.edge_label(s2, e, alpha)", "docstring": "Visualization of the edges in a network.", "id": "f11607:m4"} {"signature": "def edge(s, path, edge, alpha=):", "body": "path.moveto(edge.node1.x, edge.node1.y)if edge.node2.style == BACK:path.curveto(edge.node1.x,edge.node2.y,edge.node2.x,edge.node2.y,edge.node2.x,edge.node2.y,)else:path.lineto(edge.node2.x,edge.node2.y)", "docstring": "Visualization of a single edge between two nodes.", "id": "f11607:m5"} {"signature": "def edge_label(s, edge, alpha=):", "body": "if s.text and edge.label != \"\":s._ctx.nostroke()s._ctx.fill(s.text.r,s.text.g,s.text.b,s.text.a * alpha*)s._ctx.lineheight()s._ctx.font(s.font)s._ctx.fontsize(s.fontsize*)try:p = edge._textpathexcept:try:txt = str(edge.label)except:try:txt = edge.label.decode(\"\")except:passedge._textpath = s._ctx.textpath(txt, s._ctx.textwidth(\"\"), , width=s.textwidth)p = edge._textpatha = degrees(atan2(edge.node2.y-edge.node1.y,edge.node2.x-edge.node1.x))d = sqrt((edge.node2.x-edge.node1.x)** +(edge.node2.y-edge.node1.y)**)d = abs(d-s._ctx.textwidth(edge.label)) * s._ctx.push()s._ctx.transform(CORNER)s._ctx.translate(edge.node1.x, edge.node1.y)s._ctx.rotate(-a)s._ctx.translate(d, s.fontsize*)s._ctx.scale(alpha)if < a % < :s._ctx.translate(s._ctx.textwidth(edge.label), -s.fontsize*)s._ctx.transform(CENTER)s._ctx.rotate()s._ctx.transform(CORNER)s._ctx.drawpath(p.copy())s._ctx.pop()", "docstring": "Visualization of the label accompanying an edge.", "id": "f11607:m7"} {"signature": "def path(s, graph, path):", "body": "def end(n):r = n.r * s._ctx.oval(n.x-r, n.y-r, r*, r*)if path and len(path) > and s.stroke:s._ctx.nofill()s._ctx.stroke(s.stroke.r,s.stroke.g,s.stroke.b,s.stroke.a)if s.name != DEFAULT:s._ctx.strokewidth(s.strokewidth)else:s._ctx.strokewidth(s.strokewidth*)first = Truefor id in path:n = graph[id]if first:first = Falses._ctx.beginpath(n.x, n.y)end(n)else:s._ctx.lineto(n.x, n.y)s._ctx.endpath()end(n)", "docstring": "Visualization of a shortest path between two nodes.", "id": "f11607:m8"} {"signature": "def create(self, stylename, **kwargs):", "body": "if stylename == \"\":self[stylename] = style(stylename, self._ctx, **kwargs)return self[stylename]k = kwargs.get(\"\", \"\")s = self[stylename] = self[k].copy(stylename)for attr in kwargs:if attr in s.__dict__:s.__dict__[attr] = kwargs[attr]return s", "docstring": "Creates a new style which inherits from the default style,\n or any other style which name is supplied to the optional template parameter.", "id": "f11607:c0:m2"} {"signature": "def __getattr__(self, a):", "body": "if a in self:return self[a]raise AttributeError(\"\"+a+\"\")", "docstring": "Keys in the dictionaries are accessible as attributes.", "id": "f11607:c0:m4"} {"signature": "def __setattr__(self, a, v):", "body": "if a == \"\":self.__dict__[\"\"] = velif len(self) > and a in list(self.values())[].__dict__:for style in list(self.values()):style.__dict__[a] = velse:raise AttributeError(\"\"+a+\"\")", "docstring": "Setting an attribute is like setting it in all of the contained styles.", "id": "f11607:c0:m5"} {"signature": "def copy(self, graph):", "body": "s = styles(graph)s.guide = self.guide.copy(graph)dict.__init__(s, [(v.name, v.copy()) for v in list(self.values())])return s", "docstring": "Returns a copy of all styles and a copy of the styleguide.", "id": "f11607:c0:m6"} 
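Note: the styles records above (f11607:c0:m4 and f11607:c0:m5) describe a dict whose keys are readable as attributes and whose attribute writes are applied to every contained style. A simplified, self-contained sketch of that behavior follows; it is not the library's actual class, and the class and field names are illustrative.

    class Style(object):
        def __init__(self, name, **kwargs):
            self.name = name
            self.__dict__.update(kwargs)

    class Styles(dict):
        def __getattr__(self, a):
            # keys in the dictionary are accessible as attributes
            if a in self:
                return self[a]
            raise AttributeError("no style named " + a)

        def __setattr__(self, a, v):
            # setting an attribute broadcasts it to all contained styles
            for style in self.values():
                style.__dict__[a] = v

    styles = Styles()
    styles["default"] = Style("default", fill=(0, 0, 0))
    styles["marked"] = Style("marked", fill=(1, 0, 0))
    styles.fontsize = 10            # fans out to both styles
    print(styles.default.fontsize)  # -> 10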
{"signature": "def append(self, stylename, function):", "body": "self[stylename] = function", "docstring": "The name of a style and a function that takes a graph and a node.\n It returns True when the style should be applied to the given node.", "id": "f11607:c1:m1"} {"signature": "def apply(self):", "body": "sorted = self.order + list(self.keys())unique = [][unique.append(x) for x in sorted if x not in unique]for node in self.graph.nodes:for s in unique:if s in self and self[s](self.graph, node):node.style = s", "docstring": "Check the rules for each node in the graph and apply the style.", "id": "f11607:c1:m3"} {"signature": "def copy(self, graph):", "body": "g = styleguide(graph)g.order = self.orderdict.__init__(g, [(k, v) for k, v in self.items()])return g", "docstring": "Returns a copy of the styleguide for the given graph.", "id": "f11607:c1:m4"} {"signature": "def __init__(self, name, _ctx, **kwargs):", "body": "self.name = nameself._ctx = _ctxif not _ctx:returnself.background = _ctx.color(, , , )self.traffic = _ctx.color(, , , )self.fill = _ctx.color(, , , )self.stroke = _ctx.color(, , , )self.strokewidth = self.text = _ctx.color(, , , )self.font = \"\"self.fontsize = self.textwidth = self.align = self.depth = Trueself.graph_background = graph_backgroundself.graph_traffic = graph_trafficself.node = nodeself.node_label = node_labelself.edges = edgesself.edge = edgeself.edge_arrow = edge_arrowself.edge_label = edge_labelself.path = pathfor attr in kwargs:if attr in self.__dict__:self.__dict__[attr] = kwargs[attr]if self.depth:try:global colorscolors = _ctx.ximport(\"\")except:self.depth = False", "docstring": "Graph styling. \n The default style is used for edges.\n When text is set to None, no id label is displayed.", "id": "f11607:c2:m0"} {"signature": "def create(iterations=, distance=, layout=LAYOUT_SPRING, depth=True):", "body": "_ctx.colormode(_ctx.RGB)g = graph(iterations, distance, layout)s = style.styleg.styles.append(s(style.LIGHT , _ctx, fill = _ctx.color(, , , )))g.styles.append(s(style.DARK , _ctx, fill = _ctx.color(, , , )))g.styles.append(s(style.BACK , _ctx, fill = _ctx.color(, , , )))g.styles.append(s(style.IMPORTANT, _ctx, fill = _ctx.color(, , , )))g.styles.append(s(style.HIGHLIGHT, _ctx, stroke = _ctx.color(, , ), strokewidth=))g.styles.append(s(style.MARKED , _ctx))g.styles.append(s(style.ROOT , _ctx, text = _ctx.color(, , , ), stroke = _ctx.color(, , , ),strokewidth = , fontsize = , textwidth = ))def important_node(s, node, alpha=):style.style(None, _ctx).node(s, node, alpha)r = node.r * _ctx.nofill()_ctx.oval(node.x-r, node.y-r, r*, r*) def marked_node(s, node, alpha=):style.style(None, _ctx).node(s, node, alpha)r = node.r * _ctx.fill(s.stroke)_ctx.oval(node.x-r, node.y-r, r*, r*)g.styles.important.node = important_nodeg.styles.marked.node = marked_node g.styles.depth = depthg.styles.guide.append(style.LIGHT , lambda graph, node: graph.root in node.links)g.styles.guide.append(style.DARK , lambda graph, node: len(node.links) > )g.styles.guide.append(style.IMPORTANT , lambda graph, node: node.weight > )g.styles.guide.append(style.ROOT , lambda graph, node: node == graph.root)g.styles.guide.append(style.BACK , lambda graph, node: node == graph.events.clicked)def balance(graph, node): node.r = node.r* + node.r*node.weight*g.styles.guide.append(\"\", balance)def cluster(graph, node):if len(node.links) == : node.links.edge(node.links[]).length *= g.styles.guide.append(\"\", cluster)g.styles.guide.order = [style.LIGHT, style.DARK, style.IMPORTANT, style.ROOT, 
style.BACK, \"\", \"\"]return g", "docstring": "Returns a new graph with predefined styling.", "id": "f11608:m0"} {"signature": "def __init__(self, graph, id=\"\", radius=, style=style.DEFAULT, category=\"\", label=None,properties={}):", "body": "self.graph = graphself.id = idself.category = categoryself.label = label or self.idself.links = links()self.vx = self.vy = self.force = layout.Point(, )self.r = radiusself.style = styleself._visited = Falseself._betweenness = Noneself._eigenvalue = Nonefor k, v in list(properties.items()):if not k in self.__dict__:self.__dict__[k] = v", "docstring": "A node with a unique id in the graph.\n Its position is calculated by graph.layout.\n The node's radius and style define how it looks onscreen.", "id": "f11608:c0:m0"} {"signature": "def can_reach(self, node, traversable=lambda node, edge: True):", "body": "if isinstance(node, str):node = self.graph[node]for n in self.graph.nodes:n._visited = Falsereturn proximity.depth_first_search(self,visit=lambda n: node == n,traversable=traversable)", "docstring": "Returns True if given node can be reached over traversable edges.\n To enforce edge direction, use a node==edge.node1 traversable.", "id": "f11608:c0:m3"} {"signature": "def __contains__(self, pt):", "body": "if abs(self.graph.x+self.x-pt.x) < self.r* andabs(self.graph.y+self.y-pt.y) < self.r*:return Trueelse:return False", "docstring": "True if pt.x, pt.y is inside the node's absolute position.", "id": "f11608:c0:m8"} {"signature": "def copy(self, empty=False):", "body": "g = graph(self.layout.n, self.distance, self.layout.type)g.layout = self.layout.copy(g)g.styles = self.styles.copy(g)g.events = self.events.copy(g)if not empty:for n in self.nodes:g.add_node(n.id, n.r, n.style, n.category, n.label, (n == self.root), n.__dict__)for e in self.edges:g.add_edge(e.node1.id, e.node2.id, e.weight, e.length, e.label, e.__dict__)return g", "docstring": "Create a copy of the graph (by default with nodes and edges).", "id": "f11608:c3:m3"} {"signature": "def clear(self):", "body": "dict.clear(self)self.nodes = []self.edges = []self.root = Noneself.layout.i = self.alpha = ", "docstring": "Remove nodes and edges and reset the layout.", "id": "f11608:c3:m4"} {"signature": "def add_node(self, id, radius=, style=style.DEFAULT, category=\"\", label=None, root=False,properties={}):", "body": "if id in self: return self[id]if not isinstance(style, str) and style.__dict__.has_key[\"\"]:style = style.namen = node(self, id, radius, style, category, label, properties)self[n.id] = nself.nodes.append(n)if root: self.root = nreturn n", "docstring": "Add node from id and return the node object.", "id": "f11608:c3:m5"} {"signature": "def add_nodes(self, nodes):", "body": "try: [self.add_node(n) for n in nodes]except:pass", "docstring": "Add nodes from a list of id's.", "id": "f11608:c3:m6"} {"signature": "def add_edge(self, id1, id2, weight=, length=, label=\"\", properties={}):", "body": "if id1 == id2: return Noneif id1 not in self: self.add_node(id1)if id2 not in self: self.add_node(id2)n1 = self[id1]n2 = self[id2]if n1 in n2.links:if n2.links.edge(n1).node1 == n1:return self.edge(id1, id2)weight = max(, min(weight, ))e = edge(n1, n2, weight, length, label, properties)self.edges.append(e) n1.links.append(n2, e)n2.links.append(n1, e)return e", "docstring": "Add weighted (0.0-1.0) edge between nodes, creating them if necessary.\n The weight represents the importance of the connection (not the cost).", "id": "f11608:c3:m7"} {"signature": "def remove_node(self, id):", "body": 
"if id in self:n = self[id]self.nodes.remove(n)del self[id]for e in list(self.edges):if n in (e.node1, e.node2):if n in e.node1.links: e.node1.links.remove(n)if n in e.node2.links: e.node2.links.remove(n)self.edges.remove(e)", "docstring": "Remove node with given id.", "id": "f11608:c3:m8"} {"signature": "def remove_edge(self, id1, id2):", "body": "for e in list(self.edges):if id1 in (e.node1.id, e.node2.id) andid2 in (e.node1.id, e.node2.id):e.node1.links.remove(e.node2)e.node2.links.remove(e.node1)self.edges.remove(e)", "docstring": "Remove edges between nodes with given id's.", "id": "f11608:c3:m9"} {"signature": "def node(self, id):", "body": "if id in self:return self[id]return None", "docstring": "Returns the node in the graph associated with the given id.", "id": "f11608:c3:m10"} {"signature": "def edge(self, id1, id2):", "body": "if id1 in self andid2 in self andself[id2] in self[id1].links:return self[id1].links.edge(id2)return None", "docstring": "Returns the edge between the nodes with given id1 and id2.", "id": "f11608:c3:m11"} {"signature": "def __getattr__(self, a):", "body": "if a in self: return self[a]raise AttributeError(\"\"+str(a)+\"\")", "docstring": "Returns the node in the graph associated with the given id.", "id": "f11608:c3:m12"} {"signature": "def update(self, iterations=):", "body": "self.alpha += self.alpha = min(self.alpha, )if self.layout.i == :self.layout.prepare()self.layout.i += elif self.layout.i == :self.layout.iterate()elif self.layout.i < self.layout.n:n = min(iterations, self.layout.i / + )for i in range(n): self.layout.iterate()min_, max = self.layout.boundsself.x = _ctx.WIDTH - max.x*self.d - min_.x*self.dself.y = _ctx.HEIGHT - max.y*self.d - min_.y*self.dself.x /= self.y /= return not self.layout.done", "docstring": "Iterates the graph layout and updates node positions.", "id": "f11608:c3:m13"} {"signature": "def solve(self):", "body": "self.layout.solve()self.alpha = ", "docstring": "Iterates the graph layout until done.", "id": "f11608:c3:m14"} {"signature": "def offset(self, node):", "body": "x = self.x + node.x - _ctx.WIDTH/y = self.y + node.y - _ctx.HEIGHT/return x, y", "docstring": "Returns the distance from the center to the given node.", "id": "f11608:c3:m16"} {"signature": "def draw(self, dx=, dy=, weighted=False, directed=False, highlight=[], traffic=None):", "body": "self.update()s = self.styles.defaults.graph_background(s)_ctx.push()_ctx.translate(self.x+dx, self.y+dy)if traffic:if isinstance(traffic, bool): traffic = for n in self.nodes_by_betweenness()[:traffic]:try: s = self.styles[n.style]except: s = self.styles.defaultif s.graph_traffic:s.graph_traffic(s, n, self.alpha) s = self.styles.defaultif s.edges:s.edges(s, self.edges, self.alpha, weighted, directed)for n in self.nodes:try: s = self.styles[n.style]except: s = self.styles.defaultif s.node:s.node(s, n, self.alpha)try: s = self.styles.highlightexcept: s = self.styles.defaultif s.path:s.path(s, self, highlight)for n in self.nodes:try: s = self.styles[n.style]except: s = self.styles.defaultif s.node_label:s.node_label(s, n, self.alpha)_ctx.pop()", "docstring": "Layout the graph incrementally.\n\n The graph is drawn at the center of the canvas.\n The weighted and directed parameters visualize edge weight and direction.\n The highlight specifies list of connected nodes. 
\n The path will be colored according to the \"highlight\" style.\n Clicking and dragging events are monitored.", "id": "f11608:c3:m17"} {"signature": "def prune(self, depth=):", "body": "for n in list(self.nodes):if len(n.links) <= depth:self.remove_node(n.id)", "docstring": "Removes all nodes with less or equal links than depth.", "id": "f11608:c3:m18"} {"signature": "def betweenness_centrality(self, normalized=True):", "body": "bc = proximity.brandes_betweenness_centrality(self, normalized)for id, w in bc.items(): self[id]._betweenness = wreturn bc", "docstring": "Calculates betweenness centrality and returns an node id -> weight dictionary.\n Node betweenness weights are updated in the process.", "id": "f11608:c3:m20"} {"signature": "def eigenvector_centrality(self, normalized=True, reversed=True, rating={},start=None, iterations=, tolerance=):", "body": "ec = proximity.eigenvector_centrality(self, normalized, reversed, rating, start, iterations, tolerance)for id, w in ec.items(): self[id]._eigenvalue = wreturn ec", "docstring": "Calculates eigenvector centrality and returns an node id -> weight dictionary.\n Node eigenvalue weights are updated in the process.", "id": "f11608:c3:m21"} {"signature": "def nodes_by_betweenness(self, treshold=):", "body": "nodes = [(n.betweenness, n) for n in self.nodes if n.betweenness > treshold]nodes.sort(); nodes.reverse()return [n for w, n in nodes]", "docstring": "Returns nodes sorted by betweenness centrality.\n Nodes with a lot of passing traffic will be at the front of the list.", "id": "f11608:c3:m22"} {"signature": "def nodes_by_eigenvalue(self, treshold=):", "body": "nodes = [(n.eigenvalue, n) for n in self.nodes if n.eigenvalue > treshold]nodes.sort(); nodes.reverse()return [n for w, n in nodes]", "docstring": "Returns nodes sorted by eigenvector centrality.\n Nodes with a lot of incoming traffic will be at the front of the list", "id": "f11608:c3:m23"} {"signature": "def nodes_by_category(self, category):", "body": "return [n for n in self.nodes if n.category == category]", "docstring": "Returns nodes with the given category attribute.", "id": "f11608:c3:m24"} {"signature": "def _leaves(self):", "body": "return [node for node in self.nodes if node.is_leaf]", "docstring": "Returns a list of nodes that have only one connection.", "id": "f11608:c3:m25"} {"signature": "def crown(self, depth=):", "body": "nodes = []for node in self.leaves: nodes += node.flatten(depth-)return cluster.unique(nodes)", "docstring": "Returns a list of leaves, nodes connected to leaves, etc.", "id": "f11608:c3:m26"} {"signature": "def _density(self):", "body": "return *len(self.edges) / (len(self.nodes) * (len(self.nodes)-))", "docstring": "The number of edges in relation to the total number of possible edges.", "id": "f11608:c3:m27"} {"signature": "def load(self, id):", "body": "self.clear()self.add_node(id, root=True)for w, id2 in self.get_links(id):self.add_edge(id, id2, weight=w)if len(self) > self.max: breakfor w, id2, links in self.get_cluster(id):for id3 in links:self.add_edge(id3, id2, weight=w)self.add_edge(id, id3, weight=w)if len(self) > self.max: break if self.event.clicked: g.add_node(self.event.clicked)", "docstring": "Rebuilds the graph around the given node id.", "id": "f11608:c4:m4"} {"signature": "def click(self, node):", "body": "if not self.has_node(node.id): returnif node == self.root: returnself._dx, self._dy = self.offset(node)self.previous = self.root.idself.load(node.id)", "docstring": "Callback from graph.events when a node is clicked.", "id": 
"f11608:c4:m5"} {"signature": "def depth_first_search(root, visit=lambda node: False, traversable=lambda node, edge: True):", "body": "stop = visit(root)root._visited = Truefor node in root.links:if stop: return Trueif not traversable(root, root.links.edge(node)): continueif not node._visited:stop = depth_first_search(node, visit, traversable)return stop", "docstring": "Simple, multi-purpose depth-first search.\n\n Visits all the nodes connected to the root, depth-first.\n The visit function is called on each node.\n Recursion will stop if it returns True, and ubsequently dfs() will return True.\n The traversable function takes the current node and edge,\n and returns True if we are allowed to follow this connection to the next node.\n For example, the traversable for directed edges is follows:\n lambda node, edge: node == edge.node1\n\n Note: node._visited is expected to be False for all nodes.", "id": "f11609:m0"} {"signature": "def adjacency(graph, directed=False, reversed=False, stochastic=False, heuristic=None):", "body": "v = {}for n in graph.nodes:v[n.id] = {}for e in graph.edges:id1 = e.node1.idid2 = e.node2.idif reversed:id1, id2 = id2, id1v[id1][id2] = - e.weight*if heuristic:v[id1][id2] += heuristic(id1, id2)if not directed: v[id2][id1] = v[id1][id2]if stochastic:for id1 in v:d = sum(v[id1].values())for id2 in v[id1]: v[id1][id2] /= dreturn v", "docstring": "An edge weight map indexed by node id's.\n\n A dictionary indexed by node id1's in which each value is a\n dictionary of connected node id2's linking to the edge weight.\n If directed, edges go from id1 to id2, but not the other way.\n If stochastic, all the weights for the neighbors of a given node sum to 1.\n A heuristic can be a function that takes two node id's and returns\n and additional cost for movement between the two nodes.", "id": "f11609:m1"} {"signature": "def brandes_betweenness_centrality(graph, normalized=True):", "body": "G = graph.keys()W = adjacency(graph)betweenness = dict.fromkeys(G, ) for s in G: S = [] P = {} for v in G: P[v] = [] sigma = dict.fromkeys(G, ) D = {} sigma[s] = seen = { s: } Q = [] heapq.heappush(Q, (, s, s)) while Q: (dist, pred, v) = heapq.heappop(Q) if v in D: continue sigma[v] = sigma[v] + sigma[pred] S.append(v) D[v] = seen[v] for w in graph[v].links:w = w.idvw_dist = D[v] + W[v][w]if w not in D and (w not in seen or vw_dist < seen[w]): seen[w] = vw_dist heapq.heappush(Q, (vw_dist, v, w)) P[w] = [v] elif vw_dist == seen[w]: sigma[w] = sigma[w] + sigma[v] P[w].append(v)delta = dict.fromkeys(G,) while S: w = S.pop() for v in P[w]: delta[v] = delta[v] + (float(sigma[v]) / float(sigma[w])) * ( + delta[w]) if w != s: betweenness[w] = betweenness[w] + delta[w]if normalized:m = max(betweenness.values())if m == : m = else:m = betweenness = dict([(id, w/m) for id, w in betweenness.iteritems()])return betweenness", "docstring": "Betweenness centrality for nodes in the graph.\n\n Betweenness centrality is a measure of the number of shortests paths that pass through a node.\n Nodes in high-density areas will get a good score.\n\n The algorithm is Brandes' betweenness centrality,\n from NetworkX 0.35.1: Aric Hagberg, Dan Schult and Pieter Swart,\n based on Dijkstra's algorithm for shortest paths modified from Eppstein.\n https://networkx.lanl.gov/wiki", "id": "f11609:m3"} {"signature": "def eigenvector_centrality(graph, normalized=True, reversed=True, rating={},start=None, iterations=, tolerance=):", "body": "G = graph.keys() W = adjacency (graph, directed=True, reversed=reversed)def 
_normalize(x):s = sum(x.values())if s != : s = / sfor k in x: x[k] *= sx = startif x is None:x = dict([(n, random()) for n in G])_normalize(x)for i in range(iterations):x0 = xx = dict.fromkeys(x0.keys(), )for n in x:for nbr in W[n]:r = if rating.has_key(n): r = rating[n]x[n] += + x0[nbr] * W[n][nbr] * r_normalize(x) e = sum([abs(x[n]-x0[n]) for n in x])if e < len(graph.nodes) * tolerance:if normalized:m = max(x.values())if m == : m = x = dict([(id, w/m) for id, w in x.iteritems()])return xwarn(\"\", Warning)return dict([(n, ) for n in G])", "docstring": "Eigenvector centrality for nodes in the graph (like Google's PageRank).\n\n Eigenvector centrality is a measure of the importance of a node in a directed network. \n It rewards nodes with a high potential of (indirectly) connecting to high-scoring nodes.\n Nodes with no incoming connections have a score of zero.\n If you want to measure outgoing connections, reversed should be False.\n\n The eigenvector calculation is done by the power iteration method.\n It has no guarantee of convergence.\n A starting vector for the power iteration can be given in the start dict.\n\n You can adjust the importance of a node with the rating dictionary,\n which links node id's to a score.\n\n The algorithm is adapted from NetworkX, Aric Hagberg (hagberg@lanl.gov):\n https://networkx.lanl.gov/attachment/ticket/119/eigenvector_centrality.py", "id": "f11609:m4"} {"signature": "def copy(self, graph):", "body": "e = events(graph, self._ctx)e.clicked = self.clickedreturn e", "docstring": "Returns a copy of the event handler, remembering the last node clicked.", "id": "f11610:c1:m1"} {"signature": "def update(self):", "body": "if self.mousedown:if not self.pressed and not self.dragged:for n in self.graph.nodes:if self.mouse in n:self.pressed = nbreakelif self.pressed and not self.mouse in self.pressed:self.dragged = self.pressedself.pressed = Noneelif self.dragged and self.graph.layout.type == \"\":self.drag(self.dragged)self.graph.layout.i = min(, max(, self.graph.layout.n-))elif self.pressed and self.mouse in self.pressed:self.clicked = self.pressedself.pressed = Noneself.graph.layout.i = self.click(self.clicked)else:self.hovered = Noneself.pressed = Noneself.dragged = Nonefor n in self.graph.nodes:if self.mouse in n:self.hovered = nself.hover(n)break", "docstring": "Interacts with the graph by clicking or dragging nodes.\n Hovering a node fires the callback function events.hover().\n Clicking a node fires the callback function events.click().", "id": "f11610:c1:m4"} {"signature": "def drag(self, node):", "body": "dx = self.mouse.x - self.graph.xdy = self.mouse.y - self.graph.ys = self.graph.styles.defaultself._ctx.nofill()self._ctx.nostroke()if s.stroke: self._ctx.strokewidth(s.strokewidth)self._ctx.stroke(s.stroke.r, s.stroke.g, s.stroke.g, )p = self._ctx.line(node.x, node.y, dx, dy, draw=False)try: p._nsBezierPath.setLineDash_count_phase_([,], , )except:passself._ctx.drawpath(p)r = node.__class__(None).r * self._ctx.oval(dx-r/, dy-r/, r, r)node.vx = dx / self.graph.dnode.vy = dy / self.graph.d", "docstring": "Drags given node to mouse location.", "id": "f11610:c1:m5"} {"signature": "def hover(self, node):", "body": "if self.popup == False: returnif self.popup == True or self.popup.node != node:if self.popup_text.has_key(node.id):texts = self.popup_text[node.id]else:texts = Noneself.popup = popup(self._ctx, node, texts)self.popup.draw()", "docstring": "Displays a popup when hovering over a node.", "id": "f11610:c1:m6"} {"signature": "def textpath(self, i):", 
"body": "if len(self._textpaths) == i:self._ctx.font(self.font, self.fontsize)txt = self.q[i]if len(self.q) > :txt += \"\"+str(i+)+\"\" + str(len(self.q))+\"\"p = self._ctx.textpath(txt, , , width=self._w)h = self._ctx.textheight(txt, width=self._w)self._textpaths.append((p, h))return self._textpaths[i]", "docstring": "Returns a cached textpath of the given text in queue.", "id": "f11610:c2:m1"} {"signature": "def update(self):", "body": "if self.delay > :self.delay -= ; returnif self.fi == :if len(self.q) == : self.fn = float(\"\")else:self.fn = len(self.q[self.i]) / self.speedself.fn = max(self.fn, self.mf) self.fi += if self.fi > self.fn:self.fi = self.i = (self.i+) % len(self.q)", "docstring": "Rotates the queued texts and determines display time.", "id": "f11610:c2:m2"} {"signature": "def draw(self):", "body": "if len(self.q) > :self.update()if self.delay == :p, h = self.textpath(self.i)f = self.fontsizeself._ctx.fill(self.background)self._ctx.rect(self.node.x + f*, self.node.y + f*, self._w + f, h + f*, roundness=)alpha = if self.fi < : alpha = * self.fiif self.fn-self.fi < : alpha = * (self.fn-self.fi)self._ctx.fill(self.text.r,self.text.g,self.text.b,self.text.a * alpha)self._ctx.translate(self.node.x + f*, self.node.y + f*)self._ctx.drawpath(p)", "docstring": "Draws a popup rectangle with a rotating text queue.", "id": "f11610:c2:m3"} {"signature": "def copy(self, graph):", "body": "l = self.__class__(graph, self.n)l.i = return l", "docstring": "Returns a copy of the layout for the given graph.", "id": "f11611:c1:m1"} {"signature": "def sorted(list, cmp=None, reversed=False):", "body": "list = [x for x in list]list.sort(cmp)if reversed: list.reverse()return list", "docstring": "Returns a sorted copy of the list.", "id": "f11612:m0"} {"signature": "def unique(list):", "body": "unique = []; [unique.append(x) for x in list if x not in unique]return unique", "docstring": "Returns a copy of the list without duplicates.", "id": "f11612:m1"} {"signature": "def flatten(node, distance=):", "body": "if hasattr(node, \"\") and hasattr(node, \"\"):return [n.id for n in node.nodes]all = [node]if distance >= :for n in node.links: all += n.flatten(distance-)return unique(all)", "docstring": "Recursively lists the node and its links.\n\n Distance of 0 will return the given [node].\n Distance of 1 will return a list of the node and all its links.\n Distance of 2 will also include the linked nodes' links, etc.", "id": "f11612:m2"} {"signature": "def intersection(a, b):", "body": "return filter(lambda x: x in a, b)", "docstring": "Returns the intersection of lists.\n a & b -> elements that appear in a as well as in b.", "id": "f11612:m3"} {"signature": "def union(a, b):", "body": "return a + filter(lambda x: x not in a, b)", "docstring": "Returns the union of lists.\n a | b -> all elements from a and all the elements from b.", "id": "f11612:m4"} {"signature": "def difference(a, b):", "body": "return filter(lambda x: x not in b, a)", "docstring": "Returns the difference of lists.\n a - b -> elements that appear in a but not in b.", "id": "f11612:m5"} {"signature": "def subgraph(graph, id, distance=):", "body": "g = graph.copy(empty=True)if isinstance(id, (FunctionType, LambdaType)):id = [node.id for node in filter(id, graph.nodes)]if not isinstance(id, (list, tuple)):id = [id]for id in id:for n in flatten(graph[id], distance):g.add_node(n.id, n.r, n.style, n.category, n.label, (n==graph.root), n.__dict__)for e in graph.edges:if g.has_key(e.node1.id) andg.has_key(e.node2.id):g.add_edge(e.node1.id, 
e.node2.id, e.weight, e.length, e.label, e.__dict__)return g", "docstring": "Creates the subgraph of the flattened node with given id (or list of id's).\n Finds all the edges between the nodes that make up the subgraph.", "id": "f11612:m6"} {"signature": "def is_clique(graph):", "body": "if graph.density < : return Falsereturn True", "docstring": "A clique is a set of nodes in which each node is connected to all other nodes.", "id": "f11612:m7"} {"signature": "def clique(graph, id):", "body": "clique = [id]for n in graph.nodes:friend = Truefor id in clique:if n.id == id or graph.edge(n.id, id) == None:friend = Falsebreakif friend:clique.append(n.id)return clique", "docstring": "Returns the largest possible clique for the node with given id.", "id": "f11612:m8"} {"signature": "def cliques(graph, threshold=):", "body": "cliques = []for n in graph.nodes:c = clique(graph, n.id)if len(c) >= threshold: c.sort()if c not in cliques:cliques.append(c)return cliques", "docstring": "Returns all the cliques in the graph of at least the given size.", "id": "f11612:m9"} {"signature": "def partition(graph):", "body": "g = []for n in graph.nodes:c = [n.id for n in flatten(n)]f = Falsefor i in range(len(g)):if len(intersection(g[i], c)) > :g[i] = union(g[i], c)f = Truebreakif not f:g.append(c)merged = []for i in range(len(g)):merged.append(g[i])for j in range(i+, len(g)):if len(intersection(g[i], g[j])) > :merged[-].extend(g[j])g[j] = []g = mergedg = [graph.sub(g, distance=) for g in g]g.sort(lambda a, b: len(b) - len(a)) return g", "docstring": "Splits unconnected subgraphs.\n\n For each node in the graph, make a list of its id and all directly connected id's.\n If one of the nodes in this list intersects with a subgraph,\n they are all part of that subgraph.\n Otherwise, this list is part of a new subgraph.\n Return a list of subgraphs sorted by size (biggest-first).", "id": "f11612:m10"} {"signature": "def requirements(debug=True, with_examples=True, with_pgi=None):", "body": "reqs = list(BASE_REQUIREMENTS)if with_pgi is None:with_pgi = is_jythonif debug:print(\"\")print(\"\", \"\" if with_pgi else \"\")print(\"\", \"\" if with_examples else \"\")if with_pgi:reqs.append(\"\")if debug:print(\"\")else:reqs.append(PYGOBJECT)if with_examples:reqs.extend(EXAMPLE_REQUIREMENTS)if debug:print(\"\")print(\"\")for req in reqs:print(req)return reqs", "docstring": "Build requirements based on flags\n\n:param with_pgi: Use 'pgi' instead of 'gi' - False on CPython, True elsewhere\n:param with_examples:\n:return:", "id": "f11613:m0"} {"signature": "@classmethoddef cast(cls, fx_spot, domestic_curve=None, foreign_curve=None):", "body": "assert domestic_curve.origin == foreign_curve.originreturn cls(fx_spot, domestic_curve=domestic_curve, foreign_curve=foreign_curve)", "docstring": "creator method to build FxCurve\n\n:param float fx_spot: fx spot rate\n:param RateCurve domestic_curve: domestic discount curve\n:param RateCurve foreign_curve: foreign discount curve\n:return:", "id": "f11614:c0:m0"} {"signature": "def __init__(self, currency, domestic_curve):", "body": "super(FxContainer, self).__init__()self.currency = currencyself.domestic_curve = domestic_curveself.add(currency, domestic_curve)", "docstring": ":param currency: base currency of FxContainer\n:param RateCurve domestic_curve: base curve of FxContainer for discounting", "id": "f11614:c1:m0"} {"signature": "def add(self, foreign_currency, foreign_curve=None, fx_spot=):", "body": "assert isinstance(foreign_currency, type(self.currency))assert 
isinstance(foreign_curve, curve.RateCurve)assert isinstance(fx_spot, float)self[self.currency, foreign_currency] = FxCurve.cast(fx_spot, self.domestic_curve, foreign_curve)self[foreign_currency, self.currency] = FxCurve.cast( / fx_spot, foreign_curve, self.domestic_curve)f = foreign_currencynew = dict()for d, s in self:if s is self.currency and d is not foreign_currency:triangulated = self[d, s](self.domestic_curve.origin) * fx_spotif (d, f) in self:self[d, f].foreign_curve = foreign_curveself[d, f].fx_spot = triangulatedself[f, d].domestic_curve = foreign_curveself[f, d].fx_spot = / triangulatedelse:new[d, f] = FxCurve.cast(triangulated, self[d, s].domestic_curve, foreign_curve)new[f, d] = FxCurve.cast( / triangulated, foreign_curve, self[d, s].domestic_curve)self.update(new)", "docstring": "adds contents to FxShelf.\nIf curve is FxCurve or FxDict, spot should turn curve.currency into self.currency,\nelse spot should turn currency into self.currency by\nN in EUR * spot = N in USD for currency = EUR and self.currency = USD", "id": "f11614:c1:m3"} {"signature": "def update(self, x_list=list(), y_list=list()):", "body": "if not y_list:for x in x_list:if x in self.x_list:i = self.x_list.index(float(x))self.x_list.pop(i)self.y_list.pop(i)else:x_list = map(float, x_list)y_list = map(float, y_list)data = [(x, y) for x, y in zip(self.x_list, self.y_list) if x not in x_list]data.extend(zip(x_list, y_list))data = sorted(data)self.x_list = [float(x) for (x, y) in data]self.y_list = [float(y) for (x, y) in data]", "docstring": "update interpolation data\n:param list(float) x_list: x values\n:param list(float) y_list: y values", "id": "f11615:c0:m3"} {"signature": "def __init__(self, x_list=list(), y_list=list(), boundary_condition=None):", "body": "super(spline, self).__init__(x_list, y_list) self.intervals = list()self.interpolation_coefficients = list()self.boundary_condition = boundary_conditionfor i in range(, len(self.x_list) - ):self.intervals.append([self.x_list[i], self.x_list[i + ]])if self.y_list:self.set_interpolation_coefficients()", "docstring": ":param x_list: data\n:param y_list: data\n:param boundary_condition: Either a tuple (l, r) of values for the slope or None.\n If the argument is not specified then None will be taken as boundary conditions, which\n leads to the so called not-a-knot method for splines. 
Not-a-knot will determine the boundary conditions by also\n requiring that the third derivatives of the two most left and the two most right interpolation polynomials agree.\n The boundary condition (0,0) will lead to the so called natural spline", "id": "f11615:c9:m0"} {"signature": "def __call__(self, x):", "body": "if not self.y_list:raise (OverflowError, \"\")ival = spline.get_interval(x, self.intervals)i = self.intervals.index(ival)t = (x - ival[]) / (ival[] - ival[])y = self.y_lista = self.interpolation_coefficients[i][]b = self.interpolation_coefficients[i][]return ( - t) * y[i] + t * y[i + ] + t * ( - t) * (a * ( - t) + b * t)", "docstring": "returns the interpolated value for the point x.\n:param x:\n:return:", "id": "f11615:c9:m1"} {"signature": "@staticmethoddef get_interval(x, intervals):", "body": "n = len(intervals)if n < :return intervals[]n2 = n / if x < intervals[n2][]:return spline.get_interval(x, intervals[:n2])else:return spline.get_interval(x, intervals[n2:])", "docstring": "finds interval of the interpolation in which x lies.\n:param x:\n:param intervals: the interpolation intervals\n:return:", "id": "f11615:c9:m2"} {"signature": "def set_interpolation_coefficients(self):", "body": "left_boundary_slope = right_boundary_slope = if isinstance(self.boundary_condition, tuple):left_boundary_slope = self.boundary_condition[]right_boundary_slope = self.boundary_condition[]elif self.boundary_condition is None:passelse:msg = ''''.format(self.boundary_condition, type(self.boundary_condition))raise ValueError(msg)n = len(self.x_list)mat = numpy.zeros((n, n))b = numpy.zeros((n, ))x = self.x_listy = self.y_listif n > :for i in range(, n - ):mat[i, i - ] = / (x[i] - x[i - ])mat[i, i + ] = / (x[i + ] - x[i])mat[i, i] = * (mat[i, i - ] + mat[i, i + ])b[i, ] = * ((y[i] - y[i - ]) / (x[i] - x[i - ]) ** + (y[i + ] - y[i]) / (x[i + ] - x[i]) ** )elif n < :raise ValueError('')if self.boundary_condition is None: mat[, ] = / (x[] - x[]) ** mat[, ] = - / (x[] - x[]) ** mat[, ] = mat[, ] + mat[, ]b[, ] = * ((y[] - y[]) / (x[] - x[]) ** - (y[] - y[]) / (x[] - x[]) ** )mat[n - , n - ] = / (x[n - ] - x[n - ]) ** mat[n - , n - ] = - / (x[n - ] - x[n - ]) ** mat[n - , n - ] = mat[n - , n - ] + mat[n - , n - ]b[n - , ] = * ((y[n - ] - y[n - ]) / (x[n - ] - x[n - ]) ** - (y[n - ] - y[n - ]) / (x[n - ] - x[n - ]) ** )else:mat[, ] = / (x[] - x[])mat[, ] = / (x[] - x[])b[, ] = * (y[] - y[]) / (x[] - x[]) ** - * left_boundary_slopemat[n - , n - ] = / (x[n - ] - x[n - ])mat[n - , n - ] = / (x[n - ] - x[n - ])b[n - , ] = * (y[n - ] - y[n - ]) / (x[n - ] - x[n - ]) ** + * right_boundary_slopek = numpy.linalg.solve(mat, b)for i in range(, n):c1 = k[i - , ] * (x[i] - x[i - ]) - (y[i] - y[i - ])c2 = -k[i, ] * (x[i] - x[i - ]) + (y[i] - y[i - ])self.interpolation_coefficients.append([c1, c2])", "docstring": "computes the coefficients for the single polynomials of the spline.", "id": "f11615:c9:m3"} {"signature": "def __init__(self, x_list=None, y_list=None, y_inter=None):", "body": "if not y_inter:y_inter = interpolation.linear()y_left, y_mid, y_right = interpolation.constant(), interpolation.linear(), interpolation.constant()if isinstance(y_inter, (tuple, list)):if len(y_inter) == :y_left, y_mid, y_right = y_interelif len(y_inter) == :y_mid, y_right = y_intery_left = y_rightelif len(y_inter) == :y_mid = y_inter[]else:raise ValueErrorelif isinstance(y_inter, interpolation.base_interpolation):y_mid = y_interelse:raise AttributeErrorassert len(x_list) == len(y_list)assert len(x_list) == 
len(set(x_list))self._y_mid = type(y_mid)(x_list, y_list)self._y_right = type(y_right)(x_list, y_list)self._y_left = type(y_left)(x_list, y_list)", "docstring": "r\"\"\"\n Curve object to build function\n\n :param list(float) x_list: source values\n :param list(float) y_list: target values\n :param list(interpolation.interpolation) y_inter: interpolation function on x_list (optional)\n or triple of (left, mid, right) interpolation functions with\n left for x < x_list[0] (as default triple.right is used)\n right for x > x_list][-1] (as default interpolation.constant is used)\n mid else (as default interpolation.linear is used)\n\n Curve object to build function :math:`f:R \\rightarrow R, x \\mapsto y`\n from finite point vectors :math:`x` and :math:`y`\n using piecewise various interpolation functions.", "id": "f11617:c0:m0"} {"signature": "def _frange(start, stop=None, step=None):", "body": "if stop is None:stop = startstart = if step is None:step = r = startwhile r < stop:yield rr += step", "docstring": "_frange range like function for float inputs\n:param start:\n:type start:\n:param stop:\n:type stop:\n:param step:\n:type step:\n:return:\n:rtype:", "id": "f11618:m0"} {"signature": "def __getitem__(self, item):", "body": "amount = super(RateCashFlowList, self).__getitem__(item)if self.forward_curve is None:return self.fixed_rate * amountelse:return (self.fixed_rate + self.forward_curve(item)) * amount", "docstring": "getitem does re-calc float cash flows and does not use store notional values", "id": "f11618:c3:m1"} {"signature": "def __getitem__(self, item):", "body": "super(MultiCashFlowList, self).__getitem__(item)return sum([l[item] for l in self.legs if item in l])", "docstring": "getitem does re-calc float cash flows and does not use store notional values", "id": "f11618:c4:m1"} {"signature": "def interest_accrued(self, valuation_date):", "body": "return sum([l.interest_accrued(valuation_date) for l in self.legs if hasattr(l, '')])", "docstring": "interest_accrued\n:param valuation_date:\n:type valuation_date:\n:return:\n:rtype:", "id": "f11618:c4:m3"} {"signature": "def key_for_name(name):", "body": "return '' % name", "docstring": "Return the key name used to store the given queue name in Redis.", "id": "f11623:m0"} {"signature": "@propertydef key(self):", "body": "return key_for_name(self.name)", "docstring": "Return the key name used to store this queue in Redis.", "id": "f11623:c0:m2"} {"signature": "def clear(self):", "body": "self.__redis.delete(self.key)", "docstring": "Clear the queue of all messages, deleting the Redis key.", "id": "f11623:c0:m3"} {"signature": "def consume(self, **kwargs):", "body": "kwargs.setdefault('', True)try:while True:msg = self.get(**kwargs)if msg is None:breakyield msgexcept KeyboardInterrupt:print; return", "docstring": "Return a generator that yields whenever a message is waiting in the\n queue. Will block otherwise. Example:\n\n >>> for msg in queue.consume(timeout=1):\n ... print msg\n my message\n another message\n\n :param kwargs: any arguments that :meth:`~hotqueue.HotQueue.get` can\n accept (:attr:`block` will default to ``True`` if not given)", "id": "f11623:c0:m4"} {"signature": "def get(self, block=False, timeout=None):", "body": "if block:if timeout is None:timeout = msg = self.__redis.blpop(self.key, timeout=timeout)if msg is not None:msg = msg[]else:msg = self.__redis.lpop(self.key)if msg is not None and self.serializer is not None:msg = self.serializer.loads(msg)return msg", "docstring": "Return a message from the queue. 
Example:\n\n >>> queue.get()\n 'my message'\n >>> queue.get()\n 'another message'\n\n :param block: whether or not to wait until a msg is available in\n the queue before returning; ``False`` by default\n :param timeout: when using :attr:`block`, if no msg is available\n for :attr:`timeout` in seconds, give up and return ``None``", "id": "f11623:c0:m5"} {"signature": "def put(self, *msgs):", "body": "if self.serializer is not None:msgs = map(self.serializer.dumps, msgs)self.__redis.rpush(self.key, *msgs)", "docstring": "Put one or more messages onto the queue. Example:\n\n >>> queue.put(\"my message\")\n >>> queue.put(\"another message\")\n\n To put messages onto the queue in bulk, which can be significantly\n faster if you have a large number of messages:\n\n >>> queue.put(\"my message\", \"another message\", \"third message\")", "id": "f11623:c0:m6"} {"signature": "def worker(self, *args, **kwargs):", "body": "def decorator(worker):@wraps(worker)def wrapper(*args):for msg in self.consume(**kwargs):worker(*args + (msg,))return wrapperif args:return decorator(*args)return decorator", "docstring": "Decorator for using a function as a queue worker. Example:\n\n >>> @queue.worker(timeout=1)\n ... def printer(msg):\n ... print msg\n >>> printer()\n my message\n another message\n\n You can also use it without passing any keyword arguments:\n\n >>> @queue.worker\n ... def printer(msg):\n ... print msg\n >>> printer()\n my message\n another message\n\n :param kwargs: any arguments that :meth:`~hotqueue.HotQueue.get` can\n accept (:attr:`block` will default to ``True`` if not given)", "id": "f11623:c0:m7"} {"signature": "def setUp(self):", "body": "self.queue = HotQueue('')", "docstring": "Create the queue instance before the test.", "id": "f11625:c1:m0"} {"signature": "def tearDown(self):", "body": "self.queue.clear()", "docstring": "Clear the queue after the test.", "id": "f11625:c1:m1"} {"signature": "def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,to_dir=os.curdir, delay=):", "body": "to_dir = os.path.abspath(to_dir)try:from urllib.request import urlopenexcept ImportError:from urllib2 import urlopentgz_name = \"\" % versionurl = download_base + tgz_namesaveto = os.path.join(to_dir, tgz_name)src = dst = Noneif not os.path.exists(saveto): try:log.warn(\"\", url)src = urlopen(url)data = src.read()dst = open(saveto, \"\")dst.write(data)finally:if src:src.close()if dst:dst.close()return os.path.realpath(saveto)", "docstring": "Download distribute from a specified location and return its filename\n\n `version` should be a valid distribute version number that is available\n as an egg for download under the `download_base` URL (which should end\n with a '/'). 
`to_dir` is the directory where the egg will be downloaded.\n `delay` is the number of seconds to pause before an actual download\n attempt.", "id": "f11626:m4"} {"signature": "def _patch_file(path, content):", "body": "existing_content = open(path).read()if existing_content == content:log.warn('')return Falselog.warn('')_rename_path(path)f = open(path, '')try:f.write(content)finally:f.close()return True", "docstring": "Will backup the file then patch it", "id": "f11626:m6"} {"signature": "def _extractall(self, path=\"\", members=None):", "body": "import copyimport operatorfrom tarfile import ExtractErrordirectories = []if members is None:members = selffor tarinfo in members:if tarinfo.isdir():directories.append(tarinfo)tarinfo = copy.copy(tarinfo)tarinfo.mode = self.extract(tarinfo, path)if sys.version_info < (, ):def sorter(dir1, dir2):return cmp(dir1.name, dir2.name)directories.sort(sorter)directories.reverse()else:directories.sort(key=operator.attrgetter(''), reverse=True)for tarinfo in directories:dirpath = os.path.join(path, tarinfo.name)try:self.chown(tarinfo, dirpath)self.utime(tarinfo, dirpath)self.chmod(tarinfo, dirpath)except ExtractError:e = sys.exc_info()[]if self.errorlevel > :raiseelse:self._dbg(, \"\" % e)", "docstring": "Extract all members from the archive to the current working\n directory and set owner, modification time and permissions on\n directories afterwards. `path' specifies a different directory\n to extract to. `members' is optional and must be a subset of the\n list returned by getmembers().", "id": "f11626:m17"} {"signature": "def main(argv, version=DEFAULT_VERSION):", "body": "tarball = download_setuptools()_install(tarball)", "docstring": "Install or upgrade setuptools and EasyInstall", "id": "f11626:m18"} {"signature": "def memberness(context):", "body": "if context:texts = context.xpath('').extract()text = str(texts).lower()if len(texts) > :return elif '' in text:return elif '' not in text:return elif '' in text:return elif '' in text:return return ", "docstring": "The likelihood that the context is a \"member\".", "id": "f11628:m14"} {"signature": "def make_url(symbol, start_date='', end_date=''):", "body": "return ''% (symbol, end_date, start_date)", "docstring": "A URL that lists all 10-Q and 10-K filings of a company.", "id": "f11630:m0"} {"signature": "def _adjust_delay(self, slot, response):", "body": "if response.status in self.retry_http_codes:new_delay = max(slot.delay, ) * new_delay = max(new_delay, self.mindelay)new_delay = min(new_delay, self.maxdelay)slot.delay = new_delayself.stats.inc_value('')elif response.status == :new_delay = max(slot.delay / , self.mindelay)if new_delay < :new_delay = slot.delay = new_delay", "docstring": "Define delay adjustment policy", "id": "f11636:c0:m8"} {"signature": "def parse_10qk(self, response):", "body": "loader = ReportItemLoader(response=response)item = loader.load_item()if '' in item:doc_type = item['']if doc_type in ('', ''):return itemreturn None", "docstring": "Parse 10-Q or 10-K XML report.", "id": "f11640:c1:m1"} {"signature": "async def _try_catch_coro(emitter, event, listener, coro):", "body": "try:await coroexcept Exception as exc:if event == emitter.LISTENER_ERROR_EVENT:raiseemitter.emit(emitter.LISTENER_ERROR_EVENT, event, listener, exc)", "docstring": "Coroutine wrapper to catch errors after async scheduling.\n\n Args:\n emitter (EventEmitter): The event emitter that is attempting to\n call a listener.\n event (str): The event that triggered the emitter.\n listener (async def): The async def 
that was used to generate the coro.\n coro (coroutine): The coroutine that should be tried.\n\n If an exception is caught the function will use the emitter to emit the\n failure event. If, however, the current event _is_ the failure event then\n the method reraises. The reraised exception may show in debug mode for the\n event loop but is otherwise silently dropped.", "id": "f11647:m0"} {"signature": "def __init__(self, loop=None):", "body": "self._loop = loop or asyncio.get_event_loop()self._listeners = collections.defaultdict(list)self._once = collections.defaultdict(list)self._max_listeners = self.DEFAULT_MAX_LISTENERS", "docstring": "Initialize the emitter with an event loop.", "id": "f11647:c0:m0"} {"signature": "def _check_limit(self, event):", "body": "if self.count(event) > self.max_listeners:warnings.warn(''.format(event),ResourceWarning,)", "docstring": "Check if the listener limit is hit and warn if needed.", "id": "f11647:c0:m1"} {"signature": "def add_listener(self, event, listener):", "body": "self.emit('', event, listener)self._listeners[event].append(listener)self._check_limit(event)return self", "docstring": "Bind a listener to a particular event.\n\n Args:\n event (str): The name of the event to listen for. This may be any\n string value.\n listener (def or async def): The callback to execute when the event\n fires. This may be a sync or async function.", "id": "f11647:c0:m2"} {"signature": "def once(self, event, listener):", "body": "self.emit('', event, listener)self._once[event].append(listener)self._check_limit(event)return self", "docstring": "Add a listener that is only called once.", "id": "f11647:c0:m3"} {"signature": "def remove_listener(self, event, listener):", "body": "with contextlib.suppress(ValueError):self._listeners[event].remove(listener)return Truewith contextlib.suppress(ValueError):self._once[event].remove(listener)return Truereturn False", "docstring": "Remove a listener from the emitter.\n\n Args:\n event (str): The event name on which the listener is bound.\n listener: A reference to the same object given to add_listener.\n\n Returns:\n bool: True if a listener was removed else False.\n\n This method only removes one listener at a time. If a listener is\n attached multiple times then this method must be called repeatedly.\n Additionally, this method removes listeners first from the those\n registered with 'on' or 'add_listener'. If none are found it continue\n to remove afterwards from those added with 'once'.", "id": "f11647:c0:m4"} {"signature": "def remove_all_listeners(self, event=None):", "body": "if event is None:self._listeners = collections.defaultdict(list)self._once = collections.defaultdict(list)else:del self._listeners[event]del self._once[event]", "docstring": "Remove all listeners, or those of the specified *event*.\n\n It's not a good idea to remove listeners that were added elsewhere in\n the code, especially when it's on an emitter that you didn't create\n (e.g. 
sockets or file streams).", "id": "f11647:c0:m5"} {"signature": "@propertydef max_listeners(self):", "body": "return self._max_listeners", "docstring": "Get the max number of listeners before warning.", "id": "f11647:c0:m6"} {"signature": "@max_listeners.setterdef max_listeners(self, value):", "body": "self._max_listeners = value", "docstring": "Set the max number of listeners before warning.", "id": "f11647:c0:m7"} {"signature": "def listeners(self, event):", "body": "return self._listeners[event][:] + self._once[event][:]", "docstring": "Get an iterable of all listeners for the given event.\n\n Args:\n event (str): The name of the event for which to generate an\n iterable of listeners.\n\n The resulting iterable contains all listeners regardless of whether\n they were registered with 'on'/'add_listener' or 'once'.", "id": "f11647:c0:m8"} {"signature": "def _dispatch_coroutine(self, event, listener, *args, **kwargs):", "body": "try:coro = listener(*args, **kwargs)except Exception as exc:if event == self.LISTENER_ERROR_EVENT:raisereturn self.emit(self.LISTENER_ERROR_EVENT, event, listener, exc)asyncio.ensure_future(_try_catch_coro(self, event, listener, coro),loop=self._loop,)", "docstring": "Schedule a coroutine for execution.\n\n Args:\n event (str): The name of the event that triggered this call.\n listener (async def): The async def that needs to be executed.\n *args: Any number of positional arguments.\n **kwargs: Any number of keyword arguments.\n\n The values of *args and **kwargs are passed, unaltered, to the async\n def when generating the coro. If there is an exception generating the\n coro, such as the wrong number of arguments, the emitter's error event\n is triggered. If the triggering event _is_ the emitter's error event\n then the exception is reraised. The reraised exception may show in\n debug mode for the event loop but is otherwise silently dropped.", "id": "f11647:c0:m9"} {"signature": "def _dispatch_function(self, event, listener, *args, **kwargs):", "body": "try:return listener(*args, **kwargs)except Exception as exc:if event == self.LISTENER_ERROR_EVENT:raisereturn self.emit(self.LISTENER_ERROR_EVENT, event, listener, exc)", "docstring": "Execute a sync function.\n\n Args:\n event (str): The name of the event that triggered this call.\n listener (def): The def that needs to be executed.\n *args: Any number of positional arguments.\n **kwargs: Any number of keyword arguments.\n\n The values of *args and **kwargs are passed, unaltered, to the def\n when exceuting. If there is an exception executing the def, such as the\n wrong number of arguments, the emitter's error event is triggered. If\n the triggering event _is_ the emitter's error event then the exception\n is reraised. The reraised exception may show in debug mode for the\n event loop but is otherwise silently dropped.", "id": "f11647:c0:m10"} {"signature": "def _dispatch(self, event, listener, *args, **kwargs):", "body": "if (asyncio.iscoroutinefunction(listener) orisinstance(listener, functools.partial) andasyncio.iscoroutinefunction(listener.func)):return self._dispatch_coroutine(event, listener, *args, **kwargs)return self._dispatch_function(event, listener, *args, **kwargs)", "docstring": "Dispatch an event to a listener.\n\n Args:\n event (str): The name of the event that triggered this call.\n listener (def or async def): The listener to trigger.\n *args: Any number of positional arguments.\n **kwargs: Any number of keyword arguments.\n\n This method inspects the listener. 
If it is a def it dispatches the\n listener to a method that will execute that def. If it is an async def\n it dispatches it to a method that will schedule the resulting coro with\n the event loop.", "id": "f11647:c0:m11"} {"signature": "def emit(self, event, *args, **kwargs):", "body": "listeners = self._listeners[event]listeners = itertools.chain(listeners, self._once[event])self._once[event] = []for listener in listeners:self._loop.call_soon(functools.partial(self._dispatch,event,listener,*args,**kwargs,))return self", "docstring": "Call each listener for the event with the given arguments.\n\n Args:\n event (str): The event to trigger listeners on.\n *args: Any number of positional arguments.\n **kwargs: Any number of keyword arguments.\n\n This method passes all arguments other than the event name directly\n to the listeners. If a listener raises an exception for any reason the\n 'listener-error', or current value of LISTENER_ERROR_EVENT, is emitted.\n Listeners to this event are given the event name, listener object, and\n the exception raised. If an error listener fails it does so silently.\n\n All event listeners are fired in a deferred way so this method returns\n immediately. The calling coro must yield at some point for the event\n to propagate to the listeners.", "id": "f11647:c0:m12"} {"signature": "def count(self, event):", "body": "return len(self._listeners[event]) + len(self._once[event])", "docstring": "Get the number of listeners for the event.\n\n Args:\n event (str): The event for which to count all listeners.\n\n The resulting count is a combination of listeners added using\n 'on'/'add_listener' and 'once'.", "id": "f11647:c0:m13"} {"signature": "def __init__(self, emitter, event):", "body": "self._emitter = emitterself._event = eventself._emitter.on(event, self._push)self._data = collections.deque()self._future = None", "docstring": "Initialize the iterator with an emitter and event to fire on.", "id": "f11648:c0:m0"} {"signature": "async def _push(self, *args, **kwargs):", "body": "self._data.append((args, kwargs))if self._future is not None:future, self._future = self._future, Nonefuture.set_result(True)", "docstring": "Push new data into the buffer. Resume looping if paused.", "id": "f11648:c0:m1"} {"signature": "async def __anext__(self):", "body": "if self._data:return self._data.popleft()self._future = asyncio.Future()await self._futurereturn self._data.popleft()", "docstring": "Fetch the next set of values. 
Wait for new values if empty.", "id": "f11648:c0:m2"} {"signature": "def __init__(self, emitter, event):", "body": "self._emitter = emitterself._event = event", "docstring": "Initialize the iterable with an emitter and target event.", "id": "f11648:c1:m0"} {"signature": "async def __aiter__(self):", "body": "return EventIterator(self._emitter, self._event)", "docstring": "Get a new EventIterator object.", "id": "f11648:c1:m1"} {"signature": "def terminate_thread(thread):", "body": "if not thread.isAlive():returnexc = ctypes.py_object(SystemExit)res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread.ident), exc)if res == :raise ValueError(\"\")elif res > :ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)raise SystemError(\"\")", "docstring": "Terminates a python thread from another thread.\n\n :param thread: a threading.Thread instance", "id": "f11654:m1"} {"signature": "def bracket_split(source, brackets=('', '', ''), strip=False):", "body": "starts = [e[] for e in brackets]in_bracket = n = last = while n < len(source):e = source[n]if not in_bracket and e in starts:in_bracket = start = nb_start, b_end = brackets[starts.index(e)]elif in_bracket:if e == b_start:in_bracket += elif e == b_end:in_bracket -= if not in_bracket:if source[last:start]:yield source[last:start]last = n + yield source[start + strip:n + - strip]n += if source[last:]:yield source[last:]", "docstring": "DOES NOT RETURN EMPTY STRINGS (can only return empty bracket content if strip=True)", "id": "f11658:m3"} {"signature": "def pass_bracket(source, start, bracket=''):", "body": "e = bracket_split(source[start:], [bracket], False)try:cand = next(e)except StopIteration:return None, Noneif not cand.strip(): try:res = next(e)return res, start + len(cand) + len(res)except StopIteration:return None, Noneelif cand[-] == bracket[]:return cand, start + len(cand)else:return None, None", "docstring": "Returns content of brackets with brackets and first pos after brackets\n if source[start] is followed by some optional white space and brackets.\n Otherwise None", "id": "f11658:m4"} {"signature": "def except_token(source, start, token, throw=True):", "body": "start = pass_white(source, start)if start < len(source) and source[start] == token:return start + if throw:raise SyntaxError('' % token)return None", "docstring": "Token can be only a single char. Returns position after token if found. 
Otherwise raises syntax error if throw\n otherwise returns None", "id": "f11658:m8"} {"signature": "def except_keyword(source, start, keyword):", "body": "start = pass_white(source, start)kl = len(keyword) if kl + start > len(source):return Noneif source[start:start + kl] != keyword:return Noneif kl + start < len(source) and source[start + kl] in IDENTIFIER_PART:return Nonereturn start + kl", "docstring": "Returns position after keyword if found else None\n Note: skips white space", "id": "f11658:m9"} {"signature": "def parse_identifier(source, start, throw=True):", "body": "start = pass_white(source, start)end = startif not end < len(source):if throw:raise SyntaxError('')return Noneif source[end] not in IDENTIFIER_START:if throw:raise SyntaxError('' % source[end])return Noneend += while end < len(source) and source[end] in IDENTIFIER_PART:end += if not is_valid_lval(source[start:end]):if throw:raise SyntaxError('' % source[start:end])return Nonereturn source[start:end], end", "docstring": "passes white space from start and returns first identifier,\n if identifier invalid and throw raises SyntaxError otherwise returns None", "id": "f11658:m10"} {"signature": "def argsplit(args, sep=''):", "body": "parsed_len = last = splits = []for e in bracket_split(args, brackets=['', '', '']):if e[] not in {'', '', ''}:for i, char in enumerate(e):if char == sep:splits.append(args[last:parsed_len + i])last = parsed_len + i + parsed_len += len(e)splits.append(args[last:])return splits", "docstring": "used to split JS args (it is not that simple as it seems because\n sep can be inside brackets).\n\n pass args *without* brackets!\n\n Used also to parse array and object elements, and more", "id": "f11658:m11"} {"signature": "def split_add_ops(text):", "body": "n = text = text.replace('', '').replace('', '') spotted = False last = while n < len(text):e = text[n]if e == '' or e == '':if spotted:yield text[last:n].replace('', '').replace('', '')yield elast = n + spotted = Falseelif e == '' or e == '' or e == '':spotted = Falseelif e != '':spotted = Truen += yield text[last:n].replace('', '').replace('', '')", "docstring": "Specialized function splitting text at add/sub operators.\n Operands are *not* translated. 
Example result ['op1', '+', 'op2', '-', 'op3']", "id": "f11658:m12"} {"signature": "def split_at_any(text,lis,translate=False,not_before=[],not_after=[],validitate=None):", "body": "lis.sort(key=lambda x: len(x), reverse=True)last = n = text_len = len(text)while n < text_len:if any(text[:n].endswith(e)for e in not_before): n += continuefor e in lis:s = len(e)if s + n > text_len:continueif validitate and not validitate(e, text[:n], text[n + s:]):continueif any(text[n + s:].startswith(e)for e in not_after): n += breakif e == text[n:n + s]:yield text[last:n] if not translate else translate(text[last:n])yield en += slast = nbreakelse:n += yield text[last:n] if not translate else translate(text[last:n])", "docstring": "doc", "id": "f11658:m13"} {"signature": "def split_at_single(text, sep, not_before=[], not_after=[]):", "body": "n = lt, s = len(text), len(sep)last = while n < lt:if not s + n > lt:if sep == text[n:n + s]:if any(text[last:n].endswith(e) for e in not_before):passelif any(text[n + s:].startswith(e) for e in not_after):passelse:yield text[last:n]last = n + sn += s - n += yield text[last:]", "docstring": "Works like text.split(sep) but separated fragments\n cant end with not_before or start with not_after", "id": "f11658:m14"} {"signature": "def do_statement(source, start):", "body": "start = pass_white(source, start)if not start < len(source): return None, startif any(startswith_keyword(source[start:], e) for e in {'', ''}):return None, startrest = source[start:]for key, meth in KEYWORD_METHODS.items(): if rest.startswith(key):if len(key) == len(rest) or rest[len(key)] not in IDENTIFIER_PART:return meth(source, start)if rest[] == '': return do_block(source, start)cand = parse_identifier(source, start, False)if cand is not None: label, cand_start = candcand_start = pass_white(source, cand_start)if source[cand_start] == '':return do_label(source, start)return do_expression(source, start)", "docstring": "returns none if not found other functions that begin with 'do_' raise\n also this do_ type function passes white space", "id": "f11659:m5"} {"signature": "def translate_flow(source):", "body": "global TO_REGISTERTO_REGISTER = []return do_block('' % source, )[], TO_REGISTER", "docstring": "Source cant have arrays, object, constant or function literals.\n Returns PySource and variables to register", "id": "f11659:m23"} {"signature": "def translate_js(js, top=TOP_GLOBAL):", "body": "no_const, constants = remove_constants(js)no_obj, objects, obj_count = remove_objects(no_const)no_arr, arrays, arr_count = remove_arrays(no_obj)reset_inline_count()no_func, hoisted, inline = remove_functions(no_arr)py_seed, to_register = translate_flow(no_func)top += '' % str(to_register + list(hoisted.keys()))defs = ''for nested_name, nested_info in hoisted.items():nested_block, nested_args = nested_infonew_code = translate_func('', nested_block,nested_args)new_code += '' % repr(nested_name)defs += new_code + '' % repr(nested_name)for nested_name, nested_info in inline.items():nested_block, nested_args = nested_infonew_code = translate_func(nested_name, nested_block, nested_args)py_seed = inject_before_lval(py_seed,nested_name.split('')[], new_code)py_seed = defs + py_seedfor arr_lval, arr_code in arrays.items():translation, obj_count, arr_count = translate_array(arr_code, arr_lval, obj_count, arr_count)py_seed = inject_before_lval(py_seed, arr_lval, translation)for obj_lval, obj_code in objects.items():translation, obj_count, arr_count = translate_object(obj_code, obj_lval, obj_count, arr_count)py_seed = 
inject_before_lval(py_seed, obj_lval, translation)py_code = recover_constants(py_seed, constants)return top + py_code", "docstring": "js has to be a javascript source code.\n returns equivalent python code.", "id": "f11661:m0"} {"signature": "def translate_func(name, block, args):", "body": "inline = name.startswith('')real_name = ''if inline:name, real_name = name.split('')arglist = ''.join(args) + '' if args else ''code = '' % (name, arglist)scope = \"\" for arg in args:scope += '' % (repr(arg), arg)if real_name:scope += '' % (repr(real_name), name)code += indent('' % scope)block, nested_hoisted, nested_inline = remove_functions(block)py_code, to_register = translate_flow(block)to_register += list(nested_hoisted.keys())if to_register:code += indent('' % str(to_register))for nested_name, info in nested_hoisted.items():nested_block, nested_args = infonew_code = translate_func('', nested_block,nested_args)code += indent(new_code)code += indent('' % repr(nested_name))code += indent('' % repr(nested_name))for nested_name, info in nested_inline.items():nested_block, nested_args = infonew_code = translate_func(nested_name, nested_block, nested_args)py_code = inject_before_lval(py_code,nested_name.split('')[], new_code)if py_code.strip():code += indent(py_code)return code", "docstring": "Translates functions and all nested functions to Python code.\n name - name of that function (global functions will be available under var while\n inline will be available directly under this name )\n block - code of the function (*with* brackets {} )\n args - arguments that this function takes", "id": "f11661:m1"} {"signature": "def is_empty_object(n, last):", "body": "if n.strip():return Falselast = last.strip()markers = {'','',}if not last or last[-] in markers:return Falsereturn True", "docstring": "n may be the inside of block or object", "id": "f11662:m2"} {"signature": "def is_object(n, last):", "body": "if is_empty_object(n, last):return Trueif not n.strip():return Falseif len(argsplit(n, '')) > :return Falsecands = argsplit(n, '')if not cands[-].strip():return True for cand in cands:cand = cand.strip()kv = argsplit(cand, '')if len(kv) > : kv = kv[], ''.join(kv[:])if len(kv) == :k, v = kvif not is_lval(k.strip()):return Falsev = v.strip()if v.startswith(''):continueif v[] == '': return Falsefor e in KEYWORD_METHODS:if v.startswith(e) and len(e) < len(v) and v[len(e)] not in IDENTIFIER_PART:return Falseelif not (cand.startswith('') or cand.startswith('')):return Falsereturn True", "docstring": "n may be the inside of block or object.\n last is the code before object", "id": "f11662:m3"} {"signature": "def remove_objects(code, count=):", "body": "replacements = {} br = bracket_split(code, ['', ''])res = ''last = ''for e in br:if e[] == '':n, temp_rep, cand_count = remove_objects(e[:-], count)if is_object(n, last):res += '' + OBJECT_LVAL % countreplacements[OBJECT_LVAL % count] = ecount += else:res += '' % ncount = cand_countreplacements.update(temp_rep)elif e[] == '':if is_array(last):res += e else: n, rep, count = remove_objects(e[:-], count)res += '' % nreplacements.update(rep)else: res += elast = e return res, replacements, count", "docstring": "This function replaces objects with OBJECTS_LVALS, returns new code, replacement dict and count.\n count arg is the number that should be added to the LVAL of the first replaced object", "id": "f11662:m5"} {"signature": "def remove_arrays(code, count=):", "body": "res = ''last = ''replacements = {}for e in bracket_split(code, ['']):if e[] == '':if 
is_array(last):name = ARRAY_LVAL % countres += '' + namereplacements[name] = ecount += else: cand, new_replacements, count = remove_arrays(e[:-], count)res += '' % candreplacements.update(new_replacements)else:res += elast = ereturn res, replacements, count", "docstring": "removes arrays and replaces them with ARRAY_LVALS\n returns new code and replacement dict\n *NOTE* has to be called AFTER remove objects", "id": "f11662:m6"} {"signature": "def translate_array(array, lval, obj_count=, arr_count=):", "body": "array = array[:-]array, obj_rep, obj_count = remove_objects(array, obj_count)array, arr_rep, arr_count = remove_arrays(array, arr_count)array, hoisted, inline = functions.remove_functions(array, all_inline=True)assert not hoistedarr = []for e in argsplit(array, ''):e = exp_translator(e.replace('', ''))arr.append(e if e else '')arr = '' % (lval, ''.join(arr))for nested_name, nested_info in inline.items():nested_block, nested_args = nested_infonew_def = FUNC_TRANSLATOR(nested_name, nested_block, nested_args)arr = new_def + arrfor lval, obj in obj_rep.items():new_def, obj_count, arr_count = translate_object(obj, lval, obj_count, arr_count)arr = new_def + arrfor lval, obj in arr_rep.items():new_def, obj_count, arr_count = translate_array(obj, lval, obj_count, arr_count)arr = new_def + arrreturn arr, obj_count, arr_count", "docstring": "array has to be any js array for example [1,2,3]\n lval has to be name of this array.\n Returns python code that adds lval to the PY scope it should be put before lval", "id": "f11662:m10"} {"signature": "def _ensure_regexp(source, n): ", "body": "markers = ''k = while True:k += if n - k < :return Truechar = source[n - k]if char in markers:return Trueif char != '' and char != '':breakreturn False", "docstring": "returns True if regexp starts at n else returns False\n checks whether it is not a division", "id": "f11663:m1"} {"signature": "def parse_num(source, start, charset):", "body": "while start < len(source) and source[start] in charset:start += return start", "docstring": "Returns a first index>=start of chat not in charset", "id": "f11663:m2"} {"signature": "def parse_exponent(source, start):", "body": "if not source[start] in {'', ''}:if source[start] in IDENTIFIER_PART:raise SyntaxError('')return startstart += if source[start] in {'', ''}:start += FOUND = Falsewhile source[start] in NUMS:FOUND = Truestart += if not FOUND or source[start] in IDENTIFIER_PART:raise SyntaxError('')return start", "docstring": "returns end of exponential, raises SyntaxError if failed", "id": "f11663:m3"} {"signature": "def remove_constants(source):", "body": "source = '' + source + ''comments = []inside_comment, single_comment = False, Falseinside_single, inside_double = False, Falseinside_regexp = Falseregexp_class_count = n = while n < len(source):char = source[n]if char == '' and not (inside_comment or inside_singleor inside_regexp):if not _is_cancelled(source, n):if inside_double:inside_double[] = n + comments.append(inside_double)inside_double = Falseelse:inside_double = [n, None, ]elif char == \"\" and not (inside_comment or inside_doubleor inside_regexp):if not _is_cancelled(source, n):if inside_single:inside_single[] = n + comments.append(inside_single)inside_single = Falseelse:inside_single = [n, None, ]elif (inside_single or inside_double):if char in LINE_TERMINATOR:if _is_cancelled(source, n):if char == CR and source[n + ] == LF:n += n += continueelse:raise SyntaxError('')else:if inside_comment:if single_comment:if char in LINE_TERMINATOR:inside_comment[] = 
ncomments.append(inside_comment)inside_comment = Falsesingle_comment = Falseelse: if char == '' and source[n - ] == '':inside_comment[] = n + comments.append(inside_comment)inside_comment = Falseelif inside_regexp:if not quiting_regexp:if char in LINE_TERMINATOR:raise SyntaxError('')if _is_cancelled(source, n):n += continueif char == '':regexp_class_count += elif char == '':regexp_class_count = max(regexp_class_count - , )elif char == '' and not regexp_class_count:quiting_regexp = Trueelse:if char not in IDENTIFIER_START:inside_regexp[] = ncomments.append(inside_regexp)inside_regexp = Falseelif char == '' and source[n - ] == '':single_comment = Trueinside_comment = [n - , None, ]elif char == '' and source[n - ] == '':inside_comment = [n - , None, ]elif char == '' and source[n + ] not in ('', ''):if not _ensure_regexp(source, n): n += continue quiting_regexp = Falseinside_regexp = [n, None, ]elif not (inside_comment or inside_regexp):if (char in NUMS andsource[n - ] not in IDENTIFIER_PART) or char == '':if char == '':k = parse_num(source, n + , NUMS)if k == n + : n += continuek = parse_exponent(source, k)elif char == '' and source[n + ] in {'', ''}: k = parse_num(source, n + , HEX)if k == n + or source[k] in IDENTIFIER_PART:raise SyntaxError('')else: k = parse_num(source, n + , NUMS)if source[k] == '':k = parse_num(source, k + , NUMS)k = parse_exponent(source, k)comments.append((n, k, ))n = kcontinuen += res = ''start = count = constants = {}for end, next_start, typ in comments:res += source[start:end]start = next_startif typ == : name = StringNameelif typ == : continueelif typ == : name = RegExpNameelif typ == : name = NumberNameelse:raise RuntimeError()res += '' + name % count + ''constants[name % count] = source[end:next_start]count += res += source[start:]for e in WHITE:res = res.replace(e, '')res = res.replace(CR + LF, '')for e in LINE_TERMINATOR:res = res.replace(e, '')return res.strip(), constants", "docstring": "Replaces Strings and Regexp literals in the source code with\n identifiers and *removes comments*. 
Identifier is of the format:\n\n PyJsStringConst(String const number)_ - for Strings\n PyJsRegExpConst(RegExp const number)_ - for RegExps\n\n Returns dict which relates identifier and replaced constant.\n\n Removes single line and multiline comments from JavaScript source code\n Pseudo comments (inside strings) will not be removed.\n\n For example this line:\n var x = \"/*PSEUDO COMMENT*/ TEXT //ANOTHER PSEUDO COMMENT\"\n will be unaltered", "id": "f11663:m4"} {"signature": "def recover_constants(py_source,replacements): ", "body": "for identifier, value in replacements.items():if identifier.startswith(''):py_source = py_source.replace(identifier,'' % repr(value))elif identifier.startswith(''):py_source = py_source.replace(identifier, '' % unify_string_literals(value))else:py_source = py_source.replace(identifier, '' % value)return py_source", "docstring": "Converts identifiers representing Js constants to the PyJs constants\n PyJsNumberConst_1_ which has the true value of 5 will be converted to PyJsNumber(5)", "id": "f11663:m5"} {"signature": "def unify_string_literals(js_string):", "body": "n = res = ''limit = len(js_string)while n < limit:char = js_string[n]if char == '':new, n = do_escape(js_string, n)res += newelse:res += charn += return res", "docstring": "this function parses the string just like javascript\n for example literal '\\d' in JavaScript would be interpreted\n as 'd' - backslash would be ignored and in Pyhon this\n would be interpreted as '\\\\d' This function fixes this problem.", "id": "f11663:m6"} {"signature": "def do_escape(source, n):", "body": "if not n + < len(source):return '' if source[n + ] in LINE_TERMINATOR:if source[n + ] == CR and n + < len(source) and source[n + ] == LF:return source[n:n + ], n + return source[n:n + ], n + if source[n + ] in ESCAPE_CHARS:return source[n:n + ], n + if source[n + ] in {'', ''}:char, length = ('', ) if source[n + ] == '' else ('', )n += end = parse_num(source, n, HEX)if end - n < length:raise SyntaxError('')return source[n - :n + length], n + lengthif source[n + ] in OCTAL:n += end = parse_num(source, n, OCTAL)end = min(end, n + ) max_num = num = len_parsed = for e in source[n:end]:cand = * num + int(e)if cand > max_num:breaknum = candlen_parsed += return '' + hex(num)[:], n + len_parsedreturn source[n + ], n + ", "docstring": "Its actually quite complicated to cover every case :)\n http://www.javascriptkit.com/jsref/escapesequence.shtml", "id": "f11663:m8"} {"signature": "def rl(self, lis, op):", "body": "it = reversed(lis)res = trans(next(it))for e in it:e = trans(e)res = op(e, res)return res", "docstring": "performs this operation on a list from *right to left*\n op must take 2 args\n a,b,c => op(a, op(b, c))", "id": "f11665:c0:m1"} {"signature": "def lr(self, lis, op):", "body": "it = iter(lis)res = trans(next(it))for e in it:e = trans(e)res = op(res, e)return res", "docstring": "performs this operation on a list from *left to right*\n op must take 2 args\n a,b,c => op(op(a, b), c)", "id": "f11665:c0:m2"} {"signature": "def translate(self):", "body": "if not self.code:return ''new = bracket_replace(self.code)cand = new.split('') if len(cand) > : return self.lr(cand, js_comma)if '' in new:cond_ind = new.find('')tenary_start = for ass in re.finditer(ASSIGNMENT_MATCH, new):cand = ass.span()[]if cand < cond_ind:tenary_start = candelse:breakactual_tenary = new[tenary_start:]spl = ''.join(split_at_any(new, ['', ''], translate=trans))tenary_translation = transform_crap(spl)assignment = new[:tenary_start] + ''return 
trans(assignment).replace('',tenary_translation)cand = list(split_at_single(new, '', ['', '', '', '>'], ['']))if len(cand) > : it = reversed(cand)res = trans(next(it))for e in it:e = e.strip()if not e:raise SyntaxError('')op = ''if e[-:] in OP_METHODS:op = '' + e[-:].__repr__()e = e[:-]elif e[-:] in OP_METHODS:op = '' + e[-].__repr__()e = e[:-]e = trans(e)c = list(bracket_split(e, ['']))beg, arglist = ''.join(c[:-]).strip(), c[-].strip() if beg[-:] != '':raise SyntaxError('')beg = beg[:-] + ''arglist = arglist[:-] + '' + res + op + ''res = beg + arglistreturn resorder = [OR, AND, BOR, BXOR, BAND, EQS, COMPS, BSHIFTS, ADDS, MULTS]dangerous = ['', '>']for typ in order:if '' in typ:cand = list(split_add_ops(new))else:cand = list(split_at_any(new,list(typ.keys()),False,dangerous,dangerous,validitate=comb_validitator))if not len(cand) > :continuen = res = trans(cand[])if not res:raise SyntaxError(\"\")while n < len(cand):e = cand[n]if not e:raise SyntaxError(\"\")if n % :op = typ[e]else:res = op(res, trans(e))n += return rescand = list(split_at_any(new, list(UNARY.keys()), False, validitate=unary_validitator))if len(cand) > : if '' in cand or '' in cand: if '' in cand:op = ''meths = js_post_dec, js_pre_decelse:op = ''meths = js_post_inc, js_pre_incpos = cand.index(op)if cand[pos - ].strip(): a = cand[pos - ]meth = meths[]elif cand[pos + ].strip(): a = cand[pos + ]meth = meths[]else:raise SyntaxError('')if cand[pos + :]:raise SyntaxError('')operand = meth(trans(a))cand = cand[:pos - ]else:operand = trans(cand[-])del cand[-]for i, e in enumerate(reversed(cand)):if i % :if e.strip():raise SyntaxError('')else:operand = UNARY[e](operand)return operandif new[] == '' or new[] == '':if len(list(bracket_split(new, ('', '')))) == : assert new in REPLif new[] == '':raise SyntaxError('')return '' + trans(REPL[new][:-]) + ''it = bracket_split(new, ('', ''))res = []for e in it:if e[] != '' and e[] != '':res += [x.strip() for x in e.split('')]else:res += [e.strip()]res = [x for x in res if x]if is_internal(res[]):out = res[]elif res[][] in {'', ''}:out = '' + trans(REPL[res[]][:-]) + ''elif is_valid_lval(res[]) or res[] in {'', '', '', ''}:out = '' + res[].__repr__() + ''else:if is_reserved(res[]):raise SyntaxError('' % res[])raise SyntaxError('' % res[])if len(res) == :return outn = while n < len(res): e = res[n]if e[] == '': out += trans_args(REPL[e])n += continueargs = False if n + < len(res) and res[n + ][] == '': args = trans_args(REPL[res[n + ]])[:]if args != '':args = '' + argsif e[] == '':prop = trans(REPL[e][:-])else:if not is_lval(e):raise SyntaxError('' % e)prop = e.__repr__()if args: n += out += '' + prop + argselse: out += '' + prop + ''n += return out", "docstring": "Translates outer operation and calls translate on inner operation.\n Returns fully translated code.", "id": "f11665:c0:m3"} {"signature": "def remove_functions(source, all_inline=False):", "body": "global INLINE_COUNTinline = {}hoisted = {}n = limit = len(source) - res = ''last = while n < limit:if n and source[n - ] in IDENTIFIER_PART:n += continueif source[n:n + ] == '' and source[n +] not in IDENTIFIER_PART:if source[:n].rstrip().endswith(''): n += continueif source[n + :].lstrip().startswith(''): n += continueentered = nres += source[last:n]name = ''n = pass_white(source, n + )if source[n] in IDENTIFIER_START: name, n = parse_identifier(source, n)args, n = pass_bracket(source, n, '')if not args:raise SyntaxError('')args = args.strip('')args = tuple(parse_identifier(e, )[]for e in argsplit(args)) if args else ()if len(args) - 
len(set(args)):raise SyntaxError('')block, n = pass_bracket(source, n, '')if not block:raise SyntaxError('')mixed = False if name and not all_inline:before = source[:entered].rstrip()if any(endswith_keyword(before, e) for e in PRE_EXP_STARTS):mixed = Trueelif before and before[-] not in PRE_ALLOWED and not before[-:] in INCREMENTS:mixed = Trueelse:hoisted[name] = block, argsif not name or mixed or all_inline: INLINE_COUNT += iname = INLINE_NAME % INLINE_COUNT res += '' + inameinline['' % (iname, name)] = block, args last = nelse:n += res += source[last:]return res, hoisted, inline", "docstring": "removes functions and returns new source, and 2 dicts.\n first dict with removed hoisted(global) functions and second with replaced inline functions", "id": "f11666:m1"} {"signature": "def is_lval(t):", "body": "if not t:return Falsei = iter(t)if i.next() not in IDENTIFIER_START:return Falsereturn all(e in IDENTIFIER_PART for e in i)", "docstring": "Does not chceck whether t is not resticted or internal", "id": "f11668:m0"} {"signature": "def is_valid_lval(t):", "body": "if not is_internal(t) and is_lval(t) and t not in RESERVED_NAMES:return Truereturn False", "docstring": "Checks whether t is valid JS identifier name (no keyword like var, function, if etc)\n Also returns false on internal", "id": "f11668:m1"} {"signature": "def import_js(path, lib_name, globals):", "body": "with codecs.open(path_as_local(path), \"\", \"\") as f:js = f.read()e = EvalJs()e.execute(js)var = e.context['']globals[lib_name] = var.to_python()", "docstring": "Imports from javascript source file.\n globals is your globals()", "id": "f11669:m2"} {"signature": "def translate_file(input_path, output_path):", "body": "js = get_file_contents(input_path)py_code = translate_js(js)lib_name = os.path.basename(output_path).split('')[]head = '' % repr(lib_name)tail = '' % lib_nameout = head + py_code + tailwrite_file_contents(output_path, out)", "docstring": "Translates input JS file to python and saves the it to the output path.\nIt appends some convenience code at the end so that it is easy to import JS objects.\n\nFor example we have a file 'example.js' with: var a = function(x) {return x}\ntranslate_file('example.js', 'example.py')\n\nNow example.py can be easily importend and used:\n>>> from example import example\n>>> example.a(30)\n30", "id": "f11669:m5"} {"signature": "def run_file(path_or_file, context=None):", "body": "if context is None:context = EvalJs()if not isinstance(context, EvalJs):raise TypeError('')eval_value = context.eval(get_file_contents(path_or_file))return eval_value, context", "docstring": "Context must be EvalJS object. Runs given path as a JS program. Returns (eval_value, context).", "id": "f11669:m6"} {"signature": "def eval_js(js):", "body": "e = EvalJs()return e.eval(js)", "docstring": "Just like javascript eval. 
Translates javascript to python,\n executes and returns python object.\n js is javascript source code\n\n EXAMPLE:\n >>> import js2py\n >>> add = js2py.eval_js('function add(a, b) {return a + b}')\n >>> add(1, 2) + 3\n 6\n >>> add('1', 2, 3)\n u'12'\n >>> add.constructor\n function Function() { [python code] }\n\n NOTE: For Js Number, String, Boolean and other base types returns appropriate python BUILTIN type.\n For Js functions and objects, returns Python wrapper - basically behaves like normal python object.\n If you really want to convert object to python dict you can use to_dict method.", "id": "f11669:m7"} {"signature": "def eval_js6(js):", "body": "return eval_js(js6_to_js5(js))", "docstring": "Just like eval_js but with experimental support for js6 via babel.", "id": "f11669:m8"} {"signature": "def translate_js6(js):", "body": "return translate_js(js6_to_js5(js))", "docstring": "Just like translate_js but with experimental support for js6 via babel.", "id": "f11669:m9"} {"signature": "def execute(self, js=None, use_compilation_plan=False):", "body": "try:cache = self.__dict__['']except KeyError:cache = self.__dict__[''] = {}hashkey = hashlib.md5(js.encode('')).digest()try:compiled = cache[hashkey]except KeyError:code = translate_js(js, '', use_compilation_plan=use_compilation_plan)compiled = cache[hashkey] = compile(code, '','')exec (compiled, self._context)", "docstring": "executes javascript js in current context\n\n During initial execute() the converted js is cached for re-use. That means next time you\n run the same javascript snippet you save many instructions needed to parse and convert the\n js code to python code.\n\n This cache causes minor overhead (a cache dicts is updated) but the Js=>Py conversion process\n is typically expensive compared to actually running the generated python code.\n\n Note that the cache is just a dict, it has no expiration or cleanup so when running this\n in automated situations with vast amounts of snippets it might increase memory usage.", "id": "f11669:c0:m1"} {"signature": "def eval(self, expression, use_compilation_plan=False):", "body": "code = '' % json.dumps(expression)self.execute(code, use_compilation_plan=use_compilation_plan)return self['']", "docstring": "evaluates expression in current context and returns its value", "id": "f11669:c0:m2"} {"signature": "def execute_debug(self, js):", "body": "code = translate_js(js, '')filename = '' + os.sep + '' + hashlib.md5(code.encode(\"\")).hexdigest() + ''try:with open(filename, mode='') as f:f.write(code)with open(filename, \"\") as f:pyCode = compile(f.read(), filename, '')exec(pyCode, self._context)except Exception as err:raise errfinally:os.remove(filename)try:os.remove(filename + '')except:pass", "docstring": "executes javascript js in current context\n as opposed to the (faster) self.execute method, you can use your regular debugger\n to set breakpoints and inspect the generated python code", "id": "f11669:c0:m3"} {"signature": "def eval_debug(self, expression):", "body": "code = '' % json.dumps(expression)self.execute_debug(code)return self['']", "docstring": "evaluates expression in current context and returns its value\n as opposed to the (faster) self.execute method, you can use your regular debugger\n to set breakpoints and inspect the generated python code", "id": "f11669:c0:m4"} {"signature": "def console(self):", "body": "while True:if six.PY2:code = raw_input('')else:code = input('')try:print(self.eval(code))except KeyboardInterrupt:breakexcept Exception as e:import tracebackif 
DEBUG:sys.stderr.write(traceback.format_exc())else:sys.stderr.write('' + str(e) + '')time.sleep()", "docstring": "starts to interact (starts interactive console) Something like code.InteractiveConsole", "id": "f11669:c0:m9"} {"signature": "def abstract_relational_comparison(self, other,self_first=True): ", "body": "px = to_primitive(self, '')py = to_primitive(other, '')if not self_first: px, py = py, pxif not (Type(px) == '' and Type(py) == ''):px, py = to_number(px), to_number(py)if is_nan(px) or is_nan(py):return None return px < py else:return px < py", "docstring": "selfif tx == ty:if tx == '' or tx == '':return Trueif tx == '' or tx == '' or tx == '':return self == otherreturn self is other elif (tx == '' and ty == '') or (ty == ''and tx == ''):return Trueelif tx == '' and ty == '':return abstract_equality_op(self, to_number(other))elif tx == '' and ty == '':return abstract_equality_op(to_number(self), other)elif tx == '':return abstract_equality_op(to_number(self), other)elif ty == '':return abstract_equality_op(self, to_number(other))elif (tx == '' or tx == '') and is_object(other):return abstract_equality_op(self, to_primitive(other))elif (ty == '' or ty == '') and is_object(self):return abstract_equality_op(to_primitive(self), other)else:return False", "docstring": "returns the result of JS == compare.\n result is PyJs type: bool", "id": "f11670:m22"} {"signature": "def instanceof_op(self, other):", "body": "if not hasattr(other, ''):return Falsereturn other.has_instance(self)", "docstring": "checks if self is instance of other", "id": "f11670:m26"} {"signature": "def in_op(self, other):", "body": "if not is_object(other):raise MakeError('',\"\")return other.has_property(to_string(self))", "docstring": "checks if self is in other", "id": "f11670:m27"} {"signature": "def ConstructArray(self, py_arr):", "body": "arr = self.NewArray(len(py_arr))arr._init(py_arr)return arr", "docstring": "note py_arr elems are NOT converted to PyJs types!", "id": "f11671:c0:m9"} {"signature": "def ConstructObject(self, py_obj):", "body": "obj = self.NewObject()for k, v in py_obj.items():obj.put(unicode(k), v)return obj", "docstring": "note py_obj items are NOT converted to PyJs types!", "id": "f11671:c0:m10"} {"signature": "def replacement_template(rep, source, span, npar):", "body": "n = res = ''while n < len(rep) - :char = rep[n]if char == '':if rep[n + ] == '':res += ''n += continueelif rep[n + ] == '':res += source[:span[]]n += continueelif rep[n + ] == '':res += source[span[]:]n += continueelif rep[n + ] in DIGS:dig = rep[n + ]if n + < len(rep) and rep[n + ] in DIGS:dig += rep[n + ]num = int(dig)if not num or num > len(npar):res += '' + digelse:res += npar[num - ] if npar[num - ] else ''n += + len(dig)continueres += charn += if n < len(rep):res += rep[-]return res", "docstring": "Takes the replacement template and some info about the match and returns filled template", "id": "f11684:m0"} {"signature": "def create(self, args, space):", "body": "raise MakeError('', '' % self.Class)", "docstring": "Generally not a constructor, raise an error", "id": "f11686:c0:m11"} {"signature": "def match(self, string, pos):", "body": "return self.pat.match(string, int(pos))", "docstring": "string is of course a py string", "id": "f11686:c3:m1"} {"signature": "def __init__(self, scope, space, parent=None):", "body": "self.space = spaceself.prototype = parentif type(scope) is not dict:assert parent is not None, ''self.own = scopeself.is_with_scope = Trueelse:self.is_with_scope = Falseif parent is None:self.own = 
{}for k, v in six.iteritems(scope):self.define_own_property(k, {'': v,'': False,'': False,'': False}, False)else:self.own = scope self.par = super(Scope, self)self.stack = []", "docstring": "Doc", "id": "f11686:c6:m0"} {"signature": "def registers(self, vars):", "body": "for var in vars:self.register(var)", "docstring": "register multiple variables", "id": "f11686:c6:m2"} {"signature": "def call(self, this, args=()):", "body": "if self.is_native:_args = SpaceTuple(args) _args.space = self.spacereturn self.code(this, _args) else:return self.space.exe._call(self, this,args)", "docstring": "Dont use this method from inside bytecode to call other bytecode.", "id": "f11686:c7:m1"} {"signature": "def cok(self):", "body": "if type(self) in (UNDEFINED_TYPE, NULL_TYPE):raise MakeError('','')", "docstring": "Check object coercible", "id": "f11687:m10"} {"signature": "def pad(num, n=, sign=False):", "body": "s = unicode(abs(num))if len(s) < n:s = '' * (n - len(s)) + sif not sign:return sif num >= :return '' + selse:return '' + s", "docstring": "returns n digit string representation of the num", "id": "f11697:m10"} {"signature": "def emit(self, what, *args):", "body": "if isinstance(what, basestring):return self.exe.emit(what, *args)elif isinstance(what, list):self._emit_statement_list(what)else:return getattr(self, what[''])(**what)", "docstring": "what can be either name of the op, or node, or a list of statements.", "id": "f11700:c0:m49"} {"signature": "def emit(self, op_code, *args):", "body": "self.tape.append(OP_CODES[op_code](*args))", "docstring": "Adds op_code with specified args to tape", "id": "f11703:c0:m2"} {"signature": "def compile(self, start_loc=):", "body": "self.label_locs = {} if self.label_locs is None else self.label_locsloc = start_locwhile loc < len(self.tape):if type(self.tape[loc]) == LABEL:self.label_locs[self.tape[loc].num] = locdel self.tape[loc]continueloc += self.compiled = True", "docstring": "Records locations of labels and compiles the code", "id": "f11703:c0:m3"} {"signature": "def _call(self, func, this, args):", "body": "assert not func.is_nativeold_contexts = self.contextsold_return_locs = self.return_locsold_curr_ctx = self.current_ctxself.contexts = [FakeCtx()]self.return_locs = [len(self.tape)] my_ctx = func._generate_my_context(this, args)self.current_ctx = my_ctxret = self.run(my_ctx, starting_loc=self.label_locs[func.code])self.current_ctx = old_curr_ctxself.contexts = old_contextsself.return_locs = old_return_locsreturn ret", "docstring": "Calls a bytecode function func\n NOTE: use !ONLY! 
when calling functions from native methods!", "id": "f11703:c0:m4"} {"signature": "def execute_fragment_under_context(self, ctx, start_label, end_label):", "body": "old_curr_ctx = self.current_ctxself.ctx_depth += old_stack_len = len(ctx.stack)old_ret_len = len(self.return_locs)old_ctx_len = len(self.contexts)try:self.current_ctx = ctxreturn self._execute_fragment_under_context(ctx, start_label, end_label)except JsException as err:if self.debug_mode:self._on_fragment_exit(\"\")del ctx.stack[old_stack_len:]del self.return_locs[old_ret_len:]del self.contexts[old_ctx_len :]return undefined, , errfinally:self.ctx_depth -= self.current_ctx = old_curr_ctxassert old_stack_len == len(ctx.stack)", "docstring": "just like run but returns if moved outside of the specified fragment\n # 4 different exectution results\n # 0=normal, 1=return, 2=jump_outside, 3=errors\n # execute_fragment_under_context returns:\n # (return_value, typ, return_value/jump_loc/py_error)\n # IMPARTANT: It is guaranteed that the length of the ctx.stack is unchanged.", "id": "f11703:c0:m5"} {"signature": "def to_key(literal_or_identifier):", "body": "if literal_or_identifier[''] == '':return literal_or_identifier['']elif literal_or_identifier[''] == '':k = literal_or_identifier['']if isinstance(k, float):return unicode(float_repr(k))elif '' in literal_or_identifier:return compose_regex(k)elif isinstance(k, bool):return u'' if k else u''elif k is None:return u''else:return unicode(k)", "docstring": "returns string representation of this object", "id": "f11704:m0"} {"signature": "def to_arr(this):", "body": "return [this.get(str(e)) for e in xrange(len(this))]", "docstring": "Returns Python array from Js array", "id": "f11706:m0"} {"signature": "def to_arr(this):", "body": "return [this.get(str(e)) for e in xrange(len(this))]", "docstring": "Returns Python array from Js array", "id": "f11707:m0"} {"signature": "def to_arr(this):", "body": "return [this.get(str(e)) for e in xrange(len(this))]", "docstring": "Returns Python array from Js array", "id": "f11710:m0"} {"signature": "def replacement_template(rep, source, span, npar):", "body": "n = res = ''while n < len(rep) - :char = rep[n]if char == '':if rep[n + ] == '':res += ''n += continueelif rep[n + ] == '':res += source[:span[]]n += continueelif rep[n + ] == '':res += source[span[]:]n += continueelif rep[n + ] in DIGS:dig = rep[n + ]if n + < len(rep) and rep[n + ] in DIGS:dig += rep[n + ]num = int(dig)if not num or num > len(npar):res += '' + digelse:res += npar[num - ] if npar[num - ] else ''n += + len(dig)continueres += charn += if n < len(rep):res += rep[-]return res", "docstring": "Takes the replacement template and some info about the match and returns filled template", "id": "f11716:m0"} {"signature": "def MakeError(name, message):", "body": "return JsToPyException(ERRORS[name](Js(message)))", "docstring": "Returns PyJsException with PyJsError inside", "id": "f11719:m1"} {"signature": "def Js(val, Clamped=False):", "body": "if isinstance(val, PyJs):return valelif val is None:return undefinedelif isinstance(val, basestring):return PyJsString(val, StringPrototype)elif isinstance(val, bool):return true if val else falseelif isinstance(val, float) or isinstance(val, int) or isinstance(val, long) or (NUMPY_AVAILABLE and isinstance(val,(numpy.int8, numpy.uint8, numpy.int16, numpy.uint16,numpy.int32, numpy.uint32, numpy.float32, numpy.float64))):if val in NUM_BANK:return NUM_BANK[val]return PyJsNumber(float(val), NumberPrototype)elif isinstance(val, FunctionType):return 
PyJsFunction(val, FunctionPrototype)elif isinstance(val, dict): temp = PyJsObject({}, ObjectPrototype)for k, v in six.iteritems(val):temp.put(Js(k), Js(v))return tempelif isinstance(val, (list, tuple)): return PyJsArray(val, ArrayPrototype)elif isinstance(val, JsObjectWrapper):return val.__dict__['']elif NUMPY_AVAILABLE and isinstance(val, numpy.ndarray):if val.dtype == numpy.int8:return PyJsInt8Array(val, Int8ArrayPrototype)elif val.dtype == numpy.uint8 and not Clamped:return PyJsUint8Array(val, Uint8ArrayPrototype)elif val.dtype == numpy.uint8 and Clamped:return PyJsUint8ClampedArray(val, Uint8ClampedArrayPrototype)elif val.dtype == numpy.int16:return PyJsInt16Array(val, Int16ArrayPrototype)elif val.dtype == numpy.uint16:return PyJsUint16Array(val, Uint16ArrayPrototype)elif val.dtype == numpy.int32:return PyJsInt32Array(val, Int32ArrayPrototype)elif val.dtype == numpy.uint32:return PyJsUint16Array(val, Uint32ArrayPrototype)elif val.dtype == numpy.float32:return PyJsFloat32Array(val, Float32ArrayPrototype)elif val.dtype == numpy.float64:return PyJsFloat64Array(val, Float64ArrayPrototype)else: return py_wrap(val)", "docstring": "Converts Py type to PyJs type", "id": "f11719:m6"} {"signature": "def PyJsStrictEq(a, b):", "body": "tx, ty = Type(a), Type(b)if tx != ty:return falseif tx == '' or tx == '':return trueif a.is_primitive(): return Js(a.value == b.value)if a.Class == b.Class == '':return Js(a.obj == b.obj)return Js(a is b)", "docstring": "a===b", "id": "f11719:m11"} {"signature": "def PyJsStrictNeq(a, b):", "body": "return PyJsStrictEq(a, b).neg()", "docstring": "a!==b", "id": "f11719:m12"} {"signature": "def PyJsBshift(a, b):", "body": "return a.pyjs_bshift(b)", "docstring": "a>>>b", "id": "f11719:m13"} {"signature": "def __init__(self, value=None, prototype=None, extensible=False):", "body": "self.value = valueself.extensible = extensibleself.prototype = prototypeself.own = {}self.buff = None", "docstring": "Constructor for Number String and Boolean", "id": "f11719:c0:m0"} {"signature": "def put(self, prop, val, op=None): ", "body": "if self.Class == '' or self.Class == '':raise MakeError('','')if not isinstance(prop, basestring):prop = prop.to_string().valueif NUMPY_AVAILABLE and prop.isdigit():if self.Class == '':val = Js(numpy.int8(val.to_number().value))elif self.Class == '':val = Js(numpy.uint8(val.to_number().value))elif self.Class == '':if val < Js(numpy.uint8()):val = Js(numpy.uint8())elif val > Js(numpy.uint8()):val = Js(numpy.uint8())else:val = Js(numpy.uint8(val.to_number().value))elif self.Class == '':val = Js(numpy.int16(val.to_number().value))elif self.Class == '':val = Js(numpy.uint16(val.to_number().value))elif self.Class == '':val = Js(numpy.int32(val.to_number().value))elif self.Class == '':val = Js(numpy.uint32(val.to_number().value))elif self.Class == '':val = Js(numpy.float32(val.to_number().value))elif self.Class == '':val = Js(numpy.float64(val.to_number().value))if isinstance(self.buff, numpy.ndarray):self.buff[int(prop)] = int(val.to_number().value)if op is not None:val = getattr(self.get(prop), OP_METHODS[op])(val)if not self.can_put(prop):return valown_desc = self.get_own_property(prop)if is_data_descriptor(own_desc):if self.Class in ['', '', '', '','', '', '', '','', '']:self.define_own_property(prop, {'': val})else:self.own[prop][''] = valreturn valdesc = self.get_property(prop)if is_accessor_descriptor(desc):desc[''].call(self, (val, ))else:new = {'': val,'': True,'': True,'': True}if self.Class in ['', '', '', '','', '', '', '','', 
'']:self.define_own_property(prop, new)else:self.own[prop] = newreturn val", "docstring": "Just like in js: self.prop op= val\n for example when op is '+' it will be self.prop+=val\n op can be either None for simple assignment or one of:\n * / % + - << >> & ^ |", "id": "f11719:c0:m12"} {"signature": "def cok(self):", "body": "if self.Class in ('', ''):raise MakeError('','')", "docstring": "Check object coercible", "id": "f11719:c0:m27"} {"signature": "def abstract_relational_comparison(self, other, self_first=True):", "body": "px = self.to_primitive('')py = other.to_primitive('')if not self_first: px, py = py, pxif not (px.Class == '' and py.Class == ''):px, py = px.to_number(), py.to_number()if px.is_nan() or py.is_nan():return undefinedreturn Js(px.value < py.value) else:return Js(px.value < py.value)", "docstring": "selfif tx == ty:if tx == '' or tx == '':return trueif tx == '' or tx == '' or tx == '':return Js(self.value == other.value)return Js(self is other) elif (tx == '' and ty == '') or (ty == ''and tx == ''):return trueelif tx == '' and ty == '':return self.abstract_equality_comparison(other.to_number())elif tx == '' and ty == '':return self.to_number().abstract_equality_comparison(other)elif tx == '':return self.to_number().abstract_equality_comparison(other)elif ty == '':return self.abstract_equality_comparison(other.to_number())elif (tx == '' or tx == '') and other.is_object():return self.abstract_equality_comparison(other.to_primitive())elif (ty == '' or ty == '') and self.is_object():return self.to_primitive().abstract_equality_comparison(other)else:return false", "docstring": "returns the result of JS == compare.\n result is PyJs type: bool", "id": "f11719:c0:m59"} {"signature": "def instanceof(self, other):", "body": "if not hasattr(other, ''):return falsereturn other.has_instance(self)", "docstring": "checks if self is instance of other", "id": "f11719:c0:m62"} {"signature": "def __call__(self, *args):", "body": "if not self.is_callable():raise MakeError('','' % self.typeof())return self.call(self.GlobalObject, args)", "docstring": "Call a property prop as a function (this will be global object).\n\n NOTE: dont pass this and arguments here, these will be added\n automatically!", "id": "f11719:c0:m65"} {"signature": "def create(self, *args):", "body": "raise MakeError('', '' % self.Class)", "docstring": "Generally not a constructor, raise an error", "id": "f11719:c0:m66"} {"signature": "def callprop(self, prop, *args):", "body": "if not isinstance(prop, basestring):prop = prop.to_string().valuecand = self.get(prop)if not cand.is_callable():raise MakeError('','' % cand.typeof())return cand.call(self, args)", "docstring": "Call a property prop as a method (this will be self).\n\n NOTE: dont pass this and arguments here, these will be added\n automatically!", "id": "f11719:c0:m70"} {"signature": "def to_python(self):", "body": "return to_python(self)", "docstring": "returns equivalent python object.\n for example if this object is javascript array then this method will return equivalent python array", "id": "f11719:c0:m71"} {"signature": "def to_py(self):", "body": "return self.to_python()", "docstring": "returns equivalent python object.\n for example if this object is javascript array then this method will return equivalent python array", "id": "f11719:c0:m72"} {"signature": "def __init__(self, scope, closure=None):", "body": "self.prototype = closureif closure is None:self.own = {}for k, v in six.iteritems(scope):self.define_own_property(k, {'': v,'': False,'': False,'': 
False})else:self.own = scope", "docstring": "Doc", "id": "f11719:c2:m0"} {"signature": "def registers(self, lvals):", "body": "for lval in lvals:self.register(lval)", "docstring": "register multiple variables", "id": "f11719:c2:m2"} {"signature": "def _set_name(self, name):", "body": "if self.own.get(''):self.func_name = nameself.own[''][''] = Js(name)", "docstring": "name is py type", "id": "f11719:c7:m1"} {"signature": "def call(self, this, args=()):", "body": "if not hasattr(args, ''): args = (args, )args = tuple(Js(e) for e in args) arguments = PyJsArguments(args, self) arglen = self.argcount if len(args) > arglen:args = args[:arglen]elif len(args) < arglen:args += (undefined, ) * (arglen - len(args))args += this, arguments try:return Js(self.code(*args))except NotImplementedError:raiseexcept RuntimeError as e: raise MakeError('', e.message ifnot isinstance(e, NotImplementedError) else '')", "docstring": "Calls this function and returns a result\n (converted to PyJs type so func can return python types)\n\n this must be a PyJs object and args must be a python tuple of PyJs objects.\n\n arguments object is passed automatically and will be equal to Js(args)\n (tuple converted to arguments object).You dont need to worry about number\n of arguments you provide if you supply less then missing ones will be set\n to undefined (but not present in arguments object).\n And if you supply too much then excess will not be passed\n (but they will be present in arguments object).", "id": "f11719:c7:m3"} {"signature": "def __init__(self, value=None, prototype=None):", "body": "if not isinstance(value, basestring):raise TypeError self.value = valueself.prototype = prototypeself.own = {}self.own[''] = {'': Js(len(value)),'': False,'': False,'': False}if len(value) == :CHAR_BANK[value] = self", "docstring": "Constructor for Number String and Boolean", "id": "f11719:c10:m0"} {"signature": "def match(self, string, pos):", "body": "return self.pat.match(string, pos)", "docstring": "string is of course py string", "id": "f11719:c26:m1"} {"signature": "def fix_js_args(func):", "body": "fcode = six.get_function_code(func)fargs = fcode.co_varnames[fcode.co_argcount - :fcode.co_argcount]if fargs == ('', '') or fargs == ('', ''):return funccode = append_arguments(six.get_function_code(func), ('', ''))return types.FunctionType(code,six.get_function_globals(func),func.__name__,closure=six.get_function_closure(func))", "docstring": "Use this function when unsure whether func takes this and arguments as its last 2 args.\n It will append 2 args if it does not.", "id": "f11720:m0"} {"signature": "def dbg(x):", "body": "return ''", "docstring": "does nothing, legacy dummy function", "id": "f11745:m0"} {"signature": "def translate_js(js, HEADER=DEFAULT_HEADER, use_compilation_plan=False):", "body": "if use_compilation_plan and not '' in js and not '' in js:return translate_js_with_compilation_plan(js, HEADER=HEADER)parser = pyjsparser.PyJsParser()parsed = parser.parse(js) translating_nodes.clean_stacks()return HEADER + translating_nodes.trans(parsed)", "docstring": "js has to be a javascript source code.\n returns equivalent python code.", "id": "f11745:m1"} {"signature": "def translate_js_with_compilation_plan(js, HEADER=DEFAULT_HEADER):", "body": "match_increaser_str, match_increaser_num, compilation_plan = get_compilation_plan(js)cp_hash = hashlib.md5(compilation_plan.encode('')).digest()try:python_code = cache[cp_hash]['']except:parser = pyjsparser.PyJsParser()parsed = parser.parse(compilation_plan) 
translating_nodes.clean_stacks()python_code = translating_nodes.trans(parsed) cache[cp_hash] = {'': compilation_plan,'': python_code,}python_code = match_increaser_str.wrap_up(python_code)python_code = match_increaser_num.wrap_up(python_code)return HEADER + python_code", "docstring": "js has to be a javascript source code.\n returns equivalent python code.\n\n compile plans only work with the following restrictions:\n - only enabled for oneliner expressions\n - when there are comments in the js code string substitution is disabled\n - when there are nested escaped quotes string substitution is disabled, so\n\n cacheable:\n Q1 == 1 && name == 'harry'\n\n not cacheable:\n Q1 == 1 && name == 'harry' // some comment\n\n not cacheable:\n Q1 == 1 && name == 'o\\'Reilly'\n\n not cacheable:\n Q1 == 1 && name /* some comment */ == 'o\\'Reilly'", "id": "f11745:m3"} {"signature": "def trasnlate(js, HEADER=DEFAULT_HEADER):", "body": "return translate_js(js, HEADER)", "docstring": "js has to be a javascript source code.\n returns equivalent python code.\n\n Equivalent to translate_js", "id": "f11745:m4"} {"signature": "def to_key(literal_or_identifier):", "body": "if literal_or_identifier[''] == '':return literal_or_identifier['']elif literal_or_identifier[''] == '':k = literal_or_identifier['']if isinstance(k, float):return unicode(float_repr(k))elif '' in literal_or_identifier:return compose_regex(k)elif isinstance(k, bool):return '' if k else ''elif k is None:return ''else:return unicode(k)", "docstring": "returns string representation of this object", "id": "f11746:m1"} {"signature": "def trans(ele, standard=False):", "body": "try:node = globals().get(ele[''])if not node:raise NotImplementedError('' % ele[''])if standard:node = node.__dict__[''] if '' in node.__dict__ else nodereturn node(**ele)except:raise", "docstring": "Translates esprima syntax tree to python by delegating to appropriate translating node", "id": "f11746:m2"} {"signature": "def limited(func):", "body": "def f(standard=False, **args):insert_pos = len(inline_stack.names) res = func(**args)if len(res) > LINE_LEN_LIMIT:name = inline_stack.require('')inline_stack.names.pop()inline_stack.names.insert(insert_pos, name)res = '' % (name, res)inline_stack.define(name, res)return name + ''else:return resf.__dict__[''] = funcreturn f", "docstring": "Decorator limiting resulting line length in order to avoid python parser stack overflow -\n If an expression is longer than LINE_LEN_LIMIT characters then it will be moved to an upper line\n USE ONLY ON EXPRESSIONS!!!", "id": "f11746:m3"} {"signature": "def parse(javascript_code):", "body": "p = PyJsParser()return p.parse(javascript_code)", "docstring": "Returns syntax tree of javascript_code.\n\n Syntax tree has the same structure as syntax tree produced by esprima.js\n\n Same as PyJsParser().parse For your convenience :)", "id": "f11747:m0"} {"signature": "def parsePattern(self):", "body": "return {'': '', '': self.parseDisjunction()}", "docstring": "Perform string escape - for regexp literals", "id": "f11748:c0:m1"} {"signature": "def argsplit(args, sep=''):", "body": "parsed_len = last = splits = []for e in bracket_split(args, brackets=['', '', '']):if e[] not in ('', '', ''):for i, char in enumerate(e):if char == sep:splits.append(args[last:parsed_len + i])last = parsed_len + i + parsed_len += len(e)splits.append(args[last:])return splits", "docstring": "used to split JS args (it is not as simple as it seems because\n sep can be inside brackets).\n\n pass args *without* brackets!\n\n Used also to 
parse array and object elements, and more", "id": "f11749:m9"} {"signature": "def bracket_split(source, brackets=('', '', ''), strip=False):", "body": "starts = [e[] for e in brackets]in_bracket = n = last = while n < len(source):e = source[n]if not in_bracket and e in starts:in_bracket = start = nb_start, b_end = brackets[starts.index(e)]elif in_bracket:if e == b_start:in_bracket += elif e == b_end:in_bracket -= if not in_bracket:if source[last:start]:yield source[last:start]last = n + yield source[start + strip:n + - strip]n += if source[last:]:yield source[last:]", "docstring": "DOES NOT RETURN EMPTY STRINGS (can only return empty bracket content if strip=True)", "id": "f11749:m10"} {"signature": "def index(request):", "body": "return {}", "docstring": "Base view to load our template", "id": "f11763:m0"} {"signature": "@view_config(route_name='')def socketio_service(request):", "body": "socketio_manage(request.environ, namespaces=nsmap, request=request)return {}", "docstring": "The view that will launch the socketio listener", "id": "f11763:m1"} {"signature": "def cpu_checker_process(self):", "body": "ret = os.system(\"\")self.emit(\"\", ret)", "docstring": "This will be a greenlet", "id": "f11763:c0:m4"} {"signature": "def simple_route(config, name, url, fn):", "body": "config.add_route(name, url)config.add_view(fn, route_name=name,renderer=\"\" % name)", "docstring": "Function to simplify creating routes in pyramid \n Takes the pyramid configuration, name of the route, url, and view\n function", "id": "f11764:m0"} {"signature": "def includeme(config):", "body": "config.scan('')", "docstring": "Pyramid configuration", "id": "f11765:m0"} {"signature": "def index(request):", "body": "return {}", "docstring": "Base view to load our template", "id": "f11770:m0"} {"signature": "def join(self, room):", "body": "self.socket.rooms.add(self._get_room_name(room))", "docstring": "Lets a user join a room on a specific Namespace.", "id": "f11770:c0:m1"} {"signature": "def leave(self, room):", "body": "self.socket.rooms.remove(self._get_room_name(room))", "docstring": "Lets a user leave a room on a specific Namespace.", "id": "f11770:c0:m2"} {"signature": "def emit_to_room(self, event, args, room):", "body": "pkt = dict(type=\"\",name=event,args=args,endpoint=self.ns_name)room_name = self._get_room_name(room)for sessid, socket in self.socket.server.sockets.iteritems():if not hasattr(socket, ''):continueif room_name in socket.rooms:socket.send_packet(pkt)", "docstring": "This is sent to all in the room (in this particular Namespace)", "id": "f11770:c0:m4"} {"signature": "def simple_route(config, name, url, fn):", "body": "config.add_route(name, url)config.add_view(fn, route_name=name,renderer=\"\" % name)", "docstring": "Function to simplify creating routes in pyramid\nTakes the pyramid configuration, name of the route, url, and view\nfunction.", "id": "f11771:m0"} {"signature": "def includeme(config):", "body": "config.scan('')", "docstring": "Pyramid configuration", "id": "f11772:m0"} {"signature": "def index(request):", "body": "return {}", "docstring": "Base view to load our template", "id": "f11781:m0"} {"signature": "def index(request):", "body": "return {}", "docstring": "Base view to load our template", "id": "f11788:m0"} {"signature": "def rooms(request, template=\"\"):", "body": "context = {\"\": ChatRoom.objects.all()}return render(request, template, context)", "docstring": "Homepage - lists all rooms.", "id": "f11797:m0"} {"signature": "def room(request, slug, template=\"\"):", "body": 
"context = {\"\": get_object_or_404(ChatRoom, slug=slug)}return render(request, template, context)", "docstring": "Show a room.", "id": "f11797:m1"} {"signature": "def create(request):", "body": "name = request.POST.get(\"\")if name:room, created = ChatRoom.objects.get_or_create(name=name)return redirect(room)return redirect(rooms)", "docstring": "Handles post from the \"Add room\" form on the homepage, and\nredirects to the new room.", "id": "f11797:m2"} {"signature": "def get_handler(self, *args, **options):", "body": "handler = WSGIHandler()try:from django.contrib.staticfiles.handlers import StaticFilesHandlerexcept ImportError:return handleruse_static_handler = options.get('', True)insecure_serving = options.get('', False)if (settings.DEBUG and use_static_handler or(use_static_handler and insecure_serving)):handler = StaticFilesHandler(handler)return handler", "docstring": "Returns the django.contrib.staticfiles handler.", "id": "f11799:c0:m1"} {"signature": "@app.route('')def rooms():", "body": "context = {\"\": ChatRoom.query.all()}return render_template('', **context)", "docstring": "Homepage - lists all rooms.", "id": "f11804:m4"} {"signature": "@app.route('')def room(slug):", "body": "context = {\"\": get_object_or_404(ChatRoom, slug=slug)}return render_template('', **context)", "docstring": "Show a room.", "id": "f11804:m5"} {"signature": "@app.route('', methods=[''])def create():", "body": "name = request.form.get(\"\")if name:room, created = get_or_create(ChatRoom, name=name)return redirect(url_for('', slug=room.slug))return redirect(url_for(''))", "docstring": "Handles post from the \"Add room\" form on the homepage, and\nredirects to the new room.", "id": "f11804:m6"} {"signature": "def join(self, room):", "body": "self.session[''].add(self._get_room_name(room))", "docstring": "Lets a user join a room on a specific Namespace.", "id": "f11805:c0:m1"} {"signature": "def leave(self, room):", "body": "self.session[''].remove(self._get_room_name(room))", "docstring": "Lets a user leave a room on a specific Namespace.", "id": "f11805:c0:m2"} {"signature": "def emit_to_room(self, room, event, *args):", "body": "pkt = dict(type=\"\",name=event,args=args,endpoint=self.ns_name)room_name = self._get_room_name(room)for sessid, socket in six.iteritems(self.socket.server.sockets):if '' not in socket.session:continueif room_name in socket.session[''] and self.socket != socket:socket.send_packet(pkt)", "docstring": "This is sent to all in the room (in this particular Namespace)", "id": "f11805:c0:m4"} {"signature": "def broadcast_event(self, event, *args):", "body": "pkt = dict(type=\"\",name=event,args=args,endpoint=self.ns_name)for sessid, socket in six.iteritems(self.socket.server.sockets):socket.send_packet(pkt)", "docstring": "This is sent to all in the sockets in this particular Namespace,\nincluding itself.", "id": "f11805:c1:m0"} {"signature": "def broadcast_event_not_me(self, event, *args):", "body": "pkt = dict(type=\"\",name=event,args=args,endpoint=self.ns_name)for sessid, socket in six.iteritems(self.socket.server.sockets):if socket is not self.socket:socket.send_packet(pkt)", "docstring": "This is sent to all in the sockets in this particular Namespace,\nexcept itself.", "id": "f11805:c1:m1"} {"signature": "def encode(data, json_dumps=default_json_dumps):", "body": "payload = ''msg = str(MSG_TYPES[data['']])if msg in ['', '']:msg += '' + data['']if '' in data and data[''] != '':msg += '' + data['']elif msg == '':msg += ''elif msg in ['', '', '']:if msg == '':payload = 
data['']if msg == '':payload = json_dumps(data[''])if msg == '':d = {}d[''] = data['']if '' in data and data[''] != []:d[''] = data['']payload = json_dumps(d)if '' in data:msg += '' + str(data[''])if data[''] == '':msg += ''msg += ''else:msg += ''if '' not in data:data[''] = ''if payload != '':msg += data[''] + '' + payloadelse:msg += data['']elif msg == '':msg += '' + data.get('', '') + '' + str(data[''])if '' in data and data[''] != []:msg += '' + json_dumps(data[''])elif msg == '':msg += ''if '' in data and data[''] != '':msg += str(ERROR_REASONS[data['']])if '' in data and data[''] != '':msg += '' + str(ERROR_ADVICES[data['']])msg += data['']elif msg == '':msg += ''return msg", "docstring": "Encode an attribute dict into a byte string.", "id": "f11806:m0"} {"signature": "def decode(rawstr, json_loads=default_json_loads):", "body": "decoded_msg = {}try:rawstr = rawstr.decode('')except AttributeError:passsplit_data = rawstr.split(\"\", )msg_type = split_data[]msg_id = split_data[]endpoint = split_data[]data = ''if msg_id != '':if \"\" in msg_id:msg_id = msg_id.split('')[]decoded_msg[''] = int(msg_id)decoded_msg[''] = ''else:decoded_msg[''] = int(msg_id)decoded_msg[''] = Truemsg_type_id = int(msg_type)if msg_type_id in MSG_VALUES:decoded_msg[''] = MSG_VALUES[int(msg_type)]else:raise Exception(\"\" % msg_type)decoded_msg[''] = endpointif len(split_data) > :data = split_data[]if msg_type == \"\": passelif msg_type == \"\": decoded_msg[''] = dataelif msg_type == \"\": passelif msg_type == \"\": decoded_msg[''] = dataelif msg_type == \"\": decoded_msg[''] = json_loads(data)elif msg_type == \"\": try:data = json_loads(data)except ValueError:print(\"\", data)decoded_msg[''] = []else:decoded_msg[''] = data.pop('')if '' in data:decoded_msg[''] = data['']else:decoded_msg[''] = []elif msg_type == \"\": if '' in data:ackId, data = data.split('')decoded_msg[''] = int(ackId)decoded_msg[''] = json_loads(data)else:decoded_msg[''] = int(data)decoded_msg[''] = []elif msg_type == \"\": if '' in data:reason, advice = data.split('')decoded_msg[''] = REASONS_VALUES[int(reason)]decoded_msg[''] = ADVICES_VALUES[int(advice)]else:decoded_msg[''] = ''if data != '':decoded_msg[''] = REASONS_VALUES[int(data)]else:decoded_msg[''] = ''elif msg_type == \"\": passreturn decoded_msg", "docstring": "Decode a rawstr packet arriving from the socket into a dict.", "id": "f11806:m1"} {"signature": "def autodiscover():", "body": "global LOADING_SOCKETIOif LOADING_SOCKETIO:returnLOADING_SOCKETIO = Trueimport impfrom django.conf import settingsfor app in settings.INSTALLED_APPS:try:app_path = import_module(app).__path__except AttributeError:continuetry:imp.find_module('', app_path)except ImportError:continueimport_module(\"\" % app)LOADING_SOCKETIO = False", "docstring": "Auto-discover INSTALLED_APPS sockets.py modules and fail silently when\nnot present. 
NOTE: socketio_autodiscover was inspired/copied from\ndjango.contrib.admin autodiscover", "id": "f11807:m0"} {"signature": "def default_error_handler(socket, error_name, error_message, endpoint,msg_id, quiet):", "body": "pkt = dict(type='', name='',args=[error_name, error_message],endpoint=endpoint)if msg_id:pkt[''] = msg_idif not quiet:socket.send_packet(pkt)log.error(u\"\".format(error_name, error_message, endpoint, msg_id))", "docstring": "This is the default error handler, you can override this when\n calling :func:`socketio.socketio_manage`.\n\n It basically sends an event through the socket with the 'error' name.\n\n See documentation for :meth:`Socket.error`.\n\n :param quiet: if quiet, this handler will not send a packet to the\n user, but only log for the server developer.", "id": "f11809:m0"} {"signature": "def _set_namespaces(self, namespaces):", "body": "self.namespaces = namespaces", "docstring": "This is a mapping (dict) of the different '/namespaces' to their\n BaseNamespace object derivative.\n\n This is called by socketio_manage().", "id": "f11809:c0:m1"} {"signature": "def _set_request(self, request):", "body": "self.request = request", "docstring": "Saves the request object for future use by the different Namespaces.\n\n This is called by socketio_manage().", "id": "f11809:c0:m2"} {"signature": "def _set_environ(self, environ):", "body": "self.environ = environ", "docstring": "Save the WSGI environ, for future use.\n\n This is called by socketio_manage().", "id": "f11809:c0:m3"} {"signature": "def _set_error_handler(self, error_handler):", "body": "self.error_handler = error_handler", "docstring": "Changes the default error_handler function to the one specified\n\n This is called by socketio_manage().", "id": "f11809:c0:m4"} {"signature": "def _set_json_loads(self, json_loads):", "body": "self.json_loads = json_loads", "docstring": "Change the default JSON decoder.\n\n This should be a callable that accepts a single string, and returns\n a well-formed object.", "id": "f11809:c0:m5"} {"signature": "def _set_json_dumps(self, json_dumps):", "body": "self.json_dumps = json_dumps", "docstring": "Change the default JSON decoder.\n\n This should be a callable that accepts a single string, and returns\n a well-formed object.", "id": "f11809:c0:m6"} {"signature": "def _get_next_msgid(self):", "body": "self.ack_counter += return self.ack_counter", "docstring": "This retrieves the next value for the 'id' field when sending\n an 'event' or 'message' or 'json' that asks the remote client\n to 'ack' back, so that we trigger the local callback.", "id": "f11809:c0:m7"} {"signature": "def _save_ack_callback(self, msgid, callback):", "body": "if msgid in self.ack_callbacks:return Falseself.ack_callbacks[msgid] = callback", "docstring": "Keep a reference of the callback on this socket.", "id": "f11809:c0:m8"} {"signature": "def _pop_ack_callback(self, msgid):", "body": "if msgid not in self.ack_callbacks:return Nonereturn self.ack_callbacks.pop(msgid)", "docstring": "Fetch the callback for a given msgid, if it exists, otherwise,\n return None", "id": "f11809:c0:m9"} {"signature": "def __getitem__(self, key):", "body": "return self.active_ns[key]", "docstring": "This will get the nested Namespace using its '/chat' reference.\n\n Using this, you can go from one Namespace to the other (to emit, add\n ACLs, etc..) 
with:\n\n adminnamespace.socket['/chat'].add_acl_method('kick-ban')", "id": "f11809:c0:m11"} {"signature": "def __hasitem__(self, key):", "body": "return key in self.active_ns", "docstring": "Verifies if the namespace is active (was initialized)", "id": "f11809:c0:m12"} {"signature": "@propertydef connected(self):", "body": "return self.state == self.STATE_CONNECTED", "docstring": "Returns whether the state is CONNECTED or not.", "id": "f11809:c0:m13"} {"signature": "def heartbeat(self):", "body": "self.timeout.set()", "docstring": "This makes the heart beat for another X seconds. Call this when\n you get a heartbeat packet in.\n\n This clear the heartbeat disconnect timeout (resets for X seconds).", "id": "f11809:c0:m15"} {"signature": "def kill(self, detach=False):", "body": "self.ack_callbacks = {}if self.connected:self.state = self.STATE_DISCONNECTINGself.server_queue.put_nowait(None)self.client_queue.put_nowait(None)if len(self.active_ns) > :log.debug(\"\" % self)self.disconnect()if detach:self.detach()gevent.killall(self.jobs)", "docstring": "This function must/will be called when a socket is to be completely\n shut down, closed by connection timeout, connection error or explicit\n disconnection from the client.\n\n It will call all of the Namespace's\n :meth:`~socketio.namespace.BaseNamespace.disconnect` methods\n so that you can shut-down things properly.", "id": "f11809:c0:m16"} {"signature": "def detach(self):", "body": "log.debug(\"\" % self)if self.sessid in self.server.sockets:self.server.sockets.pop(self.sessid)", "docstring": "Detach this socket from the server. This should be done in\n conjunction with kill(), once all the jobs are dead, detach the\n socket for garbage collection.", "id": "f11809:c0:m17"} {"signature": "def put_server_msg(self, msg):", "body": "self.heartbeat()self.server_queue.put_nowait(msg)", "docstring": "Writes to the server's pipe, to end up in in the Namespaces", "id": "f11809:c0:m18"} {"signature": "def put_client_msg(self, msg):", "body": "self.client_queue.put_nowait(msg)", "docstring": "Writes to the client's pipe, to end up in the browser", "id": "f11809:c0:m19"} {"signature": "def get_client_msg(self, **kwargs):", "body": "return self.client_queue.get(**kwargs)", "docstring": "Grab a message to send it to the browser", "id": "f11809:c0:m20"} {"signature": "def get_server_msg(self, **kwargs):", "body": "return self.server_queue.get(**kwargs)", "docstring": "Grab a message, to process it by the server and dispatch calls", "id": "f11809:c0:m21"} {"signature": "def get_multiple_client_msgs(self, **kwargs):", "body": "client_queue = self.client_queuemsgs = [client_queue.get(**kwargs)]while client_queue.qsize():msgs.append(client_queue.get())return msgs", "docstring": "Get multiple messages, in case we're going through the various\n XHR-polling methods, on which we can pack more than one message if the\n rate is high, and encode the payload for the HTTP channel.", "id": "f11809:c0:m22"} {"signature": "def error(self, error_name, error_message, endpoint=None, msg_id=None,quiet=False):", "body": "handler = self.error_handlerreturn handler(self, error_name, error_message, endpoint, msg_id, quiet)", "docstring": "Send an error to the user, using the custom or default\n ErrorHandler configured on the [TODO: Revise this] Socket/Handler\n object.\n\n :param error_name: is a simple string, for easy association on\n the client side\n\n :param error_message: is a human readable message, the user\n will eventually see\n\n :param endpoint: set this if you have a 
message specific to an\n end point\n\n :param msg_id: set this if your error is relative to a\n specific message\n\n :param quiet: way to make the error handler quiet. Specific to\n the handler. The default handler will only log,\n with quiet.", "id": "f11809:c0:m23"} {"signature": "def disconnect(self, silent=False):", "body": "for ns_name, ns in list(six.iteritems(self.active_ns)):ns.recv_disconnect()", "docstring": "Calling this method will call the\n :meth:`~socketio.namespace.BaseNamespace.disconnect` method on\n all the active Namespaces that were open, killing all their\n jobs and sending 'disconnect' packets for each of them.\n\n Normally, the Global namespace (endpoint = '') has special meaning,\n as it represents the whole connection,\n\n :param silent: when True, pass on the ``silent`` flag to the Namespace\n :meth:`~socketio.namespace.BaseNamespace.disconnect`\n calls.", "id": "f11809:c0:m24"} {"signature": "def remove_namespace(self, namespace):", "body": "if namespace in self.active_ns:del self.active_ns[namespace]if len(self.active_ns) == and self.connected:self.kill(detach=True)", "docstring": "This removes a Namespace object from the socket.\n\n This is usually called by\n :meth:`~socketio.namespace.BaseNamespace.disconnect`.", "id": "f11809:c0:m25"} {"signature": "def send_packet(self, pkt):", "body": "self.put_client_msg(packet.encode(pkt, self.json_dumps))", "docstring": "Low-level interface to queue a packet on the wire (encoded as wire\n protocol", "id": "f11809:c0:m26"} {"signature": "def spawn(self, fn, *args, **kwargs):", "body": "log.debug(\"\" % fn.__name__)job = gevent.spawn(fn, *args, **kwargs)self.jobs.append(job)return job", "docstring": "Spawn a new Greenlet, attached to this Socket instance.\n\n It will be monitored by the \"watcher\" method", "id": "f11809:c0:m27"} {"signature": "def _receiver_loop(self):", "body": "while True:rawdata = self.get_server_msg()if not rawdata:continue try:pkt = packet.decode(rawdata, self.json_loads)except (ValueError, KeyError, Exception) as e:self.error('',\"\"\"\" % (rawdata[:], e))continueif pkt[''] == '':continueif pkt[''] == '' and pkt[''] == '':self.kill(detach=True)continueendpoint = pkt['']if endpoint not in self.namespaces:self.error(\"\",\"\"\"\" % endpoint, endpoint=endpoint)continueelif endpoint in self.active_ns:pkt_ns = self.active_ns[endpoint]else:new_ns_class = self.namespaces[endpoint]pkt_ns = new_ns_class(self.environ, endpoint,request=self.request)for cls in type(pkt_ns).__mro__:if hasattr(cls, ''):cls.initialize(pkt_ns) self.active_ns[endpoint] = pkt_nsretval = pkt_ns.process_packet(pkt)if pkt.get('') == \"\" and pkt.get(''):if type(retval) is tuple:args = list(retval)else:args = [retval]returning_ack = dict(type='', ackId=pkt[''],args=args,endpoint=pkt.get('', ''))self.send_packet(returning_ack)if not self.connected:self.kill(detach=True) return", "docstring": "This is the loop that takes messages from the queue for the server\n to consume, decodes them and dispatches them.\n\n It is the main loop for a socket. We join on this process before\n returning control to the web framework.\n\n This process is not tracked by the socket itself, it is not going\n to be killed by the ``gevent.killall(socket.jobs)``, so it must\n exit gracefully itself.", "id": "f11809:c0:m28"} {"signature": "def _spawn_receiver_loop(self):", "body": "job = gevent.spawn(self._receiver_loop)self.jobs.append(job)return job", "docstring": "Spawns the reader loop. 
This is called internally by\n socketio_manage().", "id": "f11809:c0:m29"} {"signature": "def _watcher(self):", "body": "while True:gevent.sleep()if not self.connected:for ns_name, ns in list(six.iteritems(self.active_ns)):ns.recv_disconnect()gevent.killall(self.jobs)break", "docstring": "Watch out if we've been disconnected, in that case, kill\n all the jobs.", "id": "f11809:c0:m30"} {"signature": "def _spawn_watcher(self):", "body": "job = gevent.spawn(self._watcher)return job", "docstring": "This one is not waited for with joinall(socket.jobs), as it\n is an external watcher, to clean up when everything is done.", "id": "f11809:c0:m31"} {"signature": "def _heartbeat(self):", "body": "interval = self.config['']while self.connected:gevent.sleep(interval)self.put_client_msg(\"\")", "docstring": "Start the heartbeat Greenlet to check connection health.", "id": "f11809:c0:m32"} {"signature": "def _spawn_heartbeat(self):", "body": "self.spawn(self._heartbeat)self.spawn(self._heartbeat_timeout)", "docstring": "This function returns a list of jobs", "id": "f11809:c0:m34"} {"signature": "def serve_paste(app, global_conf, **kw):", "body": "serve(app, **kw)return ", "docstring": "pserve / paster serve / waitress replacement / integration\n\n You can pass as parameters:\n\n transports = websockets, xhr-multipart, xhr-longpolling, etc...\n policy_server = True", "id": "f11810:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "self.sockets = {}if '' in kwargs:print(\"\")self.resource = kwargs.pop('', '')else:self.resource = kwargs.pop('', '')self.transports = kwargs.pop('', None)if kwargs.pop('', True):try:address = args[][]except TypeError:try:address = args[].address[]except AttributeError:address = args[].cfg_addr[]policylistener = kwargs.pop('', (address, ))self.policy_server = FlashPolicyServer(policylistener)else:self.policy_server = Noneself.config = {'': ,'': ,'': ,}for f in ('', '', ''):if f in kwargs:self.config[f] = int(kwargs.pop(f))if not '' in kwargs:kwargs[''] = SocketIOHandlerif not '' in kwargs:self.ws_handler_class = WebSocketHandlerelse:self.ws_handler_class = kwargs.pop('')log_file = kwargs.pop('', None)if log_file:kwargs[''] = open(log_file, '')super(SocketIOServer, self).__init__(*args, **kwargs)", "docstring": "This is just like the standard WSGIServer __init__, except with a\n few additional ``kwargs``:\n\n :param resource: The URL which has to be identified as a\n socket.io request. Defaults to the /socket.io/ URL.\n\n :param transports: Optional list of transports to allow. List of\n strings, each string should be one of\n handler.SocketIOHandler.handler_types.\n\n :param policy_server: Boolean describing whether or not to use the\n Flash policy server. Default True.\n\n :param policy_listener: A tuple containing (host, port) for the\n policy server. This is optional and used only if policy server\n is set to true. The default value is 0.0.0.0:843\n\n :param heartbeat_interval: int The timeout for the server, we\n should receive a heartbeat from the client within this\n interval. This should be less than the\n ``heartbeat_timeout``.\n\n :param heartbeat_timeout: int The timeout for the client when\n it should send a new heartbeat to the server. This value\n is sent to the client after a successful handshake.\n\n :param close_timeout: int The timeout for the client, when it\n closes the connection it still has X seconds to\n re-open the connection. 
This value is sent to the\n client after a successful handshake.\n\n :param log_file: str The file in which you want the PyWSGI\n server to write its access log. If not specified, it\n is sent to `stderr` (with gevent 0.13).", "id": "f11810:c0:m0"} {"signature": "def get_socket(self, sessid=''):", "body": "socket = self.sockets.get(sessid)if sessid and not socket:return None if socket is None:socket = Socket(self, self.config)self.sockets[socket.sessid] = socketelse:socket.incr_hits()return socket", "docstring": "Return an existing or new client Socket.", "id": "f11810:c0:m4"} {"signature": "def __init__(self, handler, config, **kwargs):", "body": "self.content_type = (\"\", \"\")self.headers = [(\"\", \"\"),(\"\", \"\"),(\"\", \"\"),(\"\", \"\"),]self.handler = handlerself.config = config", "docstring": "Base transport class.\n\n :param config: dict Should contain the config keys, like\n ``heartbeat_interval``, ``heartbeat_timeout`` and\n ``close_timeout``.", "id": "f11811:c0:m0"} {"signature": "def get_messages_payload(self, socket, timeout=None):", "body": "try:msgs = socket.get_multiple_client_msgs(timeout=timeout)data = self.encode_payload(msgs)except Empty:data = \"\"return data", "docstring": "This will fetch the messages from the Socket's queue, and if\n there are many messages, pack them into one payload and return it", "id": "f11811:c1:m5"} {"signature": "def encode_payload(self, messages):", "body": "if not messages or messages[] is None:return ''if len(messages) == :return messages[].encode('')payload = u''.join([(u'' % (len(p), p))for p in messages if p is not None])return payload.encode('')", "docstring": "Encode list of messages. Expects messages to be unicode.\n\n ``messages`` - List of raw messages to encode, if necessary", "id": "f11811:c1:m6"} {"signature": "def decode_payload(self, payload):", "body": "payload = payload.decode('')if payload[] == u\"\":ret = []while len(payload) != :len_end = payload.find(u\"\", )length = int(payload[:len_end])msg_start = len_end + msg_end = length + msg_startmessage = payload[msg_start:msg_end]ret.append(message)payload = payload[msg_end:]return retreturn [payload]", "docstring": "This function can extract multiple messages from one HTTP payload.\n Sometimes, the XHR/JSONP/.. transports can pack more than one message\n on a single packet. 
They are encoding following the WebSocket\n semantics, which need to be reproduced here to unwrap the messages.\n\n The semantics are:\n\n \\ufffd + [length as a string] + \\ufffd + [payload as a unicode string]\n\n This function returns a list of messages, even though there is only\n one.\n\n Inspired by socket.io/lib/transports/http.js", "id": "f11811:c1:m7"} {"signature": "def write(self, data):", "body": "args = parse_qs(self.handler.environ.get(\"\"))if \"\" in args:i = args[\"\"]else:i = \"\"super(JSONPolling, self).write(\"\" % (i, data))", "docstring": "Just quote out stuff before sending it out", "id": "f11811:c2:m2"} {"signature": "def socketio_manage(environ, namespaces, request=None, error_handler=None,json_loads=None, json_dumps=None):", "body": "socket = environ['']socket._set_environ(environ)socket._set_namespaces(namespaces)if request:socket._set_request(request)if error_handler:socket._set_error_handler(error_handler)if json_loads:socket._set_json_loads(json_loads)if json_dumps:socket._set_json_dumps(json_dumps)receiver_loop = socket._spawn_receiver_loop()gevent.joinall([receiver_loop])return", "docstring": "Main SocketIO management function, call from within your Framework of\n choice's view.\n\n The ``environ`` variable is the WSGI ``environ``. It is used to extract\n Socket object from the underlying server (as the 'socketio' key), and will\n be attached to both the ``Socket`` and ``Namespace`` objects.\n\n The ``namespaces`` parameter is a dictionary of the namespace string\n representation as key, and the BaseNamespace namespace class descendant as\n a value. The empty string ('') namespace is the global namespace. You can\n use Socket.GLOBAL_NS to be more explicit. So it would look like:\n\n .. code-block:: python\n\n namespaces={'': GlobalNamespace,\n '/chat': ChatNamespace}\n\n The ``request`` object is not required, but will probably be useful to pass\n framework-specific things into your Socket and Namespace functions. It will\n simply be attached to the Socket and Namespace object (accessible through\n ``self.request`` in both cases), and it is not accessed in any case by the\n ``gevent-socketio`` library.\n\n Pass in an ``error_handler`` if you want to override the default\n error_handler (which is :func:`socketio.virtsocket.default_error_handler`.\n The callable you pass in should have the same signature as the default\n error handler.\n\n The ``json_loads`` and ``json_dumps`` are overrides for the default\n ``json.loads`` and ``json.dumps`` function calls. Override these at\n the top-most level here. This will affect all sockets created by this\n socketio manager, and all namespaces inside.\n\n This function will block the current \"view\" or \"controller\" in your\n framework to do the recv/send on the socket, and dispatch incoming messages\n to your namespaces.\n\n This is a simple example using Pyramid:\n\n .. code-block:: python\n\n def my_view(request):\n socketio_manage(request.environ, {'': GlobalNamespace}, request)\n\n NOTE: You must understand that this function is going to be called\n *only once* per socket opening, *even though* you are using a long\n polling mechanism. The subsequent calls (for long polling) will\n be hooked directly at the server-level, to interact with the\n active ``Socket`` instance. This means you will *not* get access\n to the future ``request`` or ``environ`` objects. This is of\n particular importance regarding sessions (like Beaker). 
The\n session will be opened once at the opening of the Socket, and not\n closed until the socket is closed. You are responsible for\n opening and closing the cookie-based session yourself if you want\n to keep its data in sync with the rest of your GET/POST calls.", "id": "f11812:m0"} {"signature": "def __init__(self, config, *args, **kwargs):", "body": "self.socketio_connection = Falseself.allowed_paths = Noneself.config = configsuper(SocketIOHandler, self).__init__(*args, **kwargs)self.transports = list(self.handler_types.keys())if self.server.transports:self.transports = self.server.transportsif not set(self.transports).issubset(set(self.handler_types)):raise ValueError(\"\" %list(self.handler_types.keys()))", "docstring": "Create a new SocketIOHandler.\n\n :param config: dict Configuration for timeouts and intervals\n that will go down to the other components, transports, etc..", "id": "f11815:c0:m0"} {"signature": "def handle_one_response(self):", "body": "path = self.environ.get('')if not path.lstrip('').startswith(self.server.resource + ''):return super(SocketIOHandler, self).handle_one_response()self.status = Noneself.headers_sent = Falseself.result = Noneself.response_length = self.response_use_chunked = Falserequest_method = self.environ.get(\"\")request_tokens = self.RE_REQUEST_URL.match(path)handshake_tokens = self.RE_HANDSHAKE_URL.match(path)disconnect_tokens = self.RE_DISCONNECT_URL.match(path)if handshake_tokens:return self._do_handshake(handshake_tokens.groupdict())elif disconnect_tokens:tokens = disconnect_tokens.groupdict()elif request_tokens:tokens = request_tokens.groupdict()else:return super(SocketIOHandler, self).handle_one_response()sessid = tokens[\"\"]socket = self.server.get_socket(sessid)if not socket:self.handle_bad_request()return [] if self.environ[''].startswith(''):socket.disconnect()self.handle_disconnect_request()return []transport = self.handler_types.get(tokens[\"\"])old_class = Noneif issubclass(transport, (transports.WebsocketTransport,transports.FlashSocketTransport)):old_class = self.__class__self.__class__ = self.server.ws_handler_classself.prevent_wsgi_call = True self.handle_one_response() self.environ[''] = socketself.transport = transport(self, self.config)self.transport.do_exchange(socket, request_method)if not socket.connection_established:socket.connection_established = Truesocket.state = socket.STATE_CONNECTEDsocket._spawn_heartbeat()socket._spawn_watcher()try:if socket.wsgi_app_greenlet is None:start_response = lambda status, headers, exc=None: Nonesocket.wsgi_app_greenlet = gevent.spawn(self.application,self.environ,start_response)except:self.handle_error(*sys.exc_info())if tokens[''] in ['', '']:gevent.joinall(socket.jobs)if old_class:self.__class__ = old_classif hasattr(self, '') and self.websocket:if hasattr(self.websocket, ''):del self.websocket.environdel self.websocketif self.environ:del self.environ", "docstring": "This function deals with *ONE INCOMING REQUEST* from the web.\n\n It will wire and exchange message to the queues for long-polling\n methods, otherwise, will stay alive for websockets.", "id": "f11815:c0:m5"} {"signature": "def is_method_allowed(self, method_name):", "body": "if self.allowed_methods is None:return Trueelse:return method_name in self.allowed_methods", "docstring": "ACL system: this checks if you have access to that method_name,\n according to the set ACLs", "id": "f11816:c0:m1"} {"signature": "def add_acl_method(self, method_name):", "body": "if isinstance(self.allowed_methods, 
set):self.allowed_methods.add(method_name)else:self.allowed_methods = set([method_name])", "docstring": "ACL system: make the method_name accessible to the current socket", "id": "f11816:c0:m2"} {"signature": "def del_acl_method(self, method_name):", "body": "if self.allowed_methods is None:raise ValueError(\"\"+ \"\"+ \"\")self.allowed_methods.remove(method_name)", "docstring": "ACL system: ensure the user will not have access to that method.", "id": "f11816:c0:m3"} {"signature": "def lift_acl_restrictions(self):", "body": "self.allowed_methods = None", "docstring": "ACL system: This removes restrictions on the Namespace's methods, so\n that all the ``on_*()`` and ``recv_*()`` can be accessed.", "id": "f11816:c0:m4"} {"signature": "def get_initial_acl(self):", "body": "return None", "docstring": "ACL system: If you define this function, you must return\n all the 'event' names that you want your User (the established\n virtual Socket) to have access to.\n\n If you do not define this function, the user will have free\n access to all of the ``on_*()`` and ``recv_*()`` functions,\n etc.. methods.\n\n Return something like: ``set(['recv_connect', 'on_public_method'])``\n\n You can later modify this list dynamically (inside\n ``on_connect()`` for example) using:\n\n .. code-block:: python\n\n self.add_acl_method('on_secure_method')\n\n ``self.request`` is available in here, if you're already ready to\n do some auth. check.\n\n The ACLs are checked by the :meth:`process_packet` and/or\n :meth:`process_event` default implementations, before calling\n the class's methods.\n\n **Beware**, returning ``None`` leaves the namespace completely\n accessible.\n\n The methods that are open are stored in the ``allowed_methods``\n attribute of the ``Namespace`` instance.", "id": "f11816:c0:m5"} {"signature": "def reset_acl(self):", "body": "self.allowed_methods = self.get_initial_acl()", "docstring": "Resets ACL to its initial value (calling\n :meth:`get_initial_acl`` and applying that again).", "id": "f11816:c0:m6"} {"signature": "def process_packet(self, packet):", "body": "packet_type = packet['']if packet_type == '':return self.process_event(packet)elif packet_type == '':return self.call_method_with_acl('', packet,packet[''])elif packet_type == '':return self.call_method_with_acl('', packet,packet[''])elif packet_type == '':self.socket.send_packet(packet)return self.call_method_with_acl('', packet)elif packet_type == '':return self.call_method_with_acl('', packet)elif packet_type == '':callback = self.socket._pop_ack_callback(packet[''])if not callback:print(\"\" % packet[''])returnreturn callback(*(packet['']))elif packet_type == '':return self.call_method_with_acl('', packet)else:print(\"\", packet)", "docstring": "If you override this, NONE of the functions in this class\n will be called. 
It is responsible for dispatching to\n :meth:`process_event` (which in turn calls ``on_*()`` and\n ``recv_*()`` methods).\n\n If the packet arrived here, it is because it belongs to this endpoint.\n\n For each packet arriving, the only possible path of execution, that is,\n the only methods that *can* be called are the following:\n\n * recv_connect()\n * recv_message()\n * recv_json()\n * recv_error()\n * recv_disconnect()\n * on_*()", "id": "f11816:c0:m7"} {"signature": "def process_event(self, packet):", "body": "args = packet['']name = packet['']if not allowed_event_name_regex.match(name):self.error(\"\",\"\")returnmethod_name = '' + name.replace('', '')return self.call_method_with_acl(method_name, packet, *args)", "docstring": "This function dispatches ``event`` messages to the correct\n functions. You should override this method only if you are not\n satisfied with the automatic dispatching to\n ``on_``-prefixed methods. You could then implement your own dispatch.\n See the source code for inspiration.\n\n There are two ways to deal with callbacks from the client side\n (meaning, the browser has a callback waiting for data that this\n server will be sending back):\n\n The first one is simply to return an object. If the incoming\n packet requested has an 'ack' field set, meaning the browser is\n waiting for callback data, it will automatically be packaged\n and sent, associated with the 'ackId' from the browser. The\n return value must be a *sequence* of elements, that will be\n mapped to the positional parameters of the callback function\n on the browser side.\n\n If you want to *know* that you're dealing with a packet\n that requires a return value, you can do those things manually\n by inspecting the ``ack`` and ``id`` keys from the ``packet``\n object. Your callback will behave specially if the name of\n the argument to your method is ``packet``. It will fill it\n with the unprocessed ``packet`` object for your inspection,\n like this:\n\n .. 
code-block:: python\n\n def on_my_callback(self, packet):\n if 'ack' in packet:\n self.emit('go_back', 'param1', id=packet['id'])", "id": "f11816:c0:m8"} {"signature": "def call_method_with_acl(self, method_name, packet, *args):", "body": "if not self.is_method_allowed(method_name):self.error('','' % method_name)returnreturn self.call_method(method_name, packet, *args)", "docstring": "You should always use this function to call the methods,\n as it checks if the user is allowed according to the ACLs.\n\n If you override :meth:`process_packet` or\n :meth:`process_event`, you should definitely want to use this\n instead of ``getattr(self, 'my_method')()``", "id": "f11816:c0:m9"} {"signature": "def call_method(self, method_name, packet, *args):", "body": "method = getattr(self, method_name, None)if method is None:self.error('','' % method_name)returnspecs = inspect.getargspec(method)func_args = specs.argsif not len(func_args) or func_args[] != '':self.error(\"\",\"\"\"\")returnif hasattr(self, ''):method = self.exception_handler_decorator(method)if len(func_args) == and func_args[] == '':return method(packet)else:return method(*args)", "docstring": "This function is used to implement the two behaviors on dispatched\n ``on_*()`` and ``recv_*()`` method calls.\n\n Those are the two behaviors:\n\n * If there is only one parameter on the dispatched method and\n it is named ``packet``, then pass in the packet dict as the\n sole parameter.\n\n * Otherwise, pass in the arguments as specified by the\n different ``recv_*()`` methods args specs, or the\n :meth:`process_event` documentation.\n\n This method will also consider the\n ``exception_handler_decorator``. See Namespace documentation\n for details and examples.", "id": "f11816:c0:m10"} {"signature": "def initialize(self):", "body": "pass", "docstring": "This is called right after ``__init__``, on the initial\n creation of a namespace so you may handle any setup job you\n need.\n\n Namespaces are created only when some packets arrive that ask\n for the namespace. They are not created altogether when a new\n :class:`~socketio.virtsocket.Socket` connection is established,\n so you can have many many namespaces assigned (when calling\n :func:`~socketio.socketio_manage`) without clogging the\n memory.\n\n If you override this method, you probably want to initialize\n the variables you're going to use in the events handled by this\n namespace, setup ACLs, etc..\n\n This method is called on all base classes following the _`method resolution order `\n so you don't need to call super() to initialize the mixins or\n other derived classes.", "id": "f11816:c0:m11"} {"signature": "def recv_message(self, data):", "body": "return data", "docstring": "This is more of a backwards compatibility hack. This will be\n called for messages sent with the original send() call on the client\n side. This is NOT the 'message' event, which you will catch with\n 'on_message()'. The data arriving here is a simple string, with no\n other info.\n\n If you want to handle those messages, you should override this method.", "id": "f11816:c0:m12"} {"signature": "def recv_json(self, data):", "body": "return data", "docstring": "This is more of a backwards compatibility hack. This will be\n called for JSON packets sent with the original json() call on the\n JavaScript side. This is NOT the 'json' event, which you will catch\n with 'on_json()'. 
The data arriving here is a python dict, with no\n event name.\n\n If you want to handle those messages, you should override this method.", "id": "f11816:c0:m13"} {"signature": "def recv_disconnect(self):", "body": "self.disconnect(silent=True)", "docstring": "Override this function if you want to do something when you get a\n *force disconnect* packet.\n\n By default, this function calls the :meth:`disconnect` clean-up\n function. You probably want to call it yourself also, and put\n your clean-up routines in :meth:`disconnect` rather than here,\n because that :meth:`disconnect` function gets called\n automatically upon disconnection. This function is a\n pre-handle for when you get the `disconnect packet`.", "id": "f11816:c0:m14"} {"signature": "def recv_connect(self):", "body": "pass", "docstring": "Called the first time a client connection is open on a\n Namespace. This *does not* fire on the global namespace.\n\n This allows you to do boilerplate stuff for\n the namespace like connecting to rooms, broadcasting events\n to others, doing authorization work, and tweaking the ACLs to open\n up the rest of the namespace (if it was closed at the\n beginning by having :meth:`get_initial_acl` return only\n ['recv_connect'])\n\n Also see the different :ref:`mixins ` (like\n `RoomsMixin`, `BroadcastMixin`).", "id": "f11816:c0:m15"} {"signature": "def recv_error(self, packet):", "body": "pass", "docstring": "Override this function to handle the errors we get from the client.\n\n :param packet: the full packet.", "id": "f11816:c0:m16"} {"signature": "def error(self, error_name, error_message, msg_id=None, quiet=False):", "body": "self.socket.error(error_name, error_message, endpoint=self.ns_name,msg_id=msg_id, quiet=quiet)", "docstring": "Use this to use the configured ``error_handler`` yield an\n error message to your application.\n\n :param error_name: is a short string, to associate messages to recovery\n methods\n :param error_message: is some human-readable text, describing the error\n :param msg_id: is used to associate with a request\n :param quiet: specific to error_handlers. The default doesn't send a\n message to the user, but shows a debug message on the\n developer console.", "id": "f11816:c0:m17"} {"signature": "def send(self, message, json=False, callback=None):", "body": "pkt = dict(type=\"\", data=message, endpoint=self.ns_name)if json:pkt[''] = \"\"if callback:pkt[''] = Truepkt[''] = msgid = self.socket._get_next_msgid()self.socket._save_ack_callback(msgid, callback)self.socket.send_packet(pkt)", "docstring": "Use send to send a simple string message.\n\n If ``json`` is True, the message will be encoded as a JSON object\n on the wire, and decoded on the other side.\n\n This is mostly for backwards compatibility. ``emit()`` is more fun.\n\n :param callback: This is a callback function that will be\n called automatically by the client upon\n reception. It does not verify that the\n listener over there was completed with\n success. 
It just tells you that the browser\n got a hold of the packet.\n :type callback: callable", "id": "f11816:c0:m18"} {"signature": "def emit(self, event, *args, **kwargs):", "body": "callback = kwargs.pop('', None)if kwargs:raise ValueError(\"\"\"\"\"\")pkt = dict(type=\"\", name=event, args=args,endpoint=self.ns_name)if callback:pkt[''] = ''pkt[''] = msgid = self.socket._get_next_msgid()self.socket._save_ack_callback(msgid, callback)self.socket.send_packet(pkt)", "docstring": "Use this to send a structured event, with a name and arguments, to\n the client.\n\n By default, it uses this namespace's endpoint. You can send messages on\n other endpoints with something like:\n\n ``self.socket['/other_endpoint'].emit()``.\n\n However, it is possible that the ``'/other_endpoint'`` was not\n initialized yet, and that would yield a ``KeyError``.\n\n The only supported ``kwargs`` is ``callback``. All other parameters\n must be passed positionally.\n\n :param event: The name of the event to trigger on the other end.\n :param callback: Pass in the callback keyword argument to define a\n call-back that will be called when the client acks.\n\n This callback is slightly different from the one from\n ``send()``, as this callback will receive parameters\n from the explicit call of the ``ack()`` function\n passed to the listener on the client side.\n\n The remote listener will need to explicitly ack (by\n calling its last argument, a function which is\n usually called 'ack') with some parameters indicating\n success or error. The 'ack' packet coming back here\n will then trigger the callback function with the\n returned values.\n :type callback: callable", "id": "f11816:c0:m19"} {"signature": "def spawn(self, fn, *args, **kwargs):", "body": "if hasattr(self, ''):fn = self.exception_handler_decorator(fn)new = gevent.spawn(fn, *args, **kwargs)self.jobs.append(new)return new", "docstring": "Spawn a new process, attached to this Namespace.\n\n It will be monitored by the \"watcher\" process in the Socket. If the\n socket disconnects, all these greenlets are going to be killed, after\n calling BaseNamespace.disconnect()\n\n This method uses the ``exception_handler_decorator``. See\n Namespace documentation for more information.", "id": "f11816:c0:m20"} {"signature": "def disconnect(self, silent=False):", "body": "if not silent:packet = {\"\": \"\",\"\": self.ns_name}self.socket.send_packet(packet)try:self.socket.remove_namespace(self.ns_name)finally:self.kill_local_jobs()", "docstring": "Send a 'disconnect' packet, so that the user knows it has been\n disconnected (booted actually). 
This will trigger an onDisconnect()\n call on the client side.\n\n Over here, we will kill all ``spawn``ed processes and remove the\n namespace from the Socket object.\n\n :param silent: do not actually send the packet (if they asked for a\n disconnect for example), but just kill all jobs spawned\n by this Namespace, and remove it from the Socket.", "id": "f11816:c0:m21"} {"signature": "def kill_local_jobs(self):", "body": "gevent.killall(self.jobs)self.jobs = []", "docstring": "Kills all the jobs spawned with BaseNamespace.spawn() on a namespace\n object.\n\n This will be called automatically if the ``watcher`` process detects\n that the Socket was closed.", "id": "f11816:c0:m22"} {"signature": "def run(self, args):", "body": "self.args = self.parse_and_process_args(args)if self.args.version:print(__version__)return if self.args.rulefile:for filename in self.args.rulefile:self._load_rule_file(filename)if self.args.list:self.list_rules()return if self.args.describe:self._describe_rules(self.args.args)return self.counts = { ERROR: , WARNING: , \"\": }for filename in self.args.args:if not (os.path.exists(filename)):sys.stderr.write(\"\" % filename)continueif os.path.isdir(filename):self._process_folder(filename)else:self._process_file(filename)if self.counts[ERROR] > :return self.counts[ERROR] if self.counts[ERROR] < else return ", "docstring": "Parse command line arguments, and run rflint", "id": "f11825:c0:m7"} {"signature": "def list_rules(self):", "body": "for rule in sorted(self.all_rules, key=lambda rule: rule.name):print(rule)if self.args.verbose:for line in rule.doc.split(\"\"):print(\"\", line)", "docstring": "Print a list of all rules", "id": "f11825:c0:m13"} {"signature": "def report(self, linenumber, filename, severity, message, rulename, char):", "body": "if self._print_filename is not None:print(\"\" + self._print_filename)self._print_filename = Noneif severity in (WARNING, ERROR):self.counts[severity] += else:self.counts[\"\"] += print(self.args.format.format(linenumber=linenumber, filename=filename,severity=severity, message=message.encode(''),rulename=rulename, char=char))", "docstring": "Report a rule violation", "id": "f11825:c0:m14"} {"signature": "def _get_rules(self, cls):", "body": "result = []for rule_class in cls.__subclasses__():rule_name = rule_class.__name__.lower()if rule_name not in self._rules:rule = rule_class(self)self._rules[rule_name] = ruleresult.append(self._rules[rule_name])return result", "docstring": "Returns a list of rules of a given class\n\n Rules are treated as singletons - we only instantiate each\n rule once.", "id": "f11825:c0:m15"} {"signature": "def _load_rule_file(self, filename):", "body": "if not (os.path.exists(filename)):sys.stderr.write(\"\" % filename)returntry:basename = os.path.basename(filename)(name, ext) = os.path.splitext(basename)imp.load_source(name, filename)except Exception as e:sys.stderr.write(\"\" % (filename, str(e)))", "docstring": "Import the given rule file", "id": "f11825:c0:m16"} {"signature": "def parse_and_process_args(self, args):", "body": "parser = argparse.ArgumentParser(prog=\"\",description=\"\",formatter_class=argparse.RawDescriptionHelpFormatter,epilog = (\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"))parser.add_argument(\"\", \"\", metavar=\"\", action=SetErrorAction,help=\"\")parser.add_argument(\"\", \"\", metavar=\"\", action=SetIgnoreAction,help=\"\")parser.add_argument(\"\", \"\", metavar=\"\", action=SetWarningAction,help=\"\")parser.add_argument(\"\", \"\", 
action=\"\",help=\"\")parser.add_argument(\"\", \"\", action=\"\",help=\"\")parser.add_argument(\"\", action=\"\", dest=\"\", default=True,help=\"\")parser.add_argument(\"\", \"\", help=\"\",default='')parser.add_argument(\"\", action=\"\", default=False,help=\"\")parser.add_argument(\"\", \"\", action=\"\", default=False,help=\"\")parser.add_argument(\"\", \"\", action=ConfigureAction,help=\"\")parser.add_argument(\"\", \"\", action=\"\", default=False,help=\"\")parser.add_argument(\"\", \"\", action=RulefileAction,help=\"\")parser.add_argument(\"\", \"\", action=ArgfileLoader,help=\"\")parser.add_argument('', metavar=\"\", nargs=argparse.REMAINDER)ns = argparse.Namespace()setattr(ns, \"\", self)args = parser.parse_args(args, ns)Rule.output_format = args.formatreturn args", "docstring": "Handle the parsing of command line arguments.", "id": "f11825:c0:m17"} {"signature": "def report(self, obj, message, linenum, char_offset=):", "body": "self.controller.report(linenumber=linenum, filename=obj.path,severity=self.severity, message=message,rulename = self.__class__.__name__,char=char_offset)", "docstring": "Report an error or warning", "id": "f11826:c0:m3"} {"signature": "@propertydef doc(self):", "body": "if not self.__doc__:return \"\"lines = self.__doc__.expandtabs().splitlines()indent = sys.maxsizefor line in lines[:]:stripped = line.lstrip()if stripped:indent = min(indent, len(line) - len(stripped))trimmed = [lines[].strip()]if indent < sys.maxsize:for line in lines[:]:trimmed.append(line[indent:].rstrip())while trimmed and not trimmed[-]:trimmed.pop()while trimmed and not trimmed[]:trimmed.pop()return ''.join(trimmed)", "docstring": "Algorithm from https://www.python.org/dev/peps/pep-0257/", "id": "f11826:c0:m4"} {"signature": "def normalize_name(string):", "body": "return string.replace(\"\", \"\").replace(\"\", \"\").lower()", "docstring": "convert to lowercase, remove spaces and underscores", "id": "f11829:m0"} {"signature": "def RobotFactory(path, parent=None):", "body": "if os.path.isdir(path):return SuiteFolder(path, parent)else:rf = RobotFile(path, parent)for table in rf.tables:if isinstance(table, TestcaseTable):rf.__class__ = SuiteFilereturn rfrf.__class__ = ResourceFilereturn rf", "docstring": "Return an instance of SuiteFile, ResourceFile, SuiteFolder\n\n Exactly which is returned depends on whether it's a file or\n folder, and if a file, the contents of the file. 
If there is a\n testcase table, this will return an instance of SuiteFile,\n otherwise it will return an instance of ResourceFile.", "id": "f11834:m0"} {"signature": "def walk(self, *types):", "body": "requested = types if len(types) > else [SuiteFile, ResourceFile, SuiteFolder, Testcase, Keyword]for thing in self.robot_files:if thing.__class__ in requested:yield thingif isinstance(thing, SuiteFolder):for child in thing.walk():if child.__class__ in requested:yield childelse:for child in thing.walk(*types):yield child", "docstring": "Iterator which visits all suites and suite files,\nyielding test cases and keywords", "id": "f11834:c0:m1"} {"signature": "@propertydef robot_files(self):", "body": "result = []for name in os.listdir(self.path):fullpath = os.path.join(self.path, name)if os.path.isdir(fullpath):result.append(RobotFactory(fullpath, parent=self))else:if ((name.endswith(\"\") or name.endswith(\"\")) and(name not in (\"\", \"\"))):result.append(RobotFactory(fullpath, parent=self))return result", "docstring": "Return a list of all folders, and test suite files (.txt, .robot)", "id": "f11834:c0:m2"} {"signature": "def walk(self, *types):", "body": "requested = types if len(types) > else [Testcase, Keyword]if Testcase in requested:for testcase in self.testcases:yield testcaseif Keyword in requested:for keyword in self.keywords:yield keyword", "docstring": "Iterator which can return all test cases and/or keywords\n\nYou can specify which objects to return as parameters; if\nno parameters are given, both tests and keywords will\nbe returned.\n\nFor example, to get only test cases, you could call it\nlike this:\n\n robot_file = RobotFactory(...)\n for testcase in robot_file.walk(Testcase): ...", "id": "f11834:c1:m1"} {"signature": "def _load(self, path):", "body": "self.tables = []current_table = DefaultTable(self)with Utf8Reader(path) as f:self.raw_text = f.read()f._file.seek() matcher = Matcher(re.IGNORECASE)for linenumber, raw_text in enumerate(f.readlines()):linenumber += ; raw_text = raw_text.replace(u'', '')raw_text = raw_text.rstrip()cells = TxtReader.split_row(raw_text)_heading_regex = r''if matcher(_heading_regex, cells[]):table_name = matcher.group()current_table = tableFactory(self, linenumber, table_name, raw_text)self.tables.append(current_table)else:current_table.append(Row(linenumber, raw_text, cells))", "docstring": "The general idea is to do a quick parse, creating a list of\ntables. Each table is nothing more than a list of rows, with\neach row being a list of cells. Additional parsing such as\ncombining rows into statements is done on demand. This first\npass is solely to read in the plain text and organize it by table.", "id": "f11834:c1:m2"} {"signature": "@propertydef type(self):", "body": "robot_tables = [table for table in self.tables if not isinstance(table, UnknownTable)]if len(robot_tables) == :return Nonefor table in self.tables:if isinstance(table, TestcaseTable):return \"\"return \"\"", "docstring": "Return 'suite' or 'resource' or None\n\n This will return 'suite' if a testcase table is found;\n It will return 'resource' if at least one robot table\n is found. 
If no tables are found it will return None", "id": "f11834:c1:m4"} {"signature": "@propertydef keywords(self):", "body": "for table in self.tables:if isinstance(table, KeywordTable):for keyword in table.keywords:yield keyword", "docstring": "Generator which returns all keywords in the suite", "id": "f11834:c1:m5"} {"signature": "def dump(self):", "body": "for table in self.tables:print(\"\" % table.name)table.dump()", "docstring": "Regurgitate the tables and rows", "id": "f11834:c1:m7"} {"signature": "@propertydef settings(self):", "body": "for table in self.tables:if isinstance(table, SettingTable):for statement in table.statements:yield statement", "docstring": "Generator which returns all of the statements in all of the settings tables", "id": "f11834:c2:m1"} {"signature": "@propertydef variables(self):", "body": "for table in self.tables:if isinstance(table, VariableTable):for statement in table.rows:if statement[] != \"\":yield statement", "docstring": "Generator which returns all of the statements in all of the variables tables", "id": "f11834:c2:m2"} {"signature": "@propertydef settings(self):", "body": "for table in self.tables:if isinstance(table, SettingTable):for statement in table.statements:yield statement", "docstring": "Generator which returns all of the statements in all of the settings tables", "id": "f11834:c3:m1"} {"signature": "@propertydef statements(self):", "body": "if len(self.rows) == :return []current_statement = Statement(self.rows[])current_statement.startline = self.rows[].linenumbercurrent_statement.endline = self.rows[].linenumberstatements = []for row in self.rows[:]:if len(row) > and row[] == \"\":current_statement += row[:]current_statement.endline = row.linenumberelse:if len(current_statement) > :statements.append(current_statement)current_statement = Statement(row)current_statement.startline = row.linenumbercurrent_statement.endline = row.linenumberif len(current_statement) > :statements.append(current_statement)while (len(statements[-]) == or ((len(statements[-]) == ) and len(statements[-][]) == )):statements.pop()return statements", "docstring": "Return a list of statements\n\n This is done by joining together any rows that\n have continuations", "id": "f11835:c1:m0"} {"signature": "def append(self, row):", "body": "if len(row) == :returnif (row[] != \"\" and (not row[].lstrip().startswith(\"\"))):self._children.append(self._childClass(self.parent, row.linenumber, row[]))if len(row.cells) > :row[] = \"\"self._children[-].append(row.linenumber, row.raw_text, row.cells)elif len(self._children) == :self.comments.append(row)else:if len(row.cells) > :self._children[-].append(row.linenumber, row.raw_text, row.cells)", "docstring": "The idea is, we recognize when we have a new testcase by \nchecking the first cell. 
If it's not empty and not a comment, \nwe have a new test case.", "id": "f11835:c7:m2"} {"signature": "@propertydef is_templated(self):", "body": "for table in self.parent.tables:if isinstance(table, SettingTable):for row in table.rows:if row[].lower() == \"\":return Truereturn False", "docstring": "Return True if the test is part of a suite that uses a Test Template", "id": "f11836:c0:m1"} {"signature": "def append(self, linenumber, raw_text, cells):", "body": "self.rows.append(Row(linenumber, raw_text, cells))", "docstring": "Add another row of data from a test suite", "id": "f11837:c0:m0"} {"signature": "@propertydef steps(self):", "body": "steps = []for statement in self.statements:if ((not statement.is_comment()) and(not statement.is_setting())):steps.append(statement)return steps", "docstring": "Return a list of steps (statements that are not settings or comments)", "id": "f11837:c0:m2"} {"signature": "@propertydef settings(self):", "body": "return [statement for statement in self.statements if statement.is_setting()]", "docstring": "Return a list of settings (statements with cell[1] matching \\[.*?\\])\n\n Note: this returns any statement that *looks* like a setting. If you have\n a misspelled or completely bogus setting, it'll return that too\n (eg: | | [Blockumentation] | hello, world)", "id": "f11837:c0:m3"} {"signature": "@propertydef statements(self):", "body": "if len(self.rows) == :return []current_statement = Statement(self.rows[])current_statement.startline = self.rows[].linenumbercurrent_statement.endline = self.rows[].linenumberstatements = []for row in self.rows[:]:if len(row) > and row[] == \"\" and row[] == \"\":current_statement += row[:]current_statement.endline = row.linenumberelse:if len(current_statement) > :statements.append(current_statement)current_statement = Statement(row)current_statement.startline = row.linenumbercurrent_statement.endline = row.linenumberif len(current_statement) > :statements.append(current_statement)return statements", "docstring": "Return a list of statements\n\n This is done by joining together any rows that\n have continuations", "id": "f11837:c0:m4"} {"signature": "def is_comment(self):", "body": "for cell in self[:]:if cell == \"\":continueif cell.lstrip().startswith(\"\"):return Trueelse:return Falsereturn False", "docstring": "Return True if the first non-empty cell starts with \"#", "id": "f11837:c3:m1"} {"signature": "def apply_some_settings(self, index):", "body": "old_hpp = index.settings[''] if '' in index.settings else Noneindex.settings[''] = index.reindex_all()index.settings[''] = old_hpptime.sleep() index_settings = index.get_settings()self.assertEqual(index_settings[''], ,\"\")return index_settings", "docstring": "Applies a sample setting to the index.\n\n:param index: an AlgoliaIndex that will be updated\n:return: the new settings", "id": "f11841:c0:m13"} {"signature": "def __init__(self, settings=SETTINGS):", "body": "try:app_id = settings['']api_key = settings['']except KeyError:raise AlgoliaEngineError('')self.__auto_indexing = settings.get('', True)self.__settings = settingsself.__registered_models = {}self.client = algoliasearch.Client(app_id, api_key)self.client.set_extra_header('',''% (CLIENT_VERSION, python_version(), VERSION, django_version))", "docstring": "Initializes the Algolia engine.", "id": "f11854:c2:m0"} {"signature": "def is_registered(self, model):", "body": "return model in self.__registered_models", "docstring": "Checks whether the given model is registered with Algolia engine", "id": 
"f11854:c2:m1"} {"signature": "def register(self, model, index_cls=AlgoliaIndex, auto_indexing=None):", "body": "if self.is_registered(model):raise RegistrationError(''.format(model))if not issubclass(index_cls, AlgoliaIndex):raise RegistrationError(''.format(index_cls))index_obj = index_cls(model, self.client, self.__settings)self.__registered_models[model] = index_objif (isinstance(auto_indexing, bool) andauto_indexing) or self.__auto_indexing:post_save.connect(self.__post_save_receiver, model)pre_delete.connect(self.__pre_delete_receiver, model)logger.info('', model)", "docstring": "Registers the given model with Algolia engine.\n\nIf the given model is already registered with Algolia engine, a\nRegistrationError will be raised.", "id": "f11854:c2:m2"} {"signature": "def unregister(self, model):", "body": "if not self.is_registered(model):raise RegistrationError(''.format(model))del self.__registered_models[model]post_save.disconnect(self.__post_save_receiver, model)pre_delete.disconnect(self.__pre_delete_receiver, model)logger.info('', model)", "docstring": "Unregisters the given model with Algolia engine.\n\nIf the given model is not registered with Algolia engine, a\nRegistrationError will be raised.", "id": "f11854:c2:m3"} {"signature": "def get_registered_models(self):", "body": "return list(self.__registered_models.keys())", "docstring": "Returns a list of models that have been registered with Algolia\nengine.", "id": "f11854:c2:m4"} {"signature": "def get_adapter(self, model):", "body": "if not self.is_registered(model):raise RegistrationError(''.format(model))return self.__registered_models[model]", "docstring": "Returns the adapter associated with the given model.", "id": "f11854:c2:m5"} {"signature": "def get_adapter_from_instance(self, instance):", "body": "model = instance.__class__return self.get_adapter(model)", "docstring": "Returns the adapter associated with the given instance.", "id": "f11854:c2:m6"} {"signature": "def save_record(self, instance, **kwargs):", "body": "adapter = self.get_adapter_from_instance(instance)adapter.save_record(instance, **kwargs)", "docstring": "Saves the record.\n\n If `update_fields` is set, this method will use partial_update_object()\n and will update only the given fields (never `_geoloc` and `_tags`).\n\n For more information about partial_update_object:\n https://github.com/algolia/algoliasearch-client-python#update-an-existing-object-in-the-index", "id": "f11854:c2:m7"} {"signature": "def delete_record(self, instance):", "body": "adapter = self.get_adapter_from_instance(instance)adapter.delete_record(instance)", "docstring": "Deletes the record.", "id": "f11854:c2:m8"} {"signature": "def update_records(self, model, qs, batch_size=, **kwargs):", "body": "adapter = self.get_adapter(model)adapter.update_records(qs, batch_size=batch_size, **kwargs)", "docstring": "Updates multiple records.\n\nThis method is optimized for speed. It takes a QuerySet and the same\narguments as QuerySet.update(). 
Optionally, you can specify the size\nof the batch sent to Algolia with batch_size (defaults to 1000).\n\n>>> from algoliasearch_django import update_records\n>>> qs = MyModel.objects.filter(myField=False)\n>>> update_records(MyModel, qs, myField=True)\n>>> qs.update(myField=True)", "id": "f11854:c2:m9"} {"signature": "def raw_search(self, model, query='', params=None):", "body": "if params is None:params = {}adapter = self.get_adapter(model)return adapter.raw_search(query, params)", "docstring": "Performs a search query and returns the parsed JSON.", "id": "f11854:c2:m10"} {"signature": "def clear_index(self, model):", "body": "adapter = self.get_adapter(model)adapter.clear_index()", "docstring": "Clears the index.", "id": "f11854:c2:m11"} {"signature": "def reindex_all(self, model, batch_size=):", "body": "adapter = self.get_adapter(model)return adapter.reindex_all(batch_size)", "docstring": "Reindex all the records.\n\nBy default, this method uses Model.objects.all() but you can implement\na method `get_queryset` in your subclass. This can be used to optimize\nthe performance (for example with select_related or prefetch_related).", "id": "f11854:c2:m12"} {"signature": "def reset(self, settings=None):", "body": "self.__init__(settings=settings if settings is not None else SETTINGS)", "docstring": "Reinitializes the Algolia engine and its client.\n :param settings: settings to use instead of the default django.conf.settings.algolia", "id": "f11854:c2:m13"} {"signature": "def __post_save_receiver(self, instance, **kwargs):", "body": "logger.debug('', instance.__class__)self.save_record(instance, **kwargs)", "docstring": "Signal handler for when a registered model has been saved.", "id": "f11854:c2:m14"} {"signature": "def __pre_delete_receiver(self, instance, **kwargs):", "body": "logger.debug('', instance.__class__)self.delete_record(instance)", "docstring": "Signal handler for when a registered model has been deleted.", "id": "f11854:c2:m15"} {"signature": "def handle(self, *args, **options):", "body": "batch_size = options.get('', None)if not batch_size:batch_size = self.stdout.write('')for model in get_registered_model():if options.get('', None) and not (model.__name__ inoptions['']):continuecounts = reindex_all(model, batch_size=batch_size)self.stdout.write(''.format(model.__name__, counts))", "docstring": "Run the management command.", "id": "f11857:c0:m1"} {"signature": "def handle(self, *args, **options):", "body": "self.stdout.write('')for model in get_registered_model():if options.get('', None) and not (model.__name__ inoptions['']):continueget_adapter(model).set_settings()self.stdout.write(''.format(model.__name__))", "docstring": "Run the management command.", "id": "f11858:c0:m1"} {"signature": "def handle(self, *args, **options):", "body": "self.stdout.write('')for model in get_registered_model():if options.get('', None) and not (model.__name__ inoptions['']):continueclear_index(model)self.stdout.write(''.format(model.__name__))", "docstring": "Run the management command.", "id": "f11859:c0:m1"} {"signature": "def __init__(self, model, client, settings):", "body": "self.__init_index(client, model, settings)self.model = modelself.__client = clientself.__named_fields = {}self.__translate_fields = {}if self.settings is None: self.settings = {}try:all_model_fields = [f.name for f in model._meta.get_fields() if not f.is_relation]except AttributeError: all_model_fields = [f.name for f in model._meta.local_fields]if isinstance(self.fields, str):self.fields = (self.fields,)elif 
isinstance(self.fields, (list, tuple, set)):self.fields = tuple(self.fields)else:raise AlgoliaIndexError('')for field in self.fields:if sys.version_info < (, ) and isinstance(field, unicode) or isinstance(field, str):attr = fieldname = fieldelif isinstance(field, (list, tuple)) and len(field) == :attr = field[]name = field[]else:raise AlgoliaIndexError(''.format(field, type(field)))self.__translate_fields[attr] = nameif attr in all_model_fields:self.__named_fields[name] = get_model_attr(attr)else:self.__named_fields[name] = check_and_get_attr(model, attr)if not self.fields:self.fields = set(all_model_fields)for elt in ('', '', ''):try:self.fields.remove(elt)except KeyError:continueself.__translate_fields = dict(zip(self.fields, self.fields))self.__named_fields = dict(zip(self.fields, map(get_model_attr,self.fields)))if self.custom_objectID in chain([''], all_model_fields) or hasattr(model, self.custom_objectID):self.objectID = get_model_attr(self.custom_objectID)else:raise AlgoliaIndexError(''.format(self.custom_objectID, model))if self.tags:if self.tags in all_model_fields:self.tags = get_model_attr(self.tags)else:self.tags = check_and_get_attr(model, self.tags)if self.geo_field:self.geo_field = check_and_get_attr(model, self.geo_field)if self.should_index:if hasattr(model, self.should_index):attr = getattr(model, self.should_index)if type(attr) is not bool: self.should_index = attrif callable(self.should_index):self._should_index_is_method = Trueelse:try:model._meta.get_field_by_name(self.should_index)except:raise AlgoliaIndexError(''.format(self.should_index, model))", "docstring": "Initializes the index.", "id": "f11860:c1:m0"} {"signature": "@staticmethoddef _validate_geolocation(geolocation):", "body": "if set(geolocation) != {'', ''}:raise AlgoliaIndexError(''.format(geolocation))", "docstring": "Make sure we have the proper geolocation format.", "id": "f11860:c1:m2"} {"signature": "def get_raw_record(self, instance, update_fields=None):", "body": "tmp = {'': self.objectID(instance)}if update_fields:if isinstance(update_fields, str):update_fields = (update_fields,)for elt in update_fields:key = self.__translate_fields.get(elt, None)if key:tmp[key] = self.__named_fields[key](instance)else:for key, value in self.__named_fields.items():tmp[key] = value(instance)if self.geo_field:loc = self.geo_field(instance)if isinstance(loc, tuple):tmp[''] = {'': loc[], '': loc[]}elif isinstance(loc, dict):self._validate_geolocation(loc)tmp[''] = locelif isinstance(loc, list):[self._validate_geolocation(geo) for geo in loc]tmp[''] = locif self.tags:if callable(self.tags):tmp[''] = self.tags(instance)if not isinstance(tmp[''], list):tmp[''] = list(tmp[''])logger.debug('', tmp[''], self.model)return tmp", "docstring": "Gets the raw record.\n\nIf `update_fields` is set, the raw record will be built with only\nthe objectID and the given fields. 
Also, `_geoloc` and `_tags` will\nnot be included.", "id": "f11860:c1:m3"} {"signature": "def _has_should_index(self):", "body": "return self.should_index is not None", "docstring": "Return True if this AlgoliaIndex has a should_index method or attribute", "id": "f11860:c1:m4"} {"signature": "def _should_index(self, instance):", "body": "if self._has_should_index():return self._should_really_index(instance)else:return True", "docstring": "Return True if the object should be indexed (including when self.should_index is not set).", "id": "f11860:c1:m5"} {"signature": "def _should_really_index(self, instance):", "body": "if self._should_index_is_method:is_method = inspect.ismethod(self.should_index)try:count_args = len(inspect.signature(self.should_index).parameters)except AttributeError:count_args = len(inspect.getargspec(self.should_index).args)if is_method or count_args is :return self.should_index(instance)else:return self.should_index()else:attr_type = type(self.should_index)if attr_type is DeferredAttribute:attr_value = self.should_index.__get__(instance, None)elif attr_type is str:attr_value = getattr(instance, self.should_index)elif attr_type is property:attr_value = self.should_index.__get__(instance)else:raise AlgoliaIndexError(''.format(self.should_index))if type(attr_value) is not bool:raise AlgoliaIndexError(\"\" % (instance.__class__.__name__, self.should_index))return attr_value", "docstring": "Return True if according to should_index the object should be indexed.", "id": "f11860:c1:m6"} {"signature": "def save_record(self, instance, update_fields=None, **kwargs):", "body": "if not self._should_index(instance):self.delete_record(instance)returntry:if update_fields:obj = self.get_raw_record(instance,update_fields=update_fields)result = self.__index.partial_update_object(obj)else:obj = self.get_raw_record(instance)result = self.__index.save_object(obj)logger.info('', obj[''], self.model)return resultexcept AlgoliaException as e:if DEBUG:raise eelse:logger.warning('', obj[''],self.model, e)", "docstring": "Saves the record.\n\n If `update_fields` is set, this method will use partial_update_object()\n and will update only the given fields (never `_geoloc` and `_tags`).\n\n For more information about partial_update_object:\n https://github.com/algolia/algoliasearch-client-python#update-an-existing-object-in-the-index", "id": "f11860:c1:m7"} {"signature": "def delete_record(self, instance):", "body": "objectID = self.objectID(instance)try:self.__index.delete_object(objectID)logger.info('', objectID, self.model)except AlgoliaException as e:if DEBUG:raise eelse:logger.warning('', objectID,self.model, e)", "docstring": "Deletes the record.", "id": "f11860:c1:m8"} {"signature": "def update_records(self, qs, batch_size=, **kwargs):", "body": "tmp = {}for key, value in kwargs.items():name = self.__translate_fields.get(key, None)if name:tmp[name] = valuebatch = []objectsIDs = qs.only(self.custom_objectID).values_list(self.custom_objectID, flat=True)for elt in objectsIDs:tmp[''] = eltbatch.append(dict(tmp))if len(batch) >= batch_size:self.__index.partial_update_objects(batch)batch = []if len(batch) > :self.__index.partial_update_objects(batch)", "docstring": "Updates multiple records.\n\nThis method is optimized for speed. It takes a QuerySet and the same\narguments as QuerySet.update(). 
Optionally, you can specify the size\nof the batch sent to Algolia with batch_size (defaults to 1000).\n\n>>> from algoliasearch_django import update_records\n>>> qs = MyModel.objects.filter(myField=False)\n>>> update_records(MyModel, qs, myField=True)\n>>> qs.update(myField=True)", "id": "f11860:c1:m9"} {"signature": "def raw_search(self, query='', params=None):", "body": "if params is None:params = {}try:return self.__index.search(query, params)except AlgoliaException as e:if DEBUG:raise eelse:logger.warning('', self.index_name, e)", "docstring": "Performs a search query and returns the parsed JSON.", "id": "f11860:c1:m10"} {"signature": "def get_settings(self):", "body": "try:logger.info('', self.index_name)return self.__index.get_settings()except AlgoliaException as e:if DEBUG:raise eelse:logger.warning('',self.model, e)", "docstring": "Returns the settings of the index.", "id": "f11860:c1:m11"} {"signature": "def set_settings(self):", "body": "if not self.settings:returntry:self.__index.set_settings(self.settings)logger.info('', self.index_name)except AlgoliaException as e:if DEBUG:raise eelse:logger.warning('',self.model, e)", "docstring": "Applies the settings to the index.", "id": "f11860:c1:m12"} {"signature": "def clear_index(self):", "body": "try:self.__index.clear_index()logger.info('', self.index_name)except AlgoliaException as e:if DEBUG:raise eelse:logger.warning('', self.model, e)", "docstring": "Clears the index.", "id": "f11860:c1:m13"} {"signature": "def reindex_all(self, batch_size=):", "body": "should_keep_synonyms = Falseshould_keep_rules = Falsetry:if not self.settings:self.settings = self.get_settings()logger.debug('', self.index_name, self.settings)else:logger.debug(\"\", self.index_name, self.settings)except AlgoliaException as e:if any(\"\" in arg for arg in e.args):pass else:raise e try:if self.settings:replicas = self.settings.get('', None)slaves = self.settings.get('', None)should_keep_replicas = replicas is not Noneshould_keep_slaves = slaves is not Noneif should_keep_replicas:self.settings[''] = []logger.debug(\"\")if should_keep_slaves:self.settings[''] = []logger.debug(\"\")self.__tmp_index.wait_task(self.__tmp_index.set_settings(self.settings)[''])logger.debug('', self.index_name)rules = []synonyms = []for r in self.__index.iter_rules():rules.append(r)for s in self.__index.iter_synonyms():synonyms.append(s)if len(rules):logger.debug('', self.index_name, rules)should_keep_rules = Trueif len(synonyms):logger.debug('', self.index_name, rules)should_keep_synonyms = Trueself.__tmp_index.clear_index()logger.debug('', self.index_name)counts = batch = []if hasattr(self, ''):qs = self.get_queryset()else:qs = self.model.objects.all()for instance in qs:if not self._should_index(instance):continue batch.append(self.get_raw_record(instance))if len(batch) >= batch_size:self.__tmp_index.save_objects(batch)logger.info('', len(batch),self.index_name)batch = []counts += if len(batch) > :self.__tmp_index.save_objects(batch)logger.info('', len(batch),self.index_name)self.__client.move_index(self.__tmp_index.index_name,self.__index.index_name)logger.info('', self.index_name,self.index_name)if self.settings:if should_keep_replicas:self.settings[''] = replicaslogger.debug(\"\")if should_keep_slaves:self.settings[''] = slaveslogger.debug(\"\")if should_keep_replicas or should_keep_slaves:self.__index.set_settings(self.settings)if should_keep_rules:response = self.__index.batch_rules(rules, 
forward_to_replicas=True)self.__index.wait_task(response[''])logger.info(\"\".format(response), self.index_name)if should_keep_synonyms:response = self.__index.batch_synonyms(synonyms, forward_to_replicas=True)self.__index.wait_task(response[''])logger.info(\"\".format(response), self.index_name)return countsexcept AlgoliaException as e:if DEBUG:raise eelse:logger.warning('', self.model,e)", "docstring": "Reindex all the records.\n\nBy default, this method uses Model.objects.all() but you can implement\na method `get_queryset` in your subclass. This can be used to optimize\nthe performance (for example with select_related or prefetch_related).", "id": "f11860:c1:m15"} {"signature": "def available_attrs(fn):", "body": "return WRAPPER_ASSIGNMENTS", "docstring": "Return the list of functools-wrappable attributes on a callable.\nThis was required as a workaround for http://bugs.python.org/issue3445\nunder Python 2.", "id": "f11861:m0"} {"signature": "def register(model):", "body": "from algoliasearch_django import AlgoliaIndex, registerdef _algolia_engine_wrapper(index_class):if not issubclass(index_class, AlgoliaIndex):raise ValueError('')register(model, index_class)return index_classreturn _algolia_engine_wrapper", "docstring": "Register the given model class and wrapped AlgoliaIndex class with the Algolia engine:\n\n@register(Author)\nclass AuthorIndex(AlgoliaIndex):\n pass", "id": "f11861:m1"} {"signature": "def _generate_contents(self, tar):", "body": "text = self.render(files=False)vpn_instances = vpn_pattern.split(text)if '' in vpn_instances:vpn_instances.remove('')for vpn in vpn_instances:lines = vpn.split('')vpn_name = lines[]text_contents = ''.join(lines[:])if text_contents.endswith(''):text_contents = text_contents[:-]self._add_file(tar=tar,name=''.format(vpn_name, config_suffix),contents=text_contents)", "docstring": "Adds configuration files to tarfile instance.\n\n:param tar: tarfile instance\n:returns: None", "id": "f11891:c0:m0"} {"signature": "@classmethoddef auto_client(cls, host, server, ca_path=None, ca_contents=None,cert_path=None, cert_contents=None, key_path=None,key_contents=None):", "body": "client = {\"\": \"\",\"\": True,\"\": \"\",\"\": True}port = server.get('') or client[''] = [{'': host, '': port}]if server.get('') == '':client[''] = ''else:client[''] = ''if '' in server or '' in server:client[''] = Trueif '' not in server or not server['']:client[''] = Falsens_cert_type = {None: '','': '','': ''}client[''] = ns_cert_type[server.get('')]remote_cert_tls = {None: '','': '','': ''}client[''] = remote_cert_tls[server.get('')]copy_keys = ['', '', '', '', '','', '', '', '', '', '', '','', '', '', '', '','', '', '', '', '','', '', '', '','', '', '', '']for key in copy_keys:if key in server:client[key] = server[key]files = cls._auto_client_files(client, ca_path, ca_contents,cert_path, cert_contents,key_path, key_contents)return {'': [client],'': files}", "docstring": "Returns a configuration dictionary representing an OpenVPN client configuration\nthat is compatible with the passed server configuration.\n\n:param host: remote VPN server\n:param server: dictionary representing a single OpenVPN server configuration\n:param ca_path: optional string representing path to CA, will consequently add\n a file in the resulting configuration dictionary\n:param ca_contents: optional string representing contents of CA file\n:param cert_path: optional string representing path to certificate, will consequently add\n a file in the resulting configuration dictionary\n:param cert_contents: 
optional string representing contents of cert file\n:param key_path: optional string representing path to key, will consequently add\n a file in the resulting configuration dictionary\n:param key_contents: optional string representing contents of key file\n:returns: dictionary representing a single OpenVPN client configuration", "id": "f11891:c0:m1"} {"signature": "@classmethoddef _auto_client_files(cls, client, ca_path=None, ca_contents=None, cert_path=None,cert_contents=None, key_path=None, key_contents=None):", "body": "files = []if ca_path and ca_contents:client[''] = ca_pathfiles.append(dict(path=ca_path,contents=ca_contents,mode=DEFAULT_FILE_MODE))if cert_path and cert_contents:client[''] = cert_pathfiles.append(dict(path=cert_path,contents=cert_contents,mode=DEFAULT_FILE_MODE))if key_path and key_contents:client[''] = key_pathfiles.append(dict(path=key_path,contents=key_contents,mode=DEFAULT_FILE_MODE,))return files", "docstring": "returns a list of NetJSON extra files for automatically generated clients\nproduces side effects in ``client`` dictionary", "id": "f11891:c0:m2"} {"signature": "@classmethoddef get_name(cls):", "body": "return str(cls.__name__).replace('', '').lower()", "docstring": "Returns the name of the render class without its prefix", "id": "f11892:c0:m3"} {"signature": "def cleanup(self, output):", "body": "return output", "docstring": "Performs cleanup of output (indentation, new lines)\n\n:param output: string representation of the client configuration", "id": "f11892:c0:m4"} {"signature": "def render(self):", "body": "template_name = ''.format(self.get_name())template = self.template_env.get_template(template_name)context = getattr(self.backend, '', {})output = template.render(data=context)return self.cleanup(output)", "docstring": "Renders configuration by using the jinja2 templating engine", "id": "f11892:c0:m5"} {"signature": "def __init__(self, config=None, native=None, templates=None, context=None):", "body": "self.config = Noneself.intermediate_data = Noneif config is not None:config = deepcopy(self._load(config))self.config = self._merge_config(config, templates)self.config = self._evaluate_vars(self.config, context)elif native is not None:self.parse(native)else:raise ValueError('''')", "docstring": ":param config: ``dict`` containing a valid **NetJSON** configuration dictionary\n:param native: ``str`` or file object representing a native configuration that will\n be parsed and converted to a **NetJSON** configuration dictionary\n:param templates: ``list`` containing **NetJSON** configuration dictionaries that\n will be used as a base for the main config\n:param context: ``dict`` containing configuration variables\n:raises TypeError: raised if ``config`` is not of type ``dict`` or if\n ``templates`` is not of type ``list``", "id": "f11894:c0:m0"} {"signature": "def _load(self, config):", "body": "if isinstance(config, six.string_types):try:config = json.loads(config)except ValueError:passif not isinstance(config, dict):raise TypeError('''')return config", "docstring": "Loads config from string or dict", "id": "f11894:c0:m1"} {"signature": "def _merge_config(self, config, templates):", "body": "if not templates:return configif not isinstance(templates, list):raise TypeError('')result = {}config_list = templates + [config]for merging in config_list:result = merge_config(result, self._load(merging), self.list_identifiers)return result", "docstring": "Merges config with templates", "id": "f11894:c0:m2"} {"signature": "def _evaluate_vars(self, config, 
context):", "body": "if not context:return configreturn evaluate_vars(config, context)", "docstring": "Evaluates configuration variables", "id": "f11894:c0:m3"} {"signature": "def _render_files(self):", "body": "output = ''files = self.config.get('', [])if files:output += ''.format(self.FILE_SECTION_DELIMITER)for f in files:mode = f.get('', DEFAULT_FILE_MODE)file_output = ''''''.format(f[''], mode, f[''])output += file_outputreturn output", "docstring": "Renders additional files specified in ``self.config['files']``", "id": "f11894:c0:m4"} {"signature": "def render(self, files=True):", "body": "self.validate()if self.intermediate_data is None:self.to_intermediate()renderers = getattr(self, '', None) or [self.renderer]output = ''for renderer_class in renderers:renderer = renderer_class(self)output += renderer.render()del rendererif files:files_output = self._render_files()if files_output:output += files_output.replace('', '')return output", "docstring": "Converts the configuration dictionary into the corresponding configuration format\n\n:param files: whether to include \"additional files\" in the output or not;\n defaults to ``True``\n:returns: string with output", "id": "f11894:c0:m6"} {"signature": "def json(self, validate=True, *args, **kwargs):", "body": "if validate:self.validate()config = deepcopy(self.config)config.update({'': ''})return json.dumps(config, *args, **kwargs)", "docstring": "returns a string formatted as **NetJSON DeviceConfiguration**;\nperforms validation before returning output;\n\n``*args`` and ``*kwargs`` will be passed to ``json.dumps``;\n\n:returns: string", "id": "f11894:c0:m7"} {"signature": "def generate(self):", "body": "tar_bytes = BytesIO()tar = tarfile.open(fileobj=tar_bytes, mode='')self._generate_contents(tar)self._process_files(tar)tar.close()tar_bytes.seek() gzip_bytes = BytesIO()gz = gzip.GzipFile(fileobj=gzip_bytes, mode='', mtime=)gz.write(tar_bytes.getvalue())gz.close()gzip_bytes.seek() return gzip_bytes", "docstring": "Returns a ``BytesIO`` instance representing an in-memory tar.gz archive\ncontaining the native router configuration.\n\n:returns: in-memory tar.gz archive, instance of ``BytesIO``", "id": "f11894:c0:m8"} {"signature": "def write(self, name, path=''):", "body": "byte_object = self.generate()file_name = ''.format(name)if not path.endswith(''):path += ''f = open(''.format(path, file_name), '')f.write(byte_object.getvalue())f.close()", "docstring": "Like ``generate`` but writes to disk.\n\n:param name: file name, the tar.gz extension will be added automatically\n:param path: directory where the file will be written to, defaults to ``./``\n:returns: None", "id": "f11894:c0:m10"} {"signature": "def _process_files(self, tar):", "body": "for file_item in self.config.get('', []):path = file_item['']if path.startswith(''):path = path[:]self._add_file(tar=tar,name=path,contents=file_item[''],mode=file_item.get('', DEFAULT_FILE_MODE))", "docstring": "Adds files specified in self.config['files'] to tarfile instance.\n\n:param tar: tarfile instance\n:returns: None", "id": "f11894:c0:m11"} {"signature": "def _add_file(self, tar, name, contents, mode=DEFAULT_FILE_MODE):", "body": "byte_contents = BytesIO(contents.encode(''))info = tarfile.TarInfo(name=name)info.size = len(contents)info.mtime = info.type = tarfile.REGTYPEinfo.mode = int(mode, ) tar.addfile(tarinfo=info, fileobj=byte_contents)", "docstring": "Adds a single file in tarfile instance.\n\n:param tar: tarfile instance\n:param name: string representing filename or path\n:param contents: 
string representing file contents\n:param mode: string representing file mode, defaults to 644\n:returns: None", "id": "f11894:c0:m12"} {"signature": "def to_intermediate(self):", "body": "self.validate()self.intermediate_data = OrderedDict()for converter_class in self.converters:if not converter_class.should_run_forward(self.config):continueconverter = converter_class(self)value = converter.to_intermediate()if value and isinstance(value, (tuple, list)): value = OrderedDict(value)if value:self.intermediate_data = merge_config(self.intermediate_data,value,list_identifiers=[''])", "docstring": "Converts the NetJSON configuration dictionary (self.config)\nto the intermediate data structure (self.intermediate_data) that will\nthen be used by the renderer class to generate the router configuration", "id": "f11894:c0:m13"} {"signature": "def parse(self, native):", "body": "if not hasattr(self, '') or not self.parser:raise NotImplementedError('')parser = self.parser(native)self.intermediate_data = parser.intermediate_datadel parserself.to_netjson()", "docstring": "Parses a native configuration and converts\nit to a NetJSON configuration dictionary", "id": "f11894:c0:m14"} {"signature": "def to_netjson(self):", "body": "self.__backup_intermediate_data()self.config = OrderedDict()for converter_class in self.converters:if not converter_class.should_run_backward(self.intermediate_data):continueconverter = converter_class(self)value = converter.to_netjson()if value:self.config = merge_config(self.config,value,list_identifiers=self.list_identifiers)self.__restore_intermediate_data()self.validate()", "docstring": "Converts the intermediate data structure (self.intermediate_data)\nto the NetJSON configuration dictionary (self.config)", "id": "f11894:c0:m15"} {"signature": "@classmethoddef should_run_forward(cls, config):", "body": "return cls.netjson_key in config", "docstring": "Returns True if Converter should be instantiated and run\nduring the forward conversion process (NetJSON to native)", "id": "f11895:c0:m1"} {"signature": "@classmethoddef should_run_backward(cls, intermediate_data):", "body": "return cls.intermediate_key in intermediate_data", "docstring": "Returns True if Converter should be instantiated and run\nduring the backward conversion process (native to NetJSON)", "id": "f11895:c0:m2"} {"signature": "def type_cast(self, item, schema=None):", "body": "if schema is None:schema = self._schemaproperties = schema['']for key, value in item.items():if key not in properties:continuetry:json_type = properties[key]['']except KeyError:json_type = Noneif json_type == '' and not isinstance(value, int):value = int(value)elif json_type == '' and not isinstance(value, bool):value = value == ''item[key] = valuereturn item", "docstring": "Loops over item and performs type casting\naccording to supplied schema fragment", "id": "f11895:c0:m3"} {"signature": "def to_intermediate(self):", "body": "result = OrderedDict()netjson = get_copy(self.netjson, self.netjson_key)if isinstance(netjson, list):for index, block in enumerate(netjson):result = self.to_intermediate_loop(block, result, index + )else:result = self.to_intermediate_loop(netjson, result)return result", "docstring": "Converts the NetJSON configuration dictionary (``self.config``)\nto intermediate data structure (``self.intermediate_data``)", "id": "f11895:c0:m6"} {"signature": "def to_intermediate_loop(self, block, result, index=None): ", "body": "raise NotImplementedError()", "docstring": "Utility method called in the loop of 
``to_intermediate``", "id": "f11895:c0:m7"} {"signature": "def to_netjson(self, remove_block=True):", "body": "result = OrderedDict()intermediate_data = list(self.intermediate_data[self.intermediate_key])for index, block in enumerate(intermediate_data):if self.should_skip_block(block):continueif remove_block:self.intermediate_data[self.intermediate_key].remove(block)result = self.to_netjson_loop(block, result, index + )return result", "docstring": "Converts the intermediate data structure (``self.intermediate_data``)\nto a NetJSON configuration dictionary (``self.config``)", "id": "f11895:c0:m8"} {"signature": "def to_netjson_loop(self, block, result, index=None): ", "body": "raise NotImplementedError()", "docstring": "Utility method called in the loop of ``to_netjson``", "id": "f11895:c0:m9"} {"signature": "def _sanitize_radios(self):", "body": "for radio in self.config.get('', []):radio.setdefault('', False)", "docstring": "OpenWisp 1.x requires the following explicit entry\nin the radio sections of /uci/wireless.conf:\n option disabled '0'", "id": "f11897:c0:m1"} {"signature": "def _add_unique_file(self, item):", "body": "if item not in self.config['']:self.config[''].append(item)", "docstring": "adds a file in self.config['files'] only if not present already", "id": "f11897:c0:m3"} {"signature": "def _get_install_context(self):", "body": "config = self.configl2vpn = []for vpn in self.config.get('', []):if vpn.get('') != '':continuetap = vpn.copy()l2vpn.append(tap)bridges = []for interface in self.config.get('', []):if interface[''] != '':continuebridge = interface.copy()if bridge.get(''):bridge[''] = interface[''][].get('')bridge[''] = interface[''][].get('')bridges.append(bridge)cron = Falsefor _file in config.get('', []):path = _file['']if path.startswith('') or path.startswith(''):cron = Truebreakreturn dict(hostname=config[''][''], l2vpn=l2vpn,bridges=bridges,radios=config.get('', []), cron=cron)", "docstring": "returns the template context for install.sh and uninstall.sh", "id": "f11897:c0:m4"} {"signature": "def _add_install(self, context):", "body": "contents = self._render_template('', context)self.config.setdefault('', []) self._add_unique_file({\"\": \"\",\"\": contents,\"\": \"\"})", "docstring": "generates install.sh and adds it to included files", "id": "f11897:c0:m5"} {"signature": "def _add_uninstall(self, context):", "body": "contents = self._render_template('', context)self.config.setdefault('', []) self._add_unique_file({\"\": \"\",\"\": contents,\"\": \"\"})", "docstring": "generates uninstall.sh and adds it to included files", "id": "f11897:c0:m6"} {"signature": "def _add_tc_script(self):", "body": "context = dict(tc_options=self.config.get('', []))contents = self._render_template('', context)self.config.setdefault('', []) self._add_unique_file({\"\": \"\",\"\": contents,\"\": \"\"})", "docstring": "generates tc_script.sh and adds it to included files", "id": "f11897:c0:m8"} {"signature": "def _generate_contents(self, tar):", "body": "uci = self.render(files=False)packages = re.split('', uci)if '' in packages:packages.remove('')for package in packages:lines = package.split('')package_name = lines[]text_contents = ''.join(lines[:])text_contents = ''.format(package_name, text_contents)self._add_file(tar=tar,name=''.format(package_name),contents=text_contents)template_context = self._get_install_context()self._add_install(template_context)self._add_uninstall(template_context)self._add_openvpn_scripts()self._add_tc_script()", "docstring": "Adds configuration files to 
tarfile instance.\n\n:param tar: tarfile instance\n:returns: None", "id": "f11897:c0:m9"} {"signature": "def cleanup(self, output):", "body": "output = output.replace('', '').replace('', '').replace('', '')output = output.replace('', '').replace('', '')output = output.replace('', '')if output.endswith(''):return output[:-]return output", "docstring": "Generates consistent OpenWRT/LEDE UCI output", "id": "f11898:c0:m0"} {"signature": "@classmethoddef should_run_forward(cls, config):", "body": "return True", "docstring": "Always runs", "id": "f11904:c0:m0"} {"signature": "@classmethoddef should_run_backward(cls, intermediate_data):", "body": "return True", "docstring": "Always runs", "id": "f11904:c0:m1"} {"signature": "def __intermediate_addresses(self, interface):", "body": "address_list = self.get_copy(interface, '')if not address_list:return [{'': ''}]result = []static = {}dhcp = []for address in address_list:family = address.get('')if address[''] == '':address[''] = '' if family == '' else ''dhcp.append(self.__intermediate_address(address))continueif '' in address:uci_key = '' if family == '' else ''interface[uci_key] = address['']address_key = '' if family == '' else ''static.setdefault(address_key, [])static[address_key].append(''.format(**address))static.update(self.__intermediate_address(address))if static:if len(static.get('', [])) == :network = ip_interface(six.text_type(static[''][]))static[''] = str(network.ip)static[''] = str(network.netmask)if len(static.get('', [])) == :static[''] = static[''][]result.append(static)if dhcp:result += dhcpreturn result", "docstring": "converts NetJSON address to\nUCI intermediate data structure", "id": "f11905:c0:m1"} {"signature": "def __intermediate_interface(self, interface, uci_name):", "body": "interface.update({'': '','': uci_name,'': interface.pop('')})if '' in interface:del interface['']if '' in interface:if interface.get('') != '':interface[''] = interface['']del interface['']if '' in interface:interface[''] = interface['']del interface['']if '' in interface:interface[''] = not interface['']del interface['']if '' in interface:del interface['']if '' in interface:del interface['']return interface", "docstring": "converts NetJSON interface to\nUCI intermediate data structure", "id": "f11905:c0:m2"} {"signature": "def __intermediate_address(self, address):", "body": "for key in self._address_keys:if key in address:del address[key]return address", "docstring": "deletes NetJSON address keys", "id": "f11905:c0:m3"} {"signature": "def __intermediate_bridge(self, interface, i):", "body": "if interface[''] == '' and i < :bridge_members = ''.join(interface.pop(''))if bridge_members:interface[''] = bridge_memberselse:interface[''] = Truedel interface['']elif interface[''] == '' and i >= :if '' not in interface['']:interface[''] = ''.format(**interface)for attr in ['', '', '', '']:if attr in interface:del interface[attr]elif interface[''] != '':del interface['']return interface", "docstring": "converts NetJSON bridge to\nUCI intermediate data structure", "id": "f11905:c0:m4"} {"signature": "def __intermediate_proto(self, interface, address):", "body": "address_proto = address.pop('', '')if '' not in interface:return address_protoelse:return interface.pop('')", "docstring": "determines UCI interface \"proto\" option", "id": "f11905:c0:m5"} {"signature": "def __intermediate_dns_servers(self, uci, address):", "body": "if '' in uci:return uci['']if address[''] in ['', '', '']:return Nonedns = self.netjson.get('', None)if dns:return ''.join(dns)", 
"docstring": "determines UCI interface \"dns\" option", "id": "f11905:c0:m6"} {"signature": "def __intermediate_dns_search(self, uci, address):", "body": "if '' in uci:return uci['']if address[''] == '':return Nonedns_search = self.netjson.get('', None)if dns_search:return ''.join(dns_search)", "docstring": "determines UCI interface \"dns_search\" option", "id": "f11905:c0:m7"} {"signature": "def __intermediate_hwmode(self, radio):", "body": "protocol = radio['']if protocol in ['', '', '']:return protocol[:]if radio[''] is :return radio.get('')elif radio[''] <= :return ''else:return ''", "docstring": "possible return values are: 11a, 11b, 11g", "id": "f11914:c0:m2"} {"signature": "def __intermediate_htmode(self, radio):", "body": "protocol = radio.pop('')channel_width = radio.pop('')if '' in radio:return radio['']if protocol == '':return ''.format(channel_width)elif protocol == '':return ''.format(channel_width)return ''", "docstring": "only for mac80211 driver", "id": "f11914:c0:m3"} {"signature": "def __netjson_protocol(self, radio):", "body": "htmode = radio.get('')hwmode = radio.get('', None)if htmode.startswith(''):return ''elif htmode.startswith(''):return ''return ''.format(hwmode)", "docstring": "determines NetJSON protocol radio attribute", "id": "f11914:c0:m6"} {"signature": "def __netjson_channel(self, radio):", "body": "if radio[''] == '':return del radio['']return int(radio[''])", "docstring": "determines NetJSON channel radio attribute", "id": "f11914:c0:m7"} {"signature": "def __netjson_channel_width(self, radio):", "body": "htmode = radio.pop('')if htmode == '':return channel_width = htmode.replace('', '').replace('', '')if '' in channel_width or '' in channel_width:radio[''] = htmodechannel_width = channel_width[:-]return int(channel_width)", "docstring": "determines NetJSON channel_width radio attribute", "id": "f11914:c0:m8"} {"signature": "def _generate_contents(self, tar):", "body": "uci = self.render(files=False)packages = packages_pattern.split(uci)if '' in packages:packages.remove('')for package in packages:lines = package.split('')package_name = lines[]text_contents = ''.join(lines[:])self._add_file(tar=tar,name=''.format(config_path, package_name),contents=text_contents)", "docstring": "Adds configuration files to tarfile instance.\n\n:param tar: tarfile instance\n:returns: None", "id": "f11915:c0:m0"} {"signature": "def merge_config(template, config, list_identifiers=None):", "body": "result = template.copy()for key, value in config.items():if isinstance(value, dict):node = result.get(key, OrderedDict())result[key] = merge_config(node, value)elif isinstance(value, list) and isinstance(result.get(key), list):result[key] = merge_list(result[key], value, list_identifiers)else:result[key] = valuereturn result", "docstring": "Merges ``config`` on top of ``template``.\n\nConflicting keys are handled in the following way:\n\n* simple values (eg: ``str``, ``int``, ``float``, ecc) in ``config`` will\n overwrite the ones in ``template``\n* values of type ``list`` in both ``config`` and ``template`` will be\n merged using to the ``merge_list`` function\n* values of type ``dict`` will be merged recursively\n\n:param template: template ``dict``\n:param config: config ``dict``\n:param list_identifiers: ``list`` or ``None``\n:returns: merged ``dict``", "id": "f11916:m0"} {"signature": "def merge_list(list1, list2, identifiers=None):", "body": "identifiers = identifiers or []dict_map = {'': OrderedDict(), '': OrderedDict()}counter = for list_ in [list1, list2]:container = 
dict_map[''.format(counter)]for el in list_:key = id(el)if isinstance(el, dict):for id_key in identifiers:if id_key in el:key = el[id_key]breakcontainer[key] = deepcopy(el)counter += merged = merge_config(dict_map[''], dict_map[''])return list(merged.values())", "docstring": "Merges ``list2`` on top of ``list1``.\n\nIf both lists contain dictionaries which have keys specified\nin ``identifiers`` which have equal values, those dicts will\nbe merged (dicts in ``list2`` will override dicts in ``list1``).\nThe remaining elements will be summed in order to create a list\nwhich contains elements of both lists.\n\n:param list1: ``list`` from template\n:param list2: ``list`` from config\n:param identifiers: ``list`` or ``None``\n:returns: merged ``list``", "id": "f11916:m1"} {"signature": "def evaluate_vars(data, context=None):", "body": "context = context or {}if isinstance(data, (dict, list)):if isinstance(data, dict):loop_items = data.items()elif isinstance(data, list):loop_items = enumerate(data)for key, value in loop_items:data[key] = evaluate_vars(value, context)elif isinstance(data, six.string_types):vars_found = var_pattern.findall(data)for var in vars_found:var = var.strip()if len(vars_found) > :pattern = r'' % varelse:pattern = var_patternif var in context:data = re.sub(pattern, context[var], data)return data", "docstring": "Evaluates variables in ``data``\n\n:param data: data structure containing variables, may be\n ``str``, ``dict`` or ``list``\n:param context: ``dict`` containing variables\n:returns: modified data structure", "id": "f11916:m3"} {"signature": "def get_copy(dict_, key, default=None):", "body": "value = dict_.get(key, default)if value:return deepcopy(value)return value", "docstring": "Looks for a key in a dictionary, if found returns\na deepcopied value, otherwise returns default value", "id": "f11916:m4"} {"signature": "def _tabs(self, string):", "body": "return string.replace('', '')", "docstring": "replace 4 spaces with 1 tab", "id": "f11916:c0:m0"} {"signature": "def _list_errors(e):", "body": "error_list = []for value, error in zip(e.validator_value, e.context):error_list.append((value, error.message))if error.context:error_list += _list_errors(error)return error_list", "docstring": "Returns a list of violated schema fragments and related error messages\n:param e: ``jsonschema.exceptions.ValidationError`` instance", "id": "f11918:m0"} {"signature": "def __init__(self, e):", "body": "self.message = e.messageself.details = e", "docstring": "preserve jsonschema exception attributes\nin self.details", "id": "f11918:c1:m0"} {"signature": "def get_install_requires():", "body": "requirements = []for line in open('').readlines():if line.startswith('') or line == '' or line.startswith('') or line.startswith(''):continuerequirements.append(line.replace('', ''))if sys.version_info.major < :requirements.append('')return requirements", "docstring": "parse requirements.txt, ignore links, exclude comments", "id": "f11920:m0"} {"signature": "def powerset(iterable):", "body": "s = list(iterable)return itertools.chain.from_iterable(itertools.combinations(s, r) for r in range(len(s)+))", "docstring": "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)", "id": "f11930:m0"} {"signature": "def xor_fault(a, b, out, fault):", "body": "if (a != b) == out:return fault == else:return fault == ", "docstring": "Returns True if XOR(a, b) == out and fault == 0 or XOR(a, b) != out and fault == 1.", "id": "f11931:m0"} {"signature": "def and_fault(a, b, out, fault):", "body": "if (a 
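# Illustrative sketch of the merge behaviour documented for merge_config() and
# merge_list() above (f11916:m0 / f11916:m1): dicts merge recursively, lists
# merge by matching "identifier" keys, and scalar values from the config win.
# This is NOT the library's exact code (its literals are elided in this dump);
# the sketch_* names below are my own.
from collections import OrderedDict
from copy import deepcopy


def sketch_merge_list(list1, list2, identifiers=()):
    """Merge list2 on top of list1, pairing dicts that share an identifier value."""
    merged = OrderedDict()
    for source in (list1, list2):
        for element in source:
            key = id(element)  # fall back to object identity for non-dicts
            if isinstance(element, dict):
                for id_key in identifiers:
                    if id_key in element:
                        key = (id_key, element[id_key])
                        break
            if key in merged and isinstance(element, dict):
                merged[key] = sketch_merge_config(merged[key], element, identifiers)
            else:
                merged[key] = deepcopy(element)
    return list(merged.values())


def sketch_merge_config(template, config, identifiers=()):
    """Merge config on top of template: dicts recurse, lists use sketch_merge_list."""
    result = deepcopy(template)
    for key, value in config.items():
        if isinstance(value, dict) and isinstance(result.get(key), dict):
            result[key] = sketch_merge_config(result[key], value, identifiers)
        elif isinstance(value, list) and isinstance(result.get(key), list):
            result[key] = sketch_merge_list(result[key], value, identifiers)
        else:
            result[key] = deepcopy(value)
    return result


assert sketch_merge_config(
    {"interfaces": [{"name": "eth0", "mtu": 1500}]},
    {"interfaces": [{"name": "eth0", "disabled": True}]},
    identifiers=("name",),
) == {"interfaces": [{"name": "eth0", "mtu": 1500, "disabled": True}]}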
and b) == out:return fault == else:return fault == ", "docstring": "Returns True if AND(a, b) == out and fault == 0 or AND(a, b) != out and fault == 1.", "id": "f11931:m1"} {"signature": "def or_fault(a, b, out, fault):", "body": "if (a or b) == out:return fault == else:return fault == ", "docstring": "Returns True if OR(a, b) == out and fault == 0 or OR(a, b) != out and fault == 1.", "id": "f11931:m2"} {"signature": "def irreducible_components(constraint):", "body": "return _irreducible_components(constraint.configurations, constraint.variables)", "docstring": "Determine the sets of variables that are irreducible.\n\n Let V(C) denote the variables of constraint C. For a configuration x, let x|A denote the\n restriction of the configuration to the variables of A. Constraint C is reducible if there\n is a partition of V(C) into nonempty subsets A and B, and two constraints C_A and C_B, with\n V(C_A) = A and C_B V(C_B) = B, such that a configuration x is feasible in C if and only if x|A\n is feasible in C_A and x|B is feasible in C_B. A constraint is irreducible if it is not\n reducible.\n\n Args:\n constraint (:obj:`.Constraint`):\n Constraint to attempt to reduce.\n\n Returns:\n list[tuple]: List of tuples in which each tuple is a set of variables that is irreducible.\n\n Examples:\n This example reduces a constraint, created by specifying its valid configurations, to two\n constraints. The original constraint, that valid configurations for a,b,c are 0,0,1 and\n 1,1,1, can be represented by two reduced constraints, for example, (c=1) & (a=b). For\n comparison, an attempt to reduce a constraint representing an AND gate fails to find a\n valid reduction.\n\n >>> import dwavebinarycsp\n >>> const = dwavebinarycsp.Constraint.from_configurations([(0, 0, 1), (1, 1, 1)],\n ... ['a', 'b', 'c'], dwavebinarycsp.BINARY)\n >>> dwavebinarycsp.irreducible_components(const)\n [('c',), ('a', 'b')]\n >>> const_and = dwavebinarycsp.Constraint.from_configurations([(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 1)],\n ... ['a', 'b', 'c'], dwavebinarycsp.BINARY)\n >>> dwavebinarycsp.irreducible_components(const_and)\n [('a', 'b', 'c')]", "id": "f11932:m0"} {"signature": "@dimod.decorators.vartype_argument('')def and_gate(variables, vartype=dimod.BINARY, name=''):", "body": "variables = tuple(variables)if vartype is dimod.BINARY:configurations = frozenset([(, , ),(, , ),(, , ),(, , )])def func(in1, in2, out): return (in1 and in2) == outelse:configurations = frozenset([(-, -, -),(-, +, -),(+, -, -),(+, +, +)])def func(in1, in2, out): return ((in1 > ) and (in2 > )) == (out > )return Constraint(func, configurations, variables, vartype=vartype, name=name)", "docstring": "AND gate.\n\n Args:\n variables (list): Variable labels for the and gate as `[in1, in2, out]`,\n where `in1, in2` are inputs and `out` the gate's output.\n vartype (Vartype, optional, default='BINARY'): Variable type. 
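# Sketch of the gate-fault predicates documented above (f11931:m0..m2): each
# returns True when the gate output is consistent with the fault bit
# (fault == 0 means the gate behaved correctly, fault == 1 means its output
# is wrong). The numeric literals are elided in this dump, so the 0/1
# semantics used here are an assumption based on the docstrings.
def xor_fault(a, b, out, fault):
    return ((a != b) == out) == (fault == 0)


def and_fault(a, b, out, fault):
    return ((a and b) == out) == (fault == 0)


def or_fault(a, b, out, fault):
    return ((a or b) == out) == (fault == 0)


# A correctly behaving AND gate (1 AND 1 -> 1) is consistent with fault = 0:
assert and_fault(1, 1, 1, 0) and not and_fault(1, 1, 0, 0)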
Accepted\n input values:\n\n * Vartype.SPIN, 'SPIN', {-1, 1}\n * Vartype.BINARY, 'BINARY', {0, 1}\n name (str, optional, default='AND'): Name for the constraint.\n\n Returns:\n Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are\n assigned values that match the valid states of an AND gate.\n\n Examples:\n >>> import dwavebinarycsp\n >>> import dwavebinarycsp.factories.constraint.gates as gates\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.add_constraint(gates.and_gate(['a', 'b', 'c'], name='AND1'))\n >>> csp.check({'a': 1, 'b': 0, 'c': 0})\n True", "id": "f11933:m0"} {"signature": "@dimod.decorators.vartype_argument('')def or_gate(variables, vartype=dimod.BINARY, name=''):", "body": "variables = tuple(variables)if vartype is dimod.BINARY:configs = frozenset([(, , ),(, , ),(, , ),(, , )])def func(in1, in2, out): return (in1 or in2) == outelse:configs = frozenset([(-, -, -),(-, +, +),(+, -, +),(+, +, +)])def func(in1, in2, out): return ((in1 > ) or (in2 > )) == (out > )return Constraint(func, configs, variables, vartype=vartype, name=name)", "docstring": "OR gate.\n\n Args:\n variables (list): Variable labels for the and gate as `[in1, in2, out]`,\n where `in1, in2` are inputs and `out` the gate's output.\n vartype (Vartype, optional, default='BINARY'): Variable type. Accepted\n input values:\n\n * Vartype.SPIN, 'SPIN', {-1, 1}\n * Vartype.BINARY, 'BINARY', {0, 1}\n name (str, optional, default='OR'): Name for the constraint.\n\n Returns:\n Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are\n assigned values that match the valid states of an OR gate.\n\n Examples:\n >>> import dwavebinarycsp\n >>> import dwavebinarycsp.factories.constraint.gates as gates\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)\n >>> csp.add_constraint(gates.or_gate(['x', 'y', 'z'], {-1,1}, name='OR1'))\n >>> csp.check({'x': 1, 'y': -1, 'z': 1})\n True", "id": "f11933:m1"} {"signature": "@dimod.decorators.vartype_argument('')def xor_gate(variables, vartype=dimod.BINARY, name=''):", "body": "variables = tuple(variables)if vartype is dimod.BINARY:configs = frozenset([(, , ),(, , ),(, , ),(, , )])def func(in1, in2, out): return (in1 != in2) == outelse:configs = frozenset([(-, -, -),(-, +, +),(+, -, +),(+, +, -)])def func(in1, in2, out): return ((in1 > ) != (in2 > )) == (out > )return Constraint(func, configs, variables, vartype=vartype, name=name)", "docstring": "XOR gate.\n\n Args:\n variables (list): Variable labels for the and gate as `[in1, in2, out]`,\n where `in1, in2` are inputs and `out` the gate's output.\n vartype (Vartype, optional, default='BINARY'): Variable type. 
Accepted\n input values:\n\n * Vartype.SPIN, 'SPIN', {-1, 1}\n * Vartype.BINARY, 'BINARY', {0, 1}\n name (str, optional, default='XOR'): Name for the constraint.\n\n Returns:\n Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are\n assigned values that match the valid states of an XOR gate.\n\n Examples:\n >>> import dwavebinarycsp\n >>> import dwavebinarycsp.factories.constraint.gates as gates\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.add_constraint(gates.xor_gate(['x', 'y', 'z'], name='XOR1'))\n >>> csp.check({'x': 1, 'y': 1, 'z': 1})\n False", "id": "f11933:m2"} {"signature": "@dimod.decorators.vartype_argument('')def halfadder_gate(variables, vartype=dimod.BINARY, name=''):", "body": "variables = tuple(variables)if vartype is dimod.BINARY:configs = frozenset([(, , , ),(, , , ),(, , , ),(, , , )])else:configs = frozenset([(-, -, -, -),(-, +, +, -),(+, -, +, -),(+, +, -, +)])def func(augend, addend, sum_, carry):total = (augend > ) + (addend > )if total == :return (sum_ <= ) and (carry <= )elif total == :return (sum_ > ) and (carry <= )elif total == :return (sum_ <= ) and (carry > )else:raise ValueError(\"\")return Constraint(func, configs, variables, vartype=vartype, name=name)", "docstring": "Half adder.\n\n Args:\n variables (list): Variable labels for the and gate as `[in1, in2, sum, carry]`,\n where `in1, in2` are inputs to be added and `sum` and 'carry' the resultant\n outputs.\n vartype (Vartype, optional, default='BINARY'): Variable type. Accepted\n input values:\n\n * Vartype.SPIN, 'SPIN', {-1, 1}\n * Vartype.BINARY, 'BINARY', {0, 1}\n name (str, optional, default='HALF_ADDER'): Name for the constraint.\n\n Returns:\n Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are\n assigned values that match the valid states of a Boolean half adder.\n\n Examples:\n >>> import dwavebinarycsp\n >>> import dwavebinarycsp.factories.constraint.gates as gates\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.add_constraint(gates.halfadder_gate(['a', 'b', 'total', 'carry'], name='HA1'))\n >>> csp.check({'a': 1, 'b': 1, 'total': 0, 'carry': 1})\n True", "id": "f11933:m3"} {"signature": "@dimod.decorators.vartype_argument('')def fulladder_gate(variables, vartype=dimod.BINARY, name=''):", "body": "variables = tuple(variables)if vartype is dimod.BINARY:configs = frozenset([(, , , , ),(, , , , ),(, , , , ),(, , , , ),(, , , , ),(, , , , ),(, , , , ),(, , , , )])else:configs = frozenset([(-, -, -, -, -),(-, -, +, +, -),(-, +, -, +, -),(-, +, +, -, +),(+, -, -, +, -),(+, -, +, -, +),(+, +, -, -, +),(+, +, +, +, +)])def func(in1, in2, in3, sum_, carry):total = (in1 > ) + (in2 > ) + (in3 > )if total == :return (sum_ <= ) and (carry <= )elif total == :return (sum_ > ) and (carry <= )elif total == :return (sum_ <= ) and (carry > )elif total == :return (sum_ > ) and (carry > )else:raise ValueError(\"\")return Constraint(func, configs, variables, vartype=vartype, name=name)", "docstring": "Full adder.\n\n Args:\n variables (list): Variable labels for the and gate as `[in1, in2, in3, sum, carry]`,\n where `in1, in2, in3` are inputs to be added and `sum` and 'carry' the resultant\n outputs.\n vartype (Vartype, optional, default='BINARY'): Variable type. 
Accepted\n input values:\n\n * Vartype.SPIN, 'SPIN', {-1, 1}\n * Vartype.BINARY, 'BINARY', {0, 1}\n name (str, optional, default='FULL_ADDER'): Name for the constraint.\n\n Returns:\n Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are\n assigned values that match the valid states of a Boolean full adder.\n\n Examples:\n >>> import dwavebinarycsp\n >>> import dwavebinarycsp.factories.constraint.gates as gates\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.add_constraint(gates.fulladder_gate(['a', 'b', 'c_in', 'total', 'c_out'], name='FA1'))\n >>> csp.check({'a': 1, 'b': 0, 'c_in': 1, 'total': 0, 'c_out': 1})\n True", "id": "f11933:m4"} {"signature": "@dimod.decorators.vartype_argument('')def sat2in4(pos, neg=tuple(), vartype=dimod.BINARY, name=''):", "body": "pos = tuple(pos)neg = tuple(neg)variables = pos + negif len(variables) != :raise ValueError(\"\")if neg and (len(neg) < ):const = sat2in4(pos=variables, vartype=vartype) for v in neg:const.flip_variable(v)const.name = name return constif vartype is dimod.BINARY:configurations = frozenset([(, , , ),(, , , ),(, , , ),(, , , ),(, , , ),(, , , )])else:configurations = frozenset([(-, -, +, +),(-, +, -, +),(+, -, -, +),(-, +, +, -),(+, -, +, -),(+, +, -, -)])def func(a, b, c, d):if a == b:return (b != c) and (c == d)elif a == c:return b == delse:return a == dreturn Constraint(func, configurations, variables, vartype=vartype, name=name)", "docstring": "Two-in-four (2-in-4) satisfiability.\n\n Args:\n pos (iterable):\n Variable labels, as an iterable, for non-negated variables of the constraint.\n Exactly four variables are specified by `pos` and `neg` together.\n neg (tuple):\n Variable labels, as an iterable, for negated variables of the constraint.\n Exactly four variables are specified by `pos` and `neg` together.\n vartype (Vartype, optional, default='BINARY'): Variable type. 
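# Sketch of the arithmetic behind halfadder_gate() and fulladder_gate() above
# (f11933:m3 / f11933:m4): for binary inputs the valid configurations are
# exactly those where `sum` is the low bit and `carry` the high bit of the
# total of the inputs. This enumeration is an illustration, not the library's
# code.
from itertools import product

half_adder_configs = {
    (a, b, (a + b) & 1, (a + b) >> 1) for a, b in product((0, 1), repeat=2)
}
assert half_adder_configs == {(0, 0, 0, 0), (0, 1, 1, 0), (1, 0, 1, 0), (1, 1, 0, 1)}

full_adder_configs = {
    (a, b, c, (a + b + c) & 1, (a + b + c) >> 1)
    for a, b, c in product((0, 1), repeat=3)
}
assert (1, 1, 0, 0, 1) in full_adder_configs  # 1 + 1 + 0 = 0b10 -> sum 0, carry 1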
Accepted\n input values:\n\n * Vartype.SPIN, 'SPIN', {-1, 1}\n * Vartype.BINARY, 'BINARY', {0, 1}\n name (str, optional, default='2-in-4'): Name for the constraint.\n\n Returns:\n Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are\n assigned values that satisfy a two-in-four satisfiability problem.\n\n Examples:\n >>> import dwavebinarycsp\n >>> import dwavebinarycsp.factories.constraint.sat as sat\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.add_constraint(sat.sat2in4(['w', 'x', 'y', 'z'], vartype='BINARY', name='sat1'))\n >>> csp.check({'w': 1, 'x': 1, 'y': 0, 'z': 0})\n True", "id": "f11935:m0"} {"signature": "def random_2in4sat(num_variables, num_clauses, vartype=dimod.BINARY, satisfiable=True):", "body": "if num_variables < :raise ValueError(\"\")if num_clauses > * _nchoosek(num_variables, ): raise ValueError(\"\")csp = ConstraintSatisfactionProblem(vartype)variables = list(range(num_variables))constraints = set()if satisfiable:values = tuple(vartype.value)planted_solution = {v: choice(values) for v in variables}configurations = [(, , , ), (, , , ), (, , , ),(, , , ), (, , , ), (, , , )]while len(constraints) < num_clauses:constraint_variables = sorted(sample(variables, ))config = choice(configurations)pos = tuple(v for idx, v in enumerate(constraint_variables) if config[idx] == (planted_solution[v] > ))neg = tuple(v for idx, v in enumerate(constraint_variables) if config[idx] != (planted_solution[v] > ))const = sat2in4(pos=pos, neg=neg, vartype=vartype)assert const.check(planted_solution)constraints.add(const)else:while len(constraints) < num_clauses:constraint_variables = sorted(sample(variables, ))pos = tuple(v for v in constraint_variables if random() > )neg = tuple(v for v in constraint_variables if v not in pos)const = sat2in4(pos=pos, neg=neg, vartype=vartype)constraints.add(const)for const in constraints:csp.add_constraint(const)for v in variables:csp.add_variable(v)return csp", "docstring": "Random two-in-four (2-in-4) constraint satisfaction problem.\n\n Args:\n num_variables (integer): Number of variables (at least four).\n num_clauses (integer): Number of constraints that together constitute the\n constraint satisfaction problem.\n vartype (Vartype, optional, default='BINARY'): Variable type. 
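# Sketch of the "two-in-four" condition behind sat2in4() above (f11935:m0):
# for binary variables, a clause is satisfied when exactly two of the four
# (possibly negated) variables are 1. Illustration only; the library builds
# the equivalent frozenset of valid configurations.
from itertools import product


def two_in_four(a, b, c, d):
    return a + b + c + d == 2


configs = {cfg for cfg in product((0, 1), repeat=4) if two_in_four(*cfg)}
assert len(configs) == 6  # C(4, 2) valid assignments
assert (1, 1, 0, 0) in configs and (1, 1, 1, 0) not in configs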
Accepted\n input values:\n\n * Vartype.SPIN, 'SPIN', {-1, 1}\n * Vartype.BINARY, 'BINARY', {0, 1}\n satisfiable (bool, optional, default=True): True if the CSP can be satisfied.\n\n Returns:\n CSP (:obj:`.ConstraintSatisfactionProblem`): CSP that is satisfied when its variables\n are assigned values that satisfy a two-in-four satisfiability problem.\n\n Examples:\n This example creates a CSP with 6 variables and two random constraints and checks\n whether a particular assignment of variables satisifies it.\n\n >>> import dwavebinarycsp\n >>> import dwavebinarycsp.factories as sat\n >>> csp = sat.random_2in4sat(6, 2)\n >>> csp.constraints # doctest: +SKIP\n [Constraint.from_configurations(frozenset({(1, 0, 1, 0), (1, 0, 0, 1), (1, 1, 1, 1), (0, 1, 1, 0), (0, 0, 0, 0),\n (0, 1, 0, 1)}), (2, 4, 0, 1), Vartype.BINARY, name='2-in-4'),\n Constraint.from_configurations(frozenset({(1, 0, 1, 1), (1, 1, 0, 1), (1, 1, 1, 0), (0, 0, 0, 1),\n (0, 1, 0, 0), (0, 0, 1, 0)}), (1, 2, 4, 5), Vartype.BINARY, name='2-in-4')]\n >>> csp.check({0: 1, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0}) # doctest: +SKIP\n True", "id": "f11938:m0"} {"signature": "def random_xorsat(num_variables, num_clauses, vartype=dimod.BINARY, satisfiable=True):", "body": "if num_variables < :raise ValueError(\"\")if num_clauses > * _nchoosek(num_variables, ): raise ValueError(\"\")csp = ConstraintSatisfactionProblem(vartype)variables = list(range(num_variables))constraints = set()if satisfiable:values = tuple(vartype.value)planted_solution = {v: choice(values) for v in variables}configurations = [(, , ), (, , ), (, , ), (, , )]while len(constraints) < num_clauses:x, y, z = sample(variables, )if y > x:x, y = y, xconst = xor_gate([x, y, z], vartype=vartype)config = choice(configurations)for idx, v in enumerate(const.variables):if config[idx] != (planted_solution[v] > ):const.flip_variable(v)assert const.check(planted_solution)constraints.add(const)else:while len(constraints) < num_clauses:x, y, z = sample(variables, )if y > x:x, y = y, xconst = xor_gate([x, y, z], vartype=vartype)for idx, v in enumerate(const.variables):if random() > :const.flip_variable(v)assert const.check(planted_solution)constraints.add(const)for const in constraints:csp.add_constraint(const)for v in variables:csp.add_variable(v)return csp", "docstring": "Random XOR constraint satisfaction problem.\n\n Args:\n num_variables (integer): Number of variables (at least three).\n num_clauses (integer): Number of constraints that together constitute the\n constraint satisfaction problem.\n vartype (Vartype, optional, default='BINARY'): Variable type. 
Accepted\n input values:\n\n * Vartype.SPIN, 'SPIN', {-1, 1}\n * Vartype.BINARY, 'BINARY', {0, 1}\n satisfiable (bool, optional, default=True): True if the CSP can be satisfied.\n\n Returns:\n CSP (:obj:`.ConstraintSatisfactionProblem`): CSP that is satisfied when its variables\n are assigned values that satisfy a XOR satisfiability problem.\n\n Examples:\n This example creates a CSP with 5 variables and two random constraints and checks\n whether a particular assignment of variables satisifies it.\n\n >>> import dwavebinarycsp\n >>> import dwavebinarycsp.factories as sat\n >>> csp = sat.random_xorsat(5, 2)\n >>> csp.constraints # doctest: +SKIP\n [Constraint.from_configurations(frozenset({(1, 0, 0), (1, 1, 1), (0, 1, 0), (0, 0, 1)}), (4, 3, 0),\n Vartype.BINARY, name='XOR (0 flipped)'),\n Constraint.from_configurations(frozenset({(1, 1, 0), (0, 1, 1), (0, 0, 0), (1, 0, 1)}), (2, 0, 4),\n Vartype.BINARY, name='XOR (2 flipped) (0 flipped)')]\n >>> csp.check({0: 1, 1: 0, 2: 0, 3: 1, 4: 1}) # doctest: +SKIP\n True", "id": "f11938:m1"} {"signature": "def multiplication_circuit(nbit, vartype=dimod.BINARY):", "body": "if nbit < :raise ValueError(\"\")num_multiplier_bits = num_multiplicand_bits = nbitcsp = ConstraintSatisfactionProblem(vartype)a = {i: '' % i for i in range(nbit)}b = {j: '' % j for j in range(nbit)}p = {k: '' % k for k in range(nbit + nbit)}AND = defaultdict(dict) SUM = defaultdict(dict) CARRY = defaultdict(dict) for i in range(num_multiplier_bits):for j in range(num_multiplicand_bits):ai = a[i]bj = b[j]if i == and j == :andij = AND[i][j] = p[]gate = and_gate([ai, bj, andij], vartype=vartype, name='' % (ai, bj, andij))csp.add_constraint(gate)continueandij = AND[i][j] = '' % (i, j)gate = and_gate([ai, bj, andij], vartype=vartype, name='' % (ai, bj, andij))csp.add_constraint(gate)inputs = [andij]if i - in CARRY and j in CARRY[i - ]:inputs.append(CARRY[i - ][j])if i - in SUM and j + in SUM[i - ]:inputs.append(SUM[i - ][j + ])if len(inputs) == :SUM[i][j] = andijelif len(inputs) == :if j == :sumij = SUM[i][j] = p[i]else:sumij = SUM[i][j] = '' % (i, j)carryij = CARRY[i][j] = '' % (i, j)name = '' % (inputs[], inputs[], sumij, carryij)gate = halfadder_gate([inputs[], inputs[], sumij, carryij], vartype=vartype, name=name)csp.add_constraint(gate)else:assert len(inputs) == , ''if j == :sumij = SUM[i][j] = p[i]else:sumij = SUM[i][j] = '' % (i, j)carryij = CARRY[i][j] = '' % (i, j)name = '' % (inputs[], inputs[], inputs[], sumij, carryij)gate = fulladder_gate([inputs[], inputs[], inputs[], sumij, carryij], vartype=vartype, name=name)csp.add_constraint(gate)for col in range(nbit - ):inputs = [CARRY[nbit - ][col], SUM[nbit - ][col + ]]if col == :sumout = p[nbit + col]carryout = CARRY[nbit][col] = '' % (nbit, col)name = '' % (inputs[], inputs[], sumout, carryout)gate = halfadder_gate([inputs[], inputs[], sumout, carryout], vartype=vartype, name=name)csp.add_constraint(gate)continueinputs.append(CARRY[nbit][col - ])sumout = p[nbit + col]if col < nbit - :carryout = CARRY[nbit][col] = '' % (nbit, col)else:carryout = p[ * nbit - ]name = '' % (inputs[], inputs[], inputs[], sumout, carryout)gate = fulladder_gate([inputs[], inputs[], inputs[], sumout, carryout], vartype=vartype, name=name)csp.add_constraint(gate)return csp", "docstring": "Multiplication circuit constraint satisfaction problem.\n\n A constraint satisfaction problem that represents the binary multiplication :math:`ab=p`,\n where the multiplicands are binary variables of length `nbit`; for example,\n :math:`a_0 + 2a_1 + 4a_2 +... 
+2^ma_{nbit}`.\n\n The square below shows a graphic representation of the circuit::\n\n ________________________________________________________________________________\n | and20 and10 and00 |\n | | | | |\n | and21 add11\u2500\u2500and11 add01\u2500\u2500and01 | |\n | |\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518|\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518| | |\n | and22 add12\u2500\u2500and12 add02\u2500\u2500and02 | | |\n | |\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518|\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518| | | |\n | add13\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500add03 | | | |\n | \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518| | | | | |\n | p5 p4 p3 p2 p1 p0 |\n --------------------------------------------------------------------------------\n\n Args:\n nbit (int): Number of bits in the multiplicands.\n vartype (Vartype, optional, default='BINARY'): Variable type. Accepted\n input values:\n\n * Vartype.SPIN, 'SPIN', {-1, 1}\n * Vartype.BINARY, 'BINARY', {0, 1}\n\n Returns:\n CSP (:obj:`.ConstraintSatisfactionProblem`): CSP that is satisfied when variables\n :math:`a,b,p` are assigned values that correctly solve binary multiplication :math:`ab=p`.\n\n Examples:\n This example creates a multiplication circuit CSP that multiplies two 3-bit numbers,\n which is then formulated as a binary quadratic model (BQM). It fixes the multiplacands\n as :math:`a=5, b=6` (:math:`101` and :math:`110`) and uses a simulated annealing sampler\n to find the product, :math:`p=30` (:math:`111100`).\n\n >>> import dwavebinarycsp\n >>> from dwavebinarycsp.factories.csp.circuits import multiplication_circuit\n >>> import neal\n >>> csp = multiplication_circuit(3)\n >>> bqm = dwavebinarycsp.stitch(csp)\n >>> bqm.fix_variable('a0', 1); bqm.fix_variable('a1', 0); bqm.fix_variable('a2', 1)\n >>> bqm.fix_variable('b0', 1); bqm.fix_variable('b1', 1); bqm.fix_variable('b2', 0)\n >>> sampler = neal.SimulatedAnnealingSampler()\n >>> response = sampler.sample(bqm)\n >>> p = next(response.samples(n=1, sorted_by='energy'))\n >>> print(p['p0'], p['p1'], p['p2'], p['p3'], p['p4'], p['p5']) # doctest: +SKIP\n 1 1 1 1 0 0", "id": "f11939:m0"} {"signature": "def load_cnf(fp):", "body": "fp = iter(fp) csp = ConstraintSatisfactionProblem(dimod.BINARY)num_clauses = num_variables = problem_pattern = re.compile(_PROBLEM_REGEX)for line in fp:matches = problem_pattern.findall(line)if matches:if len(matches) > :raise ValueErrornv, nc = matches[]num_variables, num_clauses = int(nv), int(nc)breakclause_pattern = re.compile(_CLAUSE_REGEX)for line in fp:if clause_pattern.match(line) is not None:clause = [int(v) for v in line.split('')[:-]] variables = [abs(v) for v in clause]f = _cnf_or(clause)csp.add_constraint(f, variables)for v in range(, num_variables+):csp.add_variable(v)for v in csp.variables:if v > num_variables:msg = (\"\"\"\").format(num_variables, num_clauses, v)raise ValueError(msg)if len(csp) != num_clauses:msg = (\"\"\"\").format(num_clauses, len(csp))raise ValueError(msg)return csp", "docstring": "Load a constraint satisfaction problem from a .cnf file.\n\n Args:\n fp (file, optional):\n `.write()`-supporting `file object`_ DIMACS CNF formatted_ file.\n\n Returns:\n :obj:`.ConstraintSatisfactionProblem` a binary-valued SAT problem.\n\n Examples:\n\n >>> import dwavebinarycsp as dbcsp\n ...\n >>> with open('test.cnf', 'r') as fp: # doctest: 
+SKIP\n ... csp = dbcsp.cnf.load_cnf(fp)\n\n .. _file object: https://docs.python.org/3/glossary.html#term-file-object\n\n .. _formatted: http://www.satcompetition.org/2009/format-benchmarks2009.html", "id": "f11940:m0"} {"signature": "@classmethod@dimod.decorators.vartype_argument('')def from_func(cls, func, variables, vartype, name=None):", "body": "variables = tuple(variables)configurations = frozenset(configfor config in itertools.product(vartype.value, repeat=len(variables))if func(*config))return cls(func, configurations, variables, vartype, name)", "docstring": "Construct a constraint from a validation function.\n\n Args:\n func (function):\n Function that evaluates True when the variables satisfy the constraint.\n\n variables (iterable):\n Iterable of variable labels.\n\n vartype (:class:`~dimod.Vartype`/str/set):\n Variable type for the constraint. Accepted input values:\n\n * :attr:`~dimod.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``\n * :attr:`~dimod.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``\n\n name (string, optional, default='Constraint'):\n Name for the constraint.\n\n Examples:\n This example creates a constraint that binary variables `a` and `b`\n are not equal.\n\n >>> import dwavebinarycsp\n >>> import operator\n >>> const = dwavebinarycsp.Constraint.from_func(operator.ne, ['a', 'b'], 'BINARY')\n >>> print(const.name)\n Constraint\n >>> (0, 1) in const.configurations\n True\n\n This example creates a constraint that :math:`out = NOT(x)`\n for spin variables.\n\n >>> import dwavebinarycsp\n >>> def not_(y, x): # y=NOT(x) for spin variables\n ... return (y == -x)\n ...\n >>> const = dwavebinarycsp.Constraint.from_func(\n ... not_,\n ... ['out', 'in'],\n ... {1, -1},\n ... name='not_spin')\n >>> print(const.name)\n not_spin\n >>> (1, -1) in const.configurations\n True", "id": "f11942:c0:m1"} {"signature": "@classmethoddef from_configurations(cls, configurations, variables, vartype, name=None):", "body": "def func(*args): return args in configurationsreturn cls(func, configurations, variables, vartype, name)", "docstring": "Construct a constraint from valid configurations.\n\n Args:\n configurations (iterable[tuple]):\n Valid configurations of the variables. Each configuration is a tuple of variable\n assignments ordered by :attr:`~Constraint.variables`.\n\n variables (iterable):\n Iterable of variable labels.\n\n vartype (:class:`~dimod.Vartype`/str/set):\n Variable type for the constraint. Accepted input values:\n\n * :attr:`~dimod.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``\n * :attr:`~dimod.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``\n\n name (string, optional, default='Constraint'):\n Name for the constraint.\n\n Examples:\n\n This example creates a constraint that variables `a` and `b` are not equal.\n\n >>> import dwavebinarycsp\n >>> const = dwavebinarycsp.Constraint.from_configurations([(0, 1), (1, 0)],\n ... ['a', 'b'], dwavebinarycsp.BINARY)\n >>> print(const.name)\n Constraint\n >>> (0, 0) in const.configurations # Order matches variables: a,b\n False\n\n This example creates a constraint based on specified valid configurations\n that represents an OR gate for spin variables.\n\n >>> import dwavebinarycsp\n >>> const = dwavebinarycsp.Constraint.from_configurations(\n ... [(-1, -1, -1), (1, -1, 1), (1, 1, -1), (1, 1, 1)],\n ... ['y', 'x1', 'x2'],\n ... 
dwavebinarycsp.SPIN, name='or_spin')\n >>> print(const.name)\n or_spin\n >>> (1, 1, -1) in const.configurations # Order matches variables: y,x1,x2\n True", "id": "f11942:c0:m2"} {"signature": "def __len__(self):", "body": "return self.variables.__len__()", "docstring": "The number of variables.", "id": "f11942:c0:m3"} {"signature": "def check(self, solution):", "body": "return self.func(*(solution[v] for v in self.variables))", "docstring": "Check that a solution satisfies the constraint.\n\n Args:\n solution (container):\n An assignment for the variables in the constraint.\n\n Returns:\n bool: True if the solution satisfies the constraint; otherwise False.\n\n Examples:\n This example creates a constraint that :math:`a \\\\ne b` on binary variables\n and tests it for two candidate solutions, with additional unconstrained\n variable c.\n\n >>> import dwavebinarycsp\n >>> const = dwavebinarycsp.Constraint.from_configurations([(0, 1), (1, 0)],\n ... ['a', 'b'], dwavebinarycsp.BINARY)\n >>> solution = {'a': 1, 'b': 1, 'c': 0}\n >>> const.check(solution)\n False\n >>> solution = {'a': 1, 'b': 0, 'c': 0}\n >>> const.check(solution)\n True", "id": "f11942:c0:m10"} {"signature": "def fix_variable(self, v, value):", "body": "variables = self.variablestry:idx = variables.index(v)except ValueError:raise ValueError(\"\".format(v))if value not in self.vartype.value:raise ValueError(\"\".format(self.vartype.value, value))configurations = frozenset(config[:idx] + config[idx + :] for config in self.configurationsif config[idx] == value)if not configurations:raise UnsatError(\"\".format(v, value))variables = variables[:idx] + variables[idx + :]self.configurations = configurationsself.variables = variablesdef func(*args): return args in configurationsself.func = funcself.name = ''.format(self.name, v, value)", "docstring": "Fix the value of a variable and remove it from the constraint.\n\n Args:\n v (variable):\n Variable in the constraint to be set to a constant value.\n\n val (int):\n Value assigned to the variable. Values must match the :class:`.Vartype` of the\n constraint.\n\n Examples:\n This example creates a constraint that :math:`a \\\\ne b` on binary variables,\n fixes variable a to 0, and tests two candidate solutions.\n\n >>> import dwavebinarycsp\n >>> const = dwavebinarycsp.Constraint.from_func(operator.ne,\n ... 
['a', 'b'], dwavebinarycsp.BINARY)\n >>> const.fix_variable('a', 0)\n >>> const.check({'b': 1})\n True\n >>> const.check({'b': 0})\n False", "id": "f11942:c0:m11"} {"signature": "def flip_variable(self, v):", "body": "try:idx = self.variables.index(v)except ValueError:raise ValueError(\"\".format(v, self.name))if self.vartype is dimod.BINARY:original_func = self.funcdef func(*args):new_args = list(args)new_args[idx] = - new_args[idx] return original_func(*new_args)self.func = funcself.configurations = frozenset(config[:idx] + ( - config[idx],) + config[idx + :]for config in self.configurations)else: original_func = self.funcdef func(*args):new_args = list(args)new_args[idx] = -new_args[idx] return original_func(*new_args)self.func = funcself.configurations = frozenset(config[:idx] + (-config[idx],) + config[idx + :]for config in self.configurations)self.name = ''.format(self.name, v)", "docstring": "Flip a variable in the constraint.\n\n Args:\n v (variable):\n Variable in the constraint to take the complementary value of its\n construction value.\n\n Examples:\n This example creates a constraint that :math:`a = b` on binary variables\n and flips variable a.\n\n >>> import dwavebinarycsp\n >>> const = dwavebinarycsp.Constraint.from_func(operator.eq,\n ... ['a', 'b'], dwavebinarycsp.BINARY)\n >>> const.check({'a': 0, 'b': 0})\n True\n >>> const.flip_variable('a')\n >>> const.check({'a': 1, 'b': 0})\n True\n >>> const.check({'a': 0, 'b': 0})\n False", "id": "f11942:c0:m12"} {"signature": "def copy(self):", "body": "return self.__class__(self.func, self.configurations, self.variables, self.vartype, name=self.name)", "docstring": "Create a copy.\n\n Examples:\n This example copies constraint :math:`a \\\\ne b` and tests a solution\n on the copied constraint.\n\n >>> import dwavebinarycsp\n >>> import operator\n >>> const = dwavebinarycsp.Constraint.from_func(operator.ne,\n ... ['a', 'b'], 'BINARY')\n >>> const2 = const.copy()\n >>> const2 is const\n False\n >>> const2.check({'a': 1, 'b': 1})\n False", "id": "f11942:c0:m13"} {"signature": "def projection(self, variables):", "body": "variables = set(variables)if not variables.issubset(self.variables):raise ValueError(\"\")idxs = [i for i, v in enumerate(self.variables) if v in variables]configurations = frozenset(tuple(config[i] for i in idxs) for config in self.configurations)variables = tuple(self.variables[i] for i in idxs)return self.from_configurations(configurations, variables, self.vartype)", "docstring": "Create a new constraint that is the projection onto a subset of the variables.\n\n Args:\n variables (iterable):\n Subset of the constraint's variables.\n\n Returns:\n :obj:`.Constraint`: A new constraint over a subset of the variables.\n\n Examples:\n\n >>> import dwavebinarycsp\n ...\n >>> const = dwavebinarycsp.Constraint.from_configurations([(0, 0), (0, 1)],\n ... ['a', 'b'],\n ... 
dwavebinarycsp.BINARY)\n >>> proj = const.projection(['a'])\n >>> proj.variables\n ['a']\n >>> proj.configurations\n {(0,)}", "id": "f11942:c0:m14"} {"signature": "def add_constraint(self, constraint, variables=tuple()):", "body": "if isinstance(constraint, Constraint):if variables and (tuple(variables) != constraint.variables):raise ValueError(\"\")elif isinstance(constraint, Callable):constraint = Constraint.from_func(constraint, variables, self.vartype)elif isinstance(constraint, Iterable):constraint = Constraint.from_configurations(constraint, variables, self.vartype)else:raise TypeError(\"\")self.constraints.append(constraint)for v in constraint.variables:self.variables[v].append(constraint)", "docstring": "Add a constraint.\n\n Args:\n constraint (function/iterable/:obj:`.Constraint`):\n Constraint definition in one of the supported formats:\n\n 1. Function, with input arguments matching the order and\n :attr:`~.ConstraintSatisfactionProblem.vartype` type of the `variables`\n argument, that evaluates True when the constraint is satisfied.\n 2. List explicitly specifying each allowed configuration as a tuple.\n 3. :obj:`.Constraint` object built either explicitly or by :mod:`dwavebinarycsp.factories`.\n\n variables(iterable):\n Variables associated with the constraint. Not required when `constraint` is\n a :obj:`.Constraint` object.\n\n Examples:\n This example defines a function that evaluates True when the constraint is satisfied.\n The function's input arguments match the order and type of the `variables` argument.\n\n >>> import dwavebinarycsp\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> def all_equal(a, b, c): # works for both dwavebinarycsp.BINARY and dwavebinarycsp.SPIN\n ... return (a == b) and (b == c)\n >>> csp.add_constraint(all_equal, ['a', 'b', 'c'])\n >>> csp.check({'a': 0, 'b': 0, 'c': 0})\n True\n >>> csp.check({'a': 0, 'b': 0, 'c': 1})\n False\n\n This example explicitly lists allowed configurations.\n\n >>> import dwavebinarycsp\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)\n >>> eq_configurations = {(-1, -1), (1, 1)}\n >>> csp.add_constraint(eq_configurations, ['v0', 'v1'])\n >>> csp.check({'v0': -1, 'v1': +1})\n False\n >>> csp.check({'v0': -1, 'v1': -1})\n True\n\n This example uses a :obj:`.Constraint` object built by :mod:`dwavebinarycsp.factories`.\n\n >>> import dwavebinarycsp\n >>> import dwavebinarycsp.factories.constraint.gates as gates\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.add_constraint(gates.and_gate(['a', 'b', 'c'])) # add an AND gate\n >>> csp.add_constraint(gates.xor_gate(['a', 'c', 'd'])) # add an XOR gate\n >>> csp.check({'a': 1, 'b': 0, 'c': 0, 'd': 1})\n True", "id": "f11943:c0:m2"} {"signature": "def add_variable(self, v):", "body": "self.variables[v]", "docstring": "Add a variable.\n\n Args:\n v (variable):\n Variable in the constraint satisfaction problem. 
May be of any type that\n can be a dict key.\n\n Examples:\n This example adds two variables, one of which is already used in a constraint\n of the constraint satisfaction problem.\n\n >>> import dwavebinarycsp\n >>> import operator\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)\n >>> csp.add_constraint(operator.eq, ['a', 'b'])\n >>> csp.add_variable('a') # does nothing, already added as part of the constraint\n >>> csp.add_variable('c')\n >>> csp.check({'a': -1, 'b': -1, 'c': 1})\n True\n >>> csp.check({'a': -1, 'b': -1, 'c': -1})\n True", "id": "f11943:c0:m3"} {"signature": "def check(self, solution):", "body": "return all(constraint.check(solution) for constraint in self.constraints)", "docstring": "Check that a solution satisfies all of the constraints.\n\n Args:\n solution (container):\n An assignment of values for the variables in the constraint satisfaction problem.\n\n Returns:\n bool: True if the solution satisfies all of the constraints; False otherwise.\n\n Examples:\n This example creates a binary-valued constraint satisfaction problem, adds\n two logic gates implementing Boolean constraints, :math:`c = a \\wedge b`\n and :math:`d = a \\oplus c`, and verifies that the combined problem is satisfied\n for a given assignment.\n\n >>> import dwavebinarycsp\n >>> import dwavebinarycsp.factories.constraint.gates as gates\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.add_constraint(gates.and_gate(['a', 'b', 'c'])) # add an AND gate\n >>> csp.add_constraint(gates.xor_gate(['a', 'c', 'd'])) # add an XOR gate\n >>> csp.check({'a': 1, 'b': 0, 'c': 0, 'd': 1})\n True", "id": "f11943:c0:m4"} {"signature": "def fix_variable(self, v, value):", "body": "if v not in self.variables:raise ValueError(\"\".format(v))for constraint in self.variables[v]:constraint.fix_variable(v, value)del self.variables[v]", "docstring": "Fix the value of a variable and remove it from the constraint satisfaction problem.\n\n Args:\n v (variable):\n Variable to be fixed in the constraint satisfaction problem.\n\n value (int):\n Value assigned to the variable. 
Values must match the\n :attr:`~.ConstraintSatisfactionProblem.vartype` of the constraint\n satisfaction problem.\n\n Examples:\n This example creates a spin-valued constraint satisfaction problem, adds two constraints,\n :math:`a = b` and :math:`b \\\\ne c`, and fixes variable b to +1.\n\n >>> import dwavebinarycsp\n >>> import operator\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)\n >>> csp.add_constraint(operator.eq, ['a', 'b'])\n >>> csp.add_constraint(operator.ne, ['b', 'c'])\n >>> csp.check({'a': +1, 'b': +1, 'c': -1})\n True\n >>> csp.check({'a': -1, 'b': -1, 'c': +1})\n True\n >>> csp.fix_variable('b', +1)\n >>> csp.check({'a': +1, 'b': +1, 'c': -1}) # 'b' is ignored\n True\n >>> csp.check({'a': -1, 'b': -1, 'c': +1})\n False\n >>> csp.check({'a': +1, 'c': -1})\n True\n >>> csp.check({'a': -1, 'c': +1})\n False", "id": "f11943:c0:m5"} {"signature": "def assert_penaltymodel_factory_available():", "body": "from pkg_resources import iter_entry_pointsfrom penaltymodel.core import FACTORY_ENTRYPOINTfrom itertools import chainsupported = ('', '')factories = chain(*(iter_entry_points(FACTORY_ENTRYPOINT, name) for name in supported))try:next(factories)except StopIteration:raise AssertionError(\"\"\"\".format(\"\".join(\"\".format(name) for name in supported)))", "docstring": "For `dwavebinarycsp` to be functional, at least one penalty model factory\n has to be installed. See discussion in setup.py for details.", "id": "f11946:m0"} {"signature": "def stitch(csp, min_classical_gap=, max_graph_size=):", "body": "try:dwavebinarycsp.assert_penaltymodel_factory_available()except AssertionError as e:raise RuntimeError(e)def aux_factory():for i in count():yield ''.format(i)aux = aux_factory()bqm = dimod.BinaryQuadraticModel.empty(csp.vartype)for const in csp.constraints:configurations = const.configurationsif len(const.variables) > max_graph_size:msg = (\"\"\"\"\"\"\"\").format(const=const, num_var=len(const.variables), max_graph_size=max_graph_size)raise ImpossibleBQM(msg)pmodel = Noneif len(const) == :continueif min_classical_gap <= :if len(const) == and max_graph_size >= :bqm.update(_bqm_from_1sat(const))continueelif len(const) == and max_graph_size >= :bqm.update(_bqm_from_2sat(const))continuefor G in iter_complete_graphs(const.variables, max_graph_size + , aux):spec = pm.Specification(graph=G,decision_variables=const.variables,feasible_configurations=configurations,min_classical_gap=min_classical_gap,vartype=csp.vartype)try:pmodel = pm.get_penalty_model(spec)except pm.ImpossiblePenaltyModel:continueif pmodel.classical_gap >= min_classical_gap:breakelse:msg = (\"\".format(const))raise ImpossibleBQM(msg)bqm.update(pmodel.model)return bqm", "docstring": "Build a binary quadratic model with minimal energy levels at solutions to the specified constraint satisfaction\n problem.\n\n Args:\n csp (:obj:`.ConstraintSatisfactionProblem`):\n Constraint satisfaction problem.\n\n min_classical_gap (float, optional, default=2.0):\n Minimum energy gap from ground. 
Each constraint violated by the solution increases\n the energy level of the binary quadratic model by at least this much relative\n to ground energy.\n\n max_graph_size (int, optional, default=8):\n Maximum number of variables in the binary quadratic model that can be used to\n represent a single constraint.\n\n Returns:\n :class:`~dimod.BinaryQuadraticModel`\n\n Notes:\n For a `min_classical_gap` > 2 or constraints with more than two variables, requires\n access to factories from the penaltymodel_ ecosystem to construct the binary quadratic\n model.\n\n .. _penaltymodel: https://github.com/dwavesystems/penaltymodel\n\n Examples:\n This example creates a binary-valued constraint satisfaction problem\n with two constraints, :math:`a = b` and :math:`b \\\\ne c`, and builds\n a binary quadratic model with a minimum energy level of -2 such that\n each constraint violation by a solution adds the default minimum energy gap.\n\n >>> import dwavebinarycsp\n >>> import operator\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.add_constraint(operator.eq, ['a', 'b']) # a == b\n >>> csp.add_constraint(operator.ne, ['b', 'c']) # b != c\n >>> bqm = dwavebinarycsp.stitch(csp)\n >>> bqm.energy({'a': 0, 'b': 0, 'c': 1}) # satisfies csp\n -2.0\n >>> bqm.energy({'a': 0, 'b': 0, 'c': 0}) # violates one constraint\n 0.0\n >>> bqm.energy({'a': 1, 'b': 0, 'c': 0}) # violates two constraints\n 2.0\n\n This example creates a binary-valued constraint satisfaction problem\n with two constraints, :math:`a = b` and :math:`b \\\\ne c`, and builds\n a binary quadratic model with a minimum energy gap of 4.\n Note that in this case the conversion to binary quadratic model adds two\n ancillary variables that must be minimized over when solving.\n\n >>> import dwavebinarycsp\n >>> import operator\n >>> import itertools\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.add_constraint(operator.eq, ['a', 'b']) # a == b\n >>> csp.add_constraint(operator.ne, ['b', 'c']) # b != c\n >>> bqm = dwavebinarycsp.stitch(csp, min_classical_gap=4.0)\n >>> list(bqm) # # doctest: +SKIP\n ['a', 'aux1', 'aux0', 'b', 'c']\n >>> min([bqm.energy({'a': 0, 'b': 0, 'c': 1, 'aux0': aux0, 'aux1': aux1}) for\n ... aux0, aux1 in list(itertools.product([0, 1], repeat=2))]) # satisfies csp\n -6.0\n >>> min([bqm.energy({'a': 0, 'b': 0, 'c': 0, 'aux0': aux0, 'aux1': aux1}) for\n ... aux0, aux1 in list(itertools.product([0, 1], repeat=2))]) # violates one constraint\n -2.0\n >>> min([bqm.energy({'a': 1, 'b': 0, 'c': 0, 'aux0': aux0, 'aux1': aux1}) for\n ... aux0, aux1 in list(itertools.product([0, 1], repeat=2))]) # violates two constraints\n 2.0\n\n This example finds for the previous example the minimum graph size.\n\n >>> import dwavebinarycsp\n >>> import operator\n >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n >>> csp.add_constraint(operator.eq, ['a', 'b']) # a == b\n >>> csp.add_constraint(operator.ne, ['b', 'c']) # b != c\n >>> for n in range(8, 1, -1):\n ... try:\n ... bqm = dwavebinarycsp.stitch(csp, min_classical_gap=4.0, max_graph_size=n)\n ... except dwavebinarycsp.exceptions.ImpossibleBQM:\n ... 
print(n+1)\n ...\n 3", "id": "f11947:m0"} {"signature": "def _bqm_from_1sat(constraint):", "body": "configurations = constraint.configurationsnum_configurations = len(configurations)bqm = dimod.BinaryQuadraticModel.empty(constraint.vartype)if num_configurations == :val, = next(iter(configurations))v, = constraint.variablesbqm.add_variable(v, - if val > else +, vartype=dimod.SPIN)else:bqm.add_variables_from((v, ) for v in constraint.variables)return bqm", "docstring": "create a bqm for a constraint with only one variable\n\n bqm will have exactly classical gap 2.", "id": "f11947:m1"} {"signature": "def _bqm_from_2sat(constraint):", "body": "configurations = constraint.configurationsvariables = constraint.variablesvartype = constraint.vartypeu, v = constraint.variablesif len(configurations) == :return dimod.BinaryQuadraticModel.empty(constraint.vartype)components = irreducible_components(constraint)if len(components) > :const0 = Constraint.from_configurations(((config[],) for config in configurations),(u,), vartype)const1 = Constraint.from_configurations(((config[],) for config in configurations),(v,), vartype)bqm = _bqm_from_1sat(const0)bqm.update(_bqm_from_1sat(const1))return bqmassert len(configurations) > , \"\"bqm = dimod.BinaryQuadraticModel.empty(vartype)if all(operator.eq(*config) for config in configurations):bqm.add_interaction(u, v, -, vartype=dimod.SPIN) elif all(operator.ne(*config) for config in configurations):bqm.add_interaction(u, v, +, vartype=dimod.SPIN) elif (, ) not in configurations:bqm.add_interaction(u, v, , vartype=dimod.BINARY) elif (-, +) not in configurations and (, ) not in configurations:bqm.add_interaction(u, v, -, vartype=dimod.BINARY)bqm.add_variable(v, , vartype=dimod.BINARY)elif (+, -) not in configurations and (, ) not in configurations:bqm.add_interaction(u, v, -, vartype=dimod.BINARY)bqm.add_variable(u, , vartype=dimod.BINARY)else:bqm.add_interaction(u, v, , vartype=dimod.BINARY)bqm.add_variable(u, -, vartype=dimod.BINARY)bqm.add_variable(v, -, vartype=dimod.BINARY)return bqm", "docstring": "create a bqm for a constraint with two variables.\n\n bqm will have exactly classical gap 2.", "id": "f11947:m2"} {"signature": "@nx.utils.nodes_or_number()def iter_complete_graphs(start, stop, factory=None):", "body": "_, nodes = startnodes = list(nodes) if factory is None:factory = count()while len(nodes) < stop:G = nx.complete_graph(nodes)yield Gv = next(factory)while v in G:v = next(factory)nodes.append(v)", "docstring": "Iterate over complete graphs.\n\n Args:\n start (int/iterable):\n Define the size of the starting graph.\n If an int, the nodes will be index-labeled, otherwise should be an iterable of node\n labels.\n\n stop (int):\n Stops yielding graphs when the size equals stop.\n\n factory (iterator, optional):\n If provided, nodes added will be labeled according to the values returned by factory.\n Otherwise the extra nodes will be index-labeled.\n\n Yields:\n :class:`nx.Graph`", "id": "f11947:m3"} {"signature": "@propertydef options(self):", "body": "return list(self._results.keys())", "docstring": "Returns a list of strings to represent the options for the poll, in\n the order they were given when the poll was created.", "id": "f11956:c0:m4"} {"signature": "@propertydef votes(self):", "body": "return list(self._results.values())", "docstring": "Return a list of integers that correspond to the same indexed option\n which specify the current votes for that option.", "id": "f11956:c0:m5"} {"signature": "@propertydef total_votes(self):", "body": "return 
sum(self._results.values())", "docstring": "Returns the total number of votes on the poll.", "id": "f11956:c0:m6"} {"signature": "@propertydef url(self):", "body": "if self.id is None:return ''return ''.format(strawpoll.API._BASE_URL, self.id)", "docstring": "Returns the url of the poll. If the poll has not been submitted yet,\n an empty string is returned instead.", "id": "f11956:c0:m7"} {"signature": "def results(self, limit=None):", "body": "return self._results.most_common(limit)", "docstring": "Returns a list of tuples each containing a string representing an\n option and an int which specify the current votes for that option,\n ordered by their votes count.\n\n :param int limit: The maximum number of results to return. If not \\\n specified, every results will be returned.", "id": "f11956:c0:m8"} {"signature": "def result_at(self, index):", "body": "return list(self._results.items())[index]", "docstring": "Returns a tuple containing a string representing the option and an\n int which specify the current votes for that option.\n\n :param int index: The index of the wanted option in the options list.", "id": "f11956:c0:m9"} {"signature": "def get_poll(self, arg, *, request_policy=None):", "body": "if isinstance(arg, str):match = self._url_re.match(arg)if match:arg = match.group('')return self._http_client.get(''.format(self._POLLS, arg),request_policy=request_policy,cls=strawpoll.Poll)", "docstring": "Retrieves a poll from strawpoll.\n\n :param arg: Either the ID of the poll or its strawpoll url.\n :param request_policy: Overrides :attr:`API.requests_policy` for that \\\n request.\n :type request_policy: Optional[:class:`RequestsPolicy`]\n\n :raises HTTPException: Requesting the poll failed.\n\n :returns: A poll constructed with the requested data.\n :rtype: :class:`Poll`", "id": "f11961:c0:m3"} {"signature": "def submit_poll(self, poll, *, request_policy=None):", "body": "if poll.id is not None:raise ExistingPoll()options = poll.optionsdata = {'': poll.title,'': options,'': poll.multi,'': poll.dupcheck,'': poll.captcha}return self._http_client.post(self._POLLS,data=data,request_policy=request_policy,cls=strawpoll.Poll)", "docstring": "Submits a poll on strawpoll.\n\n :param poll: The poll to submit.\n :type poll: :class:`Poll`\n :param request_policy: Overrides :attr:`API.requests_policy` for that \\\n request.\n :type request_policy: Optional[:class:`RequestsPolicy`]\n\n :raises ExistingPoll: This poll instance has already been submitted.\n :raises HTTPException: The submission failed.\n\n :returns: The given poll updated with the data sent back from the submission.\n :rtype: :class:`Poll`\n\n .. note::\n Only polls that have a non empty title and between 2 and 30 options\n can be submitted.", "id": "f11961:c0:m4"} {"signature": "def _get_header(key):", "body": "try:return request.headers[key]except KeyError:abort(, '' + key)", "docstring": "Return message header", "id": "f11965:m0"} {"signature": "def hook(self, event_type=''):", "body": "def decorator(func):self._hooks[event_type].append(func)return funcreturn decorator", "docstring": "Registers a function as a hook. 
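# Sketch of the Counter bookkeeping behind the Poll properties above
# (f11956:c0:m4..m9): options/votes come from a Counter mapping option ->
# vote count, total_votes sums its values, and results() relies on
# most_common(). Standalone illustration with a plain Counter; the real class
# wraps data returned by the strawpoll API.
from collections import Counter

_results = Counter({"pizza": 7, "sushi": 3, "tacos": 5})

options = list(_results.keys())        # ['pizza', 'sushi', 'tacos']
votes = list(_results.values())        # [7, 3, 5]
total_votes = sum(_results.values())   # 15
top_two = _results.most_common(2)      # [('pizza', 7), ('tacos', 5)]

assert total_votes == 15 and top_two[0] == ("pizza", 7)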
Multiple hooks can be registered for a given type, but the\norder in which they are invoke is unspecified.\n\n:param event_type: The event type this hook will be invoked for.", "id": "f11965:c0:m1"} {"signature": "def _get_digest(self):", "body": "return hmac.new(self._secret, request.data, hashlib.sha1).hexdigest() if self._secret else None", "docstring": "Return message digest if a secret key was provided", "id": "f11965:c0:m2"} {"signature": "def _postreceive(self):", "body": "digest = self._get_digest()if digest is not None:sig_parts = _get_header('').split('', )if not isinstance(digest, six.text_type):digest = six.text_type(digest)if (len(sig_parts) < or sig_parts[] != ''or not hmac.compare_digest(sig_parts[], digest)):abort(, '')event_type = _get_header('')data = request.get_json()if data is None:abort(, '')self._logger.info('', _format_event(event_type, data), _get_header(''))for hook in self._hooks.get(event_type, []):hook(data)return '', ", "docstring": "Callback from Flask", "id": "f11965:c0:m3"} {"signature": "def system(command, input=None):", "body": "logger.debug(\"\", command)p = subprocess.Popen(command,shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=MUST_CLOSE_FDS)stdoutdata, stderrdata = p.communicate(input=input)result = stdoutdata + stderrdataif p.returncode:logger.error(\"\",command)logger.error(\"\", p.returncode)logger.error(\"\")logger.error(result)sys.exit()logger.info(result)", "docstring": "commands.getoutput() replacement that also works on windows.\n\n Code mostly copied from zc.buildout.", "id": "f11970:m0"} {"signature": "def main():", "body": "usage = \"\"parser = optparse.OptionParser(usage=usage)parser.add_option(\"\", \"\",action=\"\", dest=\"\", default=False,help=\"\")parser.add_option(\"\", \"\",action=\"\", type=\"\", dest=\"\",default='',help=\"\")parser.add_option(\"\", \"\",action=\"\", type=\"\", dest=\"\",default='',help=(\"\" +\"\"))(options, args) = parser.parse_args()if options.verbose:log_level = logging.DEBUGelse:log_level = logging.INFOlogging.basicConfig(level=log_level,format=\"\")curdir = os.getcwd()testbinary = os.path.join(curdir, '', '')if not os.path.exists(testbinary):raise RuntimeError(\"\" % testbinary)coveragebinary = os.path.join(curdir, '', '')if not os.path.exists(coveragebinary):logger.debug(\"\")coveragebinary = ''logger.info(\"\")parts = [coveragebinary, '', testbinary]if options.test_args:parts.append(options.test_args)system(\"\".join(parts))logger.debug(\"\")if options.output_dir:coverage_dir = options.output_diropen_in_browser = Falseelse:coverage_dir = '' open_in_browser = Truesystem(\"\" % (coveragebinary, coverage_dir))logger.info(\"\", coverage_dir)if open_in_browser:index_file = os.path.abspath(os.path.join(coverage_dir, ''))logger.debug(\"\", index_file)webbrowser.open('' + index_file)logger.info(\"\")", "docstring": "Create coverage reports and open them in the browser.", "id": "f11970:m1"} {"signature": "def before_scenario(context, scenario):", "body": "context.log.debug(\"\"\"\"\"\"% scenario.name)context.params = {}context.exception = Nonecontext.result = None", "docstring": "Add entry to the debug log befere starting the scenario", "id": "f11975:m0"} {"signature": "def after_scenario(context, scenario):", "body": "context.log.debug(\"\"\"\"\"\"% scenario.name)", "docstring": "Add an entry to the debug log after scenario was finished", "id": "f11975:m1"} {"signature": "def before_all(context):", "body": "context.log = _setup_logging()", "docstring": "Executes the code 
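# Sketch of the signature check performed in _get_digest()/_postreceive()
# above (f11965:c0:m2/m3): the request body is HMAC-SHA1 signed with the
# shared secret and compared, using a constant-time comparison, against the
# "sha1=<hexdigest>" value from the X-Hub-Signature header. The header and
# error-message literals are elided in this dump, so the exact strings below
# are assumptions based on the standard GitHub webhook convention.
import hashlib
import hmac


def signature_is_valid(secret: bytes, payload: bytes, header_value: str) -> bool:
    digest = hmac.new(secret, payload, hashlib.sha1).hexdigest()
    scheme, _, signature = header_value.partition("=")
    return scheme == "sha1" and hmac.compare_digest(signature, digest)


payload = b'{"action": "opened"}'
secret = b"s3cr3t"
good_header = "sha1=" + hmac.new(secret, payload, hashlib.sha1).hexdigest()
assert signature_is_valid(secret, payload, good_header)
assert not signature_is_valid(secret, payload, "sha1=deadbeef")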
before all the tests are run", "id": "f11975:m2"} {"signature": "def _setup_logging():", "body": "log = logging.getLogger('')fh = logging.FileHandler(filename=''.format(REPORT_DIR), mode='')log.setLevel(logging.DEBUG)ch = logging.StreamHandler()ch.setLevel(logging.INFO)fh.setLevel(logging.DEBUG)formatter = logging.Formatter('','')ch.setFormatter(formatter)fh.setFormatter(formatter)log.addHandler(ch)log.addHandler(fh)return log", "docstring": "set up the logging facility", "id": "f11975:m3"} {"signature": "def parse_hub_key(key):", "body": "if key is None:raise ValueError('')match = re.match(PATTERN, key)if not match:match = re.match(PATTERN_S0, key)if not match:raise ValueError('')return dict(map(normalise_part, zip([p for p in PARTS_S0.keys()], match.groups())))return dict(zip(PARTS.keys(), match.groups()))", "docstring": "Parse a hub key into a dictionary of component parts\n\n :param key: str, a hub key\n :returns: dict, hub key split into parts\n :raises: ValueError", "id": "f11977:m1"} {"signature": "def is_hub_key(value):", "body": "try:parse_hub_key(value)return Trueexcept (ValueError, TypeError):return False", "docstring": "Test if a value could be a hub key\n :param value: the value to test if it is a hub key\n :returns: True if it is a hub key", "id": "f11977:m2"} {"signature": "def match_part(string, part):", "body": "if not string or not re.match('' + PARTS[part] + '', string):raise ValueError(''.format(part, PARTS[part]))", "docstring": "Raise an exception if string doesn't match a part's regex\n\n :param string: str\n :param part: a key in the PARTS dict\n :raises: ValueError, TypeError", "id": "f11977:m3"} {"signature": "def idna_encode(string):", "body": "return string.encode('').decode('')", "docstring": "Encode a string as ASCII using IDNA so that it is a valid part of a URI\n\n See RFC3490.\n\n :param string: str\n :returns: ASCII string", "id": "f11977:m4"} {"signature": "def url_quote(string):", "body": "return quote(string.encode(''), safe='')", "docstring": "Percent encode a string as ASCII so that it is a valid part of a URI\n\n :param string: str\n :returns: ASCII string", "id": "f11977:m5"} {"signature": "def generate_hub_key(resolver_id, hub_id, repository_id, entity_type, entity_id=None):", "body": "parsed = urlparse(resolver_id)if not parsed.scheme:parsed = parsed._replace(scheme=PROTOCOL, netloc=idna_encode(parsed.path.lower()), path=u'')else:parsed = parsed._replace(netloc=idna_encode(parsed.netloc.lower()))resolver_id = urlunparse(parsed)hub_id = url_quote(hub_id.lower())if not entity_id:entity_id = str(uuid.uuid4()).replace('', '')else:match_part(entity_id, '')match_part(resolver_id, '')match_part(hub_id, '')match_part(repository_id, '')match_part(entity_type, '')hub_key = SEPARATOR.join([resolver_id, SCHEMA, hub_id, repository_id, entity_type, entity_id])return hub_key", "docstring": "Create and return an array of hub keys\n :param resolver_id: the service that can resolve this key\n :param hub_id: the unique id of the hub\n :param repository_id: the type of id that the provider recognises\n :param entity_type: the type of the entity to which the key refers.\n :param entity_id: ID of entity (UUID)\n :returns: a hub key\n :raises:\n :AttributeError: if a parameter has a bad value\n :TypeError: if a parameter has a bad value\n :ValueError: if a parameter has a bad value", "id": "f11977:m6"} {"signature": "def get_channel_image(self, channel, img_size=, skip_cache=False):", "body": "from bs4 import BeautifulSoupfrom wikipedia.exceptions import PageErrorimport 
reimport wikipediawikipedia.set_lang('')if not channel:_LOGGER.error('')returnif channel in self._cache_channel_img and not skip_cache:img = self._cache_channel_img[channel]_LOGGER.debug('', channel, img)return imgchannel_info = self.get_channel_info(channel)query = channel_info['']if not query:_LOGGER.debug('', channel)return_LOGGER.debug('', query)if '' in channel_info:if img_size > channel_info['']:_LOGGER.info('''', channel_info[''])img_size = channel_info['']try:page = wikipedia.page(query)_LOGGER.debug('', page.title)soup = BeautifulSoup(page.html(), '')images = soup.find_all('')img_src = Nonefor i in images:if i[''].startswith(''):img_src = re.sub(r'', ''.format(img_size),i[''])img = ''.format(img_src) if img_src else Noneself._cache_channel_img[channel] = imgreturn imgexcept PageError:_LOGGER.error('', channel)", "docstring": "Get the logo for a channel", "id": "f11988:c0:m31"} {"signature": "def press_key(self, key, mode=):", "body": "if isinstance(key, str):assert key in KEYS, ''.format(key)key = KEYS[key]_LOGGER.info('', self.__get_key_name(key))return self.rq('', OrderedDict([('', key), ('', mode)]))", "docstring": "modes:\n 0 -> simple press\n 1 -> long press\n 2 -> release after long press", "id": "f11988:c0:m41"} {"signature": "def get_repr(self, obj, referent=None):", "body": "objtype = type(obj)typename = str(objtype.__module__) + \"\" + objtype.__name__prettytype = typename.replace(\"\", \"\")name = getattr(obj, \"\", \"\")if name:prettytype = \"\" % (prettytype, name)key = \"\"if referent:key = self.get_refkey(obj, referent)url = reverse('', args=(typename,id(obj)))return (''''''% (url, id(obj), prettytype, key, get_repr(obj, )))", "docstring": "Return an HTML tree block describing the given object.", "id": "f11991:c0:m1"} {"signature": "def get_refkey(self, obj, referent):", "body": "if isinstance(obj, dict):for k, v in obj.items():if v is referent:return \"\" % kfor k in dir(obj) + ['']:if getattr(obj, k, None) is referent:return \"\" % kreturn \"\"", "docstring": "Return the dict key or attribute name of obj which refers to\n referent.", "id": "f11991:c0:m2"} {"signature": "def walk(self, maxresults=, maxdepth=None):", "body": "log.debug(\"\")self.seen = {}self.ignore(self, self.__dict__, self.obj, self.seen, self._ignore)self.ignore_caller()self.maxdepth = maxdepthcount = log.debug(\"\")for result in self._gen(self.obj):log.debug(\"\")yield resultcount += if maxresults and count >= maxresults:yield , , \"\"return", "docstring": "Walk the object tree, ignoring duplicates and circular refs.", "id": "f11993:c0:m3"} {"signature": "def print_tree(self, maxresults=, maxdepth=None):", "body": "self.ignore_caller()for depth, refid, rep in self.walk(maxresults, maxdepth):print((\"\" % refid), (\"\" * depth * ), rep)", "docstring": "Walk the object tree, pretty-printing each branch.", "id": "f11993:c0:m4"} {"signature": "def walk(self, maxresults=, maxdepth=None):", "body": "self.stops = self.seen = {}self.ignore(self, self.__dict__, self.seen, self._ignore)self.ignore_caller()self.maxdepth = maxdepthcount = for result in self._gen(self.obj):yield resultcount += if maxresults and count >= maxresults:yield , , \"\"return", "docstring": "Walk the object tree, showing circular referents.", "id": "f11993:c3:m0"} {"signature": "def print_tree(self, maxresults=, maxdepth=None):", "body": "self.ignore_caller()for trail in self.walk(maxresults, maxdepth):print(trail)if self.stops:print(\"\" % self.stops)", "docstring": "Walk the object tree, pretty-printing each branch.", "id": 
"f11993:c3:m2"} {"signature": "def has_address(start: int, data_length: int) -> bool:", "body": "return bool( & start) or (start == and data_length == )", "docstring": "Determine whether the packet has an \"address\" encoded into it.\nThere exists an undocumented bug/edge case in the spec - some packets\nwith 0x82 as _start_, still encode the address into the packet, and thus\nthrows off decoding. This edge case is handled explicitly.", "id": "f12003:m0"} {"signature": "def decode_timestamp(data: str) -> datetime.datetime:", "body": "year = + int(data[:])month = int(data[:])day = int(data[:])hour = int(data[:])minute = int(data[:])second = int(data[:])if minute == :minute = hour += return datetime.datetime(year=year, month=month, day=day, hour=hour,minute=minute, second=second)", "docstring": "Decode timestamp using bespoke decoder.\nCannot use simple strptime since the ness panel contains a bug\nthat P199E zone and state updates emitted on the hour cause a minute\nvalue of `60` to be sent, causing strptime to fail. This decoder handles\nthis edge case.", "id": "f12003:m4"} {"signature": "@classmethoddef decode(cls, _data: str) -> '':", "body": "data = DataIterator(_data)_LOGGER.debug(\"\", _data)start = data.take_hex()address = Noneif has_address(start, len(_data)):address = data.take_hex(half=is_user_interface_req(start))length = data.take_hex()data_length = length & seq = length >> command = CommandType(data.take_hex())msg_data = data.take_bytes(data_length,half=is_user_interface_req(start))timestamp = Noneif has_timestamp(start):timestamp = decode_timestamp(data.take_bytes())checksum = data.take_hex() if not data.is_consumed():raise ValueError('')return Packet(is_user_interface_resp=(is_user_interface_resp(start) andcommand == CommandType.USER_INTERFACE),address=address,seq=seq,command=command,data=msg_data,timestamp=timestamp,)", "docstring": "Packets are ASCII encoded data. Packet layout is as follows:\n\n+---------------------------------------------------------------------------+\n| start | address | length | command | data | timestamp | checksum | finish |\n| hex | hex | hex | hex | str | dec | hex | crlf |\n| 1 | 1 | 1 | 1 | n | 6 | 1 | |\n+---------------------------------------------------------------------------+\n\nTimestamp:\n Timestamps are formatted in the following format, where each field is\n decimal encoded:\n\n YY MM DD HH MM SS\n\nChecksum:\n Calculated by...?\n\nSince data is ASCII encoded, each byte uses 2 ASCII character to be\nrepresented. 
However, we cannot simply do a hex decode on the entire\nmessage, since the timestamp and data fields are represented using a\nnon-hex representation and therefore must be manually decoded.", "id": "f12003:c1:m5"} {"signature": "async def update(self) -> None:", "body": "_LOGGER.debug(\"\")await asyncio.gather(self.send_command(''),self.send_command(''),)", "docstring": "Force update of alarm status and zones", "id": "f12005:c0:m6"} {"signature": "async def _update_loop(self) -> None:", "body": "await asyncio.sleep(self._update_interval)while not self._closed:await self.update()await asyncio.sleep(self._update_interval)", "docstring": "Schedule a state update to keep the connection alive", "id": "f12005:c0:m11"} {"signature": "def _handle_system_status_event(self, event: SystemStatusEvent) -> None:", "body": "if event.type == SystemStatusEvent.EventType.UNSEALED:return self._update_zone(event.zone, True)elif event.type == SystemStatusEvent.EventType.SEALED:return self._update_zone(event.zone, False)elif event.type == SystemStatusEvent.EventType.ALARM:return self._update_arming_state(ArmingState.TRIGGERED)elif event.type == SystemStatusEvent.EventType.ALARM_RESTORE:if self.arming_state != ArmingState.DISARMED:return self._update_arming_state(ArmingState.ARMED)elif event.type == SystemStatusEvent.EventType.ENTRY_DELAY_START:return self._update_arming_state(ArmingState.ENTRY_DELAY)elif event.type == SystemStatusEvent.EventType.ENTRY_DELAY_END:passelif event.type == SystemStatusEvent.EventType.EXIT_DELAY_START:return self._update_arming_state(ArmingState.EXIT_DELAY)elif event.type == SystemStatusEvent.EventType.EXIT_DELAY_END:if self.arming_state == ArmingState.EXIT_DELAY:return self._update_arming_state(ArmingState.ARMED)elif event.type in Alarm.ARM_EVENTS:return self._update_arming_state(ArmingState.ARMING)elif event.type == SystemStatusEvent.EventType.DISARMED:return self._update_arming_state(ArmingState.DISARMED)elif event.type == SystemStatusEvent.EventType.ARMING_DELAYED:pass", "docstring": "DISARMED -> ARMED_AWAY -> EXIT_DELAY_START -> EXIT_DELAY_END\n (trip): -> ALARM -> OUTPUT_ON -> ALARM_RESTORE\n (disarm): -> DISARMED -> OUTPUT_OFF\n (disarm): -> DISARMED\n (disarm before EXIT_DELAY_END): -> DISARMED -> EXIT_DELAY_END\n\nTODO(NW): Check ALARM_RESTORE state transition to move back into ARMED_AWAY state", "id": "f12006:c1:m4"} {"signature": "@cli.command()def version():", "body": "print(get_version())", "docstring": "Print installed package version.", "id": "f12009:m1"} {"signature": "def listening_ports():", "body": "ports = []if not os.path.exists(PROC_TCP):return portswith open(PROC_TCP) as fh:for line in fh:if '' not in line:continueparts = line.lstrip('').split('')if parts[] != '':continuelocal_port = parts[].split('')[]local_port = int('' + local_port, base=)ports.append(local_port)return ports", "docstring": "Reads listening ports from /proc/net/tcp", "id": "f12018:m0"} {"signature": "def __iter__(self):", "body": "idx = self._paddingwhile idx < len(self._data):tobject, hole = self._read_next(idx, len(self._data))if tobject is None:returnif hole:idx += hole[] idx += tobject.bytes_length + self._paddingyield tobject, hole", "docstring": "An iterator that yields a tuple of (thrift object, hole), where\nhole is a tuple of (start, skipped) bytes.", "id": "f12019:c0:m2"} {"signature": "def get_ip_packet(data, client_port, server_port, is_loopback=False):", "body": "header = _loopback if is_loopback else _ethernettry:header.unpack(data)except Exception as ex:raise ValueError('' % 
ex)tcp_p = getattr(header.data, '', None)if type(tcp_p) != dpkt.tcp.TCP:raise ValueError('')if tcp_p.dport == server_port:if client_port != and tcp_p.sport != client_port:raise ValueError('')elif tcp_p.sport == server_port:if client_port != and tcp_p.dport != client_port:raise ValueError('')else:raise ValueError('')return header.data", "docstring": "if client_port is 0 any client_port is good", "id": "f12020:m0"} {"signature": "def _assertMessages(self, output):", "body": "self.assertIn('',output.getvalue())self.assertIn('',output.getvalue())self.assertIn('',output.getvalue())self.assertIn('',output.getvalue())self.assertIn('',output.getvalue())self.assertIn('',output.getvalue())self.assertIn('',output.getvalue())self.assertIn('',output.getvalue())self.assertIn('',output.getvalue())self.assertIn('',output.getvalue())", "docstring": "this is a bit fragile...", "id": "f12025:c0:m5"} {"signature": "def __len__(self):", "body": "return self._length", "docstring": "in bytes", "id": "f12030:c0:m1"} {"signature": "@propertydef bytes_length(self):", "body": "return len(self)", "docstring": "for ThriftStruct, __len__ means something different so lets\n add this other property to unify the way to refer to bytes", "id": "f12030:c0:m2"} {"signature": "@classmethoddef read(cls, data,protocol=None,fallback_protocol=TBinaryProtocol,finagle_thrift=False,max_fields=MAX_FIELDS,max_list_size=MAX_LIST_SIZE,max_map_size=MAX_MAP_SIZE,max_set_size=MAX_SET_SIZE,read_values=False):", "body": "if len(data) < cls.MIN_MESSAGE_SIZE:raise ValueError('')if protocol is None:protocol = cls.detect_protocol(data, fallback_protocol)trans = TTransport.TMemoryBuffer(data)proto = protocol(trans)header = Noneif finagle_thrift:try:header = ThriftStruct.read(proto,max_fields,max_list_size,max_map_size,max_set_size,read_values)except:trans = TTransport.TMemoryBuffer(data)proto = protocol(trans)method, mtype, seqid = proto.readMessageBegin()mtype = cls.message_type_to_str(mtype)if len(method) == or method.isspace() or method.startswith(''):raise ValueError('')if len(method) > cls.MAX_METHOD_LENGTH:raise ValueError('')valid = range(, )if any(ord(char) not in valid for char in method):raise ValueError('' % method)args = ThriftStruct.read(proto,max_fields,max_list_size,max_map_size,max_set_size,read_values)proto.readMessageEnd()msglen = trans._buffer.tell()return cls(method, mtype, seqid, args, header, msglen), msglen", "docstring": "tries to deserialize a message, might fail if data is missing", "id": "f12030:c0:m10"} {"signature": "@classmethoddef detect_protocol(cls, data, default=None):", "body": "if cls.is_compact_protocol(data):return TCompactProtocolelif cls.is_binary_protocol(data):return TBinaryProtocolelif cls.is_json_protocol(data):return TJSONProtocolif default is None:raise ValueError('')return default", "docstring": "TODO: support fbthrift, finagle-thrift, finagle-mux, CORBA", "id": "f12030:c0:m11"} {"signature": "@propertydef length(self):", "body": "return self._length", "docstring": "how many bytes have been through the stream", "id": "f12034:c0:m2"} {"signature": "def pop(self, nbytes):", "body": "size = popped = []with self._lock_packets:while size < nbytes:try:packet = self._packets.pop()size += len(packet.data.data)self._remaining -= len(packet.data.data)popped.append(packet)except IndexError:breakreturn popped", "docstring": "pops packets with _at least_ nbytes of payload", "id": "f12034:c0:m6"} {"signature": "def pop_data(self, nbytes):", "body": "last_timestamp = data = []for packet in 
self.pop(nbytes):last_timestamp = packet.timestampdata.append(packet.data.data)return ''.join(data), last_timestamp", "docstring": "similar to pop, but returns payload + last timestamp", "id": "f12034:c0:m7"} {"signature": "def push(self, ip_packet):", "body": "data_len = len(ip_packet.data.data)seq_id = ip_packet.data.seqif data_len == :self._next_seq_id = seq_idreturn Falseif self._next_seq_id != - and seq_id != self._next_seq_id:return Falseself._next_seq_id = seq_id + data_lenwith self._lock_packets:self._length += len(ip_packet.data.data)self._remaining += len(ip_packet.data.data)self._packets.append(ip_packet)return True", "docstring": "push the packet into the queue", "id": "f12034:c0:m8"} {"signature": "def run(self, *args, **kwargs):", "body": "while True:try:timestamp, ip_p = self._queue.popleft()src_ip = get_ip(ip_p, ip_p.src)dst_ip = get_ip(ip_p, ip_p.dst)src = intern('' % (src_ip, ip_p.data.sport))dst = intern('' % (dst_ip, ip_p.data.dport))key = intern('' % (src, dst))stream = self._streams.get(key)if stream is None:stream = Stream(src, dst)self._streams[key] = streamsetattr(ip_p, '', timestamp)pushed = stream.push(ip_p)if not pushed:continuefor handler in self._handlers:try:handler(stream)except Exception as ex:print('' % ex)except Exception:time.sleep()", "docstring": "Deal with the incoming packets", "id": "f12034:c1:m3"} {"signature": "def __init__(self, iface, port, stream_handler=None, offline=None, ip=None):", "body": "super(Sniffer, self).__init__()self.setDaemon(True)self._iface = ifaceself._port = portself._offline = offlineself._ip = ip if ip else []self._queue = deque() self._dispatcher = Dispatcher(self._queue)self._dispatcher.add_handler(stream_handler)self._wants_stop = Falseself.start()", "docstring": "A Sniffer that merges packets into a stream\n\n Params:\n ``iface`` The interface in which to listen\n ``port`` The TCP port that we care about\n ``stream_handler`` The callback for each stream\n ``offline`` Path to a pcap file\n ``ip`` A list of IPs that we care about", "id": "f12034:c2:m0"} {"signature": "def __call__(self, timestamp, src, dst, msg):", "body": "if msg.type == '':replies = self._replies[dst][src][msg.method]if len(replies) > :reply_timestamp, reply = replies.popleft()self._print_pair(timestamp, msg, reply_timestamp, reply, src, dst)else:self._requests[src][dst][msg.method].append((timestamp, msg))elif msg.type == '':requests = self._requests[dst][src][msg.method]if len(requests) > :request_timestamp, request = requests.popleft()self._print_pair(request_timestamp, request, timestamp, msg,dst, src)else:self._replies[src][dst][msg.method].append((timestamp, msg))else:print_msg(timestamp, src, dst, msg, self._format_opts,output=self._output)return True", "docstring": "We need to match up each (request, reply) pair. Presumably,\npcap _shouldn't_ deliver packets out of order, but\nthings could get mixed up somewhere withing the\nTCP stream being reassembled and the StreamHandler\nthread. So, we don't assume that a 'reply' implies\nthe corresponding 'call' has been seen.\n\nIt could also be that we started sniffing after\nthe 'call' message... 
but there's no easy way to tell\n(given we don't keep the startup time around...)", "id": "f12035:c1:m1"} {"signature": "def __call__(self, timestamp, src, dst, msg):", "body": "if msg.type == '':self._requests[src][dst][msg.method][msg.seqid] = (timestamp, msg)elif msg.type == '':calls = self._requests[dst][src][msg.method]if msg.seqid in calls:request_timestamp, request = calls.pop(msg.seqid)latency = timestamp - request_timestampself._latencies_by_method[msg.method].append(latency)self._seen += self._output.write('' % (self._seen, self._expected))self._output.flush()if self._seen < self._expected:return True self.report()unmatched = for src, dst in self._requests.items():for method, calls in dst.items():unmatched += len(calls)if unmatched > :self._output.write('' % unmatched)return False", "docstring": "Slightly simplified logic wrt what PairedPrinter has:\nwe assume 'call' messages will be seen before their\ncorresponding 'reply'.", "id": "f12035:c2:m1"} {"signature": "def report(self):", "body": "self._output.write('')sort_by = ''results = {}for key, latencies in self._latencies_by_method.items():result = {}result[''] = len(latencies)result[''] = sum(latencies) / len(latencies)result[''] = min(latencies)result[''] = max(latencies)latencies = sorted(latencies)result[''] = percentile(latencies, )result[''] = percentile(latencies, )result[''] = percentile(latencies, )result[''] = percentile(latencies, )results[key] = resultheaders = ['', '', '', '', '', '', '', '', '']data = []results = sorted(results.items(), key=lambda it: it[][sort_by], reverse=True)def row(key, res):data = [key] + [res[header] for header in headers[:]]return tuple(data)data = [row(key, result) for key, result in results]self._output.write('' % tabulate(data, headers=headers))self._output.flush()", "docstring": "get stats & show them", "id": "f12035:c2:m2"} {"signature": "def is_isomorphic_to(self, other):", "body": "return (isinstance(other, self.__class__)andlen(self.fields) == len(other.fields)andall(a.is_isomorphic_to(b) for a, b in zip(self.fields,other.fields)))", "docstring": "Returns true if all fields of other struct are isomorphic to this\nstruct's fields", "id": "f12036:c2:m4"} {"signature": "def __eq__(self, other):", "body": "return isinstance(other, self.__class__) and self.fields == other.fields", "docstring": "we ignore the length, it might not be set", "id": "f12036:c2:m5"} {"signature": "def __len__(self):", "body": "return len(self._fields)", "docstring": "number of fields, NOT number of bytes", "id": "f12036:c2:m6"} {"signature": "def is_isomorphic_to(self, other):", "body": "return (isinstance(other, self.__class__)and self.field_type == other.field_typeand self.field_id == other.field_id)", "docstring": "Returns true if other field's meta data (everything except value)\nis same as this one", "id": "f12036:c3:m4"} {"signature": "@classmethoddef of_structs(cls, a, b):", "body": "t_diff = ThriftDiff(a, b)t_diff._do_diff()return t_diff", "docstring": "Diff two thrift structs and return the result as a ThriftDiff instance", "id": "f12038:c0:m1"} {"signature": "@classmethoddef of_messages(cls, msg_a, msg_b):", "body": "ok_to_diff, reason = cls.can_diff(msg_a, msg_b)if not ok_to_diff:raise ValueError(reason)return [cls.of_structs(x.value, y.value)for x, y in zip(msg_a.args, msg_b.args)if x.field_type == '']", "docstring": "Diff two thrift messages by comparing their args, raises exceptions if\nfor some reason the messages can't be diffed. 
Only args of type 'struct'\nare compared.\n\nReturns a list of ThriftDiff results - one for each struct arg", "id": "f12038:c0:m2"} {"signature": "@staticmethoddef can_diff(msg_a, msg_b):", "body": "if msg_a.method != msg_b.method:return False, ''if len(msg_a.args) != len(msg_b.args)or not msg_a.args.is_isomorphic_to(msg_b.args):return False, ''return True, None", "docstring": "Check if two thrift messages are diff ready.\n\nReturns a tuple of (boolean, reason_string), i.e. (False, reason_string)\nif the messages can not be diffed along with the reason and\n(True, None) for the opposite case", "id": "f12038:c0:m3"} {"signature": "@propertydef common_fields(self):", "body": "return self._common_fields", "docstring": "List of isomorphically equivalent field pairs which may or may not have\nsame value", "id": "f12038:c0:m6"} {"signature": "@propertydef fields_only_in_a(self):", "body": "return self._fields_only_in_a", "docstring": "List of fields exclusive to first struct", "id": "f12038:c0:m7"} {"signature": "@propertydef fields_only_in_b(self):", "body": "return self._fields_only_in_b", "docstring": "List of fields exclusive to second struct", "id": "f12038:c0:m8"} {"signature": "@propertydef fields_with_same_value(self):", "body": "return self._fields_with_same_value", "docstring": "List of isomorphically equivalent fields for which value is also equal\nNote: this doesn't return a list of 'pairs'", "id": "f12038:c0:m9"} {"signature": "@propertydef field_with_different_value(self):", "body": "return self._fields_with_different_value", "docstring": "List of isomorphically equivalent field pairs for which value is NOT\nequal", "id": "f12038:c0:m10"} {"signature": "def capture(target_url,user_agent=\"\",proxies={}):", "body": "domain = \"\"save_url = urljoin(domain, \"\")headers = {'': user_agent,\"\": \"\",}logger.debug(\"\".format(domain + \"\"))get_kwargs = dict(timeout=,allow_redirects=True,headers=headers,)if proxies:get_kwargs[''] = proxiesresponse = requests.get(domain + \"\", **get_kwargs)response.raise_for_status()html = str(response.content)try:unique_id = html.split('', )[].split('', )[].split('', )[]logger.debug(\"\".format(unique_id))except IndexError:logger.warn(\"\")unique_id = Nonedata = {\"\": target_url,\"\": ,}if unique_id:data.update({\"\": unique_id})post_kwargs = dict(timeout=,allow_redirects=True,headers=headers,data=data)if proxies:post_kwargs[''] = proxieslogger.debug(\"\".format(save_url))response = requests.post(save_url, **post_kwargs)response.raise_for_status()if '' in response.headers:memento = str(response.headers['']).split('')[]logger.debug(\"\".format(memento))return mementoif '' in response.headers:memento = response.headers['']logger.debug(\"\".format(memento))return mementologger.debug(\"\")for i, r in enumerate(response.history):logger.debug(\"\".format(i))logger.debug(r.headers)if '' in r.headers:memento = r.headers['']logger.debug(\"\".format(i+, memento))return mementologger.error(\"\")logger.error(\"\".format(response.status_code))logger.error(response.headers)logger.error(response.text)raise Exception(\"\")", "docstring": "Archives the provided URL using archive.is\n\nReturns the URL where the capture is stored.", "id": "f12041:m0"} {"signature": "@click.command()@click.argument(\"\")@click.option(\"\", \"\", help=\"\")def cli(url, user_agent):", "body": "kwargs = {}if user_agent:kwargs[''] = user_agentarchive_url = capture(url, **kwargs)click.echo(archive_url)", "docstring": "Archives the provided URL using archive.is.", "id": "f12041:m1"} 
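A hedged usage sketch of the capture() helper defined in the records above; the import path and URLs below are assumptions for illustration only, and the return value (the URL of the stored snapshot) follows the docstring:

from archiveis import capture   # assumed module name; substitute wherever capture() actually lives

memento_url = capture(
    "https://example.com/",
    user_agent="my-archiver/0.1 (admin@example.com)",   # hypothetical UA string
)
print(memento_url)   # the URL where the capture is stored, per the docstring

The command-line wrapper in the preceding record exposes the same call via click, so the equivalent shell usage would simply pass the target URL as the single argument.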
{"signature": "@click.command()@click.argument('', type=click.Path(exists=True))def upload_gif(gif):", "body": "client_id = os.environ.get('')client_secret = os.environ.get('')if client_id is None or client_secret is None:click.echo('')returnclient = ImgurClient(client_id, client_secret)click.echo(''.format(click.format_filename(gif)))response = client.upload_from_path(gif)click.echo(''.format(response['']))", "docstring": "Uploads an image file to Imgur", "id": "f12044:m0"} {"signature": "def read(*paths):", "body": "with open(os.path.join(*paths), '') as f:return f.read()", "docstring": "Build a file path from *paths* and return the contents.", "id": "f12045:m0"} {"signature": "def _unwrap_stream(uri, timeout, scanner, requests_session):", "body": "original_uri = uriseen_uris = set()deadline = time.time() + timeoutwhile time.time() < deadline:if uri in seen_uris:logger.info('''', uri)return Noneelse:seen_uris.add(uri)logger.debug('', uri)try:scan_timeout = deadline - time.time()if scan_timeout < :logger.info('''', uri, timeout)return Nonescan_result = scanner.scan(uri, timeout=scan_timeout)except exceptions.ScannerError as exc:logger.debug('', uri, exc)scan_result = Noneif scan_result is not None and not (scan_result.mime.startswith('') orscan_result.mime.startswith('')):logger.debug('', scan_result.mime, uri)return uridownload_timeout = deadline - time.time()if download_timeout < :logger.info('',uri, timeout)return Nonecontent = http.download(requests_session, uri, timeout=download_timeout)if content is None:logger.info('''', original_uri, uri)return Noneuris = playlists.parse(content)if not uris:logger.debug('',uri)return urilogger.debug('', uri, uris[])uri = uris[]", "docstring": "Get a stream URI from a playlist URI, ``uri``.\nUnwraps nested playlists until something that's not a playlist is found or\nthe ``timeout`` is reached.", "id": "f12051:m1"} {"signature": "def mylogger(name=None, filename=None, indent_offset=, level=_logging.DEBUG, stream_level=_logging.WARN, file_level=_logging.INFO):", "body": "if name is not None:logger = _logging.getLogger(name)else:logger = _logging.getLogger()logger.setLevel(level)fmtr = IndentFormatter(indent_offset=indent_offset)fmtr_msgonly = IndentFormatter('')ch = _logging.StreamHandler()ch.setLevel(stream_level)ch.setFormatter(fmtr_msgonly)logger.addHandler(ch)if filename is not None:debugh = _logging.FileHandler(filename=''.format(filename), mode='')debugh.setLevel(_logging.DEBUG)debugh.setFormatter(fmtr_msgonly)logger.addHandler(debugh)fh = _logging.FileHandler(filename=''.format(filename), mode='')fh.setLevel(file_level)fh.setFormatter(fmtr)logger.addHandler(fh)return logger", "docstring": "Sets up logging to *filename*.debug.log, *filename*.log, and the terminal. *indent_offset* attempts to line up the lowest indent level to 0. 
Custom levels:\n\n* *level*: Parent logging level.\n* *stream_level*: Logging level for console stream.\n* *file_level*: Logging level for general file log.", "id": "f12056:m0"} {"signature": "def frexp10(x):", "body": "expon = _np.int(_np.floor(_np.log10(_np.abs(x))))mant = x/_np.power(, expon)return (mant, expon)", "docstring": "Finds the mantissa and exponent of a number :math:`x` such that :math:`x = m 10^e`.\n\nParameters\n----------\n\nx : float\n Number :math:`x` such that :math:`x = m 10^e`.\n\nReturns\n-------\n\nmantissa : float\n Number :math:`m` such that :math:`x = m 10^e`.\nexponent : float\n Number :math:`e` such that :math:`x = m 10^e`.", "id": "f12058:m0"} {"signature": "def linspacestep(start, stop, step=):", "body": "numsteps = _np.int((stop-start)/step)return _np.linspace(start, start+step*numsteps, numsteps+)", "docstring": "Create a vector of values over an interval with a specified step size.\n\nParameters\n----------\n\nstart : float\n The beginning of the interval.\nstop : float\n The end of the interval.\nstep : float\n The step size.\n\nReturns\n-------\nvector : :class:`numpy.ndarray`\n The vector of values.", "id": "f12059:m0"} {"signature": "def gaussian(x, mu, sigma):", "body": "return _np.exp(-(x-mu)**/(*sigma**)) / (_np.sqrt(*_np.pi) * sigma)", "docstring": "Gaussian function of the form :math:`\\\\frac{1}{\\\\sqrt{2 \\\\pi}\\\\sigma} e^{-\\\\frac{(x-\\\\mu)^2}{2\\\\sigma^2}}`.\n\n.. versionadded:: 1.5\n\nParameters\n----------\nx : float\n Function variable :math:`x`.\nmu : float\n Mean of the Gaussian function.\nsigma : float\n Standard deviation of the Gaussian function.", "id": "f12060:m0"} {"signature": "def linspaceborders(array):", "body": "dela = array[] - array[]new_arr = _np.array([array[] - dela / ])delb = array[-] - array[-]array = _np.append(array, array[-] + delb)for i, val in enumerate(array):try:avg = (array[i] + array[i + ]) / new_arr = _np.append(new_arr, avg)except:passreturn new_arr", "docstring": "Generate a new array with numbers interpolated between the numbers of the input array. Extrapolates elements to the left and right sides to get the exterior border as well.\n\nParameters\n----------\n\narray : :class:`numpy.ndarray`\n The original array.\n\nReturns\n-------\n\narray : :class:`numpy.ndarray`\n The interpolated/extrapolated array.", "id": "f12063:m0"} {"signature": "def keys(obj):", "body": "return [key for key in obj.keys()]", "docstring": "Returns an array of strings of the keys like Python 2 used to do.\n\n.. versionadded:: 1.4\n\nParameters\n----------\n\nobj : object\n Object to get keys from.\n\nReturns\n-------\n\nkeys : list\n List of keys.", "id": "f12065:m0"} {"signature": "def get(f, key, default=None):", "body": "if key in f.keys():val = f[key].valueif default is None:return valelse:if _np.shape(val) == _np.shape(default):return valreturn default", "docstring": "Gets an array from datasets.\n\n.. versionadded:: 1.4", "id": "f12065:m1"} {"signature": "def curve_fit_unscaled(*args, **kwargs):", "body": "verbose = kwargs.pop('', False)popt, pcov = _spopt.curve_fit(*args, **kwargs)func = args[]x = args[]y = args[]ddof = len(popt)try:sigma = kwargs['']if sigma is None:sigma = _np.ones(len(y))y_expect = func(x, *popt)chisq_red = _chisquare(y, y_expect, sigma, ddof, verbose=verbose)pcov = pcov / chisq_redreturn popt, pcov, chisq_redexcept ValueError:print('')", "docstring": "Use the reduced chi square to unscale :mod:`scipy`'s scaled :func:`scipy.optimize.curve_fit`. 
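A rough, self-contained sketch of the unscaling idea this record describes, using a toy linear fit (all names here are illustrative; the record itself delegates the reduced chi square to the chisquare() helper defined in a later record and then divides pcov by it):

import numpy as np
from scipy.optimize import curve_fit

def line(x, a, b):
    return a * x + b

x = np.linspace(0.0, 1.0, 20)
sigma = np.full_like(x, 0.1)                       # per-point measurement error
y_obs = line(x, 2.0, 1.0) + np.random.normal(0.0, 0.1, x.size)

popt, pcov = curve_fit(line, x, y_obs, sigma=sigma)
chisq = np.sum(((y_obs - line(x, *popt)) / sigma) ** 2)
chisq_red = chisq / (len(y_obs) - len(popt))       # reduced chi square with ddof = number of fit parameters
pcov_unscaled = pcov / chisq_red                   # the same correction applied to pcov in the record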
*\\*args* and *\\*\\*kwargs* are passed through to :func:`scipy.optimize.curve_fit`. The tuple *popt, pcov, chisq_red* is returned, where *popt* is the optimal values for the parameters, *pcov* is the estimated covariance of *popt*, and *chisq_red* is the reduced chi square. See http://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.optimize.curve_fit.html.", "id": "f12066:m0"} {"signature": "def chisquare(observe, expect, error, ddof, verbose=True):", "body": "chisq = error = error.flatten()observe = observe.flatten()expect = expect.flatten()for i, el in enumerate(observe):chisq = chisq + _np.power((el - expect[i]) / error[i], )red_chisq = chisq / (len(observe) - ddof)if verbose:print(''.format(red_chisq))return red_chisq", "docstring": "Finds the reduced chi square difference of *observe* and *expect* with a given *error* and *ddof* degrees of freedom.\n\n*verbose* flag determines if the reduced chi square is printed to the terminal.", "id": "f12067:m0"} {"signature": "@propertydef popt(self):", "body": "return self._popt", "docstring": "The fit parameters for the fit function.", "id": "f12070:c0:m1"} {"signature": "@propertydef pcov(self):", "body": "return self._pcov", "docstring": "The covariance for the fit parameters.", "id": "f12070:c0:m2"} {"signature": "@propertydef chisq_red(self):", "body": "return self._chisq_red", "docstring": "The reduced chi square.", "id": "f12070:c0:m3"} {"signature": "@propertydef y_unweighted(self):", "body": "return self._y_unweighted", "docstring": "The :math:`y` of the problem :math:`X_{ij} \\\\beta_{i} = y_j`. Setting this attribute forces a recalculation.", "id": "f12071:c0:m2"} {"signature": "@propertydef y_error(self):", "body": "return self._y_error", "docstring": "The measured error of :math:`y` of the problem :math:`X_{ij} \\\\beta_{i} = y_j`. Setting this attribute forces a recalculation.", "id": "f12071:c0:m4"} {"signature": "@propertydef X_unweighted(self):", "body": "return self._X_unweighted", "docstring": "The :math:`X`. 
Setting this attribute forces a recalculation.", "id": "f12071:c0:m6"} {"signature": "@propertydef X(self):", "body": "if self._X is None:X = _copy.deepcopy(self.X_unweighted)for i, el in enumerate(X):X[i, :] = el/self.y_error[i]self._X = Xreturn self._X", "docstring": "The :math:`X` weighted properly by the errors from *y_error*", "id": "f12071:c0:m8"} {"signature": "@propertydef y(self):", "body": "if self._y is None:self._y = self.y_unweighted/self.y_errorreturn self._y", "docstring": "The :math:`X` weighted properly by the errors from *y_error*", "id": "f12071:c0:m9"} {"signature": "@propertydef y_fit(self):", "body": "if self._y_fit is None:self._y_fit = _np.dot(self.X_unweighted, self.beta)return self._y_fit", "docstring": "Using the result of the linear least squares, the result of :math:`X_{ij}\\\\beta_i`", "id": "f12071:c0:m10"} {"signature": "@propertydef beta(self):", "body": "if self._beta is None:self._beta = _np.dot(_np.linalg.pinv(self.X) , self.y)return self._beta", "docstring": "The result :math:`\\\\beta` of the linear least squares", "id": "f12071:c0:m11"} {"signature": "@propertydef covar(self):", "body": "if self._covar is None:self._covar = _np.linalg.inv(_np.dot(_np.transpose(self.X), self.X))return self._covar", "docstring": "The covariance matrix for the result :math:`\\\\beta`", "id": "f12071:c0:m12"} {"signature": "@propertydef chisq_red(self):", "body": "if self._chisq_red is None:self._chisq_red = chisquare(self.y_unweighted.transpose(), _np.dot(self.X_unweighted, self.beta), self.y_error, ddof=, verbose=False)return self._chisq_red", "docstring": "The reduced chi-square of the linear least squares", "id": "f12071:c0:m13"} {"signature": "def fft(values, freq=None, timestamps=None, fill_missing=False):", "body": "if freq is None:from .. 
import qtfreq = qt.getDouble(title='', text='', min=, decimals=, value=)freq = freq.inputif fill_missing:(t_x, x_filled) = fill_missing_timestamps(timestamps, values)else:x_filled = valuesnum_samples = _np.size(x_filled)xfft = _sp.fftpack.rfft(x_filled)factor = freq/num_samplesnum_fft = _np.size(xfft)f = factor * _np.linspace(, num_fft, num_fft)xpow = _np.abs(xfft*_np.conj(xfft))xpow = xpow[:]f = f[:]return (f, xpow)", "docstring": "Adds options to :func:`scipy.fftpack.rfft`:\n\n* *freq* is the frequency the samples were taken at\n* *timestamps* is the time the samples were taken, to help with filling in missing data if *fill_missing* is true", "id": "f12072:m0"} {"signature": "@propertydef gamma(self):", "body": "return _sltr.GeV2gamma(self.E)", "docstring": "Relativistic :math:`\\\\gamma` of beam", "id": "f12073:c0:m1"} {"signature": "@propertydef emit_n(self):", "body": "return self.emit * self.gamma", "docstring": "Normalized emittance of beam :math:`\\\\epsilon_n = \\\\gamma\\\\epsilon`", "id": "f12073:c0:m3"} {"signature": "@propertydef emit(self):", "body": "return self._emit", "docstring": "Emittance of the beam, :math:`\\\\epsilon = \\\\sqrt{ \\\\langle x^2 \\\\rangle \\\\langle {x'}^2 \\\\rangle - \\\\langle x x' \\\\rangle^2 }`", "id": "f12073:c0:m5"} {"signature": "@propertydef sigma(self):", "body": "return _np.power(*_sltr.GeV2joule(self.E)*_spc.epsilon_0 / (self.plasma.n_p * _np.power(_spc.elementary_charge, )) , ) * _np.sqrt(self.emit)", "docstring": "Spot size of matched beam :math:`\\\\left( \\\\frac{2 E \\\\varepsilon_0 }{ n_p e^2 } \\\\right)^{1/4} \\\\sqrt{\\\\epsilon}`", "id": "f12073:c0:m6"} {"signature": "def beta(self, E):", "body": "return / _np.sqrt(self.plasma.k_ion(E))", "docstring": ":math:`\\\\beta` function of matched beam", "id": "f12073:c0:m7"} {"signature": "@propertydef sigma_prime(self):", "body": "return _np.sqrt(self.emit/self.beta(self.E))", "docstring": "Divergence of matched beam", "id": "f12073:c0:m8"} {"signature": "@propertydef gamma(self):", "body": "return _sltr.GeV2gamma(self.E)", "docstring": "Relativistic :math:`\\\\gamma` of beam", "id": "f12073:c1:m1"} {"signature": "@propertydef emit_n(self):", "body": "return self.emit * self.gamma", "docstring": "Specified beam emittance :math:`\\\\epsilon`.", "id": "f12073:c1:m3"} {"signature": "@emit_n.setterdef emit_n(self, value):", "body": "self.emit = value / self.gamma", "docstring": "Specified normalized beam emittance :math:`\\\\gamma \\\\epsilon`.", "id": "f12073:c1:m4"} {"signature": "@propertydef beta(self):", "body": "return self.sigma**/self.emit", "docstring": "The Courant-Snyder parameter :math:`\\\\beta` that is matched.", "id": "f12073:c1:m5"} {"signature": "@propertydef n_p(self):", "body": "return *_sltr.GeV2joule(self.E)*_spc.epsilon_0 / (self.beta*_spc.elementary_charge)**", "docstring": "The plasma density in SI units.", "id": "f12073:c1:m6"} {"signature": "@propertydef n_p_cgs(self):", "body": "return self.n_p*", "docstring": "The plasma density in CGS units.", "id": "f12073:c1:m7"} {"signature": "@propertydef plasma(self, species=_pt.hydrogen):", "body": "return _Plasma(self.n_p, species=species)", "docstring": "The matched :class:`Plasma`.", "id": "f12073:c1:m8"} {"signature": "@propertydef k_xi(self):", "body": "return self._k_xi", "docstring": "Ion focusing wavenumber :math:`k_\\\\xi`.", "id": "f12074:c0:m1"} {"signature": "@propertydef phi(self):", "body": "return self._phi", "docstring": "Particle phases :math:`\\\\phi`.", "id": "f12074:c0:m2"} {"signature": "@propertydef 
beam(self):", "body": "return self._beam", "docstring": "Initial beam object.", "id": "f12074:c0:m3"} {"signature": "@propertydef plasma(self):", "body": "return self._plasma", "docstring": "Initial plasma object.", "id": "f12074:c0:m4"} {"signature": "@propertydef s(self):", "body": "return self._s", "docstring": "Coordinates of beam (:math:`s`).", "id": "f12074:c0:m5"} {"signature": "@propertydef x(self):", "body": "return self._x", "docstring": "Coordinates of beam (:math:`x`).", "id": "f12074:c0:m6"} {"signature": "@propertydef xp(self):", "body": "return self._xp", "docstring": "Coordinates of beam (:math:`x'`).", "id": "f12074:c0:m7"} {"signature": "@propertydef spotsq(self):", "body": "return _np.mean(self.x**, axis=)", "docstring": "The beam variance :math:`\\\\langle x^2 \\\\rangle`.", "id": "f12074:c0:m9"} {"signature": "@propertydef divsq(self):", "body": "return _np.mean(self.xp**, axis=)", "docstring": "The beam divergence :math:`\\\\langle x'^2 \\\\rangle`.", "id": "f12074:c0:m10"} {"signature": "@propertydef xxp(self):", "body": "return _np.mean(self.x*self.xp, axis=)", "docstring": "The beam correlation :math:`\\\\langle x x' \\\\rangle`.", "id": "f12074:c0:m11"} {"signature": "@propertydef emit_measured(self):", "body": "return _np.sqrt(self.spotsq*self.divsq-self.xxp**)", "docstring": "The beam emittance :math:`\\\\langle x x' \\\\rangle`.", "id": "f12074:c0:m12"} {"signature": "@propertydef nb0(self):", "body": "return self.N_e / ( (*_np.pi)**(/) * self.sig_r** * self.sig_xi)", "docstring": "On-axis beam density :math:`n_{b,0}`.", "id": "f12075:c0:m2"} {"signature": "def lambda_large(self, r0):", "body": "return *_np.sqrt(*_np.pi/self.k)*r0", "docstring": "The wavelength for large (:math:`r_0 < \\\\sigma_r`) oscillations.", "id": "f12075:c0:m3"} {"signature": "def r_small(self, x, r0):", "body": "return r0*_np.cos(_np.sqrt(self.k_small) * x)", "docstring": "Approximate trajectory function for small (:math:`r_0 < \\\\sigma_r`) oscillations.", "id": "f12075:c0:m4"} {"signature": "def r_large(self, x, r0):", "body": "return r0*_np.cos(x*self.omega_big(r0))", "docstring": "Approximate trajectory function for large (:math:`r_0 > \\\\sigma_r`) oscillations.", "id": "f12075:c0:m5"} {"signature": "@propertydef k(self):", "body": "try:return self._kexcept AttributeError:self._k = e** * self.N_e / ( (*_np.pi)**(/) * e0 * self.m * c** * self.sig_xi)return self._k", "docstring": "Driving force term: :math:`r'' = -k \\\\left( \\\\frac{1-e^{-r^2/2{\\\\sigma_r}^2}}{r} \\\\right)`", "id": "f12075:c0:m6"} {"signature": "@propertydef k_small(self):", "body": "return self.k / (*self.sig_r**)", "docstring": "Small-angle driving force term: :math:`r'' = -k_{small} r`.\n\nNote: :math:`k_{small} = \\\\frac{k}{2{\\\\sigma_r^2}}`", "id": "f12075:c0:m7"} {"signature": "@propertydef sig_r(self):", "body": "return self._sig_r", "docstring": "Transverse R.M.S. 
width", "id": "f12075:c0:m8"} {"signature": "@propertydef species(self):", "body": "return self._species", "docstring": "The species of gas used (see :class:`periodictable.core.Element`).\n\nFor instance:\n\n >>> periodictable.hydrogen", "id": "f12076:c0:m3"} {"signature": "@propertydef dims(self):", "body": "return self._dims", "docstring": "Number of dimensions.", "id": "f12076:c0:m4"} {"signature": "@propertydef lambda_small(self):", "body": "return *_np.pi/_np.sqrt(self.k_small)", "docstring": "The wavelength for small (:math:`r_0 < \\\\sigma_r`) oscillations.", "id": "f12076:c0:m5"} {"signature": "@propertydef m(self):", "body": "return amu * self.species.mass", "docstring": "Ion mass.", "id": "f12076:c0:m7"} {"signature": "@propertydef A(self):", "body": "return self.species.mass", "docstring": "Ion mass in units of AMU.", "id": "f12076:c0:m8"} {"signature": "@propertydef N_e(self):", "body": "return self._N_e", "docstring": "Number of electrons in bunch.", "id": "f12076:c0:m9"} {"signature": "@propertydef sig_xi(self):", "body": "return self._sig_xi", "docstring": "Longitudinal R.M.S. width :math:`\\\\sigma_\\\\xi`.", "id": "f12076:c0:m10"} {"signature": "def q(self, x, q0):", "body": "y1_0 = q0y0_0 = y0 = [y0_0, y1_0]y = _sp.integrate.odeint(self._func, y0, x, Dfun=self._gradient, rtol=self.rtol, atol=self.atol)return y[:, ]", "docstring": "Numerically solved trajectory function for initial conditons :math:`q(0) = q_0` and :math:`q'(0) = 0`.", "id": "f12076:c0:m11"} {"signature": "@propertydef nb0(self):", "body": "return self._nb0", "docstring": "On-axis beam density.", "id": "f12078:c0:m2"} {"signature": "@propertydef E(self):", "body": "return self._E", "docstring": "Beam energy in GeV.", "id": "f12078:c0:m3"} {"signature": "@propertydef dE(self):", "body": "return self._dE", "docstring": "Beam energy spread.", "id": "f12078:c0:m4"} {"signature": "@propertydef emit(self):", "body": "return self._emit", "docstring": "Beam emittance :math:`\\\\epsilon`.", "id": "f12078:c0:m5"} {"signature": "@propertydef emit_n(self):", "body": "return self._emit*_sltr.GeV2gamma(self.E)", "docstring": "Normalized beam emittance :math:`\\\\gamma \\\\epsilon`.", "id": "f12078:c0:m6"} {"signature": "@propertydef s_r(self):", "body": "return self._s_r", "docstring": "The beam RMS width :math:`\\\\sigma_r`.", "id": "f12078:c1:m1"} {"signature": "def set_moments(self, sx, sxp, sxxp):", "body": "self._sx = sxself._sxp = sxpself._sxxp = sxxpemit = _np.sqrt(sx** * sxp** - sxxp**)self._store_emit(emit=emit)", "docstring": "Sets the beam moments directly.\n\nParameters\n----------\nsx : float\n Beam moment where :math:`\\\\text{sx}^2 = \\\\langle x^2 \\\\rangle`.\nsxp : float\n Beam moment where :math:`\\\\text{sxp}^2 = \\\\langle x'^2 \\\\rangle`.\nsxxp : float\n Beam moment where :math:`\\\\text{sxxp} = \\\\langle x x' \\\\rangle`.", "id": "f12078:c2:m1"} {"signature": "@propertydef sx(self):", "body": "return self._sx", "docstring": "Beam moment where :math:`\\\\text{sx}^2 = \\\\langle x^2 \\\\rangle`.", "id": "f12078:c2:m2"} {"signature": "@propertydef sxp(self):", "body": "return self._sxp", "docstring": "Beam moment where :math:`\\\\text{sxp}^2 = \\\\langle x'^2 \\\\rangle`.", "id": "f12078:c2:m3"} {"signature": "@propertydef sxxp(self):", "body": "return self._sxxp", "docstring": "Beam moment where :math:`\\\\text{sxxp} = \\\\langle x x' \\\\rangle`.", "id": "f12078:c2:m4"} {"signature": "def set_Courant_Snyder(self, beta, alpha, emit=None, emit_n=None):", "body": "self._store_emit(emit=emit, 
emit_n=emit_n)self._sx = _np.sqrt(beta*self.emit)self._sxp = _np.sqrt((+alpha**)/beta*self.emit)self._sxxp = -alpha*self.emit", "docstring": "Sets the beam moments indirectly using Courant-Snyder parameters.\n\nParameters\n----------\nbeta : float\n Courant-Snyder parameter :math:`\\\\beta`.\nalpha : float\n Courant-Snyder parameter :math:`\\\\alpha`.\nemit : float\n Beam emittance :math:`\\\\epsilon`.\nemit_n : float\n Normalized beam emittance :math:`\\\\gamma \\\\epsilon`.", "id": "f12078:c2:m5"} {"signature": "@propertydef beta(self):", "body": "beta = _np.sqrt(self.sx)/self.emitreturn beta", "docstring": "Courant-Snyder parameter :math:`\\\\beta`.", "id": "f12078:c2:m6"} {"signature": "@propertydef alpha(self):", "body": "alpha = -self.sxxp/self.emitreturn alpha", "docstring": "Courant-Snyder parameter :math:`\\\\alpha`.", "id": "f12078:c2:m7"} {"signature": "def lambda_large(self, y0):", "body": "return *_np.sqrt(*y0/self.k)", "docstring": "The wavelength for large (:math:`r_0 < \\\\sigma_r`) oscillations.", "id": "f12079:c0:m1"} {"signature": "@propertydef sig_x(self):", "body": "return self._sig_x", "docstring": "The R.M.S. width :math:`\\\\sigma_x`.", "id": "f12079:c0:m3"} {"signature": "@propertydef sig_y(self):", "body": "return self._sig_y", "docstring": "The R.M.S. width :math:`\\\\sigma_y`.", "id": "f12079:c0:m4"} {"signature": "@propertydef nb0(self):", "body": "return self.N_e / (*_np.sqrt() * _np.pi * self.sig_x * self.sig_y * self.sig_xi)", "docstring": "On-axis beam density :math:`n_{b,0}`.", "id": "f12079:c0:m5"} {"signature": "@propertydef k(self):", "body": "try:return self._kexcept AttributeError:self._k = _np.sqrt(_np.pi/) * e** * self.nb0 * self.sig_y / ( e0 * self.m * c**)return self._k", "docstring": "Driving force term: :math:`r'' = -k \\\\left( \\\\frac{1-e^{-r^2/2{\\\\sigma_r}^2}}{r} \\\\right)`", "id": "f12079:c0:m6"} {"signature": "@propertydef k_small(self):", "body": "return self.k * _np.sqrt(/_np.pi) / self.sig_y", "docstring": "Small-angle driving force term: :math:`r'' = -k_{small} r`.\n\nNote: :math:`k_{small} = \\\\frac{k}{2{\\\\sigma_r^2}}`", "id": "f12079:c0:m7"} {"signature": "@propertydef species(self):", "body": "return self._species", "docstring": "Species used in plasma.", "id": "f12081:c0:m1"} {"signature": "@propertydef m(self):", "body": "return self._species.mass * _spc.m_u", "docstring": "Mass of the ion in AMU.", "id": "f12081:c0:m2"} {"signature": "@propertydef E_rest(self):", "body": "return self.m * _spc.c**", "docstring": "Rest energy of the plasma ion.", "id": "f12081:c0:m3"} {"signature": "@propertydef n_p(self):", "body": "return self._n_p", "docstring": "Plasma density in SI units.", "id": "f12081:c0:m4"} {"signature": "@propertydef w_p(self):", "body": "return _np.sqrt(self.n_p * _np.power(_spc.e, ) / (_spc.m_e * _spc.epsilon_0))", "docstring": "Plasma frequency :math:`\\\\omega_p` for given plasma density", "id": "f12081:c0:m6"} {"signature": "def k_ion(self, E):", "body": "return self.n_p * _np.power(_spc.e, ) / (*_sltr.GeV2joule(E) * _spc.epsilon_0)", "docstring": "Geometric focusing force due to ion column for given plasma density as a function of *E*", "id": "f12081:c0:m7"} {"signature": "@propertydef n_p_cgs(self):", "body": "return self.n_p * ", "docstring": "Plasma density in CGS units", "id": "f12081:c0:m9"} {"signature": "@propertydef sig_xi(self):", "body": "return self._sig_xi", "docstring": "Std. dev. 
of :math:`\\\\xi`: :math:`\\\\sigma_\\\\xi`.", "id": "f12082:c0:m1"} {"signature": "@propertydef xi(self):", "body": "return self._xi", "docstring": "Particle coordinates :math:`\\\\xi`.", "id": "f12082:c0:m2"} {"signature": "@propertydef mean(self):", "body": "return self._mean", "docstring": "Mean for x, x'.", "id": "f12082:c0:m3"} {"signature": "@propertydef cov(self):", "body": "return self._cov", "docstring": "Covariance for x, x'.", "id": "f12082:c0:m4"} {"signature": "@propertydef nparts(self):", "body": "return self._nparts", "docstring": "Number of particles in the beam.", "id": "f12082:c0:m5"} {"signature": "@propertydef x(self):", "body": "return self._x", "docstring": "Particle coordinates :math:`x`.", "id": "f12082:c0:m6"} {"signature": "@propertydef xp(self):", "body": "return self._xp", "docstring": "Particle coordinates :math:`x'`.", "id": "f12082:c0:m7"} {"signature": "@propertydef delta(self):", "body": "return self._delta", "docstring": "Particle coordinates :math:`\\\\delta`.", "id": "f12082:c0:m8"} {"signature": "@propertydef emit(self):", "body": "return self._emit", "docstring": "Beam emittance :math:`\\\\epsilon`.", "id": "f12082:c0:m9"} {"signature": "@propertydef E(self):", "body": "return self._E", "docstring": "Beam energy in GeV.", "id": "f12082:c0:m10"} {"signature": "@propertydef sig_delta(self):", "body": "return self._sig_delta", "docstring": "Beam energy spread :math:`\\\\sigma_\\\\delta`.", "id": "f12082:c0:m11"} {"signature": "@propertydef beta(self):", "body": "return self._beta", "docstring": "Beam beta :math:`\\\\beta`.", "id": "f12082:c0:m12"} {"signature": "@propertydef alpha(self):", "body": "return self._alpha", "docstring": "Beam alpha :math:`\\\\alpha`.", "id": "f12082:c0:m13"} {"signature": "@propertydef step(self):", "body": "return self._step", "docstring": "The current step.", "id": "f12083:c0:m1"} {"signature": "def githubtunnel(user1, server1, user2, server2, port, verbose, stanford=False):", "body": "if stanford:port_shift = else:port_shift = command1 = ''.format(port--port_shift, server2, user1, server1)command2 = ''.format(port-port_shift, port-port_shift-, user2)command3 = ''.format(port, port-, user2)if verbose:print(command1)if stanford:print(command2)print(command3)try:call(shlex.split(command1))if stanford:call(shlex.split(command2))call(shlex.split(command3))except:print('')pass", "docstring": "Opens a nested tunnel, first to *user1*@*server1*, then to *user2*@*server2*, for accessing on *port*.\n\nIf *verbose* is true, prints various ssh commands.\n\nIf *stanford* is true, shifts ports up by 1.\n\nAttempts to get *user1*, *user2* from environment variable ``USER_NAME`` if called from the command line.", "id": "f12085:m0"} {"signature": "def pdf2png(file_in, file_out):", "body": "command = ''.format(file_in, file_out)_subprocess.call(_shlex.split(command))", "docstring": "Uses `ImageMagick `_ to convert an input *file_in* pdf to a *file_out* png. 
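The command string is elided in this record, so as a loose sketch only: the approach the docstring names (shelling out to ImageMagick's convert) would look roughly like the following, with illustrative file names and the assumption that convert is on the PATH:

import shlex
import subprocess

command = "convert {} {}".format("figure.pdf", "figure.png")   # hypothetical file names
subprocess.call(shlex.split(command))                          # mirrors the _subprocess.call(_shlex.split(...)) pattern in the record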
(Untested with other formats.)\n\nParameters\n----------\n\nfile_in : str\n The path to the pdf file to be converted.\nfile_out : str\n The path to the png file to be written.", "id": "f12088:m0"} {"signature": "def BDES2K(bdes, quad_length, energy):", "body": "bdes = _np.float_(bdes)quad_length = _np.float_(quad_length)energy = _np.float_(energy)Brho = energy/_np.float_()K = bdes/(Brho*quad_length)logger.log(level=loggerlevel, msg=''.format(bdes = bdes ,quad_length = quad_length ,energy = energy ,K = K))return K", "docstring": "Converts a quadrupole :math:`B_des` into a geometric focusing strength :math:`K`.\n\nParameters\n----------\n\nbdes : float\n The magnet value of :math:`B_des`.\nquad_length : float\n The length of the quadrupole in meters.\nenergy : float\n The design energy of the beam in GeV.\n\nReturns\n-------\nK : float\n The geometric focusing strength :math:`K`.", "id": "f12090:m0"} {"signature": "def K2BDES(K, quad_length, energy):", "body": "K = _np.float_(K)quad_length = _np.float_(quad_length)energy = _np.float_(energy)Brho = energy/_np.float_()BDES = K*Brho*quad_lengthlogger.log(level=loggerlevel, msg=''.format(bdes = BDES ,quad_length = quad_length ,energy = energy ,K = K))return BDES", "docstring": "Returns the BDES for a quadrupole with geometric strength *K* and length *quad_length* for a beam with a given *energy*.\nConverts a geometric focusing strength :math:`K` into a quadrupole :math:`B_des`.\n\nParameters\n----------\nK : float\n The geometric focusing strength :math:`K`.\nquad_length : float\n The length of the quadrupole in meters.\nenergy : float\n The design energy of the beam in GeV.\n\nReturns\n-------\nbdes : float\n The magnet value of :math:`B_des`.", "id": "f12090:m1"} {"signature": "def fitimageslice(img, res_x, res_y, xslice, yslice, avg_e_func=None, h5file=None, plot=False):", "body": "x_start = xslice[]x_end = xslice[]y_start = yslice[]y_end = yslice[]y_low = _np.round(y_start-) + y_high = _np.round(y_end-) + y_px = linspacestep(, img.shape[])y_bool = _np.logical_and(y_low < y_px, y_px < y_high)strip = img[y_bool, x_start:x_end]histdata = _np.sum(strip, )xbins = len(histdata)x = _np.linspace(, xbins, xbins)*res_xvarbool = Truegaussout = _sp.GaussResults(x,histdata,sigma_y = _np.ones(xbins),variance = varbool,background = True,p0 = [, , , ])if avg_e_func is not None:relcounts = _np.sum(strip, ) / _np.float(_np.sum(strip))Eavg = for i, val in enumerate(linspacestep(y_low, y_high-)):Eavg = Eavg + avg_e_func(val, val+, h5file, res_y)*relcounts[i]return Eavg, gaussoutelse:return gaussout", "docstring": "Fits a gaussian to a slice of an image *img* specified by *xslice* x-coordinates and *yslice* y-coordinates. *res_x* and *res_y* specify image resolution in x and y. *avg_e_func* is a function that returns the energy of the image as a function of x. 
It should have the form:\n\n*avg_e_func(x_1, x_2, h5file, res_y)*\n\nFits a gaussian to a slice of an image.\n\nParameters\n----------\n\nimg : array\n Image to be fitted.\nres_x : int\n Image resolution in :math:`x`.\nres_y : int\n Image resolution in :math:`y`.\nxslice : (int, int)\n Slice coordinates in :math:`x`\nyslice : (int, int)\n Slice coordinates in :math:`y`\navg_e_func : function\n Of the form *avg_e_func(x_1, x_2, h5file, res_y)*, returns the energy of the image as a function of :math:`x`.\nh5file : h5file\n Instance from dataset.\nplot : boolean\n Whether to plot or not.", "id": "f12094:m0"} {"signature": "def colorbar(ax, im, fig=None, loc=\"\", size=\"\", pad=\"\"):", "body": "if fig is None:fig = ax.get_figure()if loc == \"\" or loc == \"\":width = fig.get_figwidth()new = width * ( + _pc2f(size) + _pc2f(pad))_logger.debug(''.format(new))elif loc == \"\" or loc == \"\":height = fig.get_figheight()new = height * ( + _pc2f(size) + _pc2f(pad))_logger.debug(''.format(new))divider = _ag1.make_axes_locatable(ax)cax = divider.append_axes(loc, size=size, pad=pad)return cax, _plt.colorbar(im, cax=cax)", "docstring": "Adds a polite colorbar that steals space so :func:`matplotlib.pyplot.tight_layout` works nicely.\n\n.. versionadded:: 1.3\n\nParameters\n----------\n\nax : :class:`matplotlib.axis.Axis`\n The axis to plot to.\nim : :class:`matplotlib.image.AxesImage`\n The plotted image to use for the colorbar.\nfig : :class:`matplotlib.figure.Figure`, optional\n The figure to plot to.\nloc : str, optional\n The location to place the axes.\nsize : str, optional\n The size to allocate for the colorbar.\npad : str, optional\n The amount to pad the colorbar.", "id": "f12095:m0"} {"signature": "def pcolor_axes(array, px_to_units=px_to_units):", "body": "x_size = array.shape[]+y_size = array.shape[]+x = _np.empty((x_size, y_size))y = _np.empty((x_size, y_size))for i in range(x_size):for j in range(y_size):x[i, j], y[i, j] = px_to_units(i-, j-)return x, y", "docstring": "Return axes :code:`x, y` for *array* to be used with :func:`matplotlib.pyplot.color`.\n\n*px_to_units* is a function to convert pixels to units. By default, returns pixels.", "id": "f12096:m1"} {"signature": "def hist(x, bins=, labels=None, aspect=\"\", plot=True, ax=None, range=None):", "body": "h, edge = _np.histogram(x, bins=bins, range=range)mids = edge + (edge[]-edge[])/mids = mids[:-]if plot:if ax is None:_plt.hist(x, bins=bins, range=range)else:ax.hist(x, bins=bins, range=range)if labels is not None:_addlabel(labels[], labels[], labels[])return h, mids", "docstring": "Creates a histogram of data *x* with a *bins*, *labels* = :code:`[title, xlabel, ylabel]`.", "id": "f12098:m0"} {"signature": "def plot(*args, ax=None, **kwargs):", "body": "if ax is None:fig, ax = _setup_axes()pl = ax.plot(*args, **kwargs)if _np.shape(args)[] > :if type(args[]) is not str:min_x = min(args[])max_x = max(args[])ax.set_xlim((min_x, max_x))return pl", "docstring": "Plots but automatically resizes x axis.\n\n.. 
versionadded:: 1.4\n\nParameters\n----------\nargs\n Passed on to :meth:`matplotlib.axis.Axis.plot`.\nax : :class:`matplotlib.axis.Axis`, optional\n The axis to plot to.\nkwargs\n Passed on to :meth:`matplotlib.axis.Axis.plot`.", "id": "f12101:m0"} {"signature": "def set_cmap(self, cmap):", "body": "self.AxesImage.set_cmap(cmap)", "docstring": "Sets color map to *cmap*.", "id": "f12102:c0:m2"} {"signature": "@propertydef AxesImage(self):", "body": "return self._AxesImage", "docstring": "The :class:`matplotlib.image.AxesImage` from :meth:`matplotlib.axes.Axes.imshow`.", "id": "f12102:c0:m3"} {"signature": "@propertydef ax(self):", "body": "return self._ax_img", "docstring": "The :class:`matplotlib.axes.Axes` used for :meth:`matplotlib.axes.Axes.imshow`.", "id": "f12102:c0:m4"} {"signature": "@propertydef imgmax(self):", "body": "return _np.max(self.image)", "docstring": "Highest value of input image.", "id": "f12102:c0:m5"} {"signature": "@propertydef imgmin(self):", "body": "return _np.min(self.image)", "docstring": "Lowest value of input image.", "id": "f12102:c0:m6"} {"signature": "@propertydef clim_min(self):", "body": "return self.minslider.val", "docstring": "Slider value for minimum", "id": "f12102:c0:m8"} {"signature": "@propertydef clim_max(self):", "body": "return self.maxslider.val", "docstring": "Slider value for maximum", "id": "f12102:c0:m10"} {"signature": "@propertydef image(self):", "body": "return self._image", "docstring": "The image loaded.", "id": "f12102:c0:m12"} {"signature": "def tile():", "body": "figs = plt.get_fignums()x = y = toppad = size = np.array([, ])if ( len(figs) != ):fig = plt.figure(figs[])screen = fig.canvas.window.get_screen()screenx = screen.get_monitor_geometry(screen.get_primary_monitor())screenx = screenx[]fig = plt.figure(figs[])fig.canvas.manager.window.move(x, y)maxy = np.array(fig.canvas.manager.window.get_position())[]size = np.array(fig.canvas.manager.window.get_size())y = maxyx += size[]+for fig in figs[:]:fig = plt.figure(fig)size = np.array(fig.canvas.manager.window.get_size())if ( x+size[] > screenx ):x = y = maxymaxy = y+size[]+toppadelse:maxy = max(maxy, y+size[]+toppad)fig.canvas.manager.window.move(x, y)x += size[] + ", "docstring": "Tiles open figures.", "id": "f12103:m0"} {"signature": "def set_cmap(self, cmap):", "body": "self.AxesImage.set_cmap(cmap)", "docstring": "Sets color map to *cmap*.", "id": "f12104:c0:m5"} {"signature": "@propertydef num_imgs(self):", "body": "return self._num_imgs", "docstring": "The number of images.", "id": "f12104:c0:m6"} {"signature": "@propertydef AxesImage(self):", "body": "return self._AxesImage", "docstring": "The :class:`matplotlib.image.AxesImage` from :meth:`matplotlib.axes.Axes.imshow`.", "id": "f12104:c0:m7"} {"signature": "@propertydef ax(self):", "body": "return self._ax_img", "docstring": "The :class:`matplotlib.axes.Axes` used for :meth:`matplotlib.axes.Axes.imshow`.", "id": "f12104:c0:m8"} {"signature": "@propertydef imgmax(self):", "body": "if not hasattr(self, ''):imgmax = _np.max(self.images[])for img in self.images:imax = _np.max(img)if imax > imgmax:imgmax = imaxself._imgmax = imgmaxreturn self._imgmax", "docstring": "Highest value of input image.", "id": "f12104:c0:m9"} {"signature": "@propertydef imgmin(self):", "body": "if not hasattr(self, ''):imgmin = _np.min(self.images[])for img in self.images:imin = _np.min(img)if imin > imgmin:imgmin = iminself._imgmin = imgminreturn _np.min(self.image)", "docstring": "Lowest value of input image.", "id": "f12104:c0:m10"} {"signature": 
"@propertydef clim_min(self):", "body": "return self.minslider.val", "docstring": "Slider value for minimum", "id": "f12104:c0:m12"} {"signature": "@propertydef clim_max(self):", "body": "return self.maxslider.val", "docstring": "Slider value for maximum", "id": "f12104:c0:m14"} {"signature": "@propertydef images(self):", "body": "return self._images", "docstring": "The array of images.", "id": "f12104:c0:m16"} {"signature": "@propertydef image(self):", "body": "return self._images[self._image_ind]", "docstring": "The image loaded.", "id": "f12104:c0:m17"} {"signature": "def rgb2gray(image):", "body": "return _np.dot(image, [, , ])", "docstring": "Convert an rgb image to grayscale.", "id": "f12105:m0"} {"signature": "def NonUniformImage(x, y, z, ax=None, fig=None, cmap=None, alpha=None, scalex=True, scaley=True, add_cbar=True, **kwargs):", "body": "if ax is None and fig is None:fig, ax = _setup_axes()elif ax is None:ax = fig.gca()elif fig is None:fig = ax.get_figure()norm = kwargs.get('', None)im = _mplim.NonUniformImage(ax, **kwargs)vmin = kwargs.pop('', _np.min(z))vmax = kwargs.pop('', _np.max(z))if cmap is not None:im.set_cmap(cmap)m = _cm.ScalarMappable(cmap=im.get_cmap(), norm=norm)m.set_array(z)if add_cbar:cax, cb = _cb(ax=ax, im=m, fig=fig)if alpha is not None:im.set_alpha(alpha)im.set_data(x, y, z)ax.images.append(im)if scalex:xmin = min(x)xmax = max(x)ax.set_xlim(xmin, xmax)if scaley:ymin = min(y)ymax = max(y)ax.set_ylim(ymin, ymax)return _SI(im=im, cb=cb, cax=cax)", "docstring": "Used to plot a set of coordinates.\n\n\nParameters\n----------\nx, y : :class:`numpy.ndarray`\n 1-D ndarrays of lengths N and M, respectively, specifying pixel centers\nz : :class:`numpy.ndarray`\n An (M, N) ndarray or masked array of values to be colormapped, or a (M, N, 3) RGB array, or a (M, N, 4) RGBA array.\nax : :class:`matplotlib.axes.Axes`, optional\n The axis to plot to.\nfig : :class:`matplotlib.figure.Figure`, optional\n The figure to plot to.\ncmap : :class:`matplotlib.colors.Colormap`, optional\n The colormap to use.\nalpha : float, optional\n The transparency to use.\nscalex : bool, optional\n To set the x limits to available data\nscaley : bool, optional\n To set the y limits to available data\nadd_cbar : bool, optional\n Whether ot add a colorbar or not.\n\nReturns\n-------\nimg : :class:`matplotlib.image.NonUniformImage`\n Object representing the :class:`matplotlib.image.NonUniformImage`.", "id": "f12107:m0"} {"signature": "def showfig(fig, aspect=\"\"):", "body": "ax = fig.gca()alim = list(ax.axis())if alim[] < alim[]:temp = alim[]alim[] = alim[]alim[] = tempax.axis(alim)ax.set_aspect(aspect)fig.show()", "docstring": "Shows a figure with a typical orientation so that x and y axes are set up as expected.", "id": "f12109:m0"} {"signature": "def setup_figure(rows=, cols=, **kwargs):", "body": "fig = _plt.figure(**kwargs)gs = _gridspec.GridSpec(rows, cols)return fig, gs", "docstring": "Sets up a figure with a number of rows (*rows*) and columns (*cols*), *\\*\\*kwargs* passes through to :class:`matplotlib.figure.Figure`.\n\n.. versionchanged:: 1.3\n Supports *\\*\\*kwargs* pass-through to :class:`matplotlib.figure.Figure`.\n\n.. 
versionchanged:: 1.2\n Changed *gridspec_x* to *rows*, *gridspec_y* to *cols*, added *figsize* control.\n\nParameters\n----------\n\nrows : int\n Number of rows to create.\ncols : int\n Number of columns to create.\n\nReturns\n-------\n\nfig : :class:`matplotlib.figure.Figure`\n The figure.\ngs : :class:`matplotlib.gridspec.GridSpec`\n Instance with *gridspec_x* rows and *gridspec_y* columns", "id": "f12110:m0"} {"signature": "def imshow(X, ax=None, add_cbar=True, rescale_fig=True, **kwargs):", "body": "return _plot_array(X, plottype=_IMSHOW, ax=ax, add_cbar=add_cbar, rescale_fig=rescale_fig, **kwargs)", "docstring": "Plots an array *X* such that the first coordinate is the *x* coordinate and the second coordinate is the *y* coordinate, with the origin at the bottom left corner.\n\nOptional argument *ax* allows an existing axes to be used.\n\n*\\*\\*kwargs* are passed on to :meth:`matplotlib.axes.Axes.imshow`.\n\n.. versionadded:: 1.3\n\nReturns\n-------\nfig, ax, im :\n if axes aren't specified.\nim :\n if axes are specified.", "id": "f12111:m0"} {"signature": "def contour(X, ax=None, add_cbar=True, rescale_fig=True, **kwargs):", "body": "return _plot_array(X, plottype=_CONTOUR, ax=ax, add_cbar=add_cbar, rescale_fig=rescale_fig, **kwargs)", "docstring": "Plots an array *X* such that the first coordinate is the *x* coordinate and the second coordinate is the *y* coordinate, with the origin at the bottom left corner.\n\nOptional argument *ax* allows an existing axes to be used.\n\n*\\*\\*kwargs* are passed on to :meth:`matplotlib.axes.Axes.contour`.\n\n.. versionadded:: 1.3\n\nReturns\n-------\nim : :class:`matplotlib.image.AxesImage`.", "id": "f12111:m1"} {"signature": "def quiver(*args, ax=None, rescale_fig=True, **kwargs):", "body": "return _plot_array(*args, plottype=_QUIVER, ax=ax, add_cbar=False, rescale_fig=rescale_fig, **kwargs)", "docstring": "Plots an array *X* such that the first coordinate is the *x* coordinate and the second coordinate is the *y* coordinate, with the origin at the bottom left corner.\n\nOptional argument *ax* allows an existing axes to be used.\n\n*\\*\\*kwargs* are passed on to :meth:`matplotlib.axes.Axes.quiver`.\n\n.. versionadded:: 1.3\n\nReturns\n-------\nim : :class:`matplotlib.image.AxesImage`.", "id": "f12111:m2"} {"signature": "def scaled_figsize(X, figsize=None, h_pad=None, v_pad=None):", "body": "if figsize is None:figsize = _mpl.rcParams['']width, height = _np.shape(X)ratio = width / heightif ratio > figsize[]/figsize[]:figsize[] = figsize[] / ratioelse:figsize[] = figsize[] * ratioreturn figsize", "docstring": "Given an array *X*, determine a good size for the figure to be by shrinking it to fit within *figsize*. If not specified, shrinks to fit the figsize specified by the current :attr:`matplotlib.rcParams`.\n\n.. 
versionadded:: 1.3", "id": "f12111:m4"} {"signature": "def imshow_batch(images, cbar=True, show=True, pdf=None, figsize=(, ), rows=, columns=, cmap=None, **kwargs):", "body": "images = _np.array(images)gs = _gridspec.GridSpec(rows, columns)num_imgs = images.shape[]max_ind = num_imgs-per_page = rows*columnsnum_pages = _np.int(_np.ceil(num_imgs/per_page))fig_array = _np.empty(shape=num_pages, dtype=object)if num_pages > :logger.info('')if pdf is not None:f = _PdfPages(pdf)for p in range(num_pages):fig_array[p] = _plt.figure(figsize=figsize)pg_max_ind = _np.min( [(p+) * per_page - , max_ind] )num_rows = _np.int(_np.ceil((pg_max_ind+ - p * per_page) / columns))for i in range(num_rows):i_min_ind = p * per_page + i * columnscol_max_ind = _np.min([i_min_ind + columns - , max_ind])for j, image in enumerate(images[i_min_ind:col_max_ind+]):ax = fig_array[p].add_subplot(gs[i, j])try:if _np.issubdtype(image.dtype, _np.integer):image = _np.array(image, dtype=float)except:passplot = ax.imshow(image, **kwargs)if cmap is not None:plot.set_cmap(cmap)if cbar:fig_array[p].colorbar(plot)fig_array[p].tight_layout()if pdf is not None:f.savefig(fig_array[p])if not show:_plt.close(fig_array[p])if pdf is not None:f.close()return fig_array", "docstring": "Plots an array of *images* to a single window of size *figsize* with *rows* and *columns*.\n\n* *cmap*: Specifies color map\n* *cbar*: Add color bars\n* *show*: If false, dismisses each window after it is created and optionally saved\n* *pdf*: Save to a pdf of filename *pdf*\n* *\\*\\*kwargs* passed to :class:`matplotlib.axis.imshow`", "id": "f12112:m0"} {"signature": "def plot_featured(*args, **kwargs):", "body": "toplabel = kwargs.pop('', None)xlabel = kwargs.pop('', None)ylabel = kwargs.pop('', None)legend = kwargs.pop('', None)error = kwargs.pop('', None)figlabel = kwargs.pop('', None)fig = kwargs.pop('', None)if figlabel is not None:fig = _figure(figlabel)elif fig is None:try:fig = _plt.gcf()except:fig = _plt.fig()if error is None:_plt.plot(*args, **kwargs)else:_plt.errorbar(*args, **kwargs)_addlabel(toplabel, xlabel, ylabel, fig=fig)if legend is not None:_plt.legend(legend)return fig", "docstring": "Wrapper for matplotlib.pyplot.plot() / errorbar().\n\nTakes options:\n\n* 'error': if true, use :func:`matplotlib.pyplot.errorbar` instead of :func:`matplotlib.pyplot.plot`. 
*\\*args* and *\\*\\*kwargs* passed through here.\n* 'fig': figure to use.\n* 'figlabel': figure label.\n* 'legend': legend location.\n* 'toplabel': top label of plot.\n* 'xlabel': x-label of plot.\n* 'ylabel': y-label of plot.", "id": "f12113:m0"} {"signature": "def figure(title=None, **kwargs):", "body": "fig = _figure(**kwargs)if title is not None:fig.canvas.set_window_title(title)return fig", "docstring": "Creates a figure with *\\*\\*kwargs* with a window title *title*.\n\nReturns class :class:`matplotlib.figure.Figure`.", "id": "f12114:m0"} {"signature": "def savefig(filename, path=\"\", fig=None, ext='', verbose=False, **kwargs):", "body": "filename = os.path.join(path, filename)final_filename = ''.format(filename, ext).replace(\"\", \"\").replace(\"\", \"\")final_filename = os.path.abspath(final_filename)final_path = os.path.dirname(final_filename)if not os.path.exists(final_path):os.makedirs(final_path)if verbose:print(''.format(final_filename))if fig is not None:fig.savefig(final_filename, bbox_inches='', **kwargs)else:plt.savefig(final_filename, bbox_inches='', **kwargs)", "docstring": "Save the figure *fig* (optional, if not specified, latest figure in focus) to *filename* in the path *path* with extension *ext*.\n\n*\\*\\*kwargs* is passed to :meth:`matplotlib.figure.Figure.savefig`.", "id": "f12115:m0"} {"signature": "def less_labels(ax, x_fraction=, y_fraction=):", "body": "nbins = _np.size(ax.get_xticklabels())ax.locator_params(nbins=_np.floor(nbins*x_fraction), axis='')nbins = _np.size(ax.get_yticklabels())ax.locator_params(nbins=_np.floor(nbins*y_fraction), axis='')", "docstring": "Scale the number of tick labels in x and y by *x_fraction* and *y_fraction* respectively.", "id": "f12116:m0"} {"signature": "@propertydef parent(self):", "body": "return self._parent", "docstring": "The parent object.", "id": "f12117:c0:m4"} {"signature": "@propertydef verbose(self):", "body": "return self._verbose", "docstring": "Determines whether rectangle coordinates are printed to the terminal on selection.", "id": "f12117:c0:m5"} {"signature": "@propertydef RectangleSelector(self):", "body": "return self._RectangleSelector", "docstring": "The instance of :class:`matplotlib.widgets.RectangleSelector`.", "id": "f12117:c0:m7"} {"signature": "@propertydef ax(self):", "body": "return self._ax", "docstring": "The axis used.", "id": "f12117:c0:m8"} {"signature": "@propertydef selfunc(self):", "body": "return self._selfunc", "docstring": "A placeholder for the function called on each mouse release.", "id": "f12117:c0:m9"} {"signature": "@propertydef selfunc_results(self):", "body": "if self.selfunc is not None:return self.selfunc(self)", "docstring": "The results of :func:`selfunc(instance) ` where *instance* is this class.", "id": "f12117:c0:m10"} {"signature": "@propertydef eclick(self):", "body": "if self._eclick is None:raise IOError('')else:return self._eclick", "docstring": "The starting mouse click from :class:`RectangleSelector `.", "id": "f12117:c0:m11"} {"signature": "@propertydef erelease(self):", "body": "if self._erelease is None:raise IOError('')else:return self._erelease", "docstring": "The ending mouse click from :class:`RectangleSelector `.", "id": "f12117:c0:m12"} {"signature": "@propertydef x0(self):", "body": "return self._x0", "docstring": "Minimum x coordinate of rectangle.", "id": "f12117:c0:m13"} {"signature": "@propertydef x1(self):", "body": "return self._x1", "docstring": "Maximum x coordinate of rectangle.", "id": "f12117:c0:m15"} {"signature": "@propertydef 
y0(self):", "body": "return self._y0", "docstring": "Minimum y coordinate of rectangle.", "id": "f12117:c0:m17"} {"signature": "@propertydef y1(self):", "body": "return self._y1", "docstring": "Maximum y coordinate of rectangle.", "id": "f12117:c0:m19"} {"signature": "@propertydef xslice(self):", "body": "return [self.x0, self.x1]", "docstring": "A list `[x0, x1]`.", "id": "f12117:c0:m21"} {"signature": "@propertydef yslice(self):", "body": "return [self.y0, self.y1]", "docstring": "A list `[y0, y1]`.", "id": "f12117:c0:m23"} {"signature": "@propertydef width(self):", "body": "return self.x1-self.x0", "docstring": "Width of rectangle.", "id": "f12117:c0:m25"} {"signature": "@propertydef height(self):", "body": "return self.y1-self.y0", "docstring": "Height of rectangle.", "id": "f12117:c0:m26"} {"signature": "def hist2d(x, y, bins=, labels=None, aspect=\"\", plot=True, fig=None, ax=None, interpolation='', cbar=True, **kwargs):", "body": "h_range = kwargs.pop('', None)h_normed = kwargs.pop('', None)h_weights = kwargs.pop('', None)h, xe, ye = _np.histogram2d(x, y, bins=bins, range=h_range, normed=h_normed, weights=h_weights)extent = [xe[], xe[-], ye[], ye[-]]if plot:if ax is None:if fig is None:fig = _figure('')ax = fig.gca()ax.clear()img = ax.imshow(h.transpose(), extent=extent, interpolation=interpolation, aspect=aspect, **kwargs)if cbar:_colorbar(ax=ax, im=img)if labels is not None:_addlabel(labels[], labels[], labels[])return h, extent", "docstring": "Creates a 2-D histogram of data *x*, *y* with *bins*, *labels* = :code:`[title, xlabel, ylabel]`, aspect ration *aspect*. Attempts to use axis *ax* first, then the current axis of *fig*, then the last axis, to use an already-created window.\n\nPlotting (*plot*) is on by default, setting false doesn't attempt to create a figure.\n\n*interpolation* sets the interpolation type of :meth:`matplotlib.axis.imshow`.\n\nReturns a handle and extent as :code:`h, extent`", "id": "f12118:m0"} {"signature": "def axesfontsize(ax, fontsize):", "body": "items = ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels())for item in items:item.set_fontsize(fontsize)", "docstring": "Change the font size for the title, x and y labels, and x and y tick labels for axis *ax* to *fontsize*.", "id": "f12119:m0"} {"signature": "def setup_axes(rows=, cols=, figsize=(, ), expand=True, tight_layout=None, **kwargs):", "body": "if expand:figsize = (figsize[]*cols, figsize[]*rows)figargs = {}if isinstance(tight_layout, dict):figargs[\"\"] = tight_layoutelif tight_layout == \"\":figargs[\"\"] = {\"\": (, , , )}dpi = kwargs.pop('', None)fig, gs = _setup_figure(rows=rows, cols=cols, figsize=figsize, dpi=dpi, **figargs)axes = _np.empty(shape=(rows, cols), dtype=object)for i in range(rows):for j in range(cols):axes[i, j] = fig.add_subplot(gs[i, j], **kwargs)if axes.shape == (, ):return fig, axes[, ]else:return fig, axes", "docstring": "Sets up a figure of size *figsize* with a number of rows (*rows*) and columns (*cols*). \\*\\*kwargs passed through to :meth:`matplotlib.figure.Figure.add_subplot`.\n\n.. versionadded:: 1.2\n\nParameters\n----------\n\nrows : int\n Number of rows to create.\ncols : int\n Number of columns to create.\nfigsize : tuple\n Size of figure to create.\nexpand : bool\n Make the entire figure with size `figsize`.\n\nReturns\n-------\n\nfig : :class:`matplotlib.figure.Figure`\n The figure.\naxes : :class:`numpy.ndarray`\n An array of all of the axes. 
(Unless there's only one axis, in which case it returns an object instance :class:`matplotlib.axis.Axis`.)", "id": "f12120:m0"} {"signature": "def NonUniformImage_axes(img):", "body": "xmin = xmax = img.shape[]-ymin = ymax = img.shape[]-x = _np.linspace(xmin, xmax, img.shape[])y = _np.linspace(ymin, ymax, img.shape[])return x, y", "docstring": "Returns axes *x, y* for a given image *img* to be used with :func:`scisalt.matplotlib.NonUniformImage`.\n\nReturns\n-------\nx, y : float, float", "id": "f12121:m0"} {"signature": "def addlabel(ax=None, toplabel=None, xlabel=None, ylabel=None, zlabel=None, clabel=None, cb=None, windowlabel=None, fig=None, axes=None):", "body": "if (axes is None) and (ax is not None):axes = axif (windowlabel is not None) and (fig is not None):fig.canvas.set_window_title(windowlabel)if fig is None:fig = _plt.gcf()if fig is not None and axes is None:axes = fig.get_axes()if axes == []:logger.error('')if axes is not None:if toplabel is not None:axes.set_title(toplabel)if xlabel is not None:axes.set_xlabel(xlabel)if ylabel is not None:axes.set_ylabel(ylabel)if zlabel is not None:axes.set_zlabel(zlabel)if (clabel is not None) or (cb is not None):if (clabel is not None) and (cb is not None):cb.set_label(clabel)else:if clabel is None:logger.error('')else:logger.error('')", "docstring": "Adds labels to a plot.", "id": "f12122:m0"} {"signature": "@propertydef x0(self):", "body": "return self._sorted_x[]", "docstring": "The smaller x coordinate.", "id": "f12127:c0:m3"} {"signature": "@propertydef x1(self):", "body": "return self._sorted_x[]", "docstring": "The larger x coordinate.", "id": "f12127:c0:m4"} {"signature": "@propertydef y0(self):", "body": "return self._sorted_y[]", "docstring": "The smaller y coordinate.", "id": "f12127:c0:m7"} {"signature": "@propertydef y1(self):", "body": "return self._sorted_y[]", "docstring": "The larger y coordinate.", "id": "f12127:c0:m8"} {"signature": "def print2elog(author='', title='', text='', link=None, file=None, now=None):", "body": "if now is None:now = _dt.datetime.now()fulltime = now.strftime('')if not ((link is None) ^ (file is None)):link_copied = _copy_file(link, fulltime)file_copied = _copy_file(file, fulltime)else:raise ValueError('')loader = _jj.PackageLoader('', '')env = _jj.Environment(loader=loader, trim_blocks=True)template = env.get_template('')stream = template.stream(author=author, title=title, text=text, link=link_copied, file=file_copied, now=now)with _tempfile.TemporaryDirectory() as dirname:filename = ''.format(fulltime)filepath = _os.path.join(dirname, filename)with open(filepath, '') as fid:stream.dump(fid)finalpath = _os.path.join(basedir, filename)_shutil.copyfile(filepath, finalpath)", "docstring": "Prints to the elog.\n\nParameters\n----------\n\nauthor : str, optional\n Author of the elog.\ntitle : str, optional\n Title of the elog.\nlink : str, optional\n Path to a thumbnail.\nfile : str, optional\n Path to a file.\nnow : :class:`datetime.datetime`\n Time of the elog.", "id": "f12128:m1"} {"signature": "def resourcePath(self, relative_path):", "body": "from os import pathimport systry:base_path = sys._MEIPASSexcept Exception:base_path = path.dirname(path.abspath(__file__))return path.join(base_path, relative_path)", "docstring": "Get absolute path to resource, works for dev and for PyInstaller", "id": "f12131:c0:m1"} {"signature": "def addLogbook(self, physDef= \"\", mccDef=\"\", initialInstance=False):", "body": "if self.logMenuCount < :self.logMenus.append(LogSelectMenu(self.logui.multiLogLayout, 
initialInstance))self.logMenus[-].addLogbooks(self.logTypeList[], self.physics_programs, physDef)self.logMenus[-].addLogbooks(self.logTypeList[], self.mcc_programs, mccDef)self.logMenus[-].show()self.logMenuCount += if initialInstance:QObject.connect(self.logMenus[-].logButton, SIGNAL(\"\"), self.addLogbook)else:from functools import partialQObject.connect(self.logMenus[-].logButton, SIGNAL(\"\"), partial(self.removeLogbook, self.logMenus[-]))", "docstring": "Add new block of logbook selection windows. Only 5 allowed.", "id": "f12131:c0:m6"} {"signature": "def removeLogbook(self, menu=None):", "body": "if self.logMenuCount > and menu is not None:menu.removeMenu()self.logMenus.remove(menu)self.logMenuCount -= ", "docstring": "Remove logbook menu set.", "id": "f12131:c0:m7"} {"signature": "def selectedLogs(self):", "body": "mcclogs = []physlogs = []for i in range(len(self.logMenus)):logType = self.logMenus[i].selectedType()log = self.logMenus[i].selectedProgram()if logType == \"\":if log not in mcclogs:mcclogs.append(log)elif logType == \"\":if log not in physlogs:physlogs.append(log)return mcclogs, physlogs", "docstring": "Return selected log books by type.", "id": "f12131:c0:m8"} {"signature": "def acceptedUser(self, logType):", "body": "from urllib2 import urlopen, URLError, HTTPErrorimport jsonisApproved = FalseuserName = str(self.logui.userName.text())if userName == \"\":return False if logType == \"\":networkFault = Falsedata = []log_url = \"\" + userNametry:data = urlopen(log_url, None, ).read()data = json.loads(data)except URLError as error:print(\"\" + str(error.reason))networkFault = Trueexcept HTTPError as error:print(\"\" + str(error.reason))networkFault = Trueif networkFault:msgBox = QMessageBox()msgBox.setText(\"\")msgBox.setInformativeText(\"\")msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)msgBox.setDefaultButton(QMessageBox.Ok)if msgBox.exec_() == QMessageBox.Ok:isApproved = Trueif data != [] and (data is not None):isApproved = Trueelse:isApproved = Truereturn isApproved", "docstring": "Verify enetered user name is on accepted MCC logbook list.", "id": "f12131:c0:m9"} {"signature": "def xmlSetup(self, logType, logList):", "body": "from xml.etree.ElementTree import Element, SubElement, ElementTreefrom datetime import datetimecurr_time = datetime.now()if logType == \"\":log_entry = Element('')title = SubElement(log_entry, '')program = SubElement(log_entry, '')timestamp = SubElement(log_entry, '')priority = SubElement(log_entry, '')os_user = SubElement(log_entry, '')hostname = SubElement(log_entry, '')text = SubElement(log_entry, '')log_user = SubElement(log_entry, '')logbook = []for i in range(len(logList)):logbook.append(SubElement(log_entry, ''))logbook[i].text = logList[i].lower()log_entry.attrib[''] = \"\"program.text = \"\"priority.text = \"\"os_user.text = \"\"hostname.text = \"\"text.attrib[''] = \"\"if not self.imagePixmap.isNull():attachment = SubElement(log_entry, '')attachment.attrib[''] = \"\"attachment.attrib[''] = \"\" + self.imageTypeattachment.text = curr_time.strftime(\"\") + str(curr_time.microsecond) + \"\" + self.imageTypetimestamp.text = curr_time.strftime(\"\")fileName = \"\" + curr_time.strftime(\"\") + str(curr_time.microsecond) + \"\"else: timeString = curr_time.strftime(\"\")log_entry = Element(None)severity = SubElement(log_entry, '')location = SubElement(log_entry, '')keywords = SubElement(log_entry, '')time = SubElement(log_entry, '')isodate = SubElement(log_entry, '')log_user = SubElement(log_entry, '')category = 
SubElement(log_entry, '')title = SubElement(log_entry, '')metainfo = SubElement(log_entry, '')if not self.imagePixmap.isNull():imageFile = SubElement(log_entry, '')imageFile.text = timeString + \"\" + self.imageTypethumbnail = SubElement(log_entry, '')thumbnail.text = timeString + \"\"text = SubElement(log_entry, '') log_entry.attrib[''] = \"\"category.text = \"\"location.text = \"\"severity.text = \"\"keywords.text = \"\"time.text = curr_time.strftime(\"\")isodate.text = curr_time.strftime(\"\")metainfo.text = timeString + \"\"fileName = \"\" + metainfo.textlog_user.text = str(self.logui.userName.text())title.text = str(self.logui.titleEntry.text())if title.text == \"\":QMessageBox().warning(self, \"\", \"\")return Nonetext.text = str(self.logui.textEntry.toPlainText())if text.text == \"\":text.text = \"\"xmlFile = open(fileName, \"\")if logType == \"\":ElementTree(log_entry).write(xmlFile)else:xmlString = self.prettify(log_entry)xmlFile.write(xmlString)xmlFile.write(\"\") xmlFile.close()return fileName.rstrip(\"\")", "docstring": "Create xml file with fields from logbook form.", "id": "f12131:c0:m10"} {"signature": "def prettify(self, elem):", "body": "from xml.etree import ElementTreefrom re import subrawString = ElementTree.tostring(elem, '')parsedString = sub(r'', '', rawString) return parsedString[:]", "docstring": "Parse xml elements for pretty printing", "id": "f12131:c0:m11"} {"signature": "def prepareImages(self, fileName, logType):", "body": "import subprocessif self.imageType == \"\":self.imagePixmap.save(fileName + \"\", \"\", -)if logType == \"\":makePostScript = \"\" + fileName + \"\" + fileName + \"\"process = subprocess.Popen(makePostScript, shell=True)process.wait()thumbnailPixmap = self.imagePixmap.scaled(, , Qt.KeepAspectRatio)thumbnailPixmap.save(fileName + \"\", \"\", -)else:renameImage = \"\" + self.image + \"\" + fileName + \"\"process = subprocess.Popen(renameImage, shell=True)process.wait()if logType == \"\":thumbnailPixmap = self.imagePixmap.scaled(, , Qt.KeepAspectRatio)thumbnailPixmap.save(fileName + \"\", \"\", -)", "docstring": "Convert supplied QPixmap object to image file.", "id": "f12131:c0:m12"} {"signature": "def submitEntry(self):", "body": "mcclogs, physlogs = self.selectedLogs()success = Trueif mcclogs != []:if not self.acceptedUser(\"\"):QMessageBox().warning(self, \"\", \"\")returnfileName = self.xmlSetup(\"\", mcclogs)if fileName is None:returnif not self.imagePixmap.isNull():self.prepareImages(fileName, \"\")success = self.sendToLogbook(fileName, \"\")if physlogs != []:for i in range(len(physlogs)):fileName = self.xmlSetup(\"\", physlogs[i])if fileName is None:returnif not self.imagePixmap.isNull():self.prepareImages(fileName, \"\")success_phys = self.sendToLogbook(fileName, \"\", physlogs[i])success = success and success_physself.done(success)", "docstring": "Process user inputs and subit logbook entry when user clicks Submit button", "id": "f12131:c0:m13"} {"signature": "def sendToLogbook(self, fileName, logType, location=None):", "body": "import subprocesssuccess = Trueif logType == \"\":fileString = \"\"if not self.imagePixmap.isNull():fileString = fileName + \"\" + self.imageTypelogcmd = \"\" + fileName + \"\" + fileStringprocess = subprocess.Popen(logcmd, shell=True)process.wait()if process.returncode != :success = Falseelse:from shutil import copypath = \"\" + location.lower() + \"\" try:if not self.imagePixmap.isNull():copy(fileName + \"\", path)if self.imageType == \"\":copy(fileName + \"\", path)else:copy(fileName + \"\" + 
self.imageType, path)copy(fileName + \"\", path)except IOError as error:print(error)success = Falsereturn success", "docstring": "Process log information and push to selected logbooks.", "id": "f12131:c0:m14"} {"signature": "def clearForm(self):", "body": "self.logui.titleEntry.clear()self.logui.textEntry.clear()while self.logMenuCount > :self.removeLogbook(self.logMenus[-])", "docstring": "Clear all form fields (except author).", "id": "f12131:c0:m15"} {"signature": "def setupUI(self):", "body": "labelSizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)labelSizePolicy.setHorizontalStretch()labelSizePolicy.setVerticalStretch()menuSizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)menuSizePolicy.setHorizontalStretch()menuSizePolicy.setVerticalStretch()logTypeLayout = QHBoxLayout()logTypeLayout.setSpacing()typeLabel = QLabel(\"\")typeLabel.setMinimumSize(QSize(, ))typeLabel.setMaximumSize(QSize(, ))typeLabel.setSizePolicy(labelSizePolicy)logTypeLayout.addWidget(typeLabel)self.logType = QComboBox(self)self.logType.setMinimumSize(QSize(, ))self.logType.setMaximumSize(QSize(, ))menuSizePolicy.setHeightForWidth(self.logType.sizePolicy().hasHeightForWidth())self.logType.setSizePolicy(menuSizePolicy)logTypeLayout.addWidget(self.logType)logTypeLayout.setStretch(, )programLayout = QHBoxLayout()programLayout.setSpacing()programLabel = QLabel(\"\")programLabel.setMinimumSize(QSize(, ))programLabel.setMaximumSize(QSize(, ))programLabel.setSizePolicy(labelSizePolicy)programLayout.addWidget(programLabel)self.programName = QComboBox(self)self.programName.setMinimumSize(QSize(, ))self.programName.setMaximumSize(QSize(, ))menuSizePolicy.setHeightForWidth(self.programName.sizePolicy().hasHeightForWidth())self.programName.setSizePolicy(menuSizePolicy)programLayout.addWidget(self.programName)programLayout.setStretch(, )if self.initialInstance:self.logButton = QPushButton(\"\", self)self.logButton.setToolTip(\"\")else:self.logButton = QPushButton(\"\")self.logButton.setToolTip(\"\")self.logButton.setMinimumSize(QSize(, )) self.logButton.setMaximumSize(QSize(, )) self.logButton.setObjectName(\"\")self.logButton.setStyleSheet(\"\")self._logSelectLayout = QHBoxLayout()self._logSelectLayout.setSpacing()self._logSelectLayout.addLayout(logTypeLayout)self._logSelectLayout.addLayout(programLayout)self._logSelectLayout.addWidget(self.logButton)self._logSelectLayout.setStretch(, )self._logSelectLayout.setStretch(, )", "docstring": "Create graphical objects for menus.", "id": "f12131:c1:m1"} {"signature": "def _connectSlots(self):", "body": "QObject.connect(self.logType, SIGNAL(\"\"), self.changeLogType)", "docstring": "Connect menu change signals.", "id": "f12131:c1:m2"} {"signature": "def show(self):", "body": "self.parent.addLayout(self._logSelectLayout)self.menuCount += self._connectSlots()", "docstring": "Display menus and connect even signals.", "id": "f12131:c1:m3"} {"signature": "def addLogbooks(self, type=None, logs=[], default=\"\"):", "body": "if type is not None and len(logs) != :if type in self.logList:for logbook in logs:if logbook not in self.logList.get(type)[]:self.logList.get(type)[].append(logbook)else:self.logList[type] = []self.logList[type].append(logs)if len(self.logList[type]) > and default != \"\":self.logList.get(type)[] == defaultelse:self.logList.get(type).append(default)self.logType.clear()self.logType.addItems(list(self.logList.keys()))self.changeLogType()", "docstring": "Add or change list of logbooks.", "id": "f12131:c1:m6"} {"signature": "def removeLogbooks(self, 
type=None, logs=[]):", "body": "if type is not None and type in self.logList:if len(logs) == or logs == \"\":del self.logList[type]else:for logbook in logs:if logbook in self.logList[type]:self.logList[type].remove(logbook)self.changeLogType()", "docstring": "Remove unwanted logbooks from list.", "id": "f12131:c1:m7"} {"signature": "def changeLogType(self):", "body": "logType = self.selectedType()programs = self.logList.get(logType)[]default = self.logList.get(logType)[]if logType in self.logList:self.programName.clear()self.programName.addItems(programs)self.programName.setCurrentIndex(programs.index(default))", "docstring": "Populate log program list to correspond with log type selection.", "id": "f12131:c1:m8"} {"signature": "def addMenu(self):", "body": "self.parent.multiLogLayout.addLayout(self.logSelectLayout)self.getPrograms(logType, programName)", "docstring": "Add menus to parent gui.", "id": "f12131:c1:m9"} {"signature": "def removeLayout(self, layout):", "body": "for cnt in reversed(range(layout.count())):item = layout.takeAt(cnt)widget = item.widget()if widget is not None:widget.deleteLater()else:''''''self.removeLayout(item.layout())", "docstring": "Iteratively remove graphical objects from layout.", "id": "f12131:c1:m10"} {"signature": "def removeMenu(self):", "body": "self.removeLayout(self._logSelectLayout)self.deleteLater()", "docstring": "Remove layout from parent gui and destroy object on completion.", "id": "f12131:c1:m11"} {"signature": "def linkcode_resolve(domain, info):", "body": "if domain != '':return Nonemodname = info['']fullname = info['']submod = sys.modules.get(modname)if submod is None:return Noneobj = submodfor part in fullname.split(''):try:obj = getattr(obj, part)except:return Nonetry:fn = inspect.getsourcefile(obj)except:fn = Noneif not fn:return Nonetry:source, lineno = inspect.getsourcelines(obj)except:lineno = Noneif lineno:linespec = \"\" % (lineno, lineno + len(source) - )else:linespec = \"\"fn = relpath(fn, start=dirname(scisalt.__file__))if '' in scisalt.__version__:return \"\" % (fn, linespec)else:return \"\" % (scisalt.__version__, fn, linespec)", "docstring": "Determine the URL corresponding to Python object", "id": "f12137:m0"} {"signature": "def t_CONNECTION_KWD(t):", "body": "return t", "docstring": "r'connection", "id": "f12143:m0"} {"signature": "def t_CLIENT_KWD(t):", "body": "return t", "docstring": "r'client", "id": "f12143:m1"} {"signature": "def t_SERVER_KWD(t):", "body": "return t", "docstring": "r'server", "id": "f12143:m2"} {"signature": "def t_START_KWD(t):", "body": "return t", "docstring": "r'start", "id": "f12143:m3"} {"signature": "def t_END_KWD(t):", "body": "return t", "docstring": "r'end", "id": "f12143:m4"} {"signature": "def t_ACTION_KWD(t):", "body": "return t", "docstring": "r'action", "id": "f12143:m5"} {"signature": "def t_NULL_KWD(t):", "body": "return t", "docstring": "r'NULL", "id": "f12143:m6"} {"signature": "def t_TRANSPORT_KWD(t):", "body": "return t", "docstring": "r'(tcp|udp)", "id": "f12143:m7"} {"signature": "def t_REGEX_MATCH_INCOMING_KWD(t):", "body": "return t", "docstring": "r\"regex_match_incoming", "id": "f12143:m8"} {"signature": "def t_IF_KWD(t):", "body": "return t", "docstring": "r\"if", "id": "f12143:m9"} {"signature": "def t_STRING(t):", "body": "return t", "docstring": "r'(\"[^\"]*\")|(\\'[^\\']*\\')", "id": "f12143:m10"} {"signature": "def t_FLOAT(t):", "body": "t.value = float(t.value)return t", "docstring": "r'([-]?(\\d+)(\\.\\d+)(e(\\+|-)?(\\d+))? 
| (\\d+)e(\\+|-)?(\\d+))([lL]|[fF])?", "id": "f12143:m11"} {"signature": "def t_INTEGER(t):", "body": "t.value = int(t.value)return t", "docstring": "r'[-]?\\d+([uU]|[lL]|[uU][lL]|[lL][uU])?", "id": "f12143:m12"} {"signature": "def t_KEY(t):", "body": "return t", "docstring": "r'[a-zA-Z_][a-zA-Z0-9_#\\?]*", "id": "f12143:m13"} {"signature": "def t_carriagereturn(t):", "body": "t.lexer.lineno += len(t.value)", "docstring": "r'\\r+", "id": "f12143:m14"} {"signature": "def p_start(p):", "body": "p[] = p[] + [p[]]", "docstring": "start : model action_blocks", "id": "f12143:m16"} {"signature": "def p_model(p):", "body": "p[] = p[] + [p[]]", "docstring": "model : connection_banner transition_list", "id": "f12143:m17"} {"signature": "def p_connection_banner(p):", "body": "p[] = [p[], p[]]", "docstring": "connection_banner : CONNECTION_KWD LPAREN TRANSPORT_KWD COMMA port RPAREN COLON", "id": "f12143:m18"} {"signature": "def p_port(p):", "body": "p[] = p[]", "docstring": "port : KEY\nport : p_integer_arg", "id": "f12143:m19"} {"signature": "def p_transition_list(p):", "body": "p[] = p[] + [p[]]", "docstring": "transition_list : transition_list transition", "id": "f12143:m20"} {"signature": "def p_transitions(p):", "body": "p[] = [p[]]", "docstring": "transition_list : transition", "id": "f12143:m21"} {"signature": "def p_transition(p):", "body": "p[] = None if p[] == '' else p[]if p[] == '':p[] = MarionetteTransition(p[], p[], p[], , True)else:p[] = MarionetteTransition(p[], p[], p[], p[], False)", "docstring": "transition : START_KWD KEY NULL_KWD FLOAT\ntransition : KEY KEY NULL_KWD FLOAT\ntransition : KEY END_KWD NULL_KWD FLOAT\ntransition : START_KWD KEY KEY FLOAT\ntransition : KEY KEY KEY FLOAT\ntransition : KEY END_KWD KEY FLOAT\ntransition : START_KWD KEY NULL_KWD INTEGER\ntransition : KEY KEY NULL_KWD INTEGER\ntransition : KEY END_KWD NULL_KWD INTEGER\ntransition : START_KWD KEY KEY INTEGER\ntransition : KEY KEY KEY INTEGER\ntransition : KEY END_KWD KEY INTEGER\ntransition : START_KWD KEY NULL_KWD KEY\ntransition : KEY KEY NULL_KWD KEY\ntransition : KEY END_KWD NULL_KWD KEY\ntransition : START_KWD KEY KEY KEY\ntransition : KEY KEY KEY KEY\ntransition : KEY END_KWD KEY KEY", "id": "f12143:m22"} {"signature": "def p_action_blocks(p):", "body": "if isinstance(p[], list):if isinstance(p[][], list):p[] = p[][] + [p[]]else:p[] = p[] + p[]else:p[] = [p[], p[]]", "docstring": "action_blocks : action_blocks action_block", "id": "f12143:m23"} {"signature": "def p_action_blocks2(p):", "body": "p[] = p[]", "docstring": "action_blocks : action_block", "id": "f12143:m24"} {"signature": "def p_action_block(p):", "body": "p[] = []for i in range(len(p[])):p[] += [marionette_tg.action.MarionetteAction(p[], p[][i][],p[][i][],p[][i][],p[][i][],p[][i][])]", "docstring": "action_block : ACTION_KWD KEY COLON actions", "id": "f12143:m25"} {"signature": "def p_actions(p):", "body": "p[] = p[] + [p[]]", "docstring": "actions : actions action", "id": "f12143:m26"} {"signature": "def p_actions2(p):", "body": "p[] = [p[]]", "docstring": "actions : action", "id": "f12143:m27"} {"signature": "def p_action(p):", "body": "if len(p)==:p[] = [p[], p[], p[], p[], None]elif len(p)==:p[] = [p[], p[], p[], p[], p[]]", "docstring": "action : CLIENT_KWD KEY DOT KEY LPAREN args RPAREN\naction : SERVER_KWD KEY DOT KEY LPAREN args RPAREN\naction : CLIENT_KWD KEY DOT KEY LPAREN args RPAREN IF_KWD REGEX_MATCH_INCOMING_KWD LPAREN p_string_arg RPAREN\naction : SERVER_KWD KEY DOT KEY LPAREN args RPAREN IF_KWD REGEX_MATCH_INCOMING_KWD 
LPAREN p_string_arg RPAREN", "id": "f12143:m28"} {"signature": "def p_args(p):", "body": "if len(p) == :p[] = p[] + [p[]]else:p[] = [p[]]", "docstring": "args : args COMMA arg\nargs : arg", "id": "f12143:m29"} {"signature": "def p_arg(p):", "body": "p[] = p[]", "docstring": "arg : p_string_arg\narg : p_integer_arg\narg : p_float_arg", "id": "f12143:m30"} {"signature": "def p_string_arg(p):", "body": "p[] = str(p[][:-])p[] = p[].decode(\"\")", "docstring": "p_string_arg : STRING", "id": "f12143:m31"} {"signature": "def p_integer_arg(p):", "body": "p[] = int(p[])", "docstring": "p_integer_arg : INTEGER", "id": "f12143:m32"} {"signature": "def p_float_arg(p):", "body": "p[] = float(p[])", "docstring": "p_float_arg : FLOAT", "id": "f12143:m33"} {"signature": "def long_to_bytes(N, blocksize=):", "body": "bytestring = hex(N)bytestring = bytestring[:] if bytestring.startswith('') else bytestringbytestring = bytestring[:-] if bytestring.endswith('') else bytestringbytestring = '' + bytestring if (len(bytestring) % ) != else bytestringbytestring = binascii.unhexlify(bytestring)if blocksize > and len(bytestring) % blocksize != :bytestring = '' *(blocksize - (len(bytestring) % blocksize)) + bytestringreturn bytestring", "docstring": "Given an input integer ``N``, ``long_to_bytes`` returns the representation of ``N`` in bytes.\n If ``blocksize`` is greater than ``1`` then the output string will be right justified and then padded with zero-bytes,\n such that the return values length is a multiple of ``blocksize``.", "id": "f12146:m1"} {"signature": "def clear_sent_messages(self, offset=None):", "body": "if offset is None:offset = getattr(settings, '', defaults.MAILQUEUE_CLEAR_OFFSET)if type(offset) is int:offset = datetime.timedelta(hours=offset)delete_before = timezone.now() - offsetself.filter(sent=True, last_attempt__lte=delete_before).delete()", "docstring": "Deletes sent MailerMessage records", "id": "f12195:c0:m1"} {"signature": "def add_attachment(self, attachment):", "body": "if self.pk is None:self._save_without_sending()original_filename = attachment.file.name.split(os.sep)[-]file_content = ContentFile(attachment.read())new_attachment = Attachment()new_attachment.file_attachment.save(original_filename, file_content, save=False)new_attachment.email = selfnew_attachment.original_filename = original_filenametry:new_attachment.save()except Exception as e:logger.error(e)new_attachment.file_attachment.delete()", "docstring": "Takes a Django `File` object and creates an attachment for this mailer message.", "id": "f12195:c1:m1"} {"signature": "def _save_without_sending(self, *args, **kwargs):", "body": "self.do_not_send = Truesuper(MailerMessage, self).save(*args, **kwargs)", "docstring": "Saves the MailerMessage instance without sending the e-mail. This ensures\nother models (e.g. `Attachment`) have something to relate to in the database.", "id": "f12195:c1:m2"} {"signature": "def send_mail(self):", "body": "if getattr(settings, '', defaults.MAILQUEUE_CELERY):from mailqueue.tasks import send_mailsend_mail.delay(self.pk)else:self._send()", "docstring": "Public api to send mail. 
Makes the determination\n of using celery or not and then calls the appropriate methods.", "id": "f12195:c1:m3"} {"signature": "def compute_style_factor_exposures(positions, risk_factor):", "body": "positions_wo_cash = positions.drop('', axis='')gross_exposure = positions_wo_cash.abs().sum(axis='')style_factor_exposure = positions_wo_cash.multiply(risk_factor).divide(gross_exposure, axis='')tot_style_factor_exposure = style_factor_exposure.sum(axis='',skipna=True)return tot_style_factor_exposure", "docstring": "Returns style factor exposure of an algorithm's positions\n\nParameters\n----------\npositions : pd.DataFrame\n Daily equity positions of algorithm, in dollars.\n - See full explanation in create_risk_tear_sheet\n\nrisk_factor : pd.DataFrame\n Daily risk factor per asset.\n - DataFrame with dates as index and equities as columns\n - Example:\n Equity(24 Equity(62\n [AAPL]) [ABT])\n 2017-04-03\t -0.51284 1.39173\n 2017-04-04\t -0.73381 0.98149\n 2017-04-05\t -0.90132 1.13981", "id": "f12198:m0"} {"signature": "def plot_style_factor_exposures(tot_style_factor_exposure, factor_name=None,ax=None):", "body": "if ax is None:ax = plt.gca()if factor_name is None:factor_name = tot_style_factor_exposure.nameax.plot(tot_style_factor_exposure.index, tot_style_factor_exposure,label=factor_name)avg = tot_style_factor_exposure.mean()ax.axhline(avg, linestyle='', label=''.format(avg))ax.axhline(, color='', linestyle='')_, _, y1, y2 = plt.axis()lim = max(abs(y1), abs(y2))ax.set(title=''.format(factor_name),ylabel=''.format(factor_name),ylim=(-lim, lim))ax.legend(frameon=True, framealpha=)return ax", "docstring": "Plots DataFrame output of compute_style_factor_exposures as a line graph\n\nParameters\n----------\ntot_style_factor_exposure : pd.Series\n Daily style factor exposures (output of compute_style_factor_exposures)\n - Time series with decimal style factor exposures\n - Example:\n 2017-04-24 0.037820\n 2017-04-25 0.016413\n 2017-04-26 -0.021472\n 2017-04-27 -0.024859\n\nfactor_name : string\n Name of style factor, for use in graph title\n - Defaults to tot_style_factor_exposure.name", "id": "f12198:m1"} {"signature": "def compute_sector_exposures(positions, sectors, sector_dict=SECTORS):", "body": "sector_ids = sector_dict.keys()long_exposures = []short_exposures = []gross_exposures = []net_exposures = []positions_wo_cash = positions.drop('', axis='')long_exposure = positions_wo_cash[positions_wo_cash > ].sum(axis='')short_exposure = positions_wo_cash[positions_wo_cash < ].abs().sum(axis='')gross_exposure = positions_wo_cash.abs().sum(axis='')for sector_id in sector_ids:in_sector = positions_wo_cash[sectors == sector_id]long_sector = in_sector[in_sector > ].sum(axis='').divide(long_exposure)short_sector = in_sector[in_sector < ].sum(axis='').divide(short_exposure)gross_sector = in_sector.abs().sum(axis='').divide(gross_exposure)net_sector = long_sector.subtract(short_sector)long_exposures.append(long_sector)short_exposures.append(short_sector)gross_exposures.append(gross_sector)net_exposures.append(net_sector)return long_exposures, short_exposures, gross_exposures, net_exposures", "docstring": "Returns arrays of long, short and gross sector exposures of an algorithm's\npositions\n\nParameters\n----------\npositions : pd.DataFrame\n Daily equity positions of algorithm, in dollars.\n - See full explanation in compute_style_factor_exposures.\n\nsectors : pd.DataFrame\n Daily Morningstar sector code per asset\n - See full explanation in create_risk_tear_sheet\n\nsector_dict : dict or 
OrderedDict\n Dictionary of all sectors\n - Keys are sector codes (e.g. ints or strings) and values are sector\n names (which must be strings)\n - Defaults to Morningstar sectors", "id": "f12198:m2"} {"signature": "def plot_sector_exposures_longshort(long_exposures, short_exposures,sector_dict=SECTORS, ax=None):", "body": "if ax is None:ax = plt.gca()if sector_dict is None:sector_names = SECTORS.values()else:sector_names = sector_dict.values()color_list = plt.cm.gist_rainbow(np.linspace(, , ))ax.stackplot(long_exposures[].index, long_exposures,labels=sector_names, colors=color_list, alpha=,baseline='')ax.stackplot(long_exposures[].index, short_exposures,colors=color_list, alpha=, baseline='')ax.axhline(, color='', linestyle='')ax.set(title='',ylabel='')ax.legend(loc='', frameon=True, framealpha=)return ax", "docstring": "Plots outputs of compute_sector_exposures as area charts\n\nParameters\n----------\nlong_exposures, short_exposures : arrays\n Arrays of long and short sector exposures (output of\n compute_sector_exposures).\n\nsector_dict : dict or OrderedDict\n Dictionary of all sectors\n - See full description in compute_sector_exposures", "id": "f12198:m3"} {"signature": "def plot_sector_exposures_gross(gross_exposures, sector_dict=None, ax=None):", "body": "if ax is None:ax = plt.gca()if sector_dict is None:sector_names = SECTORS.values()else:sector_names = sector_dict.values()color_list = plt.cm.gist_rainbow(np.linspace(, , ))ax.stackplot(gross_exposures[].index, gross_exposures,labels=sector_names, colors=color_list, alpha=,baseline='')ax.axhline(, color='', linestyle='')ax.set(title='',ylabel='')return ax", "docstring": "Plots output of compute_sector_exposures as area charts\n\nParameters\n----------\ngross_exposures : arrays\n Arrays of gross sector exposures (output of compute_sector_exposures).\n\nsector_dict : dict or OrderedDict\n Dictionary of all sectors\n - See full description in compute_sector_exposures", "id": "f12198:m4"} {"signature": "def plot_sector_exposures_net(net_exposures, sector_dict=None, ax=None):", "body": "if ax is None:ax = plt.gca()if sector_dict is None:sector_names = SECTORS.values()else:sector_names = sector_dict.values()color_list = plt.cm.gist_rainbow(np.linspace(, , ))for i in range(len(net_exposures)):ax.plot(net_exposures[i], color=color_list[i], alpha=,label=sector_names[i])ax.set(title='',ylabel='')return ax", "docstring": "Plots output of compute_sector_exposures as line graphs\n\nParameters\n----------\nnet_exposures : arrays\n Arrays of net sector exposures (output of compute_sector_exposures).\n\nsector_dict : dict or OrderedDict\n Dictionary of all sectors\n - See full description in compute_sector_exposures", "id": "f12198:m5"} {"signature": "def compute_cap_exposures(positions, caps):", "body": "long_exposures = []short_exposures = []gross_exposures = []net_exposures = []positions_wo_cash = positions.drop('', axis='')tot_gross_exposure = positions_wo_cash.abs().sum(axis='')tot_long_exposure = positions_wo_cash[positions_wo_cash > ].sum(axis='')tot_short_exposure = positions_wo_cash[positions_wo_cash < ].abs().sum(axis='')for bucket_name, boundaries in CAP_BUCKETS.items():in_bucket = positions_wo_cash[(caps >= boundaries[]) &(caps <= boundaries[])]gross_bucket = in_bucket.abs().sum(axis='').divide(tot_gross_exposure)long_bucket = in_bucket[in_bucket > ].sum(axis='').divide(tot_long_exposure)short_bucket = in_bucket[in_bucket < ].sum(axis='').divide(tot_short_exposure)net_bucket = 
long_bucket.subtract(short_bucket)gross_exposures.append(gross_bucket)long_exposures.append(long_bucket)short_exposures.append(short_bucket)net_exposures.append(net_bucket)return long_exposures, short_exposures, gross_exposures, net_exposures", "docstring": "Returns arrays of long, short and gross market cap exposures of an\nalgorithm's positions\n\nParameters\n----------\npositions : pd.DataFrame\n Daily equity positions of algorithm, in dollars.\n - See full explanation in compute_style_factor_exposures.\n\ncaps : pd.DataFrame\n Daily Morningstar sector code per asset\n - See full explanation in create_risk_tear_sheet", "id": "f12198:m6"} {"signature": "def plot_cap_exposures_longshort(long_exposures, short_exposures, ax=None):", "body": "if ax is None:ax = plt.gca()color_list = plt.cm.gist_rainbow(np.linspace(, , ))ax.stackplot(long_exposures[].index, long_exposures,labels=CAP_BUCKETS.keys(), colors=color_list, alpha=,baseline='')ax.stackplot(long_exposures[].index, short_exposures, colors=color_list,alpha=, baseline='')ax.axhline(, color='', linestyle='')ax.set(title='',ylabel='')ax.legend(loc='', frameon=True, framealpha=)return ax", "docstring": "Plots outputs of compute_cap_exposures as area charts\n\nParameters\n----------\nlong_exposures, short_exposures : arrays\n Arrays of long and short market cap exposures (output of\n compute_cap_exposures).", "id": "f12198:m7"} {"signature": "def plot_cap_exposures_gross(gross_exposures, ax=None):", "body": "if ax is None:ax = plt.gca()color_list = plt.cm.gist_rainbow(np.linspace(, , ))ax.stackplot(gross_exposures[].index, gross_exposures,labels=CAP_BUCKETS.keys(), colors=color_list, alpha=,baseline='')ax.axhline(, color='', linestyle='')ax.set(title='',ylabel='')return ax", "docstring": "Plots outputs of compute_cap_exposures as area charts\n\nParameters\n----------\ngross_exposures : array\n Arrays of gross market cap exposures (output of compute_cap_exposures).", "id": "f12198:m8"} {"signature": "def plot_cap_exposures_net(net_exposures, ax=None):", "body": "if ax is None:ax = plt.gca()color_list = plt.cm.gist_rainbow(np.linspace(, , ))cap_names = CAP_BUCKETS.keys()for i in range(len(net_exposures)):ax.plot(net_exposures[i], color=color_list[i], alpha=,label=cap_names[i])ax.axhline(, color='', linestyle='')ax.set(title='',ylabel='')return ax", "docstring": "Plots outputs of compute_cap_exposures as line graphs\n\nParameters\n----------\nnet_exposures : array\n Arrays of gross market cap exposures (output of compute_cap_exposures).", "id": "f12198:m9"} {"signature": "def compute_volume_exposures(shares_held, volumes, percentile):", "body": "shares_held = shares_held.replace(, np.nan)shares_longed = shares_held[shares_held > ]shares_shorted = - * shares_held[shares_held < ]shares_grossed = shares_held.abs()longed_frac = shares_longed.divide(volumes)shorted_frac = shares_shorted.divide(volumes)grossed_frac = shares_grossed.divide(volumes)longed_threshold = * longed_frac.apply(partial(np.nanpercentile, q= * percentile),axis='',)shorted_threshold = * shorted_frac.apply(partial(np.nanpercentile, q= * percentile),axis='',)grossed_threshold = * grossed_frac.apply(partial(np.nanpercentile, q= * percentile),axis='',)return longed_threshold, shorted_threshold, grossed_threshold", "docstring": "Returns arrays of pth percentile of long, short and gross volume exposures\nof an algorithm's held shares\n\nParameters\n----------\nshares_held : pd.DataFrame\n Daily number of shares held by an algorithm.\n - See full explanation in 
create_risk_tear_sheet\n\nvolume : pd.DataFrame\n Daily volume per asset\n - See full explanation in create_risk_tear_sheet\n\npercentile : float\n Percentile to use when computing and plotting volume exposures\n - See full explanation in create_risk_tear_sheet", "id": "f12198:m10"} {"signature": "def plot_volume_exposures_longshort(longed_threshold, shorted_threshold,percentile, ax=None):", "body": "if ax is None:ax = plt.gca()ax.plot(longed_threshold.index, longed_threshold,color='', label='')ax.plot(shorted_threshold.index, shorted_threshold,color='', label='')ax.axhline(, color='')ax.set(title='',ylabel=''.format( * percentile))ax.legend(frameon=True, framealpha=)return ax", "docstring": "Plots outputs of compute_volume_exposures as line graphs\n\nParameters\n----------\nlonged_threshold, shorted_threshold : pd.Series\n Series of longed and shorted volume exposures (output of\n compute_volume_exposures).\n\npercentile : float\n Percentile to use when computing and plotting volume exposures.\n - See full explanation in create_risk_tear_sheet", "id": "f12198:m11"} {"signature": "def plot_volume_exposures_gross(grossed_threshold, percentile, ax=None):", "body": "if ax is None:ax = plt.gca()ax.plot(grossed_threshold.index, grossed_threshold,color='', label='')ax.axhline(, color='')ax.set(title='',ylabel=''.format( * percentile))ax.legend(frameon=True, framealpha=)return ax", "docstring": "Plots outputs of compute_volume_exposures as line graphs\n\nParameters\n----------\ngrossed_threshold : pd.Series\n Series of grossed volume exposures (output of\n compute_volume_exposures).\n\npercentile : float\n Percentile to use when computing and plotting volume exposures\n - See full explanation in create_risk_tear_sheet", "id": "f12198:m12"} {"signature": "def generate_toy_risk_model_output(start_date='', periods=,num_styles=):", "body": "dts = pd.date_range(start_date, periods=periods)np.random.seed()tickers = ['', '', '']styles = [''.format(i) for i in range(num_styles)]returns = pd.Series(index=dts,data=np.random.randn(periods)) / factor_returns = pd.DataFrame(columns=styles, index=dts,data=np.random.randn(periods, len(styles))) / arrays = [dts, tickers]index = pd.MultiIndex.from_product(arrays, names=['', ''])positions = pd.DataFrame(columns=tickers, index=dts,data=np.random.randint(, size=(periods, len(tickers))))positions[''] = np.zeros(periods)factor_loadings = pd.DataFrame(columns=styles, index=index,data=np.random.randn(periods * len(tickers), len(styles)))return returns, positions, factor_returns, factor_loadings", "docstring": "Generate toy risk model output.\n\nParameters\n----------\nstart_date : str\n date to start generating toy data\nperiods : int\n number of days for which to generate toy data\n\nReturns\n-------\ntuple of (returns, factor_returns, positions, factor_loadings)\nreturns : pd.DataFrame\nfactor_returns : pd.DataFrame", "id": "f12207:m0"} {"signature": "def create_full_tear_sheet(returns,positions=None,transactions=None,market_data=None,benchmark_rets=None,slippage=None,live_start_date=None,sector_mappings=None,bayesian=False,round_trips=False,estimate_intraday='',hide_positions=False,cone_std=(, , ),bootstrap=False,unadjusted_returns=None,style_factor_panel=None,sectors=None,caps=None,shares_held=None,volumes=None,percentile=None,turnover_denom='',set_context=True,factor_returns=None,factor_loadings=None,pos_in_dollars=True,header_rows=None,factor_partitions=FACTOR_PARTITIONS):", "body": "if (unadjusted_returns is None) and (slippage is not None) and(transactions is 
not None):unadjusted_returns = returns.copy()returns = txn.adjust_returns_for_slippage(returns, positions,transactions, slippage)positions = utils.check_intraday(estimate_intraday, returns,positions, transactions)create_returns_tear_sheet(returns,positions=positions,transactions=transactions,live_start_date=live_start_date,cone_std=cone_std,benchmark_rets=benchmark_rets,bootstrap=bootstrap,turnover_denom=turnover_denom,header_rows=header_rows,set_context=set_context)create_interesting_times_tear_sheet(returns,benchmark_rets=benchmark_rets,set_context=set_context)if positions is not None:create_position_tear_sheet(returns, positions,hide_positions=hide_positions,set_context=set_context,sector_mappings=sector_mappings,estimate_intraday=False)if transactions is not None:create_txn_tear_sheet(returns, positions, transactions,unadjusted_returns=unadjusted_returns,estimate_intraday=False,set_context=set_context)if round_trips:create_round_trip_tear_sheet(returns=returns,positions=positions,transactions=transactions,sector_mappings=sector_mappings,estimate_intraday=False)if market_data is not None:create_capacity_tear_sheet(returns, positions, transactions,market_data,liquidation_daily_vol_limit=,last_n_days=,estimate_intraday=False)if style_factor_panel is not None:create_risk_tear_sheet(positions, style_factor_panel, sectors,caps, shares_held, volumes, percentile)if factor_returns is not None and factor_loadings is not None:create_perf_attrib_tear_sheet(returns, positions, factor_returns,factor_loadings, transactions,pos_in_dollars=pos_in_dollars,factor_partitions=factor_partitions)if bayesian:create_bayesian_tear_sheet(returns,live_start_date=live_start_date,benchmark_rets=benchmark_rets,set_context=set_context)", "docstring": "Generate a number of tear sheets that are useful\nfor analyzing a strategy's performance.\n\n- Fetches benchmarks if needed.\n- Creates tear sheets for returns, and significant events.\n If possible, also creates tear sheets for position analysis,\n transaction analysis, and Bayesian analysis.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - Time series with decimal returns.\n - Example:\n 2015-07-16 -0.012143\n 2015-07-17 0.045350\n 2015-07-20 0.030957\n 2015-07-21 0.004902\npositions : pd.DataFrame, optional\n Daily net position values.\n - Time series of dollar amount invested in each position and cash.\n - Days where stocks are not held can be represented by 0 or NaN.\n - Non-working capital is labelled 'cash'\n - Example:\n index 'AAPL' 'MSFT' cash\n 2004-01-09 13939.3800 -14012.9930 711.5585\n 2004-01-12 14492.6300 -14624.8700 27.1821\n 2004-01-13 -13853.2800 13653.6400 -43.6375\ntransactions : pd.DataFrame, optional\n Executed trade volumes and fill prices.\n - One row per trade.\n - Trades on different names that occur at the\n same time will have identical indicies.\n - Example:\n index amount price symbol\n 2004-01-09 12:18:01 483 324.12 'AAPL'\n 2004-01-09 12:18:01 122 83.10 'MSFT'\n 2004-01-13 14:12:23 -75 340.43 'AAPL'\nmarket_data : pd.Panel, optional\n Panel with items axis of 'price' and 'volume' DataFrames.\n The major and minor axes should match those of the\n the passed positions DataFrame (same dates and symbols).\nslippage : int/float, optional\n Basis points of slippage to apply to returns before generating\n tearsheet stats and plots.\n If a value is provided, slippage parameter sweep\n plots will be generated from the unadjusted returns.\n Transactions and positions must also be passed.\n - See 
txn.adjust_returns_for_slippage for more details.\nlive_start_date : datetime, optional\n The point in time when the strategy began live trading,\n after its backtest period. This datetime should be normalized.\nhide_positions : bool, optional\n If True, will not output any symbol names.\nbayesian: boolean, optional\n If True, causes the generation of a Bayesian tear sheet.\nround_trips: boolean, optional\n If True, causes the generation of a round trip tear sheet.\nsector_mappings : dict or pd.Series, optional\n Security identifier to sector mapping.\n Security ids as keys, sectors as values.\nestimate_intraday: boolean or str, optional\n Instead of using the end-of-day positions, use the point in the day\n where we have the most $ invested. This will adjust positions to\n better approximate and represent how an intraday strategy behaves.\n By default, this is 'infer', and an attempt will be made to detect\n an intraday strategy. Specifying this value will prevent detection.\ncone_std : float, or tuple, optional\n If float, The standard deviation to use for the cone plots.\n If tuple, Tuple of standard deviation values to use for the cone plots\n - The cone is a normal distribution with this standard deviation\n centered around a linear regression.\nbootstrap : boolean (optional)\n Whether to perform bootstrap analysis for the performance\n metrics. Takes a few minutes longer.\nturnover_denom : str\n Either AGB or portfolio_value, default AGB.\n - See full explanation in txn.get_turnover.\nfactor_returns : pd.Dataframe, optional\n Returns by factor, with date as index and factors as columns\nfactor_loadings : pd.Dataframe, optional\n Factor loadings for all days in the date range, with date and\n ticker as index, and factors as columns.\npos_in_dollars : boolean, optional\n indicates whether positions is in dollars\nheader_rows : dict or OrderedDict, optional\n Extra rows to display at the top of the perf stats table.\nset_context : boolean, optional\n If True, set default plotting style context.\n - See plotting.context().\nfactor_partitions : dict, optional\n dict specifying how factors should be separated in perf attrib\n factor returns and risk exposures plots\n - See create_perf_attrib_tear_sheet().", "id": "f12208:m1"} {"signature": "@plotting.customizedef create_simple_tear_sheet(returns,positions=None,transactions=None,benchmark_rets=None,slippage=None,estimate_intraday='',live_start_date=None,turnover_denom='',header_rows=None):", "body": "positions = utils.check_intraday(estimate_intraday, returns,positions, transactions)if (slippage is not None) and (transactions is not None):returns = txn.adjust_returns_for_slippage(returns, positions,transactions, slippage)always_sections = positions_sections = if positions is not None else transactions_sections = if transactions is not None else live_sections = if live_start_date is not None else benchmark_sections = if benchmark_rets is not None else vertical_sections = sum([always_sections,positions_sections,transactions_sections,live_sections,benchmark_sections,])if live_start_date is not None:live_start_date = ep.utils.get_utc_timestamp(live_start_date)plotting.show_perf_stats(returns,benchmark_rets,positions=positions,transactions=transactions,turnover_denom=turnover_denom,live_start_date=live_start_date,header_rows=header_rows)fig = plt.figure(figsize=(, vertical_sections * ))gs = gridspec.GridSpec(vertical_sections, , wspace=, hspace=)ax_rolling_returns = plt.subplot(gs[:, :])i = if benchmark_rets is not None:ax_rolling_beta = 
plt.subplot(gs[i, :], sharex=ax_rolling_returns)i += ax_rolling_sharpe = plt.subplot(gs[i, :], sharex=ax_rolling_returns)i += ax_underwater = plt.subplot(gs[i, :], sharex=ax_rolling_returns)i += plotting.plot_rolling_returns(returns,factor_returns=benchmark_rets,live_start_date=live_start_date,cone_std=(, , ),ax=ax_rolling_returns)ax_rolling_returns.set_title('')if benchmark_rets is not None:plotting.plot_rolling_beta(returns, benchmark_rets, ax=ax_rolling_beta)plotting.plot_rolling_sharpe(returns, ax=ax_rolling_sharpe)plotting.plot_drawdown_underwater(returns, ax=ax_underwater)if positions is not None:ax_exposures = plt.subplot(gs[i, :])i += ax_top_positions = plt.subplot(gs[i, :], sharex=ax_exposures)i += ax_holdings = plt.subplot(gs[i, :], sharex=ax_exposures)i += ax_long_short_holdings = plt.subplot(gs[i, :])i += positions_alloc = pos.get_percent_alloc(positions)plotting.plot_exposures(returns, positions, ax=ax_exposures)plotting.show_and_plot_top_positions(returns,positions_alloc,show_and_plot=,hide_positions=False,ax=ax_top_positions)plotting.plot_holdings(returns, positions_alloc, ax=ax_holdings)plotting.plot_long_short_holdings(returns, positions_alloc,ax=ax_long_short_holdings)if transactions is not None:ax_turnover = plt.subplot(gs[i, :])i += ax_txn_timings = plt.subplot(gs[i, :])i += plotting.plot_turnover(returns,transactions,positions,ax=ax_turnover)plotting.plot_txn_time_hist(transactions, ax=ax_txn_timings)for ax in fig.axes:plt.setp(ax.get_xticklabels(), visible=True)", "docstring": "Simpler version of create_full_tear_sheet; generates summary performance\nstatistics and important plots as a single image.\n\n- Plots: cumulative returns, rolling beta, rolling Sharpe, underwater,\n exposure, top 10 holdings, total holdings, long/short holdings,\n daily turnover, transaction time distribution.\n- Never accept market_data input (market_data = None)\n- Never accept sector_mappings input (sector_mappings = None)\n- Never perform bootstrap analysis (bootstrap = False)\n- Never hide positions on top 10 holdings plot (hide_positions = False)\n- Always use default cone_std (cone_std = (1.0, 1.5, 2.0))\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - Time series with decimal returns.\n - Example:\n 2015-07-16 -0.012143\n 2015-07-17 0.045350\n 2015-07-20 0.030957\n 2015-07-21 0.004902\npositions : pd.DataFrame, optional\n Daily net position values.\n - Time series of dollar amount invested in each position and cash.\n - Days where stocks are not held can be represented by 0 or NaN.\n - Non-working capital is labelled 'cash'\n - Example:\n index 'AAPL' 'MSFT' cash\n 2004-01-09 13939.3800 -14012.9930 711.5585\n 2004-01-12 14492.6300 -14624.8700 27.1821\n 2004-01-13 -13853.2800 13653.6400 -43.6375\ntransactions : pd.DataFrame, optional\n Executed trade volumes and fill prices.\n - One row per trade.\n - Trades on different names that occur at the\n same time will have identical indices.\n - Example:\n index amount price symbol\n 2004-01-09 12:18:01 483 324.12 'AAPL'\n 2004-01-09 12:18:01 122 83.10 'MSFT'\n 2004-01-13 14:12:23 -75 340.43 'AAPL'\nbenchmark_rets : pd.Series, optional\n Daily returns of the benchmark, noncumulative.\nslippage : int/float, optional\n Basis points of slippage to apply to returns before generating\n tearsheet stats and plots.\n If a value is provided, slippage parameter sweep\n plots will be generated from the unadjusted returns.\n Transactions and positions must also be passed.\n - See 
txn.adjust_returns_for_slippage for more details.\nlive_start_date : datetime, optional\n The point in time when the strategy began live trading,\n after its backtest period. This datetime should be normalized.\nturnover_denom : str, optional\n Either AGB or portfolio_value, default AGB.\n - See full explanation in txn.get_turnover.\nheader_rows : dict or OrderedDict, optional\n Extra rows to display at the top of the perf stats table.\nset_context : boolean, optional\n If True, set default plotting style context.", "id": "f12208:m2"} {"signature": "@plotting.customizedef create_returns_tear_sheet(returns, positions=None,transactions=None,live_start_date=None,cone_std=(, , ),benchmark_rets=None,bootstrap=False,turnover_denom='',header_rows=None,return_fig=False):", "body": "if benchmark_rets is not None:returns = utils.clip_returns_to_benchmark(returns, benchmark_rets)plotting.show_perf_stats(returns, benchmark_rets,positions=positions,transactions=transactions,turnover_denom=turnover_denom,bootstrap=bootstrap,live_start_date=live_start_date,header_rows=header_rows)plotting.show_worst_drawdown_periods(returns)vertical_sections = if live_start_date is not None:vertical_sections += live_start_date = ep.utils.get_utc_timestamp(live_start_date)if benchmark_rets is not None:vertical_sections += if bootstrap:vertical_sections += fig = plt.figure(figsize=(, vertical_sections * ))gs = gridspec.GridSpec(vertical_sections, , wspace=, hspace=)ax_rolling_returns = plt.subplot(gs[:, :])i = ax_rolling_returns_vol_match = plt.subplot(gs[i, :],sharex=ax_rolling_returns)i += ax_rolling_returns_log = plt.subplot(gs[i, :],sharex=ax_rolling_returns)i += ax_returns = plt.subplot(gs[i, :],sharex=ax_rolling_returns)i += if benchmark_rets is not None:ax_rolling_beta = plt.subplot(gs[i, :], sharex=ax_rolling_returns)i += ax_rolling_volatility = plt.subplot(gs[i, :], sharex=ax_rolling_returns)i += ax_rolling_sharpe = plt.subplot(gs[i, :], sharex=ax_rolling_returns)i += ax_drawdown = plt.subplot(gs[i, :], sharex=ax_rolling_returns)i += ax_underwater = plt.subplot(gs[i, :], sharex=ax_rolling_returns)i += ax_monthly_heatmap = plt.subplot(gs[i, ])ax_annual_returns = plt.subplot(gs[i, ])ax_monthly_dist = plt.subplot(gs[i, ])i += ax_return_quantiles = plt.subplot(gs[i, :])i += plotting.plot_rolling_returns(returns,factor_returns=benchmark_rets,live_start_date=live_start_date,cone_std=cone_std,ax=ax_rolling_returns)ax_rolling_returns.set_title('')plotting.plot_rolling_returns(returns,factor_returns=benchmark_rets,live_start_date=live_start_date,cone_std=None,volatility_match=(benchmark_rets is not None),legend_loc=None,ax=ax_rolling_returns_vol_match)ax_rolling_returns_vol_match.set_title('')plotting.plot_rolling_returns(returns,factor_returns=benchmark_rets,logy=True,live_start_date=live_start_date,cone_std=cone_std,ax=ax_rolling_returns_log)ax_rolling_returns_log.set_title('')plotting.plot_returns(returns,live_start_date=live_start_date,ax=ax_returns,)ax_returns.set_title('')if benchmark_rets is not None:plotting.plot_rolling_beta(returns, benchmark_rets, ax=ax_rolling_beta)plotting.plot_rolling_volatility(returns, factor_returns=benchmark_rets, ax=ax_rolling_volatility)plotting.plot_rolling_sharpe(returns, ax=ax_rolling_sharpe)plotting.plot_drawdown_periods(returns, top=, ax=ax_drawdown)plotting.plot_drawdown_underwater(returns=returns, ax=ax_underwater)plotting.plot_monthly_returns_heatmap(returns, ax=ax_monthly_heatmap)plotting.plot_annual_returns(returns, 
ax=ax_annual_returns)plotting.plot_monthly_returns_dist(returns, ax=ax_monthly_dist)plotting.plot_return_quantiles(returns,live_start_date=live_start_date,ax=ax_return_quantiles)if bootstrap and (benchmark_rets is not None):ax_bootstrap = plt.subplot(gs[i, :])plotting.plot_perf_stats(returns, benchmark_rets,ax=ax_bootstrap)elif bootstrap:raise ValueError('')for ax in fig.axes:plt.setp(ax.get_xticklabels(), visible=True)if return_fig:return fig", "docstring": "Generate a number of plots for analyzing a strategy's returns.\n\n- Fetches benchmarks, then creates the plots on a single figure.\n- Plots: rolling returns (with cone), rolling beta, rolling sharpe,\n rolling Fama-French risk factors, drawdowns, underwater plot, monthly\n and annual return plots, daily similarity plots,\n and return quantile box plot.\n- Will also print the start and end dates of the strategy,\n performance statistics, drawdown periods, and the return range.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\npositions : pd.DataFrame, optional\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\ntransactions : pd.DataFrame, optional\n Executed trade volumes and fill prices.\n - See full explanation in create_full_tear_sheet.\nlive_start_date : datetime, optional\n The point in time when the strategy began live trading,\n after its backtest period.\ncone_std : float, or tuple, optional\n If float, The standard deviation to use for the cone plots.\n If tuple, Tuple of standard deviation values to use for the cone plots\n - The cone is a normal distribution with this standard deviation\n centered around a linear regression.\nbenchmark_rets : pd.Series, optional\n Daily noncumulative returns of the benchmark.\n - This is in the same style as returns.\nbootstrap : boolean, optional\n Whether to perform bootstrap analysis for the performance\n metrics. 
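A minimal usage sketch for create_returns_tear_sheet (f12208:m3), assuming pyfolio is installed and its tear-sheet functions are exposed at the package level, as in released versions; the dates and random returns below are invented purely for illustration.

    import numpy as np
    import pandas as pd
    import pyfolio

    # Synthetic daily, noncumulative returns (decimal form), business-day indexed.
    idx = pd.date_range('2015-01-02', periods=500, freq='B')
    rets = pd.Series(np.random.randn(len(idx)) / 100, index=idx, name='strategy')

    # live_start_date splits the series into backtest and live (out-of-sample) regions.
    pyfolio.create_returns_tear_sheet(rets, live_start_date='2016-06-01')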
Takes a few minutes longer.\nturnover_denom : str, optional\n Either AGB or portfolio_value, default AGB.\n - See full explanation in txn.get_turnover.\nheader_rows : dict or OrderedDict, optional\n Extra rows to display at the top of the perf stats table.\nreturn_fig : boolean, optional\n If True, returns the figure that was plotted on.", "id": "f12208:m3"} {"signature": "@plotting.customizedef create_position_tear_sheet(returns, positions,show_and_plot_top_pos=, hide_positions=False,return_fig=False, sector_mappings=None,transactions=None, estimate_intraday=''):", "body": "positions = utils.check_intraday(estimate_intraday, returns,positions, transactions)if hide_positions:show_and_plot_top_pos = vertical_sections = if sector_mappings is not None else fig = plt.figure(figsize=(, vertical_sections * ))gs = gridspec.GridSpec(vertical_sections, , wspace=, hspace=)ax_exposures = plt.subplot(gs[, :])ax_top_positions = plt.subplot(gs[, :], sharex=ax_exposures)ax_max_median_pos = plt.subplot(gs[, :], sharex=ax_exposures)ax_holdings = plt.subplot(gs[, :], sharex=ax_exposures)ax_long_short_holdings = plt.subplot(gs[, :])ax_gross_leverage = plt.subplot(gs[, :], sharex=ax_exposures)positions_alloc = pos.get_percent_alloc(positions)plotting.plot_exposures(returns, positions, ax=ax_exposures)plotting.show_and_plot_top_positions(returns,positions_alloc,show_and_plot=show_and_plot_top_pos,hide_positions=hide_positions,ax=ax_top_positions)plotting.plot_max_median_position_concentration(positions,ax=ax_max_median_pos)plotting.plot_holdings(returns, positions_alloc, ax=ax_holdings)plotting.plot_long_short_holdings(returns, positions_alloc,ax=ax_long_short_holdings)plotting.plot_gross_leverage(returns, positions,ax=ax_gross_leverage)if sector_mappings is not None:sector_exposures = pos.get_sector_exposures(positions,sector_mappings)if len(sector_exposures.columns) > :sector_alloc = pos.get_percent_alloc(sector_exposures)sector_alloc = sector_alloc.drop('', axis='')ax_sector_alloc = plt.subplot(gs[, :], sharex=ax_exposures)plotting.plot_sector_allocations(returns, sector_alloc,ax=ax_sector_alloc)for ax in fig.axes:plt.setp(ax.get_xticklabels(), visible=True)if return_fig:return fig", "docstring": "Generate a number of plots for analyzing a\nstrategy's positions and holdings.\n\n- Plots: gross leverage, exposures, top positions, and holdings.\n- Will also print the top positions held.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\npositions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\nshow_and_plot_top_pos : int, optional\n By default, this is 2, and both prints and plots the\n top 10 positions.\n If this is 0, it will only plot; if 1, it will only print.\nhide_positions : bool, optional\n If True, will not output any symbol names.\n Overrides show_and_plot_top_pos to 0 to suppress text output.\nreturn_fig : boolean, optional\n If True, returns the figure that was plotted on.\nsector_mappings : dict or pd.Series, optional\n Security identifier to sector mapping.\n Security ids as keys, sectors as values.\ntransactions : pd.DataFrame, optional\n Prices and amounts of executed trades. 
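For reference alongside create_position_tear_sheet, a sketch of the positions layout described in these docstrings, re-using the example values shown for create_full_tear_sheet: one dollar-valued column per asset plus a final 'cash' column, indexed by date.

    import pandas as pd

    positions = pd.DataFrame(
        {'AAPL': [13939.3800, 14492.6300, -13853.2800],
         'MSFT': [-14012.9930, -14624.8700, 13653.6400],
         'cash': [711.5585, 27.1821, -43.6375]},
        index=pd.to_datetime(['2004-01-09', '2004-01-12', '2004-01-13']))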
One row per trade.\n - See full explanation in create_full_tear_sheet.\nestimate_intraday: boolean or str, optional\n Approximate returns for intraday strategies.\n See description in create_full_tear_sheet.", "id": "f12208:m4"} {"signature": "@plotting.customizedef create_txn_tear_sheet(returns, positions, transactions,unadjusted_returns=None, estimate_intraday='',return_fig=False):", "body": "positions = utils.check_intraday(estimate_intraday, returns,positions, transactions)vertical_sections = if unadjusted_returns is not None else fig = plt.figure(figsize=(, vertical_sections * ))gs = gridspec.GridSpec(vertical_sections, , wspace=, hspace=)ax_turnover = plt.subplot(gs[, :])ax_daily_volume = plt.subplot(gs[, :], sharex=ax_turnover)ax_turnover_hist = plt.subplot(gs[, :])ax_txn_timings = plt.subplot(gs[, :])plotting.plot_turnover(returns,transactions,positions,ax=ax_turnover)plotting.plot_daily_volume(returns, transactions, ax=ax_daily_volume)try:plotting.plot_daily_turnover_hist(transactions, positions,ax=ax_turnover_hist)except ValueError:warnings.warn('', UserWarning)plotting.plot_txn_time_hist(transactions, ax=ax_txn_timings)if unadjusted_returns is not None:ax_slippage_sweep = plt.subplot(gs[, :])plotting.plot_slippage_sweep(unadjusted_returns,positions,transactions,ax=ax_slippage_sweep)ax_slippage_sensitivity = plt.subplot(gs[, :])plotting.plot_slippage_sensitivity(unadjusted_returns,positions,transactions,ax=ax_slippage_sensitivity)for ax in fig.axes:plt.setp(ax.get_xticklabels(), visible=True)if return_fig:return fig", "docstring": "Generate a number of plots for analyzing a strategy's transactions.\n\nPlots: turnover, daily volume, and a histogram of daily volume.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\npositions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\ntransactions : pd.DataFrame\n Prices and amounts of executed trades. 
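Similarly, a sketch of the transactions layout expected by create_txn_tear_sheet, built from the example rows in the create_full_tear_sheet docstring: one row per trade, indexed by execution time, with amount, price and symbol columns.

    import pandas as pd

    transactions = pd.DataFrame(
        {'amount': [483, 122, -75],
         'price': [324.12, 83.10, 340.43],
         'symbol': ['AAPL', 'MSFT', 'AAPL']},
        index=pd.to_datetime(['2004-01-09 12:18:01',
                              '2004-01-09 12:18:01',
                              '2004-01-13 14:12:23']))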
One row per trade.\n - See full explanation in create_full_tear_sheet.\nunadjusted_returns : pd.Series, optional\n Daily unadjusted returns of the strategy, noncumulative.\n Will plot additional slippage sweep analysis.\n - See pyfolio.plotting.plot_slippage_sweep and\n pyfolio.plotting.plot_slippage_sensitivity\nestimate_intraday: boolean or str, optional\n Approximate returns for intraday strategies.\n See description in create_full_tear_sheet.\nreturn_fig : boolean, optional\n If True, returns the figure that was plotted on.", "id": "f12208:m5"} {"signature": "@plotting.customizedef create_round_trip_tear_sheet(returns, positions, transactions,sector_mappings=None,estimate_intraday='', return_fig=False):", "body": "positions = utils.check_intraday(estimate_intraday, returns,positions, transactions)transactions_closed = round_trips.add_closing_transactions(positions,transactions)trades = round_trips.extract_round_trips(transactions_closed,portfolio_value=positions.sum(axis='') / ( + returns))if len(trades) < :warnings.warn(\"\"\"\"\"\", UserWarning)returnround_trips.print_round_trip_stats(trades)plotting.show_profit_attribution(trades)if sector_mappings is not None:sector_trades = round_trips.apply_sector_mappings_to_round_trips(trades, sector_mappings)plotting.show_profit_attribution(sector_trades)fig = plt.figure(figsize=(, * ))gs = gridspec.GridSpec(, , wspace=, hspace=)ax_trade_lifetimes = plt.subplot(gs[, :])ax_prob_profit_trade = plt.subplot(gs[, ])ax_holding_time = plt.subplot(gs[, ])ax_pnl_per_round_trip_dollars = plt.subplot(gs[, ])ax_pnl_per_round_trip_pct = plt.subplot(gs[, ])plotting.plot_round_trip_lifetimes(trades, ax=ax_trade_lifetimes)plotting.plot_prob_profit_trade(trades, ax=ax_prob_profit_trade)trade_holding_times = [x.days for x in trades['']]sns.distplot(trade_holding_times, kde=False, ax=ax_holding_time)ax_holding_time.set(xlabel='')sns.distplot(trades.pnl, kde=False, ax=ax_pnl_per_round_trip_dollars)ax_pnl_per_round_trip_dollars.set(xlabel='')sns.distplot(trades.returns.dropna() * , kde=False,ax=ax_pnl_per_round_trip_pct)ax_pnl_per_round_trip_pct.set(xlabel='')gs.tight_layout(fig)if return_fig:return fig", "docstring": "Generate a number of figures and plots describing the duration,\nfrequency, and profitability of trade \"round trips.\"\nA round trip is started when a new long or short position is\nopened and is only completed when the number of shares in that\nposition returns to or crosses zero.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\npositions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\ntransactions : pd.DataFrame\n Prices and amounts of executed trades. 
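The round-trip definition used by create_round_trip_tear_sheet (a trip completes when the share count returns to or crosses zero) can be illustrated in plain pandas; this is not pyfolio's implementation, and the per-bar share series is invented.

    import pandas as pd

    shares = pd.Series([0, 100, 150, 80, 0, -50, 0])  # hypothetical shares held per bar
    prev = shares.shift(1).fillna(0)
    # A round trip ends where the count returns to zero or flips sign.
    trip_end = ((shares == 0) & (prev != 0)) | (prev * shares < 0)
    print(shares.index[trip_end].tolist())  # -> [4, 6]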
One row per trade.\n - See full explanation in create_full_tear_sheet.\nsector_mappings : dict or pd.Series, optional\n Security identifier to sector mapping.\n Security ids as keys, sectors as values.\nestimate_intraday: boolean or str, optional\n Approximate returns for intraday strategies.\n See description in create_full_tear_sheet.\nreturn_fig : boolean, optional\n If True, returns the figure that was plotted on.", "id": "f12208:m6"} {"signature": "@plotting.customizedef create_interesting_times_tear_sheet(returns, benchmark_rets=None, legend_loc='', return_fig=False):", "body": "rets_interesting = timeseries.extract_interesting_date_ranges(returns)if not rets_interesting:warnings.warn('''', UserWarning)returnutils.print_table(pd.DataFrame(rets_interesting).describe().transpose().loc[:, ['', '', '']] * ,name='',float_format=''.format)if benchmark_rets is not None:returns = utils.clip_returns_to_benchmark(returns, benchmark_rets)bmark_interesting = timeseries.extract_interesting_date_ranges(benchmark_rets)num_plots = len(rets_interesting)num_rows = int((num_plots + ) / )fig = plt.figure(figsize=(, num_rows * ))gs = gridspec.GridSpec(num_rows, , wspace=, hspace=)for i, (name, rets_period) in enumerate(rets_interesting.items()):ax = plt.subplot(gs[int(i / ), i % ])ep.cum_returns(rets_period).plot(ax=ax, color='', label='', alpha=, lw=)if benchmark_rets is not None:ep.cum_returns(bmark_interesting[name]).plot(ax=ax, color='', label='', alpha=)ax.legend(['',''],loc=legend_loc, frameon=True, framealpha=)else:ax.legend([''],loc=legend_loc, frameon=True, framealpha=)ax.set_title(name)ax.set_ylabel('')ax.set_xlabel('')if return_fig:return fig", "docstring": "Generate a number of returns plots around interesting points in time,\nlike the flash crash and 9/11.\n\nPlots: returns around the dotcom bubble burst, Lehmann Brothers' failure,\n9/11, US downgrade and EU debt crisis, Fukushima meltdown, US housing\nbubble burst, EZB IR, Great Recession (August 2007, March and September\nof 2008, Q1 & Q2 2009), flash crash, April and October 2014.\n\nbenchmark_rets must be passed, as it is meaningless to analyze performance\nduring interesting times without some benchmark to refer to.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\nbenchmark_rets : pd.Series\n Daily noncumulative returns of the benchmark.\n - This is in the same style as returns.\nlegend_loc : plt.legend_loc, optional\n The legend's location.\nreturn_fig : boolean, optional\n If True, returns the figure that was plotted on.", "id": "f12208:m7"} {"signature": "@plotting.customizedef create_capacity_tear_sheet(returns, positions, transactions,market_data,liquidation_daily_vol_limit=,trade_daily_vol_limit=,last_n_days=utils.APPROX_BDAYS_PER_MONTH * ,days_to_liquidate_limit=,estimate_intraday=''):", "body": "positions = utils.check_intraday(estimate_intraday, returns,positions, transactions)print(\"\"\"\"\"\"\"\"\"\")max_days_by_ticker = capacity.get_max_days_to_liquidate_by_ticker(positions, market_data,max_bar_consumption=liquidation_daily_vol_limit,capital_base=,mean_volume_window=)max_days_by_ticker.index = (max_days_by_ticker.index.map(utils.format_asset))print(\"\")utils.print_table(max_days_by_ticker[max_days_by_ticker.days_to_liquidate >days_to_liquidate_limit])max_days_by_ticker_lnd = capacity.get_max_days_to_liquidate_by_ticker(positions, 
market_data,max_bar_consumption=liquidation_daily_vol_limit,capital_base=,mean_volume_window=,last_n_days=last_n_days)max_days_by_ticker_lnd.index = (max_days_by_ticker_lnd.index.map(utils.format_asset))print(\"\".format(last_n_days))utils.print_table(max_days_by_ticker_lnd[max_days_by_ticker_lnd.days_to_liquidate > ])llt = capacity.get_low_liquidity_transactions(transactions, market_data)llt.index = llt.index.map(utils.format_asset)print(''''.format(trade_daily_vol_limit * ))utils.print_table(llt[llt[''] > trade_daily_vol_limit * ])llt = capacity.get_low_liquidity_transactions(transactions, market_data, last_n_days=last_n_days)print(\"\".format(last_n_days))utils.print_table(llt[llt[''] > trade_daily_vol_limit * ])bt_starting_capital = positions.iloc[].sum() / ( + returns.iloc[])fig, ax_capacity_sweep = plt.subplots(figsize=(, ))plotting.plot_capacity_sweep(returns, transactions, market_data,bt_starting_capital,min_pv=,max_pv=,step_size=,ax=ax_capacity_sweep)", "docstring": "Generates a report detailing portfolio size constraints set by\nleast liquid tickers. Plots a \"capacity sweep,\" a curve describing\nprojected sharpe ratio given the slippage penalties that are\napplied at various capital bases.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\npositions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\ntransactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in create_full_tear_sheet.\nmarket_data : pd.Panel\n Panel with items axis of 'price' and 'volume' DataFrames.\n The major and minor axes should match those of the\n the passed positions DataFrame (same dates and symbols).\nliquidation_daily_vol_limit : float\n Max proportion of a daily bar that can be consumed in the\n process of liquidating a position in the\n \"days to liquidation\" analysis.\ntrade_daily_vol_limit : float\n Flag daily transaction totals that exceed proportion of\n daily bar.\nlast_n_days : integer\n Compute max position allocation and dollar volume for only\n the last N days of the backtest\ndays_to_liquidate_limit : integer\n Display all tickers with greater max days to liquidation.\nestimate_intraday: boolean or str, optional\n Approximate returns for intraday strategies.\n See description in create_full_tear_sheet.", "id": "f12208:m8"} {"signature": "@plotting.customizedef create_bayesian_tear_sheet(returns, benchmark_rets=None,live_start_date=None, samples=,return_fig=False, stoch_vol=False,progressbar=True):", "body": "if not have_bayesian:raise NotImplementedError(\"\"\"\"\"\")if live_start_date is None:raise NotImplementedError('')live_start_date = ep.utils.get_utc_timestamp(live_start_date)df_train = returns.loc[returns.index < live_start_date]df_test = returns.loc[returns.index >= live_start_date]print(\"\")previous_time = time()start_time = previous_timetrace_t, ppc_t = bayesian.run_model('', df_train,returns_test=df_test,samples=samples, ppc=True,progressbar=progressbar)previous_time = timer(\"\", previous_time)print(\"\")trace_best = bayesian.run_model('', df_train,returns_test=df_test,samples=samples,progressbar=progressbar)previous_time = timer(\"\", previous_time)fig = plt.figure(figsize=(, * ))gs = gridspec.GridSpec(, , wspace=, hspace=)axs = []row = ax_cone = plt.subplot(gs[row, :])bayesian.plot_bayes_cone(df_train, df_test, ppc_t, ax=ax_cone)previous_time = timer(\"\", previous_time)row += 
axs.append(plt.subplot(gs[row, ]))axs.append(plt.subplot(gs[row, ]))row += axs.append(plt.subplot(gs[row, ]))axs.append(plt.subplot(gs[row, ]))row += axs.append(plt.subplot(gs[row, ]))axs.append(plt.subplot(gs[row, ]))row += axs.append(plt.subplot(gs[row, :]))bayesian.plot_best(trace=trace_best, axs=axs)previous_time = timer(\"\", previous_time)row += ax_ret_pred_day = plt.subplot(gs[row, ])ax_ret_pred_week = plt.subplot(gs[row, ])day_pred = ppc_t[:, ]p5 = scipy.stats.scoreatpercentile(day_pred, )sns.distplot(day_pred,ax=ax_ret_pred_day)ax_ret_pred_day.axvline(p5, linestyle='', linewidth=)ax_ret_pred_day.set_xlabel('')ax_ret_pred_day.set_ylabel('')ax_ret_pred_day.text(, , '' % p5,verticalalignment='',horizontalalignment='',transform=ax_ret_pred_day.transAxes)previous_time = timer(\"\", previous_time)week_pred = (np.cumprod(ppc_t[:, :] + , ) - )[:, -]p5 = scipy.stats.scoreatpercentile(week_pred, )sns.distplot(week_pred,ax=ax_ret_pred_week)ax_ret_pred_week.axvline(p5, linestyle='', linewidth=)ax_ret_pred_week.set_xlabel('')ax_ret_pred_week.set_ylabel('')ax_ret_pred_week.text(, , '' % p5,verticalalignment='',horizontalalignment='',transform=ax_ret_pred_week.transAxes)previous_time = timer(\"\", previous_time)if benchmark_rets is not None:print(\"\")benchmark_rets = benchmark_rets.loc[df_train.index]trace_alpha_beta = bayesian.run_model('', df_train,bmark=benchmark_rets,samples=samples,progressbar=progressbar)previous_time = timer(\"\", previous_time)row += ax_alpha = plt.subplot(gs[row, ])ax_beta = plt.subplot(gs[row, ])sns.distplot(( + trace_alpha_beta[''][:])** - ,ax=ax_alpha)sns.distplot(trace_alpha_beta[''][:], ax=ax_beta)ax_alpha.set_xlabel('')ax_alpha.set_ylabel('')ax_beta.set_xlabel('')ax_beta.set_ylabel('')previous_time = timer(\"\", previous_time)if stoch_vol:returns_cutoff = print(\"\"\"\".format(returns_cutoff))if df_train.size > returns_cutoff:df_train_truncated = df_train[-returns_cutoff:]_, trace_stoch_vol = bayesian.model_stoch_vol(df_train_truncated)previous_time = timer(\"\", previous_time)row += ax_volatility = plt.subplot(gs[row, :])bayesian.plot_stoch_vol(df_train_truncated, trace=trace_stoch_vol, ax=ax_volatility)previous_time = timer(\"\", previous_time)total_time = time() - start_timeprint(\"\".format(total_time))gs.tight_layout(fig)if return_fig:return fig", "docstring": "Generate a number of Bayesian distributions and a Bayesian\ncone plot of returns.\n\nPlots: Sharpe distribution, annual volatility distribution,\nannual alpha distribution, beta distribution, predicted 1 and 5\nday returns distributions, and a cumulative returns cone plot.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\nbenchmark_rets : pd.Series, optional\n Daily noncumulative returns of the benchmark.\n - This is in the same style as returns.\nlive_start_date : datetime, optional\n The point in time when the strategy began live\n trading, after its backtest period.\nsamples : int, optional\n Number of posterior samples to draw.\nreturn_fig : boolean, optional\n If True, returns the figure that was plotted on.\nstoch_vol : boolean, optional\n If True, run and plot the stochastic volatility model\nprogressbar : boolean, optional\n If True, show a progress bar", "id": "f12208:m9"} {"signature": "@plotting.customizedef 
create_risk_tear_sheet(positions,style_factor_panel=None,sectors=None,caps=None,shares_held=None,volumes=None,percentile=None,returns=None,transactions=None,estimate_intraday='',return_fig=False):", "body": "positions = utils.check_intraday(estimate_intraday, returns,positions, transactions)idx = positions.index & style_factor_panel.iloc[].index & sectors.index& caps.index & shares_held.index & volumes.indexpositions = positions.loc[idx]vertical_sections = if style_factor_panel is not None:vertical_sections += len(style_factor_panel.items)new_style_dict = {}for item in style_factor_panel.items:new_style_dict.update({item:style_factor_panel.loc[item].loc[idx]})style_factor_panel = pd.Panel()style_factor_panel = style_factor_panel.from_dict(new_style_dict)if sectors is not None:vertical_sections += sectors = sectors.loc[idx]if caps is not None:vertical_sections += caps = caps.loc[idx]if (shares_held is not None) & (volumes is not None)& (percentile is not None):vertical_sections += shares_held = shares_held.loc[idx]volumes = volumes.loc[idx]if percentile is None:percentile = fig = plt.figure(figsize=[, vertical_sections * ])gs = gridspec.GridSpec(vertical_sections, , wspace=, hspace=)if style_factor_panel is not None:style_axes = []style_axes.append(plt.subplot(gs[, :]))for i in range(, len(style_factor_panel.items)):style_axes.append(plt.subplot(gs[i, :], sharex=style_axes[]))j = for name, df in style_factor_panel.iteritems():sfe = risk.compute_style_factor_exposures(positions, df)risk.plot_style_factor_exposures(sfe, name, style_axes[j])j += if sectors is not None:i += ax_sector_longshort = plt.subplot(gs[i:i+, :], sharex=style_axes[])i += ax_sector_gross = plt.subplot(gs[i, :], sharex=style_axes[])i += ax_sector_net = plt.subplot(gs[i, :], sharex=style_axes[])long_exposures, short_exposures, gross_exposures, net_exposures= risk.compute_sector_exposures(positions, sectors)risk.plot_sector_exposures_longshort(long_exposures, short_exposures,ax=ax_sector_longshort)risk.plot_sector_exposures_gross(gross_exposures, ax=ax_sector_gross)risk.plot_sector_exposures_net(net_exposures, ax=ax_sector_net)if caps is not None:i += ax_cap_longshort = plt.subplot(gs[i:i+, :], sharex=style_axes[])i += ax_cap_gross = plt.subplot(gs[i, :], sharex=style_axes[])i += ax_cap_net = plt.subplot(gs[i, :], sharex=style_axes[])long_exposures, short_exposures, gross_exposures, net_exposures= risk.compute_cap_exposures(positions, caps)risk.plot_cap_exposures_longshort(long_exposures, short_exposures,ax_cap_longshort)risk.plot_cap_exposures_gross(gross_exposures, ax_cap_gross)risk.plot_cap_exposures_net(net_exposures, ax_cap_net)if volumes is not None:i += ax_vol_longshort = plt.subplot(gs[i:i+, :], sharex=style_axes[])i += ax_vol_gross = plt.subplot(gs[i, :], sharex=style_axes[])longed_threshold, shorted_threshold, grossed_threshold= risk.compute_volume_exposures(positions, volumes, percentile)risk.plot_volume_exposures_longshort(longed_threshold,shorted_threshold, percentile,ax_vol_longshort)risk.plot_volume_exposures_gross(grossed_threshold, percentile,ax_vol_gross)for ax in fig.axes:plt.setp(ax.get_xticklabels(), visible=True)if return_fig:return fig", "docstring": "Creates risk tear sheet: computes and plots style factor exposures, sector\nexposures, market cap exposures and volume exposures.\n\nParameters\n----------\npositions : pd.DataFrame\n Daily equity positions of algorithm, in dollars.\n - DataFrame with dates as index, equities as columns\n - Last column is cash held\n - Example:\n Equity(24 Equity(62\n 
[AAPL]) [ABT]) cash\n 2017-04-03\t-108062.40 \t 4401.540 2.247757e+07\n 2017-04-04\t-108852.00\t 4373.820 2.540999e+07\n 2017-04-05\t-119968.66\t 4336.200 2.839812e+07\n\nstyle_factor_panel : pd.Panel\n Panel where each item is a DataFrame that tabulates style factor per\n equity per day.\n - Each item has dates as index, equities as columns\n - Example item:\n Equity(24 Equity(62\n [AAPL]) [ABT])\n 2017-04-03\t -0.51284 1.39173\n 2017-04-04\t -0.73381 0.98149\n 2017-04-05\t -0.90132\t 1.13981\n\nsectors : pd.DataFrame\n Daily Morningstar sector code per asset\n - DataFrame with dates as index and equities as columns\n - Example:\n Equity(24 Equity(62\n [AAPL]) [ABT])\n 2017-04-03\t 311.0 206.0\n 2017-04-04\t 311.0 206.0\n 2017-04-05\t 311.0\t 206.0\n\ncaps : pd.DataFrame\n Daily market cap per asset\n - DataFrame with dates as index and equities as columns\n - Example:\n Equity(24 Equity(62\n [AAPL]) [ABT])\n 2017-04-03 1.327160e+10 6.402460e+10\n 2017-04-04\t 1.329620e+10 6.403694e+10\n 2017-04-05\t 1.297464e+10\t 6.397187e+10\n\nshares_held : pd.DataFrame\n Daily number of shares held by an algorithm.\n - Example:\n Equity(24 Equity(62\n [AAPL]) [ABT])\n 2017-04-03 1915 -2595\n 2017-04-04\t 1968 -3272\n 2017-04-05\t 2104 -3917\n\nvolumes : pd.DataFrame\n Daily volume per asset\n - DataFrame with dates as index and equities as columns\n - Example:\n Equity(24 Equity(62\n [AAPL]) [ABT])\n 2017-04-03 34940859.00 4665573.80\n 2017-04-04\t 35603329.10 4818463.90\n 2017-04-05\t 41846731.75\t 4129153.10\n\npercentile : float\n Percentile to use when computing and plotting volume exposures.\n - Defaults to 10th percentile", "id": "f12208:m10"} {"signature": "@plotting.customizedef create_perf_attrib_tear_sheet(returns,positions,factor_returns,factor_loadings,transactions=None,pos_in_dollars=True,return_fig=False,factor_partitions=FACTOR_PARTITIONS):", "body": "portfolio_exposures, perf_attrib_data = perf_attrib.perf_attrib(returns, positions, factor_returns, factor_loadings, transactions,pos_in_dollars=pos_in_dollars)display(Markdown(\"\"))perf_attrib.show_perf_attrib_stats(returns, positions, factor_returns,factor_loadings, transactions,pos_in_dollars)vertical_sections = + * max(len(factor_partitions), )current_section = fig = plt.figure(figsize=[, vertical_sections * ])gs = gridspec.GridSpec(vertical_sections, ,wspace=, hspace=)perf_attrib.plot_returns(perf_attrib_data,ax=plt.subplot(gs[current_section]))current_section += if factor_partitions is not None:for factor_type, partitions in factor_partitions.iteritems():columns_to_select = perf_attrib_data.columns.intersection(partitions)perf_attrib.plot_factor_contribution_to_perf(perf_attrib_data[columns_to_select],ax=plt.subplot(gs[current_section]),title=('').format(factor_type))current_section += for factor_type, partitions in factor_partitions.iteritems():perf_attrib.plot_risk_exposures(portfolio_exposures[portfolio_exposures.columns.intersection(partitions)],ax=plt.subplot(gs[current_section]),title=''.format(factor_type))current_section += else:perf_attrib.plot_factor_contribution_to_perf(perf_attrib_data,ax=plt.subplot(gs[current_section]))current_section += perf_attrib.plot_risk_exposures(portfolio_exposures,ax=plt.subplot(gs[current_section]))gs.tight_layout(fig)if return_fig:return fig", "docstring": "Generate plots and tables for analyzing a strategy's performance.\n\nParameters\n----------\nreturns : pd.Series\n Returns for each day in the date range.\n\npositions: pd.DataFrame\n Daily holdings (in dollars or percentages), indexed by 
date.\n Will be converted to percentages if positions are in dollars.\n Short positions show up as cash in the 'cash' column.\n\nfactor_returns : pd.DataFrame\n Returns by factor, with date as index and factors as columns\n\nfactor_loadings : pd.DataFrame\n Factor loadings for all days in the date range, with date\n and ticker as index, and factors as columns.\n\ntransactions : pd.DataFrame, optional\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in create_full_tear_sheet.\n - Default is None.\n\npos_in_dollars : boolean, optional\n Flag indicating whether `positions` are in dollars or percentages\n If True, positions are in dollars.\n\nreturn_fig : boolean, optional\n If True, returns the figure that was plotted on.\n\nfactor_partitions : dict\n dict specifying how factors should be separated in factor returns\n and risk exposures plots\n - Example:\n {'style': ['momentum', 'size', 'value', ...],\n 'sector': ['technology', 'materials', ... ]}", "id": "f12208:m11"} {"signature": "def daily_txns_with_bar_data(transactions, market_data):", "body": "transactions.index.name = ''txn_daily = pd.DataFrame(transactions.assign(amount=abs(transactions.amount)).groupby(['', pd.TimeGrouper('')]).sum()[''])txn_daily[''] = market_data[''].unstack()txn_daily[''] = market_data[''].unstack()txn_daily = txn_daily.reset_index().set_index('')return txn_daily", "docstring": "Sums the absolute value of shares traded in each name on each day.\nAdds columns containing the closing price and total daily volume for\neach day-ticker combination.\n\nParameters\n----------\ntransactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in tears.create_full_tear_sheet\nmarket_data : pd.Panel\n Contains \"volume\" and \"price\" DataFrames for the tickers\n in the passed positions DataFrames\n\nReturns\n-------\ntxn_daily : pd.DataFrame\n Daily totals for transacted shares in each traded name.\n price and volume columns for close price and daily volume for\n the corresponding ticker, respectively.", "id": "f12209:m0"} {"signature": "def days_to_liquidate_positions(positions, market_data,max_bar_consumption=,capital_base=,mean_volume_window=):", "body": "DV = market_data[''] * market_data['']roll_mean_dv = DV.rolling(window=mean_volume_window,center=False).mean().shift()roll_mean_dv = roll_mean_dv.replace(, np.nan)positions_alloc = pos.get_percent_alloc(positions)positions_alloc = positions_alloc.drop('', axis=)days_to_liquidate = (positions_alloc * capital_base) /(max_bar_consumption * roll_mean_dv)return days_to_liquidate.iloc[mean_volume_window:]", "docstring": "Compute the number of days that would have been required\nto fully liquidate each position on each day based on the\ntrailing n day mean daily bar volume and a limit on the proportion\nof a daily bar that we are allowed to consume.\n\nThis analysis uses portfolio allocations and a provided capital base\nrather than the dollar values in the positions DataFrame to remove the\neffect of compounding on days to liquidate. 
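A back-of-the-envelope version of the days-to-liquidate ratio used by days_to_liquidate_positions, with invented numbers; the function applies the same ratio across a DataFrame of daily allocations and trailing mean daily dollar volumes.

    # Assumed inputs, for illustration only.
    capital_base = 1_000_000          # fixed net liquidation value, in dollars
    max_bar_consumption = 0.2         # at most 20% of a daily bar may be consumed
    alloc = 0.05                      # 5% of the portfolio held in one name
    mean_dollar_volume = 2_000_000.0  # trailing mean daily dollar volume for that name

    days_to_liquidate = (alloc * capital_base) / (max_bar_consumption * mean_dollar_volume)
    print(days_to_liquidate)  # 0.125 days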
In other words, this function\nassumes that the net liquidation portfolio value will always remain\nconstant at capital_base.\n\nParameters\n----------\npositions: pd.DataFrame\n Contains daily position values including cash\n - See full explanation in tears.create_full_tear_sheet\nmarket_data : pd.Panel\n Panel with items axis of 'price' and 'volume' DataFrames.\n The major and minor axes should match those of the\n the passed positions DataFrame (same dates and symbols).\nmax_bar_consumption : float\n Max proportion of a daily bar that can be consumed in the\n process of liquidating a position.\ncapital_base : integer\n Capital base multiplied by portfolio allocation to compute\n position value that needs liquidating.\nmean_volume_window : float\n Trailing window to use in mean volume calculation.\n\nReturns\n-------\ndays_to_liquidate : pd.DataFrame\n Number of days required to fully liquidate daily positions.\n Datetime index, symbols as columns.", "id": "f12209:m1"} {"signature": "def get_max_days_to_liquidate_by_ticker(positions, market_data,max_bar_consumption=,capital_base=,mean_volume_window=,last_n_days=None):", "body": "dtlp = days_to_liquidate_positions(positions, market_data,max_bar_consumption=max_bar_consumption,capital_base=capital_base,mean_volume_window=mean_volume_window)if last_n_days is not None:dtlp = dtlp.loc[dtlp.index.max() - pd.Timedelta(days=last_n_days):]pos_alloc = pos.get_percent_alloc(positions)pos_alloc = pos_alloc.drop('', axis=)liq_desc = pd.DataFrame()liq_desc[''] = dtlp.unstack()liq_desc[''] = pos_alloc.unstack() * liq_desc.index.levels[].name = ''liq_desc.index.levels[].name = ''worst_liq = liq_desc.reset_index().sort_values('', ascending=False).groupby('').first()return worst_liq", "docstring": "Finds the longest estimated liquidation time for each traded\nname over the course of backtest (or last n days of the backtest).\n\nParameters\n----------\npositions: pd.DataFrame\n Contains daily position values including cash\n - See full explanation in tears.create_full_tear_sheet\nmarket_data : pd.Panel\n Panel with items axis of 'price' and 'volume' DataFrames.\n The major and minor axes should match those of the\n the passed positions DataFrame (same dates and symbols).\nmax_bar_consumption : float\n Max proportion of a daily bar that can be consumed in the\n process of liquidating a position.\ncapital_base : integer\n Capital base multiplied by portfolio allocation to compute\n position value that needs liquidating.\nmean_volume_window : float\n Trailing window to use in mean volume calculation.\nlast_n_days : integer\n Compute for only the last n days of the passed backtest data.\n\nReturns\n-------\ndays_to_liquidate : pd.DataFrame\n Max Number of days required to fully liquidate each traded name.\n Index of symbols. 
Columns for days_to_liquidate and the corresponding\n date and position_alloc on that day.", "id": "f12209:m2"} {"signature": "def get_low_liquidity_transactions(transactions, market_data,last_n_days=None):", "body": "txn_daily_w_bar = daily_txns_with_bar_data(transactions, market_data)txn_daily_w_bar.index.name = ''txn_daily_w_bar = txn_daily_w_bar.reset_index()if last_n_days is not None:md = txn_daily_w_bar.date.max() - pd.Timedelta(days=last_n_days)txn_daily_w_bar = txn_daily_w_bar[txn_daily_w_bar.date > md]bar_consumption = txn_daily_w_bar.assign(max_pct_bar_consumed=(txn_daily_w_bar.amount/txn_daily_w_bar.volume)*).sort_values('', ascending=False)max_bar_consumption = bar_consumption.groupby('').first()return max_bar_consumption[['', '']]", "docstring": "For each traded name, find the daily transaction total that consumed\nthe greatest proportion of available daily bar volume.\n\nParameters\n----------\ntransactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in create_full_tear_sheet.\nmarket_data : pd.Panel\n Panel with items axis of 'price' and 'volume' DataFrames.\n The major and minor axes should match those of the\n the passed positions DataFrame (same dates and symbols).\nlast_n_days : integer\n Compute for only the last n days of the passed backtest data.", "id": "f12209:m3"} {"signature": "def customize(func):", "body": "@wraps(func)def call_w_context(*args, **kwargs):set_context = kwargs.pop('', True)if set_context:with plotting_context(), axes_style():return func(*args, **kwargs)else:return func(*args, **kwargs)return call_w_context", "docstring": "Decorator to set plotting context and axes style during function call.", "id": "f12210:m0"} {"signature": "def plotting_context(context='', font_scale=, rc=None):", "body": "if rc is None:rc = {}rc_default = {'': }for name, val in rc_default.items():rc.setdefault(name, val)return sns.plotting_context(context=context, font_scale=font_scale, rc=rc)", "docstring": "Create pyfolio default plotting style context.\n\nUnder the hood, calls and returns seaborn.plotting_context() with\nsome custom settings. Usually you would use in a with-context.\n\nParameters\n----------\ncontext : str, optional\n Name of seaborn context.\nfont_scale : float, optional\n Scale font by factor font_scale.\nrc : dict, optional\n Config flags.\n By default, {'lines.linewidth': 1.5}\n is being used and will be added to any\n rc passed in, unless explicitly overriden.\n\nReturns\n-------\nseaborn plotting context\n\nExample\n-------\n>>> with pyfolio.plotting.plotting_context(font_scale=2):\n>>> pyfolio.create_full_tear_sheet(..., set_context=False)\n\nSee also\n--------\nFor more information, see seaborn.plotting_context().", "id": "f12210:m1"} {"signature": "def axes_style(style='', rc=None):", "body": "if rc is None:rc = {}rc_default = {}for name, val in rc_default.items():rc.setdefault(name, val)return sns.axes_style(style=style, rc=rc)", "docstring": "Create pyfolio default axes style context.\n\nUnder the hood, calls and returns seaborn.axes_style() with\nsome custom settings. 
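A usage sketch combining plotting_context and axes_style, mirroring the docstring examples above; the synthetic returns series is illustrative, and set_context=False avoids applying the default context a second time inside the tear sheet.

    import numpy as np
    import pandas as pd
    import pyfolio

    # Stand-in daily returns, for illustration only.
    rets = pd.Series(np.random.randn(252) / 100,
                     index=pd.date_range('2015-01-02', periods=252, freq='B'))

    with pyfolio.plotting.plotting_context(font_scale=2), \
            pyfolio.plotting.axes_style(style='whitegrid'):
        pyfolio.create_full_tear_sheet(rets, set_context=False)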
Usually you would use in a with-context.\n\nParameters\n----------\nstyle : str, optional\n Name of seaborn style.\nrc : dict, optional\n Config flags.\n\nReturns\n-------\nseaborn plotting context\n\nExample\n-------\n>>> with pyfolio.plotting.axes_style(style='whitegrid'):\n>>> pyfolio.create_full_tear_sheet(..., set_context=False)\n\nSee also\n--------\nFor more information, see seaborn.plotting_context().", "id": "f12210:m2"} {"signature": "def plot_monthly_returns_heatmap(returns, ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()monthly_ret_table = ep.aggregate_returns(returns, '')monthly_ret_table = monthly_ret_table.unstack().round()sns.heatmap(monthly_ret_table.fillna() *,annot=True,annot_kws={\"\": },alpha=,center=,cbar=False,cmap=matplotlib.cm.RdYlGn,ax=ax, **kwargs)ax.set_ylabel('')ax.set_xlabel('')ax.set_title(\"\")return ax", "docstring": "Plots a heatmap of returns by month.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to seaborn plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m3"} {"signature": "def plot_annual_returns(returns, ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()x_axis_formatter = FuncFormatter(utils.percentage)ax.xaxis.set_major_formatter(FuncFormatter(x_axis_formatter))ax.tick_params(axis='', which='')ann_ret_df = pd.DataFrame(ep.aggregate_returns(returns,''))ax.axvline( *ann_ret_df.values.mean(),color='',linestyle='',lw=,alpha=)( * ann_ret_df.sort_index(ascending=False)).plot(ax=ax, kind='', alpha=, **kwargs)ax.axvline(, color='', linestyle='', lw=)ax.set_ylabel('')ax.set_xlabel('')ax.set_title(\"\")ax.legend([''], frameon=True, framealpha=)return ax", "docstring": "Plots a bar graph of returns by year.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m4"} {"signature": "def plot_monthly_returns_dist(returns, ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()x_axis_formatter = FuncFormatter(utils.percentage)ax.xaxis.set_major_formatter(FuncFormatter(x_axis_formatter))ax.tick_params(axis='', which='')monthly_ret_table = ep.aggregate_returns(returns, '')ax.hist( * monthly_ret_table,color='',alpha=,bins=,**kwargs)ax.axvline( * monthly_ret_table.mean(),color='',linestyle='',lw=,alpha=)ax.axvline(, color='', linestyle='', lw=, alpha=)ax.legend([''], frameon=True, framealpha=)ax.set_ylabel('')ax.set_xlabel('')ax.set_title(\"\")return ax", "docstring": "Plots a distribution of monthly returns.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m5"} {"signature": "def plot_holdings(returns, positions, legend_loc='', ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()positions = positions.copy().drop('', axis='')df_holdings = positions.replace(, 
np.nan).count(axis=)df_holdings_by_month = df_holdings.resample('').mean()df_holdings.plot(color='', alpha=, lw=, ax=ax, **kwargs)df_holdings_by_month.plot(color='',lw=,ax=ax,**kwargs)ax.axhline(df_holdings.values.mean(),color='',ls='',lw=)ax.set_xlim((returns.index[], returns.index[-]))leg = ax.legend(['','',''],loc=legend_loc, frameon=True,framealpha=)leg.get_frame().set_edgecolor('')ax.set_title('')ax.set_ylabel('')ax.set_xlabel('')return ax", "docstring": "Plots total amount of stocks with an active position, either short\nor long. Displays daily total, daily average per month, and\nall-time daily average.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\npositions : pd.DataFrame, optional\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\nlegend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m6"} {"signature": "def plot_long_short_holdings(returns, positions,legend_loc='', ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()positions = positions.drop('', axis='')positions = positions.replace(, np.nan)df_longs = positions[positions > ].count(axis=)df_shorts = positions[positions < ].count(axis=)lf = ax.fill_between(df_longs.index, , df_longs.values,color='', alpha=, lw=)sf = ax.fill_between(df_shorts.index, , df_shorts.values,color='', alpha=, lw=)bf = patches.Rectangle([, ], , , color='')leg = ax.legend([lf, sf, bf],['' % (df_longs.max(),df_longs.min()),'' % (df_shorts.max(),df_shorts.min()),''], loc=legend_loc, frameon=True,framealpha=)leg.get_frame().set_edgecolor('')ax.set_xlim((returns.index[], returns.index[-]))ax.set_title('')ax.set_ylabel('')ax.set_xlabel('')return ax", "docstring": "Plots total amount of stocks with an active position, breaking out\nshort and long into transparent filled regions.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\npositions : pd.DataFrame, optional\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\nlegend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m7"} {"signature": "def plot_drawdown_periods(returns, top=, ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()y_axis_formatter = FuncFormatter(utils.two_dec_places)ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))df_cum_rets = ep.cum_returns(returns, starting_value=)df_drawdowns = timeseries.gen_drawdown_table(returns, top=top)df_cum_rets.plot(ax=ax, **kwargs)lim = ax.get_ylim()colors = sns.cubehelix_palette(len(df_drawdowns))[::-]for i, (peak, recovery) in df_drawdowns[['', '']].iterrows():if pd.isnull(recovery):recovery = returns.index[-]ax.fill_between((peak, recovery),lim[],lim[],alpha=,color=colors[i])ax.set_ylim(lim)ax.set_title('' % top)ax.set_ylabel('')ax.legend([''], loc='',frameon=True, framealpha=)ax.set_xlabel('')return ax", "docstring": "Plots cumulative returns highlighting top drawdown periods.\n\nParameters\n----------\nreturns : 
pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\ntop : int, optional\n Number of top drawdown periods to plot (default 10).\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m8"} {"signature": "def plot_drawdown_underwater(returns, ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()y_axis_formatter = FuncFormatter(utils.percentage)ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))df_cum_rets = ep.cum_returns(returns, starting_value=)running_max = np.maximum.accumulate(df_cum_rets)underwater = - * ((running_max - df_cum_rets) / running_max)(underwater).plot(ax=ax, kind='', color='', alpha=, **kwargs)ax.set_ylabel('')ax.set_title('')ax.set_xlabel('')return ax", "docstring": "Plots how far underwater returns are over time, or plots current\ndrawdown vs. date.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m9"} {"signature": "def plot_perf_stats(returns, factor_returns, ax=None):", "body": "if ax is None:ax = plt.gca()bootstrap_values = timeseries.perf_stats_bootstrap(returns,factor_returns,return_stats=False)bootstrap_values = bootstrap_values.drop('', axis='')sns.boxplot(data=bootstrap_values, orient='', ax=ax)return ax", "docstring": "Create box plot of some performance metrics of the strategy.\nThe width of the box whiskers is determined by a bootstrap.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nfactor_returns : pd.Series\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. 
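The underwater series drawn by plot_drawdown_underwater can be reproduced with plain pandas/numpy; a sketch with made-up returns, equivalent in spirit to the function body above.

    import numpy as np
    import pandas as pd

    rets = pd.Series([0.01, -0.02, 0.005, -0.03, 0.04])   # invented daily returns
    cum = (1 + rets).cumprod()                            # growth of $1 (cumulative returns with a starting value of 1)
    running_max = np.maximum.accumulate(cum)              # running peak
    underwater = -100 * (running_max - cum) / running_max # percent below the running peak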
Usually a benchmark such as market returns.\n - This is in the same style as returns.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m10"} {"signature": "def show_perf_stats(returns, factor_returns=None, positions=None,transactions=None, turnover_denom='',live_start_date=None, bootstrap=False,header_rows=None):", "body": "if bootstrap:perf_func = timeseries.perf_stats_bootstrapelse:perf_func = timeseries.perf_statsperf_stats_all = perf_func(returns,factor_returns=factor_returns,positions=positions,transactions=transactions,turnover_denom=turnover_denom)date_rows = OrderedDict()if len(returns.index) > :date_rows[''] = returns.index[].strftime('')date_rows[''] = returns.index[-].strftime('')if live_start_date is not None:live_start_date = ep.utils.get_utc_timestamp(live_start_date)returns_is = returns[returns.index < live_start_date]returns_oos = returns[returns.index >= live_start_date]positions_is = Nonepositions_oos = Nonetransactions_is = Nonetransactions_oos = Noneif positions is not None:positions_is = positions[positions.index < live_start_date]positions_oos = positions[positions.index >= live_start_date]if transactions is not None:transactions_is = transactions[(transactions.index <live_start_date)]transactions_oos = transactions[(transactions.index >live_start_date)]perf_stats_is = perf_func(returns_is,factor_returns=factor_returns,positions=positions_is,transactions=transactions_is,turnover_denom=turnover_denom)perf_stats_oos = perf_func(returns_oos,factor_returns=factor_returns,positions=positions_oos,transactions=transactions_oos,turnover_denom=turnover_denom)if len(returns.index) > :date_rows[''] = int(len(returns_is) /APPROX_BDAYS_PER_MONTH)date_rows[''] = int(len(returns_oos) /APPROX_BDAYS_PER_MONTH)perf_stats = pd.concat(OrderedDict([('', perf_stats_is),('', perf_stats_oos),('', perf_stats_all),]), axis=)else:if len(returns.index) > :date_rows[''] = int(len(returns) /APPROX_BDAYS_PER_MONTH)perf_stats = pd.DataFrame(perf_stats_all, columns=[''])for column in perf_stats.columns:for stat, value in perf_stats[column].iteritems():if stat in STAT_FUNCS_PCT:perf_stats.loc[stat, column] = str(np.round(value * ,)) + ''if header_rows is None:header_rows = date_rowselse:header_rows = OrderedDict(header_rows)header_rows.update(date_rows)utils.print_table(perf_stats,float_format=''.format,header_rows=header_rows,)", "docstring": "Prints some performance metrics of the strategy.\n\n- Shows amount of time the strategy has been run in backtest and\n out-of-sample (in live trading).\n\n- Shows Omega ratio, max drawdown, Calmar ratio, annual return,\n stability, Sharpe ratio, annual volatility, alpha, and beta.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nfactor_returns : pd.Series, optional\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. Usually a benchmark such as market returns.\n - This is in the same style as returns.\npositions : pd.DataFrame, optional\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\ntransactions : pd.DataFrame, optional\n Prices and amounts of executed trades. 
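A sketch of show_perf_stats with custom header_rows; the strategy and benchmark series and the header labels are invented for illustration.

    from collections import OrderedDict
    import numpy as np
    import pandas as pd
    import pyfolio

    idx = pd.date_range('2015-01-02', periods=252, freq='B')
    rets = pd.Series(np.random.randn(252) / 100, index=idx)   # stand-in strategy returns
    bench = pd.Series(np.random.randn(252) / 100, index=idx)  # stand-in benchmark returns

    extra = OrderedDict([('Strategy', 'Example long/short'),
                         ('Universe', 'US equities')])
    pyfolio.plotting.show_perf_stats(rets, bench, header_rows=extra)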
One row per trade.\n - See full explanation in tears.create_full_tear_sheet\nturnover_denom : str, optional\n Either AGB or portfolio_value, default AGB.\n - See full explanation in txn.get_turnover.\nlive_start_date : datetime, optional\n The point in time when the strategy began live trading, after\n its backtest period.\nbootstrap : boolean, optional\n Whether to perform bootstrap analysis for the performance\n metrics.\n - For more information, see timeseries.perf_stats_bootstrap\nheader_rows : dict or OrderedDict, optional\n Extra rows to display at the top of the displayed table.", "id": "f12210:m11"} {"signature": "def plot_returns(returns,live_start_date=None,ax=None):", "body": "if ax is None:ax = plt.gca()ax.set_label('')ax.set_ylabel('')if live_start_date is not None:live_start_date = ep.utils.get_utc_timestamp(live_start_date)is_returns = returns.loc[returns.index < live_start_date]oos_returns = returns.loc[returns.index >= live_start_date]is_returns.plot(ax=ax, color='')oos_returns.plot(ax=ax, color='')else:returns.plot(ax=ax, color='')return ax", "docstring": "Plots raw returns over time.\n\nBacktest returns are in green, and out-of-sample (live trading)\nreturns are in red.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nlive_start_date : datetime, optional\n The date when the strategy began live trading, after\n its backtest period. This date should be normalized.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m12"} {"signature": "def plot_rolling_returns(returns,factor_returns=None,live_start_date=None,logy=False,cone_std=None,legend_loc='',volatility_match=False,cone_function=timeseries.forecast_cone_bootstrap,ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()ax.set_xlabel('')ax.set_ylabel('')ax.set_yscale('' if logy else '')if volatility_match and factor_returns is None:raise ValueError('''')elif volatility_match and factor_returns is not None:bmark_vol = factor_returns.loc[returns.index].std()returns = (returns / returns.std()) * bmark_volcum_rets = ep.cum_returns(returns, )y_axis_formatter = FuncFormatter(utils.two_dec_places)ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))if factor_returns is not None:cum_factor_returns = ep.cum_returns(factor_returns[cum_rets.index], )cum_factor_returns.plot(lw=, color='',label=factor_returns.name, alpha=,ax=ax, **kwargs)if live_start_date is not None:live_start_date = ep.utils.get_utc_timestamp(live_start_date)is_cum_returns = cum_rets.loc[cum_rets.index < live_start_date]oos_cum_returns = cum_rets.loc[cum_rets.index >= live_start_date]else:is_cum_returns = cum_retsoos_cum_returns = pd.Series([])is_cum_returns.plot(lw=, color='', alpha=,label='', ax=ax, **kwargs)if len(oos_cum_returns) > :oos_cum_returns.plot(lw=, color='', alpha=,label='', ax=ax, **kwargs)if cone_std is not None:if isinstance(cone_std, (float, int)):cone_std = [cone_std]is_returns = returns.loc[returns.index < live_start_date]cone_bounds = cone_function(is_returns,len(oos_cum_returns),cone_std=cone_std,starting_value=is_cum_returns[-])cone_bounds = cone_bounds.set_index(oos_cum_returns.index)for std in cone_std:ax.fill_between(cone_bounds.index,cone_bounds[float(std)],cone_bounds[float(-std)],color='', alpha=)if legend_loc is not None:ax.legend(loc=legend_loc, frameon=True, 
framealpha=)ax.axhline(, linestyle='', color='', lw=)return ax", "docstring": "Plots cumulative rolling returns versus some benchmarks'.\n\nBacktest returns are in green, and out-of-sample (live trading)\nreturns are in red.\n\nAdditionally, a non-parametric cone plot may be added to the\nout-of-sample returns region.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nfactor_returns : pd.Series, optional\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. Usually a benchmark such as market returns.\n - This is in the same style as returns.\nlive_start_date : datetime, optional\n The date when the strategy began live trading, after\n its backtest period. This date should be normalized.\nlogy : bool, optional\n Whether to log-scale the y-axis.\ncone_std : float, or tuple, optional\n If float, The standard deviation to use for the cone plots.\n If tuple, Tuple of standard deviation values to use for the cone plots\n - See timeseries.forecast_cone_bounds for more details.\nlegend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\nvolatility_match : bool, optional\n Whether to normalize the volatility of the returns to those of the\n benchmark returns. This helps compare strategies with different\n volatilities. Requires passing of benchmark_rets.\ncone_function : function, optional\n Function to use when generating forecast probability cone.\n The function signiture must follow the form:\n def cone(in_sample_returns (pd.Series),\n days_to_project_forward (int),\n cone_std= (float, or tuple),\n starting_value= (int, or float))\n See timeseries.forecast_cone_bootstrap for an example.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m13"} {"signature": "def plot_rolling_beta(returns, factor_returns, legend_loc='',ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()y_axis_formatter = FuncFormatter(utils.two_dec_places)ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))ax.set_title(\"\" + str(factor_returns.name))ax.set_ylabel('')rb_1 = timeseries.rolling_beta(returns, factor_returns, rolling_window=APPROX_BDAYS_PER_MONTH * )rb_1.plot(color='', lw=, alpha=, ax=ax, **kwargs)rb_2 = timeseries.rolling_beta(returns, factor_returns, rolling_window=APPROX_BDAYS_PER_MONTH * )rb_2.plot(color='', lw=, alpha=, ax=ax, **kwargs)ax.axhline(rb_1.mean(), color='', linestyle='', lw=)ax.axhline(, color='', linestyle='', lw=)ax.set_xlabel('')ax.legend(['',''],loc=legend_loc, frameon=True, framealpha=)ax.set_ylim((-, ))return ax", "docstring": "Plots the rolling 6-month and 12-month beta versus date.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nfactor_returns : pd.Series\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. 
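The rolling-beta plot above draws two betas computed over different windows. A hedged sketch of the underlying coefficient, computed as rolling covariance divided by rolling benchmark variance; the six-month default window and the 21-business-days-per-month constant are assumptions consistent with the APPROX_BDAYS_PER_MONTH name used in the records:

APPROX_BDAYS_PER_MONTH = 21  # assumed value of the constant referenced above

def rolling_beta(returns, factor_returns, rolling_window=APPROX_BDAYS_PER_MONTH * 6):
    # Rolling OLS beta of strategy returns (pd.Series) against benchmark returns (pd.Series):
    # covariance of the two over the window divided by the benchmark's variance.
    cov = returns.rolling(rolling_window).cov(factor_returns)
    var = factor_returns.rolling(rolling_window).var()
    return cov / var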
Usually a benchmark such as market returns.\n - This is in the same style as returns.\nlegend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m14"} {"signature": "def plot_rolling_volatility(returns, factor_returns=None,rolling_window=APPROX_BDAYS_PER_MONTH * ,legend_loc='', ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()y_axis_formatter = FuncFormatter(utils.two_dec_places)ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))rolling_vol_ts = timeseries.rolling_volatility(returns, rolling_window)rolling_vol_ts.plot(alpha=, lw=, color='', ax=ax,**kwargs)if factor_returns is not None:rolling_vol_ts_factor = timeseries.rolling_volatility(factor_returns, rolling_window)rolling_vol_ts_factor.plot(alpha=, lw=, color='', ax=ax,**kwargs)ax.set_title('')ax.axhline(rolling_vol_ts.mean(),color='',linestyle='',lw=)ax.axhline(, color='', linestyle='', lw=)ax.set_ylabel('')ax.set_xlabel('')if factor_returns is None:ax.legend(['', ''],loc=legend_loc, frameon=True, framealpha=)else:ax.legend(['', '', ''],loc=legend_loc, frameon=True, framealpha=)return ax", "docstring": "Plots the rolling volatility versus date.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nfactor_returns : pd.Series, optional\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. Usually a benchmark such as market returns.\n - This is in the same style as returns.\nrolling_window : int, optional\n The days window over which to compute the volatility.\nlegend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m15"} {"signature": "def plot_rolling_sharpe(returns, factor_returns=None,rolling_window=APPROX_BDAYS_PER_MONTH * ,legend_loc='', ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()y_axis_formatter = FuncFormatter(utils.two_dec_places)ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))rolling_sharpe_ts = timeseries.rolling_sharpe(returns, rolling_window)rolling_sharpe_ts.plot(alpha=, lw=, color='', ax=ax,**kwargs)if factor_returns is not None:rolling_sharpe_ts_factor = timeseries.rolling_sharpe(factor_returns, rolling_window)rolling_sharpe_ts_factor.plot(alpha=, lw=, color='', ax=ax,**kwargs)ax.set_title('')ax.axhline(rolling_sharpe_ts.mean(),color='',linestyle='',lw=)ax.axhline(, color='', linestyle='', lw=)ax.set_ylabel('')ax.set_xlabel('')if factor_returns is None:ax.legend(['', ''],loc=legend_loc, frameon=True, framealpha=)else:ax.legend(['', '', ''],loc=legend_loc, frameon=True, framealpha=)return ax", "docstring": "Plots the rolling Sharpe ratio versus date.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nfactor_returns : pd.Series, optional\n Daily noncumulative returns of the benchmark factor for\n which the benchmark rolling Sharpe is computed. 
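plot_rolling_volatility delegates the calculation to timeseries.rolling_volatility. A minimal sketch of what that helper is assumed to compute; the default window and the 252-day annualization factor are assumptions:

def rolling_volatility(returns, rolling_window=126, trading_days=252):
    # Annualized rolling standard deviation of daily returns (pd.Series).
    return returns.rolling(rolling_window).std() * (trading_days ** 0.5)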
Usually\n a benchmark such as market returns.\n - This is in the same style as returns.\nrolling_window : int, optional\n The days window over which to compute the sharpe ratio.\nlegend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m16"} {"signature": "def plot_gross_leverage(returns, positions, ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()gl = timeseries.gross_lev(positions)gl.plot(lw=, color='', legend=False, ax=ax, **kwargs)ax.axhline(gl.mean(), color='', linestyle='', lw=)ax.set_title('')ax.set_ylabel('')ax.set_xlabel('')return ax", "docstring": "Plots gross leverage versus date.\n\nGross leverage is the sum of long and short exposure per share\ndivided by net asset value.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\npositions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m17"} {"signature": "def plot_exposures(returns, positions, ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()pos_no_cash = positions.drop('', axis=)l_exp = pos_no_cash[pos_no_cash > ].sum(axis=) / positions.sum(axis=)s_exp = pos_no_cash[pos_no_cash < ].sum(axis=) / positions.sum(axis=)net_exp = pos_no_cash.sum(axis=) / positions.sum(axis=)ax.fill_between(l_exp.index,,l_exp.values,label='', color='', alpha=)ax.fill_between(s_exp.index,,s_exp.values,label='', color='', alpha=)ax.plot(net_exp.index, net_exp.values,label='', color='', linestyle='')ax.set_xlim((returns.index[], returns.index[-]))ax.set_title(\"\")ax.set_ylabel('')ax.legend(loc='', frameon=True, framealpha=)ax.set_xlabel('')return ax", "docstring": "Plots a cake chart of the long and short exposure.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\npositions_alloc : pd.DataFrame\n Portfolio allocation of positions. 
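The rolling Sharpe plot just described follows the same pattern. A hedged sketch of timeseries.rolling_sharpe, assuming a zero risk-free rate and a 252-day annualization factor:

def rolling_sharpe(returns, rolling_window=126, trading_days=252):
    # Annualized rolling Sharpe ratio of daily returns (pd.Series).
    mean = returns.rolling(rolling_window).mean()
    std = returns.rolling(rolling_window).std()
    return mean / std * (trading_days ** 0.5)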
See\n pos.get_percent_alloc.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m18"} {"signature": "def show_and_plot_top_positions(returns, positions_alloc,show_and_plot=, hide_positions=False,legend_loc='', ax=None,**kwargs):", "body": "positions_alloc = positions_alloc.copy()positions_alloc.columns = positions_alloc.columns.map(utils.format_asset)df_top_long, df_top_short, df_top_abs = pos.get_top_long_short_abs(positions_alloc)if show_and_plot == or show_and_plot == :utils.print_table(pd.DataFrame(df_top_long * , columns=['']),float_format=''.format,name='')utils.print_table(pd.DataFrame(df_top_short * , columns=['']),float_format=''.format,name='')utils.print_table(pd.DataFrame(df_top_abs * , columns=['']),float_format=''.format,name='')if show_and_plot == or show_and_plot == :if ax is None:ax = plt.gca()positions_alloc[df_top_abs.index].plot(title='',alpha=, ax=ax, **kwargs)if legend_loc == '':box = ax.get_position()ax.set_position([box.x0, box.y0 + box.height * ,box.width, box.height * ])ax.legend(loc='', frameon=True, framealpha=,bbox_to_anchor=(, -), ncol=)else:ax.legend(loc=legend_loc)ax.set_xlim((returns.index[], returns.index[-]))ax.set_ylabel('')if hide_positions:ax.legend_.remove()return ax", "docstring": "Prints and/or plots the exposures of the top 10 held positions of\nall time.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\npositions_alloc : pd.DataFrame\n Portfolio allocation of positions. See pos.get_percent_alloc.\nshow_and_plot : int, optional\n By default, this is 2, and both prints and plots.\n If this is 0, it will only plot; if 1, it will only print.\nhide_positions : bool, optional\n If True, will not output any symbol names.\nlegend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\n By default, the legend will display below the plot.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes, conditional\n The axes that were plotted on.", "id": "f12210:m19"} {"signature": "def plot_max_median_position_concentration(positions, ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()alloc_summary = pos.get_max_median_position_concentration(positions)colors = ['', '', '', '']alloc_summary.plot(linewidth=, color=colors, alpha=, ax=ax)ax.legend(loc='', frameon=True, framealpha=)ax.set_ylabel('')ax.set_title('')return ax", "docstring": "Plots the max and median of long and short position concentrations\nover the time.\n\nParameters\n----------\npositions : pd.DataFrame\n The positions that the strategy takes over time.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m20"} {"signature": "def plot_sector_allocations(returns, sector_alloc, ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()sector_alloc.plot(title='',alpha=, ax=ax, **kwargs)box = ax.get_position()ax.set_position([box.x0, box.y0 + box.height * ,box.width, box.height * ])ax.legend(loc='', frameon=True, framealpha=,bbox_to_anchor=(, -), ncol=)ax.set_xlim((sector_alloc.index[], sector_alloc.index[-]))ax.set_ylabel('')ax.set_xlabel('')return ax", "docstring": "Plots the sector exposures of the portfolio over 
time.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nsector_alloc : pd.DataFrame\n Portfolio allocation of positions. See pos.get_sector_alloc.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m21"} {"signature": "def plot_return_quantiles(returns, live_start_date=None, ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()is_returns = returns if live_start_date is Noneelse returns.loc[returns.index < live_start_date]is_weekly = ep.aggregate_returns(is_returns, '')is_monthly = ep.aggregate_returns(is_returns, '')sns.boxplot(data=[is_returns, is_weekly, is_monthly],palette=[\"\", \"\", \"\"],ax=ax, **kwargs)if live_start_date is not None:oos_returns = returns.loc[returns.index >= live_start_date]oos_weekly = ep.aggregate_returns(oos_returns, '')oos_monthly = ep.aggregate_returns(oos_returns, '')sns.swarmplot(data=[oos_returns, oos_weekly, oos_monthly], ax=ax,color=\"\",marker=\"\", **kwargs)red_dots = matplotlib.lines.Line2D([], [], color=\"\", marker=\"\",label=\"\",linestyle='')ax.legend(handles=[red_dots], frameon=True, framealpha=)ax.set_xticklabels(['', '', ''])ax.set_title('')return ax", "docstring": "Creates a box plot of daily, weekly, and monthly return\ndistributions.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nlive_start_date : datetime, optional\n The point in time when the strategy began live trading, after\n its backtest period.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to seaborn plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m22"} {"signature": "def plot_turnover(returns, transactions, positions,legend_loc='', ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()y_axis_formatter = FuncFormatter(utils.two_dec_places)ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))df_turnover = txn.get_turnover(positions, transactions)df_turnover_by_month = df_turnover.resample(\"\").mean()df_turnover.plot(color='', alpha=, lw=, ax=ax, **kwargs)df_turnover_by_month.plot(color='',alpha=,lw=,ax=ax,**kwargs)ax.axhline(df_turnover.mean(), color='', linestyle='', lw=, alpha=)ax.legend(['','',''],loc=legend_loc, frameon=True, framealpha=)ax.set_title('')ax.set_xlim((returns.index[], returns.index[-]))ax.set_ylim((, ))ax.set_ylabel('')ax.set_xlabel('')return ax", "docstring": "Plots turnover vs. date.\n\nTurnover is the number of shares traded for a period as a fraction\nof total shares.\n\nDisplays daily total, daily average per month, and all-time daily\naverage.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\ntransactions : pd.DataFrame\n Prices and amounts of executed trades. 
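plot_return_quantiles compares daily, weekly, and monthly return distributions; the aggregation step it relies on (via ep.aggregate_returns) amounts to compounding daily returns within each calendar period. A minimal pandas equivalent, offered as an illustration rather than the library's implementation:

def aggregate_returns(daily_returns, freq):
    # Compound noncumulative daily returns (pd.Series with a DatetimeIndex)
    # into weekly ('W') or monthly ('M') returns.
    return (1 + daily_returns).resample(freq).prod() - 1

# weekly = aggregate_returns(returns, "W"); monthly = aggregate_returns(returns, "M")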
One row per trade.\n - See full explanation in tears.create_full_tear_sheet.\npositions : pd.DataFrame\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\nlegend_loc : matplotlib.loc, optional\n The location of the legend on the plot.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m23"} {"signature": "def plot_slippage_sweep(returns, positions, transactions,slippage_params=(, , , , , , ),ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()slippage_sweep = pd.DataFrame()for bps in slippage_params:adj_returns = txn.adjust_returns_for_slippage(returns, positions,transactions, bps)label = str(bps) + \"\"slippage_sweep[label] = ep.cum_returns(adj_returns, )slippage_sweep.plot(alpha=, lw=, ax=ax)ax.set_title('')ax.set_ylabel('')ax.legend(loc='', frameon=True, framealpha=)return ax", "docstring": "Plots equity curves at different per-dollar slippage assumptions.\n\nParameters\n----------\nreturns : pd.Series\n Timeseries of portfolio returns to be adjusted for various\n degrees of slippage.\npositions : pd.DataFrame\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\ntransactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in tears.create_full_tear_sheet.\nslippage_params: tuple\n Slippage pameters to apply to the return time series (in\n basis points).\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to seaborn plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m24"} {"signature": "def plot_slippage_sensitivity(returns, positions, transactions,ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()avg_returns_given_slippage = pd.Series()for bps in range(, ):adj_returns = txn.adjust_returns_for_slippage(returns, positions,transactions, bps)avg_returns = ep.annual_return(adj_returns)avg_returns_given_slippage.loc[bps] = avg_returnsavg_returns_given_slippage.plot(alpha=, lw=, ax=ax)ax.set_title('')ax.set_xticks(np.arange(, , ))ax.set_ylabel('')ax.set_xlabel('')return ax", "docstring": "Plots curve relating per-dollar slippage to average annual returns.\n\nParameters\n----------\nreturns : pd.Series\n Timeseries of portfolio returns to be adjusted for various\n degrees of slippage.\npositions : pd.DataFrame\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\ntransactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in tears.create_full_tear_sheet.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to seaborn plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m25"} {"signature": "def plot_daily_turnover_hist(transactions, positions,ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()turnover = txn.get_turnover(positions, transactions)sns.distplot(turnover, ax=ax, **kwargs)ax.set_title('')ax.set_xlabel('')return ax", "docstring": "Plots a histogram of daily turnover rates.\n\nParameters\n----------\ntransactions : pd.DataFrame\n Prices and amounts of executed trades. 
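The slippage sweep and sensitivity plots both rely on re-pricing the return stream under a per-dollar trading cost. A hedged sketch of what txn.adjust_returns_for_slippage is assumed to do: subtract daily turnover times the slippage rate (expressed in basis points) from each day's return.

def adjust_returns_for_slippage(returns, turnover, slippage_bps):
    # returns and turnover are daily pd.Series on the same index; turnover is the
    # fraction of the portfolio traded that day. The exact cost model is an assumption.
    cost_per_dollar_traded = slippage_bps / 10000.0
    return returns - turnover * cost_per_dollar_traded

# Equity curves under several assumptions, as in the sweep plot:
# curves = {bps: (1 + adjust_returns_for_slippage(returns, turnover, bps)).cumprod()
#           for bps in (3, 5, 10, 25)}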
One row per trade.\n - See full explanation in tears.create_full_tear_sheet.\npositions : pd.DataFrame\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to seaborn plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m27"} {"signature": "def plot_daily_volume(returns, transactions, ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()daily_txn = txn.get_txn_vol(transactions)daily_txn.txn_shares.plot(alpha=, lw=, ax=ax, **kwargs)ax.axhline(daily_txn.txn_shares.mean(), color='',linestyle='', lw=, alpha=)ax.set_title('')ax.set_xlim((returns.index[], returns.index[-]))ax.set_ylabel('')ax.set_xlabel('')return ax", "docstring": "Plots trading volume per day vs. date.\n\nAlso displays all-time daily average.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\ntransactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in tears.create_full_tear_sheet.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m28"} {"signature": "def plot_txn_time_hist(transactions, bin_minutes=, tz='',ax=None, **kwargs):", "body": "if ax is None:ax = plt.gca()txn_time = transactions.copy()txn_time.index = txn_time.index.tz_convert(pytz.timezone(tz))txn_time.index = txn_time.index.map(lambda x: x.hour * + x.minute)txn_time[''] = (txn_time.amount * txn_time.price).abs()txn_time = txn_time.groupby(level=).sum().reindex(index=range(, ))txn_time.index = (txn_time.index / bin_minutes).astype(int) * bin_minutestxn_time = txn_time.groupby(level=).sum()txn_time[''] = txn_time.index.map(lambda x:str(datetime.time(int(x / ),x % ))[:-])trade_value_sum = txn_time.trade_value.sum()txn_time.trade_value = txn_time.trade_value.fillna() / trade_value_sumax.bar(txn_time.index, txn_time.trade_value, width=bin_minutes, **kwargs)ax.set_xlim(, )ax.set_xticks(txn_time.index[::int( / bin_minutes)])ax.set_xticklabels(txn_time.time_str[::int( / bin_minutes)])ax.set_title('')ax.set_ylabel('')ax.set_xlabel('')return ax", "docstring": "Plots a histogram of transaction times, binning the times into\nbuckets of a given duration.\n\nParameters\n----------\ntransactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in tears.create_full_tear_sheet.\nbin_minutes : float, optional\n Sizes of the bins in minutes, defaults to 5 minutes.\ntz : str, optional\n Time zone to plot against. 
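plot_txn_time_hist buckets traded dollar value by time of day. The core bookkeeping can be sketched as follows; the timezone default and the amount/price column names follow the transactions frame described throughout this section, while everything else is illustrative:

import numpy as np

def txn_value_by_time_of_day(transactions, bin_minutes=5, tz="America/New_York"):
    # Share of traded dollar value falling in each bin_minutes bucket of the trading day.
    # Assumes transactions has a tz-aware DatetimeIndex plus amount and price columns.
    txn = transactions.copy()
    txn.index = txn.index.tz_convert(tz)
    minute_of_day = np.asarray(txn.index.hour * 60 + txn.index.minute)
    bucket = (minute_of_day // bin_minutes) * bin_minutes
    trade_value = (txn["amount"] * txn["price"]).abs()
    by_bucket = trade_value.groupby(bucket).sum()
    return by_bucket / by_bucket.sum()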
Note that if the specified\n zone does not apply daylight savings, the distribution\n may be partially offset.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m29"} {"signature": "def show_worst_drawdown_periods(returns, top=):", "body": "drawdown_df = timeseries.gen_drawdown_table(returns, top=top)utils.print_table(drawdown_df.sort_values('', ascending=False),name='',float_format=''.format,)", "docstring": "Prints information about the worst drawdown periods.\n\nPrints peak dates, valley dates, recovery dates, and net\ndrawdowns.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\ntop : int, optional\n Amount of top drawdowns periods to plot (default 5).", "id": "f12210:m30"} {"signature": "def plot_monthly_returns_timeseries(returns, ax=None, **kwargs):", "body": "def cumulate_returns(x):return ep.cum_returns(x)[-]if ax is None:ax = plt.gca()monthly_rets = returns.resample('').apply(lambda x: cumulate_returns(x))monthly_rets = monthly_rets.to_period()sns.barplot(x=monthly_rets.index,y=monthly_rets.values,color='')locs, labels = plt.xticks()plt.setp(labels, rotation=)xticks_coord = []xticks_label = []count = for i in monthly_rets.index:if i.month == :xticks_label.append(i)xticks_coord.append(count)ax.axvline(count, color='', ls='', alpha=)count += ax.axhline(, color='', ls='')ax.set_xticks(xticks_coord)ax.set_xticklabels(xticks_label)return ax", "docstring": "Plots monthly returns as a timeseries.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n**kwargs, optional\n Passed to seaborn plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m31"} {"signature": "def plot_round_trip_lifetimes(round_trips, disp_amount=, lsize=, ax=None):", "body": "if ax is None:ax = plt.subplot()symbols_sample = round_trips.symbol.unique()np.random.seed()sample = np.random.choice(round_trips.symbol.unique(), replace=False,size=min(disp_amount, len(symbols_sample)))sample_round_trips = round_trips[round_trips.symbol.isin(sample)]symbol_idx = pd.Series(np.arange(len(sample)), index=sample)for symbol, sym_round_trips in sample_round_trips.groupby(''):for _, row in sym_round_trips.iterrows():c = '' if row.long else ''y_ix = symbol_idx[symbol] + ax.plot([row[''], row['']],[y_ix, y_ix], color=c,linewidth=lsize, solid_capstyle='')ax.set_yticks(range(disp_amount))ax.set_yticklabels([utils.format_asset(s) for s in sample])ax.set_ylim((-, min(len(sample), disp_amount) - ))blue = patches.Rectangle([, ], , , color='', label='')red = patches.Rectangle([, ], , , color='', label='')leg = ax.legend(handles=[blue, red], loc='',frameon=True, framealpha=)leg.get_frame().set_edgecolor('')ax.grid(False)return ax", "docstring": "Plots timespans and directions of a sample of round trip trades.\n\nParameters\n----------\nround_trips : pd.DataFrame\n DataFrame with one row per round trip trade.\n - See full explanation in round_trips.extract_round_trips\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m32"} {"signature": "def show_profit_attribution(round_trips):", 
"body": "total_pnl = round_trips[''].sum()pnl_attribution = round_trips.groupby('')[''].sum() / total_pnlpnl_attribution.name = ''pnl_attribution.index = pnl_attribution.index.map(utils.format_asset)utils.print_table(pnl_attribution.sort_values(inplace=False,ascending=False,),name='',float_format=''.format,)", "docstring": "Prints the share of total PnL contributed by each\ntraded name.\n\nParameters\n----------\nround_trips : pd.DataFrame\n DataFrame with one row per round trip trade.\n - See full explanation in round_trips.extract_round_trips\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m33"} {"signature": "def plot_prob_profit_trade(round_trips, ax=None):", "body": "x = np.linspace(, , )round_trips[''] = round_trips.pnl > dist = sp.stats.beta(round_trips.profitable.sum(),(~round_trips.profitable).sum())y = dist.pdf(x)lower_perc = dist.ppf()upper_perc = dist.ppf()lower_plot = dist.ppf()upper_plot = dist.ppf()if ax is None:ax = plt.subplot()ax.plot(x, y)ax.axvline(lower_perc, color='')ax.axvline(upper_perc, color='')ax.set_xlabel('')ax.set_ylabel('')ax.set_xlim(lower_plot, upper_plot)ax.set_ylim((, y.max() + ))return ax", "docstring": "Plots a probability distribution for the event of making\na profitable trade.\n\nParameters\n----------\nround_trips : pd.DataFrame\n DataFrame with one row per round trip trade.\n - See full explanation in round_trips.extract_round_trips\nax : matplotlib.Axes, optional\n Axes upon which to plot.\n\nReturns\n-------\nax : matplotlib.Axes\n The axes that were plotted on.", "id": "f12210:m34"} {"signature": "def plot_cones(name, bounds, oos_returns, num_samples=, ax=None,cone_std=(, , ), random_seed=None, num_strikes=):", "body": "if ax is None:fig = figure.Figure(figsize=(, ))FigureCanvasAgg(fig)axes = fig.add_subplot()else:axes = axreturns = ep.cum_returns(oos_returns, starting_value=)bounds_tmp = bounds.copy()returns_tmp = returns.copy()cone_start = returns.index[]colors = [\"\", \"\", \"\", \"\"]for c in range(num_strikes + ):if c > :tmp = returns.loc[cone_start:]bounds_tmp = bounds_tmp.iloc[:len(tmp)]bounds_tmp = bounds_tmp.set_index(tmp.index)crossing = (tmp < bounds_tmp[float(-)].iloc[:len(tmp)])if crossing.sum() <= :breakcone_start = crossing.loc[crossing].index[]returns_tmp = returns.loc[cone_start:]bounds_tmp = (bounds - ( - returns.loc[cone_start]))for std in cone_std:x = returns_tmp.indexy1 = bounds_tmp[float(std)].iloc[:len(returns_tmp)]y2 = bounds_tmp[float(-std)].iloc[:len(returns_tmp)]axes.fill_between(x, y1, y2, color=colors[c], alpha=)label = ''.format((returns.iloc[-] - ) * )axes.plot(returns.index, returns.values, color='', lw=,label=label)if name is not None:axes.set_title(name)axes.axhline(, color='', alpha=)axes.legend(frameon=True, framealpha=)if ax is None:return figelse:return axes", "docstring": "Plots the upper and lower bounds of an n standard deviation\ncone of forecasted cumulative returns. Redraws a new cone when\ncumulative returns fall outside of last cone drawn.\n\nParameters\n----------\nname : str\n Account name to be used as figure title.\nbounds : pandas.core.frame.DataFrame\n Contains upper and lower cone boundaries. 
Column names are\n strings corresponding to the number of standard devations\n above (positive) or below (negative) the projected mean\n cumulative returns.\noos_returns : pandas.core.frame.DataFrame\n Non-cumulative out-of-sample returns.\nnum_samples : int\n Number of samples to draw from the in-sample daily returns.\n Each sample will be an array with length num_days.\n A higher number of samples will generate a more accurate\n bootstrap cone.\nax : matplotlib.Axes, optional\n Axes upon which to plot.\ncone_std : list of int/float\n Number of standard devations to use in the boundaries of\n the cone. If multiple values are passed, cone bounds will\n be generated for each value.\nrandom_seed : int\n Seed for the pseudorandom number generator used by the pandas\n sample method.\nnum_strikes : int\n Upper limit for number of cones drawn. Can be anything from 0 to 3.\n\nReturns\n-------\nReturns are either an ax or fig option, but not both. If a\nmatplotlib.Axes instance is passed in as ax, then it will be modified\nand returned. This allows for users to plot interactively in jupyter\nnotebook. When no ax object is passed in, a matplotlib.figure instance\nis generated and returned. This figure can then be used to save\nthe plot as an image without viewing it.\n\nax : matplotlib.Axes\n The axes that were plotted on.\nfig : matplotlib.figure\n The figure instance which contains all the plot elements.", "id": "f12210:m35"} {"signature": "def get_percent_alloc(values):", "body": "return values.divide(values.sum(axis=''),axis='')", "docstring": "Determines a portfolio's allocations.\n\nParameters\n----------\nvalues : pd.DataFrame\n Contains position values or amounts.\n\nReturns\n-------\nallocations : pd.DataFrame\n Positions and their allocations.", "id": "f12213:m0"} {"signature": "def get_top_long_short_abs(positions, top=):", "body": "positions = positions.drop('', axis='')df_max = positions.max()df_min = positions.min()df_abs_max = positions.abs().max()df_top_long = df_max[df_max > ].nlargest(top)df_top_short = df_min[df_min < ].nsmallest(top)df_top_abs = df_abs_max.nlargest(top)return df_top_long, df_top_short, df_top_abs", "docstring": "Finds the top long, short, and absolute positions.\n\nParameters\n----------\npositions : pd.DataFrame\n The positions that the strategy takes over time.\ntop : int, optional\n How many of each to find (default 10).\n\nReturns\n-------\ndf_top_long : pd.DataFrame\n Top long positions.\ndf_top_short : pd.DataFrame\n Top short positions.\ndf_top_abs : pd.DataFrame\n Top absolute positions.", "id": "f12213:m1"} {"signature": "def get_max_median_position_concentration(positions):", "body": "expos = get_percent_alloc(positions)expos = expos.drop('', axis=)longs = expos.where(expos.applymap(lambda x: x > ))shorts = expos.where(expos.applymap(lambda x: x < ))alloc_summary = pd.DataFrame()alloc_summary[''] = longs.max(axis=)alloc_summary[''] = longs.median(axis=)alloc_summary[''] = shorts.median(axis=)alloc_summary[''] = shorts.min(axis=)return alloc_summary", "docstring": "Finds the max and median long and short position concentrations\nin each time period specified by the index of positions.\n\nParameters\n----------\npositions : pd.DataFrame\n The positions that the strategy takes over time.\n\nReturns\n-------\npd.DataFrame\n Columns are max long, max short, median long, and median short\n position concentrations. 
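get_percent_alloc and get_top_long_short_abs, recorded above, reduce a positions frame to per-row weights and to the largest exposures ever held. A compact restatement of that logic; the 'cash' column name follows the position examples in this section:

def get_percent_alloc(values):
    # Each position's share of total portfolio value, computed row by row.
    return values.divide(values.sum(axis="columns"), axis="index")

def top_abs_positions(positions, top=10):
    # Largest positions ever held, long or short, ranked by absolute exposure.
    pos = positions.drop("cash", axis="columns", errors="ignore")
    return pos.abs().max().nlargest(top)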
Rows are timeperiods.", "id": "f12213:m2"} {"signature": "def extract_pos(positions, cash):", "body": "positions = positions.copy()positions[''] = positions.amount * positions.last_sale_pricecash.name = ''values = positions.reset_index().pivot_table(index='',columns='',values='')if ZIPLINE:for asset in values.columns:if type(asset) in [Equity, Future]:values[asset] = values[asset] * asset.price_multipliervalues = values.join(cash).fillna()values.columns.name = ''return values", "docstring": "Extract position values from backtest object as returned by\nget_backtest() on the Quantopian research platform.\n\nParameters\n----------\npositions : pd.DataFrame\n timeseries containing one row per symbol (and potentially\n duplicate datetime indices) and columns for amount and\n last_sale_price.\ncash : pd.Series\n timeseries containing cash in the portfolio.\n\nReturns\n-------\npd.DataFrame\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.", "id": "f12213:m3"} {"signature": "def get_sector_exposures(positions, symbol_sector_map):", "body": "cash = positions['']positions = positions.drop('', axis=)unmapped_pos = np.setdiff1d(positions.columns.values,list(symbol_sector_map.keys()))if len(unmapped_pos) > :warn_message = \"\"\"\"\"\".format(\"\".join(map(str, unmapped_pos)))warnings.warn(warn_message, UserWarning)sector_exp = positions.groupby(by=symbol_sector_map, axis=).sum()sector_exp[''] = cashreturn sector_exp", "docstring": "Sum position exposures by sector.\n\nParameters\n----------\npositions : pd.DataFrame\n Contains position values or amounts.\n - Example\n index 'AAPL' 'MSFT' 'CHK' cash\n 2004-01-09 13939.380 -15012.993 -403.870 1477.483\n 2004-01-12 14492.630 -18624.870 142.630 3989.610\n 2004-01-13 -13853.280 13653.640 -100.980 100.000\nsymbol_sector_map : dict or pd.Series\n Security identifier to sector mapping.\n Security ids as keys/index, sectors as values.\n - Example:\n {'AAPL' : 'Technology'\n 'MSFT' : 'Technology'\n 'CHK' : 'Natural Resources'}\n\nReturns\n-------\nsector_exp : pd.DataFrame\n Sectors and their allocations.\n - Example:\n index 'Technology' 'Natural Resources' cash\n 2004-01-09 -1073.613 -403.870 1477.4830\n 2004-01-12 -4132.240 142.630 3989.6100\n 2004-01-13 -199.640 -100.980 100.0000", "id": "f12213:m4"} {"signature": "def get_long_short_pos(positions):", "body": "pos_wo_cash = positions.drop('', axis=)longs = pos_wo_cash[pos_wo_cash > ].sum(axis=).fillna()shorts = pos_wo_cash[pos_wo_cash < ].sum(axis=).fillna()cash = positions.cashnet_liquidation = longs + shorts + cashdf_pos = pd.DataFrame({'': longs.divide(net_liquidation, axis=''),'': shorts.divide(net_liquidation,axis='')})df_pos[''] = df_pos[''] + df_pos['']return df_pos", "docstring": "Determines the long and short allocations in a portfolio.\n\nParameters\n----------\npositions : pd.DataFrame\n The positions that the strategy takes over time.\n\nReturns\n-------\ndf_long_short : pd.DataFrame\n Long and short allocations as a decimal\n percentage of the total net liquidation", "id": "f12213:m5"} {"signature": "def model_returns_t_alpha_beta(data, bmark, samples=, progressbar=True):", "body": "data_bmark = pd.concat([data, bmark], axis=).dropna()with pm.Model() as model:sigma = pm.HalfCauchy('',beta=)nu = pm.Exponential('', / )X = data_bmark.iloc[:, ]y = data_bmark.iloc[:, ]alpha_reg = pm.Normal('', mu=, sd=)beta_reg = pm.Normal('', mu=, sd=)mu_reg = alpha_reg + beta_reg * Xpm.StudentT('',nu=nu + ,mu=mu_reg,sd=sigma,observed=y)trace = pm.sample(samples, 
progressbar=progressbar)return model, trace", "docstring": "Run Bayesian alpha-beta-model with T distributed returns.\n\nThis model estimates intercept (alpha) and slope (beta) of two\nreturn sets. Usually, these will be algorithm returns and\nbenchmark returns (e.g. S&P500). The data is assumed to be T\ndistributed and thus is robust to outliers and takes tail events\ninto account. If a pandas.DataFrame is passed as a benchmark, then\nmultiple linear regression is used to estimate alpha and beta.\n\nParameters\n----------\nreturns : pandas.Series\n Series of simple returns of an algorithm or stock.\nbmark : pandas.DataFrame\n DataFrame of benchmark returns (e.g., S&P500) or risk factors (e.g.,\n Fama-French SMB, HML, and UMD).\n If bmark has more recent returns than returns_train, these dates\n will be treated as missing values and predictions will be\n generated for them taking market correlations into account.\nsamples : int (optional)\n Number of posterior samples to draw.\n\nReturns\n-------\nmodel : pymc.Model object\n PyMC3 model containing all random variables.\ntrace : pymc3.sampling.BaseTrace object\n A PyMC3 trace object that contains samples for each parameter\n of the posterior.", "id": "f12216:m0"} {"signature": "def model_returns_normal(data, samples=, progressbar=True):", "body": "with pm.Model() as model:mu = pm.Normal('', mu=, sd=, testval=data.mean())sigma = pm.HalfCauchy('', beta=, testval=data.std())returns = pm.Normal('', mu=mu, sd=sigma, observed=data)pm.Deterministic('',returns.distribution.variance** *np.sqrt())pm.Deterministic('',returns.distribution.mean /returns.distribution.variance** *np.sqrt())trace = pm.sample(samples, progressbar=progressbar)return model, trace", "docstring": "Run Bayesian model assuming returns are normally distributed.\n\nParameters\n----------\nreturns : pandas.Series\n Series of simple returns of an algorithm or stock.\nsamples : int (optional)\n Number of posterior samples to draw.\n\nReturns\n-------\nmodel : pymc.Model object\n PyMC3 model containing all random variables.\ntrace : pymc3.sampling.BaseTrace object\n A PyMC3 trace object that contains samples for each parameter\n of the posterior.", "id": "f12216:m1"} {"signature": "def model_returns_t(data, samples=, progressbar=True):", "body": "with pm.Model() as model:mu = pm.Normal('', mu=, sd=, testval=data.mean())sigma = pm.HalfCauchy('', beta=, testval=data.std())nu = pm.Exponential('', / , testval=)returns = pm.StudentT('', nu=nu + , mu=mu, sd=sigma,observed=data)pm.Deterministic('',returns.distribution.variance** * np.sqrt())pm.Deterministic('', returns.distribution.mean /returns.distribution.variance** *np.sqrt())trace = pm.sample(samples, progressbar=progressbar)return model, trace", "docstring": "Run Bayesian model assuming returns are Student-T distributed.\n\nCompared with the normal model, this model assumes returns are\nT-distributed and thus have a 3rd parameter (nu) that controls the\nmass in the tails.\n\nParameters\n----------\nreturns : pandas.Series\n Series of simple returns of an algorithm or stock.\nsamples : int, optional\n Number of posterior samples to draw.\n\nReturns\n-------\nmodel : pymc.Model object\n PyMC3 model containing all random variables.\ntrace : pymc3.sampling.BaseTrace object\n A PyMC3 trace object that contains samples for each parameter\n of the posterior.", "id": "f12216:m2"} {"signature": "def model_best(y1, y2, samples=, progressbar=True):", "body": "y = np.concatenate((y1, y2))mu_m = np.mean(y)mu_p = * / np.std(y)**sigma_low = np.std(y) 
/ sigma_high = np.std(y) * with pm.Model() as model:group1_mean = pm.Normal('', mu=mu_m, tau=mu_p,testval=y1.mean())group2_mean = pm.Normal('', mu=mu_m, tau=mu_p,testval=y2.mean())group1_std = pm.Uniform('', lower=sigma_low,upper=sigma_high, testval=y1.std())group2_std = pm.Uniform('', lower=sigma_low,upper=sigma_high, testval=y2.std())nu = pm.Exponential('', / , testval=) + returns_group1 = pm.StudentT('', nu=nu, mu=group1_mean,lam=group1_std**-, observed=y1)returns_group2 = pm.StudentT('', nu=nu, mu=group2_mean,lam=group2_std**-, observed=y2)diff_of_means = pm.Deterministic('',group2_mean - group1_mean)pm.Deterministic('',group2_std - group1_std)pm.Deterministic('', diff_of_means /pm.math.sqrt((group1_std** +group2_std**) / ))pm.Deterministic('',returns_group1.distribution.variance** *np.sqrt())pm.Deterministic('',returns_group2.distribution.variance** *np.sqrt())pm.Deterministic('', returns_group1.distribution.mean /returns_group1.distribution.variance** *np.sqrt())pm.Deterministic('', returns_group2.distribution.mean /returns_group2.distribution.variance** *np.sqrt())trace = pm.sample(samples, progressbar=progressbar)return model, trace", "docstring": "Bayesian Estimation Supersedes the T-Test\n\nThis model runs a Bayesian hypothesis comparing if y1 and y2 come\nfrom the same distribution. Returns are assumed to be T-distributed.\n\nIn addition, computes annual volatility and Sharpe of in and\nout-of-sample periods.\n\nThis model replicates the example used in:\nKruschke, John. (2012) Bayesian estimation supersedes the t\ntest. Journal of Experimental Psychology: General.\n\nParameters\n----------\ny1 : array-like\n Array of returns (e.g. in-sample)\ny2 : array-like\n Array of returns (e.g. out-of-sample)\nsamples : int, optional\n Number of posterior samples to draw.\n\nReturns\n-------\nmodel : pymc.Model object\n PyMC3 model containing all random variables.\ntrace : pymc3.sampling.BaseTrace object\n A PyMC3 trace object that contains samples for each parameter\n of the posterior.\n\nSee Also\n--------\nplot_stoch_vol : plotting of tochastic volatility model", "id": "f12216:m3"} {"signature": "def model_stoch_vol(data, samples=, progressbar=True):", "body": "from pymc3.distributions.timeseries import GaussianRandomWalkwith pm.Model() as model:nu = pm.Exponential('', / , testval=)sigma = pm.Exponential('', / , testval=)s = GaussianRandomWalk('', sigma**-, shape=len(data))volatility_process = pm.Deterministic('',pm.math.exp(- * s))pm.StudentT('', nu, lam=volatility_process, observed=data)trace = pm.sample(samples, progressbar=progressbar)return model, trace", "docstring": "Run stochastic volatility model.\n\nThis model estimates the volatility of a returns series over time.\nReturns are assumed to be T-distributed. 
lambda (width of\nT-distributed) is assumed to follow a random-walk.\n\nParameters\n----------\ndata : pandas.Series\n Return series to model.\nsamples : int, optional\n Posterior samples to draw.\n\nReturns\n-------\nmodel : pymc.Model object\n PyMC3 model containing all random variables.\ntrace : pymc3.sampling.BaseTrace object\n A PyMC3 trace object that contains samples for each parameter\n of the posterior.\n\nSee Also\n--------\nplot_stoch_vol : plotting of tochastic volatility model", "id": "f12216:m5"} {"signature": "def plot_stoch_vol(data, trace=None, ax=None):", "body": "if trace is None:trace = model_stoch_vol(data)if ax is None:fig, ax = plt.subplots(figsize=(, ))data.abs().plot(ax=ax)ax.plot(data.index, np.exp(trace['', ::].T), '', alpha=)ax.set(title='', xlabel='', ylabel='')ax.legend(['', ''],frameon=True, framealpha=)return ax", "docstring": "Generate plot for stochastic volatility model.\n\nParameters\n----------\ndata : pandas.Series\n Returns to model.\ntrace : pymc3.sampling.BaseTrace object, optional\n trace as returned by model_stoch_vol\n If not passed, sample from model.\nax : matplotlib.axes object, optional\n Plot into axes object\n\nReturns\n-------\nax object\n\nSee Also\n--------\nmodel_stoch_vol : run stochastic volatility model", "id": "f12216:m6"} {"signature": "def compute_bayes_cone(preds, starting_value=):", "body": "def scoreatpercentile(cum_preds, p):return [stats.scoreatpercentile(c, p) for c in cum_preds.T]cum_preds = np.cumprod(preds + , ) * starting_valueperc = {p: scoreatpercentile(cum_preds, p) for p in (, , , )}return perc", "docstring": "Compute 5, 25, 75 and 95 percentiles of cumulative returns, used\nfor the Bayesian cone.\n\nParameters\n----------\npreds : numpy.array\n Multiple (simulated) cumulative returns.\nstarting_value : int (optional)\n Have cumulative returns start around this value.\n Default = 1.\n\nReturns\n-------\ndict of percentiles over time\n Dictionary mapping percentiles (5, 25, 75, 95) to a\n timeseries.", "id": "f12216:m7"} {"signature": "def _groupby_consecutive(txn, max_delta=pd.Timedelta('')):", "body": "def vwap(transaction):if transaction.amount.sum() == :warnings.warn('')return np.nanreturn (transaction.amount * transaction.price).sum() /transaction.amount.sum()out = []for sym, t in txn.groupby(''):t = t.sort_index()t.index.name = ''t = t.reset_index()t[''] = t.amount > t[''] = (t.order_sign.shift() != t.order_sign).astype(int).cumsum()t[''] = ((t.dt.sub(t.dt.shift())) >max_delta).astype(int).cumsum()grouped_price = (t.groupby(('','')).apply(vwap))grouped_price.name = ''grouped_rest = t.groupby(('', '')).agg({'': '','': '','': ''})grouped = grouped_rest.join(grouped_price)out.append(grouped)out = pd.concat(out)out = out.set_index('')return out", "docstring": "Merge transactions of the same direction separated by less than\n max_delta time duration.\n\n Parameters\n ----------\n transactions : pd.DataFrame\n Prices and amounts of executed round_trips. 
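compute_bayes_cone, defined just above, reduces a matrix of simulated daily-return paths to percentile bands of cumulative value. The same result using numpy's percentile; the 5/25/75/95 levels come from the docstring, everything else is a sketch:

import numpy as np

def compute_bayes_cone(preds, starting_value=1):
    # preds: 2-D array of simulated daily returns, shape (num_samples, num_days).
    # Returns {percentile: list of cumulative values per day} for the Bayesian cone.
    cum_preds = np.cumprod(np.asarray(preds) + 1, axis=1) * starting_value
    return {p: np.percentile(cum_preds, p, axis=0).tolist() for p in (5, 25, 75, 95)}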
One row per trade.\n - See full explanation in tears.create_full_tear_sheet\n\n max_delta : pandas.Timedelta (optional)\n Merge transactions in the same direction separated by less\n than max_delta time duration.\n\n\n Returns\n -------\n transactions : pd.DataFrame", "id": "f12217:m1"} {"signature": "def extract_round_trips(transactions,portfolio_value=None):", "body": "transactions = _groupby_consecutive(transactions)roundtrips = []for sym, trans_sym in transactions.groupby(''):trans_sym = trans_sym.sort_index()price_stack = deque()dt_stack = deque()trans_sym[''] = trans_sym.price *np.sign(trans_sym.amount)trans_sym[''] = trans_sym.amount.abs().astype(int)for dt, t in trans_sym.iterrows():if t.price < :warnings.warn('''')continueindiv_prices = [t.signed_price] * t.abs_amountif (len(price_stack) == ) or(copysign(, price_stack[-]) == copysign(, t.amount)):price_stack.extend(indiv_prices)dt_stack.extend([dt] * len(indiv_prices))else:pnl = invested = cur_open_dts = []for price in indiv_prices:if len(price_stack) != and(copysign(, price_stack[-]) != copysign(, price)):prev_price = price_stack.popleft()prev_dt = dt_stack.popleft()pnl += -(price + prev_price)cur_open_dts.append(prev_dt)invested += abs(prev_price)else:price_stack.append(price)dt_stack.append(dt)roundtrips.append({'': pnl,'': cur_open_dts[],'': dt,'': price < ,'': pnl / invested,'': sym,})roundtrips = pd.DataFrame(roundtrips)roundtrips[''] = roundtrips[''].sub(roundtrips[''])if portfolio_value is not None:pv = pd.DataFrame(portfolio_value,columns=['']).assign(date=portfolio_value.index)roundtrips[''] = roundtrips.close_dt.apply(lambda x:x.replace(hour=,minute=,second=))tmp = roundtrips.join(pv, on='', lsuffix='')roundtrips[''] = tmp.pnl / tmp.portfolio_valueroundtrips = roundtrips.drop('', axis='')return roundtrips", "docstring": "Group transactions into \"round trips\". First, transactions are\n grouped by day and directionality. Then, long and short\n transactions are matched to create round-trip round_trips for which\n PnL, duration and returns are computed. Crossings where a position\n changes from long to short and vice-versa are handled correctly.\n\n Under the hood, we reconstruct the individual shares in a\n portfolio over time and match round_trips in a FIFO-order.\n\n For example, the following transactions would constitute one round trip:\n index amount price symbol\n 2004-01-09 12:18:01 10 50 'AAPL'\n 2004-01-09 15:12:53 10 100 'AAPL'\n 2004-01-13 14:41:23 -10 100 'AAPL'\n 2004-01-13 15:23:34 -10 200 'AAPL'\n\n First, the first two and last two round_trips will be merged into a two\n single transactions (computing the price via vwap). Then, during\n the portfolio reconstruction, the two resulting transactions will\n be merged and result in 1 round-trip trade with a PnL of\n (150 * 20) - (75 * 20) = 1500.\n\n Note, that round trips do not have to close out positions\n completely. For example, we could have removed the last\n transaction in the example above and still generated a round-trip\n over 10 shares with 10 shares left in the portfolio to be matched\n with a later transaction.\n\n Parameters\n ----------\n transactions : pd.DataFrame\n Prices and amounts of executed round_trips. 
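Before matching round trips, _groupby_consecutive merges same-direction fills that are close in time and re-prices each merged group at its volume-weighted average price. The VWAP step on its own:

import numpy as np

def vwap(amounts, prices):
    # Volume-weighted average price of a group of fills; undefined (NaN) when the
    # signed amounts cancel out, as warned in the record above.
    amounts = np.asarray(amounts, dtype=float)
    prices = np.asarray(prices, dtype=float)
    total = amounts.sum()
    if total == 0:
        return np.nan
    return (amounts * prices).sum() / total

# vwap([10, 10], [50, 100]) -> 75.0, matching the AAPL example in the docstring above.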
One row per trade.\n - See full explanation in tears.create_full_tear_sheet\n\n portfolio_value : pd.Series (optional)\n Portfolio value (all net assets including cash) over time.\n Note that portfolio_value needs to beginning of day, so either\n use .shift() or positions.sum(axis='columns') / (1+returns).\n\n Returns\n -------\n round_trips : pd.DataFrame\n DataFrame with one row per round trip. The returns column\n contains returns in respect to the portfolio value while\n rt_returns are the returns in regards to the invested capital\n into that partiulcar round-trip.", "id": "f12217:m2"} {"signature": "def add_closing_transactions(positions, transactions):", "body": "closed_txns = transactions[['', '', '']]pos_at_end = positions.drop('', axis=).iloc[-]open_pos = pos_at_end.replace(, np.nan).dropna()end_dt = open_pos.name + pd.Timedelta(seconds=)for sym, ending_val in open_pos.iteritems():txn_sym = transactions[transactions.symbol == sym]ending_amount = txn_sym.amount.sum()ending_price = ending_val / ending_amountclosing_txn = {'': sym,'': -ending_amount,'': ending_price}closing_txn = pd.DataFrame(closing_txn, index=[end_dt])closed_txns = closed_txns.append(closing_txn)closed_txns = closed_txns[closed_txns.amount != ]return closed_txns", "docstring": "Appends transactions that close out all positions at the end of\nthe timespan covered by positions data. Utilizes pricing information\nin the positions DataFrame to determine closing price.\n\nParameters\n----------\npositions : pd.DataFrame\n The positions that the strategy takes over time.\ntransactions : pd.DataFrame\n Prices and amounts of executed round_trips. One row per trade.\n - See full explanation in tears.create_full_tear_sheet\n\nReturns\n-------\nclosed_txns : pd.DataFrame\n Transactions with closing transactions appended.", "id": "f12217:m3"} {"signature": "def apply_sector_mappings_to_round_trips(round_trips, sector_mappings):", "body": "sector_round_trips = round_trips.copy()sector_round_trips.symbol = sector_round_trips.symbol.apply(lambda x: sector_mappings.get(x, ''))sector_round_trips = sector_round_trips.dropna(axis=)return sector_round_trips", "docstring": "Translates round trip symbols to sectors.\n\nParameters\n----------\nround_trips : pd.DataFrame\n DataFrame with one row per round trip trade.\n - See full explanation in round_trips.extract_round_trips\nsector_mappings : dict or pd.Series, optional\n Security identifier to sector mapping.\n Security ids as keys, sectors as values.\n\nReturns\n-------\nsector_round_trips : pd.DataFrame\n Round trips with symbol names replaced by sector names.", "id": "f12217:m4"} {"signature": "def gen_round_trip_stats(round_trips):", "body": "stats = {}stats[''] = agg_all_long_short(round_trips, '', PNL_STATS)stats[''] = agg_all_long_short(round_trips, '',SUMMARY_STATS)stats[''] = agg_all_long_short(round_trips, '',DURATION_STATS)stats[''] = agg_all_long_short(round_trips, '',RETURN_STATS)stats[''] =round_trips.groupby('')[''].agg(RETURN_STATS).Treturn stats", "docstring": "Generate various round-trip statistics.\n\n Parameters\n ----------\n round_trips : pd.DataFrame\n DataFrame with one row per round trip trade.\n - See full explanation in round_trips.extract_round_trips\n\n Returns\n -------\n stats : dict\n A dictionary where each value is a pandas DataFrame containing\n various round-trip statistics.\n\n See also\n --------\n round_trips.print_round_trip_stats", "id": "f12217:m5"} {"signature": "def print_round_trip_stats(round_trips, hide_pos=False):", "body": "stats = 
gen_round_trip_stats(round_trips)print_table(stats[''], float_format=''.format,name='')print_table(stats[''], float_format=''.format, name='')print_table(stats[''], float_format=''.format,name='')print_table(stats[''] * , float_format=''.format,name='')if not hide_pos:stats[''].columns = stats[''].columns.map(format_asset)print_table(stats[''] * ,float_format=''.format, name='')", "docstring": "Print various round-trip statistics. Tries to pretty-print tables\n with HTML output if run inside IPython NB.\n\n Parameters\n ----------\n round_trips : pd.DataFrame\n DataFrame with one row per round trip trade.\n - See full explanation in round_trips.extract_round_trips\n\n See also\n --------\n round_trips.gen_round_trip_stats", "id": "f12217:m6"} {"signature": "def perf_attrib(returns,positions,factor_returns,factor_loadings,transactions=None,pos_in_dollars=True):", "body": "(returns,positions,factor_returns,factor_loadings) = _align_and_warn(returns,positions,factor_returns,factor_loadings,transactions=transactions,pos_in_dollars=pos_in_dollars)positions = _stack_positions(positions, pos_in_dollars=pos_in_dollars)return ep.perf_attrib(returns, positions, factor_returns, factor_loadings)", "docstring": "Attributes the performance of a returns stream to a set of risk factors.\n\nPreprocesses inputs, and then calls empyrical.perf_attrib. See\nempyrical.perf_attrib for more info.\n\nPerformance attribution determines how much each risk factor, e.g.,\nmomentum, the technology sector, etc., contributed to total returns, as\nwell as the daily exposure to each of the risk factors. The returns that\ncan be attributed to one of the given risk factors are the\n`common_returns`, and the returns that _cannot_ be attributed to a risk\nfactor are the `specific_returns`, or the alpha. The common_returns and\nspecific_returns summed together will always equal the total returns.\n\nParameters\n----------\nreturns : pd.Series\n Returns for each day in the date range.\n - Example:\n 2017-01-01 -0.017098\n 2017-01-02 0.002683\n 2017-01-03 -0.008669\n\npositions: pd.DataFrame\n Daily holdings (in dollars or percentages), indexed by date.\n Will be converted to percentages if positions are in dollars.\n Short positions show up as cash in the 'cash' column.\n - Examples:\n AAPL TLT XOM cash\n 2017-01-01 34 58 10 0\n 2017-01-02 22 77 18 0\n 2017-01-03 -15 27 30 15\n\n AAPL TLT XOM cash\n 2017-01-01 0.333333 0.568627 0.098039 0.0\n 2017-01-02 0.188034 0.658120 0.153846 0.0\n 2017-01-03 0.208333 0.375000 0.416667 0.0\n\nfactor_returns : pd.DataFrame\n Returns by factor, with date as index and factors as columns\n - Example:\n momentum reversal\n 2017-01-01 0.002779 -0.005453\n 2017-01-02 0.001096 0.010290\n\nfactor_loadings : pd.DataFrame\n Factor loadings for all days in the date range, with date and ticker as\n index, and factors as columns.\n - Example:\n momentum reversal\n dt ticker\n 2017-01-01 AAPL -1.592914 0.852830\n TLT 0.184864 0.895534\n XOM 0.993160 1.149353\n 2017-01-02 AAPL -0.140009 -0.524952\n TLT -1.066978 0.185435\n XOM -1.798401 0.761549\n\n\ntransactions : pd.DataFrame, optional\n Executed trade volumes and fill prices. Used to check the turnover of\n the algorithm. 
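perf_attrib ultimately defers to empyrical.perf_attrib, but the arithmetic it describes can be sketched directly: daily factor exposures are position weights times factor loadings summed over names, common returns are exposures times factor returns, and specific returns are whatever is left of the total. A hedged, simplified restatement; the function and variable names here are illustrative, not the library's API:

def perf_attrib_sketch(returns, stacked_positions, factor_returns, factor_loadings):
    # returns: pd.Series by dt; stacked_positions: pd.Series of weights by (dt, ticker);
    # factor_returns: pd.DataFrame by dt with factor columns;
    # factor_loadings: pd.DataFrame by (dt, ticker) with the same factor columns.
    exposures = factor_loadings.multiply(stacked_positions, axis="index")
    exposures = exposures.groupby(level=0).sum()       # per-day exposure to each factor

    perf = exposures * factor_returns                  # per-factor return contribution
    perf["common_returns"] = perf.sum(axis="columns")
    perf["specific_returns"] = returns - perf["common_returns"]
    perf["total_returns"] = returns
    return exposures, perf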
Default is None, in which case the turnover check is\n skipped.\n\n - One row per trade.\n - Trades on different names that occur at the\n same time will have identical indicies.\n - Example:\n index amount price symbol\n 2004-01-09 12:18:01 483 324.12 'AAPL'\n 2004-01-09 12:18:01 122 83.10 'MSFT'\n 2004-01-13 14:12:23 -75 340.43 'AAPL'\n\npos_in_dollars : bool\n Flag indicating whether `positions` are in dollars or percentages\n If True, positions are in dollars.\n\nReturns\n-------\ntuple of (risk_exposures_portfolio, perf_attribution)\n\nrisk_exposures_portfolio : pd.DataFrame\n df indexed by datetime, with factors as columns\n - Example:\n momentum reversal\n dt\n 2017-01-01 -0.238655 0.077123\n 2017-01-02 0.821872 1.520515\n\nperf_attribution : pd.DataFrame\n df with factors, common returns, and specific returns as columns,\n and datetimes as index\n - Example:\n momentum reversal common_returns specific_returns\n dt\n 2017-01-01 0.249087 0.935925 1.185012 1.185012\n 2017-01-02 -0.003194 -0.400786 -0.403980 -0.403980", "id": "f12219:m0"} {"signature": "def compute_exposures(positions, factor_loadings, stack_positions=True,pos_in_dollars=True):", "body": "if stack_positions:positions = _stack_positions(positions, pos_in_dollars=pos_in_dollars)return ep.compute_exposures(positions, factor_loadings)", "docstring": "Compute daily risk factor exposures.\n\nNormalizes positions (if necessary) and calls ep.compute_exposures.\nSee empyrical.compute_exposures for more info.\n\nParameters\n----------\npositions: pd.DataFrame or pd.Series\n Daily holdings (in dollars or percentages), indexed by date, OR\n a series of holdings indexed by date and ticker.\n - Examples:\n AAPL TLT XOM cash\n 2017-01-01 34 58 10 0\n 2017-01-02 22 77 18 0\n 2017-01-03 -15 27 30 15\n\n AAPL TLT XOM cash\n 2017-01-01 0.333333 0.568627 0.098039 0.0\n 2017-01-02 0.188034 0.658120 0.153846 0.0\n 2017-01-03 0.208333 0.375000 0.416667 0.0\n\n dt ticker\n 2017-01-01 AAPL 0.417582\n TLT 0.010989\n XOM 0.571429\n 2017-01-02 AAPL 0.202381\n TLT 0.535714\n XOM 0.261905\n\nfactor_loadings : pd.DataFrame\n Factor loadings for all days in the date range, with date and ticker as\n index, and factors as columns.\n - Example:\n momentum reversal\n dt ticker\n 2017-01-01 AAPL -1.592914 0.852830\n TLT 0.184864 0.895534\n XOM 0.993160 1.149353\n 2017-01-02 AAPL -0.140009 -0.524952\n TLT -1.066978 0.185435\n XOM -1.798401 0.761549\n\nstack_positions : bool\n Flag indicating whether `positions` should be converted to long format.\n\npos_in_dollars : bool\n Flag indicating whether `positions` are in dollars or percentages\n If True, positions are in dollars.\n\nReturns\n-------\nrisk_exposures_portfolio : pd.DataFrame\n df indexed by datetime, with factors as columns.\n - Example:\n momentum reversal\n dt\n 2017-01-01 -0.238655 0.077123\n 2017-01-02 0.821872 1.520515", "id": "f12219:m1"} {"signature": "def create_perf_attrib_stats(perf_attrib, risk_exposures):", "body": "summary = OrderedDict()total_returns = perf_attrib['']specific_returns = perf_attrib['']common_returns = perf_attrib['']summary[''] =ep.annual_return(specific_returns)summary[''] =ep.annual_return(common_returns)summary[''] =ep.annual_return(total_returns)summary[''] =ep.sharpe_ratio(specific_returns)summary[''] =ep.cum_returns_final(specific_returns)summary[''] =ep.cum_returns_final(common_returns)summary[''] =ep.cum_returns_final(total_returns)summary = pd.Series(summary, name='')annualized_returns_by_factor = [ep.annual_return(perf_attrib[c])for c in 
risk_exposures.columns]cumulative_returns_by_factor = [ep.cum_returns_final(perf_attrib[c])for c in risk_exposures.columns]risk_exposure_summary = pd.DataFrame(data=OrderedDict([('',risk_exposures.mean(axis='')),('', annualized_returns_by_factor),('', cumulative_returns_by_factor),]),index=risk_exposures.columns,)return summary, risk_exposure_summary", "docstring": "Takes perf attribution data over a period of time and computes annualized\nmultifactor alpha, multifactor sharpe, risk exposures.", "id": "f12219:m2"} {"signature": "def show_perf_attrib_stats(returns,positions,factor_returns,factor_loadings,transactions=None,pos_in_dollars=True):", "body": "risk_exposures, perf_attrib_data = perf_attrib(returns,positions,factor_returns,factor_loadings,transactions,pos_in_dollars=pos_in_dollars,)perf_attrib_stats, risk_exposure_stats =create_perf_attrib_stats(perf_attrib_data, risk_exposures)percentage_formatter = ''.formatfloat_formatter = ''.formatsummary_stats = perf_attrib_stats.loc[['','','','']]for col_name in ('','','',):summary_stats[col_name] = percentage_formatter(summary_stats[col_name])summary_stats[''] = float_formatter(summary_stats[''])print_table(summary_stats, name='')print_table(risk_exposure_stats,name='',formatters={'': float_formatter,'': percentage_formatter,'': percentage_formatter,},)", "docstring": "Calls `perf_attrib` using inputs, and displays outputs using\n`utils.print_table`.", "id": "f12219:m3"} {"signature": "def plot_returns(perf_attrib_data, cost=None, ax=None):", "body": "if ax is None:ax = plt.gca()returns = perf_attrib_data['']total_returns_label = ''cumulative_returns_less_costs = _cumulative_returns_less_costs(returns,cost)if cost is not None:total_returns_label += ''specific_returns = perf_attrib_data['']common_returns = perf_attrib_data['']ax.plot(cumulative_returns_less_costs, color='',label=total_returns_label)ax.plot(ep.cum_returns(specific_returns), color='',label='')ax.plot(ep.cum_returns(common_returns), color='',label='')if cost is not None:ax.plot(-ep.cum_returns(cost), color='',label='')ax.set_title('')ax.set_ylabel('')configure_legend(ax)return ax", "docstring": "Plot total, specific, and common returns.\n\nParameters\n----------\nperf_attrib_data : pd.DataFrame\n df with factors, common returns, and specific returns as columns,\n and datetimes as index. Assumes the `total_returns` column is NOT\n cost adjusted.\n - Example:\n momentum reversal common_returns specific_returns\n dt\n 2017-01-01 0.249087 0.935925 1.185012 1.185012\n 2017-01-02 -0.003194 -0.400786 -0.403980 -0.403980\n\ncost : pd.Series, optional\n if present, gets subtracted from `perf_attrib_data['total_returns']`,\n and gets plotted separately\n\nax : matplotlib.axes.Axes\n axes on which plots are made. if None, current axes will be used\n\nReturns\n-------\nax : matplotlib.axes.Axes", "id": "f12219:m4"} {"signature": "def plot_alpha_returns(alpha_returns, ax=None):", "body": "if ax is None:ax = plt.gca()ax.hist(alpha_returns, color='', label='')ax.set_title('')ax.axvline(, color='', linestyle='', label='')avg = alpha_returns.mean()ax.axvline(avg, color='', label=''.format(avg))configure_legend(ax)return ax", "docstring": "Plot histogram of daily multi-factor alpha returns (specific returns).\n\nParameters\n----------\nalpha_returns : pd.Series\n series of daily alpha returns indexed by datetime\n\nax : matplotlib.axes.Axes\n axes on which plots are made. 
if None, current axes will be used\n\nReturns\n-------\nax : matplotlib.axes.Axes", "id": "f12219:m5"} {"signature": "def plot_factor_contribution_to_perf(perf_attrib_data,ax=None,title='',):", "body": "if ax is None:ax = plt.gca()factors_to_plot = perf_attrib_data.drop(['', ''], axis='', errors='')factors_cumulative = pd.DataFrame()for factor in factors_to_plot:factors_cumulative[factor] = ep.cum_returns(factors_to_plot[factor])for col in factors_cumulative:ax.plot(factors_cumulative[col])ax.axhline(, color='')configure_legend(ax, change_colors=True)ax.set_ylabel('')ax.set_title(title)return ax", "docstring": "Plot each factor's contribution to performance.\n\nParameters\n----------\nperf_attrib_data : pd.DataFrame\n df with factors, common returns, and specific returns as columns,\n and datetimes as index\n - Example:\n momentum reversal common_returns specific_returns\n dt\n 2017-01-01 0.249087 0.935925 1.185012 1.185012\n 2017-01-02 -0.003194 -0.400786 -0.403980 -0.403980\n\nax : matplotlib.axes.Axes\n axes on which plots are made. if None, current axes will be used\n\ntitle : str, optional\n title of plot\n\nReturns\n-------\nax : matplotlib.axes.Axes", "id": "f12219:m6"} {"signature": "def plot_risk_exposures(exposures, ax=None,title=''):", "body": "if ax is None:ax = plt.gca()for col in exposures:ax.plot(exposures[col])configure_legend(ax, change_colors=True)ax.set_ylabel('')ax.set_title(title)return ax", "docstring": "Parameters\n----------\nexposures : pd.DataFrame\n df indexed by datetime, with factors as columns\n - Example:\n momentum reversal\n dt\n 2017-01-01 -0.238655 0.077123\n 2017-01-02 0.821872 1.520515\n\nax : matplotlib.axes.Axes\n axes on which plots are made. if None, current axes will be used\n\nReturns\n-------\nax : matplotlib.axes.Axes", "id": "f12219:m7"} {"signature": "def _align_and_warn(returns,positions,factor_returns,factor_loadings,transactions=None,pos_in_dollars=True):", "body": "missing_stocks = positions.columns.difference(factor_loadings.index.get_level_values().unique())num_stocks = len(positions.columns) - missing_stocks = missing_stocks.drop('')num_stocks_covered = num_stocks - len(missing_stocks)missing_ratio = round(len(missing_stocks) / num_stocks, ndigits=)if num_stocks_covered == :raise ValueError(\"\"\"\"\"\")if len(missing_stocks) > :if len(missing_stocks) > :missing_stocks_displayed = (\"\").format(len(missing_stocks),''.join(missing_stocks[:].map(str)),missing_stocks[-])avg_allocation_msg = \"\"else:missing_stocks_displayed = (\"\").format(list(missing_stocks))avg_allocation_msg = \"\"missing_stocks_warning_msg = (\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\").format(missing_stocks_displayed,missing_ratio,avg_allocation_msg,positions[missing_stocks[:].union(missing_stocks[[-]])].mean(),)warnings.warn(missing_stocks_warning_msg)positions = positions.drop(missing_stocks, axis='',errors='')missing_factor_loadings_index = positions.index.difference(factor_loadings.index.get_level_values().unique())missing_factor_loadings_index = positions.index.difference(factor_loadings.index.get_level_values().unique())if len(missing_factor_loadings_index) > :if len(missing_factor_loadings_index) > :missing_dates_displayed = (\"\").format(missing_factor_loadings_index[],missing_factor_loadings_index[-])else:missing_dates_displayed = list(missing_factor_loadings_index)warning_msg = (\"\"\"\").format(len(missing_factor_loadings_index), missing_dates_displayed)warnings.warn(warning_msg)positions = positions.drop(missing_factor_loadings_index,errors='')returns = 
returns.drop(missing_factor_loadings_index, errors='')factor_returns = factor_returns.drop(missing_factor_loadings_index,errors='')if transactions is not None and pos_in_dollars:turnover = get_turnover(positions, transactions).mean()if turnover > PERF_ATTRIB_TURNOVER_THRESHOLD:warning_msg = (\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\")warnings.warn(warning_msg)return (returns, positions, factor_returns, factor_loadings)", "docstring": "Make sure that all inputs have matching dates and tickers,\nand raise warnings if necessary.", "id": "f12219:m8"} {"signature": "def _stack_positions(positions, pos_in_dollars=True):", "body": "if pos_in_dollars:positions = get_percent_alloc(positions)positions = positions.drop('', axis='')positions = positions.stack()positions.index = positions.index.set_names(['', ''])return positions", "docstring": "Convert positions to percentages if necessary, and change them\nto long format.\n\nParameters\n----------\npositions: pd.DataFrame\n Daily holdings (in dollars or percentages), indexed by date.\n Will be converted to percentages if positions are in dollars.\n Short positions show up as cash in the 'cash' column.\n\npos_in_dollars : bool\n Flag indicating whether `positions` are in dollars or percentages\n If True, positions are in dollars.", "id": "f12219:m9"} {"signature": "def _cumulative_returns_less_costs(returns, costs):", "body": "if costs is None:return ep.cum_returns(returns)return ep.cum_returns(returns - costs)", "docstring": "Compute cumulative returns, less costs.", "id": "f12219:m10"} {"signature": "def one_dec_places(x, pos):", "body": "return '' % x", "docstring": "Adds 1/10th decimal to plot ticks.", "id": "f12220:m0"} {"signature": "def two_dec_places(x, pos):", "body": "return '' % x", "docstring": "Adds 1/100th decimal to plot ticks.", "id": "f12220:m1"} {"signature": "def percentage(x, pos):", "body": "return '' % x", "docstring": "Adds percentage sign to plot ticks.", "id": "f12220:m2"} {"signature": "def format_asset(asset):", "body": "try:import zipline.assetsexcept ImportError:return assetif isinstance(asset, zipline.assets.Asset):return asset.symbolelse:return asset", "docstring": "If zipline asset objects are used, we want to print them out prettily\nwithin the tear sheet. 
This function should only be applied directly\nbefore displaying.", "id": "f12220:m3"} {"signature": "def vectorize(func):", "body": "def wrapper(df, *args, **kwargs):if df.ndim == :return func(df, *args, **kwargs)elif df.ndim == :return df.apply(func, *args, **kwargs)return wrapper", "docstring": "Decorator so that functions can be written to work on Series but\nmay still be called with DataFrames.", "id": "f12220:m4"} {"signature": "def print_table(table,name=None,float_format=None,formatters=None,header_rows=None):", "body": "if isinstance(table, pd.Series):table = pd.DataFrame(table)if name is not None:table.columns.name = namehtml = table.to_html(float_format=float_format, formatters=formatters)if header_rows is not None:n_cols = html.split('')[].split('')[].count('')rows = ''for name, value in header_rows.items():rows += ('' +'') % (name, n_cols, value)html = html.replace('', '' + rows)display(HTML(html))", "docstring": "Pretty print a pandas DataFrame.\n\nUses HTML output if running inside Jupyter Notebook, otherwise\nformatted text output.\n\nParameters\n----------\ntable : pandas.Series or pandas.DataFrame\n Table to pretty-print.\nname : str, optional\n Table name to display in upper left corner.\nfloat_format : function, optional\n Formatter to use for displaying table elements, passed as the\n `float_format` arg to pd.Dataframe.to_html.\n E.g. `'{0:.2%}'.format` for displaying 100 as '100.00%'.\nformatters : list or dict, optional\n Formatters to use by column, passed as the `formatters` arg to\n pd.Dataframe.to_html.\nheader_rows : dict, optional\n Extra rows to display at the top of the table.", "id": "f12220:m6"} {"signature": "def standardize_data(x):", "body": "return (x - np.mean(x)) / np.std(x)", "docstring": "Standardize an array with mean and standard deviation.\n\nParameters\n----------\nx : np.array\n Array to standardize.\n\nReturns\n-------\nnp.array\n Standardized array.", "id": "f12220:m7"} {"signature": "def detect_intraday(positions, transactions, threshold=):", "body": "daily_txn = transactions.copy()daily_txn.index = daily_txn.index.datetxn_count = daily_txn.groupby(level=).symbol.nunique().sum()daily_pos = positions.drop('', axis=).replace(, np.nan)return daily_pos.count(axis=).sum() / txn_count < threshold", "docstring": "Attempt to detect an intraday strategy. Get the number of\npositions held at the end of the day, and divide that by the\nnumber of unique stocks transacted every day. If the average quotient\nis below a threshold, then an intraday strategy is detected.\n\nParameters\n----------\npositions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\ntransactions : pd.DataFrame\n Prices and amounts of executed trades. 
One row per trade.\n - See full explanation in create_full_tear_sheet.\n\nReturns\n-------\nboolean\n True if an intraday strategy is detected.", "id": "f12220:m8"} {"signature": "def check_intraday(estimate, returns, positions, transactions):", "body": "if estimate == '':if positions is not None and transactions is not None:if detect_intraday(positions, transactions):warnings.warn('' +'' +'')return estimate_intraday(returns, positions, transactions)else:return positionselse:return positionselif estimate:if positions is not None and transactions is not None:return estimate_intraday(returns, positions, transactions)else:raise ValueError('')else:return positions", "docstring": "Logic for checking if a strategy is intraday and processing it.\n\nParameters\n----------\nestimate: boolean or str, optional\n Approximate returns for intraday strategies.\n See description in tears.create_full_tear_sheet.\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\npositions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\ntransactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in create_full_tear_sheet.\n\nReturns\n-------\npd.DataFrame\n Daily net position values, adjusted for intraday movement.", "id": "f12220:m9"} {"signature": "def estimate_intraday(returns, positions, transactions, EOD_hour=):", "body": "txn_val = transactions.copy()txn_val.index.names = ['']txn_val[''] = txn_val.amount * txn_val.pricetxn_val = txn_val.reset_index().pivot_table(index='', values='',columns='').replace(np.nan, )txn_val[''] = txn_val.index.datetxn_val = txn_val.groupby('').cumsum()txn_val[''] = txn_val.abs().sum(axis=)condition = (txn_val[''] == txn_val.groupby(pd.TimeGrouper(''))[''].transform(max))txn_val = txn_val[condition].drop('', axis=)txn_val[''] = -txn_val.sum(axis=)positions_shifted = positions.copy().shift().fillna()starting_capital = positions.iloc[].sum() / ( + returns[])positions_shifted.cash[] = starting_capitaltxn_val.index = txn_val.index.normalize()corrected_positions = positions_shifted.add(txn_val, fill_value=)corrected_positions.index.name = ''corrected_positions.columns.name = ''return corrected_positions", "docstring": "Intraday strategies will often not hold positions at the day end.\nThis attempts to find the point in the day that best represents\nthe activity of the strategy on that day, and effectively resamples\nthe end-of-day positions with the positions at this point of day.\nThe point of day is found by detecting when our exposure in the\nmarket is at its maximum point. Note that this is an estimate.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\npositions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\ntransactions : pd.DataFrame\n Prices and amounts of executed trades. 
One row per trade.\n - See full explanation in create_full_tear_sheet.\n\nReturns\n-------\npd.DataFrame\n Daily net position values, resampled for intraday behavior.", "id": "f12220:m10"} {"signature": "def clip_returns_to_benchmark(rets, benchmark_rets):", "body": "if (rets.index[] < benchmark_rets.index[])or (rets.index[-] > benchmark_rets.index[-]):clipped_rets = rets[benchmark_rets.index]else:clipped_rets = retsreturn clipped_rets", "docstring": "Drop entries from rets so that the start and end dates of rets match those\nof benchmark_rets.\n\nParameters\n----------\nrets : pd.Series\n Daily returns of the strategy, noncumulative.\n - See pf.tears.create_full_tear_sheet for more details\n\nbenchmark_rets : pd.Series\n Daily returns of the benchmark, noncumulative.\n\nReturns\n-------\nclipped_rets : pd.Series\n Daily noncumulative returns with index clipped to match that of\n benchmark returns.", "id": "f12220:m11"} {"signature": "def to_utc(df):", "body": "try:df.index = df.index.tz_localize('')except TypeError:df.index = df.index.tz_convert('')return df", "docstring": "For use in tests; applied UTC timestamp to DataFrame.", "id": "f12220:m12"} {"signature": "def to_series(df):", "body": "return df[df.columns[]]", "docstring": "For use in tests; converts DataFrame's first column to Series.", "id": "f12220:m13"} {"signature": "def register_return_func(func):", "body": "SETTINGS[''] = func", "docstring": "Registers the 'returns_func' that will be called for\nretrieving returns data.\n\nParameters\n----------\nfunc : function\n A function that returns a pandas Series of asset returns.\n The signature of the function must be as follows\n\n >>> func(symbol)\n\n Where symbol is an asset identifier\n\nReturns\n-------\nNone", "id": "f12220:m14"} {"signature": "def get_symbol_rets(symbol, start=None, end=None):", "body": "return SETTINGS[''](symbol,start=start,end=end)", "docstring": "Calls the currently registered 'returns_func'\n\nParameters\n----------\nsymbol : object\n An identifier for the asset whose return\n series is desired.\n e.g. 
ticker symbol or database ID\nstart : date, optional\n Earliest date to fetch data for.\n Defaults to earliest date available.\nend : date, optional\n Latest date to fetch data for.\n Defaults to latest date available.\n\nReturns\n-------\npandas.Series\n Returned by the current 'returns_func'", "id": "f12220:m15"} {"signature": "def configure_legend(ax, autofmt_xdate=True, change_colors=False,rotation=, ha=''):", "body": "chartBox = ax.get_position()ax.set_position([chartBox.x0, chartBox.y0,chartBox.width * , chartBox.height])handles, labels = ax.get_legend_handles_labels()handles_and_labels_sorted = sorted(zip(handles, labels),key=lambda x: x[].get_ydata()[-],reverse=True)handles_sorted = [h[] for h in handles_and_labels_sorted]labels_sorted = [h[] for h in handles_and_labels_sorted]if change_colors:for handle, color in zip(handles_sorted,cycle(COLORS)):handle.set_color(color)ax.legend(handles=handles_sorted,labels=labels_sorted,frameon=True,framealpha=,loc='',bbox_to_anchor=(, ),fontsize='')if autofmt_xdate:for label in ax.get_xticklabels():label.set_ha(ha)label.set_rotation(rotation)", "docstring": "Format legend for perf attribution plots:\n- put legend to the right of plot instead of overlapping with it\n- make legend order match up with graph lines\n- set colors according to colormap", "id": "f12220:m16"} {"signature": "def sample_colormap(cmap_name, n_samples):", "body": "colors = []colormap = cm.cmap_d[cmap_name]for i in np.linspace(, , n_samples):colors.append(colormap(i))return colors", "docstring": "Sample a colormap from matplotlib", "id": "f12220:m17"} {"signature": "def var_cov_var_normal(P, c, mu=, sigma=):", "body": "alpha = sp.stats.norm.ppf( - c, mu, sigma)return P - P * (alpha + )", "docstring": "Variance-covariance calculation of daily Value-at-Risk in a\nportfolio.\n\nParameters\n----------\nP : float\n Portfolio value.\nc : float\n Confidence level.\nmu : float, optional\n Mean.\n\nReturns\n-------\nfloat\n Variance-covariance.", "id": "f12221:m0"} {"signature": "@deprecated(msg=DEPRECATION_WARNING)def max_drawdown(returns):", "body": "return ep.max_drawdown(returns)", "docstring": "Determines the maximum drawdown of a strategy.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n\nReturns\n-------\nfloat\n Maximum drawdown.\n\nNote\n-----\nSee https://en.wikipedia.org/wiki/Drawdown_(economics) for more details.", "id": "f12221:m1"} {"signature": "@deprecated(msg=DEPRECATION_WARNING)def annual_return(returns, period=DAILY):", "body": "return ep.annual_return(returns, period=period)", "docstring": "Determines the mean annual growth rate of returns.\n\nParameters\n----------\nreturns : pd.Series\n Periodic returns of the strategy, noncumulative.\n - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\nperiod : str, optional\n Defines the periodicity of the 'returns' data for purposes of\n annualizing. 
Can be 'monthly', 'weekly', or 'daily'.\n - Defaults to 'daily'.\n\nReturns\n-------\nfloat\n Annual Return as CAGR (Compounded Annual Growth Rate).", "id": "f12221:m2"} {"signature": "@deprecated(msg=DEPRECATION_WARNING)def annual_volatility(returns, period=DAILY):", "body": "return ep.annual_volatility(returns, period=period)", "docstring": "Determines the annual volatility of a strategy.\n\nParameters\n----------\nreturns : pd.Series\n Periodic returns of the strategy, noncumulative.\n - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\nperiod : str, optional\n Defines the periodicity of the 'returns' data for purposes of\n annualizing volatility. Can be 'monthly' or 'weekly' or 'daily'.\n - Defaults to 'daily'.\n\nReturns\n-------\nfloat\n Annual volatility.", "id": "f12221:m3"} {"signature": "@deprecated(msg=DEPRECATION_WARNING)def calmar_ratio(returns, period=DAILY):", "body": "return ep.calmar_ratio(returns, period=period)", "docstring": "Determines the Calmar ratio, or drawdown ratio, of a strategy.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\nperiod : str, optional\n Defines the periodicity of the 'returns' data for purposes of\n annualizing. Can be 'monthly', 'weekly', or 'daily'.\n - Defaults to 'daily'.\n\nReturns\n-------\nfloat\n Calmar ratio (drawdown ratio) as float. Returns np.nan if there is no\n calmar ratio.\n\nNote\n-----\nSee https://en.wikipedia.org/wiki/Calmar_ratio for more details.", "id": "f12221:m4"} {"signature": "@deprecated(msg=DEPRECATION_WARNING)def omega_ratio(returns, annual_return_threshhold=):", "body": "return ep.omega_ratio(returns,required_return=annual_return_threshhold)", "docstring": "Determines the Omega ratio of a strategy.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\nannual_return_threshold : float, optional\n Minimum acceptable return of the investor. Annual threshold over which\n returns are considered positive or negative. It is converted to a\n value appropriate for the period of the returns for this ratio.\n E.g. An annual minimum acceptable return of 100 translates to a daily\n minimum acceptable return of 0.01848.\n (1 + 100) ** (1. / 252) - 1 = 0.01848\n Daily returns must exceed this value to be considered positive. The\n daily return yields the desired annual return when compounded over\n the average number of business days in a year.\n (1 + 0.01848) ** 252 - 1 = 99.93\n - Defaults to 0.0\n\n\nReturns\n-------\nfloat\n Omega ratio.\n\nNote\n-----\nSee https://en.wikipedia.org/wiki/Omega_ratio for more details.", "id": "f12221:m5"} {"signature": "@deprecated(msg=DEPRECATION_WARNING)def sortino_ratio(returns, required_return=, period=DAILY):", "body": "return ep.sortino_ratio(returns, required_return=required_return)", "docstring": "Determines the Sortino ratio of a strategy.\n\nParameters\n----------\nreturns : pd.Series or pd.DataFrame\n Daily returns of the strategy, noncumulative.\n - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\nrequired_return: float / series\n minimum acceptable return\nperiod : str, optional\n Defines the periodicity of the 'returns' data for purposes of\n annualizing. 
Can be 'monthly', 'weekly', or 'daily'.\n - Defaults to 'daily'.\n\nReturns\n-------\ndepends on input type\nseries ==> float\nDataFrame ==> np.array\n\n Annualized Sortino ratio.", "id": "f12221:m6"} {"signature": "@deprecated(msg=DEPRECATION_WARNING)def downside_risk(returns, required_return=, period=DAILY):", "body": "return ep.downside_risk(returns,required_return=required_return,period=period)", "docstring": "Determines the downside deviation below a threshold\n\nParameters\n----------\nreturns : pd.Series or pd.DataFrame\n Daily returns of the strategy, noncumulative.\n - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\nrequired_return: float / series\n minimum acceptable return\nperiod : str, optional\n Defines the periodicity of the 'returns' data for purposes of\n annualizing. Can be 'monthly', 'weekly', or 'daily'.\n - Defaults to 'daily'.\n\nReturns\n-------\ndepends on input type\nseries ==> float\nDataFrame ==> np.array\n\n Annualized downside deviation", "id": "f12221:m7"} {"signature": "@deprecated(msg=DEPRECATION_WARNING)def sharpe_ratio(returns, risk_free=, period=DAILY):", "body": "return ep.sharpe_ratio(returns, risk_free=risk_free, period=period)", "docstring": "Determines the Sharpe ratio of a strategy.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\nrisk_free : int, float\n Constant risk-free return throughout the period.\nperiod : str, optional\n Defines the periodicity of the 'returns' data for purposes of\n annualizing. Can be 'monthly', 'weekly', or 'daily'.\n - Defaults to 'daily'.\n\nReturns\n-------\nfloat\n Sharpe ratio.\nnp.nan\n If insufficient length of returns or if if adjusted returns are 0.\n\nNote\n-----\nSee https://en.wikipedia.org/wiki/Sharpe_ratio for more details.", "id": "f12221:m8"} {"signature": "@deprecated(msg=DEPRECATION_WARNING)def alpha_beta(returns, factor_returns):", "body": "return ep.alpha_beta(returns, factor_returns=factor_returns)", "docstring": "Calculates both alpha and beta.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\nfactor_returns : pd.Series\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. Usually a benchmark such as market returns.\n - This is in the same style as returns.\n\nReturns\n-------\nfloat\n Alpha.\nfloat\n Beta.", "id": "f12221:m9"} {"signature": "@deprecated(msg=DEPRECATION_WARNING)def alpha(returns, factor_returns):", "body": "return ep.alpha(returns, factor_returns=factor_returns)", "docstring": "Calculates annualized alpha.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\nfactor_returns : pd.Series\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. 
Usually a benchmark such as market returns.\n - This is in the same style as returns.\n\nReturns\n-------\nfloat\n Alpha.", "id": "f12221:m10"} {"signature": "@deprecated(msg=DEPRECATION_WARNING)def beta(returns, factor_returns):", "body": "return ep.beta(returns, factor_returns)", "docstring": "Calculates beta.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\nfactor_returns : pd.Series\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. Usually a benchmark such as market returns.\n - This is in the same style as returns.\n\nReturns\n-------\nfloat\n Beta.", "id": "f12221:m11"} {"signature": "@deprecated(msg=DEPRECATION_WARNING)def stability_of_timeseries(returns):", "body": "return ep.stability_of_timeseries(returns)", "docstring": "Determines R-squared of a linear fit to the cumulative\nlog returns. Computes an ordinary least squares linear fit,\nand returns R-squared.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\n\nReturns\n-------\nfloat\n R-squared.", "id": "f12221:m12"} {"signature": "@deprecated(msg=DEPRECATION_WARNING)def tail_ratio(returns):", "body": "return ep.tail_ratio(returns)", "docstring": "Determines the ratio between the right (95%) and left tail (5%).\n\nFor example, a ratio of 0.25 means that losses are four times\nas bad as profits.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\n\nReturns\n-------\nfloat\n tail ratio", "id": "f12221:m13"} {"signature": "def common_sense_ratio(returns):", "body": "return ep.tail_ratio(returns) *( + ep.annual_return(returns))", "docstring": "Common sense ratio is the multiplication of the tail ratio and the\nGain-to-Pain-Ratio -- sum(profits) / sum(losses).\n\nSee http://bit.ly/1ORzGBk for more information on motivation of\nthis metric.\n\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n\nReturns\n-------\nfloat\n common sense ratio", "id": "f12221:m14"} {"signature": "def normalize(returns, starting_value=):", "body": "return starting_value * (returns / returns.iloc[])", "docstring": "Normalizes a returns timeseries based on the first value.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nstarting_value : float, optional\n The starting returns (default 1).\n\nReturns\n-------\npd.Series\n Normalized returns.", "id": "f12221:m15"} {"signature": "@deprecated(msg=DEPRECATION_WARNING)def cum_returns(returns, starting_value=):", "body": "return ep.cum_returns(returns, starting_value=starting_value)", "docstring": "Compute cumulative returns from simple returns.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nstarting_value : float, optional\n The starting returns (default 1).\n\nReturns\n-------\npandas.Series\n Series of cumulative returns.\n\nNotes\n-----\nFor increased numerical accuracy, convert input to log returns\nwhere it is possible to sum instead of multiplying.", "id": "f12221:m16"} {"signature": "@deprecated(msg=DEPRECATION_WARNING)def 
aggregate_returns(returns, convert_to):", "body": "return ep.aggregate_returns(returns, convert_to=convert_to)", "docstring": "Aggregates returns by week, month, or year.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\nconvert_to : str\n Can be 'weekly', 'monthly', or 'yearly'.\n\nReturns\n-------\npd.Series\n Aggregated returns.", "id": "f12221:m17"} {"signature": "def rolling_beta(returns, factor_returns,rolling_window=APPROX_BDAYS_PER_MONTH * ):", "body": "if factor_returns.ndim > :return factor_returns.apply(partial(rolling_beta, returns),rolling_window=rolling_window)else:out = pd.Series(index=returns.index)for beg, end in zip(returns.index[:-rolling_window],returns.index[rolling_window:]):out.loc[end] = ep.beta(returns.loc[beg:end],factor_returns.loc[beg:end])return out", "docstring": "Determines the rolling beta of a strategy.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nfactor_returns : pd.Series or pd.DataFrame\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. Usually a benchmark such as market returns.\n - If DataFrame is passed, computes rolling beta for each column.\n - This is in the same style as returns.\nrolling_window : int, optional\n The size of the rolling window, in days, over which to compute\n beta (default 6 months).\n\nReturns\n-------\npd.Series\n Rolling beta.\n\nNote\n-----\nSee https://en.wikipedia.org/wiki/Beta_(finance) for more details.", "id": "f12221:m18"} {"signature": "def rolling_regression(returns, factor_returns,rolling_window=APPROX_BDAYS_PER_MONTH * ,nan_threshold=):", "body": "ret_no_na = returns.dropna()columns = [''] + factor_returns.columns.tolist()rolling_risk = pd.DataFrame(columns=columns,index=ret_no_na.index)rolling_risk.index.name = ''for beg, end in zip(ret_no_na.index[:-rolling_window],ret_no_na.index[rolling_window:]):returns_period = ret_no_na[beg:end]factor_returns_period = factor_returns.loc[returns_period.index]if np.all(factor_returns_period.isnull().mean()) < nan_threshold:factor_returns_period_dnan = factor_returns_period.dropna()reg = linear_model.LinearRegression(fit_intercept=True).fit(factor_returns_period_dnan,returns_period.loc[factor_returns_period_dnan.index])rolling_risk.loc[end, factor_returns.columns] = reg.coef_rolling_risk.loc[end, ''] = reg.intercept_return rolling_risk", "docstring": "Computes rolling factor betas using a multivariate linear regression\n(separate linear regressions is problematic because the factors may be\nconfounded).\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nfactor_returns : pd.DataFrame\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. Usually a benchmark such as market returns.\n - Computes rolling beta for each column.\n - This is in the same style as returns.\nrolling_window : int, optional\n The days window over which to compute the beta. 
Defaults to 6 months.\nnan_threshold : float, optional\n If there are more than this fraction of NaNs, the rolling regression\n for the given date will be skipped.\n\nReturns\n-------\npandas.DataFrame\n DataFrame containing rolling beta coefficients to SMB, HML and UMD", "id": "f12221:m19"} {"signature": "def gross_lev(positions):", "body": "exposure = positions.drop('', axis=).abs().sum(axis=)return exposure / positions.sum(axis=)", "docstring": "Calculates the gross leverage of a strategy.\n\nParameters\n----------\npositions : pd.DataFrame\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\n\nReturns\n-------\npd.Series\n Gross leverage.", "id": "f12221:m20"} {"signature": "def value_at_risk(returns, period=None, sigma=):", "body": "if period is not None:returns_agg = ep.aggregate_returns(returns, period)else:returns_agg = returns.copy()value_at_risk = returns_agg.mean() - sigma * returns_agg.std()return value_at_risk", "docstring": "Get value at risk (VaR).\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nperiod : str, optional\n Period over which to calculate VaR. Set to 'weekly',\n 'monthly', or 'yearly', otherwise defaults to period of\n returns (typically daily).\nsigma : float, optional\n Standard deviations of VaR, default 2.", "id": "f12221:m21"} {"signature": "def perf_stats(returns, factor_returns=None, positions=None,transactions=None, turnover_denom=''):", "body": "stats = pd.Series()for stat_func in SIMPLE_STAT_FUNCS:stats[STAT_FUNC_NAMES[stat_func.__name__]] = stat_func(returns)if positions is not None:stats[''] = gross_lev(positions).mean()if transactions is not None:stats[''] = get_turnover(positions,transactions,turnover_denom).mean()if factor_returns is not None:for stat_func in FACTOR_STAT_FUNCS:res = stat_func(returns, factor_returns)stats[STAT_FUNC_NAMES[stat_func.__name__]] = resreturn stats", "docstring": "Calculates various performance metrics of a strategy, for use in\nplotting.show_perf_stats.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\nfactor_returns : pd.Series, optional\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. Usually a benchmark such as market returns.\n - This is in the same style as returns.\n - If None, do not compute alpha, beta, and information ratio.\npositions : pd.DataFrame\n Daily net position values.\n - See full explanation in tears.create_full_tear_sheet.\ntransactions : pd.DataFrame\n Prices and amounts of executed trades. 
One row per trade.\n - See full explanation in tears.create_full_tear_sheet.\nturnover_denom : str\n Either AGB or portfolio_value, default AGB.\n - See full explanation in txn.get_turnover.\n\nReturns\n-------\npd.Series\n Performance metrics.", "id": "f12221:m22"} {"signature": "def perf_stats_bootstrap(returns, factor_returns=None, return_stats=True,**kwargs):", "body": "bootstrap_values = OrderedDict()for stat_func in SIMPLE_STAT_FUNCS:stat_name = STAT_FUNC_NAMES[stat_func.__name__]bootstrap_values[stat_name] = calc_bootstrap(stat_func,returns)if factor_returns is not None:for stat_func in FACTOR_STAT_FUNCS:stat_name = STAT_FUNC_NAMES[stat_func.__name__]bootstrap_values[stat_name] = calc_bootstrap(stat_func,returns,factor_returns=factor_returns)bootstrap_values = pd.DataFrame(bootstrap_values)if return_stats:stats = bootstrap_values.apply(calc_distribution_stats)return stats.T[['', '', '', '']]else:return bootstrap_values", "docstring": "Calculates various bootstrapped performance metrics of a strategy.\n\n Parameters\n ----------\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n factor_returns : pd.Series, optional\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. Usually a benchmark such as market returns.\n - This is in the same style as returns.\n - If None, do not compute alpha, beta, and information ratio.\n return_stats : boolean (optional)\n If True, returns a DataFrame of mean, median, 5 and 95 percentiles\n for each perf metric.\n If False, returns a DataFrame with the bootstrap samples for\n each perf metric.\n\n Returns\n -------\n pd.DataFrame\n if return_stats is True:\n - Distributional statistics of bootstrapped sampling\n distribution of performance metrics.\n if return_stats is False:\n - Bootstrap samples for each performance metric.", "id": "f12221:m23"} {"signature": "def calc_bootstrap(func, returns, *args, **kwargs):", "body": "n_samples = kwargs.pop('', )out = np.empty(n_samples)factor_returns = kwargs.pop('', None)for i in range(n_samples):idx = np.random.randint(len(returns), size=len(returns))returns_i = returns.iloc[idx].reset_index(drop=True)if factor_returns is not None:factor_returns_i = factor_returns.iloc[idx].reset_index(drop=True)out[i] = func(returns_i, factor_returns_i,*args, **kwargs)else:out[i] = func(returns_i,*args, **kwargs)return out", "docstring": "Performs a bootstrap analysis on a user-defined function returning\n a summary statistic.\n\n Parameters\n ----------\n func : function\n Function that either takes a single array (commonly returns)\n or two arrays (commonly returns and factor returns) and\n returns a single value (commonly a summary\n statistic). Additional args and kwargs are passed as well.\n returns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\n factor_returns : pd.Series, optional\n Daily noncumulative returns of the benchmark factor to which betas are\n computed. Usually a benchmark such as market returns.\n - This is in the same style as returns.\n n_samples : int, optional\n Number of bootstrap samples to draw. 
Default is 1000.\n Increasing this will lead to more stable / accurate estimates.\n\n Returns\n -------\n numpy.ndarray\n Bootstrapped sampling distribution of passed in func.", "id": "f12221:m24"} {"signature": "def calc_distribution_stats(x):", "body": "return pd.Series({'': np.mean(x),'': np.median(x),'': np.std(x),'': np.percentile(x, ),'': np.percentile(x, ),'': np.percentile(x, ),'': np.percentile(x, ),'': np.subtract.reduce(np.percentile(x, [, ])),})", "docstring": "Calculate various summary statistics of data.\n\n Parameters\n ----------\n x : numpy.ndarray or pandas.Series\n Array to compute summary statistics for.\n\n Returns\n -------\n pandas.Series\n Series containing mean, median, std, as well as 5, 25, 75 and\n 95 percentiles of passed in values.", "id": "f12221:m25"} {"signature": "def get_max_drawdown_underwater(underwater):", "body": "valley = np.argmin(underwater) peak = underwater[:valley][underwater[:valley] == ].index[-]try:recovery = underwater[valley:][underwater[valley:] == ].index[]except IndexError:recovery = np.nan return peak, valley, recovery", "docstring": "Determines peak, valley, and recovery dates given an 'underwater'\nDataFrame.\n\nAn underwater DataFrame is a DataFrame that has precomputed\nrolling drawdown.\n\nParameters\n----------\nunderwater : pd.Series\n Underwater returns (rolling drawdown) of a strategy.\n\nReturns\n-------\npeak : datetime\n The maximum drawdown's peak.\nvalley : datetime\n The maximum drawdown's valley.\nrecovery : datetime\n The maximum drawdown's recovery.", "id": "f12221:m26"} {"signature": "def get_max_drawdown(returns):", "body": "returns = returns.copy()df_cum = cum_returns(returns, )running_max = np.maximum.accumulate(df_cum)underwater = df_cum / running_max - return get_max_drawdown_underwater(underwater)", "docstring": "Determines the maximum drawdown of a strategy.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\n\nReturns\n-------\nfloat\n Maximum drawdown.\n\nNote\n-----\nSee https://en.wikipedia.org/wiki/Drawdown_(economics) for more details.", "id": "f12221:m27"} {"signature": "def get_top_drawdowns(returns, top=):", "body": "returns = returns.copy()df_cum = ep.cum_returns(returns, )running_max = np.maximum.accumulate(df_cum)underwater = df_cum / running_max - drawdowns = []for t in range(top):peak, valley, recovery = get_max_drawdown_underwater(underwater)if not pd.isnull(recovery):underwater.drop(underwater[peak: recovery].index[:-],inplace=True)else:underwater = underwater.loc[:peak]drawdowns.append((peak, valley, recovery))if (len(returns) == ) or (len(underwater) == ):breakreturn drawdowns", "docstring": "Finds top drawdowns, sorted by drawdown amount.\n\nParameters\n----------\nreturns : pd.Series\n Daily returns of the strategy, noncumulative.\n - See full explanation in tears.create_full_tear_sheet.\ntop : int, optional\n The amount of top drawdowns to find (default 10).\n\nReturns\n-------\ndrawdowns : list\n List of drawdown peaks, valleys, and recoveries. 
See get_max_drawdown.", "id": "f12221:m28"} {"signature": "def gen_drawdown_table(returns, top=):", "body": "df_cum = ep.cum_returns(returns, )drawdown_periods = get_top_drawdowns(returns, top=top)df_drawdowns = pd.DataFrame(index=list(range(top)),columns=['','','','',''])for i, (peak, valley, recovery) in enumerate(drawdown_periods):if pd.isnull(recovery):df_drawdowns.loc[i, ''] = np.nanelse:df_drawdowns.loc[i, ''] = len(pd.date_range(peak,recovery,freq=''))df_drawdowns.loc[i, ''] = (peak.to_pydatetime().strftime(''))df_drawdowns.loc[i, ''] = (valley.to_pydatetime().strftime(''))if isinstance(recovery, float):df_drawdowns.loc[i, ''] = recoveryelse:df_drawdowns.loc[i, ''] = (recovery.to_pydatetime().strftime(''))df_drawdowns.loc[i, ''] = ((df_cum.loc[peak] - df_cum.loc[valley]) / df_cum.loc[peak]) * df_drawdowns[''] = pd.to_datetime(df_drawdowns[''])df_drawdowns[''] = pd.to_datetime(df_drawdowns[''])df_drawdowns[''] = pd.to_datetime(df_drawdowns[''])return df_drawdowns", "docstring": "Places top drawdowns in a table.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n    - See full explanation in tears.create_full_tear_sheet.\ntop : int, optional\n    The amount of top drawdowns to find (default 10).\n\nReturns\n-------\ndf_drawdowns : pd.DataFrame\n    Information about top drawdowns.", "id": "f12221:m29"} {"signature": "def rolling_volatility(returns, rolling_vol_window):", "body": "return returns.rolling(rolling_vol_window).std()* np.sqrt(APPROX_BDAYS_PER_YEAR)", "docstring": "Determines the rolling volatility of a strategy.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n    - See full explanation in tears.create_full_tear_sheet.\nrolling_vol_window : int\n    Length of rolling window, in days, over which to compute.\n\nReturns\n-------\npd.Series\n    Rolling volatility.", "id": "f12221:m30"} {"signature": "def rolling_sharpe(returns, rolling_sharpe_window):", "body": "return returns.rolling(rolling_sharpe_window).mean()/ returns.rolling(rolling_sharpe_window).std()* np.sqrt(APPROX_BDAYS_PER_YEAR)", "docstring": "Determines the rolling Sharpe ratio of a strategy.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n    - See full explanation in tears.create_full_tear_sheet.\nrolling_sharpe_window : int\n    Length of rolling window, in days, over which to compute.\n\nReturns\n-------\npd.Series\n    Rolling Sharpe ratio.\n\nNote\n-----\nSee https://en.wikipedia.org/wiki/Sharpe_ratio for more details.", "id": "f12221:m31"} {"signature": "def simulate_paths(is_returns, num_days,starting_value=, num_samples=, random_seed=None):", "body": "samples = np.empty((num_samples, num_days))seed = np.random.RandomState(seed=random_seed)for i in range(num_samples):samples[i, :] = is_returns.sample(num_days, replace=True,random_state=seed)return samples", "docstring": "Generate alternate paths using available values from in-sample returns.\n\nParameters\n----------\nis_returns : pandas.core.frame.DataFrame\n    Non-cumulative in-sample returns.\nnum_days : int\n    Number of days to project the probability cone forward.\nstarting_value : int or float\n    Starting value of the out of sample period.\nnum_samples : int\n    Number of samples to draw from the in-sample daily returns.\n    Each sample will be an array with length num_days.\n    A higher number of samples will generate a more accurate\n    bootstrap cone.\nrandom_seed : int\n    Seed for the pseudorandom number generator used by the pandas\n    sample 
method.\n\nReturns\n-------\nsamples : numpy.ndarray", "id": "f12221:m32"} {"signature": "def summarize_paths(samples, cone_std=(, , ), starting_value=):", "body": "cum_samples = ep.cum_returns(samples.T,starting_value=starting_value).Tcum_mean = cum_samples.mean(axis=)cum_std = cum_samples.std(axis=)if isinstance(cone_std, (float, int)):cone_std = [cone_std]cone_bounds = pd.DataFrame(columns=pd.Float64Index([]))for num_std in cone_std:cone_bounds.loc[:, float(num_std)] = cum_mean + cum_std * num_stdcone_bounds.loc[:, float(-num_std)] = cum_mean - cum_std * num_stdreturn cone_bounds", "docstring": "Generate the upper and lower bounds of an n standard deviation\ncone of forecasted cumulative returns.\n\nParameters\n----------\nsamples : numpy.ndarray\n    Alternative paths, or series of possible outcomes.\ncone_std : list of int/float\n    Number of standard deviations to use in the boundaries of\n    the cone. If multiple values are passed, cone bounds will\n    be generated for each value.\n\nReturns\n-------\nsamples : pandas.core.frame.DataFrame", "id": "f12221:m33"} {"signature": "def forecast_cone_bootstrap(is_returns, num_days, cone_std=(, , ),starting_value=, num_samples=,random_seed=None):", "body": "samples = simulate_paths(is_returns=is_returns,num_days=num_days,starting_value=starting_value,num_samples=num_samples,random_seed=random_seed)cone_bounds = summarize_paths(samples=samples,cone_std=cone_std,starting_value=starting_value)return cone_bounds", "docstring": "Determines the upper and lower bounds of an n standard deviation\ncone of forecasted cumulative returns. Future cumulative mean and\nstandard deviation are computed by repeatedly sampling from the\nin-sample daily returns (i.e. bootstrap). This cone is non-parametric,\nmeaning it does not assume that returns are normally distributed.\n\nParameters\n----------\nis_returns : pd.Series\n    In-sample daily returns of the strategy, noncumulative.\n    - See full explanation in tears.create_full_tear_sheet.\nnum_days : int\n    Number of days to project the probability cone forward.\ncone_std : int, float, or list of int/float\n    Number of standard deviations to use in the boundaries of\n    the cone. If multiple values are passed, cone bounds will\n    be generated for each value.\nstarting_value : int or float\n    Starting value of the out of sample period.\nnum_samples : int\n    Number of samples to draw from the in-sample daily returns.\n    Each sample will be an array with length num_days.\n    A higher number of samples will generate a more accurate\n    bootstrap cone.\nrandom_seed : int\n    Seed for the pseudorandom number generator used by the pandas\n    sample method.\n\nReturns\n-------\npd.DataFrame\n    Contains upper and lower cone boundaries. Column names are\n    strings corresponding to the number of standard deviations\n    above (positive) or below (negative) the projected mean\n    cumulative returns.", "id": "f12221:m34"} {"signature": "def extract_interesting_date_ranges(returns):", "body": "returns_dupe = returns.copy()returns_dupe.index = returns_dupe.index.map(pd.Timestamp)ranges = OrderedDict()for name, (start, end) in PERIODS.items():try:period = returns_dupe.loc[start:end]if len(period) == :continueranges[name] = periodexcept BaseException:continuereturn ranges", "docstring": "Extracts returns based on interesting events. 
See\ngen_date_range_interesting.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n    - See full explanation in tears.create_full_tear_sheet.\n\nReturns\n-------\nranges : OrderedDict\n    Date ranges, with returns, of all valid events.", "id": "f12221:m35"} {"signature": "def deprecated(msg=None, stacklevel=):", "body": "def deprecated_dec(fn):@wraps(fn)def wrapper(*args, **kwargs):warnings.warn(msg or \"\" % fn.__name__,category=DeprecationWarning,stacklevel=stacklevel)return fn(*args, **kwargs)return wrapperreturn deprecated_dec", "docstring": "Used to mark a function as deprecated.\nParameters\n----------\nmsg : str\n    The message to display in the deprecation warning.\nstacklevel : int\n    How far up the stack the warning needs to go, before\n    showing the relevant calling lines.\nUsage\n-----\n@deprecated(msg='function_a is deprecated! Use function_b instead.')\ndef function_a(*args, **kwargs):", "id": "f12222:m0"} {"signature": "def map_transaction(txn):", "body": "if isinstance(txn[''], dict):sid = txn['']['']symbol = txn['']['']else:sid = txn['']symbol = txn['']return {'': sid,'': symbol,'': txn[''],'': txn[''],'': txn[''],'': txn[''],'': txn['']}", "docstring": "Maps a single transaction row to a dictionary.\n\nParameters\n----------\ntxn : pd.DataFrame\n    A single transaction object to convert to a dictionary.\n\nReturns\n-------\ndict\n    Mapped transaction.", "id": "f12223:m0"} {"signature": "def make_transaction_frame(transactions):", "body": "transaction_list = []for dt in transactions.index:txns = transactions.loc[dt]if len(txns) == :continuefor txn in txns:txn = map_transaction(txn)transaction_list.append(txn)df = pd.DataFrame(sorted(transaction_list, key=lambda x: x['']))df[''] = -df[''] * df['']df.index = list(map(pd.Timestamp, df.dt.values))return df", "docstring": "Formats a transaction DataFrame.\n\nParameters\n----------\ntransactions : pd.DataFrame\n    Contains improperly formatted transactional data.\n\nReturns\n-------\ndf : pd.DataFrame\n    Daily transaction volume and dollar amount.\n    - See full explanation in tears.create_full_tear_sheet.", "id": "f12223:m1"} {"signature": "def get_txn_vol(transactions):", "body": "txn_norm = transactions.copy()txn_norm.index = txn_norm.index.normalize()amounts = txn_norm.amount.abs()prices = txn_norm.pricevalues = amounts * pricesdaily_amounts = amounts.groupby(amounts.index).sum()daily_values = values.groupby(values.index).sum()daily_amounts.name = \"\"daily_values.name = \"\"return pd.concat([daily_values, daily_amounts], axis=)", "docstring": "Extract daily transaction data from set of transaction objects.\n\nParameters\n----------\ntransactions : pd.DataFrame\n    Time series containing one row per symbol (and potentially\n    duplicate datetime indices) and columns for amount and\n    price.\n\nReturns\n-------\npd.DataFrame\n    Daily transaction volume and number of shares.\n    - See full explanation in tears.create_full_tear_sheet.", "id": "f12223:m2"} {"signature": "def adjust_returns_for_slippage(returns, positions, transactions,slippage_bps):", "body": "slippage = * slippage_bpsportfolio_value = positions.sum(axis=)pnl = portfolio_value * returnstraded_value = get_txn_vol(transactions).txn_volumeslippage_dollars = traded_value * slippageadjusted_pnl = pnl.add(-slippage_dollars, fill_value=)adjusted_returns = returns * adjusted_pnl / pnlreturn adjusted_returns", "docstring": "Apply a slippage penalty for every dollar traded.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the 
strategy, noncumulative.\n - See full explanation in create_full_tear_sheet.\npositions : pd.DataFrame\n Daily net position values.\n - See full explanation in create_full_tear_sheet.\ntransactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in create_full_tear_sheet.\nslippage_bps: int/float\n Basis points of slippage to apply.\n\nReturns\n-------\npd.Series\n Time series of daily returns, adjusted for slippage.", "id": "f12223:m3"} {"signature": "def get_turnover(positions, transactions, denominator=''):", "body": "txn_vol = get_txn_vol(transactions)traded_value = txn_vol.txn_volumeif denominator == '':AGB = positions.drop('', axis=).abs().sum(axis=)denom = AGB.rolling().mean()denom.iloc[] = AGB.iloc[] / elif denominator == '':denom = positions.sum(axis=)else:raise ValueError(\"\"\"\"\"\".format(denominator))denom.index = denom.index.normalize()turnover = traded_value.div(denom, axis='')turnover = turnover.fillna()return turnover", "docstring": "- Value of purchases and sales divided\nby either the actual gross book or the portfolio value\nfor the time step.\n\nParameters\n----------\npositions : pd.DataFrame\n Contains daily position values including cash.\n - See full explanation in tears.create_full_tear_sheet\ntransactions : pd.DataFrame\n Prices and amounts of executed trades. One row per trade.\n - See full explanation in tears.create_full_tear_sheet\ndenominator : str, optional\n Either 'AGB' or 'portfolio_value', default AGB.\n - AGB (Actual gross book) is the gross market\n value (GMV) of the specific algo being analyzed.\n Swapping out an entire portfolio of stocks for\n another will yield 200% turnover, not 100%, since\n transactions are being made for both sides.\n - We use average of the previous and the current end-of-period\n AGB to avoid singularities when trading only into or\n out of an entire book in one trading period.\n - portfolio_value is the total value of the algo's\n positions end-of-period, including cash.\n\nReturns\n-------\nturnover_rate : pd.Series\n timeseries of portfolio turnover rates.", "id": "f12223:m4"} {"signature": "def ask(self):", "body": "with self.mutex:if not len(self.queue): raise Emptyutcnow = dt.datetime.utcnow()if self.queue[][] <= utcnow:self.ready.notify()return return (self.queue[][] - utcnow).total_seconds()", "docstring": "Return the wait time in seconds required to retrieve the\nitem currently at the head of the queue.\n\nNote that there is no guarantee that a call to `get()` will\nsucceed even if `ask()` returns 0. 
By the time the calling\nthread reacts, other threads may have caused a different\nitem to be at the head of the queue.", "id": "f12227:c3:m1"} {"signature": "def qsize(self):", "body": "with self.mutex:return len(self.queue)", "docstring": "Return the approximate size of the queue.\n\nThe answer will not be reliable, as producers and consumers\ncan change the queue size before the result can be used.", "id": "f12227:c3:m4"} {"signature": "def get_self(session, user_details=None):", "body": "if user_details:user_details[''] = Trueresponse = make_get_request(session, '', params_data=user_details)json_data = response.json()if response.status_code == :return json_data['']else:raise SelfNotRetrievedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Get details about the currently authenticated user", "id": "f12288:m0"} {"signature": "def get_user_by_id(session, user_id, user_details=None):", "body": "if user_details:user_details[''] = Trueresponse = make_get_request(session, ''.format(user_id), params_data=user_details)json_data = response.json()if response.status_code == :return json_data['']else:raise UserNotFoundException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Get details about specific user", "id": "f12288:m1"} {"signature": "def get_self_user_id(session):", "body": "response = make_get_request(session, '')if response.status_code == :return response.json()['']['']else:raise UserIdNotRetrievedException('' % response.text, response.text)", "docstring": "Get the currently authenticated user ID", "id": "f12288:m2"} {"signature": "def add_user_jobs(session, job_ids):", "body": "jobs_data = {'': job_ids}response = make_post_request(session, '', json_data=jobs_data)json_data = response.json()if response.status_code == :return json_data['']else:raise UserJobsNotAddedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Add a list of jobs to the currently authenticated user", "id": "f12288:m3"} {"signature": "def set_user_jobs(session, job_ids):", "body": "jobs_data = {'': job_ids}response = make_put_request(session, '', json_data=jobs_data)json_data = response.json()if response.status_code == :return json_data['']else:raise UserJobsNotSetException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Replace the currently authenticated user's list of jobs with a new list of\njobs", "id": "f12288:m4"} {"signature": "def delete_user_jobs(session, job_ids):", "body": "jobs_data = {'': job_ids}response = make_delete_request(session, '', json_data=jobs_data)json_data = response.json()if response.status_code == :return json_data['']else:raise UserJobsNotDeletedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Remove a list of jobs from the currently authenticated user", "id": "f12288:m5"} {"signature": "def get_users(session, query):", "body": "response = make_get_request(session, '', params_data=query)json_data = response.json()if response.status_code == :return json_data['']else:raise UsersNotFoundException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Get one or more users", "id": "f12288:m6"} {"signature": "def create_project(session, title, description,currency, budget, jobs):", "body": "project_data = {'': title,'': description,'': currency,'': budget,'': jobs}response = make_post_request(session, '', 
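The user endpoints above (get_self, get_user_by_id, add_user_jobs, ...) all share one shape: issue the request, return the result payload on success, otherwise raise a domain exception built from the error fields. A hedged sketch of that shape using a plain requests.Session; the example URL and the 'result'/'message'/'error_code'/'request_id' keys are assumptions, since those literals are elided in the records:

import requests

class ApiException(Exception):
    def __init__(self, message, error_code, request_id):
        super().__init__(message)
        self.error_code = error_code
        self.request_id = request_id

def get_resource(session, url, params=None, exc=ApiException):
    response = session.get(url, params=params)
    json_data = response.json()
    if response.status_code == 200:
        return json_data['result']                  # assumed payload key
    raise exc(message=json_data.get('message'),     # assumed error keys
              error_code=json_data.get('error_code'),
              request_id=json_data.get('request_id'))

# e.g. get_resource(requests.Session(), 'https://example.com/api/users/0.1/self/')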
json_data=project_data)json_data = response.json()if response.status_code == :project_data = json_data['']p = Project(project_data)p.url = urljoin(session.url, '' % p.seo_url)return pelse:raise ProjectNotCreatedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''],)", "docstring": "Create a project", "id": "f12290:m0"} {"signature": "def create_hourly_project(session, title, description,currency, budget, jobs, hourly_project_info):", "body": "project_data = {'': title,'': description,'': currency,'': budget,'': jobs,'': '','': hourly_project_info}response = make_post_request(session, '', json_data=project_data)json_data = response.json()if response.status_code == :project_data = json_data['']p = Project(project_data)p.url = urljoin(session.url, '' % p.seo_url)return pelse:raise ProjectNotCreatedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''],)", "docstring": "Create a fixed project", "id": "f12290:m1"} {"signature": "def create_local_project(session, title, description,currency, budget, jobs, location):", "body": "project_data = {'': title,'': description,'': currency,'': budget,'': jobs,'': True,'': location}response = make_post_request(session, '', json_data=project_data)json_data = response.json()if response.status_code == :project_data = json_data['']p = Project(project_data)p.url = urljoin(session.url, '' % p.seo_url)return pelse:raise ProjectNotCreatedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''],)", "docstring": "Create a fixed project", "id": "f12290:m2"} {"signature": "def create_hireme_project(session, title, description,currency, budget, jobs, hireme_initial_bid):", "body": "jobs.append(create_job_object(id=)) project_data = {'': title,'': description,'': currency,'': budget,'': jobs,'': True,'': hireme_initial_bid}response = make_post_request(session, '', json_data=project_data)json_data = response.json()if response.status_code == :project_data = json_data['']p = Project(project_data)p.url = urljoin(session.url, '' % p.seo_url)return pelse:raise ProjectNotCreatedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''],)", "docstring": "Create a fixed project", "id": "f12290:m3"} {"signature": "def get_projects(session, query):", "body": "response = make_get_request(session, '', params_data=query)json_data = response.json()if response.status_code == :return json_data['']else:raise ProjectsNotFoundException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Get one or more projects", "id": "f12290:m4"} {"signature": "def get_project_by_id(session, project_id, project_details=None, user_details=None):", "body": "query = {}if project_details:query.update(project_details)if user_details:query.update(user_details)response = make_get_request(session, ''.format(project_id), params_data=query)json_data = response.json()if response.status_code == :return json_data['']else:raise ProjectsNotFoundException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Get a single project by ID", "id": "f12290:m5"} {"signature": "def search_projects(session,query,search_filter=None,project_details=None,user_details=None,limit=,offset=,active_only=None):", "body": "search_data = {'': query,'': limit,'': offset,}if search_filter:search_data.update(search_filter)if project_details:search_data.update(project_details)if user_details:search_data.update(user_details)endpoint = ''.format('' if active_only else 
'')response = make_get_request(session, endpoint, params_data=search_data)json_data = response.json()if response.status_code == :return json_data['']else:raise ProjectsNotFoundException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Search for all projects", "id": "f12290:m6"} {"signature": "def place_project_bid(session, project_id, bidder_id, description, amount,period, milestone_percentage):", "body": "bid_data = {'': project_id,'': bidder_id,'': description,'': amount,'': period,'': milestone_percentage,}response = make_post_request(session, '', json_data=bid_data)json_data = response.json()if response.status_code == :bid_data = json_data['']return Bid(bid_data)else:raise BidNotPlacedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Place a bid on a project", "id": "f12290:m7"} {"signature": "def get_bids(session, project_ids=[], bid_ids=[], limit=, offset=):", "body": "get_bids_data = {}if bid_ids:get_bids_data[''] = bid_idsif project_ids:get_bids_data[''] = project_idsget_bids_data[''] = limitget_bids_data[''] = offsetresponse = make_get_request(session, '', params_data=get_bids_data)json_data = response.json()if response.status_code == :return json_data['']else:raise BidsNotFoundException(message=json_data[''], error_code=json_data[''],request_id=json_data[''])", "docstring": "Get the list of bids", "id": "f12290:m8"} {"signature": "def get_milestones(session, project_ids=[], milestone_ids=[], user_details=None, limit=, offset=):", "body": "get_milestones_data = {}if milestone_ids:get_milestones_data[''] = milestone_idsif project_ids:get_milestones_data[''] = project_idsget_milestones_data[''] = limitget_milestones_data[''] = offsetif user_details:get_milestones_data.update(user_details)response = make_get_request(session, '', params_data=get_milestones_data)json_data = response.json()if response.status_code == :return json_data['']else:raise MilestonesNotFoundException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Get the list of milestones", "id": "f12290:m9"} {"signature": "def get_milestone_by_id(session, milestone_id, user_details=None):", "body": "endpoint = ''.format(milestone_id)response = make_get_request(session, endpoint, params_data=user_details)json_data = response.json()if response.status_code == :return json_data['']else:raise MilestonesNotFoundException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Get a specific milestone", "id": "f12290:m10"} {"signature": "def award_project_bid(session, bid_id):", "body": "headers = {'': ''}bid_data = {'': ''}endpoint = ''.format(bid_id)response = make_put_request(session, endpoint, headers=headers,params_data=bid_data)json_data = response.json()if response.status_code == :return json_data['']else:json_data = response.json()raise BidNotAwardedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Award a bid on a project", "id": "f12290:m11"} {"signature": "def revoke_project_bid(session, bid_id):", "body": "headers = {'': ''}bid_data = {'': ''}endpoint = ''.format(bid_id)response = make_put_request(session, endpoint, headers=headers,params_data=bid_data)json_data = response.json()if response.status_code == :return json_data['']else:json_data = response.json()raise BidNotRevokedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Revoke a bid on a project", "id": 
"f12290:m12"} {"signature": "def accept_project_bid(session, bid_id):", "body": "headers = {'': ''}bid_data = {'': ''}endpoint = ''.format(bid_id)response = make_put_request(session, endpoint, headers=headers,params_data=bid_data)json_data = response.json()if response.status_code == :return json_data['']else:json_data = response.json()raise BidNotAcceptedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Accept a bid on a project", "id": "f12290:m13"} {"signature": "def retract_project_bid(session, bid_id):", "body": "headers = {'': ''}bid_data = {'': ''}endpoint = ''.format(bid_id)response = make_put_request(session, endpoint, headers=headers,params_data=bid_data)json_data = response.json()if response.status_code == :return json_data['']else:json_data = response.json()raise BidNotRetractedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Retract a bid on a project", "id": "f12290:m14"} {"signature": "def highlight_project_bid(session, bid_id):", "body": "headers = {'': ''}bid_data = {'': ''}endpoint = ''.format(bid_id)response = make_put_request(session, endpoint, headers=headers,params_data=bid_data)json_data = response.json()if response.status_code == :return json_data['']else:json_data = response.json()raise BidNotHighlightedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Highlight a bid on a project", "id": "f12290:m15"} {"signature": "def create_milestone_payment(session, project_id, bidder_id, amount,reason, description):", "body": "milestone_data = {'': project_id,'': bidder_id,'': amount,'': reason,'': description}response = make_post_request(session, '',json_data=milestone_data)json_data = response.json()if response.status_code == :milestone_data = json_data['']return Milestone(milestone_data)else:raise MilestoneNotCreatedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Create a milestone payment", "id": "f12290:m16"} {"signature": "def post_track(session, user_id, project_id, latitude, longitude):", "body": "tracking_data = {'': user_id,'': project_id,'': {'': latitude,'': longitude}}response = make_post_request(session, '',json_data=tracking_data)json_data = response.json()if response.status_code == :return json_data['']else:raise TrackNotCreatedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Start tracking a project by creating a track", "id": "f12290:m17"} {"signature": "def update_track(session, track_id, latitude, longitude, stop_tracking=False):", "body": "tracking_data = {'': {'': latitude,'': longitude,},'': stop_tracking}response = make_put_request(session, ''.format(track_id),json_data=tracking_data)json_data = response.json()if response.status_code == :return json_data['']else:raise TrackNotUpdatedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Updates the current location by creating a new track point and appending\nit to the given track", "id": "f12290:m18"} {"signature": "def get_track_by_id(session, track_id, track_point_limit=None, track_point_offset=None):", "body": "tracking_data = {}if track_point_limit:tracking_data[''] = track_point_limitif track_point_offset:tracking_data[''] = track_point_offsetresponse = make_get_request(session, ''.format(track_id),params_data=tracking_data)json_data = response.json()if response.status_code == :return json_data['']else:raise 
TrackNotFoundException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Gets a specific track", "id": "f12290:m19"} {"signature": "def release_milestone_payment(session, milestone_id, amount):", "body": "params_data = {'': '',}milestone_data = {'': amount,}endpoint = ''.format(milestone_id)response = make_put_request(session, endpoint, params_data=params_data,json_data=milestone_data)json_data = response.json()if response.status_code == :return json_data['']else:raise MilestoneNotReleasedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Release a milestone payment", "id": "f12290:m20"} {"signature": "def request_release_milestone_payment(session, milestone_id):", "body": "params_data = {'': '',}endpoint = ''.format(milestone_id)response = make_put_request(session, endpoint, params_data=params_data)json_data = response.json()if response.status_code == :return json_data['']else:raise MilestoneNotRequestedReleaseException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Release a milestone payment", "id": "f12290:m21"} {"signature": "def cancel_milestone_payment(session, milestone_id):", "body": "params_data = {'': '',}endpoint = ''.format(milestone_id)response = make_put_request(session, endpoint, params_data=params_data)json_data = response.json()if response.status_code == :return json_data['']else:raise MilestoneNotCancelledException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Release a milestone payment", "id": "f12290:m22"} {"signature": "def create_milestone_request(session, project_id, bid_id, description, amount):", "body": "milestone_request_data = {'': project_id,'': bid_id,'': description,'': amount,}response = make_post_request(session, '',json_data=milestone_request_data)json_data = response.json()if response.status_code == :milestone_request_data = json_data['']return MilestoneRequest(milestone_request_data)else:raise MilestoneRequestNotCreatedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Create a milestone request", "id": "f12290:m23"} {"signature": "def accept_milestone_request(session, milestone_request_id):", "body": "params_data = {'': '',}endpoint = ''.format(milestone_request_id)response = make_put_request(session, endpoint, params_data=params_data)json_data = response.json()if response.status_code == :return json_data['']else:raise MilestoneRequestNotAcceptedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Accept a milestone request", "id": "f12290:m24"} {"signature": "def reject_milestone_request(session, milestone_request_id):", "body": "params_data = {'': '',}endpoint = ''.format(milestone_request_id)response = make_put_request(session, endpoint, params_data=params_data)json_data = response.json()if response.status_code == :return json_data['']else:raise MilestoneRequestNotRejectedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Reject a milestone request", "id": "f12290:m25"} {"signature": "def delete_milestone_request(session, milestone_request_id):", "body": "params_data = {'': '',}endpoint = ''.format(milestone_request_id)response = make_put_request(session, endpoint, params_data=params_data)json_data = response.json()if response.status_code == :return json_data['']else:raise 
MilestoneRequestNotDeletedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Delete a milestone request", "id": "f12290:m26"} {"signature": "def post_review(session, review):", "body": "response = make_post_request(session, '', json_data=review)json_data = response.json()if response.status_code == :return json_data['']else:raise ReviewNotPostedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Post a review", "id": "f12290:m27"} {"signature": "def get_jobs(session, job_ids, seo_details, lang):", "body": "get_jobs_data = {'': job_ids,'': seo_details,'': lang,}response = make_get_request(session, '', params_data=get_jobs_data)json_data = response.json()if response.status_code == :return json_data['']else:raise JobsNotFoundException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Get a list of jobs", "id": "f12290:m28"} {"signature": "def create_thread(session, member_ids, context_type, context, message):", "body": "headers = {'': ''}thread_data = {'': member_ids,'': context_type,'': context,'': message,}response = make_post_request(session, '', headers,form_data=thread_data)json_data = response.json()if response.status_code == :return Thread(json_data[''])else:raise ThreadNotCreatedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Create a thread", "id": "f12295:m0"} {"signature": "def create_project_thread(session, member_ids, project_id, message):", "body": "return create_thread(session, member_ids, '', project_id, message)", "docstring": "Create a project thread", "id": "f12295:m1"} {"signature": "def post_message(session, thread_id, message):", "body": "headers = {'': ''}message_data = {'': message,}endpoint = ''.format(thread_id)response = make_post_request(session, endpoint, headers,form_data=message_data)json_data = response.json()if response.status_code == :return Message(json_data[''])else:raise MessageNotCreatedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Add a message to a thread", "id": "f12295:m2"} {"signature": "def post_attachment(session, thread_id, attachments):", "body": "files = []filenames = []for attachment in attachments:files.append(attachment[''])filenames.append(attachment[''])message_data = {'': filenames,}endpoint = ''.format(thread_id)response = make_post_request(session, endpoint,form_data=message_data, files=files)json_data = response.json()if response.status_code == :return Message(json_data[''])else:raise MessageNotCreatedException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Add a message to a thread", "id": "f12295:m3"} {"signature": "def get_messages(session, query, limit=, offset=):", "body": "query[''] = limitquery[''] = offsetresponse = make_get_request(session, '', params_data=query)json_data = response.json()if response.status_code == :return json_data['']else:raise MessagesNotFoundException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Get one or more messages", "id": "f12295:m4"} {"signature": "def search_messages(session, thread_id, query, limit=,offset=, message_context_details=None,window_above=None, window_below=None):", "body": "query = {'': thread_id,'': query,'': limit,'': offset}if message_context_details:query[''] = message_context_detailsif window_above:query[''] = window_aboveif window_below:query[''] = 
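The accept/reject/delete milestone-request wrappers above differ only in the action sent and the exception raised. A hedged consolidation sketch (not the SDK's own code): the endpoint layout, the 'action' parameter name and the action strings are assumptions, since those literals are elided in the records:

def milestone_request_action(session, base_url, milestone_request_id, action, exc):
    # assumed endpoint layout and parameter name; the real literals are elided
    url = '%s/milestone_requests/%s/' % (base_url.rstrip('/'), milestone_request_id)
    response = session.put(url, params={'action': action})
    json_data = response.json()
    if response.status_code == 200:
        return json_data.get('result')
    raise exc(message=json_data.get('message'),
              error_code=json_data.get('error_code'),
              request_id=json_data.get('request_id'))

# accept_milestone_request(...) would then reduce to
# milestone_request_action(session, base_url, mid, 'accept', MilestoneRequestNotAcceptedException)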
window_belowresponse = make_get_request(session, '', params_data=query)json_data = response.json()if response.status_code == :return json_data['']else:raise MessagesNotFoundException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Search for messages", "id": "f12295:m5"} {"signature": "def get_threads(session, query):", "body": "response = make_get_request(session, '', params_data=query)json_data = response.json()if response.status_code == :return json_data['']else:raise ThreadsNotFoundException(message=json_data[''],error_code=json_data[''],request_id=json_data[''])", "docstring": "Get one or more threads", "id": "f12295:m6"} {"signature": "def description(description):", "body": "def wrapper(func):@wraps(func)def wrapped(self, *args, **kwargs):return func(self, *args, **kwargs)wrapped.description = descriptionreturn wrappedreturn wrapper", "docstring": "Set description to test_method", "id": "f12310:m0"} {"signature": "def __init__(self, methodName='', options=None):", "body": "super(TestCase, self).__init__(methodName, options)self.reset_cookie()self.session_id_key = self.conf_get(SECTION_FUNKLOAD_FRIENDLY,'',self.__class__.session_id_key,quiet=True)self.site_url = self.conf_get('','',self.__class__.site_url,quiet=True)", "docstring": "Constructor", "id": "f12310:c0:m0"} {"signature": "def reset_cookie(self):", "body": "self.cookie = CookieDict()", "docstring": "Reset cookie data.", "id": "f12310:c0:m1"} {"signature": "def _connect(self, url, params, ok_codes, rtype, description,redirect=False, consumer=None):", "body": "if self.cookie:self.setHeader('', self.cookie.render_to_string())response = super(TestCase, self)._connect(url, params, ok_codes, rtype, description,redirect=redirect, consumer=consumer)for key, value in response.headers.items():if key.lower() == '':self.cookie.from_cookie_string(value)content_type_header = response.headers.get('')if content_type_header:if '' in content_type_header:content_type, charset_pair = content_type_header.split('', )if '' in charset_pair:_key, charset = charset_pair.split('', )else:charset = Noneelse:content_type = content_type_headercharset = Noneelse:content_type = charset = Noneresponse.content_type_header = content_type_headerif content_type:response.content_type = content_type.strip()else:response.content_type = Noneresponse.charset = charsetif response.content_type == '':response.data = json.loads(response.body)else:response.data = Nonereturn response", "docstring": "Override FunkLoadTestCase._connect", "id": "f12310:c0:m2"} {"signature": "def set_session_id(self, session_id):", "body": "self.cookie[self.session_id_key] = session_id", "docstring": "Set session ID to cookie", "id": "f12310:c0:m3"} {"signature": "def clear_session_id(self):", "body": "return self.cookie.get(self.session_id_key)", "docstring": "Clear session ID", "id": "f12310:c0:m4"} {"signature": "def conf_get(self, section, key, default=FunkLoadTestCase._marker, quiet=False):", "body": "if section == self.test_name and key == '':method = getattr(self, section, None)if method and hasattr(method, ''):return method.description.encode('')if section in ['', '']:if section == '':test_name = self.__class__.__name__.lower()else: test_name = self.test_nameif key == '':result_directory = super(TestCase, self).conf_get(section, '',default=self.result_directory, quiet=quiet)result_path = os.path.join(result_directory,''.format(test_name))return result_pathelif key == '':log_directory = super(TestCase, self).conf_get(section, '', 
default=self.log_directory,quiet=quiet)log_to = os.path.join(log_directory,''.format(test_name))return log_toreturn super(TestCase, self).conf_get(section, key, default=default, quiet=quiet)", "docstring": "Override FunkLoadTestCase.conf_get", "id": "f12310:c0:m5"} {"signature": "def __init__(self, data, content_type=''):", "body": "Data.__init__(self,content_type,json.dumps(data))", "docstring": "Constructor\n\n :param data: data will be encoding to JSON.\n :type data: dict\n :param content_type: Content type for data\n :type content_type: str", "id": "f12311:c0:m0"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(CookieDict, self).__init__(*args, **kwargs)", "docstring": "Constructor\n\n The argument is same the OrderedDict.", "id": "f12312:c0:m0"} {"signature": "def render_to_string(self):", "body": "values = ''for key, value in self.items():values += ''.format(key, value)return values", "docstring": "Render to cookie strings.", "id": "f12312:c0:m1"} {"signature": "def from_cookie_string(self, cookie_string):", "body": "for key_value in cookie_string.split(''):if '' in key_value:key, value = key_value.split('', )else:key = key_valuestrip_key = key.strip()if strip_key and strip_key.lower() not in COOKIE_ATTRIBUTE_NAMES:self[strip_key] = value.strip()", "docstring": "update self with cookie_string.", "id": "f12312:c0:m2"} {"signature": "def _update_state(self, data):", "body": "raise NotImplementedError", "docstring": "Determines the next desired point in the parameter space using\nthe parsed data returned from the public update\nmethod. Returns the value that will next emitted on the next\niteration using the list of dictionaries format for\narguments. If the update fails or data is None, StopIteration\nshould be supplied as the return value.", "id": "f12318:c0:m1"} {"signature": "def _initial_state(self, **kwargs):", "body": "raise NotImplementedError", "docstring": "Reset the the DynamicArgs object to its initial state and used\nto reset the object adter the iterator is exhausted. The\nreturn value is the initial argument to be returned by\nnext().", "id": "f12318:c0:m2"} {"signature": "def update(self, tids, info):", "body": "outputs_dir = os.path.join(info[''], '')pattern = '' % info['']flist = os.listdir(outputs_dir)try:outputs = []for tid in tids:matches = fnmatch.filter(flist, pattern.format(tid=tid))if len(matches) != :self.warning(\"\" % tid)contents = open(os.path.join(outputs_dir, matches[]),'').read()outputs.append(self.output_extractor(contents))self._next_val = self._update_state(outputs)self.trace.append((outputs, self._next_val))except:self.warning(\"\")self._next_val = StopIteration", "docstring": "Called to update the state of the iterator. This methods\nreceives the set of task ids from the previous set of tasks\ntogether with the launch information to allow the output\nvalues to be parsed using the output_extractor. 
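render_to_string and from_cookie_string above carry cookie state between funkload requests, but their separator literals are elided. A self-contained sketch assuming the conventional '; ' and '=' separators and a standard set of cookie attribute names:

from collections import OrderedDict

COOKIE_ATTRIBUTE_NAMES = {'path', 'domain', 'expires', 'max-age',
                          'secure', 'httponly', 'samesite'}   # assumed attribute set

class CookieDict(OrderedDict):
    def render_to_string(self):
        # Render stored cookies into a single Cookie header value.
        return '; '.join('%s=%s' % (k, v) for k, v in self.items())

    def from_cookie_string(self, cookie_string):
        # Update self from a Set-Cookie style string, ignoring cookie attributes.
        for key_value in cookie_string.split(';'):
            key, _, value = key_value.partition('=')
            key = key.strip()
            if key and key.lower() not in COOKIE_ATTRIBUTE_NAMES:
                self[key] = value.strip()

# c = CookieDict(); c.from_cookie_string('sid=abc123; Path=/; HttpOnly')
# c.render_to_string()   ->  'sid=abc123'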
This data is then\nused to determine the next desired point in the parameter\nspace by calling the _update_state method.", "id": "f12318:c0:m4"} {"signature": "def show(self):", "body": "copied = self.copy()enumerated = [el for el in enumerate(copied)]for (group_ind, specs) in enumerated:if len(enumerated) > : print(\"\" % group_ind)ordering = self.constant_keys + self.varying_keysspec_lines = [''.join(['' % (k, s[k]) for k in ordering]) for s in specs]print(''.join(['' % (i,l) for (i,l) in enumerate(spec_lines)]))print('' % self.__class__.__name__)", "docstring": "When dynamic, not all argument values may be available.", "id": "f12318:c0:m5"} {"signature": "def _trace_summary(self):", "body": "for (i, (val, args)) in enumerate(self.trace):if args is StopIteration:info = \"\"else:pprint = ''.join('' + ''.join('' % (k,v)for (k,v) in arg.items()) + '' for arg in args)info = (\"\" % pprint )if i == : print(\"\" % (i, info))else: print(\"\" % (i, info.capitalize(), val))", "docstring": "Summarizes the trace of values used to update the DynamicArgs\nand the arguments subsequently returned. May be used to\nimplement the summary method.", "id": "f12318:c0:m6"} {"signature": "def __add__(self, other):", "body": "if not other: return selfdynamic = (isinstance(self, DynamicArgs), isinstance(other, DynamicArgs))if dynamic == (True, True):raise Exception('')elif (True in dynamic):return DynamicConcatenate(self,other)else:return Concatenate(self,other)", "docstring": "Concatenates two argument specifiers. See Concatenate and\nDynamicConcatenate documentation respectively.", "id": "f12318:c0:m7"} {"signature": "def __mul__(self, other):", "body": "if not other: return []dynamic = (isinstance(self, DynamicArgs), isinstance(other, DynamicArgs))if dynamic == (True, True):raise Exception('')elif (True in dynamic):return DynamicCartesianProduct(self, other)else:return CartesianProduct(self, other)", "docstring": "Takes the cartesian product of two argument specifiers. See\nCartesianProduct and DynamicCartesianProduct documentation.", "id": "f12318:c0:m8"} {"signature": "def __len__(self):", "body": "raise NotImplementedError", "docstring": "Many DynamicArgs won't have a length that can be\nprecomputed. Most DynamicArgs objects will have an iteration\nlimit to guarantee eventual termination. If so, the maximum\npossible number of arguments that could be generated should be\nreturned.", "id": "f12318:c0:m9"} {"signature": "def _update_state(self, vals):", "body": "self._steps_complete += if self._steps_complete == self.max_steps:self._termination_info = (False, self._best_val, self._arg)return StopIterationarg_inc, arg_dec = valsbest_val = min(arg_inc, arg_dec, self._best_val)if best_val == self._best_val:self._termination_info = (True, best_val, self._arg)return StopIterationself._arg += self.stepsize if (arg_dec > arg_inc) else -self.stepsizeself._best_val= best_valreturn [{self.key:self._arg+self.stepsize},{self.key:self._arg-self.stepsize}]", "docstring": "Takes as input a list or tuple of two elements. First the\nvalue returned by incrementing by 'stepsize' followed by the\nvalue returned after a 'stepsize' decrement.", "id": "f12318:c1:m2"} {"signature": "def __call__(self, spec, tid=None, info={}):", "body": "raise NotImplementedError", "docstring": "Formats a single argument specification supplied as a\ndictionary of argument name/value pairs. 
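The _update_state record above (f12318:c1:m2) implements a one-dimensional stepsize descent: probe arg+step and arg-step, move to the better probe, and stop when neither improves. A standalone sketch of that loop, with a plain callable standing in for the external jobs whose outputs the real DynamicArgs would parse:

def simple_descent(objective, arg=0.0, stepsize=1.0, max_steps=10):
    best_val = float('inf')
    for _ in range(max_steps):
        arg_inc, arg_dec = objective(arg + stepsize), objective(arg - stepsize)
        candidate = min(arg_inc, arg_dec)
        if candidate >= best_val:                   # no improvement: converged
            break
        arg += stepsize if arg_inc < arg_dec else -stepsize
        best_val = candidate
    return arg, best_val

# simple_descent(lambda x: (x - 3) ** 2)   ->  (3.0, 0.0)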
The info dictionary\ncontains launch information as defined in the _setup_launch\nmethod of Launcher.", "id": "f12319:c0:m1"} {"signature": "def show(self, args, file_handle=None, **kwargs):", "body": "full_string = ''info = {'': '','': '','': '','': '','': '','': '','': tuple(time.localtime()),'': args.varying_keys,'': args.constant_keys,'': args.constant_items}quoted_cmds = [ subprocess.list2cmdline([el for el in self(self._formatter(s),'',info)])for s in args.specs]cmd_lines = ['' % (i, qcmds) for (i,qcmds)in enumerate(quoted_cmds)]full_string += ''.join(cmd_lines)if file_handle:file_handle.write(full_string)file_handle.flush()else:print(full_string)", "docstring": "Write to file_handle if supplied, othewise print output", "id": "f12319:c0:m3"} {"signature": "def verify(self, args):", "body": "return", "docstring": "Optional, final check that ensures valid arguments have been\npassed before launch. Allows the constant and varying_keys to\nbe be checked and can inspect the specs attribute if an\ninstance of Args. If invalid, raise an Exception with the\nappropriate error message, otherwise return None.", "id": "f12319:c0:m4"} {"signature": "def finalize(self, info):", "body": "return", "docstring": "Optional method that allows a Command to save state before\nlaunch. The info argument is supplied by the Launcher.", "id": "f12319:c0:m5"} {"signature": "def summary(self):", "body": "raise NotImplementedError", "docstring": "A succinct summary of the Command configuration. Unlike the\nrepr, a summary does not have to be complete but must supply\nkey information relevant to the user. Must begin by stating\nthe executable.", "id": "f12319:c0:m6"} {"signature": "def update(self):", "body": "launches = []for path in os.listdir(self.output_dir):full_path = os.path.join(self.output_dir, path)if os.path.isdir(full_path):launches.append(self._get_launch_info(full_path))self.launches = sorted(launches)", "docstring": "Update the launch information -- use if additional launches were\n made.", "id": "f12319:c2:m6"} {"signature": "def get_root_directory(self, timestamp=None):", "body": "if timestamp is None: timestamp = self.timestampif self.timestamp_format is not None:root_name = (time.strftime(self.timestamp_format, timestamp)+ '' + self.batch_name)else:root_name = self.batch_namepath = os.path.join(self.output_directory,*(self.subdir+[root_name]))return os.path.abspath(path)", "docstring": "A helper method that supplies the root directory name given a\ntimestamp.", "id": "f12319:c3:m1"} {"signature": "def _append_log(self, specs):", "body": "self._spec_log += specs log_path = os.path.join(self.root_directory, (\"\" % self.batch_name))core.Log.write_log(log_path, [spec for (_, spec) in specs], allow_append=True)", "docstring": "The log contains the tids and corresponding specifications\nused during launch with the specifications in JSON format.", "id": "f12319:c3:m2"} {"signature": "def _record_info(self, setup_info=None):", "body": "info_path = os.path.join(self.root_directory, ('' % self.batch_name))if setup_info is None:try:with open(info_path, '') as info_file:setup_info = json.load(info_file)except:setup_info = {}setup_info.update({'' : tuple(time.localtime())})else:setup_info.update({'' : None,'' : self.metadata})with open(info_path, '') as info_file:json.dump(setup_info, info_file, sort_keys=True, indent=)", "docstring": "All launchers should call this method to write the info file\nat the end of the launch. 
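get_root_directory above derives the launch directory from a timestamp and the batch name. A small sketch of that naming scheme; the '-' separator and the strftime format are assumptions, since the literals are elided in the record:

import os
import time

def root_directory(output_directory, batch_name,
                   timestamp=None, timestamp_format='%Y-%m-%d_%H%M'):
    timestamp = timestamp or time.localtime()
    root_name = time.strftime(timestamp_format, timestamp) + '-' + batch_name
    return os.path.abspath(os.path.join(output_directory, root_name))

# root_directory('Output', 'sweep')   ->  e.g. '/.../Output/2024-01-01_1200-sweep'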
The .info file is saved given\nsetup_info supplied by _setup_launch into the\nroot_directory. When called without setup_info, the existing\ninfo file is updated with the end-time.", "id": "f12319:c3:m3"} {"signature": "def _setup_launch(self):", "body": "self.root_directory = self.get_root_directory()if not os.path.isdir(self.root_directory):os.makedirs(self.root_directory)platform_dict = {}python_version = (platform.python_implementation()+ platform.python_version())platform_dict[''] = platform.platform()platform_dict[''] = python_versionplatform_dict[''] = str(lancet_version)return {'': self.root_directory,'': self.batch_name,'': self.tag,'': self.description,'': repr(self),'' : platform_dict,'': self.timestamp,'': self.timestamp_format,'': self.args.varying_keys,'': self.args.constant_keys,'': self.args.constant_items}", "docstring": "Method to be used by all launchers that prepares the root\ndirectory and generate basic launch information for command\ntemplates to use (including a registered timestamp).", "id": "f12319:c3:m4"} {"signature": "def _launch_process_group(self, process_commands, streams_path):", "body": "processes = {}def check_complete_processes(wait=False):\"\"\"\"\"\"result = Falsefor proc in list(processes):if wait: proc.wait()if proc.poll() is not None:self.debug(\"\"% (processes[proc][''], proc.poll()))processes[proc][''].close()processes[proc][''].close()del processes[proc]result = Truereturn resultfor cmd, tid in process_commands:self.debug(\"\" % tid)job_timestamp = time.strftime('')basename = \"\" % (self.batch_name, job_timestamp, tid)stdout_handle = open(os.path.join(streams_path, \"\"% (basename, tid)), \"\")stderr_handle = open(os.path.join(streams_path, \"\"% (basename, tid)), \"\")proc = subprocess.Popen(cmd, stdout=stdout_handle, stderr=stderr_handle)processes[proc] = { '' : tid,'' : stdout_handle,'' : stderr_handle }if self.max_concurrency:while len(processes) >= self.max_concurrency:if not check_complete_processes(len(processes)==):time.sleep()while len(processes) > :if not check_complete_processes(True):time.sleep()", "docstring": "Launches processes defined by process_commands, but only\nexecutes max_concurrency processes at a time; if a process\ncompletes and there are still outstanding processes to be\nexecuted, the next processes are run until max_concurrency is\nreached again.", "id": "f12319:c3:m6"} {"signature": "def __call__(self):", "body": "launchinfo = self._setup_launch()streams_path = self._setup_streams_path()self.command.finalize(launchinfo)self._record_info(launchinfo)last_tid = last_tids = []for gid, groupspecs in enumerate(self.args):tids = list(range(last_tid, last_tid+len(groupspecs)))last_tid += len(groupspecs)allcommands = [self.command(self.command._formatter(spec), tid, launchinfo)for (spec,tid) in zip(groupspecs,tids)]self._append_log(list(zip(tids,groupspecs)))self.message(\"\" % (gid, len(allcommands)))self._launch_process_group(zip(allcommands,tids), streams_path)last_tids = tids[:]if self.dynamic:self.args.update(last_tids, launchinfo)self._record_info()if self.reduction_fn is not None:self.reduction_fn(self._spec_log, self.root_directory)", "docstring": "Call to start Launcher execution. 
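_launch_process_group above starts one subprocess per specification, redirects each task's stdout and stderr to files, and never keeps more than max_concurrency processes alive at once. A hedged standalone sketch of that loop; the stream file naming here is illustrative rather than the launcher's exact scheme:

import os
import subprocess
import time

def launch_group(commands, streams_path, max_concurrency=2, poll_interval=0.1):
    # commands: iterable of (argv_list, tid) pairs
    active = {}

    def reap():
        for proc in list(active):
            if proc.poll() is not None:             # finished: close its stream handles
                for handle in active.pop(proc):
                    handle.close()

    for argv, tid in commands:
        out = open(os.path.join(streams_path, 'task%d.o' % tid), 'w')
        err = open(os.path.join(streams_path, 'task%d.e' % tid), 'w')
        active[subprocess.Popen(argv, stdout=out, stderr=err)] = (out, err)
        while max_concurrency and len(active) >= max_concurrency:
            reap()
            time.sleep(poll_interval)
    while active:                                   # wait for remaining jobs
        reap()
        time.sleep(poll_interval)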
Typically invoked by\nreview_and_launch but may be called directly by the user.", "id": "f12319:c3:m7"} {"signature": "def summary(self):", "body": "print(\"\" % self.__class__.__name__)print(\"\" % self.batch_name)if self.tag:print(\"\" % self.tag)print(\"\" % self.get_root_directory())print(\"\" % self.max_concurrency)if self.description:print(\"\" % self.description)", "docstring": "A succinct summary of the Launcher configuration. Unlike the\nrepr, a summary does not have to be complete but must supply\nkey information relevant to the user.", "id": "f12319:c3:m8"} {"signature": "def _qsub_args(self, override_options, cmd_args, append_options=[]):", "body": "opt_dict = type(self.qsub_flag_options)()opt_dict.update(self.qsub_flag_options)opt_dict.update(override_options)if type(self.qsub_flag_options) == dict: ordered_options = [(k, opt_dict[k]) for k in sorted(opt_dict)]else:ordered_options = list(opt_dict.items())ordered_options += append_optionsunpacked_groups = [[(k,v) for v in val] if type(val)==list else [(k,val)]for (k,val) in ordered_options]unpacked_kvs = [el for group in unpacked_groups for el in group]ordered_pairs = [(k,v) if (k[]=='') else ('' % (k), v)for (k,v) in unpacked_kvs]ordered_options = [[k]+([v] if type(v) == str else list(v)) for (k,v) in ordered_pairs]flattened_options = [el for kvs in ordered_options for el in kvs]return ([''] + self.qsub_switches+ flattened_options + [pipes.quote(c) for c in cmd_args])", "docstring": "Method to generate Popen style argument list for qsub using\nthe qsub_switches and qsub_flag_options parameters. Switches\nare returned first. The qsub_flag_options follow in keys()\nordered if not a vanilla Python dictionary (ie. a Python 2.7+\nor param.external OrderedDict). Otherwise the keys are sorted\nalphanumerically. Note that override_options is a list of\nkey-value pairs.", "id": "f12319:c4:m1"} {"signature": "def __call__(self):", "body": "self._launchinfo = self._setup_launch()self.command.finalize(self._launchinfo)self.job_timestamp = time.strftime('')streams_path = self._setup_streams_path()self.qsub_flag_options[''] = streams_pathself.qsub_flag_options[''] = streams_pathself.collate_and_launch()self._record_info(self._launchinfo)", "docstring": "Main entry point for the launcher. Collects the static\ninformation about the launch and sets up the stdout and stderr\nstream output directories. Generates the first call to\ncollate_and_launch().", "id": "f12319:c4:m2"} {"signature": "def collate_and_launch(self):", "body": "try: specs = next(self.spec_iter)except StopIteration:self.qdel_batch()if self.reduction_fn is not None:self.reduction_fn(self._spec_log, self.root_directory)self._record_info()returntid_specs = [(self.last_tid + i, spec) for (i,spec) in enumerate(specs)]self.last_tid += len(specs)self._append_log(tid_specs)if self.dynamic:self.args.update(self.last_tids, self._launchinfo)self.last_tids = [tid for (tid,_) in tid_specs]output_dir = self.qsub_flag_options['']error_dir = self.qsub_flag_options['']self._qsub_block(output_dir, error_dir, tid_specs)if self.dynamic or (self.reduction_fn is not None):pickle_path = os.path.join(self.root_directory, '')pickle.dump(self, open(pickle_path,''), protocol=)", "docstring": "Method that collates the previous jobs and launches the next\nblock of concurrent jobs when using DynamicArgs. 
This method\nis invoked on initial launch and then subsequently via a\ncommandline call (to Python via qsub) to collate the\npreviously run jobs and launch the next block of jobs.", "id": "f12319:c4:m3"} {"signature": "def _qsub_collate_and_launch(self, output_dir, error_dir, job_names):", "body": "job_name = \"\" % (self.batch_name,self.job_timestamp,self.collate_count)overrides = [(\"\",error_dir), ('',job_name), (\"\",output_dir),('',''.join(job_names))]resume_cmds =[\"\",(\"\"% self.root_directory),\"\",\"\"]cmd_args = [self.command.executable,'', ''.join(resume_cmds)]popen_args = self._qsub_args(overrides, cmd_args)p = subprocess.Popen(popen_args, stdout=subprocess.PIPE)(stdout, stderr) = p.communicate()self.debug(stdout)if p.poll() != :raise EnvironmentError(\"\" % p.poll())self.collate_count += self.message(\"\")return job_name", "docstring": "The method that actually runs qsub to invoke the python\nprocess with the necessary commands to trigger the next\ncollation step and next block of jobs.", "id": "f12319:c4:m4"} {"signature": "def _qsub_block(self, output_dir, error_dir, tid_specs):", "body": "processes = []job_names = []for (tid, spec) in tid_specs:job_name = \"\" % (self.batch_name, self.job_timestamp, tid)job_names.append(job_name)cmd_args = self.command(self.command._formatter(spec),tid, self._launchinfo)popen_args = self._qsub_args([(\"\",error_dir), ('',job_name), (\"\",output_dir)],cmd_args)p = subprocess.Popen(popen_args, stdout=subprocess.PIPE)(stdout, stderr) = p.communicate()self.debug(stdout)if p.poll() != :raise EnvironmentError(\"\" % p.poll())processes.append(p)self.message(\"\" % len(processes))if (self.reduction_fn is not None) or self.dynamic:self._qsub_collate_and_launch(output_dir, error_dir, job_names)", "docstring": "This method handles static argument specifiers and cases where\nthe dynamic specifiers cannot be queued before the arguments\nare known.", "id": "f12319:c4:m5"} {"signature": "def qdel_batch(self):", "body": "p = subprocess.Popen(['', '' % (self.batch_name,self.job_timestamp)],stdout=subprocess.PIPE)(stdout, stderr) = p.communicate()return p.poll()", "docstring": "Runs qdel command to remove all remaining queued jobs using\nthe * pattern . 
Necessary when StopIteration is\nraised with scheduled jobs left on the queue.\nReturns exit-code of qdel.", "id": "f12319:c4:m6"} {"signature": "def _launch_process_group(self, process_commands, streams_path):", "body": "processes = []for cmd, tid in process_commands:job_timestamp = time.strftime('')basename = \"\" % (self.batch_name, job_timestamp, tid)stdout_path = os.path.join(streams_path, \"\" % (basename, tid))stderr_path = os.path.join(streams_path, \"\" % (basename, tid))process = { '' : tid,'' : cmd,'' : stdout_path,'' : stderr_path }processes.append(process)json_path = os.path.join(self.root_directory, self.json_name % (tid))with open(json_path, '') as json_file:json.dump(processes, json_file, sort_keys=True, indent=)p = subprocess.Popen([self.script_path, json_path, self.batch_name,str(len(processes)), str(self.max_concurrency)])if p.wait() != :raise EnvironmentError(\"\" % p.poll())", "docstring": "Aggregates all process_commands and the designated output files into a\nlist, and outputs it as JSON, after which the wrapper script is called.", "id": "f12319:c5:m1"} {"signature": "def cross_check_launchers(self, launchers):", "body": "if len(launchers) == : raise Exception('')timestamps = [launcher.timestamp for launcher in launchers]if not all(timestamps[] == tstamp for tstamp in timestamps):raise Exception(\"\"\"\")root_directories = []for launcher in launchers:command = launcher.commandargs = launcher.argscommand.verify(args)root_directory = launcher.get_root_directory()if os.path.isdir(root_directory):raise Exception(\"\" % root_directory)if root_directory in root_directories:raise Exception(\"\")root_directories.append(root_directory)", "docstring": "Performs consistency checks across all the launchers.", "id": "f12319:c6:m1"} {"signature": "def _launch_all(self, launchers):", "body": "for launcher in launchers:print(\"\" % launcher.batch_name)launcher()return True", "docstring": "Launches all available launchers.", "id": "f12319:c6:m3"} {"signature": "def _review_all(self, launchers):", "body": "if self.launch_args is not None:proceed = self.review_args(self.launch_args,show_repr=True,heading='')if not proceed: return Falsereviewers = [self.review_args,self.review_command,self.review_launcher]for (count, launcher) in enumerate(launchers):if not all(reviewer(launcher) for reviewer in reviewers):print(\"\")return Falseif len(launchers)!= and count < len(launchers)-:skip_remaining = self.input_options(['', '',''],'', default='')if skip_remaining == '': breakelif skip_remaining == '': return Falseif self.input_options(['',''], '', default='') != '':return Falseelse:return self._launch_all(launchers)", "docstring": "Runs the review process for all the launchers.", "id": "f12319:c6:m4"} {"signature": "def review_args(self, obj, show_repr=False, heading=''):", "body": "args = obj.args if isinstance(obj, Launcher) else objprint('' % self.summary_heading(heading))args.summary()if show_repr: print(\"\" % args)response = self.input_options(['', '',''],'', default='')if response == '': return Falseif response == '': args.show()print('')return True", "docstring": "Reviews the given argument specification. 
Can review the\nmeta-arguments (launch_args) or the arguments themselves.", "id": "f12319:c6:m6"} {"signature": "def input_options(self, options, prompt='', default=None):", "body": "check_options = [x.lower() for x in options]while True:response = input('' % (prompt, ''.join(options))).lower()if response in check_options: return response.strip()elif response == '' and default is not None:return default.lower().strip()", "docstring": "Helper to prompt the user for input on the commandline.", "id": "f12319:c6:m9"} {"signature": "def save(self, filename, metadata={}, **data):", "body": "intersection = set(metadata.keys()) & set(data.keys())if intersection:msg = ''raise Exception(msg % ''.join(intersection))", "docstring": "The implementation in the base class simply checks there is no\nclash between the metadata and data keys.", "id": "f12320:c0:m1"} {"signature": "def metadata(self, filename):", "body": "raise NotImplementedError", "docstring": "The metadata returned as a dictionary.", "id": "f12320:c0:m2"} {"signature": "def data(self, filename):", "body": "raise NotImplementedError", "docstring": "Data returned as a dictionary.", "id": "f12320:c0:m3"} {"signature": "def _savepath(self, filename):", "body": "(basename, ext) = os.path.splitext(filename)basename = basename if (ext in self.extensions) else filenameext = ext if (ext in self.extensions) else self.extensions[]savepath = os.path.abspath(os.path.join(self.directory,'' % (basename, ext)))return (tempfile.mkstemp(ext, basename + \"\", self.directory)[]if self.hash_suffix else savepath)", "docstring": "Returns the full path for saving the file, adding an extension\nand making the filename unique as necessary.", "id": "f12320:c0:m5"} {"signature": "@classmethoddef file_supported(cls, filename):", "body": "if not isinstance(filename, str):return False(_, ext) = os.path.splitext(filename)if ext not in cls.extensions:return Falseelse:return True", "docstring": "Returns a boolean indicating whether the filename has an\nappropriate extension for this class.", "id": "f12320:c0:m6"} {"signature": "def save(self, filename, imdata, **data):", "body": "if isinstance(imdata, numpy.ndarray):imdata = Image.fromarray(numpy.uint8(imdata))elif isinstance(imdata, Image.Image):imdata.save(self._savepath(filename))", "docstring": "Data may be either a PIL Image object or a Numpy array.", "id": "f12320:c6:m2"} {"signature": "def load_ipython_extension(ip):", "body": "global _loadedif not _loaded:_loaded = Truefrom lancet import launchif sys.version_info[] == :launch.input = lambda *args, **kwargs: raw_input(*args, **kwargs)plaintext_formatter = ip.display_formatter.formatters['']plaintext_formatter.for_type(Args, repr_pretty_annotated)plaintext_formatter.for_type(Command, repr_pretty_unannotated)plaintext_formatter.for_type(Launcher, repr_pretty_unannotated)plaintext_formatter.for_type(FileType, repr_pretty_unannotated)plaintext_formatter.for_type(review_and_launch, repr_pretty_unannotated)", "docstring": "IPython pretty printing support (optional). 
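input_options above is a small commandline prompt loop. A standalone version of the behaviour it describes, matching a reply against the allowed options case-insensitively and falling back to the default on an empty reply:

def input_options(options, prompt='Select option', default=None):
    lowered = [opt.lower() for opt in options]
    while True:
        response = input('%s [%s]: ' % (prompt, '/'.join(options))).lower().strip()
        if response in lowered:
            return response
        if response == '' and default is not None:
            return default.lower().strip()

# input_options(['y', 'N', 'quit'], 'Execute?', default='N')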
To load the extension\nyou may execute the following in IPython:\n\n%load_ext lancet", "id": "f12321:m2"} {"signature": "def __call__(self, paths=[], **params_to_override):", "body": "p=param.ParamOverrides(self, dict(params_to_override, paths=paths))if p.paths == []:raise Exception(\"\")paths = [p.paths] if isinstance(p.paths, str) else p.pathsdef _desc(path, ind):for vcs in p.commands.keys():if os.path.exists(os.path.join(path, vcs)):proc = subprocess.Popen(p.commands[vcs][ind],stdout=subprocess.PIPE,stderr=subprocess.PIPE, cwd=path)return str(proc.communicate()[].decode()).strip()abspaths = [os.path.abspath(path) for path in paths]return {'' : dict((path, _desc(path,)) for path in abspaths),'': dict((path, _desc(path,)) for path in abspaths),'': dict((path, _desc(path,)) for path in abspaths)}", "docstring": "Takes a single path string or a list of path strings and\nreturns the corresponing version control information.", "id": "f12321:c0:m0"} {"signature": "def set_fp_precision(value):", "body": "Arguments.set_default('', value)", "docstring": "Function to set the floating precision across lancet.", "id": "f12322:m2"} {"signature": "def to_table(args, vdims=[]):", "body": "if not Table:return \"\"kdims = [dim for dim in args.constant_keys + args.varying_keysif dim not in vdims]items = [tuple([spec[k] for k in kdims+vdims])for spec in args.specs]return Table(items, kdims=kdims, vdims=vdims)", "docstring": "Helper function to convet an Args object to a HoloViews Table", "id": "f12322:m3"} {"signature": "def pprint_args(self, pos_args, keyword_args, infix_operator=None, extra_params={}):", "body": "if infix_operator and not (len(pos_args)== and keyword_args==[]):raise Exception('''')(kwargs,_,_,_) = self._pprint_argsself._pprint_args = (keyword_args + kwargs, pos_args, infix_operator, extra_params)", "docstring": "Method to define the positional arguments and keyword order\nfor pretty printing.", "id": "f12322:c0:m0"} {"signature": "def _pprint(self, cycle=False, flat=False, annotate=False, onlychanged=True, level=, tab = ''):", "body": "(kwargs, pos_args, infix_operator, extra_params) = self._pprint_args(br, indent) = ('' if flat else '', '' if flat else tab * level)prettify = lambda x: isinstance(x, PrettyPrinted) and not flatpretty = lambda x: x._pprint(flat=flat, level=level+) if prettify(x) else repr(x)params = dict(self.get_param_values())show_lexsort = getattr(self, '', None) is not Nonemodified = [k for (k,v) in self.get_param_values(onlychanged=onlychanged)]pkwargs = [(k, params[k]) for k in kwargs if (k in modified)] + list(extra_params.items())arg_list = [(k,params[k]) for k in pos_args] + pkwargslines = []if annotate: len_ckeys, len_vkeys = len(self.constant_keys), len(self.varying_keys)info_triple = (len(self),'' % len_ckeys if len_ckeys else '','' % len_vkeys if len_vkeys else '')annotation = '' % info_triplelines = [annotation]if show_lexsort: lines.append('')if cycle:lines.append('' % self.__class__.__name__)elif infix_operator:level = level - triple = (pretty(params[pos_args[]]), infix_operator, pretty(params[pos_args[]]))lines.append('' % triple)else:lines.append('' % self.__class__.__name__)for (k,v) in arg_list:lines.append('' % (br+indent, k, pretty(v)))lines.append('')lines = lines[:-] +[br+(tab*(level-))+''] if show_lexsort:lines.append('' % ''.join(repr(el) for el in self._lexorder))return ''.join(lines)", "docstring": "Pretty printer that prints only the modified keywords and\ngenerates flat representations (for repr) and optionally\nannotates the top of the repr 
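The __call__ record above (f12321:c0:m0) collects version-control information for a list of paths by running per-VCS commands through subprocess, but the command-table literals are elided. A git-only sketch, under the assumption that the commit hash and a describe string are the pieces of information wanted:

import os
import subprocess

def git_info(paths):
    def run(args, path):
        proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, cwd=path)
        return proc.communicate()[0].decode().strip()

    abspaths = [os.path.abspath(p) for p in paths]
    return {
        'commit':   {p: run(['git', 'rev-parse', 'HEAD'], p) for p in abspaths},
        'describe': {p: run(['git', 'describe', '--always'], p) for p in abspaths},
    }

# git_info(['.'])   ->  {'commit': {...}, 'describe': {...}}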
with a comment.", "id": "f12322:c0:m1"} {"signature": "@classmethoddef spec_formatter(cls, spec):", "body": "return type(spec)((k, str(v)) for (k,v) in spec.items())", "docstring": "Formats the elements of an argument set appropriately", "id": "f12322:c1:m3"} {"signature": "@propertydef constant_keys(self):", "body": "raise NotImplementedError", "docstring": "Returns the list of parameter names whose values are constant\nas the argument specifier is iterated. Note that the union of\nconstant and varying_keys should partition the entire set of\nkeys in the case where there are no unsortable keys.", "id": "f12322:c1:m4"} {"signature": "@propertydef constant_items(self):", "body": "raise NotImplementedError", "docstring": "Returns the set of constant items as a list of tuples. This\nallows easy conversion to dictionary format. Note, the items\nshould be supplied in the same key ordering as for\nconstant_keys for consistency.", "id": "f12322:c1:m5"} {"signature": "@propertydef varying_keys(self):", "body": "raise NotImplementedError", "docstring": "Returns the list of parameters whose values vary as the\nargument specifier is iterated. Whenever it is possible, keys\nshould be sorted from those slowest to faster varying and\nsorted alphanumerically within groups that vary at the same\nrate.", "id": "f12322:c1:m6"} {"signature": "def __next__(self):", "body": "raise StopIteration", "docstring": "Called to get a list of specifications: dictionaries with\nparameter name keys and string values.", "id": "f12322:c1:m8"} {"signature": "def copy(self):", "body": "return copy.copy(self)", "docstring": "Convenience method to avoid using the specifier without\nexhausting it.", "id": "f12322:c1:m9"} {"signature": "def _collect_by_key(self,specs):", "body": "allkeys = itertools.chain.from_iterable([[(k, run[k]) for k in run] for run in specs])collection = defaultdict(list)for (k,v) in allkeys: collection[k].append(v)return collection", "docstring": "Returns a dictionary like object with the lists of values\ncollapsed by their respective key. Useful to find varying vs\nconstant keys and to find how fast keys vary.", "id": "f12322:c1:m10"} {"signature": "def __add__(self, other):", "body": "return self._operator(Concatenate, other)", "docstring": "Concatenates two argument specifiers.", "id": "f12322:c1:m12"} {"signature": "def __mul__(self, other):", "body": "return self._operator(CartesianProduct, other)", "docstring": "Takes the Cartesian product of two argument specifiers.", "id": "f12322:c1:m13"} {"signature": "def _cartesian_product(self, first_specs, second_specs):", "body": "return [ dict(zip(list(s1.keys()) + list(s2.keys()),list(s1.values()) + list(s2.values())))for s1 in first_specs for s2 in second_specs ]", "docstring": "Takes the Cartesian product of the specifications. Result will\ncontain N specifications where N = len(first_specs) *\nlen(second_specs) and keys are merged.\nExample: [{'a':1},{'b':2}] * [{'c':3},{'d':4}] =\n[{'a':1,'c':3},{'a':1,'d':4},{'b':2,'c':3},{'b':2,'d':4}]", "id": "f12322:c1:m14"} {"signature": "def summary(self):", "body": "print(\"\" % len(self))varying_keys = ''.join('' % k for k in self.varying_keys)print(\"\" % varying_keys)items = ''.join(['' % (k,v)for (k,v) in self.constant_items])if self.constant_items:print(\"\" % items)", "docstring": "A succinct summary of the argument specifier. 
Unlike the repr,\na summary does not have to be complete but must supply the\nmost relevant information about the object to the user.", "id": "f12322:c1:m15"} {"signature": "def _build_specs(self, specs, kwargs, fp_precision):", "body": "if specs is None:overrides = param.ParamOverrides(self, kwargs,allow_extra_keywords=True)extra_kwargs = overrides.extra_keywords()kwargs = dict([(k,v) for (k,v) in kwargs.items()if k not in extra_kwargs])rounded_specs = list(self.round_floats([extra_kwargs],fp_precision))if extra_kwargs=={}: return [], kwargs, Trueelse: return rounded_specs, kwargs, Falsereturn list(self.round_floats(specs, fp_precision)), kwargs, True", "docstring": "Returns the specs, the remaining kwargs and whether or not the\nconstructor was called with kwarg or explicit specs.", "id": "f12322:c3:m1"} {"signature": "def _unique(self, sequence, idfun=repr):", "body": "seen = {}return [seen.setdefault(idfun(e),e) for e in sequenceif idfun(e) not in seen]", "docstring": "Note: repr() must be implemented properly on all objects. This\nis implicitly assumed by Lancet when Python objects need to be\nformatted to string representation.", "id": "f12322:c3:m4"} {"signature": "def show(self, exclude=[]):", "body": "ordering = self.constant_keys + self.varying_keysspec_lines = [''.join(['' % (k, s[k]) for k in orderingif (k in s) and (k not in exclude)])for s in self.specs]print(''.join(['' % (i,l) for (i,l) in enumerate(spec_lines)]))", "docstring": "Convenience method to inspect the available argument values in\nhuman-readable format. The ordering of keys is determined by\nhow quickly they vary.\n\nThe exclude list allows specific keys to be excluded for\nreadability (e.g. to hide long, absolute filenames).", "id": "f12322:c3:m5"} {"signature": "def lexsort(self, *order):", "body": "if order == []:raise Exception(\"\"\"\"\"\")if not set(el[:] for el in order).issubset(set(self.varying_keys)):raise Exception(\"\")sorted_args = copy.deepcopy(self)specs_param = sorted_args.params('')specs_param.constant = Falsesorted_args.specs = self._lexsorted_specs(order)specs_param.constant = Truesorted_args._lexorder = orderreturn sorted_args", "docstring": "The lexical sort order is specified by a list of string\narguments. Each string is a key name prefixed by '+' or '-'\nfor ascending and descending sort respectively. If the key is\nnot found in the operand's set of varying keys, it is ignored.", "id": "f12322:c3:m6"} {"signature": "def _lexsorted_specs(self, order):", "body": "specs = self.specs[:]if not all(el[] in ['', ''] for el in order):raise Exception(\"\"\"\"\"\")sort_cycles = [(el[:], True if el[]=='' else False)for el in reversed(order)if el[:] in self.varying_keys]for (key, ascending) in sort_cycles:specs = sorted(specs, key=lambda s: s.get(key, None),reverse=(not ascending))return specs", "docstring": "A lexsort is specified using normal key string prefixed by '+'\n(for ascending) or '-' for (for descending).\n\nNote that in Python 2, if a key is missing, None is returned\n(smallest Python value). 
In Python 3, an Exception will be\nraised regarding comparison of heterogenous types.", "id": "f12322:c3:m7"} {"signature": "def linspace(self, start, stop, n):", "body": "if n == : return [start]L = [] * nnm1 = n - nm1inv = / nm1for i in range(n):L[i] = nm1inv * (start*(nm1 - i) + stop*i)return L", "docstring": "Simple replacement for numpy linspace", "id": "f12322:c6:m1"} {"signature": "@staticmethoddef extract_log(log_path, dict_type=dict):", "body": "log_path = (log_path if os.path.isfile(log_path)else os.path.join(os.getcwd(), log_path))with open(log_path,'') as log:splits = (line.split() for line in log)uzipped = ((int(split[]), json.loads(\"\".join(split[:]))) for split in splits)szipped = [(i, dict((str(k),v) for (k,v) in d.items())) for (i,d) in uzipped]return dict_type(szipped)", "docstring": "Parses the log file generated by a launcher and returns\ndictionary with tid keys and specification values.\n\nOrdering can be maintained by setting dict_type to the\nappropriate constructor (i.e. OrderedDict). Keys are converted\nfrom unicode to strings for kwarg use.", "id": "f12322:c8:m0"} {"signature": "@staticmethoddef write_log(log_path, data, allow_append=True):", "body": "append = os.path.isfile(log_path)islist = isinstance(data, list)if append and not allow_append:raise Exception('''' % log_path)if not (islist or isinstance(data, Args)):raise Exception('''')specs = data if islist else data.specsif not all(isinstance(el,dict) for el in specs):raise Exception('')log_file = open(log_path, '') if append else open(log_path, '')start = int(log_file.readlines()[-].split()[])+ if append else ascending_indices = range(start, start+len(data))log_str = ''.join(['' % (tid, json.dumps(el))for (tid, el) in zip(ascending_indices,specs)])log_file.write(\"\"+log_str if append else log_str)log_file.close()", "docstring": "Writes the supplied specifications to the log path. The data\nmay be supplied as either as a an Args or as a list of\ndictionaries.\n\nBy default, specifications will be appropriately appended to\nan existing log file. This can be disabled by setting\nallow_append to False.", "id": "f12322:c8:m1"} {"signature": "@classmethoddef directory(cls, directory, root=None, extension=None, **kwargs):", "body": "root = os.getcwd() if root is None else rootsuffix = '' if extension is None else '' + extension.rsplit('')[-]pattern = directory + os.sep + '' + suffixkey = os.path.join(root, directory,'').rsplit(os.sep)[-]format_parse = list(string.Formatter().parse(key))if not all([el is None for el in zip(*format_parse)[]]):raise Exception('')return cls(key, pattern, root, **kwargs)", "docstring": "Load all the files in a given directory selecting only files\nwith the given extension if specified. 
The given kwargs are\npassed through to the normal constructor.", "id": "f12322:c9:m0"} {"signature": "def fields(self):", "body": "parse = list(string.Formatter().parse(self.pattern))return [f for f in zip(*parse)[] if f is not None]", "docstring": "Return the fields specified in the pattern using Python's\nformatting mini-language.", "id": "f12322:c9:m2"} {"signature": "def _load_expansion(self, key, root, pattern):", "body": "path_pattern = os.path.join(root, pattern)expanded_paths = self._expand_pattern(path_pattern)specs=[]for (path, tags) in expanded_paths:filelist = [os.path.join(path,f) for f in os.listdir(path)] if os.path.isdir(path) else [path]for filepath in filelist:specs.append(dict(tags,**{key:os.path.abspath(filepath)}))return sorted(specs, key=lambda s: s[key])", "docstring": "Loads the files that match the given pattern.", "id": "f12322:c9:m3"} {"signature": "def _expand_pattern(self, pattern):", "body": "(globpattern, regexp, fields, types) = self._decompose_pattern(pattern)filelist = glob.glob(globpattern)expansion = []for fname in filelist:if fields == []:expansion.append((fname, {}))continuematch = re.match(regexp, fname)if match is None: continuematch_items = match.groupdict().items()tags = dict((k,types.get(k, str)(v)) for (k,v) in match_items)expansion.append((fname, tags))return expansion", "docstring": "From the pattern decomposition, finds the absolute paths\nmatching the pattern.", "id": "f12322:c9:m4"} {"signature": "def _decompose_pattern(self, pattern):", "body": "sep = ''float_codes = ['','','', '','', '', '']typecodes = dict([(k,float) for k in float_codes]+ [('',bin), ('',int), ('',oct), ('',hex)])parse = list(string.Formatter().parse(pattern))text, fields, codes, _ = zip(*parse)types = []for (field, code) in zip(fields, codes):if code in ['', None]: continueconstructor = typecodes.get(code[-], None)if constructor: types += [(field, constructor)]stars = ['' if not f else '' for f in fields]globpat = ''.join(text+star for (text,star) in zip(text,stars))refields = ['' if not f else sep+(''% f)+sep for f in fields]parts = ''.join(text+group for (text,group) in zip(text, refields)).split(sep)for i in range(, len(parts), ): parts[i] = re.escape(parts[i])regexp_pattern = ''.join(parts).replace('','')fields = list(f for f in fields if f)return globpat, regexp_pattern , fields, dict(types)", "docstring": "Given a path pattern with format declaration, generates a\nfour-tuple (glob_pattern, regexp pattern, fields, type map)", "id": "f12322:c9:m5"} {"signature": "@classmethoddef from_pattern(cls, pattern, filetype=None, key='', root=None, ignore=[]):", "body": "filepattern = FilePattern(key, pattern, root=root)if FileInfo.filetype and filetype is None:filetype = FileInfo.filetypeelif filetype is None:raise Exception(\"\"\"\"\"\")return FileInfo(filepattern, key, filetype, ignore=ignore)", "docstring": "Convenience method to directly chain a pattern processed by\nFilePattern into a FileInfo instance.\n\nNote that if a default filetype has been set on FileInfo, the\nfiletype argument may be omitted.", "id": "f12322:c10:m1"} {"signature": "def load(self, val, **kwargs):", "body": "if Table and isinstance(val, Table):return self.load_table(val, **kwargs)elif DataFrame and isinstance(val, DataFrame):return self.load_dframe(val, **kwargs)else:raise Exception(\"\" % type(val))", "docstring": "Load the file contents into the supplied pandas dataframe or\nHoloViews Table. 
This allows a selection to be made over the\nmetadata before loading the file contents (may be slow).", "id": "f12322:c10:m3"} {"signature": "def load_table(self, table):", "body": "items, data_keys = [], Nonefor key, filename in table.items():data_dict = self.filetype.data(filename[])current_keys = tuple(sorted(data_dict.keys()))values = [data_dict[k] for k in current_keys]if data_keys is None:data_keys = current_keyselif data_keys != current_keys:raise Exception(\"\")items.append((key, values))return Table(items, kdims=table.kdims, vdims=data_keys)", "docstring": "Load the file contents into the supplied Table using the\nspecified key and filetype. The input table should have the\nfilenames as values which will be replaced by the loaded\ndata. If data_key is specified, this key will be used to index\nthe loaded data to retrive the specified item.", "id": "f12322:c10:m4"} {"signature": "def load_dframe(self, dframe):", "body": "filename_series = dframe[self.key]loaded_data = filename_series.map(self.filetype.data)keys = [list(el.keys()) for el in loaded_data.values]for key in set().union(*keys):key_exists = key in dframe.columnsif key_exists:self.warning(\"\"\"\" % key)suffix = '' if key_exists else ''dframe[key+suffix] = loaded_data.map(lambda x: x.get(key, np.nan))return dframe", "docstring": "Load the file contents into the supplied dataframe using the\nspecified key and filetype.", "id": "f12322:c10:m5"} {"signature": "def _info(self, source, key, filetype, ignore):", "body": "specs, mdata = [], {}mdata_clashes = set()for spec in source.specs:if key not in spec:raise Exception(\"\" % key)mdata = dict((k,v) for (k,v) in filetype.metadata(spec[key]).items()if k not in ignore)mdata_spec = {}mdata_spec.update(spec)mdata_spec.update(mdata)specs.append(mdata_spec)mdata_clashes = mdata_clashes | (set(spec.keys()) & set(mdata.keys()))if mdata_clashes:self.warning(\"\")return specs", "docstring": "Generates the union of the source.specs and the metadata\ndictionary loaded by the filetype object.", "id": "f12322:c10:m6"} {"signature": "def check_pseudo_package(path):", "body": "if not os.path.isdir(path):raise Exception(\"\" % path)else:assets = os.listdir(path)if len(assets) == :raise Exception(\"\" % path)", "docstring": "Verifies that a fake subpackage path for assets (notebooks, svgs,\npngs etc) both exists and is populated with files.", "id": "f12324:m0"} {"signature": "def stable(rankings, A, B):", "body": "partners = dict((a, (rankings[(a, )], )) for a in A)is_stable = False while is_stable == False:is_stable = Truefor b in B:is_paired = False for n in range(, len(B) + ):a = rankings[(b, n)]a_partner, a_n = partners[a]if a_partner == b:if is_paired:is_stable = Falsepartners[a] = (rankings[(a, a_n + )], a_n + )else:is_paired = Truereturn sorted((a, b) for (a, (b, n)) in partners.items())", "docstring": "r\"\"\"\n rankings[(a, n)] = partner that a ranked n^th\n\n >>> from itertools import product\n >>> A = ['1','2','3','4','5','6']\n >>> B = ['a','b','c','d','e','f']\n >>> rank = dict()\n >>> rank['1'] = (1,4,2,6,5,3)\n >>> rank['2'] = (3,1,2,4,5,6)\n >>> rank['3'] = (1,2,4,3,5,6)\n >>> rank['4'] = (4,1,2,5,3,6)\n >>> rank['5'] = (1,2,3,6,4,5)\n >>> rank['6'] = (2,1,4,3,5,6)\n >>> rank['a'] = (1,2,3,4,5,6)\n >>> rank['b'] = (2,1,4,3,5,6)\n >>> rank['c'] = (5,1,6,3,2,4)\n >>> rank['d'] = (1,3,2,5,4,6)\n >>> rank['e'] = (4,1,3,6,2,5)\n >>> rank['f'] = (2,1,4,3,6,5)\n >>> Arankings = dict(((a, rank[a][b_]), B[b_]) for (a, b_) in product(A, range(0, 6)))\n >>> Brankings = dict(((b, 
rank[b][a_]), A[a_]) for (b, a_) in product(B, range(0, 6)))\n >>> rankings = Arankings\n >>> rankings.update(Brankings)\n >>> stable(rankings, A, B)\n [('1', 'a'), ('2', 'b'), ('3', 'd'), ('4', 'f'), ('5', 'c'), ('6', 'e')]", "id": "f12331:m2"} {"signature": "def remove_namespace(doc, namespace):", "body": "ns = u'' % namespacensl = len(ns)for elem in doc.getiterator():if elem.tag.startswith(ns):elem.tag = elem.tag[nsl:]elem.attrib[''] = namespace", "docstring": "Remove namespace in the passed document in place.", "id": "f12332:m0"} {"signature": "def encode(self, xtree):", "body": "raise NotImplementedError()", "docstring": "returns a tuple of (contentType, data)", "id": "f12332:c0:m0"} {"signature": "def decode(self, contentType, xmldata):", "body": "raise NotImplementedError()", "docstring": "Returns an ElementTree document model of `xmldata`, which has been\ndeclared to be of `contentType` format/encoding.", "id": "f12332:c0:m1"} {"signature": "def __init__(self,engine=None, storage=None, prefix='', owner=None,autoCommit=None,router=None, protocol=None, synchronizer=None, codec=None,):", "body": "self.autoCommit = autoCommit if autoCommit is not None else engine is Noneself._model = model.createModel(engine = engine,storage = storage,prefix = prefix,owner_id = owner,context = self,)self.router = routerself.protocol = protocolself.synchronizer = synchronizerself.codec = codecfor attr in dir(self._model):if attr in ('', '', '', ''):continuevalue = getattr(self._model, attr)if issubclass(value.__class__, sqlalchemy.ext.declarative.DeclarativeMeta)and value != self._model.DatabaseObject:setattr(self, attr, value)", "docstring": "The Context constructor accepts the following parameters, of which\nall are optional:\n\n:param owner:\n\n an integer owner ID. Necessary primarily when the adapter\n storage is shared between multiple users/adapter agents\n (i.e. in server contexts). If it is not shared, `owner` can be\n left as ``None`` (the default).\n\n:param storage:\n\n the sqlalchemy storage specification where all the SyncML-\n related data should be stored.\n\n NOTE: can be overridden by parameter `engine`.\n\n NOTE: the storage driver **MUST** support cascading deletes;\n this is done automatically for connections created directly by\n pysyncml for mySQL and sqlite, but it is up to the calling\n program to ensure this for other databases or if the database\n engine is passed in via parameter `engine`. Specifically, when\n pysyncml creates the sqlalchemy engine (i.e. by calling\n ``sqlalchemy.create_engine(storage)``), then InnoDB is requested\n for mySQL tables and ``PRAGMA foreign_keys=ON`` is issued for\n sqlite databases. pysyncml provides a helper function to ensure\n that sqlite databases have cascading deletes enabled::\n\n import sqlalchemy, pysyncml\n db = sqlalchemy.create_engine(...)\n pysyncml.enableSqliteCascadingDeletes(db)\n\n:param engine:\n\n the sqlalchemy storage engine where all the SyncML-related\n data should be stored.\n\n NOTE: overrides parameter `storage`.\n\n NOTE: see notes under parameter `storage` for details on\n cascading delete support.\n\n TODO: it would be great to add a check to ensure that provided\n storage engines have cascading deletes enabled.\n\n:param prefix:\n\n sets a database table name prefix. This is primarily useful when\n using the `engine` parameter, as multiple pysyncml contexts can\n then be defined within the same database namespace. 
Defaults to\n ``pysyncml``.\n\n:param autoCommit:\n\n whether or not to execute a storage engine \"commit\" when syncing\n is complete. The default behavior is dependent on if `engine` is\n provided: if not ``None``, then `autoCommit` defaults to\n ``False``, otherwise, defaults to ``True``.\n\n:param router:\n\n overrides the default router with an object that must implement\n the interface specified by :class:`pysyncml.router.Router`.\n\n:param protocol:\n\n sets the semantic objective to/from protocol evaluation and\n resolution object, which must implement the\n :class:`pysyncml.protocol.Protocol` interface.\n\n:param synchronizer:\n\n this is the engine for handling sync requests and dispatching\n them to the various agents. If specified, the object must\n implement the :class:`pysyncml.synchronizer.Synchronizer`\n interface.\n\n:param codec:\n\n specify the codec used to encode the SyncML commands - typically\n either ``\\'xml\\'`` (the default) or ``\\'wbxml\\'``. It can also\n be an object that implements the :class:`pysyncml.codec.Codec`\n interface.", "id": "f12334:c0:m0"} {"signature": "def Adapter(self, **kw):", "body": "try:ret = self._model.Adapter.q(isLocal=True).one()for k, v in kw.items():setattr(ret, k, v)except NoResultFound:ret = self._model.Adapter(**kw)ret.isLocal = Trueself._model.session.add(ret)if ret.devID is not None:self._model.session.flush()ret.context = selfret.router = self.router or router.Router(ret)ret.protocol = self.protocol or protocol.Protocol(ret)ret.synchronizer = self.synchronizer or synchronizer.Synchronizer(ret)ret.codec = self.codec or ''if isinstance(ret.codec, basestring):ret.codec = codec.Codec.factory(ret.codec)if ret.devID is not None:peers = ret.getKnownPeers()if len(peers) == and peers[].url is not None:ret._peer = peers[]return ret", "docstring": ".. TODO:: move this documentation into model/adapter.py?...\n\nThe Adapter constructor supports the following parameters:\n\n:param devID:\n\n sets the local adapter\\'s device identifier. For servers, this\n should be the externally accessible URL that launches the SyncML\n transaction, and for clients this should be a unique ID, such as\n the IMEI number (for mobile phones). If not specified, it will\n be defaulted to the `devID` of the `devinfo` object. If it\n cannot be loaded from the database or from the `devinfo`, then\n it must be provided before any synchronization can begin.\n\n:param name:\n\n sets the local adapter\\'s device name - usually a human-friendly\n description of this SyncML\\'s function.\n\n:param devinfo:\n\n sets the local adapter :class:`pysyncml.devinfo.DeviceInfo`. If\n not specified, it will be auto-loaded from the database. If it\n cannot be loaded from the database, then it must be provided\n before any synchronization can begin.\n\n:param peer:\n\n TODO: document...\n\n:param maxGuidSize:\n\n TODO: document...\n\n:param maxMsgSize:\n\n TODO: document...\n\n:param maxObjSize:\n\n TODO: document...\n\n:param conflictPolicy:\n\n sets the default conflict handling policy for this adapter,\n and can be overriden on a per-store basis (applies only when\n operating as the server role).", "id": "f12334:c0:m1"} {"signature": "def RemoteAdapter(self, **kw):", "body": "ret = self._model.Adapter(isLocal=False, **kw)self._model.session.add(ret)if ret.devID is not None:self._model.session.flush()return ret", "docstring": ".. 
TODO:: move this documentation into model/adapter.py?...\n\nThe RemoteAdapter constructor supports the following parameters:\n\n:param url:\n\n specifies the URL that this remote SyncML server can be reached\n at. The URL must be a fully-qualified URL.\n\n:param auth:\n\n set what kind of authentication scheme to use, which generally is\n one of the following values:\n\n **None**:\n\n indicates no authentication is required.\n\n **pysyncml.NAMESPACE_AUTH_BASIC**:\n\n specifies to use \"Basic-Auth\" authentication scheme.\n\n **pysyncml.NAMESPACE_AUTH_MD5**:\n\n specifies to use MD5 \"Digest-Auth\" authentication scheme.\n NOTE: this may not be implemented yet...\n\n:param username:\n\n if the `auth` is not ``None``, then the username to authenticate\n as must be provided via this parameter.\n\n:param password:\n\n if the `auth` is not ``None``, then the password to authenticate\n with must be provided via this parameter.", "id": "f12334:c0:m2"} {"signature": "def deleteAllItems(self):", "body": "for item in self.getAllItems():self.deleteItem(item.id)", "docstring": "[OPTIONAL] Deletes all items stored by this Agent. The default\nimplementation simply iterates over :meth:`getAllItems` and\ndeletes them one at a time.", "id": "f12337:c0:m1"} {"signature": "def dumpItem(self, item, stream, contentType=None, version=None):", "body": "raise NotImplementedError()", "docstring": "Converts the specified `item` to serialized form (such that it can\ntransported over the wire) and writes it to the provided file-like\n`stream` object. For agents that support multiple content-types,\nthe desired `contentType` and `version` will be specified as a\nparameter. If `contentType` and `version` are None, appropriate\ndefault values should be used. For agents that concurrently use\nmultiple content-types, the return value may be a two-element\ntuple of (contentType, version), thus overriding or enhancing the\nprovided values.", "id": "f12337:c0:m2"} {"signature": "def dumpsItem(self, item, contentType=None, version=None):", "body": "buf = six.StringIO()ret = self.dumpItem(item, buf, contentType, version)if ret is None:return buf.getvalue()return (ret[], ret[], buf.getvalue())", "docstring": "[OPTIONAL] Identical to :meth:`dump`, except the serialized form\nis returned as a string representation. As documented in\n:meth:`dump`, the return value can optionally be a three-element\ntuple of (contentType, version, data) if the provided content-type\nshould be overridden or enhanced. The default implementation just\nwraps :meth:`dump`.", "id": "f12337:c0:m3"} {"signature": "def loadItem(self, stream, contentType=None, version=None):", "body": "raise NotImplementedError()", "docstring": "Reverses the effects of the :meth:`dumpItem` method, and returns\nthe de-serialized Item from the file-like source `stream`.\n\nNote: `version` will typically be ``None``, so it should either be\nauto-determined, or not used. This is an issue in the SyncML\nprotocol, and is only here for symmetry with :meth:`dumpItem`\nand as \"future-proofing\".", "id": "f12337:c0:m4"} {"signature": "def loadsItem(self, data, contentType=None, version=None):", "body": "buf = six.StringIO(data)return self.loadItem(buf, contentType, version)", "docstring": "[OPTIONAL] Identical to :meth:`loadItem`, except the serialized\nform is provided as a string representation in `data` instead of\nas a stream. 
The default implementation just wraps\n:meth:`loadItem`.", "id": "f12337:c0:m5"} {"signature": "def getAllItems(self):", "body": "raise NotImplementedError()", "docstring": "Returns an iterable of all the items stored in the local datastore.", "id": "f12337:c0:m6"} {"signature": "def addItem(self, item):", "body": "raise NotImplementedError()", "docstring": "The specified `item`, which will have been created via a prior\n``loadItem()``, is added to the local datastore. This method\nreturns either a new :class:`pysyncml.Item\n` instance or the same `item` that was\npassed --- in either case, the returned item **MUST** have a valid\n:attr:`pysyncml.Item.id ` attribute.", "id": "f12337:c0:m7"} {"signature": "def getItem(self, itemID):", "body": "raise NotImplementedError()", "docstring": "Returns the :class:`pysyncml.Item `\ninstance associated with the specified `itemID`, which may or may\nnot have been converted to a string.", "id": "f12337:c0:m8"} {"signature": "def replaceItem(self, item, reportChanges):", "body": "raise NotImplementedError()", "docstring": "Updates the local datastore item with ID `item.id` to the value\nprovided as `item`, which will have been created via a prior\n``loadItem()``.\n\nIf `reportChanges` is True, then the return value will be used to\ntrack the changes that were applied. If `reportChanges` is True but\n``None`` is returned, then change tracking will be disabled for\nthis change, which will cascade to any past or future changes that\nhave not yet been synchronized. The return value must be a string\n(or an object that supports coercion via ``str()``). If multiple\nchanges accumulate for an object, they will be concatenated, in\norder, and delimited via a semicolon (\";\"). See :doc:`../merging`\nfor details.", "id": "f12337:c0:m9"} {"signature": "def deleteItem(self, itemID):", "body": "raise NotImplementedError()", "docstring": "Deletes the local datastore item with ID `itemID`.", "id": "f12337:c0:m10"} {"signature": "def matchItem(self, item):", "body": "for match in self.getAllItems():if cmp(match, item) == :return matchreturn None", "docstring": "[OPTIONAL] Attempts to find the specified item and returns an item\nthat describes the same object although it's specific properties\nmay be different. For example, a contact whose name is an\nidentical match, but whose telephone number has changed would\nreturn the matched item. ``None`` should be returned if no match\nis found, otherwise the item that `item` matched should be\nreturned.\n\nThis is used primarily when a slow-sync is invoked and objects\nthat exist in both peers should not be replicated.\n\nNote that **NO** merging of the items' properties should be done;\nthat will be initiated via a separate call to :meth:`mergeItems`.\n\nThis method by default will iterate over all items (by calling\n:meth:`getAllItems`) and compare them using ``cmp()``. This means\nthat if the items managed by this agent implement the ``__eq__``\nor ``__cmp__`` methods, then matching items will be detected and\nreturned. 
Otherwise, any items that exist in both peers will be\nduplicated on slow-sync.\n\nSub-classes *should* implement a more efficient method of finding\nmatching items.\n\nSee :doc:`../merging` for details.", "id": "f12337:c0:m11"} {"signature": "def mergeItems(self, localItem, remoteItem, changeSpec):", "body": "raise ConflictError('')", "docstring": "[OPTIONAL] Merges the properties of `remoteItem`, which is an item\nprovided by a remote peer during a synchronization, into the\n`localItem`, which is an item retrieved from this agent either via\n:meth:`getItem` or :meth:`matchItem`. `changeSpec` will represent\nthe changes applied to `localItem` since `remoteItem` was last\nsynchronized, or will be ``None`` when called as a result of a\nslow-sync :meth:`matchItem` call.\n\nThis method should return a new change-spec (see\n:meth:`replaceItem` for details) that represents the changes\napplied to `localItem` from `remoteItem`.\n\nIf the items cannot be merged, then a `pysyncml.ConflictError`\nshould be raised with more descriptive information on what failed\nduring the merge --- in which case pysyncml will revert to the\nconflict resolution policy defined by `store.conflictPolicy` or\n`adapter.conflictPolicy`.\n\nIMPORTANT: if the merge fails, `localItem` and `remoteItem` must\nstay untouched by this call; most importantly, if the merge fails\nwith a ConflictError, then `remoteItem` must be in the identical\nstate as when it entered the call.\n\nThis method by default raises a ConflictError, which means that if\nany changes are made to the same item simultaneously by two\ndifferent peers, they will result in a conflict and will not be\nauto-mergeable.\n\nSee :doc:`../merging` for details.", "id": "f12337:c0:m12"} {"signature": "def pushChange(self, attribute, currentValue, newValue):", "body": "raise NotImplementedError()", "docstring": "Record the change to the specified `attribute` from the original\nvalue `currentValue` to `newValue`. The merger object itself\n(i.e. ``self``) is returned, allowing multiple changes to be\nchained. The change-spec returned by :meth:`getChangeSpec` will be\nupdated according to this merger's change detection strategy.\n\nIf `currentValue` is ``None``, the field is assumed to be *added*.\nConversely, if `newValue` is ``None``, the field is assumed to be\n*deleted*. If both are ``None`` (or, more generally speaking,\nequal), the request is ignored.", "id": "f12343:c0:m1"} {"signature": "def getChangeSpec(self):", "body": "raise NotImplementedError()", "docstring": "Returns the current change-spec representing all calls to\n:meth:`pushChange` since construction of this merger.", "id": "f12343:c0:m2"} {"signature": "def mergeChanges(self, attribute, localValue, remoteValue):", "body": "raise NotImplementedError()", "docstring": "Returns the value of the specified `attribute` as determined by\nthe change-spec stored by this, the current `localValue` of this\nSyncML peer (i.e. the serve-side) and the client-provided\n`remoteValue` (i.e. of the remote client-side). Raises a\n`pysyncml.ConflictError` if the local changes conflict with the\nvalue provided by the remote peer.\n\nIf `localValue` is ``None``, the field is assumed to not exist\nlocally. Conversely, if `remoteValue` is ``None``, the field is\nassumed to not exist on the remote peer. 
If both are ``None`` (or,\nmore generally speaking, equal), the value is returned as-is\nwithout further investigation.", "id": "f12343:c0:m3"} {"signature": "def newMerger(self, changeSpec=None):", "body": "raise NotImplementedError()", "docstring": "Returns a :class:`Merger` for the specified `changeSpec` which can\nbe ``None`` if the merger is intended to generate a change-spec.", "id": "f12343:c1:m0"} {"signature": "def __init__(self, sharedDefault=True, default=None, mergers=None, **kw):", "body": "self.shared = sharedDefaultself.default = default or AttributeMergerFactory()self.mergers = mergers or dict()self.mergers.update(kw)", "docstring": "The CompositeMergerFactory constructor accepts the following\nparameters:\n\n:param default:\n\n The default merger factory (if unspecified, defaults to an\n AttributeMergerFactory). See `sharedDefault` if the default\n is not an attribute-based merger factory.\n\n:param mergers:\n\n A dictionary of (attribute => MergerFactory) that override the\n default merger factory for the specified attribute. If\n unspecified, all attributes will use the default merger factory.\n\n:param sharedDefault:\n\n The `sharedDefault` parameter controls how default attributes\n get handled. When ``True`` (the default), then all default\n attributes will share a Merger and the Merger will be passed the\n attribute name during operations. When ``False``, then each\n attribute will get its own Merger and operations will not get\n the attribute name. It is important that the `default` and\n `sharedDefault` parameters match - for example, if `default` is\n set to a ``TextMergerFactory``, then `sharedDefault` must be set\n to ``False``.", "id": "f12343:c6:m0"} {"signature": "def append(self, fieldname, changeType, initialValue=None, isMd5=False):", "body": "raise NotImplementedError()", "docstring": "Add a change to this ChangeTracker.\n\n:param fieldname:\n\n The item attribute that was changed in some way. The type of\n `fieldname` is dependent on which subclass of ChangeTracker is\n being used.\n\n:param changeType:\n\n The type of change that was applied to `fieldname`, which can be\n one of ``pysyncml.ITEM_ADDED``, ``pysyncml.ITEM_MODIFIED``, or\n ``pysyncml.ITEM_DELETED``.\n\n:param initialValue:\n\n For non-ADDED change types, specifies the *initial* value of the\n field, before the change was applied. Note that if the\n `initialValue` is very large, an MD5 checksum can be provided\n instead, in which case `isMd5` should be set to ``True``.\n\n:param isMd5:\n\n Specifies whether `initialValue` is an MD5 checksum or not. For\n large values of `initialValue` the ChangeTrackers will\n automatically convert it to a checksum, but this allows the\n caller to potentially do some additional optimizations.", "id": "f12347:c1:m3"} {"signature": "def isChange(self, fieldname, changeType, newValue=None, isMd5=False):", "body": "raise NotImplementedError()", "docstring": "Checks to see if the specified field should be changed to the\n`newValue`, first checking to see if the change conflicts with the\nchange-spec stored by this ChangeTracker. 
IMPORTANT: the\n`changeType` is relative to the **current** local value, as\nrecorded by the changes stored by this tracker from the\n**initial** value.\n\nSee :meth:`update` for a layer above.\n\nThis method will terminate in one of the following three ways:\n\n* returns None:\n\n The `newValue` is actually outdated, but does not conflict.\n The field should be left as-is.\n\n* returns `changeObject`:\n\n If any form of change should be applied as a result of this\n change request, then `changeObject` will be non-None and will\n define how. The exact nature of the object is ChangeTracker\n subclass dependent.\n\n* raises `pysyncml.ConflictError`:\n\n The `newValue` conflicts with a change made by another source\n and should be handled by following conflict resolution policy.\n\nFor example, if two clients and a server are tracking changes\nmade to the following fields::\n\n initial values (on server, client 1 and client 2):\n field \"a\" => \"A\" (will not change)\n field \"b\" => \"B\" (will be modified by client 1)\n field \"c\" => \"C\" (will be deleted by client 1)\n field \"d\" => \"D\" (will be modified by client 2)\n field \"e\" => \"E\" (will be a conflict)\n field \"f\" => \"F\" (will be modified identically)\n\n client 1 changes:\n does not alter field \"a\"\n modifies field \"b\" to \"Bmod\"\n deletes field \"c\"\n does not alter field \"d\"\n deletes field \"e\"\n modifies field \"f\" to \"Fmod\"\n\n client 2 changes (simultaneous to client 1 changes):\n does not alter field \"a\"\n does not alter field \"b\"\n does not alter field \"c\"\n modifies field \"d\" to \"Dmod\"\n modifies field \"e\" to \"Emod\"\n modifies field \"f\" to \"Fmod\"\n\n client 1 synchronizes with server ==> server values:\n field \"b\" => \"Bmod\"\n deletes fields \"c\" and \"e\"\n field \"f\" => \"Fmod\"\n change-spec for client 2: \"mod:b@vB,f@vF|del:c@vC,e@vE\"\n\n when client 2 synchronizes, the framework detects a conflict and\n requests a merge attempt by the agent. the agent then compares the\n current values and those presented by client 2 and determines:\n - field \"a\" is unchanged\n - field \"b\" differs: changed to \"B\"\n - field \"c\" differs: added as \"C\"\n - field \"d\" differs: change to \"Dmod\"\n - field \"e\" differs: added as \"Dmod\"\n - field \"f\" is unchanged\n\n for the fields that are mismatches (i.e. fields \"b\", \"c\", \"d\",\n and \"e\"), the agent checks with this change tracker (\"ct\") to\n see if it was actually a change, and if so, if it conflicts:\n\n - ct.isChange('b', 'B') ==> None\n - ct.isChange('c', 'C') ==> None\n - ct.isChange('d', 'Dmod') ==> 'd'\n - ct.isChange('e', 'Emod') ==> raises ConflictError\n\nNote that this assumes that the caller will have verified that the\nremote `currentValue` is **not** equal to the local active value -\ni.e. that there is some difference between the `fieldname` values,\nand a resolution needs to be negotiated.\n\n:param newValue:\n\n A string representing the value that is being tested for\n conflicts or outdated-ness.\n\n.. 
TODO:: perhaps rename this method?...", "id": "f12347:c1:m4"} {"signature": "def update(self, fieldname, localValue, remoteValue):", "body": "if localValue == remoteValue:return localValuect = constants.ITEM_DELETED if remoteValue is None else constants.ITEM_MODIFIEDif localValue is None:ct = constants.ITEM_ADDEDchanged = self.isChange(fieldname, ct, remoteValue)if changed is None:return localValueself.append(changed, ct, initialValue=localValue, isMd5=False)return remoteValue", "docstring": "Returns the appropriate current value, based on the changes\nrecorded by this ChangeTracker, the value stored by the server\n(`localValue`), and the value stored by the synchronizing client\n(`remoteValue`). If `remoteValue` conflicts with changes stored\nlocally, then a `pysyncml.ConflictError` is raised.\n\nIf a change needs to be applied because `remoteValue` has been\nupdated, then the new value will be returned, and this\nChangeTracker will be updated such that a call to\n:meth:`getChangeSpec` will incorporate the change.\n\n:param fieldname:\n\n The name of the fieldname being evaluated.\n\n:param localValue:\n\n The value of the field as stored by the server, usually the one that\n also stored the current change-spec. If `localValue` is ``None``,\n then it is assumed that the field was potentially added (this will\n first be verified against the stored change-spec).\n\n:param remoteValue:\n\n The new value being presented that may or may not be a source of\n conflict. If `remoteValue` is ``None``, then it is assumed that\n the field was potentially deleted (this will first be verified\n against the stored change-spec).", "id": "f12347:c1:m5"} {"signature": "def getFullChangeSpec(self):", "body": "return self._changes2spec(self.allchanges)", "docstring": "Returns a string-representation of *all* changes recorded by this\nChangeTracker, including those provided in the constructor and any\ncalls to `pushChangeSpec()`. Note that this is usually *NOT* what\nyou are looking for when reporting changes to the pysyncml\nframework -- for that, see :meth:`getChangeSpec`.", "id": "f12347:c1:m6"} {"signature": "def getChangeSpec(self):", "body": "return self._changes2spec(self.changes)", "docstring": "Returns a string-representation of the changes recorded by this\nChangeTracker that were reported since construction (or calls to\npushChangeSpec()) by calls to :meth:`append` or :meth:`update`.\n\nThis is similar to, but distinct from, :meth:`getFullChangeSpec`.", "id": "f12347:c1:m7"} {"signature": "def __init__(self, changeSpec=None, *args, **kw):", "body": "self.baseline = dict()self.current = dict()super(AttributeChangeTracker, self).__init__(changeSpec, *args, **kw)", "docstring": "Initializes this AttributeChangeTracker with the provided\n`changeSpec`, which is expected to be in the same format as what\nwould have been returned by a call to ``str()`` on this\nobject. 
The change-spec will look similar to::\n\n add:tel-home|mod:firstname@m68b329d...,lastname@mh4d9...|del:tel-pager@mba45...\n\nIf `changeSpec` is not specified, this AttributeChangeTracker will\nstart assuming no prior changes were made to any fields.", "id": "f12347:c2:m0"} {"signature": "def isChange(self, fieldname, changeType, newValue=None, isMd5=False):", "body": "changes = self._collapseChanges(self.baseline, self.current)if fieldname not in changes:return fieldnamecur = changes[fieldname]if changeType == constants.ITEM_DELETED:if cur.op == constants.ITEM_ADDED or cur.op == constants.ITEM_DELETED:return Noneraise ConflictError(''% (fieldname,))if isMd5Equal(newValue, isMd5, cur.ival, cur.md5):return Noneraise ConflictError('' % (fieldname,))", "docstring": "Implements as specified in :meth:`.ChangeTracker.isChange` where\nthe `changeObject` is simply the fieldname that needs to be\nupdated with the `newValue`. Currently, this is always equal to\n`fieldname`.", "id": "f12347:c2:m6"} {"signature": "def __init__(self, changeSpec=None, *args, **kw):", "body": "self.baseline = []self.current = []super(ListChangeTracker, self).__init__(changeSpec, *args, **kw)", "docstring": "Initializes this ListChangeTracker with the provided `changeSpec`,\nwhich is expected to be in the same format as what would have been\nreturned by a call to ``str()`` on this object. The change-spec\nwill look similar to::\n\n 2:a,1:M68b329d...,1:mh4d9,2:Dba45...,3:a\n\nIf `changeSpec` is not specified, this ListChangeTracker will\nstart assuming no prior changes were made to any content and will\nexpect changes to be reported via :meth:`pushChange`.", "id": "f12347:c3:m0"} {"signature": "def append(self, listIndex, changeType, initialValue=None, isMd5=False):", "body": "if not isMd5 and initialValue is not None and len(initialValue) > :initialValue = hashlib.md5(initialValue).hexdigest()isMd5 = Truecur = adict(index = int(listIndex),op = changeType,ival = initialValue,md5 = isMd5)for idx, val in enumerate(self.current):if val.index < cur.index:continueif val.index > cur.index:self.current.insert(idx, cur)breakraise InvalidChangeSpec('' % (cur.index,))else:self.current.append(cur)", "docstring": "Adds a change spec to the current list of changes. 
The `listIndex`\nrepresents the line number (in multi-line mode) or word number (in\nsingle-line mode), and must be **INCLUSIVE** of both additions and\ndeletions.", "id": "f12347:c3:m7"} {"signature": "def isChange(self, listIndex, changeType, newValue=None, isMd5=False, token=None):", "body": "adjust = token = token index = int(listIndex)ret = indexchanges = self._collapseChanges(self.baseline, self.current)for cur in changes:if cur.index > index:if changeType != constants.ITEM_ADDED:return (ret, None)if token is None or token[] != index - adjust:token = (ret, )token = (ret, token[] + )return (ret, token)if cur.index != index:if cur.op == constants.ITEM_DELETED:index += adjust += continueif token is not None and token[] == index - adjust:index += token[]continueif changeType == constants.ITEM_DELETED:if cur.op == constants.ITEM_ADDED:return (None, None)raise ConflictError('' % (index,))if changeType == constants.ITEM_ADDED:if token is None:token = (ret, )token = (ret, token[] + )if cur.op == constants.ITEM_DELETED:if isMd5Equal(newValue, isMd5, cur.ival, cur.md5):return (None, token)return (ret, token)if cur.op == constants.ITEM_DELETED:index += adjust += continueif cur.op == constants.ITEM_ADDED:raise ConflictError('' % (index,))if isMd5Equal(newValue, isMd5, cur.ival, cur.md5):return (None, None)raise ConflictError('' % (index,))if changeType != constants.ITEM_ADDED:return (ret, None)if token is None or token[] != index - adjust:token = (ret, )token = (ret, token[] + )return (ret, token)", "docstring": "Implements as specified in :meth:`.ChangeTracker.isChange` where\nthe `changeObject` is a two-element tuple. The first element is\nthe index at which the change should be applied, and the second\nelement is an abstract token that should be passed back into this\nmethod at every iteration.\n\nIMPORTANT: unlike the AttributeChangeTracker, the\nListChangeTracker's `isChange()` method is sensitive to order\n(which is why it uses the `changeObject` and `token`\nmechanisms. Therefore, it is important to call `isChange()`\nsequentially with all changes in the order that they occur in the\nchange list.", "id": "f12347:c3:m8"} {"signature": "def cmpToDataStore_uri(base, ds1, ds2):", "body": "ret = difflib.get_close_matches(base.uri, [ds1.uri, ds2.uri], , cutoff=)if len(ret) <= :return if ret[] == ds1.uri:return -return ", "docstring": "Bases the comparison of the datastores on URI alone.", "id": "f12348:m6"} {"signature": "def getAddressSize():", "body": "return int(platform.architecture(bits='')[].replace('', ''))", "docstring": "Returns the size of a memory address reference on the current\n platform (e.g. 32 or 64 for respectively 32-bit or 64-bit operating\n platforms) - defaults to 32 if it cannot be determined.", "id": "f12349:m9"} {"signature": "def getMaxMemorySize(context=None):", "body": "return min(sys.maxint, int(pow(,)-))", "docstring": "Returns the maximum size of a memory object. By default this is,\n set to ``sys.maxint``, however the `context` may override this behavior.\n\n NOTE: currently, this is being hardcoded to a maximum of 2GB for\n compatibility with funambol servers, which croak above that\n value.\n\n TODO: allow the context to control this, or implement auto-detect to\n determine what the remote peer can support...", "id": "f12349:m10"} {"signature": "def describeStats(stats, stream, title=None, details=True, totals=True, gettext=None):", "body": "from . 
import statemodeStringLut = dict(((constants.SYNCTYPE_TWO_WAY, ''),(constants.SYNCTYPE_SLOW_SYNC, ''),(constants.SYNCTYPE_ONE_WAY_FROM_CLIENT, ''),(constants.SYNCTYPE_REFRESH_FROM_CLIENT, ''),(constants.SYNCTYPE_ONE_WAY_FROM_SERVER, ''),(constants.SYNCTYPE_REFRESH_FROM_SERVER, ''),))if gettext is not None:_ = gettextelse:_ = lambda s: swSrc = len(_(''))wMode = len(_(''))wCon = len(_(''))wCol = len(_(''))wMrg = len(_(''))wHereAdd = wPeerAdd = len(_(''))wHereMod = wPeerMod = len(_(''))wHereDel = wPeerDel = len(_(''))wHereErr = wPeerErr = len(_(''))totLoc = totRem = totErr = totCol = totMrg = for key in stats.keys():wSrc = max(wSrc, len(key))wMode = max(wMode, len(modeStringLut.get(stats[key].mode)))wCol = max(wCol, len(num2str(stats[key].conflicts)))wMrg = max(wMrg, len(num2str(stats[key].merged)))wHereAdd = max(wHereAdd, len(num2str(stats[key].hereAdd)))wPeerAdd = max(wPeerAdd, len(num2str(stats[key].peerAdd)))wHereMod = max(wHereMod, len(num2str(stats[key].hereMod)))wPeerMod = max(wPeerMod, len(num2str(stats[key].peerMod)))wHereDel = max(wHereDel, len(num2str(stats[key].hereDel)))wPeerDel = max(wPeerDel, len(num2str(stats[key].peerDel)))wHereErr = max(wHereErr, len(num2str(stats[key].hereErr)))wPeerErr = max(wPeerErr, len(num2str(stats[key].peerErr)))totLoc += stats[key].hereAdd + stats[key].hereMod + stats[key].hereDeltotRem += stats[key].peerAdd + stats[key].peerMod + stats[key].peerDeltotErr += stats[key].hereErr + stats[key].peerErrtotCol += stats[key].conflictstotMrg += stats[key].mergedif wCon > wCol + + wMrg:diff = wCon - ( wCol + + wMrg )wCol += diff / wMrg = wCon - - wColelse:wCon = wCol + + wMrgif details:tWid = ( wSrc + + wMode + + wHereAdd + wHereMod + wHereDel + wHereErr + + + wPeerAdd + wPeerMod + wPeerDel + wPeerErr + + + wCon )else:if title is None:tWid = else:tWid = len(title)if totals:sumlist = []for val, singular, plural in [(totLoc, _(''), _('')),(totRem, _(''), _('')),(totErr, _(''), _('')),]:if val == :sumlist.append(num2str(val) + '' + singular)elif val > :sumlist.append(num2str(val) + '' + plural)if len(sumlist) <= :sumlist = _('')elif len(sumlist) == :sumlist = sumlist[]else:sumlist = ''.join(sumlist[:-]) + '' + _('') + '' + sumlist[-]if totMrg > or totCol > :sumlist += ''if totMrg == :sumlist += num2str(totMrg) + '' + _('')elif totMrg > :sumlist += num2str(totMrg) + '' + _('')if totMrg > and totCol > :sumlist += '' + _('') + ''if totCol == :sumlist += num2str(totCol) + '' + _('')elif totCol > :sumlist += num2str(totCol) + '' + _('')sumlist += ''if len(sumlist) > tWid:wSrc += len(sumlist) - tWidtWid = len(sumlist)if title is not None:stream.write('' + '' * tWid + '')stream.write(''.format(title, w=tWid))stream.write('')hline = ''+ '' * wSrc+ ''+ '' * wMode+ ''+ '' * ( wHereAdd + wHereMod + wHereDel + wHereErr + )+ ''+ '' * ( wPeerAdd + wPeerMod + wPeerDel + wPeerErr + )+ ''+ '' * wCon+ ''if details:stream.write(hline)stream.write('' + '' * wSrc)stream.write('' + '' * wMode)stream.write(''.format(_(''), w=( wHereAdd + wHereMod + wHereDel + wHereErr + )))stream.write(''.format(_(''), w=( wPeerAdd + wPeerMod + wPeerDel + wPeerErr + )))stream.write(''.format(_(''), w=wCon))stream.write('')stream.write(''.format(_(''), w=wSrc))stream.write(''.format(_(''), w=wMode))stream.write(''.format(_(''), w=wHereAdd))stream.write(''.format(_(''), w=wHereMod))stream.write(''.format(_(''), w=wHereDel))stream.write(''.format(_(''), w=wHereErr))stream.write(''.format(_(''), w=wPeerAdd))stream.write(''.format(_(''), w=wPeerMod))stream.write(''.format(_(''), 
w=wPeerDel))stream.write(''.format(_(''), w=wPeerErr))stream.write(''.format(_(''), w=wCol))stream.write(''.format(_(''), w=wMrg))stream.write('')hsline = '' + '' * wSrc+ '' + '' * wMode+ '' + '' * wHereAdd+ '' + '' * wHereMod+ '' + '' * wHereDel+ '' + '' * wHereErr+ '' + '' * wPeerAdd+ '' + '' * wPeerMod+ '' + '' * wPeerDel+ '' + '' * wPeerErr+ '' + '' * wCol+ '' + '' * wMrg+ ''stream.write(hsline)def numcol(val, wid):if val == :return ''.format('', w=wid)return ''.format(num2str(val), w=wid)for key in sorted(stats.keys(), key=lambda k: str(k).lower()):stream.write(''.format(key, w=wSrc))stream.write(''.format(modeStringLut.get(stats[key].mode), w=wMode))stream.write(numcol(stats[key].hereAdd, wHereAdd))stream.write(numcol(stats[key].hereMod, wHereMod))stream.write(numcol(stats[key].hereDel, wHereDel))stream.write(numcol(stats[key].hereErr, wHereErr))stream.write(numcol(stats[key].peerAdd, wPeerAdd))stream.write(numcol(stats[key].peerMod, wPeerMod))stream.write(numcol(stats[key].peerDel, wPeerDel))stream.write(numcol(stats[key].peerErr, wPeerErr))stream.write(numcol(stats[key].conflicts, wCol))stream.write(numcol(stats[key].merged, wMrg))stream.write('')stream.write(hsline)if totals:if title is None and not details:stream.write('' + '' * tWid + '')stream.write(''.format(sumlist, w=tWid))stream.write('')stream.write('' + '' * tWid + '')return", "docstring": "Renders an ASCII-table of the synchronization statistics `stats`,\nexample output:\n\n.. code-block::\n\n +----------------------------------------------------------------------------------+\n | TITLE |\n +----------+------+-------------------------+--------------------------+-----------+\n | | | Local | Remote | Conflicts |\n | Source | Mode | Add | Mod | Del | Err | Add | Mod | Del | Err | Col | Mrg |\n +----------+------+-------+-----+-----+-----+--------+-----+-----+-----+-----+-----+\n | contacts | <= | - | - | - | - | 10,387 | - | - | - | - | - |\n | note | SS | 1,308 | - | 2 | - | - | - | - | - | - | - |\n +----------+------+-------+-----+-----+-----+--------+-----+-----+-----+-----+-----+\n | 1,310 local changes and 10,387 remote changes. |\n +----------------------------------------------------------------------------------+\n\n:Parameters:\n\nstats : dict\n\n The synchronization stats returned by a call to Adapter.sync().\n\nstream : file-like-object\n\n An output file-like object that has at least a `write()` method,\n e.g. ``sys.stdout`` can be used.\n\ntitle : str, optional, default: null\n\n A title placed at the top of the table -- if omitted (the default),\n then no title is rendered.\n\ndetails : bool, optional, default: true\n\n If truthy, a per-datastore listing of changes will be displayed\n (as in the above example).\n\ntotals : bool, optional, default: true\n\n If truthy, a summary of all changes will be displayed (as in the\n above example).\n\ngettext : callable, optional, @DEPRECATED(0.2.0), default: null\n\n A `gettext.gettext` compatible callable used for translating\n localized content (such as number formatting, etc.).\n\n NOTE: this parameter is deprecated, and will be replaced with\n a generalized i18n solution.", "id": "f12349:m12"} {"signature": "def dump(self, stream, contentType=None, version=None):", "body": "raise NotImplementedError()", "docstring": "Converts this Item to serialized form (such that it can be\ntransported over the wire) and writes it to the provided file-like\n`stream` object. 
For agents that support multiple content-types,\nthe desired `contentType` and `version` will be specified as a\nparameter. If `contentType` and `version` are None, appropriate\ndefault values should be used. For agents that concurrently use\nmultiple content-types, the return value may be a two-element\ntuple of (contentType, version), thus overriding or enhancing the\nprovided values.", "id": "f12353:c0:m1"} {"signature": "def dumps(self, contentType=None, version=None):", "body": "buf = six.StringIO()ret = self.dump(buf, contentType, version)if ret is None:return buf.getvalue()return (ret[], ret[], buf.getvalue())", "docstring": "[OPTIONAL] Identical to :meth:`dump`, except the serialized form\nis returned as a string representation. As documented in\n:meth:`dump`, the return value can optionally be a three-element\ntuple of (contentType, version, data) if the provided content-type\nshould be overridden or enhanced. The default implementation just\nwraps :meth:`dump`.", "id": "f12353:c0:m2"} {"signature": "@classmethoddef load(cls, stream, contentType=None, version=None):", "body": "raise NotImplementedError()", "docstring": "Reverses the effects of the :meth:`dump` method, and returns the\nde-serialized Item from the file-like source `stream`.\n\nNote: `version` will typically be ``None``, so it should either be\nauto-determined, or not used. This is an issue in the SyncML\nprotocol, and is only here for symmetry with :meth:`dump` and as\n\"future-proofing\".", "id": "f12353:c0:m3"} {"signature": "@classmethoddef loads(cls, data, contentType=None, version=None):", "body": "buf = six.StringIO(data)return cls.load(buf, contentType, version)", "docstring": "[OPTIONAL] Identical to :meth:`load`, except the serialized form\nis provided as a string representation in `data` instead of as a\nstream. The default implementation just wraps :meth:`load`.", "id": "f12353:c0:m4"} {"signature": "def __init__(self, name=None, parent=None,created=None, modified=None, accessed=None,contentType=None, body=None, size=None,hidden=None, system=None, archived=None, delete=None,writable=None, readable=None, executable=None,*args, **kw):", "body": "super(FileItem, self).__init__(*args, **kw)self.name = nameself.parent = parentself.created = createdself.modified = modifiedself.accessed = accessedself.contentType = contentTypeself.body = bodyself.size = sizeif self.size is None and self.body is not None:self.size = len(body)self.hidden = hiddenself.system = systemself.archived = archivedself.delete = deleteself.writable = writableself.readable = readableself.executable = executable", "docstring": "FileItem constructor which takes the following optional parameters:\n\n:param name:\n\n the file name (relative to the parent folder).\n\n:param parent:\n\n the file\\'s containing folder.\n\n:param created:\n\n the file\\'s creation time, in number of seconds since\n the epoch.\n\n:param modified:\n\n the file\\'s last modification time, in number of seconds\n since the epoch.\n\n:param accessed:\n\n the file\\'s last accessed time, in number of seconds\n since the epoch.\n\n:param contentType:\n\n the file\\'s content-type.\n\n:param body:\n\n the file\\'s content.\n\n:param size:\n\n the size of file\\'s content, specified as an integer. 
If not\n specified and `body` is specified, the size will be taken from\n the `body` parameter.\n\n:param hidden:\n\n the file\\'s \"hidden\" boolean attribute.\n\n:param system:\n\n the file\\'s \"system\" boolean attribute.\n\n:param archived:\n\n the file\\'s \"archived\" boolean attribute.\n\n:param delete:\n\n the file\\'s \"delete\" boolean attribute.\n\n:param writable:\n\n the file\\'s \"writable\" boolean attribute.\n\n:param readable:\n\n the file\\'s \"readable\" boolean attribute.\n\n:param executable:\n\n the file\\'s \"executable\" boolean attribute.", "id": "f12357:c0:m0"} {"signature": "def dump(self, stream, contentType=None, version=None):", "body": "if contentType is None:contentType = constants.TYPE_OMADS_FILEif ctype.getBaseType(contentType) != constants.TYPE_OMADS_FILE:raise common.InvalidContentType('' % (contentType,))if version is None:version = ''if version != '':raise common.InvalidContentType('' % (version,))root = ET.Element('')if self.name is not None:ET.SubElement(root, '').text = self.namefor attr in ('', '', ''):if getattr(self, attr) is None:continueET.SubElement(root, attr).text = common.ts_iso(getattr(self, attr))if self.contentType is not None:ET.SubElement(root, '').text = self.contentTypeattrs = [attrfor attr in ('', '', '', '', '', '', '')if getattr(self, attr) is not None]if len(attrs) > :xa = ET.SubElement(root, '')for attr in attrs:ET.SubElement(xa, attr[]).text = '' if getattr(self, attr) else ''if self.body is not None:ET.SubElement(root, '').text = self.bodyif self.body is None and self.size is not None:ET.SubElement(root, '').text = str(self.size)if len(self.extensions) > :xe = ET.SubElement(root, '')for name, values in self.extensions.items():ET.SubElement(xe, '').text = namefor value in values:ET.SubElement(xe, '').text = valueET.ElementTree(root).write(stream)return (constants.TYPE_OMADS_FILE + '', '')", "docstring": "Serializes this FileItem to a byte-stream and writes it to the\nfile-like object `stream`. 
`contentType` and `version` must be one\nof the supported content-types, and if not specified, will default\nto ``application/vnd.omads-file``.", "id": "f12357:c0:m2"} {"signature": "@classmethoddef load(cls, stream, contentType=None, version=None):", "body": "if contentType is None:contentType = constants.TYPE_OMADS_FILEif ctype.getBaseType(contentType) == constants.TYPE_OMADS_FOLDER:from .folder import FolderItemreturn FolderItem.load(stream, contentType, version)if ctype.getBaseType(contentType) != constants.TYPE_OMADS_FILE:raise common.InvalidContentType('' % (contentType,))if version is None:version = ''if version != '':raise common.InvalidContentType('' % (version,))ret = FileItem()data = stream.read()xdoc = ET.fromstring(data)if xdoc.tag != '':raise common.InvalidContent(''% (xdoc.tag,))ret.name = xdoc.findtext('')ret.body = xdoc.findtext('')ret.size = xdoc.findtext('')if ret.body is not None:ret.size = len(ret.body)elif ret.size is not None:ret.size = int(ret.size)for attr in ('', '', ''):val = xdoc.findtext(attr)if val is not None:setattr(ret, attr, int(common.parse_ts_iso(val)))for attr in ('', '', '', '','', '', ''):val = xdoc.findtext('' + attr[])if val is not None:setattr(ret, attr, val.lower() == '')return ret", "docstring": "Reverses the effects of the :meth:`dump` method, creating a FileItem\nfrom the specified file-like `stream` object.", "id": "f12357:c0:m3"} {"signature": "def __init__(self, name=None, parent=None,created=None, modified=None, accessed=None,role=None,hidden=None, system=None, archived=None, delete=None,writable=None, readable=None, executable=None,*args, **kw):", "body": "super(FolderItem, self).__init__(*args, **kw)self.name = nameself.parent = parentself.created = createdself.modified = modifiedself.accessed = accessedself.role = roleself.hidden = hiddenself.system = systemself.archived = archivedself.delete = deleteself.writable = writableself.readable = readableself.executable = executable", "docstring": "FolderItem constructor which takes the following optional parameters:\n\n:param name:\n\n the folder name (relative to the parent folder).\n\n:param parent:\n\n the folder\\'s containing folder.\n\n:param created:\n\n the folder\\'s creation time, in number of seconds since\n the epoch.\n\n:param modified:\n\n the folder\\'s last modification time, in number of seconds\n since the epoch.\n\n:param accessed:\n\n the folder\\'s last accessed time, in number of seconds\n since the epoch.\n\n:param role:\n\n the folder\\'s role, primarily used when dealing with collections\n of emails.\n\n:param hidden:\n\n the folder\\'s \"hidden\" boolean attribute.\n\n:param system:\n\n the folder\\'s \"system\" boolean attribute.\n\n:param archived:\n\n the folder\\'s \"archived\" boolean attribute.\n\n:param delete:\n\n the folder\\'s \"delete\" boolean attribute.\n\n:param writable:\n\n the folder\\'s \"writable\" boolean attribute.\n\n:param readable:\n\n the folder\\'s \"readable\" boolean attribute.\n\n:param executable:\n\n the folder\\'s \"executable\" boolean attribute.", "id": "f12358:c0:m0"} {"signature": "def dump(self, stream, contentType=None, version=None):", "body": "if contentType is None:contentType = constants.TYPE_OMADS_FOLDERif ctype.getBaseType(contentType) != constants.TYPE_OMADS_FOLDER:raise common.InvalidContentType('' % (contentType,))if version is None:version = ''if version != '':raise common.InvalidContentType('' % (version,))root = ET.Element('')if self.name is not None:ET.SubElement(root, '').text = self.namefor attr in ('', '', ''):if 
getattr(self, attr) is None:continueET.SubElement(root, attr).text = common.ts_iso(getattr(self, attr))if self.role is not None:ET.SubElement(root, '').text = self.roleattrs = [attrfor attr in ('', '', '', '', '', '', '')if getattr(self, attr) is not None]if len(attrs) > :xa = ET.SubElement(root, '')for attr in attrs:ET.SubElement(xa, attr[]).text = '' if getattr(self, attr) else ''if len(self.extensions) > :xe = ET.SubElement(root, '')for name, values in self.extensions.items():ET.SubElement(xe, '').text = namefor value in values:ET.SubElement(xe, '').text = valueET.ElementTree(root).write(stream)return (constants.TYPE_OMADS_FOLDER + '', '')", "docstring": "Serializes this FolderItem to a byte-stream and writes it to the\nfile-like object `stream`. `contentType` and `version` must be one\nof the supported content-types, and if not specified, will default\nto ``application/vnd.omads-folder``.", "id": "f12358:c0:m2"} {"signature": "@classmethoddef load(cls, stream, contentType=None, version=None):", "body": "if contentType is None:contentType = constants.TYPE_OMADS_FOLDERif ctype.getBaseType(contentType) == constants.TYPE_OMADS_FILE:return FileItem.load(stream, contentType, version)if ctype.getBaseType(contentType) != constants.TYPE_OMADS_FOLDER:raise common.InvalidContentType('' % (contentType,))if version is None:version = ''if version != '':raise common.InvalidContentType('' % (version,))ret = FolderItem()xdoc = ET.fromstring(stream.read())if xdoc.tag != '':raise common.InvalidContent(''% (xdoc.tag,))ret.name = xdoc.findtext('')ret.role = xdoc.findtext('')for attr in ('', '', ''):val = xdoc.findtext(attr)if val is not None:setattr(ret, attr, int(common.parse_ts_iso(val)))for attr in ('', '', '', '','', '', ''):val = xdoc.findtext('' + attr[])if val is not None:setattr(ret, attr, val.lower() == '')return ret", "docstring": "Reverses the effects of the :meth:`dump` method, creating a FileItem\nfrom the specified file-like `stream` object.", "id": "f12358:c0:m3"} {"signature": "def __init__(self, name=None, body=None, *args, **kw):", "body": "super(NoteItem, self).__init__(*args, **kw)self.name = nameself.body = body", "docstring": "NoteItem constructor which takes attributes `name` and `body`.", "id": "f12359:c0:m0"} {"signature": "def dump(self, stream, contentType=None, version=None):", "body": "if contentType is None or contentType == constants.TYPE_TEXT_PLAIN:stream.write(self.body)returnif contentType == constants.TYPE_SIF_NOTE:root = ET.Element('')ET.SubElement(root, '').text = ''if self.name is not None:ET.SubElement(root, '').text = self.nameif self.body is not None:ET.SubElement(root, '').text = self.bodyfor name, values in self.extensions.items():for value in values:ET.SubElement(root, name).text = valueET.ElementTree(root).write(stream)returnraise common.InvalidContentType('' % (contentType,))", "docstring": "Serializes this NoteItem to a byte-stream and writes it to the\nfile-like object `stream`. 
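
The NoteItem records above dump either plain text or an XML note document, and load reverses that: for plain text the note name is taken from the first line with any file extension stripped, while for XML the fields are read back with findtext. A small sketch of the load side, assuming 'Subject' and 'Body' element names and a '.txt' extension rule, since those literals are masked in the records:

import io
import re
import xml.etree.ElementTree as ET

def load_note(stream, content_type='text/plain'):
    # Two loading paths, mirroring the records above. Tag names and the
    # extension pattern are assumptions.
    if content_type == 'text/plain':
        data = stream.read()
        name = data.split('\n')[0]
        name = re.sub(r'\.txt$', '', name, flags=re.IGNORECASE).strip()
        return {'name': name, 'body': data}
    root = ET.parse(stream).getroot()
    return {'name': root.findtext('Subject'), 'body': root.findtext('Body')}

print(load_note(io.StringIO('shopping.txt\nmilk, eggs')))
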
`contentType` and `version` must be one\nof the supported content-types, and if not specified, will default\nto ``text/plain``.", "id": "f12359:c0:m1"} {"signature": "@classmethoddef load(cls, stream, contentType=None, version=None):", "body": "if contentType is None or contentType == constants.TYPE_TEXT_PLAIN:data = stream.read()name = data.split('')[]name = re.compile(r'', re.IGNORECASE).sub('', name).strip()return NoteItem(name=name, body=data)if contentType == constants.TYPE_SIF_NOTE:data = ET.parse(stream).getroot()ret = NoteItem(name=data.findtext(''), body=data.findtext(''))for child in data:if child.tag in ('', '', ''):continueret.addExtension(child.tag, child.text)return retraise common.InvalidContentType('' % (contentType,))", "docstring": "Reverses the effects of the :meth:`dump` method, creating a NoteItem\nfrom the specified file-like `stream` object.", "id": "f12359:c0:m2"} {"signature": "def scan(self, store):", "body": "if self.ignoreRoot is None:self.ignoreRoot = re.compile('' % (re.escape(self.engine.syncSubdir),))dbnotes = list(self.engine.model.NoteItem.q())dbnames = dict((e.name, e) for e in dbnotes)fsnotes = list(self._scandir(''))fsnames = dict((e.name, e) for e in fsnotes)for fsent in fsnames.values():if fsent.name in dbnames and dbnames[fsent.name].sha256 == fsent.sha256:log.debug('', fsent.name)del dbnames[fsent.name]del fsnames[fsent.name]dbskip = []for dbent in dbnames.values():if dbent.id in dbskip or dbent.name in fsnames:continuefor fsent in fsnames.values():if fsent.sha256 != dbent.sha256 or fsent.name not in dbnames:continuelog.debug('', fsent.name, dbent.name)dbother = dbnames[fsent.name]del dbnames[dbent.name]del dbnames[fsent.name]del fsnames[fsent.name]dbskip.append(dbother.id)store.registerChange(dbent.id, pysyncml.ITEM_DELETED)for key, val in fsent.items():setattr(dbother, key, val)if self.engine.options.syncFilename:store.registerChange(dbother.id, pysyncml.ITEM_MODIFIED)breakdbskip = []for dbent in dbnames.values():if dbent.id in dbskip:continuefor fsent in fsnames.values():if fsent.sha256 != dbent.sha256:continuelog.debug('', dbent.name, fsent.name)del dbnames[dbent.name]del fsnames[fsent.name]for key, val in fsent.items():setattr(dbent, key, val)if self.engine.options.syncFilename:store.registerChange(dbent.id, pysyncml.ITEM_MODIFIED)breakfor fsent in fsnames.values():if fsent.name in dbnames:log.debug('', fsent.name)dbent = dbnames[fsent.name]del dbnames[fsent.name]store.registerChange(dbent.id, pysyncml.ITEM_MODIFIED)else:log.debug('', fsent.name)dbent = self.engine.model.NoteItem()self.engine.dbsession.add(dbent)store.registerChange(dbent.id, pysyncml.ITEM_ADDED)for key, val in fsent.items():setattr(dbent, key, val)del fsnames[fsent.name]for dbent in dbnames.values():store.registerChange(dbent.id, pysyncml.ITEM_DELETED)self.engine.dbsession.add(dbent)", "docstring": "Scans the local files for changes (either additions, modifications or\ndeletions) and reports them to the `store` object, which is expected to\nimplement the :class:`pysyncml.Store` interface.", "id": "f12362:c1:m1"} {"signature": "def hook(name):", "body": "def hookTarget(wrapped):if not hasattr(wrapped, ''):wrapped.__hook__ = [name]else:wrapped.__hook__.append(name)return wrappedreturn hookTarget", "docstring": "Decorator used to tag a method that should be used as a hook for the\nspecified `name` hook type.", "id": "f12363:m1"} {"signature": "def __init__(self,appLabel = None,appDisplay = None,appModelVersion = None,defaultDevID = None,defaultListen = ,devinfoParams = 
dict(),storeParams = dict(),agent = None,hooks = None,*args, **kw):", "body": "super(CommandLineSyncEngine, self).__init__()self.appLabel = appLabelself.appDisplay = appDisplayself.appModelVersion = appModelVersionself.defaultDevID = defaultDevIDself.defaultListen = defaultListenself.devinfoParams = devinfoParamsself.storeParams = storeParamsself.agent = agentself.dataDir = Noneself._hooks = dict()if self.defaultDevID is None:self.defaultDevID = '' % (self.appLabel, uuid.getnode(), time.time())for meth in dir(self):meth = getattr(self, meth)if not callable(meth) or not hasattr(meth, ''):continuefor name in meth.__hook__:self.addHook(name, meth)for name, funcs in hooks or []:for func in funcs:self.addHook(name, func)", "docstring": "The CommandLineClient base constructor accepts the following parameters:\n\n:param appLabel:\n\n A short unique identifier for this application, typically set to\n the program\\'s name, for example \"sync-notes\". This label should not\n contain any special characters, especially those that are not allowed\n as part of a filename. Amongst other things, it is used to:\n\n * generate the default device ID, if not specified,\n * create application-specific configuration directory/file names,\n * default datastore URI,\n * and much more.\n\n:param appDisplay:\n\n The name of the application when displayed to the user, for\n example \"Note Synchronizer\".\n\nThe CommandLineClient also has the following additional attributes:\n\n:param dataDir:\n\n The directory that contains all of the database files - this\n is usually taken care of by one of the pre-existing subclasses.\n It is expected to end with a \"/\".\n\nTODO: document all options...", "id": "f12363:c1:m0"} {"signature": "def addHook(self, name, callable):", "body": "if name not in self._hooks:self._hooks[name] = []self._hooks[name].append(callable)", "docstring": "Subscribes `callable` to listen to events of `name` type. 
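
The constructor above registers hooks by scanning its own attributes for callables that carry a __hook__ marker, which the hook(name) decorator attaches, and addHook simply appends each callable to a per-name list. A self-contained sketch of that registration pattern, with illustrative class and hook names:

class Engine:
    def __init__(self):
        self._hooks = {}
        # Find methods tagged by the decorator below and register them
        # under each hook name they declare.
        for attr in dir(self):
            meth = getattr(self, attr)
            if callable(meth) and hasattr(meth, '__hook__'):
                for name in meth.__hook__:
                    self.add_hook(name, meth)

    def add_hook(self, name, callable_):
        self._hooks.setdefault(name, []).append(callable_)

    def call_hooks(self, name, *args):
        for func in self._hooks.get(name, []):
            func(*args)

def hook(name):
    # Tag the wrapped function so Engine.__init__ can discover it.
    def hook_target(wrapped):
        wrapped.__hook__ = getattr(wrapped, '__hook__', []) + [name]
        return wrapped
    return hook_target

class MyEngine(Engine):
    @hook('adapter.create')
    def on_adapter_create(self, ctx):
        print('adapter created:', ctx)

MyEngine().call_hooks('adapter.create', 'ctx-object')
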
The\nparameters passed to `callable` are dependent on the specific\nevent being triggered.", "id": "f12363:c1:m1"} {"signature": "def _makeAdapter(self):", "body": "self._callHooks('')context = pysyncml.Context(storage='' % (self.dataDir,),owner=None, autoCommit=True)self._callHooks('', context)adapter = context.Adapter()if hasattr(self, '') and self.serverConf.policy is not None:adapter.conflictPolicy = self.serverConf.policyif self.options.name is not None or self.appDisplay is not None:adapter.name = self.options.name or self.appDisplayif adapter.devinfo is None:log.info('')else:if self.options.devid is not None and self.options.devid != adapter.devinfo.devID:log.info('')adapter.devinfo = Noneif adapter.devinfo is None:devinfoParams = dict(devID = self.options.devid or self.defaultDevID,devType = pysyncml.DEVTYPE_SERVER if self.options.server elsepysyncml.DEVTYPE_WORKSTATION,manufacturerName = '',modelName = self.appLabel,softwareVersion = pysyncml.version,hierarchicalSync = self.agent.hierarchicalSync if self.agent is not None else False,)if self.devinfoParams is not None:devinfoParams.update(self.devinfoParams)adapter.devinfo = context.DeviceInfo(**devinfoParams)self._callHooks('', context, adapter)if not self.options.server:if adapter.peer is None:if self.options.remote is None:self.options.remote = input('')if self.options.username is None:self.options.username = input('')if len(self.options.username) <= :self.options.username = Nonelog.info('')else:if self.options.remote is not None:if self.options.remote != adapter.peer.urlor self.options.username != adapter.peer.usernameor self.options.password != adapter.peer.password:log.info('')adapter.peer = Noneif adapter.peer is None:auth = Noneif self.options.username is not None:auth = pysyncml.NAMESPACE_AUTH_BASICif self.options.password is None:self.options.password = getpass.getpass('')adapter.peer = context.RemoteAdapter(url = self.options.remote,auth = auth,username = self.options.username,password = self.options.password,)self._callHooks('', context, adapter, adapter.peer)uri = self.storeParams.get('', self.appLabel)if uri in adapter.stores:store = adapter.stores[uri]store.agent = self.agentelse:storeParams = dict(uri = uri,displayName = self.options.name or self.appDisplay,agent = self.agent,maxObjSize = None)if self.storeParams is not None:storeParams.update(self.storeParams)store = adapter.addStore(context.Store(**storeParams))self._callHooks('', context, adapter, store)if self.options.local:def locprint(msg):print(msg)else:locprint = log.infodef showChanges(changes, prefix):for c in changes:if c.state != pysyncml.ITEM_DELETED:item = self.agent.getItem(c.itemID)else:item = '' % (c.itemID,)locprint('' % (prefix, item, pysyncml.state2string(c.state)))if self.options.server:peers = adapter.getKnownPeers()if len(peers) > :locprint('')else:locprint('')for peer in peers:for puri, pstore in list(peer.stores.items()):if pstore.binding is None or pstore.binding.uri != store.uri:continuechanges = list(pstore.getRegisteredChanges())if len(changes) <= :locprint('' % (peer.devID, puri))else:locprint('' % (peer.devID, puri))showChanges(changes, '')else:if store.peer is None:locprint('')else:changes = list(store.peer.getRegisteredChanges())if len(changes) <= :locprint('')else:locprint('')showChanges(changes, '')self._callHooks('', context, adapter)return (context, adapter)", "docstring": "Creates a tuple of ( Context, Adapter ) based on the options\nspecified by `self.options`. 
The Context is the pysyncml.Context created for\nthe storage location specified in `self.options`, and the Adapter is a newly\ncreated Adapter if a previously created one was not found.", "id": "f12363:c1:m10"} {"signature": "def configure(self, argv=None):", "body": "self._setupOptions()self._parseOptions(argv)self._setupLogging()self._setupModel()self.dbsession.commit()return self", "docstring": "Configures this engine based on the options array passed into\n`argv`. If `argv` is ``None``, then ``sys.argv`` is used instead.\nDuring configuration, the command line options are merged with\npreviously stored values. Then the logging subsystem and the\ndatabase model are initialized, and all storable settings are\nserialized to configurations files.", "id": "f12363:c1:m14"} {"signature": "def run(self, stdout=sys.stdout, stderr=sys.stderr):", "body": "if self.options.local or self.options.describe:context, adapter = self._makeAdapter()if self.options.describe:self.describe(stdout)adapter.describe(stdout)self.dbsession.rollback()return if self.options.server:return self._runServer(stdout, stderr)return self._runClient(stdout, stderr)", "docstring": "Runs this SyncEngine by executing one of the following functions\n(as controlled by command-line options or stored parameters):\n\n* Display local pending changes.\n* Describe local configuration.\n* Run an HTTP server and engage server-side mode.\n* Connect to a remote SyncML peer and engage client-side mode.\n\nNOTE: when running in the first two modes, all database interactions\nare rolled back in order to keep the SyncEngine idempotent.", "id": "f12363:c1:m15"} {"signature": "def __init__(self, syncSubdir='', defaultDir=None,*args, **kw):", "body": "super(DirectorySyncEngine, self).__init__(*args, **kw)self.syncSubdir = syncSubdirself.defaultDir = defaultDirself.rootDir = None", "docstring": "In addition to the :class:`CommandLineSyncEngine` constructor parameters,\nthe `DirectorySyncEngine` accepts the following:\n\n:param syncSubdir:\n\n Specifies the name of the immediate subdirectory of the base\n directory that should be created to contain configuration, state\n and synchronization data. Removal of this directory will reset\n all client/server states, and synchronization will need to resume\n via a \"slow-sync\". The application should ignore this directory\n when manipulating any data. The default is ``\".sync\"``.\n\n:param defaultDir:\n\n If specified, will allow the user to invoke the application without\n needing to identify the directory to synchronize, and instead will\n default to this value. If used, this `CommandLineSyncEngine` begins to\n resemble how the :class:`LocalUserSyncEngine` operates, but diverges in\n the fact that the synchronization data is kept in the same directory\n as the synchronized items.\n\nIn addition to the :class:`CommandLineSyncEngine` attributes,\nthe `DirectorySyncEngine` also provides the following:\n\n:param rootDir:\n\n The path (potentially either relative or absolute) to the\n directory under control by this synchronization engine. 
The path,\n if valid, ends with a slash (\"/\").", "id": "f12363:c2:m0"} {"signature": "def __init__(self,*args, **kw):", "body": "super(LocalUserSyncEngine, self).__init__(*args, **kw)raise NotImplementedError()", "docstring": "TODO: document & implement...", "id": "f12363:c3:m0"} {"signature": "def toSyncML(self, nodeName=None, uniqueVerCt=False):", "body": "if uniqueVerCt:ret = []for v in self.versions:tmp = ET.Element(nodeName or '')ET.SubElement(tmp, '').text = self.ctypeET.SubElement(tmp, '').text = vret.append(tmp)return retret = ET.Element(nodeName or '')ET.SubElement(ret, '').text = self.ctypefor v in self.versions:ET.SubElement(ret, '').text = vreturn ret", "docstring": "Returns an ElementTree node representing this ContentTypeInfo. If\n`nodeName` is not None, then it will be used as the containing\nelement node name (this is useful, for example, to differentiate\nbetween a standard content-type and a preferred content-type). If\n`uniqueVerCt` is True, then an array of elements will be returned\ninstead of a single element with multiple VerCT elements (for\ncontent-types that support multiple versions).", "id": "f12370:c0:m4"} {"signature": "def __init__(self, ctype=None, versions=None,preferred=False, transmit=True, receive=True,*args, **kw):", "body": "super(ContentTypeInfo, self).__init__(*args, **kw)self.ctype = ctypeif isinstance(versions, basestring):versions = [versions]self.versions = versionsself.preferred = preferredself.transmit = transmitself.receive = receive", "docstring": "The ContentTypeInfo constructor supports the following parameters:\n\n:param ctype:\n\n specifies the content-type string, for example ``\\'text/plain\\'``.\n\n:param versions:\n\n a version string (or list thereof) of the specified `ctype` that\n are supported, for example ``[\\'1.0\\', \\'1.1\\']``.\n\n:param preferred:\n\n boolean specifying whether or not this is the preferred\n content-type. Note that only one ContentTypeInfo can be marked\n as being preferred.\n\n:param transmit:\n\n boolean specifying whether or not the Agent can `transmit` this\n content-type, i.e. a call to :meth:`pysyncml.Agent.dumpItem\n ` with this content-type\n will succeed.\n\n:param receive:\n\n boolean specifying whether or not the Agent can `receive` this\n content-type, i.e. 
a call to :meth:`pysyncml.Agent.loadItem\n ` with this content-type\n will succeed.", "id": "f12370:c1:m0"} {"signature": "def commands2tree(self, adapter, session, commands):", "body": "hdrcmd = commands[]commands = commands[:]if hdrcmd.name != constants.CMD_SYNCHDR:raise common.InternalError(''% (hdrcmd.name, constants.CMD_SYNCHDR))if hdrcmd.version != constants.SYNCML_VERSION_1_2:raise common.FeatureNotSupported('' % (hdrcmd.version,))xsync = ET.Element(constants.NODE_SYNCML)xhdr = ET.SubElement(xsync, hdrcmd.name)if hdrcmd.version == constants.SYNCML_VERSION_1_2:ET.SubElement(xhdr, '').text = constants.SYNCML_DTD_VERSION_1_2ET.SubElement(xhdr, '').text = hdrcmd.versionET.SubElement(xhdr, '').text = hdrcmd.sessionIDET.SubElement(xhdr, '').text = hdrcmd.msgIDxsrc = ET.SubElement(xhdr, '')ET.SubElement(xsrc, '').text = hdrcmd.sourceif hdrcmd.sourceName is not None:ET.SubElement(xsrc, '').text = hdrcmd.sourceNamextgt = ET.SubElement(xhdr, '')ET.SubElement(xtgt, '').text = hdrcmd.targetif hdrcmd.targetName is not None:ET.SubElement(xtgt, '').text = hdrcmd.targetNameif hdrcmd.respUri is not None:ET.SubElement(xhdr, '').text = hdrcmd.respUriif hdrcmd.auth is not None and not session.authAccepted:if hdrcmd.auth != constants.NAMESPACE_AUTH_BASIC:raise NotImplementedError('' % (common.auth2string(hdrcmd.auth),))if hdrcmd.auth == constants.NAMESPACE_AUTH_BASIC:xcred = ET.SubElement(xhdr, '')xmeta = ET.SubElement(xcred, '')ET.SubElement(xmeta, '', {'': constants.NAMESPACE_METINF}).text = ''ET.SubElement(xmeta, '', {'': constants.NAMESPACE_METINF}).text = hdrcmd.authET.SubElement(xcred, '').text = base64.b64encode('' % (adapter.peer.username, adapter.peer.password))if hdrcmd.maxMsgSize is not None or hdrcmd.maxObjSize is not None:xmeta = ET.SubElement(xhdr, '')if hdrcmd.maxMsgSize is not None:ET.SubElement(xmeta, '', {'': constants.NAMESPACE_METINF}).text = hdrcmd.maxMsgSizeif hdrcmd.maxObjSize is not None:ET.SubElement(xmeta, '', {'': constants.NAMESPACE_METINF}).text = hdrcmd.maxObjSizexbody = ET.SubElement(xsync, constants.NODE_SYNCBODY)for cmdidx, cmd in enumerate(commands):xcmd = ET.SubElement(xbody, cmd.name)if cmd.cmdID is not None:ET.SubElement(xcmd, '').text = cmd.cmdIDif cmd.name == constants.CMD_ALERT:ET.SubElement(xcmd, '').text = str(cmd.data)xitem = ET.SubElement(xcmd, '')ET.SubElement(ET.SubElement(xitem, ''), '').text = cmd.sourceET.SubElement(ET.SubElement(xitem, ''), '').text = cmd.targetif cmd.lastAnchor is not Noneor cmd.nextAnchor is not Noneor cmd.maxObjSize is not None:xmeta = ET.SubElement(xitem, '')xanch = ET.SubElement(xmeta, '', {'': constants.NAMESPACE_METINF})if cmd.lastAnchor is not None:ET.SubElement(xanch, '').text = cmd.lastAnchorif cmd.nextAnchor is not None:ET.SubElement(xanch, '').text = cmd.nextAnchorif cmd.maxObjSize is not None:ET.SubElement(xmeta, '', {'': constants.NAMESPACE_METINF}).text = cmd.maxObjSizecontinueif cmd.name == constants.CMD_STATUS:ET.SubElement(xcmd, '').text = cmd.msgRefET.SubElement(xcmd, '').text = cmd.cmdRefET.SubElement(xcmd, '').text = cmd.statusOfif cmd.sourceRef is not None:ET.SubElement(xcmd, '').text = cmd.sourceRefif cmd.targetRef is not None:ET.SubElement(xcmd, '').text = cmd.targetRefET.SubElement(xcmd, '').text = cmd.statusCodeif cmd.nextAnchor is not None or cmd.lastAnchor is not None:xdata = ET.SubElement(ET.SubElement(xcmd, ''), '')xanch = ET.SubElement(xdata, '', {'': constants.NAMESPACE_METINF})if cmd.lastAnchor is not None:ET.SubElement(xanch, '').text = cmd.lastAnchorif cmd.nextAnchor is not None:ET.SubElement(xanch, 
'').text = cmd.nextAnchorif cmd.errorCode is not None or cmd.errorMsg is not None:xerr = ET.SubElement(xcmd, '')if cmd.errorCode is not None:ET.SubElement(xerr, '').text = cmd.errorCodeif cmd.errorMsg is not None:ET.SubElement(xerr, '').text = cmd.errorMsgif cmd.errorTrace is not None:ET.SubElement(xerr, '').text = cmd.errorTracecontinueif cmd.name in [constants.CMD_GET, constants.CMD_PUT]:ET.SubElement(ET.SubElement(xcmd, ''), '',{'': constants.NAMESPACE_METINF}).text = cmd.typeif cmd.source is not None or cmd.target is not None or cmd.data:xitem = ET.SubElement(xcmd, '')if cmd.source is not None:xsrc = ET.SubElement(xitem, '')ET.SubElement(xsrc, '').text = cmd.sourceET.SubElement(xsrc, '').text = cmd.sourceif cmd.target is not None:xtgt = ET.SubElement(xitem, '')ET.SubElement(xtgt, '').text = cmd.targetET.SubElement(xtgt, '').text = cmd.targetif cmd.data is not None:if isinstance(cmd.data, str):ET.SubElement(xitem, '').text = cmd.dataelse:ET.SubElement(xitem, '').append(cmd.data)continueif cmd.name == constants.CMD_RESULTS:ET.SubElement(xcmd, '').text = cmd.msgRefET.SubElement(xcmd, '').text = cmd.cmdRefET.SubElement(ET.SubElement(xcmd, ''), '',{'': constants.NAMESPACE_METINF}).text = cmd.typexitem = ET.SubElement(xcmd, '')xsrc = ET.SubElement(xitem, '')ET.SubElement(xsrc, '').text = cmd.sourceET.SubElement(xsrc, '').text = cmd.sourceif cmd.data is not None:if isinstance(cmd.data, str):ET.SubElement(xitem, '').text = cmd.dataelse:ET.SubElement(xitem, '').append(cmd.data)continueif cmd.name == constants.CMD_SYNC:ET.SubElement(ET.SubElement(xcmd, ''), '').text = cmd.sourceET.SubElement(ET.SubElement(xcmd, ''), '').text = cmd.targetif cmd.noc is not None:ET.SubElement(xcmd, '').text = cmd.nocif cmd.data is not None:for scmd in cmd.data:xscmd = ET.SubElement(xcmd, scmd.name)if scmd.cmdID is not None:ET.SubElement(xscmd, '').text = scmd.cmdIDif scmd.type is not None or( scmd.format is not None and scmd.format != constants.FORMAT_AUTO ):xsmeta = ET.SubElement(xscmd, '')if scmd.format is not None and scmd.format != constants.FORMAT_AUTO:ET.SubElement(xsmeta, '', {'': constants.NAMESPACE_METINF}).text = scmd.formatif scmd.type is not None:ET.SubElement(xsmeta, '', {'': constants.NAMESPACE_METINF}).text = scmd.typexsitem = ET.SubElement(xscmd, '')if scmd.source is not None:ET.SubElement(ET.SubElement(xsitem, ''), '').text = scmd.sourceif scmd.sourceParent is not None:ET.SubElement(ET.SubElement(xsitem, ''), '').text = scmd.sourceParentif scmd.target is not None:ET.SubElement(ET.SubElement(xsitem, ''), '').text = scmd.targetif scmd.targetParent is not None:ET.SubElement(ET.SubElement(xsitem, ''), '').text = scmd.targetParentif scmd.data is not None:if isinstance(scmd.data, str):ET.SubElement(xsitem, '').text = scmd.dataelse:ET.SubElement(xsitem, '').append(scmd.data)continueif cmd.name == constants.CMD_MAP:ET.SubElement(ET.SubElement(xcmd, ''), '').text = cmd.sourceET.SubElement(ET.SubElement(xcmd, ''), '').text = cmd.targetif cmd.sourceItem is not None or cmd.targetItem is not None:xitem = ET.SubElement(xcmd, constants.CMD_MAPITEM)if cmd.sourceItem is not None:ET.SubElement(ET.SubElement(xitem, ''), '').text = cmd.sourceItemif cmd.targetItem is not None:ET.SubElement(ET.SubElement(xitem, ''), '').text = cmd.targetItemcontinueif cmd.name == constants.CMD_FINAL:if cmdidx + < len(commands):raise common.InternalError('' % (cmd.name,))continueraise common.InternalError('' % (cmd.name,))return xsync", "docstring": "Consumes state.Command commands and converts them to an ET protocol tree", "id": 
"f12372:c0:m5"} {"signature": "def tree2commands(self, adapter, session, lastcmds, xsync):", "body": "assert xsync.tag == constants.NODE_SYNCMLassert len(xsync) == assert xsync[].tag == constants.CMD_SYNCHDRassert xsync[].tag == constants.NODE_SYNCBODYversion = xsync[].findtext('')if version != constants.SYNCML_VERSION_1_2:raise common.FeatureNotSupported(''% (version, constants.SYNCML_VERSION_1_2))verdtd = xsync[].findtext('')if verdtd != constants.SYNCML_DTD_VERSION_1_2:raise common.FeatureNotSupported(''% (verdtd, constants.SYNCML_DTD_VERSION_1_2))ret = self.initialize(adapter, session, xsync)hdrcmd = ret[]if session.isServer:log.debug('',hdrcmd.target, hdrcmd.sessionID, hdrcmd.msgID)else:log.debug('',lastcmds[].target, lastcmds[].sessionID, lastcmds[].msgID)try:return self._tree2commands(adapter, session, lastcmds, xsync, ret)except Exception as e:if not session.isServer:raisecode = '' % (e.__class__.__module__, e.__class__.__name__)msg = ''.join(traceback.format_exception_only(type(e), e)).strip()log.exception('', msg)return [hdrcmd,state.Command(name = constants.CMD_STATUS,cmdID = '',msgRef = session.pendingMsgID,cmdRef = ,sourceRef = xsync[].findtext(''),targetRef = xsync[].findtext(''),statusOf = constants.CMD_SYNCHDR,statusCode = constants.STATUS_COMMAND_FAILED,errorCode = code,errorMsg = msg,errorTrace = ''.join(traceback.format_exception(type(e), e, sys.exc_info()[])),),state.Command(name=constants.CMD_FINAL)]", "docstring": "Consumes an ET protocol tree and converts it to state.Command commands", "id": "f12372:c0:m6"} {"signature": "def page_jump(self, count):", "body": "for i in range(count):self.get_data()", "docstring": "Page through data quickly. Used to resume failed job or jump to another\npage\n:param count: The number of pages to iterate over", "id": "f12376:c1:m4"} {"signature": "def get_data(self):", "body": "pass", "docstring": "Obtain the data to iterate over from the API\n:return:", "id": "f12376:c1:m5"} {"signature": "def log_error(self, e):", "body": "if not environ.get(''):self.log_function(e)if hasattr(e, '') and hasattr(e.response, ''):self.log_function(e.response.text)", "docstring": "Print errors. 
Stop travis-ci from leaking api keys\n\n:param e: The error\n:return: None", "id": "f12377:c0:m2"} {"signature": "def _sleep(self, seconds):", "body": "for _ in range(int(seconds)):if not self.force_stop:sleep()", "docstring": "Sleep between requests, but don't force asynchronous code to wait\n\n:param seconds: The number of seconds to sleep\n:return: None", "id": "f12377:c0:m3"} {"signature": "def get(self, *args, **kwargs):", "body": "try:req_func = self.session.get if self.session else requests.getreq = req_func(*args, **kwargs)req.raise_for_status()self.failed_last = Falsereturn reqexcept requests.exceptions.RequestException as e:self.log_error(e)for i in range(, self.num_retries):sleep_time = self.retry_rate * iself.log_function(\"\" % sleep_time)self._sleep(sleep_time)try:req = requests.get(*args, **kwargs)req.raise_for_status()self.log_function(\"\")return reqexcept requests.exceptions.RequestException:self.log_function(\"\")if not self.failed_last:self.failed_last = Trueraise ApiError(e)else:raise FatalApiError(e)", "docstring": "An interface for get requests that handles errors more gracefully to\nprevent data loss", "id": "f12377:c0:m5"} {"signature": "def node_edge(self, node, edge, fields=None, params=None):", "body": "if fields:fields = \"\".join(fields)parameters = {\"\": fields,\"\": self.key}parameters = self.merge_params(parameters, params)return self.api_call('' % (node, edge), parameters)", "docstring": ":param node:\n:param edge:\n:param fields:\n:param params:\n:return:", "id": "f12377:c3:m2"} {"signature": "def post(self, post_id, fields=None, **params):", "body": "if fields:fields = \"\".join(fields)parameters = {\"\": fields,\"\": self.key}parameters = self.merge_params(parameters, params)return self.api_call('' % post_id, parameters)", "docstring": ":param post_id:\n:param fields:\n:param params:\n:return:", "id": "f12377:c3:m3"} {"signature": "def page_posts(self, page_id, after='', post_type=\"\",include_hidden=False, fields=None, **params):", "body": "if fields:fields = \"\".join(fields)parameters = {\"\": self.key,\"\": after,\"\": fields,\"\": include_hidden}parameters = self.merge_params(parameters, params)return self.api_call('' % (page_id, post_type), parameters)", "docstring": ":param page_id:\n:param after:\n:param post_type: Can be 'posts', 'feed', 'tagged', 'promotable_posts'\n:param include_hidden:\n:param fields:\n:param params:\n:return:", "id": "f12377:c3:m4"} {"signature": "def post_comments(self, post_id, after='', order=\"\",filter=\"\", fields=None, **params):", "body": "if fields:fields = \"\".join(fields)parameters = {\"\": self.key,\"\": after,\"\": order,\"\": fields,\"\": filter}parameters = self.merge_params(parameters, params)return self.api_call('' % post_id, parameters)", "docstring": ":param post_id:\n:param after:\n:param order: Can be 'ranked', 'chronological', 'reverse_chronological'\n:param filter: Can be 'stream', 'toplevel'\n:param fields: Can be 'id', 'application', 'attachment', 'can_comment',\n'can_remove', 'can_hide', 'can_like', 'can_reply_privately', 'comments',\n'comment_count', 'created_time', 'from', 'likes', 'like_count',\n'live_broadcast_timestamp', 'message', 'message_tags', 'object',\n'parent', 'private_reply_conversation', 'user_likes'\n:param params:\n:return:", "id": "f12377:c3:m5"} {"signature": "def flatten(dictionary, parent_key=False, separator=''):", "body": "items = []for key, value in dictionary.items():new_key = str(parent_key) + separator + key if parent_key else keyif isinstance(value, 
collections.MutableMapping):items.extend(flatten(value, new_key, separator).items())elif isinstance(value, list):for k, v in enumerate(value):items.extend(flatten({str(k): v}, new_key).items())else:items.append((new_key, value))return dict(items)", "docstring": "Turn a nested dictionary into a flattened dictionary\n\n:param dictionary: The dictionary to flatten\n:param parent_key: The string to prepend to dictionary's keys\n:param separator: The string used to separate flattened keys\n:return: A flattened dictionary", "id": "f12379:m0"} {"signature": "def fill_gaps(list_dicts):", "body": "field_names = [] for datum in list_dicts:for key in datum.keys():if key not in field_names:field_names.append(key)for datum in list_dicts:for key in field_names:if key not in datum:datum[key] = ''return list(field_names), list_dicts", "docstring": "Fill gaps in a list of dictionaries. Add empty keys to dictionaries in\nthe list that don't contain other entries' keys\n\n:param list_dicts: A list of dictionaries\n:return: A list of field names, a list of dictionaries with identical keys", "id": "f12379:m1"} {"signature": "def to_csv(data, field_names=None, filename='',overwrite=True,write_headers=True, append=False, flat=True,primary_fields=None, sort_fields=True):", "body": "if not overwrite and path.isfile(filename):raise FileExistsError('')write_type = '' if not append else ''if flat or not field_names:data = [flatten(datum) for datum in data]if not field_names:field_names, data = fill_gaps(data)if sort_fields:field_names.sort()if primary_fields:for key in primary_fields[::-]:field_names.insert(, field_names.pop(field_names.index(key)))data = sorted(data, key=lambda k: k[field_names[]], reverse=True)with open(filename, write_type, encoding='') as f:writer = csv.DictWriter(f, fieldnames=field_names, lineterminator='')if not append or write_headers:writer.writeheader()for datum in data:for key in list(datum.keys()):if key not in field_names:del datum[key]elif type(datum[key]) is str:datum[key] = datum[key].strip()datum[key] = str(datum[key])writer.writerow(datum)", "docstring": "DEPRECATED Write a list of dicts to a csv file\n\n:param data: List of dicts\n:param field_names: The list column names\n:param filename: The name of the file\n:param overwrite: Overwrite the file if exists\n:param write_headers: Write the headers to the csv file\n:param append: Write new rows if the file exists\n:param flat: Flatten the dictionary before saving\n:param primary_fields: The first columns of the csv file\n:param sort_fields: Sort the field names alphabetically\n:return: None", "id": "f12379:m2"} {"signature": "def to_json(data, filename='', indent=):", "body": "with open(filename, '') as f:f.write(json.dumps(data, indent=indent))", "docstring": "Write an object to a json file\n\n:param data: The object\n:param filename: The name of the file\n:param indent: The indentation of the file\n:return: None", "id": "f12379:m3"} {"signature": "def save_file(filename, source, folder=\"\"):", "body": "r = requests.get(source, stream=True)if r.status_code == :if not path.isdir(folder):makedirs(folder, exist_ok=True)with open(\"\" % (folder, filename), '') as f:for chunk in r:f.write(chunk)", "docstring": "Download and save a file at path\n\n:param filename: The name of the file\n:param source: The location of the resource online\n:param folder: The directory the file will be saved in\n:return: None", "id": "f12379:m4"} {"signature": "def reload(clear=False):", "body": "Config().reload(clear)", "docstring": "Shortcut method for 
calling reload.", "id": "f12385:m0"} {"signature": "def setting(name, default=None, allow_default=True):", "body": "return Setting(name, default, allow_default)", "docstring": "Shortcut method for getting a setting descriptor.\n\n See :class:`pyconfig.Setting` for details.", "id": "f12385:m1"} {"signature": "def get(name, default=None, allow_default=True):", "body": "return Config().get(name, default, allow_default=allow_default)", "docstring": "Shortcut method for getting a setting value.\n\n :param str name: Setting key name.\n :param default: Default value of setting if it's not explicitly\n set. Defaults to `None`\n :param bool allow_default: If true, use the parameter default as\n default if the key is not set, else raise\n :exc:`KeyError`. Defaults to `None`\n :raises: :exc:`KeyError` if allow_default is false and the setting is\n not set.", "id": "f12385:m2"} {"signature": "def set(name, value):", "body": "Config().set(name, value)", "docstring": "Shortcut method to change a setting.", "id": "f12385:m3"} {"signature": "def reload_hook(func):", "body": "Config().add_reload_hook(func)return func", "docstring": "Decorator for registering a reload hook.", "id": "f12385:m4"} {"signature": "def clear():", "body": "Config().clear()", "docstring": "Shortcut for clearing all settings.", "id": "f12385:m5"} {"signature": "def deferred():", "body": "pass", "docstring": "Import this to indicate that a module should be deferred to load its\nsettings last. This allows you to override some settings from a pyconfig\nplugin with another plugin in a reliable manner.\n\nThis is a special instance that pyconfig looks for by name. You must use\nthe import style ``from pyconfig import deferred`` for this to work.\n\nIf you are not deferring a module, you may use ``deferred`` as a variable\nname without confusing or conflicting with pyconfig's behavior.\n\nExample::\n\n from pyconfig import Namespace, deferred\n\n my_settings = Namespace()\n my_settings.some_setting = 'overridden by deferred'", "id": "f12385:m6"} {"signature": "def env(key, default):", "body": "value = os.environ.get(key, None)if value is not None:log.info('', key.lower().replace('', ''), value)return valuekey = key.lower().replace('', '')value = get(key)if value is not None:return valuereturn default", "docstring": "Helper to try to get a setting from the environment, or pyconfig, or\nfinally use a provided default.", "id": "f12385:m7"} {"signature": "def env_key(key, default):", "body": "env = key.upper().replace('', '')return os.environ.get(env, default)", "docstring": "Try to get `key` from the environment.\n\nThis mutates `key` to replace dots with underscores and makes it all\nuppercase.\n\n my.database.host => MY_DATABASE_HOST", "id": "f12385:m8"} {"signature": "def set(self, name, value):", "body": "if not self.settings.get('', False):name = name.lower()log.info(\"\", name, repr(value))with self.mut_lock:self.settings[name] = value", "docstring": "Changes a setting value.\n\n This implements a locking mechanism to ensure some level of thread\n safety.\n\n :param str name: Setting key name.\n :param value: Setting value.", "id": "f12385:c1:m1"} {"signature": "def _update(self, conf_dict, base_name=None):", "body": "for name in conf_dict:if name.startswith(''):continuevalue = conf_dict[name]if value is Namespace:continueif base_name:name = base_name + '' + nameif isinstance(value, Namespace):for name, value in value.iteritems(name):self.set(name, value)elif callable(value):value = value()if value is not None:self.set(name, 
value)else:self.set(name, value)", "docstring": "Updates the current configuration with the values in `conf_dict`.\n\n :param dict conf_dict: Dictionary of key value settings.\n :param str base_name: Base namespace for setting keys.", "id": "f12385:c1:m2"} {"signature": "def load(self, clear=False):", "body": "if clear:self.settings = {}defer = []for conf in pkg_resources.iter_entry_points(''):if conf.attrs:raise RuntimeError(\"\")mod_name = conf.module_namebase_name = conf.name if conf.name != '' else Nonelog.info(\"\", mod_name)mod_dict = runpy.run_module(mod_name)if mod_dict.get('', None) is deferred:log.info(\"\", mod_name)mod_dict.pop('')defer.append((mod_name, base_name, mod_dict))continueself._update(mod_dict, base_name)for mod_name, base_name, mod_dict in defer:log.info(\"\", mod_name)self._update(mod_dict, base_name)if etcd().configured:mod_dict = etcd().load()if mod_dict:self._update(mod_dict)mod_dict = Nonetry:mod_dict = runpy.run_module('')except ImportError:passexcept ValueError as err:if getattr(err, '') != '':raisemod_name = ''if sys.version_info < (, ):loader, code, fname = runpy._get_module_details(mod_name)else:_, loader, code, fname = runpy._get_module_details(mod_name)mod_dict = runpy._run_code(code, {}, {}, mod_name, fname, loader,pkg_name=None)if mod_dict:log.info(\"\")self._update(mod_dict)self.call_reload_hooks()", "docstring": "Loads all the config plugin modules to build a working configuration.\n\nIf there is a ``localconfig`` module on the python path, it will be\nloaded last, overriding other settings.\n\n:param bool clear: Clear out the previous settings before loading", "id": "f12385:c1:m3"} {"signature": "def call_reload_hooks(self):", "body": "for hook in self.reload_hooks:hook()", "docstring": "Calls all the reload hooks that are registered.", "id": "f12385:c1:m4"} {"signature": "def get(self, name, default, allow_default=True):", "body": "if not self.settings.get('', False):name = name.lower()if name not in self.settings:if not allow_default:raise LookupError(''.format(name=name))self.settings[name] = defaultreturn self.settings[name]", "docstring": "Return a setting value.\n\n :param str name: Setting key name.\n :param default: Default value of setting if it's not explicitly\n set.\n :param bool allow_default: If true, use the parameter default as\n default if the key is not set, else raise\n :exc:`LookupError`\n :raises: :exc:`LookupError` if allow_default is false and the setting is\n not set.", "id": "f12385:c1:m5"} {"signature": "def reload(self, clear=False):", "body": "log.info(\"\")self.load(clear)", "docstring": "Reloads the configuration.", "id": "f12385:c1:m6"} {"signature": "def add_reload_hook(self, hook):", "body": "self.reload_hooks.append(hook)", "docstring": "Registers a reload hook that's called when :meth:`load` is called.\n\n :param function hook: Hook to register.", "id": "f12385:c1:m7"} {"signature": "def clear(self):", "body": "self.settings = {}", "docstring": "Clears all the cached configuration.", "id": "f12385:c1:m8"} {"signature": "def init(self, hosts=None, cacert=None, client_cert=None, client_key=None):", "body": "try:import etcdself.module = etcdexcept ImportError:passif not self.module:returnself._parse_jetconfig()hosts = env('', hosts)protocol = env('', None)cacert = env('', cacert)client_cert = env('', client_cert)client_key = env('', client_key)username = Nonepassword = Noneauth = env('', None)if auth:auth = auth.split('')auth.append('')username = auth[]password = auth[]hosts = self._parse_hosts(hosts)if hosts is 
None:returnkw = {}kw[''] = Trueif protocol:kw[''] = protocolif username:kw[''] = usernameif password:kw[''] = passwordif cacert:kw[''] = os.path.abspath(cacert)if client_cert and client_key:kw[''] = ((os.path.abspath(client_cert),os.path.abspath(client_key)))elif client_cert:kw[''] = os.path.abspath(client_cert)if cacert or client_cert or client_key:kw[''] = ''self.client = self.module.Client(hosts, **kw)", "docstring": "Handle creating the new etcd client instance and other business.\n\n:param hosts: Host string or list of hosts (default: `'127.0.0.1:2379'`)\n:param cacert: CA cert filename (optional)\n:param client_cert: Client cert filename (optional)\n:param client_key: Client key filename (optional)\n:type ca: str\n:type cert: str\n:type key: str", "id": "f12385:c2:m2"} {"signature": "def load(self, prefix=None, depth=None):", "body": "prefix = prefix or self.prefixprefix = '' + prefix.strip('') + ''if depth is None:depth = self.inherit_depthif not self.configured:log.debug(\"\")returnif self.watching:log.info(\"\", prefix)self.start_watching()log.info(\"\", prefix)try:result = self.client.get(prefix)except self.module.EtcdKeyNotFound:result = Noneif not result:log.info(\"\")return {}update = {}for item in result.children:key = item.keyvalue = item.valuetry:value = pytool.json.from_json(value)except:passif not self.case_sensitive:key = key.lower()if key.startswith(prefix):key = key[len(prefix):]update[key] = valueinherited = Config().settings.get(self.inherit_key,update.get(self.inherit_key, None))if depth > and inherited:log.info(\"\")inherited = self.load(inherited, depth - ) or {}inherited.update(update)update = inheritedreturn update", "docstring": "Return a dictionary of settings loaded from etcd.", "id": "f12385:c2:m3"} {"signature": "def get_watcher(self):", "body": "if not self.watching:raise StopIteration()return self.client.eternal_watch(self.prefix, recursive=True)", "docstring": "Return a etcd watching generator which yields events as they happen.", "id": "f12385:c2:m4"} {"signature": "def start_watching(self):", "body": "if self.watcher and self.watcher.is_alive():returnself.watcher = Watcher()self.watcher.start()", "docstring": "Begins watching etcd for changes.", "id": "f12385:c2:m5"} {"signature": "def _parse_hosts(self, hosts):", "body": "if hosts is None:returnif isinstance(hosts, six.string_types):hosts = [host.strip() for host in hosts.split('')]hosts = [host.split('') for host in hosts]hosts = [(host[], int(host[])) for host in hosts]return tuple(hosts)", "docstring": "Return hosts parsed into a tuple of tuples.\n\n:param hosts: String or list of hosts", "id": "f12385:c2:m6"} {"signature": "def _parse_jetconfig(self):", "body": "conf = env('', None)if not conf:returnimport urlparseauth = Noneport = Noneconf = conf.split('').pop()entry = urlparse.urlparse(conf)scheme = entry.schemehost = entry.netloc or entry.path if '' in host:auth, host = host.split('')if '' in host:host, port = host.split('')if not port and scheme == '':port = ''if scheme:os.environ[''] = schemeif auth:os.environ[''] = authif port:host = host + \"\" + portos.environ[''] = host", "docstring": "Undocumented cross-compatability functionality with jetconfig\n(https://github.com/shakefu/jetconfig) that is very sloppy.", "id": "f12385:c2:m7"} {"signature": "def main():", "body": "parser = argparse.ArgumentParser(description=\"\"\"\")target_group = parser.add_mutually_exclusive_group()target_group.add_argument('', '',help=\"\",metavar='')target_group.add_argument('', 
'',help=\"\",metavar='')parser.add_argument('', '',help=\"\",action='')parser.add_argument('', '',help=\"\",action='')key_group = parser.add_mutually_exclusive_group()key_group.add_argument('', '',help=\"\",action='')key_group.add_argument('', '',help=\"\",action='')parser.add_argument('', '',help=\"\",action='')parser.add_argument('', '',help=\"\",action='')parser.add_argument('', '',help=\"\" % bool(pygments),action='', default=bool(pygments),const=(not bool(pygments)))args = parser.parse_args()if args.color and not pygments:_error(\"\"\"\")if args.module:_handle_module(args)if args.filename:_handle_file(args)", "docstring": "Main script for `pyconfig` command.", "id": "f12386:m0"} {"signature": "def _handle_module(args):", "body": "module = _get_module_filename(args.module)if not module:_error(\"\", args.module)elif isinstance(module, Unparseable):_error(\"\", args.module)_parse_and_output(module, args)", "docstring": "Handles the -m argument.", "id": "f12386:m1"} {"signature": "def _handle_file(args):", "body": "filename = args.filename_parse_and_output(filename, args)", "docstring": "Handle the --file argument.", "id": "f12386:m2"} {"signature": "def _error(msg, *args):", "body": "print(msg % args, file=sys.stderr)sys.exit()", "docstring": "Print an error message and exit.\n\n:param msg: A message to print\n:type msg: str", "id": "f12386:m3"} {"signature": "def _get_module_filename(module):", "body": "module = module.split('')package = ''.join(module[:-])module = module[-]try:if not package:module = __import__(module)else:package = __import__(package, fromlist=[module])module = getattr(package, module, None)filename = getattr(module, '', None)if not filename:return Unparseable()if filename.endswith(''):filename = filename[:-]if not os.path.exists(filename) and os.path.isfile(filename):return Unparseable()if filename.endswith(''):filename = filename[:-]return filenameexcept ImportError:return", "docstring": "Return the filename of `module` if it can be imported.\n\nIf `module` is a package, its directory will be returned.\n\nIf it cannot be imported ``None`` is returned.\n\nIf the ``__file__`` attribute is missing, or the module or package is a\ncompiled egg, then an :class:`Unparseable` instance is returned, since the\nsource can't be retrieved.\n\n:param module: A module name, such as ``'test.test_config'``\n:type module: str", "id": "f12386:m4"} {"signature": "def _parse_and_output(filename, args):", "body": "relpath = os.path.dirname(filename)if os.path.isfile(filename):calls = _parse_file(filename, relpath)elif os.path.isdir(filename):calls = _parse_dir(filename, relpath)else:_error(\"\", filename)if not calls:_error(\"\")if args.load_configs:keys = set()for call in calls:keys.add(call.key)conf = pyconfig.Config()for key, value in conf.settings.items():if key in keys:continuecalls.append(_PyconfigCall('', key, value, [None]*))_output(calls, args)", "docstring": "Parse `filename` appropriately and then output calls according to the\n`args` specified.\n\n:param filename: A file or directory\n:param args: Command arguments\n:type filename: str", "id": "f12386:m5"} {"signature": "def _output(calls, args):", "body": "if args.natural_sort or args.source:calls = sorted(calls, key=lambda c: (c.filename, c.lineno))else:calls = sorted(calls, key=lambda c: c.key)out = []if args.only_keys:keys = set()for call in calls:if call.key in keys:continueout.append(_format_call(call, args))keys.add(call.key)out = ''.join(out)if args.color:out = _colorize(out)print(out, end='')returnkeys = set()for 
call in calls:if call.default:keys.add(call.key)for call in calls:if not args.all and not call.default and call.key in keys:continueout.append(_format_call(call, args))out = ''.join(out)if args.color:out = _colorize(out)print(out, end='')", "docstring": "Outputs `calls`.\n\n:param calls: List of :class:`_PyconfigCall` instances\n:param args: :class:`~argparse.ArgumentParser` instance\n:type calls: list\n:type args: argparse.ArgumentParser", "id": "f12386:m6"} {"signature": "def _format_call(call, args):", "body": "out = ''if args.source:out += call.annotation() + ''if args.only_keys:out += call.get_key()return outif args.view_call:out += call.as_call()elif args.load_configs:out += call.as_live()else:out += call.as_namespace()return out", "docstring": "Return `call` formatted appropriately for `args`.\n\n:param call: A pyconfig call object\n:param args: Arguments from the command\n:type call: :class:`_PyconfigCall`", "id": "f12386:m7"} {"signature": "def _colorize(output):", "body": "if not pygments:return outputreturn pygments.highlight(output,pygments.lexers.PythonLexer(),pygments.formatters.Terminal256Formatter(style=''))", "docstring": "Return `output` colorized with Pygments, if available.", "id": "f12386:m8"} {"signature": "def _parse_dir(directory, relpath):", "body": "relpath = os.path.dirname(relpath)pyconfig_calls = []for root, dirs, files in os.walk(directory):for filename in files:if not filename.endswith(''):continuefilename = os.path.join(root, filename)pyconfig_calls.extend(_parse_file(filename, relpath))return pyconfig_calls", "docstring": "Return a list of :class:`_PyconfigCall` from recursively parsing\n`directory`.\n\n:param directory: Directory to walk looking for python files\n:param relpath: Path to make filenames relative to\n:type directory: str\n:type relpath: str", "id": "f12386:m9"} {"signature": "def _parse_file(filename, relpath=None):", "body": "with open(filename, '') as source:source = source.read()pyconfig_calls = []try:nodes = ast.parse(source, filename=filename)except SyntaxError:return []first_lines = source[:]match = re.match('', first_lines)if match:try:coding = match.group()source = source.decode(coding)except:print(\"\", filename)try:source = source.split('')except:print(\"\", filename);return []if relpath:filename = os.path.relpath(filename, relpath)for call in ast.walk(nodes):if not isinstance(call, _ast.Call):continuefunc = call.funcif not isinstance(call.func, _ast.Attribute):continueif getattr(func.value, '', None) != '':continueif func.attr not in ['', '', '']:continueargs = []if call.args:arg = call.args[]if isinstance(arg, _ast.Str):args.append(arg.s)else:args.append(_map_arg(arg))for arg in call.args[:]:args.append(_map_arg(arg))line = (filename, source[call.lineno-], call.lineno, call.col_offset)call = _PyconfigCall(func.attr, args[], args[:], line)pyconfig_calls.append(call)return pyconfig_calls", "docstring": "Return a list of :class:`_PyconfigCall` from parsing `filename`.\n\n:param filename: A file to parse\n:param relpath: Relative directory to strip (optional)\n:type filename: str\n:type relpath: str", "id": "f12386:m10"} {"signature": "def _map_arg(arg):", "body": "if isinstance(arg, _ast.Str):return repr(arg.s)elif isinstance(arg, _ast.Num):return arg.nelif isinstance(arg, _ast.Name):name = arg.idif name == '':return Trueelif name == '':return Falseelif name == '':return Nonereturn nameelse:return Unparseable()", "docstring": "Return `arg` appropriately parsed or mapped to a usable value.", "id": "f12386:m11"} {"signature": "def 
as_namespace(self, namespace=None):", "body": "key = self.keyif namespace and key.startswith(namespace):key = key[len(namespace) + :]return \"\" % (self.get_key(), self._default() or NotSet())", "docstring": "Return this call as if it were being assigned in a pyconfig namespace.\n\nIf `namespace` is specified and matches the top level of this call's\n:attr:`key`, then that section of the key will be removed.", "id": "f12386:c2:m1"} {"signature": "def as_live(self):", "body": "key = self.get_key()default = pyconfig.get(key)if default:default = repr(default)else:default = self._default() or NotSet()return \"\" % (key, default)", "docstring": "Return this call as if it were being assigned in a pyconfig namespace,\nbut load the actual value currently available in pyconfig.", "id": "f12386:c2:m2"} {"signature": "def as_call(self):", "body": "default = self._default()default = '' + default if default else ''return \"\" % (self.method, self.get_key(), default)", "docstring": "Return this call as it is called in its source.", "id": "f12386:c2:m3"} {"signature": "def annotation(self):", "body": "if not self.source:return \"\"return \"\" % (self.filename, self.lineno)", "docstring": "Return this call's source annotation.", "id": "f12386:c2:m4"} {"signature": "def get_key(self):", "body": "if not isinstance(self.key, Unparseable):return self.keyline = self.source[self.col_offset:]regex = re.compile('''''')match = regex.match(line)if not match:return Unparseable()return \"\" % match.group()", "docstring": "Return the call key, even if it has to be parsed from the source.", "id": "f12386:c2:m5"} {"signature": "def _source_call_only(self):", "body": "line = self.source[self.col_offset:]regex = re.compile('''''')match = regex.match(line)if not match:return self.sourcereturn match.group()", "docstring": "Return the source line stripped down to just the pyconfig call.", "id": "f12386:c2:m6"} {"signature": "def _default_value_only(self):", "body": "line = self.source[self.col_offset:]regex = re.compile('''''')match = regex.match(line)if not match:return ''return match.group()", "docstring": "Return only the default value, if there is one.", "id": "f12386:c2:m7"} {"signature": "def _default(self):", "body": "try:iter(self.default)except TypeError:return repr(self.default)for v in self.default:if isinstance(v, Unparseable):default = self._default_value_only()if default:return defaultreturn ''.join(str(v) for v in self.default)", "docstring": "Return the default argument, formatted nicely.", "id": "f12386:c2:m8"} {"signature": "def receive(self):", "body": "return self._socket.recv().decode()", "docstring": "Receives information from the Netcat socket.", "id": "f12396:c0:m2"} {"signature": "def _receive_until(self, s):", "body": "return self._socket.recv_until(s)", "docstring": "Recieve data from the socket until the given substring is observed.\nData in the same datagram as the substring, following the substring,\nwill not be returned and will be cached for future receives.", "id": "f12396:c0:m4"} {"signature": "def send(self, s):", "body": "self._socket.send(s.encode())return self.read()", "docstring": "Sends the given command to Niko Home Control and returns the output of\nthe system.\n\nAliases: write, put, sendall, send_all", "id": "f12396:c0:m5"} {"signature": "def set_parameter(self, key, value):", "body": "if value is None or isinstance(value, (int, float, bool)):value = str(value)if key.endswith(''):value = urlsafe_b64encode(value.encode(''))value = value.replace(b(''), b(''))self._parameters[key] = 
value", "docstring": "Set a url parameter.\n\nParameters\n----------\nkey : str\n If key ends with '64', the value provided will be automatically\n base64 encoded.", "id": "f12406:c0:m2"} {"signature": "def delete_parameter(self, key):", "body": "if key in self._parameters:del self._parameters[key]", "docstring": "Deletes the value associated with `key` from recorded parameters.\n\nParameters\n----------\nkey : str\n\nRaises\n------\nKeyError\n If key doesn't exist in recorded parameters.", "id": "f12406:c0:m3"} {"signature": "def __str__(self):", "body": "query = {}for key in self._parameters:query[key] = self._parameters[key]path = self._pathif self._include_library_param:query[\"\"] = \"\" + __version__if path.startswith(\"\"):try:path = quote(path, safe=\"\")except KeyError:path = quote(path.encode(''), safe=\"\")if not path.startswith(\"\"):path = \"\" + path if not path.startswith(\"\") and not self._str_is_ascii(path):try:path = quote(path)except KeyError:path = quote(path.encode(''))query = \"\".join((quote(key, \"\") + \"\" + quote(query[key], \"\"))for key in sorted(query))if self._sign_key:delim = \"\" if query == \"\" else \"\"signing_value = self._sign_key + path + delim + querysignature = hashlib.md5(signing_value.encode('')).hexdigest()if query:query += \"\" + signatureelse:query = \"\" + signaturereturn urlparse.urlunparse([self._scheme,self._host,path,\"\",query,\"\", ])", "docstring": "Generate URL from the recorded parameters.\n\nReturns\n-------\nstr", "id": "f12406:c0:m5"} {"signature": "def create_url(self, path, params={}, opts={}):", "body": "if opts:warnings.warn('',DeprecationWarning, stacklevel=)params = params or optsif self._shard_strategy == SHARD_STRATEGY_CRC:crc = zlib.crc32(path.encode('')) & index = crc % len(self._domains) domain = self._domains[index]elif self._shard_strategy == SHARD_STRATEGY_CYCLE:domain = self._domains[self._shard_next_index]self._shard_next_index = (self._shard_next_index + ) % len(self._domains)else:domain = self._domains[]scheme = \"\" if self._use_https else \"\"url_obj = UrlHelper(domain,path,scheme,sign_key=self._sign_key,include_library_param=self._include_library_param,params=params)return str(url_obj)", "docstring": "Create URL with supplied path and `opts` parameters dict.\n\nParameters\n----------\npath : str\nopts : dict\n Dictionary specifying URL parameters. Non-imgix parameters are\n added to the URL unprocessed. For a complete list of imgix\n supported parameters, visit https://docs.imgix.com/apis/url .\n (default {})\n\nReturns\n-------\nstr\n imgix URL", "id": "f12407:c0:m2"} {"signature": "def diff(iterable):", "body": "a, b = tee(iterable)next(b, None)return (i - j for i, j in izip(a, b))", "docstring": "Diff elements of a sequence:\n s -> s0 - s1, s1 - s2, s2 - s3, ...", "id": "f12409:m0"} {"signature": "def unpack(endian, fmt, data):", "body": "if fmt == '':val = struct.unpack(''.join([endian, str(len(data)), '']),data)[]else:num = len(data) // struct.calcsize(fmt)val = struct.unpack(''.join([endian, str(num), fmt]), data)if len(val) == :val = val[]return val", "docstring": "Unpack a byte string to the given format. 
If the byte string\n contains more bytes than required for the given format, the function\n returns a tuple of values.", "id": "f12409:m1"} {"signature": "def read_file_header(fd, endian):", "body": "fields = [('', '', ),('', '', ),('', '', ),('', '', )]hdict = {}for name, fmt, num_bytes in fields:data = fd.read(num_bytes)hdict[name] = unpack(endian, fmt, data)hdict[''] = hdict[''].strip()v_major = hdict[''] >> v_minor = hdict[''] & hdict[''] = '' % (v_major, v_minor)return hdict", "docstring": "Read mat 5 file header of the file fd.\n Returns a dict with header values.", "id": "f12409:m2"} {"signature": "def read_element_tag(fd, endian):", "body": "data = fd.read()mtpn = unpack(endian, '', data[:])num_bytes = mtpn >> if num_bytes > :mtpn = mtpn & if num_bytes > :raise ParseError('''')data = data[: + num_bytes]else:num_bytes = unpack(endian, '', data[:])data = Nonereturn (mtpn, num_bytes, data)", "docstring": "Read data element tag: type and number of bytes.\n If tag is of the Small Data Element (SDE) type the element data\n is also returned.", "id": "f12409:m3"} {"signature": "def read_elements(fd, endian, mtps, is_name=False):", "body": "mtpn, num_bytes, data = read_element_tag(fd, endian)if mtps and mtpn not in [etypes[mtp][''] for mtp in mtps]:raise ParseError(''.format(mtpn, ''.join(''.format(etypes[mtp][''], mtp) for mtp in mtps)))if not data:data = fd.read(num_bytes)mod8 = num_bytes % if mod8:fd.seek( - mod8, )if is_name:fmt = ''val = [unpack(endian, fmt, s)for s in data.split(b'') if s]if len(val) == :val = ''elif len(val) == :val = asstr(val[])else:val = [asstr(s) for s in val]else:fmt = etypes[inv_etypes[mtpn]]['']val = unpack(endian, fmt, data)return val", "docstring": "Read elements from the file.\n\n If list of possible matrix data types mtps is provided, the data type\n of the elements are verified.", "id": "f12409:m4"} {"signature": "def read_header(fd, endian):", "body": "flag_class, nzmax = read_elements(fd, endian, [''])header = {'': flag_class & ,'': (flag_class >> & ) == ,'': (flag_class >> & ) == ,'': (flag_class >> & ) == ,'': nzmax}header[''] = read_elements(fd, endian, [''])header[''] = len(header[''])if header[''] != :raise ParseError('')header[''] = read_elements(fd, endian, [''], is_name=True)return header", "docstring": "Read and return the matrix header.", "id": "f12409:m5"} {"signature": "def read_var_header(fd, endian):", "body": "mtpn, num_bytes = unpack(endian, '', fd.read())next_pos = fd.tell() + num_bytesif mtpn == etypes['']['']:data = fd.read(num_bytes)dcor = zlib.decompressobj()fd_var = BytesIO(dcor.decompress(data))del datafd = fd_varif dcor.flush() != b'':raise ParseError('')mtpn, num_bytes = unpack(endian, '', fd.read())if mtpn != etypes['']['']:raise ParseError(''''.format(etypes[''][''], mtpn))header = read_header(fd, endian)return header, next_pos, fd", "docstring": "Read full header tag.\n\n Return a dict with the parsed header, the file position of next tag,\n a file like object for reading the uncompressed element data.", "id": "f12409:m6"} {"signature": "def squeeze(array):", "body": "if len(array) == :array = array[]return array", "docstring": "Return array contents if array contains only one element.\n Otherwise, return the full array.", "id": "f12409:m7"} {"signature": "def read_numeric_array(fd, endian, header, data_etypes):", "body": "if header['']:raise ParseError('')data = read_elements(fd, endian, data_etypes)if not isinstance(data, Sequence):return datarowcount = header[''][]colcount = header[''][]array = [list(data[c * rowcount + r] 
for c in range(colcount))for r in range(rowcount)]return squeeze(array)", "docstring": "Read a numeric matrix.\n Returns an array with rows of the numeric matrix.", "id": "f12409:m8"} {"signature": "def read_cell_array(fd, endian, header):", "body": "array = [list() for i in range(header[''][])]for row in range(header[''][]):for col in range(header[''][]):vheader, next_pos, fd_var = read_var_header(fd, endian)varray = read_var_array(fd_var, endian, vheader)array[row].append(varray)fd.seek(next_pos)if header[''][] == :return squeeze(array[])return squeeze(array)", "docstring": "Read a cell array.\n Returns an array with rows of the cell array.", "id": "f12409:m9"} {"signature": "def read_struct_array(fd, endian, header):", "body": "field_name_length = read_elements(fd, endian, [''])if field_name_length > :raise ParseError(''.format(field_name_length))fields = read_elements(fd, endian, [''], is_name=True)if isinstance(fields, basestring):fields = [fields]empty = lambda: [list() for i in range(header[''][])]array = {}for row in range(header[''][]):for col in range(header[''][]):for field in fields:vheader, next_pos, fd_var = read_var_header(fd, endian)data = read_var_array(fd_var, endian, vheader)if field not in array:array[field] = empty()array[field][row].append(data)fd.seek(next_pos)for field in fields:rows = array[field]for i in range(header[''][]):rows[i] = squeeze(rows[i])array[field] = squeeze(array[field])return array", "docstring": "Read a struct array.\n Returns a dict with fields of the struct array.", "id": "f12409:m10"} {"signature": "def read_var_array(fd, endian, header):", "body": "mc = inv_mclasses[header['']]if mc in numeric_class_etypes:return read_numeric_array(fd, endian, header,set(compressed_numeric).union([numeric_class_etypes[mc]]))elif mc == '':raise ParseError('')elif mc == '':return read_char_array(fd, endian, header)elif mc == '':return read_cell_array(fd, endian, header)elif mc == '':return read_struct_array(fd, endian, header)elif mc == '':raise ParseError('')elif mc == '':raise ParseError('')elif mc == '':raise ParseError('')", "docstring": "Read variable array (of any supported type).", "id": "f12409:m12"} {"signature": "def eof(fd):", "body": "b = fd.read()end = len(b) == if not end:curpos = fd.tell()fd.seek(curpos - )return end", "docstring": "Determine if end-of-file is reached for file fd.", "id": "f12409:m13"} {"signature": "def loadmat(filename, meta=False):", "body": "if isinstance(filename, basestring):fd = open(filename, '')else:fd = filenamefd.seek()tst_str = fd.read()little_endian = (tst_str[:] == b'')endian = ''if (sys.byteorder == '' and little_endian) or(sys.byteorder == '' and not little_endian):passelif sys.byteorder == '':endian = '>'else:endian = ''maj_ind = int(little_endian)maj_val = ord(tst_str[maj_ind]) if ispy2 else tst_str[maj_ind]if maj_val != :raise ParseError('')mdict = {}if meta:fd.seek()mdict[''] = read_file_header(fd, endian)mdict[''] = []while not eof(fd):hdr, next_position, fd_var = read_var_header(fd, endian)name = hdr['']if name in mdict:raise ParseError(''.format(name))mdict[name] = read_var_array(fd_var, endian, hdr)if meta and hdr['']:mdict[''].append(name)fd.seek(next_position)fd.close()return mdict", "docstring": "Load data from MAT-file:\n\n data = loadmat(filename, meta=False)\n\n The filename argument is either a string with the filename, or\n a file like object.\n\n The returned parameter ``data`` is a dict with the variables found\n in the MAT file.\n\n Call ``loadmat`` with parameter meta=True to include meta data, 
such\n as file header information and list of globals.\n\n A ``ParseError`` exception is raised if the MAT-file is corrupt or\n contains a data type that cannot be parsed.", "id": "f12409:m14"} {"signature": "def diff(iterable):", "body": "a, b = tee(iterable)next(b, None)return (i - j for i, j in izip(a, b))", "docstring": "Diff elements of a sequence:\n s -> s0 - s1, s1 - s2, s2 - s3, ...", "id": "f12410:m0"} {"signature": "def write_elements(fd, mtp, data, is_name=False):", "body": "fmt = etypes[mtp]['']if isinstance(data, Sequence):if fmt == '' or is_name:if isinstance(data, bytes):if is_name and len(data) > :raise ValueError(''''.format(data))fmt = ''.format(len(data))data = (data,)else:fmt = ''.join(''.format(len(s)) for s in data)else:l = len(data)if l == :fmt = ''if l > :fmt = ''.format(l, fmt)else:data = (data,)num_bytes = struct.calcsize(fmt)if num_bytes <= :if num_bytes < :fmt += ''.format( - num_bytes)fd.write(struct.pack('' + fmt, etypes[mtp][''],*chain([num_bytes], data)))returnfd.write(struct.pack('', etypes[mtp][''], num_bytes))mod8 = num_bytes % if mod8:fmt += ''.format( - mod8)fd.write(struct.pack(fmt, *data))", "docstring": "Write data element tag and data.\n\n The tag contains the array type and the number of\n bytes the array data will occupy when written to file.\n\n If data occupies 4 bytes or less, it is written immediately\n as a Small Data Element (SDE).", "id": "f12410:m2"} {"signature": "def write_var_header(fd, header):", "body": "fd.write(struct.pack('', etypes[''][''], ))fd.write(struct.pack('', mclasses[header['']]))write_elements(fd, '', header[''])write_elements(fd, '', asbytes(header['']), is_name=True)", "docstring": "Write variable header", "id": "f12410:m3"} {"signature": "def write_var_data(fd, data):", "body": "fd.write(struct.pack('', etypes[''][''], len(data)))fd.write(data)", "docstring": "Write variable data to file", "id": "f12410:m4"} {"signature": "def write_compressed_var_array(fd, array, name):", "body": "bd = BytesIO()write_var_array(bd, array, name)data = zlib.compress(bd.getvalue())bd.close()fd.write(struct.pack('', etypes[''][''], len(data)))fd.write(data)", "docstring": "Write compressed variable data to file", "id": "f12410:m5"} {"signature": "def write_numeric_array(fd, header, array):", "body": "bd = BytesIO()write_var_header(bd, header)if not isinstance(array, basestring) and header[''][] > :array = list(chain.from_iterable(izip(*array)))write_elements(bd, header[''], array)data = bd.getvalue()bd.close()write_var_data(fd, data)", "docstring": "Write the numeric array", "id": "f12410:m6"} {"signature": "def write_var_array(fd, array, name=''):", "body": "header, array = guess_header(array, name)mc = header['']if mc in numeric_class_etypes:return write_numeric_array(fd, header, array)elif mc == '':return write_char_array(fd, header, array)elif mc == '':return write_cell_array(fd, header, array)elif mc == '':return write_struct_array(fd, header, array)else:raise ValueError(''.format(mc))", "docstring": "Write variable array (of any supported type)", "id": "f12410:m10"} {"signature": "def guess_header(array, name=''):", "body": "header = {}if isinstance(array, Sequence) and len(array) == :array = array[]if isinstance(array, basestring):header.update({'': '', '': '','': ( if len(array) > else , len(array))})elif isinstance(array, Sequence) and len(array) == :header.update({'': '', '': '', '': (, )})elif isinstance(array, Mapping):field_types = [type(j) for j in array.values()]field_lengths = [ if isinstance(j, (basestring, int, 
float))else len(j) for j in array.values()]if len(field_lengths) == :equal_lengths = Trueequal_types = Trueelse:equal_lengths = not any(diff(field_lengths))equal_types = all([field_types[] == f for f in field_types])header.update({'': '','': (,field_lengths[] if equal_lengths and equal_types else )})elif isinstance(array, int):header.update({'': '', '': '', '': (, )})elif isinstance(array, float):header.update({'': '', '': '', '': (, )})elif isinstance(array, Sequence):if isarray(array, lambda i: isinstance(i, int), ):header.update({'': '', '': '','': (, len(array))})elif isarray(array, lambda i: isinstance(i, (int, float)), ):header.update({'': '', '': '','': (, len(array))})elif (isarray(array, lambda i: isinstance(i, Sequence), ) andany(diff(len(s) for s in array))):header.update({'': '','': (, len(array))})elif isarray(array, lambda i: isinstance(i, basestring), ):header.update({'': '', '': '','': (len(array), len(array[]))})elif isarray(array, lambda i: isinstance(i, Sequence), ):if any(diff(len(j) for j in array)):header.update({'': '','': (len(array), len(array[]))})elif isarray(array, lambda i: isinstance(i, int)):header.update({'': '', '': '','': (len(array), len(array[]))})elif isarray(array, lambda i: isinstance(i, (int, float))):header.update({'': '','': '','': (len(array), len(array[]))})elif isarray(array, lambda i: isinstance(i, (int, float, basestring, Sequence, Mapping))):header.update({'': '','': (, len(array))})if not header:raise ValueError('''')header[''] = namereturn header, array", "docstring": "Guess the array header information.\n Returns a header dict, with class, data type, and size information.", "id": "f12410:m12"} {"signature": "def savemat(filename, data):", "body": "if not isinstance(data, Mapping):raise ValueError('')if isinstance(filename, basestring):fd = open(filename, '')else:fd = filenamewrite_file_header(fd)for name, array in data.items():write_compressed_var_array(fd, array, name)fd.close()", "docstring": "Save data to MAT-file:\n\n savemat(filename, data)\n\n The filename argument is either a string with the filename, or\n a file like object.\n\n The parameter ``data`` shall be a dict with the variables.\n\n A ``ValueError`` exception is raised if data has invalid format, or if the\n data structure cannot be mapped to a known MAT array type.", "id": "f12410:m13"} {"signature": "def __init__(self,lex_optimize=True,yacc_optimize=True,tabfile='',yacc_debug=False,scope=None,outputdir=tempfile.gettempdir(),importlvl=,verbose=False,fail_with_exc=False):", "body": "self.verbose = verboseself.importlvl = importlvlself.lex = lexer.LessLexer()if not tabfile:tabfile = ''self.ignored = ('', '', '')self.tokens = [t for t in self.lex.tokens if t not in self.ignored]self.parser = ply.yacc.yacc(module=self,start='',debug=yacc_debug,optimize=yacc_optimize,tabmodule=tabfile,outputdir=outputdir)self.scope = scope if scope else Scope()self.stash = {}self.result = Noneself.target = Noneself.fail_with_exc = fail_with_excif fail_with_exc:self.register = ErrorRegister()else:self.register = PrintErrorRegister()", "docstring": "Parser object\n\n Kwargs:\n lex_optimize (bool): Optimize lexer\n yacc_optimize (bool): Optimize parser\n tabfile (str): Yacc tab filename\n yacc_debug (bool): yacc debug mode\n scope (Scope): Inherited scope\n outputdir (str): Output (debugging)\n importlvl (int): Import depth\n verbose (bool): Verbose mode\n fail_with_exc (bool): Throw exception on syntax error instead\n of printing to stderr", "id": "f12427:c2:m0"} {"signature": "def parse(self, 
filename=None, file=None, debuglevel=):", "body": "self.scope.push()if not file:file = filenameelse:if hasattr(file, ''):if filename is not None:raise AssertionError('')filename = file.nameelse:filename = ''self.target = filenameif self.verbose and not self.fail_with_exc:print('' % filename, file=sys.stderr)self.result = self.parser.parse(file, lexer=self.lex, debug=debuglevel)self.post_parse()self.register.close()", "docstring": "Parse file.\n kwargs:\n filename (str): File to parse\n debuglevel (int): Parser debuglevel", "id": "f12427:c2:m1"} {"signature": "def post_parse(self):", "body": "if self.result:out = []for pu in self.result:try:out.append(pu.parse(self.scope))except SyntaxError as e:self.handle_error(e, )self.result = list(utility.flatten(out))", "docstring": "Post parse cycle. nodejs version allows calls to mixins\n not yet defined or known to the parser. We defer all calls\n to mixins until after first cycle when all names are known.", "id": "f12427:c2:m2"} {"signature": "def scopemap(self):", "body": "utility.debug_print(self.result)", "docstring": "Output scopemap.", "id": "f12427:c2:m3"} {"signature": "def p_tunit(self, p):", "body": "p[] = [u for u in p[] if u]", "docstring": "tunit : unit_list", "id": "f12427:c2:m4"} {"signature": "def p_unit_list(self, p):", "body": "if isinstance(p[], list):if len(p) >= :if isinstance(p[], list):p[].extend(p[])else:p[].append(p[])else:p[] = [p[]]p[] = p[]", "docstring": "unit_list : unit_list unit\n | unit", "id": "f12427:c2:m5"} {"signature": "def p_unit(self, p):", "body": "p[] = p[]", "docstring": "unit : statement\n | variable_decl\n | block_decl\n | mixin_decl\n | call_mixin\n | import_statement", "id": "f12427:c2:m6"} {"signature": "def p_statement_aux(self, p):", "body": "p[] = Statement(list(p)[:], p.lineno())p[].parse(None)", "docstring": "statement : css_charset t_ws css_string t_semicolon\n | css_namespace t_ws css_string t_semicolon", "id": "f12427:c2:m7"} {"signature": "def p_statement_namespace(self, p):", "body": "p[] = Statement(list(p)[:], p.lineno())p[].parse(None)", "docstring": "statement : css_namespace t_ws word css_string t_semicolon", "id": "f12427:c2:m8"} {"signature": "def p_statement_import(self, p):", "body": "if self.importlvl > :raise ImportError('')if isinstance(p[], string_types):ipath = utility.destring(p[])elif isinstance(p[], list):p[] = Import(p[], p.lineno()).parse(self.scope)ipath = utility.destring(p[])elif isinstance(p[], Call):p[] = p[].parse(self.scope) ipath = utility.destring(p[][:-])fn, fe = os.path.splitext(ipath)if not fe or fe.lower() == '':try:cpath = os.path.dirname(os.path.abspath(self.target))if not fe:ipath += ''filename = \"\" % (cpath, os.sep, ipath)if os.path.exists(filename):recurse = LessParser(importlvl=self.importlvl + ,verbose=self.verbose,scope=self.scope)recurse.parse(filename=filename, debuglevel=)p[] = recurse.resultelse:err = \"\" % filenameself.handle_error(err, p.lineno(), '')p[] = Noneexcept ImportError as e:self.handle_error(e, p)else:p[] = Statement(list(p)[:], p.lineno())p[].parse(None)sys.stdout.flush()", "docstring": "import_statement : css_import t_ws string t_semicolon\n | css_import t_ws css_string t_semicolon\n | css_import t_ws css_string media_query_list t_semicolon\n | css_import t_ws fcall t_semicolon\n | css_import t_ws fcall media_query_list t_semicolon", "id": "f12427:c2:m9"} {"signature": "def p_block(self, p):", "body": "p[] = Block(list(p)[:-], p.lineno())self.scope.pop()self.scope.add_block(p[])", "docstring": "block_decl : block_open 
declaration_list brace_close", "id": "f12427:c2:m10"} {"signature": "def p_block_replace(self, p):", "body": "m = p[].parse(None)block = self.scope.blocks(m.raw())if block:p[] = block.copy_inner(self.scope)else:p[] = Deferred(p[], None, p.lineno())", "docstring": "block_decl : identifier t_semicolon", "id": "f12427:c2:m11"} {"signature": "def p_block_open(self, p):", "body": "try:p[].parse(self.scope)except SyntaxError:passp[] = p[]self.scope.current = p[]", "docstring": "block_open : identifier brace_open", "id": "f12427:c2:m12"} {"signature": "def p_block_open_media_query(self, p):", "body": "p[] = Identifier(p[]).parse(self.scope)", "docstring": "block_open : media_query_decl brace_open", "id": "f12427:c2:m13"} {"signature": "def p_font_face_open(self, p):", "body": "p[] = Identifier([p[], p[]]).parse(self.scope)", "docstring": "block_open : css_font_face t_ws brace_open", "id": "f12427:c2:m14"} {"signature": "def p_keyframe_open(self, p):", "body": "p[] = KeyframeSelector([p[]]).parse(self.scope)", "docstring": "block_open : css_keyframe_selector brace_open\n | number brace_open", "id": "f12427:c2:m15"} {"signature": "def p_mixin(self, p):", "body": "self.scope.add_mixin(Mixin(list(p)[:], p.lineno()).parse(self.scope))self.scope.pop()p[] = None", "docstring": "mixin_decl : open_mixin declaration_list brace_close", "id": "f12427:c2:m16"} {"signature": "def p_open_mixin(self, p):", "body": "p[].parse(self.scope)self.scope.current = p[]p[] = [p[], p[]]if len(p) > :p[].append(p[])else:p[].append(None)", "docstring": "open_mixin : identifier t_popen mixin_args_list t_pclose brace_open\n | identifier t_popen mixin_args_list t_pclose mixin_guard brace_open", "id": "f12427:c2:m17"} {"signature": "def p_mixin_guard(self, p):", "body": "p[] = p[]", "docstring": "mixin_guard : less_when mixin_guard_cond_list", "id": "f12427:c2:m18"} {"signature": "def p_mixin_guard_cond_list_aux(self, p):", "body": "p[].append(p[])p[].append(p[])p[] = p[]", "docstring": "mixin_guard_cond_list : mixin_guard_cond_list t_comma mixin_guard_cond\n | mixin_guard_cond_list less_and mixin_guard_cond", "id": "f12427:c2:m19"} {"signature": "def p_mixin_guard_cond_list(self, p):", "body": "p[] = [p[]]", "docstring": "mixin_guard_cond_list : mixin_guard_cond", "id": "f12427:c2:m20"} {"signature": "def p_mixin_guard_cond_rev(self, p):", "body": "p[] = utility.reverse_guard(list(p)[:-])", "docstring": "mixin_guard_cond : less_not t_popen argument mixin_guard_cmp argument t_pclose\n | less_not t_popen argument t_pclose", "id": "f12427:c2:m21"} {"signature": "def p_mixin_guard_cond(self, p):", "body": "p[] = list(p)[:-]", "docstring": "mixin_guard_cond : t_popen argument mixin_guard_cmp argument t_pclose\n | t_popen argument t_pclose", "id": "f12427:c2:m22"} {"signature": "def p_mixin_guard_cmp(self, p):", "body": "p[] = ''.join(list(p)[:])", "docstring": "mixin_guard_cmp : '>'\n | '<'\n | '='\n | '>' '='\n | '=' '<'", "id": "f12427:c2:m23"} {"signature": "def p_call_mixin(self, p):", "body": "p[].parse(None)p[] = Deferred(p[], p[], p.lineno())", "docstring": "call_mixin : identifier t_popen mixin_args_list t_pclose t_semicolon", "id": "f12427:c2:m24"} {"signature": "def p_mixin_args_arguments(self, p):", "body": "p[] = [p[]]", "docstring": "mixin_args_list : less_arguments", "id": "f12427:c2:m25"} {"signature": "def p_mixin_args_list_aux(self, p):", "body": "p[].extend([p[]])p[] = p[]", "docstring": "mixin_args_list : mixin_args_list t_comma mixin_args\n | mixin_args_list t_semicolon mixin_args", "id": "f12427:c2:m26"} 
{"signature": "def p_mixin_args_list(self, p):", "body": "p[] = [p[]]", "docstring": "mixin_args_list : mixin_args", "id": "f12427:c2:m27"} {"signature": "def p_mixin_args_aux(self, p):", "body": "p[].extend(list(p)[:])p[] = p[]", "docstring": "mixin_args : mixin_args argument", "id": "f12427:c2:m28"} {"signature": "def p_mixin_args(self, p):", "body": "p[] = [p[]]", "docstring": "mixin_args : argument\n | mixin_kwarg", "id": "f12427:c2:m29"} {"signature": "def p_mixin_args_empty(self, p):", "body": "p[] = None", "docstring": "mixin_args : empty", "id": "f12427:c2:m30"} {"signature": "def p_mixin_kwarg(self, p):", "body": "p[] = Variable(list(p)[:], p.lineno())", "docstring": "mixin_kwarg : variable t_colon mixin_kwarg_arg_list", "id": "f12427:c2:m31"} {"signature": "def p_margument_list_aux(self, p):", "body": "p[].extend(list(p)[:])p[] = p[]", "docstring": "mixin_kwarg_arg_list : mixin_kwarg_arg_list argument", "id": "f12427:c2:m32"} {"signature": "def p_margument_list(self, p):", "body": "p[] = [p[]]", "docstring": "mixin_kwarg_arg_list : argument", "id": "f12427:c2:m33"} {"signature": "def p_declaration_list(self, p):", "body": "if len(p) > :p[].extend(p[])p[] = p[]", "docstring": "declaration_list : declaration_list declaration\n | declaration\n | empty", "id": "f12427:c2:m34"} {"signature": "def p_declaration(self, p):", "body": "p[] = p[] if isinstance(p[], list) else [p[]]", "docstring": "declaration : variable_decl\n | property_decl\n | block_decl\n | mixin_decl\n | call_mixin\n | import_statement", "id": "f12427:c2:m35"} {"signature": "def p_variable_decl(self, p):", "body": "p[] = Variable(list(p)[:-], p.lineno())p[].parse(self.scope)", "docstring": "variable_decl : variable t_colon style_list t_semicolon", "id": "f12427:c2:m36"} {"signature": "def p_property_decl(self, p):", "body": "l = len(p)p[] = Property(list(p)[:-], p.lineno(l - ))", "docstring": "property_decl : prop_open style_list t_semicolon\n | prop_open style_list css_important t_semicolon\n | prop_open empty t_semicolon", "id": "f12427:c2:m37"} {"signature": "def p_property_decl_arguments(self, p):", "body": "p[] = Property([p[], [p[]]], p.lineno())", "docstring": "property_decl : prop_open less_arguments t_semicolon", "id": "f12427:c2:m38"} {"signature": "def p_prop_open_ie_hack(self, p):", "body": "p[] = (p[][], p[][])", "docstring": "prop_open : '*' prop_open", "id": "f12427:c2:m39"} {"signature": "def p_prop_open(self, p):", "body": "p[] = (p[][], '')", "docstring": "prop_open : property t_colon\n | vendor_property t_colon\n | word t_colon", "id": "f12427:c2:m40"} {"signature": "def p_style_list_aux(self, p):", "body": "p[].extend(list(p)[:])p[] = p[]", "docstring": "style_list : style_list style\n | style_list t_comma style\n | style_list t_ws style", "id": "f12427:c2:m41"} {"signature": "def p_style_list(self, p):", "body": "p[] = [p[]]", "docstring": "style_list : style", "id": "f12427:c2:m42"} {"signature": "def p_style(self, p):", "body": "p[] = p[]", "docstring": "style : expression\n | string\n | word\n | property\n | vendor_property\n | estring", "id": "f12427:c2:m43"} {"signature": "def p_identifier(self, p):", "body": "p[] = Identifier(p[], )", "docstring": "identifier : identifier_list\n | page\n | page filter", "id": "f12427:c2:m44"} {"signature": "def p_identifier_istr(self, p):", "body": "p[] = Identifier(Call([p[], p[]]), )", "docstring": "identifier : t_popen estring t_pclose", "id": "f12427:c2:m45"} {"signature": "def p_identifier_list_aux(self, p):", "body": "p[].extend([p[]])p[].extend(p[])p[] 
= p[]", "docstring": "identifier_list : identifier_list t_comma identifier_group", "id": "f12427:c2:m46"} {"signature": "def p_identifier_list(self, p):", "body": "p[] = p[]", "docstring": "identifier_list : identifier_group", "id": "f12427:c2:m47"} {"signature": "def p_identifier_list_keyframe(self, p):", "body": "p[] = list(p)[:]", "docstring": "identifier_list : css_keyframes t_ws css_ident\n | css_keyframes t_ws css_ident t_ws", "id": "f12427:c2:m48"} {"signature": "def p_identifier_list_viewport(self, p):", "body": "p[] = list(p)[:]", "docstring": "identifier_list : css_viewport\n | css_viewport t_ws", "id": "f12427:c2:m49"} {"signature": "def p_identifier_group_op(self, p):", "body": "p[].extend([p[]])if len(p) > :p[].extend(p[])p[] = p[]", "docstring": "identifier_group : identifier_group child_selector ident_parts\n | identifier_group '+' ident_parts\n | identifier_group general_sibling_selector ident_parts\n | identifier_group '*'", "id": "f12427:c2:m50"} {"signature": "def p_identifier_group(self, p):", "body": "p[] = p[]", "docstring": "identifier_group : ident_parts", "id": "f12427:c2:m51"} {"signature": "def p_ident_parts_aux(self, p):", "body": "if isinstance(p[], list):p[].extend(p[])else:p[].append(p[])p[] = p[]", "docstring": "ident_parts : ident_parts ident_part\n | ident_parts filter_group", "id": "f12427:c2:m52"} {"signature": "def p_ident_parts(self, p):", "body": "if not isinstance(p[], list):p[] = [p[]]p[] = p[]", "docstring": "ident_parts : ident_part\n | selector\n | filter_group", "id": "f12427:c2:m53"} {"signature": "def p_media_query_decl(self, p):", "body": "p[] = list(p)[:]", "docstring": "media_query_decl : css_media t_ws\n | css_media t_ws media_query_list", "id": "f12427:c2:m54"} {"signature": "def p_media_query_list_aux(self, p):", "body": "p[] = list(p)[:]", "docstring": "media_query_list : media_query_list t_comma media_query", "id": "f12427:c2:m55"} {"signature": "def p_media_query_list(self, p):", "body": "p[] = [p[]]", "docstring": "media_query_list : media_query", "id": "f12427:c2:m56"} {"signature": "def p_media_query_a(self, p):", "body": "p[] = list(p)[:]", "docstring": "media_query : media_type\n | media_type media_query_expression_list\n | not media_type\n | not media_type media_query_expression_list\n | only media_type\n | only media_type media_query_expression_list", "id": "f12427:c2:m57"} {"signature": "def p_media_query_b(self, p):", "body": "p[] = list(p)[:]", "docstring": "media_query : media_query_expression media_query_expression_list\n | media_query_expression", "id": "f12427:c2:m58"} {"signature": "def p_media_query_expression_list_aux(self, p):", "body": "p[] = list(p)[:]", "docstring": "media_query_expression_list : media_query_expression_list and media_query_expression\n | and media_query_expression", "id": "f12427:c2:m59"} {"signature": "def p_media_query_expression(self, p):", "body": "p[] = list(p)[:]", "docstring": "media_query_expression : t_popen css_media_feature t_pclose\n | t_popen css_media_feature t_colon media_query_value t_pclose", "id": "f12427:c2:m60"} {"signature": "def p_media_query_value(self, p):", "body": "if utility.is_variable(p[]):var = self.scope.variables(''.join(p[]))if var:value = var.value[]if hasattr(value, ''):p[] = value.parse(self.scope)else:p[] = valueif isinstance(p[], Expression):p[] = p[].parse(self.scope)else:p[] = p[]", "docstring": "media_query_value : number\n | variable\n | word\n | color\n | expression", "id": "f12427:c2:m61"} {"signature": "def p_selector(self, p):", "body": "p[] = p[]", 
"docstring": "selector : '*'\n | '+'\n | child_selector\n | general_sibling_selector", "id": "f12427:c2:m62"} {"signature": "def p_ident_part(self, p):", "body": "p[] = p[]", "docstring": "ident_part : iclass\n | id\n | dom\n | combinator\n | color", "id": "f12427:c2:m63"} {"signature": "def p_ident_part_aux(self, p):", "body": "p[] = [p[], p[]]", "docstring": "ident_part : combinator vendor_property", "id": "f12427:c2:m64"} {"signature": "def p_filter_group_aux(self, p):", "body": "p[].extend(p[])p[] = p[]", "docstring": "filter_group : filter_group filter", "id": "f12427:c2:m65"} {"signature": "def p_filter_group(self, p):", "body": "p[] = p[]", "docstring": "filter_group : filter", "id": "f12427:c2:m66"} {"signature": "def p_filter(self, p):", "body": "p[] = list(p)[:]", "docstring": "filter : css_filter\n | css_filter t_ws\n | t_colon word\n | t_colon vendor_property\n | t_colon vendor_property t_ws\n | t_colon css_property\n | t_colon css_property t_ws\n | t_colon css_filter\n | t_colon css_filter t_ws\n | t_colon t_colon word\n | t_colon t_colon vendor_property", "id": "f12427:c2:m67"} {"signature": "def p_ms_filter(self, p):", "body": "p[] = tuple(list(p)[:])", "docstring": "ms_filter : css_ms_filter\n | css_ms_filter t_ws", "id": "f12427:c2:m68"} {"signature": "def p_fcall(self, p):", "body": "p[] = Call(list(p)[:], )", "docstring": "fcall : word t_popen argument_list t_pclose\n | property t_popen argument_list t_pclose\n | vendor_property t_popen argument_list t_pclose\n | less_open_format argument_list t_pclose\n | ms_filter t_popen argument_list t_pclose", "id": "f12427:c2:m69"} {"signature": "def p_argument_list_empty(self, p):", "body": "p[] = ''", "docstring": "argument_list : empty", "id": "f12427:c2:m70"} {"signature": "def p_argument_list_aux(self, p):", "body": "p[].extend(list(p)[:])p[] = p[]", "docstring": "argument_list : argument_list argument\n | argument_list t_comma argument", "id": "f12427:c2:m71"} {"signature": "def p_argument_list(self, p):", "body": "p[] = [p[]]", "docstring": "argument_list : argument", "id": "f12427:c2:m72"} {"signature": "def p_argument(self, p):", "body": "p[] = p[]", "docstring": "argument : expression\n | string\n | estring\n | word\n | id\n | css_uri\n | '='\n | fcall", "id": "f12427:c2:m73"} {"signature": "def p_expression_aux(self, p):", "body": "p[] = Expression(list(p)[:], )", "docstring": "expression : expression '+' expression\n | expression '-' expression\n | expression '/' expression\n | expression '*' expression\n | word '/' expression", "id": "f12427:c2:m74"} {"signature": "def p_expression_p_neg(self, p):", "body": "p[] = NegatedExpression([p[]], )", "docstring": "expression : '-' t_popen expression t_pclose", "id": "f12427:c2:m75"} {"signature": "def p_expression_p(self, p):", "body": "p[] = p[]", "docstring": "expression : t_popen expression t_pclose", "id": "f12427:c2:m76"} {"signature": "def p_expression(self, p):", "body": "p[] = p[]", "docstring": "expression : factor", "id": "f12427:c2:m77"} {"signature": "def p_factor(self, p):", "body": "p[] = p[]", "docstring": "factor : color\n | number\n | variable\n | css_dom\n | fcall", "id": "f12427:c2:m78"} {"signature": "def p_escaped_string(self, p):", "body": "p[] = p[]", "docstring": "estring : t_eopen style_list t_eclose\n | t_eopen identifier_list t_eclose", "id": "f12427:c2:m79"} {"signature": "def p_string_part(self, p):", "body": "p[] = p[]", "docstring": "string_part : variable\n | css_string", "id": "f12427:c2:m80"} {"signature": "def p_string_part_list_aux(self, 
p):", "body": "p[].extend([p[]])p[] = p[]", "docstring": "string_part_list : string_part_list string_part", "id": "f12427:c2:m81"} {"signature": "def p_string_part_list(self, p):", "body": "p[] = [p[]]", "docstring": "string_part_list : string_part", "id": "f12427:c2:m82"} {"signature": "def p_string_aux(self, p):", "body": "p[] = ['', p[], '']", "docstring": "string : t_isopen string_part_list t_isclose", "id": "f12427:c2:m83"} {"signature": "def p_string(self, p):", "body": "p[] = p[]", "docstring": "string : css_string", "id": "f12427:c2:m84"} {"signature": "def p_variable_neg(self, p):", "body": "p[] = ['', p[]]", "docstring": "variable : '-' variable", "id": "f12427:c2:m85"} {"signature": "def p_variable_strange(self, p):", "body": "p[] = p[]", "docstring": "variable : t_popen variable t_pclose", "id": "f12427:c2:m86"} {"signature": "def p_variable(self, p):", "body": "p[] = tuple(list(p)[:])", "docstring": "variable : less_variable\n | less_variable t_ws", "id": "f12427:c2:m87"} {"signature": "def p_color(self, p):", "body": "try:p[] = Color().fmt(p[])if len(p) > :p[] = [p[], p[]]except ValueError:self.handle_error('' % p[], p.lineno(),'')p[] = p[]", "docstring": "color : css_color\n | css_color t_ws", "id": "f12427:c2:m88"} {"signature": "def p_number(self, p):", "body": "p[] = tuple(list(p)[:])", "docstring": "number : css_number\n | css_number t_ws", "id": "f12427:c2:m89"} {"signature": "def p_dom(self, p):", "body": "p[] = tuple(list(p)[:])", "docstring": "dom : css_dom\n | css_dom t_ws", "id": "f12427:c2:m90"} {"signature": "def p_word(self, p):", "body": "p[] = tuple(list(p)[:])", "docstring": "word : css_ident\n | css_ident t_ws", "id": "f12427:c2:m91"} {"signature": "def p_class(self, p):", "body": "p[] = tuple(list(p)[:])", "docstring": "class : css_class\n | css_class t_ws", "id": "f12427:c2:m92"} {"signature": "def p_interpolated_class_part(self, p):", "body": "p[] = list(p)[:]", "docstring": "iclass_part : less_variable\n | less_variable t_ws\n | class", "id": "f12427:c2:m93"} {"signature": "def p_interpolated_class_part_list_aux(self, p):", "body": "p[].extend([p[]])p[] = p[]", "docstring": "iclass_part_list : iclass_part_list iclass_part", "id": "f12427:c2:m94"} {"signature": "def p_interpolated_class_part_list(self, p):", "body": "p[] = [p[]]", "docstring": "iclass_part_list : iclass_part", "id": "f12427:c2:m95"} {"signature": "def p_interpolated_class(self, p):", "body": "p[] = p[]", "docstring": "iclass : iclass_part_list", "id": "f12427:c2:m96"} {"signature": "def p_id(self, p):", "body": "p[] = tuple(list(p)[:])", "docstring": "id : css_id\n | css_id t_ws", "id": "f12427:c2:m97"} {"signature": "def p_property(self, p):", "body": "p[] = tuple(list(p)[:])", "docstring": "property : css_property\n | css_property t_ws", "id": "f12427:c2:m98"} {"signature": "def p_page(self, p):", "body": "p[] = tuple(list(p)[:])", "docstring": "page : css_page\n | css_page t_ws", "id": "f12427:c2:m99"} {"signature": "def p_vendor_property(self, p):", "body": "p[] = tuple(list(p)[:])", "docstring": "vendor_property : css_vendor_property\n | css_vendor_property t_ws", "id": "f12427:c2:m100"} {"signature": "def p_media_type(self, p):", "body": "p[] = tuple(list(p)[:])", "docstring": "media_type : css_media_type\n | css_media_type t_ws", "id": "f12427:c2:m101"} {"signature": "def p_combinator(self, p):", "body": "p[] = tuple(list(p)[:])", "docstring": "combinator : '&' t_ws\n | '&'", "id": "f12427:c2:m102"} {"signature": "def p_child_selector(self, p):", "body": "p[] = tuple(list(p)[:])", 
"docstring": "child_selector : '>' t_ws\n | '>'", "id": "f12427:c2:m103"} {"signature": "def p_general_sibling_selector(self, p):", "body": "p[] = tuple(list(p)[:])", "docstring": "general_sibling_selector : t_tilde t_ws\n | t_tilde", "id": "f12427:c2:m104"} {"signature": "def p_scope_open(self, p):", "body": "self.scope.push()p[] = p[]", "docstring": "brace_open : t_bopen", "id": "f12427:c2:m105"} {"signature": "def p_scope_close(self, p):", "body": "p[] = p[]", "docstring": "brace_close : t_bclose", "id": "f12427:c2:m106"} {"signature": "def p_and(self, p):", "body": "p[] = tuple(list(p)[:])", "docstring": "and : t_and t_ws\n | t_and", "id": "f12427:c2:m107"} {"signature": "def p_not(self, p):", "body": "p[] = tuple(list(p)[:])", "docstring": "not : t_not t_ws\n | t_not", "id": "f12427:c2:m108"} {"signature": "def p_only(self, p):", "body": "p[] = tuple(list(p)[:])", "docstring": "only : t_only t_ws\n | t_only", "id": "f12427:c2:m109"} {"signature": "def p_empty(self, p):", "body": "pass", "docstring": "empty :", "id": "f12427:c2:m110"} {"signature": "def p_error(self, t):", "body": "if t:error_msg = \"\" %(self.target, t.lineno, t.type, t.value)self.register.register(error_msg)while True:t = self.lex.token()if not t or t.value == '':if len(self.scope) > :self.scope.pop()breakself.parser.restart()return t", "docstring": "Internal error handler\n args:\n t (Lex token): Error token", "id": "f12427:c2:m111"} {"signature": "def handle_error(self, e, line, t=''):", "body": "self.register.register(\"\" % (t, line, e))", "docstring": "Custom error handler\n args:\n e (Mixed): Exception or str\n line (int): line number\n t(str): Error type", "id": "f12427:c2:m112"} {"signature": "def t_css_ms_filter(self, t):", "body": "return t", "docstring": "r'(?:progid:|DX\\.)[^;\\(]*", "id": "f12428:c0:m2"} {"signature": "def t_t_bopen(self, t):", "body": "t.lexer.in_property_decl = Falsereturn t", "docstring": "r'\\{", "id": "f12428:c0:m3"} {"signature": "def t_t_bclose(self, t):", "body": "return t", "docstring": "r'\\}", "id": "f12428:c0:m4"} {"signature": "def t_t_colon(self, t):", "body": "return t", "docstring": "r':", "id": "f12428:c0:m5"} {"signature": "def t_t_comma(self, t):", "body": "t.lexer.in_property_decl = Falsereturn t", "docstring": "r',", "id": "f12428:c0:m6"} {"signature": "def t_css_number(self, t):", "body": "return t", "docstring": "r'-?(\\d*\\.\\d+|\\d+)(s|%|in|ex|[ecm]m|p[txc]|deg|g?rad|ms?|k?hz|dpi|dpcm|dppx)?", "id": "f12428:c0:m7"} {"signature": "def t_iselector_less_variable(self, t):", "body": "return t", "docstring": "r'@\\{[^@\\}]+\\}", "id": "f12428:c0:m9"} {"signature": "def t_iselector_t_eclose(self, t):", "body": "t.lexer.pop_state()return t", "docstring": "r'\"|\\", "id": "f12428:c0:m10"} {"signature": "def t_iselector_css_class(self, t):", "body": "return t", "docstring": "r'[_a-z0-9\\-]+", "id": "f12428:c0:m12"} {"signature": "def t_iselector_t_ws(self, t):", "body": "t.lexer.pop_state()t.value = ''return t", "docstring": "r'[ \\t\\f\\v]+", "id": "f12428:c0:m13"} {"signature": "def t_iselector_t_bopen(self, t):", "body": "t.lexer.pop_state()return t", "docstring": "r'\\{", "id": "f12428:c0:m14"} {"signature": "def t_iselector_t_colon(self, t):", "body": "t.lexer.pop_state()return t", "docstring": "r':", "id": "f12428:c0:m15"} {"signature": "def t_mediaquery_t_not(self, t):", "body": "return t", "docstring": "r'not", "id": "f12428:c0:m16"} {"signature": "def t_mediaquery_t_only(self, t):", "body": "return t", "docstring": "r'only", "id": "f12428:c0:m17"} {"signature": 
"def t_mediaquery_t_and(self, t):", "body": "return t", "docstring": "r'and", "id": "f12428:c0:m18"} {"signature": "def t_mediaquery_t_popen(self, t):", "body": "return t", "docstring": "r'\\(", "id": "f12428:c0:m19"} {"signature": "def t_mediaquery_t_bopen(self, t):", "body": "t.lexer.pop_state()return t", "docstring": "r'\\{", "id": "f12428:c0:m22"} {"signature": "def t_mediaquery_t_semicolon(self, t):", "body": "t.lexer.pop_state() t.lexer.pop_state() return t", "docstring": "r';", "id": "f12428:c0:m23"} {"signature": "def t_import_t_semicolon(self, t):", "body": "t.lexer.pop_state()return t", "docstring": "r';", "id": "f12428:c0:m25"} {"signature": "def t_less_variable(self, t):", "body": "v = t.value.lower()if v in reserved.tokens:t.type = reserved.tokens[v]if t.type == \"\":t.lexer.push_state(\"\")elif t.type == \"\":t.lexer.push_state(\"\")return t", "docstring": "r'@@?[\\w-]+|@\\{[^@\\}]+\\}", "id": "f12428:c0:m26"} {"signature": "def t_css_color(self, t):", "body": "return t", "docstring": "r'\\#[0-9]([0-9a-f]{5}|[0-9a-f]{2})", "id": "f12428:c0:m27"} {"signature": "def t_newline(self, t):", "body": "t.lexer.lineno += t.value.count('')", "docstring": "r'[\\n\\r]+", "id": "f12428:c0:m30"} {"signature": "def t_css_comment(self, t):", "body": "t.lexer.lineno += t.value.count('')pass", "docstring": "r'(/\\*(.|\\n|\\r)*?\\*/)", "id": "f12428:c0:m31"} {"signature": "def t_less_comment(self, t):", "body": "pass", "docstring": "r'//.*", "id": "f12428:c0:m32"} {"signature": "def t_css_important(self, t):", "body": "t.value = ''return t", "docstring": "r'!\\s*important", "id": "f12428:c0:m33"} {"signature": "def t_t_ws(self, t):", "body": "t.value = ''return t", "docstring": "r'[ \\t\\f\\v]+", "id": "f12428:c0:m34"} {"signature": "def t_t_popen(self, t):", "body": "t.lexer.push_state('')return t", "docstring": "r'\\(", "id": "f12428:c0:m35"} {"signature": "def t_less_open_format(self, t):", "body": "t.lexer.push_state('')return t", "docstring": "r'%\\(", "id": "f12428:c0:m36"} {"signature": "def t_parn_t_pclose(self, t):", "body": "t.lexer.pop_state()return t", "docstring": "r'\\)", "id": "f12428:c0:m37"} {"signature": "def t_t_pclose(self, t):", "body": "return t", "docstring": "r'\\)", "id": "f12428:c0:m38"} {"signature": "def t_t_semicolon(self, t):", "body": "t.lexer.in_property_decl = Falsereturn t", "docstring": "r';", "id": "f12428:c0:m39"} {"signature": "def t_t_eopen(self, t):", "body": "if t.value[] == '':t.lexer.push_state('')elif t.value[] == '':t.lexer.push_state('')return t", "docstring": "r'~\"|~\\", "id": "f12428:c0:m40"} {"signature": "def t_t_tilde(self, t):", "body": "return t", "docstring": "r'~", "id": "f12428:c0:m41"} {"signature": "def t_escapequotes_less_variable(self, t):", "body": "return t", "docstring": "r'@\\{[^@\"\\}]+\\}", "id": "f12428:c0:m42"} {"signature": "def t_escapeapostrophe_less_variable(self, t):", "body": "return t", "docstring": "r'@\\{[^@\\'\\}]+\\}", "id": "f12428:c0:m43"} {"signature": "def t_escapequotes_t_eclose(self, t):", "body": "t.lexer.pop_state()return t", "docstring": "r", "id": "f12428:c0:m44"} {"signature": "def t_escapeapostrophe_t_eclose(self, t):", "body": "t.lexer.pop_state()return t", "docstring": "r'\\", "id": "f12428:c0:m45"} {"signature": "def t_css_string(self, t):", "body": "t.lexer.lineno += t.value.count('')return t", "docstring": "r'\"[^\"@]*\"|\\'[^\\'@]*\\", "id": "f12428:c0:m46"} {"signature": "def t_t_isopen(self, t):", "body": "if t.value[] == '':t.lexer.push_state('')elif t.value[] == '':t.lexer.push_state('')return 
t", "docstring": "r'\"|\\", "id": "f12428:c0:m47"} {"signature": "def t_istringquotes_less_variable(self, t):", "body": "return t", "docstring": "r'@\\{[^@\"\\}]+\\}", "id": "f12428:c0:m48"} {"signature": "def t_istringapostrophe_less_variable(self, t):", "body": "return t", "docstring": "r'@\\{[^@\\'\\}]+\\}", "id": "f12428:c0:m49"} {"signature": "def t_istringapostrophe_css_string(self, t):", "body": "t.lexer.lineno += t.value.count('')return t", "docstring": "r'[^\\'@]+", "id": "f12428:c0:m50"} {"signature": "def t_istringquotes_css_string(self, t):", "body": "t.lexer.lineno += t.value.count('')return t", "docstring": "r'[^\"@]+", "id": "f12428:c0:m51"} {"signature": "def t_istringquotes_t_isclose(self, t):", "body": "t.lexer.pop_state()return t", "docstring": "r", "id": "f12428:c0:m52"} {"signature": "def t_istringapostrophe_t_isclose(self, t):", "body": "t.lexer.pop_state()return t", "docstring": "r'\\", "id": "f12428:c0:m53"} {"signature": "def file(self, filename):", "body": "with open(filename) as f:self.lexer.input(f.read())return self", "docstring": "Lex file.", "id": "f12428:c0:m56"} {"signature": "def input(self, file):", "body": "if isinstance(file, string_types):with open(file) as f:self.lexer.input(f.read())else:self.lexer.input(file.read())", "docstring": "Load lexer with content from `file` which can be a path or a file\nlike object.", "id": "f12428:c0:m57"} {"signature": "def token(self):", "body": "if self.next_:t = self.next_self.next_ = Nonereturn twhile True:t = self.lexer.token()if not t:return tif t.type == '' and (self.pretok or(self.last and self.last.type not in self.significant_ws)):continueself.pretok = Falseif t.type == '' and self.last and self.last.type not in ['', ''] and self.last.type != ''and not (hasattr(t, '') and (t.lexer.lexstate == '' or t.lexer.lexstate == '')):self.next_ = ttok = lex.LexToken()tok.type = ''tok.value = ''tok.lineno = t.linenotok.lexpos = t.lexposself.last = tokself.lexer.in_property_decl = Falsereturn tokself.last = tbreakreturn t", "docstring": "Token function. Contains 2 hacks:\n 1. Injects ';' into blocks where the last property\n leaves out the ;\n 2. Strips out whitespace from nonsignificant locations\n to ease parsing.", "id": "f12428:c0:m58"} {"signature": "def flatten(lst):", "body": "for elm in lst:if isinstance(elm, collections.Iterable) and not isinstance(elm, string_types):for sub in flatten(elm):yield subelse:yield elm", "docstring": "Flatten list.\n Args:\n lst (list): List to flatten\n Returns:\n generator", "id": "f12430:m0"} {"signature": "def pairwise(lst):", "body": "if not lst:returnlength = len(lst)for i in range(length - ):yield lst[i], lst[i + ]yield lst[-], None", "docstring": "yield item i and item i+1 in lst. e.g.\n (lst[0], lst[1]), (lst[1], lst[2]), ..., (lst[-1], None)\n Args:\n lst (list): List to process\n Returns:\n list", "id": "f12430:m1"} {"signature": "def rename(blocks, scope, stype):", "body": "for p in blocks:if isinstance(p, stype):p.tokens[].parse(scope)if p.tokens[]:scope.push()scope.current = p.tokens[]rename(p.tokens[], scope, stype)scope.pop()", "docstring": "Rename all sub-blocks moved under another\n block. 
(mixins)\n Args:\n lst (list): block list\n scope (object): Scope object", "id": "f12430:m2"} {"signature": "def blocksearch(block, name):", "body": "if hasattr(block, ''):for b in block.tokens[]:b = (b if hasattr(b, '') and b.raw() == name else blocksearch(b, name))if b:return breturn False", "docstring": "Recursive search for name in block (inner blocks)\n Args:\n name (str): search term\n Returns:\n Block OR False", "id": "f12430:m3"} {"signature": "def reverse_guard(lst):", "body": "rev = {'': '', '>': '', '': '', '': '>'}return [rev[l] if l in rev else l for l in lst]", "docstring": "Reverse guard expression. not\n (@a > 5) -> (@a =< 5)\n Args:\n lst (list): Expression\n returns:\n list", "id": "f12430:m4"} {"signature": "def debug_print(lst, lvl=):", "body": "pad = ''.join([''] * lvl)t = type(lst)if t is list:for p in lst:debug_print(p, lvl)elif hasattr(lst, ''):print(pad, t)debug_print(list(flatten(lst.tokens)), lvl + )", "docstring": "Print scope tree\n args:\n lst (list): parse result\n lvl (int): current nesting level", "id": "f12430:m5"} {"signature": "def destring(value):", "body": "return value.strip('')", "docstring": "Strip quotes from string\n args:\n value (str)\n returns:\n str", "id": "f12430:m6"} {"signature": "def analyze_number(var, err=''):", "body": "n, u = split_unit(var)if not isinstance(var, string_types):return (var, u)if is_color(var):return (var, '')if is_int(n):n = int(n)elif is_float(n):n = float(n)else:raise SyntaxError('' % (err, var))return (n, u)", "docstring": "Analyse number for type and split from unit\n 1px -> (q, 'px')\n args:\n var (str): number string\n kwargs:\n err (str): Error message\n raises:\n SyntaxError\n returns:\n tuple", "id": "f12430:m7"} {"signature": "def with_unit(number, unit=None):", "body": "if isinstance(number, tuple):number, unit = numberif number == :return ''if unit:number = str(number)if number.startswith(''):number = '' + numberreturn \"\" % (number, unit)return number if isinstance(number, string_types) else str(number)", "docstring": "Return number with unit\n args:\n number (mixed): Number\n unit (str): Unit\n returns:\n str", "id": "f12430:m8"} {"signature": "def is_color(value):", "body": "if not value or not isinstance(value, string_types):return Falseif value[] == '' and len(value) in [, , , ]:try:int(value[:], )return Trueexcept ValueError:passreturn False", "docstring": "Is string CSS color\n args:\n value (str): string\n returns:\n bool", "id": "f12430:m9"} {"signature": "def is_variable(value):", "body": "if isinstance(value, string_types):return (value.startswith('') or value.startswith(''))elif isinstance(value, tuple):value = ''.join(value)return (value.startswith('') or value.startswith(''))return False", "docstring": "Check if string is LESS variable\n args:\n value (str): string\n returns:\n bool", "id": "f12430:m10"} {"signature": "def is_int(value):", "body": "try:int(str(value))return Trueexcept (ValueError, TypeError):passreturn False", "docstring": "Is value integer\n args:\n value (str): string\n returns:\n bool", "id": "f12430:m11"} {"signature": "def is_float(value):", "body": "if not is_int(value):try:float(str(value))return Trueexcept (ValueError, TypeError):passreturn False", "docstring": "Is value float\n args:\n value (str): string\n returns:\n bool", "id": "f12430:m12"} {"signature": "def split_unit(value):", "body": "r = re.search('', str(value))return r.groups() if r else ('', '')", "docstring": "Split a number from its unit\n 1px -> (q, 'px')\n Args:\n value (str): input\n returns:\n 
tuple", "id": "f12430:m13"} {"signature": "def away_from_zero_round(value, ndigits=):", "body": "if sys.version_info[] >= :p = **ndigitsreturn float(math.floor((value * p) + math.copysign(, value))) / pelse:return round(value, ndigits)", "docstring": "Round half-way away from zero.\n\n Python2's round() method.", "id": "f12430:m14"} {"signature": "def convergent_round(value, ndigits=):", "body": "if sys.version_info[] < :if value < :return -convergent_round(-value)epsilon = integral_part, _ = divmod(value, )if abs(value - (integral_part + )) < epsilon:if integral_part % < epsilon:return integral_partelse:nearest_even = integral_part + return math.ceil(nearest_even)return round(value, ndigits)", "docstring": "Convergent rounding.\n\n Round to neareas even, similar to Python3's round() method.", "id": "f12430:m15"} {"signature": "def pc_or_float(s):", "body": "if isinstance(s, string_types) and '' in s:return float(s.strip('')) / return float(s)", "docstring": "Utility function to process strings that contain either percentiles or floats\n args:\n str: s\n returns:\n float", "id": "f12430:m16"} {"signature": "def permutations_with_replacement(iterable, r=None):", "body": "pool = tuple(iterable)n = len(pool)r = n if r is None else rfor indices in itertools.product(range(n), repeat=r):yield list(pool[i] for i in indices)", "docstring": "Return successive r length permutations of elements in the iterable.\n\n Similar to itertools.permutation but withouth repeated values filtering.", "id": "f12430:m17"} {"signature": "def __init__(self, init=False):", "body": "super(Scope, self).__init__()self._mixins = {}if init:self.push()self.deferred = Falseself.real = []", "docstring": "Scope\n Args:\n init (bool): Initiate scope", "id": "f12432:c0:m0"} {"signature": "def push(self):", "body": "self.append({'': {},'': [],'': [],'': None})", "docstring": "Push level on scope", "id": "f12432:c0:m1"} {"signature": "@propertydef scopename(self):", "body": "return [r[''] for r in self if r['']]", "docstring": "Current scope name as list\n Returns:\n list", "id": "f12432:c0:m4"} {"signature": "def add_block(self, block):", "body": "self[-][''].append(block)self[-][''].append(block.raw())", "docstring": "Add block element to scope\n Args:\n block (Block): Block object", "id": "f12432:c0:m5"} {"signature": "def remove_block(self, block, index=\"\"):", "body": "self[index][\"\"].remove(block)self[index][\"\"].remove(block.raw())", "docstring": "Remove block element from scope\n Args:\n block (Block): Block object", "id": "f12432:c0:m6"} {"signature": "def add_mixin(self, mixin):", "body": "raw = mixin.tokens[][].raw()if raw in self._mixins:self._mixins[raw].append(mixin)else:self._mixins[raw] = [mixin]", "docstring": "Add mixin to scope\n Args:\n mixin (Mixin): Mixin object", "id": "f12432:c0:m7"} {"signature": "def add_variable(self, variable):", "body": "self[-][''][variable.name] = variable", "docstring": "Add variable to scope\n Args:\n variable (Variable): Variable object", "id": "f12432:c0:m8"} {"signature": "def variables(self, name):", "body": "if isinstance(name, tuple):name = name[]if name.startswith(''):name = '' + name[:-]i = len(self)while i >= :i -= if name in self[i]['']:return self[i][''][name]return False", "docstring": "Search for variable by name. 
Searches scope top down\n Args:\n name (string): Search term\n Returns:\n Variable object OR False", "id": "f12432:c0:m9"} {"signature": "def mixins(self, name):", "body": "m = self._smixins(name)if m:return mreturn self._smixins(name.replace('', ''))", "docstring": "Search mixins for name.\n Allow '>' to be ignored. '.a .b()' == '.a > .b()'\n Args:\n name (string): Search term\n Returns:\n Mixin object list OR False", "id": "f12432:c0:m10"} {"signature": "def _smixins(self, name):", "body": "return (self._mixins[name] if name in self._mixins else False)", "docstring": "Inner wrapper to search for mixins by name.", "id": "f12432:c0:m11"} {"signature": "def blocks(self, name):", "body": "b = self._blocks(name)if b:return breturn self._blocks(name.replace('', ''))", "docstring": "Search for defined blocks recursively.\nAllow '>' to be ignored. '.a .b' == '.a > .b'\nArgs:\n name (string): Search term\nReturns:\n Block object OR False", "id": "f12432:c0:m12"} {"signature": "def _blocks(self, name):", "body": "i = len(self)while i >= :i -= if name in self[i]['']:for b in self[i]['']:r = b.raw()if r and r == name:return belse:for b in self[i]['']:r = b.raw()if r and name.startswith(r):b = utility.blocksearch(b, name)if b:return breturn False", "docstring": "Inner wrapper to search for blocks by name.", "id": "f12432:c0:m13"} {"signature": "def update(self, scope, at=):", "body": "if hasattr(scope, '') and not at:self._mixins.update(scope._mixins)self[at][''].update(scope[at][''])self[at][''].extend(scope[at][''])self[at][''].extend(scope[at][''])", "docstring": "Update scope. Add another scope to this one.\n Args:\n scope (Scope): Scope object\n Kwargs:\n at (int): Level to update", "id": "f12432:c0:m14"} {"signature": "def swap(self, name):", "body": "if name.startswith(''):var = self.variables(name[:])if var is False:raise SyntaxError('' % name)name = '' + utility.destring(var.value[])var = self.variables(name)if var is False:raise SyntaxError('' % name)elif name.startswith(''):var = self.variables('' + name[:-])if var is False:raise SyntaxError('' % name)if isinstance(var.value[], string_types):var.value[] = utility.destring(var.value[])else:var = self.variables(name)if var is False:raise SyntaxError('' % name)return var.value", "docstring": "Swap variable name for variable value\n Args:\n name (str): Variable name\n Returns:\n Variable value (Mixed)", "id": "f12432:c0:m15"} {"signature": "def process(self, expression):", "body": "a, o, b = expressionc1 = self._hextorgb(a)c2 = self._hextorgb(b)r = ['']for i in range():v = self.operate(c1[i], c2[i], o)if v > :v = if v < :v = r.append(\"\" % int(v))return ''.join(r)", "docstring": "Process color expression\n args:\n expression (tuple): color expression\n returns:\n str", "id": "f12433:c0:m0"} {"signature": "def operate(self, left, right, operation):", "body": "operation = {'': operator.add,'': operator.sub,'': operator.mul,'': operator.truediv}.get(operation)return operation(left, right)", "docstring": "Do operation on colors\n args:\n left (str): left side\n right (str): right side\n operation (str): Operation\n returns:\n str", "id": "f12433:c0:m1"} {"signature": "def rgb(self, *args):", "body": "if len(args) == :args = args[:]if len(args) == :try:return self._rgbatohex(list(map(int, args)))except ValueError:if all((a for a in argsif a[-] == '' and >= int(a[:-]) >= )):return self._rgbatohex([int(a[:-]) * / for a in args])raise ValueError('')", "docstring": "Translate rgb(...) 
to color string\n raises:\n ValueError\n returns:\n str", "id": "f12433:c0:m2"} {"signature": "def rgba(self, *args):", "body": "if len(args) == :try:falpha = float(list(args)[])if falpha > :args = args[:]if falpha == :values = self._rgbatohex_raw(list(map(int, args)))return \"\" % ''.join([str(a) for a in values])return self._rgbatohex(list(map(int, args)))except ValueError:if all((a for a in argsif a[-] == '' and >= int(a[:-]) >= )):alpha = list(args)[]if alpha[-] == '' and float(alpha[:-]) == :values = self._rgbatohex_raw([int(a[:-]) * / for a in args])return \"\" % ''.join([str(a) for a in values])return self._rgbatohex([int(a[:-]) * / for a in args])raise ValueError('')", "docstring": "Translate rgba(...) to color string\n raises:\n ValueError\n returns:\n str", "id": "f12433:c0:m3"} {"signature": "def argb(self, *args):", "body": "if len(args) == and type(args[]) is str:match = re.match(r'', args[])if match:rgb = re.sub(r'', '', match.group()).split('')else:rgb = list(self._hextorgb(args[]))else:rgb = list(args)if len(rgb) == :return self._rgbatohex([] + list(map(int, rgb)))elif len(rgb) == :rgb = [rgb.pop()] + rgb try:fval = float(list(rgb)[])if fval > :rgb = [] + rgb[:] elif >= fval >= :rgb = [fval * ] + rgb[:] else:rgb = [] + rgb[:] return self._rgbatohex(list(map(int, rgb)))except ValueError:if all((a for a in rgbif a[-] == '' and >= int(a[:-]) >= )):return self._rgbatohex([int(a[:-]) * / for a in rgb])raise ValueError('')", "docstring": "Translate argb(...) to color string\n\n Creates a hex representation of a color in #AARRGGBB format (NOT\n #RRGGBBAA!). This format is used in Internet Explorer, and .NET\n and Android development.\n\n raises:\n ValueError\n returns:\n str", "id": "f12433:c0:m4"} {"signature": "def hsl(self, *args):", "body": "if len(args) == :return self.hsla(*args)elif len(args) == :h, s, l = argsrgb = colorsys.hls_to_rgb(int(h) / , utility.pc_or_float(l), utility.pc_or_float(s))color = (utility.convergent_round(c * ) for c in rgb)return self._rgbatohex(color)raise ValueError('')", "docstring": "Translate hsl(...) to color string\n raises:\n ValueError\n returns:\n str", "id": "f12433:c0:m5"} {"signature": "def hsla(self, *args):", "body": "if len(args) == :h, s, l, a = argsrgb = colorsys.hls_to_rgb(int(h) / , utility.pc_or_float(l), utility.pc_or_float(s))color = [float(utility.convergent_round(c * )) for c in rgb]color.append(utility.pc_or_float(a))return \"\" % tuple(color)raise ValueError('')", "docstring": "Translate hsla(...) 
to color string\n raises:\n ValueError\n returns:\n str", "id": "f12433:c0:m6"} {"signature": "def hue(self, color, *args):", "body": "if color:h, l, s = self._hextohls(color)return utility.convergent_round(h * , )raise ValueError('')", "docstring": "Return the hue value of a color\n args:\n color (str): color\n raises:\n ValueError\n returns:\n float", "id": "f12433:c0:m7"} {"signature": "def saturation(self, color, *args):", "body": "if color:h, l, s = self._hextohls(color)return s * raise ValueError('')", "docstring": "Return the saturation value of a color\n args:\n color (str): color\n raises:\n ValueError\n returns:\n float", "id": "f12433:c0:m8"} {"signature": "def lightness(self, color, *args):", "body": "if color:h, l, s = self._hextohls(color)return l * raise ValueError('')", "docstring": "Return the lightness value of a color\n args:\n color (str): color\n raises:\n ValueError\n returns:\n float", "id": "f12433:c0:m9"} {"signature": "def lighten(self, color, diff, *args):", "body": "if color and diff:return self._ophsl(color, diff, , operator.add)raise ValueError('')", "docstring": "Lighten a color\n args:\n color (str): color\n diff (str): percentage\n returns:\n str", "id": "f12433:c0:m11"} {"signature": "def darken(self, color, diff, *args):", "body": "if color and diff:return self._ophsl(color, diff, , operator.sub)raise ValueError('')", "docstring": "Darken a color\n args:\n color (str): color\n diff (str): percentage\n returns:\n str", "id": "f12433:c0:m12"} {"signature": "def saturate(self, color, diff, *args):", "body": "if color and diff:return self._ophsl(color, diff, , operator.add)raise ValueError('')", "docstring": "Saturate a color\n args:\n color (str): color\n diff (str): percentage\n returns:\n str", "id": "f12433:c0:m13"} {"signature": "def desaturate(self, color, diff, *args):", "body": "if color and diff:return self._ophsl(color, diff, , operator.sub)raise ValueError('')", "docstring": "Desaturate a color\n args:\n color (str): color\n diff (str): percentage\n returns:\n str", "id": "f12433:c0:m14"} {"signature": "def greyscale(self, color, *args):", "body": "if color:return self.desaturate(color, )raise ValueError('')", "docstring": "Simply 100% desaturate.\n args:\n color (str): color\n returns:\n str", "id": "f12433:c0:m16"} {"signature": "def grayscale(self, color, *args):", "body": "return self.greyscale(color, *args)", "docstring": "Wrapper for greyscale, other spelling", "id": "f12433:c0:m17"} {"signature": "def spin(self, color, degree, *args):", "body": "if color and degree:if isinstance(degree, string_types):degree = float(degree.strip(''))h, l, s = self._hextohls(color)h = ((h * ) + degree) % h = + h if h < else hrgb = colorsys.hls_to_rgb(h / , l, s)color = (utility.convergent_round(c * ) for c in rgb)return self._rgbatohex(color)raise ValueError('')", "docstring": "Spin color by degree. 
(Increase / decrease hue)\n args:\n color (str): color\n degree (str): percentage\n raises:\n ValueError\n returns:\n str", "id": "f12433:c0:m18"} {"signature": "def mix(self, color1, color2, weight=, *args):", "body": "if color1 and color2:if isinstance(weight, string_types):weight = float(weight.strip(''))weight = ((weight / ) * ) - rgb1 = self._hextorgb(color1)rgb2 = self._hextorgb(color2)alpha = w1 = (((weight if weight * alpha == - else weight + alpha) /( + weight * alpha)) + )w1 = w1 / w2 = - w1rgb = [rgb1[] * w1 + rgb2[] * w2,rgb1[] * w1 + rgb2[] * w2,rgb1[] * w1 + rgb2[] * w2,]return self._rgbatohex(rgb)raise ValueError('')", "docstring": "This algorithm factors in both the user-provided weight\n and the difference between the alpha values of the two colors\n to decide how to perform the weighted average of the two RGB values.\n\n It works by first normalizing both parameters to be within [-1, 1],\n where 1 indicates \"only use color1\", -1 indicates \"only use color 0\",\n and all values in between indicated a proportionately weighted average.\n\n Once we have the normalized variables w and a,\n we apply the formula (w + a)/(1 + w*a)\n to get the combined weight (in [-1, 1]) of color1.\n This formula has two especially nice properties:\n\n * When either w or a are -1 or 1, the combined weight is also that number\n (cases where w * a == -1 are undefined, and handled as a special case).\n\n * When a is 0, the combined weight is w, and vice versa\n\n Finally, the weight of color1 is renormalized to be within [0, 1]\n and the weight of color2 is given by 1 minus the weight of color1.\n\n Copyright (c) 2006-2009 Hampton Catlin, Nathan Weizenbaum, and Chris Eppstein\n http://sass-lang.com\n args:\n color1 (str): first color\n color2 (str): second color\n weight (int/str): weight\n raises:\n ValueError\n returns:\n str", "id": "f12433:c0:m19"} {"signature": "def fmt(self, color):", "body": "if utility.is_color(color):color = color.lower().strip('')if len(color) in [, ]:color = ''.join([c * for c in color])return '' % colorraise ValueError('')", "docstring": "Format CSS Hex color code.\n uppercase becomes lowercase, 3 digit codes expand to 6 digit.\n args:\n color (str): color\n raises:\n ValueError\n returns:\n str", "id": "f12433:c0:m20"} {"signature": "def parse(self, scope):", "body": "self.keyframe, = [e[] if isinstance(e, tuple) else e for e in self.tokensif str(e).strip()]self.subparse = Falsereturn self", "docstring": "Parse node.\n args:\n scope (Scope): Current scope\n raises:\n SyntaxError\n returns:\n self", "id": "f12440:c0:m0"} {"signature": "def copy(self):", "body": "return KeyframeSelector(self.tokens, )", "docstring": "Return copy of self\n Returns:\n KeyframeSelector object", "id": "f12440:c0:m1"} {"signature": "def fmt(self, fills):", "body": "return self.keyframe", "docstring": "Format identifier\n args:\n fills (dict): replacements\n returns:\n str (CSS)", "id": "f12440:c0:m2"} {"signature": "def parse(self, scope):", "body": "self.name, args, self.guards = self.tokens[]self.args = [a for a in utility.flatten(args) if a]self.body = Block([None, self.tokens[]], )self.vars = list(utility.flatten([list(v.values()) for v in [s[''] for s in scope]]))return self", "docstring": "Parse node\n args:\n scope (Scope): current scope\n raises:\n SyntaxError\n returns:\n self", "id": "f12441:c0:m0"} {"signature": "def raw(self):", "body": "return self.name.raw()", "docstring": "Raw mixin name\n returns:\n str", "id": "f12441:c0:m1"} {"signature": "def parse_args(self, args, 
scope):", "body": "arguments = list(zip(args,[''] * len(args))) if args and args[] else Nonezl = itertools.zip_longest if sys.version_info[] == else itertools.izip_longestif self.args:parsed = [v if hasattr(v, '') else v for v in copy.copy(self.args)]args = args if isinstance(args, list) else [args]vars = [self._parse_arg(var, arg, scope)for arg, var in zl([a for a in args], parsed)]for var in vars:if var:var.parse(scope)if not arguments:arguments = [v.value for v in vars if v]if not arguments:arguments = ''Variable(['', None, arguments]).parse(scope)", "docstring": "Parse arguments to mixin. Add them to scope\n as variables. Sets upp special variable @arguments\n as well.\n args:\n args (list): arguments\n scope (Scope): current scope\n raises:\n SyntaxError", "id": "f12441:c0:m2"} {"signature": "def _parse_arg(self, var, arg, scope):", "body": "if isinstance(var, Variable):if arg:if utility.is_variable(arg[]):tmp = scope.variables(arg[])if not tmp:return Noneval = tmp.valueelse:val = argvar = Variable(var.tokens[:-] + [val])else:if utility.is_variable(var):if arg is None:raise SyntaxError('')elif utility.is_variable(arg[]):tmp = scope.variables(arg[])if not tmp:return Noneval = tmp.valueelse:val = argvar = Variable([var, None, val])else:return Nonereturn var", "docstring": "Parse a single argument to mixin.\n args:\n var (Variable object): variable\n arg (mixed): argument\n scope (Scope object): current scope\n returns:\n Variable object or None", "id": "f12441:c0:m3"} {"signature": "def parse_guards(self, scope):", "body": "if self.guards:cor = True if '' in self.guards else Falsefor g in self.guards:if isinstance(g, list):res = (g[].parse(scope)if len(g) == else Expression(g).parse(scope))if cor:if res:return Trueelif not res:return Falsereturn True", "docstring": "Parse guards on mixin.\n args:\n scope (Scope): current scope\n raises:\n SyntaxError\n returns:\n bool (passes guards)", "id": "f12441:c0:m4"} {"signature": "def call(self, scope, args=[]):", "body": "ret = Falseif args:args = [[a.parse(scope) if isinstance(a, Expression) else a for a in arg] if arg else arg for arg in args]try:self.parse_args(args, scope)except SyntaxError:passelse:if self.parse_guards(scope):body = self.body.copy()ret = body.tokens[]if ret:utility.rename(ret, scope, Block)return ret", "docstring": "Call mixin. 
Parses a copy of the mixins body\n in the current scope and returns it.\n args:\n scope (Scope): current scope\n args (list): arguments\n raises:\n SyntaxError\n returns:\n list or False", "id": "f12441:c0:m5"} {"signature": "def parse(self, scope):", "body": "self.parsed = list(utility.flatten(self.tokens))if self.parsed[] == '':if len(self.parsed) > :self.parsed.insert(, '')return self", "docstring": "Parse node\n args:\n scope (Scope): current scope\n raises:\n SyntaxError\n returns:\n self", "id": "f12442:c0:m0"} {"signature": "def fmt(self, fills):", "body": "return ''.join(self.parsed) + fills['']", "docstring": "Format node\n args:\n fills (dict): replacements\n returns:\n str", "id": "f12442:c0:m1"} {"signature": "def parse(self, scope):", "body": "assert (len(self.tokens) == )expr = self.process(self.tokens, scope)A, O, B = [e[] if isinstance(e, tuple) else e for e in exprif str(e).strip()]try:a, ua = utility.analyze_number(A, '')b, ub = utility.analyze_number(B, '')except SyntaxError:return ''.join([str(A), str(O), str(B)])if (a is False or b is False):return ''.join([str(A), str(O), str(B)])if ua == '' or ub == '':return color.Color().process((A, O, B))if a == and O == '':return ''.join([str(A), str(O), str(B), ''])out = self.operate(a, b, O)if isinstance(out, bool):return outreturn self.with_units(out, ua, ub)", "docstring": "Parse Node\n args:\n scope (Scope): Scope object\n raises:\n SyntaxError\n returns:\n str", "id": "f12443:c0:m0"} {"signature": "def with_units(self, val, ua, ub):", "body": "if not val:return str(val)if ua or ub:if ua and ub:if ua == ub:return str(val) + uaelse:return str(val) + uaelif ua:return str(val) + uaelif ub:return str(val) + ubreturn repr(val)", "docstring": "Return value with unit.\n args:\n val (mixed): result\n ua (str): 1st unit\n ub (str): 2nd unit\n raises:\n SyntaxError\n returns:\n str", "id": "f12443:c0:m1"} {"signature": "def operate(self, vala, valb, oper):", "body": "operation = {'': operator.add,'': operator.sub,'': operator.mul,'': operator.truediv,'': operator.eq,'>': operator.gt,'': operator.lt,'': operator.ge,'': operator.le,}.get(oper)if operation is None:raise SyntaxError(\"\" % oper)ret = operation(vala, valb)if oper in '' and int(ret) == ret:ret = int(ret)return ret", "docstring": "Perform operation\n args:\n vala (mixed): 1st value\n valb (mixed): 2nd value\n oper (str): operation\n returns:\n mixed", "id": "f12443:c0:m2"} {"signature": "def expression(self):", "body": "return utility.flatten(self.tokens)", "docstring": "Return str representation of expression\n returns:\n str", "id": "f12443:c0:m3"} {"signature": "def parse(self, scope):", "body": "if not self.parsed:if len(self.tokens) > :property, style, _ = self.tokensself.important = Trueelse:property, style = self.tokensself.important = Falseself.property = ''.join(property)self.parsed = []if style:style = self.preprocess(style)self.parsed = self.process(style, scope)return self", "docstring": "Parse node\n args:\n scope (Scope): current scope\n raises:\n SyntaxError\n returns:\n self", "id": "f12444:c0:m0"} {"signature": "def preprocess(self, style):", "body": "if self.property == '':style = [''.join(u.expression()) if hasattr(u, '') else ufor u in style]else:style = [(u, '') if hasattr(u, '') else ufor u in style]return style", "docstring": "Hackish preprocessing from font shorthand tags.\n Skips expression parse on certain tags.\n args:\n style (list): .\n returns:\n list", "id": "f12444:c0:m1"} {"signature": "def fmt(self, fills):", "body": "f = \"\"imp = '' if 
self.important else ''if fills['']:self.parsed = ['' % fills[''] if p == '' else p for p in self.parsed]style = ''.join([p.fmt(fills) if hasattr(p, '') else str(p) for p in self.parsed])style = re.sub(\"\", \"\", style)fills.update({'': self.property,'': style.strip(),'': imp})return f % fills", "docstring": "Format node\n        args:\n            fills (dict): replacements\n        returns:\n            str", "id": "f12444:c0:m2"} {"signature": "def copy(self):", "body": "return Property([t for t in self.tokens], )", "docstring": "Return a full copy of self\n        Returns:\n            Property object", "id": "f12444:c0:m3"} {"signature": "def __init__(self, mixin, args, lineno=):", "body": "self.tokens = [mixin, args]self.lineno = lineno", "docstring": "This node represents mixin calls. The calls\n            to these mixins are deferred until the second\n            parse cycle. lessc.js allows calls to mixins not\n            yet defined or known.\n            args:\n                mixin (Mixin): Mixin object\n                args (list): Call arguments", "id": "f12447:c0:m0"} {"signature": "def parse(self, scope, error=False, depth=):", "body": "res = Falseident, args = self.tokensident.parse(scope)mixins = scope.mixins(ident.raw())if not mixins:ident.parse(None)mixins = scope.mixins(ident.raw())if depth > :raise SyntaxError('' % ident.raw(True))if not mixins:if scope.deferred:store = [t for t in scope.deferred.parsed[-]]i = while scope.deferred.parsed[-]:scope.current = scope.deferredident.parse(scope)mixins = scope.mixins(ident.raw())scope.current = Noneif mixins or i > :breakscope.deferred.parsed[-].pop()i += scope.deferred.parsed[-] = storeif not mixins:block = scope.blocks(ident.raw())if not block:ident.parse(None)block = scope.blocks(ident.raw())if block:scope.current = scope.real[-] if scope.real else Noneres = block.copy_inner(scope)scope.current = Noneif mixins:for mixin in mixins:scope.current = scope.real[-] if scope.real else Noneres = mixin.call(scope, args)if res:[scope.add_variable(v) for v in mixin.vars]scope.deferred = identbreakif res:store = [t for t in scope.deferred.parsed[-]] if scope.deferred else Falsetmp_res = []for p in res:if p:if isinstance(p, Deferred):tmp_res.append(p.parse(scope, depth=depth + ))else:tmp_res.append(p.parse(scope))res = tmp_reswhile (any(t for t in res if isinstance(t, Deferred))):res = [p.parse(scope) for p in res if p]if store:scope.deferred.parsed[-] = storeif error and not res:raise SyntaxError('' % ident.raw(True))return res", "docstring": "Parse function. We search for mixins\n            first within current scope then fallback\n            to global scope. The special scope.deferred\n            is used when local scope mixins are called\n            within parent mixins.\n            If nothing is found we fallback to block-mixin\n            as lessc.js allows calls to blocks and mixins to\n            be interchangeable.\n            clx: This method is a HACK that stems from\n            poor design elsewhere. 
I will fix it\n when I have more time.\n args:\n scope (Scope): Current scope\n returns:\n mixed", "id": "f12447:c0:m1"} {"signature": "def copy(self):", "body": "return self", "docstring": "Returns self (used when Block objects are copy'd)\n returns:\n self", "id": "f12447:c0:m2"} {"signature": "def parse(self, scope):", "body": "if not self.parsed:self.parsed = ''.join(self.process(self.tokens, scope))return self.parsed", "docstring": "Parse node\n args:\n scope (Scope): current scope\n raises:\n SyntaxError\n returns:\n parsed", "id": "f12448:c0:m0"} {"signature": "def copy(self):", "body": "return Import([t for t in self.tokens], )", "docstring": "Return a full copy of self\n Returns:\n Import object", "id": "f12448:c0:m2"} {"signature": "def parse(self, scope):", "body": "self.name, _, self.value = self.tokensif isinstance(self.name, tuple):if len(self.name) > :self.name, pad = self.nameself.value.append(pad)else:self.name = self.name[]scope.add_variable(self)return self", "docstring": "Parse function\n args:\n scope (Scope): Scope object\n returns:\n self", "id": "f12449:c0:m0"} {"signature": "def copy(self):", "body": "return Variable([t for t in self.tokens])", "docstring": "Return a copy of self\n Returns:\n Variable object", "id": "f12449:c0:m1"} {"signature": "def parse(self, scope):", "body": "names = []name = []self._subp = ('', '', '','', '')if self.tokens and hasattr(self.tokens, ''):self.tokens = list(utility.flatten([id.split() + ['']for id in self.tokens.parse(scope).split('')]))self.tokens.pop()if self.tokens and any(hasattr(t, '') for t in self.tokens):tmp_tokens = []for t in self.tokens:if hasattr(t, ''):tmp_tokens.append(t.parse(scope))else:tmp_tokens.append(t)self.tokens = list(utility.flatten(tmp_tokens))if self.tokens and self.tokens[] in self._subp:name = list(utility.flatten(self.tokens))self.subparse = Trueelse:self.subparse = Falsefor n in utility.flatten(self.tokens):if n == '':name.append('')elif n in '':if name and name[-] == '':name.pop()name.append('' % n)elif n == '':names.append(name)name = []else:name.append(n)names.append(name)parsed = self.root(scope, names) if scope else namesdef replace_variables(tokens, scope):return [scope.swap(t)if (utility.is_variable(t) and not t in reserved.tokens) else tfor t in tokens]parsed = [list(utility.flatten(replace_variables(part, scope)))for part in parsed]self.parsed = [[i for i, j in utility.pairwise(part)if i != '' or (j and '' not in j)] for part in parsed]return self", "docstring": "Parse node. 
Block identifiers are stored as\n strings with spaces replaced with ?\n args:\n scope (Scope): Current scope\n raises:\n SyntaxError\n returns:\n self", "id": "f12450:c0:m0"} {"signature": "def root(self, scope, names):", "body": "parent = scope.scopenameif parent:parent = parent[-]if parent.parsed:parsed_names = []for name in names:ampersand_count = name.count('')if ampersand_count:filtered_parts = []for part in parent.parsed:if part and part[] not in self._subp:filtered_parts.append(part)permutations = list(utility.permutations_with_replacement(filtered_parts, ampersand_count))for permutation in permutations:parsed = []for name_part in name:if name_part == \"\":parent_part = permutation.pop()if parsed and parsed[-].endswith(''):parsed.extend('')if parent_part[-] == '':parent_part.pop()parsed.extend(parent_part)else:parsed.append(name_part)parsed_names.append(parsed)else:for part in parent.parsed:if part and part[] not in self._subp:parsed = []if name[] == \"\":parsed.extend(name)else:parsed.extend(part)if part[-] != '':parsed.append('')parsed.extend(name)parsed_names.append(parsed)else:parsed_names.append(name)return parsed_namesreturn names", "docstring": "Find root of identifier, from scope\n args:\n scope (Scope): current scope\n names (list): identifier name list (, separated identifiers)\n returns:\n list", "id": "f12450:c0:m1"} {"signature": "def raw(self, clean=False):", "body": "if clean:return ''.join(''.join(p) for p in self.parsed).replace('', '')return ''.join(''.join(p) for p in self.parsed).strip().strip('')", "docstring": "Raw identifier.\n args:\n clean (bool): clean name\n returns:\n str", "id": "f12450:c0:m2"} {"signature": "def copy(self):", "body": "tokens = ([t for t in self.tokens]if isinstance(self.tokens, list) else self.tokens)return Identifier(tokens, )", "docstring": "Return copy of self\n Returns:\n Identifier object", "id": "f12450:c0:m3"} {"signature": "def fmt(self, fills):", "body": "name = ''.join(''.join(p).strip() for p in self.parsed)name = re.sub('', '', name) % fillsreturn name.replace('', fills['']).replace('', '')", "docstring": "Format identifier\n args:\n fills (dict): replacements\n returns:\n str (CSS)", "id": "f12450:c0:m4"} {"signature": "def parse(self, scope):", "body": "if not self.parsed:scope.push()self.name, inner = self.tokensscope.current = self.namescope.real.append(self.name)if not self.name.parsed:self.name.parse(scope)if not inner:inner = []inner = list(utility.flatten([p.parse(scope) for p in inner if p]))self.parsed = []self.inner = []if not hasattr(self, \"\"):self.inner_media_queries = []for p in inner:if p is not None:if isinstance(p, Block):if (len(scope) == and p.tokens[] is not None):p_is_mediaquery = p.name.tokens[] == ''append_list = []reparse_p = Falsefor child in p.tokens[]:if isinstance(child, Block) and child.name.raw().startswith(\"\"):p.tokens[].remove(child)if p_is_mediaquery: reparse_p = Truepart_a = p.name.tokens[:][][][]part_b = child.name.tokens[:][][]new_ident_tokens = ['', '', [part_a, ('', '', ''),part_b]]child.tokens[] = Identifier(new_ident_tokens)child.parsed = Nonechild = child.parse(scope)else:child.block_name = p.nameappend_list.append(child)if reparse_p:p.parsed = Nonep = p.parse(scope)if not p_is_mediaquery and not append_list:self.inner.append(p)else:append_list.insert(, p) for media_query in append_list:self.inner_media_queries.append(media_query)else:self.inner.append(p)else:self.parsed.append(p)if self.inner_media_queries:scope.remove_block(self, index=-)for mb in self.inner_media_queries:if 
hasattr(mb, ''):cb_name = mb.block_nameelse:cb_name = self.tokens[]cb = Block([cb_name, mb.tokens[]]).parse(scope)new_mb = Block([mb.tokens[], [cb]]).parse(scope)self.inner.append(new_mb)scope.add_block(new_mb)scope.real.pop()scope.pop()return self", "docstring": "Parse block node.\n args:\n scope (Scope): Current scope\n raises:\n SyntaxError\n returns:\n self", "id": "f12451:c0:m0"} {"signature": "def raw(self, clean=False):", "body": "try:return self.tokens[].raw(clean)except (AttributeError, TypeError):pass", "docstring": "Raw block name\n args:\n clean (bool): clean name\n returns:\n str", "id": "f12451:c0:m1"} {"signature": "def fmt(self, fills):", "body": "f = \"\"out = []name = self.name.fmt(fills)if self.parsed and any(p for p in self.parsedif str(type(p)) != \"\"):fills.update({'':name,'':''.join([p.fmt(fills) for p in self.parsed if p]),})out.append(f % fills)if hasattr(self, ''):if self.name.subparse and len(self.inner) > : inner = ''.join([p.fmt(fills) for p in self.inner])inner = inner.replace(fills[''],fills[''] + fills['']).rstrip(fills[''])if not fills['']:inner = inner.strip()fills.update({'': name,'': fills[''] + inner})out.append(f % fills)else:out.append(''.join([p.fmt(fills) for p in self.inner]))return ''.join(out)", "docstring": "Format block (CSS)\n args:\n fills (dict): Fill elements\n returns:\n str (CSS)", "id": "f12451:c0:m2"} {"signature": "def copy(self):", "body": "name, inner = self.tokensif inner:inner = [u.copy() if u else u for u in inner]if name:name = name.copy()return Block([name, inner], )", "docstring": "Return a full copy of self\n returns: Block object", "id": "f12451:c0:m3"} {"signature": "def copy_inner(self, scope):", "body": "if self.tokens[]:tokens = [u.copy() if u else u for u in self.tokens[]]out = [p for p in tokens if p]utility.rename(out, scope, Block)return outreturn None", "docstring": "Copy block contents (properties, inner blocks).\n Renames inner block from current scope.\n Used for mixins.\n args:\n scope (Scope): Current scope\n returns:\n list (block contents)", "id": "f12451:c0:m4"} {"signature": "def __init__(self, tokens, lineno=):", "body": "self.tokens = tokensself.lineno = linenoself.parsed = False", "docstring": "Base Node\n args:\n tokens (list): tokenlist\n lineno (int): Line number of node", "id": "f12452:c0:m0"} {"signature": "def parse(self, scope):", "body": "return self", "docstring": "Base parse function\n args:\n scope (Scope): Current scope\n returns:\n self", "id": "f12452:c0:m1"} {"signature": "def process(self, tokens, scope):", "body": "while True:tokens = list(utility.flatten(tokens))done = Trueif any(t for t in tokens if hasattr(t, '')):tokens = [t.parse(scope) if hasattr(t, '') else tfor t in tokens]done = Falseif any(t for t in tokensif (utility.is_variable(t)) or str(type(t)) ==\"\"):tokens = self.replace_variables(tokens, scope)done = Falseif done:breakreturn tokens", "docstring": "Process tokenslist, flattening and parsing it\n args:\n tokens (list): tokenlist\n scope (Scope): Current scope\n returns:\n list", "id": "f12452:c0:m2"} {"signature": "def replace_variables(self, tokens, scope):", "body": "list = []for t in tokens:if utility.is_variable(t):list.append(scope.swap(t))elif str(type(t)) == \"\":list.append(scope.swap(t.name))else:list.append(t)return list", "docstring": "Replace variables in tokenlist\n args:\n tokens (list): tokenlist\n scope (Scope): Current scope\n returns:\n list", "id": "f12452:c0:m3"} {"signature": "def fmt(self, fills):", "body": "raise ValueError('')", "docstring": 
"Format node\n args:\n fills (dict): replacements\n returns:\n str", "id": "f12452:c0:m4"} {"signature": "def parse(self, scope):", "body": "name = ''.join(self.tokens[])parsed = self.process(self.tokens[:], scope)if name == '':name = ''elif name in ('', ''):name = ''color = Color.Color()args = [t for t in parsedif not isinstance(t, string_types) or t not in '']if hasattr(self, name):try:return getattr(self, name)(*args)except ValueError:passif hasattr(color, name):try:result = getattr(color, name)(*args)try:return result + ''except TypeError:return resultexcept ValueError:passreturn name + ''.join([p for p in parsed])", "docstring": "Parse Node within scope.\n the functions ~( and e( map to self.escape\n and %( maps to self.sformat\n args:\n scope (Scope): Current scope", "id": "f12453:c0:m0"} {"signature": "def escape(self, string, *args):", "body": "return utility.destring(string.strip(''))", "docstring": "Less Escape.\n args:\n string (str): string to escape\n returns:\n str", "id": "f12453:c0:m1"} {"signature": "def sformat(self, string, *args):", "body": "format = stringitems = []m = re.findall('', format)if m and not args:raise SyntaxError('')i = for n in m:v = {'': urlquote,'': utility.destring,}.get(n, str)(args[i])items.append(v)i += format = format.replace('', '')format = format.replace('', '')return format % tuple(items)", "docstring": "String format.\n args:\n string (str): string to format\n args (list): format options\n returns:\n str", "id": "f12453:c0:m2"} {"signature": "def isnumber(self, string, *args):", "body": "try:n, u = utility.analyze_number(string)except SyntaxError:return Falsereturn True", "docstring": "Is number\n args:\n string (str): match\n returns:\n bool", "id": "f12453:c0:m3"} {"signature": "def iscolor(self, string, *args):", "body": "return (string in lessColors)", "docstring": "Is color\n args:\n string (str): match\n returns:\n bool", "id": "f12453:c0:m4"} {"signature": "def isurl(self, string, *args):", "body": "arg = utility.destring(string)regex = re.compile(r'' r''r'' r''r'' r''r'',re.IGNORECASE)return regex.match(arg)", "docstring": "Is url\n args:\n string (str): match\n returns:\n bool", "id": "f12453:c0:m5"} {"signature": "def isstring(self, string, *args):", "body": "regex = re.compile(r'')return regex.match(string)", "docstring": "Is string\n args:\n string (str): match\n returns:\n bool", "id": "f12453:c0:m6"} {"signature": "def iskeyword(self, string, *args):", "body": "return (string in ('', '', ''))", "docstring": "Is less keyword\n args:\n string (str): match\n returns:\n bool", "id": "f12453:c0:m7"} {"signature": "def increment(self, value, *args):", "body": "n, u = utility.analyze_number(value)return utility.with_unit(n + , u)", "docstring": "Increment function\n args:\n value (str): target\n returns:\n str", "id": "f12453:c0:m8"} {"signature": "def decrement(self, value, *args):", "body": "n, u = utility.analyze_number(value)return utility.with_unit(n - , u)", "docstring": "Decrement function\n args:\n value (str): target\n returns:\n str", "id": "f12453:c0:m9"} {"signature": "def add(self, *args):", "body": "if (len(args) <= ):return return sum([int(v) for v in args])", "docstring": "Add integers\n args:\n args (list): target\n returns:\n str", "id": "f12453:c0:m10"} {"signature": "def round(self, value, *args):", "body": "n, u = utility.analyze_number(value)return utility.with_unit(int(utility.away_from_zero_round(float(n))), u)", "docstring": "Round number\n args:\n value (str): target\n returns:\n str", "id": "f12453:c0:m11"} 
{"signature": "def ceil(self, value, *args):", "body": "n, u = utility.analyze_number(value)return utility.with_unit(int(math.ceil(n)), u)", "docstring": "Ceil number\n args:\n value (str): target\n returns:\n str", "id": "f12453:c0:m12"} {"signature": "def floor(self, value, *args):", "body": "n, u = utility.analyze_number(value)return utility.with_unit(int(math.floor(n)), u)", "docstring": "Floor number\n args:\n value (str): target\n returns:\n str", "id": "f12453:c0:m13"} {"signature": "def percentage(self, value, *args):", "body": "n, u = utility.analyze_number(value)n = int(n * )u = ''return utility.with_unit(n, u)", "docstring": "Return percentage value\n args:\n value (str): target\n returns:\n str", "id": "f12453:c0:m14"} {"signature": "def ldirectory(inpath, outpath, args, scope):", "body": "yacctab = '' if args.debug else Noneif not outpath:sys.exit(\"\")else:if not os.path.isdir(outpath):if args.verbose:print(\"\" % outpath, file=sys.stderr)if not args.dry_run:os.mkdir(outpath)less = glob.glob(os.path.join(inpath, ''))f = formatter.Formatter(args)for lf in less:outf = os.path.splitext(os.path.basename(lf))minx = '' if args.min_ending else ''outf = \"\" % (outpath, outf[], minx)if not args.force and os.path.exists(outf):recompile = os.path.getmtime(outf) < os.path.getmtime(lf)else:recompile = Trueif recompile:print('' % (lf, outf))p = parser.LessParser(yacc_debug=(args.debug),lex_optimize=True,yacc_optimize=(not args.debug),scope=scope,tabfile=yacctab,verbose=args.verbose)p.parse(filename=lf, debuglevel=)css = f.format(p)if not args.dry_run:with open(outf, '') as outfile:outfile.write(css)elif args.verbose:print('' % lf, file=sys.stderr)sys.stdout.flush()if args.recurse:[ldirectory(os.path.join(inpath, name), os.path.join(outpath, name), args,scope) for name in os.listdir(inpath)if os.path.isdir(os.path.join(inpath, name))and not name.startswith('') and not name == outpath]", "docstring": "Compile all *.less files in directory\n Args:\n inpath (str): Path to compile\n outpath (str): Output directory\n args (object): Argparse Object\n scope (Scope): Scope object or None", "id": "f12455:m0"} {"signature": "def run():", "body": "aparse = argparse.ArgumentParser(description='', epilog='')aparse.add_argument('', '', action='', version=VERSION_STR)aparse.add_argument('','',action=\"\",type=str,help=\"\")aparse.add_argument('','',action=\"\",default=False,help=\"\")aparse.add_argument('','',action=\"\",default=False,help=\"\")fgroup = aparse.add_argument_group('')fgroup.add_argument('','',action=\"\",default=False,help=\"\")fgroup.add_argument('','',action=\"\",default=False,help=\"\")fgroup.add_argument('', '', help=\"\", action=\"\")fgroup.add_argument('','',help=\"\",default=)dgroup = aparse.add_argument_group('', '''')dgroup.add_argument('', '', action=\"\", help=\"\")dgroup.add_argument('','',action=\"\",help=\"\")dgroup.add_argument('','',action=\"\",help=\"\")dgroup.add_argument('','',action=\"\",default=False,help=\"\")dgroup.add_argument('','',action=\"\",default=False,help=\"\")group = aparse.add_argument_group('')group.add_argument('','',action=\"\",default=False,help=\"\")group.add_argument('','',action=\"\",default=False,help=\"\")group.add_argument('','',action=\"\",default=False,help=\"\")group.add_argument('','',action=\"\",default=False,help=\"\")aparse.add_argument('', help=\"\")aparse.add_argument('', nargs='', help=\"\")args = aparse.parse_args()try:if args.lex_only:lex = lexer.LessLexer()ll = lex.file(args.target)while True:tok = ll.token()if not tok:breakif 
hasattr(tok,\"\"): print(tok, \"\", tok.lexer.lexstate)else:print(tok)print('')sys.exit()yacctab = '' if args.debug else Nonescope = Noneif args.include:for u in args.include.split(''):if os.path.exists(u):p = parser.LessParser(yacc_debug=(args.debug),lex_optimize=True,yacc_optimize=(not args.debug),tabfile=yacctab,verbose=args.verbose)p.parse(filename=u, debuglevel=args.debug)if not scope:scope = p.scopeelse:scope.update(p.scope)else:sys.exit('' % u)sys.stdout.flush()p = Nonef = formatter.Formatter(args)if not os.path.exists(args.target):sys.exit(\"\" % args.target)if os.path.isdir(args.target):ldirectory(args.target, args.out, args, scope)if args.dry_run:print('', file=sys.stderr)else:p = parser.LessParser(yacc_debug=(args.debug),lex_optimize=True,yacc_optimize=(not args.debug),scope=copy.deepcopy(scope),verbose=args.verbose)p.parse(filename=args.target, debuglevel=args.debug)if args.scopemap:args.no_css = Truep.scopemap()if not args.no_css and p:out = f.format(p)if args.output:if not args.dont_create_dirs and not os.path.exists(os.path.dirname(args.output)):try:os.makedirs(os.path.dirname(args.output))except OSError as exc: if exc.errno != errno.EEXIST:raisewith open(args.output, \"\") as f:f.write(out)else:print(out)except (KeyboardInterrupt, SystemExit, IOError):sys.exit('')", "docstring": "Run compiler", "id": "f12455:m1"} {"signature": "def get_version():", "body": "return ''.join(str(i) for i in VERSION[:])", "docstring": "Returns only digit parts of version.", "id": "f12465:m1"} {"signature": "def get_user_ip(request):", "body": "ip = get_real_ip(request)if ip is None:ip = get_ip(request)if ip is None:ip = ''return ip", "docstring": "Return user ip\n\n :param request: Django request object\n :return: user ip", "id": "f12466:m0"} {"signature": "def __init__(self, **kwargs):", "body": "api_key = kwargs.get('', None)api_host = kwargs.get('', None)api_timeout = kwargs.get('', None)use_cache = kwargs.get('', None)cache_backend = kwargs.get('', None)cache_timeout = kwargs.get('', None)no_api_key = Trueif hasattr(settings, ''):self._api_key = api_key if api_key else settings.CACHED_HTTPBL_API_KEYif self._api_key is not None:no_api_key = Falseif no_api_key:raise ImproperlyConfigured('''')self._last_result = Noneself._api_host = api_host if api_host else settings.CACHED_HTTPBL_API_HOSTself._api_timeout = api_timeout if api_timeout else settings.CACHED_HTTPBL_API_TIMEOUTself._use_cache = use_cache if use_cache else settings.CACHED_HTTPBL_USE_CACHEself._cache_backend = cache_backend if cache_backend else settings.CACHED_HTTPBL_CACHE_BACKENDself._cache_timeout = cache_timeout if cache_timeout else settings.CACHED_HTTPBL_CACHE_TIMEOUTself._cache_version = if self._use_cache and self._cache_backend is None:self._cache_backend = ''if self._use_cache:try:self._cache = cache.caches[self._cache_backend]try:self._cache_version = int(self._cache.get(''.format(self._api_key)))except TypeError:self._cache.set(''.format(self._api_key), str())except cache.InvalidCacheBackendError:raise ImproperlyConfigured('')", "docstring": "Instantiate the CachedHTTPBL object.\n\n:param kwargs: optional parameters\n:return:", "id": "f12468:c0:m0"} {"signature": "def check_ip(self, ip):", "body": "self._last_result = Noneif is_valid_ipv4(ip):key = Noneif self._use_cache:key = self._make_cache_key(ip)self._last_result = self._cache.get(key, version=self._cache_version)if self._last_result is None:error, age, threat, type = self._request_httpbl(ip)if error == or error == :self._last_result = {'': error,'': age,'': 
threat,'': type}if self._use_cache:self._cache.set(key, self._last_result, timeout=self._api_timeout, version=self._cache_version)if self._last_result is not None and settings.CACHED_HTTPBL_USE_LOGGING:logger.info(''''.format(ip,self._last_result[''],self._last_result[''],self._last_result[''],self._last_result['']))return self._last_result", "docstring": "Check IP through the httpBL API\n\n:param ip: ipv4 ip address\n:return: httpBL results or None if any error occurred", "id": "f12468:c0:m3"} {"signature": "def is_threat(self, result=None, harmless_age=None, threat_score=None, threat_type=None):", "body": "harmless_age = harmless_age if harmless_age is not None else settings.CACHED_HTTPBL_HARMLESS_AGEthreat_score = threat_score if threat_score is not None else settings.CACHED_HTTPBL_THREAT_SCOREthreat_type = threat_type if threat_type is not None else -result = result if result is not None else self._last_resultthreat = Falseif result is not None:if result[''] < harmless_age and result[''] > threat_score:threat = Trueif threat_type > -:if result[''] & threat_type:threat = Trueelse:threat = Falsereturn threat", "docstring": "Check if IP is a threat\n\n:param result: httpBL results; if None, then results from last check_ip() used (optional)\n:param harmless_age: harmless age for check if httpBL age is older (optional)\n:param threat_score: threat score for check if httpBL threat is lower (optional)\n:param threat_type: threat type, if not equal httpBL score type, then return False (optional)\n:return: True or False", "id": "f12468:c0:m4"} {"signature": "def is_suspicious(self, result=None):", "body": "result = result if result is not None else self._last_resultsuspicious = Falseif result is not None:suspicious = True if result[''] > else Falsereturn suspicious", "docstring": "Check if IP is suspicious\n\n:param result: httpBL results; if None, then results from last check_ip() used (optional)\n:return: True or False", "id": "f12468:c0:m5"} {"signature": "def invalidate_ip(self, ip):", "body": "if self._use_cache:key = self._make_cache_key(ip)self._cache.delete(key, version=self._cache_version)", "docstring": "Invalidate httpBL cache for IP address\n\n:param ip: ipv4 IP address", "id": "f12468:c0:m6"} {"signature": "def invalidate_cache(self):", "body": "if self._use_cache:self._cache_version += self._cache.increment(''.format(self._api_key))", "docstring": "Invalidate httpBL cache", "id": "f12468:c0:m7"} {"signature": "def cached_httpbl_exempt(view_func):", "body": "def wrapped_view(*args, **kwargs):return view_func(*args, **kwargs)wrapped_view.cached_httpbl_exempt = Truereturn wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)", "docstring": "Marks a view function as being exempt from the cached httpbl view protection.", "id": "f12469:m0"} {"signature": "def __init__(self, oauth_version, consumer_key, consumer_secret, **kwargs):", "body": "self.oauth_version = oauth_versiondata = {}if kwargs.get(''):logger.debug(\"\")self.from_file = kwargs.get('')data = get_data(self.from_file)vars(self).update(data)else:self.consumer_key = consumer_keyself.consumer_secret = consumer_secretvars(self).update(kwargs)self.oauth_version = oauth_versionself.callback_uri = vars(self).get('',CALLBACK_URI)if self.oauth_version == '':service_params = {'': self.consumer_key,'' : self.consumer_secret,'': services[self.oauth_version]['']}else:service_params = {'': self.consumer_key,'': self.consumer_secret}service_params.update({'' : '','' : services[self.oauth_version][''],'' : 
services[self.oauth_version][''],'': vars(self).get('',None)})self.oauth = services[oauth_version][''](**service_params)if vars(self).get('') and vars(self).get('') and vars(self).get(''):if not self.token_is_valid():data.update(self.refresh_access_token())elif vars(self).get('') and vars(self).get('') and vars(self).get(''):if not self.token_is_valid():data.update(self.refresh_access_token())else:data.update(self.handler()) if self.oauth_version == '':self.session = self.oauth.get_session((self.access_token, self.access_token_secret))else:self.session = self.oauth.get_session(token=self.access_token)write_data(data, vars(self).get('',''))", "docstring": "consumer_key : client key\nconsumer_secret : client secret\naccess_token : access token\naccess_token_secret : access token secret\nfrom_file : file containing the credentials\nbase_url : Base url", "id": "f12475:c0:m0"} {"signature": "def handler(self,):", "body": "if self.oauth_version == '':request_token, request_token_secret = self.oauth.get_request_token(params={'': self.callback_uri})logger.debug(\"\".format(request_token, request_token_secret))authorize_url = self.oauth.get_authorize_url(request_token)else:authorize_url = self.oauth.get_authorize_url(client_secret=self.consumer_secret, redirect_uri=self.callback_uri, response_type='')logger.debug(\"\".format(authorize_url))webbrowser.open(authorize_url)self.verifier = input(\"\")self.token_time = time.time()credentials = {'': self.token_time}if self.oauth_version == '':raw_access = self.oauth.get_raw_access_token(request_token, request_token_secret, params={\"\": self.verifier})parsed_access = parse_utf8_qsl(raw_access.content)self.access_token = parsed_access['']self.access_token_secret = parsed_access['']self.session_handle = parsed_access['']self.guid = parsed_access['']credentials.update({'': self.access_token,'': self.access_token_secret,'': self.session_handle,'': self.guid})else:headers = self.generate_oauth2_headers()raw_access = self.oauth.get_raw_access_token(data={\"\": self.verifier, '': self.callback_uri,'':''}, headers=headers)credentials.update(self.oauth2_access_parser(raw_access))return credentials", "docstring": "* get request token if OAuth1\n * Get user authorization\n * Get access token", "id": "f12475:c0:m1"} {"signature": "def generate_oauth2_headers(self):", "body": "encoded_credentials = base64.b64encode((''.format(self.consumer_key,self.consumer_secret)).encode(''))headers={'':''.format(encoded_credentials.decode('')),'': ''}return headers", "docstring": "Generates header for oauth2", "id": "f12475:c0:m2"} {"signature": "def oauth2_access_parser(self, raw_access):", "body": "parsed_access = json.loads(raw_access.content.decode(''))self.access_token = parsed_access['']self.token_type = parsed_access['']self.refresh_token = parsed_access['']self.guid = parsed_access['']credentials = {'': self.access_token,'': self.token_type,'': self.refresh_token,'': self.guid}return credentials", "docstring": "Parse oauth2 access", "id": "f12475:c0:m3"} {"signature": "def refresh_access_token(self,):", "body": "logger.debug(\"\")self.token_time = time.time()credentials = {'': self.token_time}if self.oauth_version == '':self.access_token, self.access_token_secret = self.oauth.get_access_token(self.access_token, self.access_token_secret, params={\"\": self.session_handle})credentials.update({'': self.access_token,'': self.access_token_secret,'': self.session_handle,'': self.token_time})else:headers = self.generate_oauth2_headers()raw_access = 
self.oauth.get_raw_access_token(data={\"\": self.refresh_token, '': self.callback_uri,'':''}, headers=headers)credentials.update(self.oauth2_access_parser(raw_access)) return credentials", "docstring": "Refresh access token", "id": "f12475:c0:m4"} {"signature": "def token_is_valid(self,):", "body": "elapsed_time = time.time() - self.token_timelogger.debug(\"\".format(elapsed_time))if elapsed_time > : logger.debug(\"\")return Falselogger.debug(\"\")return True", "docstring": "Check the validity of the token :3600s", "id": "f12475:c0:m5"} {"signature": "def get_data(filename):", "body": "name, ext = get_file_extension(filename)func = json_get_data if ext == '' else yaml_get_datareturn func(filename)", "docstring": "Calls right function according to file extension", "id": "f12477:m1"} {"signature": "def write_data(data, filename):", "body": "name, ext = get_file_extension(filename)func = json_write_data if ext == '' else yaml_write_datareturn func(data, filename)", "docstring": "Call right func to save data according to file extension", "id": "f12477:m2"} {"signature": "def json_write_data(json_data, filename):", "body": "with open(filename, '') as fp:json.dump(json_data, fp, indent=, sort_keys=True, ensure_ascii=False)return Truereturn False", "docstring": "Write json data into a file", "id": "f12477:m3"} {"signature": "def json_get_data(filename):", "body": "with open(filename) as fp:json_data = json.load(fp)return json_datareturn False", "docstring": "Get data from json file", "id": "f12477:m4"} {"signature": "def yaml_get_data(filename):", "body": "with open(filename, '') as fd:yaml_data = yaml.load(fd)return yaml_datareturn False", "docstring": "Get data from .yml file", "id": "f12477:m5"} {"signature": "def yaml_write_data(yaml_data, filename):", "body": "with open(filename, '') as fd:yaml.dump(yaml_data, fd, default_flow_style=False)return Truereturn False", "docstring": "Write data into a .yml file", "id": "f12477:m6"} {"signature": "def __init__(self, name, level=logging.DEBUG):", "body": "super(YahooLogger, self).__init__(name)self.name = nameself.level = levelself.setLevel(self.level)formatter = logging.Formatter(\"\")stream_handler = logging.StreamHandler()stream_handler.setFormatter(formatter)self.addHandler(stream_handler)", "docstring": "- name : logger name\n- filename : file containing logs", "id": "f12478:c0:m0"} {"signature": "def passcode(callsign):", "body": "assert isinstance(callsign, str)callsign = callsign.split('')[].upper()code = for i, char in enumerate(callsign):code ^= ord(char) << ( if not i % else )return code & ", "docstring": "Takes a CALLSIGN and returns passcode", "id": "f12491:m0"} {"signature": "def parse_header(head):", "body": "try:(fromcall, path) = head.split('>', )except:raise ParseError(\"\")if (not <= len(fromcall) <= ornot re.findall(r\"\", fromcall, re.I)):raise ParseError(\"\")path = path.split('')if len(path[]) == :raise ParseError(\"\")tocall = path[]path = path[:]validate_callsign(tocall, \"\")for digi in path:if not re.findall(r\"\", digi, re.I):raise ParseError(\"\")parsed = {'': fromcall,'': tocall,'': path,}viacall = \"\"if len(path) >= and re.match(r\"\", path[-]):viacall = path[-]parsed.update({'': viacall})return parsed", "docstring": "Parses the header part of packet\nReturns a dict", "id": "f12495:m1"} {"signature": "def parse(packet):", "body": "if not isinstance(packet, string_type_parse):raise TypeError(\"\", type(packet))if len(packet) == :raise ParseError(\"\", packet)if isinstance(packet, bytes):packet = 
_unicode_packet(packet)packet = packet.rstrip(\"\")logger.debug(\"\", packet)try:(head, body) = packet.split('', )except:raise ParseError(\"\", packet)if len(body) == :raise ParseError(\"\", packet)parsed = {'': packet,}try:parsed.update(parse_header(head))except ParseError as msg:raise ParseError(str(msg), packet)packet_type = body[]body = body[:]if len(body) == and packet_type != '>':raise ParseError(\"\", packet)try:_try_toparse_body(packet_type, body, parsed)except (UnknownFormat, ParseError) as exp:exp.packet = packetraiseif '' not in parsed:if not re.match(r\"\"\"\"\"\"\"\", parsed['']):raise UnknownFormat(\"\", packet)parsed.update({'': '','': packet_type + body,})logger.debug(\"\")return parsed", "docstring": "Parses an APRS packet and returns a dict with decoded data\n\n- All attributes are in metric units", "id": "f12496:m1"} {"signature": "def parse_comment_telemetry(text):", "body": "parsed = {}match = re.findall(r\"\", text)if match and len(match[][]) % == :text, telemetry, post = match[]text += posttemp = [] * for i in range():temp[i] = base91.to_decimal(telemetry[i*:i*+])parsed.update({'': {'': temp[],'': temp[:]}})if temp[] != '':parsed[''].update({'': \"\".format(temp[] & )[::-]})return (text, parsed)", "docstring": "Looks for base91 telemetry found in comment field\nReturns [remaining_text, telemetry]", "id": "f12498:m0"} {"signature": "def __init__(self, callsign, passwd=\"\", host=\"\", port=, skip_login=False):", "body": "self.logger = logging.getLogger(\"\" % (__name__, self.__class__.__name__))self._parse = parseself.set_server(host, port)self.set_login(callsign, passwd, skip_login)self.sock = Noneself.filter = \"\" self._connected = Falseself.buf = b''", "docstring": "callsign - used when login in\npasswd - for verification, or \"-1\" if only listening\nHost & port - aprs-is server", "id": "f12502:c0:m0"} {"signature": "def set_filter(self, filter_text):", "body": "self.filter = filter_textself.logger.info(\"\", self.filter)if self._connected:self._sendall(\"\" % self.filter)", "docstring": "Set a specified aprs-is filter for this connection", "id": "f12502:c0:m2"} {"signature": "def set_login(self, callsign, passwd=\"\", skip_login=False):", "body": "self.__dict__.update(locals())", "docstring": "Set callsign and password", "id": "f12502:c0:m3"} {"signature": "def set_server(self, host, port):", "body": "self.server = (host, port)", "docstring": "Set server ip/host and port to use", "id": "f12502:c0:m4"} {"signature": "def connect(self, blocking=False, retry=):", "body": "if self._connected:returnwhile True:try:self._connect()if not self.skip_login:self._send_login()breakexcept (LoginError, ConnectionError):if not blocking:raiseself.logger.info(\"\" % retry)time.sleep(retry)", "docstring": "Initiate connection to APRS server and attempt to login\n\nblocking = False - Should we block until connected and logged-in\nretry = 30 - Retry interval in seconds", "id": "f12502:c0:m5"} {"signature": "def close(self):", "body": "self._connected = Falseself.buf = b''if self.sock is not None:self.sock.close()", "docstring": "Closes the socket\nCalled internally when Exceptions are raised", "id": "f12502:c0:m6"} {"signature": "def sendall(self, line):", "body": "if isinstance(line, APRSPacket):line = str(line)elif not isinstance(line, string_type):raise TypeError(\"\", type(line))if not self._connected:raise ConnectionError(\"\")if line == \"\":returnline = line.rstrip(\"\") + \"\"try:self.sock.setblocking()self.sock.settimeout()self._sendall(line)except socket.error as 
exp:self.close()raise ConnectionError(str(exp))", "docstring": "Send a line, or multiple lines separated by '\\\\r\\\\n'", "id": "f12502:c0:m7"} {"signature": "def consumer(self, callback, blocking=True, immortal=False, raw=False):", "body": "if not self._connected:raise ConnectionError(\"\")line = b''while True:try:for line in self._socket_readlines(blocking):if line[:] != b'':if raw:callback(line)else:callback(self._parse(line))else:self.logger.debug(\"\", line.decode(''))except ParseError as exp:self.logger.log(, \"\", exp.message, exp.packet)except UnknownFormat as exp:self.logger.log(, \"\", exp.message, exp.packet)except LoginError as exp:self.logger.error(\"\", exp.__class__.__name__, exp.message)except (KeyboardInterrupt, SystemExit):raiseexcept (ConnectionDrop, ConnectionError):self.close()if not immortal:raiseelse:self.connect(blocking=blocking)continueexcept GenericError:passexcept StopIteration:breakexcept:self.logger.error(\"\", line)raiseif not blocking:break", "docstring": "When a position sentence is received, it will be passed to the callback function\n\nblocking: if true (default), runs forever, otherwise will return after one sentence\n          You can still exit the loop, by raising StopIteration in the callback function\n\nimmortal: When true, consumer will try to reconnect and stop propagation of Parse exceptions\n          if false (default), consumer will return\n\nraw: when true, raw packet is passed to callback, otherwise the result from aprs.parse()", "id": "f12502:c0:m8"} {"signature": "def _open_socket(self):", "body": "self.sock = socket.create_connection(self.server, )", "docstring": "Creates a socket", "id": "f12502:c0:m9"} {"signature": "def _connect(self):", "body": "self.logger.info(\"\", self.server[], self.server[])try:self._open_socket()peer = self.sock.getpeername()self.logger.info(\"\", str(peer))self.sock.setblocking()self.sock.settimeout()self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, )banner = self.sock.recv()if is_py3:banner = banner.decode('')if banner[] == \"\":self.logger.debug(\"\", banner.rstrip())else:raise ConnectionError(\"\")except ConnectionError as e:self.logger.error(str(e))self.close()raiseexcept (socket.error, socket.timeout) as e:self.close()self.logger.error(\"\" % str(e))if str(e) == \"\":raise ConnectionError(\"\")else:raise ConnectionError(e)self._connected = True", "docstring": "Attempts connection to the server", "id": "f12502:c0:m10"} {"signature": "def _send_login(self):", "body": "login_str = \"\"login_str = login_str.format(self.callsign,self.passwd,(\"\" + self.filter) if self.filter != \"\" else \"\",__version__)self.logger.info(\"\")try:self._sendall(login_str)self.sock.settimeout()test = self.sock.recv(len(login_str) + )if is_py3:test = test.decode('')test = test.rstrip()self.logger.debug(\"\", test)_, _, callsign, status, _ = test.split('', )if callsign == \"\":raise LoginError(\"\")if callsign != self.callsign:raise LoginError(\"\" % test)if status != \"\" and self.passwd != \"\":raise LoginError(\"\")if self.passwd == \"\":self.logger.info(\"\")else:self.logger.info(\"\")except LoginError as e:self.logger.error(str(e))self.close()raiseexcept:self.close()self.logger.error(\"\")raise LoginError(\"\")", "docstring": "Sends login string to server", "id": "f12502:c0:m11"} {"signature": "def _socket_readlines(self, blocking=False):", "body": "try:self.sock.setblocking()except socket.error as e:self.logger.error(\"\" % str(e))raise ConnectionDrop(\"\")while True:short_buf = b''newline = b''select.select([self.sock], [], [], 
None if blocking else )try:short_buf = self.sock.recv()if not short_buf:self.logger.error(\"\")raise ConnectionDrop(\"\")except socket.error as e:self.logger.error(\"\" % str(e))if \"\" in str(e):if not blocking:if len(self.buf) == :breakself.buf += short_bufwhile newline in self.buf:line, self.buf = self.buf.split(newline, )yield line", "docstring": "Generator for complete lines, received from the server", "id": "f12502:c0:m12"} {"signature": "def to_decimal(text):", "body": "if not isinstance(text, string_type):raise TypeError(\"\" % type(text))if findall(r\"\", text):raise ValueError(\"\")text = text.lstrip('')decimal = length = len(text) - for i, char in enumerate(text):decimal += (ord(char) - ) * ( ** (length - i))return decimal if text != '' else ", "docstring": "Takes a base91 char string and returns decimal", "id": "f12504:m0"} {"signature": "def from_decimal(number, width=):", "body": "text = []if not isinstance(number, int_type):raise TypeError(\"\", type(number))elif not isinstance(width, int_type):raise TypeError(\"\", type(number))elif number < :raise ValueError(\"\")elif number > :max_n = ceil(log(number) / log())for n in _range(int(max_n), -, -):quotient, number = divmod(number, **n)text.append(chr( + quotient))return \"\".join(text).lstrip('').rjust(max(, width), '')", "docstring": "Takes a decimal and returns base91 char string.\nWith optional parameter for fixed-width output", "id": "f12504:m1"} {"signature": "def try_convert_to_date(self, word):", "body": "for frm in self.search_date_formats:try:return datetime.datetime.strptime(word, frm).date()except ValueError:passreturn None", "docstring": "Tries to convert word to date(datetime) using search_date_formats\nReturns None if the word matches none of the formats", "id": "f12511:c0:m2"} {"signature": "def get_search_query(self):", "body": "return self.search_use_q and self.request.GET.get('', '').strip()", "docstring": "Get query from request.GET 'q' parameter when search_use_q is set to True\nOverride this method to provide your own query to search", "id": "f12511:c0:m3"} {"signature": "def get_params_for_field(self, field_name, sort_type=None):", "body": "if not sort_type:if self.initial_sort == field_name:sort_type = '' if self.initial_sort_type == '' else ''else:sort_type = ''self.initial_params[self.sort_param_name] = self.sort_fields[field_name]self.initial_params[self.sort_type_param_name] = sort_typereturn '' % self.initial_params.urlencode()", "docstring": "If sort_type is None, invert the current sort for the field; if the field is not sorted, use asc", "id": "f12511:c1:m2"} {"signature": "def get_formset(self):", "body": "result = generic_inlineformset_factory(self.inline_model, **self.get_factory_kwargs())return result", "docstring": "Returns the final formset class from generic_inlineformset_factory.", "id": "f12512:c0:m0"} {"signature": "def construct_formset(self):", "body": "formset_class = self.get_formset()if hasattr(self, ''):klass = type(self).__name__raise DeprecationWarning(''''''.format(klass),)return formset_class(**self.get_formset_kwargs())", "docstring": "Returns an instance of the formset", "id": "f12513:c0:m0"} {"signature": "def get_initial(self):", "body": "return self.initial[:]", "docstring": "Returns a copy of the initial data to use for formsets on this view.", "id": "f12513:c0:m1"} {"signature": "def get_prefix(self):", "body": "return self.prefix", "docstring": "Returns the prefix used for formsets on this view.", "id": "f12513:c0:m2"} {"signature": "def get_formset_class(self):", "body": "return self.formset_class", 
"docstring": "Returns the formset class to use in the formset factory", "id": "f12513:c0:m3"} {"signature": "def get_form_class(self):", "body": "return self.form_class", "docstring": "Returns the form class to use with the formset in this view", "id": "f12513:c0:m4"} {"signature": "def get_formset(self):", "body": "return formset_factory(self.get_form_class(), **self.get_factory_kwargs())", "docstring": "Returns the formset class from the formset factory", "id": "f12513:c0:m5"} {"signature": "def get_formset_kwargs(self):", "body": "kwargs = self.formset_kwargs.copy()kwargs.update({'': self.get_initial(),'': self.get_prefix(),})if self.request.method in ('', ''):kwargs.update({'': self.request.POST.copy(),'': self.request.FILES,})return kwargs", "docstring": "Returns the keyword arguments for instantiating the formset.", "id": "f12513:c0:m6"} {"signature": "def get_factory_kwargs(self):", "body": "for attr in ['', '', '', '', '','', '', '', '']:if hasattr(self, attr):klass = type(self).__name__raise DeprecationWarning(''''.format(klass, attr))kwargs = self.factory_kwargs.copy()if self.get_formset_class():kwargs[''] = self.get_formset_class()return kwargs", "docstring": "Returns the keyword arguments for calling the formset factory", "id": "f12513:c0:m7"} {"signature": "def get_success_url(self):", "body": "if self.success_url:url = self.success_urlelse:url = self.request.get_full_path()return url", "docstring": "Returns the supplied URL.", "id": "f12513:c2:m0"} {"signature": "def formset_valid(self, formset):", "body": "return HttpResponseRedirect(self.get_success_url())", "docstring": "If the formset is valid redirect to the supplied URL", "id": "f12513:c2:m1"} {"signature": "def formset_invalid(self, formset):", "body": "return self.render_to_response(self.get_context_data(formset=formset))", "docstring": "If the formset is invalid, re-render the context data with the\ndata-filled formset and errors.", "id": "f12513:c2:m2"} {"signature": "def get_formset_kwargs(self):", "body": "kwargs = super(ModelFormSetMixin, self).get_formset_kwargs()kwargs[''] = self.get_queryset()return kwargs", "docstring": "Returns the keyword arguments for instantiating the formset.", "id": "f12513:c3:m0"} {"signature": "def get_factory_kwargs(self):", "body": "kwargs = super(ModelFormSetMixin, self).get_factory_kwargs()kwargs.setdefault('', self.fields)kwargs.setdefault('', self.exclude)if self.get_form_class():kwargs[''] = self.get_form_class()return kwargs", "docstring": "Returns the keyword arguments for calling the formset factory", "id": "f12513:c3:m1"} {"signature": "def get_formset(self):", "body": "return modelformset_factory(self.model, **self.get_factory_kwargs())", "docstring": "Returns the formset class from the model formset factory", "id": "f12513:c3:m2"} {"signature": "def formset_valid(self, formset):", "body": "self.object_list = formset.save()return super(ModelFormSetMixin, self).formset_valid(formset)", "docstring": "If the formset is valid, save the associated models.", "id": "f12513:c3:m3"} {"signature": "def get_inline_model(self):", "body": "return self.inline_model", "docstring": "Returns the inline model to use with the inline formset", "id": "f12513:c4:m0"} {"signature": "def get_formset_kwargs(self):", "body": "if hasattr(self, ''):klass = type(self).__name__raise DeprecationWarning(''''.format(klass))kwargs = super(BaseInlineFormSetFactory, self).get_formset_kwargs()kwargs[''] = self.objectreturn kwargs", "docstring": "Returns the keyword arguments for instantiating the formset.", 
"id": "f12513:c4:m1"} {"signature": "def get_factory_kwargs(self):", "body": "kwargs = super(BaseInlineFormSetFactory, self).get_factory_kwargs()kwargs.setdefault('', self.fields)kwargs.setdefault('', self.exclude)if self.get_form_class():kwargs[''] = self.get_form_class()return kwargs", "docstring": "Returns the keyword arguments for calling the formset factory", "id": "f12513:c4:m2"} {"signature": "def get_formset(self):", "body": "return inlineformset_factory(self.model, self.get_inline_model(), **self.get_factory_kwargs())", "docstring": "Returns the formset class from the inline formset factory", "id": "f12513:c4:m3"} {"signature": "def get(self, request, *args, **kwargs):", "body": "formset = self.construct_formset()return self.render_to_response(self.get_context_data(formset=formset))", "docstring": "Handles GET requests and instantiates a blank version of the formset.", "id": "f12513:c7:m0"} {"signature": "def post(self, request, *args, **kwargs):", "body": "formset = self.construct_formset()if formset.is_valid():return self.formset_valid(formset)else:return self.formset_invalid(formset)", "docstring": "Handles POST requests, instantiating a formset instance with the passed\nPOST variables and then checked for validity.", "id": "f12513:c7:m1"} {"signature": "def daterange(start_date, end_date):", "body": "for n in range(int((end_date - start_date).days + )):yield start_date + datetime.timedelta(n)", "docstring": "Returns an iterator of dates between two provided ones", "id": "f12514:m0"} {"signature": "def get_end_date_field(self):", "body": "return self.end_date_field", "docstring": "Returns the model field to use for end dates", "id": "f12514:c0:m2"} {"signature": "def get_start_date(self, obj):", "body": "obj_date = getattr(obj, self.get_date_field())try:obj_date = obj_date.date()except AttributeError:passreturn obj_date", "docstring": "Returns the start date for a model instance", "id": "f12514:c0:m3"} {"signature": "def get_end_date(self, obj):", "body": "obj_date = getattr(obj, self.get_end_date_field())try:obj_date = obj_date.date()except AttributeError:passreturn obj_date", "docstring": "Returns the end date for a model instance", "id": "f12514:c0:m4"} {"signature": "def get_first_of_week(self):", "body": "if self.first_of_week is None:raise ImproperlyConfigured(\"\" % self.__class__.__name__)if self.first_of_week not in range():raise ImproperlyConfigured(\"\" % self.__class__.__name__)return self.first_of_week", "docstring": "Returns an integer representing the first day of the week.\n\n0 represents Monday, 6 represents Sunday.", "id": "f12514:c0:m5"} {"signature": "def get_queryset(self):", "body": "qs = super(BaseCalendarMonthView, self).get_queryset()year = self.get_year()month = self.get_month()date_field = self.get_date_field()end_date_field = self.get_end_date_field()date = _date_from_string(year, self.get_year_format(),month, self.get_month_format())since = dateuntil = self.get_next_month(date)if since.weekday() != self.get_first_of_week():diff = math.fabs(since.weekday() - self.get_first_of_week())since = since - datetime.timedelta(days=diff)if until.weekday() != ((self.get_first_of_week() + ) % ):diff = math.fabs(((self.get_first_of_week() + ) % ) - until.weekday())until = until + datetime.timedelta(days=diff)if end_date_field:predicate1 = Q(**{'' % date_field: since,end_date_field: None})predicate2 = Q(**{'' % date_field: since,'' % end_date_field: until})predicate3 = Q(**{'' % date_field: since,'' % end_date_field: since,'' % end_date_field: until})predicate4 = 
Q(**{'' % date_field: since,'' % date_field: until,'' % end_date_field: until})predicate5 = Q(**{'' % date_field: since,'' % end_date_field: until})return qs.filter(predicate1 | predicate2 | predicate3 | predicate4 | predicate5)return qs.filter(**{'' % date_field: since})", "docstring": "Returns a queryset of models for the month requested", "id": "f12514:c0:m6"} {"signature": "def get_context_data(self, **kwargs):", "body": "data = super(BaseCalendarMonthView, self).get_context_data(**kwargs)year = self.get_year()month = self.get_month()date = _date_from_string(year, self.get_year_format(),month, self.get_month_format())cal = Calendar(self.get_first_of_week())month_calendar = []now = datetime.datetime.utcnow()date_lists = defaultdict(list)multidate_objs = []for obj in data['']:obj_date = self.get_start_date(obj)end_date_field = self.get_end_date_field()if end_date_field:end_date = self.get_end_date(obj)if end_date and end_date != obj_date:multidate_objs.append({'': obj,'': [x for x in daterange(obj_date, end_date)]})continue date_lists[obj_date].append(obj)for week in cal.monthdatescalendar(date.year, date.month):week_range = set(daterange(week[], week[]))week_events = []for val in multidate_objs:intersect_length = len(week_range.intersection(val['']))if intersect_length:slot = width = intersect_length nowrap_previous = True nowrap_next = True if val[''][] >= week[]:slot = + (val[''][] - week[]).dayselse:nowrap_previous = Falseif val[''][-] > week[]:nowrap_next = Falseweek_events.append({'': val[''],'': slot,'': width,'': nowrap_previous,'': nowrap_next,})week_calendar = {'': week_events,'': [],}for day in week:week_calendar[''].append({'': day,'': date_lists[day],'': day == now.date(),'': day.month == date.month,})month_calendar.append(week_calendar)data[''] = month_calendardata[''] = [DAYS[x] for x in cal.iterweekdays()]data[''] = datedata[''] = self.get_next_month(date)data[''] = self.get_previous_month(date)return data", "docstring": "Injects variables necessary for rendering the calendar into the context.\n\nVariables added are: `calendar`, `weekdays`, `month`, `next_month` and `previous_month`.", "id": "f12514:c0:m7"} {"signature": "def construct_formset(self):", "body": "formset = super(InlineFormSetFactory, self).construct_formset()formset.model = self.inline_modelreturn formset", "docstring": "Overrides construct_formset to attach the model class as\nan attribute of the returned formset instance.", "id": "f12515:c0:m1"} {"signature": "def get_inlines(self):", "body": "return self.inlines", "docstring": "Returns the inline formset classes", "id": "f12515:c2:m0"} {"signature": "def forms_valid(self, form, inlines):", "body": "response = self.form_valid(form)for formset in inlines:formset.save()return response", "docstring": "If the form and formsets are valid, save the associated models.", "id": "f12515:c2:m1"} {"signature": "def forms_invalid(self, form, inlines):", "body": "return self.render_to_response(self.get_context_data(form=form, inlines=inlines))", "docstring": "If the form or formsets are invalid, re-render the context data with the\ndata-filled form and formsets and errors.", "id": "f12515:c2:m2"} {"signature": "def construct_inlines(self):", "body": "inline_formsets = []for inline_class in self.get_inlines():inline_instance = inline_class(self.model, self.request, self.object, self.kwargs, self)inline_formset = inline_instance.construct_formset()inline_formsets.append(inline_formset)return inline_formsets", "docstring": "Returns the inline formset instances", "id": 
"f12515:c2:m3"} {"signature": "def get(self, request, *args, **kwargs):", "body": "form_class = self.get_form_class()form = self.get_form(form_class)inlines = self.construct_inlines()return self.render_to_response(self.get_context_data(form=form, inlines=inlines, **kwargs))", "docstring": "Handles GET requests and instantiates a blank version of the form and formsets.", "id": "f12515:c3:m0"} {"signature": "def post(self, request, *args, **kwargs):", "body": "form_class = self.get_form_class()form = self.get_form(form_class)if form.is_valid():self.object = form.save(commit=False)form_validated = Trueelse:form_validated = Falseinlines = self.construct_inlines()if all_valid(inlines) and form_validated:return self.forms_valid(form, inlines)return self.forms_invalid(form, inlines)", "docstring": "Handles POST requests, instantiating a form and formset instances with the passed\nPOST variables and then checked for validity.", "id": "f12515:c3:m1"} {"signature": "def get_inlines_names(self):", "body": "return self.inlines_names", "docstring": "Returns a list of names of context variables for each inline in `inlines`.", "id": "f12515:c8:m0"} {"signature": "def get_context_data(self, **kwargs):", "body": "context = {}inlines_names = self.get_inlines_names()if inlines_names:context.update(zip(inlines_names, kwargs.get('', [])))if '' in kwargs:context[inlines_names[]] = kwargs['']context.update(kwargs)return super(NamedFormsetsMixin, self).get_context_data(**context)", "docstring": "If `inlines_names` has been defined, add each formset to the context under\nits corresponding entry in `inlines_names`", "id": "f12515:c8:m1"} {"signature": "def connect(self):", "body": "return self.response", "docstring": "Connect to http/https server.", "id": "f12528:c0:m1"} {"signature": "def close(self):", "body": "try:self.response.close()self.logger.debug(\"\")except Exception as e:self.unknown(\"\" % e)", "docstring": "Close the http/https connect.", "id": "f12528:c0:m2"} {"signature": "def execute(self, command, timeout=None):", "body": "try:self.channel = self.ssh.get_transport().open_session()except paramiko.SSHException as e:self.unknown(\"\" % e)try:self.channel.settimeout(self.args.timeout if not timeout else timeout)except socket.timeout as e:self.unknown(\"\" % e)try:self.logger.debug(\"\".format(command))self.channel.exec_command(command)except paramiko.SSHException as e:self.unknown(\"\" % e)try:self.stdin = self.channel.makefile('', -)self.stderr = map(string.strip, self.channel.makefile_stderr('', -).readlines())self.stdout = map(string.strip, self.channel.makefile('', -).readlines())except Exception as e:self.unknown(\"\" % e)try:self.status = self.channel.recv_exit_status()except paramiko.SSHException as e:self.unknown(\"\" % e)else:if self.status != :self.unknown(\"\" % (self.status, self.errors))else:return self.stdoutfinally:self.logger.debug(\"\")", "docstring": "Execute a shell command.", "id": "f12530:c0:m1"} {"signature": "def close(self):", "body": "try:self.ssh.close()self.logger.debug(\"\")except paramiko.SSHException as e:self.unknown(\"\" % e)", "docstring": "Close and exit the connection.", "id": "f12530:c0:m2"} {"signature": "def query(self, wql):", "body": "try:self.__wql = ['', '',self.args.domain + '' + self.args.user + '' + self.args.password,'' + self.args.host,'', self.args.namespace,'', self.args.delimiter,wql]self.logger.debug(\"\".format(self.__wql))self.__output = 
subprocess.check_output(self.__wql)self.logger.debug(\"\".format(self.__output))self.logger.debug(\"\")self.__wmi_output = self.__output.splitlines()[:]self.logger.debug(\"\".format(self.__wmi_output))self.__csv_header = csv.DictReader(self.__wmi_output, delimiter='')self.logger.debug(\"\".format(self.__csv_header))return list(self.__csv_header)except subprocess.CalledProcessError as e:self.unknown(\"\" % e)", "docstring": "Connect by wmi and run wql.", "id": "f12531:c0:m1"} {"signature": "def connect(self):", "body": "return self.ftp", "docstring": "Connect to ftp server.", "id": "f12532:c0:m1"} {"signature": "def quit(self):", "body": "try:self.ftp.quit()self.logger.debug(\"\")except ftplib.Error as e:self.unknown(\"\" % e)", "docstring": "Close and exit the connection.", "id": "f12532:c0:m2"} {"signature": "def close(self):", "body": "try:self.conn.close()self.logger.debug(\"\")except pymysql.Error as e:self.unknown(\"\" % e)", "docstring": "Close the connection.", "id": "f12534:c0:m2"} {"signature": "def close(self):", "body": "try:self.conn.close()self.logger.debug(\"\")except pymssql.Error as e:self.unknown(\"\" % e)", "docstring": "Close the connection.", "id": "f12535:c0:m2"} {"signature": "def read(readme):", "body": "extend = os.path.splitext(readme)[]if (extend == ''):import codecsreturn codecs.open(readme, '', '').read()elif (extend == ''):import pypandocreturn pypandoc.convert(readme, '')", "docstring": "Give reST format README for pypi.", "id": "f12537:m0"} {"signature": "def main():", "body": "plugin = Register()if plugin.args.option == '':plugin.command_handle()else:plugin.unknown(\"\")", "docstring": "Register your own mode and handle method here.", "id": "f12538:m0"} {"signature": "def command_handle(self):", "body": "self.__results = self.execute(self.args.command)self.close()self.logger.debug(\"\".format(self.__results))if not self.__results:self.unknown(\"\".format(self.args.command))if len(self.__results) != :self.unknown(\"\".format(self.args.command))self.__result = int(self.__results[])self.logger.debug(\"\".format(self.__result))if not isinstance(self.__result, (int, long)):self.unknown(\"\".format(self.args.command))status = self.okif self.__result > self.args.warning:status = self.warningif self.__result > self.args.critical:status = self.criticalself.shortoutput = \"\".format(self.args.command, self.__result)[self.longoutput.append(line)for line in self.__results if self.__results]self.perfdata.append(\"\".format(crit=self.args.critical,warn=self.args.warning,result=self.__result,command=self.args.command))status(self.output(long_output_limit=None))self.logger.debug(\"\")", "docstring": "Get the number of the shell command.", "id": "f12538:c0:m2"} {"signature": "def main():", "body": "plugin = Register()if plugin.args.option == '':plugin.sql_handle()else:plugin.unknown(\"\")", "docstring": "Register your own mode and handle method here.", "id": "f12539:m0"} {"signature": "def main():", "body": "plugin = Register()if plugin.args.option == '':plugin.filenumber_handle()elif plugin.args.option == '':plugin.fileage_handle()elif plugin.args.option == '':plugin.sqlserverlocks_handle()else:plugin.unknown(\"\")", "docstring": "Register your own mode and handle method here.", "id": "f12540:m0"} {"signature": "def filenumber_handle(self):", "body": "self.file_list = []self.count = status = self.okif self.args.recursion:self.__result, self.__file_list = self.__get_folder(self.args.path)else:self.__result, self.__file_list = self.__get_file(self.args.path)if self.__result > 
self.args.critical:status = self.criticalelif self.__result > self.args.warning:status = self.warningelse:status = self.okself.shortoutput = \"\".format(self.__result,self.args.path)self.logger.debug(\"\".format(self.__file_list))[self.longoutput.append(file_data.get(''))for file_data in self.__file_list]self.perfdata.append(\"\".format(crit=self.args.critical,warn=self.args.warning,result=self.__result,path=self.args.path))status(self.output(long_output_limit=None))self.logger.debug(\"\")", "docstring": "Get the number of file in the folder.", "id": "f12540:c0:m4"} {"signature": "def __get_current_datetime(self):", "body": "self.wql_time = \"\"self.current_time = self.query(self.wql_time)self.current_time_string = str(self.current_time[].get('').split('')[])self.current_time_format = datetime.datetime.strptime(self.current_time_string, '')return self.current_time_format", "docstring": "Get current datetime for every file.", "id": "f12540:c1:m4"} {"signature": "def fileage_handle(self):", "body": "self.file_list = []self.ok_file = []self.warn_file = []self.crit_file = []status = self.okif self.args.recursion:self.__file_list = self.__get_folder(self.args.path)else:self.__file_list = self.__get_file(self.args.path)self.logger.debug(\"\".format(self.__file_list))for file_dict in self.__file_list:self.filename = file_dict.get('')if self.filename and self.filename != '':self.logger.debug(\"\".format(self.filename))self.file_datetime_string = file_dict.get('').split('')[]self.file_datetime = datetime.datetime.strptime(self.file_datetime_string, '')self.logger.debug(\"\".format(self.file_datetime))self.current_datetime = self.__get_current_datetime()self.logger.debug(\"\".format(self.current_datetime))self.__delta_datetime = self.current_datetime - self.file_datetimeself.logger.debug(\"\".format(self.__delta_datetime))self.logger.debug(\"\".format(datetime.timedelta(minutes=self.args.warning)))self.logger.debug(\"\".format(datetime.timedelta(minutes=self.args.critical)))if self.__delta_datetime > datetime.timedelta(minutes=self.args.critical):self.crit_file.append(self.filename)elif self.__delta_datetime > datetime.timedelta(minutes=self.args.warning):self.warn_file.append(self.filename)else:self.ok_file.append(self.filename)if self.crit_file:status = self.criticalelif self.warn_file:status = self.warningelse:status = self.okself.shortoutput = \"\".format(len(self.crit_file))if self.crit_file:self.longoutput.append(\"\")[self.longoutput.append(filename)for filename in self.crit_file if self.crit_file]if self.warn_file:self.longoutput.append(\"\")[self.longoutput.append(filename)for filename in self.warn_file if self.warn_file]if self.ok_file:self.longoutput.append(\"\")[self.longoutput.append(filename)for filename in self.ok_file if self.ok_file]self.perfdata.append(\"\".format(crit=self.args.critical,warn=self.args.warning,result=len(self.crit_file),path=self.args.drive + self.args.path))status(self.output(long_output_limit=None))self.logger.debug(\"\")", "docstring": "Get the number of file in the folder.", "id": "f12540:c1:m5"} {"signature": "def main():", "body": "plugin = Register()if plugin.args.option == '':plugin.filenumber_handle()elif plugin.args.option == '':plugin.fileage_handle()elif plugin.args.option == '':plugin.sqlserverlocks_handle()else:plugin.unknown(\"\")", "docstring": "Register your own mode and handle method here.", "id": "f12542:m0"} {"signature": "def filenumber_handle(self):", "body": "self.file_list = []self.count = status = self.okif 
self.args.recursion:self.__result, self.__file_list = self.__get_folder(self.args.path)else:self.__result, self.__file_list = self.__get_file(self.args.path)if self.__result > self.args.critical:status = self.criticalelif self.__result > self.args.warning:status = self.warningelse:status = self.okself.shortoutput = \"\".format(self.__result,self.args.path)self.logger.debug(\"\".format(self.__file_list))[self.longoutput.append(file_data.get(''))for file_data in self.__file_list]self.perfdata.append(\"\".format(crit=self.args.critical,warn=self.args.warning,result=self.__result,path=self.args.path))status(self.output(long_output_limit=None))self.logger.debug(\"\")", "docstring": "Get the number of file in the folder.", "id": "f12542:c0:m4"} {"signature": "def __get_current_datetime(self):", "body": "self.wql_time = \"\"self.current_time = self.query(self.wql_time)self.current_time_string = str(self.current_time[].get('').split('')[])self.current_time_format = datetime.datetime.strptime(self.current_time_string, '')return self.current_time_format", "docstring": "Get current datetime for every file.", "id": "f12542:c1:m4"} {"signature": "def fileage_handle(self):", "body": "self.file_list = []self.ok_file = []self.warn_file = []self.crit_file = []status = self.okif self.args.recursion:self.__file_list = self.__get_folder(self.args.path)else:self.__file_list = self.__get_file(self.args.path)self.logger.debug(\"\".format(self.__file_list))for file_dict in self.__file_list:self.filename = file_dict.get('')if self.filename and self.filename != '':self.logger.debug(\"\".format(self.filename))self.file_datetime_string = file_dict.get('').split('')[]self.file_datetime = datetime.datetime.strptime(self.file_datetime_string, '')self.logger.debug(\"\".format(self.file_datetime))self.current_datetime = self.__get_current_datetime()self.logger.debug(\"\".format(self.current_datetime))self.__delta_datetime = self.current_datetime - self.file_datetimeself.logger.debug(\"\".format(self.__delta_datetime))self.logger.debug(\"\".format(datetime.timedelta(minutes=self.args.warning)))self.logger.debug(\"\".format(datetime.timedelta(minutes=self.args.critical)))if self.__delta_datetime > datetime.timedelta(minutes=self.args.critical):self.crit_file.append(self.filename)elif self.__delta_datetime > datetime.timedelta(minutes=self.args.warning):self.warn_file.append(self.filename)else:self.ok_file.append(self.filename)if self.crit_file:status = self.criticalelif self.warn_file:status = self.warningelse:status = self.okself.shortoutput = \"\".format(len(self.crit_file))if self.crit_file:self.longoutput.append(\"\")[self.longoutput.append(filename)for filename in self.crit_file if self.crit_file]if self.warn_file:self.longoutput.append(\"\")[self.longoutput.append(filename)for filename in self.warn_file if self.warn_file]if self.ok_file:self.longoutput.append(\"\")[self.longoutput.append(filename)for filename in self.ok_file if self.ok_file]self.perfdata.append(\"\".format(crit=self.args.critical,warn=self.args.warning,result=len(self.crit_file),path=self.args.drive + self.args.path))status(self.output(long_output_limit=None))self.logger.debug(\"\")", "docstring": "Get the number of file in the folder.", "id": "f12542:c1:m5"} {"signature": "def main():", "body": "plugin = Register()if plugin.args.option == '':plugin.filenumber_handle()else:plugin.unknown(\"\")", "docstring": "Register your own mode and handle method here.", "id": "f12543:m0"} {"signature": "def filenumber_handle(self):", "body": "self.__results = 
[]self.__dirs = []self.__files = []self.__ftp = self.connect()self.__ftp.dir(self.args.path, self.__results.append)self.logger.debug(\"\".format(self.__results))self.quit()status = self.okfor data in self.__results:if \"\" in data:self.__dirs.append(str(data.split()[]))else:self.__files.append(str(data.split()[]))self.__result = len(self.__files)self.logger.debug(\"\".format(self.__result))if self.__result > self.args.warning:status = self.warningif self.__result > self.args.critical:status = self.criticalself.shortoutput = \"\".format(self.__result,self.args.path)[self.longoutput.append(line)for line in self.__results if self.__results]self.perfdata.append(\"\".format(crit=self.args.critical,warn=self.args.warning,result=self.__result,path=self.args.path))self.logger.debug(\"\")status(self.output())", "docstring": "Get the number of files in the folder.", "id": "f12543:c0:m2"} {"signature": "def main():", "body": "plugin = Register()if plugin.args.option == '':plugin.sqlserverlocks_handle()else:plugin.unknown(\"\")", "docstring": "Register your own mode and handle method here.", "id": "f12544:m0"} {"signature": "def main():", "body": "plugin = Register()if plugin.args.option == '':plugin.sql_handle()elif plugin.args.option == '':plugin.database_used_handle()elif plugin.args.option == '':plugin.database_log_used_handle()else:plugin.unknown(\"\")", "docstring": "Register your own mode and handle method here.", "id": "f12545:m0"} {"signature": "def combine_filenames(filenames, max_length=):", "body": "path = Nonenames = []extension = Nonetimestamps = []shas = []filenames.sort()concat_names = \"\".join(filenames)if concat_names in COMBINED_FILENAMES_GENERATED:return COMBINED_FILENAMES_GENERATED[concat_names]for filename in filenames:name = os.path.basename(filename)if not extension:extension = os.path.splitext(name)[]elif os.path.splitext(name)[] != extension:raise ValueError(\"\")for base in MEDIA_ROOTS:try:shas.append(md5(os.path.join(base, filename)))breakexcept IOError:passif path is None:path = os.path.dirname(filename)else:if len(os.path.dirname(filename)) < len(path):path = os.path.dirname(filename)m = hashlib.md5()m.update(\"\".join(shas))new_filename = \"\" % m.hexdigest()new_filename = new_filename[:max_length]new_filename += extensionCOMBINED_FILENAMES_GENERATED[concat_names] = new_filenamereturn os.path.join(path, new_filename)", "docstring": "Return a new filename to use as the combined file name for a\n bunch of files, based on the SHA of their contents.\n A precondition is that they all have the same file extension\n\n Given that the list of files can have different paths, we aim to use the\n most common path.\n\n Example:\n /somewhere/else/foo.js\n /somewhere/bar.js\n /somewhere/different/too/foobar.js\n The result will be\n /somewhere/148713695b4a4b9083e506086f061f9c.js\n\n Another thing to note, if the filenames have timestamps in them, combine\n them all and use the highest timestamp.", "id": "f12547:m2"} {"signature": "def apply_orientation(im):", "body": "try:kOrientationEXIFTag = if hasattr(im, ''): e = im._getexif() if e is not None:orientation = e[kOrientationEXIFTag]f = orientation_funcs[orientation]return f(im)except:pass return im", "docstring": "Extract the oritentation EXIF tag from the image, which should be a PIL Image instance,\nand if there is an orientation tag that would rotate the image, apply that rotation to\nthe Image instance given to do an in-place rotation.\n\n:param Image im: Image instance to inspect\n:return: A possibly transposed image instance", 
"id": "f12547:m14"} {"signature": "@click.group()def cli():", "body": "pass", "docstring": "Ink. Publication made simple.", "id": "f12547:m50"} {"signature": "@cli.command()def write():", "body": "click.echo(\"\")title = click.prompt(\"\")url = slugify(title)url = click.prompt(\"\", default=url)click.echo(\"\" % url)scaffold_piece(title, url)", "docstring": "Start a new piece", "id": "f12547:m54"} {"signature": "@cli.command()def scaffold():", "body": "click.echo(\"\")title = click.prompt(\"\")url = click.prompt(\"\")click.echo(\"\" % url)", "docstring": "Start a new site.", "id": "f12547:m55"} {"signature": "@cli.command()def publish():", "body": "try:build_site(dev_mode=False, clean=True)click.echo('')call(\"\" % (BUILD_DIR, CONFIG[\"\"],), shell=True)if \"\" in CONFIG and \"\" in CONFIG[\"\"] and CONFIG[\"\"][\"\"]:do_purge()except (KeyboardInterrupt, SystemExit):raisesys.exit()", "docstring": "Publish the site", "id": "f12547:m56"} {"signature": "@cli.command()def promote():", "body": "if \"\" not in os.environ:warn(\"\")echo(\"\")echo(\"\")echo(\"\")echo(\"\")echo(\"\")profiles = buffer_get(\"\")for p in profiles:supported_profile = Falseif p[\"\"].lower() == \"\" or p[\"\"].lower() == \"\":facebook_profiles.append(p)supported_profile = Trueelif p[\"\"].lower() == \"\":twitter_profiles.append(p)supported_profile = Trueif supported_profile:click.secho(\"\" % (p[\"\"], p[\"\"]), fg=\"\")echo(\"\")site_json_filename = os.path.join(ROOT_DIR, BUILD_DIR, \"\", \"\")with open(site_json_filename, \"\") as site_json:site = load(site_json)echo('')posts = {}unpublished_posts = []for dirpath, dirnames, filenames in os.walk(os.path.join(ROOT_DIR, \"\"), topdown=False):for filename in filenames:if \"\" in filename:if exists(dirpath, \"\") and exists(dirpath, \"\"):with open(os.path.join(dirpath, \"\")) as f:social = load(f)with open(os.path.join(dirpath, \"\")) as f:meta = load(f)if \"\" in meta:site_json_entry = Nonefor sp in site[\"\"]:if meta[\"\"] == sp[\"\"]:site_json_entry = spbreakposts[meta[\"\"]] = {\"\": meta,\"\": social,\"\": dirpath,\"\": site_json_entry,}if \"\" not in social or social[\"\"] is not True:unpublished_posts.append(meta[\"\"])else:warn(\"\" % dirpath.replace(ROOT_DIR))automark_set = Falseautomark = Nonefor u in unpublished_posts:post = posts[u]if \"\" in post[\"\"] and post[\"\"][\"\"] and len(post[\"\"][\"\"]) > :facebook_posts = []twitter_posts = []mark_as_published = Falsehas_valid_post = Falsefor p in post[\"\"][\"\"]:try:if len(list(p.keys())) != :error(\"\" % u)breakif list(p.keys())[] == \"\":facebook_posts.append(p[\"\"])if post_in_future(p[\"\"], post):has_valid_post = Trueelif list(p.keys())[] == \"\":if post_in_future(p[\"\"], post):has_valid_post = Truetwitter_posts.append(p[\"\"])else:warn(\"\" % list(p.keys())[])except:error(\"\" % post[\"\"][\"\"])import tracebacktraceback.print_exc()if not has_valid_post:if automark:mark_as_published = Trueelse:warn('' % post[\"\"][\"\"])if click.confirm(\"\"):mark_as_published = Trueif not automark_set:if click.confirm(\"\"):automark = Trueautomark_set = Trueelse:echo('' % post[\"\"][\"\"])if len(facebook_posts) > :echo(\"\")for p in facebook_posts:if (len(p[\"\"]) > ):truncated_content = \"\" % p[\"\"][:]else:truncated_content = p[\"\"]if post_in_future(p, post):echo(\"\" % (publish_datetime(p, post).strftime(\"\"),truncated_content,))else:warn(\"\" % (publish_datetime(p, post).strftime(\"\"),truncated_content,))echo(\"\")if len(twitter_posts) > :for p in twitter_posts:if (len(p[\"\"]) > ):truncated_content = \"\" % 
p[\"\"][:]else:truncated_content = p[\"\"]if post_in_future(p, post):echo(\"\" % (publish_datetime(p, post).strftime(\"\"),truncated_content,))else:warn(\"\" % (publish_datetime(p, post).strftime(\"\"),truncated_content,))if click.confirm(click.style(\"\", fg=\"\")):mark_as_published = Trueecho(\"\")for p in facebook_posts:if post_in_future(p, post):publish_facebook(p, post)if (len(p[\"\"]) > ):truncated_content = \"\" % p[\"\"][:]else:truncated_content = p[\"\"]click.secho(\"\" % (publish_datetime(p, post).strftime(\"\"),truncated_content,), fg=\"\")for p in twitter_posts:if post_in_future(p, post):publish_twitter(p, post)if (len(p[\"\"]) > ):truncated_content = \"\" % p[\"\"][:]else:truncated_content = p[\"\"]click.secho(\"\" % (publish_datetime(p, post).strftime(\"\"),truncated_content,), fg=\"\")echo(\"\")if mark_as_published or automark:post[\"\"][\"\"] = Truewith open(os.path.join(post[\"\"], \"\"), \"\") as f:dump(post[\"\"], f, default_flow_style=False, width=)if click.confirm(\"\"):print (\"\")", "docstring": "Schedule all the social media posts.", "id": "f12547:m57"} {"signature": "@cli.command()def list():", "body": "click.echo('')", "docstring": "List all posts", "id": "f12547:m58"} {"signature": "@cli.command()def serve():", "body": "build_site(dev_mode=True)serve_site()", "docstring": "Serve the site for local testing and editing.", "id": "f12547:m59"} {"signature": "@contextlib.contextmanagerdef replaced_directory(dirname):", "body": "if dirname[-] == '':dirname = dirname[:-]full_path = os.path.abspath(dirname)if not os.path.isdir(full_path):raise AttributeError('')base, name = os.path.split(full_path)tempdir = tempfile.mkdtemp()shutil.move(full_path, tempdir)os.mkdir(full_path)try:yield tempdirfinally:shutil.rmtree(full_path)moved = os.path.join(tempdir, name)shutil.move(moved, base)shutil.rmtree(tempdir)", "docstring": "This ``Context Manager`` is used to move the contents of a directory\n elsewhere temporarily and put them back upon exit. This allows testing\n code to use the same file directories as normal code without fear of\n damage.\n\n The name of the temporary directory which contains your files is yielded.\n\n :param dirname:\n Path name of the directory to be replaced.\n\n\n Example:\n\n .. code-block:: python\n\n with replaced_directory('/foo/bar/') as rd:\n # \"/foo/bar/\" has been moved & renamed\n with open('/foo/bar/thing.txt', 'w') as f:\n f.write('stuff')\n f.close()\n\n\n # got here? => \"/foo/bar/ is now restored and temp has been wiped, \n # \"thing.txt\" is gone", "id": "f12549:m3"} {"signature": "@contextlib.contextmanagerdef capture_stdout():", "body": "stdout = sys.stdouttry:capture_out = StringIO()sys.stdout = capture_outyield capture_outfinally:sys.stdout = stdout", "docstring": "This ``Context Manager`` redirects STDOUT to a ``StringIO`` objects\n which is returned from the ``Context``. On exit STDOUT is restored.\n\n Example:\n\n .. code-block:: python\n\n with capture_stdout() as capture:\n print('foo')\n\n # got here? => capture.getvalue() will now have \"foo\\\\n\"", "id": "f12549:m4"} {"signature": "@contextlib.contextmanagerdef capture_stderr():", "body": "stderr = sys.stderrtry:capture_out = StringIO()sys.stderr = capture_outyield capture_outfinally:sys.stderr = stderr", "docstring": "This ``Context Manager`` redirects STDERR to a ``StringIO`` objects\n which is returned from the ``Context``. On exit STDERR is restored.\n\n Example:\n\n .. code-block:: python\n\n with capture_stderr() as capture:\n print('foo')\n\n # got here? 
=> capture.getvalue() will now have \"foo\\\\n\"", "id": "f12549:m5"} {"signature": "def pprint(data):", "body": "print(json.dumps(data, sort_keys=True, indent=, separators=('', '')))", "docstring": "Alternative to `pprint.PrettyPrinter()` that uses `json.dumps()` for\n sorting and displaying data. \n\n :param data: item to print to STDOUT. The item must be json serializable!", "id": "f12549:m6"} {"signature": "def time_zone_by_country_and_region(country_code, region_code=None):", "body": "timezone = country_dict.get(country_code)if not timezone:return Noneif isinstance(timezone, str):return timezonereturn timezone.get(region_code)", "docstring": "Returns time zone from country and region code.\n\n:arg country_code: Country code\n:arg region_code: Region code", "id": "f12577:m0"} {"signature": "def ip2long(ip):", "body": "try:return int(binascii.hexlify(socket.inet_aton(ip)), )except socket.error:return int(binascii.hexlify(socket.inet_pton(socket.AF_INET6, ip)), )", "docstring": "Wrapper function for IPv4 and IPv6 converters.\n\n:arg ip: IPv4 or IPv6 address", "id": "f12578:m0"} {"signature": "def str2fp(data):", "body": "return BytesIO(bytearray(data, const.ENCODING)) if const.PY3 else StringIO(data)", "docstring": "Convert bytes data to file handle object (StringIO or BytesIO).\n\n:arg data: String data to transform", "id": "f12578:m1"} {"signature": "def __call__(cls, *args, **kwargs):", "body": "if len(args) > :filename = args[]elif '' in kwargs:filename = kwargs['']else:return Noneif not kwargs.get('', True):return super(_GeoIPMetaclass, cls).__call__(*args, **kwargs)try:cls._instance_lock.acquire()if filename not in cls._instances:cls._instances[filename] = super(_GeoIPMetaclass, cls).__call__(*args, **kwargs)finally:cls._instance_lock.release()return cls._instances[filename]", "docstring": "Singleton method to gets an instance without reparsing\nthe database, the filename is being used as cache key.", "id": "f12580:c1:m0"} {"signature": "def __init__(self, filename, flags=STANDARD, cache=True):", "body": "self._lock = Lock()self._flags = flagsself._netmask = Noneif self._flags & const.MMAP_CACHE and mmap is None: import warningswarnings.warn(\"\")self._flags &= ~const.MMAP_CACHEif self._flags & const.MMAP_CACHE:f = codecs.open(filename, '', ENCODING)access = mmap.ACCESS_READself._fp = mmap.mmap(f.fileno(), , access=access)self._type = ''f.close()elif self._flags & const.MEMORY_CACHE:f = codecs.open(filename, '', ENCODING)self._memory = f.read()self._fp = util.str2fp(self._memory)self._type = ''f.close()else:self._fp = codecs.open(filename, '', ENCODING)self._type = ''try:self._lock.acquire()self._setup_segments()finally:self._lock.release()", "docstring": "Create and return an GeoIP instance.\n\n:arg filename: File path to a GeoIP database\n:arg flags: Flags that affect how the database is processed.\n Currently supported flags are STANDARD (default),\n MEMORY_CACHE (preload the whole file into memory) and\n MMAP_CACHE (access the file via mmap)\n:arg cache: Used in tests to skip instance caching", "id": "f12580:c2:m0"} {"signature": "def _setup_segments(self):", "body": "self._databaseType = const.COUNTRY_EDITIONself._recordLength = const.STANDARD_RECORD_LENGTHself._databaseSegments = const.COUNTRY_BEGINfilepos = self._fp.tell()self._fp.seek(-, os.SEEK_END)for i in range(const.STRUCTURE_INFO_MAX_SIZE):chars = chr() * delim = self._fp.read()if PY3 and type(delim) is bytes:delim = delim.decode(ENCODING)if PY2:chars = chars.decode(ENCODING)if type(delim) is str:delim = 
delim.decode(ENCODING)if delim == chars:byte = self._fp.read()self._databaseType = ord(byte)if self._databaseType >= :self._databaseType -= if self._databaseType == const.REGION_EDITION_REV0:self._databaseSegments = const.STATE_BEGIN_REV0elif self._databaseType == const.REGION_EDITION_REV1:self._databaseSegments = const.STATE_BEGIN_REV1elif self._databaseType in (const.CITY_EDITION_REV0,const.CITY_EDITION_REV1,const.CITY_EDITION_REV1_V6,const.ORG_EDITION,const.ISP_EDITION,const.NETSPEED_EDITION_REV1,const.NETSPEED_EDITION_REV1_V6,const.ASNUM_EDITION,const.ASNUM_EDITION_V6):self._databaseSegments = buf = self._fp.read(const.SEGMENT_RECORD_LENGTH)if PY3 and type(buf) is bytes:buf = buf.decode(ENCODING)for j in range(const.SEGMENT_RECORD_LENGTH):self._databaseSegments += (ord(buf[j]) << (j * ))LONG_RECORDS = (const.ORG_EDITION, const.ISP_EDITION)if self._databaseType in LONG_RECORDS:self._recordLength = const.ORG_RECORD_LENGTHbreakelse:self._fp.seek(-, os.SEEK_CUR)self._fp.seek(filepos, os.SEEK_SET)", "docstring": "Parses the database file to determine what kind of database is\nbeing used and setup segment sizes and start points that will\nbe used by the seek*() methods later.", "id": "f12580:c2:m1"} {"signature": "def _seek_country(self, ipnum):", "body": "try:offset = seek_depth = if len(str(ipnum)) > else for depth in range(seek_depth, -, -):if self._flags & const.MEMORY_CACHE:startIndex = * self._recordLength * offsetendIndex = startIndex + ( * self._recordLength)buf = self._memory[startIndex:endIndex]else:startIndex = * self._recordLength * offsetreadLength = * self._recordLengthtry:self._lock.acquire()self._fp.seek(startIndex, os.SEEK_SET)buf = self._fp.read(readLength)finally:self._lock.release()if PY3 and type(buf) is bytes:buf = buf.decode(ENCODING)x = [, ]for i in range():for j in range(self._recordLength):byte = buf[self._recordLength * i + j]x[i] += ord(byte) << (j * )if ipnum & ( << depth):if x[] >= self._databaseSegments:self._netmask = seek_depth - depth + return x[]offset = x[]else:if x[] >= self._databaseSegments:self._netmask = seek_depth - depth + return x[]offset = x[]except (IndexError, UnicodeDecodeError):passraise GeoIPError('')", "docstring": "Using the record length and appropriate start points, seek to the\ncountry that corresponds to the converted IP address integer.\nReturn offset of record.\n\n:arg ipnum: Result of ip2long conversion", "id": "f12580:c2:m2"} {"signature": "def _get_org(self, ipnum):", "body": "seek_org = self._seek_country(ipnum)if seek_org == self._databaseSegments:return Noneread_length = ( * self._recordLength - ) * self._databaseSegmentstry:self._lock.acquire()self._fp.seek(seek_org + read_length, os.SEEK_SET)buf = self._fp.read(const.MAX_ORG_RECORD_LENGTH)finally:self._lock.release()if PY3 and type(buf) is bytes:buf = buf.decode(ENCODING)return buf[:buf.index(chr())]", "docstring": "Seek and return organization or ISP name for ipnum.\nReturn org/isp name.\n\n:arg ipnum: Result of ip2long conversion", "id": "f12580:c2:m3"} {"signature": "def _get_region(self, ipnum):", "body": "region_code = Nonecountry_code = Noneseek_country = self._seek_country(ipnum)def get_region_code(offset):region1 = chr(offset // + )region2 = chr(offset % + )return ''.join([region1, region2])if self._databaseType == const.REGION_EDITION_REV0:seek_region = seek_country - const.STATE_BEGIN_REV0if seek_region >= :country_code = ''region_code = get_region_code(seek_region - )else:country_code = const.COUNTRY_CODES[seek_region]elif self._databaseType == 
const.REGION_EDITION_REV1:seek_region = seek_country - const.STATE_BEGIN_REV1if seek_region < const.US_OFFSET:passelif seek_region < const.CANADA_OFFSET:country_code = ''region_code = get_region_code(seek_region - const.US_OFFSET)elif seek_region < const.WORLD_OFFSET:country_code = ''region_code = get_region_code(seek_region - const.CANADA_OFFSET)else:index = (seek_region - const.WORLD_OFFSET) // const.FIPS_RANGEif index < len(const.COUNTRY_CODES):country_code = const.COUNTRY_CODES[index]elif self._databaseType in const.CITY_EDITIONS:rec = self._get_record(ipnum)region_code = rec.get('')country_code = rec.get('')return {'': country_code, '': region_code}", "docstring": "Seek and return the region information.\nReturns dict containing country_code and region_code.\n\n:arg ipnum: Result of ip2long conversion", "id": "f12580:c2:m4"} {"signature": "def _get_record(self, ipnum):", "body": "seek_country = self._seek_country(ipnum)if seek_country == self._databaseSegments:return {}read_length = ( * self._recordLength - ) * self._databaseSegmentstry:self._lock.acquire()self._fp.seek(seek_country + read_length, os.SEEK_SET)buf = self._fp.read(const.FULL_RECORD_LENGTH)finally:self._lock.release()if PY3 and type(buf) is bytes:buf = buf.decode(ENCODING)record = {'': ,'': ,'': None,'': None}latitude = longitude = char = ord(buf[])record[''] = const.COUNTRY_CODES[char]record[''] = const.COUNTRY_CODES3[char]record[''] = const.COUNTRY_NAMES[char]record[''] = const.CONTINENT_NAMES[char]def read_data(buf, pos):cur = poswhile buf[cur] != '':cur += return cur, buf[pos:cur] if cur > pos else Noneoffset, record[''] = read_data(buf, )offset, record[''] = read_data(buf, offset + )offset, record[''] = read_data(buf, offset + )offset = offset + for j in range():latitude += (ord(buf[offset + j]) << (j * ))for j in range():longitude += (ord(buf[offset + j + ]) << (j * ))record[''] = (latitude / ) - record[''] = (longitude / ) - if self._databaseType in (const.CITY_EDITION_REV1, const.CITY_EDITION_REV1_V6):if record[''] == '':dma_area = for j in range():dma_area += ord(buf[offset + j + ]) << (j * )record[''] = int(floor(dma_area / ))record[''] = dma_area % record[''] = const.DMA_MAP.get(record[''])params = (record[''], record[''])record[''] = time_zone_by_country_and_region(*params)return record", "docstring": "Populate location dict for converted IP.\nReturns dict with numerous location properties.\n\n:arg ipnum: Result of ip2long conversion", "id": "f12580:c2:m5"} {"signature": "def _gethostbyname(self, hostname):", "body": "if self._databaseType in const.IPV6_EDITIONS:response = socket.getaddrinfo(hostname, , socket.AF_INET6)family, socktype, proto, canonname, sockaddr = response[]address, port, flow, scope = sockaddrreturn addresselse:return socket.gethostbyname(hostname)", "docstring": "Hostname lookup method, supports both IPv4 and IPv6.", "id": "f12580:c2:m6"} {"signature": "def id_by_name(self, hostname):", "body": "addr = self._gethostbyname(hostname)return self.id_by_addr(addr)", "docstring": "Returns the database ID for specified hostname.\nThe id might be useful as array index. 
0 is unknown.\n\n:arg hostname: Hostname to get ID from.", "id": "f12580:c2:m7"} {"signature": "def id_by_addr(self, addr):", "body": "if self._databaseType in (const.PROXY_EDITION, const.NETSPEED_EDITION_REV1, const.NETSPEED_EDITION_REV1_V6):raise GeoIPError('')ipv = if addr.find('') >= else if ipv == and self._databaseType not in (const.COUNTRY_EDITION, const.NETSPEED_EDITION):raise GeoIPError('')if ipv == and self._databaseType != const.COUNTRY_EDITION_V6:raise GeoIPError('')ipnum = util.ip2long(addr)return self._seek_country(ipnum) - const.COUNTRY_BEGIN", "docstring": "Returns the database ID for specified address.\nThe ID might be useful as array index. 0 is unknown.\n\n:arg addr: IPv4 or IPv6 address (eg. 203.0.113.30)", "id": "f12580:c2:m8"} {"signature": "def last_netmask(self):", "body": "return self._netmask", "docstring": "Returns the netmask depth of the last lookup.", "id": "f12580:c2:m9"} {"signature": "def country_code_by_addr(self, addr):", "body": "VALID_EDITIONS = (const.COUNTRY_EDITION, const.COUNTRY_EDITION_V6)if self._databaseType in VALID_EDITIONS:country_id = self.id_by_addr(addr)return const.COUNTRY_CODES[country_id]elif self._databaseType in const.REGION_CITY_EDITIONS:return self.region_by_addr(addr).get('')raise GeoIPError('')", "docstring": "Returns 2-letter country code (e.g. US) from IP address.\n\n:arg addr: IP address (e.g. 203.0.113.30)", "id": "f12580:c2:m10"} {"signature": "def country_code_by_name(self, hostname):", "body": "addr = self._gethostbyname(hostname)return self.country_code_by_addr(addr)", "docstring": "Returns 2-letter country code (e.g. US) from hostname.\n\n:arg hostname: Hostname (e.g. example.com)", "id": "f12580:c2:m11"} {"signature": "def netspeed_by_addr(self, addr):", "body": "if self._databaseType == const.NETSPEED_EDITION:return const.NETSPEED_NAMES[self.id_by_addr(addr)]elif self._databaseType in (const.NETSPEED_EDITION_REV1,const.NETSPEED_EDITION_REV1_V6):ipnum = util.ip2long(addr)return self._get_org(ipnum)raise GeoIPError('')", "docstring": "Returns NetSpeed name from address.\n\n:arg addr: IP address (e.g. 203.0.113.30)", "id": "f12580:c2:m12"} {"signature": "def netspeed_by_name(self, hostname):", "body": "addr = self._gethostbyname(hostname)return self.netspeed_by_addr(addr)", "docstring": "Returns NetSpeed name from hostname. Can be Unknown, Dial-up,\nCable, or Corporate.\n\n:arg hostname: Hostname (e.g. example.com)", "id": "f12580:c2:m13"} {"signature": "def country_name_by_addr(self, addr):", "body": "VALID_EDITIONS = (const.COUNTRY_EDITION, const.COUNTRY_EDITION_V6)if self._databaseType in VALID_EDITIONS:country_id = self.id_by_addr(addr)return const.COUNTRY_NAMES[country_id]elif self._databaseType in const.CITY_EDITIONS:return self.record_by_addr(addr).get('')else:message = ''raise GeoIPError(message)", "docstring": "Returns full country name for specified IP address.\n\n:arg addr: IP address (e.g. 203.0.113.30)", "id": "f12580:c2:m14"} {"signature": "def country_name_by_name(self, hostname):", "body": "addr = self._gethostbyname(hostname)return self.country_name_by_addr(addr)", "docstring": "Returns full country name for specified hostname.\n\n:arg hostname: Hostname (e.g. 
example.com)", "id": "f12580:c2:m15"} {"signature": "def org_by_addr(self, addr):", "body": "valid = (const.ORG_EDITION, const.ISP_EDITION,const.ASNUM_EDITION, const.ASNUM_EDITION_V6)if self._databaseType not in valid:message = ''raise GeoIPError(message)ipnum = util.ip2long(addr)return self._get_org(ipnum)", "docstring": "Returns Organization, ISP, or ASNum name for given IP address.\n\n:arg addr: IP address (e.g. 203.0.113.30)", "id": "f12580:c2:m16"} {"signature": "def org_by_name(self, hostname):", "body": "addr = self._gethostbyname(hostname)return self.org_by_addr(addr)", "docstring": "Returns Organization, ISP, or ASNum name for given hostname.\n\n:arg hostname: Hostname (e.g. example.com)", "id": "f12580:c2:m17"} {"signature": "def record_by_addr(self, addr):", "body": "if self._databaseType not in const.CITY_EDITIONS:message = ''raise GeoIPError(message)ipnum = util.ip2long(addr)rec = self._get_record(ipnum)if not rec:return Nonereturn rec", "docstring": "Returns dictionary with city data containing `country_code`, `country_name`,\n`region`, `city`, `postal_code`, `latitude`, `longitude`, `dma_code`,\n`metro_code`, `area_code`, `region_code` and `time_zone`.\n\n:arg addr: IP address (e.g. 203.0.113.30)", "id": "f12580:c2:m18"} {"signature": "def record_by_name(self, hostname):", "body": "addr = self._gethostbyname(hostname)return self.record_by_addr(addr)", "docstring": "Returns dictionary with city data containing `country_code`, `country_name`,\n`region`, `city`, `postal_code`, `latitude`, `longitude`, `dma_code`,\n`metro_code`, `area_code`, `region_code` and `time_zone`.\n\n:arg hostname: Hostname (e.g. example.com)", "id": "f12580:c2:m19"} {"signature": "def region_by_addr(self, addr):", "body": "if self._databaseType not in const.REGION_CITY_EDITIONS:message = ''raise GeoIPError(message)ipnum = util.ip2long(addr)return self._get_region(ipnum)", "docstring": "Returns dictionary containing `country_code` and `region_code`.\n\n:arg addr: IP address (e.g. 203.0.113.30)", "id": "f12580:c2:m20"} {"signature": "def region_by_name(self, hostname):", "body": "addr = self._gethostbyname(hostname)return self.region_by_addr(addr)", "docstring": "Returns dictionary containing `country_code` and `region_code`.\n\n:arg hostname: Hostname (e.g. example.com)", "id": "f12580:c2:m21"} {"signature": "def time_zone_by_addr(self, addr):", "body": "if self._databaseType not in const.CITY_EDITIONS:message = ''raise GeoIPError(message)ipnum = util.ip2long(addr)return self._get_record(ipnum).get('')", "docstring": "Returns time zone in tzdata format (e.g. America/New_York or Europe/Paris)\n\n:arg addr: IP address (e.g. 203.0.113.30)", "id": "f12580:c2:m22"} {"signature": "def time_zone_by_name(self, hostname):", "body": "addr = self._gethostbyname(hostname)return self.time_zone_by_addr(addr)", "docstring": "Returns time zone in tzdata format (e.g. America/New_York or Europe/Paris)\n\n:arg hostname: Hostname (e.g. 
example.com)", "id": "f12580:c2:m23"} {"signature": "def isAlphanum(c):", "body": "return ((c >= '' and c <= '') or (c >= '' and c <= '') or(c >= '' and c <= '') or c == '' or c == '' or c == '' or (c is not None and ord(c) > ))", "docstring": "return true if the character is a letter, digit, underscore,\n dollar sign, or non-ASCII character.", "id": "f12583:m1"} {"signature": "def _get(self):", "body": "c = self.theLookaheadself.theLookahead = Noneif c == None:c = self.instream.read()if c >= '' or c == '':return cif c == '': return ''if c == '':return ''return ''", "docstring": "return the next character from stdin. Watch out for lookahead. If\n the character is a control character, translate it to a space or\n linefeed.", "id": "f12583:c3:m2"} {"signature": "def _next(self):", "body": "c = self._get()if c == '' and self.theA != '':p = self._peek()if p == '':c = self._get()while c > '':c = self._get()return cif p == '':c = self._get()while :c = self._get()if c == '':if self._peek() == '':self._get()return ''if c == '':raise UnterminatedComment()return c", "docstring": "get the next character, excluding comments. peek() is used to see\n if an unescaped '/' is followed by a '/' or '*'.", "id": "f12583:c3:m4"} {"signature": "def _action(self, action):", "body": "if action <= :self._outA()if action <= :self.theA = self.theBif self.theA == \"\" or self.theA == '':while :self._outA()self.theA = self._get()if self.theA == self.theB:breakif self.theA <= '':raise UnterminatedStringLiteral()if self.theA == '':self._outA()self.theA = self._get()if action <= :self.theB = self._next()if self.theB == '' and (self.theA == '' or self.theA == '' orself.theA == '' or self.theA == '' orself.theA == '' or self.theA == '' orself.theA == '' or self.theA == '' orself.theA == '' or self.theA == '' orself.theA == '' or self.theA == '' orself.theA == ''):self._outA()self._outB()while :self.theA = self._get()if self.theA == '':breakelif self.theA == '':self._outA()self.theA = self._get()elif self.theA <= '':raise UnterminatedRegularExpression()self._outA()self.theB = self._next()", "docstring": "do something! What you do is determined by the argument:\n 1 Output A. Copy B to A. Get the next B.\n 2 Copy B to A. Get the next B. (Delete A).\n 3 Get the next B. (Delete B).\n action treats a string as a single character. Wow!\n action recognizes a regular expression if it is preceded by ( or , or =.", "id": "f12583:c3:m5"} {"signature": "def _jsmin(self):", "body": "self.theA = ''self._action()while self.theA != '':if self.theA == '':if isAlphanum(self.theB):self._action()else:self._action()elif self.theA == '':if self.theB in ['', '', '', '', '']:self._action()elif self.theB == '':self._action()else:if isAlphanum(self.theB):self._action()else:self._action()else:if self.theB == '':if isAlphanum(self.theA):self._action()else:self._action()elif self.theB == '':if self.theA in ['', '', '', '', '', '', '']:self._action()else:if isAlphanum(self.theA):self._action()else:self._action()else:self._action()", "docstring": "Copy the input to the output, deleting the characters which are\n insignificant to JavaScript. Comments will be removed. Tabs will be\n replaced with spaces. 
Carriage returns will be replaced with linefeeds.\n Most spaces and linefeeds will be removed.", "id": "f12583:c3:m6"} {"signature": "def minimalize(css, level=NORMAL):", "body": "return CssMinifier(level).minify(css)", "docstring": "Compress css using level method and return new css as a string.", "id": "f12584:m0"} {"signature": "def minify(self, css):", "body": "css = css.replace(\"\", \"\") for rule in _REPLACERS[self.level]:css = re.compile(rule[], re.MULTILINE|re.UNICODE|re.DOTALL).sub(rule[], css)return css", "docstring": "Tries to minimize the length of CSS code passed as parameter. Returns string.", "id": "f12584:c0:m1"} {"signature": "def _get_search_path(main_file_dir, sys_path):", "body": "paths = []for pth in sys_path:pth = path.abspath(pth)if (pth != main_file_dirand pth == path.commonprefix((pth, main_file_dir))):paths.append(pth)if paths:paths.sort()return paths[-]", "docstring": "Find the parent python path that contains the __main__'s file directory\n\n:param main_file_dir: __main__'s file directory\n:param sys_path: paths list to match directory against (like sys.path)", "id": "f12604:m0"} {"signature": "def _print_exc(e):", "body": "msg = ('''')_log_error(msg % e)", "docstring": "Log exception as error.\n:param e: exception to be logged.", "id": "f12604:m1"} {"signature": "def _try_search_paths(main_globals):", "body": "fl = main_globals['']search_path = Noneif not path.isabs(fl) and os.getenv(''):cwd_fl = path.abspath(path.join(os.getenv(''), fl))main_dir = path.dirname(cwd_fl)search_path = _get_search_path(main_dir, sys.path)if not search_path:main_dir = path.dirname(path.abspath(fl))search_path = _get_search_path(main_dir, sys.path)if not search_path:main_dir = path.dirname(path.realpath(fl))sys_path = [path.realpath(p) for p in sys.path]search_path = _get_search_path(main_dir, sys_path)return main_dir, search_path", "docstring": "Try different strategies to found the path containing the __main__'s file.\nWill try strategies, in the following order:\n 1. Building file's path with PWD env var.\n 2. Building file's path from absolute file's path.\n 3. Buidling file's path from real file's path.\n\n:param main_globals: globals dictionary in __main__", "id": "f12604:m2"} {"signature": "def _solve_pkg(main_globals):", "body": "main_dir, search_path = _try_search_paths(main_globals)if not search_path:_log_debug('' % main_dir)returnpkg_str = path.relpath(main_dir, search_path).replace(path.sep, '')site_pkgs = ''if pkg_str.startswith(site_pkgs):pkg_str = pkg_str[len(site_pkgs):]assert pkg_str_log_debug('' % pkg_str)try:if '' in main_globals['']:_log_debug('')sys.modules[pkg_str] = sys.modules['']sys.modules[pkg_str].__path__ = [main_dir]parent_pkg_str = ''.join(pkg_str.split('')[:-])if parent_pkg_str:importlib.import_module(parent_pkg_str)else:_log_debug('' % pkg_str)importlib.import_module(pkg_str)main_globals[''] = pkg_strreturn pkg_strexcept ImportError as e:_print_exc(e)", "docstring": "Find parent python path of __main__. 
From there solve the package\ncontaining __main__, import it and set __package__ variable.\n\n:param main_globals: globals dictionary in __main__", "id": "f12604:m3"} {"signature": "def _log(msg):", "body": "sys.stderr.write(msg + '')sys.stderr.flush()", "docstring": "Central log function (all levels)\n:param msg: message to log", "id": "f12604:m4"} {"signature": "def _log_debug(msg):", "body": "if _log_level <= DEBUG:if _log_level == TRACE:traceback.print_stack()_log(msg)", "docstring": "Log at debug level\n:param msg: message to log", "id": "f12604:m5"} {"signature": "def _log_error(msg):", "body": "if _log_level <= ERROR:_log(msg)", "docstring": "Log at error level\n:param msg: message to log", "id": "f12604:m6"} {"signature": "def init(log_level=ERROR):", "body": "global _initializedif _initialized:returnelse:_initialized = Trueframe = currentframe()frame = frame.f_back_init(frame, log_level)", "docstring": "Enables explicit relative import in sub-modules when ran as __main__\n:param log_level: module's inner logger level (equivalent to logging pkg)", "id": "f12604:m7"} {"signature": "def _init(frame, log_level=ERROR):", "body": "global _log_level_log_level = log_levelmain_globals = frame.f_globalspkg = main_globals.get('')file_ = main_globals.get('')if pkg or not file_:_log_debug('''' % (pkg, file_))returntry:_solve_pkg(main_globals)except Exception as e:_print_exc(e)", "docstring": "Enables explicit relative import in sub-modules when ran as __main__\n:param log_level: module's inner logger level (equivalent to logging pkg)", "id": "f12604:m9"} {"signature": "def __init__(self, access_token=DEMO_TOKEN, timeout=DEFAULT_TIMEOUT, websession=None):", "body": "if websession is None:async def _create_session():return aiohttp.ClientSession()loop = asyncio.get_event_loop()self.websession = loop.run_until_complete(_create_session())else:self.websession = websessionself._timeout = timeoutself._access_token = access_tokenself._name = Noneself._home_ids = []self._all_home_ids = []self._homes = {}self.sub_manager = None", "docstring": "Initialize the Tibber connection.", "id": "f12605:c0:m0"} {"signature": "async def close_connection(self):", "body": "await self.websession.close()", "docstring": "Close the Tibber connection.", "id": "f12605:c0:m1"} {"signature": "def sync_close_connection(self):", "body": "loop = asyncio.get_event_loop()task = loop.create_task(self.close_connection())loop.run_until_complete(task)", "docstring": "Close the Tibber connection.", "id": "f12605:c0:m2"} {"signature": "async def rt_connect(self, loop):", "body": "if self.sub_manager is not None:returnself.sub_manager = SubscriptionManager(loop, \"\".format(self._access_token), SUB_ENDPOINT)self.sub_manager.start()", "docstring": "Start subscription manager for real time data.", "id": "f12605:c0:m3"} {"signature": "async def rt_disconnect(self):", "body": "if self.sub_manager is None:returnawait self.sub_manager.stop()", "docstring": "Stop subscription manager.", "id": "f12605:c0:m4"} {"signature": "async def execute(self, document, variable_values=None):", "body": "res = await self._execute(document, variable_values)if res is None:return Nonereturn res.get(\"\")", "docstring": "Execute gql.", "id": "f12605:c0:m5"} {"signature": "async def _execute(self, document, variable_values=None, retry=):", "body": "query_str = print_ast(document)payload = {\"\": query_str, \"\": variable_values or {}}post_args = {\"\": {\"\": \"\" + self._access_token},\"\": payload,}try:with async_timeout.timeout(self._timeout):resp = await 
self.websession.post(API_ENDPOINT, **post_args)if resp.status != :_LOGGER.error(\"\", resp.status)return Noneresult = await resp.json()except aiohttp.ClientError as err:_LOGGER.error(\"\", err, exc_info=True)if retry > :return await self._execute(document, variable_values, retry - )raiseexcept asyncio.TimeoutError as err:_LOGGER.error(\"\", err, exc_info=True)if retry > :return await self._execute(document, variable_values, retry - )raiseerrors = result.get(\"\")if errors:_LOGGER.error(\"\", errors)return result", "docstring": "Execute gql.", "id": "f12605:c0:m6"} {"signature": "def sync_update_info(self, *_):", "body": "loop = asyncio.get_event_loop()task = loop.create_task(self.update_info())loop.run_until_complete(task)", "docstring": "Update home info.", "id": "f12605:c0:m7"} {"signature": "async def update_info(self, *_):", "body": "query = gql(\"\"\"\"\"\")res = await self._execute(query)if res is None:returnerrors = res.get(\"\", [])if errors:msg = errors[].get(\"\", \"\")_LOGGER.error(msg)raise InvalidLogin(msg)data = res.get(\"\")if not data:returnviewer = data.get(\"\")if not viewer:returnself._name = viewer.get(\"\")homes = viewer.get(\"\", [])self._home_ids = []for _home in homes:home_id = _home.get(\"\")self._all_home_ids += [home_id]subs = _home.get(\"\")if subs:status = subs[].get(\"\", \"\").lower()if not home_id or status != \"\":continueself._home_ids += [home_id]", "docstring": "Update home info async.", "id": "f12605:c0:m8"} {"signature": "@propertydef name(self):", "body": "return self._name", "docstring": "Return name of user.", "id": "f12605:c0:m9"} {"signature": "@propertydef home_ids(self):", "body": "return self.get_home_ids(only_active=True)", "docstring": "Return list of home ids.", "id": "f12605:c0:m10"} {"signature": "def get_home_ids(self, only_active=True):", "body": "if only_active:return self._home_idsreturn self._all_home_ids", "docstring": "Return list of home ids.", "id": "f12605:c0:m11"} {"signature": "def get_homes(self, only_active=True):", "body": "return [self.get_home(home_id) for home_id in self.get_home_ids(only_active)]", "docstring": "Return list of Tibber homes.", "id": "f12605:c0:m12"} {"signature": "def get_home(self, home_id):", "body": "if home_id not in self._all_home_ids:_LOGGER.error(\"\", home_id)return Noneif home_id not in self._homes.keys():self._homes[home_id] = TibberHome(home_id, self)return self._homes[home_id]", "docstring": "Return an instance of TibberHome for the given home id.", "id": "f12605:c0:m13"} {"signature": "async def send_notification(self, title, message):", "body": "query = gql(\"\"\"\"\"\"% (title, message))res = await self.execute(query)if not res:return Falsenoti = res.get(\"\", {})successful = noti.get(\"\", False)pushed_to_number_of_devices = noti.get(\"\", )_LOGGER.debug(\"\",successful,pushed_to_number_of_devices,)return successful", "docstring": "Send notification.", "id": "f12605:c0:m14"} {"signature": "def __init__(self, home_id, tibber_control):", "body": "self._tibber_control = tibber_controlself._home_id = home_idself._current_price_total = Noneself._current_price_info = {}self._price_info = {}self._level_info = {}self.sub_manager = Noneself.info = {}self._subscription_id = Noneself._data = None", "docstring": "Initialize the Tibber home class.", "id": "f12605:c1:m0"} {"signature": "def sync_update_info(self):", "body": "loop = asyncio.get_event_loop()task = loop.create_task(self.update_info())loop.run_until_complete(task)", "docstring": "Update current price info.", "id": "f12605:c1:m1"} {"signature": 
"async def update_info(self):", "body": "query = gql(\"\"\"\"\"\"% self._home_id)self.info = await self._tibber_control.execute(query)", "docstring": "Update current price info async.", "id": "f12605:c1:m2"} {"signature": "def sync_update_current_price_info(self):", "body": "loop = asyncio.get_event_loop()task = loop.create_task(self.update_current_price_info())loop.run_until_complete(task)", "docstring": "Update current price info.", "id": "f12605:c1:m3"} {"signature": "async def update_current_price_info(self):", "body": "query = gql(\"\"\"\"\"\"% self.home_id)price_info_temp = await self._tibber_control.execute(query)if not price_info_temp:_LOGGER.error(\"\")returntry:home = price_info_temp[\"\"][\"\"]current_subscription = home[\"\"]price_info = current_subscription[\"\"][\"\"]except (KeyError, TypeError):_LOGGER.error(\"\")returnif price_info:self._current_price_info = price_info", "docstring": "Update current price info async.", "id": "f12605:c1:m4"} {"signature": "def sync_update_price_info(self):", "body": "loop = asyncio.get_event_loop()task = loop.create_task(self.update_price_info())loop.run_until_complete(task)", "docstring": "Update current price info.", "id": "f12605:c1:m5"} {"signature": "async def update_price_info(self):", "body": "query = gql(\"\"\"\"\"\"% self.home_id)price_info_temp = await self._tibber_control.execute(query)if not price_info_temp:_LOGGER.error(\"\")returnself._price_info = {}self._level_info = {}for key in [\"\", \"\", \"\"]:try:home = price_info_temp[\"\"][\"\"]current_subscription = home[\"\"]price_info = current_subscription[\"\"][key]except (KeyError, TypeError):_LOGGER.error(\"\", key)continueif key == \"\":self._current_price_info = price_infocontinuefor data in price_info:self._price_info[data.get(\"\")] = data.get(\"\")self._level_info[data.get(\"\")] = data.get(\"\")", "docstring": "Update price info async.", "id": "f12605:c1:m6"} {"signature": "@propertydef current_price_total(self):", "body": "if not self._current_price_info:return Nonereturn self._current_price_info.get(\"\")", "docstring": "Get current price total.", "id": "f12605:c1:m7"} {"signature": "@propertydef current_price_info(self):", "body": "return self._current_price_info", "docstring": "Get current price info.", "id": "f12605:c1:m8"} {"signature": "@propertydef price_total(self):", "body": "return self._price_info", "docstring": "Get dictionary with price total, key is date-time.", "id": "f12605:c1:m9"} {"signature": "@propertydef price_level(self):", "body": "return self._level_info", "docstring": "Get dictionary with price level, key is date-time.", "id": "f12605:c1:m10"} {"signature": "@propertydef home_id(self):", "body": "return self._home_id", "docstring": "Return home id.", "id": "f12605:c1:m11"} {"signature": "@propertydef has_active_subscription(self):", "body": "try:sub = self.info[\"\"][\"\"][\"\"][\"\"]except (KeyError, TypeError):return Falsereturn sub == \"\"", "docstring": "Return home id.", "id": "f12605:c1:m12"} {"signature": "@propertydef has_real_time_consumption(self):", "body": "try:return self.info[\"\"][\"\"][\"\"][\"\"]except (KeyError, TypeError):return False", "docstring": "Return home id.", "id": "f12605:c1:m13"} {"signature": "@propertydef address1(self):", "body": "try:return self.info[\"\"][\"\"][\"\"][\"\"]except (KeyError, TypeError):_LOGGER.error(\"\")return \"\"", "docstring": "Return the home adress1.", "id": "f12605:c1:m14"} {"signature": "@propertydef consumption_unit(self):", "body": "return \"\"", "docstring": "Return the consumption.", 
"id": "f12605:c1:m15"} {"signature": "@propertydef currency(self):", "body": "try:current_subscription = self.info[\"\"][\"\"][\"\"]return current_subscription[\"\"][\"\"][\"\"]except (KeyError, TypeError, IndexError):_LOGGER.error(\"\")return \"\"", "docstring": "Return the currency.", "id": "f12605:c1:m16"} {"signature": "@propertydef country(self):", "body": "try:return self.info[\"\"][\"\"][\"\"][\"\"]except (KeyError, TypeError):_LOGGER.error(\"\")return \"\"", "docstring": "Return the country.", "id": "f12605:c1:m17"} {"signature": "@propertydef price_unit(self):", "body": "currency = self.currencyconsumption_unit = self.consumption_unitif not currency or not consumption_unit:_LOGGER.error(\"\")return \"\"return currency + \"\" + consumption_unit", "docstring": "Return the price unit.", "id": "f12605:c1:m18"} {"signature": "async def rt_subscribe(self, loop, async_callback):", "body": "if self._subscription_id is not None:_LOGGER.error(\"\")returnawait self._tibber_control.rt_connect(loop)document = gql(\"\"\"\"\"\"% self.home_id)sub_query = print_ast(document)self._subscription_id = await self._tibber_control.sub_manager.subscribe(sub_query, async_callback)", "docstring": "Connect to Tibber and subscribe to Tibber rt subscription.", "id": "f12605:c1:m19"} {"signature": "async def rt_unsubscribe(self):", "body": "if self._subscription_id is None:_LOGGER.error(\"\")returnawait self._tibber_control.sub_manager.unsubscribe(self._subscription_id)", "docstring": "Unsubscribe to Tibber rt subscription.", "id": "f12605:c1:m20"} {"signature": "@propertydef rt_subscription_running(self):", "body": "return (self._tibber_control.sub_manager is not Noneand self._tibber_control.sub_manager.is_runningand self._subscription_id is not None)", "docstring": "Is real time subscription running.", "id": "f12605:c1:m21"} {"signature": "async def get_historic_data(self, n_data):", "body": "query = gql(\"\"\"\"\"\"% (self.home_id, n_data))data = await self._tibber_control.execute(query)if not data:_LOGGER.error(\"\")returndata = data[\"\"][\"\"][\"\"]if data is None:self._data = []returnself._data = data[\"\"]", "docstring": "Get historic data.", "id": "f12605:c1:m22"} {"signature": "def sync_get_historic_data(self, n_data):", "body": "loop = asyncio.get_event_loop()task = loop.create_task(self.get_historic_data(n_data))loop.run_until_complete(task)return self._data", "docstring": "get_historic_data.", "id": "f12605:c1:m23"} {"signature": "def setUp(self): ", "body": "self.tibber = tibber.Tibber()self.tibber.sync_update_info()", "docstring": "things to be run when tests are started.", "id": "f12606:c0:m0"} {"signature": "def tearDown(self): ", "body": "self.tibber.sync_close_connection()", "docstring": "Stop stuff we started.", "id": "f12606:c0:m1"} {"signature": "def setUp(self): ", "body": "async def _create_session():return aiohttp.ClientSession()loop = asyncio.get_event_loop()self.websession = loop.run_until_complete(_create_session())self.tibber = tibber.Tibber(websession=self.websession)self.tibber.sync_update_info()", "docstring": "things to be run when tests are started.", "id": "f12606:c1:m0"} {"signature": "def tearDown(self): ", "body": "self.tibber.sync_close_connection()", "docstring": "Stop stuff we started.", "id": "f12606:c1:m1"} {"signature": "def setUp(self): ", "body": "self.tibber = tibber.Tibber(access_token='') self.assertRaises(tibber.InvalidLogin, self.tibber.sync_update_info)", "docstring": "things to be run when tests are started.", "id": "f12606:c2:m0"} {"signature": "def 
tearDown(self): ", "body": "self.tibber.sync_close_connection()", "docstring": "Stop stuff we started.", "id": "f12606:c2:m1"} {"signature": "def setUp(self): ", "body": "self.tibber = tibber.Tibber(access_token='')self.tibber.sync_update_info()", "docstring": "things to be run when tests are started.", "id": "f12606:c3:m0"} {"signature": "def tearDown(self): ", "body": "self.tibber.sync_close_connection()", "docstring": "Stop stuff we started.", "id": "f12606:c3:m1"} {"signature": "def create(self, server):", "body": "if len(self.geometries) == :raise Exception('')return server.post('',self.as_payload(),replacements={'': self.__challenge__.slug,'': self.identifier})", "docstring": "Create the task on the server", "id": "f12608:c0:m1"} {"signature": "def update(self, server):", "body": "return server.put('',self.as_payload(),replacements={'': self.__challenge__.slug,'': self.identifier})", "docstring": "Update existing task on the server", "id": "f12608:c0:m2"} {"signature": "def exists(self, server):", "body": "try:server.get('',replacements={'': self.__challenge__.slug,'': self.identifier})except Exception:return Falsereturn True", "docstring": "Check if a task exists on the server", "id": "f12608:c0:m3"} {"signature": "@classmethoddef from_server(cls, server, slug, identifier):", "body": "task = server.get('',replacements={'': slug,'': identifier})return cls(**task)", "docstring": "Retrieve a task from the server", "id": "f12608:c0:m6"} {"signature": "@classmethoddef from_payload(cls, payload):", "body": "return cls(**payload)", "docstring": "Create a task from JSON", "id": "f12608:c0:m7"} {"signature": "def create(self, server):", "body": "return server.post('',self.as_payload(),replacements={'': self.slug})", "docstring": "Create the challenge on the server", "id": "f12611:c0:m1"} {"signature": "def update(self, server):", "body": "return server.put('',self.as_payload(),replacements={'': self.slug})", "docstring": "Update existing challenge on the server", "id": "f12611:c0:m2"} {"signature": "def exists(self, server):", "body": "try:server.get('',replacements={'': self.slug})except Exception:return Falsereturn True", "docstring": "Check if a challenge exists on the server", "id": "f12611:c0:m4"} {"signature": "@classmethoddef from_server(cls, server, slug):", "body": "challenge = server.get('',replacements={'': slug})return cls(**challenge)", "docstring": "Retrieve a challenge from the MapRoulette server\n :type server", "id": "f12611:c0:m6"} {"signature": "def create(self, server):", "body": "for chunk in self.__cut_to_size():server.post('',chunk.as_payload(),replacements={'': chunk.challenge.slug})", "docstring": "Create the tasks on the server", "id": "f12612:c0:m1"} {"signature": "def update(self, server):", "body": "for chunk in self.__cut_to_size():server.put('',chunk.as_payload(),replacements={'': chunk.challenge.slug})", "docstring": "Update existing tasks on the server", "id": "f12612:c0:m2"} {"signature": "def reconcile(self, server):", "body": "if not self.challenge.exists(server):raise Exception('')existing = MapRouletteTaskCollection.from_server(server, self.challenge)same = []new = []changed = []deleted = []for task in self.tasks:if task.identifier in [existing_task.identifier for existing_task in existing.tasks]:if task == existing.get_by_identifier(task.identifier):same.append(task)else:changed.append(task)else:new.append(task)for task in existing.tasks:if task.identifier not in [task.identifier for task in self.tasks]:deleted.append(task)if new:newCollection = 
MapRouletteTaskCollection(self.challenge, tasks=new)newCollection.create(server)if changed:changedCollection = MapRouletteTaskCollection(self.challenge, tasks=changed)changedCollection.update(server)if deleted:deletedCollection = MapRouletteTaskCollection(self.challenge, tasks=deleted)for task in deletedCollection.tasks:task.status = ''deletedCollection.update(server)return {'': same, '': new, '': changed, '': deleted}", "docstring": "Reconcile this collection with the server.", "id": "f12612:c0:m3"} {"signature": "def add(self, task):", "body": "self.tasks.append(task)", "docstring": "Add a task to the collection.", "id": "f12612:c0:m4"} {"signature": "def __create_task_collection(self, challenge):", "body": "task_collection = MapRouletteTaskCollection(challenge)i = while i < self.A_TON:i += task_collection.tasks.append(MapRouletteTask(challenge=challenge,identifier=''.format(uuid.uuid4()),geometries=self.__random_point()))return task_collection", "docstring": "Return a collection of A_TON of tasks with random Point geometries", "id": "f12615:c0:m12"} {"signature": "def add(self, addend_mat, axis=):", "body": "if self.finalized:if axis == :raise NotImplementedError('')elif axis == :for hid in xrange(self.shape[]):self.data[hid] = self.data[hid] + addend_matelif axis == :raise NotImplementedError('')else:raise RuntimeError('')else:raise RuntimeError('')", "docstring": "In-place addition\n\n:param addend_mat: A matrix to be added on the Sparse3DMatrix object\n:param axis: The dimension along which the addend_mat is added\n:return: Nothing (as it performs in-place operations)", "id": "f12617:c0:m12"} {"signature": "def multiply(self, multiplier, axis=None):", "body": "if self.finalized:if multiplier.ndim == :if axis == : raise NotImplementedError('')elif axis == : sz = len(multiplier)multiplier_mat = lil_matrix((sz, sz))multiplier_mat.setdiag(multiplier)for hid in xrange(self.shape[]):self.data[hid] = self.data[hid] * multiplier_matelif axis == : for hid in xrange(self.shape[]):self.data[hid].data *= multiplier[self.data[hid].indices]else:raise RuntimeError('')elif multiplier.ndim == :if axis == : for hid in xrange(self.shape[]):self.data[hid].data *= multiplier[self.data[hid].indices, hid]elif axis == : for hid in xrange(self.shape[]):self.data[hid] = self.data[hid].multiply(multiplier)elif axis == : for hid in xrange(self.shape[]):multiplier_vec = multiplier[hid, :]multiplier_vec = multiplier_vec.ravel()self.data[hid].data *= multiplier_vec.repeat(np.diff(self.data[hid].indptr))else:raise RuntimeError('')elif isinstance(multiplier, Sparse3DMatrix): for hid in xrange(self.shape[]):self.data[hid] = self.data[hid].multiply(multiplier.data[hid])else:raise RuntimeError('')else:raise RuntimeError('')", "docstring": "In-place multiplication\n\n:param multiplier: A matrix or vector to be multiplied\n:param axis: The dim along which 'multiplier' is multiplied\n:return: Nothing (as it performs in-place operations)", "id": "f12617:c0:m13"} {"signature": "def prepare(self, pseudocount=, lenfile=None, read_length=):", "body": "if self.probability.num_groups > :self.grp_conv_mat = lil_matrix((self.probability.num_loci, self.probability.num_groups))for i in range(self.probability.num_groups):self.grp_conv_mat[self.probability.groups[i], i] = self.grp_conv_mat = self.grp_conv_mat.tocsc()self.t2t_mat = eye(self.probability.num_loci, self.probability.num_loci)self.t2t_mat = self.t2t_mat.tolil()for tid_list in self.probability.groups:for ii in range(len(tid_list)):for jj in range(ii):i = tid_list[ii]j = 
tid_list[jj]self.t2t_mat[i, j] = self.t2t_mat[j, i] = self.t2t_mat = self.t2t_mat.tocsc()if lenfile is not None:hid = dict(list(zip(self.probability.hname, np.arange(len(self.probability.hname)))))self.target_lengths = np.zeros((self.probability.num_loci, self.probability.num_haplotypes))if self.probability.num_haplotypes > :with open(lenfile) as fh:for curline in fh:item = curline.rstrip().split(\"\")locus, hap = item[].split(\"\")self.target_lengths[self.probability.lid[locus], hid[hap]] = max(float(item[]) - read_length + , )elif self.probability.num_haplotypes > :with open(lenfile) as fh:for curline in fh:item = curline.rstrip().split(\"\")self.target_lengths[self.probability.lid[item[]], ] = max(float(item[]) - read_length + , )else:raise RuntimeError('')self.target_lengths = self.target_lengths.transpose()if not np.all(self.target_lengths > ):raise RuntimeError('')self.probability.normalize_reads(axis=APM.Axis.READ) self.allelic_expression = self.probability.sum(axis=APM.Axis.READ)if self.target_lengths is not None: self.allelic_expression = np.divide(self.allelic_expression, self.target_lengths)if pseudocount > : orig_allelic_expression_sum = self.allelic_expression.sum()nzloci = np.nonzero(self.allelic_expression)[]self.allelic_expression[:, nzloci] += pseudocountself.allelic_expression *= (orig_allelic_expression_sum / self.allelic_expression.sum())", "docstring": "Initializes the probability of read origin according to the alignment profile\n\n:param pseudocount: Uniform prior for allele specificity estimation\n:return: Nothing (as it performs an in-place operations)", "id": "f12618:c0:m1"} {"signature": "def reset(self, pseudocount=):", "body": "self.probability.reset()self.probability.normalize_reads(axis=APM.Axis.READ) self.allelic_expression = self.probability.sum(axis=APM.Axis.READ)if self.target_lengths is not None: self.allelic_expression = np.divide(self.allelic_expression, self.target_lengths)if pseudocount > : orig_allelic_expression_sum = self.allelic_expression.sum()nzloci = np.nonzero(self.allelic_expression)[]self.allelic_expression[:, nzloci] += pseudocountself.allelic_expression *= (orig_allelic_expression_sum / self.allelic_expression.sum())", "docstring": "Initializes the probability of read origin according to the alignment profile\n\n:param pseudocount: Uniform prior for allele specificity estimation\n:return: Nothing (as it performs an in-place operations)", "id": "f12618:c0:m2"} {"signature": "def update_probability_at_read_level(self, model=):", "body": "self.probability.reset() if model == :self.probability.multiply(self.allelic_expression, axis=APM.Axis.READ)self.probability.normalize_reads(axis=APM.Axis.HAPLOGROUP, grouping_mat=self.t2t_mat)haplogroup_sum_mat = self.allelic_expression * self.t2t_matself.probability.multiply(haplogroup_sum_mat, axis=APM.Axis.READ)self.probability.normalize_reads(axis=APM.Axis.GROUP, grouping_mat=self.t2t_mat)self.probability.multiply(haplogroup_sum_mat.sum(axis=), axis=APM.Axis.HAPLOTYPE)self.probability.normalize_reads(axis=APM.Axis.READ)elif model == :self.probability.multiply(self.allelic_expression, axis=APM.Axis.READ)self.probability.normalize_reads(axis=APM.Axis.LOCUS)self.probability.multiply(self.allelic_expression.sum(axis=), axis=APM.Axis.HAPLOTYPE)self.probability.normalize_reads(axis=APM.Axis.GROUP, grouping_mat=self.t2t_mat)self.probability.multiply((self.allelic_expression * self.t2t_mat).sum(axis=), axis=APM.Axis.HAPLOTYPE)self.probability.normalize_reads(axis=APM.Axis.READ)elif model == 
:self.probability.multiply(self.allelic_expression, axis=APM.Axis.READ)self.probability.normalize_reads(axis=APM.Axis.GROUP, grouping_mat=self.t2t_mat)self.probability.multiply((self.allelic_expression * self.t2t_mat).sum(axis=), axis=APM.Axis.HAPLOTYPE)self.probability.normalize_reads(axis=APM.Axis.READ)elif model == :self.probability.multiply(self.allelic_expression, axis=APM.Axis.READ)self.probability.normalize_reads(axis=APM.Axis.READ)else:raise RuntimeError('')", "docstring": "Updates the probability of read origin at read level\n\n:param model: Normalization model (1: Gene->Allele->Isoform, 2: Gene->Isoform->Allele, 3: Gene->Isoform*Allele, 4: Gene*Isoform*Allele)\n:return: Nothing (as it performs in-place operations)", "id": "f12618:c0:m4"} {"signature": "def update_allelic_expression(self, model=):", "body": "self.update_probability_at_read_level(model)self.allelic_expression = self.probability.sum(axis=APM.Axis.READ)if self.target_lengths is not None:self.allelic_expression = np.divide(self.allelic_expression, self.target_lengths)", "docstring": "A single EM step: Update probability at read level and then re-estimate allelic specific expression\n\n:param model: Normalization model (1: Gene->Allele->Isoform, 2: Gene->Isoform->Allele, 3: Gene->Isoform*Allele, 4: Gene*Isoform*Allele)\n:return: Nothing (as it performs in-place operations)", "id": "f12618:c0:m5"} {"signature": "def run(self, model, tol=, max_iters=, verbose=True):", "body": "orig_err_states = np.seterr(all='')np.seterr(under='')if verbose:print()print(\"\")print(\"\")num_iters = err_sum = time0 = time.time()target_err = * tolwhile err_sum > target_err and num_iters < max_iters:prev_isoform_expression = self.get_allelic_expression().sum(axis=)prev_isoform_expression *= ( / prev_isoform_expression.sum())self.update_allelic_expression(model=model)curr_isoform_expression = self.get_allelic_expression().sum(axis=)curr_isoform_expression *= ( / curr_isoform_expression.sum())err = np.abs(curr_isoform_expression - prev_isoform_expression)err_sum = err.sum()num_iters += if verbose:time1 = time.time()delmin, s = divmod(int(time1 - time0), )h, m = divmod(delmin, )print(\"\" % (num_iters, h, m, s, err_sum))", "docstring": "Runs EM iterations\n\n:param model: Normalization model (1: Gene->Allele->Isoform, 2: Gene->Isoform->Allele, 3: Gene->Isoform*Allele, 4: Gene*Isoform*Allele)\n:param tol: Tolerance for termination\n:param max_iters: Maximum number of iterations until termination\n:param verbose: Display information on how EM is running\n:return: Nothing (as it performs in-place operations)", "id": "f12618:c0:m6"} {"signature": "def report_read_counts(self, filename, grp_wise=False, reorder='', notes=None):", "body": "expected_read_counts = self.probability.sum(axis=APM.Axis.READ)if grp_wise:lname = self.probability.gnameexpected_read_counts = expected_read_counts * self.grp_conv_matelse:lname = self.probability.lnametotal_read_counts = expected_read_counts.sum(axis=)if reorder == '':report_order = np.argsort(total_read_counts.flatten())report_order = report_order[::-]elif reorder == '':report_order = np.argsort(total_read_counts.flatten())elif reorder == '':report_order = np.arange(len(lname)) cntdata = np.vstack((expected_read_counts, total_read_counts))fhout = open(filename, '')fhout.write(\"\" + \"\".join(self.probability.hname) + \"\")if notes is not None:fhout.write(\"\")fhout.write(\"\")for locus_id in report_order:lname_cur = lname[locus_id]fhout.write(\"\".join([lname_cur] + list(map(str, cntdata[:, 
locus_id].ravel()))))if notes is not None:fhout.write(\"\" % notes[lname_cur])fhout.write(\"\")fhout.close()", "docstring": "Exports expected read counts\n\n:param filename: File name for output\n:param grp_wise: whether the report is at isoform level or gene level\n:param reorder: whether the report should be either 'decreasing' or 'increasing' order or just 'as-is'\n:return: Nothing but the method writes a file", "id": "f12618:c0:m7"} {"signature": "def report_depths(self, filename, tpm=True, grp_wise=False, reorder='', notes=None):", "body": "if grp_wise:lname = self.probability.gnamedepths = self.allelic_expression * self.grp_conv_matelse:lname = self.probability.lnamedepths = self.allelic_expressionif tpm:depths *= ( / depths.sum())total_depths = depths.sum(axis=)if reorder == '':report_order = np.argsort(total_depths.flatten())report_order = report_order[::-]elif reorder == '':report_order = np.argsort(total_depths.flatten())elif reorder == '':report_order = np.arange(len(lname)) cntdata = np.vstack((depths, total_depths))fhout = open(filename, '')fhout.write(\"\" + \"\".join(self.probability.hname) + \"\")if notes is not None:fhout.write(\"\")fhout.write(\"\")for locus_id in report_order:lname_cur = lname[locus_id]fhout.write(\"\".join([lname_cur] + list(map(str, cntdata[:, locus_id].ravel()))))if notes is not None:fhout.write(\"\" % notes[lname_cur])fhout.write(\"\")fhout.close()", "docstring": "Exports expected depths\n\n:param filename: File name for output\n:param grp_wise: whether the report is at isoform level or gene level\n:param reorder: whether the report should be either 'decreasing' or 'increasing' order or just 'as-is'\n:return: Nothing but the method writes a file", "id": "f12618:c0:m8"} {"signature": "def export_posterior_probability(self, filename, title=\"\"):", "body": "self.probability.save(h5file=filename, title=title)", "docstring": "Writes the posterior probability of read origin\n\n:param filename: File name for output\n:param title: The title of the posterior probability matrix\n:return: Nothing but the method writes a file in EMASE format (PyTables)", "id": "f12618:c0:m9"} {"signature": "def bundle(self, reset=False, shallow=False): ", "body": "if self.finalized:if self.groups is not None and self.gname is not None:grp_conv_mat = lil_matrix((self.num_loci, self.num_groups))for i in range(self.num_groups):grp_conv_mat[self.groups[i], i] = grp_align = Sparse3DMatrix.__mul__(self, grp_conv_mat) grp_align.num_loci = self.num_groupsgrp_align.num_haplotypes = self.num_haplotypesgrp_align.num_reads = self.num_readsgrp_align.shape = (grp_align.num_loci, grp_align.num_haplotypes, grp_align.num_reads)if not shallow:grp_align.lname = copy.copy(self.gname)grp_align.hname = self.hnamegrp_align.rname = copy.copy(self.rname)grp_align.lid = dict(list(zip(grp_align.lname, np.arange(grp_align.num_loci))))grp_align.rid = copy.copy(self.rid)if reset:grp_align.reset()return grp_alignelse:raise RuntimeError('')else:raise RuntimeError('')", "docstring": "Returns ``AlignmentPropertyMatrix`` object in which loci are bundled using grouping information.\n\n:param reset: whether to reset the values at the loci\n:param shallow: whether to copy all the meta data", "id": "f12620:c0:m6"} {"signature": "def normalize_reads(self, axis, grouping_mat=None):", "body": "if self.finalized:if axis == self.Axis.LOCUS: normalizer = self.sum(axis=self.Axis.HAPLOTYPE) normalizer.eliminate_zeros()for hid in range(self.num_haplotypes):self.data[hid].eliminate_zeros() self.data[hid] = 
np.divide(self.data[hid], normalizer) elif axis == self.Axis.HAPLOTYPE: for hid in range(self.num_haplotypes):normalizer = self.data[hid].sum(axis=self.Axis.HAPLOTYPE) normalizer = normalizer.A.flatten()self.data[hid].data /= normalizer[self.data[hid].indices]elif axis == self.Axis.READ: sum_mat = self.sum(axis=self.Axis.LOCUS)normalizer = sum_mat.sum(axis=self.Axis.HAPLOTYPE)normalizer = normalizer.ravel()for hid in range(self.num_haplotypes):self.data[hid].data /= normalizer[self.data[hid].indices]elif axis == self.Axis.GROUP: if grouping_mat is None:raise RuntimeError('')normalizer = self.sum(axis=self.Axis.HAPLOTYPE) * grouping_matfor hid in range(self.num_haplotypes):self.data[hid].eliminate_zeros() self.data[hid] = np.divide(self.data[hid], normalizer)elif axis == self.Axis.HAPLOGROUP: if grouping_mat is None:raise RuntimeError('')for hid in range(self.num_haplotypes): normalizer = self.data[hid] * grouping_mat self.data[hid].eliminate_zeros() self.data[hid] = np.divide(self.data[hid], normalizer)else:raise RuntimeError('')else:raise RuntimeError('')", "docstring": "Read-wise normalization\n\n:param axis: The dimension along which we want to normalize values\n:param grouping_mat: An incidence matrix that specifies which isoforms are from a same gene\n:return: Nothing (as the method performs in-place operations)\n:rtype: None", "id": "f12620:c0:m11"} {"signature": "def pull_alignments_from(self, reads_to_use, shallow=False):", "body": "new_alnmat = self.copy(shallow=shallow)for hid in range(self.num_haplotypes):hdata = new_alnmat.data[hid]hdata.data *= reads_to_use[hdata.indices]hdata.eliminate_zeros()if new_alnmat.count is not None:new_alnmat.count[np.logical_not(reads_to_use)] = return new_alnmat", "docstring": "Pull out alignments of certain reads\n\n:param reads_to_use: numpy array of dtype=bool specifying which reads to use\n:param shallow: whether to copy sparse 3D matrix only or not\n:return: a new AlignmentPropertyMatrix object that particular reads are", "id": "f12620:c0:m12"} {"signature": "def get_unique_reads(self, ignore_haplotype=False, shallow=False):", "body": "if self.finalized:if ignore_haplotype:summat = self.sum(axis=self.Axis.HAPLOTYPE)nnz_per_read = np.diff(summat.tocsr().indptr)unique_reads = np.logical_and(nnz_per_read > , nnz_per_read < )else: alncnt_per_read = self.sum(axis=self.Axis.LOCUS).sum(axis=self.Axis.HAPLOTYPE)unique_reads = np.logical_and(alncnt_per_read > , alncnt_per_read < )return self.pull_alignments_from(unique_reads, shallow=shallow)else:raise RuntimeError('')", "docstring": "Pull out alignments of uniquely-aligning reads\n\n:param ignore_haplotype: whether to regard allelic multiread as uniquely-aligning read\n:param shallow: whether to copy sparse 3D matrix only or not\n:return: a new AlignmentPropertyMatrix object that particular reads are", "id": "f12620:c0:m13"} {"signature": "def print_read(self, rid):", "body": "if self.rname is not None:print(self.rname[rid])print('')r = self.get_read_data(rid)aligned_loci = np.unique(r.nonzero()[])for locus in aligned_loci:nzvec = r[:, locus].todense().transpose()[].A.flatten()if self.lname is not None:print(self.lname[locus], end='')else:print(locus, end='')print(nzvec)", "docstring": "Prints nonzero rows of the read wanted", "id": "f12620:c0:m20"} {"signature": "def __getattr__(self, name, *args, **kwargs):", "body": "def function(*args, **kwargs):if name not in StrictRedisCluster._loop_keys:tag_start = Nonekey_type = hash_tag = ''list_ht = []if isinstance(args[], (basestring, bytes)):key_type = 
''list_ht.append(args[])else:if isinstance(args[], list):key_type = ''list_ht.append(args[][])else:key_type = ''list_ht = iterkeys(args[])for k in list_ht:try:tag_start = k.index('')hash_tag = kbreakexcept Exception as e:tag_start = Noneif name in StrictRedisCluster._tag_keys and not tag_start:try:return getattr(self, '' + name)(*args, **kwargs)except AttributeError:raise redis.DataError(\"\" % name)hkey = args[]if tag_start is not None:L = list(args)if key_type != '':if key_type == '':hkey = L[][][tag_start + :-]L[][] = L[][][:tag_start]else:hkey = hash_tag[tag_start + :-]L[][hash_tag[:tag_start]] = L[][hash_tag]del L[][hash_tag]else:hkey = L[][tag_start + :-]L[] = L[][:tag_start]args = tuple(L)node = self._getnodenamefor(hkey)if name in StrictRedisCluster._write_keys:redisent = self.redises[node]elif name in StrictRedisCluster._read_keys:redisent = self.redises[node + '']else:raise redis.DataError(\"\" % name)return getattr(redisent, name)(*args, **kwargs)else:if name not in self._loop_keys_admin:try:return getattr(self, '' + name)(*args, **kwargs)except AttributeError:raise redis.DataError(\"\" % name)result = {}for alias, redisent in iteritems(self.redises):if (name in StrictRedisCluster._write_keys and alias.find('') >= ) or (name in StrictRedisCluster._read_keys and alias.find('') == -):res = Noneelse:res = getattr(redisent, name)(*args, **kwargs)result[alias] = resreturn resultreturn function", "docstring": "Magic method to handle all redis commands\n- string name The name of the command called.\n- tuple args of supplied arguments to the command.", "id": "f12629:c0:m1"} {"signature": "def _getnodenamefor(self, name):", "body": "return '' + str((abs(binascii.crc32(b(name)) & ) % self.no_servers) + )", "docstring": "Return the node name where the ``name`` would land to", "id": "f12629:c0:m2"} {"signature": "def getnodefor(self, name):", "body": "node = self._getnodenamefor(name)return {node: self.cluster[''][node]}", "docstring": "Return the node where the ``name`` would land to", "id": "f12629:c0:m3"} {"signature": "def __setitem__(self, name, value):", "body": "return self.set(name, value)", "docstring": "Set the value at key ``name`` to ``value``", "id": "f12629:c0:m4"} {"signature": "def __getitem__(self, name):", "body": "value = self.get(name)if value:return valueraise KeyError(name)", "docstring": "Return the value at key ``name``, raises a KeyError if the key\ndoesn't exist.", "id": "f12629:c0:m5"} {"signature": "def __delitem__(self, *names):", "body": "return self.delete(*names)", "docstring": "Delete one or more keys specified by ``names``", "id": "f12629:c0:m6"} {"signature": "def object(self, infotype, key):", "body": "redisent = self.redises[self._getnodenamefor(key) + '']return getattr(redisent, '')(infotype, key)", "docstring": "Return the encoding, idletime, or refcount about the key", "id": "f12629:c0:m7"} {"signature": "def _rc_brpoplpush(self, src, dst, timeout=):", "body": "rpop = self.brpop(src, timeout)if rpop is not None:self.lpush(dst, rpop[])return rpop[]return None", "docstring": "Pop a value off the tail of ``src``, push it on the head of ``dst``\nand then return it.\n\nThis command blocks until a value is in ``src`` or until ``timeout``\nseconds elapse, whichever is first. 
A ``timeout`` value of 0 blocks\nforever.\nNot atomic", "id": "f12629:c0:m8"} {"signature": "def _rc_rpoplpush(self, src, dst):", "body": "rpop = self.rpop(src)if rpop is not None:self.lpush(dst, rpop)return rpopreturn None", "docstring": "RPOP a value off of the ``src`` list and LPUSH it\non to the ``dst`` list. Returns the value.", "id": "f12629:c0:m9"} {"signature": "def _rc_sdiff(self, src, *args):", "body": "args = list_or_args(src, args)src_set = self.smembers(args.pop())if src_set is not set([]):for key in args:src_set.difference_update(self.smembers(key))return src_set", "docstring": "Returns the members of the set resulting from the difference between\nthe first set and all the successive sets.", "id": "f12629:c0:m10"} {"signature": "def _rc_sdiffstore(self, dst, src, *args):", "body": "args = list_or_args(src, args)result = self.sdiff(*args)if result is not set([]):return self.sadd(dst, *list(result))return ", "docstring": "Store the difference of sets ``src``, ``args`` into a new\nset named ``dest``. Returns the number of keys in the new set.", "id": "f12629:c0:m11"} {"signature": "def _rc_sinter(self, src, *args):", "body": "args = list_or_args(src, args)src_set = self.smembers(args.pop())if src_set is not set([]):for key in args:src_set.intersection_update(self.smembers(key))return src_set", "docstring": "Returns the members of the set resulting from the intersection between\nthe first set and all the successive sets.", "id": "f12629:c0:m12"} {"signature": "def _rc_sinterstore(self, dst, src, *args):", "body": "args = list_or_args(src, args)result = self.sinter(*args)if result is not set([]):return self.sadd(dst, *list(result))return ", "docstring": "Store the intersection of sets ``src``, ``args`` into a new\nset named ``dest``. Returns the number of keys in the new set.", "id": "f12629:c0:m13"} {"signature": "def _rc_smove(self, src, dst, value):", "body": "if self.type(src) != b(\"\"):return self.smove(src + \"\" + src + \"\", dst, value)if self.type(dst) != b(\"\"):return self.smove(dst + \"\" + dst + \"\", src, value)if self.srem(src, value):return if self.sadd(dst, value) else return ", "docstring": "Move ``value`` from set ``src`` to set ``dst``\nnot atomic", "id": "f12629:c0:m14"} {"signature": "def _rc_sunion(self, src, *args):", "body": "args = list_or_args(src, args)src_set = self.smembers(args.pop())if src_set is not set([]):for key in args:src_set.update(self.smembers(key))return src_set", "docstring": "Returns the members of the set resulting from the union between\nthe first set and all the successive sets.", "id": "f12629:c0:m15"} {"signature": "def _rc_sunionstore(self, dst, src, *args):", "body": "args = list_or_args(src, args)result = self.sunion(*args)if result is not set([]):return self.sadd(dst, *list(result))return ", "docstring": "Store the union of sets ``src``, ``args`` into a new\nset named ``dest``. 
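The _rc_sdiff/_rc_sinter/_rc_sunion records above emulate Redis multi-key set commands on the client side, because the keys may hash to different nodes in the cluster. A sketch of the general idea (the exact argument handling in the records differs); get_client is a hypothetical callable returning the redis client that owns a given key:

    def client_side_sinter(get_client, first_key, *other_keys):
        # Fetch each set from whichever node owns it and intersect locally.
        result = set(get_client(first_key).smembers(first_key))
        for key in other_keys:
            result.intersection_update(get_client(key).smembers(key))
        return result
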
Returns the number of keys in the new set.", "id": "f12629:c0:m16"} {"signature": "def _rc_mset(self, mapping):", "body": "result = Truefor k, v in iteritems(mapping):result = result and self.set(k, v)return result", "docstring": "Sets each key in the ``mapping`` dict to its corresponding value", "id": "f12629:c0:m17"} {"signature": "def _rc_msetnx(self, mapping):", "body": "for k in iterkeys(mapping):if self.exists(k):return Falsereturn self._rc_mset(mapping)", "docstring": "Sets each key in the ``mapping`` dict to its corresponding value if\nnone of the keys are already set", "id": "f12629:c0:m18"} {"signature": "def _rc_mget(self, keys, *args):", "body": "args = list_or_args(keys, args)result = []for key in args:result.append(self.get(key))return result", "docstring": "Returns a list of values ordered identically to ``*args``", "id": "f12629:c0:m19"} {"signature": "def _rc_rename(self, src, dst):", "body": "if src == dst:return self.rename(src + \"\" + src + \"\", src)if not self.exists(src):return self.rename(src + \"\" + src + \"\", src)self.delete(dst)ktype = self.type(src)kttl = self.ttl(src)if ktype == b(''):return Falseif ktype == b(''):self.set(dst, self.get(src))elif ktype == b(''):self.hmset(dst, self.hgetall(src))elif ktype == b(''):for k in self.lrange(src, , -):self.rpush(dst, k)elif ktype == b(''):for k in self.smembers(src):self.sadd(dst, k)elif ktype == b(''):for k, v in self.zrange(src, , -, withscores=True):self.zadd(dst, v, k)kttl = - if kttl is None or kttl < else int(kttl)if kttl != -:self.expire(dst, kttl)return self.delete(src)", "docstring": "Rename key ``src`` to ``dst``", "id": "f12629:c0:m20"} {"signature": "def _rc_renamenx(self, src, dst):", "body": "if self.exists(dst):return Falsereturn self._rc_rename(src, dst)", "docstring": "Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist", "id": "f12629:c0:m21"} {"signature": "def _rc_keys(self, pattern=''):", "body": "result = []for alias, redisent in iteritems(self.redises):if alias.find('') == -:continueresult.extend(redisent.keys(pattern))return result", "docstring": "Returns a list of keys matching ``pattern``", "id": "f12629:c0:m22"} {"signature": "def _rc_dbsize(self):", "body": "result = for alias, redisent in iteritems(self.redises):if alias.find('') == -:continueresult += redisent.dbsize()return result", "docstring": "Returns the number of keys in the current database", "id": "f12629:c0:m23"} {"signature": "def comma_separated_str_to_list(config_val, evar):", "body": "if not config_val:return []return [token.strip() for token in config_val.split('')]", "docstring": "Splits a comma-separated environment variable into a list of strings.\n\n:param str config_val: The env var value.\n:param EnvironmentVariable evar: The EVar object we are validating\n a value for.\n:rtype: list\n:return: The equivalent list for a comma-separated string.", "id": "f12632:m0"} {"signature": "def comma_separated_to_set(config_val, evar):", "body": "return set(comma_separated_str_to_list(config_val, evar))", "docstring": "Splits a comma-separated environment variable into a set of strings.\n\n:param str config_val: The env var value.\n:param EnvironmentVariable evar: The EVar object we are validating\n a value for.\n:rtype: set\n:return: The equivalent set for a comma-separated string.", "id": "f12632:m1"} {"signature": "def value_to_none(config_val, evar):", "body": "if not config_val:return Nonereturn config_val", "docstring": "Given a value that evaluates to a boolean False, return None.\n\n:param str config_val: The 
env var value.\n:param EnvironmentVariable evar: The EVar object we are validating\n a value for.\n:rtype: str or None\n:return: Either the non-False value or None.", "id": "f12632:m2"} {"signature": "def value_to_int(config_val, evar):", "body": "return int(config_val)", "docstring": "Convert the value to int.\n\n:param str config_val: The env var value.\n:param EnvironmentVariable evar: The EVar object we are validating\n a value for.\n:rtype: int", "id": "f12632:m3"} {"signature": "def value_to_bool(config_val, evar):", "body": "if not config_val:return Falseif config_val.strip().lower() == '':return Trueelse:return False", "docstring": "Massages the 'true' and 'false' strings to bool equivalents.\n\n:param str config_val: The env var value.\n:param EnvironmentVariable evar: The EVar object we are validating\n a value for.\n:rtype: bool\n:return: True or False, depending on the value.", "id": "f12632:m4"} {"signature": "def validate_is_not_none(config_val, evar):", "body": "if config_val is None:raise ValueError(\"\"\"\".format(evar_name=evar.name))return config_val", "docstring": "If the value is ``None``, fail validation.\n\n:param str config_val: The env var value.\n:param EnvironmentVariable evar: The EVar object we are validating\n a value for.\n:raises: ValueError if the config value is None.", "id": "f12632:m5"} {"signature": "def validate_is_boolean_true(config_val, evar):", "body": "if config_val is None:raise ValueError(\"\"\"\".format(evar_name=evar.name))return config_val", "docstring": "Make sure the value evaluates to boolean True.\n\n:param str config_val: The env var value.\n:param EnvironmentVariable evar: The EVar object we are validating\n a value for.\n:raises: ValueError if the config value evaluates to boolean False.", "id": "f12632:m6"} {"signature": "def value_to_python_log_level(config_val, evar):", "body": "if not config_val:config_val = evar.default_valconfig_val = config_val.upper()return logging._checkLevel(config_val)", "docstring": "Convert an evar value into a Python logging level constant.\n\n:param str config_val: The env var value.\n:param EnvironmentVariable evar: The EVar object we are validating\n a value for.\n:return: A validated string.\n:raises: ValueError if the log level is invalid.", "id": "f12632:m7"} {"signature": "def __init__(self, evar_defs):", "body": "self.evar_defs = evar_defs", "docstring": ":param dict evar_defs: Pass in a dict whose keys are config\n names and the values are :py:class:`EnvironmentVariable`\n instances.", "id": "f12634:c0:m0"} {"signature": "def load_values(self):", "body": "for config_name, evar in self.evar_defs.items():if evar.is_required and evar.name not in os.environ:raise RuntimeError((\"\"\"\").format(evar_name=evar.name, help_txt=evar.help_txt))if evar.name in os.environ:self[config_name] = os.environ.get(evar.name)else:self[config_name] = evar.default_valfor filter in evar.filters:current_val = self.get(config_name)new_val = filter(current_val, evar)self[config_name] = new_valself._filter_all()", "docstring": "Go through the env var map, transferring the values to this object\nas attributes.\n\n:raises: RuntimeError if a required env var isn't defined.", "id": "f12634:c0:m1"} {"signature": "def _filter_all(self):", "body": "pass", "docstring": "This runs after all individual env vars have been loaded.\nFeel free to modify values or raise exceptions as need be.\n\n:raises: ValueError if something is amiss.", "id": "f12634:c0:m2"} {"signature": "def __init__(self, name, is_required=True, 
default_val=None,filters=None, help_txt=None):", "body": "self.name = nameself.is_required = is_requiredself.default_val = default_valself.filters = filters or []self.help_txt = help_txt", "docstring": ":param str name: The name of the environment variable. *This is\n case-sensitive!*\n:keyword bool is_required: If ``True``, this variable must be defined\n when your Python process starts. If ``False``, the default loaded\n value will match ``default_val``.\n:keyword default_val: If ``is_required`` is ``False`` and this\n environment variable is not defined, this value will be loaded.\n:keyword list filters: A list of functions to pass the environment\n variable's value (or default value) through. Order is\n significant!\n:keyword str help_txt: Optional help text describing the environment\n variable.", "id": "f12634:c1:m0"} {"signature": "def is_rarfile(filename):", "body": "mode = constants.RAR_OM_LIST_INCSPLITarchive = unrarlib.RAROpenArchiveDataEx(filename, mode=mode)try:handle = unrarlib.RAROpenArchiveEx(ctypes.byref(archive))except unrarlib.UnrarException:return Falseunrarlib.RARCloseArchive(handle)return (archive.OpenResult == constants.SUCCESS)", "docstring": "Return true if file is a valid RAR file.", "id": "f12640:m0"} {"signature": "def __init__(self, header):", "body": "self.filename = header.FileNameWself._raw_time = header.FileTimeself.date_time = unrarlib.dostime_to_timetuple(header.FileTime)self.compress_size = header.PackSize + (header.PackSizeHigh << )self.file_size = header.UnpSize + (header.UnpSizeHigh << )self.create_system = header.HostOSself.extract_version = header.UnpVerself.CRC = header.FileCRCself.flag_bits = header.Flagsif header.CmtState == constants.RAR_COMMENTS_SUCCESS:self.comment = header.CmtBuf.valueelse:self.comment = None", "docstring": "Initialize a RarInfo object with a member header data.", "id": "f12640:c1:m0"} {"signature": "def __init__(self, filename, mode='', pwd=None):", "body": "self.filename = filenamemode = constants.RAR_OM_LIST_INCSPLITarchive = unrarlib.RAROpenArchiveDataEx(filename, mode=mode)handle = self._open(archive)self.pwd = pwdif self.pwd is not None:unrarlib.RARSetPassword(handle, b(self.pwd))self.filelist = []self.NameToInfo = {}if archive.CmtState == constants.RAR_COMMENTS_SUCCESS:self.comment = archive.CmtBuf.valueelse:self.comment = Noneself._load_metadata(handle)self._close(handle)", "docstring": "Load RAR archive file with mode read only \"r\".", "id": "f12640:c3:m0"} {"signature": "def _read_header(self, handle):", "body": "header_data = unrarlib.RARHeaderDataEx()try:res = unrarlib.RARReadHeaderEx(handle, ctypes.byref(header_data))rarinfo = RarInfo(header=header_data)except unrarlib.ArchiveEnd:return Noneexcept unrarlib.MissingPassword:raise RuntimeError(\"\")except unrarlib.BadPassword:raise RuntimeError(\"\")except unrarlib.UnrarException as e:raise BadRarFile(str(e))return rarinfo", "docstring": "Read current member header into a RarInfo object.", "id": "f12640:c3:m1"} {"signature": "def _process_current(self, handle, op, dest_path=None, dest_name=None):", "body": "unrarlib.RARProcessFileW(handle, op, dest_path, dest_name)", "docstring": "Process current member with 'op' operation.", "id": "f12640:c3:m2"} {"signature": "def _load_metadata(self, handle):", "body": "rarinfo = self._read_header(handle)while rarinfo:self.filelist.append(rarinfo)self.NameToInfo[rarinfo.filename] = rarinfoself._process_current(handle, constants.RAR_SKIP)rarinfo = self._read_header(handle)", "docstring": "Load archive members metadata.", "id": 
"f12640:c3:m3"} {"signature": "def _open(self, archive):", "body": "try:handle = unrarlib.RAROpenArchiveEx(ctypes.byref(archive))except unrarlib.UnrarException:raise BadRarFile(\"\")return handle", "docstring": "Open RAR archive file.", "id": "f12640:c3:m4"} {"signature": "def _close(self, handle):", "body": "try:unrarlib.RARCloseArchive(handle)except unrarlib.CloseError:raise BadRarFile(\"\")", "docstring": "Close RAR archive file.", "id": "f12640:c3:m5"} {"signature": "def open(self, member, pwd=None):", "body": "if isinstance(member, RarInfo):member = member.filenamearchive = unrarlib.RAROpenArchiveDataEx(self.filename, mode=constants.RAR_OM_EXTRACT)handle = self._open(archive)password = pwd or self.pwdif password is not None:unrarlib.RARSetPassword(handle, b(password))data = _ReadIntoMemory()c_callback = unrarlib.UNRARCALLBACK(data._callback)unrarlib.RARSetCallback(handle, c_callback, )try:rarinfo = self._read_header(handle)while rarinfo is not None:if rarinfo.filename == member:self._process_current(handle, constants.RAR_TEST)breakelse:self._process_current(handle, constants.RAR_SKIP)rarinfo = self._read_header(handle)if rarinfo is None:data = Noneexcept unrarlib.MissingPassword:raise RuntimeError(\"\")except unrarlib.BadPassword:raise RuntimeError(\"\")except unrarlib.BadDataError:if password is not None:raise RuntimeError(\"\")else:raise RuntimeError(\"\")except unrarlib.UnrarException as e:raise BadRarFile(\"\" % str(e))finally:self._close(handle)if data is None:raise KeyError('' % member)return data.get_bytes()", "docstring": "Return file-like object for 'member'.\n\n 'member' may be a filename or a RarInfo object.", "id": "f12640:c3:m6"} {"signature": "def read(self, member, pwd=None):", "body": "return self.open(member, pwd).read()", "docstring": "Return file bytes (as a string) for name.", "id": "f12640:c3:m7"} {"signature": "def namelist(self):", "body": "names = []for member in self.filelist:names.append(member.filename)return names", "docstring": "Return a list of file names in the archive.", "id": "f12640:c3:m8"} {"signature": "def setpassword(self, pwd):", "body": "self.pwd = pwd", "docstring": "Set default password for encrypted files.", "id": "f12640:c3:m9"} {"signature": "def getinfo(self, name):", "body": "rarinfo = self.NameToInfo.get(name)if rarinfo is None:raise KeyError('' % name)return rarinfo", "docstring": "Return the instance of RarInfo given 'name'.", "id": "f12640:c3:m10"} {"signature": "def infolist(self):", "body": "return self.filelist", "docstring": "Return a list of class RarInfo instances for files in the\n archive.", "id": "f12640:c3:m11"} {"signature": "def printdir(self):", "body": "print(\"\" % (\"\", \"\", \"\"))for rarinfo in self.filelist:date = \"\" % rarinfo.date_time[:]print(\"\" % (rarinfo.filename, date, rarinfo.file_size))", "docstring": "Print a table of contents for the RAR file.", "id": "f12640:c3:m12"} {"signature": "def extract(self, member, path=None, pwd=None):", "body": "if isinstance(member, RarInfo):member = member.filenameif path is None:path = os.getcwd()self._extract_members([member], path, pwd)return os.path.join(path, member)", "docstring": "Extract a member from the archive to the current working directory,\n using its full name. Its file information is extracted as accurately\n as possible. `member' may be a filename or a RarInfo object. 
You can\n specify a different directory using `path'.", "id": "f12640:c3:m14"} {"signature": "def extractall(self, path=None, members=None, pwd=None):", "body": "if members is None:members = self.namelist()self._extract_members(members, path, pwd)", "docstring": "Extract all members from the archive to the current working\n directory. `path' specifies a different directory to extract to.\n `members' is optional and must be a subset of the list returned\n by namelist().", "id": "f12640:c3:m15"} {"signature": "def _extract_members(self, members, targetpath, pwd):", "body": "archive = unrarlib.RAROpenArchiveDataEx(self.filename, mode=constants.RAR_OM_EXTRACT)handle = self._open(archive)password = pwd or self.pwdif password is not None:unrarlib.RARSetPassword(handle, b(password))try:rarinfo = self._read_header(handle)while rarinfo is not None:if rarinfo.filename in members:self._process_current(handle, constants.RAR_EXTRACT, targetpath)else:self._process_current(handle, constants.RAR_SKIP)rarinfo = self._read_header(handle)except unrarlib.MissingPassword:raise RuntimeError(\"\")except unrarlib.BadPassword:raise RuntimeError(\"\")except unrarlib.BadDataError:raise RuntimeError(\"\")except unrarlib.UnrarException as e:raise BadRarFile(\"\" % str(e))finally:self._close(handle)", "docstring": "Extract the RarInfo objects 'members' to a physical\n file on the path targetpath.", "id": "f12640:c3:m16"} {"signature": "def dostime_to_timetuple(dostime):", "body": "dostime = dostime >> dostime = dostime & day = dostime & month = (dostime >> ) & year = + (dostime >> )second = * (dostime & )minute = (dostime >> ) & hour = dostime >> return (year, month, day, hour, minute, second)", "docstring": "Convert a RAR archive member DOS time to a Python time tuple.", "id": "f12642:m0"} {"signature": "def _c_func(func, restype, argtypes, errcheck=None):", "body": "func.restype = restypefunc.argtypes = argtypesif errcheck is not None:func.errcheck = errcheckreturn func", "docstring": "Wrap c function setting prototype.", "id": "f12642:m1"} {"signature": "def __repr__(self):", "body": "res = []for field in self._fields_:field_value = repr(getattr(self, field[]))res.append('' % (field[], field_value))return self.__class__.__name__ + '' + ''.join(res) + ''", "docstring": "Print the structure fields.", "id": "f12642:c16:m0"} {"signature": "def serial_ports():", "body": "df_comports = sd.comports()df_teensy_comports = df_comports.loc[df_comports.hardware_id.str.contains('',case=False)]return df_teensy_comports", "docstring": "Returns\n-------\npandas.DataFrame\n Table of serial ports that match the USB vendor ID and product ID for\n the `Teensy 3.2`_ board.\n\n.. 
Teensy 3.2: https://www.pjrc.com/store/teensy32.html", "id": "f12647:m0"} {"signature": "def get_sketch_directory():", "body": "return package_path().joinpath('', '').realpath()", "docstring": "Return directory containing the Arduino sketch.", "id": "f12649:m1"} {"signature": "def get_includes():", "body": "import base_node_rpcreturn ([get_sketch_directory()] +list(get_lib_directory().walkdirs('')) +base_node_rpc.get_includes())", "docstring": "Return directories containing the Arduino header files.\n\nNotes\n=====\n\nFor example:\n\n import arduino_rpc\n ...\n print ' '.join(['-I%s' % i for i in arduino_rpc.get_includes()])\n ...", "id": "f12649:m3"} {"signature": "def get_sources():", "body": "import base_node_rpcreturn (get_sketch_directory().files('') +list(get_lib_directory().walkfiles('')) +base_node_rpc.get_sources())", "docstring": "Return Arduino source file paths. This includes any supplementary source\nfiles that are not contained in Arduino libraries.", "id": "f12649:m4"} {"signature": "def get_firmwares():", "body": "return OrderedDict([(board_dir.name, [f.abspath() for f inboard_dir.walkfiles('')])for board_dir inpackage_path().joinpath('').dirs()])", "docstring": "Return compiled Arduino hex file paths.\n\nThis function may be used to locate firmware binaries that are available\nfor flashing to [Arduino][1] boards.\n\n[1]: http://arduino.cc", "id": "f12649:m5"} {"signature": "def parse_args(args=None):", "body": "from argparse import ArgumentParserif args is None:args = sys.argvparser = ArgumentParser(description='''''')parser.add_argument('', help='''')args = parser.parse_args()return args", "docstring": "Parses arguments, returns (options, args).", "id": "f12650:m1"} {"signature": "def makename(package, module):", "body": "if package:name = packageif module:name += '' + moduleelse:name = modulereturn name", "docstring": "Join package and module with a dot.", "id": "f12651:m0"} {"signature": "def write_file(name, text, opts):", "body": "if opts.dryrun:returnfname = os.path.join(opts.destdir, \"\" % (name, opts.suffix))if not opts.force and os.path.isfile(fname):print('' % fname)else:print('' % fname)f = open(fname, '')f.write(text)f.close()", "docstring": "Write the output file for module/package .", "id": "f12651:m1"} {"signature": "def format_heading(level, text):", "body": "underlining = ['', '', '', ][level-] * len(text)return '' % (text, underlining)", "docstring": "Create a heading of [1, 2 or 3 supported].", "id": "f12651:m2"} {"signature": "def format_directive(module, package=None):", "body": "directive = '' % makename(package, module)for option in OPTIONS:directive += '' % optionreturn directive", "docstring": "Create the automodule directive and add the options.", "id": "f12651:m3"} {"signature": "def create_module_file(package, module, opts):", "body": "text = format_heading(, '' % module)text += format_heading(, '' % module)text += format_directive(module, package)write_file(makename(package, module), text, opts)", "docstring": "Build the text of the file and write the file.", "id": "f12651:m4"} {"signature": "def create_package_file(root, master_package, subroot, py_files, opts, subs):", "body": "package = os.path.split(root)[-]text = format_heading(, '' % package)for py_file in py_files:if shall_skip(os.path.join(root, py_file)):continueis_package = py_file == INITpy_file = os.path.splitext(py_file)[]py_path = makename(subroot, py_file)if is_package:heading = '' % packageelse:heading = '' % py_filetext += format_heading(, heading)text += 
format_directive(is_package and subroot or py_path, master_package)text += ''subs = [sub for sub in subs if os.path.isfile(os.path.join(root, sub, INIT))]if subs:text += format_heading(, '')text += ''for sub in subs:text += '' % (makename(master_package, subroot), sub)text += ''write_file(makename(master_package, subroot), text, opts)", "docstring": "Build the text of the file and write the file.", "id": "f12651:m5"} {"signature": "def create_modules_toc_file(master_package, modules, opts, name=''):", "body": "text = format_heading(, '' % opts.header)text += ''text += '' % opts.maxdepthmodules.sort()prev_module = ''for module in modules:if module.startswith(prev_module + ''):continueprev_module = moduletext += '' % modulewrite_file(name, text, opts)", "docstring": "Create the module's index.", "id": "f12651:m6"} {"signature": "def shall_skip(module):", "body": "return os.path.getsize(module) < ", "docstring": "Check if we want to skip this module.", "id": "f12651:m7"} {"signature": "def recurse_tree(path, excludes, opts):", "body": "path = os.path.abspath(path)if INIT in os.listdir(path):package_name = path.split(os.path.sep)[-]else:package_name = Nonetoc = []tree = os.walk(path, False)for root, subs, files in tree:py_files = sorted([f for f in files if os.path.splitext(f)[] == ''])if INIT in py_files:py_files.remove(INIT)py_files.insert(, INIT)subs = sorted([sub for sub in subs if sub[] not in ['', '']])if \"\" in root or \"\" in rootor not py_filesor is_excluded(root, excludes):continueif INIT in py_files:if (subsorlen(py_files) > ornot shall_skip(os.path.join(root, INIT))):subroot = root[len(path):].lstrip(os.path.sep).replace(os.path.sep, '')create_package_file(root, package_name, subroot, py_files, opts, subs)toc.append(makename(package_name, subroot))elif root == path:for py_file in py_files:if not shall_skip(os.path.join(path, py_file)):module = os.path.splitext(py_file)[]create_module_file(package_name, module, opts)toc.append(makename(package_name, module))if not opts.notoc:create_modules_toc_file(package_name, toc, opts)", "docstring": "Look for every file in the directory tree and create the corresponding\nReST files.", "id": "f12651:m8"} {"signature": "def normalize_excludes(rootpath, excludes):", "body": "sep = os.path.sepf_excludes = []for exclude in excludes:if not os.path.isabs(exclude) and not exclude.startswith(rootpath):exclude = os.path.join(rootpath, exclude)if not exclude.endswith(sep):exclude += sepf_excludes.append(exclude)return f_excludes", "docstring": "Normalize the excluded directory list:\n* must be either an absolute path or start with rootpath,\n* otherwise it is joined with rootpath\n* with trailing slash", "id": "f12651:m9"} {"signature": "def is_excluded(root, excludes):", "body": "sep = os.path.sepif not root.endswith(sep):root += sepfor exclude in excludes:if root.startswith(exclude):return Truereturn False", "docstring": "Check if the directory is in the exclude list.\n\nNote: by having trailing slashes, we avoid common prefix issues, like\n e.g. 
an exlude \"foo\" also accidentally excluding \"foobar\".", "id": "f12651:m10"} {"signature": "def main():", "body": "parser = optparse.OptionParser(usage=\"\"\"\"\"\")parser.add_option(\"\", \"\", action=\"\", dest=\"\", help=\"\", default=\"\")parser.add_option(\"\", \"\", action=\"\", dest=\"\", help=\"\", default=\"\")parser.add_option(\"\", \"\", action=\"\", dest=\"\", help=\"\", default=\"\")parser.add_option(\"\", \"\", action=\"\", dest=\"\", help=\"\", type=\"\", default=)parser.add_option(\"\", \"\", action=\"\", dest=\"\", help=\"\")parser.add_option(\"\", \"\", action=\"\", dest=\"\", help=\"\")parser.add_option(\"\", \"\", action=\"\", dest=\"\", help=\"\")(opts, args) = parser.parse_args()if not args:parser.error(\"\")else:rootpath, excludes = args[], args[:]if os.path.isdir(rootpath):if opts.destdir and os.path.isdir(opts.destdir):excludes = normalize_excludes(rootpath, excludes)recurse_tree(rootpath, excludes, opts)else:print('' % opts.destdir)else:print('' % rootpath)", "docstring": "Parse and check the command line arguments.", "id": "f12651:m11"} {"signature": "def seeded_auth_token(client, service, seed):", "body": "hash_func = hashlib.md5()token = ''.join((client, service, seed)).encode('')hash_func.update(token)return hash_func.hexdigest()", "docstring": "Return an auth token based on the client+service+seed tuple.", "id": "f12653:m0"} {"signature": "def setUp(self):", "body": "tests_dir = os.path.dirname(__file__)bin_dir = script_path = os.path.join(tests_dir, '', '', '')script_path = os.path.join(bin_dir, '')self.app_dir = os.path.join(tests_dir, '', '')self.pub_conf = os.path.join(self.app_dir, '')self.conf = os.path.join(self.app_dir, '')env = {'': os.environ[''],'': ''.join(sys.path),}p = subprocess.Popen((script_path, '', self.app_dir, '', '','', ''),stdin=subprocess.PIPE, stdout=subprocess.PIPE,stderr=subprocess.PIPE, env=env)out, err = p.communicate()self.assertEqual(p.wait(), )self.assertEqual(err, b'')", "docstring": "Calls configurator in the sample app.", "id": "f12656:c0:m0"} {"signature": "def clean_sources(self, sources):", "body": "return [tuple(path.rsplit('', )[].rsplit('', )[:])for path in sources]", "docstring": "Convert a config_sources result into a create_sources list.\n\n i.e. the last two path components, minus a file extension", "id": "f12658:c1:m4"} {"signature": "def write(self, name, contents):", "body": "fn = os.path.join(self.tmpdir, name)with open(fn, '') as f:f.write(contents)return fn", "docstring": "Write contents to tmpdir/name. 
Return full filename.", "id": "f12658:c2:m3"} {"signature": "def filter(config):", "body": "keys = ['','','']return filter_dict(config, keys)", "docstring": "The subset of configuration keys to be made public.", "id": "f12660:m0"} {"signature": "def write_config(config, app_dir, filename=''):", "body": "path = os.path.join(app_dir, filename)with open(path, '') as f:json.dump(config, f, indent=, cls=DetectMissingEncoder,separators=('', ''))", "docstring": "Write configuration to the application directory.", "id": "f12662:m0"} {"signature": "def get_config_module(config_pathname):", "body": "configs_mod = ''if configs_mod not in sys.modules:sys.modules[configs_mod] = types.ModuleType(configs_mod)module_name = os.path.basename(config_pathname).rsplit('', )[]module_name = configs_mod + '' + module_namereturn _load_module(module_name, config_pathname)", "docstring": "Imports the config file to yoconfigurator.configs..", "id": "f12662:m2"} {"signature": "def filter_config(config, deploy_config):", "body": "if not os.path.isfile(deploy_config):return DotDict()config_module = get_config_module(deploy_config)return config_module.filter(config)", "docstring": "Return a config subset using the filter defined in the deploy config.", "id": "f12663:m0"} {"signature": "def merge_dicts(d1, d2, _path=None):", "body": "if _path is None:_path = ()if isinstance(d1, dict) and isinstance(d2, dict):for k, v in d2.items():if isinstance(v, MissingValue) and v.name is None:v.name = ''.join(_path + (k,))if isinstance(v, DeletedValue):d1.pop(k, None)elif k not in d1:if isinstance(v, dict):d1[k] = merge_dicts({}, v, _path + (k,))else:d1[k] = velse:if isinstance(d1[k], dict) and isinstance(v, dict):d1[k] = merge_dicts(d1[k], v, _path + (k,))elif isinstance(d1[k], list) and isinstance(v, list):d1[k] += velif isinstance(d1[k], MissingValue):d1[k] = velif d1[k] is None:d1[k] = velif type(d1[k]) == type(v):d1[k] = velse:raise TypeError(''% (type(d1[k]), type(v)))else:raise TypeError('' % (type(d1), type(d2)))return d1", "docstring": "Merge dictionary d2 into d1, overriding entries in d1 with values from d2.\n\nd1 is mutated.\n\n_path is for internal, recursive use.", "id": "f12664:m0"} {"signature": "def filter_dict(unfiltered, filter_keys):", "body": "filtered = DotDict()for k in filter_keys:filtered[k] = unfiltered[k]return filtered", "docstring": "Return a subset of a dictionary using the specified keys.", "id": "f12664:m1"} {"signature": "def _convert_item(self, obj):", "body": "if isinstance(obj, dict) and not isinstance(obj, DotDict):obj = DotDict(obj)elif isinstance(obj, list):for i, item in enumerate(obj):if isinstance(item, dict) and not isinstance(item, DotDict):obj[i] = DotDict(item)return obj", "docstring": "Convert obj into a DotDict, or list of DotDict.\n\nDirectly nested lists aren't supported.\nReturns the result", "id": "f12664:c0:m6"} {"signature": "def config_sources(app, environment, cluster, configs_dirs, app_dir,local=False, build=False):", "body": "sources = [(configs_dirs, ''),(configs_dirs, ''),(configs_dirs, ''),(configs_dirs, ''),(configs_dirs, '' % environment),(configs_dirs, '' % (environment, cluster)),(configs_dirs, ''),(configs_dirs, ''),(configs_dirs, ''),([app_dir], '' % app),([app_dir], '' % (app, environment)),([app_dir], '' % (app, environment, cluster)),(configs_dirs, app),(configs_dirs, '' % (app, environment)),(configs_dirs, '' % (app, environment, cluster)),([app_dir], '' % app),([app_dir], '' % app),(configs_dirs, '' % app),(configs_dirs, '' % app),(configs_dirs, '' % app),]if not 
build:sources = [source for source in sourcesif not source[].endswith('')]if not local:sources = [source for source in sourcesif not source[].endswith('')]return available_sources(sources)", "docstring": "Return the config files for an environment & cluster specific app.", "id": "f12665:m0"} {"signature": "def available_sources(sources):", "body": "for dirs, name in sources:for directory in dirs:fn = os.path.join(directory, name) + ''if os.path.isfile(fn):yield fn", "docstring": "Yield the sources that are present.", "id": "f12665:m1"} {"signature": "def smush_config(sources, initial=None):", "body": "if initial is None:initial = {}config = DotDict(initial)for fn in sources:log.debug('', fn)mod = get_config_module(fn)config = mod.update(config)log.debug('', json.dumps(config, indent=,cls=LenientJSONEncoder))return config", "docstring": "Merge the configuration sources and return the resulting DotDict.", "id": "f12665:m2"} {"signature": "def get_name(self):", "body": "return self.name", "docstring": "Return the name of this role", "id": "f12670:c0:m1"} {"signature": "def add_parent(self, parent):", "body": "parent.children.add(self)self.parents.add(parent)", "docstring": "Add a parent to this role,\n and add role itself to the parent's children set.\n you should override this function if necessary.\n\n Example::\n\n logged_user = RoleMixin('logged_user')\n student = RoleMixin('student')\n student.add_parent(logged_user)\n\n :param parent: Parent role to add in.", "id": "f12670:c0:m2"} {"signature": "def add_parents(self, *parents):", "body": "for parent in parents:self.add_parent(parent)", "docstring": "Add parents to this role. Also should override if necessary.\n Example::\n\n editor_of_articles = RoleMixin('editor_of_articles')\n editor_of_photonews = RoleMixin('editor_of_photonews')\n editor_of_all = RoleMixin('editor_of_all')\n editor_of_all.add_parents(editor_of_articles, editor_of_photonews)\n\n :param parents: Parents to add.", "id": "f12670:c0:m3"} {"signature": "@staticmethoddef get_by_name(name):", "body": "return RoleMixin.roles[name]", "docstring": "A static method to return the role which has the input name.\n\n :param name: The name of role.", "id": "f12670:c0:m6"} {"signature": "def add_role(self, role):", "body": "self.roles.add(role)", "docstring": "Add a role to this user.\n\n :param role: Role to add.", "id": "f12670:c1:m1"} {"signature": "def add_roles(self, *roles):", "body": "for role in roles:self.add_role(role)", "docstring": "Add roles to this user.\n\n :param roles: Roles to add.", "id": "f12670:c1:m2"} {"signature": "def allow(self, role, method, resource, with_children=True):", "body": "if with_children:for r in role.get_children():permission = (r.get_name(), method, resource)if permission not in self._allowed:self._allowed.append(permission)if role == '':permission = (role, method, resource)else:permission = (role.get_name(), method, resource)if permission not in self._allowed:self._allowed.append(permission)", "docstring": "Add allowing rules.\n\n :param role: Role of this rule.\n :param method: Method to allow in rule, include GET, POST, PUT etc.\n :param resource: Resource also view function.\n :param with_children: Allow role's children in rule as well\n if with_children is `True`", "id": "f12671:c0:m1"} {"signature": "def deny(self, role, method, resource, with_children=False):", "body": "if with_children:for r in role.get_children():permission = (r.get_name(), method, resource)if permission not in self._denied:self._denied.append(permission)permission = 
(role.get_name(), method, resource)if permission not in self._denied:self._denied.append(permission)", "docstring": "Add denying rules.\n\n :param role: Role of this rule.\n :param method: Method to deny in rule, include GET, POST, PUT etc.\n :param resource: Resource also view function.\n :param with_children: Deny role's children in rule as well\n if with_children is `True`", "id": "f12671:c0:m2"} {"signature": "def exempt(self, resource):", "body": "if resource not in self._exempt:self._exempt.append(resource)", "docstring": "Exempt a view function from being checked permission\n\n :param resource: The view function exempt from checking.", "id": "f12671:c0:m3"} {"signature": "def is_allowed(self, role, method, resource):", "body": "return (role, method, resource) in self._allowed", "docstring": "Check whether role is allowed to access resource\n\n :param role: Role to be checked.\n :param method: Method to be checked.\n :param resource: View function to be checked.", "id": "f12671:c0:m4"} {"signature": "def is_denied(self, role, method, resource):", "body": "return (role, method, resource) in self._denied", "docstring": "Check whether role is denied to access resource\n\n :param role: Role to be checked.\n :param method: Method to be checked.\n :param resource: View function to be checked.", "id": "f12671:c0:m5"} {"signature": "def is_exempt(self, resource):", "body": "return resource in self._exempt", "docstring": "Return whether resource is exempted.\n\n :param resource: View function to be checked.", "id": "f12671:c0:m6"} {"signature": "def __init__(self, app=None, **kwargs):", "body": "self.acl = AccessControlList()self.before_acl = {'': [], '': []}self._role_model = kwargs.get('', RoleMixin)self._user_model = kwargs.get('', UserMixin)self._user_loader = kwargs.get('', lambda: current_user)self.permission_failed_hook = kwargs.get('')if app is not None:self.app = appself.init_app(app)else:self.app = None", "docstring": "Initialize with app.", "id": "f12671:c2:m0"} {"signature": "def init_app(self, app):", "body": "app.config.setdefault('', False)self.use_white = app.config['']if not hasattr(app, ''):app.extensions = {}app.extensions[''] = _RBACState(self, app)self.acl.allow(anonymous, '', '')app.before_first_request(self._setup_acl)app.before_request(self._authenticate)", "docstring": "Initialize application in Flask-RBAC.\n Adds (RBAC, app) to flask extensions.\n Adds hook to authenticate permission before request.\n\n :param app: Flask object", "id": "f12671:c2:m1"} {"signature": "def as_role_model(self, model_cls):", "body": "self._role_model = model_clsreturn model_cls", "docstring": "A decorator to set custom model of role.\n\n :param model_cls: Model of role.", "id": "f12671:c2:m2"} {"signature": "def as_user_model(self, model_cls):", "body": "self._user_model = model_clsreturn model_cls", "docstring": "A decorator to set custom model of user.\n\n :param model_cls: Model of user.", "id": "f12671:c2:m3"} {"signature": "def set_role_model(self, model):", "body": "self._role_model = model", "docstring": "Set custom model of role.\n\n :param model: Model of role.", "id": "f12671:c2:m4"} {"signature": "def set_user_model(self, model):", "body": "self._user_model = model", "docstring": "Set custom model of User\n\n :param model: Model of user", "id": "f12671:c2:m5"} {"signature": "def set_user_loader(self, loader):", "body": "self._user_loader = loader", "docstring": "Set user loader, which is used to load current user.\n An example::\n\n from flask_login import current_user\n 
rbac.set_user_loader(lambda: current_user)\n\n :param loader: Current user function.", "id": "f12671:c2:m6"} {"signature": "def set_hook(self, hook):", "body": "self.permission_failed_hook = hook", "docstring": "Set hook which is called when permission is denied\n If you haven't set any hook, Flask-RBAC will call::\n\n abort(403)\n\n :param hook: Hook function", "id": "f12671:c2:m7"} {"signature": "def has_permission(self, method, endpoint, user=None):", "body": "app = self.get_app()_user = user or self._user_loader()if not hasattr(_user, ''):roles = [anonymous]else:roles = _user.get_roles()return self._check_permission(roles, method, endpoint)", "docstring": "Return whether the current user can access the resource.\n Example::\n\n @app.route('/some_url', methods=['GET', 'POST'])\n @rbac.allow(['anonymous'], ['GET'])\n def a_view_func():\n return Response('Blah Blah...')\n\n If you are not logged in.\n\n `rbac.has_permission('GET', 'a_view_func')` returns True.\n `rbac.has_permission('POST', 'a_view_func')` returns False.\n\n :param method: The method to check.\n :param endpoint: The application endpoint.\n :param user: user who you need to check. Current user by default.", "id": "f12671:c2:m8"} {"signature": "def allow(self, roles, methods, with_children=True):", "body": "def decorator(view_func):_methods = [m.upper() for m in methods]for r, m, v in itertools.product(roles, _methods, [view_func.__name__]):self.before_acl[''].append((r, m, v, with_children))return view_funcreturn decorator", "docstring": "This is a decorator function.\n\n You can allow roles to access the view func with it.\n\n An example::\n\n @app.route('/website/setting', methods=['GET', 'POST'])\n @rbac.allow(['administrator', 'super_user'], ['GET', 'POST'])\n def website_setting():\n return Response('Setting page.')\n\n :param roles: List, each name of roles. 
Please note that,\n `anonymous` refers to anonymous users.\n If you add `anonymous` to the rule,\n everyone can access the resource,\n unless you deny other roles.\n :param methods: List, each name of methods.\n methods is valid in ['GET', 'POST', 'PUT', 'DELETE']\n :param with_children: Whether to allow children of roles as well.\n True by default.", "id": "f12671:c2:m9"} {"signature": "def deny(self, roles, methods, with_children=False):", "body": "def decorator(view_func):_methods = [m.upper() for m in methods]for r, m, v in itertools.product(roles, _methods, [view_func.__name__]):self.before_acl[''].append((r, m, v, with_children))return view_funcreturn decorator", "docstring": "This is a decorator function.\n\n You can deny roles to access the view func with it.\n\n An example::\n\n @app.route('/article/post', methods=['GET', 'POST'])\n @rbac.deny(['anonymous', 'unactivated_role'], ['GET', 'POST'])\n def article_post():\n return Response('post page.')\n\n :param roles: List, each name of roles.\n :param methods: List, each name of methods.\n methods is valid in ['GET', 'POST', 'PUT', 'DELETE']\n :param with_children: Whether to deny children of roles as well.\n False by default.", "id": "f12671:c2:m10"} {"signature": "def exempt(self, view_func):", "body": "self.acl.exempt(view_func.__name__)return view_func", "docstring": "Exempt a view function from being checked permission.\n It is useful when you are using white list checking.\n\n Example::\n\n @app.route('/everyone/can/access')\n @rbac.exempt\n def everyone_can_access():\n return 'Hello~'\n\n :param view_func: The view function going to be exempted.", "id": "f12671:c2:m11"} {"signature": "def get_app(self, reference_app=None):", "body": "if reference_app is not None:return reference_appif self.app is not None:return self.appctx = connection_stack.topif ctx is not None:return ctx.appraise RuntimeError('''''')", "docstring": "Helper method that implements the logic to look up an application.", "id": "f12671:c2:m12"} {"signature": "def setEnvironmentalData(self, humidity, temperature):", "body": "''''''hum_perc = humidity << parts = math.fmod(temperature)fractional = parts[]temperature = parts[]temp_high = ((temperature + ) << )temp_low = ((fractional / ) & )temp_conv = (temp_high | temp_low)buf = [hum_perc, ,((temp_conv >> ) & ), (temp_conv & )]self._device.writeList(CCS811_ENV_DATA, buf)", "docstring": "Humidity is stored as an unsigned 16 bits in 1/512%RH. The\n default value is 50% = 0x64, 0x00. As an example 48.5%\n humidity would be 0x61, 0x00.", "id": "f12674:c0:m6"} {"signature": "def _python_cmd(*args):", "body": "args = (sys.executable,) + argsreturn subprocess.call(args) == ", "docstring": "Return True if the command succeeded.", "id": "f12676:m0"} {"signature": "def get_zip_class():", "body": "class ContextualZipFile(zipfile.ZipFile):def __enter__(self):return selfdef __exit__(self, type, value, traceback):self.closereturn zipfile.ZipFile if hasattr(zipfile.ZipFile, '') elseContextualZipFile", "docstring": "Supplement ZipFile class to support context manager for Python 2.6", "id": "f12676:m3"} {"signature": "def _clean_check(cmd, target):", "body": "try:subprocess.check_call(cmd)except subprocess.CalledProcessError:if os.access(target, os.F_OK):os.unlink(target)raise", "docstring": "Run the command to download target. 
If the command fails, clean up before\nre-raising the error.", "id": "f12676:m7"} {"signature": "def download_file_powershell(url, target):", "body": "target = os.path.abspath(target)cmd = ['','',\"\" % vars(),]_clean_check(cmd, target)", "docstring": "Download the file at url to target using Powershell (which will validate\ntrust). Raise an exception if the command cannot complete.", "id": "f12676:m8"} {"signature": "def download_file_insecure(url, target):", "body": "try:from urllib.request import urlopenexcept ImportError:from urllib2 import urlopensrc = dst = Nonetry:src = urlopen(url)data = src.read()dst = open(target, \"\")dst.write(data)finally:if src:src.close()if dst:dst.close()", "docstring": "Use Python to download the file, even though it cannot authenticate the\nconnection.", "id": "f12676:m14"} {"signature": "def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,to_dir=os.curdir, delay=, downloader_factory=get_best_downloader):", "body": "to_dir = os.path.abspath(to_dir)zip_name = \"\" % versionurl = download_base + zip_namesaveto = os.path.join(to_dir, zip_name)if not os.path.exists(saveto): log.warn(\"\", url)downloader = downloader_factory()downloader(url, saveto)return os.path.realpath(saveto)", "docstring": "Download setuptools from a specified location and return its filename\n`version` should be a valid setuptools version number that is available\nas an egg for download under the `download_base` URL (which should end\nwith a '/'). `to_dir` is the directory where the egg will be downloaded.\n`delay` is the number of seconds to pause before an actual download\nattempt.\n``downloader_factory`` should be a function taking no arguments and\nreturning a function for downloading a URL to a target.", "id": "f12676:m16"} {"signature": "def _build_install_args(options):", "body": "return [''] if options.user_install else []", "docstring": "Build the arguments to 'python setup.py install' on the setuptools package", "id": "f12676:m17"} {"signature": "def _parse_args():", "body": "parser = optparse.OptionParser()parser.add_option('', dest='', action='', default=False,help='')parser.add_option('', dest='', metavar=\"\",default=DEFAULT_URL,help='')parser.add_option('', dest='', action='',const=lambda: download_file_insecure, default=get_best_downloader,help='')parser.add_option('', help=\"\",default=DEFAULT_VERSION,)options, args = parser.parse_args()return options", "docstring": "Parse the command line for options", "id": "f12676:m18"} {"signature": "def main():", "body": "options = _parse_args()archive = download_setuptools(version=options.version,download_base=options.download_base,downloader_factory=options.downloader_factory,)return _install(archive, _build_install_args(options))", "docstring": "Install or upgrade setuptools and EasyInstall", "id": "f12676:m19"} {"signature": "def superable(cls) :", "body": "name = cls.__name__super_name = '' % (name,)setattr(cls,super_name,super(cls))return cls", "docstring": "Provide .__super in python 2.x classes without having to specify the current \n class name each time super is used (DRY principle).", "id": "f12679:m0"} {"signature": "def __init__(self, client_id, server_token, secret):", "body": "self.client_id = client_idself.server_token = server_tokenself.secret = secretsuper(Uber, self).__init__(self.client_id, self.server_token, self.secret)", "docstring": "Instantiate a new Uber object.\n:param client_id: Client ID for an application provided by Uber.\n:param server_token: Server token for an application 
provided by Uber.\n:param secret: Secret for an application provided by Uber.", "id": "f12685:c0:m0"} {"signature": "def get_products(self, latitude, longitude):", "body": "endpoint = ''query_parameters = {'': latitude,'': longitude}return self.get_json(endpoint, '', query_parameters, None, None)", "docstring": "Get a list of all Uber products based on latitude and longitude coordinates.\n:param latitude: Latitude for which product list is required.\n:param longitude: Longitude for which product list is required.\n:return: JSON", "id": "f12685:c0:m1"} {"signature": "def get_price_estimate(self, start_latitude, start_longitude, end_latitude, end_longitude):", "body": "endpoint = ''query_parameters = {'': start_latitude,'': start_longitude,'': end_latitude,'': end_longitude}return self.get_json(endpoint, '', query_parameters, None, None)", "docstring": "Returns the fare estimate based on two sets of coordinates.\n:param start_latitude: Starting latitude or latitude of pickup address.\n:param start_longitude: Starting longitude or longitude of pickup address.\n:param end_latitude: Ending latitude or latitude of destination address.\n:param end_longitude: Ending longitude or longitude of destination address.\n:return: JSON", "id": "f12685:c0:m2"} {"signature": "def get_time_estimate(self, start_latitude, start_longitude, customer_uuid=None, product_id=None):", "body": "endpoint = ''query_parameters = {'': start_latitude,'': start_longitude}if customer_uuid is not None:query_parameters[''] = customer_uuidelif product_id is not None:query_parameters[''] = product_idelif customer_uuid is not None and product_id is not None:query_parameters[''] = customer_uuidquery_parameters[''] = product_idreturn self.get_json(endpoint, '', query_parameters, None, None)", "docstring": "Get the ETA for Uber products.\n:param start_latitude: Starting latitude.\n:param start_longitude: Starting longitude.\n:param customer_uuid: (Optional) Customer unique ID.\n:param product_id: (Optional) If ETA is needed only for a specific product type.\n:return: JSON", "id": "f12685:c0:m3"} {"signature": "def get_promotions(self, start_latitude, start_longitude, end_latitude, end_longitude):", "body": "endpoint = ''query_parameters = {'': start_latitude,'': start_longitude,'': end_latitude,'': end_longitude}return self.get_json(endpoint, '', query_parameters, None, None)", "docstring": "Get promotions for new user based on user location.\n:param start_latitude: Starting latitude or latitude of pickup address.\n:param start_longitude: Starting longitude or longitude of pickup address.\n:param end_latitude: Ending latitude or latitude of destination address.\n:param end_longitude: Ending longitude or longitude of destination address.\n:return: JSON", "id": "f12685:c0:m4"} {"signature": "def __init__(self, client_id, server_token, secret):", "body": "self.client_id = client_idif self.server_token == '' or self.server_token is None:raise UberpyException('')else:self.server_token = server_tokenself.secret = secretself.client = Http()", "docstring": "Instantiate a new Api object.\n:param client_id: Client ID for an application provided by Uber.\n:param server_token: Server token for an application provided by Uber.\n:param secret: Secret for an application provided by Uber.", "id": "f12687:c0:m0"} {"signature": "def add_credentials(self, query_parameters):", "body": "query_parameters[''] = self.server_tokenreturn query_parameters", "docstring": "Adds the Uber server token to the query parameters to make an authorised request.\n:param 
query_parameters: Query parameters to be sent.\n:return: string", "id": "f12687:c0:m1"} {"signature": "@staticmethoddef sanitise_path(path):", "body": "if path[] != '':path = '' + pathreturn path", "docstring": "Adds a '/' to the path if it does not exist.\n:param path: Path that is to be sanitised.\n:return: string", "id": "f12687:c0:m2"} {"signature": "@staticmethoddef check_status(content, response):", "body": "if response.status == :raise MalformedRequestException(content, response)if response.status == :raise UnauthorisedException(content, response)if response.status == :raise NotFoundException(content, response)if response.status == :raise UnacceptableContentException(content, response)if response.status == :raise InvalidRequestException(content, response)if response.status == :raise RateLimitException(content, response)if response.status >= :raise ServerException(content, response)", "docstring": "Check the response that is returned for known exceptions and errors.\n:param response: Response that is returned from the call.\n:raise:\n MalformedRequestException if `response.status` is 400\n UnauthorisedException if `response.status` is 401\n NotFoundException if `response.status` is 404\n UnacceptableContentException if `response.status` is 406\n InvalidRequestException if `response.status` is 422\n RateLimitException if `response.status` is 429\n ServerException if `response.status` > 500", "id": "f12687:c0:m3"} {"signature": "def build_request(self, path, query_parameters):", "body": "url = '' + self.sanitise_path(path)url += '' + urlencode(query_parameters)return url", "docstring": "Build the HTTP request by adding query parameters to the path.\n:param path: API endpoint/path to be used.\n:param query_parameters: Query parameters to be added to the request.\n:return: string", "id": "f12687:c0:m4"} {"signature": "def get_json(self, uri_path, http_method='', query_parameters=None, body=None, headers=None):", "body": "query_parameters = query_parameters or {}headers = headers or {}query_parameters = self.add_credentials(query_parameters)uri = self.build_request(uri_path, query_parameters)if http_method in ('', '', '') and '' not in headers:headers[''] = ''headers[''] = ''response, content = self.client.request(uri=uri,method=http_method,body=body,headers=headers)self.check_status(content, response)return json.loads(content.decode(''))", "docstring": "Fetches the JSON returned, after making the call and checking for errors.\n:param uri_path: Endpoint to be used to make a request.\n:param http_method: HTTP method to be used.\n:param query_parameters: Parameters to be added to the request.\n:param body: Optional body, if required.\n:param headers: Optional headers, if required.\n:return: JSON", "id": "f12687:c0:m5"} {"signature": "def __init__(self,case_sensitive=True,unknown_value=u\"\"):", "body": "self.case_sensitive = case_sensitiveself.unknown_value = unknown_valueself._parse(os.path.join(os.path.dirname(__file__), \"\"))", "docstring": "Creates a detector parsing given data file", "id": "f12691:c1:m0"} {"signature": "def _parse(self, filename):", "body": "self.names = {}with codecs.open(filename, encoding=\"\") as f:for line in f:if any(map(lambda c: < ord(c) < , line)):line = line.encode(\"\").decode(\"\")self._eat_name_line(line.strip())", "docstring": "Opens data file and for each line, calls _eat_name_line", "id": "f12691:c1:m1"} {"signature": "def _eat_name_line(self, line):", "body": "if line[] not in \"\":parts = line.split()country_values = line[:-]name = 
map_name(parts[])if not self.case_sensitive:name = name.lower()if parts[] == \"\":self._set(name, u\"\", country_values)elif parts[] == \"\" or parts[] == \"\":self._set(name, u\"\", country_values)elif parts[] == \"\":self._set(name, u\"\", country_values)elif parts[] == \"\" or parts[] == \"\":self._set(name, u\"\", country_values)elif parts[] == \"\":self._set(name, self.unknown_value, country_values)else:raise \"\" % parts[]", "docstring": "Parses one line of data file", "id": "f12691:c1:m2"} {"signature": "def _set(self, name, gender, country_values):", "body": "if '' in name:for replacement in ['', '', '']:self._set(name.replace('', replacement), gender, country_values)else:if name not in self.names:self.names[name] = {}self.names[name][gender] = country_values", "docstring": "Sets gender and relevant country values for names dictionary of detector", "id": "f12691:c1:m3"} {"signature": "def _most_popular_gender(self, name, counter):", "body": "if name not in self.names:return self.unknown_valuemax_count, max_tie = (, )best = self.names[name].keys()[]for gender, country_values in self.names[name].items():count, tie = counter(country_values)if count > max_count or (count == max_count and tie > max_tie):max_count, max_tie, best = count, tie, genderreturn best if max_count > else self.unknown_value", "docstring": "Finds the most popular gender for the given name counting by given counter", "id": "f12691:c1:m4"} {"signature": "def get_gender(self, name, country=None):", "body": "if not self.case_sensitive:name = name.lower()if name not in self.names:return self.unknown_valueelif not country:def counter(country_values):country_values = map(ord, country_values.replace(\"\", \"\"))return (len(country_values),sum(map(lambda c: c > and c- or c-, country_values)))return self._most_popular_gender(name, counter)elif country in self.__class__.COUNTRIES:index = self.__class__.COUNTRIES.index(country)counter = lambda e: (ord(e[index])-, )return self._most_popular_gender(name, counter)else:raise NoCountryError(\"\" % country)", "docstring": "Returns best gender for the given name and country pair", "id": "f12691:c1:m5"} {"signature": "def multiglob_compile(globs, prefix=False):", "body": "if not globs:return re.compile('')elif prefix:globs = [x + '' for x in globs]return re.compile(''.join(fnmatch.translate(x) for x in globs))", "docstring": "Generate a single \"A or B or C\" regex from a list of shell globs.\n\n :param globs: Patterns to be processed by :mod:`fnmatch`.\n :type globs: iterable of :class:`~__builtins__.str`\n\n :param prefix: If ``True``, then :meth:`~re.RegexObject.match` will\n perform prefix matching rather than exact string matching.\n :type prefix: :class:`~__builtins__.bool`\n\n :rtype: :class:`re.RegexObject`", "id": "f12694:m0"} {"signature": "def hashFile(handle, want_hex=False, limit=None, chunk_size=CHUNK_SIZE):", "body": "fhash, read = hashlib.sha1(), if isinstance(handle, str):handle = file(handle, '')if limit:chunk_size = min(chunk_size, limit)for block in iter(lambda: handle.read(chunk_size), ''):fhash.update(block)read += chunk_sizeif < limit <= read:breakreturn want_hex and fhash.hexdigest() or fhash.digest()", "docstring": "Generate a hash from a potentially long file.\n Digesting will obey :const:`CHUNK_SIZE` to conserve memory.\n\n :param handle: A file-like object or path to hash from.\n :param want_hex: If ``True``, returned hash will be hex-encoded.\n :type want_hex: :class:`~__builtins__.bool`\n\n :param limit: Maximum number of bytes to read (rounded up to a 
multiple of\n ``CHUNK_SIZE``)\n :type limit: :class:`~__builtins__.int`\n\n :param chunk_size: Size of :meth:`~__builtins__.file.read` operations\n in bytes.\n :type chunk_size: :class:`~__builtins__.int`\n\n\n :rtype: :class:`~__builtins__.str`\n :returns: A binary or hex-encoded SHA1 hash.\n\n .. note:: It is your responsibility to close any file-like objects you pass\n in", "id": "f12694:m1"} {"signature": "def getPaths(roots, ignores=None):", "body": "paths, count, ignores = [], , ignores or []ignore_re = multiglob_compile(ignores, prefix=False)for root in roots:root = os.path.realpath(root)if os.path.isfile(root):paths.append(root)continuefor fldr in os.walk(root):out.write(\"\"% count)for subdir in fldr[]:dirpath = os.path.join(fldr[], subdir)if ignore_re.match(dirpath):fldr[].remove(subdir)for filename in fldr[]:filepath = os.path.join(fldr[], filename)if ignore_re.match(filepath):continue paths.append(filepath)count += out.write(\"\" % (len(paths)),newline=True)return paths", "docstring": "Recursively walk a set of paths and return a listing of contained files.\n\n:param roots: Relative or absolute paths to files or folders.\n:type roots: :class:`~__builtins__.list` of :class:`~__builtins__.str`\n\n:param ignores: A list of :py:mod:`fnmatch` globs to avoid walking and\n omit from results\n:type ignores: :class:`~__builtins__.list` of :class:`~__builtins__.str`\n\n:returns: Absolute paths to only files.\n:rtype: :class:`~__builtins__.list` of :class:`~__builtins__.str`\n\n.. todo:: Try to optimize the ignores matching. Running a regex on every\n filename is a fairly significant percentage of the time taken according\n to the profiler.", "id": "f12694:m2"} {"signature": "def groupBy(groups_in, classifier, fun_desc='', keep_uniques=False,*args, **kwargs):", "body": "groups, count, group_count = {}, , len(groups_in)for pos, paths in enumerate(groups_in.values()):out.write(\"\"\"\" % (pos + , group_count, fun_desc, count, len(paths)))for key, group in list(classifier(paths, *args, **kwargs).items()):groups.setdefault(key, set()).update(group)count += len(group)if not keep_uniques:groups = dict([(x, groups[x]) for x in groups if len(groups[x]) > ])out.write(\"\"% (len(groups), fun_desc, count), newline=True)return groups", "docstring": "Subdivide groups of paths according to a function.\n\n :param groups_in: Grouped sets of paths.\n :type groups_in: :class:`~__builtins__.dict` of iterables\n\n :param classifier: Function to group a list of paths by some attribute.\n :type classifier: ``function(list, *args, **kwargs) -> str``\n\n :param fun_desc: Human-readable term for what the classifier operates on.\n (Used in log messages)\n :type fun_desc: :class:`~__builtins__.str`\n\n :param keep_uniques: If ``False``, discard groups with only one member.\n :type keep_uniques: :class:`~__builtins__.bool`\n\n\n :returns: A dict mapping classifier keys to groups of matches.\n :rtype: :class:`~__builtins__.dict`\n\n\n :attention: Grouping functions generally use a :class:`~__builtins__.set`\n ``groups`` as extra protection against accidentally counting a given\n file twice. (Complementary to use of :func:`os.path.realpath` in\n :func:`~fastdupes.getPaths`)\n\n .. 
todo:: Find some way to bring back the file-by-file status text", "id": "f12694:m3"} {"signature": "def groupify(function):", "body": "@wraps(function)def wrapper(paths, *args, **kwargs): groups = {}for path in paths:key = function(path, *args, **kwargs)if key is not None:groups.setdefault(key, set()).add(path)return groupsreturn wrapper", "docstring": "Decorator to convert a function which takes a single value and returns\n a key into one which takes a list of values and returns a dict of key-group\n mappings.\n\n :param function: A function which takes a value and returns a hash key.\n :type function: ``function(value) -> key``\n\n :rtype:\n .. parsed-literal::\n function(iterable) ->\n {key: :class:`~__builtins__.set` ([value, ...]), ...}", "id": "f12694:m4"} {"signature": "@groupifydef sizeClassifier(path, min_size=DEFAULTS['']):", "body": "filestat = _stat(path)if stat.S_ISLNK(filestat.st_mode):return if filestat.st_size < min_size:return return filestat.st_size", "docstring": "Sort a file into a group based on on-disk size.\n\n :param paths: See :func:`fastdupes.groupify`\n\n :param min_size: Files smaller than this size (in bytes) will be ignored.\n :type min_size: :class:`__builtins__.int`\n\n :returns: See :func:`fastdupes.groupify`\n\n .. todo:: Rework the calling of :func:`~os.stat` to minimize the number of\n calls. It's a fairly significant percentage of the time taken according\n to the profiler.", "id": "f12694:m5"} {"signature": "@groupifydef hashClassifier(path, limit=HEAD_SIZE):", "body": "return hashFile(path, limit=limit)", "docstring": "Sort a file into a group based on its SHA1 hash.\n\n :param paths: See :func:`fastdupes.groupify`\n\n :param limit: Only this many bytes will be counted in the hash.\n Values which evaluate to ``False`` indicate no limit.\n :type limit: :class:`__builtins__.int`\n\n :returns: See :func:`fastdupes.groupify`", "id": "f12694:m6"} {"signature": "def groupByContent(paths):", "body": "handles, results = [], []hList = []for path in paths:try:hList.append((path, open(path, ''), ''))except IOError:pass handles.append(hList)while handles:more, done = compareChunks(handles.pop())handles.extend(more)results.extend(done)return dict((x[], x) for x in results)", "docstring": "Byte-for-byte comparison on an arbitrary number of files in parallel.\n\n This operates by opening all files in parallel and comparing\n chunk-by-chunk. This has the following implications:\n\n - Reads the same total amount of data as hash comparison.\n - Performs a *lot* of disk seeks. (Best suited for SSDs)\n - Vulnerable to file handle exhaustion if used on its own.\n\n :param paths: List of potentially identical files.\n :type paths: iterable\n\n :returns: A dict mapping one path to a list of all paths (self included)\n with the same contents.\n\n .. todo:: Start examining the ``while handles:`` block to figure out how to\n minimize thrashing in situations where read-ahead caching is active.\n Compare savings by read-ahead to savings due to eliminating false\n positives as quickly as possible. This is a 2-variable min/max problem.\n\n .. todo:: Look into possible solutions for pathological cases of thousands\n of files with the same size and same pre-filter results. 
(File handle\n exhaustion)", "id": "f12694:m7"} {"signature": "def compareChunks(handles, chunk_size=CHUNK_SIZE):", "body": "chunks = [(path, fh, fh.read(chunk_size)) for path, fh, _ in handles]more, done = [], []while chunks:matches, non_matches = [chunks[]], []for chunk in chunks[:]:if matches[][] == chunk[]:matches.append(chunk)else:non_matches.append(chunk)if len(matches) == or matches[][] == \"\":for x in matches:x[].close()done.append([x[] for x in matches])else:more.append(matches)chunks = non_matchesreturn more, done", "docstring": "Group a list of file handles based on equality of the next chunk of\n data read from them.\n\n :param handles: A list of open handles for file-like objects with\n potentially-identical contents.\n :param chunk_size: The amount of data to read from each handle every time\n this function is called.\n\n :returns: Two lists of lists:\n\n * Lists to be fed back into this function individually\n * Finished groups of duplicate paths. (including unique files as\n single-file lists)\n\n :rtype: ``(list, list)``\n\n .. attention:: File handles will be closed when no longer needed\n .. todo:: Discard chunk contents immediately once they're no longer needed", "id": "f12694:m8"} {"signature": "def pruneUI(dupeList, mainPos=, mainLen=):", "body": "dupeList = sorted(dupeList)print()for pos, val in enumerate(dupeList):print(\"\" % (pos + , val))while True:choice = input(\"\" % (mainPos, mainLen)).strip()if not choice:print (\"\"\"\")continueelif choice.lower() == '':return []try:out = [int(x) - for x in choice.replace('', '').split()]return [val for pos, val in enumerate(dupeList) if pos not in out]except ValueError:print(\"\"\"\")", "docstring": "Display a list of files and prompt for ones to be kept.\n\n The user may enter ``all`` or one or more numbers separated by spaces\n and/or commas.\n\n .. 
note:: It is impossible to accidentally choose to keep none of the\n displayed files.\n\n :param dupeList: A list duplicate file paths\n :param mainPos: Used to display \"set X of Y\"\n :param mainLen: Used to display \"set X of Y\"\n :type dupeList: :class:`~__builtins__.list`\n :type mainPos: :class:`~__builtins__.int`\n :type mainLen: :class:`~__builtins__.int`\n\n :returns: A list of files to be deleted.\n :rtype: :class:`~__builtins__.int`", "id": "f12694:m9"} {"signature": "def find_dupes(paths, exact=False, ignores=None, min_size=):", "body": "groups = {'': getPaths(paths, ignores)}groups = groupBy(groups, sizeClassifier, '', min_size=min_size)groups = groupBy(groups, hashClassifier, '', limit=HEAD_SIZE)if exact:groups = groupBy(groups, groupByContent, fun_desc='')else:groups = groupBy(groups, hashClassifier, fun_desc='')return groups", "docstring": "High-level code to walk a set of paths and find duplicate groups.\n\n :param exact: Whether to compare file contents by hash or by reading\n chunks in parallel.\n :type exact: :class:`~__builtins__.bool`\n\n :param paths: See :meth:`~fastdupes.getPaths`\n :param ignores: See :meth:`~fastdupes.getPaths`\n :param min_size: See :meth:`~fastdupes.sizeClassifier`\n\n :returns: A list of groups of files with identical contents\n :rtype: ``[[path, ...], [path, ...]]``", "id": "f12694:m10"} {"signature": "def print_defaults():", "body": "maxlen = max([len(x) for x in DEFAULTS])for key in DEFAULTS:value = DEFAULTS[key]if isinstance(value, (list, set)):value = ''.join(value)print(\"\" % (maxlen, key, value))", "docstring": "Pretty-print the contents of :data:`DEFAULTS`", "id": "f12694:m11"} {"signature": "def delete_dupes(groups, prefer_list=None, interactive=True, dry_run=False):", "body": "prefer_list = prefer_list or []prefer_re = multiglob_compile(prefer_list, prefix=True)for pos, group in enumerate(groups.values()):preferred = [x for x in group if prefer_re.match(x)]pruneList = [x for x in group if x not in preferred]if not preferred:if interactive:pruneList = pruneUI(group, pos + , len(groups))preferred = [x for x in group if x not in pruneList]else:preferred, pruneList = pruneList, []assert preferred for path in pruneList:print(\"\" % path)if not dry_run:os.remove(path)", "docstring": "Code to handle the :option:`--delete` command-line option.\n\n :param groups: A list of groups of paths.\n :type groups: iterable\n\n :param prefer_list: A whitelist to be compiled by\n :func:`~fastdupes.multiglob_compile` and used to skip some prompts.\n\n :param interactive: If ``False``, assume the user wants to keep all copies\n when a prompt would otherwise be displayed.\n :type interactive: :class:`~__builtins__.bool`\n\n :param dry_run: If ``True``, only pretend to delete files.\n :type dry_run: :class:`~__builtins__.bool`\n\n .. 
todo:: Add a secondary check for symlinks for safety.", "id": "f12694:m12"} {"signature": "def main():", "body": "from optparse import OptionParser, OptionGroupparser = OptionParser(usage=\"\",version=\"\" % (__appname__, __version__))parser.add_option('', '', action=\"\", dest=\"\",default=False, help=\"\"\"\")parser.add_option('', '', action=\"\", dest=\"\",default=False, help=\"\"\"\"\"\"\"\"\"\"\"\")filter_group = OptionGroup(parser, \"\")filter_group.add_option('', '', action=\"\", dest=\"\",metavar=\"\", help=\"\"\"\"\"\"\"\")filter_group.add_option('', action=\"\", type=\"\",dest=\"\", metavar=\"\", help=\"\"\"\")parser.add_option_group(filter_group)behaviour_group = OptionGroup(parser, \"\")behaviour_group.add_option('', '', action=\"\",dest=\"\", help=\"\"\"\")behaviour_group.add_option('', '', action=\"\",dest=\"\", metavar=\"\", help=\"\"\"\"\"\")behaviour_group.add_option('', action=\"\", dest=\"\",metavar=\"\", default=[], help=\"\"\"\"\"\")behaviour_group.add_option('', action=\"\",dest=\"\", help=\"\"\"\")parser.add_option_group(behaviour_group)parser.set_defaults(**DEFAULTS) opts, args = parser.parse_args()if '' in opts.exclude:opts.exclude = opts.exclude[opts.exclude.index('') + :]opts.exclude = [x.rstrip(os.sep + (os.altsep or '')) for x in opts.exclude]if opts.defaults:print_defaults()sys.exit()groups = find_dupes(args, opts.exact, opts.exclude, opts.min_size)if opts.delete:delete_dupes(groups, opts.prefer, not opts.noninteractive,opts.dry_run)else:for dupeSet in list(groups.values()):print(''.join(dupeSet) + '')", "docstring": "The main entry point, compatible with setuptools.", "id": "f12694:m13"} {"signature": "def write(self, text, newline=False):", "body": "if not self.isatty:self.fobj.write('' % text)returnmsg_len = len(text)self.max_len = max(self.max_len, msg_len)self.fobj.write(\"\" % (self.max_len, text))if newline or not self.isatty:self.fobj.write('')self.max_len = ", "docstring": "Use ``\\\\r`` to overdraw the current line with the given text.\n\n This function transparently handles tracking how much overdrawing is\n necessary to erase the previous line when used consistently.\n\n :param text: The text to be outputted\n :param newline: Whether to start a new line and reset the length count.\n :type text: :class:`~__builtins__.str`\n :type newline: :class:`~__builtins__.bool`", "id": "f12694:c0:m1"} {"signature": "def __init__(self, digits=, only_fields=None):", "body": "self.digits = digitstry:self.only_fields = set(only_fields)except TypeError:self.only_fields = None", "docstring": "Create a processor that rounds numbers in the event values\n\n :param digits: The number of digits to round to\n :param only_fields: An iterable specifying the fields to round", "id": "f12703:c0:m0"} {"signature": "def __init__(self, json_fields):", "body": "self.fields = json_fieldsself.prettify = self.fast_prettify if fast_json_available else self.slow_prettify", "docstring": "Create a processor that prettifies JSON strings in the event values\n\n :param json_fields: An iterable specifying the fields to prettify", "id": "f12703:c1:m0"} {"signature": "def __init__(self, xml_fields):", "body": "self.fields = xml_fieldsif fast_xml_available:self.prettify = self.fast_prettifyself.lxml_parser = etree.XMLParser(remove_blank_text=True)else:self.prettify = self.slow_prettifyself.lxml_parser = None", "docstring": "Create a processor that prettifies XML strings in the event values\n\n :param xml_fields: An iterable specifying the fields to prettify", "id": "f12703:c2:m0"} {"signature": 
"def __init__(self, field_map):", "body": "self.lexers = {field: get_lexer_by_name(language)for field, language in field_map.items()}", "docstring": "Create a processor that syntax highlights code in the event values\n\n The syntax highlighting will use with ANSI terminal color codes.\n\n :param field_map: A mapping with field names mapped to languages, e.g.\n ``{'body': 'json': 'soap_response': 'xml'}``", "id": "f12703:c3:m0"} {"signature": "def __init__(self, fields, target=sys.stdout):", "body": "self.fields = fieldsself.target = target", "docstring": "Create a processor that prints the requested fields' values\n\n This is useful for strings with newlines in them. Keep in mind that the\n fields will be popped from the event dictionary, so they will not be\n visible to anything (other processors and the logger itself) after this\n processor has printed them.\n\n :param fields: An iterable specifying the fields to print\n :param target: A file-like object to print to", "id": "f12703:c4:m0"} {"signature": "def strip_minidom_whitespace(node):", "body": "for child in node.childNodes:if child.nodeType == Node.TEXT_NODE:if child.nodeValue:child.nodeValue = child.nodeValue.strip()elif child.nodeType == Node.ELEMENT_NODE:strip_minidom_whitespace(child)", "docstring": "Strips all whitespace from a minidom XML node and its children\n\n This operation is made in-place.", "id": "f12704:m0"} {"signature": "def merge_two_dicts(x, y):", "body": "z = x.copy()z.update(y)return z", "docstring": "Given two dicts, merge them into a new dict as a shallow copy.", "id": "f12711:m0"} {"signature": "def _mock_response(self,status_code=,content=b'',raise_for_status=None):", "body": "mock_resp = Mock()mock_resp.raise_for_status = Mock()mock_resp.status_code = status_codeif raise_for_status:mock_resp.raise_for_status.side_effect = raise_for_statusreturn mock_respmock_resp.content = contentmock_resp.iter_content = Mock()iter_result = iter([bytes([b]) for b in content])mock_resp.iter_content.return_value = iter_resultreturn mock_resp", "docstring": "Build a mock for each response, include errors and content data", "id": "f12714:c0:m0"} {"signature": "def cleanup_none(self):", "body": "for (prop, default) in self.defaults.items():if getattr(self, prop) == '':setattr(self, prop, None)", "docstring": "Removes the temporary value set for None attributes.", "id": "f12717:c0:m1"} {"signature": "def new_game(self, mode=None):", "body": "self._g = GameObject()_mode = mode or \"\"logging.debug(\"\".format(_mode))mode_info = self._g.get_game_type(gametype=_mode)logging.debug(\"\".format(mode_info, type(mode_info)))if not mode_info:self._g = Noneraise ValueError(''.format(_mode))logging.debug(\"\".format(mode_info.digitType))dw = DigitWord(wordtype=mode_info.digitType)dw.random(mode_info.digits)logging.debug(\"\".format(dw.word))_game = {\"\": str(uuid.uuid4()),\"\": \"\",\"\": int(time()) + ,\"\": dw.word,\"\": _mode,\"\": mode_info.guesses_allowed,\"\": }logging.debug(\"\".format(_game))self._g.from_json(jsonstr=json.dumps(_game))return self._g.to_json()", "docstring": "new_game() creates a new game. Docs TBC.\n\n:return: JSON String containing the game object.", "id": "f12727:c0:m5"} {"signature": "def load_game(self, jsonstr):", "body": "logging.debug(\"\")logging.debug(\"\")self._g = GameObject()logging.debug(\"\".format(jsonstr))self._g.from_json(jsonstr=jsonstr)", "docstring": "load_game() takes a JSON string representing a game object and calls the underlying\ngame object (_g) to load the JSON. 
The underlying object will handle schema validation\nand transformation.\n\n:param jsonstr: A valid JSON string representing a GameObject (see above)\n\n:return: None", "id": "f12727:c0:m6"} {"signature": "def save_game(self):", "body": "logging.debug(\"\")logging.debug(\"\")self._validate_game_object(op=\"\")logging.debug(\"\")return self._g.to_json()", "docstring": "save_game() asks the underlying game object (_g) to dump the contents of\nitself as JSON and then returns the JSON to\n\n:return: A JSON representation of the game object", "id": "f12727:c0:m7"} {"signature": "def guess(self, *args):", "body": "logging.debug(\"\")logging.debug(\"\")self._validate_game_object(op=\"\")logging.debug(\"\")_return_results = {\"\": None,\"\": None,\"\": [],\"\": \"\"}logging.debug(\"\")if self._g.status.lower() == \"\":_return_results[\"\"] = self._start_again(\"\")elif self._g.status.lower() == \"\":_return_results[\"\"] = self._start_again(\"\")elif self._g.guesses_remaining < :_return_results[\"\"] = self._start_again(\"\")elif self._g.ttl < time():_return_results[\"\"] = self._start_again(\"\")else:logging.debug(\"\")_wordtype = DigitWord.HEXDIGIT if self._g.mode.lower() == '' else DigitWord.DIGITguess = DigitWord(*args, wordtype=_wordtype)logging.debug(\"\")self._g.guesses_remaining -= self._g.guesses_made += logging.debug(\"\")_return_results[\"\"] = []_return_results[\"\"] = _return_results[\"\"] = logging.debug(\"\")for i in self._g.answer.compare(guess):logging.debug(\"\".format(i.index))if i.match is True:logging.debug(\"\")_return_results[\"\"] += elif i.in_word is True:logging.debug(\"\")_return_results[\"\"] += logging.debug(\"\")_return_results[\"\"].append(i.get_object())logging.debug(\"\")if _return_results[\"\"] == len(self._g.answer.word):logging.debug(\"\")self._g.status = \"\"self._g.guesses_remaining = _return_results[\"\"] = \"\"\"\".format(self._get_text_answer())elif self._g.guesses_remaining < :logging.debug(\"\")self._g.status = \"\"_return_results[\"\"] = \"\"\"\".format(self._get_text_answer())_return_results[\"\"] = self._g.statuslogging.debug(\"\")return _return_results", "docstring": "guess() allows a guess to be made. Before the guess is made, the method\nchecks to see if the game has been won, lost, or there are no tries\nremaining. It then creates a return object stating the number of bulls\n(direct matches), cows (indirect matches), an analysis of the guess (a\nlist of analysis objects), and a status.\n\n:param args: any number of integers (or string representations of integers)\nto the number of Digits in the answer; i.e. 
in normal mode, there would be\na DigitWord to guess of 4 digits, so guess would expect guess(1, 2, 3, 4)\nand a shorter (guess(1, 2)) or longer (guess(1, 2, 3, 4, 5)) sequence will\nraise an exception.\n\n:return: a JSON object containing the analysis of the guess:\n\n{\n \"cows\": {\"type\": \"integer\"},\n \"bulls\": {\"type\": \"integer\"},\n \"analysis\": {\"type\": \"array of DigitWordAnalysis\"},\n \"status\": {\"type\": \"string\"}\n}", "id": "f12727:c0:m8"} {"signature": "def _start_again(self, message=None):", "body": "logging.debug(\"\".format(message))the_answer = self._get_text_answer()return \"\".format(message,the_answer)", "docstring": "Simple method to form a start again message and give the answer in readable form.", "id": "f12727:c0:m9"} {"signature": "def _validate_game_object(self, op=\"\"):", "body": "if self._g is None:raise ValueError(\"\"\"\")if not isinstance(self._g, GameObject):raise TypeError(\"\".format(op))", "docstring": "A helper method to provide validation of the game object (_g). If the\ngame object does not exist or if (for any reason) the object is not a GameObject,\nthen an exception will be raised.\n\n:param op: A string describing the operation (e.g. guess, save, etc.) taking place\n:return: Nothing", "id": "f12727:c0:m11"} {"signature": "def __init__(self, game_json=None, game_modes=None, mode=None):", "body": "self._game_modes = Noneself.load_modes(input_modes=game_modes)self.game = Noneself.load(game_json=game_json, mode=mode)", "docstring": "Initialize a GameController object to allow the game to be played. The controller\ncreates a game object (see GameObject.py) and allows guesses to be made against\nthe 'hidden' object.\n\n:param game_json: , if provided is a JSON serialized representation\nof a game; if not provided a new game is instantiated.\n:param game_modes: , a list of GameMode objects representing game modes.\n:param mode: , the mode the game should be played in; may be a GameMode\nobject or a str representing the name of a GameMode object already defined (e.g.\npassed via game_modes).", "id": "f12730:c0:m0"} {"signature": "def guess(self, *args):", "body": "if self.game is None:raise ValueError(\"\")response_object = {\"\": None,\"\": None,\"\": None,\"\": None}if self.game.status == self.GAME_WON:response_object[\"\"] =self._start_again_message(\"\")elif self.game.status == self.GAME_LOST:response_object[\"\"] =self._start_again_message(\"\")elif self.game.guesses_remaining < :response_object[\"\"] =self._start_again_message(\"\")else:guess_made = DigitWord(*args, wordtype=self.game.mode.digit_type)comparison = self.game.answer.compare(guess_made)self.game.guesses_made += response_object[\"\"] = response_object[\"\"] = response_object[\"\"] = []for comparison_object in comparison:if comparison_object.match:response_object[\"\"] += elif comparison_object.in_word:response_object[\"\"] += response_object[\"\"].append(comparison_object.get_object())if response_object[\"\"] == self.game.mode.digits:self.game.status = self.GAME_WONself.game.guesses_made = self.game.mode.guesses_allowedresponse_object[\"\"] = self._start_again_message(\"\")elif self.game.guesses_remaining < :self.game.status = self.GAME_LOSTresponse_object[\"\"] = self._start_again_message(\"\")return response_object", "docstring": "Make a guess, comparing the hidden object to a set of provided digits. 
The digits should\nbe passed as a set of arguments, e.g:\n\n* for a normal game: 0, 1, 2, 3\n* for a hex game: 0xA, 0xB, 5, 4\n* alternate for hex game: 'A', 'b', 5, 4\n\n:param args: An iterable of digits (int or str)\n:return: A dictionary object detailing the analysis and results of the guess", "id": "f12730:c0:m3"} {"signature": "def load(self, game_json=None, mode=None):", "body": "if game_json is None: if mode is not None:if isinstance(mode, str):_game_object = GameObject(mode=self._match_mode(mode=mode))elif isinstance(mode, GameMode):_game_object = GameObject(mode=mode)else:raise TypeError(\"\")else:_game_object = GameObject(mode=self._game_modes[])_game_object.status = self.GAME_PLAYINGelse:if not isinstance(game_json, str):raise TypeError(\"\")game_dict = json.loads(game_json)if not '' in game_dict:raise ValueError(\"\")_mode = GameMode(**game_dict[\"\"])_game_object = GameObject(mode=_mode, source_game=game_dict)self.game = copy.deepcopy(_game_object)", "docstring": "Load a game from a serialized JSON representation. The game expects a well defined\nstructure as follows (Note JSON string format):\n\n'{\n \"guesses_made\": int,\n \"key\": \"str:a 4 word\",\n \"status\": \"str: one of playing, won, lost\",\n \"mode\": {\n \"digits\": int,\n \"digit_type\": DigitWord.DIGIT | DigitWord.HEXDIGIT,\n \"mode\": GameMode(),\n \"priority\": int,\n \"help_text\": str,\n \"instruction_text\": str,\n \"guesses_allowed\": int\n },\n \"ttl\": int,\n \"answer\": [int|str0, int|str1, ..., int|strN]\n}'\n\n* \"mode\" will be cast to a GameMode object\n* \"answer\" will be cast to a DigitWord object\n\n:param game_json: The source JSON - MUST be a string\n:param mode: A mode (str or GameMode) for the game being loaded\n:return: A game object", "id": "f12730:c0:m4"} {"signature": "def save(self):", "body": "return json.dumps(self.game.dump())", "docstring": "Save returns a string of the JSON serialized game object.\n\n:return: str of JSON serialized data", "id": "f12730:c0:m5"} {"signature": "def load_modes(self, input_modes=None):", "body": "_modes = [GameMode(mode=\"\", priority=, digits=, digit_type=DigitWord.DIGIT, guesses_allowed=),GameMode(mode=\"\", priority=, digits=, digit_type=DigitWord.DIGIT, guesses_allowed=),GameMode(mode=\"\", priority=, digits=, digit_type=DigitWord.DIGIT, guesses_allowed=),GameMode(mode=\"\", priority=, digits=, digit_type=DigitWord.HEXDIGIT, guesses_allowed=)]if input_modes is not None:if not isinstance(input_modes, list):raise TypeError(\"\")for mode in input_modes:if not isinstance(mode, GameMode):raise TypeError(\"\")_modes.append(mode)self._game_modes = copy.deepcopy(_modes)", "docstring": "Loads modes (GameMode objects) to be supported by the game object. 
Four default\nmodes are provided (normal, easy, hard, and hex) but others could be provided\neither by calling load_modes directly or passing a list of GameMode objects to\nthe instantiation call.\n\n:param input_modes: A list of GameMode objects; nb: even if only one new GameMode\nobject is provided, it MUST be passed as a list - for example, passing GameMode gm1\nwould require passing [gm1] NOT gm1.\n\n:return: A list of GameMode objects (both defaults and any added).", "id": "f12730:c0:m6"} {"signature": "def _start_again_message(self, message=None):", "body": "logging.debug(\"\".format(message))the_answer = ''.join([str(d) for d in self.game.answer][:-]) + '' + [str(d) for d in self.game.answer][-]return \"\".format(message,\"\" if message[-] not in [\"\", \"\", \"\", \"\", \"\"] else \"\",the_answer)", "docstring": "Simple method to form a start again message and give the answer in readable form.", "id": "f12730:c0:m8"} {"signature": "def __init__(self,mode=None,priority=None,digits=None,digit_type=None,guesses_allowed=None,instruction_text=None,help_text=None):", "body": "self._mode = Noneself._priority = Noneself._digits = Noneself._digit_type = Noneself._guesses_allowed = Noneself._instruction_text = Noneself._help_text = Noneself.mode = modeself.priority = priorityself.digits = digitsself.digit_type = digit_typeself.guesses_allowed = guesses_allowedself.instruction_text = instruction_textself.help_text = help_text", "docstring": "Constructor to create a new mode.\n\n:param mode: A text name for the mode.\n:param priority: priority of modes (in terms of returning a list)\n:param digits: number of digits used in this mode.\n:param digit_type: type of digit, e.g. DigitWord.HEXDIGIT or DigitWord.DIGIT\n:param guesses_allowed: Number of guesses permitted.\n:param instruction_text: Instruction text (dependent upon caller to show)\n:param help_text: Help text (dependent upon caller to show)", "id": "f12731:c0:m0"} {"signature": "def __str__(self):", "body": "return str(self.dump())", "docstring": "Override of __str__ method.\n:return: representation of the GameMode", "id": "f12731:c0:m1"} {"signature": "def __repr__(self):", "body": "return \"\".format(self._mode)", "docstring": "Override of __repr__ method.\n:return: representation of object showing mode name", "id": "f12731:c0:m2"} {"signature": "@propertydef mode(self):", "body": "return self._mode", "docstring": "The name of the mode.\n:return: ", "id": "f12731:c0:m3"} {"signature": "@propertydef priority(self):", "body": "return self._priority", "docstring": "The priority of the mode when collected in a list. For example: priority 10 is less than 20,\nso 10 will come before 20 in a list of GameMode objects.\n\nThis is useful because other modules might return a sorted list of GameMode objects to their\ncallers and priority provides a simple means to sort and sequence a collection of GameMode\nobjects.\n\n:return: ", "id": "f12731:c0:m5"} {"signature": "@propertydef digits(self):", "body": "return self._digits", "docstring": "The number of digits used by the DigitWord used in this mode; e.g. a value of 3 would\nindicate there are three digits (e.g. 1, 2, and 3), while a value of 5 would indicate\nfive values (e.g. 
0, 1, 2, 3, 4).\n\n:return: ", "id": "f12731:c0:m7"} {"signature": "@propertydef digit_type(self):", "body": "return self._digit_type", "docstring": "The digit_type is a flag used to specify the type of digit to be used; for example, a\ndigit (DigitWord.DIGIT) enables a single digit between 0 and 9, while a hex digit\n(DigitWord.HEXDIGIT) enables a single digit between 0 and F.\n\n:return: ", "id": "f12731:c0:m9"} {"signature": "@propertydef guesses_allowed(self):", "body": "return self._guesses_allowed", "docstring": "The number of guesses the mode is allowed; for example an easy mode might allow\n20 guesses while a hard mode only allowed 7.\n\n:return: ", "id": "f12731:c0:m11"} {"signature": "@propertydef instruction_text(self):", "body": "return self._instruction_text", "docstring": "Instructions on how to use the mode (if present).\n:return: ", "id": "f12731:c0:m13"} {"signature": "@propertydef help_text(self):", "body": "return self._help_text", "docstring": "Help text intended to guide the user on how to use and interact with the game\nmode.\n\n:return: ", "id": "f12731:c0:m15"} {"signature": "def dump(self):", "body": "return {\"\": self._mode,\"\": self._priority,\"\": self._digits,\"\": self._digit_type,\"\": self._guesses_allowed,\"\": self._instruction_text,\"\": self._help_text}", "docstring": "Dump (convert to a dict) the GameMode object\n:return: ", "id": "f12731:c0:m17"} {"signature": "def __init__(self,mode=None,source_game=None):", "body": "if mode is None:raise ValueError(\"\")if not isinstance(mode, GameMode):raise TypeError(\"\")self._key = None self._status = None self._ttl = None self._answer = None self._mode = None self._guesses_remaining = None self._guesses_made = None if source_game:self.load(source=source_game)else:self.new(mode=mode)", "docstring": "Initialize a game object to hold the state, properties, and control of the game.\n\n:param mode: A GameMode object defining the game play mode.\n:param source_game: A JSON Serialized representation of the game.", "id": "f12733:c0:m0"} {"signature": "def dump(self):", "body": "return {\"\": self._key,\"\": self._status,\"\": self._ttl,\"\": self._answer.word,\"\": self._mode.dump(),\"\": self._guesses_made}", "docstring": "Dump (return) a dict representation of the GameObject. This is a Python\ndict and is NOT serialized. 
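A minimal sketch of defining and registering a custom mode, using only the constructor parameters and docstrings recorded above; the DigitWord import path and all literal values are assumptions, since the originals are elided in this dump.

from python_digitword import DigitWord  # assumed import; DigitWord.DIGIT / DigitWord.HEXDIGIT per the docstrings

crazy = GameMode(
    mode="crazy",                 # text name for the mode
    priority=50,                  # position when modes are returned as a sorted list
    digits=8,                     # hidden word of eight digits
    digit_type=DigitWord.DIGIT,   # plain 0-9 digits
    guesses_allowed=3,            # very few tries
    instruction_text="Guess 8 digits in 3 tries.",
    help_text="Each digit is between 0 and 9.",
)
print(crazy.dump())               # plain-dict form of the mode
controller = GameController(game_modes=[crazy], mode="crazy")  # note: a list, even for a single extra mode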
NB: the answer (a DigitWord object) and the\nmode (a GameMode object) are converted to python objects of a list and\ndict respectively.\n\n:return: python of the GameObject as detailed above.", "id": "f12733:c0:m10"} {"signature": "def load(self, source=None):", "body": "if not source:raise ValueError(\"\")if not isinstance(source, dict):raise TypeError(\"\".format(type(source)))required_keys = (\"\",\"\",\"\",\"\",\"\",\"\")if not all(key in source for key in required_keys):raise ValueError(\"\".format(source))_mode = GameMode(**source[\"\"])self._key = source[\"\"]self._status = source[\"\"]self._ttl = source[\"\"]self._answer = DigitWord(*source[\"\"], wordtype=_mode.digit_type)self._mode = _modeself._guesses_made = source[\"\"]", "docstring": "Load the representation of a GameObject from a Python representing\nthe game object.\n\n:param source: a Python as detailed above.\n\n:return:", "id": "f12733:c0:m11"} {"signature": "def new(self, mode):", "body": "dw = DigitWord(wordtype=mode.digit_type)dw.random(mode.digits)self._key = str(uuid.uuid4())self._status = \"\"self._ttl = self._answer = dwself._mode = modeself._guesses_remaining = mode.guesses_allowedself._guesses_made = ", "docstring": "Create a new instance of a game. Note, a mode MUST be provided and MUST be of\ntype GameMode.\n\n:param mode: ", "id": "f12733:c0:m12"} {"signature": "def __init__(self, game):", "body": "super(GameController, self).__init__()self._game_modes = [GameMode(mode=\"\", priority=, digits=, digit_type=DigitWord.DIGIT, guesses_allowed=),GameMode(mode=\"\", priority=, digits=, digit_type=DigitWord.DIGIT, guesses_allowed=),GameMode(mode=\"\", priority=, digits=, digit_type=DigitWord.DIGIT, guesses_allowed=),GameMode(mode=\"\", priority=, digits=, digit_type=DigitWord.HEXDIGIT, guesses_allowed=)]if game:self.load(game=game)else:self._game = Noneself._mode = None", "docstring": "Initialize the Game.", "id": "f12734:c0:m0"} {"signature": "def _start_again(self, message=None):", "body": "logging.debug(\"\".format(message))the_answer = self._game.answer_strreturn \"\".format(message,the_answer)", "docstring": "Simple method to form a start again message and give the answer in readable form.", "id": "f12734:c0:m18"} {"signature": "def _validate(self, op=\"\"):", "body": "if self._game is None:raise ValueError(\"\".format(op) +\"\"\"\")", "docstring": "A helper method to provide validation of the game object (_g). If the\ngame object does not exist or if (for any reason) the object is not a GameObject,\nthen an exception will be raised.\n\n:param op: A string describing the operation (e.g. guess, save, etc.) 
taking place\n:return: Nothing", "id": "f12734:c0:m20"} {"signature": "def assert_equal(data_path, expected_path):", "body": "from litezip.main import parse_litezipdata_struct = parse_litezip(data_path)expected = parse_litezip(expected_path)def _keyed(s):return sorted({t[]: t[:] for t in s}.keys())assert _keyed(data_struct) == _keyed(expected)relative_expected = convert_to_relative_paths(expected,expected_path)relative_data_struct = convert_to_relative_paths(data_struct, data_path)assert relative_data_struct == relative_expected", "docstring": "Asserts the data at `data_path` is equal to that at `expected_path`.", "id": "f12741:m1"} {"signature": "def convert_to_relative_paths(struct, base):", "body": "def _rel(p):return p.relative_to(base)new_struct = []for obj in struct:new_obj = type(obj)(obj.id, _rel(obj.file),tuple([_rel(y.data) for y in obj.resources]))new_struct.append(new_obj)return tuple(new_struct)", "docstring": "Makes the given litezip `struct`'s `Path` objects relative to `base`.", "id": "f12744:m0"} {"signature": "def get_keywords():", "body": "git_refnames = \"\"git_full = \"\"git_date = \"\"keywords = {\"\": git_refnames, \"\": git_full, \"\": git_date}return keywords", "docstring": "Get the keywords needed to look up the version information.", "id": "f12746:m0"} {"signature": "def get_config():", "body": "cfg = VersioneerConfig()cfg.VCS = \"\"cfg.style = \"\"cfg.tag_prefix = \"\"cfg.parentdir_prefix = \"\"cfg.versionfile_source = \"\"cfg.verbose = Falsereturn cfg", "docstring": "Create, populate and return the VersioneerConfig() object.", "id": "f12746:m1"} {"signature": "def register_vcs_handler(vcs, method): ", "body": "def decorate(f):\"\"\"\"\"\"if vcs not in HANDLERS:HANDLERS[vcs] = {}HANDLERS[vcs][method] = freturn freturn decorate", "docstring": "Decorator to mark a method as the handler for a particular VCS.", "id": "f12746:m2"} {"signature": "def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,env=None):", "body": "assert isinstance(commands, list)p = Nonefor c in commands:try:dispcmd = str([c] + args)p = subprocess.Popen([c] + args, cwd=cwd, env=env,stdout=subprocess.PIPE,stderr=(subprocess.PIPE if hide_stderrelse None))breakexcept EnvironmentError:e = sys.exc_info()[]if e.errno == errno.ENOENT:continueif verbose:print(\"\" % dispcmd)print(e)return None, Noneelse:if verbose:print(\"\" % (commands,))return None, Nonestdout = p.communicate()[].strip()if sys.version_info[] >= :stdout = stdout.decode()if p.returncode != :if verbose:print(\"\" % dispcmd)print(\"\" % stdout)return None, p.returncodereturn stdout, p.returncode", "docstring": "Call the given command(s).", "id": "f12746:m3"} {"signature": "def versions_from_parentdir(parentdir_prefix, root, verbose):", "body": "rootdirs = []for i in range():dirname = os.path.basename(root)if dirname.startswith(parentdir_prefix):return {\"\": dirname[len(parentdir_prefix):],\"\": None,\"\": False, \"\": None, \"\": None}else:rootdirs.append(root)root = os.path.dirname(root) if verbose:print(\"\" %(str(rootdirs), parentdir_prefix))raise NotThisMethod(\"\")", "docstring": "Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes both\n the project name and a version string. 
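A sketch of how the two helpers above fit together; the literal strings (VCS name, method name, git arguments) are elided in this dump, so the values below are assumptions.

HANDLERS = {}  # the module-level registry the decorator fills, shown here for context

@register_vcs_handler("git", "keywords")   # stores the function under HANDLERS["git"]["keywords"]
def my_keywords_reader(versionfile_abs):
    return {}

stdout, returncode = run_command(["git"], ["describe", "--tags"], cwd=".", hide_stderr=True)
if returncode == 0:
    print(stdout)   # run_command returns (stdout, returncode); both are None when no command could be run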
We will also support searching up\n two directory levels for an appropriately named parent directory", "id": "f12746:m4"} {"signature": "@register_vcs_handler(\"\", \"\")def git_get_keywords(versionfile_abs):", "body": "keywords = {}try:f = open(versionfile_abs, \"\")for line in f.readlines():if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()f.close()except EnvironmentError:passreturn keywords", "docstring": "Extract version information from the given file.", "id": "f12746:m5"} {"signature": "@register_vcs_handler(\"\", \"\")def git_versions_from_keywords(keywords, tag_prefix, verbose):", "body": "if not keywords:raise NotThisMethod(\"\")date = keywords.get(\"\")if date is not None:date = date.strip().replace(\"\", \"\", ).replace(\"\", \"\", )refnames = keywords[\"\"].strip()if refnames.startswith(\"\"):if verbose:print(\"\")raise NotThisMethod(\"\")refs = set([r.strip() for r in refnames.strip(\"\").split(\"\")])TAG = \"\"tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])if not tags:tags = set([r for r in refs if re.search(r'', r)])if verbose:print(\"\" % \"\".join(refs - tags))if verbose:print(\"\" % \"\".join(sorted(tags)))for ref in sorted(tags):if ref.startswith(tag_prefix):r = ref[len(tag_prefix):]if verbose:print(\"\" % r)return {\"\": r,\"\": keywords[\"\"].strip(),\"\": False, \"\": None,\"\": date}if verbose:print(\"\")return {\"\": \"\",\"\": keywords[\"\"].strip(),\"\": False, \"\": \"\", \"\": None}", "docstring": "Get version information from git keywords.", "id": "f12746:m6"} {"signature": "@register_vcs_handler(\"\", \"\")def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):", "body": "GITS = [\"\"]if sys.platform == \"\":GITS = [\"\", \"\"]out, rc = run_command(GITS, [\"\", \"\"], cwd=root,hide_stderr=True)if rc != :if verbose:print(\"\" % root)raise NotThisMethod(\"\")describe_out, rc = run_command(GITS, [\"\", \"\", \"\",\"\", \"\",\"\", \"\" % tag_prefix],cwd=root)if describe_out is None:raise NotThisMethod(\"\")describe_out = describe_out.strip()full_out, rc = run_command(GITS, [\"\", \"\"], cwd=root)if full_out is None:raise NotThisMethod(\"\")full_out = full_out.strip()pieces = {}pieces[\"\"] = full_outpieces[\"\"] = full_out[:] pieces[\"\"] = Nonegit_describe = describe_outdirty = git_describe.endswith(\"\")pieces[\"\"] = dirtyif dirty:git_describe = git_describe[:git_describe.rindex(\"\")]if \"\" in git_describe:mo = re.search(r'', git_describe)if not mo:pieces[\"\"] = (\"\"% describe_out)return piecesfull_tag = mo.group()if not full_tag.startswith(tag_prefix):if verbose:fmt = \"\"print(fmt % (full_tag, tag_prefix))pieces[\"\"] = (\"\"% (full_tag, tag_prefix))return piecespieces[\"\"] = full_tag[len(tag_prefix):]pieces[\"\"] = int(mo.group())pieces[\"\"] = mo.group()else:pieces[\"\"] = Nonecount_out, rc = run_command(GITS, [\"\", \"\", \"\"],cwd=root)pieces[\"\"] = int(count_out) date = run_command(GITS, [\"\", \"\", \"\", \"\"],cwd=root)[].strip()pieces[\"\"] = date.strip().replace(\"\", \"\", ).replace(\"\", \"\", )return pieces", "docstring": "Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source 
tree.", "id": "f12746:m7"} {"signature": "def plus_or_dot(pieces):", "body": "if \"\" in pieces.get(\"\", \"\"):return \"\"return \"\"", "docstring": "Return a + if we don't already have one, else return a .", "id": "f12746:m8"} {"signature": "def render_pep440(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += plus_or_dot(pieces)rendered += \"\" % (pieces[\"\"], pieces[\"\"])if pieces[\"\"]:rendered += \"\"else:rendered = \"\" % (pieces[\"\"],pieces[\"\"])if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]", "id": "f12746:m9"} {"signature": "def render_pep440_pre(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\" % pieces[\"\"]else:rendered = \"\" % pieces[\"\"]return rendered", "docstring": "TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE", "id": "f12746:m10"} {"signature": "def render_pep440_post(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"rendered += plus_or_dot(pieces)rendered += \"\" % pieces[\"\"]else:rendered = \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"rendered += \"\" % pieces[\"\"]return rendered", "docstring": "TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 0.postDISTANCE[.dev0]", "id": "f12746:m11"} {"signature": "def render_pep440_old(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"else:rendered = \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Eexceptions:\n 1: no tags. 0.postDISTANCE[.dev0]", "id": "f12746:m12"} {"signature": "def render_git_describe(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\" % (pieces[\"\"], pieces[\"\"])else:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)", "id": "f12746:m13"} {"signature": "def render_git_describe_long(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]rendered += \"\" % (pieces[\"\"], pieces[\"\"])else:rendered = pieces[\"\"]if pieces[\"\"]:rendered += \"\"return rendered", "docstring": "TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always -long'.\n The distance/hash is unconditional.\n\n Exceptions:\n 1: no tags. 
HEX[-dirty] (note: no 'g' prefix)", "id": "f12746:m14"} {"signature": "def render(pieces, style):", "body": "if pieces[\"\"]:return {\"\": \"\",\"\": pieces.get(\"\"),\"\": None,\"\": pieces[\"\"],\"\": None}if not style or style == \"\":style = \"\" if style == \"\":rendered = render_pep440(pieces)elif style == \"\":rendered = render_pep440_pre(pieces)elif style == \"\":rendered = render_pep440_post(pieces)elif style == \"\":rendered = render_pep440_old(pieces)elif style == \"\":rendered = render_git_describe(pieces)elif style == \"\":rendered = render_git_describe_long(pieces)else:raise ValueError(\"\" % style)return {\"\": rendered, \"\": pieces[\"\"],\"\": pieces[\"\"], \"\": None,\"\": pieces.get(\"\")}", "docstring": "Render the given version pieces into the requested style.", "id": "f12746:m15"} {"signature": "def get_versions():", "body": "cfg = get_config()verbose = cfg.verbosetry:return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,verbose)except NotThisMethod:passtry:root = os.path.realpath(__file__)for i in cfg.versionfile_source.split(''):root = os.path.dirname(root)except NameError:return {\"\": \"\", \"\": None,\"\": None,\"\": \"\",\"\": None}try:pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)return render(pieces, cfg.style)except NotThisMethod:passtry:if cfg.parentdir_prefix:return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)except NotThisMethod:passreturn {\"\": \"\", \"\": None,\"\": None,\"\": \"\", \"\": None}", "docstring": "Get version information or return default if unable to do so.", "id": "f12746:m16"} {"signature": "def _parse_document_id(elm_tree):", "body": "xpath = ''return [x for x in elm_tree.xpath(xpath, namespaces=COLLECTION_NSMAP)][]", "docstring": "Given the parsed xml to an `ElementTree`,\n parse the id from the content.", "id": "f12747:m0"} {"signature": "def _find_resources(directory, excludes=[]):", "body": "return sorted([r for r in directory.glob('')if True not in [e(r) for e in excludes]])", "docstring": "Return a list of resource paths from the directory.\n Ignore records via the list of `excludes`,\n which are callables that take a file parameter (as a `Path` instance).", "id": "f12747:m1"} {"signature": "def parse_module(path, excludes=None):", "body": "file = path / MODULE_FILENAMEif not file.exists():raise MissingFile(file)id = _parse_document_id(etree.parse(file.open()))excludes = excludes or []excludes.extend([lambda filepath: filepath.name == MODULE_FILENAME,])resources_paths = _find_resources(path, excludes=excludes)resources = tuple(_resource_from_path(res) for res in resources_paths)return Module(id, file, resources)", "docstring": "Parse the file structure to a data structure given the path to\n a module directory.", "id": "f12747:m4"} {"signature": "def parse_collection(path, excludes=None):", "body": "file = path / COLLECTION_FILENAMEif not file.exists():raise MissingFile(file)id = _parse_document_id(etree.parse(file.open()))excludes = excludes or []excludes.extend([lambda filepath: filepath.name == COLLECTION_FILENAME,lambda filepath: filepath.is_dir(),])resources_paths = _find_resources(path, excludes=excludes)resources = tuple(_resource_from_path(res) for res in resources_paths)return Collection(id, file, resources)", "docstring": "Parse a file structure to a data structure given the path to\n a collection directory.", "id": "f12747:m5"} {"signature": "def parse_litezip(path):", "body": "struct = [parse_collection(path)]struct.extend([parse_module(x) for x in path.iterdir()if x.is_dir() and 
x.name.startswith('')])return tuple(sorted(struct))", "docstring": "Parse a litezip file structure to a data structure given the path\n to the litezip directory.", "id": "f12747:m6"} {"signature": "def _arg_parser():", "body": "description = \"\"parser = argparse.ArgumentParser(description=description)verbose_group = parser.add_mutually_exclusive_group()verbose_group.add_argument('', '', action='',dest='', default=None,help=\"\")verbose_group.add_argument('', '', action='',dest='', default=None,help=\"\")parser.add_argument('',help=\"\")return parser", "docstring": "Factory for creating the argument parser", "id": "f12750:m0"} {"signature": "def _arg_parser():", "body": "description = \"\"parser = argparse.ArgumentParser(description=description)verbose_group = parser.add_mutually_exclusive_group()verbose_group.add_argument('', '', action='',dest='', default=None,help=\"\")verbose_group.add_argument('', '', action='',dest='', default=None,help=\"\")parser.add_argument('', '',help=\"\")parser.add_argument('',help=\"\")return parser", "docstring": "Factory for creating the argument parser", "id": "f12751:m0"} {"signature": "def is_valid_identifier(id):", "body": "return VALID_ID_REGEX.match(id) is not None", "docstring": "Validate that the given `id`.", "id": "f12752:m0"} {"signature": "def validate_content(*objs):", "body": "from .main import Collection, Modulevalidator = {Collection: cnxml.validate_collxml,Module: cnxml.validate_cnxml,}[type(objs[])]return validator(*[obj.file for obj in objs])", "docstring": "Runs the correct validator for given `obj`ects. Assumes all same type", "id": "f12752:m1"} {"signature": "def validate_litezip(struct):", "body": "msgs = []def _fmt_err(err):return (Path(err.filename), \"\".format(*(err[:])))obj_by_type = {}for obj in struct:if not is_valid_identifier(obj.id):msg = (obj.file.parent,\"\".format(obj.id),)logger.info(\"\".format(*msg))msgs.append(msg)obj_by_type.setdefault(type(obj), []).append(obj)for obtype in obj_by_type:content_msgs = list([_fmt_err(err) for err invalidate_content(*obj_by_type[obtype])])for msg in content_msgs:logger.info(\"\".format(*msg))msgs.extend(content_msgs)return msgs", "docstring": "Validate the given litezip as `struct`.\n Returns a list of validation messages.", "id": "f12752:m2"} {"signature": "def convert_completezip(path):", "body": "for filepath in path.glob(''):filepath.rename(filepath.parent / '')logger.debug(''.format(filepath))for filepath in path.glob(''):filepath.unlink()return parse_litezip(path)", "docstring": "Converts a completezip file structure to a litezip file structure.\n Returns a litezip data structure.", "id": "f12753:m0"} {"signature": "def configure_logging(config):", "body": "dictConfig(config)", "docstring": "Configure logging given a dictified configuration.", "id": "f12755:m0"} {"signature": "def orthogonal(shape, scale=):", "body": "flat_shape = (shape[], np.prod(shape[:]))a = np.random.normal(, , flat_shape)u, _, v = np.linalg.svd(a, full_matrices=False)q = u if u.shape == flat_shape else v q = q.reshape(shape)return sharedX(scale * q[:shape[], :shape[]])", "docstring": "benanne lasagne ortho init (faster than qr approach)", "id": "f12769:m2"} {"signature": "def fit(self, trX, trY, batch_size=, n_epochs=, len_filter=LenFilter(), snapshot_freq=, path=None):", "body": "if len_filter is not None:trX, trY = len_filter.filter(trX, trY)trY = standardize_targets(trY, cost=self.cost)n = t = time()costs = []for e in range(n_epochs):epoch_costs = []for xmb, ymb in self.iterator.iterXY(trX, trY):c = 
self._train(xmb, ymb)epoch_costs.append(c)n += len(ymb)if self.verbose >= :n_per_sec = n / (time() - t)n_left = len(trY) - n % len(trY)time_left = n_left/n_per_secsys.stdout.write(\"\" % (e, n, np.mean(epoch_costs[-:]), time_left))sys.stdout.flush()costs.extend(epoch_costs)status = \"\" % (e, n, np.mean(epoch_costs[-:]), time() - t)if self.verbose >= :sys.stdout.write(\"\"+status)sys.stdout.flush()sys.stdout.write(\"\")elif self.verbose == :print(status)if path and e % snapshot_freq == :save(self, \"\".format(path, e))return costs", "docstring": "Train model on given training examples and return the list of costs after each minibatch is processed.\n\n Args:\n trX (list) -- Inputs\n trY (list) -- Outputs\n batch_size (int, optional) -- number of examples in a minibatch (default 64)\n n_epochs (int, optional) -- number of epochs to train for (default 1)\n len_filter (object, optional) -- object to filter training example by length (default LenFilter())\n snapshot_freq (int, optional) -- number of epochs between saving model snapshots (default 1)\n path (str, optional) -- prefix of path where model snapshots are saved.\n If None, no snapshots are saved (default None)\n\n Returns:\n list -- costs of model after processing each minibatch", "id": "f12771:c0:m1"} {"signature": "def __init__(self, **kwargs):", "body": "MapcheteProcess.__init__(self, **kwargs)self.identifier = \"\",self.title = \"\",self.version = \"\",self.abstract = \"\"", "docstring": "Process initialization.", "id": "f12776:c0:m0"} {"signature": "def execute(mp):", "body": "assert ", "docstring": "User defined process.", "id": "f12777:m0"} {"signature": "def execute(mp):", "body": "with mp.open(\"\", resampling=\"\") as raster_file:if raster_file.is_empty():return \"\"dem = raster_file.read()tags = {: {\"\": True},\"\": \"\"}return dem, tags", "docstring": "User defined process.", "id": "f12778:m0"} {"signature": "def execute(mp,some_float_parameter,some_string_parameter,some_integer_parameter,some_bool_parameter):", "body": "assert some_integer_parameter == assert some_float_parameter == assert some_string_parameter == ''assert some_bool_parameter is Truereturn \"\"", "docstring": "User defined process.", "id": "f12779:m0"} {"signature": "def execute():", "body": "pass", "docstring": "Function needs to have exactly one argument.", "id": "f12781:m0"} {"signature": "def execute(mp):", "body": "with mp.open(mp.params[\"\"][\"\"]) as vector_file:return [dict(geometry=feature[\"\"],properties=dict(name=feature[\"\"].get(\"\", None),id=feature[\"\"].get(\"\", None),area=shape(feature[\"\"]).area))for feature in vector_file.read()]", "docstring": "User defined process.", "id": "f12782:m0"} {"signature": "def execute(mp):", "body": "with mp.open(\"\", resampling=\"\") as raster_file:if raster_file.is_empty():return \"\"dem = raster_file.read()return mp.hillshade(dem).astype(\"\")", "docstring": "User defined process.", "id": "f12783:m0"} {"signature": "def execute(mp):", "body": "return ", "docstring": "User defined process.", "id": "f12785:m0"} {"signature": "def _worker(mp, tile):", "body": "return tile, mp.execute(tile)", "docstring": "Multiprocessing worker processing a tile.", "id": "f12793:m15"} {"signature": "def execute(mp):", "body": "with mp.open(\"\", resampling=\"\") as raster_file:if raster_file.is_empty():return \"\"dem = raster_file.read()return dem", "docstring": "User defined process.", "id": "f12796:m0"} {"signature": "def extract_contours(array, tile, interval=, field='', base=):", "body": "import matplotlib.pyplot 
as pltlevels = _get_contour_values(array.min(), array.max(), interval=interval, base=base)if not levels:return []contours = plt.contour(array, levels)index = out_contours = []for level in range(len(contours.collections)):elevation = levels[index]index += paths = contours.collections[level].get_paths()for path in paths:out_coords = [(tile.left + (y * tile.pixel_x_size),tile.top - (x * tile.pixel_y_size),)for x, y in zip(path.vertices[:, ], path.vertices[:, ])]if len(out_coords) >= :out_contours.append(dict(properties={field: elevation},geometry=mapping(LineString(out_coords))))return out_contours", "docstring": "Extract contour lines from an array.\n\nParameters\n----------\narray : array\n input elevation data\ntile : Tile\n tile covering the array\ninterval : integer\n elevation value interval when drawing contour lines\nfield : string\n output field name containing elevation value\nbase : integer\n elevation base value the intervals are computed from\n\nReturns\n-------\ncontours : iterable\n contours as GeoJSON-like pairs of properties and geometry", "id": "f12801:m0"} {"signature": "def _get_contour_values(min_val, max_val, base=, interval=):", "body": "i = baseout = []if min_val < base:while i >= min_val:i -= intervalwhile i <= max_val:if i >= min_val:out.append(i)i += intervalreturn out", "docstring": "Return a list of values between min and max within an interval.", "id": "f12801:m1"} {"signature": "def calculate_slope_aspect(elevation, xres, yres, z=, scale=):", "body": "z = float(z)scale = float(scale)height, width = elevation.shape[] - , elevation.shape[] - window = [z * elevation[row:(row + height), col:(col + width)]for (row, col) in product(range(), range())]x = ((window[] + window[] + window[] + window[])- (window[] + window[] + window[] + window[])) / ( * xres * scale)y = ((window[] + window[] + window[] + window[])- (window[] + window[] + window[] + window[])) / ( * yres * scale)slope = math.pi/ - np.arctan(np.sqrt(x*x + y*y))aspect = np.arctan2(x, y)return slope, aspect", "docstring": "Calculate slope and aspect map.\n\nReturn a pair of arrays 2 pixels smaller than the input elevation array.\n\nSlope is returned in radians, from 0 for sheer face to pi/2 for\nflat ground. 
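A small worked example of the interval logic in _get_contour_values; the default literals are elided in this dump, so base and interval are passed explicitly, and the results below follow the structure of the recorded body.

_get_contour_values(10, 90, base=0, interval=25)    # -> [25, 50, 75]
_get_contour_values(-30, 40, base=0, interval=25)   # -> [-25, 0, 25]   (base anchors the grid of levels)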
Aspect is returned in radians, counterclockwise from -pi\nat north around to pi.\n\nLogic here is borrowed from hillshade.cpp:\nhttp://www.perrygeo.net/wordpress/?p=7\n\nParameters\n----------\nelevation : array\n input elevation data\nxres : float\n column width\nyres : float\n row height\nz : float\n vertical exaggeration factor\nscale : float\n scale factor of pixel size units versus height units (insert 112000\n when having elevation values in meters in a geodetic projection)\n\nReturns\n-------\nslope shade : array", "id": "f12802:m0"} {"signature": "def hillshade(elevation, tile, azimuth=, altitude=, z=, scale=):", "body": "azimuth = float(azimuth)altitude = float(altitude)z = float(z)scale = float(scale)xres = tile.tile.pixel_x_sizeyres = -tile.tile.pixel_y_sizeslope, aspect = calculate_slope_aspect(elevation, xres, yres, z=z, scale=scale)deg2rad = math.pi / shaded = np.sin(altitude * deg2rad) * np.sin(slope)+ np.cos(altitude * deg2rad) * np.cos(slope)* np.cos((azimuth - ) * deg2rad - aspect)shaded = (((shaded+)/)*-).astype(\"\")return ma.masked_array(data=np.pad(shaded, , mode=''), mask=elevation.mask)", "docstring": "Return hillshaded numpy array.\n\nParameters\n----------\nelevation : array\n input elevation data\ntile : Tile\n tile covering the array\nz : float\n vertical exaggeration factor\nscale : float\n scale factor of pixel size units versus height units (insert 112000\n when having elevation values in meters in a geodetic projection)", "id": "f12802:m1"} {"signature": "def clip_array_with_vector(array, array_affine, geometries, inverted=False, clip_buffer=):", "body": "buffered_geometries = []for feature in geometries:feature_geom = to_shape(feature[\"\"])if feature_geom.is_empty:continueif feature_geom.geom_type == \"\":buffered_geom = unary_union([g.buffer(clip_buffer) for g in feature_geom])else:buffered_geom = feature_geom.buffer(clip_buffer)if not buffered_geom.is_empty:buffered_geometries.append(buffered_geom)if buffered_geometries:if array.ndim == :return ma.masked_array(array, geometry_mask(buffered_geometries, array.shape, array_affine,invert=inverted))elif array.ndim == :mask = geometry_mask(buffered_geometries, (array.shape[], array.shape[]),array_affine, invert=inverted)return ma.masked_array(array, mask=np.stack((mask for band in array)))else:fill = False if inverted else Truereturn ma.masked_array(array, mask=np.full(array.shape, fill, dtype=bool))", "docstring": "Clip input array with a vector list.\n\nParameters\n----------\narray : array\n input raster data\narray_affine : Affine\n Affine object describing the raster's geolocation\ngeometries : iterable\n iterable of dictionaries, where every entry has a 'geometry' and\n 'properties' key.\ninverted : bool\n invert clip (default: False)\nclip_buffer : integer\n buffer (in pixels) geometries before clipping\n\nReturns\n-------\nclipped array : array", "id": "f12803:m0"} {"signature": "def validate_values(config, values):", "body": "if not isinstance(config, dict):raise TypeError(\"\")for value, vtype in values:if value not in config:raise ValueError(\"\" % value)if not isinstance(config[value], vtype):raise TypeError(\"\" % (value, vtype))return True", "docstring": "Validate whether value is found in config and has the right type.\n\nParameters\n----------\nconfig : dict\n configuration dictionary\nvalues : list\n list of (str, type) tuples of values and value types expected in config\n\nReturns\n-------\nTrue if config is valid.\n\nRaises\n------\nException if value is not found or has the wrong type.", 
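An illustrative use of the contract this docstring describes; the config keys and types here are invented for the example.

config = {"path": "out.tif", "bands": 3}

validate_values(config, [("path", str), ("bands", int)])   # -> True, both present with the right types
validate_values(config, [("dtype", str)])                   # raises ValueError: "dtype" is missing
validate_values(config, [("bands", str)])                   # raises TypeError: "bands" is not a str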
"id": "f12805:m0"} {"signature": "def get_hash(x):", "body": "if isinstance(x, str):return hash(x)elif isinstance(x, dict):return hash(yaml.dump(x))", "docstring": "Return hash of x.", "id": "f12805:m1"} {"signature": "def get_zoom_levels(process_zoom_levels=None, init_zoom_levels=None):", "body": "process_zoom_levels = _validate_zooms(process_zoom_levels)if init_zoom_levels is None:return process_zoom_levelselse:init_zoom_levels = _validate_zooms(init_zoom_levels)if not set(init_zoom_levels).issubset(set(process_zoom_levels)):raise MapcheteConfigError(\"\")return init_zoom_levels", "docstring": "Validate and return zoom levels.", "id": "f12805:m2"} {"signature": "def snap_bounds(bounds=None, pyramid=None, zoom=None):", "body": "if not isinstance(bounds, (tuple, list)):raise TypeError(\"\")if len(bounds) != :raise ValueError(\"\")if not isinstance(pyramid, BufferedTilePyramid):raise TypeError(\"\")bounds = Bounds(*bounds)lb = pyramid.tile_from_xy(bounds.left, bounds.bottom, zoom, on_edge_use=\"\").boundsrt = pyramid.tile_from_xy(bounds.right, bounds.top, zoom, on_edge_use=\"\").boundsreturn Bounds(lb.left, lb.bottom, rt.right, rt.top)", "docstring": "Snaps bounds to tiles boundaries of specific zoom level.\n\nParameters\n----------\nbounds : bounds to be snapped\npyramid : TilePyramid\nzoom : int\n\nReturns\n-------\nBounds(left, bottom, right, top)", "id": "f12805:m3"} {"signature": "def clip_bounds(bounds=None, clip=None):", "body": "bounds = Bounds(*bounds)clip = Bounds(*clip)return Bounds(max(bounds.left, clip.left),max(bounds.bottom, clip.bottom),min(bounds.right, clip.right),min(bounds.top, clip.top))", "docstring": "Clips bounds by clip.\n\nParameters\n----------\nbounds : bounds to be clipped\nclip : clip bounds\n\nReturns\n-------\nBounds(left, bottom, right, top)", "id": "f12805:m4"} {"signature": "def raw_conf(mapchete_file):", "body": "return _map_to_new_config(yaml.load(open(mapchete_file, \"\").read()))", "docstring": "Loads a mapchete_file into a dictionary.\n\nParameters\n----------\nmapchete_file : str\n Path to a Mapchete file.\n\nReturns\n-------\ndictionary", "id": "f12805:m5"} {"signature": "def raw_conf_process_pyramid(raw_conf):", "body": "return BufferedTilePyramid(raw_conf[\"\"][\"\"],metatiling=raw_conf[\"\"].get(\"\", ),pixelbuffer=raw_conf[\"\"].get(\"\", ))", "docstring": "Loads the process pyramid of a raw configuration.\n\nParameters\n----------\nraw_conf : dict\n Raw mapchete configuration as dictionary.\n\nReturns\n-------\nBufferedTilePyramid", "id": "f12805:m6"} {"signature": "def bounds_from_opts(wkt_geometry=None, point=None, bounds=None, zoom=None, raw_conf=None):", "body": "if wkt_geometry:return wkt.loads(wkt_geometry).boundselif point:x, y = pointzoom_levels = get_zoom_levels(process_zoom_levels=raw_conf[\"\"],init_zoom_levels=zoom)tp = raw_conf_process_pyramid(raw_conf)return tp.tile_from_xy(x, y, max(zoom_levels)).boundselse:return bounds", "docstring": "Loads the process pyramid of a raw configuration.\n\nParameters\n----------\nraw_conf : dict\n Raw mapchete configuration as dictionary.\n\nReturns\n-------\nBufferedTilePyramid", "id": "f12805:m7"} {"signature": "def _validate_zooms(zooms):", "body": "if isinstance(zooms, dict):if any([a not in zooms for a in [\"\", \"\"]]):raise MapcheteConfigError(\"\")zmin = _validate_zoom(zooms[\"\"])zmax = _validate_zoom(zooms[\"\"])if zmin > zmax:raise MapcheteConfigError(\"\")return list(range(zmin, zmax + ))elif isinstance(zooms, list):if len(zooms) == :return zoomselif len(zooms) == :zmin, zmax = 
sorted([_validate_zoom(z) for z in zooms])return list(range(zmin, zmax + ))else:return zoomselse:return [_validate_zoom(zooms)]", "docstring": "Return a list of zoom levels.\n\nFollowing inputs are converted:\n- int --> [int]\n- dict{min, max} --> range(min, max + 1)\n- [int] --> [int]\n- [int, int] --> range(smaller int, bigger int + 1)", "id": "f12805:m10"} {"signature": "def _validate_zoom(zoom):", "body": "if any([not isinstance(zoom, int), zoom < ]):raise MapcheteConfigError(\"\")return zoom", "docstring": "Assert zoom value is positive integer.", "id": "f12805:m11"} {"signature": "def _raw_at_zoom(config, zooms):", "body": "params_per_zoom = {}for zoom in zooms:params = {}for name, element in config.items():if name not in _RESERVED_PARAMETERS:out_element = _element_at_zoom(name, element, zoom)if out_element is not None:params[name] = out_elementparams_per_zoom[zoom] = paramsreturn params_per_zoom", "docstring": "Return parameter dictionary per zoom level.", "id": "f12805:m13"} {"signature": "def _element_at_zoom(name, element, zoom):", "body": "if isinstance(element, dict):if \"\" in element:return elementout_elements = {}for sub_name, sub_element in element.items():out_element = _element_at_zoom(sub_name, sub_element, zoom)if name == \"\":out_elements[sub_name] = out_elementelif out_element is not None:out_elements[sub_name] = out_elementif len(out_elements) == and name != \"\":return next(iter(out_elements.values()))if len(out_elements) == :return Nonereturn out_elementselif isinstance(name, str):if name.startswith(\"\"):return _filter_by_zoom(conf_string=name.strip(\"\").strip(), zoom=zoom,element=element)else:return elementelse:return element", "docstring": "Return the element filtered by zoom level.\n\n- An input integer or float gets returned as is.\n- An input string is checked whether it starts with \"zoom\". Then, the\n provided zoom level gets parsed and compared with the actual zoom\n level. If zoom levels match, the element gets returned.\nTODOs/gotchas:\n- Elements are unordered, which can lead to unexpected results when\n defining the YAML config.\n- Provided zoom levels for one element in config file are not allowed\n to \"overlap\", i.e. 
there is not yet a decision mechanism implemented\n which handles this case.", "id": "f12805:m14"} {"signature": "def _filter_by_zoom(element=None, conf_string=None, zoom=None):", "body": "for op_str, op_func in [(\"\", operator.eq),(\"\", operator.le),(\"\", operator.ge),(\"\", operator.lt),(\">\", operator.gt),]:if conf_string.startswith(op_str):return element if op_func(zoom, _strip_zoom(conf_string, op_str)) else None", "docstring": "Return element only if zoom condition matches with config string.", "id": "f12805:m15"} {"signature": "def _strip_zoom(input_string, strip_string):", "body": "try:return int(input_string.strip(strip_string))except Exception as e:raise MapcheteConfigError(\"\" % e)", "docstring": "Return zoom level as integer or throw error.", "id": "f12805:m16"} {"signature": "def _flatten_tree(tree, old_path=None):", "body": "flat_tree = []for key, value in tree.items():new_path = \"\".join([old_path, key]) if old_path else keyif isinstance(value, dict) and \"\" not in value:flat_tree.extend(_flatten_tree(value, old_path=new_path))else:flat_tree.append((new_path, value))return flat_tree", "docstring": "Flatten dict tree into dictionary where keys are paths of old dict.", "id": "f12805:m17"} {"signature": "def _unflatten_tree(flat):", "body": "tree = {}for key, value in flat.items():path = key.split(\"\")if len(path) == :tree[key] = valueelse:if not path[] in tree:tree[path[]] = _unflatten_tree({\"\".join(path[:]): value})else:branch = _unflatten_tree({\"\".join(path[:]): value})if not path[] in tree[path[]]:tree[path[]][path[]] = branch[path[]]else:tree[path[]][path[]].update(branch[path[]])return tree", "docstring": "Reverse tree flattening.", "id": "f12805:m18"} {"signature": "def __init__(self, input_config, zoom=None, bounds=None, single_input_file=None,mode=\"\", debug=False):", "body": "self._raw = _map_to_new_config(_config_to_dict(input_config))self._raw[\"\"] = zoomself._raw[\"\"] = boundsself._cache_area_at_zoom = {}self._cache_full_process_area = Nonetry:validate_values(self._raw, _MANDATORY_PARAMETERS)except Exception as e:raise MapcheteConfigError(e)logger.debug(\"\")self.config_dir = self._raw[\"\"]self.process_name = self._raw[\"\"]self.process_funclogger.debug(\"\")try:process_metatiling = self._raw[\"\"].get(\"\", )output_metatiling = self._raw[\"\"].get(\"\", process_metatiling)if output_metatiling > process_metatiling:raise ValueError(\"\")self.process_pyramid = BufferedTilePyramid(self._raw[\"\"][\"\"],metatiling=process_metatiling,pixelbuffer=self._raw[\"\"].get(\"\", ))self.output_pyramid = BufferedTilePyramid(self._raw[\"\"][\"\"],metatiling=output_metatiling,pixelbuffer=self._raw[\"\"].get(\"\", ))except Exception as e:logger.exception(e)raise MapcheteConfigError(e)if mode not in [\"\", \"\", \"\", \"\"]:raise MapcheteConfigError(\"\" % mode)self.mode = modelogger.debug(\"\")self._params_at_zoom = _raw_at_zoom(self._raw, self.init_zoom_levels)logger.debug(\"\")self.outputlogger.debug(\"\")self.input", "docstring": "Initialize configuration.", "id": "f12805:c0:m0"} {"signature": "@cached_propertydef zoom_levels(self):", "body": "return _validate_zooms(self._raw[\"\"])", "docstring": "Process zoom levels as defined in the configuration.", "id": "f12805:c0:m1"} {"signature": "@cached_propertydef init_zoom_levels(self):", "body": "return get_zoom_levels(process_zoom_levels=self._raw[\"\"],init_zoom_levels=self._raw[\"\"])", "docstring": "Zoom levels this process is currently initialized with.\n\nThis gets triggered by using the ``zoom`` kwarg. 
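A round-trip sketch for the two tree helpers above; the path separator is elided in this dump, so "/" is assumed below.

tree = {"input": {"dem": "dem.tif", "land": "land.geojson"}}

dict(_flatten_tree(tree))   # -> {"input/dem": "dem.tif", "input/land": "land.geojson"} (separator assumed)
_unflatten_tree({"input/dem": "dem.tif", "input/land": "land.geojson"})   # -> the nested form again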
If not set, it will\nbe equal to self.zoom_levels.", "id": "f12805:c0:m2"} {"signature": "@cached_propertydef bounds(self):", "body": "if self._raw[\"\"] is None:return self.process_pyramid.boundselse:return Bounds(*_validate_bounds(self._raw[\"\"]))", "docstring": "Process bounds as defined in the configuration.", "id": "f12805:c0:m3"} {"signature": "@cached_propertydef init_bounds(self):", "body": "if self._raw[\"\"] is None:return self.boundselse:return Bounds(*_validate_bounds(self._raw[\"\"]))", "docstring": "Process bounds this process is currently initialized with.\n\nThis gets triggered by using the ``init_bounds`` kwarg. If not set, it will\nbe equal to self.bounds.", "id": "f12805:c0:m4"} {"signature": "@cached_propertydef effective_bounds(self):", "body": "return snap_bounds(bounds=clip_bounds(bounds=self.init_bounds, clip=self.process_pyramid.bounds),pyramid=self.process_pyramid,zoom=min(self.baselevels[\"\"]) if self.baselevels else min(self.init_zoom_levels))", "docstring": "Effective process bounds required to initialize inputs.\n\nProcess bounds sometimes have to be larger, because all intersecting process\ntiles have to be covered as well.", "id": "f12805:c0:m5"} {"signature": "@cached_propertydef output(self):", "body": "output_params = dict(self._raw[\"\"],grid=self.output_pyramid.grid,pixelbuffer=self.output_pyramid.pixelbuffer,metatiling=self.output_pyramid.metatiling)if \"\" in output_params:output_params.update(path=absolute_path(path=output_params[\"\"], base_dir=self.config_dir))if \"\" not in output_params:raise MapcheteConfigError(\"\")if output_params[\"\"] not in available_output_formats():raise MapcheteConfigError(\"\" % (output_params[\"\"], str(available_output_formats())))writer = load_output_writer(output_params)try:writer.is_valid_with_config(output_params)except Exception as e:logger.exception(e)raise MapcheteConfigError(\"\" % (writer.METADATA[\"\"], e))return writer", "docstring": "Output object of driver.", "id": "f12805:c0:m6"} {"signature": "@cached_propertydef input(self):", "body": "delimiters = dict(zoom=self.init_zoom_levels,bounds=self.init_bounds,process_bounds=self.bounds,effective_bounds=self.effective_bounds)raw_inputs = {get_hash(v): vfor zoom in self.init_zoom_levelsif \"\" in self._params_at_zoom[zoom]for key, v in _flatten_tree(self._params_at_zoom[zoom][\"\"])if v is not None}initalized_inputs = {}for k, v in raw_inputs.items():if isinstance(v, str):logger.debug(\"\", v)try:reader = load_input_reader(dict(path=absolute_path(path=v, base_dir=self.config_dir),pyramid=self.process_pyramid,pixelbuffer=self.process_pyramid.pixelbuffer,delimiters=delimiters),readonly=self.mode == \"\")except Exception as e:logger.exception(e)raise MapcheteDriverError(\"\" % (v, e))logger.debug(\"\", v, reader)elif isinstance(v, dict):logger.debug(\"\", v)try:reader = load_input_reader(dict(abstract=deepcopy(v),pyramid=self.process_pyramid,pixelbuffer=self.process_pyramid.pixelbuffer,delimiters=delimiters,conf_dir=self.config_dir),readonly=self.mode == \"\")except Exception as e:logger.exception(e)raise MapcheteDriverError(\"\" % (v, e))logger.debug(\"\", v, reader)else:raise MapcheteConfigError(\"\", type(v))reader.bbox(out_crs=self.process_pyramid.crs)initalized_inputs[k] = readerreturn initalized_inputs", "docstring": "Input items used for process stored in a dictionary.\n\nKeys are the hashes of the input parameters, values the respective\nInputData classes.", "id": "f12805:c0:m7"} {"signature": "@cached_propertydef baselevels(self):", "body": "if \"\" 
not in self._raw:return {}baselevels = self._raw[\"\"]minmax = {k: v for k, v in baselevels.items() if k in [\"\", \"\"]}if not minmax:raise MapcheteConfigError(\"\")for v in minmax.values():if not isinstance(v, int) or v < :raise MapcheteConfigError(\"\" % minmax.values())zooms = list(range(minmax.get(\"\", min(self.zoom_levels)),minmax.get(\"\", max(self.zoom_levels)) + ))if not set(self.zoom_levels).difference(set(zooms)):raise MapcheteConfigError(\"\")return dict(zooms=zooms,lower=baselevels.get(\"\", \"\"),higher=baselevels.get(\"\", \"\"),tile_pyramid=BufferedTilePyramid(self.output_pyramid.grid,pixelbuffer=self.output_pyramid.pixelbuffer,metatiling=self.process_pyramid.metatiling))", "docstring": "Optional baselevels configuration.\n\nbaselevels:\n min: \n max: \n lower: \n higher: ", "id": "f12805:c0:m8"} {"signature": "def params_at_zoom(self, zoom):", "body": "if zoom not in self.init_zoom_levels:raise ValueError(\"\")out = dict(self._params_at_zoom[zoom], input={}, output=self.output)if \"\" in self._params_at_zoom[zoom]:flat_inputs = {}for k, v in _flatten_tree(self._params_at_zoom[zoom][\"\"]):if v is None:flat_inputs[k] = Noneelse:flat_inputs[k] = self.input[get_hash(v)]out[\"\"] = _unflatten_tree(flat_inputs)else:out[\"\"] = {}return out", "docstring": "Return configuration parameters snapshot for zoom as dictionary.\n\nParameters\n----------\nzoom : int\n zoom level\n\nReturns\n-------\nconfiguration snapshot : dictionary\nzoom level dependent process configuration", "id": "f12805:c0:m10"} {"signature": "def area_at_zoom(self, zoom=None):", "body": "if zoom is None:if not self._cache_full_process_area:logger.debug(\"\")self._cache_full_process_area = cascaded_union([self._area_at_zoom(z) for z in self.init_zoom_levels]).buffer()return self._cache_full_process_areaelse:if zoom not in self.init_zoom_levels:raise ValueError(\"\")return self._area_at_zoom(zoom)", "docstring": "Return process bounding box for zoom level.\n\nParameters\n----------\nzoom : int or None\n if None, the union of all zoom level areas is returned\n\nReturns\n-------\nprocess area : shapely geometry", "id": "f12805:c0:m11"} {"signature": "def bounds_at_zoom(self, zoom=None):", "body": "return () if self.area_at_zoom(zoom).is_empty else Bounds(*self.area_at_zoom(zoom).bounds)", "docstring": "Return process bounds for zoom level.\n\nParameters\n----------\nzoom : integer or list\n\nReturns\n-------\nprocess bounds : tuple\n left, bottom, right, top", "id": "f12805:c0:m13"} {"signature": "@cached_propertydef crs(self):", "body": "warnings.warn(DeprecationWarning(\"\"))return self.process_pyramid.crs", "docstring": "Deprecated.", "id": "f12805:c0:m14"} {"signature": "@cached_propertydef metatiling(self):", "body": "warnings.warn(DeprecationWarning(\"\"))return self.process_pyramid.metatiling", "docstring": "Deprecated.", "id": "f12805:c0:m15"} {"signature": "@cached_propertydef pixelbuffer(self):", "body": "warnings.warn(DeprecationWarning(\"\"))return self.process_pyramid.pixelbuffer", "docstring": "Deprecated.", "id": "f12805:c0:m16"} {"signature": "@cached_propertydef inputs(self):", "body": "warnings.warn(DeprecationWarning(\"\"))return self.input", "docstring": "Deprecated.", "id": "f12805:c0:m17"} {"signature": "@cached_propertydef process_file(self):", "body": "warnings.warn(DeprecationWarning(\"\"))return os.path.join(self._raw[\"\"], self._raw[\"\"])", "docstring": "Deprecated.", "id": "f12805:c0:m18"} {"signature": "def at_zoom(self, zoom):", "body": "warnings.warn(DeprecationWarning(\"\"))return 
self.params_at_zoom(zoom)", "docstring": "Deprecated.", "id": "f12805:c0:m19"} {"signature": "def process_area(self, zoom=None):", "body": "warnings.warn(DeprecationWarning(\"\"))return self.area_at_zoom(zoom)", "docstring": "Deprecated.", "id": "f12805:c0:m20"} {"signature": "def process_bounds(self, zoom=None):", "body": "warnings.warn(DeprecationWarning(\"\"))return self.bounds_at_zoom(zoom)", "docstring": "Deprecated.", "id": "f12805:c0:m21"} {"signature": "def __init__(self, input_params, **kwargs):", "body": "self.pyramid = input_params[\"\"]self.pixelbuffer = input_params[\"\"]self.crs = self.pyramid.crs", "docstring": "Initialize relevant input information.", "id": "f12806:c0:m0"} {"signature": "def open(self, tile, **kwargs):", "body": "raise NotImplementedError", "docstring": "Return InputTile object.\n\nParameters\n----------\ntile : ``Tile``\n\nReturns\n-------\ninput tile : ``InputTile``\n tile view of input data", "id": "f12806:c0:m1"} {"signature": "def bbox(self, out_crs=None):", "body": "raise NotImplementedError", "docstring": "Return data bounding box.\n\nParameters\n----------\nout_crs : ``rasterio.crs.CRS``\n rasterio CRS object (default: CRS of process pyramid)\n\nReturns\n-------\nbounding box : geometry\n Shapely geometry object", "id": "f12806:c0:m2"} {"signature": "def exists(self):", "body": "raise NotImplementedError", "docstring": "Check if data or file even exists.\n\nReturns\n-------\nfile exists : bool", "id": "f12806:c0:m3"} {"signature": "def cleanup(self):", "body": "pass", "docstring": "Optional cleanup function called when Mapchete exits.", "id": "f12806:c0:m4"} {"signature": "def __init__(self, tile, **kwargs):", "body": "", "docstring": "Initialize.", "id": "f12806:c1:m0"} {"signature": "def read(self, **kwargs):", "body": "raise NotImplementedError", "docstring": "Read reprojected & resampled input data.\n\nReturns\n-------\ndata : array or list\n NumPy array for raster data or feature list for vector data", "id": "f12806:c1:m1"} {"signature": "def is_empty(self):", "body": "raise NotImplementedError", "docstring": "Check if there is data within this tile.\n\nReturns\n-------\nis empty : bool", "id": "f12806:c1:m2"} {"signature": "def __enter__(self):", "body": "return self", "docstring": "Required for 'with' statement.", "id": "f12806:c1:m3"} {"signature": "def __exit__(self, t, v, tb):", "body": "pass", "docstring": "Clean up.", "id": "f12806:c1:m4"} {"signature": "def __init__(self, output_params, readonly=False):", "body": "self.pixelbuffer = output_params[\"\"]if \"\" in output_params:warnings.warn(DeprecationWarning(\"\"))if \"\" not in output_params:output_params[\"\"] = output_params.pop(\"\")self.pyramid = TilePyramid(grid=output_params[\"\"],metatiling=output_params[\"\"])self.crs = self.pyramid.crsself._bucket = Noneif not readonly:write_output_metadata(output_params)", "docstring": "Initialize.", "id": "f12806:c2:m0"} {"signature": "def read(self, output_tile):", "body": "raise NotImplementedError", "docstring": "Read existing process output.\n\nParameters\n----------\noutput_tile : ``BufferedTile``\n must be member of output ``TilePyramid``\n\nReturns\n-------\nprocess output : array or list", "id": "f12806:c2:m1"} {"signature": "def write(self, process_tile, data):", "body": "raise NotImplementedError", "docstring": "Write data from one or more process tiles.\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n must be member of process ``TilePyramid``", "id": "f12806:c2:m2"} {"signature": "def tiles_exist(self, 
process_tile=None, output_tile=None):", "body": "if process_tile and output_tile:raise ValueError(\"\")if process_tile:return any(path_exists(self.get_path(tile))for tile in self.pyramid.intersecting(process_tile))if output_tile:return path_exists(self.get_path(output_tile))", "docstring": "Check whether output tiles of a tile (either process or output) exists.\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n must be member of process ``TilePyramid``\noutput_tile : ``BufferedTile``\n must be member of output ``TilePyramid``\n\nReturns\n-------\nexists : bool", "id": "f12806:c2:m3"} {"signature": "def is_valid_with_config(self, config):", "body": "raise NotImplementedError", "docstring": "Check if output format is valid with other process parameters.\n\nParameters\n----------\nconfig : dictionary\n output configuration parameters\n\nReturns\n-------\nis_valid : bool", "id": "f12806:c2:m4"} {"signature": "def get_path(self, tile):", "body": "return os.path.join(*[self.path,str(tile.zoom),str(tile.row),str(tile.col) + self.file_extension])", "docstring": "Determine target file path.\n\nParameters\n----------\ntile : ``BufferedTile``\n must be member of output ``TilePyramid``\n\nReturns\n-------\npath : string", "id": "f12806:c2:m5"} {"signature": "def prepare_path(self, tile):", "body": "makedirs(os.path.dirname(self.get_path(tile)))", "docstring": "Create directory and subdirectory if necessary.\n\nParameters\n----------\ntile : ``BufferedTile``\n must be member of output ``TilePyramid``", "id": "f12806:c2:m6"} {"signature": "def for_web(self, data):", "body": "raise NotImplementedError", "docstring": "Convert data to web output (raster only).\n\nParameters\n----------\ndata : array\n\nReturns\n-------\nweb data : array", "id": "f12806:c2:m7"} {"signature": "def empty(self, process_tile):", "body": "raise NotImplementedError", "docstring": "Return empty data.\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n must be member of process ``TilePyramid``\n\nReturns\n-------\nempty data : array or list\n empty array with correct data type for raster data or empty list\n for vector data", "id": "f12806:c2:m8"} {"signature": "def output_is_valid(self, process_data):", "body": "if self.METADATA[\"\"] == \"\":return (is_numpy_or_masked_array(process_data) oris_numpy_or_masked_array_with_tags(process_data))elif self.METADATA[\"\"] == \"\":return is_feature_list(process_data)", "docstring": "Check whether process output is allowed with output driver.\n\nParameters\n----------\nprocess_data : raw process output\n\nReturns\n-------\nTrue or False", "id": "f12806:c2:m9"} {"signature": "def output_cleaned(self, process_data):", "body": "if self.METADATA[\"\"] == \"\":if is_numpy_or_masked_array(process_data):return process_dataelif is_numpy_or_masked_array_with_tags(process_data):data, tags = process_datareturn self.output_cleaned(data), tagselif self.METADATA[\"\"] == \"\":return list(process_data)", "docstring": "Return verified and cleaned output.\n\nParameters\n----------\nprocess_data : raw process output\n\nReturns\n-------\nNumPy array or list of features.", "id": "f12806:c2:m10"} {"signature": "def extract_subset(self, input_data_tiles=None, out_tile=None):", "body": "if self.METADATA[\"\"] == \"\":mosaic = create_mosaic(input_data_tiles)return extract_from_array(in_raster=prepare_array(mosaic.data,nodata=self.nodata,dtype=self.output_params[\"\"]),in_affine=mosaic.affine,out_tile=out_tile)elif self.METADATA[\"\"] == \"\":return [feature for feature in 
list(chain.from_iterable([features for _, features in input_data_tiles]))if shape(feature[\"\"]).intersects(out_tile.bbox)]", "docstring": "Extract subset from multiple tiles.\n\ninput_data_tiles : list of (``Tile``, process data) tuples\nout_tile : ``Tile``\n\nReturns\n-------\nNumPy array or list of features.", "id": "f12806:c2:m11"} {"signature": "def open(self, tile, process):", "body": "raise NotImplementedError", "docstring": "Open process output as input for other process.\n\nParameters\n----------\ntile : ``Tile``\nprocess : ``MapcheteProcess``", "id": "f12806:c2:m12"} {"signature": "def _read_as_tiledir(self,out_tile=None,td_crs=None,tiles_paths=None,profile=None,validity_check=False,indexes=None,resampling=None,dst_nodata=None,gdal_opts=None,**kwargs):", "body": "return _read_as_tiledir(data_type=self.METADATA[\"\"],out_tile=out_tile,td_crs=td_crs,tiles_paths=tiles_paths,profile=profile,validity_check=validity_check,indexes=indexes,resampling=resampling,dst_nodata=dst_nodata,gdal_opts=gdal_opts,**{k: v for k, v in kwargs.items() if k != \"\"})", "docstring": "Read reprojected & resampled input data.\n\nParameters\n----------\nvalidity_check : bool\n vector file: also run checks if reprojected geometry is valid,\n otherwise throw RuntimeError (default: True)\n\nindexes : list or int\n raster file: a list of band numbers; None will read all.\ndst_nodata : int or float, optional\n raster file: if not set, the nodata value from the source dataset\n will be used\ngdal_opts : dict\n raster file: GDAL options passed on to rasterio.Env()\n\nReturns\n-------\ndata : list for vector files or numpy array for raster files", "id": "f12806:c2:m13"} {"signature": "def available_output_formats():", "body": "output_formats = []for v in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):driver_ = v.load()if hasattr(driver_, \"\") and (driver_.METADATA[\"\"] in [\"\", \"\"]):output_formats.append(driver_.METADATA[\"\"])return output_formats", "docstring": "Return all available output formats.\n\nReturns\n-------\nformats : list\n all available output formats", "id": "f12807:m1"} {"signature": "def available_input_formats():", "body": "input_formats = []for v in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):logger.debug(\"\", v)driver_ = v.load()if hasattr(driver_, \"\") and (driver_.METADATA[\"\"] in [\"\", \"\"]):input_formats.append(driver_.METADATA[\"\"])return input_formats", "docstring": "Return all available input formats.\n\nReturns\n-------\nformats : list\n all available input formats", "id": "f12807:m2"} {"signature": "def load_output_writer(output_params, readonly=False):", "body": "if not isinstance(output_params, dict):raise TypeError(\"\")driver_name = output_params[\"\"]for v in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):_driver = v.load()if all([hasattr(_driver, attr) for attr in [\"\", \"\"]]) and (_driver.METADATA[\"\"] == driver_name):return _driver.OutputData(output_params, readonly=readonly)raise MapcheteDriverError(\"\" % driver_name)", "docstring": "Return output class of driver.\n\nReturns\n-------\noutput : ``OutputData``\n output writer object", "id": "f12807:m3"} {"signature": "def load_input_reader(input_params, readonly=False):", "body": "logger.debug(\"\", input_params)if not isinstance(input_params, dict):raise TypeError(\"\")if \"\" in input_params:driver_name = input_params[\"\"][\"\"]elif \"\" in input_params:if os.path.splitext(input_params[\"\"])[]:input_file = input_params[\"\"]driver_name = driver_from_file(input_file)else:logger.debug(\"\", 
input_params[\"\"])driver_name = \"\"else:raise MapcheteDriverError(\"\" % input_params)for v in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):driver_ = v.load()if hasattr(driver_, \"\") and (driver_.METADATA[\"\"] == driver_name):return v.load().InputData(input_params, readonly=readonly)raise MapcheteDriverError(\"\" % driver_name)", "docstring": "Return input class of driver.\n\nReturns\n-------\ninput_params : ``InputData``\n input parameters", "id": "f12807:m4"} {"signature": "def driver_from_file(input_file):", "body": "file_ext = os.path.splitext(input_file)[].split(\"\")[]if file_ext not in _file_ext_to_driver():raise MapcheteDriverError(\"\" % file_ext)driver = _file_ext_to_driver()[file_ext]if len(driver) > :warnings.warn(DeprecationWarning(\"\" % driver[]))return driver[]", "docstring": "Guess driver from file extension.\n\nReturns\n-------\ndriver : string\n driver name", "id": "f12807:m5"} {"signature": "def write_output_metadata(output_params):", "body": "if \"\" in output_params:metadata_path = os.path.join(output_params[\"\"], \"\")logger.debug(\"\", metadata_path)try:existing_params = read_output_metadata(metadata_path)logger.debug(\"\", metadata_path)logger.debug(\"\", pformat(existing_params))existing_tp = existing_params[\"\"]current_params = params_to_dump(output_params)logger.debug(\"\", pformat(current_params))current_tp = BufferedTilePyramid(**current_params[\"\"])if existing_tp != current_tp:raise MapcheteConfigError(\"\"\"\" % (existing_tp, current_tp))existing_format = existing_params[\"\"][\"\"]current_format = current_params[\"\"][\"\"]if existing_format != current_format:raise MapcheteConfigError(\"\"\"\" % ((existing_format, current_format)))except FileNotFoundError:logger.debug(\"\", metadata_path)dump_params = params_to_dump(output_params)write_json(metadata_path, dump_params)else:logger.debug(\"\")", "docstring": "Dump output JSON and verify parameters if output metadata exist.", "id": "f12807:m8"} {"signature": "def __init__(self, input_params, **kwargs):", "body": "super(InputData, self).__init__(input_params, **kwargs)self.path = input_params[\"\"]self.process = Mapchete(MapcheteConfig(self.path, mode=\"\",bounds=input_params[\"\"][\"\"]))", "docstring": "Initialize.", "id": "f12809:c0:m0"} {"signature": "def open(self, tile, **kwargs):", "body": "return self.process.config.output.open(tile, self.process, **kwargs)", "docstring": "Return InputTile object.\n\nParameters\n----------\ntile : ``Tile``\n\nReturns\n-------\ninput tile : ``InputTile``\n tile view of input data", "id": "f12809:c0:m1"} {"signature": "def bbox(self, out_crs=None):", "body": "return reproject_geometry(self.process.config.area_at_zoom(),src_crs=self.process.config.process_pyramid.crs,dst_crs=self.pyramid.crs if out_crs is None else out_crs)", "docstring": "Return data bounding box.\n\nParameters\n----------\nout_crs : ``rasterio.crs.CRS``\n rasterio CRS object (default: CRS of process pyramid)\n\nReturns\n-------\nbounding box : geometry\n Shapely geometry object", "id": "f12809:c0:m2"} {"signature": "def __init__(self, input_params, **kwargs):", "body": "super(InputData, self).__init__(input_params, **kwargs)self.path = input_params[\"\"]", "docstring": "Initialize.", "id": "f12810:c0:m0"} {"signature": "def open(self, tile, **kwargs):", "body": "return InputTile(tile, self, **kwargs)", "docstring": "Return InputTile object.\n\nParameters\n----------\ntile : ``Tile``\n\nReturns\n-------\ninput tile : ``InputTile``\n tile view of input data", "id": "f12810:c0:m1"} 
{"signature": "def bbox(self, out_crs=None):", "body": "out_crs = self.pyramid.crs if out_crs is None else out_crswith fiona.open(self.path) as inp:inp_crs = CRS(inp.crs)bbox = box(*inp.bounds)return reproject_geometry(bbox, src_crs=inp_crs, dst_crs=out_crs)", "docstring": "Return data bounding box.\n\nParameters\n----------\nout_crs : ``rasterio.crs.CRS``\n rasterio CRS object (default: CRS of process pyramid)\n\nReturns\n-------\nbounding box : geometry\n Shapely geometry object", "id": "f12810:c0:m2"} {"signature": "def __init__(self, tile, vector_file, **kwargs):", "body": "self.tile = tileself.vector_file = vector_fileself._cache = {}", "docstring": "Initialize.", "id": "f12810:c1:m0"} {"signature": "def read(self, validity_check=True, **kwargs):", "body": "return [] if self.is_empty() else self._read_from_cache(validity_check)", "docstring": "Read reprojected & resampled input data.\n\nParameters\n----------\nvalidity_check : bool\n also run checks if reprojected geometry is valid, otherwise throw\n RuntimeError (default: True)\n\nReturns\n-------\ndata : list", "id": "f12810:c1:m1"} {"signature": "def is_empty(self):", "body": "if not self.tile.bbox.intersects(self.vector_file.bbox()):return Truereturn len(self._read_from_cache(True)) == ", "docstring": "Check if there is data within this tile.\n\nReturns\n-------\nis empty : bool", "id": "f12810:c1:m2"} {"signature": "def __init__(self, output_params, **kwargs):", "body": "super(OutputData, self).__init__(output_params)self.path = output_params[\"\"]self.file_extension = \"\"self.output_params = output_paramsself.nodata = output_params.get(\"\", GTIFF_DEFAULT_PROFILE[\"\"])self._bucket = self.path.split(\"\")[] if self.path.startswith(\"\") else None", "docstring": "Initialize.", "id": "f12811:c0:m0"} {"signature": "def read(self, output_tile, **kwargs):", "body": "try:return read_raster_no_crs(self.get_path(output_tile))except FileNotFoundError:return self.empty(output_tile)", "docstring": "Read existing process output.\n\nParameters\n----------\noutput_tile : ``BufferedTile``\n must be member of output ``TilePyramid``\n\nReturns\n-------\nNumPy array", "id": "f12811:c0:m1"} {"signature": "def write(self, process_tile, data):", "body": "if (isinstance(data, tuple) andlen(data) == andisinstance(data[], dict)):data, tags = dataelse:tags = {}data = prepare_array(data,masked=True,nodata=self.nodata,dtype=self.profile(process_tile)[\"\"])if data.mask.all():logger.debug(\"\")else:bucket_resource = get_boto3_bucket(self._bucket) if self._bucket else Nonefor tile in self.pyramid.intersecting(process_tile):out_path = self.get_path(tile)self.prepare_path(tile)out_tile = BufferedTile(tile, self.pixelbuffer)write_raster_window(in_tile=process_tile,in_data=data,out_profile=self.profile(out_tile),out_tile=out_tile,out_path=out_path,tags=tags,bucket_resource=bucket_resource)", "docstring": "Write data from process tiles into GeoTIFF file(s).\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n must be member of process ``TilePyramid``\ndata : ``np.ndarray``", "id": "f12811:c0:m2"} {"signature": "def is_valid_with_config(self, config):", "body": "return validate_values(config, [(\"\", int),(\"\", str),(\"\", str)])", "docstring": "Check if output format is valid with other process parameters.\n\nParameters\n----------\nconfig : dictionary\n output configuration parameters\n\nReturns\n-------\nis_valid : bool", "id": "f12811:c0:m3"} {"signature": "def profile(self, tile=None):", "body": "dst_metadata = 
GTIFF_DEFAULT_PROFILEdst_metadata.pop(\"\", None)dst_metadata.update(count=self.output_params[\"\"],dtype=self.output_params[\"\"],driver=\"\")if tile is not None:dst_metadata.update(crs=tile.crs, width=tile.width, height=tile.height,affine=tile.affine)else:for k in [\"\", \"\", \"\", \"\"]:dst_metadata.pop(k, None)if \"\" in self.output_params:dst_metadata.update(nodata=self.output_params[\"\"])try:if \"\" in self.output_params:warnings.warn(DeprecationWarning(\"\"))dst_metadata.update(compress=self.output_params[\"\"])else:dst_metadata.update(compress=self.output_params[\"\"])dst_metadata.update(predictor=self.output_params[\"\"])except KeyError:passreturn dst_metadata", "docstring": "Create a metadata dictionary for rasterio.\n\nParameters\n----------\ntile : ``BufferedTile``\n\nReturns\n-------\nmetadata : dictionary\n output profile dictionary used for rasterio.", "id": "f12811:c0:m4"} {"signature": "def empty(self, process_tile):", "body": "profile = self.profile(process_tile)return ma.masked_array(data=np.full((profile[\"\"], ) + process_tile.shape, profile[\"\"],dtype=profile[\"\"]),mask=True)", "docstring": "Return empty data.\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n must be member of process ``TilePyramid``\n\nReturns\n-------\nempty data : array\n empty array with data type provided in output profile", "id": "f12811:c0:m5"} {"signature": "def for_web(self, data):", "body": "return memory_file(prepare_array(data, masked=True, nodata=self.nodata, dtype=self.profile()[\"\"]),self.profile()), \"\"", "docstring": "Convert data to web output (raster only).\n\nParameters\n----------\ndata : array\n\nReturns\n-------\nweb data : array", "id": "f12811:c0:m6"} {"signature": "def open(self, tile, process, **kwargs):", "body": "return InputTile(tile, process, kwargs.get(\"\", None))", "docstring": "Open process output as input for other process.\n\nParameters\n----------\ntile : ``Tile``\nprocess : ``MapcheteProcess``\nkwargs : keyword arguments", "id": "f12811:c0:m7"} {"signature": "def __init__(self, tile, process, resampling):", "body": "self.tile = tileself.process = processself.pixelbuffer = Noneself.resampling = resampling", "docstring": "Initialize.", "id": "f12811:c1:m0"} {"signature": "def read(self, indexes=None, **kwargs):", "body": "band_indexes = self._get_band_indexes(indexes)arr = self.process.get_raw_output(self.tile)if len(band_indexes) == :return arr[band_indexes[] - ]else:return ma.concatenate([ma.expand_dims(arr[i - ], ) for i in band_indexes])", "docstring": "Read reprojected & resampled input data.\n\nParameters\n----------\nindexes : integer or list\n band number or list of band numbers\n\nReturns\n-------\ndata : array", "id": "f12811:c1:m1"} {"signature": "def is_empty(self, indexes=None):", "body": "return not self.tile.bbox.intersects(self.process.config.area_at_zoom())", "docstring": "Check if there is data within this tile.\n\nReturns\n-------\nis empty : bool", "id": "f12811:c1:m2"} {"signature": "def _get_band_indexes(self, indexes=None):", "body": "if indexes:if isinstance(indexes, list):return indexeselse:return [indexes]else:return range(, self.process.config.output.profile(self.tile)[\"\"] + )", "docstring": "Return valid band indexes.", "id": "f12811:c1:m3"} {"signature": "def __enter__(self):", "body": "return self", "docstring": "Enable context manager.", "id": "f12811:c1:m4"} {"signature": "def __exit__(self, t, v, tb):", "body": "pass", "docstring": "Clear cache on close.", "id": "f12811:c1:m5"} {"signature": "def __init__(self, 
input_params, **kwargs):", "body": "super(InputData, self).__init__(input_params, **kwargs)if \"\" in input_params:self._params = input_params[\"\"]self.path = absolute_path(path=self._params[\"\"],base_dir=input_params[\"\"])logger.debug(\"\", input_params)self.td_pyramid = BufferedTilePyramid(self._params[\"\"],metatiling=self._params.get(\"\", ),tile_size=self._params.get(\"\", ),pixelbuffer=self._params.get(\"\", ))self._read_as_tiledir_func = base._read_as_tiledirelif \"\" in input_params:self.path = absolute_path(path=input_params[\"\"], base_dir=input_params.get(\"\"))try:params = read_output_metadata(os.path.join(self.path, \"\"))except FileNotFoundError:raise MapcheteConfigError(\"\" % input_params[\"\"])self.td_pyramid = params[\"\"]self.output_data = load_output_writer(dict(params[\"\"],metatiling=self.td_pyramid.metatiling,pixelbuffer=self.td_pyramid.pixelbuffer,pyramid=self.td_pyramid,grid=self.td_pyramid.grid,path=self.path),readonly=True)self._params = dict(path=self.path,grid=self.td_pyramid.grid.to_dict(),metatiling=self.td_pyramid.metatiling,pixelbuffer=self.td_pyramid.pixelbuffer,tile_size=self.td_pyramid.tile_size,extension=self.output_data.file_extension.split(\"\")[-],**params[\"\"])self._read_as_tiledir_func = self.output_data._read_as_tiledirvalidate_values(self._params,[(\"\", str),(\"\", (str, dict)),(\"\", str)])self._ext = self._params[\"\"]self._bounds = self._params.get(\"\", self.td_pyramid.bounds)self._file_type = (\"\" if self._params[\"\"] == \"\" else \"\")if self._file_type == \"\":self._params[\"\"] = self._params.get(\"\", self._params.get(\"\", None))validate_values(self._params, [(\"\", str), (\"\", int)])self._profile = {\"\": self._params.get(\"\", ),\"\": self._params[\"\"],\"\": self._params[\"\"]}else:self._profile = None", "docstring": "Initialize.", "id": "f12812:c0:m0"} {"signature": "def open(self,tile,tile_directory_zoom=None,matching_method=\"\",matching_max_zoom=None,matching_precision=,fallback_to_higher_zoom=False,resampling=\"\",**kwargs):", "body": "td_bounds = reproject_geometry(tile.bbox,src_crs=tile.tp.crs,dst_crs=self.td_pyramid.crs).boundsif tile_directory_zoom is not None:zoom = tile_directory_zoomelse:zoom = tile_to_zoom_level(tile, dst_pyramid=self.td_pyramid, matching_method=matching_method,precision=matching_precision)if matching_max_zoom is not None:zoom = min([zoom, matching_max_zoom])if fallback_to_higher_zoom:tiles_paths = []while len(tiles_paths) == and zoom >= :tiles_paths = _get_tiles_paths(basepath=self.path,ext=self._ext,pyramid=self.td_pyramid,bounds=td_bounds,zoom=zoom)logger.debug(\"\", len(tiles_paths), zoom)zoom -= else:tiles_paths = _get_tiles_paths(basepath=self.path,ext=self._ext,pyramid=self.td_pyramid,bounds=td_bounds,zoom=zoom)logger.debug(\"\", len(tiles_paths), zoom)return InputTile(tile,tiles_paths=tiles_paths,file_type=self._file_type,profile=self._profile,td_crs=self.td_pyramid.crs,resampling=resampling,read_as_tiledir_func=self._read_as_tiledir_func,**kwargs)", "docstring": "Return InputTile object.\n\nParameters\n----------\ntile : ``Tile``\ntile_directory_zoom : None\n If set, data will be read from exactly this zoom level\nmatching_method : str ('gdal' or 'min') (default: 'gdal')\n gdal: Uses GDAL's standard method. Here, the target resolution is calculated\n by averaging the extent's pixel sizes over both x and y axes. 
This\n approach returns a zoom level which may not have the best quality but will\n speed up reading significantly.\n min: Returns the zoom level which matches the minimum resolution of the\n extents four corner pixels. This approach returns the zoom level with the\n best possible quality but with low performance. If the tile extent is\n outside of the destination pyramid, a TopologicalError will be raised.\nmatching_max_zoom : int (default: None)\n If set, it will prevent reading from zoom levels above the maximum.\nmatching_precision : int\n Round resolutions to n digits before comparing.\nfallback_to_higher_zoom : bool (default: False)\n In case no data is found at zoom level, try to read data from higher zoom\n levels. Enabling this setting can lead to many IO requests in areas with no\n data.\nresampling : string\n raster file: one of \"nearest\", \"average\", \"bilinear\" or \"lanczos\"\n\nReturns\n-------\ninput tile : ``InputTile``\n tile view of input data", "id": "f12812:c0:m1"} {"signature": "def bbox(self, out_crs=None):", "body": "return reproject_geometry(box(*self._bounds),src_crs=self.td_pyramid.crs,dst_crs=self.pyramid.crs if out_crs is None else out_crs)", "docstring": "Return data bounding box.\n\nParameters\n----------\nout_crs : ``rasterio.crs.CRS``\n rasterio CRS object (default: CRS of process pyramid)\n\nReturns\n-------\nbounding box : geometry\n Shapely geometry object", "id": "f12812:c0:m2"} {"signature": "def __init__(self, tile, **kwargs):", "body": "self.tile = tileself._tiles_paths = kwargs[\"\"]self._file_type = kwargs[\"\"]self._profile = kwargs[\"\"]self._td_crs = kwargs[\"\"]self._resampling = kwargs[\"\"]self._read_as_tiledir = kwargs[\"\"]", "docstring": "Initialize.", "id": "f12812:c1:m0"} {"signature": "def read(self,validity_check=False,indexes=None,resampling=None,dst_nodata=None,gdal_opts=None,**kwargs):", "body": "return self._read_as_tiledir(data_type=self._file_type,out_tile=self.tile,td_crs=self._td_crs,tiles_paths=self._tiles_paths,profile=self._profile,validity_check=validity_check,indexes=indexes,resampling=resampling if resampling else self._resampling,dst_nodata=dst_nodata,gdal_opts=gdal_opts,**{k: v for k, v in kwargs.items() if k != \"\"})", "docstring": "Read reprojected & resampled input data.\n\nParameters\n----------\nvalidity_check : bool\n vector file: also run checks if reprojected geometry is valid,\n otherwise throw RuntimeError (default: True)\n\nindexes : list or int\n raster file: a list of band numbers; None will read all.\ndst_nodata : int or float, optional\n raster file: if not set, the nodata value from the source dataset\n will be used\ngdal_opts : dict\n raster file: GDAL options passed on to rasterio.Env()\n\nReturns\n-------\ndata : list for vector files or numpy array for raster files", "id": "f12812:c1:m1"} {"signature": "def is_empty(self):", "body": "return len(self._tiles_paths) == ", "docstring": "Check if there is data within this tile.\n\nReturns\n-------\nis empty : bool", "id": "f12812:c1:m2"} {"signature": "def __init__(self, output_params, **kwargs):", "body": "super(OutputData, self).__init__(output_params)self.path = output_params[\"\"]self.file_extension = \"\"self.output_params = output_paramsself.output_params[\"\"] = PNG_DEFAULT_PROFILE[\"\"]self.nodata = output_params.get(\"\", PNG_DEFAULT_PROFILE[\"\"])self._bucket = self.path.split(\"\")[] if self.path.startswith(\"\") else None", "docstring": "Initialize.", "id": "f12813:c0:m0"} {"signature": "def write(self, process_tile, data):", "body": "rgba 
= self._prepare_array_for_png(data)data = ma.masked_where(rgba == self.nodata, rgba)if data.mask.all():logger.debug(\"\")else:bucket_resource = get_boto3_bucket(self._bucket) if self._bucket else Nonefor tile in self.pyramid.intersecting(process_tile):out_path = self.get_path(tile)self.prepare_path(tile)out_tile = BufferedTile(tile, self.pixelbuffer)write_raster_window(in_tile=process_tile,in_data=data,out_profile=self.profile(out_tile),out_tile=out_tile,out_path=out_path,bucket_resource=bucket_resource)", "docstring": "Write data from one or more process tiles.\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n must be member of process ``TilePyramid``", "id": "f12813:c0:m1"} {"signature": "def read(self, output_tile, **kwargs):", "body": "try:return read_raster_no_crs(self.get_path(output_tile))except FileNotFoundError:return self.empty(output_tile)", "docstring": "Read existing process output.\n\nParameters\n----------\noutput_tile : ``BufferedTile``\n must be member of output ``TilePyramid``\n\nReturns\n-------\nprocess output : ``BufferedTile`` with appended data", "id": "f12813:c0:m2"} {"signature": "def is_valid_with_config(self, config):", "body": "return validate_values(config, [(\"\", str)])", "docstring": "Check if output format is valid with other process parameters.\n\nParameters\n----------\nconfig : dictionary\n output configuration parameters\n\nReturns\n-------\nis_valid : bool", "id": "f12813:c0:m3"} {"signature": "def profile(self, tile=None):", "body": "dst_metadata = PNG_DEFAULT_PROFILEdst_metadata.pop(\"\", None)if tile is not None:dst_metadata.update(width=tile.width, height=tile.height, affine=tile.affine,crs=tile.crs)try:dst_metadata.update(count=self.output_params[\"\"])except KeyError:passreturn dst_metadata", "docstring": "Create a metadata dictionary for rasterio.\n\nParameters\n----------\ntile : ``BufferedTile``\n\nReturns\n-------\nmetadata : dictionary\n output profile dictionary used for rasterio.", "id": "f12813:c0:m4"} {"signature": "def for_web(self, data):", "body": "rgba = self._prepare_array_for_png(data)data = ma.masked_where(rgba == self.nodata, rgba)return memory_file(data, self.profile()), ''", "docstring": "Convert data to web output.\n\nParameters\n----------\ndata : array\n\nReturns\n-------\nweb data : array", "id": "f12813:c0:m5"} {"signature": "def empty(self, process_tile):", "body": "bands = (self.output_params[\"\"]if \"\" in self.output_paramselse PNG_DEFAULT_PROFILE[\"\"])return ma.masked_array(data=ma.zeros((bands, ) + process_tile.shape),mask=ma.zeros((bands, ) + process_tile.shape),dtype=PNG_DEFAULT_PROFILE[\"\"])", "docstring": "Return empty data.\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n must be member of process ``TilePyramid``\n\nReturns\n-------\nempty data : array\n empty array with data type given in output parameters", "id": "f12813:c0:m6"} {"signature": "def __init__(self, output_params, **kwargs):", "body": "super(OutputData, self).__init__(output_params)self.path = output_params[\"\"]self.file_extension = \"\"self.output_params = output_paramsself._profile = dict(PNG_DEFAULT_PROFILE)self.nodata = self._profile[\"\"]try:self.old_band_num = output_params[\"\"]self._profile.update(count=)except KeyError:self.old_band_num = Falseself.output_params.update(dtype=self._profile[\"\"])self._bucket = self.path.split(\"\")[] if self.path.startswith(\"\") else None", "docstring": "Initialize.", "id": "f12814:c0:m0"} {"signature": "def write(self, process_tile, data):", "body": "data = 
self._prepare_array(data)if data.mask.all():logger.debug(\"\")else:bucket_resource = get_boto3_bucket(self._bucket) if self._bucket else Nonefor tile in self.pyramid.intersecting(process_tile):out_path = self.get_path(tile)self.prepare_path(tile)out_tile = BufferedTile(tile, self.pixelbuffer)write_raster_window(in_tile=process_tile,in_data=data,out_profile=self.profile(out_tile),out_tile=out_tile,out_path=out_path,bucket_resource=bucket_resource)", "docstring": "Write data from process tiles into PNG file(s).\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n must be member of process ``TilePyramid``", "id": "f12814:c0:m1"} {"signature": "def read(self, output_tile, **kwargs):", "body": "try:return ma.masked_values(read_raster_no_crs(self.get_path(output_tile), indexes=( if self.old_band_num else )),)except FileNotFoundError:return self.empty(output_tile)", "docstring": "Read existing process output.\n\nParameters\n----------\noutput_tile : ``BufferedTile``\n must be member of output ``TilePyramid``\n\nReturns\n-------\nprocess output : ``BufferedTile`` with appended data", "id": "f12814:c0:m2"} {"signature": "def is_valid_with_config(self, config):", "body": "return validate_values(config, [(\"\", str)])", "docstring": "Check if output format is valid with other process parameters.\n\nParameters\n----------\nconfig : dictionary\n output configuration parameters\n\nReturns\n-------\nis_valid : bool", "id": "f12814:c0:m3"} {"signature": "def profile(self, tile=None):", "body": "dst_metadata = dict(self._profile)if tile is not None:dst_metadata.update(width=tile.width,height=tile.height,affine=tile.affine, driver=\"\",crs=tile.crs)return dst_metadata", "docstring": "Create a metadata dictionary for rasterio.\n\nParameters\n----------\ntile : ``BufferedTile``\n\nReturns\n-------\nmetadata : dictionary\n output profile dictionary used for rasterio.", "id": "f12814:c0:m4"} {"signature": "def for_web(self, data):", "body": "return (memory_file(self._prepare_array(data), self.profile()), \"\")", "docstring": "Convert data to web output.\n\nParameters\n----------\ndata : array\n\nReturns\n-------\nMemoryFile(), MIME type", "id": "f12814:c0:m5"} {"signature": "def empty(self, process_tile):", "body": "return ma.masked_values(np.zeros(process_tile.shape), )", "docstring": "Return empty data.\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n must be member of process ``TilePyramid``\n\nReturns\n-------\nempty data : array or list\n empty array with correct data type for raster data or empty list\n for vector data", "id": "f12814:c0:m6"} {"signature": "def __init__(self, output_params, **kwargs):", "body": "super(OutputData, self).__init__(output_params)self.path = output_params[\"\"]self.file_extension = \"\"self.output_params = output_paramsself._bucket = self.path.split(\"\")[] if self.path.startswith(\"\") else None", "docstring": "Initialize.", "id": "f12815:c0:m0"} {"signature": "def read(self, output_tile, **kwargs):", "body": "path = self.get_path(output_tile)try:with fiona.open(path, \"\") as src:return list(src)except DriverError as e:for i in (\"\", \"\"):if i in str(e):return self.empty(output_tile)else:raise", "docstring": "Read existing process output.\n\nParameters\n----------\noutput_tile : ``BufferedTile``\n must be member of output ``TilePyramid``\n\nReturns\n-------\nprocess output : list", "id": "f12815:c0:m1"} {"signature": "def write(self, process_tile, data):", "body": "if data is None or len(data) == :returnif not isinstance(data, (list, 
types.GeneratorType)):raise TypeError(\"\")data = list(data)if not len(data):logger.debug(\"\")else:bucket_resource = get_boto3_bucket(self._bucket) if self._bucket else Nonefor tile in self.pyramid.intersecting(process_tile):out_path = self.get_path(tile)self.prepare_path(tile)out_tile = BufferedTile(tile, self.pixelbuffer)write_vector_window(in_data=data,out_schema=self.output_params[\"\"],out_tile=out_tile,out_path=out_path,bucket_resource=bucket_resource)", "docstring": "Write data from process tiles into GeoJSON file(s).\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n must be member of process ``TilePyramid``", "id": "f12815:c0:m2"} {"signature": "def is_valid_with_config(self, config):", "body": "validate_values(config, [(\"\", dict), (\"\", str)])validate_values(config[\"\"], [(\"\", dict), (\"\", str)])if config[\"\"][\"\"] not in [\"\", \"\", \"\", \"\", \"\",\"\", \"\"]:raise TypeError(\"\")return True", "docstring": "Check if output format is valid with other process parameters.\n\nParameters\n----------\nconfig : dictionary\n output configuration parameters\n\nReturns\n-------\nis_valid : bool", "id": "f12815:c0:m3"} {"signature": "def empty(self, process_tile=None):", "body": "return []", "docstring": "Return empty data.\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n must be member of process ``TilePyramid``\n\nReturns\n-------\nempty data : list", "id": "f12815:c0:m4"} {"signature": "def for_web(self, data):", "body": "return list(data), \"\"", "docstring": "Convert data to web output (raster only).\n\nParameters\n----------\ndata : array\n\nReturns\n-------\nweb data : array", "id": "f12815:c0:m5"} {"signature": "def open(self, tile, process):", "body": "return InputTile(tile, process)", "docstring": "Open process output as input for other process.\n\nParameters\n----------\ntile : ``Tile``\nprocess : ``MapcheteProcess``", "id": "f12815:c0:m6"} {"signature": "def __init__(self, tile, process):", "body": "self.tile = tileself.process = processself._cache = {}", "docstring": "Initialize.", "id": "f12815:c1:m0"} {"signature": "def read(self, validity_check=True, no_neighbors=False, **kwargs):", "body": "if no_neighbors:raise NotImplementedError()return self._from_cache(validity_check=validity_check)", "docstring": "Read data from process output.\n\nParameters\n----------\nvalidity_check : bool\n run geometry validity check (default: True)\nno_neighbors : bool\n don't include neighbor tiles if there is a pixelbuffer (default:\n False)\n\nReturns\n-------\nfeatures : list\n GeoJSON-like list of features", "id": "f12815:c1:m1"} {"signature": "def is_empty(self, validity_check=True):", "body": "return len(self._from_cache(validity_check=validity_check)) == ", "docstring": "Check if there is data within this tile.\n\nReturns\n-------\nis empty : bool", "id": "f12815:c1:m2"} {"signature": "def __enter__(self):", "body": "return self", "docstring": "Enable context manager.", "id": "f12815:c1:m4"} {"signature": "def __exit__(self, t, v, tb):", "body": "self._cache = {}", "docstring": "Clear cache on close.", "id": "f12815:c1:m5"} {"signature": "def get_segmentize_value(input_file=None, tile_pyramid=None):", "body": "warnings.warn(DeprecationWarning(\"\"))return io.get_segmentize_value(input_file, tile_pyramid)", "docstring": "Return the recommended segmentation value in input file units.\n\nIt is calculated by multiplying raster pixel size with tile shape in\npixels.\n\nParameters\n----------\ninput_file : str\n location of a file readable by 
rasterio\ntile_pyramid : ``TilePyramid`` or ``BufferedTilePyramid``\n tile pyramid to estimate target tile size\n\nReturns\n-------\nsegmentize value : float\n suggested length of line segmentation to reproject file bounds", "id": "f12816:m0"} {"signature": "def __init__(self, input_params, **kwargs):", "body": "super(InputData, self).__init__(input_params, **kwargs)self.path = input_params[\"\"]", "docstring": "Initialize.", "id": "f12816:c0:m0"} {"signature": "@cached_propertydef profile(self):", "body": "with rasterio.open(self.path, \"\") as src:return deepcopy(src.meta)", "docstring": "Return raster metadata.", "id": "f12816:c0:m1"} {"signature": "def open(self, tile, **kwargs):", "body": "return InputTile(tile, self, **kwargs)", "docstring": "Return InputTile object.\n\nParameters\n----------\ntile : ``Tile``\n\nReturns\n-------\ninput tile : ``InputTile``\n tile view of input data", "id": "f12816:c0:m2"} {"signature": "def bbox(self, out_crs=None):", "body": "out_crs = self.pyramid.crs if out_crs is None else out_crswith rasterio.open(self.path) as inp:inp_crs = inp.crsout_bbox = bbox = box(*inp.bounds)if inp_crs != out_crs:return reproject_geometry(segmentize_geometry(bbox, inp.transform[] * self.pyramid.tile_size),src_crs=inp_crs, dst_crs=out_crs)else:return out_bbox", "docstring": "Return data bounding box.\n\nParameters\n----------\nout_crs : ``rasterio.crs.CRS``\n rasterio CRS object (default: CRS of process pyramid)\n\nReturns\n-------\nbounding box : geometry\n Shapely geometry object", "id": "f12816:c0:m3"} {"signature": "def exists(self):", "body": "return os.path.isfile(self.path)", "docstring": "Check if data or file even exists.\n\nReturns\n-------\nfile exists : bool", "id": "f12816:c0:m4"} {"signature": "def __init__(self, tile, raster_file, resampling=\"\", **kwargs):", "body": "self.tile = tileself.raster_file = raster_fileself.resampling = resamplingif io.path_is_remote(raster_file.path):file_ext = os.path.splitext(raster_file.path)[]self.gdal_opts = {\"\": True,\"\": \"\" % file_ext}else:self.gdal_opts = {}", "docstring": "Initialize.", "id": "f12816:c1:m0"} {"signature": "def read(self, indexes=None, **kwargs):", "body": "return read_raster_window(self.raster_file.path,self.tile,indexes=self._get_band_indexes(indexes),resampling=self.resampling,gdal_opts=self.gdal_opts)", "docstring": "Read reprojected & resampled input data.\n\nReturns\n-------\ndata : array", "id": "f12816:c1:m1"} {"signature": "def is_empty(self, indexes=None):", "body": "return not self.tile.bbox.intersects(self.raster_file.bbox(out_crs=self.tile.crs))", "docstring": "Check if there is data within this tile.\n\nReturns\n-------\nis empty : bool", "id": "f12816:c1:m2"} {"signature": "def _get_band_indexes(self, indexes=None):", "body": "if indexes:if isinstance(indexes, list):return indexeselse:return [indexes]else:return range(, self.raster_file.profile[\"\"] + )", "docstring": "Return valid band indexes.", "id": "f12816:c1:m3"} {"signature": "def execute(mp):", "body": "pass", "docstring": "Insert your python code here.\n\nAccess input data specified in the .mapchete file:\n\nwith mp.open(\"\") as src:\n data = src.read()\n\nFor vector data a list of features is returned, for raster data a numpy\narray. 
Data is already reprojected.\n\nTo write the process output simply return a feature list or numpy array:\n\nreturn modified_data\n\nPlease note the returned data type has to match the output type specified\nin the .mapchete file.", "id": "f12817:m0"} {"signature": "def open(config, mode=\"\", zoom=None, bounds=None, single_input_file=None,with_cache=False, debug=False):", "body": "return Mapchete(MapcheteConfig(config, mode=mode, zoom=zoom, bounds=bounds,single_input_file=single_input_file, debug=debug),with_cache=with_cache)", "docstring": "Open a Mapchete process.\n\nParameters\n----------\nconfig : MapcheteConfig object, config dict or path to mapchete file\n Mapchete process configuration\nmode : string\n * ``memory``: Generate process output on demand without reading\n pre-existing data or writing new data.\n * ``readonly``: Just read data without processing new data.\n * ``continue``: (default) Don't overwrite existing output.\n * ``overwrite``: Overwrite existing output.\nzoom : list or integer\n process zoom level or a pair of minimum and maximum zoom level\nbounds : tuple\n left, bottom, right, top process boundaries in output pyramid\nsingle_input_file : string\n single input file if supported by process\nwith_cache : bool\n process output data cached in memory\n\nReturns\n-------\nMapchete\n a Mapchete process object", "id": "f12819:m0"} {"signature": "def count_tiles(geometry, pyramid, minzoom, maxzoom, init_zoom=):", "body": "if not <= init_zoom <= minzoom <= maxzoom:raise ValueError(\"\")unbuffered_pyramid = TilePyramid(pyramid.grid, tile_size=pyramid.tile_size,metatiling=pyramid.metatiling)geometry = geometry.buffer(-)return _count_tiles([unbuffered_pyramid.tile(*tile_id)for tile_id in product([init_zoom],range(pyramid.matrix_height(init_zoom)),range(pyramid.matrix_width(init_zoom)))], geometry, minzoom, maxzoom)", "docstring": "Count number of tiles intersecting with geometry.\n\nParameters\n----------\ngeometry : shapely geometry\npyramid : TilePyramid\nminzoom : int\nmaxzoom : int\ninit_zoom : int\n\nReturns\n-------\nnumber of tiles", "id": "f12819:m1"} {"signature": "def _get_zoom_level(zoom, process):", "body": "if zoom is None:return reversed(process.config.zoom_levels)if isinstance(zoom, int):return [zoom]elif len(zoom) == :return reversed(range(min(zoom), max(zoom)+))elif len(zoom) == :return zoom", "docstring": "Determine zoom levels.", "id": "f12819:m6"} {"signature": "def _process_worker(process, process_tile):", "body": "logger.debug((process_tile.id, \"\" % current_process().name))if (process.config.mode == \"\" andprocess.config.output.tiles_exist(process_tile)):logger.debug((process_tile.id, \"\"))return ProcessInfo(tile=process_tile,processed=False,process_msg=\"\",written=False,write_msg=\"\")else:with Timer() as t:try:output = process.execute(process_tile, raise_nodata=True)except MapcheteNodataTile:output = Noneprocessor_message = \"\" % tlogger.debug((process_tile.id, processor_message))writer_info = process.write(process_tile, output)return ProcessInfo(tile=process_tile,processed=True,process_msg=processor_message,written=writer_info.written,write_msg=writer_info.write_msg)", "docstring": "Worker function running the process.", "id": "f12819:m7"} {"signature": "def __init__(self, config, with_cache=False):", "body": "logger.info(\"\")if not isinstance(config, MapcheteConfig):raise TypeError(\"\")self.config = configself.process_name = self.config.process_nameself.with_cache = True if self.config.mode == \"\" else with_cacheif 
self.with_cache:self.process_tile_cache = LRUCache(maxsize=)self.current_processes = {}self.process_lock = threading.Lock()self._count_tiles_cache = {}", "docstring": "Initialize Mapchete processing endpoint.\n\nParameters\n----------\nconfig : MapcheteConfig\n Mapchete process configuration\nwith_cache : bool\n cache processed output data in memory (default: False)", "id": "f12819:c0:m0"} {"signature": "def get_process_tiles(self, zoom=None):", "body": "if zoom or zoom == :for tile in self.config.process_pyramid.tiles_from_geom(self.config.area_at_zoom(zoom), zoom):yield tileelse:for zoom in reversed(self.config.zoom_levels):for tile in self.config.process_pyramid.tiles_from_geom(self.config.area_at_zoom(zoom), zoom):yield tile", "docstring": "Yield process tiles.\n\nTiles intersecting with the input data bounding boxes as well as\nprocess bounds, if provided, are considered process tiles. This is to\navoid iterating through empty tiles.\n\nParameters\n----------\nzoom : integer\n zoom level process tiles should be returned from; if none is given,\n return all process tiles\n\nyields\n------\nBufferedTile objects", "id": "f12819:c0:m1"} {"signature": "def batch_process(self, zoom=None, tile=None, multi=cpu_count(), max_chunksize=):", "body": "list(self.batch_processor(zoom, tile, multi, max_chunksize))", "docstring": "Process a large batch of tiles.\n\nParameters\n----------\nprocess : MapcheteProcess\n process to be run\nzoom : list or int\n either single zoom level or list of minimum and maximum zoom level;\n None processes all (default: None)\ntile : tuple\n zoom, row and column of tile to be processed (cannot be used with\n zoom)\nmulti : int\n number of workers (default: number of CPU cores)\nmax_chunksize : int\n maximum number of process tiles to be queued for each worker;\n (default: 1)", "id": "f12819:c0:m2"} {"signature": "def batch_processor(self, zoom=None, tile=None, multi=cpu_count(), max_chunksize=):", "body": "if zoom and tile:raise ValueError(\"\")if tile:yield _run_on_single_tile(self, tile)elif multi > :for process_info in _run_with_multiprocessing(self, list(_get_zoom_level(zoom, self)), multi, max_chunksize):yield process_infoelif multi == :for process_info in _run_without_multiprocessing(self, list(_get_zoom_level(zoom, self))):yield process_info", "docstring": "Process a large batch of tiles and yield report messages per tile.\n\nParameters\n----------\nzoom : list or int\n either single zoom level or list of minimum and maximum zoom level;\n None processes all (default: None)\ntile : tuple\n zoom, row and column of tile to be processed (cannot be used with\n zoom)\nmulti : int\n number of workers (default: number of CPU cores)\nmax_chunksize : int\n maximum number of process tiles to be queued for each worker;\n (default: 1)", "id": "f12819:c0:m3"} {"signature": "def count_tiles(self, minzoom, maxzoom, init_zoom=):", "body": "if (minzoom, maxzoom) not in self._count_tiles_cache:self._count_tiles_cache[(minzoom, maxzoom)] = count_tiles(self.config.area_at_zoom(), self.config.process_pyramid,minzoom, maxzoom, init_zoom=)return self._count_tiles_cache[(minzoom, maxzoom)]", "docstring": "Count number of tiles intersecting with geometry.\n\nParameters\n----------\ngeometry : shapely geometry\npyramid : TilePyramid\nminzoom : int\nmaxzoom : int\ninit_zoom : int\n\nReturns\n-------\nnumber of tiles", "id": "f12819:c0:m4"} {"signature": "def execute(self, process_tile, raise_nodata=False):", "body": "if self.config.mode not in [\"\", \"\", \"\"]:raise ValueError(\"\")if 
isinstance(process_tile, tuple):process_tile = self.config.process_pyramid.tile(*process_tile)elif isinstance(process_tile, BufferedTile):passelse:raise TypeError(\"\")if process_tile.zoom not in self.config.zoom_levels:return self.config.output.empty(process_tile)return self._execute(process_tile, raise_nodata=raise_nodata)", "docstring": "Run the Mapchete process.\n\nExecute, write and return data.\n\nParameters\n----------\nprocess_tile : Tile or tile index tuple\n Member of the process tile pyramid (not necessarily the output\n pyramid, if output has a different metatiling setting)\n\nReturns\n-------\ndata : NumPy array or features\n process output", "id": "f12819:c0:m5"} {"signature": "def read(self, output_tile):", "body": "if self.config.mode not in [\"\", \"\", \"\"]:raise ValueError(\"\")if isinstance(output_tile, tuple):output_tile = self.config.output_pyramid.tile(*output_tile)elif isinstance(output_tile, BufferedTile):passelse:raise TypeError(\"\")return self.config.output.read(output_tile)", "docstring": "Read from written process output.\n\nParameters\n----------\noutput_tile : BufferedTile or tile index tuple\n Member of the output tile pyramid (not necessarily the process\n pyramid, if output has a different metatiling setting)\n\nReturns\n-------\ndata : NumPy array or features\n process output", "id": "f12819:c0:m6"} {"signature": "def write(self, process_tile, data):", "body": "if isinstance(process_tile, tuple):process_tile = self.config.process_pyramid.tile(*process_tile)elif not isinstance(process_tile, BufferedTile):raise ValueError(\"\" % type(process_tile))if self.config.mode not in [\"\", \"\"]:raise ValueError(\"\")if self.config.mode == \"\" and (self.config.output.tiles_exist(process_tile)):message = \"\"logger.debug((process_tile.id, message))return ProcessInfo(tile=process_tile,processed=False,process_msg=None,written=False,write_msg=message)elif data is None:message = \"\"logger.debug((process_tile.id, message))return ProcessInfo(tile=process_tile,processed=False,process_msg=None,written=False,write_msg=message)else:with Timer() as t:self.config.output.write(process_tile=process_tile, data=data)message = \"\" % tlogger.debug((process_tile.id, message))return ProcessInfo(tile=process_tile,processed=False,process_msg=None,written=True,write_msg=message)", "docstring": "Write data into output format.\n\nParameters\n----------\nprocess_tile : BufferedTile or tile index tuple\n process tile\ndata : NumPy array or features\n data to be written", "id": "f12819:c0:m7"} {"signature": "def get_raw_output(self, tile, _baselevel_readonly=False):", "body": "if not isinstance(tile, (BufferedTile, tuple)):raise TypeError(\"\")if isinstance(tile, tuple):tile = self.config.output_pyramid.tile(*tile)if _baselevel_readonly:tile = self.config.baselevels[\"\"].tile(*tile.id)if tile.zoom not in self.config.zoom_levels:return self.config.output.empty(tile)if tile.crs != self.config.process_pyramid.crs:raise NotImplementedError(\"\")if self.config.mode == \"\":process_tile = self.config.process_pyramid.intersecting(tile)[]return self._extract(in_tile=process_tile,in_data=self._execute_using_cache(process_tile),out_tile=tile)process_tile = self.config.process_pyramid.intersecting(tile)[]if tile.pixelbuffer > self.config.output.pixelbuffer:output_tiles = list(self.config.output_pyramid.tiles_from_bounds(tile.bounds, tile.zoom))else:output_tiles = self.config.output_pyramid.intersecting(tile)if self.config.mode == \"\" or _baselevel_readonly:if 
self.config.output.tiles_exist(process_tile):return self._read_existing_output(tile, output_tiles)else:return self.config.output.empty(tile)elif self.config.mode == \"\" and not _baselevel_readonly:if self.config.output.tiles_exist(process_tile):return self._read_existing_output(tile, output_tiles)else:return self._process_and_overwrite_output(tile, process_tile)elif self.config.mode == \"\" and not _baselevel_readonly:return self._process_and_overwrite_output(tile, process_tile)", "docstring": "Get output raw data.\n\nThis function won't work with multiprocessing, as it uses the\n``threading.Lock()`` class.\n\nParameters\n----------\ntile : tuple, Tile or BufferedTile\n If a tile index is given, a tile from the output pyramid will be\n assumed. Tile cannot be bigger than process tile!\n\nReturns\n-------\ndata : NumPy array or features\n process output", "id": "f12819:c0:m8"} {"signature": "def _extract(self, in_tile=None, in_data=None, out_tile=None):", "body": "return self.config.output.extract_subset(input_data_tiles=[(in_tile, in_data)],out_tile=out_tile)", "docstring": "Extract data from tile.", "id": "f12819:c0:m12"} {"signature": "def __enter__(self):", "body": "return self", "docstring": "Enable context manager.", "id": "f12819:c0:m16"} {"signature": "def __exit__(self, t, v, tb):", "body": "for ip in self.config.input.values():if ip is not None:ip.cleanup()if self.with_cache:self.process_tile_cache = Noneself.current_processes = Noneself.process_lock = None", "docstring": "Cleanup on close.", "id": "f12819:c0:m17"} {"signature": "def __init__(self, tile, config=None, params=None):", "body": "self.identifier = \"\"self.title = \"\"self.version = \"\"self.abstract = \"\"self.tile = tileself.tile_pyramid = tile.tile_pyramidself.params = params if params else config.params_at_zoom(tile.zoom)self.config = config", "docstring": "Initialize Mapchete process.", "id": "f12819:c1:m0"} {"signature": "def write(self, data, **kwargs):", "body": "raise DeprecationWarning(\"\")", "docstring": "Deprecated.", "id": "f12819:c1:m1"} {"signature": "def read(self, **kwargs):", "body": "if self.tile.pixelbuffer > self.config.output.pixelbuffer:output_tiles = list(self.config.output_pyramid.tiles_from_bounds(self.tile.bounds, self.tile.zoom))else:output_tiles = self.config.output_pyramid.intersecting(self.tile)return self.config.output.extract_subset(input_data_tiles=[(output_tile, self.config.output.read(output_tile))for output_tile in output_tiles],out_tile=self.tile,)", "docstring": "Read existing output data from a previous run.\n\nReturns\n-------\nprocess output : NumPy array (raster) or feature iterator (vector)", "id": "f12819:c1:m2"} {"signature": "def open(self, input_id, **kwargs):", "body": "if not isinstance(input_id, str):return input_id.open(self.tile, **kwargs)if input_id not in self.params[\"\"]:raise ValueError(\"\" % input_id)return self.params[\"\"][input_id].open(self.tile, **kwargs)", "docstring": "Open input data.\n\nParameters\n----------\ninput_id : string\n input identifier from configuration file or file path\nkwargs : driver specific parameters (e.g. 
resampling)\n\nReturns\n-------\ntiled input data : InputTile\n reprojected input data within tile", "id": "f12819:c1:m3"} {"signature": "def hillshade(self, elevation, azimuth=, altitude=, z=, scale=):", "body": "return commons_hillshade.hillshade(elevation, self, azimuth, altitude, z, scale)", "docstring": "Calculate hillshading from elevation data.\n\nParameters\n----------\nelevation : array\n input elevation data\nazimuth : float\n horizontal angle of light source (315: North-West)\naltitude : float\n vertical angle of light source (90 would result in slope shading)\nz : float\n vertical exaggeration factor\nscale : float\n scale factor of pixel size units versus height units (insert 112000\n when having elevation values in meters in a geodetic projection)\n\nReturns\n-------\nhillshade : array", "id": "f12819:c1:m4"} {"signature": "def contours(self, elevation, interval=, field='', base=):", "body": "return commons_contours.extract_contours(elevation, self.tile, interval=interval, field=field, base=base)", "docstring": "Extract contour lines from elevation data.\n\nParameters\n----------\nelevation : array\n input elevation data\ninterval : integer\n elevation value interval when drawing contour lines\nfield : string\n output field name containing elevation value\nbase : integer\n elevation base value the intervals are computed from\n\nReturns\n-------\ncontours : iterable\n contours as GeoJSON-like pairs of properties and geometry", "id": "f12819:c1:m5"} {"signature": "def clip(self, array, geometries, inverted=False, clip_buffer=):", "body": "return commons_clip.clip_array_with_vector(array, self.tile.affine, geometries,inverted=inverted, clip_buffer=clip_buffer*self.tile.pixel_x_size)", "docstring": "Clip array by geometry.\n\nParameters\n----------\narray : array\n raster data to be clipped\ngeometries : iterable\n geometries used to clip source array\ninverted : bool\n invert clipping (default: False)\nclip_buffer : int\n buffer (in pixels) geometries before applying clip\n\nReturns\n-------\nclipped array : array", "id": "f12819:c1:m6"} {"signature": "def read_raster_window(input_files,tile,indexes=None,resampling=\"\",src_nodata=None,dst_nodata=None,gdal_opts=None):", "body": "with rasterio.Env(**get_gdal_options(gdal_opts,is_remote=path_is_remote(input_files[] if isinstance(input_files, list) else input_files, s3=True))) as env:logger.debug(\"\", input_files, env.options)return _read_raster_window(input_files,tile,indexes=indexes,resampling=resampling,src_nodata=src_nodata,dst_nodata=dst_nodata)", "docstring": "Return NumPy arrays from an input raster.\n\nNumPy arrays are reprojected and resampled to tile properties from input\nraster. 
If tile boundaries cross the antimeridian, data on the other side\nof the antimeridian will be read and concatenated to the numpy array\naccordingly.\n\nParameters\n----------\ninput_files : string or list\n path to a raster file or list of paths to multiple raster files readable by\n rasterio.\ntile : Tile\n a Tile object\nindexes : list or int\n a list of band numbers; None will read all.\nresampling : string\n one of \"nearest\", \"average\", \"bilinear\" or \"lanczos\"\nsrc_nodata : int or float, optional\n if not set, the nodata value from the source dataset will be used\ndst_nodata : int or float, optional\n if not set, the nodata value from the source dataset will be used\ngdal_opts : dict\n GDAL options passed on to rasterio.Env()\n\nReturns\n-------\nraster : MaskedArray", "id": "f12820:m0"} {"signature": "def _get_warped_array(input_file=None,indexes=None,dst_bounds=None,dst_shape=None,dst_crs=None,resampling=None,src_nodata=None,dst_nodata=None):", "body": "try:return _rasterio_read(input_file=input_file,indexes=indexes,dst_bounds=dst_bounds,dst_shape=dst_shape,dst_crs=dst_crs,resampling=resampling,src_nodata=src_nodata,dst_nodata=dst_nodata)except Exception as e:logger.exception(\"\", input_file, e)raise", "docstring": "Extract a numpy array from a raster file.", "id": "f12820:m3"} {"signature": "def read_raster_no_crs(input_file, indexes=None, gdal_opts=None):", "body": "with warnings.catch_warnings():warnings.simplefilter(\"\")try:with rasterio.Env(**get_gdal_options(gdal_opts, is_remote=path_is_remote(input_file, s3=True))):with rasterio.open(input_file, \"\") as src:return src.read(indexes=indexes, masked=True)except RasterioIOError as e:for i in (\"\", \"\"):if i in str(e):raise FileNotFoundError(\"\" % input_file)else:raise", "docstring": "Wrapper function around rasterio.open().read().\n\nParameters\n----------\ninput_file : str\n Path to file\nindexes : int or list\n Band index or list of band indexes to be read.\n\nReturns\n-------\nMaskedArray\n\nRaises\n------\nFileNotFoundError if file cannot be found.", "id": "f12820:m5"} {"signature": "def write_raster_window(in_tile=None, in_data=None, out_profile=None, out_tile=None, out_path=None,tags=None, bucket_resource=None):", "body": "if not isinstance(out_path, str):raise TypeError(\"\")logger.debug(\"\", out_path)if out_path == \"\":raise DeprecationWarning(\"\"\"\")out_tile = in_tile if out_tile is None else out_tile_validate_write_window_params(in_tile, out_tile, in_data, out_profile)window_data = extract_from_array(in_raster=in_data,in_affine=in_tile.affine,out_tile=out_tile) if in_tile != out_tile else in_dataif \"\" in out_profile:out_profile[\"\"] = out_profile.pop(\"\")if window_data.all() is not ma.masked:try:if out_path.startswith(\"\"):with RasterWindowMemoryFile(in_tile=out_tile,in_data=window_data,out_profile=out_profile,out_tile=out_tile,tags=tags) as memfile:logger.debug((out_tile.id, \"\", out_path))bucket_resource.put_object(Key=\"\".join(out_path.split(\"\")[:]),Body=memfile)else:with rasterio.open(out_path, '', **out_profile) as dst:logger.debug((out_tile.id, \"\", out_path))dst.write(window_data.astype(out_profile[\"\"], copy=False))_write_tags(dst, tags)except Exception as e:logger.exception(\"\", out_path, e)raiseelse:logger.debug((out_tile.id, \"\", out_path))", "docstring": "Write a window from a numpy array to an output file.\n\nParameters\n----------\nin_tile : ``BufferedTile``\n ``BufferedTile`` with a data attribute holding NumPy data\nin_data : array\nout_profile : dictionary\n metadata 
dictionary for rasterio\nout_tile : ``Tile``\n provides output boundaries; if None, in_tile is used\nout_path : string\n output path to write to\ntags : optional tags to be added to GeoTIFF file\nbucket_resource : boto3 bucket resource to write to in case of S3 output", "id": "f12820:m6"} {"signature": "def extract_from_array(in_raster=None, in_affine=None, out_tile=None):", "body": "if isinstance(in_raster, ReferencedRaster):in_affine = in_raster.affinein_raster = in_raster.dataminrow, maxrow, mincol, maxcol = bounds_to_ranges(out_bounds=out_tile.bounds, in_affine=in_affine, in_shape=in_raster.shape)if (minrow >= andmincol >= andmaxrow <= in_raster.shape[-] andmaxcol <= in_raster.shape[-]):return in_raster[..., minrow:maxrow, mincol:maxcol]else:raise ValueError(\"\")", "docstring": "Extract raster data window array.\n\nParameters\n----------\nin_raster : array or ReferencedRaster\nin_affine : ``Affine`` required if in_raster is an array\nout_tile : ``BufferedTile``\n\nReturns\n-------\nextracted array : array", "id": "f12820:m9"} {"signature": "def resample_from_array(in_raster=None,in_affine=None,out_tile=None,in_crs=None,resampling=\"\",nodataval=):", "body": "if isinstance(in_raster, ma.MaskedArray):passif isinstance(in_raster, np.ndarray):in_raster = ma.MaskedArray(in_raster, mask=in_raster == nodataval)elif isinstance(in_raster, ReferencedRaster):in_affine = in_raster.affinein_crs = in_raster.crsin_raster = in_raster.dataelif isinstance(in_raster, tuple):in_raster = ma.MaskedArray(data=np.stack(in_raster),mask=np.stack([band.maskif isinstance(band, ma.masked_array)else np.where(band == nodataval, True, False)for band in in_raster]),fill_value=nodataval)else:raise TypeError(\"\" % type(in_raster))if in_raster.ndim == :in_raster = ma.expand_dims(in_raster, axis=)elif in_raster.ndim == :passelse:raise TypeError(\"\")if in_raster.fill_value != nodataval:ma.set_fill_value(in_raster, nodataval)out_shape = (in_raster.shape[], ) + out_tile.shapedst_data = np.empty(out_shape, in_raster.dtype)in_raster = ma.masked_array(data=in_raster.filled(), mask=in_raster.mask, fill_value=nodataval)reproject(in_raster,dst_data,src_transform=in_affine,src_crs=in_crs if in_crs else out_tile.crs,dst_transform=out_tile.affine,dst_crs=out_tile.crs,resampling=Resampling[resampling])return ma.MaskedArray(dst_data, mask=dst_data == nodataval)", "docstring": "Extract and resample from array to target tile.\n\nParameters\n----------\nin_raster : array\nin_affine : ``Affine``\nout_tile : ``BufferedTile``\nresampling : string\n one of rasterio's resampling methods (default: nearest)\nnodataval : integer or float\n raster nodata value (default: 0)\n\nReturns\n-------\nresampled array : array", "id": "f12820:m10"} {"signature": "def create_mosaic(tiles, nodata=):", "body": "if isinstance(tiles, GeneratorType):tiles = list(tiles)elif not isinstance(tiles, list):raise TypeError(\"\")if not all([isinstance(pair, tuple) for pair in tiles]):raise TypeError(\"\")if not all([all([isinstance(tile, BufferedTile), isinstance(data, np.ndarray)])for tile, data in tiles]):raise TypeError(\"\")if len(tiles) == :raise ValueError(\"\")logger.debug(\"\", len(tiles))if len(tiles) == :tile, data = tiles[]return ReferencedRaster(data=data,affine=tile.affine,bounds=tile.bounds,crs=tile.crs)pyramid, resolution, dtype = _get_tiles_properties(tiles)shift = _shift_required(tiles)m_left, m_bottom, m_right, m_top = None, None, None, Nonefor tile, data in tiles:num_bands = data.shape[] if data.ndim > else left, bottom, right, top = tile.boundsif 
shift:left += pyramid.x_size / right += pyramid.x_size / if right > pyramid.right:right -= pyramid.x_sizeleft -= pyramid.x_sizem_left = min([left, m_left]) if m_left is not None else leftm_bottom = min([bottom, m_bottom]) if m_bottom is not None else bottomm_right = max([right, m_right]) if m_right is not None else rightm_top = max([top, m_top]) if m_top is not None else topheight = int(round((m_top - m_bottom) / resolution))width = int(round((m_right - m_left) / resolution))mosaic = ma.MaskedArray(data=np.full((num_bands, height, width), dtype=dtype, fill_value=nodata),mask=np.ones((num_bands, height, width)))affine = Affine(resolution, , m_left, , -resolution, m_top)for tile, data in tiles:data = prepare_array(data, nodata=nodata, dtype=dtype)t_left, t_bottom, t_right, t_top = tile.boundsif shift:t_left += pyramid.x_size / t_right += pyramid.x_size / if t_right > pyramid.right:t_right -= pyramid.x_sizet_left -= pyramid.x_sizeminrow, maxrow, mincol, maxcol = bounds_to_ranges(out_bounds=(t_left, t_bottom, t_right, t_top),in_affine=affine,in_shape=(height, width))mosaic[:, minrow:maxrow, mincol:maxcol] = datamosaic.mask[:, minrow:maxrow, mincol:maxcol] = data.maskif shift:affine = Affine(resolution, , m_left - pyramid.x_size / , , -resolution, m_top)return ReferencedRaster(data=mosaic,affine=affine,bounds=Bounds(m_left, m_bottom, m_right, m_top),crs=tile.crs)", "docstring": "Create a mosaic from tiles. Tiles must be connected (also possible over Antimeridian),\notherwise strange things can happen!\n\nParameters\n----------\ntiles : iterable\n an iterable containing tuples of a BufferedTile and an array\nnodata : integer or float\n raster nodata value to initialize the mosaic with (default: 0)\n\nReturns\n-------\nmosaic : ReferencedRaster", "id": "f12820:m11"} {"signature": "def bounds_to_ranges(out_bounds=None, in_affine=None, in_shape=None):", "body": "return itertools.chain(*from_bounds(*out_bounds, transform=in_affine, height=in_shape[-], width=in_shape[-]).round_lengths(pixel_precision=).round_offsets(pixel_precision=).toranges())", "docstring": "Return bounds range values from geolocated input.\n\nParameters\n----------\nout_bounds : tuple\n left, bottom, right, top\nin_affine : Affine\n input geolocation\nin_shape : tuple\n input shape\n\nReturns\n-------\nminrow, maxrow, mincol, maxcol", "id": "f12820:m12"} {"signature": "def tiles_to_affine_shape(tiles):", "body": "if not tiles:raise TypeError(\"\")pixel_size = tiles[].pixel_x_sizeleft, bottom, right, top = (min([t.left for t in tiles]),min([t.bottom for t in tiles]),max([t.right for t in tiles]),max([t.top for t in tiles]),)return (Affine(pixel_size, , left, , -pixel_size, top),Shape(width=int(round((right - left) / pixel_size, )),height=int(round((top - bottom) / pixel_size, )),))", "docstring": "Return Affine and shape of combined tiles.\n\nParameters\n----------\ntiles : iterable\n an iterable containing BufferedTiles\n\nReturns\n-------\nAffine, Shape", "id": "f12820:m13"} {"signature": "def _shift_required(tiles):", "body": "if tiles[][].tile_pyramid.is_global:tile_cols = sorted(list(set([t[].col for t in tiles])))if tile_cols == list(range(min(tile_cols), max(tile_cols) + )):return Falseelse:def gen_groups(items):\"\"\"\"\"\"j = items[]group = [j]for i in items[:]:if i == j + :group.append(i)else:yield groupgroup = [i]j = iyield groupgroups = list(gen_groups(tile_cols))if len(groups) == :return Falsenormal_distance = groups[-][-] - groups[][]antimeridian_distance = (groups[][-] + 
tiles[][].tile_pyramid.matrix_width(tiles[][].zoom)) - groups[-][]return antimeridian_distance < normal_distanceelse:return False", "docstring": "Determine if distance over antimeridian is shorter than normal distance.", "id": "f12820:m15"} {"signature": "def memory_file(data=None, profile=None):", "body": "memfile = MemoryFile()profile.update(width=data.shape[-], height=data.shape[-])with memfile.open(**profile) as dataset:dataset.write(data)return memfile", "docstring": "Return a rasterio.io.MemoryFile instance from input.\n\nParameters\n----------\ndata : array\n array to be written\nprofile : dict\n rasterio profile for MemoryFile", "id": "f12820:m16"} {"signature": "def prepare_array(data, masked=True, nodata=, dtype=\"\"):", "body": "if isinstance(data, (list, tuple)):return _prepare_iterable(data, masked, nodata, dtype)elif isinstance(data, np.ndarray) and data.ndim == :data = ma.expand_dims(data, axis=)if isinstance(data, ma.MaskedArray):return _prepare_masked(data, masked, nodata, dtype)elif isinstance(data, np.ndarray):if masked:return ma.masked_values(data.astype(dtype, copy=False), nodata, copy=False)else:return data.astype(dtype, copy=False)else:raise ValueError(\"\")", "docstring": "Turn input data into a proper array for further usage.\n\nOutput array is always 3-dimensional with the given data type. If the output\nis masked, the fill_value corresponds to the given nodata value and the\nnodata value will be burned into the data array.\n\nParameters\n----------\ndata : array or iterable\n array (masked or normal) or iterable containing arrays\nnodata : integer or float\n nodata value (default: 0) used if input is not a masked array and\n for output array\nmasked : bool\n return a NumPy Array or a NumPy MaskedArray (default: True)\ndtype : string\n data type of output array (default: \"int16\")\n\nReturns\n-------\narray : array", "id": "f12820:m17"} {"signature": "def __init__(self, in_tile=None, in_data=None, out_profile=None, out_tile=None, tags=None):", "body": "out_tile = in_tile if out_tile is None else out_tile_validate_write_window_params(in_tile, out_tile, in_data, out_profile)self.data = extract_from_array(in_raster=in_data,in_affine=in_tile.affine,out_tile=out_tile)if \"\" in out_profile:out_profile[\"\"] = out_profile.pop(\"\")self.profile = out_profileself.tags = tags", "docstring": "Prepare data & profile.", "id": "f12820:c0:m0"} {"signature": "def __enter__(self):", "body": "self.rio_memfile = MemoryFile()with self.rio_memfile.open(**self.profile) as dst:dst.write(self.data.astype(self.profile[\"\"], copy=False))_write_tags(dst, self.tags)return self.rio_memfile", "docstring": "Open MemoryFile, write data and return.", "id": "f12820:c0:m1"} {"signature": "def __exit__(self, *args):", "body": "self.rio_memfile.close()", "docstring": "Make sure MemoryFile is closed.", "id": "f12820:c0:m2"} {"signature": "def get_best_zoom_level(input_file, tile_pyramid_type):", "body": "tile_pyramid = BufferedTilePyramid(tile_pyramid_type)with rasterio.open(input_file, \"\") as src:xmin, ymin, xmax, ymax = reproject_geometry(segmentize_geometry(box(src.bounds.left, src.bounds.bottom, src.bounds.right,src.bounds.top),get_segmentize_value(input_file, tile_pyramid)),src_crs=src.crs, dst_crs=tile_pyramid.crs).boundsx_dif = xmax - xminy_dif = ymax - yminsize = float(src.width + src.height)avg_resolution = ((x_dif / float(src.width)) * (float(src.width) / size) +(y_dif / float(src.height)) * (float(src.height) / size))for zoom in range(, ):if tile_pyramid.pixel_x_size(zoom) <= 
avg_resolution:return zoom-", "docstring": "Determine the best base zoom level for a raster.\n\n\"Best\" means the maximum zoom level where no oversampling has to be done.\n\nParameters\n----------\ninput_file : path to raster file\ntile_pyramid_type : ``TilePyramid`` projection (``geodetic`` or ``mercator``)\n\nReturns\n-------\nzoom : integer", "id": "f12821:m0"} {"signature": "def get_segmentize_value(input_file=None, tile_pyramid=None):", "body": "with rasterio.open(input_file, \"\") as input_raster:pixelsize = input_raster.transform[]return pixelsize * tile_pyramid.tile_size", "docstring": "Return the recommended segmentation value in input file units.\n\nIt is calculated by multiplying raster pixel size with tile shape in\npixels.\n\nParameters\n----------\ninput_file : str\n location of a file readable by rasterio\ntile_pyramid : ``TilePyramid`` or ``BufferedTilePyramid``\n tile pyramid to estimate target tile size\n\nReturns\n-------\nsegmentize value : float\n suggested length of line segments used to reproject file bounds", "id": "f12821:m1"} {"signature": "def tile_to_zoom_level(tile, dst_pyramid=None, matching_method=\"\", precision=):", "body": "def width_height(bounds):try:l, b, r, t = reproject_geometry(box(*bounds), src_crs=tile.crs, dst_crs=dst_pyramid.crs).boundsexcept ValueError:raise TopologicalError(\"\")return r - l, t - bif tile.tp.crs == dst_pyramid.crs:return tile.zoomelse:if matching_method == \"\":transform, width, height = calculate_default_transform(tile.tp.crs,dst_pyramid.crs,tile.width,tile.height,*tile.bounds)tile_resolution = round(transform[], precision)elif matching_method == \"\":l, b, r, t = tile.boundsx = tile.pixel_x_sizey = tile.pixel_y_sizeres = []for bounds in [(l, t - y, l + x, t), (l, b, l + x, b + y), (r - x, b, r, b + y), (r - x, t - y, r, t) ]:try:w, h = width_height(bounds)res.extend([w, h])except TopologicalError:logger.debug(\"\")if res:tile_resolution = round(min(res), precision)else:raise TopologicalError(\"\")else:raise ValueError(\"\", matching_method)logger.debug(\"\",tile_resolution)zoom = while True:td_resolution = round(dst_pyramid.pixel_x_size(zoom), precision)if td_resolution <= tile_resolution:breakzoom += logger.debug(\"\", tile_resolution, zoom, td_resolution)return zoom", "docstring": "Determine the best zoom level in target TilePyramid from given Tile.\n\n\nParameters\n----------\ntile : BufferedTile\ndst_pyramid : BufferedTilePyramid\nmatching_method : str ('gdal' or 'min')\n gdal: Uses GDAL's standard method. Here, the target resolution is calculated by\n averaging the extent's pixel sizes over both x and y axes. This approach\n returns a zoom level which may not have the best quality but will speed up\n reading significantly.\n min: Returns the zoom level which matches the minimum resolution of the extent's\n four corner pixels. This approach returns the zoom level with the best\n possible quality but with low performance. 
If the tile extent is outside of\n the destination pyramid, a TopologicalError will be raised.\nprecision : int\n Round resolutions to n digits before comparing.\n\nReturns\n-------\nzoom : int", "id": "f12821:m2"} {"signature": "def path_is_remote(path, s3=True):", "body": "prefixes = (\"\", \"\", \"\")if s3:prefixes += (\"\", \"\")return path.startswith(prefixes)", "docstring": "Determine whether file path is remote or local.\n\nParameters\n----------\npath : path to file\n\nReturns\n-------\nis_remote : bool", "id": "f12821:m3"} {"signature": "def path_exists(path):", "body": "if path.startswith((\"\", \"\")):try:urlopen(path).info()return Trueexcept HTTPError as e:if e.code == :return Falseelse:raiseelif path.startswith(\"\"):bucket = get_boto3_bucket(path.split(\"\")[])key = \"\".join(path.split(\"\")[:])for obj in bucket.objects.filter(Prefix=key):if obj.key == key:return Trueelse:return Falseelse:logger.debug(\"\", path, os.path.exists(path))return os.path.exists(path)", "docstring": "Check if file exists either remote or local.\n\nParameters:\n-----------\npath : path to file\n\nReturns:\n--------\nexists : bool", "id": "f12821:m4"} {"signature": "def absolute_path(path=None, base_dir=None):", "body": "if path_is_remote(path):return pathelse:if os.path.isabs(path):return pathelse:if base_dir is None or not os.path.isabs(base_dir):raise TypeError(\"\")return os.path.abspath(os.path.join(base_dir, path))", "docstring": "Return absolute path if path is local.\n\nParameters:\n-----------\npath : path to file\nbase_dir : base directory used for absolute path\n\nReturns:\n--------\nabsolute path", "id": "f12821:m5"} {"signature": "def relative_path(path=None, base_dir=None):", "body": "if path_is_remote(path) or not os.path.isabs(path):return pathelse:return os.path.relpath(path, base_dir)", "docstring": "Return relative path if path is local.\n\nParameters:\n-----------\npath : path to file\nbase_dir : directory where path should be relative to\n\nReturns:\n--------\nrelative path", "id": "f12821:m6"} {"signature": "def makedirs(path):", "body": "if not path_is_remote(path):try:os.makedirs(path)except OSError:pass", "docstring": "Silently create all subdirectories of path if path is local.\n\nParameters:\n-----------\npath : path", "id": "f12821:m7"} {"signature": "def write_json(path, params):", "body": "logger.debug(\"\", params, path)if path.startswith(\"\"):bucket = get_boto3_bucket(path.split(\"\")[])key = \"\".join(path.split(\"\")[:])logger.debug(\"\", key)bucket.put_object(Key=key,Body=json.dumps(params, sort_keys=True, indent=))else:makedirs(os.path.dirname(path))with open(path, '') as dst:json.dump(params, dst, sort_keys=True, indent=)", "docstring": "Write local or remote.", "id": "f12821:m8"} {"signature": "def read_json(path):", "body": "if path.startswith((\"\", \"\")):try:return json.loads(urlopen(path).read().decode())except HTTPError:raise FileNotFoundError(\"\", path)elif path.startswith(\"\"):bucket = get_boto3_bucket(path.split(\"\")[])key = \"\".join(path.split(\"\")[:])for obj in bucket.objects.filter(Prefix=key):if obj.key == key:return json.loads(obj.get()[''].read().decode())raise FileNotFoundError(\"\", path)else:try:with open(path, \"\") as src:return json.loads(src.read())except:raise FileNotFoundError(\"\", path)", "docstring": "Read local or remote.", "id": "f12821:m9"} {"signature": "def get_gdal_options(opts, is_remote=False):", "body": "user_opts = {} if opts is None else dict(**opts)if is_remote:return dict(GDAL_HTTP_OPTS, **user_opts)else:return 
user_opts", "docstring": "Return a merged set of custom and default GDAL/rasterio Env options.\n\nIf is_remote is set to True, the default GDAL_HTTP_OPTS are appended.\n\nParameters\n----------\nopts : dict or None\n Explicit GDAL options.\nis_remote : bool\n Indicate whether Env is for a remote file.\n\nReturns\n-------\ndictionary", "id": "f12821:m11"} {"signature": "def reproject_geometry(geometry, src_crs=None, dst_crs=None, error_on_clip=False, validity_check=True,antimeridian_cutting=False):", "body": "src_crs = _validated_crs(src_crs)dst_crs = _validated_crs(dst_crs)def _reproject_geom(geometry, src_crs, dst_crs):if geometry.is_empty:return geometryelse:out_geom = to_shape(transform_geom(src_crs.to_dict(),dst_crs.to_dict(),mapping(geometry),antimeridian_cutting=antimeridian_cutting))return _repair(out_geom) if validity_check else out_geomif src_crs == dst_crs or geometry.is_empty:return _repair(geometry)elif (dst_crs.is_epsg_code and dst_crs.get(\"\") in CRS_BOUNDS and dst_crs.get(\"\") != \"\" ):wgs84_crs = CRS().from_epsg()crs_bbox = box(*CRS_BOUNDS[dst_crs.get(\"\")])geometry_4326 = _reproject_geom(geometry, src_crs, wgs84_crs)if error_on_clip and not geometry_4326.within(crs_bbox):raise RuntimeError(\"\")return _reproject_geom(crs_bbox.intersection(geometry_4326), wgs84_crs, dst_crs)else:return _reproject_geom(geometry, src_crs, dst_crs)", "docstring": "Reproject a geometry to target CRS.\n\nAlso, clips geometry if it lies outside the destination CRS boundary.\nSupported destination CRSes for clipping: 4326 (WGS84), 3857 (Spherical\nMercator) and 3035 (ETRS89 / ETRS-LAEA).\n\nParameters\n----------\ngeometry : ``shapely.geometry``\nsrc_crs : ``rasterio.crs.CRS`` or EPSG code\n CRS of source data\ndst_crs : ``rasterio.crs.CRS`` or EPSG code\n target CRS\nerror_on_clip : bool\n raises a ``RuntimeError`` if a geometry is outside of CRS bounds\n (default: False)\nvalidity_check : bool\n checks if reprojected geometry is valid and throws ``TopologicalError``\n if invalid (default: True)\nantimeridian_cutting : bool\n cut geometry at Antimeridian; can result in a multipart output geometry\n\nReturns\n-------\ngeometry : ``shapely.geometry``", "id": "f12822:m0"} {"signature": "def segmentize_geometry(geometry, segmentize_value):", "body": "if geometry.geom_type != \"\":raise TypeError(\"\")return Polygon(LinearRing([pfor l in map(lambda x: LineString([x[], x[]]),zip(geometry.exterior.coords[:-], geometry.exterior.coords[:]))for p in [l.interpolate(segmentize_value * i).coords[]for i in range(int(l.length / segmentize_value))] + [l.coords[]]]))", "docstring": "Segmentize Polygon outer ring by segmentize value.\n\nJust Polygon geometry type supported.\n\nParameters\n----------\ngeometry : ``shapely.geometry``\nsegmentize_value: float\n\nReturns\n-------\ngeometry : ``shapely.geometry``", "id": "f12822:m3"} {"signature": "def read_vector_window(input_files, tile, validity_check=True):", "body": "if not isinstance(input_files, list):input_files = [input_files]return [featurefor feature in chain.from_iterable([_read_vector_window(path, tile, validity_check=validity_check)for path in input_files])]", "docstring": "Read a window of an input vector dataset.\n\nAlso clips geometry.\n\nParameters:\n-----------\ninput_file : string\n path to vector file\ntile : ``Tile``\n tile extent to read data from\nvalidity_check : bool\n checks if reprojected geometry is valid and throws ``RuntimeError`` if\n invalid (default: True)\n\nReturns\n-------\nfeatures : list\n a list of reprojected GeoJSON-like 
features", "id": "f12822:m4"} {"signature": "def write_vector_window(in_data=None, out_schema=None, out_tile=None, out_path=None, bucket_resource=None):", "body": "try:os.remove(out_path)except OSError:passout_features = []for feature in in_data:try:for out_geom in multipart_to_singleparts(clean_geometry_type(to_shape(feature[\"\"]).intersection(out_tile.bbox),out_schema[\"\"])):out_features.append({\"\": mapping(out_geom),\"\": feature[\"\"]})except Exception as e:logger.warning(\"\", e)continueif out_features:try:if out_path.startswith(\"\"):with VectorWindowMemoryFile(tile=out_tile,features=out_features,schema=out_schema,driver=\"\") as memfile:logger.debug((out_tile.id, \"\", out_path))bucket_resource.put_object(Key=\"\".join(out_path.split(\"\")[:]),Body=memfile)else:with fiona.open(out_path, '', schema=out_schema, driver=\"\",crs=out_tile.crs.to_dict()) as dst:logger.debug((out_tile.id, \"\", out_path))dst.writerecords(out_features)except Exception as e:logger.error(\"\", out_path, e)raiseelse:logger.debug((out_tile.id, \"\", out_path))", "docstring": "Write features to GeoJSON file.\n\nParameters\n----------\nin_data : features\nout_schema : dictionary\n output schema for fiona\nout_tile : ``BufferedTile``\n tile used for output extent\nout_path : string\n output path for GeoJSON file", "id": "f12822:m6"} {"signature": "def clean_geometry_type(geometry, target_type, allow_multipart=True):", "body": "multipart_geoms = {\"\": MultiPoint,\"\": MultiLineString,\"\": MultiPolygon,\"\": MultiPoint,\"\": MultiLineString,\"\": MultiPolygon}if target_type not in multipart_geoms.keys():raise TypeError(\"\" % target_type)if geometry.geom_type == target_type:return geometryelif allow_multipart:target_multipart_type = multipart_geoms[target_type]if geometry.geom_type == \"\":return target_multipart_type([clean_geometry_type(g, target_type, allow_multipart)for g in geometry])elif any([isinstance(geometry, target_multipart_type),multipart_geoms[geometry.geom_type] == target_multipart_type]):return geometryraise GeometryTypeError(\"\" % (geometry.geom_type, target_type))", "docstring": "Return geometry of a specific type if possible.\n\nFilters and splits up GeometryCollection into target types. This is\nnecessary when after clipping and/or reprojecting the geometry types from\nsource geometries change (i.e. 
a Polygon becomes a LineString or a\nLineString becomes Point) in some edge cases.\n\nParameters\n----------\ngeometry : ``shapely.geometry``\ntarget_type : string\n target geometry type\nallow_multipart : bool\n allow multipart geometries (default: True)\n\nReturns\n-------\ncleaned geometry : ``shapely.geometry``\n returns None if input geometry type differs from target type\n\nRaises\n------\nGeometryTypeError : if geometry type does not match target_type", "id": "f12822:m8"} {"signature": "def to_shape(geom):", "body": "return shape(geom) if isinstance(geom, dict) else geom", "docstring": "Convert geometry to shapely geometry if necessary.\n\nParameters:\n-----------\ngeom : shapely geometry or GeoJSON mapping\n\nReturns:\n--------\nshapely geometry", "id": "f12822:m9"} {"signature": "def multipart_to_singleparts(geom):", "body": "if isinstance(geom, base.BaseGeometry):if hasattr(geom, \"\"):for subgeom in geom:yield subgeomelse:yield geom", "docstring": "Yield single part geometries if geom is multipart, otherwise yield geom.\n\nParameters:\n-----------\ngeom : shapely geometry\n\nReturns:\n--------\nshapely single part geometries", "id": "f12822:m10"} {"signature": "def __init__(self, tile=None, features=None, schema=None, driver=None):", "body": "self.tile = tileself.schema = schemaself.driver = driverself.features = features", "docstring": "Prepare data & profile.", "id": "f12822:c0:m0"} {"signature": "def __enter__(self):", "body": "self.fio_memfile = MemoryFile()with self.fio_memfile.open(schema=self.schema,driver=self.driver,crs=self.tile.crs) as dst:dst.writerecords(self.features)return self.fio_memfile", "docstring": "Open MemoryFile, write data and return.", "id": "f12822:c0:m1"} {"signature": "def __exit__(self, *args):", "body": "self.fio_memfile.close()", "docstring": "Make sure MemoryFile is closed.", "id": "f12822:c0:m2"} {"signature": "def zoom_index_gen(mp=None,out_dir=None,zoom=None,geojson=False,gpkg=False,shapefile=False,txt=False,vrt=False,fieldname=\"\",basepath=None,for_gdal=True,threading=False,):", "body": "for zoom in get_zoom_levels(process_zoom_levels=zoom):with ExitStack() as es:index_writers = []if geojson:index_writers.append(es.enter_context(VectorFileWriter(driver=\"\",out_path=_index_file_path(out_dir, zoom, \"\"),crs=mp.config.output_pyramid.crs,fieldname=fieldname)))if gpkg:index_writers.append(es.enter_context(VectorFileWriter(driver=\"\",out_path=_index_file_path(out_dir, zoom, \"\"),crs=mp.config.output_pyramid.crs,fieldname=fieldname)))if shapefile:index_writers.append(es.enter_context(VectorFileWriter(driver=\"\",out_path=_index_file_path(out_dir, zoom, \"\"),crs=mp.config.output_pyramid.crs,fieldname=fieldname)))if txt:index_writers.append(es.enter_context(TextFileWriter(out_path=_index_file_path(out_dir, zoom, \"\"))))if vrt:index_writers.append(es.enter_context(VRTFileWriter(out_path=_index_file_path(out_dir, zoom, \"\"),output=mp.config.output,out_pyramid=mp.config.output_pyramid)))logger.debug(\"\", index_writers)def _worker(tile):tile_path = _tile_path(orig_path=mp.config.output.get_path(tile),basepath=basepath,for_gdal=for_gdal)indexes = [i for i in index_writersif not i.entry_exists(tile=tile, path=tile_path)]if indexes:output_exists = mp.config.output.tiles_exist(output_tile=tile)else:output_exists = Nonereturn tile, tile_path, indexes, output_existswith concurrent.futures.ThreadPoolExecutor() as executor:for task in concurrent.futures.as_completed((executor.submit(_worker, i)for i in 
mp.config.output_pyramid.tiles_from_geom(mp.config.area_at_zoom(zoom), zoom))):tile, tile_path, indexes, output_exists = task.result()if indexes and output_exists:logger.debug(\"\", tile_path)logger.debug(\"\" % len(indexes))for index in indexes:index.write(tile, tile_path)yield tile", "docstring": "Generate indexes for given zoom level.\n\nParameters\n----------\nmp : Mapchete object\n process output to be indexed\nout_dir : path\n optionally override process output directory\nzoom : int\n zoom level to be processed\ngeojson : bool\n generate GeoJSON index (default: False)\ngpkg : bool\n generate GeoPackage index (default: False)\nshapefile : bool\n generate Shapefile index (default: False)\ntxt : bool\n generate tile path list textfile (default: False)\nvrt : bool\n GDAL-style VRT file (default: False)\nfieldname : str\n field name which contains paths of tiles (default: \"location\")\nbasepath : str\n if set, use custom base path instead of output path\nfor_gdal : bool\n use GDAL compatible remote paths, i.e. add \"/vsicurl/\" before path\n (default: True)", "id": "f12823:m0"} {"signature": "def __init__(self, grid=None, metatiling=, tile_size=, pixelbuffer=):", "body": "TilePyramid.__init__(self, grid, metatiling=metatiling, tile_size=tile_size)self.tile_pyramid = TilePyramid(grid, metatiling=metatiling, tile_size=tile_size)self.metatiling = metatilingif isinstance(pixelbuffer, int) and pixelbuffer >= :self.pixelbuffer = pixelbufferelse:raise ValueError(\"\")", "docstring": "Initialize.", "id": "f12824:c0:m0"} {"signature": "def tile(self, zoom, row, col):", "body": "tile = self.tile_pyramid.tile(zoom, row, col)return BufferedTile(tile, pixelbuffer=self.pixelbuffer)", "docstring": "Return ``BufferedTile`` object of this ``BufferedTilePyramid``.\n\nParameters\n----------\nzoom : integer\n zoom level\nrow : integer\n tile matrix row\ncol : integer\n tile matrix column\n\nReturns\n-------\nbuffered tile : ``BufferedTile``", "id": "f12824:c0:m1"} {"signature": "def tiles_from_bounds(self, bounds, zoom):", "body": "for tile in self.tiles_from_bbox(box(*bounds), zoom):yield self.tile(*tile.id)", "docstring": "Return all tiles intersecting with bounds.\n\nBounds values will be cleaned if they cross the antimeridian or are\noutside of the Northern or Southern tile pyramid bounds.\n\nParameters\n----------\nbounds : tuple\n (left, bottom, right, top) bounding values in tile pyramid CRS\nzoom : integer\n zoom level\n\nYields\n------\nintersecting tiles : generator\n generates ``BufferedTiles``", "id": "f12824:c0:m2"} {"signature": "def tiles_from_bbox(self, geometry, zoom):", "body": "for tile in self.tile_pyramid.tiles_from_bbox(geometry, zoom):yield self.tile(*tile.id)", "docstring": "All metatiles intersecting with given bounding box.\n\nParameters\n----------\ngeometry : ``shapely.geometry``\nzoom : integer\n zoom level\n\nYields\n------\nintersecting tiles : generator\n generates ``BufferedTiles``", "id": "f12824:c0:m3"} {"signature": "def tiles_from_geom(self, geometry, zoom):", "body": "for tile in self.tile_pyramid.tiles_from_geom(geometry, zoom):yield self.tile(*tile.id)", "docstring": "Return all tiles intersecting with input geometry.\n\nParameters\n----------\ngeometry : ``shapely.geometry``\nzoom : integer\n zoom level\n\nYields\n------\nintersecting tiles : ``BufferedTile``", "id": "f12824:c0:m4"} {"signature": "def intersecting(self, tile):", "body": "return [self.tile(*intersecting_tile.id)for intersecting_tile in self.tile_pyramid.intersecting(tile)]", "docstring": "Return all 
BufferedTiles intersecting with tile.\n\nParameters\n----------\ntile : ``BufferedTile``\n another tile", "id": "f12824:c0:m5"} {"signature": "def to_dict(self):", "body": "return dict(grid=self.grid.to_dict(),metatiling=self.metatiling,tile_size=self.tile_size,pixelbuffer=self.pixelbuffer)", "docstring": "Return dictionary representation of pyramid parameters.", "id": "f12824:c0:m6"} {"signature": "def from_dict(config_dict):", "body": "return BufferedTilePyramid(**config_dict)", "docstring": "Initialize TilePyramid from configuration dictionary.", "id": "f12824:c0:m7"} {"signature": "def __init__(self, tile, pixelbuffer=):", "body": "assert not isinstance(tile, BufferedTile)Tile.__init__(self, tile.tile_pyramid, tile.zoom, tile.row, tile.col)self._tile = tileself.pixelbuffer = pixelbuffer", "docstring": "Initialize.", "id": "f12824:c1:m0"} {"signature": "@cached_propertydef height(self):", "body": "return self._tile.shape(pixelbuffer=self.pixelbuffer).height", "docstring": "Return buffered height.", "id": "f12824:c1:m5"} {"signature": "@cached_propertydef width(self):", "body": "return self._tile.shape(pixelbuffer=self.pixelbuffer).width", "docstring": "Return buffered width.", "id": "f12824:c1:m6"} {"signature": "@cached_propertydef shape(self):", "body": "return self._tile.shape(pixelbuffer=self.pixelbuffer)", "docstring": "Return buffered shape.", "id": "f12824:c1:m7"} {"signature": "@cached_propertydef affine(self):", "body": "return self._tile.affine(pixelbuffer=self.pixelbuffer)", "docstring": "Return buffered Affine.", "id": "f12824:c1:m8"} {"signature": "@cached_propertydef bounds(self):", "body": "return self._tile.bounds(pixelbuffer=self.pixelbuffer)", "docstring": "Return buffered bounds.", "id": "f12824:c1:m9"} {"signature": "@cached_propertydef bbox(self):", "body": "return self._tile.bbox(pixelbuffer=self.pixelbuffer)", "docstring": "Return buffered bounding box.", "id": "f12824:c1:m10"} {"signature": "def get_children(self):", "body": "return [BufferedTile(t, self.pixelbuffer) for t in self._tile.get_children()]", "docstring": "Get tile children (intersecting tiles in next zoom level).\n\nReturns\n-------\nchildren : list\n a list of ``BufferedTiles``", "id": "f12824:c1:m11"} {"signature": "def get_parent(self):", "body": "return BufferedTile(self._tile.get_parent(), self.pixelbuffer)", "docstring": "Get tile parent (intersecting tile in previous zoom level).\n\nReturns\n-------\nparent : ``BufferedTile``", "id": "f12824:c1:m12"} {"signature": "def get_neighbors(self, connectedness=):", "body": "return [BufferedTile(t, self.pixelbuffer)for t in self._tile.get_neighbors(connectedness=connectedness)]", "docstring": "Return tile neighbors.\n\nTile neighbors are unique, i.e. in some edge cases, where both the left\nand right neighbor wrapped around the antimeridian is the same. 
Also,\nneighbors outside the northern and southern TilePyramid boundaries are\nexcluded, because they are invalid.\n\n-------------\n| 8 | 1 | 5 |\n-------------\n| 4 | x | 2 |\n-------------\n| 7 | 3 | 6 |\n-------------\n\nParameters\n----------\nconnectedness : int\n [4 or 8] return four direct neighbors or all eight.\n\nReturns\n-------\nlist of BufferedTiles", "id": "f12824:c1:m13"} {"signature": "def is_on_edge(self):", "body": "return (self.left <= self.tile_pyramid.left or self.bottom <= self.tile_pyramid.bottom or self.right >= self.tile_pyramid.right or self.top >= self.tile_pyramid.top )", "docstring": "Determine whether tile touches or goes over pyramid edge.", "id": "f12824:c1:m14"} {"signature": "def user_process_logger(pname):", "body": "warnings.warn(DeprecationWarning(\"\"\"\"))return logging.getLogger(\"\" + pname)", "docstring": "Logger to be used within a user process file.", "id": "f12825:m3"} {"signature": "def driver_logger(dname):", "body": "warnings.warn(DeprecationWarning(\"\"))return logging.getLogger(\"\" + dname)", "docstring": "Logger to be used from a driver plugin.", "id": "f12825:m4"} {"signature": "def execute(mp,td_resampling=\"\",td_matching_method=\"\",td_matching_max_zoom=None,td_matching_precision=,td_fallback_to_higher_zoom=False,clip_pixelbuffer=,**kwargs):", "body": "if \"\" in mp.params[\"\"]:clip_geom = mp.open(\"\").read()if not clip_geom:logger.debug(\"\")return \"\"else:clip_geom = []with mp.open(\"\",matching_method=td_matching_method,matching_max_zoom=td_matching_max_zoom,matching_precision=td_matching_precision,fallback_to_higher_zoom=td_fallback_to_higher_zoom,resampling=td_resampling) as raster:raster_data = raster.read()if raster.is_empty() or raster_data[].mask.all():logger.debug(\"\")return \"\"if clip_geom:clipped = mp.clip(np.where(raster_data[].mask, mp.params[\"\"].nodata, raster_data),clip_geom,clip_buffer=clip_pixelbuffer,inverted=True)return np.where(clipped.mask, clipped, mp.params[\"\"].nodata)else:return np.where(raster_data[].mask, mp.params[\"\"].nodata, raster_data)", "docstring": "Convert and optionally clip input raster data.\n\nInputs:\n-------\nraster\n singleband or multiband data input\nclip (optional)\n vector data used to clip output\n\nParameters\n----------\ntd_resampling : str (default: 'nearest')\n Resampling used when reading from TileDirectory.\ntd_matching_method : str ('gdal' or 'min') (default: 'gdal')\n gdal: Uses GDAL's standard method. Here, the target resolution is\n calculated by averaging the extent's pixel sizes over both x and y\n axes. This approach returns a zoom level which may not have the\n best quality but will speed up reading significantly.\n min: Returns the zoom level which matches the minimum resolution of the\n extent's four corner pixels. This approach returns the zoom level\n with the best possible quality but with low performance. If the\n tile extent is outside of the destination pyramid, a\n TopologicalError will be raised.\ntd_matching_max_zoom : int (optional, default: None)\n If set, it will prevent reading from zoom levels above the maximum.\ntd_matching_precision : int (default: 8)\n Round resolutions to n digits before comparing.\ntd_fallback_to_higher_zoom : bool (default: False)\n In case no data is found at zoom level, try to read data from higher\n zoom levels. Enabling this setting can lead to many IO requests in\n areas with no data.\nclip_pixelbuffer : int\n Use pixelbuffer when clipping output by geometry. 
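The execute() entry points in this corpus all follow the same user-process convention: mapchete calls execute(mp) once per tile and expects either an array or a sentinel value for empty tiles. A minimal sketch follows; the input key "raster" and the "empty" sentinel are assumptions, since the literal strings are elided in the records above.

    # Minimal user-process sketch; "raster" and "empty" are assumed placeholder values.
    def execute(mp):
        with mp.open("raster", resampling="nearest") as src:
            if src.is_empty():          # nothing intersects this tile
                return "empty"
            return src.read()           # handed to the configured output driver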
(default: 0)\n\nOutput\n------\nnp.ndarray", "id": "f12826:m0"} {"signature": "def execute(mp,resampling=\"\",scale_method=None,scales_minmax=None):", "body": "with mp.open(\"\", resampling=resampling) as raster_file:if raster_file.is_empty():return \"\"scaled = ()mask = ()raster_data = raster_file.read()if raster_data.ndim == :raster_data = ma.expand_dims(raster_data, axis=)if not scale_method:scales_minmax = [(i, i) for i in range(len(raster_data))]for band, (scale_min, scale_max) in zip(raster_data, scales_minmax):if scale_method in [\"\", \"\"]:scaled += (_stretch_array(band, scale_min, scale_max), )elif scale_method == \"\":scaled += (np.clip(band, scale_min, scale_max), )else:scaled += (band, )mask += (band.mask, )return ma.masked_array(np.stack(scaled), np.stack(mask))", "docstring": "Read, stretch and return raster data.\n\nInputs:\n-------\nraster\n raster file\n\nParameters:\n-----------\nresampling : str\n rasterio.Resampling method\nscale_method : str\n - dtype_scale: use dtype minimum and maximum values\n - minmax_scale: use dataset bands minimum and maximum values\n - crop: clip data to output dtype\nscales_minmax : tuple\n tuple of band specific scale values\n\nOutput:\n-------\nnp.ndarray", "id": "f12827:m1"} {"signature": "def execute(mp):", "body": "with mp.open(\"\", resampling=\"\") as raster_file:if raster_file.is_empty():return \"\"dem = raster_file.read()return dem", "docstring": "Example process for testing.\n\nInputs:\n-------\nfile1\n raster file\n\nParameters:\n-----------\n\nOutput:\n-------\nnp.ndarray", "id": "f12828:m0"} {"signature": "@click.command(help=\"\")@utils.arg_create_mapchete_file@utils.arg_process_file@utils.arg_out_format@utils.opt_out_path@utils.opt_pyramid_type@utils.opt_forcedef create(mapchete_file,process_file,out_format,out_path=None,pyramid_type=None,force=False):", "body": "if os.path.isfile(process_file) or os.path.isfile(mapchete_file):if not force:raise IOError(\"\")out_path = out_path if out_path else os.path.join(os.getcwd(), \"\")process_template = pkg_resources.resource_filename(\"\", \"\")process_file = os.path.join(os.getcwd(), process_file)copyfile(process_template, process_file)mapchete_template = pkg_resources.resource_filename(\"\", \"\")output_options = dict(format=out_format, path=out_path, **FORMAT_MANDATORY[out_format])pyramid_options = {'': pyramid_type}substitute_elements = {'': process_file,'': dump({'': output_options}, default_flow_style=False),'': dump({'': pyramid_options}, default_flow_style=False)}with open(mapchete_template, '') as config_template:config = Template(config_template.read())customized_config = config.substitute(substitute_elements)with open(mapchete_file, '') as target_config:target_config.write(customized_config)", "docstring": "Create an empty Mapchete and process file in a given directory.", "id": "f12832:m0"} {"signature": "@click.command(help=\"\")@utils.arg_mapchete_files@utils.opt_zoom@utils.opt_bounds@utils.opt_point@utils.opt_wkt_geometry@utils.opt_tile@utils.opt_overwrite@utils.opt_multi@utils.opt_input_file@utils.opt_logfile@utils.opt_verbose@utils.opt_no_pbar@utils.opt_debug@utils.opt_max_chunksize@utils.opt_vrt@utils.opt_idx_out_dirdef execute(mapchete_files,zoom=None,bounds=None,point=None,wkt_geometry=None,tile=None,overwrite=False,multi=None,input_file=None,logfile=None,verbose=False,no_pbar=False,debug=False,max_chunksize=None,vrt=False,idx_out_dir=None):", "body": "multi = multi if multi else cpu_count()mode = \"\" if overwrite else \"\"if debug or not verbose:verbose_dst = 
open(os.devnull, '')else:verbose_dst = sys.stdoutfor mapchete_file in mapchete_files:tqdm.tqdm.write(\"\" % mapchete_file, file=verbose_dst)with click_spinner.spinner(disable=debug) as spinner:if tile:tile = raw_conf_process_pyramid(raw_conf(mapchete_file)).tile(*tile)with mapchete.open(mapchete_file,mode=mode,bounds=tile.bounds,zoom=tile.zoom,single_input_file=input_file) as mp:spinner.stop()tqdm.tqdm.write(\"\", file=verbose_dst)for result in mp.batch_processor(tile=tile):utils.write_verbose_msg(result, dst=verbose_dst)tqdm.tqdm.write(\"\" % mapchete_file, file=verbose_dst)if vrt:tqdm.tqdm.write(\"\", file=verbose_dst)for tile in tqdm.tqdm(zoom_index_gen(mp=mp,zoom=tile.zoom,out_dir=(idx_out_dir if idx_out_dir else mp.config.output.path),vrt=vrt,),total=mp.count_tiles(tile.zoom, tile.zoom),unit=\"\",disable=debug or no_pbar):logger.debug(\"\", tile)tqdm.tqdm.write(\"\" % mapchete_file,file=verbose_dst)else:with mapchete.open(mapchete_file,mode=mode,zoom=zoom,bounds=bounds_from_opts(wkt_geometry=wkt_geometry,point=point,bounds=bounds,raw_conf=raw_conf(mapchete_file)),single_input_file=input_file) as mp:spinner.stop()tiles_count = mp.count_tiles(min(mp.config.init_zoom_levels),max(mp.config.init_zoom_levels))tqdm.tqdm.write(\"\" % (tiles_count, multi),file=verbose_dst)for process_info in tqdm.tqdm(mp.batch_processor(multi=multi, zoom=zoom, max_chunksize=max_chunksize),total=tiles_count,unit=\"\",disable=debug or no_pbar):utils.write_verbose_msg(process_info, dst=verbose_dst)tqdm.tqdm.write(\"\" % mapchete_file, file=verbose_dst)if vrt:tqdm.tqdm.write(\"\", file=verbose_dst)for tile in tqdm.tqdm(zoom_index_gen(mp=mp,zoom=mp.config.init_zoom_levels,out_dir=(idx_out_dir if idx_out_direlse mp.config.output.path),vrt=vrt),total=mp.count_tiles(min(mp.config.init_zoom_levels),max(mp.config.init_zoom_levels)),unit=\"\",disable=debug or no_pbar):logger.debug(\"\", tile)tqdm.tqdm.write(\"\" % mapchete_file,file=verbose_dst)", "docstring": "Execute a Mapchete process.", "id": "f12834:m0"} {"signature": "@click.command(help=\"\")@utils.arg_mapchete_file@utils.opt_port@utils.opt_internal_cache@utils.opt_zoom@utils.opt_bounds@utils.opt_overwrite@utils.opt_readonly@utils.opt_memory@utils.opt_input_file@utils.opt_debug@utils.opt_logfiledef serve(mapchete_file,port=None,internal_cache=None,zoom=None,bounds=None,overwrite=False,readonly=False,memory=False,input_file=None,debug=False,logfile=None):", "body": "app = create_app(mapchete_files=[mapchete_file], zoom=zoom,bounds=bounds, single_input_file=input_file,mode=_get_mode(memory, readonly, overwrite), debug=debug)if os.environ.get(\"\") == \"\":logger.debug(\"\")else:app.run(threaded=True, debug=True, port=port, host='',extra_files=[mapchete_file])", "docstring": "Serve a Mapchete process.\n\nCreates the Mapchete host and serves both web page with OpenLayers and the\nWMTS simple REST endpoint.", "id": "f12836:m0"} {"signature": "def create_app(mapchete_files=None, zoom=None, bounds=None, single_input_file=None,mode=\"\", debug=None):", "body": "from flask import Flask, render_template_stringapp = Flask(__name__)mapchete_processes = {os.path.splitext(os.path.basename(mapchete_file))[]: mapchete.open(mapchete_file, zoom=zoom, bounds=bounds,single_input_file=single_input_file, mode=mode, with_cache=True,debug=debug)for mapchete_file in mapchete_files}mp = next(iter(mapchete_processes.values()))pyramid_type = mp.config.process_pyramid.gridpyramid_srid = mp.config.process_pyramid.crs.to_epsg()process_bounds = \"\".join([str(i) for i in 
mp.config.bounds_at_zoom()])grid = \"\" if pyramid_srid == else \"\"web_pyramid = BufferedTilePyramid(pyramid_type)@app.route('', methods=[''])def index():\"\"\"\"\"\"return render_template_string(pkgutil.get_data('', '').decode(\"\"),srid=pyramid_srid,process_bounds=process_bounds,is_mercator=(pyramid_srid == ),process_names=mapchete_processes.keys())@app.route(\"\".join([\"\", \"\", \"\", \"\", \"\",grid, \"\", \"\", \"\"]),methods=[''])def get(mp_name, zoom, row, col, file_ext):\"\"\"\"\"\"logger.debug(\"\", zoom, row, col,mp_name)return _tile_response(mapchete_processes[mp_name], web_pyramid.tile(zoom, row, col),debug)return app", "docstring": "Configure and create Flask app.", "id": "f12836:m1"} {"signature": "@click.command(help=\"\")@utils.opt_input_formats@utils.opt_output_formats@utils.opt_debugdef formats(input_formats, output_formats, debug=False):", "body": "if input_formats == output_formats:show_inputs, show_outputs = True, Trueelse:show_inputs, show_outputs = input_formats, output_formatsif show_inputs:click.echo(\"\")for driver in available_input_formats():click.echo(\"\" % driver)if show_outputs:click.echo(\"\")for driver in available_output_formats():click.echo(\"\" % driver)", "docstring": "List input and/or output formats.", "id": "f12837:m0"} {"signature": "@click.command(help=\"\")@utils.arg_input_raster@utils.arg_out_dir@utils.opt_pyramid_type_mercator@utils.opt_output_format@utils.opt_resampling_method@utils.opt_scale_method@utils.opt_zoom@utils.opt_bounds@utils.opt_overwrite@utils.opt_debugdef pyramid(input_raster,output_dir,pyramid_type=None,output_format=None,resampling_method=None,scale_method=None,zoom=None,bounds=None,overwrite=False,debug=False):", "body": "bounds = bounds if bounds else Noneoptions = dict(pyramid_type=pyramid_type,scale_method=scale_method,output_format=output_format,resampling=resampling_method,zoom=zoom,bounds=bounds,overwrite=overwrite)raster2pyramid(input_raster, output_dir, options)", "docstring": "Create tile pyramid out of input raster.", "id": "f12838:m0"} {"signature": "def raster2pyramid(input_file, output_dir, options):", "body": "pyramid_type = options[\"\"]scale_method = options[\"\"]output_format = options[\"\"]resampling = options[\"\"]zoom = options[\"\"]bounds = options[\"\"]mode = \"\" if options[\"\"] else \"\"minzoom, maxzoom = _get_zoom(zoom, input_file, pyramid_type)with rasterio.open(input_file, \"\") as input_raster:output_bands = input_raster.countinput_dtype = input_raster.dtypes[]output_dtype = input_raster.dtypes[]nodataval = input_raster.nodatavals[]nodataval = nodataval if nodataval else if output_format == \"\" and output_bands > :output_bands = output_dtype = ''scales_minmax = ()if scale_method == \"\":for index in range(, output_bands+):scales_minmax += (DTYPE_RANGES[input_dtype], )elif scale_method == \"\":for index in range(, output_bands+):band = input_raster.read(index)scales_minmax += ((band.min(), band.max()), )elif scale_method == \"\":for index in range(, output_bands+):scales_minmax += ((, ), )if input_dtype == \"\":scale_method = Nonescales_minmax = ()for index in range(, output_bands+):scales_minmax += ((None, None), )config = dict(process=\"\",output={\"\": output_dir,\"\": output_format,\"\": output_bands,\"\": output_dtype},pyramid=dict(pixelbuffer=, grid=pyramid_type),scale_method=scale_method,scales_minmax=scales_minmax,input={\"\": input_file},config_dir=os.getcwd(),zoom_levels=dict(min=minzoom, max=maxzoom),nodataval=nodataval,resampling=resampling,bounds=bounds,baselevel={\"\": maxzoom, 
\"\": resampling},mode=mode)with mapchete.open(config, zoom=zoom, bounds=bounds) as mp:if not os.path.exists(output_dir):os.makedirs(output_dir)mp.batch_process(zoom=[minzoom, maxzoom])", "docstring": "Create a tile pyramid out of an input raster dataset.", "id": "f12838:m1"} {"signature": "def _get_zoom(zoom, input_raster, pyramid_type):", "body": "if not zoom:minzoom = maxzoom = get_best_zoom_level(input_raster, pyramid_type)elif len(zoom) == :minzoom = zoom[]maxzoom = zoom[]elif len(zoom) == :if zoom[] < zoom[]:minzoom = zoom[]maxzoom = zoom[]else:minzoom = zoom[]maxzoom = zoom[]return minzoom, maxzoom", "docstring": "Determine minimum and maximum zoomlevel.", "id": "f12838:m2"} {"signature": "def set_process_timezone(TZ):", "body": "try:prev_timezone = os.environ['']except KeyError:prev_timezone = Noneos.environ[''] = TZtime.tzset() return prev_timezone", "docstring": "Parameters\n----------\nTZ: string", "id": "f12841:m0"} {"signature": "def wgs84_distance(lat1, lon1, lat2, lon2):", "body": "dLat = math.radians(lat2 - lat1)dLon = math.radians(lon2 - lon1)a = (math.sin(dLat / ) * math.sin(dLat / ) +math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *math.sin(dLon / ) * math.sin(dLon / ))c = * math.atan2(math.sqrt(a), math.sqrt( - a))d = EARTH_RADIUS * creturn d", "docstring": "Distance (in meters) between two points in WGS84 coord system.", "id": "f12841:m1"} {"signature": "@contextlib.contextmanagerdef create_file(fname=None, fname_tmp=None, tmpdir=None,save_tmpfile=False, keepext=False):", "body": "if fname == '':yield fnamereturnif fname_tmp is None:basename = os.path.basename(fname)root, ext = os.path.splitext(basename)dir_ = this_dir = os.path.dirname(fname)if not keepext:root = root + extext = ''if tmpdir:if tmpdir is True:for dir__ in possible_tmpdirs:if os.access(dir__, os.F_OK):dir_ = dir__breaktmpfile = tempfile.NamedTemporaryFile(prefix='' + root + '', suffix=ext, dir=dir_, delete=False)fname_tmp = tmpfile.nametry:yield fname_tmpexcept Exception as e:if save_tmpfile:print(\"\" % fname_tmp)else:os.unlink(fname_tmp)raisetry:os.rename(fname_tmp, fname)os.chmod(fname, & ~current_umask)except OSError as e:tmpfile2 = tempfile.NamedTemporaryFile(prefix='' + root + '', suffix=ext, dir=this_dir, delete=False)shutil.copy(fname_tmp, tmpfile2.name)os.rename(tmpfile2.name, fname)os.chmod(fname, & ~current_umask)os.unlink(fname_tmp)", "docstring": "Context manager for making files with possibility of failure.\n\n If you are creating a file, it is possible that the code will fail\n and leave a corrupt intermediate file. This is especially damaging\n if this is used as automatic input to another process. This context\n manager helps by creating a temporary filename, your code runs and\n creates that temporary file, and then if no exceptions are raised,\n the context manager will move the temporary file to the original\n filename you intended to open.\n\n Parameters\n ----------\n fname : str\n Target filename, this file will be created if all goes well\n fname_tmp : str\n If given, this is used as the temporary filename.\n tmpdir : str or bool\n If given, put temporary files in this directory. If `True`,\n then find a good tmpdir that is not on local filesystem.\n save_tmpfile : bool\n If true, the temporary file is not deleteted if an exception\n is raised.\n keepext : bool, default False\n If true, have tmpfile have same extension as final file.\n\n Returns (as context manager value)\n ----------------------------------\n fname_tmp: str\n Temporary filename to be used. 
Same as `fname_tmp`\n if given as an argument.\n\n Raises\n ------\n Re-raises any exception occurring during the context block.", "id": "f12841:m4"} {"signature": "def execute(cur, *args):", "body": "stmt = args[]if len(args) > :stmt = stmt.replace('', '').replace('', '')print(stmt % (args[]))return cur.execute(*args)", "docstring": "Utility function to print sqlite queries before executing.\n\n Use instead of cur.execute(). First argument is cursor.\n\n cur.execute(stmt)\n becomes\n util.execute(cur, stmt)", "id": "f12841:m5"} {"signature": "def str_time_to_day_seconds(time):", "body": "t = str(time).split('')seconds = int(t[]) * + int(t[]) * + int(t[])return seconds", "docstring": "Converts time strings to integer seconds\n:param time: %H:%M:%S string\n:return: integer seconds", "id": "f12841:m8"} {"signature": "def makedirs(path):", "body": "if not os.path.isdir(path):os.makedirs(path)return path", "docstring": "Create directories if they do not exist, otherwise do nothing.\n\nReturn path for convenience", "id": "f12841:m10"} {"signature": "def timeit(method):", "body": "def timed(*args, **kw):time_start = time.time()result = method(*args, **kw)time_end = time.time()print('' % (method.__name__, time_end - time_start))return resultreturn timed", "docstring": "A Python decorator for printing out the execution time for a function.\n\nAdapted from:\nwww.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods", "id": "f12841:m11"} {"signature": "def source_csv_to_pandas(path, table, read_csv_args=None):", "body": "if '' not in table:table += ''if isinstance(path, dict):data_obj = path[table]f = data_obj.split(\"\")else:if os.path.isdir(path):f = open(os.path.join(path, table))else:z = zipfile.ZipFile(path)for path in z.namelist():if table in path:table = pathbreaktry:f = zip_open(z, table)except KeyError as e:return pd.DataFrame()if read_csv_args:df = pd.read_csv(**read_csv_args)else:df = pd.read_csv(f)return df", "docstring": "Parameters\n----------\npath: str\n path to directory or zipfile\ntable: str\n name of table\nread_csv_args:\n string arguments passed to the read_csv function\n\nReturns\n-------\ndf: pandas:DataFrame", "id": "f12841:m13"} {"signature": "def draw_net_using_node_coords(net):", "body": "import matplotlib.pyplot as pltfig = plt.figure()node_coords = {}for node, data in net.nodes(data=True):node_coords[node] = (data[''], data[''])ax = fig.add_subplot()networkx.draw(net, pos=node_coords, ax=ax, node_size=)return fig", "docstring": "Plot a networkx.Graph by using the lat and lon attributes of nodes.\nParameters\n----------\nnet : networkx.Graph\nReturns\n-------\nfig : matplotlib.figure\n the figure object where the network is plotted", "id": "f12841:m16"} {"signature": "def difference_of_pandas_dfs(df_self, df_other, col_names=None):", "body": "df = pd.concat([df_self, df_other])df = df.reset_index(drop=True)df_gpby = df.groupby(col_names)idx = [x[] for x in list(df_gpby.groups.values()) if len(x) == ]df_sym_diff = df.reindex(idx)df_diff = pd.concat([df_other, df_sym_diff])df_diff = df_diff.reset_index(drop=True)df_gpby = df_diff.groupby(col_names)idx = [x[] for x in list(df_gpby.groups.values()) if len(x) == ]df_diff = df_diff.reindex(idx)return df_diff", "docstring": "Returns a dataframe with all of df_other that are not in df_self, when considering the columns specified in col_names\n:param df_self: pandas Dataframe\n:param df_other: pandas Dataframe\n:param col_names: list of column names\n:return:", "id": "f12841:m18"} {"signature": "def 
get_segments(self):", "body": "cur = self._gtfs.get_cursor()cur.execute('''''')", "docstring": "Get segment\n\nReturns\n-------\nsegments: list[Segment]", "id": "f12842:c0:m1"} {"signature": "def calculate_trip_shape_breakpoints(conn):", "body": "from gtfspy import shapescur = conn.cursor()breakpoints_cache = {}count_bad_shape_ordering = count_bad_shape_fit = count_no_shape_fit = trip_Is = [x[] for x incur.execute('').fetchall()]for trip_I in trip_Is:row = cur.execute('''''', (trip_I,)).fetchone()if row is None:continueshape_id = row[]if shape_id is None or shape_id == '':continuecur.execute('''''',(trip_I,))stop_points = [dict(seq=row[],lat=row[],lon=row[],stop_I=row[])for row in cur if row[] and row[]]cache_key = (shape_id, tuple(x[''] for x in stop_points))if cache_key in breakpoints_cache:breakpoints = breakpoints_cache[cache_key]else:shape_points = shapes.get_shape_points(cur, shape_id)breakpoints, badness= shapes.find_segments(stop_points, shape_points)if breakpoints != sorted(breakpoints):count_bad_shape_ordering += breakpoints_cache[cache_key] = Nonecontinue breakpoints_cache[cache_key] = breakpointsif badness > * len(breakpoints):count_bad_shape_fit += if breakpoints is None:continueif len(breakpoints) == :count_no_shape_fit += continueassert len(breakpoints) == len(stop_points)cur.executemany('''',((int(bkpt), int(trip_I), int(stpt['']))for bkpt, stpt in zip(breakpoints, stop_points)))if count_bad_shape_fit > :print(\"\" % count_bad_shape_fit)if count_bad_shape_ordering > :print(\"\" % count_bad_shape_ordering)if count_no_shape_fit > :print(\"\" % count_no_shape_fit)conn.commit()", "docstring": "Pre-compute the shape points corresponding to each trip's stop.\n\n Depends: shapes", "id": "f12850:m1"} {"signature": "def __init__(self, gtfssource=None, print_progress=True):", "body": "if isinstance(gtfssource, string_types + (dict,)):_gtfs_sources = [gtfssource]else:assert isinstance(gtfssource, list)_gtfs_sources = gtfssourceself.print_progress = print_progressself.gtfs_sources = []for source in _gtfs_sources:if isinstance(source, dict):self.gtfs_sources.append(source)elif isinstance(source, string_types):if os.path.isdir(source):self.gtfs_sources.append(source)else:z = zipfile.ZipFile(source, mode='')zip_commonprefix = os.path.commonprefix(z.namelist())zip_source_datum = {\"\": source,\"\": zip_commonprefix}self.gtfs_sources.append(zip_source_datum)", "docstring": "Parameters\n----------\ngtfssource: str, dict, list\n str: path to GTFS directory or zipfile\n dict:\n dictionary of files to use to as the GTFS files. This\n is mainly useful for testing, not for any normal use of\n GTFS. For example, to provide an agency.txt file,\n do this:\n d = {'agency.txt':\n 'agency_id, agency_name, agency_timezone,agency_url\\n' \\\n '555,CompNet,Europe/Lala,http://x'\n }\n Of course you probably wouldn't want all the string data\n inline like that. You can provide the data as a string or\n a file-like object (with a .read() attribute and line\n iteration).\n list: a list of the above elements to import (i.e. \"merge\") multiple GTFS feeds to the same database\n\nprint_progress: boolean\n whether to print progress of the", "id": "f12857:c0:m0"} {"signature": "def exists(self):", "body": "return any(self.exists_by_source())", "docstring": "Does this GTFS contain this file? 
(file specified by the class)", "id": "f12857:c0:m1"} {"signature": "def exists_by_source(self):", "body": "exists_list = []for source in self.gtfs_sources:if isinstance(source, dict):if self.fname in source:if source[self.fname]:exists_list.append(True)continueif \"\" in source:try:Z = zipfile.ZipFile(source[''], mode='')Z.getinfo(os.path.join(source[''], self.fname))exists_list.append(True)continueexcept KeyError:print(self.fname, '', source)exists_list.append(False)continueelif isinstance(source, string_types):if os.path.exists(os.path.join(source, self.fname)):exists_list.append(True)continueexists_list.append(False)return exists_list", "docstring": "Does this GTFS contain this file? (file specified by the class)", "id": "f12857:c0:m2"} {"signature": "def gen_rows0(self):", "body": "return self.gen_rows(*(self._get_csv_reader_generators()))", "docstring": "Iterate through all rows in all files.\n\n The file is specified by the class - there is one class per\n table. This opens the file, does basic sanitation, and\n iterates over all rows as dictionaries, converted by\n csv.DictReader. This function opens files from both .zip and\n raw directories. The actual logic of converting all data to\n Python is done in the .gen_rows() method which must be\n defined in each subclass.", "id": "f12857:c0:m4"} {"signature": "def create_table(self, conn):", "body": "cur = conn.cursor()if self.tabledef is None:returnif not self.tabledef.startswith(''):cur.execute(''% (self.table, self.tabledef))else:cur.execute(self.tabledef)conn.commit()", "docstring": "Make table definitions", "id": "f12857:c0:m7"} {"signature": "def insert_data(self, conn):", "body": "cur = conn.cursor()csv_reader_generators, prefixes = self._get_csv_reader_generators()for csv_reader, prefix in zip(csv_reader_generators, prefixes):try:row = next(iter(self.gen_rows([csv_reader], [prefix])))fields = row.keys()except StopIteration:print(\"\" % (self.fname, self.table, prefix))continuestmt = '''''' % (self.table,(''.join([x for x in fields if x[] != ''] + self.extra_keys)),(''.join([\"\" + x for x in fields if x[] != ''] + self.extra_values)))if self.print_progress:print('' % (self.fname, self.table, prefix))from itertools import chainrows = chain([row], self.gen_rows([csv_reader], [prefix]))cur.executemany(stmt, rows)conn.commit()", "docstring": "Load data from GTFS file into database", "id": "f12857:c0:m8"} {"signature": "def import_(self, conn):", "body": "if self.print_progress:print('', self.__class__.__name__)self._conn = connself.create_table(conn)if self.mode in ('', '') and self.fname and self.exists() and self.table not in ignore_tables:self.insert_data(conn)if self.mode in ('', '') and hasattr(self, ''):self.create_index(conn)if self.mode in ('', '') and hasattr(self, ''):self.run_post_import(conn)conn.commit()", "docstring": "Do the actual import. 
Copy data and store in connection object.\n\n This function:\n - Creates the tables\n - Imports data (using self.gen_rows)\n - Runs any post_import hooks.\n - Creates any indexes\n - Does *not* run self.make_views - those must be done\n after all tables are loaded.", "id": "f12857:c0:m11"} {"signature": "@classmethoddef make_views(cls, conn):", "body": "pass", "docstring": "The make views should be run after all tables are imported.", "id": "f12857:c0:m12"} {"signature": "@classmethoddef copy(cls, conn, **where):", "body": "cur = conn.cursor()if where and cls.copy_where:copy_where = cls.copy_where.format(**where)else:copy_where = ''cur.execute('''' % (cls.table, cls.table, copy_where))", "docstring": "Copy data from one table to another while filtering data at the same time\n\n Parameters\n ----------\n conn: sqlite3 DB connection. It must have a second database\n attached as \"other\".\n **where : keyword arguments\n specifying (start_ut and end_ut for filtering, see the copy_where clause in the subclasses)", "id": "f12857:c0:m13"} {"signature": "@classmethoddef make_views(cls, conn):", "body": "conn.execute('')conn.execute('''''')conn.commit()conn.execute('')conn.execute('''''''''''''')conn.commit()", "docstring": "Create day_trips and day_stop_times views.\n\n day_trips: day_trips2 x trips = days x trips\n day_stop_times: day_trips2 x trips x stop_times = days x trips x stop_times", "id": "f12859:c0:m2"} {"signature": "def createcolorbar(cmap, norm):", "body": "cax, kw = matplotlib.colorbar.make_axes(matplotlib.pyplot.gca())c = matplotlib.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm)return c", "docstring": "Create a colourbar with limits of lwr and upr", "id": "f12860:m3"} {"signature": "def print_coords(rows, prefix=''):", "body": "lat = [row[''] for row in rows]lon = [row[''] for row in rows]print(''+'' * )print(\"\" % (prefix, prefix, lat, lon))print(''*)", "docstring": "Print coordinates within a sequence.\n\n This is only used for debugging. Printed in a form that can be\n pasted into Python for visualization.", "id": "f12861:m0"} {"signature": "def find_segments(stops, shape):", "body": "if not shape:return [], break_points = []last_i = cumul_d = badness = d_last_stop = float('')lstlat, lstlon = None, Nonebreak_shape_points = []for stop in stops:stlat, stlon = stop[''], stop['']best_d = float('')if badness > and badness > * len(break_points):return [], badnessfor i in range(last_i, len(shape)):d = wgs84_distance(stlat, stlon, shape[i][''], shape[i][''])if lstlat:d_last_stop = wgs84_distance(lstlat, lstlon, shape[i][''], shape[i][''])if d < best_d:best_d = dbest_i = icumul_d += dif (d_last_stop < d) or (d > ) or (i < best_i + ):continueelse:badness += best_dbreak_points.append(best_i)last_i = best_ilstlat, lstlon = stlat, stlonbreak_shape_points.append(shape[best_i])breakelse:badness += best_dbreak_points.append(best_i)last_i = best_ilstlat, lstlon = stlat, stlonbreak_shape_points.append(shape[best_i])passreturn break_points, badness", "docstring": "Find corresponding shape points for a list of stops and create shape break points.\n\n Parameters\n ----------\n stops: stop-sequence (list)\n List of stop points\n shape: list of shape points\n shape-sequence of shape points\n\n Returns\n -------\n break_points: list[int]\n stops[i] corresponds to shape[break_points[i]]. This list can\n be used to partition the shape points into segments between\n one stop and the next.\n badness: float\n Lower indicates better fit to the shape. 
This is the sum of\n distances (in meters) between each stop and its closest\n shape point. This is not needed in normal use, but in the\n cases where you must determine the best-fitting shape for a\n stop-sequence, use this.", "id": "f12861:m1"} {"signature": "def find_best_segments(cur, stops, shape_ids, route_id=None,breakpoints_cache=None):", "body": "cache_key = Noneif breakpoints_cache is not None:cache_key = (route_id, tuple(x[''] for x in stops))if cache_key in breakpoints_cache:print('')return breakpoints_cache[cache_key]if route_id is not None:cur.execute('''''',(route_id,))data = cur.fetchall()if not data:print(\"\" % route_id)return [], None, None, Noneshape_ids = zip(*data)[]results = []for shape_id in shape_ids:shape = get_shape_points(cur, shape_id)breakpoints, badness = find_segments(stops, shape)results.append([badness, breakpoints, shape, shape_id])if len(stops) > and badness < *(len(stops)):breakbest = np.argmin(zip(*results)[])badness = results[best][]breakpoints = results[best][]shape = results[best][]shape_id = results[best][]if breakpoints_cache is not None:print(\"\", cache_key[], hash(cache_key[:]))breakpoints_cache[cache_key] = breakpoints, badness, shape, shape_idreturn breakpoints, badness, shape, shape_id", "docstring": "Finds the best shape_id for a stop-sequence.\n\n This is used in cases like when you have GPS data with a route\n name, but you don't know the route direction. It tries shapes\n going both directions and returns the shape that best matches.\n Could be used in other cases as well.\n\n Parameters\n ----------\n cur : sqlite3.Cursor\n database cursor\n stops : list\n shape_ids : list of shape_id:s\n route_id : route_id to search for stops\n breakpoints_cache : dict\n If given, use this to cache results from this function.", "id": "f12861:m2"} {"signature": "def return_segments(shape, break_points):", "body": "segs = []bp = bp2 = for i in range(len(break_points)-):bp = break_points[i] if break_points[i] is not None else bp2bp2 = break_points[i+] if break_points[i+] is not None else bpsegs.append(shape[bp:bp2+])segs.append([])return segs", "docstring": "Break a shape into segments between stops using break_points.\n\n This function can use the `break_points` outputs from\n `find_segments`, and cuts the shape-sequence into pieces\n corresponding to each stop.", "id": "f12861:m3"} {"signature": "def gen_cumulative_distances(stops):", "body": "stops[][''] = for i in range(, len(stops)):stops[i][''] = stops[i-][''] + wgs84_distance(stops[i-][''], stops[i-][''],stops[i][''], stops[i][''],)for stop in stops:stop[''] = int(stop[''])", "docstring": "Add a 'd' key for distances to a stop/shape-sequence.\n\nThis takes a shape-sequence or stop-sequence, and adds an extra\n'd' key that is cumulative, geographic distances between each\npoint. This uses `wgs84_distance` from the util module. The\ndistances are in meters. 
Distances are rounded to the nearest\ninteger, because otherwise JSON size increases greatly.\n\nParameters\n----------\nstops: list\n elements are dicts with 'lat' and 'lon' keys\n and the function adds the 'd' key ('d' stands for distance)\n to the dictionaries", "id": "f12861:m4"} {"signature": "def get_shape_points(cur, shape_id):", "body": "cur.execute('''''', (shape_id,))shape_points = [dict(seq=row[], lat=row[], lon=row[], d=row[])for row in cur]return shape_points", "docstring": "Given a shape_id, return its shape-sequence.\n\nParameters\n----------\ncur: sqlite3.Cursor\n cursor to a GTFS database\nshape_id: str\n id of the route\n\nReturns\n-------\nshape_points: list\n elements are dictionaries containing the 'seq', 'lat', and 'lon' of the shape", "id": "f12861:m5"} {"signature": "def get_shape_points2(cur, shape_id):", "body": "cur.execute('''''', (shape_id,))shape_points = {'': [], '': [], '': [], '': []}for row in cur:shape_points[''].append(row[])shape_points[''].append(row[])shape_points[''].append(row[])shape_points[''].append(row[])return shape_points", "docstring": "Given a shape_id, return its shape-sequence (as a dict of lists).\nget_shape_points function returns them as a list of dicts\n\nParameters\n----------\ncur: sqlite3.Cursor\n cursor to a GTFS database\nshape_id: str\n id of the route\n\nReturns\n-------\nshape_points: dict of lists\n dict contains keys 'seq', 'lat', 'lon', and 'd'(istance) of the shape", "id": "f12861:m6"} {"signature": "def get_route_shape_segments(cur, route_id):", "body": "cur.execute('''''', (route_id,))shape_points = [dict(seq=row[], lat=row[], lon=row[]) for row in cur]return shape_points", "docstring": "Given a route_id, return its stop-sequence.\n\nParameters\n----------\ncur: sqlite3.Cursor\n cursor to a GTFS database\nroute_id: str\n id of the route\n\nReturns\n-------\nshape_points: list\n elements are dictionaries containing the 'seq', 'lat', and 'lon' of the shape", "id": "f12861:m7"} {"signature": "def get_shape_between_stops(cur, trip_I, seq_stop1=None, seq_stop2=None, shape_breaks=None):", "body": "assert (seq_stop1 and seq_stop2) or shape_breaksif not shape_breaks:shape_breaks = []for seq_stop in [seq_stop1, seq_stop2]:query = \"\"\"\"\"\" % (trip_I, seq_stop)for row in cur.execute(query):shape_breaks.append(row[])assert len(shape_breaks) == query = \"\"\"\"\"\" % (trip_I, shape_breaks[], shape_breaks[])shapedict = {'': [], '': [], '': []}for row in cur.execute(query):shapedict[''].append(row[])shapedict[''].append(row[])shapedict[''].append(row[])return shapedict", "docstring": "Given a trip_I (shortened id), return shape points between two stops\n(seq_stop1 and seq_stop2).\n\nTrip_I is used for matching obtaining the full shape of one trip (route).\nFrom the resulting shape we then obtain only shape points between\nstop_seq1 and stop_seq2\ntrip_I---(trips)--->shape_id\ntrip_I, seq_stop1----(stop_times)---> shape_break1\ntrip_I, seq_stop2----(stop_times)---> shape_break2\nshapes_id+shape_break1+shape_break2 --(shapes)--> result\n\nParameters\n----------\ncur : sqlite3.Cursor\n cursor to sqlite3 DB containing GTFS\ntrip_I : int\n transformed trip_id (i.e. 
a new column that is created when\n GTFS is imported to a DB)\nseq_stop1: int\n a positive integer describing the index of the point of the shape that\n corresponds to the first stop\nseq_stop2: int\n a positive integer describing the index of the point of the shape that\n corresponds to the second stop\nshape_breaks: ??\n\nReturns\n-------\nshapedict: dict\n Dictionary containing the latitudes and longitudes:\n lats=shapedict['lat']\n lons=shapedict['lon']", "id": "f12861:m8"} {"signature": "def get_trip_points(cur, route_id, offset=, tripid_glob=''):", "body": "extra_where = ''if tripid_glob:extra_where = \"\" % tripid_globcur.execute('''''''''''''' % extra_where, (route_id, offset))stop_points = [dict(seq=row[], lat=row[], lon=row[]) for row in cur]return stop_points", "docstring": "Get all scheduled stops on a particular route_id.\n\n Given a route_id, return the trip-stop-list with\n latitude/longitudes. This is a bit more tricky than it seems,\n because we have to go from table route->trips->stop_times. This\n function finds an arbitrary trip (in trip table) with this route ID\n and then returns all stop points for that trip.\n\n Parameters\n ----------\n cur : sqlite3.Cursor\n cursor to sqlite3 DB containing GTFS\n route_id : string or any\n route_id to get stop points of\n offset : int\n LIMIT offset if you don't want the first trip returned.\n tripid_glob : string\n If given, allows you to limit tripids which can be selected.\n Mainly useful in debugging.\n\n Returns\n -------\n stop-list\n List of stops in stop-seq format.", "id": "f12861:m9"} {"signature": "def interpolate_shape_times(shape_distances, shape_breaks, stop_times):", "body": "shape_times = np.zeros(len(shape_distances))shape_times[:shape_breaks[]] = stop_times[]for i in range(len(shape_breaks)-):cur_break = shape_breaks[i]cur_time = stop_times[i]next_break = shape_breaks[i+]next_time = stop_times[i+]if cur_break == next_break:shape_times[cur_break] = stop_times[i]else:cur_distances = shape_distances[cur_break:next_break+]norm_distances = ((np.array(cur_distances)-float(cur_distances[])) /float(cur_distances[-] - cur_distances[]))times = (-norm_distances)*cur_time+norm_distances*next_timeshape_times[cur_break:next_break] = times[:-]shape_times[shape_breaks[-]:] = stop_times[-]return list(shape_times)", "docstring": "Interpolate passage times for shape points.\n\nParameters\n----------\nshape_distances: list\n list of cumulative distances along the shape\nshape_breaks: list\n list of shape_breaks\nstop_times: list\n list of stop_times\n\nReturns\n-------\nshape_times: list of ints (seconds) / numpy array\n interpolated shape passage times\n\nThe values of stop times before the first shape-break are given the first\nstopping time, and any shape points after the last break point are\ngiven the value of the last shape point.", "id": "f12861:m10"} {"signature": "def get_convex_hull_coordinates(gtfs):", "body": "lons, lats = _get_stop_lat_lons(gtfs)lon_lats = list(zip(lons, lats))polygon = MultiPoint(lon_lats).convex_hullhull_lons, hull_lats= polygon.exterior.coords.xyreturn hull_lats, hull_lons", "docstring": "Parameters\n----------\ngtfs: gtfs.GTFS\n\nReturns\n-------\nlons: list\n of floats\nlats: list\n of floats", "id": "f12863:m0"} {"signature": "def get_buffered_area_of_stops(gtfs, buffer_meters, resolution):", "body": "lons, lats = _get_stop_lat_lons(gtfs)a = compute_buffered_area_of_stops(lats, lons, buffer_meters, resolution)return a", "docstring": "Compute the total area of all buffered stops in PT 
network.\n\nParameters\n----------\ngtfs: gtfs.GTFS\nbuffer_meters: meters around the stop to buffer.\nresolution: increases the accuracy of the calculated area with computation time. Default = 16 \n\nReturns\n-------\nTotal area covered by the buffered stops in square meters.", "id": "f12863:m5"} {"signature": "def __init__(self, gtfs, start_time_ut, lat, lon, max_duration_ut, min_transfer_time=,shapes=True, walk_speed=):", "body": "self.gtfs = gtfsself.start_time_ut = start_time_utself.lat = latself.lon = lonself.max_duration_ut = max_duration_utself.min_transfer_time = min_transfer_timeself.shapes = shapesself.event_heap = Noneself.walk_speed = walk_speedself._uninfected_stops = Noneself._stop_I_to_spreading_stop = Noneself._initialized = Falseself._has_run = False", "docstring": "Parameters\n----------\ngtfs: GTFS\n the underlying GTFS (database) connection for getting data\nstart_time_ut: number\n Start time of the spreading.\nlat: float\n latitude of the spreading seed location\nlon: float\n longitude of the spreading seed location\nmax_duration_ut: int\n maximum duration of the spreading process (in seconds)\nmin_transfer_time : int\n minimum transfer time in seconds\nshapes : bool\n whether to include shapes", "id": "f12864:c0:m0"} {"signature": "def _run(self):", "body": "if self._has_run:raise RuntimeError(\"\"\"\")i = while self.event_heap.size() > and len(self._uninfected_stops) > :event = self.event_heap.pop_next_event()this_stop = self._stop_I_to_spreading_stop[event.from_stop_I]if event.arr_time_ut > self.start_time_ut + self.max_duration_ut:breakif this_stop.can_infect(event):target_stop = self._stop_I_to_spreading_stop[event.to_stop_I]already_visited = target_stop.has_been_visited()target_stop.visit(event)if not already_visited:self._uninfected_stops.remove(event.to_stop_I)print(i, self.event_heap.size())transfer_distances = self.gtfs.get_straight_line_transfer_distances(event.to_stop_I)self.event_heap.add_walk_events_to_heap(transfer_distances, event, self.start_time_ut,self.walk_speed, self._uninfected_stops,self.max_duration_ut)i += self._has_run = True", "docstring": "Run the actual simulation.", "id": "f12864:c0:m3"} {"signature": "def get_min_visit_time(self):", "body": "if not self.visit_events:return float('')else:return min(self.visit_events, key=lambda event: event.arr_time_ut).arr_time_ut", "docstring": "Get the earliest visit time of the stop.", "id": "f12865:c0:m1"} {"signature": "def visit(self, event):", "body": "to_visit = Falseif event.arr_time_ut <= self.min_transfer_time+self.get_min_visit_time():to_visit = Trueelse:for ve in self.visit_events:if (event.trip_I == ve.trip_I) and event.arr_time_ut < ve.arr_time_ut:to_visit = Trueif to_visit:self.visit_events.append(event)min_time = self.get_min_visit_time()self.visit_events = [v for v in self.visit_events if v.arr_time_ut <= min_time+self.min_transfer_time]return to_visit", "docstring": "Visit the stop if it has not been visited already by an event with\nearlier arr_time_ut (or with other trip that does not require a transfer)\n\nParameters\n----------\nevent : Event\n an instance of the Event (namedtuple)\n\nReturns\n-------\nvisited : bool\n if visit is stored, returns True, otherwise False", "id": "f12865:c0:m3"} {"signature": "def can_infect(self, event):", "body": "if event.from_stop_I != self.stop_I:return Falseif not self.has_been_visited():return Falseelse:time_sep = event.dep_time_ut-self.get_min_visit_time()if (time_sep >= self.min_transfer_time) or (event.trip_I == - and time_sep >= ):return 
Trueelse:for visit in self.visit_events:if (event.trip_I == visit.trip_I) and (time_sep >= ):return Truereturn False", "docstring": "Whether the spreading stop can infect using this event.", "id": "f12865:c0:m5"} {"signature": "def __init__(self, pd_df=None):", "body": "self.heap = []keys = ['', '', '', '', '']n = len(pd_df)key_to_j = {}for j, key in enumerate(pd_df.columns.values):key_to_j[key] = jpd_df_values = pd_df.valuesfor i in range(n):vals = []for key in keys:j = key_to_j[key]vals.append(pd_df_values[i, j])e = Event(*vals)self.add_event(e)", "docstring": "Parameters\n----------\npd_df : Pandas.Dataframe\n Initial list of", "id": "f12866:c0:m0"} {"signature": "def add_event(self, event):", "body": "assert event.dep_time_ut <= event.arr_time_utheappush(self.heap, event)", "docstring": "Add an event to the heap/priority queue\n\nParameters\n----------\nevent : Event", "id": "f12866:c0:m1"} {"signature": "def size(self):", "body": "return len(self.heap)", "docstring": "Return the size of the heap", "id": "f12866:c0:m3"} {"signature": "def add_walk_events_to_heap(self, transfer_distances, e, start_time_ut, walk_speed, uninfected_stops, max_duration_ut):", "body": "n = len(transfer_distances)dists_values = transfer_distances.valuesto_stop_I_index = np.nonzero(transfer_distances.columns == '')[][]d_index = np.nonzero(transfer_distances.columns == '')[][]for i in range(n):transfer_to_stop_I = dists_values[i, to_stop_I_index]if transfer_to_stop_I in uninfected_stops:d = dists_values[i, d_index]transfer_arr_time = e.arr_time_ut + int(d/float(walk_speed))if transfer_arr_time > start_time_ut+max_duration_ut:continuete = Event(transfer_arr_time, e.arr_time_ut, e.to_stop_I, transfer_to_stop_I, WALK)self.add_event(te)", "docstring": "Parameters\n----------\ntransfer_distances:\ne : Event\nstart_time_ut : int\nwalk_speed : float\nuninfected_stops : list\nmax_duration_ut : int", "id": "f12866:c0:m4"} {"signature": "def timeit(method):", "body": "def timed(*args, **kw):time_start = time.time()result = method(*args, **kw)time_end = time.time()print('' % (method.__name__, time_end-time_start, str(args)[:], kw))return resultreturn timed", "docstring": "A Python decorator for printing out the execution time for a function.\n\nAdapted from:\nwww.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods", "id": "f12868:m0"} {"signature": "def get_run_time(self):", "body": "assert self._has_runreturn self._run_time", "docstring": "Returns\n-------\nrun_time: float\n running time of the algorithm in seconds", "id": "f12869:c0:m3"} {"signature": "def update_pareto_optimal_tuples(self, new_pareto_tuple):", "body": "if new_pareto_tuple.duration() > self._walk_to_target_duration:direct_walk_label = self._label_class.direct_walk_label(new_pareto_tuple.departure_time,self._walk_to_target_duration)if not direct_walk_label.dominates(new_pareto_tuple):raisedirect_walk_label = self._label_class.direct_walk_label(new_pareto_tuple.departure_time, self._walk_to_target_duration)if direct_walk_label.dominates(new_pareto_tuple):return Falseif self._new_paretotuple_is_dominated_by_old_tuples(new_pareto_tuple):return Falseelse:self._remove_old_tuples_dominated_by_new_and_insert_new_paretotuple(new_pareto_tuple)return True", "docstring": "# this function should be optimized\n\nParameters\n----------\nnew_pareto_tuple: LabelTimeSimple\n\nReturns\n-------\nadded: bool\n whether new_pareto_tuple was added to the set of pareto-optimal tuples", "id": "f12870:c0:m2"} {"signature": "def 
evaluate_earliest_arrival_time_at_target(self, dep_time, transfer_margin):", "body": "minimum = dep_time + self._walk_to_target_durationdep_time_plus_transfer_margin = dep_time + transfer_marginfor label in self._labels:if label.departure_time >= dep_time_plus_transfer_margin and label.arrival_time_target < minimum:minimum = label.arrival_time_targetreturn float(minimum)", "docstring": "Get the earliest arrival time at the target, given a departure time.\n\nParameters\n----------\ndep_time : float, int\n time in unix seconds\ntransfer_margin: float, int\n transfer margin in seconds\n\nReturns\n-------\narrival_time : float\n Arrival time in the given time unit (seconds after unix epoch).", "id": "f12870:c0:m5"} {"signature": "def _truncate_colormap(cmap, minval=, maxval=, n=):", "body": "new_cmap = LinearSegmentedColormap.from_list(''.format(n=cmap.name, a=minval, b=maxval),cmap(numpy.linspace(minval, maxval, n)))return new_cmap", "docstring": "Truncates a colormap to use.\nCode originall from http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib", "id": "f12871:m2"} {"signature": "def __init__(self, labels, walk_to_target_duration, start_time_dep, end_time_dep):", "body": "self._node_profile_final_labels = labelsself.start_time_dep = start_time_depself.end_time_dep = end_time_depself.all_labels = [label for label in self._node_profile_final_labels if(start_time_dep <= label.departure_time <= end_time_dep)]after_label_candidates = [label for label in self._node_profile_final_labels if(label.departure_time > self.end_time_dep)]after_label_candidates.sort(key=lambda el: (el.arrival_time_target, el.n_boardings))min_n_boardings_observed = float('')after_labels = []for candidate_after_label in after_label_candidates:if candidate_after_label.n_boardings < min_n_boardings_observed:after_labels.append(candidate_after_label)min_n_boardings_observed = candidate_after_label.n_boardingsself.all_labels.extend(after_labels)if len(after_labels) is :self._labels_within_time_frame = self.all_labelselse:self._labels_within_time_frame = self.all_labels[:-len(after_labels)]self._walk_to_target_duration = walk_to_target_durationself._n_boardings_to_simple_time_analyzers = {}self._transfers_on_fastest_paths_analyzer = self._get_transfers_on_fastest_path_analyzer()", "docstring": "Initialize the data structures required by\n\nParameters\n----------\nnode_profile: NodeProfileMultiObjective", "id": "f12871:c0:m1"} {"signature": "def get_time_profile_analyzer(self, max_n_boardings=None):", "body": "if max_n_boardings is None:max_n_boardings = self.max_trip_n_boardings()if not max_n_boardings in self._n_boardings_to_simple_time_analyzers:if max_n_boardings == :valids = []else:candidate_labels = [LabelTimeSimple(label.departure_time, label.arrival_time_target)for label in self._node_profile_final_labels if((self.start_time_dep <= label.departure_time)and label.n_boardings <= max_n_boardings)]valids = compute_pareto_front(candidate_labels)valids.sort(key=lambda label: -label.departure_time)profile = NodeProfileSimple(self._walk_to_target_duration)for valid in valids:profile.update_pareto_optimal_tuples(valid)npat = NodeProfileAnalyzerTime.from_profile(profile, self.start_time_dep, self.end_time_dep)self._n_boardings_to_simple_time_analyzers[max_n_boardings] = npatreturn self._n_boardings_to_simple_time_analyzers[max_n_boardings]", "docstring": "Parameters\n----------\nmax_n_boardings: int\n The maximum number of boardings allowed for the labels used to 
construct the \"temporal distance profile\"\n\nReturns\n-------\nanalyzer: NodeProfileAnalyzerTime", "id": "f12871:c0:m10"} {"signature": "def median_temporal_distances(self, min_n_boardings=None, max_n_boardings=None):", "body": "if min_n_boardings is None:min_n_boardings = if max_n_boardings is None:max_n_boardings = self.max_trip_n_boardings()if max_n_boardings is None:max_n_boardings = median_temporal_distances = [float('') for _ in range(min_n_boardings, max_n_boardings + )]for n_boardings in range(min_n_boardings, max_n_boardings + ):simple_analyzer = self.get_time_profile_analyzer(n_boardings)median_temporal_distances[n_boardings] = simple_analyzer.median_temporal_distance()return median_temporal_distances", "docstring": "Returns\n-------\nmean_temporal_distances: list\n list indices encode the number of vehicle legs each element\n in the list tells gets the mean temporal distance", "id": "f12871:c0:m26"} {"signature": "def n_pareto_optimal_trips(self):", "body": "return float(len(self._labels_within_time_frame))", "docstring": "Get number of pareto-optimal trips\n\nReturns\n-------\nn_trips: float", "id": "f12871:c0:m36"} {"signature": "def __init__(self,transit_events,targets,start_time_ut=None,end_time_ut=None,transfer_margin=,walk_network=None,walk_speed=,verbose=False,track_vehicle_legs=True,track_time=True,track_route=False):", "body": "AbstractRoutingAlgorithm.__init__(self)assert (len(transit_events) == len(set(transit_events))), \"\"self._transit_connections = transit_eventsif start_time_ut is None:start_time_ut = transit_events[-].departure_timeif end_time_ut is None:end_time_ut = transit_events[].departure_timeself._start_time = start_time_utself._end_time = end_time_utself._transfer_margin = transfer_marginif walk_network is None:walk_network = networkx.Graph()self._walk_network = walk_networkself._walk_speed = walk_speedself._verbose = verboseself._count_vehicle_legs = track_vehicle_legsself._consider_time = track_timeassert(track_time or track_vehicle_legs)if track_vehicle_legs:if track_time:if track_route:self._label_class = LabelTimeBoardingsAndRouteelse:self._label_class = LabelTimeWithBoardingsCountelse:self._label_class = LabelVehLegCountelse:if track_route:self._label_class = LabelTimeAndRouteelse:self._label_class = LabelTimeprint(\"\", str(self._label_class))self._stop_departure_times, self._stop_arrival_times = self.__compute_stop_dep_and_arrival_times()self._all_nodes = set.union(set(self._stop_departure_times.keys()),set(self._stop_arrival_times.keys()),set(self._walk_network.nodes()))self._pseudo_connections = self.__compute_pseudo_connections()self._add_pseudo_connection_departures_to_stop_departure_times()self._all_connections = self._pseudo_connections + self._transit_connectionsself._all_connections.sort(key=lambda connection: (-connection.departure_time, -connection.seq))self._augment_all_connections_with_arrival_stop_next_dep_time()if isinstance(targets, list):self._targets = targetselse:self._targets = [targets]self.reset(self._targets)", "docstring": "Parameters\n----------\ntransit_events: list[Connection]\n events are assumed to be ordered in DECREASING departure_time (!)\ntargets: int, list\n index of the target stop\nstart_time_ut : int, optional\n start time in unixtime seconds\nend_time_ut: int, optional\n end time in unixtime seconds (no connections will be scanned after this time)\ntransfer_margin: int, optional\n required extra margin required for transfers in seconds\nwalk_speed: float, optional\n walking speed between stops in meters / 
second.\nwalk_network: networkx.Graph, optional\n each edge should have the walking distance as a data attribute (\"distance_shape\") expressed in meters\nverbose: boolean, optional\n whether to print out progress\ntrack_vehicle_legs: boolean, optional\n whether to consider the number of vehicle legs\ntrack_time: boolean, optional\n whether to consider time in the set of pareto_optimal", "id": "f12872:c0:m0"} {"signature": "def _finalize_profiles(self):", "body": "for stop, stop_profile in self._stop_profiles.items():assert (isinstance(stop_profile, NodeProfileMultiObjective))neighbor_label_bags = []walk_durations_to_neighbors = []departure_arrival_stop_pairs = []if stop_profile.get_walk_to_target_duration() != and stop in self._walk_network.node:neighbors = networkx.all_neighbors(self._walk_network, stop)for neighbor in neighbors:neighbor_profile = self._stop_profiles[neighbor]assert (isinstance(neighbor_profile, NodeProfileMultiObjective))neighbor_real_connection_labels = neighbor_profile.get_labels_for_real_connections()neighbor_label_bags.append(neighbor_real_connection_labels)walk_durations_to_neighbors.append(int(self._walk_network.get_edge_data(stop, neighbor)[\"\"] /self._walk_speed))departure_arrival_stop_pairs.append((stop, neighbor))stop_profile.finalize(neighbor_label_bags, walk_durations_to_neighbors, departure_arrival_stop_pairs)", "docstring": "Deal with the first walks by joining profiles to other stops within walking distance.", "id": "f12872:c0:m9"} {"signature": "@propertydef stop_profiles(self):", "body": "assert self._has_runreturn self._stop_profiles", "docstring": "Returns\n-------\n_stop_profiles : dict[int, NodeProfileMultiObjective]\n The pareto tuples necessary.", "id": "f12872:c0:m10"} {"signature": "def read_data_as_dataframe(self,travel_impedance_measure,from_stop_I=None,to_stop_I=None,statistic=None):", "body": "to_select = []where_clauses = []to_select.append(\"\")to_select.append(\"\")if from_stop_I is not None:where_clauses.append(\"\" + str(int(from_stop_I)))if to_stop_I is not None:where_clauses.append(\"\" + str(int(to_stop_I)))where_clause = \"\"if len(where_clauses) > :where_clause = \"\" + \"\".join(where_clauses)if not statistic:to_select.extend([\"\", \"\", \"\", \"\"])else:to_select.append(statistic)to_select_clause = \"\".join(to_select)if not to_select_clause:to_select_clause = \"\"sql = \"\" + to_select_clause + \"\" + travel_impedance_measure + where_clause + \"\"df = pd.read_sql(sql, self.conn)return df", "docstring": "Recover pre-computed travel_impedance between od-pairs from the database.\n\nReturns\n-------\nvalues: number | Pandas DataFrame", "id": "f12874:c0:m1"} {"signature": "def insert_data(self, travel_impedance_measure_name, data):", "body": "f = floatdata_tuple = [(int(x[\"\"]), int(x[\"\"]), f(x[\"\"]), f(x[\"\"]), f(x[\"\"]), f(x[\"\"])) forx in data]insert_stmt = '''''' + travel_impedance_measure_name + ''''''self.conn.executemany(insert_stmt, data_tuple)self.conn.commit()", "docstring": "Parameters\n----------\ntravel_impedance_measure_name: str\ndata: list[dict]\n Each list element must contain keys:\n \"from_stop_I\", \"to_stop_I\", \"min\", \"max\", \"median\" and \"mean\"", "id": "f12874:c0:m5"} {"signature": "def __init__(self, transit_events, seed_stop, start_time,end_time, transfer_margin, walk_network, walk_speed):", "body": "AbstractRoutingAlgorithm.__init__(self)self._seed = seed_stopself._connections = transit_eventsself._start_time = start_timeself._end_time = end_timeself._transfer_margin = 
transfer_marginself._walk_network = walk_networkself._walk_speed = walk_speedself.__stop_labels = defaultdict(lambda: float(''))self.__stop_labels[seed_stop] = start_timeself.__trip_reachable = defaultdict(lambda: False)", "docstring": "Parameters\n----------\ntransit_events: list[Connection]\nseed_stop: int\n index of the seed node\nstart_time : int\n start time in unixtime seconds\nend_time: int\n end time in unixtime seconds (no new connections will be scanned after this time)\ntransfer_margin: int\n required extra margin required for transfers in seconds\nwalk_speed: float\n walking speed between stops in meters / second\nwalk_network: networkx.Graph\n each edge should have the walking distance as a data attribute (\"d_walk\") expressed in meters", "id": "f12875:c0:m0"} {"signature": "def get_arrival_times(self):", "body": "assert self._has_runreturn self.__stop_labels", "docstring": "Returns\n-------\narrival_times: dict[int, float]\n maps integer stop_ids to floats", "id": "f12875:c0:m1"} {"signature": "def _scan_footpaths(self, stop_id, walk_departure_time):", "body": "for _, neighbor, data in self._walk_network.edges_iter(nbunch=[stop_id], data=True):d_walk = data[\"\"]arrival_time = walk_departure_time + d_walk / self._walk_speedself._update_stop_label(neighbor, arrival_time)", "docstring": "Scan the footpaths originating from stop_id\n\nParameters\n----------\nstop_id: int", "id": "f12875:c0:m4"} {"signature": "def __init__(self,dep_times=None,walk_to_target_duration=float(''),label_class=LabelTimeWithBoardingsCount,transit_connection_dep_times=None,closest_target=None,node_id=None):", "body": "if dep_times is None:dep_times = []n_dep_times = len(dep_times)assert n_dep_times == len(set(dep_times)), \"\"self._departure_times = list(reversed(sorted(dep_times)))self.dep_times_to_index = dict(zip(self._departure_times, range(len(self._departure_times))))self._label_bags = [[]] * len(self._departure_times)self._walk_to_target_duration = walk_to_target_durationself._min_dep_time = float('')self.label_class = label_classself.closest_target = closest_targetif self.label_class == LabelTimeBoardingsAndRoute and self._walk_to_target_duration < float(''):assert (self.closest_target is not None)if transit_connection_dep_times is not None:self._connection_dep_times = transit_connection_dep_timeselse:self._connection_dep_times = dep_timesassert (isinstance(self._connection_dep_times, (list, numpy.ndarray)))self._closed = Falseself._finalized = Falseself._final_pareto_optimal_labels = Noneself._real_connection_labels = Noneself.node_id = node_id", "docstring": "Parameters\n----------\ndep_times\nwalk_to_target_duration\nlabel_class: label class to be used\ntransit_connection_dep_times:\n if not given, all connections are assumed to be real connections\nclosest_target: int, optional\n stop_I of the closest target if within walking distance (and Routes are recorded)", "id": "f12877:c0:m0"} {"signature": "def _check_dep_time_is_valid(self, dep_time):", "body": "assert dep_time <= self._min_dep_time, \"\"dep_time_index = self.dep_times_to_index[dep_time]if self._min_dep_time < float(''):min_dep_index = self.dep_times_to_index[self._min_dep_time]assert min_dep_index == dep_time_index or (min_dep_index == dep_time_index - ),\"\"else:assert dep_time_index is , \"\"self._min_dep_time = dep_time", "docstring": "A simple checker, that connections are coming in descending order of departure time\nand that no departure time has been 
\"skipped\".\n\nParameters\n----------\ndep_time\n\nReturns\n-------\nNone", "id": "f12877:c0:m1"} {"signature": "def get_walk_to_target_duration(self):", "body": "return self._walk_to_target_duration", "docstring": "Get walking distance to target node.\n\nReturns\n-------\nwalk_to_target_duration: float", "id": "f12877:c0:m2"} {"signature": "def update(self, new_labels, departure_time_backup=None):", "body": "if self._closed:raise RuntimeError(\"\")try:departure_time = next(iter(new_labels)).departure_timeexcept StopIteration:departure_time = departure_time_backupself._check_dep_time_is_valid(departure_time)for new_label in new_labels:assert (new_label.departure_time == departure_time)dep_time_index = self.dep_times_to_index[departure_time]if dep_time_index > :mod_prev_labels = [label.get_copy_with_specified_departure_time(departure_time) for labelin self._label_bags[dep_time_index - ]]else:mod_prev_labels = list()mod_prev_labels += self._label_bags[dep_time_index]walk_label = self._get_label_to_target(departure_time)if walk_label:new_labels = new_labels + [walk_label]new_frontier = merge_pareto_frontiers(new_labels, mod_prev_labels)self._label_bags[dep_time_index] = new_frontierreturn True", "docstring": "Update the profile with the new labels.\nEach new label should have the same departure_time.\n\nParameters\n----------\nnew_labels: list[LabelTime]\n\nReturns\n-------\nadded: bool\n whether new_pareto_tuple was added to the set of pareto-optimal tuples", "id": "f12877:c0:m3"} {"signature": "def evaluate(self, dep_time, first_leg_can_be_walk=True, connection_arrival_time=None):", "body": "walk_labels = list()if first_leg_can_be_walk and self._walk_to_target_duration != float(''):if connection_arrival_time is not None:walk_labels.append(self._get_label_to_target(connection_arrival_time))else:walk_labels.append(self._get_label_to_target(dep_time))if dep_time in self.dep_times_to_index:assert (dep_time != float(''))index = self.dep_times_to_index[dep_time]labels = self._label_bags[index]pareto_optimal_labels = merge_pareto_frontiers(labels, walk_labels)else:pareto_optimal_labels = walk_labelsif not first_leg_can_be_walk:pareto_optimal_labels = [label for label in pareto_optimal_labels if not label.first_leg_is_walk]return pareto_optimal_labels", "docstring": "Get the pareto_optimal set of Labels, given a departure time.\n\nParameters\n----------\ndep_time : float, int\n time in unix seconds\nfirst_leg_can_be_walk : bool, optional\n whether to allow walking to target to be included into the profile\n (I.e. 
whether this function is called when scanning a pseudo-connection:\n \"double\" walks are not allowed.)\nconnection_arrival_time: float, int, optional\n used for computing the walking label if dep_time, i.e., connection.arrival_stop_next_departure_time, is infinity)\nconnection: connection object\n\nReturns\n-------\npareto_optimal_labels : set\n Set of Labels", "id": "f12877:c0:m4"} {"signature": "def get_final_optimal_labels(self):", "body": "assert self._finalized, \"\"return self._final_pareto_optimal_labels", "docstring": "Get pareto-optimal labels.\n\nReturns\n-------", "id": "f12877:c0:m7"} {"signature": "def finalize(self, neighbor_label_bags=None, walk_durations=None, departure_arrival_stop_pairs=None):", "body": "assert (not self._finalized)if self._final_pareto_optimal_labels is None:self._compute_real_connection_labels()if neighbor_label_bags is not None:assert (len(walk_durations) == len(neighbor_label_bags))self._compute_final_pareto_optimal_labels(neighbor_label_bags,walk_durations,departure_arrival_stop_pairs)else:self._final_pareto_optimal_labels = self._real_connection_labelsself._finalized = Trueself._closed = True", "docstring": "Parameters\n----------\nneighbor_label_bags: list\n each list element is a list of labels corresponding to a neighboring node\n (note: only labels with first connection being a departure should be included)\nwalk_durations: list\ndeparture_arrival_stop_pairs: list of tuples\nReturns\n-------\nNone", "id": "f12877:c0:m8"} {"signature": "def __init__(self, profile_blocks, cutoff_distance=None, **kwargs):", "body": "for i, block in enumerate(profile_blocks[:-]):assert block.start_time < block.end_timeassert block.end_time == profile_blocks[i + ].start_timeassert block.distance_start >= block.distance_endself._profile_blocks = profile_blocksself._start_time = profile_blocks[].start_timeself._end_time = profile_blocks[-].end_timeself._cutoff_distance = cutoff_distanceif cutoff_distance is not None:self._apply_cutoff(cutoff_distance)self.from_stop_I = Noneself.to_stop_I = Nonefor key, value in kwargs.items():if key == \"\":self.from_stop_I = valueif key == \"\":self.to_stop_I = value", "docstring": "Parameters\n----------\nprofile_blocks: list[gtfspy.routing.profile_block.ProfileBlock]", "id": "f12878:c0:m0"} {"signature": "def largest_finite_distance(self):", "body": "block_start_distances = [block.distance_start for block in self._profile_blocks ifblock.distance_start < float('')]block_end_distances = [block.distance_end for block in self._profile_blocks ifblock.distance_end < float('')]distances = block_start_distances + block_end_distancesif len(distances) > :return max(distances)else:return None", "docstring": "Compute the maximum temporal distance.\n\nReturns\n-------\nmax_temporal_distance : float", "id": "f12878:c0:m6"} {"signature": "def _temporal_distance_cdf(self):", "body": "distance_split_points = set()for block in self._profile_blocks:if block.distance_start != float(''):distance_split_points.add(block.distance_end)distance_split_points.add(block.distance_start)distance_split_points_ordered = numpy.array(sorted(list(distance_split_points)))temporal_distance_split_widths = distance_split_points_ordered[:] - distance_split_points_ordered[:-]trip_counts = numpy.zeros(len(temporal_distance_split_widths))delta_peaks = defaultdict(lambda: )for block in self._profile_blocks:if block.distance_start == block.distance_end:delta_peaks[block.distance_end] += block.width()else:start_index = numpy.searchsorted(distance_split_points_ordered, 
block.distance_end)end_index = numpy.searchsorted(distance_split_points_ordered, block.distance_start)trip_counts[start_index:end_index] += unnormalized_cdf = numpy.array([] + list(numpy.cumsum(temporal_distance_split_widths * trip_counts)))if not (numpy.isclose([unnormalized_cdf[-]],[self._end_time - self._start_time - sum(delta_peaks.values())], atol=).all()):print(unnormalized_cdf[-], self._end_time - self._start_time - sum(delta_peaks.values()))raise RuntimeError(\"\")if len(delta_peaks) > :for peak in delta_peaks.keys():if peak == float(''):continueindex = numpy.nonzero(distance_split_points_ordered == peak)[][]unnormalized_cdf = numpy.insert(unnormalized_cdf, index, unnormalized_cdf[index])distance_split_points_ordered = numpy.insert(distance_split_points_ordered, index,distance_split_points_ordered[index])unnormalized_cdf[(index + ):] = unnormalized_cdf[(index + ):] + delta_peaks[peak]norm_cdf = unnormalized_cdf / (unnormalized_cdf[-] + delta_peaks[float('')])return distance_split_points_ordered, norm_cdf", "docstring": "Temporal distance cumulative density function.\n\nReturns\n-------\nx_values: numpy.array\n values for the x-axis\ncdf: numpy.array\n cdf values", "id": "f12878:c0:m8"} {"signature": "def _temporal_distance_pdf(self):", "body": "temporal_distance_split_points_ordered, norm_cdf = self._temporal_distance_cdf()delta_peak_loc_to_probability_mass = {}non_delta_peak_split_points = [temporal_distance_split_points_ordered[]]non_delta_peak_densities = []for i in range(, len(temporal_distance_split_points_ordered) - ):left = temporal_distance_split_points_ordered[i]right = temporal_distance_split_points_ordered[i + ]width = right - leftprob_mass = norm_cdf[i + ] - norm_cdf[i]if width == :delta_peak_loc_to_probability_mass[left] = prob_masselse:non_delta_peak_split_points.append(right)non_delta_peak_densities.append(prob_mass / float(width))assert (len(non_delta_peak_densities) == len(non_delta_peak_split_points) - )return numpy.array(non_delta_peak_split_points),numpy.array(non_delta_peak_densities), delta_peak_loc_to_probability_mass", "docstring": "Temporal distance probability density function.\n\nReturns\n-------\nnon_delta_peak_split_points: numpy.array\nnon_delta_peak_densities: numpy.array\n len(density) == len(temporal_distance_split_points_ordered) -1\ndelta_peak_loc_to_probability_mass : dict", "id": "f12878:c0:m9"} {"signature": "def get_upstream_stops_ratio(self, target, trough_stops, ratio):", "body": "if isinstance(trough_stops, list):trough_stops = \"\".join(trough_stops)query = \"\"\"\"\"\".format(target=target, trough_stops=trough_stops, ratio=ratio)df = read_sql_query(query, self.conn)return df", "docstring": "Selects the stops for which the ratio or higher proportion of trips to the target passes trough a set of trough stops\n:param target: target of trips\n:param trough_stops: stops where the selected trips are passing trough\n:param ratio: threshold for inclusion\n:return:", "id": "f12892:c0:m10"} {"signature": "def passing_journeys_per_stop(self):", "body": "pass", "docstring": ":return:", "id": "f12892:c0:m11"} {"signature": "def n_departure_stop_alternatives(self):", "body": "pass", "docstring": ":return:", "id": "f12892:c0:m13"} {"signature": "def get_journey_time(self):", "body": "pass", "docstring": "(using the connection objects)\n:return:", "id": "f12892:c0:m21"} {"signature": "def get_journey_time_per_mode(self, modes=None):", "body": "pass", "docstring": ":param modes: return these\n:return:", "id": "f12892:c0:m22"} {"signature": "def 
__init__(self, gtfs_path, journey_db_path, routing_params=None, multitarget_routing=False,track_vehicle_legs=True, track_route=False):", "body": "self.multitarget_routing = multitarget_routingself.track_route = track_routeself.track_vehicle_legs = track_vehicle_legsself.gtfs_path = gtfs_pathself.gtfs = GTFS(self.gtfs_path)self.gtfs_meta = self.gtfs.metaself.gtfs._dont_close = Trueself.od_pairs = Noneself._targets = Noneself._origins = Noneself.diff_conn = Noneif not routing_params:routing_params = dict()self.routing_params_input = routing_paramsassert os.path.exists(journey_db_path) or routing_params is not Nonejourney_db_pre_exists = os.path.isfile(journey_db_path)timeout = self.conn = sqlite3.connect(journey_db_path, timeout)if not journey_db_pre_exists:self.initialize_database()self.routing_parameters = Parameters(self.conn)self._assert_journey_computation_paramaters_match()self.journey_properties = {\"\": (_T_WALK_STR, _T_WALK_STR)}if routing_params.get('', False) orself.routing_parameters.get('', False):self.journey_properties[\"\"] = (float(\"\"), )if self.track_route:additional_journey_parameters = {\"\": (float(''), ),\"\": (float(''), ),\"\": (_T_WALK_STR, _T_WALK_STR),\"\": (float(''), )}self.journey_properties.update(additional_journey_parameters)self.travel_impedance_measure_names = list(self.journey_properties.keys())self.travel_impedance_measure_names += [\"\"]", "docstring": ":param gtfs: GTFS object\n:param list_of_stop_profiles: dict of NodeProfileMultiObjective\n:param multitarget_routing: bool", "id": "f12893:c0:m0"} {"signature": "@timeitdef import_journey_data_for_target_stop(self, target_stop_I, origin_stop_I_to_journey_labels, enforce_synchronous_writes=False):", "body": "cur = self.conn.cursor()self.conn.isolation_level = ''cur.execute('')if self.track_route:self._insert_journeys_with_route_into_db(origin_stop_I_to_journey_labels, target_stop=int(target_stop_I))else:self._insert_journeys_into_db_no_route(origin_stop_I_to_journey_labels, target_stop=int(target_stop_I))print(\"\")self.conn.commit()", "docstring": "Parameters\n----------\norigin_stop_I_to_journey_labels: dict\n key: origin_stop_Is\n value: list of labels\ntarget_stop_I: int", "id": "f12893:c0:m2"} {"signature": "def _insert_journeys_into_db_no_route(self, stop_profiles, target_stop=None):", "body": "print(\"\")journey_id = journey_list = []tot = len(stop_profiles)for i, (origin_stop, labels) in enumerate(stop_profiles.items(), start=):for label in labels:assert (isinstance(label, LabelTimeWithBoardingsCount))if self.multitarget_routing:target_stop = Noneelse:target_stop = int(target_stop)values = [int(journey_id),int(origin_stop),target_stop,int(label.departure_time),int(label.arrival_time_target),int(label.n_boardings)]journey_list.append(values)journey_id += print(\"\")insert_journeys_stmt = '''''' % (\"\".join([\"\" for x in range()]))self._executemany_exclusive(insert_journeys_stmt, journey_list)self.conn.commit()", "docstring": "con.isolation_level = 'EXCLUSIVE'\ncon.execute('BEGIN EXCLUSIVE')\n#exclusive access starts here. 
Nothing else can r/w the db, do your magic here.\ncon.commit()", "id": "f12893:c0:m5"} {"signature": "def _journey_label_generator(self, destination_stop_Is=None, origin_stop_Is=None):", "body": "conn = self.connconn.row_factory = sqlite3.Rowif destination_stop_Is is None:destination_stop_Is = self.get_targets_having_journeys()if origin_stop_Is is None:origin_stop_Is = self.get_origins_having_journeys()for destination_stop_I in destination_stop_Is:if self.track_route:label_features = \"\"\"\"\"\"\"\"else:label_features = \"\"\"\"sql = \"\" + label_features + \"\" % destination_stop_Idf = pd.read_sql_query(sql, self.conn)for origin_stop_I in origin_stop_Is:selection = df.loc[df[''] == origin_stop_I]journey_labels = []for journey in selection.to_dict(orient=''):journey[\"\"] = -try:journey_labels.append(LabelGeneric(journey))except Exception as e:print(journey)raise eyield origin_stop_I, destination_stop_I, journey_labels", "docstring": "Parameters\n----------\ndestination_stop_Is: list-like\norigin_stop_Is: list-like\n\nYields\n------\n(origin_stop_I, destination_stop_I, journey_labels) : tuple", "id": "f12893:c0:m19"} {"signature": "def _insert_travel_impedance_data_to_db(self, travel_impedance_measure_name, data):", "body": "f = floatdata_tuple = [(x[\"\"], x[\"\"], f(x[\"\"]), f(x[\"\"]), f(x[\"\"]), f(x[\"\"])) for x in data]insert_stmt = '''''' + travel_impedance_measure_name + ''''''self.conn.executemany(insert_stmt, data_tuple)self.conn.commit()", "docstring": "Parameters\n----------\ntravel_impedance_measure_name: str\ndata: list[dict]\n Each list element must contain keys:\n \"from_stop_I\", \"to_stop_I\", \"min\", \"max\", \"median\" and \"mean\"", "id": "f12893:c0:m27"} {"signature": "def get_transit_connections(gtfs, start_time_ut, end_time_ut):", "body": "if start_time_ut + * < end_time_ut:warn(\"\"\"\")assert (isinstance(gtfs, GTFS))events_df = temporal_network(gtfs, start_time_ut=start_time_ut, end_time_ut=end_time_ut)assert (isinstance(events_df, pandas.DataFrame))return list(map(lambda e: Connection(e.from_stop_I, e.to_stop_I, e.dep_time_ut, e.arr_time_ut, e.trip_I, e.seq),events_df.itertuples()))", "docstring": "Parameters\n----------\ngtfs: gtfspy.GTFS\nend_time_ut: int\nstart_time_ut: int\n\nReturns\n-------\nlist[Connection]", "id": "f12895:m0"} {"signature": "def get_walk_network(gtfs, max_link_distance_m=):", "body": "assert (isinstance(gtfs, GTFS))return walk_transfer_stop_to_stop_network(gtfs, max_link_distance=max_link_distance_m)", "docstring": "Parameters\n----------\ngtfs: gtfspy.GTFS\n\nReturns\n-------\nwalk_network: networkx.Graph:", "id": "f12895:m1"} {"signature": "def update_pareto_optimal_tuples(self, new_label):", "body": "assert (isinstance(new_label, LabelTime))if self._labels:assert (new_label.departure_time <= self._labels[-].departure_time)best_later_departing_arrival_time = self._labels[-].arrival_time_targetelse:best_later_departing_arrival_time = float('')walk_to_target_arrival_time = new_label.departure_time + self._walk_to_target_durationbest_arrival_time = min(walk_to_target_arrival_time,best_later_departing_arrival_time,new_label.arrival_time_target)if (new_label.arrival_time_target < walk_to_target_arrival_time andnew_label.arrival_time_target < best_later_departing_arrival_time):self._labels.append(LabelTime(new_label.departure_time, best_arrival_time))return Trueelse:return False", "docstring": "Parameters\n----------\nnew_label: LabelTime\n\nReturns\n-------\nupdated: bool", "id": "f12896:c0:m2"} {"signature": "def 
evaluate_earliest_arrival_time_at_target(self, dep_time, transfer_margin):", "body": "minimum = dep_time + self._walk_to_target_durationfor label in self._labels[::-]:if label.departure_time >= dep_time + transfer_margin:minimum = min(minimum, label.arrival_time_target)breakreturn float(minimum)", "docstring": "Get the earliest arrival time at the target, given a departure time.\n\nParameters\n----------\ndep_time : float, int\n time in unix seconds\ntransfer_margin: float, int\n transfer margin in seconds\n\nReturns\n-------\narrival_time : float\n Arrival time in the given time unit (seconds after unix epoch).", "id": "f12896:c0:m3"} {"signature": "def __init__(self, legs=None):", "body": "self.legs = []self.departure_time = Noneself.arrival_time = Noneself.trip_ids = set()self.n_boardings = if legs is not None:for leg in legs:self.add_leg(leg)", "docstring": "Parameters\n----------\nlegs: list[Connection]", "id": "f12897:c0:m0"} {"signature": "def add_leg(self, leg):", "body": "assert(isinstance(leg, Connection))if not self.legs:self.departure_time = leg.departure_timeself.arrival_time = leg.arrival_timeif leg.trip_id and (not self.legs or (leg.trip_id != self.legs[-].trip_id)):self.n_boardings += self.arrival_time = leg.arrival_timeself.legs.append(leg)", "docstring": "Parameters\n----------\nleg: Connection", "id": "f12897:c0:m1"} {"signature": "def get_transfer_stop_pairs(self):", "body": "transfer_stop_pairs = []previous_arrival_stop = Nonecurrent_trip_id = Nonefor leg in self.legs:if leg.trip_id is not None and leg.trip_id != current_trip_id and previous_arrival_stop is not None:transfer_stop_pair = (previous_arrival_stop, leg.departure_stop)transfer_stop_pairs.append(transfer_stop_pair)previous_arrival_stop = leg.arrival_stopcurrent_trip_id = leg.trip_idreturn transfer_stop_pairs", "docstring": "Get stop pairs through which transfers take place\n\nReturns\n-------\ntransfer_stop_pairs: list", "id": "f12897:c0:m6"} {"signature": "def __init__(self,transit_events,target_stop,start_time=None,end_time=None,transfer_margin=,walk_network=None,walk_speed=,verbose=False):", "body": "AbstractRoutingAlgorithm.__init__(self)self._target = target_stopself._connections = transit_eventsif start_time is None:start_time = transit_events[-].departure_timeif end_time is None:end_time = transit_events[].departure_timeself._start_time = start_timeself._end_time = end_timeself._transfer_margin = transfer_marginif walk_network is None:walk_network = networkx.Graph()self._walk_network = walk_networkself._walk_speed = float(walk_speed)self._verbose = verboseself.__trip_min_arrival_time = defaultdict(lambda: float(\"\"))self._stop_profiles = defaultdict(lambda: NodeProfileSimple())self._stop_profiles[self._target] = NodeProfileSimple()if target_stop in walk_network.nodes():for target_neighbor in walk_network.neighbors(target_stop):edge_data = walk_network.get_edge_data(target_neighbor, target_stop)walk_duration = edge_data[\"\"] / self._walk_speedself._stop_profiles[target_neighbor] = NodeProfileSimple(walk_duration)", "docstring": "Parameters\n----------\ntransit_events: list[Connection]\n events are assumed to be ordered in DECREASING departure_time (!)\ntarget_stop: int\n index of the target stop\nstart_time : int, optional\n start time in unixtime seconds\nend_time: int, optional\n end time in unixtime seconds (no connections will be scanned after this time)\ntransfer_margin: int, optional\n required extra margin required for transfers in seconds\nwalk_speed: float, optional\n walking speed between 
stops in meters / second.\nwalk_network: networkx.Graph, optional\n each edge should have the walking distance as a data attribute (\"distance_shape\") expressed in meters\nverbose: boolean, optional\n whether to print out progress", "id": "f12898:c0:m0"} {"signature": "def _scan_footpaths_to_departure_stop(self, connection_dep_stop, connection_dep_time, arrival_time_target):", "body": "for _, neighbor, data in self._walk_network.edges_iter(nbunch=[connection_dep_stop],data=True):d_walk = data['']neighbor_dep_time = connection_dep_time - d_walk / self._walk_speedpt = LabelTimeSimple(departure_time=neighbor_dep_time, arrival_time_target=arrival_time_target)self._stop_profiles[neighbor].update_pareto_optimal_tuples(pt)", "docstring": "A helper method for scanning the footpaths. Updates self._stop_profiles accordingly", "id": "f12898:c0:m2"} {"signature": "@propertydef stop_profiles(self):", "body": "assert self._has_runreturn self._stop_profiles", "docstring": "Returns\n-------\n_stop_profiles : dict[int, NodeProfileSimple]\n The pareto tuples necessary.", "id": "f12898:c0:m3"} {"signature": "def __init__(self, labels, walk_time_to_target, start_time_dep, end_time_dep):", "body": "self.start_time_dep = start_time_depself.end_time_dep = end_time_depall_pareto_optimal_tuples = [pt for pt in labels if(start_time_dep < pt.departure_time < end_time_dep)]labels_after_dep_time = [label for label in labels if label.departure_time >= self.end_time_dep]if labels_after_dep_time:next_label_after_end_time = min(labels_after_dep_time, key=lambda el: el.arrival_time_target)all_pareto_optimal_tuples.append(next_label_after_end_time)all_pareto_optimal_tuples = sorted(all_pareto_optimal_tuples, key=lambda ptuple: ptuple.departure_time)arrival_time_target_at_end_time = end_time_dep + walk_time_to_targetprevious_trip = Nonefor trip_tuple in all_pareto_optimal_tuples:if previous_trip:assert(trip_tuple.arrival_time_target > previous_trip.arrival_time_target)if trip_tuple.departure_time > self.end_time_depand trip_tuple.arrival_time_target < arrival_time_target_at_end_time:arrival_time_target_at_end_time = trip_tuple.arrival_time_targetprevious_trip = trip_tupleself._walk_time_to_target = walk_time_to_targetself._profile_blocks = []previous_departure_time = start_time_depself.trip_durations = []self.trip_departure_times = []for trip_pareto_tuple in all_pareto_optimal_tuples:if trip_pareto_tuple.departure_time > self.end_time_dep:continueif self._walk_time_to_target <= trip_pareto_tuple.duration():print(self._walk_time_to_target, trip_pareto_tuple.duration())assert(self._walk_time_to_target > trip_pareto_tuple.duration())effective_trip_previous_departure_time = max(previous_departure_time,trip_pareto_tuple.departure_time - (self._walk_time_to_target - trip_pareto_tuple.duration()))if effective_trip_previous_departure_time > previous_departure_time:walk_block = ProfileBlock(start_time=previous_departure_time,end_time=effective_trip_previous_departure_time,distance_start=self._walk_time_to_target,distance_end=self._walk_time_to_target)self._profile_blocks.append(walk_block)trip_waiting_time = trip_pareto_tuple.departure_time - effective_trip_previous_departure_timetrip_block = ProfileBlock(end_time=trip_pareto_tuple.departure_time,start_time=effective_trip_previous_departure_time,distance_start=trip_pareto_tuple.duration() + 
trip_waiting_time,distance_end=trip_pareto_tuple.duration())self.trip_durations.append(trip_pareto_tuple.duration())self.trip_departure_times.append(trip_pareto_tuple.departure_time)self._profile_blocks.append(trip_block)previous_departure_time = trip_pareto_tuple.departure_timeif not self._profile_blocks or self._profile_blocks[-].end_time < end_time_dep:if len(self._profile_blocks) > :dep_previous = self._profile_blocks[-].end_timeelse:dep_previous = start_time_depwaiting_time = end_time_dep - dep_previousdistance_end_trip = arrival_time_target_at_end_time - end_time_depwalking_wait_time = min(end_time_dep - dep_previous,waiting_time - (self._walk_time_to_target - distance_end_trip))walking_wait_time = max(, walking_wait_time)if walking_wait_time > :walk_block = ProfileBlock(start_time=dep_previous,end_time=dep_previous + walking_wait_time,distance_start=self._walk_time_to_target,distance_end=self._walk_time_to_target)assert (walk_block.start_time <= walk_block.end_time)assert (walk_block.distance_end <= walk_block.distance_start)self._profile_blocks.append(walk_block)trip_waiting_time = waiting_time - walking_wait_timeif trip_waiting_time > :try:trip_block = ProfileBlock(start_time=dep_previous + walking_wait_time,end_time=dep_previous + walking_wait_time + trip_waiting_time,distance_start=distance_end_trip + trip_waiting_time,distance_end=distance_end_trip)assert (trip_block.start_time <= trip_block.end_time)assert (trip_block.distance_end <= trip_block.distance_start)self._profile_blocks.append(trip_block)except AssertionError as e:assert(trip_waiting_time < **-)self.profile_block_analyzer = ProfileBlockAnalyzer(profile_blocks=self._profile_blocks)", "docstring": "Initialize the data structures required by the analyzer.\n\nParameters\n----------\nlabels: list\n pareto-optimal labels, each with departure_time and arrival_time_target attributes\nwalk_time_to_target: float\nstart_time_dep: int\nend_time_dep: int", "id": "f12899:c0:m1"} {"signature": "def n_pareto_optimal_trips(self):", "body": "return float(len(self.trip_durations))", "docstring": "Get number of pareto-optimal trips\n\nReturns\n-------\nn_trips: float", "id": "f12899:c0:m2"} {"signature": "@_if_no_trips_return_infdef min_trip_duration(self):", "body": "return numpy.min(self.trip_durations)", "docstring": "Get minimum travel time to destination.\n\nReturns\n-------\nfloat: min_trip_duration\n float('nan') if no trips take place", "id": "f12899:c0:m3"} {"signature": "@_if_no_trips_return_infdef max_trip_duration(self):", "body": "return numpy.max(self.trip_durations)", "docstring": "Get maximum travel time to destination.\n\nReturns\n-------\nfloat: max_trip_duration\n float('inf') if no trips take place", "id": "f12899:c0:m4"} {"signature": "@_if_no_trips_return_infdef mean_trip_duration(self):", "body": "return numpy.mean(self.trip_durations)", "docstring": "Get average travel time to destination.\n\nReturns\n-------\nfloat: mean_trip_duration\n float('inf') if no trips take place", "id": "f12899:c0:m5"} {"signature": "@_if_no_trips_return_infdef median_trip_duration(self):", "body": "return numpy.median(self.trip_durations)", "docstring": "Get median travel time to destination.\n\nReturns\n-------\nfloat: median_trip_duration\n float('inf') if no trips take place", "id": "f12899:c0:m6"} {"signature": "def mean_temporal_distance(self):", "body": "total_width = self.end_time_dep - self.start_time_deptotal_area = sum([block.area() for block in self._profile_blocks])return total_area / total_width", "docstring": "Get mean temporal distance (in seconds) to the target.\n\nReturns\n-------\nmean_temporal_distance : float", "id": "f12899:c0:m7"} {"signature": "def 
median_temporal_distance(self):", "body": "return self.profile_block_analyzer.median()", "docstring": "Returns\n-------\nmedian_temporal_distance : float", "id": "f12899:c0:m8"} {"signature": "def min_temporal_distance(self):", "body": "return self.profile_block_analyzer.min()", "docstring": "Compute the minimum temporal distance to target.\n\nReturns\n-------\nmin_temporal_distance: float", "id": "f12899:c0:m9"} {"signature": "def max_temporal_distance(self):", "body": "return self.profile_block_analyzer.max()", "docstring": "Compute the maximum temporal distance.\n\nReturns\n-------\nmax_temporal_distance : float", "id": "f12899:c0:m10"} {"signature": "def largest_finite_temporal_distance(self):", "body": "return self.profile_block_analyzer.largest_finite_distance()", "docstring": "Compute the maximum temporal distance.\n\nReturns\n-------\nmax_temporal_distance : float", "id": "f12899:c0:m11"} {"signature": "def plot_temporal_distance_cdf(self):", "body": "xvalues, cdf = self.profile_block_analyzer._temporal_distance_cdf()fig = plt.figure()ax = fig.add_subplot()xvalues = numpy.array(xvalues) / ax.plot(xvalues, cdf, \"\")ax.fill_between(xvalues, cdf, color=\"\", alpha=)ax.set_ylabel(\"\")ax.set_xlabel(\"\")return fig", "docstring": "Plot the temporal distance cumulative density function.\n\nReturns\n-------\nfig: matplotlib.Figure", "id": "f12899:c0:m12"} {"signature": "def plot_temporal_distance_pdf(self, use_minutes=True, color=\"\", ax=None):", "body": "from matplotlib import pyplot as pltplt.rc('', usetex=True)temporal_distance_split_points_ordered, densities, delta_peaks = self._temporal_distance_pdf()xs = []for i, x in enumerate(temporal_distance_split_points_ordered):xs.append(x)xs.append(x)xs = numpy.array(xs)ys = []for y in densities:ys.append(y)ys.append(y)ys.append()ys = numpy.array(ys)xlabel = \"\"ylabel = \"\"if use_minutes:xs /= ys *= xlabel = \"\"delta_peaks = {peak / : mass for peak, mass in delta_peaks.items()}if ax is None:fig = plt.figure()ax = fig.add_subplot()ax.plot(xs, ys, \"\")ax.fill_between(xs, ys, color=\"\", alpha=)if delta_peaks:peak_height = max(ys) * max_x = max(xs)min_x = min(xs)now_max_x = max(xs) + * (max_x - min_x)now_min_x = min_x - * (max_x - min_x)text_x_offset = * (now_max_x - max_x)for loc, mass in delta_peaks.items():ax.plot([loc, loc], [, peak_height], color=\"\", lw=)ax.text(loc + text_x_offset, peak_height * , \"\" % (mass), color=\"\")ax.set_xlim(now_min_x, now_max_x)tot_delta_peak_mass = sum(delta_peaks.values())transit_text_x = (min_x + max_x) / transit_text_y = min(ys[ys > ]) / ax.text(transit_text_x,transit_text_y,\"\" % ( - tot_delta_peak_mass),color=\"\",va=\"\",ha=\"\")ax.set_xlabel(xlabel)ax.set_ylabel(ylabel)ax.set_ylim(bottom=)return ax.figure", "docstring": "Plot the temporal distance probability density function.\n\nReturns\n-------\nfig: matplotlib.Figure", "id": "f12899:c0:m13"} {"signature": "def plot_temporal_distance_pdf_horizontal(self, use_minutes=True,color=\"\",ax=None,duration_divider=,legend_font_size=None,legend_loc=None):", "body": "from matplotlib import pyplot as pltplt.rc('', usetex=True)if ax is None:fig = plt.figure()ax = fig.add_subplot()temporal_distance_split_points_ordered, densities, delta_peaks = self._temporal_distance_pdf()xs = []for i, x in enumerate(temporal_distance_split_points_ordered):xs.append(x)xs.append(x)xs = numpy.array(xs)ys = []for y in densities:ys.append(y)ys.append(y)ys.append()ys = numpy.array(ys)xlabel = \"\"ylabel = \"\"if use_minutes:xs /= duration_dividerys *= duration_dividerxlabel = 
\"\"delta_peaks = {peak / : mass for peak, mass in delta_peaks.items()}if delta_peaks:peak_height = max(ys) * max_x = max(xs)min_x = min(xs)now_max_x = max(xs) + * (max_x - min_x)now_min_x = min_x - * (max_x - min_x)text_x_offset = * (now_max_x - max_x)for loc, mass in delta_peaks.items():text = \"\" + (\"\" % (mass))ax.plot([, peak_height], [loc, loc], color=color, lw=, label=text)ax.plot(ys, xs, \"\")if delta_peaks:tot_delta_peak_mass = sum(delta_peaks.values())fill_label = \"\" % (-tot_delta_peak_mass)else:fill_label = Noneax.fill_betweenx(xs, ys, color=color, alpha=, label=fill_label)ax.set_ylabel(xlabel)ax.set_xlabel(ylabel)ax.set_xlim(left=, right=max(ys) * )if delta_peaks:if legend_font_size is None:legend_font_size = if legend_loc is None:legend_loc = \"\"ax.legend(loc=legend_loc, prop={'': legend_font_size})if True:line_tyles = [\"\", \"\", \"\"][::-]to_plot_funcs = [self.max_temporal_distance, self.mean_temporal_distance, self.min_temporal_distance]xmin, xmax = ax.get_xlim()for to_plot_func, ls in zip(to_plot_funcs, line_tyles):y = to_plot_func() / duration_dividerassert y < float('')ax.plot([xmin, xmax*], [y, y], color=\"\", ls=ls, lw=)return ax.figure", "docstring": "Plot the temporal distance probability density function.\n\nReturns\n-------\nfig: matplotlib.Figure", "id": "f12899:c0:m14"} {"signature": "def plot_temporal_distance_profile(self,timezone=None,color=\"\",alpha=,ax=None,lw=,label=\"\",plot_tdist_stats=False,plot_trip_stats=False,format_string=\"\",plot_journeys=False,duration_divider=,fill_color=\"\",journey_letters=None,return_letters=False):", "body": "if ax is None:fig = plt.figure()ax = fig.add_subplot()if timezone is None:warnings.warn(\"\")timezone = pytz.timezone(\"\")def _ut_to_unloc_datetime(ut):dt = datetime.datetime.fromtimestamp(ut, timezone)return dt.replace(tzinfo=None)if format_string:x_axis_formatter = md.DateFormatter(format_string)ax.xaxis.set_major_formatter(x_axis_formatter)else:_ut_to_unloc_datetime = lambda x: xax.set_xlim(_ut_to_unloc_datetime(self.start_time_dep),_ut_to_unloc_datetime(self.end_time_dep))if plot_tdist_stats:line_tyles = [\"\", \"\", \"\"][::-]to_plot_labels = [\"\", \"\", \"\"]to_plot_funcs = [self.max_temporal_distance, self.mean_temporal_distance, self.min_temporal_distance]xmin, xmax = ax.get_xlim()for to_plot_label, to_plot_func, ls in zip(to_plot_labels, to_plot_funcs, line_tyles):y = to_plot_func() / duration_dividerassert y < float(''), to_plot_labelto_plot_label = to_plot_label + \"\" % (y)ax.plot([xmin, xmax], [y, y], color=\"\", ls=ls, lw=, label=to_plot_label)if plot_trip_stats:assert (not plot_tdist_stats)line_tyles = [\"\", \"\", \"\"]to_plot_labels = [\"\", \"\", \"\"]to_plot_funcs = [self.min_trip_duration, self.max_trip_duration, self.mean_trip_duration]xmin, xmax = ax.get_xlim()for to_plot_label, to_plot_func, ls in zip(to_plot_labels, to_plot_funcs, line_tyles):y = to_plot_func() / duration_dividerif not numpy.math.isnan(y):ax.plot([xmin, xmax], [y, y], color=\"\", ls=ls, lw=)txt = to_plot_label + \"\" % yax.text(xmax + * (xmax - xmin), y, txt, color=\"\", va=\"\", ha=\"\")old_xmax = xmaxxmax += (xmax - xmin) * ymin, ymax = ax.get_ylim()ax.fill_between([old_xmax, xmax], ymin, ymax, color=\"\", alpha=)ax.set_xlim(xmin, xmax)vertical_lines, slopes = self.profile_block_analyzer.get_vlines_and_slopes_for_plotting()for i, line in enumerate(slopes):xs = [_ut_to_unloc_datetime(x) for x in line['']]if i is :label = u\"\"else:label = Noneax.plot(xs, numpy.array(line['']) / duration_divider, \"\", color=color, lw=lw, 
label=label)for line in vertical_lines:xs = [_ut_to_unloc_datetime(x) for x in line['']]ax.plot(xs, numpy.array(line['']) / duration_divider, \"\", color=color) assert (isinstance(ax, plt.Axes))if plot_journeys:xs = [_ut_to_unloc_datetime(x) for x in self.trip_departure_times]ys = self.trip_durationsax.plot(xs, numpy.array(ys) / duration_divider, \"\", color=\"\", ms=, label=\"\")if journey_letters is None:journey_letters = \"\"def cycle_journey_letters(journey_letters):saved = []for element in journey_letters:yield elementsaved.append(element)count = while saved:for element in saved:yield element + str(count)count += journey_letters_iterator = cycle_journey_letters(journey_letters)time_letters = {int(time): letter for letter, time in zip(journey_letters_iterator, self.trip_departure_times)}for x, y, letter in zip(xs, ys, journey_letters_iterator):walking = - self._walk_time_to_target / if numpy.isfinite(self._walk_time_to_target) else ax.text(x + datetime.timedelta(seconds=(self.end_time_dep - self.start_time_dep) / ),(y + walking) / duration_divider, letter, va=\"\", ha=\"\")fill_between_x = []fill_between_y = []for line in slopes:xs = [_ut_to_unloc_datetime(x) for x in line['']]fill_between_x.extend(xs)fill_between_y.extend(numpy.array(line[\"\"]) / duration_divider)ax.fill_between(fill_between_x, y1=fill_between_y, color=fill_color, alpha=alpha, label=label)ax.set_ylim(bottom=)ax.set_ylim(ax.get_ylim()[], ax.get_ylim()[] * )if rcParams['']:ax.set_xlabel(r\"\")else:ax.set_xlabel(\"\")ax.set_ylabel(r\"\")if plot_journeys and return_letters:return ax, time_letterselse:return ax", "docstring": "Parameters\n----------\ntimezone: str\ncolor: color\nformat_string: str, None\n if None, the original values are used\nplot_journeys: bool, optional\n if True, small dots are plotted at the departure times", "id": "f12899:c0:m15"} {"signature": "def _temporal_distance_pdf(self):", "body": "temporal_distance_split_points_ordered, norm_cdf = self.profile_block_analyzer._temporal_distance_cdf()delta_peak_loc_to_probability_mass = {}non_delta_peak_split_points = [temporal_distance_split_points_ordered[]]non_delta_peak_densities = []for i in range(, len(temporal_distance_split_points_ordered) - ):left = temporal_distance_split_points_ordered[i]right = temporal_distance_split_points_ordered[i + ]width = right - leftprob_mass = norm_cdf[i + ] - norm_cdf[i]if width == :delta_peak_loc_to_probability_mass[left] = prob_masselse:non_delta_peak_split_points.append(right)non_delta_peak_densities.append(prob_mass / float(width))assert (len(non_delta_peak_densities) == len(non_delta_peak_split_points) - )return numpy.array(non_delta_peak_split_points),numpy.array(non_delta_peak_densities),delta_peak_loc_to_probability_mass", "docstring": "Temporal distance probability density function.\n\nReturns\n-------\nnon_delta_peak_split_points: numpy.array\nnon_delta_peak_densities: numpy.array\n len(density) == len(temporal_distance_split_points_ordered) -1\ndelta_peak_loc_to_probability_mass : dict", "id": "f12899:c0:m16"} {"signature": "def __init__(self,transit_events,target_stop,start_time=None,end_time=None,transfer_margin=,walk_network=None,walk_speed=,verbose=False):", "body": "AbstractRoutingAlgorithm.__init__(self)self._target = target_stopself._transit_connections = transit_eventsif start_time is None:start_time = transit_events[-].departure_timeif end_time is None:end_time = transit_events[].departure_timeself._start_time = start_timeself._end_time = end_timeself._transfer_margin = transfer_marginif walk_network 
is None:walk_network = networkx.Graph()self._walk_network = walk_networkself._walk_speed = float(walk_speed)self._verbose = verboseself.__trip_min_arrival_time = defaultdict(lambda: float(\"\"))self._stop_profiles = defaultdict(lambda: NodeProfileC())self._stop_profiles[self._target] = NodeProfileC()if target_stop in walk_network.nodes():for target_neighbor in walk_network.neighbors(target_stop):edge_data = walk_network.get_edge_data(target_neighbor, target_stop)walk_duration = edge_data[\"\"] / self._walk_speedself._stop_profiles[target_neighbor] = NodeProfileC(walk_duration)pseudo_connection_set = compute_pseudo_connections(transit_events, self._start_time, self._end_time,self._transfer_margin, self._walk_network,self._walk_speed)self._pseudo_connections = list(pseudo_connection_set)self._all_connections = self._pseudo_connections + self._transit_connectionsself._all_connections.sort(key=lambda connection: -connection.departure_time)", "docstring": "Parameters\n----------\ntransit_events: list[Connection]\n events are assumed to be ordered in DECREASING departure_time (!)\ntarget_stop: int\n index of the target stop\nstart_time : int, optional\n start time in unixtime seconds\nend_time: int, optional\n end time in unixtime seconds (no connections will be scanned after this time)\ntransfer_margin: int, optional\n required extra margin required for transfers in seconds\nwalk_speed: float, optional\n walking speed between stops in meters / second.\nwalk_network: networkx.Graph, optional\n each edge should have the walking distance as a data attribute (\"distance_shape\") expressed in meters\nverbose: boolean, optional\n whether to print out progress", "id": "f12900:c0:m0"} {"signature": "@propertydef stop_profiles(self):", "body": "assert self._has_runreturn self._stop_profiles", "docstring": "Returns\n-------\n_stop_profiles : dict[int, NodeProfileSimple]\n The pareto tuples necessary.", "id": "f12900:c0:m2"} {"signature": "def __init__(self, labels, start_time_dep, end_time_dep, walk_duration=float(''), label_props_to_consider=None, **kwargs):", "body": "for label in labels:assert (hasattr(label, \"\"))assert (hasattr(label, \"\"))self.start_time_dep = start_time_depself.end_time_dep = end_time_depself.walk_duration = walk_durationif label_props_to_consider is None:self.label_props = []else:self.label_props = label_props_to_considerself._fastest_path_labels = self._compute_fastest_path_labels(labels)for label in self._fastest_path_labels:for prop in self.label_props:assert (hasattr(label, prop))self.kwargs = kwargs", "docstring": "Parameters\n----------\nlabels: list\n List of labels (each label should at least have attributes \"departure_time\" and \"arrival_time\")\nwalk_duration: float\n What is the maximum duration for a journey to be considered.\nlabel_props_to_consider: list", "id": "f12901:c0:m0"} {"signature": "def get_time_analyzer(self):", "body": "return NodeProfileAnalyzerTime(self._fastest_path_labels,self.walk_duration,self.start_time_dep,self.end_time_dep)", "docstring": "Returns\n-------\nNodeProfileAnalyzerTime", "id": "f12901:c0:m5"} {"signature": "def get_prop_analyzer_flat(self, property, value_no_next_journey, value_cutoff):", "body": "kwargs = self.kwargsfp_blocks = self.get_fastest_path_temporal_distance_blocks()prop_blocks = []for b in fp_blocks:if b.is_flat():if b.distance_end == self.walk_duration and b.distance_end != float(''):prop_value = value_cutoffelse:prop_value = value_no_next_journeyelse:prop_value = b[property]prop_block = ProfileBlock(b.start_time, 
b.end_time, prop_value, prop_value)prop_blocks.append(prop_block)return ProfileBlockAnalyzer(prop_blocks, **kwargs)", "docstring": "Get a journey property analyzer, where each journey is weighted by the number of.\n\nParameters\n----------\nproperty: string\n Name of the property, needs to be one of label_props given on initialization.\nvalue_no_next_journey:\n Value of the profile, when there is no next journey available.\nvalue_cutoff: number\n default value of the property when cutoff is applied\n\nReturns\n-------\nProfileBlockAnalyzer", "id": "f12901:c0:m9"} {"signature": "def compute_pseudo_connections(transit_connections, start_time_dep,end_time_dep, transfer_margin,walk_network, walk_speed):", "body": "pseudo_connection_set = set() for c in transit_connections:if start_time_dep <= c.departure_time <= end_time_dep:walk_arr_stop = c.departure_stopwalk_arr_time = c.departure_time - transfer_marginfor _, walk_dep_stop, data in walk_network.edges(nbunch=[walk_arr_stop], data=True):walk_dep_time = walk_arr_time - data[''] / float(walk_speed)if walk_dep_time > end_time_dep or walk_dep_time < start_time_dep:continuepseudo_connection = Connection(walk_dep_stop,walk_arr_stop,walk_dep_time,walk_arr_time,Connection.WALK_TRIP_ID,Connection.WALK_SEQ,is_walk=True)pseudo_connection_set.add(pseudo_connection)return pseudo_connection_set", "docstring": "Given a set of transit events and the static walk network,\n\"transform\" the static walking network into a set of \"pseudo-connections\".\n\nAs a first approximation, we add pseudo-connections to depart after each arrival of a transit connection\nto it's arrival stop.\n\nParameters\n----------\ntransit_connections: list[Connection]\nstart_time_dep : int\n start time in unixtime seconds\nend_time_dep: int\n end time in unixtime seconds (no new connections will be scanned after this time)\ntransfer_margin: int\n required extra margin required for transfers in seconds\nwalk_speed: float\n walking speed between stops in meters / second\nwalk_network: networkx.Graph\n each edge should have the walking distance as a data attribute (\"d_walk\") expressed in meters\n\nReturns\n-------\npseudo_connections: set[Connection]", "id": "f12902:m0"} {"signature": "def add_walk_distances_to_db_python(gtfs, osm_path, cutoff_distance_m=):", "body": "if isinstance(gtfs, str):gtfs = GTFS(gtfs)assert (isinstance(gtfs, GTFS))print(\"\")walk_network = create_walk_network_from_osm(osm_path)print(\"\")stop_I_to_nearest_osm_node, stop_I_to_nearest_osm_node_distance = match_stops_to_nodes(gtfs, walk_network)transfers = gtfs.get_straight_line_transfer_distances()from_I_to_to_stop_Is = {stop_I: set() for stop_I in stop_I_to_nearest_osm_node}for transfer_tuple in transfers.itertuples():from_I = transfer_tuple.from_stop_Ito_I = transfer_tuple.to_stop_Ifrom_I_to_to_stop_Is[from_I].add(to_I)print(\"\")for from_I, to_stop_Is in from_I_to_to_stop_Is.items():from_node = stop_I_to_nearest_osm_node[from_I]from_dist = stop_I_to_nearest_osm_node_distance[from_I]shortest_paths = networkx.single_source_dijkstra_path_length(walk_network,from_node,cutoff=cutoff_distance_m - from_dist,weight=\"\")for to_I in to_stop_Is:to_distance = stop_I_to_nearest_osm_node_distance[to_I]to_node = stop_I_to_nearest_osm_node[to_I]osm_distance = shortest_paths.get(to_node, float(''))total_distance = from_dist + osm_distance + to_distancefrom_stop_I_transfers = transfers[transfers[''] == from_I]straigth_distance = from_stop_I_transfers[from_stop_I_transfers[\"\"] == to_I][\"\"].values[]assert (straigth_distance 
< total_distance + ) if total_distance <= cutoff_distance_m:gtfs.conn.execute(\"\"\"\" + str(int(total_distance)) +\"\" + str(from_I) + \"\" + str(to_I))gtfs.conn.commit()", "docstring": "Computes the walk paths between stops, and updates these to the gtfs database.\n\nParameters\n----------\ngtfs: gtfspy.GTFS or str\n A GTFS object or a string representation.\nosm_path: str\n path to the OpenStreetMap file\ncutoff_distance_m: number\n maximum allowed distance in meters\n\nReturns\n-------\nNone\n\nSee Also\n--------\ngtfspy.calc_transfers\ncompute_walk_paths_java", "id": "f12903:m0"} {"signature": "def match_stops_to_nodes(gtfs, walk_network):", "body": "network_nodes = walk_network.nodes(data=\"\")stop_Is = set(gtfs.get_straight_line_transfer_distances()[''])stops_df = gtfs.stops()geo_index = GeoGridIndex(precision=)for net_node, data in network_nodes:geo_index.add_point(GeoPoint(data[''], data[''], ref=net_node))stop_I_to_node = {}stop_I_to_dist = {}for stop_I in stop_Is:stop_lat = float(stops_df[stops_df.stop_I == stop_I].lat)stop_lon = float(stops_df[stops_df.stop_I == stop_I].lon)geo_point = GeoPoint(stop_lat, stop_lon)min_dist = float('')min_dist_node = Nonesearch_distances_m = [, ]for search_distance_m in search_distances_m:for point, distance in geo_index.get_nearest_points(geo_point, search_distance_m, \"\"):if distance < min_dist:min_dist = distance * min_dist_node = point.refif min_dist_node is not None:breakif min_dist_node is None:warn(\"\" + str(stops_df[stops_df.stop_I == stop_I]))stop_I_to_node[stop_I] = min_dist_nodestop_I_to_dist[stop_I] = min_distreturn stop_I_to_node, stop_I_to_dist", "docstring": "Parameters\n----------\ngtfs : a GTFS object\nwalk_network : networkx.Graph\n\nReturns\n-------\nstop_I_to_node: dict\n maps stop_I to closest walk_network node\nstop_I_to_dist: dict\n maps stop_I to the distance to the closest walk_network node", "id": "f12903:m1"} {"signature": "def compute_walk_paths_java(gtfs_db_path, osm_file, cache_db=None):", "body": "raise NotImplementedError(\"\")", "docstring": "Parameters\n----------\ngtfs_db_path: str (path to the gtfs database)\nosm_file: str\ncache_db: str\n\nReturns\n-------\nNone", "id": "f12903:m3"} {"signature": "@classmethoddef setUpClass(cls):", "body": "cls.gtfs_source_dir = os.path.join(os.path.dirname(__file__), \"\")cls.G = GTFS.from_directory_as_inmemory_db(cls.gtfs_source_dir)", "docstring": "This method is run once before executing any tests", "id": "f12904:c0:m0"} {"signature": "def setUp(self):", "body": "self.gtfs_source_dir = self.__class__.gtfs_source_dirself.gtfs = self.__class__.Gself.extract_output_dir = os.path.join(self.gtfs_source_dir, \"\", \"\")if not os.path.exists(self.extract_output_dir):makedirs(self.extract_output_dir)", "docstring": "This method is run once before _each_ test method is executed", "id": "f12904:c0:m1"} {"signature": "@classmethoddef setUpClass(cls):", "body": "cls.gtfs_source_dir = os.path.join(os.path.dirname(__file__), \"\")cls.G = GTFS.from_directory_as_inmemory_db(cls.gtfs_source_dir)", "docstring": "This method is run once before executing any tests", "id": "f12905:c0:m0"} {"signature": "def setUp(self):", "body": "self.gtfs = GTFS.from_directory_as_inmemory_db(self.gtfs_source_dir)", "docstring": "This method is run once before _each_ test method is executed", "id": "f12905:c0:m1"} {"signature": "@classmethoddef setup_class(cls):", "body": "pass", "docstring": "This method is run once for each class before any tests are run", "id": "f12907:c0:m0"} {"signature": 
"@classmethoddef teardown_class(cls):", "body": "pass", "docstring": "This method is run once for each class _after_ all tests are run", "id": "f12907:c0:m1"} {"signature": "def setUp(self):", "body": "self.conn = sqlite3.connect('')self.agencyText =''''self.stopsText =''''''''''''''''''''self.calendarText =''''''''''self.calendarDatesText =''''''''''self.tripText =\"\"\"\"\"\"\"\"\"\"self.routesText =\"\"\"\"\"\"\"\"self.shapeText =\"\"\"\"\"\"\"\"\"\"self.stopTimesText =\"\"\"\"\"\"\"\"\"\"self.frequenciesText =\"\"\"\"self.transfersText =\"\"\"\"\"\"\"\"\"\"self.feedInfoText =\"\"\"\"self.fdict = {'': self.agencyText,'': self.stopsText,'': self.calendarText,'': self.calendarDatesText,'': self.tripText,'': self.routesText,'': self.shapeText,'': self.stopTimesText,'': self.frequenciesText,'': self.transfersText,'': self.feedInfoText}self.orig_row_factory = self.conn.row_factory", "docstring": "This method is run once before _each_ test method is executed", "id": "f12907:c0:m3"} {"signature": "def printTable(self, table_name):", "body": "prev_row_factory = self.setRowConn()print(\"\")print(\"\" + table_name)print(\"\")cur = self.conn.execute(\"\" % table_name)names = [d[] for d in cur.description]for name in names:print(name + '', end=\"\")print(\"\")for row in cur:print(row)self.conn.row_factory = prev_row_factory", "docstring": "Pretty prints a table with name table_name.\n\nParameters\n----------\ntable_name : str\n name of the table", "id": "f12907:c0:m6"} {"signature": "def tearDown(self):", "body": "pass", "docstring": "This method is run once after _each_ test method is executed", "id": "f12907:c0:m7"} {"signature": "@classmethoddef setUpClass(cls):", "body": "cls.gtfs_source_dir = os.path.join(os.path.dirname(__file__), \"\")cls.G = GTFS.from_directory_as_inmemory_db(cls.gtfs_source_dir)", "docstring": "This method is run once before executing any tests", "id": "f12909:c0:m0"} {"signature": "def setUp(self):", "body": "self.gtfs = GTFS.from_directory_as_inmemory_db(self.gtfs_source_dir)", "docstring": "This method is run once before _each_ test method is executed", "id": "f12909:c0:m1"} {"signature": "@classmethoddef setUpClass(cls):", "body": "cls.gtfs_source_dir = os.path.join(os.path.dirname(__file__), \"\")cls.G = GTFS.from_directory_as_inmemory_db(cls.gtfs_source_dir)", "docstring": "This method is run once before executing any tests", "id": "f12911:c0:m0"} {"signature": "def setUp(self):", "body": "self.gtfs_source_dir = self.__class__.gtfs_source_dirself.gtfs = self.__class__.G", "docstring": "This method is run once before _each_ test method is executed", "id": "f12911:c0:m1"} {"signature": "def tearDown(self):", "body": "pass", "docstring": "This method is run once after _each_ test method is executed", "id": "f12911:c0:m2"} {"signature": "def remove_all_trips_fully_outside_buffer(db_conn, center_lat, center_lon, buffer_km, update_secondary_data=True):", "body": "distance_function_str = add_wgs84_distance_function_to_db(db_conn)stops_within_buffer_query_sql = \"\" + distance_function_str +\"\".format(lat=float(center_lat), lon=float(center_lon), d_m=int(*buffer_km))select_all_trip_Is_where_stop_I_is_within_buffer_sql = \"\" + stops_within_buffer_query_sql + \"\"trip_Is_to_remove_sql = \"\" + select_all_trip_Is_where_stop_I_is_within_buffer_sql + \"\"trip_Is_to_remove = pandas.read_sql(trip_Is_to_remove_sql, db_conn)[\"\"].valuestrip_Is_to_remove_string = \"\".join([str(trip_I) for trip_I in trip_Is_to_remove])remove_all_trips_fully_outside_buffer_sql = \"\" + 
trip_Is_to_remove_string + \"\"remove_all_stop_times_where_trip_I_fully_outside_buffer_sql = \"\" + trip_Is_to_remove_string + \"\"db_conn.execute(remove_all_trips_fully_outside_buffer_sql)db_conn.execute(remove_all_stop_times_where_trip_I_fully_outside_buffer_sql)delete_stops_not_in_stop_times_and_not_as_parent_stop(db_conn)db_conn.execute(DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL)db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)db_conn.execute(DELETE_DAYS_ENTRIES_NOT_PRESENT_IN_TRIPS_SQL)db_conn.execute(DELETE_DAY_TRIPS2_ENTRIES_NOT_PRESENT_IN_TRIPS_SQL)db_conn.execute(DELETE_CALENDAR_ENTRIES_FOR_NON_REFERENCE_SERVICE_IS_SQL)db_conn.execute(DELETE_CALENDAR_DATES_ENTRIES_FOR_NON_REFERENCE_SERVICE_IS_SQL)db_conn.execute(DELETE_FREQUENCIES_ENTRIES_NOT_PRESENT_IN_TRIPS)db_conn.execute(DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL)if update_secondary_data:update_secondary_data_copies(db_conn)", "docstring": "Not used in the regular filter process for the time being.\n\nParameters\n----------\ndb_conn: sqlite3.Connection\n connection to the GTFS object\ncenter_lat: float\ncenter_lon: float\nbuffer_km: float", "id": "f12919:m2"} {"signature": "def remove_dangling_shapes(db_conn):", "body": "db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)SELECT_MIN_MAX_SHAPE_BREAKS_BY_TRIP_I_SQL =\"\"trip_min_max_shape_seqs= pandas.read_sql(SELECT_MIN_MAX_SHAPE_BREAKS_BY_TRIP_I_SQL, db_conn)rows = []for row in trip_min_max_shape_seqs.itertuples():shape_id, min_shape_break, max_shape_break = row.shape_id, row.min_shape_break, row.max_shape_breakif min_shape_break is None or max_shape_break is None:min_shape_break = float('')max_shape_break = float('')rows.append( (shape_id, min_shape_break, max_shape_break) )DELETE_SQL_BASE = \"\"db_conn.executemany(DELETE_SQL_BASE, rows)remove_dangling_shapes_references(db_conn)", "docstring": "Remove dangling entries from the shapes directory.\n\nParameters\n----------\ndb_conn: sqlite3.Connection\n connection to the GTFS object", "id": "f12919:m3"} {"signature": "def __init__(self,G,copy_db_path,buffer_distance_km=None,buffer_lat=None,buffer_lon=None,update_metadata=True,start_date=None,end_date=None,agency_ids_to_preserve=None,agency_distance=None):", "body": "if start_date and end_date:if isinstance(start_date, (datetime.datetime, datetime.date)):self.start_date = start_date.strftime(\"\")else:self.start_date = start_dateif isinstance(end_date, (datetime.datetime, datetime.date)):end_date_dt = end_dateself.end_date = end_date.strftime(\"\")else:self.end_date = end_dateend_date_dt = datetime.datetime.strptime(self.end_date, \"\")end_date_to_include = end_date_dt - datetime.timedelta(days=)self.end_date_to_include_str = end_date_to_include.strftime(\"\")else:self.start_date = Noneself.end_date = Noneself.copy_db_conn = Noneself.copy_db_path = copy_db_pathself.agency_ids_to_preserve = agency_ids_to_preserveself.gtfs = Gself.buffer_lat = buffer_latself.buffer_lon = buffer_lonself.buffer_distance_km = buffer_distance_kmself.update_metadata = update_metadataif agency_distance is not None:raise NotImplementedErrorself.this_db_path = self.gtfs.get_main_database_path()assert os.path.exists(self.this_db_path), \"\"assert os.path.exists(os.path.dirname(os.path.abspath(copy_db_path))),\"\"assert not os.path.exists(copy_db_path), \"\" % copy_db_path", "docstring": "Copy a database, and then based on various filters.\nOnly method `create_filtered_copy` is provided as we do not want to take the risk of\nlosing the data stored in the original database.\n\nG: 
gtfspy.gtfs.GTFS\n the original database\ncopy_db_path : str\n path to another database database\nupdate_metadata : boolean, optional\n whether to update metadata of the feed, defaulting to true\n (this option is mainly available for testing purposes)\nstart_date : str, or datetime.datetime\n filter out all data taking place before end_date (the start_time_ut of the end date)\n Date format \"YYYY-MM-DD\"\n (end_date_ut is not included after filtering)\nend_date : str, or datetime.datetime\n Filter out all data taking place after end_date\n The end_date is not included after filtering.\nagency_ids_to_preserve : iterable\n List of agency_ids to retain (str) (e.g. 'HSL' for Helsinki)\n Only routes by the listed agencies are then considered\nagency_distance : float\n Only evaluated in combination with agency filter.\n Distance (in km) to the other near-by stops that should be included in addition to\n the ones defined by the agencies.\n All vehicle trips going through at least two such stops would then be included in the\n export. Note that this should not be a recursive thing.\n Or should it be? :)\nbuffer_lat : float\n Latitude of the buffer zone center\nbuffer_lon : float\n Longitude of the buffer zone center\nbuffer_distance : float\n Distance from the buffer zone center (in kilometers)\n\nReturns\n-------\nNone", "id": "f12919:c0:m0"} {"signature": "def _delete_rows_by_start_and_end_date(self):", "body": "if (self.start_date is not None) and (self.end_date is not None):start_date_ut = self.gtfs.get_day_start_ut(self.start_date)end_date_ut = self.gtfs.get_day_start_ut(self.end_date)if self.copy_db_conn.execute(\"\"\"\").fetchone() != (,):raise ValueError(\"\"\"\")logging.info(\"\")table_to_preserve_map = {\"\": \"\"\"\"\"\",\"\": \"\"\"\"\"\",\"\": '''''',\"\": \"\"\"\"\"\"}table_to_remove_map = {key: \"\" + to_preserve + \"\"for key, to_preserve in table_to_preserve_map.items() }GTFS(self.copy_db_conn).set_current_process_time_zone()for table, query_template in table_to_remove_map.items():param_dict = {\"\": str(start_date_ut),\"\": str(end_date_ut)}query = \"\" + table + \"\" +query_template.format(**param_dict)self.copy_db_conn.execute(query)self.copy_db_conn.commit()return FILTEREDelse:return NOT_FILTERED", "docstring": "Removes rows from the sqlite database copy that are out of the time span defined by start_date and end_date\n:param gtfs: GTFS object\n:param copy_db_conn: sqlite database connection\n:param start_date:\n:param end_date:\n:return:", "id": "f12919:c0:m2"} {"signature": "def _filter_by_calendar(self):", "body": "if (self.start_date is not None) and (self.end_date is not None):logging.info(\"\")start_date_query = \"\"\"\"\"\".format(start_date=self.start_date)self.copy_db_conn.execute(start_date_query)end_date_query = \"\"\"\"\"\".format(end_date_to_include=self.end_date_to_include_str)self.copy_db_conn.execute(end_date_query)self.copy_db_conn.execute(DELETE_TRIPS_NOT_IN_DAYS_SQL)self.copy_db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)self.copy_db_conn.execute(DELETE_STOP_TIMES_NOT_REFERENCED_IN_TRIPS_SQL)delete_stops_not_in_stop_times_and_not_as_parent_stop(self.copy_db_conn)self.copy_db_conn.execute(DELETE_STOP_DISTANCE_ENTRIES_WITH_NONEXISTENT_STOPS_SQL)self.copy_db_conn.execute(DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL)self.copy_db_conn.execute(DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL)self.copy_db_conn.commit()return FILTEREDelse:return NOT_FILTERED", "docstring": "update calendar table's services\n:param copy_db_conn:\n:param start_date:\n:param 
end_date:\n:return:", "id": "f12919:c0:m4"} {"signature": "def _filter_by_agency(self):", "body": "if self.agency_ids_to_preserve is not None:logging.info(\"\")agency_ids_to_preserve = list(self.agency_ids_to_preserve)agencies = pandas.read_sql(\"\", self.copy_db_conn)agencies_to_remove = []for idx, row in agencies.iterrows():if row[''] not in agency_ids_to_preserve:agencies_to_remove.append(row[''])for agency_id in agencies_to_remove:self.copy_db_conn.execute('', (agency_id,))self.copy_db_conn.execute('''')self.copy_db_conn.execute('''')self.copy_db_conn.execute('''')self.copy_db_conn.execute('''')self.copy_db_conn.execute('''')self.copy_db_conn.execute('''')self.copy_db_conn.execute('''')self.copy_db_conn.execute('''')self.copy_db_conn.execute('''')self.copy_db_conn.commit()return FILTEREDelse:return NOT_FILTERED", "docstring": "filter by agency ids\n:param copy_db_conn:\n:param agency_ids_to_preserve:\n:return:", "id": "f12919:c0:m5"} {"signature": "def _filter_spatially(self):", "body": "if self.buffer_lat is None or self.buffer_lon is None or self.buffer_distance_km is None:return NOT_FILTEREDprint(\"\" + str(self.buffer_lat) +\"\" + str(self.buffer_lon) +\"\" + str(self.buffer_distance_km))remove_all_trips_fully_outside_buffer(self.copy_db_conn,self.buffer_lat,self.buffer_lon,self.buffer_distance_km,update_secondary_data=False)logging.info(\"\")find_distance_func_name = add_wgs84_distance_function_to_db(self.copy_db_conn)assert find_distance_func_name == \"\"stop_distance_filter_sql_base = (\"\" +\"\" +\"\")stops_within_buffer_sql = stop_distance_filter_sql_base.format(buffer_lat=float(self.buffer_lat),buffer_lon=float(self.buffer_lon),buffer_distance_meters=int(self.buffer_distance_km * ))stops_within_buffer = set(row[] for row in self.copy_db_conn.execute(stops_within_buffer_sql))stops_within_buffer_string = \"\" +\"\".join(str(stop_I) for stop_I in stops_within_buffer) + \"\"trip_min_max_include_seq_sql = ('''''''').format(stop_I_list=stops_within_buffer_string)trip_I_min_seq_max_seq_df = pandas.read_sql(trip_min_max_include_seq_sql, self.copy_db_conn)for trip_I_seq_row in trip_I_min_seq_max_seq_df.itertuples():trip_I = trip_I_seq_row.trip_Imin_seq = trip_I_seq_row.min_seqmax_seq = trip_I_seq_row.max_seqif min_seq == max_seq:self.copy_db_conn.execute(\"\".format(trip_I=trip_I))self.copy_db_conn.execute(\"\".format(trip_I=trip_I))else:DELETE_STOP_TIME_ENTRIES_SQL =\"\".format(trip_I=trip_I, max_seq=max_seq, min_seq=min_seq)self.copy_db_conn.execute(DELETE_STOP_TIME_ENTRIES_SQL)STOPS_NOT_WITHIN_BUFFER__FOR_TRIP_SQL =\"\".format(stops_within_hard_buffer=stops_within_buffer_string, trip_I=trip_I)stop_times_within_buffer_df = pandas.read_sql(STOPS_NOT_WITHIN_BUFFER__FOR_TRIP_SQL, self.copy_db_conn)if stop_times_within_buffer_df[''].all():continueelse:_split_trip(self.copy_db_conn, trip_I, stop_times_within_buffer_df)SHAPE_IDS_NOT_WITHIN_BUFFER_SQL =\"\"\"\".format(buffer_lat=self.buffer_lat,buffer_lon=self.buffer_lon,buffer_distance_meters=self.buffer_distance_km * )DELETE_ALL_SHAPE_IDS_NOT_WITHIN_BUFFER_SQL = \"\"+ SHAPE_IDS_NOT_WITHIN_BUFFER_SQL + \"\"self.copy_db_conn.execute(DELETE_ALL_SHAPE_IDS_NOT_WITHIN_BUFFER_SQL)SET_SHAPE_ID_TO_NULL_FOR_HARD_BUFFER_FILTERED_SHAPE_IDS =\"\" + SHAPE_IDS_NOT_WITHIN_BUFFER_SQL + 
\"\"self.copy_db_conn.execute(SET_SHAPE_ID_TO_NULL_FOR_HARD_BUFFER_FILTERED_SHAPE_IDS)self.copy_db_conn.execute('''''''''')self.copy_db_conn.execute('''''''''''''')delete_stops_not_in_stop_times_and_not_as_parent_stop(self.copy_db_conn)self.copy_db_conn.execute(DELETE_TRIPS_NOT_REFERENCED_IN_STOP_TIMES)self.copy_db_conn.execute(DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL)self.copy_db_conn.execute(DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL)self.copy_db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)self.copy_db_conn.execute(DELETE_STOP_DISTANCE_ENTRIES_WITH_NONEXISTENT_STOPS_SQL)self.copy_db_conn.execute(DELETE_FREQUENCIES_ENTRIES_NOT_PRESENT_IN_TRIPS)remove_dangling_shapes(self.copy_db_conn)self.copy_db_conn.commit()return FILTERED", "docstring": "Filter the feed based on self.buffer_distance_km from self.buffer_lon and self.buffer_lat.\n\n1. First include all stops that are within self.buffer_distance_km from self.buffer_lon and self.buffer_lat.\n2. Then include all intermediate stops that are between any of the included stop pairs with some PT trip.\n3. Repeat step 2 until no more stops are to be included.\n\nAs a summary this process should get rid of PT network tendrils, but should preserve the PT network intact\nat its core.", "id": "f12919:c0:m6"} {"signature": "def get_spatial_bounds(gtfs, as_dict=False):", "body": "stats = get_stats(gtfs)lon_min = stats['']lon_max = stats['']lat_min = stats['']lat_max = stats['']if as_dict:return {'': lon_min, '': lon_max, '': lat_min, '': lat_max}else:return lon_min, lon_max, lat_min, lat_max", "docstring": "Parameters\n----------\ngtfs\n\nReturns\n-------\nmin_lon: float\nmax_lon: float\nmin_lat: float\nmax_lat: float", "id": "f12921:m0"} {"signature": "def get_median_lat_lon_of_stops(gtfs):", "body": "stops = gtfs.get_table(\"\")median_lat = numpy.percentile(stops[''].values, )median_lon = numpy.percentile(stops[''].values, )return median_lat, median_lon", "docstring": "Get median latitude AND longitude of stops\n\nParameters\n----------\ngtfs: GTFS\n\nReturns\n-------\nmedian_lat : float\nmedian_lon : float", "id": "f12921:m2"} {"signature": "def get_centroid_of_stops(gtfs):", "body": "stops = gtfs.get_table(\"\")mean_lat = numpy.mean(stops[''].values)mean_lon = numpy.mean(stops[''].values)return mean_lat, mean_lon", "docstring": "Get mean latitude AND longitude of stops\n\nParameters\n----------\ngtfs: GTFS\n\nReturns\n-------\nmean_lat : float\nmean_lon : float", "id": "f12921:m3"} {"signature": "def write_stats_as_csv(gtfs, path_to_csv, re_write=False):", "body": "stats_dict = get_stats(gtfs)if re_write:os.remove(path_to_csv)", "docstring": "Writes data from get_stats to csv file\n\nParameters\n----------\ngtfs: GTFS\npath_to_csv: str\n filepath to the csv file to be generated\nre_write:\n insted of appending, create a new one.", "id": "f12921:m4"} {"signature": "def get_stats(gtfs):", "body": "stats = {}for table in ['', '', '', '', '', '', '', '','', '', '', '', '']:stats[\"\" + table] = gtfs.get_row_count(table)agencies = gtfs.get_table(\"\")stats[\"\"] = \"\".join(agencies[''].values)stops = gtfs.get_table(\"\")lats = stops[''].valueslons = stops[''].valuespercentiles = [, , , , ]try:lat_percentiles = numpy.percentile(lats, percentiles)except IndexError:lat_percentiles = [None] * lat_min, lat_10, lat_median, lat_90, lat_max = lat_percentilesstats[\"\"] = lat_minstats[\"\"] = lat_10stats[\"\"] = lat_medianstats[\"\"] = lat_90stats[\"\"] = lat_maxtry:lon_percentiles = numpy.percentile(lons, percentiles)except 
IndexError:lon_percentiles = [None] * lon_min, lon_10, lon_median, lon_90, lon_max = lon_percentilesstats[\"\"] = lon_minstats[\"\"] = lon_10stats[\"\"] = lon_medianstats[\"\"] = lon_90stats[\"\"] = lon_maxif len(lats) > :stats[\"\"] = wgs84_distance(lat_min, lon_median, lat_max, lon_median) / stats[\"\"] = wgs84_distance(lon_min, lat_median, lon_max, lat_median) / else:stats[\"\"] = Nonestats[\"\"] = Nonefirst_day_start_ut, last_day_start_ut = gtfs.get_day_start_ut_span()stats[\"\"] = first_day_start_utif last_day_start_ut is None:stats[\"\"] = Noneelse:stats[\"\"] = last_day_start_ut + * stats[\"\"] = gtfs.get_min_date()stats[\"\"] = gtfs.get_max_date()max_activity_date = gtfs.execute_custom_query('''''''''').fetchone()if max_activity_date:stats[\"\"] = max_activity_date[]max_activity_hour = gtfs.get_cursor().execute('''''', (stats[\"\"],)).fetchone()if max_activity_hour:stats[\"\"] = max_activity_hour[]else:stats[\"\"] = Noneif max_activity_date and max_activity_hour:fleet_size_estimates = _fleet_size_estimate(gtfs, stats[''], stats[''])stats.update(fleet_size_estimates)stats[''] = _distribution(gtfs, '', '')stats[''] = _distribution(gtfs, '', '')stats[''] = _distribution(gtfs, '', '')stats[''] = _distribution(gtfs, '', '')stats[''] = _distribution(gtfs, '', '')stats[''] = _distribution(gtfs, '', '')stats = _feed_calendar_span(gtfs, stats)return stats", "docstring": "Get basic statistics of the GTFS data.\n\nParameters\n----------\ngtfs: GTFS\n\nReturns\n-------\nstats: dict\n A dictionary of various statistics.\n Keys should be strings, values should be inputtable to a database (int, date, str, ...)\n (but not a list)", "id": "f12921:m5"} {"signature": "def _distribution(gtfs, table, column):", "body": "cur = gtfs.conn.cursor()cur.execute(''''''.format(column=column, table=table))return ''.join('' % (t, c) for t, c in cur)", "docstring": "Count occurrences of values AND return it as a string.\n\n Example return value: '1:5 2:15", "id": "f12921:m6"} {"signature": "def _fleet_size_estimate(gtfs, hour, date):", "body": "results = {}fleet_size_list = []cur = gtfs.conn.cursor()rows = cur.execute('''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''', (hour, hour, date, date))for row in rows:fleet_size_list.append(str(row[]) + '' + str(row[]))results[''] = \"\".join(fleet_size_list)fleet_size_list = []fleet_size_dict = {}if hour:for minute in range(hour * , (hour + ) * , ):rows = gtfs.conn.cursor().execute('''''''''''''''',(minute, minute, date))for row in rows:if fleet_size_dict.get(row[], ) < row[]:fleet_size_dict[row[]] = row[]for key in fleet_size_dict.keys():fleet_size_list.append(str(key) + '' + str(fleet_size_dict[key]))results[\"\"] = ''.join(fleet_size_list)return results", "docstring": "Calculates fleet size estimates by two separate formula:\n 1. Considering all routes separately with no interlining and doing a deficit calculation at every terminal\n 2. 
By looking at the maximum number of vehicles in simultaneous movement\n\nParameters\n----------\ngtfs: GTFS\nhour: int\ndate: ?\n\nReturns\n-------\nresults: dict\n a dict with keys:\n fleet_size_route_based\n fleet_size_max_movement", "id": "f12921:m7"} {"signature": "def _feed_calendar_span(gtfs, stats):", "body": "n_feeds = _n_gtfs_sources(gtfs)[]max_start = Nonemin_end = Noneif n_feeds > :for i in range(n_feeds):feed_key = \"\" + str(i) + \"\"start_key = feed_key + \"\"end_key = feed_key + \"\"calendar_span = gtfs.conn.cursor().execute('''', (feed_key + '',)).fetchone()stats[start_key] = calendar_span[]stats[end_key] = calendar_span[]if calendar_span[] is not None and calendar_span[] is not None:if not max_start and not min_end:max_start = calendar_span[]min_end = calendar_span[]else:if gtfs.get_day_start_ut(calendar_span[]) > gtfs.get_day_start_ut(max_start):max_start = calendar_span[]if gtfs.get_day_start_ut(calendar_span[]) < gtfs.get_day_start_ut(min_end):min_end = calendar_span[]stats[\"\"] = max_startstats[\"\"] = min_endelse:stats[\"\"] = stats[\"\"]stats[\"\"] = stats[\"\"]return stats", "docstring": "Computes the temporal coverage of each source feed\n\nParameters\n----------\ngtfs: gtfspy.GTFS object\nstats: dict\n where to append the stats\n\nReturns\n-------\nstats: dict", "id": "f12921:m9"} {"signature": "def update_stats(gtfs):", "body": "stats = get_stats(gtfs)gtfs.update_stats(stats)", "docstring": "Computes stats AND stores them into the underlying gtfs object (i.e. database).\n\nParameters\n----------\ngtfs: GTFS", "id": "f12921:m10"} {"signature": "def trip_stats(gtfs, results_by_mode=False):", "body": "conn = gtfs.connconn.create_function(\"\", , wgs84_distance)cur = conn.cursor()query = ''''''''''''''''''''''''''''''''q_result = pd.read_sql_query(query, conn)q_result[''] = * q_result[''] / q_result['']q_result[''] = q_result[''] / q_result[''] = q_result[''] / q_result = q_result.loc[q_result[''] != float(\"\")]if results_by_mode:q_results = {}for type in q_result[''].unique().tolist():q_results[type] = q_result.loc[q_result[''] == type]return q_resultselse:return q_result", "docstring": "Parameters\n----------\ngtfs: GTFS\nresults_by_mode: bool\n\nReturns\n-------\nif results_by_mode is False:\n q_result: pandas.DataFrame\nif results_by_mode is True:\n q_results: dict\n a dict with the following keys:\n [ADD HERE]", "id": "f12921:m11"} {"signature": "def route_frequencies(gtfs, results_by_mode=False):", "body": "day = gtfs.get_suitable_date_for_daily_extract()query = (\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\".format(day=day))return pd.DataFrame(gtfs.execute_custom_query_pandas(query))", "docstring": "Return the frequency of all types of routes per day.\n\nParameters\n-----------\ngtfs: GTFS\n\nReturns\n-------\npandas.DataFrame with columns\n route_I, type, frequency", "id": "f12921:m13"} {"signature": "def hourly_frequencies(gtfs, st, et, route_type):", "body": "timeframe = et-sthours = timeframe/ day = gtfs.get_suitable_date_for_daily_extract()stops = gtfs.get_stops_for_route_type(route_type).T.drop_duplicates().Tquery = (\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\".format(h=hours, st=st, et=et, day=day))try:trips_frequency = gtfs.execute_custom_query_pandas(query).T.drop_duplicates().Tdf = pd.merge(stops[['', '', '']], trips_frequency[['', '']],on='', how='')return df.apply(pd.to_numeric)except:raise ValueError(\"\")", "docstring": "Return all the number of vehicles (i.e. 
busses,trams,etc) that pass hourly through a stop in a time frame.\n\nParameters\n----------\ngtfs: GTFS\nst : int\n start time of the time framein unix time\net : int\n end time of the time frame in unix time\nroute_type: int\n\nReturns\n-------\nnumeric pandas.DataFrame with columns\n stop_I, lat, lon, frequency", "id": "f12921:m14"} {"signature": "def get_vehicle_hours_by_type(gtfs, route_type):", "body": "day = gtfs.get_suitable_date_for_daily_extract()query = (\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\".format(day=day, route_type=route_type))df = gtfs.execute_custom_query_pandas(query)return df[''].item()", "docstring": "Return the sum of vehicle hours in a particular day by route type.", "id": "f12921:m17"} {"signature": "def trips_frequencies(gtfs):", "body": "query = (\"\"\"\"\"\"\"\"\"\")return(gtfs.execute_custom_query_pandas(query))", "docstring": "Get the frequency of trip_I in a particular day", "id": "f12921:m18"} {"signature": "def __init__(self, fname_or_conn):", "body": "if isinstance(fname_or_conn, string_types):if os.path.isfile(fname_or_conn):self.conn = sqlite3.connect(fname_or_conn)self.fname = fname_or_connself.conn.execute('')self.conn.execute('')else:raise FileNotFoundError(\"\" + fname_or_conn + \"\")elif isinstance(fname_or_conn, sqlite3.Connection):self.conn = fname_or_connself._dont_close = Trueelse:raise NotImplementedError(\"\" + str(type(fname_or_conn)) + \"\")assert self.conn.execute(\"\").fetchone() is not Noneself.meta = GTFSMetadata(self.conn)self.conn.create_function(\"\", , wgs84_distance)self._timezone = pytz.timezone(self.get_timezone_name())", "docstring": "Open a GTFS object\n\n Parameters\n ----------\n fname_or_conn: str | sqlite3.Connection\n path to the preprocessed gtfs database or a connection to a gtfs database", "id": "f12922:c0:m0"} {"signature": "@classmethoddef from_directory_as_inmemory_db(cls, gtfs_directory):", "body": "from gtfspy.import_gtfs import import_gtfsconn = sqlite3.connect(\"\")import_gtfs(gtfs_directory,conn,preserve_connection=True,print_progress=False)return cls(conn)", "docstring": "Instantiate a GTFS object by computing\n\nParameters\n----------\ngtfs_directory: str\n path to the directory for importing the database", "id": "f12922:c0:m2"} {"signature": "def get_main_database_path(self):", "body": "cur = self.conn.cursor()cur.execute(\"\")rows = cur.fetchall()for row in rows:if row[] == str(\"\"):return row[]", "docstring": "Should return the path to the database\n\nReturns\n-------\npath : unicode\n path to the database, empty string for in-memory databases", "id": "f12922:c0:m3"} {"signature": "def get_shape_distance_between_stops(self, trip_I, from_stop_seq, to_stop_seq):", "body": "query_template = \"\"stop_seqs = [from_stop_seq, to_stop_seq]shape_breaks = []for seq in stop_seqs:q = query_template.format(seq=seq, trip_I=trip_I)shape_breaks.append(self.conn.execute(q).fetchone())query_template = \"\"\"\"\"\"distance_query = query_template.format(trip_I=trip_I, from_stop_seq=from_stop_seq, to_stop_seq=to_stop_seq)return self.conn.execute(distance_query).fetchone()[]", "docstring": "Get the distance along a shape between stops\n\nParameters\n----------\ntrip_I : int\n trip_ID along which we travel\nfrom_stop_seq : int\n the sequence number of the 'origin' stop\nto_stop_seq : int\n the sequence number of the 'destination' stop\n\nReturns\n-------\ndistance : float, None\n If the shape calculation succeeded, return a float, otherwise return None\n (i.e. 
in the case where the shapes table is empty)", "id": "f12922:c0:m5"} {"signature": "def get_directly_accessible_stops_within_distance(self, stop, distance):", "body": "query = \"\"\"\"\"\" % (stop, distance)return pd.read_sql_query(query, self.conn)", "docstring": "Returns stops that are accessible without transfer from the stops that are within a specific walking distance\n:param stop: int\n:param distance: int\n:return:", "id": "f12922:c0:m8"} {"signature": "def get_cursor(self):", "body": "return self.conn.cursor()", "docstring": "Return a cursor to the underlying sqlite3 object", "id": "f12922:c0:m9"} {"signature": "def get_table(self, table_name):", "body": "return pd.read_sql(\"\" + table_name, self.conn)", "docstring": "Return a pandas.DataFrame object corresponding to the sql table\n\nParameters\n----------\ntable_name: str\n name of the table in the database\n\nReturns\n-------\ndf : pandas.DataFrame", "id": "f12922:c0:m10"} {"signature": "def get_row_count(self, table):", "body": "return self.conn.cursor().execute(\"\" + table).fetchone()[]", "docstring": "Get number of rows in a table", "id": "f12922:c0:m11"} {"signature": "def get_table_names(self):", "body": "return list(pd.read_sql(\"\", self.conn)[\"\"])", "docstring": "Return a list of the underlying tables in the database.\n\nReturns\n-------\ntable_names: list[str]", "id": "f12922:c0:m12"} {"signature": "def set_current_process_time_zone(self):", "body": "TZ = self.conn.execute('').fetchall()[][]return set_process_timezone(TZ)", "docstring": "This function queries a GTFS connection, finds the timezone of this\ndatabase, and sets it in the TZ environment variable. This is a\nprocess-global configuration, by the nature of the C library!\n\nReturns\n-------\nNone\n\nAlters os.environ['TZ']", "id": "f12922:c0:m13"} {"signature": "def get_timezone_name(self):", "body": "tz_name = self.conn.execute('').fetchone()if tz_name is None:raise ValueError(\"\")return tz_name[]", "docstring": "Get name of the GTFS timezone\n\nReturns\n-------\ntimezone_name : str\n name of the time zone, e.g. 
\"Europe/Helsinki\"", "id": "f12922:c0:m15"} {"signature": "def get_timezone_string(self, dt=None):", "body": "if dt is None:download_date = self.meta.get('')if download_date:dt = datetime.datetime.strptime(download_date, '')else:dt = datetime.datetime.today()loc_dt = self._timezone.localize(dt)timezone_string = loc_dt.strftime(\"\")return timezone_string", "docstring": "Return the timezone of the GTFS database object as a string.\nThe assumed time when the timezone (difference) is computed\nis the download date of the file.\nThis might not be optimal in all cases.\n\nSo this function should return values like:\n \"+0200\" or \"-1100\"\n\nParameters\n----------\ndt : datetime.datetime, optional\n The (unlocalized) date when the timezone should be computed.\n Defaults first to download_date, and then to the runtime date.\n\nReturns\n-------\ntimezone_string : str", "id": "f12922:c0:m16"} {"signature": "def unixtime_seconds_to_gtfs_datetime(self, unixtime):", "body": "return datetime.datetime.fromtimestamp(unixtime, self._timezone)", "docstring": "Convert unixtime to localized datetime\n\nParameters\n----------\nunixtime : int\n\nReturns\n-------\ngtfs_datetime: datetime.datetime\n time localized to gtfs_datetime's timezone", "id": "f12922:c0:m17"} {"signature": "def unlocalized_datetime_to_ut_seconds(self, unlocalized_datetime):", "body": "loc_dt = self._timezone.localize(unlocalized_datetime)unixtime_seconds = calendar.timegm(loc_dt.utctimetuple())return unixtime_seconds", "docstring": "Convert datetime (in GTFS timezone) to unixtime\n\nParameters\n----------\nunlocalized_datetime : datetime.datetime\n (tz coerced to GTFS timezone, should NOT be UTC.)\n\nReturns\n-------\noutput : int (unixtime)", "id": "f12922:c0:m18"} {"signature": "def get_day_start_ut(self, date):", "body": "if isinstance(date, string_types):date = datetime.datetime.strptime(date, '')date_noon = datetime.datetime(date.year, date.month, date.day, , , )ut_noon = self.unlocalized_datetime_to_ut_seconds(date_noon)return ut_noon - * * ", "docstring": "Get day start time (as specified by GTFS) as unix time in seconds\n\nParameters\n----------\ndate : str | unicode | datetime.datetime\n something describing the date\n\nReturns\n-------\nday_start_ut : int\n start time of the day in unixtime", "id": "f12922:c0:m19"} {"signature": "def get_trip_trajectories_within_timespan(self, start, end, use_shapes=True, filter_name=None):", "body": "trips = []trip_df = self.get_tripIs_active_in_range(start, end)print(\"\" + str(len(trip_df)) + \"\")shape_cache = {}for row in trip_df.itertuples():trip_I = row.trip_Iday_start_ut = row.day_start_utshape_id = row.shape_idtrip = {}name, route_type = self.get_route_name_and_type_of_tripI(trip_I)trip[''] = int(route_type)trip[''] = str(name)if filter_name and (name != filter_name):continuestop_lats = []stop_lons = []stop_dep_times = []shape_breaks = []stop_seqs = []stop_time_df = self.get_trip_stop_time_data(trip_I, day_start_ut)for stop_row in stop_time_df.itertuples():stop_lats.append(float(stop_row.lat))stop_lons.append(float(stop_row.lon))stop_dep_times.append(float(stop_row.dep_time_ut))try:stop_seqs.append(int(stop_row.seq))except TypeError:stop_seqs.append(None)if use_shapes:try:shape_breaks.append(int(stop_row.shape_break))except (TypeError, ValueError):shape_breaks.append(None)if use_shapes:if shape_id not in shape_cache:shape_cache[shape_id] = shapes.get_shape_points2(self.conn.cursor(), shape_id)shape_data = shape_cache[shape_id]try:trip[''] = 
shapes.interpolate_shape_times(shape_data[''], shape_breaks, stop_dep_times)trip[''] = shape_data['']trip[''] = shape_data['']start_break = shape_breaks[]end_break = shape_breaks[-]trip[''] = trip[''][start_break:end_break + ]trip[''] = trip[''][start_break:end_break + ]trip[''] = trip[''][start_break:end_break + ]except:trip[''] = stop_dep_timestrip[''] = stop_latstrip[''] = stop_lonselse:trip[''] = stop_dep_timestrip[''] = stop_latstrip[''] = stop_lonstrips.append(trip)return {\"\": trips}", "docstring": "Get complete trip data for visualizing public transport operation based on gtfs.\n\nParameters\n----------\nstart: number\n Earliest position data to return (in unix time)\nend: number\n Latest position data to return (in unix time)\nuse_shapes: bool, optional\n Whether or not shapes should be included\nfilter_name: str\n Pick only routes having this name.\n\nReturns\n-------\ntrips: dict\n trips['trips'] is a list whose each element (e.g. el = trips['trips'][0])\n is a dict with the following properties:\n el['lats'] -- list of latitudes\n el['lons'] -- list of longitudes\n el['times'] -- list of passage_times\n el['route_type'] -- type of vehicle as specified by GTFS\n el['name'] -- name of the route", "id": "f12922:c0:m20"} {"signature": "def get_stop_count_data(self, start_ut, end_ut):", "body": "trips_df = self.get_tripIs_active_in_range(start_ut, end_ut)stop_counts = Counter()for row in trips_df.itertuples():stops_seq = self.get_trip_stop_time_data(row.trip_I, row.day_start_ut)for stop_time_row in stops_seq.itertuples(index=False):if (stop_time_row.dep_time_ut >= start_ut) and (stop_time_row.dep_time_ut <= end_ut):stop_counts[stop_time_row.stop_I] += all_stop_data = self.stops()counts = [stop_counts[stop_I] for stop_I in all_stop_data[\"\"].values]all_stop_data.loc[:, \"\"] = pd.Series(counts, index=all_stop_data.index)return all_stop_data", "docstring": "Get stop count data.\n\nParameters\n----------\nstart_ut : int\n start time in unixtime\nend_ut : int\n end time in unixtime\n\nReturns\n-------\nstopData : pandas.DataFrame\n each row in the stopData dataFrame is a dictionary with the following elements\n stop_I, count, lat, lon, name\n with data types\n (int, int, float, float, str)", "id": "f12922:c0:m21"} {"signature": "def get_segment_count_data(self, start, end, use_shapes=True):", "body": "cur = self.conn.cursor()trips_df = self.get_tripIs_active_in_range(start, end)segment_counts = Counter()seg_to_info = {}tripI_to_seq = defaultdict(list)for row in trips_df.itertuples():stops_df = self.get_trip_stop_time_data(row.trip_I, row.day_start_ut)for i in range(len(stops_df) - ):(stop_I, dep_time_ut, s_lat, s_lon, s_seq, shape_break) = stops_df.iloc[i](stop_I_n, dep_time_ut_n, s_lat_n, s_lon_n, s_seq_n, shape_break_n) = stops_df.iloc[i + ]if (dep_time_ut >= start) and (dep_time_ut_n <= end):seg = (stop_I, stop_I_n)segment_counts[seg] += if seg not in seg_to_info:seg_to_info[seg] = {u\"\": row.trip_I,u\"\": [s_lat, s_lat_n],u\"\": [s_lon, s_lon_n],u\"\": row.shape_id,u\"\": [s_seq, s_seq_n],u\"\": [shape_break, shape_break_n]}tripI_to_seq[row.trip_I].append(seg)stop_names = {}for (stop_I, stop_J) in segment_counts.keys():for s in [stop_I, stop_J]:if s not in stop_names:stop_names[s] = self.stop(s)[u''].values[]seg_data = []for seg, count in segment_counts.items():segInfo = seg_to_info[seg]shape_breaks = segInfo[u\"\"]seg_el = {}if use_shapes and shape_breaks and shape_breaks[] and shape_breaks[]:shape = 
shapes.get_shape_between_stops(cur,segInfo[u''],shape_breaks=shape_breaks)seg_el[u''] = segInfo[u''][:] + shape[u''] + segInfo[u''][:]seg_el[u''] = segInfo[u''][:] + shape[u''] + segInfo[u''][:]else:seg_el[u''] = segInfo[u'']seg_el[u''] = segInfo[u'']seg_el[u''] = stop_names[seg[]] + u\"\" + stop_names[seg[]]seg_el[u''] = countseg_data.append(seg_el)return seg_data", "docstring": "Get segment data including PTN vehicle counts per segment that are\nfully _contained_ within the interval (start, end)\n\nParameters\n----------\nstart : int\n start time of the simulation in unix time\nend : int\n end time of the simulation in unix time\nuse_shapes : bool, optional\n whether to include shapes (if available)\n\nReturns\n-------\nseg_data : list\n each element in the list is a dict containing keys:\n \"trip_I\", \"lats\", \"lons\", \"shape_id\", \"stop_seqs\", \"shape_breaks\"", "id": "f12922:c0:m22"} {"signature": "def get_all_route_shapes(self, use_shapes=True):", "body": "cur = self.conn.cursor()query = \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"data = pd.read_sql_query(query, self.conn)routeShapes = []for i, row in enumerate(data.itertuples()):datum = {\"\": str(row.name), \"\": int(row.type), \"\": row.route_I, \"\": str(row.agency_id),\"\": str(row.agency_name)}if use_shapes and row.shape_id:shape = shapes.get_shape_points2(cur, row.shape_id)lats = shape['']lons = shape['']else:stop_shape = self.get_trip_stop_coordinates(row.trip_I)lats = list(stop_shape[''])lons = list(stop_shape[''])datum[''] = [float(lat) for lat in lats]datum[''] = [float(lon) for lon in lons]routeShapes.append(datum)return routeShapes", "docstring": "Get the shapes of all routes.\n\nParameters\n----------\nuse_shapes : bool, optional\n by default True (i.e. use shapes as the name of the function indicates)\n if False (fall back to lats and longitudes)\n\nReturns\n-------\nrouteShapes: list of dicts that should have the following keys\n name, type, agency, lats, lons\n with types\n list, list, str, list, list", "id": "f12922:c0:m23"} {"signature": "def get_tripIs_active_in_range(self, start, end):", "body": "to_select = \"\"query = \"\" + to_select +\"\"\"\"\"\".format(start_ut=start, end_ut=end)return pd.read_sql_query(query, self.conn)", "docstring": "Obtain from the (standard) GTFS database, list of trip_IDs (and other trip_related info)\nthat are active between given 'start' and 'end' times.\n\nThe start time of a trip is determined by the departure time at the last stop of the trip.\nThe end time of a trip is determined by the arrival time at the last stop of the trip.\n\nParameters\n----------\nstart, end : int\n the start and end of the time interval in unix time seconds\n\nReturns\n-------\nactive_trips : pandas.DataFrame with columns\n trip_I, day_start_ut, start_time_ut, end_time_ut, shape_id", "id": "f12922:c0:m24"} {"signature": "def get_trip_counts_per_day(self):", "body": "query = \"\"trip_counts_per_day = pd.read_sql_query(query, self.conn, index_col=\"\")max_day = trip_counts_per_day.index.max()min_day = trip_counts_per_day.index.min()min_date = datetime.datetime.strptime(min_day, '')max_date = datetime.datetime.strptime(max_day, '')num_days = (max_date - min_date).daysdates = [min_date + datetime.timedelta(days=x) for x in range(num_days + )]trip_counts = []date_strings = []for date in dates:date_string = date.strftime(\"\")date_strings.append(date_string)try:value = trip_counts_per_day.loc[date_string, '']except KeyError:value = trip_counts.append(value)for date_string in trip_counts_per_day.index:assert 
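A sketch tying together the time-windowed queries documented above (active trips, per-stop counts, per-segment counts, and route shapes). The one-hour window and the database path are illustrative assumptions:

```python
from gtfspy.gtfs import GTFS

g = GTFS("helsinki.sqlite")                                  # hypothetical path
start_ut = g.get_day_start_ut("2016-05-02") + 8 * 3600       # 08:00 on the example day
end_ut = start_ut + 3600                                     # one hour later

active_trips = g.get_tripIs_active_in_range(start_ut, end_ut)    # trip_I, day_start_ut, ...
stop_counts = g.get_stop_count_data(start_ut, end_ut)            # stop_I, count, lat, lon, name
segments = g.get_segment_count_data(start_ut, end_ut, use_shapes=False)
route_shapes = g.get_all_route_shapes(use_shapes=False)          # falls back to stop lats/lons
```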
date_string in date_stringsdata = {\"\": dates, \"\": date_strings, \"\": trip_counts}return pd.DataFrame(data)", "docstring": "Get trip counts per day between the start and end day of the feed.\n\nReturns\n-------\ntrip_counts : pandas.DataFrame\n Has columns \"date_str\" (dtype str) \"trip_counts\" (dtype int)", "id": "f12922:c0:m25"} {"signature": "def get_suitable_date_for_daily_extract(self, date=None, ut=False):", "body": "daily_trips = self.get_trip_counts_per_day()max_daily_trips = daily_trips[u''].max(axis=)if date in daily_trips[u'']:start_index = daily_trips[daily_trips[u''] == date].index.tolist()[]daily_trips[u''] = daily_trips.indexdaily_trips[u''] = abs(start_index - daily_trips.index)daily_trips = daily_trips.sort_values(by=[u'', u'']).reindex()for row in daily_trips.itertuples():if row.trip_counts >= * max_daily_trips:if ut:return self.get_day_start_ut(row.date_str)else:return row.date_str", "docstring": "Parameters\n----------\ndate : str\nut : bool\n Whether to return the date as a string or as a an int (seconds after epoch).\n\nReturns\n-------\nSelects suitable date for daily extract\nIterates trough the available dates forward and backward from the download date accepting the first day that has\nat least 90 percent of the number of trips of the maximum date. The condition can be changed to something else.\nIf the download date is out of range, the process will look through the dates from first to last.", "id": "f12922:c0:m26"} {"signature": "def get_weekly_extract_start_date(self, ut=False, weekdays_at_least_of_max=,verbose=False, download_date_override=None):", "body": "daily_trip_counts = self.get_trip_counts_per_day()if isinstance(download_date_override, str):search_start_date = datetime.datetime.strptime(download_date_override, \"\")elif isinstance(download_date_override, datetime.datetime):search_start_date = download_date_overrideelse:assert download_date_override is Nonedownload_date_str = self.meta['']if download_date_str == \"\":warnings.warn(\"\"\"\" + self.get_weekly_extract_start_date.__name__ +\"\")search_start_date = daily_trip_counts[''].min()else:search_start_date = datetime.datetime.strptime(download_date_str, \"\")feed_min_date = daily_trip_counts[''].min()feed_max_date = daily_trip_counts[''].max()assert (feed_max_date - feed_min_date >= datetime.timedelta(days=)),\"\"next_monday_from_search_start_date = search_start_date + timedelta(days=( - search_start_date.weekday()))if not (feed_min_date <= next_monday_from_search_start_date <= feed_max_date):warnings.warn(\"\"\"\")next_monday_from_search_start_date = feed_min_date + timedelta(days=( - feed_min_date.weekday()))max_trip_count = daily_trip_counts[''].quantile()threshold = weekdays_at_least_of_max * max_trip_countthreshold_fulfilling_days = daily_trip_counts[''] > thresholdsearch_start_monday_index = daily_trip_counts[daily_trip_counts[''] == next_monday_from_search_start_date].index[]while_loop_monday_index = search_start_monday_indexwhile len(daily_trip_counts.index) >= while_loop_monday_index + :if all(threshold_fulfilling_days[while_loop_monday_index:while_loop_monday_index + ]):row = daily_trip_counts.iloc[while_loop_monday_index]if ut:return self.get_day_start_ut(row.date_str)else:return row['']while_loop_monday_index += while_loop_monday_index = search_start_monday_index - while while_loop_monday_index >= :if all(threshold_fulfilling_days[while_loop_monday_index:while_loop_monday_index + ]):row = daily_trip_counts.iloc[while_loop_monday_index]if ut:return 
self.get_day_start_ut(row.date_str)else:return row['']while_loop_monday_index -= raise RuntimeError(\"\")", "docstring": "Find a suitable weekly extract start date (monday).\nThe goal is to obtain as 'usual' week as possible.\nThe weekdays of the weekly extract week should contain\nat least 0.9 of the total maximum of trips.\n\nParameters\n----------\nut: return unixtime?\nweekdays_at_least_of_max: float\n\ndownload_date_override: str, semi-optional\n Download-date in format %Y-%m-%d, weeks close to this.\n Overrides the (possibly) recorded downloaded date in the database\n\nReturns\n-------\ndate: int or str\n\nRaises\n------\nerror: RuntimeError\n If no download date could be found.", "id": "f12922:c0:m27"} {"signature": "def get_spreading_trips(self, start_time_ut, lat, lon,max_duration_ut= * ,min_transfer_time=,use_shapes=False):", "body": "from gtfspy.spreading.spreader import Spreaderspreader = Spreader(self, start_time_ut, lat, lon, max_duration_ut, min_transfer_time, use_shapes)return spreader.spread()", "docstring": "Starting from a specific point and time, get complete single source\nshortest path spreading dynamics as trips, or \"events\".\n\nParameters\n----------\nstart_time_ut: number\n Start time of the spreading.\nlat: float\n latitude of the spreading seed location\nlon: float\n longitude of the spreading seed location\nmax_duration_ut: int\n maximum duration of the spreading process (in seconds)\nmin_transfer_time : int\n minimum transfer time in seconds\nuse_shapes : bool\n whether to include shapes\n\nReturns\n-------\ntrips: dict\n trips['trips'] is a list whose each element (e.g. el = trips['trips'][0])\n is a dict with the following properties:\n el['lats'] : list of latitudes\n el['lons'] : list of longitudes\n el['times'] : list of passage_times\n el['route_type'] : type of vehicle as specified by GTFS, or -1 if walking\n el['name'] : name of the route", "id": "f12922:c0:m28"} {"signature": "def get_closest_stop(self, lat, lon):", "body": "cur = self.conn.cursor()min_dist = float(\"\")min_stop_I = Nonerows = cur.execute(\"\")for stop_I, lat_s, lon_s in rows:dist_now = wgs84_distance(lat, lon, lat_s, lon_s)if dist_now < min_dist:min_dist = dist_nowmin_stop_I = stop_Ireturn min_stop_I", "docstring": "Get closest stop to a given location.\n\nParameters\n----------\nlat: float\n latitude coordinate of the location\nlon: float\n longitude coordinate of the location\n\nReturns\n-------\nstop_I: int\n the index of the stop in the database", "id": "f12922:c0:m29"} {"signature": "def get_route_name_and_type_of_tripI(self, trip_I):", "body": "cur = self.conn.cursor()results = cur.execute(\"\".format(trip_I=trip_I))name, rtype = results.fetchone()return u\"\" % str(name), int(rtype)", "docstring": "Get route short name and type\n\nParameters\n----------\ntrip_I: int\n short trip index created when creating the database\n\nReturns\n-------\nname: str\n short name of the route, eg. 195N\ntype: int\n route_type according to the GTFS standard", "id": "f12922:c0:m32"} {"signature": "def get_route_name_and_type(self, route_I):", "body": "cur = self.conn.cursor()results = cur.execute(\"\", (route_I,))name, rtype = results.fetchone()return name, int(rtype)", "docstring": "Get route short name and type\n\nParameters\n----------\nroute_I: int\n route index (database specific)\n\nReturns\n-------\nname: str\n short name of the route, eg. 
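The extract-date helpers documented above can be combined as in the following sketch (the database path is a hypothetical example):

```python
from gtfspy.gtfs import GTFS

g = GTFS("helsinki.sqlite")                               # hypothetical path
daily_counts = g.get_trip_counts_per_day()                # columns "date_str" and "trip_counts"
day_str = g.get_suitable_date_for_daily_extract()         # a "usual" day, as a date string
day_ut = g.get_suitable_date_for_daily_extract(ut=True)   # the same day, as unixtime
monday = g.get_weekly_extract_start_date()                # Monday starting a representative week
```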
195N\ntype: int\n route_type according to the GTFS standard", "id": "f12922:c0:m33"} {"signature": "def get_trip_stop_coordinates(self, trip_I):", "body": "query = \"\"\"\"\"\".format(trip_I=trip_I)stop_coords = pd.read_sql(query, self.conn)return stop_coords", "docstring": "Get coordinates for a given trip_I\n\nParameters\n----------\ntrip_I : int\n the integer id of the trip\n\nReturns\n-------\nstop_coords : pandas.DataFrame\n with columns \"lats\" and \"lons\"", "id": "f12922:c0:m34"} {"signature": "def get_trip_stop_time_data(self, trip_I, day_start_ut):", "body": "to_select = \"\" + str(day_start_ut) + \"\"str_to_run = \"\" + to_select + \"\"\"\"\"\"str_to_run = str_to_run.format(trip_I=trip_I)return pd.read_sql_query(str_to_run, self.conn)", "docstring": "Obtain from the (standard) GTFS database, trip stop data\n(departure time in ut, lat, lon, seq, shape_break) as a pandas DataFrame\n\nSome filtering could be applied here, if only e.g. departure times\ncorresponding within some time interval should be considered.\n\nParameters\n----------\ntrip_I : int\n integer index of the trip\nday_start_ut : int\n the start time of the day in unix time (seconds)\n\nReturns\n-------\ndf: pandas.DataFrame\n df has the following columns\n 'departure_time_ut, lat, lon, seq, shape_break'", "id": "f12922:c0:m35"} {"signature": "def get_events_by_tripI_and_dsut(self, trip_I, day_start_ut,start_ut=None, end_ut=None):", "body": "assert day_start_ut <= start_utassert day_start_ut <= end_utassert start_ut <= end_utevents = []if not self.tripI_takes_place_on_dsut(trip_I, day_start_ut):return eventsquery = \"\"\"\"\"\"params = [day_start_ut, day_start_ut,trip_I]if start_ut:query += \"\"params += [start_ut, day_start_ut]if end_ut:query += \"\"params += [end_ut, day_start_ut]query += \"\"cur = self.conn.cursor()rows = cur.execute(query, params)stop_data = list(rows)for i in range(len(stop_data) - ):event = {\"\": stop_data[i][],\"\": stop_data[i + ][],\"\": stop_data[i][],\"\": stop_data[i + ][]}events.append(event)return events", "docstring": "Get trip data as a list of events (i.e. 
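A sketch of the stop- and route-lookup helpers documented above; the coordinates and the trip index are purely illustrative values, not taken from the records:

```python
from gtfspy.gtfs import GTFS

g = GTFS("helsinki.sqlite")                              # hypothetical path
stop_I = g.get_closest_stop(60.170, 24.941)              # nearest stop to (lat, lon)

trip_I = 1                                               # illustrative trip index
name, route_type = g.get_route_name_and_type_of_tripI(trip_I)
coords = g.get_trip_stop_coordinates(trip_I)             # DataFrame with "lats" and "lons"
```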
dicts).\n\nParameters\n----------\ntrip_I : int\n shorthand index of the trip.\nday_start_ut : int\n the start time of the day in unix time (seconds)\nstart_ut : int, optional\n consider only events that start after this time\n If not specified, this filtering is not applied.\nend_ut : int, optional\n Consider only events that end before this time\n If not specified, this filtering is not applied.\n\nReturns\n-------\nevents: list of dicts\n each element contains the following data:\n from_stop: int (stop_I)\n to_stop: int (stop_I)\n dep_time_ut: int (in unix time)\n arr_time_ut: int (in unix time)", "id": "f12922:c0:m36"} {"signature": "def tripI_takes_place_on_dsut(self, trip_I, day_start_ut):", "body": "query = \"\"params = (trip_I, day_start_ut)cur = self.conn.cursor()rows = list(cur.execute(query, params))if len(rows) == :return Falseelse:assert len(rows) == , ''return True", "docstring": "Check that a trip takes place during a day\n\nParameters\n----------\ntrip_I : int\n index of the trip in the gtfs data base\nday_start_ut : int\n the starting time of the day in unix time (seconds)\n\nReturns\n-------\ntakes_place: bool\n boolean value describing whether the trip takes place during\n the given day or not", "id": "f12922:c0:m37"} {"signature": "def day_start_ut(self, ut):", "body": "old_tz = self.set_current_process_time_zone()ut = time.mktime(time.localtime(ut)[:] + (, , , , , -)) - set_process_timezone(old_tz)return ut", "docstring": "Convert unixtime to unixtime on GTFS start-of-day.\n\nGTFS defines the start of a day as \"noon minus 12 hours\" to solve\nmost DST-related problems. This means that on DST-changing days,\nthe day start isn't midnight. This function isn't idempotent.\nRunning it twice on the \"move clocks backwards\" day will result in\nbeing one day too early.\n\nParameters\n----------\nut: int\n Unixtime\n\nReturns\n-------\nut: int\n Unixtime corresponding to start of day", "id": "f12922:c0:m38"} {"signature": "def increment_day_start_ut(self, day_start_ut, n_days=):", "body": "old_tz = self.set_current_process_time_zone()day0 = time.localtime(day_start_ut + ) dayN = time.mktime(day0[:] + (day0[] + n_days,) + (, , , , , -)) - set_process_timezone(old_tz)return dayN", "docstring": "Increment the GTFS-definition of \"day start\".\n\n Parameters\n ----------\n day_start_ut : int\n unixtime of the previous start of day. If this time is between\n 12:00 or greater, there *will* be bugs. 
To solve this, run the\n input through day_start_ut first.\n n_days: int\n number of days to increment", "id": "f12922:c0:m39"} {"signature": "def _get_possible_day_starts(self, start_ut, end_ut, max_time_overnight=None):", "body": "if max_time_overnight is None:max_time_overnight = * * assert start_ut < end_utstart_day_ut = self.day_start_ut(start_ut)start_day_ds = start_ut - start_day_utend_day_ut = self.day_start_ut(end_ut)if start_day_ds < max_time_overnight:start_day_ut = self.increment_day_start_ut(start_day_ut, n_days=-)day_start_times_ut = [start_day_ut]while day_start_times_ut[-] < end_day_ut:day_start_times_ut.append(self.increment_day_start_ut(day_start_times_ut[-]))start_times_ds = []end_times_ds = []for dsut in day_start_times_ut:day_start_ut = max(, start_ut - dsut)start_times_ds.append(day_start_ut)day_end_ut = end_ut - dsutend_times_ds.append(day_end_ut)return day_start_times_ut, start_times_ds, end_times_ds", "docstring": "Get all possible day start times between start_ut and end_ut\nCurrently this function is used only by get_tripIs_within_range_by_dsut\n\nParameters\n----------\nstart_ut : list\n start time in unix time\nend_ut : list\n end time in unix time\nmax_time_overnight : list\n the maximum length of time that a trip can take place on\n during the next day (i.e. after midnight run times like 25:35)\n\nReturns\n-------\nday_start_times_ut : list\n list of ints (unix times in seconds) for returning all possible day\n start times\nstart_times_ds : list\n list of ints (unix times in seconds) stating the valid start time in\n day seconds\nend_times_ds : list\n list of ints (unix times in seconds) stating the valid end times in\n day_seconds", "id": "f12922:c0:m40"} {"signature": "def get_tripIs_within_range_by_dsut(self,start_time_ut,end_time_ut):", "body": "cur = self.conn.cursor()assert start_time_ut <= end_time_utdst_ut, st_ds, et_ds =self._get_possible_day_starts(start_time_ut, end_time_ut, )assert len(dst_ut) >= trip_I_dict = {}for day_start_ut, start_ds, end_ds inzip(dst_ut, st_ds, et_ds):query = \"\"\"\"\"\"params = (day_start_ut, end_ds, start_ds)trip_Is = [el[] for el in cur.execute(query, params)]if len(trip_Is) > :trip_I_dict[day_start_ut] = trip_Isreturn trip_I_dict", "docstring": "Obtain a list of trip_Is that take place during a time interval.\nThe trip needs to be only partially overlapping with the given time interval.\nThe grouping by dsut (day_start_ut) is required as same trip_I could\ntake place on multiple days.\n\nParameters\n----------\nstart_time_ut : int\n start of the time interval in unix time (seconds)\nend_time_ut: int\n end of the time interval in unix time (seconds)\n\nReturns\n-------\ntrip_I_dict: dict\n keys: day_start_times to list of integers (trip_Is)", "id": "f12922:c0:m41"} {"signature": "def stops(self):", "body": "return self.get_table(\"\")", "docstring": "Get all stop data as a pandas DataFrame\n\nReturns\n-------\ndf: pandas.DataFrame", "id": "f12922:c0:m42"} {"signature": "def stop(self, stop_I):", "body": "return pd.read_sql_query(\"\".format(stop_I=stop_I), self.conn)", "docstring": "Get all stop data as a pandas DataFrame for all stops, or an individual stop'\n\nParameters\n----------\nstop_I : int\n stop index\n\nReturns\n-------\nstop: pandas.DataFrame", "id": "f12922:c0:m43"} {"signature": "def get_stops_for_route_type(self, route_type):", "body": "if route_type is WALK:return self.stops()else:return pd.read_sql_query(\"\"\"\"\"\"\"\"\"\", self.conn, params=(route_type,))", "docstring": 
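A sketch of the GTFS day-start arithmetic and per-trip event queries documented above. The trip index is illustrative, and the start/end arguments are passed explicitly so the call matches the documented expectations:

```python
import time
from gtfspy.gtfs import GTFS

g = GTFS("helsinki.sqlite")                          # hypothetical path
dsut = g.day_start_ut(int(time.time()))              # GTFS "start of day" for a unixtime
next_dsut = g.increment_day_start_ut(dsut, n_days=1)

trip_I = 1                                           # illustrative trip index
if g.tripI_takes_place_on_dsut(trip_I, dsut):
    events = g.get_events_by_tripI_and_dsut(trip_I, dsut,
                                            start_ut=dsut, end_ut=next_dsut)
    # each event dict carries from_stop, to_stop, dep_time_ut and arr_time_ut
```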
"Parameters\n----------\nroute_type: int\n\nReturns\n-------\nstops: pandas.DataFrame", "id": "f12922:c0:m47"} {"signature": "def generate_routable_transit_events(self, start_time_ut=None, end_time_ut=None, route_type=None):", "body": "from gtfspy.networks import temporal_networkdf = temporal_network(self, start_time_ut=start_time_ut, end_time_ut=end_time_ut, route_type=route_type)df.sort_values(\"\", ascending=False, inplace=True)for row in df.itertuples():yield row", "docstring": "Generates events that take place during a time interval [start_time_ut, end_time_ut].\nEach event needs to be only partially overlap the given time interval.\nDoes not include walking events.\nThis is just a quick and dirty implementation to get a way of quickly get a\nmethod for generating events compatible with the routing algorithm\n\nParameters\n----------\nstart_time_ut: int\nend_time_ut: int\nroute_type: ?\n\nYields\n------\nevent: namedtuple\n containing:\n dep_time_ut: int\n arr_time_ut: int\n from_stop_I: int\n to_stop_I: int\n trip_I : int\n route_type : int\n seq: int", "id": "f12922:c0:m49"} {"signature": "def get_transit_events(self, start_time_ut=None, end_time_ut=None, route_type=None):", "body": "table_name = self._get_day_trips_table_name()event_query = \"\"\"\"\"\" + table_name + \"\"\"\"\"\"\"\"where_clauses = []if end_time_ut:where_clauses.append(table_name + \"\".format(end_time_ut=end_time_ut))where_clauses.append(\"\".format(end_time_ut=end_time_ut))if start_time_ut:where_clauses.append(table_name + \"\".format(start_time_ut=start_time_ut))where_clauses.append(\"\".format(start_time_ut=start_time_ut))if route_type is not None:assert route_type in ALL_ROUTE_TYPESwhere_clauses.append(\"\".format(route_type=route_type))if len(where_clauses) > :event_query += \"\"for i, where_clause in enumerate(where_clauses):if i is not :event_query += \"\"event_query += where_clauseevent_query += \"\"events_result = pd.read_sql_query(event_query, self.conn)from_indices = numpy.nonzero((events_result[''][:-].values == events_result[''][:].values) *(events_result[''][:-].values < events_result[''][:].values))[]to_indices = from_indices + assert (events_result[''][from_indices].values == events_result[''][to_indices].values).all()trip_Is = events_result[''][from_indices]from_stops = events_result[''][from_indices]to_stops = events_result[''][to_indices]shape_ids = events_result[''][from_indices]dep_times = events_result[''][from_indices]arr_times = events_result[''][to_indices]route_types = events_result[''][from_indices]route_ids = events_result[''][from_indices]route_Is = events_result[''][from_indices]durations = arr_times.values - dep_times.valuesassert (durations >= ).all()from_seqs = events_result[''][from_indices]to_seqs = events_result[''][to_indices]data_tuples = zip(from_stops, to_stops, dep_times, arr_times,shape_ids, route_types, route_ids, trip_Is,durations, from_seqs, to_seqs, route_Is)columns = [\"\", \"\", \"\", \"\",\"\", \"\", \"\", \"\",\"\", \"\", \"\", \"\"]df = pd.DataFrame.from_records(data_tuples, columns=columns)return df", "docstring": "Obtain a list of events that take place during a time interval.\nEach event needs to be only partially overlap the given time interval.\nDoes not include walking events.\n\nParameters\n----------\nstart_time_ut : int\n start of the time interval in unix time (seconds)\nend_time_ut: int\n end of the time interval in unix time (seconds)\nroute_type: int\n consider only events for this route_type\n\nReturns\n-------\nevents: pandas.DataFrame\n with the 
following columns and types\n dep_time_ut: int\n arr_time_ut: int\n from_stop_I: int\n to_stop_I: int\n trip_I : int\n shape_id : int\n route_type : int\n\nSee also\n--------\nget_transit_events_in_time_span : an older version of the same thing", "id": "f12922:c0:m50"} {"signature": "def get_route_difference_with_other_db(self, other_gtfs, start_time, end_time, uniqueness_threshold=None,uniqueness_ratio=None):", "body": "from gtfspy.stats import frequencies_by_generated_routethis_df = frequencies_by_generated_route(self, start_time, end_time)other_df = frequencies_by_generated_route(other_gtfs, start_time, end_time)this_routes = {x: set(x.split('')) for x in this_df[\"\"]}other_routes = {x: set(x.split('')) for x in other_df[\"\"]}this_uniques = list(this_routes.keys())other_uniques = list(other_routes.keys())print(\"\", len(this_uniques))print(\"\", len(other_uniques))for i_key, i in this_routes.items():for j_key, j in other_routes.items():union = i | jintersection = i & jsymmetric_difference = i ^ jif uniqueness_ratio:if len(intersection) / len(union) >= uniqueness_ratio:try:this_uniques.remove(i_key)this_df = this_df[this_df[\"\"] != i_key]except ValueError:passtry:other_uniques.remove(j_key)other_df = other_df[other_df[\"\"] != j_key]except ValueError:passprint(\"\", len(this_df))print(\"\", len(other_df))return this_df, other_df", "docstring": "Compares the routes based on stops in the schedule with the routes in another db and returns the ones without match.\nUniqueness thresholds or ratio can be used to allow small differences\n:param uniqueness_threshold:\n:param uniqueness_ratio:\n:return:", "id": "f12922:c0:m51"} {"signature": "def get_straight_line_transfer_distances(self, stop_I=None):", "body": "if stop_I is not None:query = u\"\"\"\"\"\"params = (u\"\".format(stop_I=stop_I),)else:query = \"\"\"\"\"\"params = Nonestop_data_df = pd.read_sql_query(query, self.conn, params=params)return stop_data_df", "docstring": "Get (straight line) distances to stations that can be transferred to.\n\nParameters\n----------\nstop_I : int, optional\n If not specified return all possible transfer distances\n\nReturns\n-------\ndistances: pandas.DataFrame\n each row has the following items\n from_stop_I: int\n to_stop_I: int\n d: float or int #distance in meters", "id": "f12922:c0:m53"} {"signature": "def get_approximate_schedule_time_span_in_ut(self):", "body": "first_day_start_ut, last_day_start_ut = self.get_day_start_ut_span()return first_day_start_ut, last_day_start_ut + * ", "docstring": "Return conservative estimates of start_time_ut and end_time_uts.\nAll trips, events etc. 
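A sketch of extracting stop-to-stop transit events as documented above. The time span is taken from the conservative schedule span helper, and route_type 3 is assumed to be the standard GTFS code for buses:

```python
from gtfspy.gtfs import GTFS

g = GTFS("helsinki.sqlite")                                        # hypothetical path
start_ut, end_ut = g.get_approximate_schedule_time_span_in_ut()    # conservative span

bus_events = g.get_transit_events(start_time_ut=start_ut,
                                  end_time_ut=end_ut,
                                  route_type=3)                    # 3 = bus (assumed GTFS code)
print(bus_events[["from_stop_I", "to_stop_I", "dep_time_ut", "arr_time_ut"]].head())

for event in g.generate_routable_transit_events(start_time_ut=start_ut, end_time_ut=end_ut):
    pass   # namedtuples with dep_time_ut, arr_time_ut, from_stop_I, to_stop_I, ...
```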
should start after start_time_ut_conservative and end before end_time_ut_conservative\n\nReturns\n-------\nstart_time_ut_conservative : int\nend_time_ut_conservative : int", "id": "f12922:c0:m55"} {"signature": "def get_day_start_ut_span(self):", "body": "cur = self.conn.cursor()first_day_start_ut, last_day_start_ut =cur.execute(\"\").fetchone()return first_day_start_ut, last_day_start_ut", "docstring": "Return the first and last day_start_ut\n\nReturns\n-------\nfirst_day_start_ut: int\nlast_day_start_ut: int", "id": "f12922:c0:m56"} {"signature": "def print_validation_warnings(self):", "body": "from .timetable_validator import TimetableValidatorvalidator = TimetableValidator(self)return validator.validate_and_get_warnings()", "docstring": "See Validator.validate for more information.\n\nReturns\n-------\nwarnings_container: validator.TimetableValidationWarningsContainer", "id": "f12922:c0:m59"} {"signature": "def homogenize_stops_table_with_other_db(self, source):", "body": "cur = self.conn.cursor()self.attach_gtfs_database(source)query_inner_join = \"\"\"\"\"\"df_inner_join = self.execute_custom_query_pandas(query_inner_join)print(\"\", len(df_inner_join.index))df_not_in_other = self.execute_custom_query_pandas(\"\" + query_inner_join)print(\"\", len(df_not_in_other.index))df_not_in_self = self.execute_custom_query_pandas(\"\" +query_inner_join.replace(\"\", \"\"))print(\"\", len(df_not_in_self.index))try:self.execute_custom_query(\"\"\"\"\"\")self.execute_custom_query(\"\"\"\"\"\")except sqlite3.OperationalError:passstop_id_stub = \"\"counter = rows_to_update_self = []rows_to_update_other = []rows_to_add_to_self = []rows_to_add_to_other = []for items in df_inner_join.itertuples(index=False):rows_to_update_self.append((counter, items[]))rows_to_update_other.append((counter, items[]))counter += for items in df_not_in_other.itertuples(index=False):rows_to_update_self.append((counter, items[]))rows_to_add_to_other.append((stop_id_stub + str(counter),) + tuple(items[x] for x in [, , , , , , ])+ (counter,))counter += for items in df_not_in_self.itertuples(index=False):rows_to_update_other.append((counter, items[]))rows_to_add_to_self.append((stop_id_stub + str(counter),) + tuple(items[x] for x in [, , , , , , ])+ (counter,))counter += query_add_row = \"\"\"\"\"\" % (\"\".join([\"\" for x in range()]))query_update_row = \"\"\"\"\"\"print(\"\")cur.executemany(query_add_row, rows_to_add_to_self)cur.executemany(query_update_row, rows_to_update_self)cur.executemany(query_add_row.replace(\"\", \"\"), rows_to_add_to_other)cur.executemany(query_update_row.replace(\"\", \"\"), rows_to_update_other)self.conn.commit()print(\"\")", "docstring": "This function takes an external database, looks of common stops and adds the missing stops to both databases.\nIn addition the stop_pair_I column is added. 
This id links the stops between these two sources.\n:param source: directory of external database\n:return:", "id": "f12922:c0:m64"} {"signature": "def update_stop_coordinates(self, stop_updates):", "body": "cur = self.conn.cursor()stop_values = [(values.lat, values.lon, values.stop_id) for values in stop_updates.itertuples()]cur.executemany(\"\"\"\"\"\", stop_values)self.conn.commit()", "docstring": ":param stop_updates: DataFrame\n:return:", "id": "f12922:c0:m71"} {"signature": "def __setitem__(self, key, value):", "body": "if isinstance(value, bytes):value = value.decode('')self._conn.execute('''',(key, value)).fetchone()self._conn.commit()", "docstring": "Get metadata from the DB", "id": "f12922:c1:m2"} {"signature": "def walk_transfer_stop_to_stop_network(gtfs, max_link_distance=None):", "body": "if max_link_distance is None:max_link_distance = net = networkx.Graph()_add_stops_to_net(net, gtfs.get_table(\"\"))stop_distances = gtfs.get_table(\"\")if stop_distances[\"\"][] is None:osm_distances_available = Falsewarn(\"\"\"\")else:osm_distances_available = Truefor stop_distance_tuple in stop_distances.itertuples():from_node = stop_distance_tuple.from_stop_Ito_node = stop_distance_tuple.to_stop_Iif osm_distances_available:if stop_distance_tuple.d_walk > max_link_distance or isnan(stop_distance_tuple.d_walk):continuedata = {'': stop_distance_tuple.d, '': stop_distance_tuple.d_walk}else:if stop_distance_tuple.d > max_link_distance:continuedata = {'': stop_distance_tuple.d}net.add_edge(from_node, to_node, data)return net", "docstring": "Construct the walk network.\nIf OpenStreetMap-based walking distances have been computed, then those are used as the distance.\nOtherwise, the great circle distances (\"d\") is used.\n\nParameters\n----------\ngtfs: gtfspy.GTFS\nmax_link_distance: int, optional\n If given, all walking transfers with great circle distance longer\n than this limit (expressed in meters) will be omitted.\n\nReturns\n-------\nnet: networkx.DiGraph\n edges have attributes\n d:\n straight-line distance between stops\n d_walk:\n distance along the road/tracks/..", "id": "f12923:m0"} {"signature": "def stop_to_stop_network_for_route_type(gtfs,route_type,link_attributes=None,start_time_ut=None,end_time_ut=None):", "body": "if link_attributes is None:link_attributes = DEFAULT_STOP_TO_STOP_LINK_ATTRIBUTESassert(route_type in route_types.TRANSIT_ROUTE_TYPES)stops_dataframe = gtfs.get_stops_for_route_type(route_type)net = networkx.DiGraph()_add_stops_to_net(net, stops_dataframe)events_df = gtfs.get_transit_events(start_time_ut=start_time_ut,end_time_ut=end_time_ut,route_type=route_type)if len(net.nodes()) < :assert events_df.shape[] == link_event_groups = events_df.groupby(['', ''], sort=False)for key, link_events in link_event_groups:from_stop_I, to_stop_I = keyassert isinstance(link_events, pd.DataFrame)if link_attributes is None:net.add_edge(from_stop_I, to_stop_I)else:link_data = {}if \"\" in link_attributes:link_data[''] = float(link_events[''].min())if \"\" in link_attributes:link_data[''] = float(link_events[''].max())if \"\" in link_attributes:link_data[''] = float(link_events[''].median())if \"\" in link_attributes:link_data[''] = float(link_events[''].mean())if \"\" in link_attributes:link_data[''] = int(link_events.shape[])if \"\" in link_attributes:link_data[''] = route_types.ROUTE_TYPE_TO_APPROXIMATE_CAPACITY[route_type]* int(link_events.shape[])if \"\" in link_attributes:from_lat = net.node[from_stop_I]['']from_lon = net.node[from_stop_I]['']to_lat = 
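A sketch of the schedule-span, transfer-distance, and validation helpers documented above (database path is hypothetical):

```python
from gtfspy.gtfs import GTFS

g = GTFS("helsinki.sqlite")                                      # hypothetical path
first_dsut, last_dsut = g.get_day_start_ut_span()                # first and last day_start_ut
start_ut, end_ut = g.get_approximate_schedule_time_span_in_ut()  # conservative schedule span

transfer_distances = g.get_straight_line_transfer_distances()    # from_stop_I, to_stop_I, d
warnings_container = g.print_validation_warnings()               # TimetableValidator warnings
```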
net.node[to_stop_I]['']to_lon = net.node[to_stop_I]['']distance = wgs84_distance(from_lat, from_lon, to_lat, to_lon)link_data[''] = int(distance)if \"\" in link_attributes:assert \"\" in link_events.columns.valuesfound = Nonefor i, shape_id in enumerate(link_events[\"\"].values):if shape_id is not None:found = ibreakif found is None:link_data[\"\"] = Noneelse:link_event = link_events.iloc[found]distance = gtfs.get_shape_distance_between_stops(link_event[\"\"],int(link_event[\"\"]),int(link_event[\"\"]))link_data[''] = distanceif \"\" in link_attributes:link_data[\"\"] = link_events.groupby(\"\").size().to_dict()net.add_edge(from_stop_I, to_stop_I, attr_dict=link_data)return net", "docstring": "Get a stop-to-stop network describing a single mode of travel.\n\nParameters\n----------\ngtfs : gtfspy.GTFS\nroute_type : int\n See gtfspy.route_types.TRANSIT_ROUTE_TYPES for the list of possible types.\nlink_attributes: list[str], optional\n defaulting to use the following link attributes:\n \"n_vehicles\" : Number of vehicles passed\n \"duration_min\" : minimum travel time between stops\n \"duration_max\" : maximum travel time between stops\n \"duration_median\" : median travel time between stops\n \"duration_avg\" : average travel time between stops\n \"d\" : distance along straight line (wgs84_distance)\n \"distance_shape\" : minimum distance along shape\n \"capacity_estimate\" : approximate capacity passed through the stop\n \"route_I_counts\" : dict from route_I to counts\nstart_time_ut: int\n start time of the time span (in unix time)\nend_time_ut: int\n end time of the time span (in unix time)\n\nReturns\n-------\nnet: networkx.DiGraph\n A directed graph Directed graph", "id": "f12923:m1"} {"signature": "def stop_to_stop_networks_by_type(gtfs):", "body": "route_type_to_network = dict()for route_type in route_types.ALL_ROUTE_TYPES:if route_type == route_types.WALK:net = walk_transfer_stop_to_stop_network(gtfs)else:net = stop_to_stop_network_for_route_type(gtfs, route_type)route_type_to_network[route_type] = netassert len(route_type_to_network) == len(route_types.ALL_ROUTE_TYPES)return route_type_to_network", "docstring": "Compute stop-to-stop networks for all travel modes (route_types).\n\nParameters\n----------\ngtfs: gtfspy.GTFS\n\nReturns\n-------\ndict: dict[int, networkx.DiGraph]\n keys should be one of route_types.ALL_ROUTE_TYPES (i.e. GTFS route_types)", "id": "f12923:m2"} {"signature": "def combined_stop_to_stop_transit_network(gtfs, start_time_ut=None, end_time_ut=None):", "body": "multi_di_graph = networkx.MultiDiGraph()for route_type in route_types.TRANSIT_ROUTE_TYPES:graph = stop_to_stop_network_for_route_type(gtfs, route_type,start_time_ut=start_time_ut, end_time_ut=end_time_ut)for from_node, to_node, data in graph.edges(data=True):data[''] = route_typemulti_di_graph.add_edges_from(graph.edges(data=True))multi_di_graph.add_nodes_from(graph.nodes(data=True))return multi_di_graph", "docstring": "Compute stop-to-stop networks for all travel modes and combine them into a single network.\nThe modes of transport are encoded to a single network.\nThe network consists of multiple links corresponding to each travel mode.\nWalk mode is not included.\n\nParameters\n----------\ngtfs: gtfspy.GTFS\n\nReturns\n-------\nnet: networkx.MultiDiGraph\n keys should be one of route_types.TRANSIT_ROUTE_TYPES (i.e. 
GTFS route_types)", "id": "f12923:m3"} {"signature": "def _add_stops_to_net(net, stops):", "body": "for stop in stops.itertuples():data = {\"\": stop.lat,\"\": stop.lon,\"\": stop.name}net.add_node(stop.stop_I, data)", "docstring": "Add nodes to the network from the pandas dataframe describing (a part of the) stops table in the GTFS database.\n\nParameters\n----------\nnet: networkx.Graph\nstops: pandas.DataFrame", "id": "f12923:m4"} {"signature": "def temporal_network(gtfs,start_time_ut=None,end_time_ut=None,route_type=None):", "body": "events_df = gtfs.get_transit_events(start_time_ut=start_time_ut,end_time_ut=end_time_ut,route_type=route_type)events_df.drop('', , inplace=True)events_df.drop('', , inplace=True)events_df.drop('', , inplace=True)events_df.drop('', , inplace=True)events_df.rename(columns={'': \"\"},inplace=True)return events_df", "docstring": "Compute the temporal network of the data, and return it as a pandas.DataFrame\n\nParameters\n----------\ngtfs : gtfspy.GTFS\nstart_time_ut: int | None\n start time of the time span (in unix time)\nend_time_ut: int | None\n end time of the time span (in unix time)\nroute_type: int | None\n Specifies which mode of public transport are included, or whether all modes should be included.\n The int should be one of the standard GTFS route_types:\n (see also gtfspy.route_types.TRANSIT_ROUTE_TYPES )\n If route_type is not specified, all modes are included.\n\nReturns\n-------\nevents_df: pandas.DataFrame\n Columns: departure_stop, arrival_stop, departure_time_ut, arrival_time_ut, route_type, route_I, trip_I", "id": "f12923:m5"} {"signature": "def route_to_route_network(gtfs, walking_threshold, start_time, end_time):", "body": "graph = networkx.Graph()routes = gtfs.get_table(\"\")for i in routes.itertuples():graph.add_node(i.route_id, attr_dict={\"\": i.type, \"\": route_types.ROUTE_TYPE_TO_COLOR[i.type]})query = \"\"\"\"\"\" % (walking_threshold, start_time, end_time, start_time,end_time)df = gtfs.execute_custom_query_pandas(query)for items in df.itertuples():graph.add_edge(items.route_id1, items.route_id2)graph.remove_nodes_from(networkx.isolates(graph))return graph", "docstring": "Creates networkx graph where the nodes are bus routes and a edge indicates that there is a possibility to transfer\nbetween the routes\n:param gtfs:\n:param walking_threshold:\n:param start_time:\n:param end_time:\n:return:", "id": "f12923:m6"} {"signature": "def get_warning_counter(self):", "body": "return self._warnings_counter", "docstring": "Returns\n-------\ncounter: collections.Counter", "id": "f12924:c0:m4"} {"signature": "def get_warnings_by_query_rows(self):", "body": "return self._warnings_records", "docstring": "Returns\n-------\nwarnings_record: defaultdict(list)\n maps each row to a list of warnings", "id": "f12924:c0:m5"} {"signature": "def write_walk_transfer_edges(gtfs, output_file_name):", "body": "transfers = gtfs.get_table(\"\")transfers.drop([u\"\", u\"\"], , inplace=True)with util.create_file(output_file_name, tmpdir=True, keepext=True) as tmpfile:transfers.to_csv(tmpfile, encoding='', index=False)", "docstring": "Parameters\n----------\ngtfs: gtfspy.GTFS\noutput_file_name: str", "id": "f12925:m0"} {"signature": "def write_nodes(gtfs, output, fields=None):", "body": "nodes = gtfs.get_table(\"\")if fields is not None:nodes = nodes[fields]with util.create_file(output, tmpdir=True, keepext=True) as tmpfile:nodes.to_csv(tmpfile, encoding='', index=False, sep=\"\")", "docstring": "Parameters\n----------\ngtfs: gtfspy.GTFS\noutput: str\n Path to the 
output file\nfields: list, optional\n which pieces of information to provide", "id": "f12925:m1"} {"signature": "def write_stops_geojson(gtfs, out_file, fields=None):", "body": "geojson = create_stops_geojson_dict(gtfs, fields)if hasattr(out_file, \"\"):out_file.write(json.dumps(geojson))else:with util.create_file(out_file, tmpdir=True, keepext=True) as tmpfile_path:tmpfile = open(tmpfile_path, '')tmpfile.write(json.dumps(geojson))", "docstring": "Parameters\n----------\ngtfs: gtfspy.GTFS\nout_file: file-like or path to file\nfields: dict\n simultaneously map each original_name to the new_name\nReturns\n-------", "id": "f12925:m3"} {"signature": "def write_combined_transit_stop_to_stop_network(gtfs, output_path, fmt=None):", "body": "if fmt is None:fmt = \"\"multi_di_graph = combined_stop_to_stop_transit_network(gtfs)_write_stop_to_stop_network_edges(multi_di_graph, output_path, fmt=fmt)", "docstring": "Parameters\n----------\ngtfs : gtfspy.GTFS\noutput_path : str\nfmt: None, optional\n defaulting to \"edg\" and writing results as \".edg\" files\n If \"csv\" csv files are produced instead", "id": "f12925:m4"} {"signature": "def write_static_networks(gtfs, output_dir, fmt=None):", "body": "if fmt is None:fmt = \"\"single_layer_networks = stop_to_stop_networks_by_type(gtfs)util.makedirs(output_dir)for route_type, net in single_layer_networks.items():tag = route_types.ROUTE_TYPE_TO_LOWERCASE_TAG[route_type]file_name = os.path.join(output_dir, \"\" + tag + \"\" + fmt)if len(net.edges()) > :_write_stop_to_stop_network_edges(net, file_name, fmt=fmt)", "docstring": "Parameters\n----------\ngtfs: gtfspy.GTFS\noutput_dir: (str, unicode)\n a path where to write\nfmt: None, optional\n defaulting to \"edg\" and writing results as \".edg\" files\n If \"csv\" csv files are produced instead", "id": "f12925:m5"} {"signature": "def write_temporal_networks_by_route_type(gtfs, extract_output_dir):", "body": "util.makedirs(extract_output_dir)for route_type in route_types.TRANSIT_ROUTE_TYPES:pandas_data_frame = temporal_network(gtfs, start_time_ut=None, end_time_ut=None, route_type=route_type)tag = route_types.ROUTE_TYPE_TO_LOWERCASE_TAG[route_type]out_file_name = os.path.join(extract_output_dir, tag + \"\")pandas_data_frame.to_csv(out_file_name, encoding='', index=False)", "docstring": "Write temporal networks by route type to disk.\n\nParameters\n----------\ngtfs: gtfspy.GTFS\nextract_output_dir: str", "id": "f12925:m6"} {"signature": "def write_temporal_network(gtfs, output_filename, start_time_ut=None, end_time_ut=None):", "body": "util.makedirs(os.path.dirname(os.path.abspath(output_filename)))pandas_data_frame = temporal_network(gtfs, start_time_ut=start_time_ut, end_time_ut=end_time_ut)pandas_data_frame.to_csv(output_filename, encoding='', index=False)", "docstring": "Parameters\n----------\ngtfs : gtfspy.GTFS\noutput_filename : str\n path to the directory where to store the extracts\nstart_time_ut: int | None\n start time of the extract in unixtime (seconds after epoch)\nend_time_ut: int | None\n end time of the extract in unixtime (seconds after epoch)", "id": "f12925:m7"} {"signature": "def _write_stop_to_stop_network_edges(net, file_name, data=True, fmt=None):", "body": "if fmt is None:fmt = \"\"if fmt == \"\":if data:networkx.write_edgelist(net, file_name, data=True)else:networkx.write_edgelist(net, file_name)elif fmt == \"\":with open(file_name, '') as f:edge_iter = net.edges_iter(data=True)_, _, edg_data = next(edge_iter)edg_data_keys = list(sorted(edg_data.keys()))header = \"\".join([\"\", \"\"] 
+ edg_data_keys)f.write(header)for from_node_I, to_node_I, data in net.edges_iter(data=True):f.write(\"\")values = [str(from_node_I), str(to_node_I)]data_values = []for key in edg_data_keys:if key == \"\":route_I_counts_string = str(data[key]).replace(\"\", \"\")[:-]data_values.append(route_I_counts_string)else:data_values.append(str(data[key]))all_values = values + data_valuesf.write(\"\".join(all_values))", "docstring": "Write out a network\n\nParameters\n----------\nnet: networkx.DiGraph\nbase_name: str\n path to the filename (without extension)\ndata: bool, optional\n whether or not to write out any edge data present\nfmt: str, optional\n If \"csv\" write out the network in csv format.", "id": "f12925:m8"} {"signature": "def write_gtfs(gtfs, output):", "body": "output = os.path.abspath(output)uuid_str = \"\" + str(uuid.uuid1())if output[-:] == '':zip = Trueout_basepath = os.path.dirname(os.path.abspath(output))if not os.path.exists(out_basepath):raise IOError(out_basepath + \"\")tmp_dir = os.path.join(out_basepath, str(uuid_str))else:zip = Falseout_basepath = outputtmp_dir = os.path.join(out_basepath + \"\" + str(uuid_str))os.makedirs(tmp_dir, exist_ok=True)gtfs_table_to_writer = {\"\": _write_gtfs_agencies,\"\": _write_gtfs_calendar,\"\": _write_gtfs_calendar_dates,\"\": _write_gtfs_feed_info,\"\": _write_gtfs_routes,\"\": _write_gtfs_shapes,\"\": _write_gtfs_stops,\"\": _write_gtfs_stop_times,\"\": _write_gtfs_transfers,\"\": _write_gtfs_trips,}for table, writer in gtfs_table_to_writer.items():fname_to_write = os.path.join(tmp_dir, table + '')print(fname_to_write)writer(gtfs, open(os.path.join(tmp_dir, table + ''), ''))if zip:shutil.make_archive(output[:-], '', tmp_dir)shutil.rmtree(tmp_dir)else:print(\"\" + str(tmp_dir) + \"\" + out_basepath)os.rename(tmp_dir, out_basepath)", "docstring": "Write out the database according to the GTFS format.\n\nParameters\n----------\ngtfs: gtfspy.GTFS\noutput: str\n Path where to put the GTFS files\n if output ends with \".zip\" a ZIP-file is created instead.\n\nReturns\n-------\nNone", "id": "f12925:m13"} {"signature": "def _remove_I_columns(df):", "body": "all_columns = list(filter(lambda el: el[-:] == \"\", df.columns))for column in all_columns:del df[column]", "docstring": "Remove columns ending with I from a pandas.DataFrame\n\nParameters\n----------\ndf: dataFrame\n\nReturns\n-------\nNone", "id": "f12925:m14"} {"signature": "def plot_trip_counts_per_day(G, ax=None, highlight_dates=None, highlight_date_labels=None, show=False):", "body": "daily_trip_counts = G.get_trip_counts_per_day()if ax is None:_fig, ax = plt.subplots()daily_trip_counts[\"\"] = pandas.to_datetime(daily_trip_counts[\"\"])daily_trip_counts.plot(\"\", \"\", kind=\"\", ax=ax, marker=\"\", color=\"\", ls=\"\",label=\"\")ax.set_xlabel(\"\")ax.set_ylabel(\"\")if highlight_dates is not None:assert isinstance(highlight_dates, list)if highlight_date_labels is not None:assert isinstance(highlight_date_labels, list)assert len(highlight_dates) == len(highlight_date_labels), \"\"else:highlight_date_labels = [None] * len(highlight_dates)for i, (highlight_date, label) in enumerate(zip(highlight_dates, highlight_date_labels)):color = \"\" + str(int(i % + ))highlight_date = pandas.to_datetime(highlight_date)ax.axvline(highlight_date, color=color, label=label)ax.legend(loc=\"\")ax.grid()if show:plt.show()return ax", "docstring": "Parameters\n----------\nG: gtfspy.GTFS\nax: maptlotlib.Axes, optional\nhighlight_dates: list[str|datetime.datetime]\n The values of highlight dates should 
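A sketch of writing a few of the extracts documented above to disk. The module path gtfspy.exports is an assumption based on the function names, and the output paths are hypothetical:

```python
import os
from gtfspy.gtfs import GTFS
from gtfspy import exports          # assumed module path for the writer functions above

g = GTFS("helsinki.sqlite")                                       # hypothetical path
os.makedirs("out", exist_ok=True)

exports.write_nodes(g, "out/nodes.tsv")                           # stop/node table
exports.write_walk_transfer_edges(g, "out/walk_edges.csv")        # walking transfers
exports.write_stops_geojson(g, "out/stops.geojson")
exports.write_temporal_network(g, "out/temporal_network.csv")
exports.write_gtfs(g, "out/reexported_gtfs.zip")                  # re-export as a GTFS zip
```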
represent dates, and or datetime objects.\nhighlight_date_labels: list\n The labels for each highlight dates.\nshow: bool, optional\n whether or not to immediately show the results\n\nReturns\n-------\nax: maptlotlib.Axes object", "id": "f12926:m0"} {"signature": "def __init__(self, gtfssource, gtfs, verbose=True):", "body": "if isinstance(gtfssource, string_types + (dict,)):self.gtfs_sources = [gtfssource]else:assert isinstance(gtfssource, list)self.gtfs_sources = gtfssourceassert len(self.gtfs_sources) > , \"\"if not isinstance(gtfs, GTFS):self.gtfs = GTFS(gtfs)else:self.gtfs = gtfsself.location = self.gtfs.get_location_name()self.warnings_container = WarningsContainer()self.verbose=verbose", "docstring": "Parameters\n----------\ngtfs_sources: list, string, dict\n list of paths to the strings, or a dictionary directly containing the gtfs data directly\ngtfs: gtfspy.gtfs.GTFS, or path to a relevant .sqlite GTFS database\nverbose: bool\n Whether or not to print warnings on-the-fly.", "id": "f12927:c0:m0"} {"signature": "def _validate_table_row_counts(self):", "body": "for db_table_name in DB_TABLE_NAME_TO_SOURCE_FILE.keys():table_name_source_file = DB_TABLE_NAME_TO_SOURCE_FILE[db_table_name]row_warning_str = DB_TABLE_NAME_TO_ROWS_MISSING_WARNING[db_table_name]database_row_count = self.gtfs.get_row_count(db_table_name)source_row_count = for gtfs_source in self.gtfs_sources:frequencies_in_source = source_csv_to_pandas(gtfs_source, '')try:if table_name_source_file == '' and not frequencies_in_source.empty:source_row_count += self._frequency_generated_trips_rows(gtfs_source)elif table_name_source_file == '' and not frequencies_in_source.empty:source_row_count += self._compute_number_of_frequency_generated_stop_times(gtfs_source)else:df = source_csv_to_pandas(gtfs_source, table_name_source_file)source_row_count += len(df.index)except IOError as e:if hasattr(e, \"\") and db_table_name in e.filename:passelse:raise eif source_row_count == database_row_count and self.verbose:print(\"\" + table_name_source_file + \"\"+ str(database_row_count) + \"\")else:difference = database_row_count - source_row_count('' + str(table_name_source_file) + '' + str(source_row_count) +'' + str(database_row_count) + \"\")if table_name_source_file == \"\" and difference > :query = \"\"+ str(int(difference)) +\"\"number_of_entries_added_by_calendar_dates_loader = self.gtfs.execute_custom_query(query).fetchone()[]if number_of_entries_added_by_calendar_dates_loader == difference and self.verbose:print(\"\")else:if self.verbose:print(\"\")self.warnings_container.add_warning(row_warning_str, self.location, difference)else:self.warnings_container.add_warning(row_warning_str, self.location, difference)", "docstring": "Imports source .txt files, checks row counts and then compares the rowcounts with the gtfsobject\n:return:", "id": "f12927:c0:m2"} {"signature": "def _validate_no_null_values(self):", "body": "for table in DB_TABLE_NAMES:null_not_ok_warning = \"\".format(table=table)null_warn_warning = \"\".format(table=table)null_not_ok_fields = DB_TABLE_NAME_TO_FIELDS_WHERE_NULL_NOT_OK[table]null_warn_fields = DB_TABLE_NAME_TO_FIELDS_WHERE_NULL_OK_BUT_WARN[table]df = self.gtfs.get_table(table)for warning, fields in zip([null_not_ok_warning, null_warn_warning], [null_not_ok_fields, null_warn_fields]):null_unwanted_df = df[fields]rows_having_null = null_unwanted_df.isnull().any()if sum(rows_having_null) > :rows_having_unwanted_null = df[rows_having_null.values]self.warnings_container.add_warning(warning, 
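A sketch of the daily trip-count plot documented above; the module path gtfspy.plots is an assumption, and the highlighted date and its label are illustrative:

```python
import matplotlib.pyplot as plt
from gtfspy.gtfs import GTFS
from gtfspy import plots            # assumed module path for plot_trip_counts_per_day

g = GTFS("helsinki.sqlite")         # hypothetical path
ax = plots.plot_trip_counts_per_day(g,
                                    highlight_dates=["2016-05-02"],
                                    highlight_date_labels=["extract date"],
                                    show=False)
plt.show()
```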
rows_having_unwanted_null, len(rows_having_unwanted_null))", "docstring": "Loads the tables from the gtfs object and counts the number of rows that have null values in\nfields that should not be null. Stores the number of null rows in warnings_container", "id": "f12927:c0:m3"} {"signature": "def _validate_danglers(self):", "body": "for query, warning in zip(DANGLER_QUERIES, DANGLER_WARNINGS):dangler_count = self.gtfs.execute_custom_query(query).fetchone()[]if dangler_count > :if self.verbose:print(str(dangler_count) + \"\" + warning)self.warnings_container.add_warning(warning, self.location, count=dangler_count)", "docstring": "Checks for rows that are not referenced in the the tables that should be linked\n\nstops <> stop_times using stop_I\nstop_times <> trips <> days, using trip_I\ntrips <> routes, using route_I\n:return:", "id": "f12927:c0:m4"} {"signature": "def _frequency_generated_trips_rows(self, gtfs_soure_path, return_df_freq=False):", "body": "df_freq = source_csv_to_pandas(gtfs_soure_path, '')df_trips = source_csv_to_pandas(gtfs_soure_path, \"\")df_freq[''] = df_freq.apply(lambda row: len(range(str_time_to_day_seconds(row['']),str_time_to_day_seconds(row['']),row[''])), axis=)df_trips_freq = pd.merge(df_freq, df_trips, how='', on='')n_freq_generated_trips = int(df_trips_freq[''].fillna().sum(axis=))if return_df_freq:return df_trips_freqelse:return n_freq_generated_trips", "docstring": "This function calculates the equivalent rowcounts for trips when\ntaking into account the generated rows in the gtfs object\nParameters\n----------\ngtfs_soure_path: path to the source file\nparam txt: txt file in question\n:return: sum of all trips", "id": "f12927:c0:m5"} {"signature": "def _compute_number_of_frequency_generated_stop_times(self, gtfs_source_path):", "body": "df_freq = self._frequency_generated_trips_rows(gtfs_source_path, return_df_freq=True)df_stop_times = source_csv_to_pandas(gtfs_source_path, \"\")df_stop_freq = pd.merge(df_freq, df_stop_times, how='', on='')return int(df_stop_freq[''].fillna().sum(axis=))", "docstring": "Parameters\n----------\nSame as for \"_frequency_generated_trips_rows\" but for stop times table\ngtfs_source_path:\ntable_name:\n\nReturn\n------", "id": "f12927:c0:m6"} {"signature": "def import_gtfs(gtfs_sources, output, preserve_connection=False,print_progress=True, location_name=None, **kwargs):", "body": "if isinstance(output, sqlite3.Connection):conn = outputelse:conn = sqlite3.connect(output)if not isinstance(gtfs_sources, list):gtfs_sources = [gtfs_sources]cur = conn.cursor()time_import_start = time.time()cur.execute('')cur.execute('')cur.execute('')cur.execute('')conn.isolation_level = None cur.execute('')cur.execute('')conn.isolation_level = '' loaders = [L(gtfssource=gtfs_sources, print_progress=print_progress, **kwargs) for L in Loaders]for loader in loaders:loader.assert_exists_if_required()for loader in loaders:loader.import_(conn)for Loader in loaders:Loader.post_import_round2(conn)for Loader in loaders:Loader.make_views(conn)for F in postprocessors:F(conn)from gtfspy import gtfs as mod_gtfsG = mod_gtfs.GTFS(output)G.meta[''] = time.time()G.meta[''] = time.ctime()G.meta[''] = time.time() - time_import_startG.meta[''] = ''G.meta[''] = ''G.meta[''] = len(gtfs_sources)download_date_strs = []for i, source in enumerate(gtfs_sources):if len(gtfs_sources) == :prefix = \"\"else:prefix = \"\" + str(i) + \"\"if isinstance(source, string_types):G.meta[prefix + ''] = decode_six(source) if source else Nonefilename_date_list = re.findall(r'', source)if 
filename_date_list:date_str = filename_date_list[-]G.meta[prefix + ''] = date_strdownload_date_strs.append(date_str)if location_name:G.meta[''] = location_nameelse:location_name_list = re.findall(r'', source)if location_name_list:G.meta[prefix + ''] = location_name_list[-]else:try:G.meta[prefix + ''] = source.split(\"\")[-]except:G.meta[prefix + ''] = sourceif G.meta[''] == \"\":unique_download_dates = list(set(download_date_strs))if len(unique_download_dates) == :G.meta[''] = unique_download_dates[]G.meta[''] = cur.execute('').fetchone()[]stats.update_stats(G)del Gif print_progress:print(\"\")conn.isolation_level = None cur.execute('')conn.isolation_level = '' if print_progress:print(\"\")cur.execute('')if not (preserve_connection is True):conn.close()", "docstring": "Import a GTFS database\n\n gtfs_sources: str, dict, list\n Paths to the gtfs zip file or to the directory containing the GTFS data.\n Alternatively, a dict can be provide that maps gtfs filenames\n (like 'stops.txt' and 'agencies.txt') to their string presentations.\n\n output: str or sqlite3.Connection\n path to the new database to be created, or an existing\n sqlite3 connection\n preserve_connection: bool, optional\n Whether to close the connection in the end, or not.\n print_progress: bool, optional\n Whether to print progress output\n location_name: str, optional\n set the location of this database", "id": "f12928:m0"} {"signature": "def validate_day_start_ut(conn):", "body": "G = GTFS(conn)cur = conn.execute('')for date, day_start_ut in cur:assert day_start_ut == G.get_day_start_ut(date)", "docstring": "This validates the day_start_ut of the days table.", "id": "f12928:m1"} {"signature": "def main_make_views(gtfs_fname):", "body": "print(\"\")conn = GTFS(fname_or_conn=gtfs_fname).connfor L in Loaders:L(None).make_views(conn)conn.commit()", "docstring": "Re-create all views.", "id": "f12928:m2"} {"signature": "def plot_route_network_from_gtfs(g, ax=None, spatial_bounds=None, map_alpha=, scalebar=True, legend=True,return_smopy_map=False, map_style=None):", "body": "assert(isinstance(g, GTFS))route_shapes = g.get_all_route_shapes()if spatial_bounds is None:spatial_bounds = get_spatial_bounds(g, as_dict=True)if ax is not None:bbox = ax.get_window_extent().transformed(ax.figure.dpi_scale_trans.inverted())width, height = bbox.width, bbox.heightspatial_bounds = _expand_spatial_bounds_to_fit_axes(spatial_bounds, width, height)return plot_as_routes(route_shapes,ax=ax,spatial_bounds=spatial_bounds,map_alpha=map_alpha,plot_scalebar=scalebar,legend=legend,return_smopy_map=return_smopy_map,map_style=map_style)", "docstring": "Parameters\n----------\ng: A gtfspy.gtfs.GTFS object\n Where to get the data from?\nax: matplotlib.Axes object, optional\n If None, a new figure and an axis is created\nspatial_bounds: dict, optional\n with str keys: lon_min, lon_max, lat_min, lat_max\nreturn_smopy_map: bool, optional\n defaulting to false\n\nReturns\n-------\nax: matplotlib.axes.Axes", "id": "f12929:m1"} {"signature": "def plot_as_routes(route_shapes, ax=None, spatial_bounds=None, map_alpha=, plot_scalebar=True, legend=True,return_smopy_map=False, line_width_attribute=None, line_width_scale=, map_style=None):", "body": "lon_min = spatial_bounds['']lon_max = spatial_bounds['']lat_min = spatial_bounds['']lat_max = spatial_bounds['']if ax is None:fig = plt.figure()ax = fig.add_subplot()smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max, map_style=map_style)ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=map_alpha)bound_pixel_xs, 
bound_pixel_ys = smopy_map.to_pixels(numpy.array([lat_min, lat_max]),numpy.array([lon_min, lon_max]))route_types_to_lines = {}for shape in route_shapes:route_type = ROUTE_TYPE_CONVERSION[shape['']]lats = numpy.array(shape[''])lons = numpy.array(shape[''])if line_width_attribute:line_width = line_width_scale * shape[line_width_attribute]else:line_width = xs, ys = smopy_map.to_pixels(lats, lons)line, = ax.plot(xs, ys, linewidth=line_width, color=ROUTE_TYPE_TO_COLOR[route_type], zorder=ROUTE_TYPE_TO_ZORDER[route_type])route_types_to_lines[route_type] = lineif legend:lines = list(route_types_to_lines.values())labels = [ROUTE_TYPE_TO_SHORT_DESCRIPTION[route_type] for route_type in route_types_to_lines.keys()]ax.legend(lines, labels, loc=\"\")if plot_scalebar:_add_scale_bar(ax, lat_max, lon_min, lon_max, bound_pixel_xs.max() - bound_pixel_xs.min())ax.set_xticks([])ax.set_yticks([])ax.set_xlim(bound_pixel_xs.min(), bound_pixel_xs.max())ax.set_ylim(bound_pixel_ys.max(), bound_pixel_ys.min())if return_smopy_map:return ax, smopy_mapelse:return ax", "docstring": "Parameters\n----------\nroute_shapes: list of dicts that should have the following keys\n name, type, agency, lats, lons\n with types\n list, list, str, list, list\nax: axis object\nspatial_bounds: dict\nmap_alpha:\nplot_scalebar: bool\nlegend:\nreturn_smopy_map:\nline_width_attribute:\nline_width_scale:\n\nReturns\n-------\nax: matplotlib.axes object", "id": "f12929:m2"} {"signature": "def _expand_spatial_bounds_to_fit_axes(bounds, ax_width, ax_height):", "body": "b = boundsheight_meters = util.wgs84_distance(b[''], b[''], b[''], b[''])width_meters = util.wgs84_distance(b[''], b[''], b[''], b[''])x_per_y_meters = width_meters / height_metersx_per_y_axes = ax_width / ax_heightif x_per_y_axes > x_per_y_meters: width_meters_new = (height_meters * x_per_y_axes)d_lon_new = ((b[''] - b['']) / width_meters) * width_meters_newmean_lon = (b[''] + b[''])/lon_min = mean_lon - d_lon_new / lon_max = mean_lon + d_lon_new / spatial_bounds = {\"\": lon_min,\"\": lon_max,\"\": b[''],\"\": b['']}else:height_meters_new = (width_meters / x_per_y_axes)d_lat_new = ((b[''] - b['']) / height_meters) * height_meters_newmean_lat = (b[''] + b['']) / lat_min = mean_lat - d_lat_new / lat_max = mean_lat + d_lat_new / spatial_bounds = {\"\": b[''],\"\": b[''],\"\": lat_min,\"\": lat_max}return spatial_bounds", "docstring": "Parameters\n----------\nbounds: dict\nax_width: float\nax_height: float\n\nReturns\n-------\nspatial_bounds", "id": "f12929:m5"} {"signature": "def plot_all_stops(g, ax=None, scalebar=False):", "body": "assert(isinstance(g, GTFS))lon_min, lon_max, lat_min, lat_max = get_spatial_bounds(g)smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)if ax is None:fig = plt.figure()ax = fig.add_subplot()ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=)stops = g.stops()lats = numpy.array(stops[''])lons = numpy.array(stops[''])xs, ys = smopy_map.to_pixels(lats, lons)ax.scatter(xs, ys, color=\"\", s=)ax.set_xlim(min(xs), max(xs))ax.set_ylim(max(ys), min(ys))return ax", "docstring": "Parameters\n----------\ng: A gtfspy.gtfs.GTFS object\nax: matplotlib.Axes object, optional\n If None, a new figure and an axis is created, otherwise results are plotted on the axis.\nscalebar: bool, optional\n Whether to include a scalebar to the plot.\n\nReturns\n-------\nax: matplotlib.Axes", "id": "f12929:m9"} {"signature": "def __init__(self, gtfs, buffer_params=None):", "body": "if not isinstance(gtfs, GTFS):self.gtfs = GTFS(gtfs)else:self.gtfs = gtfsself.buffer_params = 
buffer_paramsself.warnings_container = WarningsContainer()", "docstring": "Parameters\n----------\ngtfs: GTFS, or path to a GTFS object\n A GTFS object", "id": "f12930:c0:m0"} {"signature": "def validate_and_get_warnings(self):", "body": "self.warnings_container.clear()self._validate_stops_with_same_stop_time()self._validate_speeds_and_trip_times()self._validate_stop_spacings()self._validate_stop_sequence()self._validate_misplaced_stops()return self.warnings_container", "docstring": "Validates/checks a given GTFS feed with respect to a number of different issues.\n\nThe set of warnings that are checked for can be found in the gtfs_validator.ALL_WARNINGS\n\nReturns\n-------\nwarnings: WarningsContainer", "id": "f12930:c0:m1"} {"signature": "def get_queryset(self, request):", "body": "qs = super(GalleryAdmin, self).get_queryset(request)return qs.annotate(photo_count=Count(''))", "docstring": "Add number of photos to each gallery.", "id": "f12946:c1:m1"} {"signature": "def save_model(self, request, obj, form, change):", "body": "obj.author = request.userobj.save()", "docstring": "Set currently authenticated user as the author of the gallery.", "id": "f12946:c1:m2"} {"signature": "def save_formset(self, request, form, formset, change):", "body": "instances = formset.save(commit=False)for instance in instances:if isinstance(instance, Photo):instance.author = request.userinstance.save()", "docstring": "For each photo set its author to the currently authenticated user.", "id": "f12946:c1:m3"} {"signature": "@register.simple_tagdef get_recent_galleries(count=):", "body": "return Gallery.objects.published().order_by('')[:count]", "docstring": "Returns most recent galleries.", "id": "f12947:m0"} {"signature": "@register.simple_tagdef get_gallery_archive_dates():", "body": "return Gallery.objects.published().dates('', '', order='')", "docstring": "Returns datetime objects for all months in which galleries were added.", "id": "f12947:m1"} {"signature": "@register.simple_tagdef get_popular_tags(count=):", "body": "return Photo.objects.popular_tags(count)", "docstring": "Returns most popular tags.", "id": "f12947:m2"} {"signature": "def __str__(self):", "body": "return self.title", "docstring": "The string representation of a gallery is its title.", "id": "f12958:c1:m0"} {"signature": "def __str__(self):", "body": "return self.title", "docstring": "The string representation of a photo is its title.", "id": "f12958:c3:m0"} {"signature": "def save(self, *args, **kwargs):", "body": "try:img = Image.open(self.image.file)raw_exif = img._getexif()if raw_exif:self.exif = {ExifTags.TAGS[k]: sanitize_exif_value(k, v) for k, v in raw_exif.items() if k in ExifTags.TAGS}except Exception:passsuper(Photo, self).save(*args, **kwargs)", "docstring": "Updates EXIF data before saving.", "id": "f12958:c3:m1"} {"signature": "def get_next_photo(self):", "body": "try:next_photo = Photo.objects.filter(gallery=self.gallery,created__gt=self.created,)[]except IndexError:next_photo = Photo.objects.filter(gallery=self.gallery)[]return next_photo", "docstring": "Returns next photo from the same gallery (in chronological order).\n\nWraps around from last photo in the gallery to the first one.", "id": "f12958:c3:m3"} {"signature": "def get_previous_photo(self):", "body": "try:previous_photo = Photo.objects.filter(gallery=self.gallery,created__lt=self.created,).latest('')except Photo.DoesNotExist:previous_photo = Photo.objects.filter(gallery=self.gallery).latest('')return previous_photo", "docstring": "Returns previous photo from the 
same gallery (in chronological order).\n\nWraps around from first photo in the gallery to the last one.", "id": "f12958:c3:m4"} {"signature": "def __get_rev(self, key, version, **kwa):", "body": "if '' in kwa:doc = kwa['']else:if type(version) is int:if version == :order = pymongo.ASCENDINGelif version == -:order = pymongo.DESCENDINGdoc = self._collection.find_one({'': key}, sort=[['', order]])elif type(version) is datetime:ver = self.__round_time(version)doc = self._collection.find_one({'': key, '': ver})if doc is None:raise KeyError(''.format(key, str(version)))coded_val = doc['']return pickle.loads(coded_val)", "docstring": "Obtain particular version of the doc at key.", "id": "f12967:c0:m1"} {"signature": "def __round_time(self, dt):", "body": "round_to = self._resolution.total_seconds()seconds = (dt - dt.min).secondsrounding = (seconds + round_to / ) // round_to * round_toreturn dt + timedelta(, rounding - seconds, -dt.microsecond)", "docstring": "Round a datetime object to a multiple of a timedelta\n dt : datetime.datetime object, default now.", "id": "f12967:c0:m2"} {"signature": "def __getitem__(self, _key):", "body": "if type(_key) is tuple and len(_key) == :key, revision = _keyelif type(_key) is str:key = _keyrevision = -else:raise KeyError('')if type(key) is not str:raise KeyError('')if type(revision) is slice:return self.__get_revs(key, revision)elif type(revision) in [int, datetime]:return self.__get_rev(key, revision)else:raise KeyError('')", "docstring": "Obtain Revisions or Iterables.\n\n Obtain specific revisions:\n >>> obj[key] # Return the most recent revision\n >>> obj[key, -1] # Return the most recent revision\n >>> obj[key, 0] # Return the oldest available revision\n >>> obj[key, date(...)] # Return the revision on the supplied date\n\n Obtain iterables:\n - Return an iterator which yields all revs.\n >>> obj[key, :]\n - Return an iterator which yields the revisions between supplied dates.\n >>> obj[key, date(...):]\n >>> obj[key, :date(...)]\n >>> obj[key, date(...):date(...)]", "id": "f12967:c0:m5"} {"signature": "def _hashkey(self, method, url, **kwa):", "body": "to_hash = ''.join([str(method), str(url),str(kwa.get('', '')),str(kwa.get('', ''))])return hashlib.md5(to_hash.encode()).hexdigest()", "docstring": "Find a hash value for the linear combination of invocation methods.", "id": "f12967:c1:m3"} {"signature": "def _re_flatten(p):", "body": "if '' not in p:return preturn re.sub(r'', lambda m: m.group() iflen(m.group()) % else m.group() + '', p)", "docstring": "Turn all capturing groups in a regular expression pattern into\n non-capturing groups.", "id": "f12971:m6"} {"signature": "def abort(code=, text=''):", "body": "raise HTTPError(code, text)", "docstring": "Aborts execution and causes a HTTP error.", "id": "f12971:m9"} {"signature": "def redirect(url, code=None):", "body": "if not code:code = if request.get('') == \"\" else res = response.copy(cls=HTTPResponse)res.status = coderes.body = \"\"res.set_header('', urljoin(request.url, url))raise res", "docstring": "Aborts execution and causes a 303 or 302 redirect, depending on\n the HTTP protocol version.", "id": "f12971:m10"} {"signature": "def _file_iter_range(fp, offset, bytes, maxread= * ):", "body": "fp.seek(offset)while bytes > :part = fp.read(min(bytes, maxread))if not part: breakbytes -= len(part)yield part", "docstring": "Yield chunks from a range in a file. 
No chunk is bigger than maxread.", "id": "f12971:m11"} {"signature": "def static_file(filename, root,mimetype='',download=False,charset=''):", "body": "root = os.path.abspath(root) + os.sepfilename = os.path.abspath(os.path.join(root, filename.strip('')))headers = dict()if not filename.startswith(root):return HTTPError(, \"\")if not os.path.exists(filename) or not os.path.isfile(filename):return HTTPError(, \"\")if not os.access(filename, os.R_OK):return HTTPError(, \"\")if mimetype == '':if download and download != True:mimetype, encoding = mimetypes.guess_type(download)else:mimetype, encoding = mimetypes.guess_type(filename)if encoding: headers[''] = encodingif mimetype:if mimetype[:] == '' and charset and '' not in mimetype:mimetype += '' % charsetheaders[''] = mimetypeif download:download = os.path.basename(filename if download == True else download)headers[''] = '' % downloadstats = os.stat(filename)headers[''] = clen = stats.st_sizelm = time.strftime(\"\", time.gmtime(stats.st_mtime))headers[''] = lmims = request.environ.get('')if ims:ims = parse_date(ims.split(\"\")[].strip())if ims is not None and ims >= int(stats.st_mtime):headers[''] = time.strftime(\"\",time.gmtime())return HTTPResponse(status=, **headers)body = '' if request.method == '' else open(filename, '')headers[\"\"] = \"\"ranges = request.environ.get('')if '' in request.environ:ranges = list(parse_range_header(request.environ[''], clen))if not ranges:return HTTPError(, \"\")offset, end = ranges[]headers[\"\"] = \"\" % (offset, end - , clen)headers[\"\"] = str(end - offset)if body: body = _file_iter_range(body, offset, end - offset)return HTTPResponse(body, status=, **headers)return HTTPResponse(body, **headers)", "docstring": "Open a file in a safe way and return :exc:`HTTPResponse` with status\n code 200, 305, 403 or 404. The ``Content-Type``, ``Content-Encoding``,\n ``Content-Length`` and ``Last-Modified`` headers are set if possible.\n Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``\n requests.\n\n :param filename: Name or path of the file to send.\n :param root: Root path for file lookups. Should be an absolute directory\n path.\n :param mimetype: Defines the content-type header (default: guess from\n file extension)\n :param download: If True, ask the browser to open a `Save as...` dialog\n instead of opening the file with the associated program. You can\n specify a custom filename as a string. If not specified, the\n original filename is used (default: False).\n :param charset: The charset to use for files with a ``text/*``\n mime-type. 
(default: UTF-8)", "id": "f12971:m12"} {"signature": "def debug(mode=True):", "body": "global DEBUGif mode: warnings.simplefilter('')DEBUG = bool(mode)", "docstring": "Change the debug level.\n There is only one debug level supported at the moment.", "id": "f12971:m13"} {"signature": "def parse_date(ims):", "body": "try:ts = email.utils.parsedate_tz(ims)return time.mktime(ts[:] + (, )) - (ts[] or ) - time.timezoneexcept (TypeError, ValueError, IndexError, OverflowError):return None", "docstring": "Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch.", "id": "f12971:m15"} {"signature": "def parse_auth(header):", "body": "try:method, data = header.split(None, )if method.lower() == '':user, pwd = touni(base64.b64decode(tob(data))).split('', )return user, pwdexcept (KeyError, ValueError):return None", "docstring": "Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None", "id": "f12971:m16"} {"signature": "def parse_range_header(header, maxlen=):", "body": "if not header or header[:] != '': returnranges = [r.split('', ) for r in header[:].split('') if '' in r]for start, end in ranges:try:if not start: start, end = max(, maxlen - int(end)), maxlenelif not end: start, end = int(start), maxlenelse: start, end = int(start), min(int(end) + , maxlen)if <= start < end <= maxlen:yield start, endexcept ValueError:pass", "docstring": "Yield (start, end) ranges parsed from a HTTP Range header. Skip\n unsatisfiable ranges. The end index is non-inclusive.", "id": "f12971:m17"} {"signature": "def _lscmp(a, b):", "body": "return not sum( if x == y else for x, y in zip(a, b)) and len(a) == len(b)", "docstring": "Compares two strings in a cryptographically safe way:\n Runtime is not affected by length of common prefix.", "id": "f12971:m19"} {"signature": "def cookie_encode(data, key):", "body": "msg = base64.b64encode(pickle.dumps(data, -))sig = base64.b64encode(hmac.new(tob(key), msg).digest())return tob('') + sig + tob('') + msg", "docstring": "Encode and sign a pickle-able object. Return a (byte) string", "id": "f12971:m20"} {"signature": "def cookie_decode(data, key):", "body": "data = tob(data)if cookie_is_encoded(data):sig, msg = data.split(tob(''), )if _lscmp(sig[:], base64.b64encode(hmac.new(tob(key), msg).digest())):return pickle.loads(base64.b64decode(msg))return None", "docstring": "Verify and decode an encoded string. Return an object or None.", "id": "f12971:m21"} {"signature": "def cookie_is_encoded(data):", "body": "return bool(data.startswith(tob('')) and tob('') in data)", "docstring": "Return True if the argument looks like a encoded cookie.", "id": "f12971:m22"} {"signature": "def html_escape(string):", "body": "return string.replace('', '').replace('', '').replace('>', '').replace('', '').replace(\"\", '')", "docstring": "Escape HTML special characters ``&<>`` and quotes ``'\"``.", "id": "f12971:m23"} {"signature": "def html_quote(string):", "body": "return '' % html_escape(string).replace('', '').replace('', '').replace('', '')", "docstring": "Escape and quote a string to be used as an HTTP attribute.", "id": "f12971:m24"} {"signature": "def yieldroutes(func):", "body": "path = '' + func.__name__.replace('', '').lstrip('')spec = getargspec(func)argc = len(spec[]) - len(spec[] or [])path += ('' * argc) % tuple(spec[][:argc])yield pathfor arg in spec[][argc:]:path += '' % argyield path", "docstring": "Return a generator for routes that match the signature (name, args)\n of the func parameter. 
This may yield more than one route if the function\n takes optional keyword arguments. The output is best described by example::\n\n a() -> '/a'\n b(x, y) -> '/b//'\n c(x, y=5) -> '/c/' and '/c//'\n d(x=5, y=6) -> '/d' and '/d/' and '/d//'", "id": "f12971:m25"} {"signature": "def path_shift(script_name, path_info, shift=):", "body": "if shift == : return script_name, path_infopathlist = path_info.strip('').split('')scriptlist = script_name.strip('').split('')if pathlist and pathlist[] == '': pathlist = []if scriptlist and scriptlist[] == '': scriptlist = []if < shift <= len(pathlist):moved = pathlist[:shift]scriptlist = scriptlist + movedpathlist = pathlist[shift:]elif > shift >= -len(scriptlist):moved = scriptlist[shift:]pathlist = moved + pathlistscriptlist = scriptlist[:shift]else:empty = '' if shift < else ''raise AssertionError(\"\" % empty)new_script_name = '' + ''.join(scriptlist)new_path_info = '' + ''.join(pathlist)if path_info.endswith('') and pathlist: new_path_info += ''return new_script_name, new_path_info", "docstring": "Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.\n\n :return: The modified paths.\n :param script_name: The SCRIPT_NAME path.\n :param script_name: The PATH_INFO path.\n :param shift: The number of path fragments to shift. May be negative to\n change the shift direction. (default: 1)", "id": "f12971:m26"} {"signature": "def auth_basic(check, realm=\"\", text=\"\"):", "body": "def decorator(func):@functools.wraps(func)def wrapper(*a, **ka):user, password = request.auth or (None, None)if user is None or not check(user, password):err = HTTPError(, text)err.add_header('', '' % realm)return errreturn func(*a, **ka)return wrapperreturn decorator", "docstring": "Callback decorator to require HTTP auth (basic).\n TODO: Add route(check_auth=...) parameter.", "id": "f12971:m27"} {"signature": "def make_default_app_wrapper(name):", "body": "@functools.wraps(getattr(Bottle, name))def wrapper(*a, **ka):return getattr(app(), name)(*a, **ka)return wrapper", "docstring": "Return a callable that relays calls to the current default app.", "id": "f12971:m28"} {"signature": "def load(target, **namespace):", "body": "module, target = target.split(\"\", ) if '' in target else (target, None)if module not in sys.modules: __import__(module)if not target: return sys.modules[module]if target.isalnum(): return getattr(sys.modules[module], target)package_name = module.split('')[]namespace[package_name] = sys.modules[package_name]return eval('' % (module, target), namespace)", "docstring": "Import a module or fetch an object from a module.\n\n * ``package.module`` returns `module` as a module object.\n * ``pack.mod:name`` returns the module variable `name` from `pack.mod`.\n * ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.\n\n The last form accepts not only function calls, but any type of\n expression. Keyword arguments passed to this function are available as\n local variables. Example: ``import_string('re:compile(x)', x='[a-z]')``", "id": "f12971:m29"} {"signature": "def load_app(target):", "body": "global NORUNNORUN, nr_old = True, NORUNtmp = default_app.push() try:rv = load(target) return rv if callable(rv) else tmpfinally:default_app.remove(tmp) NORUN = nr_old", "docstring": "Load a bottle application from a module and make sure that the import\n does not affect the current default application, but returns a separate\n application object. 
See :func:`load` for the target parameter.", "id": "f12971:m30"} {"signature": "def run(app=None,server='',host='',port=,interval=,reloader=False,quiet=False,plugins=None,debug=None, **kargs):", "body": "if NORUN: returnif reloader and not os.environ.get(''):import subprocesslockfile = Nonetry:fd, lockfile = tempfile.mkstemp(prefix='', suffix='')os.close(fd) while os.path.exists(lockfile):args = [sys.executable] + sys.argvenviron = os.environ.copy()environ[''] = ''environ[''] = lockfilep = subprocess.Popen(args, env=environ)while p.poll() is None: os.utime(lockfile, None) time.sleep(interval)if p.poll() != :if os.path.exists(lockfile): os.unlink(lockfile)sys.exit(p.poll())except KeyboardInterrupt:passfinally:if os.path.exists(lockfile):os.unlink(lockfile)returntry:if debug is not None: _debug(debug)app = app or default_app()if isinstance(app, basestring):app = load_app(app)if not callable(app):raise ValueError(\"\" % app)for plugin in plugins or []:if isinstance(plugin, basestring):plugin = load(plugin)app.install(plugin)if server in server_names:server = server_names.get(server)if isinstance(server, basestring):server = load(server)if isinstance(server, type):server = server(host=host, port=port, **kargs)if not isinstance(server, ServerAdapter):raise ValueError(\"\" % server)server.quiet = server.quiet or quietif not server.quiet:_stderr(\"\" %(__version__, repr(server)))_stderr(\"\" %(server.host, server.port))_stderr(\"\")if reloader:lockfile = os.environ.get('')bgcheck = FileCheckerThread(lockfile, interval)with bgcheck:server.run(app)if bgcheck.status == '':sys.exit()else:server.run(app)except KeyboardInterrupt:passexcept (SystemExit, MemoryError):raiseexcept:if not reloader: raiseif not getattr(server, '', quiet):print_exc()time.sleep(interval)sys.exit()", "docstring": "Start a server instance. This method blocks until the server terminates.\n\n :param app: WSGI application or target string supported by\n :func:`load_app`. (default: :func:`default_app`)\n :param server: Server adapter to use. See :data:`server_names` keys\n for valid names or pass a :class:`ServerAdapter` subclass.\n (default: `wsgiref`)\n :param host: Server address to bind to. Pass ``0.0.0.0`` to listens on\n all interfaces including the external one. (default: 127.0.0.1)\n :param port: Server port to bind to. Values below 1024 require root\n privileges. (default: 8080)\n :param reloader: Start auto-reloading server? (default: False)\n :param interval: Auto-reloader interval in seconds (default: 1)\n :param quiet: Suppress output to stdout and stderr? 
(default: False)\n :param options: Options passed to the server adapter.", "id": "f12971:m31"} {"signature": "def template(*args, **kwargs):", "body": "tpl = args[] if args else Noneadapter = kwargs.pop('', SimpleTemplate)lookup = kwargs.pop('', TEMPLATE_PATH)tplid = (id(lookup), tpl)if tplid not in TEMPLATES or DEBUG:settings = kwargs.pop('', {})if isinstance(tpl, adapter):TEMPLATES[tplid] = tplif settings: TEMPLATES[tplid].prepare(**settings)elif \"\" in tpl or \"\" in tpl or \"\" in tpl or '' in tpl:TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)else:TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)if not TEMPLATES[tplid]:abort(, '' % tpl)for dictarg in args[:]:kwargs.update(dictarg)return TEMPLATES[tplid].render(kwargs)", "docstring": "Get a rendered template as a string iterator.\nYou can use a name, a filename or a template string as first parameter.\nTemplate rendering arguments can be passed as dictionaries\nor directly (as keyword arguments).", "id": "f12971:m32"} {"signature": "def view(tpl_name, **defaults):", "body": "def decorator(func):@functools.wraps(func)def wrapper(*args, **kwargs):result = func(*args, **kwargs)if isinstance(result, (dict, DictMixin)):tplvars = defaults.copy()tplvars.update(result)return template(tpl_name, **tplvars)elif result is None:return template(tpl_name, defaults)return resultreturn wrapperreturn decorator", "docstring": "Decorator: renders a template for a handler.\n The handler can control its behavior like that:\n\n - return a dict of template vars to fill out the template\n - return something other than a dict and the view decorator will not\n process the template, but return the handler result as is.\n This includes returning a HTTPResponse(dict) to get,\n for instance, JSON with autojson or other castfilters.", "id": "f12971:m33"} {"signature": "def add_filter(self, name, func):", "body": "self.filters[name] = func", "docstring": "Add a filter. 
The provided function is called with the configuration\n string as parameter and must return a (regexp, to_python, to_url) tuple.\n The first element is a string, the last two are callables or None.", "id": "f12971:c9:m1"} {"signature": "def add(self, rule, method, target, name=None):", "body": "anons = keys = [] pattern = '' filters = [] builder = [] is_static = Truefor key, mode, conf in self._itertokens(rule):if mode:is_static = Falseif mode == '': mode = self.default_filtermask, in_filter, out_filter = self.filters[mode](conf)if not key:pattern += '' % maskkey = '' % anonsanons += else:pattern += '' % (key, mask)keys.append(key)if in_filter: filters.append((key, in_filter))builder.append((key, out_filter or str))elif key:pattern += re.escape(key)builder.append((None, key))self.builder[rule] = builderif name: self.builder[name] = builderif is_static and not self.strict_order:self.static.setdefault(method, {})self.static[method][self.build(rule)] = (target, None)returntry:re_pattern = re.compile('' % pattern)re_match = re_pattern.matchexcept re.error:raise RouteSyntaxError(\"\" %(rule, _e()))if filters:def getargs(path):url_args = re_match(path).groupdict()for name, wildcard_filter in filters:try:url_args[name] = wildcard_filter(url_args[name])except ValueError:raise HTTPError(, '')return url_argselif re_pattern.groupindex:def getargs(path):return re_match(path).groupdict()else:getargs = Noneflatpat = _re_flatten(pattern)whole_rule = (rule, flatpat, target, getargs)if (flatpat, method) in self._groups:if DEBUG:msg = ''warnings.warn(msg % (method, rule), RuntimeWarning)self.dyna_routes[method][self._groups[flatpat, method]] = whole_ruleelse:self.dyna_routes.setdefault(method, []).append(whole_rule)self._groups[flatpat, method] = len(self.dyna_routes[method]) - self._compile(method)", "docstring": "Add a new rule or replace the target for an existing rule.", "id": "f12971:c9:m3"} {"signature": "def build(self, _name, *anons, **query):", "body": "builder = self.builder.get(_name)if not builder:raise RouteBuildError(\"\", _name)try:for i, value in enumerate(anons):query['' % i] = valueurl = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])return url if not query else url + '' + urlencode(query)except KeyError:raise RouteBuildError('' % _e().args[])", "docstring": "Build an URL by filling the wildcards in a rule.", "id": "f12971:c9:m5"} {"signature": "def match(self, environ):", "body": "verb = environ[''].upper()path = environ[''] or ''if verb == '':methods = ['', verb, '', '']else:methods = ['', verb, '']for method in methods:if method in self.static and path in self.static[method]:target, getargs = self.static[method][path]return target, getargs(path) if getargs else {}elif method in self.dyna_regexes:for combined, rules in self.dyna_regexes[method]:match = combined(path)if match:target, getargs = rules[match.lastindex - ]return target, getargs(path) if getargs else {}allowed = set([])nocheck = set(methods)for method in set(self.static) - nocheck:if path in self.static[method]:allowed.add(verb)for method in set(self.dyna_regexes) - allowed - nocheck:for combined, rules in self.dyna_regexes[method]:match = combined(path)if match:allowed.add(method)if allowed:allow_header = \"\".join(sorted(allowed))raise HTTPError(, \"\", Allow=allow_header)raise HTTPError(, \"\" + repr(path))", "docstring": "Return a (target, url_args) tuple or raise HTTPError(400/404/405).", "id": "f12971:c9:m6"} {"signature": "@cached_propertydef call(self):", "body": "return self._make_callback()", 
"docstring": "The route callback with all plugins applied. This property is\n created on demand and then cached to speed up subsequent requests.", "id": "f12971:c10:m1"} {"signature": "def reset(self):", "body": "self.__dict__.pop('', None)", "docstring": "Forget any cached values. The next time :attr:`call` is accessed,\n all plugins are re-applied.", "id": "f12971:c10:m2"} {"signature": "def prepare(self):", "body": "self.call", "docstring": "Do all on-demand work immediately (useful for debugging).", "id": "f12971:c10:m3"} {"signature": "def all_plugins(self):", "body": "unique = set()for p in reversed(self.app.plugins + self.plugins):if True in self.skiplist: breakname = getattr(p, '', False)if name and (name in self.skiplist or name in unique): continueif p in self.skiplist or type(p) in self.skiplist: continueif name: unique.add(name)yield p", "docstring": "Yield all Plugins affecting this route.", "id": "f12971:c10:m4"} {"signature": "def get_undecorated_callback(self):", "body": "func = self.callbackfunc = getattr(func, '' if py3k else '', func)closure_attr = '' if py3k else ''while hasattr(func, closure_attr) and getattr(func, closure_attr):attributes = getattr(func, closure_attr)func = attributes[].cell_contentsif not isinstance(func, FunctionType):func = filter(lambda x: isinstance(x, FunctionType),map(lambda x: x.cell_contents, attributes))func = list(func)[] return func", "docstring": "Return the callback. If the callback is a decorated function, try to\n recover the original function.", "id": "f12971:c10:m6"} {"signature": "def get_callback_args(self):", "body": "return getargspec(self.get_undecorated_callback())[]", "docstring": "Return a list of argument names the callback (most likely) accepts\n as keyword arguments. If the callback is a decorated function, try\n to recover the original function before inspection.", "id": "f12971:c10:m7"} {"signature": "def get_config(self, key, default=None):", "body": "for conf in (self.config, self.app.config):if key in conf: return conf[key]return default", "docstring": "Lookup a config field and return its value, first checking the\n route.config, then route.app.config.", "id": "f12971:c10:m8"} {"signature": "def add_hook(self, name, func):", "body": "if name in self.__hook_reversed:self._hooks[name].insert(, func)else:self._hooks[name].append(func)", "docstring": "Attach a callback to a hook. Three hooks are currently implemented:\n\n before_request\n Executed once before each request. The request context is\n available, but no routing has happened yet.\n after_request\n Executed once after each request regardless of its outcome.\n app_reset\n Called whenever :meth:`Bottle.reset` is called.", "id": "f12971:c11:m2"} {"signature": "def remove_hook(self, name, func):", "body": "if name in self._hooks and func in self._hooks[name]:self._hooks[name].remove(func)return True", "docstring": "Remove a callback from a hook.", "id": "f12971:c11:m3"} {"signature": "def trigger_hook(self, __name, *args, **kwargs):", "body": "return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]", "docstring": "Trigger a hook and return a list of results.", "id": "f12971:c11:m4"} {"signature": "def hook(self, name):", "body": "def decorator(func):self.add_hook(name, func)return funcreturn decorator", "docstring": "Return a decorator that attaches a callback to a hook. 
See\n :meth:`add_hook` for details.", "id": "f12971:c11:m5"} {"signature": "def mount(self, prefix, app, **options):", "body": "segments = [p for p in prefix.split('') if p]if not segments: raise ValueError('')path_depth = len(segments)def mountpoint_wrapper():try:request.path_shift(path_depth)rs = HTTPResponse([])def start_response(status, headerlist, exc_info=None):if exc_info:_raise(*exc_info)rs.status = statusfor name, value in headerlist:rs.add_header(name, value)return rs.body.appendbody = app(request.environ, start_response)if body and rs.body: body = itertools.chain(rs.body, body)rs.body = body or rs.bodyreturn rsfinally:request.path_shift(-path_depth)options.setdefault('', True)options.setdefault('', '')options.setdefault('', {'': prefix, '': app})options[''] = mountpoint_wrapperself.route('' % ''.join(segments), **options)if not prefix.endswith(''):self.route('' + ''.join(segments), **options)", "docstring": "Mount an application (:class:`Bottle` or plain WSGI) to a specific\n URL prefix. Example::\n\n root_app.mount('/admin/', admin_app)\n\n :param prefix: path prefix or `mount-point`. If it ends in a slash,\n that slash is mandatory.\n :param app: an instance of :class:`Bottle` or a WSGI application.\n\n All other parameters are passed to the underlying :meth:`route` call.", "id": "f12971:c11:m6"} {"signature": "def merge(self, routes):", "body": "if isinstance(routes, Bottle):routes = routes.routesfor route in routes:self.add_route(route)", "docstring": "Merge the routes of another :class:`Bottle` application or a list of\n :class:`Route` objects into this application. The routes keep their\n 'owner', meaning that the :data:`Route.app` attribute is not\n changed.", "id": "f12971:c11:m7"} {"signature": "def install(self, plugin):", "body": "if hasattr(plugin, ''): plugin.setup(self)if not callable(plugin) and not hasattr(plugin, ''):raise TypeError(\"\")self.plugins.append(plugin)self.reset()return plugin", "docstring": "Add a plugin to the list of plugins and prepare it for being\n applied to all routes of this application. A plugin may be a simple\n decorator or an object that implements the :class:`Plugin` API.", "id": "f12971:c11:m8"} {"signature": "def uninstall(self, plugin):", "body": "removed, remove = [], pluginfor i, plugin in list(enumerate(self.plugins))[::-]:if remove is True or remove is plugin or remove is type(plugin)or getattr(plugin, '', True) == remove:removed.append(plugin)del self.plugins[i]if hasattr(plugin, ''): plugin.close()if removed: self.reset()return removed", "docstring": "Uninstall plugins. Pass an instance to remove a specific plugin, a type\n object to remove all plugins that match that type, a string to remove\n all plugins with a matching ``name`` attribute or ``True`` to remove all\n plugins. Return the list of removed plugins.", "id": "f12971:c11:m9"} {"signature": "def reset(self, route=None):", "body": "if route is None: routes = self.routeselif isinstance(route, Route): routes = [route]else: routes = [self.routes[route]]for route in routes:route.reset()if DEBUG:for route in routes:route.prepare()self.trigger_hook('')", "docstring": "Reset all routes (force plugins to be re-applied) and clear all\n caches. 
If an ID or route object is given, only that specific route\n is affected.", "id": "f12971:c11:m10"} {"signature": "def close(self):", "body": "for plugin in self.plugins:if hasattr(plugin, ''): plugin.close()", "docstring": "Close the application and all installed plugins.", "id": "f12971:c11:m11"} {"signature": "def run(self, **kwargs):", "body": "run(self, **kwargs)", "docstring": "Calls :func:`run` with the same parameters.", "id": "f12971:c11:m12"} {"signature": "def match(self, environ):", "body": "return self.router.match(environ)", "docstring": "Search for a matching route and return a (:class:`Route` , urlargs)\n tuple. The second value is a dictionary with parameters extracted\n from the URL. Raise :exc:`HTTPError` (404/405) on a non-match.", "id": "f12971:c11:m13"} {"signature": "def get_url(self, routename, **kargs):", "body": "scriptname = request.environ.get('', '').strip('') + ''location = self.router.build(routename, **kargs).lstrip('')return urljoin(urljoin('', scriptname), location)", "docstring": "Return a string that matches a named route", "id": "f12971:c11:m14"} {"signature": "def add_route(self, route):", "body": "self.routes.append(route)self.router.add(route.rule, route.method, route, name=route.name)if DEBUG: route.prepare()", "docstring": "Add a route object, but do not change the :data:`Route.app`\n attribute.", "id": "f12971:c11:m15"} {"signature": "def route(self,path=None,method='',callback=None,name=None,apply=None,skip=None, **config):", "body": "if callable(path): path, callback = None, pathplugins = makelist(apply)skiplist = makelist(skip)def decorator(callback):if isinstance(callback, basestring): callback = load(callback)for rule in makelist(path) or yieldroutes(callback):for verb in makelist(method):verb = verb.upper()route = Route(self, rule, verb, callback,name=name,plugins=plugins,skiplist=skiplist, **config)self.add_route(route)return callbackreturn decorator(callback) if callback else decorator", "docstring": "A decorator to bind a function to a request URL. Example::\n\n @app.route('/hello/')\n def hello(name):\n return 'Hello %s' % name\n\n The ``:name`` part is a wildcard. See :class:`Router` for syntax\n details.\n\n :param path: Request path or a list of paths to listen to. If no\n path is specified, it is automatically generated from the\n signature of the function.\n :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of\n methods to listen to. (default: `GET`)\n :param callback: An optional shortcut to avoid the decorator\n syntax. ``route(..., callback=func)`` equals ``route(...)(func)``\n :param name: The name for this route. (default: None)\n :param apply: A decorator or plugin or a list of plugins. These are\n applied to the route callback in addition to installed plugins.\n :param skip: A list of plugins, plugin classes or names. Matching\n plugins are not installed to this route. 
``True`` skips all.\n\n Any additional keyword arguments are stored as route-specific\n configuration and passed to plugins (see :meth:`Plugin.apply`).", "id": "f12971:c11:m16"} {"signature": "def get(self, path=None, method='', **options):", "body": "return self.route(path, method, **options)", "docstring": "Equals :meth:`route`.", "id": "f12971:c11:m17"} {"signature": "def post(self, path=None, method='', **options):", "body": "return self.route(path, method, **options)", "docstring": "Equals :meth:`route` with a ``POST`` method parameter.", "id": "f12971:c11:m18"} {"signature": "def put(self, path=None, method='', **options):", "body": "return self.route(path, method, **options)", "docstring": "Equals :meth:`route` with a ``PUT`` method parameter.", "id": "f12971:c11:m19"} {"signature": "def delete(self, path=None, method='', **options):", "body": "return self.route(path, method, **options)", "docstring": "Equals :meth:`route` with a ``DELETE`` method parameter.", "id": "f12971:c11:m20"} {"signature": "def patch(self, path=None, method='', **options):", "body": "return self.route(path, method, **options)", "docstring": "Equals :meth:`route` with a ``PATCH`` method parameter.", "id": "f12971:c11:m21"} {"signature": "def error(self, code=):", "body": "def wrapper(handler):self.error_handler[int(code)] = handlerreturn handlerreturn wrapper", "docstring": "Decorator: Register an output handler for a HTTP error code", "id": "f12971:c11:m22"} {"signature": "def _cast(self, out, peek=None):", "body": "if not out:if '' not in response:response[''] = return []if isinstance(out, (tuple, list))and isinstance(out[], (bytes, unicode)):out = out[][:].join(out) if isinstance(out, unicode):out = out.encode(response.charset)if isinstance(out, bytes):if '' not in response:response[''] = len(out)return [out]if isinstance(out, HTTPError):out.apply(response)out = self.error_handler.get(out.status_code,self.default_error_handler)(out)return self._cast(out)if isinstance(out, HTTPResponse):out.apply(response)return self._cast(out.body)if hasattr(out, ''):if '' in request.environ:return request.environ[''](out)elif hasattr(out, '') or not hasattr(out, ''):return WSGIFileWrapper(out)try:iout = iter(out)first = next(iout)while not first:first = next(iout)except StopIteration:return self._cast('')except HTTPResponse:first = _e()except (KeyboardInterrupt, SystemExit, MemoryError):raiseexcept:if not self.catchall: raisefirst = HTTPError(, '', _e(), format_exc())if isinstance(first, HTTPResponse):return self._cast(first)elif isinstance(first, bytes):new_iter = itertools.chain([first], iout)elif isinstance(first, unicode):encoder = lambda x: x.encode(response.charset)new_iter = imap(encoder, itertools.chain([first], iout))else:msg = '' % type(first)return self._cast(HTTPError(, msg))if hasattr(out, ''):new_iter = _closeiter(new_iter, out.close)return new_iter", "docstring": "Try to convert the parameter into something WSGI compatible and set\n correct HTTP headers when possible.\n Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,\n iterable of strings and iterable of unicodes", "id": "f12971:c11:m25"} {"signature": "def wsgi(self, environ, start_response):", "body": "try:out = self._cast(self._handle(environ))if response._status_code in (, , , )or environ[''] == '':if hasattr(out, ''): out.close()out = []start_response(response._status_line, response.headerlist)return outexcept (KeyboardInterrupt, SystemExit, MemoryError):raiseexcept:if not self.catchall: raiseerr = ''% 
html_escape(environ.get('', ''))if DEBUG:err += ''''% (html_escape(repr(_e())), html_escape(format_exc()))environ[''].write(err)headers = [('', '')]start_response('', headers, sys.exc_info())return [tob(err)]", "docstring": "The bottle WSGI-interface.", "id": "f12971:c11:m26"} {"signature": "def __call__(self, environ, start_response):", "body": "return self.wsgi(environ, start_response)", "docstring": "Each instance of :class:'Bottle' is a WSGI application.", "id": "f12971:c11:m27"} {"signature": "def __enter__(self):", "body": "default_app.push(self)return self", "docstring": "Use this application as default for all module-level shortcuts.", "id": "f12971:c11:m28"} {"signature": "def __init__(self, environ=None):", "body": "self.environ = {} if environ is None else environself.environ[''] = self", "docstring": "Wrap a WSGI environ dictionary.", "id": "f12971:c12:m0"} {"signature": "@DictProperty('', '', read_only=True)def app(self):", "body": "raise RuntimeError('')", "docstring": "Bottle application handling this request.", "id": "f12971:c12:m1"} {"signature": "@DictProperty('', '', read_only=True)def route(self):", "body": "raise RuntimeError('')", "docstring": "The bottle :class:`Route` object that matches this request.", "id": "f12971:c12:m2"} {"signature": "@DictProperty('', '', read_only=True)def url_args(self):", "body": "raise RuntimeError('')", "docstring": "The arguments extracted from the URL.", "id": "f12971:c12:m3"} {"signature": "@propertydef path(self):", "body": "return '' + self.environ.get('', '').lstrip('')", "docstring": "The value of ``PATH_INFO`` with exactly one prefixed slash (to fix\n broken clients and avoid the \"empty path\" edge case).", "id": "f12971:c12:m4"} {"signature": "@propertydef method(self):", "body": "return self.environ.get('', '').upper()", "docstring": "The ``REQUEST_METHOD`` value as an uppercase string.", "id": "f12971:c12:m5"} {"signature": "@DictProperty('', '', read_only=True)def headers(self):", "body": "return WSGIHeaderDict(self.environ)", "docstring": "A :class:`WSGIHeaderDict` that provides case-insensitive access to\n HTTP request headers.", "id": "f12971:c12:m6"} {"signature": "def get_header(self, name, default=None):", "body": "return self.headers.get(name, default)", "docstring": "Return the value of a request header, or a given default value.", "id": "f12971:c12:m7"} {"signature": "@DictProperty('', '', read_only=True)def cookies(self):", "body": "cookies = SimpleCookie(self.environ.get('', '')).values()return FormsDict((c.key, c.value) for c in cookies)", "docstring": "Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT\n decoded. Use :meth:`get_cookie` if you expect signed cookies.", "id": "f12971:c12:m8"} {"signature": "def get_cookie(self, key, default=None, secret=None):", "body": "value = self.cookies.get(key)if secret and value:dec = cookie_decode(value, secret) return dec[] if dec and dec[] == key else defaultreturn value or default", "docstring": "Return the content of a cookie. To read a `Signed Cookie`, the\n `secret` must match the one used to create the cookie (see\n :meth:`BaseResponse.set_cookie`). If anything goes wrong (missing\n cookie or wrong signature), return a default value.", "id": "f12971:c12:m9"} {"signature": "@DictProperty('', '', read_only=True)def query(self):", "body": "get = self.environ[''] = FormsDict()pairs = _parse_qsl(self.environ.get('', ''))for key, value in pairs:get[key] = valuereturn get", "docstring": "The :attr:`query_string` parsed into a :class:`FormsDict`. 
These\n values are sometimes called \"URL arguments\" or \"GET parameters\", but\n not to be confused with \"URL wildcards\" as they are provided by the\n :class:`Router`.", "id": "f12971:c12:m10"} {"signature": "@DictProperty('', '', read_only=True)def forms(self):", "body": "forms = FormsDict()for name, item in self.POST.allitems():if not isinstance(item, FileUpload):forms[name] = itemreturn forms", "docstring": "Form values parsed from an `url-encoded` or `multipart/form-data`\n encoded POST or PUT request body. The result is returned as a\n :class:`FormsDict`. All keys and values are strings. File uploads\n are stored separately in :attr:`files`.", "id": "f12971:c12:m11"} {"signature": "@DictProperty('', '', read_only=True)def params(self):", "body": "params = FormsDict()for key, value in self.query.allitems():params[key] = valuefor key, value in self.forms.allitems():params[key] = valuereturn params", "docstring": "A :class:`FormsDict` with the combined values of :attr:`query` and\n :attr:`forms`. File uploads are stored in :attr:`files`.", "id": "f12971:c12:m12"} {"signature": "@DictProperty('', '', read_only=True)def files(self):", "body": "files = FormsDict()for name, item in self.POST.allitems():if isinstance(item, FileUpload):files[name] = itemreturn files", "docstring": "File uploads parsed from `multipart/form-data` encoded POST or PUT\n request body. The values are instances of :class:`FileUpload`.", "id": "f12971:c12:m13"} {"signature": "@DictProperty('', '', read_only=True)def json(self):", "body": "ctype = self.environ.get('', '').lower().split('')[]if ctype == '':b = self._get_body_string()if not b:return Nonereturn json_loads(b)return None", "docstring": "If the ``Content-Type`` header is ``application/json``, this\n property holds the parsed content of the request body. Only requests\n smaller than :attr:`MEMFILE_MAX` are processed to avoid memory\n exhaustion.", "id": "f12971:c12:m14"} {"signature": "def _get_body_string(self):", "body": "clen = self.content_lengthif clen > self.MEMFILE_MAX:raise HTTPError(, '')if clen < : clen = self.MEMFILE_MAX + data = self.body.read(clen)if len(data) > self.MEMFILE_MAX: raise HTTPError(, '')return data", "docstring": "read body until content-length or MEMFILE_MAX into a string. Raise\n HTTPError(413) on requests that are to large.", "id": "f12971:c12:m18"} {"signature": "@propertydef body(self):", "body": "self._body.seek()return self._body", "docstring": "The HTTP request body as a seek-able file-like object. Depending on\n :attr:`MEMFILE_MAX`, this is either a temporary file or a\n :class:`io.BytesIO` instance. 
Accessing this property for the first\n time reads and replaces the ``wsgi.input`` environ variable.\n Subsequent accesses just do a `seek(0)` on the file object.", "id": "f12971:c12:m19"} {"signature": "@propertydef chunked(self):", "body": "return '' in self.environ.get('', '').lower()", "docstring": "True if Chunked transfer encoding was.", "id": "f12971:c12:m20"} {"signature": "@DictProperty('', '', read_only=True)def POST(self):", "body": "post = FormsDict()if not self.content_type.startswith(''):pairs = _parse_qsl(tonat(self._get_body_string(), ''))for key, value in pairs:post[key] = valuereturn postsafe_env = {'': ''} for key in ('', '', ''):if key in self.environ: safe_env[key] = self.environ[key]args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)if py31:args[''] = NCTextIOWrapper(args[''],encoding='',newline='')elif py3k:args[''] = ''data = cgi.FieldStorage(**args)self[''] = data data = data.list or []for item in data:if item.filename:post[item.name] = FileUpload(item.file, item.name,item.filename, item.headers)else:post[item.name] = item.valuereturn post", "docstring": "The values of :attr:`forms` and :attr:`files` combined into a single\n :class:`FormsDict`. Values are either strings (form values) or\n instances of :class:`cgi.FieldStorage` (file uploads).", "id": "f12971:c12:m21"} {"signature": "@propertydef url(self):", "body": "return self.urlparts.geturl()", "docstring": "The full request URI including hostname and scheme. If your app\n lives behind a reverse proxy or load balancer and you get confusing\n results, make sure that the ``X-Forwarded-Host`` header is set\n correctly.", "id": "f12971:c12:m22"} {"signature": "@DictProperty('', '', read_only=True)def urlparts(self):", "body": "env = self.environhttp = env.get('')or env.get('', '')host = env.get('') or env.get('')if not host:host = env.get('', '')port = env.get('')if port and port != ('' if http == '' else ''):host += '' + portpath = urlquote(self.fullpath)return UrlSplitResult(http, host, path, env.get(''), '')", "docstring": "The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.\n The tuple contains (scheme, host, path, query_string and fragment),\n but the fragment is always empty because it is not visible to the\n server.", "id": "f12971:c12:m23"} {"signature": "@propertydef fullpath(self):", "body": "return urljoin(self.script_name, self.path.lstrip(''))", "docstring": "Request path including :attr:`script_name` (if present).", "id": "f12971:c12:m24"} {"signature": "@propertydef query_string(self):", "body": "return self.environ.get('', '')", "docstring": "The raw :attr:`query` part of the URL (everything in between ``?``\n and ``#``) as a string.", "id": "f12971:c12:m25"} {"signature": "@propertydef script_name(self):", "body": "script_name = self.environ.get('', '').strip('')return '' + script_name + '' if script_name else ''", "docstring": "The initial portion of the URL's `path` that was removed by a higher\n level (server or routing middleware) before the application was\n called. This script path is returned with leading and tailing\n slashes.", "id": "f12971:c12:m26"} {"signature": "def path_shift(self, shift=):", "body": "script, path = path_shift(self.environ.get('', ''), self.path, shift)self[''], self[''] = script, path", "docstring": "Shift path segments from :attr:`path` to :attr:`script_name` and\n vice versa.\n\n :param shift: The number of path segments to shift. May be negative\n to change the shift direction. 
(default: 1)", "id": "f12971:c12:m27"} {"signature": "@propertydef content_length(self):", "body": "return int(self.environ.get('') or -)", "docstring": "The request body length as an integer. The client is responsible to\n set this header. Otherwise, the real length of the body is unknown\n and -1 is returned. In this case, :attr:`body` will be empty.", "id": "f12971:c12:m28"} {"signature": "@propertydef content_type(self):", "body": "return self.environ.get('', '').lower()", "docstring": "The Content-Type header as a lowercase-string (default: empty).", "id": "f12971:c12:m29"} {"signature": "@propertydef is_xhr(self):", "body": "requested_with = self.environ.get('', '')return requested_with.lower() == ''", "docstring": "True if the request was triggered by a XMLHttpRequest. This only\n works with JavaScript libraries that support the `X-Requested-With`\n header (most of the popular libraries do).", "id": "f12971:c12:m30"} {"signature": "@propertydef is_ajax(self):", "body": "return self.is_xhr", "docstring": "Alias for :attr:`is_xhr`. \"Ajax\" is not the right term.", "id": "f12971:c12:m31"} {"signature": "@propertydef auth(self):", "body": "basic = parse_auth(self.environ.get('', ''))if basic: return basicruser = self.environ.get('')if ruser: return (ruser, None)return None", "docstring": "HTTP authentication data as a (user, password) tuple. This\n implementation currently supports basic (not digest) authentication\n only. If the authentication happened at a higher level (e.g. in the\n front web-server or a middleware), the password field is None, but\n the user field is looked up from the ``REMOTE_USER`` environ\n variable. On any errors, None is returned.", "id": "f12971:c12:m32"} {"signature": "@propertydef remote_route(self):", "body": "proxy = self.environ.get('')if proxy: return [ip.strip() for ip in proxy.split('')]remote = self.environ.get('')return [remote] if remote else []", "docstring": "A list of all IPs that were involved in this request, starting with\n the client IP and followed by zero or more proxies. This does only\n work if all proxies support the ```X-Forwarded-For`` header. Note\n that this information can be forged by malicious clients.", "id": "f12971:c12:m33"} {"signature": "@propertydef remote_addr(self):", "body": "route = self.remote_routereturn route[] if route else None", "docstring": "The client IP as a string. 
Note that this information can be forged\n by malicious clients.", "id": "f12971:c12:m34"} {"signature": "def copy(self):", "body": "return Request(self.environ.copy())", "docstring": "Return a new :class:`Request` with a shallow :attr:`environ` copy.", "id": "f12971:c12:m35"} {"signature": "def __setitem__(self, key, value):", "body": "if self.environ.get(''):raise KeyError('')self.environ[key] = valuetodelete = ()if key == '':todelete = ('', '', '', '', '', '')elif key == '':todelete = ('', '')elif key.startswith(''):todelete = ('', '')for key in todelete:self.environ.pop('' + key, None)", "docstring": "Change an environ value and clear all caches that depend on it.", "id": "f12971:c12:m42"} {"signature": "def __getattr__(self, name):", "body": "try:var = self.environ['' % name]return var.__get__(self) if hasattr(var, '') else varexcept KeyError:raise AttributeError('' % name)", "docstring": "Search in self.environ for additional user defined attributes.", "id": "f12971:c12:m44"} {"signature": "def copy(self, cls=None):", "body": "cls = cls or BaseResponseassert issubclass(cls, BaseResponse)copy = cls()copy.status = self.statuscopy._headers = dict((k, v[:]) for (k, v) in self._headers.items())if self._cookies:copy._cookies = SimpleCookie()copy._cookies.load(self._cookies.output(header=''))return copy", "docstring": "Returns a copy of self.", "id": "f12971:c14:m1"} {"signature": "@propertydef status_line(self):", "body": "return self._status_line", "docstring": "The HTTP status line as a string (e.g. ``404 Not Found``).", "id": "f12971:c14:m4"} {"signature": "@propertydef status_code(self):", "body": "return self._status_code", "docstring": "The HTTP status code as an integer (e.g. 404).", "id": "f12971:c14:m5"} {"signature": "@propertydef headers(self):", "body": "hdict = HeaderDict()hdict.dict = self._headersreturn hdict", "docstring": "An instance of :class:`HeaderDict`, a case-insensitive dict-like\n view on the response headers.", "id": "f12971:c14:m8"} {"signature": "def get_header(self, name, default=None):", "body": "return self._headers.get(_hkey(name), [default])[-]", "docstring": "Return the value of a previously defined header. 
If there is no\n header with that name, return a default value.", "id": "f12971:c14:m13"} {"signature": "def set_header(self, name, value):", "body": "self._headers[_hkey(name)] = [value if isinstance(value, unicode)else str(value)]", "docstring": "Create a new response header, replacing any previously defined\n headers with the same name.", "id": "f12971:c14:m14"} {"signature": "def add_header(self, name, value):", "body": "self._headers.setdefault(_hkey(name), []).append(value if isinstance(value, unicode) else str(value))", "docstring": "Add an additional response header, not removing duplicates.", "id": "f12971:c14:m15"} {"signature": "def iter_headers(self):", "body": "return self.headerlist", "docstring": "Yield (header, value) tuples, skipping headers that are not\n allowed with the current response status code.", "id": "f12971:c14:m16"} {"signature": "@propertydef headerlist(self):", "body": "out = []headers = list(self._headers.items())if '' not in self._headers:headers.append(('', [self.default_content_type]))if self._status_code in self.bad_headers:bad_headers = self.bad_headers[self._status_code]headers = [h for h in headers if h[] not in bad_headers]out += [(name, val) for (name, vals) in headers for val in vals]if self._cookies:for c in self._cookies.values():out.append(('', c.OutputString()))if py3k:return [(k, v.encode('').decode('')) for (k, v) in out]else:return [(k, v.encode('') if isinstance(v, unicode) else v)for (k, v) in out]", "docstring": "WSGI conform list of (header, value) tuples.", "id": "f12971:c14:m17"} {"signature": "@propertydef charset(self, default=''):", "body": "if '' in self.content_type:return self.content_type.split('')[-].split('')[].strip()return default", "docstring": "Return the charset specified in the content-type header (default: utf8).", "id": "f12971:c14:m18"} {"signature": "def set_cookie(self, name, value, secret=None, **options):", "body": "if not self._cookies:self._cookies = SimpleCookie()if secret:value = touni(cookie_encode((name, value), secret))elif not isinstance(value, basestring):raise TypeError('')if len(value) > : raise ValueError('')self._cookies[name] = valuefor key, value in options.items():if key == '':if isinstance(value, timedelta):value = value.seconds + value.days * * if key == '':if isinstance(value, (datedate, datetime)):value = value.timetuple()elif isinstance(value, (int, float)):value = time.gmtime(value)value = time.strftime(\"\", value)self._cookies[name][key.replace('', '')] = value", "docstring": "Create a new cookie or replace an old one. If the `secret` parameter is\n set, create a `Signed Cookie` (described below).\n\n :param name: the name of the cookie.\n :param value: the value of the cookie.\n :param secret: a signature key required for signed cookies.\n\n Additionally, this method accepts all RFC 2109 attributes that are\n supported by :class:`cookie.Morsel`, including:\n\n :param max_age: maximum age in seconds. (default: None)\n :param expires: a datetime object or UNIX timestamp. 
(default: None)\n :param domain: the domain that is allowed to read the cookie.\n (default: current domain)\n :param path: limits the cookie to a given path (default: current path)\n :param secure: limit the cookie to HTTPS connections (default: off).\n :param httponly: prevents client-side javascript to read this cookie\n (default: off, requires Python 2.6 or newer).\n\n If neither `expires` nor `max_age` is set (default), the cookie will\n expire at the end of the browser session (as soon as the browser\n window is closed).\n\n Signed cookies may store any pickle-able object and are\n cryptographically signed to prevent manipulation. Keep in mind that\n cookies are limited to 4kb in most browsers.\n\n Warning: Signed cookies are not encrypted (the client can still see\n the content) and not copy-protected (the client can restore an old\n cookie). The main intention is to make pickling and unpickling\n save, not to store secret information at client side.", "id": "f12971:c14:m19"} {"signature": "def delete_cookie(self, key, **kwargs):", "body": "kwargs[''] = -kwargs[''] = self.set_cookie(key, '', **kwargs)", "docstring": "Delete a cookie. Be sure to use the same `domain` and `path`\n settings as used to create the cookie.", "id": "f12971:c14:m20"} {"signature": "def __init__(self, name, impmask):", "body": "self.name = nameself.impmask = impmaskself.module = sys.modules.setdefault(name, imp.new_module(name))self.module.__dict__.update({'': __file__,'': [],'': [],'': self})sys.meta_path.append(self)", "docstring": "Create a virtual package that redirects imports (see PEP 302).", "id": "f12971:c22:m0"} {"signature": "def get(self, key, default=None, index=-, type=None):", "body": "try:val = self.dict[key][index]return type(val) if type else valexcept Exception:passreturn default", "docstring": "Return the most recent value for a key.\n\n :param default: The default value to be returned if the key is not\n present or the type conversion fails.\n :param index: An index for the list of available values.\n :param type: If defined, this callable is used to cast the value\n into a specific type. Exception are suppressed and result in\n the default value to be returned.", "id": "f12971:c23:m8"} {"signature": "def append(self, key, value):", "body": "self.dict.setdefault(key, []).append(value)", "docstring": "Add a new value to the list of values for this key.", "id": "f12971:c23:m9"} {"signature": "def replace(self, key, value):", "body": "self.dict[key] = [value]", "docstring": "Replace the list of values with a single value.", "id": "f12971:c23:m10"} {"signature": "def getall(self, key):", "body": "return self.dict.get(key) or []", "docstring": "Return a (possibly empty) list of values for a key.", "id": "f12971:c23:m11"} {"signature": "def decode(self, encoding=None):", "body": "copy = FormsDict()enc = copy.input_encoding = encoding or self.input_encodingcopy.recode_unicode = Falsefor key, value in self.allitems():copy.append(self._fix(key, enc), self._fix(value, enc))return copy", "docstring": "Returns a copy with all keys and values de- or recoded to match\n :attr:`input_encoding`. Some libraries (e.g. 
WTForms) want a\n unicode dictionary.", "id": "f12971:c24:m1"} {"signature": "def getunicode(self, name, default=None, encoding=None):", "body": "try:return self._fix(self[name], encoding)except (UnicodeError, KeyError):return default", "docstring": "Return the value as a unicode string, or the default.", "id": "f12971:c24:m2"} {"signature": "def _ekey(self, key):", "body": "key = key.replace('', '').upper()if key in self.cgikeys:return keyreturn '' + key", "docstring": "Translate header field name to CGI/WSGI environ key.", "id": "f12971:c26:m1"} {"signature": "def raw(self, key, default=None):", "body": "return self.environ.get(self._ekey(key), default)", "docstring": "Return the header value as is (may be bytes or unicode).", "id": "f12971:c26:m2"} {"signature": "def load_config(self, filename):", "body": "conf = ConfigParser()conf.read(filename)for section in conf.sections():for key, value in conf.items(section):if section not in ('', ''):key = section + '' + keyself[key] = valuereturn self", "docstring": "Load values from an ``*.ini`` style config file.\n\n If the config file contains sections, their names are used as\n namespaces for the values within. The two special sections\n ``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).", "id": "f12971:c27:m1"} {"signature": "def load_dict(self, source, namespace=''):", "body": "for key, value in source.items():if isinstance(key, str):nskey = (namespace + '' + key).strip('')if isinstance(value, dict):self.load_dict(value, namespace=nskey)else:self[nskey] = valueelse:raise TypeError('' % type(key))return self", "docstring": "Load values from a dictionary structure. Nesting can be used to\n represent namespaces.\n\n >>> c = ConfigDict()\n >>> c.load_dict({'some': {'namespace': {'key': 'value'} } })\n {'some.namespace.key': 'value'}", "id": "f12971:c27:m2"} {"signature": "def update(self, *a, **ka):", "body": "prefix = ''if a and isinstance(a[], str):prefix = a[].strip('') + ''a = a[:]for key, value in dict(*a, **ka).items():self[prefix + key] = value", "docstring": "If the first parameter is a string, all keys are prefixed with this\n namespace. Apart from that it works just as the usual dict.update().\n Example: ``update('some.namespace', key='value')``", "id": "f12971:c27:m3"} {"signature": "def meta_get(self, key, metafield, default=None):", "body": "return self._meta.get(key, {}).get(metafield, default)", "docstring": "Return the value of a meta field for a key.", "id": "f12971:c27:m7"} {"signature": "def meta_set(self, key, metafield, value):", "body": "self._meta.setdefault(key, {})[metafield] = valueif key in self:self[key] = self[key]", "docstring": "Set the meta field for a key to a new value. 
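The nesting-to-namespace behaviour that load_dict documents can be sketched on its own; this standalone version assumes '.' as the separator, as the docstring's own example implies:

def flatten(source, namespace=''):
    # Turn {'some': {'namespace': {'key': 'value'}}} into {'some.namespace.key': 'value'}.
    out = {}
    for key, value in source.items():
        nskey = (namespace + '.' + key).strip('.')
        if isinstance(value, dict):
            out.update(flatten(value, nskey))
        else:
            out[nskey] = value
    return out

assert flatten({'some': {'namespace': {'key': 'value'}}}) == {'some.namespace.key': 'value'}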
This triggers the\n on-change handler for existing keys.", "id": "f12971:c27:m8"} {"signature": "def meta_list(self, key):", "body": "return self._meta.get(key, {}).keys()", "docstring": "Return an iterable of meta field names defined for a key.", "id": "f12971:c27:m9"} {"signature": "def __call__(self):", "body": "return self[-]", "docstring": "Return the current default application.", "id": "f12971:c28:m0"} {"signature": "def push(self, value=None):", "body": "if not isinstance(value, Bottle):value = Bottle()self.append(value)return value", "docstring": "Add a new :class:`Bottle` instance to the stack", "id": "f12971:c28:m1"} {"signature": "def add_path(self, path, base=None, index=None, create=False):", "body": "base = os.path.abspath(os.path.dirname(base or self.base))path = os.path.abspath(os.path.join(base, os.path.dirname(path)))path += os.sepif path in self.path:self.path.remove(path)if create and not os.path.isdir(path):os.makedirs(path)if index is None:self.path.append(path)else:self.path.insert(index, path)self.cache.clear()return os.path.exists(path)", "docstring": "Add a new path to the list of search paths. Return False if the\n path does not exist.\n\n :param path: The new search path. Relative paths are turned into\n an absolute and normalized form. If the path looks like a file\n (not ending in `/`), the filename is stripped off.\n :param base: Path used to absolutize relative search paths.\n Defaults to :attr:`base` which defaults to ``os.getcwd()``.\n :param index: Position within the list of search paths. Defaults\n to last index (appends to the list).\n\n The `base` parameter makes it easy to reference files installed\n along with a python module or package::\n\n res.add_path('./resources/', __file__)", "id": "f12971:c31:m1"} {"signature": "def __iter__(self):", "body": "search = self.path[:]while search:path = search.pop()if not os.path.isdir(path): continuefor name in os.listdir(path):full = os.path.join(path, name)if os.path.isdir(full): search.append(full)else: yield full", "docstring": "Iterate over all existing files in all registered paths.", "id": "f12971:c31:m2"} {"signature": "def lookup(self, name):", "body": "if name not in self.cache or DEBUG:for path in self.path:fpath = os.path.join(path, name)if os.path.isfile(fpath):if self.cachemode in ('', ''):self.cache[name] = fpathreturn fpathif self.cachemode == '':self.cache[name] = Nonereturn self.cache[name]", "docstring": "Search for a resource and return an absolute file path, or `None`.\n\n The :attr:`path` list is searched in order. The first match is\n returend. Symlinks are followed. 
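The lookup method above is first-match resolution over an ordered search path with a result cache; a hedged standalone sketch (names are illustrative, not the original attributes):

import os

_cache = {}

def lookup(name, search_paths):
    # The first existing file along the ordered search path wins; hits (and
    # misses) are cached to speed up later lookups.
    if name not in _cache:
        _cache[name] = None
        for path in search_paths:
            candidate = os.path.join(path, name)
            if os.path.isfile(candidate):
                _cache[name] = candidate
                break
    return _cache[name]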
The result is cached to speed up\n future lookups.", "id": "f12971:c31:m3"} {"signature": "def open(self, name, mode='', *args, **kwargs):", "body": "fname = self.lookup(name)if not fname: raise IOError(\"\" % name)return self.opener(fname, mode=mode, *args, **kwargs)", "docstring": "Find a resource and return a file object, or raise IOError.", "id": "f12971:c31:m4"} {"signature": "def __init__(self, fileobj, name, filename, headers=None):", "body": "self.file = fileobjself.name = nameself.raw_filename = filenameself.headers = HeaderDict(headers) if headers else HeaderDict()", "docstring": "Wrapper for file uploads.", "id": "f12971:c32:m0"} {"signature": "@cached_propertydef filename(self):", "body": "fname = self.raw_filenameif not isinstance(fname, unicode):fname = fname.decode('', '')fname = normalize('', fname)fname = fname.encode('', '').decode('')fname = os.path.basename(fname.replace('', os.path.sep))fname = re.sub(r'', '', fname).strip()fname = re.sub(r'', '', fname).strip('')return fname[:] or ''", "docstring": "Name of the file on the client file system, but normalized to ensure\n file system compatibility. An empty filename is returned as 'empty'.\n\n Only ASCII letters, digits, dashes, underscores and dots are\n allowed in the final filename. Accents are removed, if possible.\n Whitespace is replaced by a single dash. Leading or tailing dots\n or dashes are removed. The filename is limited to 255 characters.", "id": "f12971:c32:m1"} {"signature": "def save(self, destination, overwrite=False, chunk_size= ** ):", "body": "if isinstance(destination, basestring): if os.path.isdir(destination):destination = os.path.join(destination, self.filename)if not overwrite and os.path.exists(destination):raise IOError('')with open(destination, '') as fp:self._copy_file(fp, chunk_size)else:self._copy_file(destination, chunk_size)", "docstring": "Save file to disk or copy its content to an open file(-like) object.\n If *destination* is a directory, :attr:`filename` is added to the\n path. Existing files are not overwritten by default (IOError).\n\n :param destination: File path, directory or file(-like) object.\n :param overwrite: If True, replace existing files. (default: False)\n :param chunk_size: Bytes to read at a time. (default: 64kb)", "id": "f12971:c32:m3"} {"signature": "def __init__(self,source=None,name=None,lookup=None,encoding='', **settings):", "body": "self.name = nameself.source = source.read() if hasattr(source, '') else sourceself.filename = source.filename if hasattr(source, '') else Noneself.lookup = [os.path.abspath(x) for x in lookup] if lookup else []self.encoding = encodingself.settings = self.settings.copy() self.settings.update(settings) if not self.source and self.name:self.filename = self.search(self.name, self.lookup)if not self.filename:raise TemplateError('' % repr(name))if not self.source and not self.filename:raise TemplateError('')self.prepare(**self.settings)", "docstring": "Create a new template.\n If the source parameter (str or buffer) is missing, the name argument\n is used to guess a template filename. Subclasses can assume that\n self.source and/or self.filename are set. 
Both are strings.\n The lookup, encoding and settings parameters are stored as instance\n variables.\n The lookup parameter stores a list containing directory paths.\n The encoding parameter should be used to decode byte strings or files.\n The settings parameter contains a dict for engine-specific settings.", "id": "f12971:c56:m0"} {"signature": "@classmethoddef search(cls, name, lookup=None):", "body": "if not lookup:depr('',True) lookup = ['']if os.path.isabs(name) and os.path.isfile(name):depr('', True) return os.path.abspath(name)for spath in lookup:spath = os.path.abspath(spath) + os.sepfname = os.path.abspath(os.path.join(spath, name))if not fname.startswith(spath): continueif os.path.isfile(fname): return fnamefor ext in cls.extensions:if os.path.isfile('' % (fname, ext)):return '' % (fname, ext)", "docstring": "Search name in all directories specified in lookup.\n First without, then with common extensions. Return first hit.", "id": "f12971:c56:m1"} {"signature": "@classmethoddef global_config(cls, key, *args):", "body": "if args:cls.settings = cls.settings.copy() cls.settings[key] = args[]else:return cls.settings[key]", "docstring": "This reads or sets the global settings stored in class.settings.", "id": "f12971:c56:m2"} {"signature": "def prepare(self, **options):", "body": "raise NotImplementedError", "docstring": "Run preparations (parsing, caching, ...).\n It should be possible to call this again to refresh a template or to\n update settings.", "id": "f12971:c56:m3"} {"signature": "def render(self, *args, **kwargs):", "body": "raise NotImplementedError", "docstring": "Render the template with the specified local variables and return\n a single byte or unicode string. If it is a byte string, the encoding\n must match self.encoding. This method must be thread-safe!\n Local variables may be provided in dictionaries (args)\n or directly, as keywords (kwargs).", "id": "f12971:c56:m4"} {"signature": "def render(self, *args, **kwargs):", "body": "env = {}stdout = []for dictarg in args:env.update(dictarg)env.update(kwargs)self.execute(stdout, env)return ''.join(stdout)", "docstring": "Render the template using keyword arguments as local variables.", "id": "f12971:c60:m6"} {"signature": "def get_syntax(self):", "body": "return self._syntax", "docstring": "Tokens as a space separated string (default: <% %> % {{ }})", "id": "f12971:c62:m1"} {"signature": "def pathjoin(*args, **kwargs):", "body": "log.debug('' % list(args))def _pathjoin(*args, **kwargs):len_ = len(args) - if len_ < :raise Exception('')elif len_ == :if not isinstance(args, basestring):if hasattr(args, ''):_args = args_argsargs = args[]for i, arg in enumerate(args):if not i:yield arg.rstrip('')elif i == len_:yield arg.lstrip('')else:yield arg.strip('')joined_path = u''.join(_pathjoin(*args))return sanitize_path(joined_path)", "docstring": "Arguments:\n args (list): *args list of paths\n if len(args) == 1, args[0] is not a string, and args[0] is iterable,\n set args to args[0].\n\nBasically::\n\n joined_path = u'/'.join(\n [args[0].rstrip('/')] +\n [a.strip('/') for a in args[1:-1]] +\n [args[-1].lstrip('/')])", "id": "f12972:m0"} {"signature": "def rewrite_path(FS, _path):", "body": "path = sanitize_path(_path)log.debug('' % path)if FS.exists(path):if FS.isdir(path):dir_index_html_path = pathjoin(path, '')if (FS.exists(dir_index_html_path)and FS.isfile(dir_index_html_path)):path = dir_index_html_pathelse:if not (path.endswith('') or path.endswith('')):path_dot_html = path + \"\"if FS.exists(path_dot_html) and 
FS.isfile(path_dot_html):path = path_dot_htmlreturn path", "docstring": "Args:\n _path (str): path to rewrite (in search of index.html)\n root_path (str): filesystem root_path", "id": "f12972:m6"} {"signature": "def generate_dirlist_html(FS, filepath):", "body": "yield ''if filepath == '':filepath = ''for name in FS.listdir(filepath):full_path = pathjoin(filepath, name)if FS.isdir(full_path):full_path = full_path + ''yield u''.format(cgi.escape(full_path)) yield ''", "docstring": "Generate directory listing HTML\n\nArguments:\n FS (FS): filesystem object to read files from\n filepath (str): path to generate directory listings for\n\nKeyword Arguments:\n list_dir (callable: list[str]): list file names in a directory\n isdir (callable: bool): os.path.isdir\n\nYields:\n str: lines of an HTML table", "id": "f12972:m7"} {"signature": "def git_static_file(filename,mimetype='',download=False,charset=''):", "body": "filename = filename.strip('')headers = dict()FS = request.app.config['']if not FS.exists(filename):return HTTPError(, \"\")if mimetype == '':if download and download is not True:mimetype, encoding = mimetypes.guess_type(download)else:mimetype, encoding = mimetypes.guess_type(filename)if encoding:headers[''] = encodingif mimetype:if mimetype[:] == '' and charset and '' not in mimetype:mimetype += '' % charsetheaders[''] = mimetypeif download:download = os.path.basename(filename if download else download)headers[''] = '' % downloadinfo = FS.getinfo(filename)headers[''] = clen = info['']lm = time.strftime(\"\",time.gmtime(info['']))headers[''] = lmims = request.environ.get('')if ims:ims = parse_date(ims.split(\"\")[].strip())mtime = info['']if mtime and ims is not None and ims >= int(mtime):headers[''] = time.strftime(\"\",time.gmtime())return HTTPResponse(status=, **headers)body = '' if request.method == '' else FS.get_fileobj(filename)clenreturn HTTPResponse(body, **headers)", "docstring": "This method is derived from bottle.static_file:\n\n Open [a file] and return :exc:`HTTPResponse` with status\n code 200, 305, 403 or 404. The ``Content-Type``, ``Content-Encoding``,\n ``Content-Length`` and ``Last-Modified`` headers are set if possible.\n Special support for ``If-Modified-Since`` [...].\n\n :param filename: Name or path of the file to send.\n :param mimetype: Defines the content-type header (default: guess from\n file extension)\n :param download: If True, ask the browser to open a `Save as...` dialog\n instead of opening the file with the associated program. You can\n specify a custom filename as a string. If not specified, the\n original filename is used (default: False).\n :param charset: The charset to use for files with a ``text/*``\n mime-type. 
(default: UTF-8)", "id": "f12972:m11"} {"signature": "def exists(self, path):", "body": "", "docstring": "TODO", "id": "f12972:c2:m1"} {"signature": "def isdir(self, path):", "body": "", "docstring": "TODO", "id": "f12972:c2:m2"} {"signature": "def isfile(self, path):", "body": "", "docstring": "TODO", "id": "f12972:c2:m3"} {"signature": "def on_load(self):", "body": "return True", "docstring": "Executes when a plugin is loaded.\n\nOverride this if your plugin needs to do initialization when loading.\nDo not use this to restore runtime changes to variables -- they will be overwritten later on by\nPluginManager.load_state()", "id": "f12990:c0:m1"} {"signature": "def on_unload(self):", "body": "return True", "docstring": "(Not Implemented Yet) Executes when a plugin is unloaded.\n\nOverride this if your plugin needs to do cleanup when unloading.", "id": "f12990:c0:m2"} {"signature": "def on_connect(self):", "body": "return True", "docstring": "Executes immediately after connecting to slack.\n\nWill not fire on reconnects.", "id": "f12990:c0:m3"} {"signature": "def send_message(self, channel, text):", "body": "if isinstance(channel, SlackIM) or isinstance(channel, SlackUser):self._bot.send_im(channel, text)elif isinstance(channel, SlackRoom):self._bot.send_message(channel, text)elif isinstance(channel, basestring):if channel[] == '':self._bot.send_im(channel[:], text)elif channel[] == '':self._bot.send_message(channel[:], text)else:self._bot.send_message(channel, text)else:self._bot.send_message(channel, text)", "docstring": "Used to send a message to the specified channel.\n\n* channel - can be a channel or user\n* text - message to send", "id": "f12990:c0:m4"} {"signature": "def start_timer(self, duration, func, *args):", "body": "t = threading.Timer(duration, self._timer_callback, (func, args))self._timer_callbacks[func] = tt.start()self.log.info(\"\", func.__name__, duration)", "docstring": "Schedules a function to be called after some period of time.\n\n* duration - time in seconds to wait before firing\n* func - function to be called\n* args - arguments to pass to the function", "id": "f12990:c0:m5"} {"signature": "def stop_timer(self, func):", "body": "if func in self._timer_callbacks:t = self._timer_callbacks[func]t.cancel()del self._timer_callbacks[func]", "docstring": "Stops a timer if it hasn't fired yet\n\n* func - the function passed in start_timer", "id": "f12990:c0:m6"} {"signature": "def get_user(self, username):", "body": "if hasattr(self._bot, ''):user = self._bot.user_manager.get_by_username(username)if user:return useruser = SlackUser.get_user(self._bot.sc, username)self._bot.user_manager.set(user)return userreturn SlackUser.get_user(self._bot.sc, username)", "docstring": "Utility function to query slack for a particular user\n\n:param username: The username of the user to lookup\n:return: SlackUser object or None", "id": "f12990:c0:m8"} {"signature": "def get_channel(self, channel):", "body": "return SlackChannel.get_channel(self._bot.sc, channel)", "docstring": "Utility function to query slack for a particular channel\n\n:param channel: The channel name or id of the channel to lookup\n:return: SlackChannel object or None", "id": "f12990:c0:m9"} {"signature": "def cmd(admin_only=False, acl='', aliases=None, while_ignored=False, *args, **kwargs):", "body": "def wrapper(func):func.is_cmd = Truefunc.is_subcmd = len(func.__name__.split('')) > func.cmd_name = func.__name__.replace('', '')func.admin_only = admin_onlyfunc.acl = aclfunc.aliases = aliasesfunc.while_ignored = 
while_ignoredreturn funcreturn wrapper", "docstring": "Decorator to mark plugin functions as commands in the form of !\n\n* admin_only - indicates only users in bot_admin are allowed to execute (only used if AuthManager is loaded)\n* acl - indicates which ACL to perform permission checks against (only used if AuthManager is loaded)\n* aliases - register function with additional commands (i.e. !alias1, !alias2, etc)\n* while_ignored - allows a command to be run, even if channel has been !sleep", "id": "f12991:m0"} {"signature": "def webhook(*args, **kwargs):", "body": "def wrapper(func):func.is_webhook = Truefunc.route = args[]func.form_params = kwargs.get('', [])func.method = kwargs.get('', '')return funcreturn wrapper", "docstring": "Decorator to mark plugin functions as entry points for web calls\n\n* route - web route to register, uses Flask syntax\n* method - GET/POST, defaults to POST", "id": "f12991:m1"} {"signature": "def eventhandler(*args, **kwargs):", "body": "def wrapper(func):if isinstance(kwargs[''], basestring):kwargs[''] = [kwargs['']]func.is_eventhandler = Truefunc.events = kwargs['']return funcreturn wrapper", "docstring": "Decorator. Marks a function as a receiver for the specified slack event(s).\n\n* events - String or list of events to handle", "id": "f12992:m0"} {"signature": "def start(self):", "body": "self.bot_start_time = datetime.now()self.webserver = Webserver(self.config[''][''], self.config[''][''])self.plugins.load()self.plugins.load_state()self._find_event_handlers()self.sc = ThreadedSlackClient(self.config[''])self.always_send_dm = ['']if '' in self.config:self.always_send_dm.extend(map(lambda x: '' + x, self.config['']))logging.getLogger('').setLevel(logging.INFO)self.is_setup = Trueif self.test_mode:self.metrics[''] = (datetime.now() - self.bot_start_time).total_seconds() * ", "docstring": "Initializes the bot, plugins, and everything.", "id": "f12992:c0:m1"} {"signature": "def run(self, start=True):", "body": "if not self.is_setup:raise NotSetupErrorself.webserver.start()first_connect = Truetry:while self.runnable:if self.reconnect_needed:if not self.sc.rtm_connect(with_team_state=start):return Falseself.reconnect_needed = Falseif first_connect:first_connect = Falseself.plugins.connect()try:events = self.sc.rtm_read()except AttributeError:self.log.exception('')self.runnable = Falseevents = []except:self.log.exception('')self.reconnect_needed = Trueevents = []for e in events:try:self._handle_event(e)except KeyboardInterrupt:self.runnable = Falseexcept:self.log.exception('')sleep()except KeyboardInterrupt:passexcept:self.log.exception('')", "docstring": "Connects to slack and enters the main loop.\n\n* start - If True, rtm.start API is used. Else rtm.connect API is used\n\nFor more info, refer to\nhttps://python-slackclient.readthedocs.io/en/latest/real_time_messaging.html#rtm-start-vs-rtm-connect", "id": "f12992:c0:m3"} {"signature": "def stop(self):", "body": "if self.webserver is not None:self.webserver.stop()if not self.test_mode:self.plugins.save_state()", "docstring": "Does cleanup of bot and plugins.", "id": "f12992:c0:m4"} {"signature": "def send_message(self, channel, text, thread=None, reply_broadcast=None):", "body": "if isinstance(channel, SlackRoomIMBase):channel = channel.idself.log.debug(\"\", channel, text)self.sc.rtm_send_message(channel, text, thread=thread, reply_broadcast=reply_broadcast)", "docstring": "Sends a message to the specified channel\n\n* channel - The channel to send to. 
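The cmd decorator above does no wrapping at all: it only tags attributes onto the function so the dispatcher can discover it later. A minimal self-contained sketch of that pattern (simplified names, not the original plugin machinery):

def cmd(admin_only=False, aliases=None):
    def wrapper(func):
        # Mark the function; the dispatcher later scans classes for `is_cmd`.
        func.is_cmd = True
        func.cmd_name = func.__name__
        func.admin_only = admin_only
        func.aliases = aliases or []
        return func
    return wrapper

class EchoPlugin:
    @cmd(aliases=['say'])
    def echo(self, msg, args):
        return ' '.join(args)

# Discovery reduces to scanning for the marker attribute, roughly what
# register_plugin does through its command-registration helper:
commands = {name: f for name, f in vars(EchoPlugin).items() if getattr(f, 'is_cmd', False)}
assert 'echo' in commands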
This can be a SlackChannel object, a channel id, or a channel name\n(without the #)\n* text - String to send\n* thread - reply to the thread. See https://api.slack.com/docs/message-threading#threads_party\n* reply_broadcast - Set to true to indicate your reply is germane to all members of a channel", "id": "f12992:c0:m5"} {"signature": "def send_im(self, user, text):", "body": "if isinstance(user, SlackUser):user = user.idchannelid = self._find_im_channel(user)else:channelid = user.idself.send_message(channelid, text)", "docstring": "Sends a message to a user as an IM\n\n* user - The user to send to. This can be a SlackUser object, a user id, or the username (without the @)\n* text - String to send", "id": "f12992:c0:m6"} {"signature": "def push(self, message):", "body": "if self._ignore_event(message):return None, Noneargs = self._parse_message(message)self.log.debug(\"\", args)cmd, msg_args = self._find_longest_prefix_command(args)if cmd is not None:if message.user is None:self.log.debug(\"\", message)return None, Nonesender = message.user.usernameif message.channel is not None:sender = \"\" % (message.channel.name, sender)self.log.info(\"\", sender, cmd, msg_args)f = self._get_command(cmd, message.user)if f:if self._is_channel_ignored(f, message.channel):self.log.info(\"\", message.channel, cmd)return '', \"\"return cmd, f.execute(message, msg_args)return '', \"\" % cmdreturn None, None", "docstring": "Takes a SlackEvent, parses it for a command, and runs against registered plugin", "id": "f12993:c3:m1"} {"signature": "def _ignore_event(self, message):", "body": "if hasattr(message, '') and message.subtype in self.ignored_events:return Truereturn False", "docstring": "message_replied event is not truly a message event and does not have a message.text\ndon't process such events\n\ncommands may not be idempotent, so ignore message_changed events.", "id": "f12993:c3:m2"} {"signature": "def register_plugin(self, plugin):", "body": "self.log.info(\"\", type(plugin).__name__)self._register_commands(plugin)plugin.on_load()", "docstring": "Registers a plugin and commands with the dispatcher for push()", "id": "f12993:c3:m4"} {"signature": "@cmd()def echo(self, msg, args):", "body": "self.log.debug(\"\", args)return ''.join(args)", "docstring": "Simply repeats whatever is said.", "id": "f12997:c0:m0"} {"signature": "@cmd()def xyzzy(self, msg, args):", "body": "return \"\" % msg.user", "docstring": "Nothing happens.", "id": "f12997:c0:m1"} {"signature": "@cmd()def alert(self, msg, args):", "body": "self.send_message(self.config[''], '')return None", "docstring": "Alert everyone.", "id": "f12997:c0:m2"} {"signature": "@cmd()def shortsleep(self, msg, args):", "body": "self.start_timer(, self._sleep_func)", "docstring": "Sleep for a bit, then print a message.", "id": "f12997:c0:m4"} {"signature": "@cmd()def shortsleep2(self, msg, args):", "body": "self.start_timer(, self._sleep_func2, msg.channel, ''.join(args))", "docstring": "Sleep for a bit, then echo the message back", "id": "f12997:c0:m5"} {"signature": "@cmd(admin_only=True)def admincmd(self, msg, args):", "body": "return ''", "docstring": "A command only admins should be able to run.", "id": "f12997:c1:m0"} {"signature": "def get(self, userid):", "body": "if userid in self.users:return self.users[userid]return None", "docstring": "Retrieve user by id", "id": "f12999:c0:m1"} {"signature": "def get_by_username(self, username):", "body": "res = filter(lambda x: x.username == username, self.users.values())if len(res) > :return res[]return None", 
"docstring": "Retrieve user by username", "id": "f12999:c0:m2"} {"signature": "def set(self, user):", "body": "self.log.info(\"\", user.id, user.username)self.load_user_info(user)self.log.info(\"\", user.id, user.username)self.load_user_rights(user)self.log.info(\"\", user.id, user.username)self._add_user_to_cache(user)return user", "docstring": "Adds a user object to the user manager\n\nuser - a SlackUser object", "id": "f12999:c0:m3"} {"signature": "def load_user_info(self, user):", "body": "pass", "docstring": "Loads additional user information and stores in user object", "id": "f12999:c0:m5"} {"signature": "def load_user_rights(self, user):", "body": "if user.username in self.admins:user.is_admin = Trueelif not hasattr(user, ''):user.is_admin = False", "docstring": "Sets permissions on user object", "id": "f12999:c0:m6"} {"signature": "@cmd()def help(self, msg, args):", "body": "output = []if len(args) == :commands = sorted(self._bot.dispatcher.commands.items(), key=itemgetter())commands = filter(lambda x: x[].is_subcmd is False, commands)if self._should_filter_help_commands(msg.user):commands = filter(lambda x: x[].admin_only is False, commands)for name, cmd in commands:output.append(self._get_short_help_for_command(name))else:name = '' + args[]output = [self._get_help_for_command(name)]return ''.join(output)", "docstring": "Displays help for each command", "id": "f13000:c0:m0"} {"signature": "@cmd(admin_only=True)def save(self, msg, args):", "body": "self.send_message(msg.channel, \"\")self._bot.plugins.save_state()self.send_message(msg.channel, \"\")", "docstring": "Causes the bot to write its current state to backend.", "id": "f13000:c0:m4"} {"signature": "@cmd(admin_only=True)def shutdown(self, msg, args):", "body": "self.log.info(\"\", msg.user.username)self._bot.runnable = Falsereturn \"\"", "docstring": "Causes the bot to gracefully shutdown.", "id": "f13000:c0:m5"} {"signature": "@cmd()def whoami(self, msg, args):", "body": "output = [\"\" % msg.user]if hasattr(self._bot.dispatcher, '') and msg.user.is_admin is True:output.append(\"\")output.append(\"\" % (self._bot.version, self._bot.commit))return ''.join(output)", "docstring": "Prints information about the user and bot version.", "id": "f13000:c0:m6"} {"signature": "@cmd()@channel_wrapperdef sleep(self, channel):", "body": "self.log.info('', channel)self._bot.dispatcher.ignore(channel)self.send_message(channel, '')", "docstring": "Causes the bot to ignore all messages from the channel.\n\n Usage:\n !sleep [channel name] - ignore the specified channel (or current if none specified)", "id": "f13000:c0:m7"} {"signature": "@cmd(admin_only=True, while_ignored=True)@channel_wrapperdef wake(self, channel):", "body": "self.log.info('', channel)self._bot.dispatcher.unignore(channel)self.send_message(channel, '')", "docstring": "Causes the bot to resume operation in the channel.\n\n Usage:\n !wake [channel name] - unignore the specified channel (or current if none specified)", "id": "f13000:c0:m8"} {"signature": "@cmd(admin_only=True)def acl(self, msg, args):", "body": "if len(args) == :return \"\"valid_actions = ['', '', '', '', '', '']return \"\" % ''.join(valid_actions)", "docstring": "ACL Management.\n\n Usage:\n !acl _action_ [args]\n\n Actions:\n new _acl_ - Create a new ACL\n delete _acl_ - Delete an ACL\n\n allow _acl_ _user_ - Add user to the acl allow block\n deny _acl_ _user_ - Add user to the acl deny block\n remove _acl_ _user_ - Remove user from acl allow and deny blocks\n\n show - Show all defined ACLs\n show _acl_ - 
Show allow and deny blocks of specified ACL", "id": "f13001:c0:m1"} {"signature": "@cmd(admin_only=True)def acl_show(self, msg, args):", "body": "name = args[] if len(args) > else Noneif name is None:return \"\" % (msg.user, ''.join(self._acl.keys()))if name not in self._acl:return \"\" % namereturn ''.join([\"\" % (msg.user, name),\"\" % ''.join(self._acl[name]['']),\"\" % ''.join(self._acl[name][''])])", "docstring": "Show current allow and deny blocks for the given acl.", "id": "f13001:c0:m7"} {"signature": "def add_user_to_allow(self, name, user):", "body": "if not self.remove_user_from_acl(name, user):return Falseif name not in self._acl:return Falseself._acl[name][''].append(user)return True", "docstring": "Add a user to the given acl allow block.", "id": "f13001:c0:m8"} {"signature": "def add_user_to_deny(self, name, user):", "body": "if not self.remove_user_from_acl(name, user):return Falseif name not in self._acl:return Falseself._acl[name][''].append(user)return True", "docstring": "Add a user to the given acl deny block.", "id": "f13001:c0:m9"} {"signature": "def remove_user_from_acl(self, name, user):", "body": "if name not in self._acl:return Falseif user in self._acl[name]['']:self._acl[name][''].remove(user)if user in self._acl[name]['']:self._acl[name][''].remove(user)return True", "docstring": "Remove a user from the given acl (both allow and deny).", "id": "f13001:c0:m10"} {"signature": "def create_acl(self, name):", "body": "if name in self._acl:return Falseself._acl[name] = {'': [],'': []}return True", "docstring": "Create a new acl.", "id": "f13001:c0:m11"} {"signature": "def delete_acl(self, name):", "body": "if name not in self._acl:return Falsedel self._acl[name]return True", "docstring": "Delete an acl.", "id": "f13001:c0:m12"} {"signature": "def __init__(self, id, sc=None, **kwargs):", "body": "self.id = idself._sc = scself.logger = logging.getLogger(type(self).__name__)self.logger.setLevel(logging.DEBUG)", "docstring": "Base class for rooms (channels, groups) and IMs", "id": "f13009:c0:m0"} {"signature": "def _list_networks():", "body": "output = core.run(\"\")networks = {}net_lines = [n.strip() for n in output.splitlines()[:]]for line in net_lines:if not line:continuename, state, auto = line.split()networks[name] = state == \"\"return networks", "docstring": "Return a dictionary of network name to active status bools.\n\n Sample virsh net-list output::\n\n Name State Autostart\n -----------------------------------------\n default active yes\n juju-test inactive no\n foobar inactive no\n\n Parsing the above would return::\n {\"default\": True, \"juju-test\": False, \"foobar\": False}\n\n See: http://goo.gl/kXwfC", "id": "f13057:m3"} {"signature": "def __init__(self, fsapi_device_url, pin, timeout=DEFAULT_TIMEOUT_IN_SECONDS):", "body": "self.fsapi_device_url = fsapi_device_urlself.pin = pinself.timeout = timeoutself.sid = Noneself.__webfsapi = Noneself.__modes = Noneself.__volume_steps = Noneself.__equalisers = Noneself.__session = aiohttp.ClientSession()", "docstring": "Initialize the Frontier Silicon device.", "id": "f13081:c0:m0"} {"signature": "def __del__(self):", "body": "self.call('')if not self.__session.closed:if self.__session._connector_owner:self.__session._connector.close()self.__session._connector = None", "docstring": "Destroy the device and http sessions.", "id": "f13081:c0:m1"} {"signature": "@asyncio.coroutinedef get_fsapi_endpoint(self):", "body": "endpoint = yield from self.__session.get(self.fsapi_device_url, timeout = self.timeout)text = 
yield from endpoint.text(encoding='')doc = objectify.fromstring(text)return doc.webfsapi.text", "docstring": "Parse the fsapi endpoint from the device url.", "id": "f13081:c0:m2"} {"signature": "@asyncio.coroutinedef create_session(self):", "body": "req_url = '' % (self.__webfsapi, '')sid = yield from self.__session.get(req_url, params=dict(pin=self.pin),timeout = self.timeout)text = yield from sid.text(encoding='')doc = objectify.fromstring(text)return doc.sessionId.text", "docstring": "Create a session on the frontier silicon device.", "id": "f13081:c0:m3"} {"signature": "@asyncio.coroutinedef call(self, path, extra=None):", "body": "try:if not self.__webfsapi:self.__webfsapi = yield from self.get_fsapi_endpoint()if not self.sid:self.sid = yield from self.create_session()if not isinstance(extra, dict):extra = dict()params = dict(pin=self.pin, sid=self.sid)params.update(**extra)req_url = ('' % (self.__webfsapi, path))result = yield from self.__session.get(req_url, params=params,timeout = self.timeout)if result.status == :text = yield from result.text(encoding='')else:self.sid = yield from self.create_session()params = dict(pin=self.pin, sid=self.sid)params.update(**extra)result = yield from self.__session.get(req_url, params=params,timeout = self.timeout)text = yield from result.text(encoding='')return objectify.fromstring(text)except Exception as e:logging.info('' +traceback.format_exc())return None", "docstring": "Execute a frontier silicon API call.", "id": "f13081:c0:m4"} {"signature": "@asyncio.coroutinedef handle_get(self, item):", "body": "res = yield from self.call(''.format(item))return res", "docstring": "Helper method for reading a value by using the fsapi API.", "id": "f13081:c0:m5"} {"signature": "@asyncio.coroutinedef handle_set(self, item, value):", "body": "doc = yield from self.call(''.format(item), dict(value=value))if doc is None:return Nonereturn doc.status == ''", "docstring": "Helper method for setting a value by using the fsapi API.", "id": "f13081:c0:m6"} {"signature": "@asyncio.coroutinedef handle_text(self, item):", "body": "doc = yield from self.handle_get(item)if doc is None:return Nonereturn doc.value.c8_array.text or None", "docstring": "Helper method for fetching a text value.", "id": "f13081:c0:m7"} {"signature": "@asyncio.coroutinedef handle_int(self, item):", "body": "doc = yield from self.handle_get(item)if doc is None:return Nonereturn int(doc.value.u8.text) or None", "docstring": "Helper method for fetching a integer value.", "id": "f13081:c0:m8"} {"signature": "@asyncio.coroutinedef handle_long(self, item):", "body": "doc = yield from self.handle_get(item)if doc is None:return Nonereturn int(doc.value.u32.text) or None", "docstring": "Helper method for fetching a long value. 
Result is integer.", "id": "f13081:c0:m9"} {"signature": "@asyncio.coroutinedef handle_list(self, item):", "body": "doc = yield from self.call(''+item+'', dict(maxItems=,))if doc is None:return []if not doc.status == '':return []ret = list()for index, item in enumerate(list(doc.iterchildren(''))):temp = dict(band=index)for field in list(item.iterchildren()):temp[field.get('')] = list(field.iterchildren()).pop()ret.append(temp)return ret", "docstring": "Helper method for fetching a list(map) value.", "id": "f13081:c0:m10"} {"signature": "@asyncio.coroutinedef collect_labels(self, items):", "body": "if items is None:return []return [str(item['']) for item in items if item['']]", "docstring": "Helper methods for extracting the labels from a list with maps.", "id": "f13081:c0:m11"} {"signature": "@asyncio.coroutinedef get_friendly_name(self):", "body": "return (yield from self.handle_text(self.API.get('')))", "docstring": "Get the friendly name of the device.", "id": "f13081:c0:m12"} {"signature": "@asyncio.coroutinedef set_friendly_name(self, value):", "body": "return (yield from self.handle_set(self.API.get(''), value))", "docstring": "Set the friendly name of the device.", "id": "f13081:c0:m13"} {"signature": "@asyncio.coroutinedef get_power(self):", "body": "power = (yield from self.handle_int(self.API.get('')))return bool(power)", "docstring": "Check if the device is on.", "id": "f13081:c0:m14"} {"signature": "@asyncio.coroutinedef set_power(self, value=False):", "body": "power = (yield from self.handle_set(self.API.get(''), int(value)))return bool(power)", "docstring": "Power on or off the device.", "id": "f13081:c0:m15"} {"signature": "@asyncio.coroutinedef get_modes(self):", "body": "if not self.__modes:self.__modes = yield from self.handle_list(self.API.get(''))return self.__modes", "docstring": "Get the modes supported by this device.", "id": "f13081:c0:m16"} {"signature": "@asyncio.coroutinedef get_mode_list(self):", "body": "self.__modes = yield from self.get_modes()return (yield from self.collect_labels(self.__modes))", "docstring": "Get the label list of the supported modes.", "id": "f13081:c0:m17"} {"signature": "@asyncio.coroutinedef get_mode(self):", "body": "mode = Noneint_mode = (yield from self.handle_long(self.API.get('')))modes = yield from self.get_modes()for temp_mode in modes:if temp_mode[''] == int_mode:mode = temp_mode['']return str(mode)", "docstring": "Get the currently active mode on the device (DAB, FM, Spotify).", "id": "f13081:c0:m18"} {"signature": "@asyncio.coroutinedef set_mode(self, value):", "body": "mode = -modes = yield from self.get_modes()for temp_mode in modes:if temp_mode[''] == value:mode = temp_mode['']return (yield from self.handle_set(self.API.get(''), mode))", "docstring": "Set the currently active mode on the device (DAB, FM, Spotify).", "id": "f13081:c0:m19"} {"signature": "@asyncio.coroutinedef get_volume_steps(self):", "body": "if not self.__volume_steps:self.__volume_steps = yield from self.handle_int(self.API.get(''))return self.__volume_steps", "docstring": "Read the maximum volume level of the device.", "id": "f13081:c0:m20"} {"signature": "@asyncio.coroutinedef get_volume(self):", "body": "return (yield from self.handle_int(self.API.get('')))", "docstring": "Read the volume level of the device.", "id": "f13081:c0:m21"} {"signature": "@asyncio.coroutinedef set_volume(self, value):", "body": "return (yield from self.handle_set(self.API.get(''), value))", "docstring": "Set the volume level of the device.", "id": "f13081:c0:m22"} 
{"signature": "@asyncio.coroutinedef get_mute(self):", "body": "mute = (yield from self.handle_int(self.API.get('')))return bool(mute)", "docstring": "Check if the device is muted.", "id": "f13081:c0:m23"} {"signature": "@asyncio.coroutinedef set_mute(self, value=False):", "body": "mute = (yield from self.handle_set(self.API.get(''), int(value)))return bool(mute)", "docstring": "Mute or unmute the device.", "id": "f13081:c0:m24"} {"signature": "@asyncio.coroutinedef get_play_status(self):", "body": "status = yield from self.handle_int(self.API.get(''))return self.PLAY_STATES.get(status)", "docstring": "Get the play status of the device.", "id": "f13081:c0:m25"} {"signature": "@asyncio.coroutinedef get_play_name(self):", "body": "return (yield from self.handle_text(self.API.get('')))", "docstring": "Get the name of the played item.", "id": "f13081:c0:m26"} {"signature": "@asyncio.coroutinedef get_play_text(self):", "body": "return (yield from self.handle_text(self.API.get('')))", "docstring": "Get the text associated with the played media.", "id": "f13081:c0:m27"} {"signature": "@asyncio.coroutinedef get_play_artist(self):", "body": "return (yield from self.handle_text(self.API.get('')))", "docstring": "Get the artists of the current media(song).", "id": "f13081:c0:m28"} {"signature": "@asyncio.coroutinedef get_play_album(self):", "body": "return (yield from self.handle_text(self.API.get('')))", "docstring": "Get the songs's album.", "id": "f13081:c0:m29"} {"signature": "@asyncio.coroutinedef get_play_graphic(self):", "body": "return (yield from self.handle_text(self.API.get('')))", "docstring": "Get the album art associated with the song/album/artist.", "id": "f13081:c0:m30"} {"signature": "@asyncio.coroutinedef get_play_duration(self):", "body": "return (yield from self.handle_long(self.API.get('')))", "docstring": "Get the duration of the played media.", "id": "f13081:c0:m31"} {"signature": "@asyncio.coroutinedef play_control(self, value):", "body": "return (yield from self.handle_set(self.API.get(''), value))", "docstring": "Control the player of the device.\n\n1=Play; 2=Pause; 3=Next; 4=Previous (song/station)", "id": "f13081:c0:m32"} {"signature": "@asyncio.coroutinedef play(self):", "body": "return (yield from self.play_control())", "docstring": "Play media.", "id": "f13081:c0:m33"} {"signature": "@asyncio.coroutinedef pause(self):", "body": "return (yield from self.play_control())", "docstring": "Pause playing.", "id": "f13081:c0:m34"} {"signature": "@asyncio.coroutinedef forward(self):", "body": "return (yield from self.play_control())", "docstring": "Next media.", "id": "f13081:c0:m35"} {"signature": "@asyncio.coroutinedef rewind(self):", "body": "return (yield from self.play_control())", "docstring": "Previous media.", "id": "f13081:c0:m36"} {"signature": "@asyncio.coroutinedef get_equalisers(self):", "body": "if not self.__equalisers:self.__equalisers = yield from self.handle_list(self.API.get(''))return self.__equalisers", "docstring": "Get the equaliser modes supported by this device.", "id": "f13081:c0:m37"} {"signature": "@asyncio.coroutinedef get_equaliser_list(self):", "body": "self.__equalisers = yield from self.get_equalisers()return (yield from self.collect_labels(self.__equalisers))", "docstring": "Get the label list of the supported modes.", "id": "f13081:c0:m38"} {"signature": "@asyncio.coroutinedef get_sleep(self):", "body": "return (yield from self.handle_long(self.API.get('')))", "docstring": "Check when and if the device is going to sleep.", "id": 
"f13081:c0:m39"} {"signature": "@asyncio.coroutinedef set_sleep(self, value=False):", "body": "return (yield from self.handle_set(self.API.get(''), int(value)))", "docstring": "Set device sleep timer.", "id": "f13081:c0:m40"} {"signature": "def shortDescription(self):", "body": "doc = self.id()[self.id().rfind('')+:]return \"\".format(self.__class__.__name__, doc)", "docstring": "Get's the one liner description to be displayed.\nSource:\nhttp://erikzaadi.com/2012/09/13/inheritance-within-python-unit-tests/", "id": "f13084:c0:m0"} {"signature": "def iter_verbs(self, c):", "body": "for verb in ['', '', '']:yield getattr(c, verb)", "docstring": "A simple helper method to iterate through a range of\n HTTP Verbs and return the test_client bound instance,\n keeping writing our tests as DRY as possible.", "id": "f13084:c0:m1"} {"signature": "def cross_origin(*args, **kwargs):", "body": "_options = kwargsdef decorator(f):LOG.debug(\"\", f, _options)if _options.get('', True):f.required_methods = getattr(f, '', set())f.required_methods.add('')f.provide_automatic_options = Falsedef wrapped_function(*args, **kwargs):options = get_cors_options(current_app, _options)if options.get('') and request.method == '':resp = current_app.make_default_options_response()else:resp = make_response(f(*args, **kwargs))set_cors_headers(resp, options)setattr(resp, FLASK_CORS_EVALUATED, True)return respreturn update_wrapper(wrapped_function, f)return decorator", "docstring": "This function is the decorator which is used to wrap a Flask route with.\nIn the simplest case, simply use the default parameters to allow all\norigins in what is the most permissive configuration. If this method\nmodifies state or performs authentication which may be brute-forced, you\nshould add some degree of protection, such as Cross Site Forgery\nRequest protection.\n\n:param origins:\n The origin, or list of origins to allow requests from.\n The origin(s) may be regular expressions, case-sensitive strings,\n or else an asterisk\n\n Default : '*'\n:type origins: list, string or regex\n\n:param methods:\n The method or list of methods which the allowed origins are allowed to\n access for non-simple requests.\n\n Default : [GET, HEAD, POST, OPTIONS, PUT, PATCH, DELETE]\n:type methods: list or string\n\n:param expose_headers:\n The header or list which are safe to expose to the API of a CORS API\n specification.\n\n Default : None\n:type expose_headers: list or string\n\n:param allow_headers:\n The header or list of header field names which can be used when this\n resource is accessed by allowed origins. The header(s) may be regular\n expressions, case-sensitive strings, or else an asterisk.\n\n Default : '*', allow all headers\n:type allow_headers: list, string or regex\n\n:param supports_credentials:\n Allows users to make authenticated requests. If true, injects the\n `Access-Control-Allow-Credentials` header in responses. This allows\n cookies and credentials to be submitted across domains.\n\n :note: This option cannot be used in conjuction with a '*' origin\n\n Default : False\n:type supports_credentials: bool\n\n:param max_age:\n The maximum time for which this CORS request maybe cached. 
This value\n is set as the `Access-Control-Max-Age` header.\n\n Default : None\n:type max_age: timedelta, integer, string or None\n\n:param send_wildcard: If True, and the origins parameter is `*`, a wildcard\n `Access-Control-Allow-Origin` header is sent, rather than the\n request's `Origin` header.\n\n Default : False\n:type send_wildcard: bool\n\n:param vary_header:\n If True, the header Vary: Origin will be returned as per the W3\n implementation guidelines.\n\n Setting this header when the `Access-Control-Allow-Origin` is\n dynamically generated (e.g. when there is more than one allowed\n origin, and an Origin than '*' is returned) informs CDNs and other\n caches that the CORS headers are dynamic, and cannot be cached.\n\n If False, the Vary header will never be injected or altered.\n\n Default : True\n:type vary_header: bool\n\n:param automatic_options:\n Only applies to the `cross_origin` decorator. If True, Flask-CORS will\n override Flask's default OPTIONS handling to return CORS headers for\n OPTIONS requests.\n\n Default : True\n:type automatic_options: bool", "id": "f13102:m0"} {"signature": "def get_regexp_pattern(regexp):", "body": "try:return regexp.patternexcept AttributeError:return str(regexp)", "docstring": "Helper that returns regexp pattern from given value.\n\n:param regexp: regular expression to stringify\n:type regexp: _sre.SRE_Pattern or str\n:returns: string representation of given regexp pattern\n:rtype: str", "id": "f13104:m1"} {"signature": "def set_cors_headers(resp, options):", "body": "if hasattr(resp, FLASK_CORS_EVALUATED):LOG.debug('')return respif (not isinstance(resp.headers, Headers)and not isinstance(resp.headers, MultiDict)):resp.headers = MultiDict(resp.headers)headers_to_set = get_cors_headers(options, request.headers, request.method)LOG.debug('', str(headers_to_set))for k, v in headers_to_set.items():resp.headers.add(k, v)return resp", "docstring": "Performs the actual evaluation of Flas-CORS options and actually\nmodifies the response object.\n\nThis function is used both in the decorator and the after_request\ncallback", "id": "f13104:m5"} {"signature": "def re_fix(reg):", "body": "return r'' if reg == r'' else reg", "docstring": "Replace the invalid regex r'*' with the valid, wildcard regex r'/.*' to\nenable the CORS app extension to have a more user friendly api.", "id": "f13104:m7"} {"signature": "def try_match(request_origin, maybe_regex):", "body": "if isinstance(maybe_regex, RegexObject):return re.match(maybe_regex, request_origin)elif probably_regex(maybe_regex):return re.match(maybe_regex, request_origin, flags=re.IGNORECASE)else:try:return request_origin.lower() == maybe_regex.lower()except AttributeError:return request_origin == maybe_regex", "docstring": "Safely attempts to match a pattern or string to a request origin.", "id": "f13104:m9"} {"signature": "def get_cors_options(appInstance, *dicts):", "body": "options = DEFAULT_OPTIONS.copy()options.update(get_app_kwarg_dict(appInstance))if dicts:for d in dicts:options.update(d)return serialize_options(options)", "docstring": "Compute CORS options for an application by combining the DEFAULT_OPTIONS,\nthe app's configuration-specified options and any dictionaries passed. 
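get_cors_options is described as layering DEFAULT_OPTIONS, the app-level configuration and any per-call dicts, with later sources overriding earlier ones. A small illustrative sketch of that precedence (the helper name and the app-config argument are simplifications, and the option values are placeholders):

DEFAULT_OPTIONS = {'origins': '*', 'supports_credentials': False}

def merge_options(app_config_options, *dicts):
    # Later sources override earlier ones; the last specified option wins.
    options = DEFAULT_OPTIONS.copy()
    options.update(app_config_options)
    for d in dicts:
        options.update(d)
    return options

merged = merge_options({'origins': 'https://example.com'}, {'supports_credentials': True})
assert merged['origins'] == 'https://example.com'
assert merged['supports_credentials'] is True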
The\nlast specified option wins.", "id": "f13104:m10"} {"signature": "def get_app_kwarg_dict(appInstance=None):", "body": "app = (appInstance or current_app)app_config = getattr(app, '', {})return {k.lower().replace('', ''): app_config.get(k)for k in CONFIG_OPTIONSif app_config.get(k) is not None}", "docstring": "Returns the dictionary of CORS specific app configurations.", "id": "f13104:m11"} {"signature": "def flexible_str(obj):", "body": "if obj is None:return Noneelif(not isinstance(obj, string_types)and isinstance(obj, collections.Iterable)):return ''.join(str(item) for item in sorted(obj))else:return str(obj)", "docstring": "A more flexible str function which intelligently handles stringifying\nstrings, lists and other iterables. The results are lexographically sorted\nto ensure generated responses are consistent when iterables such as Set\nare used.", "id": "f13104:m12"} {"signature": "def ensure_iterable(inst):", "body": "if isinstance(inst, string_types):return [inst]elif not isinstance(inst, collections.Iterable):return [inst]else:return inst", "docstring": "Wraps scalars or string types as a list, or returns the iterable instance.", "id": "f13104:m14"} {"signature": "def serialize_options(opts):", "body": "options = (opts or {}).copy()for key in opts.keys():if key not in DEFAULT_OPTIONS:LOG.warning(\"\", key)options[''] = sanitize_regex_param(options.get(''))options[''] = sanitize_regex_param(options.get(''))if r'' in options[''] and options[''] and options['']:raise ValueError(\"\"\"\"\"\")serialize_option(options, '')serialize_option(options, '', upper=True)if isinstance(options.get(''), timedelta):options[''] = str(int(options[''].total_seconds()))return options", "docstring": "A helper method to serialize and processes the options dictionary.", "id": "f13104:m16"} {"signature": "@app.route(\"\", methods=[''])@cross_origin()def helloWorld():", "body": "return ''''''", "docstring": "This view has CORS enabled for all domains, representing the simplest\nconfiguration of view-based decoration. The expected result is as\nfollows:\n\n$ curl --include -X GET http://127.0.0.1:5000/ \\\n --header Origin:www.examplesite.com\n\n>> HTTP/1.0 200 OK\nContent-Type: text/html; charset=utf-8\nContent-Length: 184\nAccess-Control-Allow-Origin: *\nServer: Werkzeug/0.9.6 Python/2.7.9\nDate: Sat, 31 Jan 2015 22:29:56 GMT\n\n

    Hello CORS!

    Read about my spec at the\nW3 Or, checkout my documentation\non Github", "id": "f13106:m0"} {"signature": "@app.route(\"\", methods=['', ''])@cross_origin(allow_headers=[''])def cross_origin_json_post():", "body": "return jsonify(success=True)", "docstring": "This view has CORS enabled for all domains, and allows browsers\n to send the Content-Type header, allowing cross domain AJAX POST\n requests.\n\nBrowsers will first make a preflight request to verify that the resource\n allows cross-origin POSTs with a JSON Content-Type, which can be simulated\n as:\n $ curl --include -X OPTIONS http://127.0.0.1:5000/api/v1/users/create \\\n --header Access-Control-Request-Method:POST \\\n --header Access-Control-Request-Headers:Content-Type \\\n --header Origin:www.examplesite.com\n >> HTTP/1.0 200 OK\n Content-Type: text/html; charset=utf-8\n Allow: POST, OPTIONS\n Access-Control-Allow-Origin: *\n Access-Control-Allow-Headers: Content-Type\n Access-Control-Allow-Methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT\n Content-Length: 0\n Server: Werkzeug/0.9.6 Python/2.7.9\n Date: Sat, 31 Jan 2015 22:25:22 GMT\n\n\n $ curl --include -X POST http://127.0.0.1:5000/api/v1/users/create \\\n --header Content-Type:application/json \\\n --header Origin:www.examplesite.com\n\n\n >> HTTP/1.0 200 OK\n Content-Type: application/json\n Content-Length: 21\n Access-Control-Allow-Origin: *\n Server: Werkzeug/0.9.6 Python/2.7.9\n Date: Sat, 31 Jan 2015 22:25:04 GMT\n\n {\n \"success\": true\n }", "id": "f13106:m1"} {"signature": "@app.route(\"\")def helloWorld():", "body": "return", "docstring": "Since the path '/' does not match the regular expression r'/api/*',\nthis route does not have CORS headers set.", "id": "f13107:m0"} {"signature": "@app.route(\"\")def list_users():", "body": "return jsonify(user=\"\")", "docstring": "Since the path matches the regular expression r'/api/*', this resource\nautomatically has CORS headers set. 
The expected result is as follows:\n\n$ curl --include -X GET http://127.0.0.1:5000/api/v1/users/ \\\n --header Origin:www.examplesite.com\nHTTP/1.0 200 OK\nAccess-Control-Allow-Headers: Content-Type\nAccess-Control-Allow-Origin: *\nContent-Length: 21\nContent-Type: application/json\nDate: Sat, 09 Aug 2014 00:26:41 GMT\nServer: Werkzeug/0.9.4 Python/2.7.8\n\n{\n \"success\": true\n}", "id": "f13107:m1"} {"signature": "@app.route(\"\", methods=[''])def create_user():", "body": "return jsonify(success=True)", "docstring": "Since the path matches the regular expression r'/api/*', this resource\nautomatically has CORS headers set.\n\nBrowsers will first make a preflight request to verify that the resource\nallows cross-origin POSTs with a JSON Content-Type, which can be simulated\nas:\n$ curl --include -X OPTIONS http://127.0.0.1:5000/api/v1/users/create \\\n --header Access-Control-Request-Method:POST \\\n --header Access-Control-Request-Headers:Content-Type \\\n --header Origin:www.examplesite.com\n>> HTTP/1.0 200 OK\nContent-Type: text/html; charset=utf-8\nAllow: POST, OPTIONS\nAccess-Control-Allow-Origin: *\nAccess-Control-Allow-Headers: Content-Type\nAccess-Control-Allow-Methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT\nContent-Length: 0\nServer: Werkzeug/0.9.6 Python/2.7.9\nDate: Sat, 31 Jan 2015 22:25:22 GMT\n\n\n$ curl --include -X POST http://127.0.0.1:5000/api/v1/users/create \\\n --header Content-Type:application/json \\\n --header Origin:www.examplesite.com\n\n\n>> HTTP/1.0 200 OK\nContent-Type: application/json\nContent-Length: 21\nAccess-Control-Allow-Origin: *\nServer: Werkzeug/0.9.6 Python/2.7.9\nDate: Sat, 31 Jan 2015 22:25:04 GMT\n\n{\n \"success\": true\n}", "id": "f13107:m2"} {"signature": "@app.route(\"\")def get_exception():", "body": "raise Exception(\"\")", "docstring": "Since the path matches the regular expression r'/api/*', this resource\nautomatically has CORS headers set.\n\nBrowsers will first make a preflight request to verify that the resource\nallows cross-origin POSTs with a JSON Content-Type, which can be simulated\nas:\n$ curl --include -X OPTIONS http://127.0.0.1:5000/exception \\\n --header Access-Control-Request-Method:POST \\\n --header Access-Control-Request-Headers:Content-Type \\\n --header Origin:www.examplesite.com\n>> HTTP/1.0 200 OK\nContent-Type: text/html; charset=utf-8\nAllow: POST, OPTIONS\nAccess-Control-Allow-Origin: *\nAccess-Control-Allow-Headers: Content-Type\nAccess-Control-Allow-Methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT\nContent-Length: 0\nServer: Werkzeug/0.9.6 Python/2.7.9\nDate: Sat, 31 Jan 2015 22:25:22 GMT", "id": "f13107:m3"} {"signature": "@api_v1.route(\"\")def list_users():", "body": "return jsonify(user=\"\")", "docstring": "Since the path matches the regular expression r'/api/*', this resource\nautomatically has CORS headers set. 
The expected result is as follows:\n\n$ curl --include -X GET http://127.0.0.1:5000/api/v1/users/ \\\n --header Origin:www.examplesite.com\nHTTP/1.0 200 OK\nAccess-Control-Allow-Headers: Content-Type\nAccess-Control-Allow-Origin: *\nContent-Length: 21\nContent-Type: application/json\nDate: Sat, 09 Aug 2014 00:26:41 GMT\nServer: Werkzeug/0.9.4 Python/2.7.8\n\n{\n \"success\": true\n}", "id": "f13108:m0"} {"signature": "@api_v1.route(\"\", methods=[''])def create_user():", "body": "return jsonify(success=True)", "docstring": "Since the path matches the regular expression r'/api/*', this resource\nautomatically has CORS headers set.\n\nBrowsers will first make a preflight request to verify that the resource\nallows cross-origin POSTs with a JSON Content-Type, which can be simulated\nas:\n$ curl --include -X OPTIONS http://127.0.0.1:5000/api/v1/users/create \\\n --header Access-Control-Request-Method:POST \\\n --header Access-Control-Request-Headers:Content-Type \\\n --header Origin:www.examplesite.com\n>> HTTP/1.0 200 OK\nContent-Type: text/html; charset=utf-8\nAllow: POST, OPTIONS\nAccess-Control-Allow-Origin: *\nAccess-Control-Allow-Headers: Content-Type\nAccess-Control-Allow-Methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT\nContent-Length: 0\nServer: Werkzeug/0.9.6 Python/2.7.9\nDate: Sat, 31 Jan 2015 22:25:22 GMT\n\n\n$ curl --include -X POST http://127.0.0.1:5000/api/v1/users/create \\\n --header Content-Type:application/json \\\n --header Origin:www.examplesite.com\n\n\n>> HTTP/1.0 200 OK\nContent-Type: application/json\nContent-Length: 21\nAccess-Control-Allow-Origin: *\nServer: Werkzeug/0.9.6 Python/2.7.9\nDate: Sat, 31 Jan 2015 22:25:04 GMT\n\n{\n \"success\": true\n}", "id": "f13108:m1"} {"signature": "@public_routes.route(\"\")def helloWorld():", "body": "return ''''''", "docstring": "Since the path '/' does not match the regular expression r'/api/*',\nthis route does not have CORS headers set.", "id": "f13108:m2"} {"signature": "def write_data_to_files_in_temp_directory(data: List[Any], spread_over_n_files: int, separator: str='', dir: str=None,file_prefix=\"\") -> str:", "body": "if dir is None:dir = mkdtemp(suffix=write_data_to_files_in_temp_directory.__name__)datum_per_file = ceil(len(data) / spread_over_n_files)for i in range(spread_over_n_files):start_at = i * datum_per_fileend_at = start_at + datum_per_fileto_write = separator.join([str(x) for x in data[start_at:end_at]])write_to_temp_file(dir, to_write, file_prefix=file_prefix)return dir", "docstring": "Writes the given data over the given number of files in a temporary directory.\n:param data: the data that is to be written to the files\n:param spread_over_n_files: the number of files in which the data is to be spread over\n:param separator: the separator between data items in each file\n:param dir: the specific temp directory to use\n:param file_prefix: prefix to the files created\n:return: the location of the temp directory", "id": "f13111:m0"} {"signature": "def write_to_temp_file(dir: str, contents: str, file_prefix=\"\") -> str:", "body": "temp_temp_file_location = mkstemp(dir=dir, prefix=file_prefix)[]destination = os.path.join(dir, os.path.basename(temp_temp_file_location))with open(temp_temp_file_location, '') as file:file.write(contents)os.rename(temp_temp_file_location, destination)return destination", "docstring": "Writes the given contents to a temp file, with the given name prefix, within the given directory.\n:param dir: the directory to place the temp file in\n:param contents: the contents of the temp 
file\n:param file_prefix: (optional) name prefix of the temp file\n:return: the path to the created temp file", "id": "f13111:m1"} {"signature": "def extract_data_from_file(file_location: str, parser: Callable[[str], Any]=lambda data: data, separator: str=None)-> List[Any]:", "body": "with open(file_location, '') as file:contents = file.read()if separator is not None:raw_data = contents.split(separator)else:raw_data = [contents]extracted = []for item in raw_data:parsed = parser(item)extracted.append(parsed)return extracted", "docstring": "Extracts data from the file at the given location, using the given parser.\n:param file_location: the location of the file to read data from\n:param parser: the parser to extract data from the file\n:param separator: (optional) separator for data in the file\n:return: the extracted data", "id": "f13111:m2"} {"signature": "def block_until_synchronised_files_data_source_started(source: SynchronisedFilesDataSource):", "body": "blocked = Truedef unblock(*args):nonlocal blockedblocked = Falseevent_handler = FileSystemEventHandler()event_handler.on_modified = unblocksource._observer.schedule(event_handler, source._directory_location, recursive=True)temp_file_name = \"\" % block_until_synchronised_files_data_source_started.__name__temp_file_path = os.path.join(source._directory_location, temp_file_name)i = while blocked:with open(temp_file_path, '') as file:file.write(str(i))sleep( / )i += ", "docstring": "Blocks until the given synchronised files data source has started to notice changes in the file system (may be a few\nmilliseconds after it has been started).\n:param source: the synchronised files data source that has been started", "id": "f13113:m0"} {"signature": "def _add_more_data_in_nested_directory(self, number_of_extra_files: int=) -> Tuple[str, List[int]]:", "body": "nested_directory_path = os.path.join(self.temp_directory, \"\")os.makedirs(nested_directory_path)more_data = [i for i in range()]write_data_to_files_in_temp_directory(more_data, number_of_extra_files, dir=nested_directory_path,file_prefix=TestSynchronisedFilesDataSource._FILE_PREFIX)return (nested_directory_path, more_data)", "docstring": "Adds more data in a directory nested inside the temp directory.\n:param number_of_extra_files: (optional) the number of files to put the new data in inside the nested directory\n:return: a tuple where the first value is the path to the new nested directory and the second is the new data", "id": "f13116:c1:m11"} {"signature": "def _create_data_file_in_temp_directory(self) -> str:", "body": "temp_file_location = mkstemp()[]rule_file_location = \"\" % temp_file_locationos.rename(temp_file_location, rule_file_location)return rule_file_location", "docstring": "Creates a data file in the temp directory used by this test.\n:return: the file path of the created file", "id": "f13117:c1:m5"} {"signature": "@abstractmethoddef this_will_fail_unless(self, condition:bool) -> bool:", "body": "", "docstring": "This method will raise an exception unless condition is True", "id": "f13121:c1:m0"} {"signature": "@staticmethoddef _add_n_mock_listeners_to_listenable(number_of_listeners_to_add: int, listenable: Listenable) -> List[MagicMock]:", "body": "listeners = []for i in range(number_of_listeners_to_add):listener = MagicMock()listenable.add_listener(listener)listeners.append(listener)return listeners", "docstring": "Adds the given number of mock listeners to the given listenable.\n:param number_of_listeners_to_add: the number of mock listeners to add\n:param listenable: 
the listenable to add the listeners to\n:return: the mock listeners that were added", "id": "f13125:c0:m6"} {"signature": "def __init__(self, seq=()):", "body": "self._data = dict(seq)self._key_locks = ThreadSafeDefaultdict(Lock)", "docstring": "Constructor.\n:param seq: initial metadata items", "id": "f13127:c1:m0"} {"signature": "def rename(self, key: Any, new_key: Any):", "body": "if new_key == key:returnrequired_locks = [self._key_locks[key], self._key_locks[new_key]]ordered_required_locks = sorted(required_locks, key=lambda x: id(x))for lock in ordered_required_locks:lock.acquire()try:if key not in self._data:raise KeyError(\"\" % key)self._data[new_key] = self[key]del self._data[key]finally:for lock in required_locks:lock.release()", "docstring": "Renames an item in this collection as a transaction.\n\nWill override if new key name already exists.\n:param key: the current name of the item\n:param new_key: the new name that the item should have", "id": "f13127:c1:m1"} {"signature": "def register(registerable: Any):", "body": "listenable = registration_event_listenable_map[type(registerable)]event = RegistrationEvent(registerable, RegistrationEvent.Type.REGISTERED)listenable.notify_listeners(event)", "docstring": "Registers an object, notifying any listeners that may be interested in it.\n:param registerable: the object to register", "id": "f13128:m0"} {"signature": "def unregister(registerable: Any):", "body": "listenable = registration_event_listenable_map[type(registerable)]event = RegistrationEvent(registerable, RegistrationEvent.Type.UNREGISTERED)listenable.notify_listeners(event)", "docstring": "Unregisters an object, notifying any listeners that may be interested in it.\n:param registerable: the object to unregister", "id": "f13128:m1"} {"signature": "def __init__(self, directory_location: str, data_type: type):", "body": "super().__init__(directory_location)self._data_type = data_type", "docstring": "Constructor.\n:param directory_location: the location of the directory\n:param data_type: the type of data that is loaded from files in the given directory", "id": "f13128:c0:m0"} {"signature": "@staticmethoddef _load_module(path: str):", "body": "spec = spec_from_file_location(os.path.basename(path), path)module = module_from_spec(spec)spec.loader.exec_module(module)", "docstring": "Dynamically loads the python module at the given path.\n:param path: the path to load the module from", "id": "f13128:c0:m2"} {"signature": "def __init__(self, sources: Iterable[DataSource]=()):", "body": "self.sources = copy.copy(sources)", "docstring": "Constructor.\n:param sources: the sources of instances of `DataSourceType`", "id": "f13129:c0:m0"} {"signature": "@abstractmethoddef get_all(self) -> Sequence[DataSourceType]:", "body": "", "docstring": "Gets the data at the source.\n:return: instances of `DataSourceType`", "id": "f13130:c0:m0"} {"signature": "def __init__(self, directory_location: str):", "body": "super().__init__()self._directory_location = directory_location", "docstring": "Default constructor.\n:param directory_location: the location of the directory that contains files holding data", "id": "f13132:c0:m0"} {"signature": "@abstractmethoddef extract_data_from_file(self, file_path: str) -> Iterable[DataSourceType]:", "body": "", "docstring": "Extracts data from the file at the given file path.\n:param file_path: the path to the file to extract data from\n:return: the extracted data", "id": "f13132:c0:m1"} {"signature": "@abstractmethoddef is_data_file(self, file_path: str) -> bool:", 
"body": "", "docstring": "Determines whether the file at the given path is of interest.\n:param file_path: path to the updated file\n:return: whether the file is of interest", "id": "f13132:c0:m2"} {"signature": "def no_error_extract_data_from_file(self, file_path: str) -> Iterable[DataSourceType]:", "body": "try:return self.extract_data_from_file(file_path)except Exception as e:logging.warning(e)return []", "docstring": "Proxy for `extract_data_from_file` that suppresses any errors and instead just returning an empty list.\n:param file_path: see `extract_data_from_file`\n:return: see `extract_data_from_file`", "id": "f13132:c0:m4"} {"signature": "def _load_all_in_directory(self) -> Dict[str, Iterable[DataSourceType]]:", "body": "origin_mapped_data = dict() for file_path in glob.iglob(\"\" % self._directory_location, recursive=True):if self.is_data_file(file_path):origin_mapped_data[file_path] = self.no_error_extract_data_from_file(file_path)return origin_mapped_data", "docstring": "Loads all of the data from the files in directory location.\n:return: a origin map of all the loaded data", "id": "f13132:c0:m5"} {"signature": "@staticmethoddef _extract_data_from_origin_map(origin_mapped_data: Dict[str, Iterable[DataSourceType]])-> Iterable[DataSourceType]:", "body": "data = []for _, data_item in origin_mapped_data.items():data.extend(data_item)return data", "docstring": "Extracts the data from a data origin map.\n:param origin_mapped_data: a map containing the origin of the data as the key string and the data as the value\n:return: the data contained within the map", "id": "f13132:c0:m6"} {"signature": "def __init__(self, directory_location: str):", "body": "super().__init__(directory_location)self._status_lock = Lock()self._running = Falseself._observer = Noneself._origin_mapped_data = dict() self._event_handler = FileSystemEventHandler()self._event_handler.on_created = self._on_file_createdself._event_handler.on_modified = self._on_file_modifiedself._event_handler.on_deleted = self._on_file_deletedself._event_handler.on_moved = self._on_file_movedself._event_handler.on_any_event = SynchronisedFilesDataSource._on_any_event", "docstring": "Default constructor.\n:param directory_location: the location of the directory that contains files holding data", "id": "f13132:c2:m0"} {"signature": "def start(self):", "body": "with self._status_lock:if self._running:raise RuntimeError(\"\")self._running = Trueself._observer = Observer()self._observer.schedule(self._event_handler, self._directory_location, recursive=True)self._observer.start()self._origin_mapped_data = self._load_all_in_directory()", "docstring": "Monitors data kept in files in the predefined directory in a new thread.\n\nNote: Due to the underlying library, it may take a few milliseconds after this method is started for changes to\nstart to being noticed.", "id": "f13132:c2:m2"} {"signature": "def stop(self):", "body": "with self._status_lock:if self._running:assert self._observer is not Noneself._observer.stop()self._running = Falseself._origin_mapped_data = dict()", "docstring": "Stops monitoring the predefined directory.", "id": "f13132:c2:m3"} {"signature": "def _on_file_created(self, event: FileSystemEvent):", "body": "if not event.is_directory and self.is_data_file(event.src_path):assert event.src_path not in self._origin_mapped_dataself._origin_mapped_data[event.src_path] = self.no_error_extract_data_from_file(event.src_path)self.notify_listeners(FileSystemChange.CREATE)", "docstring": "Called when a file in the monitored 
directory has been created.\n:param event: the file system event", "id": "f13132:c2:m4"} {"signature": "def _on_file_modified(self, event: FileSystemEvent):", "body": "if not event.is_directory and self.is_data_file(event.src_path):assert event.src_path in self._origin_mapped_dataself._origin_mapped_data[event.src_path] = self.no_error_extract_data_from_file(event.src_path)self.notify_listeners(FileSystemChange.MODIFY)", "docstring": "Called when a file in the monitored directory has been modified.\n:param event: the file system event", "id": "f13132:c2:m5"} {"signature": "def _on_file_deleted(self, event: FileSystemEvent):", "body": "if not event.is_directory and self.is_data_file(event.src_path):assert event.src_path in self._origin_mapped_datadel(self._origin_mapped_data[event.src_path])self.notify_listeners(FileSystemChange.DELETE)", "docstring": "Called when a file in the monitored directory has been deleted.\n:param event: the file system event", "id": "f13132:c2:m6"} {"signature": "def _on_file_moved(self, event: FileSystemMovedEvent):", "body": "if not event.is_directory and self.is_data_file(event.src_path):delete_event = FileSystemEvent(event.src_path)delete_event.event_type = EVENT_TYPE_DELETEDself._on_file_deleted(delete_event)create_event = FileSystemEvent(event.dest_path)create_event.event_type = EVENT_TYPE_CREATEDself._on_file_created(create_event)", "docstring": "Called when a file in the monitored directory has been moved.\n\nBreaks move down into a delete and a create (which it is sometimes detected as!).\n:param event: the file system event", "id": "f13132:c2:m7"} {"signature": "@staticmethoddef _on_any_event(event: FileSystemEvent):", "body": "logging.debug(\"\" % event)", "docstring": "Called when any file system event was detected.\n:param event: the detected event", "id": "f13132:c2:m8"} {"signature": "def _create_client(base_url: str, tls: TLSConfig=False) -> Optional[APIClient]:", "body": "try:client = APIClient(base_url=base_url, tls=tls, version=\"\")return client if client.ping() else Noneexcept:return None", "docstring": "Creates a Docker client with the given details.\n:param base_url: the base URL of the Docker daemon\n:param tls: the Docker daemon's TLS config (if any)\n:return: the created client else None if unable to connect the client to the daemon", "id": "f13133:m0"} {"signature": "def create_client() -> APIClient:", "body": "global _clientclient = _client()if client is None:docker_environment = kwargs_from_env(assert_hostname=False)if \"\" in docker_environment:client = _create_client(docker_environment.get(\"\"), docker_environment.get(\"\"))if client is None:raise ConnectionError(\"\"% docker_environment)else:logging.info(\"\")else:client = _create_client(\"\")if client is not None:logging.info(\"\")else:raise ConnectionError(\"\"\"\")_client = weakref.ref(client)assert isinstance(client, APIClient)return client", "docstring": "Creates a Docker client.\n\nWill raise a `ConnectionError` if the Docker daemon is not accessible.\n:return: the Docker client", "id": "f13133:m1"} {"signature": "def __init__(self, default_mkdtemp_kwargs: dict=None, default_mkstemp_kwargs: dict=None):", "body": "self.default_mkdtemp_kwargs = default_mkdtemp_kwargs if default_mkdtemp_kwargs is not None else {}self.default_mkstemp_kwargs = default_mkstemp_kwargs if default_mkstemp_kwargs is not None else {}self._temp_directories = set() self._temp_files = set() atexit.register(self.tear_down)", "docstring": "Constructor.\n:param default_mkdtemp_kwargs:\n:param 
default_mkstemp_kwargs:", "id": "f13135:c0:m0"} {"signature": "def tear_down(self):", "body": "while len(self._temp_directories) > :directory = self._temp_directories.pop()shutil.rmtree(directory, ignore_errors=True)while len(self._temp_files) > :file = self._temp_files.pop()try:os.remove(file)except OSError:pass", "docstring": "Tears down all temp files and directories.", "id": "f13135:c0:m1"} {"signature": "def create_temp_directory(self, **mkdtemp_kwargs) -> str:", "body": "kwargs = {**self.default_mkdtemp_kwargs, **mkdtemp_kwargs}location = tempfile.mkdtemp(**kwargs)self._temp_directories.add(location)return location", "docstring": "Creates a temp directory.\n:param mkdtemp_kwargs: named arguments to be passed to `tempfile.mkdtemp`\n:return: the location of the temp directory", "id": "f13135:c0:m2"} {"signature": "def create_temp_file(self, **mkstemp_kwargs) -> Tuple[int, str]:", "body": "kwargs = {**self.default_mkstemp_kwargs, **mkstemp_kwargs}handle, location = tempfile.mkstemp(**kwargs)self._temp_files.add(location)return handle, location", "docstring": "Creates a temp file.\n:param mkstemp_kwargs: named arguments to be passed to `tempfile.mkstemp`\n:return: tuple where the first element is the file handle and the second is the location of the temp file", "id": "f13135:c0:m3"} {"signature": "def create_random_string(postfix: str= \"\", prefix: str=\"\") -> str:", "body": "return \"\" % (prefix, uuid4(), postfix)", "docstring": "Creates a random string.\n:param postfix: optional postfix\n:param prefix: optional prefix\n:return: created string", "id": "f13138:m0"} {"signature": "def get_open_port() -> int:", "body": "free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)free_socket.bind((\"\", ))free_socket.listen()port = free_socket.getsockname()[]free_socket.close()return port", "docstring": "Gets a PORT that will (probably) be available on the machine.\nIt is possible that in-between the time in which the open PORT of found and when it is used, another process may\nbind to it instead.\n:return: the (probably) available PORT", "id": "f13138:m1"} {"signature": "def extract_version_number(string: str) -> str:", "body": "matched = _EXTRACT_VERSION_PATTERN.search(string)if matched is None:raise ValueError(\"\")return matched.group().replace(\"\", \"\")", "docstring": "Extracts a version from a string in the form: `.*[0-9]+(_[0-9]+)*.*`, e.g. 
Irods4_1_9CompatibleController.\n\nIf the string contains multiple version numbers, the first (from left) is extracted.\n\nWill raise a `ValueError` if there is no version number in the given string.\n:param string: the string containing the version number\n:return: the extracted version", "id": "f13138:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "self._lock = Lock(*args, **kwargs)self._stat_lock = Lock()self._waiting = self._locked = False self._last_released = datetime.now()", "docstring": "Wraps Lock constructor", "id": "f13142:c0:m0"} {"signature": "def acquire(self, *args, **kwargs):", "body": "with self._stat_lock:self._waiting += self._lock.acquire(*args, **kwargs)with self._stat_lock:self._locked = Trueself._waiting -= ", "docstring": "Wraps Lock.acquire", "id": "f13142:c0:m1"} {"signature": "def release(self):", "body": "self._lock.release()with self._stat_lock:self._locked = Falseself._last_released = datetime.now()", "docstring": "Wraps Lock.release", "id": "f13142:c0:m2"} {"signature": "def waiting_to_acquire(self) -> int:", "body": "with self._stat_lock:return self._waiting", "docstring": "Return the number of threads waiting to acquire the lock", "id": "f13142:c0:m3"} {"signature": "def is_locked(self) -> bool:", "body": "with self._stat_lock:return self._locked", "docstring": "Is the lock currently acquired", "id": "f13142:c0:m4"} {"signature": "def last_released(self) -> datetime:", "body": "with self._stat_lock:return self._last_released", "docstring": "Return the last lock release time", "id": "f13142:c0:m5"} {"signature": "def __init__(self, target: _RegistrationTarget, event_type: Type):", "body": "self.target = targetself.event_type = event_type", "docstring": "Constructor.\n:param target: the object the event refers to\n:param event_type: the type of update event", "id": "f13143:c2:m0"} {"signature": "def get_listeners(self) -> Sequence[Callable[[_ListenableDataType], None]]:", "body": "return self._listeners", "docstring": "Get all of the registered listeners.\n:return: list of the registered listeners", "id": "f13145:c0:m1"} {"signature": "def add_listener(self, listener: Callable[[_ListenableDataType], None]):", "body": "self._listeners.append(listener)", "docstring": "Adds a listener.\n:param listener: the event listener", "id": "f13145:c0:m2"} {"signature": "def remove_listener(self, listener: Callable[[_ListenableDataType], None]):", "body": "self._listeners.remove(listener)", "docstring": "Removes a listener.\n:param listener: the event listener to remove", "id": "f13145:c0:m3"} {"signature": "def notify_listeners(self, data: Optional[_ListenableDataType]=_NO_DATA_MARKER):", "body": "for listener in self._listeners:if data is not Listenable._NO_DATA_MARKER:listener(data)else:listener()", "docstring": "Notify event listeners, passing them the given data (if any).\n:param data: the data to pass to the event listeners", "id": "f13145:c0:m4"} {"signature": "@staticmethoddef get_higher_priority_value(value: int):", "body": "if value == Priority.MAX_PRIORITY:raise ValueError(\"\")return value - ", "docstring": "Gets a higher priority value than that given.\n\nWill raise a `ValueError` if already highest priority value.\n:param value: gets a higher priority value than this\n:return: the higher priority value", "id": "f13146:c0:m0"} {"signature": "@staticmethoddef get_lower_priority_value(value: int):", "body": "if value == Priority.MIN_PRIORITY:raise ValueError(\"\")return value + ", "docstring": "Gets a lower priority value than that 
given.\n\nWill raise a `ValueError` if already lowest priority value.\n:param value: gets a lower priority value than this\n:return: the lower priority value", "id": "f13146:c0:m1"} {"signature": "@register.filter(is_safe=False)def to_data_string_with_default(value, arg=''):", "body": "if isinstance(value, bool):if value:return ''return ''return value or arg", "docstring": "Given a Python boolean value converts it to string representation so\n we can use it in HTML data attributes. If value is None use given default\n or '' if default is not provided.\n\n ----- ------\n Value Output\n ----- ------\n True \"true\"\n False \"false\"\n None arg", "id": "f13148:m1"} {"signature": "@register.tagdef social_widget_render(parser, token):", "body": "bits = token.split_contents()tag_name = bits[]if len(bits) < :raise TemplateSyntaxError(\"\" %tag_name)args = []kwargs = {}bits = bits[:]if len(bits):for bit in bits:match = kwarg_re.match(bit)if not match:raise TemplateSyntaxError(\"\" %tag_name)name, value = match.groups()if name:name = name.replace('', '')kwargs[name] = parser.compile_filter(value)else:args.append(parser.compile_filter(value))return SocialWidgetNode(args, kwargs)", "docstring": "Renders the selected social widget. You can specify optional settings\n that will be passed to widget template.\n\n Sample usage:\n {% social_widget_render widget_template ke1=val1 key2=val2 %}\n\n For example to render Twitter follow button you can use code like this:\n {% social_widget_render 'twitter/follow_button.html' username=\"ev\" %}", "id": "f13148:m2"} {"signature": "@register.assignment_tagdef social_get_facebook_locale(locale):", "body": "if locale is None:return ''return normalize(locale).split('')[]", "docstring": "Normalize the locale string and split the value needed for the api url", "id": "f13148:m3"} {"signature": "def _subscribe_mock(self, signal_name, weak):", "body": "callback = mock.Mock()decorator = self.model.subscribe(signal_name, weak)return decorator(callback)", "docstring": "Create a mock callback function and subscribe it to the specific\n signal on ``self.model``.", "id": "f13173:c0:m4"} {"signature": "@taskdef mongo(daemon=False, port=):", "body": "cmd = \"\".format(port)if daemon:cmd += \"\"run(cmd)", "docstring": "Run the mongod process.", "id": "f13190:m0"} {"signature": "def find_version(fname):", "body": "version = ''with open(fname, '') as fp:reg = re.compile(r'')for line in fp:m = reg.match(line)if m:version = m.group()breakif not version:raise RuntimeError('')return version", "docstring": "Attempts to find the version number in the file names fname.\n Raises RuntimeError if not found.", "id": "f13191:m1"} {"signature": "def freeze(value):", "body": "if isinstance(value, list):return FrozenList(*value)if isinstance(value, dict):return FrozenDict(**value)return value", "docstring": "Cast value to its frozen counterpart.", "id": "f13196:m0"} {"signature": "def set_nested(data, value, *keys):", "body": "if len(keys) == :data[keys[]] = valueelse:if keys[] not in data:data[keys[]] = {}set_nested(data[keys[]], value, *keys[:])", "docstring": "Assign to a nested dictionary.\n\n :param dict data: Dictionary to mutate\n :param value: Value to set\n :param list *keys: List of nested keys\n\n >>> data = {}\n >>> set_nested(data, 'hi', 'k0', 'k1', 'k2')\n >>> data\n {'k0': {'k1': {'k2': 'hi'}}}", "id": "f13197:m0"} {"signature": "def proxy_factory(BaseSchema, label, ProxiedClass, get_key):", "body": "def local():key = get_key()try:return proxies[BaseSchema][label][key]except 
KeyError:proxies[BaseSchema][label][key] = ProxiedClass()return proxies[BaseSchema][label][key]return LocalProxy(local)", "docstring": "Create a proxy to a class instance stored in ``proxies``.\n\n :param class BaseSchema: Base schema (e.g. ``StoredObject``)\n :param str label: Name of class variable to set\n :param class ProxiedClass: Class to get or create\n :param function get_key: Extension-specific key function; may return e.g.\n the current Flask request", "id": "f13200:m0"} {"signature": "def with_proxies(proxy_map, get_key):", "body": "def wrapper(cls):for label, ProxiedClass in six.iteritems(proxy_map):proxy = proxy_factory(cls, label, ProxiedClass, get_key)setattr(cls, label, proxy)return clsreturn wrapper", "docstring": "Class decorator factory; adds proxy class variables to target class.\n\n :param dict proxy_map: Mapping between class variable labels and proxied\n classes\n :param function get_key: Extension-specific key function; may return e.g.\n the current Flask request", "id": "f13200:m1"} {"signature": "def _to_primary_key(self, value):", "body": "if value is None:return Noneif isinstance(value, self.base_class):if not value._is_loaded:raise exceptions.DatabaseError('')return value._primary_keyreturn self.base_class._to_primary_key(value)", "docstring": "Return primary key; if value is StoredObject, verify\nthat it is loaded.", "id": "f13209:c0:m4"} {"signature": "def _get_underlying_data(self, instance):", "body": "self._touch(instance)return self.data.get(instance, None)", "docstring": "Return data from raw data store, rather than overridden\n __get__ methods. Should NOT be overwritten.", "id": "f13211:c0:m16"} {"signature": "def warn_if_detached(func):", "body": "@wraps(func)def wrapped(this, *args, **kwargs):if '' in this.__dict__ and this._detached:warnings.warn('')return func(this, *args, **kwargs)return wrapped", "docstring": "Warn if self / cls is detached.", "id": "f13215:m3"} {"signature": "def has_storage(func):", "body": "@wraps(func)def wrapped(*args, **kwargs):me = args[]if not hasattr(me, '') ornot me._storage:raise exceptions.ImproperConfigurationError(''.format(me._name.upper()))return func(*args, **kwargs)return wrapped", "docstring": "Ensure that self/cls contains a Storage backend.", "id": "f13215:m4"} {"signature": "def rm_fwd_refs(obj):", "body": "for stack, key in obj._backrefs_flat:backref_key, parent_schema_name, parent_field_name = stackparent_schema = obj._collections[parent_schema_name]parent_key_store = parent_schema._pk_to_storage(key)parent_object = parent_schema.load(parent_key_store)if parent_object is None:continueif parent_object._fields[parent_field_name]._list:getattr(parent_object, parent_field_name).remove(obj)else:parent_field_object = parent_object._fields[parent_field_name]setattr(parent_object, parent_field_name, parent_field_object._gen_default())parent_object.save()", "docstring": "When removing an object, other objects with references to the current\n object should remove those references. This function identifies objects\n with forward references to the current object, then removes those\n references.\n\n :param obj: Object to which forward references should be removed", "id": "f13215:m5"} {"signature": "def rm_back_refs(obj):", "body": "for ref in _collect_refs(obj):ref['']._remove_backref(ref['']._backref_field_name,obj,ref[''],strict=False)", "docstring": "When removing an object with foreign fields, back-references from\n other objects to the current object should be deleted. 
This function\n identifies foreign fields of the specified object whose values are not\n None and which specify back-reference keys, then removes back-references\n from linked objects to the specified object.\n\n :param obj: Object for which back-references should be removed", "id": "f13215:m7"} {"signature": "def ensure_backrefs(obj, fields=None):", "body": "for ref in _collect_refs(obj, fields):updated = ref['']._update_backref(ref['']._backref_field_name,obj,ref[''],)if updated:logging.debug(''.format(obj._name, obj._primary_key, ref[''],ref['']._name, ref['']._primary_key,))", "docstring": "Ensure that all forward references on the provided object have the\n appropriate backreferences.\n\n :param StoredObject obj: Database record\n :param list fields: Optional list of field names to check", "id": "f13215:m8"} {"signature": "@propertydef _storage_key(self):", "body": "return self._pk_to_storage(self._primary_key)", "docstring": "Primary key passed through translator.", "id": "f13215:c2:m10"} {"signature": "def get_changed_fields(self, cached_data, storage_data):", "body": "if not self._is_loaded or cached_data is None:return []return [fieldfor field in self._fieldsif cached_data.get(field) != storage_data.get(field)]", "docstring": "Get fields that differ between the cache_sandbox and the current object.\n Validation and after_save methods should only be run on diffed\n fields.\n\n :param cached_data: Storage-formatted data from cache_sandbox\n :param storage_data: Storage-formatted data from object\n :return: List of diffed fields", "id": "f13215:c2:m27"} {"signature": "@classmethod@has_storage@log_storagedef load(cls, key=None, data=None, _is_loaded=True):", "body": "signals.load.send(cls,key=key,data=data,)if key is not None:key = cls._check_pk_type(key)cached_object = cls._load_from_cache(key)if cached_object is not None:return cached_objectif data is None:data = cls._storage[].get(cls._primary_name, cls._pk_to_storage(key))if data is None:return Nonedata = cls.from_storage(data)if cls._version_of and '' in data and data[''] != cls._version:old_object = cls._version_of.load(data=data)new_object = cls(_is_loaded=_is_loaded)cls.migrate(old_object, new_object)new_object._stored_key = new_object._primary_keyreturn new_objectret = cls(_is_loaded=_is_loaded, **data)ret._stored_key = ret._primary_keyreturn ret", "docstring": "Get a record by its primary key.", "id": "f13215:c2:m33"} {"signature": "@classmethoddef migrate_all(cls):", "body": "for record in cls.find():record.save()", "docstring": "Migrate all records in this collection.", "id": "f13215:c2:m34"} {"signature": "@classmethoddef migrate(cls, old, new, verbose=True, dry_run=False, rm_refs=True):", "body": "if verbose:logging.basicConfig(format='',level=logging.DEBUG)deleted_fields = [field for field in old._fields if field not in new._fields]added_fields = [field for field in new._fields if field not in old._fields]logging.info(''.format(deleted_fields))logging.info(''.format(added_fields))if old._primary_name != new._primary_name:logging.info(\"\"\"\"\"\"\"\"\"\".format(old_name=old._primary_name,old_field=old._fields[old._primary_name],new_name=new._primary_name,new_field=new._fields[new._primary_name]))for field in old._fields:if field not in cls._fields:if rm_refs:logging.info(\"\"\"\"\"\"\"\".format(name=field,field=old._fields[field]))if not dry_run:rm_fwd_refs(old)else:logging.info(\"\"\"\"\"\"\"\".format(name=field,field=old._fields[field]))continueold_field_obj = old._fields[field]new_field_obj = new._fields[field]if 
old_field_obj != new_field_obj:if not old_field_obj._required and new_field_obj._required:logging.info(\"\"\"\"\"\"\"\"\"\"\"\"\"\".format(name=field))else:logging.info(\"\"\"\"\"\"\"\"\"\".format(name=field,old_field=old_field_obj,new_field=new_field_obj))continueif not dry_run:field_object = cls._fields[field]field_object.__set__(new,getattr(old, field),safe=True)if not dry_run:new.__backrefs = old.__backrefsif not dry_run:cls._migrate(old, new)", "docstring": "Migrate record to new schema.\n\n :param old: Record from original schema\n :param new: Record from new schema\n :param verbose: Print detailed info\n :param dry_run: Dry run; make no changes if true\n :param rm_refs: Remove references on deleted fields", "id": "f13215:c2:m35"} {"signature": "@classmethoddef _migrate(cls, old, new):", "body": "return new", "docstring": "Subclasses can override this class to perform a custom migration.\n This is run after the migrate() method.\n\n Example:\n ::\n\n class NewSchema(StoredObject):\n _id = fields.StringField(primary=True, index=True)\n my_string = fields.StringField()\n\n @classmethod\n def _migrate(cls, old, new):\n new.my_string = old.my_string + 'yo'\n\n _meta = {\n 'version_of': OldSchema,\n 'version': 2,\n 'optimistic': True\n }\n\n :param old: Record from original schema\n :param new: Record from new schema", "id": "f13215:c2:m36"} {"signature": "def validate_record(self):", "body": "for validator in self._record_validators:validator(self)", "docstring": "Apply record-level validation. Run on `save`.", "id": "f13215:c2:m40"} {"signature": "@has_storage@log_storagedef save(self, force=False):", "body": "if self._detached:raise exceptions.DatabaseError('')for field_name, field_object in self._fields.items():if hasattr(field_object, ''):field_object.on_before_save(self)signals.before_save.send(self.__class__,instance=self)cached_data = self._get_cached_data(self._stored_key)storage_data = self.to_storage()if self._primary_key is not None and cached_data is not None:fields_changed = set(self.get_changed_fields(cached_data, storage_data))else:fields_changed = set(self._fields.keys())if not fields_changed and not force:return []for field_name in fields_changed:field_object = self._fields[field_name]field_object.do_validate(getattr(self, field_name), self)self.validate_record()primary_changed = (self._primary_key != self._stored_keyandself._primary_name in fields_changed)if self._is_loaded:if primary_changed and not getattr(self, '', False):self.delegate(self._storage[].remove,False,RawQuery(self._primary_name, '', self._stored_key))self._clear_caches(self._stored_key)self.insert(self._primary_key, storage_data)else:self.update_one(self, storage_data=storage_data, saved=True, inmem=True)elif self._is_optimistic and self._primary_key is None:self._optimistic_insert()else:self.insert(self._primary_key, storage_data)if self._is_loaded and primary_changed:if not getattr(self, '', False):self._updating_key = Trueupdate_backref_keys(self)self._stored_key = self._primary_keyself._updating_key = Falseelse:self._stored_key = self._primary_keyself._is_loaded = Truesignals.save.send(self.__class__,instance=self,fields_changed=fields_changed,cached_data=cached_data or {},)storage_data[self._primary_name] = self._storage_keyself._set_cache(self._primary_key, self, storage_data)return fields_changed", "docstring": "Save a record.\n\n :param bool force: Save even if no fields have changed; used to update\n back-references\n :returns: List of changed fields", "id": "f13215:c2:m41"} {"signature": "def 
update_fields(self, **kwargs):", "body": "for key, value in kwargs.items():self._fields[key].__set__(self, value, safe=True)", "docstring": "Update multiple fields, specified by keyword arguments.\n\n Example::\n\n person.update(given_name='Fred', family_name='Mercury')\n\n ... is equivalent to ... ::\n\n\n person.given_name = 'Fred'\n person.family_name = 'Mercury'\n\n :param **kwargs: field names and the values to set", "id": "f13215:c2:m42"} {"signature": "@classmethod@has_storage@log_storagedef find(cls, query=None, **kwargs):", "body": "cls._process_query(query)return cls._storage[].QuerySet(cls,cls._storage[].find(query, **kwargs))", "docstring": ":param query:\n:param kwargs:\n:return: an iterable of :class:`StoredObject` instances", "id": "f13215:c2:m49"} {"signature": "@classmethoddef delegate(cls, method, conflict=None, *args, **kwargs):", "body": "if cls.queue.active:action = WriteAction(method, *args, **kwargs)if conflict:logger.warn(''''''.format(action))cls.queue.push(action)else:method(*args, **kwargs)", "docstring": "Execute or queue a database action. Variable positional and keyword\n arguments are passed to the provided method.\n\n :param function method: Method to execute or queue\n :param bool conflict: Potential conflict between cache_sandbox and backend,\n e.g., in the event of bulk updates or removes that bypass the\n cache_sandbox", "id": "f13215:c2:m51"} {"signature": "@classmethoddef start_queue(cls):", "body": "cls.queue.start()", "docstring": "Start the queue. Between calling `start_queue` and `commit_queue`,\n all writes will be deferred to the queue.", "id": "f13215:c2:m52"} {"signature": "@classmethoddef clear_queue(cls):", "body": "cls.queue.clear()", "docstring": "Clear the queue.", "id": "f13215:c2:m53"} {"signature": "@classmethoddef cancel_queue(cls):", "body": "if cls.queue:cls._cache.clear()cls._object_cache.clear()cls.clear_queue()", "docstring": "Cancel any pending actions. This method clears the queue and also\n clears caches if any actions are pending.", "id": "f13215:c2:m54"} {"signature": "@classmethoddef commit_queue(cls):", "body": "try:cls.queue.commit()cls.clear_queue()except:cls.cancel_queue()raise", "docstring": "Commit all queued actions. If any actions fail, clear caches. Note:\n the queue will be cleared whether an error is raised or not.", "id": "f13215:c2:m55"} {"signature": "@classmethoddef subscribe(cls, signal_name, weak=True):", "body": "try:signal = getattr(signals, signal_name)except AttributeError:raise ValueError(''.format(signal_name))sender = None if cls._is_root else clsreturn signal.connect_via(sender, weak)", "docstring": ":param str signal_name: Name of signal to subscribe to; must be found\n in ``signals.py``.\n:param bool weak: Create weak reference to callback\n:returns: Decorator created by ``Signal::connect_via``\n:raises: ValueError if signal is not found\n\nExample usage: ::\n\n >>> @Schema.subscribe('before_save')\n ... def listener(cls, instance):\n ... 
instance.value += 1", "id": "f13215:c2:m56"} {"signature": "@classmethod@has_storagedef remove_one(cls, which, rm=True):", "body": "obj = cls._which_to_obj(which)rm_fwd_refs(obj)rm_back_refs(obj)cls._clear_caches(obj._storage_key)if rm:cls.delegate(cls._storage[].remove,False,RawQuery(obj._primary_name, '', obj._storage_key))obj._detached = True", "docstring": "Remove an object, along with its references and back-references.\n Remove the object from the cache_sandbox and sets its _detached flag to True.\n\n :param which: Object selector: Query, StoredObject, or primary key\n :param rm: Remove data from backend", "id": "f13215:c2:m64"} {"signature": "@classmethod@has_storagedef remove(cls, query=None):", "body": "objs = cls.find(query)for obj in objs:cls.remove_one(obj, rm=False)cls.delegate(cls._storage[].remove,False,query)", "docstring": "Remove objects by query.\n\n :param query: Query object", "id": "f13215:c2:m65"} {"signature": "def _generate_random_id(self, n=):", "body": "alphabet = ''return ''.join(random.sample(alphabet, n))", "docstring": "Generated random alphanumeric key.\n\n :param n: Number of characters in random key", "id": "f13217:c3:m1"} {"signature": "def _optimistic_insert(self, primary_name, value, n=):", "body": "while True:try:key = self._generate_random_id(n)value[primary_name] = keyself.insert(primary_name, key, value)breakexcept KeyExistsException:passreturn key", "docstring": "Attempt to insert with randomly generated key until insert\n is successful.\n\n :param str primary_name: The name of the primary key.\n :param dict value: The dictionary representation of the record.\n :param n: Number of characters in random key", "id": "f13217:c3:m2"} {"signature": "@abc.abstractmethoddef insert(self, primary_name, key, value):", "body": "pass", "docstring": "Insert a new record.\n\n :param str primary_name: Name of primary key\n :param key: The value of the primary key\n :param dict value: The dictionary of attribute:value pairs", "id": "f13217:c3:m3"} {"signature": "@abc.abstractmethoddef update(self, query, data):", "body": "pass", "docstring": "Update multiple records with new data.\n\n :param query: A query object.\n :param dict data: Dictionary of key:value pairs.", "id": "f13217:c3:m4"} {"signature": "@abc.abstractmethoddef get(self, primary_name, key):", "body": "pass", "docstring": "Get a single record.\n\n :param str primary_name: The name of the primary key.\n :param key: The value of the primary key.", "id": "f13217:c3:m5"} {"signature": "@abc.abstractmethoddef remove(self, query=None):", "body": "pass", "docstring": "Remove records.", "id": "f13217:c3:m6"} {"signature": "@abc.abstractmethoddef flush(self):", "body": "pass", "docstring": "Flush the database.", "id": "f13217:c3:m7"} {"signature": "@abc.abstractmethoddef find_one(self, query=None, **kwargs):", "body": "pass", "docstring": "Gets a single object from the collection.\n\n If no matching documents are found, raises `NoResultsFound`.\n If >1 matching documents are found, raises `MultipleResultsFound`.\n\n :params: One or more `Query` or `QuerySet` objects may be passed\n\n :returns: The selected document", "id": "f13217:c3:m8"} {"signature": "@abc.abstractmethoddef find(self, query=None, **kwargs):", "body": "pass", "docstring": "Return a generator of query results. 
Takes optional `by_pk` keyword\nargument; if true, return keys rather than\nvalues.\n\n:param query:\n\n:return: a generator of :class:`~.storedobject.StoredObject` instances", "id": "f13217:c3:m9"} {"signature": "def sort(self, *keys):", "body": "self._sort = keysreturn self", "docstring": "Iteratively sort data by keys in reverse order.", "id": "f13219:c0:m7"} {"signature": "def __init__(self, collection_name, prefix='', ext=''):", "body": "filename = collection_name + '' + extif prefix:self.filename = prefix + filenameelse:self.filename = filenameself.store = {}if os.path.exists(self.filename):with open(self.filename, '') as fp:data = fp.read()self.store = pickle.loads(data)", "docstring": "Build pickle file name and load data if exists.\n\n :param collection_name: Collection name\n :param prefix: File prefix.\n :param ext: File extension.", "id": "f13219:c1:m0"} {"signature": "def _remove_by_pk(self, key, flush=True):", "body": "try:del self.store[key]except Exception as error:passif flush:self.flush()", "docstring": "Retrieve value from store.\n\n :param key: Key", "id": "f13219:c1:m5"} {"signature": "def contact(request):", "body": "form = ContactForm(request.POST or None)if form.is_valid():subject = form.cleaned_data['']message = form.cleaned_data['']sender = form.cleaned_data['']cc_myself = form.cleaned_data['']recipients = settings.CONTACTFORM_RECIPIENTSif cc_myself:recipients.append(sender)send_mail(getattr(settings, \"\", '') + subject, message, sender, recipients)return render(request, '')return render( request, '', {'': form})", "docstring": "Displays the contact form and sends the email", "id": "f13230:m0"} {"signature": "def log_callback(wrapped_function):", "body": "def debug_log(message):\"\"\"\"\"\"logger.debug(message.encode('').decode())@functools.wraps(wrapped_function)def _wrapper(parser, match, **kwargs):func_name = wrapped_function.__name__debug_log(u''.format(func_name=func_name,matched_string=match.group(),))try:result = wrapped_function(parser, match, **kwargs)except IgnoredMatchException:debug_log(u''.format(func_name=func_name))raisedebug_log(u''.format(func_name=func_name,result=result,))return resultreturn _wrapper", "docstring": "Decorator that produces DEBUG level log messages before and after\n calling a parser method.\n\n If a callback raises an IgnoredMatchException the log will show 'IGNORED'\n instead to indicate that the parser will not create any objects from\n the matched string.\n\n Example:\n DEBUG:poyo.parser:parse_simple <- 123: 456.789\n DEBUG:poyo.parser:parse_int <- 123\n DEBUG:poyo.parser:parse_int -> 123\n DEBUG:poyo.parser:parse_float <- 456.789\n DEBUG:poyo.parser:parse_float -> 456.789\n DEBUG:poyo.parser:parse_simple -> ", "id": "f13238:m0"} {"signature": "@log_callbackdef parse_comment(self, match):", "body": "raise IgnoredMatchException", "docstring": "Ignore line comments.", "id": "f13238:c0:m3"} {"signature": "@log_callbackdef parse_blankline(self, match):", "body": "raise IgnoredMatchException", "docstring": "Ignore blank lines.", "id": "f13238:c0:m4"} {"signature": "@log_callbackdef parse_dashes(self, match):", "body": "raise IgnoredMatchException", "docstring": "Ignore lines that contain three dash symbols.", "id": "f13238:c0:m5"} {"signature": "def find_match(self):", "body": "for pattern, callback in self.rules:match = pattern.match(self.source, pos=self.pos)if not match:continuetry:node = callback(match)except IgnoredMatchException:passelse:self.seen.append(node)return matchraise 
NoMatchException(''''.format(self.source[self.pos:]))", "docstring": "Try to find a pattern that matches the source and call a parser\n method to create Python objects.\n\n A callback that raises an IgnoredMatchException indicates that the\n given string data is ignored by the parser and no objects are created.\n\n If none of the patterns match, a NoMatchException is raised.", "id": "f13238:c0:m15"} {"signature": "def __call__(self):", "body": "while self.pos < self.max_pos:match = self.find_match()self.pos = match.end()return self.root()", "docstring": "Parse the given string data and sequentially update the current\n cursor position until the end is reached.\n\n Return the Root object if successful.", "id": "f13238:c0:m16"} {"signature": "def add_child(self, child):", "body": "if not isinstance(child, ChildMixin):raise TypeError(''''.format(type(child)))child.parent = selfself._children.append(child)", "docstring": "If the given object is an instance of Child add it to self and\n register self as a parent.", "id": "f13239:c1:m3"} {"signature": "@classmethoddef setUpClass(cls):", "body": "cls.test_server = AdsTestServer(logging=True)cls.test_server.start()time.sleep()", "docstring": "Set up the ADS testserver.", "id": "f13247:c0:m0"} {"signature": "@classmethoddef tearDownClass(cls):", "body": "cls.test_server.stop()time.sleep()", "docstring": "Tear down the testserver.", "id": "f13247:c0:m1"} {"signature": "def setUp(self):", "body": "self.test_server.request_history = []self.plc = pyads.Connection(TEST_SERVER_AMS_NET_ID,TEST_SERVER_AMS_PORT,TEST_SERVER_IP_ADDRESS)", "docstring": "Establish connection to the testserver.", "id": "f13247:c0:m2"} {"signature": "def assert_command_id(self, request, target_id):", "body": "command_id = request.ams_header.command_idcommand_id = struct.unpack('', command_id)[]self.assertEqual(command_id, target_id)", "docstring": "Assert command_id and target_id.", "id": "f13247:c0:m3"} {"signature": "def __enter__(self):", "body": "self.start()return self", "docstring": "Enter context.", "id": "f13251:c0:m1"} {"signature": "def __exit__(self, exc_type, exc_value, traceback):", "body": "self.close()", "docstring": "Exit context.", "id": "f13251:c0:m2"} {"signature": "def stop(self):", "body": "for client in self.clients:client.close()self.clients = []if self._run:logger.info(\"\")self._run = Falseself.server.close()", "docstring": "Close client connections and stop main server loop.", "id": "f13251:c0:m3"} {"signature": "def close(self):", "body": "self.stop()", "docstring": "Close the server thread.", "id": "f13251:c0:m4"} {"signature": "def run(self):", "body": "self._run = Trueself.server.listen()logger.info(\"\".format(self.ip_address or \"\", self.port))while self._run:ready, _, _ = select.select([self.server], [], [], )if ready:try:client, address = self.server.accept()except:continuelogger.info(\"\".format(*address))client_thread = AdsClientConnection(handler=self.handler, client=client, address=address, server=self)client_thread.daemon = Trueclient_thread.start()self.clients.append(client_thread)", "docstring": "Listen for incoming connections from clients.", "id": "f13251:c0:m5"} {"signature": "def stop(self):", "body": "if self._run:logger.info(\"\".format(*self.client_address))self._run = Falseself.join()", "docstring": "Stop the client thread.", "id": "f13251:c1:m1"} {"signature": "def close(self):", "body": "if self.is_alive():self.stop()self.client.close()", "docstring": "Close the client connection.", "id": "f13251:c1:m2"} {"signature": "def 
run(self):", "body": "self._run = Truewhile self._run:ready, _, _ = select.select([self.client], [], [], )if not ready:continuedata, _ = self.client.recvfrom()if not data:self.client.close()self._run = Falsecontinueif len(data) < :logger.warning(\"\".format(*self.client_address, data=data))continuerequest_packet = self.construct_request(data)self.server.request_history.append(request_packet)response = self.handler.handle_request(request_packet)if isinstance(response, (AmsResponseData,)):response_bytes = self.construct_response(response, request_packet)self.client.send(response_bytes)continuelogger.error(\"\")", "docstring": "Listen for data on client connection and delegate requests.", "id": "f13251:c1:m3"} {"signature": "def construct_response(self, response_data, request):", "body": "target_net_id = request.ams_header.source_net_idtarget_port = request.ams_header.source_portsource_net_id = request.ams_header.target_net_idsource_port = request.ams_header.target_portcommand_id = request.ams_header.command_idinvoke_id = request.ams_header.invoke_idstate_flags = response_data.state_flagsams_length = struct.pack(\"\", len(response_data.data))error_code = response_data.error_codedata = response_data.dataams_header = \"\".encode(\"\").join((target_net_id,target_port,source_net_id,source_port,command_id,state_flags,ams_length,error_code,invoke_id,data,))ams_tcp_header = \"\".encode(\"\") + struct.pack(\"\", len(ams_header))return ams_tcp_header + ams_header", "docstring": "Construct binary AMS response to return to the client.\n\n :param AmsResponseData response_data: Data to include in the response\n :param AmsPacket request: The originating request for the response", "id": "f13251:c1:m4"} {"signature": "def construct_request(self, request_bytes):", "body": "data = request_bytes tcp_header = AmsTcpHeader(data[:])ams_header = AmsHeader(data[:],data[:],data[:],data[:],data[:],data[:],data[:],data[:],data[:],data[:],)return AmsPacket(tcp_header, ams_header)", "docstring": "Unpack an AMS packet from binary data.\n\n :param bytes request_bytes: The raw request data\n :rtype AmsPacket:\n :return: AmsPacket with fields populated from the binary data", "id": "f13251:c1:m5"} {"signature": "def handle_request(self, request):", "body": "raise not NotImplementedError()", "docstring": "Handle incoming requests.\n\n :param AmsPacket request: The request data received from the client\n :rtype: AmsResponseData\n :return: Data needed to construct the AMS response packet", "id": "f13251:c2:m0"} {"signature": "def handle_request(self, request):", "body": "command_id_bytes = request.ams_header.command_idcommand_id = struct.unpack(\"\", command_id_bytes)[]state = struct.unpack(\"\", request.ams_header.state_flags)[]state = state | state = struct.pack(\"\", state)if command_id == constants.ADSCOMMAND_READDEVICEINFO:logger.info(\"\")major_version = \"\".encode(\"\")minor_version = \"\".encode(\"\")version_build = \"\".encode(\"\")device_name = \"\".encode(\"\")response_content = (major_version + minor_version + version_build + device_name)elif command_id == constants.ADSCOMMAND_READ:logger.info(\"\")response_length = struct.unpack(\"\", request.ams_header.data[:])[]response_value = ((\"\" * (response_length - )) + \"\").encode(\"\")response_content = struct.pack(\"\", len(response_value)) + response_valueelif command_id == constants.ADSCOMMAND_WRITE:logger.info(\"\")response_content = \"\".encode(\"\")elif command_id == constants.ADSCOMMAND_READSTATE:logger.info(\"\")ads_state = struct.pack(\"\", 
constants.ADSSTATE_RUN)device_state = struct.pack(\"\", )response_content = ads_state + device_stateelif command_id == constants.ADSCOMMAND_WRITECTRL:logger.info(\"\")response_content = \"\".encode(\"\")elif command_id == constants.ADSCOMMAND_ADDDEVICENOTE:logger.info(\"\")handle = (\"\" * ).encode(\"\")response_content = handleelif command_id == constants.ADSCOMMAND_DELDEVICENOTE:logger.info(\"\")response_content = \"\".encode(\"\")elif command_id == constants.ADSCOMMAND_DEVICENOTE:logger.info(\"\")response_content = \"\".encode(\"\")elif command_id == constants.ADSCOMMAND_READWRITE:logger.info(\"\")response_length = struct.unpack(\"\", request.ams_header.data[:])[]response_value = ((\"\" * (response_length - )) + \"\").encode(\"\")response_content = struct.pack(\"\", len(response_value)) + response_valueelse:logger.info(\"\".format(hex(command_id)))error_code = \"\".encode(\"\")return AmsResponseData(state, error_code, \"\".encode(\"\"))error_code = (\"\" * ).encode(\"\")response_data = error_code + response_contentreturn AmsResponseData(state, request.ams_header.error_code, response_data)", "docstring": "Handle incoming requests and send a response.", "id": "f13251:c3:m0"} {"signature": "def handle_request(self, request):", "body": "command_id_bytes = request.ams_header.command_idcommand_id = struct.unpack(\"\", command_id_bytes)[]state = struct.unpack(\"\", request.ams_header.state_flags)[]state = state | state = struct.pack(\"\", state)def handle_read_device_info():\"\"\"\"\"\"logger.info(\"\")major_version = \"\".encode(\"\")minor_version = \"\".encode(\"\")version_build = \"\".encode(\"\")device_name = \"\".encode(\"\")response_content = (major_version + minor_version + version_build + device_name)return response_contentdef handle_read():\"\"\"\"\"\"data = request.ams_header.dataindex_group = struct.unpack(\"\", data[:])[]index_offset = struct.unpack(\"\", data[:])[]plc_datatype = struct.unpack(\"\", data[:])[]logger.info((\"\"\"\").format(index_group, index_offset, plc_datatype))if index_group == constants.ADSIGRP_SYM_VALBYHND:response_value = self._named_data[index_offset].valueelse:response_value = self._data[(index_group, index_offset)][:plc_datatype]return struct.pack(\"\", len(response_value)) + response_valuedef handle_write():\"\"\"\"\"\"data = request.ams_header.dataindex_group = struct.unpack(\"\", data[:])[]index_offset = struct.unpack(\"\", data[:])[]plc_datatype = struct.unpack(\"\", data[:])[]value = data[:( + plc_datatype)]logger.info((\"\"\"\").format(index_group, index_offset, plc_datatype, value))if index_group == constants.ADSIGRP_SYM_RELEASEHND:return b\"\"elif index_group == constants.ADSIGRP_SYM_VALBYHND:self._named_data[index_offset].value = valuereturn b\"\"self._data[(index_group, index_offset)] = valuereturn b\"\"def handle_read_write():\"\"\"\"\"\"data = request.ams_header.dataindex_group = struct.unpack(\"\", data[:])[]index_offset = struct.unpack(\"\", data[:])[]read_length = struct.unpack(\"\", data[:])[]write_length = struct.unpack(\"\", data[:])[]write_data = data[:( + write_length)]logger.info((\"\"\"\"\"\").format(index_group, index_offset, read_length, write_length, write_data))if index_group == constants.ADSIGRP_SYM_HNDBYNAME:var_name = write_data.decode()names = [x.name for x in self._named_data]try:handle = names.index(var_name)except ValueError:self._named_data.append(PLCVariable(name=var_name, value=bytes()))handle = len(self._named_data) - read_data = struct.pack(\"\", handle)else:read_data = self._data[(index_group, 
index_offset)][:read_length]self._data[(index_group, index_offset)] = write_datareturn struct.pack(\"\", len(read_data)) + read_datadef handle_read_state():\"\"\"\"\"\"logger.info(\"\")ads_state = struct.pack(\"\", constants.ADSSTATE_RUN)device_state = struct.pack(\"\", )return ads_state + device_statedef handle_writectrl():\"\"\"\"\"\"logger.info(\"\")return b\"\"def handle_add_devicenote():\"\"\"\"\"\"logger.info(\"\")handle = (\"\" * ).encode(\"\")return handledef handle_delete_devicenote():\"\"\"\"\"\"logger.info(\"\")return b\"\"def handle_devicenote():\"\"\"\"\"\"logger.info(\"\")return b\"\"function_map = {constants.ADSCOMMAND_READDEVICEINFO: handle_read_device_info,constants.ADSCOMMAND_READ: handle_read,constants.ADSCOMMAND_WRITE: handle_write,constants.ADSCOMMAND_READWRITE: handle_read_write,constants.ADSCOMMAND_READSTATE: handle_read_state,constants.ADSCOMMAND_WRITECTRL: handle_writectrl,constants.ADSCOMMAND_ADDDEVICENOTE: handle_add_devicenote,constants.ADSCOMMAND_DELDEVICENOTE: handle_delete_devicenote,constants.ADSCOMMAND_DEVICENOTE: handle_devicenote,}try:response_content = function_map[command_id]()except KeyError:logger.info(\"\".format(hex(command_id)))error_code = \"\".encode(\"\")return AmsResponseData(state, error_code, \"\".encode(\"\"))error_code = (\"\" * ).encode(\"\")response_data = error_code + response_contentreturn AmsResponseData(state, request.ams_header.error_code, response_data)", "docstring": "Handle incoming requests and create a response.", "id": "f13251:c5:m1"} {"signature": "def PLCTYPE_ARR_REAL(n):", "body": "return c_float * n", "docstring": "Return an array with n float values.", "id": "f13253:m0"} {"signature": "def PLCTYPE_ARR_LREAL(n):", "body": "return c_double * n", "docstring": "Return an array with n double values.", "id": "f13253:m1"} {"signature": "def PLCTYPE_ARR_INT(n):", "body": "return c_int16 * n", "docstring": "Return an array with n int16 values.", "id": "f13253:m2"} {"signature": "def PLCTYPE_ARR_DINT(n):", "body": "return c_int32 * n", "docstring": "Return an array with n int32 values.", "id": "f13253:m3"} {"signature": "def PLCTYPE_ARR_SHORT(n):", "body": "return c_short * n", "docstring": "Return an array with n short values.", "id": "f13253:m4"} {"signature": "def _parse_ams_netid(ams_netid):", "body": "try:id_numbers = list(map(int, ams_netid.split(\"\")))except ValueError:raise ValueError(\"\")if len(id_numbers) != :raise ValueError(\"\")ams_netid_st = SAmsNetId()ams_netid_st.b = (c_ubyte * )(*id_numbers)return ams_netid_st", "docstring": "Parse an AmsNetId from *str* to *SAmsNetId*.\n\n :param str ams_netid: NetId as a string\n :rtype: SAmsNetId\n :return: NetId as a struct", "id": "f13258:m0"} {"signature": "def open_port():", "body": "global portport = port or adsPortOpenEx()return port", "docstring": "Connect to the TwinCAT message router.\n\n :rtype: int\n :return: port number", "id": "f13258:m1"} {"signature": "def close_port():", "body": "global portif port is not None:adsPortCloseEx(port)port = None", "docstring": "Close the connection to the TwinCAT message router.", "id": "f13258:m2"} {"signature": "def get_local_address():", "body": "if port is not None:return adsGetLocalAddressEx(port)return None", "docstring": "Return the local AMS-address and the port number.\n\n :rtype: AmsAddr", "id": "f13258:m3"} {"signature": "def set_local_address(ams_netid):", "body": "if isinstance(ams_netid, str):ams_netid_st = _parse_ams_netid(ams_netid)else:ams_netid_st = ams_netidassert isinstance(ams_netid_st, SAmsNetId)if linux:return 
adsSetLocalAddress(ams_netid_st)else:raise ADSError(text=\"\")", "docstring": "Set the local NetID (**Linux only**).\n\n :param str: new AmsNetID\n :rtype: None\n\n **Usage:**\n\n >>> import pyads\n >>> pyads.open_port()\n >>> pyads.set_local_address('0.0.0.0.1.1')", "id": "f13258:m4"} {"signature": "def read_state(adr):", "body": "if port is not None:return adsSyncReadStateReqEx(port, adr)return None", "docstring": "Read the current ADS-state and the machine-state.\n\n Read the current ADS-state and the machine-state from the\n ADS-server.\n\n :param AmsAddr adr: local or remote AmsAddr\n :rtype: (int, int)\n :return: adsState, deviceState", "id": "f13258:m5"} {"signature": "def write_control(adr, ads_state, device_state, data, plc_datatype):", "body": "if port is not None:return adsSyncWriteControlReqEx(port, adr, ads_state, device_state, data, plc_datatype)", "docstring": "Change the ADS state and the machine-state of the ADS-server.\n\n :param AmsAddr adr: local or remote AmsAddr\n :param int ads_state: new ADS-state, according to ADSTATE constants\n :param int device_state: new machine-state\n :param data: additional data\n :param int plc_datatype: datatype, according to PLCTYPE constants\n\n :note: Despite changing the ADS-state and the machine-state it is possible\n to send additional data to the ADS-server. For current ADS-devices\n additional data is not progressed.\n Every ADS-device is able to communicate its current state to other\n devices.\n There is a difference between the device-state and the state of the\n ADS-interface (AdsState). The possible states of an ADS-interface\n are defined in the ADS-specification.", "id": "f13258:m6"} {"signature": "def read_device_info(adr):", "body": "if port is not None:return adsSyncReadDeviceInfoReqEx(port, adr)return None", "docstring": "Read the name and the version number of the ADS-server.\n\n :param AmsAddr adr: local or remote AmsAddr\n :rtype: string, AdsVersion\n :return: device name, version", "id": "f13258:m7"} {"signature": "def write(adr, index_group, index_offset, value, plc_datatype):", "body": "if port is not None:return adsSyncWriteReqEx(port, adr, index_group, index_offset, value, plc_datatype)", "docstring": "Send data synchronous to an ADS-device.\n\n :param AmsAddr adr: local or remote AmsAddr\n :param int index_group: PLC storage area, according to the INDEXGROUP\n constants\n :param int index_offset: PLC storage address\n :param value: value to write to the storage address of the PLC\n :param Type plc_datatype: type of the data given to the PLC,\n according to PLCTYPE constants", "id": "f13258:m8"} {"signature": "def read_write(adr,index_group,index_offset,plc_read_datatype,value,plc_write_datatype,return_ctypes=False,):", "body": "if port is not None:return adsSyncReadWriteReqEx2(port,adr,index_group,index_offset,plc_read_datatype,value,plc_write_datatype,return_ctypes,)return None", "docstring": "Read and write data synchronous from/to an ADS-device.\n\n :param AmsAddr adr: local or remote AmsAddr\n :param int index_group: PLC storage area, according to the INDEXGROUP\n constants\n :param int index_offset: PLC storage address\n :param Type plc_read_datatype: type of the data given to the PLC to respond\n to, according to PLCTYPE constants\n :param value: value to write to the storage address of the PLC\n :param Type plc_write_datatype: type of the data given to the PLC, according to\n PLCTYPE constants\n :param bool return_ctypes: return ctypes instead of python types if True\n (default: False)\n :rtype: PLCTYPE\n 
:return: value: **value**", "id": "f13258:m9"} {"signature": "def read(adr, index_group, index_offset, plc_datatype, return_ctypes=False):", "body": "if port is not None:return adsSyncReadReqEx2(port, adr, index_group, index_offset, plc_datatype, return_ctypes)return None", "docstring": "Read data synchronous from an ADS-device.\n\n :param AmsAddr adr: local or remote AmsAddr\n :param int index_group: PLC storage area, according to the INDEXGROUP\n constants\n :param int index_offset: PLC storage address\n :param int plc_datatype: type of the data given to the PLC, according to\n PLCTYPE constants\n :param bool return_ctypes: return ctypes instead of python types if True\n (default: False)\n :return: value: **value**", "id": "f13258:m10"} {"signature": "def read_by_name(adr, data_name, plc_datatype, return_ctypes=False):", "body": "if port is not None:return adsSyncReadByNameEx(port, adr, data_name, plc_datatype, return_ctypes)return None", "docstring": "Read data synchronous from an ADS-device from data name.\n\n :param AmsAddr adr: local or remote AmsAddr\n :param string data_name: data name\n :param int plc_datatype: type of the data given to the PLC, according to\n PLCTYPE constants\n :param bool return_ctypes: return ctypes instead of python types if True\n (default: False)\n :return: value: **value**", "id": "f13258:m11"} {"signature": "def write_by_name(adr, data_name, value, plc_datatype):", "body": "if port is not None:return adsSyncWriteByNameEx(port, adr, data_name, value, plc_datatype)", "docstring": "Send data synchronous to an ADS-device from data name.\n\n :param AmsAddr adr: local or remote AmsAddr\n :param string data_name: PLC storage address\n :param value: value to write to the storage address of the PLC\n :param int plc_datatype: type of the data given to the PLC,\n according to PLCTYPE constants", "id": "f13258:m12"} {"signature": "def add_route(adr, ip_address):", "body": "return adsAddRoute(adr.netIdStruct(), ip_address)", "docstring": "Establish a new route in the AMS Router (linux Only).\n\n :param pyads.structs.AmsAddr adr: AMS Address of routing endpoint\n :param str ip_address: ip address of the routing endpoint", "id": "f13258:m13"} {"signature": "def delete_route(adr):", "body": "return adsDelRoute(adr.netIdStruct())", "docstring": "Remove existing route from the AMS Router (Linux Only).\n\n :param pyads.structs.AmsAddr adr: AMS Address associated with the routing\n entry which is to be removed from the router.", "id": "f13258:m14"} {"signature": "def add_device_notification(adr, data_name, attr, callback, user_handle=None):", "body": "if port is not None:return adsSyncAddDeviceNotificationReqEx(port, adr, data_name, attr, callback, user_handle)return None", "docstring": "Add a device notification.\n\n :param pyads.structs.AmsAddr adr: AMS Address associated with the routing\n entry which is to be removed from the router.\n :param str data_name: PLC storage address\n :param pyads.structs.NotificationAttrib attr: object that contains\n all the attributes for the definition of a notification\n :param callback: callback function that gets executed on in the event\n of a notification\n\n :rtype: (int, int)\n :returns: notification handle, user handle\n\n Save the notification handle and the user handle on creating a\n notification if you want to be able to remove the notification\n later in your code.", "id": "f13258:m15"} {"signature": "def del_device_notification(adr, notification_handle, user_handle):", "body": "if port is not None:return 
adsSyncDelDeviceNotificationReqEx(port, adr, notification_handle, user_handle)", "docstring": "Remove a device notification.\n\n :param pyads.structs.AmsAddr adr: AMS Address associated with the routing\n entry which is to be removed from the router.\n :param notification_handle: address of the variable that contains\n the handle of the notification\n :param user_handle: user handle", "id": "f13258:m16"} {"signature": "def set_timeout(ms):", "body": "if port is not None:return adsSyncSetTimeoutEx(port, ms)", "docstring": "Set timeout.", "id": "f13258:m17"} {"signature": "def __enter__(self):", "body": "self.open()return self", "docstring": "Open on entering with-block.", "id": "f13258:c0:m1"} {"signature": "def __exit__(self, _type, _val, _traceback):", "body": "self.close()", "docstring": "Close on leaving with-block.", "id": "f13258:c0:m2"} {"signature": "def open(self):", "body": "if self._open:returnself._port = adsPortOpenEx()if linux:adsAddRoute(self._adr.netIdStruct(), self.ip_address)self._open = True", "docstring": "Connect to the TwinCAT message router.", "id": "f13258:c0:m3"} {"signature": "def close(self):", "body": "if not self._open:returnif linux:adsDelRoute(self._adr.netIdStruct())if self._port is not None:adsPortCloseEx(self._port)self._port = Noneself._open = False", "docstring": ":summary: Close the connection to the TwinCAT message router.", "id": "f13258:c0:m4"} {"signature": "def get_local_address(self):", "body": "if self._port is not None:return adsGetLocalAddressEx(self._port)return None", "docstring": "Return the local AMS-address and the port number.\n\n :rtype: AmsAddr", "id": "f13258:c0:m5"} {"signature": "def read_state(self):", "body": "if self._port is not None:return adsSyncReadStateReqEx(self._port, self._adr)return None", "docstring": "Read the current ADS-state and the machine-state.\n\n Read the current ADS-state and the machine-state from the ADS-server.\n\n :rtype: (int, int)\n :return: adsState, deviceState", "id": "f13258:c0:m6"} {"signature": "def write_control(self, ads_state, device_state, data, plc_datatype):", "body": "if self._port is not None:return adsSyncWriteControlReqEx(self._port, self._adr, ads_state, device_state, data, plc_datatype)", "docstring": "Change the ADS state and the machine-state of the ADS-server.\n\n :param int ads_state: new ADS-state, according to ADSTATE constants\n :param int device_state: new machine-state\n :param data: additional data\n :param int plc_datatype: datatype, according to PLCTYPE constants\n\n :note: Despite changing the ADS-state and the machine-state it is\n possible to send additional data to the ADS-server. For current\n ADS-devices additional data is not progressed.\n Every ADS-device is able to communicate its current state to other\n devices. There is a difference between the device-state and the\n state of the ADS-interface (AdsState). 
The possible states of an\n ADS-interface are defined in the ADS-specification.", "id": "f13258:c0:m7"} {"signature": "def read_device_info(self):", "body": "if self._port is not None:return adsSyncReadDeviceInfoReqEx(self._port, self._adr)return None", "docstring": "Read the name and the version number of the ADS-server.\n\n :rtype: string, AdsVersion\n :return: device name, version", "id": "f13258:c0:m8"} {"signature": "def write(self, index_group, index_offset, value, plc_datatype):", "body": "if self._port is not None:return adsSyncWriteReqEx(self._port, self._adr, index_group, index_offset, value, plc_datatype)", "docstring": "Send data synchronous to an ADS-device.\n\n :param int index_group: PLC storage area, according to the INDEXGROUP\n constants\n :param int index_offset: PLC storage address\n :param value: value to write to the storage address of the PLC\n :param int plc_datatype: type of the data given to the PLC,\n according to PLCTYPE constants", "id": "f13258:c0:m9"} {"signature": "def read_write(self,index_group,index_offset,plc_read_datatype,value,plc_write_datatype,return_ctypes=False,):", "body": "if self._port is not None:return adsSyncReadWriteReqEx2(self._port,self._adr,index_group,index_offset,plc_read_datatype,value,plc_write_datatype,return_ctypes,)return None", "docstring": "Read and write data synchronous from/to an ADS-device.\n\n :param int index_group: PLC storage area, according to the INDEXGROUP\n constants\n :param int index_offset: PLC storage address\n :param int plc_read_datatype: type of the data given to the PLC to\n respond to, according to PLCTYPE constants\n :param value: value to write to the storage address of the PLC\n :param plc_write_datatype: type of the data given to the PLC,\n according to PLCTYPE constants\n :rtype: PLCTYPE\n :param bool return_ctypes: return ctypes instead of python types if True\n (default: False)\n :return: value: **value**", "id": "f13258:c0:m10"} {"signature": "def read(self, index_group, index_offset, plc_datatype, return_ctypes=False):", "body": "if self._port is not None:return adsSyncReadReqEx2(self._port, self._adr, index_group, index_offset, plc_datatype, return_ctypes)return None", "docstring": "Read data synchronous from an ADS-device.\n\n :param int index_group: PLC storage area, according to the INDEXGROUP\n constants\n :param int index_offset: PLC storage address\n :param int plc_datatype: type of the data given to the PLC, according\n to PLCTYPE constants\n :return: value: **value**\n :param bool return_ctypes: return ctypes instead of python types if True\n (default: False)", "id": "f13258:c0:m11"} {"signature": "def read_by_name(self, data_name, plc_datatype, return_ctypes=False):", "body": "if self._port:return adsSyncReadByNameEx(self._port, self._adr, data_name, plc_datatype, return_ctypes)return None", "docstring": "Read data synchronous from an ADS-device from data name.\n\n :param string data_name: data name\n :param int plc_datatype: type of the data given to the PLC, according\n to PLCTYPE constants\n :return: value: **value**\n :param bool return_ctypes: return ctypes instead of python types if True\n (default: False)", "id": "f13258:c0:m12"} {"signature": "def write_by_name(self, data_name, value, plc_datatype):", "body": "if self._port:return adsSyncWriteByNameEx(self._port, self._adr, data_name, value, plc_datatype)", "docstring": "Send data synchronous to an ADS-device from data name.\n\n :param string data_name: PLC storage address\n :param value: value to write to the storage address of the 
PLC\n :param int plc_datatype: type of the data given to the PLC,\n according to PLCTYPE constants", "id": "f13258:c0:m13"} {"signature": "def add_device_notification(self, data_name, attr, callback, user_handle=None):", "body": "if self._port is not None:notification_handle, user_handle = adsSyncAddDeviceNotificationReqEx(self._port, self._adr, data_name, attr, callback, user_handle)return notification_handle, user_handlereturn None", "docstring": "Add a device notification.\n\n :param str data_name: PLC storage address\n :param pyads.structs.NotificationAttrib attr: object that contains\n all the attributes for the definition of a notification\n :param callback: callback function that gets executed on in the event\n of a notification\n\n :rtype: (int, int)\n :returns: notification handle, user handle\n\n Save the notification handle and the user handle on creating a\n notification if you want to be able to remove the notification\n later in your code.\n\n **Usage**:\n\n >>> import pyads\n >>> from ctypes import size_of\n >>>\n >>> # Connect to the local TwinCAT PLC\n >>> plc = pyads.Connection('127.0.0.1.1.1', 851)\n >>>\n >>> # Create callback function that prints the value\n >>> def mycallback(adr, notification, user):\n >>> contents = notification.contents\n >>> value = next(\n >>> map(int,\n >>> bytearray(contents.data)[0:contents.cbSampleSize])\n >>> )\n >>> print(value)\n >>>\n >>> with plc:\n >>> # Add notification with default settings\n >>> attr = pyads.NotificationAttrib(size_of(pyads.PLCTYPE_INT))\n >>>\n >>> hnotification, huser = plc.add_device_notification(\n >>> adr, attr, mycallback)\n >>>\n >>> # Remove notification\n >>> plc.del_device_notification(hnotification, huser)", "id": "f13258:c0:m14"} {"signature": "def del_device_notification(self, notification_handle, user_handle):", "body": "if self._port is not None:adsSyncDelDeviceNotificationReqEx(self._port, self._adr, notification_handle, user_handle)", "docstring": "Remove a device notification.\n\n :param notification_handle: address of the variable that contains\n the handle of the notification\n :param user_handle: user handle", "id": "f13258:c0:m15"} {"signature": "@propertydef is_open(self):", "body": "return self._open", "docstring": "Show the current connection state.\n\n :return: True if connection is open", "id": "f13258:c0:m16"} {"signature": "def set_timeout(self, ms):", "body": "if self._port is not None:adsSyncSetTimeoutEx(self._port, ms)", "docstring": "Set Timeout.", "id": "f13258:c0:m17"} {"signature": "def notification(self, plc_datatype=None):", "body": "def notification_decorator(func):def func_wrapper(notification, data_name):contents = notification.contentsdata = contents.datadata_size = contents.cbSampleSizedatatype_map = {PLCTYPE_BOOL: \"\",PLCTYPE_BYTE: \"\",PLCTYPE_DINT: \"\",PLCTYPE_DWORD: \"\",PLCTYPE_INT: \"\",PLCTYPE_LREAL: \"\",PLCTYPE_REAL: \"\",PLCTYPE_SINT: \"\",PLCTYPE_UDINT: \"\",PLCTYPE_UINT: \"\",PLCTYPE_USINT: \"\",PLCTYPE_WORD: \"\",} if plc_datatype == PLCTYPE_STRING:dest = (c_ubyte * data_size)()memmove(addressof(dest), addressof(data), data_size)value = bytearray(dest).split(b\"\", )[].decode(\"\")elif issubclass(plc_datatype, Structure):value = plc_datatype()fit_size = min(data_size, sizeof(value))memmove(addressof(value), addressof(data), fit_size)elif plc_datatype not in datatype_map:value = dataelse:value = struct.unpack(datatype_map[plc_datatype], bytearray(data)[:data_size])[]dt = filetime_to_dt(contents.nTimeStamp)return func(contents.hNotification, data_name, dt, 
value)return func_wrapperreturn notification_decorator", "docstring": "Decorate a callback function.\n\n **Decorator**.\n\n A decorator that can be used for callback functions in order to\n convert the data of the NotificationHeader into the fitting\n Python type.\n\n :param plc_datatype: The PLC datatype that needs to be converted. This can\n be any basic PLC datatype or a `ctypes.Structure`.\n\n The callback functions need to be of the following type:\n\n >>> def callback(handle, name, timestamp, value)\n\n * `handle`: the notification handle\n * `name`: the variable name\n * `timestamp`: the timestamp as datetime value\n * `value`: the converted value of the variable\n\n **Usage**:\n\n >>> import pyads\n >>>\n >>> plc = pyads.Connection('172.18.3.25.1.1', 851)\n >>>\n >>>\n >>> @plc.notification(pyads.PLCTYPE_STRING)\n >>> def callback(handle, name, timestamp, value):\n >>> print(handle, name, timestamp, value)\n >>>\n >>>\n >>> with plc:\n >>> attr = pyads.NotificationAttrib(20,\n >>> pyads.ADSTRANS_SERVERCYCLE)\n >>> handles = plc.add_device_notification('GVL.test', attr,\n >>> callback)\n >>> while True:\n >>> pass", "id": "f13258:c0:m18"} {"signature": "def __init__(self, stAdsVersion):", "body": "self.version = stAdsVersion.versionself.revision = stAdsVersion.revisionself.build = stAdsVersion.build", "docstring": "Create new AdsVersion object.\n\n :param pyads.constants.SAdsVersion stAdsVersion: ctypes structure\n with the version info", "id": "f13260:c1:m0"} {"signature": "def __init__(self, netid=None, port=None):", "body": "self._ams_addr = SAmsAddr()if netid is not None:self.netid = netidif port is not None:self.port = port", "docstring": "Create a new AmsAddr object by a given netid and port.\n\n :param netid: NetId of an ADS device\n :param port: port of an ADS device", "id": "f13260:c4:m0"} {"signature": "def toString(self):", "body": "return self.netid + \"\" + str(self._ams_addr.port)", "docstring": "Textual representation of the AMS address.\n\n :rtype: string\n :return: textual representation of the AMS adress", "id": "f13260:c4:m1"} {"signature": "@propertydef netid(self):", "body": "return ''.join(map(str, self._ams_addr.netId.b))", "docstring": "Netid of the AmsAddress.\n\n The Netid is always returned as a String. 
If the NetId is set\n it can be passed as a String or as a SAmsNetId struct.", "id": "f13260:c4:m2"} {"signature": "@propertydef port(self):", "body": "return self._ams_addr.port", "docstring": "Port of the AmsAddress object.", "id": "f13260:c4:m4"} {"signature": "def amsAddrStruct(self):", "body": "return self._ams_addr", "docstring": "Return the c-types structure SAmsAddr.", "id": "f13260:c4:m6"} {"signature": "def netIdStruct(self):", "body": "return self._ams_addr.netId", "docstring": "Return the c-types structure SAmsNetId.", "id": "f13260:c4:m7"} {"signature": "def setAdr(self, adrString):", "body": "self.netid = adrString", "docstring": "Set the AMS-address according to the given IP-address.\n\n :type adrString: string\n :param adrString: ip-address of an ADS device", "id": "f13260:c4:m8"} {"signature": "def __repr__(self):", "body": "return ''.format(self.netid, self.port)", "docstring": "Return object name.", "id": "f13260:c4:m9"} {"signature": "def __init__(self, length, trans_mode=ADSTRANS_SERVERONCHA,max_delay=, cycle_time=):", "body": "self._attrib = SAdsNotificationAttrib()if length:self._attrib.cbLength = lengthif trans_mode:self._attrib.nTransMode = trans_modeif max_delay:self._attrib.nMaxDelay = int(max_delay * )if cycle_time:self._attrib.nCycleTime = int(cycle_time * )", "docstring": "Create a new NotificationAttrib object.\n\n :param int length: length of the data\n :param int trans_mode: transmission mode\n :param float max_delay: maximum delay in ms\n :param float cycle_time: cycle time in ms", "id": "f13260:c5:m0"} {"signature": "def notificationAttribStruct(self):", "body": "return self._attrib", "docstring": "Return the raw struct.", "id": "f13260:c5:m1"} {"signature": "@propertydef length(self):", "body": "return self._attrib.cbLength", "docstring": "Notification data length.", "id": "f13260:c5:m2"} {"signature": "@propertydef trans_mode(self):", "body": "return self._attrib.nTransMode", "docstring": "Mode of transmission.\n\n This can be one of the following:\n\n * ADSTRANS_NOTRANS\n * ADSTRANS_CLIENTCYCLE\n * ADSTRANS_CLIENT1REQ\n * ADSTRANS_SERVERCYCLE\n * ADSTRANS_SERVERONCHA", "id": "f13260:c5:m4"} {"signature": "@propertydef max_delay(self):", "body": "return self._attrib.nMaxDelay", "docstring": "Maximum allowed delay between notifications in ms.", "id": "f13260:c5:m6"} {"signature": "@propertydef cycle_time(self):", "body": "return self._attrib.nCycleTime", "docstring": "Notification cycle time in ms for cycle transmission mode.", "id": "f13260:c5:m8"} {"signature": "def __repr__(self):", "body": "return (''.format(self.length, self.trans_mode, self.max_delay,self.cycle_time))", "docstring": "Return object name.", "id": "f13260:c5:m10"} {"signature": "def router_function(fn):", "body": "@wraps(fn)def wrapper(*args, **kwargs):if platform_is_windows(): raise RuntimeError(\"\"\"\")return fn(*args, **kwargs)return wrapper", "docstring": "Raise a runtime error if on Win32 systems.\n\n Decorator.\n\n Decorator for functions that interact with the router for the Linux\n implementation of the ADS library.\n\n Unlike the Windows implementation which uses a separate router daemon,\n the Linux library manages AMS routing in-process. As such, routing must be\n configured programmatically via the provided API. 
These endpoints are\n invalid on Win32 systems, so an exception will be raised.", "id": "f13261:m0"} {"signature": "@router_functiondef adsAddRoute(net_id, ip_address):", "body": "add_route = _adsDLL.AdsAddRouteadd_route.restype = ctypes.c_longip_address_p = ctypes.c_char_p(ip_address.encode(\"\"))error_code = add_route(net_id, ip_address_p)if error_code:raise ADSError(error_code)", "docstring": "Establish a new route in the AMS Router.\n\n :param pyads.structs.SAmsNetId net_id: net id of routing endpoint\n :param str ip_address: ip address of the routing endpoint", "id": "f13261:m1"} {"signature": "@router_functiondef adsDelRoute(net_id):", "body": "delete_route = _adsDLL.AdsDelRoutedelete_route(net_id)", "docstring": "Remove existing route from the AMS Router.\n\n :param pyads.structs.SAmsNetId net_id: net id associated with the routing\n entry which is to be removed from the router.", "id": "f13261:m2"} {"signature": "def adsPortOpenEx():", "body": "port_open_ex = _adsDLL.AdsPortOpenExport_open_ex.restype = ctypes.c_longport = port_open_ex()if port == :raise RuntimeError(\"\")return port", "docstring": "Connect to the TwinCAT message router.\n\n :rtype: int\n :return: port number", "id": "f13261:m3"} {"signature": "def adsPortCloseEx(port):", "body": "port_close_ex = _adsDLL.AdsPortCloseExport_close_ex.restype = ctypes.c_longerror_code = port_close_ex(port)if error_code:raise ADSError(error_code)", "docstring": "Close the connection to the TwinCAT message router.", "id": "f13261:m4"} {"signature": "def adsGetLocalAddressEx(port):", "body": "get_local_address_ex = _adsDLL.AdsGetLocalAddressExams_address_struct = SAmsAddr()error_code = get_local_address_ex(port, ctypes.pointer(ams_address_struct))if error_code:raise ADSError(error_code)local_ams_address = AmsAddr()local_ams_address._ams_addr = ams_address_structreturn local_ams_address", "docstring": "Return the local AMS-address and the port number.\n\n :rtype: pyads.structs.AmsAddr\n :return: AMS-address", "id": "f13261:m5"} {"signature": "def adsSetLocalAddress(ams_netid):", "body": "set_local_address = _adsDLL.AdsSetLocalAddressset_local_address(ams_netid)", "docstring": "Change the local NetId.\n\n :param pyads.structs.SAmsNetId ams_netid: new AmsNetID\n :rtype: None", "id": "f13261:m6"} {"signature": "def adsSyncReadStateReqEx(port, address):", "body": "sync_read_state_request = _adsDLL.AdsSyncReadStateReqExams_address_pointer = ctypes.pointer(address.amsAddrStruct())ads_state = ctypes.c_int()ads_state_pointer = ctypes.pointer(ads_state)device_state = ctypes.c_int()device_state_pointer = ctypes.pointer(device_state)error_code = sync_read_state_request(port, ams_address_pointer, ads_state_pointer, device_state_pointer)if error_code:raise ADSError(error_code)return (ads_state.value, device_state.value)", "docstring": "Read the current ADS-state and the machine-state.\n\n Read the current ADS-state and the machine-state from the\n ADS-server.\n\n :param pyads.structs.AmsAddr address: local or remote AmsAddr\n :rtype: (int, int)\n :return: ads_state, device_state", "id": "f13261:m7"} {"signature": "def adsSyncReadDeviceInfoReqEx(port, address):", "body": "sync_read_device_info_request = _adsDLL.AdsSyncReadDeviceInfoReqExams_address_pointer = ctypes.pointer(address.amsAddrStruct())device_name_buffer = ctypes.create_string_buffer()device_name_pointer = ctypes.pointer(device_name_buffer)ads_version = SAdsVersion()ads_version_pointer = ctypes.pointer(ads_version)error_code = sync_read_device_info_request(port, ams_address_pointer, 
device_name_pointer, ads_version_pointer)if error_code:raise ADSError(error_code)return (device_name_buffer.value.decode(), AdsVersion(ads_version))", "docstring": "Read the name and the version number of the ADS-server.\n\n :param int port: local AMS port as returned by adsPortOpenEx()\n :param pyads.structs.AmsAddr address: local or remote AmsAddr\n :rtype: string, AdsVersion\n :return: device name, version", "id": "f13261:m8"} {"signature": "def adsSyncWriteControlReqEx(port, address, ads_state, device_state, data, plc_data_type):", "body": "sync_write_control_request = _adsDLL.AdsSyncWriteControlReqExams_address_pointer = ctypes.pointer(address.amsAddrStruct())ads_state_c = ctypes.c_ulong(ads_state)device_state_c = ctypes.c_ulong(device_state)if plc_data_type == PLCTYPE_STRING:data = ctypes.c_char_p(data.encode(\"\"))data_pointer = datadata_length = len(data_pointer.value) + else:data = plc_data_type(data)data_pointer = ctypes.pointer(data)data_length = ctypes.sizeof(data)error_code = sync_write_control_request(port,ams_address_pointer,ads_state_c,device_state_c,data_length,data_pointer,)if error_code:raise ADSError(error_code)", "docstring": "Change the ADS state and the machine-state of the ADS-server.\n\n :param int port: local AMS port as returned by adsPortOpenEx()\n :param pyads.structs.AmsAddr adr: local or remote AmsAddr\n :param int ads_state: new ADS-state, according to ADSTATE constants\n :param int device_state: new machine-state\n :param data: additional data\n :param int plc_data_type: plc datatype, according to PLCTYPE constants", "id": "f13261:m9"} {"signature": "def adsSyncWriteReqEx(port, address, index_group, index_offset, value, plc_data_type):", "body": "sync_write_request = _adsDLL.AdsSyncWriteReqExams_address_pointer = ctypes.pointer(address.amsAddrStruct())index_group_c = ctypes.c_ulong(index_group)index_offset_c = ctypes.c_ulong(index_offset)if plc_data_type == PLCTYPE_STRING:data = ctypes.c_char_p(value.encode(\"\"))data_pointer = data data_length = len(data_pointer.value) + else:if type(plc_data_type).__name__ == \"\":data = plc_data_type(*value)else:data = plc_data_type(value)data_pointer = ctypes.pointer(data)data_length = ctypes.sizeof(data)error_code = sync_write_request(port,ams_address_pointer,index_group_c,index_offset_c,data_length,data_pointer,)if error_code:raise ADSError(error_code)", "docstring": "Send data synchronous to an ADS-device.\n\n :param int port: local AMS port as returned by adsPortOpenEx()\n :param pyads.structs.AmsAddr address: local or remote AmsAddr\n :param int indexGroup: PLC storage area, according to the INDEXGROUP\n constants\n :param int index_offset: PLC storage address\n :param value: value to write to the storage address of the PLC\n :param int plc_data_type: type of the data given to the PLC,\n according to PLCTYPE constants", "id": "f13261:m10"} {"signature": "def adsSyncReadWriteReqEx2(port,address,index_group,index_offset,read_data_type,value,write_data_type,return_ctypes=False,):", "body": "sync_read_write_request = _adsDLL.AdsSyncReadWriteReqEx2ams_address_pointer = ctypes.pointer(address.amsAddrStruct())index_group_c = ctypes.c_ulong(index_group)index_offset_c = ctypes.c_ulong(index_offset)if read_data_type == PLCTYPE_STRING:read_data = (STRING_BUFFER * PLCTYPE_STRING)()else:read_data = read_data_type()read_data_pointer = ctypes.pointer(read_data)read_length = ctypes.c_ulong(ctypes.sizeof(read_data))bytes_read = ctypes.c_ulong()bytes_read_pointer = ctypes.pointer(bytes_read)if write_data_type == 
PLCTYPE_STRING:write_data_pointer = ctypes.c_char_p(value.encode(\"\")) write_length = len(value) + else:if type(write_data_type).__name__ == \"\":write_data = write_data_type(*value)else:write_data = write_data_type(value)write_data_pointer = ctypes.pointer(write_data)write_length = ctypes.sizeof(write_data)err_code = sync_read_write_request(port,ams_address_pointer,index_group_c,index_offset_c,read_length,read_data_pointer,write_length,write_data_pointer,bytes_read_pointer,)if err_code:raise ADSError(err_code)if read_data_type != PLCTYPE_STRING and bytes_read.value != read_length.value:raise RuntimeError(\"\".format(read_length.value, bytes_read.value))if return_ctypes:return read_dataif read_data_type == PLCTYPE_STRING:return read_data.value.decode(\"\")if type(read_data_type).__name__ == \"\":return [i for i in read_data]if hasattr(read_data, \"\"):return read_data.valuereturn read_data", "docstring": "Read and write data synchronous from/to an ADS-device.\n\n :param int port: local AMS port as returned by adsPortOpenEx()\n :param pyads.structs.AmsAddr address: local or remote AmsAddr\n :param int index_group: PLC storage area, according to the INDEXGROUP\n constants\n :param int index_offset: PLC storage address\n :param Type read_data_type: type of the data given to the PLC to respond to,\n according to PLCTYPE constants\n :param value: value to write to the storage address of the PLC\n :param Type write_data_type: type of the data given to the PLC, according to\n PLCTYPE constants\n :param bool return_ctypes: return ctypes instead of python types if True\n (default: False)\n :rtype: read_data_type\n :return: value: value read from PLC", "id": "f13261:m11"} {"signature": "def adsSyncReadReqEx2(port, address, index_group, index_offset, data_type, return_ctypes=False):", "body": "sync_read_request = _adsDLL.AdsSyncReadReqEx2ams_address_pointer = ctypes.pointer(address.amsAddrStruct())index_group_c = ctypes.c_ulong(index_group)index_offset_c = ctypes.c_ulong(index_offset)if data_type == PLCTYPE_STRING:data = (STRING_BUFFER * PLCTYPE_STRING)()else:data = data_type()data_pointer = ctypes.pointer(data)data_length = ctypes.c_ulong(ctypes.sizeof(data))bytes_read = ctypes.c_ulong()bytes_read_pointer = ctypes.pointer(bytes_read)error_code = sync_read_request(port,ams_address_pointer,index_group_c,index_offset_c,data_length,data_pointer,bytes_read_pointer,)if error_code:raise ADSError(error_code)if data_type != PLCTYPE_STRING and bytes_read.value != data_length.value:raise RuntimeError(\"\".format(data_length.value, bytes_read.value))if return_ctypes:return dataif data_type == PLCTYPE_STRING:return data.value.decode(\"\")if type(data_type).__name__ == \"\":return [i for i in data]if hasattr(data, \"\"):return data.valuereturn data", "docstring": "Read data synchronous from an ADS-device.\n\n :param int port: local AMS port as returned by adsPortOpenEx()\n :param pyads.structs.AmsAddr address: local or remote AmsAddr\n :param int index_group: PLC storage area, according to the INDEXGROUP\n constants\n :param int index_offset: PLC storage address\n :param Type data_type: type of the data given to the PLC, according to\n PLCTYPE constants\n :param bool return_ctypes: return ctypes instead of python types if True\n (default: False)\n :rtype: data_type\n :return: value: **value**", "id": "f13261:m12"} {"signature": "def adsSyncReadByNameEx(port, address, data_name, data_type, return_ctypes=False):", "body": "handle = 
adsSyncReadWriteReqEx2(port,address,ADSIGRP_SYM_HNDBYNAME,,PLCTYPE_UDINT,data_name,PLCTYPE_STRING,)value = adsSyncReadReqEx2(port, address, ADSIGRP_SYM_VALBYHND, handle, data_type, return_ctypes)adsSyncWriteReqEx(port, address, ADSIGRP_SYM_RELEASEHND, , handle, PLCTYPE_UDINT)return value", "docstring": "Read data synchronous from an ADS-device from data name.\n\n :param int port: local AMS port as returned by adsPortOpenEx()\n :param pyads.structs.AmsAddr address: local or remote AmsAddr\n :param string data_name: data name\n :param Type data_type: type of the data given to the PLC, according to\n PLCTYPE constants\n :param bool return_ctypes: return ctypes instead of python types if True\n (default: False)\n :rtype: data_type\n :return: value: **value**", "id": "f13261:m13"} {"signature": "def adsSyncWriteByNameEx(port, address, data_name, value, data_type):", "body": "handle = adsSyncReadWriteReqEx2(port,address,ADSIGRP_SYM_HNDBYNAME,,PLCTYPE_UDINT,data_name,PLCTYPE_STRING,)adsSyncWriteReqEx(port, address, ADSIGRP_SYM_VALBYHND, handle, value, data_type)adsSyncWriteReqEx(port, address, ADSIGRP_SYM_RELEASEHND, , handle, PLCTYPE_UDINT)", "docstring": "Send data synchronous to an ADS-device from data name.\n\n :param int port: local AMS port as returned by adsPortOpenEx()\n :param pyads.structs.AmsAddr address: local or remote AmsAddr\n :param string data_name: PLC storage address\n :param value: value to write to the storage address of the PLC\n :param Type data_type: type of the data given to the PLC,\n according to PLCTYPE constants", "id": "f13261:m14"} {"signature": "def adsSyncAddDeviceNotificationReqEx(port, adr, data_name, pNoteAttrib, callback, user_handle=None):", "body": "global callback_storeif NOTEFUNC is None:raise TypeError(\"\")adsSyncAddDeviceNotificationReqFct = _adsDLL.AdsSyncAddDeviceNotificationReqExpAmsAddr = ctypes.pointer(adr.amsAddrStruct())hnl = adsSyncReadWriteReqEx2(port, adr, ADSIGRP_SYM_HNDBYNAME, , PLCTYPE_UDINT, data_name, PLCTYPE_STRING)nIndexGroup = ctypes.c_ulong(ADSIGRP_SYM_VALBYHND)nIndexOffset = ctypes.c_ulong(hnl)attrib = pNoteAttrib.notificationAttribStruct()pNotification = ctypes.c_ulong()nHUser = ctypes.c_ulong(hnl)if user_handle is not None:nHUser = ctypes.c_ulong(user_handle)adsSyncAddDeviceNotificationReqFct.argtypes = [ctypes.c_ulong,ctypes.POINTER(SAmsAddr),ctypes.c_ulong,ctypes.c_ulong,ctypes.POINTER(SAdsNotificationAttrib),NOTEFUNC,ctypes.c_ulong,ctypes.POINTER(ctypes.c_ulong),]adsSyncAddDeviceNotificationReqFct.restype = ctypes.c_longdef wrapper(addr, notification, user):return callback(notification, data_name)c_callback = NOTEFUNC(wrapper)err_code = adsSyncAddDeviceNotificationReqFct(port,pAmsAddr,nIndexGroup,nIndexOffset,ctypes.byref(attrib),c_callback,nHUser,ctypes.byref(pNotification),)if err_code:raise ADSError(err_code)callback_store[pNotification.value] = c_callbackreturn (pNotification.value, hnl)", "docstring": "Add a device notification.\n\n :param int port: local AMS port as returned by adsPortOpenEx()\n :param pyads.structs.AmsAddr adr: local or remote AmsAddr\n :param string data_name: PLC storage address\n :param pyads.structs.NotificationAttrib pNoteAttrib: notification attributes\n :param callback: Callback function to handle notification\n :param user_handle: User Handle\n :rtype: (int, int)\n :returns: notification handle, user handle", "id": "f13261:m15"} {"signature": "def adsSyncDelDeviceNotificationReqEx(port, adr, notification_handle, user_handle):", "body": "adsSyncDelDeviceNotificationReqFct = 
_adsDLL.AdsSyncDelDeviceNotificationReqExpAmsAddr = ctypes.pointer(adr.amsAddrStruct())nHNotification = ctypes.c_ulong(notification_handle)err_code = adsSyncDelDeviceNotificationReqFct(port, pAmsAddr, nHNotification)callback_store.pop(notification_handle, None)if err_code:raise ADSError(err_code)adsSyncWriteReqEx(port, adr, ADSIGRP_SYM_RELEASEHND, , user_handle, PLCTYPE_UDINT)", "docstring": "Remove a device notification.\n\n :param int port: local AMS port as returned by adsPortOpenEx()\n :param pyads.structs.AmsAddr adr: local or remote AmsAddr\n :param int notification_handle: Notification Handle\n :param int user_handle: User Handle", "id": "f13261:m16"} {"signature": "def adsSyncSetTimeoutEx(port, nMs):", "body": "adsSyncSetTimeoutFct = _adsDLL.AdsSyncSetTimeoutExcms = ctypes.c_long(nMs)err_code = adsSyncSetTimeoutFct(port, cms)if err_code:raise ADSError(err_code)", "docstring": "Set Timeout.\n\n :param int port: local AMS port as returned by adsPortOpenEx()\n :param int nMs: timeout in ms", "id": "f13261:m17"} {"signature": "def __str__(self):", "body": "return \"\" + self.msg", "docstring": "Return text representation of the object.", "id": "f13261:c0:m1"} {"signature": "def dt_to_filetime(dt):", "body": "if (dt.tzinfo is None) or (dt.tzinfo.utcoffset(dt) is None):dt = dt.replace(tzinfo=utc)return EPOCH_AS_FILETIME + (timegm(dt.timetuple()) *HUNDREDS_OF_NANOSECONDS)", "docstring": "Convert a datetime to Microsoft filetime format.\n\n If the object is time zone-naive, it is forced to UTC before conversion.\n\n >>> \"%.0f\" % dt_to_filetime(datetime(2009, 7, 25, 23, 0))\n '128930364000000000'\n >>> dt_to_filetime(datetime(1970, 1, 1, 0, 0, tzinfo=utc))\n 116444736000000000L\n >>> dt_to_filetime(datetime(1970, 1, 1, 0, 0))\n 116444736000000000L", "id": "f13262:m0"} {"signature": "def filetime_to_dt(ft):", "body": "return datetime.utcfromtimestamp((ft - EPOCH_AS_FILETIME) /HUNDREDS_OF_NANOSECONDS)", "docstring": "Convert a Microsoft filetime number to a Python datetime.\n\n The new datetime object is time zone-naive but is equivalent to tzinfo=utc.\n\n >>> filetime_to_dt(116444736000000000)\n datetime.datetime(1970, 1, 1, 0, 0)\n >>> filetime_to_dt(128930364000000000)\n datetime.datetime(2009, 7, 25, 23, 0)", "id": "f13262:m1"} {"signature": "def utcoffset(self, dt):", "body": "return ZERO", "docstring": "Return offset of localtime from UTC time.", "id": "f13262:c0:m0"} {"signature": "def tzname(self, dt):", "body": "return \"\"", "docstring": "Return name of the timezone.", "id": "f13262:c0:m1"} {"signature": "def dst(self, dt):", "body": "return ZERO", "docstring": "Return daylight savings time.", "id": "f13262:c0:m2"} {"signature": "def platform_is_linux():", "body": "return sys.platform.startswith('') orsys.platform.startswith('')", "docstring": "Return True if current platform is Linux or Mac OS.", "id": "f13263:m0"} {"signature": "def platform_is_windows():", "body": "return sys.platform == ''", "docstring": "Return True if current platform is Windows.", "id": "f13263:m1"} {"signature": "def remove_binaries():", "body": "patterns = (\"\",\"\",\"\",\"\",\"\",)for f in functools.reduce(operator.iconcat, [glob.glob(p) for p in patterns]):os.remove(f)", "docstring": "Remove all binary files in the adslib directory.", "id": "f13265:m5"} {"signature": "def get_child_models(self):", "body": "child_models = []for related_object in get_all_related_objects(self.base_model._meta):model = getattr(related_object, '', related_object.model)if issubclass(model, self.base_model):class 
SettingValueAdmin(self.base_admin_class):passchild_models.append((model, SettingValueAdmin))return child_models", "docstring": "Returns a list of ``(Model, ModelAdmin)`` tuples for ``base_model``\nsubclasses.", "id": "f13266:c0:m0"} {"signature": "def get_value(self, obj):", "body": "return obj.value", "docstring": "Returns the ``value`` field from the child model.", "id": "f13266:c0:m1"} {"signature": "def get_value(self, context, default):", "body": "if default is None:settings = self.setting_model.objects.as_dict()else:settings = self.setting_model.objects.as_dict(default=default)return settings", "docstring": "Returns a ``SettingDict`` object.", "id": "f13269:c0:m0"} {"signature": "def get_value(self, context, name, default):", "body": "settings = self.setting_model.objects.filter(name=name)if default is None:settings = settings.as_dict()else:settings = settings.as_dict(default=default)value = settings[name]return value", "docstring": "Returns the value of the named setting.", "id": "f13269:c1:m0"} {"signature": "def render_tag(self, context, name, nodelist):", "body": "settings = self.setting_model.objects.filter(name=name).as_dict()try:value = settings[name]except KeyError:value = settings[name] = nodelist.render(context)return value", "docstring": "Returns the value of the named setting.", "id": "f13269:c2:m0"} {"signature": "def settings(request):", "body": "settings = Setting.objects.all().as_dict(default='')context = {'': settings,}return context", "docstring": "Adds a ``SettingDict`` object for the ``Setting`` model to the context as\n``SETTINGS``. Automatically creates non-existent settings with an empty\nstring as the default value.", "id": "f13275:m0"} {"signature": "def __init__(self, model=None, queryset=None, default=None):", "body": "if model:self.model = modelself.queryset = model.objects.all()elif queryset is not None: if model:raise ValueError('')self.model = queryset.modelself.queryset = querysetelse:raise ValueError('')self.default = defaultself.empty_cache = Truesuper(SettingDict, self).__init__()", "docstring": "All setting values for the given model or queryset will be lazily\nloaded into the cache on first access.\n\nIf ``default`` is not ``None``, non-existent settings will be created\non access.", "id": "f13278:c0:m0"} {"signature": "def __delitem__(self, key):", "body": "if self.empty_cache:self.refresh()deleted = self.model.objects.filter(name=key).delete()try:super(SettingDict, self).__delitem__(key)except KeyError:if not deleted:raise", "docstring": "Deletes a setting from the dict and the database.", "id": "f13278:c0:m1"} {"signature": "def __getitem__(self, key):", "body": "if self.empty_cache:self.refresh()try:value = super(SettingDict, self).__getitem__(key)except KeyError:try:value = self.model.objects.get(name=key).valueexcept self.model.DoesNotExist:if self.default is None:raise KeyError(key)value = self.defaultself.model.objects.create(name=key, value=value)super(SettingDict, self).__setitem__(key, value)return value", "docstring": "Returns the setting value for ``key`` from the cache if possible,\notherwise from the database. 
Adds values that are fetched from the\ndatabase to the cache.", "id": "f13278:c0:m2"} {"signature": "def __setitem__(self, key, value):", "body": "if self.empty_cache:self.refresh()with transaction.atomic():self.model.objects.filter(name=key).delete()self.model.objects.create(name=key, value=value)super(SettingDict, self).__setitem__(key, value)", "docstring": "Tries to delete and then creates a setting, in case the value type has\nchanged. Otherwise, we would need to get, update (if same type), or\ndelete and create (if not same type).", "id": "f13278:c0:m3"} {"signature": "def refresh(self):", "body": "args = [(obj.name, obj.value) for obj in self.queryset.all()]super(SettingDict, self).update(args)self.empty_cache = False", "docstring": "Updates the cache with setting values from the database.", "id": "f13278:c0:m4"} {"signature": "def as_dict(self, default=None):", "body": "settings = SettingDict(queryset=self, default=default)return settings", "docstring": "Returns a ``SettingDict`` object for this queryset.", "id": "f13279:c0:m0"} {"signature": "def create(self, name, value):", "body": "if value is None:raise ValueError('')model = Setting.get_model_for_value(value)obj = super(SettingQuerySet, model.objects.all()).create(name=name, value=value)return obj", "docstring": "Creates and returns an object of the appropriate type for ``value``.", "id": "f13279:c0:m1"} {"signature": "@classmethoddef get_model_for_value(cls, value):", "body": "for related_object in get_all_related_objects(cls._meta):model = getattr(related_object, '', related_object.model)if issubclass(model, cls):if model.is_compatible(value):return modelraise ValueError('' % value)", "docstring": "Iterates through setting value subclasses, returning one that is\ncompatible with the type of ``value``. Calls ``is_compatible()`` on\neach subclass.", "id": "f13279:c2:m0"} {"signature": "@classmethoddef is_compatible(cls, value):", "body": "if not hasattr(cls, ''):raise NotImplementedError('''')return isinstance(value, cls.value_type)", "docstring": "Returns ``True`` if this model should be used to store ``value``.\n\nChecks if ``value`` is an instance of ``value_type``. Override this\nmethod if you need more advanced behaviour. For example, to distinguish\nbetween single and multi-line text.", "id": "f13279:c3:m0"} {"signature": "async def trigger(self, event, data=None, socket_id=None):", "body": "json_data = json.dumps(data, cls=self.pusher.encoder)query_string = self.signed_query(event, json_data, socket_id)signed_path = \"\" % (self.path, query_string)pusher = self.pusherabsolute_url = pusher.get_absolute_path(signed_path)response = await pusher.http.post(absolute_url, data=json_data,headers=[('', '')])response.raise_for_status()return response.status_code == ", "docstring": "Trigger an ``event`` on this channel", "id": "f13281:c1:m1"} {"signature": "def bind(self, event, callback):", "body": "self._registered_callbacks[event] = callbackreturn True", "docstring": "Bind to an ``event`` in this channel. 
The ``callback`` function is\n executed every time the event is triggered.", "id": "f13281:c1:m2"} {"signature": "async def connect(self):", "body": "if not self._consumer:waiter = self._waiter = asyncio.Future()try:address = self._websocket_host()self.logger.info('', address)self._consumer = await self.http.get(address)if self._consumer.status_code != :raise PusherError(\"\")except Exception as exc:waiter.set_exception(exc)raiseelse:await waiterreturn self._consumer", "docstring": "Connect to a Pusher websocket", "id": "f13281:c2:m2"} {"signature": "def on_message(self, websocket, message):", "body": "waiter = self._waiterself._waiter = Noneencoded = json.loads(message)event = encoded.get('')channel = encoded.get('')data = json.loads(encoded.get(''))try:if event == PUSHER_ERROR:raise PusherError(data[''], data[''])elif event == PUSHER_CONNECTION:self.socket_id = data.get('')self.logger.info('',self.socket_id)waiter.set_result(self.socket_id)elif event == PUSHER_SUBSCRIBED:self.logger.info('',encoded.get(''))elif channel:self[channel]._event(event, data)except Exception as exc:if waiter:waiter.set_exception(exc)else:self.logger.exception('')", "docstring": "Handle websocket incoming messages", "id": "f13281:c2:m5"} {"signature": "@propertydef http_session(self):", "body": "return self.endpoint.http_session", "docstring": "HTTP session object", "id": "f13282:c0:m3"} {"signature": "@propertydef http_session(self):", "body": "return self.client.http_session", "docstring": "HTTP session object", "id": "f13282:c2:m1"} {"signature": "def get_git_changeset(filename=None):", "body": "dirname = os.path.dirname(filename or __file__)git_show = sh('',cwd=dirname)timestamp = git_show.partition('')[]try:timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))except ValueError:return Nonereturn timestamp.strftime('')", "docstring": "Returns a numeric identifier of the latest git changeset.\n\n The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.\n This value isn't guaranteed to be unique, but collisions are very unlikely,\n so it's sufficient for generating the development version numbers.", "id": "f13283:m2"} {"signature": "async def upload_file(self, bucket, file, uploadpath=None, key=None,ContentType=None, **kw):", "body": "is_filename = Falseif hasattr(file, ''):if hasattr(file, ''):file.seek()file = file.read()size = len(file)elif key:size = len(file)else:is_filename = Truesize = os.stat(file).st_sizekey = os.path.basename(file)assert key, ''if not ContentType:ContentType, _ = mimetypes.guess_type(key)if uploadpath:if not uploadpath.endswith(''):uploadpath = '' % uploadpathkey = '' % (uploadpath, key)params = dict(Bucket=bucket, Key=key)if not ContentType:ContentType = ''params[''] = ContentTypeif size > MULTI_PART_SIZE and is_filename:resp = await _multipart(self, file, params)elif is_filename:with open(file, '') as fp:params[''] = fp.read()resp = await self.put_object(**params)else:params[''] = fileresp = await self.put_object(**params)if '' not in resp:resp[''] = keyif '' not in resp:resp[''] = bucketreturn resp", "docstring": "Upload a file to S3 possibly using the multi-part uploader\n Return the key uploaded", "id": "f13284:c0:m0"} {"signature": "async def copy_storage_object(self, source_bucket, source_key,bucket, key):", "body": "info = await self.head_object(Bucket=source_bucket, Key=source_key)size = info['']if size > MULTI_PART_SIZE:result = await _multipart_copy(self, source_bucket, source_key,bucket, key, size)else:result = await 
self.copy_object(Bucket=bucket, Key=key,CopySource=_source_string(source_bucket, source_key))return result", "docstring": "Copy a file from one bucket into another", "id": "f13284:c0:m1"} {"signature": "def upload_folder(self, bucket, folder, key=None, skip=None,content_types=None):", "body": "uploader = FolderUploader(self, bucket, folder, key, skip,content_types)return uploader.start()", "docstring": "Recursively upload a ``folder`` into a bucket.\n\n :param bucket: bucket where to upload the folder to\n :param folder: the folder location in the local file system\n :param key: Optional key where the folder is uploaded\n :param skip: Optional list of files to skip\n :param content_types: Optional dictionary mapping suffixes to\n content types\n :return: a coroutine", "id": "f13284:c0:m2"} {"signature": "async def _upload_file(self, full_path):", "body": "rel_path = os.path.relpath(full_path, self.folder)key = s3_key(os.path.join(self.key, rel_path))ct = self.content_types.get(key.split('')[-])with open(full_path, '') as fp:file = fp.read()try:await self.botocore.upload_file(self.bucket, file, key=key,ContentType=ct)except Exception as exc:LOGGER.error('', key, exc)self.failures[key] = self.all.pop(full_path)returnsize = self.all.pop(full_path)self.success[key] = sizeself.total_size += sizepercentage = *( - len(self.all)/self.total_files)message = ''.format(percentage, key, convert_bytes(size))LOGGER.info(message)", "docstring": "Coroutine for uploading a single file", "id": "f13284:c1:m3"} {"signature": "def get_paginator(self, operation_name):", "body": "if not self.can_paginate(operation_name):raise OperationNotPageableError(operation_name=operation_name)else:actual_operation_name = self._PY_TO_OP_NAME[operation_name]Paginator.PAGE_ITERATOR_CLS = AsyncPageIteratorpaginator = Paginator(getattr(self, operation_name),self._cache[''][actual_operation_name])return paginator", "docstring": "Create a paginator for an operation.\n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is ``create_foo``, and you'd normally invoke the\n operation as ``client.create_foo(**kwargs)``, if the\n ``create_foo`` operation can be paginated, you can use the\n call ``client.get_paginator(\"create_foo\")``.\n :raise OperationNotPageableError: Raised if the operation is not\n pageable. 
You can use the ``client.can_paginate`` method to\n check if an operation is pageable.\n :rtype: L{botocore.paginate.Paginator}\n :return: A paginator object.", "id": "f13289:c1:m3"} {"signature": "def get_session(env_vars=None):", "body": "return AsyncSession(session_vars=env_vars)", "docstring": "Return a new session object.", "id": "f13292:m0"} {"signature": "def get_cache_key(user_or_username, size, prefix):", "body": "if isinstance(user_or_username, get_user_model()):user_or_username = user_or_username.usernamereturn '' % (prefix, user_or_username, size)", "docstring": "Returns a cache key consisting of a username and image size.", "id": "f13303:m0"} {"signature": "def cache_result(func):", "body": "def cache_set(key, value):cache.set(key, value, AVATAR_CACHE_TIMEOUT)return valuedef cached_func(user, size):prefix = func.__name__cached_funcs.add(prefix)key = get_cache_key(user, size, prefix=prefix)return cache.get(key) or cache_set(key, func(user, size))return cached_func", "docstring": "Decorator to cache the result of functions that take a ``user`` and a\n``size`` value.", "id": "f13303:m1"} {"signature": "def invalidate_cache(user, size=None):", "body": "sizes = set(AUTO_GENERATE_AVATAR_SIZES)if size is not None:sizes.add(size)for prefix in cached_funcs:for size in sizes:cache.delete(get_cache_key(user, size, prefix))", "docstring": "Function to be called when saving or changing a user's avatars.", "id": "f13303:m2"} {"signature": "@cache_result@register.simple_tagdef primary_avatar(user, size=AVATAR_DEFAULT_SIZE):", "body": "alt = unicode(user)url = reverse('', kwargs={'' : user, '' : size})return \"\"\"\"\"\" % (url, alt,)", "docstring": "This tag tries to get the default avatar for a user without doing any db\nrequests. It achieves this by linking to a special view that will do all the \nwork for us. If that special view is then cached by a CDN for instance,\nwe will avoid many db calls.", "id": "f13306:m2"} {"signature": "def _get_next(request):", "body": "next = request.POST.get('', request.GET.get('',request.META.get('', None)))if not next:next = request.pathreturn next", "docstring": "The part that's the least straightforward about views in this module is how they\ndetermine their redirects after they have finished computation.\n\nIn short, they will try and determine the next place to go in the following order:\n\n1. If there is a variable named ``next`` in the *POST* parameters, the view will\nredirect to that variable's value.\n2. If there is a variable named ``next`` in the *GET* parameters, the view will\nredirect to that variable's value.\n3. 
If Django can determine the previous page from the HTTP headers, the view will\nredirect to that previous page.", "id": "f13309:m0"} {"signature": "def start(self):", "body": "self.receiver = self.Receiver(self.read,self.write,self.send_lock,self.senders,self.frames_received,callback=self.receive_callback,fcs_nack=self.fcs_nack,)self.receiver.start()", "docstring": "Starts HDLC controller's threads.", "id": "f13317:c0:m1"} {"signature": "def stop(self):", "body": "if self.receiver != None:self.receiver.join()for s in self.senders.values():s.join()", "docstring": "Stops HDLC controller's threads.", "id": "f13317:c0:m2"} {"signature": "def set_send_callback(self, callback):", "body": "if not hasattr(callback, ''):raise TypeError('')self.send_callback = callback", "docstring": "Sets the send callback function.\n\nIf the HDLC controller has already been started, the new\ncallback function will be taken into account for the next\ndata frames to be sent.", "id": "f13317:c0:m3"} {"signature": "def set_receive_callback(self, callback):", "body": "if not hasattr(callback, ''):raise TypeError('')self.receive_callback = callback", "docstring": "Sets the receive callback function.\n\nThis method has to be called before starting the\nHDLC controller.", "id": "f13317:c0:m4"} {"signature": "def set_sending_timeout(self, sending_timeout):", "body": "if sending_timeout >= HDLController.MIN_SENDING_TIMEOUT:self.sending_timeout = sending_timeout", "docstring": "Sets the sending timeout.", "id": "f13317:c0:m5"} {"signature": "def get_senders_number(self):", "body": "return len(self.senders)", "docstring": "Returns the number of active senders.", "id": "f13317:c0:m6"} {"signature": "def send(self, data):", "body": "while len(self.senders) >= self.window:passself.senders[self.new_seq_no] = self.Sender(self.write,self.send_lock,data,self.new_seq_no,timeout=self.sending_timeout,callback=self.send_callback,)self.senders[self.new_seq_no].start()self.new_seq_no = (self.new_seq_no + ) % HDLController.MAX_SEQ_NO", "docstring": "Sends a new data frame.\n\nThis method will block until a new room is available for\na new sender. This limit is determined by the size of the window.", "id": "f13317:c0:m7"} {"signature": "def get_data(self):", "body": "return self.frames_received.get()", "docstring": "Gets the next frame received.\n\nThis method will block until a new data frame is available.", "id": "f13317:c0:m8"} {"signature": "def make_request(self, url, method='', headers=None,data=None, callback=None, errors=STRICT,verify=False, timeout=None, **params):", "body": "if '' not in url.lower():params.update({'': self.api_key})return super(Flights, self).make_request(url, method, headers,data, callback, errors,verify, **params)", "docstring": "Call the `make_request` method from apiwrapper.\nSo we can inject the apikey when it's not available.", "id": "f13321:c0:m1"} {"signature": "def create_session(self, **params):", "body": "service_url = self.PRICING_SESSION_URLreturn self.make_request(service_url,method='',headers=self._headers(),callback=lambda resp: resp.headers[''],data=params)", "docstring": "Create the session\ndate format: YYYY-mm-dd\nlocation: ISO code.\nAfter creating the session,\nthis method will return the poll_url.", "id": "f13321:c0:m2"} {"signature": "def get_result(self, errors=STRICT, **params):", "body": "service_url = self.create_session(**params)return self.poll(service_url, errors=errors)", "docstring": "Get all results, no filtering,\netc. 
by creating and polling the session.", "id": "f13321:c0:m3"} {"signature": "def make_request(self, url, method='', headers=None, data=None,callback=None, errors=STRICT, verify=False, timeout=None, **params):", "body": "error_modes = (STRICT, GRACEFUL, IGNORE)error_mode = errors or GRACEFULif error_mode.lower() not in error_modes:raise ValueError(''% ''.join(error_modes))if callback is None:callback = self._default_resp_callbackrequest = getattr(requests, method.lower())log.debug('' % url)log.debug('' % method)log.debug('' % params)log.debug('' % headers)log.debug('' % timeout)r = request(url, headers=headers, data=data, verify=verify, timeout=timeout, params=params)log.debug('' % r.url)try:r.raise_for_status()return callback(r)except Exception as e:return self._with_error_handling(r, e,error_mode, self.response_format)", "docstring": "Reusable method for performing requests.\n:param url - URL to request\n:param method - request method, default is 'get'\n:param headers - request headers\n:param data - post data\n:param callback - callback to be applied to response,\n default callback will parse response as json object.\n:param errors - specifies communication errors handling mode, possible\n values are:\n * strict (default) - throw an error as soon as one\n occurred\n * graceful - ignore certain errors, e.g. EmptyResponse\n * ignore - ignore all errors and return a result in\n any case.\n NOTE that it DOES NOT mean that no\n exceptions can be\n raised from this method, it mostly ignores\n communication\n related errors.\n * None or empty string equals to default\n:param verify - whether or not to verify SSL cert, default to False\n:param timeout - the timeout of the request in second, default to None\n:param params - additional query parameters for request", "id": "f13322:c5:m2"} {"signature": "@staticmethoddef _with_error_handling(resp, error, mode, response_format):", "body": "def safe_parse(r):try:return APIWrapper._parse_resp(r, response_format)except (ValueError, SyntaxError) as ex:log.error(ex)r.parsed = Nonereturn rif isinstance(error, requests.HTTPError):if resp.status_code == :resp = safe_parse(resp)if resp.parsed is not None:parsed_resp = resp.parsedmessages = []if response_format == '' andparsed_resp.find('') is not None:messages = [e.find('').textfor e in parsed_resp.findall('')]elif response_format == '' and '' in parsed_resp:messages = [e['']for e in parsed_resp['']]error = requests.HTTPError('' % (error, ''.join(messages)), response=resp)elif resp.status_code == :error = requests.HTTPError('' % error,response=resp)if STRICT == mode:raise errorelif GRACEFUL == mode:if isinstance(error, EmptyResponse):log.warning(error)resp.parsed = Nonereturn respelif isinstance(error, requests.HTTPError):if resp.status_code == : log.warning(error)return safe_parse(resp)else:raise errorelse:raise errorelse:log.error(error)return safe_parse(resp)", "docstring": "Static method for error handling.\n\n:param resp - API response\n:param error - Error thrown\n:param mode - Error mode\n:param response_format - XML or json", "id": "f13322:c5:m5"} {"signature": "def poll(self, url, initial_delay=, delay=, tries=, errors=STRICT, is_complete_callback=None, **params):", "body": "time.sleep(initial_delay)poll_response = Noneif is_complete_callback == None:is_complete_callback = self._default_poll_callbackfor n in range(tries):poll_response = self.make_request(url, headers=self._headers(),errors=errors, **params)if is_complete_callback(poll_response):return poll_responseelse:time.sleep(delay)if STRICT == 
errors:raise ExceededRetries(\"\".format(tries))else:return poll_response", "docstring": "Poll the URL\n:param url - URL to poll, should be returned by 'create_session' call\n:param initial_delay - specifies how many seconds to wait before the first poll\n:param delay - specifies how many seconds to wait between the polls\n:param tries - number of polls to perform\n:param errors - errors handling mode, see corresponding parameter in 'make_request' method\n:param params - additional query params for each poll request", "id": "f13322:c5:m6"} {"signature": "def _default_poll_callback(self, poll_resp):", "body": "if poll_resp.parsed is None:return Falsesuccess_list = ['', True, '']status = Noneif self.response_format == '':status = poll_resp.parsed.find('').textelif self.response_format == '':status = poll_resp.parsed.get('', poll_resp.parsed.get(''))if status is None:raise RuntimeError('')return status in success_list", "docstring": "Checks the condition in poll response to determine if it is complete\nand no subsequent poll requests should be done.", "id": "f13322:c5:m7"} {"signature": "def cls_factory(cname):", "body": "class Foo(MetaTest):\"\"\"\"\"\"passFoo.__name__ = cnamereturn Foo", "docstring": "Constructs a class named `cname` inheriting from `MetaTest`.", "id": "f13326:m0"} {"signature": "def produce_classes(search_path, suite_name):", "body": "classes = {}suite_fname = \"\" % suite_namesuite_path = os.sep.join([CIJ_ROOT, \"\", suite_fname])suite = open(suite_path).read().splitlines() if suite_name else []listing = tindex(search_path)if listing is None:return {}tests = suite if suite else listingif not tests:return {}cname = \"\"if suite:cname += suite_name.capitalize()cls = cls_factory(cname)for test in tests: tname = \"\" % test.replace(\"\", \"\").replace(\"\", \"\")setattr(cls, tname, cls.meta_test)classes[cname] = clsreturn classes", "docstring": "Construct a class with a name picked up by test-collection", "id": "f13326:m1"} {"signature": "def get_descriptor_table(version=\"\"):", "body": "if version == \"\":return DescriptorTableDenalielif version == \"\":return DescriptorTableSpec20elif version == \"\":return Noneelse:raise RuntimeError(\"\")", "docstring": "Get descriptor table by version(denali, spec20)", "id": "f13328:m0"} {"signature": "def get_sizeof_descriptor_table(version=\"\"):", "body": "if version == \"\":return sizeof(DescriptorTableDenali)elif version == \"\":return sizeof(DescriptorTableSpec20)elif version == \"\":return else:raise RuntimeError(\"\")", "docstring": "Get sizeof DescriptorTable", "id": "f13328:m1"} {"signature": "def cat_file(path):", "body": "cmd = [\"\", path]status, stdout, _ = cij.ssh.command(cmd, shell=True, echo=True)if status:raise RuntimeError(\"\" % path)return stdout.strip()", "docstring": "Cat file and return content", "id": "f13330:m0"} {"signature": "def env():", "body": "if cij.ssh.env():cij.err(\"\")return nvme = cij.env_to_dict(PREFIX, REQUIRED)nvme[\"\"] = os.path.join(\"\", nvme[\"\"])try:sysfs = os.path.join(\"\", nvme[\"\"], \"\")nvme[\"\"] = cat_file(os.path.join(sysfs, \"\"))if nvme[\"\"] == \"\":luns = \"\"chs = \"\"elif nvme[\"\"] == \"\":luns = \"\"chs = \"\"else:raise RuntimeError(\"\" % nvme[\"\"])nvme[\"\"] = cat_file(os.path.join(sysfs, \"\"))nvme[\"\"] = cat_file(os.path.join(sysfs, luns))nvme[\"\"] = cat_file(os.path.join(sysfs, chs))nvme[\"\"] = str(int(nvme[\"\"]) * int(nvme[\"\"]))nvme[\"\"] = str(int(nvme[\"\"]) * int(nvme[\"\"]))if nvme[\"\"] == \"\":cmd = [\"\", \"\", nvme[\"\"], \"\"]status, stdout, _ = 
cij.ssh.command(cmd, shell=True)if status:raise RuntimeError(\"\")buff = cij.bin.Buffer(types=IdentifyCDS, length=)buff.memcopy(stdout)if buff[].VS[] == :nvme[\"\"] = \"\"else:nvme[\"\"] = \"\"else:nvme[\"\"] = \"\"nvme[\"\"] = str(get_sizeof_descriptor_table(nvme[\"\"]))nvme[\"\"] = str(int(nvme[\"\"]) *int(nvme[\"\"]))except StandardError:traceback.print_exc()return cij.env_export(PREFIX, EXPORTED, nvme)return ", "docstring": "Verify NVME variables and construct exported variables", "id": "f13330:m1"} {"signature": "def fmt(lbaf=):", "body": "if env():cij.err(\"\")return nvme = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)cmd = [\"\", \"\", nvme[\"\"], \"\", str(lbaf)]rcode, _, _ = cij.ssh.command(cmd, shell=True)return rcode", "docstring": "Do format for NVMe device", "id": "f13330:m2"} {"signature": "def exists():", "body": "if env():cij.err(\"\")return nvme = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)cmd = [\"\", \"\", nvme[\"\"], \"\"]rcode, _, _ = cij.ssh.command(cmd, shell=True, echo=False)if rcode:return Falsereturn True", "docstring": "Verify that the ENV defined NVMe device exists", "id": "f13330:m3"} {"signature": "def get_meta(offset, length, output):", "body": "if env():cij.err(\"\")return nvme = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)max_size = with open(output, \"\") as fout:for off in range(offset, length, max_size):size = min(length - off, max_size)cmd = [\"\",nvme[\"\"],\"\",\"\" % off,\"\" % size,\"\"]status, stdout, _ = cij.ssh.command(cmd, shell=True)if status:cij.err(\"\")return fout.write(stdout)return ", "docstring": "Get chunk meta of NVMe device", "id": "f13330:m4"} {"signature": "def comp_meta(file_bef, file_aft, mode=\"\"):", "body": "if env():cij.err(\"\")return nvme = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)num_chk = int(nvme[\"\"])meta_bef = cij.bin.Buffer(types=get_descriptor_table(nvme['']), length=num_chk)meta_aft = cij.bin.Buffer(types=get_descriptor_table(nvme['']), length=num_chk)meta_bef.read(file_bef)meta_aft.read(file_aft)for chk in range(num_chk):ignore = [\"\", \"\"]if mode == \"\" and meta_bef[chk].CS == :ignore.append(\"\")if meta_bef.compare(meta_aft, chk, ignore=ignore):cij.warn(\"\" % chk)meta_bef.dump(chk)cij.warn(\"\" % chk)meta_aft.dump(chk)cij.err(\"\" % chk)return return ", "docstring": "Compare chunk meta, mode=[pfail, power, reboot]", "id": "f13330:m5"} {"signature": "def env():", "body": "if cij.ssh.env():cij.err(\"\")return nvm = cij.env_to_dict(PREFIX, REQUIRED)if \"\" in nvm[\"\"]:nvm[\"\"] = \"\" % nvm[\"\"]else:nvm[\"\"] = \"\" % nvm[\"\"]cij.env_export(PREFIX, EXPORTED, nvm)return ", "docstring": "Verify NVME variables and construct exported variables", "id": "f13331:m0"} {"signature": "def exists():", "body": "if env():cij.err(\"\")return nvm = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)cmd = ['' % nvm[\"\"]]rcode, _, _ = cij.ssh.command(cmd, shell=True, echo=False)return rcode", "docstring": "Verify that the ENV defined NVMe device exists", "id": "f13331:m1"} {"signature": "def dev_get_rprt(dev_name, pugrp=None, punit=None):", "body": "cmd = [\"\", \"\", dev_name]if not (pugrp is None and punit is None):cmd = [\"\", \"\", dev_name, str(pugrp), str(punit)]_, _, _, struct = cij.test.command_to_struct(cmd)if not struct:return Nonereturn struct[\"\"]", "docstring": "Get-log-page chunk information\n\nIf the pugrp and punit is set, then provide report only for that pugrp/punit\n\n@returns the first chunk in the given state if one exists, None otherwise", "id": "f13331:m2"} {"signature": "def dev_get_chunk(dev_name, state, 
pugrp=None, punit=None):", "body": "rprt = dev_get_rprt(dev_name, pugrp, punit)if not rprt:return Nonereturn next((d for d in rprt if d[\"\"] == state), None)", "docstring": "Get a chunk-descriptor for the first chunk in the given state.\n\nIf the pugrp and punit is set, then search only that pugrp/punit\n\n@returns the first chunk in the given state if one exists, None otherwise", "id": "f13331:m3"} {"signature": "@staticmethoddef get_chunk_status(chk, yml):", "body": "cs = yml[''][chk]['']return cs", "docstring": "Get item of chunk meta table", "id": "f13332:c0:m2"} {"signature": "def get_chunk_information(self, chk, lun, chunk_name):", "body": "cmd = [\"\", self.envs,\"\" % (chk, lun, chunk_name)]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "Get chunk information", "id": "f13332:c0:m3"} {"signature": "def is_bad_chunk(self, chk, yml):", "body": "cs = self.get_chunk_status(chk, yml)if cs >= :return Truereturn False", "docstring": "Check the chunk is offline or not", "id": "f13332:c0:m4"} {"signature": "def is_free_chunk(self, chk):", "body": "cs = self.get_chunk_status(chk)if cs & != :return Truereturn False", "docstring": "Check the chunk is free or not", "id": "f13332:c0:m5"} {"signature": "def is_closed_chunk(self, chk):", "body": "cs = self.get_chunk_status(chk)if cs & != :return Truereturn False", "docstring": "Check the chunk is free or not", "id": "f13332:c0:m6"} {"signature": "def is_open_chunk(self, chk):", "body": "cs = self.get_chunk_status(chk)if cs & != :return Truereturn False", "docstring": "Check the chunk is free or not", "id": "f13332:c0:m7"} {"signature": "def s20_to_gen(self, pugrp, punit, chunk, sectr):", "body": "cmd = [\"\", self.envs,\"\" % (pugrp, punit, chunk, sectr)]status, stdout, _ = cij.ssh.command(cmd, shell=True)if status:raise RuntimeError(\"\")return int(re.findall(r\"\", stdout)[], )", "docstring": "S20 unit to gen address", "id": "f13332:c0:m8"} {"signature": "def gen_to_dev(self, address):", "body": "cmd = [\"\", self.envs, \"\".format(address)]status, stdout, _ = cij.ssh.command(cmd, shell=True)if status:raise RuntimeError(\"\")return int(re.findall(r\"\", stdout)[], )", "docstring": "Generic address to device address", "id": "f13332:c0:m9"} {"signature": "def dev_to_gen(self, address):", "body": "cmd = [\"\", self.envs, \"\".format(address)]status, stdout, _ = cij.ssh.command(cmd, shell=True)if status:raise RuntimeError(\"\")return int(re.findall(r\"\", stdout)[], )", "docstring": "Generic address to device address", "id": "f13332:c0:m10"} {"signature": "def vblk_erase(self, address):", "body": "cmd = [\"\", self.envs, \"\" % address]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "nvm_vblk erase", "id": "f13332:c0:m11"} {"signature": "def vblk_write(self, address, file_name=None):", "body": "cmd = [\"\", self.envs, \"\" % address]if file_name:cmd += [\"\".format(file_name)]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "nvm_vblk write", "id": "f13332:c0:m12"} {"signature": "def vblk_read(self, address, file_name=None):", "body": "cmd = [\"\", self.envs, \"\" % address]if file_name:cmd += [\"\".format(file_name)]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "nvm_vblk read", "id": "f13332:c0:m13"} {"signature": "def vector_erase(self, address):", "body": "cmd = [\"\", self.envs, \"\" % address]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "vector erase", "id": "f13332:c0:m14"} {"signature": "def 
vector_write(self, address_list, file_name):", "body": "address = [\"\".format(i) for i in address_list]cmd = [\"\", self.envs, \"\".join(address), \"\".format(file_name)]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "nvm_cmd write", "id": "f13332:c0:m15"} {"signature": "def vector_read(self, address_list, file_name):", "body": "address = [\"\".format(i) for i in address_list]cmd = [\"\", self.envs, \"\".join(address),\"\".format(file_name)]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "nvm_cmd read", "id": "f13332:c0:m16"} {"signature": "def scalar_write(self, address, block_count, data_file, meta_file):", "body": "cmd = [\"\", \"\", self.envs, \"\".format(address),\"\".format(block_count-), \"\".format(data_file), \"\".format(meta_file),\"\".format(block_count * self.get_env(\"\", \"\")),\"\".format(block_count * self.get_env(\"\", \"\"))]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "nvme write", "id": "f13332:c0:m17"} {"signature": "def scalar_read(self, address, block_count, data_file, meta_file):", "body": "cmd = [\"\", \"\", self.envs, \"\".format(address),\"\".format(block_count - ), \"\".format(data_file),\"\".format(meta_file),\"\".format(block_count * self.get_env(\"\", \"\")),\"\".format(block_count * self.get_env(\"\", \"\"))]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "nvme read", "id": "f13332:c0:m18"} {"signature": "def slc_erase(self, address, BE_ID=, PMODE=):", "body": "cmd = [\"\" % BE_ID, \"\" % PMODE, \"\", self.envs, \"\" % address]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "slc erase", "id": "f13332:c0:m23"} {"signature": "def get_envs(self, key):", "body": "return self.envs[key]", "docstring": "Get environment of liblightnvm", "id": "f13334:c0:m1"} {"signature": "def get_chunk_meta(self, meta_file):", "body": "chunks = self.envs[\"\"]if cij.nvme.get_meta(, chunks * self.envs[\"\"], meta_file):raise RuntimeError(\"\")chunk_meta = cij.bin.Buffer(types=self.envs[\"\"], length=chunks)chunk_meta.read(meta_file)return chunk_meta", "docstring": "Get chunk meta table", "id": "f13334:c0:m2"} {"signature": "def get_chunk_meta_item(self, chunk_meta, grp, pug, chk):", "body": "num_chk = self.envs[\"\"]num_pu = self.envs[\"\"]index = grp * num_pu * num_chk + pug * num_chk + chkreturn chunk_meta[index]", "docstring": "Get item of chunk meta table", "id": "f13334:c0:m3"} {"signature": "def is_free_chunk(self, chunk_meta, grp, pug, chk):", "body": "meta = self.get_chunk_meta_item(chunk_meta, grp, pug, chk)if meta.CS & != :return Truereturn False", "docstring": "Check the chunk is free or not", "id": "f13334:c0:m4"} {"signature": "def is_closed_chunk(self, chunk_meta, grp, pug, chk):", "body": "meta = self.get_chunk_meta_item(chunk_meta, grp, pug, chk)if meta.CS & != :return Truereturn False", "docstring": "Check the chunk is free or not", "id": "f13334:c0:m5"} {"signature": "def is_open_chunk(self, chunk_meta, grp, pug, chk):", "body": "meta = self.get_chunk_meta_item(chunk_meta, grp, pug, chk)if meta.CS & != :return Truereturn False", "docstring": "Check the chunk is free or not", "id": "f13334:c0:m6"} {"signature": "def is_bad_chunk(self, chunk_meta, grp, pug, chk):", "body": "meta = self.get_chunk_meta_item(chunk_meta, grp, pug, chk)if meta.CS & != :return Truereturn False", "docstring": "Check the chunk is offline or not", "id": "f13334:c0:m7"} {"signature": "def s20_to_gen(self, pugrp, punit, chunk, sectr):", "body": "cmd = 
[\"\", self.envs[\"\"],\"\" % (pugrp, punit, chunk, sectr)]status, stdout, _ = cij.ssh.command(cmd, shell=True)if status:raise RuntimeError(\"\")return int(re.findall(r\"\", stdout)[], )", "docstring": "S20 unit to generic address", "id": "f13334:c0:m8"} {"signature": "def gen_to_dev(self, address):", "body": "cmd = [\"\", self.envs[\"\"], \"\".format(address)]status, stdout, _ = cij.ssh.command(cmd, shell=True)if status:raise RuntimeError(\"\")return int(re.findall(r\"\", stdout)[], )", "docstring": "Generic address to device address", "id": "f13334:c0:m9"} {"signature": "def vblk_erase(self, address):", "body": "cmd = [\"\", self.envs[\"\"], \"\" % address]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "nvm_vblk erase", "id": "f13334:c0:m10"} {"signature": "def vblk_write(self, address, meta=False):", "body": "cmd = list()if meta:cmd.append(\"\")cmd += [\"\", self.envs[\"\"], \"\" % address]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "nvm_vblk write", "id": "f13334:c0:m11"} {"signature": "def vblk_read(self, address, meta=False):", "body": "cmd = list()if meta:cmd.append(\"\")cmd += [\"\", self.envs[\"\"], \"\" % address]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "nvm_vblk read", "id": "f13334:c0:m12"} {"signature": "def vector_erase(self, address):", "body": "cmd = [\"\", self.envs[\"\"], \"\".format(address)]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "nvm_cmd erase", "id": "f13334:c0:m13"} {"signature": "def vector_write(self, address_list, file_name=None):", "body": "address = [\"\".format(i) for i in address_list]cmd = [\"\", self.envs[\"\"], \"\".join(address)]if file_name:cmd += [\"\".format(file_name)]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "nvm_cmd write", "id": "f13334:c0:m14"} {"signature": "def vector_read(self, address_list, file_name=None):", "body": "address = [\"\".format(i) for i in address_list]cmd = [\"\", self.envs[\"\"], \"\".join(address)]if file_name:cmd += [\"\".format(file_name)]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "nvm_cmd read", "id": "f13334:c0:m15"} {"signature": "def scalar_write(self, address, block_count, data_file, meta_file):", "body": "cmd = [\"\", \"\", self.envs[\"\"], \"\".format(address),\"\".format(block_count-), \"\".format(data_file), \"\".format(meta_file),\"\".format(block_count * self.envs[\"\"]),\"\".format(block_count * self.envs[\"\"])]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "nvme write", "id": "f13334:c0:m16"} {"signature": "def scalar_read(self, address, block_count, data_file, meta_file):", "body": "cmd = [\"\", \"\", self.envs[\"\"], \"\".format(address),\"\".format(block_count - ), \"\".format(data_file),\"\".format(meta_file),\"\".format(block_count * self.envs[\"\"]),\"\".format(block_count * self.envs[\"\"])]status, _, _ = cij.ssh.command(cmd, shell=True)return status", "docstring": "nvme read", "id": "f13334:c0:m17"} {"signature": "def read(*parts):", "body": "here = os.path.abspath(os.path.dirname(__file__))with codecs.open(os.path.join(here, *parts), '') as pfp:return pfp.read()", "docstring": "Read parts to use a e.g. 
long_description", "id": "f13335:m0"} {"signature": "def expand_path(path):", "body": "return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))", "docstring": "Expands variables from the given path and turns it into absolute path", "id": "f13336:m0"} {"signature": "def regex_find(pattern, content):", "body": "find = re.findall(pattern, content)if not find:cij.err(\"\" % pattern)cij.err(\"\" % content)return ''if len(find) >= :cij.err(\"\" % pattern)cij.err(\"\" % content)return ''return find[]", "docstring": "Find the given 'pattern' in 'content", "id": "f13336:m1"} {"signature": "def execute(cmd=None, shell=True, echo=True):", "body": "if echo:cij.emph(\"\" % (shell, cmd))rcode = stdout, stderr = (\"\", \"\")if cmd:if shell:cmd = \"\".join(cmd)proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=shell, close_fds=True)stdout, stderr = proc.communicate()rcode = proc.returncodeif rcode and echo:cij.warn(\"\" % stdout)cij.err(\"\" % stderr)cij.err(\"\" % rcode)return rcode, stdout, stderr", "docstring": "Execute the given 'cmd'\n\n@returns (rcode, stdout, stderr)", "id": "f13336:m2"} {"signature": "def env():", "body": "if cij.ssh.env():cij.err(\"\")return board = cij.env_to_dict(PREFIX, REQUIRED) if board is None:cij.err(\"\")return board[\"\"] = \"\".join([board[r] for r in REQUIRED[:-]])board[\"\"] = \"\".join([board[\"\"], board[\"\"]])cij.env_export(PREFIX, EXPORTED, board) return ", "docstring": "Verify BOARD variables and construct exported variables", "id": "f13337:m0"} {"signature": "def generate_rt_pic(process_data, para_meter, scale):", "body": "pic_path = para_meter[''] + ''plt.figure(figsize=( * scale, * scale))for key in process_data.keys():plt.plot(process_data[key][:, ], process_data[key][:, ], label=str(key))plt.title(para_meter[''])plt.xlabel(para_meter[''])plt.ylabel(para_meter[''])plt.legend(loc='')plt.savefig(pic_path)return pic_path", "docstring": "generate rater pic", "id": "f13338:m0"} {"signature": "def generate_steady_rt_pic(process_data, para_meter, scale, steady_time):", "body": "pic_path_steady = para_meter[''] + ''plt.figure(figsize=( * scale, * scale))for key in process_data.keys():if len(process_data[key]) < steady_time:steady_time = len(process_data[key])plt.scatter(process_data[key][- * steady_time:, ],process_data[key][- * steady_time:, ], label=str(key), s=)steady_value = np.mean(process_data[key][- * steady_time:, ])steady_value_5 = steady_value * ( + )steady_value_10 = steady_value * ( + )steady_value_ng_5 = steady_value * ( - )steady_value_ng_10 = steady_value * ( - )plt.plot(process_data[key][- * steady_time:, ], [steady_value] * steady_time, '')plt.plot(process_data[key][- * steady_time:, ], [steady_value_5] * steady_time, '')plt.plot(process_data[key][- * steady_time:, ],[steady_value_ng_5] * steady_time, '')plt.plot(process_data[key][- * steady_time:, ], [steady_value_10] * steady_time, '')plt.plot(process_data[key][- * steady_time:, ],[steady_value_ng_10] * steady_time, '')plt.title(para_meter[''] + '')plt.xlabel(para_meter[''] + '')plt.ylabel(para_meter[''] + '')plt.legend(loc='')plt.savefig(pic_path_steady)return pic_path_steady", "docstring": "generate rate steady", "id": "f13338:m1"} {"signature": "def process_rt_data(source_data, is_bw=False):", "body": "print(\"\", len(source_data))filter_data = {}for index in range():filter_mask = source_data[:, ] == indexif np.any(filter_mask):filter_data[index] = sum_data(round_data(source_data[filter_mask]), is_bw)return filter_data", "docstring": "process data", "id": "f13338:m2"} {"signature": 
"def sum_data(filter_data, is_bw):", "body": "for index in range(len(filter_data) - ):if filter_data[index][] > filter_data[index + ][]:max_index = index + breakelse:max_index = len(filter_data)print(\"\", max_index + )num_jobs = int(round(len(filter_data) * / max_index))print(\"\", num_jobs)dict_time = Counter(filter_data[:, ])list_sum = []for time_index in range(, max_index + ):if dict_time.get(time_index * , ) != num_jobs:print(\"\" % (time_index * , dict_time.get(time_index * , ), num_jobs))continuefilter_mask = (filter_data[:, ] == time_index * )sum_rst = np.sum(filter_data[filter_mask][:, ])if is_bw:sum_rst = sum_rst / list_sum.append([time_index, sum_rst])return np.array(list_sum)", "docstring": "caculate sum", "id": "f13338:m3"} {"signature": "def round_data(filter_data):", "body": "for index, _ in enumerate(filter_data):filter_data[index][] = round(filter_data[index][] / ) * return filter_data", "docstring": "round the data", "id": "f13338:m4"} {"signature": "def import_source(self, sheet, source, delimiter=\"\"):", "body": "if '' in sheet:raise RuntimeError(\"\" % sheet)if not source.endswith(\"\") and not source.endswith(\"\"):raise RuntimeError(\"\" % source)self.source_sheet = sheetsource_data = np.loadtxt(source, dtype=str, delimiter=delimiter)self.source_data = {\"\": source_data[].tolist(),\"\": source_data[:]}cell_format_title = self.workbook.add_format({'': True,'': u'','': '','': })cell_format = self.workbook.add_format({'': False,'': u'','': })worksheet = self.workbook.add_worksheet(sheet)worksheet.write_row('', self.source_data[''], cell_format_title)_, col_num = self.source_data[''].shapefor i in range(col_num):try:data_array = self.source_data[''][:, i].astype(float)except ValueError:data_array = self.source_data[''][:, i]worksheet.write_column(, i, data_array.tolist(), cell_format)", "docstring": "Function:\n Save original data into specific sheet, and try to translate data to float type\nInput:\n sheet: Must be a non exists sheet\n source: File path of source", "id": "f13338:c0:m1"} {"signature": "def generate_chart(self, properties):", "body": "if not {'', '', '', ''}.issubset(set(properties.keys())):raise RuntimeError(\"\" % properties.keys())mask = self.__filter_data(properties[''])chart = self.__generate_chart(mask, properties)sheet = properties['']if sheet in self.sheet_dict.keys():self.sheet_dict[sheet] += worksheet = self.workbook.get_worksheet_by_name(sheet)else:self.sheet_dict[sheet] = worksheet = self.workbook.add_worksheet(sheet)worksheet.insert_chart('' % ( + (self.sheet_dict[sheet] - ) * ), chart)", "docstring": "Function:\n Generate and save chart to specific sheet.\nInput:\n sheet: If already exists, new chart will be added below.\n Otherwise, it would create a new sheet;\n x_axis: Specify x axis;\n y_axis: Specify y axis;\n series: Specify series;\n filters: dict type, use to filter useful data from original data;\n title: if None, the chart will create without title;\n x_axis_name: if None, use x_axis instead;\n y_axis_name: if None, use y_axis instead;", "id": "f13338:c0:m2"} {"signature": "def close(self):", "body": "self.workbook.close()", "docstring": "Close work book", "id": "f13338:c0:m6"} {"signature": "def gen_config_sheet(self, sheetname, plist):", "body": "worksheet_cfg = self.workbook.add_worksheet(sheetname)cell_format = self.workbook.add_format({'': False, '': u''})cell_format_title = self.workbook.add_format({'': , '': '','': '', '': ,'': u'', '': False})worksheet_cfg.set_column('', , cell_format)worksheet_cfg.write_row('', plist[], 
cell_format_title)for i in range(, len(plist)):worksheet_cfg.write_row('' % (i+), plist[i], cell_format)", "docstring": "generate configuration", "id": "f13338:c1:m1"} {"signature": "def gen_data_sheet(self, datafile, para_meter, scale=, steady_time=):", "body": "filename = os.path.splitext(os.path.split(datafile)[])[][:-]para_meter[''] = filenamesource_data = np.loadtxt(datafile, dtype=int, delimiter='')[:, :]is_bw = ''in para_meter[''].lower()file_data = process_rt_data(source_data, is_bw)pic_path = generate_rt_pic(file_data, para_meter, scale)pic_path_steady = generate_steady_rt_pic(file_data, para_meter, scale, steady_time)if para_meter[''] in self.sheetname_dict.keys():self.sheetname_dict[para_meter['']] =self.sheetname_dict[para_meter['']] + chart_sheet = self.workbook.get_worksheet_by_name(para_meter[''])else:self.sheetname_dict[para_meter['']] = chart_sheet = self.workbook.add_worksheet(para_meter[''])chart_sheet.insert_image('' %( + (self.sheetname_dict[para_meter['']] - ) * ),pic_path)chart_sheet.insert_image('' %( + (self.sheetname_dict[para_meter['']] - ) * ),pic_path_steady)self.__insert_value(chart_sheet, file_data, + (self.sheetname_dict[para_meter['']] - ) * ,steady_time)self.pic_list.append(pic_path)self.pic_list.append(pic_path_steady)", "docstring": "datafile, sheetname, x_axis_name, y_axis_name, title,\nFunction:\n Turn realtime bw data into a picture, and save it into a specific sheet\nInput:\n sheetname: If already exists, new chart will be added continually.\n Otherwise, it would create a new sheet;\n x_axis_name: x_axis name;\n y_axis_name: y_axis name;\n title: picture name;\n scale: size of picture.", "id": "f13338:c1:m2"} {"signature": "def closexls(self):", "body": "self.workbook.close()", "docstring": "close the xlsx", "id": "f13338:c1:m4"} {"signature": "def extract_hook_names(ent):", "body": "hnames = []for hook in ent[\"\"][\"\"] + ent[\"\"][\"\"]:hname = os.path.basename(hook[\"\"])hname = os.path.splitext(hname)[]hname = hname.strip()hname = hname.replace(\"\", \"\")hname = hname.replace(\"\", \"\")if hname in hnames:continuehnames.append(hname)hnames.sort()return hnames", "docstring": "Extract hook names from the given entity", "id": "f13339:m0"} {"signature": "def tcase_comment(tcase):", "body": "src = open(tcase[\"\"]).read()if len(src) < :cij.err(\"\" % tcase[\"\"])return Noneext = os.path.splitext(tcase[\"\"])[-]if ext not in [\"\", \"\"]:cij.err(\"\" % (ext, tcase[\"\"]))return Nonecomment = []for line in src.splitlines()[:]:if ext == \"\" and not line.startswith(\"\"):breakelif ext == \"\" and not '' in line:breakcomment.append(line)return comment", "docstring": "Extract testcase comment section / testcase description\n\n@returns the testcase-comment from the tcase[\"fpath\"] as a list of strings", "id": "f13339:m1"} {"signature": "def tcase_parse_descr(tcase):", "body": "descr_short = \"\"descr_long = \"\"try:comment = tcase_comment(tcase)except (IOError, OSError, ValueError) as exc:comment = []cij.err(\"\" % (exc, tcase))comment = [l for l in comment if l.strip()] for line_number, line in enumerate(comment):if line.startswith(\"\"):comment[line_number] = line[:]if comment:descr_short = comment[]if len(comment) > :descr_long = \"\".join(comment[:])return descr_short, descr_long", "docstring": "Parse descriptions from the given tcase", "id": "f13339:m2"} {"signature": "def runlogs_to_html(run_root):", "body": "if not os.path.isdir(run_root):return \"\"hook_enter = []hook_exit = []tcase = []for fpath in glob.glob(os.sep.join([run_root, \"\"])):if \"\" 
in fpath:hook_exit.append(fpath)continueif \"\" in fpath:hook_enter.append(fpath)continuetcase.append(fpath)content = \"\"for fpath in hook_enter + tcase + hook_exit:content += \"\" % fpathcontent += open(fpath, \"\").read()content += \"\" % fpathreturn content", "docstring": "Returns content of the given 'fpath' with HTML annotations, currently simply\na conversion of ANSI color codes to HTML elements", "id": "f13339:m3"} {"signature": "def src_to_html(fpath):", "body": "if not os.path.exists(fpath):return \"\" % fpathreturn open(fpath, \"\").read()", "docstring": "Returns content of the given 'fpath' with HTML annotations for syntax\nhighlighting", "id": "f13339:m4"} {"signature": "def aux_listing(aux_root):", "body": "listing = []for root, _, fnames in os.walk(aux_root):count = len(aux_root.split(os.sep))prefix = root.split(os.sep)[count:]for fname in fnames:listing.append(os.sep.join(prefix + [fname]))return listing", "docstring": "Listing", "id": "f13339:m5"} {"signature": "def process_tsuite(tsuite):", "body": "tsuite[\"\"] = runlogs_to_html(tsuite[\"\"])tsuite[\"\"] = aux_listing(tsuite[\"\"])tsuite[\"\"] = extract_hook_names(tsuite)return True", "docstring": "Goes through the tsuite and processes \"*.log", "id": "f13339:m6"} {"signature": "def process_tcase(tcase):", "body": "tcase[\"\"] = src_to_html(tcase[\"\"])tcase[\"\"] = runlogs_to_html(tcase[\"\"])tcase[\"\"] = aux_listing(tcase[\"\"])tcase[\"\"], tcase[\"\"] = tcase_parse_descr(tcase)tcase[\"\"] = extract_hook_names(tcase)return True", "docstring": "Goes through the trun and processes \"run.log", "id": "f13339:m7"} {"signature": "def process_trun(trun):", "body": "trun[\"\"] = runlogs_to_html(trun[\"\"])trun[\"\"] = aux_listing(trun[\"\"])trun[\"\"] = extract_hook_names(trun)return True", "docstring": "Goes through the trun and processes \"run.log", "id": "f13339:m8"} {"signature": "def postprocess(trun):", "body": "plog = []plog.append((\"\", process_trun(trun)))for tsuite in trun[\"\"]:plog.append((\"\", process_tsuite(tsuite)))for tcase in tsuite[\"\"]:plog.append((\"\", process_tcase(tcase)))for task, success in plog:if not success:cij.err(\"\" % task)return sum((success for task, success in plog))", "docstring": "Perform postprocessing of the given test run", "id": "f13339:m9"} {"signature": "def dset_to_html(dset, tmpl_fpath):", "body": "def stamp_to_datetime(stamp):\"\"\"\"\"\"return datetime.datetime.fromtimestamp(int(stamp))def strftime(dtime, fmt):\"\"\"\"\"\"return dtime.strftime(fmt)def ansi_to_html(ansi):\"\"\"\"\"\"conv = ansi2html.Ansi2HTMLConverter(scheme=\"\",inline=True)html = conv.convert(ansi, full=False)with open(\"\", \"\") as html_file:html_file.write(html)return htmltmpl_dpath = os.path.dirname(tmpl_fpath)tmpl_fname = os.path.basename(tmpl_fpath)env = jinja2.Environment(autoescape=True,loader=jinja2.FileSystemLoader(tmpl_dpath))env.filters[''] = stamp_to_datetimeenv.filters[''] = strftimeenv.filters[''] = ansi_to_htmltmpl = env.get_template(tmpl_fname)return tmpl.render(dset=dset)", "docstring": "@returns A HTML representation of the given 'dset' using the template at\n'tmpl_fpath'", "id": "f13339:m10"} {"signature": "def rehome(old, new, struct):", "body": "if old == new:returnif isinstance(struct, list):for item in struct:rehome(old, new, item)elif isinstance(struct, dict):for key, val in struct.iteritems():if isinstance(val, (dict, list)):rehome(old, new, val)elif \"\" in key:continueelif \"\" in key:continueelif \"\" in key or \"\" in key:struct[key] = struct[key].replace(old, new)", "docstring": 
"Replace all absolute paths to \"re-home\" it", "id": "f13339:m11"} {"signature": "def main(args):", "body": "trun = cij.runner.trun_from_file(args.trun_fpath)rehome(trun[\"\"][\"\"], args.output, trun)postprocess(trun)cij.emph(\"\" % args.tmpl_fpath)cij.emph(\"\" % args.output)html_fpath = os.sep.join([args.output, \"\" % args.tmpl_name])cij.emph(\"\" % html_fpath)try: with open(html_fpath, '') as html_file:html_file.write(dset_to_html(trun, args.tmpl_fpath))except (IOError, OSError, ValueError) as exc:import tracebacktraceback.print_exc()cij.err(\"\" % exc)return return ", "docstring": "Main entry point", "id": "f13339:m12"} {"signature": "def tindex(spath=None):", "body": "spath = spath if spath else os.environ.get(\"\", None)if spath is None:return Nonetests = [] for root, _, files in os.walk(spath):if root != spath:continuetests += [f for f in files if f[-:] in [\"\", \"\"]]return tests", "docstring": "Lists tindex in CIJ_TESTCASES\n\n@Returns On success, a list of filenames is returned. On error, None is\nreturned", "id": "f13340:m0"} {"signature": "def envs():", "body": "variables = {}for req in REQS:prefix = req.upper()variables[prefix] = cij.env_to_dict(prefix, getattr(cij, req).REQUIRED + getattr(cij, req).EXPORTED)return variables", "docstring": "Return variables defined by modules required by test", "id": "f13340:m1"} {"signature": "def require(req):", "body": "REQS.append(req)", "docstring": "Add test requirement", "id": "f13340:m2"} {"signature": "def enter():", "body": "if cij.ssh.env():tfail(\"\")for req in REQS:if getattr(cij, req).env():tfail()cij.emph(\"\")", "docstring": "Enter the test, check requirements and setup aux. environment", "id": "f13340:m3"} {"signature": "def texit(msg=None, rcode=):", "body": "msg = \"\" % msg if msg else \"\"if rcode:cij.err(\"\" % msg)else:cij.good(\"\" % msg)sys.exit(rcode)", "docstring": "Exit the test", "id": "f13340:m4"} {"signature": "def tpass(msg=None):", "body": "texit(msg, )", "docstring": "Testing: Exit test indicating test passed", "id": "f13340:m5"} {"signature": "def tfail(msg=None):", "body": "texit(msg, )", "docstring": "Testing: Exit test indicating test failed", "id": "f13340:m6"} {"signature": "def command(cmd, ssh=True, shell=True, echo=True):", "body": "if ssh:return cij.ssh.command(cmd, shell, echo)return cij.util.execute(cmd, shell, echo)", "docstring": "Execute the given 'cmd'\n\n@returns (rcode, stdout, stderr)", "id": "f13340:m7"} {"signature": "def command_to_struct(cmd):", "body": "struct = Nonercode, stdout, stderr = command(cmd)try:lines = []for line in stdout.splitlines():if line.strip().startswith(\"\"):continuelines.append(line)struct = yaml.safe_load(\"\".join(lines))except (yaml.YAMLError) as exc:cij.err(\"\" % exc)return rcode, stdout, stderr, struct", "docstring": "Same as `command` except it tries to convert stdout to struct\n\n@returns (rcode, struct, stderr, struct)", "id": "f13340:m8"} {"signature": "def env():", "body": "if cij.ssh.env():cij.err(\"\")return lnvm = cij.env_to_dict(PREFIX, REQUIRED)nvme = cij.env_to_dict(\"\", [\"\"])if \"\" not in lnvm.keys():cij.err(\"\")return if \"\" not in lnvm.keys():cij.err(\"\")return if \"\" not in lnvm.keys():cij.err(\"\")return lnvm[\"\"] = \"\" % (nvme[\"\"], int(lnvm[\"\"]), int(lnvm[\"\"]))lnvm[\"\"] = \"\" % lnvm[\"\"]cij.env_export(PREFIX, EXPORTED, lnvm)return ", "docstring": "Verify LNVM variables and construct exported variables", "id": "f13341:m0"} {"signature": "def create():", "body": "if env():cij.err(\"\")return nvme = cij.env_to_dict(\"\", 
[\"\"])lnvm = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)cij.emph(\"\" % lnvm[\"\"])cmd = [\"\" % (nvme[\"\"], lnvm[\"\"], lnvm[\"\"], lnvm[\"\"], lnvm[\"\"])]rcode, _, _ = cij.ssh.command(cmd, shell=True)if rcode:cij.err(\"\")return return ", "docstring": "Create LNVM device", "id": "f13341:m1"} {"signature": "def recover():", "body": "if env():cij.err(\"\")return nvme = cij.env_to_dict(\"\", [\"\"])lnvm = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)cij.emph(\"\" % lnvm[\"\"])cmd = [\"\" % (nvme[\"\"], lnvm[\"\"], lnvm[\"\"], lnvm[\"\"], lnvm[\"\"])]rcode, _, _ = cij.ssh.command(cmd, shell=True)if rcode:cij.err(\"\")return return ", "docstring": "Recover LNVM device", "id": "f13341:m2"} {"signature": "def remove():", "body": "if env():cij.err(\"\")return lnvm = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)cij.emph(\"\" % lnvm[\"\"])cmd = [\"\" % (lnvm[\"\"])]rcode, _, _ = cij.ssh.command(cmd, shell=True)if rcode:cij.err(\"\")return return ", "docstring": "Remove LNVM device", "id": "f13341:m3"} {"signature": "def exists():", "body": "if env():cij.err(\"\")return lnvm = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)cmd = ['' % lnvm[\"\"]]rcode, _, _ = cij.ssh.command(cmd, shell=True, echo=False)if rcode:return Falsereturn True", "docstring": "Verify that the ENV defined LNVM device exists", "id": "f13341:m4"} {"signature": "def env():", "body": "if cij.ssh.env():cij.err(\"\")return return ", "docstring": "Verify FIO variables and construct exported variables", "id": "f13342:m0"} {"signature": "def __run(self, shell=True, echo=True):", "body": "if env():return cij.emph(\"\" % (shell, self.__prefix + self.__suffix))return cij.ssh.command(self.__prefix, shell, echo, self.__suffix)", "docstring": "Run DMESG job", "id": "f13342:c0:m1"} {"signature": "def start(self):", "body": "self.__thread = Thread(target=self.__run, args=(True, False))self.__thread.setDaemon(True)self.__thread.start()", "docstring": "Start DMESG job in thread", "id": "f13342:c0:m2"} {"signature": "def terminate(self):", "body": "if self.__thread:cmd = [\"\"]status, output, _ = cij.util.execute(cmd, shell=True, echo=True)if status:cij.warn(\"\")return tty = output.split()[]cmd = [\"\".format(\"\".join(self.__prefix), tty)]status, _, _ = cij.util.execute(cmd, shell=True, echo=True)if status:cij.warn(\"\")return self.__thread.join()self.__thread = Nonereturn ", "docstring": "Terminate DMESG job", "id": "f13342:c0:m3"} {"signature": "def index(search_path, ext=None):", "body": "if ext is None:ext = \"\"fnames = set([])for _, _, files in os.walk(search_path):for fname in files:if os.path.splitext(fname)[-] in EXTS[ext]:fnames.add(fname)return fnames", "docstring": "@returns a set of filenames with extension 'ext' in 'search_path", "id": "f13343:m0"} {"signature": "def get_time_stamp():", "body": "if CIJ_ECHO_TIME_STAMP == \"\":return time.strftime('', time.localtime(time.time()))return \"\"", "docstring": "Get time stampe if CIJ_ECHO_TIME_STAMP is 1", "id": "f13343:m1"} {"signature": "def info(txt):", "body": "print(\"\" % (PR_EMPH_CC, get_time_stamp(), txt, PR_NC))sys.stdout.flush()", "docstring": "Print, emphasized 'neutral', the given 'txt' message", "id": "f13343:m2"} {"signature": "def good(txt):", "body": "print(\"\" % (PR_GOOD_CC, get_time_stamp(), txt, PR_NC))sys.stdout.flush()", "docstring": "Print, emphasized 'good', the given 'txt' message", "id": "f13343:m3"} {"signature": "def warn(txt):", "body": "print(\"\" % (PR_WARN_CC, get_time_stamp(), txt, PR_NC))sys.stdout.flush()", "docstring": "Print, emphasized 'warning', 
the given 'txt' message", "id": "f13343:m4"} {"signature": "def err(txt):", "body": "print(\"\" % (PR_ERR_CC, get_time_stamp(), txt, PR_NC))sys.stdout.flush()", "docstring": "Print, emphasized 'error', the given 'txt' message", "id": "f13343:m5"} {"signature": "def emph(txt, rval=None):", "body": "if rval is None: info(txt)elif rval == : good(txt)else: err(txt)", "docstring": "Print, emphasized based on rval", "id": "f13343:m6"} {"signature": "def paths_from_env(prefix=None, names=None):", "body": "def expand_path(path):\"\"\"\"\"\"return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))if prefix is None:prefix = \"\"if names is None:names = [\"\", \"\", \"\", \"\", \"\", \"\",\"\", \"\"]conf = {v: os.environ.get(\"\".join([prefix, v])) for v in names}for env in (e for e in conf.keys() if e[:len(prefix)] in names and conf[e]):conf[env] = expand_path(conf[env])if not os.path.exists(conf[env]):err(\"\" % (prefix, env, conf[env]))return conf", "docstring": "Construct dict of paths from environment variables", "id": "f13343:m7"} {"signature": "def env_to_dict(prefix, names):", "body": "env = {}for name in names:env[name] = ENV.get(\"\".join([prefix, name]))if env[name] is None:return Nonereturn env", "docstring": "Construct dict from environment variables named: PREFIX_NAME\n\n@returns dict of names", "id": "f13343:m8"} {"signature": "def env_export(prefix, exported, env):", "body": "for exp in exported:ENV[\"\".join([prefix, exp])] = env[exp]", "docstring": "Define the list of 'exported' variables with 'prefix' with values from 'env'", "id": "f13343:m9"} {"signature": "def dump(buf, indent=, skip=\"\"):", "body": "if not isinstance(type(buf), (type(Union), type(Structure))):raise RuntimeError(\"\" % type(buf))for field in getattr(buf, ''):name, types = field[], field[]if name in skip:returnvalue = getattr(buf, name)if isinstance(types, (type(Union), type(Structure))):cij.info(\"\" % (\"\" * indent, name))dump(value, indent+, skip)elif isinstance(types, type(Array)):for i, item in enumerate(value):name_index = \"\" % (name, i)if isinstance(types, (type(Union), type(Structure))):cij.info(\"\" % (\"\" * indent, name_index))dump(item, indent + , skip)else:cij.info(\"\" % (\"\" * indent, name_index, item))else:cij.info(\"\" % (\"\" * indent, name, value))", "docstring": "Dump UnionType/StructType to STDOUT", "id": "f13344:m0"} {"signature": "def compare(buf_a, buf_b, ignore):", "body": "for field in getattr(buf_a, ''):name, types = field[], field[]if name in ignore:continueval_a = getattr(buf_a, name)val_b = getattr(buf_b, name)if isinstance(types, (type(Union), type(Structure))):if compare(val_a, val_b, ignore):return elif isinstance(types, type(Array)):for i, _ in enumerate(val_a):if isinstance(types, (type(Union), type(Structure))):if compare(val_a[i], val_b[i], ignore):return else:if val_a[i] != val_b[i]:return else:if val_a != val_b:return return ", "docstring": "Compare of two Buffer item", "id": "f13344:m1"} {"signature": "def length(self):", "body": "return self.m_len", "docstring": "Get length of types", "id": "f13344:c0:m3"} {"signature": "def size(self):", "body": "return self.m_size", "docstring": "Get size of buffer", "id": "f13344:c0:m4"} {"signature": "def types(self):", "body": "return self.m_types", "docstring": "Get types of buffer", "id": "f13344:c0:m5"} {"signature": "def memcopy(self, stream, offset=, length=float(\"\")):", "body": "data = [ord(i) for i in list(stream)]size = min(length, len(data), self.m_size)buff = cast(self.m_buf, POINTER(c_uint8))for i in 
range(size):buff[offset + i] = data[i]", "docstring": "Copy stream to buffer", "id": "f13344:c0:m6"} {"signature": "def write(self, path):", "body": "with open(path, \"\") as fout:fout.write(self.m_buf)", "docstring": "Write buffer to file", "id": "f13344:c0:m7"} {"signature": "def read(self, path):", "body": "with open(path, \"\") as fout:memmove(self.m_buf, fout.read(self.m_size), self.m_size)", "docstring": "Read file to buffer", "id": "f13344:c0:m8"} {"signature": "def dump(self, offset=, length=):", "body": "for i in range(offset, offset + length):if \"\" in str(self.m_types):cij.info(\"\" % (i, self.m_buf[i]))else:cij.info(\"\" % i)dump(self.m_buf[i], )", "docstring": "Dump item", "id": "f13344:c0:m9"} {"signature": "def compare(self, buf, offset=, length=, ignore=\"\"):", "body": "for i in range(offset, offset + length):if isinstance(self.m_types, (type(Union), type(Structure))):if compare(self.m_buf[i], buf[i], ignore=ignore):return elif self.m_buf[i] != buf[i]:return return ", "docstring": "Compare buffer", "id": "f13344:c0:m10"} {"signature": "def power_on(self, interval=):", "body": "if self.__power_on_port is None:cij.err(\"\")return return self.__press(self.__power_on_port, interval=interval)", "docstring": "230v power on", "id": "f13345:c0:m5"} {"signature": "def power_off(self, interval=):", "body": "if self.__power_off_port is None:cij.err(\"\")return return self.__press(self.__power_off_port, interval=interval)", "docstring": "230v power off", "id": "f13345:c0:m6"} {"signature": "def power_btn(self, interval=):", "body": "if self.__power_btn_port is None:cij.err(\"\")return return self.__press(self.__power_btn_port, interval=interval)", "docstring": "TARGET power button", "id": "f13345:c0:m7"} {"signature": "def env():", "body": "if cij.ssh.env():cij.err(\"\")return block = cij.env_to_dict(PREFIX, REQUIRED)block[\"\"] = \"\" % block[\"\"]cij.env_export(PREFIX, EXPORTED, block)return ", "docstring": "Verify BLOCK variables and construct exported variables", "id": "f13346:m0"} {"signature": "def exists():", "body": "if env():cij.err(\"\")return block = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)cmd = ['' % block[\"\"]]rcode, _, _ = cij.ssh.command(cmd, shell=True, echo=False)return rcode", "docstring": "Verify that the ENV defined BLOCK device exists", "id": "f13346:m1"} {"signature": "def env():", "body": "if cij.ssh.env():cij.err(\"\")return return ", "docstring": "Verify FIO variables and construct exported variables", "id": "f13347:m0"} {"signature": "def pkill():", "body": "if env():return cmd = [\"\"]status, _, _ = cij.ssh.command(cmd, shell=True, echo=False)if not status:status, _, _ = cij.ssh.command([\"\"], shell=True)if status:return return ", "docstring": "Kill all of FIO processes", "id": "f13347:m1"} {"signature": "def run(self):", "body": "self.output = self.target(*self.args)", "docstring": "Start run thread", "id": "f13347:c0:m1"} {"signature": "def result(self):", "body": "return self.output", "docstring": "Get result of thread", "id": "f13347:c0:m2"} {"signature": "def __parse_parms(self):", "body": "args = list()for key, val in self.__parm.items():key = key.replace(\"\", \"\").lower()if key == \"\":args.append(\"\")if val is None:args.append(\"\" % key)else:args.append(\"\" % (key, val))return args", "docstring": "Translate dict parameters to string", "id": "f13347:c1:m1"} {"signature": "def import_parms(self, args):", "body": "for key, val in args.items():self.set_parm(key, val)", "docstring": "Import external dict to internal dict", "id": 
"f13347:c1:m2"} {"signature": "def set_parm(self, key, val=None):", "body": "self.__parm[key] = val", "docstring": "Set parameter of FIO\nIf val is None, the parameter is \"--key\"\nIf val is not None, the parameter is \"--key=val\"", "id": "f13347:c1:m3"} {"signature": "def get_parm(self, key):", "body": "if key in self.__parm.keys():return self.__parm[key]return None", "docstring": "Get parameter of FIO", "id": "f13347:c1:m4"} {"signature": "def start(self):", "body": "self.__thread = Threads(target=self.run, args=(True, True, False))self.__thread.setDaemon(True)self.__thread.start()", "docstring": "Run FIO job in thread", "id": "f13347:c1:m5"} {"signature": "def join(self, timeout=None):", "body": "if self.__thread:self.__thread.join(timeout)", "docstring": "Wait until the FIO thread terminates", "id": "f13347:c1:m6"} {"signature": "def result(self):", "body": "if self.__thread:return self.__thread.result()return None", "docstring": "Get result of FIO thread", "id": "f13347:c1:m7"} {"signature": "def run(self, shell=True, cmdline=False, echo=True):", "body": "if env():return cmd = [\"\"] + self.__parse_parms()if cmdline:cij.emph(\"\" % (shell, cmd))return cij.ssh.command(cmd, shell, echo)", "docstring": "Run FIO job", "id": "f13347:c1:m8"} {"signature": "def env():", "body": "ipmi = cij.env_to_dict(PREFIX, REQUIRED)if ipmi is None:ipmi[\"\"] = \"\"ipmi[\"\"] = \"\"ipmi[\"\"] = \"\"ipmi[\"\"] = \"\"cij.info(\"\" % (ipmi[\"\"], ipmi[\"\"], ipmi[\"\"], ipmi[\"\"]))cij.env_export(PREFIX, EXPORTED, ipmi)return ", "docstring": "Verify IPMI environment", "id": "f13348:m0"} {"signature": "def cmd(command):", "body": "env()ipmi = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)command = \"\" % (ipmi[\"\"], ipmi[\"\"], ipmi[\"\"], ipmi[\"\"], command)cij.info(\"\" % command)return cij.util.execute(command, shell=True, echo=True)", "docstring": "Send IPMI 'command' via ipmitool", "id": "f13348:m1"} {"signature": "def pwr_on():", "body": "cmd(\"\")", "docstring": "Target On", "id": "f13348:m2"} {"signature": "def pwr_off():", "body": "cmd(\"\")", "docstring": "Target Off", "id": "f13348:m3"} {"signature": "def pwr_reset():", "body": "cmd(\"\")", "docstring": "Target reset", "id": "f13348:m4"} {"signature": "def env():", "body": "if cij.ssh.env():cij.err(\"\")return pci = cij.env_to_dict(PREFIX, REQUIRED)pci[\"\"] = \"\"pci[\"\"] = os.sep.join([pci[\"\"], \"\", pci[\"\"]])cij.env_export(PREFIX, EXPORTED, pci)return ", "docstring": "Verify PCI variables and construct exported variables", "id": "f13349:m0"} {"signature": "def exists():", "body": "if env():cij.err(\"\")return pci = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)cmd = ['' % pci[\"\"]]rcode, _, _ = cij.ssh.command(cmd, shell=True, echo=False)return rcode", "docstring": "Verify that the ENV defined PCI device exists", "id": "f13349:m1"} {"signature": "def yml_fpath(output_path):", "body": "return os.sep.join([output_path, \"\"])", "docstring": "Returns the path to the trun-file", "id": "f13350:m0"} {"signature": "def script_run(trun, script):", "body": "if trun[\"\"][\"\"]:cij.emph(\"\" % script)cij.emph(\"\" % script[\"\"])launchers = {\"\": \"\",\"\": \"\"}ext = os.path.splitext(script[\"\"])[-]if not ext in launchers.keys():cij.err(\"\" % script[\"\"])return launch = launchers[ext]with open(script[\"\"], \"\") as log_fd:log_fd.write(\"\" % script[\"\"])log_fd.flush()bgn = time.time()cmd = ['', '','''''''' % (trun[\"\"][\"\"],script[\"\"],launch,script[\"\"])]if trun[\"\"][\"\"] > :cij.emph(\"\" % \"\".join(cmd))evars = 
os.environ.copy()evars.update({k: str(script[\"\"][k]) for k in script[\"\"]})process = Popen(cmd,stdout=log_fd,stderr=STDOUT,cwd=script[\"\"],env=evars)process.wait()script[\"\"] = process.returncodescript[\"\"] = time.time() - bgnif trun[\"\"][\"\"]:cij.emph(\"\" % script[\"\"])cij.emph(\"\" % script[\"\"],script[\"\"])return script[\"\"]", "docstring": "Execute a script or testcase", "id": "f13350:m1"} {"signature": "def hook_setup(parent, hook_fpath):", "body": "hook = copy.deepcopy(HOOK)hook[\"\"] = os.path.splitext(os.path.basename(hook_fpath))[]hook[\"\"] = hook[\"\"].replace(\"\", \"\").replace(\"\", \"\")hook[\"\"] = parent[\"\"]hook[\"\"] = hook_fpathhook[\"\"] = \"\" % os.path.basename(hook[\"\"])hook[\"\"] = os.sep.join([hook[\"\"], hook[\"\"]])hook[\"\"] = os.sep.join([hook[\"\"],\"\" % hook[\"\"]])hook[\"\"].update(copy.deepcopy(parent[\"\"]))shutil.copyfile(hook[\"\"], hook[\"\"])return hook", "docstring": "Setup hook", "id": "f13350:m2"} {"signature": "def hooks_setup(trun, parent, hnames=None):", "body": "hooks = {\"\": [],\"\": []}if hnames is None: return hooksfor hname in hnames: for med in HOOK_PATTERNS:for ptn in HOOK_PATTERNS[med]:fpath = os.sep.join([trun[\"\"][\"\"], ptn % hname])if not os.path.exists(fpath):continuehook = hook_setup(parent, fpath)if not hook:continuehooks[med].append(hook)if not hooks[\"\"] + hooks[\"\"]:cij.err(\"\" % hname)return Nonereturn hooks", "docstring": "Setup test-hooks\n@returns dict of hook filepaths {\"enter\": [], \"exit\": []}", "id": "f13350:m3"} {"signature": "def trun_to_file(trun, fpath=None):", "body": "if fpath is None:fpath = yml_fpath(trun[\"\"][\"\"])with open(fpath, '') as yml_file:data = yaml.dump(trun, explicit_start=True, default_flow_style=False)yml_file.write(data)", "docstring": "Dump the given trun to file", "id": "f13350:m4"} {"signature": "def trun_from_file(fpath):", "body": "with open(fpath, '') as yml_file:return yaml.safe_load(yml_file)", "docstring": "Returns trun from the given fpath", "id": "f13350:m5"} {"signature": "def trun_emph(trun):", "body": "if trun[\"\"][\"\"] > : cij.emph(\"\")for cvar in sorted(trun[\"\"].keys()):cij.emph(\"\" % (cvar, trun[\"\"][cvar]))cij.emph(\"\")if trun[\"\"][\"\"]:cij.emph(\"\")cij.emph(\"\" % trun[\"\"][\"\"])cij.emph(\"\" % yml_fpath(trun[\"\"][\"\"]))cij.emph(\"\")", "docstring": "Print essential info on", "id": "f13350:m6"} {"signature": "def tcase_setup(trun, parent, tcase_fname):", "body": "case = copy.deepcopy(TESTCASE)case[\"\"] = tcase_fnamecase[\"\"] = os.sep.join([trun[\"\"][\"\"], case[\"\"]])if not os.path.exists(case[\"\"]):cij.err('' % case[\"\"])return Nonecase[\"\"] = os.path.splitext(case[\"\"])[]case[\"\"] = \"\".join([parent[\"\"], case[\"\"]])case[\"\"] = os.sep.join([parent[\"\"], case[\"\"]])case[\"\"] = os.sep.join([case[\"\"], \"\"])case[\"\"] = os.sep.join([case[\"\"], \"\"])case[\"\"] = os.sep.join([case[\"\"], case[\"\"]])case[\"\"].update(copy.deepcopy(parent[\"\"]))os.makedirs(case[\"\"]) os.makedirs(case[\"\"])shutil.copyfile(case[\"\"], case[\"\"]) case[\"\"] = hooks_setup(trun, case, parent.get(\"\"))return case", "docstring": "Create and initialize a testcase", "id": "f13350:m7"} {"signature": "def tsuite_exit(trun, tsuite):", "body": "if trun[\"\"][\"\"]:cij.emph(\"\")rcode = for hook in reversed(tsuite[\"\"][\"\"]): rcode = script_run(trun, hook)if rcode:breakif trun[\"\"][\"\"]:cij.emph(\"\" % rcode, rcode)return rcode", "docstring": "Triggers when exiting the given testsuite", "id": "f13350:m8"} {"signature": "def tsuite_enter(trun, 
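trun_to_file() and trun_from_file() above persist the test-run state as YAML between steps so a run can be inspected and resumed. A minimal round-trip sketch with PyYAML; the on-disk filename produced by yml_fpath() is elided in this dump, so "trun.yml" below is only an assumption:

import yaml

def trun_to_file(trun, fpath):
    with open(fpath, "w") as yml_file:
        yml_file.write(yaml.dump(trun, explicit_start=True, default_flow_style=False))

def trun_from_file(fpath):
    with open(fpath, "r") as yml_file:
        return yaml.safe_load(yml_file)

state = {"status": "UNKN", "progress": {"PASS": 0, "FAIL": 0, "UNKN": 2}}
trun_to_file(state, "/tmp/trun.yml")
assert trun_from_file("/tmp/trun.yml") == state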
tsuite):", "body": "if trun[\"\"][\"\"]:cij.emph(\"\" % tsuite[\"\"])rcode = for hook in tsuite[\"\"][\"\"]: rcode = script_run(trun, hook)if rcode:breakif trun[\"\"][\"\"]:cij.emph(\"\" % rcode, rcode)return rcode", "docstring": "Triggers when entering the given testsuite", "id": "f13350:m9"} {"signature": "def tsuite_setup(trun, declr, enum):", "body": "suite = copy.deepcopy(TESTSUITE) suite[\"\"] = declr.get(\"\")if suite[\"\"] is None:cij.err(\"\")return Nonesuite[\"\"] = declr.get(\"\")suite[\"\"] = \"\" % (suite[\"\"], enum)suite[\"\"] = os.sep.join([trun[\"\"][\"\"], suite[\"\"]])suite[\"\"] = os.sep.join([suite[\"\"], \"\"])suite[\"\"].update(copy.deepcopy(trun[\"\"]))suite[\"\"].update(copy.deepcopy(declr.get(\"\", {})))os.makedirs(suite[\"\"])os.makedirs(suite[\"\"])suite[\"\"] = hooks_setup(trun, suite, declr.get(\"\"))suite[\"\"] = declr.get(\"\", [])suite[\"\"] = \"\" % suite[\"\"]suite[\"\"] = os.sep.join([trun[\"\"][\"\"], suite[\"\"]])tcase_fpaths = [] if os.path.exists(suite[\"\"]): suite_lines = (l.strip() for l in open(suite[\"\"]).read().splitlines())tcase_fpaths.extend((l for l in suite_lines if len(l) > and l[] != \"\"))else: tcase_fpaths.extend(declr.get(\"\", []))if len(set(tcase_fpaths)) != len(tcase_fpaths):cij.err(\"\")return Nonefor tcase_fname in tcase_fpaths: tcase = tcase_setup(trun, suite, tcase_fname)if not tcase:cij.err(\"\")return Nonesuite[\"\"].append(tcase)return suite", "docstring": "Creates and initialized a TESTSUITE struct and site-effects such as creating\noutput directories and forwarding initialization of testcases", "id": "f13350:m10"} {"signature": "def tcase_exit(trun, tsuite, tcase):", "body": "if trun[\"\"][\"\"]:cij.emph(\"\" % tcase[\"\"])rcode = for hook in reversed(tcase[\"\"][\"\"]): rcode = script_run(trun, hook)if rcode:breakif trun[\"\"][\"\"]:cij.emph(\"\" % rcode, rcode)return rcode", "docstring": "...", "id": "f13350:m11"} {"signature": "def tcase_enter(trun, tsuite, tcase):", "body": "if trun[\"\"][\"\"]:cij.emph(\"\")cij.emph(\"\" % tcase[\"\"])cij.emph(\"\" % tcase[\"\"])rcode = for hook in tcase[\"\"][\"\"]: rcode = script_run(trun, hook)if rcode:breakif trun[\"\"][\"\"]:cij.emph(\"\" % rcode, rcode)return rcode", "docstring": "setup res_root and aux_root, log info and run tcase-enter-hooks\n\n@returns 0 when all hooks succeed, some value othervise", "id": "f13350:m12"} {"signature": "def trun_exit(trun):", "body": "if trun[\"\"][\"\"]:cij.emph(\"\")rcode = for hook in reversed(trun[\"\"][\"\"]): rcode = script_run(trun, hook)if rcode:breakif trun[\"\"][\"\"]:cij.emph(\"\" % rcode, rcode)return rcode", "docstring": "Triggers when exiting the given testrun", "id": "f13350:m13"} {"signature": "def trun_enter(trun):", "body": "if trun[\"\"][\"\"]:cij.emph(\"\")trun[\"\"][\"\"] = int(time.time()) rcode = for hook in trun[\"\"][\"\"]: rcode = script_run(trun, hook)if rcode:breakif trun[\"\"][\"\"]:cij.emph(\"\" % rcode, rcode)return rcode", "docstring": "Triggers when entering the given testrun", "id": "f13350:m14"} {"signature": "def trun_setup(conf):", "body": "declr = Nonetry:with open(conf[\"\"]) as declr_fd:declr = yaml.safe_load(declr_fd)except AttributeError as exc:cij.err(\"\" % exc)if not declr:return Nonetrun = copy.deepcopy(TRUN)trun[\"\"] = cij.VERSIONtrun[\"\"] = copy.deepcopy(conf)trun[\"\"] = conf[\"\"]trun[\"\"] = os.sep.join([trun[\"\"], \"\"])trun[\"\"].update(copy.deepcopy(declr.get(\"\", {})))os.makedirs(trun[\"\"])hook_names = declr.get(\"\", [])if \"\" not in hook_names:hook_names = [\"\"] + hook_namesif 
hook_names[] != \"\":return Nonetrun[\"\"] = hooks_setup(trun, trun, hook_names)for enum, declr in enumerate(declr[\"\"]): tsuite = tsuite_setup(trun, declr, enum)if tsuite is None:cij.err(\"\" % tsuite)return trun[\"\"].append(tsuite)trun[\"\"][\"\"] += len(tsuite[\"\"])return trun", "docstring": "Setup the testrunner data-structure, embedding the parsed environment\nvariables and command-line arguments and continues with setup for testplans,\ntestsuites, and testcases", "id": "f13350:m15"} {"signature": "def main(conf):", "body": "fpath = yml_fpath(conf[\"\"])if os.path.exists(fpath): cij.err(\"\" % fpath)return trun = trun_setup(conf) if not trun:return trun_to_file(trun) trun_emph(trun) tr_err = tr_ent_err = trun_enter(trun)for tsuite in (ts for ts in trun[\"\"] if not tr_ent_err):ts_err = ts_ent_err = tsuite_enter(trun, tsuite)for tcase in (tc for tc in tsuite[\"\"] if not ts_ent_err):tc_err = tcase_enter(trun, tsuite, tcase)if not tc_err:tc_err += script_run(trun, tcase)tc_err += tcase_exit(trun, tsuite, tcase)tcase[\"\"] = \"\" if tc_err else \"\"trun[\"\"][tcase[\"\"]] += trun[\"\"][\"\"] -= ts_err += tc_err trun_to_file(trun) if not ts_ent_err:ts_err += tsuite_exit(trun, tsuite)ts_err += ts_ent_err tr_err += ts_errtsuite[\"\"] = \"\" if ts_err else \"\"cij.emph(\"\" % tsuite[\"\"], tsuite[\"\"] != \"\")if not tr_ent_err:trun_exit(trun)tr_err += tr_ent_errtrun[\"\"] = \"\" if tr_err else \"\"trun[\"\"][\"\"] = int(time.time()) + trun_to_file(trun) cij.emph(\"\" % trun[\"\"])cij.emph(\"\" % trun[\"\"], trun[\"\"] != \"\")return trun[\"\"][\"\"] + trun[\"\"][\"\"]", "docstring": "CIJ Test Runner main entry point", "id": "f13350:m16"} {"signature": "def env():", "body": "ssh = cij.env_to_dict(PREFIX, REQUIRED)if \"\" in ssh:ssh[\"\"] = cij.util.expand_path(ssh[\"\"])if cij.ENV.get(\"\") is None:cij.ENV[\"\"] = \"\"cij.warn(\"\" % (cij.ENV.get(\"\")))if cij.ENV.get(\"\") is None:cij.ENV[\"\"] = \"\"cij.warn(\"\" % (cij.ENV.get(\"\")))return ", "docstring": "Verify SSH variables and construct exported variables", "id": "f13351:m0"} {"signature": "def command(cmd, shell=True, echo=True, suffix=None):", "body": "if env():cij.err(\"\")return prefix = []if cij.ENV.get(\"\") == \"\":prefix.append(\"\")if cij.ENV.get(\"\"):prefix.append(\"\")prefix.append(cij.ENV.get(\"\"))prefix.append(\"\")args = []if cij.ENV.get(\"\"):args.append(\"\")args.append(cij.ENV.get(\"\"))if cij.ENV.get(\"\"):args.append(\"\")args.append(cij.ENV.get(\"\"))args.append(\"\".join([cij.ENV.get(\"\"), cij.ENV.get(\"\")]))wrapped = prefix + args + [\"\" % \"\".join(cmd)]if suffix:wrapped += suffixreturn cij.util.execute(wrapped, shell, echo)", "docstring": "SSH: Run the given command over SSH as defined in environment", "id": "f13351:m1"} {"signature": "def push(src, dst, folder=False):", "body": "if env():cij.err(\"\")return args = []if cij.ENV.get(\"\"):args.append(\"\")args.append(cij.ENV.get(\"\"))if cij.ENV.get(\"\"):args.append(\"\")args.append(cij.ENV.get(\"\"))if folder:args.append(\"\")target = \"\" % (\"\".join([cij.ENV.get(\"\"), cij.ENV.get(\"\")]), dst)wrapped = [\"\", \"\".join(args), src, target]return cij.util.execute(wrapped, shell=True, echo=True)", "docstring": "SSH: push data to remote linux", "id": "f13351:m2"} {"signature": "def pull(src, dst, folder=False):", "body": "if env():cij.err(\"\")return args = []if cij.ENV.get(\"\"):args.append(\"\")args.append(cij.ENV.get(\"\"))if cij.ENV.get(\"\"):args.append(\"\")args.append(cij.ENV.get(\"\"))if folder:args.append(\"\")target = \"\" % 
(\"\".join([cij.ENV.get(\"\"), cij.ENV.get(\"\")]), src)wrapped = [\"\", \"\".join(args), target, dst]return cij.util.execute(wrapped, shell=True, echo=True)", "docstring": "SSH: pull data from remote linux", "id": "f13351:m3"} {"signature": "def wait(timeout=):", "body": "if env():cij.err(\"\")return timeout_backup = cij.ENV.get(\"\")try:time_start = time.time()cij.ENV[\"\"] = \"\"while True:time_current = time.time()if (time_current - time_start) > timeout:cij.err(\"\")return status, _, _ = command([\"\"], shell=True, echo=False)if not status:breakcij.info(\"\" % (time_current - time_start))finally:if timeout_backup is None:del cij.ENV[\"\"]else:cij.ENV[\"\"] = timeout_backupreturn ", "docstring": "Wait util target connected", "id": "f13351:m4"} {"signature": "def reboot(timeout=, extra=\"\"):", "body": "if env():cij.err(\"\")return timeout_backup = cij.ENV.get(\"\")try:time_start = time.time()status, last_uptime, _ = command([\"\"], shell=True, echo=False)if status:return cij.ENV[\"\"] = \"\"cij.info(\"\" % cij.ENV.get(\"\"))command([\"\" % extra], shell=True, echo=False)while True:time_current = time.time()if (time_current - time_start) > timeout:cij.err(\"\")return status, current_uptime, _ = command([\"\"], shell=True, echo=False)if not status and current_uptime != last_uptime:breakcij.info(\"\" % (time_current - time_start))finally:if timeout_backup is None:del cij.ENV[\"\"]else:cij.ENV[\"\"] = timeout_backupreturn ", "docstring": "Reboot target", "id": "f13351:m5"} {"signature": "def main():", "body": "cmd = [\"\"]rcode, _, _ = cij.ssh.command(cmd, shell=True, echo=False)return cij.test.FAIL if rcode else cij.test.PASS", "docstring": "@returns cij.test.PASS on success and cij.test.FAIL otherwise", "id": "f13352:m0"} {"signature": "def aggregate(l):", "body": "tree = radix.Radix()for item in l:try:tree.add(item)except (ValueError) as err:raise Exception(\"\".format(item))return aggregate_tree(tree).prefixes()", "docstring": "Aggregate a `list` of prefixes.\n\n Keyword arguments:\n l -- a python list of prefixes\n\n Example use:\n >>> aggregate([\"10.0.0.0/8\", \"10.0.0.0/24\"])\n ['10.0.0.0/8']", "id": "f13355:m0"} {"signature": "def aggregate_tree(l_tree):", "body": "def _aggregate_phase1(tree):n_tree = radix.Radix()for prefix in tree.prefixes():if tree.search_worst(prefix).prefix == prefix:n_tree.add(prefix)return n_treedef _aggregate_phase2(tree):n_tree = radix.Radix()for rnode in tree:p = text(ip_network(text(rnode.prefix)).supernet())r = tree.search_covered(p)if len(r) == :if r[].prefixlen == r[].prefixlen == rnode.prefixlen:n_tree.add(p)else:n_tree.add(rnode.prefix)else:n_tree.add(rnode.prefix)return n_treel_tree = _aggregate_phase1(l_tree)if len(l_tree.prefixes()) == :return l_treewhile True:r_tree = _aggregate_phase2(l_tree)if l_tree.prefixes() == r_tree.prefixes():breakelse:l_tree = r_treedel r_treereturn l_tree", "docstring": "Walk a py-radix tree and aggregate it.\n\n Arguments\n l_tree -- radix.Radix() object", "id": "f13355:m1"} {"signature": "def get_field_value_from_context(field_name, context_list):", "body": "field_path = field_name.split('')if field_path[] == '':context_index = field_path.pop()else:context_index = -while field_path[] == '':context_index -= field_path.pop()try:field_value = context_list[context_index]while len(field_path):field = field_path.pop()if isinstance(field_value, (list, tuple, ListModel)):if field.isdigit():field = int(field)field_value = field_value[field]elif isinstance(field_value, dict):try:field_value = field_value[field]except 
KeyError:if field.isdigit():field = int(field)field_value = field_value[field]else:field_value = Noneelse:field_value = getattr(field_value, field)return field_valueexcept (IndexError, AttributeError, KeyError, TypeError):return None", "docstring": "Helper to get field value from string path.\nString '' is used to go up on context stack. It just\ncan be used at the beginning of path: ..field_name_1\nOn the other hand, '' is used to start lookup from first item on context.", "id": "f13359:m0"} {"signature": "def _get_checking_value(self, value):", "body": "return value", "docstring": "It must be override on descendant validators", "id": "f13359:c6:m1"} {"signature": "def __init__(self, error_code_map=None, error_messages=None,message_values=None, hidden=False, *args, **kwargs):", "body": "self.error_code_map = self.error_code_map.copy()self.error_messages = self.error_messages.copy()self.message_values = self.message_values.copy()if error_code_map:self.error_code_map.update(error_code_map)if error_messages:self.error_messages.update(error_messages)if message_values:self.message_values.update(message_values)self.messages = {}self.hidden = hidden", "docstring": ":param error_code_map: Map of orginial error codes to custom error codes\n:rparam error_code_map: dict\n:param error_messages: Map of error codes to error messages\n:rparam error_messages: dict\n:param message_values: Map of placeholders to values\n:rparam error_messages: dict", "id": "f13360:c1:m0"} {"signature": "def error(self, error_code, value, **kwargs):", "body": "code = self.error_code_map.get(error_code, error_code)try:message = Template(self.error_messages[code])except KeyError:message = Template(self.error_messages[error_code])placeholders = {\"\": self.hidden_value if self.hidden else value}placeholders.update(kwargs)placeholders.update(self.message_values)self.messages[code] = message.safe_substitute(placeholders)", "docstring": "Helper to add error to messages field. 
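get_field_value_from_context() above resolves a string path against a stack of context objects, indexing into dicts, lists and attributes segment by segment; the separator and the '..'-style climb markers are elided in this dump. A simplified sketch of the per-segment lookup, assuming a '.'-separated path purely for illustration:

def get_path(obj, path):
    value = obj
    for field in path.split("."):
        if isinstance(value, (list, tuple)):
            value = value[int(field)]     # numeric segment indexes a sequence
        elif isinstance(value, dict):
            value = value[field]
        else:
            value = getattr(value, field)
    return value

data = {"user": {"emails": ["a@example.org", "b@example.org"]}}
print(get_path(data, "user.emails.1"))    # b@example.org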
It fills placeholder with extra call parameters\nor values from message_value map.\n\n:param error_code: Error code to use\n:rparam error_code: str\n:param value: Value checked\n:param kwargs: Map of values to use in placeholders", "id": "f13360:c1:m1"} {"signature": "def __init__(self, comp_value=None, *args, **kwargs):", "body": "super(EqualTo, self).__init__(*args, **kwargs)self.comp_value = comp_valueself.message_values.update({'': self.comp_value})", "docstring": ":param comp_value: Static value to use on check", "id": "f13360:c2:m0"} {"signature": "def __init__(self, comp_value=None, *args, **kwargs):", "body": "super(NotEqualTo, self).__init__(*args, **kwargs)self.comp_value = comp_valueself.message_values.update({'': self.comp_value})", "docstring": ":param comp_value: Static value to use on check", "id": "f13360:c3:m0"} {"signature": "def __init__(self, token=None, case_sensitive=True, *args, **kwargs):", "body": "super(StringNotContaining, self).__init__(*args, **kwargs)self.token = tokenself.case_sensitive = case_sensitiveself.message_values.update({'': self.token})", "docstring": ":param token: Static value to see check it is contained in the string\n:param case_sensitive: Boolean to check the string matching case or not", "id": "f13360:c4:m0"} {"signature": "def date_proc(func):", "body": "@wraps(func)def wrapped(request, *args, **kwargs):if '' in request.GET and request.GET[''] == '':raise Http404(\"\")elif '' not in request.GET:date = datetime.today()return func(request, date)else:date = tuple(int(intValue) for intValue in request.GET[''].split(''))if len(date) == :date = datetime(*date)elif len(date) == :date = datetime(*date, day = )else:date = datetime(*date, month = , day = )return func(request, date)return wrapped", "docstring": "An decorator checking whether date parameter is passing in or not. 
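The error() helper above maps an internal error code to a possibly remapped user-facing code, then renders the message with string.Template so callers can supply extra placeholder values. A reduced sketch with made-up codes and message texts (the real ones are elided in this dump):

from string import Template

ERROR_MESSAGES = {"not_equal": "'$value' must be equal to '$comp_value'"}

def render_error(code, value, **placeholders):
    placeholders.setdefault("value", value)
    return Template(ERROR_MESSAGES[code]).safe_substitute(placeholders)

print(render_error("not_equal", 3, comp_value=5))
# '3' must be equal to '5'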
If not, default date value is all PTT data.\n Else, return PTT data with right date.\n Args:\n func: function you want to decorate.\n request: WSGI request parameter getten from django.\n\n Returns:\n date:\n a datetime variable, you can only give year, year + month or year + month + day, three type.\n The missing part would be assigned default value 1 (for month is Jan, for day is 1).", "id": "f13365:m0"} {"signature": "def queryString_required(strList):", "body": "def _dec(function):@wraps(function)def _wrap(request, *args, **kwargs):for i in strList:if i not in request.GET:raise Http404(\"\")return function(request, *args, **kwargs)return _wrapreturn _dec", "docstring": "An decorator checking whether queryString key is valid or not\n Args:\n str: allowed queryString key\n\n Returns:\n if contains invalid queryString key, it will raise exception.", "id": "f13365:m1"} {"signature": "def queryString_required_ClassVersion(strList):", "body": "def _dec(function):@wraps(function)def _wrap(classInstance, request, *args, **kwargs):for i in strList:if i not in request.GET:raise Http404(\"\")return function(classInstance, request, *args, **kwargs)return _wrapreturn _dec", "docstring": "An decorator checking whether queryString key is valid or not\n Args:\n str: allowed queryString key\n\n Returns:\n if contains invalid queryString key, it will raise exception.", "id": "f13365:m2"} {"signature": "def getJsonFromApi(view, request):", "body": "jsonText = view(request)jsonText = json.loads(jsonText.content.decode(''))return jsonText", "docstring": "Return json from querying Web Api\n\n Args:\n view: django view function.\n request: http request object got from django.\n\n Returns: json format dictionary", "id": "f13365:m5"} {"signature": "def _set_range(self, start, stop, value, value_len):", "body": "assert stop >= start and value_len >= range_len = stop - startif range_len < value_len:self._insert_zeros(stop, stop + value_len - range_len)self._copy_to_range(start, value, value_len)elif range_len > value_len:self._del_range(stop - (range_len - value_len), stop)self._copy_to_range(start, value, value_len)else:self._copy_to_range(start, value, value_len)", "docstring": "Assumes that start and stop are already in 'buffer' coordinates. 
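queryString_required() above is a Django view decorator that rejects a request with Http404 when a mandatory GET parameter is missing. A self-contained sketch of the same idea (the 404 message text in the original is elided, so the one below is made up):

from functools import wraps
from django.http import Http404

def querystring_required(keys):
    def _dec(view):
        @wraps(view)
        def _wrap(request, *args, **kwargs):
            for key in keys:
                if key not in request.GET:
                    raise Http404("missing query-string parameter: %s" % key)
            return view(request, *args, **kwargs)
        return _wrap
    return _dec

# @querystring_required(("year", "month"))
# def monthly_report(request): ...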
value is a byte iterable.\nvalue_len is fractional.", "id": "f13399:c1:m8"} {"signature": "def pack(self):", "body": "fields = self._all_fields()ctx = PackContext(self, fields)for field in fields:if field.pack_if.deref(ctx):try:ctx.output_buffer.set(field.pack_ref.deref(ctx), field.pack_absolute_position_ref.deref(ctx))except:raise chain_exceptions(InstructBufferError(\"\", ctx, type(self),field.attr_name()))result = bytearray(ctx.output_buffer.get())static_byte_size = type(self).byte_sizeif static_byte_size:static_byte_size = int(math.ceil(static_byte_size))assert len(result) <= static_byte_size,(\"\" +\"\").format(type(self), len(result),static_byte_size)if len(result) < static_byte_size:result += bytearray(static_byte_size - len(result))return result", "docstring": "Packs the object and returns a buffer representing the packed object.", "id": "f13400:c2:m1"} {"signature": "def unpack(self, buffer):", "body": "fields = self._all_fields()ctx = UnpackContext(self, fields, buffer)for field in fields:try:if field.unpack_if.deref(ctx):for prev_field in field.unpack_after:prev_field.unpack_value_ref.deref(ctx)field.unpack_value_ref.deref(ctx)else:setattr(self, field.attr_name(), None)except:raise chain_exceptions(InstructBufferError(\"\", ctx, type(self), field.attr_name()))return self.calc_byte_size(ctx)", "docstring": "Unpacks the object's fields from buffer.", "id": "f13400:c2:m2"} {"signature": "def calc_byte_size(self, ctx=None):", "body": "if ctx is None:ctx = PackContext(self, type(self).__fields__)return TotalSizeReference().deref(ctx)", "docstring": "Returns this instance's size. If the size has to be calculated it may require packing some of the fields.", "id": "f13400:c2:m3"} {"signature": "def __init__(self, start, stop):", "body": "self.start = start if start is not None else self.stop = stopassert self.start >= , \"\".format(self.start)if self.stop is not None:assert self.stop >= , \"\".format(self.stop)assert self.start <= self.stop, \"\".format(self.start, self.stop)", "docstring": ":param start: range start (non-negative)\n:type start: int or float\n:param stop: range stop (can be None for open range). If not None, must be >= start.\n:type stop: int, float or None", "id": "f13401:c1:m0"} {"signature": "def is_open(self):", "body": "return self.stop is None", "docstring": ":returns: True if the range is open, i.e. stop is None\n:rtype: bool", "id": "f13401:c1:m1"} {"signature": "def to_closed(self, new_stop):", "body": "assert new_stop >= self.start, \"\".format(self.start, new_stop)return SequentialRange(self.start, self.stop if self.stop is not None else new_stop)", "docstring": ":param new_stop: new stop value to use if existing stop is None. 
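_set_range() above grows, shrinks or overwrites a region of the buffer depending on how the new value's length compares to the target range. On a plain Python bytearray the same insert/delete/overwrite behaviour falls out of slice assignment, which makes the intent easy to see:

def set_range(buf, start, stop, value):
    buf[start:stop] = value      # slice assignment inserts or deletes as needed
    return buf

print(set_range(bytearray(b"0123456789"), 2, 4, b"abcdef"))
# bytearray(b'01abcdef456789')  -- range grew from 2 to 6 bytes
print(set_range(bytearray(b"0123456789"), 2, 8, b"xy"))
# bytearray(b'01xy89')          -- range shrank from 6 to 2 bytes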
Must be >= start.\n:type new_stop: int or float\n:returns: new closed range, using the current stop if self is a closed range or new_stop if self is open.\n:rtype: SequentialRange", "id": "f13401:c1:m2"} {"signature": "def byte_length(self):", "body": "if self.is_open():return Nonereturn self.stop - self.start", "docstring": ":returns: length of range if a closed range or None if open range.\n:rtype: int or float or None", "id": "f13401:c1:m3"} {"signature": "def max_stop(self):", "body": "return self.stop", "docstring": ":returns: length of range if a closed range or None if open range.\n:rtype: int or float or None", "id": "f13401:c1:m4"} {"signature": "def to_slice(self):", "body": "return slice(self.start, self.stop, )", "docstring": ":returns: a slice object from start to stop\n:rtype: slice", "id": "f13401:c1:m5"} {"signature": "def overlaps(self, other):", "body": "a, b = (self, other) if self.start <= other.start else (other, self)a_imag_stop = a.to_closed(sys.maxsize).stopreturn a_imag_stop > b.start", "docstring": ":returns: True if there's an overlap (non-empty intersection) between two ranges\n:rtype: bool", "id": "f13401:c1:m6"} {"signature": "def contains(self, point):", "body": "return self.start <= point and (self.stop is None or self.stop > point)", "docstring": ":param point: point to check if contained in range\n:type point: int or float\n:returns: True if the range contains `point`\n:rtype: bool", "id": "f13401:c1:m7"} {"signature": "def byte_length(self):", "body": "sum = for r in self:if r.is_open():return Nonesum += r.byte_length()return sum", "docstring": ":returns: sum of lengthes of all ranges or None if one of the ranges is open\n:rtype: int, float or None", "id": "f13401:c2:m2"} {"signature": "def has_overlaps(self):", "body": "sorted_list = sorted(self)for i in range(, len(sorted_list) - ):if sorted_list[i].overlaps(sorted_list[i + ]):return Truereturn False", "docstring": ":returns: True if one or more range in the list overlaps with another\n:rtype: bool", "id": "f13401:c2:m3"} {"signature": "def max_stop(self):", "body": "m = for r in self:if r.is_open():return Nonem = max(m, r.stop)return m", "docstring": ":returns: maximum stop in list or None if there's at least one open range\n:type: int, float or None", "id": "f13401:c2:m4"} {"signature": "def sorted(self):", "body": "return SequentialRangeList(sorted(self))", "docstring": ":returns: a new sorted SequentialRangeList\n:rtype: SequentialRangeList", "id": "f13401:c2:m5"} {"signature": "def byte_offset(self, bytes):", "body": "remaining_bytes = bytesfor r in self:if r.is_open() or r.byte_length() >= remaining_bytes:return r.start + remaining_byteselse:remaining_bytes -= r.byte_length()assert False, \"\".format(bytes, self)", "docstring": "Maps `bytes` length to a sequence's offset. 
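SequentialRange.overlaps() above orders the two ranges by start, treats an open range as extending to a very large stop, and then checks whether the earlier range reaches past the later one's start. The same test in isolation, on plain (start, stop) pairs:

import sys

def overlaps(a, b):
    """a and b are (start, stop) pairs; stop may be None for an open range."""
    (s1, e1), (s2, _) = sorted([a, b], key=lambda r: r[0])
    return (e1 if e1 is not None else sys.maxsize) > s2

print(overlaps((0, 10), (5, None)))   # True
print(overlaps((0, 5), (5, 8)))       # False: half-open ranges only touch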
For example, if we do byte_offset(5) and our list of sequences is\n[(0, 2), (10, 11), (40, 45)] then the returned value will be 42.\nNote that `bytes` must be <= byte_length().\n:returns: actual offset in one of the sequences in the range for request byte length.\n:rtype: int or float", "id": "f13401:c2:m6"} {"signature": "def deref(self, ctx):", "body": "if self in ctx.call_nodes:raise CyclicReferenceError(ctx, self)if self in ctx.cached_results:return ctx.cached_results[self]try:ctx.call_nodes.add(self)ctx.call_stack.append(self)result = self.evaluate(ctx)ctx.cached_results[self] = resultreturn resultexcept:if ctx.exception_call_stack is None:ctx.exception_call_stack = list(ctx.call_stack)raisefinally:ctx.call_stack.pop()ctx.call_nodes.remove(self)", "docstring": "Returns the value this reference is pointing to. This method uses 'ctx' to resolve the reference and return\nthe value this reference references.\nIf the call was already made, it returns a cached result.\nIt also makes sure there's no cyclic reference, and if so raises CyclicReferenceError.", "id": "f13411:c2:m1"} {"signature": "def __new__(cls, min_val_or_tuple=, max_val=sys.maxsize):", "body": "if isinstance(min_val_or_tuple, (list, tuple, MinMax)):assert len(min_val_or_tuple) == min_val, max_val = min_val_or_tupleelse:min_val = min_val_or_tupleassert min_val <= max_valreturn super(MinMax, cls).__new__(cls, max(, min_val), min(max_val, sys.maxsize))", "docstring": "Initialize a new MinMax instance. This initializer works in four different modes:\n * Copy constructor. If you pass it a MinMax object it'll copy the min/max values.\n * Construct from tuple/list. Construct the min/max by accessing [0] and [1].\n * Construct from a single argument. User provides the min value, max is set to sys.maxsize.\n * Construct from two arguments. 
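deref() above wraps evaluate() with two safety nets: per-context memoisation of results and detection of cyclic references by tracking which nodes are currently being dereferenced. A stripped-down version of the same control flow, with evaluate passed in as a callable and the context reduced to a plain dict:

class CyclicReferenceError(Exception):
    pass

def deref(node, ctx, evaluate):
    if node in ctx["call_nodes"]:
        raise CyclicReferenceError(node)     # re-entered a node still being evaluated
    if node in ctx["cache"]:
        return ctx["cache"][node]            # reuse a previously computed result
    ctx["call_nodes"].add(node)
    try:
        result = evaluate(node, ctx)
        ctx["cache"][node] = result
        return result
    finally:
        ctx["call_nodes"].remove(node)

ctx = {"call_nodes": set(), "cache": {}}
print(deref("total_size", ctx, lambda node, ctx: 42))   # 42, and now cached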
User provides the min and max values.", "id": "f13418:c2:m0"} {"signature": "def create_from_string(self, string, context=EMPTY_CONTEXT, *args, **kwargs):", "body": "if not PY2 and not isinstance(string, bytes):raise TypeError(\"\")io = StringIO(string)instance = self.create_from_stream(io, context, *args, **kwargs)io.close()return instance", "docstring": "Deserializes a new instance from a string.\nThis is a convenience method that creates a StringIO object and calls create_instance_from_stream().", "id": "f13418:c4:m3"} {"signature": "def safe_repr(obj):", "body": "try:obj_repr = repr(obj)except:obj_repr = \"\".format(type(obj), id(obj))return obj_repr", "docstring": "Returns a repr of an object and falls back to a minimal representation of type and ID if the call to repr raised\n an error.\n\n :param obj: object to safe repr\n :returns: repr string or '(type repr error)' string\n :rtype: str", "id": "f13419:m0"} {"signature": "def keep_kwargs_partial(func, *args, **keywords):", "body": "def newfunc(*fargs, **fkeywords):newkeywords = fkeywords.copy()newkeywords.update(keywords)return func(*(args + fargs), **newkeywords)newfunc.func = funcnewfunc.args = argsnewfunc.keywords = keywordsreturn newfunc", "docstring": "Like functools.partial but instead of using the new kwargs, keeps the old ones.", "id": "f13421:m0"} {"signature": "def ConstField(name, value, marshal=None):", "body": "if marshal is None:marshal = valueif isinstance(marshal, Struct):marshal = type(marshal)elif not isinstance(marshal, Marshal):raise InstructError(\"\" %(name, value))return OrigConstField(name, marshal, value)", "docstring": "This macro can be used in several methods:\n\n>>> ConstField(\"foo\", 5, UBInt8)\n\nThis created a constant field called ``foo`` with a value of 5 and is serialized/deserialized using UBInt8.\n\n>>> ConstField(\"foo\", MyStruct(my_field=1, my_other_field=2))\n\nThis time ``foo`` is set with the ``MyStruct`` instance passed here. 
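keep_kwargs_partial() above differs from functools.partial in exactly one respect: keyword arguments bound at creation time take precedence over keyword arguments supplied at call time, instead of the other way around. A quick comparison:

import functools

def greet(name, greeting="hello"):
    return "%s, %s" % (greeting, name)

bound = functools.partial(greet, greeting="hi")
print(bound("bob", greeting="hey"))   # "hey, bob" -- call-time keyword wins

# keep_kwargs_partial(greet, greeting="hi")("bob", greeting="hey")
# would return "hi, bob" -- the creation-time keyword is kept.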
Notice that we don't need to pass an I/O\nargument because the value is an I/O instance by itself.\n\n:param name: name of the field\n:param value: the value to use as a constant\n:param marshal: a marshal instance to serialize/deserialize this field (optional if ``value`` is a marshal)\n:rtype: Field", "id": "f13424:m0"} {"signature": "def generate_image(source, outname, settings, options=None):", "body": "logger = logging.getLogger(__name__)if settings[''] or source.endswith(''):utils.copy(source, outname, symlink=settings[''])returnimg = _read_image(source)original_format = img.formatif settings[''] and settings['']:logger.warning(\"\"\"\"\"\")if settings[''] and _has_exif_tags(img):if options is not None:options = deepcopy(options)else:options = {}options[''] = img.info['']if settings['']:try:img = Transpose().process(img)except (IOError, IndexError):passif settings['']:try:logger.debug('', settings[''])processor_cls = getattr(pilkit.processors,settings[''])except AttributeError:logger.error('', settings[''])sys.exit()width, height = settings['']if img.size[] < img.size[]:height, width = width, heightprocessor = processor_cls(width, height, upscale=False)img = processor.process(img)for receiver in signals.img_resized.receivers_for(img):img = receiver(img, settings=settings)outformat = img.format or original_format or ''logger.debug('', outname, outformat)save_image(img, outname, outformat, options=options, autoconvert=True)", "docstring": "Image processor, rotate and resize the image.\n\n :param source: path to an image\n :param outname: output filename\n :param settings: settings dict\n :param options: dict with PIL options (quality, optimize, progressive)", "id": "f13446:m2"} {"signature": "def generate_thumbnail(source, outname, box, fit=True, options=None,thumb_fit_centering=(, )):", "body": "logger = logging.getLogger(__name__)img = _read_image(source)original_format = img.formatif fit:img = ImageOps.fit(img, box, PILImage.ANTIALIAS,centering=thumb_fit_centering)else:img.thumbnail(box, PILImage.ANTIALIAS)outformat = img.format or original_format or ''logger.debug('', outname, outformat)save_image(img, outname, outformat, options=options, autoconvert=True)", "docstring": "Create a thumbnail image.", "id": "f13446:m3"} {"signature": "def process_image(filepath, outpath, settings):", "body": "logger = logging.getLogger(__name__)logger.info('', filepath)filename = os.path.split(filepath)[]outname = os.path.join(outpath, filename)ext = os.path.splitext(filename)[]if ext in ('', '', '', ''):options = settings['']elif ext == '':options = {'': True}else:options = {}try:generate_image(filepath, outname, settings, options=options)if settings['']:thumb_name = os.path.join(outpath, get_thumb(settings, filename))generate_thumbnail(outname, thumb_name, settings[''],fit=settings[''], options=options,thumb_fit_centering=settings[\"\"])except Exception as e:logger.info('', e)if logger.getEffectiveLevel() == logging.DEBUG:raiseelse:return Status.FAILUREreturn Status.SUCCESS", "docstring": "Process one image: resize, create thumbnail.", "id": "f13446:m4"} {"signature": "def get_size(file_path):", "body": "try:im = _read_image(file_path)except (IOError, IndexError, TypeError, AttributeError) as e:logger = logging.getLogger(__name__)logger.error(\"\", file_path, e)else:width, height = im.sizereturn {'': width,'': height}", "docstring": "Return image size (width and height).", "id": "f13446:m5"} {"signature": "def get_exif_data(filename):", "body": "logger = logging.getLogger(__name__)img = 
_read_image(filename)try:exif = img._getexif() or {}except ZeroDivisionError:logger.warning('')return Nonedata = {TAGS.get(tag, tag): value for tag, value in exif.items()}if '' in data:try:data[''] = {GPSTAGS.get(tag, tag): valuefor tag, value in data[''].items()}except AttributeError:logger = logging.getLogger(__name__)logger.info('')del data['']return data", "docstring": "Return a dict with the raw EXIF data.", "id": "f13446:m6"} {"signature": "def get_iptc_data(filename):", "body": "logger = logging.getLogger(__name__)iptc_data = {}raw_iptc = {}try:img = _read_image(filename)raw_iptc = IptcImagePlugin.getiptcinfo(img)except SyntaxError:logger.info('', filename)if raw_iptc and (, ) in raw_iptc:iptc_data[\"\"] = raw_iptc[(, )].decode('', errors='')if raw_iptc and (, ) in raw_iptc:iptc_data[\"\"] = raw_iptc[(, )].decode('',errors='')if raw_iptc and (, ) in raw_iptc:iptc_data[\"\"] = raw_iptc[(, )].decode('',errors='')return iptc_data", "docstring": "Return a dict with the raw IPTC data.", "id": "f13446:m7"} {"signature": "def dms_to_degrees(v):", "body": "d = float(v[][]) / float(v[][])m = float(v[][]) / float(v[][])s = float(v[][]) / float(v[][])return d + (m / ) + (s / )", "docstring": "Convert degree/minute/second to decimal degrees.", "id": "f13446:m8"} {"signature": "def get_exif_tags(data, datetime_format=''):", "body": "logger = logging.getLogger(__name__)simple = {}for tag in ('', '', ''):if tag in data:if isinstance(data[tag], tuple):simple[tag] = data[tag][].strip()else:simple[tag] = data[tag].strip()if '' in data:fnumber = data['']try:simple[''] = float(fnumber[]) / fnumber[]except Exception:logger.debug('', fnumber, exc_info=True)if '' in data:focal = data['']try:simple[''] = round(float(focal[]) / focal[])except Exception:logger.debug('', focal,exc_info=True)if '' in data:exptime = data['']if isinstance(exptime, tuple):try:simple[''] = str(fractions.Fraction(exptime[],exptime[]))except ZeroDivisionError:logger.info('', exptime)elif isinstance(exptime, int):simple[''] = str(exptime)else:logger.info('', exptime)if data.get(''):simple[''] = data['']if '' in data:date = data[''].rsplit('')[]try:simple[''] = datetime.strptime(date, '')simple[''] = simple[''].strftime(datetime_format)except (ValueError, TypeError) as e:logger.info('', e)if '' in data:info = data['']lat_info = info.get('')lon_info = info.get('')lat_ref_info = info.get('')lon_ref_info = info.get('')if lat_info and lon_info and lat_ref_info and lon_ref_info:try:lat = dms_to_degrees(lat_info)lon = dms_to_degrees(lon_info)except (ZeroDivisionError, ValueError, TypeError):logger.info('')else:simple[''] = {'': - lat if lat_ref_info != '' else lat,'': - lon if lon_ref_info != '' else lon,}return simple", "docstring": "Make a simplified version with common tags from raw EXIF data.", "id": "f13446:m9"} {"signature": "def init_logging(name, level=logging.INFO):", "body": "logger = logging.getLogger(name)logger.setLevel(level)try:if os.isatty(sys.stdout.fileno()) andnot sys.platform.startswith(''):formatter = ColoredFormatter()elif level == logging.DEBUG:formatter = Formatter('')else:formatter = Formatter('')except Exception:formatter = Formatter('')handler = logging.StreamHandler()handler.setFormatter(formatter)logger.addHandler(handler)", "docstring": "Logging config\n\n Set the level and create a more detailed formatter for debug mode.", "id": "f13448:m1"} {"signature": "def load_exif(album):", "body": "if not hasattr(album.gallery, \"\"):_restore_cache(album.gallery)cache = album.gallery.exifCachefor media in 
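dms_to_degrees() above converts the EXIF GPS representation, where each of degrees, minutes and seconds arrives as a (numerator, denominator) rational, into decimal degrees; the divisors stripped from this dump are the standard 60 and 3600. A worked example:

def dms_to_degrees(v):
    d = float(v[0][0]) / float(v[0][1])
    m = float(v[1][0]) / float(v[1][1])
    s = float(v[2][0]) / float(v[2][1])
    return d + m / 60 + s / 3600

# 48 deg 51' 29.6" -> 48.85822...
print(dms_to_degrees(((48, 1), (51, 1), (296, 10))))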
album.medias:if media.type == \"\":key = os.path.join(media.path, media.filename)if key in cache:media.exif = cache[key]", "docstring": "Loads the exif data of all images in an album from cache", "id": "f13450:m0"} {"signature": "def _restore_cache(gallery):", "body": "cachePath = os.path.join(gallery.settings[\"\"], \"\")try:if os.path.exists(cachePath):with open(cachePath, \"\") as cacheFile:gallery.exifCache = pickle.load(cacheFile)logger.debug(\"\", len(gallery.exifCache))else:gallery.exifCache = {}except Exception as e:logger.warn(\"\", e)gallery.exifCache = {}", "docstring": "Restores the exif data cache from the cache file", "id": "f13450:m1"} {"signature": "def save_cache(gallery):", "body": "if hasattr(gallery, \"\"):cache = gallery.exifCacheelse:cache = gallery.exifCache = {}for album in gallery.albums.values():for image in album.images:cache[os.path.join(image.path, image.filename)] = image.exifcachePath = os.path.join(gallery.settings[\"\"], \"\")if len(cache) == :if os.path.exists(cachePath):os.remove(cachePath)returntry:with open(cachePath, \"\") as cacheFile:pickle.dump(cache, cacheFile)logger.debug(\"\", len(gallery.exifCache))except Exception as e:logger.warn(\"\", e)os.remove(cachePath)", "docstring": "Stores the exif data of all images in the gallery", "id": "f13450:m2"} {"signature": "def filter_nomedia(album, settings=None):", "body": "nomediapath = os.path.join(album.src_path, \"\")if os.path.isfile(nomediapath):if os.path.getsize(nomediapath) == :logger.info(\"\"\"\", album.name)_remove_albums_with_subdirs(album.gallery.albums, [album.path])try:os.rmdir(album.dst_path)except OSError as e:passalbum.subdirs = []album.medias = []else:with open(nomediapath, \"\") as nomediaFile:logger.info(\"\"\"\", album.name)ignored = nomediaFile.read().split(\"\")album.medias = [media for media in album.mediasif media.src_filename not in ignored]album.subdirs = [dirname for dirname in album.subdirsif dirname not in ignored]_remove_albums_with_subdirs(album.gallery.albums,ignored, album.path + os.path.sep)", "docstring": "Removes all filtered Media and subdirs from an Album", "id": "f13451:m1"} {"signature": "def do_compress(self, filename, compressed_filename):", "body": "raise NotImplementedError", "docstring": "Perform actual compression.\nThis should be implemented by subclasses.", "id": "f13455:c0:m1"} {"signature": "def compress(self, filename):", "body": "compressed_filename = self.get_compressed_filename(filename)if not compressed_filename:returnself.do_compress(filename, compressed_filename)", "docstring": "Compress a file, only if needed.", "id": "f13455:c0:m2"} {"signature": "def get_compressed_filename(self, filename):", "body": "if not os.path.splitext(filename)[][:] in self.suffixes_to_compress:return Falsefile_stats = Nonecompressed_stats = Nonecompressed_filename = ''.format(filename, self.suffix)try:file_stats = os.stat(filename)compressed_stats = os.stat(compressed_filename)except OSError: passif file_stats and compressed_stats:return (compressed_filenameif file_stats.st_mtime > compressed_stats.st_mtimeelse False)else:return compressed_filename", "docstring": "If the given filename should be compressed, returns the\n compressed filename.\n\n A file can be compressed if:\n\n - It is a whitelisted extension\n - The compressed file does not exist\n - The compressed file exists by is older than the file itself\n\n Otherwise, it returns False.", "id": "f13455:c0:m3"} {"signature": "def reduce_opacity(im, opacity):", "body": "assert opacity >= and opacity <= if im.mode != 
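get_compressed_filename() above gates compression on two things: a whitelist of extensions and a freshness check between the source file and its compressed sibling. The freshness part in isolation; the suffix is configurable in the original and elided here, so ".gz" is only an assumption:

import os

def needs_compression(filename, suffix=".gz"):
    compressed = filename + suffix
    try:
        src_mtime = os.stat(filename).st_mtime
        dst_mtime = os.stat(compressed).st_mtime
    except OSError:
        return True                    # no compressed copy yet
    return src_mtime > dst_mtime       # source changed since last compression

# if needs_compression("theme/css/style.css"): compressor.compress(...)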
'':im = im.convert('')else:im = im.copy()alpha = im.split()[]alpha = ImageEnhance.Brightness(alpha).enhance(opacity)im.putalpha(alpha)return im", "docstring": "Returns an image with reduced opacity.", "id": "f13456:m0"} {"signature": "def watermark(im, mark, position, opacity=):", "body": "if opacity < :mark = reduce_opacity(mark, opacity)if im.mode != '':im = im.convert('')layer = Image.new('', im.size, (, , , ))if position == '':for y in range(, im.size[], mark.size[]):for x in range(, im.size[], mark.size[]):layer.paste(mark, (x, y))elif position == '':ratio = min(float(im.size[]) / mark.size[], float(im.size[]) / mark.size[])w = int(mark.size[] * ratio)h = int(mark.size[] * ratio)mark = mark.resize((w, h))layer.paste(mark, (int((im.size[] - w) / ),int((im.size[] - h) / )))else:layer.paste(mark, position)return Image.composite(layer, im, layer)", "docstring": "Adds a watermark to an image.", "id": "f13456:m1"} {"signature": "def generate_media_pages(gallery):", "body": "writer = PageWriter(gallery.settings, index_title=gallery.title)for album in gallery.albums.values():medias = album.mediasnext_medias = medias[:] + [None]previous_medias = [None] + medias[:-]media_groups = zip(medias, next_medias, previous_medias)for media_group in media_groups:writer.write(album, media_group)", "docstring": "Generates and writes the media pages for all media in the gallery", "id": "f13457:m0"} {"signature": "def write(self, album, media_group):", "body": "from sigal import __url__ as sigal_linkfile_path = os.path.join(album.dst_path, media_group[].filename)page = self.template.render({'': album,'': media_group[],'': media_group[-],'': media_group[],'': self.index_title,'': self.settings,'': sigal_link,'': {'': os.path.basename(self.theme),'': url_from_path(os.path.relpath(self.theme_path,album.dst_path))},})output_file = \"\" % file_pathwith open(output_file, '', encoding='') as f:f.write(page)", "docstring": "Generate the media page and save it", "id": "f13457:c0:m0"} {"signature": "@click.group()@click.version_option(version=__version__)def main():", "body": "pass", "docstring": "Sigal - Simple Static Gallery Generator.\n\n Sigal is yet another python script to prepare a static gallery of images:\n resize images, create thumbnails with some options, generate html pages.", "id": "f13459:m0"} {"signature": "@main.command()@argument('', default=_DEFAULT_CONFIG_FILE)def init(path):", "body": "if os.path.isfile(path):print(\"\")sys.exit()from pkg_resources import resource_stringconf = resource_string(__name__, '')with open(path, '', encoding='') as f:f.write(conf.decode(''))print(\"\".format(path))", "docstring": "Copy a sample config file in the current directory (default to\n 'sigal.conf.py'), or use the provided 'path'.", "id": "f13459:m1"} {"signature": "@main.command()@argument('', required=False)@argument('', required=False)@option('', '', is_flag=True,help=\"\")@option('', '', is_flag=True, help=\"\")@option('', '', is_flag=True,help=\"\")@option('', '', default=_DEFAULT_CONFIG_FILE, show_default=True,help=\"\")@option('', '', help=\"\"\"\")@option('', help=\"\")@option('', '', help=\"\")def build(source, destination, debug, verbose, force, config, theme, title,ncpu):", "body": "level = ((debug and logging.DEBUG) or (verbose and logging.INFO) orlogging.WARNING)init_logging(__name__, level=level)logger = logging.getLogger(__name__)if not os.path.isfile(config):logger.error(\"\", config)sys.exit()start_time = time.time()settings = read_settings(config)for key in ('', '', ''):arg = locals()[key]if arg 
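generate_media_pages() above gets previous/next navigation for free by zipping the media list with two shifted copies of itself; the shift indices are elided in this dump, but the idea is the standard one-element shift shown here:

medias = ["a.jpg", "b.jpg", "c.jpg"]
next_medias = medias[1:] + [None]
previous_medias = [None] + medias[:-1]

for media, nxt, prev in zip(medias, next_medias, previous_medias):
    print(media, "next:", nxt, "prev:", prev)
# a.jpg next: b.jpg prev: None
# b.jpg next: c.jpg prev: a.jpg
# c.jpg next: None prev: b.jpg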
is not None:settings[key] = os.path.abspath(arg)logger.info(\"\", key.capitalize(), settings[key])if not settings[''] or not os.path.isdir(settings['']):logger.error(\"\", settings[''])sys.exit()relative_check = Truetry:relative_check = os.path.relpath(settings[''],settings['']).startswith('')except ValueError:passif not relative_check:logger.error(\"\"\"\")sys.exit()if title:settings[''] = titlelocale.setlocale(locale.LC_ALL, settings[''])init_plugins(settings)gal = Gallery(settings, ncpu=ncpu)gal.build(force=force)for src, dst in settings['']:src = os.path.join(settings[''], src)dst = os.path.join(settings[''], dst)logger.debug('', src, dst)copy(src, dst, symlink=settings[''], rellink=settings[''])stats = gal.statsdef format_stats(_type):opt = [\"\".format(stats[_type + '' + subtype], subtype)for subtype in ('', '')if stats[_type + '' + subtype] > ]opt = ''.format(''.join(opt)) if opt else ''return ''.format(stats[_type], _type, opt)print(''.format(format_stats(''), format_stats(''),time.time() - start_time))", "docstring": "Run sigal to process a directory.\n\n If provided, 'source', 'destination' and 'theme' will override the\n corresponding values from the settings file.", "id": "f13459:m2"} {"signature": "def init_plugins(settings):", "body": "logger = logging.getLogger(__name__)logger.debug('', settings[''])for path in settings['']:sys.path.insert(, path)for plugin in settings['']:try:if isinstance(plugin, str):mod = importlib.import_module(plugin)mod.register(settings)else:plugin.register(settings)logger.debug('', plugin)except Exception as e:logger.error('', plugin, e)for path in settings['']:sys.path.remove(path)", "docstring": "Load plugins and call register().", "id": "f13459:m3"} {"signature": "@main.command()@argument('', default='')@option('', '', help=\"\", default=)@option('', '', default=_DEFAULT_CONFIG_FILE,show_default=True, help='')def serve(destination, port, config):", "body": "if os.path.exists(destination):passelif os.path.exists(config):settings = read_settings(config)destination = settings.get('')if not os.path.exists(destination):sys.stderr.write(\"\"\"\".format(destination))sys.exit()else:sys.stderr.write(\"\"\"\".format(destination=destination, config=config))sys.exit()print(''.format(destination))os.chdir(destination)Handler = server.SimpleHTTPRequestHandlerhttpd = socketserver.TCPServer((\"\", port), Handler, False)print(\"\".format(port))try:httpd.allow_reuse_address = Truehttpd.server_bind()httpd.server_activate()httpd.serve_forever()except KeyboardInterrupt:print('')", "docstring": "Run a simple web server.", "id": "f13459:m4"} {"signature": "@main.command()@argument('')@argument('', nargs=-)@option('', '', default=False, is_flag=True,help='')def set_meta(target, keys, overwrite=False):", "body": "if not os.path.exists(target):sys.stderr.write(\"\".format(target))sys.exit()if len(keys) < or len(keys) % > :sys.stderr.write(\"\")sys.exit()if os.path.isdir(target):descfile = os.path.join(target, '')else:descfile = os.path.splitext(target)[] + ''if os.path.exists(descfile) and not overwrite:sys.stderr.write(\"\"\"\".format(descfile))sys.exit()with open(descfile, \"\") as fp:for i in range(len(keys) // ):k, v = keys[i * :(i + ) * ]fp.write(\"\".format(k.capitalize(), v))print(\"\".format(len(keys) // , descfile))", "docstring": "Write metadata keys to .md file.\n\n TARGET can be a media file or an album directory. 
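init_plugins() above accepts either dotted module names or already-imported plugin objects and calls register(settings) on each, temporarily extending sys.path with the configured plugin directories. A condensed sketch; the settings key names below are assumptions, since the literals are elided in this dump:

import importlib
import sys

def init_plugins(settings):
    paths = settings.get("plugin_paths", [])
    for path in paths:
        sys.path.insert(0, path)
    try:
        for plugin in settings.get("plugins", []):
            mod = importlib.import_module(plugin) if isinstance(plugin, str) else plugin
            mod.register(settings)
    finally:
        for path in paths:
            sys.path.remove(path)

# init_plugins({"plugins": ["sigal.plugins.watermark"], "plugin_paths": []})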
KEYS are key/value pairs.\n\n Ex, to set the title of test.jpg to \"My test image\":\n\n sigal set_meta test.jpg title \"My test image\"", "id": "f13459:m5"} {"signature": "def get_thumb(settings, filename):", "body": "path, filen = os.path.split(filename)name, ext = os.path.splitext(filen)if ext.lower() in settings['']:ext = ''return join(path, settings[''], settings[''] +name + settings[''] + ext)", "docstring": "Return the path to the thumb.\n\n examples:\n >>> default_settings = create_settings()\n >>> get_thumb(default_settings, \"bar/foo.jpg\")\n \"bar/thumbnails/foo.jpg\"\n >>> get_thumb(default_settings, \"bar/foo.png\")\n \"bar/thumbnails/foo.png\"\n\n for videos, it returns a jpg file:\n >>> get_thumb(default_settings, \"bar/foo.webm\")\n \"bar/thumbnails/foo.jpg\"", "id": "f13460:m0"} {"signature": "def read_settings(filename=None):", "body": "logger = logging.getLogger(__name__)logger.info(\"\")settings = _DEFAULT_CONFIG.copy()if filename:logger.debug(\"\", filename)settings_path = os.path.dirname(filename)tempdict = {}with open(filename) as f:code = compile(f.read(), filename, '')exec(code, tempdict)settings.update((k, v) for k, v in tempdict.items()if k not in [''])paths = ['', '', '']if os.path.isdir(join(settings_path, settings[''])) andos.path.isdir(join(settings_path, settings[''],'')):paths.append('')for p in paths:path = settings[p]if path and not isabs(path):settings[p] = abspath(normpath(join(settings_path, path)))logger.debug(\"\", p, path, settings[p])for key in ('', '', ''):w, h = settings[key]if h > w:settings[key] = (h, w)logger.warning(\"\"\"\", key)if not settings['']:logger.info('')logger.debug('', pformat(settings, width=))return settings", "docstring": "Read settings from a config file in the source_dir root.", "id": "f13460:m1"} {"signature": "def create_settings(**kwargs):", "body": "settings = _DEFAULT_CONFIG.copy()settings.update(kwargs)return settings", "docstring": "Create a new default setting copy and initialize it with kwargs.", "id": "f13460:m2"} {"signature": "def generate_context(self, album):", "body": "from . 
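read_settings() above treats the configuration file as Python: it compiles and executes the file into a scratch namespace, then overlays the resulting names on a copy of the defaults. A hedged sketch of that core step; the key excluded by the original is elided, so '__builtins__' (which exec always injects) is what gets skipped here:

def read_py_config(path, defaults):
    settings = dict(defaults)
    namespace = {}
    with open(path) as f:
        exec(compile(f.read(), path, "exec"), namespace)
    settings.update((k, v) for k, v in namespace.items() if k != "__builtins__")
    return settings

# settings = read_py_config("sigal.conf.py", {"theme": "colorbox", "img_size": (640, 480)})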
import __url__ as sigal_linkself.logger.info(\"\", album)return {'': album,'': self.index_title,'': self.settings,'': sigal_link,'': {'': os.path.basename(self.theme),'': url_from_path(os.path.relpath(self.theme_path,album.dst_path))},}", "docstring": "Generate the context dict for the given path.", "id": "f13461:c0:m1"} {"signature": "def write(self, album):", "body": "page = self.template.render(**self.generate_context(album))output_file = os.path.join(album.dst_path, album.output_file)with open(output_file, '', encoding='') as f:f.write(page)", "docstring": "Generate the HTML page and save it.", "id": "f13461:c0:m2"} {"signature": "def copy(src, dst, symlink=False, rellink=False):", "body": "func = os.symlink if symlink else shutil.copy2if symlink and os.path.lexists(dst):os.remove(dst)if rellink: func(os.path.relpath(src, os.path.dirname(dst)), dst)else:func(src, dst)", "docstring": "Copy or symlink the file.", "id": "f13462:m0"} {"signature": "def check_or_create_dir(path):", "body": "if not os.path.isdir(path):os.makedirs(path)", "docstring": "Create the directory if it does not exist", "id": "f13462:m1"} {"signature": "def url_from_path(path):", "body": "if os.sep != '':path = ''.join(path.split(os.sep))return quote(path)", "docstring": "Transform path to url, converting backslashes to slashes if needed.", "id": "f13462:m2"} {"signature": "def read_markdown(filename):", "body": "global MDwith open(filename, '', encoding='') as f:text = f.read()if MD is None:MD = Markdown(extensions=['',''],output_format='')else:MD.reset()MD.Meta = {}output = {'': Markup(MD.convert(text))}try:meta = MD.Meta.copy()except AttributeError:passelse:output[''] = metatry:output[''] = MD.Meta[''][]except KeyError:passreturn output", "docstring": "Reads markdown file, converts output and fetches title and meta-data for\n further processing.", "id": "f13462:m3"} {"signature": "def is_valid_html5_video(ext):", "body": "return ext in VIDEO_MIMES.keys()", "docstring": "Checks if ext is a supported HTML5 video.", "id": "f13462:m4"} {"signature": "def get_mime(ext):", "body": "return VIDEO_MIMES[ext]", "docstring": "Returns mime type for extension.", "id": "f13462:m5"} {"signature": "def check_subprocess(cmd, source, outname):", "body": "logger = logging.getLogger(__name__)try:res = subprocess.run(cmd, stdout=subprocess.PIPE,stderr=subprocess.PIPE)except KeyboardInterrupt:logger.debug('', outname)if os.path.isfile(outname):os.remove(outname)raiseif res.returncode:logger.debug('', res.stdout.decode(''))logger.debug('', res.stderr.decode(''))if os.path.isfile(outname):logger.debug('', outname)os.remove(outname)raise SubprocessException('' + source)", "docstring": "Run the command to resize the video and remove the output file if the\n processing fails.", "id": "f13463:m0"} {"signature": "def video_size(source, converter=''):", "body": "res = subprocess.run([converter, '', source], stderr=subprocess.PIPE)stderr = res.stderr.decode('')pattern = re.compile(r'')match = pattern.search(stderr)rot_pattern = re.compile(r'')rot_match = rot_pattern.search(stderr)if match:x, y = int(match.groups()[]), int(match.groups()[])else:x = y = if rot_match:x, y = y, xreturn x, y", "docstring": "Returns the dimensions of the video.", "id": "f13463:m1"} {"signature": "def generate_video(source, outname, settings, options=None):", "body": "logger = logging.getLogger(__name__)converter = settings['']w_src, h_src = video_size(source, converter=converter)w_dst, h_dst = settings['']logger.debug('', w_src, h_src, w_dst, h_dst)base, src_ext = 
splitext(source)base, dst_ext = splitext(outname)if dst_ext == src_ext and w_src <= w_dst and h_src <= h_dst:logger.debug('')shutil.copy(source, outname)returnif h_dst * w_src < h_src * w_dst:resize_opt = ['', \"\" % h_dst]else:resize_opt = ['', \"\" % w_dst]if w_src <= w_dst and h_src <= h_dst:resize_opt = []cmd = [converter, '', source, ''] if options is not None:cmd += optionscmd += resize_opt + [outname]logger.debug('', ''.join(cmd))check_subprocess(cmd, source, outname)", "docstring": "Video processor.\n\n :param source: path to a video\n :param outname: path to the generated video\n :param settings: settings dict\n :param options: array of options passed to ffmpeg", "id": "f13463:m2"} {"signature": "def generate_thumbnail(source, outname, box, delay, fit=True, options=None,converter=''):", "body": "logger = logging.getLogger(__name__)tmpfile = outname + \"\"cmd = [converter, '', source, '', '', '','', delay, '', '', '', tmpfile]logger.debug('', ''.join(cmd))check_subprocess(cmd, source, outname)image.generate_thumbnail(tmpfile, outname, box, fit=fit, options=options)os.unlink(tmpfile)", "docstring": "Create a thumbnail image for the video source, based on ffmpeg.", "id": "f13463:m3"} {"signature": "def process_video(filepath, outpath, settings):", "body": "logger = logging.getLogger(__name__)filename = os.path.split(filepath)[]basename, ext = splitext(filename)try:if settings[''] and is_valid_html5_video(ext):outname = os.path.join(outpath, filename)utils.copy(filepath, outname, symlink=settings[''])else:valid_formats = ['', '']video_format = settings['']if video_format not in valid_formats:logger.error('',valid_formats)raise ValueErroroutname = os.path.join(outpath, basename + '' + video_format)generate_video(filepath, outname, settings,options=settings.get(video_format + ''))except Exception:if logger.getEffectiveLevel() == logging.DEBUG:raiseelse:return Status.FAILUREif settings['']:thumb_name = os.path.join(outpath, get_thumb(settings, filename))try:generate_thumbnail(outname, thumb_name, settings[''],settings[''], fit=settings[''],options=settings[''],converter=settings[''])except Exception:if logger.getEffectiveLevel() == logging.DEBUG:raiseelse:return Status.FAILUREreturn Status.SUCCESS", "docstring": "Process a video: resize, create thumbnail.", "id": "f13463:m4"} {"signature": "@propertydef url(self):", "body": "return url_from_path(self.filename)", "docstring": "URL of the media.", "id": "f13464:c0:m3"} {"signature": "@propertydef big(self):", "body": "if self.settings['']:s = self.settingsif s['']:return self.filenameorig_path = join(s[''], self.path, s[''])check_or_create_dir(orig_path)big_path = join(orig_path, self.src_filename)if not isfile(big_path):copy(self.src_path, big_path, symlink=s[''],rellink=self.settings[''])return join(s[''], self.src_filename)", "docstring": "Path to the original image, if ``keep_orig`` is set (relative to the\n album directory). 
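generate_video() above decides how to resize by comparing aspect ratios: it scales along whichever dimension is the binding constraint and skips scaling altogether when the source already fits the target box. A sketch of that decision; the ffmpeg filter strings are elided in this dump, so the 'scale=' expressions below are a plausible reconstruction rather than the verbatim originals:

def resize_options(w_src, h_src, w_dst, h_dst):
    if w_src <= w_dst and h_src <= h_dst:
        return []                                             # already small enough
    if h_dst * w_src < h_src * w_dst:
        return ["-vf", "scale=trunc(oh*a/2)*2:%i" % h_dst]    # height is the constraint
    return ["-vf", "scale=%i:trunc(ow/a/2)*2" % w_dst]        # width is the constraint

print(resize_options(1920, 1080, 480, 360))
# ['-vf', 'scale=480:trunc(ow/a/2)*2']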
Copy the file if needed.", "id": "f13464:c0:m4"} {"signature": "@propertydef big_url(self):", "body": "if self.big is not None:return url_from_path(self.big)", "docstring": "URL of the original media.", "id": "f13464:c0:m5"} {"signature": "@propertydef thumbnail(self):", "body": "if not isfile(self.thumb_path):self.logger.debug('', self)path = (self.dst_path if os.path.exists(self.dst_path)else self.src_path)try:s = self.settingsif self.type == '':image.generate_thumbnail(path, self.thumb_path, s[''],fit=s[''])elif self.type == '':video.generate_thumbnail(path, self.thumb_path, s[''],s[''], fit=s[''],converter=s[''])except Exception as e:self.logger.error('', e)returnreturn url_from_path(self.thumb_name)", "docstring": "Path to the thumbnail image (relative to the album directory).", "id": "f13464:c0:m6"} {"signature": "def _get_metadata(self):", "body": "self.description = ''self.meta = {}self.title = ''descfile = splitext(self.src_path)[] + ''if isfile(descfile):meta = read_markdown(descfile)for key, val in meta.items():setattr(self, key, val)", "docstring": "Get image metadata from filename.md: title, description, meta.", "id": "f13464:c0:m7"} {"signature": "def _get_metadata(self):", "body": "descfile = join(self.src_path, self.description_file)self.description = ''self.meta = {}self.title = os.path.basename(self.path if self.path != ''else self.src_path)if isfile(descfile):meta = read_markdown(descfile)for key, val in meta.items():setattr(self, key, val)try:self.author = self.meta[''][]except KeyError:self.author = self.settings.get('')", "docstring": "Get album metadata from `description_file` (`index.md`):\n\n -> title, thumbnail image, description", "id": "f13464:c3:m5"} {"signature": "def create_output_directories(self):", "body": "check_or_create_dir(self.dst_path)if self.medias:check_or_create_dir(join(self.dst_path,self.settings['']))if self.medias and self.settings['']:self.orig_path = join(self.dst_path, self.settings[''])check_or_create_dir(self.orig_path)", "docstring": "Create output directories for thumbnails and original images.", "id": "f13464:c3:m6"} {"signature": "@propertydef images(self):", "body": "for media in self.medias:if media.type == '':yield media", "docstring": "List of images (:class:`~sigal.gallery.Image`).", "id": "f13464:c3:m9"} {"signature": "@propertydef videos(self):", "body": "for media in self.medias:if media.type == '':yield media", "docstring": "List of videos (:class:`~sigal.gallery.Video`).", "id": "f13464:c3:m10"} {"signature": "@propertydef albums(self):", "body": "root_path = self.path if self.path != '' else ''return [self.gallery.albums[join(root_path, path)]for path in self.subdirs]", "docstring": "List of :class:`~sigal.gallery.Album` objects for each\n sub-directory.", "id": "f13464:c3:m11"} {"signature": "@propertydef url(self):", "body": "url = self.name.encode('')return url_quote(url) + '' + self.url_ext", "docstring": "URL of the album, relative to its parent.", "id": "f13464:c3:m12"} {"signature": "@propertydef thumbnail(self):", "body": "if self._thumbnail:return self._thumbnailthumbnail = self.meta.get('', [''])[]if thumbnail and isfile(join(self.src_path, thumbnail)):self._thumbnail = url_from_path(join(self.name, get_thumb(self.settings, thumbnail)))self.logger.debug(\"\", self, self._thumbnail)return self._thumbnailelse:for f in self.medias:ext = splitext(f.filename)[]if ext.lower() in self.settings['']:size = f.sizeif size is None:size = get_size(f.src_path)if size[''] > size['']:self._thumbnail = (url_quote(self.name) + '' 
+f.thumbnail)self.logger.debug(\"\",self, self._thumbnail)return self._thumbnailif not self._thumbnail and self.medias:for media in self.medias:if media.thumbnail is not None:self._thumbnail = (url_quote(self.name) + '' +media.thumbnail)breakelse:self.logger.warning(\"\", self)return Noneself.logger.debug(\"\",self, self._thumbnail)return self._thumbnailif not self._thumbnail:for path, album in self.gallery.get_albums(self.path):if album.thumbnail:self._thumbnail = (url_quote(self.name) + '' +album.thumbnail)self.logger.debug(\"\",self, self._thumbnail)return self._thumbnailself.logger.error('', self)return None", "docstring": "Path to the thumbnail of the album.", "id": "f13464:c3:m13"} {"signature": "@propertydef breadcrumb(self):", "body": "if self.path == '':return []path = self.pathbreadcrumb = [((self.url_ext or ''), self.title)]while True:path = os.path.normpath(os.path.join(path, ''))if path == '':breakurl = (url_from_path(os.path.relpath(path, self.path)) + '' +self.url_ext)breadcrumb.append((url, self.gallery.albums[path].title))breadcrumb.reverse()return breadcrumb", "docstring": "List of ``(url, title)`` tuples defining the current breadcrumb\n path.", "id": "f13464:c3:m15"} {"signature": "@propertydef show_map(self):", "body": "return any(image.has_location() for image in self.images)", "docstring": "Check if we have at least one photo with GPS location in the album", "id": "f13464:c3:m16"} {"signature": "@cached_propertydef zip(self):", "body": "zip_gallery = self.settings['']if zip_gallery and len(self) > :zip_gallery = zip_gallery.format(album=self)archive_path = join(self.dst_path, zip_gallery)if (self.settings.get('', False) andisfile(archive_path)):self.logger.debug(\"\",archive_path)return zip_galleryarchive = zipfile.ZipFile(archive_path, '', allowZip64=True)attr = ('' if self.settings[''] == ''else '')for p in self:path = getattr(p, attr)try:archive.write(path, os.path.split(path)[])except OSError as e:self.logger.warn('', p, e)archive.close()self.logger.debug('', archive_path)return zip_gallery", "docstring": "Make a ZIP archive with all media files and return its path.\n\n If the ``zip_gallery`` setting is set,it contains the location of a zip\n archive with all original images of the corresponding directory.", "id": "f13464:c3:m17"} {"signature": "@propertydef title(self):", "body": "return self.settings[''] or self.albums[''].title", "docstring": "Title of the gallery.", "id": "f13464:c4:m1"} {"signature": "def get_albums(self, path):", "body": "for name in self.albums[path].subdirs:subdir = os.path.normpath(join(path, name))yield subdir, self.albums[subdir]for subname, album in self.get_albums(subdir):yield subname, self.albums[subdir]", "docstring": "Return the list of all sub-directories of path.", "id": "f13464:c4:m3"} {"signature": "def build(self, force=False):", "body": "if not self.albums:self.logger.warning(\"\")returndef log_func(x):available_length = get_terminal_size()[] - if x and available_length > :return x.name[:available_length]else:return \"\"try:with progressbar(self.albums.values(), label=\"\",item_show_func=log_func, show_eta=False,file=self.progressbar_target) as albums:media_list = [f for album in albumsfor f in self.process_dir(album, force=force)]except KeyboardInterrupt:sys.exit('')bar_opt = {'': \"\",'': True,'': self.progressbar_target}failed_files = []if self.pool:try:with progressbar(length=len(media_list), **bar_opt) as bar:for res in self.pool.imap_unordered(worker, media_list):if 
res:failed_files.append(res)bar.update()self.pool.close()self.pool.join()except KeyboardInterrupt:self.pool.terminate()sys.exit('')except pickle.PicklingError:self.logger.critical(\"\"\"\"\"\",exc_info=True)sys.exit('')else:with progressbar(media_list, **bar_opt) as medias:for media_item in medias:res = process_file(media_item)if res:failed_files.append(res)if failed_files:self.remove_files(failed_files)if self.settings['']:album_writer = AlbumPageWriter(self.settings,index_title=self.title)album_list_writer = AlbumListPageWriter(self.settings,index_title=self.title)with progressbar(self.albums.values(),label=\"\" % \"\",item_show_func=log_func, show_eta=False,file=self.progressbar_target) as albums:for album in albums:if album.albums:if album.medias:self.logger.warning(\"\"\"\"\"\",album.title, album.title)album_list_writer.write(album)else:album_writer.write(album)print('')signals.gallery_build.send(self)", "docstring": "Create the image gallery", "id": "f13464:c4:m4"} {"signature": "def process_dir(self, album, force=False):", "body": "for f in album:if isfile(f.dst_path) and not force:self.logger.info(\"\", f.filename)self.stats[f.type + ''] += else:self.stats[f.type] += yield (f.type, f.path, f.filename, f.src_path, album.dst_path,self.settings)", "docstring": "Process a list of images in a directory.", "id": "f13464:c4:m6"} {"signature": "def installAndBuild(args, following_args):", "body": "build_status = generate_status = install_status = if not hasattr(args, ''):vars(args)[''] = []if '' in args.build_targets:logging.error('')return {'':}cwd = os.getcwd()c = validate.currentDirectoryModule()if not c:return {'':}try:target, errors = c.satisfyTarget(args.target, additional_config=args.config)except access_common.AccessException as e:logging.error(e)return {'':}if errors:for error in errors:logging.error(error)return {'':}vars(args)[''] = Nonevars(args)[''] = Falseif not hasattr(args, ''):if '' in args.build_targets:vars(args)[''] = ''elif not len(args.build_targets):vars(args)[''] = ''else:vars(args)[''] = ''install_status = install.execCommand(args, [])builddir = os.path.join(cwd, '', target.getName())all_deps = c.getDependenciesRecursive(target = target,available_components = [(c.getName(), c)],test = True)missing = for d in all_deps.values():if not d and not (d.isTestDependency() and args.install_test_deps != ''):logging.error('' % os.path.split(d.path)[])missing += if missing:logging.error('')return {'': , '':install_status, '':missing}generator = cmakegen.CMakeGen(builddir, target)config = generator.configure(c, all_deps)logging.debug(\"\", config[''])script_environment = {'': config['']}runScriptWithModules(c, all_deps.values(), '', script_environment)app = c if len(c.getBinaries()) else Nonefor error in generator.generateRecursive(c, all_deps, builddir, application=app):logging.error(error)generate_status = logging.debug(\"\")runScriptWithModules(c, all_deps.values(), '', script_environment)if (not hasattr(args, '')) or (not args.generate_only):error = target.build(builddir, c, args, release_build=args.release_build,build_args=following_args, targets=args.build_targets,release_no_debug_info_build=args.release_no_debug_info_build)if error:logging.error(error)build_status = else:runScriptWithModules(c, all_deps.values(), '', script_environment)if install_status:logging.warning(\"\"+\"\"+\"\")return {'': build_status or generate_status or install_status,'': missing,'': build_status,'': generate_status,'': install_status}", "docstring": "Perform the build command, but provide 
detailed error information.\n Returns {status:0, build_status:0, generate_status:0, install_status:0} on success.\n If status: is nonzero there was some sort of error. Other properties\n are optional, and may not be set if that step was not attempted.", "id": "f13478:m3"} {"signature": "def displayOutdated(modules, dependency_specs, use_colours):", "body": "if use_colours:DIM = colorama.Style.DIM NORMAL = colorama.Style.NORMAL BRIGHT = colorama.Style.BRIGHT YELLOW = colorama.Fore.YELLOW RED = colorama.Fore.RED GREEN = colorama.Fore.GREEN RESET = colorama.Style.RESET_ALL else:DIM = BRIGHT = YELLOW = RED = GREEN = RESET = u''status = from yotta.lib import accessfrom yotta.lib import access_commonfrom yotta.lib import sourceparsefor name, m in modules.items():if m.isTestDependency():continuetry:latest_v = access.latestSuitableVersion(name, '', registry='', quiet=True)except access_common.Unavailable as e:latest_v = Noneif not m:m_version = u'' + RESET + BRIGHT + RED + u\"\" + RESETelse:m_version = DIM + u'' % (m.version)if not latest_v:print(u'' % (RED, name, m_version, NORMAL, RESET))status = continueelif not m or m.version < latest_v:update_prevented_by = ''if m:specs_preventing_update = [x for x in dependency_specsif x.name == name and notsourceparse.parseSourceURL(x.nonShrinkwrappedVersionReq()).semanticSpecMatches(latest_v)]shrinkwrap_prevents_update = [x for x in dependency_specsif x.name == name and x.isShrinkwrapped() and notsourceparse.parseSourceURL(x.versionReq()).semanticSpecMatches(latest_v)]if len(specs_preventing_update):update_prevented_by = '' % (''.join(['' % (x.version_req, x.specifying_module) for x in specs_preventing_update]))if len(shrinkwrap_prevents_update):update_prevented_by += ''if m.version.major() < latest_v.major():colour = GREENelif m.version.minor() < latest_v.minor():colour = YELLOWelse:colour = REDelse:colour = REDprint(u'' % (name, m_version, RESET, colour, latest_v.version, update_prevented_by, RESET))if not status:status = return status", "docstring": "print information about outdated modules,\n return 0 if there is nothing to be done and nonzero otherwise", "id": "f13517:m2"} {"signature": "def findCTests(builddir, recurse_yotta_modules=False):", "body": "tests = []add_test_re = re.compile('', flags=re.IGNORECASE)for root, dirs, files in os.walk(builddir, topdown=True):if not recurse_yotta_modules:dirs = [d for d in dirs if d != '']if '' in files:with open(os.path.join(root, ''), '') as ctestf:dir_tests = []for line in ctestf:if line.lower().startswith(''):match = add_test_re.search(line)if match:dir_tests.append((match.group(), match.group()))else:logging.error(\"\" %line.rstrip(''))if len(dir_tests):tests.append((root, dir_tests))return tests", "docstring": "returns a list of (directory_path, [list of tuples of (test name, test command)])", "id": "f13531:m1"} {"signature": "def parseSourceURL(source_url):", "body": "name, spec = _getNonRegistryRef(source_url)if spec:return spectry:url_is_spec = version.Spec(source_url)except ValueError:url_is_spec = Noneif url_is_spec is not None:return VersionSource('', '', source_url)raise InvalidVersionSpec(\"\" % (source_url))", "docstring": "Parse the specified version source URL (or version spec), and return an\n instance of VersionSource", "id": "f13533:m1"} {"signature": "def isValidSpec(spec_or_source_url):", "body": "try:parseSourceURL(spec_or_source_url)return Trueexcept InvalidVersionSpec:return False", "docstring": "Check if the specified version source URL (or version spec), can be\n parsed 
successfully.", "id": "f13533:m2"} {"signature": "def parseTargetNameAndSpec(target_name_and_spec):", "body": "import rename, spec = _getNonRegistryRef(target_name_and_spec)if name:return name, target_name_and_specsplit_at = ''if target_name_and_spec.find('') > target_name_and_spec.find('') and'' in target_name_and_spec:split_at = ''name = target_name_and_spec.split(split_at)[]spec = target_name_and_spec[len(name)+:]name = name.strip()if not spec:spec = ''return name, spec", "docstring": "Parse targetname[@versionspec] and return a tuple\n (target_name_string, version_spec_string).\n\n targetname[,versionspec] is also supported (this is how target names\n and specifications are stored internally, and was the documented way of\n setting the spec on the commandline)\n\n Also accepts raw github version specs (Owner/reponame#whatever), as the\n name can be deduced from these.\n\n Note that the specification split from the name is not validated. If\n there is no specification (just a target name) passed in, then '*' will\n be returned as the specification.", "id": "f13533:m3"} {"signature": "def parseModuleNameAndSpec(module_name_and_spec):", "body": "import rename, spec = _getNonRegistryRef(module_name_and_spec)if name:return name, module_name_and_specname = module_name_and_spec.split('')[]spec = module_name_and_spec[len(name)+:]name = name.strip()if not spec:spec = ''return name, spec", "docstring": "Parse modulename[@versionspec] and return a tuple\n (module_name_string, version_spec_string).\n\n Also accepts raw github version specs (Owner/reponame#whatever), as the\n name can be deduced from these.\n\n Note that the specification split from the name is not validated. If\n there is no specification (just a module name) passed in, then '*' will\n be returned as the specification.", "id": "f13533:m4"} {"signature": "def __init__(self, version_string, url=None):", "body": "super(Version, self).__init__()self.url = urlversion_string = str(version_string.strip())self.version = Noneif version_string.startswith('') or version_string.startswith(''):self.version = semantic_version.Version(version_string[:], partial=False)elif not version_string:self.version = TipVersion()else:self.version = semantic_version.Version(version_string, partial=False)self.url = url", "docstring": "Wrap the semantic_version Version class so that we can represent\n 'tip' versions as well as specific versions, and store an optional\n URL that can represent the location from which we can retrieve this\n version.\n\n Also add some useful methods for manipulating versions.", "id": "f13534:c1:m0"} {"signature": "def dropRootPrivs(fn):", "body": "def wrapped_fn(*args, **kwargs):q = multiprocessing.Queue()p = multiprocessing.Process(target=_dropPrivsReturnViaQueue, args=(q, fn, args, kwargs))p.start()r = Nonee = Nonewhile True:msg = q.get()if msg[] == '':r = msg[]if msg[] == '':e = msg[](msg[])if msg[] == '':if e is not None:raise e return rreturn wrapped_fn", "docstring": "decorator to drop su/sudo privilages before running a function on\n unix/linux.\n The *real* uid is modified, so privileges are permanently dropped for\n the process. (i.e. 
make sure you don't need to do\n\n If there is a SUDO_UID environment variable, then we drop to that,\n otherwise we drop to nobody.", "id": "f13539:m2"} {"signature": "def which(program):", "body": "if os.path.split(program)[]:if os.path.exists(program) and os.access(program, os.X_OK):return programelse:for path in os.environ[''].split(os.pathsep):progpath = os.path.join(path, program)if os.path.exists(progpath) and os.access(progpath, os.X_OK):return progpathreturn None", "docstring": "look for \"program\" in PATH, and return the path to it, or None if it\n was not found", "id": "f13539:m8"} {"signature": "def remoteComponentFor(name, version_required, registry=''):", "body": "try:vs = sourceparse.parseSourceURL(version_required)except ValueError as e:raise access_common.Unavailable('' % (e))if vs.source_type == '':if registry not in ('', ''):raise Exception('' % registry)return registry_access.RegistryThing.createFromSource(vs, name, registry=registry)elif vs.source_type == '':return github_access.GithubComponent.createFromSource(vs, name)elif vs.source_type == '':return git_access.GitComponent.createFromSource(vs, name)elif vs.source_type == '':return hg_access.HGComponent.createFromSource(vs, name)else:raise Exception('' % vs.source_type)", "docstring": "Return a RemoteComponent subclass for the specified component name and\n source url (or version specification)\n Raises an exception if any arguments are invalid.", "id": "f13541:m0"} {"signature": "def satisfyVersionFromSearchPaths(name, version_required, search_paths, update=False, type='', inherit_shrinkwrap=None):", "body": "from yotta.lib import packv = Nonetry:sv = sourceparse.parseSourceURL(version_required)except ValueError as e:logging.error(e)return Nonetry:local_version = searchPathsFor(name,sv.semanticSpec(),search_paths,type,inherit_shrinkwrap = inherit_shrinkwrap)except pack.InvalidDescription as e:logger.error(e)return Nonelogger.debug(\"\" % (('', '')[not local_version], name))if local_version:if update and not local_version.installedLinked():v = latestSuitableVersion(name, version_required, registry=_registryNamespaceForType(type))if local_version:local_version.setLatestAvailable(v)if local_version.installedLinked() or not local_version.outdated():logger.debug(\"\" % local_version.path)if name != local_version.getName():raise Exception('' % (local_version.getName(), name, local_version.path))return local_versionlogger.info('' % (name,local_version.getVersion(),v))fsutils.rmRf(local_version.path)return _satisfyVersionByInstallingVersion(name, version_required, local_version.path, v, type=type, inherit_shrinkwrap=inherit_shrinkwrap)return None", "docstring": "returns a Component/Target for the specified version, if found in the\n list of search paths. 
If `update' is True, then also check for newer\n versions of the found component, and update it in-place (unless it was\n installed via a symlink).", "id": "f13541:m7"} {"signature": "def satisfyVersionByInstalling(name, version_required, working_directory, type='', inherit_shrinkwrap=None):", "body": "v = latestSuitableVersion(name, version_required, _registryNamespaceForType(type))install_into = os.path.join(working_directory, name)return _satisfyVersionByInstallingVersion(name, version_required, install_into, v, type=type, inherit_shrinkwrap = inherit_shrinkwrap)", "docstring": "installs and returns a Component/Target for the specified name+version\n requirement, into a subdirectory of `working_directory'", "id": "f13541:m8"} {"signature": "def _satisfyVersionByInstallingVersion(name, version_required, working_directory, version, type='', inherit_shrinkwrap=None):", "body": "assert(version)logger.info('', version)version.unpackInto(working_directory)r = _clsForType(type)(working_directory, inherit_shrinkwrap = inherit_shrinkwrap)if not r:raise Exception('' % (name, version_required, type))if name != r.getName():raise Exception('' % (type, name, version_required, r.getName()))r.runScript('')return r", "docstring": "installs and returns a Component/Target for the specified version requirement into\n 'working_directory' using the provided remote version object.\n This function is not normally called via `satisfyVersionByInstalling',\n which looks up a suitable remote version object.", "id": "f13541:m9"} {"signature": "def satisfyVersion(name,version_required,available,search_paths,working_directory,update_installed=None,type='', inherit_shrinkwrap=None):", "body": "r = satisfyFromAvailable(name, available, type=type)if r is not None:if not sourceparse.parseSourceURL(version_required).semanticSpecMatches(r.getVersion()):raise access_common.SpecificationNotMet(\"\" % (type, name, version_required))return rr = satisfyVersionFromSearchPaths(name,version_required,search_paths,(update_installed == ''),type = type,inherit_shrinkwrap = inherit_shrinkwrap)if r is not None:return rreturn satisfyVersionByInstalling(name, version_required, working_directory, type=type, inherit_shrinkwrap = inherit_shrinkwrap)", "docstring": "returns a Component/Target for the specified version (either to an already\n installed copy (from the available list, or from disk), or to a newly\n downloaded one), or None if the version could not be satisfied.\n\n update_installed = None / 'Update'\n None: prevent any attempt to look for new versions if the\n component/target already exists\n Update: replace any existing version with the newest available, if\n the newest available has a higher version", "id": "f13541:m10"} {"signature": "def _returnRequestError(fn):", "body": "@functools.wraps(fn)def wrapped(*args, **kwargs):try:return fn(*args, **kwargs)except requests.exceptions.RequestException as e:return \"\" % (e.response.status_code, e.message)return wrapped", "docstring": "Decorator that captures requests.exceptions.RequestException errors\n and returns them as an error message. 
If no error occurs the return\n value of the wrapped function is returned (normally None).", "id": "f13542:m4"} {"signature": "def _handleAuth(fn):", "body": "@functools.wraps(fn)def wrapped(*args, **kwargs):from yotta.lib import authinteractive = globalconf.get('')try:return fn(*args, **kwargs)except requests.exceptions.HTTPError as e:if e.response.status_code == requests.codes.unauthorized: logger.debug('', fn)auth.authorizeUser(provider=None, interactive=interactive)if interactive:logger.debug('')return fn(*args, **kwargs)raisereturn wrapped", "docstring": "Decorator to re-try API calls after asking the user for authentication.", "id": "f13542:m5"} {"signature": "def _friendlyAuthError(fn):", "body": "@functools.wraps(fn)def wrapped(*args, **kwargs):try:return fn(*args, **kwargs)except requests.exceptions.HTTPError as e:if e.response.status_code == requests.codes.unauthorized: logger.error('')elif e.response.status_code == requests.codes.bad and '' in e.response.text.lower(): logger.error('', e.response.status_code, e.response.text)logger.error('')else:logger.error('', e.response.status_code, e.response.text)raisereturn wrapped", "docstring": "Decorator to print a friendly you-are-not-authorised message. Use\n **outside** the _handleAuth decorator to only print the message after\n the user has been given a chance to login.", "id": "f13542:m6"} {"signature": "def _raiseUnavailableFor401(message):", "body": "def __raiseUnavailableFor401(fn):def wrapped(*args, **kwargs):try:return fn(*args, **kwargs)except requests.exceptions.HTTPError as e:if e.response.status_code == requests.codes.unauthorized:raise access_common.Unavailable(message)else:raisereturn wrappedreturn __raiseUnavailableFor401", "docstring": "Returns a decorator to swallow a requests exception for modules that\n are not accessible without logging in, and turn it into an Unavailable\n exception.", "id": "f13542:m7"} {"signature": "@_swallowRequestExceptions(fail_return=\"\")@_retryConnectionErrors@_friendlyAuthError@_handleAuthdef publish(namespace, name, version, description_file, tar_file, readme_file,readme_file_ext, registry=None):", "body": "registry = registry or Registry_Base_URLurl = '' % (registry,namespace,name,version)if readme_file_ext == '':readme_section_name = ''elif readme_file_ext == '':readme_section_name = ''else:raise ValueError('' % readme_file_ext)body = OrderedDict([('', (None, description_file.read(),'')),('',('', tar_file)),(readme_section_name, (readme_section_name, readme_file))])headers = _headersForRegistry(registry)response = requests.put(url, headers=headers, files=body)response.raise_for_status()return None", "docstring": "Publish a tarblob to the registry, if the request fails, an exception\n is raised, which either triggers re-authentication, or is turned into a\n return value by the decorators. (If successful, the decorated function\n returns None)", "id": "f13542:m23"} {"signature": "@_swallowRequestExceptions(fail_return=\"\")@_retryConnectionErrors@_friendlyAuthError@_handleAuthdef unpublish(namespace, name, version, registry=None):", "body": "registry = registry or Registry_Base_URLurl = '' % (registry,namespace,name,version)headers = _headersForRegistry(registry)response = requests.delete(url, headers=headers)response.raise_for_status()return None", "docstring": "Try to unpublish a recently published version. 
Return any errors that\n occur.", "id": "f13542:m24"} {"signature": "@_swallowRequestExceptions(fail_return=None)@_retryConnectionErrors@_friendlyAuthError@_handleAuthdef listOwners(namespace, name, registry=None):", "body": "registry = registry or Registry_Base_URLurl = '' % (registry,namespace,name)request_headers = _headersForRegistry(registry)response = requests.get(url, headers=request_headers)if response.status_code == :logger.error('' % (namespace[:-], name))return Noneresponse.raise_for_status()return ordered_json.loads(response.text)", "docstring": "List the owners of a module or target (owners are the people with\n permission to publish versions and add/remove the owners).", "id": "f13542:m25"} {"signature": "@_swallowRequestExceptions(fail_return=None)@_retryConnectionErrors@_friendlyAuthError@_handleAuthdef addOwner(namespace, name, owner, registry=None):", "body": "registry = registry or Registry_Base_URLurl = '' % (registry,namespace,name,owner)request_headers = _headersForRegistry(registry)response = requests.put(url, headers=request_headers)if response.status_code == :logger.error('' % (namespace[:-], name))returnresponse.raise_for_status()return True", "docstring": "Add an owner for a module or target (owners are the people with\n permission to publish versions and add/remove the owners).", "id": "f13542:m26"} {"signature": "@_swallowRequestExceptions(fail_return=None)@_retryConnectionErrors@_friendlyAuthError@_handleAuthdef removeOwner(namespace, name, owner, registry=None):", "body": "registry = registry or Registry_Base_URLurl = '' % (registry,namespace,name,owner)request_headers = _headersForRegistry(registry)response = requests.delete(url, headers=request_headers)if response.status_code == :logger.error('' % (namespace[:-], name))returnresponse.raise_for_status()return True", "docstring": "Remove an owner for a module or target (owners are the people with\n permission to publish versions and add/remove the owners).", "id": "f13542:m27"} {"signature": "@_retryConnectionErrorsdef search(query='', keywords=[], registry=None):", "body": "registry = registry or Registry_Base_URLurl = '' % registryheaders = _headersForRegistry(registry)params = {'': ,'': }if len(query):params[''] = queryif len(keywords):params[''] = keywordswhile True:response = requests.get(url, headers=headers, params=params)response.raise_for_status()objects = ordered_json.loads(response.text)if len(objects):for o in objects:yield oparams[''] += params['']else:break", "docstring": "generator of objects returned by the search endpoint (both modules and\n targets).\n\n Query is a full-text search (description, name, keywords), keywords\n search only the module/target description keywords lists.\n\n If both parameters are specified the search is the intersection of the\n two queries.", "id": "f13542:m29"} {"signature": "def setAPIKey(registry, api_key):", "body": "if (registry is None) or (registry == Registry_Base_URL):returnsources = _getSources()source = Nonefor s in sources:if _sourceMatches(s, registry):source = sif source is None:source = {'':'','':registry,}sources.append(source)source[''] = api_keysettings.set('', sources)", "docstring": "Set the api key for accessing a registry. 
This is only necessary for\n development/test registries.", "id": "f13542:m31"} {"signature": "def getPublicKey(registry=None):", "body": "registry = registry or Registry_Base_URLpubkey_pem = Noneif _isPublicRegistry(registry):pubkey_pem = settings.getProperty('', '')else:for s in _getSources():if _sourceMatches(s, registry):if '' in s and s[''] and '' in s['']:pubkey_pem = s['']['']breakif not pubkey_pem:pubkey_pem, privatekey_pem = _generateAndSaveKeys()else:pubkey_pem = pubkey_pem.encode('')if b'' in pubkey_pem:pubkey = serialization.load_pem_public_key(pubkey_pem, default_backend())else:pubkey_der = binascii.unhexlify(pubkey_pem)pubkey = serialization.load_der_public_key(pubkey_der, default_backend())return _pubkeyWireFormat(pubkey)", "docstring": "Return the user's public key (generating and saving a new key pair if necessary)", "id": "f13542:m32"} {"signature": "@_retryConnectionErrorsdef getAuthData(registry=None):", "body": "registry = registry or Registry_Base_URLurl = '' % (registry)request_headers = _headersForRegistry(registry)logger.debug('', request_headers)try:response = requests.get(url, headers=request_headers)except requests.RequestException as e:logger.debug(str(e))return Noneif response.status_code == requests.codes.unauthorized: logger.debug('')return Noneelif response.status_code == requests.codes.not_found: logger.debug('')return Nonebody = response.textlogger.debug('' % body);r = {}parsed_response = ordered_json.loads(body)if '' in parsed_response:raise AuthError(parsed_response[''])for token in parsed_response:if '' in token and token[''] and '' in token:r[token['']] = token['']breaklogger.debug('' % r);return r", "docstring": "Poll the registry to get the result of a completed authentication\n (which, depending on the authentication the user chose or was directed\n to, will include a github or other access token)", "id": "f13542:m33"} {"signature": "@classmethoddef createFromSource(cls, vs, name, registry):", "body": "if registry == '':name_match = re.match('', name)if not name_match:raise access_common.AccessException('' % name)else:name_match = re.match('', name)if not name_match:raise access_common.AccessException('' % name)assert(vs.semantic_spec)return RegistryThing(name, vs.semantic_spec, registry)", "docstring": "returns a registry component for anything that's a valid package\n name (this does not guarantee that the component actually exists in\n the registry: use availableVersions() for that).", "id": "f13542:c2:m1"} {"signature": "def availableVersions(self):", "body": "return _listVersions(self.namespace, self.name)", "docstring": "return a list of Version objects, each able to retrieve a tarball", "id": "f13542:c2:m3"} {"signature": "def _mergeDictionaries(*args):", "body": "result = type(args[])()for k, v in itertools.chain(*[x.items() for x in args]):if not k in result:result[k] = velif isinstance(result[k], dict) and isinstance(v, dict):result[k] = _mergeDictionaries(result[k], v)return result", "docstring": "merge dictionaries of dictionaries recursively, with elements from\n dictionaries earlier in the argument sequence taking precedence", "id": "f13543:m2"} {"signature": "def _mirrorStructure(dictionary, value):", "body": "result = type(dictionary)()for k in dictionary.keys():if isinstance(dictionary[k], dict):result[k] = _mirrorStructure(dictionary[k], value)else:result[k] = valuereturn result", "docstring": "create a new nested dictionary object with the same structure as\n 'dictionary', but with all scalar values replaced with 'value'", "id": 
"f13543:m3"} {"signature": "def loadAdditionalConfig(config_path):", "body": "error = Noneconfig = {}if not config_path:return (error, config)if os.path.isfile(config_path):try:config = ordered_json.load(config_path)except Exception as e:error = \"\" % (config_path, e)else:try:config = ordered_json.loads(config_path)except Exception as e:if '' in config_path or '' in config_path:error = \"\" % eelse:error = \"\" % config_pathlogger.debug('', config)return (error, config)", "docstring": "returns (error, config)", "id": "f13543:m5"} {"signature": "def baseTargetSpec(self):", "body": "inherits = self.description.get('', {})if len(inherits) == :name, version_req = list(inherits.items())[]shrinkwrap_version_req = self.getShrinkwrapMapping('').get(name, None)if shrinkwrap_version_req is not None:logger.debug('', shrinkwrap_version_req, name)return pack.DependencySpec(name,version_req,shrinkwrap_version_req = shrinkwrap_version_req)elif len(inherits) > :logger.error('', self.getName())return None", "docstring": "returns pack.DependencySpec for the base target of this target (or\n None if this target does not inherit from another target.", "id": "f13543:c0:m1"} {"signature": "def __init__(self, leaf_target, base_targets, app_config, additional_config):", "body": "super(DerivedTarget, self).__init__(path = leaf_target.path,installed_linked = leaf_target.installed_linked,latest_suitable_version = leaf_target.latest_suitable_version)self.hierarchy = [leaf_target] + base_targets[:]self.config = Noneself.config_blame = Noneself.app_config = app_configself.additional_config = additional_config or {}", "docstring": "Initialise a DerivedTarget (representing an inheritance hierarchy of\n Targets.), given the most-derived Target description, and a set of\n available Targets to compose the rest of the lineage from.\n\n DerivedTarget provides build & debug commands, and access to the\n derived target config info (merged with the application config\n info from config.json, if any).\n\n It's possible to update the application config for an existing\n DerivedTarget instance.\n\n DerivedTarget can also be used as a stand-in for the most-derived\n (leaf) target in the inheritance hierarchy.", "id": "f13543:c1:m0"} {"signature": "def getScript(self, scriptname):", "body": "for t in self.hierarchy:s = t.getScript(scriptname)if s:return sreturn None", "docstring": "return the specified script if one exists (possibly inherited from\n a base target)", "id": "f13543:c1:m3"} {"signature": "def _loadConfig(self):", "body": "config_dicts = [self.additional_config, self.app_config] + [t.getConfig() for t in self.hierarchy]config_blame = [_mirrorStructure(self.additional_config, ''),_mirrorStructure(self.app_config, ''),] + [_mirrorStructure(t.getConfig(), t.getName()) for t in self.hierarchy]self.config = _mergeDictionaries(*config_dicts)self.config_blame = _mergeDictionaries(*config_blame)", "docstring": "load the configuration information from the target hierarchy", "id": "f13543:c1:m4"} {"signature": "def getToolchainFiles(self):", "body": "return reversed([os.path.join(x.path, x.description['']) for x in self.hierarchy if '' in x.description])", "docstring": "return a list of toolchain file paths in override order (starting\n at the bottom/leaf of the hierarchy and ending at the base).\n The list is returned in the order they should be included\n (most-derived last).", "id": "f13543:c1:m10"} {"signature": "def getAdditionalIncludes(self):", "body": "return reversed([os.path.join(t.path, include_file)for t in 
self.hierarchyfor include_file in t.description.get('', [])])", "docstring": "Return the list of cmake files which are to be included by yotta in\n every module built. The list is returned in the order they should\n be included (most-derived last).", "id": "f13543:c1:m11"} {"signature": "def inheritsFrom(self, target_name):", "body": "for t in self.hierarchy:if t and t.getName() == target_name or target_name in t.description.get('', {}):return Truereturn False", "docstring": "Return true if this target inherits from the named target (directly\n or indirectly. Also returns true if this target is the named\n target. Otherwise return false.", "id": "f13543:c1:m12"} {"signature": "def exec_helper(self, cmd, builddir):", "body": "try:child = subprocess.Popen(cmd, cwd=builddir)child.wait()except OSError as e:if e.errno == errno.ENOENT:if cmd[] == '':return ''else:return '' % (cmd[])else:return '' % (cmd)if child.returncode:return '' % (cmd)", "docstring": "Execute the given command, returning an error message if an error occurred\n or None if the command was successful.", "id": "f13543:c1:m17"} {"signature": "@fsutils.dropRootPrivsdef build(self, builddir, component, args, release_build=False, build_args=None, targets=None,release_no_debug_info_build=False):", "body": "if build_args is None:build_args = []if targets is None:targets = []if release_no_debug_info_build:build_type = ''elif release_build:build_type = ''else:build_type = ''cmd = ['', '', '' % build_type, '', args.cmake_generator, '']res = self.exec_helper(cmd, builddir)if res is not None:return resfrom yotta.lib import cmake_fixupscmake_fixups.applyFixupsForFenerator(args.cmake_generator, builddir, component)build_command = self.overrideBuildCommand(args.cmake_generator, targets=targets)if build_command:cmd = build_command + build_argselse:cmd = ['', '', builddir]if len(targets):cmd += ['', targets[]]cmd += build_argsres = self.exec_helper(cmd, builddir)if res is not None:return reshint = self.hintForCMakeGenerator(args.cmake_generator, component)if hint:logger.info(hint)", "docstring": "Execute the commands necessary to build this component, and all of\n its dependencies.", "id": "f13543:c1:m18"} {"signature": "def findProgram(self, builddir, program):", "body": "if os.path.isfile(os.path.join(builddir, program)):logging.info('' % program)return programexact_matches = []insensitive_matches = []approx_matches = []for path, dirs, files in os.walk(builddir):if program in files:exact_matches.append(os.path.relpath(os.path.join(path, program), builddir))continuefiles_lower = [f.lower() for f in files]if program.lower() in files_lower:insensitive_matches.append(os.path.relpath(os.path.join(path, files[files_lower.index(program.lower())]),builddir))continuepg_basen_lower_noext = os.path.splitext(os.path.basename(program).lower())[]for f in files_lower:if pg_basen_lower_noext in f:approx_matches.append(os.path.relpath(os.path.join(path, files[files_lower.index(f)]),builddir))if len(exact_matches) == :logging.info('', program, exact_matches[])return exact_matches[]elif len(exact_matches) > 
:logging.error('' % (program,''.join(matches)))return Nonelogging.error('' % program)return None", "docstring": "Return the builddir-relative path of program, if only a partial\n path is specified. Returns None and logs an error message if the\n program is ambiguous or not found", "id": "f13543:c1:m19"} {"signature": "@fsutils.dropRootPrivsdef start(self, builddir, program, forward_args):", "body": "child = Nonetry:prog_path = self.findProgram(builddir, program)if prog_path is None:returnstart_env, start_vars = self.buildProgEnvAndVars(prog_path, builddir)if self.getScript(''):cmd = [os.path.expandvars(string.Template(x).safe_substitute(**start_vars))for x in self.getScript('')] + forward_argselse:cmd = shlex.split('' + prog_path) + forward_argslogger.debug('', cmd)child = subprocess.Popen(cmd, cwd = builddir, env = start_env)child.wait()if child.returncode:return \"\" % child.returncodechild = Noneexcept OSError as e:import errnoif e.errno == errno.ENOEXEC:return (\"\"+\"\"\"\") % prog_pathfinally:if child is not None:_tryTerminate(child)", "docstring": "Launch the specified program. Uses the `start` script if specified\n by the target, attempts to run it natively if that script is not\n defined.", "id": "f13543:c1:m21"} {"signature": "def debug(self, builddir, program):", "body": "try:signal.signal(signal.SIGINT, _ignoreSignal);if self.getScript('') is not None:return self._debugWithScript(builddir, program)elif '' in self.description:logger.warning(''+'', self.getName())return self._debugDeprecated(builddir, program)else:return \"\" % selffinally:signal.signal(signal.SIGINT, signal.SIG_DFL);", "docstring": "Launch a debugger for the specified program. Uses the `debug`\n script if specified by the target, falls back to the `debug` and\n `debugServer` commands if not. 
`program` is inserted into the\n $program variable in commands.", "id": "f13543:c1:m22"} {"signature": "def _handleAuth(fn):", "body": "@functools.wraps(fn)def wrapped(*args, **kwargs):interactive = globalconf.get('')def retryWithAuthOrRaise(original_exception):auth.authorizeUser(provider='', interactive=interactive)if not interactive:raise original_exceptionelse:logger.debug('', settings.getProperty('', ''))return fn(*args, **kwargs)def handleRateLimitExceeded(original_exception):if not _userAuthedWithGithub():logger.warning('')return retryWithAuthOrRaise(original_exception)else:raise original_exceptiontry:return fn(*args, **kwargs)except requests.exceptions.HTTPError as e:if e.response.status_code == :return handleRateLimitExceeded(e)if e.response.status_code == :return retryWithAuthOrRaise(e)raiseexcept github.BadCredentialsException as e:logger.debug(\"\")return retryWithAuthOrRaise(e)except github.UnknownObjectException as e:logger.debug(\"\")if not _userAuthedWithGithub():logger.info('')return retryWithAuthOrRaise(e)raiseexcept github.RateLimitExceededException as e:return handleRateLimitExceeded(e)except github.GithubException as e:if e.status == :return handleRateLimitExceeded(e)raisereturn wrapped", "docstring": "Decorator to re-try API calls after asking the user for authentication.", "id": "f13545:m2"} {"signature": "@_handleAuthdef _getTags(repo):", "body": "logger.debug('', repo)g = Github(settings.getProperty('', ''))repo = g.get_repo(repo)tags = repo.get_tags()logger.debug('', repo, [t.name for t in tags])return {t.name: _ensureDomainPrefixed(t.tarball_url) for t in tags}", "docstring": "return a dictionary of {tag: tarball_url}", "id": "f13545:m3"} {"signature": "@_handleAuthdef _getTipArchiveURL(repo):", "body": "g = Github(settings.getProperty('', ''))repo = g.get_repo(repo)return repo.get_archive_link('')", "docstring": "return a string containing a tarball url", "id": "f13545:m6"} {"signature": "@_handleAuthdef _getCommitArchiveURL(repo, commit):", "body": "g = Github(settings.getProperty('', ''))repo = g.get_repo(repo)return repo.get_archive_link('', commit)", "docstring": "return a string containing a tarball url", "id": "f13545:m7"} {"signature": "@_handleAuthdef _getTarball(url, into_directory, cache_key, origin_info=None):", "body": "try:access_common.unpackFromCache(cache_key, into_directory)except KeyError as e:tok = settings.getProperty('', '')headers = {}if tok is not None:headers[''] = '' + str(tok)logger.debug('', url)response = requests.get(url, allow_redirects=True, stream=True, headers=headers)response.raise_for_status()logger.debug('', url)logger.debug('', response.headers)response.raise_for_status()access_common.unpackTarballStream(stream = response,into_directory = into_directory,hash = {},cache_key = cache_key,origin_info = origin_info)", "docstring": "unpack the specified tarball url into the specified directory", "id": "f13545:m8"} {"signature": "@classmethoddef createFromSource(cls, vs, name=None):", "body": "return GithubComponent(vs.location, vs.spec, vs.semantic_spec, name)", "docstring": "returns a github component for any github url (including\n git+ssh:// git+http:// etc. 
or None if this is not a Github URL.\n For all of these we use the github api to grab a tarball, because\n that's faster.\n\n Normally version will be empty, unless the original url was of the\n form: 'owner/repo @version' or 'url://...#version', which can be used\n to grab a particular tagged version.\n\n (Note that for github components we ignore the component name - it\n doesn't have to match the github module name)", "id": "f13545:c1:m1"} {"signature": "def availableVersions(self):", "body": "r = []for t in self._getTags():logger.debug(\"\", t)if not len(t[].strip()):continuetry:r.append(GithubComponentVersion(t[], t[], url=t[], name=self.name, cache_key=None))except ValueError:logger.debug('', t)return r", "docstring": "return a list of Version objects, each with a tarball URL set", "id": "f13545:c1:m5"} {"signature": "def availableTags(self):", "body": "return [GithubComponentVersion('', t[], t[], self.name, cache_key=_createCacheKey('', t[], t[], self.name)) for t in self._getTags()]", "docstring": "return a list of GithubComponentVersion objects for all tags", "id": "f13545:c1:m6"} {"signature": "def availableBranches(self):", "body": "return [GithubComponentVersion('', b[], b[], self.name, cache_key=None) for b in _getBranchHeads(self.repo).items()]", "docstring": "return a list of GithubComponentVersion objects for the tip of each branch", "id": "f13545:c1:m7"} {"signature": "def commitVersion(self):", "body": "import recommit_match = re.match('', self.tagOrBranchSpec(), re.I)if commit_match:return GithubComponentVersion('', '', _getCommitArchiveURL(self.repo, self.tagOrBranchSpec()), self.name, cache_key=None)return None", "docstring": "return a GithubComponentVersion object for a specific commit if valid", "id": "f13545:c1:m9"} {"signature": "@classmethoddef createFromSource(cls, vs, name=None):", "body": "if vs.location.startswith(''):location = vs.location[:]else:location = vs.locationreturn HGComponent(location, vs.spec)", "docstring": "returns a hg component for any hg:// url, or None if this is not\n a hg component.\n\n Normally version will be empty, unless the original url was of the\n form 'hg+ssh://...#version', which can be used to grab a particular\n tagged version.", "id": "f13548:c2:m1"} {"signature": "def _truthyConfValue(v):", "body": "if v is False:return Falseelif v is None:return Falseelif v == :return Falseelse:return True", "docstring": "Determine yotta-config truthiness. 
In yotta config land truthiness is\n different to python or json truthiness (in order to map nicely only\n preprocessor and CMake definediness):\n\n json -> python -> truthy/falsey\n false -> False -> Falsey\n null -> None -> Falsey\n undefined -> None -> Falsey\n 0 -> 0 -> Falsey\n \"\" -> \"\" -> Truthy (different from python)\n \"0\" -> \"0\" -> Truthy\n {} -> {} -> Truthy (different from python)\n [] -> [] -> Truthy (different from python)\n everything else is truthy", "id": "f13549:m0"} {"signature": "def getDependencySpecs(self, target=None):", "body": "deps = []def specForDependency(name, version_spec, istest):shrinkwrap = self.getShrinkwrapMapping()shrinkwrap_version_req = Noneif name in shrinkwrap:shrinkwrap_version_req = shrinkwrap[name]logger.debug('', self.getName(), shrinkwrap_version_req, name)return pack.DependencySpec(name,version_spec,istest,shrinkwrap_version_req = shrinkwrap_version_req,specifying_module = self.getName())deps += [specForDependency(x[], x[], False) for x in self.description.get('', {}).items()]target_deps = self.description.get('', {})if target is not None:for conf_key, target_conf_deps in target_deps.items():if _truthyConfValue(target.getConfigValue(conf_key)) or conf_key in target.getSimilarTo_Deprecated():logger.debug('' %(conf_key, self.getName()))deps += [specForDependency(x[], x[], False) for x in target_conf_deps.items()]deps += [specForDependency(x[], x[], True) for x in self.description.get('', {}).items()]target_deps = self.description.get('', {})if target is not None:for conf_key, target_conf_deps in target_deps.items():if _truthyConfValue(target.getConfigValue(conf_key)) or conf_key in target.getSimilarTo_Deprecated():logger.debug('' %(conf_key, self.getName()))deps += [specForDependency(x[], x[], True) for x in target_conf_deps.items()]seen = set()r = []for dep in deps:if not dep.name in seen:r.append(dep)seen.add(dep.name)return r", "docstring": "Returns [DependencySpec]\n\n These are returned in the order that they are listed in the\n component description file: this is so that dependency resolution\n proceeds in a predictable way.", "id": "f13549:c0:m1"} {"signature": "def satisfyTarget(self, target_name_and_version, update_installed=False, additional_config=None, install_missing=True):", "body": "from yotta.lib import targetapplication_dir = Noneif self.isApplication():application_dir = self.pathreturn target.getDerivedTarget(target_name_and_version,self.targetsPath(),install_missing = install_missing,application_dir = application_dir,update_installed = update_installed,additional_config = additional_config,shrinkwrap = self.getShrinkwrap())", "docstring": "Ensure that the specified target name (and optionally version,\n github ref or URL) is installed in the targets directory of the\n current component\n\n returns (derived_target, errors)", "id": "f13549:c0:m12"} {"signature": "def getTarget(self, target_name_and_version, additional_config=None):", "body": "derived_target, errors = self.satisfyTarget(target_name_and_version,additional_config = additional_config,install_missing = False)if len(errors):return Noneelse:return derived_target", "docstring": "Return a derived target object representing the selected target: if\n the target is not installed, or is invalid then the returned object\n will test false in a boolean context.\n\n Returns derived_target\n\n Errors are not displayed.", "id": "f13549:c0:m13"} {"signature": "def installedDependencies(self):", "body": "return self.installed_dependencies", "docstring": "Return true if 
satisfyDependencies has been called.\n\n Note that this is slightly different to when all of the\n dependencies are actually satisfied, but can be used as if it means\n that.", "id": "f13549:c0:m14"} {"signature": "def isApplication(self):", "body": "return bool(len(self.getBinaries()))", "docstring": "Return true if this module is an application instead of a reusable\n library", "id": "f13549:c0:m15"} {"signature": "def getBinaries(self):", "body": "if '' in self.description:return {os.path.normpath(self.description['']): self.getName()}else:return {}", "docstring": "Return a dictionary of binaries to compile: {\"dirname\":\"exename\"},\n this is used when automatically generating CMakeLists\n\n Note that currently modules may define only a single executable\n binary or library to be built by the automatic build system, by\n specifying `\"bin\": \"dir-to-be-built-into-binary\"`, or `\"lib\":\n \"dir-to-be-built-into-library\"`, and the bin/lib will always have\n the same name as the module. The default behaviour if nothing is\n specified is for the 'source' directory to be built into a library.\n\n The module.json syntax may allow for other combinations in the\n future (and callers of this function should not rely on it\n returning only a single item). For example, a \"bin\": {\"dirname\":\n \"exename\"} syntax might be supported, however currently more\n complex builds must be controlled by custom CMakeLists.", "id": "f13549:c0:m16"} {"signature": "def getLibs(self, explicit_only=False):", "body": "if '' in self.description:return {os.path.normpath(self.description['']): self.getName()}elif '' not in self.description and not explicit_only:return {'': self.getName()}else:return {}", "docstring": "Return a dictionary of libraries to compile: {\"dirname\":\"libname\"},\n this is used when automatically generating CMakeLists.\n\n If explicit_only is not set, then in the absence of both 'lib' and\n 'bin' sections in the module.json file, the \"source\" directory\n will be returned.\n\n Note that currently modules may define only a single executable\n binary or library to be built by the automatic build system, by\n specifying `\"bin\": \"dir-to-be-built-into-binary\"`, or `\"lib\":\n \"dir-to-be-built-into-library\"`, and the bin/lib will always have\n the same name as the module. The default behaviour if nothing is\n specified is for the 'source' directory to be built into a library.\n\n The module.json syntax may allow for other combinations in the\n future (and callers of this function should not rely on it\n returning only a single item). For example, a \"bin\": {\"dirname\":\n \"exename\"} syntax might be supported, however currently more\n complex builds must be controlled by custom CMakeLists.", "id": "f13549:c0:m17"} {"signature": "def licenses(self):", "body": "if '' in self.description:return [self.description['']]else:return [x[''] for x in self.description['']]", "docstring": "Return a list of licenses that apply to this module. (Strings,\n which may be SPDX identifiers)", "id": "f13549:c0:m18"} {"signature": "def getExtraIncludes(self):", "body": "if '' in self.description:return [os.path.normpath(x) for x in self.description['']]else:return []", "docstring": "Some components must export whole directories full of headers into\n the search path. 
This is really really bad, and they shouldn't do\n it, but support is provided as a concession to compatibility.", "id": "f13549:c0:m19"} {"signature": "def getExtraSysIncludes(self):", "body": "if '' in self.description:return [os.path.normpath(x) for x in self.description['']]else:return []", "docstring": "Some components (e.g. libc) must export directories of header files\n into the system include search path. They do this by adding a\n 'extraSysIncludes' : [ array of directories ] field in their\n package description. This function returns the list of directories\n (or an empty list), if it doesn't exist.", "id": "f13549:c0:m20"} {"signature": "def sourceDirValidationError(dirname, component_name):", "body": "if dirname == component_name:return '' % (component_name, dirname)elif dirname.lower() in ('', '') and dirname != '':return '' % (component_name, dirname)elif isPotentialTestDir(dirname) and dirname != '':return '' % (component_name, dirname)elif not Source_Dir_Regex.match(dirname):corrected = Source_Dir_Invalid_Regex.sub('', dirname.lower())if not corrected:corrected = ''return '' % (component_name, dirname, corrected)else:return None", "docstring": "validate source directory names in components", "id": "f13551:m1"} {"signature": "def read(self, filenames):", "body": "for fn in filenames:try:self.configs[fn] = ordered_json.load(fn)except IOError:self.configs[fn] = OrderedDict()except Exception as e:self.configs[fn] = OrderedDict()logging.warning(\"\",fn, e)", "docstring": "Read a list of files. Their configuration values are merged, with\n preference to values from files earlier in the list.", "id": "f13552:c0:m1"} {"signature": "def get(self, path):", "body": "path = _splitPath(path)for config in self.configs.values():cur = configfor el in path:if el in cur:cur = cur[el]else:cur = Nonebreakif cur is not None:return curreturn None", "docstring": "return a configuration value\n\n usage:\n get('section.property')\n\n Note that currently array indexes are not supported. You must\n get the whole array.\n\n returns None if any path element or the property is missing", "id": "f13552:c0:m2"} {"signature": "def set(self, path, value=None, filename=None):", "body": "if filename is None:config = self._firstConfig()[]else:config = self.configs[filename]path = _splitPath(path)for el in path[:-]:if el in config:config = config[el]else:config[el] = OrderedDict()config = config[el]config[path[-]] = value", "docstring": "Set a configuration value. If no filename is specified, the\n property is set in the first configuration file. Note that if a\n filename is specified and the property path is present in an\n earlier filename then set property will be hidden.\n\n usage:\n set('section.property', value='somevalue')\n\n Note that currently array indexes are not supported. 
You must\n set the whole array.", "id": "f13552:c0:m3"} {"signature": "def nonShrinkwrappedVersionReq(self):", "body": "return self.version_req", "docstring": "return the dependency specification ignoring any shrinkwrap", "id": "f13553:c2:m2"} {"signature": "def versionReq(self):", "body": "return self.shrinkwrap_version_req or self.version_req", "docstring": "return the dependency specification, which may be from a shrinkwrap file", "id": "f13553:c2:m3"} {"signature": "def origin(self):", "body": "if self.origin_info is None:self.origin_info = {}try:self.origin_info = ordered_json.load(os.path.join(self.path, Origin_Info_Fname))except IOError:passreturn self.origin_info.get('', None)", "docstring": "Read the .yotta_origin.json file (if present), and return the value\n of the 'url' property", "id": "f13553:c3:m3"} {"signature": "def getError(self):", "body": "return self.error", "docstring": "If this isn't a valid component/target, return some sort of\n explanation about why that is.", "id": "f13553:c3:m6"} {"signature": "def setError(self, error):", "body": "self.error = error", "docstring": "Set an error: note that setting an error does not make the module\n invalid if it would otherwise be valid.", "id": "f13553:c3:m7"} {"signature": "def outdated(self):", "body": "if self.latest_suitable_version and self.latest_suitable_version > self.version:return self.latest_suitable_versionelse:return None", "docstring": "Return a truthy object if a newer suitable version is available,\n otherwise return None.\n (in fact the object returned is a ComponentVersion that can be used\n to get the newer version)", "id": "f13553:c3:m11"} {"signature": "def vcsIsClean(self):", "body": "if not self.vcs:return Truereturn self.vcs.isClean()", "docstring": "Return true if the directory is not version controlled, or if it is\n version controlled with a supported system and is in a clean state", "id": "f13553:c3:m12"} {"signature": "def commitVCS(self, tag=None):", "body": "if not self.vcs:returnself.vcs.commit(message='' % tag, tag=tag)", "docstring": "Commit the current working directory state (or do nothing if the\n working directory is not version controlled)", "id": "f13553:c3:m13"} {"signature": "def getVersion(self):", "body": "return self.version", "docstring": "Return the version as specified by the package file.\n This will always be a real version: 1.2.3, not a hash or a URL.\n\n Note that a component installed through a URL still provides a real\n version - so if the first component to depend on some component C\n depends on it via a URI, and a second component depends on a\n specific version 1.2.3, dependency resolution will only succeed if\n the version of C obtained from the URL happens to be 1.2.3", "id": "f13553:c3:m14"} {"signature": "def ignores(self, path):", "body": "test_path = PurePath('', path)test_paths = tuple([test_path] + list(test_path.parents))for exp in self.ignore_patterns:for tp in test_paths:if tp.match(exp):logger.debug('', path, tp, exp)return Truereturn False", "docstring": "Test if this module ignores the file at \"path\", which must be a\n path relative to the root of the module.\n\n If a file is within a directory that is ignored, the file is also\n ignored.", "id": "f13553:c3:m18"} {"signature": "def writeDescription(self):", "body": "ordered_json.dump(os.path.join(self.path, self.description_filename), self.description)if self.vcs:self.vcs.markForCommit(self.description_filename)", "docstring": "Write the current (possibly modified) component description to a\n package 
description file in the component directory.", "id": "f13553:c3:m21"} {"signature": "def generateTarball(self, file_object):", "body": "archive_name = '' % (self.getName(), self.getVersion())def filterArchive(tarinfo):if tarinfo.name.find(archive_name) == :unprefixed_name = tarinfo.name[len(archive_name)+:]tarinfo.mode &= else:unprefixed_name = tarinfo.nameif self.ignores(unprefixed_name):return Noneelse:return tarinfowith tarfile.open(fileobj=file_object, mode='') as tf:logger.info('' % archive_name)tf.add(self.path, arcname=archive_name, filter=filterArchive)", "docstring": "Write a tarball of the current component/target to the file object\n \"file_object\", which must already be open for writing at position 0", "id": "f13553:c3:m22"} {"signature": "def publish(self, registry=None):", "body": "if (registry is None) or (registry == registry_access.Registry_Base_URL):if '' in self.description and self.description['']:return \"\" % (self.description_filename.split('')[])upload_archive = os.path.join(self.path, '')fsutils.rmF(upload_archive)fd = os.open(upload_archive, os.O_CREAT | os.O_EXCL | os.O_RDWR | getattr(os, \"\", ))with os.fdopen(fd, '') as tar_file:tar_file.truncate()self.generateTarball(tar_file)logger.debug('', tar_file.tell())tar_file.seek()shasum = hashlib.sha256()while True:chunk = tar_file.read()if not chunk:breakshasum.update(chunk)logger.debug('', shasum.hexdigest())tar_file.seek()with self.findAndOpenReadme() as readme_file_wrapper:if not readme_file_wrapper:logger.warning(\"\")with open(self.getDescriptionFile(), '') as description_file:return registry_access.publish(self.getRegistryNamespace(),self.getName(),self.getVersion(),description_file,tar_file,readme_file_wrapper.file,readme_file_wrapper.extension().lower(),registry=registry)", "docstring": "Publish to the appropriate registry, return a description of any\n errors that occurred, or None if successful.\n No VCS tagging is performed.", "id": "f13553:c3:m24"} {"signature": "def unpublish(self, registry=None):", "body": "return registry_access.unpublish(self.getRegistryNamespace(),self.getName(),self.getVersion(),registry=registry)", "docstring": "Try to un-publish the current version. Return a description of any\n errors that occurred, or None if successful.", "id": "f13553:c3:m25"} {"signature": "def getScript(self, scriptname):", "body": "script = self.description.get('', {}).get(scriptname, None)if script is not None:if isinstance(script, str) or isinstance(script, type(u'')):import shlexscript = shlex.split(script)if len(script) and script[].lower().endswith(''):if not os.path.isabs(script[]):absscript = os.path.abspath(os.path.join(self.path, script[]))logger.debug('', script[], absscript)script[] = absscriptimport sysscript = [sys.executable] + scriptreturn script", "docstring": "Return the specified script command. 
If the first part of the\n command is a .py file, then the current python interpreter is\n prepended.\n\n If the script is a single string, rather than an array, it is\n shlex-split.", "id": "f13553:c3:m26"} {"signature": "@fsutils.dropRootPrivsdef runScript(self, scriptname, additional_environment=None):", "body": "import subprocessimport shlexcommand = self.getScript(scriptname)if command is None:logger.debug('', self, scriptname)return if not len(command):logger.error(\"\", scriptname, self.getName())return env = os.environ.copy()if additional_environment is not None:env.update(additional_environment)errcode = child = Nonetry:logger.debug('', command)child = subprocess.Popen(command, cwd = self.path, env = env)child.wait()if child.returncode:logger.error(\"\",scriptname,self.getName(),child.returncode)errcode = child.returncodechild = Nonefinally:if child is not None:tryTerminate(child)return errcode", "docstring": "Run the specified script from the scripts section of the\n module.json file in the directory of this module.", "id": "f13553:c3:m27"} {"signature": "def dropRootPrivs(fn):", "body": "def wrapper(*args, **kwargs):return fn(*args, **kwargs)return wrapper", "docstring": "decorator to drop su/sudo privileges before running a function on\n unix/linux.\n\n ** on windows this function does nothing **", "id": "f13555:m0"} {"signature": "def which(program):", "body": "if os.path.exists(program) and os.access(program, os.X_OK):return programfor path in os.environ[''].split(os.pathsep):path = path.strip('')for ext in os.environ.get('', '').split(os.pathsep):progpath = os.path.join(path, program + ext)if os.path.exists(progpath) and os.access(progpath, os.X_OK):return progpathreturn None", "docstring": "look for \"program\" in PATH (respecting PATHEXT), and return the path to\n it, or None if it was not found", "id": "f13555:m6"} {"signature": "def islast(generator):", "body": "next_x = Nonefirst = Truefor x in generator:if not first:yield (next_x, False)next_x = xfirst = Falseif not first:yield (next_x, True)", "docstring": "indicate whether the current item is the last one in a generator", "id": "f13556:m0"} {"signature": "def configure(self, component, all_dependencies):", "body": "r = {}builddir = self.buildrootavailable_dependencies = OrderedDict((k, v) for k, v in all_dependencies.items() if v)self.set_toplevel_definitions = ''if self.build_info_include_file is None:self.build_info_include_file, build_info_definitions = self.getBuildInfo(component.path, builddir)self.set_toplevel_definitions += build_info_definitionsif self.config_include_file is None:self.config_include_file, config_definitions, self.config_json_file = self._getConfigData(available_dependencies, component, builddir, self.build_info_include_file)self.set_toplevel_definitions += config_definitionsself.configured = Truereturn {'': self.config_include_file,'': self.config_json_file,'': self.build_info_include_file}", "docstring": "Ensure all config-time files have been generated. 
Return a\n dictionary of generated items.", "id": "f13557:c1:m2"} {"signature": "def generateRecursive(self, component, all_components, builddir=None, modbuilddir=None, processed_components=None, application=None):", "body": "assert(self.configured)if builddir is None:builddir = self.buildrootif modbuilddir is None:modbuilddir = os.path.join(builddir, '')if processed_components is None:processed_components = dict()if not self.target:yield '' % self.targettoplevel = not len(processed_components)logger.debug('' % (component, self.target))recursive_deps = component.getDependenciesRecursive(available_components = all_components,target = self.target,available_only = True,test = True)dependencies = component.getDependencies(all_components,target = self.target,available_only = True,test = True)for name, dep in dependencies.items():if not dep:if dep.isTestDependency():logger.debug('' % (name, component))else:yield '' % (name, component)processed_components[component.getName()] = componentnew_dependencies = OrderedDict([(name,c) for name,c in dependencies.items() if c and not name in processed_components])self.generate(builddir, modbuilddir, component, new_dependencies, dependencies, recursive_deps, application, toplevel)logger.debug('' % component)for d in recursive_deps.values():logger.debug('' % d)processed_components.update(new_dependencies)for name, c in new_dependencies.items():for error in self.generateRecursive(c, all_components, os.path.join(modbuilddir, name), modbuilddir, processed_components, application=application):yield error", "docstring": "generate top-level CMakeLists for this component and its\n dependencies: the CMakeLists are all generated in self.buildroot,\n which MUST be out-of-source\n\n !!! NOTE: experimenting with a slightly different way of doing\n things here, this function is a generator that yields any errors\n produced, so the correct use is:\n\n for error in gen.generateRecursive(...):\n print(error)", "id": "f13557:c1:m3"} {"signature": "def _validateListedSubdirsExist(self, component):", "body": "lib_subdirs = component.getLibs(explicit_only=True)bin_subdirs = component.getBinaries()ok = Truefor d in lib_subdirs:if not os.path.exists(os.path.join(component.path, d)):logger.warning(\"\", d, component)ok = Falsefor d in bin_subdirs:if not os.path.exists(os.path.join(component.path, d)):logger.warning(\"\", d, component)ok = Falsereturn ok", "docstring": "Return true if all the subdirectories which this component lists in\n its module.json file exist (although their validity is otherwise\n not checked).\n\n If they don't, warning messages are printed.", "id": "f13557:c1:m5"} {"signature": "def _listSubDirectories(self, component, toplevel):", "body": "manual_subdirs = []auto_subdirs = []header_subdirs = []lib_subdirs = component.getLibs()bin_subdirs = component.getBinaries()test_subdirs = []resource_subdirs = []top_sources = []start_on_top = \"\" in [os.path.normpath(x) for x in list(lib_subdirs.keys()) + list(bin_subdirs.keys())]for f in sorted(os.listdir(component.path)):if f in Ignore_Subdirs or f.startswith('') or f.startswith(''):continuecheck_cmakefile_path = os.path.join(f, '')if os.path.isfile(os.path.join(component.path, check_cmakefile_path)) and notcomponent.ignores(check_cmakefile_path):self.checkStandardSourceDir(f, component)manual_subdirs.append(f)if f in ('',):test_subdirs.append(f)else:if os.path.isfile(os.path.join(component.path, f)):if not component.ignores(f) and start_on_top:sf = self.createSourceFile(f, os.path.join(component.path, f), \"\")if 
sf is not None:top_sources.append(sf)else:sources = self.containsSourceFiles(os.path.join(component.path, f), component)if sources:if f in ('',):auto_subdirs.append((f, sources))test_subdirs.append(f)elif start_on_top:from yotta.lib import validateif not validate.isPotentialTestDir(f):top_sources.extend(sources)if f == component.getName():header_subdirs.append((f, sources))elif os.path.normpath(f) in [fsutils.fullySplitPath(x)[] for x in lib_subdirs] oros.path.normpath(f) in [fsutils.fullySplitPath(x)[] for x in bin_subdirs]:for full_subpath in list(lib_subdirs.keys()) + list(bin_subdirs.keys()):if fsutils.fullySplitPath(full_subpath)[] == os.path.normpath(f):sources = self.containsSourceFiles(os.path.join(component.path, full_subpath), component)auto_subdirs.append((full_subpath, sources))elif f == component.getName():header_subdirs.append((f, sources))elif toplevel and((f in ('',)) or(os.path.normpath(f) in lib_subdirs or start_on_top) or(os.path.normpath(f) in bin_subdirs or start_on_top) and notcomponent.ignores(f)):logger.warning(\"\", f, component)if f in ('',):resource_subdirs.append(os.path.join(component.path, f))check_directory_name_cases = list(lib_subdirs.keys()) + list(bin_subdirs.keys()) + ['', '']if f.lower() in check_directory_name_cases + [''] and notf in check_directory_name_cases and notcomponent.ignores(f):self.checkStandardSourceDir(f, component)if top_sources:auto_subdirs.append((component.getName(), top_sources))return {\"\": manual_subdirs,\"\": auto_subdirs,\"\": header_subdirs,\"\": {component.getName(): component.getName()} if (start_on_top and component.isApplication()) else bin_subdirs,\"\": {component.getName(): component.getName()} if (start_on_top and not component.isApplication()) else lib_subdirs,\"\": test_subdirs,\"\": resource_subdirs}", "docstring": "return: {\n manual: [list of subdirectories with manual CMakeLists],\n auto: [list of pairs: (subdirectories name to autogenerate, a list of source files in that dir)],\n bin: {dictionary of subdirectory name to binary name},\n lib: {dictionary of subdirectory name to binary name},\n test: [list of directories that build tests],\n resource: [list of directories that contain resources]\n }", "id": "f13557:c1:m6"} {"signature": "def _getConfigData(self, all_dependencies, component, builddir, build_info_header_path):", "body": "from yotta.lib import ordered_jsonadd_defs_header = ''set_definitions = ''definitions = []definitions.append(('', sanitizePreprocessorSymbol(self.target.getName())))definitions.append(('' % sanitizePreprocessorSymbol(self.target.getName()),None))full_build_info_header_path = replaceBackslashes(os.path.abspath(build_info_header_path))logger.debug('', full_build_info_header_path)definitions.append(('', ''+full_build_info_header_path+''))for target in self.target.getSimilarTo_Deprecated():if '' not in target:definitions.append(('' % sanitizePreprocessorSymbol(target),None))merged_config = self.target.getMergedConfig()logger.debug('', merged_config)definitions += self._definitionsForConfig(merged_config, ['', ''])add_defs_header += ''for k, v in definitions:if v is not None:add_defs_header += '' % (k, v)set_definitions += '' % (k, v)else:add_defs_header += '' % kset_definitions += '' % kadd_defs_header += ''for dep in list(all_dependencies.values()) + [component]:add_defs_header += \"\" % (sanitizePreprocessorSymbol(dep.getName()), str(dep.getVersion()))add_defs_header += \"\" % (sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().major())add_defs_header += \"\" % 
(sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().minor())add_defs_header += \"\" % (sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().patch())defines = component.getDefines()if defines:add_defs_header += \"\"for name, value in defines.items():add_defs_header += \"\" % (name, value)add_defs_header += ''config_include_file = os.path.join(builddir, '')config_json_file = os.path.join(builddir, '')set_definitions += '' % replaceBackslashes(os.path.abspath(config_json_file))self._writeFile(config_include_file,''+''+add_defs_header+'')self._writeFile(config_json_file,ordered_json.dumps(merged_config))return (config_include_file, set_definitions, config_json_file)", "docstring": "returns (path_to_config_header, cmake_set_definitions)", "id": "f13557:c1:m8"} {"signature": "def getBuildInfo(self, sourcedir, builddir):", "body": "cmake_defs = ''preproc_defs = ''import datetimefrom yotta.lib import vcsnow = datetime.datetime.utcnow()vcs_instance = vcs.getVCS(sourcedir)if self.build_uuid is None:import uuidself.build_uuid = uuid.uuid4()definitions = [('', now.year, ''),('', now.month, ''),('', now.day, ''),('', now.hour, ''),('', now.minute, ''),('', now.second, ''),('', self.build_uuid, ''),]if vcs_instance is not None:commit_id = Nonerepotype = vcs_instance.__class__.__name__try:commit_id = vcs_instance.getCommitId()except vcs.VCSNotInstalled as e:logger.warning('', repotype)commit_id = Noneexcept vcs.VCSError as e:logger.debug('', e)logger.warning('',str(e).split('')[],repotype)if commit_id is not None:clean_state = int(vcs_instance.isClean())description = vcs_instance.getDescription()definitions += [('', commit_id, ''),('', clean_state, ''),('', description, '')]for d in definitions:preproc_defs += '' % dcmake_defs += '' % dbuildinfo_include_file = os.path.join(builddir, '')self._writeFile(buildinfo_include_file,''+''+preproc_defs+'')return (buildinfo_include_file, cmake_defs)", "docstring": "Write the build info header file, and return (path_to_written_header, set_cmake_definitions)", "id": "f13557:c1:m9"} {"signature": "def generate(self, builddir, modbuilddir, component, active_dependencies, immediate_dependencies, all_dependencies, application, toplevel):", "body": "include_root_dirs = ''if application is not None and component is not application:include_root_dirs += '' % replaceBackslashes(application.path)include_sys_dirs = ''include_other_dirs = ''for name, c in itertools.chain(((component.getName(), component),), all_dependencies.items()):if c is not component and c.isTestDependency():continueinclude_root_dirs += '' % replaceBackslashes(c.path)dep_sys_include_dirs = c.getExtraSysIncludes()for d in dep_sys_include_dirs:include_sys_dirs += '' % replaceBackslashes(os.path.join(c.path, d))dep_extra_include_dirs = c.getExtraIncludes()for d in dep_extra_include_dirs:include_other_dirs += '' % replaceBackslashes(os.path.join(c.path, d))add_depend_subdirs = ''for name, c in active_dependencies.items():depend_subdir = replaceBackslashes(os.path.join(modbuilddir, name))relpath = replaceBackslashes(os.path.relpath(depend_subdir, self.buildroot))add_depend_subdirs +=''''''''% (depend_subdir, relpath)delegate_to_existing = Nonedelegate_build_dir = Nonemodule_is_empty = Falseif os.path.isfile(os.path.join(component.path, '')) and not component.ignores(''):delegate_to_existing = component.pathadd_own_subdirs = []logger.debug(\"\", builddir)delegate_build_dir = os.path.join(builddir, '')else:self._validateListedSubdirsExist(component)subdirs = self._listSubDirectories(component, 
toplevel)manual_subdirs = subdirs['']autogen_subdirs = subdirs['']binary_subdirs = subdirs['']lib_subdirs = subdirs['']test_subdirs = subdirs['']resource_subdirs = subdirs['']header_subdirs = subdirs['']logger.debug(\"\", component, lib_subdirs, binary_subdirs)add_own_subdirs = []for f in manual_subdirs:if os.path.isfile(os.path.join(component.path, f, '')):if f in test_subdirs and component.isTestDependency():continueadd_own_subdirs.append((os.path.join(component.path, f), f))all_subdirs = manual_subdirs + [x[] for x in autogen_subdirs]if component.isTestDependency():if len(autogen_subdirs) + len(add_own_subdirs) == :module_is_empty = Trueelse:if len(autogen_subdirs) + len(add_own_subdirs) <= len(test_subdirs):module_is_empty = Truefor f, source_files in autogen_subdirs:if f in test_subdirs:if component.isTestDependency():continueself.generateTestDirList(builddir, f, source_files, component, immediate_dependencies, toplevel=toplevel, module_is_empty=module_is_empty)else:if f in binary_subdirs:is_executable = Trueobject_name = binary_subdirs[f]else:assert(f in lib_subdirs)object_name = lib_subdirs[f]for header_dir, header_files in header_subdirs:source_files.extend(header_files)self.generateSubDirList(builddir = builddir,dirname = f,source_files = source_files,component = component,all_subdirs = all_subdirs,immediate_dependencies = immediate_dependencies,object_name = object_name,resource_subdirs = resource_subdirs,is_executable = (f in binary_subdirs))add_own_subdirs.append((os.path.join(builddir, f), f))if component.isTestDependency():test_subdirs = []if module_is_empty:if len(binary_subdirs):logger.warning('')else:add_own_subdirs.append(self.createDummyLib(component, builddir, [x[] for x in immediate_dependencies.items() if not x[].isTestDependency()]))toolchain_file_path = os.path.join(builddir, '')if toplevel:template = jinja_environment.get_template('')file_contents = template.render({ \"\": self.target.getToolchainFiles()})self._writeFile(toolchain_file_path, file_contents)template = jinja_environment.get_template('')relpath = os.path.relpath(builddir, self.buildroot)file_contents = template.render({ \"\": toplevel,\"\": self.target.getName(),\"\": self.set_toplevel_definitions,\"\": toolchain_file_path,\"\": component,\"\": relpath,\"\": include_root_dirs,\"\": include_sys_dirs,\"\": include_other_dirs,\"\": add_depend_subdirs,\"\": add_own_subdirs,\"\": self.config_include_file,\"\": delegate_to_existing,\"\": delegate_build_dir,\"\": active_dependencies,\"\": module_is_empty,\"\": self.target.getAdditionalIncludes()})self._writeFile(os.path.join(builddir, ''), file_contents)", "docstring": "active_dependencies is the dictionary of components that need to be\n built for this component, but will not already have been built for\n another component.", "id": "f13557:c1:m10"} {"signature": "def long_to_bytes(n, blocksize=):", "body": "s = b''n = int(n)pack = struct.packwhile n > :s = pack('', n & ) + sn = n >> for i in range(len(s)):if s[i] != '':breakelse:s = ''i = s = s[i:]if blocksize > and len(s) % blocksize:s = (blocksize - len(s) % blocksize) * '' + sreturn s", "docstring": "long_to_bytes(n:long, blocksize:int) : string\n Convert a long integer to a byte string.\n\n If optional blocksize is given and greater than zero, pad the front of the\n byte string with binary zeros so that the length is a multiple of\n blocksize.", "id": "f13558:m0"} {"signature": "def pruneCache():", "body": "cache_dir = folders.cacheDirectory()def fullpath(f):return os.path.join(cache_dir, f)def 
getMTimeSafe(f):try:return os.stat(f).st_mtimeexcept FileNotFoundError:import timereturn time.clock()fsutils.mkDirP(cache_dir)max_cached_modules = getMaxCachedModules()for f in sorted([f for f in os.listdir(cache_dir) ifos.path.isfile(fullpath(f)) and not f.endswith('') and not f.endswith('')],key = lambda f: getMTimeSafe(fullpath(f)),reverse = True)[max_cached_modules:]:cache_logger.debug('', f)removeFromCache(f)cache_logger.debug('', max_cached_modules)", "docstring": "Prune the cache", "id": "f13560:m2"} {"signature": "def sometimesPruneCache(p):", "body": "def decorator(fn):@functools.wraps(fn)def wrapped(*args, **kwargs):r = fn(*args, **kwargs)if random.random() < p:pruneCache()return rreturn wrappedreturn decorator", "docstring": "return decorator to prune cache after calling fn with a probability of p", "id": "f13560:m3"} {"signature": "def unpackFromCache(cache_key, to_directory):", "body": "if cache_key is None:raise NotInCache('')cache_key = _encodeCacheKey(cache_key)cache_dir = folders.cacheDirectory()fsutils.mkDirP(cache_dir)path = os.path.join(cache_dir, cache_key)logger.debug('', path, to_directory)try:unpackFrom(path, to_directory)try:shutil.copy(path + '', os.path.join(to_directory, ''))except IOError as e:if e.errno == errno.ENOENT:passelse:raisecache_logger.debug('', cache_key, to_directory)returnexcept IOError as e:if e.errno == errno.ENOENT:cache_logger.debug('', cache_key)raise NotInCache('')except OSError as e:if e.errno == errno.ENOTEMPTY:logger.error('')else:raise", "docstring": "If the specified cache key exists, unpack the tarball into the\n specified directory, otherwise raise NotInCache (a KeyError subclass).", "id": "f13560:m6"} {"signature": "def _downloadToCache(stream, hashinfo={}, origin_info=dict()):", "body": "hash_name = Nonehash_value = Nonem = Noneif len(hashinfo):for h in ('',):if h in hashinfo:hash_name = hhash_value = hashinfo[h]m = getattr(hashlib, h)()breakif not hash_name:logger.warning('', hashinfo)cache_dir = folders.cacheDirectory()fsutils.mkDirP(cache_dir)file_size = (download_file, download_fname) = tempfile.mkstemp(dir=cache_dir, suffix='')with os.fdopen(download_file, '') as f:f.seek()for chunk in stream.iter_content():f.write(chunk)if hash_name:m.update(chunk)if hash_name:calculated_hash = m.hexdigest()logger.debug('' % (hash_name, calculated_hash, hash_value))if hash_value and (hash_value != calculated_hash):raise Exception('')file_size = f.tell()logger.debug('', file_size, download_fname)f.truncate()extended_origin_info = {'': hashinfo,'': file_size}extended_origin_info.update(origin_info)ordered_json.dump(download_fname + '', extended_origin_info)return os.path.basename(download_fname)", "docstring": "Download the specified stream to a temporary cache directory, and\n returns a cache key that can be used to access/remove the file.\n You should use either removeFromCache(cache_key) or _moveCachedFile to\n move the downloaded file to a known key after downloading.", "id": "f13560:m7"} {"signature": "def _moveCachedFile(from_key, to_key):", "body": "cache_dir = folders.cacheDirectory()from_path = os.path.join(cache_dir, from_key)to_path = os.path.join(cache_dir, to_key)try:os.rename(from_path, to_path)os.rename(from_path+'', to_path+'')except Exception as e:if (isinstance(e, OSError) and e.errno == errno.ENOENT) or(isinstance(e, getattr(__builtins__, \"\", type(None))) and e.errno == ):fsutils.rmF(from_path)else:raise", "docstring": "Move a file atomically within the cache: used to make cached files\n available at known keys, so they can 
be used by other processes.", "id": "f13560:m8"} {"signature": "@sometimesPruneCache()def unpackTarballStream(stream, into_directory, hash={}, cache_key=None, origin_info=dict()):", "body": "cache_key = _encodeCacheKey(cache_key)if getMaxCachedModules() == :cache_key = Nonenew_cache_key = _downloadToCache(stream, hash, origin_info)unpackFromCache(new_cache_key, into_directory)if cache_key is None:removeFromCache(new_cache_key)else:_moveCachedFile(new_cache_key, cache_key)", "docstring": "Unpack a response stream that contains a tarball into a directory. If\n a hash is provided, then it will be used as a cache key (for future\n requests you can try to retrieve the key value from the cache first,\n before making the request)", "id": "f13560:m9"} {"signature": "def availableVersions(self):", "body": "r = []for t in self.vcs.tags():logger.debug(\"\", t)if not len(t.strip()):continuetry:r.append(GitCloneVersion(t, t, self))except ValueError:logger.debug('', t)return r", "docstring": "return a list of GitCloneVersion objects for tags which are valid\n semantic version identifiers.", "id": "f13561:c1:m2"} {"signature": "def availableTags(self):", "body": "return [GitCloneVersion('', t, self) for t in self.vcs.tags()]", "docstring": "return a list of GitCloneVersion objects for all tags", "id": "f13561:c1:m3"} {"signature": "def availableBranches(self):", "body": "return [GitCloneVersion('', b, self) for b in self.vcs.branches()]", "docstring": "return a list of GitCloneVersion objects for the tip of each branch", "id": "f13561:c1:m4"} {"signature": "def commitVersion(self, spec):", "body": "import recommit_match = re.match('', spec, re.I)if commit_match:return GitCloneVersion('', spec, self)return None", "docstring": "return a GitCloneVersion object for a specific commit if valid", "id": "f13561:c1:m6"} {"signature": "@classmethoddef createFromSource(cls, vs, name=None):", "body": "return GitComponent(vs.location, vs.spec, vs.semantic_spec)", "docstring": "returns a git component for any git:// url, or None if this is not\n a git component.\n\n Normally version will be empty, unless the original url was of the\n form 'git://...#version', which can be used to grab a particular\n tag or branch, or ...#>=1.2.3, which can be used to specify\n semantic version specifications on tags.", "id": "f13561:c2:m1"} {"signature": "def checkDependenciesForShrinkwrap(dependency_list):", "body": "from yotta.lib import sourceparseerrors = []available_versions = {}for mod in dependency_list.get('', []):available_versions[mod['']] = mod['']for mod in dependency_list.get('', []):for spec_info in mod.get('', []):name = spec_info['']spec = spec_info['']if spec_info.get('', False):continueif not name in available_versions:errors.append('' % (name, mod['']))else:available_version = available_versions[name]parsed_spec = sourceparse.parseSourceURL(spec)if not parsed_spec.semanticSpecMatches(available_version):errors.append('' % (name, available_version, parsed_spec.semanticSpec(), mod['']))return errors", "docstring": "return a list of errors encountered (e.g. 
dependency missing or\n specification not met)", "id": "f13566:m2"} {"signature": "def force_unicode(value):", "body": "if isinstance(value, six.binary_type):return value.decode('')return value", "docstring": "If input string is binary, then decode from utf-8.", "id": "f13574:m0"} {"signature": "def cut_head(lst):", "body": "assert len(lst) > , ''return lst[], lst[:]", "docstring": "Returns the first element and an iterator over the rest of the elements.\n\n TODO: will be useful to make it work not only with lists\n but also with any iterable.", "id": "f13574:m3"} {"signature": "def serialize_text(out, text):", "body": "padding = len(out)add_padding = padding_adder(padding)text = add_padding(text, ignore_first_line=True)return out + text", "docstring": "This method is used to append the content of the `text`\n argument to the `out` argument.\n\n Depending on how many lines are in the text, a\n padding can be added to all lines except the first\n one.\n\n Concatenation result is appended to the `out` argument.", "id": "f13574:m4"} {"signature": "def serialize_list(out, lst, delimiter=u'', max_length=):", "body": "have_multiline_items = any(map(is_multiline, lst))result_will_be_too_long = sum(map(len, lst)) > max_lengthif have_multiline_items or result_will_be_too_long:padding = len(out)add_padding = padding_adder(padding)head, rest = cut_head(lst)rest = map(add_padding, rest)head = add_padding(head, ignore_first_line=True)lst = chain((head,), rest)delimiter += u''else:delimiter += u''return out + delimiter.join(lst)", "docstring": "This method is used to serialize a list of text\n pieces like [\"some=u'Another'\", \"blah=124\"]\n\n Depending on how many lines are in these items,\n they are concatenated in a row or as a column.\n\n Concatenation result is appended to the `out` argument.", "id": "f13574:m5"} {"signature": "def format_value(value):", "body": "value_id = id(value)if value_id in recursion_breaker.processed:return u''recursion_breaker.processed.add(value_id)try:if isinstance(value, six.binary_type):return u\"\".format(value.decode(''))elif isinstance(value, six.text_type):return u\"\".format(value)elif isinstance(value, (list, tuple)):values = list(map(format_value, value))result = serialize_list(u'', values, delimiter=u'') + u''return force_unicode(result)elif isinstance(value, dict):items = six.iteritems(value)items = (tuple(map(format_value, item))for item in items)items = list(items)items.sort()items = [serialize_text(u''.format(key),item_value)for key, item_value in items]result = serialize_list(u'', items, delimiter=u'') + u''return force_unicode(result)return force_unicode(repr(value))finally:recursion_breaker.processed.remove(value_id)", "docstring": "This function should return a unicode representation of the value", "id": "f13574:m6"} {"signature": "def make_repr(*args, **kwargs):", "body": "def method(self):cls_name = self.__class__.__name__if args:field_names = argselse:def undercored(name): return name.startswith('')def is_method(name): return callable(getattr(self, name))def good_name(name):return not undercored(name) and not is_method(name)field_names = filter(good_name, dir(self))field_names = sorted(field_names)field_getters = zip(field_names,map(attrgetter, field_names))field_getters = chain(field_getters,kwargs.items())fields = ((name, format_value(getter(self)))for name, getter in field_getters)fields = ((u''.format(name), value)for name, value in fields)fields = list(starmap(serialize_text, fields))beginning = u''.format(cls_name=cls_name,)result = serialize_list(beginning,fields)result += 
u'>'if ON_PYTHON2:result = result.encode('')return resultreturn method", "docstring": "Returns __repr__ method which returns ASCII\n representation of the object with given fields.\n\n Without arguments, ``make_repr`` generates a method\n which outputs all of the object's non-protected (non-underscored)\n attributes which are not callables.\n\n Accepts ``*args``, which should be names of the object's\n attributes to be included in the output::\n\n __repr__ = make_repr('foo', 'bar')\n\n If you want to generate an attribute's content on the fly,\n then you should use keyword arguments and pass a callable\n of one argument::\n\n __repr__ = make_repr(foo=lambda obj: obj.blah + 100500)", "id": "f13574:m7"} {"signature": "def sample(self, random_state=None):", "body": "from numpy_sugar import epsilonfrom numpy_sugar.linalg import sum2diagfrom numpy_sugar.random import multivariate_normalif random_state is None:random_state = RandomState()m = self._mean.value()K = self._cov.value().copy()sum2diag(K, +epsilon.small, out=K)return self._lik.sample(multivariate_normal(m, K, random_state), random_state)", "docstring": "r\"\"\"Sample from the specified distribution.\n\n Parameters\n ----------\n random_state : random_state\n Set the initial random state.\n\n Returns\n -------\n numpy.ndarray\n Sample.", "id": "f13577:c0:m1"} {"signature": "def bernoulli_sample(offset,G,heritability=,causal_variants=None,causal_variance=,random_state=None,):", "body": "link = LogitLink()mean, cov = _mean_cov(offset, G, heritability, causal_variants, causal_variance, random_state)lik = BernoulliProdLik(link)sampler = GGPSampler(lik, mean, cov)return sampler.sample(random_state)", "docstring": "r\"\"\"Bernoulli likelihood sampling.\n\n Sample according to\n\n .. math::\n\n \\mathbf y \\sim \\prod_{i=1}^n\n \\text{Bernoulli}(\\mu_i = \\text{logit}(z_i))\n \\mathcal N(~ o \\mathbf 1 + \\mathbf a^\\intercal \\boldsymbol\\alpha;\n ~ (h^2 - v_c)\\mathrm G^\\intercal\\mathrm G +\n (1-h^2-v_c)\\mathrm I ~)\n\n using the canonical Logit link function to define the conditional Bernoulli\n mean :math:`\\mu_i`.\n\n The causal :math:`\\mathbf a` covariates and the corresponding effect-sizes\n are randomly drawn according to the following idea. The ``causal_variants``,\n if given, are first mean-zero and std-one normalized and then have\n their elements divided by the square root of the number of variants::\n\n causal_variants = _stdnorm(causal_variants, axis=0)\n causal_variants /= sqrt(causal_variants.shape[1])\n\n The causal effect-sizes :math:`\\boldsymbol\\alpha` are drawn from\n :math:`\\{-1, +1\\}` and subsequently normalized for mean-zero and std-one.\n\n Parameters\n ----------\n random_state : random_state\n Set the initial random state.\n\n Example\n -------\n\n .. doctest::\n\n >>> from glimix_core.random import bernoulli_sample\n >>> from numpy.random import RandomState\n >>> offset = 5\n >>> G = [[1, -1], [2, 1]]\n >>> bernoulli_sample(offset, G, random_state=RandomState(0))\n array([1., 1.])", "id": "f13578:m0"} {"signature": "def binomial_sample(ntrials,offset,G,heritability=,causal_variants=None,causal_variance=,random_state=None,):", "body": "link = LogitLink()mean, cov = _mean_cov(offset, G, heritability, causal_variants, causal_variance, random_state)lik = BinomialProdLik(ntrials, link)sampler = GGPSampler(lik, mean, cov)return sampler.sample(random_state)", "docstring": "Binomial likelihood sampling.\n\n Parameters\n ----------\n random_state : random_state\n Set the initial random state.\n\n Example\n -------\n\n .. 
doctest::\n\n >>> from glimix_core.random import binomial_sample\n >>> from numpy.random import RandomState\n >>> ntrials = [5, 15]\n >>> offset = 0.5\n >>> G = [[1, -1], [2, 1]]\n >>> binomial_sample(ntrials, offset, G, random_state=RandomState(0))\n array([ 2., 14.])", "id": "f13578:m1"} {"signature": "def poisson_sample(offset,G,heritability=,causal_variants=None,causal_variance=,random_state=None,):", "body": "mean, cov = _mean_cov(offset, G, heritability, causal_variants, causal_variance, random_state)link = LogLink()lik = PoissonProdLik(link)sampler = GGPSampler(lik, mean, cov)return sampler.sample(random_state)", "docstring": "Poisson likelihood sampling.\n\n Parameters\n ----------\n random_state : random_state\n Set the initial random state.\n\n Example\n -------\n\n .. doctest::\n\n >>> from glimix_core.random import poisson_sample\n >>> from numpy.random import RandomState\n >>> offset = -0.5\n >>> G = [[0.5, -1], [2, 1]]\n >>> poisson_sample(offset, G, random_state=RandomState(0))\n array([0, 6])", "id": "f13578:m2"} {"signature": "def sample(self, random_state=None):", "body": "from numpy_sugar import epsilonfrom numpy_sugar.linalg import sum2diagfrom numpy_sugar.random import multivariate_normalif random_state is None:random_state = RandomState()m = self._mean.value()K = self._cov.value().copy()sum2diag(K, +epsilon.small, out=K)return multivariate_normal(m, K, random_state)", "docstring": "r\"\"\"Sample from the specified distribution.\n\n Parameters\n ----------\n random_state : random_state\n Set the initial random state.\n\n Returns\n -------\n numpy.ndarray\n Sample.", "id": "f13579:c0:m1"} {"signature": "def fit(self, verbose=True, factr=, pgtol=):", "body": "self._maximize(verbose=verbose, factr=factr, pgtol=pgtol)", "docstring": "r\"\"\"Maximise the marginal likelihood.\n\n Parameters\n ----------\n verbose : bool\n ``True`` for progress output; ``False`` otherwise.\n Defaults to ``True``.\n factr : float, optional\n The iteration stops when\n ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, where ``eps`` is\n the machine precision.\n pgtol : float, optional\n The iteration will stop when ``max{|proj g_i | i = 1, ..., n} <= pgtol``\n where ``pg_i`` is the i-th component of the projected gradient.\n\n Notes\n -----\n Please, refer to :func:`scipy.optimize.fmin_l_bfgs_b` for further information\n about ``factr`` and ``pgtol``.", "id": "f13587:c0:m2"} {"signature": "def lml(self):", "body": "return self.value()", "docstring": "r\"\"\"Log of the marginal likelihood.\n\n Returns\n -------\n float\n :math:`\\log p(\\mathbf y)`", "id": "f13587:c0:m3"} {"signature": "def __init__(self, A, X):", "body": "self._A = asarray(A, float)self._X = asarray(X, float)vecB = zeros((X.shape[], A.shape[])).ravel()self._vecB = Vector(vecB)self._nparams = vecB.sizeFunction.__init__(self, \"\", vecB=self._vecB)", "docstring": "Constructor.\n\nParameters\n----------\nA : array_like\n p\u00d7p array.\nX : array_like\n n\u00d7c array.", "id": "f13588:c0:m0"} {"signature": "@propertydef nparams(self):", "body": "return self._nparams", "docstring": "Number of parameters.", "id": "f13588:c0:m1"} {"signature": "@propertydef A(self):", "body": "return self._A", "docstring": "Matrix A.", "id": "f13588:c0:m2"} {"signature": "@propertydef X(self):", "body": "return self._X", "docstring": "Matrix X.", "id": "f13588:c0:m3"} {"signature": "@propertydef AX(self):", "body": "return kron(self.A, self.X)", "docstring": "A \u2297 X.", "id": "f13588:c0:m4"} {"signature": "def value(self):", "body": 
"return self.AX @ self._vecB.value", "docstring": "Kronecker mean function.\n\nReturns\n-------\n\ud835\udc26 : ndarray\n (A\u2297X)vec(B).", "id": "f13588:c0:m5"} {"signature": "def gradient(self):", "body": "return {\"\": self.AX}", "docstring": "Gradient of the linear mean function.\n\nReturns\n-------\nvecB : ndarray\n Derivative of M over vec(B).", "id": "f13588:c0:m6"} {"signature": "@propertydef B(self):", "body": "return unvec(self._vecB.value, (self.X.shape[], self.A.shape[]))", "docstring": "Effect-sizes parameter, B.", "id": "f13588:c0:m7"} {"signature": "def __init__(self, means):", "body": "self._means = [c for c in means]Function.__init__(self, \"\", composite=self._means)", "docstring": "Constructor.\n\nParameters\n----------\nmeans : list\n List of mean functions.", "id": "f13593:c0:m0"} {"signature": "def value(self):", "body": "return add.reduce([mean.value() for mean in self._means])", "docstring": "Sum of mean vectors, \ud835\udc1f\u2080 + \ud835\udc1f\u2081 + \u2026.\n\nReturns\n-------\n\ud835\udc26 : ndarray\n \ud835\udc1f\u2080 + \ud835\udc1f\u2081 + \u2026.", "id": "f13593:c0:m1"} {"signature": "def gradient(self):", "body": "grad = {}for i, f in enumerate(self._means):for varname, g in f.gradient().items():grad[f\"\"] = greturn grad", "docstring": "Sum of mean function derivatives.\n\nReturns\n-------\n\u2202\ud835\udc26 : dict\n \u2202\ud835\udc1f\u2080 + \u2202\ud835\udc1f\u2081 + \u2026.", "id": "f13593:c0:m2"} {"signature": "def __init__(self, n):", "body": "self._offset = Scalar()self._offset.bounds = (-, +)self._n = nFunction.__init__(self, \"\", offset=self._offset)", "docstring": "Constructor.\n\nParameters\n----------\nn : int\n Size of the \ud835\udfcf array.", "id": "f13595:c0:m0"} {"signature": "def fix_offset(self):", "body": "self._fix(\"\")", "docstring": "Prevent \u03b8 update during optimization.", "id": "f13595:c0:m1"} {"signature": "def unfix_offset(self):", "body": "self._unfix(\"\")", "docstring": "Enable \u03b8 update during optimization.", "id": "f13595:c0:m2"} {"signature": "def value(self):", "body": "return full(self._n, self._offset.value)", "docstring": "Offset mean.\n\nReturns\n-------\n\ud835\udc26 : (n,) ndarray\n \u03b8\u22c5\ud835\udfcf.", "id": "f13595:c0:m3"} {"signature": "def gradient(self):", "body": "return dict(offset=ones(self._n))", "docstring": "Gradient of the offset function.\n\nReturns\n-------\noffset : (n,) ndarray\n Vector \ud835\udfcf.", "id": "f13595:c0:m4"} {"signature": "@propertydef offset(self):", "body": "return self._offset.value", "docstring": "Offset parameter.", "id": "f13595:c0:m5"} {"signature": "def __init__(self, X):", "body": "X = asarray(X, float)m = X.shape[]self._effsizes = Vector(zeros(m))self._effsizes.bounds = [(-, +)] * mself._X = XFunction.__init__(self, \"\", effsizes=self._effsizes)", "docstring": "Constructor.\n\nParameters\n----------\nX : array_like\n Covariates X, from X\ud835\udf36.", "id": "f13596:c0:m0"} {"signature": "@propertydef X(self):", "body": "return self._X", "docstring": "An n\u00d7m matrix of covariates.", "id": "f13596:c0:m1"} {"signature": "def value(self):", "body": "return self._X @ self._effsizes", "docstring": "Linear mean function.\n\nReturns\n-------\n\ud835\udc26 : (n,) ndarray\n X\ud835\udf36.", "id": "f13596:c0:m2"} {"signature": "def gradient(self):", "body": "return dict(effsizes=self._X)", "docstring": "Gradient of the linear mean function over the effect sizes.\n\nReturns\n-------\neffsizes : (n, m) ndarray\n X.", "id": "f13596:c0:m3"} {"signature": 
"@propertydef effsizes(self):", "body": "return self._effsizes.value", "docstring": "Effect-sizes parameter, \ud835\udf36, of size m.", "id": "f13596:c0:m4"} {"signature": "def __init__(self, dim):", "body": "self._dim = dimself._I = eye(dim)self._logscale = Scalar()Function.__init__(self, \"\", logscale=self._logscale)self._logscale.bounds = (-, +)", "docstring": "Constructor.\n\nParameters\n----------\ndim : int\n Matrix dimension, d.", "id": "f13597:c0:m0"} {"signature": "@propertydef scale(self):", "body": "return exp(self._logscale)", "docstring": "Scale parameter.", "id": "f13597:c0:m1"} {"signature": "@propertydef dim(self):", "body": "return self._I.shape[]", "docstring": "Dimension of the matrix, d.\n\nIt corresponds to the number of rows and to the number of columns.", "id": "f13597:c0:m3"} {"signature": "def value(self):", "body": "return self.scale * self._I", "docstring": "Covariance matrix.\n\nReturns\n-------\nK : ndarray\n s\u22c5I, for scale s and a d\u00d7d identity matrix I.", "id": "f13597:c0:m4"} {"signature": "def gradient(self):", "body": "return dict(logscale=self.value())", "docstring": "Derivative of the covariance matrix over log(s), s\u22c5I.\n\nReturns\n-------\nlogscale : ndarray\n s\u22c5I, for scale s and a d\u00d7d identity matrix I.", "id": "f13597:c0:m5"} {"signature": "def __init__(self, K0):", "body": "from numpy_sugar.linalg import check_symmetryself._logscale = Scalar()Function.__init__(self, \"\", logscale=self._logscale)self._logscale.bounds = (-, +)if not check_symmetry(K0):raise ValueError(\"\")self._K0 = K0", "docstring": "Constructor.\n\nParameters\n----------\nK0 : array_like\n A semi-definite positive matrix.", "id": "f13598:c0:m0"} {"signature": "@propertydef scale(self):", "body": "return float(exp(self._logscale.value))", "docstring": "Scale parameter, s.", "id": "f13598:c0:m1"} {"signature": "def value(self):", "body": "return self.scale * self._K0", "docstring": "Covariance matrix, s\u22c5K\u2080.\n\nReturns\n-------\nK : ndarray\n s\u22c5K\u2080.", "id": "f13598:c0:m3"} {"signature": "def gradient(self):", "body": "return dict(logscale=self.scale * self._K0)", "docstring": "Derivative of the covariance matrix over log(s).\n\nReturns\n-------\nlogscale : float\n s\u22c5K\u2080.", "id": "f13598:c0:m4"} {"signature": "def __init__(self, dim):", "body": "from numpy_sugar import epsilondim = int(dim)tsize = ((dim + ) * dim) // self._L = zeros((dim, dim))self._tril1 = tril_indices_from(self._L, k=-)self._diag = diag_indices_from(self._L)self._L[self._tril1] = self._L[self._diag] = self._epsilon = epsilon.small * self._Lu = Vector(zeros(tsize))self._Lu.value[: tsize - dim] = n = self.L.shape[]self._grad_Lu = zeros((n, n, self._Lu.shape[]))Function.__init__(self, \"\", Lu=self._Lu)bounds = [(-inf, +inf)] * (tsize - dim)bounds += [(log(epsilon.small * ), +)] * dimself._Lu.bounds = boundsself._cache = {\"\": None}self.listen(self._parameters_update)self._nparams = tsize", "docstring": "Constructor.\n\nParameters\n----------\ndim : int\n Dimension d of the free-form covariance matrix.", "id": "f13599:c0:m0"} {"signature": "@propertydef nparams(self):", "body": "return self._nparams", "docstring": "Number of parameters.", "id": "f13599:c0:m2"} {"signature": "def listen(self, func):", "body": "self._Lu.listen(func)", "docstring": "Listen to parameters change.\n\nParameters\n----------\nfunc : callable\n Function to be called when a parameter changes.", "id": "f13599:c0:m3"} {"signature": "@propertydef shape(self):", "body": "n = self._L.shape[]return 
(n, n)", "docstring": "Array shape.", "id": "f13599:c0:m4"} {"signature": "def fix(self):", "body": "self._Lu.fix()", "docstring": "Disable parameter optimisation.", "id": "f13599:c0:m5"} {"signature": "def unfix(self):", "body": "self._Lu.unfix()", "docstring": "Enable parameter optimisation.", "id": "f13599:c0:m6"} {"signature": "def eigh(self):", "body": "from numpy.linalg import svdif self._cache[\"\"] is not None:return self._cache[\"\"]U, S = svd(self.L)[:]S *= SS += self._epsilonself._cache[\"\"] = S, Ureturn self._cache[\"\"]", "docstring": "Eigen decomposition of K.\n\nReturns\n-------\nS : ndarray\n The eigenvalues in ascending order, each repeated according to its\n multiplicity.\nU : ndarray\n Normalized eigenvectors.", "id": "f13599:c0:m7"} {"signature": "@propertydef Lu(self):", "body": "return self._Lu.value", "docstring": "Lower-triangular, flat part of L.", "id": "f13599:c0:m8"} {"signature": "@propertydef L(self):", "body": "m = len(self._tril1[])self._L[self._tril1] = self._Lu.value[:m]self._L[self._diag] = exp(self._Lu.value[m:])return self._L", "docstring": "Lower-triangular matrix L such that K = LL\u1d40 + \u03f5I.\n\nReturns\n-------\nL : (d, d) ndarray\n Lower-triangular matrix.", "id": "f13599:c0:m10"} {"signature": "def logdet(self):", "body": "from numpy.linalg import slogdetK = self.value()sign, logdet = slogdet(K)if sign != :msg = \"\"msg += f\"\"raise RuntimeError(msg)return logdet", "docstring": "Log of \uff5cK\uff5c.\n\nReturns\n-------\nfloat\n Log-determinant of K.", "id": "f13599:c0:m12"} {"signature": "def value(self):", "body": "K = dot(self.L, self.L.T)return K + self._epsilon * eye(K.shape[])", "docstring": "Covariance matrix.\n\nReturns\n-------\nK : ndarray\n Matrix K = LL\u1d40 + \u03f5I, for a very small positive number \u03f5.", "id": "f13599:c0:m13"} {"signature": "def gradient(self):", "body": "L = self.Lself._grad_Lu[:] = for i in range(len(self._tril1[])):row = self._tril1[][i]col = self._tril1[][i]self._grad_Lu[row, :, i] = L[:, col]self._grad_Lu[:, row, i] += L[:, col]m = len(self._tril1[])for i in range(len(self._diag[])):row = self._diag[][i]col = self._diag[][i]self._grad_Lu[row, :, m + i] = L[row, col] * L[:, col]self._grad_Lu[:, row, m + i] += L[row, col] * L[:, col]return {\"\": self._grad_Lu}", "docstring": "Derivative of the covariance matrix over the parameters of L.\n\nReturns\n-------\nLu : ndarray\n Derivative of K over the lower triangular part of L.", "id": "f13599:c0:m14"} {"signature": "def __init__(self, covariances):", "body": "self._covariances = [c for c in covariances]Function.__init__(self, \"\", composite=self._covariances)", "docstring": "Constructor.\n\nParameters\n----------\ncovariances : list\n List of covariance functions.", "id": "f13607:c0:m0"} {"signature": "def value(self):", "body": "return add.reduce([cov.value() for cov in self._covariances])", "docstring": "r\"\"\"\n Sum of covariance matrices.\n\n Returns\n -------\n K : ndarray\n K\u2080 + K\u2081 + \u22ef", "id": "f13607:c0:m1"} {"signature": "def gradient(self):", "body": "grad = {}for i, f in enumerate(self._covariances):for varname, g in f.gradient().items():grad[f\"\"] = greturn grad", "docstring": "Sum of covariance function derivatives.\n\nReturns\n-------\ndict\n \u2202K\u2080 + \u2202K\u2081 + \u22ef", "id": "f13607:c0:m2"} {"signature": "def __init__(self, n, m):", "body": "self._L = ones((n, m))self._Lu = Vector(self._L.ravel())Function.__init__(self, \"\", Lu=self._Lu)", "docstring": "Constructor.\n\nParameters\n----------\nn : int\n 
Covariance dimension.\nm : int\n Upper limit of the covariance matrix rank.", "id": "f13608:c0:m0"} {"signature": "@propertydef nparams(self):", "body": "return self._L.size", "docstring": "Number of parameters.", "id": "f13608:c0:m1"} {"signature": "def listen(self, func):", "body": "self._Lu.listen(func)", "docstring": "Listen to parameters change.\n\nParameters\n----------\nfunc : callable\n Function to be called when a parameter changes.", "id": "f13608:c0:m2"} {"signature": "def fix(self):", "body": "self._Lu.fix()", "docstring": "Disable parameter optimisation.", "id": "f13608:c0:m3"} {"signature": "def unfix(self):", "body": "self._Lu.unfix()", "docstring": "Enable parameter optimisation.", "id": "f13608:c0:m4"} {"signature": "@propertydef Lu(self):", "body": "return self._Lu.value", "docstring": "Lower-triangular, flat part of L.", "id": "f13608:c0:m5"} {"signature": "@propertydef L(self):", "body": "return self._L", "docstring": "Matrix L from K = LL\u1d40.\n\nReturns\n-------\nL : (n, m) ndarray\n Parametric matrix.", "id": "f13608:c0:m7"} {"signature": "@propertydef shape(self):", "body": "n = self._L.shape[]return (n, n)", "docstring": "Array shape.", "id": "f13608:c0:m9"} {"signature": "def value(self):", "body": "return dot(self.L, self.L.T)", "docstring": "Covariance matrix.\n\nReturns\n-------\nK : (n, n) ndarray\n K = LL\u1d40.", "id": "f13608:c0:m10"} {"signature": "def gradient(self):", "body": "L = self.Ln = self.L.shape[]grad = {\"\": zeros((n, n, n * self._L.shape[]))}for ii in range(self._L.shape[] * self._L.shape[]):row = ii // self._L.shape[]col = ii % self._L.shape[]grad[\"\"][row, :, ii] = L[:, col]grad[\"\"][:, row, ii] += L[:, col]return grad", "docstring": "Derivative of the covariance matrix over the lower triangular, flat part of L.\n\nIt is equal to\n\n \u2202K/\u2202L\u1d62\u2c7c = AL\u1d40 + LA\u1d40,\n\nwhere A\u1d62\u2c7c is an n\u00d7m matrix of zeros except at [A\u1d62\u2c7c]\u1d62\u2c7c=1.\n\nReturns\n-------\nLu : ndarray\n Derivative of K over the lower-triangular, flat part of L.", "id": "f13608:c0:m11"} {"signature": "def __init__(self, G, dim, rank):", "body": "self._cache = {\"\": None}self._C0 = LRFreeFormCov(dim, rank)self._C0.name = \"\"self._C1 = FreeFormCov(dim)self._C1.name = \"\"G = atleast_2d(asarray(G, float))self._G = Gself._Sxe = Noneself._Sx = Noneself._Lx = Noneself._LxG = Noneself._diag_LxGGLx = Noneself._Lxe = Noneself._LxGe = Noneself._diag_LxGGLxe = NoneFunction.__init__(self, \"\", composite=[(\"\", self._C0), (\"\", self._C1)])self._C0.listen(self._parameters_update)self._C1.listen(self._parameters_update)", "docstring": "Constructor.\n\nParameters\n----------\ndim : int\n Dimension d for the square matrices C\u2080 and C\u2081.\nrank : int\n Maximum rank of the C\u2081 matrix.", "id": "f13610:c0:m0"} {"signature": "@propertydef nparams(self):", "body": "return self._C0.nparams + self._C1.nparams", "docstring": "Number of parameters.", "id": "f13610:c0:m2"} {"signature": "@property@lru_cache(maxsize=None)def Ge(self):", "body": "from scipy.linalg import svdfrom numpy_sugar.linalg import ddotU, S, _ = svd(self._G, full_matrices=False, check_finite=False)if U.shape[] < self._G.shape[]:return ddot(U, S)return self._G", "docstring": "Result of US from the SVD decomposition G = USV\u1d40.", "id": "f13610:c0:m3"} {"signature": "def listen(self, func):", "body": "self._C0.listen(func)self._C1.listen(func)", "docstring": "Listen to parameters change.\n\nParameters\n----------\nfunc : callable\n Function to be called when a parameter 
changes.", "id": "f13610:c0:m7"} {"signature": "@propertydef Lx(self):", "body": "self._init_svd()return self._Lx", "docstring": "L\u2093.", "id": "f13610:c0:m8"} {"signature": "@propertydef _LhD(self):", "body": "from numpy_sugar.linalg import ddotself._init_svd()if self._cache[\"\"] is not None:return self._cache[\"\"]S1, U1 = self.C1.eigh()U1S1 = ddot(U1, / sqrt(S1))Sh, Uh = eigh(U1S1.T @ self.C0.value() @ U1S1)self._cache[\"\"] = {\"\": (U1S1 @ Uh).T,\"\": / (kron(Sh, self._Sx) + ),\"\": / (kron(Sh, self._Sxe) + ),}return self._cache[\"\"]", "docstring": "Implements L\u2095 and D.\n\nReturns\n-------\nLh : ndarray\n U\u2095\u1d40 S\u2081\u207b\u00bd U\u2081\u1d40.\nD : ndarray\n (S\u2095 \u2297 S\u2093 + I\u2095\u2093)\u207b\u00b9.", "id": "f13610:c0:m10"} {"signature": "@propertydef Lh(self):", "body": "return self._LhD[\"\"]", "docstring": "L\u2095.", "id": "f13610:c0:m11"} {"signature": "@propertydef D(self):", "body": "return self._LhD[\"\"]", "docstring": "(S\u2095 \u2297 S\u2093 + I\u2095\u2093)\u207b\u00b9.", "id": "f13610:c0:m12"} {"signature": "@propertydef G(self):", "body": "return self._G", "docstring": "User-provided matrix G, n\u00d7m.", "id": "f13610:c0:m14"} {"signature": "@propertydef C0(self):", "body": "return self._C0", "docstring": "Semi-definite positive matrix C\u2080.", "id": "f13610:c0:m15"} {"signature": "@propertydef C1(self):", "body": "return self._C1", "docstring": "Definite positive matrix C\u2081.", "id": "f13610:c0:m16"} {"signature": "def value(self):", "body": "C0 = self._C0.value()C1 = self._C1.value()return kron(C0, self._GG) + kron(C1, self._I)", "docstring": "Covariance matrix K = C\u2080 \u2297 GG\u1d40 + C\u2081 \u2297 I.\n\nReturns\n-------\nK : ndarray\n C\u2080 \u2297 GG\u1d40 + C\u2081 \u2297 I.", "id": "f13610:c0:m17"} {"signature": "def gradient(self):", "body": "self._init_svd()C0 = self._C0.gradient()[\"\"].TC1 = self._C1.gradient()[\"\"].Tgrad = {\"\": kron(C0, self._X).T, \"\": kron(C1, self._I).T}return grad", "docstring": "Gradient of K.\n\nReturns\n-------\nC0 : ndarray\n Derivative of C\u2080 over its parameters.\nC1 : ndarray\n Derivative of C\u2081 over its parameters.", "id": "f13610:c0:m18"} {"signature": "def gradient_dot(self, v):", "body": "self._init_svd()V = unvec(v, (self.G.shape[], -) + v.shape[:])r = {}C = self._C0.gradient()[\"\"]r[\"\"] = tensordot(V.T @ self.G @ self.G.T, C, axes=([-], []))r[\"\"] = r[\"\"].reshape(V.shape[:] + (-,) + (C.shape[-],), order=\"\")C = self._C1.gradient()[\"\"]r[\"\"] = tensordot(V.T, C, axes=([-], []))r[\"\"] = r[\"\"].reshape(V.shape[:] + (-,) + (C.shape[-],), order=\"\")return r", "docstring": "Implements \u2202K\u22c5v.\n\nParameters\n----------\nv : array_like\n Vector from \u2202K\u22c5v.\n\nReturns\n-------\nC0.Lu : ndarray\n \u2202K\u22c5v, where the gradient is taken over the C\u2080 parameters.\nC1.Lu : ndarray\n \u2202K\u22c5v, where the gradient is taken over the C\u2081 parameters.", "id": "f13610:c0:m19"} {"signature": "def solve(self, v):", "body": "from numpy_sugar.linalg import ddotself._init_svd()L = kron(self.Lh, self.Lx)return L.T @ ddot(self.D, L @ v, left=True)", "docstring": "Implements the product K\u207b\u00b9\u22c5v.\n\nParameters\n----------\nv : array_like\n Array to be multiplied.\n\nReturns\n-------\nx : ndarray\n Solution x to the equation K\u22c5x = y.", "id": "f13610:c0:m20"} {"signature": "def logdet(self):", "body": "self._init_svd()return -log(self._De).sum() + self.G.shape[] * self.C1.logdet()", "docstring": "Implements log|K| = - log|D| + 
n\u22c5log|C\u2081|.\n\nReturns\n-------\nlogdet : float\n Log-determinant of K.", "id": "f13610:c0:m21"} {"signature": "def logdet_gradient(self):", "body": "from numpy_sugar.linalg import dotdself._init_svd()dC0 = self._C0.gradient()[\"\"]grad_C0 = zeros_like(self._C0.Lu)for i in range(self._C0.Lu.shape[]):t = kron(dotd(self.Lh, dC0[..., i] @ self.Lh.T), self._diag_LxGGLxe)grad_C0[i] = (self._De * t).sum()dC1 = self._C1.gradient()[\"\"]grad_C1 = zeros_like(self._C1.Lu)p = self._Sxe.shape[]np = self._G.shape[] - pfor i in range(self._C1.Lu.shape[]):t = (dotd(self.Lh, dC1[..., i] @ self.Lh.T) * np).sum()t1 = kron(dotd(self.Lh, dC1[..., i] @ self.Lh.T), eye(p))t += (self._De * t1).sum()grad_C1[i] = treturn {\"\": grad_C0, \"\": grad_C1}", "docstring": "Implements \u2202log|K| = Tr[K\u207b\u00b9\u2202K].\n\nIt can be shown that::\n\n \u2202log|K| = diag(D)\u1d40diag(L(\u2202K)L\u1d40) = diag(D)\u1d40(diag(L\u2095\u2202C\u2080L\u2095\u1d40)\u2297diag(L\u2093GG\u1d40L\u2093\u1d40)),\n\nwhen the derivative is over the parameters of C\u2080. Similarly,\n\n \u2202log|K| = diag(D)\u1d40diag(L(\u2202K)L\u1d40) = diag(D)\u1d40(diag(L\u2095\u2202C\u2081L\u2095\u1d40)\u2297diag(I)),\n\nover the parameters of C\u2081.\n\nReturns\n-------\nC0 : ndarray\n Derivative of C\u2080 over its parameters.\nC1 : ndarray\n Derivative of C\u2081 over its parameters.", "id": "f13610:c0:m22"} {"signature": "def LdKL_dot(self, v, v1=None):", "body": "self._init_svd()def dot(a, b):r = tensordot(a, b, axes=([], []))if a.ndim > b.ndim:return r.transpose([, , ])return rLh = self.LhV = unvec(v, (self.Lx.shape[], -) + v.shape[:])LdKL_dot = {\"\": empty((v.shape[],) + v.shape[:] + (self._C0.Lu.shape[],)),\"\": empty((v.shape[],) + v.shape[:] + (self._C1.Lu.shape[],)),}dC0 = self._C0.gradient()[\"\"]for i in range(self._C0.Lu.shape[]):t = dot(self._LxG, dot(self._LxG.T, dot(V, Lh @ dC0[..., i] @ Lh.T)))LdKL_dot[\"\"][..., i] = t.reshape((-,) + t.shape[:], order=\"\")dC1 = self._C1.gradient()[\"\"]for i in range(self._C1.Lu.shape[]):t = dot(V, Lh @ dC1[..., i] @ Lh.T)LdKL_dot[\"\"][..., i] = t.reshape((-,) + t.shape[:], order=\"\")return LdKL_dot", "docstring": "Implements L(\u2202K)L\u1d40v.\n\nThe array v can have one or two dimensions and the first dimension has to have\nsize n\u22c5p.\n\nLet vec(V) = v. We have\n\n L(\u2202K)L\u1d40\u22c5v = ((L\u2095\u2202C\u2080L\u2095\u1d40) \u2297 (L\u2093GG\u1d40L\u2093\u1d40))vec(V) = vec(L\u2093GG\u1d40L\u2093\u1d40VL\u2095\u2202C\u2080L\u2095\u1d40),\n\nwhen the derivative is over the parameters of C\u2080. 
Similarly,\n\n L(\u2202K)L\u1d40v = ((L\u2095\u2202C\u2081L\u2095\u1d40) \u2297 (L\u2093L\u2093\u1d40))vec(V) = vec(L\u2093L\u2093\u1d40VL\u2095\u2202C\u2081L\u2095\u1d40),\n\nover the parameters of C\u2081.", "id": "f13610:c0:m23"} {"signature": "def __init__(self, X):", "body": "self._logscale = Scalar()self._X = XFunction.__init__(self, \"\", logscale=self._logscale)self._logscale.bounds = (-, +)", "docstring": "Constructor.\n\nParameters\n----------\nX : array_like\n Matrix X from K = s\u22c5XX\u1d40.", "id": "f13611:c0:m0"} {"signature": "@propertydef X(self):", "body": "return self._X", "docstring": "Matrix X from K = s\u22c5XX\u1d40.", "id": "f13611:c0:m1"} {"signature": "def fix(self):", "body": "self._fix(\"\")", "docstring": "Prevent s update during optimization.", "id": "f13611:c0:m2"} {"signature": "def unfix(self):", "body": "self._unfix(\"\")", "docstring": "Enable s update during optimization.", "id": "f13611:c0:m3"} {"signature": "@propertydef scale(self):", "body": "return exp(self._logscale.value)", "docstring": "Scale parameter.", "id": "f13611:c0:m4"} {"signature": "def value(self):", "body": "X = self.Xreturn self.scale * (X @ X.T)", "docstring": "Covariance matrix.\n\nReturns\n-------\nK : ndarray\n s\u22c5XX\u1d40.", "id": "f13611:c0:m6"} {"signature": "def gradient(self):", "body": "return dict(logscale=self.value())", "docstring": "Derivative of the covariance matrix over log(s).\n\nReturns\n-------\nlogscale : ndarray\n s\u22c5XX\u1d40.", "id": "f13611:c0:m7"} {"signature": "def _bstar_1effect(beta, alpha, yTBy, yTBX, yTBM, XTBX, XTBM, MTBM):", "body": "from numpy_sugar import epsilonfrom numpy_sugar.linalg import dotdfrom numpy import sumr = full(MTBM[].shape[], yTBy)r -= * add.reduce([dot(i, beta) for i in yTBX])r -= * add.reduce([i * alpha for i in yTBM])r += add.reduce([dotd(beta.T, dot(i, beta)) for i in XTBX])r += add.reduce([dotd(beta.T, i * alpha) for i in XTBM])r += add.reduce([sum(alpha * i * beta, axis=) for i in XTBM])r += add.reduce([alpha * i.ravel() * alpha for i in MTBM])return clip(r, epsilon.tiny, inf)", "docstring": "Same as :func:`_bstar_set` but for single-effect.", "id": "f13612:m0"} {"signature": "def _bstar_set(beta, alpha, yTBy, yTBX, yTBM, XTBX, XTBM, MTBM):", "body": "from numpy_sugar import epsilonr = yTByr -= * add.reduce([i @ beta for i in yTBX])r -= * add.reduce([i @ alpha for i in yTBM])r += add.reduce([beta.T @ i @ beta for i in XTBX])r += * add.reduce([beta.T @ i @ alpha for i in XTBM])r += add.reduce([alpha.T @ i @ alpha for i in MTBM])return clip(r, epsilon.tiny, inf)", "docstring": "Compute -2\ud835\udc32\u1d40BE\u2c7c\ud835\udc1b\u2c7c + (\ud835\udc1b\u2c7cE\u2c7c)\u1d40BE\u2c7c\ud835\udc1b\u2c7c.\n\nFor \ud835\udc1b\u2c7c = [\ud835\udf37\u2c7c\u1d40 \ud835\udf36\u2c7c\u1d40]\u1d40.", "id": "f13612:m1"} {"signature": "@cachedef null_lml(self):", "body": "n = self._nsamplesscale = self.null_scalereturn (self._static_lml() - n * log(scale)) / ", "docstring": "Log of the marginal likelihood for the null hypothesis.\n\nIt is implemented as ::\n\n 2\u00b7log(p(Y)) = -n\u00b7log(2\ud835\udf0bs) - log\uff5cD\uff5c - n,\n\nReturns\n-------\nlml : float\n Log of the marginal likelihood.", "id": "f13612:c0:m1"} {"signature": "@property@cachedef null_beta(self):", "body": "ETBE = self._ETBEyTBX = self._yTBXA = sum(i.XTBX for i in ETBE)b = sum(yTBX)return rsolve(A, b)", "docstring": "Optimal \ud835\udf37 according to the marginal likelihood.\n\nIt is computed by solving the equation ::\n\n (X\u1d40BX)\ud835\udf37 = 
X\u1d40B\ud835\udc32.\n\nReturns\n-------\nbeta : ndarray\n Optimal \ud835\udf37.", "id": "f13612:c0:m2"} {"signature": "@property@cachedef null_beta_covariance(self):", "body": "A = sum(i @ j.T for (i, j) in zip(self._XTQDi, self._XTQ))return self.null_scale * pinv(A)", "docstring": "Covariance of the optimal \ud835\udf37 according to the marginal likelihood.\n\nReturns\n-------\nbeta_covariance : ndarray\n (X\u1d40(s(K + vI))\u207b\u00b9X)\u207b\u00b9.", "id": "f13612:c0:m3"} {"signature": "@property@cachedef null_beta_se(self):", "body": "return sqrt(self.null_beta_covariance.diagonal())", "docstring": "Standard errors of the optimal \ud835\udf37.\n\nReturns\n-------\nbeta_se : ndarray\n Square root of the diagonal of the beta covariance.", "id": "f13612:c0:m4"} {"signature": "@property@cachedef null_scale(self):", "body": "n = self._nsamplesbeta = self.null_betasqrdot = self._yTBy - dot(sum(self._yTBX), beta)return sqrdot / n", "docstring": "Optimal s according to the marginal likelihood.\n\nThe optimal s is given by ::\n\n s = n\u207b\u00b9\ud835\udc32\u1d40B(\ud835\udc32 - X\ud835\udf37),\n\nwhere \ud835\udf37 is optimal.\n\nReturns\n-------\nscale : float\n Optimal scale.", "id": "f13612:c0:m5"} {"signature": "def fast_scan(self, M, verbose=True):", "body": "from tqdm import tqdmif M.ndim != :raise ValueError(\"\")p = M.shape[]lmls = empty(p)effsizes0 = empty((p, self._XTQ[].shape[]))effsizes0_se = empty((p, self._XTQ[].shape[]))effsizes1 = empty(p)effsizes1_se = empty(p)scales = empty(p)if verbose:nchunks = min(p, )else:nchunks = min(p, )chunk_size = (p + nchunks - ) // nchunksfor i in tqdm(range(nchunks), desc=\"\", disable=not verbose):start = i * chunk_sizestop = min(start + chunk_size, M.shape[])r = self._fast_scan_chunk(M[:, start:stop])lmls[start:stop] = r[\"\"]effsizes0[start:stop, :] = r[\"\"]effsizes0_se[start:stop, :] = r[\"\"]effsizes1[start:stop] = r[\"\"]effsizes1_se[start:stop] = r[\"\"]scales[start:stop] = r[\"\"]return {\"\": lmls,\"\": effsizes0,\"\": effsizes0_se,\"\": effsizes1,\"\": effsizes1_se,\"\": scales,}", "docstring": "LMLs, fixed-effect sizes, and scales for single-marker scan.\n\nParameters\n----------\nM : array_like\n Matrix of fixed-effects across columns.\nverbose : bool, optional\n ``True`` for progress information; ``False`` otherwise.\n Defaults to ``True``.\n\nReturns\n-------\nlmls : ndarray\n Log of the marginal likelihoods.\neffsizes0 : ndarray\n Covariate fixed-effect sizes.\neffsizes1 : ndarray\n Candidate set fixed-effect sizes.\nscales : ndarray\n Scales.", "id": "f13612:c0:m6"} {"signature": "def scan(self, M):", "body": "from numpy_sugar.linalg import ddotfrom numpy_sugar import is_all_finiteM = asarray(M, float)if M.shape[] == :return {\"\": self.null_lml(),\"\": self.null_beta,\"\": self.null_beta_se,\"\": empty(()),\"\": empty(()),\"\": self.null_scale,}if not is_all_finite(M):raise ValueError(\"\")MTQ = [dot(M.T, Q) for Q in self._QS[] if Q.size > ]yTBM = [dot(i, j.T) for (i, j) in zip(self._yTQDi, MTQ)]XTBM = [dot(i, j.T) for (i, j) in zip(self._XTQDi, MTQ)]D = self._DMTBM = [ddot(i, / j) @ i.T for i, j in zip(MTQ, D) if j.min() > ]return self._multicovariate_set(yTBM, XTBM, MTBM)", "docstring": "LML, fixed-effect sizes, and scale of the candidate set.\n\nParameters\n----------\nM : array_like\n Fixed-effects set.\n\nReturns\n-------\nlml : float\n Log of the marginal likelihood.\neffsizes0 : ndarray\n Covariates fixed-effect sizes.\neffsizes0_se : ndarray\n Covariates fixed-effect size standard errors.\neffsizes1 : ndarray\n 
Candidate set fixed-effect sizes.\neffsizes1_se : ndarray\n Candidate fixed-effect size standard errors.\nscale : ndarray\n Optimal scale.", "id": "f13612:c0:m7"} {"signature": "def __init__(self, Y, A, X, G, terms):", "body": "self._Y = asarray(Y, float)self._A = asarray(A, float)self._X = asarray(X, float)self._G = asarray(G, float)self._H = terms[\"\"]self._logdetK = terms[\"\"]self._W = terms[\"\"]self._yKiy = terms[\"\"]self._WA = terms[\"\"]self._WL0 = terms[\"\"]self._Lz = terms[\"\"]self._XRiM = terms[\"\"]self._ZiXRiy = terms[\"\"]self._ZiXRiM = terms[\"\"]self._MRiM = terms[\"\"]self._MRiXZiXRiM = terms[\"\"]self._MRiy = terms[\"\"]self._MRiXZiXRiy = terms[\"\"]", "docstring": "Constructor.\n\nParameters\n----------\nY : (n, p) array_like\n Outcome matrix.\nA : (n, n) array_like\n Trait-by-trait design matrix.\nX : (n, c) array_like\n Covariates design matrix.\nG : (n, r) array_like\n Matrix G from the GG\u1d40 term.\nterms : dict\n Pre-computed terms.", "id": "f13613:c0:m0"} {"signature": "@cachedef null_lml(self):", "body": "np = self._nsamples * self._ntraitsscale = self.null_scalereturn self._static_lml() / - np * safe_log(scale) / - np / ", "docstring": "Log of the marginal likelihood for the null hypothesis.\n\nIt is implemented as ::\n\n 2\u00b7log(p(Y)) = -n\u00b7p\u00b7log(2\ud835\udf0bs) - log\uff5cK\uff5c - n\u00b7p,\n\nfor which s and \ud835\udea9 are optimal.\n\nReturns\n-------\nlml : float\n Log of the marginal likelihood.", "id": "f13613:c0:m1"} {"signature": "@property@cachedef null_beta(self):", "body": "return rsolve(self._MKiM, self._MKiy)", "docstring": "Optimal \ud835\udec3 according to the marginal likelihood.\n\nIt is computed by solving the equation ::\n\n M\u1d40K\u207b\u00b9M\ud835\udec3 = M\u1d40K\u207b\u00b9\ud835\udc32,\n\nfor \ud835\udc32 = vec(Y) and M = (A \u2297 X)vec(\ud835\udea9).\n\nReturns\n-------\neffsizes : ndarray\n Optimal \ud835\udec3.", "id": "f13613:c0:m2"} {"signature": "@property@cachedef null_beta_covariance(self):", "body": "return self.null_scale * pinv(self._H)", "docstring": "Covariance of the optimal \ud835\udec3 according to the marginal likelihood.\n\nReturns\n-------\neffsizes-covariance : ndarray\n s(M\u1d40K\u207b\u00b9M)\u207b\u00b9.", "id": "f13613:c0:m3"} {"signature": "@property@cachedef null_beta_se(self):", "body": "return sqrt(self.null_beta_covariance.diagonal())", "docstring": "Standard errors of the optimal \ud835\udec3.\n\nReturns\n-------\nbeta_se : ndarray\n Square root of the diagonal of the beta covariance.", "id": "f13613:c0:m4"} {"signature": "@property@cachedef null_scale(self):", "body": "np = self._nsamples * self._ntraitsb = vec(self.null_beta)mKiy = b.T @ self._MKiysqrtdot = self._yKiy - mKiyscale = sqrtdot / npreturn scale", "docstring": "Optimal s according to the marginal likelihood.\n\nThe optimal s is given by\n\n s = (n\u00b7p)\u207b\u00b9\ud835\udc32\u1d40K\u207b\u00b9(\ud835\udc32 - \ud835\udc26),\n\nwhere \ud835\udc26 = (A \u2297 X)vec(\ud835\udea9) and \ud835\udea9 is optimal.\n\nReturns\n-------\nscale : float\n Optimal scale.", "id": "f13613:c0:m5"} {"signature": "def scan(self, A1, X1):", "body": "from numpy import emptyfrom numpy.linalg import multi_dotfrom numpy_sugar import epsilon, is_all_finitefrom scipy.linalg import cho_solveA1 = asarray(A1, float)X1 = asarray(X1, float)if not is_all_finite(A1):raise ValueError(\"\")if not is_all_finite(X1):raise ValueError(\"\")if A1.shape[] == :beta_se = sqrt(self.null_beta_covariance.diagonal())return {\"\": self.null_lml(),\"\": 
unvec(self.null_beta, (self._ncovariates, -)),\"\": unvec(beta_se, (self._ncovariates, -)),\"\": empty((,)),\"\": empty((,)),\"\": self.null_scale,}X1X1 = X1.T @ X1XX1 = self._X.T @ X1AWA1 = self._WA.T @ A1A1W = A1.T @ self._WGX1 = self._G.T @ X1MRiM1 = kron(AWA1, XX1)M1RiM1 = kron(A1W @ A1, X1X1)M1Riy = vec(multi_dot([X1.T, self._Y, A1W.T]))XRiM1 = kron(self._WL0.T @ A1, GX1)ZiXRiM1 = cho_solve(self._Lz, XRiM1)MRiXZiXRiM1 = self._XRiM.T @ ZiXRiM1M1RiXZiXRiM1 = XRiM1.T @ ZiXRiM1M1RiXZiXRiy = XRiM1.T @ self._ZiXRiyT0 = [[self._MRiM, MRiM1], [MRiM1.T, M1RiM1]]T1 = [[self._MRiXZiXRiM, MRiXZiXRiM1], [MRiXZiXRiM1.T, M1RiXZiXRiM1]]T2 = [self._MRiy, M1Riy]T3 = [self._MRiXZiXRiy, M1RiXZiXRiy]MKiM = block(T0) - block(T1)MKiy = block(T2) - block(T3)beta = rsolve(MKiM, MKiy)mKiy = beta.T @ MKiycp = self._ntraits * self._ncovariateseffsizes0 = unvec(beta[:cp], (self._ncovariates, self._ntraits))effsizes1 = unvec(beta[cp:], (X1.shape[], A1.shape[]))np = self._nsamples * self._ntraitssqrtdot = self._yKiy - mKiyscale = clip(sqrtdot / np, epsilon.tiny, inf)lml = self._static_lml() / - np * safe_log(scale) / - np / effsizes_se = sqrt(clip(scale * pinv(MKiM).diagonal(), epsilon.tiny, inf))effsizes0_se = unvec(effsizes_se[:cp], (self._ncovariates, self._ntraits))effsizes1_se = unvec(effsizes_se[cp:], (X1.shape[], A1.shape[]))return {\"\": lml,\"\": effsizes0,\"\": effsizes1,\"\": scale,\"\": effsizes0_se,\"\": effsizes1_se,}", "docstring": "LML, fixed-effect sizes, and scale of the candidate set.\n\nParameters\n----------\nA1 : (p, e) array_like\n Trait-by-environments design matrix.\nX1 : (n, m) array_like\n Variants set matrix.\n\nReturns\n-------\nlml : float\n Log of the marginal likelihood for the set.\neffsizes0 : (c, p) ndarray\n Fixed-effect sizes for the covariates.\neffsizes0_se : (c, p) ndarray\n Fixed-effect size standard errors for the covariates.\neffsizes1 : (m, e) ndarray\n Fixed-effect sizes for the candidates.\neffsizes1_se : (m, e) ndarray\n Fixed-effect size standard errors for the candidates.\nscale : float\n Optimal scale.", "id": "f13613:c0:m6"} {"signature": "def __init__(self, y, X, QS=None, restricted=False):", "body": "from numpy_sugar import is_all_finitefrom numpy_sugar.linalg import ddot, economic_svdlogistic = Scalar()logistic.listen(self._delta_update)logistic.bounds = (-numbers.logmax, +numbers.logmax)Function.__init__(self, \"\", logistic=logistic)self._logistic = logisticy = asarray(y, float).ravel()if not is_all_finite(y):raise ValueError(\"\")X = atleast_2d(asarray(X, float).T).Tif not is_all_finite(X):raise ValueError(\"\")self._optimal = {\"\": False, \"\": False}if QS is None:QS = economic_qs_zeros(len(y))self.delta = logistic.fix()else:self.delta = if QS[][].shape[] != len(y):msg = \"\"raise ValueError(msg)if y.shape[] != X.shape[]:msg = \"\"raise ValueError(msg)self._Darr = []n = y.shape[]d = self.deltaif QS[].size > :self._Darr += [QS[] * ( - d) + d]if QS[].size < n:self._Darr += [full(n - QS[].size, d)]self._y = yself._QS = QSSVD = economic_svd(X)self._X = {\"\": X, \"\": ddot(SVD[], SVD[]), \"\": SVD[]}self._tbeta = zeros(len(SVD[]))self._scale = self._fix = {\"\": False, \"\": False}self._restricted = restricted", "docstring": "Constructor.\n\nParameters\n----------\ny : array_like\n Outcome.\nX : array_like\n Covariates as a two-dimensional array.\nQS : tuple\n Economic eigendecomposition in form of ``((Q0, Q1), S0)`` of a\n covariance matrix ``K``.\nrestricted : bool\n ``True`` for restricted maximum likelihood optimization; ``False``\n otherwise. 
Defaults to ``False``.", "id": "f13620:c0:m0"} {"signature": "@propertydef beta(self):", "body": "from numpy_sugar.linalg import rsolvereturn rsolve(self._X[\"\"], rsolve(self._X[\"\"], self.mean()))", "docstring": "Fixed-effect sizes.\n\nReturns\n-------\neffect-sizes : numpy.ndarray\n Optimal fixed-effect sizes.\n\nNotes\n-----\nSetting the derivative of log(p(\ud835\udc32)) over effect sizes equal\nto zero leads to solutions \ud835\udf37 from equation ::\n\n (Q\u1d40X)\u1d40D\u207b\u00b9(Q\u1d40X)\ud835\udf37 = (Q\u1d40X)\u1d40D\u207b\u00b9(Q\u1d40\ud835\udc32).", "id": "f13620:c0:m1"} {"signature": "@propertydef beta_covariance(self):", "body": "from numpy_sugar.linalg import ddottX = self._X[\"\"]Q = concatenate(self._QS[], axis=)S0 = self._QS[]D = self.v0 * S0 + self.v1D = D.tolist() + [self.v1] * (len(self._y) - len(D))D = asarray(D)A = inv(tX.T @ (Q @ ddot( / D, Q.T @ tX)))VT = self._X[\"\"]H = lstsq(VT, A, rcond=None)[]return lstsq(VT, H.T, rcond=None)[]", "docstring": "Estimates the covariance-matrix of the optimal beta.\n\nReturns\n-------\nbeta-covariance : ndarray\n (X\u1d40(s((1-\ud835\udeff)K + \ud835\udeffI))\u207b\u00b9X)\u207b\u00b9.\n\nReferences\n----------\n.. Rencher, A. C., & Schaalje, G. B. (2008). Linear models in statistics. John\n Wiley & Sons.", "id": "f13620:c0:m3"} {"signature": "def fix(self, param):", "body": "if param == \"\":super()._fix(\"\")else:self._fix[param] = True", "docstring": "Disable parameter optimization.\n\nParameters\n----------\nparam : str\n Possible values are ``\"delta\"``, ``\"beta\"``, and ``\"scale\"``.", "id": "f13620:c0:m4"} {"signature": "def unfix(self, param):", "body": "if param == \"\":self._unfix(\"\")else:self._fix[param] = False", "docstring": "Enable parameter optimization.\n\nParameters\n----------\nparam : str\n Possible values are ``\"delta\"``, ``\"beta\"``, and ``\"scale\"``.", "id": "f13620:c0:m5"} {"signature": "@propertydef v0(self):", "body": "return self.scale * ( - self.delta)", "docstring": "First variance.\n\nReturns\n-------\nv0 : float\n s(1 - \ud835\udeff).", "id": "f13620:c0:m6"} {"signature": "@propertydef v1(self):", "body": "return self.scale * self.delta", "docstring": "Second variance.\n\nReturns\n-------\nv1 : float\n s\ud835\udeff.", "id": "f13620:c0:m7"} {"signature": "def fit(self, verbose=True):", "body": "if not self._isfixed(\"\"):self._maximize_scalar(desc=\"\", rtol=, atol=, verbose=verbose)if not self._fix[\"\"]:self._update_beta()if not self._fix[\"\"]:self._update_scale()", "docstring": "Maximise the marginal likelihood.\n\nParameters\n----------\nverbose : bool, optional\n ``True`` for progress output; ``False`` otherwise.\n Defaults to ``True``.", "id": "f13620:c0:m8"} {"signature": "def get_fast_scanner(self):", "body": "v0 = self.v0v1 = self.v1QS = (self._QS[], v0 * self._QS[])return FastScanner(self._y, self.X, QS, v1)", "docstring": "Return :class:`.FastScanner` for association scan.\n\nReturns\n-------\nfast-scanner : :class:`.FastScanner`\n Instance of a class designed to perform very fast association scan.", "id": "f13620:c0:m9"} {"signature": "def value(self):", "body": "if not self._fix[\"\"]:self._update_beta()if not self._fix[\"\"]:self._update_scale()return self.lml()", "docstring": "Internal use only.", "id": "f13620:c0:m10"} {"signature": "def gradient(self):", "body": "raise NotImplementedError", "docstring": "Not implemented.", "id": "f13620:c0:m11"} {"signature": "@propertydef nsamples(self):", "body": "return len(self._y)", "docstring": "Number of samples, n.", "id": 
"f13620:c0:m12"} {"signature": "@propertydef ncovariates(self):", "body": "return self._X[\"\"].shape[]", "docstring": "Number of covariates, c.", "id": "f13620:c0:m13"} {"signature": "def lml(self):", "body": "reml = (self._logdetXX() - self._logdetH()) / if self._optimal[\"\"]:lml = self._lml_optimal_scale()else:lml = self._lml_arbitrary_scale()return lml + reml", "docstring": "Log of the marginal likelihood.\n\nReturns\n-------\nlml : float\n Log of the marginal likelihood.\n\nNotes\n-----\nThe log of the marginal likelihood is given by ::\n\n 2\u22c5log(p(\ud835\udc32)) = -n\u22c5log(2\u03c0) - n\u22c5log(s) - log|D| - (Q\u1d40\ud835\udc32)\u1d40s\u207b\u00b9D\u207b\u00b9(Q\u1d40\ud835\udc32)\n + (Q\u1d40\ud835\udc32)\u1d40s\u207b\u00b9D\u207b\u00b9(Q\u1d40X\ud835\udf37)/2 - (Q\u1d40X\ud835\udf37)\u1d40s\u207b\u00b9D\u207b\u00b9(Q\u1d40X\ud835\udf37).\n\nBy using the optimal \ud835\udf37, the log of the marginal likelihood can be rewritten\nas::\n\n 2\u22c5log(p(\ud835\udc32)) = -n\u22c5log(2\u03c0) - n\u22c5log(s) - log|D| + (Q\u1d40\ud835\udc32)\u1d40s\u207b\u00b9D\u207b\u00b9Q\u1d40(X\ud835\udf37-\ud835\udc32).\n\n\nIn the extreme case where \ud835\udf37 is such that \ud835\udc32 = X\ud835\udf37, the maximum is attained as\ns\u21920.\n\nFor optimals \ud835\udf37 and s, the log of the marginal likelihood can be further\nsimplified to ::\n\n 2\u22c5log(p(\ud835\udc32; \ud835\udf37, s)) = -n\u22c5log(2\u03c0) - n\u22c5log s - log|D| - n.", "id": "f13620:c0:m14"} {"signature": "@propertydef X(self):", "body": "return self._X[\"\"]", "docstring": "Covariates matrix.\n\nReturns\n-------\nX : ndarray\n Covariates.", "id": "f13620:c0:m15"} {"signature": "@propertydef delta(self):", "body": "v = float(self._logistic.value)if v > :v = / ( + exp(-v))else:v = exp(v)v = v / (v + )return min(max(v, epsilon.tiny), - epsilon.tiny)", "docstring": "Variance ratio between ``K`` and ``I``.", "id": "f13620:c0:m16"} {"signature": "@propertydef scale(self):", "body": "return self._scale", "docstring": "Scaling factor.\n\nReturns\n-------\nscale : float\n Scaling factor.\n\nNotes\n-----\nSetting the derivative of log(p(\ud835\udc32; \ud835\udf37)), for which \ud835\udf37 is optimal, over\nscale equal to zero leads to the maximum ::\n\n s = n\u207b\u00b9(Q\u1d40\ud835\udc32)\u1d40D\u207b\u00b9 Q\u1d40(\ud835\udc32-X\ud835\udf37).\n\nIn the case of restricted marginal likelihood ::\n\n s = (n-c)\u207b\u00b9(Q\u1d40\ud835\udc32)\u1d40D\u207b\u00b9 Q\u1d40(\ud835\udc32-X\ud835\udf37),\n\nwhere s is the number of covariates.", "id": "f13620:c0:m18"} {"signature": "def mean(self):", "body": "return self._X[\"\"] @ self._tbeta", "docstring": "Mean of the prior.\n\nFormally, \ud835\udc26 = X\ud835\udf37.\n\nReturns\n-------\nmean : ndarray\n Mean of the prior.", "id": "f13620:c0:m20"} {"signature": "def covariance(self):", "body": "from numpy_sugar.linalg import ddot, sum2diagQ0 = self._QS[][]S0 = self._QS[]return sum2diag(dot(ddot(Q0, self.v0 * S0), Q0.T), self.v1)", "docstring": "Covariance of the prior.\n\nReturns\n-------\ncovariance : ndarray\n v\u2080K + v\u2081I.", "id": "f13620:c0:m21"} {"signature": "@cachedef _logdetXX(self):", "body": "if not self._restricted:return ldet = slogdet(self._X[\"\"].T @ self._X[\"\"])if ldet[] != :raise ValueError(\"\")return ldet[]", "docstring": "log(\uff5cX\u1d40X\uff5c).", "id": "f13620:c0:m23"} {"signature": "def _logdetH(self):", "body": "if not self._restricted:return ldet = slogdet(sum(self._XTQDiQTX) / self.scale)if ldet[] != :raise ValueError(\"\")return ldet[]", 
"docstring": "log(\uff5cH\uff5c) for H = s\u207b\u00b9X\u1d40QD\u207b\u00b9Q\u1d40X.", "id": "f13620:c0:m24"} {"signature": "def _lml_optimal_scale(self):", "body": "assert self._optimal[\"\"]n = len(self._y)lml = -self._df * log2pi - self._df - n * log(self.scale)lml -= sum(npsum(log(D)) for D in self._D)return lml / ", "docstring": "Log of the marginal likelihood for optimal scale.\n\nImplementation for unrestricted LML::\n\nReturns\n-------\nlml : float\n Log of the marginal likelihood.", "id": "f13620:c0:m25"} {"signature": "def _lml_arbitrary_scale(self):", "body": "s = self.scaleD = self._Dn = len(self._y)lml = -self._df * log2pi - n * log(s)lml -= sum(npsum(log(d)) for d in D)d = (mTQ - yTQ for (mTQ, yTQ) in zip(self._mTQ, self._yTQ))lml -= sum((i / j) @ i for (i, j) in zip(d, D)) / sreturn lml / ", "docstring": "Log of the marginal likelihood for arbitrary scale.\n\nReturns\n-------\nlml : float\n Log of the marginal likelihood.", "id": "f13620:c0:m26"} {"signature": "@propertydef _df(self):", "body": "if not self._restricted:return self.nsamplesreturn self.nsamples - self._X[\"\"].shape[]", "docstring": "Degrees of freedom.", "id": "f13620:c0:m27"} {"signature": "def __init__(self, Y, A, X, G, rank=, restricted=False):", "body": "from numpy_sugar import is_all_finiteY = asfortranarray(Y, float)yrank = matrix_rank(Y)if Y.shape[] > yrank:warnings.warn(f\"\"+ \"\",UserWarning,)A = asarray(A, float)X = asarray(X, float)Xrank = matrix_rank(X)if X.shape[] > Xrank:warnings.warn(f\"\"+ \"\",UserWarning,)G = asarray(G, float).copy()self._G_norm = max(G.min(), G.max())G /= self._G_normif not is_all_finite(Y):raise ValueError(\"\")if not is_all_finite(A):msg = \"\"raise ValueError(msg)if not is_all_finite(X):raise ValueError(\"\")if not is_all_finite(G):raise ValueError(\"\")self._Y = Yself._cov = Kron2SumCov(G, Y.shape[], rank)self._cov.listen(self._parameters_update)self._mean = KronMean(A, X)self._cache = {\"\": None}self._restricted = restrictedcomposite = [(\"\", self._cov.C0), (\"\", self._cov.C1)]Function.__init__(self, \"\", composite=composite)nparams = self._mean.nparams + self._cov.nparamsif nparams > Y.size:msg = \"\"msg += \"\"warnings.warn(msg, UserWarning)", "docstring": "Constructor.\n\nParameters\n----------\nY : (n, p) array_like\n Outcome matrix.\nA : (n, n) array_like\n Trait-by-trait design matrix.\nX : (n, c) array_like\n Covariates design matrix.\nG : (n, r) array_like\n Matrix G from the GG\u1d40 term.\nrank : optional, int\n Maximum rank of matrix C\u2080. Defaults to ``1``.", "id": "f13621:c0:m0"} {"signature": "@propertydef beta_covariance(self):", "body": "H = self._terms[\"\"]return inv(H)", "docstring": "Estimates the covariance-matrix of the optimal beta.\n\nReturns\n-------\nbeta-covariance : ndarray\n (M\u1d40K\u207b\u00b9M)\u207b\u00b9.\n\nReferences\n----------\n.. Rencher, A. C., & Schaalje, G. B. (2008). Linear models in statistics. 
John\n Wiley & Sons.", "id": "f13621:c0:m1"} {"signature": "def get_fast_scanner(self):", "body": "terms = self._termsreturn KronFastScanner(self._Y, self._mean.A, self._mean.X, self._cov.Ge, terms)", "docstring": "Return :class:`.FastScanner` for association scan.\n\nReturns\n-------\n:class:`.FastScanner`\n Instance of a class designed to perform very fast association scan.", "id": "f13621:c0:m2"} {"signature": "@propertydef A(self):", "body": "return self._mean.A", "docstring": "A from the equation \ud835\udc26 = (A \u2297 X) vec(B).\n\nReturns\n-------\nA : ndarray\n A.", "id": "f13621:c0:m3"} {"signature": "@propertydef B(self):", "body": "self._termsreturn asarray(self._mean.B, float)", "docstring": "Fixed-effect sizes B from \ud835\udc26 = (A \u2297 X) vec(B).\n\nReturns\n-------\nfixed-effects : ndarray\n B from \ud835\udc26 = (A \u2297 X) vec(B).", "id": "f13621:c0:m4"} {"signature": "@propertydef beta(self):", "body": "return vec(self.B)", "docstring": "Fixed-effect sizes \ud835\udec3 = vec(B).\n\nReturns\n-------\nfixed-effects : ndarray\n \ud835\udec3 from \ud835\udec3 = vec(B).", "id": "f13621:c0:m5"} {"signature": "@propertydef C0(self):", "body": "return self._cov.C0.value() / (self._G_norm ** )", "docstring": "C\u2080 from equation K = C\u2080 \u2297 GG\u1d40 + C\u2081 \u2297 I.\n\nReturns\n-------\nC0 : ndarray\n C\u2080.", "id": "f13621:c0:m6"} {"signature": "@propertydef C1(self):", "body": "return self._cov.C1.value()", "docstring": "C\u2081 from equation K = C\u2080 \u2297 GG\u1d40 + C\u2081 \u2297 I.\n\nReturns\n-------\nC1 : ndarray\n C\u2081.", "id": "f13621:c0:m7"} {"signature": "def mean(self):", "body": "self._termsreturn self._mean.value()", "docstring": "Mean \ud835\udc26 = (A \u2297 X) vec(B).\n\nReturns\n-------\nmean : ndarray\n \ud835\udc26.", "id": "f13621:c0:m8"} {"signature": "def covariance(self):", "body": "return self._cov.value()", "docstring": "Covariance K = C\u2080 \u2297 GG\u1d40 + C\u2081 \u2297 I.\n\nReturns\n-------\ncovariance : ndarray\n K.", "id": "f13621:c0:m9"} {"signature": "@propertydef X(self):", "body": "return self._mean.X", "docstring": "X from equation M = (A \u2297 X).\n\nReturns\n-------\nX : ndarray\n X from M = (A \u2297 X).", "id": "f13621:c0:m10"} {"signature": "@propertydef M(self):", "body": "return self._mean.AX", "docstring": "M = (A \u2297 X).\n\nReturns\n-------\nM : ndarray\n M from M = (A \u2297 X).", "id": "f13621:c0:m11"} {"signature": "@propertydef nsamples(self):", "body": "return self._Y.shape[]", "docstring": "Number of samples, n.", "id": "f13621:c0:m12"} {"signature": "@propertydef ntraits(self):", "body": "return self._Y.shape[]", "docstring": "Number of traits, p.", "id": "f13621:c0:m13"} {"signature": "@propertydef ncovariates(self):", "body": "return self._mean.X.shape[]", "docstring": "Number of covariates, c.", "id": "f13621:c0:m14"} {"signature": "def value(self):", "body": "return self.lml()", "docstring": "Log of the marginal likelihood.", "id": "f13621:c0:m15"} {"signature": "def gradient(self):", "body": "return self._lml_gradient()", "docstring": "Gradient of the log of the marginal likelihood.", "id": "f13621:c0:m16"} {"signature": "def lml(self):", "body": "terms = self._termsyKiy = terms[\"\"]mKiy = terms[\"\"]mKim = terms[\"\"]lml = -self._df * log2pi + self._logdet_MM - self._logdetKlml -= self._logdetHlml += -yKiy - mKim + * mKiyreturn lml / ", "docstring": "Log of the marginal likelihood.\n\nLet \ud835\udc32 = vec(Y), M = A\u2297X, and H = M\u1d40K\u207b\u00b9M. 
The restricted log of the marginal\nlikelihood is given by [R07]_::\n\n 2\u22c5log(p(\ud835\udc32)) = -(n\u22c5p - c\u22c5p) log(2\u03c0) + log(\uff5cM\u1d40M\uff5c) - log(\uff5cK\uff5c) - log(\uff5cH\uff5c)\n - (\ud835\udc32-\ud835\udc26)\u1d40 K\u207b\u00b9 (\ud835\udc32-\ud835\udc26),\n\nwhere \ud835\udc26 = M\ud835\udec3 for \ud835\udec3 = H\u207b\u00b9M\u1d40K\u207b\u00b9\ud835\udc32.\n\nFor implementation purpose, let X = (L\u2080 \u2297 G) and R = (L\u2081 \u2297 I)(L\u2081 \u2297 I)\u1d40.\nThe covariance can be written as::\n\n K = XX\u1d40 + R.\n\nFrom the Woodbury matrix identity, we have\n\n \ud835\udc32\u1d40K\u207b\u00b9\ud835\udc32 = \ud835\udc32\u1d40R\u207b\u00b9\ud835\udc32 - \ud835\udc32\u1d40R\u207b\u00b9XZ\u207b\u00b9X\u1d40R\u207b\u00b9\ud835\udc32,\n\nwhere Z = I + X\u1d40R\u207b\u00b9X. Note that R\u207b\u00b9 = (U\u2081S\u2081\u207b\u00b9U\u2081\u1d40) \u2297 I and ::\n\n X\u1d40R\u207b\u00b9\ud835\udc32 = (L\u2080\u1d40W \u2297 G\u1d40)\ud835\udc32 = vec(G\u1d40YWL\u2080),\n\nwhere W = U\u2081S\u2081\u207b\u00b9U\u2081\u1d40. The term G\u1d40Y can be calculated only once and it will form a\nr\u00d7p matrix. We similarly have ::\n\n X\u1d40R\u207b\u00b9M = (L\u2080\u1d40WA) \u2297 (G\u1d40X),\n\nfor which G\u1d40X is pre-computed.\n\nThe log-determinant of the covariance matrix is given by\n\n log(\uff5cK\uff5c) = log(\uff5cZ\uff5c) - log(\uff5cR\u207b\u00b9\uff5c) = log(\uff5cZ\uff5c) - 2\u00b7n\u00b7log(\uff5cU\u2081S\u2081\u207b\u00bd\uff5c).\n\nThe log of the marginal likelihood can be rewritten as::\n\n 2\u22c5log(p(\ud835\udc32)) = -(n\u22c5p - c\u22c5p) log(2\u03c0) + log(\uff5cM\u1d40M\uff5c)\n - log(\uff5cZ\uff5c) + 2\u00b7n\u00b7log(\uff5cU\u2081S\u2081\u207b\u00bd\uff5c)\n - log(\uff5cM\u1d40R\u207b\u00b9M - M\u1d40R\u207b\u00b9XZ\u207b\u00b9X\u1d40R\u207b\u00b9M\uff5c)\n - \ud835\udc32\u1d40R\u207b\u00b9\ud835\udc32 + (\ud835\udc32\u1d40R\u207b\u00b9X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9\ud835\udc32)\n - \ud835\udc26\u1d40R\u207b\u00b9\ud835\udc26 + (\ud835\udc26\u1d40R\u207b\u00b9X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9\ud835\udc26)\n + 2\ud835\udc32\u1d40R\u207b\u00b9\ud835\udc26 - 2(\ud835\udc32\u1d40R\u207b\u00b9X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9\ud835\udc26).\n\nReturns\n-------\nlml : float\n Log of the marginal likelihood.\n\nReferences\n----------\n.. [R07] LaMotte, L. R. (2007). A direct derivation of the REML likelihood\n function. 
Statistical Papers, 48(2), 321-327.", "id": "f13621:c0:m17"} {"signature": "def fit(self, verbose=True):", "body": "self._maximize(verbose=verbose)", "docstring": "Maximise the marginal likelihood.\n\nParameters\n----------\nverbose : bool, optional\n ``True`` for progress output; ``False`` otherwise.\n Defaults to ``True``.", "id": "f13621:c0:m18"} {"signature": "def _lml_gradient(self):", "body": "from scipy.linalg import cho_solveterms = self._termsdC0 = self._cov.C0.gradient()[\"\"]dC1 = self._cov.C1.gradient()[\"\"]b = terms[\"\"]W = terms[\"\"]Lh = terms[\"\"]Lz = terms[\"\"]WA = terms[\"\"]WL0 = terms[\"\"]YW = terms[\"\"]MRiM = terms[\"\"]MRiy = terms[\"\"]XRiM = terms[\"\"]XRiy = terms[\"\"]ZiXRiM = terms[\"\"]ZiXRiy = terms[\"\"]WdC0 = _mdot(W, dC0)WdC1 = _mdot(W, dC1)AWdC0 = _mdot(WA.T, dC0)AWdC1 = _mdot(WA.T, dC1)MR0M = _mkron(_mdot(AWdC0, WA), self._XGGX)MR1M = _mkron(_mdot(AWdC1, WA), self._XX)MR0X = _mkron(_mdot(AWdC0, WL0), self._XGGG)MR1X = _mkron(_mdot(AWdC1, WL0), self._GX.T)MR0y = vec(_mdot(self._XGGY, _mdot(WdC0, WA)))MR1y = vec(_mdot(self._XY, WdC1, WA))XR0X = _mkron(_mdot(WL0.T, dC0, WL0), self._GGGG)XR1X = _mkron(_mdot(WL0.T, dC1, WL0), self._GG)XR0y = vec(_mdot(self._GGGY, WdC0, WL0))XR1y = vec(_mdot(self._GY, WdC1, WL0))yR0y = vec(_mdot(self._GY, WdC0)).T @ vec(self._GY @ W)yR1y = (YW.T * _mdot(self._Y, WdC1).T).T.sum(axis=(, ))ZiXR0X = cho_solve(Lz, XR0X)ZiXR1X = cho_solve(Lz, XR1X)ZiXR0y = cho_solve(Lz, XR0y)ZiXR1y = cho_solve(Lz, XR1y)MK0y = MR0y - _mdot(XRiM.T, ZiXR0y) - _mdot(MR0X, ZiXRiy)MK0y += _mdot(XRiM.T, ZiXR0X, ZiXRiy)MK1y = MR1y - _mdot(XRiM.T, ZiXR1y) - _mdot(MR1X, ZiXRiy)MK1y += _mdot(XRiM.T, ZiXR1X, ZiXRiy)yK0y = yR0y - * XR0y.T @ ZiXRiy + ZiXRiy.T @ _mdot(XR0X, ZiXRiy)yK1y = yR1y - * XR1y.T @ ZiXRiy + ZiXRiy.T @ _mdot(XR1X, ZiXRiy)MR0XZiXRiM = _mdot(MR0X, ZiXRiM)MK0M = MR0M - MR0XZiXRiM - MR0XZiXRiM.transpose([, , ])MK0M += _mdot(ZiXRiM.T, XR0X, ZiXRiM)MR1XZiXRiM = _mdot(MR1X, ZiXRiM)MK1M = MR1M - MR1XZiXRiM - MR1XZiXRiM.transpose([, , ])MK1M += _mdot(ZiXRiM.T, XR1X, ZiXRiM)MK0m = _mdot(MK0M, b)mK0y = b.T @ MK0ymK0m = b.T @ MK0mMK1m = _mdot(MK1M, b)mK1y = b.T @ MK1ymK1m = b.T @ MK1mXRim = XRiM @ bMRim = MRiM @ bdb = {\"\": cho_solve(Lh, MK0m - MK0y), \"\": cho_solve(Lh, MK1m - MK1y)}grad = {\"\": -trace(WdC0) * self._trGG + trace(ZiXR0X),\"\": -trace(WdC1) * self.nsamples + trace(ZiXR1X),}if self._restricted:grad[\"\"] += cho_solve(Lh, MK0M).diagonal().sum()grad[\"\"] += cho_solve(Lh, MK1M).diagonal().sum()mKiM = MRim.T - XRim.T @ ZiXRiMyKiM = MRiy.T - XRiy.T @ ZiXRiMgrad[\"\"] += yK0y - * mK0y + mK0m - * _mdot(mKiM, db[\"\"])grad[\"\"] += * _mdot(yKiM, db[\"\"])grad[\"\"] += yK1y - * mK1y + mK1m - * _mdot(mKiM, db[\"\"])grad[\"\"] += * _mdot(yKiM, db[\"\"])grad[\"\"] /= grad[\"\"] /= return grad", "docstring": "Gradient of the log of the marginal likelihood.\n\nLet \ud835\udc32 = vec(Y), \ud835\udd42 = K\u207b\u00b9\u2202(K)K\u207b\u00b9, and H = M\u1d40K\u207b\u00b9M. The gradient is given by::\n\n 2\u22c5\u2202log(p(\ud835\udc32)) = -tr(K\u207b\u00b9\u2202K) - tr(H\u207b\u00b9\u2202H) + \ud835\udc32\u1d40\ud835\udd42\ud835\udc32 - \ud835\udc26\u1d40\ud835\udd42(2\u22c5\ud835\udc32-\ud835\udc26)\n - 2\u22c5(\ud835\udc26-\ud835\udc32)\u1d40K\u207b\u00b9\u2202(\ud835\udc26).\n\nObserve that\n\n \u2202\ud835\udec3 = -H\u207b\u00b9(\u2202H)\ud835\udec3 - H\u207b\u00b9M\u1d40\ud835\udd42\ud835\udc32 and \u2202H = -M\u1d40\ud835\udd42M.\n\nLet Z = I + X\u1d40R\u207b\u00b9X and \ud835\udce1 = R\u207b\u00b9(\u2202K)R\u207b\u00b9. 
We use Woodbury matrix identity to\nwrite ::\n\n \ud835\udc32\u1d40\ud835\udd42\ud835\udc32 = \ud835\udc32\u1d40\ud835\udce1\ud835\udc32 - 2(\ud835\udc32\u1d40\ud835\udce1X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9\ud835\udc32) + (\ud835\udc32\u1d40R\u207b\u00b9X)Z\u207b\u00b9(X\u1d40\ud835\udce1X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9\ud835\udc32)\n M\u1d40\ud835\udd42M = M\u1d40\ud835\udce1M - 2(M\u1d40\ud835\udce1X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9M) + (M\u1d40R\u207b\u00b9X)Z\u207b\u00b9(X\u1d40\ud835\udce1X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9M)\n M\u1d40\ud835\udd42\ud835\udc32 = M\u1d40\ud835\udce1\ud835\udc32 - (M\u1d40R\u207b\u00b9X)Z\u207b\u00b9(X\u1d40\ud835\udce1\ud835\udc32) - (M\u1d40\ud835\udce1X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9\ud835\udc32)\n + (M\u1d40R\u207b\u00b9X)Z\u207b\u00b9(X\u1d40\ud835\udce1X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9\ud835\udc32)\n H\u207b\u00b9 = M\u1d40R\u207b\u00b9M - (M\u1d40R\u207b\u00b9X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9M),\n\nwhere we have used parentheses to separate expressions\nthat we will compute separately. For example, we have ::\n\n \ud835\udc32\u1d40\ud835\udce1\ud835\udc32 = \ud835\udc32\u1d40(U\u2081S\u2081\u207b\u00b9U\u2081\u1d40 \u2297 I)(\u2202C\u2080 \u2297 GG\u1d40)(U\u2081S\u2081\u207b\u00b9U\u2081\u1d40 \u2297 I)\ud835\udc32\n = \ud835\udc32\u1d40(U\u2081S\u2081\u207b\u00b9U\u2081\u1d40\u2202C\u2080 \u2297 G)(U\u2081S\u2081\u207b\u00b9U\u2081\u1d40 \u2297 G\u1d40)\ud835\udc32\n = vec(G\u1d40YU\u2081S\u2081\u207b\u00b9U\u2081\u1d40\u2202C\u2080)\u1d40vec(G\u1d40YU\u2081S\u2081\u207b\u00b9U\u2081\u1d40),\n\nwhen the derivative is over the parameters of C\u2080. Otherwise, we have\n\n \ud835\udc32\u1d40\ud835\udce1\ud835\udc32 = vec(YU\u2081S\u2081\u207b\u00b9U\u2081\u1d40\u2202C\u2081)\u1d40vec(YU\u2081S\u2081\u207b\u00b9U\u2081\u1d40).\n\nThe above equations can be more compactly written as\n\n \ud835\udc32\u1d40\ud835\udce1\ud835\udc32 = vec(E\u1d62\u1d40YW\u2202C\u1d62)\u1d40vec(E\u1d62\u1d40YW),\n\nwhere W = U\u2081S\u2081\u207b\u00b9U\u2081\u1d40, E\u2080 = G, and E\u2081 = I. 
We will now just state the results for\nthe other instances of the aBc form, which follow similar derivations::\n\n X\u1d40\ud835\udce1X = (L\u2080\u1d40W\u2202C\u1d62WL\u2080) \u2297 (G\u1d40E\u1d62E\u1d62\u1d40G)\n M\u1d40\ud835\udce1y = (A\u1d40W\u2202C\u1d62\u2297X\u1d40E\u1d62)vec(E\u1d62\u1d40YW) = vec(X\u1d40E\u1d62E\u1d62\u1d40YW\u2202C\u1d62WA)\n M\u1d40\ud835\udce1X = A\u1d40W\u2202C\u1d62WL\u2080 \u2297 X\u1d40E\u1d62E\u1d62\u1d40G\n M\u1d40\ud835\udce1M = A\u1d40W\u2202C\u1d62WA \u2297 X\u1d40E\u1d62E\u1d62\u1d40X\n X\u1d40\ud835\udce1\ud835\udc32 = G\u1d40E\u1d62E\u1d62\u1d40YW\u2202C\u1d62WL\u2080\n\nFrom Woodbury matrix identity and Kronecker product properties we have ::\n\n tr(K\u207b\u00b9\u2202K) = tr[W\u2202C\u1d62]tr[E\u1d62E\u1d62\u1d40] - tr[Z\u207b\u00b9(X\u1d40\ud835\udce1X)]\n tr(H\u207b\u00b9\u2202H) = - tr[(M\u1d40R\u207b\u00b9M)(M\u1d40\ud835\udd42M)] + tr[(M\u1d40R\u207b\u00b9X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9M)(M\u1d40\ud835\udd42M)]\n\nNote also that ::\n\n \u2202\ud835\udec3 = H\u207b\u00b9M\u1d40\ud835\udd42M\ud835\udec3 - H\u207b\u00b9M\u1d40\ud835\udd42\ud835\udc32.\n\nReturns\n-------\nC0.Lu : ndarray\n Gradient of the log of the marginal likelihood over C\u2080 parameters.\nC1.Lu : ndarray\n Gradient of the log of the marginal likelihood over C\u2081 parameters.", "id": "f13621:c0:m32"} {"signature": "def L(self):", "body": "from numpy_sugar.linalg import ddot, sum2diagif self._L_cache is not None:return self._L_caches = self._cov[\"\"]d = self._cov[\"\"]Q = self._cov[\"\"][][]S = self._cov[\"\"][]ddot(self.A * self._site.tau, Q, left=True, out=self._NxR)B = dot(Q.T, self._NxR, out=self._RxR)B *= - dsum2diag(B, / S / s, out=B)self._L_cache = _cho_factor(B)return self._L_cache", "docstring": "r\"\"\"Cholesky decomposition of :math:`\\mathrm B`.\n\n .. math::\n\n \\mathrm B = \\mathrm Q^{\\intercal}\\tilde{\\mathrm{T}}\\mathrm Q\n + \\mathrm{S}^{-1}", "id": "f13627:c0:m4"} {"signature": "def _initialize(self):", "body": "if self._mean is None or self._cov is None:returnQ = self._cov[\"\"][][]S = self._cov[\"\"][]if S.size > :self.tau[:] = / npsum((Q * sqrt(S)) ** , axis=)else:self.tau[:] = self.eta[:] = self._meanself.eta[:] *= self.tau", "docstring": "r\"\"\"Initialize the mean and covariance of the posterior.\n\n Given that :math:`\\tilde{\\mathrm T}` is a matrix of zeros right before\n the first EP iteration, we have\n\n .. math::\n\n \\boldsymbol\\mu = \\mathrm K^{-1} \\mathbf m ~\\text{ and }~\n \\Sigma = \\mathrm K\n\n as the initial posterior mean and covariance.", "id": "f13628:c0:m5"} {"signature": "def L(self):", "body": "from scipy.linalg import cho_factorfrom numpy_sugar.linalg import ddot, sum2diagif self._L_cache is not None:return self._L_cacheQ = self._cov[\"\"][][]S = self._cov[\"\"][]B = dot(Q.T, ddot(self._site.tau, Q, left=True))sum2diag(B, / S, out=B)self._L_cache = cho_factor(B, lower=True)[]return self._L_cache", "docstring": "r\"\"\"Cholesky decomposition of :math:`\\mathrm B`.\n\n .. 
math::\n\n \\mathrm B = \\mathrm Q^{\\intercal}\\tilde{\\mathrm{T}}\\mathrm Q\n + \\mathrm{S}^{-1}", "id": "f13628:c0:m10"} {"signature": "def rsolve(A, y):", "body": "from numpy_sugar.linalg import rsolve as _rsolvetry:beta = _rsolve(A, y)except LinAlgError:msg = \"\"msg += \"\"warnings.warn(msg, RuntimeWarning)beta = zeros(A.shape[])return beta", "docstring": "Robust solve Ax=y.", "id": "f13632:m1"} {"signature": "def hsolve(A00, A01, A11, y0, y1):", "body": "from numpy_sugar import epsilonn = _norm(A00, A01)u0 = A00 - nu1 = A01nu = _norm(u0, u1)with errstate(invalid=\"\", divide=\"\"):v0 = nan_to_num(u0 / nu)v1 = nan_to_num(u1 / nu)B00 = - * v0 * v0B01 = - * v0 * v1B11 = - * v1 * v1D00 = B00 * A00 + B01 * A01D01 = B00 * A01 + B01 * A11D11 = B01 * A01 + B11 * A11b0 = y0 - * y0 * v0 * v0 - * y1 * v0 * v1b1 = y1 - * y0 * v1 * v0 - * y1 * v1 * v1n = _norm(D00, D01)u0 = D00 - nu1 = D01nu = _norm(u0, u1)with errstate(invalid=\"\", divide=\"\"):v0 = nan_to_num(u0 / nu)v1 = nan_to_num(u1 / nu)E00 = - * v0 * v0E01 = - * v0 * v1E11 = - * v1 * v1F00 = E00 * D00 + E01 * D01F01 = E01 * D11F11 = E11 * D11F11 = (npy_abs(F11) > epsilon.small) * F11with errstate(divide=\"\", invalid=\"\"):Fi00 = nan_to_num(F00 / F00 / F00)Fi11 = nan_to_num(F11 / F11 / F11)Fi10 = nan_to_num(-(F01 / F00) * Fi11)c0 = Fi00 * b0c1 = Fi10 * b0 + Fi11 * b1x0 = E00 * c0 + E01 * c1x1 = E01 * c0 + E11 * c1return array([x0, x1])", "docstring": "Solver for the linear equations of two variables and equations only.\n\nIt uses Householder reductions to solve A\ud835\udc31 = \ud835\udc32 in a robust manner.\n\nParameters\n----------\nA : array_like\n Coefficient matrix.\ny : array_like\n Ordinate values.\n\nReturns\n-------\nndarray\n Solution \ud835\udc31.", "id": "f13632:m7"} {"signature": "def economic_qs_zeros(n):", "body": "Q0 = empty((n, ))Q1 = eye(n)S0 = empty()return ((Q0, Q1), S0)", "docstring": "Eigen decomposition of a zero matrix.", "id": "f13633:m0"} {"signature": "def multivariate_normal(random, mean, cov):", "body": "from numpy.linalg import choleskyL = cholesky(cov)return L @ random.randn(L.shape[]) + mean", "docstring": "Draw random samples from a multivariate normal distribution.\n\nParameters\n----------\nrandom : np.random.RandomState instance\n Random state.\nmean : array_like\n Mean of the n-dimensional distribution.\ncov : array_like\n Covariance matrix of the distribution. 
It must be symmetric and\n positive-definite for proper sampling.\n\nReturns\n-------\nout : ndarray\n The drawn sample.", "id": "f13641:m0"} {"signature": "def get_fast_scanner(self):", "body": "from numpy_sugar.linalg import ddot, economic_qs, sum2diagy = self.eta / self.tauif self._QS is None:K = eye(y.shape[]) / self.tauelse:Q0 = self._QS[][]S0 = self._QS[]K = dot(ddot(Q0, self.v0 * S0), Q0.T)K = sum2diag(K, / self.tau)return FastScanner(y, self._X, economic_qs(K), self.v1)", "docstring": "r\"\"\"Return :class:`glimix_core.lmm.FastScanner` for the current\n delta.", "id": "f13643:c0:m12"} {"signature": "def value(self):", "body": "from numpy_sugar.linalg import ddot, sum2diagif self._cache[\"\"] is not None:return self._cache[\"\"]scale = exp(self.logscale)delta = / ( + exp(-self.logitdelta))v0 = scale * ( - delta)v1 = scale * deltamu = self.eta / self.taun = len(mu)if self._QS is None:K = zeros((n, n))else:Q0 = self._QS[][]S0 = self._QS[]K = dot(ddot(Q0, S0), Q0.T)A = sum2diag(sum2diag(v0 * K, v1), / self.tau)m = mu - self.mean()v = -n * log( * pi)v -= slogdet(A)[]v -= dot(m, solve(A, m))self._cache[\"\"] = v / return self._cache[\"\"]", "docstring": "r\"\"\"Log of the marginal likelihood.\n\n Formally,\n\n .. math::\n\n - \\frac{n}{2}\\log{2\\pi} - \\frac{1}{2} \\log{\\left|\n v_0 \\mathrm K + v_1 \\mathrm I + \\tilde{\\Sigma} \\right|}\n - \\frac{1}{2}\n \\left(\\tilde{\\boldsymbol\\mu} -\n \\mathrm X\\boldsymbol\\beta\\right)^{\\intercal}\n \\left( v_0 \\mathrm K + v_1 \\mathrm I +\n \\tilde{\\Sigma} \\right)^{-1}\n \\left(\\tilde{\\boldsymbol\\mu} -\n \\mathrm X\\boldsymbol\\beta\\right)\n\n Returns\n -------\n float\n :math:`\\log{p(\\tilde{\\boldsymbol\\mu})}`", "id": "f13643:c0:m16"} {"signature": "@propertydef beta(self):", "body": "return asarray(self._variables.get(\"\").value, float)", "docstring": "r\"\"\"Fixed-effect sizes.\n\n Returns\n -------\n :class:`numpy.ndarray`\n :math:`\\boldsymbol\\beta`.", "id": "f13646:c0:m2"} {"signature": "def copy(self):", "body": "return copy(self)", "docstring": "r\"\"\"Create a copy of this object.", "id": "f13646:c0:m4"} {"signature": "def covariance(self):", "body": "from numpy_sugar.linalg import ddot, sum2diagQ0 = self._QS[][]S0 = self._QS[]return sum2diag(dot(ddot(Q0, self.v0 * S0), Q0.T), self.v1)", "docstring": "r\"\"\"Covariance of the prior.\n\n Returns\n -------\n :class:`numpy.ndarray`\n :math:`v_0 \\mathrm K + v_1 \\mathrm I`.", "id": "f13646:c0:m5"} {"signature": "@propertydef delta(self):", "body": "return / ( + exp(-self.logitdelta))", "docstring": "r\"\"\"Get or set the ratio of variance between ``K`` and ``I``.\n\n Returns\n -------\n float\n :math:`\\delta`.", "id": "f13646:c0:m6"} {"signature": "def fix(self, var_name):", "body": "Function._fix(self, _to_internal_name(var_name))", "docstring": "r\"\"\"Prevent a variable to be adjusted.\n\n Parameters\n ----------\n var_name : str\n Variable name.", "id": "f13646:c0:m8"} {"signature": "def fit(self, verbose=True, factr=, pgtol=):", "body": "self._verbose = verboseself._maximize(verbose=verbose, factr=factr, pgtol=pgtol)self._verbose = False", "docstring": "r\"\"\"Maximise the marginal likelihood.\n\n Parameters\n ----------\n verbose : bool\n ``True`` for progress output; ``False`` otherwise.\n Defaults to ``True``.\n factr : float, optional\n The iteration stops when\n ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, where ``eps`` is\n the machine precision.\n pgtol : float, optional\n The iteration will stop when ``max{|proj g_i | i = 1, ..., n} <= 
pgtol``\n where ``pg_i`` is the i-th component of the projected gradient.\n\n Notes\n -----\n Please, refer to :func:`scipy.optimize.fmin_l_bfgs_b` for further information\n about ``factr`` and ``pgtol``.", "id": "f13646:c0:m9"} {"signature": "def lml(self):", "body": "return self.value()", "docstring": "r\"\"\"Log of the marginal likelihood.\n\n Returns\n -------\n float\n :math:`\\log p(\\mathbf y)`", "id": "f13646:c0:m10"} {"signature": "def posteriori_mean(self):", "body": "from numpy_sugar.linalg import rsolveSigma = self.posteriori_covariance()eta = self._ep._posterior.etareturn dot(Sigma, eta + rsolve(GLMM.covariance(self), self.mean()))", "docstring": "r\"\"\" Mean of the estimated posteriori.\n\n This is also the maximum a posteriori estimation of the latent variable.", "id": "f13646:c0:m15"} {"signature": "def posteriori_covariance(self):", "body": "K = GLMM.covariance(self)tau = self._ep._posterior.taureturn pinv(pinv(K) + diag( / tau))", "docstring": "r\"\"\" Covariance of the estimated posteriori.", "id": "f13646:c0:m16"} {"signature": "def mean(self):", "body": "return dot(self._X, self.beta)", "docstring": "r\"\"\"Mean of the prior.\n\n Returns\n -------\n :class:`numpy.ndarray`\n :math:`\\mathrm X\\boldsymbol\\beta`.", "id": "f13646:c0:m17"} {"signature": "@propertydef scale(self):", "body": "return exp(self.logscale)", "docstring": "r\"\"\"Get or set the overall variance.\n\n Returns\n -------\n float\n :math:`s`.", "id": "f13646:c0:m18"} {"signature": "def unfix(self, var_name):", "body": "Function._unfix(self, _to_internal_name(var_name))", "docstring": "r\"\"\"Let a variable be adjusted.\n\n Parameters\n ----------\n var_name : str\n Variable name.", "id": "f13646:c0:m21"} {"signature": "@propertydef v0(self):", "body": "return self.scale * ( - self.delta)", "docstring": "r\"\"\"First variance.\n\n Returns\n -------\n float\n :math:`v_0 = s (1 - \\delta)`", "id": "f13646:c0:m22"} {"signature": "@propertydef v1(self):", "body": "return self.scale * self.delta", "docstring": "r\"\"\"Second variance.\n\n Returns\n -------\n float\n :math:`v_1 = s \\delta`", "id": "f13646:c0:m23"} {"signature": "def gradient(self):", "body": "self._update_approx()g = self._ep.lml_derivatives(self._X)ed = exp(-self.logitdelta)es = exp(self.logscale)grad = dict()grad[\"\"] = g[\"\"] * (ed / ( + ed)) / ( + ed)grad[\"\"] = g[\"\"] * esgrad[\"\"] = g[\"\"]return grad", "docstring": "r\"\"\"Gradient of the log of the marginal likelihood.\n\n Returns\n -------\n dict\n Map between variables to their gradient values.", "id": "f13648:c0:m10"} {"signature": "def fit(self, verbose=True, factr=, pgtol=):", "body": "self._maximize(verbose=verbose, factr=factr, pgtol=pgtol)", "docstring": "r\"\"\"Maximise the marginal likelihood.\n\n Parameters\n ----------\n verbose : bool\n ``True`` for progress output; ``False`` otherwise.\n Defaults to ``True``.\n factr : float, optional\n The iteration stops when\n ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, where ``eps`` is\n the machine precision.\n pgtol : float, optional\n The iteration will stop when ``max{|proj g_i | i = 1, ..., n} <= pgtol``\n where ``pg_i`` is the i-th component of the projected gradient.\n\n Notes\n -----\n Please, refer to :func:`scipy.optimize.fmin_l_bfgs_b` for further information\n about ``factr`` and ``pgtol``.", "id": "f13649:c0:m1"} {"signature": "def lml(self):", "body": "return self.value()", "docstring": "r\"\"\"Log of the marginal likelihood.\n\n Returns\n -------\n float\n :math:`\\log p(\\mathbf y)`", "id": 
"f13649:c0:m2"} {"signature": "@propertydef name(self):", "body": "return \"\"", "docstring": "r\"\"\"Get the name of this likelihood.", "id": "f13654:c0:m1"} {"signature": "@propertydef outcome(self):", "body": "return self._outcome", "docstring": "r\"\"\"Get or set an array of outcomes.", "id": "f13654:c0:m2"} {"signature": "def mean(self, x):", "body": "return x", "docstring": "r\"\"\"Outcome mean.", "id": "f13654:c0:m4"} {"signature": "@propertydef sample_size(self):", "body": "return len(self.outcome)", "docstring": "r\"\"\"Get the number of samples.", "id": "f13654:c0:m6"} {"signature": "@propertydef name(self):", "body": "return \"\"", "docstring": "r\"\"\"Get the name of this likelihood.", "id": "f13654:c1:m1"} {"signature": "@propertydef outcome(self):", "body": "return self._outcome", "docstring": "r\"\"\"Get or set an array of outcomes.", "id": "f13654:c1:m2"} {"signature": "def mean(self, x):", "body": "return self._link.inv(x)", "docstring": "r\"\"\"Outcome mean.", "id": "f13654:c1:m4"} {"signature": "@propertydef sample_size(self):", "body": "return len(self.outcome)", "docstring": "r\"\"\"Get the number of samples.", "id": "f13654:c1:m6"} {"signature": "@propertydef name(self):", "body": "return \"\"", "docstring": "r\"\"\"Get the name of this likelihood.", "id": "f13654:c2:m1"} {"signature": "@propertydef ntrials(self):", "body": "return self._ntrials", "docstring": "r\"\"\"Get the array of number of trials.", "id": "f13654:c2:m2"} {"signature": "@propertydef nsuccesses(self):", "body": "return self._nsuccesses", "docstring": "r\"\"\"Get or set an array of successful trials.", "id": "f13654:c2:m3"} {"signature": "def mean(self, x):", "body": "return self._link.inv(x)", "docstring": "r\"\"\"Mean of the number of successful trials.", "id": "f13654:c2:m5"} {"signature": "@propertydef sample_size(self):", "body": "return len(self.nsuccesses)", "docstring": "r\"\"\"Get the number of samples.", "id": "f13654:c2:m7"} {"signature": "@propertydef name(self):", "body": "return \"\"", "docstring": "r\"\"\"Get the name of this likelihood.", "id": "f13654:c3:m1"} {"signature": "@propertydef noccurrences(self):", "body": "return self._noccurrences", "docstring": "r\"\"\"Get or set an array of number of occurrences.", "id": "f13654:c3:m2"} {"signature": "def mean(self, x):", "body": "return self._link.inv(x)", "docstring": "r\"\"\"Mean of the number of occurrences.", "id": "f13654:c3:m4"} {"signature": "@propertydef sample_size(self):", "body": "return len(self.noccurrences)", "docstring": "r\"\"\"Get the number of samples.", "id": "f13654:c3:m6"} {"signature": "def __getitem__(self, key):", "body": "if not self.data_loaded:dim_size = self.z_variable_info[key]['']dim_size = dim_size[]if dim_size == :dim_size += rec_num = self.z_variable_info[key]['']status, data = fortran_cdf.get_z_var(self.fname, key, dim_size, rec_num)if status == :if dim_size == :data = data[, :]return dataelse:raise IOError(fortran_cdf.statusreporter(status))else:return chameleon(self.fname, key, self.data[key],self.meta[key],self.z_variable_info[key])", "docstring": "return CDF variable by name", "id": "f13658:c0:m3"} {"signature": "def inquire(self):", "body": "name = copy.deepcopy(self.fname)stats = fortran_cdf.inquire(name)status = stats[]if status == :self._num_dims = stats[]self._dim_sizes = stats[]self._encoding = stats[]self._majority = stats[]self._max_rec = stats[]self._num_r_vars = stats[]self._num_z_vars = stats[]self._num_attrs = stats[]else:raise IOError(fortran_cdf.statusreporter(status))", "docstring": 
"Maps to fortran CDF_Inquire.\n\n Assigns parameters returned by CDF_Inquire\n to pysatCDF instance. Not intended\n for regular direct use by user.", "id": "f13658:c0:m4"} {"signature": "def _read_all_z_variable_info(self):", "body": "self.z_variable_info = {}self.z_variable_names_by_num = {}info = fortran_cdf.z_var_all_inquire(self.fname, self._num_z_vars,len(self.fname))status = info[]data_types = info[]num_elems = info[]rec_varys = info[]dim_varys = info[]num_dims = info[]dim_sizes = info[]rec_nums = info[]var_nums = info[]var_names = info[]if status == :for i in np.arange(len(data_types)):out = {}out[''] = data_types[i]out[''] = num_elems[i]out[''] = rec_varys[i]out[''] = dim_varys[i]out[''] = num_dims[i]out[''] = dim_sizes[i, :]if out[''][] == :out[''][] += out[''] = rec_nums[i]out[''] = var_nums[i]var_name = ''.join(var_names[i].astype(''))out[''] = var_name.rstrip()self.z_variable_info[out['']] = outself.z_variable_names_by_num[out['']] = var_nameelse:raise IOError(fortran_cdf.statusreporter(status))", "docstring": "Gets all CDF z-variable information, not data though.\n\n Maps to calls using var_inquire. Gets information on\n data type, number of elements, number of dimensions, etc.", "id": "f13658:c0:m5"} {"signature": "def load_all_variables(self):", "body": "self.data = {}file_var_names = self.z_variable_info.keys()dim_sizes = []rec_nums = []data_types = []names = []for i, name in enumerate(file_var_names):dim_sizes.extend(self.z_variable_info[name][''])rec_nums.append(self.z_variable_info[name][''])data_types.append(self.z_variable_info[name][''])names.append(name.ljust())dim_sizes = np.array(dim_sizes)rec_nums = np.array(rec_nums)data_types = np.array(data_types)self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,self.cdf_data_types[''],fortran_cdf.get_multi_z_real4)self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,self.cdf_data_types[''],fortran_cdf.get_multi_z_real4)self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,self.cdf_data_types[''],fortran_cdf.get_multi_z_real8)self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,self.cdf_data_types[''],fortran_cdf.get_multi_z_real8)self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,self.cdf_data_types[''],fortran_cdf.get_multi_z_int4)self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,self.cdf_data_types[''],fortran_cdf.get_multi_z_int4,data_offset= ** )self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,self.cdf_data_types[''],fortran_cdf.get_multi_z_int2)self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,self.cdf_data_types[''],fortran_cdf.get_multi_z_int2,data_offset= ** )self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,self.cdf_data_types[''],fortran_cdf.get_multi_z_int1)self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,self.cdf_data_types[''],fortran_cdf.get_multi_z_int1,data_offset= ** )self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,self.cdf_data_types[''],fortran_cdf.get_multi_z_int1)self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,self.cdf_data_types[''],fortran_cdf.get_multi_z_real8,epoch=True)self._call_multi_fortran_z(names, data_types, rec_nums, * dim_sizes,self.cdf_data_types[''],fortran_cdf.get_multi_z_epoch16,epoch16=True)self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,self.cdf_data_types[''],fortran_cdf.get_multi_z_tt2000,epoch=True)self.data_loaded = True", "docstring": "Loads all variables from CDF.\n\n Note this 
routine is called automatically\n upon instantiation.", "id": "f13658:c0:m6"} {"signature": "def _call_multi_fortran_z(self, names, data_types, rec_nums,dim_sizes, input_type_code, func,epoch=False, data_offset=None, epoch16=False):", "body": "idx, = np.where(data_types == input_type_code)if len(idx) > :max_rec = rec_nums[idx].max()sub_names = np.array(names)[idx]sub_sizes = dim_sizes[idx]status, data = func(self.fname, sub_names.tolist(),sub_sizes, sub_sizes.sum(), max_rec, len(sub_names))if status == :if data_offset is not None:data = data.astype(int)idx, idy, = np.where(data < )data[idx, idy] += data_offsetif epoch:data -= data = data.astype('')if epoch16:data[::, :] -= data = data[::, :] * + data[::, :] / data = data.astype('')sub_sizes /= self._process_return_multi_z(data, sub_names, sub_sizes)else:raise IOError(fortran_cdf.statusreporter(status))", "docstring": "Calls fortran functions to load CDF variable data\n\n Parameters\n ----------\n names : list_like\n list of variables names\n data_types : list_like\n list of all loaded data type codes as used by CDF\n rec_nums : list_like\n list of record numbers in CDF file. Provided by variable_info\n dim_sizes :\n list of dimensions as provided by variable_info.\n input_type_code : int\n Specific type code to load\n func : function\n Fortran function via python interface that will be used for actual loading.\n epoch : bool\n Flag indicating type is epoch. Translates things to datetime standard.\n data_offset :\n Offset value to be applied to data. Required for unsigned integers in CDF.\n epoch16 : bool\n Flag indicating type is epoch16. Translates things to datetime standard.", "id": "f13658:c0:m7"} {"signature": "def _process_return_multi_z(self, data, names, dim_sizes):", "body": "d1 = d2 = for name, dim_size in zip(names, dim_sizes):d2 = d1 + dim_sizeif dim_size == :self.data[name.rstrip()] = data[d1, :]else:self.data[name.rstrip()] = data[d1:d2, :]d1 += dim_size", "docstring": "process and attach data from fortran_cdf.get_multi_*", "id": "f13658:c0:m8"} {"signature": "def _read_all_attribute_info(self):", "body": "num = copy.deepcopy(self._num_attrs)fname = copy.deepcopy(self.fname)out = fortran_cdf.inquire_all_attr(fname, num, len(fname))status = out[]names = out[].astype('')scopes = out[]max_gentries = out[]max_rentries = out[]max_zentries = out[]attr_nums = out[]global_attrs_info = {}var_attrs_info = {}if status == :for name, scope, gentry, rentry, zentry, num in zip(names, scopes, max_gentries,max_rentries, max_zentries,attr_nums):name = ''.join(name)name = name.rstrip()nug = {}nug[''] = scopenug[''] = gentrynug[''] = rentrynug[''] = zentrynug[''] = numflag = (gentry == ) & (rentry == ) & (zentry == )if not flag:if scope == :global_attrs_info[name] = nugelif scope == :var_attrs_info[name] = nugself.global_attrs_info = global_attrs_infoself.var_attrs_info = var_attrs_infoelse:raise IOError(fortran_cdf.statusreporter(status))", "docstring": "Read all attribute properties, g, r, and z attributes", "id": "f13658:c0:m9"} {"signature": "def _read_all_z_attribute_data(self):", "body": "self.meta = {}max_entries = []attr_nums = []names = []attr_names = []names = self.var_attrs_info.keys()num_z_attrs = len(names)exp_attr_nums = []for key in names:max_entries.append(self.var_attrs_info[key][''])attr_nums.append(self.var_attrs_info[key][''])attr_nums = np.array(attr_nums)max_entries = np.array(max_entries)info = fortran_cdf.z_attr_all_inquire(self.fname, attr_nums,num_z_attrs, max_entries, self._num_z_vars, len(self.fname))status = 
info[]data_types = info[]num_elems = info[]entry_nums = info[]if status == :for i, name in enumerate(names):self.var_attrs_info[name][''] = data_types[i]self.var_attrs_info[name][''] = num_elems[i]self.var_attrs_info[name][''] = entry_nums[i]exp_attr_nums.extend([self.var_attrs_info[name]['']] * len(entry_nums[i]))attr_names.extend([name] * len(entry_nums[i]))else:raise IOError(fortran_cdf.statusreporter(status))data_types = data_types.flatten()num_elems = num_elems.flatten()entry_nums = entry_nums.flatten()attr_nums = np.array(exp_attr_nums)idx, = np.where(entry_nums > )data_types = data_types[idx]num_elems = num_elems[idx]entry_nums = entry_nums[idx]attr_nums = attr_nums[idx]attr_names = np.array(attr_names)[idx]var_names = [self.z_variable_names_by_num[i].rstrip() for i in entry_nums]self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,entry_nums, attr_nums, var_names, self.cdf_data_types[''],fortran_cdf.get_multi_z_attr_real4)self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,entry_nums, attr_nums, var_names, self.cdf_data_types[''],fortran_cdf.get_multi_z_attr_real4)self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,entry_nums, attr_nums, var_names, self.cdf_data_types[''],fortran_cdf.get_multi_z_attr_real8)self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,entry_nums, attr_nums, var_names, self.cdf_data_types[''],fortran_cdf.get_multi_z_attr_real8)self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,entry_nums, attr_nums, var_names, self.cdf_data_types[''],fortran_cdf.get_multi_z_attr_int1)self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,entry_nums, attr_nums, var_names, self.cdf_data_types[''],fortran_cdf.get_multi_z_attr_int1)self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,entry_nums, attr_nums, var_names, self.cdf_data_types[''],fortran_cdf.get_multi_z_attr_int1,data_offset=)self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,entry_nums, attr_nums, var_names, self.cdf_data_types[''],fortran_cdf.get_multi_z_attr_int2)self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,entry_nums, attr_nums, var_names, self.cdf_data_types[''],fortran_cdf.get_multi_z_attr_int2,data_offset=)self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,entry_nums, attr_nums, var_names, self.cdf_data_types[''],fortran_cdf.get_multi_z_attr_int4)self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,entry_nums, attr_nums, var_names, self.cdf_data_types[''],fortran_cdf.get_multi_z_attr_int4,data_offset= ** )self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,entry_nums, attr_nums, var_names, self.cdf_data_types[''],fortran_cdf.get_multi_z_attr_char)self._call_multi_fortran_z_attr(attr_names, data_types, num_elems,entry_nums, attr_nums, var_names, self.cdf_data_types[''],fortran_cdf.get_multi_z_attr_char)", "docstring": "Read all CDF z-attribute data", "id": "f13658:c0:m10"} {"signature": "def _call_multi_fortran_z_attr(self, names, data_types, num_elems,entry_nums, attr_nums, var_names,input_type_code, func, data_offset=None):", "body": "idx, = np.where(data_types == input_type_code)if len(idx) > :max_num = num_elems[idx].max()sub_num_elems = num_elems[idx]sub_names = np.array(names)[idx]sub_var_names = np.array(var_names)[idx]sub_entry_nums = entry_nums[idx]sub_attr_nums = attr_nums[idx]status, data = func(self.fname, sub_attr_nums, sub_entry_nums,len(sub_attr_nums), max_num, len(self.fname))if (status == ).all():if data_offset is not None:data 
= data.astype(int)idx, idy, = np.where(data < )data[idx, idy] += data_offsetself._process_return_multi_z_attr(data, sub_names,sub_var_names, sub_num_elems)else:idx, = np.where(status != )raise IOError(fortran_cdf.statusreporter(status[idx][]))", "docstring": "Calls Fortran function that reads attribute data.\n\n data_offset translates unsigned into signed.\n If number read in is negative, offset added.", "id": "f13658:c0:m11"} {"signature": "def _process_return_multi_z_attr(self, data, attr_names, var_names, sub_num_elems):", "body": "for i, (attr_name, var_name, num_e) in enumerate(zip(attr_names, var_names, sub_num_elems)):if var_name not in self.meta.keys():self.meta[var_name] = {}if num_e == :self.meta[var_name][attr_name] = data[i, ]else:if data[i].dtype == '':self.meta[var_name][attr_name] = ''.join(data[i, :num_e].astype('')).rstrip()else:self.meta[var_name][attr_name] = data[i, :num_e]", "docstring": "process and attach data from fortran_cdf.get_multi_*", "id": "f13658:c0:m12"} {"signature": "def to_pysat(self, flatten_twod=True, units_label='', name_label='',fill_label='', plot_label='', min_label='', max_label='', notes_label='', desc_label='',axis_label = ''):", "body": "import stringimport pysatimport pandascdata = self.data.copy()meta = pysat.Meta(pysat.DataFrame.from_dict(self.meta, orient=''),units_label=units_label, name_label=name_label,fill_label=fill_label, plot_label=plot_label,min_label=min_label, max_label=max_label,notes_label=notes_label, desc_label=desc_label,axis_label=axis_label)lower_names = [name.lower() for name in meta.keys()] for name, true_name in zip(lower_names, meta.keys()):if name == '':meta.data.rename(index={true_name: ''}, inplace=True)epoch = cdata.pop(true_name)cdata[''] = epochtwo_d_data = []drop_list = []for name in cdata.keys():temp = np.shape(cdata[name])if len(temp) == :if not flatten_twod:frame = pysat.DataFrame(cdata[name].flatten(), columns=[name])drop_list.append(name)step = temp[]new_list = []new_index = np.arange(step)for i in np.arange(len(epoch)):new_list.append(frame.iloc[i*step:(i+)*step, :])new_list[-].index = new_indexnew_frame = pandas.Series(new_list, index=epoch, name=name)two_d_data.append(new_frame)else:new_names = [name + ''.format(i=i) for i in np.arange(temp[] - )]new_names.append(name + '')new_names.insert(, name)drop_list.append(name)frame = pysat.DataFrame(cdata[name].T,index=epoch,columns=new_names)two_d_data.append(frame)for name in drop_list:_ = cdata.pop(name)data = pysat.DataFrame(cdata, index=epoch)two_d_data.append(data)data = pandas.concat(two_d_data, axis=)data.drop('', axis=, inplace=True)return data, meta", "docstring": "Exports loaded CDF data into data, meta for pysat module\n\nNotes\n-----\nThe *_labels should be set to the values in the file, if present.\nNote that once the meta object returned from this function is attached\nto a pysat.Instrument object then the *_labels on the Instrument\nare assigned to the newly attached Meta object.\n\nThe pysat Meta object will use data with labels that match the patterns\nin *_labels even if the case does not match.\n\nParameters\n----------\nflatten_twod : bool (True)\n If True, then two dimensional data is flattened across \n columns. Name mangling is used to group data, first column\n is 'name', last column is 'name_end'. In between numbers are \n appended 'name_1', 'name_2', etc. All data for a given 2D array\n may be accessed via, data.ix[:,'item':'item_end']\n If False, then 2D data is stored as a series of DataFrames, \n indexed by Epoch. 
data.ix[0, 'item']\nunits_label : str\n Identifier within metadata for units. Defaults to CDAWeb standard.\nname_label : str\n Identifier within metadata for variable name. Defaults to 'long_name',\n not normally present within CDAWeb files. If not, will use values\n from the variable name in the file.\nfill_label : str\n Identifier within metadata for Fill Values. Defaults to CDAWeb standard.\nplot_label : str\n Identifier within metadata for variable name used when plotting.\n Defaults to CDAWeb standard.\nmin_label : str\n Identifier within metadata for minimum variable value. \n Defaults to CDAWeb standard.\nmax_label : str\n Identifier within metadata for maximum variable value.\n Defaults to CDAWeb standard.\nnotes_label : str\n Identifier within metadata for notes. Defaults to CDAWeb standard.\ndesc_label : str\n Identifier within metadata for a variable description.\n Defaults to CDAWeb standard.\naxis_label : str\n Identifier within metadata for axis name used when plotting. \n Defaults to CDAWeb standard.\n\n\nReturns\n-------\npandas.DataFrame, pysat.Meta\n Data and Metadata suitable for attachment to a pysat.Instrument\n object.", "id": "f13658:c0:m13"} {"signature": "def setup(self):", "body": "", "docstring": "Runs before every method to create a clean testing setup.", "id": "f13659:c0:m0"} {"signature": "def teardown(self):", "body": "", "docstring": "Runs after every method to clean up previous testing.", "id": "f13659:c0:m1"} {"signature": "def __init__(self, *args, **kwargs):", "body": "kwargs[''] = Bucketsuper(S3Connection, self).__init__(*args, **kwargs)", "docstring": "Set the base class for bucket objects created in the connection to\n the MimicDB bucket class.", "id": "f13661:c0:m0"} {"signature": "def get_all_buckets(self, *args, **kwargs):", "body": "if kwargs.pop('', None):buckets = super(S3Connection, self).get_all_buckets(*args, **kwargs)for bucket in buckets:mimicdb.backend.sadd(tpl.connection, bucket.name)return bucketsreturn [Bucket(self, bucket) for bucket in mimicdb.backend.smembers(tpl.connection)]", "docstring": "Return a list of buckets in MimicDB.\n\n :param boolean force: If true, API call is forced to S3", "id": "f13661:c0:m1"} {"signature": "def get_bucket(self, bucket_name, validate=True, headers=None, force=None):", "body": "if force:bucket = super(S3Connection, self).get_bucket(bucket_name, validate, headers)mimicdb.backend.sadd(tpl.connection, bucket.name)return bucketif mimicdb.backend.sismember(tpl.connection, bucket_name):return Bucket(self, bucket_name)else:if validate:raise S3ResponseError(, '')else:return Bucket(self, bucket_name)", "docstring": "Return a bucket from MimicDB if it exists. 
Return an\n S3ResponseError if the bucket does not exist and validate is passed.\n\n :param boolean force: If true, API call is forced to S3", "id": "f13661:c0:m2"} {"signature": "def create_bucket(self, *args, **kwargs):", "body": "bucket = super(S3Connection, self).create_bucket(*args, **kwargs)if bucket:mimicdb.backend.sadd(tpl.connection, bucket.name)return bucket", "docstring": "Add the bucket to MimicDB after successful creation.", "id": "f13661:c0:m3"} {"signature": "def delete_bucket(self, *args, **kwargs):", "body": "super(S3Connection, self).delete_bucket(*args, **kwargs)bucket = kwargs.get('', args[] if args else None)if bucket:mimicdb.backend.srem(tpl.connection, bucket)", "docstring": "Delete the bucket on S3 before removing it from MimicDB.\n If the delete fails (usually because the bucket is not empty), do\n not remove the bucket from the set.", "id": "f13661:c0:m4"} {"signature": "def sync(self, *buckets):", "body": "if buckets:for _bucket in buckets:for key in mimicdb.backend.smembers(tpl.bucket % _bucket):mimicdb.backend.delete(tpl.key % (_bucket, key))mimicdb.backend.delete(tpl.bucket % _bucket)bucket = self.get_bucket(_bucket, force=True)for key in bucket.list(force=True):mimicdb.backend.sadd(tpl.bucket % bucket.name, key.name)mimicdb.backend.hmset(tpl.key % (bucket.name, key.name), dict(size=key.size, md5=key.etag.strip('')))else:for bucket in mimicdb.backend.smembers(tpl.connection):for key in mimicdb.backend.smembers(tpl.bucket % bucket):mimicdb.backend.delete(tpl.key % (bucket, key))mimicdb.backend.delete(tpl.bucket % bucket)for bucket in self.get_all_buckets(force=True):for key in bucket.list(force=True):mimicdb.backend.sadd(tpl.bucket % bucket.name, key.name)mimicdb.backend.hmset(tpl.key % (bucket.name, key.name), dict(size=key.size, md5=key.etag.strip('')))", "docstring": "Sync either a list of buckets or the entire connection.\n\n Force all API calls to S3 and populate the database with the current\n state of S3.\n\n :param \*string \*buckets: Buckets to sync", "id": "f13661:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "bucket = kwargs.get('', args[] if args else None)name = kwargs.get('', args[] if len(args) > else None)self._name = nameif name and bucket:meta = mimicdb.backend.hgetall(tpl.key % (bucket.name, name))if meta:mimicdb.backend.sadd(tpl.bucket % bucket.name, name)self._load_meta(meta[''], meta[''])super(Key, self).__init__(*args, **kwargs)", "docstring": "Add the key to the bucket set if the key name is set and metadata is\n available for it, otherwise wait until uploaded or downloaded.", "id": "f13662:c0:m0"} {"signature": "def _load_meta(self, size, md5):", "body": "if not hasattr(self, ''):self.local_hashes = {}self.size = int(size)if (re.match('', md5)):self.md5 = md5", "docstring": "Set key attributes to retrieved metadata. Might be extended in the\n future to support more attributes.", "id": "f13662:c0:m1"} {"signature": "@name.setterdef name(self, value):", "body": "self._name = valueif value:meta = mimicdb.backend.hgetall(tpl.key % (self.bucket.name, value))if meta:mimicdb.backend.sadd(tpl.bucket % self.bucket.name, value)self._load_meta(meta[''], meta[''])", "docstring": "Key name can be set by Key.key or Key.name. Key.key sets Key.name\n internally, so just handle this property. When changing the key\n name, try to load its metadata from MimicDB. 
If it's not available,\n the key hasn't been uploaded, downloaded or synced so don't add it to\n the bucket set (it also might have just been deleted,\n see boto.s3.bucket.py#785)", "id": "f13662:c0:m3"} {"signature": "def _send_file_internal(self, *args, **kwargs):", "body": "super(Key, self)._send_file_internal(*args, **kwargs)mimicdb.backend.sadd(tpl.bucket % self.bucket.name, self.name)mimicdb.backend.hmset(tpl.key % (self.bucket.name, self.name),dict(size=self.size, md5=self.md5))", "docstring": "Called internally for any type of upload. After upload finishes,\n make sure the key is in the bucket set and save the metadata.", "id": "f13662:c0:m4"} {"signature": "def _get_file_internal(self, *args, **kwargs):", "body": "super(Key, self)._get_file_internal(*args, **kwargs)mimicdb.backend.sadd(tpl.bucket % self.bucket.name, self.name)mimicdb.backend.hmset(tpl.key % (self.bucket.name, self.name),dict(size=self.size, md5=self.md5))", "docstring": "Called internally for any type of download. After download finishes,\n make sure the key is in the bucket set and save the metadata.", "id": "f13662:c0:m5"} {"signature": "def __init__(self, *args, **kwargs):", "body": "kwargs[''] = Keysuper(Bucket, self).__init__(*args, **kwargs)", "docstring": "Set the class for key objects created in the bucket to the MimicDB\n key class.", "id": "f13663:c0:m0"} {"signature": "def __iter__(self, *args, **kwargs):", "body": "return self.list()", "docstring": "__iter__ can not be forced to check S3, so return an iterable of\n keys from MimicDB.", "id": "f13663:c0:m1"} {"signature": "def get_key(self, *args, **kwargs):", "body": "if kwargs.pop('', None):headers = kwargs.get('', {})headers[''] = Truekwargs[''] = headersreturn super(Bucket, self).get_key(*args, **kwargs)", "docstring": "Return the key from MimicDB.\n\n :param boolean force: If true, API call is forced to S3", "id": "f13663:c0:m2"} {"signature": "def _get_key_internal(self, *args, **kwargs):", "body": "if args[] is not None and '' in args[]:key, res = super(Bucket, self)._get_key_internal(*args, **kwargs)if key:mimicdb.backend.sadd(tpl.bucket % self.name, key.name)mimicdb.backend.hmset(tpl.key % (self.name, key.name),dict(size=key.size,md5=key.etag.strip('')))return key, reskey = Noneif mimicdb.backend.sismember(tpl.bucket % self.name, args[]):key = Key(self)key.name = args[]return key, None", "docstring": "Return None if key is not in the bucket set.\n\n Pass 'force' in the headers to check S3 for the key, and after fetching\n the key from S3, save the metadata and key to the bucket set.", "id": "f13663:c0:m3"} {"signature": "def get_all_keys(self, *args, **kwargs):", "body": "if kwargs.pop('', None):headers = kwargs.get('', args[] if len(args) else None) or dict()headers[''] = Truekwargs[''] = headersreturn super(Bucket, self).get_all_keys(*args, **kwargs)", "docstring": "Return a list of keys from MimicDB.\n\n :param boolean force: If true, API call is forced to S3", "id": "f13663:c0:m4"} {"signature": "def delete_keys(self, *args, **kwargs):", "body": "ikeys = iter(kwargs.get('', args[] if args else []))while True:try:key = ikeys.next()except StopIteration:breakif isinstance(key, basestring):mimicdb.backend.srem(tpl.bucket % self.name, key)mimicdb.backend.delete(tpl.key % (self.name, key))elif isinstance(key, BotoKey) or isinstance(key, Key):mimicdb.backend.srem(tpl.bucket % self.name, key.name)mimicdb.backend.delete(tpl.key % (self.name, key.name))return super(Bucket, self).delete_keys(*args, **kwargs)", "docstring": "Remove each key or key name 
in an iterable from the bucket set.", "id": "f13663:c0:m5"} {"signature": "def _delete_key_internal(self, *args, **kwargs):", "body": "mimicdb.backend.srem(tpl.bucket % self.name, args[])mimicdb.backend.delete(tpl.key % (self.name, args[]))return super(Bucket, self)._delete_key_internal(*args, **kwargs)", "docstring": "Remove key name from bucket set.", "id": "f13663:c0:m6"} {"signature": "def list(self, *args, **kwargs):", "body": "if kwargs.pop('', None):headers = kwargs.get('', args[] if len(args) > else None) or dict()headers[''] = Truekwargs[''] = headersfor key in super(Bucket, self).list(*args, **kwargs):yield keyelse:prefix = kwargs.get('', args[] if args else '')for key in mimicdb.backend.smembers(tpl.bucket % self.name):if key.startswith(prefix):k = Key(self, key)meta = mimicdb.backend.hgetall(tpl.key % (self.name, key))if meta:k._load_meta(meta[''], meta[''])yield k", "docstring": "Return an iterable of keys from MimicDB.\n\n :param boolean force: If true, API call is forced to S3", "id": "f13663:c0:m7"} {"signature": "def _get_all(self, *args, **kwargs):", "body": "headers = kwargs.get('', args[] if len(args) > else None) or dict()if '' in headers:keys = super(Bucket, self)._get_all(*args, **kwargs)for key in keys:mimicdb.backend.sadd(tpl.bucket % self.name, key.name)mimicdb.backend.hmset(tpl.key % (self.name, key.name), dict(size=key.size, md5=key.etag.strip('')))key.name = key.namereturn keysprefix = kwargs.get('', '')return list(self.list(prefix=prefix))", "docstring": "If 'force' is in the headers, retrieve the list of keys from S3.\n Otherwise, use the list() function to retrieve the keys from MimicDB.", "id": "f13663:c0:m8"} {"signature": "def sync(self):", "body": "for key in mimicdb.backend.smembers(tpl.bucket % self.name):mimicdb.backend.delete(tpl.key % (self.name, key))mimicdb.backend.delete(tpl.bucket % self.name)mimicdb.backend.sadd(tpl.connection, self.name)for key in self.list(force=True):mimicdb.backend.sadd(tpl.bucket % self.name, key.name)mimicdb.backend.hmset(tpl.key % (self.name, key.name), dict(size=key.size, md5=key.etag.strip('')))", "docstring": "Sync a bucket.\n\n Force all API calls to S3 and populate the database with the current state of S3.", "id": "f13663:c0:m9"} {"signature": "def __init__(self, backend=None, namespace=None):", "body": "if not backend:from .backends.default import Redisbackend = Redis()globals()[''] = backendif namespace:from .backends import tpltpl.set_namespace(namespace)", "docstring": "Initialze the MimicDB backend with an optional namespace.", "id": "f13664:c0:m0"} {"signature": "def display(content):", "body": "if isinstance(content, gp.GPServer):IPython.display.display(GPAuthWidget(content))elif isinstance(content, gp.GPTask):IPython.display.display(GPTaskWidget(content))elif isinstance(content, gp.GPJob):IPython.display.display(GPJobWidget(content))else:IPython.display.display(content)", "docstring": "Display a widget, text or other media in a notebook without the need to import IPython at the top level.\n\nAlso handles wrapping GenePattern Python Library content in widgets.\n:param content:\n:return:", "id": "f13676:m2"} {"signature": "def register(self, server, username, password):", "body": "session = gp.GPServer(server, username, password)valid_username = username != \"\" and username is not Noneindex = self._get_index(server)new_server = index == -if valid_username and new_server:self.sessions.append(session)if valid_username and not new_server:self.sessions[index] = sessionreturn session", "docstring": "Register a 
new GenePattern server session for the provided\nserver, username and password. Return the session.\n:param server:\n:param username:\n:param password:\n:return:", "id": "f13676:c0:m0"} {"signature": "def get(self, server):", "body": "if isinstance(server, int):if server >= len(self.sessions):return Noneelse:return self.sessions[server]index = self._get_index(server)if index == -:return Noneelse:return self.sessions[index]", "docstring": "Returns a registered GPServer object with a matching GenePattern server url or index\nReturns None if no matching result was found\n:param server:\n:return:", "id": "f13676:c0:m1"} {"signature": "def clean(self):", "body": "self.sessions = []", "docstring": "Clear all GenePattern sessions from the sessions list\n:return:", "id": "f13676:c0:m2"} {"signature": "def _get_index(self, server_url):", "body": "for i in range(len(self.sessions)):session = self.sessions[i]if session.url == server_url:return ireturn -", "docstring": "Returns a registered GPServer object with a matching GenePattern server url\nReturns -1 if no matching result was found\n:param server_url:\n:return:", "id": "f13676:c0:m3"} {"signature": "def get_data_files():", "body": "return [('', ['',]),('',['' + f for f in os.listdir('')]),('', ['']),]", "docstring": "Get the data files for the package.", "id": "f13679:m0"} {"signature": "def get_number_of_app_ports(app):", "body": "mode = _get_networking_mode(app)ports_list = Noneif mode == '':ports_list = _get_port_definitions(app)elif mode == '':ports_list = _get_port_definitions(app)if ports_list is None:ports_list = _get_container_port_mappings(app)elif mode == '':ports_list = _get_ip_address_discovery_ports(app)if not ports_list:ports_list = _get_container_port_mappings(app)else:raise RuntimeError(\"\".format(mode))return len(ports_list)", "docstring": "Get the number of ports for the given app JSON. 
This roughly follows the\nlogic in marathon-lb for finding app IPs/ports, although we are only\ninterested in the quantity of ports an app should have and don't consider\nthe specific IPs/ports of individual tasks:\nhttps://github.com/mesosphere/marathon-lb/blob/v1.10.3/utils.py#L393-L415\n\n:param app: The app JSON from the Marathon API.\n:return: The number of ports for the app.", "id": "f13680:m0"} {"signature": "def _get_networking_mode(app):", "body": "networks = app.get('')if networks:return networks[-].get('', '')container = app.get('')if container is not None and '' in container:docker_network = container[''].get('')if docker_network == '':return ''elif docker_network == '':return ''return '' if _is_legacy_ip_per_task(app) else ''", "docstring": "Get the Marathon networking mode for the app.", "id": "f13680:m1"} {"signature": "def _get_container_port_mappings(app):", "body": "container = app['']port_mappings = container.get('')if port_mappings is None and '' in container:port_mappings = container[''].get('')return port_mappings", "docstring": "Get the ``portMappings`` field for the app container.", "id": "f13680:m2"} {"signature": "def _get_port_definitions(app):", "body": "if '' in app:return app['']if '' in app:return app['']return None", "docstring": "Get the ``portDefinitions`` field for the app if present.", "id": "f13680:m3"} {"signature": "def _get_ip_address_discovery_ports(app):", "body": "if not _is_legacy_ip_per_task(app):return Nonereturn app['']['']['']", "docstring": "Get the ports from the ``ipAddress`` field for the app if present.", "id": "f13680:m4"} {"signature": "def _is_legacy_ip_per_task(app):", "body": "return app.get('') is not None", "docstring": "Return whether the application is using IP-per-task on Marathon < 1.5.\n:param app: The application to check.\n:return: True if using IP per task, False otherwise.", "id": "f13680:m5"} {"signature": "def marathon_timestamp():", "body": "return datetime.utcnow().strftime('')[:-] + ''", "docstring": "Make a Marathon/JodaTime-like timestamp string in ISO8601 format with\nmilliseconds for the current time in UTC.", "id": "f13681:m0"} {"signature": "def check_called_get_apps(self):", "body": "was_called, self._called_get_apps = self._called_get_apps, Falsereturn was_called", "docstring": "Check and reset the ``_called_get_apps`` flag.", "id": "f13681:c1:m1"} {"signature": "def check_signalled_hup(self):", "body": "was_signalled, self._signalled_hup = self._signalled_hup, Falsereturn was_signalled", "docstring": "Check and reset the ``_signalled_hup`` flag.", "id": "f13681:c2:m1"} {"signature": "def check_signalled_usr1(self):", "body": "was_signalled, self._signalled_usr1 = self._signalled_usr1, Falsereturn was_signalled", "docstring": "Check and reset the ``_signalled_usr1`` flag.", "id": "f13681:c2:m2"} {"signature": "def get_kv_data(self, path):", "body": "return self._kv_data.get(path)", "docstring": "Read KV data at the given path. Returns the data and metadata.", "id": "f13683:c0:m1"} {"signature": "def set_kv_data(self, path, data):", "body": "existing_data = self.get_kv_data(path)if existing_data is not None:existing_version = existing_data['']['']value = self._kv_v2(data, existing_version + )else:value = self._kv_v2(data)self._kv_data[path] = valuereturn value['']", "docstring": "Create or update KV data at the given path. 
Returns the metadata for\nthe newly stored data.", "id": "f13683:c0:m2"} {"signature": "def set_pre_create_update(self, cb):", "body": "self._pre_create_update = cb", "docstring": "Set a 0-args callback that will be called before any create or update\nrequest is processed in any way. This is useful for intercepting writes\nto simulate concurrent access to Vault.", "id": "f13683:c1:m1"} {"signature": "def IsJsonResponseWithCode(code):", "body": "return MatchesStructure(code=Equals(code),headers=HasHeader('', ['']))", "docstring": "Match the status code on a treq.response object and check that a header is\nset to indicate that the content type is JSON.", "id": "f13687:m0"} {"signature": "def WithErrorTypeAndMessage(error_type, message):", "body": "return MatchesAll(MatchesStructure(value=IsInstance(error_type)),After(methodcaller(''), Equals(message)))", "docstring": "Check that a Twisted failure was caused by a certain error type with a\ncertain message.", "id": "f13687:m1"} {"signature": "def matches_time_or_just_before(time, tolerance=timedelta(seconds=)):", "body": "return MatchesAll(GreaterThan(time - tolerance),MatchesAny(LessThan(time), Equals(time)))", "docstring": "Match a time to be equal to a certain time or just before it. Useful when\nchecking for a time that is now +/- some amount of time.", "id": "f13687:m2"} {"signature": "def __init__(self, key, values):", "body": "super(HasHeader, self).__init__(values)self.key = key", "docstring": "Checks for a certain header with certain values in the headers of a\nresponse or request. Note that headers may be specified multiple times\nand that the order of repeated headers is important.\n\n:param str key:\n The header name/key.\n:param list values:\n The list of values for the header.", "id": "f13687:c0:m0"} {"signature": "def match(self, headers):", "body": "if not headers.hasHeader(self.key):headers_content = text_content(repr(dict(headers.getAllRawHeaders())))return Mismatch('' % (self.key,),details={'': headers_content})raw_values = headers.getRawHeaders(self.key)return super(HasHeader, self).match(raw_values)", "docstring": ":param twisted.web.http_headers.Headers headers:\n The response or request headers object.", "id": "f13687:c0:m2"} {"signature": "def read_request_json(request):", "body": "return json.loads(request.content.read().decode(''))", "docstring": "Read the body of a request and decode it as JSON. The counterpart to\n``marathon_acme.server.write_request_json`` but only used in tests.", "id": "f13690:m0"} {"signature": "def IsSseResponse():", "body": "return MatchesStructure(code=Equals(),headers=HasHeader('', ['']))", "docstring": "Match a status code of 200 on a treq.response object and check that a\nheader is set to indicate that the content type is an event stream.", "id": "f13691:m2"} {"signature": "def IsMarathonEvent(event_type, **kwargs):", "body": "matching_dict = {'': Equals(event_type),'': After(_parse_marathon_event_timestamp,matches_time_or_just_before(datetime.utcnow()))}matching_dict.update(kwargs)return MatchesDict(matching_dict)", "docstring": "Match a dict (deserialized from JSON) as a Marathon event. 
Matches the\nevent type and checks for a recent timestamp.\n\n:param event_type: The event type ('eventType' field value)\n:param kwargs: Any other matchers to apply to the dict", "id": "f13691:m3"} {"signature": "def _parse_marathon_event_timestamp(timestamp):", "body": "return datetime.strptime(timestamp, '')", "docstring": "Parse Marathon's ISO8601-like timestamps into a datetime.", "id": "f13691:m4"} {"signature": "def _parse_field_value(line):", "body": "if line.startswith(''):return None, Noneif '' not in line:return line, ''field, value = line.split('', )value = value[:] if value.startswith('') else valuereturn field, value", "docstring": "Parse the field and value from a line.", "id": "f13695:m0"} {"signature": "def __init__(self, handler, max_length=MAX_LENGTH, timeout=None, reactor=None):", "body": "self._handler = handlerself._max_length = max_lengthself._timeout = timeoutif reactor is None:from twisted.internet import reactor as _reactorreactor = _reactorself._reactor = reactorself._waiting = []self._buffer = b''self._reset_event_data()", "docstring": ":param handler:\n A 2-args callable that will be called back with the event and data\n when a complete message is received.\n:param int max_length:\n The maximum length in bytes of a single line in an SSE event that\n will be accepted.\n:param float timeout:\n Amount of time in seconds to wait for some data to be received\n before timing out. (Default: None - no timeout).\n:param reactor:\n Reactor to use to timeout the connection.", "id": "f13695:c0:m0"} {"signature": "def _abortConnection(self):", "body": "transport = self.transportif isinstance(transport, TransportProxyProducer):transport = transport._producerif hasattr(transport, ''):transport.abortConnection()else:self.log.error(''.format(transport))", "docstring": "We need a way to close the connection when an event line is too long\nor if we time out waiting for an event. This is normally done by\ncalling :meth:`~twisted.internet.interfaces.ITransport.loseConnection``\nor :meth:`~twisted.internet.interfaces.ITCPTransport.abortConnection`,\nbut newer versions of Twisted make this complicated.\n\nDespite what the documentation says for\n:class:`twisted.internet.protocol.Protocol`, the ``transport``\nattribute is not necessarily a\n:class:`twisted.internet.interfaces.ITransport`. Looking at the\ndocumentation for :class:`twisted.internet.interfaces.IProtocol`, the\n``transport`` attribute is actually not defined and neither is the\ntype of the ``transport`` parameter to\n:meth:`~twisted.internet.interfaces.IProtocol.makeConnection`.\n\n``SseProtocol`` will most often be used with HTTP requests initiated\nwith :class:`twisted.web.client.Agent` which, in newer versions of\nTwisted, ends up giving us a\n:class:`twisted.web._newclient.TransportProxyProducer` for our\n``transport``. This is just a\n:class:`twisted.internet.interfaces.IPushProducer` that wraps the\nactual transport. 
If our transport is one of these, try call\n``abortConnection()`` on the underlying transport.", "id": "f13695:c0:m3"} {"signature": "def when_finished(self):", "body": "d = Deferred()self._waiting.append(d)return d", "docstring": "Get a deferred that will be fired when the connection is closed.", "id": "f13695:c0:m5"} {"signature": "def dataReceived(self, data):", "body": "self.resetTimeout()lines = (self._buffer + data).splitlines()if data.endswith(b'') or data.endswith(b''):self._buffer = b''else:self._buffer = lines.pop(-)for line in lines:if self.transport.disconnecting:returnif len(line) > self._max_length:self.lineLengthExceeded(line)returnelse:self.lineReceived(line)if len(self._buffer) > self._max_length:self.lineLengthExceeded(self._buffer)return", "docstring": "Translates bytes into lines, and calls lineReceived.\n\nCopied from ``twisted.protocols.basic.LineOnlyReceiver`` but using\nstr.splitlines() to split on ``\\r\\n``, ``\\n``, and ``\\r``.", "id": "f13695:c0:m6"} {"signature": "def _handle_field_value(self, field, value):", "body": "if field == '':self._event = valueelif field == '':self._data_lines.append(value)elif field == '':passelif field == '':pass", "docstring": "Handle the field, value pair.", "id": "f13695:c0:m9"} {"signature": "def _dispatch_event(self):", "body": "data = self._prepare_data()if data is not None:self._handler(self._event, data)self._reset_event_data()", "docstring": "Dispatch the event to the handler.", "id": "f13695:c0:m10"} {"signature": "def _prepare_data(self):", "body": "if not self._data_lines:return Nonereturn ''.join(self._data_lines)", "docstring": "Join the data lines into a single string for delivery to the callback.", "id": "f13695:c0:m11"} {"signature": "def main(reactor, argv=sys.argv[:], env=os.environ,acme_url=LETSENCRYPT_DIRECTORY.asText()):", "body": "parser = argparse.ArgumentParser(description='')parser.add_argument('', '',help='''',default=acme_url)parser.add_argument('', '',help='''')parser.add_argument('', '', metavar='',help='''',default='')parser.add_argument('', '', metavar='',help='''',default='')parser.add_argument('', '',help='''',default='')parser.add_argument('',help=(''''''),action='')parser.add_argument('',help='''',default='')parser.add_argument('',help=(''''''''),type=float,default=)parser.add_argument('',help=(''''''),type=float,default=)parser.add_argument('',help='''',choices=['', '', '', '', ''],default=''),parser.add_argument('',help=(''''''),action='')parser.add_argument('', metavar='',help=(''''''''))parser.add_argument('', action='', version=__version__)args = parser.parse_args(argv)init_logging(args.log_level)marathon_addrs = args.marathon.split('')mlb_addrs = args.lb.split('')sse_timeout = args.sse_timeout if args.sse_timeout > else Noneacme_url = URL.fromText(_to_unicode(args.acme))endpoint_description = parse_listen_addr(args.listen)log_args = [('', args.storage_path),('', args.vault),('', acme_url),('', args.email),('', args.allow_multiple_certs),('', marathon_addrs),('', sse_timeout),('', mlb_addrs),('', args.group),('', endpoint_description),]log_args = [''.format(k, v) for k, v in log_args]log.info(''.format(__version__, ''.join(log_args)))if args.vault:key_d, cert_store = init_vault_storage(reactor, env, args.storage_path)else:key_d, cert_store = init_file_storage(args.storage_path)key_d.addCallback(create_txacme_client_creator, reactor, acme_url)key_d.addCallback(create_marathon_acme, cert_store, args.email,args.allow_multiple_certs, marathon_addrs, args.marathon_timeout,sse_timeout, mlb_addrs, 
args.group, reactor)return key_d.addCallback(lambda ma: ma.run(endpoint_description))", "docstring": "A tool to automatically request, renew and distribute Let's Encrypt\ncertificates for apps running on Marathon and served by marathon-lb.", "id": "f13696:m0"} {"signature": "def parse_listen_addr(listen_addr):", "body": "if '' not in listen_addr:raise ValueError(\"\"'' % (listen_addr,))host, port = listen_addr.rsplit('', )if host == '':protocol = ''interface = Noneelse:if host.startswith('') and host.endswith(''): host = host[:-]ip_address = ipaddress.ip_address(_to_unicode(host))protocol = '' if ip_address.version == else ''interface = str(ip_address)if not port.isdigit() or int(port) < or int(port) > :raise ValueError(\"\" % (port,))args = [protocol, port]kwargs = {'': interface} if interface is not None else {}return _create_tx_endpoints_string(args, kwargs)", "docstring": "Parse an address of the form [ipaddress]:port into a tcp or tcp6 Twisted\nendpoint description string for use with\n``twisted.internet.endpoints.serverFromString``.", "id": "f13696:m2"} {"signature": "def create_marathon_acme(client_creator, cert_store, acme_email, allow_multiple_certs,marathon_addrs, marathon_timeout, sse_timeout, mlb_addrs, group,reactor):", "body": "marathon_client = MarathonClient(marathon_addrs, timeout=marathon_timeout,sse_kwargs={'': sse_timeout},reactor=reactor)marathon_lb_client = MarathonLbClient(mlb_addrs, reactor=reactor)return MarathonAcme(marathon_client,group,cert_store,marathon_lb_client,client_creator,reactor,acme_email,allow_multiple_certs)", "docstring": "Create a marathon-acme instance.\n\n:param client_creator:\n The txacme client creator function.\n:param cert_store:\n The txacme certificate store instance.\n:param acme_email:\n Email address to use when registering with the ACME service.\n:param allow_multiple_certs:\n Whether to allow multiple certificates per app port.\n:param marathon_addr:\n Address for the Marathon instance to find app domains that require\n certificates.\n:param marathon_timeout:\n Amount of time in seconds to wait for response headers to be received\n from Marathon.\n:param sse_timeout:\n Amount of time in seconds to wait for some event data to be received\n from Marathon.\n:param mlb_addrs:\n List of addresses for marathon-lb instances to reload when a new\n certificate is issued.\n:param group:\n The marathon-lb group (``HAPROXY_GROUP``) to consider when finding\n app domains.\n:param reactor: The reactor to use.", "id": "f13696:m4"} {"signature": "def init_storage_dir(storage_dir):", "body": "storage_path = FilePath(storage_dir)default_cert_path = storage_path.child('')if not default_cert_path.exists():default_cert_path.setContent(generate_wildcard_pem_bytes())unmanaged_certs_path = storage_path.child('')if not unmanaged_certs_path.exists():unmanaged_certs_path.createDirectory()certs_path = storage_path.child('')if not certs_path.exists():certs_path.createDirectory()return storage_path, certs_path", "docstring": "Initialise the storage directory with the certificates directory and a\ndefault wildcard self-signed certificate for HAProxy.\n\n:return: the storage path and certs path", "id": "f13696:m5"} {"signature": "def init_logging(log_level):", "body": "log_level_filter = LogLevelFilterPredicate(LogLevel.levelWithName(log_level))log_level_filter.setLogLevelForNamespace('', LogLevel.warn)log_observer = FilteringLogObserver(textFileLogObserver(sys.stdout), [log_level_filter])globalLogPublisher.addObserver(log_observer)", "docstring": "Initialise the 
logging by adding an observer to the global log publisher.\n\n:param str log_level: The minimum log level to log messages for.", "id": "f13696:m6"} {"signature": "def __init__(self, responder_resource):", "body": "self.responder_resource = responder_resourceself.health_handler = None", "docstring": ":param responder_resource:\n An ``IResponse`` used to respond to ACME HTTP challenge validation\n requests.", "id": "f13697:c0:m0"} {"signature": "def listen(self, reactor, endpoint_description):", "body": "endpoint = serverFromString(reactor, endpoint_description)return endpoint.listen(Site(self.app.resource()))", "docstring": "Run the server, i.e. start listening for requests on the given host and\nport.\n\n:param reactor: The ``IReactorTCP`` to use.\n:param endpoint_description:\n The Twisted description for the endpoint to listen on.\n:return:\n A deferred that returns an object that provides ``IListeningPort``.", "id": "f13697:c0:m1"} {"signature": "@app.route('', branch=True, methods=[''])def acme_challenge(self, request):", "body": "return self.responder_resource", "docstring": "Respond to ACME challenge validation requests on\n``/.well-known/acme-challenge/`` using the ACME responder resource.", "id": "f13697:c0:m2"} {"signature": "@app.route('', methods=[''])def acme_challenge_ping(self, request):", "body": "request.setResponseCode(OK)write_request_json(request, {'': ''})", "docstring": "Respond to requests on ``/.well-known/acme-challenge/ping`` to debug\npath routing issues.", "id": "f13697:c0:m3"} {"signature": "def set_health_handler(self, health_handler):", "body": "self.health_handler = health_handler", "docstring": "Set the handler for the health endpoint.\n\n:param health_handler:\n The handler for health status requests. This must be a callable\n that returns a Health object.", "id": "f13697:c0:m4"} {"signature": "@app.route('', methods=[''])def health(self, request):", "body": "if self.health_handler is None:return self._no_health_handler(request)health = self.health_handler()response_code = OK if health.healthy else SERVICE_UNAVAILABLErequest.setResponseCode(response_code)write_request_json(request, health.json_message)", "docstring": "Listens to incoming health checks from Marathon on ``/health``.", "id": "f13697:c0:m5"} {"signature": "def __init__(self, healthy, json_message={}):", "body": "self.healthy = healthyself.json_message = json_message", "docstring": "Health objects store the current health status of the service.\n\n:param bool healthy:\n The service is either healthy (True) or unhealthy (False).\n:param json_message:\n An object that can be serialized as JSON that will be sent as a\n message when the health status is requested.", "id": "f13697:c1:m0"} {"signature": "def get_single_header(headers, key):", "body": "raw_headers = headers.getRawHeaders(key)if raw_headers is None:return Noneheader, _ = cgi.parse_header(raw_headers[-])return header", "docstring": "Get a single value for the given key out of the given set of headers.\n\n:param twisted.web.http_headers.Headers headers:\n The set of headers in which to look for the header value\n:param str key:\n The header key", "id": "f13698:m0"} {"signature": "def raise_for_status(response):", "body": "http_error_msg = ''if <= response.code < :http_error_msg = '' % (response.code, uridecode(response.request.absoluteURI))elif <= response.code < :http_error_msg = '' % (response.code, uridecode(response.request.absoluteURI))if http_error_msg:raise HTTPError(http_error_msg, response=response)return response", "docstring": 
"Raises a `requests.exceptions.HTTPError` if the response did not succeed.\nAdapted from the Requests library:\nhttps://github.com/kennethreitz/requests/blob/v2.8.1/requests/models.py#L825-L837", "id": "f13698:m1"} {"signature": "def __init__(self, url=None, client=None, timeout=DEFAULT_TIMEOUT,reactor=None):", "body": "self.url = urlself._timeout = timeoutself._client, self._reactor = default_client(reactor, client)", "docstring": "Create a client with the specified default URL.", "id": "f13698:c0:m0"} {"signature": "def _compose_url(self, url, kwargs):", "body": "if url is None:url = self.urlif url is None:raise ValueError('')split_result = urisplit(url)userinfo = split_result.userinfocompose_kwargs = {}for key in ['', '', '', '', '']:if key in kwargs:compose_kwargs[key] = kwargs.pop(key)else:compose_kwargs[key] = getattr(split_result, key)if '' in kwargs:compose_kwargs[''] = kwargs.pop('')else:compose_kwargs[''] = split_result.queryif '' not in kwargs and userinfo is not None:kwargs[''] = tuple(userinfo.split('', ))return uricompose(**compose_kwargs)", "docstring": "Compose a URL starting with the given URL (or self.url if that URL is\nNone) and using the values in kwargs.\n\n:param str url:\n The base URL to use. If None, ``self.url`` will be used instead.\n:param dict kwargs:\n A dictionary of values to override in the base URL. Relevant keys\n will be popped from the dictionary.", "id": "f13698:c0:m3"} {"signature": "def request(self, method, url=None, **kwargs):", "body": "url = self._compose_url(url, kwargs)kwargs.setdefault('', self._timeout)d = self._client.request(method, url, reactor=self._reactor, **kwargs)d.addCallback(self._log_request_response, method, url, kwargs)d.addErrback(self._log_request_error, url)return d", "docstring": "Perform a request.\n\n:param: method:\n The HTTP method to use (example is `GET`).\n:param: url:\n The URL to use. The default value is the URL this client was\n created with (`self.url`) (example is `http://localhost:8080`)\n:param: kwargs:\n Any other parameters that will be passed to `treq.request`, for\n example headers. Or any URL parameters to override, for example\n path, query or fragment.", "id": "f13698:c0:m4"} {"signature": "def HasRequestProperties(method, url, query=None):", "body": "if query is None:query = {}return MatchesStructure(method=Equals(method.encode('')),path=Equals(url.encode('')),uri=After(lambda u: urisplit(u).getquerydict(), Equals(query)))", "docstring": "Check if a HTTP request object has certain properties.\n\nParses the query dict from the request URI rather than using the request\n\"args\" property as the args do not include query parameters that have no\nvalue.\n\n:param str method:\n The HTTP method.\n:param str url:\n The HTTP URL, without any query parameters. 
Should already be percent\n encoded.\n:param dict query:\n A dictionary of HTTP query parameters.", "id": "f13702:m0"} {"signature": "def json_response(request, json_data, response_code=):", "body": "request.setResponseCode(response_code)write_request_json(request, json_data)request.finish()", "docstring": "Set the response code, write encoded JSON, and finish() a request.", "id": "f13703:m0"} {"signature": "def get_client(self, client):", "body": "raise NotImplementedError()", "docstring": "To be implemented by subclass", "id": "f13704:c0:m2"} {"signature": "def add_agent(self, location, agent):", "body": "self.agents[location] = agent", "docstring": "Add an agent for URIs with the specified location.\n:param bytes location:\n The URI authority/location (e.g. b'example.com:80')\n:param agent: The twisted.web.iweb.IAgent to use for the location", "id": "f13704:c1:m1"} {"signature": "def __init__(self, endpoints, *args, **kwargs):", "body": "super(MarathonLbClient, self).__init__(*args, **kwargs)self.endpoints = endpoints", "docstring": ":param endpoints:\n The list of marathon-lb endpoints. All marathon-lb endpoints will\n be called at once for any request.", "id": "f13707:c0:m0"} {"signature": "def _request(self, endpoint, *args, **kwargs):", "body": "kwargs[''] = endpointreturn (super(MarathonLbClient, self).request(*args, **kwargs).addCallback(raise_for_status))", "docstring": "Perform a request to a specific endpoint. Raise an error if the status\ncode indicates a client or server error.", "id": "f13707:c0:m2"} {"signature": "def _check_request_results(self, results):", "body": "responses = []failed_endpoints = []for index, result_tuple in enumerate(results):success, result = result_tupleif success:responses.append(result)else:endpoint = self.endpoints[index]self.log.failure('''', result, LogLevel.error, endpoint=endpoint)responses.append(None)failed_endpoints.append(endpoint)if len(failed_endpoints) == len(self.endpoints):raise RuntimeError('')if failed_endpoints:self.log.error('''', x=len(failed_endpoints), y=len(self.endpoints),endpoints=failed_endpoints)return responses", "docstring": "Check the result of each request that we made. If a failure occurred,\nbut some requests succeeded, log and count the failures. If all\nrequests failed, raise an error.\n\n:return:\n The list of responses, with a None value for any requests that\n failed.", "id": "f13707:c0:m3"} {"signature": "def mlb_signal_hup(self):", "body": "return self.request('', path='')", "docstring": "Trigger a SIGHUP signal to be sent to marathon-lb. Causes a full reload\nof the config as though a relevant event was received from Marathon.", "id": "f13707:c0:m4"} {"signature": "def mlb_signal_usr1(self):", "body": "return self.request('', path='')", "docstring": "Trigger a SIGUSR1 signal to be sent to marathon-lb. 
Causes the existing\nconfig to be reloaded, whether it has changed or not.", "id": "f13707:c0:m5"} {"signature": "def __init__(self, url, token, *args, **kwargs):", "body": "super(VaultClient, self).__init__(*args, url=url, **kwargs)self._token = token", "docstring": ":param url: the URL for Vault\n:param token: the Vault auth token", "id": "f13708:c2:m0"} {"signature": "@classmethoddef from_env(cls, reactor=None, env=os.environ):", "body": "address = env.get('', '')token = env.get('', '')ca_cert = env.get('')tls_server_name = env.get('')client_cert = env.get('')client_key = env.get('')cf = ClientPolicyForHTTPS.from_pem_files(caKey=ca_cert, privateKey=client_key, certKey=client_cert,tls_server_name=tls_server_name)client, reactor = default_client(reactor, contextFactory=cf)return cls(address, token, client=client, reactor=reactor)", "docstring": "Create a Vault client with configuration from the environment. Supports\na limited number of the available config options:\nhttps://www.vaultproject.io/docs/commands/index.html#environment-variables\nhttps://github.com/hashicorp/vault/blob/v0.11.3/api/client.go#L28-L40\n\nSupported:\n- ``VAULT_ADDR``\n- ``VAULT_CACERT``\n- ``VAULT_CLIENT_CERT``\n- ``VAULT_CLIENT_KEY``\n- ``VAULT_TLS_SERVER_NAME``\n- ``VAULT_TOKEN``\n\nNot currently supported:\n- ``VAULT_CAPATH``\n- ``VAULT_CLIENT_TIMEOUT``\n- ``VAULT_MAX_RETRIES``\n- ``VAULT_MFA``\n- ``VAULT_RATE_LIMIT``\n- ``VAULT_SKIP_VERIFY``\n- ``VAULT_WRAP_TTL``", "id": "f13708:c2:m1"} {"signature": "def read(self, path, **params):", "body": "d = self.request('', '' + path, params=params)return d.addCallback(self._handle_response)", "docstring": "Read data from Vault. Returns the JSON-decoded response.", "id": "f13708:c2:m5"} {"signature": "def write(self, path, **data):", "body": "d = self.request('', '' + path, json=data)return d.addCallback(self._handle_response, check_cas=True)", "docstring": "Write data to Vault. 
Returns the JSON-decoded response.", "id": "f13708:c2:m6"} {"signature": "def read_kv2(self, path, version=None, mount_path=''):", "body": "params = {}if version is not None:params[''] = versionread_path = ''.format(mount_path, path)return self.read(read_path, **params)", "docstring": "Read some data from a key/value version 2 secret engine.", "id": "f13708:c2:m7"} {"signature": "def create_or_update_kv2(self, path, data, cas=None, mount_path=''):", "body": "params = {'': {},'': data}if cas is not None:params[''][''] = caswrite_path = ''.format(mount_path, path)return self.write(write_path, **params)", "docstring": "Create or update some data in a key/value version 2 secret engine.\n\n:raises CasError:\n Raises an error if the ``cas`` value, when provided, doesn't match\n Vault's version for the key.", "id": "f13708:c2:m8"} {"signature": "def raise_for_not_ok_status(response):", "body": "if response.code != OK:raise HTTPError('' % (response.code, uridecode(response.request.absoluteURI)))return response", "docstring": "Raises a `requests.exceptions.HTTPError` if the response has a non-200\nstatus code.", "id": "f13710:m0"} {"signature": "def _sse_content_with_protocol(response, handler, **sse_kwargs):", "body": "protocol = SseProtocol(handler, **sse_kwargs)finished = protocol.when_finished()response.deliverBody(protocol)return finished, protocol", "docstring": "Sometimes we need the protocol object so that we can manipulate the\nunderlying transport in tests.", "id": "f13710:m1"} {"signature": "def sse_content(response, handler, **sse_kwargs):", "body": "raise_for_not_ok_status(response)raise_for_header(response, '', '')finished, _ = _sse_content_with_protocol(response, handler, **sse_kwargs)return finished", "docstring": "Callback to collect the Server-Sent Events content of a response. Callbacks\npassed will receive event data.\n\n:param response:\n The response from the SSE request.\n:param handler:\n The handler for the SSE protocol.", "id": "f13710:m2"} {"signature": "def __init__(self, endpoints, sse_kwargs=None, **kwargs):", "body": "super(MarathonClient, self).__init__(**kwargs)self.endpoints = endpointsself._sse_kwargs = {} if sse_kwargs is None else sse_kwargs", "docstring": ":param endpoints:\n A priority-ordered list of Marathon endpoints. Each endpoint will\n be tried one-by-one until the request succeeds or all endpoints\n fail.", "id": "f13710:c0:m0"} {"signature": "def _request(self, failure, endpoints, *args, **kwargs):", "body": "if not endpoints:return failureendpoint = endpoints.pop()d = super(MarathonClient, self).request(*args, url=endpoint, **kwargs)d.addErrback(self._request, endpoints, *args, **kwargs)return d", "docstring": "Recursively make requests to each endpoint in ``endpoints``.", "id": "f13710:c0:m2"} {"signature": "def get_json_field(self, field, **kwargs):", "body": "d = self.request('', headers={'': ''}, **kwargs)d.addCallback(raise_for_status)d.addCallback(raise_for_header, '', '')d.addCallback(json_content)d.addCallback(self._get_json_field, field)return d", "docstring": "Perform a GET request and get the contents of the JSON response.\n\nMarathon's JSON responses tend to contain an object with a single key\nwhich points to the actual data of the response. For example /v2/apps\nreturns something like {\"apps\": [ {\"app1\"}, {\"app2\"} ]}. 
We're\ninterested in the contents of \"apps\".\n\nThis method will raise an error if:\n* There is an error response code\n* The field with the given name cannot be found", "id": "f13710:c0:m4"} {"signature": "def _get_json_field(self, response_json, field_name):", "body": "if field_name not in response_json:raise KeyError('''' % (field_name, json.dumps(response_json),))return response_json[field_name]", "docstring": "Get a JSON field from the response JSON.\n\n:param: response_json:\n The parsed JSON content of the response.\n:param: field_name:\n The name of the field in the JSON to get.", "id": "f13710:c0:m5"} {"signature": "def get_apps(self):", "body": "return self.get_json_field('', path='')", "docstring": "Get the currently running Marathon apps, returning a list of app\ndefinitions.", "id": "f13710:c0:m6"} {"signature": "def get_events(self, callbacks):", "body": "d = self.request('', path='', unbuffered=True,params={'': sorted(callbacks.keys())},headers={'': '','': ''})def handler(event, data):callback = callbacks.get(event)if callback is not None:callback(json.loads(data))return d.addCallback(sse_content, handler, reactor=self._reactor, **self._sse_kwargs)", "docstring": "Attach to Marathon's event stream using Server-Sent Events (SSE).\n\n:param callbacks:\n A dict mapping event types to functions that handle the event data", "id": "f13710:c0:m7"} {"signature": "@classmethoddef from_pem_files(cls, caKey=None, privateKey=None, certKey=None,tls_server_name=None):", "body": "trust_root, client_certificate = None, Noneif caKey:trust_root = ssl.Certificate.loadPEM(FilePath(caKey).getContent())if privateKey and certKey:certPEM = FilePath(certKey).getContent()keyPEM = FilePath(privateKey).getContent()client_certificate = (ssl.PrivateCertificate.loadPEM(certPEM + b'' + keyPEM))return cls(trustRoot=trust_root, clientCertificate=client_certificate,tls_server_name=tls_server_name)", "docstring": "Load certificates from PEM files to create a ClientPolicyForHTTPS\ninstance.\n\n:param caKey:\n Path to the CA certificate file. If not provided, the system trust\n chain will be used.\n:param privateKey:\n Path to the client private key file. If either this or certKey are\n not provided, a client-side certificate will not be used.\n:param certKey:\n Path to the client certificate file. If either this or privateKey\n are not provided, a client-side certificate will not be used.", "id": "f13711:c0:m1"} {"signature": "def sort_pem_objects(pem_objects):", "body": "keys, certs, ca_certs = [], [], []for pem_object in pem_objects:if isinstance(pem_object, pem.Key):keys.append(pem_object)else:if _is_ca(pem_object):ca_certs.append(pem_object)else:certs.append(pem_object)[key], [cert] = keys, certsreturn key, cert, ca_certs", "docstring": "Given a list of pem objects, sort the objects into the private key, leaf\ncertificate, and list of CA certificates in the trust chain. 
This function\nassumes that the list of pem objects will contain exactly one private key\nand exactly one leaf certificate and that only key and certificate type\nobjects are provided.", "id": "f13712:m0"} {"signature": "def _cert_data_to_pem_objects(cert_data):", "body": "pem_objects = []for key in ['', '', '']:pem_objects.extend(pem.parse(cert_data[key].encode('')))return pem_objects", "docstring": "Given a non-None response from the Vault key/value store, convert the\nkey/values into a list of PEM objects.", "id": "f13712:m3"} {"signature": "def store(self, server_name, pem_objects):", "body": "key, cert, ca_certs = sort_pem_objects(pem_objects)data = _cert_data_from_pem_objects(key, cert, ca_certs)self.log.debug(\"\",server_name=server_name)d = self._client.create_or_update_kv2('' + server_name, data, mount_path=self._mount_path)def live_value(cert_response):cert_version = cert_response['']['']return _live_value(cert, cert_version)d.addCallback(live_value)return d.addCallback(self._update_live, server_name)", "docstring": "The procedure for storing certificates is as follows:\n\n1. The new certificate is stored without a CAS parameter. This assumes\n that the certificate we are storing is always up-to-date.\n 1.1 From Vault's response, take the new certificate version:\n ``v_cert_new``.\n2. The live map is read.\n 2.1 The version of the live map is kept: ``v_live``\n 2.2 Check if the certificate version in the live map is\n ``>= v_cert_new``.\n 2.2.1 If so, assume somebody else updated the live map. Finish.\n 2.2.2 If not, continue.\n3. Update the live map and write it with ``cas=v_live``.\n 3.1 If the CAS fails, go back to step 2.", "id": "f13712:c0:m2"} {"signature": "def maybe_key(pem_path):", "body": "acme_key_file = pem_path.child(u'')if acme_key_file.exists():key = _load_pem_private_key_bytes(acme_key_file.getContent())else:key = generate_private_key(u'')acme_key_file.setContent(_dump_pem_private_key_bytes(key))return succeed(JWKRSA(key=key))", "docstring": "Set up a client key if one does not exist already.\n\nhttps://gist.github.com/glyph/27867a478bb71d8b6046fbfb176e1a33#file-local-certs-py-L32-L50\n\n:type pem_path: twisted.python.filepath.FilePath\n:param pem_path:\n The path to the certificate directory to use.\n:rtype: twisted.internet.defer.Deferred", "id": "f13714:m2"} {"signature": "def maybe_key_vault(client, mount_path):", "body": "d = client.read_kv2('', mount_path=mount_path)def get_or_create_key(client_key):if client_key is not None:key_data = client_key['']['']key = _load_pem_private_key_bytes(key_data[''].encode(''))return JWKRSA(key=key)else:key = generate_private_key(u'')key_data = {'': _dump_pem_private_key_bytes(key).decode('')}d = client.create_or_update_kv2('', key_data, mount_path=mount_path)return d.addCallback(lambda _result: JWKRSA(key=key))return d.addCallback(get_or_create_key)", "docstring": "Set up a client key in Vault if one does not exist already.\n\n:param client:\n The Vault API client to use.\n:param mount_path:\n The Vault key/value mount path to use.\n:rtype: twisted.internet.defer.Deferred", "id": "f13714:m3"} {"signature": "def create_txacme_client_creator(key, reactor, url, alg=RS256):", "body": "jws_client = JWSClient(HTTPClient(agent=Agent(reactor)), key, alg)return partial(txacme_Client.from_url, reactor, url, key, alg, jws_client)", "docstring": "Create a creator for txacme clients to provide to the txacme service. See\n``txacme.client.Client.from_url()``. 
We create the underlying JWSClient\nwith a non-persistent pool to avoid\nhttps://github.com/mithrandi/txacme/issues/86.\n\n:return: a callable that returns a deferred that returns the client", "id": "f13714:m4"} {"signature": "def generate_wildcard_pem_bytes():", "body": "key = generate_private_key(u'')name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, u'')])cert = (x509.CertificateBuilder().issuer_name(name).subject_name(name).not_valid_before(datetime.today() - timedelta(days=)).not_valid_after(datetime.now() + timedelta(days=)).serial_number(int(uuid.uuid4())).public_key(key.public_key()).sign(private_key=key,algorithm=hashes.SHA256(),backend=default_backend()))return b''.join((key.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.TraditionalOpenSSL,encryption_algorithm=serialization.NoEncryption()),cert.public_bytes(serialization.Encoding.PEM)))", "docstring": "Generate a wildcard (subject name '*') self-signed certificate valid for\n10 years.\n\nhttps://cryptography.io/en/latest/x509/tutorial/#creating-a-self-signed-certificate\n\n:return: Bytes representation of the PEM certificate data", "id": "f13714:m5"} {"signature": "def parse_domain_label(domain_label):", "body": "return domain_label.replace('', '').split()", "docstring": "Parse the list of comma-separated domains from the app label.", "id": "f13715:m0"} {"signature": "def __init__(self, marathon_client, group, cert_store, mlb_client,txacme_client_creator, reactor, email=None,allow_multiple_certs=False):", "body": "self.marathon_client = marathon_clientself.group = groupself.reactor = reactorresponder = HTTP01Responder()self.server = MarathonAcmeServer(responder.resource)mlb_cert_store = MlbCertificateStore(cert_store, mlb_client)self.txacme_service = AcmeIssuingService(mlb_cert_store, txacme_client_creator, reactor, [responder], email)self._allow_multiple_certs = allow_multiple_certsself._server_listening = None", "docstring": "Create the marathon-acme service.\n\n:param marathon_client: The Marathon API client.\n:param group: The name of the marathon-lb group.\n:param cert_store: The ``ICertificateStore`` instance to use.\n:param mlb_client: The marathon-lb API client.\n:param txacme_client_creator: Callable to create the txacme client.\n:param reactor: The reactor to use.\n:param email: The ACME registration email.\n:param allow_multiple_certs:\n Whether to allow multiple certificates per app port.", "id": "f13715:c0:m0"} {"signature": "def listen_events(self, reconnects=):", "body": "self.log.info('')self._attached = Falsedef on_finished(result, reconnects):self.log.warn('''',reconnects=reconnects)reconnects += return self.listen_events(reconnects)def log_failure(failure):self.log.failure('', failure)return failurereturn self.marathon_client.get_events({'': self._sync_on_event_stream_attached,'': self._sync_on_api_post_event}).addCallbacks(on_finished, log_failure, callbackArgs=[reconnects])", "docstring": "Start listening for events from Marathon, running a sync when we first\nsuccessfully subscribe and triggering a sync on API request events.", "id": "f13715:c0:m3"} {"signature": "def sync(self):", "body": "self.log.info('')def log_success(result):self.log.info('')return resultdef log_failure(failure):self.log.failure('', failure, LogLevel.error)return failurereturn (self.marathon_client.get_apps().addCallback(self._apps_acme_domains).addCallback(self._filter_new_domains).addCallback(self._issue_certs).addCallbacks(log_success, log_failure))", "docstring": "Fetch the list of apps 
from Marathon, find the domains that require\ncertificates, and issue certificates for any domains that don't already\nhave a certificate.", "id": "f13715:c0:m6"} {"signature": "def _issue_cert(self, domain):", "body": "def errback(failure):failure.trap(txacme_ServerError)acme_error = failure.value.messageif acme_error.code in ['', '','', '']:self.log.error('''', code=acme_error.code, domain=domain,detail=acme_error.detail)else:return failured = self.txacme_service.issue_cert(domain)return d.addErrback(errback)", "docstring": "Issue a certificate for the given domain.", "id": "f13715:c0:m11"} {"signature": "def print_file_info():", "body": "tpl = TableLogger(columns='')for f in os.listdir(''):size = os.stat(f).st_sizedate_created = datetime.fromtimestamp(os.path.getctime(f))date_modified = datetime.fromtimestamp(os.path.getmtime(f))tpl(f, date_created, date_modified, size)", "docstring": "Prints file details in the current directory", "id": "f13717:m2"} {"signature": "def __call__(self, value):", "body": "fmt = self.fmt(value)if len(fmt) > self.col_width:fmt = fmt[:self.col_width - ] + ''fmt = self.just(fmt, self.col_width)return fmt", "docstring": "Formats a given value\n\nArgs:\n value: value to format\n\nReturns:\n str: formatted value", "id": "f13718:c0:m1"} {"signature": "def __call__(self, *args):", "body": "if len(self.formatters) == :self.setup(*args)row_cells = []if self.rownum:row_cells.append()if self.timestamp:row_cells.append(datetime.datetime.now())if self.time_diff:row_cells.append()row_cells.extend(args)if len(row_cells) != len(self.formatters):raise ValueError(''.format(len(self.formatters), len(row_cells)))line = self.format_row(*row_cells)self.print_line(line)", "docstring": "Prints a formatted row\n\n Args:\n args: row cells", "id": "f13719:c0:m1"} {"signature": "def setup_formatters(self, *args):", "body": "formatters = []col_offset = if self.rownum:formatters.append(fmt.RowNumberFormatter.setup())col_offset += if self.timestamp:formatters.append(fmt.DatetimeFormatter.setup(datetime.datetime.now(),fmt=''.format,col_width=))col_offset += if self.time_diff:formatters.append(fmt.TimeDeltaFormatter.setup())col_offset += for coli, value in enumerate(args):fmt_class = type2fmt.get(type(value), fmt.GenericFormatter)kwargs = {}if self.default_colwidth is not None:kwargs[''] = self.default_colwidthif coli in self.column_widths:kwargs[''] = self.column_widths[coli]elif self.columns and self.columns[coli + col_offset] in self.column_widths:kwargs[''] = self.column_widths[self.columns[coli + col_offset]]if fmt_class == fmt.FloatFormatter and self.float_format is not None:kwargs[''] = self.float_formatif coli in self.column_formatters:kwargs[''] = self.column_formatters[coli]elif self.columns and self.columns[coli + col_offset] in self.column_formatters:kwargs[''] = self.column_formatters[self.columns[coli + col_offset]]formatter = fmt_class.setup(value, **kwargs)formatters.append(formatter)self.formatters = formatters", "docstring": "Setup formatters by observing the first row.\n\n Args:\n *args: row cells", "id": "f13719:c0:m4"} {"signature": "def setup(self, *args):", "body": "self.setup_formatters(*args)if self.columns:self.print_header()elif self.border and not self.csv:self.print_line(self.make_horizontal_border())", "docstring": "Do preparations before printing the first row\n\n Args:\n *args: first row cells", "id": "f13719:c0:m5"} {"signature": "def csv_format(self, row):", "body": "if PY2:buf = io.BytesIO()csvwriter = 
csv.writer(buf)csvwriter.writerow([c.strip().encode(self.encoding) for c in row])csv_line = buf.getvalue().decode(self.encoding).rstrip()else:buf = io.StringIO()csvwriter = csv.writer(buf)csvwriter.writerow([c.strip() for c in row])csv_line = buf.getvalue().rstrip()return csv_line", "docstring": "Converts row values into a csv line\n\n Args:\n row: a list of row cells as unicode\n Returns:\n csv_line (unicode)", "id": "f13719:c0:m10"} {"signature": "def close(self):", "body": "if self.file is not None:self.file.close()", "docstring": "Closes underlying output file", "id": "f13719:c0:m11"} {"signature": "def strToBool(val):", "body": "if isinstance(val, str):val = val.lower()return val in ['', '', '', True]", "docstring": "Helper function to turn a string representation of \"true\" into\nboolean True.", "id": "f13724:m1"} {"signature": "def get_page_url(page_num, current_app, url_view_name, url_extra_args, url_extra_kwargs, url_param_name, url_get_params, url_anchor):", "body": "if url_view_name is not None:url_extra_kwargs[url_param_name] = page_numtry:url = reverse(url_view_name, args=url_extra_args, kwargs=url_extra_kwargs, current_app=current_app)except NoReverseMatch as e: if settings.SETTINGS_MODULE:if django.VERSION < (, , ):separator = ''else:separator = '' project_name = settings.SETTINGS_MODULE.split('')[]try:url = reverse(project_name + separator + url_view_name, args=url_extra_args, kwargs=url_extra_kwargs, current_app=current_app)except NoReverseMatch:raise e else:raise e else:url = ''url_get_params = url_get_params or QueryDict(url)url_get_params = url_get_params.copy()url_get_params[url_param_name] = str(page_num)if len(url_get_params) > :if not isinstance(url_get_params, QueryDict):tmp = QueryDict(mutable=True)tmp.update(url_get_params)url_get_params = tmpurl += '' + url_get_params.urlencode()if (url_anchor is not None):url += '' + url_anchorreturn url", "docstring": "Helper function to return a valid URL string given the template tag parameters", "id": "f13724:m2"} {"signature": "@register.tagdef bootstrap_paginate(parser, token):", "body": "bits = token.split_contents()if len(bits) < :raise TemplateSyntaxError(\"\"\"\" % bits[])page = parser.compile_filter(bits[])kwargs = {}bits = bits[:]kwarg_re = re.compile(r'')if len(bits):for bit in bits:match = kwarg_re.match(bit)if not match:raise TemplateSyntaxError(\"\")name, value = match.groups()kwargs[name] = parser.compile_filter(value)return BootstrapPaginationNode(page, kwargs)", "docstring": "Renders a Page object as a Twitter Bootstrap styled pagination bar.\nCompatible with Bootstrap 3.x and 4.x only.\n\nExample::\n\n {% bootstrap_paginate page_obj range=10 %}\n\n\nNamed Parameters::\n\n range - The size of the pagination bar (ie, if set to 10 then, at most,\n 10 page numbers will display at any given time) Defaults to\n None, which shows all pages.\n\n\n size - Accepts \"small\", and \"large\". Defaults to\n None which is the standard size.\n\n show_prev_next - Accepts \"true\" or \"false\". Determines whether or not\n to show the previous and next page links. Defaults to\n \"true\"\n\n\n show_first_last - Accepts \"true\" or \"false\". Determines whether or not\n to show the first and last page links. Defaults to\n \"false\"\n\n previous_label - The text to display for the previous page link.\n Defaults to \"←\"\n\n next_label - The text to display for the next page link. Defaults to\n \"→\"\n\n first_label - The text to display for the first page link. 
Defaults to\n \"«\"\n\n last_label - The text to display for the last page link. Defaults to\n \"»\"\n\n url_view_name - The named URL to use. Defaults to None. If None, then the\n default template simply appends the url parameter as a\n relative URL link, eg: 1\n\n url_param_name - The name of the parameter to use in the URL. If\n url_view_name is set to None, this string is used as the\n parameter name in the relative URL path. If a URL\n name is specified, this string is used as the\n parameter name passed into the reverse() method for\n the URL.\n\n url_extra_args - This is used only in conjunction with url_view_name.\n When referencing a URL, additional arguments may be\n passed in as a list.\n\n url_extra_kwargs - This is used only in conjunction with url_view_name.\n When referencing a URL, additional named arguments\n may be passed in as a dictionary.\n\n url_get_params - The other get parameters to pass, only the page\n number will be overwritten. Use this to preserve\n filters.\n\n url_anchor - The anchor to use in URLs. Defaults to None.\n\n extra_pagination_classes - A space separated list of CSS class names\n that will be added to the top level
      \n HTML element. In particular, this can be\n utilized in Bootstrap 4 installations to\n add the appropriate alignment classes from\n Flexbox utilities, eg: justify-content-center", "id": "f13724:m3"} {"signature": "@register.tagdef bootstrap_pager(parser, token):", "body": "bits = token.split_contents()if len(bits) < :raise TemplateSyntaxError(\"\"\"\" % bits[])page = parser.compile_filter(bits[])kwargs = {}bits = bits[:]kwarg_re = re.compile(r'')if len(bits):for bit in bits:match = kwarg_re.match(bit)if not match:raise TemplateSyntaxError(\"\")name, value = match.groups()kwargs[name] = parser.compile_filter(value)return BootstrapPagerNode(page, kwargs)", "docstring": "Renders a Page object as a Twitter Bootstrap styled pager bar.\nCompatible with Bootstrap 2.x and 3.x only.\n\nExample::\n\n {% bootstrap_pager page_obj %}\n\n\nNamed Parameters::\n\n\n previous_label - The label to show for the Previous link (defaults to \"Previous Page\")\n\n next_label - The label to show for the Next link (defaults to \"Next Page\")\n\n previous_title - The link title for the previous link (defaults to \"Previous Page\")\n\n next_title - The link title for the next link (defaults to \"Next Page\")\n\n url_view_name - The named URL to use. Defaults to None. If None, then the\n default template simply appends the url parameter as a\n relative URL link, eg: 1\n\n url_param_name - The name of the parameter to use in the URL. If\n url_view_name is set to None, this string is used as the\n parameter name in the relative URL path. If a URL\n name is specified, this string is used as the\n parameter name passed into the reverse() method for\n the URL.\n\n url_extra_args - This is used only in conjunction with url_view_name.\n When referencing a URL, additional arguments may be\n passed in as a list.\n\n url_extra_kwargs - This is used only in conjunction with url_view_name.\n When referencing a URL, additional named arguments\n may be passed in as a dictionary.\n\n url_get_params - The other get parameters to pass, only the page\n number will be overwritten. Use this to preserve\n filters.\n\n url_anchor - The anchor to use in URLs. Defaults to None.\n\n extra_pager_classes - A space separated list of CSS class names\n that will be added to the top level
        \n HTML element. This could be used to,\n as an example, add a class to prevent\n the pager from showing up when printing.", "id": "f13724:m4"} {"signature": "def assemble_one(asmcode, pc=, fork=DEFAULT_FORK):", "body": "try:instruction_table = instruction_tables[fork]asmcode = asmcode.strip().split('')instr = instruction_table[asmcode[].upper()]if pc:instr.pc = pcif instr.operand_size > :assert len(asmcode) == instr.operand = int(asmcode[], )return instrexcept:raise AssembleError(\"\" % pc)", "docstring": "Assemble one EVM instruction from its textual representation.\n\n :param asmcode: assembly code for one instruction\n :type asmcode: str\n :param pc: program counter of the instruction(optional)\n :type pc: int\n :param fork: fork name (optional)\n :type fork: str\n :return: An Instruction object\n :rtype: Instruction\n\n Example use::\n\n >>> print assemble_one('LT')", "id": "f13733:m0"} {"signature": "def assemble_all(asmcode, pc=, fork=DEFAULT_FORK):", "body": "asmcode = asmcode.split('')asmcode = iter(asmcode)for line in asmcode:if not line.strip():continueinstr = assemble_one(line, pc=pc, fork=fork)yield instrpc += instr.size", "docstring": "Assemble a sequence of textual representation of EVM instructions\n\n :param asmcode: assembly code for any number of instructions\n :type asmcode: str\n :param pc: program counter of the first instruction(optional)\n :type pc: int\n :param fork: fork name (optional)\n :type fork: str\n :return: An generator of Instruction objects\n :rtype: generator[Instructions]\n\n Example use::\n\n >>> assemble_one('''PUSH1 0x60\\n \\\n PUSH1 0x40\\n \\\n MSTORE\\n \\\n PUSH1 0x2\\n \\\n PUSH2 0x108\\n \\\n PUSH1 0x0\\n \\\n POP\\n \\\n SSTORE\\n \\\n PUSH1 0x40\\n \\\n MLOAD\\n \\\n ''')", "id": "f13733:m1"} {"signature": "def disassemble_one(bytecode, pc=, fork=DEFAULT_FORK):", "body": "instruction_table = instruction_tables[fork]if isinstance(bytecode, bytes):bytecode = bytearray(bytecode)if isinstance(bytecode, str):bytecode = bytearray(bytecode.encode(''))bytecode = iter(bytecode)try:opcode = next(bytecode)except StopIteration:returnassert isinstance(opcode, int)instruction = copy.copy(instruction_table.get(opcode, None))if instruction is None:instruction = Instruction(opcode, '', , , , , '')instruction.pc = pctry:if instruction.has_operand:instruction.parse_operand(bytecode)except ParseError:instruction = Nonefinally:return instruction", "docstring": "Disassemble a single instruction from a bytecode\n\n :param bytecode: the bytecode stream\n :type bytecode: str | bytes | bytearray | iterator\n :param pc: program counter of the instruction(optional)\n :type pc: int\n :param fork: fork name (optional)\n :type fork: str\n :return: an Instruction object\n :rtype: Instruction\n\n Example use::\n\n >>> print disassemble_one('\\x60\\x10')", "id": "f13733:m2"} {"signature": "def disassemble_all(bytecode, pc=, fork=DEFAULT_FORK):", "body": "if isinstance(bytecode, bytes):bytecode = bytearray(bytecode)if isinstance(bytecode, str):bytecode = bytearray(bytecode.encode(''))bytecode = iter(bytecode)while True:instr = disassemble_one(bytecode, pc=pc, fork=fork)if not instr:returnpc += instr.sizeyield instr", "docstring": "Disassemble all instructions in bytecode\n\n :param bytecode: an evm bytecode (binary)\n :type bytecode: str | bytes | bytearray | iterator\n :param pc: program counter of the first instruction(optional)\n :type pc: int\n :param fork: fork name (optional)\n :type fork: str\n :return: An generator of Instruction objects\n :rtype: 
list[Instruction]\n\n Example use::\n\n >>> for inst in disassemble_all(bytecode):\n ... print(instr)\n\n ...\n PUSH1 0x60\n PUSH1 0x40\n MSTORE\n PUSH1 0x2\n PUSH2 0x108\n PUSH1 0x0\n POP\n SSTORE\n PUSH1 0x40\n MLOAD", "id": "f13733:m3"} {"signature": "def disassemble(bytecode, pc=, fork=DEFAULT_FORK):", "body": "return ''.join(map(str, disassemble_all(bytecode, pc=pc, fork=fork)))", "docstring": "Disassemble an EVM bytecode\n\n :param bytecode: binary representation of an evm bytecode\n :type bytecode: str | bytes | bytearray\n :param pc: program counter of the first instruction(optional)\n :type pc: int\n :param fork: fork name (optional)\n :type fork: str\n :return: the text representation of the assembler code\n\n Example use::\n\n >>> disassemble(\"\\x60\\x60\\x60\\x40\\x52\\x60\\x02\\x61\\x01\\x00\")\n ...\n PUSH1 0x60\n BLOCKHASH\n MSTORE\n PUSH1 0x2\n PUSH2 0x100", "id": "f13733:m4"} {"signature": "def assemble(asmcode, pc=, fork=DEFAULT_FORK):", "body": "return b''.join(x.bytes for x in assemble_all(asmcode, pc=pc, fork=fork))", "docstring": "Assemble an EVM program\n\n :param asmcode: an evm assembler program\n :type asmcode: str\n :param pc: program counter of the first instruction(optional)\n :type pc: int\n :param fork: fork name (optional)\n :type fork: str\n :return: the hex representation of the bytecode\n :rtype: str\n\n Example use::\n\n >>> assemble('''PUSH1 0x60\\n \\\n BLOCKHASH\\n \\\n MSTORE\\n \\\n PUSH1 0x2\\n \\\n PUSH2 0x100\\n \\\n ''')\n ...\n b\"\\x60\\x60\\x60\\x40\\x52\\x60\\x02\\x61\\x01\\x00\"", "id": "f13733:m5"} {"signature": "def disassemble_hex(bytecode, pc=, fork=DEFAULT_FORK):", "body": "if bytecode.startswith(''):bytecode = bytecode[:]bytecode = unhexlify(bytecode)return disassemble(bytecode, pc=pc, fork=fork)", "docstring": "Disassemble an EVM bytecode\n\n :param bytecode: canonical representation of an evm bytecode (hexadecimal)\n :type bytecode: str\n :param pc: program counter of the first instruction(optional)\n :type pc: int\n :param fork: fork name (optional)\n :type fork: str\n :return: the text representation of the assembler code\n :rtype: str\n\n Example use::\n\n >>> disassemble_hex(\"0x6060604052600261010\")\n ...\n PUSH1 0x60\n BLOCKHASH\n MSTORE\n PUSH1 0x2\n PUSH2 0x100", "id": "f13733:m6"} {"signature": "def assemble_hex(asmcode, pc=, fork=DEFAULT_FORK):", "body": "if isinstance(asmcode, list):return '' + hexlify(b''.join([x.bytes for x in asmcode])).decode('')return '' + hexlify(assemble(asmcode, pc=pc, fork=fork)).decode('')", "docstring": "Assemble an EVM program\n\n :param asmcode: an evm assembler program\n :type asmcode: str | iterator[Instruction]\n :param pc: program counter of the first instruction(optional)\n :type pc: int\n :param fork: fork name (optional)\n :type fork: str\n :return: the hex representation of the bytecode\n :rtype: str\n\n Example use::\n\n >>> assemble_hex('''PUSH1 0x60\\n \\\n BLOCKHASH\\n \\\n MSTORE\\n \\\n PUSH1 0x2\\n \\\n PUSH2 0x100\\n \\\n ''')\n ...\n \"0x6060604052600261010\"", "id": "f13733:m7"} {"signature": "def block_to_fork(block_number):", "body": "forks_by_block = {: \"\",: \"\",: \"\",: \"\",: \"\",: \"\",: \"\" }fork_names = list(forks_by_block.values())fork_blocks = list(forks_by_block.keys())return fork_names[bisect(fork_blocks, block_number) - ]", "docstring": "Convert block number to fork name.\n\n :param block_number: block number\n :type block_number: int\n :return: fork name\n :rtype: str\n\n Example use::\n\n >>> block_to_fork(0)\n ...\n \"frontier\"\n >>> 
block_to_fork(4370000)\n ...\n \"byzantium\"\n >>> block_to_fork(4370001)\n ...\n \"byzantium\"", "id": "f13733:m8"} {"signature": "def __init__(self, opcode, name, operand_size, pops, pushes, fee, description, operand=None, pc=):", "body": "self._opcode = opcodeself._name = nameself._operand_size = operand_sizeself._pops = popsself._pushes = pushesself._fee = feeself._description = descriptionself._operand = operand self._pc = pc", "docstring": "This represents an EVM instruction.\nEVMAsm will create this for you.\n\n:param opcode: the opcode value\n:param name: instruction name\n:param operand_size: immediate operand size in bytes\n:param pops: number of items popped from the stack\n:param pushes: number of items pushed into the stack\n:param fee: gas fee for the instruction\n:param description: textual description of the instruction\n:param operand: optional immediate operand\n:param pc: optional program counter of this instruction in the program\n\nExample use::\n\n >>> instruction = assemble_one('PUSH1 0x10')\n >>> print('Instruction: %s'% instruction)\n >>> print('\\tdescription:', instruction.description)\n >>> print('\\tgroup:', instruction.group)\n >>> print('\\tpc:', instruction.pc)\n >>> print('\\tsize:', instruction.size)\n >>> print('\\thas_operand:', instruction.has_operand)\n >>> print('\\toperand_size:', instruction.operand_size)\n >>> print('\\toperand:', instruction.operand)\n >>> print('\\tsemantics:', instruction.semantics)\n >>> print('\\tpops:', instruction.pops)\n >>> print('\\tpushes:', instruction.pushes)\n >>> print('\\tbytes:', '0x'+instruction.bytes.encode('hex'))\n >>> print('\\twrites to stack:', instruction.writes_to_stack)\n >>> print('\\treads from stack:', instruction.reads_from_stack)\n >>> print('\\twrites to memory:', instruction.writes_to_memory)\n >>> print('\\treads from memory:', instruction.reads_from_memory)\n >>> print('\\twrites to storage:', instruction.writes_to_storage)\n >>> print('\\treads from storage:', instruction.reads_from_storage)\n >>> print('\\tis terminator', instruction.is_terminator)", "id": "f13733:c4:m0"} {"signature": "def __eq__(self, other):", "body": "return self._opcode == other._opcode andself._name == other._name andself._operand == other._operand andself._operand_size == other._operand_size andself._pops == other._pops andself._pushes == other._pushes andself._fee == other._fee andself._pc == other._pc andself._description == other._description", "docstring": "Instructions are equal if all features match", "id": "f13733:c4:m1"} {"signature": "@propertydef opcode(self):", "body": "return self._opcode", "docstring": "The opcode as an integer", "id": "f13733:c4:m4"} {"signature": "@propertydef mnemonic(self):", "body": "return self.name", "docstring": "Alias for name", "id": "f13733:c4:m5"} {"signature": "@propertydef name(self):", "body": "if self._name == '':return '' % self.operand_sizeelif self._name == '':return '' % self.popselif self._name == '':return '' % (self.pops - )elif self._name == '':return '' % (self.pops - )return self._name", "docstring": "The instruction name/mnemonic", "id": "f13733:c4:m6"} {"signature": "def parse_operand(self, buf):", "body": "buf = iter(buf)try:operand = for _ in range(self.operand_size):operand <<= operand |= next(buf)self._operand = operandexcept StopIteration:raise ParseError(\"\")", "docstring": "Parses an operand from buf\n\n :param buf: a buffer\n :type buf: iterator/generator/string", "id": "f13733:c4:m7"} {"signature": "@propertydef operand_size(self):", "body": "return 
self._operand_size", "docstring": "The immediate operand size", "id": "f13733:c4:m8"} {"signature": "@propertydef has_operand(self):", "body": "return self.operand_size > ", "docstring": "True if the instruction uses an immediate operand", "id": "f13733:c4:m9"} {"signature": "@propertydef pops(self):", "body": "return self._pops", "docstring": "Number words popped from the stack", "id": "f13733:c4:m12"} {"signature": "@propertydef pushes(self):", "body": "return self._pushes", "docstring": "Number words pushed to the stack", "id": "f13733:c4:m13"} {"signature": "@propertydef size(self):", "body": "return self._operand_size + ", "docstring": "Size of the encoded instruction", "id": "f13733:c4:m14"} {"signature": "@propertydef fee(self):", "body": "return self._fee", "docstring": "The basic gas fee of the instruction", "id": "f13733:c4:m15"} {"signature": "@propertydef semantics(self):", "body": "return self._name", "docstring": "Canonical semantics", "id": "f13733:c4:m16"} {"signature": "@propertydef description(self):", "body": "return self._description", "docstring": "Colloquial description of the instruction", "id": "f13733:c4:m17"} {"signature": "@propertydef bytes(self):", "body": "b = [bytes([self._opcode])]for offset in reversed(range(self.operand_size)):b.append(bytes([(self.operand >> offset * ) & ]))return b''.join(b)", "docstring": "Encoded instruction", "id": "f13733:c4:m18"} {"signature": "@pc.setterdef pc(self, value):", "body": "self._pc = value", "docstring": "Location in the program (optional)", "id": "f13733:c4:m20"} {"signature": "@propertydef group(self):", "body": "classes = {: '',: '',: '',: '',: '',: '',: '',: '',: '',: '',: '',: ''}return classes.get(self.opcode >> , '')", "docstring": "Instruction classification as per the yellow paper", "id": "f13733:c4:m21"} {"signature": "@propertydef uses_stack(self):", "body": "return self.reads_from_stack or self.writes_to_stack", "docstring": "True if the instruction reads/writes from/to the stack", "id": "f13733:c4:m22"} {"signature": "@propertydef reads_from_stack(self):", "body": "return self.pops > ", "docstring": "True if the instruction reads from stack", "id": "f13733:c4:m23"} {"signature": "@propertydef writes_to_stack(self):", "body": "return self.pushes > ", "docstring": "True if the instruction writes to the stack", "id": "f13733:c4:m24"} {"signature": "@propertydef writes_to_memory(self):", "body": "return self.semantics in ('', '', '', '', '')", "docstring": "True if the instruction writes to memory", "id": "f13733:c4:m25"} {"signature": "@propertydef reads_from_memory(self):", "body": "return self.semantics in ('', '', '', '', '', '', '')", "docstring": "True if the instruction reads from memory", "id": "f13733:c4:m26"} {"signature": "@propertydef writes_to_storage(self):", "body": "return self.semantics in ''", "docstring": "True if the instruction writes to the storage", "id": "f13733:c4:m27"} {"signature": "@propertydef reads_from_storage(self):", "body": "return self.semantics in ''", "docstring": "True if the instruction reads from the storage", "id": "f13733:c4:m28"} {"signature": "@propertydef is_terminator(self):", "body": "return self.semantics in ('', '', '', '', '', '', '')", "docstring": "True if the instruction is a basic block terminator", "id": "f13733:c4:m29"} {"signature": "@propertydef is_endtx(self):", "body": "return self.semantics in ('', '', '', '', '')", "docstring": "True if the instruction is a transaction terminator", "id": "f13733:c4:m30"} {"signature": "@propertydef 
is_starttx(self):", "body": "return self.semantics in ('', '', '', '', '')", "docstring": "True if the instruction is a transaction initiator", "id": "f13733:c4:m31"} {"signature": "@propertydef is_branch(self):", "body": "return self.semantics in ('', '')", "docstring": "True if the instruction is a jump", "id": "f13733:c4:m32"} {"signature": "@propertydef is_environmental(self):", "body": "return self.group == ''", "docstring": "True if the instruction accesses environmental data", "id": "f13733:c4:m33"} {"signature": "@propertydef is_system(self):", "body": "return self.group == ''", "docstring": "True if the instruction is a system operation", "id": "f13733:c4:m34"} {"signature": "@propertydef uses_block_info(self):", "body": "return self.group == ''", "docstring": "True if the instruction accesses block information", "id": "f13733:c4:m35"} {"signature": "@propertydef is_arithmetic(self):", "body": "return self.semantics in ('', '', '', '', '', '', '', '', '', '', '', '', '', '')", "docstring": "True if the instruction is an arithmetic operation", "id": "f13733:c4:m36"} {"signature": "def _shell(cmd, check=True, stdin=None, stdout=None, stderr=None): ", "body": "return subprocess.run(cmd, shell=True, check=check, stdin=stdin, stdout=stdout, stderr=stderr)", "docstring": "Runs a subprocess shell with check=True by default", "id": "f13737:m0"} {"signature": "def _pypi_push(dist):", "body": "for filename in os.listdir(dist):full_path = os.path.join(dist, filename)if os.path.isfile(full_path):_shell('' + shlex.quote(full_path), check=False)_shell('' + shlex.quote(dist + ''))", "docstring": "Push created package to PyPI.\n\n Requires the following defined environment variables:\n - TWINE_USERNAME: The PyPI username to upload this package under\n - TWINE_PASSWORD: The password to the user's account\n\n Args:\n dist (str):\n The distribution to push. Must be a valid directory; shell globs are\n NOT allowed.", "id": "f13737:m1"} {"signature": "def deploy(target):", "body": "if not os.getenv(CIRCLECI_ENV_VAR): raise EnvironmentError('')current_branch = os.getenv('')if (target == '') and (current_branch != ''):raise EnvironmentError(('''').format(current_branch=current_branch))if target in ('', ''):pypi_username = os.getenv(''.format(target=target))pypi_password = os.getenv(''.format(target=target))else:raise ValueError(\"\".format(target=target))if not (pypi_username and pypi_password): raise EnvironmentError((\"\"\"\").format(target=target))os.environ[''] = pypi_usernameos.environ[''] = pypi_password_shell('')_shell('')_shell('')ret = _shell('', stdout=subprocess.PIPE)version = ret.stdout.decode('').strip()print(''.format(version=version))_shell(''.format(version=version))_shell(''.format(version=version))_shell('')_shell('')_shell('')_pypi_push('')_shell('')print(''.format(version=version))", "docstring": "Deploys the package and documentation.\n\n Proceeds in the following steps:\n\n 1. Ensures proper environment variables are set and checks that we are on Circle CI\n 2. Tags the repository with the new version\n 3. Creates a standard distribution and a wheel\n 4. Updates version.py to have the proper version\n 5. Commits the ChangeLog, AUTHORS, and version.py file\n 6. Pushes to PyPI\n 7. 
Pushes the tags and newly committed files\n\n Raises:\n `EnvironmentError`:\n - Not running on CircleCI\n - `*_PYPI_USERNAME` and/or `*_PYPI_PASSWORD` environment variables\n are missing\n - Attempting to deploy to production from a branch that isn't master", "id": "f13737:m2"} {"signature": "def _get_current_branch():", "body": "result = temple.utils.shell('', stdout=subprocess.PIPE)return result.stdout.decode('').strip()", "docstring": "Determine the current git branch", "id": "f13738:m0"} {"signature": "def clean():", "body": "temple.check.in_git_repo()current_branch = _get_current_branch()update_branch = temple.constants.UPDATE_BRANCH_NAMEtemp_update_branch = temple.constants.TEMP_UPDATE_BRANCH_NAMEif current_branch in (update_branch, temp_update_branch):err_msg = ('').format(current_branch)raise temple.exceptions.InvalidCurrentBranchError(err_msg)if temple.check._has_branch(update_branch):temple.utils.shell(''.format(update_branch))if temple.check._has_branch(temp_update_branch):temple.utils.shell(''.format(temp_update_branch))", "docstring": "Cleans up temporary resources\n\n Tries to clean up:\n\n 1. The temporary update branch used during ``temple update``\n 2. The primary update branch used during ``temple update``", "id": "f13738:m1"} {"signature": "def _cookiecutter_configs_have_changed(template, old_version, new_version):", "body": "temple.check.is_git_ssh_path(template)repo_path = temple.utils.get_repo_path(template)github_client = temple.utils.GithubClient()api = ''.format(repo_path)old_config_resp = github_client.get(api, params={'': old_version})old_config_resp.raise_for_status()new_config_resp = github_client.get(api, params={'': new_version})new_config_resp.raise_for_status()return old_config_resp.json()[''] != new_config_resp.json()['']", "docstring": "Given an old version and new version, check if the cookiecutter.json files have changed\n\n When the cookiecutter.json files change, it means the user will need to be prompted for\n new context\n\n Args:\n template (str): The git SSH path to the template\n old_version (str): The git SHA of the old version\n new_version (str): The git SHA of the new version\n\n Returns:\n bool: True if the cookiecutter.json files have been changed in the old and new versions", "id": "f13748:m0"} {"signature": "def _apply_template(template, target, *, checkout, extra_context):", "body": "with tempfile.TemporaryDirectory() as tempdir:repo_dir = cc_main.cookiecutter(template,checkout=checkout,no_input=True,output_dir=tempdir,extra_context=extra_context)for item in os.listdir(repo_dir):src = os.path.join(repo_dir, item)dst = os.path.join(target, item)if os.path.isdir(src):if os.path.exists(dst):shutil.rmtree(dst)shutil.copytree(src, dst)else:if os.path.exists(dst):os.remove(dst)shutil.copy2(src, dst)", "docstring": "Apply a template to a temporary directory and then copy results to target.", "id": "f13748:m4"} {"signature": "@temple.utils.set_cmd_env_var('')def up_to_date(version=None):", "body": "temple.check.in_git_repo()temple.check.is_temple_project()temple_config = temple.utils.read_temple_config()old_template_version = temple_config['']new_template_version = version or _get_latest_template_version(temple_config[''])return new_template_version == old_template_version", "docstring": "Checks if a temple project is up to date with the repo\n\n Note that the `temple.constants.TEMPLE_ENV_VAR` is set to 'update' for the duration of this\n function.\n\n Args:\n version (str, optional): Update against this git SHA or branch of the template\n\n 
Returns:\n boolean: True if up to date with ``version`` (or latest version), False otherwise\n\n Raises:\n `NotInGitRepoError`: When running outside of a git repo\n `InvalidTempleProjectError`: When not inside a valid temple repository", "id": "f13748:m5"} {"signature": "def _needs_new_cc_config_for_update(old_template, old_version, new_template, new_version):", "body": "if old_template != new_template:return Trueelse:return _cookiecutter_configs_have_changed(new_template,old_version,new_version)", "docstring": "Given two templates and their respective versions, return True if a new cookiecutter\nconfig needs to be obtained from the user", "id": "f13748:m6"} {"signature": "@temple.utils.set_cmd_env_var('')def update(old_template=None, old_version=None, new_template=None, new_version=None,enter_parameters=False):", "body": "update_branch = temple.constants.UPDATE_BRANCH_NAMEtemp_update_branch = temple.constants.TEMP_UPDATE_BRANCH_NAMEtemple.check.in_git_repo()temple.check.in_clean_repo()temple.check.is_temple_project()temple.check.not_has_branch(update_branch)temple.check.not_has_branch(temp_update_branch)temple.check.has_env_vars(temple.constants.GITHUB_API_TOKEN_ENV_VAR)temple_config = temple.utils.read_temple_config()old_template = old_template or temple_config['']new_template = new_template or temple_config['']old_version = old_version or temple_config['']new_version = new_version or _get_latest_template_version(new_template)if new_template == old_template and new_version == old_version and not enter_parameters:print('')return Falseprint(''.format(update_branch))temple.utils.shell(''.format(update_branch),stderr=subprocess.DEVNULL)print(''.format(temp_update_branch))temple.utils.shell(''.format(temp_update_branch),stderr=subprocess.DEVNULL)temple.utils.shell('',stdout=subprocess.DEVNULL)_apply_template(old_template,'',checkout=old_version,extra_context=temple_config)temple.utils.shell('')temple.utils.shell(''.format(old_version),stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL)print('')temple.utils.shell(''.format(update_branch),stderr=subprocess.DEVNULL)temple.utils.shell(''.format(temp_update_branch),stderr=subprocess.DEVNULL)print('')temple.utils.shell(''.format(temp_update_branch),stderr=subprocess.DEVNULL)temple.utils.shell('',stdout=subprocess.DEVNULL)needs_new_cc_config = _needs_new_cc_config_for_update(old_template, old_version,new_template, new_version)if needs_new_cc_config:if old_template != new_template:cc_config_input_msg = ('''''').format(temple.utils.get_repo_path(new_template))else:cc_config_input_msg = ('''''''')input(cc_config_input_msg)if needs_new_cc_config or enter_parameters:_, temple_config = (temple.utils.get_cookiecutter_config(new_template,default_config=temple_config,version=new_version))_apply_template(new_template,'',checkout=new_version,extra_context=temple_config)temple.utils.write_temple_config(temple_config, new_template, new_version)temple.utils.shell('')temple.utils.shell(''.format(new_version),stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL)print('')temple.utils.shell(''.format(update_branch),stderr=subprocess.DEVNULL)temple.utils.shell(''.format(temp_update_branch),check=False,stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL)temple.utils.shell(''.format(temple.constants.TEMPLE_CONFIG_FILE),check=False,stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL)print(''.format(temp_update_branch))temple.utils.shell(''.format(temp_update_branch),stdout=subprocess.DEVNULL)print(textwrap.dedent(\"\"\"\"\"\").format(update_branch))return True", 
"docstring": "Updates the temple project to the latest template\n\n Proceeeds in the following steps:\n\n 1. Ensure we are inside the project repository\n 2. Obtain the latest version of the package template\n 3. If the package is up to date with the latest template, return\n 4. If not, create an empty template branch with a new copy of the old template\n 5. Create an update branch from HEAD and merge in the new template copy\n 6. Create a new copy of the new template and merge into the empty template branch\n 7. Merge the updated empty template branch into the update branch\n 8. Ensure temple.yaml reflects what is in the template branch\n 9. Remove the empty template branch\n\n Note that the `temple.constants.TEMPLE_ENV_VAR` is set to 'update' for the\n duration of this function.\n\n Two branches will be created during the update process, one named\n ``_temple_update`` and one named ``_temple_update_temp``. At the end of\n the process, ``_temple_update_temp`` will be removed automatically. The\n work will be left in ``_temple_update`` in an uncommitted state for\n review. The update will fail early if either of these branches exist\n before the process starts.\n\n Args:\n old_template (str, default=None): The old template from which to update. Defaults\n to the template in temple.yaml\n old_version (str, default=None): The old version of the template. Defaults to\n the version in temple.yaml\n new_template (str, default=None): The new template for updating. Defaults to the\n template in temple.yaml\n new_version (str, default=None): The new version of the new template to update.\n Defaults to the latest version of the new template\n enter_parameters (bool, default=False): Force entering template parameters for the project\n\n Raises:\n `NotInGitRepoError`: When not inside of a git repository\n `InvalidTempleProjectError`: When not inside a valid temple repository\n `InDirtyRepoError`: When an update is triggered while the repo is in a dirty state\n `ExistingBranchError`: When an update is triggered and there is an existing\n update branch\n\n Returns:\n boolean: True if update was performed or False if template was already up to date", "id": "f13748:m7"} {"signature": "@main.command()@click.argument('', nargs=, required=True)@click.option('', '', default=None,help='')def setup(template, version):", "body": "temple.setup.setup(template, version=version)", "docstring": "Setup new project. Takes a full git SSH path to the template as returned\nby \"temple ls\". In order to start a project from a\nparticular version (instead of the latest), use the \"-v\" option.", "id": "f13749:m1"} {"signature": "@main.command()@click.option('', '', is_flag=True,help='')@click.option('', '', is_flag=True,help='')@click.option('', '', default=None,help='')def update(check, enter_parameters, version):", "body": "if check:if temple.update.up_to_date(version=version):print('')else:msg = ('''')raise temple.exceptions.NotUpToDateWithTemplateError(msg)else:temple.update.update(new_version=version, enter_parameters=enter_parameters)", "docstring": "Update package with latest template. 
Must be inside of the project\nfolder to run.\n\nUsing \"-e\" will prompt for re-entering the template parameters again\neven if the project is up to date.\n\nUse \"-v\" to update to a particular version of a template.\n\nUsing \"-c\" will perform a check that the project is up to date\nwith the latest version of the template (or the version specified by \"-v\").\nNo updating will happen when using this option.", "id": "f13749:m2"} {"signature": "@main.command()@click.argument('', nargs=, required=True)@click.argument('', nargs=, required=False)@click.option('', '', is_flag=True,help='')def ls(github_user, template, long_format):", "body": "github_urls = temple.ls.ls(github_user, template=template)for ssh_path, info in github_urls.items():if long_format:print(ssh_path, '', info[''] or '')else:print(ssh_path)", "docstring": "List packages created with temple. Enter a github user or\norganization to list all templates under the user or org.\nUsing a template path as the second argument will list all projects\nthat have been started with that template.\n\nUse \"-l\" to print the Github repository descriptions of templates\nor projects.", "id": "f13749:m3"} {"signature": "@main.command()def clean():", "body": "temple.clean.clean()", "docstring": "Cleans temporary resources created by temple, such as the temple update branch", "id": "f13749:m4"} {"signature": "@main.command()@click.argument('', nargs=, required=True)@click.option('', '', default=None,help='')def switch(template, version):", "body": "temple.update.update(new_template=template, new_version=version)", "docstring": "Switch a project's template to a different template.", "id": "f13749:m5"} {"signature": "def is_git_ssh_path(template_path):", "body": "if not template_path.startswith('') or not template_path.endswith(''):raise temple.exceptions.InvalidTemplatePathError('')", "docstring": "Raises a `InvalidTemplatePathError` if ``template_path`` is not a git SSH url\n\n Note that the git SSH url must be in the form as provided from Github or from\n ``temple ls``. 
For example, ``git@github.com:user/template.git``.", "id": "f13751:m0"} {"signature": "def _in_git_repo():", "body": "ret = temple.utils.shell('', stderr=subprocess.DEVNULL, check=False)return ret.returncode == ", "docstring": "Returns True if inside a git repo, False otherwise", "id": "f13751:m1"} {"signature": "def in_git_repo():", "body": "if not _in_git_repo():msg = ''raise temple.exceptions.NotInGitRepoError(msg)", "docstring": "Raises `NotInGitRepoError` if not inside a git repository", "id": "f13751:m2"} {"signature": "def not_in_git_repo():", "body": "if _in_git_repo():msg = ''raise temple.exceptions.InGitRepoError(msg)", "docstring": "Raises `InGitRepoError` if inside of a git repository", "id": "f13751:m3"} {"signature": "def _in_clean_repo():", "body": "ret = temple.utils.shell('', check=False)return ret.returncode == ", "docstring": "Returns True if the git repo is not dirty, False otherwise", "id": "f13751:m4"} {"signature": "def in_clean_repo():", "body": "if not _in_clean_repo():msg = ''raise temple.exceptions.InDirtyRepoError(msg)", "docstring": "Raises `InDirtyRepoError` if inside a dirty repository", "id": "f13751:m5"} {"signature": "def _has_branch(branch):", "body": "ret = temple.utils.shell(''.format(branch),stderr=subprocess.DEVNULL,stdout=subprocess.DEVNULL,check=False)return ret.returncode == ", "docstring": "Return True if the target branch exists.", "id": "f13751:m6"} {"signature": "def not_has_branch(branch):", "body": "if _has_branch(branch):msg = ''.format(branch)raise temple.exceptions.ExistingBranchError(msg)", "docstring": "Raises `ExistingBranchError` if the specified branch exists.", "id": "f13751:m7"} {"signature": "def has_env_vars(*env_vars):", "body": "for env_var in env_vars:if not os.environ.get(env_var):msg = ('').format(env_var, temple.constants.TEMPLE_DOCS_URL)raise temple.exceptions.InvalidEnvironmentError(msg)", "docstring": "Raises `InvalidEnvironmentError` when one isn't set", "id": "f13751:m8"} {"signature": "def is_temple_project():", "body": "if not os.path.exists(temple.constants.TEMPLE_CONFIG_FILE):msg = ''.format(temple.constants.TEMPLE_CONFIG_FILE)raise temple.exceptions.InvalidTempleProjectError(msg)", "docstring": "Raises `InvalidTempleProjectError` if repository is not a temple project", "id": "f13751:m9"} {"signature": "def _parse_link_header(headers):", "body": "links = {}if '' in headers:link_headers = headers[''].split('')for link_header in link_headers:(url, rel) = link_header.split('')url = url[:-]rel = rel[:-]links[rel] = urlreturn links", "docstring": "Parses Github's link header for pagination.\n\n TODO eventually use a github client for this", "id": "f13753:m0"} {"signature": "def _code_search(query, github_user=None):", "body": "github_client = temple.utils.GithubClient()headers = {'': ''}resp = github_client.get('',params={'': query, '': },headers=headers)if resp.status_code == requests.codes.unprocessable_entity and github_user:raise temple.exceptions.InvalidGithubUserError(''.format(github_user))resp.raise_for_status()resp_data = resp.json()repositories = collections.defaultdict(dict)while True:repositories.update({''.format(repo['']['']): repo['']for repo in resp_data['']})next_url = _parse_link_header(resp.headers).get('')if next_url:resp = requests.get(next_url, headers=headers)resp.raise_for_status()resp_data = resp.json()else:breakreturn repositories", "docstring": "Performs a Github API code search\n\n Args:\n query (str): The query sent to Github's code search\n github_user (str, optional): The Github user being 
searched in the query string\n\n Returns:\n dict: A dictionary of repository information keyed on the git SSH url\n\n Raises:\n `InvalidGithubUserError`: When ``github_user`` is invalid", "id": "f13753:m1"} {"signature": "@temple.utils.set_cmd_env_var('')def ls(github_user, template=None):", "body": "temple.check.has_env_vars(temple.constants.GITHUB_API_TOKEN_ENV_VAR)if template:temple.check.is_git_ssh_path(template)search_q = ''.format(github_user,temple.constants.TEMPLE_CONFIG_FILE,template)else:search_q = ''.format(github_user)results = _code_search(search_q, github_user)return collections.OrderedDict(sorted(results.items()))", "docstring": "Lists all temple templates and packages associated with those templates\n\n If ``template`` is None, returns the available templates for the configured\n Github org.\n\n If ``template`` is a Github path to a template, returns all projects spun\n up with that template.\n\n ``ls`` uses the github search API to find results.\n\n Note that the `temple.constants.TEMPLE_ENV_VAR` is set to 'ls' for the duration of this\n function.\n\n Args:\n github_user (str): The github user or org being searched.\n template (str, optional): The template git repo path. If provided, lists\n all projects that have been created with the provided template. Note\n that the template path is the SSH path\n (e.g. git@github.com:CloverHealth/temple.git)\n\n Returns:\n dict: A dictionary of repository information keyed on the SSH Github url\n\n Raises:\n `InvalidGithubUserError`: When ``github_user`` is invalid", "id": "f13753:m2"} {"signature": "def get_repo_path(template):", "body": "return template[:-].split('')[]", "docstring": "Given a git SSH path (e.g git@github.com:owner/repo.git), return the repo path\n\n The repo path is in the form of \"owner/repo\"", "id": "f13754:m0"} {"signature": "def shell(cmd, check=True, stdin=None, stdout=None, stderr=None):", "body": "return subprocess.run(cmd, shell=True, check=check, stdin=stdin, stdout=stdout, stderr=stderr)", "docstring": "Runs a subprocess shell with check=True by default", "id": "f13754:m1"} {"signature": "@contextlib.contextmanagerdef cd(path):", "body": "old_dir = os.getcwd()os.chdir(path)try:yieldfinally:os.chdir(old_dir)", "docstring": "A context manager for changing into a directory", "id": "f13754:m2"} {"signature": "def read_temple_config():", "body": "with open(temple.constants.TEMPLE_CONFIG_FILE) as temple_config_file:return yaml.load(temple_config_file, Loader=yaml.SafeLoader)", "docstring": "Reads the temple YAML configuration file in the repository", "id": "f13754:m3"} {"signature": "def write_temple_config(temple_config, template, version):", "body": "with open(temple.constants.TEMPLE_CONFIG_FILE, '') as temple_config_file:versioned_config = {**temple_config,**{'': version, '': template},}yaml.dump(versioned_config, temple_config_file, Dumper=yaml.SafeDumper)", "docstring": "Writes the temple YAML configuration", "id": "f13754:m4"} {"signature": "def get_cookiecutter_config(template, default_config=None, version=None):", "body": "default_config = default_config or {}config_dict = cc_config.get_user_config()repo_dir, _ = cc_repository.determine_repo_dir(template=template,abbreviations=config_dict[''],clone_to_dir=config_dict[''],checkout=version,no_input=True)context_file = os.path.join(repo_dir, '')context = cc_generate.generate_context(context_file=context_file,default_context={**config_dict[''], **default_config})return repo_dir, cc_prompt.prompt_for_config(context)", "docstring": "Obtains the configuration 
used for cookiecutter templating\n\n Args:\n template: Path to the template\n default_config (dict, optional): The default configuration\n version (str, optional): The git SHA or branch to use when\n checking out template. Defaults to latest version\n\n Returns:\n tuple: The cookiecutter repo directory and the config dict", "id": "f13754:m5"} {"signature": "def set_cmd_env_var(value):", "body": "def func_decorator(function):@functools.wraps(function)def wrapper(*args, **kwargs):previous_cmd_env_var = os.getenv(temple.constants.TEMPLE_ENV_VAR)os.environ[temple.constants.TEMPLE_ENV_VAR] = valuetry:ret_val = function(*args, **kwargs)finally:if previous_cmd_env_var is None:del os.environ[temple.constants.TEMPLE_ENV_VAR]else:os.environ[temple.constants.TEMPLE_ENV_VAR] = previous_cmd_env_varreturn ret_valreturn wrapperreturn func_decorator", "docstring": "Decorator that sets the temple command env var to value", "id": "f13754:m6"} {"signature": "def _call_api(self, verb, url, **request_kwargs):", "body": "api = ''.format(url)auth_headers = {'': ''.format(self.api_token)}headers = {**auth_headers, **request_kwargs.pop('', {})}return getattr(requests, verb)(api, headers=headers, **request_kwargs)", "docstring": "Perform a github API call\n\n Args:\n verb (str): Can be \"post\", \"put\", or \"get\"\n url (str): The base URL with a leading slash for Github API (v3)\n auth (str or HTTPBasicAuth): A Github API token or a HTTPBasicAuth object", "id": "f13754:c0:m1"} {"signature": "def get(self, url, **request_kwargs):", "body": "return self._call_api('', url, **request_kwargs)", "docstring": "Github API get", "id": "f13754:c0:m2"} {"signature": "def _patched_run_hook(hook_name, project_dir, context):", "body": "if hook_name == '':with temple.utils.cd(project_dir):temple.utils.write_temple_config(context[''],context[''],context[''])return cc_hooks.run_hook(hook_name, project_dir, context)", "docstring": "Used to patch cookiecutter's ``run_hook`` function.\n\n This patched version ensures that the temple.yaml file is created before\n any cookiecutter hooks are executed", "id": "f13755:m0"} {"signature": "def _generate_files(repo_dir, config, template, version):", "body": "with unittest.mock.patch('', side_effect=_patched_run_hook):cc_generate.generate_files(repo_dir=repo_dir,context={'': config,'': template,'': version},overwrite_if_exists=False,output_dir='')", "docstring": "Uses cookiecutter to generate files for the project.\n\n Monkeypatches cookiecutter's \"run_hook\" to ensure that the temple.yaml file is\n generated before any hooks run. This is important to ensure that hooks can also\n perform any actions involving temple.yaml", "id": "f13755:m1"} {"signature": "@temple.utils.set_cmd_env_var('')def setup(template, version=None):", "body": "temple.check.is_git_ssh_path(template)temple.check.not_in_git_repo()repo_path = temple.utils.get_repo_path(template)msg = ('''').format(repo_path)print(msg)cc_repo_dir, config = temple.utils.get_cookiecutter_config(template, version=version)if not version:with temple.utils.cd(cc_repo_dir):ret = temple.utils.shell('', stdout=subprocess.PIPE)version = ret.stdout.decode('').strip()_generate_files(repo_dir=cc_repo_dir, config=config, template=template, version=version)", "docstring": "Sets up a new project from a template\n\n Note that the `temple.constants.TEMPLE_ENV_VAR` is set to 'setup' during the duration\n of this function.\n\n Args:\n template (str): The git SSH path to a template\n version (str, optional): The version of the template to use when updating. 
Defaults\n to the latest version", "id": "f13755:m2"} {"signature": "@contextlib.contextmanagerdef _setenv(key, value):", "body": "old_value = os.environ.get(key, None)if value is None:os.environ.pop(key, None)else:os.environ[key] = valueyieldif old_value is None:os.environ.pop(key, None)else:os.environ[key] = value", "docstring": "Context manager to set an environment variable temporarily.", "id": "f13759:m0"} {"signature": "def _string_to_base64(string):", "body": "utf8_encoded = string.encode('')return base64.urlsafe_b64encode(utf8_encoded)", "docstring": "Encodes string to utf-8 and then base64", "id": "f13759:m1"} {"signature": "@decoratordef x_runtime(f, *args, **kwargs):", "body": "_t0 = now()r = f(*args, **kwargs)_t1 = now()r.headers[''] = ''.format(Decimal(str(_t1 - _t0)))return r", "docstring": "X-Runtime Flask Response Decorator.", "id": "f13762:m0"} {"signature": "@decoratordef gzip(f, *args, **kwargs):", "body": "data = f(*args, **kwargs)if isinstance(data, Response):content = data.dataelse:content = datagzip_buffer = BytesIO()gzip_file = gzip2.GzipFile(mode='',compresslevel=,fileobj=gzip_buffer)gzip_file.write(content)gzip_file.close()gzip_data = gzip_buffer.getvalue()if isinstance(data, Response):data.data = gzip_datadata.headers[''] = ''data.headers[''] = str(len(data.data))return datareturn gzip_data", "docstring": "GZip Flask Response Decorator.", "id": "f13762:m1"} {"signature": "@decoratordef deflate(f, *args, **kwargs):", "body": "data = f(*args, **kwargs)if isinstance(data, Response):content = data.dataelse:content = datadeflater = zlib.compressobj()deflated_data = deflater.compress(content)deflated_data += deflater.flush()if isinstance(data, Response):data.data = deflated_datadata.headers[''] = ''data.headers[''] = str(len(data.data))return datareturn deflated_data", "docstring": "Deflate Flask Response Decorator.", "id": "f13762:m2"} {"signature": "def json_safe(string, content_type=''):", "body": "try:string = string.decode('')json.dumps(string)return stringexcept (ValueError, TypeError):return b''.join([b'',content_type.encode(''),b'',base64.b64encode(string)]).decode('')", "docstring": "Returns JSON-safe version of `string`.\n\n If `string` is a Unicode string or a valid UTF-8,\n it is returned unmodified,\n as it can safely be encoded to JSON string.\n\n If `string` contains raw/binary data, it is Base64-encoded, formatted and\n returned according to \"data\" URL scheme (RFC2397). 
Since JSON is not\n suitable for binary data, some additional encoding was necessary; \"data\"\n URL scheme was chosen for its simplicity.", "id": "f13764:m0"} {"signature": "def get_files(request):", "body": "files = dict()for k, v in request.files.items():content_type = (request.files[k].content_type or '')val = json_safe(v.read(), content_type)if files.get(k):if not isinstance(files[k], list):files[k] = [files[k]]files[k].append(val)else:files[k] = valreturn files", "docstring": "Returns files dict from request context.", "id": "f13764:m1"} {"signature": "def get_headers(request, hide_env=True):", "body": "headers = dict(request.headers.items())if hide_env and ('' not in request.args):for key in ENV_HEADERS:try:del headers[key]except KeyError:passreturn CaseInsensitiveDict(headers.items())", "docstring": "Returns headers dict from request context.", "id": "f13764:m2"} {"signature": "def semiflatten(multi):", "body": "if multi:result = multi.to_dict()for k, v in result.items():if len(v) == :result[k] = v[]return resultelse:return multi", "docstring": "Convert a MutiDict into a regular dict. If there are more than one value\n for a key, the result will have a list of values for the key. Otherwise it\n will have the plain value.", "id": "f13764:m3"} {"signature": "def get_url(request):", "body": "protocol = (request.headers.get('') orrequest.headers.get(''))if protocol is None and request.headers.get('') == '':protocol = ''if protocol is None:return request.urlurl = list(urlparse(request.url))url[] = protocolreturn urlunparse(url)", "docstring": "Since we might be hosted behind a proxy, we need to check the\nX-Forwarded-Proto, X-Forwarded-Protocol, or X-Forwarded-SSL headers\nto find out what protocol was used to access us.", "id": "f13764:m4"} {"signature": "def get_dict(request, *keys, **extras):", "body": "_keys = ('', '', '', '', '', '','', '')assert all(map(_keys.__contains__, keys))data = request.dataform = request.formform = semiflatten(request.form)try:_json = json.loads(data.decode(''))except (ValueError, TypeError):_json = Noned = dict(url=get_url(request),args=semiflatten(request.args),form=form,data=json_safe(data),origin=request.headers.get('', request.remote_addr),headers=get_headers(request),files=get_files(request),json=_json)out_d = dict()for key in keys:out_d[key] = d.get(key)out_d.update(extras)return out_d", "docstring": "Returns request dict of given keys.", "id": "f13764:m5"} {"signature": "def status_code(code):", "body": "redirect = dict(headers=dict(location=REDIRECT_LOCATION))code_map = {: redirect,: redirect,: redirect,: dict(data=''),: redirect,: redirect,: dict(headers={'': ''}),: dict(data='',headers={'': ''}),: dict(data=json.dumps({'': '','': ACCEPTED_MEDIA_TYPES}),headers={'': ''}),: dict(headers={'': ''}),: dict( data=ASCII_ART,headers={'': ''}),}r = Response()r.status_code = codeif code in code_map:m = code_map[code]if '' in m:r.data = m['']if '' in m:r.headers = m['']return r", "docstring": "Returns response object of given status code.", "id": "f13764:m6"} {"signature": "def check_basic_auth(request, user, passwd):", "body": "auth = request.authorizationreturn auth and auth.username == user and auth.password == passwd", "docstring": "Checks user authentication using HTTP Basic Auth.", "id": "f13764:m7"} {"signature": "def HA1(realm, username, password):", "body": "if not realm:realm = u''return H(b''.join([username.encode(''),realm.encode(''),password.encode('')]))", "docstring": "Create HA1 hash by realm, username, password\n\n HA1 = md5(A1) = 
MD5(username:realm:password)", "id": "f13764:m9"} {"signature": "def HA2(credentails, request):", "body": "if credentails.get('') == '' or credentails.get('') is None:return H(b''.join([request[''].encode(''),request[''].encode('')]))elif credentails.get('') == '':for k in '', '', '':if k not in request:raise ValueError('' % k)return H('' % (request[''],request[''],H(request[''])))raise ValueError", "docstring": "Create HA2 md5 hash\n\n If the qop directive's value is \"auth\" or is unspecified, then HA2:\n HA2 = md5(A2) = MD5(method:digestURI)\n If the qop directive's value is \"auth-int\" , then HA2 is\n HA2 = md5(A2) = MD5(method:digestURI:MD5(entityBody))", "id": "f13764:m10"} {"signature": "def response(credentails, password, request):", "body": "response = NoneHA1_value = HA1(credentails.get(''),credentails.get(''),password)HA2_value = HA2(credentails, request)if credentails.get('') is None:response = H(b''.join([HA1_value.encode(''),credentails.get('', '').encode(''),HA2_value.encode('')]))elif (credentails.get('') == '' orcredentails.get('') == ''):for k in '', '', '', '':if k not in credentails:raise ValueError('' % k)response = H(b''.join([HA1_value.encode(''),credentails.get('').encode(''),credentails.get('').encode(''),credentails.get('').encode(''),credentails.get('').encode(''),HA2_value.encode('')]))else:raise ValueError('')return response", "docstring": "Compile digest auth response\n\n If the qop directive's value is \"auth\" or \"auth-int\" ,\n then compute the response as follows:\n RESPONSE = MD5(HA1:nonce:nonceCount:clienNonce:qop:HA2)\n Else if the qop directive is unspecified,\n then compute the response as follows:\n RESPONSE = MD5(HA1:nonce:HA2)\n\n Arguments:\n - `credentails`: credentails dict\n - `password`: request user password\n - `request`: request dict", "id": "f13764:m11"} {"signature": "def check_digest_auth(request, user, passwd):", "body": "if request.headers.get(''):credentails = parse_authorization_header(request.headers.get(''))if not credentails:returnresponse_hash = response(credentails, passwd,dict(uri=request.script_root + request.path,body=request.data,method=request.method))if credentails.get('') == response_hash:return Truereturn False", "docstring": "Check user authentication using HTTP Digest auth", "id": "f13764:m12"} {"signature": "def secure_cookie(request):", "body": "return request.environ[''] == ''", "docstring": "Return true if cookie should have secure attribute", "id": "f13764:m13"} {"signature": "def __parse_request_range(range_header_text):", "body": "left = Noneright = Noneif not range_header_text:return left, rightrange_header_text = range_header_text.strip()if not range_header_text.startswith(''):return left, rightcomponents = range_header_text.split('')if len(components) != :return left, rightcomponents = components[].split('')try:right = int(components[])except:passtry:left = int(components[])except:passreturn left, right", "docstring": "Return a tuple describing the byte range requested in a GET request\n If the range is open ended on the left or right side, then a value of None\n will be set.\n RFC7233:\n http://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7233.html#header.range\n Examples:\n Range : bytes=1024-\n Range : bytes=10-20\n Range : bytes=-999", "id": "f13764:m14"} {"signature": "@app.route('')def view_landing_page(request):", "body": "tracking_enabled = '' in os.environreturn render_template('', request=request,tracking_enabled=tracking_enabled)", "docstring": "Generates Landing Page.", "id": "f13765:m2"} 
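The HA1/HA2/response helpers above (f13764:m9–m11) describe the standard RFC 2617 digest-auth computation: HA1 = MD5(username:realm:password), HA2 = MD5(method:digestURI) (with the entity body folded in for qop "auth-int"), and a final response hash built from HA1, the nonce values and HA2. As a rough self-contained sketch only — the function and parameter names below (``md5_hex``, ``digest_response``) are invented for illustration and are not the module's actual helpers — the same hashes can be computed with ``hashlib``:

.. code-block:: python

    import hashlib

    def md5_hex(data):
        # Hash str or bytes and return the hex digest.
        if isinstance(data, str):
            data = data.encode('utf-8')
        return hashlib.md5(data).hexdigest()

    def digest_response(username, realm, password, method, uri, nonce,
                        nc=None, cnonce=None, qop=None, body=b''):
        # HA1 = MD5(username:realm:password)
        ha1 = md5_hex(':'.join([username, realm, password]))
        if qop == 'auth-int':
            # HA2 = MD5(method:digestURI:MD5(entityBody))
            ha2 = md5_hex(':'.join([method, uri, md5_hex(body)]))
        else:
            # HA2 = MD5(method:digestURI)
            ha2 = md5_hex(':'.join([method, uri]))
        if qop in ('auth', 'auth-int'):
            # RESPONSE = MD5(HA1:nonce:nonceCount:clientNonce:qop:HA2)
            return md5_hex(':'.join([ha1, nonce, nc, cnonce, qop, ha2]))
        # qop unspecified: RESPONSE = MD5(HA1:nonce:HA2)
        return md5_hex(':'.join([ha1, nonce, ha2]))

A server-side check then simply recomputes this response from the stored password and the credentials sent in the ``Authorization`` header and compares it to the client's ``response`` field, which is what ``check_digest_auth`` in f13764:m12 does.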
{"signature": "@app.route('')def view_html_page(request):", "body": "return render_template('')", "docstring": "Simple Html Page", "id": "f13765:m3"} {"signature": "@app.route('')def view_robots_page(request):", "body": "response = Response()response.content = ROBOT_TXTresponse.content_type = ''return response", "docstring": "Simple Html Page", "id": "f13765:m4"} {"signature": "@app.route('')def view_deny_page(request):", "body": "response = Response()response.content = ANGRY_ASCIIresponse.content_type = ''return response", "docstring": "Simple Html Page", "id": "f13765:m5"} {"signature": "@app.route('')def view_origin(request):", "body": "return jsonify(origin=request.headers.get('',request.remote_addr))", "docstring": "Returns Origin IP.", "id": "f13765:m6"} {"signature": "@app.route('')def view_headers(request):", "body": "return jsonify(get_dict(request, ''))", "docstring": "Returns HTTP HEADERS.", "id": "f13765:m7"} {"signature": "@app.route('')def view_user_agent(request):", "body": "headers = get_headers(request)return jsonify({'': headers['']})", "docstring": "Returns User-Agent.", "id": "f13765:m8"} {"signature": "@app.route('', methods=('', ''))def view_get(request):", "body": "return jsonify(get_dict(request, '', '', '', ''))", "docstring": "Returns GET Data.", "id": "f13765:m9"} {"signature": "@app.route('', methods=('',))def view_post(request):", "body": "return jsonify(get_dict(request, '', '', '', '','', '', '', ''))", "docstring": "Returns POST Data.", "id": "f13765:m10"} {"signature": "@app.route('', methods=('',))def view_put(request):", "body": "return jsonify(get_dict(request, '', '', '', '','', '', '', ''))", "docstring": "Returns PUT Data.", "id": "f13765:m11"} {"signature": "@app.route('', methods=('',))def view_patch(request):", "body": "return jsonify(get_dict(request, '', '', '', '','', '', '', ''))", "docstring": "Returns PATCH Data.", "id": "f13765:m12"} {"signature": "@app.route('', methods=('',))def view_delete(request):", "body": "return jsonify(get_dict(request, '', '', '', '','', '', '', ''))", "docstring": "Returns DELETE Data.", "id": "f13765:m13"} {"signature": "@app.route('')@filters.gzipdef view_gzip_encoded_content(request):", "body": "return jsonify(get_dict(request, '', '',method=request.method, gzipped=True))", "docstring": "Returns GZip-Encoded Data.", "id": "f13765:m14"} {"signature": "@app.route('')@filters.deflatedef view_deflate_encoded_content(request):", "body": "return jsonify(get_dict(request, '', '',method=request.method, deflated=True))", "docstring": "Returns Deflate-Encoded Data.", "id": "f13765:m15"} {"signature": "@app.route('')def redirect_n_times(request, n):", "body": "n = int(n)assert n > absolute = request.args.get('', '').lower() == ''if n == :return redirect(app.url_for('', _request=request,_external=absolute))if absolute:return _redirect(request, '', n, True)else:return _redirect(request, '', n, False)", "docstring": "302 Redirects n times.", "id": "f13765:m16"} {"signature": "@app.route('')def redirect_to(request):", "body": "args = CaseInsensitiveDict(request.args.items())response = Response('')response.status_code = response.headers[''] = args[''].encode('')return response", "docstring": "302 Redirects to the given URL.", "id": "f13765:m18"} {"signature": "@app.route('')def relative_redirect_n_times(request, n):", "body": "n = int(n)assert n > response = Response('')response.status_code = if n == :response.headers[''] = url_for('')return responseresponse.headers[''] = app.url_for('', n=n - )return response", "docstring": 
"302 Redirects n times.", "id": "f13765:m19"} {"signature": "@app.route('')def absolute_redirect_n_times(request, n):", "body": "n = int(n)assert n > if n == :return redirect(app.url_for('', _request=request,_external=True))return _redirect(request, '', n, True)", "docstring": "302 Redirects n times.", "id": "f13765:m20"} {"signature": "@app.route('')def stream_n_messages(request, n):", "body": "n = int(n)response = get_dict(request, '', '', '', '')n = min(n, )def generate_stream():for i in range(n):response[''] = iyield json.dumps(response, default=json_dumps_default) + ''return Response(generate_stream(), headers={'': '',})", "docstring": "Stream n JSON messages", "id": "f13765:m21"} {"signature": "@app.route('',methods=['', '', '', '', '', ''])def view_status_code(request, codes):", "body": "if '' not in codes:code = int(codes)return status_code(code)choices = []for choice in codes.split(''):if '' not in choice:code = choiceweight = else:code, weight = choice.split('')choices.append((int(code), float(weight)))code = weighted_choice(choices)return status_code(code)", "docstring": "Return status code or random status code if more than one are given", "id": "f13765:m22"} {"signature": "@app.route('')def response_headers(request):", "body": "headers = Headers(request.args.to_dict())response = jsonify(headers)while True:content_len_shown = response.headers['']d = {}for key in response.headers.keys():value = response.headers.get_all(key)if len(value) == :value = value[]d[key] = valueresponse = jsonify(d)for key, value in headers.to_list():response.headers.add(key, value)if response.headers[''] == content_len_shown:breakreturn response", "docstring": "Returns a set of response headers from the query string", "id": "f13765:m23"} {"signature": "@app.route('')def view_cookies(request, hide_env=True):", "body": "cookies = dict(request.cookies.items())if hide_env and ('' not in request.args):for key in ENV_COOKIES:try:del cookies[key]except KeyError:passreturn jsonify(cookies=cookies)", "docstring": "Returns cookie data.", "id": "f13765:m24"} {"signature": "@app.route('')def view_forms_post(request):", "body": "return render_template('')", "docstring": "Simple HTML form.", "id": "f13765:m25"} {"signature": "@app.route('')def set_cookie(request, name, value):", "body": "r = app.make_response(redirect(url_for('')))r.set_cookie(key=name, value=value, secure=secure_cookie(request))return r", "docstring": "Sets a cookie and redirects to cookie list.", "id": "f13765:m26"} {"signature": "@app.route('')def set_cookies(request):", "body": "cookies = dict(request.args.items())r = app.make_response(redirect(url_for('')))for key, value in cookies.items():r.set_cookie(key=key, value=value, secure=secure_cookie(request))return r", "docstring": "Sets cookie(s) as provided by the query string\n and redirects to cookie list.", "id": "f13765:m27"} {"signature": "@app.route('')def delete_cookies(request):", "body": "cookies = dict(request.args.items())r = app.make_response(redirect(url_for('')))for key, value in cookies.items():r.delete_cookie(key=key)return r", "docstring": "Deletes cookie(s) as provided by the query string\n and redirects to cookie list.", "id": "f13765:m28"} {"signature": "@app.route('')def basic_auth(request, user='', passwd=''):", "body": "if not check_basic_auth(request, user, passwd):return status_code()return jsonify(authenticated=True, user=user)", "docstring": "Prompts the user for authorization using HTTP Basic Auth.", "id": "f13765:m29"} {"signature": "@app.route('')def 
hidden_basic_auth(request, user='', passwd=''):", "body": "if not check_basic_auth(request, user, passwd):return status_code()return jsonify(authenticated=True, user=user)", "docstring": "Prompts the user for authorization using HTTP Basic Auth.", "id": "f13765:m30"} {"signature": "@app.route('')def digest_auth(request, qop=None, user='', passwd=''):", "body": "if qop not in ('', ''):qop = Noneif '' not in request.headers ornot check_digest_auth(user, passwd) or'' not in request.headers:response = app.make_response('')response.status_code = nonce = H(b''.join([getattr(request, '', u'').encode(''),b'',str(time.time()).encode(''),b'',os.urandom()]))opaque = H(os.urandom())auth = WWWAuthenticate('')auth.set_digest('', nonce, opaque=opaque,qop=('', '') if qop is None else (qop, ))response.headers[''] = auth.to_header()response.headers[''] = ''return responsereturn jsonify(authenticated=True, user=user)", "docstring": "Prompts the user for authorization using HTTP Digest auth", "id": "f13765:m31"} {"signature": "@app.route('')def delay_response(request, delay):", "body": "delay = min(float(delay), )time.sleep(delay)return jsonify(get_dict(request, '', '', '', '','', '', ''))", "docstring": "Returns a delayed response", "id": "f13765:m32"} {"signature": "@app.route('')def drip(request):", "body": "args = CaseInsensitiveDict(request.args.items())duration = float(args.get('', ))numbytes = int(args.get('', ))code = int(args.get('', ))pause = duration / numbytesdelay = float(args.get('', ))if delay > :time.sleep(delay)def generate_bytes():for i in xrange(numbytes):yield u''.encode('')time.sleep(pause)response = Response(generate_bytes(), headers={'': '','': str(numbytes),})response.status_code = codereturn response", "docstring": "Drips data over a duration after an optional initial delay.", "id": "f13765:m33"} {"signature": "@app.route('')def decode_base64(request, value):", "body": "encoded = value.encode('') return base64.urlsafe_b64decode(encoded).decode('')", "docstring": "Decodes base64url-encoded string", "id": "f13765:m34"} {"signature": "@app.route('', methods=('',))def cache(request):", "body": "is_conditional = (request.headers.get('') orrequest.headers.get(''))if is_conditional is None:response = view_get(request)response.headers[''] = http_date()response.headers[''] = uuid.uuid4().hexreturn responseelse:return status_code()", "docstring": "Returns a 304 if an If-Modified-Since header or\n If-None-Match is present. 
Returns the same as a GET otherwise.", "id": "f13765:m35"} {"signature": "@app.route('')def cache_control(request, value):", "body": "value = int(value)response = view_get(request)response.headers[''] = ''.format(value)return response", "docstring": "Sets a Cache-Control header.", "id": "f13765:m36"} {"signature": "@app.route('')def random_bytes(request, n):", "body": "n = int(n)n = min(n, * ) params = CaseInsensitiveDict(request.args.items())if '' in params:random.seed(int(params['']))response = Response()response.data = bytearray(random.randint(, ) for i in range(n))response.content_type = ''return response", "docstring": "Returns n random bytes generated with given seed.", "id": "f13765:m38"} {"signature": "@app.route('')def stream_random_bytes(request, n):", "body": "n = int(n)n = min(n, * ) params = CaseInsensitiveDict(request.args.items())if '' in params:random.seed(int(params['']))if '' in params:chunk_size = max(, int(params['']))else:chunk_size = * def generate_bytes():chunks = bytearray()for i in xrange(n):chunks.append(random.randint(, ))if len(chunks) == chunk_size:yield(bytes(chunks))chunks = bytearray()if chunks:yield(bytes(chunks))headers = {'': ''}return Response(generate_bytes(), headers=headers)", "docstring": "Streams n random bytes generated with given seed,\n at given chunk size per packet.", "id": "f13765:m39"} {"signature": "@app.route('')def range_request(request, numbytes):", "body": "numbytes = int(numbytes)if numbytes <= or numbytes > ( * ):response = Response(headers={'': '' % numbytes,'': ''})response.status_code = response.content = ''return responseparams = CaseInsensitiveDict(request.args.items())if '' in params:chunk_size = max(, int(params['']))else:chunk_size = * duration = float(params.get('', ))pause_per_byte = duration / numbytesrequest_headers = get_headers(request)first_byte_pos, last_byte_pos = get_request_range(request_headers,numbytes)if (first_byte_pos > last_byte_pos orfirst_byte_pos not in xrange(, numbytes) orlast_byte_pos not in xrange(, numbytes)):response = Response(headers={'': '' % numbytes,'': '','': '' % numbytes})response.status_code = return responsedef generate_bytes():chunks = bytearray()for i in xrange(first_byte_pos, last_byte_pos + ):chunks.append(ord('') + (i % ))if len(chunks) == chunk_size:yield(bytes(chunks))time.sleep(pause_per_byte * chunk_size)chunks = bytearray()if chunks:time.sleep(pause_per_byte * len(chunks))yield(bytes(chunks))content_range = '' % (first_byte_pos, last_byte_pos,numbytes)response_headers = {'': '','': '' % numbytes,'': '','': content_range}response = Response(generate_bytes(), headers=response_headers)if (first_byte_pos == ) and (last_byte_pos == (numbytes - )):response.status_code = else:response.status_code = return response", "docstring": "Streams n random bytes generated with given seed,\n at given chunk size per packet.", "id": "f13765:m40"} {"signature": "@app.route('')def link_page(request, n, offset):", "body": "n = int(n)offset = int(offset)n = min(max(, n), ) link = \"\"html = ['']for i in xrange(n):if i == offset:html.append(''.format(i))else:html.append(link.format(url_for('', n=n, offset=i), i))html.append('')return ''.join(html)", "docstring": "Generate a page containing n links to other pages which do the same.", "id": "f13765:m41"} {"signature": "@app.route('')def links(request, n):", "body": "n = int(n)return redirect(url_for('', n=n, offset=))", "docstring": "Redirect to first links page.", "id": "f13765:m42"} {"signature": "@app.route('')def image(request):", "body": "headers = 
get_headers(request)if '' not in headers:return image_png(request) accept = headers[''].lower()if '' in accept:return image_webp(request)elif '' in accept:return image_svg(request)elif '' in accept:return image_jpeg(request)elif '' in accept or '' in accept:return image_png(request)else:return status_code()", "docstring": "Returns a simple image of the type suggest by the Accept header.", "id": "f13765:m43"} {"signature": "def weighted_choice(choices):", "body": "values, weights = zip(*choices)total = cum_weights = []for w in weights:total += wcum_weights.append(total)x = random.uniform(, total)i = bisect.bisect(cum_weights, x)return values[i]", "docstring": "Returns a value from choices chosen by weighted random selection\n\n choices should be a list of (value, weight) tuples.\n\n eg. weighted_choice([('val1', 5), ('val2', 0.3), ('val3', 1)])", "id": "f13766:m0"} {"signature": "def setup_environ(self):", "body": "env = self.base_environ = {}env[''] = self.server_nameenv[''] = ''env[''] = str(self.server_port)env[''] = ''env[''] = ''env[''] = ''", "docstring": "https://www.python.org/dev/peps/pep-0333/#environ-variables", "id": "f13788:c0:m3"} {"signature": "def get_environ(self):", "body": "env = self.base_environ.copy()env[''] = self.request_methodif '' in self.path:path, query = self.path.split('', )else:path, query = self.path, ''env[''] = urllib.parse.unquote(path)env[''] = queryenv[''] = self.headers.get('', '')env[''] = self.headers.get('', '')env[''] = self.request_versionenv[''] = self.client_address[]env[''] = self.client_address[]env[''] = (, )env[''] = ''env[''] = io.BytesIO(self.raw_request)env[''] = sys.stderrenv[''] = Falseenv[''] = Trueenv[''] = Falsefor k, v in self.headers.items():k = k.replace('', '').upper()if k in env:continueenv['' + k] = vreturn env", "docstring": "https://www.python.org/dev/peps/pep-0333/#environ-variables", "id": "f13788:c0:m10"} {"signature": "def get_func(self, path):", "body": "for url_match, func_pair in self._urls_regex_map.items():m = url_match.match(path)if m is not None:return func_pair.func, func_pair.methods, m.groupdict()return None, None, None", "docstring": ":return: (func, methods)", "id": "f13789:c0:m2"} {"signature": "@classmethoddef _replace_type_to_regex(cls, match):", "body": "groupdict = match.groupdict()_type = groupdict.get('')type_regex = cls.TYPE_REGEX_MAP.get(_type, '')name = groupdict.get('')return r''.format(name=name, type_regex=type_regex)", "docstring": "/ -> r'(?P\\d+)", "id": "f13789:c1:m3"} {"signature": "def cookie_dump(key, value='', max_age=None, expires=None, path='',domain=None, secure=False, httponly=False):", "body": "cookie = SimpleCookie()cookie[key] = valuefor attr in ('', '', '', '','', ''):attr_key = attr.replace('', '')attr_value = locals()[attr]if attr_value:cookie[key][attr_key] = attr_valuereturn cookie", "docstring": ":rtype: ``Cookie.SimpleCookie``", "id": "f13791:m0"} {"signature": "def response_status_string(code):", "body": "mean = HTTP_STATUS_CODES.get(code, '').upper()return ''.format(code=code, mean=mean)", "docstring": "e.g. 
``200 OK``", "id": "f13791:m1"} {"signature": "@propertydef method(self):", "body": "return self.environ['']", "docstring": "``GET``, ``POST`` etc.", "id": "f13791:c0:m1"} {"signature": "@propertydef cookies(self):", "body": "http_cookie = self.environ.get('', '')_cookies = {k: v.valuefor (k, v) in SimpleCookie(http_cookie).items()}return _cookies", "docstring": "Request cookies\n\n :rtype: dict", "id": "f13791:c0:m11"} {"signature": "@propertydef is_ajax(self):", "body": "requested_with = self.headers.get('', '').lower()return requested_with == ''", "docstring": "The ``X-Requested-With`` header equal to ``HttpRequest``", "id": "f13791:c0:m20"} {"signature": "def _exec(self, globals_dict=None):", "body": "globals_dict = globals_dict or {}globals_dict.setdefault('', {})exec(self._code, globals_dict)return globals_dict", "docstring": "exec compiled code", "id": "f13792:c0:m6"} {"signature": "def handle_extends(self, text):", "body": "match = self.re_extends.match(text)if match:extra_text = self.re_extends.sub('', text, count=)blocks = self.get_blocks(extra_text)path = os.path.join(self.base_dir, match.group(''))with open(path, encoding='') as fp:return self.replace_blocks_in_extends(fp.read(), blocks)else:return None", "docstring": "replace all blocks in extends with current blocks", "id": "f13792:c1:m9"} {"signature": "def flush_buffer(self):", "body": "self.code_builder.add_line('',self.result_var, ''.join(self.buffered))self.buffered = []", "docstring": "flush all buffered string into code", "id": "f13792:c1:m13"} {"signature": "def strip_token(self, text, start, end):", "body": "text = text.replace(start, '', )text = text.replace(end, '', )return text", "docstring": "{{ a }} -> a", "id": "f13792:c1:m14"} {"signature": "def cleanup_extra_whitespaces(self, text):", "body": "return re.sub(r'', r'', text)", "docstring": "cleanup extra whitespaces let numbers of whitespaces <=1", "id": "f13792:c1:m15"} {"signature": "def not_in(self, value):", "body": "return ''.format(self), value", "docstring": ":type value: tuple", "id": "f13797:c1:m13"} {"signature": "def in_(self, value):", "body": "return ''.format(self), value", "docstring": ":type value: tuple", "id": "f13797:c1:m14"} {"signature": "@classmethoddef __prepare__(cls, name, bases):", "body": "return collections.OrderedDict()", "docstring": "\u8ba9 attr_dict \u6709\u5e8f", "id": "f13797:c2:m1"} {"signature": "def url_resolve(self, path):", "body": "return self._router.get_func(path)", "docstring": "url -> view\n\n :return: (func, methods, func_kwargs)", "id": "f13798:c0:m4"} {"signature": "def __call__(self, environ, start_response):", "body": "self.start_response = start_responsepath = environ['']method = environ['']func, methods, func_kwargs = self.url_resolve(path)try:if func is None:self.notfound()if method not in methods:self.abort()request = Request(environ)result = self.handle_before_request_hooks(request, view_func=func)if isinstance(result, Response):response = resultelse:response = self.handle_view(request, func, func_kwargs)self.handle_after_request_hooks(request, response, view_func=func)except HTTPException as ex:response = ex.responsereturn self._start_response(response)", "docstring": "for wsgi server", "id": "f13798:c0:m5"} {"signature": "def __init__(self, metadata, **kwargs):", "body": "if '' not in metadata:metadata[''] = []self.metadata = metadataif '' in kwargs:self.uri_generator = kwargs.get('')else:self.uri_generator = DefaultUrnGenerator(self.metadata.get(''))if '' in kwargs:self.concept_scheme = 
kwargs.get('')else:self.concept_scheme = ConceptScheme(uri=DefaultConceptSchemeUrnGenerator().generate(id=self.metadata.get('')))", "docstring": "Create a new provider and register some metadata.\n\n\n :param uri_generator: An object that implements the\n :class:`skosprovider.uri.UriGenerator` interface.\n :param concept_scheme: A :class:`~skosprovider.skos.ConceptScheme`. If\n not present, a default :class:`~skosprovider.skos.ConceptScheme`\n will be created with a uri generated by the\n :class:`~skosprovider.uri.DefaultConceptSchemeUrnGenerator` in\n combination with the provider `id`.\n :param dict metadata: Metadata essential to this provider. Possible\n metadata:\n\n * `id`: A unique identifier for the vocabulary. Required.\n * `default_language`: Used to determine what language to use when \\\n returning labels if no language is specified. Will default \\\n to `en` if not specified.\n * `subject`: A list of subjects or tags that define what the \\\n provider is about or what the provider can handle. This \\\n information can then be used when querying a \\\n :class:`~skosprovider.registry.Registry` for providers.\n * `dataset`: A :class:`dict` detailing the dataset the \\\n conceptscheme and all concepts and collections are part of. \\\n Currently the contents of the dictionary are undefined \\\n except for a :term:`uri` attribute that must be present.", "id": "f13799:c0:m0"} {"signature": "def _get_language(self, **kwargs):", "body": "return kwargs.get('',self.metadata.get('', ''))", "docstring": "Determine what language to render labels in.\n\n Will first check if there's a language keyword specified in **kwargs.\n If not, will check the default language of the provider. If there's no\n default language, will fall back to 'en'.\n\n :rtype: str", "id": "f13799:c0:m1"} {"signature": "def _get_sort(self, **kwargs):", "body": "return kwargs.get('', None)", "docstring": "Determine on what attribute to sort.\n\n :rtype: str", "id": "f13799:c0:m2"} {"signature": "def _get_sort_order(self, **kwargs):", "body": "return kwargs.get('', '')", "docstring": "Determine the sort order.\n\n :rtype: str\n :returns: 'asc' or 'desc'", "id": "f13799:c0:m3"} {"signature": "def _sort(self, concepts, sort=None, language='', reverse=False):", "body": "sorted = copy.copy(concepts)if sort:sorted.sort(key=methodcaller('', sort, language), reverse=reverse)return sorted", "docstring": "Returns a sorted version of a list of concepts. Will leave the original\nlist unsorted.\n\n:param list concepts: A list of concepts and collections.\n:param string sort: What to sort on: `id`, `label` or `sortlabel`\n:param string language: Language to use when sorting on `label` or\n `sortlabel`.\n:param boolean reverse: Reverse the sort order?\n:rtype: list", "id": "f13799:c0:m4"} {"signature": "def get_vocabulary_id(self):", "body": "return self.metadata.get('')", "docstring": "Get an identifier for the vocabulary.\n\n :rtype: String or number.", "id": "f13799:c0:m5"} {"signature": "def get_metadata(self):", "body": "return self.metadata", "docstring": "Get some metadata on the provider or the vocab it represents.\n\n :rtype: Dict.", "id": "f13799:c0:m6"} {"signature": "@abc.abstractmethoddef get_by_id(self, id):", "body": "", "docstring": "Get all information on a concept or collection, based on id.\n\n Providers should assume that all id's passed are strings. If a provider\n knows that internally it uses numeric identifiers, it's up to the\n provider to do the typecasting. 
Generally, this should not be done by\n changing the id's themselves (eg. from int to str), but by doing the\n id comparisons in a type agnostic way.\n\n Since this method could be used to find both concepts and collections,\n it's assumed that there are no id collisions between concepts and\n collections.\n\n :rtype: :class:`skosprovider.skos.Concept` or\n :class:`skosprovider.skos.Collection` or `False` if the concept or\n collection is unknown to the provider.", "id": "f13799:c0:m7"} {"signature": "@abc.abstractmethoddef get_by_uri(self, uri):", "body": "", "docstring": "Get all information on a concept or collection, based on a\n :term:`URI`.\n\n :rtype: :class:`skosprovider.skos.Concept` or\n :class:`skosprovider.skos.Collection` or `False` if the concept or\n collection is unknown to the provider.", "id": "f13799:c0:m8"} {"signature": "@abc.abstractmethoddef get_all(self, **kwargs):", "body": "", "docstring": "Returns all concepts and collections in this provider.\n\n :param string language: Optional. If present, it should be a\n :term:`language-tag`. This language-tag is passed on to the\n underlying providers and used when selecting the label to display\n for each concept.\n :param string sort: Optional. If present, it should either be `id`,\n `label` or `sortlabel`. The `sortlabel` option means the providers should\n take into account any `sortLabel` if present, if not it will\n fallback to a regular label to sort on.\n :param string sort_order: Optional. What order to sort in: `asc` or\n `desc`. Defaults to `asc`\n\n :returns: A :class:`lst` of concepts and collections. Each of these is a dict\n with the following keys:\n\n * id: id within the conceptscheme\n * uri: :term:`uri` of the concept or collection\n * type: concept or collection\n * label: A label to represent the concept or collection. It is \\\n determined by looking at the `language` parameter, the default \\\n language of the provider and finally falls back to `en`.", "id": "f13799:c0:m9"} {"signature": "@abc.abstractmethoddef get_top_concepts(self, **kwargs):", "body": "", "docstring": "Returns all top-level concepts in this provider.\n\nTop-level concepts are concepts that have no broader concepts\nthemselves. They might have narrower concepts, but this is not\nmandatory.\n\n:param string language: Optional. If present, it should be a\n :term:`language-tag`. This language-tag is passed on to the\n underlying providers and used when selecting the label to display\n for each concept.\n:param string sort: Optional. If present, it should either be `id`,\n `label` or `sortlabel`. The `sortlabel` option means the providers should\n take into account any `sortLabel` if present, if not it will\n fallback to a regular label to sort on.\n:param string sort_order: Optional. What order to sort in: `asc` or\n `desc`. Defaults to `asc`\n\n:returns: A :class:`lst` of concepts, NOT collections. Each of these\n is a dict with the following keys:\n\n * id: id within the conceptscheme\n * uri: :term:`uri` of the concept or collection\n * type: concept or collection\n * label: A label to represent the concept or collection. It is \\\n determined by looking at the `language` parameter, the default \\\n language of the provider and finally falls back to `en`.", "id": "f13799:c0:m10"} {"signature": "@abc.abstractmethoddef find(self, query, **kwargs):", "body": "", "docstring": "Find concepts that match a certain query.\n\n Currently query is expected to be a dict, so that complex queries can\n be passed. 
You can use this dict to search for concepts or collections\n with a certain label, with a certain type and for concepts that belong\n to a certain collection.\n\n .. code-block:: python\n\n # Find anything that has a label of church.\n provider.find({'label': 'church'})\n\n # Find all concepts that are a part of collection 5.\n provider.find({'type': 'concept', 'collection': {'id': 5})\n\n # Find all concepts, collections or children of these\n # that belong to collection 5.\n provider.find({'collection': {'id': 5, 'depth': 'all'})\n\n # Find anything that has a label of church.\n # Preferentially display a label in Dutch.\n provider.find({'label': 'church'}, language='nl')\n\n :param query: A dict that can be used to express a query. The following\n keys are permitted:\n\n * `label`: Search for something with this label value. An empty \\\n label is equal to searching for all concepts.\n * `type`: Limit the search to certain SKOS elements. If not \\\n present or `None`, `all` is assumed:\n\n * `concept`: Only return :class:`skosprovider.skos.Concept` \\\n instances.\n * `collection`: Only return \\\n :class:`skosprovider.skos.Collection` instances.\n * `all`: Return both :class:`skosprovider.skos.Concept` and \\\n :class:`skosprovider.skos.Collection` instances.\n * `collection`: Search only for concepts belonging to a certain \\\n collection. This argument should be a dict with two keys:\n\n * `id`: The id of a collection. Required.\n * `depth`: Can be `members` or `all`. Optional. If not \\\n present, `members` is assumed, meaning only concepts or \\\n collections that are a direct member of the collection \\\n should be considered. When set to `all`, this method \\\n should return concepts and collections that are a member \\\n of the collection or are a narrower concept of a member \\\n of the collection.\n\n :param string language: Optional. If present, it should be a\n :term:`language-tag`. This language-tag is passed on to the\n underlying providers and used when selecting the label to display\n for each concept.\n :param string sort: Optional. If present, it should either be `id`,\n `label` or `sortlabel`. The `sortlabel` option means the providers should\n take into account any `sortLabel` if present, if not it will\n fallback to a regular label to sort on.\n :param string sort_order: Optional. What order to sort in: `asc` or\n `desc`. Defaults to `asc`\n\n :returns: A :class:`lst` of concepts and collections. Each of these\n is a dict with the following keys:\n\n * id: id within the conceptscheme\n * uri: :term:`uri` of the concept or collection\n * type: concept or collection\n * label: A label to represent the concept or collection. 
It is \\\n determined by looking at the `language` parameter, the default \\\n language of the provider and finally falls back to `en`.", "id": "f13799:c0:m11"} {"signature": "@abc.abstractmethoddef expand(self, id):", "body": "", "docstring": "Expand a concept or collection to all it's narrower\n concepts.\n\n This method should recurse and also return narrower concepts\n of narrower concepts.\n\n If the id passed belongs to a :class:`skosprovider.skos.Concept`,\n the id of the concept itself should be include in the return value.\n\n If the id passed belongs to a :class:`skosprovider.skos.Collection`,\n the id of the collection itself must not be present in the return value\n In this case the return value includes all the member concepts and\n their narrower concepts.\n\n :param id: A concept or collection id.\n :rtype: A list of id's or `False` if the concept or collection doesn't\n exist.", "id": "f13799:c0:m12"} {"signature": "def get_top_display(self, **kwargs):", "body": "", "docstring": "Returns all concepts or collections that form the top-level of a\ndisplay hierarchy.\n\nAs opposed to the :meth:`get_top_concepts`, this method can possibly\nreturn both concepts and collections.\n\n:param string language: Optional. If present, it should be a\n :term:`language-tag`. This language-tag is passed on to the\n underlying providers and used when selecting the label to display\n for each concept.\n:param string sort: Optional. If present, it should either be `id`,\n `label` or `sortlabel`. The `sortlabel` option means the providers should\n take into account any `sortLabel` if present, if not it will\n fallback to a regular label to sort on.\n:param string sort_order: Optional. What order to sort in: `asc` or\n `desc`. Defaults to `asc`\n\n:returns: A :class:`lst` of concepts and collections. Each of these\n is a dict with the following keys:\n\n * id: id within the conceptscheme\n * uri: :term:`uri` of the concept or collection\n * type: concept or collection\n * label: A label to represent the concept or collection. It is\\\n determined by looking at the `language` parameter, the default\\\n language of the provider and finally falls back to `en`.", "id": "f13799:c0:m13"} {"signature": "def get_children_display(self, id, **kwargs):", "body": "", "docstring": "Return a list of concepts or collections that should be displayed\nunder this concept or collection.\n\n:param string language: Optional. If present, it should be a\n :term:`language-tag`. This language-tag is passed on to the\n underlying providers and used when selecting the label to display\n for each concept.\n:param string sort: Optional. If present, it should either be `id`,\n `label` or `sortlabel`. The `sortlabel` option means the providers should\n take into account any `sortLabel` if present, if not it will\n fallback to a regular label to sort on.\n:param string sort_order: Optional. What order to sort in: `asc` or\n `desc`. Defaults to `asc`\n\n:param str id: A concept or collection id.\n:returns: A :class:`lst` of concepts and collections. Each of these\n is a dict with the following keys:\n\n * id: id within the conceptscheme\n * uri: :term:`uri` of the concept or collection\n * type: concept or collection\n * label: A label to represent the concept or collection. 
It is \\\n determined by looking at the `language` parameter, the default \\\n language of the provider and finally falls back to `en`.", "id": "f13799:c0:m14"} {"signature": "def __init__(self, metadata, list, **kwargs):", "body": "super(MemoryProvider, self).__init__(metadata, **kwargs)self.list = listif '' in kwargs:self.case_insensitive = kwargs['']", "docstring": ":param dict metadata: A dictionary with keywords like language.\n:param list list: A list of :class:`skosprovider.skos.Concept` and\n :class:`skosprovider.skos.Collection` instances.\n:param Boolean case_insensitive: Should searching for labels be done\n case-insensitive?", "id": "f13799:c1:m0"} {"signature": "def _normalise_query(self, query):", "body": "if '' in query and query[''] not in ['', '']:del query['']return query", "docstring": ":param query: A dict that can be used to express a query.\n:rtype: dict", "id": "f13799:c1:m4"} {"signature": "def _include_in_find(self, c, query):", "body": "include = Trueif include and '' in query:include = query[''] == c.typeif include and '' in query:def finder(l, query):if not self.case_insensitive:return l.label.find(query[''])else:return l.label.upper().find(query[''].upper())include = any([finder(l, query) >= for l in c.labels])if include and '' in query:coll = self.get_by_id(query[''][''])if not coll or not isinstance(coll, Collection):raise ValueError('')if '' in query[''] and query[''][''] == '':members = self.expand(coll.id)else:members = coll.membersinclude = any([True for id in members if str(id) == str(c.id)]) return include", "docstring": ":param c: A :class:`skosprovider.skos.Concept` or\n :class:`skosprovider.skos.Collection`.\n:param query: A dict that can be used to express a query.\n:rtype: boolean", "id": "f13799:c1:m5"} {"signature": "def _get_find_dict(self, c, **kwargs):", "body": "language = self._get_language(**kwargs)return {'': c.id,'': c.uri,'': c.type,'': None if c.label() is None else c.label(language).label}", "docstring": "Return a dict that can be used in the return list of the :meth:`find`\nmethod.\n\n:param c: A :class:`skosprovider.skos.Concept` or\n :class:`skosprovider.skos.Collection`.\n:rtype: dict", "id": "f13799:c1:m6"} {"signature": "def __init__(self, metadata, reader, **kwargs):", "body": "super(SimpleCsvProvider, self).__init__(metadata, [], **kwargs)self.list = [self._from_row(row) for row in reader]", "docstring": ":param metadata: A metadata dictionary.\n:param reader: A csv reader.", "id": "f13799:c3:m0"} {"signature": "def label(labels=[], language='', sortLabel=False):", "body": "if not labels:return Noneif not language:language = ''labels = [dict_to_label(l) for l in labels]l = Falseif sortLabel:l = find_best_label_for_type(labels, language, '')if not l:l = find_best_label_for_type(labels, language, '')if not l:l = find_best_label_for_type(labels, language, '')if l:return lelse:return label(labels, '', sortLabel) if language != '' else None", "docstring": "Provide a label for a list of labels.\n\nThe items in the list of labels are assumed to be either instances of\n:class:`Label`, or dicts with at least the key `label` in them. These will\nbe passed to the :func:`dict_to_label` function.\n\nThis method tries to find a label by looking if there's\na pref label for the specified language. If there's no pref label,\nit looks for an alt label. It disregards hidden labels.\n\nWhile matching languages, preference will be given to exact matches. But,\nif no exact match is present, an inexact match will be attempted. 
This might\nbe because a label in language `nl-BE` is being requested, but only `nl` or\neven `nl-NL` is present. Similarly, when requesting `nl`, a label with\nlanguage `nl-NL` or even `nl-Latn-NL` will also be considered,\nproviding no label is present that has an exact match with the\nrequested language.\n\nIf language 'any' was specified, all labels will be considered,\nregardless of language.\n\nTo find a label without a specified language, pass `None` as language.\n\nIf a language or None was specified, and no label could be found, this\nmethod will automatically try to find a label in some other language.\n\nFinally, if no label could be found, None is returned.\n\n:param string language: The preferred language to receive the label in. This\n should be a valid IANA language tag.\n:param boolean sortLabel: Should sortLabels be considered or not? If True,\n sortLabels will be preferred over prefLabels. Bear in mind that these\n are still language dependent. So, it's possible to have a different\n sortLabel per language.\n:rtype: A :class:`Label` or `None` if no label could be found.", "id": "f13800:m0"} {"signature": "def find_best_label_for_type(labels, language, labeltype):", "body": "typelabels = [l for l in labels if l.type == labeltype]if not typelabels:return Falseif language == '':return typelabels[]exact = filter_labels_by_language(typelabels, language)if exact:return exact[]inexact = filter_labels_by_language(typelabels, language, True)if inexact:return inexact[]return False", "docstring": "Find the best label for a certain labeltype.\n\n:param list labels: A list of :class:`Label`.\n:param str language: An IANA language string, eg. `nl` or `nl-BE`.\n:param str labeltype: Type of label to look for, eg. `prefLabel`.", "id": "f13800:m1"} {"signature": "def filter_labels_by_language(labels, language, broader=False):", "body": "if language == '':return labelsif broader:language = tags.tag(language).language.formatreturn [l for l in labels if tags.tag(l.language).language.format == language]else:language = tags.tag(language).formatreturn [l for l in labels if tags.tag(l.language).format == language]", "docstring": "Filter a list of labels, leaving only labels of a certain language.\n\n:param list labels: A list of :class:`Label`.\n:param str language: An IANA language string, eg. `nl` or `nl-BE`.\n:param boolean broader: When true, will also match `nl-BE` when filtering\n on `nl`. When false, only exact matches are considered.", "id": "f13800:m2"} {"signature": "def dict_to_label(dict):", "body": "try:return Label(dict[''],dict.get('', ''),dict.get('', ''))except (KeyError, AttributeError, TypeError):return dict", "docstring": "Transform a dict with keys `label`, `type` and `language` into a\n:class:`Label`.\n\nOnly the `label` key is mandatory. If `type` is not present, it will\ndefault to `prefLabel`. If `language` is not present, it will default\nto `und`.\n\nIf the argument passed is not a dict, this method just\nreturns the argument.", "id": "f13800:m3"} {"signature": "def dict_to_note(dict):", "body": "if isinstance(dict, Note):return dictreturn Note(dict[''],dict.get('', ''),dict.get('', ''),dict.get(''))", "docstring": "Transform a dict with keys `note`, `type` and `language` into a\n:class:`Note`.\n\nOnly the `note` key is mandatory. If `type` is not present, it will\ndefault to `note`. 
If `language` is not present, it will default to `und`.\nIf `markup` is not present it will default to `None`.\n\nIf the argument passed is already a :class:`Note`, this method just returns\nthe argument.", "id": "f13800:m4"} {"signature": "def dict_to_source(dict):", "body": "if isinstance(dict, Source):return dictreturn Source(dict[''],dict.get(''))", "docstring": "Transform a dict with key 'citation' into a :class:`Source`.\n\nIf the argument passed is already a :class:`Source`, this method just\nreturns the argument.", "id": "f13800:m5"} {"signature": "@staticmethoddef is_valid_type(type):", "body": "return type in Label.valid_types", "docstring": "Check if the argument is a valid SKOS label type.\n\n:param string type: The type to be checked.", "id": "f13800:c0:m3"} {"signature": "@staticmethoddef is_valid_type(type):", "body": "return type in Note.valid_types", "docstring": "Check if the argument is a valid SKOS note type.\n\n:param string type: The type to be checked.", "id": "f13800:c1:m3"} {"signature": "@staticmethoddef is_valid_markup(markup):", "body": "return markup in valid_markup", "docstring": "Check the argument is a valid type of markup.\n\n:param string markup: The type to be checked.", "id": "f13800:c1:m4"} {"signature": "@staticmethoddef is_valid_markup(markup):", "body": "return markup in valid_markup", "docstring": "Check the argument is a valid type of markup.\n\n:param string markup: The type to be checked.", "id": "f13800:c2:m1"} {"signature": "def label(self, language=''):", "body": "return label(self.labels, language)", "docstring": "Provide a single label for this conceptscheme.\n\nThis uses the :func:`label` function to determine which label to\nreturn.\n\n:param string language: The preferred language to receive the label in.\n This should be a valid IANA language tag.\n:rtype: :class:`skosprovider.skos.Label` or False if no labels were found.", "id": "f13800:c3:m1"} {"signature": "def _sortkey(self, key='', language=''):", "body": "if key == '':return self.urielse:l = label(self.labels, language, key == '')return l.label.lower() if l else ''", "docstring": "Provide a single sortkey for this conceptscheme.\n\n:param string key: Either `uri`, `label` or `sortlabel`.\n:param string language: The preferred language to receive the label in\n if key is `label` or `sortlabel`. This should be a valid IANA language tag.\n:rtype: :class:`str`", "id": "f13800:c3:m2"} {"signature": "def label(self, language=''):", "body": "return label(self.labels, language)", "docstring": "Provide a single label for this concept.\n\nThis uses the :func:`label` function to determine which label to return.\n\n:param string language: The preferred language to receive the label in.\n This should be a valid IANA language tag.\n:rtype: :class:`skosprovider.skos.Label` or False if no labels were found.", "id": "f13800:c4:m1"} {"signature": "def _sortkey(self, key='', language=''):", "body": "if key == '':return str(self.id)elif key == '':return self.uri if self.uri else ''else:l = label(self.labels, language, key == '')return l.label.lower() if l else ''", "docstring": "Provide a single sortkey for this collection.\n\n:param string key: Either `id`, `uri`, `label` or `sortlabel`.\n:param string language: The preferred language to receive the label in\n if key is `label` or `sortlabel`. 
This should be a valid IANA language tag.\n:rtype: :class:`str`", "id": "f13800:c4:m2"} {"signature": "def label(self, language=''):", "body": "return label(self.labels, language, False)", "docstring": "Provide a single label for this collection.\n\nThis uses the :func:`label` function to determine which label to return.\n\n:param string language: The preferred language to receive the label in.\n This should be a valid IANA language tag.\n:rtype: :class:`skosprovider.skos.Label` or False if no labels were found.", "id": "f13800:c5:m1"} {"signature": "def _sortkey(self, key='', language=''):", "body": "if key == '':return str(self.id)elif key == '':return self.uri if self.uri else ''else:l = label(self.labels, language, key == '')return l.label.lower() if l else ''", "docstring": "Provide a single sortkey for this collection.\n\n:param string key: Either `id`, `uri`, `label` or `sortlabel`.\n:param string language: The preferred language to receive the label in\n if key is `label` or `sortlabel`. This should be a valid IANA language tag.\n:rtype: :class:`str`", "id": "f13800:c5:m2"} {"signature": "def is_uri(uri):", "body": "if uri is None:return Falsereturn rfc3987.match(uri, rule='')", "docstring": "Check if a string is a valid URI according to rfc3987\n\n:param string uri:\n:rtype: boolean", "id": "f13801:m0"} {"signature": "@abc.abstractmethoddef generate(self, **kwargs):", "body": "", "docstring": "Generate a :term:`URI` based on parameters passed.", "id": "f13801:c0:m0"} {"signature": "def generate(self, **kwargs):", "body": "return self.pattern % kwargs['']", "docstring": "Generate a :term:`URI` based on parameters passed.\n\n:param id: The id of the concept or collection.\n:rtype: string", "id": "f13801:c1:m1"} {"signature": "def generate(self, **kwargs):", "body": "return (self.pattern % (self.vocabulary_id, kwargs[''])).lower()", "docstring": "Generate a :term:`URI` based on parameters passed.\n\n:param id: The id of the concept or collection.\n:rtype: string", "id": "f13801:c2:m1"} {"signature": "def generate(self, **kwargs):", "body": "return (self.pattern % (kwargs[''])).lower()", "docstring": "Generate a :term:`URI` based on parameters passed.\n\n:param id: The id of the conceptscheme.\n:rtype: string", "id": "f13801:c3:m0"} {"signature": "def generate(self, **kwargs):", "body": "if kwargs[''] not in ['', '']:raise ValueError('' % kwargs[''])return (self.pattern % (self.vocabulary_id, kwargs[''], kwargs[''])).lower()", "docstring": "Generate a :term:`URI` based on parameters passed.\n\n:param id: The id of the concept or collection.\n:param type: What we're generating a :term:`URI` for: `concept`\n or `collection`.\n:rtype: string", "id": "f13801:c4:m1"} {"signature": "def register_provider(self, provider):", "body": "if provider.get_vocabulary_id() in self.providers:raise RegistryException('')self.providers[provider.get_vocabulary_id()] = providerif provider.concept_scheme.uri in self.concept_scheme_uri_map:raise RegistryException('' % provider.concept_scheme.uri)self.concept_scheme_uri_map[provider.concept_scheme.uri] = provider.get_vocabulary_id()", "docstring": "Register a :class:`skosprovider.providers.VocabularyProvider`.\n\n:param skosprovider.providers.VocabularyProvider provider: The provider\n to register.\n:raises RegistryException: A provider with this id or uri has already \n been registered.", "id": "f13802:c1:m1"} {"signature": "def remove_provider(self, id):", "body": "if id in self.providers:p = self.providers.get(id, False)del self.providers[id]del 
self.concept_scheme_uri_map[p.concept_scheme.uri]return pelif id in self.concept_scheme_uri_map:id = self.concept_scheme_uri_map[id]return self.remove_provider(id)else:return False", "docstring": "Remove the provider with the given id or :term:`URI`.\n\n:param str id: The identifier for the provider.\n:returns: A :class:`skosprovider.providers.VocabularyProvider` or\n `False` if the id is unknown.", "id": "f13802:c1:m2"} {"signature": "def get_provider(self, id):", "body": "if id in self.providers:return self.providers.get(id, False)elif is_uri(id) and id in self.concept_scheme_uri_map:return self.providers.get(self.concept_scheme_uri_map[id], False)return False", "docstring": "Get a provider by id or :term:`uri`.\n\n:param str id: The identifier for the provider. This can either be the\n id with which it was registered or the :term:`uri` of the conceptscheme\n that the provider services.\n:returns: A :class:`skosprovider.providers.VocabularyProvider`\n or `False` if the id or uri is unknown.", "id": "f13802:c1:m3"} {"signature": "def get_providers(self, **kwargs):", "body": "if '' in kwargs:ids = [self.concept_scheme_uri_map.get(id, id) for id in kwargs['']]providers = [self.providers[k] for k in self.providers.keys() if k in ids]else:providers = list(self.providers.values())if '' in kwargs:providers = [p for p in providers if kwargs[''] in p.metadata['']]return providers", "docstring": "Get all providers registered.\n\n If keyword `ids` is present, get only the providers with these ids.\n\n If keys `subject` is present, get only the providers that have this subject.\n\n .. code-block:: python\n\n # Get all providers with subject 'biology'\n registry.get_providers(subject='biology')\n\n # Get all providers with id 1 or 2\n registry.get_providers(ids=[1,2])\n\n # Get all providers with id 1 or 2 and subject 'biology'\n registry.get_providers(ids=[1,2], subject='biology']\n\n :param list ids: Only return providers with one of the Ids or :term:`URIs `.\n :param str subject: Only return providers with this subject.\n :returns: A list of :class:`providers `", "id": "f13802:c1:m4"} {"signature": "def find(self, query, **kwargs):", "body": "if '' not in kwargs:providers = self.get_providers()else:pargs = kwargs['']if isinstance(pargs, list):providers = self.get_providers(ids=pargs)else:providers = self.get_providers(**pargs)kwarguments = {}if '' in kwargs:kwarguments[''] = kwargs['']return [{'': p.get_vocabulary_id(), '': p.find(query, **kwarguments)}for p in providers]", "docstring": "Launch a query across all or a selection of providers.\n\n .. 
code-block:: python\n\n # Find anything that has a label of church in any provider.\n registry.find({'label': 'church'})\n\n # Find anything that has a label of church with the BUILDINGS provider.\n # Attention, this syntax was deprecated in version 0.3.0\n registry.find({'label': 'church'}, providers=['BUILDINGS'])\n\n # Find anything that has a label of church with the BUILDINGS provider.\n registry.find({'label': 'church'}, providers={'ids': ['BUILDINGS']})\n\n # Find anything that has a label of church with a provider\n # marked with the subject 'architecture'.\n registry.find({'label': 'church'}, providers={'subject': 'architecture'})\n\n # Find anything that has a label of church in any provider.\n # If possible, display the results with a Dutch label.\n registry.find({'label': 'church'}, language='nl')\n\n :param dict query: The query parameters that will be passed on to each\n :meth:`~skosprovider.providers.VocabularyProvider.find` method of\n the selected.\n :class:`providers `.\n :param dict providers: Optional. If present, it should be a dictionary.\n This dictionary can contain any of the keyword arguments available\n to the :meth:`get_providers` method. The query will then only\n be passed to the providers conforming to these arguments.\n :param string language: Optional. If present, it should be a\n :term:`language-tag`. This language-tag is passed on to the\n underlying providers and used when selecting the label to display\n for each concept.\n :returns: a list of :class:`dict`.\n Each dict has two keys: id and concepts.", "id": "f13802:c1:m5"} {"signature": "def get_all(self, **kwargs):", "body": "kwarguments = {}if '' in kwargs:kwarguments[''] = kwargs['']return [{'': p.get_vocabulary_id(), '': p.get_all(**kwarguments)}for p in self.providers.values()]", "docstring": "Get all concepts from all providers.\n\n .. code-block:: python\n\n # get all concepts in all providers.\n registry.get_all()\n\n # get all concepts in all providers.\n # If possible, display the results with a Dutch label.\n registry.get_all(language='nl')\n\n :param string language: Optional. If present, it should be a\n :term:`language-tag`. 
This language-tag is passed on to the\n underlying providers and used when selecting the label to display\n for each concept.\n\n :returns: a list of :class:`dict`.\n Each dict has two keys: id and concepts.", "id": "f13802:c1:m6"} {"signature": "def get_by_uri(self, uri):", "body": "if not is_uri(uri):raise ValueError('' % uri)csuris = [csuri for csuri in self.concept_scheme_uri_map.keys() if uri.startswith(csuri)]for csuri in csuris:c = self.get_provider(csuri).get_by_uri(uri)if c:return cfor p in self.providers.values():c = p.get_by_uri(uri)if c:return creturn False", "docstring": "Get a concept or collection by its uri.\n\n Returns a single concept or collection if one exists with this uri.\n Returns False otherwise.\n\n :param string uri: The uri to find a concept or collection for.\n :raises ValueError: The uri is invalid.\n :rtype: :class:`skosprovider.skos.Concept` or\n :class:`skosprovider.skos.Collection`", "id": "f13802:c1:m7"} {"signature": "def dict_dumper(provider):", "body": "ret = []for stuff in provider.get_all():c = provider.get_by_id(stuff[''])labels = [l.__dict__ for l in c.labels]notes = [n.__dict__ for n in c.notes]sources = [s.__dict__ for s in c.sources]if isinstance(c, Concept):ret.append({'': c.id,'': c.uri,'': c.type,'': labels,'': notes,'': sources,'': c.narrower,'': c.broader,'': c.related,'': c.member_of,'': c.subordinate_arrays,'': c.matches})elif isinstance(c, Collection):ret.append({'': c.id,'': c.uri,'': c.type,'': labels,'': notes,'': sources,'': c.members,'': c.member_of,'': c.superordinates})return ret", "docstring": "Dump a provider to a format that can be passed to a\n:class:`skosprovider.providers.DictionaryProvider`.\n\n:param skosprovider.providers.VocabularyProvider provider: The provider\n that will be turned into a `dict`.\n:rtype: A list of dicts.\n\n.. 
versionadded:: 0.2.0", "id": "f13803:m0"} {"signature": "def __init__(self, message):", "body": "self.message = message", "docstring": ":param message: More information about the exception.", "id": "f13804:c0:m0"} {"signature": "def includeme(config):", "body": "settings = config.registry.settingsif asbool(settings.get('', True)):LOGGER.debug('')config.include('')config.include('')config.include('')config.include('')config.add_xmlrpc_endpoint('', '')config.add_xmlrpc_method(RPCInterface, attr='', endpoint='', method='')config.add_xmlrpc_method(RPCInterface, attr='', endpoint='', method='')config.add_xmlrpc_method(RPCInterface, attr='', endpoint='', method='')config.add_xmlrpc_method(RPCInterface, attr='', endpoint='', method='')config.add_xmlrpc_method(RPCInterface, attr='', endpoint='', method='')config.add_xmlrpc_method(RPCInterface, attr='', endpoint='', method='')config.add_xmlrpc_method(RPCInterface, attr='', endpoint='', method='')config.add_xmlrpc_method(RPCInterface, attr='', endpoint='', method='')config.add_xmlrpc_method(RPCInterface, attr='', endpoint='', method='')", "docstring": "The callable makes it possible to include rpcinterface\n in a Pyramid application.\n\n Calling ``config.include(twitcher.rpcinterface)`` will result in this\n callable being called.\n\n Arguments:\n\n * ``config``: the ``pyramid.config.Configurator`` object.", "id": "f13833:m0"} {"signature": "def generate_token(self, valid_in_hours=, environ=None):", "body": "return self.tokenmgr.generate_token(valid_in_hours, environ)", "docstring": "Implementation of :meth:`twitcher.api.ITokenManager.generate_token`.", "id": "f13833:c0:m1"} {"signature": "def revoke_token(self, token):", "body": "return self.tokenmgr.revoke_token(token)", "docstring": "Implementation of :meth:`twitcher.api.ITokenManager.revoke_token`.", "id": "f13833:c0:m2"} {"signature": "def revoke_all_tokens(self):", "body": "return self.tokenmgr.revoke_all_tokens()", "docstring": "Implementation of :meth:`twitcher.api.ITokenManager.revoke_all_tokens`.", "id": "f13833:c0:m3"} {"signature": "def register_service(self, url, data=None, overwrite=True):", "body": "return self.srvreg.register_service(url, data, overwrite)", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.register_service`.", "id": "f13833:c0:m4"} {"signature": "def unregister_service(self, name):", "body": "return self.srvreg.unregister_service(name)", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.unregister_service`.", "id": "f13833:c0:m5"} {"signature": "def get_service_by_name(self, name):", "body": "return self.srvreg.get_service_by_name(name)", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.get_service_by_name`.", "id": "f13833:c0:m6"} {"signature": "def get_service_by_url(self, url):", "body": "return self.srvreg.get_service_by_url(url)", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.get_service_by_url`.", "id": "f13833:c0:m7"} {"signature": "def list_services(self):", "body": "return self.srvreg.list_services()", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.list_services`.", "id": "f13833:c0:m8"} {"signature": "def clear_services(self):", "body": "return self.srvreg.clear_services()", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.clear_services`.", "id": "f13833:c0:m9"} {"signature": "def owsproxy(request):", "body": "try:service_name = request.matchdict.get('')extra_path = request.matchdict.get('')store = servicestore_factory(request.registry)service = 
store.fetch_by_name(service_name)except Exception as err:return OWSAccessFailed(\"\".format(service_name, err.message))else:return _send_request(request, service, extra_path, request_params=request.query_string)", "docstring": "TODO: use ows exceptions", "id": "f13836:m1"} {"signature": "def owsproxy_delegate(request):", "body": "twitcher_url = request.registry.settings.get('')protected_path = request.registry.settings.get('', '')url = twitcher_url + protected_path + ''if request.matchdict.get(''):url += '' + request.matchdict.get('')if request.matchdict.get(''):url += '' + request.matchdict.get('')url += '' + urlparse.urlencode(request.params)LOGGER.debug(\"\", url)resp = requests.request(method=request.method.upper(), url=url, data=request.body,headers=request.headers, verify=False)return Response(resp.content, status=resp.status_code, headers=resp.headers)", "docstring": "Delegates owsproxy request to external twitcher service.", "id": "f13836:m2"} {"signature": "def create_access_token(self, valid_in_hours=, data=None):", "body": "data = data or {}token = AccessToken(token=self.generate(),expires_at=expires_at(hours=valid_in_hours),data=data)return token", "docstring": "Creates an access token.\n\nTODO: check valid in hours\nTODO: maybe specify how often a token can be used", "id": "f13838:c0:m0"} {"signature": "def generate(self):", "body": "return uuid.uuid4().get_hex()", "docstring": ":return: A new token", "id": "f13838:c1:m0"} {"signature": "def _retrieve_certificate(self, access_token, timeout=):", "body": "logger.debug(\"\")key_pair = crypto.PKey()key_pair.generate_key(crypto.TYPE_RSA, )private_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, key_pair).decode(\"\")cert_request = crypto.X509Req()cert_request.set_pubkey(key_pair)cert_request.sign(key_pair, '')der_cert_req = crypto.dump_certificate_request(crypto.FILETYPE_ASN1, cert_request)encoded_cert_req = base64.b64encode(der_cert_req)token = {'': access_token, '': ''}client = OAuth2Session(token=token)response = client.post(self.certificate_url,data={'': encoded_cert_req},verify=False,timeout=timeout,)if response.ok:content = \"\".format(response.text, private_key)with open(self.esgf_credentials, '') as fh:fh.write(content)logger.debug('')else:msg = \"\".format(response.status_code, response.reason)raise Exception(msg)return True", "docstring": "Generates a new private key and certificate request, submits the request to be\nsigned by the SLCS CA and returns the certificate.", "id": "f13839:c0:m3"} {"signature": "def main(global_config, **settings):", "body": "from pyramid.config import Configuratorconfig = Configurator(settings=settings)config.include('')config.include('')config.include('')config.include('')config.include('')config.scan()return config.make_wsgi_app()", "docstring": "This function returns a Pyramid WSGI application.", "id": "f13840:m0"} {"signature": "def ows_security_tween_factory(handler, registry):", "body": "security = owssecurity_factory(registry)def ows_security_tween(request):try:security.check_request(request)return handler(request)except OWSException as err:logger.exception(\"\")return errexcept Exception as err:logger.exception(\"\")return OWSNoApplicableCode(\"\".format(err))return ows_security_tween", "docstring": "A tween factory which produces a tween which raises an exception\n if access to OWS service is not allowed.", "id": "f13843:m1"} {"signature": "def now_secs():", "body": "return int(time.time())", "docstring": "Return the current time in seconds since the Epoch.", "id": "f13844:m3"} 
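The token and time helpers in the surrounding records have had their string and numeric literals stripped during extraction, so only the shape of the logic remains (a uuid-based token string, an `expires_at` timestamp, and comparisons against `now_secs()`). A minimal sketch of that pattern, with the one-hour default and the `expires_at` helper assumed rather than taken from the source, might look like:

```python
import time
import uuid


def now_secs():
    """Return the current time in seconds since the Epoch."""
    return int(time.time())


def expires_at(hours=1):
    """Assumed helper: an expiry timestamp the given number of hours from now."""
    return now_secs() + hours * 3600


# Generate a hex token string and check it against its expiry timestamp.
token = uuid.uuid4().hex  # Python 3 spelling of the uuid4().get_hex() seen above
expiry = expires_at(hours=1)
expired = now_secs() >= expiry
```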
{"signature": "def localize_datetime(dt, tz_name=''):", "body": "tz_aware_dt = dtif dt.tzinfo is None:utc = pytz.timezone('')aware = utc.localize(dt)timezone = pytz.timezone(tz_name)tz_aware_dt = aware.astimezone(timezone)else:logger.warn('')return tz_aware_dt", "docstring": "Provide a timezone-aware object for a given datetime and timezone name", "id": "f13844:m5"} {"signature": "def baseurl(url):", "body": "parsed_url = urlparse.urlparse(url)if not parsed_url.netloc or parsed_url.scheme not in (\"\", \"\"):raise ValueError('')service_url = \"\" % (parsed_url.scheme, parsed_url.netloc, parsed_url.path.strip())return service_url", "docstring": "return baseurl of given url", "id": "f13844:m6"} {"signature": "@propertydef url(self):", "body": "return self['']", "docstring": "Service URL.", "id": "f13845:c0:m1"} {"signature": "@propertydef name(self):", "body": "return self.get('', '')", "docstring": "Service name.", "id": "f13845:c0:m2"} {"signature": "@propertydef type(self):", "body": "return self.get('', '')", "docstring": "Service type.", "id": "f13845:c0:m3"} {"signature": "@propertydef purl(self):", "body": "return self.get('', '')", "docstring": "Service optional public URL (purl).", "id": "f13845:c0:m4"} {"signature": "def has_purl(self):", "body": "return is_valid_url(self.purl)", "docstring": "Return true if we have a valid public URL (purl).", "id": "f13845:c0:m5"} {"signature": "@propertydef public(self):", "body": "return self.get('', False)", "docstring": "Flag if service has public access.", "id": "f13845:c0:m6"} {"signature": "@propertydef auth(self):", "body": "return self.get('', '')", "docstring": "Authentication method: public, token, cert.", "id": "f13845:c0:m7"} {"signature": "@propertydef verify(self):", "body": "value = self.get('', '')if isinstance(value, bool):verify = valueelif value.lower() == '':verify = Trueelif value.lower() == '':verify = Falseelse:verify = valuereturn verify", "docstring": "Verify ssl service certificate.", "id": "f13845:c0:m8"} {"signature": "@propertydef token(self):", "body": "return self['']", "docstring": "Access token string.", "id": "f13845:c1:m1"} {"signature": "@propertydef expires_in(self):", "body": "time_left = self.expires_at - now_secs()if time_left > :return time_leftreturn ", "docstring": "Returns the time until the token expires.\n:return: The remaining time until expiration in seconds or 0 if the\n token has expired.", "id": "f13845:c1:m3"} {"signature": "def is_expired(self):", "body": "if self.expires_at is None:return Trueif self.expires_in > :return Falsereturn True", "docstring": "Determines if the token has expired.\n:return: `True` if the token has expired. 
Otherwise `False`.", "id": "f13845:c1:m4"} {"signature": "def save_token(self, access_token):", "body": "raise NotImplementedError", "docstring": "Stores an access token with additional data.", "id": "f13846:c0:m0"} {"signature": "def delete_token(self, token):", "body": "raise NotImplementedError", "docstring": "Deletes an access token from the store using its token string to identify it.\nThis invalidates both the access token and the token.\n\n:param token: A string containing the token.\n:return: None.", "id": "f13846:c0:m1"} {"signature": "def fetch_by_token(self, token):", "body": "raise NotImplementedError", "docstring": "Fetches an access token from the store using its token string to\nidentify it.\n\n:param token: A string containing the token.\n:return: An instance of :class:`twitcher.datatype.AccessToken`.", "id": "f13846:c0:m2"} {"signature": "def clear_tokens(self):", "body": "raise NotImplementedError", "docstring": "Removes all tokens from database.", "id": "f13846:c0:m3"} {"signature": "def save_service(self, service, overwrite=True):", "body": "raise NotImplementedError", "docstring": "Stores an OWS service in storage.\n\n:param service: An instance of :class:`twitcher.datatype.Service`.", "id": "f13846:c1:m0"} {"signature": "def delete_service(self, name):", "body": "raise NotImplementedError", "docstring": "Removes service from database.", "id": "f13846:c1:m1"} {"signature": "def list_services(self):", "body": "raise NotImplementedError", "docstring": "Lists all services in database.", "id": "f13846:c1:m2"} {"signature": "def fetch_by_name(self, name):", "body": "raise NotImplementedError", "docstring": "Get service for given ``name`` from storage.\n\n:param token: A string containing the service name.\n:return: An instance of :class:`twitcher.datatype.Service`.", "id": "f13846:c1:m3"} {"signature": "def fetch_by_url(self, url):", "body": "raise NotImplementedError", "docstring": "Get service for given ``url`` from storage.\n\n:param token: A string containing the service url.\n:return: An instance of :class:`twitcher.datatype.Service`.", "id": "f13846:c1:m4"} {"signature": "def clear_services(self):", "body": "raise NotImplementedError", "docstring": "Removes all OWS services from storage.", "id": "f13846:c1:m5"} {"signature": "def save_service(self, service, overwrite=True):", "body": "name = namesgenerator.get_sane_name(service.name)if not name:name = namesgenerator.get_random_name()if self.collection.count_documents({'': name}) > :name = namesgenerator.get_random_name(retry=True)if self.collection.count_documents({'': name}) > :if overwrite:self.collection.delete_one({'': name})else:raise Exception(\"\")self.collection.insert_one(Service(name=name,url=baseurl(service.url),type=service.type,purl=service.purl,public=service.public,auth=service.auth,verify=service.verify))return self.fetch_by_name(name=name)", "docstring": "Stores an OWS service in mongodb.", "id": "f13847:c2:m0"} {"signature": "def delete_service(self, name):", "body": "self.collection.delete_one({'': name})return True", "docstring": "Removes service from mongodb storage.", "id": "f13847:c2:m1"} {"signature": "def list_services(self):", "body": "my_services = []for service in self.collection.find().sort('', pymongo.ASCENDING):my_services.append(Service(service))return my_services", "docstring": "Lists all services in mongodb storage.", "id": "f13847:c2:m2"} {"signature": "def fetch_by_name(self, name):", "body": "service = self.collection.find_one({'': name})if not service:raise ServiceNotFoundreturn 
Service(service)", "docstring": "Gets service for given ``name`` from mongodb storage.", "id": "f13847:c2:m3"} {"signature": "def fetch_by_url(self, url):", "body": "service = self.collection.find_one({'': url})if not service:raise ServiceNotFoundreturn Service(service)", "docstring": "Gets service for given ``url`` from mongodb storage.", "id": "f13847:c2:m4"} {"signature": "def clear_services(self):", "body": "self.collection.drop()return True", "docstring": "Removes all OWS services from mongodb storage.", "id": "f13847:c2:m5"} {"signature": "def save_service(self, service, overwrite=True):", "body": "name = namesgenerator.get_sane_name(service.name)if not name:name = namesgenerator.get_random_name()if name in self.name_index:name = namesgenerator.get_random_name(retry=True)if name in self.name_index:if overwrite:self._delete(name=name)else:raise Exception(\"\")self._insert(Service(name=name,url=baseurl(service.url),type=service.type,purl=service.purl,public=service.public,auth=service.auth,verify=service.verify))return self.fetch_by_name(name=name)", "docstring": "Store an OWS service in database.", "id": "f13848:c1:m3"} {"signature": "def delete_service(self, name):", "body": "self._delete(name=name)return True", "docstring": "Removes service from registry database.", "id": "f13848:c1:m4"} {"signature": "def list_services(self):", "body": "my_services = []for service in self.name_index.values():my_services.append(Service(service))return my_services", "docstring": "Lists all services in memory storage.", "id": "f13848:c1:m5"} {"signature": "def fetch_by_name(self, name):", "body": "service = self.name_index.get(name)if not service:raise ServiceNotFoundreturn Service(service)", "docstring": "Get service for given ``name`` from memory storage.", "id": "f13848:c1:m6"} {"signature": "def clear_services(self):", "body": "self.name_index = {}return True", "docstring": "Removes all OWS services from memory storage.", "id": "f13848:c1:m8"} {"signature": "def tokenstore_factory(registry, database=None):", "body": "database = database or ''if database == '':db = _mongodb(registry)store = MongodbTokenStore(db.tokens)else:store = MemoryTokenStore()return store", "docstring": "Creates a token store with the interface of :class:`twitcher.store.AccessTokenStore`.\nBy default the mongodb implementation will be used.\n\n:param database: A string with the store implementation name: \"mongodb\" or \"memory\".\n:return: An instance of :class:`twitcher.store.AccessTokenStore`.", "id": "f13849:m0"} {"signature": "def servicestore_factory(registry, database=None):", "body": "database = database or ''if database == '':db = _mongodb(registry)store = MongodbServiceStore(collection=db.services)else:store = MemoryServiceStore()return store", "docstring": "Creates a service store with the interface of :class:`twitcher.store.ServiceStore`.\nBy default the mongodb implementation will be used.\n\n:return: An instance of :class:`twitcher.store.ServiceStore`.", "id": "f13849:m1"} {"signature": "def generate_token(self, valid_in_hours=, data=None):", "body": "raise NotImplementedError", "docstring": "Generates an access token which is valid for ``valid_in_hours``.\n\nArguments:\n\n* :param valid_in_hours: an int with number of hours the token is valid.\n* :param data: a dict with extra data used with this token.\n\nPossible keys: ``esgf_access_token``, ``esgf_slcs_service_url`` or ``esgf_credentials``.", "id": "f13850:c0:m0"} {"signature": "def revoke_token(self, token):", "body": "raise NotImplementedError", 
"docstring": "Remove token from tokenstore.", "id": "f13850:c0:m1"} {"signature": "def revoke_all_tokens(self):", "body": "raise NotImplementedError", "docstring": "Removes all tokens from tokenstore.", "id": "f13850:c0:m2"} {"signature": "def register_service(self, url, data, overwrite):", "body": "raise NotImplementedError", "docstring": "Adds an OWS service with the given ``url`` to the service store.\n\n:param data: a dict with additional information like ``name``.", "id": "f13850:c1:m0"} {"signature": "def unregister_service(self, name):", "body": "raise NotImplementedError", "docstring": "Removes OWS service with the given ``name`` from the service store.", "id": "f13850:c1:m1"} {"signature": "def get_service_by_name(self, name):", "body": "raise NotImplementedError", "docstring": "Gets service with given ``name`` from service store.", "id": "f13850:c1:m2"} {"signature": "def get_service_by_url(self, url):", "body": "raise NotImplementedError", "docstring": "Gets service with given ``url`` from service store.", "id": "f13850:c1:m3"} {"signature": "def list_services(self):", "body": "raise NotImplementedError", "docstring": "Lists all registred OWS services.", "id": "f13850:c1:m4"} {"signature": "def clear_services(self):", "body": "raise NotImplementedError", "docstring": "Removes all services from the service store.", "id": "f13850:c1:m5"} {"signature": "def generate_token(self, valid_in_hours=, data=None):", "body": "data = data or {}access_token = self.tokengenerator.create_access_token(valid_in_hours=valid_in_hours,data=data,)self.store.save_token(access_token)return access_token.params", "docstring": "Implementation of :meth:`twitcher.api.ITokenManager.generate_token`.", "id": "f13850:c2:m1"} {"signature": "def revoke_token(self, token):", "body": "try:self.store.delete_token(token)except Exception:LOGGER.exception('')return Falseelse:return True", "docstring": "Implementation of :meth:`twitcher.api.ITokenManager.revoke_token`.", "id": "f13850:c2:m2"} {"signature": "def revoke_all_tokens(self):", "body": "try:self.store.clear_tokens()except Exception:LOGGER.exception('')return Falseelse:return True", "docstring": "Implementation of :meth:`twitcher.api.ITokenManager.revoke_all_tokens`.", "id": "f13850:c2:m3"} {"signature": "def register_service(self, url, data=None, overwrite=True):", "body": "data = data or {}args = dict(data)args[''] = urlservice = Service(**args)service = self.store.save_service(service, overwrite=overwrite)return service.params", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.register_service`.", "id": "f13850:c3:m1"} {"signature": "def unregister_service(self, name):", "body": "try:self.store.delete_service(name=name)except Exception:LOGGER.exception('')return Falseelse:return True", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.unregister_service`.", "id": "f13850:c3:m2"} {"signature": "def get_service_by_name(self, name):", "body": "try:service = self.store.fetch_by_name(name=name)except Exception:LOGGER.error('', name)return {}else:return service.params", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.get_service_by_name`.", "id": "f13850:c3:m3"} {"signature": "def get_service_by_url(self, url):", "body": "try:service = self.store.fetch_by_url(url=url)except Exception:LOGGER.error('', url)return {}else:return service.params", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.get_service_by_url`.", "id": "f13850:c3:m4"} {"signature": "def list_services(self):", "body": "try:services = 
[service.params for service in self.store.list_services()]except Exception:LOGGER.error('')return []else:return services", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.list_services`.", "id": "f13850:c3:m5"} {"signature": "def clear_services(self):", "body": "try:self.store.clear_services()except Exception:LOGGER.error('')return Falseelse:return True", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.clear_services`.", "id": "f13850:c3:m6"} {"signature": "def _get_param(self, param, allowed_values=None, optional=False):", "body": "request_params = self._request_params()if param in request_params:value = request_params[param].lower()if allowed_values is not None:if value in allowed_values:self.params[param] = valueelse:raise OWSInvalidParameterValue(\"\" % (param, value), value=param)elif optional:self.params[param] = Noneelse:raise OWSMissingParameterValue('' % param, value=param)return self.params[param]", "docstring": "Get parameter in GET request.", "id": "f13851:c2:m1"} {"signature": "def _get_service(self):", "body": "return self._get_param(param=\"\", allowed_values=allowed_service_types)", "docstring": "Check mandatory service name parameter in GET request.", "id": "f13851:c2:m2"} {"signature": "def _get_request_type(self):", "body": "return self._get_param(param=\"\", allowed_values=allowed_request_types[self.params['']])", "docstring": "Find requested request type in GET request.", "id": "f13851:c2:m3"} {"signature": "def _get_version(self):", "body": "version = self._get_param(param=\"\", allowed_values=allowed_versions[self.params['']],optional=True)if version is None and self._get_request_type() != \"\":raise OWSMissingParameterValue('', value=\"\")else:return version", "docstring": "Find requested version in GET request.", "id": "f13851:c2:m4"} {"signature": "def _get_service(self):", "body": "if \"\" in self.document.attrib:value = self.document.attrib[\"\"].lower()if value in allowed_service_types:self.params[\"\"] = valueelse:raise OWSInvalidParameterValue(\"\" % value, value=\"\")else:raise OWSMissingParameterValue('', value=\"\")return self.params[\"\"]", "docstring": "Check mandatory service name parameter in POST request.", "id": "f13851:c3:m1"} {"signature": "def _get_request_type(self):", "body": "value = self.document.tag.lower()if value in allowed_request_types[self.params['']]:self.params[\"\"] = valueelse:raise OWSInvalidParameterValue(\"\" % value, value=\"\")return self.params[\"\"]", "docstring": "Find requested request type in POST request.", "id": "f13851:c3:m2"} {"signature": "def _get_version(self):", "body": "if \"\" in self.document.attrib:value = self.document.attrib[\"\"].lower()if value in allowed_versions[self.params['']]:self.params[\"\"] = valueelse:raise OWSInvalidParameterValue(\"\" % value, value=\"\")elif self._get_request_type() == \"\":self.params[\"\"] = Noneelse:raise OWSMissingParameterValue('', value=\"\")return self.params[\"\"]", "docstring": "Find requested version in POST request.", "id": "f13851:c3:m3"} {"signature": "def get_random_name(retry=False):", "body": "name = \"\" % (left[random.randint(, len(left) - )], right[random.randint(, len(right) - )])if retry is True:name = \"\" % (name, random.randint(, ))return name", "docstring": "generates a random name from the list of adjectives and birds in this package\nformatted as \"adjective_surname\". For example 'loving_sugarbird'. 
If retry is non-zero, a random\ninteger between 0 and 100 will be added to the end of the name, e.g `loving_sugarbird3`", "id": "f13853:m0"} {"signature": "def get_state(self):", "body": "return [os.path.join(dp, f)for dp, _, fn in os.walk(self.dir)for f in fn]", "docstring": "Get the current directory state", "id": "f13862:c0:m1"} {"signature": "def tick(self):", "body": "self.current += if self.current == self.factor:sys.stdout.write('')sys.stdout.flush()self.current = ", "docstring": "Add one tick to progress bar", "id": "f13862:c1:m2"} {"signature": "def attrs_sqlalchemy(maybe_cls=None):", "body": "def wrap(cls):warnings.warn(UserWarning(''))these = {name: attr.ib()for name in inspect(cls).columns.keys()}return attr.s(cls, these=these, init=False)if maybe_cls is None:return wrapelse:return wrap(maybe_cls)", "docstring": "A class decorator that adds ``__repr__``, ``__eq__``, ``__cmp__``, and\n``__hash__`` methods according to the fields defined on the SQLAlchemy\nmodel class.", "id": "f13868:m0"} {"signature": "def read(*parts):", "body": "with codecs.open(os.path.join(HERE, *parts), '', '') as f:return f.read()", "docstring": "Build an absolute path from *parts* and return the contents of the\nresulting file. Assume UTF-8 encoding.", "id": "f13869:m0"} {"signature": "def find_meta(meta):", "body": "meta_match = re.search(r\"\".format(meta=meta),META_FILE, re.M)if meta_match:return meta_match.group()raise RuntimeError(''.format(meta=meta))", "docstring": "Extract __*meta*__ from META_FILE.", "id": "f13869:m1"} {"signature": "def check_create_folder(folder_path):", "body": "if not exists(folder_path):makedirs(folder_path)return folder_path", "docstring": "Check whether a folder exists; if not, the folder is created.\n Always return folder_path.", "id": "f13874:m0"} {"signature": "def fetch(self, url, path, filename):", "body": "logger.debug('', url)remote_file_size = self.get_remote_file_size(url)if exists(join(path, filename)):size = getsize(join(path, filename))if size == remote_file_size:logger.error('' % filename)print('' % filename)return [join(path, filename), size]logger.debug('' % filename)print('' % filename)fetch(url, path)print('' % path)logger.debug('' % path)return [join(path, filename), remote_file_size]", "docstring": "Verify if the file is already downloaded and complete. If it doesn't\n exist or is not complete, use the homura download function to fetch\n files. 
Return a list with the path of the downloaded file and the size\n of the remote file.", "id": "f13874:c1:m1"} {"signature": "def remote_file_exists(self, url):", "body": "return requests.head(url).status_code == ", "docstring": "Check whether the remote file exists on Storage", "id": "f13874:c1:m2"} {"signature": "def get_remote_file_size(self, url):", "body": "headers = requests.head(url).headersreturn int(headers[''])", "docstring": "Gets the filesize of a remote file", "id": "f13874:c1:m3"} {"signature": "def validate_bands(self, bands):", "body": "if not isinstance(bands, list):logger.error('')raise TypeError('')valid_bands = list(range(, )) + ['']for band in bands:if band not in valid_bands:logger.error('' % band)raise InvalidBandError('' % band)", "docstring": "Validate bands parameter.", "id": "f13874:c1:m4"} {"signature": "def validate_sceneInfo(self):", "body": "if self.sceneInfo.prefix not in self.__satellitesMap:logger.error(''% (self.sceneInfo.name, self.sceneInfo.prefix))raise WrongSceneNameError(''% (self.sceneInfo.name, self.sceneInfo.prefix))", "docstring": "Check scene name and whether remote file exists. Raises\n WrongSceneNameError if the scene name is wrong.", "id": "f13874:c2:m1"} {"signature": "def remote_file_exists(self):", "body": "return super(GoogleDownloader, self).remote_file_exists(self.remote_file_url)", "docstring": "Verify if the remote file exists. Returns True or False.", "id": "f13874:c2:m2"} {"signature": "def download(self, bands, download_dir=None, metadata=False):", "body": "super(GoogleDownloader, self).validate_bands(bands)pattern = re.compile('', re.I)image_list = []band_list = ['' % (i,) if isinstance(i, int) else i for i in bands]if download_dir is None:download_dir = DOWNLOAD_DIRcheck_create_folder(join(download_dir, self.sceneInfo.name))filename = \"\" % (self.sceneInfo.name, self.__remote_file_ext)downloaded = self.fetch(self.remote_file_url, download_dir, filename)try:tar = tarfile.open(downloaded[], '')folder_path = join(download_dir, self.sceneInfo.name)logger.debug('', folder_path)tar.extractall(folder_path)remove(downloaded[])images_path = listdir(folder_path)for image_path in images_path:matched = pattern.match(image_path)file_path = join(folder_path, image_path)if matched and matched.group() in band_list:image_list.append([file_path, getsize(file_path)])elif matched:remove(file_path)except tarfile.ReadError as error:logger.error('', error)print('')return image_list", "docstring": "Download remote .tar.bz file.", "id": "f13874:c2:m3"} {"signature": "def validate_sceneInfo(self):", "body": "if self.sceneInfo.prefix not in self.__prefixesValid:raise WrongSceneNameError(''% (self.sceneInfo.name, self.sceneInfo.prefix))", "docstring": "Check whether sceneInfo is valid to download from AWS Storage.", "id": "f13874:c3:m1"} {"signature": "def remote_file_exists(self):", "body": "url = join(self.base_url, '')return super(AWSDownloader, self).remote_file_exists(url)", "docstring": "Verify whether the file (scene) exists on AWS Storage.", "id": "f13874:c3:m2"} {"signature": "def download(self, bands, download_dir=None, metadata=False):", "body": "super(AWSDownloader, self).validate_bands(bands)if download_dir is None:download_dir = DOWNLOAD_DIRdest_dir = check_create_folder(join(download_dir, self.sceneInfo.name))downloaded = []for band in bands:if band == '':filename = '' % (self.sceneInfo.name, band, self.__remote_file_ext)else:filename = '' % (self.sceneInfo.name, band, self.__remote_file_ext)band_url = join(self.base_url, 
filename)downloaded.append(self.fetch(band_url, dest_dir, filename))if metadata:filename = '' % (self.sceneInfo.name)url = join(self.base_url, filename)self.fetch(url, dest_dir, filename)return downloaded", "docstring": "Download each specified band and metadata.", "id": "f13874:c3:m3"} {"signature": "def validate_bands(self, bands):", "body": "if not isinstance(bands, list):logger.error('')raise TypeError('')valid_bands = list(range(, )) + ['']for band in bands:if band not in valid_bands:logger.error('' % band)raise InvalidBandError('' % band)", "docstring": "Validate bands parameter.", "id": "f13874:c3:m4"} {"signature": "def preprocess_constraints(ml, cl, n):", "body": "ml_graph, cl_graph = {}, {}for i in range(n):ml_graph[i] = set()cl_graph[i] = set()def add_both(d, i, j):d[i].add(j)d[j].add(i)for (i, j) in ml:ml_graph[i].add(j)ml_graph[j].add(i)for (i, j) in cl:cl_graph[i].add(j)cl_graph[j].add(i)def dfs(i, graph, visited, component):visited[i] = Truefor j in graph[i]:if not visited[j]:dfs(j, graph, visited, component)component.append(i)visited = [False] * nneighborhoods = []for i in range(n):if not visited[i] and ml_graph[i]:component = []dfs(i, ml_graph, visited, component)for x1 in component:for x2 in component:if x1 != x2:ml_graph[x1].add(x2)neighborhoods.append(component)for (i, j) in cl:for x in ml_graph[i]:add_both(cl_graph, x, j)for y in ml_graph[j]:add_both(cl_graph, i, y)for x in ml_graph[i]:for y in ml_graph[j]:add_both(cl_graph, x, y)for i in ml_graph:for j in ml_graph[i]:if j != i and j in cl_graph[i]:raise InconsistentConstraintsException(''.format(i, j))return ml_graph, cl_graph, neighborhoods", "docstring": "Create a graph of constraints for both must- and cannot-links", "id": "f13878:m0"} {"signature": "def _dist(self, x, y, A):", "body": "return scipy.spatial.distance.mahalanobis(x, y, A) ** ", "docstring": "(x - y)^T A (x - y)", "id": "f13880:c0:m4"} {"signature": "def _dist(self, x, y, A):", "body": "return scipy.spatial.distance.mahalanobis(x, y, A) ** ", "docstring": "(x - y)^T A (x - y)", "id": "f13883:c0:m4"} {"signature": "def query(self, i, j):", "body": "if self.queries_cnt < self.max_queries_cnt:self.queries_cnt += return self.labels[i] == self.labels[j]else:raise MaximumQueriesExceeded", "docstring": "Query the oracle to find out whether i and j should be must-linked", "id": "f13891:c1:m1"} {"signature": "def compact(db_spec, poll_interval=):", "body": "server = get_server_from_specifier(db_spec)db = get_db_from_specifier(db_spec)logger = logging.getLogger('')logger.info('' % (db_spec,repr_bytes(db.info()['']),))logger.debug('' + urlparse.urljoin(db.resource.uri + '', ''))resp_headers, resp_body = db.resource.post('')if not poll_interval:if not (resp_body.get('', False) andresp_headers[''] == ''):err = CompactionError('')err.response = (resp_headers, resp_body)raise errdef check_completed():logger.debug('')logger.debug('' + db.resource.uri + '')db_info = db.info()completed = not db_info.get('', False)if completed and db_info.get('', None):logger.info('' % (db_spec,repr_bytes(db_info[''])))return completedreturn check_completedelif poll_interval > :logger.debug('')logger.debug('' + db.resource.uri + '')running = db.info().get('', False)while running:time.sleep(poll_interval)logger.debug('')logger.debug('' + db.resource.uri + '')running = db.info().get('', False)size_after = db.info().get('', None)if size_after:logger.info('' % (db_spec,repr_bytes(size_after)))return Trueelse:raise ValueError('')", "docstring": "Compact a CouchDB database with optional 
synchronicity.\n\nThe ``compact`` function will compact a CouchDB database stored on a\nrunning CouchDB server. By default, this process occurs *asynchronously*,\nmeaning that the compaction will occur in the background. Often, you'll want\nto know when the process has completed; for this reason, ``compact`` will\nreturn a function which, when called, will return the state of the\ncompaction. If it has completed, ``True`` will be returned; otherwise,\n``False``. This may be called multiple times.\n\nAlternatively, you may opt to run ``compact`` in synchronous mode, for\ndebugging or profiling purposes. If this is the case, an optional keyword\nargument ``poll_interval`` is accepted, which should be a number (in\nseconds) representing the time to take between polls. A sensible default\nmay be around 0.5 (seconds).\n\nBecause this function operates on database specifiers, you can choose to\noperate on the local server or any remote server.", "id": "f13901:m0"} {"signature": "def specifier_to_db(db_spec):", "body": "local_match = LOCAL_RE.match(db_spec)remote_match = REMOTE_RE.match(db_spec)plain_match = PLAIN_RE.match(db_spec)if local_match:return local_match.groupdict()['']elif remote_match:hostname, portnum, database = map(remote_match.groupdict().get,('', '', ''))local_url = settings._('', '')localhost, localport = urlparse.urlparse(local_url)[].split('')if (localhost == hostname) and (localport == portnum):return databasereturn '' % (hostname, portnum, database)elif plain_match:return plain_match.groupdict()['']raise ValueError('' % (db_spec,))", "docstring": "Return the database string for a database specifier.\n\nThe database specifier takes a custom format for specifying local and remote\ndatabases. A local database is specified by the following format:\n\n local:\n\nFor example, a database called 'sessions' would be specified by the string\n``'local:sessions'``. Remote databases are specified like this:\n\n remote:::\n\nFor example, a database called 'log' on the server 'dev.example.com' at port\nnumber 5984 would be specified by ``'remote:dev.example.com:5984:log'``.\n\nThese specifiers are translated into strings acceptable to CouchDB; local\nspecs are turned into the database name alone, and remote specs are turned\ninto ``'http://host:port/db_name'`` URLs.", "id": "f13902:m0"} {"signature": "def db_to_specifier(db_string):", "body": "local_match = PLAIN_RE.match(db_string)remote_match = URL_RE.match(db_string)if local_match:return '' + local_match.groupdict()['']elif remote_match:hostname, portnum, database = map(remote_match.groupdict().get,('', '', ''))local_url = settings._('', '')localhost, localport = urlparse.urlparse(local_url)[].split('')if (localhost == hostname) and (localport == portnum):return '' + databasereturn '' % (hostname, portnum, database)raise ValueError('' % (db_string,))", "docstring": "Return the database specifier for a database string.\n\nThis accepts a database name or URL, and returns a database specifier in the\nformat accepted by ``specifier_to_db``. 
It is recommended that you consult\nthe documentation for that function for an explanation of the format.", "id": "f13902:m1"} {"signature": "def get_server_from_db(db_string):", "body": "local_match = PLAIN_RE.match(db_string)remote_match = URL_RE.match(db_string)if local_match:return shortcuts.get_server()elif remote_match:hostname, portnum, database = map(remote_match.groupdict().get,('', '', ''))local_url = settings._('', '')localhost, localport = urlparse.urlparse(local_url)[].split('')if (localhost == hostname) and (localport == portnum):return shortcuts.get_server()return shortcuts.get_server(server_url=('' % (hostname, portnum)))raise ValueError('' % (db_string,))", "docstring": "Return a CouchDB server instance from a database string.", "id": "f13902:m2"} {"signature": "def get_server_from_specifier(db_spec):", "body": "return get_server_from_db(specifier_to_db(db_spec))", "docstring": "Return a CouchDB server instance from a database specifier.", "id": "f13902:m3"} {"signature": "def get_db_from_db(db_string):", "body": "server = get_server_from_db(db_string)local_match = PLAIN_RE.match(db_string)remote_match = URL_RE.match(db_string)if local_match:return server[local_match.groupdict()['']]elif remote_match:return server[remote_match.groupdict()['']]raise ValueError('' % (db_string,))", "docstring": "Return a CouchDB database instance from a database string.", "id": "f13902:m4"} {"signature": "def get_db_from_specifier(db_spec):", "body": "return get_db_from_db(specifier_to_db(db_spec))", "docstring": "Return a CouchDB database instance from a database specifier.", "id": "f13902:m5"} {"signature": "def ensure_specifier_exists(db_spec):", "body": "local_match = LOCAL_RE.match(db_spec)remote_match = REMOTE_RE.match(db_spec)plain_match = PLAIN_RE.match(db_spec)if local_match:db_name = local_match.groupdict().get('')server = shortcuts.get_server()if db_name not in server:server.create(db_name)return Trueelif remote_match:hostname, portnum, database = map(remote_match.groupdict().get,('', '', ''))server = shortcuts.get_server(server_url=('' % (hostname, portnum)))if database not in server:server.create(database)return Trueelif plain_match:db_name = plain_match.groupdict().get('')server = shortcuts.get_server()if db_name not in server:server.create(db_name)return Truereturn False", "docstring": "Make sure a DB specifier exists, creating it if necessary.", "id": "f13902:m6"} {"signature": "def ensure_db_exists(db_string):", "body": "return ensure_specifier_exists(db_to_specifier(db_string))", "docstring": "Make sure a DB string exists, creating it if necessary.", "id": "f13902:m7"} {"signature": "def replicate_existing(source_db, target_db):", "body": "server = shortcuts.get_server()logger = logging.getLogger('')logger.debug('' + urllib.parse.urljoin(server.resource.uri, ''))source, target = specifier_to_db(source_db), specifier_to_db(target_db)logger.debug('' % (source,))logger.debug('' % (target,))try:resp_headers, resp_body = server.resource.post(path='',content=json.dumps({'': source, '': target}))except couchdb.client.ServerError as exc:logger.error('')raise ReplicationError(exc.args)result = resp_body[''][]if resp_body['']:logger.info('' % (resp_body[''][:],))logger.info('' + result[''])logger.info('' + result[''])result[''] = datetime.datetime.strptime(result[''],'')result[''] = datetime.datetime.strptime(result[''],'')timedelta = result[''] - result['']if timedelta.days:logger.info('' % (timedelta.days,timedelta.seconds + (timedelta.microseconds * ())))else:logger.info('' % 
(timedelta.seconds + (timedelta.microseconds * ())))result[''] = resp_body['']result[''] = resp_body['']result[''] = resp_body['']if result[''] == :docs_read = ''else:docs_read = '' % (result[''],)if result[''] == :docs_written = ''else:docs_written = '' % (result[''],)if result[''] == :missing_checked = '' % (result[''],)else:missing_checked = '' % (result[''], result[''],)logging.info('' % (docs_read, docs_written))logging.info(missing_checked)return resultelse:logger.error('' % (resp_body[''][:],))result[''] = resp_body['']result[''] = resp_body['']result[''] = resp_body['']raise ReplicationFailure(resp_headers, result)", "docstring": "Replicate an existing database to another existing database.", "id": "f13903:m0"} {"signature": "def replicate(source_spec, target_spec):", "body": "ensure_specifier_exists(target_spec)return replicate_existing(source_spec, target_spec)", "docstring": "Replicate one existing database to another (optionally existing) DB.", "id": "f13903:m1"} {"signature": "def get_server(server_url=''):", "body": "return couchdb.client.Server(server_url if server_url else settings._(''))", "docstring": "Return a CouchDB server instance based on Django project settings.", "id": "f13904:m0"} {"signature": "def get_db(db_name, server_url=''):", "body": "return get_server(server_url)[db_name]", "docstring": "Return a CouchDB database instance, given its name.", "id": "f13904:m1"} {"signature": "def get_doc(doc_id, db_name, server_url='', rev=None):", "body": "db = get_server(server_url)[db_name]if rev:headers, response = db.resource.get(doc_id, rev=rev)return couchdb.client.Document(response)return db[doc_id]", "docstring": "Return a CouchDB document, given its ID, revision and database name.", "id": "f13904:m2"} {"signature": "def get_or_create_db(db_name, server_url=''):", "body": "server = get_server(server_url)if db_name in server:return server[db_name]return server.create(db_name)", "docstring": "Return an (optionally existing) CouchDB database instance.", "id": "f13904:m3"} {"signature": "def generator_to_list(function):", "body": "def wrapper(*args, **kwargs):return list(function(*args, **kwargs))wrapper.__name__ = function.__name__wrapper.__doc__ = function.__doc__return wrapper", "docstring": "Wrap a generator function so that it returns a list when called.\n\nFor example:\n\n # Define a generator\n >>> def mygen(n):\n ... i = 0\n ... while i < n:\n ... yield i\n ... 
i += 1\n # This is how it might work\n >>> generator = mygen(5)\n >>> generator.next()\n 0\n >>> generator.next()\n 1\n # Wrap it in generator_to_list, and it will behave differently.\n >>> mygen = generator_to_list(mygen)\n >>> mygen(5)\n [0, 1, 2, 3, 4]", "id": "f13909:m0"} {"signature": "def logrotate(filename):", "body": "match = re.match(r'' + re.escape(os.path.extsep) + r'', filename)if os.path.exists(filename):if match:prefix, number = match.groups()number = int(number)while os.path.exists(os.path.extsep.join((prefix, str(number)))):number += return os.path.extsep.join((prefix, str(number)))elif match:return filenamereturn logrotate(os.path.extsep.join((filename, '')))", "docstring": "Return the next available filename for a particular filename prefix.\n\nFor example:\n\n >>> import os\n # Make three (empty) files in a directory\n >>> fp0 = open('file.0', 'w')\n >>> fp1 = open('file.1', 'w')\n >>> fp2 = open('file.2', 'w')\n >>> fp0.close(), fp1.close(), fp2.close()\n (None, None, None)\n # Use logrotate to get the next available filename.\n >>> logrotate('file')\n 'file.3'\n >>> logrotate('file.2')\n 'file.3'\n >>> logrotate('file.1')\n 'file.3'\n\nThis can be used to get the next available filename for logging, allowing\nyou to rotate log files, without using Python's ``logging`` module.", "id": "f13909:m1"} {"signature": "def get_function(function_name):", "body": "module, basename = str(function_name).rsplit('', )try:return getattr(__import__(module, fromlist=[basename]), basename)except (ImportError, AttributeError):raise FunctionNotFound(function_name)", "docstring": "Given a Python function name, return the function it refers to.", "id": "f13910:m0"} {"signature": "def one_lineify(json_data):", "body": "return json_data.replace('', '').replace('', '')", "docstring": "Prevent JSON data from taking up multiple lines.", "id": "f13910:m1"} {"signature": "def js_error(exc):", "body": "return json.dumps({'': type(exc).__name__,'': str(exc)})", "docstring": "Transform a Python exception into a CouchDB JSON error.", "id": "f13910:m2"} {"signature": "def handle_reset(self):", "body": "self.functions.clear()self.function_counter = ", "docstring": "Reset the current function list.", "id": "f13910:c2:m0"} {"signature": "def handle_add_fun(self, function_name):", "body": "function_name = function_name.strip()try:function = get_function(function_name)except Exception as exc:self.wfile.write(js_error(exc) + NEWLINE)returnif not getattr(function, '', None):self.functions[function_name] = (self.function_counter, function)else:self.functions[function_name] = (self.function_counter,function(self.log))self.function_counter += return True", "docstring": "Add a function to the function list, in order.", "id": "f13910:c2:m1"} {"signature": "@utils.generator_to_listdef handle_map_doc(self, document):", "body": "for function in sorted(list(self.functions.values()), key=lambda x: x[]):try:yield [list(function(document))]except Exception as exc:yield []self.log(repr(exc))", "docstring": "Return the mapping of a document according to the function list.", "id": "f13910:c2:m2"} {"signature": "def handle_reduce(self, reduce_function_names, mapped_docs):", "body": "reduce_functions = []for reduce_function_name in reduce_function_names:try:reduce_function = get_function(reduce_function_name)if getattr(reduce_function, '', None):reduce_function = reduce_function(self.log)reduce_functions.append(reduce_function)except Exception as exc:self.log(repr(exc))reduce_functions.append(lambda *args, **kwargs: 
None)keys, values = list(zip((key, value) for ((key, doc_id), value) in mapped_docs))results = []for reduce_function in reduce_functions:try:results.append(reduce_function(keys, values, rereduce=False))except Exception as exc:self.log(repr(exc))results.append(None)return [True, results]", "docstring": "Reduce several mapped documents by several reduction functions.", "id": "f13910:c2:m3"} {"signature": "def handle_rereduce(self, reduce_function_names, values):", "body": "reduce_functions = []for reduce_function_name in reduce_function_names:try:reduce_function = get_function(reduce_function_name)if getattr(reduce_function, '', None):reduce_function = reduce_function(self.log)reduce_functions.append(reduce_function)except Exception as exc:self.log(repr(exc))reduce_functions.append(lambda *args, **kwargs: None)results = []for reduce_function in reduce_functions:try:results.append(reduce_function(None, values, rereduce=True))except Exception as exc:self.log(repr(exc))results.append(None)return [True, results]", "docstring": "Re-reduce a set of values, with a list of rereduction functions.", "id": "f13910:c2:m4"} {"signature": "def handle_validate(self, function_name, new_doc, old_doc, user_ctx):", "body": "try:function = get_function(function_name)except Exception as exc:self.log(repr(exc))return Falsetry:return function(new_doc, old_doc, user_ctx)except Exception as exc:self.log(repr(exc))return repr(exc)", "docstring": "Validate...this function is undocumented, but still in CouchDB.", "id": "f13910:c2:m5"} {"signature": "def handle(self):", "body": "while True:try:line = self.rfile.readline()try:cmd = json.loads(line)except Exception as exc:self.wfile.write(repr(exc) + NEWLINE)continueelse:handler = getattr(self, '' + cmd[], None)if not handler:self.wfile.write(repr(CommandNotFound(cmd[])) + NEWLINE)continuereturn_value = handler(*cmd[:])if not return_value:continueself.wfile.write(one_lineify(json.dumps(return_value)) + NEWLINE)except Exception as exc:self.wfile.write(repr(exc) + NEWLINE)continue", "docstring": "The main function called to handle a request.", "id": "f13910:c2:m6"} {"signature": "def log(self, string):", "body": "self.wfile.write(json.dumps({'': string}) + NEWLINE)", "docstring": "Log an event on the CouchDB server.", "id": "f13910:c2:m7"} {"signature": "def __init__(self, x, y, z, formatter=numpy_formatter):", "body": "x = np.asarray(x, dtype=np.float64)y = np.asarray(y, dtype=np.float64)z = np.ma.asarray(z, dtype=np.float64)if x.shape != z.shape:raise TypeError((\"\"\"\").format(str(x.shape), str(z.shape)))if y.shape != z.shape:raise TypeError((\"\"\"\").format(str(y.shape), str(z.shape)))mask = z.mask if z.mask is not np.ma.nomask and z.mask.any() else Noneself._contour_generator = _contour.QuadContourGenerator(x, y, z.filled(), mask, True, )super().__init__(formatter)", "docstring": "Initialize a :class:`QuadContourGenerator`, see class docstring.", "id": "f13913:c0:m0"} {"signature": "@classmethoddef from_curvilinear(cls, x, y, z, formatter=numpy_formatter):", "body": "return cls(x, y, z, formatter)", "docstring": "Construct a contour generator from a curvilinear grid.\n\n Note\n ----\n This is an alias for the default constructor.\n\n Parameters\n ----------\n x : array_like\n x coordinates of each point in `z`. Must be the same size as `z`.\n y : array_like\n y coordinates of each point in `z`. Must be the same size as `z`.\n z : array_like\n The 2-dimensional curvilinear grid of data to compute\n contours for. 
Masked arrays are supported.\n formatter : callable\n A conversion function to convert from the internal `Matplotlib`_\n contour format to an external format. See :ref:`formatters` for\n more information.\n\n Returns\n -------\n : :class:`QuadContourGenerator`\n Initialized contour generator.", "id": "f13913:c0:m1"} {"signature": "@classmethoddef from_rectilinear(cls, x, y, z, formatter=numpy_formatter):", "body": "x = np.asarray(x, dtype=np.float64)y = np.asarray(y, dtype=np.float64)z = np.ma.asarray(z, dtype=np.float64)if x.ndim != :raise TypeError(\"\".format(x.ndim))if y.ndim != :raise TypeError(\"\".format(y.ndim))if z.ndim != :raise TypeError(\"\".format(z.ndim))if x.size != z.shape[]:raise TypeError((\"\"\"\"\"\").format(x.size, z.shape[]))if y.size != z.shape[]:raise TypeError((\"\"\"\"\"\").format(y.size, z.shape[]))y, x = np.meshgrid(y, x, indexing='')return cls(x, y, z, formatter)", "docstring": "Construct a contour generator from a rectilinear grid.\n\n Parameters\n ----------\n x : array_like\n x coordinates of each column of `z`. Must be the same length as\n the number of columns in `z`. (len(x) == z.shape[1])\n y : array_like\n y coordinates of each row of `z`. Must be the same length as the\n number of rows in `z`. (len(y) == z.shape[0])\n z : array_like\n The 2-dimensional rectilinear grid of data to compute contours for.\n Masked arrays are supported.\n formatter : callable\n A conversion function to convert from the internal `Matplotlib`_\n contour format to an external format. See :ref:`formatters` for\n more information.\n\n Returns\n -------\n : :class:`QuadContourGenerator`\n Initialized contour generator.", "id": "f13913:c0:m2"} {"signature": "@classmethoddef from_uniform(cls, z, origin=(, ), step=(, ), formatter=numpy_formatter):", "body": "z = np.ma.asarray(z, dtype=np.float64)if z.ndim != :raise TypeError(\"\".format(z.ndim))if len(origin) != :raise TypeError(\"\".format(len(origin)))if len(step) != :raise TypeError(\"\".format(len(step)))if any(s == for s in step):raise ValueError(\"\".format(str(step)))y, x = np.mgrid[origin[]:(origin[]+step[]*z.shape[]):step[],origin[]:(origin[]+step[]*z.shape[]):step[]]return cls(x, y, z, formatter)", "docstring": "Construct a contour generator from a uniform grid.\n\n NOTE\n ----\n The default `origin` and `step` values are equivalent to calling\n :meth:`matplotlib.axes.Axes.contour` with only the `z` argument.\n\n Parameters\n ----------\n z : array_like\n The 2-dimensional uniform grid of data to compute contours for.\n Masked arrays are supported.\n origin : (number.Number, number.Number)\n The (x, y) coordinate of data point `z[0,0]`.\n step : (number.Number, number.Number)\n The (x, y) distance between data points in `z`.\n formatter : callable\n A conversion function to convert from the internal `Matplotlib`_\n contour format to an external format. 
See :ref:`formatters` for\n more information.\n\n Returns\n -------\n : :class:`QuadContourGenerator`\n Initialized contour generator.", "id": "f13913:c0:m3"} {"signature": "def null_formatter(level, vertices, codes=None):", "body": "return level, vertices, codes", "docstring": "Null formatter that passes through the raw vertices and codes.", "id": "f13915:m0"} {"signature": "def numpy_formatter(_, vertices, codes=None):", "body": "if codes is None:return verticesnumpy_vertices = []for vertices_, codes_ in zip(vertices, codes):starts = np.nonzero(codes_ == MPLPATHCODE.MOVETO)[]stops = np.nonzero(codes_ == MPLPATHCODE.CLOSEPOLY)[]for start, stop in zip(starts, stops):numpy_vertices.append(vertices_[start:stop+, :])return numpy_vertices", "docstring": "`NumPy`_ style contour formatter.\n\n Contours are returned as a list of Nx2 arrays containing the x and y\n vertices of the contour line.\n\n For filled contours the direction of vertices matters:\n\n * CCW (ACW): The vertices give the exterior of a contour polygon.\n * CW: The vertices give a hole of a contour polygon. This hole will\n always be inside the exterior of the last contour exterior.\n\n .. note:: This is the fastest format.\n\n .. _NumPy: http://www.numpy.org", "id": "f13915:m1"} {"signature": "def matlab_formatter(level, vertices, codes=None):", "body": "vertices = numpy_formatter(level, vertices, codes)if codes is not None:level = level[]headers = np.vstack(([v.shape[] for v in vertices],[level]*len(vertices))).Tvertices = np.vstack(list(it.__next__() for it initertools.cycle((iter(headers), iter(vertices)))))return vertices", "docstring": "`MATLAB`_ style contour formatter.\n\n Contours are returned as a single Nx2, `MATLAB`_ style, contour array.\n There are two types of rows in this format:\n\n * Header: The first element of a header row is the level of the contour\n (the lower level for filled contours) and the second element is the\n number of vertices (to follow) belonging to this contour line.\n * Vertex: x,y coordinate pairs of the vertex.\n\n A header row is always followed by the corresponding number of vertices.\n Another header row may follow if there are more contour lines.\n\n For filled contours the direction of vertices matters:\n\n * CCW (ACW): The vertices give the exterior of a contour polygon.\n * CW: The vertices give a hole of a contour polygon. This hole will\n always be inside the exterior of the last contour exterior.\n\n For further explanation of this format see the `Mathworks documentation\n `_\n noting that the MATLAB format used in the `contours` package is the\n transpose of that used by `MATLAB`_ (since `MATLAB`_ is column-major\n and `NumPy`_ is row-major by default).\n\n .. _NumPy: http://www.numpy.org\n\n .. 
_MATLAB: https://www.mathworks.com/products/matlab.html", "id": "f13915:m2"} {"signature": "def shapely_formatter(_, vertices, codes=None):", "body": "elements = []if codes is None:for vertices_ in vertices:if np.all(vertices_[, :] == vertices_[-, :]):if len(vertices) < :elements.append(Point(vertices_[, :]))else:elements.append(LinearRing(vertices_))else:elements.append(LineString(vertices_))else:for vertices_, codes_ in zip(vertices, codes):starts = np.nonzero(codes_ == MPLPATHCODE.MOVETO)[]stops = np.nonzero(codes_ == MPLPATHCODE.CLOSEPOLY)[]try:rings = [LinearRing(vertices_[start:stop+, :])for start, stop in zip(starts, stops)]elements.append(Polygon(rings[], rings[:]))except ValueError as err:if np.any(stop - start - == ):if stops[] < starts[]+:passelse:rings = [LinearRing(vertices_[start:stop+, :])for start, stop in zip(starts, stops)if stop >= start+]elements.append(Polygon(rings[], rings[:]))else:raise(err)return elements", "docstring": "`Shapely`_ style contour formatter.\n\n Contours are returned as a list of :class:`shapely.geometry.LineString`,\n :class:`shapely.geometry.LinearRing`, and :class:`shapely.geometry.Point`\n geometry elements.\n\n Filled contours return a list of :class:`shapely.geometry.Polygon`\n elements instead.\n\n .. note:: If possible, `Shapely speedups`_ will be enabled.\n\n .. _Shapely: http://toblerity.org/shapely/manual.html\n\n .. _Shapely speedups: http://toblerity.org/shapely/manual.html#performance\n\n\n See Also\n --------\n `descartes `_ : Use `Shapely`_\n or GeoJSON-like geometric objects as matplotlib paths and patches.", "id": "f13915:m3"} {"signature": "def __init__(self, formatter=numpy_formatter, *args, **kwargs):", "body": "self.formatter = formatter", "docstring": "Initialize a :class:`ContourMixin`, see class docstring.", "id": "f13915:c1:m0"} {"signature": "def contour(self, level):", "body": "if not isinstance(level, numbers.Number):raise TypeError((\"\"\"\").format(type(level)))vertices = self._contour_generator.create_contour(level)return self.formatter(level, vertices)", "docstring": "Get contour lines at the given level.\n\n Parameters\n ----------\n level : numbers.Number\n The data level to calculate the contour lines for.\n\n Returns\n -------\n :\n The result of the :attr:`formatter` called on the contour at the\n given `level`.", "id": "f13915:c1:m1"} {"signature": "def filled_contour(self, min=None, max=None):", "body": "if min is None:min = np.finfo(np.float64).minif max is None:max = np.finfo(np.float64).maxvertices, codes = (self._contour_generator.create_filled_contour(min, max))return self.formatter((min, max), vertices, codes)", "docstring": "Get contour polygons between the given levels.\n\n Parameters\n ----------\n min : numbers.Number or None\n The minimum data level of the contour polygon. If :obj:`None`,\n ``numpy.finfo(numpy.float64).min`` will be used.\n max : numbers.Number or None\n The maximum data level of the contour polygon. 
If :obj:`None`,\n ``numpy.finfo(numpy.float64).max`` will be used.\n\n Returns\n -------\n :\n The result of the :attr:`formatter` called on the filled contour\n between `min` and `max`.", "id": "f13915:c1:m2"} {"signature": "def get_pickling_errors(obj, seen=None):", "body": "if seen == None:seen = []if hasattr(obj, \"\"):state = obj.__getstate__()else:return Noneif state == None:return ''if isinstance(state,tuple):if not isinstance(state[], dict):state=state[]else:state=state[].update(state[])result = {}for i in state:try:pickle.dumps(state[i], protocol=)except pickle.PicklingError as e:if not state[i] in seen:seen.append(state[i])result[i]=get_pickling_errors(state[i],seen)return result", "docstring": "Investigate pickling errors.", "id": "f13917:m0"} {"signature": "def get_dump_method(dump, protocol=-):", "body": "if dump is None:dump = ''if dump.startswith(''):if dump == '':proto = protocolelse:proto = dump.strip('')try:proto = int(proto)assert proto>=-except:raise Exception(\"\")code = \"\"\"\"\"\"rt dillopen('', '') as fd:dill.dump( value, fd, protocol=%i )fd.flush()os.fsync(fd.fileno())protoelif dump == '':code = \"\"\"\"\"\"rt numpyopen('', '') as fd:numpy.save(file=fd, arr=value)fd.flush()os.fsync(fd.fileno())elif dump == '':code =", "docstring": "Get dump function code string", "id": "f13917:m1"} {"signature": "def get_pull_method(pull):", "body": "if pull is None or pull.startswith(''):code = \"\"\"\"\"\"rt dillopen('', '') as fd:PULLED_DATA = dill.load( fd )elif pull == '':code = \"\"\"\"\"\"rt numpyopen('', '') as fd:PULLED_DATA=numpy.load(file=fd)elif pull == '':code =", "docstring": "Get pull function code string", "id": "f13917:m2"} {"signature": "def path_required(func):", "body": "@wraps(func)def wrapper(self, *args, **kwargs):if self.path is None:warnings.warn('')returnreturn func(self, *args, **kwargs)return wrapper", "docstring": "Decorate methods when repository path is required.", "id": "f13917:m3"} {"signature": "def __clean_before_after(self, stateBefore, stateAfter, keepNoneEmptyDirectory=True):", "body": "errors = []afterDict = {}[afterDict.setdefault(list(aitem)[],[]).append(aitem) for aitem in stateAfter]for bitem in reversed(stateBefore):relaPath = list(bitem)[]basename = os.path.basename(relaPath)btype = bitem[relaPath]['']alist = afterDict.get(relaPath, [])aitem = [a for a in alist if a[relaPath]['']==btype]if len(aitem)>:errors.append(\"\"%(basename,btype,relaPath))continueif not len(aitem):removeDirs = []removeFiles = []if btype == '':if not len(relaPath):errors.append(\"\")continueremoveDirs.append(os.path.join(self.__path,relaPath))removeFiles.append(os.path.join(self.__path,relaPath,self.__dirInfo))removeFiles.append(os.path.join(self.__path,relaPath,self.__dirLock))elif btype == '':removeFiles.append(os.path.join(self.__path,relaPath))removeFiles.append(os.path.join(self.__path,relaPath,self.__fileInfo%basename))removeFiles.append(os.path.join(self.__path,relaPath,self.__fileLock%basename))else:removeDirs.append(os.path.join(self.__path,relaPath))removeFiles.append(os.path.join(self.__path,relaPath,self.__fileInfo%basename))for fpath in removeFiles:if os.path.isfile(fpath):try:os.remove(fpath)except Exception as err:errors.append(\"\"%(fpath, str(err)))for dpath in removeDirs:if os.path.isdir(dpath):if keepNoneEmptyDirectory or not len(os.listdir(dpath)):try:shutil.rmtree(dpath)except Exception as err:errors.append(\"\"%(fpath, str(err)))return len(errors)==, errors", "docstring": "clean repository given before and after states", "id": "f13917:c1:m6"} 
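A hedged usage sketch of the pyrep dump/pull mechanism outlined by get_dump_method/get_pull_method and the dump_file/pull_file docstrings below: custom dump and pull code strings carry a '$FILE_PATH' placeholder, and the pull string must define a PULLED_DATA variable. The example strings are taken from those docstrings; the import path and the repository directory used here are assumptions for illustration only.

import numpy as np
from pyrep import Repository  # assumed import path for the pyrep package

REPO_DIR = "/tmp/demo_pyrep"  # hypothetical repository location for this sketch

repo = Repository()
success, message = repo.create_repository(REPO_DIR, description="demo repository")

# dump a NumPy array with explicit dump/pull code strings; '$FILE_PATH' is replaced
# by the absolute file path at execution time, and the pull string sets PULLED_DATA
dump_code = "import numpy as np; np.savetxt(fname='$FILE_PATH', X=value, fmt='%.6e')"
pull_code = "import numpy as np; PULLED_DATA=np.loadtxt(fname='$FILE_PATH')"

data = np.arange(12.0).reshape(3, 4)
repo.dump(data, "results/matrix.txt", description="3x4 demo matrix",
          dump=dump_code, pull=pull_code)

# pull it back; by default the pull method stored in the file's info is used
restored = repo.pull("results/matrix.txt")
assert np.allclose(restored, data)

The same dump/pull strings are stored in the file's info record, so later calls to pull_file or update_file can reuse them without repeating the code string.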
{"signature": "@propertydef info(self):", "body": "return self.__repo['']", "docstring": "Get repository information", "id": "f13917:c1:m12"} {"signature": "@propertydef path(self):", "body": "return self.__path", "docstring": "The repository instance path which points to the directory where\n .pyreprepo is.", "id": "f13917:c1:m13"} {"signature": "@propertydef uniqueName(self):", "body": "return self.__repo['']", "docstring": "Get repository unique name as generated when repository was created", "id": "f13917:c1:m14"} {"signature": "def get_stats(self):", "body": "if self.__path is None:return ,nfiles = ndirs = for fdict in self.get_repository_state():fdname = list(fdict)[]if fdname == '':continueif fdict[fdname].get('', False):nfiles += elif fdict[fdname].get('', False):ndirs += else:raise Exception('')return ndirs,nfiles", "docstring": "Get repository descriptive stats\n\n:Returns:\n #. numberOfDirectories (integer): Number of diretories in repository\n #. numberOfFiles (integer): Number of files in repository", "id": "f13917:c1:m15"} {"signature": "def reset(self):", "body": "self.__path = Noneself.__repo = {'': str(uuid.uuid1()),'': time.time(),'': None,'': str(__version__),'': '','': []}", "docstring": "Reset repository instance.", "id": "f13917:c1:m16"} {"signature": "def is_repository(self, path):", "body": "if path.strip() in ('',''):path = os.getcwd()repoPath = os.path.realpath( os.path.expanduser(path) )if os.path.isfile( os.path.join(repoPath,self.__repoFile) ):return Trueelse:try:from .OldRepository import RepositoryREP = Repository()result = REP.is_repository(repoPath)except:return Falseelse:if result:warnings.warn(\"\")return result", "docstring": "Check if there is a Repository in path.\n\n:Parameters:\n #. path (string): The real path of the directory where to check if\n there is a repository.\n\n:Returns:\n #. result (boolean): Whether it's a repository or not.", "id": "f13917:c1:m17"} {"signature": "def load_repository(self, path, verbose=True, ntrials=):", "body": "assert isinstance(ntrials, int), \"\"assert ntrials>, \"\"repo = Nonefor _trial in range(ntrials):try:self.__load_repository(path=path, verbose=True)except Exception as err1:try:from .OldRepository import RepositoryREP = Repository(path)except Exception as err2:error = \"\"%(err1, err2)if self.DEBUG_PRINT_FAILED_TRIALS: print(\"\"%(_trial, inspect.stack()[][], str(error)))else:error = Nonerepo = REPbreakelse:error = Nonerepo = selfbreakassert error is None, errorreturn repo", "docstring": "Load repository from a directory path and update the current instance.\nFirst, new repository still will be loaded. If failed, then old\nstyle repository load will be tried.\n\n:Parameters:\n #. path (string): The path of the directory from where to load\n the repository from. If '.' or an empty string is passed,\n the current working directory will be used.\n #. verbose (boolean): Whether to be verbose about abnormalities\n #. ntrials (int): After aquiring all locks, ntrials is the maximum\n number of trials allowed before failing.\n In rare cases, when multiple processes\n are accessing the same repository components, different processes\n can alter repository components between successive lock releases\n of some other process. Bigger number of trials lowers the\n likelyhood of failure due to multiple processes same time\n alteration.\n\n:Returns:\n #. 
repository (pyrep.Repository): returns self repository with loaded data.", "id": "f13917:c1:m18"} {"signature": "def create_repository(self, path, info=None, description=None, replace=True, allowNoneEmpty=True, raiseError=True):", "body": "assert isinstance(raiseError, bool), \"\"assert isinstance(allowNoneEmpty, bool), \"\"assert isinstance(replace, bool), \"\"assert isinstance(path, basestring), \"\"if info is None:info = ''try:pickle.dumps(info)except Exception as err:raise Exception(\"\"%str(err))if description is None:description = ''assert isinstance(description, basestring), \"\"if path.strip() in ('',''):path = os.getcwd()realPath = os.path.realpath( os.path.expanduser(path) )message = []if self.is_repository(realPath):if not replace:message.append(\"\"%path)return False, messageelse:message.append(\"\"%path)try:for _df in os.listdir(realPath):_p = os.path.join(realPath, _df)if os.path.isdir(_p):shutil.rmtree( _p )else:os.remove(_p)except Exception as err:message.append(\"\"%(str(err)))return False, ''.join(message)if not os.path.isdir(realPath):os.makedirs(realPath)elif len(os.listdir(realPath)) and not allowNoneEmpty:return False, \"\"oldRepo = self.__repoself.reset()self.__path = realPath.rstrip(os.sep)self.__repo[''] = infosaved = self.save(description=description)if not saved:self.__repo = oldRepomessage.append(\"\")return False, ''.join(message)return True, ''.join(message)", "docstring": "create a repository in a directory. This method insures the creation of\nthe directory in the system if it is missing.\\n\n\n**N.B. If replace is True and existing repository is found in path, create_repository erases all existing files and directories in path.**\n\n:Parameters:\n #. path (string): The real absolute path where to create the Repository.\n If '.' or an empty string is passed, the current working directory will be used.\n #. description (None, str): Repository main directory information.\n #. info (None, object): Repository information. It can\n be None or any pickle writable type of data.\n #. replace (boolean): Whether to replace existing repository.\n #. allowNoneEmpty (boolean): Allow creating repository in none-empty\n directory.\n #. raiseError (boolean): Whether to raise encountered error instead\n of returning failure.\n\n:Returns:\n #. success (boolean): Whether creating repository was successful\n #. 
message (None, str): Any returned message.", "id": "f13917:c1:m19"} {"signature": "def remove_repository(self, path=None, removeEmptyDirs=True):", "body": "assert isinstance(removeEmptyDirs, bool), \"\"if path is not None:if path != self.__path:repo = Repository()repo.load_repository(path)else:repo = selfelse:repo = selfassert repo.path is not None, \"\"for fdict in reversed(repo.get_repository_state()):relaPath = list(fdict)[]realPath = os.path.join(repo.path, relaPath)path, name = os.path.split(realPath)if fdict[relaPath][''] == '':if os.path.isfile(realPath):os.remove(realPath)if os.path.isfile(os.path.join(repo.path,path,self.__fileInfo%name)):os.remove(os.path.join(repo.path,path,self.__fileInfo%name))if os.path.isfile(os.path.join(repo.path,path,self.__fileLock%name)):os.remove(os.path.join(repo.path,path,self.__fileLock%name))if os.path.isfile(os.path.join(repo.path,path,self.__fileClass%name)):os.remove(os.path.join(repo.path,path,self.__fileClass%name))elif fdict[relaPath][''] == '':if os.path.isfile(os.path.join(realPath,self.__dirInfo)):os.remove(os.path.join(realPath,self.__dirInfo))if os.path.isfile(os.path.join(realPath,self.__dirLock)):os.remove(os.path.join(realPath,self.__dirLock))if not len(os.listdir(realPath)) and removeEmptyDirs:shutil.rmtree( realPath )if os.path.isfile(os.path.join(repo.path,self.__repoFile)):os.remove(os.path.join(repo.path,self.__repoFile))if os.path.isfile(os.path.join(repo.path,self.__repoLock)):os.remove(os.path.join(repo.path,self.__repoLock))", "docstring": "Remove all repository from path along with all repository tracked files.\n\n:Parameters:\n #. path (None, string): The path the repository to remove.\n #. removeEmptyDirs (boolean): Whether to remove remaining empty\n directories.", "id": "f13917:c1:m20"} {"signature": "@path_requireddef save(self, description=None, raiseError=True, ntrials=):", "body": "assert isinstance(raiseError, bool), \"\"assert isinstance(ntrials, int), \"\"assert ntrials>, \"\"if description is not None:assert isinstance(description, basestring), \"\"dirInfoPath = os.path.join(self.__path, self.__dirInfo)if description is None and not os.path.isfile(dirInfoPath):description = ''LR = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(self.__path, self.__repoLock))acquired, code = LR.acquire_lock()m = \"\"%(code,)if not acquired:assert not raiseError, Exception(m)return False, mfor _trial in range(ntrials):try:repoInfoPath = os.path.join(self.__path, self.__repoFile)error = Noneself.__save_dirinfo(description=description, dirInfoPath=dirInfoPath)if os.path.isfile(repoInfoPath):with open(repoInfoPath, '') as fd:repo = self.__load_repository_pickle_file(os.path.join(self.__path, self.__repoFile))self.__repo[''] = repo['']with open(repoInfoPath, '') as fd:self.__repo[\"\"] = time.time()pickle.dump( self.__repo,fd, protocol=self._DEFAULT_PICKLE_PROTOCOL )fd.flush()os.fsync(fd.fileno())except Exception as err:error = \"\"%errif self.DEBUG_PRINT_FAILED_TRIALS: print(\"\"%(_trial, inspect.stack()[][], str(error)))else:breakLR.release_lock()assert error is None or not raiseError, errorreturn error is None, error", "docstring": "Save repository '.pyreprepo' to disk and create (if missing) or\nupdate (if description is not None) '.pyrepdirinfo'.\n\n:Parameters:\n #. description (None, str): Repository main directory information.\n If given will be replaced.\n #. raiseError (boolean): Whether to raise encountered error instead\n of returning failure.\n #. 
ntrials (int): After aquiring all locks, ntrials is the maximum\n number of trials allowed before failing.\n In rare cases, when multiple processes\n are accessing the same repository components, different processes\n can alter repository components between successive lock releases\n of some other process. Bigger number of trials lowers the\n likelyhood of failure due to multiple processes same time\n alteration.\n\n:Returns:\n #. success (bool): Whether saving was successful.\n #. error (None, string): Fail to save repository message in case\n saving is not successful. If success is True, error will be None.", "id": "f13917:c1:m21"} {"signature": "def is_name_allowed(self, path):", "body": "assert isinstance(path, basestring), \"\"name = os.path.basename(path)if not len(name):return False, \"\"for em in [self.__repoLock,self.__repoFile,self.__dirInfo,self.__dirLock]:if name == em:return False, \"\"%emfor pm in [self.__fileInfo,self.__fileLock]:if name == pm or (name.endswith(pm[:]) and name.startswith('')):return False, \"\"%pmreturn True, None", "docstring": "Get whether creating a file or a directory from the basenane of the given\npath is allowed\n\n:Parameters:\n #. path (str): The absolute or relative path or simply the file\n or directory name.\n\n:Returns:\n #. allowed (bool): Whether name is allowed.\n #. message (None, str): Reason for the name to be forbidden.", "id": "f13917:c1:m22"} {"signature": "def to_repo_relative_path(self, path, split=False):", "body": "path = os.path.normpath(path)if path == '':path = ''path = path.split(self.__path)[-].strip(os.sep)if split:return path.split(os.sep)else:return path", "docstring": "Given a path, return relative path to diretory\n\n:Parameters:\n #. path (str): Path as a string\n #. split (boolean): Whether to split path to its components\n\n:Returns:\n #. relativePath (str, list): Relative path as a string or as a list\n of components if split is True", "id": "f13917:c1:m23"} {"signature": "@path_requireddef get_repository_state(self, relaPath=None):", "body": "state = []def _walk_dir(relaPath, dirList):dirDict = {'':'','':os.path.isdir(os.path.join(self.__path,relaPath)),'':os.path.isfile(os.path.join(self.__path,relaPath,self.__dirInfo)),}state.append({relaPath:dirDict})for fname in sorted([f for f in dirList if isinstance(f, basestring)]):relaFilePath = os.path.join(relaPath,fname)realFilePath = os.path.join(self.__path,relaFilePath)fileDict = {'':'','':os.path.isfile(realFilePath),'':os.path.isfile(os.path.join(self.__path,relaPath,self.__fileInfo%fname)),}state.append({relaFilePath:fileDict})for ddict in sorted([d for d in dirList if isinstance(d, dict)], key=lambda k: list(k)[]):dirname = list(ddict)[]_walk_dir(relaPath=os.path.join(relaPath,dirname), dirList=ddict[dirname])if relaPath is None:_walk_dir(relaPath='', dirList=self.__repo[''])else:assert isinstance(relaPath, basestring), \"\"relaPath = self.to_repo_relative_path(path=relaPath, split=False)spath = relaPath.split(os.sep)dirList = self.__repo['']while len(spath):dirname = spath.pop()dList = [d for d in dirList if isinstance(d, dict)]if not len(dList):dirList = NonebreakcDict = [d for d in dList if dirname in d]if not len(cDict):dirList = NonebreakdirList = cDict[][dirname]if dirList is not None:_walk_dir(relaPath=relaPath, dirList=dirList)return state", "docstring": "Get a list representation of repository state along with useful\ninformation. List state is ordered relativeley to directories level\n\n:Parameters:\n #. 
relaPath (None, str): relative directory path from where to\n start. If None all repository representation is returned.\n\n:Returns:\n #. state (list): List representation of the repository.\n List items are all dictionaries. Every dictionary has a single\n key which is the file or the directory name and the value is a\n dictionary of information including:\n\n * 'type': the type of the tracked whether it's file, dir, or objectdir\n * 'exists': whether file or directory actually exists on disk\n * 'pyrepfileinfo': In case of a file or an objectdir whether .%s_pyrepfileinfo exists\n * 'pyrepdirinfo': In case of a directory whether .pyrepdirinfo exists", "id": "f13917:c1:m24"} {"signature": "def get_repository_directory(self, relativePath):", "body": "return copy.deepcopy(self.__get_repository_directory(relativePath))", "docstring": "Get repository directory list copy.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path .\n\n:Returns:\n #. dirList (None, list): List of directories and files in repository\n directory. If directory is not tracked in repository None is\n returned", "id": "f13917:c1:m25"} {"signature": "def get_file_info(self, relativePath):", "body": "relativePath = self.to_repo_relative_path(path=relativePath, split=False)fileName = os.path.basename(relativePath)isRepoFile,fileOnDisk, infoOnDisk, classOnDisk = self.is_repository_file(relativePath)if not isRepoFile:return None, \"\"if not infoOnDisk:return None, \"\"fileInfoPath = os.path.join(self.__path,os.path.dirname(relativePath),self.__fileInfo%fileName)try:with open(fileInfoPath, '') as fd:info = pickle.load(fd)except Exception as err:return None, \"\"%str(err)return info, ''", "docstring": "Get file information dict from the repository given its relative path.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path of\n the file.\n\n:Returns:\n #. info (None, dictionary): The file information dictionary.\n If None, it means an error has occurred.\n #. errorMessage (string): The error message if any error occurred.", "id": "f13917:c1:m26"} {"signature": "def is_repository_directory(self, relativePath):", "body": "return self.__get_repository_directory(relativePath) is not None", "docstring": "Get whether directory is registered in repository.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path.\n\n:Returns:\n #. 
result (boolean): Whether directory is tracked and registered.", "id": "f13917:c1:m27"} {"signature": "def is_repository_file(self, relativePath):", "body": "relativePath = self.to_repo_relative_path(path=relativePath, split=False)if relativePath == '':return False, False, False, FalserelaDir, name = os.path.split(relativePath)fileOnDisk = os.path.isfile(os.path.join(self.__path, relativePath))infoOnDisk = os.path.isfile(os.path.join(self.__path,os.path.dirname(relativePath),self.__fileInfo%name))classOnDisk = os.path.isfile(os.path.join(self.__path,os.path.dirname(relativePath),self.__fileClass%name))cDir = self.__repo['']if len(relaDir):for dirname in relaDir.split(os.sep):dList = [d for d in cDir if isinstance(d, dict)]if not len(dList):cDir = NonebreakcDict = [d for d in dList if dirname in d]if not len(cDict):cDir = NonebreakcDir = cDict[][dirname]if cDir is None:return False, fileOnDisk, infoOnDisk, classOnDiskif str(name) not in [str(i) for i in cDir]:return False, fileOnDisk, infoOnDisk, classOnDiskreturn True, fileOnDisk, infoOnDisk, classOnDisk", "docstring": "Check whether a given relative path is a repository file path\n\n:Parameters:\n #. relativePath (string): File relative path\n\n:Returns:\n #. isRepoFile (boolean): Whether file is a repository file.\n #. isFileOnDisk (boolean): Whether file is found on disk.\n #. isFileInfoOnDisk (boolean): Whether file info is found on disk.\n #. isFileClassOnDisk (boolean): Whether file class is found on disk.", "id": "f13917:c1:m28"} {"signature": "@path_requireddef walk_files_path(self, relativePath=\"\", fullPath=False, recursive=False):", "body": "assert isinstance(fullPath, bool), \"\"assert isinstance(recursive, bool), \"\"relativePath = self.to_repo_relative_path(path=relativePath, split=False)dirList = self.__get_repository_directory(relativePath=relativePath)assert dirList is not None, \"\"%relativePathdef _walk(rpath, dlist,recursive):for fname in dlist:if isinstance(fname, basestring):if fullPath:yield os.path.join(self.__path, rpath, fname)else:yield os.path.join(rpath, fname)if recursive:for ddict in dlist:if isinstance(ddict, dict):dname = list(ddict)[]for p in _walk(rpath=os.path.join(rpath,dname), dlist=ddict[dname],recursive=recursive):yield preturn _walk(rpath=relativePath, dlist=dirList, recursive=recursive)", "docstring": "Walk the repository relative path and yield file relative/full path.\n\n:parameters:\n #. relativePath (string): The relative path from which start the walk.\n #. fullPath (boolean): Whether to return full or relative path.\n #. recursive (boolean): Whether walk all directories files recursively", "id": "f13917:c1:m29"} {"signature": "def walk_files_info(self, relativePath=\"\", fullPath=False, recursive=False):", "body": "assert isinstance(fullPath, bool), \"\"assert isinstance(recursive, bool), \"\"relativePath = self.to_repo_relative_path(path=relativePath, split=False)for relaPath in self.walk_files_path(relativePath=relativePath, fullPath=False, recursive=recursive):fpath, fname = os.path.split(relaPath)fileInfoPath = os.path.join(self.__path,fpath,self.__fileInfo%fname)if os.path.isfile(fileInfoPath):with open(fileInfoPath, '') as fd:info = pickle.load(fd)else:info = Noneif fullPath:yield (os.path.join(self.__path, relaPath), info)else:yield (relaPath, info)", "docstring": "Walk the repository relative path and yield tuple of two items where\nfirst item is file relative/full path and second item is file info.\nIf file info is not found on disk, second item will be None.\n\n:parameters:\n #. 
relativePath (string): The relative path from which start the walk.\n #. fullPath (boolean): Whether to return full or relative path.\n #. recursive (boolean): Whether walk all directories files recursively", "id": "f13917:c1:m30"} {"signature": "def walk_directories_path(self, relativePath=\"\", fullPath=False, recursive=False):", "body": "assert isinstance(fullPath, bool), \"\"assert isinstance(recursive, bool), \"\"relativePath = self.to_repo_relative_path(path=relativePath, split=False)dirList = self.__get_repository_directory(relativePath=relativePath)assert dirList is not None, \"\"%relativePathdef _walk(rpath, dlist,recursive):for ddict in dlist:if isinstance(ddict, dict):dname = list(ddict)[]if fullPath:yield os.path.join(self.__path, rpath, dname)else:yield os.path.join(rpath, dname)if recursive:for ddict in dlist:if isinstance(ddict, dict):dname = list(ddict)[]for p in _walk(rpath=os.path.join(rpath,dname), dlist=ddict[dname],recursive=recursive):yield preturn _walk(rpath=relativePath, dlist=dirList, recursive=recursive)", "docstring": "Walk repository relative path and yield directory relative/full path\n\n:parameters:\n #. relativePath (string): The relative path from which start the walk.\n #. fullPath (boolean): Whether to return full or relative path.\n #. recursive (boolean): Whether walk all directories files recursively.", "id": "f13917:c1:m31"} {"signature": "def walk_directories_info(self, relativePath=\"\", fullPath=False, recursive=False):", "body": "assert isinstance(fullPath, bool), \"\"assert isinstance(recursive, bool), \"\"relativePath = self.to_repo_relative_path(path=relativePath, split=False)for dpath in self.walk_directories_path(relativePath=relativePath, fullPath=False, recursive=recursive):dirInfoPath = os.path.join(self.__path,dpath,self.__dirInfo)if os.path.isfile(dirInfoPath):with open(dirInfoPath, '') as fd:info = pickle.load(fd)else:info = Noneif fullPath:yield (os.path.join(self.__path, dpath), info)else:yield (dpath, info)", "docstring": "Walk the repository relative path and yield tuple of two items where\nfirst item is directory relative/full path and second item is directory\ninfo. If directory file info is not found on disk, second item will be None.\n\n:parameters:\n #. relativePath (string): The relative path from which start the walk.\n #. fullPath (boolean): Whether to return full or relative path.\n #. 
recursive (boolean): Whether walk all directories files recursively.", "id": "f13917:c1:m32"} {"signature": "@path_requireddef create_package(self, path=None, name=None, mode=None):", "body": "assert mode in (None, '', '', '', ''), ''%str(mode)if mode is None:mode = ''if path is None:root = os.path.split(self.__path)[]elif path.strip() in ('',''):root = os.getcwd()else:root = os.path.realpath( os.path.expanduser(path) )assert os.path.isdir(root), ''%pathif name is None:ext = mode.split(\"\")if len(ext) == :if len(ext[]):ext = \"\"+ext[]else:ext = ''else:ext = ''name = os.path.split(self.__path)[]+exttarfilePath = os.path.join(root, name)try:tarHandler = tarfile.TarFile.open(tarfilePath, mode=mode)except Exception as e:raise Exception(\"\"%e)for dpath in sorted(list(self.walk_directories_path(recursive=True))):t = tarfile.TarInfo( dpath )t.type = tarfile.DIRTYPEtarHandler.addfile(t)tarHandler.add(os.path.join(self.__path,dpath,self.__dirInfo), arcname=self.__dirInfo)for fpath in self.walk_files_path(recursive=True):relaPath, fname = os.path.split(fpath)tarHandler.add(os.path.join(self.__path,fpath), arcname=fname)tarHandler.add(os.path.join(self.__path,relaPath,self.__fileInfo%fname), arcname=self.__fileInfo%fname)tarHandler.add(os.path.join(self.__path,relaPath,self.__fileClass%fname), arcname=self.__fileClass%fname)tarHandler.add(os.path.join(self.__path,self.__repoFile), arcname=\"\")tarHandler.close()", "docstring": "Create a tar file package of all the repository files and directories.\nOnly files and directories that are tracked in the repository\nare stored in the package tar file.\n\n**N.B. On some systems packaging requires root permissions.**\n\n:Parameters:\n #. path (None, string): The real absolute path where to create the\n package. If None, it will be created in the same directory as\n the repository. If '.' or an empty string is passed, the current\n working directory will be used.\n #. name (None, string): The name to give to the package file\n If None, the package directory name will be used with the\n appropriate extension added.\n #. 
mode (None, string): The writing mode of the tarfile.\n If None, automatically the best compression mode will be chose.\n Available modes are ('w', 'w:', 'w:gz', 'w:bz2')", "id": "f13917:c1:m33"} {"signature": "@path_requireddef add_directory(self, relativePath, description=None, clean=False,raiseError=True, ntrials=):", "body": "assert isinstance(raiseError, bool), \"\"assert isinstance(relativePath, basestring), \"\"if description is not None:assert isinstance(description, basestring), \"\"assert isinstance(ntrials, int), \"\"assert ntrials>, \"\"path = self.to_repo_relative_path(path=relativePath, split=False)if self.is_repository_directory(path):return True, \"\"allowed, reason = self.is_name_allowed(path)if not allowed:if raiseError:raise Exception(reason)return False, reasonLR = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(self.__path, self.__repoLock))acquired, code = LR.acquire_lock()if not acquired:m = \"\"%(code,)if raiseError:raise Exception(m)return False,mfor _trial in range(ntrials):try:repo = self.__load_repository_pickle_file(os.path.join(self.__path, self.__repoFile))self.__repo[''] = repo['']except Exception as err:error = str(err)if self.DEBUG_PRINT_FAILED_TRIALS: print(\"\"%(_trial, inspect.stack()[][], str(error)))else:error = Nonebreakif error is not None:_ = LR.release_lock()assert not raiseError, Exception(error)return False, errorerror = NoneposList = self.__repo['']dirPath = self.__pathspath = path.split(os.sep)for idx, name in enumerate(spath):LD = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(dirPath, self.__dirLock))acquired, code = LD.acquire_lock()if not acquired:error = \"\"%(code,dirPath)breakfor _trial in range(ntrials):try:dirPath = os.path.join(dirPath, name)riPath = os.path.join(dirPath, self.__dirInfo)dList = [d for d in posList if isinstance(d, dict)]dList = [d for d in dList if name in d]if not len(dList) and clean and os.path.exists(dirPath):try:shutil.rmtree( dirPath, ignore_errors=True )except Exception as err:error = \"\"%(dirPath, err)breakif not os.path.exists(dirPath):try:os.mkdir(dirPath)except Exception as err:error = \"\"%(dirPath, err)breakself.__save_dirinfo(description=[None, description][idx==len(spath)-],dirInfoPath=riPath, create=True)if not len(dList):rsd = {name:[]}posList.append(rsd)posList = rsd[name]else:assert len(dList) == , \"\"posList = dList[][name]except Exception as err:LD.release_lock()error = \"\"%(dirPath, str(err))if self.DEBUG_PRINT_FAILED_TRIALS: print(\"\"%(_trial, inspect.stack()[][], str(error)))else:LD.release_lock()breakif error is not None:breakif error is None:try:_, error = self.__save_repository_pickle_file(lockFirst=False, raiseError=False)except Exception as err:error = str(err)passtry:LD.release_lock()except:passtry:LR.release_lock()except:passassert error is None or not raiseError, errorreturn error is None, error", "docstring": "Add a directory in the repository and creates its attribute in the\nRepository with utc timestamp. It insures adding all the missing\ndirectories in the path.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path to\n where directory must be added.\n #. description (None, string): Any random description about the\n added directory.\n #. clean (boolean): Whether to remove existing non repository\n tracked files and folders in all created directory chain tree.\n #. raiseError (boolean): Whether to raise encountered error instead\n of returning failure.\n #. 
ntrials (int): After aquiring all locks, ntrials is the maximum\n number of trials allowed before failing.\n In rare cases, when multiple processes\n are accessing the same repository components, different processes\n can alter repository components between successive lock releases\n of some other process. Bigger number of trials lowers the\n likelyhood of failure due to multiple processes same time\n alteration.\n\n:Returns:\n #. success (boolean): Whether adding the directory was successful.\n #. message (None, string): Reason why directory was not added or\n random information.", "id": "f13917:c1:m34"} {"signature": "def get_repository_parent_directory(self, relativePath):", "body": "return copy.deepcopy(self.__get_repository_parent_directory(relativePath))", "docstring": "Get repository parent directory list copy.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path .\n\n:Returns:\n #. dirList (None, list): List of directories and files in repository\n parent directory. If directory is not tracked in repository\n None is returned", "id": "f13917:c1:m35"} {"signature": "@path_requireddef remove_directory(self, relativePath, clean=False, raiseError=True, ntrials=):", "body": "assert isinstance(raiseError, bool), \"\"assert isinstance(clean, bool), \"\"assert isinstance(ntrials, int), \"\"assert ntrials>, \"\"relativePath = self.to_repo_relative_path(path=relativePath, split=False)parentPath, dirName = os.path.split(relativePath)if relativePath == '':return False, \"\"if not self.is_repository_directory(relativePath):return False, \"\"%relativePathrealPath = os.path.join(self.__path,relativePath)if not os.path.isdir(realPath):error = \"\"assert not raiseError, errorreturn False, errorLD = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(self.__path,parentPath,self.__dirLock))acquired, code = LD.acquire_lock()if not acquired:error = \"\"%(code,realPath)assert not raiseError, errorreturn False, errorLR = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(self.__path, self.__repoLock))acquired, code = LR.acquire_lock()if not acquired:LD.release_lock()m = \"\"%(code,)assert raiseError, Exception(m)return False,mfor _trial in range(ntrials):error = Nonetry:dirList = self.__get_repository_parent_directory(relativePath=relativePath)assert dirList is not None, \"\"%(relativePath,)stateBefore = self.get_repository_state(relaPath=parentPath)_files = [f for f in dirList if isinstance(f, basestring)]_dirs = [d for d in dirList if isinstance(d, dict)]_dirs = [d for d in dirList if dirName not in d]_ = [dirList.pop() for _ in range(len(dirList))]dirList.extend(_files)dirList.extend(_dirs)if clean:shutil.rmtree(realPath)else:stateAfter = self.get_repository_state(relaPath=parentPath)success, errors = self.__clean_before_after(stateBefore=stateBefore, stateAfter=stateAfter, keepNoneEmptyDirectory=True)assert success, \"\".join(errors)except Exception as err:error = str(err)if self.DEBUG_PRINT_FAILED_TRIALS: print(\"\"%(_trial, inspect.stack()[][], str(error)))else:breakif error is None:_, error = self.__save_repository_pickle_file(lockFirst=False, raiseError=False)LD.release_lock()LR.release_lock()assert error is None or not raiseError, \"\"%(relativePath, ntrials, error,)return error is None, error", "docstring": "Remove directory from repository tracking.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path of the\n directory to remove from the repository.\n #. 
clean (boolean): Whether to os remove directory. If False only\n tracked files will be removed along with left empty directories.\n #. raiseError (boolean): Whether to raise encountered error instead\n of returning failure.\n #. ntrials (int): After aquiring all locks, ntrials is the maximum\n number of trials allowed before failing.\n In rare cases, when multiple processes\n are accessing the same repository components, different processes\n can alter repository components between successive lock releases\n of some other process. Bigger number of trials lowers the\n likelyhood of failure due to multiple processes same time\n alteration.\n\n:Returns:\n #. success (boolean): Whether removing the directory was successful.\n #. reason (None, string): Reason why directory was not removed.", "id": "f13917:c1:m36"} {"signature": "@path_requireddef rename_directory(self, relativePath, newName, raiseError=True, ntrials=):", "body": "assert isinstance(raiseError, bool), \"\"assert isinstance(ntrials, int), \"\"assert ntrials>, \"\"relativePath = self.to_repo_relative_path(path=relativePath, split=False)parentPath, dirName = os.path.split(relativePath)if relativePath == '':error = \"\"assert not raiseError, errorreturn False, errorrealPath = os.path.join(self.__path,relativePath)newRealPath = os.path.join(os.path.dirname(realPath), newName)if os.path.isdir(newRealPath):error = \"\"%(newRealPath,)assert not raiseError, errorreturn False, errorLD = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(self.__path,parentPath, self.__dirLock))acquired, code = LD.acquire_lock()if not acquired:error = \"\"%(code,dirPath)assert not raiseError, errorreturn False, errorerror = NoneLR = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(self.__path, self.__repoLock))acquired, code = LR.acquire_lock()if not acquired:LD.release_lock()m = \"\"%(code,dirPath)assert raiseError, Exception(m)return False,mfor _trial in range(ntrials):try:repo = self.__load_repository_pickle_file(os.path.join(self.__path, self.__repoFile))self.__repo[''] = repo['']except Exception as err:error = str(err)if self.DEBUG_PRINT_FAILED_TRIALS: print(\"\"%(_trial, inspect.stack()[][], str(error)))else:error = Nonebreakif error is not None:LD.release_lock()LR.release_lock()assert not raiseError, Exception(error)return False, errorfor _trial in range(ntrials):error = Nonetry:dirList = self.__get_repository_parent_directory(relativePath=relativePath)assert dirList is not None, \"\"%(relativePath,)_dirDict = [nd for nd in dirList if isinstance(nd,dict)]_dirDict = [nd for nd in _dirDict if dirName in nd]assert len(_dirDict) == , \"\"os.rename(realPath, newRealPath)_dirDict[][newName] = _dirDict[][dirName]_dirDict[].pop(dirName)self.__save_dirinfo(description=None, dirInfoPath=parentPath, create=False)except Exception as err:error = str(err)if self.DEBUG_PRINT_FAILED_TRIALS: print(\"\"%(_trial, inspect.stack()[][], str(error)))else:error = Nonebreakif error is None:_, error = self.__save_repository_pickle_file(lockFirst=False, raiseError=False)LR.release_lock()LD.release_lock()assert error is None or not raiseError, \"\"%(relativePath, newName, ntrials, error,)return error is None, error", "docstring": "Rename a directory in the repository. It insures renaming the directory in the system.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path of\n the directory to be renamed.\n #. newName (string): The new directory name.\n #. 
raiseError (boolean): Whether to raise encountered error instead\n of returning failure.\n #. ntrials (int): After aquiring all locks, ntrials is the maximum\n number of trials allowed before failing.\n In rare cases, when multiple processes\n are accessing the same repository components, different processes\n can alter repository components between successive lock releases\n of some other process. Bigger number of trials lowers the\n likelyhood of failure due to multiple processes same time\n alteration.\n\n:Returns:\n #. success (boolean): Whether renaming the directory was successful.\n #. message (None, string): Some explanatory message or error reason\n why directory was not renamed.", "id": "f13917:c1:m37"} {"signature": "@path_requireddef copy_directory(self, relativePath, newRelativePath,overwrite=False, raiseError=True, ntrials=):", "body": "assert isinstance(raiseError, bool), \"\"assert isinstance(overwrite, bool), \"\"assert isinstance(ntrials, int), \"\"assert ntrials>, \"\"relativePath = self.to_repo_relative_path(path=relativePath, split=False)if relativePath == '':m = \"\"assert not raiseError, mreturn False, mrealPath = os.path.join(self.__path,relativePath)parentRealPath, dirName = os.path.split(realPath)parentRelativePath = os.path.dirname(relativePath)if not self.is_repository_directory(relativePath):m = \"\"%(relativePath)assert not raiseError, mreturn False, mnewRelativePath = self.to_repo_relative_path(path=newRelativePath, split=False)newRealPath = os.path.join(self.__path,newRelativePath)newParentRealPath, newDirName = os.path.split(newRealPath)newParentRelativePath = os.path.dirname(newRelativePath)if realPath == newRealPath:m = \"\"assert not raiseError, mreturn False, mif self.is_repository_directory(newRelativePath):m = \"\"%(newRelativePath)assert not raiseError, mreturn False, mif os.path.isdir(newRealPath):if overwrite:try:shutil.rmtree(newRealPath)except Exception as err:assert not raiseError, str(err)return False, str(err)else:error = \"\"%(newRealPath,)assert not raiseError, errorreturn False, errortry:success, reason = self.add_directory(newParentRelativePath, raiseError=False, ntrials=ntrials)except Exception as err:reason = \"\"%(str(err))success = Falseif not success:assert not raiseError, reasonreturn False, reasonLR = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(self.__path, self.__repoLock))acquired, code = LR.acquire_lock()if not acquired:m = \"\"%(code,)assert raiseError, Exception(m)return False,mtry:repo = self.__load_repository_pickle_file(os.path.join(self.__path, self.__repoFile))self.__repo[''] = repo['']except Exception as err:LR.release_lock()assert not raiseError, Exception(str(err))return False,mL0 = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(parentRealPath, self.__dirLock))acquired, code = L0.acquire_lock()if not acquired:LR.release_lock()error = \"\"%(code,dirPath)assert not raiseError, errorreturn False, errorL1 = Noneif parentRealPath != newParentRealPath:L1 = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(newParentRealPath, self.__dirLock))acquired, code = L1.acquire_lock()if not acquired:L0.release_lock()LR.release_lock()error = \"\"%(code,dirPath)assert not raiseError, errorreturn False, errorerror = Nonefor _trial in range(ntrials):try:assert self.is_repository_directory(relativePath), \"\"%(relativePath)assert not self.is_repository_directory(newRelativePath), \"\"%(relativePath)dirList = 
self.__get_repository_parent_directory(relativePath=relativePath)assert dirList is not None, \"\"%(relativePath,)newDirList = self.__get_repository_parent_directory(relativePath=newRelativePath)assert newDirList is not None, \"\"%(newRelativePath,)_dirDict = [nd for nd in dirList if isinstance(nd,dict)]_dirDict = [nd for nd in _dirDict if dirName in nd]assert len(_dirDict) == , \"\"_newDirDict = [nd for nd in newDirList if isinstance(nd,dict)]_newDirDict = [nd for nd in _newDirDict if newDirName in nd]assert len(_newDirDict) == , \"\"_newDirDict = copy.deepcopy(_dirDict[])if dirName != newDirName:_newDirDict[newDirName] = _newDirDict.pop(dirName)copy_tree(realPath, newRealPath)newDirList.append(_newDirDict)self.__save_dirinfo(description=None, dirInfoPath=newParentRelativePath, create=False)except Exception as err:error = str(err)if self.DEBUG_PRINT_FAILED_TRIALS: print(\"\"%(_trial, inspect.stack()[][], str(error)))else:error = Nonebreakif error is None:_, error = self.__save_repository_pickle_file(lockFirst=False, raiseError=False)LR.release_lock()L0.release_lock()if L1 is not None:L1.release_lock()assert error is None or not raiseError, \"\"%(relativePath, newRelativePath, ntrials, error,)return error is None, error", "docstring": "Copy a directory in the repository. New directory must not exist.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path of\n the directory to be copied.\n #. newRelativePath (string): The new directory relative path.\n #. overwrite (boolean): Whether to overwrite existing but not tracked\n directory in repository.\n #. raiseError (boolean): Whether to raise encountered error instead\n of returning failure.\n #. ntrials (int): After aquiring all locks, ntrials is the maximum\n number of trials allowed before failing.\n In rare cases, when multiple processes\n are accessing the same repository components, different processes\n can alter repository components between successive lock releases\n of some other process. Bigger number of trials lowers the\n likelyhood of failure due to multiple processes same time\n alteration.\n\n:Returns:\n #. success (boolean): Whether renaming the directory was successful.\n #. 
message (None, string): Some explanatory message or error reason\n why directory was not renamed.", "id": "f13917:c1:m38"} {"signature": "@path_requireddef dump_file(self, value, relativePath,description=None,dump=None, pull=None,replace=False, raiseError=True, ntrials=):", "body": "assert isinstance(raiseError, bool), \"\"assert isinstance(replace, bool), \"\"assert isinstance(ntrials, int), \"\"assert ntrials>, \"\"if description is None:description = ''assert isinstance(description, basestring), \"\"if pull is None and dump is not None:if dump.startswith('') or dump.startswith('') or dump.startswith('') or dump =='':pull = dumpdump = get_dump_method(dump, protocol=self._DEFAULT_PICKLE_PROTOCOL)pull = get_pull_method(pull)relativePath = self.to_repo_relative_path(path=relativePath, split=False)savePath = os.path.join(self.__path,relativePath)fPath, fName = os.path.split(savePath)success, reason = self.is_name_allowed(savePath)if not success:assert not raiseError, reasonreturn False, reasontry:success, reason = self.add_directory(fPath, raiseError=False, ntrials=ntrials)except Exception as err:reason = \"\"%(str(err))success = Falseif not success:assert not raiseError, reasonreturn False, reasonLR = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(self.__path, self.__repoLock))acquired, code = LR.acquire_lock()if not acquired:m = \"\"%(code,)assert raiseError, Exception(m)return False,mLF = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(fPath,self.__fileLock%fName))acquired, code = LF.acquire_lock()if not acquired:LR.release_lock()error = \"\"%(code,relativePath)assert not raiseError, errorreturn False, errorfor _trial in range(ntrials):try:repo = self.__load_repository_pickle_file(os.path.join(self.__path, self.__repoFile))self.__repo[''] = repo['']except Exception as err:error = str(err)if self.DEBUG_PRINT_FAILED_TRIALS: print(\"\"%(_trial, inspect.stack()[][], str(error)))else:error = Nonebreakif error is not None:LR.release_lock()LF.release_lock()assert not raiseError, Exception(error)return False, errorfor _trial in range(ntrials):error = Nonetry:isRepoFile, fileOnDisk, infoOnDisk, classOnDisk = self.is_repository_file(relativePath)if isRepoFile:assert replace, \"\"fileInfoPath = os.path.join(self.__path,os.path.dirname(relativePath),self.__fileInfo%fName)if isRepoFile and fileOnDisk:with open(fileInfoPath, '') as fd:info = pickle.load(fd)assert info[''] == self.__repo[''], \"\"info[''] = time.time()else:info = {'':self.__repo['']}info[''] = info[''] = time.time()info[''] = dumpinfo[''] = pullinfo[''] = descriptionif not isRepoFile:dirList = self.__get_repository_directory(fPath)my_exec( dump.replace(\"\", str(savePath)), locals=locals(), globals=globals(), description='' )with open(fileInfoPath, '') as fd:pickle.dump( info,fd, protocol=self._DEFAULT_PICKLE_PROTOCOL)fd.flush()os.fsync(fd.fileno())fileClassPath = os.path.join(self.__path,os.path.dirname(relativePath),self.__fileClass%fName)with open(fileClassPath, '') as fd:if value is None:klass = Noneelse:klass = value.__class__pickle.dump(klass , fd, protocol=self._DEFAULT_PICKLE_PROTOCOL )fd.flush()os.fsync(fd.fileno())if not isRepoFile:dirList.append(fName)except Exception as err:error = \"\"%(str(err),)try:if '' in dump:mi = get_pickling_errors(value)if mi is not None:error += ''%str(mi)except:passif self.DEBUG_PRINT_FAILED_TRIALS: print(\"\"%(_trial, inspect.stack()[][], str(error)))else:error = Nonebreakif error is None:_, error = 
self.__save_repository_pickle_file(lockFirst=False, raiseError=False)LR.release_lock()LF.release_lock()assert not raiseError or error is None, \"\"%(relativePath, ntrials, error,)return success, error", "docstring": "Dump a file using its value to the system and creates its\nattribute in the Repository with utc timestamp.\n\n:Parameters:\n #. value (object): The value of a file to dump and add to the\n repository. It is any python object or file.\n #. relativePath (str): The relative to the repository path to where\n to dump the file.\n #. description (None, string): Any description about the file.\n #. dump (None, string): The dumping method.\n If None it will be set automatically to pickle and therefore the\n object must be pickleable. If a string is given, it can be a\n keyword ('json','pickle','dill') or a string compileable code to\n dump the data. The string code must include all the necessary\n imports and a '$FILE_PATH' that replaces the absolute file path\n when the dumping will be performed.\\n\n e.g. \"import numpy as np; np.savetxt(fname='$FILE_PATH', X=value, fmt='%.6e')\"\n #. pull (None, string): The pulling method. If None it will be set\n automatically to pickle and therefore the object must be\n pickleable. If a string is given, it can be a keyword\n ('json','pickle','dill') or a string compileable code to pull\n the data. The string code must include all the necessary imports,\n a '$FILE_PATH' that replaces the absolute file path when the\n dumping will be performed and finally a PULLED_DATA variable.\\n\n e.g \"import numpy as np; PULLED_DATA=np.loadtxt(fname='$FILE_PATH')\"\n #. replace (boolean): Whether to replace any existing file.\n #. raiseError (boolean): Whether to raise encountered error instead\n of returning failure.\n #. ntrials (int): After aquiring all locks, ntrials is the maximum\n number of trials allowed before failing.\n In rare cases, when multiple processes\n are accessing the same repository components, different processes\n can alter repository components between successive lock releases\n of some other process. Bigger number of trials lowers the\n likelyhood of failure due to multiple processes same time\n alteration.\n\n:Returns:\n #. success (boolean): Whether renaming the directory was successful.\n #. 
message (None, string): Some explanatory message or error reason\n why directory was not dumped.", "id": "f13917:c1:m39"} {"signature": "def dump(self, *args, **kwargs):", "body": "return self.dump_file(*args, **kwargs)", "docstring": "Alias to dump_file", "id": "f13917:c1:m40"} {"signature": "@path_requireddef copy_file(self, relativePath, newRelativePath,force=False, raiseError=True, ntrials=):", "body": "assert isinstance(raiseError, bool), \"\"assert isinstance(force, bool), \"\"assert isinstance(ntrials, int), \"\"assert ntrials>, \"\"relativePath = self.to_repo_relative_path(path=relativePath, split=False)realPath = os.path.join(self.__path,relativePath)fPath, fName = os.path.split(realPath)newRelativePath = self.to_repo_relative_path(path=newRelativePath, split=False)newRealPath = os.path.join(self.__path,newRelativePath)nfPath, nfName = os.path.split(newRealPath)LO = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(fPath,self.__fileLock%fName))acquired, code = LO.acquire_lock()if not acquired:error = \"\"%(code,relativePath)assert not raiseError, errorreturn False, errortry:success, reason = self.add_directory(nfPath, raiseError=False, ntrials=ntrials)except Exception as err:reason = \"\"%(str(err))success = Falseif not success:LO.release_lock()assert not raiseError, reasonreturn False, reasonLN = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(nfPath,self.__fileLock%nfName))acquired, code = LN.acquire_lock()if not acquired:LO.release_lock()error = \"\"%(code,newRelativePath)assert not raiseError, errorreturn False, errorfor _trial in range(ntrials):copied = Falseerror = Nonetry:isRepoFile,fileOnDisk, infoOnDisk, classOnDisk = self.is_repository_file(relativePath)assert isRepoFile, \"\"%(relativePath,)assert fileOnDisk, \"\"%(relativePath,)assert infoOnDisk, \"\"%self.__fileInfo%fNameassert classOnDisk, \"\"%self.__fileClass%fNamenisRepoFile,nfileOnDisk,ninfoOnDisk,nclassOnDisk = self.is_repository_file(newRelativePath)assert not nisRepoFile or force, \"\"nDirList = self.__get_repository_directory(nfPath)if os.path.isfile(newRealPath):os.remove(newRealPath)if os.path.isfile(os.path.join(nfPath,self.__fileInfo%nfName)):os.remove(os.path.join(nfPath,self.__fileInfo%nfName))if os.path.isfile(os.path.join(nfPath,self.__fileClass%nfName)):os.remove(os.path.join(nfPath,self.__fileClass%nfName))shutil.copy(realPath, newRealPath)shutil.copy(os.path.join(fPath,self.__fileInfo%fName), os.path.join(nfPath,self.__fileInfo%nfName))shutil.copy(os.path.join(fPath,self.__fileClass%fName), os.path.join(nfPath,self.__fileClass%nfName))if nfName not in nDirList:nDirList.append(nfName)except Exception as err:copied = Falseerror = str(err)if self.DEBUG_PRINT_FAILED_TRIALS: print(\"\"%(_trial, inspect.stack()[][], str(error)))else:error = Nonecopied = TruebreakLO.release_lock()LN.release_lock()assert copied or not raiseError, \"\"%(relativePath, newRelativePath, ntrials, error,)return copied, ''.join(message)", "docstring": "Copy a file in the repository.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path of\n the file that needst to be renamed.\n #. newRelativePath (string): The new relative to the repository path\n of where to move and rename the file.\n #. force (boolean): Whether to force renaming even when another\n repository file exists. In this case old repository file\n will be removed from the repository and the system as well.\n #. 
raiseError (boolean): Whether to raise encountered error instead\n of returning failure.\n #. ntrials (int): After aquiring all locks, ntrials is the maximum\n number of trials allowed before failing.\n In rare cases, when multiple processes\n are accessing the same repository components, different processes\n can alter repository components between successive lock releases\n of some other process. Bigger number of trials lowers the\n likelyhood of failure due to multiple processes same time\n alteration.\n\n:Returns:\n #. success (boolean): Whether renaming the file was successful.\n #. message (None, string): Some explanatory message or error reason\n why directory was not updated.", "id": "f13917:c1:m41"} {"signature": "@path_requireddef update_file(self, value, relativePath, description=False,dump=False, pull=False, raiseError=True, ntrials=):", "body": "assert isinstance(raiseError, bool), \"\"assert description is False or description is None or isinstance(description, basestring), \"\"assert dump is False or dump is None or isinstance(dump, basestring), \"\"assert pull is False or pull is None or isinstance(pull, basestring), \"\"assert isinstance(ntrials, int), \"\"assert ntrials>, \"\"relativePath = self.to_repo_relative_path(path=relativePath, split=False)savePath = os.path.join(self.__path,relativePath)fPath, fName = os.path.split(savePath)LF = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(fPath,self.__fileLock%fName))acquired, code = LF.acquire_lock()if not acquired:error = \"\"%(code,relativePath)assert not raiseError, errorreturn False, errorfor _trial in range(ntrials):message = []updated = Falsetry:isRepoFile, fileOnDisk, infoOnDisk, classOnDisk = self.is_repository_file(relativePath)assert isRepoFile, \"\"%(relativePath,)if not fileOnDisk:assert description is not False, \"\"%(relativePath,)assert dump is not False, \"\"%(relativePath,)assert pull is not False, \"\"%(relativePath,)info = {}info[''] = self.__repo['']info[''] = info[''] = time.time()else:with open(os.path.join(fPath,self.__fileInfo%fName), '') as fd:info = pickle.load(fd)info[''] = time.time()if not fileOnDisk:message.append(\"\"%relativePath)if not infoOnDisk:message.append(\"\"%self.__fileInfo%fName)if not classOnDisk:message.append(\"\"%self.__fileClass%fName)if (description is False) or (dump is False) or (pull is False):if description is False:description = info['']elif description is None:description = ''if dump is False:dump = info['']elif dump is None:dump = get_dump_method(dump, protocol=self._DEFAULT_PICKLE_PROTOCOL)if pull is False:pull = info['']elif pull is None:pull = get_pull_method(pull)info[''] = dumpinfo[''] = pullinfo[''] = descriptionmy_exec( dump.replace(\"\", str(savePath)), locals=locals(), globals=globals(), description='' )_path = os.path.join(fPath,self.__fileInfo%fName)with open(_path, '') as fd:pickle.dump( info,fd, protocol=self._DEFAULT_PICKLE_PROTOCOL )fd.flush()os.fsync(fd.fileno())fileClassPath = os.path.join(self.__path,os.path.dirname(relativePath),self.__fileClass%fName)with open(fileClassPath, '') as fd:if value is None:klass = Noneelse:klass = value.__class__pickle.dump(klass , fd, protocol=self._DEFAULT_PICKLE_PROTOCOL )fd.flush()os.fsync(fd.fileno())except Exception as err:message.append(str(err))updated = Falsetry:if '' in dump:mi = get_pickling_errors(value)if mi is not None:message.append(''%str(mi))except:passif self.DEBUG_PRINT_FAILED_TRIALS: print(\"\"%(_trial, inspect.stack()[][], ''.join(message)))else:updated = 
TruebreakLF.release_lock()assert updated or not raiseError, \"\"%(relativePath, ''.join(message),)return updated, ''.join(message)", "docstring": "Update the value of a file that is already in the Repository.\\n\n If file is not registered in repository, and error will be thrown.\\n\n If file is missing in the system, it will be regenerated as dump method\n is called.\n Unlike dump_file, update_file won't block the whole repository but only\n the file being updated.\n\n :Parameters:\n #. value (object): The value of a file to update.\n #. relativePath (str): The relative to the repository path of the\n file to be updated.\n #. description (False, string): Any random description about the file.\n If False is given, the description info won't be updated,\n otherwise it will be update to what description argument value is.\n #. dump (False, string): The new dump method. If False is given,\n the old one will be used.\n #. pull (False, string): The new pull method. If False is given,\n the old one will be used.\n #. raiseError (boolean): Whether to raise encountered error instead\n of returning failure.\n #. ntrials (int): After aquiring all locks, ntrials is the maximum\n number of trials allowed before failing.\n In rare cases, when multiple processes\n are accessing the same repository components, different processes\n can alter repository components between successive lock releases\n of some other process. Bigger number of trials lowers the\n likelyhood of failure due to multiple processes same time\n alteration.\n\n:Returns:\n #. success (boolean): Whether renaming the directory was successful.\n #. message (None, string): Some explanatory message or error reason\n why directory was not updated.", "id": "f13917:c1:m42"} {"signature": "def update(self, *args, **kwargs):", "body": "return self.update_file(*args, **kwargs)", "docstring": "Alias to update_file", "id": "f13917:c1:m43"} {"signature": "@path_requireddef pull_file(self, relativePath, pull=None, update=True, ntrials=):", "body": "assert isinstance(ntrials, int), \"\"assert ntrials>, \"\"relativePath = self.to_repo_relative_path(path=relativePath, split=False)realPath = os.path.join(self.__path,relativePath)fPath, fName = os.path.split(realPath)isRepoFile,fileOnDisk, infoOnDisk, classOnDisk = self.is_repository_file(relativePath)if not isRepoFile:fileOnDisk = [\"\",\"\"][fileOnDisk]infoOnDisk = [\"\",\"\"%self.__fileInfo%fName][infoOnDisk]classOnDisk = [\"\",\"\"%self.__fileClass%fName][classOnDisk]assert False, \"\"%(relativePath,fileOnDisk,infoOnDisk,classOnDisk)assert fileOnDisk, \"\"%(relativePath,)if not infoOnDisk:if pull is not None:warnings.warn(\"\"%(self.__fileInfo%fName))else:raise Exception(\"\"%(relativePath,(self.__fileInfo%fName)))LF = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(fPath,self.__fileLock%fName))acquired, code = LF.acquire_lock()if not acquired:error = \"\"%(code,relativePath)return False, errorfor _trial in range(ntrials):error = Nonetry:if pull is not None:pull = get_pull_method(pull)else:with open(os.path.join(fPath,self.__fileInfo%fName), '') as fd:info = pickle.load(fd)pull = info['']my_exec( pull.replace(\"\", str(realPath) ), locals=locals(), globals=globals(), description='' )except Exception as err:LF.release_lock()m = str(pull).replace(\"\", str(realPath) )error = \"\"%(m,err)if self.DEBUG_PRINT_FAILED_TRIALS: print(\"\"%(_trial, inspect.stack()[][], str(error)))else:breakLF.release_lock()assert error is None, \"\"%(ntrials, error)return locals()['']", "docstring": 
"Pull a file's data from the Repository.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path from\n where to pull the file.\n #. pull (None, string): The pulling method.\n If None, the pull method saved in the file info will be used.\n If a string is given, the string should include all the necessary\n imports, a '$FILE_PATH' that replaces the absolute file path when\n the dumping will be performed and finally a PULLED_DATA variable.\n e.g \"import numpy as np; PULLED_DATA=np.loadtxt(fname='$FILE_PATH')\"\n #. update (boolean): If pull is not None, Whether to update the pull\n method stored in the file info by the given pull method.\n #. ntrials (int): After aquiring all locks, ntrials is the maximum\n number of trials allowed before failing.\n In rare cases, when multiple processes\n are accessing the same repository components, different processes\n can alter repository components between successive lock releases\n of some other process. Bigger number of trials lowers the\n likelyhood of failure due to multiple processes same time\n alteration.\n\n:Returns:\n #. data (object): The pulled data from the file.", "id": "f13917:c1:m44"} {"signature": "def pull(self, *args, **kwargs):", "body": "return self.pull_file(*args, **kwargs)", "docstring": "Alias to pull_file", "id": "f13917:c1:m45"} {"signature": "@path_requireddef rename_file(self, relativePath, newRelativePath,force=False, raiseError=True, ntrials=):", "body": "assert isinstance(raiseError, bool), \"\"assert isinstance(force, bool), \"\"assert isinstance(ntrials, int), \"\"assert ntrials>, \"\"relativePath = self.to_repo_relative_path(path=relativePath, split=False)realPath = os.path.join(self.__path,relativePath)fPath, fName = os.path.split(realPath)newRelativePath = self.to_repo_relative_path(path=newRelativePath, split=False)newRealPath = os.path.join(self.__path,newRelativePath)nfPath, nfName = os.path.split(newRealPath)LO = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(fPath,self.__fileLock%fName))acquired, code = LO.acquire_lock()if not acquired:error = \"\"%(code,relativePath)assert not raiseError, errorreturn False, errortry:success, reason = self.add_directory(nfPath, raiseError=False, ntrials=ntrials)except Exception as err:reason = \"\"%(str(err))success = Falseif not success:LO.release_lock()assert not raiseError, reasonreturn False, reasonLN = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(nfPath,self.__fileLock%nfName))acquired, code = LN.acquire_lock()if not acquired:LO.release_lock()error = \"\"%(code,newRelativePath)assert not raiseError, errorreturn False, errorfor _trial in range(ntrials):renamed = Falseerror = Nonetry:isRepoFile,fileOnDisk, infoOnDisk, classOnDisk = self.is_repository_file(relativePath)assert isRepoFile, \"\"%(relativePath,)assert fileOnDisk, \"\"%(relativePath,)assert infoOnDisk, \"\"%self.__fileInfo%fNameassert classOnDisk, \"\"%self.__fileClass%fNamenisRepoFile,nfileOnDisk,ninfoOnDisk,nclassOnDisk = self.is_repository_file(newRelativePath)assert not nisRepoFile or force, \"\"oDirList = self.__get_repository_directory(fPath)nDirList = self.__get_repository_directory(nfPath)if os.path.isfile(newRealPath):os.remove(newRealPath)if os.path.isfile(os.path.join(nfPath,self.__fileInfo%nfName)):os.remove(os.path.join(nfPath,self.__fileInfo%nfName))if os.path.isfile(os.path.join(nfPath,self.__fileClass%nfName)):os.remove(os.path.join(nfPath,self.__fileClass%nfName))os.rename(realPath, 
newRealPath)os.rename(os.path.join(fPath,self.__fileInfo%fName), os.path.join(nfPath,self.__fileInfo%nfName))os.rename(os.path.join(fPath,self.__fileClass%fName), os.path.join(nfPath,self.__fileClass%nfName))findex = oDirList.index(fName)oDirList.pop(findex)if nfName not in nDirList:nDirList.append(nfName)except Exception as err:renamed = Falseerror = str(err)if self.DEBUG_PRINT_FAILED_TRIALS: print(\"\"%(_trial, inspect.stack()[][], str(error)))else:renamed = TruebreakLO.release_lock()LN.release_lock()try:if os.path.isfile(os.path.join(fPath,self.__fileLock%fName)):os.remove(os.path.join(fPath,self.__fileLock%fName))except:passassert renamed or not raiseError, \"\"%(relativePath, newRelativePath, ntrials, error,)return renamed, error", "docstring": "Rename a file in the repository. It insures renaming the file in the system.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path of\n the file that needst to be renamed.\n #. newRelativePath (string): The new relative to the repository path\n of where to move and rename the file.\n #. force (boolean): Whether to force renaming even when another\n repository file exists. In this case old repository file\n will be removed from the repository and the system as well.\n #. raiseError (boolean): Whether to raise encountered error instead\n of returning failure.\n #. ntrials (int): After aquiring all locks, ntrials is the maximum\n number of trials allowed before failing.\n In rare cases, when multiple processes\n are accessing the same repository components, different processes\n can alter repository components between successive lock releases\n of some other process. Bigger number of trials lowers the\n likelyhood of failure due to multiple processes same time\n alteration.\n\n:Returns:\n #. success (boolean): Whether renaming the file was successful.\n #. 
message (None, string): Some explanatory message or error reason\n why directory was not updated.", "id": "f13917:c1:m46"} {"signature": "@path_requireddef remove_file(self, relativePath, removeFromSystem=False,raiseError=True, ntrials=):", "body": "assert isinstance(raiseError, bool), \"\"assert isinstance(removeFromSystem, bool), \"\"assert isinstance(ntrials, int), \"\"assert ntrials>, \"\"relativePath = self.to_repo_relative_path(path=relativePath, split=False)realPath = os.path.join(self.__path,relativePath)fPath, fName = os.path.split(realPath)LF = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(fPath,self.__fileLock%fName))acquired, code = LF.acquire_lock()if not acquired:error = \"\"%(code,relativePath)assert not raiseError, errorreturn False, errorfor _trial in range(ntrials):removed = Falsemessage = []try:isRepoFile,fileOnDisk, infoOnDisk, classOnDisk = self.is_repository_file(relativePath)if not isRepoFile:message(\"\"%(relativePath,))if fileOnDisk:message.append(\"\")if infoOnDisk:message.append(\"\"%self.__fileInfo%fName)if classOnDisk:message.append(\"\"%self.__fileClass%fName)else:dirList = self.__get_repository_directory(fPath)findex = dirList.index(fName)dirList.pop(findex)if os.path.isfile(realPath):os.remove(realPath)if os.path.isfile(os.path.join(fPath,self.__fileInfo%fName)):os.remove(os.path.join(fPath,self.__fileInfo%fName))if os.path.isfile(os.path.join(fPath,self.__fileClass%fName)):os.remove(os.path.join(fPath,self.__fileClass%fName))except Exception as err:removed = Falsemessage.append(str(err))if self.DEBUG_PRINT_FAILED_TRIALS: print(\"\"%(_trial, inspect.stack()[][], ''.join(message)))else:removed = TruebreakLF.release_lock()try:if os.path.isfile(os.path.join(fPath,self.__fileLock%fName)):os.remove(os.path.join(fPath,self.__fileLock%fName))except:passassert removed or not raiseError, \"\"%(relativePath, ntrials, ''.join(message),)return removed, ''.join(message)", "docstring": "Remove file from repository.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path of the\n file to remove.\n #. removeFromSystem (boolean): Whether to remove file from disk as\n well.\n #. raiseError (boolean): Whether to raise encountered error instead\n of returning failure.\n #. ntrials (int): After aquiring all locks, ntrials is the maximum\n number of trials allowed before failing.\n In rare cases, when multiple processes\n are accessing the same repository components, different processes\n can alter repository components between successive lock releases\n of some other process. 
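A usage sketch for the rename_file method documented in the record above (f13917:c1:m46). The import path, the constructor argument, the repository location, and the file names are assumptions for illustration only; they are not taken from this record.

    from pyrep import Repository   # assumed import; not confirmed by this record

    repo = Repository(path='/tmp/my_repo')                   # assumed constructor; an existing repository is expected
    renamed, error = repo.rename_file('data/old_name.txt',   # registered repository file (placeholder)
                                      'data/new_name.txt',   # new relative path and name (placeholder)
                                      force=False,           # do not overwrite another registered file
                                      raiseError=False,      # return (False, reason) instead of raising
                                      ntrials=3)             # retries after all locks are acquired, as documented
    if not renamed:
        print('rename failed:', error)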
Bigger number of trials lowers the\n likelyhood of failure due to multiple processes same time\n alteration.", "id": "f13917:c1:m47"} {"signature": "def path_required(func):", "body": "@wraps(func)def wrapper(self, *args, **kwargs):if self.path is None:warnings.warn('')returnreturn func(self, *args, **kwargs)return wrapper", "docstring": "Decorate methods when repository path is required.", "id": "f13923:m0"} {"signature": "def acquire_lock(func):", "body": "@wraps(func)def wrapper(self, *args, **kwargs):with self.locker as r:acquired, code, _ = rif acquired:try:r = func(self, *args, **kwargs)except Exception as err:e = str(err)else:e = Noneelse:warnings.warn(\"\"%(code,func.__name__) )e = Noner = Noneif e is not None:traceback.print_stack()raise Exception(e)return rreturn wrapper", "docstring": "Decorate methods when locking repository is required.", "id": "f13923:m1"} {"signature": "def sync_required(func):", "body": "@wraps(func)def wrapper(self, *args, **kwargs):if not self._keepSynchronized:r = func(self, *args, **kwargs)else:state = self._load_state()if state is None:r = func(self, *args, **kwargs)elif state == self.state:r = func(self, *args, **kwargs)else:warnings.warn(\"\"%self.path)r = Nonereturn rreturn wrapper", "docstring": "Decorate methods when synchronizing repository is required.", "id": "f13923:m2"} {"signature": "def get_pickling_errors(obj, seen=None):", "body": "if seen == None:seen = []if hasattr(obj, \"\"):state = obj.__getstate__()else:return Noneif state == None:return ''if isinstance(state,tuple):if not isinstance(state[], dict):state=state[]else:state=state[].update(state[])result = {}for i in state:try:pickle.dumps(state[i], protocol=)except pickle.PicklingError as e:if not state[i] in seen:seen.append(state[i])result[i]=get_pickling_errors(state[i],seen)return result", "docstring": "Investigate pickling errors.", "id": "f13923:m3"} {"signature": "@propertydef state(self):", "body": "return self.__state", "docstring": "Repository state.", "id": "f13923:c0:m10"} {"signature": "@propertydef locker(self):", "body": "return self.__locker", "docstring": "Repository locker manager.", "id": "f13923:c0:m11"} {"signature": "@propertydef path(self):", "body": "return self.__path", "docstring": "The repository instance path which points to the folder and\n directory where .pyrepinfo is.", "id": "f13923:c0:m12"} {"signature": "@propertydef info(self):", "body": "return copy.deepcopy( self.__info )", "docstring": "The unique user defined information of this repository instance.", "id": "f13923:c0:m13"} {"signature": "@propertydef version(self):", "body": "return dict.__getitem__(self,\"\")", "docstring": "The version of this repository.", "id": "f13923:c0:m14"} {"signature": "@propertydef id(self):", "body": "return dict.__getitem__(self,\"\")", "docstring": "The universally unique id of this repository.", "id": "f13923:c0:m15"} {"signature": "def set_ACID(self, ACID):", "body": "assert isinstance(ACID, bool), \"\"self.__ACID = ACID", "docstring": "Set the gobal ACID poperty of the repository.\n\n:parameters:\n #. ACID (boolean): Whether to ensure the ACID (Atomicity, Consistency, Isolation, Durability)\n properties of the repository upon dumping a file. 
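The path_required decorator recorded above guards methods that must not run before a repository path is set. Below is a minimal, self-contained sketch of the same pattern; the Demo class and the warning text are illustrative, not part of the original record.

    import warnings
    from functools import wraps

    def path_required(func):
        """Skip the decorated method with a warning when no repository path is set."""
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            if self.path is None:
                warnings.warn("repository path is not set, '%s' is ignored" % func.__name__)
                return
            return func(self, *args, **kwargs)
        return wrapper

    class Demo(object):
        def __init__(self, path=None):
            self.path = path

        @path_required
        def show(self):
            print('repository path is', self.path)

    Demo().show()             # no path set: only warns, method body is skipped
    Demo('/tmp/repo').show()  # path set: prints the path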
This is ensured by dumping the file in\n a temporary path first and then moving it to the desired path.", "id": "f13923:c0:m16"} {"signature": "def get_list_representation(self):", "body": "if self.__path is None:return []repr = [ self.__path+\"\"+''.join(list(dict.__getitem__(self, '')))+'' ]for directory in sorted(list(self.walk_directories_relative_path())):directoryRepr = os.path.normpath(directory)dirInfoDict, errorMessage = self.get_directory_info(directory)assert dirInfoDict is not None, errorMessagedirectoryRepr += \"\"+''.join( list(dict.__getitem__(dirInfoDict, '')))+''repr.append(directoryRepr)return repr", "docstring": "Gets a representation of the Repository content in a list of directories(files) format.\n\n:Returns:\n #. repr (list): The list representation of the Repository content.", "id": "f13923:c0:m17"} {"signature": "def walk_files_relative_path(self, relativePath=\"\"):", "body": "def walk_files(directory, relativePath):directories = dict.__getitem__(directory, '')files = dict.__getitem__(directory, '')for f in sorted(files):yield os.path.join(relativePath, f)for k in sorted(dict.keys(directories)):path = os.path.join(relativePath, k)dir = directories.__getitem__(k)for e in walk_files(dir, path):yield edir, errorMessage = self.get_directory_info(relativePath)assert dir is not None, errorMessagereturn walk_files(dir, relativePath='')", "docstring": "Walk the repository and yield all found files relative path joined with file name.\n\n:parameters:\n #. relativePath (str): The relative path from which start the walk.", "id": "f13923:c0:m18"} {"signature": "def walk_files_info(self, relativePath=\"\"):", "body": "def walk_files(directory, relativePath):directories = dict.__getitem__(directory, '')files = dict.__getitem__(directory, '')for fname in sorted(files):info = dict.__getitem__(files,fname)yield os.path.join(relativePath, fname), infofor k in sorted(dict.keys(directories)):path = os.path.join(relativePath, k)dir = dict.__getitem__(directories, k)for e in walk_files(dir, path):yield edir, errorMessage = self.get_directory_info(relativePath)assert dir is not None, errorMessagereturn walk_files(dir, relativePath='')", "docstring": "Walk the repository and yield tuples as the following:\\n\n(relative path to relativePath joined with file name, file info dict).\n\n:parameters:\n #. relativePath (str): The relative path from which start the walk.", "id": "f13923:c0:m19"} {"signature": "def walk_directories_relative_path(self, relativePath=\"\"):", "body": "def walk_directories(directory, relativePath):directories = dict.__getitem__(directory, '')dirNames = dict.keys(directories)for d in sorted(dirNames):yield os.path.join(relativePath, d)for k in sorted(dict.keys(directories)):path = os.path.join(relativePath, k)dir = dict.__getitem__(directories, k)for e in walk_directories(dir, path):yield edir, errorMessage = self.get_directory_info(relativePath)assert dir is not None, errorMessagereturn walk_directories(dir, relativePath='')", "docstring": "Walk repository and yield all found directories relative path\n\n:parameters:\n #. 
relativePath (str): The relative path from which start the walk.", "id": "f13923:c0:m20"} {"signature": "def walk_directories_info(self, relativePath=\"\"):", "body": "def walk_directories(directory, relativePath):directories = dict.__getitem__(directory, '')for fname in sorted(directories):info = dict.__getitem__(directories,fname)yield os.path.join(relativePath, fname), infofor k in sorted(dict.keys(directories)):path = os.path.join(relativePath, k)dir = dict.__getitem__(directories, k)for e in walk_directories(dir, path):yield edir, errorMessage = self.get_directory_info(relativePath)assert dir is not None, errorMessagereturn walk_directories(dir, relativePath='')", "docstring": "Walk repository and yield all found directories relative path.\n\n:parameters:\n #. relativePath (str): The relative path from which start the walk.", "id": "f13923:c0:m21"} {"signature": "def walk_directory_files_relative_path(self, relativePath=\"\"):", "body": "relativePath = os.path.normpath(relativePath)dirInfoDict, errorMessage = self.get_directory_info(relativePath)assert dirInfoDict is not None, errorMessagefor fname in dict.__getitem__(dirInfoDict, \"\"):yield os.path.join(relativePath, fname)", "docstring": "Walk a certain directory in repository and yield all found\nfiles relative path joined with file name.\n\n:parameters:\n #. relativePath (str): The relative path of the directory.", "id": "f13923:c0:m22"} {"signature": "def walk_directory_files_info(self, relativePath=\"\"):", "body": "relativePath = os.path.normpath(relativePath)dirInfoDict, errorMessage = self.get_directory_info(relativePath)assert dirInfoDict is not None, errorMessagefor fname in dict.__getitem__(dirInfoDict, \"\"):yield os.path.join(relativePath, fname), dict.__getitem__(dirInfoDict, \"\")[fname]", "docstring": "Walk a certain directory in repository and yield tuples as the following:\\n\n(relative path joined with file name, file info dict).\n\n:parameters:\n #. relativePath (str): The relative path of the directory.", "id": "f13923:c0:m23"} {"signature": "def walk_directory_directories_relative_path(self, relativePath=\"\"):", "body": "errorMessage = \"\"relativePath = os.path.normpath(relativePath)dirInfoDict, errorMessage = self.get_directory_info(relativePath)assert dirInfoDict is not None, errorMessagefor dname in dict.__getitem__(dirInfoDict, \"\"):yield os.path.join(relativePath, dname)", "docstring": "Walk a certain directory in repository and yield all found directories relative path.\n\n:parameters:\n #. relativePath (str): The relative path of the directory.", "id": "f13923:c0:m24"} {"signature": "def walk_directory_directories_info(self, relativePath=\"\"):", "body": "relativePath = os.path.normpath(relativePath)dirInfoDict, errorMessage = self.get_directory_info(relativePath)assert dirInfoDict is not None, errorMessagefor fname in dict.__getitem__(dirInfoDict, \"\"):yield os.path.join(relativePath, fname), dict.__getitem__(dirInfoDict, \"\")[fname]", "docstring": "Walk a certain directory in repository and yield tuples as the following:\\n\n(relative path joined with directory name, file info dict).\n\n:parameters:\n #. 
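A sketch of iterating over repository content with the walk generators documented above. The import and the repository location are assumptions; the generator names and their yielded relative paths follow the docstrings in these records.

    from pyrep import Repository   # assumed import

    repo = Repository()
    repo.load_repository('/tmp/my_repo')                     # assumed existing repository on disk
    for file_path in repo.walk_files_relative_path():        # all registered files, as relative paths
        print('file:', file_path)
    for dir_path in repo.walk_directories_relative_path():   # all registered directories
        print('dir :', dir_path)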
relativePath (str): The relative path of the directory.", "id": "f13923:c0:m25"} {"signature": "@acquire_lockdef synchronize(self, verbose=False):", "body": "if self.__path is None:returnfor dirPath in sorted(list(self.walk_directories_relative_path())):realPath = os.path.join(self.__path, dirPath)if os.path.isdir(realPath):continueif verbose: warnings.warn(\"\"%realPath)keys = dirPath.split(os.sep)dirInfoDict = selffor idx in range(len(keys)-):dirs = dict.get(dirInfoDict, '', None)if dirs is None: breakdirInfoDict = dict.get(dirs, keys[idx], None)if dirInfoDict is None: breakif dirInfoDict is not None:dirs = dict.get(dirInfoDict, '', None)if dirs is not None:dict.pop( dirs, keys[-], None )for filePath in sorted(list(self.walk_files_relative_path())):realPath = os.path.join(self.__path, filePath)if os.path.isfile( realPath ):continueif verbose: warnings.warn(\"\"%realPath)keys = filePath.split(os.sep)dirInfoDict = selffor idx in range(len(keys)-):dirs = dict.get(dirInfoDict, '', None)if dirs is None: breakdirInfoDict = dict.get(dirs, keys[idx], None)if dirInfoDict is None: breakif dirInfoDict is not None:files = dict.get(dirInfoDict, '', None)if files is not None:dict.pop( files, keys[-], None )", "docstring": "Synchronizes the Repository information with the directory.\nAll registered but missing files and directories in the directory,\nwill be automatically removed from the Repository.\n\n:parameters:\n #. verbose (boolean): Whether to be warn and inform about any abnormalities.", "id": "f13923:c0:m26"} {"signature": "def load_repository(self, path):", "body": "if path.strip() in ('',''):path = os.getcwd()repoPath = os.path.realpath( os.path.expanduser(path) )if not self.is_repository(repoPath):raise Exception(\"\"%str(repoPath))repoInfoPath = os.path.join(repoPath, \"\")try:fd = open(repoInfoPath, '')except Exception as e:raise Exception(\"\"%e)L = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(repoPath, \"\"))acquired, code = L.acquire_lock()if not acquired:warnings.warn(\"\"%(code,) )returntry:try:repo = pickle.load( fd )except Exception as e:fd.close()raise Exception(\"\"%e)finally:fd.close()if not isinstance(repo, Repository):raise Exception(\"\"%s)else:self.__reset_repository()self.__update_repository(repo)self.__path = repoPathself.__state = self._get_or_create_state()except Exception as e:L.release_lock()raise Exception(e)finally:L.release_lock()self.__locker = Lreturn self", "docstring": "Load repository from a directory path and update the current instance.\n\n:Parameters:\n #. path (string): The path of the directory from where to load the repository.\n If '.' or an empty string is passed, the current working directory will be used.\n\n:Returns:\n #. 
repository (pyrep.Repository): returns self repository with loaded data.", "id": "f13923:c0:m27"} {"signature": "def connect(self, path):", "body": "return self.load_repository(path)", "docstring": "Alias to load_repository", "id": "f13923:c0:m28"} {"signature": "def create_repository(self, path, info=None, verbose=True):", "body": "try:info = copy.deepcopy( info )except:raise Exception(\"\")if path.strip() in ('',''):path = os.getcwd()realPath = os.path.realpath( os.path.expanduser(path) )if not os.path.isdir(realPath):os.makedirs(realPath)self.__path = realPathself.__info = infoif self.is_repository(realPath):if verbose:warnings.warn(\"\"%path)self.__reset_repository()lp = ''if self.__path is not None:lp = os.path.join(self.__path,lp)self.__locker.set_lock_path(lp)self.__locker.set_lock_pass(str(uuid.uuid1()))self.save()", "docstring": "create a repository in a directory.\nThis method insures the creation of the directory in the system if it is missing.\\n\n\n**N.B. This method erases existing pyrep repository in the path but not the repository files.**\n\n:Parameters:\n #. path (string): The real absolute path where to create the Repository.\n If '.' or an empty string is passed, the current working directory will be used.\n #. info (None, object): Any information that can identify the repository.\n #. verbose (boolean): Whether to be warn and informed about any abnormalities.", "id": "f13923:c0:m29"} {"signature": "def get_repository(self, path, info=None, verbose=True):", "body": "if path.strip() in ('',''):path = os.getcwd()realPath = os.path.realpath( os.path.expanduser(path) )if not os.path.isdir(realPath):os.makedirs(realPath)if not self.is_repository(realPath):self.create_repository(realPath, info=info, verbose=verbose)else:self.load_repository(realPath)", "docstring": "Create a repository at given real path or load any existing one.\nThis method insures the creation of the directory in the system if it is missing.\\n\nUnlike create_repository, this method doesn't erase any existing repository\nin the path but loads it instead.\n\n**N.B. On some systems and some paths, creating a directory may requires root permissions.**\n\n:Parameters:\n #. path (string): The real absolute path where to create the Repository.\n If '.' or an empty string is passed, the current working directory will be used.\n #. info (None, object): Any information that can identify the repository.\n #. 
verbose (boolean): Whether to be warn and informed about any abnormalities.", "id": "f13923:c0:m30"} {"signature": "def remove_repository(self, path=None, relatedFiles=False, relatedFolders=False, verbose=True):", "body": "if path is not None:realPath = os.path.realpath( os.path.expanduser(path) )else:realPath = self.__pathif realPath is None:if verbose: warnings.warn('')returnif not self.is_repository(realPath):if verbose: warnings.warn(\"\"%realPath)returnif realPath == os.path.realpath('') :if verbose: warnings.warn('')returnif path is not None:repo = Repository()repo.load_repository(realPath)else:repo = selfif relatedFiles:for relativePath in repo.walk_files_relative_path():realPath = os.path.join(repo.path, relativePath)if not os.path.isfile(realPath):continueif not os.path.exists(realPath):continueos.remove( realPath )if relatedFolders:for relativePath in reversed(list(repo.walk_directories_relative_path())):realPath = os.path.join(repo.path, relativePath)if not os.path.isdir(realPath):continueif not os.path.exists(realPath):continueif not len(os.listdir(realPath)):os.rmdir( realPath )os.remove( os.path.join(repo.path, \"\" ) )for fname in (\"\", \"\"):p = os.path.join(repo.path, fname )if os.path.exists( p ):os.remove( p )if os.path.isdir(repo.path):if not len(os.listdir(repo.path)):os.rmdir( repo.path )repo.__reset_repository()", "docstring": "Remove .pyrepinfo file from path if exists and related files and directories\nwhen respective flags are set to True.\n\n:Parameters:\n #. path (None, string): The path of the directory where to remove an existing repository.\n If None, current repository is removed if initialized.\n #. relatedFiles (boolean): Whether to also remove all related files from system as well.\n #. relatedFolders (boolean): Whether to also remove all related directories from system as well.\n Directories will be removed only if they are left empty after removing the files.\n #. 
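A create-or-load sketch using the get_repository method documented above, which creates the repository when none exists at the path and loads it otherwise. The import and the target directory are illustrative assumptions.

    from pyrep import Repository   # assumed import

    repo = Repository()
    repo.get_repository('/tmp/my_repo', info='example repository', verbose=True)  # create if missing, else load
    print(repo.path)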
verbose (boolean): Whether to be warn and informed about any abnormalities.", "id": "f13923:c0:m31"} {"signature": "@path_required@acquire_lock@sync_requireddef save(self):", "body": "repoInfoPath = os.path.join(self.__path, \"\")try:fdinfo = open(repoInfoPath, '')except Exception as e:raise Exception(\"\"%e)try:pickle.dump( self, fdinfo, protocol= )except Exception as e:fdinfo.flush()os.fsync(fdinfo.fileno())fdinfo.close()raise Exception( \"\"%e )finally:fdinfo.flush()os.fsync(fdinfo.fileno())fdinfo.close()repoTimePath = os.path.join(self.__path, \"\")try:self.__state = (\"\"%time.time()).encode()with open(repoTimePath, '') as fdtime:fdtime.write( self.__state )fdtime.flush()os.fsync(fdtime.fileno())except Exception as e:raise Exception(\"\"%e)", "docstring": "Save repository .pyrepinfo to disk.", "id": "f13923:c0:m32"} {"signature": "@path_requireddef create_package(self, path=None, name=None, mode=None):", "body": "assert mode in (None, '', '', '', ''), ''%str(mode)if mode is None:mode = ''mode = ''if path is None:root = os.path.split(self.__path)[]elif path.strip() in ('',''):root = os.getcwd()else:root = os.path.realpath( os.path.expanduser(path) )assert os.path.isdir(root), ''%pathif name is None:ext = mode.split(\"\")if len(ext) == :if len(ext[]):ext = \"\"+ext[]else:ext = ''else:ext = ''name = os.path.split(self.__path)[]+extself.save()tarfilePath = os.path.join(root, name)try:tarHandler = tarfile.TarFile.open(tarfilePath, mode=mode)except Exception as e:raise Exception(\"\"%e)for directory in sorted(list(self.walk_directories_relative_path())):t = tarfile.TarInfo( directory )t.type = tarfile.DIRTYPEtarHandler.addfile(t)for file in self.walk_files_relative_path():tarHandler.add(os.path.join(self.__path,file), arcname=file)tarHandler.add(os.path.join(self.__path,\"\"), arcname=\"\")tarHandler.close()", "docstring": "Create a tar file package of all the repository files and directories.\nOnly files and directories that are stored in the repository info\nare stored in the package tar file.\n\n**N.B. On some systems packaging requires root permissions.**\n\n:Parameters:\n #. path (None, string): The real absolute path where to create the package.\n If None, it will be created in the same directory as the repository\n If '.' or an empty string is passed, the current working directory will be used.\n #. name (None, string): The name to give to the package file\n If None, the package directory name will be used with the appropriate extension added.\n #. mode (None, string): The writing mode of the tarfile.\n If None, automatically the best compression mode will be chose.\n Available modes are ('w', 'w:', 'w:gz', 'w:bz2')", "id": "f13923:c0:m33"} {"signature": "def is_repository(self, path):", "body": "realPath = os.path.realpath( os.path.expanduser(path) )if not os.path.isdir(realPath):return Falseif \"\" not in os.listdir(realPath):return Falsereturn True", "docstring": "Check if there is a Repository in path.\n\n:Parameters:\n #. path (string): The real path of the directory where to check if there is a repository.\n\n:Returns:\n #. 
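The save method recorded above pickles the repository info, then flushes and fsyncs the file descriptor so the state actually reaches disk. A generic, self-contained sketch of that write-flush-fsync pattern follows; the file name and the state dictionary are illustrative.

    import os
    import pickle
    import tempfile

    state = {'version': 1, 'files': {}}                               # any picklable repository state
    path = os.path.join(tempfile.gettempdir(), '.pyrepinfo_demo')     # illustrative file name
    with open(path, 'wb') as fd:
        pickle.dump(state, fd, protocol=2)
        fd.flush()                    # push Python's buffers to the OS
        os.fsync(fd.fileno())         # ask the OS to persist the data before continuing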
result (boolean): Whether its a repository or not.", "id": "f13923:c0:m34"} {"signature": "def get_directory_info(self, relativePath):", "body": "relativePath = os.path.normpath(relativePath)if relativePath in ('',''):return self, \"\"currentDir = self.__pathdirInfoDict = selffor dir in relativePath.split(os.sep):dirInfoDict = dict.__getitem__(dirInfoDict, \"\")currentDir = os.path.join(currentDir, dir)if not os.path.exists(currentDir):return None, \"\"%currentDirval = dirInfoDict.get(dir, None)if val is None:return None, \"\"%currentDirdirInfoDict = valreturn dirInfoDict, \"\"", "docstring": "get directory info from the Repository.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path of the directory.\n\n:Returns:\n #. info (None, dictionary): The directory information dictionary.\n If None, it means an error has occurred.\n #. error (string): The error message if any error occurred.", "id": "f13923:c0:m35"} {"signature": "def get_parent_directory_info(self, relativePath):", "body": "relativePath = os.path.normpath(relativePath)if relativePath in ('',''):return self, \"\"parentDirPath, _ = os.path.split(relativePath)return self.get_directory_info(parentDirPath)", "docstring": "get parent directory info of a file or directory from the Repository.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path of the file or directory of which the parent directory info is requested.\n\n:Returns:\n #. info (None, dictionary): The directory information dictionary.\n If None, it means an error has occurred.\n #. error (string): The error message if any error occurred.", "id": "f13923:c0:m36"} {"signature": "def get_file_info(self, relativePath, name=None):", "body": "relativePath = os.path.normpath(relativePath)if relativePath == '':relativePath = ''assert name != '', \"\"if name is None:assert len(relativePath), \"\"relativePath,name = os.path.split(relativePath)errorMessage = \"\"dirInfoDict, errorMessage = self.get_directory_info(relativePath)if dirInfoDict is None:return None, errorMessagefileInfo = dict.__getitem__(dirInfoDict, \"\").get(name, None)if fileInfo is None:errorMessage = \"\"%(name, relativePath)return fileInfo, errorMessage", "docstring": "get file information dict from the repository given its relative path and name.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path of the directory where the file is.\n #. name (string): The file name.\n If None is given, name will be split from relativePath.\n\n:Returns:\n #. info (None, dictionary): The file information dictionary.\n If None, it means an error has occurred.\n #. errorMessage (string): The error message if any error occurred.", "id": "f13923:c0:m38"} {"signature": "def get_file_info_by_id(self, id):", "body": "for path, info in self.walk_files_info():if info['']==id:return path, inforeturn None, None", "docstring": "Given an id, get the corresponding file info as the following:\\n\n(relative path joined with file name, file info dict)\n\nParameters:\n #. id (string): The file unique id string.\n\n:Returns:\n #. relativePath (string): The file relative path joined with file name.\n If None, it means file was not found.\n #. 
info (None, dictionary): The file information dictionary.\n If None, it means file was not found.", "id": "f13923:c0:m39"} {"signature": "def get_file_relative_path_by_id(self, id):", "body": "for path, info in self.walk_files_info():if info['']==id:return pathreturn None", "docstring": "Given an id, get the corresponding file info relative path joined with file name.\n\nParameters:\n #. id (string): The file unique id string.\n\n:Returns:\n #. relativePath (string): The file relative path joined with file name.\n If None, it means file was not found.", "id": "f13923:c0:m40"} {"signature": "def get_file_relative_path_by_name(self, name, skip=):", "body": "if skip is None:paths = []else:paths = Nonefor path, info in self.walk_files_info():_, n = os.path.split(path)if n==name:if skip is None:paths.append(path)elif skip>:skip -= else:paths = pathbreakreturn paths", "docstring": "Get file relative path given the file name. If file name is redundant in different\ndirectories in the repository, this method ensures to return all or some of the\nfiles according to skip value.\n\nParameters:\n #. name (string): The file name.\n #. skip (None, integer): As file names can be identical, skip determines\n the number of satisfying files name to skip before returning.\\n\n If None is given, a list of all files relative path will be returned.\n\n:Returns:\n #. relativePath (string, list): The file relative path.\n If None, it means file was not found.\\n\n If skip is None a list of all found files relative paths will be returned.", "id": "f13923:c0:m41"} {"signature": "def get_file_info_by_name(self, name, skip=):", "body": "if skip is None:paths = []infos = []else:paths = Noneinfos = Nonefor path, info in self.walk_files_info():_, n = os.path.split(path)if n==name:if skip is None:paths.append(path)infos.append(info)elif skip>:skip -= else:paths = pathinfos = infobreakreturn paths, infos", "docstring": "Get file information tuple given the file name. If file name is redundant in different\ndirectories in the repository, this method ensures to return all or some of the\nfiles infos according to skip value.\n\nParameters:\n #. name (string): The file name.\n #. skip (None, integer): As file names can be identical, skip determines\n the number of satisfying files name to skip before returning.\\n\n If None is given, a list of all files relative path will be returned.\n\n:Returns:\n #. relativePath (string, list): The file relative path joined with file name.\n If None, it means file was not found.\\n\n If skip is None a list of all found files relative paths will be returned.\n #. 
info (None, dictionary, list): The file information dictionary.\n If None, it means file was not found.\\n\n If skip is None a list of all found files info dicts will be returned.", "id": "f13923:c0:m42"} {"signature": "@acquire_lock@sync_requireddef add_directory(self, relativePath, info=None):", "body": "path = os.path.normpath(relativePath)currentDir = self.pathcurrentDict = selfif path in (\"\",\"\"):return currentDictsave = Falsefor dir in path.split(os.sep):dirPath = os.path.join(currentDir, dir)if not os.path.exists(dirPath):os.mkdir(dirPath)currentDict = dict.__getitem__(currentDict, \"\")if currentDict.get(dir, None) is None:save = TruecurrentDict[dir] = {\"\":{}, \"\":{},\"\":datetime.utcnow(),\"\":str(uuid.uuid1()),\"\": info} currentDict = currentDict[dir]currentDir = dirPathif save:self.save()return currentDict", "docstring": "Adds a directory in the repository and creates its\nattribute in the Repository with utc timestamp.\nIt insures adding all the missing directories in the path.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path of the directory to add in the repository.\n #. info (None, string, pickable object): Any random info about the folder.\n\n:Returns:\n #. info (dict): The directory info dict.", "id": "f13923:c0:m43"} {"signature": "@acquire_lock@sync_requireddef remove_directory(self, relativePath, removeFromSystem=False):", "body": "relativePath = os.path.normpath(relativePath)parentDirInfoDict, errorMessage = self.get_parent_directory_info(relativePath)assert parentDirInfoDict is not None, errorMessagepath, name = os.path.split(relativePath)if dict.__getitem__(parentDirInfoDict, '').get(name, None) is None:raise Exception(\"\"%(name, path))if removeFromSystem:for rp in self.walk_files_relative_path(relativePath=relativePath):ap = os.path.join(self.__path, relativePath, rp)if not os.path.isfile(ap):continueif not os.path.exists(ap):continueif os.path.isfile(ap):os.remove( ap )for rp in self.walk_directories_relative_path(relativePath=relativePath):ap = os.path.join(self.__path, relativePath, rp)if not os.path.isdir(ap):continueif not os.path.exists(ap):continueif not len(os.listdir(ap)):os.rmdir(ap)dict.__getitem__(parentDirInfoDict, '').pop(name, None)ap = os.path.join(self.__path, relativePath)if not os.path.isdir(ap):if not len(os.listdir(ap)):os.rmdir(ap)self.save()", "docstring": "Remove directory from repository.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path of the directory to remove from the repository.\n #. 
removeFromSystem (boolean): Whether to also remove directory and all files from the system.\\n\n Only files saved in the repository will be removed and empty left directories.", "id": "f13923:c0:m44"} {"signature": "@acquire_lock@sync_requireddef move_directory(self, relativePath, relativeDestination, replace=False, verbose=True):", "body": "relativePath = os.path.normpath(relativePath)relativeDestination = os.path.normpath(relativeDestination)filesInfo = list( self.walk_files_info(relativePath=relativePath) )dirsPath = list( self.walk_directories_relative_path(relativePath=relativePath) )dirInfoDict, errorMessage = self.get_directory_info(relativePath)assert dirInfoDict is not None, errorMessageself.remove_directory(relativePath=relativePath, removeFromSystem=False)self.add_directory(relativeDestination)for RP, info in filesInfo:source = os.path.join(self.__path, relativePath, RP)destination = os.path.join(self.__path, relativeDestination, RP)newDirRP, fileName = os.path.split(os.path.join(relativeDestination, RP))dirInfoDict = self.add_directory( newDirRP )if os.path.isfile(destination):if replace:os.remove(destination)if verbose:warnings.warn(\"\"%(fileName, newDirRP))else:if verbose:warnings.warn(\"\"%(fileName,destination))continueos.rename(source, destination)dict.__getitem__(dirInfoDict, \"\")[fileName] = infoself.save()", "docstring": "Move a directory in the repository from one place to another. It insures moving all the\nfiles and subdirectories in the system.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path of the directory to be moved.\n #. relativeDestination (string): The new relative to the repository path of the directory.\n #. replace (boolean): Whether to replace existing files with the same name in the new created directory.\n #. verbose (boolean): Whether to be warn and informed about any abnormalities.", "id": "f13923:c0:m45"} {"signature": "@acquire_lock@sync_requireddef rename_directory(self, relativePath, newName, replace=False, verbose=True):", "body": "relativePath = os.path.normpath(relativePath)parentDirInfoDict, errorMessage = self.get_parent_directory_info(relativePath)assert parentDirInfoDict is not None, errorMessageparentDirPath, dirName = os.path.split(relativePath)realPath = os.path.join(self.__path, relativePath)assert os.path.isdir( realPath ), \"\"%realPathassert dirName in dict.__getitem__(parentDirInfoDict, \"\"), \"\"%(dirName, parentDirPath)assert newName not in dict.__getitem__(parentDirInfoDict, \"\"), \"\"%(newName, parentDirPath)newRealPath = os.path.join(self.__path, parentDirPath, newName)if os.path.isdir( newRealPath ):if replace:shutil.rmtree(newRealPath)if verbose:warnings.warn( \"\"%newRealPath )else:raise Exception( \"\"%newRealPath )os.rename(realPath, newRealPath)dict.__setitem__( dict.__getitem__(parentDirInfoDict, \"\"),newName,dict.__getitem__(parentDirInfoDict, \"\").pop(dirName) )self.save()", "docstring": "Rename a directory in the repository. It insures renaming the directory in the system.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path of the directory to be renamed.\n #. newName (string): The new directory name.\n #. replace (boolean): Whether to force renaming when new name exists in the system.\n It fails when new folder name is registered in repository.\n #. 
verbose (boolean): Whether to be warn and informed about any abnormalities.", "id": "f13923:c0:m46"} {"signature": "@acquire_lock@sync_requireddef rename_file(self, relativePath, name, newName, replace=False, verbose=True):", "body": "relativePath = os.path.normpath(relativePath)if relativePath == '':relativePath = ''dirInfoDict, errorMessage = self.get_directory_info(relativePath)assert dirInfoDict is not None, errorMessageassert name in dict.__getitem__(dirInfoDict, \"\"), \"\"%(name, relativePath)realPath = os.path.join(self.__path, relativePath, name)assert os.path.isfile(realPath), \"\"%realPathassert newName not in dict.__getitem__(dirInfoDict, \"\"), \"\"%(newName, relativePath)newRealPath = os.path.join(self.__path, relativePath, newName)if os.path.isfile( newRealPath ):if replace:os.remove(newRealPath)if verbose:warnings.warn( \"\"%(newRealPath,realPath) )else:raise Exception( \"\"%newRealPath )os.rename(realPath, newRealPath)dict.__setitem__( dict.__getitem__(dirInfoDict, \"\"),newName,dict.__getitem__(dirInfoDict, \"\").pop(name) )self.save()", "docstring": "Rename a directory in the repository. It insures renaming the file in the system.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path of the directory where the file is located.\n #. name (string): The file name.\n #. newName (string): The file new name.\n #. replace (boolean): Whether to force renaming when new folder name exists in the system.\n It fails when new folder name is registered in repository.\n #. verbose (boolean): Whether to be warn and informed about any abnormalities.", "id": "f13923:c0:m47"} {"signature": "@acquire_lock@sync_requireddef remove_file(self, relativePath, name=None, removeFromSystem=False):", "body": "relativePath = os.path.normpath(relativePath)if relativePath == '':relativePath = ''assert name != '', \"\"assert name != '', \"\"assert name != '', \"\"if name is None:assert len(relativePath), \"\"relativePath, name = os.path.split(relativePath)dirInfoDict, errorMessage = self.get_directory_info(relativePath)assert dirInfoDict is not None, errorMessageassert name in dict.__getitem__(dirInfoDict, \"\"), \"\"%(name, relativePath)dict.__getitem__(dirInfoDict, \"\").pop(name)if removeFromSystem:ap = os.path.join(self.__path, relativePath, name )if os.path.isfile(ap):os.remove( ap )self.save()", "docstring": "Remove file from repository.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path of the directory where the file should be dumped.\n If relativePath does not exist, it will be created automatically.\n #. name (string): The file name.\n If None is given, name will be split from relativePath.\n #. 
removeFromSystem (boolean): Whether to also remove directory and all files from the system.\\n\n Only files saved in the repository will be removed and empty left directories.", "id": "f13923:c0:m48"} {"signature": "@acquire_lock@sync_requireddef dump_copy(self, path, relativePath, name=None,description=None,replace=False, verbose=False):", "body": "relativePath = os.path.normpath(relativePath)if relativePath == '':relativePath = ''if name is None:_,name = os.path.split(path)self.add_directory(relativePath)realPath = os.path.join(self.__path, relativePath)dirInfoDict, errorMessage = self.get_directory_info(relativePath)assert dirInfoDict is not None, errorMessageif name in dict.__getitem__(dirInfoDict, \"\"):if not replace:if verbose:warnings.warn(\"\"%(name))returndump = \"\"pull = \"\"try:shutil.copyfile(path, os.path.join(realPath,name))except Exception as e:if verbose:warnings.warn(e)returnklass = Nonedict.__getitem__(dirInfoDict, \"\")[name] = {\"\":dump,\"\":pull,\"\":datetime.utcnow(),\"\":str(uuid.uuid1()),\"\": klass,\"\":description}self.save()", "docstring": "Copy an exisitng system file to the repository.\nattribute in the Repository with utc timestamp.\n\n:Parameters:\n #. path (str): The full path of the file to copy into the repository.\n #. relativePath (str): The relative to the repository path of the directory where the file should be dumped.\n If relativePath does not exist, it will be created automatically.\n #. name (string): The file name.\n If None is given, name will be split from path.\n #. description (None, string, pickable object): Any random description about the file.\n #. replace (boolean): Whether to replace any existing file with the same name if existing.\n #. verbose (boolean): Whether to be warn and informed about any abnormalities.", "id": "f13923:c0:m49"} {"signature": "@acquire_lock@sync_requireddef dump_file(self, value, relativePath, name=None,description=None, klass=None,dump=None, pull=None,replace=False, ACID=None, verbose=False):", "body": "if ACID is None:ACID = self.__ACIDassert isinstance(ACID, bool), \"\"relativePath = os.path.normpath(relativePath)if relativePath == '':relativePath = ''assert name != '', \"\"assert name != '', \"\"assert name != '', \"\"if name is None:assert len(relativePath), \"\"relativePath,name = os.path.split(relativePath)self.add_directory(relativePath)realPath = os.path.join(self.__path, relativePath)dirInfoDict, errorMessage = self.get_directory_info(relativePath)assert dirInfoDict is not None, errorMessageif name in dict.__getitem__(dirInfoDict, \"\"):if not replace:if verbose:warnings.warn(\"\"%(name))returnif dump is None:dump=DEFAULT_DUMPif pull is None:pull=DEFAULT_PULLif ACID:savePath = os.path.join(tempfile.gettempdir(), str(uuid.uuid1()))else:savePath = os.path.join(realPath,name)try:exec( dump.replace(\"\", str(savePath)) )except Exception as e:message = \"\"%eif '' in dump:message += ''%str(get_pickling_errors(value))raise Exception( message )if ACID:try:shutil.copyfile(savePath, os.path.join(realPath,name))except Exception as e:os.remove(savePath)if verbose:warnings.warn(e)returnos.remove(savePath)if klass is None and value is not None:klass = value.__class__if klass is not None:assert inspect.isclass(klass), \"\"dict.__getitem__(dirInfoDict, \"\")[name] = {\"\":dump,\"\":pull,\"\":datetime.utcnow(),\"\":str(uuid.uuid1()),\"\": klass,\"\":description}self.save()", "docstring": "Dump a file using its value to the system and creates its\nattribute in the Repository with utc timestamp.\n\n:Parameters:\n #. 
value (object): The value of a file to dump and add to the repository. It is any python object or file.\n #. relativePath (str): The relative to the repository path of the directory where the file should be dumped.\n If relativePath does not exist, it will be created automatically.\n #. name (string): The file name.\n If None is given, name will be split from relativePath.\n #. description (None, string, pickable object): Any random description about the file.\n #. klass (None, class): The dumped object class. If None is given\n klass will be automatically set to the following value.__class__\n #. dump (None, string): The dumping method.\n If None it will be set automatically to pickle and therefore the object must be pickleable.\n If a string is given, the string should include all the necessary imports\n and a '$FILE_PATH' that replaces the absolute file path when the dumping will be performed.\\n\n e.g. \"import numpy as np; np.savetxt(fname='$FILE_PATH', X=value, fmt='%.6e')\"\n #. pull (None, string): The pulling method.\n If None it will be set automatically to pickle and therefore the object must be pickleable.\n If a string is given, the string should include all the necessary imports,\n a '$FILE_PATH' that replaces the absolute file path when the dumping will be performed\n and finally a PULLED_DATA variable.\\n\n e.g \"import numpy as np; PULLED_DATA=np.loadtxt(fname='$FILE_PATH')\"\n #. replace (boolean): Whether to replace any existing file with the same name if existing.\n #. ACID (None, boolean): Whether to ensure the ACID (Atomicity, Consistency, Isolation, Durability)\n properties of the repository upon dumping a file. This is ensured by dumping the file in\n a temporary path first and then moving it to the desired path.\n If None is given, repository ACID property will be used.\n #. 
verbose (boolean): Whether to be warn and informed about any abnormalities.", "id": "f13923:c0:m50"} {"signature": "def dump(self, *args, **kwargs):", "body": "self.dump_file(*args, **kwargs)", "docstring": "Alias to dump_file", "id": "f13923:c0:m51"} {"signature": "@acquire_lock@sync_requireddef update_file(self, value, relativePath, name=None,description=False, klass=False,dump=False, pull=False,ACID=None, verbose=False):", "body": "if ACID is None:ACID = self.__ACIDassert isinstance(ACID, bool), \"\"relativePath = os.path.normpath(relativePath)if relativePath == '':relativePath = ''assert name != '', \"\"assert name != '', \"\"assert name != '', \"\"if name is None:assert len(relativePath), \"\"relativePath,name = os.path.split(relativePath)fileInfoDict, errorMessage = self.get_file_info(relativePath, name)assert fileInfoDict is not None, errorMessagerealPath = os.path.join(self.__path, relativePath)if verbose:if not os.path.isfile( os.path.join(realPath, name) ):warnings.warn(\"\"%os.path.join(realPath, name))if not dump:dump = fileInfoDict[\"\"]if not pull:pull = fileInfoDict[\"\"]if ACID:savePath = os.path.join(tempfile.gettempdir(), name)else:savePath = os.path.join(realPath,name)try:exec( dump.replace(\"\", str(savePath)) )except Exception as e:message = \"\"%eif '' in dump:message += ''%str(get_pickling_errors(value))raise Exception( message )if ACID:try:shutil.copyfile(savePath, os.path.join(realPath,name))except Exception as e:os.remove(savePath)if verbose:warnings.warn(e)returnos.remove(savePath)fileInfoDict[\"\"] = datetime.utcnow()if description is not False:fileInfoDict[\"\"] = descriptionif klass is not False:assert inspect.isclass(klass), \"\"fileInfoDict[\"\"] = klassself.save()", "docstring": "Update the value and the utc timestamp of a file that is already in the Repository.\\n\nIf file is not registered in repository, and error will be thrown.\\n\nIf file is missing in the system, it will be regenerated as dump method is called.\n\n:Parameters:\n #. value (object): The value of the file to update. It is any python object or a file.\n #. relativePath (str): The relative to the repository path of the directory where the file should be dumped.\n #. name (None, string): The file name.\n If None is given, name will be split from relativePath.\n #. description (False, string, pickable object): Any random description about the file.\n If False is given, the description info won't be updated,\n otherwise it will be update to what description argument value is.\n #. klass (False, class): The dumped object class. If False is given,\n the class info won't be updated, otherwise it will be update to what klass argument value is.\n #. dump (False, string): The new dump method. If False is given, the old one will be used.\n #. pull (False, string): The new pull method. If False is given, the old one will be used.\n #. ACID (boolean): Whether to ensure the ACID (Atomicity, Consistency, Isolation, Durability)\n properties of the repository upon dumping a file. This is ensured by dumping the file in\n a temporary path first and then moving it to the desired path.\n If None is given, repository ACID property will be used.\n #. 
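The dump and pull arguments documented above are code strings in which '$FILE_PATH' is substituted with the real file path before execution, exactly as the docstring examples show. A self-contained sketch of that mechanism, outside the Repository class; the target path is illustrative and numpy is only needed for this example.

    import os
    import tempfile

    import numpy as np   # required only by the example dump/pull strings

    value = np.arange(5, dtype=float)
    file_path = os.path.join(tempfile.gettempdir(), 'array_demo.txt')   # illustrative path

    # dump method string, mirroring the docstring example; '$FILE_PATH' is substituted first
    dump = "import numpy as np; np.savetxt(fname='$FILE_PATH', X=value, fmt='%.6e')"
    exec(dump.replace('$FILE_PATH', file_path))

    # pull method string, mirroring the docstring example; it must define PULLED_DATA
    pull = "import numpy as np; PULLED_DATA=np.loadtxt(fname='$FILE_PATH')"
    namespace = {}
    exec(pull.replace('$FILE_PATH', file_path), namespace)
    print(namespace['PULLED_DATA'])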
verbose (boolean): Whether to be warn and informed about any abnormalities.", "id": "f13923:c0:m52"} {"signature": "def update(self, *args, **kwargs):", "body": "self.update_file(*args, **kwargs)", "docstring": "Alias to update_file", "id": "f13923:c0:m53"} {"signature": "def pull_file(self, relativePath, name=None, pull=None, update=True):", "body": "relativePath = os.path.normpath(relativePath)if relativePath == '':relativePath = ''assert name != '', \"\"assert name != '', \"\"assert name != '', \"\"if name is None:assert len(relativePath), \"\"relativePath,name = os.path.split(relativePath)fileInfo, errorMessage = self.get_file_info(relativePath, name)assert fileInfo is not None, errorMessagerealPath = os.path.join(self.__path, relativePath)assert os.path.exists(realPath), \"\"%(relativePath, self.__path)fileAbsPath = os.path.join(realPath, name)assert os.path.isfile(fileAbsPath), \"\"%(name,realPath)if pull is None:pull = fileInfo[\"\"]try:namespace = {}namespace.update( globals() )exec( pull.replace(\"\", str(os.path.join(realPath,name)) ), namespace )except Exception as e:m = pull.replace(\"\", str(os.path.join(realPath,name)) )raise Exception( \"\"%(m,e) )if update:fileInfo[\"\"] = pullreturn namespace['']", "docstring": "Pull a file's data from the Repository.\n\n:Parameters:\n #. relativePath (string): The relative to the repository path of the directory where the file should be pulled.\n #. name (string): The file name.\n If None is given, name will be split from relativePath.\n #. pull (None, string): The pulling method.\n If None, the pull method saved in the file info will be used.\n If a string is given, the string should include all the necessary imports,\n a '$FILE_PATH' that replaces the absolute file path when the dumping will be performed\n and finally a PULLED_DATA variable.\n e.g \"import numpy as np; PULLED_DATA=np.loadtxt(fname='$FILE_PATH')\"\n #. update (boolean): If pull is not None, Whether to update the pull method stored in the file info by the given pull method.\n\n:Returns:\n #. 
data (object): The pulled data from the file.", "id": "f13923:c0:m54"} {"signature": "def pull(self, *args, **kwargs):", "body": "return self.pull_file(*args, **kwargs)", "docstring": "Alias to pull_file", "id": "f13923:c0:m55"} {"signature": "def get_version():", "body": "return __version__", "docstring": "Get pyrep's version number.", "id": "f13924:m0"} {"signature": "def get_author():", "body": "return __author__", "docstring": "Get pyrep's author's name.", "id": "f13924:m1"} {"signature": "def get_email():", "body": "return __email__", "docstring": "Get pyrep's author's email.", "id": "f13924:m2"} {"signature": "def get_doc():", "body": "return __onlinedoc__", "docstring": "Get pyrep's official online documentation link.", "id": "f13924:m3"} {"signature": "def get_repository():", "body": "return __repository__", "docstring": "Get pyrep's official online repository link.", "id": "f13924:m4"} {"signature": "def get_pypi():", "body": "return __pypi__", "docstring": "Get pyrep pypi's link.", "id": "f13924:m5"} {"signature": "def __init__(self,api_key=None,json_encoder=SendwithusJSONEncoder,raise_errors=False,default_timeout=None,**kwargs):", "body": "if not api_key:raise Exception(\"\")self.API_KEY = api_keyself.DEFAULT_TIMEOUT = default_timeoutself._json_encoder = json_encoderself._raise_errors = raise_errorsif '' in kwargs:self.API_HOST = kwargs['']if '' in kwargs:self.API_PROTO = kwargs['']if '' in kwargs:self.API_PORT = kwargs['']if '' in kwargs:self.API_VERSION = kwargs['']if '' in kwargs:self.DEBUG = kwargs['']if self.DEBUG:logging.basicConfig(format=LOGGER_FORMAT, level=logging.DEBUG)logger.debug('')logger.propagate = True", "docstring": "Constructor, expects api key", "id": "f13929:c0:m0"} {"signature": "def _parse_response(self, response):", "body": "if not self._raise_errors:return responseis_4xx_error = str(response.status_code)[] == ''is_5xx_error = str(response.status_code)[] == ''content = response.contentif response.status_code == :raise AuthenticationError(content)elif is_4xx_error:raise APIError(content)elif is_5xx_error:raise ServerError(content)return response", "docstring": "Parses the API response and raises appropriate errors if\n raise_errors was set to True", "id": "f13929:c0:m5"} {"signature": "def _api_request(self, endpoint, http_method, *args, **kwargs):", "body": "logger.debug('' % endpoint)auth = self._build_http_auth()headers = self._build_request_headers(kwargs.get(''))logger.debug('' % headers)path = self._build_request_path(endpoint)logger.debug('' % path)data = self._build_payload(kwargs.get(''))if not data:data = kwargs.get('')logger.debug('' % data)req_kw = dict(auth=auth,headers=headers,timeout=kwargs.get('', self.DEFAULT_TIMEOUT))if (http_method == self.HTTP_POST):if (data):r = requests.post(path, data=data, **req_kw)else:r = requests.post(path, **req_kw)elif http_method == self.HTTP_PUT:if (data):r = requests.put(path, data=data, **req_kw)else:r = requests.put(path, **req_kw)elif http_method == self.HTTP_DELETE:r = requests.delete(path, **req_kw)else:r = requests.get(path, **req_kw)logger.debug('' % r.status_code)try:logger.debug('' % r.json())except:logger.debug('' % r.content)return self._parse_response(r)", "docstring": "Private method for api requests", "id": "f13929:c0:m6"} {"signature": "def get_log(self, log_id, timeout=None):", "body": "return self._api_request(self.GET_LOG_ENDPOINT % log_id,self.HTTP_GET,timeout=timeout)", "docstring": "API call to get a specific log entry", "id": "f13929:c0:m7"} {"signature": "def get_log_events(self, 
log_id, timeout=None):", "body": "return self._api_request(self.GET_LOG_EVENTS_ENDPOINT % log_id,self.HTTP_GET,timeout=timeout)", "docstring": "API call to get a specific log entry", "id": "f13929:c0:m8"} {"signature": "def emails(self):", "body": "return self.templates()", "docstring": "[DEPRECATED] API call to get a list of emails", "id": "f13929:c0:m9"} {"signature": "def templates(self, timeout=None):", "body": "return self._api_request(self.TEMPLATES_ENDPOINT,self.HTTP_GET,timeout=timeout)", "docstring": "API call to get a list of templates", "id": "f13929:c0:m10"} {"signature": "def get_template(self, template_id, version=None, timeout=None):", "body": "if (version):return self._api_request(self.TEMPLATES_VERSION_ENDPOINT % (template_id, version),self.HTTP_GET,timeout=timeout)else:return self._api_request(self.TEMPLATES_SPECIFIC_ENDPOINT % template_id,self.HTTP_GET,timeout=timeout)", "docstring": "API call to get a specific template", "id": "f13929:c0:m11"} {"signature": "def create_email(self, name, subject, html, text=''):", "body": "return self.create_template(name, subject, html, text)", "docstring": "[DECPRECATED] API call to create an email", "id": "f13929:c0:m12"} {"signature": "def create_template(self,name,subject,html,text='',timeout=None):", "body": "payload = {'': name,'': subject,'': html,'': text}return self._api_request(self.TEMPLATES_ENDPOINT,self.HTTP_POST,payload=payload,timeout=timeout)", "docstring": "API call to create a template", "id": "f13929:c0:m13"} {"signature": "def create_new_locale(self,template_id,locale,version_name,subject,text='',html='',timeout=None):", "body": "payload = {'': locale,'': version_name,'': subject}if html:payload[''] = htmlif text:payload[''] = textreturn self._api_request(self.TEMPLATES_LOCALES_ENDPOINT % template_id,self.HTTP_POST,payload=payload,timeout=timeout)", "docstring": "API call to create a new locale and version of a template", "id": "f13929:c0:m14"} {"signature": "def create_new_version(self,name,subject,text='',template_id=None,html=None,locale=None,timeout=None):", "body": "if(html):payload = {'': name,'': subject,'': html,'': text}else:payload = {'': name,'': subject,'': text}if locale:url = self.TEMPLATES_SPECIFIC_LOCALE_VERSIONS_ENDPOINT % (template_id,locale)else:url = self.TEMPLATES_NEW_VERSION_ENDPOINT % template_idreturn self._api_request(url,self.HTTP_POST,payload=payload,timeout=timeout)", "docstring": "API call to create a new version of a template", "id": "f13929:c0:m15"} {"signature": "def update_template_version(self,name,subject,template_id,version_id,text='',html=None,timeout=None):", "body": "if(html):payload = {'': name,'': subject,'': html,'': text}else:payload = {'': name,'': subject,'': text}return self._api_request(self.TEMPLATES_VERSION_ENDPOINT % (template_id, version_id),self.HTTP_PUT,payload=payload,timeout=timeout)", "docstring": "API call to update a template version", "id": "f13929:c0:m16"} {"signature": "def snippets(self, timeout=None):", "body": "return self._api_request(self.SNIPPETS_ENDPOINT,self.HTTP_GET,timeout=timeout)", "docstring": "API call to get list of snippets", "id": "f13929:c0:m17"} {"signature": "def get_snippet(self, snippet_id, timeout=None):", "body": "return self._api_request(self.SNIPPET_ENDPOINT % (snippet_id),self.HTTP_GET,timeout=timeout)", "docstring": "API call to get a specific Snippet", "id": "f13929:c0:m18"} {"signature": "def create_snippet(self, name, body, timeout=None):", "body": "payload = {'': name,'': body}return 
self._api_request(self.SNIPPETS_ENDPOINT,self.HTTP_POST,payload=payload,timeout=timeout)", "docstring": "API call to create a Snippet", "id": "f13929:c0:m19"} {"signature": "def _make_file_dict(self, f):", "body": "if isinstance(f, dict):file_obj = f['']if '' in f:file_name = f['']else:file_name = file_obj.nameelse:file_obj = ffile_name = f.nameb64_data = base64.b64encode(file_obj.read())return {'': file_name,'': b64_data.decode() if six.PY3 else b64_data,}", "docstring": "Make a dictionary with filename and base64 file data", "id": "f13929:c0:m24"} {"signature": "def send(self,email_id,recipient,email_data=None,sender=None,cc=None,bcc=None,tags=[],headers={},esp_account=None,locale=None,email_version_name=None,inline=None,files=[],timeout=None):", "body": "if not email_data:email_data = {}if isinstance(recipient, string_types):warnings.warn(\"\",DeprecationWarning)recipient = {'': recipient}payload = {'': email_id,'': recipient,'': email_data}if sender:payload[''] = senderif cc:if not type(cc) == list:logger.error('' % type(cc))payload[''] = ccif bcc:if not type(bcc) == list:logger.error('' % type(bcc))payload[''] = bccif tags:if not type(tags) == list:logger.error('' % (type(tags)))payload[''] = tagsif headers:if not type(headers) == dict:logger.error('' % (type(headers)))payload[''] = headersif esp_account:if not isinstance(esp_account, string_types):logger.error('' % (type(esp_account)))payload[''] = esp_accountif locale:if not isinstance(locale, string_types):logger.error('' % (type(locale)))payload[''] = localeif email_version_name:if not isinstance(email_version_name, string_types):logger.error('' % (type(email_version_name)))payload[''] = email_version_nameif inline:payload[''] = self._make_file_dict(inline)if files:payload[''] = [self._make_file_dict(f) for f in files]return self._api_request(self.SEND_ENDPOINT,self.HTTP_POST,payload=payload,timeout=timeout)", "docstring": "API call to send an email", "id": "f13929:c0:m25"} {"signature": "def _api_request(self, endpoint, http_method, *args, **kwargs):", "body": "logger.debug('' % endpoint)path = self._build_request_path(endpoint, absolute=False)logger.debug('' % path)data = Noneif '' in kwargs:data = kwargs['']logger.debug('' % data)command = {\"\": path,\"\": http_method}if data:command[''] = dataself._commands.append(command)", "docstring": "Private method for api requests", "id": "f13929:c1:m1"} {"signature": "def execute(self, timeout=None):", "body": "logger.debug('' % len(self._commands))auth = self._build_http_auth()headers = self._build_request_headers()logger.debug('' % headers)logger.debug('' % len(self._commands))path = self._build_request_path(self.BATCH_ENDPOINT)data = json.dumps(self._commands, cls=self._json_encoder)r = requests.post(path,auth=auth,headers=headers,data=data,timeout=(self.DEFAULT_TIMEOUT if timeout is None else timeout))self._commands = []logger.debug('' % r.status_code)try:logger.debug('' % r.json())except:logger.debug('' % r.content)return r", "docstring": "Execute all currently queued batch commands", "id": "f13929:c1:m2"} {"signature": "def foo(n):", "body": "if n == :return nelse:return foo(n - )", "docstring": "Return 0 for all n >= 0", "id": "f13971:m0"} {"signature": "def baz(n):", "body": "return True", "docstring": "Return True", "id": "f13971:m1"} {"signature": "def foo(n):", "body": "if n == :return nelse:return foo(n - )", "docstring": "Return 1 for all n >= 0", "id": "f13972:m0"} {"signature": "def bar(n):", "body": "if n == :return nelse:return foo(n)", "docstring": "Return 2 for all n 
>= 0", "id": "f13972:m1"} {"signature": "def _get_col_index(name):", "body": "index = string.ascii_uppercase.indexcol = for c in name.upper():col = col * + index(c) + return col", "docstring": "Convert column name to index.", "id": "f13974:m0"} {"signature": "def _get_range(book, range_, sheet):", "body": "filename = Noneif isinstance(book, str):filename = bookbook = opxl.load_workbook(book, data_only=True)elif isinstance(book, opxl.Workbook):passelse:raise TypeErrorif _is_range_address(range_):sheet_names = [name.upper() for name in book.sheetnames]index = sheet_names.index(sheet.upper())data = book.worksheets[index][range_]else:data = _get_namedrange(book, range_, sheet)if data is None:raise ValueError(\"\" % (range_, filename or book))return data", "docstring": "Return a range as nested dict of openpyxl cells.", "id": "f13974:m2"} {"signature": "def read_range(filepath, range_expr, sheet=None, dict_generator=None):", "body": "def default_generator(cells):for row_ind, row in enumerate(cells):for col_ind, cell in enumerate(row):yield (row_ind, col_ind), cell.valuebook = opxl.load_workbook(filepath, data_only=True)if _is_range_address(range_expr):sheet_names = [name.upper() for name in book.sheetnames]index = sheet_names.index(sheet.upper())cells = book.worksheets[index][range_expr]else:cells = _get_namedrange(book, range_expr, sheet)if isinstance(cells, opxl.cell.Cell):return cells.valueif dict_generator is None:dict_generator = default_generatorgen = dict_generator(cells)return {keyval[]: keyval[] for keyval in gen}", "docstring": "Read values from an Excel range into a dictionary.\n\n `range_expr` ie either a range address string, such as \"A1\", \"$C$3:$E$5\",\n or a defined name string for a range, such as \"NamedRange1\".\n If a range address is provided, `sheet` argument must also be provided.\n If a named range is provided and `sheet` is not, book level defined name\n is searched. If `sheet` is also provided, sheet level defined name for the\n specified `sheet` is searched.\n If range_expr points to a single cell, its value is returned.\n\n `dictgenerator` is a generator function that yields keys and values of \n the returned dictionary. 
the excel range, as a nested tuple of openpyxl's\n Cell objects, is passed to the generator function as its single argument.\n If not specified, default generator is used, which maps tuples of row and\n column indexes, both starting with 0, to their values.\n\n Args:\n filepath (str): Path to an Excel file.\n range_epxr (str): Range expression, such as \"A1\", \"$G4:$K10\", \n or named range \"NamedRange1\"\n sheet (str): Sheet name (case ignored).\n None if book level defined range name is passed as `range_epxr`.\n dict_generator: A generator function taking a nested tuple of cells \n as a single parameter.\n\n Returns:\n Nested list containing range values.", "id": "f13974:m3"} {"signature": "def _get_namedrange(book, rangename, sheetname=None):", "body": "def cond(namedef):if namedef.type.upper() == \"\":if namedef.name.upper() == rangename.upper():if sheetname is None:if not namedef.localSheetId:return Trueelse: sheet_id = [sht.upper() for sht in book.sheetnames].index(sheetname.upper())if namedef.localSheetId == sheet_id:return Truereturn Falsedef get_destinations(name_def):\"\"\"\"\"\"from openpyxl.formula import Tokenizerfrom openpyxl.utils.cell import SHEETRANGE_REif name_def.type == \"\":tok = Tokenizer(\"\" + name_def.value)for part in tok.items:if part.subtype == \"\":m = SHEETRANGE_RE.match(part.value)if m.group(\"\"):sheet_name = m.group(\"\")else:sheet_name = m.group(\"\")yield sheet_name, m.group(\"\")namedef = next((item for item in book.defined_names.definedName if cond(item)), None)if namedef is None:return Nonedests = get_destinations(namedef)xlranges = []sheetnames_upper = [name.upper() for name in book.sheetnames]for sht, addr in dests:if sheetname:sht = sheetnameindex = sheetnames_upper.index(sht.upper())xlranges.append(book.worksheets[index][addr])if len(xlranges) == :return xlranges[]else:return xlranges", "docstring": "Get range from a workbook.\n\n A workbook can contain multiple definitions for a single name,\n as a name can be defined for the entire book or for\n a particular sheet.\n\n If sheet is None, the book-wide def is searched,\n otherwise sheet-local def is looked up.\n\n Args:\n book: An openpyxl workbook object.\n rangename (str): Range expression, such as \"A1\", \"$G4:$K10\",\n named range \"NamedRange1\".\n sheetname (str, optional): None for book-wide name def,\n sheet name for sheet-local named range.\n\n Returns:\n Range object specified by the name.", "id": "f13974:m4"} {"signature": "def cellsiter_to_dataframe(cellsiter, args, drop_allna=True):", "body": "from modelx.core.cells import shareable_parametersif len(args):indexes = shareable_parameters(cellsiter)else:indexes = get_all_params(cellsiter.values())result = Nonefor cells in cellsiter.values():df = cells_to_dataframe(cells, args)if drop_allna and df.isnull().all().all():continue if df.index.names != [None]:if isinstance(df.index, pd.MultiIndex):if _pd_ver < (, ):df = _reset_naindex(df)df = df.reset_index()missing_params = set(indexes) - set(df)for params in missing_params:df[params] = np.nanif result is None:result = dfelse:try:result = pd.merge(result, df, how=\"\")except MergeError:result = pd.concat([result, df], axis=)except ValueError:cols = set(result.columns) & set(df.columns)for col in cols:if (len([str(frame[col].dtype)for frame in (result, df)if str(frame[col].dtype) == \"\"])== ):if str(result[col].dtype) == \"\":frame = dfelse:frame = resultframe[[col]] = frame[col].astype(\"\")result = pd.merge(result, df, how=\"\")if result is None:return pd.DataFrame()else:return 
result.set_index(indexes) if indexes else result", "docstring": "Convert multiple cells to a frame.\n\n If args is an empty sequence, all values are included.\n If args is specified, cellsiter must have shareable parameters.\n\n Args:\n cellsiter: A mapping from cells names to CellsImpl objects.\n args: A sequence of arguments", "id": "f13975:m0"} {"signature": "def cells_to_series(cells, args):", "body": "paramlen = len(cells.formula.parameters)is_multidx = paramlen > if len(cells.data) == :data = {}indexes = Noneelif paramlen == : data = list(cells.data.values())indexes = [np.nan]else:if len(args) > :defaults = tuple(param.defaultfor param in cells.formula.signature.parameters.values())updated_args = []for arg in args:if len(arg) > paramlen:arg = arg[:paramlen]elif len(arg) < paramlen:arg += defaults[len(arg) :]updated_args.append(arg)items = [(arg, cells.data[arg])for arg in updated_argsif arg in cells.data]else:items = [(key, value) for key, value in cells.data.items()]if not is_multidx: items = [(key[], value) for key, value in items]if len(items) == :indexes, data = None, {}else:indexes, data = zip(*items)if is_multidx:indexes = pd.MultiIndex.from_tuples(indexes)result = pd.Series(data=data, name=cells.name, index=indexes)if indexes is not None and any(i is not np.nan for i in indexes):result.index.names = list(cells.formula.parameters)return result", "docstring": "Convert a CellImpl into a Series.\n\n `args` must be a sequence of argkeys.\n\n `args` can be longer or shorter then the number of cell's parameters.\n If shorter, then defaults are filled if any, else raise error.\n If longer, then redundant args are ignored.", "id": "f13975:m3"} {"signature": "def to_frame(self, *args):", "body": "if sys.version_info < (, , ):from collections import OrderedDictimpls = OrderedDict()for name, obj in self.items():impls[name] = obj._implelse:impls = get_impls(self)return _to_frame_inner(impls, args)", "docstring": "Convert the cells in the view into a DataFrame object.\n\n If ``args`` is not given, this method returns a DataFrame that\n has an Index or a MultiIndex depending of the number of\n cells parameters and columns each of which corresponds to each\n cells included in the view.\n\n ``args`` can be given to calculate cells values and limit the\n DataFrame indexes to the given arguments.\n\n The cells in this view may have different number of parameters,\n but parameters shared among multiple cells\n must appear in the same position in all the parameter lists.\n For example,\n Having ``foo()``, ``bar(x)`` and ``baz(x, y=1)`` is okay\n because the shared parameter ``x`` is always the first parameter,\n but this method does not work if the view has ``quz(x, z=2, y=1)``\n cells in addition to the first three cells, because ``y`` appears\n in different positions.\n\n Args:\n args(optional): multiple arguments,\n or an iterator of arguments to the cells.", "id": "f13977:c4:m1"} {"signature": "@propertydef bases(self):", "body": "return get_interfaces(self._impl.bases)", "docstring": "List of base classes.", "id": "f13977:c7:m2"} {"signature": "@propertydef _direct_bases(self):", "body": "return get_interfaces(self._impl.direct_bases)", "docstring": "Directly inherited base classes", "id": "f13977:c7:m3"} {"signature": "def _is_base(self, other):", "body": "return self._impl.is_base(other._impl)", "docstring": "True if the space is a base space of ``other``, False otherwise.", "id": "f13977:c7:m4"} {"signature": "def _is_sub(self, other):", "body": "return self._impl.is_sub(other._impl)", 
"docstring": "True if the space is a sub space of ``other``, False otherwise.", "id": "f13977:c7:m5"} {"signature": "def _is_static(self):", "body": "return isinstance(self._impl, StaticSpaceImpl)", "docstring": "True if the space is a static space, False if dynamic.", "id": "f13977:c7:m6"} {"signature": "def _is_derived(self):", "body": "return self._impl.is_derived", "docstring": "True if the space is a derived space, False otherwise.", "id": "f13977:c7:m7"} {"signature": "def _is_defined(self):", "body": "return self._impl.is_defined()", "docstring": "True if the space is a defined space, False otherwise.", "id": "f13977:c7:m8"} {"signature": "def _is_root(self):", "body": "return isinstance(self._impl, RootDynamicSpaceImpl)", "docstring": "True if ths space is a dynamic space, False otherwise.", "id": "f13977:c7:m9"} {"signature": "def _is_dynamic(self):", "body": "return self._impl.is_dynamic()", "docstring": "True if the space is in a dynamic space, False otherwise.", "id": "f13977:c7:m10"} {"signature": "@propertydef cells(self):", "body": "return self._impl.cells.interfaces", "docstring": "A mapping of cells names to the cells objects in the space.", "id": "f13977:c7:m11"} {"signature": "@propertydef _self_cells(self):", "body": "return self._impl.self_cells.interfaces", "docstring": "A mapping that associates names to cells defined in the space", "id": "f13977:c7:m12"} {"signature": "@propertydef _derived_cells(self):", "body": "return self._impl.derived_cells.interfaces", "docstring": "A mapping associating names to derived cells.", "id": "f13977:c7:m13"} {"signature": "@propertydef all_spaces(self):", "body": "return self._impl.spaces.interfaces", "docstring": "A mapping associating names to all(static and dynamic) spaces.", "id": "f13977:c7:m14"} {"signature": "@propertydef spaces(self):", "body": "return self._impl.static_spaces.interfaces", "docstring": "A mapping associating names to static spaces.", "id": "f13977:c7:m15"} {"signature": "@propertydef static_spaces(self):", "body": "return self._impl.static_spaces.interfaces", "docstring": "A mapping associating names to static spaces.\n\n Alias to :py:meth:`spaces`", "id": "f13977:c7:m16"} {"signature": "@propertydef dynamic_spaces(self):", "body": "return self._impl.dynamic_spaces.interfaces", "docstring": "A mapping associating names to dynamic spaces.", "id": "f13977:c7:m17"} {"signature": "@propertydef _self_spaces(self):", "body": "return self._impl.self_spaces.interfaces", "docstring": "A mapping associating names to self spaces.", "id": "f13977:c7:m18"} {"signature": "@propertydef _derived_spaces(self):", "body": "return self._impl.derived_spaces.interfaces", "docstring": "A mapping associating names to derived spaces.", "id": "f13977:c7:m19"} {"signature": "@propertydef argvalues(self):", "body": "return self._impl.argvalues_if", "docstring": "A tuple of space arguments.", "id": "f13977:c7:m20"} {"signature": "@propertydef parameters(self):", "body": "return tuple(self._impl.formula.parameters)", "docstring": "A tuple of parameter strings.", "id": "f13977:c7:m21"} {"signature": "@propertydef refs(self):", "body": "return self._impl.refs.interfaces", "docstring": "A map associating names to objects accessible by the names.", "id": "f13977:c7:m22"} {"signature": "@propertydef _self_refs(self):", "body": "return self._impl.self_refs.interfaces", "docstring": "A mapping associating names to self refs.", "id": "f13977:c7:m23"} {"signature": "@propertydef formula(self):", "body": "return self._impl.formula", "docstring": 
"Property to get, set, delete formula.", "id": "f13977:c7:m24"} {"signature": "def has_params(self):", "body": "return bool(self._impl.formula)", "docstring": "Check if the parameter function is set.", "id": "f13977:c7:m25"} {"signature": "def to_frame(self, *args):", "body": "return self._impl.to_frame(args)", "docstring": "Convert the space itself into a Pandas DataFrame object.", "id": "f13977:c7:m29"} {"signature": "@propertydef frame(self):", "body": "return self._impl.to_frame(())", "docstring": "Alias of ``to_frame()``.", "id": "f13977:c7:m30"} {"signature": "@propertydef _baseattrs(self):", "body": "result = super()._baseattrsresult[\"\"] = self.static_spaces._baseattrsresult[\"\"] = self.dynamic_spaces._baseattrsresult[\"\"] = self.cells._baseattrsresult[\"\"] = self.refs._baseattrsif self.has_params():result[\"\"] = \"\".join(self.parameters)else:result[\"\"] = \"\"return result", "docstring": "A dict of members expressed in literals", "id": "f13977:c7:m31"} {"signature": "def new_cells(self, name=None, formula=None):", "body": "return self._impl.new_cells(name, formula).interface", "docstring": "Create a cells in the space.\n\n Args:\n name: If omitted, the model is named automatically ``CellsN``,\n where ``N`` is an available number.\n func: The function to define the formula of the cells.\n\n Returns:\n The new cells.", "id": "f13977:c8:m0"} {"signature": "def add_bases(self, *bases):", "body": "return self._impl.add_bases(get_impls(bases))", "docstring": "Add base spaces.", "id": "f13977:c8:m1"} {"signature": "def remove_bases(self, *bases):", "body": "return self._impl.remove_bases(bases)", "docstring": "Remove base spaces.", "id": "f13977:c8:m2"} {"signature": "def import_funcs(self, module):", "body": "newcells = self._impl.new_cells_from_module(module)return get_interfaces(newcells)", "docstring": "Create a cells from a module.", "id": "f13977:c8:m3"} {"signature": "def new_cells_from_module(self, module):", "body": "newcells = self._impl.new_cells_from_module(module)return get_interfaces(newcells)", "docstring": "Create a cells from a module.\n\n Alias to :py:meth:`import_funcs`.", "id": "f13977:c8:m4"} {"signature": "def reload(self):", "body": "self._impl.reload()return self", "docstring": "Reload the source module and update the formulas.\n\n If the space was created from a module, reload the module and\n update the formulas of its cells.\n\n If a cell in the space is not created from a function definition\n in the source module of the space, it is not updated.\n\n If the formula of a cell in the space was created from a function\n definition in the source module of the space and the definition is\n missing from the updated module, the formula is cleared and\n values calculated directly or indirectly depending the cells\n are cleared.\n\n If the formula of a cell in the space has not been changed\n before and after reloading the source module, the values held\n in the cell and relevant cells are retained.\n\n Returns:\n This method returns the space itself.", "id": "f13977:c8:m5"} {"signature": "def new_cells_from_excel(self,book,range_,sheet=None,names_row=None,param_cols=None,param_order=None,transpose=False,names_col=None,param_rows=None,):", "body": "return self._impl.new_cells_from_excel(book,range_,sheet,names_row,param_cols,param_order,transpose,names_col,param_rows,)", "docstring": "Create multiple cells from an Excel range.\n\n This method reads values from a range in an Excel file,\n create cells and populate them with the values in the range.\n To use this 
method, ``openpyxl`` package must be installed.\n\n The Excel file to read data from is specified by ``book``\n parameters. The ``range_`` can be a range address, such as \"$G4:$K10\",\n or a named range. In case a range address is given,\n ``sheet`` must also be given.\n\n By default, cells data are interpreted as being laid out side-by-side.\n ``names_row`` is a row index (starting from 0) to specify the\n row that contains the names of cells and parameters.\n Cells and parameter names must be contained in a single row.\n ``param_cols`` accepts a sequence (such as list or tuple) of\n column indexes (starting from 0) that indicate columns that\n contain cells arguments.\n\n **2-dimensional cells definitions**\n\n The optional ``names_col`` and ``param_rows`` parameters are used,\n when data for one cells spans more than one column.\n In such cases, the cells data is 2-dimensional, and\n there must be parameter row(s) across the columns\n that contain arguments of the parameters.\n A sequence of row indexes that indicate parameter rows\n is passed to ``param_rows``.\n The names of those parameters must be contained in the\n same rows as parameter values (arguments), and\n ``names_col`` is to indicate the column position at which\n the parameter names are defined.\n\n **Horizontal arrangement**\n\n By default, cells data are interpreted as being placed\n side-by-side, regardless of whether one cells corresponds\n to a single column or multiple columns.\n ``transpose`` parameter is used to alter this orientation,\n and if it is set to ``True``, cells values are\n interpreted as being placed one above the other.\n \"row(s)\" and \"col(s)\" in the parameter\n names are interpreted inversely, i.e.\n all indexes passed to \"row(s)\" parameters are interpreted\n as column indexes,\n and all indexes passed to \"col(s)\" parameters as row indexes.\n\n\n Args:\n book (str): Path to an Excel file.\n range_ (str): Range expression, such as \"A1\", \"$G4:$K10\",\n or named range \"NamedRange1\".\n sheet (str): Sheet name (case ignored).\n names_row (optional): an index number indicating\n what row contains the names of cells and parameters.\n Defaults to the top row (0).\n param_cols (optional): a sequence of index numbers\n indicating parameter columns.\n Defaults to only the leftmost column ([0]).\n names_col (optional): an index number, starting from 0,\n indicating what column contains additional parameters.\n param_rows (optional): a sequence of index numbers, starting from\n 0, indicating rows of additional parameters, in case cells are\n defined in two dimensions.\n transpose (optional): Defaults to ``False``.\n If set to ``True``, \"row(s)\" and \"col(s)\" in the parameter\n names are interpreted inversely, i.e.\n all indexes passed to \"row(s)\" parameters are interpreted\n as column indexes,\n and all indexes passed to \"col(s)\" parameters as row indexes.\n param_order (optional): a sequence to reorder the parameters.\n The elements of the sequence are the indexes of ``param_cols``\n elements, and optionally the index of ``param_rows`` elements\n shifted by the length of ``param_cols``.", "id": "f13977:c8:m6"} {"signature": "def __contains__(self, item):", "body": "if isinstance(item, str):return item in self._impl.namespaceelif isinstance(item, Cells):return item._impl in self._impl.cells.values()elif isinstance(item, StaticSpace):return item._impl in self._impl.spaces.values()else:return False", "docstring": "Check if item is in the space.\n\n item can be either a cells or space.\n\n 
Args:\n item: a cells or space to check.\n\n Returns:\n True if item is a direct child of the space, False otherwise.", "id": "f13977:c8:m7"} {"signature": "def set_formula(self, formula):", "body": "self._impl.set_formula(formula)", "docstring": "Set if the parameter function.", "id": "f13977:c8:m11"} {"signature": "@propertydef direct_bases(self):", "body": "return list(self.model.spacegraph.predecessors(self))", "docstring": "Return an iterator over direct base spaces", "id": "f13977:c9:m11"} {"signature": "def get_object(self, name):", "body": "parts = name.split(\"\")child = parts.pop()if parts:return self.spaces[child].get_object(\"\".join(parts))else:return self._namespace_impl[child]", "docstring": "Retrieve an object by a dotted name relative to the space.", "id": "f13977:c9:m27"} {"signature": "def _get_dynamic_base(self, bases_):", "body": "bases = tuple(base.bases[] if base.is_dynamic() else base for base in bases_)if len(bases) == :return bases[]elif len(bases) > :return self.model.get_dynamic_base(bases)else:RuntimeError(\"\")", "docstring": "Create or get the base space from a list of spaces\n\n if a direct base space in `bases` is dynamic, replace it with\n its base.", "id": "f13977:c9:m30"} {"signature": "def _new_dynspace(self,name=None,bases=None,formula=None,refs=None,arguments=None,source=None,):", "body": "if name is None:name = self.spacenamer.get_next(self.namespace)if name in self.namespace:raise ValueError(\"\" % name)if not is_valid_name(name):raise ValueError(\"\" % name)space = RootDynamicSpaceImpl(parent=self,name=name,formula=formula,refs=refs,source=source,arguments=arguments,)space.is_derived = Falseself._set_space(space)if bases: dynbase = self._get_dynamic_base(bases)space._dynbase = dynbasedynbase._dynamic_subs.append(space)return space", "docstring": "Create a new dynamic root space.", "id": "f13977:c9:m31"} {"signature": "def get_dynspace(self, args, kwargs=None):", "body": "node = get_node(self, *convert_args(args, kwargs))key = node[KEY]if key in self.param_spaces:return self.param_spaces[key]else:last_self = self.system.selfself.system.self = selftry:space_args = self.eval_formula(node)finally:self.system.self = last_selfif space_args is None:space_args = {\"\": [self]} else:if \"\" in space_args:bases = get_impls(space_args[\"\"])if isinstance(bases, StaticSpaceImpl):space_args[\"\"] = [bases]elif bases is None:space_args[\"\"] = [self] else:space_args[\"\"] = baseselse:space_args[\"\"] = [self]space_args[\"\"] = node_get_args(node)space = self._new_dynspace(**space_args)self.param_spaces[key] = spacespace.inherit(clear_value=False)return space", "docstring": "Create a dynamic root space\n\n Called from interface methods", "id": "f13977:c9:m32"} {"signature": "def restore_state(self, system):", "body": "super().restore_state(system)BaseSpaceContainerImpl.restore_state(self, system)for cells in self._cells.values():cells.restore_state(system)", "docstring": "Called after unpickling to restore some attributes manually.", "id": "f13977:c9:m38"} {"signature": "def new_cells_from_excel(self,book,range_,sheet=None,names_row=None,param_cols=None,param_order=None,transpose=False,names_col=None,param_rows=None,):", "body": "import modelx.io.excel as xlcellstable = xl.CellsTable(book,range_,sheet,names_row,param_cols,param_order,transpose,names_col,param_rows,)if cellstable.param_names:sig = \"\".join(cellstable.param_names) + \"\"else:sig = \"\"blank_func = \"\" + sig + \"\"for cellsdata in cellstable.items():cells = self.new_cells(name=cellsdata.name, 
formula=blank_func)for args, value in cellsdata.items():cells.set_value(args, value)", "docstring": "Create multiple cells from an Excel range.\n\n Args:\n book (str): Path to an Excel file.\n range_ (str): Range expression, such as \"A1\", \"$G4:$K10\",\n or named range \"NamedRange1\".\n sheet (str): Sheet name (case ignored).\n names_row: Cells names in a sequence, or an integer number, or\n a string expression indicating row (or column depending on\n ```orientation```) to read cells names from.\n param_cols: a sequence of them\n indicating parameter columns (or rows depending on ```\n orientation```)\n param_order: a sequence of integers representing\n the order of params and extra_params.\n transpose: in which direction 'vertical' or 'horizontal'\n names_col: a string or a list of names of the extra params.\n param_rows: integer or string expression, or a sequence of them\n indicating row (or column) to be interpreted as parameters.", "id": "f13977:c10:m5"} {"signature": "def set_attr(self, name, value):", "body": "if not is_valid_name(name):raise ValueError(\"\" % name)if name in self.namespace:if name in self.refs:if name in self.self_refs:self.new_ref(name, value)else:raise KeyError(\"\" % name)elif name in self.cells:if self.cells[name].is_scalar():self.cells[name].set_value((), value)else:raise AttributeError(\"\" % name)else:raise ValueErrorelse:self.new_ref(name, value)", "docstring": "Implementation of attribute setting\n\n ``space.name = value`` by user script\n Called from ``Space.__setattr__``", "id": "f13977:c10:m7"} {"signature": "def del_attr(self, name):", "body": "if name in self.namespace:if name in self.cells:self.del_cells(name)elif name in self.spaces:self.del_space(name)elif name in self.refs:self.del_ref(name)else:raise RuntimeError(\"\")else:raise KeyError(\"\" % (name, self.name))", "docstring": "Implementation of attribute deletion\n\n ``del space.name`` by user script\n Called from ``StaticSpace.__delattr__``", "id": "f13977:c10:m8"} {"signature": "def del_space(self, name):", "body": "if name not in self.spaces:raise ValueError(\"\" % name)if name in self.static_spaces:space = self.static_spaces[name]if space.is_derived:raise ValueError(\"\" % repr(space.interface))else:self.static_spaces.del_item(name)self.model.spacegraph.remove_node(space)self.inherit()self.model.spacegraph.update_subspaces(self)elif name in self.dynamic_spaces:self.dynamic_spaces.del_item(name)else:raise ValueError(\"\")", "docstring": "Delete a space.", "id": "f13977:c10:m15"} {"signature": "def del_cells(self, name):", "body": "if name in self.cells:cells = self.cells[name]self.cells.del_item(name)self.inherit()self.model.spacegraph.update_subspaces(self)elif name in self.dynamic_spaces:cells = self.dynamic_spaces.pop(name)self.dynamic_spaces.set_update()else:raise KeyError(\"\" % name)NullImpl(cells)", "docstring": "Implementation of cells deletion\n\n ``del space.name`` where name is a cells, or\n ``del space.cells['name']``", "id": "f13977:c10:m16"} {"signature": "@propertydef evalrepr(self):", "body": "args = [repr(arg) for arg in get_interfaces(self.argvalues)]param = \"\".join(args)return \"\" % (self.parent.evalrepr, param)", "docstring": "Evaluable repr", "id": "f13977:c13:m8"} {"signature": "def fix_lamdaline(source):", "body": "strio = io.StringIO(source)gen = tokenize.generate_tokens(strio.readline)tkns = []try:for t in gen:tkns.append(t)except tokenize.TokenError:passlambda_pos = [(t.type, t.string) for t in tkns].index((tokenize.NAME, \"\"))tkns = 
tkns[lambda_pos:]lastop_pos = (len(tkns) - - [t.type for t in tkns[::-]].index(tokenize.OP))lastop = tkns[lastop_pos]fiedlineno = lastop.start[]fixedline = lastop.line[: lastop.start[]] + lastop.line[lastop.end[] :]tkns = tkns[:lastop_pos]fixedlines = \"\"last_lineno = for t in tkns:if last_lineno == t.start[]:continueelif t.start[] == fiedlineno:fixedlines += fixedlinelast_lineno = t.start[]else:fixedlines += t.linelast_lineno = t.start[]return fixedlines", "docstring": "Remove the last redundant token from lambda expression\n\n lambda x: return x)\n ^\n Return string without irrelevant tokens\n returned from inspect.getsource on lamda expr returns", "id": "f13978:m0"} {"signature": "def find_funcdef(source):", "body": "try:module_node = compile(source, \"\", mode=\"\", flags=ast.PyCF_ONLY_AST)except SyntaxError:return find_funcdef(fix_lamdaline(source))for node in ast.walk(module_node):if isinstance(node, ast.FunctionDef) or isinstance(node, ast.Lambda):return noderaise ValueError(\"\")", "docstring": "Find the first FuncDef ast object in source", "id": "f13978:m1"} {"signature": "def extract_params(source):", "body": "funcdef = find_funcdef(source)params = []for node in ast.walk(funcdef.args):if isinstance(node, ast.arg):if node.arg not in params:params.append(node.arg)return params", "docstring": "Extract parameters from a function definition", "id": "f13978:m2"} {"signature": "def extract_names(source):", "body": "if source is None:return Nonesource = dedent(source)funcdef = find_funcdef(source)params = extract_params(source)names = []if isinstance(funcdef, ast.FunctionDef):stmts = funcdef.bodyelif isinstance(funcdef, ast.Lambda):stmts = [funcdef.body]else:raise ValueError(\"\")for stmt in stmts:for node in ast.walk(stmt):if isinstance(node, ast.Name):if node.id not in names and node.id not in params:names.append(node.id)return names", "docstring": "Extract names from a function definition\n\n Looks for a function definition in the source.\n Only the first function definition is examined.\n\n Returns:\n a list names(identifiers) used in the body of the function\n excluding function parameters.", "id": "f13978:m3"} {"signature": "def is_funcdef(src):", "body": "module_node = ast.parse(dedent(src))if len(module_node.body) == and isinstance(module_node.body[], ast.FunctionDef):return Trueelse:return False", "docstring": "True if src is a function definition", "id": "f13978:m5"} {"signature": "def remove_decorator(source: str):", "body": "lines = source.splitlines()atok = asttokens.ASTTokens(source, parse=True)for node in ast.walk(atok.tree):if isinstance(node, ast.FunctionDef):breakif node.decorator_list:deco_first = node.decorator_list[]deco_last = node.decorator_list[-]line_first = atok.tokens[deco_first.first_token.index - ].start[]line_last = atok.tokens[deco_last.last_token.index + ].start[]lines = lines[:line_first - ] + lines[line_last:]return \"\".join(lines) + \"\"", "docstring": "Remove decorators from function definition", "id": "f13978:m6"} {"signature": "def replace_funcname(source: str, name: str):", "body": "lines = source.splitlines()atok = asttokens.ASTTokens(source, parse=True)for node in ast.walk(atok.tree):if isinstance(node, ast.FunctionDef):breaki = node.first_token.indexfor i in range(node.first_token.index, node.last_token.index):if (atok.tokens[i].type == token.NAMEand atok.tokens[i].string == \"\"):breaklineno, col_begin = atok.tokens[i + ].startlineno_end, col_end = atok.tokens[i + ].endassert lineno == lineno_endlines[lineno-] = (lines[lineno-][:col_begin] + 
name + lines[lineno-][col_end:])return \"\".join(lines) + \"\"", "docstring": "Replace function name", "id": "f13978:m7"} {"signature": "def has_lambda(src):", "body": "module_node = ast.parse(dedent(src))lambdaexp = [node for node in ast.walk(module_node)if isinstance(node, ast.Lambda)]return bool(lambdaexp)", "docstring": "True if only one lambda expression is included", "id": "f13978:m8"} {"signature": "def __getstate__(self):", "body": "return {\"\": self.source, \"\": self.module}", "docstring": "Specify members to pickle.", "id": "f13978:c1:m7"} {"signature": "def _reload(self, module=None):", "body": "if self.module is None:raise RuntimeErrorelif module is None:import importlibmodule = ModuleSource(importlib.reload(module))elif module.name != self.module:raise RuntimeErrorif self.name in module.funcs:func = module.funcs[self.name]self.__init__(func=func)else:self.__init__(func=NULL_FORMULA)return self", "docstring": "Reload the source function from the source module.\n\n **Internal use only**\n Update the source function of the formula.\n This method is used to updated the underlying formula\n when the source code of the module in which the source function\n is read from is modified.\n\n If the formula was not created from a module, an error is raised.\n If ``module_`` is not given, the source module of the formula is\n reloaded. If ``module_`` is given and matches the source module,\n then the module_ is used without being reloaded.\n If ``module_`` is given and does not match the source module of\n the formula, an error is raised.\n\n Args:\n module_: A ``ModuleSource`` object\n\n Returns:\n self", "id": "f13978:c1:m10"} {"signature": "def clear_descendants(self, source, clear_source=True):", "body": "desc = nx.descendants(self, source)if clear_source:desc.add(source)self.remove_nodes_from(desc)return desc", "docstring": "Remove all descendants of(reachable from) `source`.\n\n Args:\n source: Node descendants\n clear_source(bool): Remove origin too if True.\n Returns:\n set: The removed nodes.", "id": "f13980:c0:m0"} {"signature": "def clear_obj(self, obj):", "body": "obj_nodes = self.get_nodes_with(obj)removed = set()for node in obj_nodes:if self.has_node(node):removed.update(self.clear_descendants(node))return removed", "docstring": "Remove all nodes with `obj` and their descendants.", "id": "f13980:c0:m1"} {"signature": "def get_nodes_with(self, obj):", "body": "result = set()if nx.__version__[] == \"\":nodes = self.nodes_iter()else:nodes = self.nodesfor node in nodes:if node[OBJ] == obj:result.add(node)return result", "docstring": "Return nodes with `obj`.", "id": "f13980:c0:m2"} {"signature": "def fresh_copy(self):", "body": "return DependencyGraph()", "docstring": "Overriding Graph.fresh_copy", "id": "f13980:c0:m3"} {"signature": "def add_path(self, nodes, **attr):", "body": "if nx.__version__[] == \"\":return super().add_path(nodes, **attr)else:return nx.add_path(self, nodes, **attr)", "docstring": "In replacement for Deprecated add_path method", "id": "f13980:c0:m4"} {"signature": "def rename(self, name):", "body": "self._impl.system.rename_model(new_name=name, old_name=self.name)", "docstring": "Rename the model itself", "id": "f13980:c1:m0"} {"signature": "def save(self, filepath):", "body": "self._impl.save(filepath)", "docstring": "Save the model to a file.", "id": "f13980:c1:m1"} {"signature": "def close(self):", "body": "self._impl.close()", "docstring": "Close the model.", "id": "f13980:c1:m2"} {"signature": "@propertydef cellgraph(self):", "body": "return 
self._impl.cellgraph", "docstring": "A directed graph of cells.", "id": "f13980:c1:m7"} {"signature": "@propertydef refs(self):", "body": "return self._impl.global_refs.interfaces", "docstring": "Return a mapping of global references.", "id": "f13980:c1:m8"} {"signature": "def rename(self, name):", "body": "if is_valid_name(name):if name not in self.system.models:self.name = namereturn True else: return Falseelse:raise ValueError(\"\" % name)", "docstring": "Rename self. Must be called only by its system.", "id": "f13980:c2:m1"} {"signature": "def clear_descendants(self, source, clear_source=True):", "body": "removed = self.cellgraph.clear_descendants(source, clear_source)for node in removed:del node[OBJ].data[node[KEY]]", "docstring": "Clear values and nodes calculated from `source`.", "id": "f13980:c2:m2"} {"signature": "def clear_obj(self, obj):", "body": "removed = self.cellgraph.clear_obj(obj)for node in removed:del node[OBJ].data[node[KEY]]", "docstring": "Clear values and nodes of `obj` and their dependants.", "id": "f13980:c2:m3"} {"signature": "def get_object(self, name):", "body": "parts = name.split(\"\")space = self.spaces[parts.pop()]if parts:return space.get_object(\"\".join(parts))else:return space", "docstring": "Retrieve an object by a dotted name relative to the model.", "id": "f13980:c2:m12"} {"signature": "def restore_state(self, system):", "body": "Impl.restore_state(self, system)BaseSpaceContainerImpl.restore_state(self, system)mapping = {}for node in self.cellgraph:if isinstance(node, tuple):name, key = nodeelse:name, key = node, Nonecells = self.get_object(name)mapping[node] = get_node(cells, key, None)self.cellgraph = nx.relabel_nodes(self.cellgraph, mapping)", "docstring": "Called after unpickling to restore some attributes manually.", "id": "f13980:c2:m15"} {"signature": "def get_dynamic_base(self, bases: tuple):", "body": "try:return self._dynamic_bases_inverse[bases]except KeyError:name = self._dynamic_base_namer.get_next(self._dynamic_bases)base = self._new_space(name=name)self.spacegraph.add_space(base)self._dynamic_bases[name] = baseself._dynamic_bases_inverse[bases] = basebase.add_bases(bases)return base", "docstring": "Create of get a base space for a tuple of bases", "id": "f13980:c2:m22"} {"signature": "def get_bases(self, node):", "body": "return self.predecessors(node)", "docstring": "Direct Bases iterator", "id": "f13980:c3:m3"} {"signature": "def check_mro(self, bases):", "body": "try:self.add_node(\"\")for base in bases:nx.DiGraph.add_edge(self, base, \"\")result = self.get_mro(\"\")[:]finally:self.remove_node(\"\")return result", "docstring": "Check if C3 MRO is possible with given bases", "id": "f13980:c3:m4"} {"signature": "def get_mro(self, space):", "body": "seqs = [self.get_mro(base) for base in self.get_bases(space)] + [list(self.get_bases(space))]res = []while True:non_empty = list(filter(None, seqs))if not non_empty:res.insert(, space)return resfor seq in non_empty: candidate = seq[]not_head = [s for s in non_empty if candidate in s[:]]if not_head:candidate = Noneelse:breakif not candidate: raise TypeError(\"\")res.append(candidate)for seq in non_empty:if seq[] == candidate:del seq[]", "docstring": "Calculate the Method Resolution Order of bases using the C3 algorithm.\n\n Code modified from\n http://code.activestate.com/recipes/577748-calculate-the-mro-of-a-class/\n\n Args:\n bases: sequence of direct base spaces.\n\n Returns:\n mro as a list of bases including node itself", "id": "f13980:c3:m5"} {"signature": "def 
custom_showwarning(message, category, filename=\"\", lineno=-, file=None, line=None):", "body": "if file is None:file = sys.stderrif file is None:returntext = \"\" % (category.__name__, message)try:file.write(text)except OSError:pass", "docstring": "Hook to override default showwarning.\n\n https://stackoverflow.com/questions/2187269/python-print-only-the-message-on-warnings", "id": "f13981:m0"} {"signature": "def is_ipython():", "body": "try:__IPYTHON__return Trueexcept NameError:return False", "docstring": "True if the current shell is an IPython shell.\n\n Note __IPYTHON__ is not yet set before IPython kernel is initialized.\n\n https://stackoverflow.com/questions/5376837/how-can-i-do-an-if-run-from-ipython-test-in-python", "id": "f13981:m1"} {"signature": "def custom_showtraceback(self,exc_tuple=None,filename=None,tb_offset=None,exception_only=False,running_compiled_code=False,):", "body": "self.default_showtraceback(exc_tuple,filename,tb_offset,exception_only=True,running_compiled_code=running_compiled_code,)", "docstring": "Custom showtraceback for monkey-patching IPython's InteractiveShell\n\n https://stackoverflow.com/questions/1261668/cannot-override-sys-excepthook", "id": "f13981:m2"} {"signature": "def excepthook(self, except_type, exception, traceback):", "body": "if except_type is DeepReferenceError:print(exception.msg)else:self.default_excepthook(except_type, exception, traceback)", "docstring": "Not Used: Custom exception hook to replace sys.excepthook\n\n This is for CPython's default shell. IPython does not use sys.exepthook.\n\n https://stackoverflow.com/questions/27674602/hide-traceback-unless-a-debug-flag-is-set", "id": "f13981:m3"} {"signature": "def tracemessage(self, maxlen=):", "body": "result = \"\"for i, value in enumerate(self):result += \"\".format(i, get_node_repr(value))result = result.strip(\"\")lines = result.split(\"\")if maxlen and len(lines) > maxlen:i = int(maxlen / )lines = lines[:i] + [\"\"] + lines[-(maxlen - i) :]result = \"\".join(lines)return result", "docstring": "if maxlen > 0, the message is shortened to maxlen traces.", "id": "f13981:c1:m4"} {"signature": "def setup_ipython(self):", "body": "if self.is_ipysetup:returnfrom ipykernel.kernelapp import IPKernelAppself.shell = IPKernelApp.instance().shell if not self.shell and is_ipython():self.shell = get_ipython()if self.shell:shell_class = type(self.shell)shell_class.default_showtraceback = shell_class.showtracebackshell_class.showtraceback = custom_showtracebackself.is_ipysetup = Trueelse:raise RuntimeError(\"\")", "docstring": "Monkey patch shell's error handler.\n\n This method is to monkey-patch the showtraceback method of\n IPython's InteractiveShell to\n\n __IPYTHON__ is not detected when starting an IPython kernel,\n so this method is called from start_kernel in spyder-modelx.", "id": "f13981:c2:m1"} {"signature": "def restore_ipython(self):", "body": "if not self.is_ipysetup:returnshell_class = type(self.shell)shell_class.showtraceback = shell_class.default_showtracebackdel shell_class.default_showtracebackself.is_ipysetup = False", "docstring": "Restore default IPython showtraceback", "id": "f13981:c2:m2"} {"signature": "def configure_python(self):", "body": "sys.setrecursionlimit(**)warnings.showwarning = custom_showwarningthreading.stack_size()", "docstring": "Configure Python settings for modelx\n\n The error handler is configured later.", "id": "f13981:c2:m3"} {"signature": "def restore_python(self):", "body": "orig = self.orig_settingssys.setrecursionlimit(orig[\"\"])if \"\" in 
orig:sys.tracebacklimit = orig[\"\"]else:if hasattr(sys, \"\"):del sys.tracebacklimitif \"\" in orig:warnings.showwarning = orig[\"\"]orig.clear()threading.stack_size()", "docstring": "Restore Python settings to the original states", "id": "f13981:c2:m4"} {"signature": "def get_object(self, name):", "body": "parts = name.split(\"\")model_name = parts.pop()return self.models[model_name].get_object(\"\".join(parts))", "docstring": "Retrieve an object by its absolute name.", "id": "f13981:c2:m14"} {"signature": "def convert_args(args, kwargs):", "body": "found = Falsefor arg in args:if isinstance(arg, Cells):found = Truebreakif found:args = tuple(arg.value if isinstance(arg, Cells) else arg for arg in args)if kwargs is not None:for key, arg in kwargs.items():if isinstance(arg, Cells):kwargs[key] = arg.valuereturn args, kwargs", "docstring": "If args and kwargs contains Cells, Convert them to their values.", "id": "f13982:m0"} {"signature": "def shareable_parameters(cells):", "body": "result = []for c in cells.values():params = c.formula.parametersfor i in range(min(len(result), len(params))):if params[i] != result[i]:return Nonefor i in range(len(result), len(params)):result.append(params[i])return result", "docstring": "Return parameter names if the parameters are shareable among cells.\n\n Parameters are shareable among multiple cells when all the cells\n have the parameters in the same order if they ever have any.\n\n For example, if cells are foo(), bar(x), baz(x, y), then\n ('x', 'y') are shareable parameters amounts them, as 'x' and 'y'\n appear in the same order in the parameter list if they ever appear.\n\n Args:\n cells: An iterator yielding cells.\n\n Returns:\n None if parameters are not share,\n tuple of shareable parameter names,\n () if cells are all scalars.", "id": "f13982:m1"} {"signature": "def match(self, *args, **kwargs):", "body": "return self._impl.find_match(args, kwargs)", "docstring": "Returns the best matching args and their value.\n\n If the cells returns None for the given arguments,\n continue to get a value by passing arguments\n masking the given arguments with Nones.\n The search of non-None value starts from the given arguments\n to the all None arguments in the lexicographical order.\n The masked arguments that returns non-None value\n first is returned with the value.", "id": "f13982:c1:m3"} {"signature": "def __setitem__(self, key, value):", "body": "self._impl.set_value(tuplize_key(self, key), value)", "docstring": "Set value of a particular cell", "id": "f13982:c1:m5"} {"signature": "def copy(self, space=None, name=None):", "body": "return Cells(space=space, name=name, formula=self.formula)", "docstring": "Make a copy of itself and return it.", "id": "f13982:c1:m7"} {"signature": "def clear(self, *args, **kwargs):", "body": "return self._impl.clear_value(*args, **kwargs)", "docstring": "Clear all the values.", "id": "f13982:c1:m9"} {"signature": "def __bool__(self):", "body": "return self._impl.single_value != ", "docstring": "True if self != 0. 
Called for bool(self).", "id": "f13982:c1:m10"} {"signature": "def __add__(self, other):", "body": "return self._impl.single_value + other", "docstring": "self + other", "id": "f13982:c1:m11"} {"signature": "def __radd__(self, other):", "body": "return self.__add__(other)", "docstring": "other + self", "id": "f13982:c1:m12"} {"signature": "def __neg__(self):", "body": "return -self._impl.single_value", "docstring": "-self", "id": "f13982:c1:m13"} {"signature": "def __pos__(self):", "body": "return +self._impl.single_value", "docstring": "+self", "id": "f13982:c1:m14"} {"signature": "def __sub__(self, other):", "body": "return self + -other", "docstring": "self - other", "id": "f13982:c1:m15"} {"signature": "def __rsub__(self, other):", "body": "return -self + other", "docstring": "other - self", "id": "f13982:c1:m16"} {"signature": "def __mul__(self, other):", "body": "return self._impl.single_value * other", "docstring": "self * other", "id": "f13982:c1:m17"} {"signature": "def __rmul__(self, other):", "body": "return self.__mul__(other)", "docstring": "other * self", "id": "f13982:c1:m18"} {"signature": "def __truediv__(self, other):", "body": "return self._impl.single_value / other", "docstring": "self / other: Should promote to float when necessary.", "id": "f13982:c1:m19"} {"signature": "def __rtruediv__(self, other):", "body": "return other / self._impl.single_value", "docstring": "other / self", "id": "f13982:c1:m20"} {"signature": "def __pow__(self, exponent):", "body": "return self._impl.single_value ** exponent", "docstring": "self ** exponent\n should promote to float or complex when necessary.", "id": "f13982:c1:m21"} {"signature": "def __rpow__(self, base):", "body": "return base ** self._impl.single_value", "docstring": "base ** self", "id": "f13982:c1:m22"} {"signature": "def __abs__(self):", "body": "raise NotImplementedError", "docstring": "Returns the Real distance from 0. 
Called for abs(self).", "id": "f13982:c1:m23"} {"signature": "def __eq__(self, other):", "body": "if self._impl.is_scalar():return self._impl.single_value == otherelif isinstance(other, Cells):return self is otherelse:raise TypeError", "docstring": "self == other", "id": "f13982:c1:m24"} {"signature": "def __lt__(self, other):", "body": "return self._impl.single_value < other", "docstring": "self < other", "id": "f13982:c1:m25"} {"signature": "def __le__(self, other):", "body": "return self.__eq__(other) or self.__lt__(other)", "docstring": "self <= other", "id": "f13982:c1:m26"} {"signature": "def __gt__(self, other):", "body": "return self._impl.single_value > other", "docstring": "self > other", "id": "f13982:c1:m27"} {"signature": "def __ge__(self, other):", "body": "return self.__eq__(other) or self.__gt__(other)", "docstring": "self >= other", "id": "f13982:c1:m28"} {"signature": "def to_series(self, *args):", "body": "return self._impl.to_series(args)", "docstring": "Convert the cells itself into a Pandas Series and return it.", "id": "f13982:c1:m29"} {"signature": "@propertydef series(self):", "body": "return self._impl.to_series(())", "docstring": "Alias of ``to_series()``.", "id": "f13982:c1:m30"} {"signature": "def to_frame(self, *args):", "body": "return self._impl.to_frame(args)", "docstring": "Convert the cells itself into a Pandas DataFrame and return it.\n\n if no `args` are passed, the returned DataFrame contains as many\n values as the cells have.\n\n if A sequence of arguments to the cells is passed as `args`,\n the returned DataFrame contains values only for the specified `args`.\n\n Args:\n args: A sequence or iterable of arguments to the cells.\n\n Returns:\n a DataFrame with a column named after the cells,\n with indexes named after the parameters of the cells.", "id": "f13982:c1:m31"} {"signature": "@propertydef frame(self):", "body": "return self._impl.to_frame(())", "docstring": "Alias of ``to_frame()``.", "id": "f13982:c1:m32"} {"signature": "@propertydef formula(self):", "body": "return self._impl.formula", "docstring": "Property to get, set, delete formula.", "id": "f13982:c1:m33"} {"signature": "@propertydef parameters(self):", "body": "return self._impl.formula.parameters", "docstring": "A tuple of parameter strings.", "id": "f13982:c1:m36"} {"signature": "def set_formula(self, func):", "body": "self._impl.set_formula(func)", "docstring": "Set formula from a function.\n Deprecated since version 0.0.5. Use formula property instead.", "id": "f13982:c1:m37"} {"signature": "def clear_formula(self):", "body": "self._impl.clear_formula()", "docstring": "Clear the formula.\n Deprecated since version 0.0.5. Use formula property instead.", "id": "f13982:c1:m38"} {"signature": "@propertydef value(self):", "body": "return self._impl.single_value", "docstring": "Get, set, delete the scalar value.\n The cells must be a scalar cells.", "id": "f13982:c1:m39"} {"signature": "def node(self, *args, **kwargs):", "body": "return CellNode(get_node(self._impl, *convert_args(args, kwargs)))", "docstring": "Return a :class:`CellNode` object for the given arguments.", "id": "f13982:c1:m42"} {"signature": "def preds(self, *args, **kwargs):", "body": "return self._impl.predecessors(args, kwargs)", "docstring": "Return a list of predecessors of a cell.\n\n This method returns a list of CellNode objects, whose elements are\n predecessors of (i.e. 
referenced in the formula\n of) the cell specified by the given arguments.", "id": "f13982:c1:m43"} {"signature": "def succs(self, *args, **kwargs):", "body": "return self._impl.successors(args, kwargs)", "docstring": "Return a list of successors of a cell.\n\n This method returns a list of CellNode objects, whose elements are\n successors of (i.e. referencing in their formulas)\n the cell specified by the given arguments.", "id": "f13982:c1:m44"} {"signature": "@propertydef _baseattrs(self):", "body": "result = super()._baseattrsresult[\"\"] = \"\".join(self.parameters)return result", "docstring": "A dict of members expressed in literals", "id": "f13982:c1:m45"} {"signature": "@propertydef cells(self):", "body": "return self._impl[OBJ].interface", "docstring": "Return the Cells object", "id": "f13982:c3:m1"} {"signature": "@propertydef args(self):", "body": "return self._impl[KEY]", "docstring": "Return a tuple of the cells' arguments.", "id": "f13982:c3:m2"} {"signature": "@propertydef has_value(self):", "body": "return self._impl[OBJ].has_cell(self._impl[KEY])", "docstring": "Return ``True`` if the cell has a value.", "id": "f13982:c3:m3"} {"signature": "@propertydef value(self):", "body": "if self.has_value:return self._impl[OBJ].get_value(self._impl[KEY])else:raise ValueError(\"\")", "docstring": "Return the value of the cells.", "id": "f13982:c3:m4"} {"signature": "@propertydef preds(self):", "body": "return self.cells.preds(*self.args)", "docstring": "A list of nodes that this node refers to.", "id": "f13982:c3:m5"} {"signature": "@propertydef succs(self):", "body": "return self.cells.succs(*self.args)", "docstring": "A list of nodes that refer to this node.", "id": "f13982:c3:m6"} {"signature": "@propertydef _baseattrs(self):", "body": "result = {\"\": type(self).__name__,\"\": self.cells._baseattrs,\"\": self.args,\"\": self.value if self.has_value else None,\"\": len(self.preds),\"\": len(self.succs),\"\": self.cells._impl.repr_parent(),\"\": self.cells._get_repr(),}return result", "docstring": "A dict of members expressed in literals", "id": "f13982:c3:m7"} {"signature": "def get_interfaces(impls):", "body": "if impls is None:return Noneelif isinstance(impls, OrderMixin):result = OrderedDict()for name in impls.order:result[name] = impls[name].interfacereturn resultelif isinstance(impls, Mapping):return {name: impls[name].interface for name in impls}elif isinstance(impls, Sequence):return [impl.interface for impl in impls]else:return impls.interface", "docstring": "Get interfaces from their implementations.", "id": "f13983:m0"} {"signature": "def get_impls(interfaces):", "body": "if interfaces is None:return Noneelif isinstance(interfaces, Mapping):return {name: interfaces[name]._impl for name in interfaces}elif isinstance(interfaces, Sequence):return [interfaces._impl for interfaces in interfaces]else:return interfaces._impl", "docstring": "Get impls from their interfaces.", "id": "f13983:m1"} {"signature": "def update_lazyevals(self):", "body": "if self.lazy_evals is None:returnelif isinstance(self.lazy_evals, LazyEval):self.lazy_evals.get_updated()else:for lz in self.lazy_evals:lz.get_updated()", "docstring": "Update all LazyEvals in self\n\n self.lzy_evals must be set to LazyEval object(s) enough to\n update all owned LazyEval objects.", "id": "f13983:c0:m2"} {"signature": "@propertydef evalrepr(self):", "body": "if self.is_model():return self.get_fullname()else:return self.parent.evalrepr + \"\" + self.name", "docstring": "Evaluable repr", "id": "f13983:c0:m4"} {"signature": 
"def restore_state(self, system):", "body": "self.system = system", "docstring": "Called after unpickling to restore some attributes manually.", "id": "f13983:c0:m5"} {"signature": "@propertydef name(self):", "body": "return self._impl.name", "docstring": "Name of the object.", "id": "f13983:c5:m1"} {"signature": "@propertydef fullname(self):", "body": "return self._impl.get_fullname()", "docstring": "Dotted name of the object.\n\n Names joined by dots, such as 'Model1.Space1.Cells1',\n each element in the string is the name of the parent object\n of the next one joined by a dot.", "id": "f13983:c5:m2"} {"signature": "@propertydef parent(self):", "body": "if self._impl.parent is None:return Noneelse:return self._impl.parent.interface", "docstring": "The parent of this object. None for models.\n\n The parent object of a cells is a space that contains the cells.\n The parent object of a space is either a model or another space\n that contains the space.", "id": "f13983:c5:m3"} {"signature": "@propertydef model(self):", "body": "return self._impl.model.interface", "docstring": "The model this object belongs to.\n\n This is a property of Model, Space and Cells.\n For models, this property is themselves.", "id": "f13983:c5:m4"} {"signature": "@propertydef allow_none(self):", "body": "return self._impl.allow_none", "docstring": "Whether a cells can have None as its value.\n\n This is a property of Model, Space and Cells.\n If ``allow_none`` of a cells is False,\n the cells cannot have None as its value.\n Assigning None to the cells\n or its formula returning None raises an Error.\n If True, the cells can have None as their value.\n If set to None, ``allow_none`` of its parent is looked up,\n and the search continues until True or False is found.\n\n Returns:\n True if the cells can have None, False if it cannot,\n or None if a default value from the parent is to be used.", "id": "f13983:c5:m9"} {"signature": "@propertydef _baseattrs(self):", "body": "result = {\"\": type(self).__name__,\"\": id(self),\"\": self.name,\"\": self.fullname,\"\": self._get_repr(),}return result", "docstring": "A dict of members expressed in literals", "id": "f13983:c5:m11"} {"signature": "def _to_attrdict(self, attrs=None):", "body": "result = self._baseattrsfor attr in attrs:if hasattr(self, attr):result[attr] = getattr(self, attr)._to_attrdict(attrs)return result", "docstring": "Get extra attributes", "id": "f13983:c5:m12"} {"signature": "def get_updated_data(self):", "body": "self.get_updated()return self.data", "docstring": "Get updated ``data`` instead of self.", "id": "f13983:c7:m1"} {"signature": "@propertydef _baseattrs(self):", "body": "result = {\"\": type(self).__name__}try:result[\"\"] = {name: item._baseattrsfor name, item in self.items()if name[] != \"\"}except:raise RuntimeError(\"\" % self)return result", "docstring": "A dict of members expressed in literals", "id": "f13983:c14:m6"} {"signature": "def __init__(self, owner):", "body": "LazyEval.__init__(self, [])self.owner = ownerself.namespace_impl = owner._namespace_impl self.observe(self.namespace_impl)self.altfunc = Noneself.set_update()", "docstring": "Create altered function from owner's formula.\n\n owner is a StaticSpaceImpl or CellsImpl, which has formula, and\n namespace_impl as its members.", "id": "f13983:c16:m0"} {"signature": "def _update_data(self):", "body": "func = self.owner.formula.funccodeobj = func.__code__name = func.__name__ namespace_impl = self.owner._namespace_impl.get_updated()namespace = namespace_impl.interfacesselfnode = 
get_node(self.owner, None, None)for name in self.owner.formula.srcnames:if name in namespace_impl and isinstance(namespace_impl[name], ReferenceImpl):refnode = get_node(namespace_impl[name], None, None)self.owner.model.lexdep.add_path([selfnode, refnode])closure = func.__closure__ if closure is not None: closure = create_closure(self.owner.interface)self.altfunc = FunctionType(codeobj, namespace, name=name, closure=closure)", "docstring": "Update altfunc", "id": "f13983:c16:m1"} {"signature": "@propertydef spaces(self):", "body": "return self._impl.spaces.interfaces", "docstring": "A mapping of the names of child spaces to the Space objects", "id": "f13984:c0:m0"} {"signature": "def cur_space(self, name=None):", "body": "if name is None:return self._impl.model.currentspace.interfaceelse:self._impl.model.currentspace = self._impl.spaces[name]return self.cur_space()", "docstring": "Set the current space to Space ``name`` and return it.\n\n If called without arguments, the current space is returned.\n Otherwise, the current space is set to the space named ``name``\n and the space is returned.", "id": "f13984:c0:m1"} {"signature": "@propertydef _baseattrs(self):", "body": "result = super()._baseattrsresult[\"\"] = self.spaces._baseattrsreturn result", "docstring": "A dict of members expressed in literals", "id": "f13984:c0:m2"} {"signature": "def new_space(self, name=None, bases=None, formula=None, refs=None):", "body": "space = self._impl.model.currentspace = self._impl.new_space(name=name, bases=get_impls(bases), formula=formula, refs=refs)return space.interface", "docstring": "Create a child space.\n\n Args:\n name (str, optional): Name of the space. Defaults to ``SpaceN``,\n where ``N`` is a number determined automatically.\n bases (optional): A space or a sequence of spaces to be the base\n space(s) of the created space.\n formula (optional): Function to specify the parameters of\n dynamic child spaces. 
The signature of this function is used\n for setting parameters for dynamic child spaces.\n This function should return a mapping of keyword arguments\n to be passed to this method when the dynamic child spaces\n are created.\n\n Returns:\n The new child space.", "id": "f13984:c1:m0"} {"signature": "def import_module(self, module=None, recursive=False, **params):", "body": "if module is None:if \"\" in params:warnings.warn(\"\")module = params.pop(\"\")else:raise ValueError(\"\")if \"\" in params:params[\"\"] = get_impls(params[\"\"])space = (self._impl.model.currentspace) = self._impl.new_space_from_module(module, recursive=recursive, **params)return get_interfaces(space)", "docstring": "Create a child space from a module.\n\n Args:\n module: a module object or name of the module object.\n recursive: Not yet implemented.\n **params: arguments to pass to ``new_space``\n\n Returns:\n The new child space created from the module.", "id": "f13984:c1:m1"} {"signature": "def new_space_from_module(self, module, recursive=False, **params):", "body": "if \"\" in params:params[\"\"] = get_impls(params[\"\"])space = (self._impl.model.currentspace) = self._impl.new_space_from_module(module, recursive=recursive, **params)return get_interfaces(space)", "docstring": "Create a child space from a module.\n\n Alias to :py:meth:`import_module`.\n\n Args:\n module: a module object or name of the module object.\n recursive: Not yet implemented.\n **params: arguments to pass to ``new_space``\n\n Returns:\n The new child space created from the module.", "id": "f13984:c1:m2"} {"signature": "def new_space_from_excel(self,book,range_,sheet=None,name=None,names_row=None,param_cols=None,space_param_order=None,cells_param_order=None,transpose=False,names_col=None,param_rows=None,):", "body": "space = self._impl.new_space_from_excel(book,range_,sheet,name,names_row,param_cols,space_param_order,cells_param_order,transpose,names_col,param_rows,)return get_interfaces(space)", "docstring": "Create a child space from an Excel range.\n\n To use this method, ``openpyxl`` package must be installed.\n\n Args:\n book (str): Path to an Excel file.\n range_ (str): Range expression, such as \"A1\", \"$G4:$K10\",\n or named range \"NamedRange1\".\n sheet (str): Sheet name (case ignored).\n name (str, optional): Name of the space. Defaults to ``SpaceN``,\n where ``N`` is a number determined automatically.\n names_row (optional): an index number indicating\n what row contains the names of cells and parameters.\n Defaults to the top row (0).\n param_cols (optional): a sequence of index numbers\n indicating parameter columns.\n Defaults to only the leftmost column ([0]).\n names_col (optional): an index number, starting from 0,\n indicating what column contains additional parameters.\n param_rows (optional): a sequence of index numbers, starting from\n 0, indicating rows of additional parameters, in case cells are\n defined in two dimensions.\n transpose (optional): Defaults to ``False``.\n If set to ``True``, "row(s)" and "col(s)" in the parameter\n names are interpreted inversely, i.e.\n all indexes passed to "row(s)" parameters are interpreted\n as column indexes,\n and all indexes passed to "col(s)" parameters as row indexes.\n space_param_order: a sequence to specify space parameters and\n their orders. The elements of the sequence denote the indexes\n of ``param_cols`` elements, and optionally the index of\n ``param_rows`` elements shifted by the length of\n ``param_cols``. 
The elements of this parameter and\n ``cell_param_order`` must not overlap.\n cell_param_order (optional): a sequence to reorder the parameters.\n The elements of the sequence denote the indexes of\n ``param_cols`` elements, and optionally the index of\n ``param_rows`` elements shifted by the length of\n ``param_cols``. The elements of this parameter and\n ``cell_space_order`` must not overlap.\n\n Returns:\n The new child space created from the Excel range.", "id": "f13984:c1:m3"} {"signature": "def restore_state(self, system):", "body": "for space in self._spaces.values():space.restore_state(system)", "docstring": "Called after unpickling to restore some attributes manually.", "id": "f13984:c2:m3"} {"signature": "def _set_space(self, space):", "body": "raise NotImplementedError", "docstring": "To be overridden in subclasses.", "id": "f13984:c2:m9"} {"signature": "def new_space(self,name=None,bases=None,formula=None,*,refs=None,source=None,is_derived=False,prefix=\"\"):", "body": "from modelx.core.space import StaticSpaceImplif name is None:name = self.spacenamer.get_next(self.namespace, prefix)if name in self.namespace:raise ValueError(\"\" % name)if not prefix and not is_valid_name(name):raise ValueError(\"\" % name)space = self._new_space(name=name,formula=formula,refs=refs,source=source,is_derived=is_derived,)self._set_space(space)self.model.spacegraph.add_space(space)if bases is not None:if isinstance(bases, StaticSpaceImpl):bases = [bases]space.add_bases(bases)return space", "docstring": "Create a new child space.\n\n Args:\n name (str): Name of the space. If omitted, the space is\n created automatically.\n bases: If specified, the new space becomes a derived space of\n the `base` space.\n formula: Function whose parameters are used to set space parameters.\n refs: a mapping of refs to be added.\n arguments: ordered dict of space parameter names to their values.\n source: A source module from which cell definitions are read.\n prefix: Prefix to the autogenerated name when name is None.", "id": "f13984:c3:m0"} {"signature": "def configure_python():", "body": "_system.configure_python()", "docstring": "Configure Python ``sys`` settings for modelx.\n\n This function is called implicitly when importing modelx.\n To restore the Python settings, call :py:func:`restore_python`", "id": "f13986:m0"} {"signature": "def restore_python():", "body": "_system.restore_python()", "docstring": "Restore Python ``sys`` settings for modelx.\n\n Restore ``sys`` settings to the original states before\n importing modelx.", "id": "f13986:m1"} {"signature": "def setup_ipython():", "body": "_system.setup_ipython()", "docstring": "Set up IPython shell for modelx.\n\n Suppress IPython's default traceback messages upon error.", "id": "f13986:m2"} {"signature": "def restore_ipython():", "body": "_system.restore_ipython()", "docstring": "Restore IPython's default error message.\n\n Bring back IPython's default traceback message upon error for debugging.", "id": "f13986:m3"} {"signature": "def set_recursion(maxdepth=):", "body": "_system.callstack.maxdepth = maxdepth", "docstring": "Set formula recursion limit.\n\n Args:\n maxdepth: The maximum depth of the modelx interpreter stack.", "id": "f13986:m4"} {"signature": "def new_model(name=None):", "body": "return _system.new_model(name).interface", "docstring": "Create and return a new model.\n\n The current model is set to the created model.\n\n Args:\n name (:obj:`str`, optional): The name of the model to create.\n Defaults to ``ModelN``, with ``N``\n being an 
automatically assigned integer.\n\n Returns:\n The new model.", "id": "f13986:m5"} {"signature": "def new_space(name=None, bases=None, formula=None):", "body": "return cur_model().new_space(name, bases, formula)", "docstring": "Create and return a new space in the current model.\n\n The ``currentspace`` of the current model is set to the created model.\n\n Args:\n name (:obj:`str`, optional): The name of the space to create.\n Defaults to ``SpaceN``, with ``N``\n being an automatically assigned integer.\n\n Returns:\n The new space.", "id": "f13986:m6"} {"signature": "def defcells(space=None, name=None, *funcs):", "body": "if isinstance(space, _FunctionType) and name is None:func = spacereturn _system.currentspace.new_cells(formula=func).interfaceelif (isinstance(space, _Space) or space is None) and (isinstance(name, str) or name is None):if space is None:space = _system.currentspace.interfacereturn _CellsMaker(space=space._impl, name=name)elif all(isinstance(func, _FunctionType) for func in (space, name) + funcs):return [defcells(func) for func in (space, name) + funcs]else:raise TypeError(\"\")", "docstring": "Decorator/function to create cells from Python functions.\n\n Convenience decorator/function to create new cells directly from function\n definitions or function objects substituting for calling\n :py:meth:`new_cells `\n method of the parent space.\n\n There are 3 ways to use ``defcells`` to define cells from functions.\n\n **1. As a decorator without arguments**\n\n To create a cells from a function definition in the current space of the\n current model with the same name as the function's::\n\n @defcells\n def foo(x):\n return x\n\n **2. As a decorator with arguments**\n\n To create a cells from a function definition in a given space and/or with\n a given name::\n\n @defcells(space=space, name=name)\n def foo(x):\n return x\n\n **3. As a function**\n\n To create a multiple cells from a multiple function definitions::\n\n def foo(x):\n return x\n\n def bar(y):\n return foo(y)\n\n foo, bar = defcells(foo, bar)\n\n Args:\n space(optional): For the 2nd usage, a space to create the cells in.\n Defaults to the current space of the current model.\n name(optional): For the 2nd usage, a name of the created cells.\n Defaults to the function name.\n *funcs: For the 3rd usage, function objects. (``space`` and ``name``\n also take function objects for the 3rd usage.)\n\n Returns:\n For the 1st and 2nd usage, the newly created single cells is returned.\n For the 3rd usage, a list of newly created cells are returned.", "id": "f13986:m7"} {"signature": "def get_models():", "body": "return _get_interfaces(_system.models)", "docstring": "Returns a dict that maps model names to models.", "id": "f13986:m8"} {"signature": "def get_object(name: str):", "body": "elms = name.split(\"\")parent = get_models()[elms.pop()]while len(elms) > :obj = elms.pop()parent = getattr(parent, obj)return parent", "docstring": "Get a modelx object from its full name.", "id": "f13986:m9"} {"signature": "def _get_node(name: str, args: str):", "body": "obj = get_object(name)args = ast.literal_eval(args)if not isinstance(args, tuple):args = (args,)return obj.node(*args)", "docstring": "Get node from object name and arg string\n\n Not Used. 
Left for future reference purpose.", "id": "f13986:m10"} {"signature": "def cur_model(model=None):", "body": "if model is None:if _system.currentmodel is not None:return _system.currentmodel.interfaceelse:return Noneelse:if isinstance(model, _Model):_system.currentmodel = model._implelse:_system.currentmodel = _system.models[model]return _system.currentmodel.interface", "docstring": "Get and/or set the current model.\n\n If ``model`` is given, set the current model to ``model`` and return it.\n ``model`` can be the name of a model object, or a model object itself.\n If ``model`` is not given, the current model is returned.", "id": "f13986:m11"} {"signature": "def cur_space(space=None):", "body": "if space is None:if _system.currentmodel is not None:if _system.currentmodel.currentspace is not None:return _system.currentmodel.currentspace.interfaceelse:return Noneelse:return Noneelse:if isinstance(space, _Space):cur_model(space.model)_system.currentmodel.currentspace = space._implelse:_system.currentmodel.currentspace = _system.currentmodel.spaces[space]return cur_space()", "docstring": "Get and/or set the current space of the current model.\n\n If ``name`` is given, the current space of the current model is\n set to ``name`` and return it.\n If ``name`` is not given, the current space of the current model\n is returned.", "id": "f13986:m12"} {"signature": "def open_model(path, name=None):", "body": "return _system.open_model(path, name)", "docstring": "Load a model saved from a file and return it.\n\n Args:\n path (:obj:`str`): Path to the file to load the model from.\n name (optional): If specified, the model is renamed to this name.\n\n Returns:\n A new model created from the file.", "id": "f13986:m13"} {"signature": "def get_node(obj, args, kwargs):", "body": "if args is None and kwargs is None:return (obj,)if kwargs is None:kwargs = {}return obj, _bind_args(obj, args, kwargs)", "docstring": "Create a node from arguments and return it", "id": "f13987:m1"} {"signature": "def node_get_args(node):", "body": "obj = node[OBJ]key = node[KEY]boundargs = obj.formula.signature.bind(*key)boundargs.apply_defaults()return boundargs.arguments", "docstring": "Return an ordered mapping from params to args", "id": "f13987:m2"} {"signature": "def tuplize_key(obj, key, remove_extra=False):", "body": "paramlen = len(obj.formula.parameters)if isinstance(key, str):key = (key,)elif not isinstance(key, Sequence):key = (key,)if not remove_extra:return keyelse:arglen = len(key)if arglen:return key[: min(arglen, paramlen)]else:return key", "docstring": "Args", "id": "f13987:m3"} {"signature": "def reorderChild(self, parent, newitem):", "body": "source = self.getItem(parent).childItemstarget = newitem.childItemsi = while i < len(source):if source[i] == target[i]:i += continueelse:i0 = ij0 = source.index(target[i0])j = j0 + while j < len(source):if source[j] == target[j - j0 + i0]:j += continueelse:breakself.moveRows(parent, i0, j0, j - j0)i += j - j0", "docstring": "Reorder a list to match target by moving a sequence at a time.\n\n Written for QtAbstractItemModel.moveRows.", "id": "f13989:c8:m6"} {"signature": "def moveRows(self, parent, index_to, index_from, length):", "body": "source = self.getItem(parent).childItemsself.beginMoveRows(parent, index_from, index_from + length - , parent, index_to)sublist = [source.pop(index_from) for _ in range(length)]for _ in range(length):source.insert(index_to, sublist.pop())self.endMoveRows()", "docstring": "Move a sub sequence in a list\n\n index_to must be smaller than 
index_from", "id": "f13989:c8:m7"} {"signature": "def get_modeltree(model=None):", "body": "if model is None:model = mx.cur_model()treemodel = ModelTreeModel(model._baseattrs)view = QTreeView()view.setModel(treemodel)view.setWindowTitle(\"\" % model.name)view.setAlternatingRowColors(True)return view", "docstring": "Alias to :func:`get_tree`.", "id": "f13991:m0"} {"signature": "def get_tree(model=None):", "body": "if model is None:model = mx.cur_model()treemodel = ModelTreeModel(model._baseattrs)view = QTreeView()view.setModel(treemodel)view.setWindowTitle(\"\" % model.name)view.setAlternatingRowColors(True)return view", "docstring": "Get QTreeView object containing the model tree.\n\n Args:\n model: :class:`Model ` object.\n Defaults to the current model.", "id": "f13991:m1"} {"signature": "def show_tree(model=None):", "body": "if model is None:model = mx.cur_model()view = get_modeltree(model)app = QApplication.instance()if not app:raise RuntimeError(\"\")view.show()app.exec_()", "docstring": "Display the model tree window.\n\n Args:\n model: :class:`Model ` object.\n Defaults to the current model.\n\n Warnings:\n For this function to work with Spyder, *Graphics backend* option\n of Spyder must be set to *inline*.", "id": "f13991:m2"} {"signature": "def get_bases(self, node):", "body": "return self.predecessors(node)", "docstring": "Direct Bases iterator", "id": "f13995:c0:m1"} {"signature": "def get_mro(self, space):", "body": "seqs = [self.get_mro(base) for basein self.get_bases(space)] + [list(self.get_bases(space))]res = []while True:non_empty = list(filter(None, seqs))if not non_empty:res.insert(, space)return resfor seq in non_empty: candidate = seq[]not_head = [s for s in non_empty if candidate in s[:]]if not_head:candidate = Noneelse:breakif not candidate:raise TypeError(\"\")res.append(candidate)for seq in non_empty:if seq[] == candidate:del seq[]", "docstring": "Calculate the Method Resolution Order of bases using the C3 algorithm.\n\n Code modified from\n http://code.activestate.com/recipes/577748-calculate-the-mro-of-a-class/\n\n Args:\n bases: sequence of direct base spaces.\n\n Returns:\n mro as a list of bases including node itself", "id": "f13995:c0:m2"} {"signature": "def reorder_list(source, targetorder):", "body": "i = while i < len(source):if source[i] == targetorder[i]:i += continueelse:i0 = ij0 = source.index(targetorder[i0])j = j0 + while j < len(source):if source[j] == targetorder[j - j0 + i0]:j += continueelse:breakmove_elements(source, i0, j0, j - j0)i += j - j0", "docstring": "Reorder a list to match target by moving a sequence at a time.\n\n Written for QtAbstractItemModel.moveRows.", "id": "f13998:m0"} {"signature": "def move_elements(source, index_to, index_from, length):", "body": "sublist = [source.pop(index_from) for _ in range(length)]for _ in range(length):source.insert(index_to, sublist.pop())", "docstring": "Move a sub sequence in a list", "id": "f13998:m1"} {"signature": "def _alter_code(code, **attrs):", "body": "PyCode_New = ctypes.pythonapi.PyCode_NewPyCode_New.argtypes = (ctypes.c_int,ctypes.c_int,ctypes.c_int,ctypes.c_int,ctypes.c_int,ctypes.py_object,ctypes.py_object,ctypes.py_object,ctypes.py_object,ctypes.py_object,ctypes.py_object,ctypes.py_object,ctypes.py_object,ctypes.c_int,ctypes.py_object)PyCode_New.restype = ctypes.py_objectargs = [[code.co_argcount, ''],[code.co_kwonlyargcount, ''],[code.co_nlocals, ''],[code.co_stacksize, ''],[code.co_flags, ''],[code.co_code, ''],[code.co_consts, ''],[code.co_names, ''],[code.co_varnames, 
''],[code.co_freevars, ''],[code.co_cellvars, ''],[code.co_filename, ''],[code.co_name, ''],[code.co_firstlineno, ''],[code.co_lnotab, '']]for arg in args:if arg[] in attrs:arg[] = attrs[arg[]]return PyCode_New(args[][], args[][], args[][], args[][], args[][], args[][], args[][], args[][], args[][], args[][], args[][], args[][], args[][], args[][], args[][])", "docstring": "Create a new code object by altering some of ``code`` attributes\n\n Args:\n code: code objcect\n attrs: a mapping of names of code object attrs to their values", "id": "f13999:m0"} {"signature": "def alter_freevars(func, globals_=None, **vars):", "body": "if globals_ is None:globals_ = func.__globals__frees = tuple(vars.keys())oldlocs = func.__code__.co_namesnewlocs = tuple(name for name in oldlocs if name not in frees)code = _alter_code(func.__code__,co_freevars=frees,co_names=newlocs,co_flags=func.__code__.co_flags | inspect.CO_NESTED)closure = _create_closure(*vars.values())return FunctionType(code, globals_, closure=closure)", "docstring": "Replace local variables with free variables\n\n Warnings:\n This function does not work.", "id": "f13999:m3"} {"signature": "def get_description():", "body": "with open(path.join(here, ''), '') as f:data = f.read()return data", "docstring": "Get long description from README.", "id": "f14001:m0"} {"signature": "def main():", "body": "print(\"\")", "docstring": "Entry point for the application script", "id": "f14007:m0"} {"signature": "def annual_payment():", "body": "n = payback_periodreturn principal / pv_payments(n)", "docstring": "How much amount per payment for a given payback period.", "id": "f14009:m0"} {"signature": "def pv_payments(n):", "body": "if n == :return / ( + interest_rate)else:return pv_payments(n - ) + / ( + interest_rate) ** n", "docstring": "Present value of repayments of 1 for n years.", "id": "f14009:m1"} {"signature": "def payback_period():", "body": "n = while annual_payment * pv_payments(n) < principal:n += return n", "docstring": "Payback period for a given annual payment", "id": "f14009:m2"} {"signature": "def __init__(self, hostname: str = '',port: int = , retries: int = ,log_errors: bool = False) -> None:", "body": "self.hostname = hostnameself.port = portself.retries = retriesself.log_errors = log_errorsself._log_fp = Path(tempfile.gettempdir(), '')if self._log_fp.is_file():self._log_fp.open('').close()", "docstring": ":param hostname: The IP address of the TweeboParser API server.\n:param port: The Port that the TweeboParser API server is attached to.\n:param retries: Number of times to retry json decoding the\n returned data.", "id": "f14017:c0:m0"} {"signature": "def log_error(self, text: str) -> None:", "body": "if self.log_errors:with self._log_fp.open('') as log_file:log_file.write(f'')", "docstring": "Given some error text it will log the text if self.log_errors is True\n\n:param text: Error text to log", "id": "f14017:c0:m1"} {"signature": "def parse_conll(self, texts: List[str], retry_count: int = ) -> List[str]:", "body": "post_data = {'': texts, '': ''}try:response = requests.post(f'',json=post_data,headers={'': ''})response.raise_for_status()except (requests.exceptions.ConnectionError,requests.exceptions.Timeout) as server_error:raise ServerError(server_error, self.hostname, self.port)except requests.exceptions.HTTPError as http_error:raise http_errorelse:try:return response.json()except json.JSONDecodeError as json_exception:if retry_count == self.retries:self.log_error(response.text)raise Exception(''f'')return self.parse_conll(texts, 
retry_count + )", "docstring": "Processes the texts using TweeboParse and returns them in CoNLL format.\n\n:param texts: The List of Strings to be processed by TweeboParse.\n:param retry_count: The number of times it has retried for. Default\n 0 does not require setting, main purpose is for\n recursion.\n:return: A list of CoNLL formatted strings.\n:raises ServerError: Caused when the server is not running.\n:raises :py:class:`requests.exceptions.HTTPError`: Caused when the\n input texts are not formatted correctly e.g. When you give it a\n String not a list of Strings.\n:raises :py:class:`json.JSONDecodeError`: Caused if after self.retries\n attempts to parse the data it cannot decode the data.\n\n:Example:", "id": "f14017:c0:m2"} {"signature": "def parse_stanford(self, texts: List[str], retry_count: int = ) -> List[Dict[str, Union[str, int]]]:", "body": "post_data = {'': texts, '': ''}try:response = requests.post(f'',json=post_data,headers={'': ''})response.raise_for_status()except (requests.exceptions.ConnectionError,requests.exceptions.Timeout,requests.exceptions.InvalidSchema) as server_error:raise ServerError(server_error, self.hostname, self.port)except requests.exceptions.HTTPError as http_error:raise http_errorelse:try:return response.json()except json.JSONDecodeError as json_exception:if retry_count == self.retries:self.log_error(response.text)raise Exception(''f'')return self.parse_stanford(texts, retry_count + )", "docstring": "Processes the texts using TweeboParse and returns them in a Stanford\nstyled format (as in the same format as the json return of the Stanford\nCoreNLP server dependency parser).\n\n:param texts: The List of Strings to be processed by TweeboParse.\n:param retry_count: The number of times it has retried for. Default\n 0 does not require setting, main purpose is for\n recursion.\n:return: A list of dicts.\n:raises ServerError: Caused when the server is not running.\n:raises :py:class:`requests.exceptions.HTTPError`: Caused when the\n input texts are not formatted correctly e.g. 
When you give it a\n String not a list of Strings.\n:raises :py:class:`json.JSONDecodeError`: Caused if after self.retries\n attempts to parse the data it cannot decode the data.\n\n:Example:\n::\n from tweebo_parser import API\n tweebo_api = API()\n text_data = ['hello how are you', 'Where are we going']\n result = tweebo_api.parse_stanford(text_data)\n print(result)\n [{}]", "id": "f14017:c0:m3"} {"signature": "def __init__(self, excpetion: requests.exceptions.RequestException,hostname: str, port: int) -> None:", "body": "message = f''if isinstance(excpetion, requests.exceptions.Timeout):message = ''f''elif isinstance(excpetion, requests.exceptions.ConnectionError):message = ''f''self.message = message", "docstring": ":param exception: The requests exception instance that is raised.\n:param hostname: The IP address of the API server.\n:param port: The Port that the API server is attached to.", "id": "f14017:c1:m0"} {"signature": "def case(*, to, **kwargs):", "body": "if len(kwargs) != :raise ValueError(\"\")[(typ, string)] = kwargs.items()types = {'', '', '', ''}if typ not in types:raise ValueError(f\"\")if to not in types:raise ValueError(f\"\")def pascal_iter(string):yield from (m.group() for m in re.finditer(r'', string))def snake_iter(string):yield from (m.group() for m in re.finditer(r'', string))inputs = {'': pascal_iter,'': pascal_iter,'': snake_iter,'': snake_iter,}def out_fun(sep, case=None, case_fst=None):if case is None:case = lambda x: xif case_fst is None:case_fst = casereturn lambda tokens: sep.join(case_fst(token) if i == else case(token) for i, token in enumerate(tokens))outputs = {'': out_fun('', str.capitalize),'': out_fun('', str.capitalize, str.lower),'': out_fun('', str.lower),'': out_fun('', str.upper),}tokens = inputs[typ](string)return outputs[to](tokens)", "docstring": "Converts an identifier from one case type to another.\n An identifier is an ASCII string consisting of letters, digits and underscores, not starting with a digit.\n The supported case types are camelCase, PascalCase, snake_case, and CONSTANT_CASE,\n identified as camel, pascal, snake, and constant.\n The input identifier is given as a keyword argument with one of these names,\n and the output type is given as a string in the `to` keyword argument.\n If a given string does not conform to the specified case type (such as underscores in camel or pascal case strings,\n or double__underscores in general), the result may not be as desired,\n although things like snaKe_casE or CONStaNT_CASe will generally work.", "id": "f14033:m0"} {"signature": "def LatexText(*args, **kwargs):", "body": "return LatexFixer(*args, **kwargs).tostring()", "docstring": "Transform a unicode string into another more compatible with latex,\n fixing some common typographical errors", "id": "f14040:m0"} {"signature": "def tostring(self):", "body": "return self.data", "docstring": "Return self as instance of str()", "id": "f14040:c0:m1"} {"signature": "def _sentence_to_interstitial_spacing(self):", "body": "not_sentence_end_chars = ['']abbreviations = ['', '', '','', '']titles = ['', '', '', '','', '', '', '', '','', '', '', '', '','', '', '', '', '','', '', '', '', '','', '', '', '', '']for abbrev in abbreviations:for x in not_sentence_end_chars:self._str_replacement(abbrev + x, abbrev + '')for title in titles:for x in not_sentence_end_chars:self._str_replacement(title + x, title + '')", "docstring": "Fix common spacing errors caused by LaTeX's habit\n of using an inter-sentence space after any full stop.", "id": "f14040:c0:m2"} 
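A minimal usage sketch of the case() identifier converter documented in f14033:m0 above, assuming it behaves exactly as its docstring describes (the regex and separator literals are elided in the record body, so the results shown are assumptions rather than outputs taken from the source):

    # hypothetical calls; expected results are inferred from the docstring of case()
    case(camel='parseHttpResponse', to='snake')      # 'parse_http_response' (assumed)
    case(snake='parse_http_response', to='pascal')   # 'ParseHttpResponse' (assumed)
    case(pascal='ParseHttpResponse', to='constant')  # 'PARSE_HTTP_RESPONSE' (assumed)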
{"signature": "def _interstitial_to_sentence_spacing(self):", "body": "pass", "docstring": "Fix errors where inter-sentence spacing\n is not used after a word ending with a capital letter.", "id": "f14040:c0:m3"} {"signature": "def _latex_symbols(self):", "body": "substs = [('', ''), ('', ''),('', '')]for sub in substs:self._str_replacement(*sub)", "docstring": "Replace unicode symbols with latex commands\n where those symbols cannot be represented in utf mode.", "id": "f14040:c0:m4"} {"signature": "def _hyphens_to_dashes(self):", "body": "problematic_hyphens = [(r'', r''),(r'', ''),(r'', '')]for problem_case in problematic_hyphens:self._regex_replacement(*problem_case)", "docstring": "Transform hyphens to various kinds of dashes", "id": "f14040:c0:m5"} {"signature": "def _str_replacement(self, target, replacement):", "body": "self.data = self.data.replace(target, replacement)", "docstring": "Replace target with replacement", "id": "f14040:c0:m6"} {"signature": "def _regex_replacement(self, target, replacement):", "body": "match = re.compile(target)self.data = match.sub(replacement, self.data)", "docstring": "Regex substitute target with replacement", "id": "f14040:c0:m7"} {"signature": "def _process_events(self, events):", "body": "for f, callback, transferred, key, ov in events:try:self._logger.debug(''.format(callback))value = callback(transferred, key, ov)except OSError:self._logger.warning('', exc_info=sys.exc_info())else:f.set_result(value)", "docstring": "Process events from proactor.", "id": "f14045:c0:m1"} {"signature": "def select(self, timeout=None):", "body": "if not self.__events:self._poll(timeout)tmp = self.__eventsself.__events = []return tmp", "docstring": "Override in order to handle events in a threadsafe manner.", "id": "f14045:c1:m1"} {"signature": "def _poll(self, timeout=None):", "body": "if timeout is None:ms = UINT32_MAX elif timeout < :raise ValueError(\"\")else:ms = math.ceil(timeout * )if ms >= UINT32_MAX:raise ValueError(\"\")with QtCore.QMutexLocker(self._lock):while True:status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms)if status is None:breakerr, transferred, key, address = statustry:f, ov, obj, callback = self._cache.pop(address)except KeyError:if key not in (, _overlapped.INVALID_HANDLE_VALUE):_winapi.CloseHandle(key)ms = continueif obj in self._stopped_serving:f.cancel()elif not f.done():self.__events.append((f, callback, transferred, key, ov))ms = ", "docstring": "Override in order to handle events in a threadsafe manner.", "id": "f14045:c1:m5"} {"signature": "def with_logger(cls):", "body": "attr_name = ''cls_name = cls.__qualname__module = cls.__module__if module is not None:cls_name = module + '' + cls_nameelse:raise AssertionErrorsetattr(cls, attr_name, logging.getLogger(cls_name))return cls", "docstring": "Class decorator to add a logger to a class.", "id": "f14046:m0"} {"signature": "def _fileobj_to_fd(fileobj):", "body": "if isinstance(fileobj, int):fd = fileobjelse:try:fd = int(fileobj.fileno())except (AttributeError, TypeError, ValueError) as ex:raise ValueError(\"\".format(fileobj)) from exif fd < :raise ValueError(\"\".format(fd))return fd", "docstring": "Return a file descriptor from a file object.\n\nParameters:\nfileobj -- file object or file descriptor\n\nReturns:\ncorresponding file descriptor\n\nRaises:\nValueError if the object is invalid", "id": "f14047:m0"} {"signature": "def select(self, *args, **kwargs):", "body": "raise NotImplementedError", "docstring": "Implement abstract method even though we don't need it.", 
"id": "f14047:c1:m1"} {"signature": "def _fileobj_lookup(self, fileobj):", "body": "try:return _fileobj_to_fd(fileobj)except ValueError:for key in self._fd_to_key.values():if key.fileobj is fileobj:return key.fdraise", "docstring": "Return a file descriptor from a file object.\n\n This wraps _fileobj_to_fd() to do an exhaustive search in case\n the object is invalid but we still have it in our map. This\n is used by unregister() so we can unregister an object that\n was previously registered even if it is closed. It is also\n used by _SelectorMapping.", "id": "f14047:c1:m2"} {"signature": "def _key_from_fd(self, fd):", "body": "try:return self._fd_to_key[fd]except KeyError:return None", "docstring": "Return the key associated to a given file descriptor.\n\nParameters:\nfd -- file descriptor\n\nReturns:\ncorresponding key, or None if not found", "id": "f14047:c1:m10"} {"signature": "def _process_event(self, key, mask):", "body": "self._logger.debug(''.format(key, mask))fileobj, (reader, writer) = key.fileobj, key.dataif mask & selectors.EVENT_READ and reader is not None:if reader._cancelled:self.remove_reader(fileobj)else:self._logger.debug(''.format(reader))reader._run()if mask & selectors.EVENT_WRITE and writer is not None:if writer._cancelled:self.remove_writer(fileobj)else:self._logger.debug(''.format(writer))writer._run()", "docstring": "Selector has delivered us an event.", "id": "f14047:c2:m3"} {"signature": "def asyncClose(fn):", "body": "@functools.wraps(fn)def wrapper(*args, **kwargs):f = asyncio.ensure_future(fn(*args, **kwargs))while not f.done():QApplication.instance().processEvents()return wrapper", "docstring": "Allow to run async code before application is closed.", "id": "f14048:m1"} {"signature": "def asyncSlot(*args):", "body": "def outer_decorator(fn):@Slot(*args)@functools.wraps(fn)def wrapper(*args, **kwargs):asyncio.ensure_future(fn(*args, **kwargs))return wrapperreturn outer_decorator", "docstring": "Make a Qt async slot run on asyncio loop.", "id": "f14048:m2"} {"signature": "def run_forever(self):", "body": "self.__is_running = Trueself._before_run_forever()try:self._logger.debug('')rslt = self.__app.exec_()self._logger.debug(''.format(rslt))return rsltfinally:self._after_run_forever()self.__is_running = False", "docstring": "Run eventloop forever.", "id": "f14048:c3:m1"} {"signature": "def run_until_complete(self, future):", "body": "self._logger.debug(''.format(future))future = asyncio.ensure_future(future, loop=self)def stop(*args): self.stop() future.add_done_callback(stop)try:self.run_forever()finally:future.remove_done_callback(stop)self.__app.processEvents() if not future.done():raise RuntimeError('')self._logger.debug(''.format(future))return future.result()", "docstring": "Run until Future is complete.", "id": "f14048:c3:m2"} {"signature": "def stop(self):", "body": "if not self.__is_running:self._logger.debug('')returnself._logger.debug('')self.__is_running = Falseself.__app.exit()self._logger.debug('')", "docstring": "Stop event loop.", "id": "f14048:c3:m3"} {"signature": "def is_running(self):", "body": "return self.__is_running", "docstring": "Return True if the event loop is running, False otherwise.", "id": "f14048:c3:m4"} {"signature": "def close(self):", "body": "if self.is_running():raise RuntimeError(\"\")if self.is_closed():returnself._logger.debug('')if self.__default_executor is not None:self.__default_executor.shutdown()super().close()self._timer.stop()self.__app = Nonefor notifier in itertools.chain(self._read_notifiers.values(), 
self._write_notifiers.values()):notifier.setEnabled(False)self._read_notifiers = Noneself._write_notifiers = None", "docstring": "Release all resources used by the event loop.\n\nThe loop cannot be restarted after it has been closed.", "id": "f14048:c3:m5"} {"signature": "def call_later(self, delay, callback, *args, context=None):", "body": "if asyncio.iscoroutinefunction(callback):raise TypeError(\"\")if not callable(callback):raise TypeError(''.format(type(callback).__name__))self._logger.debug(''.format(callback, args, delay))if sys.version_info >= (, ):return self._add_callback(asyncio.Handle(callback, args, self, context=context), delay)return self._add_callback(asyncio.Handle(callback, args, self), delay)", "docstring": "Register callback to be invoked after a certain delay.", "id": "f14048:c3:m6"} {"signature": "def call_soon(self, callback, *args, context=None):", "body": "return self.call_later(, callback, *args, context=context)", "docstring": "Register a callback to be run on the next iteration of the event loop.", "id": "f14048:c3:m8"} {"signature": "def call_at(self, when, callback, *args, context=None):", "body": "return self.call_later(when - self.time(), callback, *args, context=context)", "docstring": "Register callback to be invoked at a certain time.", "id": "f14048:c3:m9"} {"signature": "def time(self):", "body": "return time.monotonic()", "docstring": "Get time according to event loop's clock.", "id": "f14048:c3:m10"} {"signature": "def add_reader(self, fd, callback, *args):", "body": "self._check_closed()try:existing = self._read_notifiers[fd]except KeyError:passelse:existing.setEnabled(False)existing.activated.disconnect()notifier = QtCore.QSocketNotifier(fd, QtCore.QSocketNotifier.Read)notifier.setEnabled(True)self._logger.debug(''.format(fd))notifier.activated.connect(lambda: self.__on_notifier_ready(self._read_notifiers, notifier, fd, callback, args) )self._read_notifiers[fd] = notifier", "docstring": "Register a callback for when a file descriptor is ready for reading.", "id": "f14048:c3:m11"} {"signature": "def remove_reader(self, fd):", "body": "if self.is_closed():returnself._logger.debug(''.format(fd))try:notifier = self._read_notifiers.pop(fd)except KeyError:return Falseelse:notifier.setEnabled(False)return True", "docstring": "Remove reader callback.", "id": "f14048:c3:m12"} {"signature": "def add_writer(self, fd, callback, *args):", "body": "self._check_closed()try:existing = self._write_notifiers[fd]except KeyError:passelse:existing.setEnabled(False)existing.activated.disconnect()notifier = QtCore.QSocketNotifier(fd, QtCore.QSocketNotifier.Write)notifier.setEnabled(True)self._logger.debug(''.format(fd))notifier.activated.connect(lambda: self.__on_notifier_ready(self._write_notifiers, notifier, fd, callback, args) )self._write_notifiers[fd] = notifier", "docstring": "Register a callback for when a file descriptor is ready for writing.", "id": "f14048:c3:m13"} {"signature": "def remove_writer(self, fd):", "body": "if self.is_closed():returnself._logger.debug(''.format(fd))try:notifier = self._write_notifiers.pop(fd)except KeyError:return Falseelse:notifier.setEnabled(False)return True", "docstring": "Remove writer callback.", "id": "f14048:c3:m14"} {"signature": "def call_soon_threadsafe(self, callback, *args, context=None):", "body": "self.__call_soon_signal.emit(callback, args)", "docstring": "Thread-safe version of call_soon.", "id": "f14048:c3:m17"} {"signature": "def run_in_executor(self, executor, callback, *args):", "body": 
"self._logger.debug(''.format(callback, args))if isinstance(callback, asyncio.Handle):assert not argsassert not isinstance(callback, asyncio.TimerHandle)if callback._cancelled:f = asyncio.Future()f.set_result(None)return fcallback, args = callback.callback, callback.argsif executor is None:self._logger.debug('')executor = self.__default_executorif executor is None:self._logger.debug('')executor = self.__default_executor = QThreadExecutor()return asyncio.wrap_future(executor.submit(callback, *args))", "docstring": "Run callback in executor.\n\n If no executor is provided, the default executor will be used, which defers execution to\n a background thread.", "id": "f14048:c3:m18"} {"signature": "def default_exception_handler(self, context):", "body": "self._logger.debug('')message = context.get('')if not message:message = ''try:exception = context['']except KeyError:exc_info = Falseelse:exc_info = (type(exception), exception, exception.__traceback__)log_lines = [message]for key in [k for k in sorted(context) if k not in {'', ''}]:log_lines.append(''.format(key, context[key]))self.__log_error(''.join(log_lines), exc_info=exc_info)", "docstring": "Handle exceptions.\n\n This is the default exception handler.\n\n This is called when an exception occurs and no exception\n handler is set, and can be called by a custom exception\n handler that wants to defer to the default behavior.\n\n context parameter has the same meaning as in\n `call_exception_handler()`.", "id": "f14048:c3:m21"} {"signature": "def assert_tags(self, field, expected_value, result_value):", "body": "expected_value = set(expected_value)msg = (\"\"\"\")self.assertEqual(len(result_value), len(expected_value), msg=msg)for tag in result_value:self.assertTrue(tag in expected_value)", "docstring": "\\", "id": "f14057:c0:m0"} {"signature": "def loadData(self):", "body": "full_id = self.id().split('')test, module, cls, func = full_idpath = os.path.join(os.path.dirname(CURRENT_PATH),'',\"\",module.partition('')[],\"\" % func)path = os.path.abspath(path)content = FileHelper.loadResourceFile(path)self.data = json.loads(content)", "docstring": "\\", "id": "f14059:c0:m1"} {"signature": "def runArticleAssertions(self, article, fields):", "body": "for field in fields:expected_value = self.data[''][field]result_value = getattr(article, field, None)if field in ['']:self.assertEqual(type(result_value), type(datetime.today()))result_value = result_value.isoformat(sep='')assertion = '' % fieldif hasattr(self, assertion):getattr(self, assertion)(field, expected_value, result_value)continuemsg = \"\" % (field, expected_value, result_value)self.assertEqual(expected_value, result_value, msg=msg)", "docstring": "\\", "id": "f14059:c0:m4"} {"signature": "def getArticle(self, config_=None):", "body": "self.loadData()self.loadHtml()config = self.getConfig()if config is not None:if isinstance(config_, dict):for k, v in list(config_.items()):if hasattr(config, k):setattr(config, k, v)self.parser = config.get_parser()target_language = self.data.get('')if target_language:config.target_language = target_languageconfig.use_meta_language = Falsewith open(''.format(CURRENT_PATH), '') as fobj:img_content = fobj.read()with open(''.format(CURRENT_PATH), '') as fobj:blank_img = fobj.read()g = Goose(config=config)with requests_mock.Mocker(real_http=False) as m:m.get('', content=blank_img)m.get('', content=blank_img)m.get('', content=img_content)if \"\" in self.data:m.get(self.data[''], text=self.html)return g.extract(url=self.data[''])else:return 
g.extract(raw_html=self.html)", "docstring": "\\", "id": "f14059:c0:m6"} {"signature": "def clean_title(self, title):", "body": "if \"\" in list(self.article.opengraph.keys()):site_name = self.article.opengraph['']title = title.replace(site_name, '').strip()elif (self.article.schema and \"\" in self.article.schema and\"\" in self.article.schema[\"\"]):site_name = self.article.schema[\"\"][\"\"]title = title.replace(site_name, '').strip()if self.article.domain:pattern = re.compile(self.article.domain, re.IGNORECASE)title = pattern.sub(\"\", title).strip()title_words = title.split()if title_words and title_words[] in TITLE_SPLITTERS:title_words.pop()if not title_words:return \"\"if title_words[-] in TITLE_SPLITTERS:title_words.pop(-)title = \"\".join(title_words).strip()return title", "docstring": "Clean title with the use of og:site_name\n in this case try to get rid of site name\n and use TITLE_SPLITTERS to reformat title", "id": "f14072:c0:m0"} {"signature": "def get_title(self):", "body": "title = ''if \"\" in list(self.article.opengraph.keys()):return self.clean_title(self.article.opengraph[''])elif self.article.schema and \"\" in self.article.schema:return self.clean_title(self.article.schema[''])meta_headline = self.parser.getElementsByTag(self.article.doc,tag=\"\",attr=\"\",value=\"\")if meta_headline is not None and len(meta_headline) > :title = self.parser.getAttribute(meta_headline[], '')return self.clean_title(title)title_element = self.parser.getElementsByTag(self.article.doc, tag='')if title_element is not None and len(title_element) > :title = self.parser.getText(title_element[])return self.clean_title(title)return title", "docstring": "\\\n Fetch the article title and analyze it", "id": "f14072:c0:m1"} {"signature": "def get_video(self, node):", "body": "video = Video()video._embed_code = self.get_embed_code(node)video._embed_type = self.get_embed_type(node)video._width = self.get_width(node)video._height = self.get_height(node)video._src = self.get_src(node)video._provider = self.get_provider(video.src)return video", "docstring": "Create a video object from a video embed", "id": "f14073:c0:m7"} {"signature": "@staticmethoddef get_video_tag(node):", "body": "return Video()", "docstring": "extract html video tags", "id": "f14073:c0:m9"} {"signature": "def get_language(self):", "body": "if self.config.use_meta_language:if self.article.meta_lang:return self.article.meta_lang[:]return self.config.target_language", "docstring": "Returns the language set by the article or\nthe configuration language", "id": "f14074:c0:m0"} {"signature": "def is_boostable(self, node):", "body": "para = \"\"steps_away = minimum_stopword_count = max_stepsaway_from_node = nodes = self.walk_siblings(node)for current_node in nodes:current_node_tag = self.parser.getTag(current_node)if current_node_tag == para:if steps_away >= max_stepsaway_from_node:return Falsepara_text = self.parser.getText(current_node)word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(para_text)if word_stats.get_stopword_count() > minimum_stopword_count:return Truesteps_away += return False", "docstring": "\\\n a lot of times the first paragraph might be the caption under an image\n so we'll want to make sure if we're going to boost a parent node that\n it should be connected to other paragraphs,\n at least for the first n paragraphs so we'll want to make sure that\n the next sibling is a paragraph and has at\n least some substantial weight to it", "id": "f14074:c0:m4"} {"signature": "def 
get_siblings_content(self, current_sibling, baselinescore_siblings_para):", "body": "if current_sibling.tag == '' and self.parser.getText(current_sibling):tmp = current_siblingif tmp.tail:tmp = deepcopy(tmp)tmp.tail = ''return [tmp]else:potential_paragraphs = self.parser.getElementsByTag(current_sibling, tag='')if potential_paragraphs is None:return Noneparagraphs = list()for first_paragraph in potential_paragraphs:text = self.parser.getText(first_paragraph)if text: word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text)paragraph_score = word_stats.get_stopword_count()sibling_baseline_score = float()high_link_density = self.is_highlink_density(first_paragraph)score = float(baselinescore_siblings_para * sibling_baseline_score)if score < paragraph_score and not high_link_density:para = self.parser.createElement(tag='', text=text, tail=None)paragraphs.append(para)return paragraphs", "docstring": "adds any siblings that may have a decent score to this node", "id": "f14074:c0:m7"} {"signature": "def get_siblings_score(self, top_node):", "body": "base = paragraphs_number = paragraphs_score = nodes_to_check = self.parser.getElementsByTag(top_node, tag='')for node in nodes_to_check:text_node = self.parser.getText(node)word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text_node)high_link_density = self.is_highlink_density(node)if word_stats.get_stopword_count() > and not high_link_density:paragraphs_number += paragraphs_score += word_stats.get_stopword_count()if paragraphs_number > :base = paragraphs_score // paragraphs_numberreturn base", "docstring": "we could have long articles that have tons of paragraphs\nso if we tried to calculate the base score against\nthe total text score of those paragraphs it would be unfair.\nSo we need to normalize the score based on the average scoring\nof the paragraphs within the top node.\nFor example if our total score of 10 paragraphs was 1000\nbut each had an average value of 100 then 100 should be our base.", "id": "f14074:c0:m8"} {"signature": "def update_score(self, node, add_to_score):", "body": "current_score = score_string = self.parser.getAttribute(node, '')if score_string:current_score = int(score_string)new_score = current_score + int(add_to_score)self.parser.setAttribute(node, \"\", str(new_score))", "docstring": "adds a score to the gravityScore Attribute we put on divs\nwe'll get the current score then add the score\nwe're passing in to the current", "id": "f14074:c0:m9"} {"signature": "def update_node_count(self, node, add_to_count):", "body": "current_score = count_string = self.parser.getAttribute(node, '')if count_string:current_score = int(count_string)new_score = current_score + add_to_countself.parser.setAttribute(node, \"\", str(new_score))", "docstring": "\\\n stores how many decent nodes are under a parent node", "id": "f14074:c0:m10"} {"signature": "def is_highlink_density(self, element):", "body": "links = self.parser.getElementsByTag(element, tag='')if not links:return Falsetext = self.parser.getText(element)words = text.split('')words_number = float(len(words))link_text_parts = []for link in links:link_text_parts.append(self.parser.getText(link))link_text = ''.join(link_text_parts)link_words = link_text.split('')number_of_link_words = float(len(link_words))number_of_links = float(len(links))link_divisor = float(number_of_link_words / words_number)score = float(link_divisor * number_of_links)if score >= :return Truereturn False", "docstring": "checks the density of links 
within a node,\nis there not much text and most of it contains linky shit?\nif so it's no good", "id": "f14074:c0:m11"} {"signature": "def get_score(self, node):", "body": "return self.get_node_gravity_score(node) or ", "docstring": "returns the gravityScore as an integer from this node", "id": "f14074:c0:m12"} {"signature": "def nodes_to_check(self, docs):", "body": "nodes_to_check = []for doc in docs:for tag in ['', '', '']:items = self.parser.getElementsByTag(doc, tag=tag)nodes_to_check += itemsreturn nodes_to_check", "docstring": "\\\n returns a list of nodes we want to search\n on like paragraphs and tables", "id": "f14074:c0:m14"} {"signature": "def post_cleanup(self):", "body": "parse_tags = ['']if self.config.parse_lists:parse_tags.extend(['', ''])if self.config.parse_headers:parse_tags.extend(['', '', '', '', '', ''])target_node = self.article.top_nodenode = self.add_siblings(target_node)for elm in self.parser.getChildren(node):e_tag = self.parser.getTag(elm)if e_tag not in parse_tags:if (self.is_highlink_density(elm) or self.is_table_and_no_para_exist(elm) ornot self.is_nodescore_threshold_met(node, elm)):self.parser.remove(elm)return node", "docstring": "\\\n remove any divs that looks like non-content,\n clusters of links, or paras with no gusto", "id": "f14074:c0:m17"} {"signature": "def check_large_images(self, node, parent_depth_level, sibling_depth_level):", "body": "good_images = self.get_image_candidates(node)if good_images:scored_images = self.fetch_images(good_images, parent_depth_level)if scored_images:highscore_image = sorted(list(scored_images.items()),key=lambda x: x[], reverse=True)[][]main_image = Image()main_image._src = highscore_image.srcmain_image._width = highscore_image.widthmain_image._height = highscore_image.heightmain_image._extraction_type = \"\"score_len = len(scored_images)main_image._confidence_score = / score_len if score_len > else return main_imagedepth_obj = self.get_depth_level(node, parent_depth_level, sibling_depth_level)if depth_obj:return self.check_large_images(depth_obj.node, depth_obj.parent_depth,depth_obj.sibling_depth)return None", "docstring": "\\\n although slow the best way to determine the best image is to download\n them and check the actual dimensions of the image when on disk\n so we'll go through a phased approach...\n 1. get a list of ALL images from the parent node\n 2. filter out any bad image names that we know of (gifs, ads, etc..)\n 3. do a head request on each file to make sure it meets\n our bare requirements\n 4. any images left over let's do a full GET request,\n download em to disk and check their dimensions\n 5. 
Score images based on different factors like height/width\n and possibly things like color density", "id": "f14075:c1:m3"} {"signature": "def fetch_images(self, images, depth_level):", "body": "image_results = {}initial_area = float()total_score = float()cnt = float()min_width = for image in images[:]:src = self.parser.getAttribute(image, attr='')src = self.build_image_path(src)src = self.add_schema_if_none(src)local_image = self.get_local_image(src)width = local_image.widthheight = local_image.heightsrc = local_image.srcfile_extension = local_image.file_extensionif file_extension != '' or file_extension != '':if (depth_level >= and local_image.width > ) or depth_level < :if not self.is_banner_dimensions(width, height):if width > min_width:sequence_score = float( / cnt)area = float(width * height)total_score = float()if initial_area == :initial_area = area * float()total_score = else:area_difference = float(area / initial_area)total_score = sequence_score * area_differenceimage_results.update({local_image: total_score})cnt += return image_results", "docstring": "\\\n download the images to temp disk and set their dimensions\n - we're going to score the images in the order in which\n they appear so images higher up will have more importance,\n - we'll count the area of the 1st image as a score\n of 1 and then calculate how much larger or small each image after it is\n - we'll also make sure to try and weed out banner\n type ad blocks that have big widths and small heights or vice versa\n - so if the image is 3rd found in the dom it's\n sequence score would be 1 / 3 = .33 * diff\n in area from the first image", "id": "f14075:c1:m5"} {"signature": "@staticmethoddef is_banner_dimensions(width, height):", "body": "if width == height:return Falseif width > height:diff = float(width / height)if diff > :return Trueif height > width:diff = float(height / width)if diff > :return Truereturn False", "docstring": "\\\n returns true if we think this is kind of a bannery dimension\n like 600 / 100 = 6 may be a fishy dimension for a good image", "id": "f14075:c1:m7"} {"signature": "def filter_bad_names(self, images):", "body": "good_images = []for image in images:if self.is_valid_filename(image):good_images.append(image)return good_images if len(good_images) > else None", "docstring": "\\\n takes a list of image elements\n and filters out the ones with bad names", "id": "f14075:c1:m9"} {"signature": "def is_valid_filename(self, image_node):", "body": "src = self.parser.getAttribute(image_node, attr='')if not src:return Falseif self.badimages_names_re.search(src):return Falsereturn True", "docstring": "\\\n will check the image src against a list\n of bad image files we know of like buttons, etc...", "id": "f14075:c1:m10"} {"signature": "def get_images_bytesize_match(self, images):", "body": "cnt = max_bytes_size = good_images = []for image in images:if cnt > :return good_imagessrc = self.parser.getAttribute(image, attr='')src = self.build_image_path(src)src = self.add_schema_if_none(src)local_image = self.get_local_image(src)if local_image:filesize = local_image.bytesif (filesize == or filesize > self.images_min_bytes) and filesize < max_bytes_size:good_images.append(image)else:images.remove(image)cnt += return good_images if len(good_images) > else None", "docstring": "\\\n loop through all the images and find the ones\n that have the best bytez to even make them a candidate", "id": "f14075:c1:m12"} {"signature": "def check_link_tag(self):", "body": "node = self.article.raw_docmeta = 
self.parser.getElementsByTag(node, tag='', attr='', value='')for item in meta:src = self.parser.getAttribute(item, attr='')if src:return self.get_image(src, extraction_type='')return None", "docstring": "\\\n checks to see if we were able to\n find open link_src on this page", "id": "f14075:c1:m14"} {"signature": "def check_known_schemas(self):", "body": "if '' in self.article.opengraph:return self.get_image(self.article.opengraph[\"\"],extraction_type='')elif (self.article.schema and '' in self.article.schema and\"\" in self.article.schema[\"\"]):return self.get_image(self.article.schema[\"\"][\"\"],extraction_type='')return None", "docstring": "\\\n checks to see if we were able to find the image via known schemas:\n\n Supported Schemas\n - Open Graph\n - schema.org", "id": "f14075:c1:m15"} {"signature": "def get_local_image(self, src):", "body": "return ImageUtils.store_image(self.fetcher, self.article.link_hash, src, self.config)", "docstring": "\\\n returns the bytes of the image file on disk", "id": "f14075:c1:m16"} {"signature": "def check_known_elements(self):", "body": "domain = self.get_clean_domain()if domain in list(self.custom_site_mapping.keys()):classes = self.custom_site_mapping.get(domain).split('')for classname in classes:KNOWN_IMG_DOM_NAMES.append(classname)image = Nonedoc = self.article.raw_docdef _check_elements(elements):for element in elements:tag = self.parser.getTag(element)if tag == '':return elementimages = self.parser.getElementsByTag(element, tag='')if images:return images[]return Nonefor css in KNOWN_IMG_DOM_NAMES:elements = self.parser.getElementsByTag(doc, attr=\"\", value=css)image = _check_elements(elements)if image is not None:src = self.parser.getAttribute(image, attr='')if src:return self.get_image(src, score=, extraction_type='')for css in KNOWN_IMG_DOM_NAMES:elements = self.parser.getElementsByTag(doc, attr='', value=css)image = _check_elements(elements)if image is not None:src = self.parser.getAttribute(image, attr='')if src:return self.get_image(src, score=, extraction_type='')return None", "docstring": "\\\n in here we check for known image contains from sites\n we've checked out like yahoo, techcrunch, etc... 
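The "known schemas" check above looks for an Open Graph image before falling back to heuristics. A standalone sketch of the same lookup using lxml directly (this is not the extractor's internal parser API, just the idea expressed with plain xpath):

from lxml import html

def opengraph_image(raw_html):
    # Open Graph pages advertise their lead image as <meta property="og:image" content="...">
    doc = html.fromstring(raw_html)
    candidates = doc.xpath('//meta[@property="og:image"]/@content')
    return candidates[0] if candidates else None

page = '<html><head><meta property="og:image" content="http://example.com/lead.jpg"/></head><body/></html>'
print(opengraph_image(page))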
that have\n * known places to look for good images.\n * TODO: enable this to use a series of settings files\n so people can define what the image ids/classes\n are on specific sites", "id": "f14075:c1:m18"} {"signature": "def build_image_path(self, src):", "body": "o = urlparse(src)if o.netloc != '':return o.geturl()return urljoin(self.article.final_url, src)", "docstring": "\\\n This method will take an image path and build\n out the absolute path to that image\n * using the initial url we crawled\n so we can find a link to the image\n if they use relative urls like ../myimage.jpg", "id": "f14075:c1:m19"} {"signature": "def get_favicon(self):", "body": "kwargs = {'': '', '': '', '': ''}meta = self.parser.getElementsByTag(self.article.doc, **kwargs)if meta:favicon = self.parser.getAttribute(meta[], '')return faviconreturn ''", "docstring": "Extract the favicon from a website\nhttp://en.wikipedia.org/wiki/Favicon\n\n", "id": "f14082:c0:m1"} {"signature": "def get_canonical_link(self):", "body": "if self.article.final_url:kwargs = {'': '', '': '', '': ''}meta = self.parser.getElementsByTag(self.article.doc, **kwargs)if meta is not None and len(meta) > :href = self.parser.getAttribute(meta[], '')if href:href = href.strip()o = urlparse(href)if not o.hostname:tmp = urlparse(self.article.final_url)domain = '' % (tmp.scheme, tmp.hostname)href = urljoin(domain, href)return hrefreturn self.article.final_url", "docstring": "if the article has meta canonical link set in the url", "id": "f14082:c0:m2"} {"signature": "def get_meta_lang(self):", "body": "attr = self.parser.getAttribute(self.article.doc, attr='')if attr is None:items = [{'': '', '': '', '': ''},{'': '', '': '', '': ''}]for item in items:meta = self.parser.getElementsByTag(self.article.doc, **item)if meta:attr = self.parser.getAttribute(meta[], attr='')breakif attr:value = attr[:]if re.search(RE_LANG, value):return value.lower()return None", "docstring": "Extract content language from meta", "id": "f14082:c0:m3"} {"signature": "def get_meta_content(self, meta_name):", "body": "meta = self.parser.css_select(self.article.doc, meta_name)content = Noneif meta is not None and len(meta) > :content = self.parser.getAttribute(meta[], '')if content:return content.strip()return ''", "docstring": "Extract a given meta content form document", "id": "f14082:c0:m4"} {"signature": "def get_meta_description(self):", "body": "return self.get_meta_content(\"\")", "docstring": "if the article has meta description set in the source, use that", "id": "f14082:c0:m5"} {"signature": "def get_meta_keywords(self):", "body": "return self.get_meta_content(\"\")", "docstring": "if the article has meta keywords set in the source, use that", "id": "f14082:c0:m6"} {"signature": "def get_meta_encoding(self):", "body": "encoding = get_encodings_from_content(self.article.raw_html)return encoding and encoding[] or None", "docstring": "Parse the meta encoding", "id": "f14082:c0:m7"} {"signature": "@propertydef top_image_node(self):", "body": "return self._top_image_node", "docstring": "etree: The most likely top image element node\n\n Note:\n Read only", "id": "f14085:c0:m1"} {"signature": "@propertydef src(self):", "body": "return self._src", "docstring": "str: Source URL for the image\n\n Note:\n Read only", "id": "f14085:c0:m2"} {"signature": "@propertydef confidence_score(self):", "body": "return self._confidence_score", "docstring": "float: The confidence score that this is the main image\n\n Note:\n Read only", "id": "f14085:c0:m3"} {"signature": "@propertydef 
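build_image_path and the canonical-link lookup above both resolve possibly-relative URLs against the page that was crawled; the core of that is urllib's urljoin. A small sketch, with absolutize as a hypothetical helper name:

from urllib.parse import urljoin, urlparse

def absolutize(page_url, src):
    # keep already-absolute URLs as-is, resolve relative ones against the page URL
    if urlparse(src).netloc:
        return src
    return urljoin(page_url, src)

print(absolutize("http://example.com/news/story.html", "../images/lead.jpg"))
# http://example.com/images/lead.jpg
print(absolutize("http://example.com/news/story.html", "http://cdn.example.com/a.jpg"))
# http://cdn.example.com/a.jpg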
height(self):", "body": "return self._height", "docstring": "int: The image height in pixels\n\n Note:\n Read only", "id": "f14085:c0:m4"} {"signature": "@propertydef width(self):", "body": "return self._width", "docstring": "int: The image width in pixels\n\n Note:\n Read only", "id": "f14085:c0:m5"} {"signature": "@propertydef extraction_type(self):", "body": "return self._extraction_type", "docstring": "str: The extraction type used\n\n Note:\n Read only", "id": "f14085:c0:m6"} {"signature": "@propertydef bytes(self):", "body": "return self._bytes", "docstring": "int: The size of the image in bytes\n\n Note:\n Read only", "id": "f14085:c0:m7"} {"signature": "def get_language(self):", "body": "if self.config.use_meta_language:if self.article.meta_lang:return self.article.meta_lang[:]return self.config.target_language", "docstring": "\\\n Returns the language is by the article or\n the configuration language", "id": "f14086:c0:m1"} {"signature": "def links_to_text(self):", "body": "self.parser.stripTags(self.get_top_node(), '')", "docstring": "\\\n cleans up and converts any nodes that\n should be considered text into text", "id": "f14086:c0:m6"} {"signature": "def make_list_elms_pretty(self):", "body": "for elm in self.parser.getElementsByTag(self.top_node, tag=''):elm.text = r''.format(elm.text)", "docstring": "make any list element read like a list", "id": "f14086:c0:m7"} {"signature": "def remove_negativescores_nodes(self):", "body": "gravity_items = self.parser.css_select(self.top_node, \"\")for item in gravity_items:score = self.parser.getAttribute(item, '')score = int(score, )if score < :item.getparent().remove(item)", "docstring": "\\\n if there are elements inside our top node\n that have a negative gravity score,\n let's give em the boot", "id": "f14086:c0:m8"} {"signature": "def replace_with_text(self):", "body": "self.parser.stripTags(self.get_top_node(), '', '', '', '', '')", "docstring": "\\\n replace common tags with just\n text so we don't have any crazy formatting issues\n so replace
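links_to_text and replace_with_text above both rely on stripping tags while keeping their text content, which is what lxml's etree.strip_tags does. A self-contained illustration of that behavior:

from lxml import etree, html

fragment = html.fromstring('<div><p>Read <b>this</b> and <a href="#">that</a>.</p></div>')
# strip_tags removes the listed tags but keeps their text and tails in place
etree.strip_tags(fragment, 'b', 'a')
print(html.tostring(fragment, encoding='unicode'))
# <div><p>Read this and that.</p></div>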
        , , , etc....\n with whatever text is inside them\n code : http://lxml.de/api/lxml.etree-module.html#strip_tags", "id": "f14086:c0:m9"} {"signature": "def remove_fewwords_paragraphs(self):", "body": "all_nodes = self.parser.getElementsByTags(self.get_top_node(), [''])all_nodes.reverse()for elm in all_nodes:tag = self.parser.getTag(elm)text = self.parser.getText(elm)stop_words = self.stopwords_class(language=self.get_language()).get_stopword_count(text)if ((tag != '' or text != '') and stop_words.get_stopword_count() < andlen(self.parser.getElementsByTag(elm, tag='')) == andlen(self.parser.getElementsByTag(elm, tag='')) == ):self.parser.remove(elm)else:trimmed = self.parser.getText(elm)if trimmed.startswith(\"\") and trimmed.endswith(\"\"):self.parser.remove(elm)", "docstring": "\\\n remove paragraphs that have less than x number of words,\n would indicate that it's some sort of link", "id": "f14086:c0:m10"} {"signature": "def get_encodings_from_content(content):", "body": "if isinstance(content, bytes):find_charset = re.compile(br'', flags=re.I).findallfind_xml = re.compile(br'').findallreturn [encoding.decode('') for encoding infind_charset(content) + find_xml(content)]else:find_charset = re.compile(r'', flags=re.I).findallfind_xml = re.compile(r'').findallreturn find_charset(content) + find_xml(content)", "docstring": "Code from:\nhttps://github.com/sigmavirus24/requests-toolbelt/blob/master/requests_toolbelt/utils/deprecated.py\nReturn encodings from given content string.\n:param content: string to extract encodings from.", "id": "f14087:m0"} {"signature": "@classmethoddef store_image(cls, http_client, link_hash, src, config):", "body": "image = cls.read_localfile(link_hash, src, config)if image:return imageif src.startswith(''):image = cls.write_localfile_base64(link_hash, src, config)return imagedata = http_client.fetch(src)if data:image = cls.write_localfile(data, link_hash, src, config)if image:return imagereturn None", "docstring": "\\\n Writes an image src http string to disk as a temporary file\n and returns the LocallyStoredImage object\n that has the info you should need on the image", "id": "f14088:c0:m1"} {"signature": "def smart_unicode(string, encoding='', strings_only=False, errors=''):", "body": "return force_unicode(string, encoding, strings_only, errors)", "docstring": "Returns a unicode object representing 's'. 
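The regex literals in get_encodings_from_content were lost in extraction above. The function is credited to requests-toolbelt, and patterns along the following lines (an assumption, not a reconstruction of the exact originals) recover charset declarations from meta tags and XML prologs:

import re

def encodings_from_content(content):
    # charset="..." inside <meta> tags, plus encoding="..." in an XML prolog
    find_charset = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I).findall
    find_xml = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]').findall
    return find_charset(content) + find_xml(content)

sample = '<meta charset="utf-8"><meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">'
print(encodings_from_content(sample))   # ['utf-8', 'iso-8859-1']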
Treats bytestrings using the\n'encoding' codec.\n\nIf strings_only is True, don't convert (some) non-string-like objects.", "id": "f14090:m0"} {"signature": "def is_protected_type(obj):", "body": "return isinstance(obj, (type(None), int,datetime.datetime, datetime.date, datetime.time,float, Decimal))", "docstring": "Determine if the object instance is of a protected type.\n\n Objects of protected types are preserved as-is when passed to\n force_unicode(strings_only=True).", "id": "f14090:m1"} {"signature": "def force_unicode(string, encoding='', strings_only=False, errors=''):", "body": "if isinstance(string, str):return stringif strings_only and is_protected_type(string):return stringtry:if not isinstance(string, str):if hasattr(string, ''):string = string.__unicode__()else:try:string = str(string, encoding, errors)except UnicodeEncodeError:if not isinstance(string, Exception):raisestring = ''.join([force_unicode(arg, encoding,strings_only,errors) for arg in string])elif not isinstance(string, str):string = string.decode(encoding, errors)except UnicodeDecodeError as ex:if not isinstance(string, Exception):raise DjangoUnicodeDecodeError(string, *ex.args)else:string = ''.join([force_unicode(arg, encoding, strings_only,errors) for arg in string])return string", "docstring": "Similar to smart_unicode, except that lazy instances are resolved to\nstrings, rather than kept as lazy objects.\n\nIf strings_only is True, don't convert (some) non-string-like objects.", "id": "f14090:m2"} {"signature": "def smart_str(string, encoding='', strings_only=False, errors=''):", "body": "if strings_only and isinstance(string, (type(None), int)):return stringif isinstance(string, str):try:return string.encode(encoding, errors)except UnicodeEncodeError:return string.encode('', errors)elif not isinstance(string, bytes):try:return str(string).encode(encoding, errors)except UnicodeEncodeError:if isinstance(string, Exception):return ''.join([smart_str(arg, encoding, strings_only,errors) for arg in string])return str(string).encode(encoding, errors)else:return string", "docstring": "Returns a bytestring version of 's', encoded as specified in 'encoding'.\n\nIf strings_only is True, don't convert (some) non-string-like objects.", "id": "f14090:m3"} {"signature": "@propertydef title(self):", "body": "return self._title", "docstring": "str: Title extracted from the HTML source\n\n Note:\n Read only", "id": "f14091:c0:m1"} {"signature": "@propertydef cleaned_text(self):", "body": "return self._cleaned_text", "docstring": "str: Cleaned text of the article without HTML tags; most commonly desired property\n\n Note:\n Read only", "id": "f14091:c0:m2"} {"signature": "@propertydef meta_description(self):", "body": "return self._meta_description", "docstring": "str: Contents of the meta-description field from the HTML source\n\n Note:\n Read only", "id": "f14091:c0:m3"} {"signature": "@propertydef meta_lang(self):", "body": "return self._meta_lang", "docstring": "str: Contents of the meta-lang field from the HTML source\n\n Note:\n Read only", "id": "f14091:c0:m4"} {"signature": "@propertydef meta_favicon(self):", "body": "return self._meta_favicon", "docstring": "str: Contents of the meta-favicon field from the HTML source\n\n Note:\n Read only", "id": "f14091:c0:m5"} {"signature": "@propertydef meta_keywords(self):", "body": "return self._meta_keywords", "docstring": "str: Contents of the meta-keywords field from the HTML source\n\n Note:\n Read only", "id": "f14091:c0:m6"} {"signature": "@propertydef meta_encoding(self):", 
"body": "return self._meta_encoding", "docstring": "str: Contents of the encoding/charset field from the HTML source\n\n Note:\n Read only", "id": "f14091:c0:m7"} {"signature": "@propertydef canonical_link(self):", "body": "return self._canonical_link", "docstring": "str: The canonical link of the article if found in the meta data\n\n Note:\n Read only", "id": "f14091:c0:m8"} {"signature": "@propertydef domain(self):", "body": "return self._domain", "docstring": "str: Domain of the article parsed\n\n Note:\n Read only", "id": "f14091:c0:m9"} {"signature": "@propertydef top_node(self):", "body": "return self._top_node", "docstring": "etree: The top Element that is a candidate for the main body of the article\n\n Note:\n Read only", "id": "f14091:c0:m10"} {"signature": "@propertydef top_image(self):", "body": "return self._top_image", "docstring": "Image: The top image object that likely represents the article\n\n Returns:\n Image: See more information on the goose3.Image class\n Note:\n Read only", "id": "f14091:c0:m11"} {"signature": "@propertydef tags(self):", "body": "return self._tags", "docstring": "list(str): List of article tags (non-metadata tags)\n\n Note:\n Read only", "id": "f14091:c0:m12"} {"signature": "@propertydef opengraph(self):", "body": "return self._opengraph", "docstring": "dict: All opengraph tag data\n\n Note:\n Read only", "id": "f14091:c0:m13"} {"signature": "@propertydef tweets(self):", "body": "return self._tweets", "docstring": "list(str): A listing of embeded tweets in the article\n\n Note:\n Read only", "id": "f14091:c0:m14"} {"signature": "@propertydef movies(self):", "body": "return self._movies", "docstring": "list(Video): A listing of all videos within the article such as\n YouTube or Vimeo\n\n Returns:\n list(Video): See more information on the goose3.Video class\n Note:\n Read only", "id": "f14091:c0:m15"} {"signature": "@propertydef links(self):", "body": "return self._links", "docstring": "list(str): A listing of URL links within the article\n\n Note:\n Read only", "id": "f14091:c0:m16"} {"signature": "@propertydef authors(self):", "body": "return self._authors", "docstring": "list(str): A listing of authors as parsed from the meta tags\n\n Note:\n Read only", "id": "f14091:c0:m17"} {"signature": "@propertydef final_url(self):", "body": "return self._final_url", "docstring": "str: The URL that was used to pull and parsed; `None` if raw_html was used\n and no url element was found.\n\n Note:\n Read only", "id": "f14091:c0:m18"} {"signature": "@propertydef link_hash(self):", "body": "return self._link_hash", "docstring": "str: The MD5 of the final url to be used for various identification tasks\n\n Note:\n Read only", "id": "f14091:c0:m19"} {"signature": "@propertydef raw_html(self):", "body": "return self._raw_html", "docstring": "str: The HTML represented as a string\n\n Note:\n Read only", "id": "f14091:c0:m20"} {"signature": "@propertydef doc(self):", "body": "return self._doc", "docstring": "etree: lxml document that is being processed\n\n Note:\n Read only", "id": "f14091:c0:m21"} {"signature": "@propertydef raw_doc(self):", "body": "return self._raw_doc", "docstring": "etree: Original, uncleaned, and untouched lxml document to be processed\n\n Note:\n Read only", "id": "f14091:c0:m22"} {"signature": "@propertydef schema(self):", "body": "return self._schema", "docstring": "dict: All schema tag data\n\n Note:\n Read only", "id": "f14091:c0:m23"} {"signature": "@propertydef publish_date(self):", "body": "return self._publish_date", "docstring": "str: 
The date the article was published based on meta tag extraction\n\n Note:\n Read only", "id": "f14091:c0:m24"} {"signature": "@propertydef publish_datetime_utc(self):", "body": "return self._publish_datetime_utc", "docstring": "datetime.datetime: The date time version of the published date based on meta tag extraction \\\n in the UTC timezone, if timezone information is known\n\n Note:\n Read only", "id": "f14091:c0:m25"} {"signature": "@propertydef additional_data(self):", "body": "return self._additional_data", "docstring": "dict: A property bucket for consumers of goose3 to store custom data extractions\n\n Note:\n Read only", "id": "f14091:c0:m26"} {"signature": "@propertydef infos(self):", "body": "data = {\"\": {\"\": self.meta_description,\"\": self.meta_lang,\"\": self.meta_keywords,\"\": self.meta_favicon,\"\": self.canonical_link,\"\": self.meta_encoding},\"\": None,\"\": self.domain,\"\": self.title,\"\": self.cleaned_text,\"\": self.opengraph,\"\": self.tags,\"\": self.tweets,\"\": [],\"\": self.links,\"\": self.authors,\"\": self.publish_date}if self.top_image is not None:data[''] = {'': self.top_image.src,'': self.top_image.width,'': self.top_image.height,'': ''}for movie in self.movies:data[''].append({'': movie.embed_type,'': movie.provider,'': movie.width,'': movie.height,'': movie.embed_code,'': movie.src,})return data", "docstring": "dict: The summation of all data available about the extracted article\n\n Note:\n Read only", "id": "f14091:c0:m27"} {"signature": "@propertydef known_context_patterns(self):", "body": "return self._known_context_patterns", "docstring": "list: The context patterns to search to find the likely article content\n\n Note:\n Each entry must be a dictionary with the following keys: `attr` and `value` \\\n or just `tag`", "id": "f14092:c3:m1"} {"signature": "@known_context_patterns.setterdef known_context_patterns(self, val):", "body": "def create_pat_from_dict(val):''''''if \"\" in val:pat = ArticleContextPattern(tag=val[\"\"])if \"\" in val:pat.attr = val[\"\"]pat.value = val[\"\"]elif \"\" in val:pat = ArticleContextPattern(attr=val[\"\"], value=val[\"\"])if \"\" in val:pat.domain = val[\"\"]return patif isinstance(val, list):self._known_context_patterns = [x if isinstance(x, ArticleContextPattern) else create_pat_from_dict(x)for x in val] + self.known_context_patternselif isinstance(val, ArticleContextPattern):self._known_context_patterns.insert(, val)elif isinstance(val, dict):self._known_context_patterns.insert(, create_pat_from_dict(val))else:raise Exception(\"\".format(type(val)))", "docstring": "val must be an ArticleContextPattern, a dictionary, or list of \\\n dictionaries\n e.g., {'attr': 'class', 'value': 'my-article-class'}\n or [{'attr': 'class', 'value': 'my-article-class'},\n {'attr': 'id', 'value': 'my-article-id'}]", "id": "f14092:c3:m2"} {"signature": "@propertydef known_publish_date_tags(self):", "body": "return self._known_publish_date_tags", "docstring": "list: The tags to search to find the likely published date\n\n Note:\n Each entry must be a dictionary with the following keys: `attribute`, `value`, \\\n and `content`.", "id": "f14092:c3:m3"} {"signature": "@known_publish_date_tags.setterdef known_publish_date_tags(self, val):", "body": "def create_pat_from_dict(val):''''''if \"\" in val:pat = PublishDatePattern(tag=val[\"\"])if \"\" in val:pat.attr = val[\"\"]pat.value = val[\"\"]elif \"\" in val:pat = PublishDatePattern(attr=val[\"\"], value=val[\"\"],content=val[\"\"])if \"\" in val:pat.subcontent = val[\"\"]if \"\" in 
val:pat.domain = val[\"\"]return patif isinstance(val, list):self._known_publish_date_tags = [x if isinstance(x, PublishDatePattern) else create_pat_from_dict(x)for x in val] + self.known_publish_date_tagselif isinstance(val, PublishDatePattern):self._known_publish_date_tags.insert(, val)elif isinstance(val, dict):self._known_publish_date_tags.insert(, create_pat_from_dict(val))else:raise Exception(\"\".format(type(val)))", "docstring": "val must be a dictionary or list of dictionaries\n e.g., {'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'}\n or [{'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'},\n {'attrribute': 'property', 'value': 'pub_time', 'content': 'content'}]", "id": "f14092:c3:m4"} {"signature": "@propertydef known_author_patterns(self):", "body": "return self._known_author_patterns", "docstring": "list: The tags to search to find the likely published date\n\n Note:\n Each entry must be a dictionary with the following keys: `attribute`, `value`, \\\n and `content`.", "id": "f14092:c3:m5"} {"signature": "@known_author_patterns.setterdef known_author_patterns(self, val):", "body": "def create_pat_from_dict(val):''''''if \"\" in val:pat = AuthorPattern(tag=val[\"\"])if \"\" in val:pat.attr = val[\"\"]pat.value = val[\"\"]elif \"\" in val:pat = AuthorPattern(attr=val[\"\"], value=val[\"\"],content=val[\"\"])if \"\" in val:pat.subpattern = create_pat_from_dict(val[\"\"])return patif isinstance(val, list):self._known_author_patterns = [x if isinstance(x, AuthorPattern) else create_pat_from_dict(x)for x in val] + self.known_author_patternselif isinstance(val, AuthorPattern):self._known_author_patterns.insert(, val)elif isinstance(val, dict):self._known_author_patterns.insert(, create_pat_from_dict(val))else:raise Exception(\"\".format(type(val)))", "docstring": "val must be a dictionary or list of dictionaries\n e.g., {'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'}\n or [{'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'},\n {'attrribute': 'property', 'value': 'pub_time', 'content': 'content'}]", "id": "f14092:c3:m6"} {"signature": "@propertydef strict(self):", "body": "return self._strict", "docstring": "bool: Enable `strict mode` and throw exceptions instead of\n swallowing them.\n\n Note:\n Defaults to `True`", "id": "f14092:c3:m7"} {"signature": "@strict.setterdef strict(self, val):", "body": "self._strict = bool(val)", "docstring": "set the strict property", "id": "f14092:c3:m8"} {"signature": "@propertydef http_timeout(self):", "body": "return self._http_timeout", "docstring": "float: The time delay to pass to `requests` to wait for the response\n in seconds\n\n Note:\n Defaults to 30.0", "id": "f14092:c3:m9"} {"signature": "@http_timeout.setterdef http_timeout(self, val):", "body": "self._http_timeout = float(val)", "docstring": "set the http_timeout property", "id": "f14092:c3:m10"} {"signature": "@propertydef local_storage_path(self):", "body": "return self._local_storage_path", "docstring": "str: The local path to store temporary files\n\n Note:\n Defaults to the value of `os.path.join(tempfile.gettempdir(), 'goose')`", "id": "f14092:c3:m11"} {"signature": "@local_storage_path.setterdef local_storage_path(self, val):", "body": "self._local_storage_path = val", "docstring": "set the local_storage_path property", "id": "f14092:c3:m12"} {"signature": "@propertydef debug(self):", "body": "return self._debug", "docstring": "bool: Turn on or off debugging\n\n Note:\n Defaults to `False`\n Warning:\n 
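The configuration properties documented above combine into a usage sketch like the following. The dictionary shapes follow the docstrings ('attr'/'value' for context patterns; 'attribute'/'value'/'content' for publish-date tags), and passing the Configuration object into Goose is assumed to work as in the package's examples.

from goose3 import Goose
from goose3.configuration import Configuration

config = Configuration()
# prefer <div class="my-article-class"> as the article body before the generic heuristics
config.known_context_patterns = {'attr': 'class', 'value': 'my-article-class'}
# read the publish date from <meta name="my-pubdate" datetime="...">
config.known_publish_date_tags = {'attribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'}
config.strict = False            # swallow parse errors instead of raising
config.http_timeout = 10.0       # seconds, passed through to requests
config.enable_image_fetching = False

g = Goose(config)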
Debugging is currently not implemented", "id": "f14092:c3:m13"} {"signature": "@debug.setterdef debug(self, val):", "body": "self._debug = bool(val)", "docstring": "set the debug property", "id": "f14092:c3:m14"} {"signature": "@propertydef parser_class(self):", "body": "return self._parser_class", "docstring": "str: The key of the parser to use\n\n Note:\n Defaults to `lxml`", "id": "f14092:c3:m15"} {"signature": "@parser_class.setterdef parser_class(self, val):", "body": "self._parser_class = val", "docstring": "set the parser_class property", "id": "f14092:c3:m16"} {"signature": "@propertydef available_parsers(self):", "body": "return self._available_parsers", "docstring": "list(str): A list of all possible parser values for the parser_class\n\n Note:\n Not settable", "id": "f14092:c3:m17"} {"signature": "@propertydef http_auth(self):", "body": "return self._http_auth", "docstring": "tuple: Authentication class and information to pass to the requests\n library\n\n See Also:\n `Requests Authentication `__", "id": "f14092:c3:m18"} {"signature": "@http_auth.setterdef http_auth(self, val):", "body": "self._http_auth = val", "docstring": "set the http_auth property", "id": "f14092:c3:m19"} {"signature": "@propertydef http_proxies(self):", "body": "return self._http_proxies", "docstring": "dict: Proxy information to pass directly to the supporting `requests` object\n\n See Also:\n `Requests Proxy Support `__", "id": "f14092:c3:m20"} {"signature": "@http_proxies.setterdef http_proxies(self, val):", "body": "self._http_proxies = val", "docstring": "set the http_proxies property", "id": "f14092:c3:m21"} {"signature": "@propertydef http_headers(self):", "body": "return self._http_headers", "docstring": "dict: Custom headers to pass directly to the supporting `requests` object\n\n See Also:\n `Requests Custom Headers `__", "id": "f14092:c3:m22"} {"signature": "@http_headers.setterdef http_headers(self, val):", "body": "self._http_headers = val", "docstring": "set the http_headers property", "id": "f14092:c3:m23"} {"signature": "@propertydef browser_user_agent(self):", "body": "return self._browser_user_agent", "docstring": "Browser user agent string to use when making URL requests\n\n Note:\n Defaults to `Goose/{goose3.__version__}`\n\n Examples:\n Using the non-standard browser agent string is advised when pulling\n frequently\n\n >>> config.browser_user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2)'\n >>> config.browser_user_agent = 'AppleWebKit/534.52.7 (KHTML, like Gecko)'\n >>> config.browser_user_agent = 'Version/5.1.2 Safari/534.52.7'", "id": "f14092:c3:m24"} {"signature": "@browser_user_agent.setterdef browser_user_agent(self, val):", "body": "self._browser_user_agent = val", "docstring": "set the browser user agent string", "id": "f14092:c3:m25"} {"signature": "@propertydef imagemagick_identify_path(self):", "body": "return self._imagemagick_identify_path", "docstring": "str: Path to the identify program that is part of imagemagick\n\n Note:\n Defaults to `\"/opt/local/bin/identify\"`\n Warning:\n Currently not used / implemented", "id": "f14092:c3:m26"} {"signature": "@imagemagick_identify_path.setterdef imagemagick_identify_path(self, val):", "body": "self._imagemagick_identify_path = val", "docstring": "set the imagemagick identify program path", "id": "f14092:c3:m27"} {"signature": "@propertydef imagemagick_convert_path(self):", "body": "return self._imagemagick_convert_path", "docstring": "str: Path to the convert program that is part of imagemagick\n\n Note:\n Defaults to 
`\"/opt/local/bin/convert\"`\n Warning:\n Currently not used / implemented", "id": "f14092:c3:m28"} {"signature": "@imagemagick_convert_path.setterdef imagemagick_convert_path(self, val):", "body": "self._imagemagick_convert_path = val", "docstring": "set the imagemagick convert program path", "id": "f14092:c3:m29"} {"signature": "@propertydef stopwords_class(self):", "body": "return self._stopwords_class", "docstring": "StopWords: The StopWords class to use when analyzing article content\n\n Note:\n Defaults to the english stop words\n Note:\n Current stop words available in `goose3.text` include: \\n\n `StopWords`, `StopWordsChinese`, `StopWordsArabic`, and `StopWordsKorean`", "id": "f14092:c3:m30"} {"signature": "@stopwords_class.setterdef stopwords_class(self, val):", "body": "self._stopwords_class = val", "docstring": "set the stopwords class to use", "id": "f14092:c3:m31"} {"signature": "@propertydef target_language(self):", "body": "return self._target_language", "docstring": "str: The default target language if the language is not extractable\n or if use_meta_language is set to False\n\n Note:\n Default language is 'en'", "id": "f14092:c3:m32"} {"signature": "@target_language.setterdef target_language(self, val):", "body": "self._target_language = val", "docstring": "set the target language property", "id": "f14092:c3:m33"} {"signature": "@propertydef use_meta_language(self):", "body": "return self._use_meta_language", "docstring": "bool: Determine if language should be extracted from the meta tags\n or not. If this is set to `False` then the target_language will be\n used. Also, if extraction fails then the target_language will be\n utilized.\n\n Note:\n Defaults to `True`", "id": "f14092:c3:m34"} {"signature": "@use_meta_language.setterdef use_meta_language(self, val):", "body": "self._use_meta_language = bool(val)", "docstring": "set the use_meta_language property", "id": "f14092:c3:m35"} {"signature": "@propertydef enable_image_fetching(self):", "body": "return self._enable_image_fetching", "docstring": "bool: Turn on or off image extraction\n\n Note:\n Defaults to `False`", "id": "f14092:c3:m36"} {"signature": "@enable_image_fetching.setterdef enable_image_fetching(self, val):", "body": "self._enable_image_fetching = bool(val)", "docstring": "set the enable_image_fetching property", "id": "f14092:c3:m37"} {"signature": "@propertydef images_min_bytes(self):", "body": "return self._images_min_bytes", "docstring": "int: Minimum number of bytes for an image to be evaluated to be the\n main image of the site\n\n Note:\n Defaults to 4500 bytes", "id": "f14092:c3:m38"} {"signature": "@images_min_bytes.setterdef images_min_bytes(self, val):", "body": "self._images_min_bytes = int(val)", "docstring": "set the images_min_bytes property", "id": "f14092:c3:m39"} {"signature": "@propertydef pretty_lists(self):", "body": "return self._pretty_lists", "docstring": "bool: Specify if lists should be pretty printed in the cleaned_text\n output\n\n Note:\n Defaults to `True`", "id": "f14092:c3:m40"} {"signature": "@pretty_lists.setterdef pretty_lists(self, val):", "body": "self._pretty_lists = bool(val)", "docstring": "set if lists should be pretty printed", "id": "f14092:c3:m41"} {"signature": "@parse_lists.setterdef parse_lists(self, val):", "body": "self._parse_lists = bool(val)", "docstring": "set if headers should be parsed", "id": "f14092:c3:m43"} {"signature": "@propertydef parse_headers(self):", "body": "return self._parse_headers", "docstring": "bool: Specify if headers should be pulled 
or not in the cleaned_text\n output\n\n Note:\n Defaults to `True`", "id": "f14092:c3:m44"} {"signature": "@parse_headers.setterdef parse_headers(self, val):", "body": "self._parse_headers = bool(val)", "docstring": "set if headers should be parsed", "id": "f14092:c3:m45"} {"signature": "def get_parser(self):", "body": "return AVAILABLE_PARSERS[self.parser_class]", "docstring": "Retrieve the current parser class to use for extraction\n\n Returns:\n Parser: The parser to use", "id": "f14092:c3:m46"} {"signature": "def __enter__(self):", "body": "return self", "docstring": "Setup the context manager", "id": "f14093:c0:m1"} {"signature": "def __exit__(self, exc_type, exc_val, exc_tb):", "body": "self.close()", "docstring": "Define what to do when the context manager exits", "id": "f14093:c0:m2"} {"signature": "def close(self):", "body": "if self.fetcher is not None:self.shutdown_network()self.finalizer.atexit = False", "docstring": "Close the network connection and perform any other required cleanup\n\n Note:\n Auto closed when using goose as a context manager or when garbage collected", "id": "f14093:c0:m3"} {"signature": "def extract(self, url=None, raw_html=None):", "body": "crawl_candidate = CrawlCandidate(self.config, url, raw_html)return self.__crawl(crawl_candidate)", "docstring": "Extract the most likely article content from the html page\n\n Args:\n url (str): URL to pull and parse\n raw_html (str): String representation of the HTML page\n Returns:\n Article: Representation of the article contents \\\n including other parsed and extracted metadata", "id": "f14093:c0:m4"} {"signature": "def shutdown_network(self):", "body": "self.fetcher.close()self.fetcher = None", "docstring": "Close the network connection\n\n Note:\n Auto closed when using goose as a context manager or when garbage collected", "id": "f14093:c0:m5"} {"signature": "def __crawl(self, crawl_candidate):", "body": "def crawler_wrapper(parser, parsers_lst, crawl_candidate):try:crawler = Crawler(self.config, self.fetcher)article = crawler.crawl(crawl_candidate)except (UnicodeDecodeError, ValueError) as ex:if parsers_lst:parser = parsers_lst.pop() return crawler_wrapper(parser, parsers_lst, crawl_candidate)else:raise exreturn articleparsers = list(self.config.available_parsers)parsers.remove(self.config.parser_class)return crawler_wrapper(self.config.parser_class, parsers, crawl_candidate)", "docstring": "wrap the crawling functionality", "id": "f14093:c0:m6"} {"signature": "@propertydef embed_type(self):", "body": "return self._embed_type", "docstring": "str: The type of embeding such as embed, object, or iframe\n\n Note:\n Read only", "id": "f14095:c0:m1"} {"signature": "@propertydef provider(self):", "body": "return self._provider", "docstring": "str: The video provider\n\n Note:\n Read only", "id": "f14095:c0:m2"} {"signature": "@propertydef width(self):", "body": "return self._width", "docstring": "int: The video width in pixels\n\n Note:\n Read only", "id": "f14095:c0:m3"} {"signature": "@propertydef height(self):", "body": "return self._height", "docstring": "int: The video height in pixels\n\n Note:\n Read only", "id": "f14095:c0:m4"} {"signature": "@propertydef embed_code(self):", "body": "return self._embed_code", "docstring": "str: The embed code of the video\n\n Note:\n Read only", "id": "f14095:c0:m5"} {"signature": "@propertydef src(self):", "body": "return self._src", "docstring": "str: The URL source of the video\n\n Note:\n Read only", "id": "f14095:c0:m6"} {"signature": "def read_file(filepath):", "body": 
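Putting the pieces above together, a typical extraction run uses the context-manager form so the network session is closed automatically; extract() accepts either a url or a raw_html string, per its signature above:

from goose3 import Goose

with Goose() as g:
    article = g.extract(url='http://example.com/some-news-story')
    print(article.title)
    print(article.meta_description)
    print(article.cleaned_text[:200])
    if article.top_image is not None:
        print(article.top_image.src, article.top_image.width, article.top_image.height)

    # already-downloaded pages can be parsed without a network round trip
    article = g.extract(raw_html='<html><head><title>t</title></head><body><p>text</p></body></html>')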
"with open(filepath, '') as filepointer:res = filepointer.read()return res", "docstring": "read the file", "id": "f14099:m0"} {"signature": "def get_html_theme_path():", "body": "cur_dir = path.abspath(path.dirname(path.dirname(__file__)))return cur_dir", "docstring": "Return list of HTML theme paths.", "id": "f14100:m0"} {"signature": "def get_param_values(request, model=None):", "body": "if type(request) == dict:return requestparams = get_payload(request)try:del params['']params[params.pop('')] = params.pop('')except KeyError:passreturn {k.rstrip(''): safe_eval(v) if not type(v) == list else [safe_eval(sv) for sv in v]for k, v in params.items()}", "docstring": "Converts the request parameters to Python.\n\n:param request: || \n\n:return: ", "id": "f14110:m0"} {"signature": "def get_context(request, model=None):", "body": "param_values = get_param_values(request, model=model)context = param_values.pop('', {})if isinstance(context, (unicode, str)):context = projex.rest.unjsonify(context)has_limit = '' in context or '' in param_valuesorb_context = orb.Context(**context)used = set()query_context = {}for key in orb.Context.Defaults:if key in param_values:used.add(key)query_context[key] = param_values.get(key)schema_values = {}if model:for key, value in request.matchdict.items():if model.schema().column(key, raise_=False):schema_values[key] = valuefor key, value in param_values.items():root_key = key.split('')[]schema_object = model.schema().column(root_key, raise_=False) or model.schema().collector(root_key)if schema_object:value = param_values.pop(key)if isinstance(schema_object, orb.Collector) and type(value) not in (tuple, list):value = [value]schema_values[key] = valuequery_context[''] = {'': request}try:default_context = request.orb_default_contextexcept AttributeError:try:query_context[''].update(request.orb_scope)except AttributeError:passelse:if '' in default_context:query_context[''].update(default_context.pop(''))for k, v in default_context.items():query_context.setdefault(k, v)orb_context.update(query_context)return schema_values, orb_context", "docstring": "Extracts ORB context information from the request.\n\n:param request: \n:param model: || None\n\n:return: { key: value} values, ", "id": "f14110:m1"} {"signature": "def register(self, service, name=''):", "body": "try:is_model = issubclass(service, orb.Model)except StandardError:is_model = Falseif is_model:self.services[service.schema().dbname()] = (ModelService, service)else:super(OrbApiFactory, self).register(service, name=name)", "docstring": "Exposes a given service to this API.", "id": "f14114:c0:m2"} {"signature": "def make_postcard(self, npix=, shape=(, ), buffer_size=):", "body": "source = self.kicclient = kplr.API()targ = client.target(source)channel = [targ.params[''], targ.params[''], targ.params[''], targ.params['']]col = [targ.params[''], targ.params[''], targ.params[''], targ.params['']] row = [targ.params[''], targ.params[''], targ.params[''], targ.params['']] if None in row:raise ValueError('')if None in col:raise ValueError('')center = np.array([npix/, npix/])if (np.min(col) < npix/):jump = npix/ - np.min(col) + buffer_sizecol += jumpcenter[] -= jumpif (np.min(row) < npix/):jump = npix/ - np.min(row) + buffer_sizerow += jumpcenter[] -= jumpif (np.max(row) > shape[] - npix/):jump = shape[]-npix/ - np.max(row) - buffer_sizerow += jumpcenter[] -= jumpif (np.max(col) > shape[] - npix/):jump = shape[]-npix/ - np.max(col) - buffer_sizecol += jumpcenter[] -= jumpfin_arr = np.zeros((len(self.times), npix, npix))for 
icount, iname in enumerate(self.obs_filenames): a = fits.open(self.ffi_dir+iname)quarter = a[].header['']if int(quarter) == :season = else:season = (int(quarter) - ) % img = a[channel[season]].dataimg -= np.median(img)ymin = int(max([int(row[season])-npix/,]))ymax = int(min([int(row[season])+npix/,img.shape[]]))xmin = int(max([int(col[season])-npix/,]))xmax = int(min([int(col[season])+npix/,img.shape[]]))pimg = img[ymin:ymax,xmin:xmax]fin_arr[icount,:,:] = pimgself.postcard = fin_arrself.integrated_postcard = np.sum(self.postcard, axis=)self.center = center", "docstring": "Develop a \"postcard\" region around the target star.\nOther stars in this postcard will be used as possible reference stars.\n\nArgs: \n npix: The size of the postcard region. The region will be a square with sides npix pixels\n (default: ``300``)\n shape: The size of each individual image. For Kepler/K2 FFIs this should never need to be\n changed from the default, but will be different for e.g. TESS FFIs (default: ``(1070, 1132)``)\n buffer_size: The number of pixels at the edge of the detector to avoid (default: ``15``)", "id": "f14117:c0:m1"} {"signature": "def find_other_sources(self, edge_lim = , min_val = , ntargets = , extend_region_size=, remove_excess=,plot_flag = False, plot_window=):", "body": "j,i = self.centerregion = self.integrated_postcard + if plot_flag == True:ff = plt.imshow(self.integrated_postcard, interpolation='', cmap='', vmax = np.percentile(region, ))plt.colorbar(ff)plt.show()targets = np.zeros_like(self.integrated_postcard)sizeimg = np.shape(targets)[]jj = j + ii = i + edge = edge_limlim = max(min_val, self.integrated_postcard[int(j), int(i)]*edge)maxpt = np.percentile(self.integrated_postcard, )bin_img = (region > lim)lab_img, n_features = label(bin_img)key_targ = (lab_img == (lab_img[int(j), int(i)]))tot = np.sum(key_targ)targets[key_targ] = region[key_targ] = lim = np.zeros(ntargets)for peaks in range(,ntargets):k = np.argmax(region)j,i = np.unravel_index(k, region.shape)lim[peaks] = max(maxpt, edge*region[j,i])bin_img = (region >= lim[peaks])lab_img, n_features = label(bin_img)key_targ = (lab_img == (lab_img[j,i]))targets[key_targ] = peaks + region[key_targ] = lab_img, n_features = label(targets)for i in range(, ntargets+):for j in range(extend_region_size):border= mh.labeled.border(targets, , i)targets[border*(region < ()*lim[peaks])] = ifor i in range(, ntargets+):for j in range(, ntargets+):if i != j:border = mh.labeled.border(targets, i, j)if np.sum(border) != :targets[targets == j] = itargets = mh.labeled.remove_bordering(targets)for k in range(remove_excess):for i in range(ntargets):if np.sum(self.integrated_postcard[targets == i]) < :targets[targets > i] -= self.targets = targetsif plot_flag == True:plt.imshow(self.targets, interpolation='')plt.show()plt.imshow(((targets == )*self.integrated_postcard + (targets == )*)[jj-plot_window:jj+plot_window,ii-plot_window:ii+plot_window], interpolation='', cmap='', vmax=np.percentile(self.integrated_postcard, ))plt.show()plt.imshow((np.ceil(targets/)*self.integrated_postcard+np.ceil(targets/)*), interpolation='', cmap='', vmax=np.percentile(self.integrated_postcard, ))plt.show()", "docstring": "Identify apertures for all sources on the postcard, both for the \ntarget and potential reference stars\n\nArgs: \n edge_lim: The initial limit for the creation of apertures. 
The aperture will be a region of\n contiguous pixels with flux values larger than the product of ``edge_lim`` and the brightest\n pixel value for this star, as long as that product is larger than ``min_val`` (default: ``0.015``)\n min_val: Threshold for the minimum flux value in the ``integrated_postcard`` for a pixel to be included \n in the default apertures (default: ``5000``)\n ntargets: The maximum number of potential reference stars to be included in the analysis (default: ``250``)\n extend_region_size: After the initial apertures are generated, they will be optionally extended an\n additional number of pixels following this flag. Safe practice for reasonable apertures is to \n leave ``min_val`` at a value well above the noise and then extend apertures via this flag until \n they are of suitable size (default: ``3``)\n remove_excess: Stars with apertures that touch will be combined into a single aperture. \n This is done by iterating through the starlist; this flag represents the number of times the\n list will be iterated through to delete redundant apertures (default: ``4``)\n plot_flag: If true, a series of diagnostic plots will appear while this function runs to observe\n apertures for the target star and other stars.\n (default: ``False``)\n plot_window: If ``plot_flag`` is ``True``, the size of the region to be plotted around the target star\n to show the drawn aperture for visualization purposes only (default: ``15``)", "id": "f14117:c0:m5"} {"signature": "def do_photometry(self):", "body": "std_f = np.zeros()data_save = np.zeros_like(self.postcard)self.obs_flux = np.zeros_like(self.reference_flux)for i in range():g = np.where(self.qs == i)[]wh = np.where(self.times[g] > )data_save[g] = np.roll(self.postcard[g], int(self.roll_best[i,]), axis=)data_save[g] = np.roll(data_save[g], int(self.roll_best[i,]), axis=)self.target_flux_pixels = data_save[:,self.targets == ]self.target_flux = np.sum(self.target_flux_pixels, axis=)self.obs_flux[g] = self.target_flux[g] / self.reference_flux[g]self.obs_flux[g] /= np.median(self.obs_flux[g[wh]])fitline = np.polyfit(self.times[g][wh], self.obs_flux[g][wh], )std_f[i] = np.max([np.std(self.obs_flux[g][wh]/(fitline[]*self.times[g][wh]+fitline[])), ])self.flux_uncert = std_f", "docstring": "Does photometry and estimates uncertainties by calculating the scatter around a linear fit to the data\nin each orientation. This function is called by other functions and generally the user will not need\nto interact with it directly.", "id": "f14117:c0:m6"} {"signature": "def generate_panel(self, img):", "body": "plt.figure(figsize=(,))ax = plt.gca()fig = plt.gcf()plt.subplot()data_save = np.zeros_like(self.postcard)self.roll_best = np.zeros((,))for i in range():g = np.where(self.qs == i)[]wh = np.where(self.times[g] > )self.roll_best[i] = self.do_rolltest(g, wh)self.do_photometry()for i in range():g = np.where(self.qs == i)[]plt.errorbar(self.times[g], self.obs_flux[g], yerr=self.flux_uncert[i], fmt=fmt[i])plt.xlabel('', fontsize=)plt.ylabel('', fontsize=)plt.subplot()implot = plt.imshow(img, interpolation='', cmap='', vmin=*, vmax=*)cid = fig.canvas.mpl_connect('', self.onclick)plt.show(block=True)", "docstring": "Creates the figure shown in ``adjust_aperture`` for visualization purposes. Called by other functions\nand generally not called by the user directly.\n\nArgs: \n img: The data frame to be passed through to be plotted. 
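The aperture construction described above (threshold the summed image at max(min_val, edge_lim * the target pixel's flux) and keep the contiguous region containing the target) can be sketched with scipy.ndimage.label in place of the mahotas calls; target_aperture is a hypothetical helper name, and the defaults are the ones stated in the docstring:

import numpy as np
from scipy.ndimage import label

def target_aperture(img, row, col, edge_lim=0.015, min_val=5000):
    # pixels above the threshold, grouped into contiguous regions; keep the
    # region that contains the target pixel
    lim = max(min_val, edge_lim * img[row, col])
    lab_img, _ = label(img > lim)
    return lab_img == lab_img[row, col]

img = np.random.poisson(100, (50, 50)).astype(float)
img[20:25, 20:25] += 2e5                       # inject a bright fake star
mask = target_aperture(img, 22, 22)
print(mask.sum(), "pixels in the aperture")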
A cutout of the ``integrated_postcard``", "id": "f14117:c0:m7"} {"signature": "def adjust_aperture(self, image_region=, ignore_bright=):", "body": "self.ignore_bright = ignore_brightself.calc_fluxes()self.coordsx = []self.coordsy = []jj, ii = self.centerjj, ii = int(jj), int(ii) plt.ion()img = np.sum(((self.targets == )*self.postcard + (self.targets == )*)[:,jj-image_region:jj+image_region,ii-image_region:ii+image_region], axis=)self.generate_panel(img)while len(self.coordsx) != :for i in range(len(self.coordsx)):if self.targets[self.coordsy[i]+jj-image_region,self.coordsx[i]+ii-image_region] != :self.targets[self.coordsy[i]+jj-image_region,self.coordsx[i]+ii-image_region] = elif self.targets[self.coordsy[i]+jj-image_region,self.coordsx[i]+ii-image_region] == :self.targets[self.coordsy[i]+jj-image_region,self.coordsx[i]+ii-image_region] = if self.coordsy[i] == :thiscol = np.where(self.targets[:,self.coordsx[i]+ii-image_region] == )self.targets[thiscol,self.coordsx[i]+ii-image_region] = if self.coordsx[i] == :thiscol = np.where(self.targets[self.coordsy[i]+jj-image_region,:] == )self.targets[self.coordsy[i]+jj-image_region, thiscol] = self.coordsx = []self.coordsy = []img = np.sum(((self.targets == )*self.postcard + (self.targets == )*)[:,jj-image_region:jj+image_region,ii-image_region:ii+image_region],axis=)self.generate_panel(img)", "docstring": "Develop a panel showing the current aperture and the light curve as judged from that aperture.\nClicking on individual pixels on the aperture will toggle those pixels on or off into the\naperture (which will be updated after closing the plot).\nClicking on the 0th row or column will turn off all pixels in that column or row, respectively.\nWill iterate continuously until the figure is closed without updating any pixels.\n\n\nArgs: \n image_region: The size of the region around the target star to be plotted. Images will be a square \n with side length ``image_region`` (default: ``15``)\n ignore_bright: The number of brightest stars to be ignored in the determination of the flux from \n reference stars. If there is reason to believe (for example) that saturated stars may behave\n differently than the target star, they can be avoided with this flag (default: ``0``)", "id": "f14117:c0:m8"} {"signature": "def data_for_target(self, do_roll=True, ignore_bright=):", "body": "self.ignore_bright = ignore_brightself.calc_fluxes()self.roll_best = np.zeros((,))if do_roll == True:for i in range():g = np.where(self.qs == i)[]wh = np.where(self.times[g] > )self.roll_best[i] = self.do_rolltest(g, wh)self.do_photometry()", "docstring": "Determine the normalized photometry, accounting for effects shared by reference stars. Does not provide\nthe opportunity to adjust the aperture\n\nArgs: \n image_region: If ``True`` allow the aperture to be shifted up to one pixel in both the x and y\n directions to account for differential velocity aberration (default: ``True``)\n ignore_bright: The number of brightest stars to be ignored in the determination of the flux from \n reference stars. 
If there is reason to believe (for example) that saturated stars may behave\n differently than the target star, they can be avoided with this flag (default: ``0``)", "id": "f14117:c0:m9"} {"signature": "def calc_fluxes(self, min_flux = , outlier_iterations=,max_outlier_obs=, outlier_limit=):", "body": "jj, ii = self.centernumer = np.zeros(len(self.times))denom = np.zeros(len(self.times))factr = np.zeros(len(self.times))numer_pix = self.postcard[:,self.targets == ]numer = np.sum(numer_pix, axis=)tar_vals = np.zeros((len(self.times), int(np.max(self.targets)+--self.ignore_bright)))for i in range(+self.ignore_bright,int(np.max(self.targets)+)):tval = np.sum(self.postcard[:,self.targets == i], axis=)tar_vals[:,i--self.ignore_bright] = tval for i in range(len(self.obs_filenames)):if np.max(tar_vals[i]) < min_flux:tar_vals[self.qs == self.qs[i]] = all_tar = np.zeros((len(self.times), int(np.max(self.targets)-self.ignore_bright)))all_tar[:,] = numerall_tar[:,:] = tar_valsself.photometry_array = all_tarfor i in range(len(tar_vals[])):for j in range():g = np.where(self.qs == j)[] tar_vals[g,i] /= (np.median(tar_vals[g,i])+)tar_vals_old = tar_vals + for i in range(outlier_iterations):nonzeros = np.where(tar_vals[,:] != )[]med = np.median(tar_vals[:,nonzeros], axis=)std = np.std(tar_vals[:,nonzeros], axis=)if np.sum(tar_vals) != :tar_vals_old = tar_vals + for k in range(len(tar_vals[])):h = np.where((np.abs(med-tar_vals[:,k])/std) > outlier_limit)[]if len(h) >= max_outlier_obs:tar_vals[:,k] = if np.sum(tar_vals) == :tar_vals = tar_vals_old + denom = np.sum(tar_vals, axis=)self.target_flux_pixels = numer_pixself.reference_flux = denom", "docstring": "Determine the suitable reference stars, and then the total flux in those stars and \nin the target star in each epoch\n\nArgs: \n min_flux: The size of the region around the target star to be plotted. Images will be a square \n with side length ``image_region`` (default: ``5000``)\n outlier_iterations: The number of iterations to remove outliers from the reference star sample\n (stars at epochs with more than ``max_outlier_obs`` observations more than ``outlier_limit`` standard\n deviations from the median value for all stars after normalization) (default: ``5``)\n max_outlier_obs: The maximum number of epochs at which a star is allowed to be more than ``outlier_limit``\n standard deviations from the median value for all stars before it is removed as a suitable\n reference star (default: ``4``)\n outlier_limit: The level of deviation (measured in standard deviations) which a target is allowed\n to be discrepant from the median. If it is this discrepant at more than ``max_outlier_obs``\n epochs, it is removed from consideration (default: ``1.7``)", "id": "f14117:c0:m10"} {"signature": "def calc_centroids(self):", "body": "self.cm = np.zeros((len(self.postcard), ))for i in range(len(self.postcard)):target = self.postcard[i]target[self.targets != ] = self.cm[i] = center_of_mass(target)", "docstring": "Identify the centroid positions for the target star at all epochs. 
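The methods above are meant to be called in sequence on a photometry target object. A sketch of that order; the photometry.star constructor and its arguments are assumptions (they are not shown in this listing), while the method names, defaults, and the obs_flux/flux_uncert attributes are the ones documented above.

from f3 import photometry

target = photometry.star(8462852, ffi_dir='ffidata/')   # hypothetical instantiation
target.make_postcard()                                   # cut a postcard region around the star
target.find_other_sources(plot_flag=False)               # apertures for the target and reference stars
target.data_for_target(do_roll=True, ignore_bright=0)    # relative photometry, no interactive editing
target.model_uncert()                                    # per-point uncertainty model

print(target.obs_flux[:10])
print(target.flux_uncert)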
Useful for verifying that there is\nno correlation between flux and position, as might be expected for high proper motion stars.", "id": "f14117:c0:m11"} {"signature": "def define_spotsignal(self):", "body": "client = kplr.API()star = client.star(self.kic)lcs = star.get_light_curves(short_cadence=False)time, flux, ferr, qual = [], [], [], []for lc in lcs:with lc.open() as f:hdu_data = f[].datatime.append(hdu_data[\"\"])flux.append(hdu_data[\"\"])ferr.append(hdu_data[\"\"])qual.append(hdu_data[\"\"])tout = np.array([])fout = np.array([])eout = np.array([])for i in range(len(flux)):t = time[i][qual[i] == ]f = flux[i][qual[i] == ]e = ferr[i][qual[i] == ]t = t[np.isfinite(f)]e = e[np.isfinite(f)]f = f[np.isfinite(f)]e /= np.median(f)f /= np.median(f)tout = np.append(tout, t[:]+)fout = np.append(fout, f[:])eout = np.append(eout, e[:])self.spot_signal = np.zeros()for i in range(len(self.times)):if self.times[i] < :self.spot_signal[i] = else:self.spot_signal[i] = fout[np.abs(self.times[i] - tout) == np.min(np.abs(self.times[i] - tout))]", "docstring": "Identify the \"expected\" flux value at the time of each observation based on the \nKepler long-cadence data, to ensure variations observed are not the effects of a single\nlarge starspot. Only works if the target star was targeted for long or short cadence\nobservations during the primary mission.", "id": "f14117:c0:m12"} {"signature": "def model_uncert(self):", "body": "Y = self.photometry_array.TY /= np.median(Y, axis=)[:, None]C = np.median(Y, axis=)nstars, nobs = np.shape(Y)Z = np.empty((nstars, ))qs = self.qs.astype(int)for s in range():Z[:, s] = np.median((Y / C)[:, qs == s], axis=)resid2 = (Y - Z[:, qs] * C)**z = Z[:, qs]trend = z * C[None, :]lnS = np.log(np.nanmedian(resid2, axis=))jitter = np.log(*np.nanmedian(np.abs(np.diff(Y, axis=))))cal_ferr = np.sqrt(np.exp(*(jitter/trend))+z***np.exp(lnS)[None, :])self.modeled_uncert = cal_ferrself.target_uncert = cal_ferr[]", "docstring": "Estimate the photometric uncertainties on each data point following Equation A.2 of The Paper.\nBased on the kepcal package of Dan Foreman-Mackey.", "id": "f14117:c0:m13"} {"signature": "def multinest(parameter_names, transform, loglikelihood, output_basename, **problem):", "body": "import numpyfrom numpy import log, expimport pymultinestparameters = parameter_namesn_params = len(parameters)def myprior(cube, ndim, nparams):params = transform([cube[i] for i in range(ndim)])for i in range(ndim):cube[i] = params[i]def myloglike(cube, ndim, nparams):l = loglikelihood([cube[i] for i in range(ndim)])return lmn_args = dict(importance_nested_sampling = False, outputfiles_basename = output_basename,resume = problem.get('', False), verbose = True,n_live_points = problem.get('', ),const_efficiency_mode = False)if '' in problem:mn_args[''] = problem['']pymultinest.run(myloglike, myprior, n_params, **mn_args)import jsonwith file('' % output_basename, '') as f:json.dump(parameters, f, indent=)a = pymultinest.Analyzer(n_params = n_params, outputfiles_basename = output_basename)s = a.get_stats()with open('' % a.outputfiles_basename, mode='') as f:json.dump(s, f, indent=)chain = a.get_equal_weighted_posterior()[:,:-]lower = [m[''][] for m in s['']]upper = [m[''][] for m in s['']]stdev = (numpy.array(upper) - numpy.array(lower)) / center = [m[''] for m in s['']]data = numpy.loadtxt('' % output_basename)i = data[:,-].argmax()final = data[i,:-] return dict(start=final, chain=chain,stdev=stdev, upper=upper, lower=lower,method='')", "docstring": "**MultiNest Nested Sampling**\n\nvia 
`PyMultiNest `_.\n\n:param parameter_names: name of parameters; not directly used here, \n but for multinest_marginal.py plotting tool.", "id": "f14121:m0"} {"signature": "def optimize(function, x0, cons=[], ftol=, disp=, plot=False):", "body": "if disp > :print()print('')print()print('', function, '', x0)points = []values = []def recordfunction(x):v = function(x)points.append(x)values.append(v)return v(a, b, c), (va, vb, vc) = seek_minimum_bracket(recordfunction, x0, cons=cons, ftol=ftol, disp=disp, plot=plot)if disp > :print('')print('' % len(points), (a, b, c), (va, vb, vc))if disp > :if plot:plot_values(values, points, lastpoint=-, ftol=ftol)pause()result = brent(recordfunction, a, b, c, va, vb, vc, cons=cons, ftol=ftol, disp=disp, plot=plot)if disp > :print('')print('' % len(points), result)if disp > or len(points) > :if plot:plot_values(values, points, lastpoint=-, ftol=ftol)if disp > :pause()if disp > :print('')print()print('')print()global nevalneval += len(points)return result", "docstring": "**Optimization method based on Brent's method**\n\nFirst, a bracket (a b c) is sought that contains the minimum (b value is \nsmaller than both a or c).\n\nThe bracket is then recursively halfed. Here we apply some modifications\nto ensure our suggested point is not too close to either a or c,\nbecause that could be problematic with the local approximation.\nAlso, if the bracket does not seem to include the minimum,\nit is expanded generously in the right direction until it covers it.\n\nThus, this function is fail safe, and will always find a local minimum.", "id": "f14122:m7"} {"signature": "def cache2errors(function, cache, disp=, ftol=):", "body": "vals = numpy.array(sorted(cache, key=lambda x: x[]))if disp > : print('', vals)vi = vals[:,].min()def renormedfunc(x):y = function(x)cache.append([x, y])if disp > : print('', x, y, y - (vi + ))return y - (vi + )vals[:,] -= vi + lowmask = vals[:,] < highmask = vals[:,] > indices = numpy.arange(len(vals))b, vb = vals[indices[lowmask][ ],:]c, vc = vals[indices[lowmask][-],:]if any(vals[:,][highmask] < b):if disp > : print('')a, va = vals[indices[highmask][vals[:,][highmask] < b][-],:]else:a = bva = vbwhile b > -:a = b - max(vals[-,] - vals[,], )va = renormedfunc(a)if disp > : print('' % (b, vb, a, va))if va > :if disp > : print('')breakelse:b = avb = vaif disp > : print('', a, b, va, vb)if va > and vb < :leftroot = scipy.optimize.brentq(renormedfunc, a, b, rtol=ftol)else:if disp > : print('')leftroot = aif disp > : print('', leftroot)if any(vals[:,][highmask] > c):if disp > : print('')d, vd = vals[indices[highmask][vals[:,][highmask] > c][ ],:]else:d = cvd = vcwhile c < :d = c + max(vals[-,] - vals[,], )vd = renormedfunc(d)if disp > : print('' % (c, vc, d, vd))if vd > :if disp > : print('')breakelse:c = dvc = vdif disp > : print('', c, d, vc, vd)if vd > and vc < :rightroot = scipy.optimize.brentq(renormedfunc, c, d, rtol=ftol)else:if disp > : print('')rightroot = dif disp > : print('', rightroot)assert leftroot < rightrootif disp > :fullvals = numpy.array(sorted(cache, key=lambda x: x[]))fullvals[:,] -= vi + plt.figure()plt.plot(fullvals[:,], fullvals[:,], '')plt.plot(vals[:,], vals[:,], '')plt.xlim(a, d)plt.ylim(min(va, vb, vc, vd), max(va, vb, vc, vd))ymin, ymax = plt.ylim()plt.vlines([leftroot, rightroot], ymin, ymax, linestyles='')plt.savefig('')return leftroot, rightroot", "docstring": "This function will attempt to identify 1 sigma errors, assuming your\nfunction is a chi^2. 
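The multinest wrapper above takes a transform that maps unit-cube samples to physical parameters plus a loglikelihood over those parameters, and returns a dict with start/chain/stdev/upper/lower keys. A toy Gaussian fit, assuming the wrapper (and PyMultiNest) are importable and that the chains/ output directory already exists:

import numpy
import scipy.stats

parameter_names = ['mu', 'sigma']
data = numpy.array([4.8, 5.1, 5.3, 4.9, 5.0])

def transform(cube):
    mu = cube[0] * 10.0                 # uniform prior on [0, 10]
    sigma = 10 ** (cube[1] * 2 - 2)     # log-uniform prior on [0.01, 1]
    return [mu, sigma]

def loglikelihood(params):
    mu, sigma = params
    return scipy.stats.norm(mu, sigma).logpdf(data).sum()

result = multinest(parameter_names, transform, loglikelihood,
                   output_basename='chains/gauss_')
print(result['start'])        # best-fit parameters
print(result['stdev'])        # marginal 1-sigma widths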
For this, the 1-sigma is bracketed.\n\nIf you were smart enough to build a cache list of [x,y] into your function,\nyou can pass it here. The values bracketing 1 sigma will be used as \nstarting values.\nIf no such values exist, e.g. because all values were very close to the \noptimum (good starting values), the bracket is expanded.", "id": "f14122:m8"} {"signature": "def de(output_basename, parameter_names, transform, loglikelihood, prior, nsteps=, vizfunc=None, printfunc=None, **problem):", "body": "import jsonimport inspyredimport randomprng = random.Random()if '' in problem:prng.seed(problem[''])n_params = len(parameter_names)seeds = problem.get('', [])if '' in problem:seeds.append(problem[''])prefix = output_basenamedef viz(candidate, args):if vizfunc is not None:vizfunc(candidate)def print_candidate(candidate, l, args):if printfunc is not None:printfunc(cube=candidate, loglikelihood=l)else:print(l, candidate)def eval_candidate(candidate):params = transform(candidate)l = loglikelihood(params)p = prior(params)if numpy.isinf(p) and p < :print('')return -if numpy.isnan(l):return -return l, p@inspyred.ec.utilities.memoize@inspyred.ec.evaluators.evaluatordef fitness(candidate, args):l, p = eval_candidate(candidate)return (l + p)cutoff_store = def solution_archiver(random, population, archive, args):psize = len(population)population.sort(reverse=True)best = population[].fitnessall_candidates = sorted(population + archive, reverse=True)all_fitness = numpy.array([c.fitness for c in all_candidates])mask = best - all_fitness > cutoff_store / if mask.sum() < :mask = best - all_fitness > cutoff_storenewarchive = [c for i, c in enumerate(all_candidates) if i == or all_fitness[i - ] != c.fitness]print('', len(archive), len(newarchive))json.dump([{'': [float(f) for f in c.candidate], '':c.fitness} for c in newarchive], open(prefix + '', ''), indent=)return newarchivedef observer(population, num_generations, num_evaluations, args):population.sort(reverse=True)candidate = population[]print((''.format(num_evaluations)), '', end='') print_candidate(candidate.candidate, candidate.fitness, args)if num_evaluations % len(population) == or num_evaluations < len(population) or args.get('', False):viz(candidate.candidate, args)def generator(random, args): u = [random.uniform(, ) for _ in range(n_params)]u = [random.gauss(, ) for _ in range(n_params)]return bounder(u, args)ea = inspyred.ec.DEA(prng)ea.terminator = inspyred.ec.terminators.evaluation_terminationea.archiver = solution_archiverbounder = inspyred.ec.Bounder(lower_bound=, upper_bound=-)import copyfrom math import log@inspyred.ec.variators.mutatordef double_exponential_mutation(random, candidate, args):mut_rate = args.setdefault('', )mean = args.setdefault('', )stdev = args.setdefault('', )scale = log() / - (stdev)bounder = args[''].boundermutant = copy.copy(candidate)for i, m in enumerate(mutant):dice = random.random()if dice < mut_rate:sign = (dice < mut_rate / ) * - delta = -log(random.random()) / scalemutant[i] += delta * signmutant = bounder(mutant, args)return mutantdef minute_gaussian_mutation(random, candidates, args):args = dict(args)args[''] = args[''] = return inspyred.ec.variators.gaussian_mutation(random, candidates, args)ea.variator = [inspyred.ec.variators.n_point_crossover, inspyred.ec.variators.gaussian_mutation, minute_gaussian_mutation]ea.replacer = inspyred.ec.replacers.steady_state_replacementea.observer = observerpop_size = final_pop = ea.evolve(pop_size=pop_size, max_evaluations=nsteps, maximize=True, seeds=seeds, 
gaussian_stdev=, bounder=bounder, generator=generator, evaluator=fitness,)best = max(final_pop)seeds = [c.candidate for c in ea.archive]print('', best)return {'': best.candidate, '': best.fitness,'': seeds, '': ''}", "docstring": "**Differential evolution**\n\nvia `inspyred `_\n\nspecially tuned. steady state replacement, n-point crossover, \n pop size 20, gaussian mutation noise 0.01 & 1e-6.\nstores intermediate results (can be used for resume, see seeds)\n\n:param start: start point\n:param seeds: list of start points\n:param vizfunc: callback to do visualization of current best solution\n:param printfunc: callback to summarize current best solution\n:param seed: RNG initialization (if set)", "id": "f14123:m0"} {"signature": "def mcmc_advance(start, stdevs, logp, nsteps = , adapt=True, callback=None):", "body": "import scipyfrom numpy import logimport progressbarprob = logp(start)chain = [start]accepts = [True]probs = [prob]assert not numpy.isnan(start).any()assert not numpy.isnan(stdevs).any()i = widgets=['', progressbar.Percentage(), progressbar.Counter(''),progressbar.Bar(), progressbar.ETA()]pbar = progressbar.ProgressBar(widgets=widgets,maxval=nsteps).start()prev = startprev_prob = probprint('', prob)stepchange = while len(chain) < nsteps:i = i + next = scipy.random.normal(prev, stdevs)next[next > ] = next[next < ] = next_prob = logp(next)assert not numpy.isnan(next).any()assert not numpy.isnan(next_prob).any()delta = next_prob - prev_probdice = log(scipy.random.uniform(, ))accept = delta > diceif accept:prev = nextprev_prob = next_probif adapt: stdevs *= ( + stepchange)else:if adapt: stdevs *= ( + stepchange)**(-) if callback: callback(prev_prob, prev, accept)chain.append(prev)accepts.append(accept)probs.append(prev_prob)if adapt: stepchange = min(, / i)widgets[] = '' % numpy.mean(numpy.array(accepts[len(accepts)/:])+)pbar.update(pbar.currval + )pbar.finish()return chain, probs, accepts, stdevs", "docstring": "Generic Metropolis MCMC. Advances the chain by nsteps.\nCalled by :func:`mcmc`\n\n:param adapt: enables adaptive stepwidth alteration (converges).", "id": "f14125:m0"} {"signature": "def mcmc(transform, loglikelihood, parameter_names, nsteps=, nburn=, stdevs=, start = , **problem):", "body": "if '' in problem:numpy.random.seed(problem[''])n_params = len(parameter_names)def like(cube):cube = numpy.array(cube)if (cube <= ).any() or (cube >= -).any():return -params = transform(cube)return loglikelihood(params)start = start + numpy.zeros(n_params)stdevs = stdevs + numpy.zeros(n_params)def compute_stepwidths(chain):return numpy.std(chain, axis=) / import matplotlib.pyplot as pltplt.figure(figsize=(, ))steps = numpy.array([]*(n_params))print('')chain, prob, _, steps_ = mcmc_advance(start, steps, like, nsteps=nburn / , adapt=True)steps = compute_stepwidths(chain)print('')chain, prob, _, steps_ = mcmc_advance(chain[-], steps, like, nsteps=nburn / , adapt=True)steps = compute_stepwidths(chain)print('')chain, prob, _, steps_ = mcmc_advance(chain[-], steps, like, nsteps=nsteps)chain = numpy.array(chain)i = numpy.argmax(prob)final = chain[-]print('')chain = numpy.array([transform(params) for params in chain])return dict(start=chain[-], maximum=chain[i], seeds=[final, chain[i]], chain=chain, method='')", "docstring": "**Metropolis Hastings MCMC**\n\nwith automatic step width adaption. 
\nBurnin period is also used to guess steps.\n\n:param nburn: number of burnin steps\n:param stdevs: step widths to start with", "id": "f14125:m1"} {"signature": "def ensemble(transform, loglikelihood, parameter_names, nsteps=, nburn=, start=, **problem):", "body": "import emceeimport progressbarif '' in problem:numpy.random.seed(problem[''])n_params = len(parameter_names)nwalkers = + n_params * if nwalkers > :nwalkers = p0 = [numpy.random.rand(n_params) for i in range(nwalkers)]start = start + numpy.zeros(n_params)p0[] = startdef like(cube):cube = numpy.array(cube)if (cube <= ).any() or (cube >= -).any():return -params = transform(cube)return loglikelihood(params)sampler = emcee.EnsembleSampler(nwalkers, n_params, like,live_dangerously=True)print('')pos, prob, state = sampler.run_mcmc(p0, nburn / nwalkers)sampler.reset()print('')pbar = progressbar.ProgressBar(widgets=[progressbar.Percentage(), progressbar.Counter(''),progressbar.Bar(), progressbar.ETA()],maxval=nsteps).start()for results in sampler.sample(pos, iterations=nsteps / nwalkers, rstate0=state):pbar.update(pbar.currval + )pbar.finish()print(\"\", numpy.mean(sampler.acceptance_fraction))chain = sampler.flatchainfinal = chain[-]print('')chain_post = numpy.array([transform(params) for params in chain])chain_prob = sampler.flatlnprobabilityreturn dict(start=final, chain=chain_post, chain_prior=chain,chain_prob=chain_prob,method='')", "docstring": "**Ensemble MCMC**\n\nvia `emcee `_", "id": "f14125:m2"} {"signature": "def classical(transform, loglikelihood, parameter_names, prior, start = , ftol=, disp=, nsteps=,method='', **args):", "body": "import scipy.optimizen_params = len(parameter_names)def minfunc(params):l = loglikelihood(params)p = prior(params)if numpy.isinf(p) and p < :print('')return -if numpy.isnan(l):return -return -l - pdef minfunc_cube(cube):cube = numpy.array(cube)if (cube <= ).any() or (cube >= -).any():return params = transform(cube)l = loglikelihood(params)p = prior(params)if numpy.isinf(p) and p < :print('')return -if numpy.isnan(l):return -return -l - pstart = start + numpy.zeros(n_params)ret = {}if method == '':final, value, _niter, neval, warnflag = scipy.optimize.fmin(minfunc_cube, start, ftol=ftol, disp=disp, maxfun=nsteps, full_output=True)elif method == '':cons = [lambda params: params[i] for i in range(n_params)]cons += [lambda params: - params[i] for i in range(n_params)]final = scipy.optimize.fmin_cobyla(minfunc_cube, start, cons, rhoend=ftol / , disp=disp, maxfun=nsteps)neval = nstepselif method == '' or method == '':\"\"\"\"\"\"s = ''.join(parameter_names)s = \"\"\"\"\"\" % (s, s)if method == '':f = eval(s, dict(minfunc=minfunc, numpy=numpy))start = transform(start)else:f = eval(s, dict(minfunc=minfunc_cube, numpy=numpy))import minuitm = minuit.Minuit(f)for i, p in enumerate(parameter_names):m.values[p] = start[i]if method == '':m.limits[p] = (, - )m.up = m.tol = ftol * m.printMode = dispif method == '':m.migrad()elif method == '':m.hesse()final = [m.values[p] for p in parameter_names]neval = m.ncallserrors = [m.errors[p] for p in parameter_names]if method == '':c0 = finalp0 = transform(c0)stdev = numpy.zeros(n_params)lower = numpy.zeros(n_params)upper = numpy.zeros(n_params)for i, w in enumerate(errors):c1 = numpy.copy(c0)c1[i] -= wc2 = numpy.copy(c0)c2[i] += wp1 = transform(c1)p2 = transform(c2)stdev[i] = numpy.abs(p2[i] - p1[i]) / lower[i] = min(p2[i], p1[i])upper[i] = max(p2[i], p1[i])ret[''] = stdevret[''] = upperret[''] = lowerelif method == '':ret[''] = errorsret[''] = 
numpy.matrix([[m.covariance[(a, b)] for b in parameter_names] for a in parameter_names])else:from openopt import NLPlo = [] * n_paramshi = [-] * n_paramsiprint = if disp == else if disp == else p = NLP(f=minfunc_cube, x0=start, lb=lo, ub=hi,maxFunEvals=nsteps, ftol=ftol, iprint=iprint)r = p.solve(method)final = r.xfneval = r.evals['']ret.update(dict(start=final, maximum=transform(final), method=method, neval=neval))return ret", "docstring": "**Classic optimization methods**\n\n:param start: start position vector (before transform)\n:param ftol: accuracy required to stop at optimum\n:param disp: verbosity\n:param nsteps: number of steps\n:param method: string\n neldermead, cobyla (via `scipy.optimize `_)\n bobyqa, ralg, algencan, ipopt, mma, auglag and many others from the OpenOpt framework (via `openopt.NLP `_)\n minuit (via `PyMinuit `_)", "id": "f14126:m0"} {"signature": "def onebyone(transform, loglikelihood, parameter_names, prior, start = , ftol=, disp=, nsteps=,parallel=False, find_uncertainties=False, **args):", "body": "def minfunc(cube):cube = numpy.array(cube)if (cube <= ).any() or (cube >= -).any():return params = transform(cube)l = loglikelihood(params)p = prior(params)if numpy.isinf(p) and p < :print('')return -if numpy.isnan(l):return -return -l - pif parallel:func = opt_grid_parallelelse:func = opt_gridn_params = len(parameter_names)start = start + numpy.zeros(n_params)ret = func(start, minfunc, [(, -)] * n_params, ftol=ftol, disp=disp, compute_errors=find_uncertainties)if find_uncertainties:c0 = ret[]p0 = transform(c0)stdev = numpy.zeros(n_params)lower = numpy.zeros(n_params)upper = numpy.zeros(n_params)for i, (lo, hi) in enumerate(ret[]):c1 = numpy.copy(c0)c1[i] = loc2 = numpy.copy(c0)c2[i] = hip1 = transform(c1)p2 = transform(c2)stdev[i] = numpy.abs(p2[i] - p1[i]) / lower[i] = min(p2[i], p1[i])upper[i] = max(p2[i], p1[i])return dict(start=ret[], maximum=p0,stdev=stdev, upper=upper, lower=lower,method='')else:return dict(start=ret, maximum=transform(ret), method='')", "docstring": "**Convex optimization based on Brent's method**\n\nA strict assumption of one optimum between the parameter limits is used.\nThe bounds are narrowed until it is found, i.e. the likelihood function is flat\nwithin the bounds.\n* If optimum outside bracket, expands bracket until contained.\n* Thus guaranteed to return local optimum.\n* Supports parallelization (multiple parameters are treated independently)\n* Supports finding ML uncertainties (Delta-Chi^2=1)\n\nVery useful for 1-3d problems.\nOtherwise useful, reproducible/deterministic algorithm for finding the minimum in \nwell-behaved likelihoods, where the parameters are weakly independent,\nor to find a good starting point. 
\nOptimizes each parameter in order, assuming they are largely independent.\n\nFor 1-dimensional algorithm used, see :func:`jbopt.opt_grid`\n\n:param ftol: difference in values at which the function can be considered flat\n:param compute_errors: compute standard deviation of gaussian around optimum", "id": "f14126:m1"} {"signature": "def opt_normalizations(params, func, limits, abandon_threshold=, noimprovement_threshold=,disp=):", "body": "newparams = numpy.copy(params)lower = [lo for lo, hi in limits]upper = [hi for lo, hi in limits]for i, p in enumerate(params):startval = pbeststat = func(newparams)bestval = startvalif disp > :print('' % (startval, beststat))go_up = Truego_down = Truefor n in list(**numpy.arange(, )) + [None] + list(**numpy.arange(, )):if n is None:startval = bestvalif disp > :print('' % (startval))go_up = Truego_down = Truecontinueif go_up and startval * n > upper[i]:if disp > :print('' %(startval, n, upper[i]))go_up = Falseif go_down and startval / n < lower[i]:if disp > :print('' %(startval, n, lower[i]))go_down = Falseif go_up:if disp > :print('' % (startval * n))newparams[i] = startval * nnewstat = func(newparams)if disp > :print('' % (newparams[i], newstat))if newstat <= beststat:bestval = newparams[i]beststat = newstatif disp > :print('' % newparams[i])if newstat > beststat + abandon_threshold:go_up = Falseif go_down:if disp > :print('' % (startval / n))newparams[i] = startval / nnewstat = func(newparams)if disp > :print('' % (newparams[i], newstat))if newstat + noimprovement_threshold < beststat: bestval = newparams[i]beststat = newstatif disp > :print('' % newparams[i])if newstat > beststat + abandon_threshold:go_down = Falsenewparams[i] = bestvalprint('' % (i, newparams[i]))print('' % (beststat))return newparams", "docstring": "**optimization algorithm for scale variables (positive value of unknown magnitude)**\n\nEach parameter is a normalization of a feature, and its value is sought.\nThe parameters are handled in order (assumed to be independent), \nbut a second round can be run.\nVarious magnitudes of the normalization are tried. 
If the normalization converges\nto zero, the largest value yielding a comparable value is used.\n\nOptimizes each normalization parameter in rough steps \nusing multiples of 3 of start point\nto find reasonable starting values for another algorithm.\n\nparameters, minimization function, parameter space definition [(lo, hi) for i in params]\n\n:param abandon_threshold:\n if in one direction the function increases by this much over the best value, \n abort search in this direction\n:param noimprovement_threshold:\n when decreasing the normalization, if the function increases by less than \n this amount, abort search in this direction\n:param disp:\n verbosity", "id": "f14127:m0"} {"signature": "def opt_grid(params, func, limits, ftol=, disp=, compute_errors=True):", "body": "caches = [[] for p in params]newparams = numpy.copy(params)errors = [[] for p in params]for i, p in enumerate(params):cache = []def func1(x0):newparams[i] = x0v = func(newparams)cache.append([x0, v])return vlo, hi = limits[i]bestval = optimize(func1, x0=p,cons=[lambda x: x - lo, lambda x: hi - x],ftol=ftol, disp=disp - )beststat = func1(bestval)if compute_errors:errors[i] = cache2errors(func1, cache, disp=disp - )newparams[i] = bestvalcaches[i] = cacheif disp > :if compute_errors:print('' % (i, bestval, errors[i][], errors[i][], beststat))else:print('' %(i, bestval, beststat))beststat = func(newparams)if disp > :print('' % (beststat))if compute_errors:return newparams, errorselse:return newparams", "docstring": "see :func:`optimize1d.optimize`, considers each parameter in order\n\n:param ftol: \n difference in values at which the function can be considered flat\n:param compute_errors:\n compute standard deviation of gaussian around optimum", "id": "f14127:m1"} {"signature": "def opt_grid_parallel(params, func, limits, ftol=, disp=, compute_errors=True):", "body": "import multiprocessingdef spawn(f):def fun(q_in, q_out):while True:i, x = q_in.get()if i == None:breakq_out.put((i, f(x)))return fundef parmap(f, X, nprocs=multiprocessing.cpu_count()):q_in = multiprocessing.Queue()q_out = multiprocessing.Queue()proc = [multiprocessing.Process(target=spawn(f), args=(q_in, q_out)) for _ in range(nprocs)]for p in proc:p.daemon = Truep.start()sent = [q_in.put((i, x)) for i, x in enumerate(X)][q_in.put((None, None)) for _ in range(nprocs)]res = [q_out.get() for _ in range(len(sent))][p.join() for p in proc]return [x for i, x in sorted(res)]nthreads = multiprocessing.cpu_count()caches = [[] for p in params]newparams = numpy.copy(params)errors = [[] for p in params]indices = list(range(, len(params), nthreads))k = while k < len(params):j = min(len(params), k + nthreads * )def run1d(xxx_todo_changeme):(i, curparams, curlimits) = xxx_todo_changemecache = []def func1(x0):curparams[i] = x0v = func(curparams)cache.append([x0, v])return vlo, hi = curlimitsbestval = optimize(func1, x0=p,cons=[lambda x: x - lo, lambda x: hi - x],ftol=ftol, disp=disp - )beststat = func1(bestval)if compute_errors:errors = cache2errors(func1, cache, disp=disp - )return bestval, beststat, errors, cachereturn bestval, beststat, cacheresults = parmap(run1d, [(i, numpy.copy(newparams), limits[i]) for i in range(k, j)])for i, r in enumerate(results):if compute_errors:v, s, e, c = rif disp > :print('' % (i + k, v, e[], e[], s))else:v, s, c = re = []if disp > :print('' % (i + k, v, s))newparams[i + k] = vcaches[i + k] = cerrors[i + k] = ek = jbeststat = func(newparams)if disp > :print('' % (beststat))if compute_errors:return newparams, errorselse:return newparams", 
"docstring": "parallelized version of :func:`opt_grid`", "id": "f14127:m2"} {"signature": "def set_chromosomes(self, chromosomes=None):", "body": "if chromosomes and chromosomes in valid_chromosomes:self.chromosomes = chromosomeselse:self.chromosomes = random.choice([XX, XY])", "docstring": "This model uses the XY sex-determination system. Sex != gender.\n Assign either XX or XY randomly with a 50/50 chance of each, unless\n are passed as an argument.", "id": "f14134:c0:m1"} {"signature": "def set_gender(self, gender=None):", "body": "if gender and gender in genders:self.gender = genderelse:if not self.chromosomes: self.set_chromosomes()self.gender = npchoice(genders, , p=p_gender[self.chromosomes])[]", "docstring": "This model recognizes that sex chromosomes don't always line up with\n gender. Assign M, F, or NB according to the probabilities in p_gender.", "id": "f14134:c0:m2"} {"signature": "def set_inherited_traits(self, egg_donor, sperm_donor):", "body": "if type(egg_donor) == str:self.reproduce_asexually(egg_donor, sperm_donor)else:self.reproduce_sexually(egg_donor, sperm_donor)", "docstring": "Accept either strings or Gods as inputs.", "id": "f14134:c0:m3"} {"signature": "def reproduce_asexually(self, egg_word, sperm_word):", "body": "egg = self.generate_gamete(egg_word)sperm = self.generate_gamete(sperm_word)self.genome = list(set(egg + sperm)) self.generation = self.divinity = god", "docstring": "Produce two gametes, an egg and a sperm, from the input strings.\n Combine them to produce a genome a la sexual reproduction.", "id": "f14134:c0:m4"} {"signature": "def reproduce_sexually(self, egg_donor, sperm_donor):", "body": "egg_word = random.choice(egg_donor.genome)egg = self.generate_gamete(egg_word)sperm_word = random.choice(sperm_donor.genome)sperm = self.generate_gamete(sperm_word)self.genome = list(set(egg + sperm)) self.parents = [egg_donor.name, sperm_donor.name]self.generation = max(egg_donor.generation, sperm_donor.generation) + sum_ = egg_donor.divinity + sperm_donor.divinityself.divinity = int(npchoice(divinities, , p=p_divinity[sum_])[])", "docstring": "Produce two gametes, an egg and a sperm, from input Gods. Combine\n them to produce a genome a la sexual reproduction. Assign divinity\n according to probabilities in p_divinity. The more divine the parents,\n the more divine their offspring.", "id": "f14134:c0:m5"} {"signature": "def set_name(self):", "body": "if not self.gender: self.set_gender()name = ''if self.gender == female:name = names.female_names.pop()elif self.gender == male:name = names.male_names.pop()else:try:name = names.nb_names.pop()except:name = names.male_names.pop()self.name = name", "docstring": "Pick a random name from the lists loaded with the model. For Gods that\n identify as neither M nor F, the model attempts to retrieve an androgynous\n name. 
Note: not all of the scraped name lists contain androgynous names.", "id": "f14134:c0:m6"} {"signature": "def set_epithet(self):", "body": "if self.divinity == human:obsession = random.choice(self.genome)if self.gender == female:self.epithet = ''elif self.gender == male:self.epithet = ''else:self.epithet = ''self.epithet += '' + obsessionreturn if self.gender == female:title = ''elif self.gender == male:title = ''else:title = ''if self.divinity == demi_god:title = '' + title if self.gender == non_binary else '' + titlenum_domains = npchoice([,,,], , p=[, , , ])[]if num_domains == :template = ''if num_domains == :template = ''elif num_domains == :template = '' elif num_domains == :template = ''self.domains = [d.title() for d in random.sample(self.genome, num_domains)]self.epithet = template % (title, *self.domains)", "docstring": "Divine an appropriate epithet for this God. (See what I did there?)", "id": "f14134:c0:m7"} {"signature": "def generate_gamete(self, egg_or_sperm_word):", "body": "p_rate_of_mutation = [, ]should_use_mutant_pool = (npchoice([,], , p=p_rate_of_mutation)[] == )if should_use_mutant_pool:pool = tokens.secondary_tokenselse:pool = tokens.primary_tokensreturn get_matches(egg_or_sperm_word, pool, )", "docstring": "Extract 23 'chromosomes' aka words from 'gene pool' aka list of tokens\n by searching the list of tokens for words that are related to the given\n egg_or_sperm_word.", "id": "f14134:c0:m8"} {"signature": "def print_parents(self):", "body": "if self.gender == female:title = ''elif self.gender == male:title = ''else:title = ''p1 = self.parents[]p2 = self.parents[]template = ''print(template % (title, p1.name, p1.epithet, p2.name, p2.epithet))", "docstring": "Print parents' names and epithets.", "id": "f14134:c0:m9"} {"signature": "def send_birth_announcement(parent_a, parent_b, child):", "body": "print(\"\" % (child.name, child.epithet))", "docstring": "Convenience method for presentations.", "id": "f14135:m0"} {"signature": "def add_god(self, god):", "body": "self.gods[god.name] = god", "docstring": "Add a god to this Pantheon's gods dictionary.", "id": "f14135:c0:m1"} {"signature": "def get_god(self, name_of_god):", "body": "try:return self.gods[name_of_god]except:print('' % name_of_god)", "docstring": "Retrieve a god from this Pantheon's gods dictionary.", "id": "f14135:c0:m2"} {"signature": "def spawn(self, generations):", "body": "egg_donors = [god for god in self.gods.values() if god.chromosomes == '']sperm_donors = [god for god in self.gods.values() if god.chromosomes == '']for i in range(generations):print(\"\" % (i+))gen_xx = []gen_xy = []for egg_donor in egg_donors:sperm_donor = random.choice(sperm_donors)brood = self.breed(egg_donor, sperm_donor)for child in brood:if child.divinity > human:self.add_god(child)if child.chromosomes == '':gen_xx.append(child)else:gen_xy.append(child)egg_donors = [ed for ed in egg_donors if ed.generation > (i-)]sperm_donors = [sd for sd in sperm_donors if sd.generation > (i-)]egg_donors += gen_xxsperm_donors += gen_xy", "docstring": "Grow this Pantheon by multiplying Gods.", "id": "f14135:c0:m3"} {"signature": "def breed(self, egg_donor, sperm_donor):", "body": "offspring = []try:num_children = npchoice([,], , p=[, ])[] for _ in range(num_children):child = God(egg_donor, sperm_donor)offspring.append(child)send_birth_announcement(egg_donor, sperm_donor, child)except ValueError:print(\"\")return offspring", "docstring": "Get it on.", "id": "f14135:c0:m4"} {"signature": "def sequence_genes():", "body": "tokenize_corpora()", 
"docstring": "An alias.", "id": "f14136:m0"} {"signature": "def filter_genes(pool, filters):", "body": "save_tokens_to_dir(pool, filters)", "docstring": "An alias.", "id": "f14136:m1"} {"signature": "def define_gene_pool(pool, individuals):", "body": "make_tokens_dir(pool, individuals)", "docstring": "An alias.", "id": "f14136:m2"} {"signature": "def select_gene_pool(dir_):", "body": "set_token_lists(dir_)", "docstring": "An alias.", "id": "f14136:m3"} {"signature": "def list_gene_pools():", "body": "get_tokens_dirs()", "docstring": "An alias.", "id": "f14136:m4"} {"signature": "def tokenize_texts():", "body": "text_files = [fname for fname in os.listdir(corpora_dir)if fname.split('')[] == '']for text_fname in text_files:json_fname = text_fname.split('')[] + ''if os.path.isfile(corpora_dir + json_fname):continue print(\"\" + text_fname)text = open(corpora_dir + text_fname).read()words = nltk.word_tokenize(text)with open(corpora_dir + json_fname, '') as outjson:json.dump(words, outjson)", "docstring": "Generate a json file for each txt file in the /data/corpora directory.", "id": "f14136:m5"} {"signature": "def list_tokenized_texts():", "body": "return [f for f in os.listdir(corpora_dir) if f.split('')[] == '']", "docstring": "Retrieve the filenames of all tokenized text files in /data/corpora.\n Useful when you want to hand-pick for make_tokens_dir().", "id": "f14136:m6"} {"signature": "def make_tokens_dir(dir_, sources):", "body": "os.mkdir(tokens_dir + dir_)for source in sources:if not os.path.isfile(corpora_dir + source):print('' + source)returnwith open(tokens_dir + dir_ + '', '') as outjson:json.dump(sources, outjson)", "docstring": "Create a new directory named . Create a new file within it called\n sources.json. The input is a list of names of tokenized texts.\n Write into sources.json.", "id": "f14136:m7"} {"signature": "def make_tokens_list(dir_, filters):", "body": "with open(tokens_dir + dir_ + '', '') as injson:data = json.load(injson)sources = [corpora_dir + fname for fname in data]with open('', '') as f:skipwords = [line.rstrip() for line in f]tokens_list = []for fname in sources:print(\"\" + fname)with open(fname, '') as injson:data = json.load(injson)words = [w.lower() for w in data if not w == '']filtered = [w for w,p in nltk.pos_tag(words) if p in filters]sanitized = [w for w in filtered if not w in skipwords]tokens_list += sanitizedtokens_list = list(set(tokens_list)) target = tokens_dir + dir_ + '' + ''.join(filters) + ''with open(target, '') as outjson:json.dump(tokens_list, outjson)", "docstring": "Find sources.json in . It contains a list of tokenized texts. For\n each tokenized text listed in sources.json, read its tokens, filter them,\n and add them to an aggregated list. Write the aggregated list to disk using\n a filename based on the given.", "id": "f14136:m8"} {"signature": "def get_matches(word, tokens, limit, offset=):", "body": "return closest(tokens, word_vec(word), limit, offset)", "docstring": "Return words from that are most closely related to .", "id": "f14137:m0"} {"signature": "def word_vec(word):", "body": "return nlp.vocab[word].vector", "docstring": "Return spaCy's vector for .", "id": "f14137:m1"} {"signature": "def cosine(vec1, vec2):", "body": "if norm(vec1) > and norm(vec2) > :return dot(vec1, vec2) / (norm(vec1) * norm(vec2))else:return ", "docstring": "Compare vectors. Borrowed from A. 
Parish.", "id": "f14137:m2"} {"signature": "def closest(tokens, search_vec, limit, offset=):", "body": "return sorted(tokens,key=lambda x: cosine(search_vec, word_vec(x)),reverse=True)[offset:offset+limit]", "docstring": "Return the words from whose vectors most closely\n resemble the search_vec. Skip the first results.", "id": "f14137:m3"} {"signature": "def set_name_lists(ethnicity=None):", "body": "if not ethnicity: ethnicity = random.choice(get_ethnicities())print(\"\" + ethnicity)filename = names_dir + ethnicity + ''try:with open(filename, '') as injson:data = json.load(injson)except:return '' + filenameelse:names = [ tuple(name.split('')) for name in data ]random.shuffle(names)global female_namesfemale_names = [name for name,gender,*desc in names if gender == '']global male_namesmale_names = [name for name,gender,*desc in names if gender == '']global nb_namesnb_names = [name for name,gender,*desc in names if gender == '']", "docstring": "Set three globally available lists of names.", "id": "f14138:m0"} {"signature": "def get_ethnicities():", "body": "ethnicities = [ fname.split('')[] for fname in os.listdir(names_dir) ]return ethnicities", "docstring": "Retrieve a list of the ethnicities for which name data was scraped.\n Exclude the file extension for human friendliness.", "id": "f14138:m1"} {"signature": "def print_ethnicities():", "body": "print('')print(''.join(get_ethnicities()))print(\"\")", "docstring": "Print a list of the ethnicities for which name data was scraped.", "id": "f14138:m2"} {"signature": "def get_finders():", "body": "if hasattr(settings, ''):finders = settings.MEDIA_FIXTURES_FILES_FINDERSelse:finders = ('',)for finder_path in finders:yield get_finder(finder_path)", "docstring": "Set the media fixtures finders on settings.py. 
\nExample:\n MEDIA_FIXTURES_FILES_FINDERS = (\n 'django_media_fixtures.finders.FileSystemFinder',\n 'django_media_fixtures.finders.AppDirectoriesFinder', # default\n )", "id": "f14149:m0"} {"signature": "@lru_cache.lru_cache(maxsize=None)def get_finder(import_path):", "body": "Finder = import_string(import_path)if not issubclass(Finder, BaseFinder):raise ImproperlyConfigured('' %(Finder, BaseFinder))return Finder()", "docstring": "Imports the media fixtures files finder class described by import_path, where\nimport_path is the full Python path to the class.", "id": "f14149:m1"} {"signature": "def find(self, path, all=False):", "body": "matches = []for prefix, root in self.locations:if root not in searched_locations:searched_locations.append(root)matched_path = self.find_location(root, path, prefix)if matched_path:if not all:return matched_pathmatches.append(matched_path)return matches", "docstring": "Looks for files in the extra locations\nas defined in ``MEDIA_FIXTURES_FILES_DIRS``.", "id": "f14149:c0:m1"} {"signature": "def find_location(self, root, path, prefix=None):", "body": "if prefix:prefix = '' % (prefix, os.sep)if not path.startswith(prefix):return Nonepath = path[len(prefix):]path = safe_join(root, path)if os.path.exists(path):return path", "docstring": "Finds a requested media file in a location, returning the found\nabsolute path (or ``None`` if no match).", "id": "f14149:c0:m2"} {"signature": "def list(self, ignore_patterns):", "body": "for prefix, root in self.locations:storage = self.storages[root]for path in utils.get_files(storage, ignore_patterns):yield path, storage", "docstring": "List all files in all locations.", "id": "f14149:c0:m3"} {"signature": "def list(self, ignore_patterns):", "body": "for storage in six.itervalues(self.storages):if storage.exists(''): for path in utils.get_files(storage, ignore_patterns):yield path, storage", "docstring": "List all files in all app storages.", "id": "f14149:c1:m1"} {"signature": "def find(self, path, all=False):", "body": "matches = []for app in self.apps:app_location = self.storages[app].locationif app_location not in searched_locations:searched_locations.append(app_location)match = self.find_in_app(app, path)if match:if not all:return matchmatches.append(match)return matches", "docstring": "Looks for files in the app directories.", "id": "f14149:c1:m2"} {"signature": "def find_in_app(self, app, path):", "body": "storage = self.storages.get(app, None)if storage:if storage.exists(path):matched_path = storage.path(path)if matched_path:return matched_path", "docstring": "Find a requested media file in an app's media fixtures locations.", "id": "f14149:c1:m3"} {"signature": "def set_options(self, **options):", "body": "self.interactive = options['']self.verbosity = options['']self.symlink = options['']self.clear = options['']self.dry_run = options['']ignore_patterns = options['']if options['']:ignore_patterns += ['', '', '']self.ignore_patterns = list(set(ignore_patterns))self.post_process = options['']", "docstring": "Set instance variables based on an options dict", "id": "f14150:c0:m2"} {"signature": "def collect(self):", "body": "if self.symlink and not self.local:raise CommandError(\"\")if self.clear:self.clear_dir('')if self.symlink:handler = self.link_fileelse:handler = self.copy_filefound_files = OrderedDict()for finder in get_finders():for path, storage in finder.list(self.ignore_patterns):if getattr(storage, '', None):prefixed_path = os.path.join(storage.prefix, path)else:prefixed_path = pathif prefixed_path not in 
found_files:found_files[prefixed_path] = (storage, path)handler(path, prefixed_path, storage)if self.post_process and hasattr(self.storage, ''):processor = self.storage.post_process(found_files,dry_run=self.dry_run)for original_path, processed_path, processed in processor:if isinstance(processed, Exception):self.stderr.write(\"\" % original_path)self.stderr.write(\"\")raise processedif processed:self.log(\"\" %(original_path, processed_path), level=)self.post_processed_files.append(original_path)else:self.log(\"\" % original_path)return {'': self.copied_files + self.symlinked_files,'': self.unmodified_files,'': self.post_processed_files,}", "docstring": "Perform the bulk of the work of collectmedia.\n\nSplit off from handle() to facilitate testing.", "id": "f14150:c0:m3"} {"signature": "def log(self, msg, level=):", "body": "if self.verbosity >= level:self.stdout.write(msg)", "docstring": "Small log helper", "id": "f14150:c0:m5"} {"signature": "def clear_dir(self, path):", "body": "dirs, files = self.storage.listdir(path)for f in files:fpath = os.path.join(path, f)if self.dry_run:self.log(\"\" %smart_text(fpath), level=)else:self.log(\"\" % smart_text(fpath), level=)self.storage.delete(fpath)for d in dirs:self.clear_dir(os.path.join(path, d))", "docstring": "Deletes the given relative path using the destination storage backend.", "id": "f14150:c0:m7"} {"signature": "def delete_file(self, path, prefixed_path, source_storage):", "body": "if self.storage.exists(prefixed_path):try:target_last_modified =self.storage.modified_time(prefixed_path)except (OSError, NotImplementedError, AttributeError):passelse:try:source_last_modified = source_storage.modified_time(path)except (OSError, NotImplementedError, AttributeError):passelse:if self.local:full_path = self.storage.path(prefixed_path)else:full_path = Noneif (target_last_modified.replace(microsecond=)>= source_last_modified.replace(microsecond=)):if not ((self.symlink and full_pathand not os.path.islink(full_path)) or(not self.symlink and full_pathand os.path.islink(full_path))):if prefixed_path not in self.unmodified_files:self.unmodified_files.append(prefixed_path)self.log(\"\" % path)return Falseif self.dry_run:self.log(\"\" % path)else:self.log(\"\" % path)self.storage.delete(prefixed_path)return True", "docstring": "Checks if the target file should be deleted if it already exists", "id": "f14150:c0:m8"} {"signature": "def link_file(self, path, prefixed_path, source_storage):", "body": "if prefixed_path in self.symlinked_files:return self.log(\"\" % path)if not self.delete_file(path, prefixed_path, source_storage):returnsource_path = source_storage.path(path)if self.dry_run:self.log(\"\" % source_path, level=)else:self.log(\"\" % source_path, level=)full_path = self.storage.path(prefixed_path)try:os.makedirs(os.path.dirname(full_path))except OSError:passtry:if os.path.lexists(full_path):os.unlink(full_path)os.symlink(source_path, full_path)except AttributeError:import platformraise CommandError(\"\" %platform.python_version())except NotImplementedError:import platformraise CommandError(\"\"\"\" % platform.platform())except OSError as e:raise CommandError(e)if prefixed_path not in self.symlinked_files:self.symlinked_files.append(prefixed_path)", "docstring": "Attempt to link ``path``", "id": "f14150:c0:m9"} {"signature": "def copy_file(self, path, prefixed_path, source_storage):", "body": "if prefixed_path in self.copied_files:return self.log(\"\" % path)if not self.delete_file(path, prefixed_path, source_storage):returnsource_path = 
source_storage.path(path)if self.dry_run:self.log(\"\" % source_path, level=)else:self.log(\"\" % source_path, level=)with source_storage.open(path) as source_file:self.storage.save(prefixed_path, source_file)self.copied_files.append(prefixed_path)", "docstring": "Attempt to copy ``path`` with storage", "id": "f14150:c0:m10"} {"signature": "def generate_local_url(self, js_name):", "body": "host = self._settings[''].format(**self._host_context).rstrip('')return ''.format(host, js_name)", "docstring": "Generate the local url for a js file.\n:param js_name:\n:return:", "id": "f14158:c0:m6"} {"signature": "def to_css_length(l):", "body": "if isinstance(l, (int, float)):return ''.format(l)else:return l", "docstring": "Return the standard length string of css.\nIt's compatible with number values in old versions.\n:param l: source css length.\n:return: A string.", "id": "f14160:m0"} {"signature": "@staticmethoddef as_object(data):", "body": "return json.dumps(data, indent=, default=json_encoder)", "docstring": "Dump object to multiple-line javascript object.\n:param data:\n:return:", "id": "f14160:c0:m0"} {"signature": "@staticmethoddef as_parameters(*parameters, variables=None):", "body": "s = json.dumps(parameters)s = s[:-]if variables:for v in variables:if v in parameters:s = s.replace('' + v + '', v)return s", "docstring": "Dump python list as the parameter of javascript function\n:param parameters:\n:param variables:\n:return:", "id": "f14160:c0:m1"} {"signature": "def ifetch_single(iterable, key, default=EMPTY, getter=None):", "body": "def _getter(item):if getter:custom_getter = partial(getter, key=key)return custom_getter(item)else:try:attrgetter = operator.attrgetter(key)return attrgetter(item)except AttributeError:passtry:itemgetter = operator.itemgetter(key)return itemgetter(item)except KeyError:passif default is not EMPTY:return defaultraise ValueError('' % (item, key))return map(_getter, iterable)", "docstring": "getter() g(item, key):pass", "id": "f14165:m0"} {"signature": "def __init__(self, base_url, username='', password='', oauth_client=None):", "body": "self.base_url = base_urlself.endpoints = {}if oauth_client:self.request_handler = oauth_clientelse:self.request_handler = Api._httplib2_init(username, password)", "docstring": "Grabs everything we need to connect to a REST API\n base_url: 'http://google.com' -- no trailing slashes\n username = 'travisby@gmail.com'\n password = 'helloworld'\n oauth_client = oauth2.client()", "id": "f14195:c0:m0"} {"signature": "def update_endpoints(self, endpoints):", "body": "self.endpoints.update(endpoints)", "docstring": "Adds to the endpoints collection\n endpoints = {\n 'get_users': 'users',\n 'classes': 'crazyURL/withExtraStuff',\n 'get_user': 'user/%(id)s' # requires url_data\n 'get_me': 'user/3' # Don't be afraid to hard code!\n }", "id": "f14195:c0:m1"} {"signature": "def clear_endpoints(self):", "body": "self.endpoints = {}", "docstring": "Clears all stored endpoints", "id": "f14195:c0:m2"} {"signature": "def get(self, endpoint, url_data=None, parameters=None):", "body": "return self.request_handler.request(self._url(endpoint, url_data, parameters),method=Api._method[''],)", "docstring": "Returns the response and body for a get request\n endpoints = 'users' # resource to access\n url_data = {}, () # Used to modularize endpoints, see __init__\n parameters = {}, ((),()) # URL parameters: google.com?q=a&f=b", "id": "f14195:c0:m3"} {"signature": "def post(self, endpoint, data, url_data=None, parameters=None):", "body": "return 
self.request_handler.request(self._url(endpoint, url_data, parameters),method=Api._method[''],body=urllib.urlencode(data))", "docstring": "Returns the response and body for a post request\n endpoints = 'users' # resource to access\n data = {'username': 'blah', 'password': 'blah'} # POST body\n url_data = {}, () # Used to modularize endpoints, see __init__\n parameters = {}, ((),()) # URL parameters, ex: google.com?q=a&f=b", "id": "f14195:c0:m4"} {"signature": "def put(self, endpoint, data, url_data=None, parameters=None):", "body": "return self.request_handler.request(self._url(endpoint, url_data, parameters),method=Api._method[''],body=urllib.urlencode(data))", "docstring": "Returns the response and body for a put request\n endpoints = 'users' # resource to access\n data = {'username': 'blah', 'password': 'blah'} # PUT body\n url_data = {}, () # Used to modularize endpoints, see __init__\n parameters = {}, ((),()) # URL parameters, ex: google.com?q=a&f=b", "id": "f14195:c0:m5"} {"signature": "def delete(self, endpoint, data, url_data=None, parameters=None):", "body": "return self.request_handler.request(self._url(endpoint, url_data, parameters),method=Api._method[''],body=urllib.urlencode(data))", "docstring": "Returns the response and body for a delete request\n endpoints = 'users' # resource to access\n data = {'username': 'blah', 'password': 'blah'} # DELETE body\n url_data = {}, () # Used to modularize endpoints, see __init__\n parameters = {}, ((),()) # URL parameters, ex: google.com?q=a&f=b", "id": "f14195:c0:m6"} {"signature": "def head(self, endpoint, url_data=None, parameters=None):", "body": "return self.request_handler.request(self._url(endpoint, url_data, parameters),method=Api._method[''])", "docstring": "Returns the response and body for a head request\n endpoints = 'users' # resource to access\n url_data = {}, () # Used to modularize endpoints, see __init__\n parameters = {}, ((),()) # URL parameters, ex: google.com?q=a&f=b", "id": "f14195:c0:m7"} {"signature": "def _url(self, endpoint, url_data=None, parameters=None):", "body": "try:url = '' % (self.base_url, self.endpoints[endpoint])except KeyError:raise EndPointDoesNotExist(endpoint)if url_data:url = url % url_dataif parameters:url = '' % (url, urllib.urlencode(parameters, True))return url", "docstring": "Generate URL on the modularized endpoints and url parameters", "id": "f14195:c0:m8"} {"signature": "@staticmethoddef _httplib2_init(username, password):", "body": "obj = httplib2.Http()if username and password:obj.add_credentials(username, password)return obj", "docstring": "Used to instantiate a regular HTTP request object", "id": "f14195:c0:m9"} {"signature": "def _accept(self, target):", "body": "if isinstance(target, datetime.timedelta):target = target.total_seconds()if target is None:target = float('')return target", "docstring": "Accept None or ∞ or datetime or numeric for target", "id": "f14199:c2:m1"} {"signature": "def strftime(fmt, t):", "body": "if isinstance(t, (time.struct_time, tuple)):t = datetime.datetime(*t[:])assert isinstance(t, (datetime.datetime, datetime.time, datetime.date))try:year = t.yearif year < :t = t.replace(year=)except AttributeError:year = subs = (('', '' % year),('', '' % (year % )),('', '' % (t.microsecond // )),('', '' % (t.microsecond % )))def doSub(s, sub):return s.replace(*sub)def doSubs(s):return functools.reduce(doSub, subs, s)fmt = ''.join(map(doSubs, fmt.split('')))return t.strftime(fmt)", "docstring": "A function to replace the strftime in datetime package or time module.\n Identical to 
strftime behavior in those modules except supports any\n year.\n Also supports datetime.datetime times.\n Also supports milliseconds using %s\n Also supports microseconds using %u", "id": "f14200:m0"} {"signature": "def strptime(s, fmt, tzinfo=None):", "body": "res = time.strptime(s, fmt)return datetime.datetime(tzinfo=tzinfo, *res[:])", "docstring": "A function to replace strptime in the time module. Should behave\nidentically to the strptime function except it returns a datetime.datetime\nobject instead of a time.struct_time object.\nAlso takes an optional tzinfo parameter which is a time zone info object.", "id": "f14200:m1"} {"signature": "def datetime_mod(dt, period, start=None):", "body": "if start is None:start = datetime.datetime.combine(dt.date(), datetime.time())delta = dt - startdef get_time_delta_microseconds(td):return (td.days * seconds_per_day + td.seconds) * + td.microsecondsdelta, period = map(get_time_delta_microseconds, (delta, period))offset = datetime.timedelta(microseconds=delta % period)result = dt - offsetreturn result", "docstring": "Find the time which is the specified date/time truncated to the time delta\nrelative to the start date/time.\nBy default, the start time is midnight of the same day as the specified\ndate/time.\n\n>>> datetime_mod(datetime.datetime(2004, 1, 2, 3),\n... datetime.timedelta(days = 1.5),\n... start = datetime.datetime(2004, 1, 1))\ndatetime.datetime(2004, 1, 1, 0, 0)\n>>> datetime_mod(datetime.datetime(2004, 1, 2, 13),\n... datetime.timedelta(days = 1.5),\n... start = datetime.datetime(2004, 1, 1))\ndatetime.datetime(2004, 1, 2, 12, 0)\n>>> datetime_mod(datetime.datetime(2004, 1, 2, 13),\n... datetime.timedelta(days = 7),\n... start = datetime.datetime(2004, 1, 1))\ndatetime.datetime(2004, 1, 1, 0, 0)\n>>> datetime_mod(datetime.datetime(2004, 1, 10, 13),\n... datetime.timedelta(days = 7),\n... start = datetime.datetime(2004, 1, 1))\ndatetime.datetime(2004, 1, 8, 0, 0)", "id": "f14200:m2"} {"signature": "def datetime_round(dt, period, start=None):", "body": "result = datetime_mod(dt, period, start)if abs(dt - result) >= period // :result += periodreturn result", "docstring": "Find the nearest even period for the specified date/time.\n\n>>> datetime_round(datetime.datetime(2004, 11, 13, 8, 11, 13),\n... datetime.timedelta(hours = 1))\ndatetime.datetime(2004, 11, 13, 8, 0)\n>>> datetime_round(datetime.datetime(2004, 11, 13, 8, 31, 13),\n... datetime.timedelta(hours = 1))\ndatetime.datetime(2004, 11, 13, 9, 0)\n>>> datetime_round(datetime.datetime(2004, 11, 13, 8, 30),\n... 
datetime.timedelta(hours = 1))\ndatetime.datetime(2004, 11, 13, 9, 0)", "id": "f14200:m3"} {"signature": "def get_nearest_year_for_day(day):", "body": "now = time.gmtime()result = now.tm_yearif day - now.tm_yday > // :result -= if now.tm_yday - day > // :result += return result", "docstring": "Returns the nearest year to now inferred from a Julian date.", "id": "f14200:m4"} {"signature": "def gregorian_date(year, julian_day):", "body": "result = datetime.date(year, , )result += datetime.timedelta(days=julian_day - )return result", "docstring": "Gregorian Date is defined as a year and a julian day (1-based\nindex into the days of the year).\n\n>>> gregorian_date(2007, 15)\ndatetime.date(2007, 1, 15)", "id": "f14200:m5"} {"signature": "def get_period_seconds(period):", "body": "if isinstance(period, six.string_types):try:name = '' + period.lower()result = globals()[name]except KeyError:msg = \"\"raise ValueError(msg)elif isinstance(period, numbers.Number):result = periodelif isinstance(period, datetime.timedelta):result = period.days * get_period_seconds('') + period.secondselse:raise TypeError('')return result", "docstring": "return the number of seconds in the specified period\n\n>>> get_period_seconds('day')\n86400\n>>> get_period_seconds(86400)\n86400\n>>> get_period_seconds(datetime.timedelta(hours=24))\n86400\n>>> get_period_seconds('day + os.system(\"rm -Rf *\")')\nTraceback (most recent call last):\n...\nValueError: period not in (second, minute, hour, day, month, year)", "id": "f14200:m6"} {"signature": "def get_date_format_string(period):", "body": "if isinstance(period, six.string_types) and period.lower() == '':return ''file_period_secs = get_period_seconds(period)format_pieces = ('', '', '', '', '')seconds_per_second = intervals = (seconds_per_year,seconds_per_day,seconds_per_hour,seconds_per_minute,seconds_per_second,)mods = list(map(lambda interval: file_period_secs % interval, intervals))format_pieces = format_pieces[: mods.index() + ]return ''.join(format_pieces)", "docstring": "For a given period (e.g. 
'month', 'day', or some numeric interval\nsuch as 3600 (in secs)), return the format string that can be\nused with strftime to format that time to specify the times\nacross that interval, but no more detailed.\nFor example,\n\n>>> get_date_format_string('month')\n'%Y-%m'\n>>> get_date_format_string(3600)\n'%Y-%m-%d %H'\n>>> get_date_format_string('hour')\n'%Y-%m-%d %H'\n>>> get_date_format_string(None)\nTraceback (most recent call last):\n ...\nTypeError: period must be a string or integer\n>>> get_date_format_string('garbage')\nTraceback (most recent call last):\n ...\nValueError: period not in (second, minute, hour, day, month, year)", "id": "f14200:m7"} {"signature": "def divide_timedelta_float(td, divisor):", "body": "dsm = [getattr(td, attr) for attr in ('', '', '')]dsm = map(lambda elem: elem / divisor, dsm)return datetime.timedelta(*dsm)", "docstring": "Divide a timedelta by a float value\n\n>>> one_day = datetime.timedelta(days=1)\n>>> half_day = datetime.timedelta(days=.5)\n>>> divide_timedelta_float(one_day, 2.0) == half_day\nTrue\n>>> divide_timedelta_float(one_day, 2) == half_day\nTrue", "id": "f14200:m8"} {"signature": "def calculate_prorated_values():", "body": "rate = six.moves.input(\"\")res = re.match(r'', rate).groupdict()value = float(res[''])value_per_second = value / get_period_seconds(res[''])for period in ('', '', '', '', ''):period_value = value_per_second * get_period_seconds(period)print(\"\".format(**locals()))", "docstring": "A utility function to prompt for a rate (a string in units per\nunit time), and return that same rate for various time periods.", "id": "f14200:m9"} {"signature": "def parse_timedelta(str):", "body": "deltas = (_parse_timedelta_part(part.strip()) for part in str.split(''))return sum(deltas, datetime.timedelta())", "docstring": "Take a string representing a span of time and parse it to a time delta.\nAccepts any string of comma-separated numbers each with a unit indicator.\n\n>>> parse_timedelta('1 day')\ndatetime.timedelta(days=1)\n\n>>> parse_timedelta('1 day, 30 seconds')\ndatetime.timedelta(days=1, seconds=30)\n\n>>> parse_timedelta('47.32 days, 20 minutes, 15.4 milliseconds')\ndatetime.timedelta(days=47, seconds=28848, microseconds=15400)\n\nSupports weeks, months, years\n\n>>> parse_timedelta('1 week')\ndatetime.timedelta(days=7)\n\n>>> parse_timedelta('1 year, 1 month')\ndatetime.timedelta(days=395, seconds=58685)\n\nNote that months and years strict intervals, not aligned\nto a calendar:\n\n>>> now = datetime.datetime.now()\n>>> later = now + parse_timedelta('1 year')\n>>> diff = later.replace(year=now.year) - now\n>>> diff.seconds\n20940", "id": "f14200:m10"} {"signature": "def divide_timedelta(td1, td2):", "body": "try:return td1 / td2except TypeError:return td1.total_seconds() / td2.total_seconds()", "docstring": "Get the ratio of two timedeltas\n\n>>> one_day = datetime.timedelta(days=1)\n>>> one_hour = datetime.timedelta(hours=1)\n>>> divide_timedelta(one_hour, one_day) == 1 / 24\nTrue", "id": "f14200:m12"} {"signature": "def date_range(start=None, stop=None, step=None):", "body": "if step is None:step = datetime.timedelta(days=)if start is None:start = datetime.datetime.now()while start < stop:yield startstart += step", "docstring": "Much like the built-in function range, but works with dates\n\n>>> range_items = date_range(\n... datetime.datetime(2005,12,21),\n... datetime.datetime(2005,12,25),\n... 
)\n>>> my_range = tuple(range_items)\n>>> datetime.datetime(2005,12,21) in my_range\nTrue\n>>> datetime.datetime(2005,12,22) in my_range\nTrue\n>>> datetime.datetime(2005,12,25) in my_range\nFalse", "id": "f14200:m13"} {"signature": "@classmethoddef construct_datetime(cls, *args, **kwargs):", "body": "if len(args) == :arg = args[]method = cls.__get_dt_constructor(type(arg).__module__,type(arg).__name__,)result = method(arg)try:result = result.replace(tzinfo=kwargs.pop(''))except KeyError:passif kwargs:first_key = kwargs.keys()[]tmpl = (\"\"\"\")raise TypeError(tmpl.format(**locals()))else:result = datetime.datetime(*args, **kwargs)return result", "docstring": "Construct a datetime.datetime from a number of different time\n types found in python and pythonwin", "id": "f14200:c1:m0"} {"signature": "@staticmethoddef __dt_from___builtin___time__(pyt):", "body": "fmtString = ''result = strptime(pyt.Format(fmtString), fmtString)microseconds_per_day = seconds_per_day * microseconds = float(pyt) * microseconds_per_daymicrosecond = int(microseconds % )result = result.replace(microsecond=microsecond)return result", "docstring": "Construct a datetime.datetime from a pythonwin time", "id": "f14200:c1:m3"} {"signature": "def now():", "body": "return datetime.datetime.utcnow().replace(tzinfo=pytz.utc)", "docstring": "Provide the current timezone-aware datetime.\n\nA client may override this function to change the default behavior,\nsuch as to use local time or timezone-naïve times.", "id": "f14201:m0"} {"signature": "def from_timestamp(ts):", "body": "return datetime.datetime.utcfromtimestamp(ts).replace(tzinfo=pytz.utc)", "docstring": "Convert a numeric timestamp to a timezone-aware datetime.\n\nA client may override this function to change the default behavior,\nsuch as to use local time or timezone-naïve times.", "id": "f14201:m1"} {"signature": "@staticmethoddef _from_timestamp(input):", "body": "if not isinstance(input, numbers.Real):return inputreturn from_timestamp(input)", "docstring": "If input is a real number, interpret it as a Unix timestamp\n(seconds since Epoch in UTC) and return a timezone-aware\ndatetime object. 
Otherwise return input unchanged.", "id": "f14201:c0:m2"} {"signature": "@classmethoddef at_time(cls, at, target):", "body": "at = cls._from_timestamp(at)cmd = cls.from_datetime(at)cmd.delay = at - now()cmd.target = targetreturn cmd", "docstring": "Construct a DelayedCommand to come due at `at`, where `at` may be\na datetime or timestamp.", "id": "f14201:c0:m3"} {"signature": "def _next_time(self):", "body": "return self._localize(self + self.delay)", "docstring": "Add delay to self, localized", "id": "f14201:c1:m0"} {"signature": "@staticmethoddef _localize(dt):", "body": "try:tz = dt.tzinforeturn tz.localize(dt.replace(tzinfo=None))except AttributeError:return dt", "docstring": "Rely on pytz.localize to ensure new result honors DST.", "id": "f14201:c1:m1"} {"signature": "@classmethoddef daily_at(cls, at, target):", "body": "daily = datetime.timedelta(days=)when = datetime.datetime.combine(datetime.date.today(), at)if when < now():when += dailyreturn cls.at_time(cls._localize(when), daily, target)", "docstring": "Schedule a command to run at a specific time each day.", "id": "f14201:c2:m1"} {"signature": "@abc.abstractmethoddef run(self, command):", "body": "", "docstring": "Run the command", "id": "f14201:c3:m3"} {"signature": "def remove_parse_timedelta(items):", "body": "if sys.version_info > (, ):returnnames = list(map(operator.attrgetter(''), items))del items[names.index('')]", "docstring": "Repr on older Pythons is different, so remove the offending\ntest.", "id": "f14202:m1"} {"signature": "@staticmethoddef exec_func_with_sys_argv(func_exec, custom_argv, *args_func_exec, **kwargs_func_exec):", "body": "with patch.object(sys, '', custom_argv):print(''.format(sys.argv))func_exec(*args_func_exec, **kwargs_func_exec)", "docstring": "Exec a CLI function patching sys.argv.\nFor testing CLI main functions with argparse\n\n:param func_exec:\n:param custom_argv:\n:param kwargs_func_exec:", "id": "f14211:c0:m0"} {"signature": "def get_data_coeficientes_perfilado_2017(force_download=False):", "body": "path_perfs = os.path.join(STORAGE_DIR, '')if force_download or not os.path.exists(path_perfs):cols_sheet1 = ['', '', '','', '', '', '', '']perfs_2017 = pd.read_excel(URL_PERFILES_2017, header=None, skiprows=[, ], names=cols_sheet1)perfs_2017[''] = pd.DatetimeIndex(start='', freq='', tz=TZ, end='')perfs_2017 = perfs_2017.set_index('').drop(['', '', ''], axis=)coefs_alpha_beta_gamma = pd.read_excel(URL_PERFILES_2017, sheetname=)print(''.format(path_perfs))with pd.HDFStore(path_perfs, '') as st:st.put('', coefs_alpha_beta_gamma)st.put('', perfs_2017)print(''.format(os.path.getsize(path_perfs) / ))else:with pd.HDFStore(path_perfs, '') as st:coefs_alpha_beta_gamma = st['']perfs_2017 = st['']return perfs_2017, coefs_alpha_beta_gamma", "docstring": "Extracts the information from the two sheets of the Excel file provided by REE\n with the initial profiles for 2017.\n :param force_download: Downloads the 'raw' file from the server instead of using the local copy.\n :return: perfiles_2017, coefs_alpha_beta_gamma\n :rtype: tuple", "id": "f14216:m0"} {"signature": "def get_data_perfiles_estimados_2017(force_download=False):", "body": "global DATA_PERFILES_2017if (DATA_PERFILES_2017 is None) or force_download:perf_demref_2017, _ = get_data_coeficientes_perfilado_2017(force_download=force_download)cols_usar = ['', '', '', '']perfs_2017 = perf_demref_2017[cols_usar].copy()perfs_2017.columns = [''.format(p) for p in '']DATA_PERFILES_2017 = perfs_2017return perfs_2017return DATA_PERFILES_2017", "docstring": "Extracts 
perfiles estimados para 2017 con el formato de los CSV's mensuales con los perfiles definitivos.\n :param force_download: bool para forzar la descarga del excel de la web de REE.\n :return: perfiles_2017\n :rtype: pd.Dataframe", "id": "f14216:m1"} {"signature": "def get_data_perfiles_finales_mes(a\u00f1o, mes=None):", "body": "mask_ts = ''if (type(a\u00f1o) is int) and (mes is not None):ts = dt.datetime(a\u00f1o, mes, , , )else:ts = a\u00f1ourl_perfiles_finales = mask_ts.format(ts)cols_drop = ['', '', '', '', '']try:perfiles_finales = pd.read_csv(url_perfiles_finales, sep='', encoding='', compression='').dropna(how='', axis=)perfiles_finales[''] = pd.DatetimeIndex(start=''.format(ts), freq='', tz=TZ,end=''.format((ts + dt.timedelta(days=)).replace(day=)- dt.timedelta(days=)))return perfiles_finales.set_index('').drop(cols_drop, axis=)except HTTPError as e:print(''.format(e))perfiles_2017 = get_data_perfiles_estimados_2017()return perfiles_2017[(perfiles_2017.index.year == ts.year) & (perfiles_2017.index.month == ts.month)]", "docstring": "Lee el fichero CSV comprimido con los perfiles finales de consumo el\u00e9ctrico para\n el mes dado desde la web de REE. Desecha columnas de fecha e informaci\u00f3n de DST.\n :param a\u00f1o: :int: a\u00f1o \u00f3 :datetime_obj: ts\n :param mes: :int: mes (OPC)\n :return: perfiles_mes\n :rtype: pd.Dataframe", "id": "f14216:m2"} {"signature": "def perfiles_consumo_en_intervalo(t0, tf):", "body": "t_ini = pd.Timestamp(t0)t_fin = pd.Timestamp(tf)assert (t_fin > t_ini)marca_fin = ''.format(t_fin)marca_ini = ''.format(t_ini)if marca_ini == marca_fin:perfiles = get_data_perfiles_finales_mes(t_ini)else:dates = pd.DatetimeIndex(start=t_ini.replace(day=),end=t_fin.replace(day=), freq='')perfiles = pd.concat([get_data_perfiles_finales_mes(t) for t in dates])return perfiles.loc[t_ini:t_fin].iloc[:-]", "docstring": "Descarga de perfiles horarios para un intervalo dado\n Con objeto de calcular el precio medio ponderado de aplicaci\u00f3n para dicho intervalo.\n :return: perfiles_intervalo\n :rtype: pd.Dataframe", "id": "f14216:m3"} {"signature": "def print_info(x):", "body": "cprint(x, '')", "docstring": "Prints in blue", "id": "f14217:m0"} {"signature": "def print_infob(x):", "body": "cprint(x, '', attrs=[''])", "docstring": "Prints in bold + blue", "id": "f14217:m1"} {"signature": "def print_ok(x):", "body": "cprint(x, '', attrs=[''])", "docstring": "Prints in bold + green", "id": "f14217:m2"} {"signature": "def print_secc(x):", "body": "cprint('' + x, '', attrs=['', ''])", "docstring": "Prints in bold + blue + underline & starts with ' ==>", "id": "f14217:m3"} {"signature": "def print_err(x):", "body": "cprint('' + str(x), on_color='', attrs=[''])", "docstring": "Prints in bold + red background & starts with 'ERROR:", "id": "f14217:m4"} {"signature": "def print_warn(x):", "body": "cprint('' + str(x), '')", "docstring": "Prints in magenta & starts with 'WARNING:", "id": "f14217:m5"} {"signature": "def print_bold(x):", "body": "cprint(x, attrs=[''])", "docstring": "Prints in bold", "id": "f14217:m6"} {"signature": "def print_boldu(x):", "body": "cprint(x, '', attrs=['', ''])", "docstring": "Prints in bold + underline", "id": "f14217:m7"} {"signature": "def print_yellowb(x):", "body": "cprint(x, '', attrs=[''])", "docstring": "Prints in yellow + underline", "id": "f14217:m8"} {"signature": "def print_grey(x):", "body": "cprint(x, '')", "docstring": "Prints in grey", "id": "f14217:m9"} {"signature": "def print_greyb(x):", "body": "cprint(x, '', attrs=[''])", 
"docstring": "Prints in bold + grey", "id": "f14217:m10"} {"signature": "def print_red(x):", "body": "cprint(x, '')", "docstring": "Prints in red", "id": "f14217:m11"} {"signature": "def print_redb(x):", "body": "cprint(x, '', attrs=[''])", "docstring": "Prints in bold + red", "id": "f14217:m12"} {"signature": "def print_green(x):", "body": "cprint(x, '')", "docstring": "Prints in green", "id": "f14217:m13"} {"signature": "def print_yellow(x):", "body": "cprint(x, '')", "docstring": "Prints in yellow", "id": "f14217:m14"} {"signature": "def print_blue(x):", "body": "cprint(x, '')", "docstring": "Prints in blue", "id": "f14217:m15"} {"signature": "def print_magenta(x):", "body": "cprint(x, '')", "docstring": "Prints in magenta", "id": "f14217:m16"} {"signature": "def print_cyan(x):", "body": "cprint(x, '')", "docstring": "Prints in cyan", "id": "f14217:m17"} {"signature": "def print_white(x):", "body": "cprint(x, '')", "docstring": "Prints in white", "id": "f14217:m18"} {"signature": "def ppdict(dict_to_print, br='', html=False, key_align='', sort_keys=True,key_preffix='', key_suffix='', value_prefix='', value_suffix='', left_margin=, indent=):", "body": "if dict_to_print:if sort_keys:dic = dict_to_print.copy()keys = list(dic.keys())keys.sort()dict_to_print = OrderedDict()for k in keys:dict_to_print[k] = dic[k]tmp = ['']ks = [type(x) == str and \"\" % x or x for x in dict_to_print.keys()]vs = [type(x) == str and \"\" % x or x for x in dict_to_print.values()]max_key_len = max([len(str(x)) for x in ks])for i in range(len(ks)):k = {: str(ks[i]).ljust(max_key_len),key_align == '': str(ks[i]).rjust(max_key_len)}[]v = vs[i]tmp.append('' * indent + ''.format(key_preffix, k, key_suffix,value_prefix, v, value_suffix))tmp[-] = tmp[-][:-] tmp.append('')if left_margin:tmp = ['' * left_margin + x for x in tmp]if html:return ''.format(br.join(tmp).replace('', ''))else:return br.join(tmp)else:return ''", "docstring": "Indent representation of a dict", "id": "f14217:m19"} {"signature": "def __repr__(self):", "body": "def _linetotal(str_line, total_value):return ''.format(str_line, total_value)if self._str_repr is None:detalle_tfijo = ''.join([MASK_T_FIJO.format(pot=self._potencia_contratada, dias=ndias, y=a\u00f1o,dias_y=ndias_a\u00f1o, coste=coste, coef_t_fijo=coef_p)for (ndias, ndias_a\u00f1o, a\u00f1o), (coste, coef_p)in zip(self._periodos_fact, self._termino_fijo) if ndias > ])if self.consumo_total > :det_tvar = []tramo_mult = Truefor i, (tea, tcu, cons) in enumerate(zip(self._coste_peaje_acceso_tea,self._coste_ponderado_energia_tcu,self._consumos_totales_por_periodo)):if len(self._periodos_fact) > :if i == :ts_ini, ts_fin = self._t0, self._t0.replace(day=, month=)else:ts_ini, ts_fin = self._tf.replace(day=, month=), self._tfif tramo_mult and (ts_fin.date() > ts_ini.date()):det_tvar.append(MASK_T_VAR_PERIOD_TRAMO.format(tramo=i + , ts_ini=ts_ini, ts_fin=ts_fin))else:tramo_mult = Falsedet_tvar += [MASK_T_VAR_PERIOD.format(ind_periodo=j + , consumo_periodo=cons_p, coste_tcu=tcu_p,valor_medio_periodo=(tcu_p + tea_p) / cons_p,coste_periodo=tcu_p + tea_p, valor_med_tcu=tcu_p / cons_p,valor_med_tea=tea_p / cons_p, coste_tea=tea_p)for j, (tea_p, tcu_p, cons_p) in enumerate(zip(tea, tcu, cons)) if cons_p > ]elif abs(cons) > :det_tvar.append(MASK_T_VAR_PERIOD.format(ind_periodo=i + , consumo_periodo=cons,valor_medio_periodo=(tcu + tea) / cons,coste_periodo=tcu + tea, valor_med_tcu=tcu / cons,coste_tcu=tcu, valor_med_tea=tea / cons, coste_tea=tea))else:det_tvar = ['']detalle_impelec = 
_linetotal(MASK_T_IMP_ELEC.format(self._impuesto_electrico_general * ,self.coste_termino_fijo, self.coste_termino_consumo),self.impuesto_electrico_general)_, impuesto_gen, impuesto_medida = DATOS_ZONAS_IMPUESTOS[self._zona_impuestos]subt_fijo_var = self._termino_fijo_total + self._termino_variable_totalsubt_fijo_var += self._termino_impuesto_electrico + self._descuento_bono_socialif impuesto_gen != impuesto_medida:detalle_iva = MASK_T_IVA_M.format(impuesto_gen * , subt_fijo_var,impuesto_medida * , self._termino_equipo_medida)else:detalle_iva = MASK_T_IVA_U.format(impuesto_gen * , subt_fijo_var + self._termino_equipo_medida)detalle_descuento = ''if self._con_bono_social:detalle_descuento = '' + _linetotal('', self.descuento_bono_social) + ''params = dict(ts_ini=self._t0, ts_fin=self._tf, cod_peaje=self._tipo_peaje,consumo_total=self.consumo_total,desc_peaje=DATOS_TIPO_PEAJE[self._tipo_peaje][],p_contrato=self._round(self._potencia_contratada),con_bono='' if self._con_bono_social else '',coste_medida=self._round(self.gasto_equipo_medida),desc_impuesto=DATOS_ZONAS_IMPUESTOS[self._zona_impuestos][], dias_fact=self.num_dias_factura,detalle_descuento=''.format(detalle_descuento),coste_impuesto_elec=self.impuesto_electrico_general,total_termino_fijo=_linetotal(\"\", self.coste_termino_fijo),total_termino_variable=_linetotal(\"\", self.coste_termino_consumo),total_equipo_medida=_linetotal(\"\", self.gasto_equipo_medida),total_factura=_linetotal(\"\", self.coste_total),detalle_term_fijo=detalle_tfijo, detalle_iva=_linetotal(detalle_iva, self.coste_iva),detalle_term_variable=''.join(det_tvar), detalle_term_impuesto_elec=detalle_impelec)self._str_repr = TEMPLATE_FACTURA.format(**params)return self._str_repr", "docstring": "Representaci\u00f3n en texto de la factura el\u00e9ctrica.", "id": "f14218:c0:m1"} {"signature": "def to_dict(self, include_text_repr=False, include_html_repr=False):", "body": "if self._dict_repr is None:tea_tcu_cons = [(self._round(tea), self._round(tcu), self._round(ct))for tea, tcu, ct in zip(self._coste_peaje_acceso_tea, self._coste_ponderado_energia_tcu,self._consumos_totales_por_periodo)]periodos_fact = [(ndias, ndias_a\u00f1o, a\u00f1o, coste, coef_p)for (ndias, ndias_a\u00f1o, a\u00f1o), (coste, coef_p)in zip(self._periodos_fact, self._termino_fijo)],self._dict_repr = dict(cups=self._cups, cod_peaje=self._tipo_peaje, consumo_total=self.consumo_total,ts_ini=''.format(self._t0), ts_fin=''.format(self._tf),p_contrato=self._round(self._potencia_contratada), dias_fact=self.num_dias_factura,con_bono=self._con_bono_social, coste_medida=self._round(self.gasto_equipo_medida),descuento_bono_social=self._round(self.descuento_bono_social),desc_peaje=DATOS_TIPO_PEAJE[self._tipo_peaje][],desc_impuesto=DATOS_ZONAS_IMPUESTOS[self._zona_impuestos][],coste_impuesto_elec=self._round(self.impuesto_electrico_general),coste_termino_fijo=self._round(self.coste_termino_fijo),coste_termino_consumo=self._round(self.coste_termino_consumo),impuesto_elec=self._impuesto_electrico_general * ,periodos_fact=periodos_fact, tea_tcu_consumo=tea_tcu_cons,total_factura=self._round(self.coste_total),tipos_iva=DATOS_ZONAS_IMPUESTOS[self._zona_impuestos][:],coste_iva=self._round(self.coste_iva))dict_params = self._dict_repr.copy()if include_text_repr:dict_params.update(text_repr=str(self))if include_html_repr:dict_params.update(html_repr=self.to_html())return dict_params", "docstring": "Representaci\u00f3n como `dict` de los componentes de la factura el\u00e9ctrica.\n :param include_text_repr: bool para incluir 
la representaci\u00f3n de la factura en texto plano\n :param include_html_repr: bool para incluir la representaci\u00f3n de la factura en HTML\n :return dict_factura\n :rtype dict", "id": "f14218:c0:m2"} {"signature": "def to_html(self, web_completa=False):", "body": "if self._html_repr is None:params = self.to_dict()fact_templ = _render_jinja2_template(TEMPLATE_FACTURA_HTML, dict(factura=params))self._html_repr = fact_templif web_completa:if self._html_repr_completa is None:self._html_repr_completa = _render_jinja2_template(TEMPLATE_FACTURA_WEB,dict(factura_html=self._html_repr))return self._html_repr_completareturn self._html_repr", "docstring": "Genera una representaci\u00f3n en HTML de la factura el\u00e9ctrica.\n Para su renderizado o env\u00edo por email. Utiliza clases CSS de bootstrap 4.0.\n :param web_completa: bool para generar una p\u00e1gina web completa ( ...)", "id": "f14218:c0:m3"} {"signature": "def _check_hourly_data(self, consumo_horario):", "body": "if not consumo_horario.index.is_unique:consumo_horario = (consumo_horario.reset_index().drop_duplicates(subset='', keep='').set_index(''))[COL_CONSUMO]horas = (consumo_horario.index[-] - consumo_horario.index[]).total_seconds() / + if round(horas, ) != round(float(len(consumo_horario.index)), ):print(''''.format(len(consumo_horario.index), horas))consumo_horario = _reindex_consumo(consumo_horario)if consumo_horario.index.tz is None:tz = self._pvpc_horario.index.tztry:consumo_horario.index = consumo_horario.index.tz_localize(tz, ambiguous='')except AmbiguousTimeError as e:consumo_horario.index = consumo_horario.index.tz_localize(tz, ambiguous='')new_idx = pd.DatetimeIndex(start=consumo_horario.index[], freq='', tz=tz,end=consumo_horario.index[-])consumo_horario = consumo_horario.reindex(new_idx).interpolate()print(''.format(e))return consumo_horario.rename(COL_CONSUMO)", "docstring": "Checkea, y corrige si es necesario, el \u00edndice temporal de la serie de datos de consumo horario,\n asignando timezone si naive, y reindexando si faltan horas. 
Para el caso de 'missing values',\n al reindexar, aplica un bfill + ffill de m\u00e1ximo 1 valor, y el resto los pone a 0:\n `.reindex(new_idx).fillna(method='bfill', limit=1).fillna(method='ffill', limit=1).fillna(0.)`", "id": "f14218:c0:m6"} {"signature": "def _consumo_numerico(self):", "body": "if (type(self._consumo) is float) or (type(self._consumo) is int):return True, [float(self._consumo)]elif (type(self._consumo) is tuple) or (type(self._consumo) is list):return True, self._consumoreturn False, self._consumo", "docstring": "Devuelve los datos de consumo y un booleano indicando si se trata de valores totales o es un pd.Series.\n:return: son_totales, consumo_kWh", "id": "f14218:c0:m8"} {"signature": "def _coste_tea_tcu(self, consumo, tcu, periodo_fac):", "body": "coefs_ener = TERM_ENER_PEAJE_ACCESO_EUR_KWH_TEA[periodo_fac][self._tipo_peaje]if len(coefs_ener) > : name = consumo.namehay_discr, cons_discr = self._asigna_periodos_discr_horaria(consumo)assert hay_discrassert tcu.index.equals(cons_discr.index)return [(cons_discr[cons_discr[''.format(i + )]][name].sum() * coef,(cons_discr[cons_discr[''.format(i + )]][name] * tcu[cons_discr[''.format(i + )]]).sum(),cons_discr[cons_discr[''.format(i + )]][name].sum())for i, coef in enumerate(coefs_ener)]else:if not tcu.index.equals(consumo.index):print('')consumo = _reindex_consumo(consumo)return [(consumo.sum() * coefs_ener[], (consumo * tcu).sum(), consumo.sum())]", "docstring": "Devuelve TEA, TCU, CONSUMO como lista de tuplas por periodo de facturaci\u00f3n.\n :return [(TEA_P1, TCU_P1, C_P1), (TEA_P2, TCU_P2, C_P2), ...]\n :rtype list", "id": "f14218:c0:m9"} {"signature": "def _calcula_iva_y_total(self):", "body": "subt_fijo_var = self._termino_fijo_total + self._termino_variable_totalsubt_fijo_var += self._termino_impuesto_electrico + self._descuento_bono_social_, impuesto_gen, impuesto_medida = DATOS_ZONAS_IMPUESTOS[self._zona_impuestos]self._terminos_iva = (subt_fijo_var * impuesto_gen, self._termino_equipo_medida * impuesto_medida)self._termino_iva_total = self._round(self._terminos_iva[] + self._terminos_iva[])subt_fijo_var += self._termino_equipo_medida + self._termino_iva_totalself._total_factura = self._round(subt_fijo_var)", "docstring": "A\u00f1ade el IVA y obtiene el total.", "id": "f14218:c0:m10"} {"signature": "def _calcula_factura(self):", "body": "self._dict_repr = Noneself._str_repr = Noneself._html_repr = Noneself._html_repr_completa = Nonecod_tarifa = DATOS_TIPO_PEAJE[self._tipo_peaje][]year = self._t0.yearyear_f = self._tf.yearself._num_dias_factura = (self._tf - self._t0).days if year_f > year:ts_limit = pd.Timestamp(''.format(year))days_1 = (ts_limit - self._t0).daysdays_2 = (self._tf - ts_limit).daysn_days_y1 = (pd.Timestamp(''.format(year + )) - pd.Timestamp(''.format(year))).daysn_days_y2 = (pd.Timestamp(''.format(year_f + )) - pd.Timestamp(''.format(year_f))).daysself._periodos_fact = ((days_1, n_days_y1, year), (days_2, n_days_y2, year_f))else:n_days_y = (pd.Timestamp(''.format(year + )) - pd.Timestamp(''.format(year))).daysself._periodos_fact = ((self._num_dias_factura, n_days_y, year),)if self._pvpc_data is None:pvpc_data = PVPC(update=True, verbose=False)self._pvpc_data = pvpc_calc_tcu_cp_feu_d(pvpc_data.data[''], verbose=False, convert_kwh=True)cols_tarifa = list(filter(lambda x: cod_tarifa in x, self._pvpc_data.columns))pvpc_t_ini, pvpc_t_fin = self._t0 + pd.Timedelta(''), self._tf + pd.Timedelta('')self._pvpc_horario = self._pvpc_data[cols_tarifa].loc[pvpc_t_ini:pvpc_t_fin].iloc[:-]self._termino_fijo, 
self._termino_fijo_total = [], for (days_fac, days_year, year) in self._periodos_fact:coef_potencia = MARGEN_COMERCIALIZACI\u00d3N_EUR_KW_A\u00d1O_MCF + TERM_POT_PEAJE_ACCESO_EUR_KW_A\u00d1O_TPA[year]coste = self._potencia_contratada * days_fac * coef_potencia / days_yearself._termino_fijo.append((coste, coef_potencia))self._termino_fijo_total += costeself._termino_fijo_total = self._round(self._termino_fijo_total)if self._consumo is not None:son_totales, consumo_calc = self._consumo_numerico()if son_totales:c_coef = ''.format(cod_tarifa)hay_discr, perfs_interv = self._asigna_periodos_discr_horaria(self._pvpc_horario[c_coef])if not hay_discr:self._consumo_horario = (perfs_interv * consumo_calc[] / perfs_interv.sum()).rename(COL_CONSUMO)else:consumos_horarios_periodos = []for i, cons_periodo_i in enumerate(consumo_calc):c = ''.format(i + )idx = perfs_interv[perfs_interv[c]].indexconsumos_horarios_periodos.append(perfs_interv.loc[idx, c_coef] * cons_periodo_i/ perfs_interv.loc[idx, c_coef].sum())self._consumo_horario = pd.Series(pd.concat(consumos_horarios_periodos)).rename(COL_CONSUMO).sort_index()else:self._consumo_horario = self._check_hourly_data(consumo_calc)t0, tf = self._consumo_horario.index[], self._consumo_horario.index[-]self._pvpc_horario = self._pvpc_horario.loc[t0:tf]col_tcu = ''.format(cod_tarifa)if len(self._periodos_fact) > :ts_limit = pd.Timestamp(''.format(self._tf.year)).tz_localize(self._consumo_horario.index.tz)consumo_1 = self._consumo_horario.loc[:ts_limit].iloc[:-]consumo_2 = self._consumo_horario.loc[ts_limit:]pvpc_1 = self._pvpc_horario.loc[:ts_limit].iloc[:-]pvpc_2 = self._pvpc_horario.loc[ts_limit:]tea_y1, tcu_y1, cons_tot_y1 = list(zip(*self._coste_tea_tcu(consumo_1, pvpc_1[col_tcu], self._t0.year)))tea_y2, tcu_y2, cons_tot_y2 = list(zip(*self._coste_tea_tcu(consumo_2, pvpc_2[col_tcu], self._tf.year)))self._coste_peaje_acceso_tea = (tea_y1, tea_y2)self._coste_ponderado_energia_tcu = (tcu_y1, tcu_y2)self._consumos_totales_por_periodo = (cons_tot_y1, cons_tot_y2)coste_variable_tot = sum([sum(x) for x in self._coste_peaje_acceso_tea])coste_variable_tot += sum([sum(x) for x in self._coste_ponderado_energia_tcu])else:tea, tcu, cons_tot = list(zip(*self._coste_tea_tcu(self._consumo_horario, self._pvpc_horario[col_tcu],self._t0.year)))self._coste_peaje_acceso_tea = teaself._coste_ponderado_energia_tcu = tcuself._consumos_totales_por_periodo = cons_totcoste_variable_tot = self._round_sum(self._coste_peaje_acceso_tea)coste_variable_tot += self._round_sum(self._coste_ponderado_energia_tcu)self._consumo = self._consumo_horarioelse:self._coste_peaje_acceso_tea = (,)self._coste_ponderado_energia_tcu = (,)self._consumos_totales_por_periodo = (,)coste_variable_tot = self._termino_variable_total = self._round(coste_variable_tot)subt_fijo_var = self._termino_fijo_total + self._termino_variable_totalself._descuento_bono_social = if self._con_bono_social:self._descuento_bono_social = self._round(- * self._round(subt_fijo_var))subt_fijo_var += self._descuento_bono_socialself._termino_impuesto_electrico = self._round(self._impuesto_electrico_general * subt_fijo_var)subt_fijo_var += self._termino_impuesto_electricoif self.alquiler_euros is not None:self._termino_equipo_medida = self._round(self.alquiler_euros)else:frac_a\u00f1o = sum([nd / dy for nd, dy, _ in self._periodos_fact])self._termino_equipo_medida = self._round(frac_a\u00f1o * self.alquiler_euros_a\u00f1o)self._calcula_iva_y_total()", "docstring": "M\u00e9todo para regenerar el c\u00e1lculo de la factura el\u00e9ctrica.", 
"id": "f14218:c0:m11"} {"signature": "@propertydef consumo_horario(self):", "body": "return self._consumo_horario", "docstring": "Devuelve los datos de consumo como time series horario. Si el consumo no tiene discriminaci\u00f3n horaria,\n se aplican los perfiles de consumo para la tarifa seleccionada y el intervalo facturado,\n estimando los valores horarios.", "id": "f14218:c0:m12"} {"signature": "@propertydef pvpc_horas_periodo(self):", "body": "return self._pvpc_horario", "docstring": "Devuelve los datos del PVPC en el intervalo facturado para el tipo de tarifa considerado.\n :return: pvpc \u20ac/kWh\n :rtype: pd.Dataframe", "id": "f14218:c0:m13"} {"signature": "@propertydef num_dias_factura(self):", "body": "return self._num_dias_factura", "docstring": "Devuelve el # de d\u00edas del periodo facturado.", "id": "f14218:c0:m14"} {"signature": "@propertydef tipo_peaje(self):", "body": "return self._tipo_peaje", "docstring": "Devuelve el tipo de tarifa asociada a la factura el\u00e9ctrica.", "id": "f14218:c0:m15"} {"signature": "@tipo_peaje.setterdef tipo_peaje(self, tarifa):", "body": "peajes = [TIPO_PEAJE_GEN, TIPO_PEAJE_NOC, TIPO_PEAJE_VHC]codes_peajes = [DATOS_TIPO_PEAJE[p][] for p in peajes]if type(tarifa) is int:self._tipo_peaje = peajes[tarifa - ]elif tarifa in peajes:self._tipo_peaje = peajes[peajes.index(tarifa)]elif tarifa in codes_peajes:self._tipo_peaje = peajes[codes_peajes.index(tarifa)]else:print(''.format(tarifa, ''.join(peajes), ''.join(codes_peajes)))self._calcula_factura()", "docstring": "Establece el tipo de tarifa asociada a la factura el\u00e9ctrica.\n :param tarifa: int 1|2|3 \u00f3 str GEN|NOC|VHC \u00f3 str 2.0A|2.0DHA|2.0DHS", "id": "f14218:c0:m16"} {"signature": "@propertydef consumo_total(self):", "body": "if self._consumo is not None:return self._round(self._consumo.sum())print('')return ", "docstring": "Devuelve el consumo total del periodo facturado en kWh.\n :return consumo_kWh\n :rtype float", "id": "f14218:c0:m17"} {"signature": "@consumo_total.setterdef consumo_total(self, nuevo_consumo):", "body": "self._consumo = nuevo_consumoself._calcula_factura()", "docstring": "Establece el consumo energ\u00e9tico del periodo de facturaci\u00f3n en kWh.\n :param nuevo_consumo: Consumo en kWh, bien como float, bien como lista de 1, 2 o 3 elementos,\n bien como time series de datos horarios.", "id": "f14218:c0:m18"} {"signature": "@propertydef gasto_equipo_medida(self):", "body": "return self._termino_equipo_medida", "docstring": "Devuelve el gasto relativo al alquiler de equipos de medida antes de impuestos, en \u20ac.\n :return gasto_alquiler\n :rtype float", "id": "f14218:c0:m19"} {"signature": "@gasto_equipo_medida.setterdef gasto_equipo_medida(self, nuevo_gasto):", "body": "self.alquiler_euros = nuevo_gastoself._termino_equipo_medida = self._round(nuevo_gasto)self._calcula_iva_y_total()", "docstring": "Establece el gasto relativo al alquiler de equipos de medida antes de impuestos, en \u20ac, de forma absoluta.\n :param nuevo_gasto: Gasto en \u20ac", "id": "f14218:c0:m20"} {"signature": "@propertydef coste_termino_fijo(self):", "body": "return self._termino_fijo_total", "docstring": "Calcula el coste del t\u00e9rmino de potencia antes de impuestos, en \u20ac.\n :return gasto_potencia\n :rtype float", "id": "f14218:c0:m21"} {"signature": "@propertydef coste_termino_consumo(self):", "body": "return self._termino_variable_total", "docstring": "Calcula el coste asociado al consumo de energ\u00eda en el periodo facturado, antes de impuestos, en \u20ac.\n :return 
gasto_energia\n :rtype float", "id": "f14218:c0:m22"} {"signature": "@propertydef descuento_bono_social(self):", "body": "return self._descuento_bono_social", "docstring": "Calcula el importe del descuento por Bono Social, si lo hay, antes de impuestos, en \u20ac.\n :return descuento\n :rtype float", "id": "f14218:c0:m23"} {"signature": "@propertydef impuesto_electrico_general(self):", "body": "return self._termino_impuesto_electrico", "docstring": "Calcula el importe del impuesto el\u00e9ctrico, antes de IVA o equivalente, en \u20ac.\n :return impuesto\n :rtype float", "id": "f14218:c0:m24"} {"signature": "@propertydef coste_iva(self):", "body": "return self._termino_iva_total", "docstring": "Calcula el importe del IVA o equivalente, en \u20ac.\n :return impuesto\n :rtype float", "id": "f14218:c0:m25"} {"signature": "@propertydef coste_total(self):", "body": "return self._total_factura", "docstring": "Calcula el importe total de la factura el\u00e9ctrica, en \u20ac.\n :return coste\n :rtype float", "id": "f14218:c0:m26"} {"signature": "def generacion_csv_oficial_consumo_horario(self, save_pathdir=None):", "body": "df_csv = pd.DataFrame(self._consumo_horario)columns = ''.split('')date_fmt = ''time_fmt = ''metodo_obt = ''df_csv[columns[]] = self._cupsdf_csv[columns[]] = [date_fmt.format(x) for x in df_csv.index]df_csv[columns[]] = [int(time_fmt.format(x)) + for x in df_csv.index]df_csv[columns[]] = df_csv[self._consumo_horario.name].round()df_csv[columns[]] = metodo_obtdf_csv.drop(self._consumo_horario.name, axis=, inplace=True)if save_pathdir is not None: params_csv = dict(index=False, sep='', decimal='', float_format='')path_csv = os.path.join(os.path.expanduser(save_pathdir),''.format(pd.Timestamp(self._t0), self._tf))df_csv.to_csv(path_csv, **params_csv)return df_csv", "docstring": "Genera o graba el consumo horario en CSV, con el formato utilizado por las distribuidoras (oficial de facto),\npara su importaci\u00f3n en otras herramientas, como la existente en: https://facturaluz2.cnmc.es/facturaluz2.html\n\nEl formato es el siguiente:\n```\n CUPS;Fecha;Hora;Consumo_kWh;Metodo_obtencion\n ES00XXXXXXXXXXXXXXDB;06/09/2015;1;0,267;R\n ES00XXXXXXXXXXXXXXDB;06/09/2015;2;0,143;R\n ES00XXXXXXXXXXXXXXDB;06/09/2015;3;0,118;R\n ...\n```\n:param save_pathdir: (OPC) path_str de destino para grabar el CSV.\n:return: dataframe de consumo con 'formato oficial'\n:rtype: pd.dataframe", "id": "f14218:c0:m27"} {"signature": "def reparto_coste(self, detallado=False):", "body": "cod_tarifa = DATOS_TIPO_PEAJE[self._tipo_peaje][]_, impuesto_gen, impuesto_medida = DATOS_ZONAS_IMPUESTOS[self._zona_impuestos]coste_horario = pd.DataFrame(self._consumo_horario)for (ndias, ndias_a\u00f1o, a\u00f1o), (coste, coef_p) in zip(self._periodos_fact, self._termino_fijo):b_period_f = coste_horario.index.year == a\u00f1ocoste_horario.loc[b_period_f, ''] = coef_p * self._potencia_contratada / ( * ndias_a\u00f1o)coefs_ener = TERM_ENER_PEAJE_ACCESO_EUR_KWH_TEA[a\u00f1o][self._tipo_peaje]tcu = self._pvpc_horario[''.format(cod_tarifa)].loc[b_period_f]hay_discr, cons_discr = self._asigna_periodos_discr_horaria(coste_horario.loc[b_period_f, COL_CONSUMO])if hay_discr:for c, coef in zip(cons_discr.columns[:], coefs_ener):sub_pd = cons_discr[cons_discr[c]]coste_horario.loc[sub_pd.index, ''] = coste_horario.loc[sub_pd.index, COL_CONSUMO] * coefelse:coste_horario.loc[b_period_f, ''] = coste_horario.loc[b_period_f, COL_CONSUMO] * coefs_ener[]coste_horario.loc[b_period_f, ''] += tcu * coste_horario.loc[b_period_f, COL_CONSUMO]cols_suma = 
['', '']if self._con_bono_social:coste_horario[''] = coste_horario[cols_suma].sum(axis=) * -else:coste_horario[''] = cols_suma += ['']coste_horario[''] = coste_horario[cols_suma].sum(axis=) * self._impuesto_electrico_generalcoste_horario[''] = self.gasto_equipo_medida / len(self._consumo_horario)cols_suma += ['']coste_horario[''] = coste_horario[cols_suma].sum(axis=) * impuesto_gencoste_horario[''] += coste_horario[''] * impuesto_medidacols_suma += ['', '']if detallado:return coste_horarioelse:return coste_horario[cols_suma].sum(axis=).rename('')", "docstring": "Devuelve un pd.DataFrame o pd.Series con el coste facturado repartido por horas, conforme al consumo y precio\n de cada hora.\n Dado que el suministrador aplica convenientes redondeos por cada t\u00e9rmino facturado, se observar\u00e1 que la suma de\n la serie de coste no coincide necesariamente con el importe total de la factura, admitiendo cierto error m\u00ednimo.\n La mejor aproximaci\u00f3n se consigue r\u00e1pidamente obteniendo el df de costes y haciendo la suma:\n `df_coste.drop(COL_CONSUMO, axis=1).sum(axis=0).round(2).sum()`\n Aunque \u00e9sta tampoco es exactamente igual al proceso seguido por el c\u00e1lculo de la factura.\n\n :param detallado: Si `True`, devuelve el pd.Dataframe con coste repartido en por columnas,\n coste total como pd.Series por defecto.\n :return: coste_horario", "id": "f14218:c0:m28"} {"signature": "def plot_consumo_diario(self, ax=None):", "body": "p_params = dict(figsize=(, )) if ax is None else dict(ax=ax)consumo_diario = self._consumo_horario.groupby(pd.TimeGrouper('')).sum()ax = consumo_diario.plot(color='', lw=, **p_params)params_lines = dict(lw=, linestyle='', alpha=)xlim = consumo_diario[], consumo_diario.index[-]ax.hlines([consumo_diario.mean()], *xlim, color='', **params_lines)ax.hlines([consumo_diario.max()], *xlim, color='', **params_lines)ax.hlines([consumo_diario.min()], *xlim, color='', **params_lines)ax.set_title(''.format(self.consumo_total))ax.set_ylabel('')ax.set_xlabel('')ax.set_ylim((, consumo_diario.max() * ))ax.grid('', axis='')return ax", "docstring": "Gr\u00e1fica del consumo diario en el intervalo.\n :param ax: optional matplotlib axes\n :return: matplotlib axes", "id": "f14218:c0:m29"} {"signature": "def plot_patron_semanal_consumo(self, ax=None):", "body": "consumo_diario = self._consumo_horario.groupby(pd.TimeGrouper('')).sum()media_semanal = consumo_diario.groupby(lambda x: x.weekday).mean().round()d\u00edas_semana = ['', '', '', '', '', '', '']media_semanal.columns = d\u00edas_semanap_params = dict(figsize=(, )) if ax is None else dict(ax=ax)ax = media_semanal.T.plot(kind='', color='', legend=False, **p_params)ax.set_xticklabels(d\u00edas_semana, rotation=)ax.set_title('')ax.set_ylabel('')ax.grid('', axis='')ax.hlines([consumo_diario.mean()], -, , lw=, color='', linestyle='')return ax", "docstring": "Gr\u00e1fica de consumo medio por d\u00eda de la semana (patr\u00f3n semanal de consumo).\n :param ax: optional matplotlib axes\n :return: matplotlib axes", "id": "f14218:c0:m30"} {"signature": "def main_cli():", "body": "def _get_parser_args():p = argparse.ArgumentParser(description='')p.add_argument('', '', action='', help='')p.add_argument('', '', action='', nargs='',help=\"\"\"\"\"\")p.add_argument('', '', '', action='',help=\"\")p.add_argument('', '', '', action='',help=\"\")p.add_argument('', '', action='', help=\"\")p.add_argument('', '', action='', help='')arguments = p.parse_args()return arguments, pdef _parse_date(string, columns):try:ts = 
pd.Timestamp(string)print_cyan(''.format(string, ts, ts.date()))columns.remove(string)return ts.date().isoformat()except ValueError:passargs, parser = _get_parser_args()print_secc('')if args.dem:db_web = DatosREE(update=args.update, force_update=args.forceupdate, verbose=args.verbose)else:db_web = PVPC(update=args.update, force_update=args.forceupdate, verbose=args.verbose)data = db_web.data['']if args.info is not None:if len(args.info) > :cols = args.info.copy()dates = [d for d in [_parse_date(s, cols) for s in args.info] if d]if len(dates) == :data = data.loc[dates[]:dates[]]elif len(dates) == :data = data.loc[dates[]]if len(cols) > :try:data = data[[c.upper() for c in cols]]except KeyError as e:print_red(''.format(e, data.columns))print_info(data)else:print_secc('')print_info(data.iloc[-:])print_cyan(data.columns)if args.plot:if args.dem:from esiosdata.pvpcplot import pvpcplot_tarifas_hora, pvpcplot_grid_horaprint_red('')else:from esiosdata.pvpcplot import pvpcplot_tarifas_hora, pvpcplot_grid_horaif len(data) < :pvpcplot_grid_hora(data)else:print_red(''.format(len(data), data.index[], data.index[-]))pvpcplot_grid_hora(db_web.data[''].iloc[-:])pvpcplot_tarifas_hora(db_web.data[''].iloc[-:])", "docstring": "Actualiza la base de datos de PVPC/DEMANDA almacenados como dataframe en local,\ncreando una nueva si no existe o hubiere alg\u00fan problema. Los datos registrados se guardan en HDF5", "id": "f14219:m0"} {"signature": "def dem_url_dia(dt_day=''):", "body": "def _url_tipo_dato(str_dia, k):url = SERVER + ''.format(D_TIPOS_REQ_DEM[k])if type(str_dia) is str:return url + '' + str_diaelse:return url + '' + str_dia.date().isoformat()urls = [_url_tipo_dato(dt_day, k) for k in D_TIPOS_REQ_DEM.keys()]return urls", "docstring": "Obtiene las urls de descarga de los datos de demanda energ\u00e9tica de un d\u00eda concreto.", "id": "f14220:m0"} {"signature": "def dem_procesa_datos_dia(key_day, response):", "body": "dfs_import, df_import, dfs_maxmin, hay_errores = [], None, [], for r in response:tipo_datos, data = _extract_func_json_data(r)if tipo_datos is not None:if ('' in tipo_datos) and data:df_import = _import_daily_max_min(data)dfs_maxmin.append(df_import)elif data:df_import = _import_json_ts_data(data)dfs_import.append(df_import)if tipo_datos is None or df_import is None:hay_errores += if hay_errores == :print_redb(''.format(key_day))return None, -else: data_import = {}if dfs_import:data_import[KEYS_DATA_DEM[]] = dfs_import[].join(dfs_import[])if len(dfs_maxmin) == :data_import[KEYS_DATA_DEM[]] = dfs_maxmin[].join(dfs_maxmin[])elif dfs_maxmin:data_import[KEYS_DATA_DEM[]] = dfs_maxmin[]if not data_import:print_err(''.format(key_day, hay_errores))return None, -return data_import, ", "docstring": "Procesa los datos descargados en JSON.", "id": "f14220:m4"} {"signature": "def dem_data_dia(str_dia='', str_dia_fin=None):", "body": "params = {'': DATE_FMT, '': False, '': , \"\": ,'': dem_procesa_datos_dia, '': dem_url_dia,'': {'': False, '': HEADERS}}if str_dia_fin is not None:params[''] = Truedata, hay_errores, str_import = get_data_en_intervalo(str_dia, str_dia_fin, **params)else:data, hay_errores, str_import = get_data_en_intervalo(str_dia, str_dia, **params)if not hay_errores:return dataelse:print_err(str_import)return None", "docstring": "Obtiene datos de demanda energ\u00e9tica en un d\u00eda concreto o un intervalo, accediendo directamente a la web.", "id": "f14220:m5"} {"signature": "def url_data_dia(self, key_dia):", "body": "return pvpc_url_dia(key_dia)", "docstring": "Devuelve la url de 
descarga de datos para `key_dia`.", "id": "f14221:c0:m1"} {"signature": "def procesa_data_dia(self, key_dia, datos_para_procesar):", "body": "return pvpc_procesa_datos_dia(key_dia, datos_para_procesar, verbose=self.verbose)", "docstring": "Procesa los datos descargados correspondientes a un d\u00eda `key_dia`.", "id": "f14221:c0:m2"} {"signature": "def get_resample_data(self):", "body": "if self.data is not None:if self._pvpc_mean_daily is None:self._pvpc_mean_daily = self.data[''].resample('').mean()if self._pvpc_mean_monthly is None:self._pvpc_mean_monthly = self.data[''].resample('').mean()return self._pvpc_mean_daily, self._pvpc_mean_monthly", "docstring": "Obtiene los dataframes de los datos de PVPC con resampling diario y mensual.", "id": "f14221:c0:m3"} {"signature": "@propertydef tarifas(self):", "body": "return ['', '', '']", "docstring": "Devuelve los c\u00f3digos de las tarifas monof\u00e1sicas de < 10 kW.", "id": "f14221:c0:m4"} {"signature": "@propertydef colores_tarifas(self):", "body": "return {'': '', '': '', '': ''}", "docstring": "Devuelve un dict con los colores (hex values) por tarifa.", "id": "f14221:c0:m5"} {"signature": "def url_data_dia(self, key_dia):", "body": "return dem_url_dia(key_dia)", "docstring": "Devuelve la url de descarga de datos para `key_dia`.", "id": "f14221:c1:m1"} {"signature": "def procesa_data_dia(self, str_dia, datos_para_procesar):", "body": "return dem_procesa_datos_dia(str_dia, datos_para_procesar)", "docstring": "Procesa los datos descargados correspondientes a un d\u00eda `key_dia`.", "id": "f14221:c1:m2"} {"signature": "def post_update_data(self):", "body": "if self.data is not None:self.data[''] = self.busca_errores_data()", "docstring": "Definici\u00f3n opcional para analizar la informaci\u00f3n descargada en busca de errores,\nque quedan almacenados en `self.data['errores']`.", "id": "f14221:c1:m3"} {"signature": "def last_entry(self, data_revisar=None, key_revisar=None):", "body": "if data_revisar is None and key_revisar is None:data_revisar = self.data[self.masterkey][pd.notnull(self.data[self.masterkey][''])]super(DatosREE, self).printif('', '')super(DatosREE, self).printif(data_revisar.tail(), '')return super(DatosREE, self).last_entry(data_revisar, '')else:return super(DatosREE, self).last_entry(data_revisar, key_revisar)", "docstring": "Definici\u00f3n espec\u00edfica para filtrar por datos de demanda energ\u00e9tica (pues los datos se extienden m\u00e1s all\u00e1 del\ntiempo presente debido a las columnas de potencia prevista y programada.\n\n:param data_revisar: (OPC) Se puede pasar un dataframe espec\u00edfico\n:param key_revisar: (OPC) Normalmente, para utilizar 'dem'\n:return: tmax, num_entradas", "id": "f14221:c1:m4"} {"signature": "def integridad_data(self, data_integr=None, key=None):", "body": "if data_integr is None and key is None and all(k in self.data.keys() for k in KEYS_DATA_DEM):assert(self.data[KEYS_DATA_DEM[]].index.freq == FREQ_DAT_DEMand self.data[KEYS_DATA_DEM[]].index.tz == self.TZ)if self.data[KEYS_DATA_DEM[]] is not None:assert(self.data[KEYS_DATA_DEM[]].index.freq == '')super(DatosREE, self).integridad_data(data_integr, key)", "docstring": "Definici\u00f3n espec\u00edfica para comprobar timezone y frecuencia de los datos, adem\u00e1s de comprobar\nque el index de cada dataframe de la base de datos sea de fechas, \u00fanico (sin duplicados) y creciente\n:param data_integr:\n:param key:", "id": "f14221:c1:m5"} {"signature": "def busca_errores_data(self):", "body": "data_busqueda = 
self.append_delta_index(TS_DATA_DEM, data_delta=self.data[self.masterkey].copy())idx_desconex = (((data_busqueda.index < '') & (data_busqueda.index >= self.DATE_INI)) &((data_busqueda.delta_T > ) | data_busqueda[''].isnull() |data_busqueda[''].isnull() | data_busqueda[''].isnull()))sosp = data_busqueda[idx_desconex].copy()assert len(sosp) == return pd.DataFrame()", "docstring": "Busca errores o inconsistencias en los datos adquiridos\n:return: Dataframe de errores encontrados", "id": "f14221:c1:m6"} {"signature": "def pvpc_url_dia(dt_day):", "body": "if type(dt_day) is str:return SERVER + '' + '' + dt_dayelse:return SERVER + '' + '' + dt_day.date().isoformat()", "docstring": "Obtiene la url de descarga de los datos de PVPC de un d\u00eda concreto.\n\n Anteriormente era: 'http://www.esios.ree.es/Solicitar?fileName=pvpcdesglosehorario_' + str_dia\n + '&fileType=xml&idioma=es', pero ahora es en JSON y requiere token_auth en headers.", "id": "f14223:m0"} {"signature": "def pvpc_calc_tcu_cp_feu_d(df, verbose=True, convert_kwh=True):", "body": "if '' + TARIFAS[] not in df.columns:if convert_kwh:cols_mwh = [c + t for c in COLS_PVPC for t in TARIFAS if c != '']df[cols_mwh] = df[cols_mwh].applymap(lambda x: x / )gb_t = df.groupby(lambda x: TARIFAS[np.argmax([t in x for t in TARIFAS])], axis=)for k, g in gb_t:if verbose:print(''.format(k))print(g.head())df[''.format(k)] = g[k] - g[''.format(k)]cols_cp = [c + k for c in COLS_PVPC if c not in ['', '', '']]df[''.format(k)] = g[cols_cp].sum(axis=)cols_k = ['' + k, '' + k, '' + k]g = df[cols_k].groupby('' + k)pr = g.apply(lambda x: x['' + k].dot(x['' + k]) / x['' + k].sum())pr.name = '' + kdf = df.join(pr, on='' + k, rsuffix='')df['' + k] += df['' + k]return df", "docstring": "Procesa TCU, CP, FEU diario.\n\n :param df:\n :param verbose:\n :param convert_kwh:\n :return:", "id": "f14223:m1"} {"signature": "def pvpc_procesa_datos_dia(_, response, verbose=True):", "body": "try:d_data = response['']df = _process_json_pvpc_hourly_data(pd.DataFrame(d_data))return df, except Exception as e:if verbose:print(''.format(e))return None, -", "docstring": "Procesa la informaci\u00f3n JSON descargada y forma el dataframe de los datos de un d\u00eda.", "id": "f14223:m3"} {"signature": "def pvpc_data_dia(str_dia, str_dia_fin=None):", "body": "params = {'': DATE_FMT, '': False,'': pvpc_procesa_datos_dia, '': pvpc_url_dia,'': {'': True, '': HEADERS}}if str_dia_fin is not None:params[''] = Truedata, hay_errores, str_import = get_data_en_intervalo(str_dia, str_dia_fin, **params)else:data, hay_errores, str_import = get_data_en_intervalo(str_dia, str_dia, **params)if not hay_errores:return dataelse:return str_import", "docstring": "Obtiene datos de PVPC en un d\u00eda concreto o un intervalo, accediendo directamente a la web.", "id": "f14223:m4"} {"signature": "def close(self):", "body": "self.env = None", "docstring": "Close environment. No other method calls possible afterwards.", "id": "f14224:c0:m2"} {"signature": "def seed(self, seed): ", "body": "if seed is None:self.env.seed = round(time.time())else:self.env.seed = seedreturn self.env.seed", "docstring": "Sets the random seed of the environment to the given value (current time, if seed=None).\nNaturally deterministic Environments (e.g. 
ALE or some gym Envs) don't have to implement this method.\n\nArgs:\n seed (int): The seed to use for initializing the pseudo-random number generator (default=epoch time in sec).\nReturns: The actual seed (int) used OR None if Environment did not override this method (no seeding supported).", "id": "f14224:c0:m3"} {"signature": "def reset(self):", "body": "self.env.reset_game()return self.env.getScreenRGB()", "docstring": "Reset environment and setup for new episode.\n\nReturns:\n initial state of reset environment.", "id": "f14224:c0:m4"} {"signature": "def execute(self, action):", "body": "if self.env.game_over():return self.env.getScreenRGB(), True, action_space = self.env.getActionSet()reward = self.env.act(action_space[action])new_state = self.env.getScreenRGB()done = self.env.game_over()return new_state, done, reward", "docstring": "Executes action, observes next state and reward.\n\nArgs:\n actions: Action to execute.\n\nReturns:\n (Dict of) next state(s), boolean indicating terminal, and reward signal.", "id": "f14224:c0:m5"} {"signature": "@propertydef states(self):", "body": "screen = self.env.getScreenRGB()return dict(shape=screen.shape, type='')", "docstring": "Return the state space. Might include subdicts if multiple states are\navailable simultaneously.\n\nReturns: dict of state properties (shape and type).", "id": "f14224:c0:m6"} {"signature": "@propertydef actions(self):", "body": "return dict(num_actions=len(self.env.getActionSet()), type='')", "docstring": "Return the action space. Might include subdicts if multiple actions are\navailable simultaneously.\n\nReturns: dict of action properties (continuous, number of actions)", "id": "f14224:c0:m7"} {"signature": "def __init__(self, mode_id=, visible=True):", "body": "self.mode_id = int(mode_id)self.engine = mx.MazeExplorer(mode_id, visible)", "docstring": "Initialize MazeExplorer.\n\nArgs:\n mode_id: Game mode ID. See https://github.com/mryellow/maze_explorer\n visible: Show output window", "id": "f14225:c0:m0"} {"signature": "def __init__(self, state=None, initial_score=):", "body": "self._score = initial_scoreif state is None:self._state = np.zeros((, ), dtype=np.int)self.add_random_tile()self.add_random_tile()else:self._state = state", "docstring": "Init the Game object.\n Args:\n state: Shape (4, 4) numpy array to initialize the state with. 
If None,\n the state will be initialized with with two random tiles (as done\n in the original game).\n initial_score: Score to initialize the Game with.", "id": "f14226:c0:m6"} {"signature": "def copy(self):", "body": "return Game2048(np.copy(self._state), self._score)", "docstring": "Return a copy of self.", "id": "f14226:c0:m7"} {"signature": "def game_over(self):", "body": "for action in range():if self.is_action_available(action):return Falsereturn True", "docstring": "Whether the game is over.", "id": "f14226:c0:m8"} {"signature": "def available_actions(self):", "body": "return [action for action in range() if self.is_action_available(action)]", "docstring": "Computes the set of actions that are available.", "id": "f14226:c0:m9"} {"signature": "def is_action_available(self, action):", "body": "temp_state = np.rot90(self._state, action)return self._is_action_available_left(temp_state)", "docstring": "Determines whether action is available.\n That is, executing it would change the state.", "id": "f14226:c0:m10"} {"signature": "def _is_action_available_left(self, state):", "body": "for row in range():has_empty = Falsefor col in range():has_empty |= state[row, col] == if state[row, col] != and has_empty:return Trueif (state[row, col] != and col > andstate[row, col] == state[row, col - ]):return Truereturn False", "docstring": "Determines whether action 'Left' is available.", "id": "f14226:c0:m11"} {"signature": "def do_action(self, action):", "body": "temp_state = np.rot90(self._state, action)reward = self._do_action_left(temp_state)self._state = np.rot90(temp_state, -action)self._score += rewardself.add_random_tile()return reward", "docstring": "Execute action, add a new tile, update the score & return the reward.", "id": "f14226:c0:m12"} {"signature": "def _do_action_left(self, state):", "body": "reward = for row in range():merge_candidate = -merged = np.zeros((,), dtype=np.bool)for col in range():if state[row, col] == :continueif (merge_candidate != - andnot merged[merge_candidate] andstate[row, merge_candidate] == state[row, col]):state[row, col] = merged[merge_candidate] = Truestate[row, merge_candidate] += reward += ** state[row, merge_candidate]else:merge_candidate += if col != merge_candidate:state[row, merge_candidate] = state[row, col]state[row, col] = return reward", "docstring": "Executes action 'Left'.", "id": "f14226:c0:m13"} {"signature": "def add_random_tile(self):", "body": "x_pos, y_pos = np.where(self._state == )assert len(x_pos) != empty_index = np.random.choice(len(x_pos))value = np.random.choice([, ], p=[, ])self._state[x_pos[empty_index], y_pos[empty_index]] = value", "docstring": "Adds a random tile to the grid. 
Assumes that it has empty fields.", "id": "f14226:c0:m14"} {"signature": "def print_state(self):", "body": "def tile_string(value):\"\"\"\"\"\"if value > :return '' % ( ** value,)return \"\"separator_line = '' * print(separator_line)for row in range():print(\"\" + \"\".join([tile_string(v) for v in self._state[row, :]]) + \"\")print(separator_line)", "docstring": "Prints the current state.", "id": "f14226:c0:m15"} {"signature": "def state(self):", "body": "return self._state", "docstring": "Return current state.", "id": "f14226:c0:m16"} {"signature": "def score(self):", "body": "return self._score", "docstring": "Return current score.", "id": "f14226:c0:m17"} {"signature": "def __init__(self, host=\"\", port=):", "body": "Environment.__init__(self)self.port = int(port) or self.host = host or \"\"self.socket = Noneself.buffer_size = self.last_observation = None", "docstring": "A remote Environment that one can connect to through tcp.\nImplements a simple msgpack protocol to get the step/reset/etc.. commands to the\nremote server and simply waits (blocks) for a response.\n\nArgs:\n host (str): The hostname to connect to.\n port (int): The port to connect to.", "id": "f14227:c0:m0"} {"signature": "def close(self):", "body": "self.disconnect()", "docstring": "Same as disconnect method.", "id": "f14227:c0:m2"} {"signature": "def connect(self, timeout=):", "body": "if self.socket:raise TensorForceError(\"\" +\"\".format(self.host, self.port))self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)if timeout < or timeout is None:timeout = err = start_time = time.time()while time.time() - start_time < timeout:self.socket.settimeout()err = self.socket.connect_ex((self.host, self.port))if err == :breaktime.sleep()if err != :raise TensorForceError(\"\".format(self.host, self.port, err, errno.errorcode[err], os.strerror(err)))", "docstring": "Starts the server tcp connection on the given host:port.\n\nArgs:\n timeout (int): The time (in seconds) for which we will attempt a connection to the remote\n (every 5sec). After that (or if timeout is None or 0), an error is raised.", "id": "f14227:c0:m3"} {"signature": "def disconnect(self):", "body": "if not self.socket:logging.warning(\"\")returnself.socket.close()self.socket = None", "docstring": "Ends our server tcp connection.", "id": "f14227:c0:m4"} {"signature": "def __init__(self, max_msg_len=):", "body": "self.max_msg_len = max_msg_lenmnp.patch()", "docstring": "Args:\n max_msg_len (int): The maximum number of bytes to read from the socket.", "id": "f14227:c1:m0"} {"signature": "def send(self, message, socket_):", "body": "if not socket_:raise TensorForceError(\"\")elif not isinstance(message, dict):raise TensorForceError(\"\")message = msgpack.packb(message)len_ = len(message)socket_.send(bytes(\"\".format(len_), encoding=\"\") + message)", "docstring": "Sends a message (dict) to the socket. Message consists of a 8-byte len header followed by a msgpack-numpy\n encoded dict.\n\nArgs:\n message: The message dict (e.g. 
{\"cmd\": \"reset\"})\n socket_: The python socket object to use.", "id": "f14227:c1:m1"} {"signature": "def recv(self, socket_, encoding=None):", "body": "unpacker = msgpack.Unpacker(encoding=encoding)response = socket_.recv() if response == b\"\":raise TensorForceError(\"\" +\"\")orig_len = int(response)received_len = while True:data = socket_.recv(min(orig_len - received_len, self.max_msg_len))if not data:raise TensorForceError(\"\".format(orig_len - received_len))data_len = len(data)received_len += data_lenunpacker.feed(data)if received_len == orig_len:breakfor message in unpacker:sts = message.get(\"\", message.get(b\"\"))if sts:if sts == \"\" or sts == b\"\":return messageelse:raise TensorForceError(\"\".format(message.get(\"\", \"\")))else:raise TensorForceError(\"\")raise TensorForceError(\"\".format(orig_len))", "docstring": "Receives a message as msgpack-numpy encoded byte-string from the given socket object.\nBlocks until something was received.\n\nArgs:\n socket_: The python socket object to use.\n encoding (str): The encoding to use for unpacking messages from the socket.\nReturns: The decoded (as dict) message received.", "id": "f14227:c1:m2"} {"signature": "def __init__(self,host=\"\",port=,connect=True,discretize_actions=False,delta_time=/,num_ticks=):", "body": "RemoteEnvironment.__init__(self, host, port)self.game_name = Noneself.action_space_desc = Noneself.observation_space_desc = Noneself.discretize_actions = discretize_actionsself.discretized_actions = Noneself.delta_time = delta_timeself.num_ticks = num_ticksself.protocol = MsgPackNumpyProtocol()if connect:self.connect()", "docstring": "Args:\n host (str): The hostname to connect to.\n port (int): The port to connect to.\n connect (bool): Whether to connect already in this c'tor.\n discretize_actions (bool): Whether to treat axis-mappings defined in UE4 game as discrete actions.\n This would be necessary e.g. for agents that use q-networks where the output are q-values per discrete\n state-action pair.\n delta_time (float): The fake delta time to use for each single game tick.\n num_ticks (int): The number of ticks to be executed in a single act call (each tick will\n repeat the same given actions).", "id": "f14228:c0:m0"} {"signature": "def reset(self):", "body": "self.protocol.send({\"\": \"\"}, self.socket)response = self.protocol.recv(self.socket)return self.extract_observation(response)", "docstring": "same as step (no kwargs to pass), but needs to block and return observation_dict\n- stores the received observation in self.last_observation", "id": "f14228:c0:m4"} {"signature": "def execute(self, action):", "body": "action_mappings, axis_mappings = [], []if self.discretize_actions:combination = self.discretized_actions[action]for key, value in combination:if isinstance(value, bool):action_mappings.append((key, value))else:axis_mappings.append((key, value))elif action:try:action_mappings, axis_mappings = self.translate_abstract_actions_to_keys(action)except KeyError as e:raise TensorForceError(\"\".format(e))message = dict(cmd=\"\",delta_time=self.delta_time,num_ticks=self.num_ticks,actions=action_mappings,axes=axis_mappings)self.protocol.send(message, self.socket)response = self.protocol.recv(self.socket)r = response.pop(b\"\", )is_terminal = response.pop(b\"\", False)obs = self.extract_observation(response)self.last_observation = obsreturn obs, is_terminal, r", "docstring": "Executes a single step in the UE4 game. 
This step may be comprised of one or more actual game ticks for all of\nwhich the same given\naction- and axis-inputs (or action number in case of discretized actions) are repeated.\nUE4 distinguishes between action-mappings, which are boolean actions (e.g. jump or dont-jump) and axis-mappings,\nwhich are continuous actions\nlike MoveForward with values between -1.0 (run backwards) and 1.0 (run forwards), 0.0 would mean: stop.", "id": "f14228:c0:m6"} {"signature": "def translate_abstract_actions_to_keys(self, abstract):", "body": "if len(abstract) >= and not isinstance(abstract[], (list, tuple)):abstract = list((abstract,))actions, axes = [], []for a in abstract:first_key = self.action_space_desc[a[]][\"\"][]if isinstance(first_key, (bytes, str)):actions.append((first_key, a[]))elif isinstance(first_key, tuple):axes.append((first_key[], a[] * first_key[]))else:raise TensorForceError(\"\".format(a[]))return actions, axes", "docstring": "Translates a list of tuples ([pretty mapping], [value]) to a list of tuples ([some key], [translated value])\neach single item in abstract will undergo the following translation:\n\nExample1:\nwe want: \"MoveRight\": 5.0\npossible keys for the action are: (\"Right\", 1.0), (\"Left\", -1.0)\nresult: \"Right\": 5.0 * 1.0 = 5.0\n\nExample2:\nwe want: \"MoveRight\": -0.5\npossible keys for the action are: (\"Left\", -1.0), (\"Right\", 1.0)\nresult: \"Left\": -0.5 * -1.0 = 0.5 (same as \"Right\": -0.5)", "id": "f14228:c0:m9"} {"signature": "def discretize_action_space_desc(self):", "body": "unique_list = []for nice, record in self.action_space_desc.items():list_for_record = []if record[\"\"] == \"\":head_key = record[\"\"][][]head_value = record[\"\"][][]list_for_record.append((head_key, ))set_ = set()for key_and_scale in self.action_space_desc[nice][\"\"]:if key_and_scale[] not in set_:list_for_record.append((head_key, key_and_scale[] / head_value))set_.add(key_and_scale[])else:list_for_record = [(record[\"\"][], False), (record[\"\"][], True)]unique_list.append(list_for_record)def so(in_):st = \"\"for i in in_:st += str(i[])return stcombinations = list(itertools.product(*unique_list))combinations = list(map(lambda x: sorted(list(x), key=lambda y: y[]), combinations))combinations = sorted(combinations, key=so)self.discretized_actions = combinations", "docstring": "Creates a list of discrete action(-combinations) in case we want to learn with a discrete set of actions,\nbut only have action-combinations (maybe even continuous) available from the env.\nE.g. 
the UE4 game has the following action/axis-mappings:\n\n```javascript\n{\n'Fire':\n {'type': 'action', 'keys': ('SpaceBar',)},\n'MoveRight':\n {'type': 'axis', 'keys': (('Right', 1.0), ('Left', -1.0), ('A', -1.0), ('D', 1.0))},\n}\n```\n\n-> this method will discretize them into the following 6 discrete actions:\n\n```javascript\n[\n[(Right, 0.0),(SpaceBar, False)],\n[(Right, 0.0),(SpaceBar, True)]\n[(Right, -1.0),(SpaceBar, False)],\n[(Right, -1.0),(SpaceBar, True)],\n[(Right, 1.0),(SpaceBar, False)],\n[(Right, 1.0),(SpaceBar, True)],\n]\n```", "id": "f14228:c0:m10"} {"signature": "def sanity_check_states(states_spec):", "body": "states = copy.deepcopy(states_spec)is_unique = ('' in states)if is_unique:states = dict(state=states)for name, state in states.items():if isinstance(state[''], int):state[''] = (state[''],)if '' not in state:state[''] = ''return states, is_unique", "docstring": "Sanity checks a states dict, used to define the state space for an MDP.\nThrows an error or warns if mismatches are found.\n\nArgs:\n states_spec (Union[None,dict]): The spec-dict to check (or None).\n\nReturns: Tuple of 1) the state space desc and 2) whether there is only one component in the state space.", "id": "f14229:m0"} {"signature": "def sanity_check_actions(actions_spec):", "body": "actions = copy.deepcopy(actions_spec)is_unique = ('' in actions)if is_unique:actions = dict(action=actions)for name, action in actions.items():if '' not in action:action[''] = ''if action[''] == '':if '' not in action:raise TensorForceError(\"\")elif action[''] == '':if ('' in action) != ('' in action):raise TensorForceError(\"\")if '' not in action:action[''] = ()if isinstance(action[''], int):action[''] = (action[''],)return actions, is_unique", "docstring": "Sanity checks an actions dict, used to define the action space for an MDP.\nThrows an error or warns if mismatches are found.\n\nArgs:\n actions_spec (Union[None,dict]): The spec-dict to check (or None).\n\nReturns: Tuple of 1) the action space desc and 2) whether there is only one component in the action space.", "id": "f14229:m1"} {"signature": "def sanity_check_execution_spec(execution_spec):", "body": "def_ = dict(type=\"\",distributed_spec=None,session_config=None)if execution_spec is None:return def_assert isinstance(execution_spec, dict), \"\".format(type(execution_spec).__name__)type_ = execution_spec.get(\"\")if type_ == \"\":def_ = dict(job=\"\", task_index=, cluster_spec={\"\": [\"\"],\"\": [\"\"]})def_.update(execution_spec.get(\"\", {}))execution_spec[\"\"] = def_execution_spec[\"\"] = execution_spec.get(\"\")return execution_specelif type_ == \"\":return execution_specelif type_ == \"\":return execution_specif execution_spec.get('') != None:assert type(execution_spec['']) is int, \"\".format(type(execution_spec['']).__name__)assert execution_spec[''] > , \"\".format(execution_spec[''])return execution_specraise TensorForceError(\"\".format(type_))", "docstring": "Sanity checks a execution_spec dict, used to define execution logic (distributed vs single, shared memories, etc..)\nand distributed learning behavior of agents/models.\nThrows an error or warns if mismatches are found.\n\nArgs:\n execution_spec (Union[None,dict]): The spec-dict to check (or None). 
Dict needs to have the following keys:\n - type: \"single\", \"distributed\"\n - distributed_spec: The distributed_spec dict with the following fields:\n - cluster_spec: TensorFlow ClusterSpec object (required).\n - job: The tf-job name.\n - task_index: integer (required).\n - protocol: communication protocol (default: none, i.e. 'grpc').\n - session_config: dict with options for a TensorFlow ConfigProto object (default: None).\n\nReturns: A cleaned-up (in-place) version of the given execution-spec.", "id": "f14229:m2"} {"signature": "def close(self):", "body": "self.env = None", "docstring": "Close environment. No other method calls possible afterwards.", "id": "f14230:c0:m2"} {"signature": "def seed(self, seed):", "body": "return self.seed(seed)[]", "docstring": "Sets the random seed of the environment to the given value (current time, if seed=None).\nNaturally deterministic Environments (e.g. ALE or some gym Envs) don't have to implement this method.\n\nArgs:\n seed (int): The seed to use for initializing the pseudo-random number generator (default=epoch time in sec).\nReturns: The actual seed (int) used OR None if Environment did not override this method (no seeding supported).", "id": "f14230:c0:m3"} {"signature": "def reset(self):", "body": "return self.env.reset()", "docstring": "Reset environment and setup for new episode.\n\nReturns:\n initial state of reset environment.", "id": "f14230:c0:m4"} {"signature": "def execute(self, action):", "body": "next_state, rew, done, _ = self.env.step(action)return next_state, rew, done", "docstring": "Executes action, observes next state and reward.\n\nArgs:\n actions: Actions to execute.\n\nReturns:\n Tuple of (next state, bool indicating terminal, reward)", "id": "f14230:c0:m5"} {"signature": "def __init__(self,rom,frame_skip=,repeat_action_probability=,loss_of_life_termination=False,loss_of_life_reward=,display_screen=False,seed=np.random.RandomState()):", "body": "self.ale = ALEInterface()self.rom = romself.ale.setBool(b'', display_screen)self.ale.setInt(b'', seed.randint(, ))self.ale.setFloat(b'', repeat_action_probability)self.ale.setBool(b'', False)self.ale.setInt(b'', frame_skip)self.ale.loadROM(rom.encode())width, height = self.ale.getScreenDims()self.gamescreen = np.empty((height, width, ), dtype=np.uint8)self.frame_skip = frame_skipself.action_inds = self.ale.getMinimalActionSet()self.loss_of_life_reward = loss_of_life_rewardself.cur_lives = self.ale.lives()self.loss_of_life_termination = loss_of_life_terminationself.life_lost = False", "docstring": "Initialize ALE.\n\nArgs:\n rom: Rom filename and directory.\n frame_skip: Repeat action for n frames. Default 1.\n repeat_action_probability: Repeats last action with given probability. Default 0.\n loss_of_life_termination: Signals a terminal state on loss of life. Default False.\n loss_of_life_reward: Reward/Penalty on loss of life (negative values are a penalty). Default 0.\n display_screen: Displays the emulator screen. Default False.\n seed: Random seed", "id": "f14231:c0:m0"} {"signature": "def __init__(self, env_id):", "body": "self.env_id = env_idself.env = gym.make(env_id)", "docstring": "Initialize OpenAI universe environment.\n\nArgs:\n env_id: string with id/descriptor of the universe environment, e.g. 
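The thin wrapper pattern documented above (adapting a Gym-style `step()` that returns `(obs, reward, done, info)` to a `reset()`/`execute()`/`close()` interface) can be sketched as follows. Note that the record's body returns the tuple in a slightly different order than its docstring states; the sketch follows the docstring's `(state, terminal, reward)` convention. Class and method names here are illustrative.

```python
class GymStyleWrapper:
    """Minimal sketch of wrapping a Gym-like env behind reset()/execute()/close()."""

    def __init__(self, env):
        self.env = env

    def reset(self):
        # New episode: return only the initial state.
        return self.env.reset()

    def execute(self, action):
        # Gym returns (obs, reward, done, info); reorder to (state, terminal, reward).
        next_state, reward, done, _ = self.env.step(action)
        return next_state, done, reward

    def close(self):
        # Drop the reference so no further calls are possible.
        self.env = None


class _DummyEnv:
    def reset(self):
        return 0.0
    def step(self, action):
        return 0.0, 1.0, True, {}

wrapper = GymStyleWrapper(_DummyEnv())
print(wrapper.reset(), wrapper.execute(action=None))
```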
'HarvestDay-v0'.", "id": "f14232:c0:m0"} {"signature": "def _int_to_pos(self, flat_position):", "body": "return flat_position % self.env.action_space.screen_shape[],flat_position % self.env.action_space.screen_shape[]", "docstring": "Returns x, y from flat_position integer.\n\n Args:\n flat_position: flattened position integer\n\n Returns: x, y", "id": "f14232:c0:m6"} {"signature": "def _wait_state(self, state, reward, terminal):", "body": "while state == [None] or not state:state, terminal, reward = self._execute(dict(key=))return state, terminal, reward", "docstring": "Wait until there is a state.", "id": "f14232:c0:m11"} {"signature": "def __init__(self,level_id,repeat_action=,state_attribute='',settings={'': '', '': '', '': '', '': ''}):", "body": "self.level_id = level_idself.level = deepmind_lab.Lab(level=level_id, observations=[state_attribute], config=settings)self.repeat_action = repeat_actionself.state_attribute = state_attribute", "docstring": "Initialize DeepMind Lab environment.\n\nArgs:\n level_id: string with id/descriptor of the level, e.g. 'seekavoid_arena_01'.\n repeat_action: number of frames the environment is advanced, executing the given action during every frame.\n state_attribute: Attributes which represents the state for this environment, should adhere to the\n specification given in DeepMindLabEnvironment.state_spec(level_id).\n settings: dict specifying additional settings as key-value string pairs. The following options\n are recognized: 'width' (horizontal resolution of the observation frames), 'height'\n (vertical resolution of the observation frames), 'fps' (frames per second) and 'appendCommand'\n (commands for the internal Quake console).", "id": "f14233:c0:m0"} {"signature": "def close(self):", "body": "self.level.close()self.level = None", "docstring": "Closes the environment and releases the underlying Quake III Arena instance.\nNo other method calls possible afterwards.", "id": "f14233:c0:m2"} {"signature": "def reset(self):", "body": "self.level.reset() return self.level.observations()[self.state_attribute]", "docstring": "Resets the environment to its initialization state. 
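The `_int_to_pos` helper above recovers screen coordinates from a flattened position integer. A common way to do this, assumed here because the concrete expression is elided in the record, is modulo for the column and integer division for the row:

```python
def int_to_pos(flat_position, screen_width):
    """Recover (x, y) from a row-major flattened screen position (assumed layout)."""
    x = flat_position % screen_width
    y = flat_position // screen_width
    return x, y

# Example: index 7 on a 5-pixel-wide screen lands at column 2, row 1.
print(int_to_pos(7, screen_width=5))  # -> (2, 1)
```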
This method needs to be called to start a\nnew episode after the last episode ended.\n\n:return: initial state", "id": "f14233:c0:m3"} {"signature": "def execute(self, action):", "body": "adjusted_action = list()for action_spec in self.level.action_spec():if action_spec[''] == - and action_spec[''] == :adjusted_action.append(action[action_spec['']] - )else:adjusted_action.append(action[action_spec['']]) action = np.array(adjusted_action, dtype=np.intc)reward = self.level.step(action=action, num_steps=self.repeat_action)state = self.level.observations()['']terminal = not self.level.is_running()return state, terminal, reward", "docstring": "Pass action to universe environment, return reward, next step, terminal state and\nadditional info.\n\n:param action: action to execute as numpy array, should have dtype np.intc and should adhere to\n the specification given in DeepMindLabEnvironment.action_spec(level_id)\n:return: dict containing the next state, the reward, and a boolean indicating if the\n next state is a terminal state", "id": "f14233:c0:m4"} {"signature": "@propertydef num_steps(self):", "body": "return self.level.num_steps()", "docstring": "Number of frames since the last reset() call.", "id": "f14233:c0:m7"} {"signature": "@propertydef fps(self):", "body": "return self.level.fps()", "docstring": "An advisory metric that correlates discrete environment steps (\"frames\") with real\n(wallclock) time: the number of frames per (real) second.", "id": "f14233:c0:m8"} {"signature": "def __init__(self, game, ui, visualize=False):", "body": "self.game = gameself.init_game = copy.deepcopy(self.game)self.ui = uiself.visualize = visualizefirst_obs, first_reward, _ = self.game.its_showtime()self._actions = DMPycolab.get_action_space(self.ui)self._states = DMPycolab.get_state_space(first_obs, self.ui._croppers)", "docstring": "Initialize Pycolab environment.\n\nArgs:\n game: Pycolab Game Engine object. See https://github.com/deepmind/pycolab/tree/master/pycolab/examples\n ui: Pycolab CursesUI object. See https://github.com/deepmind/pycolab/tree/master/pycolab/examples\n visualize: If set True, the program will visualize the trainings of Pycolab game # TODO", "id": "f14234:c0:m0"} {"signature": "def __init__(self, gym_id, monitor=None, monitor_safe=False, monitor_video=, visualize=False):", "body": "self.gym_id = gym_idself.gym = gym.make(gym_id) self.visualize = visualizeif monitor:if monitor_video == :video_callable = Falseelse:video_callable = (lambda x: x % monitor_video == )self.gym = gym.wrappers.Monitor(self.gym, monitor, force=not monitor_safe, video_callable=video_callable)self._states = OpenAIGym.state_from_space(space=self.gym.observation_space)self._actions = OpenAIGym.action_from_space(space=self.gym.action_space)", "docstring": "Initialize OpenAI Gym.\n\nArgs:\n gym_id: OpenAI Gym environment ID. See https://gym.openai.com/envs\n monitor: Output directory. Setting this to None disables monitoring.\n monitor_safe: Setting this to True prevents existing log files to be overwritten. Default False.\n monitor_video: Save a video every monitor_video steps. Setting this to 0 disables recording of videos.\n visualize: If set True, the program will visualize the trainings of gym's environment. 
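Several of the environment adapters above expose a `repeat_action` (frame-skip) parameter: the same action is applied for several underlying frames and the per-frame rewards are summed. A minimal, library-agnostic sketch of that loop (all names are illustrative):

```python
def execute_with_repeat(step_fn, action, repeat_action=4):
    """Apply `action` for up to `repeat_action` frames, summing rewards and stopping on terminal."""
    total_reward = 0.0
    state, terminal = None, False
    for _ in range(repeat_action):
        state, terminal, reward = step_fn(action)
        total_reward += reward
        if terminal:
            break
    return state, terminal, total_reward

# Toy step function: reward 1.0 per frame, terminates on the third call.
calls = {"n": 0}
def toy_step(action):
    calls["n"] += 1
    return calls["n"], calls["n"] >= 3, 1.0

print(execute_with_repeat(toy_step, action="noop"))  # -> (3, True, 3.0)
```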
Note that such\n visualization is probabily going to slow down the training.", "id": "f14235:c0:m0"} {"signature": "def set_state(self, **kwargs):", "body": "raise NotImplementedError", "docstring": "Sets the current state of the environment manually to some other state and returns a new observation.\n\nArgs:\n **kwargs: The set instruction(s) to be executed by the environment.\n A single set instruction usually set a single property of the\n state/observation vector to some new value.\nReturns: The observation dictionary of the Environment after(!) setting it to the new state.", "id": "f14236:c0:m0"} {"signature": "def __init__(self,config_file):", "body": "self.game = DoomGame()self.game.load_config(config_file)self.game.init()self.state_shape = self.featurize(self.game.get_state()).shapeself.num_actions = len(self.game.get_available_buttons())", "docstring": "Initialize ViZDoom environment.\n\nArgs:\n config_file: .cfg file path, which defines how a world works and look like (maps)", "id": "f14237:c0:m0"} {"signature": "def __init__(self,env_id,visualize=False):", "body": "envs = [Arm2DEnv,L2RunEnv,ProstheticsEnv]self.env = envs[env_id](visualize=visualize)self.state_shape = len(self.env.reset())self.num_actions = len(self.env.action_space.sample())", "docstring": "Initialize OpenSimulator environment.\n\nArgs:\n visualize: render enviroment\n env: environment id to use ([0:Arm2DEnv, 1:L2RunEnv, 2:ProstheticsEnv])", "id": "f14238:c0:m0"} {"signature": "def prod(xs):", "body": "p = for x in xs:p *= xreturn p", "docstring": "Computes the product along the elements in an iterable. Returns 1 for empty iterable.\n\n Args:\n xs: Iterable containing numbers.\n\n Returns: Product along iterable.", "id": "f14239:m0"} {"signature": "def np_dtype(dtype):", "body": "if dtype == '' or dtype == float or dtype == np.float32 or dtype == tf.float32:return np.float32elif dtype == np.float64 or dtype == tf.float64:return np.float64elif dtype == np.float16 or dtype == tf.float16:return np.float16elif dtype == '' or dtype == int or dtype == np.int32 or dtype == tf.int32:return np.int32elif dtype == np.int64 or dtype == tf.int64:return np.int64elif dtype == np.int16 or dtype == tf.int16:return np.int16elif dtype == '' or dtype == bool or dtype == np.bool_ or dtype == tf.bool:return np.bool_else:raise TensorForceError(\"\".format(str(dtype)))", "docstring": "Translates dtype specifications in configurations to numpy data types.\n Args:\n dtype: String describing a numerical type (e.g. 'float') or numerical type primitive.\n\n Returns: Numpy data type", "id": "f14239:m3"} {"signature": "def tf_dtype(dtype):", "body": "if dtype == '' or dtype == float or dtype == np.float32 or dtype == tf.float32:return tf.float32elif dtype == np.float64 or dtype == tf.float64:return tf.float64elif dtype == np.float16 or dtype == tf.float16:return tf.float16elif dtype == '' or dtype == int or dtype == np.int32 or dtype == tf.int32:return tf.int32elif dtype == np.int64 or dtype == tf.int64:return tf.int64elif dtype == np.int16 or dtype == tf.int16:return tf.int16elif dtype == '' or dtype == bool or dtype == np.bool_ or dtype == tf.bool:return tf.boolelse:raise TensorForceError(\"\".format(str(dtype)))", "docstring": "Translates dtype specifications in configurations to tensorflow data types.\n\n Args:\n dtype: String describing a numerical type (e.g. 
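The `np_dtype`/`tf_dtype` helpers above normalize loosely specified types (a string such as 'float', the Python builtins `float`/`int`/`bool`, or concrete numpy/TensorFlow dtypes) into one canonical dtype. A numpy-only sketch of the same idea; the accepted spellings are assumptions based on the docstrings:

```python
import numpy as np

def np_dtype(dtype):
    """Translate a loose dtype spec into a numpy dtype (sketch of the mapping described above)."""
    if dtype in ('float', float, np.float32):
        return np.float32
    if dtype in ('int', int, np.int32):
        return np.int32
    if dtype in ('bool', bool, np.bool_):
        return np.bool_
    raise ValueError("Unknown dtype spec: {}".format(dtype))

print(np_dtype('float'), np_dtype(int), np_dtype(bool))
```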
'float'), numpy data type,\n or numerical type primitive.\n\n Returns: TensorFlow data type", "id": "f14239:m4"} {"signature": "def get_tensor_dependencies(tensor):", "body": "dependencies = set()dependencies.update(tensor.op.inputs)for sub_op in tensor.op.inputs:dependencies.update(get_tensor_dependencies(sub_op))return dependencies", "docstring": "Utility method to get all dependencies (including placeholders) of a tensor (backwards through the graph).\n\nArgs:\n tensor (tf.Tensor): The input tensor.\n\nReturns: Set of all dependencies (including needed placeholders) for the input tensor.", "id": "f14239:m6"} {"signature": "def get_object(obj, predefined_objects=None, default_object=None, kwargs=None):", "body": "args = ()kwargs = dict() if kwargs is None else kwargsif isinstance(obj, str) and os.path.isfile(obj):with open(obj, '') as fp:obj = json.load(fp=fp)if isinstance(obj, dict):kwargs.update(obj)obj = kwargs.pop('', None)if predefined_objects is not None and obj in predefined_objects:obj = predefined_objects[obj]elif isinstance(obj, str):if obj.find('') != -:module_name, function_name = obj.rsplit('', )module = importlib.import_module(module_name)obj = getattr(module, function_name)else:raise TensorForceError(\"\".format(obj,list(predefined_objects or ())))elif callable(obj):passelif default_object is not None:args = (obj,)obj = default_objectelse:return objreturn obj(*args, **kwargs)", "docstring": "Utility method to map some kind of object specification to its content,\ne.g. optimizer or baseline specifications to the respective classes.\n\nArgs:\n obj: A specification dict (value for key 'type' optionally specifies\n the object, options as follows), a module path (e.g.,\n my_module.MyClass), a key in predefined_objects, or a callable\n (e.g., the class type object).\n predefined_objects: Dict containing predefined set of objects,\n accessible via their key\n default_object: Default object is no other is specified\n kwargs: Arguments for object creation\n\nReturns: The retrieved object", "id": "f14239:m7"} {"signature": "def prepare_kwargs(raw, string_parameter=''):", "body": "kwargs = dict()if isinstance(raw, dict):kwargs.update(raw)elif isinstance(raw, str):kwargs[string_parameter] = rawreturn kwargs", "docstring": "Utility method to convert raw string/diction input into a dictionary to pass\ninto a function. Always returns a dictionary.\n\nArgs:\n raw: string or dictionary, string is assumed to be the name of the activation\n activation function. 
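The `get_object` utility above resolves several kinds of specifications (a dict carrying a 'type' key, a dotted module path, a key into a set of predefined objects, or a callable) into an instantiated object. A reduced sketch of that dispatch using `importlib`; the JSON-file branch and most error handling are omitted, and the function name is illustrative:

```python
import importlib

def from_spec(spec, predefined=None, kwargs=None):
    """Resolve a spec (dict with 'type', dotted path, predefined key, or callable) into an object."""
    predefined = predefined or {}
    kwargs = dict(kwargs or {})
    if isinstance(spec, dict):
        kwargs.update(spec)
        spec = kwargs.pop('type', None)
    if spec in predefined:
        cls = predefined[spec]
    elif isinstance(spec, str) and '.' in spec:
        module_name, attr = spec.rsplit('.', 1)
        cls = getattr(importlib.import_module(module_name), attr)
    elif callable(spec):
        cls = spec
    else:
        raise ValueError("Cannot resolve spec: {!r}".format(spec))
    return cls(**kwargs)

# Resolve an object from a dotted path, passing constructor kwargs through.
od = from_spec({'type': 'collections.OrderedDict'}, kwargs={'a': 1})
print(od)  # -> an OrderedDict containing {'a': 1}
```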
Dictionary will be passed through unchanged.\n\nReturns: kwargs dictionary for **kwargs", "id": "f14239:m8"} {"signature": "def register_saver_ops(self):", "body": "variables = self.get_savable_variables()if variables is None or len(variables) == :self._saver = Nonereturnbase_scope = self._get_base_variable_scope()variables_map = {strip_name_scope(v.name, base_scope): v for v in variables}self._saver = tf.train.Saver(var_list=variables_map,reshape=False,sharded=False,max_to_keep=,keep_checkpoint_every_n_hours=,name=None,restore_sequentially=False,saver_def=None,builder=None,defer_build=False,allow_empty=True,write_version=tf.train.SaverDef.V2,pad_step_number=False,save_relative_paths=True)", "docstring": "Registers the saver operations to the graph in context.", "id": "f14239:c0:m0"} {"signature": "def get_savable_variables(self):", "body": "raise NotImplementedError()", "docstring": "Returns the list of all the variables this component is responsible to save and restore.\n\nReturns:\n The list of variables that will be saved or restored.", "id": "f14239:c0:m1"} {"signature": "def save(self, sess, save_path, timestep=None):", "body": "if self._saver is None:raise TensorForceError(\"\")return self._saver.save(sess=sess,save_path=save_path,global_step=timestep,write_meta_graph=False,write_state=True, )", "docstring": "Saves this component's managed variables.\n\nArgs:\n sess: The session for which to save the managed variables.\n save_path: The path to save data to.\n timestep: Optional, the timestep to append to the file name.\n\nReturns:\n Checkpoint path where the model was saved.", "id": "f14239:c0:m2"} {"signature": "def restore(self, sess, save_path):", "body": "if self._saver is None:raise TensorForceError(\"\")self._saver.restore(sess=sess, save_path=save_path)", "docstring": "Restores the values of the managed variables from disk location.\n\nArgs:\n sess: The session for which to save the managed variables.\n save_path: The path used to save the data to.", "id": "f14239:c0:m3"} {"signature": "def _get_base_variable_scope(self):", "body": "raise NotImplementedError()", "docstring": "Returns the portion of the variable scope that is considered a base for this component. 
The variables will be\nsaved with names relative to that scope.\n\nReturns:\n The name of the base variable scope, should always end with \"/\".", "id": "f14239:c0:m4"} {"signature": "def WorkerAgentGenerator(agent_class):", "body": "if isinstance(agent_class, str):agent_class = AgentsDictionary.get(agent_class)if not agent_class and agent_class.find('') != -:module_name, function_name = agent_class.rsplit('', )module = importlib.import_module(module_name)agent_class = getattr(module, function_name)class WorkerAgent(agent_class):\"\"\"\"\"\"def __init__(self, model=None, **kwargs):self.model = modelif not issubclass(agent_class, LearningAgent):kwargs.pop(\"\")super(WorkerAgent, self).__init__(**kwargs)def initialize_model(self):return self.modelreturn WorkerAgent", "docstring": "Worker Agent generator, receives an Agent class and creates a Worker Agent class that inherits from that Agent.", "id": "f14240:m0"} {"signature": "def clone_worker_agent(agent, factor, environment, network, agent_config):", "body": "ret = [agent]for i in xrange(factor - ):worker = WorkerAgentGenerator(type(agent))(states=environment.states,actions=environment.actions,network=network,model=agent.model,**agent_config)ret.append(worker)return ret", "docstring": "Clones a given Agent (`factor` times) and returns a list of the cloned Agents with the original Agent\nin the first slot.\n\nArgs:\n agent (Agent): The Agent object to clone.\n factor (int): The length of the final list.\n environment (Environment): The Environment to use for all cloned agents.\n network (LayeredNetwork): The Network to use (or None) for an Agent's Model.\n agent_config (dict): A dict of Agent specifications passed into the Agent's c'tor as kwargs.\nReturns:\n The list with `factor` cloned agents (including the original one).", "id": "f14240:m1"} {"signature": "def __init__(self, agent, environment, repeat_actions=, save_path=None, save_episodes=None, save_frequency=None,save_frequency_unit=None, agents=None, environments=None):", "body": "if agents is not None:warnings.warn(\"\",category=DeprecationWarning)agent = agentsif environments is not None:warnings.warn(\"\",category=DeprecationWarning)environment = environmentssuper(ThreadedRunner, self).__init__(agent, environment, repeat_actions)if len(agent) != len(environment):raise TensorForceError(\"\".format(a=len(self.agent), e=len(self.environment)))self.save_path = save_pathself.save_episodes = save_episodesif self.save_episodes is not None:warnings.warn(\"\"\"\",category=DeprecationWarning)self.save_frequency = self.save_episodesself.save_frequency_unit = \"\"else:self.save_frequency = save_frequencyself.save_frequency_unit = save_frequency_unitself.episode_list_lock = threading.Lock()self.should_stop = Falseself.time = None", "docstring": "Initialize a ThreadedRunner object.\n\nArgs:\n save_path (str): Path where to save the shared model.\n save_episodes (int): Deprecated: Every how many (global) episodes do we save the shared model?\n save_frequency (int): The frequency with which to save the model (could be sec, steps, or episodes).\n save_frequency_unit (str): \"s\" (sec), \"t\" (timesteps), \"e\" (episodes)\n agents (List[Agent]): Deprecated: List of Agent objects. Use `agent`, instead.\n environments (List[Environment]): Deprecated: List of Environment objects. 
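The `WorkerAgentGenerator`/`clone_worker_agent` pair above dynamically derives a worker class from an agent class so that every clone reuses one externally supplied, shared model instead of building its own. A pure-Python sketch of that pattern; the `Agent` class and attribute names are placeholders:

```python
def make_worker_class(agent_class):
    """Derive a worker class that reuses an externally supplied, shared model (sketch)."""

    class WorkerAgent(agent_class):
        def __init__(self, model=None, **kwargs):
            self.shared_model = model         # injected instead of being built locally
            super().__init__(**kwargs)

        def initialize_model(self):
            return self.shared_model          # all clones act on the same model object

    return WorkerAgent


class Agent:
    def __init__(self, name):
        self.name = name


shared_model = object()
Worker = make_worker_class(Agent)
workers = [Worker(model=shared_model, name="worker-%d" % i) for i in range(3)]
print(all(w.initialize_model() is shared_model for w in workers))  # True
```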
Use `environment`, instead.", "id": "f14240:c0:m0"} {"signature": "def __init__(self, agent, environment, repeat_actions=, history=None):", "body": "self.agent = agentself.environment = environmentself.repeat_actions = repeat_actionsself.global_episode = None self.global_timestep = None self.start_time = None self.episode_rewards = None self.episode_timesteps = None self.episode_times = None self.reset(history)", "docstring": "Args:\n agent (Agent): Agent object (or list of Agent objects) to use for the run.\n environment (Environment): Environment object (or list of Environment objects) to use for the run.\n repeat_actions (int): How many times the same given action will be repeated in subsequent calls to\n Environment's `execute` method. Rewards collected in these calls are accumulated and reported\n as a sum in the following call to Agent's `observe` method.\n history (dict): A dictionary containing an already run experiment's results. Keys should be:\n episode_rewards (list of rewards), episode_timesteps (lengths of episodes), episode_times (run-times)", "id": "f14241:c0:m0"} {"signature": "def reset(self, history=None):", "body": "if not history:history = dict()self.episode_rewards = history.get(\"\", list())self.episode_timesteps = history.get(\"\", list())self.episode_times = history.get(\"\", list())", "docstring": "Resets the Runner's internal stats counters.\nIf history is empty, use default values in history.get().\n\nArgs:\n history (dict): A dictionary containing an already run experiment's results. Keys should be:\n episode_rewards (list of rewards), episode_timesteps (lengths of episodes), episode_times (run-times)", "id": "f14241:c0:m1"} {"signature": "def close(self):", "body": "raise NotImplementedError", "docstring": "Should perform clean up operations on Runner's Agent(s) and Environment(s).", "id": "f14241:c0:m2"} {"signature": "def run(self, num_episodes, num_timesteps, max_episode_timesteps, deterministic, episode_finished, summary_report,summary_interval):", "body": "raise NotImplementedError", "docstring": "Executes this runner by starting to act (via Agent(s)) in the given Environment(s).\nStops execution according to certain conditions (e.g. max. number of episodes, etc..).\nCalls callback functions after each episode and/or after some summary criteria are met.\n\nArgs:\n num_episodes (int): Max. number of episodes to run globally in total (across all threads/workers).\n num_timesteps (int): Max. number of time steps to run globally in total (across all threads/workers)\n max_episode_timesteps (int): Max. number of timesteps per episode.\n deterministic (bool): Whether to use exploration when selecting actions.\n episode_finished (callable): A function to be called once an episodes has finished. Should take\n a BaseRunner object and some worker ID (e.g. thread-ID or task-ID). 
Can decide for itself\n every how many episodes it should report something and what to report.\n summary_report (callable): Deprecated; Function that could produce a summary over the training\n progress so far.\n summary_interval (int): Deprecated; The number of time steps to execute (globally)\n before summary_report is called.", "id": "f14241:c0:m3"} {"signature": "@propertydef episode(self):", "body": "return self.global_episode", "docstring": "Deprecated property `episode` -> global_episode.", "id": "f14241:c0:m4"} {"signature": "@propertydef timestep(self):", "body": "return self.global_timestep", "docstring": "Deprecated property `timestep` -> global_timestep.", "id": "f14241:c0:m5"} {"signature": "def __init__(self, agent, environment, repeat_actions=, history=None, id_=):", "body": "super(ParallelRunner, self).__init__(agent, environment, repeat_actions, history)self.id = id_ self.current_timestep = None self.episode_actions = []self.num_parallel = self.agent.execution['']print(''.format(self.num_parallel))", "docstring": "Initialize a single Runner object (one Agent/one Environment).\n\nArgs:\n id_ (int): The ID of this Runner (for distributed TF runs).", "id": "f14242:c0:m0"} {"signature": "def __init__(self, agent, environment, repeat_actions=, history=None, id_=):", "body": "super(Runner, self).__init__(agent, environment, repeat_actions, history)self.id = id_ self.current_timestep = None", "docstring": "Initialize a single Runner object (one Agent/one Environment).\n\nArgs:\n id_ (int): The ID of this Runner (for distributed TF runs).", "id": "f14244:c0:m0"} {"signature": "def __init__(self, specification):", "body": "self.specification = dict()for action_type, shape in specification.items():if action_type in ('', '', '', ''):if isinstance(shape, int):self.specification[action_type] = (shape,)else:self.specification[action_type] = tuple(shape)else:raise TensorForceError('')self.single_state_action = (len(specification) == )", "docstring": "Initializes a minimal test environment, which is used for the unit tests.\nGiven a specification of actions types and shapes, the environment states consist\nof the same number of pairs (x, y). The (mean of) an action a gives the next state via (1-a, a),\nand the 'correct' state is always (0, 1).\n\nArgs:\n specification: Takes a dict type (keys)-> shape (values specifying the action\n structure of the environment. Use shape () for single scalar actions.", "id": "f14245:c0:m0"} {"signature": "def pre_run(self, agent, environment):", "body": "pass", "docstring": "Called before `Runner.run`.", "id": "f14247:c0:m0"} {"signature": "def close(self):", "body": "pass", "docstring": "Close environment. No other method calls possible afterwards.", "id": "f14270:c0:m1"} {"signature": "def seed(self, seed):", "body": "return None", "docstring": "Sets the random seed of the environment to the given value (current time, if seed=None).\nNaturally deterministic Environments (e.g. 
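The `run(...)` contract described above (act in the environment until a terminal or the per-episode step limit, accumulate the reward, then report via an `episode_finished` callback that may stop training early) can be sketched as a plain loop. The environment methods follow the `reset()`/`execute()` specs documented above; the agent methods and everything else are illustrative.

```python
def run(agent, environment, num_episodes, max_episode_timesteps, episode_finished=None):
    """Minimal single-threaded runner loop (sketch of the contract described above)."""
    episode_rewards = []
    for episode in range(num_episodes):
        state = environment.reset()
        agent.reset()                                  # assumed agent API
        episode_reward, timestep, terminal = 0.0, 0, False
        while not terminal and timestep < max_episode_timesteps:
            action = agent.act(state)                  # assumed agent API
            state, terminal, reward = environment.execute(action)
            agent.observe(terminal=terminal, reward=reward)
            episode_reward += reward
            timestep += 1
        episode_rewards.append(episode_reward)
        # The callback can inspect progress and return False to stop early.
        if episode_finished is not None and episode_finished(episode, episode_reward) is False:
            break
    return episode_rewards
```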
ALE or some gym Envs) don't have to implement this method.\n\nArgs:\n seed (int): The seed to use for initializing the pseudo-random number generator (default=epoch time in sec).\nReturns: The actual seed (int) used OR None if Environment did not override this method (no seeding supported).", "id": "f14270:c0:m2"} {"signature": "def reset(self):", "body": "raise NotImplementedError", "docstring": "Reset environment and setup for new episode.\n\nReturns:\n initial state of reset environment.", "id": "f14270:c0:m3"} {"signature": "def execute(self, action):", "body": "raise NotImplementedError", "docstring": "Executes action, observes next state(s) and reward.\n\nArgs:\n actions: Actions to execute.\n\nReturns:\n Tuple of (next state, bool indicating terminal, reward)", "id": "f14270:c0:m4"} {"signature": "@propertydef states(self):", "body": "raise NotImplementedError", "docstring": "Return the state space. Might include subdicts if multiple states are \navailable simultaneously.\n\nReturns:\n States specification, with the following attributes\n (required):\n - type: one of 'bool', 'int', 'float' (default: 'float').\n - shape: integer, or list/tuple of integers (required).", "id": "f14270:c0:m5"} {"signature": "@propertydef actions(self):", "body": "raise NotImplementedError", "docstring": "Return the action space. Might include subdicts if multiple actions are \navailable simultaneously.\n\nReturns:\n actions (spec, or dict of specs): Actions specification, with the following attributes\n (required):\n - type: one of 'bool', 'int', 'float' (required).\n - shape: integer, or list/tuple of integers (default: []).\n - num_actions: integer (required if type == 'int').\n - min_value and max_value: float (optional if type == 'float', default: none).", "id": "f14270:c0:m6"} {"signature": "@staticmethoddef from_spec(spec, kwargs):", "body": "env = tensorforce.util.get_object(obj=spec,predefined_objects=tensorforce.environments.environments,kwargs=kwargs)assert isinstance(env, Environment)return env", "docstring": "Creates an environment from a specification dict.", "id": "f14270:c0:m7"} {"signature": "def setup_components_and_tf_funcs(self, custom_getter=None):", "body": "self.network = Network.from_spec(spec=self.network_spec,kwargs=dict(summary_labels=self.summary_labels))assert len(self.internals_spec) == self.internals_spec = self.network.internals_spec()for name in sorted(self.internals_spec):internal = self.internals_spec[name]self.internals_input[name] = tf.placeholder(dtype=util.tf_dtype(internal['']),shape=(None,) + tuple(internal['']),name=('' + name))if internal[''] == '':self.internals_init[name] = np.zeros(shape=internal[''])else:raise TensorForceError(\"\")custom_getter = super(DistributionModel, self).setup_components_and_tf_funcs(custom_getter)self.distributions = self.create_distributions()self.fn_kl_divergence = tf.make_template(name_='',func_=self.tf_kl_divergence,custom_getter_=custom_getter)return custom_getter", "docstring": "Creates and stores Network and Distribution objects.\nGenerates and stores all template functions.", "id": "f14274:c0:m1"} {"signature": "def create_distributions(self):", "body": "distributions = dict()for name in sorted(self.actions_spec):action = self.actions_spec[name]if self.distributions_spec is not None and name in self.distributions_spec:kwargs = dict(action)kwargs[''] = namekwargs[''] = self.summary_labelsdistributions[name] = Distribution.from_spec(spec=self.distributions_spec[name],kwargs=kwargs)elif action[''] == '':distributions[name] = 
Bernoulli(shape=action[''],scope=name,summary_labels=self.summary_labels)elif action[''] == '':distributions[name] = Categorical(shape=action[''],num_actions=action[''],scope=name,summary_labels=self.summary_labels)elif action[''] == '':if '' in action:distributions[name] = Beta(shape=action[''],min_value=action[''],max_value=action[''],scope=name,summary_labels=self.summary_labels)else:distributions[name] = Gaussian(shape=action[''],scope=name,summary_labels=self.summary_labels)return distributions", "docstring": "Creates and returns the Distribution objects based on self.distributions_spec.\n\nReturns: Dict of distributions according to self.distributions_spec.", "id": "f14274:c0:m2"} {"signature": "def __init__(self,states,actions,scope,device,saver,summarizer,execution,batching_capacity,variable_noise,states_preprocessing,actions_exploration,reward_preprocessing,tf_session_dump_dir=\"\"):", "body": "self.network = Noneself.states_spec = statesself.internals_spec = dict()self.actions_spec = actionsself.scope = scopeself.device = deviceif saver is None or saver.get('') is None:self.saver_spec = Noneelse:self.saver_spec = saverif summarizer is None or summarizer.get('') is None:self.summarizer_spec = Noneself.summary_labels = set()else:self.summarizer_spec = summarizerself.summary_labels = set(self.summarizer_spec.get('', ()))self.summarizer = Noneself.graph_summary = Noneself.summarizer_init_op = Noneself.flush_summarizer = Noneself.execution_spec = executionself.execution_type = self.execution_spec[\"\"]self.session_config = self.execution_spec[\"\"]self.distributed_spec = self.execution_spec[\"\"]assert batching_capacity is None or (isinstance(batching_capacity, int) and batching_capacity > )self.batching_capacity = batching_capacity or self.tf_session_dump_dir = tf_session_dump_dirself.num_parallel = self.execution_spec.get('')if self.num_parallel is None:self.num_parallel = self.list_states_buffer = dict()self.list_internals_buffer = dict()self.list_actions_buffer = dict()self.list_buffer_index = [None for _ in range(self.num_parallel)]self.episode_output = Noneself.episode_index_input = Noneself.unbuffered_episode_output = Noneassert variable_noise is None or variable_noise > self.variable_noise = variable_noiseself.states_preprocessing_spec = states_preprocessingself.actions_exploration_spec = actions_explorationself.reward_preprocessing_spec = reward_preprocessingself.variables = Noneself.all_variables = Noneself.registered_variables = Noneself.list_timestep = [None for _ in range(self.num_parallel)]self.episode = Noneself.global_timestep = Noneself.global_episode = Noneself.states_input = dict()self.states_preprocessing = dict()self.internals_input = dict()self.internals_init = dict()self.actions_input = dict()self.actions_exploration = dict()self.terminal_input = Noneself.reward_input = Noneself.reward_preprocessing = Noneself.deterministic_input = Noneself.independent_input = Noneself.update_input = Noneself.fn_initialize = Noneself.fn_preprocess = Noneself.fn_actions_and_internals = Noneself.fn_observe_timestep = Noneself.fn_action_exploration = Noneself.graph = Noneself.global_model = Noneself.is_local_model = Trueself.server = Noneself.summarizer = Noneself.saver = Noneself.saver_directory = Noneself.scaffold = Noneself.session = Noneself.monitored_session = Noneself.actions_output = Noneself.internals_output = Noneself.timestep_output = Noneself.list_buffer_index_reset_op = Noneself.setup()", "docstring": "Model.\n\nArgs:\n states (spec): The state-space description 
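The default dispatch in `create_distributions` above pairs each action type with a distribution family: 'bool' actions with a Bernoulli, 'int' actions with a Categorical, and 'float' actions with a Beta when bounds are given, otherwise a Gaussian. A sketch of that selection logic with placeholder distribution classes:

```python
class Bernoulli:   # placeholder stand-ins for the real distribution classes
    pass

class Categorical:
    pass

class Beta:
    pass

class Gaussian:
    pass


def pick_distribution(action_spec):
    """Choose a distribution family from an action spec (default dispatch described above)."""
    if action_spec['type'] == 'bool':
        return Bernoulli
    if action_spec['type'] == 'int':
        return Categorical
    if action_spec['type'] == 'float':
        if 'min_value' in action_spec and 'max_value' in action_spec:
            return Beta        # bounded continuous action
        return Gaussian        # unbounded continuous action
    raise ValueError("Unknown action type: {}".format(action_spec['type']))


print(pick_distribution({'type': 'float', 'min_value': -1.0, 'max_value': 1.0}).__name__)  # Beta
```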
dictionary.\n actions (spec): The action-space description dictionary.\n scope (str): The root scope str to use for tf variable scoping.\n device (str): The name of the device to run the graph of this model on.\n saver (spec): Dict specifying whether and how to save the model's parameters.\n summarizer (spec): Dict specifying which tensorboard summaries should be created and added to the graph.\n execution (spec): Dict specifying whether and how to do distributed training on the model's graph.\n batching_capacity (int): Batching capacity.\n variable_noise (float): The stddev value of a Normal distribution used for adding random\n noise to the model's output (for each batch, noise can be toggled and - if active - will be resampled).\n Use None for not adding any noise.\n states_preprocessing (spec / dict of specs): Dict specifying whether and how to preprocess state signals\n (e.g. normalization, greyscale, etc..).\n actions_exploration (spec / dict of specs): Dict specifying whether and how to add exploration to the model's\n \"action outputs\" (e.g. epsilon-greedy).\n reward_preprocessing (spec): Dict specifying whether and how to preprocess rewards coming\n from the Environment (e.g. reward normalization).\n tf_session_dump_dir (str): If non-empty string, all session.run calls will be dumped using the tensorflow\n offline-debug session into the given directory.\n execution: (dict)\n - num_parallel: (int) number of parallel episodes", "id": "f14275:c0:m0"} {"signature": "def setup(self):", "body": "graph_default_context = self.setup_graph()if self.execution_type == \"\" and self.server is None and self.is_local_model:self.start_server()with tf.device(device_name_or_function=self.device):with tf.variable_scope(name_or_scope=self.scope, reuse=False):self.variables = dict()self.all_variables = dict()self.registered_variables = set()self.setup_placeholders()self.setup_components_and_tf_funcs()self.fn_initialize()if self.summarizer_spec is not None:with tf.name_scope(name=''):self.summarizer = tf.contrib.summary.create_file_writer(logdir=self.summarizer_spec[''],max_queue=None,flush_millis=(self.summarizer_spec.get('', ) * ),filename_suffix=None,name=None)default_summarizer = self.summarizer.as_default()assert '' not in self.summarizer_specrecord_summaries = tf.contrib.summary.always_record_summaries()default_summarizer.__enter__()record_summaries.__enter__()states = util.map_tensors(fn=tf.identity, tensors=self.states_input)internals = util.map_tensors(fn=tf.identity, tensors=self.internals_input)actions = util.map_tensors(fn=tf.identity, tensors=self.actions_input)terminal = tf.identity(input=self.terminal_input)reward = tf.identity(input=self.reward_input)deterministic = tf.identity(input=self.deterministic_input)independent = tf.identity(input=self.independent_input)episode_index = tf.identity(input=self.episode_index_input)states, actions, reward = self.fn_preprocess(states=states, actions=actions, reward=reward)self.create_operations(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward,deterministic=deterministic,independent=independent,index=episode_index)if '' in self.summary_labels or '' in self.summary_labels:for name in sorted(states):tf.contrib.summary.histogram(name=('' + name), tensor=states[name])if '' in self.summary_labels or '' in self.summary_labels:for name in sorted(actions):tf.contrib.summary.histogram(name=('' + name), tensor=actions[name])if '' in self.summary_labels or '' in self.summary_labels:tf.contrib.summary.histogram(name='', 
tensor=reward)if '' in self.summary_labels:with tf.name_scope(name=''):graph_def = self.graph.as_graph_def()graph_str = tf.constant(value=graph_def.SerializeToString(),dtype=tf.string,shape=())self.graph_summary = tf.contrib.summary.graph(param=graph_str,step=self.global_timestep)if '' in self.summarizer_spec:self.graph_summary = tf.group(self.graph_summary,*self.summarizer_spec[''].build_metagraph_list())if self.summarizer_spec is not None:record_summaries.__exit__(None, None, None)default_summarizer.__exit__(None, None, None)with tf.name_scope(name=''):self.flush_summarizer = tf.contrib.summary.flush()self.summarizer_init_op = tf.contrib.summary.summary_writer_initializer_op()assert len(self.summarizer_init_op) == self.summarizer_init_op = self.summarizer_init_op[]if self.execution_type == \"\" and not self.is_local_model:returnself.setup_saver()self.setup_scaffold()hooks = self.setup_hooks()self.setup_session(self.server, hooks, graph_default_context)", "docstring": "Sets up the TensorFlow model graph, starts the servers (distributed mode), creates summarizers\nand savers, initializes (and enters) the TensorFlow session.", "id": "f14275:c0:m1"} {"signature": "def setup_graph(self):", "body": "graph_default_context = Noneif self.execution_type == \"\":self.graph = tf.Graph()graph_default_context = self.graph.as_default()graph_default_context.__enter__()self.global_model = Noneelif self.execution_type == \"\":if self.distributed_spec[\"\"] == \"\":return Noneelif self.distributed_spec[\"\"] == \"\":if self.is_local_model:graph = tf.Graph()graph_default_context = graph.as_default()graph_default_context.__enter__()self.global_model = deepcopy(self)self.global_model.is_local_model = Falseself.global_model.setup()self.graph = graphself.as_local_model()self.scope += '' + str(self.distributed_spec[\"\"])else:self.graph = tf.get_default_graph() self.global_model = Noneself.device = tf.train.replica_device_setter(worker_device=self.device,cluster=self.distributed_spec[\"\"])else:raise TensorForceError(\"\".format(self.distributed_spec[\"\"]))else:raise TensorForceError(\"\".format(self.distributed_spec[\"\"]))return graph_default_context", "docstring": "Creates our Graph and figures out, which shared/global model to hook up to.\nIf we are in a global-model's setup procedure, we do not create\na new graph (return None as the context). 
We will instead use the already existing local replica graph\nof the model.\n\nReturns: None or the graph's as_default()-context.", "id": "f14275:c0:m2"} {"signature": "def start_server(self):", "body": "self.server = tf.train.Server(server_or_cluster_def=self.distributed_spec[\"\"],job_name=self.distributed_spec[\"\"],task_index=self.distributed_spec[\"\"],protocol=self.distributed_spec.get(\"\"),config=self.distributed_spec.get(\"\"),start=True)if self.distributed_spec[\"\"] == \"\":self.server.join()quit()", "docstring": "Creates and stores a tf server (and optionally joins it if we are a parameter-server).\nOnly relevant, if we are running in distributed mode.", "id": "f14275:c0:m3"} {"signature": "def setup_placeholders(self):", "body": "for name in sorted(self.states_spec):self.states_input[name] = tf.placeholder(dtype=util.tf_dtype(self.states_spec[name]['']),shape=(None,) + tuple(self.states_spec[name]['']),name=('' + name))if self.states_preprocessing_spec is None:for name in sorted(self.states_spec):self.states_spec[name][''] = self.states_spec[name]['']elif not isinstance(self.states_preprocessing_spec, list) andall(name in self.states_spec for name in self.states_preprocessing_spec):for name in sorted(self.states_spec):if name in self.states_preprocessing_spec:preprocessing = PreprocessorStack.from_spec(spec=self.states_preprocessing_spec[name],kwargs=dict(shape=self.states_spec[name]['']))self.states_spec[name][''] = self.states_spec[name]['']self.states_spec[name][''] = preprocessing.processed_shape(shape=self.states_spec[name][''])self.states_preprocessing[name] = preprocessingelse:self.states_spec[name][''] = self.states_spec[name]['']elif \"\" in self.states_preprocessing_spec:preprocessing = PreprocessorStack.from_spec(spec=self.states_preprocessing_spec,kwargs=dict(shape=self.states_spec[name]['']))for name in sorted(self.states_spec):self.states_spec[name][''] = self.states_spec[name]['']self.states_spec[name][''] = preprocessing.processed_shape(shape=self.states_spec[name][''])self.states_preprocessing[name] = preprocessingelse:for name in sorted(self.states_spec):preprocessing = PreprocessorStack.from_spec(spec=self.states_preprocessing_spec,kwargs=dict(shape=self.states_spec[name]['']))self.states_spec[name][''] = self.states_spec[name]['']self.states_spec[name][''] = preprocessing.processed_shape(shape=self.states_spec[name][''])self.states_preprocessing[name] = preprocessingfor name in sorted(self.actions_spec):self.actions_input[name] = tf.placeholder(dtype=util.tf_dtype(self.actions_spec[name]['']),shape=(None,) + tuple(self.actions_spec[name]['']),name=('' + name))if self.actions_exploration_spec is None:passelif all(name in self.actions_spec for name in self.actions_exploration_spec):for name in sorted(self.actions_spec):if name in self.actions_exploration:self.actions_exploration[name] = Exploration.from_spec(spec=self.actions_exploration_spec[name])else:for name in sorted(self.actions_spec):self.actions_exploration[name] = Exploration.from_spec(spec=self.actions_exploration_spec)self.terminal_input = tf.placeholder(dtype=util.tf_dtype(''), shape=(None,), name='')self.reward_input = tf.placeholder(dtype=util.tf_dtype(''), shape=(None,), name='')if self.reward_preprocessing_spec is not None:self.reward_preprocessing = PreprocessorStack.from_spec(spec=self.reward_preprocessing_spec,kwargs=dict(shape=()))if self.reward_preprocessing.processed_shape(shape=()) != ():raise TensorForceError(\"\")self.deterministic_input = tf.placeholder(dtype=util.tf_dtype(''), 
shape=(), name='')self.independent_input = tf.placeholder(dtype=util.tf_dtype(''), shape=(), name='')", "docstring": "Creates the TensorFlow placeholders, variables, ops and functions for this model.\nNOTE: Does not add the internal state placeholders and initialization values to the model yet as that requires\nthe model's Network (if any) to be generated first.", "id": "f14275:c0:m4"} {"signature": "def setup_components_and_tf_funcs(self, custom_getter=None):", "body": "if custom_getter is None:def custom_getter(getter, name, registered=False, **kwargs):\"\"\"\"\"\"if registered:self.registered_variables.add(name)elif name in self.registered_variables:registered = Truevariable = getter(name=name, **kwargs)if registered:passelif name in self.all_variables:assert variable is self.all_variables[name]if kwargs.get('', True):assert variable is self.variables[name]if '' in self.summary_labels:tf.contrib.summary.histogram(name=name, tensor=variable)else:self.all_variables[name] = variableif kwargs.get('', True):self.variables[name] = variableif '' in self.summary_labels:tf.contrib.summary.histogram(name=name, tensor=variable)return variableself.fn_initialize = tf.make_template(name_='',func_=self.tf_initialize,custom_getter_=custom_getter)self.fn_preprocess = tf.make_template(name_='',func_=self.tf_preprocess,custom_getter_=custom_getter)self.fn_actions_and_internals = tf.make_template(name_='',func_=self.tf_actions_and_internals,custom_getter_=custom_getter)self.fn_observe_timestep = tf.make_template(name_='',func_=self.tf_observe_timestep,custom_getter_=custom_getter)self.fn_action_exploration = tf.make_template(name_='',func_=self.tf_action_exploration,custom_getter_=custom_getter)return custom_getter", "docstring": "Allows child models to create model's component objects, such as optimizer(s), memory(s), etc..\nCreates all tensorflow functions via tf.make_template calls on all the class' \"tf_\"-methods.\n\nArgs:\n custom_getter: The `custom_getter_` object to use for `tf.make_template` when creating TensorFlow functions.\n If None, use a default custom_getter_.\n\nReturns: The custom_getter passed in (or a default one if custom_getter was None).", "id": "f14275:c0:m5"} {"signature": "def setup_saver(self):", "body": "if self.execution_type == \"\":global_variables = self.get_variables(include_submodules=True, include_nontrainable=True)else:global_variables = self.global_model.get_variables(include_submodules=True, include_nontrainable=True)for c in self.get_savable_components():c.register_saver_ops()self.saver = tf.train.Saver(var_list=global_variables, reshape=False,sharded=False,max_to_keep=,keep_checkpoint_every_n_hours=,name=None,restore_sequentially=False,saver_def=None,builder=None,defer_build=False,allow_empty=True,write_version=tf.train.SaverDef.V2,pad_step_number=False,save_relative_paths=True)", "docstring": "Creates the tf.train.Saver object and stores it in self.saver.", "id": "f14275:c0:m6"} {"signature": "def setup_scaffold(self):", "body": "if self.execution_type == \"\":global_variables = self.get_variables(include_submodules=True, include_nontrainable=True)init_op = tf.variables_initializer(var_list=global_variables)if self.summarizer_init_op is not None:init_op = tf.group(init_op, self.summarizer_init_op)if self.graph_summary is None:ready_op = tf.report_uninitialized_variables(var_list=global_variables)ready_for_local_init_op = Nonelocal_init_op = Noneelse:ready_op = Noneready_for_local_init_op = tf.report_uninitialized_variables(var_list=global_variables)local_init_op = 
self.graph_summaryelse:global_variables = self.global_model.get_variables(include_submodules=True, include_nontrainable=True)local_variables = self.get_variables(include_submodules=True, include_nontrainable=True)init_op = tf.variables_initializer(var_list=global_variables)if self.summarizer_init_op is not None:init_op = tf.group(init_op, self.summarizer_init_op)ready_op = tf.report_uninitialized_variables(var_list=(global_variables + local_variables))ready_for_local_init_op = tf.report_uninitialized_variables(var_list=global_variables)if self.graph_summary is None:local_init_op = tf.group(tf.variables_initializer(var_list=local_variables),*(tf.assign(ref=local_var, value=global_var) for local_var, global_var in zip(self.get_variables(include_submodules=True),self.global_model.get_variables(include_submodules=True))))else:local_init_op = tf.group(tf.variables_initializer(var_list=local_variables),self.graph_summary,*(tf.assign(ref=local_var, value=global_var) for local_var, global_var in zip(self.get_variables(include_submodules=True),self.global_model.get_variables(include_submodules=True))))def init_fn(scaffold, session):if self.saver_spec is not None and self.saver_spec.get('', True):directory = self.saver_spec['']file = self.saver_spec.get('')if file is None:file = tf.train.latest_checkpoint(checkpoint_dir=directory,latest_filename=None )elif not os.path.isfile(file):file = os.path.join(directory, file)if file is not None:try:scaffold.saver.restore(sess=session, save_path=file)session.run(fetches=self.list_buffer_index_reset_op)except tf.errors.NotFoundError:raise TensorForceError(\"\")self.scaffold = tf.train.Scaffold(init_op=init_op,init_feed_dict=None,init_fn=init_fn,ready_op=ready_op,ready_for_local_init_op=ready_for_local_init_op,local_init_op=local_init_op,summary_op=None,saver=self.saver,copy_from_scaffold=None)", "docstring": "Creates the tf.train.Scaffold object and assigns it to self.scaffold.\nOther fields of the Scaffold are generated automatically.", "id": "f14275:c0:m7"} {"signature": "def setup_hooks(self):", "body": "hooks = list()if self.saver_spec is not None and (self.execution_type == '' or self.distributed_spec[''] == ):self.saver_directory = self.saver_spec['']hooks.append(tf.train.CheckpointSaverHook(checkpoint_dir=self.saver_directory,save_secs=self.saver_spec.get('', None if '' in self.saver_spec else ),save_steps=self.saver_spec.get(''), saver=None, checkpoint_basename=self.saver_spec.get('', ''),scaffold=self.scaffold,listeners=None))else:self.saver_directory = Nonereturn hooks", "docstring": "Creates and returns a list of hooks to use in a session. 
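The `init_fn` inside `setup_scaffold` above resolves which checkpoint to restore: an explicitly named file, that name joined onto the saver directory if it is not an existing path, or otherwise the most recent checkpoint in the directory. A small sketch of that resolution order; the latest-checkpoint lookup is abstracted behind a parameter since the real code uses `tf.train.latest_checkpoint`:

```python
import os

def resolve_checkpoint(directory, file=None, latest_in=None):
    """Return the checkpoint path to restore, or None if nothing is found (sketch)."""
    if file is None:
        # No explicit file: fall back to the newest checkpoint in the directory.
        return latest_in(directory) if latest_in else None
    if not os.path.isfile(file):
        # Relative name: interpret it relative to the saver directory.
        file = os.path.join(directory, file)
    return file

print(resolve_checkpoint("/tmp/ckpts", file="model-100"))          # /tmp/ckpts/model-100
print(resolve_checkpoint("/tmp/ckpts", latest_in=lambda d: None))  # None
```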
Populates self.saver_directory.\n\nReturns: List of hooks to use in a session.", "id": "f14275:c0:m8"} {"signature": "def setup_session(self, server, hooks, graph_default_context):", "body": "if self.execution_type == \"\":session_creator = tf.train.ChiefSessionCreator(scaffold=self.scaffold,master=server.target,config=self.session_config,checkpoint_dir=None,checkpoint_filename_with_path=None)self.monitored_session = tf.train.MonitoredSession(session_creator=session_creator,hooks=hooks,stop_grace_period_secs= )if self.tf_session_dump_dir != \"\":self.monitored_session = DumpingDebugWrapperSession(self.monitored_session, self.tf_session_dump_dir)else:self.monitored_session = tf.train.SingularMonitoredSession(hooks=hooks,scaffold=self.scaffold,master='', config=self.session_config, checkpoint_dir=None)if graph_default_context:graph_default_context.__exit__(None, None, None)self.graph.finalize()self.monitored_session.__enter__()self.session = self.monitored_session._tf_sess()", "docstring": "Creates and then enters the session for this model (finalizes the graph).\n\nArgs:\n server (tf.train.Server): The tf.train.Server object to connect to (None for single execution).\n hooks (list): A list of (saver, summary, etc..) hooks to be passed to the session.\n graph_default_context: The graph as_default() context that we are currently in.", "id": "f14275:c0:m9"} {"signature": "def close(self):", "body": "if self.flush_summarizer is not None:self.monitored_session.run(fetches=self.flush_summarizer)if self.saver_directory is not None:self.save(append_timestep=True)self.monitored_session.__exit__(None, None, None)", "docstring": "Saves the model (of saver dir is given) and closes the session.", "id": "f14275:c0:m10"} {"signature": "def tf_initialize(self):", "body": "with tf.device(device_name_or_function=(self.global_model.device if self.global_model else self.device)):collection = self.graph.get_collection(name='')if len(collection) == :self.global_timestep = tf.get_variable(name='',shape=(),dtype=tf.int64,trainable=False,initializer=tf.constant_initializer(value=, dtype=tf.int64),collections=['', tf.GraphKeys.GLOBAL_STEP])else:assert len(collection) == self.global_timestep = collection[]collection = self.graph.get_collection(name='')if len(collection) == :self.global_episode = tf.get_variable(name='',shape=(),dtype=tf.int64,trainable=False,initializer=tf.constant_initializer(value=, dtype=tf.int64),collections=[''])else:assert len(collection) == self.global_episode = collection[]self.timestep = tf.get_variable(name='',shape=(),dtype=tf.int64,initializer=tf.constant_initializer(value=, dtype=tf.int64),trainable=False)self.episode = tf.get_variable(name='',shape=(),dtype=tf.int64,initializer=tf.constant_initializer(value=, dtype=tf.int64),trainable=False)self.episode_index_input = tf.placeholder(name='',shape=(),dtype=tf.int32,)for name in sorted(self.states_spec):self.list_states_buffer[name] = tf.get_variable(name=(''.format(name)),shape=((self.num_parallel, self.batching_capacity,) + tuple(self.states_spec[name][''])),dtype=util.tf_dtype(self.states_spec[name]['']),trainable=False)for name in sorted(self.internals_spec):self.list_internals_buffer[name] = tf.get_variable(name=(''.format(name)),shape=((self.num_parallel, self.batching_capacity,) + tuple(self.internals_spec[name][''])),dtype=util.tf_dtype(self.internals_spec[name]['']),trainable=False)for name in sorted(self.actions_spec):self.list_actions_buffer[name]= tf.get_variable(name=(''.format(name)),shape=((self.num_parallel, 
self.batching_capacity,) + tuple(self.actions_spec[name][''])),dtype=util.tf_dtype(self.actions_spec[name]['']),trainable=False)self.list_buffer_index = tf.get_variable(name='',shape=(self.num_parallel,),dtype=util.tf_dtype(''),trainable=False)", "docstring": "Creates tf Variables for the local state/internals/action-buffers and for the local and global counters\nfor timestep and episode.", "id": "f14275:c0:m12"} {"signature": "def tf_preprocess(self, states, actions, reward):", "body": "for name in sorted(self.states_preprocessing):states[name] = self.states_preprocessing[name].process(tensor=states[name])if self.reward_preprocessing is not None:reward = self.reward_preprocessing.process(tensor=reward)return states, actions, reward", "docstring": "Applies preprocessing ops to the raw states/action/reward inputs.\n\nArgs:\n states (dict): Dict of raw state tensors.\n actions (dict): Dict or raw action tensors.\n reward: 1D (float) raw rewards tensor.\n\nReturns: The preprocessed versions of the input tensors.", "id": "f14275:c0:m13"} {"signature": "def tf_action_exploration(self, action, exploration, action_spec):", "body": "action_shape = tf.shape(input=action)exploration_value = exploration.tf_explore(episode=self.global_episode,timestep=self.global_timestep,shape=action_spec[''])exploration_value = tf.expand_dims(input=exploration_value, axis=)if action_spec[''] == '':action = tf.where(condition=(tf.random_uniform(shape=action_shape) < exploration_value),x=(tf.random_uniform(shape=action_shape) < ),y=action)elif action_spec[''] == '':action = tf.where(condition=(tf.random_uniform(shape=action_shape) < exploration_value),x=tf.random_uniform(shape=action_shape, maxval=action_spec[''], dtype=util.tf_dtype('')),y=action)elif action_spec[''] == '':noise = tf.random_normal(shape=action_shape, dtype=util.tf_dtype(''))action += noise * exploration_valueif '' in action_spec:action = tf.clip_by_value(t=action,clip_value_min=action_spec[''],clip_value_max=action_spec[''])return action", "docstring": "Applies optional exploration to the action (post-processor for action outputs).\n\nArgs:\n action (tf.Tensor): The original output action tensor (to be post-processed).\n exploration (Exploration): The Exploration object to use.\n action_spec (dict): Dict specifying the action space.\nReturns:\n The post-processed action output tensor.", "id": "f14275:c0:m14"} {"signature": "def tf_actions_and_internals(self, states, internals, deterministic):", "body": "raise NotImplementedError", "docstring": "Creates and returns the TensorFlow operations for retrieving the actions and - if applicable -\nthe posterior internal state Tensors in reaction to the given input states (and prior internal states).\n\nArgs:\n states (dict): Dict of state tensors (each key represents one state space component).\n internals (dict): Dict of internal state tensors (each key represents one internal space component).\n deterministic: Boolean tensor indicating whether action should be chosen\n deterministically.\n\nReturns:\n tuple:\n 1) dict of output actions (with or without exploration applied (see `deterministic`))\n 2) list of posterior internal state Tensors (empty for non-internal state models)", "id": "f14275:c0:m15"} {"signature": "def tf_observe_timestep(self, states, internals, actions, terminal, reward):", "body": "raise NotImplementedError", "docstring": "Creates the TensorFlow operations for processing a batch of observations coming in from our buffer (state,\naction, internals) as well as from the agent's 
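`tf_action_exploration` above post-processes sampled actions: boolean and integer actions are replaced by random ones with probability given by the exploration schedule (epsilon-greedy), while float actions get additive Gaussian noise scaled by the exploration value and are clipped back into bounds when bounds exist. A numpy sketch of the same three cases; shapes and the exploration value are illustrative:

```python
import numpy as np

def explore(action, action_spec, epsilon, rng=np.random):
    """Apply exploration to a batch of actions, mirroring the three cases described above (sketch)."""
    action = np.asarray(action)
    if action_spec['type'] == 'bool':
        # Epsilon-greedy: replace with a random boolean with probability epsilon.
        mask = rng.random_sample(action.shape) < epsilon
        return np.where(mask, rng.random_sample(action.shape) < 0.5, action)
    if action_spec['type'] == 'int':
        # Epsilon-greedy: replace with a uniformly random action index.
        mask = rng.random_sample(action.shape) < epsilon
        random_actions = rng.randint(action_spec['num_actions'], size=action.shape)
        return np.where(mask, random_actions, action)
    # 'float': additive Gaussian noise scaled by the exploration value, clipped into bounds if any.
    noisy = action + rng.standard_normal(action.shape) * epsilon
    if 'min_value' in action_spec:
        noisy = np.clip(noisy, action_spec['min_value'], action_spec['max_value'])
    return noisy

print(explore([0.2, -0.7], {'type': 'float', 'min_value': -1.0, 'max_value': 1.0}, epsilon=0.1))
```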
python-batch (terminal-signals and rewards from the env).\n\nArgs:\n states (dict): Dict of state tensors (each key represents one state space component).\n internals (dict): Dict of prior internal state tensors (each key represents one internal state component).\n actions (dict): Dict of action tensors (each key represents one action space component).\n terminal: 1D (bool) tensor of terminal signals.\n reward: 1D (float) tensor of rewards.\n\nReturns:\n The observation operation depending on the model type.", "id": "f14275:c0:m16"} {"signature": "def create_act_operations(self, states, internals, deterministic, independent, index):", "body": "operations = list()if self.variable_noise is not None and self.variable_noise > :self.fn_actions_and_internals(states=states,internals=internals,deterministic=deterministic)noise_deltas = list()for variable in self.get_variables():noise_delta = tf.random_normal(shape=util.shape(variable), mean=, stddev=self.variable_noise)noise_deltas.append(noise_delta)operations.append(variable.assign_add(delta=noise_delta))with tf.control_dependencies(control_inputs=operations):self.actions_output, self.internals_output = self.fn_actions_and_internals(states=states,internals=internals,deterministic=deterministic)with tf.control_dependencies(control_inputs=[self.actions_output[name] for name in sorted(self.actions_output)]):operations = list()if self.variable_noise is not None and self.variable_noise > :for variable, noise_delta in zip(self.get_variables(), noise_deltas):operations.append(variable.assign_sub(delta=noise_delta))with tf.control_dependencies(control_inputs=operations):for name in sorted(self.actions_exploration):self.actions_output[name] = tf.cond(pred=self.deterministic_input,true_fn=(lambda: self.actions_output[name]),false_fn=(lambda: self.fn_action_exploration(action=self.actions_output[name],exploration=self.actions_exploration[name],action_spec=self.actions_spec[name])))def independent_act():\"\"\"\"\"\"return self.global_timestepdef normal_act():\"\"\"\"\"\"operations = list()batch_size = tf.shape(input=states[next(iter(sorted(states)))])[]for name in sorted(states):operations.append(tf.assign(ref=self.list_states_buffer[name][index, self.list_buffer_index[index]: self.list_buffer_index[index] + batch_size],value=states[name]))for name in sorted(internals):operations.append(tf.assign(ref=self.list_internals_buffer[name][index, self.list_buffer_index[index]: self.list_buffer_index[index] + batch_size],value=internals[name]))for name in sorted(self.actions_output):operations.append(tf.assign(ref=self.list_actions_buffer[name][index, self.list_buffer_index[index]: self.list_buffer_index[index] + batch_size],value=self.actions_output[name]))with tf.control_dependencies(control_inputs=operations):operations = list()operations.append(tf.assign(ref=self.list_buffer_index[index: index+],value=tf.add(self.list_buffer_index[index: index+], tf.constant([]))))operations.append(tf.assign_add(ref=self.timestep,value=tf.to_int64(x=batch_size)))operations.append(tf.assign_add(ref=self.global_timestep,value=tf.to_int64(x=batch_size)))with tf.control_dependencies(control_inputs=operations):return self.global_timestep + self.timestep_output = tf.cond(pred=independent,true_fn=independent_act,false_fn=normal_act)", "docstring": "Creates and stores tf operations that are fetched when calling act(): actions_output, internals_output and\ntimestep_output.\n\nArgs:\n states (dict): Dict of state tensors (each key represents one state space component).\n internals 
(dict): Dict of prior internal state tensors (each key represents one internal state component).\n deterministic: 0D (bool) tensor (whether to not use action exploration).\n independent (bool): 0D (bool) tensor (whether to store states/internals/action in local buffer).", "id": "f14275:c0:m17"} {"signature": "def create_observe_operations(self, terminal, reward, index):", "body": "num_episodes = tf.count_nonzero(input_tensor=terminal, dtype=util.tf_dtype(''))increment_episode = tf.assign_add(ref=self.episode, value=tf.to_int64(x=num_episodes))increment_global_episode = tf.assign_add(ref=self.global_episode, value=tf.to_int64(x=num_episodes))with tf.control_dependencies(control_inputs=(increment_episode, increment_global_episode)):fn = (lambda x: tf.stop_gradient(input=x[:self.list_buffer_index[index]]))states = util.map_tensors(fn=fn, tensors=self.list_states_buffer, index=index)internals = util.map_tensors(fn=fn, tensors=self.list_internals_buffer, index=index)actions = util.map_tensors(fn=fn, tensors=self.list_actions_buffer, index=index)terminal = tf.stop_gradient(input=terminal)reward = tf.stop_gradient(input=reward)observation = self.fn_observe_timestep(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward)with tf.control_dependencies(control_inputs=(observation,)):reset_index = tf.assign(ref=self.list_buffer_index[index], value=)with tf.control_dependencies(control_inputs=(reset_index,)):self.episode_output = self.global_episode + self.list_buffer_index_reset_op = tf.group(*(tf.assign(ref=self.list_buffer_index[n], value=) for n in range(self.num_parallel)))", "docstring": "Returns the tf op to fetch when an observation batch is passed in (e.g. an episode's rewards and\nterminals). Uses the filled tf buffers for states, actions and internals to run\nthe tf_observe_timestep (model-dependent), resets buffer index and increases counters (episodes,\ntimesteps).\n\nArgs:\n terminal: The 1D tensor (bool) of terminal signals to process (more than one True within that list is ok).\n reward: The 1D tensor (float) of rewards to process.\n\nReturns: Tf op to fetch when `observe()` is called.", "id": "f14275:c0:m18"} {"signature": "def create_atomic_observe_operations(self, states, actions, internals, terminal, reward, index):", "body": "num_episodes = tf.count_nonzero(input_tensor=terminal, dtype=util.tf_dtype(''))increment_episode = tf.assign_add(ref=self.episode, value=tf.to_int64(x=num_episodes))increment_global_episode = tf.assign_add(ref=self.global_episode, value=tf.to_int64(x=num_episodes))with tf.control_dependencies(control_inputs=(increment_episode, increment_global_episode)):states = util.map_tensors(fn=tf.stop_gradient, tensors=states)internals = util.map_tensors(fn=tf.stop_gradient, tensors=internals)actions = util.map_tensors(fn=tf.stop_gradient, tensors=actions)terminal = tf.stop_gradient(input=terminal)reward = tf.stop_gradient(input=reward)observation = self.fn_observe_timestep(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward)with tf.control_dependencies(control_inputs=(observation,)):self.unbuffered_episode_output = self.global_episode + ", "docstring": "Returns the tf op to fetch when unbuffered observations are passed in.\n\nArgs:\n states (any): One state (usually a value tuple) or dict of states if multiple states are expected.\n actions (any): One action (usually a value tuple) or dict of states if multiple actions are expected.\n internals (any): Internal list.\n terminal (bool): boolean indicating if the 
episode terminated after the observation.\n reward (float): scalar reward that resulted from executing the action.\n\nReturns: Tf op to fetch when `observe()` is called.", "id": "f14275:c0:m19"} {"signature": "def create_operations(self, states, internals, actions, terminal, reward, deterministic, independent, index):", "body": "self.create_act_operations(states=states,internals=internals,deterministic=deterministic,independent=independent,index=index)self.create_observe_operations(reward=reward,terminal=terminal,index=index)self.create_atomic_observe_operations(states=states,actions=actions,internals=internals,reward=reward,terminal=terminal,index=index)", "docstring": "Creates and stores tf operations for when `act()` and `observe()` are called.", "id": "f14275:c0:m20"} {"signature": "def get_variables(self, include_submodules=False, include_nontrainable=False):", "body": "if include_nontrainable:model_variables = [self.all_variables[key] for key in sorted(self.all_variables)]states_preprocessing_variables = [variable for name in sorted(self.states_preprocessing)for variable in self.states_preprocessing[name].get_variables()]model_variables += states_preprocessing_variablesactions_exploration_variables = [variable for name in sorted(self.actions_exploration)for variable in self.actions_exploration[name].get_variables()]model_variables += actions_exploration_variablesif self.reward_preprocessing is not None:reward_preprocessing_variables = self.reward_preprocessing.get_variables()model_variables += reward_preprocessing_variableselse:model_variables = [self.variables[key] for key in sorted(self.variables)]return model_variables", "docstring": "Returns the TensorFlow variables used by the model.\n\nArgs:\n include_submodules: Includes variables of submodules (e.g. baseline, target network)\n if true.\n include_nontrainable: Includes non-trainable variables if true.\n\nReturns:\n List of variables.", "id": "f14275:c0:m21"} {"signature": "def reset(self):", "body": "fetches = [self.global_episode, self.global_timestep]for name in sorted(self.states_preprocessing):fetch = self.states_preprocessing[name].reset()if fetch is not None:fetches.extend(fetch)if self.flush_summarizer is not None:fetches.append(self.flush_summarizer)fetch_list = self.monitored_session.run(fetches=fetches)episode, timestep = fetch_list[:]return episode, timestep, self.internals_init", "docstring": "Resets the model to its initial state on episode start. 
This should also reset all preprocessor(s).\n\nReturns:\n tuple:\n Current episode, timestep counter and the shallow-copied list of internal state initialization Tensors.", "id": "f14275:c0:m22"} {"signature": "def get_feed_dict(self,states=None,internals=None,actions=None,terminal=None,reward=None,deterministic=None,independent=None,index=None):", "body": "feed_dict = dict()batched = Noneif states is not None:if batched is None:name = next(iter(states))state = np.asarray(states[name])batched = (state.ndim != len(self.states_spec[name]['']))if batched:feed_dict.update({self.states_input[name]: states[name] for name in sorted(self.states_input)})else:feed_dict.update({self.states_input[name]: (states[name],) for name in sorted(self.states_input)})if internals is not None:if batched is None:name = next(iter(internals))internal = np.asarray(internals[name])batched = (internal.ndim != len(self.internals_spec[name]['']))if batched:feed_dict.update({self.internals_input[name]: internals[name] for name in sorted(self.internals_input)})else:feed_dict.update({self.internals_input[name]: (internals[name],) for name in sorted(self.internals_input)})if actions is not None:if batched is None:name = next(iter(actions))action = np.asarray(actions[name])batched = (action.ndim != len(self.actions_spec[name]['']))if batched:feed_dict.update({self.actions_input[name]: actions[name] for name in sorted(self.actions_input)})else:feed_dict.update({self.actions_input[name]: (actions[name],) for name in sorted(self.actions_input)})if terminal is not None:if batched is None:terminal = np.asarray(terminal)batched = (terminal.ndim == )if batched:feed_dict[self.terminal_input] = terminalelse:feed_dict[self.terminal_input] = (terminal,)if reward is not None:if batched is None:reward = np.asarray(reward)batched = (reward.ndim == )if batched:feed_dict[self.reward_input] = rewardelse:feed_dict[self.reward_input] = (reward,)if deterministic is not None:feed_dict[self.deterministic_input] = deterministicif independent is not None:feed_dict[self.independent_input] = independentfeed_dict[self.episode_index_input] = indexreturn feed_dict", "docstring": "Returns the feed-dict for the model's acting and observing tf fetches.\n\nArgs:\n states (dict): Dict of state values (each key represents one state space component).\n internals (dict): Dict of internal state values (each key represents one internal state component).\n actions (dict): Dict of actions (each key represents one action space component).\n terminal (List[bool]): List of is-terminal signals.\n reward (List[float]): List of reward signals.\n deterministic (bool): Whether actions should be picked without exploration.\n independent (bool): Whether we are doing an independent act (not followed by call to observe;\n not to be stored in model's buffer).\n\nReturns: The feed dict to use for the fetch.", "id": "f14275:c0:m23"} {"signature": "def act(self, states, internals, deterministic=False, independent=False, fetch_tensors=None, index=):", "body": "name = next(iter(states))state = np.asarray(states[name])batched = (state.ndim != len(self.states_spec[name]['']))if batched:assert state.shape[] <= self.batching_capacityfetches = [self.actions_output, self.internals_output, self.timestep_output]if self.network is not None and fetch_tensors is not None:for name in fetch_tensors:valid, tensor = self.network.get_named_tensor(name)if valid:fetches.append(tensor)else:keys = self.network.get_list_of_named_tensor()raise TensorForceError(''.format(name, keys))feed_dict = 
self.get_feed_dict(states=states,internals=internals,deterministic=deterministic,independent=independent,index=index)fetch_list = self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)actions, internals, timestep = fetch_list[:]if not batched:actions = {name: actions[name][] for name in sorted(actions)}internals = {name: internals[name][] for name in sorted(internals)}if self.network is not None and fetch_tensors is not None:fetch_dict = dict()for index_, tensor in enumerate(fetch_list[:]):name = fetch_tensors[index_]fetch_dict[name] = tensorreturn actions, internals, timestep, fetch_dictelse:return actions, internals, timestep", "docstring": "Does a forward pass through the model to retrieve action (outputs) given inputs for state (and internal\nstate, if applicable (e.g. RNNs))\n\nArgs:\n states (dict): Dict of state values (each key represents one state space component).\n internals (dict): Dict of internal state values (each key represents one internal state component).\n deterministic (bool): If True, will not apply exploration after actions are calculated.\n independent (bool): If true, action is not followed by observe (and hence not included\n in updates).\n fetch_tensors (list): List of names of additional tensors (from the model's network) to fetch (and return).\n index: (int) index of the episode we want to produce the next action\n\nReturns:\n tuple:\n - Actual action-outputs (batched if state input is a batch).\n - Actual values of internal states (if applicable) (batched if state input is a batch).\n - The timestep (int) after calculating the (batch of) action(s).", "id": "f14275:c0:m24"} {"signature": "def observe(self, terminal, reward, index=):", "body": "fetches = self.episode_outputfeed_dict = self.get_feed_dict(terminal=terminal, reward=reward, index=index)episode = self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)return episode", "docstring": "Adds an observation (reward and is-terminal) to the model without updating its trainable variables.\n\nArgs:\n terminal (List[bool]): List of is-terminal signals.\n reward (List[float]): List of reward signals.\n index: (int) parallel episode you want to observe\n\nReturns:\n The value of the model-internal episode counter.", "id": "f14275:c0:m25"} {"signature": "def save(self, directory=None, append_timestep=True):", "body": "if self.flush_summarizer is not None:self.monitored_session.run(fetches=self.flush_summarizer)return self.saver.save(sess=self.session,save_path=(self.saver_directory if directory is None else directory),global_step=(self.global_timestep if append_timestep else None),meta_graph_suffix='',write_meta_graph=True,write_state=True)", "docstring": "Save TensorFlow model. If no checkpoint directory is given, the model's default saver\ndirectory is used. Optionally appends current timestep to prevent overwriting previous\ncheckpoint files. 
Turn off to be able to load model from the same given path argument as\ngiven here.\n\nArgs:\n directory: Optional checkpoint directory.\n append_timestep: Appends the current timestep to the checkpoint file if true.\n\nReturns:\n Checkpoint path where the model was saved.", "id": "f14275:c0:m27"} {"signature": "def restore(self, directory=None, file=None):", "body": "if file is None:file = tf.train.latest_checkpoint(checkpoint_dir=(self.saver_directory if directory is None else directory),)elif directory is None:file = os.path.join(self.saver_directory, file)elif not os.path.isfile(file):file = os.path.join(directory, file)self.saver.restore(sess=self.session, save_path=file)self.session.run(fetches=self.list_buffer_index_reset_op)", "docstring": "Restore TensorFlow model. If no checkpoint file is given, the latest checkpoint is\nrestored. If no checkpoint directory is given, the model's default saver directory is\nused (unless file specifies the entire path).\n\nArgs:\n directory: Optional checkpoint directory.\n file: Optional checkpoint file, or path if directory not given.", "id": "f14275:c0:m28"} {"signature": "def get_components(self):", "body": "return dict()", "docstring": "Returns a dictionary of component name to component of all the components within this model.\n\nReturns:\n (dict) The mapping of name to component.", "id": "f14275:c0:m29"} {"signature": "def get_savable_components(self):", "body": "components = self.get_components()components = [components[name] for name in sorted(components)]return set(filter(lambda x: isinstance(x, util.SavableComponent), components))", "docstring": "Returns the list of all of the components this model consists of that can be individually saved and restored.\nFor instance the network or distribution.\n\nReturns:\n List of util.SavableComponent", "id": "f14275:c0:m30"} {"signature": "def save_component(self, component_name, save_path):", "body": "component = self.get_component(component_name=component_name)self._validate_savable(component=component, component_name=component_name)return component.save(sess=self.session, save_path=save_path)", "docstring": "Saves a component of this model to the designated location.\n\nArgs:\n component_name: The component to save.\n save_path: The location to save to.\nReturns:\n Checkpoint path where the component was saved.", "id": "f14275:c0:m32"} {"signature": "def restore_component(self, component_name, save_path):", "body": "component = self.get_component(component_name=component_name)self._validate_savable(component=component, component_name=component_name)component.restore(sess=self.session, save_path=save_path)", "docstring": "Restores a component's parameters from a save location.\n\nArgs:\n component_name: The component to restore.\n save_path: The save location.", "id": "f14275:c0:m33"} {"signature": "def get_component(self, component_name):", "body": "mapping = self.get_components()return mapping[component_name] if component_name in mapping else None", "docstring": "Looks up a component by its name.\n\nArgs:\n component_name: The name of the component to look up.\nReturns:\n The component for the provided name or None if there is no such component.", "id": "f14275:c0:m34"} {"signature": "def __init__(self,states,actions,scope,device,saver,summarizer,execution,batching_capacity,variable_noise,states_preprocessing,actions_exploration,reward_preprocessing,update_mode,memory,optimizer,discount):", "body": "self.update_mode = update_modeself.memory_spec = memoryself.optimizer_spec = optimizerassert 
discount is None or discount >= self.discount = discountself.memory = Noneself.optimizer = Noneself.fn_discounted_cumulative_reward = Noneself.fn_reference = Noneself.fn_loss_per_instance = Noneself.fn_regularization_losses = Noneself.fn_loss = Noneself.fn_optimization = Noneself.fn_import_experience = Nonesuper(MemoryModel, self).__init__(states=states,actions=actions,scope=scope,device=device,saver=saver,summarizer=summarizer,execution=execution,batching_capacity=batching_capacity,variable_noise=variable_noise,states_preprocessing=states_preprocessing,actions_exploration=actions_exploration,reward_preprocessing=reward_preprocessing)", "docstring": "Memory model.\n\nArgs:\n states (spec): The state-space description dictionary.\n actions (spec): The action-space description dictionary.\n scope (str): The root scope str to use for tf variable scoping.\n device (str): The name of the device to run the graph of this model on.\n saver (spec): Dict specifying whether and how to save the model's parameters.\n summarizer (spec): Dict specifying which tensorboard summaries should be created and added to the graph.\n execution (spec): Dict specifying whether and how to do distributed training on the model's graph.\n batching_capacity (int): Batching capacity.\n variable_noise (float): The stddev value of a Normal distribution used for adding random\n noise to the model's output (for each batch, noise can be toggled and - if active - will be resampled).\n Use None for not adding any noise.\n states_preprocessing (spec / dict of specs): Dict specifying whether and how to preprocess state signals\n (e.g. normalization, greyscale, etc..).\n actions_exploration (spec / dict of specs): Dict specifying whether and how to add exploration to the model's\n \"action outputs\" (e.g. epsilon-greedy).\n reward_preprocessing (spec): Dict specifying whether and how to preprocess rewards coming\n from the Environment (e.g. reward normalization).\n update_mode (spec): Update mode.\n memory (spec): Memory.\n optimizer (spec): Dict specifying the tf optimizer to use for tuning the model's trainable parameters.\n discount (float): The RL reward discount factor (gamma).", "id": "f14277:c0:m0"} {"signature": "def as_local_model(self):", "body": "super(MemoryModel, self).as_local_model()self.optimizer_spec = dict(type='',optimizer=self.optimizer_spec)", "docstring": "Makes sure our optimizer is wrapped into the global_optimizer meta. 
This is only relevant for distributed RL.", "id": "f14277:c0:m1"} {"signature": "def setup_components_and_tf_funcs(self, custom_getter=None):", "body": "custom_getter = super(MemoryModel, self).setup_components_and_tf_funcs(custom_getter)self.memory = Memory.from_spec(spec=self.memory_spec,kwargs=dict(states=self.states_spec,internals=self.internals_spec,actions=self.actions_spec,summary_labels=self.summary_labels))self.optimizer = Optimizer.from_spec(spec=self.optimizer_spec,kwargs=dict(summary_labels=self.summary_labels))self.fn_discounted_cumulative_reward = tf.make_template(name_='',func_=self.tf_discounted_cumulative_reward,custom_getter_=custom_getter)self.fn_reference = tf.make_template(name_='',func_=self.tf_reference,custom_getter_=custom_getter)self.fn_loss_per_instance = tf.make_template(name_='',func_=self.tf_loss_per_instance,custom_getter_=custom_getter)self.fn_regularization_losses = tf.make_template(name_='',func_=self.tf_regularization_losses,custom_getter_=custom_getter)self.fn_loss = tf.make_template(name_='',func_=self.tf_loss,custom_getter_=custom_getter)self.fn_optimization = tf.make_template(name_='',func_=self.tf_optimization,custom_getter_=custom_getter)self.fn_import_experience = tf.make_template(name_='',func_=self.tf_import_experience,custom_getter_=custom_getter)return custom_getter", "docstring": "Constructs the memory and the optimizer objects.\nGenerates and stores all template functions.", "id": "f14277:c0:m2"} {"signature": "def tf_initialize(self):", "body": "super(MemoryModel, self).tf_initialize()self.memory.initialize()", "docstring": "Also initializes our Memory object (self.memory).", "id": "f14277:c0:m3"} {"signature": "def tf_discounted_cumulative_reward(self, terminal, reward, discount=None, final_reward=, horizon=):", "body": "if discount is None:discount = self.discountdef cumulate(cumulative, reward_terminal_horizon_subtract):rew, is_terminal, is_over_horizon, sub = reward_terminal_horizon_subtractreturn tf.where(condition=is_terminal,x=rew,y=tf.where(condition=is_over_horizon,x=(rew + cumulative * discount - sub),y=(rew + cumulative * discount)))def len_(cumulative, term):return tf.where(condition=term,x=tf.ones(shape=(), dtype=tf.int32),y=cumulative + )reward = tf.reverse(tensor=reward, axis=(,))terminal = tf.reverse(tensor=terminal, axis=(,))lengths = tf.scan(fn=len_, elems=terminal, initializer=)off_horizon = tf.greater(lengths, tf.fill(dims=tf.shape(lengths), value=horizon))if horizon > :horizon_subtractions = tf.map_fn(lambda x: (discount ** horizon) * x, reward, dtype=tf.float32)horizon_subtractions = tf.concat([np.zeros(shape=(horizon,)), horizon_subtractions], axis=)horizon_subtractions = tf.slice(horizon_subtractions, begin=(,), size=tf.shape(reward))else:horizon_subtractions = tf.zeros(shape=tf.shape(reward))reward = tf.scan(fn=cumulate,elems=(reward, terminal, off_horizon, horizon_subtractions),initializer=final_reward if horizon != else )return tf.reverse(tensor=reward, axis=(,))", "docstring": "Creates and returns the TensorFlow operations for calculating the sequence of discounted cumulative rewards\nfor a given sequence of single rewards.\n\nExample:\nsingle rewards = 2.0 1.0 0.0 0.5 1.0 -1.0\nterminal = False, False, False, False True False\ngamma = 0.95\nfinal_reward = 100.0 (only matters for last episode (r=-1.0) as this episode has no terminal signal)\nhorizon=3\noutput = 2.95 1.45 1.38 1.45 1.0 94.0\n\nArgs:\n terminal: Tensor (bool) holding the is-terminal sequence. This sequence may contain more than one\n True value. 
If its very last element is False (not terminating), the given `final_reward` value\n is assumed to follow the last value in the single rewards sequence (see below).\n reward: Tensor (float) holding the sequence of single rewards. If the last element of `terminal` is False,\n an assumed last reward of the value of `final_reward` will be used.\n discount (float): The discount factor (gamma). By default, take the Model's discount factor.\n final_reward (float): Reward value to use if last episode in sequence does not terminate (terminal sequence\n ends with False). This value will be ignored if horizon == 1 or discount == 0.0.\n horizon (int): The length of the horizon (e.g. for n-step cumulative rewards in continuous tasks\n without terminal signals). Use 0 (default) for an infinite horizon. Note that horizon=1 leads to the\n exact same results as a discount factor of 0.0.\n\nReturns:\n Discounted cumulative reward tensor with the same shape as `reward`.", "id": "f14277:c0:m4"} {"signature": "def tf_reference(self, states, internals, actions, terminal, reward, next_states, next_internals, update):", "body": "return None", "docstring": "Creates the TensorFlow operations for obtaining the reference tensor(s), in case of a\ncomparative loss.\n\nArgs:\n states: Dict of state tensors.\n internals: List of prior internal state tensors.\n actions: Dict of action tensors.\n terminal: Terminal boolean tensor.\n reward: Reward tensor.\n next_states: Dict of successor state tensors.\n next_internals: List of posterior internal state tensors.\n update: Boolean tensor indicating whether this call happens during an update.\n\nReturns:\n Reference tensor(s).", "id": "f14277:c0:m5"} {"signature": "def tf_loss_per_instance(self, states, internals, actions, terminal, reward,next_states, next_internals, update, reference=None):", "body": "raise NotImplementedError", "docstring": "Creates the TensorFlow operations for calculating the loss per batch instance.\n\nArgs:\n states: Dict of state tensors.\n internals: Dict of prior internal state tensors.\n actions: Dict of action tensors.\n terminal: Terminal boolean tensor.\n reward: Reward tensor.\n next_states: Dict of successor state tensors.\n next_internals: List of posterior internal state tensors.\n update: Boolean tensor indicating whether this call happens during an update.\n reference: Optional reference tensor(s), in case of a comparative loss.\n\nReturns:\n Loss per instance tensor.", "id": "f14277:c0:m6"} {"signature": "def tf_regularization_losses(self, states, internals, update):", "body": "return dict()", "docstring": "Creates the TensorFlow operations for calculating the regularization losses for the given input states.\n\nArgs:\n states: Dict of state tensors.\n internals: List of prior internal state tensors.\n update: Boolean tensor indicating whether this call happens during an update.\n\nReturns:\n Dict of regularization loss tensors.", "id": "f14277:c0:m7"} {"signature": "def tf_loss(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None):", "body": "loss_per_instance = self.fn_loss_per_instance(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward,next_states=next_states,next_internals=next_internals,update=update,reference=reference)updated = self.memory.update_batch(loss_per_instance=loss_per_instance)with tf.control_dependencies(control_inputs=(updated,)):loss = tf.reduce_mean(input_tensor=loss_per_instance, axis=)if '' in 
self.summary_labels:tf.contrib.summary.scalar(name='', tensor=loss)losses = self.fn_regularization_losses(states=states, internals=internals, update=update)if len(losses) > :loss += tf.add_n(inputs=[losses[name] for name in sorted(losses)])if '' in self.summary_labels:for name in sorted(losses):tf.contrib.summary.scalar(name=('' + name), tensor=losses[name])if '' in self.summary_labels or '' in self.summary_labels:tf.contrib.summary.scalar(name='', tensor=loss)return loss", "docstring": "Creates the TensorFlow operations for calculating the full loss of a batch.\n\nArgs:\n states: Dict of state tensors.\n internals: List of prior internal state tensors.\n actions: Dict of action tensors.\n terminal: Terminal boolean tensor.\n reward: Reward tensor.\n next_states: Dict of successor state tensors.\n next_internals: List of posterior internal state tensors.\n update: Boolean tensor indicating whether this call happens during an update.\n reference: Optional reference tensor(s), in case of a comparative loss.\n\nReturns:\n Loss tensor.", "id": "f14277:c0:m8"} {"signature": "def optimizer_arguments(self, states, internals, actions, terminal, reward, next_states, next_internals):", "body": "arguments = dict(time=self.global_timestep,variables=self.get_variables(),arguments=dict(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward,next_states=next_states,next_internals=next_internals,update=tf.constant(value=True)),fn_reference=self.fn_reference,fn_loss=self.fn_loss)if self.global_model is not None:arguments[''] = self.global_model.get_variables()return arguments", "docstring": "Returns the optimizer arguments including the time, the list of variables to optimize,\nand various functions which the optimizer might require to perform an update step.\n\nArgs:\n states (dict): Dict of state tensors.\n internals (dict): Dict of prior internal state tensors.\n actions (dict): Dict of action tensors.\n terminal: 1D boolean is-terminal tensor.\n reward: 1D (float) rewards tensor.\n next_states (dict): Dict of successor state tensors.\n next_internals (dict): Dict of posterior internal state tensors.\n\nReturns:\n Optimizer arguments as dict to be used as **kwargs to the optimizer.", "id": "f14277:c0:m9"} {"signature": "def tf_optimization(self, states, internals, actions, terminal, reward, next_states=None, next_internals=None):", "body": "arguments = self.optimizer_arguments(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward,next_states=next_states,next_internals=next_internals)return self.optimizer.minimize(**arguments)", "docstring": "Creates the TensorFlow operations for performing an optimization update step based\non the given input states and actions batch.\n\nArgs:\n states: Dict of state tensors.\n internals: List of prior internal state tensors.\n actions: Dict of action tensors.\n terminal: Terminal boolean tensor.\n reward: Reward tensor.\n next_states: Dict of successor state tensors.\n next_internals: List of posterior internal state tensors.\n\nReturns:\n The optimization operation.", "id": "f14277:c0:m10"} {"signature": "def tf_observe_timestep(self, states, internals, actions, terminal, reward):", "body": "stored = self.memory.store(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward)with tf.control_dependencies(control_inputs=(stored,)):unit = self.update_mode['']batch_size = self.update_mode['']frequency = self.update_mode.get('', batch_size)first_update = self.update_mode.get('', )if unit == 
'':optimize = tf.logical_and(x=tf.equal(x=(self.timestep % frequency), y=),y=tf.logical_and(x=tf.greater_equal(x=self.timestep, y=batch_size),y=tf.greater_equal(x=self.timestep, y=first_update)))elif unit == '':optimize = tf.logical_and(x=tf.equal(x=(self.episode % frequency), y=),y=tf.logical_and(x=tf.greater(x=tf.count_nonzero(input_tensor=terminal), y=),y=tf.logical_and(x=tf.greater_equal(x=self.episode, y=batch_size),y=tf.greater_equal(x=self.episode, y=first_update))))elif unit == '':sequence_length = self.update_mode.get('', )optimize = tf.logical_and(x=tf.equal(x=(self.timestep % frequency), y=),y=tf.logical_and(x=tf.greater_equal(x=self.timestep, y=(batch_size + sequence_length - )),y=tf.greater_equal(x=self.timestep, y=first_update)))else:raise TensorForceError(\"\".format(unit))def true_fn():if unit == '':batch = self.memory.retrieve_timesteps(n=batch_size)elif unit == '':batch = self.memory.retrieve_episodes(n=batch_size)elif unit == '':batch = self.memory.retrieve_sequences(n=batch_size, sequence_length=sequence_length)batch = util.map_tensors(fn=(lambda tensor: tf.stop_gradient(input=tensor)),tensors=batch)optimize = self.fn_optimization(**batch)with tf.control_dependencies(control_inputs=(optimize,)):return tf.logical_and(x=True, y=True)return tf.cond(pred=optimize, true_fn=true_fn, false_fn=tf.no_op)", "docstring": "Creates and returns the op that - if frequency condition is hit - pulls a batch from the memory\nand does one optimization step.", "id": "f14277:c0:m11"} {"signature": "def tf_import_experience(self, states, internals, actions, terminal, reward):", "body": "return self.memory.store(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward)", "docstring": "Imports experiences into the TensorFlow memory structure. 
Can be used to import\noff-policy data.\n\n:param states: Dict of state values to import with keys as state names and values as values to set.\n:param internals: Internal values to set, can be fetched from agent via agent.current_internals\n if no values available.\n:param actions: Dict of action values to import with keys as action names and values as values to set.\n:param terminal: Terminal value(s)\n:param reward: Reward value(s)", "id": "f14277:c0:m12"} {"signature": "def import_experience(self, states, internals, actions, terminal, reward):", "body": "fetches = self.import_experience_outputfeed_dict = self.get_feed_dict(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward)self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)", "docstring": "Stores experiences.", "id": "f14277:c0:m15"} {"signature": "def tf_baseline_loss(self, states, internals, reward, update, reference=None):", "body": "if self.baseline_mode == '':loss = self.baseline.loss(states=states,internals=internals,reward=reward,update=update,reference=reference)elif self.baseline_mode == '':loss = self.baseline.loss(states=self.network.apply(x=states, internals=internals, update=update),internals=internals,reward=reward,update=update,reference=reference)regularization_loss = self.baseline.regularization_loss()if regularization_loss is not None:loss += regularization_lossreturn loss", "docstring": "Creates the TensorFlow operations for calculating the baseline loss of a batch.\n\nArgs:\n states: Dict of state tensors.\n internals: List of prior internal state tensors.\n reward: Reward tensor.\n update: Boolean tensor indicating whether this call happens during an update.\n reference: Optional reference tensor(s), in case of a comparative loss.\n\nReturns:\n Loss tensor.", "id": "f14283:c0:m5"} {"signature": "def baseline_optimizer_arguments(self, states, internals, reward):", "body": "arguments = dict(time=self.global_timestep,variables=self.baseline.get_variables(),arguments=dict(states=states,internals=internals,reward=reward,update=tf.constant(value=True),),fn_reference=self.baseline.reference,fn_loss=self.fn_baseline_loss,)if self.global_model is not None:arguments[''] = self.global_model.baseline.get_variables()return arguments", "docstring": "Returns the baseline optimizer arguments including the time, the list of variables to \noptimize, and various functions which the optimizer might require to perform an update \nstep.\n\nArgs:\n states: Dict of state tensors.\n internals: List of prior internal state tensors.\n reward: Reward tensor.\n\nReturns:\n Baseline optimizer arguments as dict.", "id": "f14283:c0:m6"} {"signature": "def setup_components_and_tf_funcs(self, custom_getter=None):", "body": "custom_getter = super(QDemoModel, self).setup_components_and_tf_funcs(custom_getter)self.demo_memory = Replay(states=self.states_spec,internals=self.internals_spec,actions=self.actions_spec,include_next_states=True,capacity=self.demo_memory_capacity,scope='',summary_labels=self.summary_labels)self.fn_import_demo_experience = tf.make_template(name_='',func_=self.tf_import_demo_experience,custom_getter_=custom_getter)self.fn_demo_loss = tf.make_template(name_='',func_=self.tf_demo_loss,custom_getter_=custom_getter)self.fn_combined_loss = tf.make_template(name_='',func_=self.tf_combined_loss,custom_getter_=custom_getter)self.fn_demo_optimization = tf.make_template(name_='',func_=self.tf_demo_optimization,custom_getter_=custom_getter)return custom_getter", "docstring": "Constructs the extra 
Replay memory.", "id": "f14284:c0:m1"} {"signature": "def tf_import_demo_experience(self, states, internals, actions, terminal, reward):", "body": "return self.demo_memory.store(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward)", "docstring": "Imports a single experience to memory.", "id": "f14284:c0:m3"} {"signature": "def tf_demo_loss(self, states, actions, terminal, reward, internals, update, reference=None):", "body": "embedding = self.network.apply(x=states, internals=internals, update=update)deltas = list()for name in sorted(actions):action = actions[name]distr_params = self.distributions[name].parameterize(x=embedding)state_action_value = self.distributions[name].state_action_value(distr_params=distr_params, action=action)if self.actions_spec[name][''] == '':num_actions = action = tf.cast(x=action, dtype=util.tf_dtype(''))else:num_actions = self.actions_spec[name]['']one_hot = tf.one_hot(indices=action, depth=num_actions)ones = tf.ones_like(tensor=one_hot, dtype=tf.float32)inverted_one_hot = ones - one_hotstate_action_values = self.distributions[name].state_action_value(distr_params=distr_params)state_action_values = state_action_values + inverted_one_hot * self.expert_marginsupervised_selector = tf.reduce_max(input_tensor=state_action_values, axis=-)delta = supervised_selector - state_action_valueaction_size = util.prod(self.actions_spec[name][''])delta = tf.reshape(tensor=delta, shape=(-, action_size))deltas.append(delta)loss_per_instance = tf.reduce_mean(input_tensor=tf.concat(values=deltas, axis=), axis=)loss_per_instance = tf.square(x=loss_per_instance)return tf.reduce_mean(input_tensor=loss_per_instance, axis=)", "docstring": "Extends the q-model loss via the dqfd large-margin loss.", "id": "f14284:c0:m4"} {"signature": "def tf_combined_loss(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None):", "body": "q_model_loss = self.fn_loss(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward,next_states=next_states,next_internals=next_internals,update=update,reference=reference)demo_loss = self.fn_demo_loss(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward,update=update,reference=reference)return q_model_loss + self.supervised_weight * demo_loss", "docstring": "Combines Q-loss and demo loss.", "id": "f14284:c0:m5"} {"signature": "def get_variables(self, include_submodules=False, include_nontrainable=False):", "body": "model_variables = super(QDemoModel, self).get_variables(include_submodules=include_submodules,include_nontrainable=include_nontrainable)if include_nontrainable:demo_memory_variables = self.demo_memory.get_variables()model_variables += demo_memory_variablesreturn model_variables", "docstring": "Returns the TensorFlow variables used by the model.\n\nReturns:\n List of variables.", "id": "f14284:c0:m9"} {"signature": "def import_demo_experience(self, states, internals, actions, terminal, reward):", "body": "fetches = self.import_demo_experience_outputfeed_dict = self.get_feed_dict(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward)self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)", "docstring": "Stores demonstrations in the demo memory.", "id": "f14284:c0:m10"} {"signature": "def demo_update(self):", "body": "fetches = self.demo_optimization_outputself.monitored_session.run(fetches=fetches)", "docstring": "Performs a demonstration update by calling the demo optimization 
operation.\nNote that the batch data does not have to be fetched from the demo memory as this is now part of\nthe TensorFlow operation of the demo update.", "id": "f14284:c0:m11"} {"signature": "def tf_q_delta(self, q_value, next_q_value, terminal, reward):", "body": "for _ in range(util.rank(q_value) - ):terminal = tf.expand_dims(input=terminal, axis=)reward = tf.expand_dims(input=reward, axis=)multiples = (,) + util.shape(q_value)[:]terminal = tf.tile(input=terminal, multiples=multiples)reward = tf.tile(input=reward, multiples=multiples)zeros = tf.zeros_like(tensor=next_q_value)next_q_value = tf.where(condition=terminal, x=zeros, y=(self.discount * next_q_value))return reward + next_q_value - q_value", "docstring": "Creates the deltas (or advantage) of the Q values.\n\n:return: A list of deltas per action", "id": "f14285:c0:m4"} {"signature": "def target_optimizer_arguments(self):", "body": "variables = self.target_network.get_variables() + [variable for name in sorted(self.target_distributions)for variable in self.target_distributions[name].get_variables()]source_variables = self.network.get_variables() + [variable for name in sorted(self.distributions)for variable in self.distributions[name].get_variables()]arguments = dict(time=self.global_timestep,variables=variables,source_variables=source_variables)if self.global_model is not None:arguments[''] = self.global_model.target_network.get_variables() + [variable for name in sorted(self.global_model.target_distributions)for variable in self.global_model.target_distributions[name].get_variables()]return arguments", "docstring": "Returns the target optimizer arguments including the time, the list of variables to \noptimize, and various functions which the optimizer might require to perform an update \nstep.\n\nReturns:\n Target optimizer arguments as dict.", "id": "f14285:c0:m6"} {"signature": "def __init__(self,states,actions,batched_observe=True,batching_capacity=,scope='',device=None,saver=None,summarizer=None,execution=None,):", "body": "self.scope = scopeself.device = deviceself.saver = saverself.summarizer = summarizerself.execution = sanity_check_execution_spec(execution)super(RandomAgent, self).__init__(states=states,actions=actions,batched_observe=batched_observe,batching_capacity=batching_capacity)", "docstring": "Initializes the random agent.\n\nArgs:\n scope (str): TensorFlow scope (default: name of agent).\n device: TensorFlow device (default: none)\n saver (spec): Saver specification, with the following attributes (default: none):\n - directory: model directory.\n - file: model filename (optional).\n - seconds or steps: save frequency (default: 600 seconds).\n - load: specifies whether model is loaded, if existent (default: true).\n - basename: optional file basename (default: 'model.ckpt').\n summarizer (spec): Summarizer specification, with the following attributes (default:\n none):\n - directory: summaries directory.\n - seconds or steps: summarize frequency (default: 120 seconds).\n - labels: list of summary labels to record (default: []).\n - meta_param_recorder_class: ???.\n execution (spec): Execution specification (see sanity_check_execution_spec for details).", "id": "f14286:c0:m0"} {"signature": "def 
__init__(self,states,actions,network,batched_observe=True,batching_capacity=,scope='',device=None,saver=None,summarizer=None,execution=None,variable_noise=None,states_preprocessing=None,actions_exploration=None,reward_preprocessing=None,update_mode=None,memory=None,discount=,distributions=None,entropy_regularization=None,baseline_mode=None,baseline=None,baseline_optimizer=None,gae_lambda=None,likelihood_ratio_clipping=None,learning_rate=,ls_max_iterations=,ls_accept_ratio=,ls_unroll_loop=False):", "body": "if update_mode is None:update_mode = dict(unit='',batch_size=)elif '' in update_mode:passelse:update_mode[''] = ''if memory is None:memory = dict(type='',include_next_states=False,capacity=( * update_mode['']))else:assert not memory['']assert (update_mode[''] != '' or memory[''] == '')optimizer = dict(type='',optimizer=dict(type='',learning_rate=learning_rate),ls_max_iterations=ls_max_iterations,ls_accept_ratio=ls_accept_ratio,ls_mode='', ls_parameter=, ls_unroll_loop=ls_unroll_loop)self.baseline_mode = baseline_modeself.baseline = baselineself.baseline_optimizer = baseline_optimizerself.gae_lambda = gae_lambdaself.likelihood_ratio_clipping = likelihood_ratio_clippingsuper(ACKTRAgent, self).__init__(states=states,actions=actions,batched_observe=batched_observe,batching_capacity=batching_capacity,scope=scope,device=device,saver=saver,summarizer=summarizer,execution=execution,variable_noise=variable_noise,states_preprocessing=states_preprocessing,actions_exploration=actions_exploration,reward_preprocessing=reward_preprocessing,update_mode=update_mode,memory=memory,optimizer=optimizer,discount=discount,network=network,distributions=distributions,entropy_regularization=entropy_regularization)", "docstring": "Initializes the ACKTR agent.\n\nArgs:\n update_mode (spec): Update mode specification, with the following attributes:\n - unit: 'episodes' if given (default: 'episodes').\n - batch_size: integer (default: 10).\n - frequency: integer (default: batch_size).\n memory (spec): Memory specification, see core.memories module for more information\n (default: {type='latest', include_next_states=false, capacity=1000*batch_size}).\n optimizer (spec): ACKTR agent implicitly defines an optimized-step KFAC optimizer.\n baseline_mode (str): One of 'states', 'network' (default: none).\n baseline (spec): Baseline specification, see core.baselines module for more information\n (default: none).\n baseline_optimizer (spec): Baseline optimizer specification, see core.optimizers module\n for more information (default: none).\n gae_lambda (float): Lambda factor for generalized advantage estimation (default: none).\n likelihood_ratio_clipping (float): Likelihood ratio clipping for policy gradient\n (default: none).\n learning_rate (float): Learning rate of natural-gradient optimizer (default: 1e-3).\n cg_max_iterations (int): Conjugate-gradient max iterations (default: 20).\n cg_damping (float): Conjugate-gradient damping (default: 1e-3).\n cg_unroll_loop (bool): Conjugate-gradient unroll loop (default: false).\n ls_max_iterations (int): Line-search max iterations (default: 10).\n ls_accept_ratio (float): Line-search accept ratio (default: 0.9).\n ls_unroll_loop (bool): Line-search unroll loop (default: false).", "id": "f14287:c0:m0"} {"signature": "def 
__init__(self,states,actions,network,batched_observe=True,batching_capacity=,scope='',device=None,saver=None,summarizer=None,execution=None,variable_noise=None,states_preprocessing=None,actions_exploration=None,reward_preprocessing=None,update_mode=None,memory=None,optimizer=None,discount=,distributions=None,entropy_regularization=None,target_sync_frequency=,target_update_weight=,double_q_model=False,huber_loss=None):", "body": "if update_mode is None:update_mode = dict(unit='',batch_size=,frequency=)elif '' in update_mode:assert update_mode[''] == ''else:update_mode[''] = ''if memory is None:memory = dict(type='',include_next_states=True,capacity=( * update_mode['']))else:assert memory['']if optimizer is None:optimizer = dict(type='',learning_rate=)self.target_sync_frequency = target_sync_frequencyself.target_update_weight = target_update_weightself.double_q_model = double_q_modelself.huber_loss = huber_losssuper(DQNAgent, self).__init__(states=states,actions=actions,batched_observe=batched_observe,batching_capacity=batching_capacity,scope=scope,device=device,saver=saver,summarizer=summarizer,execution=execution,variable_noise=variable_noise,states_preprocessing=states_preprocessing,actions_exploration=actions_exploration,reward_preprocessing=reward_preprocessing,update_mode=update_mode,memory=memory,optimizer=optimizer,discount=discount,network=network,distributions=distributions,entropy_regularization=entropy_regularization)", "docstring": "Initializes the DQN agent.\n\nArgs:\n update_mode (spec): Update mode specification, with the following attributes:\n - unit: 'timesteps' if given (default: 'timesteps').\n - batch_size: integer (default: 32).\n - frequency: integer (default: 4).\n memory (spec): Memory specification, see core.memories module for more information\n (default: {type='replay', include_next_states=true, capacity=1000*batch_size}).\n optimizer (spec): Optimizer specification, see core.optimizers module for more\n information (default: {type='adam', learning_rate=1e-3}).\n target_sync_frequency (int): Target network sync frequency (default: 10000).\n target_update_weight (float): Target network update weight (default: 1.0).\n double_q_model (bool): Specifies whether double DQN mode is used (default: false).\n huber_loss (float): Huber loss clipping (default: none).", "id": "f14288:c0:m0"} {"signature": "def __init__(self,states,actions,network,batched_observe=True,batching_capacity=,scope='',device=None,saver=None,summarizer=None,execution=None,variable_noise=None,states_preprocessing=None,actions_exploration=None,reward_preprocessing=None,update_mode=None,memory=None,optimizer=None,discount=,distributions=None,entropy_regularization=None,target_sync_frequency=,target_update_weight=,double_q_model=False,huber_loss=None):", "body": "if update_mode is None:update_mode = dict(unit='',batch_size=)elif '' in update_mode:assert update_mode[''] == ''else:update_mode[''] = ''if memory is None:memory = dict(type='',include_next_states=True,capacity=( * update_mode['']))else:assert memory['']if optimizer is None:optimizer = dict(type='',learning_rate=)self.target_sync_frequency = target_sync_frequencyself.target_update_weight = target_update_weightself.double_q_model = double_q_modelself.huber_loss = huber_losssuper(DQNNstepAgent, 
self).__init__(states=states,actions=actions,batched_observe=batched_observe,batching_capacity=batching_capacity,scope=scope,device=device,saver=saver,summarizer=summarizer,execution=execution,variable_noise=variable_noise,states_preprocessing=states_preprocessing,actions_exploration=actions_exploration,reward_preprocessing=reward_preprocessing,update_mode=update_mode,memory=memory,optimizer=optimizer,discount=discount,network=network,distributions=distributions,entropy_regularization=entropy_regularization)", "docstring": "Initializes the DQN n-step agent.\n\nArgs:\n update_mode (spec): Update mode specification, with the following attributes:\n - unit: 'episodes' if given (default: 'episodes').\n - batch_size: integer (default: 10).\n - frequency: integer (default: batch_size).\n memory (spec): Memory specification, see core.memories module for more information\n (default: {type='latest', include_next_states=true, capacity=1000*batch_size}).\n optimizer (spec): Optimizer specification, see core.optimizers module for more\n information (default: {type='adam', learning_rate=1e-3}).\n target_sync_frequency (int): Target network sync frequency (default: 10000).\n target_update_weight (float): Target network update weight (default: 1.0).\n double_q_model (bool): Specifies whether double DQN mode is used (default: false).\n huber_loss (float): Huber loss clipping (default: none).", "id": "f14289:c0:m0"} {"signature": "def __init__(self,states,actions,batched_observe=True,batching_capacity=):", "body": "self.states, self.unique_state = sanity_check_states(states)self.actions, self.unique_action = sanity_check_actions(actions)self.batched_observe = batched_observeself.batching_capacity = batching_capacityself.current_states = Noneself.current_actions = Noneself.current_internals = Noneself.next_internals = Noneself.current_terminal = Noneself.current_reward = Noneself.timestep = Noneself.episode = Noneself.model = self.initialize_model()if self.batched_observe:assert self.batching_capacity is not Noneself.observe_terminal = [list() for _ in range(self.model.num_parallel)]self.observe_reward = [list() for _ in range(self.model.num_parallel)]self.reset()", "docstring": "Initializes the agent.\n\nArgs:\n states (spec, or dict of specs): States specification, with the following attributes\n (required):\n - type: one of 'bool', 'int', 'float' (default: 'float').\n - shape: integer, or list/tuple of integers (required).\n actions (spec, or dict of specs): Actions specification, with the following attributes\n (required):\n - type: one of 'bool', 'int', 'float' (required).\n - shape: integer, or list/tuple of integers (default: []).\n - num_actions: integer (required if type == 'int').\n - min_value and max_value: float (optional if type == 'float', default: none).\n batched_observe (bool): Specifies whether calls to model.observe() are batched, for\n improved performance (default: true).\n batching_capacity (int): Batching capacity of agent and model (default: 1000).", "id": "f14290:c0:m0"} {"signature": "def initialize_model(self):", "body": "raise NotImplementedError", "docstring": "Creates and returns the model (including a local replica in case of distributed learning) for this agent\nbased on specifications given by user. 
This method needs to be implemented by the different agent subclasses.", "id": "f14290:c0:m3"} {"signature": "def reset(self):", "body": "self.episode, self.timestep, self.next_internals = self.model.reset()self.current_internals = self.next_internals", "docstring": "Resets the agent to its initial state (e.g. on experiment start). Updates the Model's internal episode and\ntime step counter, internal states, and resets preprocessors.", "id": "f14290:c0:m4"} {"signature": "def act(self, states, deterministic=False, independent=False, fetch_tensors=None, buffered=True, index=):", "body": "self.current_internals = self.next_internalsif self.unique_state:self.current_states = dict(state=np.asarray(states))else:self.current_states = {name: np.asarray(states[name]) for name in sorted(states)}if fetch_tensors is not None:self.current_actions, self.next_internals, self.timestep, self.fetched_tensors = self.model.act(states=self.current_states,internals=self.current_internals,deterministic=deterministic,independent=independent,fetch_tensors=fetch_tensors,index=index)if self.unique_action:return self.current_actions[''], self.fetched_tensorselse:return self.current_actions, self.fetched_tensorsself.current_actions, self.next_internals, self.timestep = self.model.act(states=self.current_states,internals=self.current_internals,deterministic=deterministic,independent=independent,index=index)if buffered:if self.unique_action:return self.current_actions['']else:return self.current_actionselse:if self.unique_action:return self.current_actions[''], self.current_states, self.current_internalselse:return self.current_actions, self.current_states, self.current_internals", "docstring": "Return action(s) for given state(s). States preprocessing and exploration are applied if\nconfigured accordingly.\n\nArgs:\n states (any): One state (usually a value tuple) or dict of states if multiple states are expected.\n deterministic (bool): If true, no exploration and sampling is applied.\n independent (bool): If true, action is not followed by observe (and hence not included\n in updates).\n fetch_tensors (list): Optional String of named tensors to fetch\n buffered (bool): If true (default), states and internals are not returned but buffered\n with observes. Must be false for multi-threaded mode as we need atomic inserts.\nReturns:\n Scalar value of the action or dict of multiple actions the agent wants to execute.\n (fetched_tensors) Optional dict() with named tensors fetched", "id": "f14290:c0:m5"} {"signature": "def observe(self, terminal, reward, index=):", "body": "self.current_terminal = terminalself.current_reward = rewardif self.batched_observe:self.observe_terminal[index].append(self.current_terminal)self.observe_reward[index].append(self.current_reward)if self.current_terminal or len(self.observe_terminal[index]) >= self.batching_capacity:self.episode = self.model.observe(terminal=self.observe_terminal[index],reward=self.observe_reward[index],index=index)self.observe_terminal[index] = list()self.observe_reward[index] = list()else:self.episode = self.model.observe(terminal=self.current_terminal,reward=self.current_reward)", "docstring": "Observe experience from the environment to learn from. 
Optionally pre-processes rewards\nChild classes should call super to get the processed reward\nEX: terminal, reward = super()...\n\nArgs:\n terminal (bool): boolean indicating if the episode terminated after the observation.\n reward (float): scalar reward that resulted from executing the action.", "id": "f14290:c0:m6"} {"signature": "def atomic_observe(self, states, actions, internals, reward, terminal):", "body": "self.current_terminal = terminalself.current_reward = rewardif self.unique_state:states = dict(state=states)if self.unique_action:actions = dict(action=actions)self.episode = self.model.atomic_observe(states=states,actions=actions,internals=internals,terminal=self.current_terminal,reward=self.current_reward)", "docstring": "Utility method for unbuffered observing where each tuple is inserted into TensorFlow via\na single session call, thus avoiding race conditions in multi-threaded mode.\n\nObserve full experience tuplefrom the environment to learn from. Optionally pre-processes rewards\nChild classes should call super to get the processed reward\nEX: terminal, reward = super()...\n\nArgs:\n states (any): One state (usually a value tuple) or dict of states if multiple states are expected.\n actions (any): One action (usually a value tuple) or dict of states if multiple actions are expected.\n internals (any): Internal list.\n terminal (bool): boolean indicating if the episode terminated after the observation.\n reward (float): scalar reward that resulted from executing the action.", "id": "f14290:c0:m7"} {"signature": "def save_model(self, directory=None, append_timestep=True):", "body": "return self.model.save(directory=directory, append_timestep=append_timestep)", "docstring": "Save TensorFlow model. If no checkpoint directory is given, the model's default saver\ndirectory is used. Optionally appends current timestep to prevent overwriting previous\ncheckpoint files. Turn off to be able to load model from the same given path argument as\ngiven here.\n\nArgs:\n directory (str): Optional checkpoint directory.\n append_timestep (bool): Appends the current timestep to the checkpoint file if true.\n If this is set to True, the load path must include the checkpoint timestep suffix.\n For example, if stored to models/ and set to true, the exported file will be of the\n form models/model.ckpt-X where X is the last timestep saved. The load path must\n precisely match this file name. If this option is turned off, the checkpoint will\n always overwrite the file specified in path and the model can always be loaded under\n this path.\n\nReturns:\n Checkpoint path were the model was saved.", "id": "f14290:c0:m10"} {"signature": "def restore_model(self, directory=None, file=None):", "body": "self.model.restore(directory=directory, file=file)", "docstring": "Restore TensorFlow model. If no checkpoint file is given, the latest checkpoint is\nrestored. 
If no checkpoint directory is given, the model's default saver directory is\nused (unless file specifies the entire path).\n\nArgs:\n directory: Optional checkpoint directory.\n file: Optional checkpoint file, or path if directory not given.", "id": "f14290:c0:m11"} {"signature": "@staticmethoddef from_spec(spec, kwargs):", "body": "agent = util.get_object(obj=spec,predefined_objects=tensorforce.agents.agents,kwargs=kwargs)assert isinstance(agent, Agent)return agent", "docstring": "Creates an agent from a specification dict.", "id": "f14290:c0:m12"} {"signature": "def __init__(self,states,actions,network,batched_observe=True,batching_capacity=,scope='',device=None,saver=None,summarizer=None,execution=None,variable_noise=None,states_preprocessing=None,actions_exploration=None,reward_preprocessing=None,update_mode=None,memory=None,optimizer=None,discount=,distributions=None,entropy_regularization=None,baseline_mode=None,baseline=None,baseline_optimizer=None,gae_lambda=None):", "body": "if update_mode is None:update_mode = dict(unit='',batch_size=)elif '' in update_mode:passelse:update_mode[''] = ''if memory is None:memory = dict(type='',include_next_states=False,capacity=( * update_mode['']))else:assert not memory['']if optimizer is None:optimizer = dict(type='',learning_rate=)self.baseline_mode = baseline_modeself.baseline = baselineself.baseline_optimizer = baseline_optimizerself.gae_lambda = gae_lambdasuper(VPGAgent, self).__init__(states=states,actions=actions,batched_observe=batched_observe,batching_capacity=batching_capacity,scope=scope,device=device,saver=saver,summarizer=summarizer,execution=execution,variable_noise=variable_noise,states_preprocessing=states_preprocessing,actions_exploration=actions_exploration,reward_preprocessing=reward_preprocessing,update_mode=update_mode,memory=memory,optimizer=optimizer,discount=discount,network=network,distributions=distributions,entropy_regularization=entropy_regularization)", "docstring": "Initializes the VPG agent.\n\nArgs:\n update_mode (spec): Update mode specification, with the following attributes:\n - unit: 'episodes' if given (default: 'episodes').\n - batch_size: integer (default: 10).\n - frequency: integer (default: batch_size).\n memory (spec): Memory specification, see core.memories module for more information\n (default: {type='latest', include_next_states=false, capacity=1000*batch_size}).\n optimizer (spec): Optimizer specification, see core.optimizers module for more\n information (default: {type='adam', learning_rate=1e-3}).\n baseline_mode (str): One of 'states', 'network' (default: none).\n baseline (spec): Baseline specification, see core.baselines module for more information\n (default: none).\n baseline_optimizer (spec): Baseline optimizer specification, see core.optimizers module\n for more information (default: none).\n gae_lambda (float): Lambda factor for generalized advantage estimation (default: none).", "id": "f14292:c0:m0"} {"signature": "def __init__(self,states,actions,network,batched_observe=True,batching_capacity=,scope='',device=None,saver=None,summarizer=None,execution=None,variable_noise=None,states_preprocessing=None,actions_exploration=None,reward_preprocessing=None,update_mode=None,memory=None,discount=,distributions=None,entropy_regularization=None,baseline_mode=None,baseline=None,baseline_optimizer=None,gae_lambda=None,likelihood_ratio_clipping=,step_optimizer=None,subsampling_fraction=,optimization_steps=):", "body": "if update_mode is None:update_mode = dict(unit='',batch_size=)elif '' in 
update_mode:passelse:update_mode[''] = ''if memory is None:memory = dict(type='',include_next_states=False,capacity=( * update_mode['']))else:assert not memory['']assert (update_mode[''] != '' or memory[''] == '')if step_optimizer is None:step_optimizer = dict(type='',learning_rate=)optimizer = dict(type='',optimizer=dict(type='',optimizer=step_optimizer,fraction=subsampling_fraction),num_steps=optimization_steps)self.baseline_mode = baseline_modeself.baseline = baselineself.baseline_optimizer = baseline_optimizerself.gae_lambda = gae_lambdaself.likelihood_ratio_clipping = likelihood_ratio_clippingsuper(PPOAgent, self).__init__(states=states,actions=actions,batched_observe=batched_observe,batching_capacity=batching_capacity,scope=scope,device=device,saver=saver,summarizer=summarizer,execution=execution,variable_noise=variable_noise,states_preprocessing=states_preprocessing,actions_exploration=actions_exploration,reward_preprocessing=reward_preprocessing,update_mode=update_mode,memory=memory,optimizer=optimizer,discount=discount,network=network,distributions=distributions,entropy_regularization=entropy_regularization)", "docstring": "Initializes the PPO agent.\n\nArgs:\n update_mode (spec): Update mode specification, with the following attributes:\n - unit: 'episodes' if given (default: 'episodes').\n - batch_size: integer (default: 10).\n - frequency: integer (default: batch_size).\n memory (spec): Memory specification, see core.memories module for more information\n (default: {type='latest', include_next_states=false, capacity=1000*batch_size}).\n optimizer (spec): PPO agent implicitly defines a multi-step subsampling optimizer.\n baseline_mode (str): One of 'states', 'network' (default: none).\n baseline (spec): Baseline specification, see core.baselines module for more information\n (default: none).\n baseline_optimizer (spec): Baseline optimizer specification, see core.optimizers module\n for more information (default: none).\n gae_lambda (float): Lambda factor for generalized advantage estimation (default: none).\n likelihood_ratio_clipping (float): Likelihood ratio clipping for policy gradient\n (default: 0.2).\n step_optimizer (spec): Step optimizer specification of implicit multi-step subsampling\n optimizer, see core.optimizers module for more information (default: {type='adam',\n learning_rate=1e-3}).\n subsampling_fraction (float): Subsampling fraction of implicit subsampling optimizer\n (default: 0.1).\n optimization_steps (int): Number of optimization steps for implicit multi-step\n optimizer (default: 50).", "id": "f14293:c0:m0"} {"signature": "def __init__(self,states,actions,network,update_mode,memory,optimizer,batched_observe=True,batching_capacity=,scope='',device=None,saver=None,summarizer=None,execution=None,variable_noise=None,states_preprocessing=None,actions_exploration=None,reward_preprocessing=None,discount=,distributions=None,entropy_regularization=None):", "body": "self.scope = scopeself.device = deviceself.saver = saverself.summarizer = summarizerself.execution = sanity_check_execution_spec(execution)self.variable_noise = variable_noiseself.states_preprocessing = states_preprocessingself.actions_exploration = actions_explorationself.reward_preprocessing = reward_preprocessingself.update_mode = update_modeself.memory = memoryself.optimizer = optimizerself.discount = discountself.network = networkself.distributions = distributionsself.entropy_regularization = entropy_regularizationif self.summarizer is None:summary_labels = set()else:summary_labels = 
set(self.summarizer.get('', ()))self.meta_param_recorder = Noneif any(k in summary_labels for k in ['', '']):self.meta_param_recorder = MetaParameterRecorder(inspect.currentframe())if '' in self.summarizer:self.meta_param_recorder.merge_custom(self.summarizer[''])if '' in summary_labels:self.summarizer[''] = self.meta_param_recorderif '' in summary_labels:self.meta_param_recorder.text_output(format_type=)super(LearningAgent, self).__init__(states=states,actions=actions,batched_observe=batched_observe,batching_capacity=batching_capacity)", "docstring": "Initializes the learning agent.\n\nArgs:\n update_mode (spec): Update mode specification, with the following attributes\n (required):\n - unit: one of 'timesteps', 'episodes', 'sequences' (required).\n - batch_size: integer (required).\n - frequency: integer (default: batch_size).\n - length: integer (optional if unit == 'sequences', default: 8).\n memory (spec): Memory specification, see core.memories module for more information\n (required).\n optimizer (spec): Optimizer specification, see core.optimizers module for more\n information (required).\n network (spec): Network specification, usually a list of layer specifications, see\n core.networks module for more information (required).\n scope (str): TensorFlow scope (default: name of agent).\n device: TensorFlow device (default: none)\n saver (spec): Saver specification, with the following attributes (default: none):\n - directory: model directory.\n - file: model filename (optional).\n - seconds or steps: save frequency (default: 600 seconds).\n - load: specifies whether model is loaded, if existent (default: true).\n - basename: optional file basename (default: 'model.ckpt').\n summarizer (spec): Summarizer specification, with the following attributes (default:\n none):\n - directory: summaries directory.\n - seconds or steps: summarize frequency (default: 120 seconds).\n - labels: list of summary labels to record (default: []).\n - meta_param_recorder_class: ???.\n execution (spec): Execution specification (see sanity_check_execution_spec for details).\n variable_noise (float): Standard deviation of variable noise (default: none).\n states_preprocessing (spec, or dict of specs): States preprocessing specification, see\n core.preprocessors module for more information (default: none)\n actions_exploration (spec, or dict of specs): Actions exploration specification, see\n core.explorations module for more information (default: none).\n reward_preprocessing (spec): Reward preprocessing specification, see core.preprocessors\n module for more information (default: none).\n discount (float): Discount factor for future rewards (default: 0.99).\n distributions (spec / dict of specs): Distributions specifications, see\n core.distributions module for more information (default: none).\n entropy_regularization (float): Entropy regularization weight (default: none).", "id": "f14294:c0:m0"} {"signature": "def import_experience(self, experiences):", "body": "if isinstance(experiences, dict):if self.unique_state:experiences[''] = dict(state=experiences[''])if self.unique_action:experiences[''] = dict(action=experiences[''])self.model.import_experience(**experiences)else:if self.unique_state:states = dict(state=list())else:states = {name: list() for name in experiences[]['']}internals = [list() for _ in experiences[]['']]if self.unique_action:actions = dict(action=list())else:actions = {name: list() for name in experiences[]['']}terminal = list()reward = list()for experience in experiences:if 
self.unique_state:states[''].append(experience[''])else:for name in sorted(states):states[name].append(experience[''][name])for n, internal in enumerate(internals):internal.append(experience[''][n])if self.unique_action:actions[''].append(experience[''])else:for name in sorted(actions):actions[name].append(experience[''][name])terminal.append(experience[''])reward.append(experience[''])self.model.import_experience(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward)", "docstring": "Imports experiences.\n\nArgs:\n experiences:", "id": "f14294:c0:m1"} {"signature": "def __init__(self,states,actions,network,critic_network,batched_observe=True,batching_capacity=,scope='',device=None,saver=None,summarizer=None,execution=None,variable_noise=None,states_preprocessing=None,actions_exploration=None,reward_preprocessing=None,update_mode=None,memory=None,optimizer=None,discount=,distributions=None,entropy_regularization=None,critic_optimizer=None,target_sync_frequency=,target_update_weight=):", "body": "if update_mode is None:update_mode = dict(unit='',batch_size=)elif '' in update_mode:assert update_mode[''] == ''else:update_mode[''] = ''if memory is None:memory = dict(type='',include_next_states=True,capacity=( * update_mode['']))else:assert memory['']if optimizer is None:optimizer = dict(type='',learning_rate=)if critic_optimizer is None:critic_optimizer = dict(type='',learning_rate=)self.critic_network = critic_networkself.critic_optimizer = critic_optimizerself.target_sync_frequency = target_sync_frequencyself.target_update_weight = target_update_weightsuper(DDPGAgent, self).__init__(states=states,actions=actions,batched_observe=batched_observe,batching_capacity=batching_capacity,scope=scope,device=device,saver=saver,summarizer=summarizer,execution=execution,variable_noise=variable_noise,states_preprocessing=states_preprocessing,actions_exploration=actions_exploration,reward_preprocessing=reward_preprocessing,update_mode=update_mode,memory=memory,optimizer=optimizer,discount=discount,network=network,distributions=distributions,entropy_regularization=entropy_regularization)", "docstring": "Initializes the DDPG agent.\n\nArgs:\n update_mode (spec): Update mode specification, with the following attributes:\n - unit: 'timesteps' if given (default: 'timesteps').\n - batch_size: integer (default: 10).\n - frequency: integer (default: batch_size).\n memory (spec): Memory specification, see core.memories module for more information\n (default: {type='replay', include_next_states=true, capacity=1000*batch_size}).\n optimizer (spec): Optimizer specification, see core.optimizers module for more\n information (default: {type='adam', learning_rate=1e-3}).\n critic_network (spec): Critic network specification, size_t0 and size_t1.\n critic_optimizer (spec): Critic optimizer specification, see core.optimizers module for\n more information (default: {type='adam', learning_rate=1e-3}).\n target_sync_frequency (int): Target network sync frequency (default: 10000).\n target_update_weight (float): Target network update weight (default: 1.0).", "id": "f14295:c0:m0"} {"signature": "def 
__init__(self,states,actions,network,batched_observe=True,batching_capacity=,scope='',device=None,saver=None,summarizer=None,execution=None,variable_noise=None,states_preprocessing=None,actions_exploration=None,reward_preprocessing=None,update_mode=None,memory=None,discount=,distributions=None,entropy_regularization=None,baseline_mode=None,baseline=None,baseline_optimizer=None,gae_lambda=None,likelihood_ratio_clipping=None,learning_rate=,cg_max_iterations=,cg_damping=,cg_unroll_loop=True,ls_max_iterations=,ls_accept_ratio=,ls_unroll_loop=False):", "body": "if update_mode is None:update_mode = dict(unit='',batch_size=)elif '' in update_mode:passelse:update_mode[''] = ''if memory is None:memory = dict(type='',include_next_states=False,capacity=( * update_mode['']))else:assert not memory['']assert (update_mode[''] != '' or memory[''] == '')optimizer = dict(type='',optimizer=dict(type='',learning_rate=learning_rate,cg_max_iterations=cg_max_iterations,cg_damping=cg_damping,cg_unroll_loop=cg_unroll_loop,),ls_max_iterations=ls_max_iterations,ls_accept_ratio=ls_accept_ratio,ls_mode='', ls_parameter=, ls_unroll_loop=ls_unroll_loop)self.baseline_mode = baseline_modeself.baseline = baselineself.baseline_optimizer = baseline_optimizerself.gae_lambda = gae_lambdaself.likelihood_ratio_clipping = likelihood_ratio_clippingsuper(TRPOAgent, self).__init__(states=states,actions=actions,batched_observe=batched_observe,batching_capacity=batching_capacity,scope=scope,device=device,saver=saver,summarizer=summarizer,execution=execution,variable_noise=variable_noise,states_preprocessing=states_preprocessing,actions_exploration=actions_exploration,reward_preprocessing=reward_preprocessing,update_mode=update_mode,memory=memory,optimizer=optimizer,discount=discount,network=network,distributions=distributions,entropy_regularization=entropy_regularization)", "docstring": "Initializes the TRPO agent.\n\nArgs:\n update_mode (spec): Update mode specification, with the following attributes:\n - unit: 'episodes' if given (default: 'episodes').\n - batch_size: integer (default: 10).\n - frequency: integer (default: batch_size).\n memory (spec): Memory specification, see core.memories module for more information\n (default: {type='latest', include_next_states=false, capacity=1000*batch_size}).\n optimizer (spec): TRPO agent implicitly defines a optimized-step natural-gradient\n optimizer.\n baseline_mode (str): One of 'states', 'network' (default: none).\n baseline (spec): Baseline specification, see core.baselines module for more information\n (default: none).\n baseline_optimizer (spec): Baseline optimizer specification, see core.optimizers module\n for more information (default: none).\n gae_lambda (float): Lambda factor for generalized advantage estimation (default: none).\n likelihood_ratio_clipping (float): Likelihood ratio clipping for policy gradient\n (default: none).\n learning_rate (float): Learning rate of natural-gradient optimizer (default: 1e-3).\n cg_max_iterations (int): Conjugate-gradient max iterations (default: 20).\n cg_damping (float): Conjugate-gradient damping (default: 1e-3).\n cg_unroll_loop (bool): Conjugate-gradient unroll loop (default: false).\n ls_max_iterations (int): Line-search max iterations (default: 10).\n ls_accept_ratio (float): Line-search accept ratio (default: 0.9).\n ls_unroll_loop (bool): Line-search unroll loop (default: false).", "id": "f14296:c0:m0"} {"signature": "def 
__init__(self,states,actions,network,batched_observe=True,batching_capacity=,scope='',device=None,saver=None,summarizer=None,execution=None,variable_noise=None,states_preprocessing=None,actions_exploration=None,reward_preprocessing=None,update_mode=None,memory=None,optimizer=None,discount=,distributions=None,entropy_regularization=None,target_sync_frequency=,target_update_weight=,double_q_model=False,huber_loss=None):", "body": "if update_mode is None:update_mode = dict(unit='',batch_size=,frequency=)elif '' in update_mode:assert update_mode[''] == ''else:update_mode[''] = ''if memory is None:memory = dict(type='',include_next_states=True,capacity=( * update_mode['']))else:assert memory['']if optimizer is None:optimizer = dict(type='',learning_rate=)self.target_sync_frequency = target_sync_frequencyself.target_update_weight = target_update_weightself.double_q_model = double_q_modelself.huber_loss = huber_losssuper(NAFAgent, self).__init__(states=states,actions=actions,batched_observe=batched_observe,batching_capacity=batching_capacity,scope=scope,device=device,saver=saver,summarizer=summarizer,execution=execution,variable_noise=variable_noise,states_preprocessing=states_preprocessing,actions_exploration=actions_exploration,reward_preprocessing=reward_preprocessing,update_mode=update_mode,memory=memory,optimizer=optimizer,discount=discount,network=network,distributions=distributions,entropy_regularization=entropy_regularization)", "docstring": "Initializes the NAF agent.\n\nArgs:\n update_mode (spec): Update mode specification, with the following attributes:\n - unit: 'timesteps' if given (default: 'timesteps').\n - batch_size: integer (default: 32).\n - frequency: integer (default: 4).\n memory (spec): Memory specification, see core.memories module for more information\n (default: {type='replay', include_next_states=true, capacity=1000*batch_size}).\n optimizer (spec): Optimizer specification, see core.optimizers module for more\n information (default: {type='adam', learning_rate=1e-3}).\n target_sync_frequency (int): Target network sync frequency (default: 10000).\n target_update_weight (float): Target network update weight (default: 1.0).\n double_q_model (bool): Specifies whether double DQN mode is used (default: false).\n huber_loss (float): Huber loss clipping (default: none).", "id": "f14297:c0:m0"} {"signature": "def __init__(self,states,actions,network,batched_observe=True,batching_capacity=,scope='',device=None,saver=None,summarizer=None,execution=None,variable_noise=None,states_preprocessing=None,actions_exploration=None,reward_preprocessing=None,update_mode=None,memory=None,optimizer=None,discount=,distributions=None,entropy_regularization=None,target_sync_frequency=,target_update_weight=,huber_loss=None,expert_margin=,supervised_weight=,demo_memory_capacity=,demo_sampling_ratio=):", "body": "if update_mode is None:update_mode = dict(unit='',batch_size=,frequency=)elif '' in update_mode:assert update_mode[''] == ''else:update_mode[''] = ''if memory is None:memory = dict(type='',include_next_states=True,capacity=( * update_mode['']))else:assert memory['']if optimizer is None:optimizer = dict(type='',learning_rate=)self.target_sync_frequency = target_sync_frequencyself.target_update_weight = target_update_weightself.double_q_model = Trueself.huber_loss = huber_lossself.expert_margin = expert_marginself.supervised_weight = supervised_weightself.demo_memory_capacity = demo_memory_capacityself.demo_batch_size = int(demo_sampling_ratio * update_mode[''] / ( - 
demo_sampling_ratio))assert self.demo_batch_size > , ''''''.format(self.demo_batch_size)super(DQFDAgent, self).__init__(states=states,actions=actions,batched_observe=batched_observe,batching_capacity=batching_capacity,scope=scope,device=device,saver=saver,summarizer=summarizer,execution=execution,variable_noise=variable_noise,states_preprocessing=states_preprocessing,actions_exploration=actions_exploration,reward_preprocessing=reward_preprocessing,update_mode=update_mode,memory=memory,optimizer=optimizer,discount=discount,network=network,distributions=distributions,entropy_regularization=entropy_regularization)", "docstring": "Initializes the DQFD agent.\n\nArgs:\n update_mode (spec): Update mode specification, with the following attributes:\n - unit: 'timesteps' if given (default: 'timesteps').\n - batch_size: integer (default: 32).\n - frequency: integer (default: 4).\n memory (spec): Memory specification, see core.memories module for more information\n (default: {type='replay', include_next_states=true, capacity=1000*batch_size}).\n optimizer (spec): Optimizer specification, see core.optimizers module for more\n information (default: {type='adam', learning_rate=1e-3}).\n target_sync_frequency (int): Target network sync frequency (default: 10000).\n target_update_weight (float): Target network update weight (default: 1.0).\n huber_loss (float): Huber loss clipping (default: none).\n expert_margin (float): Enforced supervised margin between expert action Q-value and\n other Q-values (default: 0.5).\n supervised_weight (float): Weight of supervised loss term (default: 0.1).\n demo_memory_capacity (int): Capacity of expert demonstration memory (default: 10000).\n demo_sampling_ratio (float): Runtime sampling ratio of expert data (default: 0.2).", "id": "f14298:c0:m0"} {"signature": "def import_demonstrations(self, demonstrations):", "body": "if isinstance(demonstrations, dict):if self.unique_state:demonstrations[''] = dict(state=demonstrations[''])if self.unique_action:demonstrations[''] = dict(action=demonstrations[''])self.model.import_demo_experience(**demonstrations)else:if self.unique_state:states = dict(state=list())else:states = {name: list() for name in demonstrations[]['']}internals = {name: list() for name in demonstrations[]['']}if self.unique_action:actions = dict(action=list())else:actions = {name: list() for name in demonstrations[]['']}terminal = list()reward = list()for demonstration in demonstrations:if self.unique_state:states[''].append(demonstration[''])else:for name, state in states.items():state.append(demonstration[''][name])for name, internal in internals.items():internal.append(demonstration[''][name])if self.unique_action:actions[''].append(demonstration[''])else:for name, action in actions.items():action.append(demonstration[''][name])terminal.append(demonstration[''])reward.append(demonstration[''])self.model.import_demo_experience(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward)", "docstring": "Imports demonstrations, i.e. expert observations. 
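import_demonstrations and pretrain form the offline half of the DQFD workflow: load expert transitions, run supervised demo updates, then start normal interaction. A minimal sketch with hypothetical placeholder data; the per-step key names ('states', 'actions', and so on) are assumed here, since the literal keys are elided in the extracted body:

demonstrations = [
    dict(states=[0.1, 0.9], internals=dict(), actions=1, terminal=False, reward=0.0),
    dict(states=[0.7, 0.3], internals=dict(), actions=0, terminal=True, reward=1.0),
]
agent.import_demonstrations(demonstrations=demonstrations)  # fills the expert demonstration memory
agent.pretrain(steps=10000)                                 # runs 10000 demo_update() steps before environment interaction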
Note that for large numbers of observations,\nset_demonstrations is more appropriate, which directly sets memory contents to an array an expects\na different layout.\n\nArgs:\n demonstrations: List of observation dicts", "id": "f14298:c0:m2"} {"signature": "def pretrain(self, steps):", "body": "for _ in xrange(steps):self.model.demo_update()", "docstring": "Computes pre-train updates.\n\nArgs:\n steps: Number of updates to execute.", "id": "f14298:c0:m3"} {"signature": "def __init__(self,states,actions,action_values,batched_observe=True,batching_capacity=,scope='',device=None,saver=None,summarizer=None,execution=None):", "body": "self.scope = scopeself.device = deviceself.saver = saverself.summarizer = summarizerself.execution = sanity_check_execution_spec(execution)self.batching_capacity = batching_capacityself.action_values = action_valuessuper(ConstantAgent, self).__init__(states=states,actions=actions,batched_observe=batched_observe,batching_capacity=batching_capacity)", "docstring": "Initializes the constant agent.\n\nArgs:\n action_values (value, or dict of values): Action values returned by the agent\n (required).\n scope (str): TensorFlow scope (default: name of agent).\n device: TensorFlow device (default: none)\n saver (spec): Saver specification, with the following attributes (default: none):\n - directory: model directory.\n - file: model filename (optional).\n - seconds or steps: save frequency (default: 600 seconds).\n - load: specifies whether model is loaded, if existent (default: true).\n - basename: optional file basename (default: 'model.ckpt').\n summarizer (spec): Summarizer specification, with the following attributes (default:\n none):\n - directory: summaries directory.\n - seconds or steps: summarize frequency (default: 120 seconds).\n - labels: list of summary labels to record (default: []).\n - meta_param_recorder_class: ???.\n execution (spec): Execution specification (see sanity_check_execution_spec for details).", "id": "f14299:c0:m0"} {"signature": "def __init__(self,sigma=,mu=,scope='',summary_labels=()):", "body": "self.sigma = sigmaself.mu = float(mu) super(GaussianNoise, self).__init__(scope=scope, summary_labels=summary_labels)", "docstring": "Initializes distribution values for gaussian noise", "id": "f14301:c0:m0"} {"signature": "def __init__(self,sigma=,mu=,theta=,scope='',summary_labels=()):", "body": "self.sigma = sigmaself.mu = float(mu) self.theta = thetasuper(OrnsteinUhlenbeckProcess, self).__init__(scope=scope, summary_labels=summary_labels)", "docstring": "Initializes an Ornstein-Uhlenbeck process which is a mean reverting stochastic process\nintroducing time-correlated noise.", "id": "f14304:c0:m0"} {"signature": "def tf_explore(self, episode, timestep, shape):", "body": "raise NotImplementedError", "docstring": "Creates exploration value, e.g. 
compute an epsilon for epsilon-greedy or sample normal \nnoise.", "id": "f14306:c0:m1"} {"signature": "def get_variables(self):", "body": "return [self.variables[key] for key in sorted(self.variables)]", "docstring": "Returns exploration variables.\n\nReturns:\n List of variables.", "id": "f14306:c0:m2"} {"signature": "@staticmethoddef from_spec(spec):", "body": "exploration = util.get_object(obj=spec,predefined_objects=tensorforce.core.explorations.explorations)assert isinstance(exploration, Exploration)return exploration", "docstring": "Creates an exploration object from a specification dict.", "id": "f14306:c0:m3"} {"signature": "def __init__(self, named_tensors=None, scope='', summary_labels=None):", "body": "self.scope = scopeself.summary_labels = set(summary_labels or ())self.named_tensors = named_tensorsself.variables = dict()self.all_variables = dict()def custom_getter(getter, name, registered=False, **kwargs):variable = getter(name=name, registered=True, **kwargs)if registered:passelif name in self.all_variables:assert variable is self.all_variables[name]if kwargs.get('', True):assert variable is self.variables[name]if '' in self.summary_labels:tf.contrib.summary.histogram(name=name, tensor=variable)else:self.all_variables[name] = variableif kwargs.get('', True):self.variables[name] = variableif '' in self.summary_labels:tf.contrib.summary.histogram(name=name, tensor=variable)return variableself.apply = tf.make_template(name_=(scope + ''),func_=self.tf_apply,custom_getter_=custom_getter)self.regularization_loss = tf.make_template(name_=(scope + ''),func_=self.tf_regularization_loss,custom_getter_=custom_getter)", "docstring": "Layer.", "id": "f14307:c0:m0"} {"signature": "def tf_apply(self, x, update):", "body": "raise NotImplementedError", "docstring": "Creates the TensorFlow operations for applying the layer to the given input.\n\nArgs:\n x: Layer input tensor.\n update: Boolean tensor indicating whether this call happens during an update.\n\nReturns:\n Layer output tensor.", "id": "f14307:c0:m1"} {"signature": "def tf_regularization_loss(self):", "body": "return None", "docstring": "Creates the TensorFlow operations for the layer regularization loss.\n\nReturns:\n Regularization loss tensor.", "id": "f14307:c0:m2"} {"signature": "def internals_spec(self):", "body": "return dict()", "docstring": "Returns the internal states specification.\n\nReturns:\n Internal states specification", "id": "f14307:c0:m3"} {"signature": "def get_variables(self, include_nontrainable=False):", "body": "if include_nontrainable:return [self.all_variables[key] for key in sorted(self.all_variables)]else:return [self.variables[key] for key in sorted(self.variables)]", "docstring": "Returns the TensorFlow variables used by the layer.\n\nReturns:\n List of variables.", "id": "f14307:c0:m4"} {"signature": "@staticmethoddef from_spec(spec, kwargs=None):", "body": "layer = util.get_object(obj=spec,predefined_objects=tensorforce.core.networks.layers,kwargs=kwargs)assert isinstance(layer, Layer)return layer", "docstring": "Creates a layer from a specification dict.", "id": "f14307:c0:m5"} {"signature": "def __init__(self,names,aggregation_type='',axis=,named_tensors=None,scope='',summary_labels=()):", "body": "self.names = namesself.aggregation_type = aggregation_typeself.axis = axissuper(Input, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "Input layer.\n\nArgs:\n names: A list of strings that name the inputs to merge\n axis: Axis to merge the inputs", 
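Exploration objects follow the same from_spec pattern as the other components: a type key selects a class from a registry and the remaining keys become constructor kwargs. A small sketch; the import path and the 'ornstein_uhlenbeck' registry name are assumptions based on the exploration classes defined just above:

from tensorforce.core.explorations import Exploration

exploration = Exploration.from_spec(
    spec=dict(type='ornstein_uhlenbeck', sigma=0.3, mu=0.0, theta=0.15)
)
assert isinstance(exploration, Exploration)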
"id": "f14307:c1:m0"} {"signature": "def __init__(self,name,named_tensors=None,scope='',summary_labels=()):", "body": "self.name = namesuper(Output, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "Output layer.\n\nArgs:\n output: A string that names the tensor, will be added to available inputs", "id": "f14307:c2:m0"} {"signature": "def __init__(self, layer, named_tensors=None, scope='', summary_labels=(), **kwargs):", "body": "self.layer_spec = layerself.layer = util.get_object(obj=layer, predefined_objects=TFLayer.tf_layers, kwargs=kwargs)self.first_scope = Nonesuper(TFLayer, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new layer instance of a TensorFlow layer.\n\nArgs:\n name: The name of the layer, one of 'dense'.\n **kwargs: Additional arguments passed on to the TensorFlow layer constructor.", "id": "f14307:c3:m0"} {"signature": "def __init__(self,name='',alpha=None,beta=,max=None,min=None,named_tensors=None,scope='',summary_labels=()):", "body": "self.name = nameself.alpha = Noneself.max = Noneself.min = Noneself.beta_learn = Falsesuper(Nonlinearity, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)if max is not None:self.max = float(max)if min is not None:self.min = float(min)if alpha is not None:self.alpha = float(alpha)if beta == '':self.beta_learn = Trueself.beta = Noneelse:self.beta = tf.constant(float(beta), dtype=util.tf_dtype(''))", "docstring": "Non-linearity activation layer.\n\nArgs:\n name: Non-linearity name, one of 'elu', 'relu', 'selu', 'sigmoid', 'swish',\n 'leaky_relu' (or 'lrelu'), 'crelu', 'softmax', 'softplus', 'softsign', 'tanh' or 'none'.\n alpha: (float|int) Alpha value for leaky Relu\n beta: (float|int|'learn') Beta value or 'learn' to train value (default 1.0)\n max: (float|int) maximum (beta * input) value passed to non-linearity function\n min: (float|int) minimum (beta * input) value passed to non-linearity function\n summary_labels: Requested summary labels for tensorboard export, add 'beta' to watch beta learning", "id": "f14307:c4:m0"} {"signature": "def __init__(self,pooling_type='',window=,stride=,padding='',named_tensors=None,scope='',summary_labels=()):", "body": "self.pooling_type = pooling_typeif isinstance(window, int):self.window = (, window, window, )elif len(window) == :self.window = (, window[], window[], )else:raise TensorForceError(''.format(window))if isinstance(stride, int):self.stride = (, stride, stride, )elif len(window) == :self.stride = (, stride[], stride[], )else:raise TensorForceError(''.format(stride))self.padding = paddingsuper(Pool2d, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "2-dimensional pooling layer.\n\nArgs:\n pooling_type: Either 'max' or 'average'.\n window: Pooling window size, either an integer or pair of integers.\n stride: Pooling stride, either an integer or pair of integers.\n padding: Pooling padding, one of 'VALID' or 'SAME'.", "id": "f14307:c7:m0"} {"signature": "def __init__(self,indices,size,l2_regularization=,l1_regularization=,named_tensors=None,scope='',summary_labels=()):", "body": "self.indices = indicesself.size = sizeself.l2_regularization = l2_regularizationself.l1_regularization = l1_regularizationsuper(Embedding, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "Embedding layer.\n\nArgs:\n indices: Number of embedding 
indices.\n size: Embedding size.\n l2_regularization: L2 regularization weight.\n l1_regularization: L1 regularization weight.", "id": "f14307:c8:m0"} {"signature": "def __init__(self,size,weights=None,bias=True,l2_regularization=,l1_regularization=,trainable=True,named_tensors=None,scope='',summary_labels=()):", "body": "self.size = sizeself.weights_init = weightsself.bias_init = biasself.l2_regularization = l2_regularizationself.l1_regularization = l1_regularizationself.trainable = trainablesuper(Linear, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "Linear layer.\n\nArgs:\n size: Layer size.\n weights: Weight initialization, random if None.\n bias: Bias initialization, random if True, no bias added if False.\n l2_regularization: L2 regularization weight.\n l1_regularization: L1 regularization weight.", "id": "f14307:c9:m0"} {"signature": "def __init__(self,size=None,weights=None,bias=True,activation='',l2_regularization=,l1_regularization=,skip=False,trainable=True,named_tensors=None,scope='',summary_labels=(),):", "body": "self.skip = skipif self.skip and size is not None:raise TensorForceError('''')self.linear = Linear(size=size,weights=weights,bias=bias,l2_regularization=l2_regularization,l1_regularization=l1_regularization,summary_labels=summary_labels,trainable=trainable)if self.skip:self.linear_skip = Linear(size=size,bias=bias,l2_regularization=l2_regularization,l1_regularization=l1_regularization,summary_labels=summary_labels,trainable=trainable)self.nonlinearity = Nonlinearity(summary_labels=summary_labels, **util.prepare_kwargs(activation))super(Dense, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "Dense layer.\n\nArgs:\n size: Layer size, if None than input size matches the output size of the layer\n weights: Weight initialization, random if None.\n bias: If true, bias is added.\n activation: Type of nonlinearity, or dict with name & arguments\n l2_regularization: L2 regularization weight.\n l1_regularization: L1 regularization weight.\n skip: Add skip connection like ResNet (https://arxiv.org/pdf/1512.03385.pdf),\n doubles layers and ShortCut from Input to output", "id": "f14307:c10:m0"} {"signature": "def __init__(self,size,bias=False,activation='',l2_regularization=,l1_regularization=,output=None,named_tensors=None,scope='',summary_labels=()):", "body": "self.expectation_layer = Linear(size=, bias=bias,l2_regularization=l2_regularization,l1_regularization=l1_regularization,summary_labels=summary_labels,)self.advantage_layer = Linear(size=size,bias=bias,l2_regularization=l2_regularization,l1_regularization=l1_regularization,summary_labels=summary_labels,)self.output = outputself.nonlinearity = Nonlinearity(summary_labels=summary_labels, **util.prepare_kwargs(activation))super(Dueling, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "Dueling layer.\n\n[Dueling Networks] (https://arxiv.org/pdf/1511.06581.pdf)\nImplement Y = Expectation[x] + (Advantage[x] - Mean(Advantage[x]))\n\nArgs:\n size: Layer size.\n bias: If true, bias is added.\n activation: Type of nonlinearity, or dict with name & arguments\n l2_regularization: L2 regularization weight.\n l1_regularization: L1 regularization weight.\n output: None or tuple of output names for ('expectation','advantage','mean_advantage')", "id": "f14307:c11:m0"} {"signature": "def 
__init__(self,size,window=,stride=,padding='',bias=True,activation='',l2_regularization=,l1_regularization=,named_tensors=None,scope='',summary_labels=()):", "body": "self.size = sizeself.window = windowself.stride = strideself.padding = paddingself.bias = biasself.l2_regularization = l2_regularizationself.l1_regularization = l1_regularizationself.nonlinearity = Nonlinearity(summary_labels=summary_labels, **util.prepare_kwargs(activation))super(Conv1d, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "1D convolutional layer.\n\nArgs:\n size: Number of filters\n window: Convolution window size\n stride: Convolution stride\n padding: Convolution padding, one of 'VALID' or 'SAME'\n bias: If true, a bias is added\n activation: Type of nonlinearity, or dict with name & arguments\n l2_regularization: L2 regularization weight\n l1_regularization: L1 regularization weight", "id": "f14307:c12:m0"} {"signature": "def __init__(self,size,window=,stride=,padding='',bias=True,activation='',l2_regularization=,l1_regularization=,named_tensors=None,scope='',summary_labels=()):", "body": "self.size = sizeif isinstance(window, int):self.window = (window, window)elif len(window) == :self.window = tuple(window)else:raise TensorForceError(''.format(window))self.stride = strideself.padding = paddingself.bias = biasself.l2_regularization = l2_regularizationself.l1_regularization = l1_regularizationself.nonlinearity = Nonlinearity(summary_labels=summary_labels, **util.prepare_kwargs(activation))super(Conv2d, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "2D convolutional layer.\n\nArgs:\n size: Number of filters\n window: Convolution window size, either an integer or pair of integers.\n stride: Convolution stride, either an integer or pair of integers.\n padding: Convolution padding, one of 'VALID' or 'SAME'\n bias: If true, a bias is added\n activation: Type of nonlinearity, or dict with name & arguments\n l2_regularization: L2 regularization weight\n l1_regularization: L1 regularization weight", "id": "f14307:c13:m0"} {"signature": "def __init__(self, size, dropout=None, lstmcell_args={}, named_tensors=None, scope='', summary_labels=()):", "body": "self.size = sizeself.dropout = dropoutself.lstmcell_args = lstmcell_argssuper(InternalLstm, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "LSTM layer.\n\nArgs:\n size: LSTM size.\n dropout: Dropout rate.", "id": "f14307:c14:m0"} {"signature": "def __init__(self, size, dropout=None, named_tensors=None, scope='', summary_labels=(), return_final_state=True):", "body": "self.size = sizeself.dropout = dropoutself.return_final_state = return_final_statesuper(Lstm, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "LSTM layer.\n\nArgs:\n size: LSTM size.\n dropout: Dropout rate.", "id": "f14307:c15:m0"} {"signature": "def __init__(self, scope='', summary_labels=None):", "body": "self.summary_labels = set(summary_labels or ())self.variables = dict()self.all_variables = dict()self.named_tensors = dict()def custom_getter(getter, name, registered=False, **kwargs):variable = getter(name=name, registered=True, **kwargs)if registered:passelif name in self.all_variables:assert variable is self.all_variables[name]if kwargs.get('', True):assert variable is self.variables[name]if '' in self.summary_labels:tf.contrib.summary.histogram(name=name, 
tensor=variable)else:self.all_variables[name] = variableif kwargs.get('', True):self.variables[name] = variableif '' in self.summary_labels:tf.contrib.summary.histogram(name=name, tensor=variable)return variableself.apply = tf.make_template(name_=(scope + ''),func_=self.tf_apply,custom_getter_=custom_getter)self.regularization_loss = tf.make_template(name_=(scope + ''),func_=self.tf_regularization_loss,custom_getter_=custom_getter)", "docstring": "Neural network.", "id": "f14309:c0:m0"} {"signature": "def tf_apply(self, x, internals, update, return_internals=False):", "body": "raise NotImplementedError", "docstring": "Creates the TensorFlow operations for applying the network to the given input.\n\nArgs:\n x: Network input tensor or dict of input tensors.\n internals: List of prior internal state tensors\n update: Boolean tensor indicating whether this call happens during an update.\n return_internals: If true, also returns posterior internal state tensors\n\nReturns:\n Network output tensor, plus optionally list of posterior internal state tensors", "id": "f14309:c0:m1"} {"signature": "def tf_regularization_loss(self):", "body": "return None", "docstring": "Creates the TensorFlow operations for the network regularization loss.\n\nReturns:\n Regularization loss tensor", "id": "f14309:c0:m2"} {"signature": "def internals_spec(self):", "body": "return dict()", "docstring": "Returns the internal states specification.\n\nReturns:\n Internal states specification", "id": "f14309:c0:m3"} {"signature": "def get_variables(self, include_nontrainable=False):", "body": "if include_nontrainable:return [self.all_variables[key] for key in sorted(self.all_variables)]else:return [self.variables[key] for key in sorted(self.variables)]", "docstring": "Returns the TensorFlow variables used by the network.\n\nReturns:\n List of variables", "id": "f14309:c0:m4"} {"signature": "def get_named_tensor(self, name):", "body": "if name in self.named_tensors:return True, self.named_tensors[name]else:return False, None", "docstring": "Returns a named tensor if available.\n\nReturns:\n valid: True if named tensor found, False otherwise\n tensor: If valid, will be a tensor, otherwise None", "id": "f14309:c0:m5"} {"signature": "def get_list_of_named_tensor(self):", "body": "return list(self.named_tensors)", "docstring": "Returns a list of the names of tensors available.\n\nReturns:\n List of the names of tensors available.", "id": "f14309:c0:m6"} {"signature": "@staticmethoddef from_spec(spec, kwargs=None):", "body": "network = util.get_object(obj=spec,default_object=LayeredNetwork,kwargs=kwargs)assert isinstance(network, Network)return network", "docstring": "Creates a network from a specification dict.", "id": "f14309:c0:m8"} {"signature": "def __init__(self, scope='', summary_labels=()):", "body": "super(LayerBasedNetwork, self).__init__(scope=scope, summary_labels=summary_labels)self.layers = list()", "docstring": "Layer-based network.", "id": "f14309:c1:m0"} {"signature": "def __init__(self, layers, scope='', summary_labels=()):", "body": "self.layers_spec = layerssuper(LayeredNetwork, self).__init__(scope=scope, summary_labels=summary_labels)self.parse_layer_spec(layer_spec=self.layers_spec, layer_counter=Counter())", "docstring": "Single-stack layered network.\n\nArgs:\n layers: List of layer specification dicts.", "id": "f14309:c2:m0"} {"signature": "def tf_reset(self):", "body": "pass", "docstring": "Resets this preprocessor to some initial state. 
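Network.from_spec falls back to LayeredNetwork, so a plain list of layer specification dicts is enough to describe a single-stack network. A minimal sketch; the 'dense' type name, the activation keyword, and the import path are assumptions consistent with the layer classes listed above:

from tensorforce.core.networks import Network

network = Network.from_spec(
    spec=[
        dict(type='dense', size=64, activation='relu'),
        dict(type='dense', size=32, activation='relu'),
    ]
)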
This method is called whenever an episode ends.\nThis could be useful if the preprocessor stores certain episode-sequence information to do the processing\nand this information has to be reset after the episode terminates.", "id": "f14313:c0:m1"} {"signature": "def tf_process(self, tensor):", "body": "return tensor", "docstring": "Process state (tensor).\n\nArgs:\n tensor (tf.Tensor): The Tensor to process.\n\nReturns: The pre-processed Tensor.", "id": "f14313:c0:m2"} {"signature": "def processed_shape(self, shape):", "body": "return shape", "docstring": "Shape of preprocessed state given original shape.\n\nArgs:\n shape (tuple): The original (unprocessed) shape.\n\nReturns: The processed tensor shape.", "id": "f14313:c0:m3"} {"signature": "def get_variables(self):", "body": "return [self.variables[key] for key in sorted(self.variables)]", "docstring": "Returns the TensorFlow variables used by the preprocessor.\n\nReturns:\n List of variables.", "id": "f14313:c0:m4"} {"signature": "def reset(self):", "body": "fetches = []for processor in self.preprocessors:fetches.extend(processor.reset() or [])return fetches", "docstring": "Calls `reset` on all our Preprocessor objects.\n\nReturns:\n A list of tensors to be fetched.", "id": "f14313:c1:m1"} {"signature": "def process(self, tensor):", "body": "for processor in self.preprocessors:tensor = processor.process(tensor=tensor)return tensor", "docstring": "Process state.\n\nArgs:\n tensor: tensor to process\n\nReturns: processed state", "id": "f14313:c1:m2"} {"signature": "def processed_shape(self, shape):", "body": "for processor in self.preprocessors:shape = processor.processed_shape(shape=shape)return shape", "docstring": "Shape of preprocessed state given original shape.\n\nArgs:\n shape: original state shape\n\nReturns: processed state shape", "id": "f14313:c1:m3"} {"signature": "@staticmethoddef from_spec(spec, kwargs=None):", "body": "if isinstance(spec, dict):spec = [spec]stack = PreprocessorStack()for preprocessor_spec in spec:preprocessor_kwargs = copy.deepcopy(kwargs)preprocessor = util.get_object(obj=preprocessor_spec,predefined_objects=tensorforce.core.preprocessors.preprocessors,kwargs=preprocessor_kwargs)assert isinstance(preprocessor, Preprocessor)stack.preprocessors.append(preprocessor)return stack", "docstring": "Creates a preprocessing stack from a specification dict.", "id": "f14313:c1:m5"} {"signature": "def __init__(self, shape, length=, add_rank=False, scope='', summary_labels=()):", "body": "self.length = lengthself.add_rank = add_rankself.reset_op = Nonesuper(Sequence, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)", "docstring": "Args:\n length (int): The number of states to concatenate. In the beginning, when no previous state is available,\n concatenate the given first state with itself `length` times.\n add_rank (bool): Whether to add another rank to the end of the input with dim=length-of-the-sequence.\n This could be useful if e.g. a grayscale image of w x h pixels is coming from the env\n (no color channel). 
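The Grayscale and Sequence preprocessors described here compose naturally in a PreprocessorStack, and processed_shape chains through each stage. A shape-level sketch for an 84x84 RGB input; the 'grayscale' and 'sequence' registry names are assumed:

states_preprocessing = [
    dict(type='grayscale', remove_rank=True),        # (84, 84, 3) -> (84, 84)
    dict(type='sequence', length=4, add_rank=True),  # (84, 84) -> (84, 84, 4)
]
# Passed to an agent as states_preprocessing=states_preprocessing; reset() is called on the stack when an episode ends.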
The output of the preprocessor would then be of shape [batch] x w x h x [length].", "id": "f14314:c0:m0"} {"signature": "def __init__(self, shape, weights=(, , ), remove_rank=False, scope='', summary_labels=()):", "body": "self.weights = weightsself.remove_rank = remove_ranksuper(Grayscale, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)", "docstring": "Args:\n weights (tuple): The weights to multiply each color channel with (in order: red, blue, green).\n remove_rank (bool): If True, will remove the color channel rank from the input tensor.", "id": "f14317:c0:m0"} {"signature": "def __init__(self, network, scope='', summary_labels=()):", "body": "self.network = Network.from_spec(spec=network,kwargs=dict(summary_labels=summary_labels))assert len(self.network.internals_spec()) == self.linear = Linear(size=, bias=, scope='', summary_labels=summary_labels)super(NetworkBaseline, self).__init__(scope=scope, summary_labels=summary_labels)", "docstring": "Network baseline.\n\nArgs:\n network_spec: Network specification dict", "id": "f14322:c0:m0"} {"signature": "def __init__(self, conv_sizes, dense_sizes, scope='', summary_labels=()):", "body": "network = []for size in conv_sizes:network.append(dict(type='', size=size))network[][''] = network.append(dict(type='')) for size in dense_sizes:network.append(dict(type='', size=size))super(CNNBaseline, self).__init__(network=network, scope=scope, summary_labels=summary_labels)", "docstring": "CNN baseline.\n\nArgs:\n conv_sizes: List of convolutional layer sizes\n dense_sizes: List of dense layer sizes", "id": "f14324:c0:m0"} {"signature": "def __init__(self, scope='', summary_labels=None):", "body": "self.summary_labels = set(summary_labels or ())self.variables = dict()self.all_variables = dict()def custom_getter(getter, name, registered=False, **kwargs):variable = getter(name=name, registered=True, **kwargs)if registered:passelif name in self.all_variables:assert variable is self.all_variables[name]if kwargs.get('', True):assert variable is self.variables[name]if '' in self.summary_labels:tf.contrib.summary.histogram(name=name, tensor=variable)else:self.all_variables[name] = variableif kwargs.get('', True):self.variables[name] = variableif '' in self.summary_labels:tf.contrib.summary.histogram(name=name, tensor=variable)return variableself.predict = tf.make_template(name_=(scope + ''),func_=self.tf_predict,custom_getter_=custom_getter)self.reference = tf.make_template(name_=(scope + ''),func_=self.tf_reference,custom_getter_=custom_getter)self.loss = tf.make_template(name_=(scope + ''),func_=self.tf_loss,custom_getter_=custom_getter)self.regularization_loss = tf.make_template(name_=(scope + ''),func_=self.tf_regularization_loss,custom_getter_=custom_getter)", "docstring": "Baseline.", "id": "f14325:c0:m0"} {"signature": "def tf_predict(self, states, internals, update):", "body": "raise NotImplementedError", "docstring": "Creates the TensorFlow operations for predicting the value function of given states.\nArgs:\n states: Dict of state tensors.\n internals: List of prior internal state tensors.\n update: Boolean tensor indicating whether this call happens during an update.\nReturns:\n State value tensor", "id": "f14325:c0:m1"} {"signature": "def tf_reference(self, states, internals, reward, update):", "body": "return None", "docstring": "Creates the TensorFlow operations for obtaining the reference tensor(s), in case of a\ncomparative loss.\n\nArgs:\n states: Dict of state tensors.\n internals: List of prior internal state 
tensors.\n reward: Reward tensor.\n update: Boolean tensor indicating whether this call happens during an update.\n\nReturns:\n Reference tensor(s).", "id": "f14325:c0:m2"} {"signature": "def tf_loss(self, states, internals, reward, update, reference=None):", "body": "prediction = self.predict(states=states, internals=internals, update=update)return tf.nn.l2_loss(t=(prediction - reward))", "docstring": "Creates the TensorFlow operations for calculating the L2 loss between predicted\nstate values and actual rewards.\n\nArgs:\n states: Dict of state tensors.\n internals: List of prior internal state tensors.\n reward: Reward tensor.\n update: Boolean tensor indicating whether this call happens during an update.\n reference: Optional reference tensor(s), in case of a comparative loss.\n\nReturns:\n Loss tensor", "id": "f14325:c0:m3"} {"signature": "def tf_regularization_loss(self):", "body": "return None", "docstring": "Creates the TensorFlow operations for the baseline regularization loss/\n\nReturns:\n Regularization loss tensor", "id": "f14325:c0:m4"} {"signature": "def get_variables(self, include_nontrainable=False):", "body": "if include_nontrainable:return [self.all_variables[key] for key in sorted(self.all_variables)]else:return [self.variables[key] for key in sorted(self.variables)]", "docstring": "Returns the TensorFlow variables used by the baseline.\n\nReturns:\n List of variables", "id": "f14325:c0:m5"} {"signature": "@staticmethoddef from_spec(spec, kwargs=None):", "body": "baseline = util.get_object(obj=spec,predefined_objects=tensorforce.core.baselines.baselines,kwargs=kwargs)assert isinstance(baseline, Baseline)return baseline", "docstring": "Creates a baseline from a specification dict.", "id": "f14325:c0:m6"} {"signature": "def __init__(self, sizes, scope='', summary_labels=()):", "body": "network = []for size in sizes:network.append(dict(type='', size=size))super(MLPBaseline, self).__init__(network=network, scope=scope, summary_labels=summary_labels)", "docstring": "Multi-layer perceptron baseline.\n\nArgs:\n sizes: List of dense layer sizes", "id": "f14326:c0:m0"} {"signature": "def __init__(self, baselines, scope='', summary_labels=()):", "body": "self.baselines = dict()for name in sorted(baselines):self.baselines[name] = Baseline.from_spec(spec=baselines[name],kwargs=dict(summary_labels=summary_labels))self.linear = Linear(size=, bias=, scope='', summary_labels=summary_labels)super(AggregatedBaseline, self).__init__(scope, summary_labels)", "docstring": "Aggregated baseline.\n\nArgs:\n baselines: Dict of per-state baseline specification dicts", "id": "f14327:c0:m0"} {"signature": "def __init__(self, shape, probability=, scope='', summary_labels=()):", "body": "self.shape = shapeaction_size = util.prod(self.shape)self.logit = Linear(size=action_size, bias=log(probability), scope='', summary_labels=summary_labels)super(Bernoulli, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)", "docstring": "Bernoulli distribution.\n\nArgs:\n shape: Action shape.\n probability: Optional distribution bias.", "id": "f14328:c0:m0"} {"signature": "def __init__(self, shape, min_value, max_value, alpha=, beta=, scope='', summary_labels=()):", "body": "assert min_value is None or max_value > min_valueself.shape = shapeself.min_value = min_valueself.max_value = max_valueaction_size = util.prod(self.shape)self.alpha = Linear(size=action_size, bias=alpha, scope='', summary_labels=summary_labels)self.beta = Linear(size=action_size, bias=beta, scope='', 
summary_labels=summary_labels)super(Beta, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)", "docstring": "Beta distribution.\n\nArgs:\n shape: Action shape.\n min_value: Minimum value of continuous actions.\n max_value: Maximum value of continuous actions.\n alpha: Optional distribution bias for the alpha value.\n beta: Optional distribution bias for the beta value.", "id": "f14329:c0:m0"} {"signature": "def __init__(self, shape, num_actions, probabilities=None, scope='', summary_labels=()):", "body": "self.num_actions = num_actionsaction_size = util.prod(shape) * self.num_actionsif probabilities is None:logits = else:logits = [log(prob) for _ in range(util.prod(shape)) for prob in probabilities]self.logits = Linear(size=action_size, bias=logits, scope='', summary_labels=summary_labels)super(Categorical, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)", "docstring": "Categorical distribution.\n\nArgs:\n shape: Action shape.\n num_actions: Number of discrete action alternatives.\n probabilities: Optional distribution bias.", "id": "f14330:c0:m0"} {"signature": "def __init__(self, shape, scope='', summary_labels=None):", "body": "self.shape = shapeself.scope = scopeself.summary_labels = set(summary_labels or ())self.variables = dict()self.all_variables = dict()def custom_getter(getter, name, registered=False, **kwargs):variable = getter(name=name, registered=True, **kwargs)if registered:passelif name in self.all_variables:assert variable is self.all_variables[name]if kwargs.get('', True):assert variable is self.variables[name]if '' in self.summary_labels:tf.contrib.summary.histogram(name=name, tensor=variable)else:self.all_variables[name] = variableif kwargs.get('', True):self.variables[name] = variableif '' in self.summary_labels:tf.contrib.summary.histogram(name=name, tensor=variable)return variableself.parameterize = tf.make_template(name_=(scope + ''),func_=self.tf_parameterize,custom_getter_=custom_getter)self.sample = tf.make_template(name_=(scope + ''),func_=self.tf_sample,custom_getter_=custom_getter)self.log_probability = tf.make_template(name_=(scope + ''),func_=self.tf_log_probability,custom_getter_=custom_getter)self.entropy = tf.make_template(name_=(scope + ''),func_=self.tf_entropy,custom_getter_=custom_getter)self.kl_divergence = tf.make_template(name_=(scope + ''),func_=self.tf_kl_divergence,custom_getter_=custom_getter)self.regularization_loss = tf.make_template(name_=(scope + ''),func_=self.tf_regularization_loss,custom_getter_=custom_getter)", "docstring": "Distribution.\n\nArgs:\n shape: Action shape.", "id": "f14332:c0:m0"} {"signature": "def tf_parameterize(self, x):", "body": "raise NotImplementedError", "docstring": "Creates the tensorFlow operations for parameterizing a distribution conditioned on the\ngiven input.\n\nArgs:\n x: Input tensor which the distribution is conditioned on.\n\nReturns:\n tuple of distribution parameter tensors.", "id": "f14332:c0:m1"} {"signature": "def tf_sample(self, distr_params, deterministic):", "body": "raise NotImplementedError", "docstring": "Creates the tensorFlow operations for sampling an action based on a distribution.\n\nArgs:\n distr_params: tuple of distribution parameter tensors.\n deterministic: Boolean input tensor indicating whether the maximum likelihood action\n should be returned.\n\nReturns:\n Sampled action tensor.", "id": "f14332:c0:m2"} {"signature": "def tf_log_probability(self, distr_params, action):", "body": "raise NotImplementedError", "docstring": "Creates the 
TensorFlow operations for calculating the log probability of an action for a \ndistribution.\n\nArgs:\n distr_params: tuple of distribution parameter tensors.\n action: Action tensor.\n\nReturns:\n Log probability tensor.", "id": "f14332:c0:m3"} {"signature": "def tf_entropy(self, distr_params):", "body": "raise NotImplementedError", "docstring": "Creates the TensorFlow operations for calculating the entropy of a distribution.\n\nArgs:\n distr_params: tuple of distribution parameter tensors.\n\nReturns:\n Entropy tensor.", "id": "f14332:c0:m4"} {"signature": "def tf_kl_divergence(self, distr_params1, distr_params2):", "body": "raise NotImplementedError", "docstring": "Creates the TensorFlow operations for calculating the KL divergence between two \ndistributions.\n\nArgs:\n distr_params1: tuple of parameter tensors for first distribution.\n distr_params2: tuple of parameter tensors for second distribution.\n\nReturns:\n KL divergence tensor.", "id": "f14332:c0:m5"} {"signature": "def tf_regularization_loss(self):", "body": "return None", "docstring": "Creates the TensorFlow operations for the distribution regularization loss.\n\nReturns:\n Regularization loss tensor.", "id": "f14332:c0:m6"} {"signature": "def get_variables(self, include_nontrainable=False):", "body": "if include_nontrainable:return [self.all_variables[key] for key in sorted(self.all_variables)]else:return [self.variables[key] for key in sorted(self.variables)]", "docstring": "Returns the TensorFlow variables used by the distribution.\n\nReturns:\n List of variables.", "id": "f14332:c0:m7"} {"signature": "@staticmethoddef from_spec(spec, kwargs=None):", "body": "distribution = util.get_object(obj=spec,predefined_objects=tensorforce.core.distributions.distributions,kwargs=kwargs)assert isinstance(distribution, Distribution)return distribution", "docstring": "Creates a distribution from a specification dict.", "id": "f14332:c0:m8"} {"signature": "def __init__(self, shape, mean=, log_stddev=, scope='', summary_labels=()):", "body": "self.shape = shapeaction_size = util.prod(self.shape)self.mean = Linear(size=action_size, bias=mean, scope='', summary_labels=summary_labels)self.log_stddev = Linear(size=action_size, bias=log_stddev, scope='', summary_labels=summary_labels)super(Gaussian, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)", "docstring": "Gaussian distribution.\n\nArgs:\n shape: Action shape.\n mean: Optional distribution bias for the mean.\n log_stddev: Optional distribution bias for the standard deviation.", "id": "f14333:c0:m0"} {"signature": "def __init__(self, scope='', summary_labels=None):", "body": "self.summary_labels = set(summary_labels or ())self.variables = dict()def custom_getter(getter, name, registered=False, **kwargs):variable = getter(name=name, registered=True, **kwargs)if registered:passelif name in self.variables:assert variable is self.variables[name]else:assert not kwargs['']self.variables[name] = variablereturn variableself.step = tf.make_template(name_=(scope + ''),func_=self.tf_step,custom_getter_=custom_getter)", "docstring": "Creates a new optimizer instance.", "id": "f14334:c0:m0"} {"signature": "def tf_step(self, time, variables, **kwargs):", "body": "raise NotImplementedError", "docstring": "Creates the TensorFlow operations for performing an optimization step on the given variables, including\nactually changing the values of the variables.\n\nArgs:\n time: Time tensor.\n variables: List of variables to optimize.\n **kwargs: Additional arguments depending on the 
specific optimizer implementation. \n For instance, often includes `fn_loss` if a loss function is optimized.\n\nReturns:\n List of delta tensors corresponding to the updates for each optimized variable.", "id": "f14334:c0:m1"} {"signature": "def apply_step(self, variables, deltas):", "body": "if len(variables) != len(deltas):raise TensorForceError(\"\")return tf.group(*(tf.assign_add(ref=variable, value=delta) for variable, delta in zip(variables, deltas)))", "docstring": "Applies the given (and already calculated) step deltas to the variable values.\n\nArgs:\n variables: List of variables.\n deltas: List of deltas of same length.\n\nReturns:\n The step-applied operation. A tf.group of tf.assign_add ops.", "id": "f14334:c0:m2"} {"signature": "def minimize(self, time, variables, **kwargs):", "body": "deltas = self.step(time=time, variables=variables, **kwargs)with tf.control_dependencies(control_inputs=deltas):return tf.no_op()", "docstring": "Performs an optimization step.\n\nArgs:\n time: Time tensor.\n variables: List of variables to optimize.\n **kwargs: Additional optimizer-specific arguments. The following arguments are used\n by some optimizers:\n - arguments: Dict of arguments for callables, like fn_loss.\n - fn_loss: A callable returning the loss of the current model.\n - fn_reference: A callable returning the reference values, in case of a comparative \n loss.\n - fn_kl_divergence: A callable returning the KL-divergence relative to the\n current model.\n - sampled_loss: A sampled loss (integer).\n - return_estimated_improvement: Returns the estimated improvement resulting from\n the natural gradient calculation if true.\n - source_variables: List of source variables to synchronize with.\n - global_variables: List of global variables to apply the proposed optimization\n step to.\n\n\nReturns:\n The optimization operation.", "id": "f14334:c0:m3"} {"signature": "def get_variables(self):", "body": "return [self.variables[key] for key in sorted(self.variables)]", "docstring": "Returns the TensorFlow variables used by the optimizer.\n\nReturns:\n List of variables.", "id": "f14334:c0:m4"} {"signature": "@staticmethoddef from_spec(spec, kwargs=None):", "body": "optimizer = util.get_object(obj=spec,predefined_objects=tensorforce.core.optimizers.optimizers,kwargs=kwargs)assert isinstance(optimizer, Optimizer)return optimizer", "docstring": "Creates an optimizer from a specification dict.", "id": "f14334:c0:m5"} {"signature": "def __init__(self, optimizer, scope='', summary_labels=(), **kwargs):", "body": "self.optimizer = Optimizer.from_spec(spec=optimizer, kwargs=kwargs)super(MetaOptimizer, self).__init__(scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new meta optimizer instance.\n\nArgs:\n optimizer: The optimizer which is modified by this meta optimizer.", "id": "f14335:c0:m0"} {"signature": "def __init__(self, optimizer, scope=None, summary_labels=(), **kwargs):", "body": "self.tf_optimizer_type = optimizerself.tf_optimizer = TFOptimizer.tf_optimizers[optimizer](**kwargs)super(TFOptimizer, self).__init__(scope=(scope or optimizer), summary_labels=summary_labels)", "docstring": "Creates a new optimizer instance of a TensorFlow optimizer.\n\nArgs:\n optimizer: The name of the optimizer. 
Must be one of the keys of the tf_optimizers dict.\n **kwargs: Arguments passed on to the TensorFlow optimizer constructor as **kwargs.", "id": "f14336:c0:m0"} {"signature": "def tf_step(self, time, variables, **kwargs):", "body": "arguments = kwargs[\"\"]fn_loss = kwargs[\"\"]loss = fn_loss(**arguments)with tf.control_dependencies(control_inputs=(loss,)):previous_variables = [variable + for variable in variables]with tf.control_dependencies(control_inputs=previous_variables):applied = self.tf_optimizer.minimize(loss=loss, var_list=variables) with tf.control_dependencies(control_inputs=(applied,)):return [variable - previous_variablefor variable, previous_variable in zip(variables, previous_variables)]", "docstring": "Keyword Args:\n arguments: Dict of arguments for passing to fn_loss as **kwargs.\n fn_loss: A callable taking arguments as kwargs and returning the loss op of the current model.", "id": "f14336:c0:m1"} {"signature": "def __init__(self, optimizer, fraction=, scope='', summary_labels=()):", "body": "assert isinstance(fraction, float) and fraction > self.fraction = fractionsuper(SubsamplingStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new subsampling-step meta optimizer instance.\n\nArgs:\n optimizer: The optimizer which is modified by this meta optimizer.\n fraction: The fraction of instances of the batch to subsample.", "id": "f14337:c0:m0"} {"signature": "def tf_step(self,time,variables,arguments,**kwargs):", "body": "arguments_iter = iter(arguments.values())some_argument = next(arguments_iter)try:while not isinstance(some_argument, tf.Tensor) or util.rank(some_argument) == :if isinstance(some_argument, dict):if some_argument:arguments_iter = iter(some_argument.values())some_argument = next(arguments_iter)elif isinstance(some_argument, list):if some_argument:arguments_iter = iter(some_argument)some_argument = next(arguments_iter)elif some_argument is None or util.rank(some_argument) == :some_argument = next(arguments_iter)else:raise TensorForceError(\"\")except StopIteration:raise TensorForceError(\"\")batch_size = tf.shape(input=some_argument)[]num_samples = tf.cast(x=(self.fraction * tf.cast(x=batch_size, dtype=util.tf_dtype(''))),dtype=util.tf_dtype(''))num_samples = tf.maximum(x=num_samples, y=)indices = tf.random_uniform(shape=(num_samples,), maxval=batch_size, dtype=tf.int32)subsampled_arguments = util.map_tensors(fn=(lambda arg: arg if util.rank(arg) == else tf.gather(params=arg, indices=indices)),tensors=arguments)return self.optimizer.step(time=time,variables=variables,arguments=subsampled_arguments,**kwargs)", "docstring": "Creates the TensorFlow operations for performing an optimization step.\n\nArgs:\n time: Time tensor.\n variables: List of variables to optimize.\n arguments: Dict of arguments for callables, like fn_loss.\n **kwargs: Additional arguments passed on to the internal optimizer.\n\nReturns:\n List of delta tensors corresponding to the updates for each optimized variable.", "id": "f14337:c0:m1"} {"signature": "def __init__(self, optimizer, num_steps=, unroll_loop=False, scope='', summary_labels=()):", "body": "assert isinstance(num_steps, int) and num_steps > self.num_steps = num_stepsassert isinstance(unroll_loop, bool)self.unroll_loop = unroll_loopsuper(MultiStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new multi-step meta optimizer instance.\n\nArgs:\n optimizer: The optimizer which is modified by this meta optimizer.\n 
num_steps: Number of optimization steps to perform.", "id": "f14338:c0:m0"} {"signature": "def tf_step(self, time, variables, arguments, fn_reference=None, **kwargs):", "body": "arguments[''] = fn_reference(**arguments)deltas = self.optimizer.step(time=time, variables=variables, arguments=arguments, **kwargs)if self.unroll_loop:for _ in xrange(self.num_steps - ):with tf.control_dependencies(control_inputs=deltas):step_deltas = self.optimizer.step(time=time, variables=variables, arguments=arguments, **kwargs)deltas = [delta1 + delta2 for delta1, delta2 in zip(deltas, step_deltas)]return deltaselse:def body(iteration, deltas):with tf.control_dependencies(control_inputs=deltas):step_deltas = self.optimizer.step(time=time, variables=variables, arguments=arguments, **kwargs)deltas = [delta1 + delta2 for delta1, delta2 in zip(deltas, step_deltas)]return iteration + , deltasdef cond(iteration, deltas):return iteration < self.num_steps - _, deltas = tf.while_loop(cond=cond, body=body, loop_vars=(, deltas))return deltas", "docstring": "Creates the TensorFlow operations for performing an optimization step.\n\nArgs:\n time: Time tensor.\n variables: List of variables to optimize.\n arguments: Dict of arguments for callables, like fn_loss.\n fn_reference: A callable returning the reference values, in case of a comparative loss.\n **kwargs: Additional arguments passed on to the internal optimizer.\n\nReturns:\n List of delta tensors corresponding to the updates for each optimized variable.", "id": "f14338:c0:m1"} {"signature": "def __init__(self, learning_rate, num_samples=, unroll_loop=False, scope='', summary_labels=()):", "body": "assert isinstance(learning_rate, float) and learning_rate > self.learning_rate = learning_rateassert isinstance(num_samples, int) and num_samples > self.num_samples = num_samplesassert isinstance(unroll_loop, bool)self.unroll_loop = unroll_loopsuper(Evolutionary, self).__init__(scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new evolutionary optimizer instance.\n\nArgs:\n learning_rate: Learning rate.\n num_samples: Number of sampled perturbations.", "id": "f14339:c0:m0"} {"signature": "def tf_step(self,time,variables,arguments,fn_loss,**kwargs):", "body": "unperturbed_loss = fn_loss(**arguments)perturbations = [tf.random_normal(shape=util.shape(variable)) * self.learning_rate for variable in variables]applied = self.apply_step(variables=variables, deltas=perturbations)with tf.control_dependencies(control_inputs=(applied,)):perturbed_loss = fn_loss(**arguments)direction = tf.sign(x=(unperturbed_loss - perturbed_loss))deltas_sum = [direction * perturbation for perturbation in perturbations]if self.unroll_loop:previous_perturbations = perturbationsfor sample in xrange(self.num_samples):with tf.control_dependencies(control_inputs=deltas_sum):perturbations = [tf.random_normal(shape=util.shape(variable)) * self.learning_rate for variable in variables]perturbation_deltas = [pert - prev_pert for pert, prev_pert in zip(perturbations, previous_perturbations)]applied = self.apply_step(variables=variables, deltas=perturbation_deltas)previous_perturbations = perturbationswith tf.control_dependencies(control_inputs=(applied,)):perturbed_loss = fn_loss(**arguments)direction = tf.sign(x=(unperturbed_loss - perturbed_loss))deltas_sum = [delta + direction * perturbation for delta, perturbation in zip(deltas_sum, perturbations)]else:def body(iteration, deltas_sum, previous_perturbations):with tf.control_dependencies(control_inputs=deltas_sum):perturbations = 
[tf.random_normal(shape=util.shape(variable)) * self.learning_rate for variable in variables]perturbation_deltas = [pert - prev_pert for pert, prev_pert in zip(perturbations, previous_perturbations)]applied = self.apply_step(variables=variables, deltas=perturbation_deltas)with tf.control_dependencies(control_inputs=(applied,)):perturbed_loss = fn_loss(**arguments)direction = tf.sign(x=(unperturbed_loss - perturbed_loss))deltas_sum = [delta + direction * perturbation for delta, perturbation in zip(deltas_sum, perturbations)]return iteration + , deltas_sum, perturbationsdef cond(iteration, deltas_sum, previous_perturbation):return iteration < self.num_samples - _, deltas_sum, perturbations = tf.while_loop(cond=cond, body=body, loop_vars=(, deltas_sum, perturbations))with tf.control_dependencies(control_inputs=deltas_sum):deltas = [delta / self.num_samples for delta in deltas_sum]perturbation_deltas = [delta - pert for delta, pert in zip(deltas, perturbations)]applied = self.apply_step(variables=variables, deltas=perturbation_deltas)with tf.control_dependencies(control_inputs=(applied,)):return [delta + for delta in deltas]", "docstring": "Creates the TensorFlow operations for performing an optimization step.\n\nArgs:\n time: Time tensor.\n variables: List of variables to optimize.\n arguments: Dict of arguments for callables, like fn_loss.\n fn_loss: A callable returning the loss of the current model.\n **kwargs: Additional arguments, not used.\n\nReturns:\n List of delta tensors corresponding to the updates for each optimized variable.", "id": "f14339:c0:m1"} {"signature": "def __init__(self, sync_frequency=, update_weight=, scope='', summary_labels=()):", "body": "assert isinstance(sync_frequency, int) and sync_frequency > self.sync_frequency = sync_frequencyassert isinstance(update_weight, float) and update_weight > self.update_weight = update_weightsuper(Synchronization, self).__init__(scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new synchronization optimizer instance.\n\nArgs:\n sync_frequency: The interval between optimization calls actually performing a \n synchronization step.\n update_weight: The update weight, 1.0 meaning a full assignment of the source \n variables values.", "id": "f14340:c0:m0"} {"signature": "def tf_step(self, time, variables, source_variables, **kwargs):", "body": "assert all(util.shape(source) == util.shape(target) for source, target in zip(source_variables, variables))last_sync = tf.get_variable(name='',shape=(),dtype=tf.int64,initializer=tf.constant_initializer(value=(-self.sync_frequency), dtype=tf.int64),trainable=False)def sync():deltas = list()for source_variable, target_variable in zip(source_variables, variables):delta = self.update_weight * (source_variable - target_variable)deltas.append(delta)applied = self.apply_step(variables=variables, deltas=deltas)last_sync_updated = last_sync.assign(value=time)with tf.control_dependencies(control_inputs=(applied, last_sync_updated)):return [delta + for delta in deltas]def no_sync():deltas = list()for variable in variables:delta = tf.zeros(shape=util.shape(variable))deltas.append(delta)return deltasdo_sync = (time - last_sync >= self.sync_frequency)return tf.cond(pred=do_sync, true_fn=sync, false_fn=no_sync)", "docstring": "Creates the TensorFlow operations for performing an optimization step.\n\nArgs:\n time: Time tensor.\n variables: List of variables to optimize.\n source_variables: List of source variables to synchronize with.\n **kwargs: Additional arguments, not 
used.\n\nReturns:\n List of delta tensors corresponding to the updates for each optimized variable.", "id": "f14340:c0:m1"} {"signature": "def __init__(self, optimizer, clipping_value, scope='', summary_labels=()):", "body": "assert isinstance(clipping_value, float) and clipping_value > self.clipping_value = clipping_valuesuper(ClippedStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new multi-step meta optimizer instance.\n\nArgs:\n optimizer: The optimizer which is modified by this meta optimizer.\n clipping_value: Clip deltas at this value.", "id": "f14341:c0:m0"} {"signature": "def tf_step(self, time, variables, **kwargs):", "body": "deltas = self.optimizer.step(time=time, variables=variables, **kwargs)with tf.control_dependencies(control_inputs=deltas):clipped_deltas = list()exceeding_deltas = list()for delta in deltas:clipped_delta = tf.clip_by_value(t=delta,clip_value_min=-self.clipping_value,clip_value_max=self.clipping_value)clipped_deltas.append(clipped_delta)exceeding_deltas.append(clipped_delta - delta)applied = self.apply_step(variables=variables, deltas=exceeding_deltas)with tf.control_dependencies(control_inputs=(applied,)):return [delta + for delta in clipped_deltas]", "docstring": "Creates the TensorFlow operations for performing an optimization step.\n\nArgs:\n time: Time tensor.\n variables: List of variables to optimize.\n **kwargs: Additional arguments passed on to the internal optimizer.\n\nReturns:\n List of delta tensors corresponding to the updates for each optimized variable.", "id": "f14341:c0:m1"} {"signature": "def __init__(self, optimizer, scope='', summary_labels=()):", "body": "super(GlobalOptimizer, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new global optimizer instance.\n\nArgs:\n optimizer: The optimizer which is modified by this meta optimizer.", "id": "f14342:c0:m0"} {"signature": "def tf_step(self, time, variables, **kwargs):", "body": "global_variables = kwargs[\"\"]assert all(util.shape(global_variable) == util.shape(local_variable)for global_variable, local_variable in zip(global_variables, variables))local_deltas = self.optimizer.step(time=time, variables=variables, **kwargs)with tf.control_dependencies(control_inputs=local_deltas):applied = self.optimizer.apply_step(variables=global_variables, deltas=local_deltas)with tf.control_dependencies(control_inputs=(applied,)):update_deltas = list()for global_variable, local_variable in zip(global_variables, variables):delta = global_variable - local_variableupdate_deltas.append(delta)applied = self.apply_step(variables=variables, deltas=update_deltas)with tf.control_dependencies(control_inputs=(applied,)):return [local_delta + update_delta for local_delta, update_delta in zip(local_deltas, update_deltas)]", "docstring": "Keyword Args:\n global_variables: List of global variables to apply the proposed optimization step to.\n\nReturns:\n List of delta tensors corresponding to the updates for each optimized variable.", "id": "f14342:c0:m1"} {"signature": "def __init__(self,optimizer,ls_max_iterations=,ls_accept_ratio=,ls_mode='',ls_parameter=,ls_unroll_loop=False,scope='',summary_labels=()):", "body": "self.solver = LineSearch(max_iterations=ls_max_iterations,accept_ratio=ls_accept_ratio,mode=ls_mode,parameter=ls_parameter,unroll_loop=ls_unroll_loop)super(OptimizedStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new 
optimized step meta optimizer instance.\n\nArgs:\n optimizer: The optimizer which is modified by this meta optimizer.\n ls_max_iterations: Maximum number of line search iterations.\n ls_accept_ratio: Line search acceptance ratio.\n ls_mode: Line search mode, see LineSearch solver.\n ls_parameter: Line search parameter, see LineSearch solver.\n ls_unroll_loop: Unroll line search loop if true.", "id": "f14345:c0:m0"} {"signature": "def tf_step(self,time,variables,arguments,fn_loss,fn_reference,**kwargs):", "body": "arguments[''] = fn_reference(**arguments)loss_before = -fn_loss(**arguments)with tf.control_dependencies(control_inputs=(loss_before,)):deltas = self.optimizer.step(time=time,variables=variables,arguments=arguments,fn_loss=fn_loss,return_estimated_improvement=True,**kwargs)if isinstance(deltas, tuple):if len(deltas) != :raise TensorForceError(\"\")deltas, estimated_improvement = deltasestimated_improvement = -estimated_improvementelse:estimated_improvement = Nonewith tf.control_dependencies(control_inputs=deltas):loss_step = -fn_loss(**arguments)with tf.control_dependencies(control_inputs=(loss_step,)):def evaluate_step(deltas):with tf.control_dependencies(control_inputs=deltas):applied = self.apply_step(variables=variables, deltas=deltas)with tf.control_dependencies(control_inputs=(applied,)):return -fn_loss(**arguments)return self.solver.solve(fn_x=evaluate_step,x_init=deltas,base_value=loss_before,target_value=loss_step,estimated_improvement=estimated_improvement)", "docstring": "Creates the TensorFlow operations for performing an optimization step.\n\nArgs:\n time: Time tensor.\n variables: List of variables to optimize.\n arguments: Dict of arguments for callables, like fn_loss.\n fn_loss: A callable returning the loss of the current model.\n fn_reference: A callable returning the reference values, in case of a comparative loss.\n **kwargs: Additional arguments passed on to the internal optimizer.\n\nReturns:\n List of delta tensors corresponding to the updates for each optimized variable.", "id": "f14345:c0:m1"} {"signature": "def __init__(self,learning_rate,cg_max_iterations=,cg_damping=,cg_unroll_loop=False,scope='',summary_labels=()):", "body": "assert learning_rate > self.learning_rate = learning_rateself.solver = ConjugateGradient(max_iterations=cg_max_iterations,damping=cg_damping,unroll_loop=cg_unroll_loop)super(NaturalGradient, self).__init__(scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new natural gradient optimizer instance.\n\nArgs:\n learning_rate: Learning rate, i.e. 
KL-divergence of distributions between optimization steps.\n cg_max_iterations: Conjugate gradient solver max iterations.\n cg_damping: Conjugate gradient solver damping factor.\n cg_unroll_loop: Unroll conjugate gradient loop if true.", "id": "f14346:c0:m0"} {"signature": "def tf_step(self,time,variables,arguments,fn_loss,fn_kl_divergence,return_estimated_improvement=False,**kwargs):", "body": "kldiv = fn_kl_divergence(**arguments)kldiv_gradients = tf.gradients(ys=kldiv, xs=variables)def fisher_matrix_product(deltas):deltas = [tf.stop_gradient(input=delta) for delta in deltas]delta_kldiv_gradients = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(delta * grad)) for delta, grad in zip(deltas, kldiv_gradients)])return tf.gradients(ys=delta_kldiv_gradients, xs=variables)loss = fn_loss(**arguments)loss_gradients = tf.gradients(ys=loss, xs=variables)deltas = self.solver.solve(fn_x=fisher_matrix_product, x_init=None, b=[-grad for grad in loss_gradients])delta_fisher_matrix_product = fisher_matrix_product(deltas=deltas)constant = * tf.add_n(inputs=[tf.reduce_sum(input_tensor=(delta_F * delta))for delta_F, delta in zip(delta_fisher_matrix_product, deltas)])def natural_gradient_step():lagrange_multiplier = tf.sqrt(x=(constant / self.learning_rate))estimated_deltas = [delta / lagrange_multiplier for delta in deltas]estimated_improvement = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(grad * delta))for grad, delta in zip(loss_gradients, estimated_deltas)])applied = self.apply_step(variables=variables, deltas=estimated_deltas)with tf.control_dependencies(control_inputs=(applied,)):if return_estimated_improvement:return [estimated_delta + for estimated_delta in estimated_deltas], estimated_improvementelse:return [estimated_delta + for estimated_delta in estimated_deltas]def zero_step():if return_estimated_improvement:return [tf.zeros_like(tensor=delta) for delta in deltas], else:return [tf.zeros_like(tensor=delta) for delta in deltas]return tf.cond(pred=(constant > ), true_fn=natural_gradient_step, false_fn=zero_step)", "docstring": "Creates the TensorFlow operations for performing an optimization step.\n\nArgs:\n time: Time tensor.\n variables: List of variables to optimize.\n arguments: Dict of arguments for callables, like fn_loss.\n fn_loss: A callable returning the loss of the current model.\n fn_kl_divergence: A callable returning the KL-divergence relative to the current model.\n return_estimated_improvement: Returns the estimated improvement resulting from the \n natural gradient calculation if true.\n **kwargs: Additional arguments, not used.\n\nReturns:\n List of delta tensors corresponding to the updates for each optimized variable.", "id": "f14346:c0:m1"} {"signature": "def __init__(self,learning_rate=,momentum=,clip_kl=,kfac_update=,stats_accum_iter=,full_stats_init=False,cold_iter=,cold_lr=None,async_=False,async_stats=False,epsilon=,stats_decay=,blockdiag_bias=False,channel_fac=False,factored_damping=False,approxT2=False,use_float64=False,weight_decay_dict={},max_grad_norm=,scope='',summary_labels=()):", "body": "self.max_grad_norm = max_grad_normself._lr = learning_rateself._momentum = momentumself._clip_kl = clip_klself._channel_fac = channel_facself._kfac_update = kfac_updateself._async = async_self._async_stats = async_statsself._epsilon = epsilonself._stats_decay = stats_decayself._blockdiag_bias = blockdiag_biasself._approxT2 = approxT2self._use_float64 = use_float64self._factored_damping = factored_dampingself._cold_iter = cold_iterif cold_lr == None:self._cold_lr = 
self._lrelse:self._cold_lr = cold_lrself._stats_accum_iter = stats_accum_iterself._weight_decay_dict = weight_decay_dictself._diag_init_coeff = self._full_stats_init = full_stats_initif not self._full_stats_init:self._stats_accum_iter = self._cold_iterself.sgd_step = tf.Variable(, name='', trainable=False)self.global_step = tf.Variable(, name='', trainable=False)self.cold_step = tf.Variable(, name='', trainable=False)self.factor_step = tf.Variable(, name='', trainable=False)self.stats_step = tf.Variable(, name='', trainable=False)self.vFv = tf.Variable(, name='', trainable=False)self.factors = {}self.param_vars = []self.stats = {}self.stats_eigen = {}super(KFAC, self).__init__(scope=scope, summary_labels=summary_labels)", "docstring": "Initializes a KFAC optimizer.\n\nFor more information on arguments, see the Kfac Optimization paper https://arxiv.org/pdf/1503.05671.pdf", "id": "f14347:c0:m0"} {"signature": "def apply_stats(self, statsUpdates):", "body": "def updateAccumStats():if self._full_stats_init:return tf.cond(tf.greater(self.sgd_step, self._cold_iter), lambda: tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff= / self._stats_accum_iter)), tf.no_op)else:return tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff= / self._stats_accum_iter))def updateRunningAvgStats(statsUpdates, fac_iter=):return tf.group(*self._apply_stats(statsUpdates))if self._async_stats:update_stats = self._apply_stats(statsUpdates)queue = tf.FIFOQueue(, [item.dtype for item in update_stats], shapes=[item.get_shape() for item in update_stats])enqueue_op = queue.enqueue(update_stats)def dequeue_stats_op():return queue.dequeue()self.qr_stats = tf.train.QueueRunner(queue, [enqueue_op])update_stats_op = tf.cond(tf.equal(queue.size(), tf.convert_to_tensor()), tf.no_op, lambda: tf.group(*[dequeue_stats_op(), ]))else:update_stats_op = tf.cond(tf.greater_equal(self.stats_step, self._stats_accum_iter), lambda: updateRunningAvgStats(statsUpdates), updateAccumStats)self._update_stats_op = update_stats_opreturn update_stats_op", "docstring": "compute stats and update/apply the new stats to the running average", "id": "f14347:c0:m5"} {"signature": "def computeStatsEigen(self):", "body": "with tf.device(''):def removeNone(tensor_list):local_list = []for item in tensor_list:if item is not None:local_list.append(item)return local_listdef copyStats(var_list):print(\"\")redundant_stats = {}copied_list = []for item in var_list:if item is not None:if item not in redundant_stats:if self._use_float64:redundant_stats[item] = tf.cast(tf.identity(item), tf.float64)else:redundant_stats[item] = tf.identity(item)copied_list.append(redundant_stats[item])else:copied_list.append(None)return copied_liststats_eigen = self.stats_eigencomputedEigen = {}eigen_reverse_lookup = {}updateOps = []with tf.control_dependencies([]):for stats_var in stats_eigen:if stats_var not in computedEigen:eigens = tf.self_adjoint_eig(stats_var)e = eigens[]Q = eigens[]if self._use_float64:e = tf.cast(e, tf.float32)Q = tf.cast(Q, tf.float32)updateOps.append(e)updateOps.append(Q)computedEigen[stats_var] = {'': e, '': Q}eigen_reverse_lookup[e] = stats_eigen[stats_var]['']eigen_reverse_lookup[Q] = stats_eigen[stats_var]['']self.eigen_reverse_lookup = eigen_reverse_lookupself.eigen_update_list = updateOpsreturn updateOps", "docstring": "compute the eigen decomp using copied var stats to avoid concurrent read/write from other queue", "id": "f14347:c0:m8"} {"signature": "def tf_step(self, time, variables, **kwargs):", "body": 
"fn_loss = kwargs[\"\"]if variables is None:variables = tf.trainable_variablesreturn tf.gradients(fn_loss, variables)", "docstring": "Creates the TensorFlow operations for performing an optimization step on the given variables, including\nactually changing the values of the variables.\n\nArgs:\n time: Time tensor. Not used for this optimizer.\n variables: List of variables to optimize.\n **kwargs: \n fn_loss : loss function tensor to differentiate.\n\nReturns:\n List of delta tensors corresponding to the updates for each optimized variable.", "id": "f14347:c0:m15"} {"signature": "def apply_step(self, variables, deltas, loss_sampled):", "body": "update_stats_op = self.compute_and_apply_stats(loss_sampled, var_list=var_list)grads = [(a, b) for a, b in zip(deltas, varlist)]kfacOptim, _ = self.apply_gradients_kfac(grads)return kfacOptim", "docstring": "Applies the given (and already calculated) step deltas to the variable values.\n\nArgs:\n variables: List of variables.\n deltas: List of deltas of same length.\n loss_sampled : the sampled loss\n\nReturns:\n The step-applied operation. A tf.group of tf.assign_add ops.", "id": "f14347:c0:m16"} {"signature": "def minimize(self, time, variables, **kwargs):", "body": "loss = kwargs[\"\"]sampled_loss = kwargs[\"\"]min_op, _ = self.minimize_(loss, sampled_loss, var_list=variables)return min_op", "docstring": "Performs an optimization step.\n\nArgs:\n time: Time tensor. Not used for this\n variables: List of variables to optimize.\n **kwargs: \n fn_loss : loss function tensor that is differentiated\n sampled_loss : the sampled loss from running the model.\n\nReturns:\n The optimization operation.", "id": "f14347:c0:m17"} {"signature": "def __init__(self, max_iterations, accept_ratio, mode, parameter, unroll_loop=False):", "body": "assert accept_ratio >= self.accept_ratio = accept_ratioif mode not in ('', ''):raise TensorForceError(\"\".format(mode))self.mode = modeself.parameter = parametersuper(LineSearch, self).__init__(max_iterations=max_iterations, unroll_loop=unroll_loop)", "docstring": "Creates a new line search solver instance.\n\nArgs:\n max_iterations: Maximum number of iterations before termination.\n accept_ratio: Lower limit of what improvement ratio over $x = x'$ is acceptable \n (based either on a given estimated improvement or with respect to the value at \n $x = x'$).\n mode: Mode of movement between $x_0$ and $x'$, either 'linear' or 'exponential'.\n parameter: Movement mode parameter, additive or multiplicative, respectively.\n unroll_loop: Unrolls the TensorFlow while loop if true.", "id": "f14348:c0:m0"} {"signature": "def tf_solve(self, fn_x, x_init, base_value, target_value, estimated_improvement=None):", "body": "return super(LineSearch, self).tf_solve(fn_x, x_init, base_value, target_value, estimated_improvement)", "docstring": "Iteratively optimizes $f(x)$ for $x$ on the line between $x'$ and $x_0$.\n\nArgs:\n fn_x: A callable returning the value $f(x)$ at $x$.\n x_init: Initial solution guess $x_0$.\n base_value: Value $f(x')$ at $x = x'$.\n target_value: Value $f(x_0)$ at $x = x_0$.\n estimated_improvement: Estimated improvement for $x = x_0$, $f(x')$ if None.\n\nReturns:\n A solution $x$ to the problem as given by the solver.", "id": "f14348:c0:m1"} {"signature": "def tf_initialize(self, x_init, base_value, target_value, estimated_improvement):", "body": "self.base_value = base_valueif estimated_improvement is None: estimated_improvement = tf.abs(x=base_value)first_step = super(LineSearch, 
self).tf_initialize(x_init)improvement = tf.divide(x=(target_value - self.base_value),y=tf.maximum(x=estimated_improvement, y=util.epsilon))last_improvement = improvement - if self.mode == '':deltas = [-t * self.parameter for t in x_init]self.estimated_incr = -estimated_improvement * self.parameterelif self.mode == '':deltas = [-t * self.parameter for t in x_init]return first_step + (deltas, improvement, last_improvement, estimated_improvement)", "docstring": "Initialization step preparing the arguments for the first iteration of the loop body.\n\nArgs:\n x_init: Initial solution guess $x_0$.\n base_value: Value $f(x')$ at $x = x'$.\n target_value: Value $f(x_0)$ at $x = x_0$.\n estimated_improvement: Estimated value at $x = x_0$, $f(x')$ if None.\n\nReturns:\n Initial arguments for tf_step.", "id": "f14348:c0:m2"} {"signature": "def tf_step(self, x, iteration, deltas, improvement, last_improvement, estimated_improvement):", "body": "x, next_iteration, deltas, improvement, last_improvement, estimated_improvement = super(LineSearch, self).tf_step(x, iteration, deltas, improvement, last_improvement, estimated_improvement)next_x = [t + delta for t, delta in zip(x, deltas)]if self.mode == '':next_deltas = deltasnext_estimated_improvement = estimated_improvement + self.estimated_increlif self.mode == '':next_deltas = [delta * self.parameter for delta in deltas]next_estimated_improvement = estimated_improvement * self.parametertarget_value = self.fn_x(next_deltas)next_improvement = tf.divide(x=(target_value - self.base_value),y=tf.maximum(x=next_estimated_improvement, y=util.epsilon))return next_x, next_iteration, next_deltas, next_improvement, improvement, next_estimated_improvement", "docstring": "Iteration loop body of the line search algorithm.\n\nArgs:\n x: Current solution estimate $x_t$.\n iteration: Current iteration counter $t$.\n deltas: Current difference $x_t - x'$.\n improvement: Current improvement $(f(x_t) - f(x')) / v'$.\n last_improvement: Last improvement $(f(x_{t-1}) - f(x')) / v'$.\n estimated_improvement: Current estimated value $v'$.\n\nReturns:\n Updated arguments for next iteration.", "id": "f14348:c0:m3"} {"signature": "def tf_next_step(self, x, iteration, deltas, improvement, last_improvement, estimated_improvement):", "body": "next_step = super(LineSearch, self).tf_next_step(x, iteration, deltas, improvement, last_improvement, estimated_improvement)def undo_deltas():value = self.fn_x([-delta for delta in deltas])with tf.control_dependencies(control_inputs=(value,)):return tf.less(x=value, y=value) improved = tf.cond(pred=(improvement > last_improvement),true_fn=(lambda: True),false_fn=undo_deltas)next_step = tf.logical_and(x=next_step, y=improved)next_step = tf.logical_and(x=next_step, y=(improvement < self.accept_ratio))return tf.logical_and(x=next_step, y=(estimated_improvement > util.epsilon))", "docstring": "Termination condition: max number of iterations, or no improvement for last step, or \nimprovement less than acceptable ratio, or estimated value not positive.\n\nArgs:\n x: Current solution estimate $x_t$.\n iteration: Current iteration counter $t$.\n deltas: Current difference $x_t - x'$.\n improvement: Current improvement $(f(x_t) - f(x')) / v'$.\n last_improvement: Last improvement $(f(x_{t-1}) - f(x')) / v'$.\n estimated_improvement: Current estimated value $v'$.\n\nReturns:\n True if another iteration should be performed.", "id": "f14348:c0:m4"} {"signature": "def __init__(self, max_iterations, unroll_loop=False):", "body": "assert max_iterations >= 
self.max_iterations = max_iterationsassert isinstance(unroll_loop, bool)self.unroll_loop = unroll_loopsuper(Iterative, self).__init__()self.initialize = tf.make_template(name_='', func_=self.tf_initialize)self.step = tf.make_template(name_='', func_=self.tf_step)self.next_step = tf.make_template(name_='', func_=self.tf_next_step)", "docstring": "Creates a new iterative solver instance.\n\nArgs:\n max_iterations: Maximum number of iterations before termination.\n unroll_loop: Unrolls the TensorFlow while loop if true.", "id": "f14349:c0:m0"} {"signature": "def tf_solve(self, fn_x, x_init, *args):", "body": "self.fn_x = fn_xargs = self.initialize(x_init, *args)if self.unroll_loop:for _ in range(self.max_iterations):next_step = self.next_step(*args)step = (lambda: self.step(*args))do_nothing = (lambda: args)args = tf.cond(pred=next_step, true_fn=step, false_fn=do_nothing)else:args = tf.while_loop(cond=self.next_step, body=self.step, loop_vars=args)return args[]", "docstring": "Iteratively solves an equation/optimization for $x$ involving an expression $f(x)$.\n\nArgs:\n fn_x: A callable returning an expression $f(x)$ given $x$.\n x_init: Initial solution guess $x_0$.\n *args: Additional solver-specific arguments.\n\nReturns:\n A solution $x$ to the problem as given by the solver.", "id": "f14349:c0:m1"} {"signature": "def tf_initialize(self, x_init, *args):", "body": "return x_init, ", "docstring": "Initialization step preparing the arguments for the first iteration of the loop body \n(default: initial solution guess and iteration counter).\n\nArgs:\n x_init: Initial solution guess $x_0$.\n *args: Additional solver-specific arguments.\n\nReturns:\n Initial arguments for tf_step.", "id": "f14349:c0:m2"} {"signature": "def tf_step(self, x, iteration, *args):", "body": "return (x, iteration + ) + args", "docstring": "Iteration loop body of the iterative solver (default: increment iteration step). 
The \nfirst two loop arguments have to be the current solution estimate and the iteration step.\n\nArgs:\n x: Current solution estimate.\n iteration: Current iteration counter.\n *args: Additional solver-specific arguments.\n\nReturns:\n Updated arguments for next iteration.", "id": "f14349:c0:m3"} {"signature": "def tf_next_step(self, x, iteration, *args):", "body": "return iteration < self.max_iterations", "docstring": "Termination condition (default: max number of iterations).\n\nArgs:\n x: Current solution estimate.\n iteration: Current iteration counter.\n *args: Additional solver-specific arguments.\n\nReturns:\n True if another iteration should be performed.", "id": "f14349:c0:m4"} {"signature": "def __init__(self):", "body": "self.solve = tf.make_template(name_='', func_=self.tf_solve)", "docstring": "Creates a new solver instance.", "id": "f14350:c0:m0"} {"signature": "def tf_solve(self, fn_x, *args):", "body": "raise NotImplementedError", "docstring": "Solves an equation/optimization for $x$ involving an expression $f(x)$.\n\nArgs:\n fn_x: A callable returning an expression $f(x)$ given $x$.\n *args: Additional solver-specific arguments.\n\nReturns:\n A solution $x$ to the problem as given by the solver.", "id": "f14350:c0:m1"} {"signature": "@staticmethoddef from_config(config, kwargs=None):", "body": "return util.get_object(obj=config,predefined=tensorforce.core.optimizers.solvers.solvers,kwargs=kwargs)", "docstring": "Creates a solver from a specification dict.", "id": "f14350:c0:m2"} {"signature": "def __init__(self, max_iterations, damping, unroll_loop=False):", "body": "assert damping >= self.damping = dampingsuper(ConjugateGradient, self).__init__(max_iterations=max_iterations, unroll_loop=unroll_loop)", "docstring": "Creates a new conjugate gradient solver instance.\n\nArgs:\n max_iterations: Maximum number of iterations before termination.\n damping: Damping factor.\n unroll_loop: Unrolls the TensorFlow while loop if true.", "id": "f14352:c0:m0"} {"signature": "def tf_solve(self, fn_x, x_init, b):", "body": "return super(ConjugateGradient, self).tf_solve(fn_x, x_init, b)", "docstring": "Iteratively solves the system of linear equations $A x = b$.\n\nArgs:\n fn_x: A callable returning the left-hand side $A x$ of the system of linear equations.\n x_init: Initial solution guess $x_0$, zero vector if None.\n b: The right-hand side $b$ of the system of linear equations.\n\nReturns:\n A solution $x$ to the problem as given by the solver.", "id": "f14352:c0:m1"} {"signature": "def tf_initialize(self, x_init, b):", "body": "if x_init is None:x_init = [tf.zeros(shape=util.shape(t)) for t in b]initial_args = super(ConjugateGradient, self).tf_initialize(x_init)conjugate = residual = [t - fx for t, fx in zip(b, self.fn_x(x_init))]squared_residual = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(res * res)) for res in residual])return initial_args + (conjugate, residual, squared_residual)", "docstring": "Initialization step preparing the arguments for the first iteration of the loop body: \n$x_0, 0, p_0, r_0, r_0^2$.\n\nArgs:\n x_init: Initial solution guess $x_0$, zero vector if None.\n b: The right-hand side $b$ of the system of linear equations.\n\nReturns:\n Initial arguments for tf_step.", "id": "f14352:c0:m2"} {"signature": "def tf_step(self, x, iteration, conjugate, residual, squared_residual):", "body": "x, next_iteration, conjugate, residual, squared_residual = super(ConjugateGradient, self).tf_step(x, iteration, conjugate, residual, squared_residual)A_conjugate = 
self.fn_x(conjugate)if self.damping > :A_conjugate = [A_conj + self.damping * conj for A_conj, conj in zip(A_conjugate, conjugate)]conjugate_A_conjugate = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(conj * A_conj)) for conj, A_conj in zip(conjugate, A_conjugate)])alpha = squared_residual / tf.maximum(x=conjugate_A_conjugate, y=util.epsilon)next_x = [t + alpha * conj for t, conj in zip(x, conjugate)]next_residual = [res - alpha * A_conj for res, A_conj in zip(residual, A_conjugate)]next_squared_residual = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(res * res)) for res in next_residual])beta = next_squared_residual / tf.maximum(x=squared_residual, y=util.epsilon)next_conjugate = [res + beta * conj for res, conj in zip(next_residual, conjugate)]return next_x, next_iteration, next_conjugate, next_residual, next_squared_residual", "docstring": "Iteration loop body of the conjugate gradient algorithm.\n\nArgs:\n x: Current solution estimate $x_t$.\n iteration: Current iteration counter $t$.\n conjugate: Current conjugate $c_t$.\n residual: Current residual $r_t$.\n squared_residual: Current squared residual $r_t^2$.\n\nReturns:\n Updated arguments for next iteration.", "id": "f14352:c0:m3"} {"signature": "def tf_next_step(self, x, iteration, conjugate, residual, squared_residual):", "body": "next_step = super(ConjugateGradient, self).tf_next_step(x, iteration, conjugate, residual, squared_residual)return tf.logical_and(x=next_step, y=(squared_residual >= util.epsilon))", "docstring": "Termination condition: max number of iterations, or residual sufficiently small.\n\nArgs:\n x: Current solution estimate $x_t$.\n iteration: Current iteration counter $t$.\n conjugate: Current conjugate $c_t$.\n residual: Current residual $r_t$.\n squared_residual: Current squared residual $r_t^2$.\n\nReturns:\n True if another iteration should be performed.", "id": "f14352:c0:m4"} {"signature": "def put(self, item, priority=None):", "body": "if not self._isfull():self._memory.append(None)position = self._next_position_then_increment()old_priority = if self._memory[position] is Noneelse (self._memory[position].priority or )row = _SumRow(item, priority)self._memory[position] = rowself._update_internal_nodes(position, (row.priority or ) - old_priority)", "docstring": "Stores a transition in replay memory.\n\nIf the memory is full, the oldest entry is replaced.", "id": "f14354:c0:m1"} {"signature": "def move(self, external_index, new_priority):", "body": "index = external_index + (self._capacity - )return self._move(index, new_priority)", "docstring": "Change the priority of a leaf node", "id": "f14354:c0:m2"} {"signature": "def _move(self, index, new_priority):", "body": "item, old_priority = self._memory[index]old_priority = old_priority or self._memory[index] = _SumRow(item, new_priority)self._update_internal_nodes(index, new_priority - old_priority)", "docstring": "Change the priority of a leaf node.", "id": "f14354:c0:m3"} {"signature": "def _update_internal_nodes(self, index, delta):", "body": "while index > :index = (index - ) // self._memory[index] += delta", "docstring": "Update internal priority sums when leaf priority has been changed.\nArgs:\n index: leaf node index\n delta: change in priority", "id": "f14354:c0:m4"} {"signature": "def _next_position_then_increment(self):", "body": "start = self._capacity - position = start + self._positionself._position = (self._position + ) % self._capacityreturn position", "docstring": "Similar to position++.", "id": "f14354:c0:m6"} {"signature": "def 
_sample_with_priority(self, p):", "body": "parent = while True:left = * parent + if left >= len(self._memory):return parentleft_p = self._memory[left] if left < self._capacity - else (self._memory[left].priority or )if p <= left_p:parent = leftelse:if left + >= len(self._memory):raise RuntimeError('')p -= left_pparent = left + ", "docstring": "Sample random element with priority greater than p.", "id": "f14354:c0:m7"} {"signature": "def sample_minibatch(self, batch_size):", "body": "pool_size = len(self)if pool_size == :return []delta_p = self._memory[] / batch_sizechosen_idx = []if abs(self._memory[]) < util.epsilon:chosen_idx = np.random.randint(self._capacity - , self._capacity - + len(self), size=batch_size).tolist()else:for i in xrange(batch_size):lower = max(i * delta_p, )upper = min((i + ) * delta_p, self._memory[])p = random.uniform(lower, upper)chosen_idx.append(self._sample_with_priority(p))return [(i, self._memory[i]) for i in chosen_idx]", "docstring": "Sample minibatch of size batch_size.", "id": "f14354:c0:m8"} {"signature": "def __len__(self):", "body": "return len(self._memory) - (self._capacity - )", "docstring": "Return the current number of transitions.", "id": "f14354:c0:m9"} {"signature": "def get_batch(self, batch_size, next_states=False):", "body": "if batch_size > len(self.observations):raise TensorForceError(\"\")states = {name: np.zeros((batch_size,) + tuple(state['']), dtype=util.np_dtype(state[''])) for name, state in self.states_spec.items()}internals = [np.zeros((batch_size,) + shape, dtype)for shape, dtype in self.internals_spec]actions = {name: np.zeros((batch_size,) + tuple(action['']), dtype=util.np_dtype(action[''])) for name, action in self.actions_spec.items()}terminal = np.zeros((batch_size,), dtype=util.np_dtype(''))reward = np.zeros((batch_size,), dtype=util.np_dtype(''))if next_states:next_states = {name: np.zeros((batch_size,) + tuple(state['']), dtype=util.np_dtype(state[''])) for name, state in self.states_spec.items()}next_internals = [np.zeros((batch_size,) + shape, dtype)for shape, dtype in self.internals_spec]unseen_indices = list(xrange(self.none_priority_index + self.observations._capacity - ,len(self.observations) + self.observations._capacity - ))self.batch_indices = unseen_indices[:batch_size]remaining = batch_size - len(self.batch_indices)if remaining:samples = self.observations.sample_minibatch(remaining)sample_indices = [i for i, o in samples]self.batch_indices += sample_indicesnp.random.shuffle(self.batch_indices)for n, index in enumerate(self.batch_indices):observation, _ = self.observations._memory[index]for name, state in states.items():state[n] = observation[][name]for k, internal in enumerate(internals):internal[n] = observation[][k]for name, action in actions.items():action[n] = observation[][name]terminal[n] = observation[]reward[n] = observation[]if next_states:for name, next_state in next_states.items():next_state[n] = observation[][name]for k, next_internal in enumerate(next_internals):next_internal[n] = observation[][k]if next_states:return dict(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward,next_states=next_states,next_internals=next_internals)else:return dict(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward)", "docstring": "Samples a batch of the specified size according to priority.\n\nArgs:\n batch_size: The batch size\n next_states: A boolean flag indicating whether 'next_states' values should be included\n\nReturns: A dict containing states, actions, 
rewards, terminals, internal states (and next states)", "id": "f14354:c1:m2"} {"signature": "def update_batch(self, loss_per_instance):", "body": "if self.batch_indices is None:raise TensorForceError(\"\")for index, loss in zip(self.batch_indices, loss_per_instance):new_priority = (np.abs(loss) + self.prioritization_constant) ** self.prioritization_weightself.observations._move(index, new_priority)self.none_priority_index += ", "docstring": "Computes priorities according to loss.\n\nArgs:\n loss_per_instance:", "id": "f14354:c1:m3"} {"signature": "def __init__(self, states, internals, actions, include_next_states, capacity, scope='', summary_labels=None):", "body": "self.capacity = capacityself.scope = scopeself.states_memory = dict() self.internals_memory = dict() self.actions_memory = dict() self.terminal_memory = None self.reward_memory = None self.memory_index = None self.episode_indices = None self.episode_count = None self.retrieve_indices = Nonesuper(Queue, self).__init__(states=states,internals=internals,actions=actions,include_next_states=include_next_states,scope=scope,summary_labels=summary_labels)", "docstring": "Queue memory.\n\nArgs:\n capacity: Memory capacity.", "id": "f14355:c0:m0"} {"signature": "def tf_retrieve_indices(self, indices):", "body": "states = dict()for name in sorted(self.states_memory):states[name] = tf.gather(params=self.states_memory[name], indices=indices)internals = dict()for name in sorted(self.internals_memory):internals[name] = tf.gather(params=self.internals_memory[name], indices=indices)actions = dict()for name in sorted(self.actions_memory):actions[name] = tf.gather(params=self.actions_memory[name], indices=indices)terminal = tf.gather(params=self.terminal_memory, indices=indices)reward = tf.gather(params=self.reward_memory, indices=indices)if self.include_next_states:assert util.rank(indices) == next_indices = (indices + ) % self.capacitynext_states = dict()for name in sorted(self.states_memory):next_states[name] = tf.gather(params=self.states_memory[name], indices=next_indices)next_internals = dict()for name in sorted(self.internals_memory):next_internals[name] = tf.gather(params=self.internals_memory[name], indices=next_indices)return dict(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward,next_states=next_states,next_internals=next_internals)else:return dict(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward)", "docstring": "Fetches experiences for given indices.\n\nArgs:\n indices: Index tensor\n\nReturns: Batch of experiences", "id": "f14355:c0:m4"} {"signature": "def __init__(self, states, internals, actions, include_next_states, capacity, scope='', summary_labels=None):", "body": "super(Replay, self).__init__(states=states,internals=internals,actions=actions,include_next_states=include_next_states,capacity=capacity,scope=scope,summary_labels=summary_labels)", "docstring": "Replay memory.\n\nArgs:\n states (dict): States specification.\n internals (dict): Internal states specification.\n actions (dict): Actions specification.\n include_next_states (bool): Include subsequent state if true.\n capacity (int): Memory capacity (number of state/internals/action/(next-state)? 
records).", "id": "f14356:c0:m0"} {"signature": "def __init__(self, states, internals, actions, include_next_states, scope='', summary_labels=None):", "body": "self.states_spec = statesself.internals_spec = internalsself.actions_spec = actionsself.include_next_states = include_next_statesself.scope = scopeself.summary_labels = set(summary_labels or ())self.variables = dict()self.initialize = None self.store = Noneself.retrieve_timesteps = Noneself.retrieve_episodes = Noneself.retrieve_sequences = Noneself.update_batch = Noneself.setup_template_funcs()", "docstring": "Args:\n states (dict): States specification.\n internals (dict): Internal states specification.\n actions (dict): Actions specification.\n include_next_states (bool): Include subsequent state if true.\n scope (str): The tf variable scope to use when creating variables for this memory.\n summary_labels (list): List of summary labels.", "id": "f14357:c0:m0"} {"signature": "def tf_initialize(self):", "body": "raise NotImplementedError", "docstring": "Initializes the memory. Called by a memory-model in its own tf_initialize method.", "id": "f14357:c0:m2"} {"signature": "def tf_store(self, states, internals, actions, terminal, reward):", "body": "raise NotImplementedError", "docstring": "Stores experiences, i.e. a batch of timesteps.\n\nArgs:\n states: Dict of state tensors.\n internals: List of prior internal state tensors.\n actions: Dict of action tensors.\n terminal: Terminal boolean tensor.\n reward: Reward tensor.", "id": "f14357:c0:m3"} {"signature": "def tf_retrieve_timesteps(self, n):", "body": "raise NotImplementedError", "docstring": "Retrieves a given number of timesteps from the stored experiences.\n\nArgs:\n n: Number of timesteps to retrieve.\n\nReturns:\n Dicts containing the retrieved experiences.", "id": "f14357:c0:m4"} {"signature": "def tf_retrieve_episodes(self, n):", "body": "raise NotImplementedError", "docstring": "Retrieves a given number of episodes from the stored experiences.\n\nArgs:\n n: Number of episodes to retrieve.\n\nReturns:\n Dicts containing the retrieved experiences.", "id": "f14357:c0:m5"} {"signature": "def tf_retrieve_sequences(self, n, sequence_length):", "body": "raise NotImplementedError", "docstring": "Retrieves a given number of temporally consistent timestep sequences from the stored\nexperiences.\n\nArgs:\n n: Number of sequences to retrieve.\n sequence_length: Length of timestep sequences.\n\nReturns:\n Dicts containing the retrieved experiences.", "id": "f14357:c0:m6"} {"signature": "def tf_update_batch(self, loss_per_instance):", "body": "return tf.no_op()", "docstring": "Updates the internal information of the latest batch instances based on their loss.\n\nArgs:\n loss_per_instance: Loss per instance tensor.", "id": "f14357:c0:m7"} {"signature": "def get_variables(self):", "body": "return [self.variables[key] for key in sorted(self.variables)]", "docstring": "Returns the TensorFlow variables used by the memory.\n\nReturns:\n List of variables.", "id": "f14357:c0:m8"} {"signature": "@staticmethoddef from_spec(spec, kwargs=None):", "body": "memory = util.get_object(obj=spec,predefined_objects=tensorforce.core.memories.memories,kwargs=kwargs)assert isinstance(memory, Memory)return memory", "docstring": "Creates a memory from a specification dict.", "id": "f14357:c0:m9"} {"signature": "def __init__(self,states,internals,actions,include_next_states,capacity,prioritization_weight=,buffer_size=,scope='',summary_labels=None):", "body": "self.capacity = capacityself.buffer_size = 
buffer_sizeself.prioritization_weight = prioritization_weightself.retrieve_indices = Noneself.states_memory = dict()self.internals_memory = dict()self.actions_memory = dict()self.terminal_memory = Noneself.reward_memory = Noneself.memory_index = Noneself.priorities = Noneself.buffer_index = Noneself.states_buffer = dict()self.internals_buffer = dict()self.actions_buffer = dict()self.terminal_buffer = Noneself.reward_buffer = Noneself.batch_indices = Noneself.last_batch_buffer_elems = Noneself.memory_size = Nonesuper(PrioritizedReplay, self).__init__(states=states,internals=internals,actions=actions,include_next_states=include_next_states,scope=scope,summary_labels=summary_labels)", "docstring": "Prioritized experience replay.\n\nArgs:\n states: States specification.\n internals: Internal states specification.\n actions: Actions specification.\n include_next_states: Include subsequent state if true.\n capacity: Memory capacity.\n prioritization_weight: Prioritization weight.\n buffer_size: Buffer size. The buffer is used to insert experiences before experiences\n have been computed via updates. Note that if the buffer is to small in comparison\n to the frequency with which updates are performed, old experiences from the buffer\n will be overwritten before they are moved to the main memory.", "id": "f14360:c0:m0"} {"signature": "def tf_retrieve_indices(self, buffer_elements, priority_indices):", "body": "states = dict()buffer_start = self.buffer_index - buffer_elementsbuffer_end = self.buffer_indexfor name in sorted(self.states_memory):buffer_state_memory = self.states_buffer[name]buffer_states = buffer_state_memory[buffer_start:buffer_end]memory_states = tf.gather(params=self.states_memory[name], indices=priority_indices)states[name] = tf.concat(values=(buffer_states, memory_states), axis=)internals = dict()for name in sorted(self.internals_memory):internal_buffer_memory = self.internals_buffer[name]buffer_internals = internal_buffer_memory[buffer_start:buffer_end]memory_internals = tf.gather(params=self.internals_memory[name], indices=priority_indices)internals[name] = tf.concat(values=(buffer_internals, memory_internals), axis=)actions = dict()for name in sorted(self.actions_memory):action_buffer_memory = self.actions_buffer[name]buffer_action = action_buffer_memory[buffer_start:buffer_end]memory_action = tf.gather(params=self.actions_memory[name], indices=priority_indices)actions[name] = tf.concat(values=(buffer_action, memory_action), axis=)buffer_terminal = self.terminal_buffer[buffer_start:buffer_end]priority_terminal = tf.gather(params=self.terminal_memory, indices=priority_indices)terminal = tf.concat(values=(buffer_terminal, priority_terminal), axis=)buffer_reward = self.reward_buffer[buffer_start:buffer_end]priority_reward = tf.gather(params=self.reward_memory, indices=priority_indices)reward = tf.concat(values=(buffer_reward, priority_reward), axis=)if self.include_next_states:assert util.rank(priority_indices) == next_priority_indices = (priority_indices + ) % self.capacitynext_buffer_start = (buffer_start + ) % self.buffer_sizenext_buffer_end = (buffer_end + ) % self.buffer_sizenext_states = dict()for name in sorted(self.states_memory):buffer_state_memory = self.states_buffer[name]buffer_next_states = buffer_state_memory[next_buffer_start:next_buffer_end]memory_next_states = tf.gather(params=self.states_memory[name], indices=next_priority_indices)next_states[name] = tf.concat(values=(buffer_next_states, memory_next_states), axis=)next_internals = dict()for name in 
sorted(self.internals_memory):buffer_internal_memory = self.internals_buffer[name]buffer_next_internals = buffer_internal_memory[next_buffer_start:next_buffer_end]memory_next_internals = tf.gather(params=self.internals_memory[name], indices=next_priority_indices)next_internals[name] = tf.concat(values=(buffer_next_internals, memory_next_internals), axis=)return dict(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward,next_states=next_states,next_internals=next_internals)else:return dict(states=states,internals=internals,actions=actions,terminal=terminal,reward=reward)", "docstring": "Fetches experiences for given indices by combining entries from buffer\nwhich have no priorities, and entries from priority memory.\n\nArgs:\n buffer_elements: Number of buffer elements to retrieve\n priority_indices: Index tensor for priority memory\n\nReturns: Batch of experiences", "id": "f14360:c0:m5"} {"signature": "def tf_update_batch(self, loss_per_instance):", "body": "mask = tf.not_equal(x=self.batch_indices,y=tf.zeros(shape=tf.shape(input=self.batch_indices), dtype=tf.int32))priority_indices = tf.reshape(tensor=tf.where(condition=mask), shape=[-])sampled_buffer_batch = self.tf_retrieve_indices(buffer_elements=self.last_batch_buffer_elems,priority_indices=priority_indices)states = sampled_buffer_batch['']internals = sampled_buffer_batch['']actions = sampled_buffer_batch['']terminal = sampled_buffer_batch['']reward = sampled_buffer_batch['']priorities = loss_per_instance ** self.prioritization_weightassignments = list()memory_end_index = self.memory_index + self.last_batch_buffer_elemsmemory_insert_indices = tf.range(start=self.memory_index,limit=memory_end_index) % self.capacityfor name in sorted(states):assignments.append(tf.scatter_update(ref=self.states_memory[name],indices=memory_insert_indices,updates=states[name][:self.last_batch_buffer_elems]))for name in sorted(internals):assignments.append(tf.scatter_update(ref=self.internals_buffer[name],indices=memory_insert_indices,updates=internals[name][:self.last_batch_buffer_elems]))assignments.append(tf.scatter_update(ref=self.priorities,indices=memory_insert_indices,updates=priorities[:self.last_batch_buffer_elems]))assignments.append(tf.scatter_update(ref=self.terminal_memory,indices=memory_insert_indices,updates=terminal[:self.last_batch_buffer_elems]))assignments.append(tf.scatter_update(ref=self.reward_memory,indices=memory_insert_indices,updates=reward[:self.last_batch_buffer_elems]))for name in sorted(actions):assignments.append(tf.scatter_update(ref=self.actions_memory[name],indices=memory_insert_indices,updates=actions[name][:self.last_batch_buffer_elems]))main_memory_priorities = priorities[self.last_batch_buffer_elems:]main_memory_priorities = main_memory_priorities[:tf.shape(priority_indices)[]]assignments.append(tf.scatter_update(ref=self.priorities,indices=priority_indices,updates=main_memory_priorities))with tf.control_dependencies(control_inputs=assignments):assignments = list()sorted_priorities, sorted_indices = tf.nn.top_k(input=self.priorities,k=self.capacity,sorted=True)assignments.append(tf.assign(ref=self.priorities, value=sorted_priorities))assignments.append(tf.scatter_update(ref=self.terminal_memory,indices=sorted_indices,updates=self.terminal_memory))for name in sorted(self.states_memory):assignments.append(tf.scatter_update(ref=self.states_memory[name],indices=sorted_indices,updates=self.states_memory[name]))for name in 
sorted(self.actions_memory):assignments.append(tf.scatter_update(ref=self.actions_memory[name],indices=sorted_indices,updates=self.actions_memory[name]))for name in sorted(self.internals_memory):assignments.append(tf.scatter_update(ref=self.internals_memory[name],indices=sorted_indices,updates=self.internals_memory[name]))assignments.append(tf.scatter_update(ref=self.reward_memory,indices=sorted_indices,updates=self.reward_memory))with tf.control_dependencies(control_inputs=assignments):assignments = list()assignments.append(tf.assign_sub(ref=self.buffer_index, value=self.last_batch_buffer_elems))total_inserted_elements = self.memory_size + self.last_batch_buffer_elemsassignments.append(tf.assign(ref=self.memory_size,value=tf.minimum(x=total_inserted_elements, y=self.capacity)))assignments.append(tf.assign(ref=self.memory_index, value=memory_end_index))assignments.append(tf.assign(ref=self.batch_indices,value=tf.zeros(shape=tf.shape(self.batch_indices), dtype=tf.int32)))with tf.control_dependencies(control_inputs=assignments):return tf.no_op()", "docstring": "Updates priority memory by performing the following steps:\n\n1. Use saved indices from prior retrieval to reconstruct the batch\nelements which will have their priorities updated.\n2. Compute priorities for these elements.\n3. Insert buffer elements to memory, potentially overwriting existing elements.\n4. Update priorities of existing memory elements\n5. Resort memory.\n6. Update buffer insertion index.\n\nNote that this implementation could be made more efficient by maintaining\na sorted version via sum trees.\n\n:param loss_per_instance: Losses from recent batch to perform priority update", "id": "f14360:c0:m6"} {"signature": "def __init__(self, current_frame):", "body": "self.ignore_unknown_dtypes = Falseself.meta_params = dict()self.method_calling = inspect.getframeinfo(current_frame)[]_, _, __, self.vals_current = inspect.getargvalues(current_frame)if '' in self.vals_current:self.recorded_class_type = self.vals_current['']self.meta_params[''] = str(self.vals_current[''])frame_list = inspect.getouterframes(current_frame)for frame in frame_list:args, varargs, keywords, vals = inspect.getargvalues(frame[])if '' in vals:if self.recorded_class_type == vals['']:for i in args:self.meta_params[i] = vals[i]del self.meta_params['']", "docstring": "Init the MetaPrameterRecord with \"Agent\" parameters by passing inspect.currentframe() from Agent Class.\n\nThe Init will search back to find the parent class to capture all passed parameters and store\nthem in \"self.meta_params\".\n\nNOTE: Currently only optimized for TensorBoard output.\n\nTODO: Add JSON Export, TEXT EXPORT\n\nArgs:\n current_frame: Frame value from class to obtain metaparameters[= inspect.currentframe()]", "id": "f14363:c0:m0"} {"signature": "def build_metagraph_list(self):", "body": "ops = []self.ignore_unknown_dtypes = Truefor key in sorted(self.meta_params):value = self.convert_data_to_string(self.meta_params[key])if len(value) == :continueif isinstance(value, str):ops.append(tf.contrib.summary.generic(name=key, tensor=tf.convert_to_tensor(str(value))))else:ops.append(tf.contrib.summary.generic(name=key, tensor=tf.as_string(tf.convert_to_tensor(value))))return ops", "docstring": "Convert MetaParams into TF Summary Format and create summary_op.\n\nReturns:\n Merged TF Op for TEXT summary elements, should only be executed once to reduce data duplication.", "id": "f14363:c0:m7"} {"signature": "def make_game():", "body": "return ascii_art.ascii_art_to_game(GAME_ART, 
what_lies_beneath='',sprites=dict([('', PlayerSprite)] +[(c, UpwardLaserBoltSprite) for c in UPWARD_BOLT_CHARS] +[(c, DownwardLaserBoltSprite) for c in DOWNWARD_BOLT_CHARS]),drapes=dict(X=MarauderDrape,B=BunkerDrape),update_schedule=['', '', ''] + list(_ALL_BOLT_CHARS))", "docstring": "Builds and returns an Extraterrestrial Marauders game.", "id": "f14375:m0"} {"signature": "def __init__(self, corner, position, character):", "body": "super(PlayerSprite, self).__init__(corner, position, character, impassable='', confined_to_board=True)", "docstring": "Simply indicates to the superclass that we can't walk off the board.", "id": "f14375:c2:m0"} {"signature": "def __init__(self, corner, position, character):", "body": "super(UpwardLaserBoltSprite, self).__init__(corner, position, character, impassable='')self._teleport((-, -))", "docstring": "Starts the Sprite in a hidden position off of the board.", "id": "f14375:c3:m0"} {"signature": "def _fly(self, board, layers, things, the_plot):", "body": "if (self.character in the_plot[''] orself.character in the_plot['']):return self._teleport((-, -))self._north(board, the_plot)", "docstring": "Handles the behaviour of visible bolts flying toward Marauders.", "id": "f14375:c3:m2"} {"signature": "def _fire(self, layers, things, the_plot):", "body": "if the_plot.get('') == the_plot.frame: returnthe_plot[''] = the_plot.framerow, col = things[''].positionself._teleport((row-, col))", "docstring": "Launches a new bolt from the player.", "id": "f14375:c3:m3"} {"signature": "def __init__(self, corner, position, character):", "body": "super(DownwardLaserBoltSprite, self).__init__(corner, position, character, impassable='')self._teleport((-, -))", "docstring": "Starts the Sprite in a hidden position off of the board.", "id": "f14375:c4:m0"} {"signature": "def _fly(self, board, layers, things, the_plot):", "body": "if self.character in the_plot['']:return self._teleport((-, -))if self.position == things[''].position: the_plot.terminate_episode()self._south(board, the_plot)", "docstring": "Handles the behaviour of visible bolts flying toward the player.", "id": "f14375:c4:m2"} {"signature": "def _fire(self, layers, the_plot):", "body": "if the_plot.get('') == the_plot.frame: returnthe_plot[''] = the_plot.framecol = np.random.choice(np.nonzero(layers[''].sum(axis=))[])row = np.nonzero(layers[''][:, col])[][-] + self._teleport((row, col))", "docstring": "Launches a new bolt from a random Marauder.", "id": "f14375:c4:m3"} {"signature": "def escape(text, quote=False, smart_amp=True):", "body": "if smart_amp:text = _escape_pattern.sub('', text)else:text = text.replace('', '')text = text.replace('', '')text = text.replace('>', '')if quote:text = text.replace('', '')text = text.replace(\"\", '')return text", "docstring": "Replace special characters \"&\", \"<\" and \">\" to HTML-safe sequences.\n\n The original cgi.escape will always escape \"&\", but you can control\n this one for a smart escape amp.\n\n :param quote: if set to True, \" and ' will be escaped.\n :param smart_amp: if set to False, & will always be escaped.", "id": "f14379:m2"} {"signature": "def escape_link(url):", "body": "lower_url = url.lower().strip('')for scheme in _scheme_blacklist:if lower_url.startswith(scheme):return ''return escape(url, quote=True, smart_amp=False)", "docstring": "Remove dangerous URL schemes like javascript: and escape afterwards.", "id": "f14379:m3"} {"signature": "def markdown(text, escape=True, **kwargs):", "body": "return Markdown(escape=escape, **kwargs)(text)", 
"docstring": "Render markdown formatted text to html.\n\n :param text: markdown formatted text content.\n :param escape: if set to False, all html tags will not be escaped.\n :param use_xhtml: output with xhtml tags.\n :param hard_wrap: if set to True, it will use the GFM line breaks feature.\n :param parse_block_html: parse text only in block level html.\n :param parse_inline_html: parse text only in inline level html.", "id": "f14379:m5"} {"signature": "def parse_lheading(self, m):", "body": "self.tokens.append({'': '','': if m.group() == '' else ,'': m.group(),})", "docstring": "Parse setext heading.", "id": "f14379:c1:m7"} {"signature": "def hard_wrap(self):", "body": "self.linebreak = re.compile(r'')self.text = re.compile(r'')", "docstring": "Grammar for hard wrap linebreak. You don't need to add two\n spaces at the end of a line.", "id": "f14379:c2:m0"} {"signature": "def placeholder(self):", "body": "return ''", "docstring": "Returns the default, empty output value for the renderer.\n\n All renderer methods use the '+=' operator to append to this value.\n Default is a string so rendering HTML can build up a result string with\n the rendered Markdown.\n\n Can be overridden by Renderer subclasses to be types like an empty\n list, allowing the renderer to create a tree-like structure to\n represent the document (which can then be reprocessed later into a\n separate format like docx or pdf).", "id": "f14379:c4:m1"} {"signature": "def block_code(self, code, lang=None):", "body": "code = code.rstrip('')if not lang:code = escape(code, smart_amp=False)return '' % codecode = escape(code, quote=True, smart_amp=False)return '' % (lang, code)", "docstring": "Rendering block level code. ``pre > code``.\n\n :param code: text content of the code block.\n :param lang: language of the given code.", "id": "f14379:c4:m2"} {"signature": "def block_quote(self, text):", "body": "return '' % text.rstrip('')", "docstring": "Rendering
        with the given text.\n\n :param text: text content of the blockquote.", "id": "f14379:c4:m3"} {"signature": "def block_html(self, html):", "body": "if self.options.get('') andhtml.lower().startswith(''):return ''if self.options.get(''):return escape(html)return html", "docstring": "Rendering block level pure html content.\n\n :param html: text content of the html snippet.", "id": "f14379:c4:m4"} {"signature": "def header(self, text, level, raw=None):", "body": "return '' % (level, text, level)", "docstring": "Rendering header/heading tags like ``
        `` ``
        ``.\n\n :param text: rendered text content for the header.\n :param level: a number for the header level, for example: 1.\n :param raw: raw text content of the header.", "id": "f14379:c4:m5"} {"signature": "def hrule(self):", "body": "if self.options.get(''):return ''return ''", "docstring": "Rendering method for ``
        `` tag.", "id": "f14379:c4:m6"} {"signature": "def list(self, body, ordered=True):", "body": "tag = ''if ordered:tag = ''return '' % (tag, body, tag)", "docstring": "Rendering list tags like ``
          `` and ``
            ``.\n\n :param body: body contents of the list.\n :param ordered: whether this list is ordered or not.", "id": "f14379:c4:m7"} {"signature": "def list_item(self, text):", "body": "return '' % text", "docstring": "Rendering list item snippet. Like ``
          1. ``.", "id": "f14379:c4:m8"} {"signature": "def paragraph(self, text):", "body": "return '' % text.strip('')", "docstring": "Rendering paragraph tags. Like ``
            ``.", "id": "f14379:c4:m9"} {"signature": "def table(self, header, body):", "body": "return ('''') % (header, body)", "docstring": "Rendering table element. Wrap header and body in it.\n\n :param header: header part of the table.\n :param body: body part of the table.", "id": "f14379:c4:m10"} {"signature": "def table_row(self, content):", "body": "return '' % content", "docstring": "Rendering a table row. Like ``
    `` ````.\n\n :param content: content of current table cell.\n :param header: whether this is header or not.\n :param align: align of current table cell.", "id": "f14379:c4:m12"} {"signature": "def double_emphasis(self, text):", "body": "return '' % text", "docstring": "Rendering **strong** text.\n\n :param text: text content for emphasis.", "id": "f14379:c4:m13"} {"signature": "def emphasis(self, text):", "body": "return '' % text", "docstring": "Rendering *emphasis* text.\n\n :param text: text content for emphasis.", "id": "f14379:c4:m14"} {"signature": "def codespan(self, text):", "body": "text = escape(text.rstrip(), smart_amp=False)return '' % text", "docstring": "Rendering inline `code` text.\n\n :param text: text content for inline code.", "id": "f14379:c4:m15"} {"signature": "def linebreak(self):", "body": "if self.options.get(''):return ''return ''", "docstring": "Rendering line break like ``
    ``.", "id": "f14379:c4:m16"} {"signature": "def strikethrough(self, text):", "body": "return '' % text", "docstring": "Rendering ~~strikethrough~~ text.\n\n :param text: text content for strikethrough.", "id": "f14379:c4:m17"} {"signature": "def text(self, text):", "body": "if self.options.get(''):return textreturn escape(text)", "docstring": "Rendering unformatted text.\n\n :param text: text content.", "id": "f14379:c4:m18"} {"signature": "def escape(self, text):", "body": "return escape(text)", "docstring": "Rendering escape sequence.\n\n :param text: text content.", "id": "f14379:c4:m19"} {"signature": "def autolink(self, link, is_email=False):", "body": "text = link = escape(link)if is_email:link = '' % linkreturn '' % (link, text)", "docstring": "Rendering a given link or email address.\n\n :param link: link content or email address.\n :param is_email: whether this is an email or not.", "id": "f14379:c4:m20"} {"signature": "def link(self, link, title, text):", "body": "link = escape_link(link)if not title:return '' % (link, text)title = escape(title, quote=True)return '' % (link, title, text)", "docstring": "Rendering a given link with content and title.\n\n :param link: href link for ```` tag.\n :param title: title content for `title` attribute.\n :param text: text content for description.", "id": "f14379:c4:m21"} {"signature": "def image(self, src, title, text):", "body": "src = escape_link(src)text = escape(text, quote=True)if title:title = escape(title, quote=True)html = '' % (src, text, title)else:html = '' % (src, text)if self.options.get(''):return '' % htmlreturn '' % html", "docstring": "Rendering a image with title and text.\n\n :param src: source link of the image.\n :param title: title text of the image.\n :param text: alt text of the image.", "id": "f14379:c4:m22"} {"signature": "def inline_html(self, html):", "body": "if self.options.get(''):return escape(html)return html", "docstring": "Rendering span level pure html content.\n\n :param html: text content of the html snippet.", "id": "f14379:c4:m23"} {"signature": "def newline(self):", "body": "return ''", "docstring": "Rendering newline element.", "id": "f14379:c4:m24"} {"signature": "def footnote_ref(self, key, index):", "body": "html = ('''') % (escape(key), escape(key), index)return html", "docstring": "Rendering the ref anchor of a footnote.\n\n :param key: identity key for the footnote.\n :param index: the index count of current footnote.", "id": "f14379:c4:m25"} {"signature": "def footnote_item(self, key, text):", "body": "back = ('') % escape(key)text = text.rstrip()if text.endswith(''):text = re.sub(r'', r'' % back, text)else:text = '' % (text, back)html = '' % (escape(key), text)return html", "docstring": "Rendering a footnote item.\n\n :param key: identity key for the footnote.\n :param text: text content of the footnote.", "id": "f14379:c4:m26"} {"signature": "def footnotes(self, text):", "body": "html = ''return html % (self.hrule(), text)", "docstring": "Wrapper for all footnotes.\n\n :param text: contents of all footnotes.", "id": "f14379:c4:m27"} {"signature": "def render(self, text):", "body": "return self.parse(text)", "docstring": "Render the Markdown text.\n\n :param text: markdown formatted text content.", "id": "f14379:c5:m2"} {"signature": "def process_docstring(app, what, name, obj, options, lines):", "body": "markdown = \"\".join(lines)rest = m2r(markdown)rest.replace(\"\", \"\")del lines[:]lines.extend(rest.split(\"\"))", "docstring": "Enable markdown syntax in docstrings", "id": 
"f14380:m0"} {"signature": "def setup(app):", "body": "global _is_sphinx_is_sphinx = Trueapp.add_config_value('', False, '')app.add_source_parser('', M2RParser)app.add_directive('', MdInclude)", "docstring": "When used for spinx extension.", "id": "f14381:m1"} {"signature": "def output_image_link(self, m):", "body": "return self.renderer.image_link(m.group(''), m.group(''), m.group(''))", "docstring": "Pass through rest role.", "id": "f14381:c3:m3"} {"signature": "def output_rest_role(self, m):", "body": "return self.renderer.rest_role(m.group())", "docstring": "Pass through rest role.", "id": "f14381:c3:m4"} {"signature": "def output_rest_link(self, m):", "body": "return self.renderer.rest_link(m.group())", "docstring": "Pass through rest link.", "id": "f14381:c3:m5"} {"signature": "def output_inline_math(self, m):", "body": "return self.renderer.inline_math(m.group())", "docstring": "Pass through rest link.", "id": "f14381:c3:m6"} {"signature": "def output_eol_literal_marker(self, m):", "body": "marker = '' if m.group() is None else ''return self.renderer.eol_literal_marker(marker)", "docstring": "Pass through rest link.", "id": "f14381:c3:m7"} {"signature": "def block_html(self, html):", "body": "return '' + self._indent_block(html) + ''", "docstring": "Rendering block level pure html content.\n\n :param html: text content of the html snippet.", "id": "f14381:c4:m4"} {"signature": "def header(self, text, level, raw=None):", "body": "return ''.format(text, self.hmarks[level] * len(text))", "docstring": "Rendering header/heading tags like ``
    `` ``
    ``.\n\n :param text: rendered text content for the header.\n :param level: a number for the header level, for example: 1.\n :param raw: raw text content of the header.", "id": "f14381:c4:m5"} {"signature": "def hrule(self):", "body": "return ''", "docstring": "Rendering method for ``
    `` tag.", "id": "f14381:c4:m6"} {"signature": "def list(self, body, ordered=True):", "body": "mark = '' if ordered else ''lines = body.splitlines()for i, line in enumerate(lines):if line and not line.startswith(self.list_marker):lines[i] = '' * len(mark) + linereturn ''.format(''.join(lines)).replace(self.list_marker, mark)", "docstring": "Rendering list tags like ``
      `` and ``
        ``.\n\n :param body: body contents of the list.\n :param ordered: whether this list is ordered or not.", "id": "f14381:c4:m7"} {"signature": "def list_item(self, text):", "body": "return '' + self.list_marker + text", "docstring": "Rendering list item snippet. Like ``
      1. ``.", "id": "f14381:c4:m8"} {"signature": "def paragraph(self, text):", "body": "return '' + text + ''", "docstring": "Rendering paragraph tags. Like ``
        ``.", "id": "f14381:c4:m9"} {"signature": "def table(self, header, body):", "body": "table = ''if header and not header.isspace():table = (table + self.indent + '' +self._indent_block(header) + '')else:table = table + ''table = table + self._indent_block(body) + ''return table", "docstring": "Rendering table element. Wrap header and body in it.\n\n :param header: header part of the table.\n :param body: body part of the table.", "id": "f14381:c4:m10"} {"signature": "def table_row(self, content):", "body": "contents = content.splitlines()if not contents:return ''clist = ['' + contents[]]if len(contents) > :for c in contents[:]:clist.append('' + c)return ''.join(clist) + ''", "docstring": "Rendering a table row. Like ``
    `` ````.\n\n :param content: content of current table cell.\n :param header: whether this is header or not.\n :param align: align of current table cell.", "id": "f14381:c4:m12"} {"signature": "def double_emphasis(self, text):", "body": "return ''.format(text)", "docstring": "Rendering **strong** text.\n\n :param text: text content for emphasis.", "id": "f14381:c4:m13"} {"signature": "def emphasis(self, text):", "body": "return ''.format(text)", "docstring": "Rendering *emphasis* text.\n\n :param text: text content for emphasis.", "id": "f14381:c4:m14"} {"signature": "def codespan(self, text):", "body": "if '' not in text:return ''.format(text)else:return self._raw_html(''''''.format(text.replace('', '')))", "docstring": "Rendering inline `code` text.\n\n :param text: text content for inline code.", "id": "f14381:c4:m15"} {"signature": "def linebreak(self):", "body": "if self.options.get(''):return self._raw_html('') + ''return self._raw_html('') + ''", "docstring": "Rendering line break like ``
    ``.", "id": "f14381:c4:m16"} {"signature": "def strikethrough(self, text):", "body": "return self._raw_html(''.format(text))", "docstring": "Rendering ~~strikethrough~~ text.\n\n :param text: text content for strikethrough.", "id": "f14381:c4:m17"} {"signature": "def text(self, text):", "body": "return text", "docstring": "Rendering unformatted text.\n\n :param text: text content.", "id": "f14381:c4:m18"} {"signature": "def autolink(self, link, is_email=False):", "body": "return link", "docstring": "Rendering a given link or email address.\n\n :param link: link content or email address.\n :param is_email: whether this is an email or not.", "id": "f14381:c4:m19"} {"signature": "def link(self, link, title, text):", "body": "if title:raise NotImplementedError('')return ''.format(target=link, text=text)", "docstring": "Rendering a given link with content and title.\n\n :param link: href link for ```` tag.\n :param title: title content for `title` attribute.\n :param text: text content for description.", "id": "f14381:c4:m20"} {"signature": "def image(self, src, title, text):", "body": "return ''.join(['',''.format(src),''.format(src),''.format(text),'',])", "docstring": "Rendering a image with title and text.\n\n :param src: source link of the image.\n :param title: title text of the image.\n :param text: alt text of the image.", "id": "f14381:c4:m21"} {"signature": "def inline_html(self, html):", "body": "return self._raw_html(html)", "docstring": "Rendering span level pure html content.\n\n :param html: text content of the html snippet.", "id": "f14381:c4:m22"} {"signature": "def newline(self):", "body": "return ''", "docstring": "Rendering newline element.", "id": "f14381:c4:m23"} {"signature": "def footnote_ref(self, key, index):", "body": "return ''.format(key)", "docstring": "Rendering the ref anchor of a footnote.\n\n :param key: identity key for the footnote.\n :param index: the index count of current footnote.", "id": "f14381:c4:m24"} {"signature": "def footnote_item(self, key, text):", "body": "return ''.format(key, text.strip())", "docstring": "Rendering a footnote item.\n\n :param key: identity key for the footnote.\n :param text: text content of the footnote.", "id": "f14381:c4:m25"} {"signature": "def footnotes(self, text):", "body": "if text:return '' + textelse:return ''", "docstring": "Wrapper for all footnotes.\n\n :param text: contents of all footnotes.", "id": "f14381:c4:m26"} {"signature": "def inline_math(self, math):", "body": "return ''.format(math)", "docstring": "Extension of recommonmark", "id": "f14381:c4:m30"} {"signature": "def eol_literal_marker(self, marker):", "body": "return marker", "docstring": "Extension of recommonmark", "id": "f14381:c4:m31"} {"signature": "def run(self):", "body": "if not self.state.document.settings.file_insertion_enabled:raise self.warning('' % self.name)source = self.state_machine.input_lines.source(self.lineno - self.state_machine.input_offset - )source_dir = os.path.dirname(os.path.abspath(source))path = rst.directives.path(self.arguments[])path = os.path.normpath(os.path.join(source_dir, path))path = utils.relative_path(None, path)path = nodes.reprunicode(path)encoding = self.options.get('', self.state.document.settings.input_encoding)e_handler = self.state.document.settings.input_encoding_error_handlertab_width = self.options.get('', self.state.document.settings.tab_width)try:self.state.document.settings.record_dependencies.add(path)include_file = io.FileInput(source_path=path,encoding=encoding,error_handler=e_handler)except 
UnicodeEncodeError as error:raise self.severe('''''' %(self.name, SafeString(path)))except IOError as error:raise self.severe('' %(self.name, ErrorString(error)))try:rawtext = include_file.read()except UnicodeError as error:raise self.severe('' %(self.name, ErrorString(error)))config = self.state.document.settings.env.configconverter = M2R(no_underscore_emphasis=config.no_underscore_emphasis)include_lines = statemachine.string2lines(converter(rawtext),tab_width,convert_whitespace=True)self.state_machine.insert_input(include_lines, path)return []", "docstring": "Most of this method is from ``docutils.parser.rst.Directive``.\n\n docutils version: 0.12", "id": "f14381:c7:m0"} {"signature": "def validate(self, filename, module, classname, ignore):", "body": "with open(os.path.normpath(os.path.join('', '', filename))) as f:docs = f.read()module = module_loading.import_module(module)methods = re.findall(r'', docs, flags=re.M)attributes = re.findall(r'', docs, flags=re.M)documented = set(filter(lambda x: x.startswith(classname), [a for a in methods] + attributes))implemented = set(classname + '' + x for x in dir(getattr(module, classname))if not x.startswith('') or x == '')print(implemented)ignored = set(classname + '' + x for x in ignore)self.assertSetEqual(implemented - documented - ignored, set(), msg='')self.assertSetEqual(documented - implemented - ignored, set(), msg='')", "docstring": "Finds all automethod and autoattribute statements in an rst file\ncomparing them to the attributes found in the actual class", "id": "f14387:c0:m0"} {"signature": "def local(path):", "body": "return os.path.join(__name__.split('')[-], path)", "docstring": "Prepend the effect package name to a path so resources\ncan still be loaded when copied into a new effect package.", "id": "f14397:m0"} {"signature": "def local(path):", "body": "return os.path.join(__name__.split('')[-], path)", "docstring": "Prepend the effect package name to a path so resources\ncan still be loaded when copied into a new effect package.", "id": "f14399:m0"} {"signature": "@propertydef ctx(self) -> moderngl.Context:", "body": "return context.ctx()", "docstring": "The moderngl context", "id": "f14402:c0:m1"} {"signature": "@classmethoddef from_single(cls, meta: ProgramDescription, source: str):", "body": "instance = cls(meta)instance.vertex_source = ShaderSource(VERTEX_SHADER,meta.path or meta.vertex_shader,source)if GEOMETRY_SHADER in source:instance.geometry_source = ShaderSource(GEOMETRY_SHADER,meta.path or meta.geometry_shader,source,)if FRAGMENT_SHADER in source:instance.fragment_source = ShaderSource(FRAGMENT_SHADER,meta.path or meta.fragment_shader,source,)if TESS_CONTROL_SHADER in source:instance.tess_control_source = ShaderSource(TESS_CONTROL_SHADER,meta.path or meta.tess_control_shader,source,)if TESS_EVALUATION_SHADER in source:instance.tess_evaluation_source = ShaderSource(TESS_EVALUATION_SHADER,meta.path or meta.tess_evaluation_shader,source,)return instance", "docstring": "Initialize a single glsl string containing all shaders", "id": "f14402:c0:m2"} {"signature": "@classmethoddef from_separate(cls, meta: ProgramDescription, vertex_source, geometry_source=None, fragment_source=None,tess_control_source=None, tess_evaluation_source=None):", "body": "instance = cls(meta)instance.vertex_source = ShaderSource(VERTEX_SHADER,meta.path or meta.vertex_shader,vertex_source,)if geometry_source:instance.geometry_source = ShaderSource(GEOMETRY_SHADER,meta.path or meta.geometry_shader,geometry_source,)if 
fragment_source:instance.fragment_source = ShaderSource(FRAGMENT_SHADER,meta.path or meta.fragment_shader,fragment_source,)if tess_control_source:instance.tess_control_source = ShaderSource(TESS_CONTROL_SHADER,meta.path or meta.tess_control_shader,tess_control_source,)if tess_evaluation_source:instance.tess_evaluation_source = ShaderSource(TESS_EVALUATION_SHADER,meta.path or meta.tess_control_shader,tess_evaluation_source,)return instance", "docstring": "Initialize multiple shader strings", "id": "f14402:c0:m3"} {"signature": "def create(self):", "body": "out_attribs = []if not self.fragment_source:if self.geometry_source:out_attribs = self.geometry_source.find_out_attribs()else:out_attribs = self.vertex_source.find_out_attribs()program = self.ctx.program(vertex_shader=self.vertex_source.source,geometry_shader=self.geometry_source.source if self.geometry_source else None,fragment_shader=self.fragment_source.source if self.fragment_source else None,tess_control_shader=self.tess_control_source.source if self.tess_control_source else None,tess_evaluation_shader=self.tess_evaluation_source.source if self.tess_evaluation_source else None,varyings=out_attribs,)program.extra = {'': self.meta}return program", "docstring": "Creates a shader program.\n\nReturns:\n ModernGL Program instance", "id": "f14402:c0:m4"} {"signature": "def find_out_attribs(self):", "body": "names = []for line in self.lines:if line.strip().startswith(\"\"):names.append(line.split()[].replace('', ''))return names", "docstring": "Get all out attributes in the shader source.\n\n:return: List of attribute names", "id": "f14402:c1:m1"} {"signature": "def print(self):", "body": "print(\"\".format(self.name))for i, line in enumerate(self.lines):print(\"\".format(str(i).zfill(), line))print(\"\".format(self.name))", "docstring": "Print the shader lines", "id": "f14402:c1:m2"} {"signature": "def __init__(self, meta: ProgramDescription, program: moderngl.Program):", "body": "self.program = programself.meta = meta", "docstring": "Create a shader using either a file path or a name\n:param meta: The ProgramMeta\n:param program: The program instance", "id": "f14402:c3:m0"} {"signature": "@propertydef mglo(self):", "body": "return self.program.mglo", "docstring": "The ModernGL Program object", "id": "f14402:c3:m6"} {"signature": "@propertydef glo(self) -> int:", "body": "return self.program.glo", "docstring": "int: The internal OpenGL object.\nThis values is provided for debug purposes only.", "id": "f14402:c3:m7"} {"signature": "@propertydef subroutines(self) -> Tuple[str, ...]:", "body": "return self.program.subroutines", "docstring": "tuple: The subroutine uniforms.", "id": "f14402:c3:m8"} {"signature": "@propertydef geometry_input(self) -> int:", "body": "return self.program.geometry_input", "docstring": "int: The geometry input primitive.\nThe GeometryShader's input primitive if the GeometryShader exists.\nThe geometry input primitive will be used for validation.", "id": "f14402:c3:m9"} {"signature": "@propertydef geometry_output(self) -> int:", "body": "return self.program.geometry_output", "docstring": "int: The geometry output primitive.\nThe GeometryShader's output primitive if the GeometryShader exists.", "id": "f14402:c3:m10"} {"signature": "@propertydef geometry_vertices(self) -> int:", "body": "return self.program.geometry_vertices", "docstring": "int: The maximum number of vertices that\nthe geometry shader will output.", "id": "f14402:c3:m11"} {"signature": "def __init__(self, buffer: moderngl.Buffer, buffer_format: str, 
attributes=None, per_instance=False):", "body": "self.buffer = bufferself.attrib_formats = types.parse_attribute_formats(buffer_format)self.attributes = attributesself.per_instance = per_instanceif self.buffer.size % self.vertex_size != :raise VAOError(\"\".format(buffer_format, self.vertex_size, self.buffer.size % self.vertex_size))self.vertices = self.buffer.size // self.vertex_size", "docstring": ":param buffer: The vbo object\n:param format: The format of the buffer", "id": "f14403:c0:m0"} {"signature": "def content(self, attributes: List[str]):", "body": "formats = []attrs = []for attrib_format, attrib in zip(self.attrib_formats, self.attributes):if attrib not in attributes:formats.append(attrib_format.pad_str())continueformats.append(attrib_format.format)attrs.append(attrib)attributes.remove(attrib)if not attrs:return Nonereturn (self.buffer,\"\".format(\"\".join(formats), '' if self.per_instance else ''),*attrs)", "docstring": "Build content tuple for the buffer", "id": "f14403:c0:m2"} {"signature": "def __init__(self, name=\"\", mode=moderngl.TRIANGLES):", "body": "self.ctx = context.ctx()self.name = nameself.mode = modetry:DRAW_MODES[self.mode]except KeyError:raise VAOError(\"\".format(DRAW_MODES.values()))self.buffers = []self._index_buffer = Noneself._index_element_size = Noneself.vertex_count = self.vaos = {}", "docstring": "Create and empty VAO\n\nKeyword Args:\n name (str): The name for debug purposes\n mode (int): Default draw mode", "id": "f14403:c1:m0"} {"signature": "def render(self, program: moderngl.Program, mode=None, vertices=-, first=, instances=):", "body": "vao = self.instance(program)if mode is None:mode = self.modevao.render(mode, vertices=vertices, first=first, instances=instances)", "docstring": "Render the VAO.\n\nArgs:\n program: The ``moderngl.Program``\n\nKeyword Args:\n mode: Override the draw mode (``TRIANGLES`` etc)\n vertices (int): The number of vertices to transform\n first (int): The index of the first vertex to start with\n instances (int): The number of instances", "id": "f14403:c1:m1"} {"signature": "def render_indirect(self, program: moderngl.Program, buffer, mode=None, count=-, *, first=):", "body": "vao = self.instance(program)if mode is None:mode = self.modevao.render_indirect(buffer, mode=mode, count=count, first=first)", "docstring": "The render primitive (mode) must be the same as the input primitive of the GeometryShader.\nThe draw commands are 5 integers: (count, instanceCount, firstIndex, baseVertex, baseInstance).\n\nArgs:\n program: The ``moderngl.Program``\n buffer: The ``moderngl.Buffer`` containing indirect draw commands\n\nKeyword Args:\n mode (int): By default :py:data:`TRIANGLES` will be used.\n count (int): The number of draws.\n first (int): The index of the first indirect draw command.", "id": "f14403:c1:m2"} {"signature": "def transform(self, program: moderngl.Program, buffer: moderngl.Buffer,mode=None, vertices=-, first=, instances=):", "body": "vao = self.instance(program)if mode is None:mode = self.modevao.transform(buffer, mode=mode, vertices=vertices, first=first, instances=instances)", "docstring": "Transform vertices. 
Stores the output in a single buffer.\n\nArgs:\n program: The ``moderngl.Program``\n buffer: The ``moderngl.buffer`` to store the output\n\nKeyword Args:\n mode: Draw mode (for example ``moderngl.POINTS``)\n vertices (int): The number of vertices to transform\n first (int): The index of the first vertex to start with\n instances (int): The number of instances", "id": "f14403:c1:m3"} {"signature": "def buffer(self, buffer, buffer_format: str, attribute_names, per_instance=False):", "body": "if not isinstance(attribute_names, list):attribute_names = [attribute_names, ]if not type(buffer) in [moderngl.Buffer, numpy.ndarray, bytes]:raise VAOError((\"\"\"\".format(type(buffer))))if isinstance(buffer, numpy.ndarray):buffer = self.ctx.buffer(buffer.tobytes())if isinstance(buffer, bytes):buffer = self.ctx.buffer(data=buffer)formats = buffer_format.split()if len(formats) != len(attribute_names):raise VAOError(\"\".format(buffer_format, attribute_names))self.buffers.append(BufferInfo(buffer, buffer_format, attribute_names, per_instance=per_instance))self.vertex_count = self.buffers[-].verticesreturn buffer", "docstring": "Register a buffer/vbo for the VAO. This can be called multiple times.\nadding multiple buffers (interleaved or not)\n\nArgs:\n buffer: The buffer data. Can be ``numpy.array``, ``moderngl.Buffer`` or ``bytes``.\n buffer_format (str): The format of the buffer. (eg. ``3f 3f`` for interleaved positions and normals).\n attribute_names: A list of attribute names this buffer should map to.\n\nKeyword Args:\n per_instance (bool): Is this buffer per instance data for instanced rendering?\n\nReturns:\n The ``moderngl.Buffer`` instance object. This is handy when providing ``bytes`` and ``numpy.array``.", "id": "f14403:c1:m4"} {"signature": "def index_buffer(self, buffer, index_element_size=):", "body": "if not type(buffer) in [moderngl.Buffer, numpy.ndarray, bytes]:raise VAOError(\"\")if isinstance(buffer, numpy.ndarray):buffer = self.ctx.buffer(buffer.tobytes())if isinstance(buffer, bytes):buffer = self.ctx.buffer(data=buffer)self._index_buffer = bufferself._index_element_size = index_element_size", "docstring": "Set the index buffer for this VAO\n\nArgs:\n buffer: ``moderngl.Buffer``, ``numpy.array`` or ``bytes``\n\nKeyword Args:\n index_element_size (int): Byte size of each element. 
1, 2 or 4", "id": "f14403:c1:m5"} {"signature": "def instance(self, program: moderngl.Program) -> moderngl.VertexArray:", "body": "vao = self.vaos.get(program.glo)if vao:return vaoprogram_attributes = [name for name, attr in program._members.items() if isinstance(attr, moderngl.Attribute)]for attrib_name in program_attributes:if attrib_name.startswith(''):continueif not sum(buffer.has_attribute(attrib_name) for buffer in self.buffers):raise VAOError(\"\".format(self.name, attrib_name, program.name))vao_content = []for buffer in self.buffers:content = buffer.content(program_attributes)if content:vao_content.append(content)if program_attributes:for attrib_name in program_attributes:if attrib_name.startswith(''):continueraise VAOError(\"\".format([n for n in program_attributes]))if self._index_buffer:vao = context.ctx().vertex_array(program, vao_content,self._index_buffer, self._index_element_size)else:vao = context.ctx().vertex_array(program, vao_content)self.vaos[program.glo] = vaoreturn vao", "docstring": "Obtain the ``moderngl.VertexArray`` instance for the program.\nThe instance is only created once and cached internally.\n\nReturns: ``moderngl.VertexArray`` instance", "id": "f14403:c1:m6"} {"signature": "def release(self, buffer=True):", "body": "for key, vao in self.vaos:vao.release()if buffer:for buff in self.buffers:buff.buffer.release()if self._index_buffer:self._index_buffer.release()", "docstring": "Destroy the vao object\n\nKeyword Args:\n buffers (bool): also release buffers", "id": "f14403:c1:m7"} {"signature": "def buffer_format(frmt: str) -> BufferFormat:", "body": "try:return BUFFER_FORMATS[frmt]except KeyError:raise ValueError(\"\".format(frmt, BUFFER_FORMATS.keys()))", "docstring": "Look up info about a buffer format\n:param frmt: format string such as 'f', 'i' and 'u'\n:return: BufferFormat instance", "id": "f14404:m0"} {"signature": "def attribute_format(frmt: str) -> BufferFormat:", "body": "try:return ATTRIBUTE_FORMATS[frmt]except KeyError:raise ValueError(\"\".format(frmt, ATTRIBUTE_FORMATS.keys()))", "docstring": "Look up info about an attribute format\n:param frmt: Format of an\n:return: BufferFormat instance", "id": "f14404:m1"} {"signature": "def __init__(self, format_string: str, components: int, bytes_per_component: int):", "body": "self.format = format_stringself.components = componentsself.bytes_per_component = bytes_per_component", "docstring": ":param format_string: moderngl format string\n:param components: components\n:param byte_size: byte per component", "id": "f14404:c0:m0"} {"signature": "def pad_str(self) -> str:", "body": "return \"\".format(self.components, self.bytes_per_component)", "docstring": "Padding string used my moderngl in interleaved buffers", "id": "f14404:c0:m2"} {"signature": "def update(self, aspect_ratio=None, fov=None, near=None, far=None):", "body": "self.aspect_ratio = aspect_ratio or self.aspect_ratioself.fov = fov or self.fovself.near = near or self.nearself.far = far or self.farself.matrix = Matrix44.perspective_projection(self.fov, self.aspect_ratio, self.near, self.far)", "docstring": "Update the internal projection matrix based on current values\nor values passed in if specified.\n\n:param aspect_ratio: New aspect ratio\n:param fov: New field of view\n:param near: New near value\n:param far: New far value", "id": "f14405:c0:m1"} {"signature": "@propertydef projection_constants(self):", "body": "return self.far / (self.far - self.near), (self.far * self.near) / (self.near - self.far)", "docstring": "Returns the (x, y) 
projection constants for the current projection.\n:return: x, y tuple projection constants", "id": "f14405:c0:m3"} {"signature": "def draw(self, texture, pos=(, ), scale=(, )):", "body": "if not self.initialized:self.init()self._texture2d_shader[\"\"].value = (pos[] - , pos[] - )self._texture2d_shader[\"\"].value = (scale[], scale[])texture.use(location=)self._texture2d_sampler.use(location=)self._texture2d_shader[\"\"].value = self._quad.render(self._texture2d_shader)self._texture2d_sampler.clear(location=)", "docstring": "Draw texture using a fullscreen quad.\nBy default this will conver the entire screen.\n\n:param pos: (tuple) offset x, y\n:param scale: (tuple) scale x, y", "id": "f14406:c0:m3"} {"signature": "def draw_depth(self, texture, near, far, pos=(, ), scale=(, )):", "body": "if not self.initialized:self.init()self._depth_shader[\"\"].value = (pos[] - , pos[] - )self._depth_shader[\"\"].value = (scale[], scale[])self._depth_shader[\"\"].value = nearself._depth_shader[\"\"].value = farself._depth_sampler.use(location=)texture.use(location=)self._depth_shader[\"\"].value = self._quad.render(self._depth_shader)self._depth_sampler.clear(location=)", "docstring": "Draw depth buffer linearized.\nBy default this will draw the texture as a full screen quad.\nA sampler will be used to ensure the right conditions to draw the depth buffer.\n\n:param near: Near plane in projection\n:param far: Far plane in projection\n:param pos: (tuple) offset x, y\n:param scale: (tuple) scale x, y", "id": "f14406:c0:m4"} {"signature": "def _init_texture2d_draw(self):", "body": "if not TextureHelper._quad:TextureHelper._quad = geometry.quad_fs()TextureHelper._texture2d_shader = context.ctx().program(vertex_shader=\"\"\"\"\"\",fragment_shader=\"\"\"\"\"\")TextureHelper._texture2d_sampler = self.ctx.sampler(filter=(moderngl.LINEAR, moderngl.LINEAR),)", "docstring": "Initialize geometry and shader for drawing FBO layers", "id": "f14406:c0:m5"} {"signature": "def _init_depth_texture_draw(self):", "body": "from demosys import geometryif not TextureHelper._quad:TextureHelper._quad = geometry.quad_fs()TextureHelper._depth_shader = context.ctx().program(vertex_shader=\"\"\"\"\"\",fragment_shader=\"\"\"\"\"\")TextureHelper._depth_sampler = self.ctx.sampler(filter=(moderngl.LINEAR, moderngl.LINEAR),compare_func='',)", "docstring": "Initialize geometry and shader for drawing FBO layers", "id": "f14406:c0:m6"} {"signature": "@functools.lru_cache(maxsize=None)def get_finder(import_path):", "body": "Finder = import_string(import_path)if not issubclass(Finder, BaseFileSystemFinder):raise ImproperlyConfigured(''.format(import_path))return Finder()", "docstring": "Get a finder class from an import path.\nRaises ``demosys.core.exceptions.ImproperlyConfigured`` if the finder is not found.\nThis function uses an lru cache.\n\n:param import_path: string representing an import path\n:return: An instance of the finder", "id": "f14409:m0"} {"signature": "def find(self, path: Path):", "body": "if getattr(self, '', None):self.paths = getattr(settings, self.settings_attr)path_found = Nonefor entry in self.paths:abspath = entry / pathif abspath.exists():path_found = abspathreturn path_found", "docstring": "Find a file in the path. The file may exist in multiple\npaths. 
The last found file will be returned.\n\n:param path: The path to find\n:return: The absolute path to the file or None if not found", "id": "f14409:c0:m1"} {"signature": "def init(window=None, project=None, timeline=None):", "body": "from demosys.effects.registry import Effectfrom demosys.scene import camerawindow.timeline = timelinesetattr(Effect, '', window)setattr(Effect, '', window.ctx)setattr(Effect, '', project)window.sys_camera = camera.SystemCamera(aspect=window.aspect_ratio, fov=, near=, far=)setattr(Effect, '', window.sys_camera)print(\"\", time.time())project.load()timer_cls = import_string(settings.TIMER)window.timer = timer_cls()window.timer.start()", "docstring": "Initialize, load and run\n\n:param manager: The effect manager to use", "id": "f14412:m0"} {"signature": "def create(file_format='', name=None):", "body": "dest = \"\"if settings.SCREENSHOT_PATH:if not os.path.exists(settings.SCREENSHOT_PATH):print(\"\".format(settings.SCREENSHOT_PATH))os.makedirs(settings.SCREENSHOT_PATH)dest = settings.SCREENSHOT_PATHelse:print(\"\")if not Config.target:Config.target = context.window().fboimage = Image.frombytes(\"\",(Config.target.viewport[], Config.target.viewport[]),Config.target.read(viewport=Config.target.viewport, alignment=Config.alignment),)image = image.transpose(Image.FLIP_TOP_BOTTOM)if not name:name = \"\".format(datetime.now().strftime(\"\"), file_format)dest = os.path.join(dest, name)print(\"\", dest)image.save(dest, format=file_format)", "docstring": "Create a screenshot\n:param file_format: formats supported by PIL (png, jpeg etc)", "id": "f14413:m0"} {"signature": "def __init__(self, fov=, aspect=, near=, far=):", "body": "self.position = Vector3([, , ])self.up = Vector3([, , ])self.right = Vector3([, , ])self.dir = Vector3([, , -])self.yaw = -self.pitch = self._up = Vector3([, , ])self.projection = Projection(aspect, fov, near, far)", "docstring": "Initialize camera using a specific projection\n\n:param fov: Field of view\n:param aspect: Aspect ratio\n:param near: Near plane\n:param far: Far plane", "id": "f14415:c0:m0"} {"signature": "def set_position(self, x, y, z):", "body": "self.position = Vector3([x, y, z])", "docstring": "Set the 3D position of the camera\n\n:param x: float\n:param y: float\n:param z: float", "id": "f14415:c0:m1"} {"signature": "@propertydef view_matrix(self):", "body": "self._update_yaw_and_pitch()return self._gl_look_at(self.position, self.position + self.dir, self._up)", "docstring": ":return: The current view matrix for the camera", "id": "f14415:c0:m2"} {"signature": "def _update_yaw_and_pitch(self):", "body": "front = Vector3([, , ])front.x = cos(radians(self.yaw)) * cos(radians(self.pitch))front.y = sin(radians(self.pitch))front.z = sin(radians(self.yaw)) * cos(radians(self.pitch))self.dir = vector.normalise(front)self.right = vector.normalise(vector3.cross(self.dir, self._up))self.up = vector.normalise(vector3.cross(self.right, self.dir))", "docstring": "Updates the camera vectors based on the current yaw and pitch", "id": "f14415:c0:m3"} {"signature": "def look_at(self, vec=None, pos=None):", "body": "if pos is None:vec = Vector3(pos)if vec is None:raise ValueError(\"\")return self._gl_look_at(self.position, vec, self._up)", "docstring": "Look at a specific point\n\n:param vec: Vector3 position\n:param pos: python list [x, y, x]\n:return: Camera matrix", "id": "f14415:c0:m4"} {"signature": "def _gl_look_at(self, pos, target, up):", "body": "z = vector.normalise(pos - target)x = vector.normalise(vector3.cross(vector.normalise(up), 
z))y = vector3.cross(z, x)translate = matrix44.create_identity()translate[][] = -pos.xtranslate[][] = -pos.ytranslate[][] = -pos.zrotate = matrix44.create_identity()rotate[][] = x[] rotate[][] = x[]rotate[][] = x[]rotate[][] = y[] rotate[][] = y[]rotate[][] = y[]rotate[][] = z[] rotate[][] = z[]rotate[][] = z[]return matrix44.multiply(translate, rotate)", "docstring": "The standard lookAt method\n\n:param pos: current position\n:param target: target position to look at\n:param up: direction up", "id": "f14415:c0:m5"} {"signature": "def move_state(self, direction, activate):", "body": "if direction == RIGHT:self._xdir = POSITIVE if activate else STILLelif direction == LEFT:self._xdir = NEGATIVE if activate else STILLelif direction == FORWARD:self._zdir = NEGATIVE if activate else STILLelif direction == BACKWARD:self._zdir = POSITIVE if activate else STILLelif direction == UP:self._ydir = POSITIVE if activate else STILLelif direction == DOWN:self._ydir = NEGATIVE if activate else STILL", "docstring": "Set the camera position move state\n\n:param direction: What direction to update\n:param activate: Start or stop moving in the direction", "id": "f14415:c1:m7"} {"signature": "def rot_state(self, x, y):", "body": "if self.last_x is None:self.last_x = xif self.last_y is None:self.last_y = yx_offset = self.last_x - xy_offset = self.last_y - yself.last_x = xself.last_y = yx_offset *= self.mouse_sensitivityy_offset *= self.mouse_sensitivityself.yaw -= x_offsetself.pitch += y_offsetif self.pitch > :self.pitch = if self.pitch < -:self.pitch = -self._update_yaw_and_pitch()", "docstring": "Set the rotation state of the camera\n\n:param x: viewport x pos\n:param y: viewport y pos", "id": "f14415:c1:m8"} {"signature": "@propertydef view_matrix(self):", "body": "now = time.time()t = max(now - self._last_time, )self._last_time = nowif self._xdir == POSITIVE:self.position += self.right * self.velocity * telif self._xdir == NEGATIVE:self.position -= self.right * self.velocity * tif self._zdir == NEGATIVE:self.position += self.dir * self.velocity * telif self._zdir == POSITIVE:self.position -= self.dir * self.velocity * tif self._ydir == POSITIVE:self.position += self.up * self.velocity * telif self._ydir == NEGATIVE:self.position -= self.up * self.velocity * treturn self._gl_look_at(self.position, self.position + self.dir, self._up)", "docstring": ":return: The current view matrix for the camera", "id": "f14415:c1:m9"} {"signature": "def __init__(self, name, vao=None, material=None, attributes=None, bbox_min=None, bbox_max=None):", "body": "self.name = nameself.vao = vaoself.material = materialself.attributes = attributes or {}self.bbox_min = bbox_minself.bbox_max = bbox_maxself.mesh_program = None", "docstring": ":param name: Name of the mesh\n:param vao: VAO\n:param material: Material\n:param attributes: Details info about each mesh attribute (dict)\n {\n \"NORMAL\": {\"name\": \"in_normal\", \"components\": 3, \"type\": GL_FLOAT},\n \"POSITION\": {\"name\": \"in_position\", \"components\": 3, \"type\": GL_FLOAT}\n }", "id": "f14416:c0:m0"} {"signature": "def draw(self, projection_matrix=None, view_matrix=None, camera_matrix=None, time=):", "body": "if self.mesh_program:self.mesh_program.draw(self,projection_matrix=projection_matrix,view_matrix=view_matrix,camera_matrix=camera_matrix,time=time)", "docstring": "Draw the mesh using the assigned mesh program\n\n:param projection_matrix: projection_matrix (bytes)\n:param view_matrix: view_matrix (bytes)\n:param camera_matrix: camera_matrix (bytes)", "id": 
"f14416:c0:m1"} {"signature": "def add_attribute(self, attr_type, name, components):", "body": "self.attributes[attr_type] = {\"\": name, \"\": components}", "docstring": "Add metadata about the mesh\n:param attr_type: POSITION, NORMAL etc\n:param name: The attribute name used in the program\n:param components: Number of floats", "id": "f14416:c0:m3"} {"signature": "def __init__(self, name, mesh_programs=None, **kwargs):", "body": "self.name = nameself.root_nodes = []self.nodes = []self.materials = []self.meshes = []self.cameras = []self.bbox_min = Noneself.bbox_max = Noneself.diagonal_size = self.bbox_vao = geometry.bbox()self.bbox_program = programs.load(ProgramDescription(label='',path=''))self._view_matrix = matrix44.create_identity()", "docstring": ":param name: Unique name or path for the scene\n:param mesh_programs: List of MeshPrograms to apply to the scene\n:param loader: Loader class for the scene if relevant", "id": "f14417:c0:m0"} {"signature": "def draw(self, projection_matrix=None, camera_matrix=None, time=):", "body": "projection_matrix = projection_matrix.astype('').tobytes()camera_matrix = camera_matrix.astype('').tobytes()for node in self.root_nodes:node.draw(projection_matrix=projection_matrix,camera_matrix=camera_matrix,time=time,)self.ctx.clear_samplers(, )", "docstring": "Draw all the nodes in the scene\n\n:param projection_matrix: projection matrix (bytes)\n:param camera_matrix: camera_matrix (bytes)\n:param time: The current time", "id": "f14417:c0:m4"} {"signature": "def draw_bbox(self, projection_matrix=None, camera_matrix=None, all=True):", "body": "projection_matrix = projection_matrix.astype('').tobytes()camera_matrix = camera_matrix.astype('').tobytes()self.bbox_program[\"\"].write(projection_matrix)self.bbox_program[\"\"].write(self._view_matrix.astype('').tobytes())self.bbox_program[\"\"].write(camera_matrix)self.bbox_program[\"\"].write(self.bbox_min.astype('').tobytes())self.bbox_program[\"\"].write(self.bbox_max.astype('').tobytes())self.bbox_program[\"\"].value = (, , )self.bbox_vao.render(self.bbox_program)if not all:returnfor node in self.root_nodes:node.draw_bbox(projection_matrix, camera_matrix, self.bbox_program, self.bbox_vao)", "docstring": "Draw scene and mesh bounding boxes", "id": "f14417:c0:m5"} {"signature": "def apply_mesh_programs(self, mesh_programs=None):", "body": "if not mesh_programs:mesh_programs = [ColorProgram(), TextureProgram(), FallbackProgram()]for mesh in self.meshes:for mp in mesh_programs:instance = mp.apply(mesh)if instance is not None:if isinstance(instance, MeshProgram):mesh.mesh_program = mpbreakelse:raise ValueError(\"\".format(type(instance)))if not mesh.mesh_program:print(\"\".format(mesh.name))", "docstring": "Applies mesh programs to meshes", "id": "f14417:c0:m6"} {"signature": "def calc_scene_bbox(self):", "body": "bbox_min, bbox_max = None, Nonefor node in self.root_nodes:bbox_min, bbox_max = node.calc_global_bbox(matrix44.create_identity(),bbox_min,bbox_max)self.bbox_min = bbox_minself.bbox_max = bbox_maxself.diagonal_size = vector3.length(self.bbox_max - self.bbox_min)", "docstring": "Calculate scene bbox", "id": "f14417:c0:m7"} {"signature": "def destroy(self):", "body": "for mesh in self.meshes:mesh.vao.release()", "docstring": "Destroy the scene data and deallocate buffers", "id": "f14417:c0:m9"} {"signature": "def draw(self, mesh, projection_matrix=None, view_matrix=None, camera_matrix=None, time=):", "body": 
"self.program[\"\"].write(projection_matrix)self.program[\"\"].write(view_matrix)mesh.vao.render(self.program)", "docstring": "Draw code for the mesh. Should be overriden.\n\n:param projection_matrix: projection_matrix (bytes)\n:param view_matrix: view_matrix (bytes)\n:param camera_matrix: camera_matrix (bytes)\n:param time: The current time", "id": "f14419:c0:m1"} {"signature": "def apply(self, mesh):", "body": "raise NotImplementedError(\"\")", "docstring": "Determine if this MeshProgram should be applied to the mesh\nCan return self or some MeshProgram instance to support dynamic MeshProgram creation\n\n:param mesh: The mesh to inspect", "id": "f14419:c0:m2"} {"signature": "def draw(self, projection_matrix=None, camera_matrix=None, time=):", "body": "if self.mesh:self.mesh.draw(projection_matrix=projection_matrix,view_matrix=self.matrix_global_bytes,camera_matrix=camera_matrix,time=time)for child in self.children:child.draw(projection_matrix=projection_matrix,camera_matrix=camera_matrix,time=time)", "docstring": "Draw node and children\n\n:param projection_matrix: projection matrix (bytes)\n:param camera_matrix: camera_matrix (bytes)\n:param time: The current time", "id": "f14420:c0:m2"} {"signature": "def calc_global_bbox(self, view_matrix, bbox_min, bbox_max):", "body": "if self.matrix is not None:view_matrix = matrix44.multiply(self.matrix, view_matrix)if self.mesh:bbox_min, bbox_max = self.mesh.calc_global_bbox(view_matrix, bbox_min, bbox_max)for child in self.children:bbox_min, bbox_max = child.calc_global_bbox(view_matrix, bbox_min, bbox_max)return bbox_min, bbox_max", "docstring": "Recursive calculation of scene bbox", "id": "f14420:c0:m4"} {"signature": "def start(self):", "body": "if self.initialized:mixer.music.unpause()else:mixer.music.play()mixer.music.play()self.initialized = Trueself.paused = False", "docstring": "Play the music", "id": "f14422:c0:m1"} {"signature": "def pause(self):", "body": "mixer.music.pause()self.pause_time = self.get_time()self.paused = True", "docstring": "Pause the music", "id": "f14422:c0:m2"} {"signature": "def toggle_pause(self):", "body": "if self.paused:self.start()else:self.pause()", "docstring": "Toggle pause mode", "id": "f14422:c0:m3"} {"signature": "def stop(self) -> float:", "body": "mixer.music.stop()return self.get_time()", "docstring": "Stop the music\n\nReturns:\n The current location in the music", "id": "f14422:c0:m4"} {"signature": "def get_time(self) -> float:", "body": "if self.paused:return self.pause_timereturn mixer.music.get_pos() / ", "docstring": "Get the current position in the music in seconds", "id": "f14422:c0:m5"} {"signature": "def set_time(self, value: float):", "body": "if value < :value = mixer.music.set_pos(value)", "docstring": "Set the current time in the music in seconds causing the player\nto seek to this location in the file.", "id": "f14422:c0:m6"} {"signature": "def start(self):", "body": "if self.start_time is None:self.start_time = time.time()else:pause_duration = time.time() - self.pause_timeself.offset += pause_durationself.pause_time = None", "docstring": "Start the timer by recoding the current ``time.time()``\npreparing to report the number of seconds since this timestamp.", "id": "f14423:c0:m1"} {"signature": "def pause(self):", "body": "self.pause_time = time.time()", "docstring": "Pause the timer by setting the internal pause time using ``time.time()``", "id": "f14423:c0:m2"} {"signature": "def toggle_pause(self):", "body": "if self.pause_time:self.start()else:self.pause()", "docstring": "Toggle 
the paused state", "id": "f14423:c0:m3"} {"signature": "def stop(self) -> float:", "body": "self.stop_time = time.time()return self.stop_time - self.start_time - self.offset", "docstring": "Stop the timer\n\nReturns:\n The time the timer was stopped", "id": "f14423:c0:m4"} {"signature": "def get_time(self) -> float:", "body": "if self.pause_time is not None:curr_time = self.pause_time - self.offset - self.start_timereturn curr_timecurr_time = time.time()return curr_time - self.start_time - self.offset", "docstring": "Get the current time in seconds\n\nReturns:\n The current time in seconds", "id": "f14423:c0:m5"} {"signature": "def set_time(self, value: float):", "body": "if value < :value = self.offset += self.get_time() - value", "docstring": "Set the current time. This can be used to jump in the timeline.\n\nArgs:\n value (float): The new time", "id": "f14423:c0:m6"} {"signature": "def start(self):", "body": "raise NotImplementedError()", "docstring": "Start the timer initially or resume after pause\n\nRaises:\n NotImplementedError", "id": "f14424:c0:m1"} {"signature": "def pause(self):", "body": "raise NotImplementedError()", "docstring": "Pause the timer\n\nRaises:\n NotImplementedError", "id": "f14424:c0:m2"} {"signature": "def toggle_pause(self):", "body": "raise NotImplementedError()", "docstring": "Toggle pause state\n\nRaises:\n NotImplementedError", "id": "f14424:c0:m3"} {"signature": "def stop(self) -> float:", "body": "raise NotImplementedError()", "docstring": "Stop the timer. Should only be called once when stopping the timer.\n\nReturns:\n The time the timer was stopped\n\nRaises:\n NotImplementedError", "id": "f14424:c0:m4"} {"signature": "def get_time(self) -> float:", "body": "raise NotImplementedError()", "docstring": "Get the current time in seconds\n\nReturns:\n The current time in seconds\n\nRaises:\n NotImplementedError", "id": "f14424:c0:m5"} {"signature": "def set_time(self, value: float):", "body": "raise NotImplementedError()", "docstring": "Set the current time in seconds.\n\nArgs:\n value (float): The new time\n\nRaises:\n NotImplementedError", "id": "f14424:c0:m6"} {"signature": "def __init__(self, **kwargs):", "body": "config = getattr(settings, '', None)if config is None:config = {}self.mode = config.get('') or ''self.files = config.get('') or ''self.project = config.get('') or ''self.rps = config.get('', )self.start_paused = Falseself.controller = TimeController(self.rps)if self.mode == '':self.rocket = Rocket.from_socket(self.controller, track_path=self.files)self.start_paused = Trueelif self.mode == '':self.rocket = Rocket.from_project_file(self.controller, self.project)elif self.mode == '':self.rocket = Rocket.from_files(self.controller, self.files)else:raise ValueError(\"\".format(self.mode))for track in tracks.tacks:self.rocket.tracks.add(track)for track in tracks.tacks:self.rocket.track(track.name)self.rocket.update()super().__init__(**kwargs)", "docstring": "Initialize the rocket timer using values from settings", "id": "f14425:c0:m0"} {"signature": "def start(self):", "body": "if not self.start_paused:self.rocket.start()", "docstring": "Start the timer", "id": "f14425:c0:m1"} {"signature": "def get_time(self) -> float:", "body": "self.rocket.update()return self.rocket.time", "docstring": "Get the current time in seconds\n\nReturns:\n The current time in seconds", "id": "f14425:c0:m2"} {"signature": "def set_time(self, value: float):", "body": "if value < :value = self.controller.row = self.rps * value", "docstring": "Set the current time jumping in 
the timeline.\n\nArgs:\n value (float): The new time", "id": "f14425:c0:m3"} {"signature": "def pause(self):", "body": "self.controller.playing = False", "docstring": "Pause the timer", "id": "f14425:c0:m4"} {"signature": "def toggle_pause(self):", "body": "self.controller.playing = not self.controller.playing", "docstring": "Toggle pause mode", "id": "f14425:c0:m5"} {"signature": "def stop(self) -> float:", "body": "return self.rocket.time", "docstring": "Stop the timer\n\nReturns:\n The current time.", "id": "f14425:c0:m6"} {"signature": "def start(self):", "body": "self.player.play()self.paused = False", "docstring": "Start the music", "id": "f14426:c0:m1"} {"signature": "def pause(self):", "body": "self.pause_time = self.get_time()self.paused = Trueself.player.pause()", "docstring": "Pause the music", "id": "f14426:c0:m2"} {"signature": "def toggle_pause(self):", "body": "if self.paused:self.start()else:self.pause()", "docstring": "Toggle pause mode", "id": "f14426:c0:m3"} {"signature": "def stop(self) -> float:", "body": "self.player.stop()return self.get_time()", "docstring": "Stop the music\n\nReturns:\n The current time in seconds", "id": "f14426:c0:m4"} {"signature": "def get_time(self) -> float:", "body": "if self.paused:return self.pause_timereturn self.player.get_time() / ", "docstring": "Get the current time in seconds\n\nReturns:\n The current time in seconds", "id": "f14426:c0:m5"} {"signature": "def start(self):", "body": "self.music.start()if not self.start_paused:self.rocket.start()", "docstring": "Start the timer", "id": "f14427:c0:m1"} {"signature": "def get_time(self) -> float:", "body": "self.rocket.update()return self.music.get_time()", "docstring": "Get the current time in seconds\n\nReturns:\n The current time in seconds", "id": "f14427:c0:m2"} {"signature": "def set_time(self, value: float):", "body": "self.music.set_time(value)", "docstring": "Set the current time jumping in the timeline\n\nArgs:\n value (float): The new time value", "id": "f14427:c0:m3"} {"signature": "def pause(self):", "body": "self.controller.playing = Falseself.music.pause()", "docstring": "Pause the timer", "id": "f14427:c0:m4"} {"signature": "def toggle_pause(self):", "body": "self.controller.playing = not self.controller.playingself.music.toggle_pause()", "docstring": "Toggle pause mode", "id": "f14427:c0:m5"} {"signature": "def stop(self) -> float:", "body": "return self.rocket.time", "docstring": "Stop the timer\n\nReturns:\n The current time", "id": "f14427:c0:m6"} {"signature": "def draw_buffers(self, near, far):", "body": "self.ctx.disable(moderngl.DEPTH_TEST)helper.draw(self.gbuffer.color_attachments[], pos=(, ), scale=(, ))helper.draw(self.gbuffer.color_attachments[], pos=(, ), scale=(, ))helper.draw_depth(self.gbuffer.depth_attachment, near, far, pos=(, ), scale=(, ))helper.draw(self.lightbuffer.color_attachments[], pos=(, ), scale=(, ))", "docstring": "Draw framebuffers for debug purposes.\nWe need to supply near and far plane so the depth buffer can be linearized when visualizing.\n\n:param near: Projection near value\n:param far: Projection far value", "id": "f14429:c0:m1"} {"signature": "def add_point_light(self, position, radius):", "body": "self.point_lights.append(PointLight(position, radius))", "docstring": "Add point light", "id": "f14429:c0:m2"} {"signature": "def render_lights(self, camera_matrix, projection):", "body": "self.ctx.front_face = ''self.ctx.blend_func = moderngl.ONE, moderngl.ONEhelper._depth_sampler.use(location=)with self.lightbuffer_scope:for light in 
self.point_lights:light_size = light.radiusm_light = matrix44.multiply(light.matrix, camera_matrix)self.point_light_shader[\"\"].write(projection.tobytes())self.point_light_shader[\"\"].write(m_light.astype('').tobytes())self.gbuffer.color_attachments[].use(location=)self.point_light_shader[\"\"].value = self.gbuffer.depth_attachment.use(location=)self.point_light_shader[\"\"].value = self.point_light_shader[\"\"].value = (self.width, self.height)self.point_light_shader[\"\"].value = projection.projection_constantsself.point_light_shader[\"\"].value = light_sizeself.unit_cube.render(self.point_light_shader)helper._depth_sampler.clear(location=)", "docstring": "Render light volumes", "id": "f14429:c0:m3"} {"signature": "def render_lights_debug(self, camera_matrix, projection):", "body": "self.ctx.enable(moderngl.BLEND)self.ctx.blend_func = moderngl.SRC_ALPHA, moderngl.ONE_MINUS_SRC_ALPHAfor light in self.point_lights:m_mv = matrix44.multiply(light.matrix, camera_matrix)light_size = light.radiusself.debug_shader[\"\"].write(projection.tobytes())self.debug_shader[\"\"].write(m_mv.astype('').tobytes())self.debug_shader[\"\"].value = light_sizeself.unit_cube.render(self.debug_shader, mode=moderngl.LINE_STRIP)self.ctx.disable(moderngl.BLEND)", "docstring": "Render outlines of light volumes", "id": "f14429:c0:m4"} {"signature": "def combine(self):", "body": "self.gbuffer.color_attachments[].use(location=)self.combine_shader[\"\"].value = self.lightbuffer.color_attachments[].use(location=)self.combine_shader[\"\"].value = self.quad.render(self.combine_shader)", "docstring": "Combine diffuse and light buffer", "id": "f14429:c0:m6"} {"signature": "def clear(self):", "body": "self.gbuffer.clear()self.lightbuffer.clear()", "docstring": "Clear all buffers", "id": "f14429:c0:m7"} {"signature": "def __init__(self, *args, **kwargs):", "body": "pass", "docstring": "Implement the initializer when extending the class.\nThis method is responsible for fetching or creating resources\nand doing general initialization of the effect.\n\nThe effect initializer is called when all resources are loaded\n(with the exception of resources you manually load in\nthe initializer).\n\nIf your effect requires arguments during initialization you\nare free to add positional and keyword arguments.\n\nYou **do not** have to call the superclass initializer through ``super()``\n\nExample::\n\n def __init__(self):\n # Fetch reference to resource by their label\n self.program = self.get_program('simple_textured')\n self.texture = self.get_texture('bricks')\n # .. 
create a cube etc ..", "id": "f14430:c0:m0"} {"signature": "def post_load(self):", "body": "pass", "docstring": "Called after all effects are initialized before drawing starts.\nSome initialization may be necessary to do here such as\ninteraction with other effects.\n\nThis method does nothing unless implemented.", "id": "f14430:c0:m1"} {"signature": "@propertydef name(self) -> str:", "body": "return self._name", "docstring": "Full python path to the effect", "id": "f14430:c0:m2"} {"signature": "@propertydef label(self) -> str:", "body": "return self._label", "docstring": "The label assigned to this effect instance", "id": "f14430:c0:m3"} {"signature": "@propertydef window(self) -> BaseWindow:", "body": "return self._window", "docstring": "The :py:class:`Window`", "id": "f14430:c0:m4"} {"signature": "@propertydef ctx(self) -> moderngl.Context:", "body": "return self._ctx", "docstring": "The ModernGL context", "id": "f14430:c0:m5"} {"signature": "@propertydef sys_camera(self) -> camera.SystemCamera:", "body": "return self._sys_camera", "docstring": "The system camera responding to input", "id": "f14430:c0:m6"} {"signature": "def draw(self, time: float, frametime: float, target: moderngl.Framebuffer):", "body": "raise NotImplementedError(\"\")", "docstring": "Draw function called by the system every frame when the effect is active.\nThis method raises ``NotImplementedError`` unless implemented.\n\nArgs:\n time (float): The current time in seconds.\n frametime (float): The time the previous frame used to render in seconds.\n target (``moderngl.Framebuffer``): The target FBO for the effect.", "id": "f14430:c0:m7"} {"signature": "def get_program(self, label: str) -> moderngl.Program:", "body": "return self._project.get_program(label)", "docstring": "Get a program by its label\n\nArgs:\n label (str): The label for the program\n\nReturns: py:class:`moderngl.Program` instance", "id": "f14430:c0:m8"} {"signature": "def get_texture(self, label: str) -> Union[moderngl.Texture, moderngl.TextureArray,moderngl.Texture3D, moderngl.TextureCube]:", "body": "return self._project.get_texture(label)", "docstring": "Get a texture by its label\n\nArgs:\n label (str): The label for the texture\n\nReturns:\n The py:class:`moderngl.Texture` instance", "id": "f14430:c0:m9"} {"signature": "def get_track(self, name: str) -> Track:", "body": "return resources.tracks.get(name)", "docstring": "Gets or creates a rocket track.\nOnly available when using a Rocket timer.\n\nArgs:\n name (str): The rocket track name\n\nReturns:\n The :py:class:`rocket.Track` instance", "id": "f14430:c0:m10"} {"signature": "def get_scene(self, label: str) -> Scene:", "body": "return self._project.get_scene(label)", "docstring": "Get a scene by its label\n\nArgs:\n label (str): The label for the scene\n\nReturns: The :py:class:`Scene` instance", "id": "f14430:c0:m11"} {"signature": "def get_data(self, label: str) -> Any:", "body": "return self._project.get_data(label)", "docstring": "Get a data instance by its label\n\nArgs:\n label (str): Label for the data instance\n\nReturns:\n Contents of the data file", "id": "f14430:c0:m12"} {"signature": "def get_effect(self, label: str) -> '':", "body": "return self._project.get_effect(label)", "docstring": "Get an effect instance by label.\n\nArgs:\n label (str): Label for the effect instance\n\nReturns: The :py:class:`Effect` instance", "id": "f14430:c0:m13"} {"signature": "def get_effect_class(self, effect_name: str, package_name: str = None) -> Type['']:", "body": "return 
self._project.get_effect_class(effect_name, package_name=package_name)", "docstring": "Get an effect class by the class name\n\nArgs:\n effect_name (str): Name of the effect class\n\nKeyword Args:\n package_name (str): The package the effect belongs to. This is optional and only\n needed when effect class names are not unique.\n\nReturns:\n :py:class:`Effect` class", "id": "f14430:c0:m14"} {"signature": "def create_projection(self, fov: float = , near: float = , far: float = , aspect_ratio: float = None):", "body": "return matrix44.create_perspective_projection_matrix(fov,aspect_ratio or self.window.aspect_ratio,near,far,dtype='',)", "docstring": "Create a projection matrix with the following parameters.\nWhen ``aspect_ratio`` is not provided the configured aspect\nratio for the window will be used.\n\nArgs:\n fov (float): Field of view (float)\n near (float): Camera near value\n far (float): Camera far value\n\nKeyword Args:\n aspect_ratio (float): Aspect ratio of the viewport\n\nReturns:\n The projection matrix as a float32 :py:class:`numpy.array`", "id": "f14430:c0:m15"} {"signature": "def create_transformation(self, rotation=None, translation=None):", "body": "mat = Noneif rotation is not None:mat = Matrix44.from_eulers(Vector3(rotation))if translation is not None:trans = matrix44.create_from_translation(Vector3(translation))if mat is None:mat = transelse:mat = matrix44.multiply(mat, trans)return mat", "docstring": "Creates a transformation matrix with rotations and translation.\n\nArgs:\n rotation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`\n translation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`\n\nReturns:\n A 4x4 matrix as a :py:class:`numpy.array`", "id": "f14430:c0:m16"} {"signature": "def create_normal_matrix(self, modelview):", "body": "normal_m = Matrix33.from_matrix44(modelview)normal_m = normal_m.inversenormal_m = normal_m.transpose()return normal_m", "docstring": "Creates a normal matrix from modelview matrix\n\nArgs:\n modelview: The modelview matrix\n\nReturns:\n A 3x3 Normal matrix as a :py:class:`numpy.array`", "id": "f14430:c0:m17"} {"signature": "def parse_package_string(path):", "body": "parts = path.split('')if parts[-][].isupper():return \"\".join(parts[:-]), parts[-]return path, \"\"", "docstring": "Parse the effect package string.\nCan contain the package python path or path to effect class in an effect package.\n\nExamples::\n\n # Path to effect package\n examples.cubes\n\n # Path to effect class\n examples.cubes.Cubes\n\nArgs:\n path: python path to effect package. 
May also include effect class name.\n\nReturns:\n tuple: (package_path, effect_class)", "id": "f14432:m0"} {"signature": "def get_dirs(self) -> List[str]:", "body": "for package in self.packages:yield os.path.join(package.path, '')", "docstring": "Get all effect directories for registered effects.", "id": "f14432:c0:m1"} {"signature": "def get_effect_resources(self) -> List[Any]:", "body": "resources = []for package in self.packages:resources.extend(package.resources)return resources", "docstring": "Get all resources registed in effect packages.\nThese are typically located in ``resources.py``", "id": "f14432:c0:m2"} {"signature": "def polulate(self, package_list):", "body": "for package in package_list:self.add_package(package)", "docstring": "Polulate the registry with effect packages.\n\n:param module_list: List of effect module paths", "id": "f14432:c0:m3"} {"signature": "def add_package(self, name):", "body": "name, cls_name = parse_package_string(name)if name in self.package_map:returnpackage = EffectPackage(name)package.load()self.packages.append(package)self.package_map[package.name] = packageself.polulate(package.effect_packages)", "docstring": "Registers a single package\n\n:param name: (str) The effect package to add", "id": "f14432:c0:m4"} {"signature": "def get_package(self, name) -> '':", "body": "name, cls_name = parse_package_string(name)try:return self.package_map[name]except KeyError:raise EffectError(\"\".format(name))", "docstring": "Get a package by python path. Can also contain path to an effect.\n\nArgs:\n name (str): Path to effect package or effect\n\nReturns:\n The requested EffectPackage\n\nRaises:\n EffectError when no package is found", "id": "f14432:c0:m5"} {"signature": "def find_effect_class(self, path) -> Type[Effect]:", "body": "package_name, class_name = parse_package_string(path)if package_name:package = self.get_package(package_name)return package.find_effect_class(class_name, raise_for_error=True)for package in self.packages:effect_cls = package.find_effect_class(class_name)if effect_cls:return effect_clsraise EffectError(\"\".format(class_name))", "docstring": "Find an effect class by class name or full python path to class\n\nArgs:\n path (str): effect class name or full python path to effect class\n\nReturns:\n Effect class\n\nRaises:\n EffectError if no class is found", "id": "f14432:c0:m6"} {"signature": "def runnable_effects(self) -> List[Type[Effect]]:", "body": "return [cls for cls in self.effect_classes if cls.runnable]", "docstring": "Returns the runnable effect in the package", "id": "f14432:c1:m1"} {"signature": "def load_package(self):", "body": "try:self.package = importlib.import_module(self.name)except ModuleNotFoundError:raise ModuleNotFoundError(\"\".format(self.name))", "docstring": "FInd the effect package", "id": "f14432:c1:m6"} {"signature": "def load_effects_classes(self):", "body": "self.effect_classes = []for _, cls in inspect.getmembers(self.effect_module):if inspect.isclass(cls):if cls == Effect:continueif issubclass(cls, Effect):self.effect_classes.append(cls)self.effect_class_map[cls.__name__] = clscls._name = \"\".format(self.effect_module_name, cls.__name__)", "docstring": "Iterate the module attributes picking out effects", "id": "f14432:c1:m8"} {"signature": "def load_resource_module(self):", "body": "try:name = ''.format(self.name, '')self.dependencies_module = importlib.import_module(name)except ModuleNotFoundError as err:raise EffectError((\"\"\"\").format(self.name, err))try:self.resources = 
getattr(self.dependencies_module, '')except AttributeError:raise EffectError(\"\".format(name))if not isinstance(self.resources, list):raise EffectError(\"\".format(name, type(self.resources)))try:self.effect_packages = getattr(self.dependencies_module, '')except AttributeError:raise EffectError(\"\".format(name))if not isinstance(self.effect_packages, list):raise EffectError(\"\".format(name, type(self.effects)))", "docstring": "Fetch the resource list", "id": "f14432:c1:m9"} {"signature": "def _translate_string(self, data, length):", "body": "for index, char in enumerate(data):if index == length:breakyield self._meta.characters - - self._ct[char]", "docstring": "Translate string into character texture positions", "id": "f14433:c1:m2"} {"signature": "def _generate_character_map(self):", "body": "self._ct = [-] * index = for crange in self._meta.character_ranges:for cpos in range(crange[''], crange[''] + ):self._ct[cpos] = indexindex += ", "docstring": "Generate character translation map (latin1 pos to texture pos)", "id": "f14433:c1:m4"} {"signature": "def __init__(self, area, text_lines=None, texture_height=):", "body": "super().__init__(area, text_lines=text_lines)self._texture_height = texture_heightself._texture_width = self._quad = self._create_vao()self._quad_program = self.get_program('')self._fbo = Noneself._texture_width = int(round(self._meta.char_aspect_wh * self._texture_height * self.area[] / self.area[], ))self.aspect_ratio = self._texture_width / self._texture_heightself._fbo = self.ctx.framebuffer(self.ctx.texture((self._texture_width, self._texture_height), ))self._fbo_scope = self.ctx.scope(self._fbo)", "docstring": ":param area: (x, y) Text area size (number of characters)\n:param size: Text size\n:param text: Initial text", "id": "f14434:c0:m0"} {"signature": "def __init__(self, area, text_lines=None, aspect_ratio=):", "body": "super().__init__()self.area = areaself._text_lines = text_linesself._projection_bytes = Noneself._aspect_ratio = self.aspect_ratio = aspect_ratioself._vao = Noneself._config = self.get_data('')self._texture = self.get_texture('')self._program = self.get_program('')self._string_buffer = Noneself._init(FontMeta(self._config))self._string_buffer = self.ctx.buffer(reserve=self.area[] * * self.area[])self._string_buffer.clear(chunk=b'')pos = self.ctx.buffer(data=bytes([] * * ))self._vao = VAO(\"\", mode=moderngl.POINTS)self._vao.buffer(pos, '', '')self._vao.buffer(self._string_buffer, '', '', per_instance=True)self.text_lines = self._text_lines", "docstring": ":param area: (x, y) Text area size (number of characters)\n:param size: Text size\n:param text: Initial text lines", "id": "f14436:c0:m0"} {"signature": "def import_string(dotted_path):", "body": "try:module_path, class_name = dotted_path.rsplit('', )except ValueError as err:raise ImportError(\"\" % dotted_path) from errmodule = import_module(module_path)try:return getattr(module, class_name)except AttributeError as err:raise ImportError('' % (module_path,class_name)) from err", "docstring": "Import a dotted module path and return the attribute/class designated by the\nlast name in the path. 
Raise ImportError if the import failed.\n\n:param dotted_path: The path to attempt importing\n:return: The object", "id": "f14440:m0"} {"signature": "def load_shader(self, shader_type: str, path: str):", "body": "if path:resolved_path = self.find_program(path)if not resolved_path:raise ValueError(\"\".format(shader_type, path))print(\"\", path)with open(resolved_path, '') as fd:return fd.read()", "docstring": "Load a single shader", "id": "f14442:c0:m1"} {"signature": "def __init__(self, meta: SceneDescription):", "body": "super().__init__(meta)self.scenes = []self.nodes = []self.meshes = []self.materials = []self.images = []self.samplers = []self.textures = []self.path = Noneself.scene = None", "docstring": "Parse the json file and validate its contents.\nNo actual data loading will happen.\n\nSupported formats:\n- gltf json format with external resources\n- gltf embedded buffers\n- glb Binary format", "id": "f14444:c0:m0"} {"signature": "def load(self):", "body": "self.path = self.find_scene(self.meta.path)if not self.path:raise ValueError(\"\".format(self.meta.path))self.scene = Scene(self.path)if self.path.suffix == '':self.load_gltf()if self.path.suffix == '':self.load_glb()self.meta.check_version()self.meta.check_extensions(self.supported_extensions)self.load_images()self.load_samplers()self.load_textures()self.load_materials()self.load_meshes()self.load_nodes()self.scene.calc_scene_bbox()self.scene.prepare()return self.scene", "docstring": "Deferred loading of the scene\n\n:param scene: The scene object\n:param file: Resolved path if changed by finder", "id": "f14444:c0:m1"} {"signature": "def load_gltf(self):", "body": "with open(self.path) as fd:self.meta = GLTFMeta(self.path, json.load(fd))", "docstring": "Loads a gltf json file", "id": "f14444:c0:m2"} {"signature": "def load_glb(self):", "body": "with open(self.path, '') as fd:magic = fd.read()if magic != GLTF_MAGIC_HEADER:raise ValueError(\"\".format(self.path, magic, GLTF_MAGIC_HEADER))version = struct.unpack('', fd.read())[]if version != :raise ValueError(\"\".format(self.path, version))_ = struct.unpack('', fd.read())[] chunk_0_length = struct.unpack('', fd.read())[]chunk_0_type = fd.read()if chunk_0_type != b'':raise ValueError(\"\".format(chunk_0_type, self.path))json_meta = fd.read(chunk_0_length).decode()chunk_1_length = struct.unpack('', fd.read())[]chunk_1_type = fd.read()if chunk_1_type != b'':raise ValueError(\"\".format(chunk_1_type, self.path))self.meta = GLTFMeta(self.path, json.loads(json_meta), binary_buffer=fd.read(chunk_1_length))", "docstring": "Loads a binary gltf file", "id": "f14444:c0:m3"} {"signature": "def __init__(self, path, data, binary_buffer=None):", "body": "self.data = dataself.path = pathself.asset = GLTFAsset(data[''])self.materials = [GLTFMaterial(m) for m in data['']] if data.get('') else []self.images = [GLTFImage(i) for i in data['']] if data.get('') else []self.samplers = [GLTFSampler(s) for s in data['']] if data.get('') else []self.textures = [GLTFTexture(t) for t in data['']] if data.get('') else []self.scenes = [GLTFScene(s) for s in data['']] if data.get('') else []self.nodes = [GLTFNode(n) for n in data['']] if data.get('') else []self.meshes = [GLTFMesh(m) for m in data['']] if data.get('') else []self.cameras = [GLTFCamera(c) for c in data['']] if data.get('') else []self.buffer_views = [GLTFBufferView(i, v) for i, v in enumerate(data[''])]if data.get('') else []self.buffers = [GLTFBuffer(i, b, self.path.parent) for i, b in enumerate(data[''])]if data.get('') else []self.accessors 
= [GLTFAccessor(i, a) for i, a in enumerate(data[''])]if data.get('') else []if binary_buffer:self.buffers[].data = binary_bufferself._link_data()self.buffers_exist()self.images_exist()", "docstring": ":param file: GLTF file name loaded\n:param data: Metadata (json loaded)\n:param binary_buffer: Binary buffer when loading glb files", "id": "f14444:c1:m0"} {"signature": "def _link_data(self):", "body": "for acc in self.accessors:acc.bufferView = self.buffer_views[acc.bufferViewId]for buffer_view in self.buffer_views:buffer_view.buffer = self.buffers[buffer_view.bufferId]for mesh in self.meshes:for primitive in mesh.primitives:if getattr(primitive, \"\", None) is not None:primitive.indices = self.accessors[primitive.indices]for name, value in primitive.attributes.items():primitive.attributes[name] = self.accessors[value]for image in self.images:if image.bufferViewId is not None:image.bufferView = self.buffer_views[image.bufferViewId]", "docstring": "Add references", "id": "f14444:c1:m1"} {"signature": "def check_extensions(self, supported):", "body": "if self.data.get(''):for ext in self.data.get(''):if ext not in supported:raise ValueError(\"\".format(ext))if self.data.get(''):for ext in self.data.get(''):if ext not in supported:raise ValueError(\"\".format(ext))", "docstring": "\"extensionsRequired\": [\"KHR_draco_mesh_compression\"],\n\"extensionsUsed\": [\"KHR_draco_mesh_compression\"]", "id": "f14444:c1:m4"} {"signature": "def buffers_exist(self):", "body": "for buff in self.buffers:if not buff.is_separate_file:continuepath = self.path.parent / buff.uriif not os.path.exists(path):raise FileNotFoundError(\"\".format(path, self.path))", "docstring": "Checks if the bin files referenced exist", "id": "f14444:c1:m5"} {"signature": "def images_exist(self):", "body": "pass", "docstring": "checks if the images references in textures exist", "id": "f14444:c1:m6"} {"signature": "def load_indices(self, primitive):", "body": "if getattr(primitive, \"\") is None:return None, None_, component_type, buffer = primitive.indices.read()return component_type, buffer", "docstring": "Loads the index buffer / polygon list for a primitive", "id": "f14444:c3:m2"} {"signature": "def prepare_attrib_mapping(self, primitive):", "body": "buffer_info = []for name, accessor in primitive.attributes.items():info = VBOInfo(*accessor.info())info.attributes.append((name, info.components))if buffer_info and buffer_info[-].buffer_view == info.buffer_view:if buffer_info[-].interleaves(info):buffer_info[-].merge(info)continuebuffer_info.append(info)return buffer_info", "docstring": "Pre-parse buffer mappings for each VBO to detect interleaved data for a primitive", "id": "f14444:c3:m3"} {"signature": "def get_bbox(self, primitive):", "body": "accessor = primitive.attributes.get('')return accessor.min, accessor.max", "docstring": "Get the bounding box for the mesh", "id": "f14444:c3:m4"} {"signature": "def interleaves(self, info):", "body": "return info.byte_offset == self.component_type.size * self.components", "docstring": "Does the buffer interleave with this one?", "id": "f14444:c4:m1"} {"signature": "def create(self):", "body": "dtype = NP_COMPONENT_DTYPE[self.component_type.value]data = numpy.frombuffer(self.buffer.read(byte_length=self.byte_length, byte_offset=self.byte_offset),count=self.count * self.components,dtype=dtype,)return dtype, data", "docstring": "Create the VBO", "id": "f14444:c4:m3"} {"signature": "def read(self):", "body": "dtype = NP_COMPONENT_DTYPE[self.componentType.value]return 
ACCESSOR_TYPE[self.type], self.componentType, self.bufferView.read(byte_offset=self.byteOffset,dtype=dtype,count=self.count * ACCESSOR_TYPE[self.type],)", "docstring": "Reads buffer data\n:return: component count, component type, data", "id": "f14444:c5:m1"} {"signature": "def info(self):", "body": "buffer, byte_length, byte_offset = self.bufferView.info(byte_offset=self.byteOffset)return buffer, self.bufferView,byte_length, byte_offset,self.componentType, ACCESSOR_TYPE[self.type], self.count", "docstring": "Get underlying buffer info for this accessor\n:return: buffer, byte_length, byte_offset, component_type, count", "id": "f14444:c5:m2"} {"signature": "def info(self, byte_offset=):", "body": "return self.buffer, self.byteLength, byte_offset + self.byteOffset", "docstring": "Get the underlying buffer info\n:param byte_offset: byte offset from accessor\n:return: buffer, byte_length, byte_offset", "id": "f14444:c6:m3"} {"signature": "@propertydef has_data_uri(self):", "body": "if not self.uri:return Falsereturn self.uri.startswith(\"\")", "docstring": "Is data embedded in json?", "id": "f14444:c7:m1"} {"signature": "@propertydef is_separate_file(self):", "body": "return self.uri is not None and not self.has_data_uri", "docstring": "Buffer represents an independent bin file?", "id": "f14444:c7:m2"} {"signature": "@propertydef is_resource_node(self):", "body": "return self.camera is not None or self.mesh is not None", "docstring": "Is this just a reference node to a resource?", "id": "f14444:c9:m2"} {"signature": "def load(self) -> Scene:", "body": "raise NotImplementedError()", "docstring": "Load the scene", "id": "f14445:c0:m1"} {"signature": "@classmethoddef supports_file(cls, meta):", "body": "path = Path(meta.path)for ext in cls.file_extensions:if path.suffixes[:len(ext)] == ext:return Truereturn False", "docstring": "Check if the loader has a supported file extension", "id": "f14445:c0:m2"} {"signature": "def translate_buffer_format(vertex_format):", "body": "buffer_format = []attributes = []mesh_attributes = []if \"\" in vertex_format:buffer_format.append(\"\")attributes.append(\"\")mesh_attributes.append((\"\", \"\", ))if \"\" in vertex_format:buffer_format.append(\"\")attributes.append(\"\")mesh_attributes.append((\"\", \"\", ))if \"\" in vertex_format:buffer_format.append(\"\")attributes.append(\"\")mesh_attributes.append((\"\", \"\", ))buffer_format.append(\"\")attributes.append(\"\")mesh_attributes.append((\"\", \"\", ))return \"\".join(buffer_format), attributes, mesh_attributes", "docstring": "Translate the buffer format", "id": "f14446:m0"} {"signature": "def load(self):", "body": "path = self.find_scene(self.meta.path)if not path:raise ValueError(\"\".format(self.meta.path))if path.suffix == '':path = path.parent / path.stemdata = pywavefront.Wavefront(str(path), create_materials=True, cache=True)scene = Scene(self.meta.resolved_path)texture_cache = {}for _, mat in data.materials.items():mesh = Mesh(mat.name)if mat.vertices:buffer_format, attributes, mesh_attributes = translate_buffer_format(mat.vertex_format)vbo = numpy.array(mat.vertices, dtype='')vao = VAO(mat.name, mode=moderngl.TRIANGLES)vao.buffer(vbo, buffer_format, attributes)mesh.vao = vaofor attrs in mesh_attributes:mesh.add_attribute(*attrs)elif hasattr(mat, ''):mesh = Mesh(mat.name)mesh.vao = mat.vaofor attrs in mat.mesh_attributes:mesh.add_attribute(*attrs)else:continuescene.meshes.append(mesh)mesh.material = Material(mat.name)scene.materials.append(mesh.material)mesh.material.color = mat.diffuseif 
mat.texture:texture = texture_cache.get(mat.texture.path)if not texture:print(\"\", mat.texture.path)texture = textures.load(TextureDescription(label=mat.texture.path,path=mat.texture.path,mipmap=True,))texture_cache[mat.texture.path] = texturemesh.material.mat_texture = MaterialTexture(texture=texture,sampler=None,)node = Node(mesh=mesh)scene.root_nodes.append(node)scene.prepare()return scene", "docstring": "Deferred loading", "id": "f14446:c1:m1"} {"signature": "def __init__(self, meta):", "body": "self.meta = meta", "docstring": ":param meta: ResourceDescription instance", "id": "f14447:c0:m0"} {"signature": "def load(self) -> Any:", "body": "raise NotImplementedError()", "docstring": "Load a resource\n\n:returns: The newly loaded resource", "id": "f14447:c0:m1"} {"signature": "def _find_last_of(self, path, finders):", "body": "found_path = Nonefor finder in finders:result = finder.find(path)if result:found_path = resultreturn found_path", "docstring": "Find the last occurance of the file in finders", "id": "f14447:c0:m6"} {"signature": "@propertydef ctx(self):", "body": "return context.ctx()", "docstring": "ModernGL context", "id": "f14447:c0:m7"} {"signature": "def load(self):", "body": "self._open_image()components, data = image_data(self.image)texture = self.ctx.texture(self.image.size,components,data,)texture.extra = {'': self.meta}if self.meta.mipmap:texture.build_mipmaps()self._close_image()return texture", "docstring": "Load a 2d texture", "id": "f14448:c0:m0"} {"signature": "def image_data(image):", "body": "data = image.tobytes()components = len(data) // (image.size[] * image.size[])return components, data", "docstring": "Get components and bytes for an image", "id": "f14449:m0"} {"signature": "def load(self):", "body": "self._open_image()width, height, depth = self.image.size[], self.image.size[] // self.layers, self.layerscomponents, data = image_data(self.image)texture = self.ctx.texture_array((width, height, depth),components,data,)texture.extra = {'': self.meta}if self.meta.mipmap:texture.build_mipmaps()self._close_image()return texture", "docstring": "Load a texture array", "id": "f14450:c0:m1"} {"signature": "def load(self):", "body": "self.meta.resolved_path = self.find_data(self.meta.path)if not self.meta.resolved_path:raise ImproperlyConfigured(\"\".format(self.meta.path))print(\"\", self.meta.path)with open(self.meta.resolved_path, '') as fd:return fd.read()", "docstring": "Load a file in text mode", "id": "f14451:c0:m0"} {"signature": "def load(self):", "body": "self.meta.resolved_path = self.find_data(self.meta.path)if not self.meta.resolved_path:raise ImproperlyConfigured(\"\".format(self.meta.path))print(\"\", self.meta.path)with open(self.meta.resolved_path, '') as fd:return json.loads(fd.read())", "docstring": "Load a file as json", "id": "f14452:c0:m0"} {"signature": "def load(self):", "body": "self.meta.resolved_path = self.find_data(self.meta.path)if not self.meta.resolved_path:raise ImproperlyConfigured(\"\".format(self.meta.path))print(\"\", self.meta.path)with open(self.meta.resolved_path, '') as fd:return fd.read()", "docstring": "Load a file in binary mode", "id": "f14453:c0:m0"} {"signature": "def setup(**kwargs):", "body": "settings.setup()settings.update(**kwargs)", "docstring": "Configure", "id": "f14454:m0"} {"signature": "def init(*args, **kwargs):", "body": "view.init(*args, **kwargs)", "docstring": "Initialize and load", "id": "f14454:m1"} {"signature": "def run(*args, **kwargs):", "body": "view.run(*args, **kwargs)", "docstring": "Run", "id": 
"f14454:m2"} {"signature": "def draw(self, time, frametime, target):", "body": "raise NotImplementedError()", "docstring": "Called by the system every frame.\nThis method should be overridden.\n\n:param time: The current time in seconds\n:param frametime: The time one frame should take in seconds\n:param target: The target FBO", "id": "f14456:c0:m1"} {"signature": "def key_event(self, key, action, mods):", "body": "pass", "docstring": "Forwarded key events from the system.\n\n:param key: The key that was pressed or released.\n:param action: ACTION_PRESS, ACTION_RELEASE\n:param mods: Bit field describing which modifier keys were held down.", "id": "f14456:c0:m2"} {"signature": "def draw(self, time, frametime, target):", "body": "for effect in self.effects:value = effect.rocket_timeline_track.time_value(time)if value > :effect.draw(time, frametime, target)", "docstring": "Fetch track value for every runnable effect.\nIf the value is > 0.5 we draw it.", "id": "f14457:c0:m1"} {"signature": "def __init__(self):", "body": "pass", "docstring": "should take cmd config", "id": "f14459:c1:m0"} {"signature": "def add_arguments(self, parser):", "body": "pass", "docstring": "This method is for adding arguments to a command.\nWhen extending this class we define the arguments\nby adding it to the parser passed in.\n\n:param parser: The parser to add arguments to (standard argparse)", "id": "f14459:c1:m1"} {"signature": "def handle(self, *args, **options):", "body": "raise NotImplementedError()", "docstring": "The actual run logic for the command.\n\n:param args: arguments from the argparser\n:param options: keyword arguments from the argparser", "id": "f14459:c1:m2"} {"signature": "def run_from_argv(self, argv):", "body": "parser = self.create_parser(argv[], argv[])options = parser.parse_args(argv[:])cmd_options = vars(options)args = cmd_options.pop('', ())self.handle(*args, **cmd_options)", "docstring": "Called by the system when executing the command from the command line.\nThis should not be overridden.\n\n:param argv: Arguments from command line", "id": "f14459:c1:m3"} {"signature": "def print_help(self, prog_name, subcommand):", "body": "parser = self.create_parser(prog_name, subcommand)parser.print_help()", "docstring": "Prints the help text generated by the argument parser defined for this command.\nThis method should not be overridden.\n\n:param prog_name: name of the program that started the command.\n:param subcommand: The subcommand name", "id": "f14459:c1:m4"} {"signature": "def create_parser(self, prog_name, subcommand):", "body": "parser = argparse.ArgumentParser(prog_name, subcommand)self.add_arguments(parser)return parser", "docstring": "Create argument parser and deal with ``add_arguments``.\nThis method should not be overriden.\n\n:param prog_name: Name of the command (argv[0])\n:return: ArgumentParser", "id": "f14459:c1:m5"} {"signature": "def validate_name(self, name):", "body": "if not name:raise ValueError(\"\")if not name.isidentifier():raise ValueError(\"\".format(name))", "docstring": "Can the name be used as a python module or package?\nRaises ``ValueError`` if the name is invalid.\n\n:param name: the name to check", "id": "f14459:c2:m0"} {"signature": "def try_import(self, name):", "body": "try:import_module(name)except ImportError:passelse:raise ImportError(\"\".format(name))", "docstring": "Attempt to import the name.\nRaises ``ImportError`` if the name cannot be imported.\n\n:param name: the name to import", "id": "f14459:c2:m1"} {"signature": "def 
available_templates(value):", "body": "templates = list_templates()if value not in templates:raise ArgumentTypeError(\"\".format(value, \"\".join(templates)))return value", "docstring": "Scan for available templates in effect_templates", "id": "f14462:m1"} {"signature": "def root_path():", "body": "module_dir = os.path.dirname(globals()[''])return os.path.dirname(os.path.dirname(module_dir))", "docstring": "Get the absolute path to the root of the demosys package", "id": "f14462:m3"} {"signature": "def template_dir():", "body": "return os.path.join(root_path(), '')", "docstring": "Get the absolute path to the template directory", "id": "f14462:m4"} {"signature": "def initial_sanity_check(self):", "body": "self.try_import(self.project_name)self.validate_name(self.project_name)if os.path.exists(self.project_name):print(\"\".format(self.project_name))return Falseif os.path.exists(''):print(\"\")return Falsereturn True", "docstring": "Checks if we can create the project", "id": "f14464:c0:m2"} {"signature": "def create_entrypoint(self):", "body": "with open(os.path.join(self.template_dir, ''), '') as fd:data = fd.read().format(project_name=self.project_name)with open('', '') as fd:fd.write(data)os.chmod('', )", "docstring": "Write manage.py in the current directory", "id": "f14464:c0:m4"} {"signature": "def get_template_dir(self):", "body": "directory = os.path.dirname(os.path.abspath(__file__))directory = os.path.dirname(os.path.dirname(directory))directory = os.path.join(directory, '')return directory", "docstring": "Returns the absolute path to template directory", "id": "f14464:c0:m6"} {"signature": "def find_commands(command_dir: str) -> List[str]:", "body": "if not command_dir:return []return [name for _, name, is_pkg in pkgutil.iter_modules([command_dir])if not is_pkg and not name.startswith('')]", "docstring": "Get all command names in the a folder\n\n:return: List of commands names", "id": "f14465:m0"} {"signature": "def execute_from_command_line(argv=None):", "body": "if not argv:argv = sys.argvsystem_commands = find_commands(system_command_dir())project_commands = find_commands(project_command_dir())project_package = project_package_name()command = argv[] if len(argv) > else Noneif command in system_commands:cmd = load_command_class('', command)cmd.run_from_argv(argv)elif command in project_commands:cmd = load_command_class(project_package, command)cmd.run_from_argv(argv)else:print(\"\")for name in system_commands:print(\"\".format(name))for name in project_commands:print(\"\".format(name))", "docstring": "Currently the only entrypoint (manage.py, demosys-admin)", "id": "f14465:m6"} {"signature": "def create_effect_classes(self):", "body": "effects.polulate(self.effect_packages)", "docstring": "Registers effect packages defined in ``effect_packages``.", "id": "f14467:c0:m1"} {"signature": "def create_external_resources(self) -> List[ResourceDescription]:", "body": "return effects.get_effect_resources()", "docstring": "Fetches all resource descriptions defined in effect packages.\n\nReturns:\n List of resource descriptions to load", "id": "f14467:c0:m2"} {"signature": "def create_resources(self) -> List[ResourceDescription]:", "body": "return self.resources", "docstring": "Create resources for the project.\nSimply returns the ``resources`` list and can be implemented to\nmodify what a resource list is programmatically.\n\nReturns:\n List of resource descriptions to load", "id": "f14467:c0:m3"} {"signature": "def create_effect_instances(self):", "body": "raise NotImplementedError()", 
"docstring": "Create instances of effects.\nMust be implemented or ``NotImplementedError`` is raised.", "id": "f14467:c0:m4"} {"signature": "def create_effect(self, label: str, name: str, *args, **kwargs) -> Effect:", "body": "effect_cls = effects.find_effect_class(name)effect = effect_cls(*args, **kwargs)effect._label = labelif label in self._effects:raise ValueError(\"\".format(label))self._effects[label] = effectreturn effect", "docstring": "Create an effect instance adding it to the internal effects dictionary using the label as key.\n\nArgs:\n label (str): The unique label for the effect instance\n name (str): Name or full python path to the effect class we want to instantiate\n args: Positional arguments to the effect initializer\n kwargs: Keyword arguments to the effect initializer\n\nReturns:\n The newly created Effect instance", "id": "f14467:c0:m5"} {"signature": "def post_load(self):", "body": "for _, effect in self._effects.items():effect.post_load()", "docstring": "Called after resources are loaded before effects starts rendering.\nIt simply iterates each effect instance calling their ``post_load`` methods.", "id": "f14467:c0:m6"} {"signature": "def load(self):", "body": "self.create_effect_classes()self._add_resource_descriptions_to_pools(self.create_external_resources())self._add_resource_descriptions_to_pools(self.create_resources())for meta, resource in resources.textures.load_pool():self._textures[meta.label] = resourcefor meta, resource in resources.programs.load_pool():self._programs[meta.label] = resourcefor meta, resource in resources.scenes.load_pool():self._scenes[meta.label] = resourcefor meta, resource in resources.data.load_pool():self._data[meta.label] = resourceself.create_effect_instances()self.post_load()", "docstring": "Loads this project instance", "id": "f14467:c0:m7"} {"signature": "def _add_resource_descriptions_to_pools(self, meta_list):", "body": "if not meta_list:returnfor meta in meta_list:getattr(resources, meta.resource_type).add(meta)", "docstring": "Takes a list of resource descriptions adding them\nto the resource pool they belong to scheduling them for loading.", "id": "f14467:c0:m8"} {"signature": "def reload_programs(self):", "body": "print(\"\")for name, program in self._programs.items():if getattr(program, '', None):print(\"\".format(program.meta.label))program.program = resources.programs.load(program.meta)", "docstring": "Reload all shader programs with the reloadable flag set", "id": "f14467:c0:m9"} {"signature": "def get_effect(self, label: str) -> Effect:", "body": "return self._get_resource(label, self._effects, \"\")", "docstring": "Get an effect instance by label\n\nArgs:\n label (str): The label for the effect instance\n\nReturns:\n Effect class instance", "id": "f14467:c0:m10"} {"signature": "def get_effect_class(self, class_name, package_name=None) -> Type[Effect]:", "body": "if package_name:return effects.find_effect_class(\"\".format(package_name, class_name))return effects.find_effect_class(class_name)", "docstring": "Get an effect class from the effect registry.\n\nArgs:\n class_name (str): The exact class name of the effect\n\nKeyword Args:\n package_name (str): The python path to the effect package the effect name is located.\n This is optional and can be used to avoid issue with class name collisions.\n\nReturns:\n Effect class", "id": "f14467:c0:m11"} {"signature": "def get_scene(self, label: str) -> Scene:", "body": "return self._get_resource(label, self._scenes, \"\")", "docstring": "Gets a scene by label\n\nArgs:\n 
label (str): The label for the scene to fetch\n\nReturns:\n Scene instance", "id": "f14467:c0:m12"} {"signature": "def get_texture(self, label: str) -> Union[moderngl.Texture, moderngl.TextureArray,moderngl.Texture3D, moderngl.TextureCube]:", "body": "return self._get_resource(label, self._textures, \"\")", "docstring": "Get a texture by label\n\nArgs:\n label (str): The label for the texture to fetch\n\nReturns:\n Texture instance", "id": "f14467:c0:m14"} {"signature": "def get_data(self, label: str) -> Any:", "body": "return self._get_resource(label, self._data, \"\")", "docstring": "Get a data resource by label\n\nArgs:\n label (str): The labvel for the data resource to fetch\n\nReturns:\n The requeted data object", "id": "f14467:c0:m15"} {"signature": "def _get_resource(self, label: str, source: dict, resource_type: str):", "body": "try:return source[label]except KeyError:raise ValueError(\"\".format(resource_type, label, list(source.keys())))", "docstring": "Generic resoure fetcher handling errors.\n\nArgs:\n label (str): The label to fetch\n source (dict): The dictionary to look up the label\n resource_type str: The display name of the resource type (used in errors)", "id": "f14467:c0:m16"} {"signature": "def get_runnable_effects(self) -> List[Effect]:", "body": "return [effect for name, effect in self._effects.items() if effect.runnable]", "docstring": "Returns all runnable effects in the project.\n\n:return: List of all runnable effects", "id": "f14467:c0:m17"} {"signature": "@propertydef ctx(self) -> moderngl.Context:", "body": "return context.ctx()", "docstring": "The MondernGL context", "id": "f14467:c0:m18"} {"signature": "def quad_fs() -> VAO:", "body": "return quad_2d(, , , )", "docstring": "Creates a screen aligned quad using two triangles with normals and texture coordiantes.\n\nReturns:\n A :py:class:`demosys.opengl.vao.VAO` instance.", "id": "f14469:m0"} {"signature": "def quad_2d(width, height, xpos=, ypos=) -> VAO:", "body": "pos = numpy.array([xpos - width / , ypos + height / , ,xpos - width / , ypos - height / , ,xpos + width / , ypos - height / , ,xpos - width / , ypos + height / , ,xpos + width / , ypos - height / , ,xpos + width / , ypos + height / , ,], dtype=numpy.float32)normals = numpy.array([, , ,, , ,, , ,, , ,, , ,, , ,], dtype=numpy.float32)uvs = numpy.array([, ,, ,, ,, ,, ,, ,], dtype=numpy.float32)vao = VAO(\"\", mode=moderngl.TRIANGLES)vao.buffer(pos, '', [\"\"])vao.buffer(normals, '', [\"\"])vao.buffer(uvs, '', [\"\"])return vao", "docstring": "Creates a 2D quad VAO using 2 triangles with normals and texture coordinates.\n\nArgs:\n width (float): Width of the quad\n height (float): Height of the quad\n\nKeyword Args:\n xpos (float): Center position x\n ypos (float): Center position y\n\nReturns:\n A :py:class:`demosys.opengl.vao.VAO` instance.", "id": "f14469:m1"} {"signature": "def plane_xz(size=(, ), resolution=(, )) -> VAO:", "body": "sx, sz = sizerx, rz = resolutiondx, dz = sx / rx, sz / rz ox, oz = -sx / , -sz / def gen_pos():for z in range(rz):for x in range(rx):yield ox + x * dxyield yield oz + z * dzdef gen_uv():for z in range(rz):for x in range(rx):yield x / (rx - )yield - z / (rz - )def gen_normal():for _ in range(rx * rz):yield yield yield def gen_index():for z in range(rz - ):for x in range(rx - ):yield z * rz + x + yield z * rz + xyield z * rz + x + rxyield z * rz + x + yield z * rz + x + rxyield z * rz + x + rx + pos_data = numpy.fromiter(gen_pos(), dtype=numpy.float32)uv_data = numpy.fromiter(gen_uv(), dtype=numpy.float32)normal_data = 
numpy.fromiter(gen_normal(), dtype=numpy.float32)index_data = numpy.fromiter(gen_index(), dtype=numpy.uint32)vao = VAO(\"\", mode=moderngl.TRIANGLES)vao.buffer(pos_data, '', [''])vao.buffer(uv_data, '', [''])vao.buffer(normal_data, '', [''])vao.index_buffer(index_data, index_element_size=)return vao", "docstring": "Generates a plane on the xz axis of a specific size and resolution.\nNormals and texture coordinates are also included.\n\nArgs:\n size: (x, y) tuple\n resolution: (x, y) tuple\n\nReturns:\n A :py:class:`demosys.opengl.vao.VAO` instance", "id": "f14470:m0"} {"signature": "def bbox(width=, height=, depth=):", "body": "width, height, depth = width / , height / , depth / pos = numpy.array([width, -height, depth,width, height, depth,-width, -height, depth,width, height, depth,-width, height, depth,-width, -height, depth,width, -height, -depth,width, height, -depth,width, -height, depth,width, height, -depth,width, height, depth,width, -height, depth,width, -height, -depth,width, -height, depth,-width, -height, depth,width, -height, -depth,-width, -height, depth,-width, -height, -depth,-width, -height, depth,-width, height, depth,-width, height, -depth,-width, -height, depth,-width, height, -depth,-width, -height, -depth,width, height, -depth,width, -height, -depth,-width, -height, -depth,width, height, -depth,-width, -height, -depth,-width, height, -depth,width, height, -depth,-width, height, -depth,width, height, depth,-width, height, -depth,-width, height, depth,width, height, depth,], dtype=numpy.float32)vao = VAO(\"\", mode=moderngl.LINE_STRIP)vao.buffer(pos, '', [\"\"])return vao", "docstring": "Generates a bounding box with (0.0, 0.0, 0.0) as the center.\nThis is simply a box with ``LINE_STRIP`` as draw mode.\n\nKeyword Args:\n width (float): Width of the box\n height (float): Height of the box\n depth (float): Depth of the box\n\nReturns:\n A :py:class:`demosys.opengl.vao.VAO` instance", "id": "f14471:m0"} {"signature": "def points_random_3d(count, range_x=(-, ), range_y=(-, ), range_z=(-, ), seed=None) -> VAO:", "body": "random.seed(seed)def gen():for _ in range(count):yield random.uniform(*range_x)yield random.uniform(*range_y)yield random.uniform(*range_z)data = numpy.fromiter(gen(), count=count * , dtype=numpy.float32)vao = VAO(\"\", mode=moderngl.POINTS)vao.buffer(data, '', [''])return vao", "docstring": "Generates random positions inside a confied box.\n\nArgs:\n count (int): Number of points to generate\n\nKeyword Args:\n range_x (tuple): min-max range for x axis: Example (-10.0. 10.0)\n range_y (tuple): min-max range for y axis: Example (-10.0. 10.0)\n range_z (tuple): min-max range for z axis: Example (-10.0. 
10.0)\n seed (int): The random seed\n\nReturns:\n A :py:class:`demosys.opengl.vao.VAO` instance", "id": "f14472:m0"} {"signature": "def cube(width, height, depth, center=(, , ), normals=True, uvs=True) -> VAO:", "body": "width, height, depth = width / , height / , depth / pos = numpy.array([center[] + width, center[] - height, center[] + depth,center[] + width, center[] + height, center[] + depth,center[] - width, center[] - height, center[] + depth,center[] + width, center[] + height, center[] + depth,center[] - width, center[] + height, center[] + depth,center[] - width, center[] - height, center[] + depth,center[] + width, center[] - height, center[] - depth,center[] + width, center[] + height, center[] - depth,center[] + width, center[] - height, center[] + depth,center[] + width, center[] + height, center[] - depth,center[] + width, center[] + height, center[] + depth,center[] + width, center[] - height, center[] + depth,center[] + width, center[] - height, center[] - depth,center[] + width, center[] - height, center[] + depth,center[] - width, center[] - height, center[] + depth,center[] + width, center[] - height, center[] - depth,center[] - width, center[] - height, center[] + depth,center[] - width, center[] - height, center[] - depth,center[] - width, center[] - height, center[] + depth,center[] - width, center[] + height, center[] + depth,center[] - width, center[] + height, center[] - depth,center[] - width, center[] - height, center[] + depth,center[] - width, center[] + height, center[] - depth,center[] - width, center[] - height, center[] - depth,center[] + width, center[] + height, center[] - depth,center[] + width, center[] - height, center[] - depth,center[] - width, center[] - height, center[] - depth,center[] + width, center[] + height, center[] - depth,center[] - width, center[] - height, center[] - depth,center[] - width, center[] + height, center[] - depth,center[] + width, center[] + height, center[] - depth,center[] - width, center[] + height, center[] - depth,center[] + width, center[] + height, center[] + depth,center[] - width, center[] + height, center[] - depth,center[] - width, center[] + height, center[] + depth,center[] + width, center[] + height, center[] + depth,], dtype=numpy.float32)if normals:normal_data = numpy.array([-, , ,-, , ,-, , ,, , ,, , ,, , ,, , ,, , ,, , ,, , ,, , ,, , ,, -, ,, -, ,, -, ,, -, ,, -, ,, -, ,-, -, ,-, -, ,-, -, ,-, -, ,-, -, ,-, -, ,, , -,, , -,, , -,, , -,, , -,, , -,, , ,, , ,, , ,, , ,, , ,, , ,], dtype=numpy.float32)if uvs:uvs_data = numpy.array([, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ,, ], dtype=numpy.float32)vao = VAO(\"\")vao.buffer(pos, '', [''])if normals:vao.buffer(normal_data, '', [''])if uvs:vao.buffer(uvs_data, '', [''])return vao", "docstring": "Creates a cube VAO with normals and texture coordinates\n\nArgs:\n width (float): Width of the cube\n height (float): Height of the cube\n depth (float): Depth of the cube\n\nKeyword Args:\n center: center of the cube as a 3-component tuple\n normals: (bool) Include normals\n uvs: (bool) include uv coordinates\n\nReturns:\n A :py:class:`demosys.opengl.vao.VAO` instance", "id": "f14474:m0"} {"signature": "def sphere(radius=, sectors=, rings=) -> VAO:", "body": "R = / (rings - )S = / (sectors - )vertices = [] * (rings * sectors * )normals = [] * (rings * sectors * )uvs = [] * (rings * sectors * )v, n, t = , , for r in range(rings):for s in range(sectors):y = math.sin(-math.pi / + math.pi * r * R)x = math.cos( * 
math.pi * s * S) * math.sin(math.pi * r * R)z = math.sin( * math.pi * s * S) * math.sin(math.pi * r * R)uvs[t] = s * Suvs[t + ] = r * Rvertices[v] = x * radiusvertices[v + ] = y * radiusvertices[v + ] = z * radiusnormals[n] = xnormals[n + ] = ynormals[n + ] = zt += v += n += indices = [] * rings * sectors * i = for r in range(rings - ):for s in range(sectors - ):indices[i] = r * sectors + sindices[i + ] = (r + ) * sectors + (s + )indices[i + ] = r * sectors + (s + )indices[i + ] = r * sectors + sindices[i + ] = (r + ) * sectors + sindices[i + ] = (r + ) * sectors + (s + )i += vbo_vertices = numpy.array(vertices, dtype=numpy.float32)vbo_normals = numpy.array(normals, dtype=numpy.float32)vbo_uvs = numpy.array(uvs, dtype=numpy.float32)vbo_elements = numpy.array(indices, dtype=numpy.uint32)vao = VAO(\"\", mode=mlg.TRIANGLES)vao.buffer(vbo_vertices, '', [''])vao.buffer(vbo_normals, '', [''])vao.buffer(vbo_uvs, '', [''])vao.index_buffer(vbo_elements, index_element_size=)return vao", "docstring": "Creates a sphere.\n\nKeyword Args:\n radius (float): Radius or the sphere\n rings (int): number or horizontal rings\n sectors (int): number of vertical segments\n\nReturns:\n A :py:class:`demosys.opengl.vao.VAO` instance", "id": "f14475:m0"} {"signature": "def __init__(self):", "body": "super().__init__()if not glfw.init():raise ValueError(\"\")self.check_glfw_version()glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, self.gl_version.major)glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, self.gl_version.minor)glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, True)glfw.window_hint(glfw.RESIZABLE, self.resizable)glfw.window_hint(glfw.DOUBLEBUFFER, True)glfw.window_hint(glfw.DEPTH_BITS, )glfw.window_hint(glfw.SAMPLES, self.samples)monitor = Noneif self.fullscreen:monitor = glfw.get_primary_monitor()mode = glfw.get_video_mode(monitor)self.width, self.height = mode.size.width, mode.size.heightprint(\"\", mode)print(\"\", self.width, self.height)self.window = glfw.create_window(self.width, self.height, self.title, monitor, None)if not self.window:glfw.terminate()raise ValueError(\"\")if not self.cursor:glfw.set_input_mode(self.window, glfw.CURSOR, glfw.CURSOR_DISABLED)self.buffer_width, self.buffer_height = glfw.get_framebuffer_size(self.window)print(\"\", self.buffer_width, self.buffer_height)print(\"\", glfw.get_window_size(self.window))glfw.make_context_current(self.window)if self.vsync:glfw.swap_interval()glfw.set_key_callback(self.window, self.key_event_callback)glfw.set_cursor_pos_callback(self.window, self.mouse_event_callback)glfw.set_window_size_callback(self.window, self.window_resize_callback)self.ctx = moderngl.create_context(require=self.gl_version.code)context.WINDOW = selfself.fbo = self.ctx.screenself.set_default_viewport()", "docstring": "Initializes glfw, sets up key and mouse events and\ncreates a ``moderngl.Context`` using the context glfw createad.\n\nUsing the glfw window requires glfw binaries and pyGLFW.", "id": "f14477:c0:m0"} {"signature": "def use(self):", "body": "self.fbo.use()", "docstring": "Bind the window framebuffer making it the current render target", "id": "f14477:c0:m1"} {"signature": "def should_close(self):", "body": "return glfw.window_should_close(self.window)", "docstring": "Ask glfw is the window should be closed", "id": "f14477:c0:m2"} {"signature": "def close(self):", "body": "glfw.set_window_should_close(self.window, True)", "docstring": "Set the window closing state in glfw", "id": "f14477:c0:m3"} {"signature": 
"def swap_buffers(self):", "body": "self.frames += glfw.swap_buffers(self.window)self.poll_events()", "docstring": "Swaps buffers, incement the framecounter and pull events.", "id": "f14477:c0:m4"} {"signature": "def resize(self, width, height):", "body": "self.width = widthself.height = heightself.buffer_width, self.buffer_height = glfw.get_framebuffer_size(self.window)self.set_default_viewport()", "docstring": "Sets the new size and buffer size internally", "id": "f14477:c0:m5"} {"signature": "def terminate(self):", "body": "glfw.terminate()", "docstring": "Terminates the glfw library", "id": "f14477:c0:m6"} {"signature": "def poll_events(self):", "body": "glfw.poll_events()", "docstring": "Poll events from glfw", "id": "f14477:c0:m7"} {"signature": "def check_glfw_version(self):", "body": "print(\"\".format(glfw.get_version(), glfw.__version__))if glfw.get_version() < self.min_glfw_version:raise ValueError(\"\".format(self.min_glfw_version))", "docstring": "Ensure glfw library version is compatible", "id": "f14477:c0:m8"} {"signature": "def key_event_callback(self, window, key, scancode, action, mods):", "body": "self.keyboard_event(key, action, mods)", "docstring": "Key event callback for glfw.\nTranslates and forwards keyboard event to :py:func:`keyboard_event`\n\n:param window: Window event origin\n:param key: The key that was pressed or released.\n:param scancode: The system-specific scancode of the key.\n:param action: GLFW_PRESS, GLFW_RELEASE or GLFW_REPEAT\n:param mods: Bit field describing which modifier keys were held down.", "id": "f14477:c0:m9"} {"signature": "def mouse_event_callback(self, window, xpos, ypos):", "body": "self.cursor_event(xpos, ypos, , )", "docstring": "Mouse event callback from glfw.\nTranslates the events forwarding them to :py:func:`cursor_event`.\n\n:param window: The window\n:param xpos: viewport x pos\n:param ypos: viewport y pos", "id": "f14477:c0:m10"} {"signature": "def window_resize_callback(self, window, width, height):", "body": "self.resize(width, height)", "docstring": "Window resize callback for glfw\n\n:param window: The window\n:param width: New width\n:param height: New height", "id": "f14477:c0:m11"} {"signature": "def __init__(self):", "body": "super().__init__()pyglet.options[''] = Falseconfig = pyglet.gl.Config()config.double_buffer = Trueconfig.major_version = self.gl_version.majorconfig.minor_version = self.gl_version.minorconfig.forward_compatible = Trueconfig.sample_buffers = if self.samples > else config.samples = self.samplesself.window = PygletWrapper(width=self.width, height=self.height,caption=self.title,resizable=self.resizable,vsync=self.vsync,fullscreen=self.fullscreen,)self.window.set_mouse_visible(self.cursor)self.window.event(self.on_key_press)self.window.event(self.on_key_release)self.window.event(self.on_mouse_motion)self.window.event(self.on_resize)self.ctx = moderngl.create_context(require=self.gl_version.code)context.WINDOW = selfself.fbo = self.ctx.screenself.set_default_viewport()", "docstring": "Opens a window using pyglet, registers input callbacks\nand creates a moderngl context.", "id": "f14480:c0:m0"} {"signature": "def on_key_press(self, symbol, modifiers):", "body": "self.keyboard_event(symbol, self.keys.ACTION_PRESS, modifiers)", "docstring": "Pyglet specific key press callback.\nForwards and translates the events to :py:func:`keyboard_event`", "id": "f14480:c0:m1"} {"signature": "def on_key_release(self, symbol, modifiers):", "body": "self.keyboard_event(symbol, self.keys.ACTION_RELEASE, modifiers)", 
"docstring": "Pyglet specific key release callback.\nForwards and translates the events to :py:func:`keyboard_event`", "id": "f14480:c0:m2"} {"signature": "def on_mouse_motion(self, x, y, dx, dy):", "body": "self.cursor_event(x, self.buffer_height - y, dx, dy)", "docstring": "Pyglet specific mouse motion callback.\nForwards and traslates the event to :py:func:`cursor_event`", "id": "f14480:c0:m3"} {"signature": "def on_resize(self, width, height):", "body": "self.width, self.height = width, heightself.buffer_width, self.buffer_height = width, heightself.resize(width, height)", "docstring": "Pyglet specific callback for window resize events.", "id": "f14480:c0:m4"} {"signature": "def use(self):", "body": "self.fbo.use()", "docstring": "Render to this window", "id": "f14480:c0:m5"} {"signature": "def swap_buffers(self):", "body": "if not self.window.context:returnself.frames += self.window.flip()self.window.dispatch_events()", "docstring": "Swap buffers, increment frame counter and pull events", "id": "f14480:c0:m6"} {"signature": "def should_close(self) -> bool:", "body": "return self.window.has_exit", "docstring": "returns the ``has_exit`` state in the pyglet window", "id": "f14480:c0:m7"} {"signature": "def close(self):", "body": "self.window.close()", "docstring": "Sets the close state in the pyglet window", "id": "f14480:c0:m8"} {"signature": "def terminate(self):", "body": "pass", "docstring": "No cleanup is really needed. Empty method", "id": "f14480:c0:m9"} {"signature": "def on_resize(self, width, height):", "body": "pass", "docstring": "For some reason pyglet calls its own resize handler randomly", "id": "f14480:c1:m0"} {"signature": "def __init__(self):", "body": "super().__init__()self._closed = Falsegl = QtOpenGL.QGLFormat()gl.setVersion(self.gl_version.major, self.gl_version.minor)gl.setProfile(QtOpenGL.QGLFormat.CoreProfile)gl.setDepthBufferSize()gl.setDoubleBuffer(True)gl.setSwapInterval( if self.vsync else )if self.samples > :gl.setSampleBuffers(True)gl.setSamples(self.samples)self.app = QtWidgets.QApplication([])self.widget = QtOpenGL.QGLWidget(gl)self.widget.setWindowTitle(self.title)if self.fullscreen:rect = QtWidgets.QDesktopWidget().screenGeometry()self.width = rect.width()self.height = rect.height()self.buffer_width = rect.width() * self.widget.devicePixelRatio()self.buffer_height = rect.height() * self.widget.devicePixelRatio()if self.resizable:size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,QtWidgets.QSizePolicy.Expanding,)self.widget.setSizePolicy(size_policy)self.widget.resize(self.width, self.height)else:self.widget.setFixedSize(self.width, self.height)self.widget.move(QtWidgets.QDesktopWidget().rect().center() - self.widget.rect().center())self.widget.resizeGL = self.resize self.widget.show()if not self.cursor:self.widget.setCursor(QtCore.Qt.BlankCursor)if self.fullscreen:self.widget.showFullScreen()self.widget.setMouseTracking(True)self.widget.keyPressEvent = self.keyPressEventself.widget.keyReleaseEvent = self.keyReleaseEventself.widget.mouseMoveEvent = self.mouseMoveEventself.ctx = moderngl.create_context(require=self.gl_version.code)context.WINDOW = selfself.fbo = self.ctx.screenself.buffer_width = self.width * self.widget.devicePixelRatio()self.buffer_height = self.height * self.widget.devicePixelRatio()self.set_default_viewport()", "docstring": "Creates a pyqt application and window overriding the\nbuilt in event loop. 
Sets up keyboard and mouse events\nand creates a ``monderngl.Context``.", "id": "f14482:c0:m0"} {"signature": "def keyPressEvent(self, event):", "body": "self.keyboard_event(event.key(), self.keys.ACTION_PRESS, )", "docstring": "Pyqt specific key press callback function.\nTranslates and forwards events to :py:func:`keyboard_event`.", "id": "f14482:c0:m1"} {"signature": "def keyReleaseEvent(self, event):", "body": "self.keyboard_event(event.key(), self.keys.ACTION_RELEASE, )", "docstring": "Pyqt specific key release callback function.\nTranslates and forwards events to :py:func:`keyboard_event`.", "id": "f14482:c0:m2"} {"signature": "def mouseMoveEvent(self, event):", "body": "self.cursor_event(event.x(), event.y(), , )", "docstring": "Pyqt specific mouse event callback\nTranslates and forwards events to :py:func:`cursor_event`.", "id": "f14482:c0:m3"} {"signature": "def resize(self, width, height):", "body": "if not self.fbo:returnself.width = width // self.widget.devicePixelRatio()self.height = height // self.widget.devicePixelRatio()self.buffer_width = widthself.buffer_height = heightsuper().resize(width, height)", "docstring": "Pyqt specific resize callback.", "id": "f14482:c0:m4"} {"signature": "def swap_buffers(self):", "body": "self.frames += self.widget.swapBuffers()self.app.processEvents()", "docstring": "Swaps buffers, increments the frame counter and pulls events", "id": "f14482:c0:m5"} {"signature": "def use(self):", "body": "self.fbo.use()", "docstring": "Make the window's framebuffer the current render target", "id": "f14482:c0:m6"} {"signature": "def should_close(self) -> bool:", "body": "return self._closed", "docstring": "Checks if the internal close state is set", "id": "f14482:c0:m7"} {"signature": "def close(self):", "body": "self._closed = True", "docstring": "Set the internal close state", "id": "f14482:c0:m8"} {"signature": "def terminate(self):", "body": "QtCore.QCoreApplication.instance().quit()", "docstring": "Quits the running qt application", "id": "f14482:c0:m9"} {"signature": "def __init__(self):", "body": "super().__init__()self.window_closing = Falseself.tmp_size_x = c_int()self.tmp_size_y = c_int()print(\"\", self.get_library_version())if sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO) != :raise ValueError(\"\")sdl2.video.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_MAJOR_VERSION, self.gl_version.major)sdl2.video.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_MINOR_VERSION, self.gl_version.minor)sdl2.video.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_PROFILE_MASK, sdl2.SDL_GL_CONTEXT_PROFILE_CORE)sdl2.video.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_FORWARD_COMPATIBLE_FLAG, )sdl2.video.SDL_GL_SetAttribute(sdl2.SDL_GL_DOUBLEBUFFER, )sdl2.video.SDL_GL_SetAttribute(sdl2.SDL_GL_DEPTH_SIZE, )sdl2.SDL_ShowCursor(sdl2.SDL_ENABLE if self.cursor else sdl2.SDL_DISABLE)if self.samples > :sdl2.video.SDL_GL_SetAttribute(sdl2.SDL_GL_MULTISAMPLEBUFFERS, )sdl2.video.SDL_GL_SetAttribute(sdl2.SDL_GL_MULTISAMPLESAMPLES, self.samples)flags = sdl2.SDL_WINDOW_OPENGLif self.fullscreen:flags |= sdl2.SDL_WINDOW_FULLSCREEN_DESKTOPelse:if self.resizable:flags |= sdl2.SDL_WINDOW_RESIZABLEself.window = sdl2.SDL_CreateWindow(self.title.encode(),sdl2.SDL_WINDOWPOS_UNDEFINED,sdl2.SDL_WINDOWPOS_UNDEFINED,self.width,self.height,flags)if not self.window:raise ValueError(\"\", sdl2.SDL_GetError())self.context = sdl2.SDL_GL_CreateContext(self.window)sdl2.video.SDL_GL_SetSwapInterval( if self.vsync else )self.ctx = moderngl.create_context(require=self.gl_version.code)context.WINDOW = selfself.fbo = 
self.ctx.screenself.set_default_viewport()", "docstring": "Initializes sdl2, sets up key and mouse events and\ncreates a ``moderngl.Context`` using the context sdl2 createad.\n\nUsing the sdl2 window requires sdl binaries and PySDL2.", "id": "f14484:c0:m0"} {"signature": "def use(self):", "body": "self.fbo.use()", "docstring": "Bind the window framebuffer making it the current render target", "id": "f14484:c0:m1"} {"signature": "def resize(self, width, height):", "body": "self.width = widthself.height = heightself.buffer_width, self.buffer_height = self.width, self.heightself.set_default_viewport()", "docstring": "Sets the new size and buffer size internally", "id": "f14484:c0:m3"} {"signature": "def __init__(self):", "body": "self.frames = self.width = settings.WINDOW[''][]self.height = settings.WINDOW[''][]self.buffer_width = self.widthself.buffer_height = self.heightself.fbo = Noneself.sys_camera = Noneself.timer = Noneself.timeline = Noneself.gl_version = GLVersion(*settings.OPENGL[''],int(\"\".format(*settings.OPENGL[''])))self.title = settings.WINDOW.get('') or \"\"self.aspect_ratio = settings.WINDOW.get('', / )self.samples = settings.WINDOW.get('', )self.resizable = settings.WINDOW.get('') or Falseself.fullscreen = settings.WINDOW.get('')self.vsync = settings.WINDOW.get('')self.cursor = settings.WINDOW.get('')self.clear_color = (, , , )self.clear_depth = ()self.ctx = None", "docstring": "Base window intializer reading values from ``settings``.\n\nWhen creating the initializer in your own window always call\nthis methods using ``super().__init__()``.\n\nThe main responsebility of the initializer is to:\n\n* initialize the window library\n* identify the window framebuffer\n* set up keyboard and mouse events\n* create the ``moderngl.Context`` instance\n* register the window in ``context.WINDOW``", "id": "f14485:c1:m0"} {"signature": "@propertydef size(self) -> Tuple[int, int]:", "body": "return (self.width, self.height)", "docstring": "(width, height) tuple containing the window size.\n\nNote that for certain displays we rely on :py:func:`buffer_size`\nto get the actual window buffer size. This is fairly common\nfor retina and 4k displays where the UI scale is > 1.0", "id": "f14485:c1:m1"} {"signature": "@propertydef buffer_size(self) -> Tuple[int, int]:", "body": "return (self.buffer_width, self.buffer_height)", "docstring": "(width, heigh) buffer size of the window.\n\nThis is the actual buffer size of the window\ntaking UI scale into account. A 1920 x 1080\nwindow running in an environment with UI scale 2.0\nwould have a 3840 x 2160 window buffer.", "id": "f14485:c1:m2"} {"signature": "def draw(self, current_time, frame_time):", "body": "self.set_default_viewport()self.timeline.draw(current_time, frame_time, self.fbo)", "docstring": "Draws a frame. 
Internally it calls the\nconfigured timeline's draw method.\n\nArgs:\n current_time (float): The current time (preferrably always from the configured timer class)\n frame_time (float): The duration of the previous frame in seconds", "id": "f14485:c1:m3"} {"signature": "def clear(self):", "body": "self.ctx.fbo.clear(red=self.clear_color[],green=self.clear_color[],blue=self.clear_color[],alpha=self.clear_color[],depth=self.clear_depth,)", "docstring": "Clear the window buffer", "id": "f14485:c1:m4"} {"signature": "def clear_values(self, red=, green=, blue=, alpha=, depth=):", "body": "self.clear_color = (red, green, blue, alpha)self.clear_depth = depth", "docstring": "Sets the clear values for the window buffer.\n\nArgs:\n red (float): red compoent\n green (float): green compoent\n blue (float): blue compoent\n alpha (float): alpha compoent\n depth (float): depth value", "id": "f14485:c1:m5"} {"signature": "def use(self):", "body": "raise NotImplementedError(\"\".format(self.__class__))", "docstring": "Set the window buffer as the current render target\n\nRaises:\n NotImplementedError", "id": "f14485:c1:m6"} {"signature": "def swap_buffers(self):", "body": "raise NotImplementedError()", "docstring": "Swap the buffers. Most windows have at least support for double buffering\ncycling a back and front buffer.\n\nRaises:\n NotImplementedError", "id": "f14485:c1:m7"} {"signature": "def resize(self, width, height):", "body": "self.set_default_viewport()", "docstring": "Resize the window. Should normallty be overriden\nwhen implementing a window as most window libraries need additional logic here.\n\nArgs:\n width (int): Width of the window\n height: (int): Height of the window", "id": "f14485:c1:m8"} {"signature": "def close(self):", "body": "raise NotImplementedError()", "docstring": "Set the window in close state. This doesn't actually close the window,\nbut should make :py:func:`should_close` return ``True`` so the\nmain loop can exit gracefully.\n\nRaises:\n NotImplementedError", "id": "f14485:c1:m9"} {"signature": "def should_close(self) -> bool:", "body": "raise NotImplementedError()", "docstring": "Check if window should close. 
This should always be checked in the main draw loop.\n\nRaises:\n NotImplementedError", "id": "f14485:c1:m10"} {"signature": "def terminate(self):", "body": "raise NotImplementedError()", "docstring": "The actual teardown of the window.\n\nRaises:\n NotImplementedError", "id": "f14485:c1:m11"} {"signature": "def keyboard_event(self, key, action, modifier):", "body": "if key == self.keys.ESCAPE:self.close()returnif key == self.keys.SPACE and action == self.keys.ACTION_PRESS:self.timer.toggle_pause()if key == self.keys.D:if action == self.keys.ACTION_PRESS:self.sys_camera.move_right(True)elif action == self.keys.ACTION_RELEASE:self.sys_camera.move_right(False)elif key == self.keys.A:if action == self.keys.ACTION_PRESS:self.sys_camera.move_left(True)elif action == self.keys.ACTION_RELEASE:self.sys_camera.move_left(False)elif key == self.keys.W:if action == self.keys.ACTION_PRESS:self.sys_camera.move_forward(True)if action == self.keys.ACTION_RELEASE:self.sys_camera.move_forward(False)elif key == self.keys.S:if action == self.keys.ACTION_PRESS:self.sys_camera.move_backward(True)if action == self.keys.ACTION_RELEASE:self.sys_camera.move_backward(False)elif key == self.keys.Q:if action == self.keys.ACTION_PRESS:self.sys_camera.move_down(True)if action == self.keys.ACTION_RELEASE:self.sys_camera.move_down(False)elif key == self.keys.E:if action == self.keys.ACTION_PRESS:self.sys_camera.move_up(True)if action == self.keys.ACTION_RELEASE:self.sys_camera.move_up(False)if key == self.keys.X and action == self.keys.ACTION_PRESS:screenshot.create()if key == self.keys.R and action == self.keys.ACTION_PRESS:project.instance.reload_programs()if key == self.keys.RIGHT and action == self.keys.ACTION_PRESS:self.timer.set_time(self.timer.get_time() + )if key == self.keys.LEFT and action == self.keys.ACTION_PRESS:self.timer.set_time(self.timer.get_time() - )self.timeline.key_event(key, action, modifier)", "docstring": "Handles the standard keyboard events such as camera movements,\ntaking a screenshot, closing the window etc.\n\nCan be overriden add new keyboard events. Ensure this method\nis also called if you want to keep the standard features.\n\nArguments:\n key: The key that was pressed or released\n action: The key action. 
Can be `ACTION_PRESS` or `ACTION_RELEASE`\n modifier: Modifiers such as holding shift or ctrl", "id": "f14485:c1:m12"} {"signature": "def cursor_event(self, x, y, dx, dy):", "body": "self.sys_camera.rot_state(x, y)", "docstring": "The standard mouse movement event method.\nCan be overriden to add new functionality.\nBy default this feeds the system camera with new values.\n\nArgs:\n x: The current mouse x position\n y: The current mouse y position\n dx: Delta x postion (x position difference from the previous event)\n dy: Delta y postion (y position difference from the previous event)", "id": "f14485:c1:m13"} {"signature": "def print_context_info(self):", "body": "print(\"\")print('', moderngl.__version__)print('', self.ctx.info[''])print('', self.ctx.info[''])print('', self.ctx.info[''])print('', sys.version)print('', sys.platform)print('', self.ctx.version_code)", "docstring": "Prints moderngl context info.", "id": "f14485:c1:m14"} {"signature": "def set_default_viewport(self):", "body": "expected_height = int(self.buffer_width / self.aspect_ratio)blank_space = self.buffer_height - expected_heightself.fbo.viewport = (, blank_space // , self.buffer_width, expected_height)", "docstring": "Calculates the viewport based on the configured aspect ratio in settings.\nWill add black borders if the window do not match the viewport.", "id": "f14485:c1:m15"} {"signature": "def window(raise_on_error=True) -> BaseWindow:", "body": "if not WINDOW and raise_on_error:raise RuntimeError(\"\")return WINDOW", "docstring": "The window instance we are rendering to\n\n:param raise_on_error: Raise an error if the window is not created yet", "id": "f14486:m0"} {"signature": "def ctx() -> moderngl.Context:", "body": "win = window()if not win.ctx:raise RuntimeError(\"\")return win.ctx", "docstring": "ModernGL context", "id": "f14486:m1"} {"signature": "def __init__(self):", "body": "super().__init__()self.headless_frames = getattr(settings, '', )self.headless_duration = getattr(settings, '', )if not self.headless_frames and not self.headless_duration:raise ImproperlyConfigured(\"\")self._close = Falseself.ctx = moderngl.create_standalone_context(require=self.gl_version.code)context.WINDOW = selfself.fbo = self.ctx.framebuffer(color_attachments=self.ctx.texture(self.size, ),depth_attachment=self.ctx.depth_texture(self.size),)self.set_default_viewport()self.fbo.use()", "docstring": "Creates a standalone ``moderngl.Context``.\nThe headless window currently have no event input from keyboard or mouse.\n\nUsing this window require either ``settings`` values to be present:\n\n* ``HEADLESS_FRAMES``: How many frames should be rendered before closing the window\n* ``HEADLESS_DURATION``: How many seconds rendering should last before the window closes", "id": "f14487:c0:m0"} {"signature": "def draw(self, current_time, frame_time):", "body": "super().draw(current_time, frame_time)if self.headless_duration and current_time >= self.headless_duration:self.close()", "docstring": "Calls the superclass ``draw()`` methods and checks ``HEADLESS_FRAMES``/``HEADLESS_DURATION``", "id": "f14487:c0:m1"} {"signature": "def use(self):", "body": "self.fbo.use()", "docstring": "Binds the framebuffer representing this window", "id": "f14487:c0:m2"} {"signature": "def should_close(self) -> bool:", "body": "return self._close", "docstring": "Checks if the internal close state is set", "id": "f14487:c0:m3"} {"signature": "def close(self):", "body": "self._close = True", "docstring": "Sets the internal close state", "id": "f14487:c0:m4"} 
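The ``set_default_viewport`` body above letterboxes the framebuffer so the configured aspect ratio is preserved. A worked version of that arithmetic is sketched below; the x offset of 0 and the division by 2 (to centre the bars) are assumptions, since those constants are elided in the record, and the ``'aspect_ratio'`` settings key is likewise an assumed name.

```python
# Worked sketch of the letterboxing done by set_default_viewport.
buffer_width, buffer_height = 1920, 1200   # example buffer size
aspect_ratio = 16 / 9                      # e.g. from settings.WINDOW (key name assumed)

expected_height = int(buffer_width / aspect_ratio)   # 1080
blank_space = buffer_height - expected_height        # 120 px of unused height
viewport = (0, blank_space // 2, buffer_width, expected_height)
# -> (0, 60, 1920, 1080): the scene is centred with 60 px bars above and
#    below, so the rendered image keeps the configured aspect ratio.
```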
{"signature": "def resize(self, width, height):", "body": "pass", "docstring": "Resizing is not supported by the headless window.\nWe simply override with an empty method.", "id": "f14487:c0:m5"} {"signature": "def swap_buffers(self):", "body": "self.frames += if self.headless_frames and self.frames >= self.headless_frames:self.close()", "docstring": "Headless window currently don't support double buffering.\nWe only increment the frame counter here.", "id": "f14487:c0:m6"} {"signature": "def terminate(self):", "body": "pass", "docstring": "No teardown is needed. We override with an empty method", "id": "f14487:c0:m7"} {"signature": "@propertydef label(self) -> str:", "body": "return self._kwargs.get('')", "docstring": "(str) The internal label this resource is associated with", "id": "f14493:c0:m1"} {"signature": "@propertydef path(self):", "body": "return self._kwargs.get('')", "docstring": "(str) The path to a resource when a single file is specified", "id": "f14493:c0:m2"} {"signature": "@propertydef loader(self):", "body": "return self._kwargs.get('') or self.default_loader", "docstring": "(str) Name of the loader", "id": "f14493:c0:m3"} {"signature": "@propertydef loader_cls(self) -> Type:", "body": "return self._kwargs.get('')", "docstring": "(Type) The loader class for this resource", "id": "f14493:c0:m5"} {"signature": "@propertydef resolved_path(self) -> Path:", "body": "return self.kwargs.get('')", "docstring": "(pathlib.Path) The resolved path by a finder", "id": "f14493:c0:m7"} {"signature": "@propertydef kwargs(self) -> Dict[str, str]:", "body": "return self._kwargs", "docstring": "(dict) All keywords arguments passed to the resource", "id": "f14493:c0:m9"} {"signature": "def load(self, meta: ResourceDescription) -> Any:", "body": "self._check_meta(meta)self.resolve_loader(meta)return meta.loader_cls(meta).load()", "docstring": "Loads a resource or return existing one\n\n:param meta: The resource description", "id": "f14493:c1:m2"} {"signature": "def add(self, meta):", "body": "self._check_meta(meta)self.resolve_loader(meta)self._resources.append(meta)", "docstring": "Add a resource to this pool.\nThe resource is loaded and returned when ``load_pool()`` is called.\n\n:param meta: The resource description", "id": "f14493:c1:m3"} {"signature": "def load_pool(self):", "body": "for meta in self._resources:resource = self.load(meta)yield meta, resourceself._resources = []", "docstring": "Loads all the data files using the configured finders.", "id": "f14493:c1:m4"} {"signature": "def resolve_loader(self, meta: ResourceDescription):", "body": "meta.loader_cls = self.get_loader(meta, raise_on_error=True)", "docstring": "Attempts to assign a loader class to a resource description\n\n:param meta: The resource description instance", "id": "f14493:c1:m5"} {"signature": "def get_loader(self, meta: ResourceDescription, raise_on_error=False) -> BaseLoader:", "body": "for loader in self._loaders:if loader.name == meta.loader:return loaderif raise_on_error:raise ImproperlyConfigured(\"\".format(meta.loader, meta, [loader.name for loader in self._loaders]))", "docstring": "Attempts to get a loader\n\n:param meta: The resource description instance\n:param raise_on_error: Raise ImproperlyConfigured if the loader cannot be resolved\n:returns: The requested loader class", "id": "f14493:c1:m6"} {"signature": "def get(self, name) -> Track:", "body": "name = name.lower()track = self.track_map.get(name)if not track:track = Track(name)self.tacks.append(track)self.track_map[name] = trackreturn track", 
"docstring": "Get or create a Track object.\n\n:param name: Name of the track\n:return: Track object", "id": "f14494:c0:m1"} {"signature": "def resolve_loader(self, meta: SceneDescription):", "body": "for loader_cls in self._loaders:if loader_cls.supports_file(meta):meta.loader_cls = loader_clsbreakelse:raise ImproperlyConfigured(\"\".format(meta.path))", "docstring": "Resolve scene loader based on file extension", "id": "f14495:c0:m1"} {"signature": "def resolve_loader(self, meta: ProgramDescription):", "body": "if not meta.loader:meta.loader = '' if meta.path else ''for loader_cls in self._loaders:if loader_cls.name == meta.loader:meta.loader_cls = loader_clsbreakelse:raise ImproperlyConfigured((\"\"\"\").format(meta.path))", "docstring": "Resolve program loader", "id": "f14499:c0:m1"} {"signature": "def update(self, **kwargs):", "body": "for name, value in kwargs.items():setattr(self, name, value)", "docstring": "Override settings values", "id": "f14501:c0:m1"} {"signature": "def add_program_dir(self, directory):", "body": "dirs = list(self.PROGRAM_DIRS)dirs.append(directory)self.PROGRAM_DIRS = dirs", "docstring": "Hack in program directory", "id": "f14501:c0:m4"} {"signature": "def add_texture_dir(self, directory):", "body": "dirs = list(self.TEXTURE_DIRS)dirs.append(directory)self.TEXTURE_DIRS = dirs", "docstring": "Hack in texture directory", "id": "f14501:c0:m5"} {"signature": "def add_data_dir(self, directory):", "body": "dirs = list(self.DATA_DIRS)dirs.append(directory)self.DATA_DIRS = dirs", "docstring": "Hack in a data directory", "id": "f14501:c0:m6"} {"signature": "def local(path):", "body": "return os.path.join(__name__.split('')[-], path)", "docstring": "Prepend the effect package name to a path so resources\ncan still be loaded when copied into a new effect package.", "id": "f14509:m0"} {"signature": "def calculate_triangles(n: int):", "body": "if n < :return if n == :return return (( * n - ) * ) + calculate_triangles(n - )", "docstring": "Calculate the number of triangles for the barycentric subdivision of a\nsingle triangle (where the inner and outer subdivision is equal", "id": "f14512:m0"} {"signature": "def _get_queues(g, queues, edge, edge_type):", "body": "INT = numbers.Integralif isinstance(queues, INT):queues = [queues]elif queues is None:if edge is not None:if isinstance(edge, tuple):if isinstance(edge[], INT) and isinstance(edge[], INT):queues = [g.edge_index[edge]]elif isinstance(edge[], collections.Iterable):if np.array([len(e) == for e in edge]).all():queues = [g.edge_index[e] for e in edge]else:queues = [g.edge_index[edge]]elif edge_type is not None:if isinstance(edge_type, collections.Iterable):edge_type = set(edge_type)else:edge_type = set([edge_type])tmp = []for e in g.edges():if g.ep(e, '') in edge_type:tmp.append(g.edge_index[e])queues = np.array(tmp, int)if queues is None:queues = range(g.number_of_edges())return queues", "docstring": "Used to specify edge indices from different types of arguments.", "id": "f14533:m0"} {"signature": "def animate(self, out=None, t=None, line_kwargs=None,scatter_kwargs=None, **kwargs):", "body": "if not self._initialized:msg = (\"\"\"\")raise QueueingToolError(msg)if not HAS_MATPLOTLIB:msg = \"\"raise ImportError(msg)self._update_all_colors()kwargs.setdefault('', self.colors[''])fig = plt.figure(figsize=kwargs.get('', (, )))ax = fig.gca()mpl_kwargs = {'': line_kwargs,'': scatter_kwargs,'': kwargs.get('')}line_args, scat_args = self.g.lines_scatter_args(**mpl_kwargs)lines = LineCollection(**line_args)lines = 
ax.add_collection(lines)scatt = ax.scatter(**scat_args)t = np.infty if t is None else tnow = self._tdef update(frame_number):if t is not None:if self._t > now + t:return Falseself._simulate_next_event(slow=True)lines.set_color(line_args[''])scatt.set_edgecolors(scat_args[''])scatt.set_facecolor(scat_args[''])if hasattr(ax, ''):ax.set_facecolor(kwargs[''])else:ax.set_axis_bgcolor(kwargs[''])ax.get_xaxis().set_visible(False)ax.get_yaxis().set_visible(False)animation_args = {'': None,'': None,'': None,'': None,'': False,'': ,'': None,'': update,'': None,'': fig,'': None,}for key, value in kwargs.items():if key in animation_args:animation_args[key] = valueanimation = FuncAnimation(**animation_args)if '' not in kwargs:plt.ioff()plt.show()else:save_args = {'': None,'': None,'': None,'': None,'': None,'': None,'': None,'': None,'': None,'': None}for key, value in kwargs.items():if key in save_args:save_args[key] = valueanimation.save(**save_args)", "docstring": "Animates the network as it's simulating.\n\n The animations can be saved to disk or viewed in interactive\n mode. Closing the window ends the animation if viewed in\n interactive mode. This method calls\n :meth:`~matplotlib.axes.scatter`, and\n :class:`~matplotlib.collections.LineCollection`, and any\n keyword arguments they accept can be passed to them.\n\n Parameters\n ----------\n out : str (optional)\n The location where the frames for the images will be saved.\n If this parameter is not given, then the animation is shown\n in interactive mode.\n t : float (optional)\n The amount of simulation time to simulate forward. If\n given, and ``out`` is given, ``t`` is used instead of\n ``n``.\n line_kwargs : dict (optional, default: None)\n Any keyword arguments accepted by\n :class:`~matplotlib.collections.LineCollection`.\n scatter_kwargs : dict (optional, default: None)\n Any keyword arguments accepted by\n :meth:`~matplotlib.axes.Axes.scatter`.\n bgcolor : list (optional, keyword only)\n A list with 4 floats representing a RGBA color. The\n default is defined in ``self.colors['bgcolor']``.\n figsize : tuple (optional, keyword only, default: ``(7, 7)``)\n The width and height of the figure in inches.\n **kwargs :\n This method calls\n :class:`~matplotlib.animation.FuncAnimation` and\n optionally :meth:`.matplotlib.animation.FuncAnimation.save`.\n Any keyword that can be passed to these functions are\n passed via ``kwargs``.\n\n Notes\n -----\n There are several parameters automatically set and passed to\n matplotlib's :meth:`~matplotlib.axes.Axes.scatter`,\n :class:`~matplotlib.collections.LineCollection`, and\n :class:`~matplotlib.animation.FuncAnimation` by default.\n These include:\n\n * :class:`~matplotlib.animation.FuncAnimation`: Uses the\n defaults for that function. Saving the animation is done\n by passing the 'filename' keyword argument to this method.\n This method also accepts any keyword arguments accepted\n by :meth:`~matplotlib.animation.FuncAnimation.save`.\n * :class:`~matplotlib.collections.LineCollection`: The default\n arguments are taken from\n :meth:`.QueueNetworkDiGraph.lines_scatter_args`.\n * :meth:`~matplotlib.axes.Axes.scatter`: The default\n arguments are taken from\n :meth:`.QueueNetworkDiGraph.lines_scatter_args`.\n\n Raises\n ------\n QueueingToolError\n Will raise a :exc:`.QueueingToolError` if the\n ``QueueNetwork`` has not been initialized. 
Call\n :meth:`.initialize` before running.\n\n Examples\n --------\n This function works similarly to ``QueueNetwork's``\n :meth:`.draw` method.\n\n >>> import queueing_tool as qt\n >>> g = qt.generate_pagerank_graph(100, seed=13)\n >>> net = qt.QueueNetwork(g, seed=13)\n >>> net.initialize()\n >>> net.animate(figsize=(4, 4)) # doctest: +SKIP\n\n To stop the animation just close the window. If you want to\n write the animation to disk run something like the following:\n\n >>> kwargs = {\n ... 'filename': 'test.mp4',\n ... 'frames': 300,\n ... 'fps': 30,\n ... 'writer': 'mencoder',\n ... 'figsize': (4, 4),\n ... 'vertex_size': 15\n ... }\n >>> net.animate(**kwargs) # doctest: +SKIP", "id": "f14533:c1:m9"} {"signature": "def clear(self):", "body": "self._t = self.num_events = self.num_agents = np.zeros(self.nE, int)self._fancy_heap = PriorityQueue()self._prev_edge = Noneself._initialized = Falseself.reset_colors()for q in self.edge2queue:q.clear()", "docstring": "Resets the queue to its initial state.\n\n The attributes ``t``, ``num_events``, ``num_agents`` are set to\n zero, :meth:`.reset_colors` is called, and the\n :meth:`.QueueServer.clear` method is called for each queue in\n the network.\n\n Notes\n -----\n ``QueueNetwork`` must be re-initialized before any simulations\n can run.", "id": "f14533:c1:m10"} {"signature": "def clear_data(self, queues=None, edge=None, edge_type=None):", "body": "queues = _get_queues(self.g, queues, edge, edge_type)for k in queues:self.edge2queue[k].data = {}", "docstring": "Clears data from all queues.\n\n If none of the parameters are given then every queue's data is\n cleared.\n\n Parameters\n ----------\n queues : int or an iterable of int (optional)\n The edge index (or an iterable of edge indices) identifying\n the :class:`QueueServer(s)<.QueueServer>` whose data will\n be cleared.\n edge : 2-tuple of int or *array_like* (optional)\n Explicitly specify which queues' data to clear. 
Must be\n either:\n\n * A 2-tuple of the edge's source and target vertex\n indices, or\n * An iterable of 2-tuples of the edge's source and\n target vertex indices.\n\n edge_type : int or an iterable of int (optional)\n A integer, or a collection of integers identifying which\n edge types will have their data cleared.", "id": "f14533:c1:m11"} {"signature": "def copy(self):", "body": "net = QueueNetwork(None)net.g = self.g.copy()net.max_agents = copy.deepcopy(self.max_agents)net.nV = copy.deepcopy(self.nV)net.nE = copy.deepcopy(self.nE)net.num_agents = copy.deepcopy(self.num_agents)net.num_events = copy.deepcopy(self.num_events)net._t = copy.deepcopy(self._t)net._initialized = copy.deepcopy(self._initialized)net._prev_edge = copy.deepcopy(self._prev_edge)net._blocking = copy.deepcopy(self._blocking)net.colors = copy.deepcopy(self.colors)net.out_edges = copy.deepcopy(self.out_edges)net.in_edges = copy.deepcopy(self.in_edges)net.edge2queue = copy.deepcopy(self.edge2queue)net._route_probs = copy.deepcopy(self._route_probs)if net._initialized:keys = [q._key() for q in net.edge2queue if q._time < np.infty]net._fancy_heap = PriorityQueue(keys, net.nE)return net", "docstring": "Returns a deep copy of itself.", "id": "f14533:c1:m12"} {"signature": "def draw(self, update_colors=True, line_kwargs=None,scatter_kwargs=None, **kwargs):", "body": "if not HAS_MATPLOTLIB:raise ImportError(\"\")if update_colors:self._update_all_colors()if '' not in kwargs:kwargs[''] = self.colors['']self.g.draw_graph(line_kwargs=line_kwargs,scatter_kwargs=scatter_kwargs, **kwargs)", "docstring": "Draws the network. The coloring of the network corresponds\n to the number of agents at each queue.\n\n Parameters\n ----------\n update_colors : ``bool`` (optional, default: ``True``).\n Specifies whether all the colors are updated.\n line_kwargs : dict (optional, default: None)\n Any keyword arguments accepted by\n :class:`~matplotlib.collections.LineCollection`\n scatter_kwargs : dict (optional, default: None)\n Any keyword arguments accepted by\n :meth:`~matplotlib.axes.Axes.scatter`.\n bgcolor : list (optional, keyword only)\n A list with 4 floats representing a RGBA color. The\n default is defined in ``self.colors['bgcolor']``.\n figsize : tuple (optional, keyword only, default: ``(7, 7)``)\n The width and height of the canvas in inches.\n **kwargs\n Any parameters to pass to\n :meth:`.QueueNetworkDiGraph.draw_graph`.\n\n Notes\n -----\n This method relies heavily on\n :meth:`.QueueNetworkDiGraph.draw_graph`. Also, there is a\n parameter that sets the background color of the canvas, which\n is the ``bgcolor`` parameter.\n\n Examples\n --------\n To draw the current state of the network, call:\n\n >>> import queueing_tool as qt\n >>> g = qt.generate_pagerank_graph(100, seed=13)\n >>> net = qt.QueueNetwork(g, seed=13)\n >>> net.initialize(100)\n >>> net.simulate(1200)\n >>> net.draw() # doctest: +SKIP\n\n If you specify a file name and location, the drawing will be\n saved to disk. For example, to save the drawing to the current\n working directory do the following:\n\n >>> net.draw(fname=\"state.png\", scatter_kwargs={'s': 40}) # doctest: +SKIP\n\n .. figure:: current_state1.png\n :align: center\n\n The shade of each edge depicts how many agents are located at\n the corresponding queue. The shade of each vertex is determined\n by the total number of inbound agents. 
Although loops are not\n visible by default, the vertex that corresponds to a loop shows\n how many agents are in that loop.\n\n There are several additional parameters that can be passed --\n all :meth:`.QueueNetworkDiGraph.draw_graph` parameters are\n valid. For example, to show the edges as dashed lines do the\n following.\n\n >>> net.draw(line_kwargs={'linestyle': 'dashed'}) # doctest: +SKIP", "id": "f14533:c1:m13"} {"signature": "def get_agent_data(self, queues=None, edge=None, edge_type=None, return_header=False):", "body": "queues = _get_queues(self.g, queues, edge, edge_type)data = {}for qid in queues:for agent_id, dat in self.edge2queue[qid].data.items():datum = np.zeros((len(dat), ))datum[:, :] = np.array(dat)datum[:, ] = qidif agent_id in data:data[agent_id] = np.vstack((data[agent_id], datum))else:data[agent_id] = datumdType = [('', float),('', float),('', float),('', float),('', float),('', float)]for agent_id, dat in data.items():datum = np.array([tuple(d) for d in dat.tolist()], dtype=dType)datum = np.sort(datum, order='')data[agent_id] = np.array([tuple(d) for d in datum])if return_header:return data, ''return data", "docstring": "Gets data from queues and organizes it by agent.\n\n If none of the parameters are given then data from every\n :class:`.QueueServer` is retrieved.\n\n Parameters\n ----------\n queues : int or *array_like* (optional)\n The edge index (or an iterable of edge indices) identifying\n the :class:`QueueServer(s)<.QueueServer>` whose data will\n be retrieved.\n edge : 2-tuple of int or *array_like* (optional)\n Explicitly specify which queues to retrieve agent data\n from. Must be either:\n\n * A 2-tuple of the edge's source and target vertex\n indices, or\n * An iterable of 2-tuples of the edge's source and\n target vertex indices.\n\n edge_type : int or an iterable of int (optional)\n A integer, or a collection of integers identifying which\n edge types to retrieve agent data from.\n return_header : bool (optonal, default: False)\n Determines whether the column headers are returned.\n\n Returns\n -------\n dict\n Returns a ``dict`` where the keys are the\n :class:`Agent's<.Agent>` ``agent_id`` and the values are\n :class:`ndarrays<~numpy.ndarray>` for that\n :class:`Agent's<.Agent>` data. The columns of this array\n are as follows:\n\n * First: The arrival time of an agent.\n * Second: The service start time of an agent.\n * Third: The departure time of an agent.\n * Fourth: The length of the queue upon the agents arrival.\n * Fifth: The total number of :class:`Agents<.Agent>` in the\n :class:`.QueueServer`.\n * Sixth: the :class:`QueueServer's<.QueueServer>` id\n (its edge index).\n\n headers : str (optional)\n A comma seperated string of the column headers. 
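Because ``get_agent_data`` keys its arrays by agent and sorts rows by arrival time, per-agent statistics reduce to column arithmetic on the documented layout (arrival, service start, departure, queue length, total, queue id). A small sketch follows, reusing the network setup from the doctests elsewhere in these records; the assumption that unfinished visits carry a zero departure time is mine, not stated in the source.

```python
# Hedged sketch: per-agent sojourn times from get_agent_data().
import queueing_tool as qt

g = qt.generate_pagerank_graph(100, seed=13)
net = qt.QueueNetwork(g, seed=13)
net.start_collecting_data()
net.initialize(10)
net.simulate(2000)

agent_data = net.get_agent_data()
for agent_id, rows in agent_data.items():
    # Columns: arrival, service start, departure, num_queued, num_total, q_id
    arrivals = rows[:, 0]
    departures = rows[:, 2]
    finished = departures > 0              # assumption: 0 marks a visit not yet departed
    sojourn = departures[finished] - arrivals[finished]
    # sojourn holds this agent's time spent at each queue it has left
```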
Returns\n ``'arrival,service,departure,num_queued,num_total,q_id'``", "id": "f14533:c1:m14"} {"signature": "def get_queue_data(self, queues=None, edge=None, edge_type=None, return_header=False):", "body": "queues = _get_queues(self.g, queues, edge, edge_type)data = np.zeros((, ))for q in queues:dat = self.edge2queue[q].fetch_data()if len(dat) > :data = np.vstack((data, dat))if return_header:return data, ''return data", "docstring": "Gets data from all the queues.\n\n If none of the parameters are given then data from every\n :class:`.QueueServer` is retrieved.\n\n Parameters\n ----------\n queues : int or an *array_like* of int, (optional)\n The edge index (or an iterable of edge indices) identifying\n the :class:`QueueServer(s)<.QueueServer>` whose data will\n be retrieved.\n edge : 2-tuple of int or *array_like* (optional)\n Explicitly specify which queues to retrieve data from. Must\n be either:\n\n * A 2-tuple of the edge's source and target vertex\n indices, or\n * An iterable of 2-tuples of the edge's source and\n target vertex indices.\n\n edge_type : int or an iterable of int (optional)\n A integer, or a collection of integers identifying which\n edge types to retrieve data from.\n return_header : bool (optonal, default: False)\n Determines whether the column headers are returned.\n\n Returns\n -------\n out : :class:`~numpy.ndarray`\n * 1st: The arrival time of an agent.\n * 2nd: The service start time of an agent.\n * 3rd: The departure time of an agent.\n * 4th: The length of the queue upon the agents arrival.\n * 5th: The total number of :class:`Agents<.Agent>` in the\n :class:`.QueueServer`.\n * 6th: The :class:`QueueServer's<.QueueServer>` edge index.\n\n out : str (optional)\n A comma seperated string of the column headers. Returns\n ``'arrival,service,departure,num_queued,num_total,q_id'```\n\n Examples\n --------\n Data is not collected by default. Before simulating, by sure to\n turn it on (as well as initialize the network). 
The following\n returns data from queues with ``edge_type`` 1 or 3:\n\n >>> import queueing_tool as qt\n >>> g = qt.generate_pagerank_graph(100, seed=13)\n >>> net = qt.QueueNetwork(g, seed=13)\n >>> net.start_collecting_data()\n >>> net.initialize(10)\n >>> net.simulate(2000)\n >>> data = net.get_queue_data(edge_type=(1, 3))\n\n To get data from an edge connecting two vertices do the\n following:\n\n >>> data = net.get_queue_data(edge=(1, 50))\n\n To get data from several edges do the following:\n\n >>> data = net.get_queue_data(edge=[(1, 50), (10, 91), (99, 99)])\n\n You can specify the edge indices as well:\n\n >>> data = net.get_queue_data(queues=(20, 14, 0, 4))", "id": "f14533:c1:m15"} {"signature": "def initialize(self, nActive=, queues=None, edges=None, edge_type=None):", "body": "if queues is None and edges is None and edge_type is None:if nActive >= and isinstance(nActive, numbers.Integral):qs = [q.edge[] for q in self.edge2queue if q.edge[] != ]n = min(nActive, len(qs))queues = np.random.choice(qs, size=n, replace=False)elif not isinstance(nActive, numbers.Integral):msg = \"\"raise TypeError(msg)else:msg = (\"\"\"\")raise ValueError(msg)else:queues = _get_queues(self.g, queues, edges, edge_type)queues = [e for e in queues if self.edge2queue[e].edge[] != ]if len(queues) == :raise QueueingToolError(\"\")if len(queues) > self.max_agents:queues = queues[:self.max_agents]for ei in queues:self.edge2queue[ei].set_active()self.num_agents[ei] = self.edge2queue[ei]._num_totalkeys = [q._key() for q in self.edge2queue if q._time < np.infty]self._fancy_heap = PriorityQueue(keys, self.nE)self._initialized = True", "docstring": "Prepares the ``QueueNetwork`` for simulation.\n\n Each :class:`.QueueServer` in the network starts inactive,\n which means they do not accept arrivals from outside the\n network, and they have no agents in their system. This method\n sets queues to active, which then allows agents to arrive from\n outside the network.\n\n Parameters\n ----------\n nActive : int (optional, default: ``1``)\n The number of queues to set as active. The queues are\n selected randomly.\n queues : int *array_like* (optional)\n The edge index (or an iterable of edge indices) identifying\n the :class:`QueueServer(s)<.QueueServer>` to make active by.\n edges : 2-tuple of int or *array_like* (optional)\n Explicitly specify which queues to make active. Must be\n either:\n\n * A 2-tuple of the edge's source and target vertex\n indices, or\n * An iterable of 2-tuples of the edge's source and\n target vertex indices.\n\n edge_type : int or an iterable of int (optional)\n A integer, or a collection of integers identifying which\n edge types will be set active.\n\n Raises\n ------\n ValueError\n If ``queues``, ``egdes``, and ``edge_type`` are all ``None``\n and ``nActive`` is an integer less than 1\n :exc:`~ValueError` is raised.\n TypeError\n If ``queues``, ``egdes``, and ``edge_type`` are all ``None``\n and ``nActive`` is not an integer then a :exc:`~TypeError`\n is raised.\n QueueingToolError\n Raised if all the queues specified are\n :class:`NullQueues<.NullQueue>`.\n\n Notes\n -----\n :class:`NullQueues<.NullQueue>` cannot be activated, and are\n sifted out if they are specified. 
More specifically, every edge\n with edge type 0 is sifted out.", "id": "f14533:c1:m16"} {"signature": "def next_event_description(self):", "body": "if self._fancy_heap.size == :event_type = ''edge_index = Noneelse:s = [q._key() for q in self.edge2queue]s.sort()e = s[][]q = self.edge2queue[e]event_type = '' if q.next_event_description() == else ''edge_index = q.edge[]return event_type, edge_index", "docstring": "Returns whether the next event is an arrival or a departure\n and the queue the event is accuring at.\n\n Returns\n -------\n des : str\n Indicates whether the next event is an arrival, a\n departure, or nothing; returns ``'Arrival'``,\n ``'Departure'``, or ``'Nothing'``.\n edge : int or ``None``\n The edge index of the edge that this event will occur at.\n If there are no events then ``None`` is returned.", "id": "f14533:c1:m17"} {"signature": "def reset_colors(self):", "body": "for k, e in enumerate(self.g.edges()):self.g.set_ep(e, '', self.edge2queue[k].colors[''])for v in self.g.nodes():self.g.set_vp(v, '', self.colors[''])", "docstring": "Resets all edge and vertex colors to their default values.", "id": "f14533:c1:m18"} {"signature": "def set_transitions(self, mat):", "body": "if isinstance(mat, dict):for key, value in mat.items():probs = list(value.values())if key not in self.g.node:msg = \"\"raise ValueError(msg)elif len(self.out_edges[key]) > and not np.isclose(sum(probs), ):msg = \"\"raise ValueError(msg)elif (np.array(probs) < ).any():msg = \"\"raise ValueError(msg)for k, e in enumerate(sorted(self.g.out_edges(key))):self._route_probs[key][k] = value.get(e[], )elif isinstance(mat, np.ndarray):non_terminal = np.array([self.g.out_degree(v) > for v in self.g.nodes()])if mat.shape != (self.nV, self.nV):msg = (\"\"\"\").format(self.nV, self.nV)raise ValueError(msg)elif not np.allclose(np.sum(mat[non_terminal, :], axis=), ):msg = \"\"raise ValueError(msg)elif (mat < ).any():raise ValueError(\"\")for k in range(self.nV):for j, e in enumerate(sorted(self.g.out_edges(k))):self._route_probs[k][j] = mat[k, e[]]else:raise TypeError(\"\")", "docstring": "Change the routing transitions probabilities for the\n network.\n\n Parameters\n ----------\n mat : dict or :class:`~numpy.ndarray`\n A transition routing matrix or transition dictionary. If\n passed a dictionary, the keys are source vertex indices and\n the values are dictionaries with target vertex indicies\n as the keys and the probabilities of routing from the\n source to the target as the values.\n\n Raises\n ------\n ValueError\n A :exc:`.ValueError` is raised if: the keys in the dict\n don't match with a vertex index in the graph; or if the\n :class:`~numpy.ndarray` is passed with the wrong shape,\n must be (``num_vertices``, ``num_vertices``); or the values\n passed are not probabilities (for each vertex they are\n positive and sum to 1);\n TypeError\n A :exc:`.TypeError` is raised if mat is not a dict or\n :class:`~numpy.ndarray`.\n\n Examples\n --------\n The default transition matrix is every out edge being equally\n likely:\n\n >>> import queueing_tool as qt\n >>> adjacency = {\n ... 0: [2],\n ... 1: [2, 3],\n ... 2: [0, 1, 2, 4],\n ... 3: [1],\n ... 4: [2],\n ... }\n >>> g = qt.adjacency2graph(adjacency)\n >>> net = qt.QueueNetwork(g)\n >>> net.transitions(False) # doctest: +ELLIPSIS\n ... 
# doctest: +NORMALIZE_WHITESPACE\n {0: {2: 1.0},\n 1: {2: 0.5, 3: 0.5},\n 2: {0: 0.25, 1: 0.25, 2: 0.25, 4: 0.25},\n 3: {1: 1.0},\n 4: {2: 1.0}}\n\n If you want to change only one vertex's transition\n probabilities, you can do so with the following:\n\n >>> net.set_transitions({1 : {2: 0.75, 3: 0.25}})\n >>> net.transitions(False) # doctest: +ELLIPSIS\n ... # doctest: +NORMALIZE_WHITESPACE\n {0: {2: 1.0},\n 1: {2: 0.75, 3: 0.25},\n 2: {0: 0.25, 1: 0.25, 2: 0.25, 4: 0.25},\n 3: {1: 1.0},\n 4: {2: 1.0}}\n\n One can generate a transition matrix using\n :func:`.generate_transition_matrix`. You can change all\n transition probabilities with an :class:`~numpy.ndarray`:\n\n >>> mat = qt.generate_transition_matrix(g, seed=10)\n >>> net.set_transitions(mat)\n >>> net.transitions(False) # doctest: +ELLIPSIS\n ... # doctest: +NORMALIZE_WHITESPACE\n {0: {2: 1.0},\n 1: {2: 0.962..., 3: 0.037...},\n 2: {0: 0.301..., 1: 0.353..., 2: 0.235..., 4: 0.108...},\n 3: {1: 1.0},\n 4: {2: 1.0}}\n\n See Also\n --------\n :meth:`.transitions` : Return the current routing\n probabilities.\n :func:`.generate_transition_matrix` : Generate a random routing\n matrix.", "id": "f14533:c1:m19"} {"signature": "def show_active(self, **kwargs):", "body": "g = self.gfor v in g.nodes():self.g.set_vp(v, '', [, , , ])is_active = Falsemy_iter = g.in_edges(v) if g.is_directed() else g.out_edges(v)for e in my_iter:ei = g.edge_index[e]if self.edge2queue[ei]._active:is_active = Truebreakif is_active:self.g.set_vp(v, '', self.colors[''])else:self.g.set_vp(v, '', self.colors[''])for e in g.edges():ei = g.edge_index[e]if self.edge2queue[ei]._active:self.g.set_ep(e, '', self.colors[''])else:self.g.set_ep(e, '', self.colors[''])self.draw(update_colors=False, **kwargs)self._update_all_colors()", "docstring": "Draws the network, highlighting active queues.\n\n The colored vertices represent vertices that have at least one\n queue on an in-edge that is active. Dark edges represent\n queues that are active, light edges represent queues that are\n inactive.\n\n Parameters\n ----------\n **kwargs\n Any additional parameters to pass to :meth:`.draw`, and\n :meth:`.QueueNetworkDiGraph.draw_graph`.\n\n Notes\n -----\n Active queues are :class:`QueueServers<.QueueServer>` that\n accept arrivals from outside the network. The colors are\n defined by the class attribute ``colors``. The relevant keys\n are ``vertex_active``, ``vertex_inactive``, ``edge_active``,\n and ``edge_inactive``.", "id": "f14533:c1:m20"} {"signature": "def show_type(self, edge_type, **kwargs):", "body": "for v in self.g.nodes():e = (v, v)if self.g.is_edge(e) and self.g.ep(e, '') == edge_type:ei = self.g.edge_index[e]self.g.set_vp(v, '', self.colors[''])self.g.set_vp(v, '', self.edge2queue[ei].colors[''])else:self.g.set_vp(v, '', self.colors[''])self.g.set_vp(v, '', [, , , ])for e in self.g.edges():if self.g.ep(e, '') == edge_type:self.g.set_ep(e, '', self.colors[''])else:self.g.set_ep(e, '', self.colors[''])self.draw(update_colors=False, **kwargs)self._update_all_colors()", "docstring": "Draws the network, highlighting queues of a certain type.\n\n The colored vertices represent self loops of type ``edge_type``.\n Dark edges represent queues of type ``edge_type``.\n\n Parameters\n ----------\n edge_type : int\n The type of vertices and edges to be shown.\n **kwargs\n Any additional parameters to pass to :meth:`.draw`, and\n :meth:`.QueueNetworkDiGraph.draw_graph`\n\n Notes\n -----\n The colors are defined by the class attribute ``colors``. 
The\n relevant colors are ``vertex_active``, ``vertex_inactive``,\n ``vertex_highlight``, ``edge_active``, and ``edge_inactive``.\n\n Examples\n --------\n The following code highlights all edges with edge type ``2``.\n If the edge is a loop then the vertex is highlighted as well.\n In this case all edges with edge type ``2`` happen to be loops.\n\n >>> import queueing_tool as qt\n >>> g = qt.generate_pagerank_graph(100, seed=13)\n >>> net = qt.QueueNetwork(g, seed=13)\n >>> fname = 'edge_type_2.png'\n >>> net.show_type(2, fname=fname) # doctest: +SKIP\n\n .. figure:: edge_type_2-1.png\n :align: center", "id": "f14533:c1:m21"} {"signature": "def simulate(self, n=, t=None):", "body": "if not self._initialized:msg = (\"\"\"\")raise QueueingToolError(msg)if t is None:for dummy in range(n):self._simulate_next_event(slow=False)else:now = self._twhile self._t < now + t:self._simulate_next_event(slow=False)", "docstring": "Simulates the network forward.\n\n Simulates either a specific number of events or for a specified\n amount of simulation time.\n\n Parameters\n ----------\n n : int (optional, default: 1)\n The number of events to simulate. If ``t`` is not given\n then this parameter is used.\n t : float (optional)\n The amount of simulation time to simulate forward. If\n given, ``t`` is used instead of ``n``.\n\n Raises\n ------\n QueueingToolError\n Will raise a :exc:`.QueueingToolError` if the\n ``QueueNetwork`` has not been initialized. Call\n :meth:`.initialize` before calling this method.\n\n Examples\n --------\n Let ``net`` denote your instance of a ``QueueNetwork``. Before\n you simulate, you need to initialize the network, which allows\n arrivals from outside the network. To initialize with 2 (random\n chosen) edges accepting arrivals run:\n\n >>> import queueing_tool as qt\n >>> g = qt.generate_pagerank_graph(100, seed=50)\n >>> net = qt.QueueNetwork(g, seed=50)\n >>> net.initialize(2)\n\n To simulate the network 50000 events run:\n\n >>> net.num_events\n 0\n >>> net.simulate(50000)\n >>> net.num_events\n 50000\n\n To simulate the network for at least 75 simulation time units\n run:\n\n >>> t0 = net.current_time\n >>> net.simulate(t=75)\n >>> t1 = net.current_time\n >>> t1 - t0 # doctest: +ELLIPSIS\n 75...", "id": "f14533:c1:m22"} {"signature": "def start_collecting_data(self, queues=None, edge=None, edge_type=None):", "body": "queues = _get_queues(self.g, queues, edge, edge_type)for k in queues:self.edge2queue[k].collect_data = True", "docstring": "Tells the queues to collect data on agents' arrival, service\n start, and departure times.\n\n If none of the parameters are given then every\n :class:`.QueueServer` will start collecting data.\n\n Parameters\n ----------\n queues : :any:`int`, *array_like* (optional)\n The edge index (or an iterable of edge indices) identifying\n the :class:`QueueServer(s)<.QueueServer>` that will start\n collecting data.\n edge : 2-tuple of int or *array_like* (optional)\n Explicitly specify which queues will collect data. 
Must be\n either:\n\n * A 2-tuple of the edge's source and target vertex\n indices, or\n * An iterable of 2-tuples of the edge's source and\n target vertex indices.\n\n edge_type : int or an iterable of int (optional)\n A integer, or a collection of integers identifying which\n edge types will be set active.", "id": "f14533:c1:m24"} {"signature": "def stop_collecting_data(self, queues=None, edge=None, edge_type=None):", "body": "queues = _get_queues(self.g, queues, edge, edge_type)for k in queues:self.edge2queue[k].collect_data = False", "docstring": "Tells the queues to stop collecting data on agents.\n\n If none of the parameters are given then every\n :class:`.QueueServer` will stop collecting data.\n\n Parameters\n ----------\n queues : int, *array_like* (optional)\n The edge index (or an iterable of edge indices) identifying\n the :class:`QueueServer(s)<.QueueServer>` that will stop\n collecting data.\n edge : 2-tuple of int or *array_like* (optional)\n Explicitly specify which queues will stop collecting data.\n Must be either:\n\n * A 2-tuple of the edge's source and target vertex\n indices, or\n * An iterable of 2-tuples of the edge's source and\n target vertex indices.\n\n edge_type : int or an iterable of int (optional)\n A integer, or a collection of integers identifying which\n edge types will stop collecting data.", "id": "f14533:c1:m25"} {"signature": "def transitions(self, return_matrix=True):", "body": "if return_matrix:mat = np.zeros((self.nV, self.nV))for v in self.g.nodes():ind = [e[] for e in sorted(self.g.out_edges(v))]mat[v, ind] = self._route_probs[v]else:mat = {k: {e[]: p for e, p in zip(sorted(self.g.out_edges(k)), value)}for k, value in enumerate(self._route_probs)}return mat", "docstring": "Returns the routing probabilities for each vertex in the\n graph.\n\n Parameters\n ----------\n return_matrix : bool (optional, the default is ``True``)\n Specifies whether an :class:`~numpy.ndarray` is returned.\n If ``False``, a dict is returned instead.\n\n Returns\n -------\n out : a dict or :class:`~numpy.ndarray`\n The transition probabilities for each vertex in the graph.\n If ``out`` is an :class:`~numpy.ndarray`, then\n ``out[v, u]`` returns the probability of a transition from\n vertex ``v`` to vertex ``u``. If ``out`` is a dict\n then ``out_edge[v][u]`` is the probability of moving from\n vertex ``v`` to the vertex ``u``.\n\n Examples\n --------\n Lets change the routing probabilities:\n\n >>> import queueing_tool as qt\n >>> import networkx as nx\n >>> g = nx.sedgewick_maze_graph()\n >>> net = qt.QueueNetwork(g)\n\n Below is an adjacency list for the graph ``g``.\n\n >>> ans = qt.graph2dict(g, False)\n >>> {k: sorted(v) for k, v in ans.items()}\n ... # doctest: +NORMALIZE_WHITESPACE\n {0: [2, 5, 7],\n 1: [7],\n 2: [0, 6],\n 3: [4, 5],\n 4: [3, 5, 6, 7],\n 5: [0, 3, 4],\n 6: [2, 4],\n 7: [0, 1, 4]}\n\n The default transition matrix is every out edge being equally\n likely:\n\n >>> net.transitions(False) # doctest: +ELLIPSIS\n ... # doctest: +NORMALIZE_WHITESPACE\n {0: {2: 0.333..., 5: 0.333..., 7: 0.333...},\n 1: {7: 1.0},\n 2: {0: 0.5, 6: 0.5},\n 3: {4: 0.5, 5: 0.5},\n 4: {3: 0.25, 5: 0.25, 6: 0.25, 7: 0.25},\n 5: {0: 0.333..., 3: 0.333..., 4: 0.333...},\n 6: {2: 0.5, 4: 0.5},\n 7: {0: 0.333..., 1: 0.333..., 4: 0.333...}}\n\n Now we will generate a random routing matrix:\n\n >>> mat = qt.generate_transition_matrix(g, seed=96)\n >>> net.set_transitions(mat)\n >>> net.transitions(False) # doctest: +ELLIPSIS\n ... 
# doctest: +NORMALIZE_WHITESPACE\n {0: {2: 0.112..., 5: 0.466..., 7: 0.420...},\n 1: {7: 1.0},\n 2: {0: 0.561..., 6: 0.438...},\n 3: {4: 0.545..., 5: 0.454...},\n 4: {3: 0.374..., 5: 0.381..., 6: 0.026..., 7: 0.217...},\n 5: {0: 0.265..., 3: 0.460..., 4: 0.274...},\n 6: {2: 0.673..., 4: 0.326...},\n 7: {0: 0.033..., 1: 0.336..., 4: 0.630...}}\n\n What this shows is the following: when an :class:`.Agent` is at\n vertex ``2`` they will transition to vertex ``0`` with\n probability ``0.561`` and route to vertex ``6`` probability\n ``0.438``, when at vertex ``6`` they will transition back to\n vertex ``2`` with probability ``0.673`` and route vertex ``4``\n probability ``0.326``, etc.", "id": "f14533:c1:m26"} {"signature": "def size(self, s):", "body": "leader = self.find(s)return self._size[leader]", "docstring": "Returns the number of elements in the set that ``s`` belongs to.\n\n Parameters\n ----------\n s : object\n An object\n\n Returns\n -------\n out : int\n The number of elements in the set that ``s`` belongs to.", "id": "f14535:c0:m2"} {"signature": "def find(self, s):", "body": "pSet = [s]parent = self._leader[s]while parent != self._leader[parent]:pSet.append(parent)parent = self._leader[parent]if len(pSet) > :for a in pSet:self._leader[a] = parentreturn parent", "docstring": "Locates the leader of the set to which the element ``s`` belongs.\n\n Parameters\n ----------\n s : object\n An object that the ``UnionFind`` contains.\n\n Returns\n -------\n object\n The leader of the set that contains ``s``.", "id": "f14535:c0:m3"} {"signature": "def union(self, a, b):", "body": "s1, s2 = self.find(a), self.find(b)if s1 != s2:r1, r2 = self._rank[s1], self._rank[s2]if r2 > r1:r1, r2 = r2, r1s1, s2 = s2, s1if r1 == r2:self._rank[s1] += self._leader[s2] = s1self._size[s1] += self._size[s2]self.nClusters -= ", "docstring": "Merges the set that contains ``a`` with the set that contains ``b``.\n\n Parameters\n ----------\n a, b : objects\n Two objects whose sets are to be merged.", "id": "f14535:c0:m4"} {"signature": "def add_loss(self, *args, **kwargs):", "body": "self.blocked += ", "docstring": "Adds one to the number of times the agent has been blocked\n from entering a queue.", "id": "f14536:c0:m7"} {"signature": "def desired_destination(self, network, edge):", "body": "n = len(network.out_edges[edge[]])if n <= :return network.out_edges[edge[]][]u = uniform()pr = network._route_probs[edge[]]k = _choice(pr, u, n)return network.out_edges[edge[]][k]", "docstring": "Returns the agents next destination given their current\n location on the network.\n\n An ``Agent`` chooses one of the out edges at random. The\n probability that the ``Agent`` will travel along a specific\n edge is specified in the :class:`QueueNetwork's<.QueueNetwork>`\n transition matrix.\n\n Parameters\n ----------\n network : :class:`.QueueNetwork`\n The :class:`.QueueNetwork` where the Agent resides.\n edge : tuple\n A 4-tuple indicating which edge this agent is located at.\n The first two slots indicate the current edge's source and\n target vertices, while the third slot indicates this edges\n ``edge_index``. 
The last slot indicates the edge type of\n that edge.\n\n Returns\n -------\n out : int\n Returns the edge index corresponding to the agent's next\n edge to visit in the network.\n\n See Also\n --------\n :meth:`.transitions` : :class:`QueueNetwork's<.QueueNetwork>`\n method that returns the transition probabilities for each\n edge in the graph.", "id": "f14536:c0:m8"} {"signature": "def queue_action(self, queue, *args, **kwargs):", "body": "pass", "docstring": "A method that acts on the queue the Agent is at. This method\n is called when the Agent arrives at the queue (where\n ``args[0] == 0``), when service starts for the Agent (where\n ``args[0] == 1``), and when the Agent departs from the queue\n (where ``args[0] == 2``). By default, this method does nothing\n to the queue, but is here if the Agent class is extended and\n this method is overwritten.", "id": "f14536:c0:m9"} {"signature": "def desired_destination(self, network, edge):", "body": "adjacent_edges = network.out_edges[edge[]]d = _argmin([network.edge2queue[d].number_queued() for d in adjacent_edges])return adjacent_edges[d]", "docstring": "Returns the agent's next destination given their current\n location on the network.\n\n ``GreedyAgents`` choose their next destination within the\n network by picking the adjacent queue with the fewest number of\n :class:`Agents<.Agent>` in the queue.\n\n Parameters\n ----------\n network : :class:`.QueueNetwork`\n The :class:`.QueueNetwork` where the Agent resides.\n edge : tuple\n A 4-tuple indicating which edge this agent is located at.\n The first two slots indicate the current edge's source and\n target vertices, while the third slot indicates this edge's\n ``edge_index``. The last slot indicates the edge's edge\n type.\n\n Returns\n -------\n out : int\n Returns the edge index corresponding to the agent's next\n edge to visit in the network.", "id": "f14536:c1:m2"} {"signature": "def queue_action(self, queue, *args, **kwargs):", "body": "if isinstance(queue, ResourceQueue):if self._has_resource:self._has_resource = Falseself._had_resource = Trueelse:if queue.num_servers > :queue.set_num_servers(queue.num_servers - )self._has_resource = Trueself._had_resource = False", "docstring": "Function that specifies the interaction with a\n :class:`.ResourceQueue` upon departure.\n\n When departing from a :class:`.ResourceQueue` (or a\n :class:`.QueueServer`), this method is called. If the agent\n does not already have a resource then it decrements the number\n of servers at :class:`.ResourceQueue` by one. 
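The two ``desired_destination`` records above contrast the base ``Agent``, which samples an out edge from the network's transition probabilities, with ``GreedyAgent``, which picks the adjacent queue holding the fewest waiting agents. Since ``queue_action`` notes that ``Agent`` is meant to be subclassed, here is a hedged sketch of a greedy-style override built only from attributes documented in this section (``out_edges``, ``edge2queue``, ``number_queued``); the class name is made up, and the tuple index used below follows the docstring's description of ``edge`` rather than the library's elided source.

# Illustrative Agent subclass that always routes to the least-busy adjacent
# queue. This mirrors GreedyAgent's documented behaviour; it is a sketch,
# not the library's implementation.
import queueing_tool as qt

class ShortestQueueAgent(qt.Agent):
    def desired_destination(self, network, edge):
        # edge is a 4-tuple (source, target, edge_index, edge_type) per the
        # docstring above; route among the out edges of the target vertex.
        adjacent = network.out_edges[edge[1]]
        lengths = [network.edge2queue[ei].number_queued() for ei in adjacent]
        return adjacent[lengths.index(min(lengths))]

Wiring such a class into a running network is typically done through the network's queue arguments (the ``q_args`` dict described in ``_prepare_graph`` further down, e.g. an agent-factory option if the queue class supports one), which is outside the scope of this sketch.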
Note that this\n only applies to :class:`ResourceQueue's<.ResourceQueue>`.\n\n Parameters\n ----------\n queue : :class:`.QueueServer`\n The instance of the queue that the ``ResourceAgent`` will\n interact with.", "id": "f14538:c0:m2"} {"signature": "def next_event(self):", "body": "if isinstance(self._arrivals[], ResourceAgent):if self._departures[]._time < self._arrivals[]._time:return super(ResourceQueue, self).next_event()elif self._arrivals[]._time < infty:if self._arrivals[]._has_resource:arrival = heappop(self._arrivals)self._current_t = arrival._timeself._num_total -= self.set_num_servers(self.num_servers + )if self.collect_data:t = arrival._timeif arrival.agent_id not in self.data:self.data[arrival.agent_id] = [[t, t, t, len(self.queue), self.num_system]]else:self.data[arrival.agent_id].append([t, t, t, len(self.queue), self.num_system])if self._arrivals[]._time < self._departures[]._time:self._time = self._arrivals[]._timeelse:self._time = self._departures[]._timeelif self.num_system < self.num_servers:super(ResourceQueue, self).next_event()else:self.num_blocked += self._num_arrivals += self._num_total -= arrival = heappop(self._arrivals)self._current_t = arrival._timeif self.collect_data:if arrival.agent_id not in self.data:self.data[arrival.agent_id] = [[arrival._time, , , len(self.queue), self.num_system]]else:self.data[arrival.agent_id].append([arrival._time, , , len(self.queue), self.num_system])if self._arrivals[]._time < self._departures[]._time:self._time = self._arrivals[]._timeelse:self._time = self._departures[]._timeelse:return super(ResourceQueue, self).next_event()", "docstring": "Simulates the queue forward one event.\n\n This method behaves identically to a :class:`.LossQueue` if the\n arriving/departing agent is anything other than a\n :class:`.ResourceAgent`. The differences are;\n\n Arriving:\n\n * If the :class:`.ResourceAgent` has a resource then it deletes\n the agent upon arrival and adds one to ``num_servers``.\n * If the :class:`.ResourceAgent` is arriving without a resource\n then nothing special happens.\n\n Departing:\n\n * If the :class:`.ResourceAgent` does not have a resource, then\n ``num_servers`` decreases by one and the agent then *has a\n resource*.\n\n Use :meth:`~QueueServer.simulate` for simulating instead.", "id": "f14538:c1:m3"} {"signature": "def poisson_random_measure(t, rate, rate_max):", "body": "scale = / rate_maxt = t + exponential(scale)while rate_max * uniform() > rate(t):t = t + exponential(scale)return t", "docstring": "A function that returns the arrival time of the next arrival for\n a Poisson random measure.\n\n Parameters\n ----------\n t : float\n The start time from which to simulate the next arrival time.\n rate : function\n The *intensity function* for the measure, where ``rate(t)`` is\n the expected arrival rate at time ``t``.\n rate_max : float\n The maximum value of the ``rate`` function.\n\n Returns\n -------\n out : float\n The time of the next arrival.\n\n Notes\n -----\n This function returns the time of the next arrival, where the\n distribution of the number of arrivals between times :math:`t` and\n :math:`t+s` is Poisson with mean\n\n .. math::\n\n \\int_{t}^{t+s} dx \\, r(x)\n\n where :math:`r(t)` is the supplied ``rate`` function. 
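The ``poisson_random_measure`` body above implements thinning: candidate gaps are drawn from an exponential with scale ``1 / rate_max``, and a candidate at time ``t`` is accepted with probability ``rate(t) / rate_max``. Below is a standalone sketch of that loop using only numpy; the function name is made up and the rate function is the one from the docstring's example.

# Sketch of the thinning (acceptance-rejection) loop behind
# poisson_random_measure; only numpy is required.
import numpy as np

def next_arrival(t, rate, rate_max, rng):
    # Propose exponential gaps at the peak rate and accept a proposal at
    # time t with probability rate(t) / rate_max.
    t = t + rng.exponential(1.0 / rate_max)
    while rate_max * rng.uniform() > rate(t):
        t = t + rng.exponential(1.0 / rate_max)
    return t

rate = lambda t: 2 + np.sin(2 * np.pi * t)   # bounded above by rate_max = 3
rng = np.random.default_rng(10)
print(next_arrival(1.0, rate, rate_max=3.0, rng=rng))

The bound matters: if ``rate(t)`` ever exceeded ``rate_max`` the acceptance probability would be wrong, which is why the docstring restricts the method to bounded intensity functions.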
This function\n can only simulate processes that have bounded intensity functions.\n See chapter 6 of [3]_ for more on the mathematics behind Poisson\n random measures; the book's publisher, Springer, has that chapter\n available online for free at (`pdf`_\\).\n\n A Poisson random measure is sometimes called a non-homogeneous\n Poisson process. A Poisson process is a special type of Poisson\n random measure.\n\n .. _pdf: http://www.springer.com/cda/content/document/\\\n cda_downloaddocument/9780387878584-c1.pdf\n\n Examples\n --------\n Suppose you wanted to model the arrival process as a Poisson\n random measure with rate function :math:`r(t) = 2 + \\sin( 2\\pi t)`.\n Then you could do so as follows:\n\n >>> import queueing_tool as qt\n >>> import numpy as np\n >>> np.random.seed(10)\n >>> rate = lambda t: 2 + np.sin(2 * np.pi * t)\n >>> arr_f = lambda t: qt.poisson_random_measure(t, rate, 3)\n >>> arr_f(1) # doctest: +ELLIPSIS\n 1.491...\n\n References\n ----------\n .. [3] Cinlar, Erhan. *Probability and stochastics*. Graduate Texts in\\\n Mathematics. Vol. 261. Springer, New York, 2011.\\\n :doi:`10.1007/978-0-387-87859-1`", "id": "f14539:m0"} {"signature": "def at_capacity(self):", "body": "return False", "docstring": "Returns whether the queue is at capacity or not.\n\n Returns\n -------\n bool\n Always returns ``False``, since the ``QueueServer`` class\n has infinite capacity.", "id": "f14539:c0:m7"} {"signature": "def clear(self):", "body": "self.data = {}self._num_arrivals = self._oArrivals = self.num_departures = self.num_system = self._num_total = self._current_t = self._time = inftyself._next_ct = self._active = Falseself.queue = collections.deque()inftyAgent = InftyAgent()self._arrivals = [inftyAgent]self._departures = [inftyAgent]", "docstring": "Clears out the queue. Removes all arrivals, departures, and\n queued agents from the :class:`.QueueServer`, resets\n ``num_arrivals``, ``num_departures``, ``num_system``, and the clock to\n zero. It also clears any stored ``data`` and the server is then\n set to inactive.", "id": "f14539:c0:m8"} {"signature": "def copy(self):", "body": "return copy.deepcopy(self)", "docstring": "Returns a deep copy of itself.", "id": "f14539:c0:m9"} {"signature": "def _current_color(self, which=):", "body": "if which == :color = self.colors['']elif which == :color = self.colors['']else:div = self.coloring_sensitivity * self.num_servers + tmp = - min(self.num_system / div, )if self.edge[] == self.edge[]:color = [i * tmp for i in self.colors['']]color[] = else:color = [i * tmp for i in self.colors['']]color[] = / return color", "docstring": "Returns a color for the queue.\n\n Parameters\n ----------\n which : int (optional, default: ``0``)\n Specifies the type of color to return.\n\n Returns\n -------\n color : list\n Returns a RGBA color that is represented as a list with 4\n entries where each entry can be any floating point number\n between 0 and 1.\n\n * If ``which`` is 1 then it returns the color of the edge\n as if it were a self loop. This is specified in\n ``colors['edge_loop_color']``.\n * If ``which`` is 2 then it returns the color of the vertex\n pen color (defined as color/vertex_color in\n :meth:`.QueueNetworkDiGraph.graph_draw`). This is\n specified in ``colors['vertex_color']``.\n * If ``which`` is anything else, then it returns the a\n shade of the edge that is proportional to the number of\n agents in the system -- which includes those being\n servered and those waiting to be served. More agents\n correspond to darker edge colors. 
Uses\n ``colors['vertex_fill_color']`` if the queue sits on a\n loop, and ``colors['edge_color']`` otherwise.", "id": "f14539:c0:m10"} {"signature": "def delay_service(self, t=None):", "body": "if len(self._departures) > :agent = heappop(self._departures)if t is None:agent._time = self.service_f(agent._time)else:agent._time = theappush(self._departures, agent)self._update_time()", "docstring": "Adds an extra service time to the next departing\n :class:`Agent's<.Agent>` service time.\n\n Parameters\n ----------\n t : float (optional)\n Specifies the departing time for the agent scheduled\n to depart next. If ``t`` is not given, then an additional\n service time is added to the next departing agent.", "id": "f14539:c0:m11"} {"signature": "def fetch_data(self, return_header=False):", "body": "qdata = []for d in self.data.values():qdata.extend(d)dat = np.zeros((len(qdata), ))if len(qdata) > :dat[:, :] = np.array(qdata)dat[:, ] = self.edge[]dType = [('', float),('', float),('', float),('', float),('', float),('', float)]dat = np.array([tuple(d) for d in dat], dtype=dType)dat = np.sort(dat, order='')dat = np.array([tuple(d) for d in dat])if return_header:return dat, ''return dat", "docstring": "Fetches data from the queue.\n\n Parameters\n ----------\n return_header : bool (optonal, default: ``False``)\n Determines whether the column headers are returned.\n\n Returns\n -------\n data : :class:`~numpy.ndarray`\n A six column :class:`~numpy.ndarray` of all the data. The\n columns are:\n\n * 1st: The arrival time of an agent.\n * 2nd: The service start time of an agent.\n * 3rd: The departure time of an agent.\n * 4th: The length of the queue upon the agents arrival.\n * 5th: The total number of :class:`Agents<.Agent>` in the\n :class:`.QueueServer`.\n * 6th: The :class:`QueueServer's<.QueueServer>` edge index.\n\n headers : str (optional)\n A comma seperated string of the column headers. 
Returns\n ``'arrival,service,departure,num_queued,num_total,q_id'``", "id": "f14539:c0:m12"} {"signature": "def number_queued(self):", "body": "return len(self.queue)", "docstring": "Returns the number of agents waiting in line to be served.\n\n Returns\n -------\n out : int\n The number of agents waiting in line to be served.", "id": "f14539:c0:m14"} {"signature": "def next_event(self):", "body": "if self._departures[]._time < self._arrivals[]._time:new_depart = heappop(self._departures)self._current_t = new_depart._timeself._num_total -= self.num_system -= self.num_departures += if self.collect_data and new_depart.agent_id in self.data:self.data[new_depart.agent_id][-][] = self._current_tif len(self.queue) > :agent = self.queue.popleft()if self.collect_data and agent.agent_id in self.data:self.data[agent.agent_id][-][] = self._current_tagent._time = self.service_f(self._current_t)agent.queue_action(self, )heappush(self._departures, agent)new_depart.queue_action(self, )self._update_time()return new_departelif self._arrivals[]._time < infty:arrival = heappop(self._arrivals)self._current_t = arrival._timeif self._active:self._add_arrival()self.num_system += self._num_arrivals += if self.collect_data:b = if self.num_system <= self.num_servers else if arrival.agent_id not in self.data:self.data[arrival.agent_id] =[[arrival._time, , , len(self.queue) + b, self.num_system]]else:self.data[arrival.agent_id].append([arrival._time, , , len(self.queue) + b, self.num_system])arrival.queue_action(self, )if self.num_system <= self.num_servers:if self.collect_data:self.data[arrival.agent_id][-][] = arrival._timearrival._time = self.service_f(arrival._time)arrival.queue_action(self, )heappush(self._departures, arrival)else:self.queue.append(arrival)self._update_time()", "docstring": "Simulates the queue forward one event.\n\n Use :meth:`.simulate` instead.\n\n Returns\n -------\n out : :class:`.Agent` (sometimes)\n If the next event is a departure then the departing agent\n is returned, otherwise nothing is returned.\n\n See Also\n --------\n :meth:`.simulate` : Simulates the queue forward.", "id": "f14539:c0:m15"} {"signature": "def next_event_description(self):", "body": "if self._departures[]._time < self._arrivals[]._time:return elif self._arrivals[]._time < infty:return else:return ", "docstring": "Returns an integer representing whether the next event is\n an arrival, a departure, or nothing.\n\n Returns\n -------\n out : int\n An integer representing whether the next event is an\n arrival or a departure: ``1`` corresponds to an arrival,\n ``2`` corresponds to a departure, and ``0`` corresponds to\n nothing scheduled to occur.", "id": "f14539:c0:m16"} {"signature": "def set_active(self):", "body": "if not self._active:self._active = Trueself._add_arrival()", "docstring": "Changes the ``active`` attribute to True. 
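The ``next_event`` record above pops whichever of the arrival and departure heaps holds the earlier timestamp, starts service immediately when a server is free, and otherwise appends the arrival to the waiting line. The toy sketch below reproduces that bookkeeping for a single queue with a plain ``heapq`` heap; every name and number is illustrative and it is not the library's code.

# Toy single-queue event loop mirroring the arrival/departure bookkeeping in
# QueueServer.next_event. Purely illustrative.
import heapq
import random

random.seed(3)
num_servers, busy = 2, 0
waiting = []                    # agents waiting for a free server
events = []                     # heap of (time, kind, agent_id)

t_arr = 0.0
for i in range(5):              # schedule five outside arrivals
    t_arr += random.expovariate(1.0)
    heapq.heappush(events, (t_arr, 'arrival', i))

while events:
    t, kind, agent = heapq.heappop(events)
    if kind == 'arrival':
        if busy < num_servers:  # a server is free: service starts now
            busy += 1
            heapq.heappush(events, (t + random.expovariate(2.0), 'departure', agent))
        else:
            waiting.append(agent)
    else:                       # a departure frees a server for the next in line
        busy -= 1
        if waiting:
            busy += 1
            heapq.heappush(events, (t + random.expovariate(2.0), 'departure', waiting.pop(0)))
    print(round(t, 3), kind, agent, 'waiting:', len(waiting))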
Agents may now\n arrive from outside the network.", "id": "f14539:c0:m17"} {"signature": "def set_inactive(self):", "body": "self._active = False", "docstring": "Changes the ``active`` attribute to False.", "id": "f14539:c0:m18"} {"signature": "def set_num_servers(self, n):", "body": "if not isinstance(n, numbers.Integral) and n is not infty:the_str = \"\"raise TypeError(the_str.format(str(self)))elif n <= :the_str = \"\"raise ValueError(the_str.format(str(self)))else:self.num_servers = n", "docstring": "Change the number of servers in the queue to ``n``.\n\n Parameters\n ----------\n n : int or :const:`numpy.infty`\n A positive integer (or ``numpy.infty``) to set the number\n of queues in the system to.\n\n Raises\n ------\n TypeError\n If ``n`` is not an integer or positive infinity then this\n error is raised.\n ValueError\n If ``n`` is not positive.", "id": "f14539:c0:m19"} {"signature": "def simulate(self, n=, t=None, nA=None, nD=None):", "body": "if t is None and nD is None and nA is None:for dummy in range(n):self.next_event()elif t is not None:then = self._current_t + twhile self._current_t < then and self._time < infty:self.next_event()elif nD is not None:num_departures = self.num_departures + nDwhile self.num_departures < num_departures and self._time < infty:self.next_event()elif nA is not None:num_arrivals = self._oArrivals + nAwhile self._oArrivals < num_arrivals and self._time < infty:self.next_event()", "docstring": "This method simulates the queue forward for a specified\n amount of simulation time, or for a specific number of\n events.\n\n Parameters\n ----------\n n : int (optional, default: ``1``)\n The number of events to simulate. If ``t``, ``nA``, and\n ``nD`` are not given then this parameter is used.\n t : float (optional)\n The minimum amount of simulation time to simulate forward.\n nA : int (optional)\n Simulate until ``nA`` additional arrivals are observed.\n nD : int (optional)\n Simulate until ``nD`` additional departures are observed.\n\n Examples\n --------\n Before any simulations can take place the ``QueueServer`` must\n be activated:\n\n >>> import queueing_tool as qt\n >>> import numpy as np\n >>> rate = lambda t: 2 + 16 * np.sin(np.pi * t / 8)**2\n >>> arr = lambda t: qt.poisson_random_measure(t, rate, 18)\n >>> ser = lambda t: t + np.random.gamma(4, 0.1)\n >>> q = qt.QueueServer(5, arrival_f=arr, service_f=ser, seed=54)\n >>> q.set_active()\n\n To simulate 50000 events do the following:\n\n >>> q.simulate(50000)\n >>> num_events = q.num_arrivals[0] + q.num_departures\n >>> num_events\n 50000\n\n To simulate forward 75 time units, do the following:\n\n >>> t0 = q.time\n >>> q.simulate(t=75)\n >>> round(float(q.time - t0), 1)\n 75.1\n >>> q.num_arrivals[1] + q.num_departures - num_events\n 1597\n\n To simulate forward until 1000 new departures are observed run:\n\n >>> nA0, nD0 = q.num_arrivals[1], q.num_departures\n >>> q.simulate(nD=1000)\n >>> q.num_departures - nD0, q.num_arrivals[1] - nA0\n (1000, 983)\n\n To simulate until 1000 new arrivals are observed run:\n\n >>> nA0, nD0 = q.num_arrivals[1], q.num_departures\n >>> q.simulate(nA=1000)\n >>> q.num_departures - nD0, q.num_arrivals[1] - nA0,\n (987, 1000)", "id": "f14539:c0:m20"} {"signature": "def at_capacity(self):", "body": "return self.num_system >= self.num_servers + self.buffer", "docstring": "Returns whether the queue is at capacity or not.\n\n Returns\n -------\n out : bool\n Returns whether the number of agents in the system -- the\n number of agents being serviced plus those waiting to 
be\n serviced -- is equal to ``num_servers + buffer``.", "id": "f14539:c1:m2"} {"signature": "def _matrix2dict(matrix, etype=False):", "body": "n = len(matrix)adj = {k: {} for k in range(n)}for k in range(n):for j in range(n):if matrix[k, j] != :adj[k][j] = {} if not etype else matrix[k, j]return adj", "docstring": "Takes an adjacency matrix and returns an adjacency list.", "id": "f14541:m0"} {"signature": "def _dict2dict(adj_dict):", "body": "item = adj_dict.popitem()adj_dict[item[]] = item[]if not isinstance(item[], dict):new_dict = {}for key, value in adj_dict.items():new_dict[key] = {v: {} for v in value}adj_dict = new_dictreturn adj_dict", "docstring": "Takes a dictionary based representation of an adjacency list\n and returns a dict of dicts based representation.", "id": "f14541:m1"} {"signature": "def _adjacency_adjust(adjacency, adjust, is_directed):", "body": "for v, adj in adjacency.items():for properties in adj.values():if properties.get('') is None:properties[''] = if is_directed:if adjust == :null_nodes = set()for k, adj in adjacency.items():if len(adj) == :null_nodes.add(k)for k, adj in adjacency.items():for v in adj.keys():if v in null_nodes:adj[v][''] = else:for k, adj in adjacency.items():if len(adj) == :adj[k] = {'': }return adjacency", "docstring": "Takes an adjacency list and returns a (possibly) modified\n adjacency list.", "id": "f14541:m2"} {"signature": "def adjacency2graph(adjacency, edge_type=None, adjust=, **kwargs):", "body": "if isinstance(adjacency, np.ndarray):adjacency = _matrix2dict(adjacency)elif isinstance(adjacency, dict):adjacency = _dict2dict(adjacency)else:msg = (\"\"\"\")raise TypeError(msg)if edge_type is None:edge_type = {}else:if isinstance(edge_type, np.ndarray):edge_type = _matrix2dict(edge_type, etype=True)elif isinstance(edge_type, dict):edge_type = _dict2dict(edge_type)for u, ty in edge_type.items():for v, et in ty.items():adjacency[u][v][''] = etg = nx.from_dict_of_dicts(adjacency, create_using=nx.DiGraph())adjacency = nx.to_dict_of_dicts(g)adjacency = _adjacency_adjust(adjacency, adjust, True)return nx.from_dict_of_dicts(adjacency, create_using=nx.DiGraph())", "docstring": "Takes an adjacency list, dict, or matrix and returns a graph.\n\n The purpose of this function is take an adjacency list (or matrix)\n and return a :class:`.QueueNetworkDiGraph` that can be used with a\n :class:`.QueueNetwork` instance. The Graph returned has the\n ``edge_type`` edge property set for each edge. Note that the graph may\n be altered.\n\n Parameters\n ----------\n adjacency : dict or :class:`~numpy.ndarray`\n An adjacency list as either a dict, or an adjacency matrix.\n adjust : int ``{1, 2}`` (optional, default: 1)\n Specifies what to do when the graph has terminal vertices\n (nodes with no out-edges). Note that if ``adjust`` is not 2\n then it is assumed to be 1. There are two choices:\n\n * ``adjust = 1``: A loop is added to each terminal node in the\n graph, and their ``edge_type`` of that loop is set to 0.\n * ``adjust = 2``: All edges leading to terminal nodes have\n their ``edge_type`` set to 0.\n\n **kwargs :\n Unused.\n\n Returns\n -------\n out : :any:`networkx.DiGraph`\n A directed graph with the ``edge_type`` edge property.\n\n Raises\n ------\n TypeError\n Is raised if ``adjacency`` is not a dict or\n :class:`~numpy.ndarray`.\n\n Examples\n --------\n If terminal nodes are such that all in-edges have edge type ``0``\n then nothing is changed. 
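``adjacency2graph`` above first normalises whatever the caller passes (a matrix via ``_matrix2dict``, a dict of lists via ``_dict2dict``) into a dict-of-dicts, hands it to networkx, and lets ``_adjacency_adjust`` deal with terminal vertices. A hedged sketch of the matrix branch follows; the helper name is made up.

# Sketch: turn an adjacency matrix into the dict-of-dicts form that
# adjacency2graph feeds to networkx. Mirrors _matrix2dict; the helper name
# is illustrative.
import numpy as np
import networkx as nx

def matrix_to_adjacency(matrix):
    n = len(matrix)
    adj = {k: {} for k in range(n)}
    for k in range(n):
        for j in range(n):
            if matrix[k, j] != 0:
                adj[k][j] = {}           # edge k -> j, empty property dict
    return adj

mat = np.array([[0, 1, 1, 0],
                [0, 0, 0, 1],
                [0, 0, 0, 0],
                [1, 0, 0, 0]])
g = nx.from_dict_of_dicts(matrix_to_adjacency(mat), create_using=nx.DiGraph())
print(sorted(g.edges()))                 # [(0, 1), (0, 2), (1, 3), (3, 0)]

Vertex 2 is terminal here, so with the default ``adjust=1`` the real function would add a type-0 loop at that vertex, as the examples that follow show.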
However, if a node is a terminal node then\n a loop is added with edge type 0.\n\n >>> import queueing_tool as qt\n >>> adj = {\n ... 0: {1: {}},\n ... 1: {2: {},\n ... 3: {}},\n ... 3: {0: {}}}\n >>> eTy = {0: {1: 1}, 1: {2: 2, 3: 4}, 3: {0: 1}}\n >>> # A loop will be added to vertex 2\n >>> g = qt.adjacency2graph(adj, edge_type=eTy)\n >>> ans = qt.graph2dict(g)\n >>> sorted(ans.items()) # doctest: +NORMALIZE_WHITESPACE\n [(0, {1: {'edge_type': 1}}),\n (1, {2: {'edge_type': 2}, 3: {'edge_type': 4}}), \n (2, {2: {'edge_type': 0}}),\n (3, {0: {'edge_type': 1}})]\n\n You can use a dict of lists to represent the adjacency list.\n\n >>> adj = {0 : [1], 1: [2, 3], 3: [0]}\n >>> g = qt.adjacency2graph(adj, edge_type=eTy)\n >>> ans = qt.graph2dict(g)\n >>> sorted(ans.items()) # doctest: +NORMALIZE_WHITESPACE\n [(0, {1: {'edge_type': 1}}),\n (1, {2: {'edge_type': 2}, 3: {'edge_type': 4}}),\n (2, {2: {'edge_type': 0}}),\n (3, {0: {'edge_type': 1}})]\n\n Alternatively, you could have this function adjust the edges that\n lead to terminal vertices by changing their edge type to 0:\n\n >>> # The graph is unaltered\n >>> g = qt.adjacency2graph(adj, edge_type=eTy, adjust=2)\n >>> ans = qt.graph2dict(g)\n >>> sorted(ans.items()) # doctest: +NORMALIZE_WHITESPACE\n [(0, {1: {'edge_type': 1}}),\n (1, {2: {'edge_type': 0}, 3: {'edge_type': 4}}),\n (2, {}),\n (3, {0: {'edge_type': 1}})]", "id": "f14541:m3"} {"signature": "def get_edge_type(self, edge_type):", "body": "edges = []for e in self.edges():if self.adj[e[]][e[]].get('') == edge_type:edges.append(e)return edges", "docstring": "Returns all edges with the specified edge type.\n\n Parameters\n ----------\n edge_type : int\n An integer specifying what type of edges to return.\n\n Returns\n -------\n out : list of 2-tuples\n A list of 2-tuples representing the edges in the graph\n with the specified edge type.\n\n Examples\n --------\n Lets get type 2 edges from the following graph\n\n >>> import queueing_tool as qt\n >>> adjacency = {\n ... 0: {1: {'edge_type': 2}},\n ... 1: {2: {'edge_type': 1},\n ... 3: {'edge_type': 4}},\n ... 2: {0: {'edge_type': 2}},\n ... 3: {3: {'edge_type': 0}}\n ... }\n >>> G = qt.QueueNetworkDiGraph(adjacency)\n >>> ans = G.get_edge_type(2)\n >>> ans.sort()\n >>> ans\n [(0, 1), (2, 0)]", "id": "f14541:c0:m14"} {"signature": "def draw_graph(self, line_kwargs=None, scatter_kwargs=None, **kwargs):", "body": "if not HAS_MATPLOTLIB:raise ImportError(\"\")fig = plt.figure(figsize=kwargs.get('', (, )))ax = fig.gca()mpl_kwargs = {'': line_kwargs,'': scatter_kwargs,'': kwargs.get('')}line_kwargs, scatter_kwargs = self.lines_scatter_args(**mpl_kwargs)edge_collection = LineCollection(**line_kwargs)ax.add_collection(edge_collection)ax.scatter(**scatter_kwargs)if hasattr(ax, ''):ax.set_facecolor(kwargs.get('', [, , , ]))else:ax.set_axis_bgcolor(kwargs.get('', [, , , ]))ax.get_xaxis().set_visible(False)ax.get_yaxis().set_visible(False)if '' in kwargs:new_kwargs = {k: v for k, v in kwargs.items() if k in SAVEFIG_KWARGS}fig.savefig(kwargs[''], **new_kwargs)else:plt.ion()plt.show()", "docstring": "Draws the graph.\n\n Uses matplotlib, specifically\n :class:`~matplotlib.collections.LineCollection` and\n :meth:`~matplotlib.axes.Axes.scatter`. 
Gets the default\n keyword arguments for both methods by calling\n :meth:`~.QueueNetworkDiGraph.lines_scatter_args` first.\n\n Parameters\n ----------\n line_kwargs : dict (optional, default: ``None``)\n Any keyword arguments accepted by\n :class:`~matplotlib.collections.LineCollection`\n scatter_kwargs : dict (optional, default: ``None``)\n Any keyword arguments accepted by\n :meth:`~matplotlib.axes.Axes.scatter`.\n bgcolor : list (optional, keyword only)\n A list with 4 floats representing a RGBA color. Defaults\n to ``[1, 1, 1, 1]``.\n figsize : tuple (optional, keyword only, default: ``(7, 7)``)\n The width and height of the figure in inches.\n kwargs :\n Any keyword arguments used by\n :meth:`~matplotlib.figure.Figure.savefig`.\n\n Raises\n ------\n ImportError :\n If Matplotlib is not installed then an :exc:`ImportError`\n is raised.\n\n Notes\n -----\n If the ``fname`` keyword is passed, then the figure is saved\n locally.", "id": "f14541:c0:m15"} {"signature": "def lines_scatter_args(self, line_kwargs=None, scatter_kwargs=None, pos=None):", "body": "if pos is not None:self.set_pos(pos)elif self.pos is None:self.set_pos()edge_pos = [ for e in self.edges()]for e in self.edges():ei = self.edge_index[e]edge_pos[ei] = (self.pos[e[]], self.pos[e[]])line_collecton_kwargs = {'': edge_pos,'': self.edge_color,'': (,),'': (,),'': '','': None,'': plt.cm.ocean_r,'': ,'': ,'': None,'': None,'': None,'': '','': None,}scatter_kwargs_ = {'': self.pos[:, ],'': self.pos[:, ],'': ,'': self.vertex_fill_color,'': None,'': None,'': None,'': None,'': '','': ,'': plt.cm.ocean_r,'': ,'': self.vertex_color,'': None,'': None,'': '','': None,}line_kwargs = {} if line_kwargs is None else line_kwargsscatter_kwargs = {} if scatter_kwargs is None else scatter_kwargsfor key, value in line_kwargs.items():if key in line_collecton_kwargs:line_collecton_kwargs[key] = valuefor key, value in scatter_kwargs.items():if key in scatter_kwargs_:scatter_kwargs_[key] = valuereturn line_collecton_kwargs, scatter_kwargs_", "docstring": "Returns the arguments used when plotting.\n\n Takes any keyword arguments for\n :class:`~matplotlib.collections.LineCollection` and\n :meth:`~matplotlib.axes.Axes.scatter` and returns two\n dictionaries with all the defaults set.\n\n Parameters\n ----------\n line_kwargs : dict (optional, default: ``None``)\n Any keyword arguments accepted by\n :class:`~matplotlib.collections.LineCollection`.\n scatter_kwargs : dict (optional, default: ``None``)\n Any keyword arguments accepted by\n :meth:`~matplotlib.axes.Axes.scatter`.\n\n Returns\n -------\n tuple\n A 2-tuple of dicts. The first entry is the keyword\n arguments for\n :class:`~matplotlib.collections.LineCollection` and the\n second is the keyword args for\n :meth:`~matplotlib.axes.Axes.scatter`.\n\n Notes\n -----\n If a specific keyword argument is not passed then the defaults\n are used.", "id": "f14541:c0:m16"} {"signature": "def add_edge_lengths(g):", "body": "g = _test_graph(g)g.new_edge_property('')for e in g.edges():latlon1 = g.vp(e[], '')latlon2 = g.vp(e[], '')g.set_ep(e, '', np.round(_calculate_distance(latlon1, latlon2), ))return g", "docstring": "Add add the edge lengths as a :any:`DiGraph`\n for the graph.\n\n Uses the ``pos`` vertex property to get the location of each\n vertex. 
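``draw_graph`` above renders all edges as a single matplotlib ``LineCollection`` and all vertices with one ``scatter`` call, with ``lines_scatter_args`` filling in the default keyword arguments. Below is a minimal standalone sketch of that drawing approach, assuming matplotlib is available; positions, colors, and the output filename are made up.

# Minimal sketch of the LineCollection-plus-scatter rendering used by
# draw_graph. Positions and styling are illustrative.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection

pos = np.array([[0.0, 0.0], [1.0, 0.0], [0.5, 0.9]])   # one (x, y) per vertex
edges = [(0, 1), (1, 2), (2, 0)]
segments = [(pos[u], pos[v]) for u, v in edges]         # one two-point segment per edge

fig, ax = plt.subplots(figsize=(4, 4))
ax.add_collection(LineCollection(segments, colors=[(0.3, 0.3, 0.3, 0.8)] * len(edges),
                                 linewidths=1.25))
ax.scatter(pos[:, 0], pos[:, 1], s=80,
           c=[(0.1, 0.4, 0.8, 1.0)] * len(pos), zorder=2)
ax.set_axis_off()
fig.savefig('toy_graph.png')                             # or plt.show()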
These are then used to calculate the length of an edge\n between two vertices.\n\n Parameters\n ----------\n g : :any:`networkx.DiGraph`, :class:`numpy.ndarray`, dict, \\\n ``None``, etc.\n Any object that networkx can turn into a\n :any:`DiGraph`\n\n Returns\n -------\n :class:`.QueueNetworkDiGraph`\n Returns the a graph with the ``edge_length`` edge property.\n\n Raises\n ------\n TypeError\n Raised when the parameter ``g`` is not of a type that can be\n made into a :any:`networkx.DiGraph`.", "id": "f14543:m0"} {"signature": "def _prepare_graph(g, g_colors, q_cls, q_arg, adjust_graph):", "body": "g = _test_graph(g)if adjust_graph:pos = nx.get_node_attributes(g, '')ans = nx.to_dict_of_dicts(g)g = adjacency2graph(ans, adjust=, is_directed=g.is_directed())g = QueueNetworkDiGraph(g)if len(pos) > :g.set_pos(pos)g.new_vertex_property('')g.new_vertex_property('')g.new_vertex_property('')g.new_vertex_property('')g.new_edge_property('')g.new_edge_property('')g.new_edge_property('')g.new_edge_property('')queues = _set_queues(g, q_cls, q_arg, '' in g.vertex_properties())if '' not in g.vertex_properties():g.set_pos()for k, e in enumerate(g.edges()):g.set_ep(e, '', )g.set_ep(e, '', )if e[] == e[]:g.set_ep(e, '', queues[k].colors[''])else:g.set_ep(e, '', queues[k].colors[''])for v in g.nodes():g.set_vp(v, '', )g.set_vp(v, '', )e = (v, v)if g.is_edge(e):g.set_vp(v, '', queues[g.edge_index[e]]._current_color())g.set_vp(v, '', queues[g.edge_index[e]]._current_color())else:g.set_vp(v, '', g_colors[''])g.set_vp(v, '', g_colors[''])return g, queues", "docstring": "Prepares a graph for use in :class:`.QueueNetwork`.\n\n This function is called by ``__init__`` in the\n :class:`.QueueNetwork` class. It creates the :class:`.QueueServer`\n instances that sit on the edges, and sets various edge and node\n properties that are used when drawing the graph.\n\n Parameters\n ----------\n g : :any:`networkx.DiGraph`, :class:`numpy.ndarray`, dict, \\\n ``None``, etc.\n Any object that networkx can turn into a\n :any:`DiGraph`\n g_colors : dict\n A dictionary of colors. The specific keys used are\n ``vertex_color`` and ``vertex_fill_color`` for vertices that\n do not have any loops. 
Set :class:`.QueueNetwork` for the\n default values passed.\n q_cls : dict\n A dictionary where the keys are integers that represent an edge\n type, and the values are :class:`.QueueServer` classes.\n q_args : dict\n A dictionary where the keys are integers that represent an edge\n type, and the values are the arguments that are used when\n creating an instance of that :class:`.QueueServer` class.\n adjust_graph : bool\n Specifies whether the graph will be adjusted using\n :func:`.adjacency2graph`.\n\n Returns\n -------\n g : :class:`.QueueNetworkDiGraph`\n queues : list\n A list of :class:`QueueServers<.QueueServer>` where\n ``queues[k]`` is the ``QueueServer`` that sets on the edge with\n edge index ``k``.\n\n Notes\n -----\n The graph ``g`` should have the ``edge_type`` edge property map.\n If it does not then an ``edge_type`` edge property is\n created and set to 1.\n\n The following properties are set by each queue: ``vertex_color``,\n ``vertex_fill_color``, ``vertex_fill_color``, ``edge_color``.\n See :class:`.QueueServer` for more on setting these values.\n\n The following properties are assigned as a properties to the graph;\n their default values for each edge or vertex is shown:\n\n * ``vertex_pen_width``: ``1``,\n * ``vertex_size``: ``8``,\n * ``edge_control_points``: ``[]``\n * ``edge_marker_size``: ``8``\n * ``edge_pen_width``: ``1.25``\n\n Raises\n ------\n TypeError\n Raised when the parameter ``g`` is not of a type that can be\n made into a :any:`networkx.DiGraph`.", "id": "f14543:m1"} {"signature": "def _calculate_distance(latlon1, latlon2):", "body": "lat1, lon1 = latlon1lat2, lon2 = latlon2dlon = lon2 - lon1dlat = lat2 - lat1R = a = np.sin(dlat / )** + np.cos(lat1) * np.cos(lat2) * (np.sin(dlon / ))**c = * np.pi * R * np.arctan2(np.sqrt(a), np.sqrt( - a)) / return c", "docstring": "Calculates the distance between two points on earth.", "id": "f14544:m0"} {"signature": "def graph2dict(g, return_dict_of_dict=True):", "body": "if not isinstance(g, nx.DiGraph):g = QueueNetworkDiGraph(g)dict_of_dicts = nx.to_dict_of_dicts(g)if return_dict_of_dict:return dict_of_dictselse:return {k: list(val.keys()) for k, val in dict_of_dicts.items()}", "docstring": "Takes a graph and returns an adjacency list.\n\n Parameters\n ----------\n g : :any:`networkx.DiGraph`, :any:`networkx.Graph`, etc.\n Any object that networkx can turn into a\n :any:`DiGraph`.\n return_dict_of_dict : bool (optional, default: ``True``)\n Specifies whether this function will return a dict of dicts\n or a dict of lists.\n\n Returns\n -------\n adj : dict\n An adjacency representation of graph as a dictionary of\n dictionaries, where a key is the vertex index for a vertex\n ``v`` and the values are :class:`dicts<.dict>` with keys for\n the vertex index and values as edge properties.\n\n Examples\n --------\n >>> import queueing_tool as qt\n >>> import networkx as nx\n >>> adj = {0: [1, 2], 1: [0], 2: [0, 3], 3: [2]}\n >>> g = nx.DiGraph(adj)\n >>> qt.graph2dict(g, return_dict_of_dict=True)\n ... 
# doctest: +NORMALIZE_WHITESPACE\n {0: {1: {}, 2: {}},\n 1: {0: {}},\n 2: {0: {}, 3: {}},\n 3: {2: {}}}\n >>> qt.graph2dict(g, return_dict_of_dict=False)\n {0: [1, 2], 1: [0], 2: [0, 3], 3: [2]}", "id": "f14544:m2"} {"signature": "def generate_transition_matrix(g, seed=None):", "body": "g = _test_graph(g)if isinstance(seed, numbers.Integral):np.random.seed(seed)nV = g.number_of_nodes()mat = np.zeros((nV, nV))for v in g.nodes():ind = [e[] for e in sorted(g.out_edges(v))]deg = len(ind)if deg == :mat[v, ind] = elif deg > :probs = np.ceil(np.random.rand(deg) * ) / if np.isclose(np.sum(probs), ):probs[np.random.randint(deg)] = mat[v, ind] = probs / np.sum(probs)return mat", "docstring": "Generates a random transition matrix for the graph ``g``.\n\n Parameters\n ----------\n g : :any:`networkx.DiGraph`, :class:`numpy.ndarray`, dict, etc.\n Any object that :any:`DiGraph` accepts.\n seed : int (optional)\n An integer used to initialize numpy's psuedo-random number\n generator.\n\n Returns\n -------\n mat : :class:`~numpy.ndarray`\n Returns a transition matrix where ``mat[i, j]`` is the\n probability of transitioning from vertex ``i`` to vertex ``j``.\n If there is no edge connecting vertex ``i`` to vertex ``j``\n then ``mat[i, j] = 0``.", "id": "f14545:m0"} {"signature": "def generate_random_graph(num_vertices=, prob_loop=, **kwargs):", "body": "g = minimal_random_graph(num_vertices, **kwargs)for v in g.nodes():e = (v, v)if not g.is_edge(e):if np.random.uniform() < prob_loop:g.add_edge(*e)g = set_types_random(g, **kwargs)return g", "docstring": "Creates a random graph where the edges have different types.\n\n This method calls :func:`.minimal_random_graph`, and then adds\n a loop to each vertex with ``prob_loop`` probability. It then\n calls :func:`.set_types_random` on the resulting graph.\n\n Parameters\n ----------\n num_vertices : int (optional, default: 250)\n The number of vertices in the graph.\n prob_loop : float (optional, default: 0.5)\n The probability that a loop gets added to a vertex.\n **kwargs :\n Any parameters to send to :func:`.minimal_random_graph` or\n :func:`.set_types_random`.\n\n Returns\n -------\n :class:`.QueueNetworkDiGraph`\n A graph with the position of the vertex set as a property.\n The position property is called ``pos``. Also, the ``edge_type``\n edge property is set for each edge.\n\n Examples\n --------\n The following generates a directed graph with 50 vertices where half\n the edges are type 1 and 1/4th are type 2 and 1/4th are type 3:\n\n >>> import queueing_tool as qt\n >>> pTypes = {1: 0.5, 2: 0.25, 3: 0.25}\n >>> g = qt.generate_random_graph(100, proportions=pTypes, seed=17)\n >>> non_loops = [e for e in g.edges() if e[0] != e[1]]\n >>> p1 = np.sum([g.ep(e, 'edge_type') == 1 for e in non_loops])\n >>> float(p1) / len(non_loops) # doctest: +ELLIPSIS\n 0.486...\n >>> p2 = np.sum([g.ep(e, 'edge_type') == 2 for e in non_loops])\n >>> float(p2) / len(non_loops) # doctest: +ELLIPSIS\n 0.249...\n >>> p3 = np.sum([g.ep(e, 'edge_type') == 3 for e in non_loops])\n >>> float(p3) / len(non_loops) # doctest: +ELLIPSIS\n 0.264...\n\n To make an undirected graph with 25 vertices where there are 4\n different edge types with random proportions:\n\n >>> p = np.random.rand(4)\n >>> p = p / sum(p)\n >>> p = {k + 1: p[k] for k in range(4)}\n >>> g = qt.generate_random_graph(num_vertices=25, is_directed=False, proportions=p)\n\n Note that none of the edge types in the above example are 0. 
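``generate_transition_matrix`` above gives every vertex with out edges a random probability row supported only on its out-neighbours, and ``set_transitions`` later checks that each such row sums to one. Here is a small sketch of that construction and check, assuming numpy and networkx; the graph and seed are illustrative.

# Sketch: build a random routing matrix supported on each vertex's out edges,
# then run the row-sum check that set_transitions performs. Illustrative only.
import numpy as np
import networkx as nx

rng = np.random.default_rng(96)
g = nx.DiGraph({0: [1, 2], 1: [2, 3], 2: [0], 3: []})
n = g.number_of_nodes()
mat = np.zeros((n, n))

for v in g.nodes():
    targets = [e[1] for e in sorted(g.out_edges(v))]
    if targets:                                   # terminal vertices keep a zero row
        probs = rng.random(len(targets))
        mat[v, targets] = probs / probs.sum()

non_terminal = np.array([g.out_degree(v) > 0 for v in g.nodes()])
assert np.allclose(mat[non_terminal].sum(axis=1), 1)
print(np.round(mat, 3))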
It is\n recommended use edge type indices starting at 1, since 0 is\n typically used for terminal edges.", "id": "f14545:m1"} {"signature": "def generate_pagerank_graph(num_vertices=, **kwargs):", "body": "g = minimal_random_graph(num_vertices, **kwargs)r = np.zeros(num_vertices)for k, pr in nx.pagerank(g).items():r[k] = prg = set_types_rank(g, rank=r, **kwargs)return g", "docstring": "Creates a random graph where the vertex types are\n selected using their pagerank.\n\n Calls :func:`.minimal_random_graph` and then\n :func:`.set_types_rank` where the ``rank`` keyword argument\n is given by :func:`networkx.pagerank`.\n\n Parameters\n ----------\n num_vertices : int (optional, the default is 250)\n The number of vertices in the graph.\n **kwargs :\n Any parameters to send to :func:`.minimal_random_graph` or\n :func:`.set_types_rank`.\n\n Returns\n -------\n :class:`.QueueNetworkDiGraph`\n A graph with a ``pos`` vertex property and the ``edge_type``\n edge property.\n\n Notes\n -----\n This function sets the edge types of a graph to be either 1, 2, or\n 3. It sets the vertices to type 2 by selecting the top\n ``pType2 * g.number_of_nodes()`` vertices given by the\n :func:`~networkx.pagerank` of the graph. A loop is added\n to all vertices identified this way (if one does not exist\n already). It then randomly sets vertices close to the type 2\n vertices as type 3, and adds loops to these vertices as well. These\n loops then have edge types that correspond to the vertices type.\n The rest of the edges are set to type 1.", "id": "f14545:m2"} {"signature": "def minimal_random_graph(num_vertices, seed=None, **kwargs):", "body": "if isinstance(seed, numbers.Integral):np.random.seed(seed)points = np.random.random((num_vertices, )) * edges = []for k in range(num_vertices - ):for j in range(k + , num_vertices):v = points[k] - points[j]edges.append((k, j, v[]** + v[]**))mytype = [('', int), ('', int), ('', np.float)]edges = np.array(edges, dtype=mytype)edges = np.sort(edges, order='')unionF = UnionFind([k for k in range(num_vertices)])g = nx.Graph()for n1, n2, dummy in edges:unionF.union(n1, n2)g.add_edge(n1, n2)if unionF.nClusters == :breakpos = {j: p for j, p in enumerate(points)}g = QueueNetworkDiGraph(g.to_directed())g.set_pos(pos)return g", "docstring": "Creates a connected graph with random vertex locations.\n\n Parameters\n ----------\n num_vertices : int\n The number of vertices in the graph.\n seed : int (optional)\n An integer used to initialize numpy's psuedorandom number\n generators.\n **kwargs :\n Unused.\n\n Returns\n -------\n :class:`.QueueNetworkDiGraph`\n A graph with a ``pos`` vertex property for each vertex's\n position.\n\n Notes\n -----\n This function first places ``num_vertices`` points in the unit square\n randomly (using the uniform distribution). 
Then, for every vertex\n ``v``, all other vertices with Euclidean distance less or equal to\n ``r`` are connect by an edge --- where ``r`` is the smallest number\n such that the graph ends up connected.", "id": "f14545:m3"} {"signature": "def set_types_random(g, proportions=None, loop_proportions=None, seed=None,**kwargs):", "body": "g = _test_graph(g)if isinstance(seed, numbers.Integral):np.random.seed(seed)if proportions is None:proportions = {k: / for k in range(, )}if loop_proportions is None:loop_proportions = {k: / for k in range()}edges = [e for e in g.edges() if e[] != e[]]loops = [e for e in g.edges() if e[] == e[]]props = list(proportions.values())lprops = list(loop_proportions.values())if not np.isclose(sum(props), ):raise ValueError(\"\")if not np.isclose(sum(lprops), ):raise ValueError(\"\")eTypes = {}types = list(proportions.keys())values = np.random.choice(types, size=len(edges), replace=True, p=props)for k, e in enumerate(edges):eTypes[e] = values[k]types = list(loop_proportions.keys())values = np.random.choice(types, size=len(loops), replace=True, p=lprops)for k, e in enumerate(loops):eTypes[e] = values[k]g.new_edge_property('')for e in g.edges():g.set_ep(e, '', eTypes[e])return g", "docstring": "Randomly sets ``edge_type`` (edge type) properties of the graph.\n\n This function randomly assigns each edge a type. The probability of\n an edge being a specific type is proscribed in the\n ``proportions``, ``loop_proportions`` variables.\n\n Parameters\n ----------\n g : :any:`networkx.DiGraph`, :class:`numpy.ndarray`, dict, etc.\n Any object that :any:`DiGraph` accepts.\n proportions : dict (optional, default: ``{k: 0.25 for k in range(1, 4)}``)\n A dictionary of edge types and proportions, where the keys are\n the types and the values are the proportion of non-loop edges\n that are expected to be of that type. The values can must sum\n to one.\n loop_proportions : dict (optional, default: ``{k: 0.25 for k in range(4)}``)\n A dictionary of edge types and proportions, where the keys are\n the types and the values are the proportion of loop edges\n that are expected to be of that type. The values can must sum\n to one.\n seed : int (optional)\n An integer used to initialize numpy's psuedorandom number\n generator.\n **kwargs :\n Unused.\n\n Returns\n -------\n :class:`.QueueNetworkDiGraph`\n Returns the a graph with an ``edge_type`` edge property.\n\n Raises\n ------\n TypeError\n Raised when the parameter ``g`` is not of a type that can be\n made into a :any:`networkx.DiGraph`.\n\n ValueError\n Raises a :exc:`~ValueError` if the ``pType`` values do not sum\n to one.\n\n Notes\n -----\n If ``pTypes`` is not explicitly specified in the arguments, then it\n defaults to four types in the graph (types 0, 1, 2, and 3). 
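``minimal_random_graph`` above scatters points uniformly in the unit square, sorts every candidate edge by squared length, and keeps adding the shortest remaining edge -- tracking connectivity with the ``UnionFind`` structure documented earlier -- until only one cluster remains. Below is a compact sketch of that loop with an inline union-find (path halving here, versus the path compression in the ``find`` record above); all names are illustrative and this is not the library's code.

# Sketch: connect random points by adding shortest candidate edges first,
# stopping once a tiny union-find reports a single cluster. Mirrors
# minimal_random_graph; illustrative only.
import itertools
import numpy as np

rng = np.random.default_rng(0)
n = 20
points = rng.random((n, 2))

leader = list(range(n))
def find(x):
    while leader[x] != x:
        leader[x] = leader[leader[x]]        # path halving
        x = leader[x]
    return x

candidates = sorted(
    itertools.combinations(range(n), 2),
    key=lambda e: np.sum((points[e[0]] - points[e[1]]) ** 2),
)

edges, clusters = [], n
for i, j in candidates:
    edges.append((i, j))                     # the library keeps every edge it examines
    ri, rj = find(i), find(j)
    if ri != rj:
        leader[rj] = ri
        clusters -= 1
    if clusters == 1:
        break

print(len(edges), 'edges added before the', n, 'points became connected')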
It sets\n non-loop edges to be either 1, 2, or 3 33\\% chance, and loops are\n types 0, 1, 2, 3 with 25\\% chance.", "id": "f14545:m4"} {"signature": "def set_types_rank(g, rank, pType2=, pType3=, seed=None, **kwargs):", "body": "g = _test_graph(g)if isinstance(seed, numbers.Integral):np.random.seed(seed)tmp = np.sort(np.array(rank))nDests = int(np.ceil(g.number_of_nodes() * pType2))dests = np.where(rank >= tmp[-nDests])[]if '' not in g.vertex_properties():g.set_pos()dest_pos = np.array([g.vp(v, '') for v in dests])nFCQ = int(pType3 * g.number_of_nodes())min_g_dist = np.ones(nFCQ) * np.inftyind_g_dist = np.ones(nFCQ, int)r, theta = np.random.random(nFCQ) / , np.random.random(nFCQ) * xy_pos = np.array([r * np.cos(theta), r * np.sin(theta)]).transpose()g_pos = xy_pos + dest_pos[np.array(np.mod(np.arange(nFCQ), nDests), int)]for v in g.nodes():if v not in dests:tmp = np.array([_calculate_distance(g.vp(v, ''), g_pos[k, :]) for k in range(nFCQ)])min_g_dist = np.min((tmp, min_g_dist), )ind_g_dist[min_g_dist == tmp] = vind_g_dist = np.unique(ind_g_dist)fcqs = set(ind_g_dist[:min(nFCQ, len(ind_g_dist))])dests = set(dests)g.new_vertex_property('')for v in g.nodes():if v in dests:g.set_vp(v, '', )if not g.is_edge((v, v)):g.add_edge(v, v)elif v in fcqs:g.set_vp(v, '', )if not g.is_edge((v, v)):g.add_edge(v, v)g.new_edge_property('')for e in g.edges():g.set_ep(e, '', )for v in g.nodes():if g.vp(v, '') in [, ]:e = (v, v)if g.vp(v, '') == :g.set_ep(e, '', )else:g.set_ep(e, '', )return g", "docstring": "Creates a stylized graph. Sets edge and types using `pagerank`_.\n\n This function sets the edge types of a graph to be either 1, 2, or\n 3. It sets the vertices to type 2 by selecting the top\n ``pType2 * g.number_of_nodes()`` vertices given by the\n :func:`~networkx.pagerank` of the graph. A loop is added\n to all vertices identified this way (if one does not exist\n already). It then randomly sets vertices close to the type 2\n vertices as type 3, and adds loops to these vertices as well. These\n loops then have edge types the correspond to the vertices type. The\n rest of the edges are set to type 1.\n\n .. 
_pagerank: http://en.wikipedia.org/wiki/PageRank\n\n Parameters\n ----------\n g : :any:`networkx.DiGraph`, :class:`~numpy.ndarray`, dict, etc.\n Any object that :any:`DiGraph` accepts.\n rank : :class:`numpy.ndarray`\n An ordering of the vertices.\n pType2 : float (optional, default: 0.1)\n Specifies the proportion of vertices that will be of type 2.\n pType3 : float (optional, default: 0.1)\n Specifies the proportion of vertices that will be of type 3 and\n that are near pType2 vertices.\n seed : int (optional)\n An integer used to initialize numpy's psuedo-random number\n generator.\n **kwargs :\n Unused.\n\n Returns\n -------\n :class:`.QueueNetworkDiGraph`\n Returns the a graph with an ``edge_type`` edge property.\n\n Raises\n ------\n TypeError\n Raised when the parameter ``g`` is not of a type that can be\n made into a :any:`DiGraph`.", "id": "f14545:m5"} {"signature": "def split_code_at_show(text):", "body": "parts = []is_doctest = contains_doctest(text)part = []for line in text.split(\"\"):if (not is_doctest and line.strip() == '') or(is_doctest and line.strip() == ''):part.append(line)parts.append(\"\".join(part))part = []else:part.append(line)if \"\".join(part).strip():parts.append(\"\".join(part))return parts", "docstring": "Split code at plt.show()", "id": "f14548:m8"} {"signature": "def out_of_date(original, derived):", "body": "return (not os.path.exists(derived)or os.stat(derived).st_mtime < os.stat(original).st_mtime)", "docstring": "Returns True if derivative is out-of-date wrt original,\nboth of which are full file paths.", "id": "f14548:m10"} {"signature": "def makefig(code, code_path, output_dir, output_base, config):", "body": "default_dpi = {'': , '': , '': }formats = []for fmt in config.plot_formats:if isinstance(fmt, str):formats.append((fmt, default_dpi.get(fmt, )))elif type(fmt) in (tuple, list) and len(fmt)==:formats.append((str(fmt[]), int(fmt[])))else:raise PlotError('' % fmt)code_pieces = split_code_at_show(code)all_exists = Trueimg = ImageFile(output_base, output_dir)for format, dpi in formats:if out_of_date(code_path, img.filename(format)):all_exists = Falsebreakimg.formats.append(format)if all_exists:return [(code, [img])]results = []all_exists = Truefor i, code_piece in enumerate(code_pieces):images = []for j in range():img = ImageFile('' % (output_base, i, j), output_dir)for format, dpi in formats:if out_of_date(code_path, img.filename(format)):all_exists = Falsebreakimg.formats.append(format)if not all_exists:all_exists = (j > )breakimages.append(img)if not all_exists:breakresults.append((code_piece, images))if all_exists:return resultsresults = []ns = {}for i, code_piece in enumerate(code_pieces):plt.close('')run_code(code_piece, code_path, ns)images = []fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()for j, figman in enumerate(fig_managers):if len(fig_managers) == and len(code_pieces) == :img = ImageFile(output_base, output_dir)else:img = ImageFile(\"\" % (output_base, i, j),output_dir)images.append(img)for format, dpi in formats:try:figman.canvas.figure.savefig(img.filename(format), dpi=dpi)except exceptions.BaseException as err:raise PlotError(traceback.format_exc())img.formats.append(format)results.append((code_piece, images))return results", "docstring": "Run a pyplot script *code* and save the images under *output_dir*\nwith file names derived from *output_base*", "id": "f14548:m11"} {"signature": "def __init__(self, tree, file = sys.stdout, single_line_functions=False):", "body": "self.f = fileself._single_func = 
single_line_functionsself._do_indent = Trueself._indent = self._dispatch(tree)self._write(\"\")self.f.flush()", "docstring": "Unparser(tree, file=sys.stdout) -> None.\n\n Print the source for tree to file.", "id": "f14555:c0:m0"} {"signature": "def _fill(self, text = \"\"):", "body": "if self._do_indent:self._write(\"\"+\"\"*self._indent + text)else:self._write(text)", "docstring": "Indent a piece of text, according to the current indentation level", "id": "f14555:c0:m1"} {"signature": "def _write(self, text):", "body": "self.f.write(text)", "docstring": "Append a piece of text to the current line.", "id": "f14555:c0:m2"} {"signature": "def _enter(self):", "body": "self._write(\"\")self._indent += ", "docstring": "Print ':', and increase the indentation.", "id": "f14555:c0:m3"} {"signature": "def _leave(self):", "body": "self._indent -= ", "docstring": "Decrease the indentation level.", "id": "f14555:c0:m4"} {"signature": "def _dispatch(self, tree):", "body": "if isinstance(tree, list):for t in tree:self._dispatch(t)returnmeth = getattr(self, \"\"+tree.__class__.__name__)if tree.__class__.__name__ == '' and not self._do_indent:returnmeth(tree)", "docstring": "_dispatcher function, _dispatching tree type T to method _T.", "id": "f14555:c0:m5"} {"signature": "def _AssAttr(self, t):", "body": "self._dispatch(t.expr)self._write(''+t.attrname)", "docstring": "Handle assigning an attribute of an object", "id": "f14555:c0:m8"} {"signature": "def _Assign(self, t):", "body": "self._fill()for target in t.nodes:self._dispatch(target)self._write(\"\")self._dispatch(t.expr)if not self._do_indent:self._write('')", "docstring": "Expression Assignment such as \"a = 1\".\n\n This only handles assignment in expressions. Keyword assignment\n is handled separately.", "id": "f14555:c0:m9"} {"signature": "def _AssName(self, t):", "body": "self._Name(t)", "docstring": "Name on left hand side of expression.\n\n Treat just like a name on the right side of an expression.", "id": "f14555:c0:m10"} {"signature": "def _AssTuple(self, t):", "body": "for element in t.nodes[:-]:self._dispatch(element)self._write(\"\")last_element = t.nodes[-]self._dispatch(last_element)", "docstring": "Tuple on left hand side of an expression.", "id": "f14555:c0:m11"} {"signature": "def _AugAssign(self, t):", "body": "self._fill()self._dispatch(t.node)self._write(''+t.op+'')self._dispatch(t.expr)if not self._do_indent:self._write('')", "docstring": "+=,-=,*=,/=,**=, etc. 
operations", "id": "f14555:c0:m12"} {"signature": "def _Bitand(self, t):", "body": "for i, node in enumerate(t.nodes):self._write(\"\")self._dispatch(node)self._write(\"\")if i != len(t.nodes)-:self._write(\"\")", "docstring": "Bit and operation.", "id": "f14555:c0:m13"} {"signature": "def _Bitor(self, t):", "body": "for i, node in enumerate(t.nodes):self._write(\"\")self._dispatch(node)self._write(\"\")if i != len(t.nodes)-:self._write(\"\")", "docstring": "Bit or operation", "id": "f14555:c0:m14"} {"signature": "def _CallFunc(self, t):", "body": "self._dispatch(t.node)self._write(\"\")comma = Falsefor e in t.args:if comma: self._write(\"\")else: comma = Trueself._dispatch(e)if t.star_args:if comma: self._write(\"\")else: comma = Trueself._write(\"\")self._dispatch(t.star_args)if t.dstar_args:if comma: self._write(\"\")else: comma = Trueself._write(\"\")self._dispatch(t.dstar_args)self._write(\"\")", "docstring": "Function call.", "id": "f14555:c0:m15"} {"signature": "def _Const(self, t):", "body": "self._dispatch(t.value)", "docstring": "A constant value such as an integer value, 3, or a string, \"hello\".", "id": "f14555:c0:m17"} {"signature": "def _Decorators(self, t):", "body": "for node in t.nodes:self._dispatch(node)", "docstring": "Handle function decorators (eg. @has_units)", "id": "f14555:c0:m18"} {"signature": "def _Discard(self, t):", "body": "self._fill()self._dispatch(t.expr)", "docstring": "Node for when return value is ignored such as in \"foo(a)\".", "id": "f14555:c0:m20"} {"signature": "def _From(self, t):", "body": "self._fill(\"\")self._write(t.modname)self._write(\"\")for i, (name,asname) in enumerate(t.names):if i != :self._write(\"\")self._write(name)if asname is not None:self._write(\"\"+asname)", "docstring": "Handle \"from xyz import foo, bar as baz\".", "id": "f14555:c0:m23"} {"signature": "def _Function(self, t):", "body": "if t.decorators is not None:self._fill(\"\")self._dispatch(t.decorators)self._fill(\"\"+t.name + \"\")defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)for i, arg in enumerate(zip(t.argnames, defaults)):self._write(arg[])if arg[] is not None:self._write('')self._dispatch(arg[])if i < len(t.argnames)-:self._write('')self._write(\"\")if self._single_func:self._do_indent = Falseself._enter()self._dispatch(t.code)self._leave()self._do_indent = True", "docstring": "Handle function definitions", "id": "f14555:c0:m24"} {"signature": "def _Getattr(self, t):", "body": "if isinstance(t.expr, (Div, Mul, Sub, Add)):self._write('')self._dispatch(t.expr)self._write('')else:self._dispatch(t.expr)self._write(''+t.attrname)", "docstring": "Handle getting an attribute of an object", "id": "f14555:c0:m25"} {"signature": "def _Import(self, t):", "body": "self._fill(\"\")for i, (name,asname) in enumerate(t.names):if i != :self._write(\"\")self._write(name)if asname is not None:self._write(\"\"+asname)", "docstring": "Handle \"import xyz.foo\".", "id": "f14555:c0:m28"} {"signature": "def _Keyword(self, t):", "body": "self._write(t.name)self._write(\"\")self._dispatch(t.expr)", "docstring": "Keyword value assignment within function calls and definitions.", "id": "f14555:c0:m29"} {"signature": "def _str_member_list(self, name):", "body": "out = []if self[name]:out += ['' % name, '']prefix = getattr(self, '', '')if prefix:prefix = '' % prefixautosum = []others = []for param, param_type, desc in self[name]:param = param.strip()param_obj = getattr(self._obj, param, None)if not (callable(param_obj)or isinstance(param_obj, property)or 
inspect.isgetsetdescriptor(param_obj)):param_obj = Noneif param_obj and (pydoc.getdoc(param_obj) or not desc):autosum += [\"\" % (prefix, param)]else:others.append((param, param_type, desc))if autosum:out += ['']if self.class_members_toctree:out += ['']out += [''] + autosumif others:maxlen_0 = max(, max([len(x[]) for x in others]))hdr = sixu(\"\")*maxlen_0 + sixu(\"\") + sixu(\"\")*fmt = sixu('') % (maxlen_0,)out += ['', hdr]for param, param_type, desc in others:desc = sixu(\"\").join(x.strip() for x in desc).strip()if param_type:desc = \"\" % (param_type, desc)out += [fmt % (param.strip(), desc)]out += [hdr]out += ['']return out", "docstring": "Generate a member listing, autosummary:: table where possible,\nand a table where not.", "id": "f14556:c0:m11"} {"signature": "def dedent_lines(lines):", "body": "return textwrap.dedent(\"\".join(lines)).split(\"\")", "docstring": "Deindent a list of lines maximally", "id": "f14557:m1"} {"signature": "def __init__(self, data):", "body": "if isinstance(data,list):self._str = dataelse:self._str = data.split('') self.reset()", "docstring": "Parameters\n----------\ndata : str\n String with lines separated by '\\n'.", "id": "f14557:c0:m0"} {"signature": "def _parse_see_also(self, content):", "body": "items = []def parse_item_name(text):\"\"\"\"\"\"m = self._name_rgx.match(text)if m:g = m.groups()if g[] is None:return g[], Noneelse:return g[], g[]raise ValueError(\"\" % text)def push_item(name, rest):if not name:returnname, role = parse_item_name(name)items.append((name, list(rest), role))del rest[:]current_func = Nonerest = []for line in content:if not line.strip(): continuem = self._name_rgx.match(line)if m and line[m.end():].strip().startswith(''):push_item(current_func, rest)current_func, line = line[:m.end()], line[m.end():]rest = [line.split('', )[].strip()]if not rest[]:rest = []elif not line.startswith(''):push_item(current_func, rest)current_func = Noneif '' in line:for func in line.split(''):if func.strip():push_item(func, [])elif line.strip():current_func = lineelif current_func is not None:rest.append(line.strip())push_item(current_func, rest)return items", "docstring": "func_name : Descriptive text\n continued text\nanother_func_name : Descriptive text\nfunc_name1, func_name2, :meth:`func_name`, func_name3", "id": "f14557:c1:m8"} {"signature": "def _parse_index(self, section, content):", "body": "def strip_each_in(lst):return [s.strip() for s in lst]out = {}section = section.split('')if len(section) > :out[''] = strip_each_in(section[].split(''))[]for line in content:line = line.split('')if len(line) > :out[line[]] = strip_each_in(line[].split(''))return out", "docstring": ".. 
index: default\n :refguide: something, else, and more", "id": "f14557:c1:m9"} {"signature": "def _parse_summary(self):", "body": "if self._is_at_section():returnwhile True:summary = self._doc.read_to_next_empty_line()summary_str = \"\".join([s.strip() for s in summary]).strip()if re.compile('').match(summary_str):self[''] = summary_strif not self._is_at_section():continuebreakif summary is not None:self[''] = summaryif not self._is_at_section():self[''] = self._read_to_next_section()", "docstring": "Grab signature (if given) and summary", "id": "f14557:c1:m10"} {"signature": "def import_phantom_module(xml_file):", "body": "import lxml.etree as etreeobject_cache = {}tree = etree.parse(xml_file)root = tree.getroot()all_nodes = dict([(n.attrib[''], n) for n in root])def _get_bases(node, recurse=False):bases = [x.attrib[''] for x in node.findall('')]if recurse:j = while True:try:b = bases[j]except IndexError: breakif b in all_nodes:bases.extend(_get_bases(all_nodes[b]))j += return basestype_index = ['', '', '', '']def base_cmp(a, b):x = cmp(type_index.index(a.tag), type_index.index(b.tag))if x != : return xif a.tag == '' and b.tag == '':a_bases = _get_bases(a, recurse=True)b_bases = _get_bases(b, recurse=True)x = cmp(len(a_bases), len(b_bases))if x != : return xif a.attrib[''] in b_bases: return -if b.attrib[''] in a_bases: return return cmp(a.attrib[''].count(''), b.attrib[''].count(''))nodes = root.getchildren()nodes.sort(base_cmp)for node in nodes:name = node.attrib['']doc = (node.text or '').decode('') + \"\"if doc == \"\": doc = \"\"parent = namewhile True:parent = ''.join(parent.split('')[:-])if not parent: breakif parent in object_cache: breakobj = imp.new_module(parent)object_cache[parent] = objsys.modules[parent] = objif node.tag == '':obj = imp.new_module(name)obj.__doc__ = docsys.modules[name] = objelif node.tag == '':bases = [object_cache[b] for b in _get_bases(node)if b in object_cache]bases.append(object)init = lambda self: Noneinit.__doc__ = docobj = type(name, tuple(bases), {'': doc, '': init})obj.__name__ = name.split('')[-]elif node.tag == '':funcname = node.attrib[''].split('')[-]argspec = node.attrib.get('')if argspec:argspec = re.sub('', '', argspec)doc = \"\" % (funcname, argspec, doc)obj = lambda: obj.__argspec_is_invalid_ = Trueif sys.version_info[] >= :obj.__name__ = funcnameelse:obj.func_name = funcnameobj.__name__ = nameobj.__doc__ = docif inspect.isclass(object_cache[parent]):obj.__objclass__ = object_cache[parent]else:class Dummy(object): passobj = Dummy()obj.__name__ = nameobj.__doc__ = docif inspect.isclass(object_cache[parent]):obj.__get__ = lambda: Noneobject_cache[name] = objif parent:if inspect.ismodule(object_cache[parent]):obj.__module__ = parentsetattr(object_cache[parent], name.split('')[-], obj)for node in root:obj = object_cache.get(node.attrib[''])if obj is None: continuefor ref in node.findall(''):if node.tag == '':if ref.attrib[''].startswith(node.attrib[''] + ''):setattr(obj, ref.attrib[''],object_cache.get(ref.attrib['']))else:setattr(obj, ref.attrib[''],object_cache.get(ref.attrib['']))", "docstring": "Insert a fake Python module to sys.modules, based on a XML file.\n\nThe XML file is expected to conform to Pydocweb DTD. 
The fake\nmodule will contain dummy objects, which guarantee the following:\n\n- Docstrings are correct.\n- Class inheritance relationships are correct (if present in XML).\n- Function argspec is *NOT* correct (even if present in XML).\n Instead, the function signature is prepended to the function docstring.\n- Class attributes are *NOT* correct; instead, they are dummy objects.\n\nParameters\n----------\nxml_file : str\n Name of an XML file to read", "id": "f14559:m2"} {"signature": "def looks_like_issubclass(obj, classname):", "body": "t = objif t.__name__ == classname:return Truefor klass in t.__mro__:if klass.__name__ == classname:return Truereturn False", "docstring": "Return True if the object has a class or superclass with the given class\n name.\n\n Ignores old-style classes.", "id": "f14560:m0"} {"signature": "def strip_comment_marker(text):", "body": "lines = []for line in text.splitlines():lines.append(line.lstrip(''))text = textwrap.dedent(''.join(lines))return text", "docstring": "Strip # markers at the front of a block of comment text.", "id": "f14562:m0"} {"signature": "def get_class_traits(klass):", "body": "source = inspect.getsource(klass)cb = CommentBlocker()cb.process_file(StringIO(source))mod_ast = compiler.parse(source)class_ast = mod_ast.node.nodes[]for node in class_ast.code.nodes:if isinstance(node, compiler.ast.Assign):name = node.nodes[].namerhs = unparse(node.expr).strip()doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))yield name, rhs, doc", "docstring": "Yield all of the documentation for trait definitions on a class object.", "id": "f14562:m1"} {"signature": "def add(self, string, start, end, line):", "body": "self.start_lineno = min(self.start_lineno, start[])self.end_lineno = max(self.end_lineno, end[])self.text += string", "docstring": "Add a new comment line.", "id": "f14562:c0:m1"} {"signature": "def add(self, string, start, end, line):", "body": "if string.strip():self.start_lineno = min(self.start_lineno, start[])self.end_lineno = max(self.end_lineno, end[])", "docstring": "Add lines to the block.", "id": "f14562:c1:m1"} {"signature": "def process_file(self, file):", "body": "if sys.version_info[] >= :nxt = file.__next__else:nxt = file.nextfor token in tokenize.generate_tokens(nxt):self.process_token(*token)self.make_index()", "docstring": "Process a file object.", "id": "f14562:c2:m1"} {"signature": "def process_token(self, kind, string, start, end, line):", "body": "if self.current_block.is_comment:if kind == tokenize.COMMENT:self.current_block.add(string, start, end, line)else:self.new_noncomment(start[], end[])else:if kind == tokenize.COMMENT:self.new_comment(string, start, end, line)else:self.current_block.add(string, start, end, line)", "docstring": "Process a single token.", "id": "f14562:c2:m2"} {"signature": "def new_noncomment(self, start_lineno, end_lineno):", "body": "block = NonComment(start_lineno, end_lineno)self.blocks.append(block)self.current_block = block", "docstring": "We are transitioning from a noncomment to a comment.", "id": "f14562:c2:m3"} {"signature": "def new_comment(self, string, start, end, line):", "body": "prefix = line[:start[]]if prefix.strip():self.current_block.add(string, start, end, line)else:block = Comment(start[], end[], string)self.blocks.append(block)self.current_block = block", "docstring": "Possibly add a new comment.\n\n Only adds a new comment if this comment is the only thing on the line.\n Otherwise, it extends the noncomment block.", "id": "f14562:c2:m4"} {"signature": "def 
make_index(self):", "body": "for prev, block in zip(self.blocks[:-], self.blocks[:]):if not block.is_comment:self.index[block.start_lineno] = prev", "docstring": "Make the index mapping lines of actual code to their associated\n prefix comments.", "id": "f14562:c2:m5"} {"signature": "def search_for_comment(self, lineno, default=None):", "body": "if not self.index:self.make_index()block = self.index.get(lineno, None)text = getattr(block, '', default)return text", "docstring": "Find the comment block just before the given line number.\n\n Returns None (or the specified default) if there is no such block.", "id": "f14562:c2:m6"} {"signature": "def __init__(self, fp, observer_code, *, delimiter='', date_format='',obstype=''):", "body": "self.observer_code = observer_codeself.date_format = date_formatself.obstype = obstypefp.write('')fp.write('' % observer_code)fp.write(\"\" % pyaavso.get_version())fp.write(\"\" % delimiter)fp.write(\"\" % date_format.upper())fp.write(\"\" % obstype)self.writer = csv.writer(fp, delimiter=delimiter)", "docstring": "Creates the writer which will write observations into the file-like\nobject given in first parameter. The only other required parameter\nis the official AAVSO-assigned observer code.\n\n:param fp: file-like object to write observations into\n:param observer_code: AAVSO observer code\n:param delimiter: field delimiter (set as DELIM header)\n:param date_format: observation date format (one of *JD* or *Excel*)\n:param obstype: observation type (*Visual* or *PTG*)", "id": "f14567:c1:m0"} {"signature": "def writerow(self, observation_data):", "body": "if isinstance(observation_data, (list, tuple)):row = observation_dataelse:row = self.dict_to_row(observation_data)self.writer.writerow(row)", "docstring": "Writes a single observation to the output file.\n\nIf the ``observation_data`` parameter is a dictionary, it is\nconverted to a list to keep a consisted field order (as described\nin format specification). 
Otherwise it is assumed that the data\nis a raw record ready to be written to file.\n\n:param observation_data: a single observation as a dictionary or list", "id": "f14567:c1:m1"} {"signature": "@classmethoddef dict_to_row(cls, observation_data):", "body": "row = []row.append(observation_data[''])row.append(observation_data[''])row.append(observation_data[''])comment_code = observation_data.get('', '')if not comment_code:comment_code = ''row.append(comment_code)comp1 = observation_data.get('', '')if not comp1:comp1 = ''row.append(comp1)comp2 = observation_data.get('', '')if not comp2:comp2 = ''row.append(comp2)chart = observation_data.get('', '')if not chart:chart = ''row.append(chart)notes = observation_data.get('', '')if not notes:notes = ''row.append(notes)return row", "docstring": "Takes a dictionary of observation data and converts it to a list\nof fields according to AAVSO visual format specification.\n\n:param cls: current class\n:param observation_data: a single observation as a dictionary", "id": "f14567:c1:m2"} {"signature": "def __init__(self, fp):", "body": "headers = {}for line in fp:if isinstance(line, bytes):line = line.decode('')line = line.strip()if line and line[] == '' and '' in line:header_str = line[:]key, value = header_str.split('', )headers[key] = valueelif line and line[] != '':breakif '' not in headers:raise FormatException('')try:self.observer_code = headers['']except KeyError:raise FormatException('')try:self.date_format = headers['']except KeyError:raise FormatException('')self.software = headers.get('', '')self.delimiter = str(headers.get('', ''))self.obstype = headers.get('', '')data = itertools.chain([line], fp)self.reader = csv.reader(data, delimiter=self.delimiter)", "docstring": "Creates the reader instance and reads file headers.\n\nRaises :py:exc:`~pyaavso.format.visual.FormatException` when any of\nthe required headers could not be found in input. The following header\nparameters are required:\n\n * *TYPE* - always 'Visual', yet must be specified in file\n * *OBSCODE* - official AAVSO-assigned observer code\n * *DATE* - date format, must be one of 'JD' or 'Excel'\n\nOther headers described in AAVSO specification have reasonable default\nvalues, eg. the default delimiter is a comma, when not specified\nin headers. 
Without the *OBSTYPE* header, observations are assumed\nto be visual.\n\n:param fp: a file-like object from which data will be read", "id": "f14567:c2:m0"} {"signature": "@classmethoddef row_to_dict(cls, row):", "body": "comment_code = row[]if comment_code.lower() == '':comment_code = ''comp1 = row[]if comp1.lower() == '':comp1 = ''comp2 = row[]if comp2.lower() == '':comp2 = ''chart = row[]if chart.lower() == '':chart = ''notes = row[]if notes.lower() == '':notes = ''return {'': row[],'': row[],'': row[],'': comment_code,'': comp1,'': comp2,'': chart,'': notes,}", "docstring": "Converts a raw input record to a dictionary of observation data.\n\n:param cls: current class\n:param row: a single observation as a list or tuple", "id": "f14567:c2:m2"} {"signature": "def _clean_cell(value):", "body": "return value.replace('', '').strip()", "docstring": "Removes dashes and strips whitespace from the given value.", "id": "f14570:m0"} {"signature": "def __init__(self, html_source):", "body": "self.empty = \"\" in html_sourceif not self.empty:root = html.fromstring(html_source)self.tbody = TBODY_XPATH(root)[]", "docstring": "Creates the parser and feeds it source code of the page.", "id": "f14570:c0:m0"} {"signature": "def get_observations(self):", "body": "if self.empty:return []rows = list(self.tbody)observations = []for row_observation, row_details in zip(rows[::], rows[::]):data = {}cells = OBSERVATION_XPATH(row_observation)data[''] = _clean_cell(cells[])data[''] = _clean_cell(cells[])data[''] = _clean_cell(cells[])data[''] = _clean_cell(cells[])cells = DETAILS_XPATH(row_details)data[''] = _clean_cell(cells[])data[''] = _clean_cell(cells[]).replace('', '')data[''] = _clean_cell(cells[])data[''] = _clean_cell(cells[])observations.append(data)return observations", "docstring": "Parses the HTML table into a list of dictionaries, each of which\nrepresents a single observation.", "id": "f14570:c0:m1"} {"signature": "def download_observations(observer_code):", "body": "page_number = observations = []while True:logger.info('', page_number)response = requests.get(WEBOBS_RESULTS_URL, params={'': observer_code,'': ,'': '','': page_number,})logger.debug(response.request.url)parser = WebObsResultsParser(response.text)observations.extend(parser.get_observations())if '' not in response.text:breakpage_number += return observations", "docstring": "Downloads all variable star observations by a given observer.\n\nPerforms a series of HTTP requests to AAVSO's WebObs search and\ndownloads the results page by page. Each page is then passed to\n:py:class:`~pyaavso.parsers.webobs.WebObsResultsParser` and parse results\nare added to the final observation list.", "id": "f14571:m0"} {"signature": "def __init__(self, aws_access_key=None, aws_secret_key=None, table_name=None, schema=None, read_units=None, write_units=None, auto_create_table=True, ):", "body": "self.conn = self.get_conn(aws_access_key, aws_secret_key)self.table_name = table_name or self.table_nameself.schema = schema or self.schemaself.read_units = read_units or self.read_unitsself.write_units = write_units or self.write_unitsself.auto_create_table = auto_create_tablesuper(CounterPool, self).__init__()", "docstring": ":aws_access_key:\n AWS Acccess Key ID with permissions to use DynamoDB\n:aws_secret_key:\n AWS Access Secret Key for the given Access Key ID\n:table_name:\n The DynamoDB table that should be used to store this pool's\n counters. 
See http://bit.ly/DynamoDBModel for details on\n DynamoDB's data model.\n:schema:\n The schema that will be used to create a table if one does not\n already exist. See the `boto`_\n docs for details on what's expected for a schema.\n:read_units:\n Read throughput to be set when a table is created. See\n http://bit.ly/DynamoThoughput for details on Dynamo's provisioned\n throughput system.\n:write_units:\n Write throughput to be set when a table is created.\n:auto_create_table:\n Should Albertson create a dynamodb table if the provided\n `table_name` doesn't exist.", "id": "f14577:c0:m0"} {"signature": "def get_conn(self, aws_access_key=None, aws_secret_key=None):", "body": "return boto.connect_dynamodb(aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key,)", "docstring": "Hook point for overriding how the CounterPool gets its connection to\nAWS.", "id": "f14577:c0:m1"} {"signature": "def get_table_name(self):", "body": "if not self.table_name:raise NotImplementedError('')return self.table_name", "docstring": "Hook point for overriding how the CounterPool determines the table name\nto use.", "id": "f14577:c0:m2"} {"signature": "def get_schema(self):", "body": "if not self.schema:raise NotImplementedError('')return self.conn.create_schema(**self.schema)", "docstring": "Hook point for overriding how the CounterPool determines the schema\nto be used when creating a missing table.", "id": "f14577:c0:m3"} {"signature": "def get_read_units(self):", "body": "return self.read_units", "docstring": "Hook point for overriding how the CounterPool determines the read\nthroughput units to set on a newly created table.", "id": "f14577:c0:m4"} {"signature": "def get_write_units(self):", "body": "return self.write_units", "docstring": "Hook point for overriding how the CounterPool determines the write\nthroughput units to set on a newly created table.", "id": "f14577:c0:m5"} {"signature": "def create_table(self):", "body": "table = self.conn.create_table(name=self.get_table_name(),schema=self.get_schema(),read_units=self.get_read_units(),write_units=self.get_write_units(),)if table.status != '':table.refresh(wait_for_active=True, retry_seconds=)return table", "docstring": "Hook point for overriding how the CounterPool creates a new table\nin DynamooDB", "id": "f14577:c0:m6"} {"signature": "def get_table(self):", "body": "if hasattr(self, ''):table = self._tableelse:try:table = self.conn.get_table(self.get_table_name())except boto.exception.DynamoDBResponseError:if self.auto_create_table:table = self.create_table()else:raiseself._table = tablereturn table", "docstring": "Hook point for overriding how the CounterPool transforms table_name\ninto a boto DynamoDB Table object.", "id": "f14577:c0:m7"} {"signature": "def create_item(self, hash_key, start=, extra_attrs=None):", "body": "table = self.get_table()now = datetime.utcnow().replace(microsecond=).isoformat()attrs = {'': now,'': now,'': start,}if extra_attrs:attrs.update(extra_attrs)item = table.new_item(hash_key=hash_key,attrs=attrs,)return item", "docstring": "Hook point for overriding how the CouterPool creates a DynamoDB item\nfor a given counter when an existing item can't be found.", "id": "f14577:c0:m8"} {"signature": "def get_item(self, hash_key, start=, extra_attrs=None):", "body": "table = self.get_table()try:item = table.get_item(hash_key=hash_key)except DynamoDBKeyNotFoundError:item = Noneif item is None:item = self.create_item(hash_key=hash_key,start=start,extra_attrs=extra_attrs,)return item", "docstring": "Hook point for 
overriding how the CouterPool fetches a DynamoDB item\nfor a given counter.", "id": "f14577:c0:m9"} {"signature": "def get_counter(self, name, start=):", "body": "item = self.get_item(hash_key=name, start=start)counter = Counter(dynamo_item=item, pool=self)return counter", "docstring": "Gets the DynamoDB item behind a counter and ties it to a Counter\ninstace.", "id": "f14577:c0:m10"} {"signature": "def _select_manager(backend_name):", "body": "if backend_name == '':lock_manager = _LockManagerRediselif backend_name == '':lock_manager = _LockManagerDBelse:raise NotImplementedErrorreturn lock_manager", "docstring": "Select the proper LockManager based on the current backend used by Celery.\n\n :raise NotImplementedError: If Celery is using an unsupported backend.\n\n :param str backend_name: Class name of the current Celery backend. Usually value of\n current_app.extensions['celery'].celery.backend.__class__.__name__.\n\n :return: Class definition object (not instance). One of the _LockManager* classes.", "id": "f14581:m0"} {"signature": "def single_instance(func=None, lock_timeout=None, include_args=False):", "body": "if func is None:return partial(single_instance, lock_timeout=lock_timeout, include_args=include_args)@wraps(func)def wrapped(celery_self, *args, **kwargs):\"\"\"\"\"\"timeout = (lock_timeout or celery_self.soft_time_limit or celery_self.time_limitor celery_self.app.conf.get('')or celery_self.app.conf.get('')or ( * ))manager_class = _select_manager(celery_self.backend.__class__.__name__)lock_manager = manager_class(celery_self, timeout, include_args, args, kwargs)with lock_manager:ret_value = func(*args, **kwargs)return ret_valuereturn wrapped", "docstring": "Celery task decorator. Forces the task to have only one running instance at a time.\n\n Use with binded tasks (@celery.task(bind=True)).\n\n Modeled after:\n http://loose-bits.com/2010/10/distributed-task-locking-in-celery.html\n http://blogs.it.ox.ac.uk/inapickle/2012/01/05/python-decorators-with-optional-arguments/\n\n Written by @Robpol86.\n\n :raise OtherInstanceError: If another instance is already running.\n\n :param function func: The function to decorate, must be also decorated by @celery.task.\n :param int lock_timeout: Lock timeout in seconds plus five more seconds, in-case the task crashes and fails to\n release the lock. If not specified, the values of the task's soft/hard limits are used. If all else fails,\n timeout will be 5 minutes.\n :param bool include_args: Include the md5 checksum of the arguments passed to the task in the Redis key. This allows\n the same task to run with different arguments, only stopping a task from running if another instance of it is\n running with the same arguments.", "id": "f14581:m1"} {"signature": "def __init__(self, celery_self, timeout, include_args, args, kwargs):", "body": "self.celery_self = celery_selfself.timeout = timeoutself.include_args = include_argsself.args = argsself.kwargs = kwargsself.log = getLogger(''.format(self.__class__.__name__, self.task_identifier))", "docstring": "May raise NotImplementedError if the Celery backend is not supported.\n\n :param celery_self: From wrapped() within single_instance(). 
It is the `self` object specified in a binded\n Celery task definition (implicit first argument of the Celery task when @celery.task(bind=True) is used).\n :param int timeout: Lock's timeout value in seconds.\n :param bool include_args: If single instance should take arguments into account.\n :param iter args: The task instance's args.\n :param dict kwargs: The task instance's kwargs.", "id": "f14581:c1:m0"} {"signature": "@propertydef task_identifier(self):", "body": "task_id = self.celery_self.nameif self.include_args:merged_args = str(self.args) + str([(k, self.kwargs[k]) for k in sorted(self.kwargs)])task_id += ''.format(hashlib.md5(merged_args.encode('')).hexdigest())return task_id", "docstring": "Return the unique identifier (string) of a task instance.", "id": "f14581:c1:m1"} {"signature": "@propertydef is_already_running(self):", "body": "redis_key = self.CELERY_LOCK.format(task_id=self.task_identifier)return self.celery_self.backend.client.exists(redis_key)", "docstring": "Return True if lock exists and has not timed out.", "id": "f14581:c2:m3"} {"signature": "def reset_lock(self):", "body": "redis_key = self.CELERY_LOCK.format(task_id=self.task_identifier)self.celery_self.backend.client.delete(redis_key)", "docstring": "Removed the lock regardless of timeout.", "id": "f14581:c2:m4"} {"signature": "@propertydef is_already_running(self):", "body": "date_done = (self.restore_group(self.task_identifier) or dict()).get('')if not date_done:return Falsedifference = datetime.utcnow() - date_donereturn difference < timedelta(seconds=self.timeout)", "docstring": "Return True if lock exists and has not timed out.", "id": "f14581:c3:m3"} {"signature": "def reset_lock(self):", "body": "self.delete_group(self.task_identifier)", "docstring": "Removed the lock regardless of timeout.", "id": "f14581:c3:m4"} {"signature": "def __init__(self, app=None):", "body": "self.original_register_app = _state._register_app _state._register_app = lambda _: None super(Celery, self).__init__()if app is not None:self.init_app(app)", "docstring": "If app argument provided then initialize celery using application config values.\n\n If no app argument provided you should do initialization later with init_app method.\n\n :param app: Flask application instance.", "id": "f14581:c5:m0"} {"signature": "def init_app(self, app):", "body": "_state._register_app = self.original_register_app if not hasattr(app, ''):app.extensions = dict()if '' in app.extensions:raise ValueError('')app.extensions[''] = _CeleryState(self, app)super(Celery, self).__init__(app.import_name, broker=app.config[''])if '' in app.config:self._preconf[''] = app.config['']self.conf.update(app.config)task_base = self.Taskclass ContextTask(task_base):def __call__(self, *_args, **_kwargs):with app.app_context():return task_base.__call__(self, *_args, **_kwargs)setattr(ContextTask, '', True)setattr(self, '', ContextTask)", "docstring": "Actual method to read celery settings from app configuration and initialize the celery instance.\n\n :param app: Flask application instance.", "id": "f14581:c5:m1"} {"signature": "def register_blueprint(self, _):", "body": "pass", "docstring": "Mock register_blueprint method.", "id": "f14582:c0:m0"} {"signature": "@worker_ready.connectdef on_worker_ready(**_):", "body": "WORKER_READY.append(True)", "docstring": "Called when the Celery worker thread is ready to do work.\n\n This is to avoid race conditions since everything is in one python process.", "id": "f14584:m0"} {"signature": "def run(self):", "body": "celery_args = 
['', '', '', '', '', '', '']with app.app_context():celery.worker_main(celery_args)", "docstring": "Run the thread.", "id": "f14584:c0:m0"} {"signature": "def generate_config():", "body": "config = dict()if os.environ.get('') == '':config[''] = ''elif os.environ.get('') == '':config[''] = ''config[''] = config['']elif os.environ.get('', '').startswith(''):config[''] = '' + os.environ[''].split('', )[]config[''] = config['']elif os.environ.get('') == '':config[''] = ''elif os.environ.get('') == '':config[''] = ''elif os.environ.get('') == '':config[''] = ''elif os.environ.get('') == '':config[''] = ''else:if os.environ.get('') == '':config[''] = ''elif os.environ.get('') == '':config[''] = ''else:file_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '')config[''] = '' + file_pathconfig[''] = '' + config['']config[''] = '' + config['']if '' in config and '' not in config:config[''] = config['']return config", "docstring": "Generate a Flask config dict with settings for a specific broker based on an environment variable.\n\n To be merged into app.config.\n\n :return: Flask config to be fed into app.config.update().\n :rtype: dict", "id": "f14587:m0"} {"signature": "def generate_context(config):", "body": "flask_app = Flask(__name__)flask_app.config.update(config)flask_app.config[''] = Trueflask_app.config[''] = ['']if '' in flask_app.config:db = SQLAlchemy(flask_app)db.engine.execute('')elif '' in flask_app.config:redis = Redis(flask_app)redis.flushdb()Celery(flask_app)return flask_app", "docstring": "Create the Flask app context and initializes any extensions such as Celery, Redis, SQLAlchemy, etc.\n\n :param dict config: Partial Flask config dict from generate_config().\n\n :return: The Flask app instance.", "id": "f14587:m1"} {"signature": "def get_flask_celery_apps():", "body": "config = generate_config()flask_app = generate_context(config=config)celery_app = flask_app.extensions[''].celeryreturn flask_app, celery_app", "docstring": "Call generate_context() and generate_config().\n\n :return: First item is the Flask app instance, second is the Celery app instance.\n :rtype: tuple", "id": "f14587:m2"} {"signature": "@celery.task(bind=True)@single_instancedef add(x, y):", "body": "return x + y", "docstring": "Celery task: add numbers.", "id": "f14587:m3"} {"signature": "@celery.task(bind=True)@single_instance(include_args=True, lock_timeout=)def mul(x, y):", "body": "return x * y", "docstring": "Celery task: multiply numbers.", "id": "f14587:m4"} {"signature": "@celery.task(bind=True)@single_instance()def sub(x, y):", "body": "return x - y", "docstring": "Celery task: subtract numbers.", "id": "f14587:m5"} {"signature": "@celery.task(bind=True, time_limit=)@single_instancedef add2(x, y):", "body": "return x + y", "docstring": "Celery task: add numbers.", "id": "f14587:m6"} {"signature": "@celery.task(bind=True, soft_time_limit=)@single_instancedef add3(x, y):", "body": "return x + y", "docstring": "Celery task: add numbers.", "id": "f14587:m7"} {"signature": "def readme(path=''):", "body": "path = os.path.realpath(os.path.join(os.path.dirname(__file__), path))handle = Noneurl_prefix = ''.format(name=NAME, version=VERSION)try:handle = codecs.open(path, encoding='')return handle.read().replace('', ''.format(url_prefix))except IOError:return ''finally:getattr(handle, '', lambda: None)()", "docstring": "Try to read README.rst or return empty string if failed.\n\n :param str path: Path to README file.\n\n :return: File contents.\n :rtype: str", "id": "f14588:m0"} {"signature": 
"@classmethoddef initialize_options(cls):", "body": "pass", "docstring": "Required by distutils.", "id": "f14588:c0:m0"} {"signature": "@classmethoddef finalize_options(cls):", "body": "pass", "docstring": "Required by distutils.", "id": "f14588:c0:m1"} {"signature": "@classmethoddef run(cls):", "body": "project = __import__(IMPORT, fromlist=[''])for expected, var in [('', ''), (LICENSE, ''), (VERSION, '')]:if getattr(project, var) != expected:raise SystemExit(''.format(var))if not re.compile(r'' % VERSION, re.MULTILINE).search(readme()):raise SystemExit('')if INSTALL_REQUIRES:contents = readme('')section = re.compile(r'', re.DOTALL).findall(contents)if not section:raise SystemExit('')in_tox = re.findall(r'', section[])if INSTALL_REQUIRES != in_tox:raise SystemExit('')", "docstring": "Check variables.", "id": "f14588:c0:m2"} {"signature": "@classmethoddef from_file(cls, file):", "body": "if not os.path.exists(file):raise ValueError(\"\")try:config_parser = configparser.ConfigParser()config_parser.read(file)configuration = cls(file, config_parser)if not configuration.check_config_sanity():raise ValueError(\"\")else:return configurationexcept configparser.Error:raise ValueError(\"\")", "docstring": "Try loading given config file.\n\n :param str file: full path to the config file to load", "id": "f14593:c0:m1"} {"signature": "@classmethoddef discover(cls):", "body": "file = os.path.join(Config.config_dir, Config.config_name)return cls.from_file(file)", "docstring": "Make a guess about the config file location an try loading it.", "id": "f14593:c0:m2"} {"signature": "@classmethoddef create_config(cls, cfgfile, nick, twtfile, twturl, disclose_identity, add_news):", "body": "cfgfile_dir = os.path.dirname(cfgfile)if not os.path.exists(cfgfile_dir):os.makedirs(cfgfile_dir)cfg = configparser.ConfigParser()cfg.add_section(\"\")cfg.set(\"\", \"\", nick)cfg.set(\"\", \"\", twtfile)cfg.set(\"\", \"\", twturl)cfg.set(\"\", \"\", str(disclose_identity))cfg.set(\"\", \"\", \"\")cfg.set(\"\", \"\", \"\")cfg.add_section(\"\")if add_news:cfg.set(\"\", \"\", \"\")conf = cls(cfgfile, cfg)conf.write_config()return conf", "docstring": "Create a new config file at the default location.\n\n :param str cfgfile: path to the config file\n :param str nick: nickname to use for own tweets\n :param str twtfile: path to the local twtxt file\n :param str twturl: URL to the remote twtxt file\n :param bool disclose_identity: if true the users id will be disclosed\n :param bool add_news: if true follow twtxt news feed", "id": "f14593:c0:m3"} {"signature": "def write_config(self):", "body": "with open(self.config_file, \"\") as config_file:self.cfg.write(config_file)", "docstring": "Writes `self.cfg` to `self.config_file`.", "id": "f14593:c0:m4"} {"signature": "@propertydef following(self):", "body": "following = []try:for (nick, url) in self.cfg.items(\"\"):source = Source(nick, url)following.append(source)except configparser.NoSectionError as e:logger.debug(e)return following", "docstring": "A :class:`list` of all :class:`Source` objects.", "id": "f14593:c0:m5"} {"signature": "@propertydef options(self):", "body": "try:return dict(self.cfg.items(\"\"))except configparser.NoSectionError as e:logger.debug(e)return {}", "docstring": "A :class:`dict` of all config options.", "id": "f14593:c0:m6"} {"signature": "def add_source(self, source):", "body": "if not self.cfg.has_section(\"\"):self.cfg.add_section(\"\")self.cfg.set(\"\", source.nick, source.url)self.write_config()", "docstring": "Adds a new :class:`Source` to the 
config\u2019s following section.", "id": "f14593:c0:m25"} {"signature": "def get_source_by_nick(self, nick):", "body": "url = self.cfg.get(\"\", nick, fallback=None)return Source(nick, url) if url else None", "docstring": "Returns the :class:`Source` of the given nick.\n\n :param str nick: nickname for which will be searched in the config", "id": "f14593:c0:m26"} {"signature": "def remove_source_by_nick(self, nick):", "body": "if not self.cfg.has_section(\"\"):return Falseret_val = self.cfg.remove_option(\"\", nick)self.write_config()return ret_val", "docstring": "Removes a :class:`Source` form the config\u2019s following section.\n\n :param str nick: nickname for which will be searched in the config", "id": "f14593:c0:m27"} {"signature": "def build_default_map(self):", "body": "default_map = {\"\": {\"\": self.check_following,\"\": self.timeout,\"\": self.porcelain,},\"\": {\"\": self.twtfile,},\"\": {\"\": self.use_pager,\"\": self.use_cache,\"\": self.limit_timeline,\"\": self.timeout,\"\": self.sorting,\"\": self.porcelain,\"\": self.twtfile,\"\": self.timeline_update_interval,},\"\": {\"\": self.use_pager,\"\": self.use_cache,\"\": self.limit_timeline,\"\": self.timeout,\"\": self.sorting,\"\": self.porcelain,\"\": self.timeline_update_interval,}}return default_map", "docstring": "Maps config options to the default values used by click, returns :class:`dict`.", "id": "f14593:c0:m28"} {"signature": "def check_config_sanity(self):", "body": "is_sane = Trueproperties = [property_name for property_name, objin self.__class__.__dict__.items()if isinstance(obj, property)]for property_name in properties:try:getattr(self, property_name)except ValueError as e:click.echo(\"\".format(property_name, e))is_sane = Falsereturn is_sane", "docstring": "Checks if the given values in the config file are sane.", "id": "f14593:c0:m29"} {"signature": "def __init__(self, cache_file, cache, update_interval):", "body": "self.cache_file = cache_fileself.cache = cacheself.update_interval = update_interval", "docstring": "Initializes new :class:`Cache` object.\n\n :param str cache_file: full path to the loaded cache file.\n :param ~shelve.Shelve cache: a Shelve object, with cache loaded.\n :param int update_interval: number of seconds the cache is considered to be\n up-to-date without calling any external resources.", "id": "f14594:c0:m0"} {"signature": "@classmethoddef from_file(cls, file, *args, **kwargs):", "body": "try:cache = shelve.open(file)return cls(file, cache, *args, **kwargs)except OSError as e:logger.debug(\"\".format(file))raise e", "docstring": "Try loading given cache file.", "id": "f14594:c0:m3"} {"signature": "@classmethoddef discover(cls, *args, **kwargs):", "body": "file = os.path.join(Cache.cache_dir, Cache.cache_name)return cls.from_file(file, *args, **kwargs)", "docstring": "Make a guess about the cache file location an try loading it.", "id": "f14594:c0:m4"} {"signature": "@propertydef last_updated(self):", "body": "try:return self.cache[\"\"]except KeyError:return ", "docstring": "Returns *NIX timestamp of last update of the cache.", "id": "f14594:c0:m5"} {"signature": "@propertydef is_valid(self):", "body": "if timestamp() - self.last_updated <= self.update_interval:return Trueelse:return False", "docstring": "Checks if the cache is considered to be up-to-date.", "id": "f14594:c0:m6"} {"signature": "def mark_updated(self):", "body": "if not self.is_valid:self.cache[\"\"] = timestamp()", "docstring": "Mark cache as updated at current *NIX timestamp", "id": "f14594:c0:m7"} {"signature": 
"def is_cached(self, url):", "body": "try:return True if url in self.cache else Falseexcept TypeError:return False", "docstring": "Checks if specified URL is cached.", "id": "f14594:c0:m8"} {"signature": "def last_modified(self, url):", "body": "try:return self.cache[url][\"\"]except KeyError:return None", "docstring": "Returns saved 'Last-Modified' header, if available.", "id": "f14594:c0:m9"} {"signature": "def add_tweets(self, url, last_modified, tweets):", "body": "try:self.cache[url] = {\"\": last_modified, \"\": tweets}self.mark_updated()return Trueexcept TypeError:return False", "docstring": "Adds new tweets to the cache.", "id": "f14594:c0:m10"} {"signature": "def get_tweets(self, url, limit=None):", "body": "try:tweets = self.cache[url][\"\"]self.mark_updated()return sorted(tweets, reverse=True)[:limit]except KeyError:return []", "docstring": "Retrieves tweets from the cache.", "id": "f14594:c0:m11"} {"signature": "def remove_tweets(self, url):", "body": "try:del self.cache[url]self.mark_updated()return Trueexcept KeyError:return False", "docstring": "Tries to remove cached tweets.", "id": "f14594:c0:m12"} {"signature": "def close(self):", "body": "try:self.cache.close()return Trueexcept AttributeError:return False", "docstring": "Closes Shelve object.", "id": "f14594:c0:m13"} {"signature": "def sync(self):", "body": "try:self.cache.sync()return Trueexcept AttributeError:return False", "docstring": "Syncs Shelve object.", "id": "f14594:c0:m14"} {"signature": "def make_aware(dt):", "body": "return dt if dt.tzinfo else dt.replace(tzinfo=timezone.utc)", "docstring": "Appends tzinfo and assumes UTC, if datetime object has no tzinfo already.", "id": "f14595:m0"} {"signature": "def parse_iso8601(string):", "body": "return make_aware(dateutil.parser.parse(string))", "docstring": "Parse string using dateutil.parser.", "id": "f14595:m1"} {"signature": "def parse_tweets(raw_tweets, source, now=None):", "body": "if now is None:now = datetime.now(timezone.utc)tweets = []for line in raw_tweets:try:tweet = parse_tweet(line, source, now)except (ValueError, OverflowError) as e:logger.debug(\"\".format(source.url, e))else:tweets.append(tweet)return tweets", "docstring": "Parses a list of raw tweet lines from a twtxt file\nand returns a list of :class:`Tweet` objects.\n\n:param list raw_tweets: list of raw tweet lines\n:param Source source: the source of the given tweets\n:param Datetime now: the current datetime\n\n:returns: a list of parsed tweets :class:`Tweet` objects\n:rtype: list", "id": "f14595:m2"} {"signature": "def parse_tweet(raw_tweet, source, now=None):", "body": "if now is None:now = datetime.now(timezone.utc)raw_created_at, text = raw_tweet.split(\"\", )created_at = parse_iso8601(raw_created_at)if created_at > now:raise ValueError(\"\")return Tweet(click.unstyle(text.strip()), created_at, source)", "docstring": "Parses a single raw tweet line from a twtxt file\nand returns a :class:`Tweet` object.\n\n:param str raw_tweet: a single raw tweet line\n:param Source source: the source of the given tweet\n:param Datetime now: the current datetime\n\n:returns: the parsed tweet\n:rtype: Tweet", "id": "f14595:m3"} {"signature": "@click.group()@click.option(\"\", \"\",type=click.Path(exists=True, file_okay=True, readable=True, writable=True, resolve_path=True),help=\"\")@click.option(\"\", \"\",is_flag=True, default=False,help=\"\")@click.version_option()@click.pass_contextdef cli(ctx, config, verbose):", "body": "init_logging(debug=verbose)if ctx.invoked_subcommand == \"\":return try:if 
config:conf = Config.from_file(config)else:conf = Config.discover()except ValueError as e:if \"\" in str(e):click.echo(\"\")else:click.echo(\"\")sys.exit()ctx.default_map = conf.build_default_map()ctx.obj = {'': conf}", "docstring": "Decentralised, minimalist microblogging service for hackers.", "id": "f14597:m0"} {"signature": "@cli.command()@click.option(\"\",callback=validate_created_at,help=\"\")@click.option(\"\", \"\",type=click.Path(file_okay=True, writable=True, resolve_path=True),help=\"\")@click.argument(\"\", callback=validate_text, nargs=-)@click.pass_contextdef tweet(ctx, created_at, twtfile, text):", "body": "text = expand_mentions(text)tweet = Tweet(text, created_at) if created_at else Tweet(text)pre_tweet_hook = ctx.obj[\"\"].pre_tweet_hookif pre_tweet_hook:run_pre_tweet_hook(pre_tweet_hook, ctx.obj[\"\"].options)if not add_local_tweet(tweet, twtfile):click.echo(\"\")else:post_tweet_hook = ctx.obj[\"\"].post_tweet_hookif post_tweet_hook:run_post_tweet_hook(post_tweet_hook, ctx.obj[\"\"].options)", "docstring": "Append a new tweet to your twtxt file.", "id": "f14597:m1"} {"signature": "@cli.command()@click.option(\"\",is_flag=True,help=\"\")@click.option(\"\", \"\",type=click.INT,help=\"\")@click.option(\"\", \"\",type=click.Path(exists=True, file_okay=True, readable=True, resolve_path=True),help=\"\")@click.option(\"\", \"\",flag_value=\"\",help=\"\")@click.option(\"\", \"\",flag_value=\"\",help=\"\")@click.option(\"\",type=click.FLOAT,help=\"\")@click.option(\"\",is_flag=True,help=\"\")@click.option(\"\", \"\",help=\"\")@click.option(\"\",is_flag=True,help=\"\")@click.option(\"\",is_flag=True,help=\"\")@click.pass_contextdef timeline(ctx, pager, limit, twtfile, sorting, timeout, porcelain, source, cache, force_update):", "body": "if source:source_obj = ctx.obj[\"\"].get_source_by_nick(source)if not source_obj:logger.debug(\"\".format(source))source_obj = Source(source, source)sources = [source_obj]else:sources = ctx.obj[\"\"].followingtweets = []if cache:try:with Cache.discover(update_interval=ctx.obj[\"\"].timeline_update_interval) as cache:force_update = force_update or not cache.is_validif force_update:tweets = get_remote_tweets(sources, limit, timeout, cache)else:logger.debug(\"\".format(cache.update_interval))tweets = list(chain.from_iterable([cache.get_tweets(source.url) for source in sources]))except OSError as e:logger.debug(e)tweets = get_remote_tweets(sources, limit, timeout)else:tweets = get_remote_tweets(sources, limit, timeout)if twtfile and not source:source = Source(ctx.obj[\"\"].nick, ctx.obj[\"\"].twturl, file=twtfile)tweets.extend(get_local_tweets(source, limit))if not tweets:returntweets = sort_and_truncate_tweets(tweets, sorting, limit)if pager:click.echo_via_pager(style_timeline(tweets, porcelain))else:click.echo(style_timeline(tweets, porcelain))", "docstring": "Retrieve your personal timeline.", "id": "f14597:m2"} {"signature": "@cli.command()@click.option(\"\",is_flag=True,help=\"\")@click.option(\"\", \"\",type=click.INT,help=\"\")@click.option(\"\", \"\",flag_value=\"\",help=\"\")@click.option(\"\", \"\",flag_value=\"\",help=\"\")@click.option(\"\",type=click.FLOAT,help=\"\")@click.option(\"\",is_flag=True,help=\"\")@click.option(\"\",is_flag=True,help=\"\")@click.option(\"\",is_flag=True,help=\"\")@click.argument(\"\")@click.pass_contextdef view(ctx, **kwargs):", "body": "ctx.forward(timeline)", "docstring": "Show feed of given source.", "id": "f14597:m3"} {"signature": 
"@cli.command()@click.option(\"\",is_flag=True,help=\"\")@click.option(\"\",type=click.FLOAT,help=\"\")@click.option(\"\",is_flag=True,help=\"\")@click.pass_contextdef following(ctx, check, timeout, porcelain):", "body": "sources = ctx.obj[''].followingif check:sources = get_remote_status(sources, timeout)for (source, status) in sources:click.echo(style_source_with_status(source, status, porcelain))else:sources = sorted(sources, key=lambda source: source.nick)for source in sources:click.echo(style_source(source, porcelain))", "docstring": "Return the list of sources you\u2019re following.", "id": "f14597:m4"} {"signature": "@cli.command()@click.argument(\"\")@click.argument(\"\")@click.option(\"\", \"\",flag_value=True,help=\"\")@click.pass_contextdef follow(ctx, nick, url, force):", "body": "source = Source(nick, url)sources = ctx.obj[''].followingif not force:if source.nick in (source.nick for source in sources):click.confirm(\"\".format(click.style(source.nick, bold=True)), default=False, abort=True)_, status = get_remote_status([source])[]if not status or status.status_code != :click.confirm(\"\".format(click.style(source.nick, bold=True),click.style(source.url, bold=True)), default=False, abort=True)ctx.obj[''].add_source(source)click.echo(\"\".format(click.style(source.nick, bold=True)))", "docstring": "Add a new source to your followings.", "id": "f14597:m5"} {"signature": "@cli.command()@click.argument(\"\")@click.pass_contextdef unfollow(ctx, nick):", "body": "source = ctx.obj[''].get_source_by_nick(nick)try:with Cache.discover() as cache:cache.remove_tweets(source.url)except OSError as e:logger.debug(e)ret_val = ctx.obj[''].remove_source_by_nick(nick)if ret_val:click.echo(\"\".format(click.style(source.nick, bold=True)))else:click.echo(\"\".format(click.style(nick, bold=True)))", "docstring": "Remove an existing source from your followings.", "id": "f14597:m6"} {"signature": "@cli.command()def quickstart():", "body": "width = click.get_terminal_size()[]width = width if width <= else click.secho(\"\", fg=\"\")click.secho(\"\", fg=\"\")click.echo()help_text = \"\"\"\"\"\"click.echo(textwrap.fill(help_text, width))click.echo()nick = click.prompt(\"\", default=os.environ.get(\"\", \"\"))def overwrite_check(path):if os.path.isfile(path):click.confirm(\"\".format(path), abort=True)cfgfile = click.prompt(\"\",os.path.join(Config.config_dir, Config.config_name),type=click.Path(readable=True, writable=True, file_okay=True))cfgfile = os.path.expanduser(cfgfile)overwrite_check(cfgfile)twtfile = click.prompt(\"\",os.path.expanduser(\"\"),type=click.Path(readable=True, writable=True, file_okay=True))twtfile = os.path.expanduser(twtfile)overwrite_check(twtfile)twturl = click.prompt(\"\",default=\"\")disclose_identity = click.confirm(\"\"\"\", default=False)click.echo()add_news = click.confirm(\"\", default=True)conf = Config.create_config(cfgfile, nick, twtfile, twturl, disclose_identity, add_news)twtfile_dir = os.path.dirname(twtfile)if not os.path.exists(twtfile_dir):os.makedirs(twtfile_dir)open(twtfile, \"\").close()click.echo()click.echo(\"\".format(click.format_filename(conf.config_file)))click.echo(\"\".format(click.format_filename(twtfile)))", "docstring": "Quickstart wizard for setting up twtxt.", "id": "f14597:m7"} {"signature": "@cli.command()@click.argument(\"\", required=False, callback=validate_config_key)@click.argument(\"\", required=False)@click.option(\"\",flag_value=True,help=\"\")@click.option(\"\", \"\",flag_value=True,help=\"\")@click.pass_contextdef config(ctx, key, 
value, remove, edit):", "body": "conf = ctx.obj[\"\"]if not edit and not key:raise click.BadArgumentUsage(\"\")if edit:return click.edit(filename=conf.config_file)if remove:try:conf.cfg.remove_option(key[], key[])except Exception as e:logger.debug(e)else:conf.write_config()returnif not value:try:click.echo(conf.cfg.get(key[], key[]))except Exception as e:logger.debug(e)returnif not conf.cfg.has_section(key[]):conf.cfg.add_section(key[])conf.cfg.set(key[], key[], value)conf.write_config()", "docstring": "Get or set config item.", "id": "f14597:m8"} {"signature": "def expand_mentions(text, embed_names=True):", "body": "if embed_names:mention_format = \"\"else:mention_format = \"\"def handle_mention(match):source = get_source_by_name(match.group())if source is None:return \"\".format(match.group())return mention_format.format(name=source.nick,url=source.url)return short_mention_re.sub(handle_mention, text)", "docstring": "Searches the given text for mentions and expands them.\n\n For example:\n \"@source.nick\" will be expanded to \"@\".", "id": "f14600:m2"} {"signature": "def format_mentions(text, format_callback=format_mention):", "body": "def handle_mention(match):name, url = match.groups()return format_callback(name, url)return mention_re.sub(handle_mention, text)", "docstring": "Searches the given text for mentions generated by `expand_mention()` and returns a human-readable form.\n\n For example:\n \"@\" will result in \"@bob\"\n\n If you follow a source: source.nick will be bold\n If you are the mentioned source: source.nick will be bold and coloured\n If nothing from the above is true: nick will be unstyled\n If nothing from the above is true and nick is not given: url will be used", "id": "f14600:m4"} {"signature": "def validate_config_key(ctx, param, value):", "body": "if not value:return valuetry:section, item = value.split(\"\", )except ValueError:raise click.BadArgumentUsage(\"\")else:return section, item", "docstring": "Validate a configuration key according to `section.item`.", "id": "f14601:m6"} {"signature": "@propertydef relative_datetime(self):", "body": "now = datetime.now(timezone.utc)tense = \"\" if self.created_at > now else \"\"return \"\".format(humanize.naturaldelta(now - self.created_at), tense)", "docstring": "Return human-readable relative time string.", "id": "f14604:c0:m8"} {"signature": "@propertydef absolute_datetime(self):", "body": "return self.created_at.strftime(\"\")", "docstring": "Return human-readable absolute time string.", "id": "f14604:c0:m9"} {"signature": "def get_version(self):", "body": "if (self.name is not None and self.version is not Noneand self.version.startswith(\"\")):return (self.__get_live_version() or self.__get_frozen_version()or self.__fail_to_get_any_version())else:return self.__base.get_version(self)", "docstring": "Get distribution version.\n\nThis method is enhanced compared to original distutils implementation.\nIf the version string is set to a special value then instead of using\nthe actual value the real version is obtained by querying versiontools.\n\nIf versiontools package is not installed then the version is obtained\nfrom the standard section of the ``PKG-INFO`` file. This file is\nautomatically created by any source distribution. This method is less\nuseful as it cannot take advantage of version control information that\nis automatically loaded by versiontools. 
It has the advantage of not\nrequiring versiontools installation and that it does not depend on\n``setup_requires`` feature of ``setuptools``.", "id": "f14609:c0:m0"} {"signature": "def __get_live_version(self):", "body": "try:import versiontoolsexcept ImportError:return Noneelse:return str(versiontools.Version.from_expression(self.name))", "docstring": "Get a live version string using versiontools", "id": "f14609:c0:m1"} {"signature": "def __get_frozen_version(self):", "body": "try:return self.__base(\"\").versionexcept IOError:return None", "docstring": "Get a fixed version string using an existing PKG-INFO file", "id": "f14609:c0:m2"} {"signature": "def __fail_to_get_any_version(self):", "body": "raise SystemExit(This package requires versiontools for development or testing. See http://versiontools.readthedocs.org/ for more information about what versiontools is and why it is useful. To install versiontools now please run: pip install versiontools. Note: versiontools works best when you have additional modules for integrating with your preferred version control system. Refer to the documentation for a full list of required modules.)", "docstring": "Raise an informative exception", "id": "f14609:c0:m3"} {"signature": "def fit(self, X=None, y=None):", "body": "return self", "docstring": "Do nothing; this transformer is stateless.", "id": "f14611:c1:m1"} {"signature": "def transform(self, X):", "body": "return X[self.indices]", "docstring": "Subsets the given matrix.", "id": "f14611:c1:m2"} {"signature": "def fit(self, X, y=None):", "body": "raise NotImplementedError(\"\")", "docstring": "Raises NotImplementedError.", "id": "f14611:c2:m1"} {"signature": "def transform(self, X):", "body": "return X", "docstring": "Does nothing, so that it works nicely in pipelines.", "id": "f14611:c2:m2"} {"signature": "def fit_transform(self, X, y=None):", "body": "n = X.shape[]if X.shape != (n, n):raise TypeError(\"\")X = X + X.TX /= return X", "docstring": "Symmetrizes X.\n\nParameters\n----------\nX : array, shape [n, n]\n The pairwise inputs to symmetrize.\n\nReturns\n-------\nX : array, shape [n, n]\n The symmetrized pairwise outputs.", "id": "f14611:c2:m3"} {"signature": "def fit(self, X, y=None):", "body": "X = check_array(X)if self.scale_by_median:self.median_ = np.median(X[np.triu_indices_from(X, k=)],overwrite_input=True)elif hasattr(self, ''):del self.median_return self", "docstring": "If scale_by_median, find :attr:`median_`; otherwise, do nothing.\n\nParameters\n----------\nX : array\n The raw pairwise distances.", "id": "f14611:c3:m1"} {"signature": "def transform(self, X):", "body": "X = check_array(X)X_rbf = np.empty_like(X) if self.copy else XX_in = Xif not self.squared:np.power(X_in, , out=X_rbf)X_in = X_rbfif self.scale_by_median:scale = self.median_ if self.squared else self.median_ ** gamma = self.gamma * scaleelse:gamma = self.gammanp.multiply(X_in, -gamma, out=X_rbf)np.exp(X_rbf, out=X_rbf)return X_rbf", "docstring": "Turns distances into RBF values.\n\nParameters\n----------\nX : array\n The raw pairwise distances.\n\nReturns\n-------\nX_rbf : array of same shape as X\n The distances in X passed through the RBF kernel.", "id": "f14611:c3:m2"} {"signature": "def fit(self, X, y=None):", "body": "n = X.shape[]if X.shape != (n, n):raise TypeError(\"\")memory = get_memory(self.memory)vals, vecs = memory.cache(scipy.linalg.eigh, ignore=[''])(X, overwrite_a=not self.copy)vals = vals.reshape(-, )if self.min_eig == :inner = vals > self.min_eigelse:with np.errstate(divide=''):inner = np.where(vals >= self.min_eig, ,np.where(vals == , , self.min_eig / 
vals))self.clip_ = np.dot(vecs, inner * vecs.T)return self", "docstring": "Learn the linear transformation to clipped eigenvalues.\n\nNote that if min_eig isn't zero and any of the original eigenvalues\nwere exactly zero, this will leave those eigenvalues as zero.\n\nParameters\n----------\nX : array, shape [n, n]\n The *symmetric* input similarities. If X is asymmetric, it will be\n treated as if it were symmetric based on its lower-triangular part.", "id": "f14611:c4:m1"} {"signature": "def transform(self, X):", "body": "n = self.clip_.shape[]if X.ndim != or X.shape[] != n:msg = \"\"raise TypeError(msg.format(self.clip_.shape[]))return np.dot(X, self.clip_)", "docstring": "Transforms X according to the linear transformation corresponding to\nclipping the input eigenvalues.\n\nParameters\n----------\nX : array, shape [n_test, n]\n The test similarities to training points.\n\nReturns\n-------\nXt : array, shape [n_test, n]\n The transformed test similarities to training points.", "id": "f14611:c4:m2"} {"signature": "def fit_transform(self, X, y=None):", "body": "n = X.shape[]if X.shape != (n, n):raise TypeError(\"\")memory = get_memory(self.memory)discard_X = not self.copy and self.negatives_likelyvals, vecs = memory.cache(scipy.linalg.eigh, ignore=[''])(X, overwrite_a=discard_X)vals = vals[:, None]self.clip_ = np.dot(vecs, (vals > self.min_eig) * vecs.T)if discard_X or vals[, ] < self.min_eig:del Xnp.maximum(vals, self.min_eig, out=vals)X = np.dot(vecs, vals * vecs.T)del vals, vecsX = Symmetrize(copy=False).fit_transform(X)return X", "docstring": "Clips the negative eigenvalues of X.\n\nParameters\n----------\nX : array, shape [n, n]\n The *symmetric* input similarities. If X is asymmetric, it will be\n treated as if it were symmetric based on its lower-triangular part.\n\nReturns\n-------\nXt : array, shape [n, n]\n The transformed training similarities; smallest eigenvalue will be\n at least `self.min_eig`.", "id": "f14611:c4:m3"} {"signature": "def fit(self, X, y=None):", "body": "n = X.shape[]if X.shape != (n, n):raise TypeError(\"\")memory = get_memory(self.memory)vals, vecs = memory.cache(scipy.linalg.eigh, ignore=[''])(X, overwrite_a=not self.copy)vals = vals[:, None]self.flip_ = np.dot(vecs, np.sign(vals) * vecs.T)return self", "docstring": "Learn the linear transformation to flipped eigenvalues.\n\nParameters\n----------\nX : array, shape [n, n]\n The *symmetric* input similarities. 
If X is asymmetric, it will be\n treated as if it were symmetric based on its lower-triangular part.", "id": "f14611:c5:m1"} {"signature": "def transform(self, X):", "body": "n = self.flip_.shape[]if X.ndim != or X.shape[] != n:msg = \"\"raise TypeError(msg.format(self.flip_.shape[]))return np.dot(X, self.flip_)", "docstring": "Transforms X according to the linear transformation corresponding to\nflipping the input eigenvalues.\n\nParameters\n----------\nX : array, shape [n_test, n]\n The test similarities to training points.\n\nReturns\n-------\nXt : array, shape [n_test, n]\n The transformed test similarities to training points.", "id": "f14611:c5:m2"} {"signature": "def fit_transform(self, X, y=None):", "body": "n = X.shape[]if X.shape != (n, n):raise TypeError(\"\")memory = get_memory(self.memory)discard_X = not self.copy and self.negatives_likelyvals, vecs = memory.cache(scipy.linalg.eigh, ignore=[''])(X, overwrite_a=discard_X)vals = vals[:, None]self.clip_ = np.dot(vecs, np.sign(vals) * vecs.T)if discard_X or vals[, ] < :del Xnp.abs(vals, out=vals)X = np.dot(vecs, vals * vecs.T)del vals, vecsX = Symmetrize(copy=False).fit_transform(X)return X", "docstring": "Flips the negative eigenvalues of X.\n\nParameters\n----------\nX : array, shape [n, n]\n The *symmetric* input similarities. If X is asymmetric, it will be\n treated as if it were symmetric based on its lower-triangular part.\n\nReturns\n-------\nXt : array, shape [n, n]\n The transformed training similarities.", "id": "f14611:c5:m3"} {"signature": "def fit(self, X, y=None):", "body": "n = X.shape[]if X.shape != (n, n):raise TypeError(\"\")self.train_ = Xmemory = get_memory(self.memory)lo, = memory.cache(scipy.linalg.eigvalsh)(X, eigvals=(, ))self.shift_ = max(self.min_eig - lo, )return self", "docstring": "Learn the transformation to shifted eigenvalues. Only depends\non the input dimension.\n\nParameters\n----------\nX : array, shape [n, n]\n The *symmetric* input similarities.", "id": "f14611:c6:m1"} {"signature": "def transform(self, X):", "body": "n = self.train_.shape[]if X.ndim != or X.shape[] != n:msg = \"\"raise TypeError(msg.format(n))if self.copy:X = X.copy()if self.shift_ != and X is self.train_ or (X.shape == self.train_.shape and np.allclose(X, self.train_)):X[xrange(n), xrange(n)] += self.shift_return X", "docstring": "Transforms X according to the linear transformation corresponding to\nshifting the input eigenvalues to all be at least ``self.min_eig``.\n\nParameters\n----------\nX : array, shape [n_test, n]\n The test similarities to training points.\n\nReturns\n-------\nXt : array, shape [n_test, n]\n The transformed test similarities to training points. 
Only different\n from X if X is the training data.", "id": "f14611:c6:m2"} {"signature": "def fit(self, X, y=None):", "body": "self.features_ = as_features(X, stack=True, bare=True)return self", "docstring": "Specify the data to which kernel values should be computed.\n\nParameters\n----------\nX : list of arrays or :class:`skl_groups.features.Features`\n The bags to compute \"to\".", "id": "f14613:c0:m2"} {"signature": "def transform(self, X):", "body": "X = as_features(X, stack=True, bare=True)Y = self.features_if X.dim != Y.dim:raise ValueError(\"\".format(X.dim, Y.dim))pointwise = pairwise_kernels(X.stacked_features, Y.stacked_features,metric=self.kernel,filter_params=True,**self._get_kernel_params())K = np.empty((len(X), len(Y)))for i in range(len(X)):for j in range(len(Y)):K[i, j] = pointwise[X._boundaries[i]:X._boundaries[i+],Y._boundaries[j]:Y._boundaries[j+]].mean()return K", "docstring": "Compute kernels from X to :attr:`features_`.\n\nParameters\n----------\nX : list of arrays or :class:`skl_groups.features.Features`\n The bags to compute \"from\". Must have same dimension as\n :attr:`features_`.\n\nReturns\n-------\nK : array of shape ``[len(X), len(features_)]``\n The kernel evaluations from X to :attr:`features_`.", "id": "f14613:c0:m3"} {"signature": "def fit(self, X, y=None, **params):", "body": "X = as_features(X, stack=True)self.transformer.fit(X.stacked_features, y, **params)return self", "docstring": "Fit the transformer on the stacked points.\n\nParameters\n----------\nX : :class:`Features` or list of arrays of shape ``[n_samples[i], n_features]``\n Training set. If a Features object, it will be stacked.\n\nany other keyword argument :\n Passed on as keyword arguments to the transformer's ``fit()``.", "id": "f14614:c0:m2"} {"signature": "def transform(self, X, **params):", "body": "X = as_features(X, stack=True)X_new = self.transformer.transform(X.stacked_features, **params)return self._gather_outputs(X, X_new)", "docstring": "Transform the stacked points.\n\nParameters\n----------\nX : :class:`Features` or list of bag feature arrays\n New data to transform.\n\nany other keyword argument :\n Passed on as keyword arguments to the transformer's ``transform()``.\n\nReturns\n-------\nX_new : :class:`Features`\n Transformed features.", "id": "f14614:c0:m3"} {"signature": "def fit_transform(self, X, y=None, **params):", "body": "X = as_features(X, stack=True)X_new = self.transformer.fit_transform(X.stacked_features, y, **params)return self._gather_outputs(X, X_new)", "docstring": "Fit and transform the stacked points.\n\nParameters\n----------\nX : :class:`Features` or list of bag feature arrays\n Data to train on and transform.\n\nany other keyword argument :\n Passed on as keyword arguments to the transformer's ``transform()``.\n\nReturns\n-------\nX_new : :class:`Features`\n Transformed features.", "id": "f14614:c0:m4"} {"signature": "def inverse_transform(self, X, **params):", "body": "X = as_features(X, stack=True)Xo = self.transformer.inverse_transform(X.stacked_features, **params)return self._gather_outputs(X, Xo)", "docstring": "Transform data back to its original space, i.e., return an input\nX_original whose transform would (maybe approximately) be X.\n\nParameters\n----------\nX : :class:`Features` or list of bag feature arrays\n Data to train on and transform.\n\nany other keyword argument :\n Passed on as keyword arguments to the transformer's \n ``inverse_transform()``.\n\nReturns\n-------\nX_original : :class:`Features`", "id": "f14614:c0:m5"} {"signature": 
"def fit(self, X, y=None):", "body": "X = check_array(X, copy=self.copy,dtype=[np.float64, np.float32, np.float16, np.float128])feature_range = self.feature_rangeif feature_range[] >= feature_range[]:raise ValueError(\"\"\"\" % str(feature_range))if self.fit_feature_range is not None:fit_feature_range = self.fit_feature_rangeif fit_feature_range[] >= fit_feature_range[]:raise ValueError(\"\"\"\"% str(feature_range))if (fit_feature_range[] < feature_range[] orfit_feature_range[] > feature_range[]):raise ValueError(\"\"\"\"% (str(feature_range),str(fit_feature_range)))feature_range = fit_feature_rangedata_min = np.min(X, axis=)data_range = np.max(X, axis=) - data_mindata_range[data_range == ] = self.scale_ = (feature_range[] - feature_range[]) / data_rangeself.min_ = feature_range[] - data_min * self.scale_self.data_range = data_rangeself.data_min = data_minreturn self", "docstring": "Compute the minimum and maximum to be used for later scaling.\n\n Parameters\n ----------\n X : array-like, shape [n_samples, n_features]\n The data used to compute the per-feature minimum and maximum\n used for later scaling along the features axis.", "id": "f14614:c2:m1"} {"signature": "def transform(self, X):", "body": "X = check_array(X, copy=self.copy)X *= self.scale_X += self.min_if self.truncate:np.maximum(self.feature_range[], X, out=X)np.minimum(self.feature_range[], X, out=X)return X", "docstring": "Scaling features of X according to feature_range.\n\n Parameters\n ----------\n X : array-like with shape [n_samples, n_features]\n Input data that will be transformed.", "id": "f14614:c2:m2"} {"signature": "def inverse_transform(self, X):", "body": "X = check_array(X, copy=self.copy)X -= self.min_X /= self.scale_return X", "docstring": "Undo the scaling of X according to feature_range.\n\n Note that if truncate is true, any truncated points will not\n be restored exactly.\n\n Parameters\n ----------\n X : array-like with shape [n_samples, n_features]\n Input data that will be transformed.", "id": "f14614:c2:m3"} {"signature": "def kl(Ks, dim, num_q, rhos, nus, clamp=True):", "body": "est = dim * np.mean(np.log(nus) - np.log(rhos), axis=)est += np.log(num_q / (rhos.shape[] - ))if clamp:np.maximum(est, , out=est)return est", "docstring": "r'''\n Estimate the KL divergence between distributions:\n \\int p(x) \\log (p(x) / q(x))\n using the kNN-based estimator (5) of\n Qing Wang, Sanjeev R Kulkarni, and Sergio Verdu (2009).\n Divergence Estimation for Multidimensional Densities Via\n k-Nearest-Neighbor Distances.\n IEEE Transactions on Information Theory.\n http://www.ee.princeton.edu/~verdu/reprints/WanKulVer.May2009.pdf\n which is:\n d * 1/n \\sum \\log (nu_k(i) / rho_k(i)) + log(m / (n - 1))\n\n If clamp, enforces KL >= 0.\n\n Returns an array of shape (num_Ks,).", "id": "f14616:m1"} {"signature": "def _build_indices(X, flann_args):", "body": "logger.info(\"\")indices = [None] * len(X)for i, bag in enumerate(plog(X, name=\"\")):indices[i] = idx = FLANNIndex(**flann_args)idx.build_index(bag)return indices", "docstring": "Builds FLANN indices for each bag.", "id": "f14617:m2"} {"signature": "def _get_rhos(X, indices, Ks, max_K, save_all_Ks, min_dist):", "body": "logger.info(\"\")if max_K >= X.n_pts.min():msg = \"\"raise ValueError(msg.format(max_K, X.n_pts.min()))which_Ks = slice(, None) if save_all_Ks else Ksindices = plog(indices, name=\"\")rhos = [None] * len(X)for i, (idx, bag) in enumerate(zip(indices, X)):r = np.sqrt(idx.nn_index(bag, max_K + )[][:, which_Ks])np.maximum(min_dist, r, out=r)rhos[i] = 
rreturn rhos", "docstring": "Gets within-bag distances for each bag.", "id": "f14617:m3"} {"signature": "def linear(Ks, dim, num_q, rhos, nus):", "body": "return _get_linear(Ks, dim)(num_q, rhos, nus)", "docstring": "r'''\n Estimates the linear inner product \\int p q between two distributions,\n based on kNN distances.", "id": "f14617:m5"} {"signature": "def alpha_div(alphas, Ks, dim, num_q, rhos, nus):", "body": "return _get_alpha_div(alphas, Ks, dim)(num_q, rhos, nus)", "docstring": "r'''\n Estimate the alpha divergence between distributions:\n \\int p^\\alpha q^(1-\\alpha)\n based on kNN distances.\n\n Used in Renyi, Hellinger, Bhattacharyya, Tsallis divergences.\n\n Enforces that estimates are >= 0.\n\n Returns divergence estimates with shape (num_alphas, num_Ks).", "id": "f14617:m7"} {"signature": "def jensen_shannon_core(Ks, dim, num_q, rhos, nus):", "body": "ns = np.array([rhos.shape[], num_q])return _get_jensen_shannon_core(Ks, dim, ns)[](num_q, rhos, nus)", "docstring": "r'''\n Estimates\n 1/2 mean_X( d * log radius of largest ball in X+Y around X_i\n with no more than M/(n+m-1) weight\n where X points have weight 1 / (2 n - 1)\n and Y points have weight n / (m (2 n - 1))\n - digamma(# of neighbors in that ball))\n\n This is the core pairwise component of the estimator of Jensen-Shannon\n divergence based on the Hino-Murata weighted information estimator. See\n the docstring for jensen_shannon for an explanation.", "id": "f14617:m9"} {"signature": "def bhattacharyya(Ks, dim, required, clamp=True, to_self=False):", "body": "est = requiredif clamp:est = np.minimum(est, ) return est", "docstring": "r'''\n Estimate the Bhattacharyya coefficient between distributions, based on kNN\n distances: \\int \\sqrt{p q}\n\n If clamp (the default), enforces 0 <= BC <= 1.\n\n Returns an array of shape (num_Ks,).", "id": "f14617:m11"} {"signature": "def hellinger(Ks, dim, required, clamp=True, to_self=False):", "body": "bc = requiredest = - bcnp.maximum(est, , out=est)if clamp:np.minimum(est, , out=est)np.sqrt(est, out=est)return est", "docstring": "r'''\n Estimate the Hellinger distance between distributions, based on kNN\n distances: \\sqrt{1 - \\int \\sqrt{p q}}\n\n Always enforces 0 <= H, to be able to sqrt; if clamp, also enforces\n H <= 1.\n\n Returns a vector: one element for each K.", "id": "f14617:m12"} {"signature": "def renyi(alphas, Ks, dim, required, min_val=np.spacing(),clamp=True, to_self=False):", "body": "alphas = np.reshape(alphas, (-, ))est = requiredest = np.maximum(est, min_val) np.log(est, out=est)est /= alphas - if clamp:np.maximum(est, , out=est)return est", "docstring": "r'''\n Estimate the Renyi-alpha divergence between distributions, based on kNN\n distances: 1/(\\alpha-1) \\log \\int p^alpha q^(1-\\alpha)\n\n If the inner integral is less than min_val (default ``np.spacing(1)``),\n uses the log of min_val instead.\n\n If clamp (the default), enforces that the estimates are nonnegative by\n replacing any negative estimates with 0.\n\n Returns an array of shape (num_alphas, num_Ks).", "id": "f14617:m13"} {"signature": "def tsallis(alphas, Ks, dim, required, clamp=True, to_self=False):", "body": "alphas = np.reshape(alphas, (-, ))alpha_est = requiredest = alpha_est - est /= alphas - if clamp:np.maximum(est, , out=est)return est", "docstring": "r'''\n Estimate the Tsallis-alpha divergence between distributions, based on kNN\n distances: (\\int p^alpha q^(1-\\alpha) - 1) / (\\alpha - 1)\n\n If clamp (the default), enforces the estimate is nonnegative.\n\n Returns an array 
of shape (num_alphas, num_Ks).", "id": "f14617:m14"} {"signature": "def l2(Ks, dim, X_rhos, Y_rhos, required, clamp=True, to_self=False):", "body": "n_X = len(X_rhos)n_Y = len(Y_rhos)linears = requiredassert linears.shape == (, Ks.size, n_X, n_Y, )X_quadratics = np.empty((Ks.size, n_X), dtype=np.float32)for i, rho in enumerate(X_rhos):X_quadratics[:, i] = quadratic(Ks, dim, rho)Y_quadratics = np.empty((Ks.size, n_Y), dtype=np.float32)for j, rho in enumerate(Y_rhos):Y_quadratics[:, j] = quadratic(Ks, dim, rho)est = -linears.sum(axis=)est += X_quadratics[None, :, :, None]est += Y_quadratics[None, :, None, :]np.maximum(est, , out=est)np.sqrt(est, out=est)if to_self:est[:, :, xrange(n_X), xrange(n_Y)] = return est[:, :, :, :, None]", "docstring": "r'''\n Estimates the L2 distance between distributions, via\n \\int (p - q)^2 = \\int p^2 - \\int p q - \\int q p + \\int q^2.\n\n \\int pq and \\int qp are estimated with the linear function (in both\n directions), while \\int p^2 and \\int q^2 are estimated via the quadratic\n function below.\n\n Always clamps negative estimates of l2^2 to 0, because otherwise the sqrt\n would break.", "id": "f14617:m15"} {"signature": "def quadratic(Ks, dim, rhos, required=None):", "body": "N = rhos.shape[]Ks = np.asarray(Ks)Bs = (Ks - ) / np.pi ** (dim / ) * gamma(dim / + ) est = Bs / (N - ) * np.mean(rhos ** (-dim), axis=)return est", "docstring": "r'''\n Estimates \\int p^2 based on kNN distances.\n\n In here because it's used in the l2 distance, above.\n\n Returns array of shape (num_Ks,).", "id": "f14617:m16"} {"signature": "def jensen_shannon(Ks, dim, X_rhos, Y_rhos, required,clamp=True, to_self=False):", "body": "X_ns = np.array([rho.shape[] for rho in X_rhos])Y_ns = np.array([rho.shape[] for rho in Y_rhos])n_X = X_ns.sizen_Y = Y_ns.sizecores = requiredassert cores.shape == (, Ks.size, n_X, n_Y, )X_bits = np.empty((Ks.size, n_X), dtype=np.float32)for i, rho in enumerate(X_rhos):X_bits[:, i] = dim * np.mean(np.log(rho), axis=)X_bits += np.log(X_ns - )[np.newaxis, :]Y_bits = np.empty((Ks.size, n_Y), dtype=np.float32)for j, rho in enumerate(Y_rhos):Y_bits[:, j] = dim * np.mean(np.log(rho), axis=)Y_bits += np.log(Y_ns - )[np.newaxis, :]est = cores.sum(axis=)est -= X_bits.reshape(, Ks.size, n_X, )est -= Y_bits.reshape(, Ks.size, , n_Y)est /= est += np.log(- + X_ns[None, None, :, None] + Y_ns[None, None, None, :])est += psi(Ks)[None, :, None, None]if to_self:est[:, :, xrange(n_X), xrange(n_Y)] = if clamp: np.maximum(, est, out=est)np.minimum(np.log(), est, out=est)return est[:, :, :, :, None]", "docstring": "r'''\n Estimate the difference between the Shannon entropy of an equally-weighted\n mixture between X and Y and the mixture of the Shannon entropies:\n\n JS(X, Y) = H[ (X + Y) / 2 ] - (H[X] + H[Y]) / 2\n\n We use a special case of the Hino-Murata weighted information estimator with\n a fixed M = n \\alpha, about equivalent to the K-nearest-neighbor approach\n used for the other estimators:\n\n Hideitsu Hino and Noboru Murata (2013).\n Information estimators for weighted observations. 
Neural Networks.\n http://linkinghub.elsevier.com/retrieve/pii/S0893608013001676\n\n\n The estimator for JS(X, Y) is:\n\n log volume of the unit ball - log M + log(n + m - 1) + digamma(M)\n + 1/2 mean_X( d * log radius of largest ball in X+Y around X_i\n with no more than M/(n+m-1) weight\n where X points have weight 1 / (2 n - 1)\n and Y points have weight n / (m (2 n - 1))\n - digamma(# of neighbors in that ball) )\n + 1/2 mean_Y( d * log radius of largest ball in X+Y around Y_i\n with no more than M/(n+m-1) weight\n where X points have weight m / (n (2 m - 1))\n and Y points have weight 1 / (2 m - 1)\n - digamma(# of neighbors in that ball) )\n\n - 1/2 (log volume of the unit ball - log M + log(n - 1) + digamma(M))\n - 1/2 mean_X( d * log radius of the largest ball in X around X_i\n with no more than M/(n-1) weight\n where X points have weight 1 / (n - 1))\n - digamma(# of neighbors in that ball) )\n\n - 1/2 (log volume of the unit ball - log M + log(m - 1) + digamma(M))\n - 1/2 mean_Y( d * log radius of the largest ball in Y around Y_i\n with no more than M/(n-1) weight\n where X points have weight 1 / (m - 1))\n - digamma(# of neighbors in that ball) )\n\n =\n\n log(n + m - 1) + digamma(M)\n + 1/2 mean_X( d * log radius of largest ball in X+Y around X_i\n with no more than M/(n+m-1) weight\n where X points have weight 1 / (2 n - 1)\n and Y points have weight n / (m (2 n - 1))\n - digamma(# of neighbors in that ball) )\n + 1/2 mean_Y( d * log radius of largest ball in X+Y around Y_i\n with no more than M/(n+m-1) weight\n where X points have weight m / (n (2 m - 1))\n and Y points have weight 1 / (2 m - 1)\n - digamma(# of neighbors in that ball) )\n - 1/2 [log(n-1) + mean_X( d * log rho_M(X_i) )]\n - 1/2 [log(m-1) + mean_Y( d * log rho_M(Y_i) )]", "id": "f14617:m17"} {"signature": "def topological_sort(deps):", "body": "order = []available = set()def _move_available():to_delete = []for n, parents in iteritems(deps):if not parents:available.add(n)to_delete.append(n)for n in to_delete:del deps[n]_move_available()while available:n = available.pop()order.append(n)for parents in itervalues(deps):parents.discard(n)_move_available()if available:raise ValueError(\"\")return order", "docstring": "Topologically sort a DAG, represented by a dict of child => set of parents.\nThe dependency dict is destroyed during operation.\n\nUses the Kahn algorithm: http://en.wikipedia.org/wiki/Topological_sorting\nNot a particularly good implementation, but we're just running it on tiny\ngraphs.", "id": "f14617:m18"} {"signature": "def _parse_specs(specs, Ks):", "body": "funcs = {}metas = {}meta_deps = defaultdict(set)def add_func(func, alpha=None, pos=None):needs_alpha = getattr(func, '', False)is_meta = hasattr(func, '')d = metas if is_meta else funcsif func not in d:if needs_alpha:args = {'': [alpha], '': [pos]}else:args = {'': None, '': [pos]}if not is_meta:d[func] = _FuncInfo(**args)else:d[func] = _MetaFuncInfo(deps=[], **args)for req in func.needs_results:if callable(req.alpha):req_alpha = req.alpha(alpha)else:req_alpha = req.alphaadd_func(req.func, alpha=req_alpha)meta_deps[func].add(req.func)meta_deps[req.func] else:info = d[func]if not needs_alpha:if pos is not None:if info.pos != [None]:msg = \"\"raise ValueError(msg.format(func_name))info.pos[] = poselse: try:idx = info.alphas.index(alpha)except ValueError:info.alphas.append(alpha)info.pos.append(pos)if is_meta:for req in func.needs_results:if callable(req.alpha):req_alpha = req.alpha(alpha)else:req_alpha = req.alphaadd_func(req.func, 
alpha=req_alpha)else:if pos is not None:if info.pos[idx] is not None:msg = \"\"raise ValueError(msg.format(func_name, alpha))info.pos[idx] = posfor i, spec in enumerate(specs):func_name, alpha = (spec.split('', ) + [None])[:]if alpha is not None:alpha = float(alpha)try:func = func_mapping[func_name]except KeyError:msg = \"\"raise ValueError(msg.format(func_name))needs_alpha = getattr(func, '', False)if needs_alpha and alpha is None:msg = \"\"raise ValueError(msg.format(func_name, spec))elif not needs_alpha and alpha is not None:msg = \"\"raise ValueError(msg.format(func_name, spec))add_func(func, alpha, i)meta_counter = itertools.count(-, step=-)for info in itertools.chain(itervalues(funcs), itervalues(metas)):for i, pos in enumerate(info.pos):if pos is None:info.pos[i] = next(meta_counter)for func, info in iteritems(metas):deps = info.depsassert deps == []for req in func.needs_results:f = req.funcreq_info = (metas if hasattr(f, '') else funcs)[f]if req.alpha is not None:if callable(req.alpha):req_alpha = req.alpha(info.alphas)else:req_alpha = req.alphafind_alpha = np.vectorize(req_info.alphas.index, otypes=[int])pos = np.asarray(req_info.pos)[find_alpha(req_alpha)]if np.isscalar(pos):deps.append(pos[()])else:deps.extend(pos)else:pos, = req_info.posdeps.append(pos)meta_order = topological_sort(meta_deps)metas_ordered = OrderedDict((f, metas[f]) for f in meta_order if hasattr(f, ''))return funcs, metas_ordered, -next(meta_counter) - ", "docstring": "Set up the different functions we need to call.\n\nReturns:\n - a dict mapping base estimator functions to _FuncInfo objects.\n If the function needs_alpha, then the alphas attribute is an array\n of alpha values and pos is a corresponding array of indices.\n Otherwise, alphas is None and pos is a list containing a single index.\n Indices are >= 0 if they correspond to something in a spec,\n and negative if they're just used for a meta estimator but not\n directly requested.\n - an OrderedDict mapping functions to _MetaFuncInfo objects.\n alphas and pos are like for _FuncInfo; deps is a list of indices\n which should be passed to the estimator. 
Note that these might be\n other meta functions; this list is guaranteed to be in an order\n such that all dependencies are resolved before calling that function.\n If no such order is possible, raise ValueError.\n - the number of meta-only results\n\n# TODO: update doctests for _parse_specs\n\n>>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9'])\n({:\n _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3])},\n OrderedDict([\n (,\n _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])),\n (,\n _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3]))\n ]), 3)\n\n>>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9', 'l2'])\n({:\n _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3]),\n : _FuncInfo(alphas=None, pos=[-4])\n }, OrderedDict([\n (,\n _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])),\n (,\n _MetaFuncInfo(alphas=None, pos=[3], deps=[-4])),\n (,\n _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3]))\n ]), 4)\n\n>>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9', 'l2', 'linear'])\n({:\n _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3]),\n : _FuncInfo(alphas=None, pos=[4])\n }, OrderedDict([\n (,\n _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])),\n (,\n _MetaFuncInfo(alphas=None, pos=[3], deps=[4])),\n (,\n _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3]))\n ]), 3)", "id": "f14617:m19"} {"signature": "def _get_Ks(self):", "body": "Ks = as_integer_type(self.Ks)if Ks.ndim != :raise TypeError(\"\".format(Ks.shape))if Ks.min() < :raise ValueError(\"\".format(Ks.min()))return Ks", "docstring": "Ks as an array and type-checked.", "id": "f14617:c0:m1"} {"signature": "@propertydef _n_jobs(self):", "body": "if self.n_jobs == -:from multiprocessing import cpu_countreturn cpu_count()return self.n_jobs", "docstring": "n_jobs, but with the number of cores instead of -1.", "id": "f14617:c0:m2"} {"signature": "def _flann_args(self, X=None):", "body": "args = {'': self._n_jobs}if self.flann_algorithm == '':if X is None or X.dim > :args[''] = ''else:args[''] = ''else:args[''] = self.flann_algorithmif self.flann_args:args.update(self.flann_args)try:FLANNParameters().update(args)except AttributeError as e:msg = \"\"raise TypeError(msg.format(e))return args", "docstring": "The dictionary of arguments to give to FLANN.", "id": "f14617:c0:m3"} {"signature": "def fit(self, X, y=None, get_rhos=False):", "body": "self.features_ = X = as_features(X, stack=True, bare=True)Ks = self._get_Ks()_, _, _, max_K, save_all_Ks, _ = _choose_funcs(self.div_funcs, Ks, X.dim, X.n_pts, None, self.version)if max_K >= X.n_pts.min():msg = \"\"raise ValueError(msg.format(max_K, X.n_pts.min()))memory = self.memoryif isinstance(memory, string_types):memory = Memory(cachedir=memory, verbose=)self.indices_ = id = memory.cache(_build_indices)(X, self._flann_args())if get_rhos:self.rhos_ = _get_rhos(X, id, Ks, max_K, save_all_Ks, self.min_dist)elif hasattr(self, ''):del self.rhos_return self", "docstring": "Sets up for divergence estimation \"from\" new data \"to\" X.\nBuilds FLANN indices for each bag, and maybe gets within-bag distances.\n\nParameters\n----------\nX : list of arrays or :class:`skl_groups.features.Features`\n The bags to search \"to\".\n\nget_rhos : boolean, optional, default False\n Compute within-bag distances :attr:`rhos_`. 
These are only needed\n for some divergence functions or if do_sym is passed, and they'll\n be computed (and saved) during :meth:`transform` if they're not\n computed here.\n\n If you're using Jensen-Shannon divergence, a higher max_K may\n be needed once it sees the number of points in the transformed bags,\n so the computation here might be wasted.", "id": "f14617:c0:m4"} {"signature": "def transform(self, X):", "body": "X = as_features(X, stack=True, bare=True)Y = self.features_Ks = np.asarray(self.Ks)if X.dim != Y.dim:msg = \"\"raise ValueError(msg.format(Y.dim, X.dim))memory = self.memoryif isinstance(memory, string_types):memory = Memory(cachedir=memory, verbose=)est = memory.cache(_est_divs, ignore=['', '', ''])output, self.rhos_ = est(X, Y, self.indices_, getattr(self, '', None),self.div_funcs, Ks,self.do_sym, self.clamp, self.version, self.min_dist,self._flann_args(), self._n_jobs)return output", "docstring": "r'''\n Computes the divergences from X to :attr:`features_`.\n\n Parameters\n ----------\n X : list of bag feature arrays or :class:`skl_groups.features.Features`\n The bags to search \"from\".\n\n Returns\n -------\n divs : array of shape ``[len(div_funcs), len(Ks), len(X), len(features_)] + ([2] if do_sym else [])``\n The divergences from X to :attr:`features_`.\n ``divs[d, k, i, j]`` is the ``div_funcs[d]`` divergence\n from ``X[i]`` to ``fetaures_[j]`` using a K of ``Ks[k]``.\n If ``do_sym``, ``divs[d, k, i, j, 0]`` is\n :math:`D_{d,k}( X_i \\| \\texttt{features_}_j)` and\n ``divs[d, k, i, j, 1]`` is :math:`D_{d,k}(\\texttt{features_}_j \\| X_i)`.", "id": "f14617:c0:m5"} {"signature": "@propertydef dim_(self):", "body": "self._check_fitted()return self.inds_.shape[]", "docstring": "The dimension of the inputs, once fitted.", "id": "f14626:c0:m2"} {"signature": "@propertydef out_dim_(self):", "body": "self._check_fitted()return self.inds_.shape[]", "docstring": "The dimension of the output vectors, once fitted.", "id": "f14626:c0:m3"} {"signature": "def fit(self, X, y=None):", "body": "if is_integer(X):dim = Xelse:X = as_features(X)dim = X.dimM = self.smoothnessinds = np.mgrid[(slice(M + ),) * dim].reshape(dim, (M + ) ** dim).Tself.inds_ = inds[(inds ** ).sum(axis=) <= M ** ]return self", "docstring": "Picks the elements of the basis to use for the given data.\n\nOnly depends on the dimension of X. 
If it's more convenient, you can\npass a single integer for X, which is the dimension to use.\n\nParameters\n----------\nX : an integer, a :class:`Features` instance, or a list of bag features\n The input data, or just its dimension, since only the dimension is\n needed here.", "id": "f14626:c0:m4"} {"signature": "def transform(self, X):", "body": "self._check_fitted()M = self.smoothnessdim = self.dim_inds = self.inds_do_check = self.do_bounds_checkX = as_features(X)if X.dim != dim:msg = \"\"raise ValueError(msg.format(dim, X.dim))Xt = np.empty((len(X), self.inds_.shape[]))Xt.fill(np.nan)if self.basis == '': coefs = (np.pi * np.arange(M + ))[..., :]for i, bag in enumerate(X):if do_check:if np.min(bag) < or np.max(bag) > :raise ValueError(\"\".format(i))phi = coefs * bag[..., np.newaxis]np.cos(phi, out=phi)phi[:, :, :] *= np.sqrt()B = reduce(op.mul, (phi[:, i, inds[:, i]] for i in xrange(dim)))Xt[i, :] = np.mean(B, axis=)else:raise ValueError(\"\".format(self.basis))return Xt", "docstring": "Transform a list of bag features into its projection series\nrepresentation.\n\nParameters\n----------\nX : :class:`skl_groups.features.Features` or list of bag feature arrays\n New data to transform. The data should all lie in [0, 1];\n use :class:`skl_groups.preprocessing.BagMinMaxScaler` if not.\n\nReturns\n-------\nX_new : integer array, shape ``[len(X), dim_]``\n X transformed into the new space.", "id": "f14626:c0:m5"} {"signature": "def fit(self, X, y=None):", "body": "return self", "docstring": "Do nothing; this is a stateless transformer.", "id": "f14627:c0:m1"} {"signature": "def transform(self, X):", "body": "X = as_features(X)return np.vstack([np.mean(bag, axis=) for bag in X])", "docstring": "Transform a list of bag features into a matrix of its mean features.\n\nParameters\n----------\nX : :class:`skl_groups.features.Features` or list of bag feature arrays\n Data to transform.\n\nReturns\n-------\nX_new : array, shape ``[len(X), X.dim]``\n X transformed into its means.", "id": "f14627:c0:m2"} {"signature": "@propertydef n_codewords(self):", "body": "return self.kmeans.n_clusters", "docstring": "The number of codewords used.", "id": "f14628:c0:m2"} {"signature": "@propertydef codewords_(self):", "body": "self._check_fitted()return self.kmeans_fit_.cluster_centers_", "docstring": "The selected codewords; shape [n_codewords, n_features].", "id": "f14628:c0:m3"} {"signature": "def fit(self, X, y=None):", "body": "self.kmeans_fit_ = copy(self.kmeans)X = as_features(X, stack=True)self.kmeans_fit_.fit(X.stacked_features) return self", "docstring": "Choose the codewords based on a training set.\n\nParameters\n----------\nX : :class:`skl_groups.features.Features` or list of arrays of shape ``[n_samples[i], n_features]``\n Training set. 
If a Features object, it will be stacked.", "id": "f14628:c0:m5"} {"signature": "def transform(self, X):", "body": "self._check_fitted()X = as_features(X, stack=True)assignments = self.kmeans_fit_.predict(X.stacked_features)return self._group_assignments(X, assignments)", "docstring": "Transform a list of bag features into its bag-of-words representation.\n\nParameters\n----------\nX : :class:`skl_groups.features.Features` or list of bag feature arrays\n New data to transform.\n\nReturns\n-------\nX_new : integer array, shape [len(X), kmeans.n_clusters]\n X transformed into the new space.", "id": "f14628:c0:m6"} {"signature": "def fit_transform(self, X):", "body": "X = as_features(X, stack=True)self.kmeans_fit_ = copy(self.kmeans)assignments = self.kmeans_fit_.fit_predict(X.stacked_features) return self._group_assignments(X, assignments)", "docstring": "Compute clustering and transform a list of bag features into its\nbag-of-words representation. Like calling fit(X) and then transform(X),\nbut more efficient.\n\nParameters\n----------\nX : :class:`skl_groups.features.Features` or list of bag feature arrays\n New data to transform.\n\nReturns\n-------\nX_new : integer array, shape [len(X), kmeans.n_clusters]\n X transformed into the new space.", "id": "f14628:c0:m7"} {"signature": "def is_integer_type(x):", "body": "return issubclass(np.asanyarray(x).dtype.type, np.integer)", "docstring": "Checks whether the array is of an integral type.", "id": "f14629:m1"} {"signature": "def is_categorical_type(ary):", "body": "ary = np.asanyarray(ary)return is_integer_type(ary) or ary.dtype.kind == ''", "docstring": "Checks whether the array is either integral or boolean.", "id": "f14629:m2"} {"signature": "def is_integer(x):", "body": "return np.isscalar(x) and is_integer_type(x)", "docstring": "Checks whether the argument is a single integer.", "id": "f14629:m3"} {"signature": "def is_categorical(x):", "body": "return np.isscalar(x) and is_categorical_type(x)", "docstring": "Checks whether the argument is a single integer or boolean.", "id": "f14629:m4"} {"signature": "def as_integer_type(ary):", "body": "ary = np.asanyarray(ary)if is_integer_type(ary):return aryrounded = np.rint(ary)if np.any(rounded != ary):raise ValueError(\"\")return rounded.astype(int)", "docstring": "Returns argument as an integer array, converting floats if convertible.\nRaises ValueError if it's a float array with nonintegral values.", "id": "f14629:m5"} {"signature": "def show_progress(name, **kwargs):", "body": "logger = logging.getLogger(name)logger.setLevel(logging.INFO)logger.addHandler(ProgressBarHandler(**kwargs))", "docstring": "Sets up a :class:`ProgressBarHandler` to handle progress logs for\na given module.\n\nParameters\n----------\nname : string\n The module name of the progress logger to use. 
For example,\n :class:`skl_groups.divergences.KNNDivergenceEstimator`\n uses ``'skl_groups.divergences.knn.progress'``.\n\n* : anything\n Other keyword arguments are passed to the :class:`ProgressBarHandler`.", "id": "f14629:m6"} {"signature": "def start(self, total):", "body": "self.logger.info(json.dumps(['', self.name, total]))", "docstring": "Signal the start of the process.\n\nParameters\n----------\ntotal : int\n The total number of steps in the process, or None if unknown.", "id": "f14629:c1:m1"} {"signature": "def update(self, idx):", "body": "self.logger.info(''.format(idx))", "docstring": "Update the current state.\n\nParameters\n----------\nidx : int\n The current state through the process.", "id": "f14629:c1:m2"} {"signature": "def finish(self):", "body": "self.logger.info(json.dumps(['']))", "docstring": "Marks the process as done.", "id": "f14629:c1:m3"} {"signature": "def as_features(X, stack=False, bare=False):", "body": "if isinstance(X, Features):if stack:X.make_stacked()return X.bare() if bare else Xreturn Features(X, stack=stack, bare=bare)", "docstring": "Returns a version of X as a :class:`Features` object.\n\nParameters\n----------\nstack : boolean, default False\n Make a stacked version of X. Note that if X is a features object,\n this will stack it in-place, since that's usually what you want.\n (If not, just use the :class:`Features` constructor instead.)\n\nbare : boolean, default False\n Return a bare version of X (no metadata).\n\nReturns\n-------\nfeats : :class:`Features`\n A version of X. If X is already a :class:`Features` object, the original\n X may be returned, depending on the arguments.", "id": "f14630:m0"} {"signature": "def make_stacked(self):", "body": "if self.stacked:returnself._boundaries = bounds = np.r_[, np.cumsum(self.n_pts)]self.stacked_features = stacked = np.vstack(self.features)self.features = np.array([stacked[bounds[i-]:bounds[i]] for i in xrange(, len(bounds))],dtype=object)self.stacked = True", "docstring": "If unstacked, convert to stacked. If stacked, do nothing.", "id": "f14630:c0:m1"} {"signature": "@propertydef total_points(self):", "body": "return self.n_pts.sum()", "docstring": "The total number of points in all bags.", "id": "f14630:c0:m2"} {"signature": "@propertydef dim(self):", "body": "return self.features[].shape[]", "docstring": "The dimensionality of the features.", "id": "f14630:c0:m3"} {"signature": "@propertydef dtype(self):", "body": "return self.features[].dtype", "docstring": "The data type of the feature vectors.", "id": "f14630:c0:m4"} {"signature": "def copy(self, stack=False, copy_meta=False, memo=None):", "body": "if self.stacked:fs = deepcopy(self.stacked_features, memo)n_pts = self.n_pts.copy()elif stack:fs = np.vstack(self.features)n_pts = self.n_pts.copy()else:fs = deepcopy(self.features, memo)n_pts = Nonemeta = deepcopy(self.meta, memo) if copy_meta else self.metareturn Features(fs, n_pts, copy=False, **meta)", "docstring": "Copies the Feature object. Makes a copy of the features array.\n\nParameters\n----------\nstack : boolean, optional, default False\n Whether to stack the copy if this one is unstacked.\n\ncopy_meta : boolean, optional, default False\n Also copy the metadata. 
If False, metadata in both points to the\n same object.", "id": "f14630:c0:m5"} {"signature": "def bare(self):", "body": "if not self.meta:return selfelif self.stacked:return Features(self.stacked_features, self.n_pts, copy=False)else:return Features(self.features, copy=False)", "docstring": "Make a Features object with no metadata; points to the same features.", "id": "f14630:c0:m18"} {"signature": "def hash_file(path):", "body": "return check_output([\"\", \"\", \"\", \"\", path]).decode().strip()", "docstring": "Write file at path to Git index, return its SHA1 as a string.", "id": "f14631:m0"} {"signature": "def _lstree(files, dirs):", "body": "for f, sha1 in files:yield \"\".format(sha1, f)for d, sha1 in dirs:yield \"\".format(sha1, d)", "docstring": "Make git ls-tree like output.", "id": "f14631:m1"} {"signature": "def hash_dir(path):", "body": "dir_hash = {}for root, dirs, files in os.walk(path, topdown=False):f_hash = ((f, hash_file(join(root, f))) for f in files)d_hash = ((d, dir_hash[join(root, d)]) for d in dirs)dir_hash[join(*split(root))] = _mktree(f_hash, d_hash)return dir_hash[path]", "docstring": "Write directory at path to Git index, return its SHA1 as a string.", "id": "f14631:m3"} {"signature": "def voronoi_finite_polygons_2d(vor, radius=None):", "body": "if vor.points.shape[] != :raise ValueError(\"\")new_regions = []new_vertices = vor.vertices.tolist()center = vor.points.mean(axis=)if radius is None:radius = vor.points.ptp().max()*all_ridges = {}for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):all_ridges.setdefault(p1, []).append((p2, v1, v2))all_ridges.setdefault(p2, []).append((p1, v1, v2))for p1, region in enumerate(vor.point_region):vertices = vor.regions[region]if all(v >= for v in vertices):new_regions.append(vertices)continueridges = all_ridges[p1]new_region = [v for v in vertices if v >= ]for p2, v1, v2 in ridges:if v2 < :v1, v2 = v2, v1if v1 >= :continuet = vor.points[p2] - vor.points[p1] t /= np.linalg.norm(t)n = np.array([-t[], t[]]) midpoint = vor.points[[p1, p2]].mean(axis=)direction = np.sign(np.dot(midpoint - center, n)) * nfar_point = vor.vertices[v2] + direction * radiusnew_region.append(len(new_vertices))new_vertices.append(far_point.tolist())vs = np.asarray([new_vertices[v] for v in new_region])c = vs.mean(axis=)angles = np.arctan2(vs[:,] - c[], vs[:,] - c[])new_region = np.array(new_region)[np.argsort(angles)]new_regions.append(new_region.tolist())return new_regions, np.asarray(new_vertices)", "docstring": "Reconstruct infinite voronoi regions in a 2D diagram to finite\nregions.\n\nParameters\n----------\nvor : Voronoi\n Input diagram\nradius : float, optional\n Distance to 'points at infinity'.\n\nReturns\n-------\nregions : list of tuples\n Indices of vertices in each revised Voronoi regions.\nvertices : list of tuples\n Coordinates for revised Voronoi vertices. 
Same as coordinates\n of input vertices, with 'points at infinity' appended to the\n end.", "id": "f14632:m0"} {"signature": "def user_name(with_num=False):", "body": "result = first_name()if with_num:result += str(random.randint(, ))return result.lower()", "docstring": "Return a random user name.\n\n Basically it's lowercased result of\n :py:func:`~forgery_py.forgery.name.first_name()` with a number appended\n if `with_num`.", "id": "f14649:m0"} {"signature": "def top_level_domain():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random top-level domain name.", "id": "f14649:m1"} {"signature": "def domain_name():", "body": "result = random.choice(get_dictionary('')).strip()result += '' + top_level_domain()return result.lower()", "docstring": "Return a random domain name.\n\n Lowercased result of :py:func:`~forgery_py.forgery.name.company_name()`\n plus :py:func:`~top_level_domain()`.", "id": "f14649:m2"} {"signature": "def email_address(user=None):", "body": "if not user:user = user_name()else:user = user.strip().replace('', '').lower()return user + '' + domain_name()", "docstring": "Return random e-mail address in a hopefully imaginary domain.\n\n If `user` is ``None`` :py:func:`~user_name()` will be used. Otherwise it\n will be lowercased and will have spaces replaced with ``_``.\n\n Domain name is created using :py:func:`~domain_name()`.", "id": "f14649:m3"} {"signature": "def email_subject(words_quantity=):", "body": "return lorem_ipsum.title(words_quantity=words_quantity)", "docstring": "An alias for lorem_ipsum.title(words_quantity)", "id": "f14649:m4"} {"signature": "def cctld():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random country code TLD.", "id": "f14649:m5"} {"signature": "def ip_v4():", "body": "return ''.join([str(random.randint(, )) for _ in range(, )])", "docstring": "Return a random IPv4 address.", "id": "f14649:m6"} {"signature": "def _py2_ip_v6():", "body": "magnitude = ** return \"\".join((\"\" % random.randint(, magnitude) for _ in range()))", "docstring": "Return a random IPv6 address for Python versions prior to 3.3.", "id": "f14649:m7"} {"signature": "def ip_v6():", "body": "try:import ipaddressexcept ImportError:return _py2_ip_v6()return str(ipaddress.IPv6Address(random.randint(, **)))", "docstring": "Return a random IPv6 address.", "id": "f14649:m8"} {"signature": "def word():", "body": "return words(quantity=)", "docstring": "Return a random word.", "id": "f14650:m0"} {"signature": "def words(quantity=, as_list=False):", "body": "global _wordsif not _words:_words = ''.join(get_dictionary('')).lower().replace('', '')_words = re.sub(r'', '', _words)_words = _words.split('')result = random.sample(_words, quantity)if as_list:return resultelse:return ''.join(result)", "docstring": "Return random words.", "id": "f14650:m1"} {"signature": "def title(words_quantity=):", "body": "result = words(quantity=words_quantity)result += random.choice('')return result.capitalize()", "docstring": "Return a random sentence to be used as e.g. 
an e-mail subject.", "id": "f14650:m2"} {"signature": "def sentence():", "body": "return sentences(quantity=)", "docstring": "Return a random sentence.", "id": "f14650:m3"} {"signature": "def sentences(quantity=, as_list=False):", "body": "result = [sntc.strip() for sntc inrandom.sample(get_dictionary(''), quantity)]if as_list:return resultelse:return ''.join(result)", "docstring": "Return random sentences.", "id": "f14650:m4"} {"signature": "def paragraph(separator='', wrap_start='', wrap_end='',html=False, sentences_quantity=):", "body": "return paragraphs(quantity=, separator=separator, wrap_start=wrap_start,wrap_end=wrap_end, html=html,sentences_quantity=sentences_quantity)", "docstring": "Return a random paragraph.", "id": "f14650:m5"} {"signature": "def paragraphs(quantity=, separator='', wrap_start='', wrap_end='',html=False, sentences_quantity=, as_list=False):", "body": "if html:wrap_start = ''wrap_end = ''separator = ''result = []try:for _ in xrange(, quantity):result.append(wrap_start +sentences(sentences_quantity) +wrap_end)except NameError:for _ in range(, quantity):result.append(wrap_start +sentences(sentences_quantity) +wrap_end)if as_list:return resultelse:return separator.join(result)", "docstring": "Return random paragraphs.", "id": "f14650:m6"} {"signature": "def _to_lower_alpha_only(s):", "body": "s = re.sub(r'', '', s.lower())return re.sub(r'', '', s)", "docstring": "Return a lowercased string with non alphabetic chars removed.\n\n White spaces are not to be removed.", "id": "f14650:m7"} {"signature": "def characters(quantity=):", "body": "line = map(_to_lower_alpha_only,''.join(random.sample(get_dictionary(''), quantity)))return ''.join(line)[:quantity]", "docstring": "Return random characters.", "id": "f14650:m8"} {"signature": "def character():", "body": "return characters(quantity=)", "docstring": "Return a random character.", "id": "f14650:m9"} {"signature": "def text(what=\"\", *args, **kwargs):", "body": "if what == \"\":return character(*args, **kwargs)elif what == \"\":return characters(*args, **kwargs)elif what == \"\":return word(*args, **kwargs)elif what == \"\":return words(*args, **kwargs)elif what == \"\":return sentence(*args, **kwargs)elif what == \"\":return sentences(*args, **kwargs)elif what == \"\":return paragraph(*args, **kwargs)elif what == \"\":return paragraphs(*args, **kwargs)elif what == \"\":return title(*args, **kwargs)else:raise NameError('')", "docstring": "An aggregator for all above defined public methods.", "id": "f14650:m10"} {"signature": "def lorem_ipsum_characters():", "body": "return _to_lower_alpha_only(''.join(get_dictionary('')))", "docstring": "Return a whole lorem_ipsum dictionary as a lowercase string.", "id": "f14650:m11"} {"signature": "def lorem_ipsum_words():", "body": "return lorem_ipsum_characters().strip().split('')", "docstring": "Return a list of all lowercased words.\n\n Words are taken from the `lorem_ipsum dictionary`.", "id": "f14650:m12"} {"signature": "def first_name():", "body": "_dict = get_dictionary('')_dict += get_dictionary('')return random.choice(_dict).strip()", "docstring": "Return a random male of female first name.", "id": "f14651:m0"} {"signature": "def last_name():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random last name.", "id": "f14651:m1"} {"signature": "def full_name():", "body": "return first_name() + '' + last_name()", "docstring": "Return a random full name.\n\n Equivalent of ``first_name() + ' ' + last_name()``.", "id": "f14651:m2"} 
{"signature": "def male_first_name():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random male first name.", "id": "f14651:m3"} {"signature": "def female_first_name():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random female first name.", "id": "f14651:m4"} {"signature": "def company_name():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random company name.", "id": "f14651:m5"} {"signature": "def job_title():", "body": "result = random.choice(get_dictionary('')).strip()result = result.replace('', job_title_suffix())return result", "docstring": "Return a random job title.", "id": "f14651:m6"} {"signature": "def job_title_suffix():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random job title suffix.", "id": "f14651:m7"} {"signature": "def title():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random name title, e.g. ``Mr``.", "id": "f14651:m8"} {"signature": "def suffix():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random name suffix, e.g. ``Jr``.", "id": "f14651:m9"} {"signature": "def location():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random location name, e.g. ``MI6 Headquarters``.", "id": "f14651:m10"} {"signature": "def industry():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random industry name.", "id": "f14651:m11"} {"signature": "def day_of_week(abbr=False):", "body": "if abbr:return random.choice(DAYS_ABBR)else:return random.choice(DAYS)", "docstring": "Return a random (abbreviated if `abbr`) day of week name.", "id": "f14652:m0"} {"signature": "def month(abbr=False, numerical=False):", "body": "if numerical:return random.randint(, )else:if abbr:return random.choice(MONTHS_ABBR)else:return random.choice(MONTHS)", "docstring": "Return a random (abbreviated if `abbr`) month name or month number if\n `numerical`.", "id": "f14652:m1"} {"signature": "def year(past=False, min_delta=, max_delta=):", "body": "return dt.date.today().year + _delta(past, min_delta, max_delta)", "docstring": "Return a random year.", "id": "f14652:m3"} {"signature": "def day(month_length=):", "body": "return random.randint(, month_length)", "docstring": "Return a random day number in a `month_length` days long month.", "id": "f14652:m4"} {"signature": "def date(past=False, min_delta=, max_delta=):", "body": "timedelta = dt.timedelta(days=_delta(past, min_delta, max_delta))return dt.date.today() + timedelta", "docstring": "Return a random `dt.date` object. Delta args are days.", "id": "f14652:m5"} {"signature": "def datetime(past=False, min_delta=, max_delta=):", "body": "timedelta = dt.timedelta(days=_delta(past, min_delta, max_delta))return dt.datetime.today() + timedelta", "docstring": "Return a random `dt.dt` object. 
Delta args are days.", "id": "f14652:m6"} {"signature": "def money(min=, max=):", "body": "value = random.choice(range(min * , max * ))return \"\" % (float(value) / )", "docstring": "Return a str of decimal with two digits after a decimal mark.", "id": "f14653:m0"} {"signature": "def formatted_money(min=, max=):", "body": "return \"\" % float(money(min=min, max=max))", "docstring": "Return a random sum of money with a dollar sign as a prefix.", "id": "f14653:m1"} {"signature": "def latitude():", "body": "return random.uniform(, ) * - ", "docstring": "Return a random latitude in the range of [-90.0, +90.0].\n\n Latitude is a float.", "id": "f14654:m0"} {"signature": "def latitude_degrees():", "body": "return random.randint(, ) - ", "docstring": "Return a random latitude's degrees component in the range [-180, +180].\n\n Latitude's degree is an int.", "id": "f14654:m1"} {"signature": "def latitude_direction():", "body": "return random.choice(['', ''])", "docstring": "Return a random a latitude's direction component.\n\n Latitude's direction is denoted as either \"N\" (north) or \"S\" (south).", "id": "f14654:m2"} {"signature": "def latitude_minutes():", "body": "return random.randint(, - )", "docstring": "Return a random latitude's minutes component in the range [0, 60).\n\n Latitude's minutes is an int.", "id": "f14654:m3"} {"signature": "def latitude_seconds():", "body": "return random.randint(, - )", "docstring": "Return a random latitude's seconds component in the range [0, 60).\n\n Latitude's seconds is an int.", "id": "f14654:m4"} {"signature": "def longitude():", "body": "return random.uniform(, ) * - ", "docstring": "Return a random longitude in the range [-180.0, +180.0].\n\n Longitude is a float.", "id": "f14654:m5"} {"signature": "def longitude_degrees():", "body": "return latitude_degrees()", "docstring": "Return a random longitude's degrees component in the range [-180, +180].\n\n Longitude's degrees is an int.", "id": "f14654:m6"} {"signature": "def longitude_direction():", "body": "return random.choice(['', ''])", "docstring": "Return a random longitude's direction component.\n\n Longitude's direction is denoted as either \"E\" (east) or \"W\" (west).", "id": "f14654:m7"} {"signature": "def longitude_minutes():", "body": "return latitude_minutes()", "docstring": "Return a random longitude's minutes component in the range [0, 60).\n\n longitude's minutes is an int.", "id": "f14654:m8"} {"signature": "def longitude_seconds():", "body": "return latitude_seconds()", "docstring": "Return a random longitude's seconds component in the range [0, 60).\n\n Longitude's seconds is an int.", "id": "f14654:m9"} {"signature": "def description():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random currency description, e.g. `United Kingdom Pounds`.", "id": "f14655:m0"} {"signature": "def code():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random currency code, e.g. `GBP`.", "id": "f14655:m1"} {"signature": "def hex_color():", "body": "return ''.join(random.sample(HEX_DIGITS, ))", "docstring": "Return random HEX color.", "id": "f14656:m0"} {"signature": "def hex_color_short():", "body": "return ''.join(random.sample(HEX_DIGITS, ))", "docstring": "Return random short HEX color (e.g. 
`FFF` color).", "id": "f14656:m1"} {"signature": "def text(length=None, at_least=, at_most=, lowercase=True,uppercase=True, digits=True, spaces=True, punctuation=False):", "body": "base_string = ''if lowercase:base_string += string.ascii_lowercaseif uppercase:base_string += string.ascii_uppercaseif digits:base_string += string.digitsif spaces:base_string += ''if punctuation:base_string += string.punctuationif len(base_string) == :return ''if not length:length = random.randint(at_least, at_most)result = ''try:for _ in xrange(, length):result += random.choice(base_string)except NameError:for i in range(, length):result += random.choice(base_string)return result", "docstring": "Return random text.\n\nIf `length` is present the text will be exactly this chars long. Else the\ntext will be something between `at_least` and `at_most` chars long.", "id": "f14656:m2"} {"signature": "def boolean():", "body": "return random.choice(BOOLEAN)", "docstring": "Return random boolean.", "id": "f14656:m3"} {"signature": "def color():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return random color name.", "id": "f14656:m4"} {"signature": "def encrypt(password='', salt=None):", "body": "if not salt:salt = str(datetime.utcnow())try:dk = hashlib.pbkdf2_hmac('', password.encode(), salt.encode(), )hexdigest = binascii.hexlify(dk).decode('')except AttributeError:dk = hashlib.sha1()dk.update(password.encode() + salt.encode())hexdigest = dk.hexdigest()return hexdigest", "docstring": "Return SHA1 hexdigest of a password (optionally salted with a string).", "id": "f14656:m5"} {"signature": "def frequency():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return random frequency rate.\n\n Frequency rate is taken from the `frequencies` dictionary.", "id": "f14656:m6"} {"signature": "def number(at_least=, at_most=):", "body": "return random.choice(range(at_least, at_most))", "docstring": "Return a random number in the range specified.", "id": "f14656:m7"} {"signature": "def password(at_least=, at_most=, lowercase=True,uppercase=True, digits=True, spaces=False, punctuation=False):", "body": "return text(at_least=at_least, at_most=at_most, lowercase=lowercase,uppercase=uppercase, digits=digits, spaces=spaces,punctuation=punctuation)", "docstring": "Return a random string for use as a password.", "id": "f14656:m8"} {"signature": "def type():", "body": "return random.choice(list(CARDS.keys()))", "docstring": "Return a random credit card type.", "id": "f14657:m0"} {"signature": "def check_digit(num):", "body": "sum = digits = str(num)[:-][::-]for i, n in enumerate(digits):if (i + ) % != :digit = int(n) * if digit > :sum += (digit - )else:sum += digitelse:sum += int(n)return ((divmod(sum, )[] + ) * - sum) % ", "docstring": "Return a check digit of the given credit card number.\n\n Check digit calculated using Luhn algorithm (\"modulus 10\")\n See: http://www.darkcoding.net/credit-card/luhn-formula/", "id": "f14657:m1"} {"signature": "def number(type=None, length=None, prefixes=None):", "body": "if type and type in CARDS:card = typeelse:card = random.choice(list(CARDS.keys()))if not prefixes:prefixes = CARDS[card]['']prefix = random.choice(prefixes)if not length:length = CARDS[card]['']result = str(prefix)for d in range(length - len(str(prefix))):result += str(basic.number())last_digit = check_digit(int(result))return int(result[:-] + str(last_digit))", "docstring": "Return a random credit card number.\n\n:param type: credit card type. 
Defaults to a random selection.\n:param length: length of the credit card number.\n Defaults to the length for the selected card type.\n:param prefixes: allowed prefixes for the card number.\n Defaults to prefixes for the selected card type.\n:return: credit card randomly generated number (int)", "id": "f14657:m2"} {"signature": "def zone():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random timezone.", "id": "f14658:m0"} {"signature": "def account_number():", "body": "account = [random.randint(, ) for _ in range()]return \"\".join(map(str, account))", "docstring": "Return a random bank account number.", "id": "f14659:m0"} {"signature": "def bik():", "body": "return '' +''.join([str(random.randint(, )) for _ in range()]) +str(random.randint(, ) + )", "docstring": "Return a random bank identification number.", "id": "f14659:m1"} {"signature": "def inn(type=\"\"):", "body": "if (type in TYPES) and type == '':return person_inn()else:return legal_inn()", "docstring": "Return a random taxation ID number for either a person or a company.\n\n Further information on the topic can be found in [1] (in russian).\n [1]: https://ru.wikipedia.org/wiki/\u0418\u0434\u0435\u043d\u0442\u0438\u0444\u0438\u043a\u0430\u0446\u0438\u043e\u043d\u043d\u044b\u0439_\u043d\u043e\u043c\u0435\u0440_\u043d\u0430\u043b\u043e\u0433\u043e\u043f\u043b\u0430\u0442\u0435\u043b\u044c\u0449\u0438\u043a\u0430", "id": "f14659:m2"} {"signature": "def legal_inn():", "body": "mask = [, , , , , , , , ]inn = [random.randint(, ) for _ in range()]weighted = [v * mask[i] for i, v in enumerate(inn[:-])]inn[] = sum(weighted) % % return \"\".join(map(str, inn))", "docstring": "Return a random taxation ID number for a company.", "id": "f14659:m3"} {"signature": "def legal_ogrn():", "body": "ogrn = \"\".join(map(str, [random.randint(, ) for _ in range()]))ogrn += str((int(ogrn) % % ))return ogrn", "docstring": "Return a random government registration ID for a company.", "id": "f14659:m4"} {"signature": "def ogrn(type=\"\"):", "body": "if (type in TYPES) and type == '':return person_ogrn()else:return legal_ogrn()", "docstring": "Return a random government registration ID for either a person or a company.\n\n Further information on the topic can be found in [1] (in russian).\n [1]: https://ru.wikipedia.org/wiki/\u041e\u0441\u043d\u043e\u0432\u043d\u043e\u0439_\u0433\u043e\u0441\u0443\u0434\u0430\u0440\u0441\u0442\u0432\u0435\u043d\u043d\u044b\u0439_\u0440\u0435\u0433\u0438\u0441\u0442\u0440\u0430\u0446\u0438\u043e\u043d\u043d\u044b\u0439_\u043d\u043e\u043c\u0435\u0440", "id": "f14659:m5"} {"signature": "def person_inn():", "body": "mask11 = [, , , , , , , , , ]mask12 = [, , , , , , , , , , ]inn = [random.randint(, ) for _ in range()]weighted11 = [v * mask11[i] for i, v in enumerate(inn[:-])]inn[] = sum(weighted11) % % weighted12 = [v * mask12[i] for i, v in enumerate(inn[:-])]inn[] = sum(weighted12) % % return \"\".join(map(str, inn))", "docstring": "Return a random taxation ID number for a natural person.", "id": "f14659:m6"} {"signature": "def person_ogrn():", "body": "ogrn = \"\".join(map(str, [random.randint(, ) for _ in range()]))ogrn += str((int(ogrn) % % ))return ogrn", "docstring": "Return a random government registration ID for a person.", "id": "f14659:m7"} {"signature": "def address(user=None):", "body": "return internet.email_address(user=user)", "docstring": "An alias for internet.email_address(user).", "id": "f14660:m0"} {"signature": "def body(quantity=, separator='', wrap_start='', 
wrap_end='',html=False, sentences_quantity=, as_list=False):", "body": "return lorem_ipsum.paragraphs(quantity=quantity, separator=separator,wrap_start=wrap_start, wrap_end=wrap_end,html=html,sentences_quantity=sentences_quantity,as_list=as_list)", "docstring": "Return a random email text.", "id": "f14660:m1"} {"signature": "def subject(words_quantity=):", "body": "return lorem_ipsum.title(words_quantity=words_quantity)", "docstring": "An alias for lorem_ipsum.title(words_quantity)", "id": "f14660:m2"} {"signature": "def gender():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return random gender.", "id": "f14662:m0"} {"signature": "def abbreviated_gender():", "body": "return gender()[:]", "docstring": "Return random abbreviated gender.", "id": "f14662:m1"} {"signature": "def shirt_size():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random shirt size.", "id": "f14662:m2"} {"signature": "def race():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return random race.", "id": "f14662:m3"} {"signature": "def language():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random language name, e.g. ``Polish``.", "id": "f14662:m4"} {"signature": "def street_name():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random street name.", "id": "f14663:m0"} {"signature": "def street_number():", "body": "length = int(random.choice(string.digits[:]))return ''.join(random.sample(string.digits, length))", "docstring": "Return a random street number.", "id": "f14663:m1"} {"signature": "def street_suffix():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random street suffix.", "id": "f14663:m2"} {"signature": "def street_address():", "body": "return '' % (street_number(), street_name(), street_suffix())", "docstring": "Return a random street address.\n\n Equivalent of ``street_number() + ' ' +\n street_name() + ' ' + street_suffix()``.", "id": "f14663:m3"} {"signature": "def city():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random city name.", "id": "f14663:m4"} {"signature": "def state():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random US state name.", "id": "f14663:m5"} {"signature": "def state_abbrev():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random US abbreviated state name.", "id": "f14663:m6"} {"signature": "def province():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return random Canadian province or territory.", "id": "f14663:m7"} {"signature": "def province_abbrev():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return random Canadian province or territory abbreviation.", "id": "f14663:m8"} {"signature": "def zip_code():", "body": "format = ''if random.random() >= :format = ''result = ''for item in format:if item == '':result += str(random.randint(, ))else:result += itemreturn result", "docstring": "Return a random ZIP code, either in `#####` or `#####-####` format.", "id": "f14663:m9"} {"signature": "def phone():", "body": "format = ''result = ''for item in format:if item == '':result += str(random.randint(, ))else:result += itemreturn result", "docstring": "Return a random phone number in `#-(###)###-####` format.", "id": "f14663:m10"} {"signature": "def country():", "body": "return 
random.choice(get_dictionary('')).strip()", "docstring": "Return a random country name.", "id": "f14663:m11"} {"signature": "def continent():", "body": "return random.choice(get_dictionary('')).strip()", "docstring": "Return a random continent name.", "id": "f14663:m12"} {"signature": "def get_dictionary(dict_name):", "body": "global dictionaries_cacheif dict_name not in dictionaries_cache:try:dictionary_file = codecs.open(join(DICTIONARIES_PATH, dict_name), '', '')except IOError:Noneelse:dictionaries_cache[dict_name] = dictionary_file.readlines()dictionary_file.close()return dictionaries_cache[dict_name]", "docstring": "Load a dictionary file ``dict_name`` (if it's not cached) and return its\ncontents as an array of strings.", "id": "f14664:m0"} {"signature": "def as_list(iterable_of_arrays):", "body": "return [array.tolist() for array in iterable_of_arrays]", "docstring": "Converts an iterable of permutation matrices given as NumPy\n arrays into a list of lists.", "id": "f14668:m0"} {"signature": "def bump_version(version, which=None):", "body": "try:parts = [int(n) for n in version.split('')]except ValueError:fail('')if len(parts) != :fail('')PARTS = {'': , '': , '': }index = PARTS[which] if which in PARTS else before, middle, after = parts[:index], parts[index], parts[index + :]middle += return ''.join(str(n) for n in before + [middle] + after)", "docstring": "Returns the result of incrementing `version`.\n\n If `which` is not specified, the \"patch\" part of the version number will be\n incremented. If `which` is specified, it must be ``'major'``, ``'minor'``,\n or ``'patch'``. If it is one of these three strings, the corresponding part\n of the version number will be incremented instead of the patch number.\n\n Returns a string representing the next version number.\n\n Example::\n\n >>> bump_version('2.7.1')\n '2.7.2'\n >>> bump_version('2.7.1', 'minor')\n '2.8.0'\n >>> bump_version('2.7.1', 'major')\n '3.0.0'", "id": "f14669:m0"} {"signature": "def get_version(filename, pattern):", "body": "with open(filename) as f:match = re.search(r\"\" % pattern, f.read())if match:before, version, after = match.groups()return versionfail(''.format(pattern, filename))", "docstring": "Gets the current version from the specified file.\n\n This function assumes the file includes a string of the form::\n\n = ", "id": "f14669:m2"} {"signature": "def build_and_upload():", "body": "Popen([sys.executable, '', '', '', '','']).wait()", "docstring": "Uses Python's setup.py commands to build the package and upload it to\n PyPI.", "id": "f14669:m3"} {"signature": "def fail(message=None, exit_status=None):", "body": "print('', message, file=sys.stderr)sys.exit(exit_status or )", "docstring": "Prints the specified message and exits the program with the specified\n exit status.", "id": "f14669:m4"} {"signature": "def git_tags():", "body": "process = Popen(['', ''], stdout=PIPE)return set(process.communicate()[].splitlines())", "docstring": "Returns a list of the git tags.", "id": "f14669:m5"} {"signature": "def git_is_clean():", "body": "return Popen(['', '', '']).wait() == ", "docstring": "Returns ``True`` if and only if there are no uncommitted changes.", "id": "f14669:m6"} {"signature": "def git_commit(message):", "body": "Popen(['', '', '', message]).wait()", "docstring": "Commits all changed files with the specified message.", "id": "f14669:m7"} {"signature": "def git_tag(tag):", "body": "print(''.format(tag))msg = ''.format(tag)Popen(['', '', '', '', msg, tag]).wait()", "docstring": "Tags the current 
version.", "id": "f14669:m8"} {"signature": "def to_permutation_matrix(matches):", "body": "n = len(matches)P = np.zeros((n, n))P[list(zip(*(matches.items())))] = return P", "docstring": "Converts a permutation into a permutation matrix.\n\n `matches` is a dictionary whose keys are vertices and whose values are\n partners. For each vertex ``u`` and ``v``, entry (``u``, ``v``) in the\n returned matrix will be a ``1`` if and only if ``matches[u] == v``.\n\n Pre-condition: `matches` must be a permutation on an initial subset of the\n natural numbers.\n\n Returns a permutation matrix as a square NumPy array.", "id": "f14670:m0"} {"signature": "def zeros(m, n):", "body": "return np.zeros((m, n))", "docstring": "Convenience function for ``numpy.zeros((m, n))``.", "id": "f14670:m1"} {"signature": "def hstack(left, right):", "body": "return np.hstack((left, right))", "docstring": "Convenience function for ``numpy.hstack((left, right))``.", "id": "f14670:m2"} {"signature": "def vstack(top, bottom):", "body": "return np.vstack((top, bottom))", "docstring": "Convenience function for ``numpy.vstack((top, bottom))``.", "id": "f14670:m3"} {"signature": "def four_blocks(topleft, topright, bottomleft, bottomright):", "body": "return vstack(hstack(topleft, topright),hstack(bottomleft, bottomright))", "docstring": "Convenience function that creates a block matrix with the specified\n blocks.\n\n Each argument must be a NumPy matrix. The two top matrices must have the\n same number of rows, as must the two bottom matrices. The two left matrices\n must have the same number of columns, as must the two right matrices.", "id": "f14670:m4"} {"signature": "def to_bipartite_matrix(A):", "body": "m, n = A.shapereturn four_blocks(zeros(m, m), A, A.T, zeros(n, n))", "docstring": "Returns the adjacency matrix of a bipartite graph whose biadjacency\n matrix is `A`.\n\n `A` must be a NumPy array.\n\n If `A` has **m** rows and **n** columns, then the returned matrix has **m +\n n** rows and columns.", "id": "f14670:m5"} {"signature": "def to_pattern_matrix(D):", "body": "result = np.zeros_like(D)result[D.nonzero()] = return result", "docstring": "Returns the Boolean matrix in the same shape as `D` with ones exactly\n where there are nonzero entries in `D`.\n\n `D` must be a NumPy array.", "id": "f14670:m6"} {"signature": "def birkhoff_von_neumann_decomposition(D):", "body": "m, n = D.shapeif m != n:raise ValueError(''.format(m, n))indices = list(itertools.product(range(m), range(n)))coefficients = []permutations = []S = D.astype('')while not np.all(S == ):W = to_pattern_matrix(S)X = to_bipartite_matrix(W)G = from_numpy_matrix(X)left_nodes = range(n)M = maximum_matching(G, left_nodes)M = {u: v % n for u, v in M.items() if u < n}P = to_permutation_matrix(M)q = min(S[i, j] for (i, j) in indices if P[i, j] == )coefficients.append(q)permutations.append(P)S -= q * PS[np.abs(S) < TOLERANCE] = return list(zip(coefficients, permutations))", "docstring": "Returns the Birkhoff--von Neumann decomposition of the doubly\n stochastic matrix `D`.\n\n The input `D` must be a square NumPy array representing a doubly\n stochastic matrix (that is, a matrix whose entries are nonnegative\n reals and whose row sums and column sums are all 1). Each doubly\n stochastic matrix is a convex combination of at most ``n ** 2``\n permutation matrices, where ``n`` is the dimension of the input\n array.\n\n The returned value is a list of pairs whose length is at most ``n **\n 2``. 
In each pair, the first element is a real number in the interval **(0,\n 1]** and the second element is a NumPy array representing a permutation\n matrix. This represents the doubly stochastic matrix as a convex\n combination of the permutation matrices.\n\n The input matrix may also be a scalar multiple of a doubly\n stochastic matrix, in which case the row sums and column sums must\n each be *c*, for some positive real number *c*. This may be useful\n in avoiding precision issues: given a doubly stochastic matrix that\n will have many entries close to one, multiply it by a large positive\n integer. The returned permutation matrices will be the same\n regardless of whether the given matrix is a doubly stochastic matrix\n or a scalar multiple of a doubly stochastic matrix, but in the\n latter case, the coefficients will all be scaled by the appropriate\n scalar multiple, and their sum will be that scalar instead of one.\n\n For example::\n\n >>> import numpy as np\n >>> from birkhoff import birkhoff_von_neumann_decomposition as decomp\n >>> D = np.ones((2, 2))\n >>> zipped_pairs = decomp(D)\n >>> coefficients, permutations = zip(*zipped_pairs)\n >>> coefficients\n (1.0, 1.0)\n >>> permutations[0]\n array([[ 1., 0.],\n [ 0., 1.]])\n >>> permutations[1]\n array([[ 0., 1.],\n [ 1., 0.]])\n >>> zipped_pairs = decomp(D / 2) # halve each value in the matrix\n >>> coefficients, permutations = zip(*zipped_pairs)\n >>> coefficients # will be half as large as before\n (0.5, 0.5)\n >>> permutations[0] # will be the same as before\n array([[ 1., 0.],\n [ 0., 1.]])\n >>> permutations[1]\n array([[ 0., 1.],\n [ 1., 0.]])\n\n The returned list of pairs is given in the order computed by the algorithm\n (so in particular they are not sorted in any way).", "id": "f14670:m7"} {"signature": "def process_tables(app, docname, source):", "body": "import markdownmd = markdown.Markdown(extensions=[''])table_processor = markdown.extensions.tables.TableProcessor(md.parser)raw_markdown = source[]blocks = re.split(r'', raw_markdown)for i, block in enumerate(blocks):if table_processor.test(None, block):html = md.convert(block)styled = html.replace('', '', ) blocks[i] = styledsource[] = ''.join(blocks)", "docstring": "Convert markdown tables to html, since recommonmark can't. This requires 3 steps:\n Snip out table sections from the markdown\n Convert them to html\n Replace the old markdown table with an html table\n\nThis function is called by sphinx for each document. `source` is a 1-item list. 
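As a usage note for the `birkhoff_von_neumann_decomposition` documented above: the returned `(coefficient, permutation matrix)` pairs form a convex combination that reproduces the input doubly stochastic matrix, which is easy to verify. The matrix values below are illustrative only, and the import follows the form shown in the docstring::

    import numpy as np
    from birkhoff import birkhoff_von_neumann_decomposition as decomp

    # A small doubly stochastic matrix (rows and columns each sum to 1).
    D = np.array([[0.5, 0.3, 0.2],
                  [0.2, 0.5, 0.3],
                  [0.3, 0.2, 0.5]])

    pairs = decomp(D)

    # Each pair is (coefficient, permutation matrix); the convex combination
    # of the permutation matrices should reproduce D, and the coefficients
    # should sum to 1 for a genuinely doubly stochastic input.
    reconstructed = sum(c * P for c, P in pairs)
    assert np.allclose(reconstructed, D)
    assert np.isclose(sum(c for c, _ in pairs), 1.0)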
To update the document, replace\nelement 0 in `source`.", "id": "f14673:m1"} {"signature": "def __init__(self,database=None,user=None,password=None,host=None):", "body": "self.connection = Noneself.user = Noneself.database = Noneself.host = Noneself.cursors = {}if database and user and password and host:self.open(database, user, password, host)", "docstring": "Constructor for this class.\n\n Parameters\n ----------\n\n database : str\n Name of the database to connect to.\n\n user : str\n User name of the database server user.\n\n password : str\n Password for the database server user.\n\n host : str\n Database hostname or IP address to connect to.\n\n Returns\n -------\n\n `LCDB` object instance", "id": "f14679:c0:m0"} {"signature": "def open(self, database, user, password, host):", "body": "try:self.connection = pg.connect(user=user,password=password,database=database,host=host)LOGINFO('''' % (database,user))self.database = databaseself.user = userexcept Exception as e:LOGEXCEPTION('''' % (database,user))self.database = Noneself.user = None", "docstring": "This opens a new database connection.\n\n Parameters\n ----------\n\n database : str\n Name of the database to connect to.\n\n user : str\n User name of the database server user.\n\n password : str\n Password for the database server user.\n\n host : str\n Database hostname or IP address to connect to.", "id": "f14679:c0:m1"} {"signature": "def open_default(self):", "body": "if HAVECONF:self.open(DBDATA, DBUSER, DBPASS, DBHOST)else:LOGERROR(\"\"\"\")", "docstring": "This opens the database connection using the default database parameters\ngiven in the ~/.astrobase/astrobase.conf file.", "id": "f14679:c0:m2"} {"signature": "def autocommit(self):", "body": "if len(self.cursors.keys()) == :self.connection.autocommit = Trueelse:raise AttributeError('''')", "docstring": "This sets the database connection to autocommit. Must be called before\nany cursors have been instantiated.", "id": "f14679:c0:m3"} {"signature": "def cursor(self, handle, dictcursor=False):", "body": "if handle in self.cursors:return self.cursors[handle]else:if dictcursor:self.cursors[handle] = self.connection.cursor(cursor_factory=psycopg2.extras.DictCursor)else:self.cursors[handle] = self.connection.cursor()return self.cursors[handle]", "docstring": "This gets or creates a DB cursor for the current DB connection.\n\n Parameters\n ----------\n\n handle : str\n The name of the cursor to look up in the existing list or if it\n doesn't exist, the name to be used for a new cursor to be returned.\n\n dictcursor : bool\n If True, returns a cursor where each returned row can be addressed\n as a dictionary by column name.\n\n Returns\n -------\n\n psycopg2.Cursor instance", "id": "f14679:c0:m4"} {"signature": "def newcursor(self, dictcursor=False):", "body": "handle = hashlib.sha256(os.urandom()).hexdigest()if dictcursor:self.cursors[handle] = self.connection.cursor(cursor_factory=psycopg2.extras.DictCursor)else:self.cursors[handle] = self.connection.cursor()return (self.cursors[handle], handle)", "docstring": "This creates a DB cursor for the current DB connection using a\nrandomly generated handle. 
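The `LCDB` methods above (`cursor`, `newcursor`) keep a dictionary of named cursors so repeated lookups with the same handle reuse one cursor object. Since the snippets elide the `psycopg2` connection details and the `os.urandom` byte count, the sketch below uses a placeholder object in place of a real cursor and assumes 32 random bytes for the handle::

    import hashlib
    import os

    class CursorCache:
        """Minimal sketch of the cursor-caching pattern used by LCDB.cursor() and
        LCDB.newcursor(). A live psycopg2 connection is assumed in the original;
        a plain object() stands in for connection.cursor() here."""

        def __init__(self):
            self.cursors = {}

        def cursor(self, handle):
            # Reuse an existing cursor for this handle, or create a new one.
            if handle not in self.cursors:
                self.cursors[handle] = object()  # placeholder for a real cursor
            return self.cursors[handle]

        def newcursor(self):
            # The original derives the handle from hashlib.sha256(os.urandom(...));
            # the byte count is elided there, so 32 bytes is assumed here.
            handle = hashlib.sha256(os.urandom(32)).hexdigest()
            return self.cursor(handle), handle

    cache = CursorCache()
    c1 = cache.cursor('main')
    assert cache.cursor('main') is c1   # same handle -> same cursor object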
Returns a tuple with cursor and handle.\n\nParameters\n----------\n\ndictcursor : bool\n If True, returns a cursor where each returned row can be addressed\n as a dictionary by column name.\n\nReturns\n-------\n\ntuple\n The tuple is of the form (handle, psycopg2.Cursor instance).", "id": "f14679:c0:m5"} {"signature": "def commit(self):", "body": "if not self.connection.closed:self.connection.commit()else:raise AttributeError('' %self.database)", "docstring": "This just calls the connection's commit method.", "id": "f14679:c0:m6"} {"signature": "def rollback(self):", "body": "if not self.connection.closed:self.connection.rollback()else:raise AttributeError('' %self.database)", "docstring": "This just calls the connection's commit method.", "id": "f14679:c0:m7"} {"signature": "def close_cursor(self, handle):", "body": "if handle in self.cursors:self.cursors[handle].close()else:raise KeyError('' % handle)", "docstring": "Closes the cursor specified and removes it from the `self.cursors`\ndictionary.", "id": "f14679:c0:m8"} {"signature": "def close_connection(self):", "body": "self.connection.close()LOGINFO('' % self.database)", "docstring": "This closes all cursors currently in use, and then closes the DB\nconnection.", "id": "f14679:c0:m9"} {"signature": "def traptransit_fit_magseries(times, mags, errs,transitparams,sigclip=,plotfit=False,magsarefluxes=False,verbose=True):", "body": "stimes, smags, serrs = sigclip_magseries(times, mags, errs,sigclip=sigclip,magsarefluxes=magsarefluxes)nzind = np.nonzero(serrs)stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]transitperiod, transitepoch, transitdepth = transitparams[:]if transitepoch is None:if verbose:LOGWARNING('''')try:spfit = spline_fit_magseries(times, mags, errs, transitperiod,sigclip=sigclip,magsarefluxes=magsarefluxes,verbose=verbose)transitepoch = spfit['']['']except Exception as e:sgfit = savgol_fit_magseries(times, mags, errs, transitperiod,sigclip=sigclip,magsarefluxes=magsarefluxes,verbose=verbose)transitepoch = sgfit['']['']finally:if transitepoch is None:LOGERROR(\"\"\"\")returndict = {'':'','':{'':transitparams,'':None,'':None,'':None,'':None,},'':np.nan,'':np.nan,'':None,'':{'':None,'':None,'':None,'':None,'':magsarefluxes,},}return returndictelse:if transitepoch.size > :if verbose:LOGWARNING(\"\"\"\")transitparams[] = transitepoch[]else:if verbose:LOGWARNING(''% transitepoch)transitparams[] = transitepoch.item()if magsarefluxes:if transitdepth < :transitparams[] = -transitdepthelse:if transitdepth > :transitparams[] = -transitdepthtry:leastsqfit = spleastsq(transits.trapezoid_transit_residual,transitparams,args=(stimes, smags, serrs),full_output=True)except Exception as e:leastsqfit = Noneif leastsqfit and leastsqfit[-] in (,,,):finalparams = leastsqfit[]covxmatrix = leastsqfit[]fitmags, phase, ptimes, pmags, perrs, n_transitpoints = (transits.trapezoid_transit_func(finalparams,stimes, smags, serrs,get_ntransitpoints=True))fitchisq = np.sum(((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs))fitredchisq = fitchisq/(len(pmags) - len(finalparams) - )residuals = leastsqfit[]['']residualvariance = (np.sum(residuals*residuals)/(pmags.size - finalparams.size))if covxmatrix is not None:covmatrix = residualvariance*covxmatrixstderrs = np.sqrt(np.diag(covmatrix))else:LOGERROR('')stderrs = Noneif verbose:LOGINFO('' %(fitchisq, fitredchisq))fperiod, fepoch = finalparams[:]returndict = 
{'':'','':{'':transitparams,'':finalparams,'':stderrs,'':leastsqfit,'':fitmags,'':fepoch,'':n_transitpoints},'':fitchisq,'':fitredchisq,'':None,'':{'':phase,'':ptimes,'':pmags,'':perrs,'':magsarefluxes,},}if plotfit and isinstance(plotfit, str):make_fit_plot(phase, pmags, perrs, fitmags,fperiod, ptimes.min(), fepoch,plotfit,magsarefluxes=magsarefluxes)returndict[''] = plotfitreturn returndictelse:LOGERROR('')returndict = {'':'','':{'':transitparams,'':None,'':None,'':leastsqfit,'':None,'':None,'':},'':np.nan,'':np.nan,'':None,'':{'':None,'':None,'':None,'':None,'':magsarefluxes,},}return returndict", "docstring": "This fits a trapezoid transit model to a magnitude time series.\n\n Parameters\n ----------\n\n times,mags,errs : np.array\n The input mag/flux time-series to fit a trapezoid planet-transit model\n to.\n\n period : float\n The period to use for the model fit.\n\n transitparams : list of floats\n These are initial parameters for the transit model fit. A list of the\n following form is required::\n\n transitparams = [transitperiod (time),\n transitepoch (time),\n transitdepth (flux or mags),\n transitduration (phase),\n ingressduration (phase)]\n\n - for magnitudes -> `transitdepth` should be < 0\n - for fluxes -> `transitdepth` should be > 0\n\n If `transitepoch` is None, this function will do an initial spline fit\n to find an approximate minimum of the phased light curve using the given\n period.\n\n The `transitdepth` provided is checked against the value of\n `magsarefluxes`. if `magsarefluxes = True`, the `transitdepth` is forced\n to be > 0; if `magsarefluxes` = False, the `transitdepth` is forced to\n be < 0.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n magsarefluxes : bool\n If True, will treat the input values of `mags` as fluxes for purposes of\n plotting the fit and sig-clipping.\n\n plotfit : str or False\n If this is a string, this function will make a plot for the fit to the\n mag/flux time-series and writes the plot to the path specified here.\n\n ignoreinitfail : bool\n If this is True, ignores the initial failure to find a set of optimized\n Fourier parameters using the global optimization function and proceeds\n to do a least-squares fit anyway.\n\n verbose : bool\n If True, will indicate progress and warn of any problems.\n\n Returns\n -------\n\n dict\n This function returns a dict containing the model fit parameters, the\n minimized chi-sq value and the reduced chi-sq value. 
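The `transitparams` list documented above parameterizes a trapezoid transit by period, epoch, depth, total duration (in phase), and ingress duration (in phase). The sketch below evaluates a generic trapezoid of that kind as a function of phase; it is not the astrobase `transits.trapezoid_transit_func` implementation itself, and the parameter values at the end are arbitrary::

    import numpy as np

    def trapezoid_transit(phase, depth, duration, ingress_duration):
        """Hedged sketch of a trapezoid transit shape centred at phase 0.
        depth is in mag/flux units; duration and ingress_duration are in phase
        units (ingress_duration must be > 0)."""
        half_width = duration / 2.0
        flat_half = half_width - ingress_duration   # half-width of the flat bottom
        # Wrap phases into [-0.5, 0.5) so the transit is symmetric about 0.
        phase = np.mod(phase + 0.5, 1.0) - 0.5
        absphase = np.abs(phase)
        model = np.zeros_like(phase)
        in_flat = absphase <= flat_half
        in_slope = (absphase > flat_half) & (absphase <= half_width)
        model[in_flat] = depth
        # Linear ingress/egress ramps between the continuum and the flat bottom.
        model[in_slope] = depth * (half_width - absphase[in_slope]) / ingress_duration
        return model

    phase = np.linspace(-0.5, 0.5, 1000)
    mags = trapezoid_transit(phase, depth=-0.01, duration=0.05, ingress_duration=0.01)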
The form of this\n dict is mostly standardized across all functions in this module::\n\n {\n 'fittype':'traptransit',\n 'fitinfo':{\n 'initialparams':the initial transit params provided,\n 'finalparams':the final model fit transit params ,\n 'finalparamerrs':formal errors in the params,\n 'leastsqfit':the full tuple returned by scipy.leastsq,\n 'fitmags': the model fit mags,\n 'fitepoch': the epoch of minimum light for the fit,\n 'ntransitpoints': the number of LC points in transit phase\n },\n 'fitchisq': the minimized value of the fit's chi-sq,\n 'fitredchisq':the reduced chi-sq value,\n 'fitplotfile': the output fit plot if fitplot is not None,\n 'magseries':{\n 'times':input times in phase order of the model,\n 'phase':the phases of the model mags,\n 'mags':input mags/fluxes in the phase order of the model,\n 'errs':errs in the phase order of the model,\n 'magsarefluxes':input value of magsarefluxes kwarg\n }\n }", "id": "f14680:m0"} {"signature": "def _get_value(quantitystr, fitparams, fixedparams):", "body": "fitparamskeys, fixedparamskeys = fitparams.keys(), fixedparams.keys()if quantitystr in fitparamskeys:quantity = fitparams[quantitystr]elif quantitystr in fixedparamskeys:quantity = fixedparams[quantitystr]return quantity", "docstring": "This decides if a value is to be fit for or is fixed in a model fit.\n\n When you want to get the value of some parameter, but you're not sure if\n it's being fit or if it is fixed. then, e.g. for `period`::\n\n period_value = _get_value('period', fitparams, fixedparams)", "id": "f14680:m1"} {"signature": "def _transit_model(times, t0, per, rp, a, inc, ecc, w, u, limb_dark,exp_time_minutes=, supersample_factor=):", "body": "params = batman.TransitParams() params.t0 = t0 params.per = per params.rp = rp params.a = a params.inc = inc params.ecc = ecc params.w = w params.u = u params.limb_dark = limb_dark t = timesm = batman.TransitModel(params, t, exp_time=exp_time_minutes//,supersample_factor=supersample_factor)return params, m", "docstring": "This returns a BATMAN planetary transit model.\n\n Parameters\n ----------\n\n times : np.array\n The times at which the model will be evaluated.\n\n t0 : float\n The time of periastron for the transit.\n\n per : float\n The orbital period of the planet.\n\n rp : float\n The stellar radius of the planet's star (in Rsun).\n\n a : float\n The semi-major axis of the planet's orbit (in Rsun).\n\n inc : float\n The orbital inclination (in degrees).\n\n ecc : float\n The eccentricity of the orbit.\n\n w : float\n The longitude of periastron (in degrees).\n\n u : list of floats\n The limb darkening coefficients specific to the limb darkening model\n used.\n\n limb_dark : {\"uniform\", \"linear\", \"quadratic\", \"square-root\", \"logarithmic\", \"exponential\", \"power2\", \"custom\"}\n The type of limb darkening model to use. 
See the full list here:\n\n https://www.cfa.harvard.edu/~lkreidberg/batman/tutorial.html#limb-darkening-options\n\n exp_time_minutes : float\n The amount of time to 'smear' the transit LC points over to simulate a\n long exposure time.\n\n supersample_factor: int\n The number of supersampled time data points to average the lightcurve\n model over.\n\n Returns\n -------\n\n (params, batman_model) : tuple\n The returned tuple contains the params list and the generated\n `batman.TransitModel` object.", "id": "f14680:m2"} {"signature": "def _log_prior_transit(theta, priorbounds):", "body": "allowed = Truefor ix, key in enumerate(np.sort(list(priorbounds.keys()))):if priorbounds[key][] < theta[ix] < priorbounds[key][]:allowed = True and allowedelse:allowed = Falseif allowed:return return -np.inf", "docstring": "Assume priors on all parameters have uniform probability.", "id": "f14680:m3"} {"signature": "def _log_likelihood_transit(theta, params, model, t, flux, err_flux,priorbounds):", "body": "u = []for ix, key in enumerate(sorted(priorbounds.keys())):if key == '':params.rp = theta[ix]elif key == '':params.t0 = theta[ix]elif key == '':params.a = theta[ix]elif key == '':params.inc = theta[ix]elif key == '':params.per = theta[ix]elif key == '':params.per = theta[ix]elif key == '':params.w = theta[ix]elif key == '':u.append(theta[ix])elif key == '':u.append(theta[ix])params.u = ulc = model.light_curve(params)residuals = flux - lclog_likelihood = -*(np.sum((residuals/err_flux)** + np.log(*np.pi*(err_flux)**)))return log_likelihood", "docstring": "Given a batman TransitModel and its proposed parameters (theta), update the\nbatman params object with the proposed parameters and evaluate the gaussian\nlikelihood.\n\nNote: the priorbounds are only needed to parse theta.", "id": "f14680:m5"} {"signature": "def _log_likelihood_transit_plus_line(theta, params, model, t, data_flux,err_flux, priorbounds):", "body": "u = []for ix, key in enumerate(sorted(priorbounds.keys())):if key == '':params.rp = theta[ix]elif key == '':params.t0 = theta[ix]elif key == '':params.a = theta[ix]elif key == '':params.inc = theta[ix]elif key == '':params.per = theta[ix]elif key == '':params.per = theta[ix]elif key == '':params.w = theta[ix]elif key == '':u.append(theta[ix])elif key == '':u.append(theta[ix])params.u = uelif key == '':poly_order0 = theta[ix]elif key == '':poly_order1 = theta[ix]try:poly_order0except Exception as e:poly_order0 = else:passtransit = model.light_curve(params)line = poly_order0 + t*poly_order1model = transit + lineresiduals = data_flux - modellog_likelihood = -*(np.sum((residuals/err_flux)** + np.log(*np.pi*(err_flux)**)))return log_likelihood", "docstring": "Given a batman TransitModel and its proposed parameters (theta), update the\nbatman params object with the proposed parameters and evaluate the gaussian\nlikelihood.\n\nNote: the priorbounds are only needed to parse theta.", "id": "f14680:m6"} {"signature": "def log_posterior_transit(theta, params, model, t, flux, err_flux, priorbounds):", "body": "lp = _log_prior_transit(theta, priorbounds)if not np.isfinite(lp):return -np.infelse:return lp + _log_likelihood_transit(theta, params, model, t, flux,err_flux, priorbounds)", "docstring": "Evaluate posterior probability given proposed model parameters and\nthe observed flux timeseries.", "id": "f14680:m7"} {"signature": "def log_posterior_transit_plus_line(theta, params, model, t, flux, err_flux,priorbounds):", "body": "lp = _log_prior_transit_plus_line(theta, priorbounds)if not 
np.isfinite(lp):return -np.infelse:return (lp + _log_likelihood_transit_plus_line(theta, params, model, t, flux, err_flux, priorbounds))", "docstring": "Evaluate posterior probability given proposed model parameters and\nthe observed flux timeseries.", "id": "f14680:m8"} {"signature": "def mandelagol_fit_magseries(times, mags, errs,fitparams,priorbounds,fixedparams,trueparams=None,burninpercent=,plotcorner=False,samplesavpath=False,n_walkers=,n_mcmc_steps=,eps=,skipsampling=False,overwriteexistingsamples=False,mcmcprogressbar=False,plotfit=False,magsarefluxes=False,sigclip=,verbose=True,nworkers=):", "body": "from multiprocessing import Poolfittype = ''if not magsarefluxes:raise NotImplementedError('')if not samplesavpath:raise ValueError('')if not mandel_agol_dependencies:raise ImportError('')stimes, smags, serrs = sigclip_magseries(times, mags, errs,sigclip=sigclip,magsarefluxes=magsarefluxes)nzind = np.nonzero(serrs)stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]init_period = _get_value('', fitparams, fixedparams)init_epoch = _get_value('', fitparams, fixedparams)init_rp = _get_value('', fitparams, fixedparams)init_sma = _get_value('', fitparams, fixedparams)init_incl = _get_value('', fitparams, fixedparams)init_ecc = _get_value('', fitparams, fixedparams)init_omega = _get_value('', fitparams, fixedparams)limb_dark = _get_value('', fitparams, fixedparams)init_u = _get_value('', fitparams, fixedparams)if not limb_dark == '':raise ValueError('')init_params, init_m = _transit_model(stimes, init_epoch, init_period,init_rp, init_sma, init_incl, init_ecc,init_omega, init_u, limb_dark)init_flux = init_m.light_curve(init_params)theta, fitparamnames = [], []for k in np.sort(list(fitparams.keys())):if isinstance(fitparams[k], float) or isinstance(fitparams[k], int):theta.append(fitparams[k])fitparamnames.append(fitparams[k])elif isinstance(fitparams[k], list):if not len(fitparams[k]) == :raise ValueError('')theta.append(fitparams[k][])theta.append(fitparams[k][])fitparamnames.append(fitparams[k][])fitparamnames.append(fitparams[k][])n_dim = len(theta)initial_position_vec = [theta + eps*np.random.randn(n_dim)for i in range(n_walkers)]if not skipsampling:backend = emcee.backends.HDFBackend(samplesavpath)if overwriteexistingsamples:LOGWARNING(''.format(samplesavpath))backend.reset(n_walkers, n_dim)starting_positions = initial_position_vecisfirstrun = Trueif os.path.exists(backend.filename):if backend.iteration > :starting_positions = Noneisfirstrun = Falseif verbose and isfirstrun:LOGINFO(''.format(fittype, n_dim, n_mcmc_steps, n_walkers) +''.format(nworkers))elif verbose and not isfirstrun:LOGINFO(''.format(fittype, n_dim, n_mcmc_steps, n_walkers) +''.format(nworkers))import sysif sys.version_info >= (, ):with Pool(nworkers) as pool:sampler = emcee.EnsembleSampler(n_walkers, n_dim, log_posterior_transit,args=(init_params, init_m, stimes,smags, serrs, priorbounds),pool=pool,backend=backend)sampler.run_mcmc(starting_positions, n_mcmc_steps,progress=mcmcprogressbar)elif sys.version_info < (, ):sampler = emcee.EnsembleSampler(n_walkers, n_dim, log_posterior_transit,args=(init_params, init_m, stimes, smags, serrs, priorbounds),threads=nworkers,backend=backend)sampler.run_mcmc(starting_positions, n_mcmc_steps,progress=mcmcprogressbar)if verbose:LOGINFO(''.format(fittype, n_mcmc_steps, n_walkers) + ''.format(nworkers))reader = emcee.backends.HDFBackend(samplesavpath)n_to_discard = int(burninpercent*n_mcmc_steps)samples = reader.get_chain(discard=n_to_discard, flat=True)log_prob_samples = 
reader.get_log_prob(discard=n_to_discard, flat=True)log_prior_samples = reader.get_blobs(discard=n_to_discard, flat=True)fit_statistics = list(map(lambda v: (v[], v[]-v[], v[]-v[]),list(zip( *np.percentile(samples, [, , ], axis=)))))medianparams, std_perrs, std_merrs = {}, {}, {}for ix, k in enumerate(np.sort(list(priorbounds.keys()))):medianparams[k] = fit_statistics[ix][]std_perrs[k] = fit_statistics[ix][]std_merrs[k] = fit_statistics[ix][]stderrs = {'':std_perrs, '':std_merrs}per = _get_value('', medianparams, fixedparams)t0 = _get_value('', medianparams, fixedparams)rp = _get_value('', medianparams, fixedparams)sma = _get_value('', medianparams, fixedparams)incl = _get_value('', medianparams, fixedparams)ecc = _get_value('', medianparams, fixedparams)omega = _get_value('', medianparams, fixedparams)limb_dark = _get_value('', medianparams, fixedparams)try:u = fixedparams['']except Exception as e:u = [medianparams[''], medianparams['']]fit_params, fit_m = _transit_model(stimes, t0, per, rp, sma, incl, ecc,omega, u, limb_dark)fitmags = fit_m.light_curve(fit_params)fepoch = t0returndict = {'':fittype,'':{'':fitparams,'':init_flux,'':fixedparams,'':medianparams,'':stderrs,'':fitmags,'':fepoch,},'':None,'':{'':stimes,'':smags,'':serrs,'':magsarefluxes,},}if plotcorner:if isinstance(trueparams,dict):trueparamkeys = np.sort(list(trueparams.keys()))truelist = [trueparams[k] for k in trueparamkeys]fig = corner.corner(samples,labels=trueparamkeys,truths=truelist,quantiles=[, , ], show_titles=True)else:fig = corner.corner(samples,labels=fitparamnames,quantiles=[, , ],show_titles=True)plt.savefig(plotcorner, dpi=)if verbose:LOGINFO(''.format(plotcorner))if plotfit and isinstance(plotfit, str):f, ax = plt.subplots(figsize=(,))ax.scatter(stimes, smags, c='', alpha=, label='',zorder=, s=, rasterized=True, linewidths=)ax.scatter(stimes, init_flux, c='', alpha=,s=, zorder=, rasterized=True, linewidths=,label='')ax.scatter(stimes, fitmags, c='', alpha=,s=, zorder=, rasterized=True, linewidths=,label=''.format(len(fitparamnames)))ax.legend(loc='')ax.set(xlabel='', ylabel='')f.savefig(plotfit, dpi=, bbox_inches='')if verbose:LOGINFO(''.format(plotfit))returndict[''] = plotfitreturn returndict", "docstring": "This fits a Mandel & Agol (2002) planetary transit model to a flux time\n series. You can fit and fix whatever parameters you want.\n\n It relies on Kreidberg (2015)'s BATMAN implementation for the transit model,\n emcee to sample the posterior (Foreman-Mackey et al 2013), `corner` to plot\n it, and `h5py` to save the samples. See e.g., Claret's work for good guesses\n of star-appropriate limb-darkening parameters.\n\n NOTE: this only works for flux time-series at the moment.\n\n NOTE: Between the `fitparams`, `priorbounds`, and `fixedparams` dicts, you\n must specify all of the planetary transit parameters required by BATMAN:\n `['t0', 'rp', 'sma', 'incl', 'u', 'rp', 'ecc', 'omega', 'period']`, or the\n BATMAN model will fail to initialize.\n\n Parameters\n ----------\n\n times,mags,errs : np.array\n The input flux time-series to fit a Fourier cosine series to.\n\n fitparams : dict\n This is the initial parameter guesses for MCMC, found e.g., by\n BLS. The key string format must not be changed, but any parameter can be\n either \"fit\" or \"fixed\". If it is \"fit\", it must have a corresponding\n prior. 
For example::\n\n fitparams = {'t0':1325.9, 'rp':np.sqrt(fitd['transitdepth']),\n 'sma':6.17, 'incl':85, 'u':[0.3, 0.2]}\n\n where 'u' is a list of the limb darkening parameters, Linear first, then\n quadratic. Quadratic limb darkening is the only form implemented.\n\n priorbounds : dict\n This sets the lower & upper bounds on uniform prior, e.g.::\n\n priorbounds = {'rp':(0.135, 0.145), 'u_linear':(0.3-1, 0.3+1),\n 'u_quad':(0.2-1, 0.2+1), 't0':(np.min(time),\n np.max(time)), 'sma':(6,6.4), 'incl':(80,90)}\n\n fixedparams : dict\n This sets which parameters are fixed, and their values. For example::\n\n fixedparams = {'ecc':0.,\n 'omega':90.,\n 'limb_dark':'quadratic',\n 'period':fitd['period'] }\n\n `limb_dark` must be \"quadratic\". It's \"fixed\", because once you\n choose your limb-darkening model, it's fixed.\n\n trueparams : list of floats\n The true parameter values you're fitting for, if they're known (e.g., a\n known planet, or fake data). Only for plotting purposes.\n\n burninpercent : float\n The percent of MCMC samples to discard as burn-in.\n\n plotcorner : str or False\n If this is a str, points to the path of output corner plot that will be\n generated for this MCMC run.\n\n samplesavpath : str\n This must be provided so `emcee` can save its MCMC samples to disk as\n HDF5 files. This will set the path of the output HDF5file written.\n\n n_walkers : int\n The number of MCMC walkers to use.\n\n n_mcmc_steps : int\n The number of MCMC steps to take.\n\n eps : float\n The radius of the `n_walkers-dimensional` Gaussian ball used to\n initialize the MCMC.\n\n skipsampling : bool\n If you've already collected MCMC samples, and you do not want any more\n sampling (e.g., just make the plots), set this to be True.\n\n overwriteexistingsamples : bool\n If you've collected samples, but you want to overwrite them, set this to\n True. Usually, it should be False, which appends samples to\n `samplesavpath` HDF5 file.\n\n mcmcprogressbar : bool\n If True, will show a progress bar for the MCMC process.\n\n plotfit: str or bool\n If a str, indicates the path of the output fit plot file. If False, no\n fit plot will be made.\n\n magsarefluxes : bool\n This indicates if the input measurements in `mags` are actually fluxes.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n verbose : bool\n If True, will indicate MCMC progress.\n\n nworkers : int\n The number of parallel workers to launch for MCMC.\n\n Returns\n -------\n\n dict\n This function returns a dict containing the model fit parameters and\n other fit information. 
The form of this dict is mostly standardized\n across all functions in this module::\n\n {\n 'fittype':'mandelagol',\n 'fitinfo':{\n 'initialparams':the initial transit params provided,\n 'fixedparams':the fixed transit params provided,\n 'finalparams':the final model fit transit params,\n 'finalparamerrs':formal errors in the params,\n 'fitmags': the model fit mags,\n 'fitepoch': the epoch of minimum light for the fit,\n },\n 'fitplotfile': the output fit plot if fitplot is not None,\n 'magseries':{\n 'times':input times in phase order of the model,\n 'phase':the phases of the model mags,\n 'mags':input mags/fluxes in the phase order of the model,\n 'errs':errs in the phase order of the model,\n 'magsarefluxes':input value of magsarefluxes kwarg\n }\n }", "id": "f14680:m9"} {"signature": "def mandelagol_and_line_fit_magseries(times, mags, errs,fitparams,priorbounds,fixedparams,trueparams=None,burninpercent=,plotcorner=False,timeoffset=,samplesavpath=False,n_walkers=,n_mcmc_steps=,eps=,skipsampling=False,overwriteexistingsamples=False,mcmcprogressbar=False,plotfit=False,scatterxdata=None,scatteryaxes=None,magsarefluxes=True,sigclip=,verbose=True,nworkers=):", "body": "from multiprocessing import Poolfittype = ''if not magsarefluxes:raise NotImplementedError('')if not samplesavpath:raise ValueError('')if not mandel_agol_dependencies:raise ImportError('')stimes, smags, serrs = sigclip_magseries(times, mags, errs,sigclip=sigclip,magsarefluxes=magsarefluxes)nzind = np.nonzero(serrs)stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]init_period = _get_value('', fitparams, fixedparams)init_epoch = _get_value('', fitparams, fixedparams)init_rp = _get_value('', fitparams, fixedparams)init_sma = _get_value('', fitparams, fixedparams)init_incl = _get_value('', fitparams, fixedparams)init_ecc = _get_value('', fitparams, fixedparams)init_omega = _get_value('', fitparams, fixedparams)limb_dark = _get_value('', fitparams, fixedparams)init_u = _get_value('', fitparams, fixedparams)init_poly_order0 = _get_value('', fitparams, fixedparams)init_poly_order1 = _get_value('', fitparams, fixedparams)if not limb_dark == '':raise ValueError('')init_params, init_m = _transit_model(stimes, init_epoch, init_period, init_rp, init_sma, init_incl,init_ecc, init_omega, init_u, limb_dark)init_flux = (init_m.light_curve(init_params) +init_poly_order0 + init_poly_order1*stimes)theta, fitparamnames = [], []for k in np.sort(list(fitparams.keys())):if isinstance(fitparams[k], float) or isinstance(fitparams[k], int):theta.append(fitparams[k])fitparamnames.append(fitparams[k])elif isinstance(fitparams[k], list):if not len(fitparams[k]) == :raise ValueError('')theta.append(fitparams[k][])theta.append(fitparams[k][])fitparamnames.append(fitparams[k][])fitparamnames.append(fitparams[k][])n_dim = len(theta)if not skipsampling:backend = emcee.backends.HDFBackend(samplesavpath)if overwriteexistingsamples:LOGWARNING(''.format(samplesavpath))backend.reset(n_walkers, n_dim)def nll(*args):return -_log_likelihood_transit_plus_line(*args)soln = spminimize(nll, theta, method='',args=(init_params, init_m, stimes, smags, serrs, priorbounds))theta_ml = soln.xml_poly_order0 = theta_ml[]ml_poly_order1 = theta_ml[]ml_rp = theta_ml[]ml_t0 = theta_ml[]ml_params, ml_m = _transit_model(stimes, ml_t0, init_period,ml_rp, init_sma, init_incl,init_ecc, init_omega, init_u,limb_dark)ml_mags = (ml_m.light_curve(ml_params) +ml_poly_order0 + ml_poly_order1*stimes)initial_position_vec = [theta_ml + eps*np.random.randn(n_dim)for i in 
range(n_walkers)]starting_positions = initial_position_vecisfirstrun = Trueif os.path.exists(backend.filename):if backend.iteration > :starting_positions = Noneisfirstrun = Falseif verbose and isfirstrun:LOGINFO(''.format(fittype, n_dim, n_mcmc_steps, n_walkers) +''.format(nworkers))elif verbose and not isfirstrun:LOGINFO(''.format(fittype, n_dim, n_mcmc_steps, n_walkers) +''.format(nworkers))with Pool(nworkers) as pool:sampler = emcee.EnsembleSampler(n_walkers, n_dim, log_posterior_transit_plus_line,args=(init_params, init_m, stimes, smags, serrs, priorbounds),pool=pool,backend=backend)sampler.run_mcmc(starting_positions, n_mcmc_steps,progress=mcmcprogressbar)if verbose:LOGINFO(''.format(fittype, n_mcmc_steps, n_walkers) + ''.format(nworkers))reader = emcee.backends.HDFBackend(samplesavpath)n_to_discard = int(burninpercent*n_mcmc_steps)samples = reader.get_chain(discard=n_to_discard, flat=True)log_prob_samples = reader.get_log_prob(discard=n_to_discard, flat=True)log_prior_samples = reader.get_blobs(discard=n_to_discard, flat=True)fit_statistics = list(map(lambda v: (v[], v[]-v[], v[]-v[]),list(zip( *np.percentile(samples, [, , ], axis=)))))medianparams, std_perrs, std_merrs = {}, {}, {}for ix, k in enumerate(np.sort(list(priorbounds.keys()))):medianparams[k] = fit_statistics[ix][]std_perrs[k] = fit_statistics[ix][]std_merrs[k] = fit_statistics[ix][]stderrs = {'':std_perrs, '':std_merrs}per = _get_value('', medianparams, fixedparams)t0 = _get_value('', medianparams, fixedparams)rp = _get_value('', medianparams, fixedparams)sma = _get_value('', medianparams, fixedparams)incl = _get_value('', medianparams, fixedparams)ecc = _get_value('', medianparams, fixedparams)omega = _get_value('', medianparams, fixedparams)limb_dark = _get_value('', medianparams, fixedparams)try:u = fixedparams['']except Exception as e:u = [medianparams[''], medianparams['']]poly_order0 = _get_value('', medianparams, fixedparams)poly_order1 = _get_value('', medianparams, fixedparams)fit_params, fit_m = _transit_model(stimes, t0, per, rp, sma, incl, ecc,omega, u, limb_dark)fitmags = (fit_m.light_curve(fit_params) +poly_order0 + poly_order1*stimes)fepoch = t0medianparams[''] += timeoffsetreturndict = {'':fittype,'':{'':fitparams,'':init_flux,'':fixedparams,'':medianparams,'':stderrs,'':fitmags,'':fepoch+timeoffset,},'':None,'':{'':stimes+timeoffset,'':smags,'':serrs,'':magsarefluxes,},}if plotcorner:fig = corner.corner(samples,labels=['', '','',''.format(timeoffset)],truths=[ml_poly_order0, ml_poly_order1, ml_rp, ml_t0],quantiles=[, , ], show_titles=True)plt.savefig(plotcorner, dpi=)if verbose:LOGINFO(''.format(plotcorner))if plotfit and isinstance(plotfit, str):plt.close('')f, (a0, a1) = plt.subplots(nrows=, ncols=, sharex=True,figsize=(,),gridspec_kw={'':[, ]})a0.scatter(stimes, smags, c='', alpha=, label='', zorder=,s=, rasterized=True, linewidths=)DEBUGGING = Falseif DEBUGGING:a0.scatter(stimes, init_flux, c='', alpha=, s=, zorder=,rasterized=True, linewidths=,label='')a0.scatter(stimes, ml_mags, c='', alpha=, s=, zorder=,rasterized=True, linewidths=, label='')a0.plot(stimes, fitmags, c='',zorder=, rasterized=True, lw=, alpha=,label=''.format(fittype, len(fitparamnames)))a1.scatter(stimes, smags-fitmags, c='', alpha=,rasterized=True, s=, linewidths=)if scatterxdata and scatteryaxes:import matplotlib.transforms as transformsfor a in [a0, a1]:transform = transforms.blended_transform_factory(a.transData, a.transAxes)a.scatter(scatterxdata, scatteryaxes, c='', alpha=,zorder=, s=, rasterized=True, 
linewidths=,marker=\"\", transform=transform)a1.set_xlabel('')a0.set_ylabel('')a1.set_ylabel('')a0.legend(loc='', fontsize='')for a in [a0, a1]:a.get_yaxis().set_tick_params(which='', direction='')a.get_xaxis().set_tick_params(which='', direction='')f.tight_layout(h_pad=, w_pad=)f.savefig(plotfit, dpi=, bbox_inches='')if verbose:LOGINFO(''.format(plotfit))returndict[''] = plotfitreturn returndict", "docstring": "The model fit by this function is: a Mandel & Agol (2002) transit, PLUS a\n line. You can fit and fix whatever parameters you want.\n\n A typical use case: you want to measure transit times of individual SNR >~\n 50 transits. You fix all the transit parameters except for the mid-time,\n and also fit for a line locally.\n\n NOTE: this only works for flux time-series at the moment.\n\n NOTE: Between the `fitparams`, `priorbounds`, and `fixedparams` dicts, you\n must specify all of the planetary transit parameters required by BATMAN and\n the parameters for the line fit: `['t0', 'rp', 'sma', 'incl', 'u', 'rp',\n 'ecc', 'omega', 'period', 'poly_order0', poly_order1']`, or the BATMAN model\n will fail to initialize.\n\n Parameters\n ----------\n\n times,mags,errs : np.array\n The input flux time-series to fit a Fourier cosine series to.\n\n fitparams : dict\n This is the initial parameter guesses for MCMC, found e.g., by\n BLS. The key string format must not be changed, but any parameter can be\n either \"fit\" or \"fixed\". If it is \"fit\", it must have a corresponding\n prior. For example::\n\n fitparams = {'t0':1325.9,\n 'poly_order0':1,\n 'poly_order1':0.}\n\n where `t0` is the time of transit-center for a reference transit.\n `poly_order0` corresponds to the intercept of the line, `poly_order1` is\n the slope.\n\n priorbounds : dict\n This sets the lower & upper bounds on uniform prior, e.g.::\n\n priorbounds = {'t0':(np.min(time), np.max(time)),\n 'poly_order0':(0.5,1.5),\n 'poly_order1':(-0.5,0.5) }\n\n fixedparams : dict\n This sets which parameters are fixed, and their values. For example::\n\n fixedparams = {'ecc':0.,\n 'omega':90.,\n 'limb_dark':'quadratic',\n 'period':fitd['period'],\n 'rp':np.sqrt(fitd['transitdepth']),\n 'sma':6.17, 'incl':85, 'u':[0.3, 0.2]}\n\n `limb_dark` must be \"quadratic\". It's \"fixed\", because once you\n choose your limb-darkening model, it's fixed.\n\n trueparams : list of floats\n The true parameter values you're fitting for, if they're known (e.g., a\n known planet, or fake data). Only for plotting purposes.\n\n burninpercent : float\n The percent of MCMC samples to discard as burn-in.\n\n plotcorner : str or False\n If this is a str, points to the path of output corner plot that will be\n generated for this MCMC run.\n\n timeoffset : float\n If input times are offset by some constant, and you want saved pickles\n to fix that.\n\n samplesavpath : str\n This must be provided so `emcee` can save its MCMC samples to disk as\n HDF5 files. This will set the path of the output HDF5file written.\n\n n_walkers : int\n The number of MCMC walkers to use.\n\n n_mcmc_steps : int\n The number of MCMC steps to take.\n\n eps : float\n The radius of the `n_walkers-dimensional` Gaussian ball used to\n initialize the MCMC.\n\n skipsampling : bool\n If you've already collected MCMC samples, and you do not want any more\n sampling (e.g., just make the plots), set this to be True.\n\n overwriteexistingsamples : bool\n If you've collected samples, but you want to overwrite them, set this to\n True. 
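As a hedged aside on how the HDF5 sample backend described above typically behaves with emcee >= 3: starting positions are only supplied on the first run, and "overwriting" amounts to resetting the backend. The file name, walker counts, and the toy log-probability below are placeholders, not part of this module::

    import os
    import numpy as np
    import emcee

    def log_prob(theta):
        # toy Gaussian log-probability standing in for the transit + line posterior
        return -0.5 * np.sum(theta**2)

    n_walkers, n_dim, n_steps = 32, 4, 500
    backend = emcee.backends.HDFBackend('samples.h5')   # plays the role of samplesavpath

    overwrite = False
    if overwrite:
        backend.reset(n_walkers, n_dim)                  # discard any stored chain

    # only pass starting positions on the first run; None resumes the saved chain
    starting_positions = 1e-4 * np.random.randn(n_walkers, n_dim)
    if os.path.exists(backend.filename) and backend.iteration > 0:
        starting_positions = None

    sampler = emcee.EnsembleSampler(n_walkers, n_dim, log_prob, backend=backend)
    sampler.run_mcmc(starting_positions, n_steps, progress=True)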
Usually, it should be False, which appends samples to\n `samplesavpath` HDF5 file.\n\n mcmcprogressbar : bool\n If True, will show a progress bar for the MCMC process.\n\n plotfit: str or bool\n If a str, indicates the path of the output fit plot file. If False, no\n fit plot will be made.\n\n scatterxdata : np.array or None\n Use this to overplot x,y scatter points on the output model/data\n lightcurve (e.g., to highlight bad data, or to indicate an ephemeris),\n this can take a `np.ndarray` with the same units as `times`.\n\n scatteryaxes : np.array or None\n Use this to provide the y-values for scatterxdata, in units of fraction\n of an axis.\n\n magsarefluxes : bool\n This indicates if the input measurements in `mags` are actually fluxes.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n verbose : bool\n If True, will indicate MCMC progress.\n\n nworkers : int\n The number of parallel workers to launch for MCMC.\n\n Returns\n -------\n\n dict\n This function returns a dict containing the model fit parameters and\n other fit information. 
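To make the parameter dicts above concrete, an illustrative call to this function might look like the sketch below; the light-curve arrays, the period, and the output path are placeholders, not values from any real fit::

    import numpy as np

    # times, fluxes, errs: your detrended flux time-series arrays
    fitparams = {'t0': 1325.9, 'rp': 0.1, 'poly_order0': 1.0, 'poly_order1': 0.0}
    priorbounds = {'t0': (1325.8, 1326.0), 'rp': (0.05, 0.15),
                   'poly_order0': (0.5, 1.5), 'poly_order1': (-0.5, 0.5)}
    fixedparams = {'ecc': 0.0, 'omega': 90.0, 'limb_dark': 'quadratic',
                   'period': 8.32, 'sma': 6.17, 'incl': 85.0, 'u': [0.3, 0.2]}

    fitd = mandelagol_and_line_fit_magseries(
        times, fluxes, errs,
        fitparams, priorbounds, fixedparams,
        samplesavpath='transit_samples.h5',
        n_walkers=50, n_mcmc_steps=1000,
        magsarefluxes=True, mcmcprogressbar=True
    )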
The form of this dict is mostly standardized\n across all functions in this module::\n\n {\n 'fittype':'mandelagol_and_line',\n 'fitinfo':{\n 'initialparams':the initial transit params provided,\n 'fixedparams':the fixed transit params provided,\n 'finalparams':the final model fit transit params,\n 'finalparamerrs':formal errors in the params,\n 'fitmags': the model fit mags,\n 'fitepoch': the epoch of minimum light for the fit,\n },\n 'fitplotfile': the output fit plot if fitplot is not None,\n 'magseries':{\n 'times':input times in phase order of the model,\n 'phase':the phases of the model mags,\n 'mags':input mags/fluxes in the phase order of the model,\n 'errs':errs in the phase order of the model,\n 'magsarefluxes':input value of magsarefluxes kwarg\n }\n }", "id": "f14680:m10"} {"signature": "def spline_fit_magseries(times, mags, errs, period,knotfraction=,maxknots=,sigclip=,plotfit=False,ignoreinitfail=False,magsarefluxes=False,verbose=True):", "body": "if errs is None:errs = npfull_like(mags, )stimes, smags, serrs = sigclip_magseries(times, mags, errs,sigclip=sigclip,magsarefluxes=magsarefluxes)nzind = npnonzero(serrs)stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]phase, pmags, perrs, ptimes, mintime = (get_phased_quantities(stimes, smags, serrs, period))nobs = len(phase)nknots = int(npfloor(knotfraction*nobs))nknots = maxknots if nknots > maxknots else nknotssplineknots = nplinspace(phase[] + ,phase[-] - ,num=nknots)phase_diffs_ind = npdiff(phase) > incphase_ind = npconcatenate((nparray([True]), phase_diffs_ind))phase, pmags, perrs = (phase[incphase_ind],pmags[incphase_ind],perrs[incphase_ind])spl = LSQUnivariateSpline(phase, pmags, t=splineknots, w=/perrs)fitmags = spl(phase)fitchisq = npsum(((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs))fitredchisq = fitchisq/(len(pmags) - nknots - )if verbose:LOGINFO('''' %(nknots, fitchisq, fitredchisq))if not magsarefluxes:fitmagminind = npwhere(fitmags == npmax(fitmags))else:fitmagminind = npwhere(fitmags == npmin(fitmags))if len(fitmagminind[]) > :fitmagminind = (fitmagminind[][],)magseriesepoch = ptimes[fitmagminind]returndict = {'':'','':{'':nknots,'':fitmags,'':magseriesepoch},'':fitchisq,'':fitredchisq,'':None,'':{'':ptimes,'':phase,'':pmags,'':perrs,'':magsarefluxes},}if plotfit and isinstance(plotfit, str):make_fit_plot(phase, pmags, perrs, fitmags,period, mintime, magseriesepoch,plotfit,magsarefluxes=magsarefluxes)returndict[''] = plotfitreturn returndict", "docstring": "This fits a univariate cubic spline to the phased light curve.\n\n This fit may be better than the Fourier fit for sharply variable objects,\n like EBs, so can be used to distinguish them from other types of variables.\n\n Parameters\n ----------\n\n times,mags,errs : np.array\n The input mag/flux time-series to fit a spline to.\n\n period : float\n The period to use for the spline fit.\n\n knotfraction : float\n The knot fraction is the number of internal knots to use for the\n spline. A value of 0.01 (or 1%) of the total number of non-nan\n observations appears to work quite well, without over-fitting. maxknots\n controls the maximum number of knots that will be allowed.\n\n maxknots : int\n The maximum number of knots that will be used even if `knotfraction`\n gives a value to use larger than `maxknots`. 
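The knot-count logic just described (a fixed fraction of the observations, capped at `maxknots`) together with the weighted `LSQUnivariateSpline` fit can be sketched in a few lines; the phased arrays here are synthetic stand-ins::

    import numpy as np
    from scipy.interpolate import LSQUnivariateSpline

    # synthetic phased light curve: phase must be sorted for the spline fit
    phase = np.sort(np.random.uniform(0.0, 1.0, 1000))
    pmags = 10.0 + 0.05*np.cos(2.0*np.pi*phase) + np.random.normal(0.0, 0.01, phase.size)
    perrs = np.full_like(pmags, 0.01)

    knotfraction, maxknots = 0.01, 30
    nknots = min(int(np.floor(knotfraction*phase.size)), maxknots)

    # interior knots must lie strictly inside the phase range
    splineknots = np.linspace(phase[0] + 0.01, phase[-1] - 0.01, num=nknots)

    spl = LSQUnivariateSpline(phase, pmags, t=splineknots, w=1.0/perrs)
    fitmags = spl(phase)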
This helps dealing with\n over-fitting to short time-scale variations.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n magsarefluxes : bool\n If True, will treat the input values of `mags` as fluxes for purposes of\n plotting the fit and sig-clipping.\n\n plotfit : str or False\n If this is a string, this function will make a plot for the fit to the\n mag/flux time-series and writes the plot to the path specified here.\n\n ignoreinitfail : bool\n If this is True, ignores the initial failure to find a set of optimized\n Fourier parameters using the global optimization function and proceeds\n to do a least-squares fit anyway.\n\n verbose : bool\n If True, will indicate progress and warn of any problems.\n\n Returns\n -------\n\n dict\n This function returns a dict containing the model fit parameters, the\n minimized chi-sq value and the reduced chi-sq value. 
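The chi-squared and reduced chi-squared quoted throughout these return dicts follow the standard definitions used in the function bodies; a small worked sketch with made-up numbers::

    import numpy as np

    def fit_chisq(fitmags, pmags, perrs, nparams):
        """Chi-sq of a model fit and the reduced chi-sq given nparams free parameters."""
        chisq = np.sum(((fitmags - pmags)/perrs)**2)
        redchisq = chisq/(pmags.size - nparams - 1)
        return chisq, redchisq

    pmags = np.array([10.02, 10.00, 9.98, 10.01, 9.99])
    perrs = np.full_like(pmags, 0.01)
    fitmags = np.full_like(pmags, 10.00)

    print(fit_chisq(fitmags, pmags, perrs, nparams=1))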
The form of this\n dict is mostly standardized across all functions in this module::\n\n {\n 'fittype':'spline',\n 'fitinfo':{\n 'nknots': the number of knots used for the fit\n 'fitmags': the model fit mags,\n 'fitepoch': the epoch of minimum light for the fit,\n },\n 'fitchisq': the minimized value of the fit's chi-sq,\n 'fitredchisq':the reduced chi-sq value,\n 'fitplotfile': the output fit plot if fitplot is not None,\n 'magseries':{\n 'times':input times in phase order of the model,\n 'phase':the phases of the model mags,\n 'mags':input mags/fluxes in the phase order of the model,\n 'errs':errs in the phase order of the model,\n 'magsarefluxes':input value of magsarefluxes kwarg\n }\n }", "id": "f14681:m0"} {"signature": "def savgol_fit_magseries(times, mags, errs, period,windowlength=None,polydeg=,sigclip=,plotfit=False,magsarefluxes=False,verbose=True):", "body": "stimes, smags, serrs = sigclip_magseries(times, mags, errs,sigclip=sigclip,magsarefluxes=magsarefluxes)nzind = npnonzero(serrs)stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]phase, pmags, perrs, ptimes, mintime = (get_phased_quantities(stimes, smags, serrs, period))if not isinstance(windowlength, int):windowlength = max(polydeg + ,int(len(phase)/))if windowlength % == :windowlength += if verbose:LOGINFO('''''''' % (windowlength,polydeg,len(pmags),period,mintime))sgf = savgol_filter(pmags, windowlength, polydeg, mode='')fitmags = sgffitchisq = npsum(((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs))nparams = int(len(pmags)/windowlength) * polydegfitredchisq = fitchisq/(len(pmags) - nparams - )fitredchisq = -if verbose:LOGINFO('' %(fitchisq, fitredchisq))if not magsarefluxes:fitmagminind = npwhere(fitmags == npmax(fitmags))else:fitmagminind = npwhere(fitmags == npmin(fitmags))if len(fitmagminind[]) > :fitmagminind = (fitmagminind[][],)magseriesepoch = ptimes[fitmagminind]returndict = {'':'','':{'':windowlength,'':polydeg,'':fitmags,'':magseriesepoch},'':fitchisq,'':fitredchisq,'':None,'':{'':ptimes,'':phase,'':pmags,'':perrs,'':magsarefluxes}}if plotfit and isinstance(plotfit, str):make_fit_plot(phase, pmags, perrs, fitmags,period, mintime, magseriesepoch,plotfit,magsarefluxes=magsarefluxes)returndict[''] = plotfitreturn returndict", "docstring": "Fit a Savitzky-Golay filter to the magnitude/flux time series.\n\n SG fits successive sub-sets (windows) of adjacent data points with a\n low-order polynomial via least squares. At each point (magnitude), it\n returns the value of the polynomial at that magnitude's time. This is made\n significantly cheaper than *actually* performing least squares for each\n window through linear algebra tricks that are possible when specifying the\n window size and polynomial order beforehand. Numerical Recipes Ch 14.8\n gives an overview, Eq. 14.8.6 is what Scipy has implemented.\n\n The idea behind Savitzky-Golay is to preserve higher moments (>=2) of the\n input data series than would be done by a simple moving window average.\n\n Note that the filter assumes evenly spaced data, which magnitude time series\n are not. By *pretending* the data points are evenly spaced, we introduce an\n additional noise source in the function values. 
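A hedged sketch of the window-length handling and the filter call described here, using scipy's `savgol_filter`; the default-window heuristic, the boundary `mode`, and the phased arrays are illustrative assumptions::

    import numpy as np
    from scipy.signal import savgol_filter

    phase = np.linspace(0.0, 1.0, 900)
    pmags = 12.0 + 0.1*np.cos(2.0*np.pi*phase) + np.random.normal(0.0, 0.005, phase.size)

    polydeg = 2
    windowlength = None

    if not isinstance(windowlength, int):
        # default heuristic: npoints/300 or polydeg + 3, whichever is larger
        windowlength = max(polydeg + 3, int(len(phase)/300))

    if windowlength % 2 == 0:
        windowlength += 1          # the filter window must have odd length

    fitmags = savgol_filter(pmags, windowlength, polydeg, mode='nearest')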
This is a relatively small\n noise source provided that the changes in the magnitude values across the\n full width of the N=windowlength point window is < sqrt(N/2) times the\n measurement noise on a single point.\n\n TODO:\n - Find correct dof for reduced chi squared in savgol_fit_magseries\n\n Parameters\n ----------\n\n times,mags,errs : np.array\n The input mag/flux time-series to fit the Savitsky-Golay model to.\n\n period : float\n The period to use for the model fit.\n\n windowlength : None or int\n The length of the filter window (the number of coefficients). Must be\n either positive and odd, or None. (The window is the number of points to\n the left, and to the right, of whatever point is having a polynomial fit\n to it locally). Bigger windows at fixed polynomial order risk lowering\n the amplitude of sharp features. If None, this routine (arbitrarily)\n sets the `windowlength` for phased LCs to be either the number of finite\n data points divided by 300, or polydeg+3, whichever is bigger.\n\n polydeg : int\n This is the order of the polynomial used to fit the samples. Must be\n less than `windowlength`. \"Higher-order filters do better at preserving\n feature heights and widths, but do less smoothing on broader features.\"\n (Numerical Recipes).\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n magsarefluxes : bool\n If True, will treat the input values of `mags` as fluxes for purposes of\n plotting the fit and sig-clipping.\n\n plotfit : str or False\n If this is a string, this function will make a plot for the fit to the\n mag/flux time-series and writes the plot to the path specified here.\n\n ignoreinitfail : bool\n If this is True, ignores the initial failure to find a set of optimized\n Fourier parameters using the global optimization function and proceeds\n to do a least-squares fit anyway.\n\n verbose : bool\n If True, will indicate progress and warn of any problems.\n\n Returns\n -------\n\n dict\n This function returns a dict containing the model fit parameters, the\n minimized chi-sq value and the reduced chi-sq value. 
The form of this\n dict is mostly standardized across all functions in this module::\n\n {\n 'fittype':'savgol',\n 'fitinfo':{\n 'windowlength': the window length used for the fit,\n 'polydeg':the polynomial degree used for the fit,\n 'fitmags': the model fit mags,\n 'fitepoch': the epoch of minimum light for the fit,\n },\n 'fitchisq': the minimized value of the fit's chi-sq,\n 'fitredchisq':the reduced chi-sq value,\n 'fitplotfile': the output fit plot if fitplot is not None,\n 'magseries':{\n 'times':input times in phase order of the model,\n 'phase':the phases of the model mags,\n 'mags':input mags/fluxes in the phase order of the model,\n 'errs':errs in the phase order of the model,\n 'magsarefluxes':input value of magsarefluxes kwarg\n }\n }", "id": "f14681:m1"} {"signature": "def legendre_fit_magseries(times, mags, errs, period,legendredeg=,sigclip=,plotfit=False,magsarefluxes=False,verbose=True):", "body": "stimes, smags, serrs = sigclip_magseries(times, mags, errs,sigclip=sigclip,magsarefluxes=magsarefluxes)nzind = npnonzero(serrs)stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]phase, pmags, perrs, ptimes, mintime = (get_phased_quantities(stimes, smags, serrs, period))if verbose:LOGINFO('''''''' % (legendredeg,len(pmags),period,mintime))p = Legendre.fit(phase, pmags, legendredeg)coeffs = p.coeffitmags = p(phase)fitchisq = npsum(((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs))nparams = legendredeg + fitredchisq = fitchisq/(len(pmags) - nparams - )if verbose:LOGINFO('' %(fitchisq, fitredchisq))if not magsarefluxes:fitmagminind = npwhere(fitmags == npmax(fitmags))else:fitmagminind = npwhere(fitmags == npmin(fitmags))if len(fitmagminind[]) > :fitmagminind = (fitmagminind[][],)magseriesepoch = ptimes[fitmagminind]returndict = {'':'','':{'':legendredeg,'':fitmags,'':magseriesepoch,'':coeffs,},'':fitchisq,'':fitredchisq,'':None,'':{'':ptimes,'':phase,'':pmags,'':perrs,'':magsarefluxes}}if plotfit and isinstance(plotfit, str):make_fit_plot(phase, pmags, perrs, fitmags,period, mintime, magseriesepoch,plotfit,magsarefluxes=magsarefluxes)returndict[''] = plotfitreturn returndict", "docstring": "Fit an arbitrary-order Legendre series, via least squares, to the\n magnitude/flux time series.\n\n This is a series of the form::\n\n p(x) = c_0*L_0(x) + c_1*L_1(x) + c_2*L_2(x) + ... + c_n*L_n(x)\n\n where L_i's are Legendre polynomials (also called \"Legendre functions of the\n first kind\") and c_i's are the coefficients being fit.\n\n This function is mainly just a wrapper to\n `numpy.polynomial.legendre.Legendre.fit`.\n\n Parameters\n ----------\n\n times,mags,errs : np.array\n The input mag/flux time-series to fit a Legendre series polynomial to.\n\n period : float\n The period to use for the Legendre fit.\n\n legendredeg : int\n This is `n` in the equation above, e.g. if you give `n=5`, you will\n get 6 coefficients. This number should be much less than the number of\n data points you are fitting.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. 
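Since `legendre_fit_magseries` is mainly a wrapper around `numpy.polynomial.legendre.Legendre.fit`, the core of that fit can be sketched directly; the phased arrays and degree below are synthetic examples::

    import numpy as np
    from numpy.polynomial.legendre import Legendre

    phase = np.linspace(0.0, 1.0, 500)
    pmags = 11.0 + 0.2*np.cos(2.0*np.pi*phase) + np.random.normal(0.0, 0.01, phase.size)
    perrs = np.full_like(pmags, 0.01)

    legendredeg = 10
    p = Legendre.fit(phase, pmags, legendredeg)   # least-squares Legendre series
    coeffs = p.coef                               # legendredeg + 1 coefficients
    fitmags = p(phase)                            # evaluate the series at each phase

    nparams = legendredeg + 1
    fitchisq = np.sum(((fitmags - pmags)/perrs)**2)
    fitredchisq = fitchisq/(pmags.size - nparams - 1)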
For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n magsarefluxes : bool\n If True, will treat the input values of `mags` as fluxes for purposes of\n plotting the fit and sig-clipping.\n\n plotfit : str or False\n If this is a string, this function will make a plot for the fit to the\n mag/flux time-series and writes the plot to the path specified here.\n\n ignoreinitfail : bool\n If this is True, ignores the initial failure to find a set of optimized\n Fourier parameters using the global optimization function and proceeds\n to do a least-squares fit anyway.\n\n verbose : bool\n If True, will indicate progress and warn of any problems.\n\n Returns\n -------\n\n dict\n This function returns a dict containing the model fit parameters, the\n minimized chi-sq value and the reduced chi-sq value. The form of this\n dict is mostly standardized across all functions in this module::\n\n {\n 'fittype':'legendre',\n 'fitinfo':{\n 'legendredeg': the Legendre polynomial degree used,\n 'fitmags': the model fit mags,\n 'fitepoch': the epoch of minimum light for the fit,\n },\n 'fitchisq': the minimized value of the fit's chi-sq,\n 'fitredchisq':the reduced chi-sq value,\n 'fitplotfile': the output fit plot if fitplot is not None,\n 'magseries':{\n 'times':input times in phase order of the model,\n 'phase':the phases of the model mags,\n 'mags':input mags/fluxes in the phase order of the model,\n 'errs':errs in the phase order of the model,\n 'magsarefluxes':input value of magsarefluxes kwarg\n }\n }", "id": "f14681:m2"} {"signature": "def _fourier_func(fourierparams, phase, mags):", "body": "order = int(len(fourierparams)/)f_amp = fourierparams[:order]f_pha = fourierparams[order:]f_orders = [f_amp[x]*npcos(*pi_value*x*phase + f_pha[x])for x in range(order)]total_f = npmedian(mags)for fo in f_orders:total_f += foreturn total_f", "docstring": "This returns a summed Fourier cosine series.\n\n Parameters\n ----------\n\n fourierparams : list\n This MUST be a list of the following form like so::\n\n [period,\n epoch,\n [amplitude_1, amplitude_2, amplitude_3, ..., amplitude_X],\n [phase_1, phase_2, phase_3, ..., phase_X]]\n\n where X is the Fourier order.\n\n phase,mags : np.array\n The input phase and magnitude areas to use as the basis for the cosine\n series. The phases are used directly to generate the values of the\n function, while the mags array is used to generate the zeroth order\n amplitude coefficient.\n\n Returns\n -------\n\n np.array\n The Fourier cosine series function evaluated over `phase`.", "id": "f14682:m0"} {"signature": "def _fourier_chisq(fourierparams,phase,mags,errs):", "body": "f = _fourier_func(fourierparams, phase, mags)chisq = npsum(((mags - f)*(mags - f))/(errs*errs))return chisq", "docstring": "This is the chisq objective function to be minimized by `scipy.minimize`.\n\n The parameters are the same as `_fourier_func` above. 
`errs` is used to\n calculate the chisq value.", "id": "f14682:m1"} {"signature": "def _fourier_residual(fourierparams,phase,mags):", "body": "f = _fourier_func(fourierparams, phase, mags)residual = mags - freturn residual", "docstring": "This is the residual objective function to be minimized by `scipy.leastsq`.\n\nThe parameters are the same as `_fourier_func` above.", "id": "f14682:m2"} {"signature": "def fourier_fit_magseries(times, mags, errs, period,fourierorder=None,fourierparams=None,sigclip=,magsarefluxes=False,plotfit=False,ignoreinitfail=True,verbose=True):", "body": "stimes, smags, serrs = sigclip_magseries(times, mags, errs,sigclip=sigclip,magsarefluxes=magsarefluxes)nzind = npnonzero(serrs)stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]phase, pmags, perrs, ptimes, mintime = (get_phased_quantities(stimes, smags, serrs, period))if fourierorder and fourierorder > and not fourierparams:fourieramps = [] + []*(fourierorder - )fourierphas = [] + []*(fourierorder - )fourierparams = fourieramps + fourierphaselif not fourierorder and fourierparams:fourierorder = int(len(fourierparams)/)else:LOGWARNING('''')fourierorder = fourieramps = [] + []*(fourierorder - )fourierphas = [] + []*(fourierorder - )fourierparams = fourieramps + fourierphasif verbose:LOGINFO('''''' % (fourierorder,len(phase),period,mintime))initialfit = spminimize(_fourier_chisq,fourierparams,method='',args=(phase, pmags, perrs))if initialfit.success or ignoreinitfail:if verbose:LOGINFO('')leastsqparams = initialfit.xtry:leastsqfit = spleastsq(_fourier_residual,leastsqparams,args=(phase, pmags))except Exception as e:leastsqfit = Noneif leastsqfit and leastsqfit[-] in (,,,):finalparams = leastsqfit[]fitmags = _fourier_func(finalparams, phase, pmags)fitchisq = npsum(((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs))fitredchisq = fitchisq/(len(pmags) - len(finalparams) - )if verbose:LOGINFO('' %(fitchisq,fitredchisq))if not magsarefluxes:fitmagminind = npwhere(fitmags == npmax(fitmags))else:fitmagminind = npwhere(fitmags == npmin(fitmags))if len(fitmagminind[]) > :fitmagminind = (fitmagminind[][],)returndict = {'':'','':{'':fourierorder,'':finalparams,'':initialfit,'':leastsqfit,'':fitmags,'':mintime,'':ptimes[fitmagminind]},'':fitchisq,'':fitredchisq,'':None,'':{'':ptimes,'':phase,'':pmags,'':perrs,'':magsarefluxes},}if plotfit and isinstance(plotfit, str):make_fit_plot(phase, pmags, perrs, fitmags,period, mintime, mintime,plotfit,magsarefluxes=magsarefluxes)returndict[''] = plotfitreturn returndictelse:LOGERROR('')return {'':'','':{'':fourierorder,'':None,'':initialfit,'':None,'':None,'':None},'':npnan,'':npnan,'':None,'':{'':ptimes,'':phase,'':pmags,'':perrs,'':magsarefluxes}}else:LOGERROR(''''% initialfit.message)return {'':'','':{'':fourierorder,'':None,'':initialfit,'':None,'':None,'':None},'':npnan,'':npnan,'':None,'':{'':ptimes,'':phase,'':pmags,'':perrs,'':magsarefluxes}}", "docstring": "This fits a Fourier series to a mag/flux time series.\n\n Parameters\n ----------\n\n times,mags,errs : np.array\n The input mag/flux time-series to fit a Fourier cosine series to.\n\n period : float\n The period to use for the Fourier fit.\n\n fourierorder : None or int\n If this is an int, will be interpreted as the Fourier order of the\n series to fit to the input mag/flux times-series. If this is None and\n `fourierparams` is specified, `fourierparams` will be used directly to\n generate the fit Fourier series. 
If `fourierparams` is also None, this\n function will try to fit a Fourier cosine series of order 3 to the\n mag/flux time-series.\n\n fourierparams : list of floats or None\n If this is specified as a list of floats, it must be of the form below::\n\n [fourier_amp1, fourier_amp2, fourier_amp3,...,fourier_ampN,\n fourier_phase1, fourier_phase2, fourier_phase3,...,fourier_phaseN]\n\n to specify a Fourier cosine series of order N. If this is None and\n `fourierorder` is specified, the Fourier order specified there will be\n used to construct the Fourier cosine series used to fit the input\n mag/flux time-series. If both are None, this function will try to fit a\n Fourier cosine series of order 3 to the input mag/flux time-series.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n magsarefluxes : bool\n If True, will treat the input values of `mags` as fluxes for purposes of\n plotting the fit and sig-clipping.\n\n plotfit : str or False\n If this is a string, this function will make a plot for the fit to the\n mag/flux time-series and writes the plot to the path specified here.\n\n ignoreinitfail : bool\n If this is True, ignores the initial failure to find a set of optimized\n Fourier parameters using the global optimization function and proceeds\n to do a least-squares fit anyway.\n\n verbose : bool\n If True, will indicate progress and warn of any problems.\n\n Returns\n -------\n\n dict\n This function returns a dict containing the model fit parameters, the\n minimized chi-sq value and the reduced chi-sq value. The form of this\n dict is mostly standardized across all functions in this module::\n\n {\n 'fittype':'fourier',\n 'fitinfo':{\n 'finalparams': the list of final model fit params,\n 'leastsqfit':the full tuple returned by scipy.leastsq,\n 'fitmags': the model fit mags,\n 'fitepoch': the epoch of minimum light for the fit,\n ... other fit function specific keys ...\n },\n 'fitchisq': the minimized value of the fit's chi-sq,\n 'fitredchisq':the reduced chi-sq value,\n 'fitplotfile': the output fit plot if fitplot is not None,\n 'magseries':{\n 'times':input times in phase order of the model,\n 'phase':the phases of the model mags,\n 'mags':input mags/fluxes in the phase order of the model,\n 'errs':errs in the phase order of the model,\n 'magsarefluxes':input value of magsarefluxes kwarg\n }\n }\n\n NOTE: the returned value of 'fitepoch' in the 'fitinfo' dict returned by\n this function is the time value of the first observation since this is\n where the LC is folded for the fit procedure. 
To get the actual time of\n minimum epoch as calculated by a spline fit to the phased LC, use the\n key 'actual_fitepoch' in the 'fitinfo' dict.", "id": "f14682:m3"} {"signature": "def gaussianeb_fit_magseries(times, mags, errs,ebparams,sigclip=,plotfit=False,magsarefluxes=False,verbose=True):", "body": "stimes, smags, serrs = sigclip_magseries(times, mags, errs,sigclip=sigclip,magsarefluxes=magsarefluxes)nzind = npnonzero(serrs)stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]ebperiod, ebepoch, ebdepth = ebparams[:]if ebepoch is None:if verbose:LOGWARNING('''')try:spfit = spline_fit_magseries(times, mags, errs, ebperiod,sigclip=sigclip,magsarefluxes=magsarefluxes,verbose=verbose)ebepoch = spfit['']['']except Exception as e:sgfit = savgol_fit_magseries(times, mags, errs, ebperiod,sigclip=sigclip,magsarefluxes=magsarefluxes,verbose=verbose)ebepoch = sgfit['']['']finally:if ebepoch is None:LOGERROR(\"\"\"\")returndict = {'':'','':{'':ebparams,'':None,'':None,'':None,'':None,},'':npnan,'':npnan,'':None,'':{'':None,'':None,'':None,'':None,'':magsarefluxes,},}return returndictelse:if ebepoch.size > :if verbose:LOGWARNING('''')ebparams[] = ebepoch[]else:if verbose:LOGWARNING(''% ebepoch)ebparams[] = ebepoch.item()if magsarefluxes:if ebdepth < :ebparams[] = -ebdepth[]else:if ebdepth > :ebparams[] = -ebdepth[]try:leastsqfit = spleastsq(eclipses.invgauss_eclipses_residual,ebparams,args=(stimes, smags, serrs),full_output=True)except Exception as e:leastsqfit = Noneif leastsqfit and leastsqfit[-] in (,,,):finalparams = leastsqfit[]covxmatrix = leastsqfit[]fitmags, phase, ptimes, pmags, perrs = eclipses.invgauss_eclipses_func(finalparams,stimes, smags, serrs)fitchisq = npsum(((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs))fitredchisq = fitchisq/(len(pmags) - len(finalparams) - )residuals = leastsqfit[]['']residualvariance = (npsum(residuals*residuals)/(pmags.size - finalparams.size))if covxmatrix is not None:covmatrix = residualvariance*covxmatrixstderrs = npsqrt(npdiag(covmatrix))else:LOGERROR('')stderrs = Noneif verbose:LOGINFO('' %(fitchisq, fitredchisq))fperiod, fepoch = finalparams[:]returndict = {'':'','':{'':ebparams,'':finalparams,'':stderrs,'':leastsqfit,'':fitmags,'':fepoch,},'':fitchisq,'':fitredchisq,'':None,'':{'':phase,'':ptimes,'':pmags,'':perrs,'':magsarefluxes,},}if plotfit and isinstance(plotfit, str):make_fit_plot(phase, pmags, perrs, fitmags,fperiod, ptimes.min(), fepoch,plotfit,magsarefluxes=magsarefluxes)returndict[''] = plotfitreturn returndictelse:LOGERROR('')returndict = {'':'','':{'':ebparams,'':None,'':None,'':leastsqfit,'':None,'':None,},'':npnan,'':npnan,'':None,'':{'':None,'':None,'':None,'':None,'':magsarefluxes,},}return returndict", "docstring": "This fits a double inverted gaussian EB model to a magnitude time series.\n\n Parameters\n ----------\n\n times,mags,errs : np.array\n The input mag/flux time-series to fit the EB model to.\n\n period : float\n The period to use for EB fit.\n\n ebparams : list of float\n This is a list containing the eclipsing binary parameters::\n\n ebparams = [period (time),\n epoch (time),\n pdepth (mags),\n pduration (phase),\n psdepthratio,\n secondaryphase]\n\n `period` is the period in days.\n\n `epoch` is the time of primary minimum in JD.\n\n `pdepth` is the depth of the primary eclipse:\n\n - for magnitudes -> `pdepth` should be < 0\n - for fluxes -> `pdepth` should be > 0\n\n `pduration` is the length of the primary eclipse in phase.\n\n `psdepthratio` is the ratio of the secondary eclipse depth to that of\n the 
primary eclipse.\n\n `secondaryphase` is the phase at which the minimum of the secondary\n eclipse is located. This effectively parameterizes eccentricity.\n\n If `epoch` is None, this function will do an initial spline fit to find\n an approximate minimum of the phased light curve using the given period.\n\n The `pdepth` provided is checked against the value of\n `magsarefluxes`. if `magsarefluxes = True`, the `ebdepth` is forced to\n be > 0; if `magsarefluxes = False`, the `ebdepth` is forced to be < 0.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n magsarefluxes : bool\n If True, will treat the input values of `mags` as fluxes for purposes of\n plotting the fit and sig-clipping.\n\n plotfit : str or False\n If this is a string, this function will make a plot for the fit to the\n mag/flux time-series and writes the plot to the path specified here.\n\n ignoreinitfail : bool\n If this is True, ignores the initial failure to find a set of optimized\n Fourier parameters using the global optimization function and proceeds\n to do a least-squares fit anyway.\n\n verbose : bool\n If True, will indicate progress and warn of any problems.\n\n Returns\n -------\n\n dict\n This function returns a dict containing the model fit parameters, the\n minimized chi-sq value and the reduced chi-sq value. 
The form of this\n dict is mostly standardized across all functions in this module::\n\n {\n 'fittype':'gaussianeb',\n 'fitinfo':{\n 'initialparams':the initial EB params provided,\n 'finalparams':the final model fit EB params,\n 'finalparamerrs':formal errors in the params,\n 'leastsqfit':the full tuple returned by scipy.leastsq,\n 'fitmags': the model fit mags,\n 'fitepoch': the epoch of minimum light for the fit,\n },\n 'fitchisq': the minimized value of the fit's chi-sq,\n 'fitredchisq':the reduced chi-sq value,\n 'fitplotfile': the output fit plot if fitplot is not None,\n 'magseries':{\n 'times':input times in phase order of the model,\n 'phase':the phases of the model mags,\n 'mags':input mags/fluxes in the phase order of the model,\n 'errs':errs in the phase order of the model,\n 'magsarefluxes':input value of magsarefluxes kwarg\n }\n }", "id": "f14683:m0"} {"signature": "def get_phased_quantities(stimes, smags, serrs, period):", "body": "mintime = np.min(stimes)iphase = (stimes - mintime)/period - np.floor((stimes - mintime)/period)phasesortind = np.argsort(iphase)phase = iphase[phasesortind]pmags = smags[phasesortind]perrs = serrs[phasesortind]ptimes = stimes[phasesortind]return phase, pmags, perrs, ptimes, mintime", "docstring": "Does phase-folding for the mag/flux time-series given a period.\n\n Given finite and sigma-clipped times, magnitudes, and errors, along with the\n period at which to phase-fold the data, perform the phase-folding and\n return the phase-folded values.\n\n Parameters\n ----------\n\n stimes,smags,serrs : np.array\n The sigma-clipped and finite input mag/flux time-series arrays to\n operate on.\n\n period : float\n The period to phase the mag/flux time-series at. stimes.min() is used as\n the epoch value to fold the times-series around.\n\n Returns\n -------\n\n (phase, pmags, perrs, ptimes, mintime) : tuple\n The tuple returned contains the following items:\n\n - `phase`: phase-sorted values of phase at each of stimes\n - `pmags`: phase-sorted magnitudes at each phase\n - `perrs`: phase-sorted errors\n - `ptimes`: phase-sorted times\n - `mintime`: earliest time in stimes.", "id": "f14685:m0"} {"signature": "def make_fit_plot(phase, pmags, perrs, fitmags,period, mintime, magseriesepoch,plotfit,magsarefluxes=False,wrap=False,model_over_lc=False):", "body": "plt.close('')plt.figure(figsize=(,))if model_over_lc:model_z = lc_z = else:model_z = lc_z = if not wrap:plt.plot(phase, fitmags, linewidth=, color='',zorder=model_z)plt.plot(phase,pmags,marker='',markersize=,linestyle='',rasterized=True, color='',zorder=lc_z)plt.gca().set_xticks([,,,,,,,,,,])else:plt.plot(np.concatenate([phase-,phase]),np.concatenate([fitmags,fitmags]),linewidth=,color='',zorder=model_z)plt.plot(np.concatenate([phase-,phase]),np.concatenate([pmags,pmags]),marker='',markersize=,linestyle='',rasterized=True, color='',zorder=lc_z)plt.gca().set_xlim((-,))plt.gca().set_xticks([-,-,-,-,-,-,-,-,,,,,,,,,])ymin, ymax = plt.ylim()if not magsarefluxes:plt.gca().invert_yaxis()plt.ylabel('')else:plt.ylabel('')plt.xlabel('')plt.title('' %(period, mintime, magseriesepoch))plt.savefig(plotfit)plt.close()", "docstring": "This makes a plot of the LC model fit.\n\n Parameters\n ----------\n\n phase,pmags,perrs : np.array\n The actual mag/flux time-series.\n\n fitmags : np.array\n The model fit time-series.\n\n period : float\n The period at which the phased LC was generated.\n\n mintime : float\n The minimum time value.\n\n magseriesepoch : float\n The value of time around which the phased LC was 
folded.\n\n plotfit : str\n The name of a file to write the plot to.\n\n magsarefluxes : bool\n Set this to True if the values in `pmags` and `fitmags` are actually\n fluxes.\n\n wrap : bool\n If True, will wrap the phased LC around 0.0 to make some phased LCs\n easier to look at.\n\n model_over_lc : bool\n Usually, this function will plot the actual LC over the model LC. Set\n this to True to plot the model over the actual LC; this is most useful\n when you have a very dense light curve and want to be able to see how it\n follows the model.\n\n Returns\n -------\n\n Nothing.", "id": "f14685:m1"} {"signature": "def read_hatpi_textlc(lcfile):", "body": "if '' in lcfile:thiscoldefs = COLDEFS + [('',float)]elif '' in lcfile:thiscoldefs = COLDEFS + [('',float)]elif '' in lcfile:thiscoldefs = COLDEFS + [('',float)]LOGINFO('' % lcfile)if lcfile.endswith(''):infd = gzip.open(lcfile,'')else:infd = open(lcfile,'')with infd:lclines = infd.read().decode().split('')lclines = [x.split() for x in lclines if ('' not in x and len(x) > )]ndet = len(lclines)if ndet > :lccols = list(zip(*lclines))lcdict = {x[]:y for (x,y) in zip(thiscoldefs, lccols)}for col in thiscoldefs:lcdict[col[]] = np.array([col[](x) for x in lcdict[col[]]])else:lcdict = {}LOGWARNING('' % lcfile)for col in thiscoldefs:lcdict[col[]] = np.array([])hatid = HATIDREGEX.findall(lcfile)lcdict[''] = hatid[] if hatid else ''lcdict[''] = [x[] for x in thiscoldefs]lcdict[''] = {'':ndet,'':hatid[] if hatid else '','':'',}framekeyelems = FRAMEREGEX.findall(''.join(lcdict['']))lcdict[''] = np.array([(int(x[]) if x[].isdigit() else np.nan)for x in framekeyelems])lcdict[''] = np.array([(int(x[]) if x[].isdigit() else np.nan)for x in framekeyelems])lcdict[''] = np.array([x[] for x in framekeyelems])lcdict[''] = np.array([(int(x[]) if x[].isdigit() else np.nan)for x in framekeyelems])lcdict[''].extend(['','','',''])lcdict[''][''] = ''lcdict[''][''] = ['' % x for x in np.unique(lcdict['']).tolist()]return lcdict", "docstring": "This reads in a textlc that is complete up to the TFA stage.", "id": "f14686:m0"} {"signature": "def lcdict_to_pickle(lcdict, outfile=None):", "body": "if not outfile and lcdict['']:outfile = '' % lcdict['']elif not outfile and not lcdict['']:outfile = ''with open(outfile,'') as outfd:pickle.dump(lcdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)if os.path.exists(outfile):LOGINFO('' % (lcdict[''],outfile))return outfileelse:LOGERROR('')return None", "docstring": "This just writes the lcdict to a pickle.\n\n If outfile is None, then will try to get the name from the\n lcdict['objectid'] and write to -hptxtlc.pkl. If that fails, will\n write to a file named hptxtlc.pkl'.", "id": "f14686:m1"} {"signature": "def read_hatpi_pklc(lcfile):", "body": "try:if lcfile.endswith(''):infd = gzip.open(lcfile,'')else:infd = open(lcfile,'')lcdict = pickle.load(infd)infd.close()return lcdictexcept UnicodeDecodeError:if lcfile.endswith(''):infd = gzip.open(lcfile,'')else:infd = open(lcfile,'')LOGWARNING('''''''' % lcfile)lcdict = pickle.load(infd, encoding='')infd.close()return lcdict", "docstring": "This just reads a pickle LC. 
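The gzip-aware pickle reading with a Python-2 fallback used by `read_hatpi_pklc` can be sketched as below; the `'latin1'` fallback encoding is an assumption, since the string literals in the extracted body above are stripped::

    import gzip
    import pickle

    def read_pklc(lcfile):
        """Read a (possibly gzipped) light-curve pickle, retrying with a
        Python-2-compatible encoding if the plain load fails."""
        opener = gzip.open if lcfile.endswith('.gz') else open
        try:
            with opener(lcfile, 'rb') as infd:
                return pickle.load(infd)
        except UnicodeDecodeError:
            # the pickle was probably written by Python 2
            with opener(lcfile, 'rb') as infd:
                return pickle.load(infd, encoding='latin1')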
Returns an lcdict.", "id": "f14686:m2"} {"signature": "def concatenate_textlcs(lclist,sortby='',normalize=True):", "body": "lcdict = read_hatpi_textlc(lclist[])lccounter = lcdict[''] = {lccounter: os.path.abspath(lclist[])}lcdict[''] = np.full_like(lcdict[''], lccounter)if normalize:for col in MAGCOLS:if col in lcdict:thismedval = np.nanmedian(lcdict[col])if col in ('','',''):lcdict[col] = lcdict[col] / thismedvalelse:lcdict[col] = lcdict[col] - thismedvalfor lcf in lclist[:]:thislcd = read_hatpi_textlc(lcf)if thislcd[''] != lcdict['']:LOGERROR(''''% (lcf, lclist[]))continueelse:LOGINFO(''% (lcf,thislcd[''][''],lclist[],lcdict[lcdict[''][]].size))lccounter = lccounter + lcdict[''][lccounter] = os.path.abspath(lcf)lcdict[''] = np.concatenate((lcdict[''],np.full_like(thislcd[''],lccounter)))for col in lcdict['']:if normalize and col in MAGCOLS:thismedval = np.nanmedian(thislcd[col])if col in ('','',''):thislcd[col] = thislcd[col] / thismedvalelse:thislcd[col] = thislcd[col] - thismedvallcdict[col] = np.concatenate((lcdict[col], thislcd[col]))lcdict[''][''] = lcdict[lcdict[''][]].sizelcdict[''][''] = ['' % x for x in np.unique(lcdict['']).tolist()]lcdict[''] = lccounter + if sortby and sortby in [x[] for x in COLDEFS]:LOGINFO('' % sortby)sortind = np.argsort(lcdict[sortby])for col in lcdict['']:lcdict[col] = lcdict[col][sortind]lcdict[''] = lcdict[''][sortind]LOGINFO('' %lcdict[''][''])return lcdict", "docstring": "This concatenates a list of light curves.\n\n Does not care about overlaps or duplicates. The light curves must all be\n from the same aperture.\n\n The intended use is to concatenate light curves across CCDs or instrument\n changes for a single object. These can then be normalized later using\n standard astrobase tools to search for variablity and/or periodicity.\n\n sortby is a column to sort the final concatenated light curve by in\n ascending order.\n\n If normalize is True, then each light curve's magnitude columns are\n normalized to zero.\n\n The returned lcdict has an extra column: 'lcn' that tracks which measurement\n belongs to which input light curve. This can be used with\n lcdict['concatenated'] which relates input light curve index to input light\n curve filepath. Finally, there is an 'nconcatenated' key in the lcdict that\n contains the total number of concatenated light curves.", "id": "f14686:m3"} {"signature": "def concatenate_textlcs_for_objectid(lcbasedir,objectid,aperture='',postfix='',sortby='',normalize=True,recursive=True):", "body": "LOGINFO(''% (objectid, aperture, lcbasedir))if recursive is False:matching = glob.glob(os.path.join(lcbasedir,'' % (objectid,aperture,postfix)))else:if sys.version_info[:] > (,):matching = glob.glob(os.path.join(lcbasedir,'','' % (objectid,aperture,postfix)),recursive=True)LOGINFO('' % (len(matching), repr(matching)))else:walker = os.walk(lcbasedir)matching = []for root, dirs, _files in walker:for sdir in dirs:searchpath = os.path.join(root,sdir,'' % (objectid,aperture,postfix))foundfiles = glob.glob(searchpath)if foundfiles:matching.extend(foundfiles)LOGINFO('' % (repr(foundfiles),os.path.join(root,sdir)))if matching and len(matching) > :clcdict = concatenate_textlcs(matching,sortby=sortby,normalize=normalize)return clcdictelse:LOGERROR('' %(objectid, aperture))return None", "docstring": "This concatenates all text LCs for an objectid with the given aperture.\n\n Does not care about overlaps or duplicates. 
The light curves must all be\n from the same aperture.\n\n The intended use is to concatenate light curves across CCDs or instrument\n changes for a single object. These can then be normalized later using\n standard astrobase tools to search for variablity and/or periodicity.\n\n\n lcbasedir is the directory to start searching in.\n\n objectid is the object to search for.\n\n aperture is the aperture postfix to use: (TF1 = aperture 1,\n TF2 = aperture 2,\n TF3 = aperture 3)\n\n sortby is a column to sort the final concatenated light curve by in\n ascending order.\n\n If normalize is True, then each light curve's magnitude columns are\n normalized to zero, and the whole light curve is then normalized to the\n global median magnitude for each magnitude column.\n\n If recursive is True, then the function will search recursively in lcbasedir\n for any light curves matching the specified criteria. This may take a while,\n especially on network filesystems.\n\n The returned lcdict has an extra column: 'lcn' that tracks which measurement\n belongs to which input light curve. This can be used with\n lcdict['concatenated'] which relates input light curve index to input light\n curve filepath. Finally, there is an 'nconcatenated' key in the lcdict that\n contains the total number of concatenated light curves.", "id": "f14686:m4"} {"signature": "def concat_write_pklc(lcbasedir,objectid,aperture='',postfix='',sortby='',normalize=True,outdir=None,recursive=True):", "body": "concatlcd = concatenate_textlcs_for_objectid(lcbasedir,objectid,aperture=aperture,sortby=sortby,normalize=normalize,recursive=recursive)if not outdir:outdir = ''if not os.path.exists(outdir):os.mkdir(outdir)outfpath = os.path.join(outdir, '' % (concatlcd[''],aperture))pklc = lcdict_to_pickle(concatlcd, outfile=outfpath)return pklc", "docstring": "This concatenates all text LCs for the given object and writes to a pklc.\n\n Basically a rollup for the concatenate_textlcs_for_objectid and\n lcdict_to_pickle functions.", "id": "f14686:m5"} {"signature": "def parallel_concat_worker(task):", "body": "lcbasedir, objectid, kwargs = tasktry:return concat_write_pklc(lcbasedir, objectid, **kwargs)except Exception as e:LOGEXCEPTION(''% (objectid, lcbasedir))return None", "docstring": "This is a worker for the function below.\n\ntask[0] = lcbasedir\ntask[1] = objectid\ntask[2] = {'aperture','postfix','sortby','normalize','outdir','recursive'}", "id": "f14686:m6"} {"signature": "def parallel_concat_lcdir(lcbasedir,objectidlist,aperture='',postfix='',sortby='',normalize=True,outdir=None,recursive=True,nworkers=,maxworkertasks=):", "body": "if not outdir:outdir = ''if not os.path.exists(outdir):os.mkdir(outdir)tasks = [(lcbasedir, x, {'':aperture,'':postfix,'':sortby,'':normalize,'':outdir,'':recursive}) for x in objectidlist]pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)results = pool.map(parallel_concat_worker, tasks)pool.close()pool.join()return {x:y for (x,y) in zip(objectidlist, results)}", "docstring": "This concatenates all text LCs for the given objectidlist.", "id": "f14686:m7"} {"signature": "def merge_hatpi_textlc_apertures(lclist):", "body": "lcaps = {}framekeys = []for lc in lclist:lcd = read_hatpi_textlc(lc)for col in lcd['']:if col.startswith(''):lcaps[col] = lcdthisframekeys = lcd[''].tolist()framekeys.extend(thisframekeys)framekeys = sorted(list(set(framekeys)))", "docstring": "This merges all TFA text LCs with separate apertures for a single object.\n\n The framekey column will be used as the join column across all 
light curves\n in lclist. Missing values will be filled in with nans. This function assumes\n all light curves are in the format specified in COLDEFS above and readable\n by read_hatpi_textlc above (i.e. have a single column for TFA mags for a\n specific aperture at the end).", "id": "f14686:m8"} {"signature": "def read_hatpi_binnedlc(binnedpklf, textlcf, timebinsec):", "body": "LOGINFO('' % binnedpklf)lcdict = read_hatpi_textlc(textlcf)if binnedpklf.endswith(''):infd = gzip.open(binnedpklf,'')else:infd = open(binnedpklf,'')try:binned = pickle.load(infd)except Exception as e:infd.seek()binned = pickle.load(infd, encoding='')infd.close()blckeys = binned.keys()lcdict[''] = {}for key in blckeys:if (key == '' and'' in binned[key] and'' in binned[key] and'' in binned[key]):ap0mad = np.nanmedian(np.abs(binned[key][''] -np.nanmedian(binned[key][''])))ap1mad = np.nanmedian(np.abs(binned[key][''] -np.nanmedian(binned[key][''])))ap2mad = np.nanmedian(np.abs(binned[key][''] -np.nanmedian(binned[key][''])))lcdict[''][''] = {'':binned[key][''],'':binned[key][''],'':np.full_like(binned[key][''],ap0mad),'':binned[key][''],'':binned[key][''],'':timebinsec}lcdict[''][''] = {'':binned[key][''],'':binned[key][''],'':np.full_like(binned[key][''],ap1mad),'':binned[key][''],'':binned[key][''],'':timebinsec}lcdict[''][''] = {'':binned[key][''],'':binned[key][''],'':np.full_like(binned[key][''],ap2mad),'':binned[key][''],'':binned[key][''],'':timebinsec}if ((key == '' or key == '') and'' in binned[key]):ap0mad = np.nanmedian(np.abs(binned[key][''] -np.nanmedian(binned[key][''])))lcdict[''][''] = {'':binned[key][''],'':binned[key][''],'':np.full_like(binned[key][''],ap0mad),'':binned[key][''],'':binned[key][''],'':timebinsec}if ((key == '' or key == '') and'' in binned[key]):ap0mad = np.nanmedian(np.abs(binned[key][''] -np.nanmedian(binned[key][''])))lcdict[''][''] = {'':binned[key][''],'':binned[key][''],'':np.full_like(binned[key][''],ap0mad),'':binned[key][''],'':binned[key][''],'':timebinsec}if ((key == '' or key == '') and'' in binned[key]):ap0mad = np.nanmedian(np.abs(binned[key][''] -np.nanmedian(binned[key][''])))lcdict[''][''] = {'':binned[key][''],'':binned[key][''],'':np.full_like(binned[key][''],ap0mad),'':binned[key][''],'':binned[key][''],'':timebinsec}if lcdict['']:return lcdictelse:LOGERROR('' % binnedpklf)return None", "docstring": "This reads a binnedlc pickle produced by the HATPI prototype pipeline.\n\n Converts it into a standard lcdict as produced by the read_hatpi_textlc\n function above by using the information in unbinnedtextlc for the same\n object.\n\n Adds a 'binned' key to the standard lcdict containing the binned mags, etc.", "id": "f14686:m9"} {"signature": "def generate_hatpi_binnedlc_pkl(binnedpklf, textlcf, timebinsec,outfile=None):", "body": "binlcdict = read_hatpi_binnedlc(binnedpklf, textlcf, timebinsec)if binlcdict:if outfile is None:outfile = os.path.join(os.path.dirname(binnedpklf),'' % (os.path.basename(binnedpklf).replace('','')))return lcdict_to_pickle(binlcdict, outfile=outfile)else:LOGERROR('' % binnedpklf)return None", "docstring": "This reads the binned LC and writes it out to a pickle.", "id": "f14686:m10"} {"signature": "def parallel_gen_binnedlc_pkls(binnedpkldir,textlcdir,timebinsec,binnedpklglob='',textlcglob=''):", "body": "binnedpkls = sorted(glob.glob(os.path.join(binnedpkldir, binnedpklglob)))textlcs = []for bpkl in binnedpkls:objectid = HATIDREGEX.findall(bpkl)if objectid is not None:objectid = objectid[]searchpath = os.path.join(textlcdir, '' % (objectid, 
textlcglob))textlcf = glob.glob(searchpath)if textlcf:textlcs.append(textlcf)else:textlcs.append(None)", "docstring": "This generates the binnedlc pkls for a directory of such files.\n\nFIXME: finish this", "id": "f14686:m11"} {"signature": "def pklc_fovcatalog_objectinfo(pklcdir,fovcatalog,fovcatalog_columns=[,,,,,,,,,,,,,,,,,],fovcatalog_colnames=['','','','','','','','','','','','','','','','','',''],fovcatalog_colformats=('''''''''''''')):", "body": "if fovcatalog.endswith(''):catfd = gzip.open(fovcatalog)else:catfd = open(fovcatalog)fovcat = np.genfromtxt(catfd,usecols=fovcatalog_columns,names=fovcatalog_colnames,dtype=fovcatalog_colformats)catfd.close()pklclist = sorted(glob.glob(os.path.join(pklcdir, '')))updatedpklcs, failedpklcs = [], []for pklc in pklclist:lcdict = read_hatpi_pklc(pklc)objectid = lcdict['']catind = np.where(fovcat[''] == objectid)if len(catind) > and catind[]:lcdict[''].update({x:y for x,y in zip(fovcatalog_colnames,[np.asscalar(fovcat[z][catind]) forz in fovcatalog_colnames])})with open(pklc+'','') as outfd:pickle.dump(lcdict, outfd, pickle.HIGHEST_PROTOCOL)if os.path.exists(pklc+''):shutil.move(pklc+'',pklc)LOGINFO('' %(pklc, objectid,lcdict[''][''],lcdict['']['']))updatedpklcs.append(pklc)else:failedpklcs.append(pklc)return updatedpklcs, failedpklcs", "docstring": "Adds catalog info to objectinfo key of all pklcs in lcdir.\n\n If fovcatalog, fovcatalog_columns, fovcatalog_colnames are provided, uses\n them to find all the additional information listed in the fovcatalog_colname\n keys, and writes this info to the objectinfo key of each lcdict. This makes\n it easier for astrobase tools to work on these light curve.\n\n The default set up for fovcatalog is to use a text file generated by the\n HATPI pipeline before auto-calibrating a field. 
The format is specified as\n above in _columns, _colnames, and _colformats.", "id": "f14686:m12"} {"signature": "def read_hatlc(hatlc):", "body": "lcfname = os.path.basename(hatlc)if '' in lcfname:lcf = gzip.open(hatlc,'')elif '' in lcfname:lcf = bz2.BZ2File(hatlc, '')else:lcf = open(hatlc,'')if '' in lcfname and HAVEPYFITS:hdulist = pyfits.open(lcf)objectinfo = hdulist[].headerobjectlc = hdulist[].datalccols = objectlc.columns.nameshdulist.close()lcf.close()lcdict = {}for col in lccols:lcdict[col] = np.array(objectlc[col])lcdict[''] = objectinfo['']lcdict[''] = objectinfo['']lcdict[''] = objectinfo['']lcdict[''] = objectinfo['']lcdict[''] = [objectinfo[x] for x in ('','','','','','')]lcdict[''] = objectinfo['']lcdict[''] = objectinfo['']lcdict[''] = objectinfo['']lcdict[''] = lccolsreturn lcdictelif '' in lcfname and not HAVEPYFITS:print(\"\" % lcfname)returnelif '' in lcfname or '' in lcfname:lcflines = lcf.read().decode().split('')lcf.close()objectdata = [x for x in lcflines if x.startswith('')]objectlc = [x for x in lcflines if not x.startswith('')]objectlc = [x for x in objectlc if len(x) > ]if '' in lcfname:objectlc = [x.split('') for x in objectlc]else:objectlc = [x.split() for x in objectlc]objectlc = list(zip(*objectlc))objectdata = [x.strip('') for x in objectdata]objectdata = [x.strip() for x in objectdata]objectdata = [x for x in objectdata if len(x) > ]hatid, twomassid = objectdata[].split('')ra, dec = objectdata[].split('')ra = float(ra.split('')[-].strip(''))dec = float(dec.split('')[-].strip(''))vmag, rmag, imag, jmag, hmag, kmag = objectdata[].split('')vmag = float(vmag.split('')[-])rmag = float(rmag.split('')[-])imag = float(imag.split('')[-])jmag = float(jmag.split('')[-])hmag = float(hmag.split('')[-])kmag = float(kmag.split('')[-])ndet = int(objectdata[].split('')[-])hatstations = objectdata[].split('')[-]filterhead_ind = objectdata.index('')columnhead_ind = objectdata.index('')filters = objectdata[filterhead_ind:columnhead_ind]columndefs = objectdata[columnhead_ind+:]columns = []for line in columndefs:colnum, colname, coldesc = line.split('')columns.append(colname)lcdict = {}for ind, col in enumerate(columns):lcdict[col] = np.array([TEXTLC_OUTPUT_COLUMNS[col][](x)for x in objectlc[ind]])lcdict[''] = hatidlcdict[''] = twomassid.replace('','')lcdict[''] = ralcdict[''] = declcdict[''] = [vmag, rmag, imag, jmag, hmag, kmag]lcdict[''] = ndetlcdict[''] = hatstations.split('')lcdict[''] = filters[:]lcdict[''] = columnsreturn lcdict", "docstring": "This reads a consolidated HAT LC written by the functions above.\n\nReturns a dict.", "id": "f14687:m0"} {"signature": "def read_original_textlc(lcpath):", "body": "LOGINFO(''.format(lcpath))N_lines_to_parse_comments = with open(lcpath, '') as file:head = [next(file) for ind in range(N_lines_to_parse_comments)]N_comment_lines = len([l for l in head if l.decode('')[] == ''])if N_comment_lines < N_lines_to_parse_comments:LOGERROR(''.format(fpath=lcpath))return Nonefirst_data_line = list(filter(None, head[N_comment_lines].decode('').split()))N_cols = len(first_data_line)if N_cols == :colformat = ''elif N_cols == :colformat = ''elif N_cols == :colformat = ''else:LOGERROR(\"\"\"\".format(fpath=lcpath,ncols=N_cols))return Noneif colformat == '':col_names = ['','','','','','','','','','','','','','','','','']col_dtypes = ['',float,float,float,'',float,float,'',float,float,'',float,float,float,float,float,float]dtype_pairs = [el for el in zip(col_names, col_dtypes)]data = np.genfromtxt(lcpath, names=col_names, 
dtype=col_dtypes,skip_header=N_comment_lines, delimiter=None)out = {}for ix in range(len(data.dtype.names)):out[data.dtype.names[ix]] = data[data.dtype.names[ix]]elif colformat == '':col_names = ['','','','','','','','','','','','','','','','','','','','']col_dtypes = ['',float,float,float,'',float,float,'',float,float,'',float,float,float,float,float,float,float,float,float]dtype_pairs = [el for el in zip(col_names, col_dtypes)]data = np.genfromtxt(lcpath, names=col_names, dtype=col_dtypes,skip_header=N_comment_lines, delimiter=None)out = {}for ix in range(len(data.dtype.names)):out[data.dtype.names[ix]] = data[data.dtype.names[ix]]elif colformat == '':col_names = ['', '', '', '','', '', '','', '', '','', '', '','', '', '','', '', '','', '', '','', '', '', '','', '', '','', '', '']out = astascii.read(lcpath, names=col_names, comment='')return out", "docstring": "Read .epdlc, and .tfalc light curves and return a corresponding labelled\ndict (if LC from <2012) or astropy table (if >=2012). Each has different\nkeys that can be accessed via .keys()\n\nInput:\nlcpath: path (string) to light curve data, which is a textfile with HAT\nLC data.\n\nExample:\ndat = read_original_textlc('HAT-115-0003266.epdlc')", "id": "f14688:m0"} {"signature": "def _squeeze(value):", "body": "return re.sub(r\"\", \"\", value).strip()", "docstring": "Replace all sequences of whitespace chars with a single space.", "id": "f14689:m0"} {"signature": "def _pycompress_sqlitecurve(sqlitecurve, force=False):", "body": "outfile = '' % sqlitecurvetry:if os.path.exists(outfile) and not force:os.remove(sqlitecurve)return outfileelse:with open(sqlitecurve,'') as infd:with gzip.open(outfile,'') as outfd:shutil.copyfileobj(infd, outfd)if os.path.exists(outfile):os.remove(sqlitecurve)return outfileexcept Exception as e:return None", "docstring": "This just compresses the sqlitecurve. Should be independent of OS.", "id": "f14689:m1"} {"signature": "def _pyuncompress_sqlitecurve(sqlitecurve, force=False):", "body": "outfile = sqlitecurve.replace('','')try:if os.path.exists(outfile) and not force:return outfileelse:with gzip.open(sqlitecurve,'') as infd:with open(outfile,'') as outfd:shutil.copyfileobj(infd, outfd)if os.path.exists(outfile):return outfileexcept Exception as e:return None", "docstring": "This just uncompresses the sqlitecurve. 
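A sketch of the pure-Python gzip round-trip that the _pycompress/_pyuncompress helpers are built on (shutil.copyfileobj between a plain file and gzip.open); the '.gz' suffix handling is an assumption, since the literal strings are masked above:

import gzip
import os
import shutil

def gzip_file(path):
    # compress path -> path.gz, then remove the uncompressed original
    outpath = path + '.gz'
    with open(path, 'rb') as infd, gzip.open(outpath, 'wb') as outfd:
        shutil.copyfileobj(infd, outfd)
    os.remove(path)
    return outpath

def gunzip_file(gzpath):
    # uncompress path.gz -> path, keeping the gzipped copy around
    outpath = gzpath.replace('.gz', '')
    with gzip.open(gzpath, 'rb') as infd, open(outpath, 'wb') as outfd:
        shutil.copyfileobj(infd, outfd)
    return outpath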
Should be independent of OS.", "id": "f14689:m2"} {"signature": "def _gzip_sqlitecurve(sqlitecurve, force=False):", "body": "if force:cmd = '' % sqlitecurveelse:cmd = '' % sqlitecurvetry:outfile = '' % sqlitecurveif os.path.exists(outfile) and not force:os.remove(sqlitecurve)return outfileelse:subprocess.check_output(cmd, shell=True)if os.path.exists(outfile):return outfileelse:return Noneexcept subprocess.CalledProcessError:return None", "docstring": "This just compresses the sqlitecurve in gzip format.\n\n FIXME: this doesn't work with gzip < 1.6 or non-GNU gzip (probably).", "id": "f14689:m3"} {"signature": "def _gunzip_sqlitecurve(sqlitecurve):", "body": "cmd = '' % sqlitecurvetry:subprocess.check_output(cmd, shell=True)return sqlitecurve.replace('','')except subprocess.CalledProcessError:return None", "docstring": "This just uncompresses the sqlitecurve in gzip format.\n\n FIXME: this doesn't work with gzip < 1.6 or non-GNU gzip (probably).", "id": "f14689:m4"} {"signature": "def _validate_sqlitecurve_filters(filterstring, lccolumns):", "body": "stringelems = _squeeze(filterstring).lower()stringelems = filterstring.replace('','')stringelems = stringelems.replace('','')stringelems = stringelems.replace('','')stringelems = stringelems.replace(\"\",'')stringelems = stringelems.replace('','')stringelems = stringelems.replace('','')stringelems = _squeeze(stringelems)stringelems = stringelems.split('')stringelems = [x.strip() for x in stringelems]stringwords = []for x in stringelems:try:float(x)except ValueError as e:stringwords.append(x)stringwords2 = []for x in stringwords:if not(x.startswith('') and x.endswith('')):stringwords2.append(x)stringwords2 = [x for x in stringwords2 if len(x) > ]wordset = set(stringwords2)allowedwords = SQLITE_ALLOWED_WORDS + lccolumnscheckset = set(allowedwords)validatecheck = list(wordset - checkset)if len(validatecheck) > :LOGWARNING(\"\"\"\" % filterstring)return Noneelse:return filterstring", "docstring": "This validates the sqlitecurve filter string.\n\n This MUST be valid SQL but not contain any commands.", "id": "f14689:m5"} {"signature": "def read_and_filter_sqlitecurve(lcfile,columns=None,sqlfilters=None,raiseonfail=False,returnarrays=True,forcerecompress=False,quiet=True):", "body": "try:if '' in lcfile[-:]:lcf = _uncompress_sqlitecurve(lcfile)else:lcf = lcfiledb = sql.connect(lcf)cur = db.cursor()query = (\"\")cur.execute(query)objectinfo = cur.fetchone()query = (\"\"\"\")cur.execute(query)lcinfo = cur.fetchone()(lcversion, lcdatarelease, lccols, lcsortcol,lcapertures, lcbestaperture,objinfocols, objidcol,lcunixtime, lcgitrev, lccomment) = lcinfolcapertures = json.loads(lcapertures)lcbestaperture = json.loads(lcbestaperture)objectinfokeys = objinfocols.split('')objectinfodict = {x:y for (x,y) in zip(objectinfokeys, objectinfo)}objectid = objectinfodict[objidcol]query = (\"\")cur.execute(query)filterinfo = cur.fetchall()if columns and all([x in lccols.split('') for x in columns]):LOGINFO('' % columns)proceed = Trueelif columns is None:columns = lccols.split('')proceed = Trueelse:proceed = Falseif not proceed:if '' in lcfile[-:] and lcf:_compress_sqlitecurve(lcf, force=forcerecompress)LOGERROR('')return None, \"\"lcdict = {'':objectid,'':objectinfodict,'':objectinfokeys,'':lcversion,'':lcdatarelease,'':columns,'':lcsortcol,'':lcapertures,'':lcbestaperture,'':lcunixtime,'':lcgitrev,'':lccomment,'':filterinfo}if ((sqlfilters is not None) and(isinstance(sqlfilters,str) orisinstance(sqlfilters, unicode))):validatedfilters = 
_validate_sqlitecurve_filters(sqlfilters,lccols.split(''))if validatedfilters is not None:LOGINFO('' % validatedfilters)filtersok = Trueelse:filtersok = Falseelse:validatedfilters = Nonefiltersok = Noneif validatedfilters is not None:query = (\"\"\"\").format(columns=''.join(columns), sqlfilter=validatedfilters,sortcol=lcsortcol)lcdict[''] = validatedfilterselse:query = (\"\") % (''.join(columns),lcsortcol)cur.execute(query)lightcurve = cur.fetchall()if lightcurve and len(lightcurve) > :lightcurve = list(zip(*lightcurve))lcdict.update({x:y for (x,y) in zip(lcdict[''],lightcurve)})lcok = Truelcdict[''][''] = len(lightcurve[])else:LOGWARNING('' % lcdict[''])lcdict.update({x:y for (x,y) inzip(lcdict[''],[[] for x in lcdict['']])})lcok = Falseif filtersok is True and lcok:statusmsg = ''elif filtersok is None and lcok:statusmsg = ''elif filtersok is False and lcok:statusmsg = ''else:statusmsg = ''returnval = (lcdict, statusmsg)if '' in lcfile[-:] and lcf:_compress_sqlitecurve(lcf, force=forcerecompress)if returnarrays:for column in lcdict['']:lcdict[column] = np.array([x if x is not None else np.nanfor x in lcdict[column]])except Exception as e:if not quiet:LOGEXCEPTION('' % lcfile)returnval = (None, '')if '' in lcfile[-:] and lcf:_compress_sqlitecurve(lcf, force=forcerecompress)if raiseonfail:raisereturn returnval", "docstring": "This reads a HAT sqlitecurve and optionally filters it.\n\n Parameters\n ----------\n\n lcfile : str\n The path to the HAT sqlitecurve file.\n\n columns : list\n A list of columns to extract from the ligh curve file. If None, then\n returns all columns present in the latest `columnlist` in the light\n curve.\n\n sqlfilters : list of str\n If no None, it must be a list of text SQL filters that apply to the\n columns in the lightcurve.\n\n raiseonfail : bool\n If this is True, an Exception when reading the LC will crash the\n function instead of failing silently and returning None as the result.\n\n returnarrays : bool\n If this is True, the output lcdict contains columns as np.arrays instead\n of lists. You generally want this to be True.\n\n forcerecompress : bool\n If True, the sqlitecurve will be recompressed even if a compressed\n version of it is found. This usually happens when sqlitecurve opening is\n interrupted by the OS for some reason, leaving behind a gzipped and\n un-gzipped copy. By default, this function refuses to overwrite the\n existing gzipped version so if the un-gzipped version is corrupt but\n that one isn't, it can be safely recovered.\n\n quiet : bool\n If True, will not warn about any problems, even if the light curve\n reading fails (the only clue then will be the return value of\n None). 
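A hedged usage sketch for read_and_filter_sqlitecurve: the filename, column keys, and SQL condition below are illustrative only and must match the columns actually present in the sqlitecurve (the filter string is validated against an allowed-word list before it is used):

lcdict, msg = read_and_filter_sqlitecurve(
    'HAT-123-0000001-hatlc.sqlite.gz',   # hypothetical filename
    columns=['rjd', 'aep_000'],          # hypothetical column keys
    sqlfilters='aep_000 < 12.0',         # a plain SQL condition, no commands
)
print(msg)                               # status message, per the Returns section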
Useful for batch processing of many many light curves.\n\n Returns\n -------\n\n tuple : (lcdict, status_message)\n A two-element tuple is returned, with the first element being the\n lcdict.", "id": "f14689:m6"} {"signature": "def describe(lcdict, returndesc=False, offsetwith=None):", "body": "if '' in lcdict and '' in lcdict[''].lower():return describe_lcc_csv(lcdict, returndesc=returndesc)columndefs = []for colind, column in enumerate(lcdict['']):if '' in column:colkey, colap = column.split('')coldesc = COLUMNDEFS[colkey][] % colapelse:coldesc = COLUMNDEFS[column][]columndefstr = '' % (colind,column,coldesc)columndefs.append(columndefstr)columndefs = ''.join(columndefs)filterdefs = []for row in lcdict['']:filterid, filtername, filterdesc = rowfilterdefstr = '' % (filterid,filtername,filterdesc)filterdefs.append(filterdefstr)filterdefs = ''.join(filterdefs)aperturedefs = []for key in sorted(lcdict[''].keys()):aperturedefstr = '' % (key, lcdict[''][key])aperturedefs.append(aperturedefstr)aperturedefs = ''.join(aperturedefs)description = DESCTEMPLATE.format(objectid=lcdict[''],hatid=lcdict[''][''],twomassid=lcdict[''][''].strip(),ra=lcdict[''][''],decl=lcdict[''][''],pmra=lcdict[''][''],pmra_err=lcdict[''][''],pmdecl=lcdict[''][''],pmdecl_err=lcdict[''][''],jmag=lcdict[''][''],hmag=lcdict[''][''],kmag=lcdict[''][''],bmag=lcdict[''][''],vmag=lcdict[''][''],sdssg=lcdict[''][''],sdssr=lcdict[''][''],sdssi=lcdict[''][''],ndet=lcdict[''][''],lcsortcol=lcdict[''],lcbestaperture=json.dumps(lcdict[''],ensure_ascii=True),network=lcdict[''][''],stations=lcdict[''][''],lastupdated=lcdict[''],datarelease=lcdict[''],lcversion=lcdict[''],lcserver=lcdict[''],comment=lcdict[''],lcfiltersql=(lcdict[''] if '' in lcdict else ''),lcnormcols=(lcdict[''] if '' in lcdict else ''),filterdefs=filterdefs,columndefs=columndefs,aperturedefs=aperturedefs)if offsetwith is not None:description = textwrap.indent(description,'' % offsetwith,lambda line: True)print(description)else:print(description)if returndesc:return description", "docstring": "This describes the light curve object and columns present.\n\n Parameters\n ----------\n\n lcdict : dict\n The input lcdict to parse for column and metadata info.\n\n returndesc : bool\n If True, returns the description string as an str instead of just\n printing it to stdout.\n\n offsetwith : str\n This is a character to offset the output description lines by. 
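The offsetwith behavior relies on textwrap.indent with a predicate that prefixes every line, including blank ones; a small standalone sketch:

import textwrap

description = "object: HAT-123-0000001\nndet: 12345"
# the lambda forces the prefix onto every line, even empty ones
print(textwrap.indent(description, '# ', lambda line: True))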
This is\n useful to add comment characters like '#' to the output description\n lines.\n\n Returns\n -------\n\n str or None\n If returndesc is True, returns the description lines as a str, otherwise\n returns nothing.", "id": "f14689:m7"} {"signature": "def _smartcast(castee, caster, subval=None):", "body": "try:return caster(castee)except Exception as e:if caster is float or caster is int:return nanelif caster is str:return ''else:return subval", "docstring": "This just tries to apply the caster function to castee.\n\nReturns None on failure.", "id": "f14689:m8"} {"signature": "def _parse_csv_header(header):", "body": "headerlines = header.split('')headerlines = [x.lstrip('') for x in headerlines]objectstart = headerlines.index('')metadatastart = headerlines.index('')camfilterstart = headerlines.index('')photaperturestart = headerlines.index('')columnstart = headerlines.index('')lcstart = headerlines.index('')objectinfo = headerlines[objectstart+:metadatastart-]metadatainfo = headerlines[metadatastart+:camfilterstart-]camfilterinfo = headerlines[camfilterstart+:photaperturestart-]photapertureinfo = headerlines[photaperturestart+:columnstart-]columninfo = headerlines[columnstart+:lcstart-]metadict = {'':{}}objectinfo = [x.split('') for x in objectinfo]for elem in objectinfo:for kvelem in elem:key, val = kvelem.split('',)metadict[''][key.strip()] = (_smartcast(val, METAKEYS[key.strip()]))metadict[''] = metadict[''][''][:]del metadict['']['']metadatainfo = [x.split('') for x in metadatainfo]for elem in metadatainfo:for kvelem in elem:try:key, val = kvelem.split('',)if key.strip() == '':val = json.loads(val)if key.strip() in ('', ''):val = int(val)if key.strip() == '':val = float(val)metadict[key.strip()] = valexcept Exception as e:LOGWARNING('''' % kvelem)metadict[''] = []for row in camfilterinfo:filterid, filtername, filterdesc = row.split('')metadict[''].append((int(filterid),filtername,filterdesc))metadict[''] = {}for row in photapertureinfo:apnum, appix = row.split('')appix = float(appix.rstrip(''))metadict[''][apnum.strip()] = appixmetadict[''] = []for row in columninfo:colnum, colname, coldesc = row.split('')metadict[''].append(colname)return metadict", "docstring": "This parses the CSV header from the CSV HAT sqlitecurve.\n\nReturns a dict that can be used to update an existing lcdict with the\nrelevant metadata info needed to form a full LC.", "id": "f14689:m9"} {"signature": "def _parse_csv_header_lcc_csv_v1(headerlines):", "body": "commentchar = headerlines[]separator = headerlines[]headerlines = [x.lstrip('' % commentchar) for x in headerlines[:]]metadatastart = headerlines.index('')columnstart = headerlines.index('')lcstart = headerlines.index('')metadata = '' .join(headerlines[metadatastart+:columnstart-])columns = '' .join(headerlines[columnstart+:lcstart-])metadata = json.loads(metadata)columns = json.loads(columns)return metadata, columns, separator", "docstring": "This parses the header of the LCC CSV V1 LC format.", "id": "f14689:m10"} {"signature": "def read_lcc_csvlc(lcfile):", "body": "if '' in os.path.basename(lcfile):infd = gzip.open(lcfile,'')else:infd = open(lcfile,'')lctext = infd.read().decode()infd.close()lctextlines = lctext.split('')lcformat = lctextlines[]commentchar = lctextlines[]lcstart = lctextlines.index('' % commentchar)headerlines = lctextlines[:lcstart+]lclines = lctextlines[lcstart+:]metadata, columns, separator = _parse_csv_header_lcc_csv_v1(headerlines)objectid = metadata['']['']objectinfo = {key:metadata[key][''] for key in metadata}colnames = 
[]colnum = []coldtypes = []for k in columns:coldef = columns[k]colnames.append(k)colnum.append(coldef[''])coldtypes.append(coldef[''])coldtypes = ''.join(coldtypes)recarr = np.genfromtxt(lclines,comments=commentchar,delimiter=separator,usecols=colnum,autostrip=True,names=colnames,dtype=coldtypes)lcdict = {x:recarr[x] for x in colnames}lcdict[''] = lcformatlcdict[''] = objectidlcdict[''] = objectinfolcdict[''] = colnameslcdict[''] = columnslcdict[''] = metadatareturn lcdict", "docstring": "This reads a CSV LC produced by an `LCC-Server\n `_ instance.\n\n Parameters\n ----------\n\n lcfile : str\n The LC file to read.\n\n Returns\n -------\n\n dict\n Returns an lcdict that's readable by most astrobase functions for\n further processing.", "id": "f14689:m11"} {"signature": "def describe_lcc_csv(lcdict, returndesc=False):", "body": "metadata_lines = []coldef_lines = []if '' in lcdict and '' in lcdict[''].lower():metadata = lcdict['']metakeys = lcdict[''].keys()coldefs = lcdict['']for mk in metakeys:metadata_lines.append('' % (mk,metadata[mk]['']))for ck in lcdict['']:coldef_lines.append(''% (coldefs[ck][''],ck,coldefs[ck][''],coldefs[ck]['']))desc = LCC_CSVLC_DESCTEMPLATE.format(objectid=lcdict[''],metadata_desc=''.join(metadata_lines),metadata=pformat(lcdict['']),columndefs=''.join(coldef_lines))print(desc)if returndesc:return descelse:LOGERROR(\"\")return None", "docstring": "This describes the LCC CSV format light curve file.\n\nParameters\n----------\n\nlcdict : dict\n The input lcdict to parse for column and metadata info.\n\nreturndesc : bool\n If True, returns the description string as an str instead of just\n printing it to stdout.\n\nReturns\n-------\n\nstr or None\n If returndesc is True, returns the description lines as a str, otherwise\n returns nothing.", "id": "f14689:m12"} {"signature": "def read_csvlc(lcfile):", "body": "if '' in os.path.basename(lcfile):LOGINFO('' % lcfile)infd = gzip.open(lcfile,'')else:LOGINFO('' % lcfile)infd = open(lcfile,'')lcformat_check = infd.read().decode()if '' in lcformat_check:infd.close()return read_lcc_csvlc(lcfile)else:infd.seek()lctext = infd.read().decode() infd.close()lcstart = lctext.index('')lcheader = lctext[:lcstart+]lccolumns = lctext[lcstart+:].split('')lccolumns = [x for x in lccolumns if len(x) > ]lcdict = _parse_csv_header(lcheader)lccolumns = [x.split('') for x in lccolumns]lccolumns = list(zip(*lccolumns)) for colind, col in enumerate(lcdict['']):if (col.split('')[] in LC_MAG_COLUMNS orcol.split('')[] in LC_ERR_COLUMNS orcol.split('')[] in LC_FLAG_COLUMNS):lcdict[col] = np.array([_smartcast(x,COLUMNDEFS[col.split('')[]][])for x in lccolumns[colind]])elif col in COLUMNDEFS:lcdict[col] = np.array([_smartcast(x,COLUMNDEFS[col][])for x in lccolumns[colind]])else:LOGWARNING('' % col)continuereturn lcdict", "docstring": "This reads a HAT data server or LCC-Server produced CSV light curve\n into an lcdict.\n\n This will automatically figure out the format of the file\n provided. Currently, it can read:\n\n - legacy HAT data server CSV LCs (e.g. from\n https://hatsouth.org/planets/lightcurves.html) with an extension of the\n form: `.hatlc.csv.gz`.\n - all LCC-Server produced LCC-CSV-V1 LCs (e.g. 
from\n https://data.hatsurveys.org) with an extension of the form: `-csvlc.gz`.\n\n\n Parameters\n ----------\n\n lcfile : str\n The light curve file to read.\n\n Returns\n -------\n\n dict\n Returns an lcdict that can be read and used by many astrobase processing\n functions.", "id": "f14689:m13"} {"signature": "def find_lc_timegroups(lctimes, mingap=):", "body": "lc_time_diffs = [(lctimes[x] - lctimes[x-]) for x in range(,len(lctimes))]lc_time_diffs = np.array(lc_time_diffs)group_start_indices = np.where(lc_time_diffs > mingap)[]if len(group_start_indices) > :group_indices = []for i, gindex in enumerate(group_start_indices):if i == :group_indices.append(slice(,gindex+))else:group_indices.append(slice(group_start_indices[i-]+,gindex+))group_indices.append(slice(group_start_indices[-]+,len(lctimes)))else:group_indices = [slice(,len(lctimes))]return len(group_indices), group_indices", "docstring": "This finds the time gaps in the light curve, so we can figure out which\n times are for consecutive observations and which represent gaps\n between seasons.\n\n Parameters\n ----------\n\n lctimes : np.array\n This is the input array of times, assumed to be in some form of JD.\n\n mingap : float\n This defines how much the difference between consecutive measurements is\n allowed to be to consider them as parts of different timegroups. By\n default it is set to 4.0 days.\n\n Returns\n -------\n\n tuple\n A tuple of the form below is returned, containing the number of time\n groups found and Python slice objects for each group::\n\n (ngroups, [slice(start_ind_1, end_ind_1), ...])", "id": "f14689:m14"} {"signature": "def normalize_lcdict(lcdict,timecol='',magcols='',mingap=,normto='',debugmode=False,quiet=False):", "body": "if '' in lcdict and len(lcdict['']) > :if not quiet:LOGWARNING('')return lcdictif timecol in lcdict:times = lcdict[timecol]elif '' in lcdict:times = lcdict['']else:LOGERROR(\"\" %lcdict[''])return lcdictngroups, timegroups = find_lc_timegroups(np.array(times),mingap=mingap)if '' in lcdict:apertures = sorted(lcdict[''].keys())elif '' in lcdict and '' in lcdict['']:apertures = sorted(lcdict[''][''].keys())aimcols = [('' % x) for x in apertures if ('' % x) in lcdict]armcols = [('' % x) for x in apertures if ('' % x) in lcdict]aepcols = [('' % x)for x in apertures if ('' % x) in lcdict]atfcols = [('' % x) for x in apertures if ('' % x) in lcdict]psimcols = [x for x in ['','','',''] if x in lcdict]irmcols = [('' % x) for x in apertures if ('' % x) in lcdict]iepcols = [('' % x) for x in apertures if ('' % x) in lcdict]itfcols = [('' % x) for x in apertures if ('' % x) in lcdict]if magcols == '':cols_to_normalize = (aimcols + armcols + aepcols + atfcols +psimcols + irmcols + iepcols + itfcols)elif magcols == '':cols_to_normalize = (irmcols + ([''] if '' in lcdict else []) +irmcols)elif magcols == '':cols_to_normalize = (aepcols + ([''] if '' in lcdict else []) +iepcols)elif magcols == '':cols_to_normalize = (atfcols + ([''] if '' in lcdict else []) +itfcols)elif magcols == '':cols_to_normalize = (aepcols + ([''] if '' in lcdict else []) +iepcols + atfcols +([''] if '' in lcdict else []) +itfcols)else:cols_to_normalize = magcols.split('')cols_to_normalize = [x.strip() for x in cols_to_normalize]colsnormalized = []for col in cols_to_normalize:if col in lcdict:mags = lcdict[col]mags = [(nan if x is None else x) for x in mags]mags = np.array(mags)colsnormalized.append(col)finite_ind = np.isfinite(mags)if any(finite_ind):global_mag_median = np.median(mags[finite_ind])for tgind, tg in 
enumerate(timegroups):finite_ind = np.isfinite(mags[tg])group_median = np.median((mags[tg])[finite_ind])mags[tg] = mags[tg] - group_medianif debugmode:LOGDEBUG('''' %(col, tgind,len(mags[tg]),len(finite_ind),group_median))else:LOGWARNING('' % col)continueif normto == '':mags = mags + global_mag_medianelif normto in ('', '', '','', '','', '', ''):if (normto in lcdict[''] andlcdict[''][normto] is not None):mags = mags + lcdict[''][normto]else:if not quiet:LOGWARNING('''' % normto)normto = ''mags = mags + global_mag_medianlcdict[col] = magselse:if not quiet:LOGWARNING('' % col)continuelcnormcols = ('''''') % (repr(colsnormalized),mingap,normto)lcdict[''] = lcnormcolsreturn lcdict", "docstring": "This normalizes magcols in `lcdict` using `timecol` to find timegroups.\n\n Parameters\n ----------\n\n lcdict : dict\n The input lcdict to process.\n\n timecol : str\n The key in the lcdict that is to be used to extract the time column.\n\n magcols : 'all' or list of str\n If this is 'all', all of the columns in the lcdict that are indicated to\n be magnitude measurement columns are normalized. If this is a list of\n str, must contain the keys of the lcdict specifying which magnitude\n columns will be normalized.\n\n mingap : float\n This defines how much the difference between consecutive measurements is\n allowed to be to consider them as parts of different timegroups. By\n default it is set to 4.0 days.\n\n normto : {'globalmedian', 'zero', 'jmag', 'hmag', 'kmag', 'bmag', 'vmag', 'sdssg', 'sdssr', 'sdssi'}\n This indicates which column will be the normalization target. If this is\n 'globalmedian', the normalization will be to the global median of each\n LC column. If this is 'zero', will normalize to 0.0 for each LC\n column. Otherwise, will normalize to the value of one of the other keys\n in the lcdict['objectinfo'][magkey], meaning the normalization will be\n to some form of catalog magnitude.\n\n debugmode : bool\n If True, will indicate progress as time-groups are found and processed.\n\n quiet : bool\n If True, will not emit any messages when processing.\n\n Returns\n -------\n\n dict\n Returns the lcdict with the magnitude measurements normalized as\n specified. 
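A compact sketch of the core operation, assuming the find_lc_timegroups function documented above: split the time base into groups separated by more than mingap days, zero each group's median, then restore the global median (the normto='globalmedian' case):

import numpy as np

def normalize_to_global_median(times, mags, mingap=4.0):
    # find observing seasons / chunks separated by gaps larger than mingap days
    ngroups, groups = find_lc_timegroups(np.asarray(times), mingap=mingap)
    mags = np.array(mags, dtype=float)
    global_median = np.nanmedian(mags)
    # remove each group's own median offset
    for tg in groups:
        mags[tg] = mags[tg] - np.nanmedian(mags[tg])
    # re-center everything on the original global median
    return mags + global_median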
The normalization happens IN PLACE.", "id": "f14689:m15"} {"signature": "def normalize_lcdict_byinst(lcdict,magcols='',normto='',normkeylist=('','','','','',''),debugmode=False,quiet=False):", "body": "if '' in lcdict and len(lcdict['']) > :if not quiet:LOGWARNING('''''')return lcdictnormkeycols = []availablenormkeys = []for key in normkeylist:if key in lcdict and lcdict[key] is not None:normkeycols.append(lcdict[key])availablenormkeys.append(key)normkeycols = list(zip(*normkeycols))allkeys = [repr(x) for x in normkeycols]allkeys = [a.replace('','').replace('','').replace(\"\",'').replace('','')for a in allkeys]allkeys = np.array(allkeys)normkeys = np.unique(allkeys)if '' in lcdict:apertures = sorted(lcdict[''].keys())elif '' in lcdict and '' in lcdict['']:apertures = sorted(lcdict[''][''].keys())aimcols = [('' % x) for x in apertures if ('' % x) in lcdict]armcols = [('' % x) for x in apertures if ('' % x) in lcdict]aepcols = [('' % x)for x in apertures if ('' % x) in lcdict]atfcols = [('' % x) for x in apertures if ('' % x) in lcdict]psimcols = [x for x in ['','','',''] if x in lcdict]irmcols = [('' % x) for x in apertures if ('' % x) in lcdict]iepcols = [('' % x) for x in apertures if ('' % x) in lcdict]itfcols = [('' % x) for x in apertures if ('' % x) in lcdict]if magcols == '':cols_to_normalize = (aimcols + armcols + aepcols + atfcols +psimcols + irmcols + iepcols + itfcols)elif magcols == '':cols_to_normalize = (irmcols + ([''] if '' in lcdict else []) +irmcols)elif magcols == '':cols_to_normalize = (aepcols + ([''] if '' in lcdict else []) +iepcols)elif magcols == '':cols_to_normalize = (atfcols + ([''] if '' in lcdict else []) +itfcols)elif magcols == '':cols_to_normalize = (aepcols + ([''] if '' in lcdict else []) +iepcols + atfcols +([''] if '' in lcdict else []) +itfcols)else:cols_to_normalize = magcols.split('')cols_to_normalize = [x.strip() for x in cols_to_normalize]colsnormalized = []for col in cols_to_normalize:if col in lcdict:thismags = lcdict[col]for nkey in normkeys:thisind = allkeys == nkeythismagsize = thismags[thisind].sizethismagfinite = np.where(np.isfinite(thismags[thisind]))[].sizeif thismagsize > and thismagfinite > :medmag = np.nanmedian(thismags[thisind])lcdict[col][thisind] = lcdict[col][thisind] - medmagif debugmode:LOGDEBUG('''' %(col, nkey, len(thismags[thisind]), medmag))else:lcdict[col][thisind] = np.nanif normto in ('', '', '','', '','', '', ''):if (normto in lcdict[''] andlcdict[''][normto] is not None):lcdict[col] = lcdict[col] + lcdict[''][normto]else:if not quiet:LOGWARNING('''' % normto)normto = ''colsnormalized.append(col)else:if not quiet:LOGWARNING('' % col)continuelcinstnormcols = ('''''') % (repr(colsnormalized),normto,repr(availablenormkeys))lcdict[''] = lcinstnormcolsreturn lcdict", "docstring": "This is a function to normalize light curves across all instrument\n combinations present.\n\n Use this to normalize a light curve containing a variety of:\n\n - HAT station IDs ('stf')\n - camera IDs ('ccd')\n - filters ('flt')\n - observed field names ('fld')\n - HAT project IDs ('prj')\n - exposure times ('exp')\n\n Parameters\n ----------\n\n lcdict : dict\n The input lcdict to process.\n\n magcols : 'all' or list of str\n If this is 'all', all of the columns in the lcdict that are indicated to\n be magnitude measurement columns are normalized. 
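A sketch of the instrument-keyed idea behind this function: build a per-point key from instrument columns (assumed here to be station, CCD, and filter, following the normkeylist description), then subtract the median within each unique key so every instrument combination is zeroed separately:

import numpy as np

def normalize_by_instrument_key(mags, stf, ccd, flt):
    # one key string per measurement, e.g. '07-1-R' (column names assumed)
    keys = np.array(['%s-%s-%s' % k for k in zip(stf, ccd, flt)])
    out = np.array(mags, dtype=float)
    for key in np.unique(keys):
        idx = keys == key
        if np.isfinite(out[idx]).sum() > 0:
            out[idx] = out[idx] - np.nanmedian(out[idx])
    return out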
If this is a list of\n str, must contain the keys of the lcdict specifying which magnitude\n columns will be normalized.\n\n normto : {'zero', 'jmag', 'hmag', 'kmag', 'bmag', 'vmag', 'sdssg', 'sdssr', 'sdssi'}\n This indicates which column will be the normalization target. If this is\n 'zero', will normalize to 0.0 for each LC column. Otherwise, will\n normalize to the value of one of the other keys in the\n lcdict['objectinfo'][magkey], meaning the normalization will be to some\n form of catalog magnitude.\n\n normkeylist : list of str\n These are the column keys to use to form the normalization\n index. Measurements in the specified `magcols` with identical\n normalization index values will be considered as part of a single\n measurement 'era', and will be normalized to zero. Once all eras have\n been normalized this way, the final light curve will be re-normalized as\n specified in `normto`.\n\n debugmode : bool\n If True, will indicate progress as time-groups are found and processed.\n\n quiet : bool\n If True, will not emit any messages when processing.\n\n Returns\n -------\n\n dict\n Returns the lcdict with the magnitude measurements normalized as\n specified. The normalization happens IN PLACE.", "id": "f14689:m16"} {"signature": "def main():", "body": "import signalsignal.signal(signal.SIGPIPE, signal.SIG_DFL)import argparseaparser = argparse.ArgumentParser(description='')aparser.add_argument('',action='',type=str,help=(\"\"))aparser.add_argument('',action='',default=False,help=(\"\"))args = aparser.parse_args()filetoread = args.hatlcfileif not os.path.exists(filetoread):LOGERROR(\"\" % filetoread)sys.exit()filename = os.path.basename(filetoread)if filename.endswith('') or filename.endswith(''):if args.describe:describe(read_csvlc(filename))sys.exit()else:with gzip.open(filename,'') as infd:for line in infd:print(line.decode(),end='')elif filename.endswith(''):lcdict, msg = read_and_filter_sqlitecurve(filetoread)describe(lcdict, offsetwith='')if args.describe:sys.exit()apertures = sorted(lcdict[''].keys())for aper in apertures:COLUMNDEFS.update({'' % (x, aper): COLUMNDEFS[x] for x inLC_MAG_COLUMNS})COLUMNDEFS.update({'' % (x, aper): COLUMNDEFS[x] for x inLC_ERR_COLUMNS})COLUMNDEFS.update({'' % (x, aper): COLUMNDEFS[x] for x inLC_FLAG_COLUMNS})formstr = ''.join([COLUMNDEFS[x][] for x in lcdict['']])ndet = lcdict['']['']for ind in range(ndet):line = [lcdict[x][ind] for x in lcdict['']]formline = formstr % tuple(line)print(formline)else:LOGERROR('' % filetoread)sys.exit()", "docstring": "This is called when we're executed from the commandline.\n\nThe current usage from the command-line is described below::\n\n usage: hatlc [-h] [--describe] hatlcfile\n\n read a HAT LC of any format and output to stdout\n\n positional arguments:\n hatlcfile path to the light curve you want to read and pipe to stdout\n\n optional arguments:\n -h, --help show this help message and exit\n --describe don't dump the columns, show only object info and LC metadata", "id": "f14689:m17"} {"signature": "def _parse_csv_header(header):", "body": "headerlines = header.split('')headerlines = [x.lstrip('') for x in headerlines]metadatastart = headerlines.index('')columnstart = headerlines.index('')lcstart = headerlines.index('')metadata = headerlines[metadatastart+:columnstart-]columndefs = headerlines[columnstart+:lcstart-]metainfo = [x.split('') for x in metadata][:-]aperpixradius = metadata[-]objectid, kepid, ucac4id, kepmag = metainfo[]objectid, kepid, ucac4id, kepmag = 
(objectid.split('')[-],kepid.split('')[-],ucac4id.split('')[-],kepmag.split('')[-])kepmag = float(kepmag) if kepmag else Nonera, decl, ndet, k2campaign = metainfo[]ra, decl, ndet, k2campaign = (ra.split('')[-],decl.split('')[-],int(ndet.split('')[-]),int(k2campaign.split('')[-]))fovccd, fovchannel, fovmodule = metainfo[]fovccd, fovchannel, fovmodule = (int(fovccd.split('')[-]),int(fovchannel.split('')[-]),int(fovmodule.split('')[-]))try:qualflag, bjdoffset, napertures = metainfo[]qualflag, bjdoffset, napertures = (int(qualflag.split('')[-]),float(bjdoffset.split('')[-]),int(napertures.split('')[-]))kernelspec = Noneexcept Exception as e:qualflag, bjdoffset, napertures, kernelspec = metainfo[]qualflag, bjdoffset, napertures, kernelspec = (int(qualflag.split('')[-]),float(bjdoffset.split('')[-]),int(napertures.split('')[-]),str(kernelspec.split('')[-]))aperpixradius = aperpixradius.split('')[-].split('')aperpixradius = [float(x) for x in aperpixradius]columns = [x.split('')[] for x in columndefs]metadict = {'':objectid,'':{'':objectid,'':kepid,'':ucac4id,'':kepmag,'':ra,'':decl,'':ndet,'':k2campaign,'':fovccd,'':fovchannel,'':fovmodule,'':qualflag,'':bjdoffset,'':napertures,'':kernelspec,'':aperpixradius,},'':columns}return metadict", "docstring": "This parses a CSV header from a K2 CSV LC.\n\n Returns a dict that can be used to update an existing lcdict with the\n relevant metadata info needed to form a full LC.", "id": "f14690:m0"} {"signature": "def read_csv_lightcurve(lcfile):", "body": "if '' in os.path.basename(lcfile):LOGINFO('' % lcfile)infd = gzip.open(lcfile,'')else:LOGINFO('' % lcfile)infd = open(lcfile,'')lctext = infd.read().decode()infd.close()lcstart = lctext.index('')lcheader = lctext[:lcstart+]lccolumns = lctext[lcstart+:].split('')lccolumns = [x.split('') for x in lccolumns if len(x) > ]lcdict = _parse_csv_header(lcheader)lccolumns = list(zip(*lccolumns))for colind, col in enumerate(lcdict['']):lcdict[col.lower()] = np.array([COLUMNDEFS[col][](x)for x in lccolumns[colind]])lcdict[''] = [x.lower() for x in lcdict['']]return lcdict", "docstring": "This reads in a K2 lightcurve in CSV format. 
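The transparent gzip handling shared by these CSV readers follows a simple open-by-extension pattern; a sketch with assumed suffix checks, since the literal strings are masked above:

import gzip
import os

def open_lc_text(lcfile):
    # gzipped and plain text light curves are handled the same way downstream
    if os.path.basename(lcfile).endswith('.gz'):
        with gzip.open(lcfile, 'rb') as infd:
            return infd.read().decode()
    with open(lcfile, 'rb') as infd:
        return infd.read().decode()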
Transparently reads gzipped\nfiles.\n\nParameters\n----------\n\nlcfile : str\n The light curve file to read.\n\nReturns\n-------\n\ndict\n Returns an lcdict.", "id": "f14690:m1"} {"signature": "def generate_transit_lightcurve(times,mags=None,errs=None,paramdists={'':sps.uniform(loc=,scale=),'':sps.uniform(loc=,scale=),'':sps.uniform(loc=,scale=)},magsarefluxes=False,):", "body": "if mags is None:mags = np.full_like(times, )if errs is None:errs = np.full_like(times, )epoch = npr.random()*(times.max() - times.min()) + times.min()period = paramdists[''].rvs(size=)depth = paramdists[''].rvs(size=)duration = paramdists[''].rvs(size=)ingduration = npr.random()*(*duration - *duration) + *durationif magsarefluxes and depth < :depth = -depthelif not magsarefluxes and depth > :depth = -depthmodelmags, phase, ptimes, pmags, perrs = (transits.trapezoid_transit_func([period, epoch, depth,duration, ingduration],times,mags,errs))timeind = np.argsort(ptimes)mtimes = ptimes[timeind]mmags = modelmags[timeind]merrs = perrs[timeind]modeldict = {'':'','':{x:np.asscalar(y) for x,y in zip(['','','','',''],[period,epoch,depth,duration,ingduration])},'':mtimes,'':mmags,'':merrs,'':period,'':depth}return modeldict", "docstring": "This generates fake planet transit light curves.\n\n Parameters\n ----------\n\n times : np.array\n This is an array of time values that will be used as the time base.\n\n mags,errs : np.array\n These arrays will have the model added to them. If either is\n None, `np.full_like(times, 0.0)` will used as a substitute and the model\n light curve will be centered around 0.0.\n\n paramdists : dict\n This is a dict containing parameter distributions to use for the\n model params, containing the following keys ::\n\n {'transitperiod', 'transitdepth', 'transitduration'}\n\n The values of these keys should all be 'frozen' scipy.stats distribution\n objects, e.g.:\n\n https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions\n The variability epoch will be automatically chosen from a uniform\n distribution between `times.min()` and `times.max()`.\n\n The ingress duration will be automatically chosen from a uniform\n distribution ranging from 0.05 to 0.5 of the transitduration.\n\n The transitdepth will be flipped automatically as appropriate if\n `magsarefluxes=True`.\n\n magsarefluxes : bool\n If the generated time series is meant to be a flux time-series, set this\n to True to get the correct sign of variability amplitude.\n\n Returns\n -------\n\n dict\n A dict of the form below is returned::\n\n {'vartype': 'planet',\n 'params': {'transitperiod': generated value of period,\n 'transitepoch': generated value of epoch,\n 'transitdepth': generated value of transit depth,\n 'transitduration': generated value of transit duration,\n 'ingressduration': generated value of transit ingress\n duration},\n 'times': the model times,\n 'mags': the model mags,\n 'errs': the model errs,\n 'varperiod': the generated period of variability == 'transitperiod'\n 'varamplitude': the generated amplitude of\n variability == 'transitdepth'}", "id": "f14691:m1"} {"signature": "def generate_eb_lightcurve(times,mags=None,errs=None,paramdists={'':sps.uniform(loc=,scale=),'':sps.uniform(loc=,scale=),'':sps.uniform(loc=,scale=),'':sps.uniform(loc=,scale=),'':sps.norm(loc=,scale=)},magsarefluxes=False,):", "body": "if mags is None:mags = np.full_like(times, )if errs is None:errs = np.full_like(times, )epoch = npr.random()*(times.max() - times.min()) + times.min()period = paramdists[''].rvs(size=)pdepth = 
paramdists[''].rvs(size=)pduration = paramdists[''].rvs(size=)depthratio = paramdists[''].rvs(size=)secphase = paramdists[''].rvs(size=)if magsarefluxes and pdepth < :pdepth = -pdepthelif not magsarefluxes and pdepth > :pdepth = -pdepthmodelmags, phase, ptimes, pmags, perrs = (eclipses.invgauss_eclipses_func([period, epoch, pdepth,pduration, depthratio, secphase],times,mags,errs))timeind = np.argsort(ptimes)mtimes = ptimes[timeind]mmags = modelmags[timeind]merrs = perrs[timeind]modeldict = {'':'','':{x:np.asscalar(y) for x,y in zip(['','','','',''],[period,epoch,pdepth,pduration,depthratio])},'':mtimes,'':mmags,'':merrs,'':period,'':pdepth,}return modeldict", "docstring": "This generates fake EB light curves.\n\n Parameters\n ----------\n\n times : np.array\n This is an array of time values that will be used as the time base.\n\n mags,errs : np.array\n These arrays will have the model added to them. If either is\n None, `np.full_like(times, 0.0)` will used as a substitute and the model\n light curve will be centered around 0.0.\n\n paramdists : dict\n This is a dict containing parameter distributions to use for the\n model params, containing the following keys ::\n\n {'period', 'pdepth', 'pduration', 'depthratio', 'secphase'}\n\n The values of these keys should all be 'frozen' scipy.stats distribution\n objects, e.g.:\n\n https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions\n The variability epoch will be automatically chosen from a uniform\n distribution between `times.min()` and `times.max()`.\n\n The `pdepth` will be flipped automatically as appropriate if\n `magsarefluxes=True`.\n\n magsarefluxes : bool\n If the generated time series is meant to be a flux time-series, set this\n to True to get the correct sign of variability amplitude.\n\n Returns\n -------\n\n dict\n A dict of the form below is returned::\n\n {'vartype': 'EB',\n 'params': {'period': generated value of period,\n 'epoch': generated value of epoch,\n 'pdepth': generated value of priary eclipse depth,\n 'pduration': generated value of prim eclipse duration,\n 'depthratio': generated value of prim/sec eclipse\n depth ratio},\n 'times': the model times,\n 'mags': the model mags,\n 'errs': the model errs,\n 'varperiod': the generated period of variability == 'period'\n 'varamplitude': the generated amplitude of\n variability == 'pdepth'}", "id": "f14691:m2"} {"signature": "def generate_flare_lightcurve(times,mags=None,errs=None,paramdists={'':sps.uniform(loc=,scale=),'':[,],'':sps.uniform(loc=, scale=),'':sps.uniform(loc=, scale=)},magsarefluxes=False,):", "body": "if mags is None:mags = np.full_like(times, )if errs is None:errs = np.full_like(times, )nflares = npr.randint(paramdists[''][],high=paramdists[''][])flarepeaktimes = (npr.random(size=nflares)*(times.max() - times.min()) + times.min())params = {'':nflares}for flareind, peaktime in zip(range(nflares), flarepeaktimes):amp = paramdists[''].rvs(size=)risestdev = paramdists[''].rvs(size=)decayconst = paramdists[''].rvs(size=)if magsarefluxes and amp < :amp = -ampelif not magsarefluxes and amp > :amp = -ampmodelmags, ptimes, pmags, perrs = (flares.flare_model([amp, peaktime, risestdev, decayconst],times,mags,errs))mags = modelmagsparams[flareind] = {'':peaktime,'':amp,'':risestdev,'':decayconst}modeldict = {'':'','':params,'':times,'':mags,'':errs,'':None,'':[params[x]['']for x in range(params[''])],}return modeldict", "docstring": "This generates fake flare light curves.\n\n Parameters\n ----------\n\n times : np.array\n This is an array of 
time values that will be used as the time base.\n\n mags,errs : np.array\n These arrays will have the model added to them. If either is\n None, `np.full_like(times, 0.0)` will used as a substitute and the model\n light curve will be centered around 0.0.\n\n paramdists : dict\n This is a dict containing parameter distributions to use for the\n model params, containing the following keys ::\n\n {'amplitude', 'nflares', 'risestdev', 'decayconst'}\n\n The values of these keys should all be 'frozen' scipy.stats distribution\n objects, e.g.:\n\n https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions\n The `flare_peak_time` for each flare will be generated automatically\n between `times.min()` and `times.max()` using a uniform distribution.\n\n The `amplitude` will be flipped automatically as appropriate if\n `magsarefluxes=True`.\n\n magsarefluxes : bool\n If the generated time series is meant to be a flux time-series, set this\n to True to get the correct sign of variability amplitude.\n\n Returns\n -------\n\n dict\n A dict of the form below is returned::\n\n {'vartype': 'flare',\n 'params': {'amplitude': generated value of flare amplitudes,\n 'nflares': generated value of number of flares,\n 'risestdev': generated value of stdev of rise time,\n 'decayconst': generated value of decay constant,\n 'peaktime': generated value of flare peak time},\n 'times': the model times,\n 'mags': the model mags,\n 'errs': the model errs,\n 'varamplitude': the generated amplitude of\n variability == 'amplitude'}", "id": "f14691:m3"} {"signature": "def generate_sinusoidal_lightcurve(times,mags=None,errs=None,paramdists={'':sps.uniform(loc=,scale=),'':[,],'':sps.uniform(loc=,scale=),'':,},magsarefluxes=False):", "body": "if mags is None:mags = np.full_like(times, )if errs is None:errs = np.full_like(times, )epoch = npr.random()*(times.max() - times.min()) + times.min()period = paramdists[''].rvs(size=)fourierorder = npr.randint(paramdists[''][],high=paramdists[''][])amplitude = paramdists[''].rvs(size=)if magsarefluxes and amplitude < :amplitude = -amplitudeelif not magsarefluxes and amplitude > :amplitude = -amplitudeampcomps = [abs(amplitude/)/float(x)for x in range(,fourierorder+)]phacomps = [paramdists['']*float(x)for x in range(,fourierorder+)]modelmags, phase, ptimes, pmags, perrs = sinusoidal.sine_series_sum([period, epoch, ampcomps, phacomps],times,mags,errs)timeind = np.argsort(ptimes)mtimes = ptimes[timeind]mmags = modelmags[timeind]merrs = perrs[timeind]mphase = phase[timeind]modeldict = {'':'','':{x:y for x,y in zip(['','','','','',''],[period,epoch,amplitude,fourierorder,ampcomps,phacomps])},'':mtimes,'':mmags,'':merrs,'':mphase,'':period,'':amplitude}return modeldict", "docstring": "This generates fake sinusoidal light curves.\n\n This can be used for a variety of sinusoidal variables, e.g. RRab, RRc,\n Cepheids, Miras, etc. 
The functions that generate these model LCs below\n implement the following table::\n\n ## FOURIER PARAMS FOR SINUSOIDAL VARIABLES\n #\n # type fourier period [days]\n # order dist limits dist\n\n # RRab 8 to 10 uniform 0.45--0.80 uniform\n # RRc 3 to 6 uniform 0.10--0.40 uniform\n # HADS 7 to 9 uniform 0.04--0.10 uniform\n # rotator 2 to 5 uniform 0.80--120.0 uniform\n # LPV 2 to 5 uniform 250--500.0 uniform\n\n FIXME: for better model LCs, figure out how scipy.signal.butter works and\n low-pass filter using scipy.signal.filtfilt.\n\n Parameters\n ----------\n\n times : np.array\n This is an array of time values that will be used as the time base.\n\n mags,errs : np.array\n These arrays will have the model added to them. If either is\n None, `np.full_like(times, 0.0)` will used as a substitute and the model\n light curve will be centered around 0.0.\n\n paramdists : dict\n This is a dict containing parameter distributions to use for the\n model params, containing the following keys ::\n\n {'period', 'fourierorder', 'amplitude', 'phioffset'}\n\n The values of these keys should all be 'frozen' scipy.stats distribution\n objects, e.g.:\n\n https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions\n The variability epoch will be automatically chosen from a uniform\n distribution between `times.min()` and `times.max()`.\n\n The `amplitude` will be flipped automatically as appropriate if\n `magsarefluxes=True`.\n\n magsarefluxes : bool\n If the generated time series is meant to be a flux time-series, set this\n to True to get the correct sign of variability amplitude.\n\n Returns\n -------\n\n dict\n A dict of the form below is returned::\n\n {'vartype': 'sinusoidal',\n 'params': {'period': generated value of period,\n 'epoch': generated value of epoch,\n 'amplitude': generated value of amplitude,\n 'fourierorder': generated value of fourier order,\n 'fourieramps': generated values of fourier amplitudes,\n 'fourierphases': generated values of fourier phases},\n 'times': the model times,\n 'mags': the model mags,\n 'errs': the model errs,\n 'varperiod': the generated period of variability == 'period'\n 'varamplitude': the generated amplitude of\n variability == 'amplitude'}", "id": "f14691:m4"} {"signature": "def generate_rrab_lightcurve(times,mags=None,errs=None,paramdists={'':sps.uniform(loc=,scale=),'':[,],'':sps.uniform(loc=,scale=),'':np.pi,},magsarefluxes=False):", "body": "modeldict = generate_sinusoidal_lightcurve(times,mags=mags,errs=errs,paramdists=paramdists,magsarefluxes=magsarefluxes)modeldict[''] = ''return modeldict", "docstring": "This generates fake RRab light curves.\n\n Parameters\n ----------\n\n times : np.array\n This is an array of time values that will be used as the time base.\n\n mags,errs : np.array\n These arrays will have the model added to them. 
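A hedged usage sketch for these fake-LC generators; the paramdists keys and distributions below follow the parameter descriptions and the Fourier-order table above, but are illustrative rather than a prescribed configuration:

import numpy as np
import scipy.stats as sps

# a fake, sorted time base standing in for a real observing window
times = np.sort(np.random.uniform(0.0, 100.0, size=2000))

model = generate_rrab_lightcurve(
    times,
    paramdists={'period': sps.uniform(loc=0.45, scale=0.35),  # 0.45--0.80 d
                'fourierorder': [8, 11],                       # randint range
                'amplitude': sps.uniform(loc=0.1, scale=0.9),
                'phioffset': np.pi},
    magsarefluxes=False,
)
# keys follow the Returns description above
print(model['vartype'], model['varperiod'], model['varamplitude'])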
If either is\n None, `np.full_like(times, 0.0)` will used as a substitute and the model\n light curve will be centered around 0.0.\n\n paramdists : dict\n This is a dict containing parameter distributions to use for the\n model params, containing the following keys ::\n\n {'period', 'fourierorder', 'amplitude'}\n\n The values of these keys should all be 'frozen' scipy.stats distribution\n objects, e.g.:\n\n https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions\n The variability epoch will be automatically chosen from a uniform\n distribution between `times.min()` and `times.max()`.\n\n The `amplitude` will be flipped automatically as appropriate if\n `magsarefluxes=True`.\n\n magsarefluxes : bool\n If the generated time series is meant to be a flux time-series, set this\n to True to get the correct sign of variability amplitude.\n\n Returns\n -------\n\n dict\n A dict of the form below is returned::\n\n {'vartype': 'RRab',\n 'params': {'period': generated value of period,\n 'epoch': generated value of epoch,\n 'amplitude': generated value of amplitude,\n 'fourierorder': generated value of fourier order,\n 'fourieramps': generated values of fourier amplitudes,\n 'fourierphases': generated values of fourier phases},\n 'times': the model times,\n 'mags': the model mags,\n 'errs': the model errs,\n 'varperiod': the generated period of variability == 'period'\n 'varamplitude': the generated amplitude of\n variability == 'amplitude'}", "id": "f14691:m5"} {"signature": "def generate_rrc_lightcurve(times,mags=None,errs=None,paramdists={'':sps.uniform(loc=,scale=),'':[,],'':sps.uniform(loc=,scale=),'':*np.pi,},magsarefluxes=False):", "body": "modeldict = generate_sinusoidal_lightcurve(times,mags=mags,errs=errs,paramdists=paramdists,magsarefluxes=magsarefluxes)modeldict[''] = ''return modeldict", "docstring": "This generates fake RRc light curves.\n\n Parameters\n ----------\n\n times : np.array\n This is an array of time values that will be used as the time base.\n\n mags,errs : np.array\n These arrays will have the model added to them. 
If either is\n None, `np.full_like(times, 0.0)` will used as a substitute and the model\n light curve will be centered around 0.0.\n\n paramdists : dict\n This is a dict containing parameter distributions to use for the\n model params, containing the following keys ::\n\n {'period', 'fourierorder', 'amplitude'}\n\n The values of these keys should all be 'frozen' scipy.stats distribution\n objects, e.g.:\n\n https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions\n The variability epoch will be automatically chosen from a uniform\n distribution between `times.min()` and `times.max()`.\n\n The `amplitude` will be flipped automatically as appropriate if\n `magsarefluxes=True`.\n\n magsarefluxes : bool\n If the generated time series is meant to be a flux time-series, set this\n to True to get the correct sign of variability amplitude.\n\n Returns\n -------\n\n dict\n A dict of the form below is returned::\n\n {'vartype': 'RRc',\n 'params': {'period': generated value of period,\n 'epoch': generated value of epoch,\n 'amplitude': generated value of amplitude,\n 'fourierorder': generated value of fourier order,\n 'fourieramps': generated values of fourier amplitudes,\n 'fourierphases': generated values of fourier phases},\n 'times': the model times,\n 'mags': the model mags,\n 'errs': the model errs,\n 'varperiod': the generated period of variability == 'period'\n 'varamplitude': the generated amplitude of\n variability == 'amplitude'}", "id": "f14691:m6"} {"signature": "def generate_hads_lightcurve(times,mags=None,errs=None,paramdists={'':sps.uniform(loc=,scale=),'':[,],'':sps.uniform(loc=,scale=),'':np.pi,},magsarefluxes=False):", "body": "modeldict = generate_sinusoidal_lightcurve(times,mags=mags,errs=errs,paramdists=paramdists,magsarefluxes=magsarefluxes)modeldict[''] = ''return modeldict", "docstring": "This generates fake HADS light curves.\n\n Parameters\n ----------\n\n times : np.array\n This is an array of time values that will be used as the time base.\n\n mags,errs : np.array\n These arrays will have the model added to them. 
If either is\n None, `np.full_like(times, 0.0)` will used as a substitute and the model\n light curve will be centered around 0.0.\n\n paramdists : dict\n This is a dict containing parameter distributions to use for the\n model params, containing the following keys ::\n\n {'period', 'fourierorder', 'amplitude'}\n\n The values of these keys should all be 'frozen' scipy.stats distribution\n objects, e.g.:\n\n https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions\n The variability epoch will be automatically chosen from a uniform\n distribution between `times.min()` and `times.max()`.\n\n The `amplitude` will be flipped automatically as appropriate if\n `magsarefluxes=True`.\n\n magsarefluxes : bool\n If the generated time series is meant to be a flux time-series, set this\n to True to get the correct sign of variability amplitude.\n\n Returns\n -------\n\n dict\n A dict of the form below is returned::\n\n {'vartype': 'HADS',\n 'params': {'period': generated value of period,\n 'epoch': generated value of epoch,\n 'amplitude': generated value of amplitude,\n 'fourierorder': generated value of fourier order,\n 'fourieramps': generated values of fourier amplitudes,\n 'fourierphases': generated values of fourier phases},\n 'times': the model times,\n 'mags': the model mags,\n 'errs': the model errs,\n 'varperiod': the generated period of variability == 'period'\n 'varamplitude': the generated amplitude of\n variability == 'amplitude'}", "id": "f14691:m7"} {"signature": "def generate_rotator_lightcurve(times,mags=None,errs=None,paramdists={'':sps.uniform(loc=,scale=),'':[,],'':sps.uniform(loc=,scale=),'':*np.pi,},magsarefluxes=False):", "body": "modeldict = generate_sinusoidal_lightcurve(times,mags=mags,errs=errs,paramdists=paramdists,magsarefluxes=magsarefluxes)modeldict[''] = ''return modeldict", "docstring": "This generates fake rotator light curves.\n\n Parameters\n ----------\n\n times : np.array\n This is an array of time values that will be used as the time base.\n\n mags,errs : np.array\n These arrays will have the model added to them. 
If either is\n None, `np.full_like(times, 0.0)` will used as a substitute and the model\n light curve will be centered around 0.0.\n\n paramdists : dict\n This is a dict containing parameter distributions to use for the\n model params, containing the following keys ::\n\n {'period', 'fourierorder', 'amplitude'}\n\n The values of these keys should all be 'frozen' scipy.stats distribution\n objects, e.g.:\n\n https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions\n The variability epoch will be automatically chosen from a uniform\n distribution between `times.min()` and `times.max()`.\n\n The `amplitude` will be flipped automatically as appropriate if\n `magsarefluxes=True`.\n\n magsarefluxes : bool\n If the generated time series is meant to be a flux time-series, set this\n to True to get the correct sign of variability amplitude.\n\n Returns\n -------\n\n dict\n A dict of the form below is returned::\n\n {'vartype': 'rotator',\n 'params': {'period': generated value of period,\n 'epoch': generated value of epoch,\n 'amplitude': generated value of amplitude,\n 'fourierorder': generated value of fourier order,\n 'fourieramps': generated values of fourier amplitudes,\n 'fourierphases': generated values of fourier phases},\n 'times': the model times,\n 'mags': the model mags,\n 'errs': the model errs,\n 'varperiod': the generated period of variability == 'period'\n 'varamplitude': the generated amplitude of\n variability == 'amplitude'}", "id": "f14691:m8"} {"signature": "def generate_lpv_lightcurve(times,mags=None,errs=None,paramdists={'':sps.uniform(loc=,scale=),'':[,],'':sps.uniform(loc=,scale=),'':*np.pi,},magsarefluxes=False):", "body": "modeldict = generate_sinusoidal_lightcurve(times,mags=mags,errs=errs,paramdists=paramdists,magsarefluxes=magsarefluxes)modeldict[''] = ''return modeldict", "docstring": "This generates fake long-period-variable (LPV) light curves.\n\n Parameters\n ----------\n\n times : np.array\n This is an array of time values that will be used as the time base.\n\n mags,errs : np.array\n These arrays will have the model added to them. 
If either is\n None, `np.full_like(times, 0.0)` will used as a substitute and the model\n light curve will be centered around 0.0.\n\n paramdists : dict\n This is a dict containing parameter distributions to use for the\n model params, containing the following keys ::\n\n {'period', 'fourierorder', 'amplitude'}\n\n The values of these keys should all be 'frozen' scipy.stats distribution\n objects, e.g.:\n\n https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions\n The variability epoch will be automatically chosen from a uniform\n distribution between `times.min()` and `times.max()`.\n\n The `amplitude` will be flipped automatically as appropriate if\n `magsarefluxes=True`.\n\n magsarefluxes : bool\n If the generated time series is meant to be a flux time-series, set this\n to True to get the correct sign of variability amplitude.\n\n Returns\n -------\n\n dict\n A dict of the form below is returned::\n\n {'vartype': 'LPV',\n 'params': {'period': generated value of period,\n 'epoch': generated value of epoch,\n 'amplitude': generated value of amplitude,\n 'fourierorder': generated value of fourier order,\n 'fourieramps': generated values of fourier amplitudes,\n 'fourierphases': generated values of fourier phases},\n 'times': the model times,\n 'mags': the model mags,\n 'errs': the model errs,\n 'varperiod': the generated period of variability == 'period'\n 'varamplitude': the generated amplitude of\n variability == 'amplitude'}", "id": "f14691:m9"} {"signature": "def generate_cepheid_lightcurve(times,mags=None,errs=None,paramdists={'':sps.uniform(loc=,scale=),'':[,],'':sps.uniform(loc=,scale=),'':np.pi,},magsarefluxes=False):", "body": "modeldict = generate_sinusoidal_lightcurve(times,mags=mags,errs=errs,paramdists=paramdists,magsarefluxes=magsarefluxes)modeldict[''] = ''return modeldict", "docstring": "This generates fake Cepheid light curves.\n\n Parameters\n ----------\n\n times : np.array\n This is an array of time values that will be used as the time base.\n\n mags,errs : np.array\n These arrays will have the model added to them. 
If either is\n None, `np.full_like(times, 0.0)` will used as a substitute and the model\n light curve will be centered around 0.0.\n\n paramdists : dict\n This is a dict containing parameter distributions to use for the\n model params, containing the following keys ::\n\n {'period', 'fourierorder', 'amplitude'}\n\n The values of these keys should all be 'frozen' scipy.stats distribution\n objects, e.g.:\n\n https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions\n The variability epoch will be automatically chosen from a uniform\n distribution between `times.min()` and `times.max()`.\n\n The `amplitude` will be flipped automatically as appropriate if\n `magsarefluxes=True`.\n\n magsarefluxes : bool\n If the generated time series is meant to be a flux time-series, set this\n to True to get the correct sign of variability amplitude.\n\n Returns\n -------\n\n dict\n A dict of the form below is returned::\n\n {'vartype': 'cepheid',\n 'params': {'period': generated value of period,\n 'epoch': generated value of epoch,\n 'amplitude': generated value of amplitude,\n 'fourierorder': generated value of fourier order,\n 'fourieramps': generated values of fourier amplitudes,\n 'fourierphases': generated values of fourier phases},\n 'times': the model times,\n 'mags': the model mags,\n 'errs': the model errs,\n 'varperiod': the generated period of variability == 'period'\n 'varamplitude': the generated amplitude of\n variability == 'amplitude'}", "id": "f14691:m10"} {"signature": "def make_fakelc(lcfile,outdir,magrms=None,randomizemags=True,randomizecoords=False,lcformat='',lcformatdir=None,timecols=None,magcols=None,errcols=None):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(fileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif timecols is None:timecols = dtimecolsif magcols is None:magcols = dmagcolsif errcols is None:errcols = derrcolslcdict = readerfunc(lcfile)if isinstance(lcdict, tuple) and isinstance(lcdict[],dict):lcdict = lcdict[]fakeobjectid = sha512(npr.bytes()).hexdigest()[-:]fakelcdict = {'':fakeobjectid,'':{'':fakeobjectid},'':[],'':{},'':lcformat,}if ('' in lcdict andisinstance(lcdict[''], dict)):objectinfo = lcdict['']if (not randomizecoords and '' in objectinfo andobjectinfo[''] is not None andnp.isfinite(objectinfo[''])):fakelcdict[''][''] = objectinfo['']else:LOGWARNING('' % lcfile)fakelcdict[''][''] = npr.random()*if (not randomizecoords and '' in objectinfo andobjectinfo[''] is not None andnp.isfinite(objectinfo[''])):fakelcdict[''][''] = objectinfo['']else:LOGWARNING('' % lcfile)fakelcdict[''][''] = npr.random()* - if ((not randomizemags) and '' in objectinfo andobjectinfo[''] is not None andnp.isfinite(objectinfo[''])):fakelcdict[''][''] = objectinfo['']elif ((not randomizemags) and ('' in objectinfo andobjectinfo[''] is not None andnp.isfinite(objectinfo[''])) and('' in objectinfo andobjectinfo[''] is not None andnp.isfinite(objectinfo[''])) and('' in objectinfo andobjectinfo[''] is not None andnp.isfinite(objectinfo['']))):LOGWARNING('' %lcfile)fakelcdict[''][''] = jhk_to_sdssr(objectinfo[''],objectinfo[''],objectinfo[''])elif randomizemags and magrms:LOGWARNING('''' % lcfile)magbins = magrms[magcols[]]['']binprobs = magrms[magcols[]]['']magbincenter = npr.choice(magbins,size=,p=binprobs)chosenmag = (npr.random()*((magbincenter+) - (magbincenter-)) +(magbincenter-))fakelcdict[''][''] = 
np.asscalar(chosenmag)else:LOGWARNING('''' % lcfile)fakelcdict[''][''] = npr.random()* + else:LOGWARNING('''' %lcfile)fakelcdict[''][''] = npr.random()*fakelcdict[''][''] = npr.random()* - fakelcdict[''][''] = npr.random()* + for tcind, tcol in enumerate(timecols):if '' in tcol:tcolget = tcol.split('')else:tcolget = [tcol]if tcol not in fakelcdict:fakelcdict[tcol] = _dict_get(lcdict, tcolget)fakelcdict[''].append(tcol)if tcind == :fakelcdict[''][''] = fakelcdict[tcol].sizefor mcol in magcols:if '' in mcol:mcolget = mcol.split('')else:mcolget = [mcol]if mcol not in fakelcdict:measuredmags = _dict_get(lcdict, mcolget)measuredmags = measuredmags[np.isfinite(measuredmags)]if (randomizemags and magrms and mcol in magrms and'' in magrms[mcol] andmagrms[mcol][''] is not None):interpfunc = magrms[mcol]['']lcmad = interpfunc(fakelcdict[''][''])fakelcdict[''][mcol] = {'': fakelcdict[''][''],'': lcmad}else:if measuredmags.size > :measuredmedian = np.median(measuredmags)measuredmad = np.median(np.abs(measuredmags - measuredmedian))fakelcdict[''][mcol] = {'':measuredmedian,'':measuredmad}else:if (magrms and mcol in magrms and'' in magrms[mcol] andmagrms[mcol][''] is not None):LOGWARNING('''''''' % lcfile)interpfunc = magrms[mcol]['']lcmad = interpfunc(fakelcdict[''][''])fakelcdict[''][mcol] = {'': fakelcdict[''][''],'': lcmad}else:LOGWARNING(''''''''% lcfile)fakelcdict[''][mcol] = {'':fakelcdict[''][''],'':npr.random()*( - ) + }fakelcdict[mcol] = np.full_like(_dict_get(lcdict, mcolget), )fakelcdict[''].append(mcol)for mcol, ecol in zip(magcols, errcols):if '' in ecol:ecolget = ecol.split('')else:ecolget = [ecol]if ecol not in fakelcdict:measurederrs = _dict_get(lcdict, ecolget)measurederrs = measurederrs[np.isfinite(measurederrs)]if (randomizemags and magrms and mcol in magrms and'' in magrms[mcol] andmagrms[mcol][''] is not None):interpfunc = magrms[mcol]['']lcmad = interpfunc(fakelcdict[''][''])fakelcdict[''][ecol] = {'': lcmad,'': *lcmad}else:if measurederrs.size > :measuredmedian = np.median(measurederrs)measuredmad = np.median(np.abs(measurederrs - measuredmedian))fakelcdict[''][ecol] = {'':measuredmedian,'':measuredmad}else:if (magrms and mcol in magrms and'' in magrms[mcol] andmagrms[mcol][''] is not None):LOGWARNING('''''''' % lcfile)interpfunc = magrms[mcol]['']lcmad = interpfunc(fakelcdict[''][''])fakelcdict[''][ecol] = {'': lcmad,'': *lcmad}else:LOGWARNING('''''''' % lcfile)fakelcdict[''][ecol] = {'':npr.random()*( - ) + ,'':npr.random()*( - ) + }fakelcdict[ecol] = np.full_like(_dict_get(lcdict, ecolget), )fakelcdict[''].append(ecol)fakelcdict[''] = timecolsfakelcdict[''] = magcolsfakelcdict[''] = errcolsfakelcfname = '' % fakelcdict['']fakelcfpath = os.path.abspath(os.path.join(outdir, fakelcfname))with open(fakelcfpath,'') as outfd:pickle.dump(fakelcdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)LOGINFO('' % (lcfile, fakelcfpath))return (fakelcfpath, fakelcdict[''],fakelcdict[''], fakelcdict[''])", "docstring": "This preprocesses an input real LC and sets it up to be a fake LC.\n\n Parameters\n ----------\n\n lcfile : str\n This is an input light curve file that will be used to copy over the\n time-base. This will be used to generate the time-base for fake light\n curves to provide a realistic simulation of the observing window\n function.\n\n outdir : str\n The output directory where the the fake light curve will be written.\n\n magrms : dict\n This is a dict containing the SDSS r mag-RMS (SDSS rmag-MAD preferably)\n relation based on all light curves that the input lcfile is from. 
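When `randomizemags` is set, the body above picks a median magnitude by drawing a magnitude bin weighted by its occupation probability and then jittering uniformly around the chosen bin center. A hedged sketch of that draw; the helper name and the +/- 0.25 mag jitter half-width are assumptions, since the actual constant is masked in the body.

```python
import numpy as np

def sketch_weighted_median_mag(magbins, binprobs, halfwidth=0.25):
    # pick a bin center with probability proportional to how many real
    # objects fall into that magnitude bin
    magbincenter = np.random.choice(np.asarray(magbins),
                                    p=np.asarray(binprobs))
    # then jitter uniformly within +/- halfwidth mag of the bin center
    return np.random.uniform(magbincenter - halfwidth,
                             magbincenter + halfwidth)
```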
This\n        will be used to generate the median mag and noise corresponding to the\n        magnitude chosen for this fake LC.\n\n    randomizemags : bool\n        If this is True, then a random mag between the first and last magbin in\n        magrms will be chosen as the median mag for this light curve. This\n        choice will be weighted by the mag bin probability obtained from the\n        magrms kwarg. Otherwise, the median mag will be taken from the input\n        lcfile's lcdict['objectinfo']['sdssr'] key or a transformed SDSS r mag\n        generated from the input lcfile's lcdict['objectinfo']['jmag'],\n        ['hmag'], and ['kmag'] keys. The magrms relation for each magcol will be\n        used to generate Gaussian noise at the correct level for the magbin this\n        light curve's median mag falls into.\n\n    randomizecoords : bool\n        If this is True, will randomize the RA, DEC of the output fake object\n        and not copy over the RA/DEC from the real input object.\n\n    lcformat : str\n        This is the `formatkey` associated with your input real light curve\n        format, which you previously passed in to the `lcproc.register_lcformat`\n        function. This will be used to look up how to find and read the light\n        curve specified in `lcfile`.\n\n    lcformatdir : str or None\n        If this is provided, gives the path to a directory where you've stored\n        your lcformat description JSONs, other than the usual directories lcproc\n        knows to search for them in. Use this along with `lcformat` to specify\n        an LC format JSON file that's not currently registered with lcproc.\n\n    timecols : list of str or None\n        The timecol keys to use from the input lcdict in generating the fake\n        light curve. Fake LCs will be generated for each\n        timecol/magcol/errcol combination in the input light curve.\n\n    magcols : list of str or None\n        The magcol keys to use from the input lcdict in generating the fake\n        light curve. Fake LCs will be generated for each\n        timecol/magcol/errcol combination in the input light curve.\n\n    errcols : list of str or None\n        The errcol keys to use from the input lcdict in generating the fake\n        light curve. 
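The per-magcol 'moments' stored for each fake LC boil down to a median and a median absolute deviation (MAD), with the MAD later scaled to an equivalent Gaussian sigma when noise is added. A small sketch of that bookkeeping; the 1.4826 MAD-to-sigma consistency factor is assumed here because the actual constant is masked in the body above.

```python
import numpy as np

def sketch_mag_moments(mags):
    finite = mags[np.isfinite(mags)]
    median = np.median(finite)
    mad = np.median(np.abs(finite - median))
    # 1.4826 * MAD estimates the Gaussian-equivalent standard deviation
    return {'median': median, 'mad': mad, 'sigma_equiv': 1.4826 * mad}
```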
Fake LCs will be generated for each each\n timecol/magcol/errcol combination in the input light curve.\n\n Returns\n -------\n\n tuple\n A tuple of the following form is returned::\n\n (fakelc_fpath,\n fakelc_lcdict['columns'],\n fakelc_lcdict['objectinfo'],\n fakelc_lcdict['moments'])", "id": "f14691:m11"} {"signature": "def collection_worker(task):", "body": "lcfile, outdir, kwargs = tasktry:fakelcresults = make_fakelc(lcfile,outdir,**kwargs)return fakelcresultsexcept Exception as e:LOGEXCEPTION('' % lcfile)return None", "docstring": "This wraps `process_fakelc` for `make_fakelc_collection` below.\n\nParameters\n----------\n\ntask : tuple\n This is of the form::\n\n task[0] = lcfile\n task[1] = outdir\n task[2] = magrms\n task[3] = dict with keys: {'lcformat', 'timecols', 'magcols',\n 'errcols', 'randomizeinfo'}\n\nReturns\n-------\n\ntuple\n This returns a tuple of the form::\n\n (fakelc_fpath,\n fakelc_lcdict['columns'],\n fakelc_lcdict['objectinfo'],\n fakelc_lcdict['moments'])", "id": "f14691:m12"} {"signature": "def make_fakelc_collection(lclist,simbasedir,magrmsfrom,magrms_interpolate='',magrms_fillvalue='',maxlcs=,maxvars=,randomizemags=True,randomizecoords=False,vartypes=('','','','','','','','',''),lcformat='',lcformatdir=None,timecols=None,magcols=None,errcols=None):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(fileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif timecols is None:timecols = dtimecolsif magcols is None:magcols = dmagcolsif errcols is None:errcols = derrcolsif not isinstance(lclist, np.ndarray):lclist = np.array(lclist)chosenlcs = npr.choice(lclist, maxlcs, replace=False)fakelcdir = os.path.join(simbasedir, '')if not os.path.exists(fakelcdir):os.makedirs(fakelcdir)if isinstance(magrmsfrom, str) and os.path.exists(magrmsfrom):with open(magrmsfrom,'') as infd:xmagrms = pickle.load(infd)elif isinstance(magrmsfrom, dict):xmagrms = magrmsfrommagrms = {}for magcol in magcols:if (magcol in xmagrms and'' in xmagrms[magcol] and'' in xmagrms[magcol]):magrms[magcol] = {'':np.array(xmagrms[magcol]['']),'':np.array(xmagrms[magcol]['']),}interpolated_magmad = spi.interp1d(xmagrms[magcol][''],xmagrms[magcol][''],kind=magrms_interpolate,fill_value=magrms_fillvalue,)magrms[magcol][''] = interpolated_magmadmagbins = np.array(xmagrms[magcol][''])bincounts = np.array(xmagrms[magcol][''])binprobs = bincounts/np.sum(bincounts)magrms[magcol][''] = binprobselse:LOGWARNING('''' % magcol)magrms[magcol] = {'':None,'':None,'':None,'':None,}tasks = [(x, fakelcdir, {'':lcformat,'':timecols,'':magcols,'':errcols,'':magrms,'':randomizemags,'':randomizecoords})for x in chosenlcs]fakeresults = [collection_worker(task) for task in tasks]fakedb = {'':simbasedir,'':lcformat,'':timecols,'':magcols,'':errcols,'':magsarefluxes}fobjects, fpaths = [], []fras, fdecls, fndets = [], [], []fmags, fmagmads = [], []ferrmeds, ferrmads = [], []totalvars = isvariableind = npr.randint(,high=len(fakeresults), size=maxvars)isvariable = np.full(len(fakeresults), False, dtype=np.bool)isvariable[isvariableind] = Truefakedb[''] = isvariableLOGINFO('' % maxvars)vartypeind = npr.randint(,high=len(vartypes), size=maxvars)vartypearr = np.array([vartypes[x] for x in vartypeind])fakedb[''] = vartypearrfor vt in sorted(vartypes):LOGINFO('' % (vt, vartypearr[vartypearr == vt].size))LOGINFO('')for fr in fakeresults:if fr is not None:fpath, fcols, finfo, fmoments = 
frfobjects.append(finfo[''])fpaths.append(fpath)fras.append(finfo[''])fdecls.append(finfo[''])fndets.append(finfo[''])fmags.append(finfo[''])fmagmads.append([fmoments[x][''] for x in magcols])ferrmeds.append([fmoments[x][''] for x in errcols])ferrmads.append([fmoments[x][''] for x in errcols])fobjects = np.array(fobjects)fpaths = np.array(fpaths)fras = np.array(fras)fdecls = np.array(fdecls)fndets = np.array(fndets)fmags = np.array(fmags)fmagmads = np.array(fmagmads)ferrmeds = np.array(ferrmeds)ferrmads = np.array(ferrmads)fakedb[''] = fobjectsfakedb[''] = fpathsfakedb[''] = frasfakedb[''] = fdeclsfakedb[''] = fndetsfakedb[''] = fmagsfakedb[''] = fmagmadsfakedb[''] = ferrmedsfakedb[''] = ferrmadsfakedb[''] = magrmsdboutfname = os.path.join(simbasedir,'')with open(dboutfname, '') as outfd:pickle.dump(fakedb, outfd)LOGINFO('' % (len(fakeresults), simbasedir))LOGINFO('' % dboutfname)return dboutfname", "docstring": "This prepares light curves for the recovery sim.\n\n Collects light curves from `lclist` using a uniform sampling among\n them. Copies them to the `simbasedir`, zeroes out their mags and errs but\n keeps their time bases, also keeps their RMS and median mags for later\n use. Calculates the mag-rms relation for the entire collection and writes\n that to the `simbasedir` as well.\n\n The purpose of this function is to copy over the time base and mag-rms\n relation of an existing light curve collection to use it as the basis for a\n variability recovery simulation.\n\n This returns a pickle written to the `simbasedir` that contains all the\n information for the chosen ensemble of fake light curves and writes all\n generated light curves to the `simbasedir/lightcurves` directory. Run the\n `add_variability_to_fakelc_collection` function after this function to add\n variability of the specified type to these generated light curves.\n\n Parameters\n ----------\n\n lclist : list of str\n This is a list of existing project light curves. This can be generated\n from :py:func:`astrobase.lcproc.catalogs.make_lclist` or similar.\n\n simbasedir : str\n This is the directory to where the fake light curves and their\n information will be copied to.\n\n magrmsfrom : str or dict\n This is used to generate magnitudes and RMSes for the objects in the\n output collection of fake light curves. This arg is either a string\n pointing to an existing pickle file that must contain a dict or a dict\n variable that MUST have the following key-vals at a minimum::\n\n {'': {\n 'binned_sdssr_median': array of median mags for each magbin\n 'binned_lcmad_median': array of LC MAD values per magbin\n },\n '': {\n 'binned_sdssr_median': array of median mags for each magbin\n 'binned_lcmad_median': array of LC MAD values per magbin\n },\n .\n .\n ...}\n\n where `magcol1_name`, etc. are the same as the `magcols` listed in the\n magcols kwarg (or the default magcols for the specified\n lcformat). Examples of the magrmsfrom dict (or pickle) required can be\n generated by the\n :py:func:`astrobase.lcproc.varthreshold.variability_threshold` function.\n\n magrms_interpolate,magrms_fillvalue : str\n These are arguments that will be passed directly to the\n scipy.interpolate.interp1d function to generate interpolating functions\n for the mag-RMS relation. 
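The collection builder above converts the binned mag-MAD relation into a callable interpolating function via `scipy.interpolate.interp1d` and turns the per-bin object counts into selection probabilities. A hedged sketch of those two steps; the argument names and the `'extrapolate'` default are assumptions for this example.

```python
import numpy as np
from scipy import interpolate as spi

def sketch_magrms_interp(binned_sdssr_median, binned_lcmad_median, bincounts,
                         kind='linear', fill_value='extrapolate'):
    magbins = np.asarray(binned_sdssr_median)
    lcmad = np.asarray(binned_lcmad_median)
    # callable that returns the expected LC MAD at any median magnitude
    interpfunc = spi.interp1d(magbins, lcmad, kind=kind, fill_value=fill_value)
    # probability of drawing each magnitude bin, weighted by its occupancy
    binprobs = np.asarray(bincounts, dtype=float) / np.sum(bincounts)
    return interpfunc, binprobs
```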
See:\n\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html\n\n for details.\n\n maxlcs : int\n This is the total number of light curves to choose from `lclist` and\n generate as fake LCs.\n\n maxvars : int\n This is the total number of fake light curves that will be marked as\n variable.\n\n vartypes : list of str\n This is a list of variable types to put into the collection. The\n vartypes for each fake variable star will be chosen uniformly from this\n list.\n\n lcformat : str\n This is the `formatkey` associated with your input real light curves'\n format, which you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `lclist`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n timecols : list of str or None\n The timecol keys to use from the input lcdict in generating the fake\n light curve. Fake LCs will be generated for each each\n timecol/magcol/errcol combination in the input light curves.\n\n magcols : list of str or None\n The magcol keys to use from the input lcdict in generating the fake\n light curve. Fake LCs will be generated for each each\n timecol/magcol/errcol combination in the input light curves.\n\n errcols : list of str or None\n The errcol keys to use from the input lcdict in generating the fake\n light curve. Fake LCs will be generated for each each\n timecol/magcol/errcol combination in the input light curves.\n\n Returns\n -------\n\n str\n Returns the string file name of a pickle containing all of the\n information for the fake LC collection that has been generated.", "id": "f14691:m13"} {"signature": "def add_fakelc_variability(fakelcfile,vartype,override_paramdists=None,magsarefluxes=False,overwrite=False):", "body": "lcdict = _read_pklc(fakelcfile)if ('' in lcdict and'' in lcdict andnot overwrite):LOGERROR('''''' %(fakelcfile, lcdict[''],repr(lcdict[''])))return Nonetimecols, magcols, errcols = (lcdict[''],lcdict[''],lcdict[''])if vartype in VARTYPE_LCGEN_MAP:vargenfunc = VARTYPE_LCGEN_MAP[vartype]elif vartype is None:vargenfunc = Noneelse:LOGERROR('' %(vartype, repr(list(VARTYPE_LCGEN_MAP.keys()))))return Noneif vargenfunc is not None:if (override_paramdists is not None andisinstance(override_paramdists,dict)):variablelc = vargenfunc(lcdict[timecols[]],paramdists=override_paramdists,magsarefluxes=magsarefluxes)else:variablelc = vargenfunc(lcdict[timecols[]],magsarefluxes=magsarefluxes)else:variablelc = {'':None,'':None,'':lcdict[timecols[]],'':np.full_like(lcdict[timecols[]], ),'':np.full_like(lcdict[timecols[]], )}for tcol, mcol, ecol in zip(timecols, magcols, errcols):times, mags, errs = lcdict[tcol], lcdict[mcol], lcdict[ecol]mag_median = lcdict[''][mcol]['']mag_mad = lcdict[''][mcol]['']mag_rms = mag_mad*err_median = lcdict[''][ecol]['']err_mad = lcdict[''][ecol]['']err_rms = err_mad*magnoise = npr.normal(size=variablelc[''].size)*mag_rmserrnoise = npr.normal(size=variablelc[''].size)*err_rmsfinalmags = mag_median + (variablelc[''] + magnoise)finalerrs = err_median + (variablelc[''] + errnoise)lcdict[mcol] = finalmagslcdict[ecol] = finalerrslcdict[''] = variablelc['']lcdict[''] = variablelc['']if vartype is not None:lcdict[''] = variablelc['']lcdict[''] = 
variablelc['']else:lcdict[''] = np.nanlcdict[''] = np.nantempoutf = '' % (fakelcfile, md5(npr.bytes()).hexdigest()[-:])with open(tempoutf, '') as outfd:pickle.dump(lcdict, outfd, pickle.HIGHEST_PROTOCOL)if os.path.exists(tempoutf):shutil.copy(tempoutf, fakelcfile)os.remove(tempoutf)else:LOGEXCEPTION('' %os.path.dirname(tempoutf))raiseLOGINFO('' % (lcdict[''],vartype,fakelcfile))return {'':lcdict[''],'':fakelcfile,'':vartype,'':lcdict['']}", "docstring": "This adds variability of the specified type to the fake LC.\n\n The procedure is (for each `magcol`):\n\n - read the fakelcfile, get the stored moments and vartype info\n\n - add the periodic variability specified in vartype and varparamdists. if\n `vartype == None`, then do nothing in this step. If `override_vartype` is\n not None, override stored vartype with specified vartype. If\n `override_varparamdists` provided, override with specified\n `varparamdists`. NOTE: the varparamdists must make sense for the vartype,\n otherwise, weird stuff will happen.\n\n - add the median mag level stored in `fakelcfile` to the time series\n\n - add Gaussian noise to the light curve as specified in `fakelcfile`\n\n - add a varinfo key and dict to the lcdict with `varperiod`, `varepoch`,\n `varparams`\n\n - write back to fake LC pickle\n\n - return the `varinfo` dict to the caller\n\n Parameters\n ----------\n\n fakelcfile : str\n The name of the fake LC file to process.\n\n vartype : str\n The type of variability to add to this fake LC file.\n\n override_paramdists : dict\n A parameter distribution dict as in the `generate_XX_lightcurve`\n functions above. If provided, will override the distribution stored in\n the input fake LC file itself.\n\n magsarefluxes : bool\n Sets if the variability amplitude is in fluxes and not magnitudes.\n\n overwite : bool\n This overwrites the input fake LC file with a new variable LC even if\n it's been processed before.\n\n Returns\n -------\n\n dict\n A dict of the following form is returned::\n\n {'objectid':lcdict['objectid'],\n 'lcfname':fakelcfile,\n 'actual_vartype':vartype,\n 'actual_varparams':lcdict['actual_varparams']}", "id": "f14691:m14"} {"signature": "def add_variability_to_fakelc_collection(simbasedir,override_paramdists=None,overwrite_existingvar=False):", "body": "infof = os.path.join(simbasedir,'')with open(infof, '') as infd:lcinfo = pickle.load(infd)lclist = lcinfo['']varflag = lcinfo['']vartypes = lcinfo['']vartind = varinfo = {}for lc, varf, _lcind in zip(lclist, varflag, range(len(lclist))):if varf:thisvartype = vartypes[vartind]if (override_paramdists andisinstance(override_paramdists, dict) andthisvartype in override_paramdists andisinstance(override_paramdists[thisvartype], dict)):thisoverride_paramdists = override_paramdists[thisvartype]else:thisoverride_paramdists = Nonevarlc = add_fakelc_variability(lc, thisvartype,override_paramdists=thisoverride_paramdists,overwrite=overwrite_existingvar)varinfo[varlc['']] = {'': varlc[''],'': varlc['']}vartind = vartind + else:varlc = add_fakelc_variability(lc, None,overwrite=overwrite_existingvar)varinfo[varlc['']] = {'': varlc[''],'': varlc['']}lcinfo[''] = varinfotempoutf = '' % (infof, md5(npr.bytes()).hexdigest()[-:])with open(tempoutf, '') as outfd:pickle.dump(lcinfo, outfd, pickle.HIGHEST_PROTOCOL)if os.path.exists(tempoutf):shutil.copy(tempoutf, infof)os.remove(tempoutf)else:LOGEXCEPTION('' %os.path.dirname(tempoutf))raisereturn lcinfo", "docstring": "This adds variability and noise to all fake LCs in `simbasedir`.\n\n If an object is marked as 
variable in the `fakelcs-info`.pkl file in\n `simbasedir`, a variable signal will be added to its light curve based on\n its selected type, default period and amplitude distribution, the\n appropriate params, etc. the epochs for each variable object will be chosen\n uniformly from its time-range (and may not necessarily fall on a actual\n observed time). Nonvariable objects will only have noise added as determined\n by their params, but no variable signal will be added.\n\n Parameters\n ----------\n\n simbasedir : str\n The directory containing the fake LCs to process.\n\n override_paramdists : dict\n This can be used to override the stored variable parameters in each fake\n LC. It should be a dict of the following form::\n\n {'': {': a scipy.stats distribution function or\n the np.random.randint function,\n .\n .\n .\n ': a scipy.stats distribution function\n or the np.random.randint function}\n\n for any vartype in VARTYPE_LCGEN_MAP. These are used to override the\n default parameter distributions for each variable type.\n\n overwrite_existingvar : bool\n If this is True, then will overwrite any existing variability in the\n input fake LCs in `simbasedir`.\n\n Returns\n -------\n\n dict\n This returns a dict containing the fake LC filenames as keys and\n variability info for each as values.", "id": "f14691:m15"} {"signature": "def read_fakelc(fakelcfile):", "body": "try:with open(fakelcfile,'') as infd:lcdict = pickle.load(infd)except UnicodeDecodeError:with open(fakelcfile,'') as infd:lcdict = pickle.load(infd, encoding='')return lcdict", "docstring": "This just reads a pickled fake LC.\n\nParameters\n----------\n\nfakelcfile : str\n The fake LC file to read.\n\nReturns\n-------\n\ndict\n This returns an lcdict.", "id": "f14692:m1"} {"signature": "def get_varfeatures(simbasedir,mindet=,nworkers=None):", "body": "with open(os.path.join(simbasedir, ''),'') as infd:siminfo = pickle.load(infd)lcfpaths = siminfo['']varfeaturedir = os.path.join(simbasedir,'')timecols = siminfo['']magcols = siminfo['']errcols = siminfo['']timecols = siminfo['']magcols = siminfo['']errcols = siminfo['']fakelc_formatkey = '' % siminfo['']lcproc.register_lcformat(fakelc_formatkey,'',timecols,magcols,errcols,'','',magsarefluxes=siminfo[''])varinfo = lcvfeatures.parallel_varfeatures(lcfpaths,varfeaturedir,lcformat=fakelc_formatkey,mindet=mindet,nworkers=nworkers)with open(os.path.join(simbasedir,''),'') as outfd:pickle.dump(varinfo, outfd, pickle.HIGHEST_PROTOCOL)return os.path.join(simbasedir,'')", "docstring": "This runs `lcproc.lcvfeatures.parallel_varfeatures` on fake LCs in\n `simbasedir`.\n\n Parameters\n ----------\n\n simbasedir : str\n The directory containing the fake LCs to process.\n\n mindet : int\n The minimum number of detections needed to accept an LC and process it.\n\n nworkers : int or None\n The number of parallel workers to use when extracting variability\n features from the input light curves.\n\n Returns\n -------\n\n str\n The path to the `varfeatures` pickle created after running the\n `lcproc.lcvfeatures.parallel_varfeatures` function.", "id": "f14692:m2"} {"signature": "def precision(ntp, nfp):", "body": "if (ntp+nfp) > :return ntp/(ntp+nfp)else:return np.nan", "docstring": "This calculates precision.\n\nhttps://en.wikipedia.org/wiki/Precision_and_recall\n\nParameters\n----------\n\nntp : int\n The number of true positives.\n\nnfp : int\n The number of false positives.\n\nReturns\n-------\n\nfloat\n The precision calculated using `ntp/(ntp + nfp)`.", "id": "f14692:m3"} {"signature": "def 
recall(ntp, nfn):", "body": "if (ntp+nfn) > :return ntp/(ntp+nfn)else:return np.nan", "docstring": "This calculates recall.\n\nhttps://en.wikipedia.org/wiki/Precision_and_recall\n\nParameters\n----------\n\nntp : int\n The number of true positives.\n\nnfn : int\n The number of false negatives.\n\nReturns\n-------\n\nfloat\n The precision calculated using `ntp/(ntp + nfn)`.", "id": "f14692:m4"} {"signature": "def matthews_correl_coeff(ntp, ntn, nfp, nfn):", "body": "mcc_top = (ntp*ntn - nfp*nfn)mcc_bot = msqrt((ntp + nfp)*(ntp + nfn)*(ntn + nfp)*(ntn + nfn))if mcc_bot > :return mcc_top/mcc_botelse:return np.nan", "docstring": "This calculates the Matthews correlation coefficent.\n\nhttps://en.wikipedia.org/wiki/Matthews_correlation_coefficient\n\nParameters\n----------\n\nntp : int\n The number of true positives.\n\nntn : int\n The number of true negatives\n\nnfp : int\n The number of false positives.\n\nnfn : int\n The number of false negatives.\n\nReturns\n-------\n\nfloat\n The Matthews correlation coefficient.", "id": "f14692:m5"} {"signature": "def get_recovered_variables_for_magbin(simbasedir,magbinmedian,stetson_stdev_min=,inveta_stdev_min=,iqr_stdev_min=,statsonly=True):", "body": "with open(os.path.join(simbasedir, ''),'') as infd:siminfo = pickle.load(infd)objectids = siminfo['']varflags = siminfo['']sdssr = siminfo['']timecols = siminfo['']magcols = siminfo['']errcols = siminfo['']fakelc_formatkey = '' % siminfo['']lcproc.register_lcformat(fakelc_formatkey,'',timecols,magcols,errcols,'','',magsarefluxes=siminfo[''])outdir = os.path.join(simbasedir, '')if not os.path.exists(outdir):os.mkdir(outdir)varfeaturedir = os.path.join(simbasedir, '')varthreshinfof = os.path.join(outdir,'' % (magbinmedian,stetson_stdev_min,inveta_stdev_min))varthresh = varthreshold.variability_threshold(varfeaturedir,varthreshinfof,lcformat=fakelc_formatkey,min_stetj_stdev=stetson_stdev_min,min_inveta_stdev=inveta_stdev_min,min_iqr_stdev=iqr_stdev_min,verbose=False)magbins = varthresh['']magbininds = np.digitize(sdssr, magbins)binned_objectids = []binned_actualvars = []binned_actualnotvars = []for mbinind, _magi in zip(np.unique(magbininds),range(len(magbins)-)):thisbinind = np.where(magbininds == mbinind)thisbin_objectids = objectids[thisbinind]thisbin_varflags = varflags[thisbinind]thisbin_actualvars = thisbin_objectids[thisbin_varflags]thisbin_actualnotvars = thisbin_objectids[~thisbin_varflags]binned_objectids.append(thisbin_objectids)binned_actualvars.append(thisbin_actualvars)binned_actualnotvars.append(thisbin_actualnotvars)recdict = {'':simbasedir,'':timecols,'':magcols,'':errcols,'':siminfo[''],'':stetson_stdev_min,'':inveta_stdev_min,'':iqr_stdev_min,'':magbinmedian,}for magcol in magcols:magbinind = np.where(np.array(varthresh[magcol]['']) == magbinmedian)magbinind = np.asscalar(magbinind[])thisbin_objectids = binned_objectids[magbinind]thisbin_actualvars = binned_actualvars[magbinind]thisbin_actualnotvars = binned_actualnotvars[magbinind]stet_recoveredvars = varthresh[magcol][''][magbinind]stet_recoverednotvars = np.setdiff1d(thisbin_objectids,stet_recoveredvars)stet_truepositives = np.intersect1d(stet_recoveredvars,thisbin_actualvars)stet_falsepositives = np.intersect1d(stet_recoveredvars,thisbin_actualnotvars)stet_truenegatives = np.intersect1d(stet_recoverednotvars,thisbin_actualnotvars)stet_falsenegatives = np.intersect1d(stet_recoverednotvars,thisbin_actualvars)stet_recall = recall(stet_truepositives.size,stet_falsenegatives.size)stet_precision = 
precision(stet_truepositives.size,stet_falsepositives.size)stet_mcc = matthews_correl_coeff(stet_truepositives.size,stet_truenegatives.size,stet_falsepositives.size,stet_falsenegatives.size)inveta_recoveredvars = varthresh[magcol][''][magbinind]inveta_recoverednotvars = np.setdiff1d(thisbin_objectids,inveta_recoveredvars)inveta_truepositives = np.intersect1d(inveta_recoveredvars,thisbin_actualvars)inveta_falsepositives = np.intersect1d(inveta_recoveredvars,thisbin_actualnotvars)inveta_truenegatives = np.intersect1d(inveta_recoverednotvars,thisbin_actualnotvars)inveta_falsenegatives = np.intersect1d(inveta_recoverednotvars,thisbin_actualvars)inveta_recall = recall(inveta_truepositives.size,inveta_falsenegatives.size)inveta_precision = precision(inveta_truepositives.size,inveta_falsepositives.size)inveta_mcc = matthews_correl_coeff(inveta_truepositives.size,inveta_truenegatives.size,inveta_falsepositives.size,inveta_falsenegatives.size)iqr_recoveredvars = varthresh[magcol][''][magbinind]iqr_recoverednotvars = np.setdiff1d(thisbin_objectids,iqr_recoveredvars)iqr_truepositives = np.intersect1d(iqr_recoveredvars,thisbin_actualvars)iqr_falsepositives = np.intersect1d(iqr_recoveredvars,thisbin_actualnotvars)iqr_truenegatives = np.intersect1d(iqr_recoverednotvars,thisbin_actualnotvars)iqr_falsenegatives = np.intersect1d(iqr_recoverednotvars,thisbin_actualvars)iqr_recall = recall(iqr_truepositives.size,iqr_falsenegatives.size)iqr_precision = precision(iqr_truepositives.size,iqr_falsepositives.size)iqr_mcc = matthews_correl_coeff(iqr_truepositives.size,iqr_truenegatives.size,iqr_falsepositives.size,iqr_falsenegatives.size)stet_missed_inveta_found = np.setdiff1d(inveta_truepositives,stet_truepositives)stet_missed_iqr_found = np.setdiff1d(iqr_truepositives,stet_truepositives)inveta_missed_stet_found = np.setdiff1d(stet_truepositives,inveta_truepositives)inveta_missed_iqr_found = np.setdiff1d(iqr_truepositives,inveta_truepositives)iqr_missed_stet_found = np.setdiff1d(stet_truepositives,iqr_truepositives)iqr_missed_inveta_found = np.setdiff1d(inveta_truepositives,iqr_truepositives)if not statsonly:recdict[magcol] = {'':stet_recoveredvars,'':stet_truepositives,'':stet_falsepositives,'':stet_truenegatives,'':stet_falsenegatives,'':stet_precision,'':stet_recall,'':stet_mcc,'':inveta_recoveredvars,'':inveta_truepositives,'':inveta_falsepositives,'':inveta_truenegatives,'':inveta_falsenegatives,'':inveta_precision,'':inveta_recall,'':inveta_mcc,'':iqr_recoveredvars,'':iqr_truepositives,'':iqr_falsepositives,'':iqr_truenegatives,'':iqr_falsenegatives,'':iqr_precision,'':iqr_recall,'':iqr_mcc,'':stet_missed_inveta_found,'':stet_missed_iqr_found,'':inveta_missed_stet_found,'':inveta_missed_iqr_found,'':iqr_missed_stet_found,'':iqr_missed_inveta_found,'':thisbin_actualvars,'':thisbin_actualnotvars,'':thisbin_objectids,'':magbinind,}else:recdict[magcol] = 
{'':stet_recoveredvars.size,'':stet_truepositives.size,'':stet_falsepositives.size,'':stet_truenegatives.size,'':stet_falsenegatives.size,'':stet_precision,'':stet_recall,'':stet_mcc,'':inveta_recoveredvars.size,'':inveta_truepositives.size,'':inveta_falsepositives.size,'':inveta_truenegatives.size,'':inveta_falsenegatives.size,'':inveta_precision,'':inveta_recall,'':inveta_mcc,'':iqr_recoveredvars.size,'':iqr_truepositives.size,'':iqr_falsepositives.size,'':iqr_truenegatives.size,'':iqr_falsenegatives.size,'':iqr_precision,'':iqr_recall,'':iqr_mcc,'':stet_missed_inveta_found.size,'':stet_missed_iqr_found.size,'':inveta_missed_stet_found.size,'':inveta_missed_iqr_found.size,'':iqr_missed_stet_found.size,'':iqr_missed_inveta_found.size,'':thisbin_actualvars.size,'':thisbin_actualnotvars.size,'':thisbin_objectids.size,'':magbinind,}return recdict", "docstring": "This runs variability selection for the given magbinmedian.\n\n To generate a full recovery matrix over all magnitude bins, run this\n function for each magbin over the specified stetson_stdev_min and\n inveta_stdev_min grid.\n\n Parameters\n ----------\n\n simbasedir : str\n The input directory of fake LCs.\n\n magbinmedian : float\n The magbin to run the variable recovery for. This is an item from the\n dict from `simbasedir/fakelcs-info.pkl: `fakelcinfo['magrms'][magcol]`\n list for each magcol and designates which magbin to get the recovery\n stats for.\n\n stetson_stdev_min : float\n The minimum sigma above the trend in the Stetson J variability index\n distribution for this magbin to use to consider objects as variable.\n\n inveta_stdev_min : float\n The minimum sigma above the trend in the 1/eta variability index\n distribution for this magbin to use to consider objects as variable.\n\n iqr_stdev_min : float\n The minimum sigma above the trend in the IQR variability index\n distribution for this magbin to use to consider objects as variable.\n\n statsonly : bool\n If this is True, only the final stats will be returned. 
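The per-magbin statistics above come down to a confusion matrix assembled with NumPy set operations, from which precision, recall, and the Matthews correlation coefficient follow. A small worked example with made-up object IDs:

```python
import numpy as np
from math import sqrt

allobjs = np.array(['obj1', 'obj2', 'obj3', 'obj4', 'obj5', 'obj6'])
actual_vars = np.array(['obj1', 'obj2', 'obj3'])
recovered_vars = np.array(['obj2', 'obj3', 'obj5'])

actual_notvars = np.setdiff1d(allobjs, actual_vars)
recovered_notvars = np.setdiff1d(allobjs, recovered_vars)

ntp = np.intersect1d(recovered_vars, actual_vars).size        # 2
nfp = np.intersect1d(recovered_vars, actual_notvars).size     # 1
ntn = np.intersect1d(recovered_notvars, actual_notvars).size  # 2
nfn = np.intersect1d(recovered_notvars, actual_vars).size     # 1

precision = ntp / (ntp + nfp)                                  # 2/3
recall = ntp / (ntp + nfn)                                     # 2/3
mcc = (ntp * ntn - nfp * nfn) / sqrt(
    (ntp + nfp) * (ntp + nfn) * (ntn + nfp) * (ntn + nfn))     # 3/9 = 1/3
```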
If False, the\n full arrays used to generate the stats will also be returned.\n\n Returns\n -------\n\n dict\n The returned dict contains statistics for this magbin and if requested,\n the full arrays used to calculate the statistics.", "id": "f14692:m6"} {"signature": "def magbin_varind_gridsearch_worker(task):", "body": "simbasedir, gridpoint, magbinmedian = tasktry:res = get_recovered_variables_for_magbin(simbasedir,magbinmedian,stetson_stdev_min=gridpoint[],inveta_stdev_min=gridpoint[],iqr_stdev_min=gridpoint[],statsonly=True)return resexcept Exception as e:LOGEXCEPTION('' % gridpoint)return None", "docstring": "This is a parallel grid search worker for the function below.", "id": "f14692:m7"} {"signature": "def variable_index_gridsearch_magbin(simbasedir,stetson_stdev_range=(,),inveta_stdev_range=(,),iqr_stdev_range=(,),ngridpoints=,ngridworkers=None):", "body": "outdir = os.path.join(simbasedir,'')if not os.path.exists(outdir):os.mkdir(outdir)with open(os.path.join(simbasedir, ''),'') as infd:siminfo = pickle.load(infd)timecols = siminfo['']magcols = siminfo['']errcols = siminfo['']magbinmedians = siminfo[''][magcols[]]['']stetson_grid = np.linspace(stetson_stdev_range[],stetson_stdev_range[],num=ngridpoints)inveta_grid = np.linspace(inveta_stdev_range[],inveta_stdev_range[],num=ngridpoints)iqr_grid = np.linspace(iqr_stdev_range[],iqr_stdev_range[],num=ngridpoints)stet_inveta_iqr_grid = []for stet in stetson_grid:for inveta in inveta_grid:for iqr in iqr_grid:grid_point = [stet, inveta, iqr]stet_inveta_iqr_grid.append(grid_point)grid_results = {'':stetson_grid,'':inveta_grid,'':iqr_grid,'':stet_inveta_iqr_grid,'':magbinmedians,'':timecols,'':magcols,'':errcols,'':os.path.abspath(simbasedir),'':[]}pool = mp.Pool(ngridworkers)for magbinmedian in magbinmedians:LOGINFO('''' % magbinmedian)tasks = [(simbasedir, gp, magbinmedian) for gp in stet_inveta_iqr_grid]thisbin_results = pool.map(magbin_varind_gridsearch_worker, tasks)grid_results[''].append(thisbin_results)pool.close()pool.join()LOGINFO('')with open(os.path.join(simbasedir,''),'') as outfd:pickle.dump(grid_results,outfd,pickle.HIGHEST_PROTOCOL)return grid_results", "docstring": "This runs a variable index grid search per magbin.\n\n For each magbin, this does a grid search using the stetson and inveta ranges\n provided and tries to optimize the Matthews Correlation Coefficient (best\n value is +1.0), indicating the best possible separation of variables\n vs. nonvariables. The thresholds on these two variable indexes that produce\n the largest coeff for the collection of fake LCs will probably be the ones\n that work best for actual variable classification on the real LCs.\n\n https://en.wikipedia.org/wiki/Matthews_correlation_coefficient\n\n For each grid-point, calculates the true positives, false positives, true\n negatives, false negatives. Then gets the precision and recall, confusion\n matrix, and the ROC curve for variable vs. nonvariable.\n\n Once we've identified the best thresholds to use, we can then calculate\n variable object numbers:\n\n - as a function of magnitude\n - as a function of period\n - as a function of number of detections\n - as a function of amplitude of variability\n\n\n Writes everything back to `simbasedir/fakevar-recovery.pkl`. 
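The grid search above builds every combination of the three index thresholds with `np.linspace` and evaluates each grid point per magbin with a multiprocessing pool. A hedged sketch of just that scaffolding; the grid ranges, point count, and placeholder worker are assumptions, and a real worker would call the per-magbin recovery function instead.

```python
import multiprocessing as mp
import numpy as np

def sketch_build_grid(stet_range=(2.0, 20.0), inveta_range=(2.0, 20.0),
                      iqr_range=(2.0, 20.0), ngridpoints=4):
    stet_grid = np.linspace(*stet_range, num=ngridpoints)
    inveta_grid = np.linspace(*inveta_range, num=ngridpoints)
    iqr_grid = np.linspace(*iqr_range, num=ngridpoints)
    # every (stetson, inveta, iqr) combination, in a fixed nesting order
    return [[s, i, q] for s in stet_grid for i in inveta_grid for q in iqr_grid]

def sketch_worker(task):
    simbasedir, gridpoint, magbinmedian = task
    # placeholder: a real worker runs the per-magbin recovery at this point
    return {'gridpoint': gridpoint, 'magbinmedian': magbinmedian}

if __name__ == '__main__':
    tasks = [('/path/to/simbasedir', gp, 14.5) for gp in sketch_build_grid()]
    with mp.Pool(4) as pool:
        results = pool.map(sketch_worker, tasks)
```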
Use the\n plotting function below to make plots for the results.\n\n Parameters\n ----------\n\n simbasedir : str\n The directory where the fake LCs are located.\n\n stetson_stdev_range : sequence of 2 floats\n The min and max values of the Stetson J variability index to generate a\n grid over these to test for the values of this index that produce the\n 'best' recovery rate for the injected variable stars.\n\n inveta_stdev_range : sequence of 2 floats\n The min and max values of the 1/eta variability index to generate a\n grid over these to test for the values of this index that produce the\n 'best' recovery rate for the injected variable stars.\n\n iqr_stdev_range : sequence of 2 floats\n The min and max values of the IQR variability index to generate a\n grid over these to test for the values of this index that produce the\n 'best' recovery rate for the injected variable stars.\n\n ngridpoints : int\n The number of grid points for each variability index grid. Remember that\n this function will be searching in 3D and will require lots of time to\n run if ngridpoints is too large.\n\n For the default number of grid points and 25000 simulated light curves,\n this takes about 3 days to run on a 40 (effective) core machine with 2 x\n Xeon E5-2650v3 CPUs.\n\n ngridworkers : int or None\n The number of parallel grid search workers that will be launched.\n\n Returns\n -------\n\n dict\n The returned dict contains a list of recovery stats for each magbin and\n each grid point in the variability index grids that were used. This dict\n can be passed to the plotting function below to plot the results.", "id": "f14692:m8"} {"signature": "def plot_varind_gridsearch_magbin_results(gridsearch_results):", "body": "if (isinstance(gridsearch_results, str) andos.path.exists(gridsearch_results)):with open(gridsearch_results,'') as infd:gridresults = pickle.load(infd)elif isinstance(gridsearch_results, dict):gridresults = gridsearch_resultselse:LOGERROR('''')return Noneplotres = {'':gridresults['']}recgrid = gridresults['']simbasedir = gridresults['']for magcol in gridresults['']:plotres[magcol] = {'':[],'':[],'':[],'':gridresults['']}for magbinind, magbinmedian in enumerate(gridresults['']):LOGINFO('' %(magcol, magbinmedian))stet_mcc = np.array([x[magcol]['']for x in recgrid[magbinind]])[::(gridresults[''].size *gridresults[''].size)]stet_precision = np.array([x[magcol]['']for x in recgrid[magbinind]])[::(gridresults[''].size *gridresults[''].size)]stet_recall = np.array([x[magcol]['']for x in recgrid[magbinind]])[::(gridresults[''].size *gridresults[''].size)]stet_missed_inveta_found = np.array([x[magcol]['']for x in recgrid[magbinind]])[::(gridresults[''].size *gridresults[''].size)]stet_missed_iqr_found = np.array([x[magcol]['']for x in recgrid[magbinind]])[::(gridresults[''].size *gridresults[''].size)]inveta_mcc = np.array([x[magcol]['']for x in recgrid[magbinind]])[:(gridresults[''].size *gridresults[''].size)][::gridresults[''].size]inveta_precision = np.array([x[magcol]['']for x in recgrid[magbinind]])[:(gridresults[''].size *gridresults[''].size)][::gridresults[''].size]inveta_recall = np.array([x[magcol]['']for x in recgrid[magbinind]])[:(gridresults[''].size *gridresults[''].size)][::gridresults[''].size]inveta_missed_stet_found = np.array([x[magcol]['']for x in recgrid[magbinind]])[:(gridresults[''].size *gridresults[''].size)][::gridresults[''].size]inveta_missed_iqr_found = np.array([x[magcol]['']for x in recgrid[magbinind]])[:(gridresults[''].size 
*gridresults[''].size)][::gridresults[''].size]iqr_mcc = np.array([x[magcol]['']for x in recgrid[magbinind]])[:(gridresults[''].size *gridresults[''].size)][:gridresults[''].size]iqr_precision = np.array([x[magcol]['']for x in recgrid[magbinind]])[:(gridresults[''].size *gridresults[''].size)][:gridresults[''].size]iqr_recall = np.array([x[magcol]['']for x in recgrid[magbinind]])[:(gridresults[''].size *gridresults[''].size)][:gridresults[''].size]iqr_missed_stet_found = np.array([x[magcol]['']for x in recgrid[magbinind]])[:(gridresults[''].size *gridresults[''].size)][:gridresults[''].size]iqr_missed_inveta_found = np.array([x[magcol]['']for x in recgrid[magbinind]])[:(gridresults[''].size *gridresults[''].size)][:gridresults[''].size]fig = plt.figure(figsize=(*, *))plt.subplot(,,)if np.any(np.isfinite(stet_mcc)):plt.plot(gridresults[''],stet_mcc)plt.xlabel('')plt.ylabel('')plt.title('')else:plt.text(,,'''',transform=plt.gca().transAxes,horizontalalignment='',verticalalignment='')plt.xticks([])plt.yticks([])plt.subplot(,,)if np.any(np.isfinite(stet_precision)):plt.plot(gridresults[''],stet_precision)plt.xlabel('')plt.ylabel('')plt.title('')else:plt.text(,,'''',transform=plt.gca().transAxes,horizontalalignment='',verticalalignment='')plt.xticks([])plt.yticks([])plt.subplot(,,)if np.any(np.isfinite(stet_recall)):plt.plot(gridresults[''],stet_recall)plt.xlabel('')plt.ylabel('')plt.title('')else:plt.text(,,'''',transform=plt.gca().transAxes,horizontalalignment='',verticalalignment='')plt.xticks([])plt.yticks([])plt.subplot(,,)if np.any(np.isfinite(stet_missed_inveta_found)):plt.plot(gridresults[''],stet_missed_inveta_found)plt.xlabel('')plt.ylabel('')plt.title('')else:plt.text(,,'''',transform=plt.gca().transAxes,horizontalalignment='',verticalalignment='')plt.xticks([])plt.yticks([])plt.subplot(,,)if np.any(np.isfinite(stet_missed_iqr_found)):plt.plot(gridresults[''],stet_missed_iqr_found)plt.xlabel('')plt.ylabel('')plt.title('')else:plt.text(,,'''',transform=plt.gca().transAxes,horizontalalignment='',verticalalignment='')plt.xticks([])plt.yticks([])plt.subplot(,,)if np.any(np.isfinite(inveta_mcc)):plt.plot(gridresults[''],inveta_mcc)plt.xlabel('')plt.ylabel('')plt.title('')else:plt.text(,,'''',transform=plt.gca().transAxes,horizontalalignment='',verticalalignment='')plt.xticks([])plt.yticks([])plt.subplot(,,)if np.any(np.isfinite(inveta_precision)):plt.plot(gridresults[''],inveta_precision)plt.xlabel('')plt.ylabel('')plt.title('')else:plt.text(,,'''',transform=plt.gca().transAxes,horizontalalignment='',verticalalignment='')plt.xticks([])plt.yticks([])plt.subplot(,,)if np.any(np.isfinite(inveta_recall)):plt.plot(gridresults[''],inveta_recall)plt.xlabel('')plt.ylabel('')plt.title('')else:plt.text(,,'''',transform=plt.gca().transAxes,horizontalalignment='',verticalalignment='')plt.xticks([])plt.yticks([])plt.subplot(,,)if np.any(np.isfinite(inveta_missed_stet_found)):plt.plot(gridresults[''],inveta_missed_stet_found)plt.xlabel('')plt.ylabel('')plt.title('')else:plt.text(,,'''',transform=plt.gca().transAxes,horizontalalignment='',verticalalignment='')plt.xticks([])plt.yticks([])plt.subplot(,,)if np.any(np.isfinite(inveta_missed_iqr_found)):plt.plot(gridresults[''],inveta_missed_iqr_found)plt.xlabel('')plt.ylabel('')plt.title('')else:plt.text(,,'''',transform=plt.gca().transAxes,horizontalalignment='',verticalalignment='')plt.xticks([])plt.yticks([])plt.subplot(,,)if 
np.any(np.isfinite(iqr_mcc)):plt.plot(gridresults[''],iqr_mcc)plt.xlabel('')plt.ylabel('')plt.title('')else:plt.text(,,'''',transform=plt.gca().transAxes,horizontalalignment='',verticalalignment='')plt.xticks([])plt.yticks([])plt.subplot(,,)if np.any(np.isfinite(iqr_precision)):plt.plot(gridresults[''],iqr_precision)plt.xlabel('')plt.ylabel('')plt.title('')else:plt.text(,,'''',transform=plt.gca().transAxes,horizontalalignment='',verticalalignment='')plt.xticks([])plt.yticks([])plt.subplot(,,)if np.any(np.isfinite(iqr_recall)):plt.plot(gridresults[''],iqr_recall)plt.xlabel('')plt.ylabel('')plt.title('')else:plt.text(,,'''',transform=plt.gca().transAxes,horizontalalignment='',verticalalignment='')plt.xticks([])plt.yticks([])plt.subplot(,,)if np.any(np.isfinite(iqr_missed_stet_found)):plt.plot(gridresults[''],iqr_missed_stet_found)plt.xlabel('')plt.ylabel('')plt.title('')else:plt.text(,,'''',transform=plt.gca().transAxes,horizontalalignment='',verticalalignment='')plt.xticks([])plt.yticks([])plt.subplot(,,)if np.any(np.isfinite(iqr_missed_inveta_found)):plt.plot(gridresults[''],iqr_missed_inveta_found)plt.xlabel('')plt.ylabel('')plt.title('')else:plt.text(,,'''',transform=plt.gca().transAxes,horizontalalignment='',verticalalignment='')plt.xticks([])plt.yticks([])plt.subplots_adjust(hspace=,wspace=)plt.suptitle('' % (magcol, magbinmedian))plotdir = os.path.join(gridresults[''],'')if not os.path.exists(plotdir):os.mkdir(plotdir)gridplotf = os.path.join(plotdir,'' %(magcol, magbinmedian))plt.savefig(gridplotf,dpi=,bbox_inches='')plt.close('')stet_mcc_maxind = np.where(stet_mcc == np.max(stet_mcc))stet_precision_maxind = np.where(stet_precision == np.max(stet_precision))stet_recall_maxind = np.where(stet_recall == np.max(stet_recall))best_stet_mcc = stet_mcc[stet_mcc_maxind]best_stet_precision = stet_mcc[stet_precision_maxind]best_stet_recall = stet_mcc[stet_recall_maxind]stet_with_best_mcc = gridresults[''][stet_mcc_maxind]stet_with_best_precision = gridresults[''][stet_precision_maxind]stet_with_best_recall = (gridresults[''][stet_recall_maxind])inveta_mcc_maxind = np.where(inveta_mcc == np.max(inveta_mcc))inveta_precision_maxind = np.where(inveta_precision == np.max(inveta_precision))inveta_recall_maxind = (np.where(inveta_recall == np.max(inveta_recall)))best_inveta_mcc = inveta_mcc[inveta_mcc_maxind]best_inveta_precision = inveta_mcc[inveta_precision_maxind]best_inveta_recall = inveta_mcc[inveta_recall_maxind]inveta_with_best_mcc = gridresults[''][inveta_mcc_maxind]inveta_with_best_precision = gridresults[''][inveta_precision_maxind]inveta_with_best_recall = gridresults[''][inveta_recall_maxind]iqr_mcc_maxind = np.where(iqr_mcc == np.max(iqr_mcc))iqr_precision_maxind = np.where(iqr_precision == np.max(iqr_precision))iqr_recall_maxind = (np.where(iqr_recall == np.max(iqr_recall)))best_iqr_mcc = iqr_mcc[iqr_mcc_maxind]best_iqr_precision = iqr_mcc[iqr_precision_maxind]best_iqr_recall = iqr_mcc[iqr_recall_maxind]iqr_with_best_mcc = gridresults[''][iqr_mcc_maxind]iqr_with_best_precision = gridresults[''][iqr_precision_maxind]iqr_with_best_recall = gridresults[''][iqr_recall_maxind]plotres[magcol][magbinmedian] = 
{'':gridresults[''],'':stet_mcc,'':stet_precision,'':stet_recall,'':stet_missed_inveta_found,'':best_stet_mcc,'':stet_with_best_mcc,'':best_stet_precision,'':stet_with_best_precision,'':best_stet_recall,'':stet_with_best_recall,'':gridresults[''],'':inveta_mcc,'':inveta_precision,'':inveta_recall,'':inveta_missed_stet_found,'':best_inveta_mcc,'':inveta_with_best_mcc,'':best_inveta_precision,'':inveta_with_best_precision,'':best_inveta_recall,'':inveta_with_best_recall,'':gridresults[''],'':iqr_mcc,'':iqr_precision,'':iqr_recall,'':iqr_missed_stet_found,'':best_iqr_mcc,'':iqr_with_best_mcc,'':best_iqr_precision,'':iqr_with_best_precision,'':best_iqr_recall,'':iqr_with_best_recall,'':gridplotf}if stet_with_best_mcc.size > :plotres[magcol][''].append(stet_with_best_mcc[])elif stet_with_best_mcc.size > :plotres[magcol][''].append(stet_with_best_mcc[])else:plotres[magcol][''].append(np.nan)if inveta_with_best_mcc.size > :plotres[magcol][''].append(inveta_with_best_mcc[])elif inveta_with_best_mcc.size > :plotres[magcol][''].append(inveta_with_best_mcc[])else:plotres[magcol][''].append(np.nan)if iqr_with_best_mcc.size > :plotres[magcol][''].append(iqr_with_best_mcc[])elif iqr_with_best_mcc.size > :plotres[magcol][''].append(iqr_with_best_mcc[])else:plotres[magcol][''].append(np.nan)plotrespicklef = os.path.join(simbasedir,'')with open(plotrespicklef, '') as outfd:pickle.dump(plotres, outfd, pickle.HIGHEST_PROTOCOL)for magcol in gridresults['']:LOGINFO('' % magcol)LOGINFO('')for magbin, inveta, stet, iqr in zip(plotres[magcol][''],plotres[magcol][''],plotres[magcol][''],plotres[magcol]['']):LOGINFO('' % (magbin,inveta,stet,iqr))return plotres", "docstring": "This plots the gridsearch results from `variable_index_gridsearch_magbin`.\n\n Parameters\n ----------\n\n gridsearch_results : dict\n This is the dict produced by `variable_index_gridsearch_magbin` above.\n\n Returns\n -------\n\n dict\n The returned dict contains filenames of the recovery rate plots made for\n each variability index. 
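For each magbin, the summary step above then picks the threshold value that maximizes the Matthews correlation coefficient (and likewise for best precision and recall). A hedged sketch of that selection using `np.nanargmax` in place of the `np.where(... == np.max(...))` pattern in the body; the function name is illustrative.

```python
import numpy as np

def sketch_best_threshold(threshold_grid, mcc_values):
    threshold_grid = np.asarray(threshold_grid)
    mcc_values = np.asarray(mcc_values)
    if not np.any(np.isfinite(mcc_values)):
        return np.nan, np.nan
    best_ind = np.nanargmax(mcc_values)
    return threshold_grid[best_ind], mcc_values[best_ind]

# e.g. best_stet, best_mcc = sketch_best_threshold(stetson_grid, stet_mcc)
```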
These include plots of the precision, recall,\n and Matthews Correlation Coefficient over each magbin and a heatmap of\n these values over the grid points of the variability index stdev values\n arrays used.", "id": "f14692:m9"} {"signature": "def run_periodfinding(simbasedir,pfmethods=('','',''),pfkwargs=({},{},{'':,'':}),getblssnr=False,sigclip=,nperiodworkers=,ncontrolworkers=,liststartindex=None,listmaxobjects=None):", "body": "with open(os.path.join(simbasedir, ''),'') as infd:siminfo = pickle.load(infd)lcfpaths = siminfo['']pfdir = os.path.join(simbasedir,'')timecols = siminfo['']magcols = siminfo['']errcols = siminfo['']fakelc_formatkey = '' % siminfo['']lcproc.register_lcformat(fakelc_formatkey,'',timecols,magcols,errcols,'','',magsarefluxes=siminfo[''])if liststartindex:lcfpaths = lcfpaths[liststartindex:]if listmaxobjects:lcfpaths = lcfpaths[:listmaxobjects]pfinfo = periodsearch.parallel_pf(lcfpaths,pfdir,lcformat=fakelc_formatkey,pfmethods=pfmethods,pfkwargs=pfkwargs,getblssnr=getblssnr,sigclip=sigclip,nperiodworkers=nperiodworkers,ncontrolworkers=ncontrolworkers)with open(os.path.join(simbasedir,''),'') as outfd:pickle.dump(pfinfo, outfd, pickle.HIGHEST_PROTOCOL)return os.path.join(simbasedir,'')", "docstring": "This runs periodfinding using several period-finders on a collection of\n fake LCs.\n\n As a rough benchmark, 25000 fake LCs with 10000--50000 points per LC take\n about 26 days in total to run on an invocation of this function using\n GLS+PDM+BLS and 10 periodworkers and 4 controlworkers (so all 40 'cores') on\n a 2 x Xeon E5-2660v3 machine.\n\n Parameters\n ----------\n\n pfmethods : sequence of str\n This is used to specify which periodfinders to run. These must be in the\n `lcproc.periodsearch.PFMETHODS` dict.\n\n pfkwargs : sequence of dict\n This is used to provide optional kwargs to the period-finders.\n\n getblssnr : bool\n If this is True, will run BLS SNR calculations for each object and\n magcol. This takes a while to run, so it's disabled (False) by default.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n nperiodworkers : int\n This is the number of parallel period-finding worker processes to use.\n\n ncontrolworkers : int\n This is the number of parallel period-finding control workers to\n use. Each control worker will launch `nperiodworkers` worker processes.\n\n liststartindex : int\n The starting index of processing. This refers to the filename list\n generated by running `glob.glob` on the fake LCs in `simbasedir`.\n\n maxobjects : int\n The maximum number of objects to process in this run. 
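The `sigclip` kwarg described above accepts either a single symmetric value or a `[dimming, brightening]` pair whose interpretation depends on whether the series is in magnitudes or fluxes. The standalone sketch below illustrates that behavior only; it is not astrobase's own sigma-clip routine, and a robust MAD-based sigma would normally replace the plain standard deviation used here.

```python
import numpy as np

def sketch_sigclip(times, mags, errs, sigclip=None, magsarefluxes=False):
    fin = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
    times, mags, errs = times[fin], mags[fin], errs[fin]
    if sigclip is None:
        return times, mags, errs
    median, stdev = np.median(mags), np.std(mags)
    if isinstance(sigclip, (list, tuple)):
        dimclip, brightclip = sigclip      # asymmetric clip
    else:
        dimclip = brightclip = sigclip     # symmetric clip
    if magsarefluxes:
        # for fluxes, dimmings are smaller values and brightenings larger
        keep = ((mags > (median - dimclip * stdev)) &
                (mags < (median + brightclip * stdev)))
    else:
        # for magnitudes, dimmings are larger values and brightenings smaller
        keep = ((mags < (median + dimclip * stdev)) &
                (mags > (median - brightclip * stdev)))
    return times[keep], mags[keep], errs[keep]
```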
Use this with\n `liststartindex` to effectively distribute working on a large list of\n input light curves over several sessions or machines.\n\n Returns\n -------\n\n str\n The path to the output summary pickle produced by\n `lcproc.periodsearch.parallel_pf`", "id": "f14692:m10"} {"signature": "def check_periodrec_alias(actualperiod,recoveredperiod,tolerance=):", "body": "if not (np.isfinite(actualperiod) and np.isfinite(recoveredperiod)):LOGERROR(\"\")return ''else:twotimes_p = actualperiod*half_p = actualperiod*alias_1a = actualperiod/(+actualperiod)alias_1b = actualperiod/(-actualperiod)alias_2a = actualperiod/(+*actualperiod)alias_2b = actualperiod/(-*actualperiod)alias_3a = actualperiod/(+*actualperiod)alias_3b = actualperiod/(-*actualperiod)alias_4a = actualperiod/(actualperiod - )alias_4b = actualperiod/(*actualperiod - )aliases = np.ravel(np.array([actualperiod,twotimes_p,half_p,alias_1a,alias_1b,alias_2a,alias_2b,alias_3a,alias_3b,alias_4a,alias_4b]))alias_labels = np.array(ALIAS_TYPES)closest_alias = np.isclose(recoveredperiod, aliases, atol=tolerance)if np.any(closest_alias):closest_alias_type = alias_labels[closest_alias]return ''.join(closest_alias_type.tolist())else:return ''", "docstring": "This determines what kind of aliasing (if any) exists between\n `recoveredperiod` and `actualperiod`.\n\n Parameters\n ----------\n\n actualperiod : float\n The actual period of the object.\n\n recoveredperiod : float\n The recovered period of the object.\n\n tolerance : float\n The absolute difference required between the input periods to mark the\n recovered period as close to the actual period.\n\n Returns\n -------\n\n str\n The type of alias determined for the input combination of periods. This\n will be CSV string with values taken from the following list, based on\n the types of alias found::\n\n ['actual',\n 'twice',\n 'half',\n 'ratio_over_1plus',\n 'ratio_over_1minus',\n 'ratio_over_1plus_twice',\n 'ratio_over_1minus_twice',\n 'ratio_over_1plus_thrice',\n 'ratio_over_1minus_thrice',\n 'ratio_over_minus1',\n 'ratio_over_twice_minus1']", "id": "f14692:m11"} {"signature": "def periodicvar_recovery(fakepfpkl,simbasedir,period_tolerance=):", "body": "if fakepfpkl.endswith(''):infd = gzip.open(fakepfpkl,'')else:infd = open(fakepfpkl,'')fakepf = pickle.load(infd)infd.close()objectid, lcfbasename = fakepf[''], fakepf['']lcfpath = os.path.join(simbasedir,'',lcfbasename)if not os.path.exists(lcfpath):LOGERROR('' % (objectid,lcfpath))return Nonefakelc = lcproc._read_pklc(lcfpath)actual_varparams, actual_varperiod, actual_varamplitude, actual_vartype = (fakelc[''],fakelc[''],fakelc[''],fakelc[''])actual_moments = fakelc['']magcols = fakelc['']pfres = {'':objectid,'':simbasedir,'':magcols,'':os.path.abspath(lcfpath),'':os.path.abspath(fakepfpkl),'':actual_vartype,'':actual_varperiod,'':actual_varamplitude,'':actual_varparams,'':actual_moments,'':[],'':[],'':[],'':[],'':[],'':[],}for magcol in magcols:for pfm in lcproc.PFMETHODS:if pfm in fakepf[magcol]:for rpi, rp in enumerate(fakepf[magcol][pfm]['']):if ((not np.any(np.isclose(rp,np.array(pfres['']),rtol=period_tolerance))) and np.isfinite(rp)):pfres[''].append(rp)pfres[''].append(pfm)pfres[''].append(magcol)if pfm == '':this_lspval = (np.max(fakepf[magcol][pfm]['']) -fakepf[magcol][pfm][''][rpi])else:this_lspval = (fakepf[magcol][pfm][''][rpi] /np.max(fakepf[magcol][pfm]['']))pfres[''].append(this_lspval)pfres[''] = np.array(pfres[''])pfres[''] = np.array(pfres[''])pfres[''] = np.array(pfres[''])pfres[''] = np.array(pfres[''])if 
(actual_vartype andactual_vartype in PERIODIC_VARTYPES andnp.isfinite(actual_varperiod)):if pfres[''].size > :for ri in range(pfres[''].size):pfres[''].append(pfres[''][ri] -np.asscalar(actual_varperiod))pfres[''].append(check_periodrec_alias(actual_varperiod,pfres[''][ri],tolerance=period_tolerance))pfres[''] = np.array(pfres[''])pfres[''] = np.array(pfres[''])rec_absdiff = np.abs(pfres[''])best_recp_ind = rec_absdiff == rec_absdiff.min()pfres[''] = (pfres[''][best_recp_ind])pfres[''] = (pfres[''][best_recp_ind])pfres[''] = (pfres[''][best_recp_ind])pfres[''] = (pfres[''][best_recp_ind])pfres[''] = (pfres[''][best_recp_ind])else:LOGWARNING('' %fakepfpkl)pfres[''] = np.array([''])pfres[''] = np.array([np.nan])pfres[''] = np.array([np.nan])pfres[''] = np.array([],dtype=np.unicode_)pfres[''] = np.array([],dtype=np.unicode_)pfres[''] = np.array([],dtype=np.unicode_)pfres[''] = np.array([np.nan])else:pfres[''] = np.array(['']*pfres[''].size)pfres[''] = np.zeros(pfres[''].size)pfres[''] = np.array([np.nan])pfres[''] = np.array([],dtype=np.unicode_)pfres[''] = np.array([],dtype=np.unicode_)pfres[''] = np.array([''])pfres[''] = np.array([np.nan])return pfres", "docstring": "Recovers the periodic variable status/info for the simulated PF result.\n\n - Uses simbasedir and the lcfbasename stored in fakepfpkl to figure out\n where the LC for this object is.\n - Gets the actual_varparams, actual_varperiod, actual_vartype,\n actual_varamplitude elements from the LC.\n - Figures out if the current objectid is a periodic variable (using\n actual_vartype).\n - If it is a periodic variable, gets the canonical period assigned to it.\n - Checks if the period was recovered in any of the five best periods\n reported by any of the period-finders, checks if the period recovered was\n a harmonic of the period.\n - Returns the objectid, actual period and vartype, recovered period, and\n recovery status.\n\n\n Parameters\n ----------\n\n fakepfpkl : str\n This is a periodfinding-.pkl[.gz] file produced in the\n `simbasedir/periodfinding` subdirectory after `run_periodfinding` above\n is done.\n\n simbasedir : str\n The base directory where all of the fake LCs and period-finding results\n are.\n\n period_tolerance : float\n The maximum difference that this function will consider between an\n actual period (or its aliases) and a recovered period to consider it as\n as a 'recovered' period.\n\n Returns\n -------\n\n dict\n Returns a dict of period-recovery results.", "id": "f14692:m12"} {"signature": "def periodrec_worker(task):", "body": "pfpkl, simbasedir, period_tolerance = tasktry:return periodicvar_recovery(pfpkl,simbasedir,period_tolerance=period_tolerance)except Exception as e:LOGEXCEPTION('' % repr(task))return None", "docstring": "This is a parallel worker for running period-recovery.\n\n Parameters\n ----------\n\n task : tuple\n This is used to pass args to the `periodicvar_recovery` function::\n\n task[0] = period-finding result pickle to work on\n task[1] = simbasedir\n task[2] = period_tolerance\n\n Returns\n -------\n\n dict\n This is the dict produced by the `periodicvar_recovery` function for the\n input period-finding result pickle.", "id": "f14692:m13"} {"signature": "def parallel_periodicvar_recovery(simbasedir,period_tolerance=,liststartind=None,listmaxobjects=None,nworkers=None):", "body": "pfpkldir = os.path.join(simbasedir,'')if not os.path.exists(pfpkldir):LOGERROR('' %simbasedir)return Nonepfpkl_list = glob.glob(os.path.join(pfpkldir,''))if len(pfpkl_list) > :if liststartind:pfpkl_list = 
pfpkl_list[liststartind:]if listmaxobjects:pfpkl_list = pfpkl_list[:listmaxobjects]tasks = [(x, simbasedir, period_tolerance) for x in pfpkl_list]pool = mp.Pool(nworkers)results = pool.map(periodrec_worker, tasks)pool.close()pool.join()resdict = {x['']:x for x in results if x is not None}actual_periodicvars = np.array([x[''] for x in resultsif (x is not None and x[''] in PERIODIC_VARTYPES)],dtype=np.unicode_)recovered_periodicvars = np.array([x[''] for x in resultsif (x is not None and '' in x[''])],dtype=np.unicode_)alias_twice_periodicvars = np.array([x[''] for x in resultsif (x is not None and '' in x[''])],dtype=np.unicode_)alias_half_periodicvars = np.array([x[''] for x in resultsif (x is not None and '' in x[''])],dtype=np.unicode_)all_objectids = [x[''] for x in results]outdict = {'':os.path.abspath(simbasedir),'':all_objectids,'':period_tolerance,'':actual_periodicvars,'':recovered_periodicvars,'':alias_twice_periodicvars,'':alias_half_periodicvars,'':resdict}outfile = os.path.join(simbasedir,'')with open(outfile, '') as outfd:pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)return outdictelse:LOGERROR('' %pfpkldir)return None", "docstring": "This is a parallel driver for `periodicvar_recovery`.\n\n Parameters\n ----------\n\n simbasedir : str\n The base directory where all of the fake LCs and period-finding results\n are.\n\n period_tolerance : float\n The maximum difference that this function will consider between an\n actual period (or its aliases) and a recovered period to consider it as\n as a 'recovered' period.\n\n liststartindex : int\n The starting index of processing. This refers to the filename list\n generated by running `glob.glob` on the period-finding result pickles in\n `simbasedir/periodfinding`.\n\n listmaxobjects : int\n The maximum number of objects to process in this run. 
Use this with\n `liststartindex` to effectively distribute working on a large list of\n input period-finding result pickles over several sessions or machines.\n\n nperiodworkers : int\n This is the number of parallel period-finding worker processes to use.\n\n Returns\n -------\n\n str\n Returns the filename of the pickle produced containing all of the period\n recovery results.", "id": "f14692:m14"} {"signature": "def plot_periodicvar_recovery_results(precvar_results,aliases_count_as_recovered=None,magbins=None,periodbins=None,amplitudebins=None,ndetbins=None,minbinsize=,plotfile_ext='',):", "body": "if isinstance(precvar_results, str) and os.path.exists(precvar_results):with open(precvar_results,'') as infd:precvar = pickle.load(infd)elif isinstance(precvar_results, dict):precvar = precvar_resultselse:LOGERROR('''')return Nonesimbasedir = precvar['']lcinfof = os.path.join(simbasedir,'')if not os.path.exists(lcinfof):LOGERROR('' %simbasedir)return Nonewith open(lcinfof,'') as infd:lcinfo = pickle.load(infd)magcols = lcinfo['']objectid = lcinfo['']ndet = lcinfo['']sdssr = lcinfo['']actual_periodicvars = precvar['']LOGINFO('')periodicvar_sdssr = []periodicvar_ndet = []periodicvar_objectids = []for pobj in actual_periodicvars:pobjind = objectid == pobjperiodicvar_objectids.append(pobj)periodicvar_sdssr.append(sdssr[pobjind])periodicvar_ndet.append(ndet[pobjind])periodicvar_sdssr = np.array(periodicvar_sdssr)periodicvar_objectids = np.array(periodicvar_objectids)periodicvar_ndet = np.array(periodicvar_ndet)LOGINFO('''')periodicvar_periods = [np.asscalar(precvar[''][x][''])for x in periodicvar_objectids]periodicvar_amplitudes = [np.asscalar(precvar[''][x][''])for x in periodicvar_objectids]periodicvar_vartypes = [precvar[''][x][''] for x in periodicvar_objectids]LOGINFO('')magbinned_sdssr = []magbinned_periodicvars = []if not magbins:magbins = PERIODREC_DEFAULT_MAGBINSmagbininds = np.digitize(np.ravel(periodicvar_sdssr), magbins)for mbinind, magi in zip(np.unique(magbininds),range(len(magbins)-)):thisbin_periodicvars = periodicvar_objectids[magbininds == mbinind]if (thisbin_periodicvars.size > (minbinsize-)):magbinned_sdssr.append((magbins[magi] + magbins[magi+])/)magbinned_periodicvars.append(thisbin_periodicvars)LOGINFO('')periodbinned_periods = []periodbinned_periodicvars = []if not periodbins:periodbins = PERIODREC_DEFAULT_PERIODBINSperiodbininds = np.digitize(np.ravel(periodicvar_periods), periodbins)for pbinind, peri in zip(np.unique(periodbininds),range(len(periodbins)-)):thisbin_periodicvars = periodicvar_objectids[periodbininds == pbinind]if (thisbin_periodicvars.size > (minbinsize-)):periodbinned_periods.append((periodbins[peri] +periodbins[peri+])/)periodbinned_periodicvars.append(thisbin_periodicvars)LOGINFO('')amplitudebinned_amplitudes = []amplitudebinned_periodicvars = []if not amplitudebins:amplitudebins = PERIODREC_DEFAULT_AMPBINSamplitudebininds = np.digitize(np.ravel(np.abs(periodicvar_amplitudes)),amplitudebins)for abinind, ampi in zip(np.unique(amplitudebininds),range(len(amplitudebins)-)):thisbin_periodicvars = periodicvar_objectids[amplitudebininds == abinind]if (thisbin_periodicvars.size > (minbinsize-)):amplitudebinned_amplitudes.append((amplitudebins[ampi] +amplitudebins[ampi+])/)amplitudebinned_periodicvars.append(thisbin_periodicvars)LOGINFO('')ndetbinned_ndets = []ndetbinned_periodicvars = []if not ndetbins:ndetbins = PERIODREC_DEFAULT_NDETBINSndetbininds = np.digitize(np.ravel(periodicvar_ndet), ndetbins)for nbinind, ndeti in 
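`parallel_periodicvar_recovery` globs the period-finder result pickles under `simbasedir/periodfinding`, optionally slices that list with `liststartind`/`listmaxobjects` to split the work across sessions or machines, maps a worker over the tasks with `multiprocessing.Pool`, and pickles the collected results back into `simbasedir`. A simplified sketch of that driver; the glob pattern, output filename, and tolerance default are assumptions, and `periodrec_worker` is the module-level worker from the record above:

```python
import glob
import multiprocessing as mp
import os
import pickle

def recover_all(simbasedir, period_tolerance=1.0e-3,
                liststartind=None, listmaxobjects=None, nworkers=None):
    """Run period recovery over all result pickles in simbasedir/periodfinding."""
    pfpkl_list = sorted(glob.glob(
        os.path.join(simbasedir, 'periodfinding', '*.pkl*')))  # pattern assumed
    if liststartind:
        pfpkl_list = pfpkl_list[liststartind:]
    if listmaxobjects:
        pfpkl_list = pfpkl_list[:listmaxobjects]

    tasks = [(x, simbasedir, period_tolerance) for x in pfpkl_list]
    with mp.Pool(nworkers) as pool:
        results = pool.map(periodrec_worker, tasks)

    # keep only successful results, keyed by objectid (simplified vs. the original)
    resdict = {x['objectid']: x for x in results if x is not None}

    outfile = os.path.join(simbasedir, 'periodicvar-recovery.pkl')  # name assumed
    with open(outfile, 'wb') as outfd:
        pickle.dump(resdict, outfd, pickle.HIGHEST_PROTOCOL)
    return outfile
```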
zip(np.unique(ndetbininds),range(len(ndetbins)-)):thisbin_periodicvars = periodicvar_objectids[ndetbininds == nbinind]if (thisbin_periodicvars.size > (minbinsize-)):ndetbinned_ndets.append((ndetbins[ndeti] +ndetbins[ndeti+])/)ndetbinned_periodicvars.append(thisbin_periodicvars)recovered_status = ['']if isinstance(aliases_count_as_recovered, list):for atype in aliases_count_as_recovered:if atype in ALIAS_TYPES:recovered_status.append(atype)else:LOGWARNING('' % atype)elif aliases_count_as_recovered and aliases_count_as_recovered == '':for atype in ALIAS_TYPES[:]:recovered_status.append(atype)recovered_periodicvars = np.array([precvar[''][x][''] for x in precvar['']if (precvar[''][x] is not None andprecvar[''][x]['']in recovered_status)],dtype=np.unicode_)LOGINFO('''' %(recovered_periodicvars.size,actual_periodicvars.size,float(recovered_periodicvars.size/actual_periodicvars.size),''.join(recovered_status)))magbinned_recovered_objects = [np.intersect1d(x,recovered_periodicvars)for x in magbinned_periodicvars]magbinned_recfrac = np.array([float(x.size/y.size) for x,yin zip(magbinned_recovered_objects,magbinned_periodicvars)])periodbinned_recovered_objects = [np.intersect1d(x,recovered_periodicvars)for x in periodbinned_periodicvars]periodbinned_recfrac = np.array([float(x.size/y.size) for x,yin zip(periodbinned_recovered_objects,periodbinned_periodicvars)])amplitudebinned_recovered_objects = [np.intersect1d(x,recovered_periodicvars)for x in amplitudebinned_periodicvars]amplitudebinned_recfrac = np.array([float(x.size/y.size) for x,yin zip(amplitudebinned_recovered_objects,amplitudebinned_periodicvars)])ndetbinned_recovered_objects = [np.intersect1d(x,recovered_periodicvars)for x in ndetbinned_periodicvars]ndetbinned_recfrac = np.array([float(x.size/y.size) for x,yin zip(ndetbinned_recovered_objects,ndetbinned_periodicvars)])magbinned_sdssr = np.array(magbinned_sdssr)periodbinned_periods = np.array(periodbinned_periods)amplitudebinned_amplitudes = np.array(amplitudebinned_amplitudes)ndetbinned_ndets = np.array(ndetbinned_ndets)outdict = {'':simbasedir,'':precvar,'':magcols,'':objectid,'':ndet,'':sdssr,'':actual_periodicvars,'':recovered_periodicvars,'':recovered_status,'':magbins,'':magbinned_sdssr,'':magbinned_periodicvars,'':magbinned_recovered_objects,'':magbinned_recfrac,'':periodbins,'':periodbinned_periods,'':periodbinned_periodicvars,'':periodbinned_recovered_objects,'':periodbinned_recfrac,'':amplitudebins,'':amplitudebinned_amplitudes,'':amplitudebinned_periodicvars,'':amplitudebinned_recovered_objects,'':amplitudebinned_recfrac,'':ndetbins,'':ndetbinned_ndets,'':ndetbinned_periodicvars,'':ndetbinned_recovered_objects,'':ndetbinned_recfrac,}all_pfmethods = np.unique(np.concatenate([np.unique(precvar[''][x][''])for x in precvar['']]))all_vartypes = np.unique([(precvar[''][x][''])for x in precvar[''] if(precvar[''][x][''] is not None)])all_aliastypes = recovered_statusoutdict[''] = all_aliastypesoutdict[''] = all_pfmethodsoutdict[''] = all_vartypesmagbinned_per_magcol_recfracs = []magbinned_per_vartype_recfracs = []magbinned_per_pfmethod_recfracs = []magbinned_per_aliastype_recfracs = []periodbinned_per_magcol_recfracs = []periodbinned_per_vartype_recfracs = []periodbinned_per_pfmethod_recfracs = []periodbinned_per_aliastype_recfracs = []amplitudebinned_per_magcol_recfracs = []amplitudebinned_per_vartype_recfracs = []amplitudebinned_per_pfmethod_recfracs = []amplitudebinned_per_aliastype_recfracs = []ndetbinned_per_magcol_recfracs = []ndetbinned_per_vartype_recfracs = 
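The binning logic above uses `np.digitize` to assign each periodic variable to a magnitude, period, amplitude, or ndet bin, keeps only bins with at least `minbinsize` members, and computes the recovered fraction per bin with `np.intersect1d`. A condensed sketch of that computation for one binning variable; the default mag bins come from the docstring further down, while the `minbinsize` default here is an assumption:

```python
import numpy as np

def binned_recovery_fraction(values, objectids, recovered_objectids,
                             bins=None, minbinsize=10):
    """Bin per-object values (e.g. SDSS r mags) and return (bin_centers, recfracs):
    the fraction of objects in each populated bin that were recovered."""
    if bins is None:
        bins = np.arange(8.0, 16.25, 0.25)   # default mag bins from the docstring
    values = np.ravel(values)
    objectids = np.asarray(objectids)
    bininds = np.digitize(values, bins)

    bin_centers, recfracs = [], []
    for binind in np.unique(bininds):
        inbin = objectids[bininds == binind]
        # skip under- and over-flow bins and sparsely populated bins
        if 0 < binind < bins.size and inbin.size >= minbinsize:
            recovered = np.intersect1d(inbin, recovered_objectids)
            bin_centers.append(0.5 * (bins[binind - 1] + bins[binind]))
            recfracs.append(recovered.size / inbin.size)
    return np.array(bin_centers), np.array(recfracs)
```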
[]ndetbinned_per_pfmethod_recfracs = []ndetbinned_per_aliastype_recfracs = []recplotdir = os.path.join(simbasedir, '')if not os.path.exists(recplotdir):os.mkdir(recplotdir)fig = plt.figure(figsize=(*,*))plt.plot(magbinned_sdssr, magbinned_recfrac,marker='',ms=)plt.xlabel(r'')plt.ylabel('')plt.title('')plt.ylim((,))plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))for magcol in magcols:thismagcol_recfracs = []for magbin_pv, magbin_rv in zip(magbinned_periodicvars,magbinned_recovered_objects):thisbin_thismagcol_recvars = [x for x in magbin_rvif (precvar[''][x][''] == magcol)]thisbin_thismagcol_recfrac = (np.array(thisbin_thismagcol_recvars).size /magbin_pv.size)thismagcol_recfracs.append(thisbin_thismagcol_recfrac)plt.plot(magbinned_sdssr,np.array(thismagcol_recfracs),marker='',label='' % magcol,ms=)magbinned_per_magcol_recfracs.append(np.array(thismagcol_recfracs))plt.plot(magbinned_sdssr, magbinned_recfrac,marker='',ms=, label='', color='')plt.xlabel(r'')plt.ylabel('')plt.title('')plt.ylim((,))plt.legend(markerscale=)plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))all_pfmethods = np.unique(np.concatenate([np.unique(precvar[''][x][''])for x in precvar['']]))for pfm in all_pfmethods:thispf_recfracs = []for magbin_pv, magbin_rv in zip(magbinned_periodicvars,magbinned_recovered_objects):thisbin_thispf_recvars = [x for x in magbin_rvif (precvar[''][x][''] == pfm)]thisbin_thismagcol_recfrac = (np.array(thisbin_thispf_recvars).size /magbin_pv.size)thispf_recfracs.append(thisbin_thismagcol_recfrac)plt.plot(magbinned_sdssr,np.array(thispf_recfracs),marker='',label='' % pfm.upper(),ms=)magbinned_per_pfmethod_recfracs.append(np.array(thispf_recfracs))plt.plot(magbinned_sdssr, magbinned_recfrac,marker='',ms=, label='', color='')plt.xlabel(r'')plt.ylabel('')plt.title('')plt.ylim((,))plt.legend(markerscale=)plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))all_vartypes = np.unique([(precvar[''][x][''])for x in precvar[''] if(precvar[''][x][''] is not None)])for vt in all_vartypes:thisvt_recfracs = []for magbin_pv, magbin_rv in zip(magbinned_periodicvars,magbinned_recovered_objects):thisbin_thisvt_recvars = [x for x in magbin_rvif (precvar[''][x][''] == vt)]thisbin_thismagcol_recfrac = (np.array(thisbin_thisvt_recvars).size /magbin_pv.size)thisvt_recfracs.append(thisbin_thismagcol_recfrac)plt.plot(magbinned_sdssr,np.array(thisvt_recfracs),marker='',label='' % vt,ms=)magbinned_per_vartype_recfracs.append(np.array(thisvt_recfracs))plt.plot(magbinned_sdssr, magbinned_recfrac,marker='',ms=, label='', color='')plt.xlabel(r'')plt.ylabel('')plt.title('')plt.ylim((,))plt.legend(markerscale=)plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))all_aliastypes = recovered_statusfor at in all_aliastypes:thisat_recfracs = []for magbin_pv, magbin_rv in zip(magbinned_periodicvars,magbinned_recovered_objects):thisbin_thisat_recvars = [x for x in magbin_rvif (precvar[''][x][''][] == at)]thisbin_thismagcol_recfrac = (np.array(thisbin_thisat_recvars).size /magbin_pv.size)thisat_recfracs.append(thisbin_thismagcol_recfrac)plt.plot(magbinned_sdssr,np.array(thisat_recfracs),marker='',label='' % at,ms=)magbinned_per_aliastype_recfracs.append(np.array(thisat_recfracs))plt.plot(magbinned_sdssr, magbinned_recfrac,marker='',ms=, label='', 
color='')plt.xlabel(r'')plt.ylabel('')plt.title('')plt.ylim((,))plt.legend(markerscale=)plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))plt.plot(periodbinned_periods, periodbinned_recfrac,marker='',ms=)plt.xlabel('')plt.ylabel('')plt.title('')plt.ylim((,))plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))for magcol in magcols:thismagcol_recfracs = []for periodbin_pv, periodbin_rv in zip(periodbinned_periodicvars,periodbinned_recovered_objects):thisbin_thismagcol_recvars = [x for x in periodbin_rvif (precvar[''][x][''] == magcol)]thisbin_thismagcol_recfrac = (np.array(thisbin_thismagcol_recvars).size /periodbin_pv.size)thismagcol_recfracs.append(thisbin_thismagcol_recfrac)plt.plot(periodbinned_periods,np.array(thismagcol_recfracs),marker='',label='' % magcol,ms=)periodbinned_per_magcol_recfracs.append(np.array(thismagcol_recfracs))plt.plot(periodbinned_periods, periodbinned_recfrac,marker='',ms=, label='', color='')plt.xlabel(r'')plt.ylabel('')plt.title('')plt.ylim((,))plt.legend(markerscale=)plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))all_pfmethods = np.unique(np.concatenate([np.unique(precvar[''][x][''])for x in precvar['']]))for pfm in all_pfmethods:thispf_recfracs = []for periodbin_pv, periodbin_rv in zip(periodbinned_periodicvars,periodbinned_recovered_objects):thisbin_thispf_recvars = [x for x in periodbin_rvif (precvar[''][x][''] == pfm)]thisbin_thismagcol_recfrac = (np.array(thisbin_thispf_recvars).size /periodbin_pv.size)thispf_recfracs.append(thisbin_thismagcol_recfrac)plt.plot(periodbinned_periods,np.array(thispf_recfracs),marker='',label='' % pfm.upper(),ms=)periodbinned_per_pfmethod_recfracs.append(np.array(thispf_recfracs))plt.plot(periodbinned_periods, periodbinned_recfrac,marker='',ms=, label='', color='')plt.xlabel(r'')plt.ylabel('')plt.title('')plt.ylim((,))plt.legend(markerscale=)plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))all_vartypes = np.unique([(precvar[''][x][''])for x in precvar[''] if(precvar[''][x][''] is not None)])for vt in all_vartypes:thisvt_recfracs = []for periodbin_pv, periodbin_rv in zip(periodbinned_periodicvars,periodbinned_recovered_objects):thisbin_thisvt_recvars = [x for x in periodbin_rvif (precvar[''][x][''] == vt)]thisbin_thismagcol_recfrac = (np.array(thisbin_thisvt_recvars).size /periodbin_pv.size)thisvt_recfracs.append(thisbin_thismagcol_recfrac)plt.plot(periodbinned_periods,np.array(thisvt_recfracs),marker='',label='' % vt,ms=)periodbinned_per_vartype_recfracs.append(np.array(thisvt_recfracs))plt.plot(periodbinned_periods, periodbinned_recfrac,marker='',ms=, label='', color='')plt.xlabel(r'')plt.ylabel('')plt.title('')plt.ylim((,))plt.legend(markerscale=)plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))all_aliastypes = recovered_statusfor at in all_aliastypes:thisat_recfracs = []for periodbin_pv, periodbin_rv in zip(periodbinned_periodicvars,periodbinned_recovered_objects):thisbin_thisat_recvars = [x for x in periodbin_rvif (precvar[''][x][''][] == at)]thisbin_thismagcol_recfrac = (np.array(thisbin_thisat_recvars).size /periodbin_pv.size)thisat_recfracs.append(thisbin_thismagcol_recfrac)plt.plot(periodbinned_periods,np.array(thisat_recfracs),marker='',label='' % 
at,ms=)periodbinned_per_aliastype_recfracs.append(np.array(thisat_recfracs))plt.plot(periodbinned_periods, periodbinned_recfrac,marker='',ms=, label='', color='')plt.xlabel(r'')plt.ylabel('')plt.title('')plt.ylim((,))plt.legend(markerscale=)plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,marker='',ms=)plt.xlabel('')plt.ylabel('')plt.title('')plt.ylim((,))plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))for magcol in magcols:thismagcol_recfracs = []for amplitudebin_pv, amplitudebin_rv in zip(amplitudebinned_periodicvars,amplitudebinned_recovered_objects):thisbin_thismagcol_recvars = [x for x in amplitudebin_rvif (precvar[''][x][''] == magcol)]thisbin_thismagcol_recfrac = (np.array(thisbin_thismagcol_recvars).size /amplitudebin_pv.size)thismagcol_recfracs.append(thisbin_thismagcol_recfrac)plt.plot(amplitudebinned_amplitudes,np.array(thismagcol_recfracs),marker='',label='' % magcol,ms=)amplitudebinned_per_magcol_recfracs.append(np.array(thismagcol_recfracs))plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,marker='',ms=, label='', color='')plt.xlabel(r'')plt.ylabel('')plt.title('')plt.ylim((,))plt.legend(markerscale=)plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))all_pfmethods = np.unique(np.concatenate([np.unique(precvar[''][x][''])for x in precvar['']]))for pfm in all_pfmethods:thispf_recfracs = []for amplitudebin_pv, amplitudebin_rv in zip(amplitudebinned_periodicvars,amplitudebinned_recovered_objects):thisbin_thispf_recvars = [x for x in amplitudebin_rvif (precvar[''][x][''] == pfm)]thisbin_thismagcol_recfrac = (np.array(thisbin_thispf_recvars).size /amplitudebin_pv.size)thispf_recfracs.append(thisbin_thismagcol_recfrac)plt.plot(amplitudebinned_amplitudes,np.array(thispf_recfracs),marker='',label='' % pfm.upper(),ms=)amplitudebinned_per_pfmethod_recfracs.append(np.array(thispf_recfracs))plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,marker='',ms=, label='', color='')plt.xlabel(r'')plt.ylabel('')plt.title('')plt.ylim((,))plt.legend(markerscale=)plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))all_vartypes = np.unique([(precvar[''][x][''])for x in precvar[''] if(precvar[''][x][''] is not None)])for vt in all_vartypes:thisvt_recfracs = []for amplitudebin_pv, amplitudebin_rv in zip(amplitudebinned_periodicvars,amplitudebinned_recovered_objects):thisbin_thisvt_recvars = [x for x in amplitudebin_rvif (precvar[''][x][''] == vt)]thisbin_thismagcol_recfrac = (np.array(thisbin_thisvt_recvars).size /amplitudebin_pv.size)thisvt_recfracs.append(thisbin_thismagcol_recfrac)plt.plot(amplitudebinned_amplitudes,np.array(thisvt_recfracs),marker='',label='' % vt,ms=)amplitudebinned_per_vartype_recfracs.append(np.array(thisvt_recfracs))plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,marker='',ms=, label='', color='')plt.xlabel(r'')plt.ylabel('')plt.title('')plt.ylim((,))plt.legend(markerscale=)plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))all_aliastypes = recovered_statusfor at in all_aliastypes:thisat_recfracs = []for amplitudebin_pv, amplitudebin_rv in zip(amplitudebinned_periodicvars,amplitudebinned_recovered_objects):thisbin_thisat_recvars = [x for x in 
amplitudebin_rvif (precvar[''][x][''][] == at)]thisbin_thismagcol_recfrac = (np.array(thisbin_thisat_recvars).size /amplitudebin_pv.size)thisat_recfracs.append(thisbin_thismagcol_recfrac)plt.plot(amplitudebinned_amplitudes,np.array(thisat_recfracs),marker='',label='' % at,ms=)amplitudebinned_per_aliastype_recfracs.append(np.array(thisat_recfracs))plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,marker='',ms=, label='', color='')plt.xlabel(r'')plt.ylabel('')plt.title('')plt.ylim((,))plt.legend(markerscale=)plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))plt.plot(ndetbinned_ndets, ndetbinned_recfrac,marker='',ms=)plt.xlabel('')plt.ylabel('')plt.title('')plt.ylim((,))plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))for magcol in magcols:thismagcol_recfracs = []for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars,ndetbinned_recovered_objects):thisbin_thismagcol_recvars = [x for x in ndetbin_rvif (precvar[''][x][''] == magcol)]thisbin_thismagcol_recfrac = (np.array(thisbin_thismagcol_recvars).size /ndetbin_pv.size)thismagcol_recfracs.append(thisbin_thismagcol_recfrac)plt.plot(ndetbinned_ndets,np.array(thismagcol_recfracs),marker='',label='' % magcol,ms=)ndetbinned_per_magcol_recfracs.append(np.array(thismagcol_recfracs))plt.plot(ndetbinned_ndets, ndetbinned_recfrac,marker='',ms=, label='', color='')plt.xlabel(r'')plt.ylabel('')plt.title('')plt.ylim((,))plt.legend(markerscale=)plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))all_pfmethods = np.unique(np.concatenate([np.unique(precvar[''][x][''])for x in precvar['']]))for pfm in all_pfmethods:thispf_recfracs = []for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars,ndetbinned_recovered_objects):thisbin_thispf_recvars = [x for x in ndetbin_rvif (precvar[''][x][''] == pfm)]thisbin_thismagcol_recfrac = (np.array(thisbin_thispf_recvars).size /ndetbin_pv.size)thispf_recfracs.append(thisbin_thismagcol_recfrac)plt.plot(ndetbinned_ndets,np.array(thispf_recfracs),marker='',label='' % pfm.upper(),ms=)ndetbinned_per_pfmethod_recfracs.append(np.array(thispf_recfracs))plt.plot(ndetbinned_ndets, ndetbinned_recfrac,marker='',ms=, label='', color='')plt.xlabel(r'')plt.ylabel('')plt.title('')plt.ylim((,))plt.legend(markerscale=)plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))all_vartypes = np.unique([(precvar[''][x][''])for x in precvar[''] if(precvar[''][x][''] in PERIODIC_VARTYPES)])for vt in all_vartypes:thisvt_recfracs = []for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars,ndetbinned_recovered_objects):thisbin_thisvt_recvars = [x for x in ndetbin_rvif (precvar[''][x][''] == vt)]thisbin_thismagcol_recfrac = (np.array(thisbin_thisvt_recvars).size /ndetbin_pv.size)thisvt_recfracs.append(thisbin_thismagcol_recfrac)plt.plot(ndetbinned_ndets,np.array(thisvt_recfracs),marker='',label='' % vt,ms=)ndetbinned_per_vartype_recfracs.append(np.array(thisvt_recfracs))plt.plot(ndetbinned_ndets, ndetbinned_recfrac,marker='',ms=, label='', color='')plt.xlabel(r'')plt.ylabel('')plt.title('')plt.ylim((,))plt.legend(markerscale=)plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))all_aliastypes = recovered_statusfor at in all_aliastypes:thisat_recfracs = []for ndetbin_pv, ndetbin_rv in 
zip(ndetbinned_periodicvars,ndetbinned_recovered_objects):thisbin_thisat_recvars = [x for x in ndetbin_rvif (precvar[''][x][''][] == at)]thisbin_thismagcol_recfrac = (np.array(thisbin_thisat_recvars).size /ndetbin_pv.size)thisat_recfracs.append(thisbin_thismagcol_recfrac)plt.plot(ndetbinned_ndets,np.array(thisat_recfracs),marker='',label='' % at,ms=)ndetbinned_per_aliastype_recfracs.append(np.array(thisat_recfracs))plt.plot(ndetbinned_ndets, ndetbinned_recfrac,marker='',ms=, label='', color='')plt.xlabel(r'')plt.ylabel('')plt.title('')plt.ylim((,))plt.legend(markerscale=)plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')outdict[''] = (magbinned_per_magcol_recfracs)outdict[''] = (magbinned_per_pfmethod_recfracs)outdict[''] = (magbinned_per_vartype_recfracs)outdict[''] = (magbinned_per_aliastype_recfracs)outdict[''] = (periodbinned_per_magcol_recfracs)outdict[''] = (periodbinned_per_pfmethod_recfracs)outdict[''] = (periodbinned_per_vartype_recfracs)outdict[''] = (periodbinned_per_aliastype_recfracs)outdict[''] = (amplitudebinned_per_magcol_recfracs)outdict[''] = (amplitudebinned_per_pfmethod_recfracs)outdict[''] = (amplitudebinned_per_vartype_recfracs)outdict[''] = (amplitudebinned_per_aliastype_recfracs)outdict[''] = (ndetbinned_per_magcol_recfracs)outdict[''] = (ndetbinned_per_pfmethod_recfracs)outdict[''] = (ndetbinned_per_vartype_recfracs)outdict[''] = (ndetbinned_per_aliastype_recfracs)overall_recvars_per_pfmethod = []for pfm in all_pfmethods:thispfm_recvars = np.array([x for x in precvar[''] if((x in recovered_periodicvars) and(precvar[''][x][''] == pfm))])overall_recvars_per_pfmethod.append(thispfm_recvars)overall_recvars_per_vartype = []for vt in all_vartypes:thisvt_recvars = np.array([x for x in precvar[''] if((x in recovered_periodicvars) and(precvar[''][x][''] == vt))])overall_recvars_per_vartype.append(thisvt_recvars)overall_recvars_per_magcol = []for mc in magcols:thismc_recvars = np.array([x for x in precvar[''] if((x in recovered_periodicvars) and(precvar[''][x][''] == mc))])overall_recvars_per_magcol.append(thismc_recvars)overall_recvars_per_aliastype = []for at in all_aliastypes:thisat_recvars = np.array([x for x in precvar[''] if((x in recovered_periodicvars) and(precvar[''][x][''] == at))])overall_recvars_per_aliastype.append(thisat_recvars)outdict[''] = np.array([x.size/actual_periodicvars.size for x in overall_recvars_per_pfmethod])outdict[''] = np.array([x.size/actual_periodicvars.size for x in overall_recvars_per_vartype])outdict[''] = np.array([x.size/actual_periodicvars.size for x in overall_recvars_per_magcol])outdict[''] = np.array([x.size/actual_periodicvars.size for x in overall_recvars_per_aliastype])fig = plt.figure(figsize=(*,*))xt = np.arange(len(all_pfmethods))xl = all_pfmethodsplt.barh(xt, outdict[''], )plt.yticks(xt, xl)plt.xlabel('')plt.ylabel('')plt.title('')plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))xt = np.arange(len(magcols))xl = magcolsplt.barh(xt, outdict[''], )plt.yticks(xt, xl)plt.xlabel('')plt.ylabel('')plt.title('')plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))xt = np.arange(len(all_aliastypes))xl = all_aliastypesplt.barh(xt, outdict[''], )plt.yticks(xt, xl)plt.xlabel('')plt.ylabel('')plt.title('')plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))xt = np.arange(len(all_vartypes))xl = 
all_vartypesplt.barh(xt, outdict[''], )plt.yticks(xt, xl)plt.xlabel('')plt.ylabel('')plt.title('')plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')notvariable_recovered_periods = np.concatenate([precvar[''][x]['']for x in precvar[''] if(precvar[''][x][''] is None)])notvariable_recovered_lspvals = np.concatenate([precvar[''][x]['']for x in precvar[''] if(precvar[''][x][''] is None)])sortind = np.argsort(notvariable_recovered_periods)notvariable_recovered_periods = notvariable_recovered_periods[sortind]notvariable_recovered_lspvals = notvariable_recovered_lspvals[sortind]outdict[''] = notvariable_recovered_periodsoutdict[''] = notvariable_recovered_lspvalsfig = plt.figure(figsize=(*,*))plt.plot(notvariable_recovered_periods,notvariable_recovered_lspvals,ms=,linestyle='',marker='')plt.xscale('')plt.xlabel('')plt.ylabel('')plt.title('')plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')fig = plt.figure(figsize=(*,*))plt.hist(notvariable_recovered_periods,bins=np.arange(,,),histtype='')plt.xscale('')plt.xlabel('')plt.ylabel('')plt.title('')plt.savefig(os.path.join(recplotdir,'' % plotfile_ext),dpi=,bbox_inches='')plt.close('')outfile = os.path.join(simbasedir, '')with open(outfile,'') as outfd:pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)return outdict", "docstring": "This plots the results of periodic var recovery.\n\n This function makes plots for periodicvar recovered fraction as a function\n of:\n\n - magbin\n - periodbin\n - amplitude of variability\n - ndet\n\n with plot lines broken down by:\n\n - magcol\n - periodfinder\n - vartype\n - recovery status\n\n The kwargs `magbins`, `periodbins`, `amplitudebins`, and `ndetbins` can be\n used to set the bin lists as needed. The kwarg `minbinsize` controls how\n many elements per bin are required to accept a bin in processing its\n recovery characteristics for mags, periods, amplitudes, and ndets.\n\n Parameters\n ----------\n\n precvar_results : dict or str\n This is either a dict returned by parallel_periodicvar_recovery or the\n pickle created by that function.\n\n aliases_count_as_recovered : list of str or 'all'\n This is used to set which kinds of aliases this function considers as\n 'recovered' objects. Normally, we require that recovered objects have a\n recovery status of 'actual' to indicate the actual period was\n recovered. To change this default behavior, aliases_count_as_recovered\n can be set to a list of alias status strings that should be considered\n as 'recovered' objects as well. Choose from the following alias types::\n\n 'twice' recovered_p = 2.0*actual_p\n 'half' recovered_p = 0.5*actual_p\n 'ratio_over_1plus' recovered_p = actual_p/(1.0+actual_p)\n 'ratio_over_1minus' recovered_p = actual_p/(1.0-actual_p)\n 'ratio_over_1plus_twice' recovered_p = actual_p/(1.0+2.0*actual_p)\n 'ratio_over_1minus_twice' recovered_p = actual_p/(1.0-2.0*actual_p)\n 'ratio_over_1plus_thrice' recovered_p = actual_p/(1.0+3.0*actual_p)\n 'ratio_over_1minus_thrice' recovered_p = actual_p/(1.0-3.0*actual_p)\n 'ratio_over_minus1' recovered_p = actual_p/(actual_p - 1.0)\n 'ratio_over_twice_minus1' recovered_p = actual_p/(2.0*actual_p - 1.0)\n\n or set `aliases_count_as_recovered='all'` to include all of the above in\n the 'recovered' periodic var list.\n\n magbins : np.array\n The magnitude bins to plot the recovery rate results over. 
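The overall recovery-rate summaries near the end of the function are horizontal bar charts saved into a plots subdirectory under `simbasedir`. A sketch of one such plot, assuming the Agg backend for headless batch use; the output filename, figure size, and axis labels are placeholders since the stored literals are redacted:

```python
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')          # headless backend for batch plot generation
import matplotlib.pyplot as plt

def plot_overall_recovery_by_pfmethod(pfmethods, recovery_rates, recplotdir,
                                      plotfile_ext='png'):
    """Horizontal bar chart of overall recovered fraction per period-finder."""
    plt.figure(figsize=(6.4, 4.8))
    ypos = np.arange(len(pfmethods))
    plt.barh(ypos, recovery_rates, 0.8)
    plt.yticks(ypos, pfmethods)
    plt.xlabel('overall recovered fraction')
    plt.ylabel('period-finding method')
    plt.title('recovery rate by period-finder')
    outpath = os.path.join(recplotdir,
                           'recfrac-per-pfmethod.%s' % plotfile_ext)  # name assumed
    plt.savefig(outpath, dpi=100, bbox_inches='tight')
    plt.close('all')
    return outpath
```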
If None, the\n default mag bins will be used: `np.arange(8.0,16.25,0.25)`.\n\n periodbins : np.array\n The period bins to plot the recovery rate results over. If None, the\n default period bins will be used: `np.arange(0.0,500.0,0.5)`.\n\n amplitudebins : np.array\n The variability amplitude bins to plot the recovery rate results\n over. If None, the default amplitude bins will be used:\n `np.arange(0.0,2.0,0.05)`.\n\n ndetbins : np.array\n The ndet bins to plot the recovery rate results over. If None, the\n default ndet bins will be used: `np.arange(0.0,60000.0,1000.0)`.\n\n minbinsize : int\n The minimum number of objects per bin required to plot a bin and its\n recovery fraction on the plot.\n\n plotfile_ext : {'png','pdf'}\n Sets the plot output files' extension.\n\n Returns\n -------\n\n dict\n A dict containing recovery fraction statistics and the paths to each of\n the plots made.", "id": "f14692:m15"} {"signature": "def ec2_ssh(ip_address,keypem_file,username='',raiseonfail=False):", "body": "c = paramiko.client.SSHClient()c.load_system_host_keys()c.set_missing_host_key_policy(paramiko.client.AutoAddPolicy)privatekey = paramiko.RSAKey.from_private_key_file(keypem_file)try:c.connect(ip_address,pkey=privatekey,username='')return cexcept Exception as e:LOGEXCEPTION('''' %(ip_address, keypem_file, username))if raiseonfail:raisereturn None", "docstring": "This opens an SSH connection to the EC2 instance at `ip_address`.\n\n Parameters\n ----------\n\n ip_address : str\n IP address of the AWS EC2 instance to connect to.\n\n keypem_file : str\n The path to the keypair PEM file generated by AWS to allow SSH\n connections.\n\n username : str\n The username to use to login to the EC2 instance.\n\n raiseonfail : bool\n If True, will re-raise whatever Exception caused the operation to fail\n and break out immediately.\n\n Returns\n -------\n\n paramiko.SSHClient\n This has all the usual `paramiko` functionality:\n\n - Use `SSHClient.exec_command(command, environment=None)` to exec a\n shell command.\n\n - Use `SSHClient.open_sftp()` to get a `SFTPClient` for the server. Then\n call SFTPClient.get() and .put() to copy files from and to the server.", "id": "f14694:m0"} {"signature": "def s3_get_file(bucket,filename,local_file,altexts=None,client=None,raiseonfail=False):", "body": "if not client:client = boto3.client('')try:client.download_file(bucket, filename, local_file)return local_fileexcept Exception as e:if altexts is not None:for alt_extension in altexts:split_ext = os.path.splitext(filename)check_file = split_ext[] + alt_extensiontry:client.download_file(bucket,check_file,local_file.replace(split_ext[-],alt_extension))return local_file.replace(split_ext[-],alt_extension)except Exception as e:passelse:LOGEXCEPTION('' % (bucket, filename))if raiseonfail:raisereturn None", "docstring": "This gets a file from an S3 bucket.\n\n Parameters\n ----------\n\n bucket : str\n The AWS S3 bucket name.\n\n filename : str\n The full filename of the file to get from the bucket\n\n local_file : str\n Path to where the downloaded file will be stored.\n\n altexts : None or list of str\n If not None, this is a list of alternate extensions to try for the file\n other than the one provided in `filename`. For example, to get anything\n that's an .sqlite where .sqlite.gz is expected, use altexts=[''] to\n strip the .gz.\n\n client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its operations. 
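`ec2_ssh` wraps `paramiko` to open an SSH session to a worker node using the downloaded keypair PEM file. A minimal sketch of the same flow, passing the username through to `connect` (the 'ec2-user' default is an assumption for Amazon Linux AMIs); the commented usage shows the `exec_command` and SFTP calls mentioned in the docstring:

```python
import paramiko

def ec2_ssh_client(ip_address, keypem_file, username='ec2-user'):
    """Open an SSH connection to an EC2 instance using its keypair PEM file."""
    client = paramiko.client.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy)
    pkey = paramiko.RSAKey.from_private_key_file(keypem_file)
    client.connect(ip_address, pkey=pkey, username=username)
    return client

# usage sketch: run a command and fetch a file over SFTP
# c = ec2_ssh_client('10.0.0.12', 'mykeypair.pem')
# stdin, stdout, stderr = c.exec_command('uname -a')
# print(stdout.read().decode())
# sftp = c.open_sftp()
# sftp.get('/home/ec2-user/results.pkl', 'results.pkl')
```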
Alternatively, pass in an existing `boto3.Client`\n instance to re-use it here.\n\n raiseonfail : bool\n If True, will re-raise whatever Exception caused the operation to fail\n and break out immediately.\n\n Returns\n -------\n\n str\n Path to the downloaded filename or None if the download was\n unsuccessful.", "id": "f14694:m1"} {"signature": "def s3_get_url(url,altexts=None,client=None,raiseonfail=False):", "body": "bucket_item = url.replace('','')bucket_item = bucket_item.split('')bucket = bucket_item[]filekey = ''.join(bucket_item[:])return s3_get_file(bucket,filekey,bucket_item[-],altexts=altexts,client=client,raiseonfail=raiseonfail)", "docstring": "This gets a file from an S3 bucket based on its s3:// URL.\n\n Parameters\n ----------\n\n url : str\n S3 URL to download. This should begin with 's3://'.\n\n altexts : None or list of str\n If not None, this is a list of alternate extensions to try for the file\n other than the one provided in `filename`. For example, to get anything\n that's an .sqlite where .sqlite.gz is expected, use altexts=[''] to\n strip the .gz.\n\n client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its operations. Alternatively, pass in an existing `boto3.Client`\n instance to re-use it here.\n\n raiseonfail : bool\n If True, will re-raise whatever Exception caused the operation to fail\n and break out immediately.\n\n Returns\n -------\n\n str\n Path to the downloaded filename or None if the download was\n unsuccessful. The file will be downloaded into the current working\n directory and will have a filename == basename of the file on S3.", "id": "f14694:m2"} {"signature": "def s3_put_file(local_file, bucket, client=None, raiseonfail=False):", "body": "if not client:client = boto3.client('')try:client.upload_file(local_file, bucket, os.path.basename(local_file))return '' % (bucket, os.path.basename(local_file))except Exception as e:LOGEXCEPTION('' % (local_file,bucket))if raiseonfail:raisereturn None", "docstring": "This uploads a file to S3.\n\n Parameters\n ----------\n\n local_file : str\n Path to the file to upload to S3.\n\n bucket : str\n The AWS S3 bucket to upload the file to.\n\n client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its operations. Alternatively, pass in an existing `boto3.Client`\n instance to re-use it here.\n\n raiseonfail : bool\n If True, will re-raise whatever Exception caused the operation to fail\n and break out immediately.\n\n Returns\n -------\n\n str or None\n If the file upload is successful, returns the s3:// URL of the uploaded\n file. If it failed, will return None.", "id": "f14694:m3"} {"signature": "def s3_delete_file(bucket, filename, client=None, raiseonfail=False):", "body": "if not client:client = boto3.client('')try:resp = client.delete_object(Bucket=bucket, Key=filename)if not resp:LOGERROR('' % (filename,bucket))else:return resp['']except Exception as e:LOGEXCEPTION('' % (filename,bucket))if raiseonfail:raisereturn None", "docstring": "This deletes a file from S3.\n\n Parameters\n ----------\n\n bucket : str\n The AWS S3 bucket to delete the file from.\n\n filename : str\n The full file name of the file to delete, including any prefixes.\n\n client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its operations. 
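The S3 helpers above are thin wrappers over `boto3`: split an `s3://bucket/key` URL, call `download_file` for gets, and `upload_file` (keyed by the local basename) for puts. A compact sketch of those three operations; error handling and the alternate-extension fallback are omitted:

```python
import os
import boto3

def s3_url_to_bucket_key(url):
    """Split an 's3://bucket/path/to/file' URL into (bucket, key)."""
    bucket, _, key = url.replace('s3://', '').partition('/')
    return bucket, key

def s3_download(url, local_file, client=None):
    """Fetch an S3 object given its s3:// URL."""
    client = client or boto3.client('s3')
    bucket, key = s3_url_to_bucket_key(url)
    client.download_file(bucket, key, local_file)
    return local_file

def s3_upload(local_file, bucket, client=None):
    """Upload a local file and return its s3:// URL (key = file basename)."""
    client = client or boto3.client('s3')
    key = os.path.basename(local_file)
    client.upload_file(local_file, bucket, key)
    return 's3://%s/%s' % (bucket, key)
```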
Alternatively, pass in an existing `boto3.Client`\n instance to re-use it here.\n\n raiseonfail : bool\n If True, will re-raise whatever Exception caused the operation to fail\n and break out immediately.\n\n Returns\n -------\n\n str or None\n If the file was successfully deleted, will return the delete-marker\n (https://docs.aws.amazon.com/AmazonS3/latest/dev/DeleteMarker.html). If\n it wasn't, returns None", "id": "f14694:m4"} {"signature": "def sqs_create_queue(queue_name, options=None, client=None):", "body": "if not client:client = boto3.client('')try:if isinstance(options, dict):resp = client.create_queue(QueueName=queue_name, Attributes=options)else:resp = client.create_queue(QueueName=queue_name)if resp is not None:return {'':resp[''],'':queue_name}else:LOGERROR(''% (queue_name, options))return Noneexcept Exception as e:LOGEXCEPTION(''% (queue_name, options))return None", "docstring": "This creates an SQS queue.\n\nParameters\n----------\n\nqueue_name : str\n The name of the queue to create.\n\noptions : dict or None\n A dict of options indicate extra attributes the queue should have.\n See the SQS docs for details. If None, no custom attributes will be\n attached to the queue.\n\nclient : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its operations. Alternatively, pass in an existing `boto3.Client`\n instance to re-use it here.\n\nReturns\n-------\n\ndict\n This returns a dict of the form::\n\n {'url': SQS URL of the queue,\n 'name': name of the queue}", "id": "f14694:m5"} {"signature": "def sqs_delete_queue(queue_url, client=None):", "body": "if not client:client = boto3.client('')try:client.delete_queue(QueueUrl=queue_url)return Trueexcept Exception as e:LOGEXCEPTION(''% (queue_url,))return False", "docstring": "This deletes an SQS queue given its URL\n\n Parameters\n ----------\n\n queue_url : str\n The SQS URL of the queue to delete.\n\n client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its operations. Alternatively, pass in an existing `boto3.Client`\n instance to re-use it here.\n\n Returns\n -------\n\n bool\n True if the queue was deleted successfully. False otherwise.", "id": "f14694:m6"} {"signature": "def sqs_put_item(queue_url,item,delay_seconds=,client=None,raiseonfail=False):", "body": "if not client:client = boto3.client('')try:json_msg = json.dumps(item)resp = client.send_message(QueueUrl=queue_url,MessageBody=json_msg,DelaySeconds=delay_seconds,)if not resp:LOGERROR('' % queue_url)return Noneelse:return respexcept Exception as e:LOGEXCEPTION('' % queue_url)if raiseonfail:raisereturn None", "docstring": "This pushes a dict serialized to JSON to the specified SQS queue.\n\n Parameters\n ----------\n\n queue_url : str\n The SQS URL of the queue to push the object to.\n\n item : dict\n The dict passed in here will be serialized to JSON.\n\n delay_seconds : int\n The amount of time in seconds the pushed item will be held before going\n 'live' and being visible to all queue consumers.\n\n client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its operations. 
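`sqs_create_queue` and `sqs_put_item` form the producer side of the work queue: create a named queue (optionally with extra attributes) and push JSON-serialized work items onto it. A sketch of both; the `DelaySeconds` default and the commented example item are assumptions, and the item keys follow the format described in the `sqs_get_item` docstring that follows:

```python
import json
import boto3

def sqs_make_queue(queue_name, options=None, client=None):
    """Create an SQS queue and return {'url': ..., 'name': ...}."""
    client = client or boto3.client('sqs')
    if isinstance(options, dict):
        resp = client.create_queue(QueueName=queue_name, Attributes=options)
    else:
        resp = client.create_queue(QueueName=queue_name)
    return {'url': resp['QueueUrl'], 'name': queue_name}

def sqs_push(queue_url, item, delay_seconds=0, client=None):
    """JSON-serialize a dict and push it onto the queue."""
    client = client or boto3.client('sqs')
    return client.send_message(QueueUrl=queue_url,
                               MessageBody=json.dumps(item),
                               DelaySeconds=delay_seconds)

# usage sketch: a work item shaped like the format in the sqs_get_item docstring
# (the bucket names below are hypothetical)
# q = sqs_make_queue('lcproc_queue_runpf')
# sqs_push(q['url'], {'target': 's3://my-bucket/lightcurve.pkl',
#                     'action': 'runpf',
#                     'args': (), 'kwargs': {},
#                     'outbucket': 'my-results-bucket'})
```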
Alternatively, pass in an existing `boto3.Client`\n instance to re-use it here.\n\n raiseonfail : bool\n If True, will re-raise whatever Exception caused the operation to fail\n and break out immediately.\n\n Returns\n -------\n\n boto3.Response or None\n If the item was successfully put on the queue, will return the response\n from the service. If it wasn't, will return None.", "id": "f14694:m7"} {"signature": "def sqs_get_item(queue_url,max_items=,wait_time_seconds=,client=None,raiseonfail=False):", "body": "if not client:client = boto3.client('')try:resp = client.receive_message(QueueUrl=queue_url,AttributeNames=[''],MaxNumberOfMessages=max_items,WaitTimeSeconds=wait_time_seconds)if not resp:LOGERROR('' %queue_url)else:messages = []for msg in resp.get('',[]):try:messages.append({'':msg[''],'':msg[''],'':msg[''],'':msg[''],'':json.loads(msg['']),})except Exception as e:LOGEXCEPTION('' %(msg[''], msg['']))continuereturn messagesexcept Exception as e:LOGEXCEPTION('' % queue_url)if raiseonfail:raisereturn None", "docstring": "This gets a single item from the SQS queue.\n\n The `queue_url` is composed of some internal SQS junk plus a\n `queue_name`. For our purposes (`lcproc_aws.py`), the queue name will be\n something like::\n\n lcproc_queue_\n\n where action is one of::\n\n runcp\n runpf\n\n The item is always a JSON object::\n\n {'target': S3 bucket address of the file to process,\n 'action': the action to perform on the file ('runpf', 'runcp', etc.)\n 'args': the action's args as a tuple (not including filename, which is\n generated randomly as a temporary local file),\n 'kwargs': the action's kwargs as a dict,\n 'outbucket: S3 bucket to write the result to,\n 'outqueue': SQS queue to write the processed item's info to (optional)}\n\n The action MUST match the in the queue name for this item to be\n processed.\n\n Parameters\n ----------\n\n queue_url : str\n The SQS URL of the queue to get messages from.\n\n max_items : int\n The number of items to pull from the queue in this request.\n\n wait_time_seconds : int\n This specifies how long the function should block until a message is\n received on the queue. If the timeout expires, an empty list will be\n returned. If the timeout doesn't expire, the function will return a list\n of items received (up to `max_items`).\n\n client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its operations. Alternatively, pass in an existing `boto3.Client`\n instance to re-use it here.\n\n raiseonfail : bool\n If True, will re-raise whatever Exception caused the operation to fail\n and break out immediately.\n\n Returns\n -------\n\n list of dicts or None\n For each item pulled from the queue in this request (up to `max_items`),\n a dict will be deserialized from the retrieved JSON, containing the\n message items and various metadata. The most important item of the\n metadata is the `receipt_handle`, which can be used to acknowledge\n receipt of all items in this request (see `sqs_delete_item` below).\n\n If the queue pull fails outright, returns None. 
If no messages are\n available for this queue pull, returns an empty list.", "id": "f14694:m8"} {"signature": "def sqs_delete_item(queue_url,receipt_handle,client=None,raiseonfail=False):", "body": "if not client:client = boto3.client('')try:client.delete_message(QueueUrl=queue_url,ReceiptHandle=receipt_handle)except Exception as e:LOGEXCEPTION('''' % (receipt_handle, queue_url))if raiseonfail:raise", "docstring": "This deletes a message from the queue, effectively acknowledging its\n receipt.\n\n Call this only when all messages retrieved from the queue have been\n processed, since this will prevent redelivery of these messages to other\n queue workers pulling fromn the same queue channel.\n\n Parameters\n ----------\n\n queue_url : str\n The SQS URL of the queue where we got the messages from. This should be\n the same queue used to retrieve the messages in `sqs_get_item`.\n\n receipt_handle : str\n The receipt handle of the queue message that we're responding to, and\n will acknowledge receipt of. This will be present in each message\n retrieved using `sqs_get_item`.\n\n client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its operations. Alternatively, pass in an existing `boto3.Client`\n instance to re-use it here.\n\n raiseonfail : bool\n If True, will re-raise whatever Exception caused the operation to fail\n and break out immediately.\n\n Returns\n -------\n\n Nothing.", "id": "f14694:m9"} {"signature": "def make_ec2_nodes(security_groupid,subnet_id,keypair_name,iam_instance_profile_arn,launch_instances=,ami='',instance='',ebs_optimized=True,user_data=None,wait_until_up=True,client=None,raiseonfail=False,):", "body": "if not client:client = boto3.client('')if isinstance(user_data, str) and os.path.exists(user_data):with open(user_data,'') as infd:udata = infd.read()elif isinstance(user_data, str):udata = user_dataelse:udata = ('''' % datetime.utcnow().isoformat())try:resp = client.run_instances(ImageId=ami,InstanceType=instance,SecurityGroupIds=[security_groupid,],SubnetId=subnet_id,UserData=udata,IamInstanceProfile={'':iam_instance_profile_arn},InstanceInitiatedShutdownBehavior='',KeyName=keypair_name,MaxCount=launch_instances,MinCount=launch_instances,EbsOptimized=ebs_optimized,)if not resp:LOGERROR('')return Noneelse:instance_dict = {}instance_list = resp.get('',[])if len(instance_list) > :for instance in instance_list:LOGINFO(''''% (instance[''],instance[''],instance[''].isoformat(),instance['']['']))instance_dict[instance['']] = {'':instance[''],'':instance[''],'':instance[''][''],'':instance}if wait_until_up:ready_instances = []LOGINFO('')ntries = curr_try = while ( (curr_try < ntries) or( len(ready_instances) <len(list(instance_dict.keys()))) ):resp = client.describe_instances(InstanceIds=list(instance_dict.keys()),)if len(resp['']) > :for resv in resp['']:if len(resv['']) > :for instance in resv['']:if instance[''][''] == '':ready_instances.append(instance[''])instance_dict[instance['']][''] = ''instance_dict[instance['']][''] = instance['']instance_dict[instance['']][''] = instancecurr_try = curr_try + time.sleep()if len(ready_instances) == len(list(instance_dict.keys())):LOGINFO('')else:LOGWARNING('''')return instance_dictexcept ClientError as e:LOGEXCEPTION('')if raiseonfail:raisereturn Noneexcept Exception as e:LOGEXCEPTION('')if raiseonfail:raisereturn None", "docstring": "This makes new EC2 worker nodes.\n\n This requires a security group ID attached to a VPC config and subnet, a\n keypair generated 
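The consumer side pairs `sqs_get_item` with `sqs_delete_item`: long-poll for messages, JSON-decode each body, process the work item, and only then delete the message by its receipt handle so it is not redelivered to other workers on the same queue. A sketch of that pull/acknowledge loop; `handle_work_item` in the commented usage is a hypothetical processing function:

```python
import json
import boto3

def sqs_pull(queue_url, max_items=1, wait_time_seconds=5, client=None):
    """Long-poll the queue and return a list of decoded messages."""
    client = client or boto3.client('sqs')
    resp = client.receive_message(QueueUrl=queue_url,
                                  MaxNumberOfMessages=max_items,
                                  WaitTimeSeconds=wait_time_seconds)
    messages = []
    for msg in resp.get('Messages', []):
        messages.append({'item': json.loads(msg['Body']),
                         'receipt_handle': msg['ReceiptHandle'],
                         'id': msg['MessageId']})
    return messages

def sqs_ack(queue_url, receipt_handle, client=None):
    """Acknowledge (delete) a message so it is not redelivered to other workers."""
    client = client or boto3.client('sqs')
    client.delete_message(QueueUrl=queue_url, ReceiptHandle=receipt_handle)

# consumer loop sketch: process each work item, then acknowledge it
# for msg in sqs_pull(queue_url, max_items=1, wait_time_seconds=20):
#     handle_work_item(msg['item'])        # hypothetical processing function
#     sqs_ack(queue_url, msg['receipt_handle'])
```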
beforehand, and an IAM role ARN for the instance. See:\n\n https://docs.aws.amazon.com/cli/latest/userguide/tutorial-ec2-ubuntu.html\n\n Use `user_data` to launch tasks on instance launch.\n\n Parameters\n ----------\n\n security_groupid : str\n The security group ID of the AWS VPC where the instances will be\n launched.\n\n subnet_id : str\n The subnet ID of the AWS VPC where the instances will be\n launched.\n\n keypair_name : str\n The name of the keypair to be used to allow SSH access to all instances\n launched here. This corresponds to an already downloaded AWS keypair PEM\n file.\n\n iam_instance_profile_arn : str\n The ARN string corresponding to the AWS instance profile that describes\n the permissions the launched instances have to access other AWS\n resources. Set this up in AWS IAM.\n\n launch_instances : int\n The number of instances to launch in this request.\n\n ami : str\n The Amazon Machine Image ID that describes the OS the instances will use\n after launch. The default ID is Amazon Linux 2 in the US East region.\n\n instance : str\n The instance type to launch. See the following URL for a list of IDs:\n https://aws.amazon.com/ec2/pricing/on-demand/\n\n ebs_optimized : bool\n If True, will enable EBS optimization to speed up IO. This is usually\n True for all instances made available in the last couple of years.\n\n user_data : str or None\n This is either the path to a file on disk that contains a shell-script\n or a string containing a shell-script that will be executed by root\n right after the instance is launched. Use to automatically set up\n workers and queues. If None, will not execute anything at instance\n start up.\n\n wait_until_up : bool\n If True, will not return from this function until all launched instances\n are verified as running by AWS.\n\n client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its operations. Alternatively, pass in an existing `boto3.Client`\n instance to re-use it here.\n\n raiseonfail : bool\n If True, will re-raise whatever Exception caused the operation to fail\n and break out immediately.\n\n Returns\n -------\n\n dict\n Returns launched instance info as a dict, keyed by instance ID.", "id": "f14694:m10"} {"signature": "def delete_ec2_nodes(instance_id_list,client=None):", "body": "if not client:client = boto3.client('')resp = client.terminate_instances(InstanceIds=instance_id_list)return resp", "docstring": "This deletes EC2 nodes and terminates the instances.\n\n Parameters\n ----------\n\n instance_id_list : list of str\n A list of EC2 instance IDs to terminate.\n\n client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its operations. 
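`make_ec2_nodes` calls `run_instances` with the VPC security group, subnet, keypair, instance profile, and an optional user-data shell script, then polls `describe_instances` until every instance reports 'running'. The sketch below covers the same launch parameters but uses the boto3 `instance_running` waiter in place of the manual polling loop; the 'terminate' shutdown behavior is an assumption since the stored value is redacted:

```python
import boto3

def launch_workers(ami, instance_type, security_groupid, subnet_id,
                   keypair_name, iam_instance_profile_arn,
                   count=1, user_data='', client=None):
    """Launch EC2 worker nodes and block until they are running."""
    client = client or boto3.client('ec2')
    resp = client.run_instances(
        ImageId=ami,
        InstanceType=instance_type,
        SecurityGroupIds=[security_groupid],
        SubnetId=subnet_id,
        KeyName=keypair_name,
        IamInstanceProfile={'Arn': iam_instance_profile_arn},
        UserData=user_data,                       # shell script run by root at boot
        InstanceInitiatedShutdownBehavior='terminate',   # assumed value
        MinCount=count,
        MaxCount=count,
    )
    instance_ids = [inst['InstanceId'] for inst in resp['Instances']]
    # equivalent to the describe_instances polling loop in the record above
    client.get_waiter('instance_running').wait(InstanceIds=instance_ids)
    return instance_ids
```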
Alternatively, pass in an existing `boto3.Client`\n instance to re-use it here.\n\n Returns\n -------\n\n Nothing.", "id": "f14694:m11"} {"signature": "def make_spot_fleet_cluster(security_groupid,subnet_id,keypair_name,iam_instance_profile_arn,spot_fleet_iam_role,target_capacity=,spot_price=,expires_days=,allocation_strategy='',instance_types=SPOT_INSTANCE_TYPES,instance_weights=None,instance_ami='',instance_user_data=None,instance_ebs_optimized=True,wait_until_up=True,client=None,raiseonfail=False):", "body": "fleetconfig = copy.deepcopy(SPOT_FLEET_CONFIG)fleetconfig[''] = spot_fleet_iam_rolefleetconfig[''] = allocation_strategyfleetconfig[''] = target_capacityfleetconfig[''] = str(spot_price)fleetconfig[''] = (datetime.utcnow() + timedelta(days=expires_days)).strftime('')if (isinstance(instance_user_data, str) andos.path.exists(instance_user_data)):with open(instance_user_data,'') as infd:udata = base64.b64encode(infd.read()).decode()elif isinstance(instance_user_data, str):udata = base64.b64encode(instance_user_data.encode()).decode()else:udata = ('''' % datetime.utcnow().isoformat())udata = base64.b64encode(udata.encode()).decode()for ind, itype in enumerate(instance_types):thisinstance = SPOT_PERINSTANCE_CONFIG.copy()thisinstance[''] = itypethisinstance[''] = instance_amithisinstance[''] = subnet_idthisinstance[''] = keypair_namethisinstance[''][''] = iam_instance_profile_arnthisinstance[''][] = {'':security_groupid}thisinstance[''] = udatathisinstance[''] = instance_ebs_optimizedif isinstance(instance_weights, list):thisinstance[''] = instance_weights[ind]fleetconfig[''].append(thisinstance)if not client:client = boto3.client('')try:resp = client.request_spot_fleet(SpotFleetRequestConfig=fleetconfig,)if not resp:LOGERROR('')return Noneelse:spot_fleet_reqid = resp['']LOGINFO('' %spot_fleet_reqid)if not wait_until_up:return spot_fleet_reqidelse:ntries = curr_try = while curr_try < ntries:resp = client.describe_spot_fleet_requests(SpotFleetRequestIds=[spot_fleet_reqid])curr_state = resp.get('',[])if len(curr_state) > :curr_state = curr_state[]['']if curr_state == '':LOGINFO('' %spot_fleet_reqid)breakLOGINFO('''' % (curr_try, ntries))curr_try = curr_try + time.sleep()return spot_fleet_reqidexcept ClientError as e:LOGEXCEPTION('')if raiseonfail:raisereturn Noneexcept Exception as e:LOGEXCEPTION('')if raiseonfail:raisereturn None", "docstring": "This makes an EC2 spot-fleet cluster.\n\n This requires a security group ID attached to a VPC config and subnet, a\n keypair generated beforehand, and an IAM role ARN for the instance. See:\n\n https://docs.aws.amazon.com/cli/latest/userguide/tutorial-ec2-ubuntu.html\n\n Use `user_data` to launch tasks on instance launch.\n\n Parameters\n ----------\n\n security_groupid : str\n The security group ID of the AWS VPC where the instances will be\n launched.\n\n subnet_id : str\n The subnet ID of the AWS VPC where the instances will be\n launched.\n\n keypair_name : str\n The name of the keypair to be used to allow SSH access to all instances\n launched here. This corresponds to an already downloaded AWS keypair PEM\n file.\n\n iam_instance_profile_arn : str\n The ARN string corresponding to the AWS instance profile that describes\n the permissions the launched instances have to access other AWS\n resources. Set this up in AWS IAM.\n\n spot_fleet_iam_role : str\n This is the name of AWS IAM role that allows the Spot Fleet Manager to\n scale up and down instances based on demand and instances failing,\n etc. 
Set this up in IAM.\n\n target_capacity : int\n The number of instances to target in the fleet request. The fleet\n manager service will attempt to maintain this number over the lifetime\n of the Spot Fleet Request.\n\n spot_price : float\n The bid price in USD for the instances. This is per hour. Keep this at\n about half the hourly on-demand price of the desired instances to make\n sure your instances aren't taken away by AWS when it needs capacity.\n\n expires_days : int\n The number of days this request is active for. All instances launched by\n this request will live at least this long and will be terminated\n automatically after.\n\n allocation_strategy : {'lowestPrice', 'diversified'}\n The allocation strategy used by the fleet manager.\n\n instance_types : list of str\n List of the instance type to launch. See the following URL for a list of\n IDs: https://aws.amazon.com/ec2/pricing/on-demand/\n\n instance_weights : list of float or None\n If `instance_types` is a list of different instance types, this is the\n relative weight applied towards launching each instance type. This can\n be used to launch a mix of instances in a defined ratio among their\n types. Doing this can make the spot fleet more resilient to AWS taking\n back the instances if it runs out of capacity.\n\n instance_ami : str\n The Amazon Machine Image ID that describes the OS the instances will use\n after launch. The default ID is Amazon Linux 2 in the US East region.\n\n instance_user_data : str or None\n This is either the path to a file on disk that contains a shell-script\n or a string containing a shell-script that will be executed by root\n right after the instance is launched. Use to automatically set up\n workers and queues. If None, will not execute anything at instance\n start up.\n\n instance_ebs_optimized : bool\n If True, will enable EBS optimization to speed up IO. This is usually\n True for all instances made available in the last couple of years.\n\n wait_until_up : bool\n If True, will not return from this function until the spot fleet request\n is acknowledged by AWS.\n\n client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its operations. Alternatively, pass in an existing `boto3.Client`\n instance to re-use it here.\n\n raiseonfail : bool\n If True, will re-raise whatever Exception caused the operation to fail\n and break out immediately.\n\n Returns\n -------\n\n str or None\n This is the spot fleet request ID if successful. Otherwise, returns\n None.", "id": "f14694:m12"} {"signature": "def delete_spot_fleet_cluster(spot_fleet_reqid,client=None,):", "body": "if not client:client = boto3.client('')resp = client.cancel_spot_fleet_requests(SpotFleetRequestIds=[spot_fleet_reqid],TerminateInstances=True)return resp", "docstring": "This deletes a spot-fleet cluster.\n\nParameters\n----------\n\nspot_fleet_reqid : str\n The fleet request ID returned by `make_spot_fleet_cluster`.\n\nclient : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its operations. 
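`make_spot_fleet_cluster` assembles a `SpotFleetRequestConfig` (fleet IAM role, allocation strategy, target capacity, bid price, expiry time, and one launch specification per instance type with base64-encoded user data) and submits it with `request_spot_fleet`. A condensed sketch of that assembly; the numeric defaults, the 'maintain' request type, and `TerminateInstancesWithExpiration` are assumptions, and the per-instance weighting and status polling from the record above are omitted:

```python
import base64
import boto3
from datetime import datetime, timedelta

def request_worker_fleet(spot_fleet_iam_role, instance_types, instance_ami,
                         subnet_id, keypair_name, security_groupid,
                         iam_instance_profile_arn, user_data='',
                         target_capacity=20, spot_price=0.4, expires_days=7,
                         allocation_strategy='lowestPrice', client=None):
    """Assemble a SpotFleetRequestConfig and submit it; returns the request ID."""
    client = client or boto3.client('ec2')
    udata_b64 = base64.b64encode(user_data.encode()).decode()   # UserData must be base64

    launch_specs = [{
        'InstanceType': itype,
        'ImageId': instance_ami,
        'SubnetId': subnet_id,
        'KeyName': keypair_name,
        'IamInstanceProfile': {'Arn': iam_instance_profile_arn},
        'SecurityGroups': [{'GroupId': security_groupid}],
        'UserData': udata_b64,
        'EbsOptimized': True,
    } for itype in instance_types]

    fleetconfig = {
        'IamFleetRole': spot_fleet_iam_role,
        'AllocationStrategy': allocation_strategy,
        'TargetCapacity': target_capacity,
        'SpotPrice': str(spot_price),
        # boto3 also accepts an ISO-8601 string here
        'ValidUntil': datetime.utcnow() + timedelta(days=expires_days),
        'TerminateInstancesWithExpiration': True,
        'LaunchSpecifications': launch_specs,
        'Type': 'maintain',
    }
    resp = client.request_spot_fleet(SpotFleetRequestConfig=fleetconfig)
    return resp['SpotFleetRequestId']
```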
Alternatively, pass in an existing `boto3.Client`\n instance to re-use it here.\n\nReturns\n-------\n\nNothing.", "id": "f14694:m13"} {"signature": "def plot_magseries(times,mags,magsarefluxes=False,errs=None,out=None,sigclip=,normto='',normmingap=,timebin=None,yrange=None,segmentmingap=,plotdpi=):", "body": "stimes, smags, serrs = sigclip_magseries(times,mags,errs,magsarefluxes=magsarefluxes,sigclip=sigclip)if timebin and errs is not None:binned = time_bin_magseries_with_errs(stimes, smags, serrs,binsize=timebin)btimes, bmags, berrs = (binned[''],binned[''],binned[''])elif timebin and errs is None:binned = time_bin_magseries(stimes, smags,binsize=timebin)btimes, bmags, berrs = binned[''], binned[''], Noneelse:btimes, bmags, berrs = stimes, smags, serrsif normto is not False:btimes, bmags = normalize_magseries(btimes, bmags,normto=normto,magsarefluxes=magsarefluxes,mingap=normmingap)btimeorigin = btimes.min()btimes = btimes - btimeoriginif segmentmingap is not None:ntimegroups, timegroups = find_lc_timegroups(btimes,mingap=segmentmingap)if yrange and isinstance(yrange,(list,tuple)) and len(yrange) == :ymin, ymax = yrangeelse:if not magsarefluxes:ymin, ymax = (bmags.min() - ,bmags.max() + )else:ycov = bmags.max() - bmags.min()ymin = bmags.min() - *ycovymax = bmags.max() + *ycovif segmentmingap and ntimegroups > :LOGINFO('' % ntimegroups)fig, axes = plt.subplots(,ntimegroups,sharey=True)fig.set_size_inches(,)axes = np.ravel(axes)for timegroup, ax, axind in zip(timegroups, axes, range(len(axes))):tgtimes = btimes[timegroup]tgmags = bmags[timegroup]if berrs:tgerrs = berrs[timegroup]else:tgerrs = NoneLOGINFO('' % (axind,axind+,btimeorigin + tgtimes.min(),btimeorigin + tgtimes.max()))ax.errorbar(tgtimes, tgmags, fmt='', yerr=tgerrs,markersize=, markeredgewidth=, ecolor='',capsize=)ax.get_xaxis().get_major_formatter().set_useOffset(False)if axind == :ax.get_yaxis().get_major_formatter().set_useOffset(False)ax.spines[''].set_visible(False)ax.yaxis.tick_left()elif < axind < (len(axes)-):ax.spines[''].set_visible(False)ax.spines[''].set_visible(False)ax.tick_params(right='', labelright='',left='',labelleft='')elif axind == (len(axes)-):ax.spines[''].set_visible(False)ax.spines[''].set_visible(True)ax.yaxis.tick_right()if not magsarefluxes:ax.set_ylim(ymax, ymin)else:ax.set_ylim(ymin, ymax)tgrange = tgtimes.max() - tgtimes.min()if tgrange < :ticklocations = [tgrange/]ax.set_xlim(npmin(tgtimes) - , npmax(tgtimes) + )elif < tgrange < :ticklocations = np.linspace(tgtimes.min()+,tgtimes.max()-,num=)ax.set_xlim(npmin(tgtimes) - , npmax(tgtimes) + )elif < tgrange < :ticklocations = np.linspace(tgtimes.min()+,tgtimes.max()-,num=)ax.set_xlim(npmin(tgtimes) - , npmax(tgtimes) + )else:ticklocations = np.linspace(tgtimes.min()+,tgtimes.max()-,num=)ax.set_xlim(npmin(tgtimes) - , npmax(tgtimes) + )ax.xaxis.set_ticks([int(x) for x in ticklocations])plt.subplots_adjust(wspace=)fig.text(, , '' %(btimeorigin, segmentmingap), ha='')if not magsarefluxes:fig.text(, , '', va='', rotation='')else:fig.text(, , '', va='', rotation='')else:fig = plt.figure()fig.set_size_inches(,)plt.errorbar(btimes, bmags, fmt='', yerr=berrs,markersize=, markeredgewidth=, ecolor='',capsize=)plt.grid(color='',alpha=,zorder=,linewidth=,linestyle='')plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)plt.xlabel('' % btimeorigin)if not magsarefluxes:plt.ylim(ymax, ymin)plt.ylabel('')else:plt.ylim(ymin, ymax)plt.ylabel('')if sys.version_info[:] < (,):is_Strio = 
isinstance(out, cStringIO.InputType)else:is_Strio = isinstance(out, Strio)if out and not is_Strio:if out.endswith(''):plt.savefig(out,bbox_inches='',dpi=plotdpi)else:plt.savefig(out,bbox_inches='')plt.close()return os.path.abspath(out)elif out and is_Strio:plt.savefig(out, bbox_inches='', dpi=plotdpi, format='')return outelif not out and dispok:plt.show()plt.close()returnelse:LOGWARNING('''')outfile = ''plt.savefig(outfile,bbox_inches='',dpi=plotdpi)plt.close()return os.path.abspath(outfile)", "docstring": "This plots a magnitude/flux time-series.\n\n Parameters\n ----------\n\n times,mags : np.array\n The mag/flux time-series to plot as a function of time.\n\n magsarefluxes : bool\n Indicates if the input `mags` array is actually an array of flux\n measurements instead of magnitude measurements. If this is set to True,\n then the plot y-axis will be set as appropriate for mag or fluxes. In\n addition:\n\n - if `normto` is 'zero', then the median flux is divided from each\n observation's flux value to yield normalized fluxes with 1.0 as the\n global median.\n - if `normto` is 'globalmedian', then the global median flux value\n across the entire time series is multiplied with each measurement.\n - if `norm` is set to a `float`, then this number is multiplied with the\n flux value for each measurement.\n\n errs : np.array or None\n If this is provided, contains the measurement errors associated with\n each measurement of flux/mag in time-series. Providing this kwarg will\n add errbars to the output plot.\n\n out : str or StringIO/BytesIO object or None\n Sets the output type and target:\n\n - If `out` is a string, will save the plot to the specified file name.\n - If `out` is a StringIO/BytesIO object, will save the plot to that file\n handle. This can be useful to carry out additional operations on the\n output binary stream, or convert it to base64 text for embedding in\n HTML pages.\n - If `out` is None, will save the plot to a file called\n 'magseries-plot.png' in the current working directory.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n normto : {'globalmedian', 'zero'} or a float\n Sets the normalization target::\n\n 'globalmedian' -> norms each mag to the global median of the LC column\n 'zero' -> norms each mag to zero\n a float -> norms each mag to this specified float value.\n\n normmingap : float\n This defines how much the difference between consecutive measurements is\n allowed to be to consider them as parts of different timegroups. 
By\n default it is set to 4.0 days.\n\n timebin : float or None\n The bin size to use to group together measurements closer than this\n amount in time. This is in seconds. If this is None, no time-binning\n will be performed.\n\n yrange : list of two floats or None\n This is used to provide a custom y-axis range to the plot. If None, will\n automatically determine y-axis range.\n\n segmentmingap : float or None\n This controls the minimum length of time (in days) required to consider\n a timegroup in the light curve as a separate segment. This is useful\n when the light curve consists of measurements taken over several\n seasons, so there's lots of dead space in the plot that can be cut out\n to zoom in on the interesting stuff. If `segmentmingap` is not None, the\n magseries plot will be cut in this way and the x-axis will show these\n breaks.\n\n plotdpi : int\n Sets the resolution in DPI for PNG plots (default = 100).\n\n Returns\n -------\n\n str or BytesIO/StringIO object\n Returns based on the input:\n\n - If `out` is a str or None, the path to the generated plot file is\n returned.\n - If `out` is a StringIO/BytesIO object, will return the\n StringIO/BytesIO object to which the plot was written.", "id": "f14695:m0"} {"signature": "def plot_phased_magseries(times,mags,period,epoch='',fitknotfrac=,errs=None,magsarefluxes=False,normto='',normmingap=,sigclip=,phasewrap=True,phasesort=True,phasebin=None,plotphaselim=(-,),yrange=None,xtimenotphase=False,xaxlabel='',yaxlabel=None,modelmags=None,modeltimes=None,modelerrs=None,outfile=None,plotdpi=):", "body": "stimes, smags, serrs = sigclip_magseries(times,mags,errs,magsarefluxes=magsarefluxes,sigclip=sigclip)if normto is not False:stimes, smags = normalize_magseries(stimes, smags,normto=normto,magsarefluxes=magsarefluxes,mingap=normmingap)if ( isinstance(modelmags, np.ndarray) andisinstance(modeltimes, np.ndarray) ):stimes, smags = normalize_magseries(modeltimes, modelmags,normto=normto,magsarefluxes=magsarefluxes,mingap=normmingap)if epoch is None:epoch = stimes.min()elif isinstance(epoch, str) and epoch == '':try:spfit = spline_fit_magseries(stimes, smags, serrs, period,knotfraction=fitknotfrac)epoch = spfit['']['']if len(epoch) != :epoch = epoch[]except Exception as e:LOGEXCEPTION('')epoch = npmin(stimes)if errs is not None:phasedlc = phase_magseries_with_errs(stimes, smags, serrs, period,epoch, wrap=phasewrap,sort=phasesort)plotphase = phasedlc['']plotmags = phasedlc['']ploterrs = phasedlc['']if phasebin:binphasedlc = phase_bin_magseries_with_errs(plotphase, plotmags,ploterrs,binsize=phasebin)binplotphase = binphasedlc['']binplotmags = binphasedlc['']binploterrs = binphasedlc['']else:phasedlc = phase_magseries(stimes, smags, period, epoch,wrap=phasewrap, sort=phasesort)plotphase = phasedlc['']plotmags = phasedlc['']ploterrs = Noneif phasebin:binphasedlc = phase_bin_magseries(plotphase,plotmags,binsize=phasebin)binplotphase = binphasedlc['']binplotmags = binphasedlc['']binploterrs = Nonemodelplotphase, modelplotmags = None, Noneif ( isinstance(modelerrs,np.ndarray) andisinstance(modeltimes,np.ndarray) andisinstance(modelmags,np.ndarray) ):modelphasedlc = phase_magseries_with_errs(modeltimes, modelmags,modelerrs, period, epoch,wrap=phasewrap,sort=phasesort)modelplotphase = modelphasedlc['']modelplotmags = modelphasedlc['']elif ( not isinstance(modelerrs,np.ndarray) andisinstance(modeltimes,np.ndarray) andisinstance(modelmags,np.ndarray) ):modelphasedlc = phase_magseries(modeltimes, modelmags, period, epoch,wrap=phasewrap, 
sort=phasesort)modelplotphase = modelphasedlc['']modelplotmags = modelphasedlc['']if isinstance(outfile, matplotlib.axes.Axes):ax = outfileelse:fig = plt.figure()fig.set_size_inches(,)ax = plt.gca()if xtimenotphase:plotphase *= periodif phasebin:ax.errorbar(plotphase, plotmags, fmt='',color='',yerr=ploterrs,markersize=,markeredgewidth=,ecolor='',capsize=)if xtimenotphase:binplotphase *= periodax.errorbar(binplotphase, binplotmags, fmt='', yerr=binploterrs,markersize=, markeredgewidth=, ecolor='',capsize=)else:ax.errorbar(plotphase, plotmags, fmt='', yerr=ploterrs,markersize=, markeredgewidth=, ecolor='',capsize=)if (isinstance(modelplotphase, np.ndarray) andisinstance(modelplotmags, np.ndarray)):if xtimenotphase:modelplotphase *= periodax.plot(modelplotphase, modelplotmags, zorder=, linewidth=,alpha=, color='')ax.grid(color='',alpha=,zorder=,linewidth=,linestyle='')ax.axvline(,alpha=,linestyle='',color='')if not xtimenotphase:ax.axvline(-,alpha=,linestyle='',color='')ax.axvline(,alpha=,linestyle='',color='')else:ax.axvline(-period*,alpha=,linestyle='',color='')ax.axvline(period*,alpha=,linestyle='',color='')ax.get_yaxis().get_major_formatter().set_useOffset(False)ax.get_xaxis().get_major_formatter().set_useOffset(False)if yrange and isinstance(yrange,(list,tuple)) and len(yrange) == :ymin, ymax = yrangeelse:ymin, ymax = ax.get_ylim()if not yaxlabel:if not magsarefluxes:ax.set_ylim(ymax, ymin)yaxlabel = ''else:ax.set_ylim(ymin, ymax)yaxlabel = ''if not plotphaselim:ax.set_xlim((npmin(plotphase)-,npmax(plotphase)+))else:if xtimenotphase:ax.set_xlim((period*plotphaselim[],period*plotphaselim[]))else:ax.set_xlim((plotphaselim[],plotphaselim[]))ax.set_xlabel(xaxlabel)ax.set_ylabel(yaxlabel)ax.set_title('' % (period, epoch))LOGINFO('' % (period, epoch))if sys.version_info[:] < (,):is_Strio = isinstance(outfile, cStringIO.InputType)else:is_Strio = isinstance(outfile, Strio)if (outfile andnot is_Strio andnot isinstance(outfile, matplotlib.axes.Axes)):if outfile.endswith(''):fig.savefig(outfile, bbox_inches='', dpi=plotdpi)else:fig.savefig(outfile, bbox_inches='')plt.close()return period, epoch, os.path.abspath(outfile)elif outfile and is_Strio:fig.savefig(outfile, bbox_inches='', dpi=plotdpi, format='')return outfileelif outfile and isinstance(outfile, matplotlib.axes.Axes):return outfileelif not outfile and dispok:plt.show()plt.close()return period, epochelse:LOGWARNING('''')outfile = ''plt.savefig(outfile, bbox_inches='', dpi=plotdpi)plt.close()return period, epoch, os.path.abspath(outfile)", "docstring": "Plots a phased magnitude/flux time-series using the period provided.\n\n Parameters\n ----------\n\n times,mags : np.array\n The mag/flux time-series to plot as a function of phase given `period`.\n\n period : float\n The period to use to phase-fold the time-series. Should be the same unit\n as `times` (usually in days)\n\n epoch : 'min' or float or None\n This indicates how to get the epoch to use for phasing the light curve:\n\n - If None, uses the `min(times)` as the epoch for phasing.\n\n - If epoch is the string 'min', then fits a cubic spline to the phased\n light curve using `min(times)` as the initial epoch, finds the\n magnitude/flux minimum of this phased light curve fit, and finally\n uses the that time value as the epoch. 
This is useful for plotting\n planetary transits and eclipsing binary phased light curves so that\n phase 0.0 corresponds to the mid-center time of primary eclipse (or\n transit).\n\n - If epoch is a float, then uses that directly to phase the light\n curve and as the epoch of the phased mag series plot.\n\n fitknotfrac : float\n If `epoch='min'`, this function will attempt to fit a cubic spline to\n the phased light curve to find a time of light minimum as phase\n 0.0. This kwarg sets the number of knots to generate the spline as a\n fraction of the total number of measurements in the input\n time-series. By default, this is set so that 100 knots are used to\n generate a spline for fitting the phased light curve consisting of 10000\n measurements.\n\n errs : np.array or None\n If this is provided, contains the measurement errors associated with\n each measurement of flux/mag in time-series. Providing this kwarg will\n add errbars to the output plot.\n\n magsarefluxes : bool\n Indicates if the input `mags` array is actually an array of flux\n measurements instead of magnitude measurements. If this is set to True,\n then the plot y-axis will be set as appropriate for mag or fluxes.\n\n normto : {'globalmedian', 'zero'} or a float\n Sets the normalization target::\n\n 'globalmedian' -> norms each mag to the global median of the LC column\n 'zero' -> norms each mag to zero\n a float -> norms each mag to this specified float value.\n\n normmingap : float\n This defines how much the difference between consecutive measurements is\n allowed to be to consider them as parts of different timegroups. By\n default it is set to 4.0 days.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n phasewrap : bool\n If this is True, the phased time-series will be wrapped around phase\n 0.0.\n\n phasesort : bool\n If this is True, the phased time-series will be sorted in phase.\n\n phasebin : float or None\n If this is provided, indicates the bin size to use to group together\n measurements closer than this amount in phase. This is in units of\n phase. The binned phased light curve will be overplotted on top of the\n phased light curve. Useful for when one has many measurement points and\n needs to pick out a small trend in an otherwise noisy phased light\n curve.\n\n plotphaselim : sequence of two floats or None\n The x-axis limits to use when making the phased light curve plot. 
By\n default, this is (-0.8, 0.8), which places phase 0.0 at the center of\n the plot and covers approximately two cycles in phase to make any trends\n clear.\n\n yrange : list of two floats or None\n This is used to provide a custom y-axis range to the plot. If None, will\n automatically determine y-axis range.\n\n xtimenotphase : bool\n If True, the x-axis gets units of time (multiplies phase by period).\n\n xaxlabel : str\n Sets the label for the x-axis.\n\n yaxlabel : str or None\n Sets the label for the y-axis. If this is None, the appropriate label\n will be used based on the value of the `magsarefluxes` kwarg.\n\n modeltimes,modelmags,modelerrs : np.array or None\n If all of these are provided, then this function will overplot the\n values of modeltimes and modelmags on top of the actual phased light\n curve. This is useful for plotting variability models on top of the\n light curve (e.g. plotting a Mandel-Agol transit model over the actual\n phased light curve. These arrays will be phased using the already\n provided period and epoch.\n\n outfile : str or StringIO/BytesIO or matplotlib.axes.Axes or None\n - a string filename for the file where the plot will be written.\n - a StringIO/BytesIO object to where the plot will be written.\n - a matplotlib.axes.Axes object to where the plot will be written.\n - if None, plots to 'magseries-phased-plot.png' in current dir.\n\n plotdpi : int\n Sets the resolution in DPI for PNG plots (default = 100).\n\n Returns\n -------\n\n str or StringIO/BytesIO or matplotlib.axes.Axes\n This returns based on the input:\n\n - If `outfile` is a str or None, the path to the generated plot file is\n returned.\n - If `outfile` is a StringIO/BytesIO object, will return the\n StringIO/BytesIO object to which the plot was written.\n - If `outfile` is a matplotlib.axes.Axes object, will return the Axes\n object with the plot elements added to it. One can then directly\n include this Axes object in some other Figure.", "id": "f14695:m1"} {"signature": "def skyview_stamp(ra, decl,survey='',scaling='',flip=True,convolvewith=None,forcefetch=False,cachedir='',timeout=,retry_failed=False,savewcsheader=True,verbose=False):", "body": "stampdict = get_stamp(ra, decl,survey=survey,scaling=scaling,forcefetch=forcefetch,cachedir=cachedir,timeout=timeout,retry_failed=retry_failed,verbose=verbose)if stampdict:stampfits = pyfits.open(stampdict[''])header = stampfits[].headerframe = stampfits[].datastampfits.close()if flip:frame = np.flipud(frame)if verbose:LOGINFO(''% (ra, decl))if convolvewith:convolved = aconv.convolve(frame, convolvewith)if savewcsheader:return convolved, headerelse:return convolvedelse:if savewcsheader:return frame, headerelse:return frameelse:LOGERROR(''''% (ra, decl, survey, scaling))return None", "docstring": "This downloads a DSS FITS stamp centered on the coordinates specified.\n\n This wraps the function :py:func:`astrobase.services.skyview.get_stamp`,\n which downloads Digitized Sky Survey stamps in FITS format from the NASA\n SkyView service:\n\n https://skyview.gsfc.nasa.gov/current/cgi/query.pl\n\n Also adds some useful operations on top of the FITS file returned.\n\n Parameters\n ----------\n\n ra,decl : float\n The center coordinates for the stamp in decimal degrees.\n\n survey : str\n The survey name to get the stamp from. This is one of the\n values in the 'SkyView Surveys' option boxes on the SkyView\n webpage. 
Currently, we've only tested using 'DSS2 Red' as the value for\n this kwarg, but the other ones should work in principle.\n\n scaling : str\n This is the pixel value scaling function to use.\n\n flip : bool\n Will flip the downloaded image top to bottom. This should usually be\n True because matplotlib and FITS have different image coord origin\n conventions. Alternatively, set this to False and use the\n `origin='lower'` in any call to `matplotlib.pyplot.imshow` when plotting\n this image.\n\n convolvewith : astropy.convolution Kernel object or None\n If `convolvewith` is an astropy.convolution Kernel object from:\n\n http://docs.astropy.org/en/stable/convolution/kernels.html\n\n then, this function will return the stamp convolved with that\n kernel. This can be useful to see effects of wide-field telescopes (like\n the HATNet and HATSouth lenses) degrading the nominal 1 arcsec/px of\n DSS, causing blending of targets and any variability.\n\n forcefetch : bool\n If True, will disregard any existing cached copies of the stamp already\n downloaded corresponding to the requested center coordinates and\n redownload the FITS from the SkyView service.\n\n cachedir : str\n This is the path to the astrobase cache directory. All downloaded FITS\n stamps are stored here as .fits.gz files so we can immediately respond\n with the cached copy when a request is made for a coordinate center\n that's already been downloaded.\n\n timeout : float\n Sets the timeout in seconds to wait for a response from the NASA SkyView\n service.\n\n retry_failed : bool\n If the initial request to SkyView fails, and this is True, will retry\n until it succeeds.\n\n savewcsheader : bool\n If this is True, also returns the WCS header of the downloaded FITS\n stamp in addition to the FITS image itself. 
Useful for projecting object\n coordinates onto image xy coordinates for visualization.\n\n verbose : bool\n If True, indicates progress.\n\n Returns\n -------\n\n tuple or array or None\n This returns based on the value of `savewcsheader`:\n\n - If `savewcsheader=True`, returns a tuple:\n (FITS stamp image as a numpy array, FITS header)\n - If `savewcsheader=False`, returns only the FITS stamp image as numpy\n array.\n - If the stamp retrieval fails, returns None.", "id": "f14695:m2"} {"signature": "def fits_finder_chart(fitsfile,outfile,fitsext=,wcsfrom=None,scale=ZScaleInterval(),stretch=LinearStretch(),colormap=plt.cm.gray_r,findersize=None,finder_coordlimits=None,overlay_ra=None,overlay_decl=None,overlay_pltopts={'':'','':,'':'','':,'':''},overlay_zoomcontain=False,grid=False,gridcolor=''):", "body": "if wcsfrom is None:hdulist = pyfits.open(fitsfile)img, hdr = hdulist[fitsext].data, hdulist[fitsext].headerhdulist.close()frameshape = (hdr[''], hdr[''])w = WCS(hdr)elif os.path.exists(wcsfrom):hdulist = pyfits.open(fitsfile)img, hdr = hdulist[fitsext].data, hdulist[fitsext].headerhdulist.close()frameshape = (hdr[''], hdr[''])w = WCS(wcsfrom)else:LOGERROR('' %fitsfile)return Noneif findersize is None:fig = plt.figure(figsize=(frameshape[]/,frameshape[]/))else:fig = plt.figure(figsize=findersize)if (overlay_zoomcontain andoverlay_ra is not None andoverlay_decl is not None):finder_coordlimits = [overlay_ra.min()-/,overlay_ra.max()+/,overlay_decl.min()-/,overlay_decl.max()+/]if finder_coordlimits and isinstance(finder_coordlimits, (list,tuple)):minra, maxra, mindecl, maxdecl = finder_coordlimitscntra, cntdecl = (minra + maxra)/, (mindecl + maxdecl)/pixelcoords = w.all_world2pix([[minra, mindecl],[maxra, maxdecl],[cntra, cntdecl]],)x1, y1, x2, y2 = (int(pixelcoords[,]),int(pixelcoords[,]),int(pixelcoords[,]),int(pixelcoords[,]))xmin = x1 if x1 < x2 else x2xmax = x2 if x2 > x1 else x1ymin = y1 if y1 < y2 else y2ymax = y2 if y2 > y1 else y1whdr = w.to_header()whdr[''] = (xmax - xmin)/whdr[''] = (ymax - ymin)/whdr[''] = cntrawhdr[''] = cntdeclwhdr[''] = xmax - xminwhdr[''] = ymax - yminw = WCS(whdr)else:xmin, xmax, ymin, ymax = , hdr[''], , hdr['']fig.add_subplot(,projection=w)if scale is not None and stretch is not None:norm = ImageNormalize(img,interval=scale,stretch=stretch)plt.imshow(img[ymin:ymax,xmin:xmax],origin='',cmap=colormap,norm=norm)else:plt.imshow(img[ymin:ymax,xmin:xmax],origin='',cmap=colormap)if grid:plt.grid(color=gridcolor,ls='',lw=)if overlay_ra is not None and overlay_decl is not None:our_pltopts = dict(transform=plt.gca().get_transform(''),marker='',markersize=,markerfacecolor='',markeredgewidth=,markeredgecolor='',rasterized=True,linestyle='')if overlay_pltopts is not None and isinstance(overlay_pltopts,dict):our_pltopts.update(overlay_pltopts)plt.gca().set_autoscale_on(False)plt.gca().plot(overlay_ra, overlay_decl,**our_pltopts)plt.xlabel('')plt.ylabel('')xax = plt.gca().coords[]yax = plt.gca().coords[]yax.set_major_formatter('')xax.set_major_formatter('')plt.savefig(outfile, dpi=)plt.close('')return outfile", "docstring": "This makes a finder chart for a given FITS with an optional object\n position overlay.\n\n Parameters\n ----------\n\n fitsfile : str\n `fitsfile` is the FITS file to use to make the finder chart.\n\n outfile : str\n `outfile` is the name of the output file. 
This can be a png or pdf or\n whatever else matplotlib can write given a filename and extension.\n\n fitsext : int\n Sets the FITS extension in `fitsfile` to use to extract the image array\n from.\n\n wcsfrom : str or None\n If `wcsfrom` is None, the WCS to transform the RA/Dec to pixel x/y will\n be taken from the FITS header of `fitsfile`. If this is not None, it\n must be a FITS or similar file that contains a WCS header in its first\n extension.\n\n scale : astropy.visualization.Interval object\n `scale` sets the normalization for the FITS pixel values. This is an\n astropy.visualization Interval object.\n See http://docs.astropy.org/en/stable/visualization/normalization.html\n for details on `scale` and `stretch` objects.\n\n stretch : astropy.visualization.Stretch object\n `stretch` sets the stretch function for mapping FITS pixel values to\n output pixel values. This is an astropy.visualization Stretch object.\n See http://docs.astropy.org/en/stable/visualization/normalization.html\n for details on `scale` and `stretch` objects.\n\n colormap : matplotlib Colormap object\n `colormap` is a matplotlib color map object to use for the output image.\n\n findersize : None or tuple of two ints\n If `findersize` is None, the output image size will be set by the NAXIS1\n and NAXIS2 keywords in the input `fitsfile` FITS header. Otherwise,\n `findersize` must be a tuple with the intended x and y size of the image\n in inches (all output images will use a DPI = 100).\n\n finder_coordlimits : list of four floats or None\n If not None, `finder_coordlimits` sets x and y limits for the plot,\n effectively zooming it in if these are smaller than the dimensions of\n the FITS image. This should be a list of the form: [minra, maxra,\n mindecl, maxdecl] all in decimal degrees.\n\n overlay_ra, overlay_decl : np.array or None\n `overlay_ra` and `overlay_decl` are ndarrays containing the RA and Dec\n values to overplot on the image as an overlay. If these are both None,\n then no overlay will be plotted.\n\n overlay_pltopts : dict\n `overlay_pltopts` controls how the overlay points will be plotted. This\n a dict with standard matplotlib marker, etc. kwargs as key-val pairs,\n e.g. 'markersize', 'markerfacecolor', etc. The default options make red\n outline circles at the location of each object in the overlay.\n\n overlay_zoomcontain : bool\n `overlay_zoomcontain` controls if the finder chart will be zoomed to\n just contain the overlayed points. Everything outside the footprint of\n these points will be discarded.\n\n grid : bool\n `grid` sets if a grid will be made on the output image.\n\n gridcolor : str\n `gridcolor` sets the color of the grid lines. This is a usual matplotib\n color spec string.\n\n Returns\n -------\n\n str or None\n The filename of the generated output image if successful. 
None\n otherwise.", "id": "f14695:m3"} {"signature": "def plot_periodbase_lsp(lspinfo, outfile=None, plotdpi=):", "body": "if isinstance(lspinfo,str) and os.path.exists(lspinfo):LOGINFO('' % lspinfo)with open(lspinfo,'') as infd:lspinfo = pickle.load(infd)try:periods = lspinfo['']lspvals = lspinfo['']bestperiod = lspinfo['']lspmethod = lspinfo['']plt.plot(periods, lspvals)plt.xscale('',basex=)plt.xlabel('')plt.ylabel(PLOTYLABELS[lspmethod])plottitle = '' % (METHODSHORTLABELS[lspmethod],bestperiod)plt.title(plottitle)for bestperiod, bestpeak in zip(lspinfo[''],lspinfo['']):plt.annotate('' % bestperiod,xy=(bestperiod, bestpeak), xycoords='',xytext=(,), textcoords='',arrowprops=dict(arrowstyle=\"\"),fontsize='')plt.grid(color='',alpha=,zorder=,linewidth=,linestyle='')if outfile and isinstance(outfile, str):if outfile.endswith(''):plt.savefig(outfile,bbox_inches='',dpi=plotdpi)else:plt.savefig(outfile,bbox_inches='')plt.close()return os.path.abspath(outfile)elif dispok:plt.show()plt.close()returnelse:LOGWARNING('''')outfile = ''plt.savefig(outfile,bbox_inches='',dpi=plotdpi)plt.close()return os.path.abspath(outfile)except Exception as e:LOGEXCEPTION('')return", "docstring": "Makes a plot of periodograms obtained from `periodbase` functions.\n\n This takes the output dict produced by any `astrobase.periodbase`\n period-finder function or a pickle filename containing such a dict and makes\n a periodogram plot.\n\n Parameters\n ----------\n\n lspinfo : dict or str\n If lspinfo is a dict, it must be a dict produced by an\n `astrobase.periodbase` period-finder function or a dict from your own\n period-finder function or routine that is of the form below with at\n least these keys::\n\n {'periods': np.array of all periods searched by the period-finder,\n 'lspvals': np.array of periodogram power value for each period,\n 'bestperiod': a float value that is the period with the highest\n peak in the periodogram, i.e. the most-likely actual\n period,\n 'method': a three-letter code naming the period-finder used; must\n be one of the keys in the `METHODLABELS` dict above,\n 'nbestperiods': a list of the periods corresponding to periodogram\n peaks (`nbestlspvals` below) to annotate on the\n periodogram plot so they can be called out\n visually,\n 'nbestlspvals': a list of the power values associated with\n periodogram peaks to annotate on the periodogram\n plot so they can be called out visually; should be\n the same length as `nbestperiods` above}\n\n If lspinfo is a str, then it must be a path to a pickle file that\n contains a dict of the form described above.\n\n outfile : str or None\n If this is a str, will write the periodogram plot to the file specified\n by this string. If this is None, will write to a file called\n 'lsp-plot.png' in the current working directory.\n\n plotdpi : int\n Sets the resolution in DPI of the output periodogram plot PNG file.\n\n Returns\n -------\n\n str\n Absolute path to the periodogram plot file created.", "id": "f14695:m4"} {"signature": "def angle_wrap(angle, radians=False):", "body": "if radians:wrapped = angle % (*pi_value)if wrapped < :wrapped = *pi_value + wrappedelse:wrapped = angle % if wrapped < :wrapped = + wrappedreturn wrapped", "docstring": "Wraps the input angle to 360.0 degrees.\n\n Parameters\n ----------\n\n angle : float\n The angle to wrap around 360.0 deg.\n\n radians : bool\n If True, will assume that the input is in radians. The output will then\n also be in radians.\n\n Returns\n -------\n\n float\n Wrapped angle. 
If radians is True: input is assumed to be in radians,\n output is also in radians.", "id": "f14696:m0"} {"signature": "def decimal_to_dms(decimal_value):", "body": "if decimal_value < :negative = Truedec_val = fabs(decimal_value)else:negative = Falsedec_val = decimal_valuedegrees = trunc(dec_val)minutes_deg = dec_val - degreesminutes_mm = minutes_deg * minutes_out = trunc(minutes_mm)seconds = (minutes_mm - minutes_out)*if negative:degrees = degreesreturn '', degrees, minutes_out, secondselse:return '', degrees, minutes_out, seconds", "docstring": "Converts from decimal degrees (for declination coords) to DD:MM:SS.\n\n Parameters\n ----------\n\n decimal_value : float\n A decimal value to convert to degrees, minutes, seconds sexagesimal\n format.\n\n Returns\n -------\n\n tuple\n A four element tuple is returned: (sign, HH, MM, SS.ssss...)", "id": "f14696:m1"} {"signature": "def decimal_to_hms(decimal_value):", "body": "if decimal_value < :dec_wrapped = + decimal_valueelse:dec_wrapped = decimal_valuedec_hours = dec_wrapped/if dec_hours < :negative = Truedec_val = fabs(dec_hours)else:negative = Falsedec_val = dec_hourshours = trunc(dec_val)minutes_hrs = dec_val - hoursminutes_mm = minutes_hrs * minutes_out = trunc(minutes_mm)seconds = (minutes_mm - minutes_out)*if negative:hours = -hoursreturn hours, minutes_out, secondselse:return hours, minutes_out, seconds", "docstring": "Converts from decimal degrees (for RA coords) to HH:MM:SS.\n\n Parameters\n ----------\n\n decimal_value : float\n A decimal value to convert to hours, minutes, seconds. Negative values\n will be wrapped around 360.0.\n\n Returns\n -------\n\n tuple\n A three element tuple is returned: (HH, MM, SS.ssss...)", "id": "f14696:m2"} {"signature": "def hms_str_to_tuple(hms_string):", "body": "if '' in hms_string:separator = ''else:separator = ''hh, mm, ss = hms_string.split(separator)return int(hh), int(mm), float(ss)", "docstring": "Converts a string of the form HH:MM:SS or HH MM SS to a tuple of the form\n (HH, MM, SS).\n\n Parameters\n ----------\n\n hms_string : str\n A RA coordinate string of the form 'HH:MM:SS.sss' or 'HH MM SS.sss'.\n\n Returns\n -------\n\n tuple\n A three element tuple is returned (HH, MM, SS.ssss...)", "id": "f14696:m3"} {"signature": "def dms_str_to_tuple(dms_string):", "body": "if '' in dms_string:separator = ''else:separator = ''sign_dd, mm, ss = dms_string.split(separator)if sign_dd.startswith('') or sign_dd.startswith(''):sign, dd = sign_dd[], sign_dd[:]else:sign, dd = '', sign_ddreturn sign, int(dd), int(mm), float(ss)", "docstring": "Converts a string of the form [+-]DD:MM:SS or [+-]DD MM SS to a tuple of\n the form (sign, DD, MM, SS).\n\n Parameters\n ----------\n\n dms_string : str\n A declination coordinate string of the form '[+-]DD:MM:SS.sss' or\n '[+-]DD MM SS.sss'. The sign in front of DD is optional. 
If it's not\n there, this function will assume that the coordinate string is a\n positive value.\n\n Returns\n -------\n\n tuple\n A four element tuple of the form: (sign, DD, MM, SS.ssss...).", "id": "f14696:m4"} {"signature": "def hms_str_to_decimal(hms_string):", "body": "return hms_to_decimal(*hms_str_to_tuple(hms_string))", "docstring": "Converts a HH:MM:SS string to decimal degrees.\n\n Parameters\n ----------\n\n hms_string : str\n A right ascension coordinate string of the form: 'HH:MM:SS.sss'\n or 'HH MM SS.sss'.\n\n Returns\n -------\n\n float\n The RA value in decimal degrees (wrapped around 360.0 deg if necessary.)", "id": "f14696:m5"} {"signature": "def dms_str_to_decimal(dms_string):", "body": "return dms_to_decimal(*dms_str_to_tuple(dms_string))", "docstring": "Converts a DD:MM:SS string to decimal degrees.\n\n Parameters\n ----------\n\n dms_string : str\n A declination coordinate string of the form: '[+-]DD:MM:SS.sss'\n or '[+-]DD MM SS.sss'.\n\n Returns\n -------\n\n float\n The declination value in decimal degrees.", "id": "f14696:m6"} {"signature": "def hms_to_decimal(hours, minutes, seconds, returndeg=True):", "body": "if hours > :return Noneelse:dec_hours = fabs(hours) + fabs(minutes)/ + fabs(seconds)/if returndeg:dec_deg = dec_hours*if dec_deg < :dec_deg = dec_deg + dec_deg = dec_deg % return dec_degelse:return dec_hours", "docstring": "Converts from HH, MM, SS to a decimal value.\n\n Parameters\n ----------\n\n hours : int\n The HH part of a RA coordinate.\n\n minutes : int\n The MM part of a RA coordinate.\n\n seconds : float\n The SS.sss part of a RA coordinate.\n\n returndeg : bool\n If this is True, then will return decimal degrees as the output.\n If this is False, then will return decimal HOURS as the output.\n Decimal hours are sometimes used in FITS headers.\n\n Returns\n -------\n\n float\n The right ascension value in either decimal degrees or decimal hours\n depending on `returndeg`.", "id": "f14696:m7"} {"signature": "def dms_to_decimal(sign, degrees, minutes, seconds):", "body": "dec_deg = fabs(degrees) + fabs(minutes)/ + fabs(seconds)/if sign == '':return -dec_degelse:return dec_deg", "docstring": "Converts from DD:MM:SS to a decimal value.\n\n Parameters\n ----------\n\n sign : {'+', '-', ''}\n The sign part of a Dec coordinate.\n\n degrees : int\n The DD part of a Dec coordinate.\n\n minutes : int\n The MM part of a Dec coordinate.\n\n seconds : float\n The SS.sss part of a Dec coordinate.\n\n Returns\n -------\n\n float\n The declination value in decimal degrees.", "id": "f14696:m8"} {"signature": "def great_circle_dist(ra1, dec1, ra2, dec2):", "body": "in_ra1 = ra1 % in_ra1 = in_ra1 + *(in_ra1 < )in_ra2 = ra2 % in_ra2 = in_ra2 + *(in_ra1 < )ra1_rad, dec1_rad = np.deg2rad(in_ra1), np.deg2rad(dec1)ra2_rad, dec2_rad = np.deg2rad(in_ra2), np.deg2rad(dec2)del_dec2 = (dec2_rad - dec1_rad)/del_ra2 = (ra2_rad - ra1_rad)/sin_dist = np.sqrt(np.sin(del_dec2) * np.sin(del_dec2) +np.cos(dec1_rad) * np.cos(dec2_rad) *np.sin(del_ra2) * np.sin(del_ra2))dist_rad = * np.arcsin(sin_dist)return np.rad2deg(dist_rad)*", "docstring": "Calculates the great circle angular distance between two coords.\n\n This calculates the great circle angular distance in arcseconds between two\n coordinates (ra1,dec1) and (ra2,dec2). 
This is basically a clone of GCIRC\n from the IDL Astrolib.\n\n Parameters\n ----------\n\n ra1,dec1 : float or array-like\n The first coordinate's right ascension and declination value(s) in\n decimal degrees.\n\n ra2,dec2 : float or array-like\n The second coordinate's right ascension and declination value(s) in\n decimal degrees.\n\n Returns\n -------\n\n float or array-like\n Great circle distance between the two coordinates in arseconds.\n\n Notes\n -----\n\n If (`ra1`, `dec1`) is scalar and (`ra2`, `dec2`) is scalar: the result is a\n float distance in arcseconds.\n\n If (`ra1`, `dec1`) is scalar and (`ra2`, `dec2`) is array-like: the result\n is an np.array with distance in arcseconds between (`ra1`, `dec1`) and each\n element of (`ra2`, `dec2`).\n\n If (`ra1`, `dec1`) is array-like and (`ra2`, `dec2`) is scalar: the result\n is an np.array with distance in arcseconds between (`ra2`, `dec2`) and each\n element of (`ra1`, `dec1`).\n\n If (`ra1`, `dec1`) and (`ra2`, `dec2`) are both array-like: the result is an\n np.array with the pair-wise distance in arcseconds between each element of\n the two coordinate lists. In this case, if the input array-likes are not the\n same length, then excess elements of the longer one will be ignored.", "id": "f14696:m9"} {"signature": "def xmatch_basic(ra1, dec1, ra2, dec2, match_radius=):", "body": "min_dist_arcsec = np.min(great_circle_dist(ra1,dec1,ra2,dec2))if (min_dist_arcsec < match_radius):return (True,min_dist_arcsec)else:return (False,min_dist_arcsec)", "docstring": "Finds the closest object in (`ra2`, `dec2`) to scalar coordinate pair\n (`ra1`, `dec1`) and returns the distance in arcseconds.\n\n This is a quick matcher that uses the `great_circle_dist` function to find\n the closest object in (`ra2`, `dec2`) within `match_radius` arcseconds to\n (`ra1`, `dec1`). (`ra1`, `dec1`) must be a scalar pair, while\n (`ra2`, `dec2`) must be array-likes of the same lengths.\n\n Parameters\n ----------\n\n ra1,dec1 : float\n Coordinate of the object to find matches to. In decimal degrees.\n\n ra2,dec2 : array-like\n The coordinates that will be searched for matches. In decimal degrees.\n\n match_radius : float\n The match radius in arcseconds to use for the match.\n\n Returns\n -------\n\n tuple\n A two element tuple like the following::\n\n (True -> no match found or False -> found a match,\n minimum distance between target and list in arcseconds)", "id": "f14696:m10"} {"signature": "def xmatch_neighbors(ra1, dec1,ra2, dec2,match_radius=,includeself=False,sortresults=True):", "body": "dist = great_circle_dist(ra1,dec1,ra2,dec2)if includeself:match_dist_ind = np.where(dist < match_radius)else:match_dist_ind = np.where((dist < match_radius) & (dist > ))if len(match_dist_ind) > :match_dists = dist[match_dist_ind]dist_sort_ind = np.argsort(match_dists)if sortresults:match_dist_ind = (match_dist_ind[])[dist_sort_ind]min_dist = np.min(match_dists)return (True,min_dist,match_dist_ind,match_dists[dist_sort_ind])else:return (False,)", "docstring": "Finds the closest objects in (`ra2`, `dec2`) to scalar coordinate pair\n (`ra1`, `dec1`) and returns the indices of the objects that match.\n\n This is a quick matcher that uses the `great_circle_dist` function to find\n the closest object in (`ra2`, `dec2`) within `match_radius` arcseconds to\n (`ra1`, `dec1`). (`ra1`, `dec1`) must be a scalar pair, while\n (`ra2`, `dec2`) must be array-likes of the same lengths.\n\n Parameters\n ----------\n\n ra1,dec1 : float\n Coordinate of the object to find matches to. 
In decimal degrees.\n\n ra2,dec2 : array-like\n The coordinates that will be searched for matches. In decimal degrees.\n\n match_radius : float\n The match radius in arcseconds to use for the match.\n\n includeself : bool\n If this is True, the object itself will be included in the match\n results.\n\n sortresults : bool\n If this is True, the match indices will be sorted by distance.\n\n Returns\n -------\n\n tuple\n A tuple like the following is returned::\n\n (True -> matches found or False -> no matches found,\n minimum distance between target and list,\n np.array of indices where list of coordinates is\n closer than `match_radius` arcseconds from the target,\n np.array of distances in arcseconds)", "id": "f14696:m11"} {"signature": "def make_kdtree(ra, decl):", "body": "cosdecl = np.cos(np.radians(decl))sindecl = np.sin(np.radians(decl))cosra = np.cos(np.radians(ra))sinra = np.sin(np.radians(ra))xyz = np.column_stack((cosra*cosdecl,sinra*cosdecl, sindecl))kdt = sps.cKDTree(xyz,copy_data=True)return kdt", "docstring": "This makes a `scipy.spatial.CKDTree` on (`ra`, `decl`).\n\n Parameters\n ----------\n\n ra,decl : array-like\n The right ascension and declination coordinate pairs in decimal degrees.\n\n Returns\n -------\n\n `scipy.spatial.CKDTree`\n The cKDTRee object generated by this function is returned and can be\n used to run various spatial queries.", "id": "f14696:m12"} {"signature": "def conesearch_kdtree(kdtree,racenter,declcenter,searchradiusdeg,conesearchworkers=):", "body": "cosdecl = np.cos(np.radians(declcenter))sindecl = np.sin(np.radians(declcenter))cosra = np.cos(np.radians(racenter))sinra = np.sin(np.radians(racenter))xyzdist = * np.sin(np.radians(searchradiusdeg)/)kdtindices = kdtree.query_ball_point([cosra*cosdecl,sinra*cosdecl,sindecl],xyzdist,n_jobs=conesearchworkers)return kdtindices", "docstring": "This does a cone-search around (`racenter`, `declcenter`) in `kdtree`.\n\n Parameters\n ----------\n\n kdtree : scipy.spatial.CKDTree\n This is a kdtree object generated by the `make_kdtree` function.\n\n racenter,declcenter : float or array-like\n This is the center coordinate to run the cone-search around in decimal\n degrees. If this is an np.array, will search for all coordinate pairs in\n the array.\n\n searchradiusdeg : float\n The search radius to use for the cone-search in decimal degrees.\n\n conesearchworkers : int\n The number of parallel workers to launch for the cone-search.\n\n Returns\n -------\n\n list or np.array of lists\n If (`racenter`, `declcenter`) is a single coordinate, this will return a\n list of the indices of the matching objects in the kdtree. 
If\n (`racenter`, `declcenter`) are array-likes, this will return an object\n array containing lists of matching object indices for each coordinate\n searched.", "id": "f14696:m13"} {"signature": "def xmatch_kdtree(kdtree,extra, extdecl,xmatchdistdeg,closestonly=True):", "body": "ext_cosdecl = np.cos(np.radians(extdecl))ext_sindecl = np.sin(np.radians(extdecl))ext_cosra = np.cos(np.radians(extra))ext_sinra = np.sin(np.radians(extra))ext_xyz = np.column_stack((ext_cosra*ext_cosdecl,ext_sinra*ext_cosdecl,ext_sindecl))ext_xyzdist = * np.sin(np.radians(xmatchdistdeg)/)our_kdt = kdtreeext_kdt = sps.cKDTree(ext_xyz)extkd_matchinds = our_kdt.query_ball_tree(ext_kdt, ext_xyzdist)ext_matchinds = []kdt_matchinds = []for extind, mind in enumerate(extkd_matchinds):if len(mind) > :kdt_matchinds.append(extind)if closestonly:ext_matchinds.append(mind[])else:ext_matchinds.append(mind)return kdt_matchinds, ext_matchinds", "docstring": "This cross-matches between `kdtree` and (`extra`, `extdecl`) arrays.\n\n Returns the indices of the kdtree and the indices of extra, extdecl that\n xmatch successfully.\n\n Parameters\n ----------\n\n kdtree : scipy.spatial.CKDTree\n This is a kdtree object generated by the `make_kdtree` function.\n\n extra,extdecl : array-like\n These are np.arrays of 'external' coordinates in decimal degrees that\n will be cross-matched against the objects in `kdtree`.\n\n xmatchdistdeg : float\n The match radius to use for the cross-match in decimal degrees.\n\n closestonly : bool\n If closestonly is True, then this function returns only the closest\n matching indices in (extra, extdecl) for each object in kdtree if there\n are any matches. Otherwise, it returns a list of indices in (extra,\n extdecl) for all matches within xmatchdistdeg between kdtree and (extra,\n extdecl).\n\n Returns\n -------\n\n tuple of lists\n Returns a tuple of the form::\n\n (list of `kdtree` indices matching to external objects,\n list of all `extra`/`extdecl` indices that match to each\n element in `kdtree` within the specified cross-match distance)", "id": "f14696:m14"} {"signature": "def total_proper_motion(pmra, pmdecl, decl):", "body": "pm = np.sqrt( pmdecl*pmdecl + pmra*pmra*np.cos(np.radians(decl)) *np.cos(np.radians(decl)) )return pm", "docstring": "This calculates the total proper motion of an object.\n\n Parameters\n ----------\n\n pmra : float or array-like\n The proper motion(s) in right ascension, measured in mas/yr.\n\n pmdecl : float or array-like\n The proper motion(s) in declination, measured in mas/yr.\n\n decl : float or array-like\n The declination of the object(s) in decimal degrees.\n\n Returns\n -------\n\n float or array-like\n The total proper motion(s) of the object(s) in mas/yr.", "id": "f14696:m15"} {"signature": "def reduced_proper_motion(mag, propermotion):", "body": "rpm = mag + *np.log10(propermotion/)return rpm", "docstring": "This calculates the reduced proper motion using the mag measurement\n provided.\n\n Parameters\n ----------\n\n mag : float or array-like\n The magnitude(s) to use to calculate the reduced proper motion(s).\n\n propermotion : float or array-like\n The total proper motion of the object(s). Use the `total_proper_motion`\n function to calculate this if you have `pmra`, `pmdecl`, and `decl`\n values. `propermotion` should be in mas/yr.\n\n Returns\n -------\n\n float or array-like\n The reduced proper motion for the object(s). 
This is effectively a\n measure of the absolute magnitude in the band provided.", "id": "f14696:m16"} {"signature": "def equatorial_to_galactic(ra, decl, equinox=''):", "body": "radecl = SkyCoord(ra=ra*u.degree, dec=decl*u.degree, equinox=equinox)gl = radecl.galactic.l.degreegb = radecl.galactic.b.degreereturn gl, gb", "docstring": "This converts from equatorial coords to galactic coords.\n\n Parameters\n ----------\n\n ra : float or array-like\n Right ascension values(s) in decimal degrees.\n\n decl : float or array-like\n Declination value(s) in decimal degrees.\n\n equinox : str\n The equinox that the coordinates are measured at. This must be\n recognizable by Astropy's `SkyCoord` class.\n\n Returns\n -------\n\n tuple of (float, float) or tuple of (np.array, np.array)\n The galactic coordinates (l, b) for each element of the input\n (`ra`, `decl`).", "id": "f14696:m17"} {"signature": "def galactic_to_equatorial(gl, gb):", "body": "gal = SkyCoord(gl*u.degree, gl*u.degree, frame='')transformed = gal.transform_to('')return transformed.ra.degree, transformed.dec.degree", "docstring": "This converts from galactic coords to equatorial coordinates.\n\n Parameters\n ----------\n\n gl : float or array-like\n Galactic longitude values(s) in decimal degrees.\n\n gb : float or array-like\n Galactic latitude value(s) in decimal degrees.\n\n Returns\n -------\n\n tuple of (float, float) or tuple of (np.array, np.array)\n The equatorial coordinates (RA, DEC) for each element of the input\n (`gl`, `gb`) in decimal degrees. These are reported in the ICRS frame.", "id": "f14696:m18"} {"signature": "def xieta_from_radecl(inra, indecl,incenterra, incenterdecl,deg=True):", "body": "if deg:ra = np.radians(inra)decl = np.radians(indecl)centerra = np.radians(incenterra)centerdecl = np.radians(incenterdecl)else:ra = inradecl = indeclcenterra = incenterracenterdecl = incenterdeclcdecc = np.cos(centerdecl)sdecc = np.sin(centerdecl)crac = np.cos(centerra)srac = np.sin(centerra)uu = np.cos(decl)*np.cos(ra)vv = np.cos(decl)*np.sin(ra)ww = np.sin(decl)uun = uu*cdecc*crac + vv*cdecc*srac + ww*sdeccvvn = -uu*srac + vv*cracwwn = -uu*sdecc*crac - vv*sdecc*srac + ww*cdeccdenom = vvn*vvn + wwn*wwnaunn = np.zeros_like(uun)aunn[uun >= ] = aunn[uun < ] = np.arccos(uun)xi, eta = np.zeros_like(aunn), np.zeros_like(aunn)xi[(aunn <= ) | (denom <= )] = eta[(aunn <= ) | (denom <= )] = sdenom = np.sqrt(denom)xi[(aunn > ) | (denom > )] = aunn*vvn/sdenometa[(aunn > ) | (denom > )] = aunn*wwn/sdenomif deg:return np.degrees(xi), np.degrees(eta)else:return xi, eta", "docstring": "This returns the image-plane projected xi-eta coords for inra, indecl.\n\n Parameters\n ----------\n\n inra,indecl : array-like\n The equatorial coordinates to get the xi, eta coordinates for in decimal\n degrees or radians.\n\n incenterra,incenterdecl : float\n The center coordinate values to use to calculate the plane-projected\n coordinates around.\n\n deg : bool\n If this is True, the input angles are assumed to be in degrees and the\n output is in degrees as well.\n\n Returns\n -------\n\n tuple of np.arrays\n This is the (`xi`, `eta`) coordinate pairs corresponding to the\n image-plane projected coordinates for each pair of input equatorial\n coordinates in (`inra`, `indecl`).", "id": "f14696:m19"} {"signature": "def get_starfeatures(lcfile,outdir,kdtree,objlist,lcflist,neighbor_radius_arcsec,deredden=True,custom_bandpasses=None,lcformat='',lcformatdir=None):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if 
formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Nonetry:lcdict = readerfunc(lcfile)if ( (isinstance(lcdict, (list, tuple))) and(isinstance(lcdict[], dict)) ):lcdict = lcdict[]resultdict = {'':lcdict[''],'':lcdict[''],'':os.path.basename(lcfile)}coordfeat = starfeatures.coord_features(lcdict[''])colorfeat = starfeatures.color_features(lcdict[''],deredden=deredden,custom_bandpasses=custom_bandpasses)colorclass = starfeatures.color_classification(colorfeat,coordfeat)nbrfeat = starfeatures.neighbor_gaia_features(lcdict[''],kdtree,neighbor_radius_arcsec)if nbrfeat[''].size > :nbrfeat[''] = objlist[nbrfeat['']]nbrfeat[''] = objlist[nbrfeat['']]nbrfeat[''] = lcflist[nbrfeat['']]else:nbrfeat[''] = np.array([])nbrfeat[''] = np.array([])nbrfeat[''] = np.array([])resultdict.update(coordfeat)resultdict.update(colorfeat)resultdict.update(colorclass)resultdict.update(nbrfeat)outfile = os.path.join(outdir,'' %squeeze(resultdict['']).replace('',''))with open(outfile, '') as outfd:pickle.dump(resultdict, outfd, protocol=)return outfileexcept Exception as e:LOGEXCEPTION('' %(os.path.basename(lcfile), e))return None", "docstring": "This runs the functions from :py:func:`astrobase.varclass.starfeatures`\n on a single light curve file.\n\n Parameters\n ----------\n\n lcfile : str\n This is the LC file to extract star features for.\n\n outdir : str\n This is the directory to write the output pickle to.\n\n kdtree: scipy.spatial.cKDTree\n This is a `scipy.spatial.KDTree` or `cKDTree` used to calculate neighbor\n proximity features. This is for the light curve catalog this object is\n in.\n\n objlist : np.array\n This is a Numpy array of object IDs in the same order as the\n `kdtree.data` np.array. This is for the light curve catalog this object\n is in.\n\n lcflist : np.array\n This is a Numpy array of light curve filenames in the same order as\n `kdtree.data`. This is for the light curve catalog this object is in.\n\n neighbor_radius_arcsec : float\n This indicates the radius in arcsec to search for neighbors for this\n object using the light curve catalog's `kdtree`, `objlist`, `lcflist`,\n and in GAIA.\n\n deredden : bool\n This controls if the colors and any color classifications will be\n dereddened using 2MASS DUST.\n\n custom_bandpasses : dict or None\n This is a dict used to define any custom bandpasses in the\n `in_objectinfo` dict you want to make this function aware of and\n generate colors for. Use the format below for this dict::\n\n {\n '':{'dustkey':'',\n 'label':''\n 'colors':[['-',\n ' - '],\n ['-',\n ' - ']]},\n .\n ...\n .\n '':{'dustkey':'',\n 'label':''\n 'colors':[['-',\n ' - '],\n ['-',\n ' - ']]},\n }\n\n Where:\n\n `bandpass_key` is a key to use to refer to this bandpass in the\n `objectinfo` dict, e.g. 'sdssg' for SDSS g band\n\n `twomass_dust_key` is the key to use in the 2MASS DUST result table for\n reddening per band-pass. For example, given the following DUST result\n table (using http://irsa.ipac.caltech.edu/applications/DUST/)::\n\n |Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|\n |char |float |float |float |float |float|\n | |microns| |mags | |mags |\n CTIO U 0.3734 4.107 0.209 4.968 0.253\n CTIO B 0.4309 3.641 0.186 4.325 0.221\n CTIO V 0.5517 2.682 0.137 3.240 0.165\n .\n .\n ...\n\n The `twomass_dust_key` for 'vmag' would be 'CTIO V'. 
If you want to\n skip DUST lookup and want to pass in a specific reddening magnitude\n for your bandpass, use a float for the value of\n `twomass_dust_key`. If you want to skip DUST lookup entirely for\n this bandpass, use None for the value of `twomass_dust_key`.\n\n `band_label` is the label to use for this bandpass, e.g. 'W1' for\n WISE-1 band, 'u' for SDSS u, etc.\n\n The 'colors' list contains color definitions for all colors you want\n to generate using this bandpass. this list contains elements of the\n form::\n\n ['-',' - ']\n\n where the the first item is the bandpass keys making up this color,\n and the second item is the label for this color to be used by the\n frontends. An example::\n\n ['sdssu-sdssg','u - g']\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n Returns\n -------\n\n str\n Path to the output pickle containing all of the star features for this\n object.", "id": "f14697:m1"} {"signature": "def _starfeatures_worker(task):", "body": "try:(lcfile, outdir, kdtree, objlist,lcflist, neighbor_radius_arcsec,deredden, custom_bandpasses, lcformat, lcformatdir) = taskreturn get_starfeatures(lcfile, outdir,kdtree, objlist, lcflist,neighbor_radius_arcsec,deredden=deredden,custom_bandpasses=custom_bandpasses,lcformat=lcformat,lcformatdir=lcformatdir)except Exception as e:return None", "docstring": "This wraps starfeatures.", "id": "f14697:m2"} {"signature": "def serial_starfeatures(lclist,outdir,lc_catalog_pickle,neighbor_radius_arcsec,maxobjects=None,deredden=True,custom_bandpasses=None,lcformat='',lcformatdir=None):", "body": "if not os.path.exists(outdir):os.makedirs(outdir)if maxobjects:lclist = lclist[:maxobjects]with open(lc_catalog_pickle, '') as infd:kdt_dict = pickle.load(infd)kdt = kdt_dict['']objlist = kdt_dict['']['']objlcfl = kdt_dict['']['']tasks = [(x, outdir, kdt, objlist, objlcfl,neighbor_radius_arcsec,deredden, custom_bandpasses,lcformat, lcformatdir) for x in lclist]for task in tqdm(tasks):result = _starfeatures_worker(task)return result", "docstring": "This drives the `get_starfeatures` function for a collection of LCs.\n\n Parameters\n ----------\n\n lclist : list of str\n The list of light curve file names to process.\n\n outdir : str\n The output directory where the results will be placed.\n\n lc_catalog_pickle : str\n The path to a catalog containing at a dict with least:\n\n - an object ID array accessible with `dict['objects']['objectid']`\n\n - an LC filename array accessible with `dict['objects']['lcfname']`\n\n - a `scipy.spatial.KDTree` or `cKDTree` object to use for finding\n neighbors for each object accessible with `dict['kdtree']`\n\n A catalog pickle of the form needed can be produced using\n :py:func:`astrobase.lcproc.catalogs.make_lclist` or\n :py:func:`astrobase.lcproc.catalogs.filter_lclist`.\n\n neighbor_radius_arcsec : float\n This indicates the radius in arcsec to search for neighbors for this\n object using the light curve catalog's `kdtree`, `objlist`, `lcflist`,\n and in GAIA.\n\n 
maxobjects : int\n The number of objects to process from `lclist`.\n\n deredden : bool\n This controls if the colors and any color classifications will be\n dereddened using 2MASS DUST.\n\n custom_bandpasses : dict or None\n This is a dict used to define any custom bandpasses in the\n `in_objectinfo` dict you want to make this function aware of and\n generate colors for. Use the format below for this dict::\n\n {\n '':{'dustkey':'',\n 'label':''\n 'colors':[['-',\n ' - '],\n ['-',\n ' - ']]},\n .\n ...\n .\n '':{'dustkey':'',\n 'label':''\n 'colors':[['-',\n ' - '],\n ['-',\n ' - ']]},\n }\n\n Where:\n\n `bandpass_key` is a key to use to refer to this bandpass in the\n `objectinfo` dict, e.g. 'sdssg' for SDSS g band\n\n `twomass_dust_key` is the key to use in the 2MASS DUST result table for\n reddening per band-pass. For example, given the following DUST result\n table (using http://irsa.ipac.caltech.edu/applications/DUST/)::\n\n |Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|\n |char |float |float |float |float |float|\n | |microns| |mags | |mags |\n CTIO U 0.3734 4.107 0.209 4.968 0.253\n CTIO B 0.4309 3.641 0.186 4.325 0.221\n CTIO V 0.5517 2.682 0.137 3.240 0.165\n .\n .\n ...\n\n The `twomass_dust_key` for 'vmag' would be 'CTIO V'. If you want to\n skip DUST lookup and want to pass in a specific reddening magnitude\n for your bandpass, use a float for the value of\n `twomass_dust_key`. If you want to skip DUST lookup entirely for\n this bandpass, use None for the value of `twomass_dust_key`.\n\n `band_label` is the label to use for this bandpass, e.g. 'W1' for\n WISE-1 band, 'u' for SDSS u, etc.\n\n The 'colors' list contains color definitions for all colors you want\n to generate using this bandpass. this list contains elements of the\n form::\n\n ['-',' - ']\n\n where the the first item is the bandpass keys making up this color,\n and the second item is the label for this color to be used by the\n frontends. An example::\n\n ['sdssu-sdssg','u - g']\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. 
Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n Returns\n -------\n\n list of str\n A list of all star features pickles produced.", "id": "f14697:m3"} {"signature": "def parallel_starfeatures(lclist,outdir,lc_catalog_pickle,neighbor_radius_arcsec,maxobjects=None,deredden=True,custom_bandpasses=None,lcformat='',lcformatdir=None,nworkers=NCPUS):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif not os.path.exists(outdir):os.makedirs(outdir)if maxobjects:lclist = lclist[:maxobjects]with open(lc_catalog_pickle, '') as infd:kdt_dict = pickle.load(infd)kdt = kdt_dict['']objlist = kdt_dict['']['']objlcfl = kdt_dict['']['']tasks = [(x, outdir, kdt, objlist, objlcfl,neighbor_radius_arcsec,deredden, custom_bandpasses, lcformat) for x in lclist]with ProcessPoolExecutor(max_workers=nworkers) as executor:resultfutures = executor.map(_starfeatures_worker, tasks)results = [x for x in resultfutures]resdict = {os.path.basename(x):y for (x,y) in zip(lclist, results)}return resdict", "docstring": "This runs `get_starfeatures` in parallel for all light curves in `lclist`.\n\n Parameters\n ----------\n\n lclist : list of str\n The list of light curve file names to process.\n\n outdir : str\n The output directory where the results will be placed.\n\n lc_catalog_pickle : str\n The path to a catalog containing at a dict with least:\n\n - an object ID array accessible with `dict['objects']['objectid']`\n\n - an LC filename array accessible with `dict['objects']['lcfname']`\n\n - a `scipy.spatial.KDTree` or `cKDTree` object to use for finding\n neighbors for each object accessible with `dict['kdtree']`\n\n A catalog pickle of the form needed can be produced using\n :py:func:`astrobase.lcproc.catalogs.make_lclist` or\n :py:func:`astrobase.lcproc.catalogs.filter_lclist`.\n\n neighbor_radius_arcsec : float\n This indicates the radius in arcsec to search for neighbors for this\n object using the light curve catalog's `kdtree`, `objlist`, `lcflist`,\n and in GAIA.\n\n maxobjects : int\n The number of objects to process from `lclist`.\n\n deredden : bool\n This controls if the colors and any color classifications will be\n dereddened using 2MASS DUST.\n\n custom_bandpasses : dict or None\n This is a dict used to define any custom bandpasses in the\n `in_objectinfo` dict you want to make this function aware of and\n generate colors for. Use the format below for this dict::\n\n {\n '':{'dustkey':'',\n 'label':''\n 'colors':[['-',\n ' - '],\n ['-',\n ' - ']]},\n .\n ...\n .\n '':{'dustkey':'',\n 'label':''\n 'colors':[['-',\n ' - '],\n ['-',\n ' - ']]},\n }\n\n Where:\n\n `bandpass_key` is a key to use to refer to this bandpass in the\n `objectinfo` dict, e.g. 'sdssg' for SDSS g band\n\n `twomass_dust_key` is the key to use in the 2MASS DUST result table for\n reddening per band-pass. For example, given the following DUST result\n table (using http://irsa.ipac.caltech.edu/applications/DUST/)::\n\n |Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|\n |char |float |float |float |float |float|\n | |microns| |mags | |mags |\n CTIO U 0.3734 4.107 0.209 4.968 0.253\n CTIO B 0.4309 3.641 0.186 4.325 0.221\n CTIO V 0.5517 2.682 0.137 3.240 0.165\n .\n .\n ...\n\n The `twomass_dust_key` for 'vmag' would be 'CTIO V'. 
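A minimal usage sketch for `serial_starfeatures`, assembled from the signature and docstring above; it is not taken from the source. The module path follows the `astrobase.lcproc.lcsfeatures` cross-reference in these docstrings, while every file path, the glob pattern, the numeric arguments, and the `'my-lcformat'` format key are hypothetical placeholders::

    import glob
    from astrobase.lcproc.lcsfeatures import serial_starfeatures

    # light curve pickles to process; the glob pattern is only an example
    lclist = sorted(glob.glob('/data/lightcurves/*-lc.pkl'))

    outpickles = serial_starfeatures(
        lclist,
        '/data/starfeatures',      # outdir: where the feature pickles go
        '/data/lc-catalog.pkl',    # catalog pickle with 'objects' and 'kdtree'
        30.0,                      # neighbor_radius_arcsec
        maxobjects=100,            # only process the first 100 light curves
        deredden=True,             # deredden colors using 2MASS DUST
        lcformat='my-lcformat',    # formatkey registered via register_lcformat
    )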
If you want to\n skip DUST lookup and want to pass in a specific reddening magnitude\n for your bandpass, use a float for the value of\n `twomass_dust_key`. If you want to skip DUST lookup entirely for\n this bandpass, use None for the value of `twomass_dust_key`.\n\n `band_label` is the label to use for this bandpass, e.g. 'W1' for\n WISE-1 band, 'u' for SDSS u, etc.\n\n The 'colors' list contains color definitions for all colors you want\n to generate using this bandpass. this list contains elements of the\n form::\n\n ['-',' - ']\n\n where the the first item is the bandpass keys making up this color,\n and the second item is the label for this color to be used by the\n frontends. An example::\n\n ['sdssu-sdssg','u - g']\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n nworkers : int\n The number of parallel workers to launch.\n\n Returns\n -------\n\n dict\n A dict with key:val pairs of the input light curve filename and the\n output star features pickle for each LC processed.", "id": "f14697:m4"} {"signature": "def parallel_starfeatures_lcdir(lcdir,outdir,lc_catalog_pickle,neighbor_radius_arcsec,fileglob=None,maxobjects=None,deredden=True,custom_bandpasses=None,lcformat='',lcformatdir=None,nworkers=NCPUS,recursive=True):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif not fileglob:fileglob = dfileglobLOGINFO('' % (lcformat, lcdir))if recursive is False:matching = glob.glob(os.path.join(lcdir, fileglob))else:if sys.version_info[:] > (,):matching = glob.glob(os.path.join(lcdir,'',fileglob),recursive=True)else:walker = os.walk(lcdir)matching = []for root, dirs, _files in walker:for sdir in dirs:searchpath = os.path.join(root,sdir,fileglob)foundfiles = glob.glob(searchpath)if foundfiles:matching.extend(foundfiles)if matching and len(matching) > :LOGINFO('' %len(matching))return parallel_starfeatures(matching,outdir,lc_catalog_pickle,neighbor_radius_arcsec,deredden=deredden,custom_bandpasses=custom_bandpasses,maxobjects=maxobjects,lcformat=lcformat,lcformatdir=lcformatdir,nworkers=nworkers)else:LOGERROR('' % (lcformat,lcdir))return None", "docstring": "This runs parallel star feature extraction for a directory of LCs.\n\n Parameters\n ----------\n\n lcdir : list of str\n The directory to search for light curves.\n\n outdir : str\n The output directory where the results will be placed.\n\n lc_catalog_pickle : str\n The path to a catalog containing at a dict with least:\n\n - an object ID array accessible with `dict['objects']['objectid']`\n\n - an LC filename array accessible with `dict['objects']['lcfname']`\n\n - a `scipy.spatial.KDTree` or `cKDTree` object to use for finding\n neighbors for each object accessible with `dict['kdtree']`\n\n A catalog pickle of the form needed can be produced using\n 
:py:func:`astrobase.lcproc.catalogs.make_lclist` or\n :py:func:`astrobase.lcproc.catalogs.filter_lclist`.\n\n neighbor_radius_arcsec : float\n This indicates the radius in arcsec to search for neighbors for this\n object using the light curve catalog's `kdtree`, `objlist`, `lcflist`,\n and in GAIA.\n\n fileglob : str\n The UNIX file glob to use to search for the light curves in `lcdir`. If\n None, the default value for the light curve format specified will be\n used.\n\n maxobjects : int\n The number of objects to process from `lclist`.\n\n deredden : bool\n This controls if the colors and any color classifications will be\n dereddened using 2MASS DUST.\n\n custom_bandpasses : dict or None\n This is a dict used to define any custom bandpasses in the\n `in_objectinfo` dict you want to make this function aware of and\n generate colors for. Use the format below for this dict::\n\n {\n '':{'dustkey':'',\n 'label':''\n 'colors':[['-',\n ' - '],\n ['-',\n ' - ']]},\n .\n ...\n .\n '':{'dustkey':'',\n 'label':''\n 'colors':[['-',\n ' - '],\n ['-',\n ' - ']]},\n }\n\n Where:\n\n `bandpass_key` is a key to use to refer to this bandpass in the\n `objectinfo` dict, e.g. 'sdssg' for SDSS g band\n\n `twomass_dust_key` is the key to use in the 2MASS DUST result table for\n reddening per band-pass. For example, given the following DUST result\n table (using http://irsa.ipac.caltech.edu/applications/DUST/)::\n\n |Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|\n |char |float |float |float |float |float|\n | |microns| |mags | |mags |\n CTIO U 0.3734 4.107 0.209 4.968 0.253\n CTIO B 0.4309 3.641 0.186 4.325 0.221\n CTIO V 0.5517 2.682 0.137 3.240 0.165\n .\n .\n ...\n\n The `twomass_dust_key` for 'vmag' would be 'CTIO V'. If you want to\n skip DUST lookup and want to pass in a specific reddening magnitude\n for your bandpass, use a float for the value of\n `twomass_dust_key`. If you want to skip DUST lookup entirely for\n this bandpass, use None for the value of `twomass_dust_key`.\n\n `band_label` is the label to use for this bandpass, e.g. 'W1' for\n WISE-1 band, 'u' for SDSS u, etc.\n\n The 'colors' list contains color definitions for all colors you want\n to generate using this bandpass. this list contains elements of the\n form::\n\n ['-',' - ']\n\n where the the first item is the bandpass keys making up this color,\n and the second item is the label for this color to be used by the\n frontends. An example::\n\n ['sdssu-sdssg','u - g']\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. 
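The `custom_bandpasses` example in the docstrings above has its literal values elided. The sketch below reconstructs the shape those docstrings describe (a bandpass key mapping to 'dustkey', 'label', and 'colors' entries); the specific bandpass names, DUST-table filter names, and labels are hypothetical::

    custom_bandpasses = {
        'sdssg': {
            'dustkey': 'SDSS g',           # filter name in the 2MASS DUST table (assumed)
            'label': 'g',                  # label used by the frontends
            'colors': [
                ['sdssu-sdssg', 'u - g'],  # color built from two bandpass keys
                ['sdssg-sdssr', 'g - r'],
            ],
        },
        'irac1': {
            'dustkey': None,               # None skips the DUST lookup entirely;
                                           # a float would be used directly as the reddening
            'label': 'I1',
            'colors': [['irac1-irac2', 'I1 - I2']],
        },
    }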
Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n nworkers : int\n The number of parallel workers to launch.\n\n Returns\n -------\n\n dict\n A dict with key:val pairs of the input light curve filename and the\n output star features pickle for each LC processed.", "id": "f14697:m5"} {"signature": "def get_periodicfeatures(pfpickle,lcbasedir,outdir,fourierorder=,transitparams=(-,,),ebparams=(-,,,),pdiff_threshold=,sidereal_threshold=,sampling_peak_multiplier=,sampling_startp=None,sampling_endp=None,starfeatures=None,timecols=None,magcols=None,errcols=None,lcformat='',lcformatdir=None,sigclip=,verbose=True,raiseonfail=False):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(fileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif pfpickle.endswith(''):infd = gzip.open(pfpickle)else:infd = open(pfpickle)pf = pickle.load(infd)infd.close()lcfile = os.path.join(lcbasedir, pf[''])objectid = pf['']if '' in pf:kwargs = pf['']else:kwargs = Noneif kwargs and '' in kwargs and timecols is None:timecols = kwargs['']elif not kwargs and not timecols:timecols = dtimecolsif kwargs and '' in kwargs and magcols is None:magcols = kwargs['']elif not kwargs and not magcols:magcols = dmagcolsif kwargs and '' in kwargs and errcols is None:errcols = kwargs['']elif not kwargs and not errcols:errcols = derrcolsif not os.path.exists(lcfile):LOGERROR(\"\" % (lcfile, objectid))return Noneif starfeatures is not None and os.path.exists(starfeatures):with open(starfeatures,'') as infd:starfeat = pickle.load(infd)if starfeat[''].size > :nbr_full_lcf = starfeat[''][]if os.path.exists(os.path.join(lcbasedir,os.path.basename(nbr_full_lcf))):nbrlcf = os.path.join(lcbasedir,os.path.basename(nbr_full_lcf))elif os.path.exists(nbr_full_lcf):nbrlcf = nbr_full_lcfelse:LOGWARNING(\"\"\"\"\"\" %(os.path.basename(nbr_full_lcf),os.path.dirname(nbr_full_lcf),lcbasedir))nbrlcf = Noneelse:nbrlcf = Noneelse:nbrlcf = Nonetry:lcdict = readerfunc(lcfile)if ( (isinstance(lcdict, (list, tuple))) and(isinstance(lcdict[], dict)) ):lcdict = lcdict[]if nbrlcf is not None:nbrlcdict = readerfunc(nbrlcf)if ( (isinstance(nbrlcdict, (list, tuple))) and(isinstance(nbrlcdict[], dict)) ):nbrlcdict = nbrlcdict[]outfile = os.path.join(outdir, '' %squeeze(objectid).replace('',''))if normfunc is not None:lcdict = normfunc(lcdict)if nbrlcf:nbrlcdict = normfunc(nbrlcdict)resultdict = {}for tcol, mcol, ecol in zip(timecols, magcols, errcols):if '' in tcol:tcolget = tcol.split('')else:tcolget = [tcol]times = _dict_get(lcdict, tcolget)if nbrlcf:nbrtimes = _dict_get(nbrlcdict, tcolget)else:nbrtimes = Noneif '' in mcol:mcolget = mcol.split('')else:mcolget = [mcol]mags = _dict_get(lcdict, mcolget)if nbrlcf:nbrmags = _dict_get(nbrlcdict, mcolget)else:nbrmags = Noneif '' in ecol:ecolget = ecol.split('')else:ecolget = [ecol]errs = _dict_get(lcdict, ecolget)if nbrlcf:nbrerrs = _dict_get(nbrlcdict, ecolget)else:nbrerrs = Nonefinind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)ftimes, fmags, ferrs = times[finind], mags[finind], errs[finind]if nbrlcf:nfinind = (np.isfinite(nbrtimes) &np.isfinite(nbrmags) &np.isfinite(nbrerrs))nbrftimes, nbrfmags, nbrferrs = (nbrtimes[nfinind],nbrmags[nfinind],nbrerrs[nfinind])nzind = np.nonzero(ferrs)ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]if nbrlcf:nnzind = 
np.nonzero(nbrferrs)nbrftimes, nbrfmags, nbrferrs = (nbrftimes[nnzind],nbrfmags[nnzind],nbrferrs[nnzind])if normfunc is None:ntimes, nmags = normalize_magseries(ftimes, fmags,magsarefluxes=magsarefluxes)times, mags, errs = ntimes, nmags, ferrsif nbrlcf:nbrntimes, nbrnmags = normalize_magseries(nbrftimes, nbrfmags,magsarefluxes=magsarefluxes)nbrtimes, nbrmags, nbrerrs = nbrntimes, nbrnmags, nbrferrselse:nbrtimes, nbrmags, nbrerrs = None, None, Noneelse:times, mags, errs = ftimes, fmags, ferrsif times.size > :available_pfmethods = []available_pgrams = []available_bestperiods = []for k in pf[mcol].keys():if k in PFMETHODS:available_pgrams.append(pf[mcol][k])if k != '':available_pfmethods.append(pf[mcol][k][''])available_bestperiods.append(pf[mcol][k][''])featkey = '' % mcolresultdict[featkey] = {}pgramfeat = periodicfeatures.periodogram_features(available_pgrams, times, mags, errs,sigclip=sigclip,pdiff_threshold=pdiff_threshold,sidereal_threshold=sidereal_threshold,sampling_peak_multiplier=sampling_peak_multiplier,sampling_startp=sampling_startp,sampling_endp=sampling_endp,verbose=verbose)resultdict[featkey].update(pgramfeat)resultdict[featkey][''] = available_pfmethodsfor _ind, pfm, bp in zip(range(len(available_bestperiods)),available_pfmethods,available_bestperiods):resultdict[featkey][pfm] = periodicfeatures.lcfit_features(times, mags, errs, bp,fourierorder=fourierorder,transitparams=transitparams,ebparams=ebparams,sigclip=sigclip,magsarefluxes=magsarefluxes,verbose=verbose)phasedlcfeat = periodicfeatures.phasedlc_features(times, mags, errs, bp,nbrtimes=nbrtimes,nbrmags=nbrmags,nbrerrs=nbrerrs)resultdict[featkey][pfm].update(phasedlcfeat)else:LOGERROR(''''% (mcol, pfpickle))featkey = '' % mcolresultdict[featkey] = Noneoutfile = os.path.join(outdir, '' %squeeze(objectid).replace('',''))with open(outfile,'') as outfd:pickle.dump(resultdict, outfd, pickle.HIGHEST_PROTOCOL)return outfileexcept Exception as e:LOGEXCEPTION('' %(pfpickle, lcfile))if raiseonfail:raiseelse:return None", "docstring": "This gets all periodic features for the object.\n\n Parameters\n ----------\n\n pfpickle : str\n The period-finding result pickle containing period-finder results to use\n for the calculation of LC fit, periodogram, and phased LC features.\n\n lcbasedir : str\n The base directory where the light curve for the current object is\n located.\n\n outdir : str\n The output directory where the results will be written.\n\n fourierorder : int\n The Fourier order to use to generate sinusoidal function and fit that to\n the phased light curve.\n\n transitparams : list of floats\n The transit depth, duration, and ingress duration to use to generate a\n trapezoid planet transit model fit to the phased light curve. The period\n used is the one provided in `period`, while the epoch is automatically\n obtained from a spline fit to the phased light curve.\n\n ebparams : list of floats\n The primary eclipse depth, eclipse duration, the primary-secondary depth\n ratio, and the phase of the secondary eclipse to use to generate an\n eclipsing binary model fit to the phased light curve. 
The period used is\n the one provided in `period`, while the epoch is automatically obtained\n from a spline fit to the phased light curve.\n\n pdiff_threshold : float\n This is the max difference between periods to consider them the same.\n\n sidereal_threshold : float\n This is the max difference between any of the 'best' periods and the\n sidereal day periods to consider them the same.\n\n sampling_peak_multiplier : float\n This is the minimum multiplicative factor of a 'best' period's\n normalized periodogram peak over the sampling periodogram peak at the\n same period required to accept the 'best' period as possibly real.\n\n sampling_startp, sampling_endp : float\n If the `pgramlist` doesn't have a time-sampling Lomb-Scargle\n periodogram, it will be obtained automatically. Use these kwargs to\n control the minimum and maximum period interval to be searched when\n generating this periodogram.\n\n starfeatures : str or None\n If not None, this should be the filename of the\n `starfeatures-.pkl` created by\n :py:func:`astrobase.lcproc.lcsfeatures.get_starfeatures` for this\n object. This is used to get the neighbor's light curve and phase it with\n this object's period to see if this object is blended.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the features.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the features.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the features.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. 
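The three accepted forms of the `sigclip` argument described above can be summarized as follows; the numeric values are arbitrary examples, not the source's defaults::

    sigclip = 4.0          # symmetric: clip 4-sigma outliers on both sides
    sigclip = [10.0, 3.0]  # asymmetric: 10-sigma dimmings, 3-sigma brightenings
    sigclip = None         # disable sigma-clipping entirely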
Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n verbose : bool\n If True, will indicate progress while working.\n\n raiseonfail : bool\n If True, will raise an Exception if something goes wrong.\n\n Returns\n -------\n\n str\n Returns a filename for the output pickle containing all of the periodic\n features for the input object's LC.", "id": "f14698:m1"} {"signature": "def _periodicfeatures_worker(task):", "body": "pfpickle, lcbasedir, outdir, starfeatures, kwargs = tasktry:return get_periodicfeatures(pfpickle,lcbasedir,outdir,starfeatures=starfeatures,**kwargs)except Exception as e:LOGEXCEPTION('' % pfpickle)", "docstring": "This is a parallel worker for the drivers below.", "id": "f14698:m2"} {"signature": "def serial_periodicfeatures(pfpkl_list,lcbasedir,outdir,starfeaturesdir=None,fourierorder=,transitparams=(-,,),ebparams=(-,,,),pdiff_threshold=,sidereal_threshold=,sampling_peak_multiplier=,sampling_startp=None,sampling_endp=None,starfeatures=None,timecols=None,magcols=None,errcols=None,lcformat='',lcformatdir=None,sigclip=,verbose=False,maxobjects=None):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(fileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif not os.path.exists(outdir):os.makedirs(outdir)if maxobjects:pfpkl_list = pfpkl_list[:maxobjects]LOGINFO('' % len(pfpkl_list))if starfeaturesdir and os.path.exists(starfeaturesdir):starfeatures_list = []LOGINFO('')for pfpkl in pfpkl_list:sfpkl1 = os.path.basename(pfpkl).replace('','')sfpkl2 = sfpkl1.replace('','')sfpath1 = os.path.join(starfeaturesdir, sfpkl1)sfpath2 = os.path.join(starfeaturesdir, sfpkl2)if os.path.exists(sfpath1):starfeatures_list.append(sfpkl1)elif os.path.exists(sfpath2):starfeatures_list.append(sfpkl2)else:starfeatures_list.append(None)else:starfeatures_list = [None for x in pfpkl_list]kwargs = {'':fourierorder,'':transitparams,'':ebparams,'':pdiff_threshold,'':sidereal_threshold,'':sampling_peak_multiplier,'':sampling_startp,'':sampling_endp,'':timecols,'':magcols,'':errcols,'':lcformat,'':lcformatdir,'':sigclip,'':verbose}tasks = [(x, lcbasedir, outdir, y, kwargs) for (x,y) inzip(pfpkl_list, starfeatures_list)]LOGINFO('')for task in tqdm(tasks):_periodicfeatures_worker(task)", "docstring": "This drives the periodicfeatures collection for a list of periodfinding\n pickles.\n\n Parameters\n ----------\n\n pfpkl_list : list of str\n The list of period-finding pickles to use.\n\n lcbasedir : str\n The base directory where the associated light curves are located.\n\n outdir : str\n The directory where the results will be written.\n\n starfeaturesdir : str or None\n The directory containing the `starfeatures-.pkl` files for\n each object to use calculate neighbor proximity light curve features.\n\n fourierorder : int\n The Fourier order to use to generate sinusoidal function and fit that to\n the phased light curve.\n\n transitparams : list of floats\n The transit depth, duration, and ingress duration to use to generate a\n trapezoid planet transit model fit to the phased light curve. 
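A usage sketch for `get_periodicfeatures`, based only on the signature and parameter descriptions above. The module path `astrobase.lcproc.lcpfeatures` is an assumption (the source only fixes `astrobase.lcproc.lcsfeatures` for the star features), and all paths, object IDs, and numeric values are hypothetical::

    from astrobase.lcproc.lcpfeatures import get_periodicfeatures  # assumed path

    outpickle = get_periodicfeatures(
        '/data/pfresults/periodfinding-OBJ001.pkl',   # period-finder result pickle
        '/data/lightcurves',                          # lcbasedir
        '/data/periodicfeatures',                     # outdir
        fourierorder=5,
        # hypothetical user-chosen values: [transit depth, duration, ingress duration]
        transitparams=[-0.01, 0.1, 0.05],
        # [primary depth, duration, primary/secondary depth ratio, secondary phase]
        ebparams=[-0.2, 0.3, 0.7, 0.5],
        starfeatures='/data/starfeatures/starfeatures-OBJ001.pkl',
        lcformat='my-lcformat',
        sigclip=10.0,
    )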
The period\n used is the one provided in `period`, while the epoch is automatically\n obtained from a spline fit to the phased light curve.\n\n ebparams : list of floats\n The primary eclipse depth, eclipse duration, the primary-secondary depth\n ratio, and the phase of the secondary eclipse to use to generate an\n eclipsing binary model fit to the phased light curve. The period used is\n the one provided in `period`, while the epoch is automatically obtained\n from a spline fit to the phased light curve.\n\n pdiff_threshold : float\n This is the max difference between periods to consider them the same.\n\n sidereal_threshold : float\n This is the max difference between any of the 'best' periods and the\n sidereal day periods to consider them the same.\n\n sampling_peak_multiplier : float\n This is the minimum multiplicative factor of a 'best' period's\n normalized periodogram peak over the sampling periodogram peak at the\n same period required to accept the 'best' period as possibly real.\n\n sampling_startp, sampling_endp : float\n If the `pgramlist` doesn't have a time-sampling Lomb-Scargle\n periodogram, it will be obtained automatically. Use these kwargs to\n control the minimum and maximum period interval to be searched when\n generating this periodogram.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the features.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the features.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the features.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. 
Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n verbose : bool\n If True, will indicate progress while working.\n\n maxobjects : int\n The total number of objects to process from `pfpkl_list`.\n\n Returns\n -------\n\n Nothing.", "id": "f14698:m3"} {"signature": "def parallel_periodicfeatures(pfpkl_list,lcbasedir,outdir,starfeaturesdir=None,fourierorder=,transitparams=(-,,),ebparams=(-,,,),pdiff_threshold=,sidereal_threshold=,sampling_peak_multiplier=,sampling_startp=None,sampling_endp=None,timecols=None,magcols=None,errcols=None,lcformat='',lcformatdir=None,sigclip=,verbose=False,maxobjects=None,nworkers=NCPUS):", "body": "if not os.path.exists(outdir):os.makedirs(outdir)if maxobjects:pfpkl_list = pfpkl_list[:maxobjects]LOGINFO('' % len(pfpkl_list))if starfeaturesdir and os.path.exists(starfeaturesdir):starfeatures_list = []LOGINFO('')for pfpkl in pfpkl_list:sfpkl1 = os.path.basename(pfpkl).replace('','')sfpkl2 = sfpkl1.replace('','')sfpath1 = os.path.join(starfeaturesdir, sfpkl1)sfpath2 = os.path.join(starfeaturesdir, sfpkl2)if os.path.exists(sfpath1):starfeatures_list.append(sfpkl1)elif os.path.exists(sfpath2):starfeatures_list.append(sfpkl2)else:starfeatures_list.append(None)else:starfeatures_list = [None for x in pfpkl_list]kwargs = {'':fourierorder,'':transitparams,'':ebparams,'':pdiff_threshold,'':sidereal_threshold,'':sampling_peak_multiplier,'':sampling_startp,'':sampling_endp,'':timecols,'':magcols,'':errcols,'':lcformat,'':lcformat,'':sigclip,'':verbose}tasks = [(x, lcbasedir, outdir, y, kwargs) for (x,y) inzip(pfpkl_list, starfeatures_list)]LOGINFO('')with ProcessPoolExecutor(max_workers=nworkers) as executor:resultfutures = executor.map(_periodicfeatures_worker, tasks)results = [x for x in resultfutures]resdict = {os.path.basename(x):y for (x,y) in zip(pfpkl_list, results)}return resdict", "docstring": "This runs periodic feature generation in parallel for all periodfinding\n pickles in the input list.\n\n Parameters\n ----------\n\n pfpkl_list : list of str\n The list of period-finding pickles to use.\n\n lcbasedir : str\n The base directory where the associated light curves are located.\n\n outdir : str\n The directory where the results will be written.\n\n starfeaturesdir : str or None\n The directory containing the `starfeatures-.pkl` files for\n each object to use calculate neighbor proximity light curve features.\n\n fourierorder : int\n The Fourier order to use to generate sinusoidal function and fit that to\n the phased light curve.\n\n transitparams : list of floats\n The transit depth, duration, and ingress duration to use to generate a\n trapezoid planet transit model fit to the phased light curve. The period\n used is the one provided in `period`, while the epoch is automatically\n obtained from a spline fit to the phased light curve.\n\n ebparams : list of floats\n The primary eclipse depth, eclipse duration, the primary-secondary depth\n ratio, and the phase of the secondary eclipse to use to generate an\n eclipsing binary model fit to the phased light curve. 
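For a batch of period-finder pickles, `serial_periodicfeatures` drives the same computation over a list. A sketch under the same assumptions as above (the module path, file paths, format key, and values are placeholders); note that, per the docstring, this driver returns nothing and only writes the output pickles::

    import glob
    from astrobase.lcproc.lcpfeatures import serial_periodicfeatures  # assumed path

    pfpkl_list = sorted(glob.glob('/data/pfresults/periodfinding-*.pkl'))

    serial_periodicfeatures(
        pfpkl_list,
        '/data/lightcurves',                    # lcbasedir
        '/data/periodicfeatures',               # outdir
        starfeaturesdir='/data/starfeatures',   # used for neighbor-proximity features
        lcformat='my-lcformat',
        maxobjects=500,
    )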
The period used is\n the one provided in `period`, while the epoch is automatically obtained\n from a spline fit to the phased light curve.\n\n pdiff_threshold : float\n This is the max difference between periods to consider them the same.\n\n sidereal_threshold : float\n This is the max difference between any of the 'best' periods and the\n sidereal day periods to consider them the same.\n\n sampling_peak_multiplier : float\n This is the minimum multiplicative factor of a 'best' period's\n normalized periodogram peak over the sampling periodogram peak at the\n same period required to accept the 'best' period as possibly real.\n\n sampling_startp, sampling_endp : float\n If the `pgramlist` doesn't have a time-sampling Lomb-Scargle\n periodogram, it will be obtained automatically. Use these kwargs to\n control the minimum and maximum period interval to be searched when\n generating this periodogram.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the features.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the features.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the features.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. 
Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n verbose : bool\n If True, will indicate progress while working.\n\n maxobjects : int\n The total number of objects to process from `pfpkl_list`.\n\n nworkers : int\n The number of parallel workers to launch to process the input.\n\n Returns\n -------\n\n dict\n A dict containing key: val pairs of the input period-finder result and\n the output periodic feature result pickles for each input pickle is\n returned.", "id": "f14698:m4"} {"signature": "def parallel_periodicfeatures_lcdir(pfpkl_dir,lcbasedir,outdir,pfpkl_glob='',starfeaturesdir=None,fourierorder=,transitparams=(-,,),ebparams=(-,,,),pdiff_threshold=,sidereal_threshold=,sampling_peak_multiplier=,sampling_startp=None,sampling_endp=None,timecols=None,magcols=None,errcols=None,lcformat='',lcformatdir=None,sigclip=,verbose=False,maxobjects=None,nworkers=NCPUS,recursive=True,):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Nonefileglob = pfpkl_globLOGINFO('' % pfpkl_dir)if recursive is False:matching = glob.glob(os.path.join(pfpkl_dir, fileglob))else:if sys.version_info[:] > (,):matching = glob.glob(os.path.join(pfpkl_dir,'',fileglob),recursive=True)else:walker = os.walk(pfpkl_dir)matching = []for root, dirs, _files in walker:for sdir in dirs:searchpath = os.path.join(root,sdir,fileglob)foundfiles = glob.glob(searchpath)if foundfiles:matching.extend(foundfiles)if matching and len(matching) > :LOGINFO('' %len(matching))return parallel_periodicfeatures(matching,lcbasedir,outdir,starfeaturesdir=starfeaturesdir,fourierorder=fourierorder,transitparams=transitparams,ebparams=ebparams,pdiff_threshold=pdiff_threshold,sidereal_threshold=sidereal_threshold,sampling_peak_multiplier=sampling_peak_multiplier,sampling_startp=sampling_startp,sampling_endp=sampling_endp,timecols=timecols,magcols=magcols,errcols=errcols,lcformat=lcformat,lcformatdir=lcformatdir,sigclip=sigclip,verbose=verbose,maxobjects=maxobjects,nworkers=nworkers,)else:LOGERROR('' % (pfpkl_dir))return None", "docstring": "This runs parallel periodicfeature extraction for a directory of\n periodfinding result pickles.\n\n Parameters\n ----------\n\n pfpkl_dir : str\n The directory containing the pickles to process.\n\n lcbasedir : str\n The directory where all of the associated light curve files are located.\n\n outdir : str\n The directory where all the output will be written.\n\n pfpkl_glob : str\n The UNIX file glob to use to search for period-finder result pickles in\n `pfpkl_dir`.\n\n starfeaturesdir : str or None\n The directory containing the `starfeatures-.pkl` files for\n each object to use calculate neighbor proximity light curve features.\n\n fourierorder : int\n The Fourier order to use to generate sinusoidal function and fit that to\n the phased light curve.\n\n transitparams : list of floats\n The transit depth, duration, and ingress duration to use to generate a\n trapezoid planet transit model fit to the phased light curve. 
The period\n used is the one provided in `period`, while the epoch is automatically\n obtained from a spline fit to the phased light curve.\n\n ebparams : list of floats\n The primary eclipse depth, eclipse duration, the primary-secondary depth\n ratio, and the phase of the secondary eclipse to use to generate an\n eclipsing binary model fit to the phased light curve. The period used is\n the one provided in `period`, while the epoch is automatically obtained\n from a spline fit to the phased light curve.\n\n pdiff_threshold : float\n This is the max difference between periods to consider them the same.\n\n sidereal_threshold : float\n This is the max difference between any of the 'best' periods and the\n sidereal day periods to consider them the same.\n\n sampling_peak_multiplier : float\n This is the minimum multiplicative factor of a 'best' period's\n normalized periodogram peak over the sampling periodogram peak at the\n same period required to accept the 'best' period as possibly real.\n\n sampling_startp, sampling_endp : float\n If the `pgramlist` doesn't have a time-sampling Lomb-Scargle\n periodogram, it will be obtained automatically. Use these kwargs to\n control the minimum and maximum period interval to be searched when\n generating this periodogram.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the features.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the features.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the features.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. 
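The parallel driver takes the same inputs plus a worker count and returns a dict mapping each input pickle to its output pickle. A sketch with hypothetical paths and an assumed module path::

    import glob
    from astrobase.lcproc.lcpfeatures import parallel_periodicfeatures  # assumed path

    pfpkl_list = sorted(glob.glob('/data/pfresults/periodfinding-*.pkl'))

    resdict = parallel_periodicfeatures(
        pfpkl_list,
        '/data/lightcurves',                    # lcbasedir
        '/data/periodicfeatures',               # outdir
        starfeaturesdir='/data/starfeatures',
        lcformat='my-lcformat',
        nworkers=8,                             # number of parallel workers
    )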
Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n verbose : bool\n If True, will indicate progress while working.\n\n maxobjects : int\n The total number of objects to process from `pfpkl_list`.\n\n nworkers : int\n The number of parallel workers to launch to process the input.\n\n Returns\n -------\n\n dict\n A dict containing key: val pairs of the input period-finder result and\n the output periodic feature result pickles for each input pickle is\n returned.", "id": "f14698:m5"} {"signature": "def apply_epd_magseries(lcfile,timecol,magcol,errcol,externalparams,lcformat='',lcformatdir=None,epdsmooth_sigclip=,epdsmooth_windowsize=,epdsmooth_func=smooth_magseries_savgol,epdsmooth_extraparams=None):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Nonelcdict = readerfunc(lcfile)if ((isinstance(lcdict, (tuple, list))) andisinstance(lcdict[], dict)):lcdict = lcdict[]objectid = lcdict['']times, mags, errs = lcdict[timecol], lcdict[magcol], lcdict[errcol]if externalparams is not None:fsv = lcdict[externalparams['']]fdv = lcdict[externalparams['']]fkv = lcdict[externalparams['']]xcc = lcdict[externalparams['']]ycc = lcdict[externalparams['']]bgv = lcdict[externalparams['']]bge = lcdict[externalparams['']]iha = lcdict[externalparams['']]izd = lcdict[externalparams['']]else:fsv = lcdict['']fdv = lcdict['']fkv = lcdict['']xcc = lcdict['']ycc = lcdict['']bgv = lcdict['']bge = lcdict['']iha = lcdict['']izd = lcdict['']epd = epd_magseries(times,mags,errs,fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd,magsarefluxes=magsarefluxes,epdsmooth_sigclip=epdsmooth_sigclip,epdsmooth_windowsize=epdsmooth_windowsize,epdsmooth_func=epdsmooth_func,epdsmooth_extraparams=epdsmooth_extraparams)lcdict[''] = epdoutfile = os.path.join(os.path.dirname(lcfile),'' % (squeeze(objectid).replace('',''),magcol))with open(outfile,'') as outfd:pickle.dump(lcdict, outfd,protocol=pickle.HIGHEST_PROTOCOL)return outfile", "docstring": "This applies external parameter decorrelation (EPD) to a light curve.\n\n Parameters\n ----------\n\n lcfile : str\n The filename of the light curve file to process.\n\n timecol,magcol,errcol : str\n The keys in the lcdict produced by your light curve reader function that\n correspond to the times, mags/fluxes, and associated measurement errors\n that will be used as input to the EPD process.\n\n externalparams : dict or None\n This is a dict that indicates which keys in the lcdict obtained from the\n lcfile correspond to the required external parameters. As with timecol,\n magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound\n keys ('magaperture1.mags'). 
The dict should look something like::\n\n {'fsv':'' array: S values for each observation,\n 'fdv':'' array: D values for each observation,\n 'fkv':'' array: K values for each observation,\n 'xcc':'' array: x coords for each observation,\n 'ycc':'' array: y coords for each observation,\n 'bgv':'' array: sky background for each observation,\n 'bge':'' array: sky background err for each observation,\n 'iha':'' array: hour angle for each observation,\n 'izd':'' array: zenith distance for each observation}\n\n Alternatively, if these exact keys are already present in the lcdict,\n indicate this by setting externalparams to None.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n epdsmooth_sigclip : float or int or sequence of two floats/ints or None\n This specifies how to sigma-clip the input LC before fitting the EPD\n function to it.\n\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n epdsmooth_windowsize : int\n This is the number of LC points to smooth over to generate a smoothed\n light curve that will be used to fit the EPD function.\n\n epdsmooth_func : Python function\n This sets the smoothing filter function to use. A Savitsky-Golay filter\n is used to smooth the light curve by default. The functions that can be\n used with this kwarg are listed in `varbase.trends`. If you want to use\n your own function, it MUST have the following signature::\n\n def smoothfunc(mags_array, window_size, **extraparams)\n\n and return a numpy array of the same size as `mags_array` with the\n smoothed time-series. Any extra params can be provided using the\n `extraparams` dict.\n\n epdsmooth_extraparams : dict\n This is a dict of any extra filter params to supply to the smoothing\n function.\n\n Returns\n -------\n\n str\n Writes the output EPD light curve to a pickle that contains the lcdict\n with an added `lcdict['epd']` key, which contains the EPD times,\n mags/fluxes, and errs as `lcdict['epd']['times']`,\n `lcdict['epd']['mags']`, and `lcdict['epd']['errs']`. 
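The `externalparams` mapping above has its example values elided; below is a hedged reconstruction plus a call sketch. The required keys (fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd) and the 'rjd' example come from the docstring; the lcdict column names on the right, the module path `astrobase.lcproc.epd`, and all other literals are assumptions::

    from astrobase.lcproc.epd import apply_epd_magseries  # assumed module path

    # map the required EPD parameter names to (hypothetical) lcdict keys
    externalparams = {
        'fsv': 'psf_s',        # S values for each observation
        'fdv': 'psf_d',        # D values
        'fkv': 'psf_k',        # K values
        'xcc': 'x_coord',      # x coordinates
        'ycc': 'y_coord',      # y coordinates
        'bgv': 'sky_bg',       # sky background
        'bge': 'sky_bg_err',   # sky background error
        'iha': 'hour_angle',   # hour angle
        'izd': 'zenith_dist',  # zenith distance
    }

    epd_lcfile = apply_epd_magseries(
        '/data/lightcurves/OBJ001-lc.pkl',  # lcfile
        'rjd',                              # timecol (example key from the docstring)
        'aperture1_mag',                    # magcol: hypothetical
        'aperture1_err',                    # errcol: hypothetical
        externalparams,
        lcformat='my-lcformat',
        epdsmooth_sigclip=3.0,
        epdsmooth_windowsize=21,
    )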
Returns the\n filename of this generated EPD LC pickle file.\n\n Notes\n -----\n\n - S -> measure of PSF sharpness (~1/sigma^2 sosmaller S = wider PSF)\n - D -> measure of PSF ellipticity in xy direction\n - K -> measure of PSF ellipticity in cross direction\n\n S, D, K are related to the PSF's variance and covariance, see eqn 30-33 in\n A. Pal's thesis: https://arxiv.org/abs/0906.3486", "id": "f14699:m1"} {"signature": "def parallel_epd_worker(task):", "body": "(lcfile, timecol, magcol, errcol,externalparams, lcformat, lcformatdir, magsarefluxes,epdsmooth_sigclip, epdsmooth_windowsize,epdsmooth_func, epdsmooth_extraparams) = tasktry:epd = apply_epd_magseries(lcfile,timecol,magcol,errcol,externalparams,lcformat=lcformat,lcformatdir=lcformatdir,epdsmooth_sigclip=epdsmooth_sigclip,epdsmooth_windowsize=epdsmooth_windowsize,epdsmooth_func=epdsmooth_func,epdsmooth_extraparams=epdsmooth_extraparams)if epd is not None:LOGINFO('' % (lcfile, epd))return epdelse:LOGERROR('' % lcfile)return Noneexcept Exception as e:LOGEXCEPTION('' % lcfile)return None", "docstring": "This is a parallel worker for the function below.\n\n Parameters\n ----------\n\n task : tuple\n - task[0] = lcfile\n - task[1] = timecol\n - task[2] = magcol\n - task[3] = errcol\n - task[4] = externalparams\n - task[5] = lcformat\n - task[6] = lcformatdir\n - task[7] = epdsmooth_sigclip\n - task[8] = epdsmooth_windowsize\n - task[9] = epdsmooth_func\n - task[10] = epdsmooth_extraparams\n\n Returns\n -------\n\n str or None\n If EPD succeeds for an input LC, returns the filename of the output EPD\n LC pickle file. If it fails, returns None.", "id": "f14699:m2"} {"signature": "def parallel_epd_lclist(lclist,externalparams,timecols=None,magcols=None,errcols=None,lcformat='',lcformatdir=None,epdsmooth_sigclip=,epdsmooth_windowsize=,epdsmooth_func=smooth_magseries_savgol,epdsmooth_extraparams=None,nworkers=NCPUS,maxworkertasks=):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(fileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif timecols is None:timecols = dtimecolsif magcols is None:magcols = dmagcolsif errcols is None:errcols = derrcolsoutdict = {}for t, m, e in zip(timecols, magcols, errcols):tasks = [(x, t, m, e, externalparams, lcformat, lcformatdir,epdsmooth_sigclip, epdsmooth_windowsize,epdsmooth_func, epdsmooth_extraparams) forx in lclist]pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)results = pool.map(parallel_epd_worker, tasks)pool.close()pool.join()outdict[m] = resultsreturn outdict", "docstring": "This applies EPD in parallel to all LCs in the input list.\n\n Parameters\n ----------\n\n lclist : list of str\n This is the list of light curve files to run EPD on.\n\n externalparams : dict or None\n This is a dict that indicates which keys in the lcdict obtained from the\n lcfile correspond to the required external parameters. As with timecol,\n magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound\n keys ('magaperture1.mags'). 
The dict should look something like::\n\n {'fsv':'' array: S values for each observation,\n 'fdv':'' array: D values for each observation,\n 'fkv':'' array: K values for each observation,\n 'xcc':'' array: x coords for each observation,\n 'ycc':'' array: y coords for each observation,\n 'bgv':'' array: sky background for each observation,\n 'bge':'' array: sky background err for each observation,\n 'iha':'' array: hour angle for each observation,\n 'izd':'' array: zenith distance for each observation}\n\n Alternatively, if these exact keys are already present in the lcdict,\n indicate this by setting externalparams to None.\n\n timecols,magcols,errcols : lists of str\n The keys in the lcdict produced by your light curve reader function that\n correspond to the times, mags/fluxes, and associated measurement errors\n that will be used as inputs to the EPD process. If these are None, the\n default values for `timecols`, `magcols`, and `errcols` for your light\n curve format will be used here.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curve files.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n epdsmooth_sigclip : float or int or sequence of two floats/ints or None\n This specifies how to sigma-clip the input LC before fitting the EPD\n function to it.\n\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n epdsmooth_windowsize : int\n This is the number of LC points to smooth over to generate a smoothed\n light curve that will be used to fit the EPD function.\n\n epdsmooth_func : Python function\n This sets the smoothing filter function to use. A Savitsky-Golay filter\n is used to smooth the light curve by default. The functions that can be\n used with this kwarg are listed in `varbase.trends`. If you want to use\n your own function, it MUST have the following signature::\n\n def smoothfunc(mags_array, window_size, **extraparams)\n\n and return a numpy array of the same size as `mags_array` with the\n smoothed time-series. 
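The required signature for a custom `epdsmooth_func` is given above; here is a minimal sketch that satisfies it with a simple centered moving average (the source's default is a Savitzky-Golay filter, which this does not replicate)::

    import numpy as np

    def smoothfunc(mags_array, window_size, **extraparams):
        # simple centered moving average returning an array of the same size
        kernel = np.ones(window_size) / window_size
        pad = window_size // 2
        padded = np.pad(mags_array, pad, mode='edge')   # pad edges to avoid shrinkage
        smoothed = np.convolve(padded, kernel, mode='same')
        return smoothed[pad:pad + mags_array.size]      # trim back to input length

    # passed to the EPD drivers as: epdsmooth_func=smoothfunc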
Any extra params can be provided using the\n `extraparams` dict.\n\n epdsmooth_extraparams : dict\n This is a dict of any extra filter params to supply to the smoothing\n function.\n\n nworkers : int\n The number of parallel workers to launch when processing the LCs.\n\n maxworkertasks : int\n The maximum number of tasks a parallel worker will complete before it is\n replaced with a new one (sometimes helps with memory-leaks).\n\n Returns\n -------\n\n dict\n Returns a dict organized by all the keys in the input `magcols` list,\n containing lists of EPD pickle light curves for that `magcol`.\n\n Notes\n -----\n\n - S -> measure of PSF sharpness (~1/sigma^2 sosmaller S = wider PSF)\n - D -> measure of PSF ellipticity in xy direction\n - K -> measure of PSF ellipticity in cross direction\n\n S, D, K are related to the PSF's variance and covariance, see eqn 30-33 in\n A. Pal's thesis: https://arxiv.org/abs/0906.3486", "id": "f14699:m3"} {"signature": "def parallel_epd_lcdir(lcdir,externalparams,lcfileglob=None,timecols=None,magcols=None,errcols=None,lcformat='',lcformatdir=None,epdsmooth_sigclip=,epdsmooth_windowsize=,epdsmooth_func=smooth_magseries_savgol,epdsmooth_extraparams=None,nworkers=NCPUS,maxworkertasks=):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(fileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif lcfileglob is None:lcfileglob = filegloblclist = sorted(glob.glob(os.path.join(lcdir, lcfileglob)))return parallel_epd_lclist(lclist,externalparams,timecols=timecols,magcols=magcols,errcols=errcols,lcformat=lcformat,epdsmooth_sigclip=epdsmooth_sigclip,epdsmooth_windowsize=epdsmooth_windowsize,epdsmooth_func=epdsmooth_func,epdsmooth_extraparams=epdsmooth_extraparams,nworkers=nworkers,maxworkertasks=maxworkertasks)", "docstring": "This applies EPD in parallel to all LCs in a directory.\n\n Parameters\n ----------\n\n lcdir : str\n The light curve directory to process.\n\n externalparams : dict or None\n This is a dict that indicates which keys in the lcdict obtained from the\n lcfile correspond to the required external parameters. As with timecol,\n magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound\n keys ('magaperture1.mags'). The dict should look something like::\n\n {'fsv':'' array: S values for each observation,\n 'fdv':'' array: D values for each observation,\n 'fkv':'' array: K values for each observation,\n 'xcc':'' array: x coords for each observation,\n 'ycc':'' array: y coords for each observation,\n 'bgv':'' array: sky background for each observation,\n 'bge':'' array: sky background err for each observation,\n 'iha':'' array: hour angle for each observation,\n 'izd':'' array: zenith distance for each observation}\n\n lcfileglob : str or None\n A UNIX fileglob to use to select light curve files in `lcdir`. If this\n is not None, the value provided will override the default fileglob for\n your light curve format.\n\n timecols,magcols,errcols : lists of str\n The keys in the lcdict produced by your light curve reader function that\n correspond to the times, mags/fluxes, and associated measurement errors\n that will be used as inputs to the EPD process. 
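A sketch of the list-based parallel EPD driver. Per the docstring, passing `externalparams=None` asserts that the fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, and izd keys are already present in each lcdict; the module path, glob pattern, magcol key, and values are hypothetical::

    import glob
    from astrobase.lcproc.epd import parallel_epd_lclist  # assumed module path

    lclist = sorted(glob.glob('/data/lightcurves/*-lc.pkl'))

    epd_results = parallel_epd_lclist(
        lclist,
        None,                       # externalparams: keys already in the lcdicts
        magcols=['aperture1_mag'],  # hypothetical magcol key
        lcformat='my-lcformat',
        nworkers=8,
    )
    # epd_results is a dict keyed by magcol; each value is a list of EPD LC pickles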
If these are None, the\n default values for `timecols`, `magcols`, and `errcols` for your light\n curve format will be used here.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n epdsmooth_sigclip : float or int or sequence of two floats/ints or None\n This specifies how to sigma-clip the input LC before fitting the EPD\n function to it.\n\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n epdsmooth_windowsize : int\n This is the number of LC points to smooth over to generate a smoothed\n light curve that will be used to fit the EPD function.\n\n epdsmooth_func : Python function\n This sets the smoothing filter function to use. A Savitsky-Golay filter\n is used to smooth the light curve by default. The functions that can be\n used with this kwarg are listed in `varbase.trends`. If you want to use\n your own function, it MUST have the following signature::\n\n def smoothfunc(mags_array, window_size, **extraparams)\n\n and return a numpy array of the same size as `mags_array` with the\n smoothed time-series. Any extra params can be provided using the\n `extraparams` dict.\n\n epdsmooth_extraparams : dict\n This is a dict of any extra filter params to supply to the smoothing\n function.\n\n nworkers : int\n The number of parallel workers to launch when processing the LCs.\n\n maxworkertasks : int\n The maximum number of tasks a parallel worker will complete before it is\n replaced with a new one (sometimes helps with memory-leaks).\n\n Returns\n -------\n\n dict\n Returns a dict organized by all the keys in the input `magcols` list,\n containing lists of EPD pickle light curves for that `magcol`.\n\n Notes\n -----\n\n - S -> measure of PSF sharpness (~1/sigma^2 sosmaller S = wider PSF)\n - D -> measure of PSF ellipticity in xy direction\n - K -> measure of PSF ellipticity in cross direction\n\n S, D, K are related to the PSF's variance and covariance, see eqn 30-33 in\n A. 
Pal's thesis: https://arxiv.org/abs/0906.3486", "id": "f14699:m4"} {"signature": "def update_checkplotdict_nbrlcs(checkplotdict,timecol, magcol, errcol,lcformat='',lcformatdir=None,verbose=True,):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return checkplotdictexcept Exception as e:LOGEXCEPTION(\"\")return checkplotdictif not ('' in checkplotdict andcheckplotdict[''] andlen(checkplotdict['']) > ):LOGERROR('' %(checkplotdict['']))return checkplotdictobjmagkeys = {}if '' in checkplotdict['']:mclist = checkplotdict['']['']else:mclist = ('','','','','','','','','','','','')for mc in mclist:if (mc in checkplotdict[''] andcheckplotdict[''][mc] is not None andnp.isfinite(checkplotdict[''][mc])):objmagkeys[mc] = checkplotdict[''][mc]for nbr in checkplotdict['']:objectid, lcfpath = (nbr[''],nbr[''])if not os.path.exists(lcfpath):LOGERROR('''' %(checkplotdict[''], objectid, lcfpath))continuelcdict = readerfunc(lcfpath)if ( (isinstance(lcdict, (list, tuple))) and(isinstance(lcdict[], dict)) ):lcdict = lcdict[]nbrmagkeys = {}for mc in objmagkeys:if (('' in lcdict) and(isinstance(lcdict[''], dict)) and(mc in lcdict['']) and(lcdict[''][mc] is not None) and(np.isfinite(lcdict[''][mc]))):nbrmagkeys[mc] = lcdict[''][mc]magdiffs = {}for omc in objmagkeys:if omc in nbrmagkeys:magdiffs[omc] = objmagkeys[omc] - nbrmagkeys[omc]colordiffs = {}for ctrio in (['','',''],['','',''],['','',''],['','',''],['','',''],['','','']):m1, m2, color = ctrioif (m1 in objmagkeys andm2 in objmagkeys andm1 in nbrmagkeys andm2 in nbrmagkeys):objcolor = objmagkeys[m1] - objmagkeys[m2]nbrcolor = nbrmagkeys[m1] - nbrmagkeys[m2]colordiffs[color] = objcolor - nbrcolornbr.update({'':magdiffs,'':colordiffs})if normfunc is not None:lcdict = normfunc(lcdict)try:if '' in timecol:timecolget = timecol.split('')else:timecolget = [timecol]times = _dict_get(lcdict, timecolget)if '' in magcol:magcolget = magcol.split('')else:magcolget = [magcol]mags = _dict_get(lcdict, magcolget)if '' in errcol:errcolget = errcol.split('')else:errcolget = [errcol]errs = _dict_get(lcdict, errcolget)except KeyError:LOGERROR('''''' %(objectid, checkplotdict[''],''.join([timecol, magcol, errcol])))continuestimes, smags, serrs = sigclip_magseries(times,mags,errs,magsarefluxes=magsarefluxes,sigclip=)if normfunc is None:ntimes, nmags = normalize_magseries(stimes, smags,magsarefluxes=magsarefluxes)xtimes, xmags, xerrs = ntimes, nmags, serrselse:xtimes, xmags, xerrs = stimes, smags, serrsif ((xtimes is None) or (xmags is None) or (xerrs is None) or(xtimes.size < ) or (xmags.size < ) or (xerrs.size < )):LOGERROR(\"\"\"\"\"\"\"\" %(checkplotdict[''],nbr[''],nbr['']))continuenbrdict = _pkl_magseries_plot(xtimes,xmags,xerrs,magsarefluxes=magsarefluxes)nbr.update(nbrdict)if '' in checkplotdict:pfmethods = checkplotdict['']else:pfmethods = []for cpkey in checkplotdict:for pfkey in PFMETHODS:if pfkey in cpkey:pfmethods.append(pfkey)for lspt in pfmethods:nbr[lspt] = {}operiod, oepoch = (checkplotdict[lspt][][''],checkplotdict[lspt][][''])(ophasewrap, ophasesort, ophasebin,ominbinelems, oplotxlim) = (checkplotdict[lspt][][''],checkplotdict[lspt][][''],checkplotdict[lspt][][''],checkplotdict[lspt][][''],checkplotdict[lspt][][''],)nbr = _pkl_phased_magseries_plot(nbr,lspt.split('')[], ,xtimes, xmags, xerrs,operiod, 
oepoch,phasewrap=ophasewrap,phasesort=ophasesort,phasebin=ophasebin,minbinelems=ominbinelems,plotxlim=oplotxlim,magsarefluxes=magsarefluxes,verbose=verbose,override_pfmethod=lspt)return checkplotdict", "docstring": "For all neighbors in a checkplotdict, make LCs and phased LCs.\n\n Parameters\n ----------\n\n checkplotdict : dict\n This is the checkplot to process. The light curves for the neighbors to\n the object here will be extracted from the stored file paths, and this\n function will make plots of these time-series. If the object has 'best'\n periods and epochs generated by period-finder functions in this\n checkplotdict, phased light curve plots of each neighbor will be made\n using these to check the effects of blending.\n\n timecol,magcol,errcol : str\n The timecol, magcol, and errcol keys used to generate this object's\n checkplot. This is used to extract the correct times-series from the\n neighbors' light curves.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n Returns\n -------\n\n dict\n The input checkplotdict is returned with the neighor light curve plots\n added in.", "id": "f14700:m1"} {"signature": "def runcp(pfpickle,outdir,lcbasedir,lcfname=None,cprenorm=False,lclistpkl=None,nbrradiusarcsec=,maxnumneighbors=,makeneighborlcs=True,fast_mode=False,gaia_max_timeout=,gaia_mirror=None,xmatchinfo=None,xmatchradiusarcsec=,minobservations=,sigclip=,lcformat='',lcformatdir=None,timecols=None,magcols=None,errcols=None,skipdone=False,done_callback=None,done_callback_args=None,done_callback_kwargs=None):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(fileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif pfpickle is not None:if pfpickle.endswith(''):infd = gzip.open(pfpickle,'')else:infd = open(pfpickle,'')pfresults = pickle.load(infd)infd.close()if timecols is None:timecols = dtimecolsif magcols is None:magcols = dmagcolsif errcols is None:errcols = derrcolsif ((lcfname is not None or pfpickle is None) and os.path.exists(lcfname)):lcfpath = lcfnameobjectid = Noneelse:if pfpickle is not None:objectid = pfresults['']lcfbasename = pfresults['']lcfsearchpath = os.path.join(lcbasedir, lcfbasename)if os.path.exists(lcfsearchpath):lcfpath = lcfsearchpathelif lcfname is not None and os.path.exists(lcfname):lcfpath = lcfnameelse:LOGERROR('''''' %(pfpickle, objectid, lcfsearchpath, lcfname))return Noneelse:LOGERROR(\"\"\"\")return Nonelcdict = readerfunc(lcfpath)if ( (isinstance(lcdict, (list, tuple))) and(isinstance(lcdict[], dict)) ):lcdict = lcdict[]if objectid is None:if '' in lcdict:objectid = lcdict['']elif ('' in lcdict[''] andlcdict['']['']):objectid = lcdict['']['']elif '' in lcdict[''] and lcdict['']['']:objectid = lcdict['']['']else:objectid = uuid.uuid4().hex[:]LOGWARNING('''' % objectid)if normfunc is not None:lcdict = normfunc(lcdict)cpfs = []for tcol, mcol, ecol in 
zip(timecols, magcols, errcols):if '' in tcol:tcolget = tcol.split('')else:tcolget = [tcol]times = _dict_get(lcdict, tcolget)if '' in mcol:mcolget = mcol.split('')else:mcolget = [mcol]mags = _dict_get(lcdict, mcolget)if '' in ecol:ecolget = ecol.split('')else:ecolget = [ecol]errs = _dict_get(lcdict, ecolget)if pfpickle is not None:if '' in pfresults[mcol]:pflist = [pfresults[mcol][x] for x inpfresults[mcol][''] iflen(pfresults[mcol][x].keys()) > ]else:pflist = []for pfm in PFMETHODS:if (pfm in pfresults[mcol] andlen(pfresults[mcol][pfm].keys()) > ):pflist.append(pfresults[mcol][pfm])else:pflist = []outfile = os.path.join(outdir,'' % (squeeze(objectid).replace('',''),mcol))if skipdone and os.path.exists(outfile):LOGWARNING('''''' % outfile)return outfileif '' not in lcdict['']:lcdict[''][''] = objectidif normfunc is None:ntimes, nmags = normalize_magseries(times, mags,magsarefluxes=magsarefluxes)xtimes, xmags, xerrs = ntimes, nmags, errselse:xtimes, xmags, xerrs = times, mags, errscpd = checkplot_dict(pflist,xtimes, xmags, xerrs,objectinfo=lcdict[''],gaia_max_timeout=gaia_max_timeout,gaia_mirror=gaia_mirror,lclistpkl=lclistpkl,nbrradiusarcsec=nbrradiusarcsec,maxnumneighbors=maxnumneighbors,xmatchinfo=xmatchinfo,xmatchradiusarcsec=xmatchradiusarcsec,sigclip=sigclip,mindet=minobservations,verbose=False,fast_mode=fast_mode,magsarefluxes=magsarefluxes,normto=cprenorm )if makeneighborlcs:cpdupdated = update_checkplotdict_nbrlcs(cpd,tcol, mcol, ecol,lcformat=lcformat,verbose=False)else:cpdupdated = cpdcpf = _write_checkplot_picklefile(cpdupdated,outfile=outfile,protocol=pickle.HIGHEST_PROTOCOL,outgzip=False)cpfs.append(cpf)LOGINFO('' % (objectid, repr(cpfs)))if done_callback is not None:if (done_callback_args is not None andisinstance(done_callback_args,list)):done_callback_args = tuple([cpfs] + done_callback_args)else:done_callback_args = (cpfs,)if (done_callback_kwargs is not None andisinstance(done_callback_kwargs, dict)):done_callback_kwargs.update(dict(fast_mode=fast_mode,lcfname=lcfname,cprenorm=cprenorm,lclistpkl=lclistpkl,nbrradiusarcsec=nbrradiusarcsec,maxnumneighbors=maxnumneighbors,gaia_max_timeout=gaia_max_timeout,gaia_mirror=gaia_mirror,xmatchinfo=xmatchinfo,xmatchradiusarcsec=xmatchradiusarcsec,minobservations=minobservations,sigclip=sigclip,lcformat=lcformat,fileglob=fileglob,readerfunc=readerfunc,normfunc=normfunc,magsarefluxes=magsarefluxes,timecols=timecols,magcols=magcols,errcols=errcols,skipdone=skipdone,))else:done_callback_kwargs = dict(fast_mode=fast_mode,lcfname=lcfname,cprenorm=cprenorm,lclistpkl=lclistpkl,nbrradiusarcsec=nbrradiusarcsec,maxnumneighbors=maxnumneighbors,gaia_max_timeout=gaia_max_timeout,gaia_mirror=gaia_mirror,xmatchinfo=xmatchinfo,xmatchradiusarcsec=xmatchradiusarcsec,minobservations=minobservations,sigclip=sigclip,lcformat=lcformat,fileglob=fileglob,readerfunc=readerfunc,normfunc=normfunc,magsarefluxes=magsarefluxes,timecols=timecols,magcols=magcols,errcols=errcols,skipdone=skipdone,)try:done_callback(*done_callback_args, **done_callback_kwargs)LOGINFO('' % cpfs)except Exception as e:LOGEXCEPTION('' % cpfs)return cpfs", "docstring": "This makes a checkplot pickle for the given period-finding result pickle\n produced by `lcproc.periodfinding.runpf`.\n\n Parameters\n ----------\n\n pfpickle : str or None\n This is the filename of the period-finding result pickle file created by\n `lcproc.periodfinding.runpf`. If this is None, the checkplot will be\n made anyway, but no phased LC information will be collected into the\n output checkplot pickle. 
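A minimal usage sketch (file names, directories, and the import path below are illustrative assumptions)::

    from astrobase.lcproc.checkplotgen import runcp   # assumed import path

    # from a period-finder result pickle produced by runpf:
    cpfs = runcp('/data/pfresults/periodfinding-OBJ-0001.pkl',
                 '/data/checkplots',
                 '/data/my-lcs',
                 lcformat='my-lcformat')

    # or directly from a light curve, skipping phased-LC plots:
    cpfs = runcp(None,
                 '/data/checkplots',
                 '/data/my-lcs',
                 lcfname='/data/my-lcs/OBJ-0001.lc',
                 lcformat='my-lcformat')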
This can be useful for just collecting GAIA and\n other external information and making LC plots for an object.\n\n outdir : str\n This is the directory to which the output checkplot pickle will be\n written.\n\n lcbasedir : str\n The base directory where this function will look for the light curve\n file associated with the object in the input period-finding result\n pickle file.\n\n lcfname : str or None\n This is usually None because we'll get the path to the light curve\n associated with this period-finding pickle from the pickle itself. If\n `pfpickle` is None, however, this function will use `lcfname` to look up\n the light curve file instead. If both are provided, the value of\n `lcfname` takes precedence.\n\n Providing the light curve file name in this kwarg is useful when you're\n making checkplots directly from light curve files and not including\n period-finder results (perhaps because period-finding takes a long time\n for large collections of LCs).\n\n cprenorm : bool\n Set this to True if the light curves should be renormalized by\n `checkplot.checkplot_pickle`. This is set to False by default because we\n do our own normalization in this function using the light curve's\n registered normalization function and pass the normalized times, mags,\n errs to the `checkplot.checkplot_pickle` function.\n\n lclistpkl : str or dict\n This is either the filename of a pickle or the actual dict produced by\n lcproc.make_lclist. This is used to gather neighbor information.\n\n nbrradiusarcsec : float\n The radius in arcseconds to use for a search conducted around the\n coordinates of this object to look for any potential confusion and\n blending of variability amplitude caused by their proximity.\n\n maxnumneighbors : int\n The maximum number of neighbors that will have their light curves and\n magnitudes noted in this checkplot as potential blends with the target\n object.\n\n makeneighborlcs : bool\n If True, will make light curve and phased light curve plots for all\n neighbors to the current object found in the catalog passed in using\n `lclistpkl`.\n\n fast_mode : bool or float\n This runs the external catalog operations in a \"fast\" mode, with short\n timeouts and not trying to hit external catalogs that take a long time\n to respond.\n\n If this is set to True, the default settings for the external requests\n will then become::\n\n skyview_lookup = False\n skyview_timeout = 10.0\n skyview_retry_failed = False\n dust_timeout = 10.0\n gaia_submit_timeout = 7.0\n gaia_max_timeout = 10.0\n gaia_submit_tries = 2\n complete_query_later = False\n search_simbad = False\n\n If this is a float, will run in \"fast\" mode with the provided timeout\n value in seconds and the following settings::\n\n skyview_lookup = True\n skyview_timeout = fast_mode\n skyview_retry_failed = False\n dust_timeout = fast_mode\n gaia_submit_timeout = 0.66*fast_mode\n gaia_max_timeout = fast_mode\n gaia_submit_tries = 2\n complete_query_later = False\n search_simbad = False\n\n gaia_max_timeout : float\n Sets the timeout in seconds to use when waiting for the GAIA service to\n respond to our request for the object's information. Note that if\n `fast_mode` is set, this is ignored.\n\n gaia_mirror : str or None\n This sets the GAIA mirror to use. 
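To make the float form of `fast_mode` concrete, the derived settings listed above amount to the following mapping (applied internally by the function; reproduced here only for illustration)::

    def fast_mode_settings(fast_mode):
        # mirrors the float-valued fast_mode settings described above
        return {
            'skyview_lookup': True,
            'skyview_timeout': fast_mode,
            'skyview_retry_failed': False,
            'dust_timeout': fast_mode,
            'gaia_submit_timeout': 0.66 * fast_mode,
            'gaia_max_timeout': fast_mode,
            'gaia_submit_tries': 2,
            'complete_query_later': False,
            'search_simbad': False,
        }

    fast_mode_settings(15.0)['gaia_submit_timeout']   # 9.9 seconds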
This is a key in the\n `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each\n mirror.\n\n xmatchinfo : str or dict\n This is either the xmatch dict produced by the function\n `load_xmatch_external_catalogs` above, or the path to the xmatch info\n pickle file produced by that function.\n\n xmatchradiusarcsec : float\n This is the cross-matching radius to use in arcseconds.\n\n minobservations : int\n The minimum of observations the input object's mag/flux time-series must\n have for this function to plot its light curve and phased light\n curve. If the object has less than this number, no light curves will be\n plotted, but the checkplotdict will still contain all of the other\n information.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in generating this checkplot.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in generating this checkplot.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in generating this checkplot.\n\n skipdone : bool\n This indicates if this function will skip creating checkplots that\n already exist corresponding to the current `objectid` and `magcol`. If\n `skipdone` is set to True, this will be done.\n\n done_callback : Python function or None\n This is used to provide a function to execute after the checkplot\n pickles are generated. This is useful if you want to stream the results\n of checkplot making to some other process, e.g. directly running an\n ingestion into an LCC-Server collection. The function will always get\n the list of the generated checkplot pickles as its first arg, and all of\n the kwargs for runcp in the kwargs dict. 
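For example, a hypothetical callback that streams finished checkplots into some ingestion step could be hooked in like this; it receives the list of generated checkplot pickles first, then any extra args from `done_callback_args`, then runcp's kwargs::

    def ingest_checkplots(cpf_list, collection, **runcp_kwargs):
        # `collection` comes from done_callback_args; runcp_kwargs carries
        # everything runcp was called with (lcformat, sigclip, ...)
        for cpf in cpf_list:
            print('ingesting %s into %s' % (cpf, collection))

    cpfs = runcp('/data/pfresults/periodfinding-OBJ-0001.pkl',
                 '/data/checkplots',
                 '/data/my-lcs',
                 lcformat='my-lcformat',
                 done_callback=ingest_checkplots,
                 done_callback_args=['my-collection'])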
Additional args and kwargs can\n be provided by giving a list in the `done_callbacks_args` kwarg and a\n dict in the `done_callbacks_kwargs` kwarg.\n\n NOTE: the function you pass in here should be pickleable by normal\n Python if you want to use it with the parallel_cp and parallel_cp_lcdir\n functions below.\n\n done_callback_args : tuple or None\n If not None, contains any args to pass into the `done_callback`\n function.\n\n done_callback_kwargs : dict or None\n If not None, contains any kwargs to pass into the `done_callback`\n function.\n\n Returns\n -------\n\n list of str\n This returns a list of checkplot pickle filenames with one element for\n each (timecol, magcol, errcol) combination provided in the default\n lcformat config or in the timecols, magcols, errcols kwargs.", "id": "f14700:m2"} {"signature": "def runcp_worker(task):", "body": "pfpickle, outdir, lcbasedir, kwargs = tasktry:return runcp(pfpickle, outdir, lcbasedir, **kwargs)except Exception as e:LOGEXCEPTION('' % (pfpickle, e))return None", "docstring": "This is the worker for running checkplots.\n\nParameters\n----------\n\ntask : tuple\n This is of the form: (pfpickle, outdir, lcbasedir, kwargs).\n\nReturns\n-------\n\nlist of str\n The list of checkplot pickles returned by the `runcp` function.", "id": "f14700:m3"} {"signature": "def parallel_cp(pfpicklelist,outdir,lcbasedir,fast_mode=False,lcfnamelist=None,cprenorm=False,lclistpkl=None,gaia_max_timeout=,gaia_mirror=None,nbrradiusarcsec=,maxnumneighbors=,makeneighborlcs=True,xmatchinfo=None,xmatchradiusarcsec=,sigclip=,minobservations=,lcformat='',lcformatdir=None,timecols=None,magcols=None,errcols=None,skipdone=False,done_callback=None,done_callback_args=None,done_callback_kwargs=None,liststartindex=None,maxobjects=None,nworkers=NCPUS,):", "body": "if sys.platform == '':import requestsrequests.get('')if not os.path.exists(outdir):os.mkdir(outdir)if (liststartindex is not None) and (maxobjects is None):pfpicklelist = pfpicklelist[liststartindex:]if lcfnamelist is not None:lcfnamelist = lcfnamelist[liststartindex:]elif (liststartindex is None) and (maxobjects is not None):pfpicklelist = pfpicklelist[:maxobjects]if lcfnamelist is not None:lcfnamelist = lcfnamelist[:maxobjects]elif (liststartindex is not None) and (maxobjects is not None):pfpicklelist = (pfpicklelist[liststartindex:liststartindex+maxobjects])if lcfnamelist is not None:lcfnamelist = lcfnamelist[liststartindex:liststartindex+maxobjects]if lcfnamelist is None:lcfnamelist = [None]*len(pfpicklelist)tasklist = [(x, outdir, lcbasedir,{'':lcformat,'':lcformatdir,'':y,'':timecols,'':magcols,'':errcols,'':lclistpkl,'':gaia_max_timeout,'':gaia_mirror,'':nbrradiusarcsec,'':maxnumneighbors,'':makeneighborlcs,'':xmatchinfo,'':xmatchradiusarcsec,'':sigclip,'':minobservations,'':skipdone,'':cprenorm,'':fast_mode,'':done_callback,'':done_callback_args,'':done_callback_kwargs}) forx,y in zip(pfpicklelist, lcfnamelist)]resultfutures = []results = []with ProcessPoolExecutor(max_workers=nworkers) as executor:resultfutures = executor.map(runcp_worker, tasklist)results = [x for x in resultfutures]executor.shutdown()return results", "docstring": "This drives the parallel execution of `runcp` for a list of periodfinding\n result pickles.\n\n Parameters\n ----------\n\n pfpicklelist : list of str or list of Nones\n This is the list of the filenames of the period-finding result pickles\n to process. 
To make checkplots using the light curves directly, set this\n to a list of Nones with the same length as the list of light curve files\n that you provide in `lcfnamelist`.\n\n outdir : str\n The directory the checkplot pickles will be written to.\n\n lcbasedir : str\n The base directory that this function will look in to find the light\n curves pointed to by the period-finding result files. If you're using\n `lcfnamelist` to provide a list of light curve filenames directly, this\n arg is ignored.\n\n lcfnamelist : list of str or None\n If this is provided, it must be a list of the input light curve\n filenames to process. These can either be associated with each input\n period-finder result pickle, or can be provided standalone to make\n checkplots without phased LC plots in them. In the second case, you must\n set `pfpicklelist` to a list of Nones that matches the length of\n `lcfnamelist`.\n\n cprenorm : bool\n Set this to True if the light curves should be renormalized by\n `checkplot.checkplot_pickle`. This is set to False by default because we\n do our own normalization in this function using the light curve's\n registered normalization function and pass the normalized times, mags,\n errs to the `checkplot.checkplot_pickle` function.\n\n lclistpkl : str or dict\n This is either the filename of a pickle or the actual dict produced by\n lcproc.make_lclist. This is used to gather neighbor information.\n\n nbrradiusarcsec : float\n The radius in arcseconds to use for a search conducted around the\n coordinates of this object to look for any potential confusion and\n blending of variability amplitude caused by their proximity.\n\n maxnumneighbors : int\n The maximum number of neighbors that will have their light curves and\n magnitudes noted in this checkplot as potential blends with the target\n object.\n\n makeneighborlcs : bool\n If True, will make light curve and phased light curve plots for all\n neighbors found in the object collection for each input object.\n\n fast_mode : bool or float\n This runs the external catalog operations in a \"fast\" mode, with short\n timeouts and not trying to hit external catalogs that take a long time\n to respond.\n\n If this is set to True, the default settings for the external requests\n will then become::\n\n skyview_lookup = False\n skyview_timeout = 10.0\n skyview_retry_failed = False\n dust_timeout = 10.0\n gaia_submit_timeout = 7.0\n gaia_max_timeout = 10.0\n gaia_submit_tries = 2\n complete_query_later = False\n search_simbad = False\n\n If this is a float, will run in \"fast\" mode with the provided timeout\n value in seconds and the following settings::\n\n skyview_lookup = True\n skyview_timeout = fast_mode\n skyview_retry_failed = False\n dust_timeout = fast_mode\n gaia_submit_timeout = 0.66*fast_mode\n gaia_max_timeout = fast_mode\n gaia_submit_tries = 2\n complete_query_later = False\n search_simbad = False\n\n gaia_max_timeout : float\n Sets the timeout in seconds to use when waiting for the GAIA service to\n respond to our request for the object's information. Note that if\n `fast_mode` is set, this is ignored.\n\n gaia_mirror : str or None\n This sets the GAIA mirror to use. 
This is a key in the\n `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each\n mirror.\n\n xmatchinfo : str or dict\n This is either the xmatch dict produced by the function\n `load_xmatch_external_catalogs` above, or the path to the xmatch info\n pickle file produced by that function.\n\n xmatchradiusarcsec : float\n This is the cross-matching radius to use in arcseconds.\n\n minobservations : int\n The minimum of observations the input object's mag/flux time-series must\n have for this function to plot its light curve and phased light\n curve. If the object has less than this number, no light curves will be\n plotted, but the checkplotdict will still contain all of the other\n information.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in generating this checkplot.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in generating this checkplot.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in generating this checkplot.\n\n skipdone : bool\n This indicates if this function will skip creating checkplots that\n already exist corresponding to the current `objectid` and `magcol`. If\n `skipdone` is set to True, this will be done.\n\n done_callback : Python function or None\n This is used to provide a function to execute after the checkplot\n pickles are generated. This is useful if you want to stream the results\n of checkplot making to some other process, e.g. directly running an\n ingestion into an LCC-Server collection. The function will always get\n the list of the generated checkplot pickles as its first arg, and all of\n the kwargs for runcp in the kwargs dict. 
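A sketch of driving `parallel_cp` straight from light curve files, without period-finder results, under the same illustrative names as before::

    import glob
    from astrobase.lcproc.checkplotgen import parallel_cp   # assumed import path

    lcfiles = sorted(glob.glob('/data/my-lcs/*.lc'))

    # one None per LC file, so checkplots are made without phased LC panels
    results = parallel_cp([None] * len(lcfiles),
                          '/data/checkplots',
                          '/data/my-lcs',
                          lcfnamelist=lcfiles,
                          lcformat='my-lcformat',
                          nworkers=8)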
Additional args and kwargs can\n be provided by giving a list in the `done_callbacks_args` kwarg and a\n dict in the `done_callbacks_kwargs` kwarg.\n\n NOTE: the function you pass in here should be pickleable by normal\n Python if you want to use it with the parallel_cp and parallel_cp_lcdir\n functions below.\n\n done_callback_args : tuple or None\n If not None, contains any args to pass into the `done_callback`\n function.\n\n done_callback_kwargs : dict or None\n If not None, contains any kwargs to pass into the `done_callback`\n function.\n\n liststartindex : int\n The index of the `pfpicklelist` (and `lcfnamelist` if provided) to start\n working at.\n\n maxobjects : int\n The maximum number of objects to process in this run. Use this with\n `liststartindex` to effectively distribute working on a large list of\n input period-finding result pickles (and light curves if `lcfnamelist`\n is also provided) over several sessions or machines.\n\n nworkers : int\n The number of parallel workers that will work on the checkplot\n generation process.\n\n Returns\n -------\n\n dict\n This returns a dict with keys = input period-finding pickles and vals =\n list of the corresponding checkplot pickles produced.", "id": "f14700:m4"} {"signature": "def parallel_cp_pfdir(pfpickledir,outdir,lcbasedir,pfpickleglob='',lclistpkl=None,cprenorm=False,nbrradiusarcsec=,maxnumneighbors=,makeneighborlcs=True,fast_mode=False,gaia_max_timeout=,gaia_mirror=None,xmatchinfo=None,xmatchradiusarcsec=,minobservations=,sigclip=,lcformat='',lcformatdir=None,timecols=None,magcols=None,errcols=None,skipdone=False,done_callback=None,done_callback_args=None,done_callback_kwargs=None,maxobjects=None,nworkers=):", "body": "pfpicklelist = sorted(glob.glob(os.path.join(pfpickledir, pfpickleglob)))LOGINFO('' %len(pfpicklelist))return parallel_cp(pfpicklelist,outdir,lcbasedir,fast_mode=fast_mode,lclistpkl=lclistpkl,nbrradiusarcsec=nbrradiusarcsec,gaia_max_timeout=gaia_max_timeout,gaia_mirror=gaia_mirror,maxnumneighbors=maxnumneighbors,makeneighborlcs=makeneighborlcs,xmatchinfo=xmatchinfo,xmatchradiusarcsec=xmatchradiusarcsec,sigclip=sigclip,minobservations=minobservations,cprenorm=cprenorm,maxobjects=maxobjects,lcformat=lcformat,lcformatdir=lcformatdir,timecols=timecols,magcols=magcols,errcols=errcols,skipdone=skipdone,nworkers=nworkers,done_callback=done_callback,done_callback_args=done_callback_args,done_callback_kwargs=done_callback_kwargs)", "docstring": "This drives the parallel execution of `runcp` for a directory of\n periodfinding pickles.\n\n Parameters\n ----------\n\n pfpickledir : str\n This is the directory containing all of the period-finding pickles to\n process.\n\n outdir : str\n The directory the checkplot pickles will be written to.\n\n lcbasedir : str\n The base directory that this function will look in to find the light\n curves pointed to by the period-finding result files. If you're using\n `lcfnamelist` to provide a list of light curve filenames directly, this\n arg is ignored.\n\n pkpickleglob : str\n This is a UNIX file glob to select period-finding result pickles in the\n specified `pfpickledir`.\n\n lclistpkl : str or dict\n This is either the filename of a pickle or the actual dict produced by\n lcproc.make_lclist. This is used to gather neighbor information.\n\n cprenorm : bool\n Set this to True if the light curves should be renormalized by\n `checkplot.checkplot_pickle`. 
This is set to False by default because we\n do our own normalization in this function using the light curve's\n registered normalization function and pass the normalized times, mags,\n errs to the `checkplot.checkplot_pickle` function.\n\n nbrradiusarcsec : float\n The radius in arcseconds to use for a search conducted around the\n coordinates of this object to look for any potential confusion and\n blending of variability amplitude caused by their proximity.\n\n maxnumneighbors : int\n The maximum number of neighbors that will have their light curves and\n magnitudes noted in this checkplot as potential blends with the target\n object.\n\n makeneighborlcs : bool\n If True, will make light curve and phased light curve plots for all\n neighbors found in the object collection for each input object.\n\n fast_mode : bool or float\n This runs the external catalog operations in a \"fast\" mode, with short\n timeouts and not trying to hit external catalogs that take a long time\n to respond.\n\n If this is set to True, the default settings for the external requests\n will then become::\n\n skyview_lookup = False\n skyview_timeout = 10.0\n skyview_retry_failed = False\n dust_timeout = 10.0\n gaia_submit_timeout = 7.0\n gaia_max_timeout = 10.0\n gaia_submit_tries = 2\n complete_query_later = False\n search_simbad = False\n\n If this is a float, will run in \"fast\" mode with the provided timeout\n value in seconds and the following settings::\n\n skyview_lookup = True\n skyview_timeout = fast_mode\n skyview_retry_failed = False\n dust_timeout = fast_mode\n gaia_submit_timeout = 0.66*fast_mode\n gaia_max_timeout = fast_mode\n gaia_submit_tries = 2\n complete_query_later = False\n search_simbad = False\n\n gaia_max_timeout : float\n Sets the timeout in seconds to use when waiting for the GAIA service to\n respond to our request for the object's information. Note that if\n `fast_mode` is set, this is ignored.\n\n gaia_mirror : str or None\n This sets the GAIA mirror to use. This is a key in the\n `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each\n mirror.\n\n xmatchinfo : str or dict\n This is either the xmatch dict produced by the function\n `load_xmatch_external_catalogs` above, or the path to the xmatch info\n pickle file produced by that function.\n\n xmatchradiusarcsec : float\n This is the cross-matching radius to use in arcseconds.\n\n minobservations : int\n The minimum of observations the input object's mag/flux time-series must\n have for this function to plot its light curve and phased light\n curve. If the object has less than this number, no light curves will be\n plotted, but the checkplotdict will still contain all of the other\n information.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. 
Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in generating this checkplot.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in generating this checkplot.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in generating this checkplot.\n\n skipdone : bool\n This indicates if this function will skip creating checkplots that\n already exist corresponding to the current `objectid` and `magcol`. If\n `skipdone` is set to True, this will be done.\n\n done_callback : Python function or None\n This is used to provide a function to execute after the checkplot\n pickles are generated. This is useful if you want to stream the results\n of checkplot making to some other process, e.g. directly running an\n ingestion into an LCC-Server collection. The function will always get\n the list of the generated checkplot pickles as its first arg, and all of\n the kwargs for runcp in the kwargs dict. 
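The directory-level driver can be invoked along these lines (paths and the glob are illustrative)::

    from astrobase.lcproc.checkplotgen import parallel_cp_pfdir   # assumed import path

    results = parallel_cp_pfdir('/data/pfresults',
                                '/data/checkplots',
                                '/data/my-lcs',
                                pfpickleglob='periodfinding-*.pkl',
                                lcformat='my-lcformat',
                                sigclip=[10., 3.],
                                skipdone=True,
                                nworkers=8)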
Additional args and kwargs can\n be provided by giving a list in the `done_callbacks_args` kwarg and a\n dict in the `done_callbacks_kwargs` kwarg.\n\n NOTE: the function you pass in here should be pickleable by normal\n Python if you want to use it with the parallel_cp and parallel_cp_lcdir\n functions below.\n\n done_callback_args : tuple or None\n If not None, contains any args to pass into the `done_callback`\n function.\n\n done_callback_kwargs : dict or None\n If not None, contains any kwargs to pass into the `done_callback`\n function.\n\n maxobjects : int\n The maximum number of objects to process in this run.\n\n nworkers : int\n The number of parallel workers that will work on the checkplot\n generation process.\n\n Returns\n -------\n\n dict\n This returns a dict with keys = input period-finding pickles and vals =\n list of the corresponding checkplot pickles produced.", "id": "f14700:m5"} {"signature": "def _read_pklc(lcfile):", "body": "if lcfile.endswith(''):try:with gzip.open(lcfile,'') as infd:lcdict = pickle.load(infd)except UnicodeDecodeError:with gzip.open(lcfile,'') as infd:lcdict = pickle.load(infd, encoding='')else:try:with open(lcfile,'') as infd:lcdict = pickle.load(infd)except UnicodeDecodeError:with open(lcfile,'') as infd:lcdict = pickle.load(infd, encoding='')return lcdict", "docstring": "This just reads a light curve pickle file.\n\nParameters\n----------\n\nlcfile : str\n The file name of the pickle to open.\n\nReturns\n-------\n\ndict\n This returns an lcdict.", "id": "f14701:m1"} {"signature": "def _check_extmodule(module, formatkey):", "body": "try:if os.path.exists(module):sys.path.append(os.path.dirname(module))importedok = importlib.import_module(os.path.basename(module.replace('','')))else:importedok = importlib.import_module(module)except Exception as e:LOGEXCEPTION(''''% (module, formatkey))importedok = Falsereturn importedok", "docstring": "This imports the module specified.\n\n Used to dynamically import Python modules that are needed to support LC\n formats not natively supported by astrobase.\n\n Parameters\n ----------\n\n module : str\n This is either:\n\n - a Python module import path, e.g. 'astrobase.lcproc.catalogs' or\n - a path to a Python file, e.g. '/astrobase/hatsurveys/hatlc.py'\n\n that contains the Python module that contains functions used to open\n (and optionally normalize) a custom LC format that's not natively\n supported by astrobase.\n\n formatkey : str\n A str used as the unique ID of this LC format for all lcproc functions\n and can be used to look it up later and import the correct functions\n needed to support it for lcproc operations. 
For example, we use\n 'kep-fits' as a the specifier for Kepler FITS light curves, which can be\n read by the `astrobase.astrokep.read_kepler_fitslc` function as\n specified by the `/data/lcformats/kep-fits.json`\n LC format specification JSON.\n\n Returns\n -------\n\n Python module\n This returns a Python module if it's able to successfully import it.", "id": "f14701:m2"} {"signature": "def register_lcformat(formatkey,fileglob,timecols,magcols,errcols,readerfunc_module,readerfunc,readerfunc_kwargs=None,normfunc_module=None,normfunc=None,normfunc_kwargs=None,magsarefluxes=False,overwrite_existing=False,lcformat_dir=''):", "body": "LOGINFO('' % formatkey)lcformat_dpath = os.path.abspath(os.path.expanduser(lcformat_dir))if not os.path.exists(lcformat_dpath):os.makedirs(lcformat_dpath)lcformat_jsonpath = os.path.join(lcformat_dpath,'' % formatkey)if os.path.exists(lcformat_jsonpath) and not overwrite_existing:LOGERROR(''''''% (lcformat_jsonpath, formatkey))return Nonereadermodule = _check_extmodule(readerfunc_module, formatkey)if not readermodule:LOGERROR(\"\"\"\" %(readerfunc_module, formatkey))return Nonetry:getattr(readermodule, readerfunc)readerfunc_in = readerfuncexcept AttributeError:LOGEXCEPTION(''''''% (formatkey, readerfunc_module, readerfunc))raiseif normfunc_module:normmodule = _check_extmodule(normfunc_module, formatkey)if not normmodule:LOGERROR(\"\"\"\" %(normfunc_module, formatkey))return Noneelse:normmodule = Noneif normfunc_module and normfunc:try:getattr(normmodule, normfunc)normfunc_in = normfuncexcept AttributeError:LOGEXCEPTION(''''''% (normfunc, formatkey, normfunc_module))raiseelse:normfunc_in = Noneformatdict = {'':fileglob,'':timecols,'':magcols,'':errcols,'':magsarefluxes,'':readerfunc_module,'':readerfunc_in,'':readerfunc_kwargs,'':normfunc_module,'':normfunc_in,'':normfunc_kwargs}with open(lcformat_jsonpath,'') as outfd:json.dump(formatdict, outfd, indent=)return lcformat_jsonpath", "docstring": "This adds a new LC format to the astrobase LC format registry.\n\n Allows handling of custom format light curves for astrobase lcproc\n drivers. Once the format is successfully registered, light curves should\n work transparently with all of the functions in this module, by simply\n calling them with the `formatkey` in the `lcformat` keyword argument.\n\n LC format specifications are generated as JSON files. astrobase comes with\n several of these in `/data/lcformats`. LC formats\n you add by using this function will have their specifiers written to the\n `~/.astrobase/lcformat-jsons` directory in your home directory.\n\n Parameters\n ----------\n\n formatkey : str\n A str used as the unique ID of this LC format for all lcproc functions\n and can be used to look it up later and import the correct functions\n needed to support it for lcproc operations. For example, we use\n 'kep-fits' as a the specifier for Kepler FITS light curves, which can be\n read by the `astrobase.astrokep.read_kepler_fitslc` function as\n specified by the `/data/lcformats/kep-fits.json`\n LC format specification JSON produced by `register_lcformat`.\n\n fileglob : str\n The default UNIX fileglob to use to search for light curve files in this\n LC format. This is a string like '*-whatever-???-*.*??-.lc'.\n\n timecols,magcols,errcols : list of str\n These are all lists of strings indicating which keys in the lcdict\n produced by your `lcreader_func` that will be extracted and used by\n lcproc functions for processing. The lists must all have the same\n dimensions, e.g. 
if timecols = ['timecol1','timecol2'], then magcols\n must be something like ['magcol1','magcol2'] and errcols must be\n something like ['errcol1', 'errcol2']. This allows you to process\n multiple apertures or multiple types of measurements in one go.\n\n Each element in these lists can be a simple key, e.g. 'time' (which\n would correspond to lcdict['time']), or a composite key,\n e.g. 'aperture1.times.rjd' (which would correspond to\n lcdict['aperture1']['times']['rjd']). See the examples in the lcformat\n specification JSON files in `/data/lcformats`.\n\n readerfunc_module : str\n This is either:\n\n - a Python module import path, e.g. 'astrobase.lcproc.catalogs' or\n - a path to a Python file, e.g. '/astrobase/hatsurveys/hatlc.py'\n\n that contains the Python module that contains functions used to open\n (and optionally normalize) a custom LC format that's not natively\n supported by astrobase.\n\n readerfunc : str\n This is the function name in `readerfunc_module` to use to read light\n curves in the custom format. This MUST always return a dictionary (the\n 'lcdict') with the following signature (the keys listed below are\n required, but others are allowed)::\n\n {'objectid': this object's identifier as a string,\n 'objectinfo':{'ra': this object's right ascension in decimal deg,\n 'decl': this object's declination in decimal deg,\n 'ndet': the number of observations in this LC,\n 'objectid': the object ID again for legacy reasons},\n ...other time columns, mag columns go in as their own keys}\n\n normfunc_kwargs : dict or None\n This is a dictionary containing any kwargs to pass through to\n the light curve norm function.\n\n normfunc_module : str or None\n This is either:\n\n - a Python module import path, e.g. 'astrobase.lcproc.catalogs' or\n - a path to a Python file, e.g. '/astrobase/hatsurveys/hatlc.py'\n - None, in which case we'll use default normalization\n\n that contains the Python module that contains functions used to\n normalize a custom LC format that's not natively supported by astrobase.\n\n normfunc : str or None\n This is the function name in `normfunc_module` to use to normalize light\n curves in the custom format. If None, the default normalization method\n used by lcproc is to find gaps in the time-series, normalize\n measurements grouped by these gaps to zero, then normalize the entire\n magnitude time series to global time series median using the\n `astrobase.lcmath.normalize_magseries` function.\n\n If this is provided, the normalization function should take and return\n an lcdict of the same form as that produced by `readerfunc` above. For\n an example of a specific normalization function, see\n `normalize_lcdict_by_inst` in the `astrobase.hatsurveys.hatlc` module.\n\n normfunc_kwargs : dict or None\n This is a dictionary containing any kwargs to pass through to\n the light curve normalization function.\n\n magsarefluxes : bool\n If this is True, then all lcproc functions will treat the measurement\n columns in the lcdict produced by your `readerfunc` as flux instead of\n mags, so things like default normalization and sigma-clipping will be\n done correctly. If this is False, magnitudes will be treated as\n magnitudes.\n\n overwrite_existing : bool\n If this is True, this function will overwrite any existing LC format\n specification JSON with the same name as that provided in the\n `formatkey` arg. 
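For instance, a reader for a hypothetical two-column CSV light curve format, written to satisfy the lcdict contract above, might live in a file like `/home/me/mylcformat.py`::

    import os
    import numpy as np

    def read_my_lc(lcfile):
        # returns the minimal lcdict required by lcproc
        times, mags = np.loadtxt(lcfile, delimiter=',', unpack=True)
        objectid = os.path.splitext(os.path.basename(lcfile))[0]
        return {
            'objectid': objectid,
            'objectinfo': {'ra': None,      # fill in real coordinates if known
                           'decl': None,
                           'ndet': times.size,
                           'objectid': objectid},
            'time': times,
            'mag': mags,
            'err': np.full_like(mags, 0.01),   # placeholder errors
        }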
This can be used to update LC format specifications\n while keeping the `formatkey` the same.\n\n lcformat_dir : str\n This specifies the directory where the the LC format specification JSON\n produced by this function will be written. By default, this goes to the\n `.astrobase/lcformat-jsons` directory in your home directory.\n\n Returns\n -------\n\n str\n Returns the file path to the generated LC format specification JSON\n file.", "id": "f14701:m3"} {"signature": "def get_lcformat(formatkey, use_lcformat_dir=None):", "body": "if isinstance(use_lcformat_dir, str):lcformat_jsonpath = os.path.join(use_lcformat_dir,'' % formatkey)if not os.path.exists(lcformat_jsonpath):lcformat_jsonpath = os.path.join(os.path.expanduser(''),'' % formatkey)if not os.path.exists(lcformat_jsonpath):install_path = os.path.dirname(__file__)install_path = os.path.abspath(os.path.join(install_path, '', '',''))lcformat_jsonpath = os.path.join(install_path,'' % formatkey)if not os.path.exists(lcformat_jsonpath):LOGERROR(''''''''% formatkey)return Noneelse:lcformat_jsonpath = os.path.join(os.path.expanduser(''),'' % formatkey)if not os.path.exists(lcformat_jsonpath):install_path = os.path.dirname(__file__)install_path = os.path.abspath(os.path.join(install_path, '', '',''))lcformat_jsonpath = os.path.join(install_path,'' % formatkey)if not os.path.exists(lcformat_jsonpath):LOGERROR(''''''''% formatkey)return Nonewith open(lcformat_jsonpath) as infd:lcformatdict = json.load(infd)readerfunc_module = lcformatdict['']readerfunc = lcformatdict['']readerfunc_kwargs = lcformatdict['']normfunc_module = lcformatdict['']normfunc = lcformatdict['']normfunc_kwargs = lcformatdict['']fileglob = lcformatdict['']timecols = lcformatdict['']magcols = lcformatdict['']errcols = lcformatdict['']magsarefluxes = lcformatdict['']readermodule = _check_extmodule(readerfunc_module, formatkey)if not readermodule:LOGERROR(\"\"\"\" %(readerfunc_module, formatkey))return Nonetry:readerfunc_in = getattr(readermodule, readerfunc)except AttributeError:LOGEXCEPTION(''''''% (formatkey, readerfunc_module, readerfunc))raiseif normfunc_module:normmodule = _check_extmodule(normfunc_module, formatkey)if not normmodule:LOGERROR(\"\"\"\" %(normfunc_module, formatkey))return Noneelse:normmodule = Noneif normfunc_module and normfunc:try:normfunc_in = getattr(normmodule, normfunc)except AttributeError:LOGEXCEPTION(''''''% (formatkey, normfunc_module, normfunc))raiseelse:normfunc_in = Noneif isinstance(readerfunc_kwargs, dict):readerfunc_in = partial(readerfunc_in, **readerfunc_kwargs)if normfunc_in is not None:if isinstance(normfunc_kwargs, dict):normfunc_in = partial(normfunc_in, **normfunc_kwargs)returntuple = (fileglob,readerfunc_in,timecols,magcols,errcols,magsarefluxes,normfunc_in,)return returntuple", "docstring": "This loads an LC format description from a previously-saved JSON file.\n\n Parameters\n ----------\n\n formatkey : str\n The key used to refer to the LC format. This is part of the JSON file's\n name, e.g. the format key 'hat-csv' maps to the format JSON file:\n '/data/lcformats/hat-csv.json'.\n\n use_lcformat_dir : str or None\n If provided, must be the path to a directory that contains the\n corresponding lcformat JSON file for `formatkey`. 
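Registering that hypothetical reader and recovering the format specification later could then look like::

    from astrobase.lcproc import register_lcformat, get_lcformat   # assumed import path

    register_lcformat(
        'my-csv',                      # hypothetical formatkey
        '*.csv',                       # default fileglob for this format
        ['time'], ['mag'], ['err'],    # timecols, magcols, errcols
        '/home/me/mylcformat.py',      # readerfunc_module given as a file path
        'read_my_lc',                  # readerfunc name inside that module
        magsarefluxes=False,
    )

    (fileglob, readerfunc,
     timecols, magcols, errcols,
     magsarefluxes, normfunc) = get_lcformat('my-csv')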
If this is None, this\n function will look for lcformat JSON files corresponding to the given\n `formatkey`:\n\n - first, in the directory specified in this kwarg,\n - if not found there, in the home directory: ~/.astrobase/lcformat-jsons\n - if not found there, in: /data/lcformats\n\n Returns\n -------\n\n tuple\n A tuple of the following form is returned::\n\n (fileglob : the file glob of the associated LC files,\n readerfunc_in : the imported Python function for reading LCs,\n timecols : list of time col keys to get from the lcdict,\n magcols : list of mag col keys to get from the lcdict ,\n errcols : list of err col keys to get from the lcdict,\n magsarefluxes : True if the measurements are fluxes not mags,\n normfunc_in : the imported Python function for normalizing LCs)\n\n All `astrobase.lcproc` functions can then use this tuple to dynamically\n import your LC reader and normalization functions to work with your LC\n format transparently.", "id": "f14701:m4"} {"signature": "def runpf(lcfile,outdir,timecols=None,magcols=None,errcols=None,lcformat='',lcformatdir=None,pfmethods=('','','',''),pfkwargs=({},{},{},{}),sigclip=,getblssnr=False,nworkers=NCPUS,minobservations=,excludeprocessed=False,raiseonfail=False):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif timecols is None:timecols = dtimecolsif magcols is None:magcols = dmagcolsif errcols is None:errcols = derrcolstry:lcdict = readerfunc(lcfile)if ( (isinstance(lcdict, (list, tuple))) and(isinstance(lcdict[], dict)) ):lcdict = lcdict[]outfile = os.path.join(outdir, '' %squeeze(lcdict['']).replace('', ''))if excludeprocessed:test_outfile = os.path.exists(outfile)test_outfile_gz = os.path.exists(outfile+'')if (test_outfile and os.stat(outfile).st_size > ):LOGWARNING(''''% (lcfile, outfile))return outfileelif (test_outfile_gz and os.stat(outfile+'').st_size > ):LOGWARNING(''''% (lcfile, outfile+''))return outfile+''resultdict = {'':lcdict[''],'':os.path.basename(lcfile),'':{'':timecols,'':magcols,'':errcols,'':lcformat,'':lcformatdir,'':pfmethods,'':pfkwargs,'':sigclip,'':getblssnr}}if normfunc is not None:lcdict = normfunc(lcdict)for tcol, mcol, ecol in zip(timecols, magcols, errcols):if '' in tcol:tcolget = tcol.split('')else:tcolget = [tcol]times = _dict_get(lcdict, tcolget)if '' in mcol:mcolget = mcol.split('')else:mcolget = [mcol]mags = _dict_get(lcdict, mcolget)if '' in ecol:ecolget = ecol.split('')else:ecolget = [ecol]errs = _dict_get(lcdict, ecolget)if normfunc is None:ntimes, nmags = normalize_magseries(times, mags,magsarefluxes=magsarefluxes)times, mags, errs = ntimes, nmags, errsresultdict[mcol] = {}finmags = mags[np.isfinite(mags)]if finmags.size < minobservations:LOGERROR('''''' %(finmags.size, minobservations, mcol))continuepfmkeys = []for pfmind, pfm, pfkw in zip(range(len(pfmethods)),pfmethods,pfkwargs):pf_func = PFMETHODS[pfm]pf_kwargs = pfkwpf_kwargs.update({'':False,'':nworkers,'':magsarefluxes,'':sigclip})pfmkey = '' % (pfmind, pfm)pfmkeys.append(pfmkey)resultdict[mcol][pfmkey] = pf_func(times, mags, errs,**pf_kwargs)resultdict[mcol][''] = pfmkeysif '' in pfmethods and getblssnr:for pfmk in resultdict[mcol]['']:if '' in pfmk:try:bls = resultdict[mcol][pfmk]blssnr = bls_snr(bls, times, mags, 
errs,magsarefluxes=magsarefluxes,verbose=False)resultdict[mcol][pfmk].update({'':blssnr[''],'':blssnr[''],'':blssnr[''],})resultdict[mcol][pfmk].update({'':blssnr[''],'':blssnr['']})except Exception as e:LOGEXCEPTION('' %lcfile)resultdict[mcol][pfmk].update({'':[np.nan,np.nan,np.nan,np.nan,np.nan],'':[np.nan,np.nan,np.nan,np.nan,np.nan],'':[np.nan,np.nan,np.nan,np.nan,np.nan],})elif '' in pfmethods:for pfmk in resultdict[mcol]['']:if '' in pfmk:resultdict[mcol][pfmk].update({'':[np.nan,np.nan,np.nan,np.nan,np.nan],'':[np.nan,np.nan,np.nan,np.nan,np.nan],'':[np.nan,np.nan,np.nan,np.nan,np.nan],})with open(outfile, '') as outfd:pickle.dump(resultdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)return outfileexcept Exception as e:LOGEXCEPTION('' % (lcfile, e))if raiseonfail:raisereturn None", "docstring": "This runs the period-finding for a single LC.\n\n Parameters\n ----------\n\n lcfile : str\n The light curve file to run period-finding on.\n\n outdir : str\n The output directory where the result pickle will go.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the features.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the features.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the features.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n pfmethods : list of str\n This is a list of period finding methods to run. Each element is a\n string matching the keys of the `PFMETHODS` dict above. By default, this\n runs GLS, PDM, AoVMH, and the spectral window Lomb-Scargle periodogram.\n\n pfkwargs : list of dicts\n This is used to provide any special kwargs as dicts to each\n period-finding method function specified in `pfmethods`.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. 
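A usage sketch for a single LC; the period-finder key names below are assumed to be the usual short identifiers in `PFMETHODS` for GLS, PDM, and BLS::

    from astrobase.lcproc.periodsearch import runpf   # assumed import path

    pfpkl = runpf('/data/my-lcs/OBJ-0001.lc',
                  '/data/pfresults',
                  lcformat='my-lcformat',
                  pfmethods=('gls', 'pdm', 'bls'),   # assumed PFMETHODS keys
                  pfkwargs=({}, {}, {}),
                  sigclip=[10., 3.],
                  getblssnr=True,
                  nworkers=4)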
Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n getblssnr : bool\n If this is True and BLS is one of the methods specified in `pfmethods`,\n will also calculate the stats for each best period in the BLS results:\n transit depth, duration, ingress duration, refit period and epoch, and\n the SNR of the transit.\n\n nworkers : int\n The number of parallel period-finding workers to launch.\n\n minobservations : int\n The minimum number of finite LC points required to process a light\n curve.\n\n excludeprocessed : bool\n If this is True, light curves that have existing period-finding result\n pickles in `outdir` will not be processed.\n\n FIXME: currently, this uses a dumb method of excluding already-processed\n files. A smarter way to do this is to (i) generate a SHA512 cachekey\n based on a repr of `{'lcfile', 'timecols', 'magcols', 'errcols',\n 'lcformat', 'pfmethods', 'sigclip', 'getblssnr', 'pfkwargs'}`, (ii) make\n sure all list kwargs in the dict are sorted, (iii) check if the output\n file has the same cachekey in its filename (last 8 chars of cachekey\n should work), so the result was processed in exactly the same way as\n specifed in the input to this function, and can therefore be\n ignored. Will implement this later.\n\n raiseonfail : bool\n If something fails and this is True, will raise an Exception instead of\n returning None at the end.\n\n Returns\n -------\n\n str\n The path to the output period-finding result pickle.", "id": "f14702:m1"} {"signature": "def _runpf_worker(task):", "body": "(lcfile, outdir, timecols, magcols, errcols, lcformat, lcformatdir,pfmethods, pfkwargs, getblssnr, sigclip, nworkers, minobservations,excludeprocessed) = taskif os.path.exists(lcfile):pfresult = runpf(lcfile,outdir,timecols=timecols,magcols=magcols,errcols=errcols,lcformat=lcformat,lcformatdir=lcformatdir,pfmethods=pfmethods,pfkwargs=pfkwargs,getblssnr=getblssnr,sigclip=sigclip,nworkers=nworkers,minobservations=minobservations,excludeprocessed=excludeprocessed)return pfresultelse:LOGERROR('' % lcfile)return None", "docstring": "This runs the runpf function.", "id": "f14702:m2"} {"signature": "def parallel_pf(lclist,outdir,timecols=None,magcols=None,errcols=None,lcformat='',lcformatdir=None,pfmethods=('','','',''),pfkwargs=({},{},{},{}),sigclip=,getblssnr=False,nperiodworkers=NCPUS,ncontrolworkers=,liststartindex=None,listmaxobjects=None,minobservations=,excludeprocessed=True):", "body": "if not os.path.exists(outdir):os.makedirs(outdir)if (liststartindex is not None) and (listmaxobjects is None):lclist = lclist[liststartindex:]elif (liststartindex is None) and (listmaxobjects is not None):lclist = lclist[:listmaxobjects]elif (liststartindex is not None) and (listmaxobjects is not None):lclist = lclist[liststartindex:liststartindex+listmaxobjects]tasklist = [(x, outdir, timecols, magcols, errcols, lcformat, lcformatdir,pfmethods, pfkwargs, getblssnr, sigclip, nperiodworkers,minobservations,excludeprocessed)for x in lclist]with ProcessPoolExecutor(max_workers=ncontrolworkers) as executor:resultfutures = executor.map(_runpf_worker, tasklist)results = [x for x in resultfutures]return results", "docstring": "This drives the overall parallel period processing for a list of LCs.\n\n As a rough benchmark, 25000 HATNet light 
curves with up to 50000 points per\n LC take about 26 days in total for an invocation of this function using\n GLS+PDM+BLS, 10 periodworkers, and 4 controlworkers (so all 40 'cores') on a\n 2 x Xeon E5-2660v3 machine.\n\n Parameters\n ----------\n\n lclist : list of str\n The list of light curve file to process.\n\n outdir : str\n The output directory where the period-finding result pickles will go.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the features.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the features.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the features.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n pfmethods : list of str\n This is a list of period finding methods to run. Each element is a\n string matching the keys of the `PFMETHODS` dict above. By default, this\n runs GLS, PDM, AoVMH, and the spectral window Lomb-Scargle periodogram.\n\n pfkwargs : list of dicts\n This is used to provide any special kwargs as dicts to each\n period-finding method function specified in `pfmethods`.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n getblssnr : bool\n If this is True and BLS is one of the methods specified in `pfmethods`,\n will also calculate the stats for each best period in the BLS results:\n transit depth, duration, ingress duration, refit period and epoch, and\n the SNR of the transit.\n\n nperiodworkers : int\n The number of parallel period-finding workers to launch per object task.\n\n ncontrolworkers : int\n The number of controlling processes to launch. This effectively sets how\n many objects from `lclist` will be processed in parallel.\n\n liststartindex : int or None\n This sets the index from where to start in `lclist`.\n\n listmaxobjects : int or None\n This sets the maximum number of objects in `lclist` to run\n period-finding for in this invocation. 
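The `liststartindex`/`listmaxobjects` slicing and the control-worker dispatch visible in the `parallel_pf` body follow a simple pattern; below is a stripped-down, generic sketch of it. The worker and its task tuple are stand-ins, not astrobase's `_runpf_worker`.

from concurrent.futures import ProcessPoolExecutor

def _toy_worker(task):
    # stand-in for the real per-object period-finding worker
    lcfile, outdir = task
    return '%s -> %s' % (lcfile, outdir)

def dispatch(lclist, outdir,
             liststartindex=None, listmaxobjects=None, ncontrolworkers=2):
    # slice the input list so work can be split across several invocations
    if liststartindex is not None and listmaxobjects is None:
        lclist = lclist[liststartindex:]
    elif liststartindex is None and listmaxobjects is not None:
        lclist = lclist[:listmaxobjects]
    elif liststartindex is not None and listmaxobjects is not None:
        lclist = lclist[liststartindex:liststartindex + listmaxobjects]

    tasks = [(x, outdir) for x in lclist]
    # each control worker handles one object (task) at a time
    with ProcessPoolExecutor(max_workers=ncontrolworkers) as executor:
        return list(executor.map(_toy_worker, tasks))

if __name__ == '__main__':
    # the __main__ guard is needed on platforms that spawn worker processes
    print(dispatch(['lc1.gz', 'lc2.gz', 'lc3.gz'], '/tmp/pf',
                   liststartindex=1, listmaxobjects=2))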
Together with `liststartindex`,\n `listmaxobjects` can be used to distribute processing over several\n independent machines if the number of light curves is very large.\n\n minobservations : int\n The minimum number of finite LC points required to process a light\n curve.\n\n excludeprocessed : bool\n If this is True, light curves that have existing period-finding result\n pickles in `outdir` will not be processed.\n\n FIXME: currently, this uses a dumb method of excluding already-processed\n files. A smarter way to do this is to (i) generate a SHA512 cachekey\n based on a repr of `{'lcfile', 'timecols', 'magcols', 'errcols',\n 'lcformat', 'pfmethods', 'sigclip', 'getblssnr', 'pfkwargs'}`, (ii) make\n sure all list kwargs in the dict are sorted, (iii) check if the output\n file has the same cachekey in its filename (last 8 chars of cachekey\n should work), so the result was processed in exactly the same way as\n specifed in the input to this function, and can therefore be\n ignored. Will implement this later.\n\n Returns\n -------\n\n list of str\n A list of the period-finding pickles created for all of input LCs\n processed.", "id": "f14702:m3"} {"signature": "def parallel_pf_lcdir(lcdir,outdir,fileglob=None,recursive=True,timecols=None,magcols=None,errcols=None,lcformat='',lcformatdir=None,pfmethods=('','','',''),pfkwargs=({},{},{},{}),sigclip=,getblssnr=False,nperiodworkers=NCPUS,ncontrolworkers=,liststartindex=None,listmaxobjects=None,minobservations=,excludeprocessed=True):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif not fileglob:fileglob = dfileglobLOGINFO('' % (lcformat, lcdir))if recursive is False:matching = glob.glob(os.path.join(lcdir, fileglob))else:if sys.version_info[:] > (,):matching = glob.glob(os.path.join(lcdir,'',fileglob),recursive=True)else:walker = os.walk(lcdir)matching = []for root, dirs, _files in walker:for sdir in dirs:searchpath = os.path.join(root,sdir,fileglob)foundfiles = glob.glob(searchpath)if foundfiles:matching.extend(foundfiles)if matching and len(matching) > :matching = sorted(matching)LOGINFO('' % len(matching))return parallel_pf(matching,outdir,timecols=timecols,magcols=magcols,errcols=errcols,lcformat=lcformat,lcformatdir=lcformatdir,pfmethods=pfmethods,pfkwargs=pfkwargs,getblssnr=getblssnr,sigclip=sigclip,nperiodworkers=nperiodworkers,ncontrolworkers=ncontrolworkers,liststartindex=liststartindex,listmaxobjects=listmaxobjects,minobservations=minobservations,excludeprocessed=excludeprocessed)else:LOGERROR('' % (lcformat,lcdir))return None", "docstring": "This runs parallel light curve period finding for directory of LCs.\n\n Parameters\n ----------\n\n lcdir : str\n The directory containing the LCs to process.\n\n outdir : str\n The directory where the resulting period-finding pickles will go.\n\n fileglob : str or None\n The UNIX file glob to use to search for LCs in `lcdir`. 
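The recursive light-curve search in the `parallel_pf_lcdir` body uses glob's '**' pattern on newer Pythons and falls back to `os.walk` otherwise. A minimal sketch of that logic follows; the default `fileglob` value here is only an example, not the registered default of any LC format.

import glob
import os
import sys

def find_lightcurves(lcdir, fileglob='*-lc.sqlite.gz', recursive=True):
    if not recursive:
        return sorted(glob.glob(os.path.join(lcdir, fileglob)))
    if sys.version_info[:2] >= (3, 5):
        # '**' with recursive=True descends into all subdirectories
        return sorted(glob.glob(os.path.join(lcdir, '**', fileglob),
                                recursive=True))
    # older Pythons: walk the tree and glob inside each subdirectory
    matching = []
    for root, dirs, _files in os.walk(lcdir):
        for sdir in dirs:
            matching.extend(glob.glob(os.path.join(root, sdir, fileglob)))
    return sorted(matching)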
If None, the\n default file glob associated with the registered LC format will be used\n instead.\n\n recursive : bool\n If True, will search recursively in `lcdir` for light curves to process.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the features.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the features.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the features.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n pfmethods : list of str\n This is a list of period finding methods to run. Each element is a\n string matching the keys of the `PFMETHODS` dict above. By default, this\n runs GLS, PDM, AoVMH, and the spectral window Lomb-Scargle periodogram.\n\n pfkwargs : list of dicts\n This is used to provide any special kwargs as dicts to each\n period-finding method function specified in `pfmethods`.\n\n sigclip : float or int or sequence of two floats/ints or None\n If a single float or int, a symmetric sigma-clip will be performed using\n the number provided as the sigma-multiplier to cut out from the input\n time-series.\n\n If a list of two ints/floats is provided, the function will perform an\n 'asymmetric' sigma-clip. The first element in this list is the sigma\n value to use for fainter flux/mag values; the second element in this\n list is the sigma value to use for brighter flux/mag values. For\n example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n dimmings and greater than 3-sigma brightenings. Here the meaning of\n \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n If `sigclip` is None, no sigma-clipping will be performed, and the\n time-series (with non-finite elems removed) will be passed through to\n the output.\n\n getblssnr : bool\n If this is True and BLS is one of the methods specified in `pfmethods`,\n will also calculate the stats for each best period in the BLS results:\n transit depth, duration, ingress duration, refit period and epoch, and\n the SNR of the transit.\n\n nperiodworkers : int\n The number of parallel period-finding workers to launch per object task.\n\n ncontrolworkers : int\n The number of controlling processes to launch. This effectively sets how\n many objects from `lclist` will be processed in parallel.\n\n liststartindex : int or None\n This sets the index from where to start in `lclist`.\n\n listmaxobjects : int or None\n This sets the maximum number of objects in `lclist` to run\n period-finding for in this invocation. 
Together with `liststartindex`,\n `listmaxobjects` can be used to distribute processing over several\n independent machines if the number of light curves is very large.\n\n minobservations : int\n The minimum number of finite LC points required to process a light\n curve.\n\n excludeprocessed : bool\n If this is True, light curves that have existing period-finding result\n pickles in `outdir` will not be processed.\n\n FIXME: currently, this uses a dumb method of excluding already-processed\n files. A smarter way to do this is to (i) generate a SHA512 cachekey\n based on a repr of `{'lcfile', 'timecols', 'magcols', 'errcols',\n 'lcformat', 'pfmethods', 'sigclip', 'getblssnr', 'pfkwargs'}`, (ii) make\n sure all list kwargs in the dict are sorted, (iii) check if the output\n file has the same cachekey in its filename (last 8 chars of cachekey\n should work), so the result was processed in exactly the same way as\n specifed in the input to this function, and can therefore be\n ignored. Will implement this later.\n\n Returns\n -------\n\n list of str\n A list of the period-finding pickles created for all of input LCs\n processed.", "id": "f14702:m4"} {"signature": "def xmatch_cplist_external_catalogs(cplist,xmatchpkl,xmatchradiusarcsec=,updateexisting=True,resultstodir=None):", "body": "with open(xmatchpkl,'') as infd:xmd = pickle.load(infd)status_dict = {}for cpf in cplist:cpd = _read_checkplot_picklefile(cpf)try:xmatch_external_catalogs(cpd, xmd,xmatchradiusarcsec=xmatchradiusarcsec,updatexmatch=updateexisting)for xmi in cpd['']:if cpd[''][xmi]['']:LOGINFO('''' %(os.path.basename(cpf),cpd[''],cpd[''][xmi][''],cpd[''][xmi]['']))if not resultstodir:outcpf = _write_checkplot_picklefile(cpd,outfile=cpf)else:xcpf = os.path.join(resultstodir, os.path.basename(cpf))outcpf = _write_checkplot_picklefile(cpd,outfile=xcpf)status_dict[cpf] = outcpfexcept Exception as e:LOGEXCEPTION('' % cpf)status_dict[cpf] = Nonereturn status_dict", "docstring": "This xmatches external catalogs to a collection of checkplots.\n\n Parameters\n ----------\n\n cplist : list of str\n This is the list of checkplot pickle files to process.\n\n xmatchpkl : str\n The filename of a pickle prepared beforehand with the\n `checkplot.pkl_xmatch.load_xmatch_external_catalogs` function,\n containing collected external catalogs to cross-match the objects in the\n input `cplist` against.\n\n xmatchradiusarcsec : float\n The match radius to use for the cross-match in arcseconds.\n\n updateexisting : bool\n If this is True, will only update the `xmatch` dict in each checkplot\n pickle with any new cross-matches to the external catalogs. If False,\n will overwrite the `xmatch` dict with results from the current run.\n\n resultstodir : str or None\n If this is provided, then it must be a directory to write the resulting\n checkplots to after xmatch is done. 
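The xmatch driver above follows a common "process each checkplot pickle, record per-file status" loop: load, update, re-write (either in place or into `resultstodir`), and map each input filename to its output path, or to None on failure. A generic sketch of that loop, assuming plain pickle I/O (the real checkplots are read and written with astrobase's own helpers) and a stand-in update step:

import os
import pickle

def process_checkplot_pickles(cplist, update_one, outdir=None):
    status = {}
    for cpf in cplist:
        try:
            with open(cpf, 'rb') as infd:
                cpd = pickle.load(infd)
            update_one(cpd)  # stand-in for the actual cross-match step
            outpath = (cpf if outdir is None
                       else os.path.join(outdir, os.path.basename(cpf)))
            with open(outpath, 'wb') as outfd:
                pickle.dump(cpd, outfd, protocol=pickle.HIGHEST_PROTOCOL)
            status[cpf] = outpath
        except Exception:
            status[cpf] = None  # record the failure and move on
    return status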
This can be used to keep the\n original checkplots in pristine condition for some reason.\n\n Returns\n -------\n\n dict\n Returns a dict with keys = input checkplot pickle filenames and vals =\n xmatch status dict for each checkplot pickle.", "id": "f14703:m1"} {"signature": "def xmatch_cpdir_external_catalogs(cpdir,xmatchpkl,cpfileglob='',xmatchradiusarcsec=,updateexisting=True,resultstodir=None):", "body": "cplist = glob.glob(os.path.join(cpdir, cpfileglob))return xmatch_cplist_external_catalogs(cplist,xmatchpkl,xmatchradiusarcsec=xmatchradiusarcsec,updateexisting=updateexisting,resultstodir=resultstodir)", "docstring": "This xmatches external catalogs to all checkplots in a directory.\n\n Parameters\n -----------\n\n cpdir : str\n This is the directory to search in for checkplots.\n\n xmatchpkl : str\n The filename of a pickle prepared beforehand with the\n `checkplot.pkl_xmatch.load_xmatch_external_catalogs` function,\n containing collected external catalogs to cross-match the objects in the\n input `cplist` against.\n\n cpfileglob : str\n This is the UNIX fileglob to use in searching for checkplots.\n\n xmatchradiusarcsec : float\n The match radius to use for the cross-match in arcseconds.\n\n updateexisting : bool\n If this is True, will only update the `xmatch` dict in each checkplot\n pickle with any new cross-matches to the external catalogs. If False,\n will overwrite the `xmatch` dict with results from the current run.\n\n resultstodir : str or None\n If this is provided, then it must be a directory to write the resulting\n checkplots to after xmatch is done. This can be used to keep the\n original checkplots in pristine condition for some reason.\n\n Returns\n -------\n\n dict\n Returns a dict with keys = input checkplot pickle filenames and vals =\n xmatch status dict for each checkplot pickle.", "id": "f14703:m2"} {"signature": "def colormagdiagram_cplist(cplist,outpkl,color_mag1=['',''],color_mag2=['',''],yaxis_mag=['','']):", "body": "cplist_objectids = []cplist_mags = []cplist_colors = []for cpf in cplist:cpd = _read_checkplot_picklefile(cpf)cplist_objectids.append(cpd[''])thiscp_mags = []thiscp_colors = []for cm1, cm2, ym in zip(color_mag1, color_mag2, yaxis_mag):if (ym in cpd[''] andcpd[''][ym] is not None):thiscp_mags.append(cpd[''][ym])else:thiscp_mags.append(np.nan)if (cm1 in cpd[''] andcpd[''][cm1] is not None andcm2 in cpd[''] andcpd[''][cm2] is not None):thiscp_colors.append(cpd[''][cm1] -cpd[''][cm2])else:thiscp_colors.append(np.nan)cplist_mags.append(thiscp_mags)cplist_colors.append(thiscp_colors)cplist_objectids = np.array(cplist_objectids)cplist_mags = np.array(cplist_mags)cplist_colors = np.array(cplist_colors)cmddict = {'':cplist_objectids,'':cplist_mags,'':cplist_colors,'':color_mag1,'':color_mag2,'':yaxis_mag}with open(outpkl,'') as outfd:pickle.dump(cmddict, outfd, pickle.HIGHEST_PROTOCOL)plt.close('')return cmddict", "docstring": "This makes color-mag diagrams for all checkplot pickles in the provided\n list.\n\n Can make an arbitrary number of CMDs given lists of x-axis colors and y-axis\n mags to use.\n\n Parameters\n ----------\n\n cplist : list of str\n This is the list of checkplot pickles to process.\n\n outpkl : str\n The filename of the output pickle that will contain the color-mag\n information for all objects in the checkplots specified in `cplist`.\n\n color_mag1 : list of str\n This a list of the keys in each checkplot's `objectinfo` dict that will\n be used as color_1 in the equation::\n\n x-axis color = color_mag1 - color_mag2\n\n 
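As a small illustration of the color/magnitude extraction this function performs per object: the x-axis color is `objectinfo[color_mag1] - objectinfo[color_mag2]`, the y-axis is `objectinfo[yaxis_mag]`, and any missing or None value falls back to NaN. The key names below (gaiamag, kmag, gaia_absmag) follow the defaults mentioned in the Notes and are illustrative only.

import numpy as np

def cmd_point(objectinfo, color_mag1='gaiamag', color_mag2='kmag',
              yaxis_mag='gaia_absmag'):
    def get(key):
        val = objectinfo.get(key)
        return val if val is not None else np.nan
    # x-axis color = color_mag1 - color_mag2, y-axis = yaxis_mag
    return get(color_mag1) - get(color_mag2), get(yaxis_mag)

print(cmd_point({'gaiamag': 12.3, 'kmag': 10.9, 'gaia_absmag': 4.1}))
print(cmd_point({'gaiamag': 12.3}))  # missing keys fall back to NaN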
color_mag2 : list of str\n This a list of the keys in each checkplot's `objectinfo` dict that will\n be used as color_2 in the equation::\n\n x-axis color = color_mag1 - color_mag2\n\n yaxis_mag : list of str\n This is a list of the keys in each checkplot's `objectinfo` dict that\n will be used as the (absolute) magnitude y-axis of the color-mag\n diagrams.\n\n Returns\n -------\n\n str\n The path to the generated CMD pickle file for the collection of objects\n in the input checkplot list.\n\n Notes\n -----\n\n This can make many CMDs in one go. For example, the default kwargs for\n `color_mag`, `color_mag2`, and `yaxis_mag` result in two CMDs generated and\n written to the output pickle file:\n\n - CMD1 -> gaiamag - kmag on the x-axis vs gaia_absmag on the y-axis\n - CMD2 -> sdssg - kmag on the x-axis vs rpmj (J reduced PM) on the y-axis", "id": "f14703:m3"} {"signature": "def colormagdiagram_cpdir(cpdir,outpkl,cpfileglob='',color_mag1=['',''],color_mag2=['',''],yaxis_mag=['','']):", "body": "cplist = glob.glob(os.path.join(cpdir, cpfileglob))return colormagdiagram_cplist(cplist,outpkl,color_mag1=color_mag1,color_mag2=color_mag2,yaxis_mag=yaxis_mag)", "docstring": "This makes CMDs for all checkplot pickles in the provided directory.\n\n Can make an arbitrary number of CMDs given lists of x-axis colors and y-axis\n mags to use.\n\n Parameters\n ----------\n\n cpdir : list of str\n This is the directory to get the list of input checkplot pickles from.\n\n outpkl : str\n The filename of the output pickle that will contain the color-mag\n information for all objects in the checkplots specified in `cplist`.\n\n cpfileglob : str\n The UNIX fileglob to use to search for checkplot pickle files.\n\n color_mag1 : list of str\n This a list of the keys in each checkplot's `objectinfo` dict that will\n be used as color_1 in the equation::\n\n x-axis color = color_mag1 - color_mag2\n\n color_mag2 : list of str\n This a list of the keys in each checkplot's `objectinfo` dict that will\n be used as color_2 in the equation::\n\n x-axis color = color_mag1 - color_mag2\n\n yaxis_mag : list of str\n This is a list of the keys in each checkplot's `objectinfo` dict that\n will be used as the (absolute) magnitude y-axis of the color-mag\n diagrams.\n\n Returns\n -------\n\n str\n The path to the generated CMD pickle file for the collection of objects\n in the input checkplot directory.\n\n Notes\n -----\n\n This can make many CMDs in one go. 
For example, the default kwargs for\n `color_mag`, `color_mag2`, and `yaxis_mag` result in two CMDs generated and\n written to the output pickle file:\n\n - CMD1 -> gaiamag - kmag on the x-axis vs gaia_absmag on the y-axis\n - CMD2 -> sdssg - kmag on the x-axis vs rpmj (J reduced PM) on the y-axis", "id": "f14703:m4"} {"signature": "def add_cmd_to_checkplot(cpx,cmdpkl,require_cmd_magcolor=True,save_cmd_pngs=False):", "body": "if isinstance(cpx, str) and os.path.exists(cpx):cpdict = _read_checkplot_picklefile(cpx)elif isinstance(cpx, dict):cpdict = cpxelse:LOGERROR('')return Noneif isinstance(cmdpkl, str) and os.path.exists(cmdpkl):with open(cmdpkl, '') as infd:cmd = pickle.load(infd)elif isinstance(cmdpkl, dict):cmd = cmdpklcpdict[''] = {}cplist_mags = cmd['']cplist_colors = cmd['']for c1, c2, ym, ind in zip(cmd[''],cmd[''],cmd[''],range(len(cmd['']))):if (c1 in cpdict[''] andcpdict[''][c1] is not None):c1mag = cpdict[''][c1]else:c1mag = np.nanif (c2 in cpdict[''] andcpdict[''][c2] is not None):c2mag = cpdict[''][c2]else:c2mag = np.nanif (ym in cpdict[''] andcpdict[''][ym] is not None):ymmag = cpdict[''][ym]else:ymmag = np.nanif (require_cmd_magcolor andnot (np.isfinite(c1mag) andnp.isfinite(c2mag) andnp.isfinite(ymmag))):LOGWARNING(\"\"\"\"\"\" %(c1, c2, ym, cpdict['']))continuetry:thiscmd_title = r'' % (CMD_LABELS[c1],CMD_LABELS[c2],CMD_LABELS[ym])fig = plt.figure(figsize=(,))plt.plot(cplist_colors[:,ind],cplist_mags[:,ind],rasterized=True,marker='',linestyle='',mew=,ms=)plt.plot([c1mag - c2mag], [ymmag],ms=,color='',marker='',mew=)plt.xlabel(r'' % (CMD_LABELS[c1], CMD_LABELS[c2]))plt.ylabel(r'' % CMD_LABELS[ym])plt.title('' % (cpdict[''], thiscmd_title))plt.gca().invert_yaxis()cmdpng = StrIO()plt.savefig(cmdpng, bbox_inches='',pad_inches=, format='')cmdpng.seek()cmdb64 = base64.b64encode(cmdpng.read())cmdpng.close()plt.close('')plt.gcf().clear()cpdict['']['' % (c1,c2,ym)] = cmdb64if save_cmd_pngs:if isinstance(cpx, str):outpng = os.path.join(os.path.dirname(cpx),'' %(cpdict[''],c1,c2,ym))else:outpng = '' % (cpdict[''],c1,c2,ym)_base64_to_file(cmdb64, outpng)except Exception as e:LOGEXCEPTION('' %(c1, c2, ym, cmdpkl))continueif isinstance(cpx, str):cpf = _write_checkplot_picklefile(cpdict, outfile=cpx, protocol=)return cpfelif isinstance(cpx, dict):return cpdict", "docstring": "This adds CMD figures to a checkplot dict or pickle.\n\n Looks up the CMDs in `cmdpkl`, adds the object from `cpx` as a gold(-ish)\n star in the plot, and then saves the figure to a base64 encoded PNG, which\n can then be read and used by the `checkplotserver`.\n\n Parameters\n ----------\n\n cpx : str or dict\n This is the input checkplot pickle or dict to add the CMD to.\n\n cmdpkl : str or dict\n The CMD pickle generated by the `colormagdiagram_cplist` or\n `colormagdiagram_cpdir` functions above, or the dict produced by reading\n this pickle in.\n\n require_cmd_magcolor : bool\n If this is True, a CMD plot will not be made if the color and mag keys\n required by the CMD are not present or are nan in this checkplot's\n objectinfo dict.\n\n save_cmd_png : bool\n If this is True, then will save the CMD plots that were generated and\n added back to the checkplotdict as PNGs to the same directory as\n `cpx`. If `cpx` is a dict, will save them to the current working\n directory.\n\n Returns\n -------\n\n str or dict\n If `cpx` was a str filename of checkplot pickle, this will return that\n filename to indicate that the CMD was added to the file. 
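The docstring above describes rendering each CMD to a base64-encoded PNG that is embedded in the checkplot dict. A minimal, generic version of that save-to-buffer-and-encode pattern (not the exact astrobase plotting code) looks like this:

import base64
from io import BytesIO

import matplotlib
matplotlib.use('Agg')  # render without a display
import matplotlib.pyplot as plt

def figure_to_base64_png(colors, mags, obj_color, obj_mag):
    fig = plt.figure(figsize=(10, 8))
    plt.plot(colors, mags, marker='o', linestyle='none', rasterized=True)
    plt.plot([obj_color], [obj_mag], marker='*', ms=20)  # highlight the object
    plt.gca().invert_yaxis()  # brighter objects plot higher on a CMD
    buf = BytesIO()
    plt.savefig(buf, bbox_inches='tight', format='png')
    plt.close(fig)
    buf.seek(0)
    return base64.b64encode(buf.read())

png_b64 = figure_to_base64_png([0.5, 0.7, 1.4], [5.0, 4.4, 4.1], 0.9, 4.6)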
If `cpx` was a\n checkplotdict, this will return the checkplotdict with a new key called\n 'colormagdiagram' containing the base64 encoded PNG binary streams of\n all CMDs generated.", "id": "f14703:m5"} {"signature": "def add_cmds_cplist(cplist, cmdpkl,require_cmd_magcolor=True,save_cmd_pngs=False):", "body": "with open(cmdpkl,'') as infd:cmd = pickle.load(infd)for cpf in cplist:add_cmd_to_checkplot(cpf, cmd,require_cmd_magcolor=require_cmd_magcolor,save_cmd_pngs=save_cmd_pngs)", "docstring": "This adds CMDs for each object in cplist.\n\n Parameters\n ----------\n\n cplist : list of str\n This is the input list of checkplot pickles to add the CMDs to.\n\n cmdpkl : str\n This is the filename of the CMD pickle created previously.\n\n require_cmd_magcolor : bool\n If this is True, a CMD plot will not be made if the color and mag keys\n required by the CMD are not present or are nan in each checkplot's\n objectinfo dict.\n\n save_cmd_pngs : bool\n If this is True, then will save the CMD plots that were generated and\n added back to the checkplotdict as PNGs to the same directory as\n `cpx`.\n\n Returns\n -------\n\n Nothing.", "id": "f14703:m6"} {"signature": "def add_cmds_cpdir(cpdir,cmdpkl,cpfileglob='',require_cmd_magcolor=True,save_cmd_pngs=False):", "body": "cplist = glob.glob(os.path.join(cpdir, cpfileglob))return add_cmds_cplist(cplist,cmdpkl,require_cmd_magcolor=require_cmd_magcolor,save_cmd_pngs=save_cmd_pngs)", "docstring": "This adds CMDs for each object in cpdir.\n\n Parameters\n ----------\n\n cpdir : list of str\n This is the directory to search for checkplot pickles.\n\n cmdpkl : str\n This is the filename of the CMD pickle created previously.\n\n cpfileglob : str\n The UNIX fileglob to use when searching for checkplot pickles to operate\n on.\n\n require_cmd_magcolor : bool\n If this is True, a CMD plot will not be made if the color and mag keys\n required by the CMD are not present or are nan in each checkplot's\n objectinfo dict.\n\n save_cmd_pngs : bool\n If this is True, then will save the CMD plots that were generated and\n added back to the checkplotdict as PNGs to the same directory as\n `cpx`.\n\n Returns\n -------\n\n Nothing.", "id": "f14703:m7"} {"signature": "def cp_objectinfo_worker(task):", "body": "cpf, cpkwargs = tasktry:newcpf = update_checkplot_objectinfo(cpf, **cpkwargs)return newcpfexcept Exception as e:LOGEXCEPTION('' % cpf)return None", "docstring": "This is a parallel worker for `parallel_update_cp_objectinfo`.\n\n Parameters\n ----------\n\n task : tuple\n - task[0] = checkplot pickle file\n - task[1] = kwargs\n\n Returns\n -------\n\n str\n The name of the checkplot file that was updated. 
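`cp_objectinfo_worker` above illustrates the task convention used by these parallel drivers: each task is a (checkplot filename, kwargs dict) pair, and the worker returns the updated path or None on failure. A tiny generic sketch of that convention, with a hypothetical stand-in for the real update call:

def fake_update(path, verbose=True):
    # stand-in for update_checkplot_objectinfo(path, **kwargs)
    if verbose:
        print('updating %s' % path)
    return path

def objectinfo_worker(task):
    cpf, kwargs = task
    try:
        return fake_update(cpf, **kwargs)
    except Exception:
        return None  # a failed update maps to None, as in the worker above

tasks = [('checkplot-0001.pkl', {'verbose': True}),
         ('checkplot-0002.pkl', {'verbose': False})]
print([objectinfo_worker(t) for t in tasks])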
None if the update\n fails for some reason.", "id": "f14703:m8"} {"signature": "def parallel_update_objectinfo_cplist(cplist,liststartindex=None,maxobjects=None,nworkers=NCPUS,fast_mode=False,findercmap='',finderconvolve=None,deredden_object=True,custom_bandpasses=None,gaia_submit_timeout=,gaia_submit_tries=,gaia_max_timeout=,gaia_mirror=None,complete_query_later=True,lclistpkl=None,nbrradiusarcsec=,maxnumneighbors=,plotdpi=,findercachedir='',verbose=True):", "body": "if sys.platform == '':import requestsrequests.get('')if (liststartindex is not None) and (maxobjects is None):cplist = cplist[liststartindex:]elif (liststartindex is None) and (maxobjects is not None):cplist = cplist[:maxobjects]elif (liststartindex is not None) and (maxobjects is not None):cplist = (cplist[liststartindex:liststartindex+maxobjects])tasks = [(x, {'':fast_mode,'':findercmap,'':finderconvolve,'':deredden_object,'':custom_bandpasses,'':gaia_submit_timeout,'':gaia_submit_tries,'':gaia_max_timeout,'':gaia_mirror,'':complete_query_later,'':lclistpkl,'':nbrradiusarcsec,'':maxnumneighbors,'':plotdpi,'':findercachedir,'':verbose}) for x in cplist]resultfutures = []results = []with ProcessPoolExecutor(max_workers=nworkers) as executor:resultfutures = executor.map(cp_objectinfo_worker, tasks)results = [x for x in resultfutures]executor.shutdown()return results", "docstring": "This updates objectinfo for a list of checkplots.\n\nUseful in cases where a previous round of GAIA/finderchart/external catalog\nacquisition failed. This will preserve the following keys in the checkplots\nif they exist:\n\ncomments\nvarinfo\nobjectinfo.objecttags\n\nParameters\n----------\n\ncplist : list of str\n A list of checkplot pickle file names to update.\n\nliststartindex : int\n The index of the input list to start working at.\n\nmaxobjects : int\n The maximum number of objects to process in this run. Use this with\n `liststartindex` to effectively distribute working on a large list of\n input checkplot pickles over several sessions or machines.\n\nnworkers : int\n The number of parallel workers that will work on the checkplot\n update process.\n\nfast_mode : bool or float\n This runs the external catalog operations in a \"fast\" mode, with short\n timeouts and not trying to hit external catalogs that take a long time\n to respond. See the docstring for\n `checkplot.pkl_utils._pkl_finder_objectinfo` for details on how this\n works. If this is True, will run in \"fast\" mode with default timeouts (5\n seconds in most cases). If this is a float, will run in \"fast\" mode with\n the provided timeout value in seconds.\n\nfindercmap : str or matplotlib.cm.Colormap object\n\nfindercmap : str or matplotlib.cm.ColorMap object\n The Colormap object to use for the finder chart image.\n\nfinderconvolve : astropy.convolution.Kernel object or None\n If not None, the Kernel object to use for convolving the finder image.\n\nderedden_objects : bool\n If this is True, will use the 2MASS DUST service to get extinction\n coefficients in various bands, and then try to deredden the magnitudes\n and colors of the object already present in the checkplot's objectinfo\n dict.\n\ncustom_bandpasses : dict\n This is a dict used to provide custom bandpass definitions for any\n magnitude measurements in the objectinfo dict that are not automatically\n recognized by the `varclass.starfeatures.color_features` function. 
See\n its docstring for details on the required format.\n\ngaia_submit_timeout : float\n Sets the timeout in seconds to use when submitting a request to look up\n the object's information to the GAIA service. Note that if `fast_mode`\n is set, this is ignored.\n\ngaia_submit_tries : int\n Sets the maximum number of times the GAIA services will be contacted to\n obtain this object's information. If `fast_mode` is set, this is\n ignored, and the services will be contacted only once (meaning that a\n failure to respond will be silently ignored and no GAIA data will be\n added to the checkplot's objectinfo dict).\n\ngaia_max_timeout : float\n Sets the timeout in seconds to use when waiting for the GAIA service to\n respond to our request for the object's information. Note that if\n `fast_mode` is set, this is ignored.\n\ngaia_mirror : str\n This sets the GAIA mirror to use. This is a key in the\n `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each\n mirror.\n\ncomplete_query_later : bool\n If this is True, saves the state of GAIA queries that are not yet\n complete when `gaia_max_timeout` is reached while waiting for the GAIA\n service to respond to our request. A later call for GAIA info on the\n same object will attempt to pick up the results from the existing query\n if it's completed. If `fast_mode` is True, this is ignored.\n\nlclistpkl : dict or str\n If this is provided, must be a dict resulting from reading a catalog\n produced by the `lcproc.catalogs.make_lclist` function or a str path\n pointing to the pickle file produced by that function. This catalog is\n used to find neighbors of the current object in the current light curve\n collection. Looking at neighbors of the object within the radius\n specified by `nbrradiusarcsec` is useful for light curves produced by\n instruments that have a large pixel scale, so are susceptible to\n blending of variability and potential confusion of neighbor variability\n with that of the actual object being looked at. If this is None, no\n neighbor lookups will be performed.\n\nnbrradiusarcsec : float\n The radius in arcseconds to use for a search conducted around the\n coordinates of this object to look for any potential confusion and\n blending of variability amplitude caused by their proximity.\n\nmaxnumneighbors : int\n The maximum number of neighbors that will have their light curves and\n magnitudes noted in this checkplot as potential blends with the target\n object.\n\nplotdpi : int\n The resolution in DPI of the plots to generate in this function\n (e.g. 
the finder chart, etc.)\n\nfindercachedir : str\n The path to the astrobase cache directory for finder chart downloads\n from the NASA SkyView service.\n\nverbose : bool\n If True, will indicate progress and warn about potential problems.\n\nReturns\n-------\n\nlist of str\n Paths to the updated checkplot pickle file.", "id": "f14703:m9"} {"signature": "def parallel_update_objectinfo_cpdir(cpdir,cpglob='',liststartindex=None,maxobjects=None,nworkers=NCPUS,fast_mode=False,findercmap='',finderconvolve=None,deredden_object=True,custom_bandpasses=None,gaia_submit_timeout=,gaia_submit_tries=,gaia_max_timeout=,gaia_mirror=None,complete_query_later=True,lclistpkl=None,nbrradiusarcsec=,maxnumneighbors=,plotdpi=,findercachedir='',verbose=True):", "body": "cplist = sorted(glob.glob(os.path.join(cpdir, cpglob)))return parallel_update_objectinfo_cplist(cplist,liststartindex=liststartindex,maxobjects=maxobjects,nworkers=nworkers,fast_mode=fast_mode,findercmap=findercmap,finderconvolve=finderconvolve,deredden_object=deredden_object,custom_bandpasses=custom_bandpasses,gaia_submit_timeout=gaia_submit_timeout,gaia_submit_tries=gaia_submit_tries,gaia_max_timeout=gaia_max_timeout,gaia_mirror=gaia_mirror,complete_query_later=complete_query_later,lclistpkl=lclistpkl,nbrradiusarcsec=nbrradiusarcsec,maxnumneighbors=maxnumneighbors,plotdpi=plotdpi,findercachedir=findercachedir,verbose=verbose)", "docstring": "This updates the objectinfo for a directory of checkplot pickles.\n\n Useful in cases where a previous round of GAIA/finderchart/external catalog\n acquisition failed. This will preserve the following keys in the checkplots\n if they exist:\n\n comments\n varinfo\n objectinfo.objecttags\n\n Parameters\n ----------\n\n cpdir : str\n The directory to look for checkplot pickles in.\n\n cpglob : str\n The UNIX fileglob to use when searching for checkplot pickle files.\n\n liststartindex : int\n The index of the input list to start working at.\n\n maxobjects : int\n The maximum number of objects to process in this run. Use this with\n `liststartindex` to effectively distribute working on a large list of\n input checkplot pickles over several sessions or machines.\n\n nworkers : int\n The number of parallel workers that will work on the checkplot\n update process.\n\n fast_mode : bool or float\n This runs the external catalog operations in a \"fast\" mode, with short\n timeouts and not trying to hit external catalogs that take a long time\n to respond. See the docstring for\n `checkplot.pkl_utils._pkl_finder_objectinfo` for details on how this\n works. If this is True, will run in \"fast\" mode with default timeouts (5\n seconds in most cases). If this is a float, will run in \"fast\" mode with\n the provided timeout value in seconds.\n\n findercmap : str or matplotlib.cm.Colormap object\n\n findercmap : str or matplotlib.cm.ColorMap object\n The Colormap object to use for the finder chart image.\n\n finderconvolve : astropy.convolution.Kernel object or None\n If not None, the Kernel object to use for convolving the finder image.\n\n deredden_objects : bool\n If this is True, will use the 2MASS DUST service to get extinction\n coefficients in various bands, and then try to deredden the magnitudes\n and colors of the object already present in the checkplot's objectinfo\n dict.\n\n custom_bandpasses : dict\n This is a dict used to provide custom bandpass definitions for any\n magnitude measurements in the objectinfo dict that are not automatically\n recognized by the `varclass.starfeatures.color_features` function. 
See\n its docstring for details on the required format.\n\n gaia_submit_timeout : float\n Sets the timeout in seconds to use when submitting a request to look up\n the object's information to the GAIA service. Note that if `fast_mode`\n is set, this is ignored.\n\n gaia_submit_tries : int\n Sets the maximum number of times the GAIA services will be contacted to\n obtain this object's information. If `fast_mode` is set, this is\n ignored, and the services will be contacted only once (meaning that a\n failure to respond will be silently ignored and no GAIA data will be\n added to the checkplot's objectinfo dict).\n\n gaia_max_timeout : float\n Sets the timeout in seconds to use when waiting for the GAIA service to\n respond to our request for the object's information. Note that if\n `fast_mode` is set, this is ignored.\n\n gaia_mirror : str\n This sets the GAIA mirror to use. This is a key in the\n `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each\n mirror.\n\n complete_query_later : bool\n If this is True, saves the state of GAIA queries that are not yet\n complete when `gaia_max_timeout` is reached while waiting for the GAIA\n service to respond to our request. A later call for GAIA info on the\n same object will attempt to pick up the results from the existing query\n if it's completed. If `fast_mode` is True, this is ignored.\n\n lclistpkl : dict or str\n If this is provided, must be a dict resulting from reading a catalog\n produced by the `lcproc.catalogs.make_lclist` function or a str path\n pointing to the pickle file produced by that function. This catalog is\n used to find neighbors of the current object in the current light curve\n collection. Looking at neighbors of the object within the radius\n specified by `nbrradiusarcsec` is useful for light curves produced by\n instruments that have a large pixel scale, so are susceptible to\n blending of variability and potential confusion of neighbor variability\n with that of the actual object being looked at. If this is None, no\n neighbor lookups will be performed.\n\n nbrradiusarcsec : float\n The radius in arcseconds to use for a search conducted around the\n coordinates of this object to look for any potential confusion and\n blending of variability amplitude caused by their proximity.\n\n maxnumneighbors : int\n The maximum number of neighbors that will have their light curves and\n magnitudes noted in this checkplot as potential blends with the target\n object.\n\n plotdpi : int\n The resolution in DPI of the plots to generate in this function\n (e.g. 
the finder chart, etc.)\n\n findercachedir : str\n The path to the astrobase cache directory for finder chart downloads\n from the NASA SkyView service.\n\n verbose : bool\n If True, will indicate progress and warn about potential problems.\n\n Returns\n -------\n\n list of str\n Paths to the updated checkplot pickle file.", "id": "f14703:m10"} {"signature": "def kill_handler(sig, frame):", "body": "raise KeyboardInterrupt", "docstring": "This raises a KeyboardInterrupt when a SIGKILL comes in.\n\n This is a handle for use with the Python `signal.signal` function.", "id": "f14704:m0"} {"signature": "def cache_clean_handler(min_age_hours=):", "body": "cmd = (\"\")mmin = '' % (min_age_hours*)cmd = cmd.format(mmin=mmin)try:proc = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)ndeleted = len(proc.stdout.decode().split(''))LOGWARNING('' %(ndeleted, min_age_hours))except Exception as e:LOGEXCEPTION('')", "docstring": "This periodically cleans up the ~/.astrobase cache to save us from\n disk-space doom.\n\n Parameters\n ----------\n\n min_age_hours : int\n Files older than this number of hours from the current time will be\n deleted.\n\n Returns\n -------\n\n Nothing.", "id": "f14704:m1"} {"signature": "def shutdown_check_handler():", "body": "url = ''try:resp = requests.get(url, timeout=)resp.raise_for_status()stopinfo = resp.json()if '' in stopinfo and stopinfo[''] in ('','',''):stoptime = stopinfo['']LOGWARNING('' % (stopinfo[''],stoptime))resp.close()return Trueelse:resp.close()return Falseexcept HTTPError as e:resp.close()return Falseexcept Exception as e:resp.close()return False", "docstring": "This checks the AWS instance data URL to see if there's a pending\n shutdown for the instance.\n\n This is useful for AWS spot instances. If there is a pending shutdown posted\n to the instance data URL, we'll use the result of this function break out of\n the processing loop and shut everything down ASAP before the instance dies.\n\n Returns\n -------\n\n bool\n - True if the instance is going to die soon.\n - False if the instance is still safe.", "id": "f14704:m2"} {"signature": "def runcp_producer_loop(lightcurve_list,input_queue,input_bucket,result_queue,result_bucket,pfresult_list=None,runcp_kwargs=None,process_list_slice=None,purge_queues_when_done=False,delete_queues_when_done=False,download_when_done=True,save_state_when_done=True,s3_client=None,sqs_client=None):", "body": "if not sqs_client:sqs_client = boto3.client('')if not s3_client:s3_client = boto3.client('')if isinstance(lightcurve_list, str) and os.path.exists(lightcurve_list):with open(lightcurve_list, '') as infd:lclist = infd.readlines()lclist = [x.replace('','') for x in lclist if len(x) > ]if process_list_slice is not None:lclist = lclist[process_list_slice[]:process_list_slice[]]lclist = [x[:] for x in lclist if x.startswith('')]lclist = ['' % (input_bucket, x) for x in lclist]elif isinstance(lightcurve_list, list):lclist = lightcurve_listtry:inq = sqs_client.get_queue_url(QueueName=input_queue)inq_url = inq['']LOGINFO('')except ClientError as e:inq = awsutils.sqs_create_queue(input_queue, client=sqs_client)inq_url = inq['']try:outq = sqs_client.get_queue_url(QueueName=result_queue)outq_url = outq['']LOGINFO('')except ClientError as e:outq = awsutils.sqs_create_queue(result_queue, client=sqs_client)outq_url = outq['']LOGINFO('' % inq_url)LOGINFO('' % outq_url)LOGINFO('')time.sleep()if pfresult_list is None:pfresult_list = [None for x in lclist]for lc, pf in zip(lclist, pfresult_list):this_item = {'': lc,'': '','': 
(pf,),'':runcp_kwargs if runcp_kwargs is not None else {},'': result_bucket,'': outq_url}resp = awsutils.sqs_put_item(inq_url, this_item, client=sqs_client)if resp:LOGINFO('' % (lc,inq_url))done_objects = {}LOGINFO('')signal.signal(signal.SIGINT, kill_handler)signal.signal(signal.SIGTERM, kill_handler)while len(list(done_objects.keys())) < len(lclist):try:result = awsutils.sqs_get_item(outq_url, client=sqs_client)if result is not None and len(result) > :recv = result[]try:processed_object = recv['']['']except KeyError:LOGWARNING('' % recv)processed_object = ''cpf = recv['']['']receipt = recv['']if processed_object in lclist:if processed_object not in done_objects:done_objects[processed_object] = [cpf]else:done_objects[processed_object].append(cpf)LOGINFO('' % (processed_object, cpf))if download_when_done:getobj = awsutils.awsutils.s3_get_url(cpf,client=s3_client)LOGINFO('' % (cpf, getobj))else:LOGWARNING('''''')awsutils.sqs_delete_item(outq_url, receipt)except KeyboardInterrupt as e:LOGWARNING('')breakLOGINFO('')time.sleep()if purge_queues_when_done:LOGWARNING('')sqs_client.purge_queue(QueueUrl=inq_url)sqs_client.purge_queue(QueueUrl=outq_url)time.sleep()if delete_queues_when_done:LOGWARNING('')awsutils.sqs_delete_queue(inq_url)awsutils.sqs_delete_queue(outq_url)work_state = {'': done_objects,'': list(set(lclist) - set(done_objects.keys())),'':((os.path.abspath(lightcurve_list) ifisinstance(lightcurve_list, str) else lightcurve_list),input_queue,input_bucket,result_queue,result_bucket),'':{'':pfresult_list,'':runcp_kwargs,'':process_list_slice,'':download_when_done,'':purge_queues_when_done,'':save_state_when_done,'':delete_queues_when_done}}if save_state_when_done:with open('','') as outfd:pickle.dump(work_state, outfd, pickle.HIGHEST_PROTOCOL)return work_state", "docstring": "This sends checkplot making tasks to the input queue and monitors the\n result queue for task completion.\n\n Parameters\n ----------\n\n lightcurve_list : str or list of str\n This is either a string pointing to a file containing a list of light\n curves filenames to process or the list itself. The names must\n correspond to the full filenames of files stored on S3, including all\n prefixes, but not include the 's3:///' bit (these will be\n added automatically).\n\n input_queue : str\n This is the name of the SQS queue which will receive processing tasks\n generated by this function. The queue URL will automatically be obtained\n from AWS.\n\n input_bucket : str\n The name of the S3 bucket containing the light curve files to process.\n\n result_queue : str\n This is the name of the SQS queue that this function will listen to for\n messages from the workers as they complete processing on their input\n elements. This function will attempt to match input sent to the\n `input_queue` with results coming into the `result_queue` so it knows\n how many objects have been successfully processed. If this function\n receives task results that aren't in its own input queue, it will\n acknowledge them so they complete successfully, but not download them\n automatically. This handles leftover tasks completing from a previous\n run of this function.\n\n result_bucket : str\n The name of the S3 bucket which will receive the results from the\n workers.\n\n pfresult_list : list of str or None\n This is a list of periodfinder result pickle S3 URLs associated with\n each light curve. If provided, this will be used to add in phased light\n curve plots to each checkplot pickle. 
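The producer side described above reduces to: make sure the input and result queues exist, enqueue one JSON task per light curve, then poll the result queue and delete (acknowledge) each completed item. A stripped-down boto3 sketch of that flow follows; the item fields are illustrative, since the real task orders carry more information (period-finder pickle URL, kwargs, output bucket, etc.), and running it requires valid AWS credentials.

import json

import boto3
from botocore.exceptions import ClientError

sqs = boto3.client('sqs')

def ensure_queue(name):
    # reuse the queue if it exists, otherwise create it
    try:
        return sqs.get_queue_url(QueueName=name)['QueueUrl']
    except ClientError:
        return sqs.create_queue(QueueName=name)['QueueUrl']

def enqueue_work(lclist, input_queue, result_bucket, out_queue_url):
    in_url = ensure_queue(input_queue)
    for lc in lclist:
        item = {'target': lc, 'action': 'runcp',
                'outbucket': result_bucket, 'outqueue': out_queue_url}
        sqs.send_message(QueueUrl=in_url, MessageBody=json.dumps(item))
    return in_url

def poll_results(out_queue_url):
    # pull at most one finished item per call and acknowledge (delete) it
    resp = sqs.receive_message(QueueUrl=out_queue_url,
                               MaxNumberOfMessages=1, WaitTimeSeconds=5)
    for msg in resp.get('Messages', []):
        result = json.loads(msg['Body'])
        sqs.delete_message(QueueUrl=out_queue_url,
                           ReceiptHandle=msg['ReceiptHandle'])
        yield result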
If this is None, the worker loop\n will produce checkplot pickles that only contain object information,\n neighbor information, and unphased light curves.\n\n runcp_kwargs : dict\n This is a dict used to pass any extra keyword arguments to the\n `lcproc.checkplotgen.runcp` function that will be run by the worker\n loop.\n\n process_list_slice : list\n This is used to index into the input light curve list so a subset of the\n full list can be processed in this specific run of this function.\n\n Use None for a slice index elem to emulate single slice spec behavior:\n\n process_list_slice = [10, None] -> lightcurve_list[10:]\n process_list_slice = [None, 500] -> lightcurve_list[:500]\n\n purge_queues_when_done : bool\n If this is True, and this function exits (either when all done, or when\n it is interrupted with a Ctrl+C), all outstanding elements in the\n input/output queues that have not yet been acknowledged by workers or by\n this function will be purged. This effectively cancels all outstanding\n work.\n\n delete_queues_when_done : bool\n If this is True, and this function exits (either when all done, or when\n it is interrupted with a Ctrl+C'), all outstanding work items will be\n purged from the input/queues and the queues themselves will be deleted.\n\n download_when_done : bool\n If this is True, the generated checkplot pickle for each input work item\n will be downloaded immediately to the current working directory when the\n worker functions report they're done with it.\n\n save_state_when_done : bool\n If this is True, will save the current state of the work item queue and\n the work items acknowledged as completed to a pickle in the current\n working directory. Call the `runcp_producer_loop_savedstate` function\n below to resume processing from this saved state later.\n\n s3_client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its S3 download operations. Alternatively, pass in an existing\n `boto3.Client` instance to re-use it here.\n\n sqs_client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its SQS operations. 
Alternatively, pass in an existing\n `boto3.Client` instance to re-use it here.\n\n Returns\n -------\n\n dict or str\n Returns the current work state as a dict or str path to the generated\n work state pickle depending on if `save_state_when_done` is True.", "id": "f14704:m3"} {"signature": "def runcp_producer_loop_savedstate(use_saved_state=None,lightcurve_list=None,input_queue=None,input_bucket=None,result_queue=None,result_bucket=None,pfresult_list=None,runcp_kwargs=None,process_list_slice=None,download_when_done=True,purge_queues_when_done=True,save_state_when_done=True,delete_queues_when_done=False,s3_client=None,sqs_client=None):", "body": "if use_saved_state is not None and os.path.exists(use_saved_state):with open(use_saved_state,'') as infd:saved_state = pickle.load(infd)return runcp_producer_loop(saved_state[''],saved_state[''][],saved_state[''][],saved_state[''][],saved_state[''][],**saved_state[''])else:return runcp_producer_loop(lightcurve_list,input_queue,input_bucket,result_queue,result_bucket,pfresult_list=pfresult_list,runcp_kwargs=runcp_kwargs,process_list_slice=process_list_slice,download_when_done=download_when_done,purge_queues_when_done=purge_queues_when_done,save_state_when_done=save_state_when_done,delete_queues_when_done=delete_queues_when_done,s3_client=s3_client,sqs_client=sqs_client)", "docstring": "This wraps the function above to allow for loading previous state from a\n file.\n\n Parameters\n ----------\n\n use_saved_state : str or None\n This is the path to the saved state pickle file produced by a previous\n run of `runcp_producer_loop`. Will get all of the arguments to run\n another instance of the loop from that pickle file. If this is None, you\n MUST provide all of the appropriate arguments to that function.\n\n lightcurve_list : str or list of str or None\n This is either a string pointing to a file containing a list of light\n curves filenames to process or the list itself. The names must\n correspond to the full filenames of files stored on S3, including all\n prefixes, but not include the 's3:///' bit (these will be\n added automatically).\n\n input_queue : str or None\n This is the name of the SQS queue which will receive processing tasks\n generated by this function. The queue URL will automatically be obtained\n from AWS.\n\n input_bucket : str or None\n The name of the S3 bucket containing the light curve files to process.\n\n result_queue : str or None\n This is the name of the SQS queue that this function will listen to for\n messages from the workers as they complete processing on their input\n elements. This function will attempt to match input sent to the\n `input_queue` with results coming into the `result_queue` so it knows\n how many objects have been successfully processed. If this function\n receives task results that aren't in its own input queue, it will\n acknowledge them so they complete successfully, but not download them\n automatically. This handles leftover tasks completing from a previous\n run of this function.\n\n result_bucket : str or None\n The name of the S3 bucket which will receive the results from the\n workers.\n\n pfresult_list : list of str or None\n This is a list of periodfinder result pickle S3 URLs associated with\n each light curve. If provided, this will be used to add in phased light\n curve plots to each checkplot pickle. 
If this is None, the worker loop\n will produce checkplot pickles that only contain object information,\n neighbor information, and unphased light curves.\n\n runcp_kwargs : dict or None\n This is a dict used to pass any extra keyword arguments to the\n `lcproc.checkplotgen.runcp` function that will be run by the worker\n loop.\n\n process_list_slice : list or None\n This is used to index into the input light curve list so a subset of the\n full list can be processed in this specific run of this function.\n\n Use None for a slice index elem to emulate single slice spec behavior:\n\n process_list_slice = [10, None] -> lightcurve_list[10:]\n process_list_slice = [None, 500] -> lightcurve_list[:500]\n\n purge_queues_when_done : bool or None\n If this is True, and this function exits (either when all done, or when\n it is interrupted with a Ctrl+C), all outstanding elements in the\n input/output queues that have not yet been acknowledged by workers or by\n this function will be purged. This effectively cancels all outstanding\n work.\n\n delete_queues_when_done : bool or None\n If this is True, and this function exits (either when all done, or when\n it is interrupted with a Ctrl+C'), all outstanding work items will be\n purged from the input/queues and the queues themselves will be deleted.\n\n download_when_done : bool or None\n If this is True, the generated checkplot pickle for each input work item\n will be downloaded immediately to the current working directory when the\n worker functions report they're done with it.\n\n save_state_when_done : bool or None\n If this is True, will save the current state of the work item queue and\n the work items acknowledged as completed to a pickle in the current\n working directory. Call the `runcp_producer_loop_savedstate` function\n below to resume processing from this saved state later.\n\n s3_client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its S3 download operations. Alternatively, pass in an existing\n `boto3.Client` instance to re-use it here.\n\n sqs_client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its SQS operations. 
Alternatively, pass in an existing\n `boto3.Client` instance to re-use it here.\n\n Returns\n -------\n\n dict or str\n Returns the current work state as a dict or str path to the generated\n work state pickle depending on if `save_state_when_done` is True.", "id": "f14704:m4"} {"signature": "def runcp_consumer_loop(in_queue_url,workdir,lclist_pkl_s3url,lc_altexts=('',),wait_time_seconds=,cache_clean_timer_seconds=,shutdown_check_timer_seconds=,sqs_client=None,s3_client=None):", "body": "if not sqs_client:sqs_client = boto3.client('')if not s3_client:s3_client = boto3.client('')lclist_pklf = lclist_pkl_s3url.split('')[-]if not os.path.exists(lclist_pklf):lclist_pklf = awsutils.s3_get_url(lclist_pkl_s3url,client=s3_client)with open(lclist_pklf,'') as infd:lclistpkl = pickle.load(infd)signal.signal(signal.SIGINT, kill_handler)signal.signal(signal.SIGTERM, kill_handler)shutdown_last_time = time.monotonic()diskspace_last_time = time.monotonic()while True:curr_time = time.monotonic()if (curr_time - shutdown_last_time) > shutdown_check_timer_seconds:shutdown_check = shutdown_check_handler()if shutdown_check:LOGWARNING('')breakshutdown_last_time = time.monotonic()if (curr_time - diskspace_last_time) > cache_clean_timer_seconds:cache_clean_handler()diskspace_last_time = time.monotonic()try:work = awsutils.sqs_get_item(in_queue_url,client=sqs_client,raiseonfail=True)if work is not None and len(work) > :recv = work[]action = recv['']['']if action != '':continuetarget = recv['']['']args = recv['']['']kwargs = recv['']['']outbucket = recv['']['']if '' in recv['']:out_queue_url = recv['']['']else:out_queue_url = Nonereceipt = recv['']try:lc_filename = awsutils.s3_get_url(target,altexts=lc_altexts,client=s3_client,)if len(args) > and args[] is not None:pf_pickle = awsutils.s3_get_url(args[],client=s3_client)else:pf_pickle = Nonecpfs = runcp(pf_pickle,workdir,workdir,lcfname=lc_filename,lclistpkl=lclistpkl,makeneighborlcs=False,**kwargs)if cpfs and all(os.path.exists(x) for x in cpfs):LOGINFO('' %(lc_filename, pf_pickle, cpfs))resp = s3_client.list_objects_v2(Bucket=outbucket,MaxKeys=,Prefix=cpfs[])outbucket_list = resp.get('',[])if outbucket_list and len(outbucket_list) > :LOGWARNING(''''% target)awsutils.sqs_delete_item(in_queue_url, receipt)continuefor cpf in cpfs:put_url = awsutils.s3_put_file(cpf,outbucket,client=s3_client)if put_url is not None:LOGINFO('' % put_url)if out_queue_url is not None:awsutils.sqs_put_item(out_queue_url,{'':put_url,'': target,'':lc_filename,'':lclist_pklf,'':kwargs},raiseonfail=True)os.remove(cpf)else:LOGERROR('' % cpf)awsutils.sqs_delete_item(in_queue_url,receipt)if ( (lc_filename is not None) and(os.path.exists(lc_filename)) ):os.remove(lc_filename)else:LOGWARNING('' %(lc_filename, pf_pickle))with open('' %lc_filename, '') as outfd:pickle.dump({'':in_queue_url,'':target,'':lc_filename,'':lclist_pklf,'':kwargs,'':outbucket,'':out_queue_url},outfd, pickle.HIGHEST_PROTOCOL)put_url = awsutils.s3_put_file('' % lc_filename,outbucket,client=s3_client)if out_queue_url is not None:awsutils.sqs_put_item(out_queue_url,{'':put_url,'':lc_filename,'':lclist_pklf,'':kwargs},raiseonfail=True)awsutils.sqs_delete_item(in_queue_url,receipt,raiseonfail=True)if ( (lc_filename is not None) and(os.path.exists(lc_filename)) ):os.remove(lc_filename)except ClientError as e:LOGWARNING('')breakexcept Exception as e:LOGEXCEPTION('')if '' in locals():with open('' %lc_filename,'') as 
outfd:pickle.dump({'':in_queue_url,'':target,'':lc_filename,'':lclist_pklf,'':kwargs,'':outbucket,'':out_queue_url},outfd, pickle.HIGHEST_PROTOCOL)put_url = awsutils.s3_put_file('' % lc_filename,outbucket,client=s3_client)if out_queue_url is not None:awsutils.sqs_put_item(out_queue_url,{'':put_url,'':lc_filename,'':lclist_pklf,'':kwargs},raiseonfail=True)if ( (lc_filename is not None) and(os.path.exists(lc_filename)) ):os.remove(lc_filename)awsutils.sqs_delete_item(in_queue_url,receipt,raiseonfail=True)except KeyboardInterrupt:LOGWARNING('')breakexcept ClientError as e:LOGWARNING('')breakexcept Exception as e:LOGEXCEPTION('')if '' in locals():with open('' %lc_filename,'') as outfd:pickle.dump({'':in_queue_url,'':target,'':lclist_pklf,'':kwargs,'':outbucket,'':out_queue_url},outfd, pickle.HIGHEST_PROTOCOL)put_url = awsutils.s3_put_file('' % lc_filename,outbucket,client=s3_client)if out_queue_url is not None:awsutils.sqs_put_item(out_queue_url,{'':put_url,'':lclist_pklf,'':kwargs},raiseonfail=True)if ( (lc_filename is not None) and(os.path.exists(lc_filename)) ):os.remove(lc_filename)awsutils.sqs_delete_item(in_queue_url, receipt, raiseonfail=True)", "docstring": "This runs checkplot pickle making in a loop until interrupted.\n\n Consumes work task items from an input queue set up by `runcp_producer_loop`\n above. For the moment, we don't generate neighbor light curves since this\n would require a lot more S3 calls.\n\n Parameters\n ----------\n\n in_queue_url : str\n The SQS URL of the input queue to listen to for work assignment\n messages. The task orders will include the input and output S3 bucket\n names, as well as the URL of the output queue to where this function\n will report its work-complete or work-failed status.\n\n workdir : str\n The directory on the local machine where this worker loop will download\n the input light curves and associated period-finder results (if any),\n process them, and produce its output checkplot pickles. These will then\n be uploaded to the specified S3 output bucket and then deleted from the\n workdir when the upload is confirmed to make it safely to S3.\n\n lclist_pkl : str\n S3 URL of a catalog pickle generated by `lcproc.catalogs.make_lclist`\n that contains objectids and coordinates, as well as a kdtree for all of\n the objects in the current light curve collection being processed. This\n is used to look up neighbors for each object being processed.\n\n lc_altexts : sequence of str\n If not None, this is a sequence of alternate extensions to try for the\n input light curve file other than the one provided in the input task\n order. For example, to get anything that's an .sqlite where .sqlite.gz\n is expected, use altexts=[''] to strip the .gz.\n\n wait_time_seconds : int\n The amount of time to wait in the input SQS queue for an input task\n order. If this timeout expires and no task has been received, this\n function goes back to the top of the work loop.\n\n cache_clean_timer_seconds : float\n The amount of time in seconds to wait before periodically removing old\n files (such as finder chart FITS, external service result pickles) from\n the astrobase cache directory. These accumulate as the work items are\n processed, and take up significant space, so must be removed\n periodically.\n\n shutdown_check_timer_seconds : float\n The amount of time to wait before checking for a pending EC2 shutdown\n message for the instance this worker loop is operating on. 
If a shutdown\n is noticed, the worker loop is cancelled in preparation for instance\n shutdown.\n\n sqs_client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its SQS operations. Alternatively, pass in an existing\n `boto3.Client` instance to re-use it here.\n\n s3_client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its S3 operations. Alternatively, pass in an existing\n `boto3.Client` instance to re-use it here.\n\n Returns\n -------\n\n Nothing.", "id": "f14704:m5"} {"signature": "def runpf_producer_loop(lightcurve_list,input_queue,input_bucket,result_queue,result_bucket,pfmethods=('','','','',''),pfkwargs=({}, {}, {}, {}, {}),extra_runpf_kwargs={'':True},process_list_slice=None,purge_queues_when_done=False,delete_queues_when_done=False,download_when_done=True,save_state_when_done=True,s3_client=None,sqs_client=None):", "body": "if not sqs_client:sqs_client = boto3.client('')if not s3_client:s3_client = boto3.client('')if isinstance(lightcurve_list, str) and os.path.exists(lightcurve_list):with open(lightcurve_list, '') as infd:lclist = infd.readlines()lclist = [x.replace('','') for x in lclist if len(x) > ]if process_list_slice is not None:lclist = lclist[process_list_slice[]:process_list_slice[]]lclist = [x[:] for x in lclist if x.startswith('')]lclist = ['' % (input_bucket, x) for x in lclist]elif isinstance(lightcurve_list, list):lclist = lightcurve_listtry:inq = sqs_client.get_queue_url(QueueName=input_queue)inq_url = inq['']LOGINFO('')except ClientError as e:inq = awsutils.sqs_create_queue(input_queue, client=sqs_client)inq_url = inq['']try:outq = sqs_client.get_queue_url(QueueName=result_queue)outq_url = outq['']LOGINFO('')except ClientError as e:outq = awsutils.sqs_create_queue(result_queue, client=sqs_client)outq_url = outq['']LOGINFO('' % inq_url)LOGINFO('' % outq_url)LOGINFO('')time.sleep()all_runpf_kwargs = {'':pfmethods,'':pfkwargs}if isinstance(extra_runpf_kwargs, dict):all_runpf_kwargs.update(extra_runpf_kwargs)for lc in lclist:this_item = {'': lc,'': '','': ('',),'':all_runpf_kwargs,'': result_bucket,'': outq_url}resp = awsutils.sqs_put_item(inq_url, this_item, client=sqs_client)if resp:LOGINFO('' % (lc, inq_url))done_objects = {}LOGINFO('')signal.signal(signal.SIGINT, kill_handler)signal.signal(signal.SIGTERM, kill_handler)while len(list(done_objects.keys())) < len(lclist):try:result = awsutils.sqs_get_item(outq_url, client=sqs_client)if result is not None and len(result) > :recv = result[]try:processed_object = recv['']['']except KeyError:LOGWARNING('' % recv)processed_object = ''pfresult = recv['']['']receipt = recv['']if processed_object in lclist:if processed_object not in done_objects:done_objects[processed_object] = [pfresult]else:done_objects[processed_object].append(pfresult)LOGINFO('' % (processed_object, pfresult))if download_when_done:getobj = awsutils.s3_get_url(pfresult,client=s3_client)LOGINFO('' % (pfresult, getobj))else:LOGWARNING('''''')awsutils.sqs_delete_item(outq_url, receipt)except KeyboardInterrupt as e:LOGWARNING('')breakLOGINFO('')time.sleep()if purge_queues_when_done:LOGWARNING('')sqs_client.purge_queue(QueueUrl=inq_url)sqs_client.purge_queue(QueueUrl=outq_url)time.sleep()if delete_queues_when_done:LOGWARNING('')awsutils.sqs_delete_queue(inq_url)awsutils.sqs_delete_queue(outq_url)work_state = {'': done_objects,'': list(set(lclist) - set(done_objects.keys())),'':((os.path.abspath(lightcurve_list) ifisinstance(lightcurve_list, str) 
else lightcurve_list),input_queue,input_bucket,result_queue,result_bucket),'':{'':pfmethods,'':pfkwargs,'':extra_runpf_kwargs,'':process_list_slice,'':purge_queues_when_done,'':delete_queues_when_done,'':download_when_done,'':save_state_when_done}}if save_state_when_done:with open('','') as outfd:pickle.dump(work_state, outfd, pickle.HIGHEST_PROTOCOL)return work_state", "docstring": "This queues up work for period-finders using SQS.\n\n Parameters\n ----------\n\n lightcurve_list : str or list of str\n This is either a string pointing to a file containing a list of light\n curves filenames to process or the list itself. The names must\n correspond to the full filenames of files stored on S3, including all\n prefixes, but not include the 's3:///' bit (these will be\n added automatically).\n\n input_queue : str\n This is the name of the SQS queue which will receive processing tasks\n generated by this function. The queue URL will automatically be obtained\n from AWS.\n\n input_bucket : str\n The name of the S3 bucket containing the light curve files to process.\n\n result_queue : str\n This is the name of the SQS queue that this function will listen to for\n messages from the workers as they complete processing on their input\n elements. This function will attempt to match input sent to the\n `input_queue` with results coming into the `result_queue` so it knows\n how many objects have been successfully processed. If this function\n receives task results that aren't in its own input queue, it will\n acknowledge them so they complete successfully, but not download them\n automatically. This handles leftover tasks completing from a previous\n run of this function.\n\n result_bucket : str\n The name of the S3 bucket which will receive the results from the\n workers.\n\n pfmethods : sequence of str\n This is a list of period-finder method short names as listed in the\n `lcproc.periodfinding.PFMETHODS` dict. This is used to tell the worker\n loop which period-finders to run on the input light curve.\n\n pfkwargs : sequence of dicts\n This contains optional kwargs as dicts to be supplied to all of the\n period-finder functions listed in `pfmethods`. This should be the same\n length as that sequence.\n\n extra_runpf_kwargs : dict\n This is a dict of kwargs to be supplied to `runpf` driver function\n itself.\n\n process_list_slice : list\n This is used to index into the input light curve list so a subset of the\n full list can be processed in this specific run of this function.\n\n Use None for a slice index elem to emulate single slice spec behavior:\n\n process_list_slice = [10, None] -> lightcurve_list[10:]\n process_list_slice = [None, 500] -> lightcurve_list[:500]\n\n purge_queues_when_done : bool\n If this is True, and this function exits (either when all done, or when\n it is interrupted with a Ctrl+C), all outstanding elements in the\n input/output queues that have not yet been acknowledged by workers or by\n this function will be purged. 
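On exit, queue purging and deletion map onto two boto3 SQS calls. The snippet below only sketches those calls with a made-up queue URL; in the loop above the real URLs come from `get_queue_url` or `sqs_create_queue`.

```python
import boto3

sqs = boto3.client('sqs')

# illustrative only; the real URL is returned by get_queue_url / create_queue
inq_url = 'https://sqs.us-east-1.amazonaws.com/123456789012/example-input-queue'

# purge_queues_when_done: drop every outstanding message but keep the queue
sqs.purge_queue(QueueUrl=inq_url)

# delete_queues_when_done: remove the queue itself as well
sqs.delete_queue(QueueUrl=inq_url)
```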
This effectively cancels all outstanding\n work.\n\n delete_queues_when_done : bool\n If this is True, and this function exits (either when all done, or when\n it is interrupted with a Ctrl+C'), all outstanding work items will be\n purged from the input/queues and the queues themselves will be deleted.\n\n download_when_done : bool\n If this is True, the generated periodfinding result pickle for each\n input work item will be downloaded immediately to the current working\n directory when the worker functions report they're done with it.\n\n save_state_when_done : bool\n If this is True, will save the current state of the work item queue and\n the work items acknowledged as completed to a pickle in the current\n working directory. Call the `runcp_producer_loop_savedstate` function\n below to resume processing from this saved state later.\n\n s3_client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its S3 download operations. Alternatively, pass in an existing\n `boto3.Client` instance to re-use it here.\n\n sqs_client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its SQS operations. Alternatively, pass in an existing\n `boto3.Client` instance to re-use it here.\n\n Returns\n -------\n\n dict or str\n Returns the current work state as a dict or str path to the generated\n work state pickle depending on if `save_state_when_done` is True.", "id": "f14704:m6"} {"signature": "def runpf_consumer_loop(in_queue_url,workdir,lc_altexts=('',),wait_time_seconds=,shutdown_check_timer_seconds=,sqs_client=None,s3_client=None):", "body": "if not sqs_client:sqs_client = boto3.client('')if not s3_client:s3_client = boto3.client('')signal.signal(signal.SIGINT, kill_handler)signal.signal(signal.SIGTERM, kill_handler)shutdown_last_time = time.monotonic()while True:curr_time = time.monotonic()if (curr_time - shutdown_last_time) > shutdown_check_timer_seconds:shutdown_check = shutdown_check_handler()if shutdown_check:LOGWARNING('')breakshutdown_last_time = time.monotonic()try:work = awsutils.sqs_get_item(in_queue_url,client=sqs_client,raiseonfail=True)if work is not None and len(work) > :recv = work[]action = recv['']['']if action != '':continuetarget = recv['']['']args = recv['']['']kwargs = recv['']['']outbucket = recv['']['']if '' in recv['']:out_queue_url = recv['']['']else:out_queue_url = Nonereceipt = recv['']try:lc_filename = awsutils.s3_get_url(target,altexts=lc_altexts,client=s3_client)runpf_args = (lc_filename, args[])pfresult = runpf(*runpf_args,**kwargs)if pfresult and os.path.exists(pfresult):LOGINFO('' %(lc_filename, pfresult))resp = s3_client.list_objects_v2(Bucket=outbucket,MaxKeys=,Prefix=pfresult)outbucket_list = resp.get('',[])if outbucket_list and len(outbucket_list) > :LOGWARNING(''''% target)awsutils.sqs_delete_item(in_queue_url, receipt)continueput_url = awsutils.s3_put_file(pfresult,outbucket,client=s3_client)if put_url is not None:LOGINFO('' % put_url)if out_queue_url is not None:awsutils.sqs_put_item(out_queue_url,{'':put_url,'': target,'':lc_filename,'':kwargs},raiseonfail=True)os.remove(pfresult)else:LOGERROR('' % pfresult)os.remove(pfresult)awsutils.sqs_delete_item(in_queue_url, receipt)if ( (lc_filename is not None) and(os.path.exists(lc_filename)) ):os.remove(lc_filename)else:LOGWARNING('' %(lc_filename,))with open('' %lc_filename, '') as outfd:pickle.dump({'':in_queue_url,'':target,'':lc_filename,'':kwargs,'':outbucket,'':out_queue_url},outfd, 
pickle.HIGHEST_PROTOCOL)put_url = awsutils.s3_put_file('' % lc_filename,outbucket,client=s3_client)if out_queue_url is not None:awsutils.sqs_put_item(out_queue_url,{'':put_url,'':lc_filename,'':kwargs},raiseonfail=True)awsutils.sqs_delete_item(in_queue_url,receipt,raiseonfail=True)if ( (lc_filename is not None) and(os.path.exists(lc_filename)) ):os.remove(lc_filename)except ClientError as e:LOGWARNING('')breakexcept Exception as e:LOGEXCEPTION('')if '' in locals():with open('' %lc_filename,'') as outfd:pickle.dump({'':in_queue_url,'':target,'':lc_filename,'':kwargs,'':outbucket,'':out_queue_url},outfd, pickle.HIGHEST_PROTOCOL)put_url = awsutils.s3_put_file('' % lc_filename,outbucket,client=s3_client)if out_queue_url is not None:awsutils.sqs_put_item(out_queue_url,{'':put_url,'':lc_filename,'':kwargs},raiseonfail=True)if ( (lc_filename is not None) and(os.path.exists(lc_filename)) ):os.remove(lc_filename)awsutils.sqs_delete_item(in_queue_url,receipt,raiseonfail=True)except KeyboardInterrupt:LOGWARNING('')breakexcept ClientError as e:LOGWARNING('')breakexcept Exception as e:LOGEXCEPTION('')if '' in locals():with open('' %lc_filename,'') as outfd:pickle.dump({'':in_queue_url,'':target,'':kwargs,'':outbucket,'':out_queue_url},outfd, pickle.HIGHEST_PROTOCOL)put_url = awsutils.s3_put_file('' % lc_filename,outbucket,client=s3_client)if out_queue_url is not None:awsutils.sqs_put_item(out_queue_url,{'':put_url,'':kwargs},raiseonfail=True)if ( (lc_filename is not None) and(os.path.exists(lc_filename)) ):os.remove(lc_filename)awsutils.sqs_delete_item(in_queue_url, receipt, raiseonfail=True)", "docstring": "This runs period-finding in a loop until interrupted.\n\n Consumes work task items from an input queue set up by `runpf_producer_loop`\n above.\n\n Parameters\n ----------\n\n in_queue_url : str\n The SQS URL of the input queue to listen to for work assignment\n messages. The task orders will include the input and output S3 bucket\n names, as well as the URL of the output queue to where this function\n will report its work-complete or work-failed status.\n\n workdir : str\n The directory on the local machine where this worker loop will download\n the input light curves, process them, and produce its output\n periodfinding result pickles. These will then be uploaded to the\n specified S3 output bucket, and then deleted from the local disk.\n\n lc_altexts : sequence of str\n If not None, this is a sequence of alternate extensions to try for the\n input light curve file other than the one provided in the input task\n order. For example, to get anything that's an .sqlite where .sqlite.gz\n is expected, use altexts=[''] to strip the .gz.\n\n wait_time_seconds : int\n The amount of time to wait in the input SQS queue for an input task\n order. If this timeout expires and no task has been received, this\n function goes back to the top of the work loop.\n\n shutdown_check_timer_seconds : float\n The amount of time to wait before checking for a pending EC2 shutdown\n message for the instance this worker loop is operating on. If a shutdown\n is noticed, the worker loop is cancelled in preparation for instance\n shutdown.\n\n sqs_client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its SQS operations. Alternatively, pass in an existing\n `boto3.Client` instance to re-use it here.\n\n s3_client : boto3.Client or None\n If None, this function will instantiate a new `boto3.Client` object to\n use in its S3 operations. 
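Both client parameters exist because every loop iteration talks to SQS (to fetch and acknowledge task orders) and to S3 (to fetch inputs and store outputs). Stripped down to raw boto3 calls, and with an illustrative queue URL in place of the real one, the receive, process, acknowledge cycle that the `awsutils` wrappers implement looks roughly like this:

```python
import json
import boto3

sqs = boto3.client('sqs')
in_queue_url = 'https://sqs.us-east-1.amazonaws.com/123456789012/example-input-queue'

resp = sqs.receive_message(
    QueueUrl=in_queue_url,
    MaxNumberOfMessages=1,
    WaitTimeSeconds=20,      # long-poll, like wait_time_seconds above
)

for msg in resp.get('Messages', []):
    task = json.loads(msg['Body'])
    # ... fetch the target light curve from S3, run the work function,
    # upload the result, report to the output queue ...

    # delete the message only after the work succeeded; unacknowledged
    # messages become visible again and get retried by another worker
    sqs.delete_message(QueueUrl=in_queue_url,
                       ReceiptHandle=msg['ReceiptHandle'])
```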
Alternatively, pass in an existing\n `boto3.Client` instance to re-use it here.\n\n Returns\n -------\n\n Nothing.", "id": "f14704:m7"} {"signature": "def timebinlc(lcfile,binsizesec,outdir=None,lcformat='',lcformatdir=None,timecols=None,magcols=None,errcols=None,minbinelems=):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif timecols is None:timecols = dtimecolsif magcols is None:magcols = dmagcolsif errcols is None:errcols = derrcolslcdict = readerfunc(lcfile)if ( (isinstance(lcdict, (list, tuple))) and(isinstance(lcdict[], dict)) ):lcdict = lcdict[]if '' in lcdict:LOGERROR('')return Nonelcdict[''] = {}for tcol, mcol, ecol in zip(timecols, magcols, errcols):if '' in tcol:tcolget = tcol.split('')else:tcolget = [tcol]times = _dict_get(lcdict, tcolget)if '' in mcol:mcolget = mcol.split('')else:mcolget = [mcol]mags = _dict_get(lcdict, mcolget)if '' in ecol:ecolget = ecol.split('')else:ecolget = [ecol]errs = _dict_get(lcdict, ecolget)if normfunc is None:ntimes, nmags = normalize_magseries(times, mags,magsarefluxes=magsarefluxes)times, mags, errs = ntimes, nmags, errsbinned = time_bin_magseries_with_errs(times,mags,errs,binsize=binsizesec,minbinelems=minbinelems)lcdict[''][mcol] = {'':binned[''],'':binned[''],'':binned[''],'':binned[''],'':binned[''],'':binsizesec}if outdir is None:outdir = os.path.dirname(lcfile)outfile = os.path.join(outdir, '' %(squeeze(lcdict['']).replace('',''),binsizesec, lcformat))with open(outfile, '') as outfd:pickle.dump(lcdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)return outfile", "docstring": "This bins the given light curve file in time using the specified bin size.\n\n Parameters\n ----------\n\n lcfile : str\n The file name to process.\n\n binsizesec : float\n The time bin-size in seconds.\n\n outdir : str or None\n If this is a str, the output LC will be written to `outdir`. If this is\n None, the output LC will be written to the same directory as `lcfile`.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curve file.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n timecols,magcols,errcols : lists of str\n The keys in the lcdict produced by your light curve reader function that\n correspond to the times, mags/fluxes, and associated measurement errors\n that will be used as inputs to the binning process. 
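The binning step itself is delegated to `time_bin_magseries_with_errs` (imported by this module). As a rough idea of what that does, here is a minimal, hedged sketch that bins by time and enforces `minbinelems`; it assumes the time column is in days, so the bin size in seconds is converted first, and it ignores error propagation.

```python
import numpy as np

def bin_magseries(times, mags, binsizesec=600.0, minbinelems=7):
    binsize_days = binsizesec / 86400.0
    bins = np.arange(times.min(), times.max() + binsize_days, binsize_days)
    bininds = np.digitize(times, bins)

    binned_times, binned_mags = [], []
    for b in np.unique(bininds):
        inbin = bininds == b
        if inbin.sum() >= minbinelems:          # drop under-filled bins
            binned_times.append(np.median(times[inbin]))
            binned_mags.append(np.median(mags[inbin]))

    return np.array(binned_times), np.array(binned_mags)
```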
If these are None,\n the default values for `timecols`, `magcols`, and `errcols` for your\n light curve format will be used here.\n\n minbinelems : int\n The minimum number of time-bin elements required to accept a time-bin as\n valid for the output binned light curve.\n\n Returns\n -------\n\n str\n The name of the output pickle file with the binned LC.\n\n Writes the output binned light curve to a pickle that contains the\n lcdict with an added `lcdict['binned'][magcol]` key, which contains the\n binned times, mags/fluxes, and errs as\n `lcdict['binned'][magcol]['times']`, `lcdict['binned'][magcol]['mags']`,\n and `lcdict['epd'][magcol]['errs']` for each `magcol` provided in the\n input or default `magcols` value for this light curve format.", "id": "f14705:m1"} {"signature": "def timebinlc_worker(task):", "body": "lcfile, binsizesec, kwargs = tasktry:binnedlc = timebinlc(lcfile, binsizesec, **kwargs)LOGINFO('' %(lcfile, binsizesec, binnedlc))return binnedlcexcept Exception as e:LOGEXCEPTION('' % (lcfile,binsizesec))return None", "docstring": "This is a parallel worker for the function below.\n\nParameters\n----------\n\ntask : tuple\n This is of the form::\n\n task[0] = lcfile\n task[1] = binsizesec\n task[3] = {'outdir','lcformat','lcformatdir',\n 'timecols','magcols','errcols','minbinelems'}\n\nReturns\n-------\n\nstr\n The output pickle file with the binned LC if successful. None otherwise.", "id": "f14705:m2"} {"signature": "def parallel_timebin(lclist,binsizesec,maxobjects=None,outdir=None,lcformat='',lcformatdir=None,timecols=None,magcols=None,errcols=None,minbinelems=,nworkers=NCPUS,maxworkertasks=):", "body": "if outdir and not os.path.exists(outdir):os.mkdir(outdir)if maxobjects is not None:lclist = lclist[:maxobjects]tasks = [(x, binsizesec, {'':outdir,'':lcformat,'':lcformatdir,'':timecols,'':magcols,'':errcols,'':minbinelems}) for x in lclist]pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)results = pool.map(timebinlc_worker, tasks)pool.close()pool.join()resdict = {os.path.basename(x):y for (x,y) in zip(lclist, results)}return resdict", "docstring": "This time-bins all the LCs in the list using the specified bin size.\n\n Parameters\n ----------\n\n lclist : list of str\n The input LCs to process.\n\n binsizesec : float\n The time bin size to use in seconds.\n\n maxobjects : int or None\n If provided, LC processing will stop at `lclist[maxobjects]`.\n\n outdir : str or None\n The directory where output LCs will be written. If None, will write to\n the same directory as the input LCs.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curve file.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n timecols,magcols,errcols : lists of str\n The keys in the lcdict produced by your light curve reader function that\n correspond to the times, mags/fluxes, and associated measurement errors\n that will be used as inputs to the binning process. 
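On the parallelism side, `parallel_timebin` uses the standard `multiprocessing.Pool` pattern, with `maxtasksperchild` recycling worker processes to guard against slow memory leaks over long runs. A self-contained sketch of that pattern, with a trivial stand-in worker and made-up file names:

```python
import os
import multiprocessing as mp

def _worker(task):
    # stand-in for timebinlc_worker: unpack the task tuple and do the work
    lcfile, binsizesec = task
    return (lcfile, binsizesec)          # the real worker returns an output path

if __name__ == '__main__':
    tasks = [('lc-%04d.pkl' % i, 600.0) for i in range(100)]   # made-up names

    pool = mp.Pool(processes=os.cpu_count(), maxtasksperchild=1000)
    results = pool.map(_worker, tasks)
    pool.close()
    pool.join()

    # keyed on the input file basename, like the resdict built above
    resdict = {os.path.basename(t[0]): r for t, r in zip(tasks, results)}
```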
If these are None,\n the default values for `timecols`, `magcols`, and `errcols` for your\n light curve format will be used here.\n\n minbinelems : int\n The minimum number of time-bin elements required to accept a time-bin as\n valid for the output binned light curve.\n\n nworkers : int\n Number of parallel workers to launch.\n\n maxworkertasks : int\n The maximum number of tasks a parallel worker will complete before being\n replaced to guard against memory leaks.\n\n Returns\n -------\n\n dict\n The returned dict contains keys = input LCs, vals = output LCs.", "id": "f14705:m3"} {"signature": "def parallel_timebin_lcdir(lcdir,binsizesec,maxobjects=None,outdir=None,lcformat='',lcformatdir=None,timecols=None,magcols=None,errcols=None,minbinelems=,nworkers=NCPUS,maxworkertasks=):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(fileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Nonelclist = sorted(glob.glob(os.path.join(lcdir, fileglob)))return parallel_timebin(lclist,binsizesec,maxobjects=maxobjects,outdir=outdir,lcformat=lcformat,timecols=timecols,magcols=magcols,errcols=errcols,minbinelems=minbinelems,nworkers=nworkers,maxworkertasks=maxworkertasks)", "docstring": "This time bins all the light curves in the specified directory.\n\nParameters\n----------\n\nlcdir : list of str\n Directory containing the input LCs to process.\n\nbinsizesec : float\n The time bin size to use in seconds.\n\nmaxobjects : int or None\n If provided, LC processing will stop at `lclist[maxobjects]`.\n\noutdir : str or None\n The directory where output LCs will be written. If None, will write to\n the same directory as the input LCs.\n\nlcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curve file.\n\nlcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\ntimecols,magcols,errcols : lists of str\n The keys in the lcdict produced by your light curve reader function that\n correspond to the times, mags/fluxes, and associated measurement errors\n that will be used as inputs to the binning process. 
If these are None,\n the default values for `timecols`, `magcols`, and `errcols` for your\n light curve format will be used here.\n\nminbinelems : int\n The minimum number of time-bin elements required to accept a time-bin as\n valid for the output binned light curve.\n\nnworkers : int\n Number of parallel workers to launch.\n\nmaxworkertasks : int\n The maximum number of tasks a parallel worker will complete before being\n replaced to guard against memory leaks.\n\nReturns\n-------\n\ndict\n The returned dict contains keys = input LCs, vals = output LCs.", "id": "f14705:m4"} {"signature": "def get_varfeatures(lcfile,outdir,timecols=None,magcols=None,errcols=None,mindet=,lcformat='',lcformatdir=None):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif timecols is None:timecols = dtimecolsif magcols is None:magcols = dmagcolsif errcols is None:errcols = derrcolstry:lcdict = readerfunc(lcfile)if ( (isinstance(lcdict, (list, tuple))) and(isinstance(lcdict[], dict)) ):lcdict = lcdict[]resultdict = {'':lcdict[''],'':lcdict[''],'':os.path.basename(lcfile)}if normfunc is not None:lcdict = normfunc(lcdict)for tcol, mcol, ecol in zip(timecols, magcols, errcols):if '' in tcol:tcolget = tcol.split('')else:tcolget = [tcol]times = _dict_get(lcdict, tcolget)if '' in mcol:mcolget = mcol.split('')else:mcolget = [mcol]mags = _dict_get(lcdict, mcolget)if '' in ecol:ecolget = ecol.split('')else:ecolget = [ecol]errs = _dict_get(lcdict, ecolget)if normfunc is None:ntimes, nmags = normalize_magseries(times, mags,magsarefluxes=magsarefluxes)times, mags, errs = ntimes, nmags, errsfinind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)if mags[finind].size < mindet:LOGINFO('' %(mags[finind].size, mcol, os.path.basename(lcfile)))resultdict[mcol] = Noneelse:lcfeatures = varfeatures.all_nonperiodic_features(times, mags, errs)resultdict[mcol] = lcfeaturestry:magmads = np.zeros(len(magcols))for mind, mcol in enumerate(magcols):if '' in mcol:mcolget = mcol.split('')else:mcolget = [mcol]magmads[mind] = resultdict[mcol]['']bestmagcolind = np.where(magmads == np.min(magmads))[]resultdict[''] = magcols[bestmagcolind]except Exception as e:resultdict[''] = Noneoutfile = os.path.join(outdir,'' %squeeze(resultdict['']).replace('',''))with open(outfile, '') as outfd:pickle.dump(resultdict, outfd, protocol=)return outfileexcept Exception as e:LOGEXCEPTION('' %(os.path.basename(lcfile), e))return None", "docstring": "This runs :py:func:`astrobase.varclass.varfeatures.all_nonperiodic_features`\n on a single LC file.\n\n Parameters\n ----------\n\n lcfile : str\n The input light curve to process.\n\n outfile : str\n The filename of the output variable features pickle that will be\n generated.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the features.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the features.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the features.\n\n mindet : int\n The minimum number of LC points required to generate variability\n features.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. 
This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n Returns\n -------\n\n str\n The generated variability features pickle for the input LC, with results\n for each magcol in the input `magcol` or light curve format's default\n `magcol` list.", "id": "f14706:m1"} {"signature": "def _varfeatures_worker(task):", "body": "try:(lcfile, outdir, timecols, magcols, errcols,mindet, lcformat, lcformatdir) = taskreturn get_varfeatures(lcfile, outdir,timecols=timecols,magcols=magcols,errcols=errcols,mindet=mindet,lcformat=lcformat,lcformatdir=lcformatdir)except Exception as e:return None", "docstring": "This wraps varfeatures.", "id": "f14706:m2"} {"signature": "def serial_varfeatures(lclist,outdir,maxobjects=None,timecols=None,magcols=None,errcols=None,mindet=,lcformat='',lcformatdir=None):", "body": "if maxobjects:lclist = lclist[:maxobjects]tasks = [(x, outdir, timecols, magcols, errcols,mindet, lcformat, lcformatdir)for x in lclist]for task in tqdm(tasks):result = _varfeatures_worker(task)return result", "docstring": "This runs variability feature extraction for a list of LCs.\n\n Parameters\n ----------\n\n lclist : list of str\n The list of light curve file names to process.\n\n outdir : str\n The directory where the output varfeatures pickle files will be written.\n\n maxobjects : int\n The number of LCs to process from `lclist`.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the features.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the features.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the features.\n\n mindet : int\n The minimum number of LC points required to generate variability\n features.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. 
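A note on the `timecols`/`magcols`/`errcols` values used throughout: a dotted name means a nested key in the lcdict. The real lookup is done by an internal `_dict_get` helper; this is only a sketch of that behavior, and the column name used here is illustrative rather than a real default.

```python
from functools import reduce

def dict_get(lcdict, keypath):
    # walk a list of keys down into nested dicts
    return reduce(lambda sub, key: sub[key], keypath, lcdict)

lcdict = {'aep': {'aep_000': [21.3, 21.4, 21.2]}}   # illustrative structure

magcol = 'aep.aep_000'
keypath = magcol.split('.') if '.' in magcol else [magcol]
mags = dict_get(lcdict, keypath)    # -> [21.3, 21.4, 21.2]
```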
Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n Returns\n -------\n\n list of str\n List of the generated variability features pickles for the input LCs,\n with results for each magcol in the input `magcol` or light curve\n format's default `magcol` list.", "id": "f14706:m3"} {"signature": "def parallel_varfeatures(lclist,outdir,maxobjects=None,timecols=None,magcols=None,errcols=None,mindet=,lcformat='',lcformatdir=None,nworkers=NCPUS):", "body": "if not os.path.exists(outdir):os.makedirs(outdir)if maxobjects:lclist = lclist[:maxobjects]tasks = [(x, outdir, timecols, magcols, errcols, mindet,lcformat, lcformatdir) for x in lclist]with ProcessPoolExecutor(max_workers=nworkers) as executor:resultfutures = executor.map(varfeatures_worker, tasks)results = [x for x in resultfutures]resdict = {os.path.basename(x):y for (x,y) in zip(lclist, results)}return resdict", "docstring": "This runs variable feature extraction in parallel for all LCs in `lclist`.\n\n Parameters\n ----------\n\n lclist : list of str\n The list of light curve file names to process.\n\n outdir : str\n The directory where the output varfeatures pickle files will be written.\n\n maxobjects : int\n The number of LCs to process from `lclist`.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the features.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the features.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the features.\n\n mindet : int\n The minimum number of LC points required to generate variability\n features.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. 
Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n nworkers : int\n The number of parallel workers to launch.\n\n Returns\n -------\n\n dict\n A dict with key:val pairs of input LC file name : the generated\n variability features pickles for each of the input LCs, with results for\n each magcol in the input `magcol` or light curve format's default\n `magcol` list.", "id": "f14706:m4"} {"signature": "def parallel_varfeatures_lcdir(lcdir,outdir,fileglob=None,maxobjects=None,timecols=None,magcols=None,errcols=None,recursive=True,mindet=,lcformat='',lcformatdir=None,nworkers=NCPUS):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif not fileglob:fileglob = dfileglobLOGINFO('' % (lcformat, lcdir))if recursive is False:matching = glob.glob(os.path.join(lcdir, fileglob))else:if sys.version_info[:] > (,):matching = glob.glob(os.path.join(lcdir,'',fileglob),recursive=True)else:walker = os.walk(lcdir)matching = []for root, dirs, _files in walker:for sdir in dirs:searchpath = os.path.join(root,sdir,fileglob)foundfiles = glob.glob(searchpath)if foundfiles:matching.extend(foundfiles)if matching and len(matching) > :LOGINFO('' %len(matching))return parallel_varfeatures(matching,outdir,maxobjects=maxobjects,timecols=timecols,magcols=magcols,errcols=errcols,mindet=mindet,lcformat=lcformat,lcformatdir=lcformatdir,nworkers=nworkers)else:LOGERROR('' % (lcformat,lcdir))return None", "docstring": "This runs parallel variable feature extraction for a directory of LCs.\n\n Parameters\n ----------\n\n lcdir : str\n The directory of light curve files to process.\n\n outdir : str\n The directory where the output varfeatures pickle files will be written.\n\n fileglob : str or None\n The file glob to use when looking for light curve files in `lcdir`. If\n None, the default file glob associated for this LC format will be used.\n\n maxobjects : int\n The number of LCs to process from `lclist`.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the features.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the features.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the features.\n\n mindet : int\n The minimum number of LC points required to generate variability\n features.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. 
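The directory drivers locate light curves with either a recursive `**` glob (on Pythons that support it) or a manual `os.walk` fallback, honoring the `recursive` and `fileglob` parameters. A hedged sketch, with an illustrative glob pattern in place of the registered format's default:

```python
import os
import sys
import glob

def find_lcfiles(lcdir, fileglob='*-lc.sqlite.gz', recursive=True):
    if not recursive:
        return glob.glob(os.path.join(lcdir, fileglob))

    if sys.version_info[:2] >= (3, 5):
        # '**' plus recursive=True descends into all subdirectories
        return glob.glob(os.path.join(lcdir, '**', fileglob), recursive=True)

    # older Pythons: walk the tree and glob inside each subdirectory
    matching = []
    for root, dirs, _files in os.walk(lcdir):
        for sdir in dirs:
            matching.extend(glob.glob(os.path.join(root, sdir, fileglob)))
    return matching
```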
Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n nworkers : int\n The number of parallel workers to launch.\n\n Returns\n -------\n\n dict\n A dict with key:val pairs of input LC file name : the generated\n variability features pickles for each of the input LCs, with results for\n each magcol in the input `magcol` or light curve format's default\n `magcol` list.", "id": "f14706:m5"} {"signature": "def _collect_tfa_stats(task):", "body": "try:(lcfile, lcformat, lcformatdir,timecols, magcols, errcols,custom_bandpasses) = tasktry:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif timecols is None:timecols = dtimecolsif magcols is None:magcols = dmagcolsif errcols is None:errcols = derrcolslcdict = readerfunc(lcfile)if ( (isinstance(lcdict, (list, tuple))) and(isinstance(lcdict[], dict)) ):lcdict = lcdict[]if '' in lcdict:objectid = lcdict['']elif '' in lcdict and '' in lcdict['']:objectid = lcdict['']['']elif '' in lcdict and '' in lcdict['']:objectid = lcdict['']['']else:LOGERROR('''' % lcfile)objectid = os.path.splitext(os.path.basename(lcfile))[]if '' in lcdict:colorfeat = starfeatures.color_features(lcdict[''],deredden=False,custom_bandpasses=custom_bandpasses)else:LOGERROR('''''' %lcfile)return Noneresultdict = {'':objectid,'':lcdict[''][''],'':lcdict[''][''],'':colorfeat,'':os.path.abspath(lcfile),'':lcformat,'':lcformatdir,'':timecols,'':magcols,'':errcols}for tcol, mcol, ecol in zip(timecols, magcols, errcols):try:if '' in tcol:tcolget = tcol.split('')else:tcolget = [tcol]times = _dict_get(lcdict, tcolget)if '' in mcol:mcolget = mcol.split('')else:mcolget = [mcol]mags = _dict_get(lcdict, mcolget)if '' in ecol:ecolget = ecol.split('')else:ecolget = [ecol]errs = _dict_get(lcdict, ecolget)if normfunc is None:ntimes, nmags = normalize_magseries(times, mags,magsarefluxes=magsarefluxes)times, mags, errs = ntimes, nmags, errsvarfeat = varfeatures.all_nonperiodic_features(times, mags, errs)resultdict[mcol] = varfeatexcept Exception as e:LOGEXCEPTION('' %(lcfile, mcol))resultdict[mcol] = {'':,'':np.nan,'':np.nan}return resultdictexcept Exception as e:LOGEXCEPTION('' %repr(task))return None", "docstring": "This is a parallel worker to gather LC stats.\n\ntask[0] = lcfile\ntask[1] = lcformat\ntask[2] = lcformatdir\ntask[3] = timecols\ntask[4] = magcols\ntask[5] = errcols\ntask[6] = custom_bandpasses", "id": "f14707:m1"} {"signature": "def _reform_templatelc_for_tfa(task):", "body": "try:(lcfile, lcformat, lcformatdir,tcol, mcol, ecol,timebase, interpolate_type, sigclip) = tasktry:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Nonelcdict = readerfunc(lcfile)if ( (isinstance(lcdict, (list, tuple))) and(isinstance(lcdict[], dict)) ):lcdict = lcdict[]outdict = {}if '' in tcol:tcolget = tcol.split('')else:tcolget = [tcol]times = _dict_get(lcdict, tcolget)if '' in mcol:mcolget = mcol.split('')else:mcolget = [mcol]mags = _dict_get(lcdict, mcolget)if '' in ecol:ecolget = ecol.split('')else:ecolget = [ecol]errs = _dict_get(lcdict, ecolget)if normfunc is None:ntimes, nmags = normalize_magseries(times, mags,magsarefluxes=magsarefluxes)times, mags, 
errs = ntimes, nmags, errsstimes, smags, serrs = sigclip_magseries(times,mags,errs,sigclip=sigclip)mags_interpolator = spi.interp1d(stimes, smags,kind=interpolate_type,fill_value='')errs_interpolator = spi.interp1d(stimes, serrs,kind=interpolate_type,fill_value='')interpolated_mags = mags_interpolator(timebase)interpolated_errs = errs_interpolator(timebase)magmedian = np.median(interpolated_mags)renormed_mags = interpolated_mags - magmedianoutdict = {'':renormed_mags,'':interpolated_errs,'':interpolated_mags}return outdictexcept Exception as e:LOGEXCEPTION('' % repr(task))return None", "docstring": "This is a parallel worker that reforms light curves for TFA.\n\ntask[0] = lcfile\ntask[1] = lcformat\ntask[2] = lcformatdir\ntask[3] = timecol\ntask[4] = magcol\ntask[5] = errcol\ntask[6] = timebase\ntask[7] = interpolate_type\ntask[8] = sigclip", "id": "f14707:m2"} {"signature": "def tfa_templates_lclist(lclist,lcinfo_pkl=None,outfile=None,target_template_frac=,max_target_frac_obs=,min_template_number=,max_template_number=,max_rms=,max_mult_above_magmad=,max_mult_above_mageta=,xieta_bins=,mag_bandpass='',custom_bandpasses=None,mag_bright_limit=,mag_faint_limit=,process_template_lcs=True,template_sigclip=,template_interpolate='',lcformat='',lcformatdir=None,timecols=None,magcols=None,errcols=None,nworkers=NCPUS,maxworkertasks=,):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif timecols is None:timecols = dtimecolsif magcols is None:magcols = dmagcolsif errcols is None:errcols = derrcolsLOGINFO('' %len(lclist))if lcinfo_pkl and os.path.exists(lcinfo_pkl):with open(lcinfo_pkl,'') as infd:results = pickle.load(infd)elif ((not outfile) andos.path.exists('' % lcformat)):with open('' % lcformat, '') as infd:results = pickle.load(infd)elif (outfile and os.path.exists('' %(lcformat, os.path.basename(outfile)))):with open('' %(lcformat, os.path.basename(outfile)),'') as infd:results = pickle.load(infd)else:tasks = [(x, lcformat, lcformat,timecols, magcols, errcols,custom_bandpasses) for x in lclist]pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)results = pool.map(_collect_tfa_stats, tasks)pool.close()pool.join()if not outfile:with open('' % lcformat,'') as outfd:pickle.dump(results, outfd, pickle.HIGHEST_PROTOCOL)else:with open('' %(lcformat, os.path.basename(outfile)),'') as outfd:pickle.dump(results, outfd, pickle.HIGHEST_PROTOCOL)all_ras = np.array([res[''] for res in results])all_decls = np.array([res[''] for res in results])center_ra = np.nanmedian(all_ras)center_decl = np.nanmedian(all_decls)outdict = {'':[],'':[],'':[],'':center_ra,'':center_decl,}for tcol, mcol, ecol in zip(timecols, magcols, errcols):if '' in tcol:tcolget = tcol.split('')else:tcolget = [tcol](lcmag, lcmad, lceta,lcndet, lcobj, lcfpaths,lcra, lcdecl) = [], [], [], [], [], [], [], []outdict[''].append(tcol)outdict[''].append(mcol)outdict[''].append(ecol)outdict[mcol] = {'':{'':[],'':[],'':[],'':[],'':[],'':[],'':[],'':[]}}LOGINFO('' %mcol)for result in results:try:thismag = result[''][mag_bandpass]thismad = result[mcol]['']thiseta = result[mcol]['']thisndet = result[mcol]['']thisobj = result['']thislcf = result['']thisra = result['']thisdecl = 
result['']outdict[mcol][''][''].append(thismag)outdict[mcol][''][''].append(thismad)outdict[mcol][''][''].append(thiseta)outdict[mcol][''][''].append(thisndet)outdict[mcol][''][''].append(thisobj)outdict[mcol][''][''].append(thislcf)outdict[mcol][''][''].append(thisra)outdict[mcol][''][''].append(thisdecl)if isinstance(mag_bright_limit, (list, tuple)):use_bright_maglim = mag_bright_limit[magcols.index(mcol)]else:use_bright_maglim = mag_bright_limitif isinstance(mag_faint_limit, (list, tuple)):use_faint_maglim = mag_faint_limit[magcols.index(mcol)]else:use_faint_maglim = mag_faint_limitif ((use_bright_maglim < thismag < use_faint_maglim) and(*thismad < max_rms)):lcmag.append(thismag)lcmad.append(thismad)lceta.append(thiseta)lcndet.append(thisndet)lcobj.append(thisobj)lcfpaths.append(thislcf)lcra.append(thisra)lcdecl.append(thisdecl)except Exception as e:passif len(lcobj) >= min_template_number:LOGINFO('''''' %(mcol, len(lcobj),mag_bright_limit, mag_faint_limit, max_rms))lcmag = np.array(lcmag)lcmad = np.array(lcmad)lceta = np.array(lceta)lcndet = np.array(lcndet)lcobj = np.array(lcobj)lcfpaths = np.array(lcfpaths)lcra = np.array(lcra)lcdecl = np.array(lcdecl)sortind = np.argsort(lcmag)lcmag = lcmag[sortind]lcmad = lcmad[sortind]lceta = lceta[sortind]lcndet = lcndet[sortind]lcobj = lcobj[sortind]lcfpaths = lcfpaths[sortind]lcra = lcra[sortind]lcdecl = lcdecl[sortind]splfit_ind = np.diff(lcmag) > splfit_ind = np.concatenate((np.array([True]), splfit_ind))fit_lcmag = lcmag[splfit_ind]fit_lcmad = lcmad[splfit_ind]fit_lceta = lceta[splfit_ind]magmadfit = np.poly1d(np.polyfit(fit_lcmag,fit_lcmad,))magmadind = lcmad/magmadfit(lcmag) < max_mult_above_magmadmagetafit = np.poly1d(np.polyfit(fit_lcmag,fit_lceta,))magetaind = magetafit(lcmag)/lceta < max_mult_above_magetamedian_ndet = np.median(lcndet)ndetind = lcndet >= median_ndettemplateind = magmadind & magetaind & ndetindif templateind.sum() >= min_template_number:LOGINFO('' %(mcol, templateind.sum()))templatemag = lcmag[templateind]templatemad = lcmad[templateind]templateeta = lceta[templateind]templatendet = lcndet[templateind]templateobj = lcobj[templateind]templatelcf = lcfpaths[templateind]templatera = lcra[templateind]templatedecl = lcdecl[templateind]target_number_templates = int(target_template_frac*len(results))if target_number_templates > max_template_number:target_number_templates = max_template_numberLOGINFO('' %(mcol, target_number_templates))template_cxi, template_ceta = coordutils.xieta_from_radecl(templatera,templatedecl,center_ra,center_decl)cxi_bins = np.linspace(template_cxi.min(),template_cxi.max(),num=xieta_bins)ceta_bins = np.linspace(template_ceta.min(),template_ceta.max(),num=xieta_bins)digitized_cxi_inds = np.digitize(template_cxi, cxi_bins)digitized_ceta_inds = np.digitize(template_ceta, ceta_bins)targetind = npr.choice(xieta_bins,target_number_templates,replace=True)selected_template_obj = []selected_template_lcf = []selected_template_ndet = []selected_template_ra = []selected_template_decl = []selected_template_mag = []selected_template_mad = []selected_template_eta = []for ind in targetind:passtargetind = npr.choice(templateobj.size,target_number_templates,replace=False)templatemag = templatemag[targetind]templatemad = templatemad[targetind]templateeta = templateeta[targetind]templatendet = templatendet[targetind]templateobj = templateobj[targetind]templatelcf = templatelcf[targetind]templatera = templatera[targetind]templatedecl = templatedecl[targetind]maxndetind = templatendet == templatendet.max()timebaselcf = 
templatelcf[maxndetind][]timebasendet = templatendet[maxndetind][]LOGINFO('''' %(mcol, timebaselcf, timebasendet))if process_template_lcs:timebaselcdict = readerfunc(timebaselcf)if ( (isinstance(timebaselcdict, (list, tuple))) and(isinstance(timebaselcdict[], dict)) ):timebaselcdict = timebaselcdict[]timebase = _dict_get(timebaselcdict, tcolget)else:timebase = Noneif target_number_templates > timebasendet:LOGWARNING('''''''''''''' %(target_number_templates,timebasendet,max_target_frac_obs))newmaxtemplates = int(max_target_frac_obs*timebasendet)LOGWARNING('''' %(mcol, newmaxtemplates))targetind = npr.choice(templateobj.size,newmaxtemplates,replace=False)templatemag = templatemag[targetind]templatemad = templatemad[targetind]templateeta = templateeta[targetind]templatendet = templatendet[targetind]templateobj = templateobj[targetind]templatelcf = templatelcf[targetind]templatera = templatera[targetind]templatedecl = templatedecl[targetind]maxndetind = templatendet == templatendet.max()timebaselcf = templatelcf[maxndetind][]timebasendet = templatendet[maxndetind][]LOGWARNING('''' %(mcol, timebaselcf, timebasendet))if process_template_lcs:timebaselcdict = readerfunc(timebaselcf)if ( (isinstance(timebaselcdict, (list, tuple))) and(isinstance(timebaselcdict[], dict)) ):timebaselcdict = timebaselcdict[]timebase = _dict_get(timebaselcdict, tcolget)else:timebase = Noneif process_template_lcs:LOGINFO('''' % mcol)tasks = [(x, lcformat, lcformatdir,tcol, mcol, ecol,timebase, template_interpolate,template_sigclip) for xin templatelcf]pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)reform_results = pool.map(_reform_templatelc_for_tfa, tasks)pool.close()pool.join()template_magseries = np.array([x['']for x in reform_results])template_errseries = np.array([x['']for x in reform_results])else:template_magseries = Nonetemplate_errseries = Noneoutdict[mcol].update({'':timebaselcf,'':timebase,'':{'':magmadfit,'':magetafit},'':templateobj,'':templatera,'':templatedecl,'':templatemag,'':templatemad,'':templateeta,'':templatendet,'':template_magseries,'':template_errseries})outdict[mcol][''] = (coordutils.make_kdtree(templatera, templatedecl))else:LOGERROR('''''' % mcol)continueelse:LOGERROR('''' % (len(lcobj),mcol))continueplt.plot(lcmag, lcmad, marker='', linestyle='', ms=)modelmags = np.linspace(lcmag.min(), lcmag.max(), num=)plt.plot(modelmags, outdict[mcol][''][''](modelmags))plt.yscale('')plt.xlabel('')plt.ylabel('')plt.title('')plt.savefig('' % mcol,bbox_inches='')plt.close('')plt.plot(lcmag, lceta, marker='', linestyle='', ms=)modelmags = np.linspace(lcmag.min(), lcmag.max(), num=)plt.plot(modelmags, outdict[mcol][''][''](modelmags))plt.yscale('')plt.xlabel('')plt.ylabel('')plt.title('')plt.savefig('' % mcol,bbox_inches='')plt.close('')if outfile:if outfile.endswith(''):outfd = gzip.open(outfile,'')else:outfd = open(outfile,'')with outfd:pickle.dump(outdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)return outdict", "docstring": "This selects template objects for TFA.\n\n Selection criteria for TFA template ensemble objects:\n\n - not variable: use a poly fit to the mag-MAD relation and eta-normal\n variability index to get nonvar objects\n\n - not more than 10% of the total number of objects in the field or\n `max_tfa_templates` at most\n\n - allow shuffling of the templates if the target ends up in them\n\n - nothing with less than the median number of observations in the field\n\n - sigma-clip the input time series observations\n\n - TODO: uniform sampling in tangent plane coordinates (we'll need 
ra and\n decl)\n\n This also determines the effective cadence that all TFA LCs will be binned\n to as the template LC with the largest number of non-nan observations will\n be used. All template LCs will be renormed to zero.\n\n Parameters\n ----------\n\n lclist : list of str\n This is a list of light curves to use as input to generate the template\n set.\n\n lcinfo_pkl : str or None\n If provided, is a file path to a pickle file created by this function on\n a previous run containing the LC information. This will be loaded\n directly instead of having to re-run LC info collection.\n\n outfile : str or None\n This is the pickle filename to which the TFA template list will be\n written to. If None, a default file name will be used for this.\n\n target_template_frac : float\n This is the fraction of total objects in lclist to use for the number of\n templates.\n\n max_target_frac_obs : float\n This sets the number of templates to generate if the number of\n observations for the light curves is smaller than the number of objects\n in the collection. The number of templates will be set to this fraction\n of the number of observations if this is the case.\n\n min_template_number : int\n This is the minimum number of templates to generate.\n\n max_template_number : int\n This is the maximum number of templates to generate. If\n `target_template_frac` times the number of objects is greater than\n `max_template_number`, only `max_template_number` templates will be\n used.\n\n max_rms : float\n This is the maximum light curve RMS for an object to consider it as a\n possible template ensemble member.\n\n max_mult_above_magmad : float\n This is the maximum multiplier above the mag-RMS fit to consider an\n object as variable and thus not part of the template ensemble.\n\n max_mult_above_mageta : float\n This is the maximum multiplier above the mag-eta (variable index) fit to\n consider an object as variable and thus not part of the template\n ensemble.\n\n mag_bandpass : str\n This sets the key in the light curve dict's objectinfo dict to use as\n the canonical magnitude for the object and apply any magnitude limits\n to.\n\n custom_bandpasses : dict or None\n This can be used to provide any custom band name keys to the star\n feature collection function.\n\n mag_bright_limit : float or list of floats\n This sets the brightest mag (in the `mag_bandpass` filter) for a\n potential member of the TFA template ensemble. If this is a single\n float, the value will be used for all magcols. If this is a list of\n floats with len = len(magcols), the specific bright limits will be used\n for each magcol individually.\n\n mag_faint_limit : float or list of floats\n This sets the faintest mag (in the `mag_bandpass` filter) for a\n potential member of the TFA template ensemble. If this is a single\n float, the value will be used for all magcols. If this is a list of\n floats with len = len(magcols), the specific faint limits will be used\n for each magcol individually.\n\n process_template_lcs : bool\n If True, will reform the template light curves to the chosen\n time-base. If False, will only select light curves for templates but not\n process them. 
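The "not variable" criterion above comes down to fitting low-order polynomials to the mag-MAD and mag-eta relations and keeping objects that do not sit too far from those fits, plus a cut on the number of detections. The sketch below mirrors that logic; the polynomial order is an assumption, since the exact value was elided in this dump.

```python
import numpy as np

def select_template_candidates(lcmag, lcmad, lceta, lcndet,
                               max_mult_above_magmad=1.5,
                               max_mult_above_mageta=1.5,
                               polyorder=2):
    # fit the mag-MAD relation; objects far above it are likely variable
    magmadfit = np.poly1d(np.polyfit(lcmag, lcmad, polyorder))
    magmadind = lcmad / magmadfit(lcmag) < max_mult_above_magmad

    # fit the mag-eta relation; eta well below the fit indicates correlated
    # (variable) light curves, so the cut is on fit/eta instead
    magetafit = np.poly1d(np.polyfit(lcmag, lceta, polyorder))
    magetaind = magetafit(lcmag) / lceta < max_mult_above_mageta

    # require at least the median number of observations in the field
    ndetind = lcndet >= np.median(lcndet)

    return np.nonzero(magmadind & magetaind & ndetind)[0]
```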
This is useful for initial exploration of how the template\n LC are selected.\n\n template_sigclip : float or sequence of floats or None\n This sets the sigma-clip to be applied to the template light curves.\n\n template_interpolate : str\n This sets the kwarg to pass to `scipy.interpolate.interp1d` to set the\n kind of interpolation to use when reforming light curves to the TFA\n template timebase.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the features.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the features.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the features.\n\n nworkers : int\n The number of parallel workers to launch.\n\n maxworkertasks : int\n The maximum number of tasks to run per worker before it is replaced by a\n fresh one.\n\n Returns\n -------\n\n dict\n This function returns a dict that can be passed directly to\n `apply_tfa_magseries` below. It can optionally produce a pickle with the\n same dict, which can also be passed to that function.", "id": "f14707:m3"} {"signature": "def apply_tfa_magseries(lcfile,timecol,magcol,errcol,templateinfo,mintemplatedist_arcmin=,lcformat='',lcformatdir=None,interp='',sigclip=):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif isinstance(templateinfo,str) and os.path.exists(templateinfo):with open(templateinfo,'') as infd:templateinfo = pickle.load(infd)lcdict = readerfunc(lcfile)if ((isinstance(lcdict, (tuple, list))) andisinstance(lcdict[], dict)):lcdict = lcdict[]objectid = lcdict['']tmagseries = templateinfo[magcol][''][::]if objectid in templateinfo[magcol]['']:LOGWARNING('' %objectid)templateind = templateinfo[magcol][''] == objectidtmagseries = tmagseries[~templateind,:]object_matches = coordutils.conesearch_kdtree(templateinfo[magcol][''],lcdict[''][''], lcdict[''][''],mintemplatedist_arcmin/)if len(object_matches) > :LOGWARNING(\"\"\"\"\"\" %(objectid, mintemplatedist_arcmin, len(object_matches)))removalind = np.full(templateinfo[magcol][''].size,False, dtype=np.bool)removalind[np.array(object_matches)] = Truetmagseries = tmagseries[~removalind,:]normal_matrix = np.dot(tmagseries, tmagseries.T)normal_matrix_inverse = spla.pinv2(normal_matrix)timebase = templateinfo[magcol]['']reformed_targetlc = _reform_templatelc_for_tfa((lcfile,lcformat,lcformatdir,timecol,magcol,errcol,timebase,interp,sigclip))scalar_products = np.dot(tmagseries, reformed_targetlc[''])corrections = np.dot(normal_matrix_inverse, scalar_products)corrected_magseries = (reformed_targetlc[''] -np.dot(tmagseries.T, corrections))outdict = 
{'':timebase,'':corrected_magseries,'':reformed_targetlc[''],'':np.median(corrected_magseries),'': np.median(np.abs(corrected_magseries -np.median(corrected_magseries))),'':{'':tmagseries,'':normal_matrix,'':normal_matrix_inverse,'':scalar_products,'':corrections,'':reformed_targetlc},}lcdict[''] = outdictoutfile = os.path.join(os.path.dirname(lcfile),'' % (squeeze(objectid).replace('',''),magcol))with open(outfile,'') as outfd:pickle.dump(lcdict, outfd, pickle.HIGHEST_PROTOCOL)return outfile", "docstring": "This applies the TFA correction to an LC given TFA template information.\n\n Parameters\n ----------\n\n lcfile : str\n This is the light curve file to apply the TFA correction to.\n\n timecol,magcol,errcol : str\n These are the column keys in the lcdict for the LC file to apply the TFA\n correction to.\n\n templateinfo : dict or str\n This is either the dict produced by `tfa_templates_lclist` or the pickle\n produced by the same function.\n\n mintemplatedist_arcmin : float\n This sets the minimum distance required from the target object for\n objects in the TFA template ensemble. Objects closer than this distance\n will be removed from the ensemble.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n interp : str\n This is passed to scipy.interpolate.interp1d as the kind of\n interpolation to use when reforming this light curve to the timebase of\n the TFA templates.\n\n sigclip : float or sequence of two floats or None\n This is the sigma clip to apply to this light curve before running TFA\n on it.\n\n Returns\n -------\n\n str\n This returns the filename of the light curve file generated after TFA\n applications. This is a pickle (that can be read by `lcproc.read_pklc`)\n in the same directory as `lcfile`. 
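Stripped of the bookkeeping, the correction itself is ordinary linear least squares of the target light curve against the template ensemble, solved through the normal equations with a pseudo-inverse (the function above calls `scipy.linalg.pinv2`; the sketch uses `scipy.linalg.pinv` for the same purpose):

```python
import numpy as np
from scipy import linalg as spla

def tfa_correct(target_mags, template_mags):
    # template_mags: (ntemplates x nobs) median-subtracted template LCs on the
    # common timebase; target_mags: the target LC interpolated onto that timebase
    normal_matrix = np.dot(template_mags, template_mags.T)
    normal_matrix_inv = spla.pinv(normal_matrix)

    scalar_products = np.dot(template_mags, target_mags)
    corrections = np.dot(normal_matrix_inv, scalar_products)

    # subtract the best-fit linear combination of templates from the target
    return target_mags - np.dot(template_mags.T, corrections)
```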
The `magcol` will be encoded in the\n filename, so each `magcol` in `lcfile` gets its own output file.", "id": "f14707:m4"} {"signature": "def _parallel_tfa_worker(task):", "body": "(lcfile, timecol, magcol, errcol,templateinfo, lcformat, lcformatdir,interp, sigclip, mintemplatedist_arcmin) = tasktry:res = apply_tfa_magseries(lcfile, timecol, magcol, errcol,templateinfo,lcformat=lcformat,lcformatdir=lcformatdir,interp=interp,sigclip=sigclip,mintemplatedist_arcmin=mintemplatedist_arcmin)if res:LOGINFO('' % (lcfile, res))return resexcept Exception as e:LOGEXCEPTION('' % lcfile)return None", "docstring": "This is a parallel worker for the function below.\n\ntask[0] = lcfile\ntask[1] = timecol\ntask[2] = magcol\ntask[3] = errcol\ntask[4] = templateinfo\ntask[5] = lcformat\ntask[6] = lcformatdir\ntask[6] = interp\ntask[7] = sigclip", "id": "f14707:m5"} {"signature": "def parallel_tfa_lclist(lclist,templateinfo,timecols=None,magcols=None,errcols=None,lcformat='',lcformatdir=None,interp='',sigclip=,mintemplatedist_arcmin=,nworkers=NCPUS,maxworkertasks=):", "body": "if isinstance(templateinfo,str) and os.path.exists(templateinfo):with open(templateinfo,'') as infd:templateinfo = pickle.load(infd)try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif timecols is None:timecols = templateinfo['']if magcols is None:magcols = templateinfo['']if errcols is None:errcols = templateinfo['']outdict = {}for t, m, e in zip(timecols, magcols, errcols):tasks = [(x, t, m, e, templateinfo,lcformat, lcformatdir,interp, sigclip) forx in lclist]pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)results = pool.map(_parallel_tfa_worker, tasks)pool.close()pool.join()outdict[m] = resultsreturn outdict", "docstring": "This applies TFA in parallel to all LCs in the given list of file names.\n\n Parameters\n ----------\n\n lclist : str\n This is a list of light curve files to apply TFA correction to.\n\n templateinfo : dict or str\n This is either the dict produced by `tfa_templates_lclist` or the pickle\n produced by the same function.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in applying TFA corrections.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in applying TFA corrections.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in applying TFA corrections.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. 
Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n interp : str\n This is passed to scipy.interpolate.interp1d as the kind of\n interpolation to use when reforming the light curves to the timebase of\n the TFA templates.\n\n sigclip : float or sequence of two floats or None\n This is the sigma clip to apply to the light curves before running TFA\n on them.\n\n mintemplatedist_arcmin : float\n This sets the minimum distance required from the target object for\n objects in the TFA template ensemble. Objects closer than this distance\n will be removed from the ensemble.\n\n nworkers : int\n The number of parallel workers to launch.\n\n maxworkertasks : int\n The maximum number of tasks per worker allowed before it's replaced by a\n fresh one.\n\n Returns\n -------\n\n dict\n Contains the input file names and output TFA light curve filenames per\n input file organized by each `magcol` in `magcols`.", "id": "f14707:m6"} {"signature": "def parallel_tfa_lcdir(lcdir,templateinfo,lcfileglob=None,timecols=None,magcols=None,errcols=None,lcformat='',lcformatdir=None,interp='',sigclip=,mintemplatedist_arcmin=,nworkers=NCPUS,maxworkertasks=):", "body": "if isinstance(templateinfo,str) and os.path.exists(templateinfo):with open(templateinfo,'') as infd:templateinfo = pickle.load(infd)try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif lcfileglob is None:lcfileglob = dfilegloblclist = sorted(glob.glob(os.path.join(lcdir, lcfileglob)))return parallel_tfa_lclist(lclist,templateinfo,timecols=timecols,magcols=magcols,errcols=errcols,lcformat=lcformat,lcformatdir=lcformatdir,interp=interp,sigclip=sigclip,mintemplatedist_arcmin=mintemplatedist_arcmin,nworkers=nworkers,maxworkertasks=maxworkertasks)", "docstring": "This applies TFA in parallel to all LCs in a directory.\n\n Parameters\n ----------\n\n lcdir : str\n This is the directory containing the light curve files to process.\n\n templateinfo : dict or str\n This is either the dict produced by `tfa_templates_lclist` or the pickle\n produced by the same function.\n\n lcfileglob : str or None\n The UNIX file glob to use when searching for light curve files in\n `lcdir`. If None, the default file glob associated with the registered LC\n format is used.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in applying TFA corrections.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in applying TFA corrections.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in applying TFA corrections.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory where you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. 
Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n interp : str\n This is passed to scipy.interpolate.interp1d as the kind of\n interpolation to use when reforming the light curves to the timebase of\n the TFA templates.\n\n sigclip : float or sequence of two floats or None\n This is the sigma clip to apply to the light curves before running TFA\n on it.\n\n mintemplatedist_arcmin : float\n This sets the minimum distance required from the target object for\n objects in the TFA template ensemble. Objects closer than this distance\n will be removed from the ensemble.\n\n nworkers : int\n The number of parallel workers to launch\n\n maxworkertasks : int\n The maximum number of tasks per worker allowed before it's replaced by a\n fresh one.\n\n Returns\n -------\n\n dict\n Contains the input file names and output TFA light curve filenames per\n input file organized by each `magcol` in `magcols`.", "id": "f14707:m7"} {"signature": "def variability_threshold(featuresdir,outfile,magbins=DEFAULT_MAGBINS,maxobjects=None,timecols=None,magcols=None,errcols=None,lcformat='',lcformatdir=None,min_lcmad_stdev=,min_stetj_stdev=,min_iqr_stdev=,min_inveta_stdev=,verbose=True):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif timecols is None:timecols = dtimecolsif magcols is None:magcols = dmagcolsif errcols is None:errcols = derrcolspklist = glob.glob(os.path.join(featuresdir, ''))if maxobjects:pklist = pklist[:maxobjects]allobjects = {}for magcol in magcols:if (isinstance(min_stetj_stdev, list) orisinstance(min_stetj_stdev, np.ndarray)):magcol_min_stetj_stdev = min_stetj_stdev[::]else:magcol_min_stetj_stdev = min_stetj_stdevif (isinstance(min_iqr_stdev, list) orisinstance(min_iqr_stdev, np.ndarray)):magcol_min_iqr_stdev = min_iqr_stdev[::]else:magcol_min_iqr_stdev = min_iqr_stdevif (isinstance(min_inveta_stdev, list) orisinstance(min_inveta_stdev, np.ndarray)):magcol_min_inveta_stdev = min_inveta_stdev[::]else:magcol_min_inveta_stdev = min_inveta_stdevLOGINFO('')allobjects[magcol] = {'':[],'':[],'':[],'':[],'':[],'':[]}if TQDM and verbose:listiterator = tqdm(pklist)else:listiterator = pklistfor pkl in listiterator:with open(pkl,'') as infd:thisfeatures = pickle.load(infd)objectid = thisfeatures['']if ('' in thisfeatures andthisfeatures[''] and'' in thisfeatures['']):if (thisfeatures[''][''] andthisfeatures[''][''] > ):sdssr = thisfeatures['']['']elif (magcol in thisfeatures andthisfeatures[magcol] and'' in thisfeatures[magcol] andthisfeatures[magcol][''] > ):sdssr = thisfeatures[magcol]['']elif (thisfeatures[''][''] andthisfeatures[''][''] andthisfeatures['']['']):sdssr = jhk_to_sdssr(thisfeatures[''][''],thisfeatures[''][''],thisfeatures[''][''])else:sdssr = np.nanelse:sdssr = np.nanif (magcol in thisfeatures andthisfeatures[magcol] andthisfeatures[magcol]['']):lcmad = thisfeatures[magcol]['']else:lcmad = np.nanif (magcol in thisfeatures andthisfeatures[magcol] andthisfeatures[magcol]['']):stetsonj = thisfeatures[magcol]['']else:stetsonj = np.nanif (magcol in thisfeatures andthisfeatures[magcol] andthisfeatures[magcol]['']):iqr = thisfeatures[magcol]['']else:iqr = np.nanif (magcol in thisfeatures andthisfeatures[magcol] andthisfeatures[magcol]['']):eta = thisfeatures[magcol]['']else:eta = 
np.nanallobjects[magcol][''].append(objectid)allobjects[magcol][''].append(sdssr)allobjects[magcol][''].append(lcmad)allobjects[magcol][''].append(stetsonj)allobjects[magcol][''].append(iqr)allobjects[magcol][''].append(eta)LOGINFO('')allobjects[magcol][''] = np.ravel(np.array(allobjects[magcol]['']))allobjects[magcol][''] = np.ravel(np.array(allobjects[magcol]['']))allobjects[magcol][''] = np.ravel(np.array(allobjects[magcol]['']))allobjects[magcol][''] = np.ravel(np.array(allobjects[magcol]['']))allobjects[magcol][''] = np.ravel(np.array(allobjects[magcol]['']))allobjects[magcol][''] = np.ravel(np.array(allobjects[magcol]['']))thisfinind = (np.isfinite(allobjects[magcol]['']) &np.isfinite(allobjects[magcol]['']) &np.isfinite(allobjects[magcol]['']) &np.isfinite(allobjects[magcol]['']) &np.isfinite(allobjects[magcol]['']))allobjects[magcol][''] = allobjects[magcol][''][thisfinind]allobjects[magcol][''] = allobjects[magcol][''][thisfinind]allobjects[magcol][''] = allobjects[magcol][''][thisfinind]allobjects[magcol][''] = allobjects[magcol][''][thisfinind]allobjects[magcol][''] = allobjects[magcol][''][thisfinind]allobjects[magcol][''] = allobjects[magcol][''][thisfinind]allobjects[magcol][''] = /allobjects[magcol]['']magbininds = np.digitize(allobjects[magcol][''],magbins)binned_objectids = []binned_sdssr = []binned_sdssr_median = []binned_lcmad = []binned_stetsonj = []binned_iqr = []binned_inveta = []binned_count = []binned_objectids_thresh_stetsonj = []binned_objectids_thresh_iqr = []binned_objectids_thresh_inveta = []binned_objectids_thresh_all = []binned_lcmad_median = []binned_lcmad_stdev = []binned_stetsonj_median = []binned_stetsonj_stdev = []binned_inveta_median = []binned_inveta_stdev = []binned_iqr_median = []binned_iqr_stdev = []for mbinind, magi in zip(np.unique(magbininds),range(len(magbins)-)):thisbinind = np.where(magbininds == mbinind)thisbin_sdssr_median = (magbins[magi] + magbins[magi+])/binned_sdssr_median.append(thisbin_sdssr_median)thisbin_objectids = allobjects[magcol][''][thisbinind]thisbin_sdssr = allobjects[magcol][''][thisbinind]thisbin_lcmad = allobjects[magcol][''][thisbinind]thisbin_stetsonj = allobjects[magcol][''][thisbinind]thisbin_iqr = allobjects[magcol][''][thisbinind]thisbin_inveta = allobjects[magcol][''][thisbinind]thisbin_count = thisbin_objectids.sizeif thisbin_count > :thisbin_lcmad_median = np.median(thisbin_lcmad)thisbin_lcmad_stdev = np.median(np.abs(thisbin_lcmad - thisbin_lcmad_median)) * binned_lcmad_median.append(thisbin_lcmad_median)binned_lcmad_stdev.append(thisbin_lcmad_stdev)thisbin_stetsonj_median = np.median(thisbin_stetsonj)thisbin_stetsonj_stdev = np.median(np.abs(thisbin_stetsonj - thisbin_stetsonj_median)) * binned_stetsonj_median.append(thisbin_stetsonj_median)binned_stetsonj_stdev.append(thisbin_stetsonj_stdev)if isinstance(magcol_min_stetj_stdev, float):thisbin_objectids_thresh_stetsonj = thisbin_objectids[thisbin_stetsonj > (thisbin_stetsonj_median +magcol_min_stetj_stdev*thisbin_stetsonj_stdev)]elif (isinstance(magcol_min_stetj_stdev, np.ndarray) orisinstance(magcol_min_stetj_stdev, list)):thisbin_min_stetj_stdev = magcol_min_stetj_stdev[magi]if not np.isfinite(thisbin_min_stetj_stdev):LOGWARNING('''' %thisbin_sdssr_median)thisbin_min_stetj_stdev = magcol_min_stetj_stdev[magi] = thisbin_objectids_thresh_stetsonj = thisbin_objectids[thisbin_stetsonj > (thisbin_stetsonj_median +thisbin_min_stetj_stdev*thisbin_stetsonj_stdev)]thisbin_iqr_median = np.median(thisbin_iqr)thisbin_iqr_stdev = np.median(np.abs(thisbin_iqr - 
thisbin_iqr_median)) * binned_iqr_median.append(thisbin_iqr_median)binned_iqr_stdev.append(thisbin_iqr_stdev)if isinstance(magcol_min_iqr_stdev, float):thisbin_objectids_thresh_iqr = thisbin_objectids[thisbin_iqr > (thisbin_iqr_median +magcol_min_iqr_stdev*thisbin_iqr_stdev)]elif (isinstance(magcol_min_iqr_stdev, np.ndarray) orisinstance(magcol_min_iqr_stdev, list)):thisbin_min_iqr_stdev = magcol_min_iqr_stdev[magi]if not np.isfinite(thisbin_min_iqr_stdev):LOGWARNING('''' %thisbin_sdssr_median)thisbin_min_iqr_stdev = magcol_min_iqr_stdev[magi] = thisbin_objectids_thresh_iqr = thisbin_objectids[thisbin_iqr > (thisbin_iqr_median +thisbin_min_iqr_stdev*thisbin_iqr_stdev)]thisbin_inveta_median = np.median(thisbin_inveta)thisbin_inveta_stdev = np.median(np.abs(thisbin_inveta - thisbin_inveta_median)) * binned_inveta_median.append(thisbin_inveta_median)binned_inveta_stdev.append(thisbin_inveta_stdev)if isinstance(magcol_min_inveta_stdev, float):thisbin_objectids_thresh_inveta = thisbin_objectids[thisbin_inveta > (thisbin_inveta_median +magcol_min_inveta_stdev*thisbin_inveta_stdev)]elif (isinstance(magcol_min_inveta_stdev, np.ndarray) orisinstance(magcol_min_inveta_stdev, list)):thisbin_min_inveta_stdev = magcol_min_inveta_stdev[magi]if not np.isfinite(thisbin_min_inveta_stdev):LOGWARNING('''' %thisbin_sdssr_median)thisbin_min_inveta_stdev = magcol_min_inveta_stdev[magi] = thisbin_objectids_thresh_inveta = thisbin_objectids[thisbin_inveta > (thisbin_inveta_median +thisbin_min_inveta_stdev*thisbin_inveta_stdev)]else:thisbin_objectids_thresh_stetsonj = (np.array([],dtype=np.unicode_))thisbin_objectids_thresh_iqr = (np.array([],dtype=np.unicode_))thisbin_objectids_thresh_inveta = (np.array([],dtype=np.unicode_))thisbin_objectids_thresh_all = reduce(np.intersect1d,(thisbin_objectids_thresh_stetsonj,thisbin_objectids_thresh_iqr,thisbin_objectids_thresh_inveta))binned_objectids.append(thisbin_objectids)binned_sdssr.append(thisbin_sdssr)binned_lcmad.append(thisbin_lcmad)binned_stetsonj.append(thisbin_stetsonj)binned_iqr.append(thisbin_iqr)binned_inveta.append(thisbin_inveta)binned_count.append(thisbin_objectids.size)binned_objectids_thresh_stetsonj.append(thisbin_objectids_thresh_stetsonj)binned_objectids_thresh_iqr.append(thisbin_objectids_thresh_iqr)binned_objectids_thresh_inveta.append(thisbin_objectids_thresh_inveta)binned_objectids_thresh_all.append(thisbin_objectids_thresh_all)allobjects[magcol][''] = magbinsallobjects[magcol][''] = binned_objectidsallobjects[magcol][''] = binned_sdssr_medianallobjects[magcol][''] = binned_sdssrallobjects[magcol][''] = binned_countallobjects[magcol][''] = binned_lcmadallobjects[magcol][''] = binned_lcmad_medianallobjects[magcol][''] = binned_lcmad_stdevallobjects[magcol][''] = binned_stetsonjallobjects[magcol][''] = binned_stetsonj_medianallobjects[magcol][''] = binned_stetsonj_stdevallobjects[magcol][''] = binned_iqrallobjects[magcol][''] = binned_iqr_medianallobjects[magcol][''] = binned_iqr_stdevallobjects[magcol][''] = binned_invetaallobjects[magcol][''] = binned_inveta_medianallobjects[magcol][''] = binned_inveta_stdevallobjects[magcol][''] = (binned_objectids_thresh_stetsonj)allobjects[magcol][''] = (binned_objectids_thresh_iqr)allobjects[magcol][''] = (binned_objectids_thresh_inveta)allobjects[magcol][''] = (binned_objectids_thresh_all)try:allobjects[magcol][''] = np.unique(np.concatenate(allobjects[magcol]['']))except ValueError:LOGWARNING('')allobjects[magcol][''] = (np.array([]))allobjects[magcol][''] = 
np.unique(np.concatenate(allobjects[magcol]['']))allobjects[magcol][''] = np.unique(np.concatenate(allobjects[magcol]['']))allobjects[magcol][''] = np.unique(np.concatenate(allobjects[magcol]['']))if isinstance(min_stetj_stdev, list):allobjects[magcol][''] = np.array(magcol_min_stetj_stdev)else:allobjects[magcol][''] = magcol_min_stetj_stdevif isinstance(min_iqr_stdev, list):allobjects[magcol][''] = np.array(magcol_min_iqr_stdev)else:allobjects[magcol][''] = magcol_min_iqr_stdevif isinstance(min_inveta_stdev, list):allobjects[magcol][''] = np.array(magcol_min_inveta_stdev)else:allobjects[magcol][''] = magcol_min_inveta_stdevallobjects[magcol][''] = min_lcmad_stdevallobjects[''] = magbinswith open(outfile,'') as outfd:pickle.dump(allobjects, outfd, protocol=pickle.HIGHEST_PROTOCOL)return allobjects", "docstring": "This generates a list of objects with stetson J, IQR, and 1.0/eta\n above some threshold value to select them as potential variable stars.\n\n Use this to pare down the objects to review and put through\n period-finding. This does the thresholding per magnitude bin; this should be\n better than one single cut through the entire magnitude range. Set the\n magnitude bins using the magbins kwarg.\n\n FIXME: implement a voting classifier here. this will choose variables based\n on the thresholds in IQR, stetson, and inveta based on weighting carried\n over from the variability recovery sims.\n\n Parameters\n ----------\n\n featuresdir : str\n This is the directory containing variability feature pickles created by\n :py:func:`astrobase.lcproc.lcpfeatures.parallel_varfeatures` or similar.\n\n outfile : str\n This is the output pickle file that will contain all the threshold\n information.\n\n magbins : np.array of floats\n This sets the magnitude bins to use for calculating thresholds.\n\n maxobjects : int or None\n This is the number of objects to process. If None, all objects with\n feature pickles in `featuresdir` will be processed.\n\n timecols : list of str or None\n The timecol keys to use from the lcdict in calculating the thresholds.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict in calculating the thresholds.\n\n errcols : list of str or None\n The errcol keys to use from the lcdict in calculating the thresholds.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n min_lcmad_stdev,min_stetj_stdev,min_iqr_stdev,min_inveta_stdev : float or np.array\n These are all the standard deviation multiplier for the distributions of\n light curve standard deviation, Stetson J variability index, the light\n curve interquartile range, and 1/eta variability index\n respectively. These multipliers set the minimum values of these measures\n to use for selecting variable stars. If provided as floats, the same\n value will be used for all magbins. 
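\n\n For example, a hedged sketch of a call that uses a single sigma multiplier\n for every magnitude bin; the import path assumes the `astrobase.lcproc`\n submodule layout, and the directories, output pickle, and `lcformat` key\n are placeholders::\n\n from astrobase.lcproc.varthreshold import variability_threshold\n\n # featuresdir holds the variability-feature pickles made earlier\n varthresh = variability_threshold(\n '/path/to/varfeatures-pickles',\n 'variability-thresholds.pkl',\n lcformat='my-lcformat',\n min_stetj_stdev=2.0,\n min_iqr_stdev=2.0,\n min_inveta_stdev=2.0,\n )\n\n 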
If provided as np.arrays of `size =\n magbins.size - 1`, will be used to apply possibly different sigma cuts\n for each magbin.\n\n verbose : bool\n If True, will report progress and warn about any problems.\n\n Returns\n -------\n\n dict\n Contains all of the variability threshold information along with indices\n into the array of the object IDs chosen as variables.", "id": "f14708:m1"} {"signature": "def plot_variability_thresholds(varthreshpkl,xmin_lcmad_stdev=,xmin_stetj_stdev=,xmin_iqr_stdev=,xmin_inveta_stdev=,lcformat='',lcformatdir=None,magcols=None):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif magcols is None:magcols = dmagcolswith open(varthreshpkl,'') as infd:allobjects = pickle.load(infd)magbins = allobjects['']for magcol in magcols:min_lcmad_stdev = (xmin_lcmad_stdev or allobjects[magcol][''])min_stetj_stdev = (xmin_stetj_stdev or allobjects[magcol][''])min_iqr_stdev = (xmin_iqr_stdev or allobjects[magcol][''])min_inveta_stdev = (xmin_inveta_stdev or allobjects[magcol][''])fig = plt.figure(figsize=(,))plt.subplot()plt.plot(allobjects[magcol][''],allobjects[magcol]['']*,marker='',ms=, linestyle='',rasterized=True)plt.plot(allobjects[magcol][''],np.array(allobjects[magcol][''])*,linewidth=)plt.plot(allobjects[magcol][''],np.array(allobjects[magcol][''])* +min_lcmad_stdev*np.array(allobjects[magcol]['']),linewidth=, linestyle='')plt.xlim((magbins.min()-, magbins.max()))plt.xlabel('')plt.ylabel(r'')plt.title('' % magcol)plt.yscale('')plt.tight_layout()plt.subplot()plt.plot(allobjects[magcol][''],allobjects[magcol][''],marker='',ms=, linestyle='',rasterized=True)plt.plot(allobjects[magcol][''],allobjects[magcol][''],linewidth=)plt.plot(allobjects[magcol][''],np.array(allobjects[magcol]['']) +min_stetj_stdev*np.array(allobjects[magcol]['']),linewidth=, linestyle='')plt.xlim((magbins.min()-, magbins.max()))plt.xlabel('')plt.ylabel('')plt.title('' % magcol)plt.yscale('')plt.tight_layout()plt.subplot()plt.plot(allobjects[magcol][''],allobjects[magcol][''],marker='',ms=, linestyle='',rasterized=True)plt.plot(allobjects[magcol][''],allobjects[magcol][''],linewidth=)plt.plot(allobjects[magcol][''],np.array(allobjects[magcol]['']) +min_iqr_stdev*np.array(allobjects[magcol]['']),linewidth=, linestyle='')plt.xlabel('')plt.ylabel('')plt.title('' % magcol)plt.xlim((magbins.min()-, magbins.max()))plt.yscale('')plt.tight_layout()plt.subplot()plt.plot(allobjects[magcol][''],allobjects[magcol][''],marker='',ms=, linestyle='',rasterized=True)plt.plot(allobjects[magcol][''],allobjects[magcol][''],linewidth=)plt.plot(allobjects[magcol][''],np.array(allobjects[magcol]['']) +min_inveta_stdev*np.array(allobjects[magcol]['']),linewidth=, linestyle='')plt.xlabel('')plt.ylabel(r'')plt.title(r'' % magcol)plt.xlim((magbins.min()-, magbins.max()))plt.yscale('')plt.tight_layout()plt.savefig('' % (varthreshpkl,magcol),bbox_inches='')plt.close('')", "docstring": "This makes plots for the variability threshold distributions.\n\n Parameters\n ----------\n\n varthreshpkl : str\n The pickle produced by the function above.\n\n xmin_lcmad_stdev,xmin_stetj_stdev,xmin_iqr_stdev,xmin_inveta_stdev : float or np.array\n Values of the threshold values to override the ones in the\n `vartresholdpkl`. 
If provided, will plot the thresholds accordingly\n instead of using the ones in the input pickle directly.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n magcols : list of str or None\n The magcol keys to use from the lcdict.\n\n Returns\n -------\n\n str\n The file name of the threshold plot generated.", "id": "f14708:m2"} {"signature": "def _lclist_parallel_worker(task):", "body": "lcf, columns, lcformat, lcformatdir, lcndetkey = tasktry:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Nonelcobjdict = {'':os.path.abspath(lcf)}try:lcdict = readerfunc(lcf)if ( (isinstance(lcdict, (list, tuple))) and(isinstance(lcdict[], dict)) ):lcdict = lcdict[]for colkey in columns:if '' in colkey:getkey = colkey.split('')else:getkey = [colkey]try:thiscolval = _dict_get(lcdict, getkey)except Exception as e:LOGWARNING('' %(colkey, lcf))thiscolval = np.nanlcobjdict[getkey[-]] = thiscolvalexcept Exception as e:LOGEXCEPTION('' % lcf)for colkey in columns:if '' in colkey:getkey = colkey.split('')else:getkey = [colkey]thiscolval = np.nanlcobjdict[getkey[-]] = thiscolvalfor dk in lcndetkey:try:if '' in dk:getdk = dk.split('')else:getdk = [dk]ndetcol = _dict_get(lcdict, getdk)actualndets = ndetcol[np.isfinite(ndetcol)].sizelcobjdict['' % getdk[-]] = actualndetsexcept Exception as e:lcobjdict['' % getdk[-]] = np.nanreturn lcobjdict", "docstring": "This is a parallel worker for makelclist.\n\n Parameters\n ----------\n\n task : tuple\n This is a tuple containing the following items:\n\n task[0] = lcf\n task[1] = columns\n task[2] = lcformat\n task[3] = lcformatdir\n task[4] = lcndetkey\n\n Returns\n -------\n\n dict or None\n This contains all of the info for the object processed in this LC read\n operation. 
If this fails, returns None", "id": "f14709:m1"} {"signature": "def make_lclist(basedir,outfile,use_list_of_filenames=None,lcformat='',lcformatdir=None,fileglob=None,recursive=True,columns=['','','',''],makecoordindex=('',''),field_fitsfile=None,field_wcsfrom=None,field_scale=ZScaleInterval(),field_stretch=LinearStretch(),field_colormap=plt.cm.gray_r,field_findersize=None,field_pltopts={'':'','':,'':'','':,'':''},field_grid=False,field_gridcolor='',field_zoomcontain=True,maxlcs=None,nworkers=NCPUS):", "body": "try:formatinfo = get_lcformat(lcformat,use_lcformat_dir=lcformatdir)if formatinfo:(dfileglob, readerfunc,dtimecols, dmagcols, derrcols,magsarefluxes, normfunc) = formatinfoelse:LOGERROR(\"\")return Noneexcept Exception as e:LOGEXCEPTION(\"\")return Noneif not fileglob:fileglob = dfilegloblcndetkey = dmagcolsif isinstance(use_list_of_filenames, list):matching = use_list_of_filenameselse:if isinstance(basedir, list):matching = []for bdir in basedir:LOGINFO('' % (lcformat,bdir))if recursive is False:matching.extend(glob.glob(os.path.join(bdir, fileglob)))else:if sys.version_info[:] > (,):matching.extend(glob.glob(os.path.join(bdir,'',fileglob),recursive=True))else:walker = os.walk(bdir)for root, dirs, _files in walker:for sdir in dirs:searchpath = os.path.join(root,sdir,fileglob)foundfiles = glob.glob(searchpath)if foundfiles:matching.extend(foundfiles)else:LOGINFO('' %(lcformat, basedir))if recursive is False:matching = glob.glob(os.path.join(basedir, fileglob))else:if sys.version_info[:] > (,):matching = glob.glob(os.path.join(basedir,'',fileglob),recursive=True)else:walker = os.walk(basedir)matching = []for root, dirs, _files in walker:for sdir in dirs:searchpath = os.path.join(root,sdir,fileglob)foundfiles = glob.glob(searchpath)if foundfiles:matching.extend(foundfiles)if matching and len(matching) > :LOGINFO('' % len(matching))if maxlcs:matching = matching[:maxlcs]lclistdict = {'':basedir,'':lcformat,'':fileglob,'':recursive,'':columns,'':makecoordindex,'':len(matching),'': {}}derefcols = ['']derefcols.extend(['' % x.split('')[-] for x in lcndetkey])for dc in derefcols:lclistdict[''][dc] = []for col in columns:thiscol = col.split('')thiscol = thiscol[-]lclistdict[''][thiscol] = []derefcols.append(thiscol)LOGINFO('')tasks = [(x, columns, lcformat, lcformatdir, lcndetkey)for x in matching]with ProcessPoolExecutor(max_workers=nworkers) as executor:results = executor.map(_lclist_parallel_worker, tasks)results = [x for x in results]for result in results:for xcol in derefcols:lclistdict[''][xcol].append(result[xcol])executor.shutdown()for col in lclistdict['']:lclistdict[''][col] = np.array(lclistdict[''][col])uniques, counts = np.unique(lclistdict[''][''],return_counts=True)duplicated_objectids = uniques[counts > ]if duplicated_objectids.size > :dt = lclistdict[''][''].dtype.strdt = '' % (int(dt.replace('','').replace('','').replace('','')) + )lclistdict[''][''] = np.array(lclistdict[''][''],dtype=dt)for objid in duplicated_objectids:objid_inds = np.where(lclistdict[''][''] == objid)for ncounter, nind in enumerate(objid_inds[][:]):lclistdict[''][''][nind] = '' % (lclistdict[''][''][nind],ncounter+)LOGWARNING('''' %(ncounter+, objid, objid, ncounter+,lclistdict[''][''][nind]))if (makecoordindex andisinstance(makecoordindex, (list, tuple)) andlen(makecoordindex) == ):try:racol, declcol = makecoordindexracol = racol.split('')[-]declcol = declcol.split('')[-]objra, objdecl = (lclistdict[''][racol],lclistdict[''][declcol])cosdecl = np.cos(np.radians(objdecl))sindecl = 
np.sin(np.radians(objdecl))cosra = np.cos(np.radians(objra))sinra = np.sin(np.radians(objra))xyz = np.column_stack((cosra*cosdecl,sinra*cosdecl, sindecl))kdt = sps.cKDTree(xyz,copy_data=True)lclistdict[''] = kdtLOGINFO('' %(makecoordindex[], makecoordindex[]))except Exception as e:LOGEXCEPTION('' %(makecoordindex[], makecoordindex[]))raiseif field_fitsfile and os.path.exists(field_fitsfile):if field_wcsfrom is None:hdulist = pyfits.open(field_fitsfile)hdr = hdulist[].headerhdulist.close()w = WCS(hdr)wcsok = Trueelif os.path.exists(field_wcsfrom):w = WCS(field_wcsfrom)wcsok = Trueelse:LOGERROR('' %field_fitsfile)wcsok = Falseif wcsok:radecl = np.column_stack((objra, objdecl))lclistdict[''][''] = w.all_world2pix(radecl,)finder_outfile = os.path.join(os.path.dirname(outfile),os.path.splitext(os.path.basename(outfile))[] + '')finder_png = fits_finder_chart(field_fitsfile,finder_outfile,wcsfrom=field_wcsfrom,scale=field_scale,stretch=field_stretch,colormap=field_colormap,findersize=field_findersize,overlay_ra=objra,overlay_decl=objdecl,overlay_pltopts=field_pltopts,overlay_zoomcontain=field_zoomcontain,grid=field_grid,gridcolor=field_gridcolor)if finder_png is not None:LOGINFO('''''' % finder_png)with open(outfile,'') as outfd:pickle.dump(lclistdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)LOGINFO('' % outfile)return outfileelse:LOGERROR('' % (basedir, fileglob))return None", "docstring": "This generates a light curve catalog for all light curves in a directory.\n\n Given a base directory where all the files are, and a light curve format,\n this will find all light curves, pull out the keys in each lcdict requested\n in the `columns` kwarg for each object, and write them to the requested\n output pickle file. These keys should be pointers to scalar values\n (i.e. something like `objectinfo.ra` is OK, but something like 'times' won't\n work because it's a vector).\n\n Generally, this works with light curve reading functions that produce\n lcdicts as detailed in the docstring for `lcproc.register_lcformat`. Once\n you've registered your light curve reader functions using the\n `lcproc.register_lcformat` function, pass in the `formatkey` associated with\n your light curve format, and this function will be able to read all light\n curves in that format as well as the object information stored in their\n `objectinfo` dict.\n\n Parameters\n ----------\n\n basedir : str or list of str\n If this is a str, points to a single directory to search for light\n curves. If this is a list of str, it must be a list of directories to\n search for light curves. All of these will be searched to find light\n curve files matching either your light curve format's default fileglob\n (when you registered your LC format), or a specific fileglob that you\n can pass in using the `fileglob` kwargh here. If the `recursive` kwarg\n is set, the provided directories will be searched recursively.\n\n If `use_list_of_filenames` is not None, it will override this argument\n and the function will take those light curves as the list of files it\n must process instead of whatever is specified in `basedir`.\n\n outfile : str\n This is the name of the output file to write. This will be a pickle\n file, so a good convention to use for this name is something like\n 'my-lightcurve-catalog.pkl'.\n\n use_list_of_filenames : list of str or None\n Use this kwarg to override whatever is provided in `basedir` and\n directly pass in a list of light curve files to process. 
This can speed\n up this function by a lot because no searches on disk will be performed\n to find light curve files matching `basedir` and `fileglob`.\n\n lcformat : str\n This is the `formatkey` associated with your light curve format, which\n you previously passed in to the `lcproc.register_lcformat`\n function. This will be used to look up how to find and read the light\n curves specified in `basedir` or `use_list_of_filenames`.\n\n lcformatdir : str or None\n If this is provided, gives the path to a directory when you've stored\n your lcformat description JSONs, other than the usual directories lcproc\n knows to search for them in. Use this along with `lcformat` to specify\n an LC format JSON file that's not currently registered with lcproc.\n\n fileglob : str or None\n If provided, is a string that is a valid UNIX filename glob. Used to\n override the default fileglob for this LC format when searching for\n light curve files in `basedir`.\n\n recursive : bool\n If True, the directories specified in `basedir` will be searched\n recursively for all light curve files that match the default fileglob\n for this LC format or a specific one provided in `fileglob`.\n\n columns : list of str\n This is a list of keys in the lcdict produced by your light curve reader\n function that contain object information, which will be extracted and\n put into the output light curve catalog. It's highly recommended that\n your LC reader function produce a lcdict that contains at least the\n default keys shown here.\n\n The lcdict keys to extract are specified by using an address scheme:\n\n - First level dict keys can be specified directly:\n e.g., 'objectid' will extract lcdict['objectid']\n - Keys at other levels can be specified by using a period to indicate\n the level:\n\n - e.g., 'objectinfo.ra' will extract lcdict['objectinfo']['ra']\n - e.g., 'objectinfo.varinfo.features.stetsonj' will extract\n lcdict['objectinfo']['varinfo']['features']['stetsonj']\n\n makecoordindex : list of two str or None\n This is used to specify which lcdict keys contain the right ascension\n and declination coordinates for this object. If these are provided, the\n output light curve catalog will have a kdtree built on all object\n coordinates, which enables fast spatial searches and cross-matching to\n external catalogs by `checkplot` and `lcproc` functions.\n\n field_fitsfile : str or None\n If this is not None, it should be the path to a FITS image containing\n the objects these light curves are for. If this is provided,\n `make_lclist` will use the WCS information in the FITS itself if\n `field_wcsfrom` is None (or from a WCS header file pointed to by\n `field_wcsfrom`) to obtain x and y pixel coordinates for all of the\n objects in the field. A finder chart will also be made using\n `astrobase.plotbase.fits_finder_chart` using the corresponding\n `field_scale`, `_stretch`, `_colormap`, `_findersize`, `_pltopts`,\n `_grid`, and `_gridcolors` kwargs for that function, reproduced here to\n enable customization of the finder chart plot.\n\n field_wcsfrom : str or None\n If `wcsfrom` is None, the WCS to transform the RA/Dec to pixel x/y will\n be taken from the FITS header of `fitsfile`. If this is not None, it\n must be a FITS or similar file that contains a WCS header in its first\n extension.\n\n field_scale : astropy.visualization.Interval object\n `scale` sets the normalization for the FITS pixel values. 
This is an\n astropy.visualization Interval object.\n See http://docs.astropy.org/en/stable/visualization/normalization.html\n for details on `scale` and `stretch` objects.\n\n field_stretch : astropy.visualization.Stretch object\n `stretch` sets the stretch function for mapping FITS pixel values to\n output pixel values. This is an astropy.visualization Stretch object.\n See http://docs.astropy.org/en/stable/visualization/normalization.html\n for details on `scale` and `stretch` objects.\n\n field_colormap : matplotlib Colormap object\n `colormap` is a matplotlib color map object to use for the output image.\n\n field_findersize : None or tuple of two ints\n If `findersize` is None, the output image size will be set by the NAXIS1\n and NAXIS2 keywords in the input `fitsfile` FITS header. Otherwise,\n `findersize` must be a tuple with the intended x and y size of the image\n in inches (all output images will use a DPI = 100).\n\n field_pltopts : dict\n `field_pltopts` controls how the overlay points will be plotted. This\n a dict with standard matplotlib marker, etc. kwargs as key-val pairs,\n e.g. 'markersize', 'markerfacecolor', etc. The default options make red\n outline circles at the location of each object in the overlay.\n\n field_grid : bool\n `grid` sets if a grid will be made on the output image.\n\n field_gridcolor : str\n `gridcolor` sets the color of the grid lines. This is a usual matplotib\n color spec string.\n\n field_zoomcontain : bool\n `field_zoomcontain` controls if the finder chart will be zoomed to\n just contain the overlayed points. Everything outside the footprint of\n these points will be discarded.\n\n maxlcs : int or None\n This sets how many light curves to process in the input LC list\n generated by searching for LCs in `basedir` or in the list provided as\n `use_list_of_filenames`.\n\n nworkers : int\n This sets the number of parallel workers to launch to collect\n information from the light curves.\n\n Returns\n -------\n\n str\n Returns the path to the generated light curve catalog pickle file.", "id": "f14709:m2"} {"signature": "def filter_lclist(lc_catalog,objectidcol='',racol='',declcol='',xmatchexternal=None,xmatchdistarcsec=,externalcolnums=(,,),externalcolnames=['','',''],externalcoldtypes='',externalcolsep=None,externalcommentchar='',conesearch=None,conesearchworkers=,columnfilters=None,field_fitsfile=None,field_wcsfrom=None,field_scale=ZScaleInterval(),field_stretch=LinearStretch(),field_colormap=plt.cm.gray_r,field_findersize=None,field_pltopts={'':'','':,'':'','':,'':''},field_grid=False,field_gridcolor='',field_zoomcontain=True,copylcsto=None):", "body": "with open(lc_catalog,'') as infd:lclist = pickle.load(infd)xmatch_matching_index = np.full_like(lclist[''][objectidcol],False,dtype=np.bool)conesearch_matching_index = np.full_like(lclist[''][objectidcol],False,dtype=np.bool)ext_matches = []ext_matching_objects = []if (xmatchexternal andisinstance(xmatchexternal, str) andos.path.exists(xmatchexternal)):try:extcat = np.genfromtxt(xmatchexternal,usecols=externalcolnums,delimiter=externalcolsep,names=externalcolnames,dtype=externalcoldtypes,comments=externalcommentchar)ext_cosdecl = np.cos(np.radians(extcat['']))ext_sindecl = np.sin(np.radians(extcat['']))ext_cosra = np.cos(np.radians(extcat['']))ext_sinra = np.sin(np.radians(extcat['']))ext_xyz = np.column_stack((ext_cosra*ext_cosdecl,ext_sinra*ext_cosdecl,ext_sindecl))ext_xyzdist = * np.sin(np.radians(xmatchdistarcsec/)/)our_kdt = lclist['']ext_kdt = sps.cKDTree(ext_xyz)extkd_matchinds = 
ext_kdt.query_ball_tree(our_kdt, ext_xyzdist)for extind, mind in enumerate(extkd_matchinds):if len(mind) > :ext_matches.append(mind[])ext_matching_objects.append(extcat[extind])ext_matches = np.array(ext_matches)if ext_matches.size > :xmatch_matching_index[ext_matches] = TrueLOGINFO('' %(xmatchexternal, xmatchdistarcsec, ext_matches.size))else:LOGERROR(\"\"\"\" % xmatchexternal)return None, None, Noneexcept Exception as e:LOGEXCEPTION('' %repr(xmatchexternal))raiseif (conesearch andisinstance(conesearch, (list, tuple)) andlen(conesearch) == ):try:racenter, declcenter, searchradius = conesearchcosdecl = np.cos(np.radians(declcenter))sindecl = np.sin(np.radians(declcenter))cosra = np.cos(np.radians(racenter))sinra = np.sin(np.radians(racenter))xyzdist = * np.sin(np.radians(searchradius)/)our_kdt = lclist['']kdtindices = our_kdt.query_ball_point([cosra*cosdecl,sinra*cosdecl,sindecl],xyzdist,n_jobs=conesearchworkers)if kdtindices and len(kdtindices) > :LOGINFO('''' %(searchradius, racenter, declcenter, len(kdtindices)))matchingind = kdtindicesconesearch_matching_index[np.array(matchingind)] = Trueelse:LOGERROR(\"\"\"\" %(searchradius, racenter, declcenter, len(kdtindices)))return None, Noneexcept Exception as e:LOGEXCEPTION('''' % lc_catalog)raiseallfilterinds = []if columnfilters and isinstance(columnfilters, list):for cfilt in columnfilters:try:fcol, foperator, foperand = cfilt.split('')foperator = FILTEROPS[foperator]filterstr = (\"\"\"\") % (fcol, fcol, foperator, foperand)filterind = eval(filterstr)ngood = lclist[''][objectidcol][filterind].sizeLOGINFO('' % (cfilt, ngood))allfilterinds.append(filterind)except Exception as e:LOGEXCEPTION(''% cfilt)LOGWARNING('')if (xmatchexternal or conesearch or columnfilters):filterstack = []if xmatchexternal:filterstack.append(xmatch_matching_index)if conesearch:filterstack.append(conesearch_matching_index)if columnfilters:filterstack.extend(allfilterinds)finalfilterind = np.column_stack(filterstack)finalfilterind = np.all(finalfilterind, axis=)filteredobjectids = lclist[''][objectidcol][finalfilterind]filteredlcfnames = lclist[''][''][finalfilterind]else:filteredobjectids = lclist[''][objectidcol]filteredlcfnames = lclist['']['']if field_fitsfile is not None and os.path.exists(field_fitsfile):matching_ra = lclist[''][racol][finalfilterind]matching_decl = lclist[''][declcol][finalfilterind]matching_postfix = []if xmatchexternal is not None:matching_postfix.append('' %os.path.splitext(os.path.basename(xmatchexternal))[])if conesearch is not None:matching_postfix.append('' %tuple(conesearch))if columnfilters is not None:for cfi, cf in enumerate(columnfilters):if cfi == :matching_postfix.append('' %tuple(cf.split('')))else:matching_postfix.append('' %tuple(cf.split('')))if len(matching_postfix) > :matching_postfix = '' % ''.join(matching_postfix)else:matching_postfix = ''finder_outfile = os.path.join(os.path.dirname(lc_catalog),'' %(os.path.splitext(os.path.basename(lc_catalog))[],matching_postfix))finder_png = fits_finder_chart(field_fitsfile,finder_outfile,wcsfrom=field_wcsfrom,scale=field_scale,stretch=field_stretch,colormap=field_colormap,findersize=field_findersize,overlay_ra=matching_ra,overlay_decl=matching_decl,overlay_pltopts=field_pltopts,field_zoomcontain=field_zoomcontain,grid=field_grid,gridcolor=field_gridcolor)if finder_png is not None:LOGINFO('''''' % finder_png)if copylcsto is not None:if not os.path.exists(copylcsto):os.mkdir(copylcsto)if TQDM:lciter = tqdm(filteredlcfnames)else:lciter = filteredlcfnamesLOGINFO('' % copylcsto)for lc in 
lciter:shutil.copy(lc, copylcsto)LOGINFO('' % filteredobjectids.size)if xmatchexternal and len(ext_matching_objects) > :return filteredlcfnames, filteredobjectids, ext_matching_objectselse:return filteredlcfnames, filteredobjectids", "docstring": "This is used to perform cone-search, cross-match, and column-filter\n operations on a light curve catalog generated by `make_lclist`.\n\n Uses the output of `make_lclist` above. This function returns a list of\n light curves matching various criteria specified by the `xmatchexternal`,\n `conesearch`, and `columnfilters kwargs`. Use this function to generate\n input lists for other lcproc functions,\n e.g. `lcproc.lcvfeatures.parallel_varfeatures`,\n `lcproc.periodfinding.parallel_pf`, and `lcproc.lcbin.parallel_timebin`,\n among others.\n\n The operations are applied in this order if more than one is specified:\n `xmatchexternal` -> `conesearch` -> `columnfilters`. All results from these\n operations are joined using a logical AND operation.\n\n Parameters\n ----------\n\n objectidcol : str\n This is the name of the object ID column in the light curve catalog.\n\n racol : str\n This is the name of the RA column in the light curve catalog.\n\n declcol : str\n This is the name of the Dec column in the light curve catalog.\n\n xmatchexternal : str or None\n If provided, this is the filename of a text file containing objectids,\n ras and decs to match the objects in the light curve catalog to by their\n positions.\n\n xmatchdistarcsec : float\n This is the distance in arcseconds to use when cross-matching to the\n external catalog in `xmatchexternal`.\n\n externalcolnums : sequence of int\n This a list of the zero-indexed column numbers of columns to extract\n from the external catalog file.\n\n externalcolnames : sequence of str\n This is a list of names of columns that will be extracted from the\n external catalog file. This is the same length as\n `externalcolnums`. These must contain the names provided as the\n `objectid`, `ra`, and `decl` column names so this function knows which\n column numbers correspond to those columns and can use them to set up\n the cross-match.\n\n externalcoldtypes : str\n This is a CSV string containing numpy dtype definitions for all columns\n listed to extract from the external catalog file. The number of dtype\n definitions should be equal to the number of columns to extract.\n\n externalcolsep : str or None\n The column separator to use when extracting columns from the external\n catalog file. If None, any whitespace between columns is used as the\n separator.\n\n externalcommentchar : str\n The character indicating that a line in the external catalog file is to\n be ignored.\n\n conesearch : list of float\n\n This is used to specify cone-search parameters. It should be a three\n element list:\n\n [center_ra_deg, center_decl_deg, search_radius_deg]\n\n conesearchworkers : int\n The number of parallel workers to launch for the cone-search operation.\n\n columnfilters : list of str\n\n This is a list of strings indicating any filters to apply on each column\n in the light curve catalog. All column filters are applied in the\n specified sequence and are combined with a logical AND operator. 
The\n format of each filter string should be:\n\n '||'\n\n where:\n\n - is a column in the lc_catalog pickle file\n - is one of: 'lt', 'gt', 'le', 'ge', 'eq', 'ne', which\n correspond to the usual operators: <, >, <=, >=, ==, != respectively.\n - is a float, int, or string.\n\n field_fitsfile : str or None\n If this is not None, it should be the path to a FITS image containing\n the objects these light curves are for. If this is provided,\n `make_lclist` will use the WCS information in the FITS itself if\n `field_wcsfrom` is None (or from a WCS header file pointed to by\n `field_wcsfrom`) to obtain x and y pixel coordinates for all of the\n objects in the field. A finder chart will also be made using\n `astrobase.plotbase.fits_finder_chart` using the corresponding\n `field_scale`, `_stretch`, `_colormap`, `_findersize`, `_pltopts`,\n `_grid`, and `_gridcolors` kwargs for that function, reproduced here to\n enable customization of the finder chart plot.\n\n field_wcsfrom : str or None\n If `wcsfrom` is None, the WCS to transform the RA/Dec to pixel x/y will\n be taken from the FITS header of `fitsfile`. If this is not None, it\n must be a FITS or similar file that contains a WCS header in its first\n extension.\n\n field_scale : astropy.visualization.Interval object\n `scale` sets the normalization for the FITS pixel values. This is an\n astropy.visualization Interval object.\n See http://docs.astropy.org/en/stable/visualization/normalization.html\n for details on `scale` and `stretch` objects.\n\n field_stretch : astropy.visualization.Stretch object\n `stretch` sets the stretch function for mapping FITS pixel values to\n output pixel values. This is an astropy.visualization Stretch object.\n See http://docs.astropy.org/en/stable/visualization/normalization.html\n for details on `scale` and `stretch` objects.\n\n field_colormap : matplotlib Colormap object\n `colormap` is a matplotlib color map object to use for the output image.\n\n field_findersize : None or tuple of two ints\n If `findersize` is None, the output image size will be set by the NAXIS1\n and NAXIS2 keywords in the input `fitsfile` FITS header. Otherwise,\n `findersize` must be a tuple with the intended x and y size of the image\n in inches (all output images will use a DPI = 100).\n\n field_pltopts : dict\n `field_pltopts` controls how the overlay points will be plotted. This\n a dict with standard matplotlib marker, etc. kwargs as key-val pairs,\n e.g. 'markersize', 'markerfacecolor', etc. The default options make red\n outline circles at the location of each object in the overlay.\n\n field_grid : bool\n `grid` sets if a grid will be made on the output image.\n\n field_gridcolor : str\n `gridcolor` sets the color of the grid lines. This is a usual matplotib\n color spec string.\n\n field_zoomcontain : bool\n `field_zoomcontain` controls if the finder chart will be zoomed to\n just contain the overlayed points. Everything outside the footprint of\n these points will be discarded.\n\n copylcsto : str\n If this is provided, it is interpreted as a directory target to copy\n all the light curves that match the specified conditions.\n\n Returns\n -------\n\n tuple\n Returns a two elem tuple: (matching_object_lcfiles, matching_objectids)\n if conesearch and/or column filters are used. 
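\n\n For instance, a hedged sketch of a cone-search plus column-filter call\n that returns such a two-element tuple; the import path assumes the\n `astrobase.lcproc` submodule layout, and the catalog file name, search\n coordinates, column name, and threshold are made up::\n\n from astrobase.lcproc.catalogs import filter_lclist\n\n # cone search: 2 deg radius around RA = 290.0 deg, Dec = 45.0 deg, then\n # keep objects with more than 1000 finite detections in a hypothetical\n # 'ndet_aep_000' column\n lcfiles, objectids = filter_lclist(\n 'my-lightcurve-catalog.pkl',\n conesearch=[290.0, 45.0, 2.0],\n columnfilters=['ndet_aep_000|gt|1000'],\n )\n\n 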
If `xmatchexternal` is\n also used, a three-elem tuple is returned: (matching_object_lcfiles,\n matching_objectids, extcat_matched_objectids).", "id": "f14709:m3"} {"signature": "def _cpinfo_key_worker(task):", "body": "cpfile, keyspeclist = taskkeystoget = [x[] for x in keyspeclist]nonesubs = [x[-] for x in keyspeclist]nansubs = [x[-] for x in keyspeclist]for i, k in enumerate(keystoget):thisk = k.split('')if sys.version_info[:] < (,):thisk = [(int(x) if x.isdigit() else x) for x in thisk]else:thisk = [(int(x) if x.isdecimal() else x) for x in thisk]keystoget[i] = thiskkeystoget.insert(,[''])nonesubs.insert(, '')nansubs.insert(,'')vals = checkplot_infokey_worker((cpfile, keystoget))for val, nonesub, nansub, valind in zip(vals, nonesubs,nansubs, range(len(vals))):if val is None:outval = nonesubelif isinstance(val, float) and not np.isfinite(val):outval = nansubelif isinstance(val, (list, tuple)):outval = ''.join(val)else:outval = valvals[valind] = outvalreturn vals", "docstring": "This wraps `checkplotlist.checkplot_infokey_worker`.\n\n This is used to get the correct dtype for each element in retrieved results.\n\n Parameters\n ----------\n\n task : tuple\n task[0] = cpfile\n task[1] = keyspeclist (infokeys kwarg from `add_cpinfo_to_lclist`)\n\n Returns\n -------\n\n dict\n All of the requested keys from the checkplot are returned along with\n their values in a dict.", "id": "f14709:m4"} {"signature": "def add_cpinfo_to_lclist(checkplots, initial_lc_catalog,magcol, outfile,checkplotglob='',infokeys=CPINFO_DEFAULTKEYS,nworkers=NCPUS):", "body": "if not isinstance(checkplots, list) and os.path.exists(checkplots):checkplots = sorted(glob.glob(os.path.join(checkplots, checkplotglob)))tasklist = [(cpf, infokeys) for cpf in checkplots]with ProcessPoolExecutor(max_workers=nworkers) as executor:resultfutures = executor.map(_cpinfo_key_worker, tasklist)results = [x for x in resultfutures]executor.shutdown()with open(initial_lc_catalog,'') as infd:lc_catalog = pickle.load(infd)catalog_objectids = np.array(lc_catalog[''][''])checkplot_objectids = np.array([x[] for x in results])extrainfokeys = []actualkeys = []for keyspec in infokeys:key, dtype, firstlevel, overwrite_append, nonesub, nansub = keyspecif firstlevel:eik = keyelse:eik = '' % (magcol, key)extrainfokeys.append(eik)eactual = eik.split('')if not eactual[-].isdigit():if not firstlevel:eactual = ''.join([eactual[], eactual[-]])else:eactual = eactual[-]else:elastkey = eactual[-]if elastkey.endswith(''):elastkey = elastkey[:-]elif elastkey.endswith(''):elastkey = elastkey[:-]if not firstlevel:eactual = ''.join([eactual[], elastkey])else:eactual = elastkeyactualkeys.append(eactual)if eactual not in lc_catalog['']:lc_catalog[''].append(eactual)lc_catalog[''][eactual] = []for catobj in tqdm(catalog_objectids):cp_objind = np.where(checkplot_objectids == catobj)if len(cp_objind[]) > :thiscpinfo = results[cp_objind[][]]thiscpinfo = thiscpinfo[:]for ekind, ek in enumerate(actualkeys):lc_catalog[''][ek].append(thiscpinfo[ekind])else:for ekind, ek in enumerate(actualkeys):thiskeyspec = infokeys[ekind]nonesub = thiskeyspec[-]lc_catalog[''][ek].append(nonesub)for ek in actualkeys:lc_catalog[''][ek] = np.array(lc_catalog[''][ek])if '' in lc_catalog:if magcol not in lc_catalog['']:lc_catalog[''].append(magcol)else:lc_catalog[''] = [magcol]with open(outfile, '') as outfd:pickle.dump(lc_catalog, outfd, protocol=pickle.HIGHEST_PROTOCOL)return outfile", "docstring": "This adds checkplot info to the initial light curve catalogs generated by\n `make_lclist`.\n\n 
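A typical call might look like the following hedged sketch; the import path\n assumes the `astrobase.lcproc` submodule layout, and the checkplot\n directory, catalog file names, and magcol below are placeholders::\n\n from astrobase.lcproc.catalogs import add_cpinfo_to_lclist\n\n # returns the path of the new 'augmented' catalog pickle\n augmented_catalog = add_cpinfo_to_lclist(\n '/path/to/checkplot-pickles',\n 'my-lightcurve-catalog.pkl',\n 'aep_000',\n 'my-augmented-lightcurve-catalog.pkl',\n )\n\n 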
This is used to incorporate all the extra info checkplots can have for\n objects back into columns in the light curve catalog produced by\n `make_lclist`. Objects are matched between the checkplots and the light\n curve catalog using their `objectid`. This then allows one to search this\n 'augmented' light curve catalog by these extra columns. The 'augmented'\n light curve catalog also forms the basis for search interface provided by\n the LCC-Server.\n\n The default list of keys that will be extracted from a checkplot and added\n as columns in the initial light curve catalog is listed above in the\n `CPINFO_DEFAULTKEYS` list.\n\n Parameters\n ----------\n\n checkplots : str or list\n If this is a str, is interpreted as a directory which will be searched\n for checkplot pickle files using `checkplotglob`. If this is a list, it\n will be interpreted as a list of checkplot pickle files to process.\n\n initial_lc_catalog : str\n This is the path to the light curve catalog pickle made by\n `make_lclist`.\n\n magcol : str\n This is used to indicate the light curve magnitude column to extract\n magnitude column specific information. For example, Stetson variability\n indices can be generated using magnitude measurements in separate\n photometric apertures, which appear in separate `magcols` in the\n checkplot. To associate each such feature of the object with its\n specific `magcol`, pass that `magcol` in here. This `magcol` will then\n be added as a prefix to the resulting column in the 'augmented' LC\n catalog, e.g. Stetson J will appear as `magcol1_stetsonj` and\n `magcol2_stetsonj` for two separate magcols.\n\n outfile : str\n This is the file name of the output 'augmented' light curve catalog\n pickle file that will be written.\n\n infokeys : list of tuples\n\n This is a list of keys to extract from the checkplot and some info on\n how this extraction is to be done. 
Each key entry is a six-element\n tuple of the following form:\n\n - key name in the checkplot\n - numpy dtype of the value of this key\n - False if key is associated with a magcol or True otherwise\n - False if subsequent updates to the same column name will append to\n existing key values in the output augmented light curve catalog or\n True if these will overwrite the existing key value\n - character to use to substitute a None value of the key in the\n checkplot in the output light curve catalog column\n - character to use to substitute a nan value of the key in the\n checkplot in the output light curve catalog column\n\n See the `CPFINFO_DEFAULTKEYS` list above for examples.\n\n nworkers : int\n The number of parallel workers to launch to extract checkplot\n information.\n\n Returns\n -------\n\n str\n Returns the path to the generated 'augmented' light curve catalog pickle\n file.", "id": "f14709:m5"} {"signature": "def _recv_sigint(signum, stack):", "body": "raise KeyboardInterrupt", "docstring": "handler function to receive and process a SIGINT", "id": "f14710:m0"} {"signature": "def main():", "body": "tornado.options.parse_command_line()DEBUG = True if options.debugmode == else FalseLOGGER = logging.getLogger('')if DEBUG:LOGGER.setLevel(logging.DEBUG)else:LOGGER.setLevel(logging.INFO)MAXPROCS = options.maxprocsASSETPATH = options.assetpathBASEURL = options.baseurlEXECUTOR = ProcessPoolExecutor(MAXPROCS)if options.standalone:if ( (not options.sharedsecret) or(options.sharedsecret andnot os.path.exists(options.sharedsecret)) ):LOGGER.error('''''''''''')sys.exit()elif options.sharedsecret and os.path.exists(options.sharedsecret):fileperm = oct(os.stat(options.sharedsecret)[stat.ST_MODE])if fileperm == '' or fileperm == '':with open(options.sharedsecret,'') as infd:SHAREDSECRET = infd.read().strip('')standalonespec = (r'',cphandlers.StandaloneHandler,{'':EXECUTOR,'':SHAREDSECRET})else:LOGGER.error('''')sys.exit()else:LOGGER.error('''' %options.sharedsecret)sys.exit()HANDLERS = [standalonespec]else:if not BASEURL.endswith(''):BASEURL = BASEURL + ''READONLY = options.readonlyif READONLY:LOGGER.warning('')CURRENTDIR = os.getcwd()cplistfile = options.checkplotlistif cplistfile and os.path.exists(cplistfile):with open(cplistfile,'') as infd:CHECKPLOTLIST = json.load(infd)LOGGER.info('' % cplistfile)elif cplistfile and not os.path.exists(cplistfile):helpmsg = (\"\"\"\"\"\"\"\"\"\" %(cplistfile, os.path.join(modpath,'')))LOGGER.error(helpmsg)sys.exit()else:LOGGER.warning('''''''''' % CURRENTDIR)if os.path.exists(os.path.join(CURRENTDIR,'')):cplistfile = os.path.join(CURRENTDIR,'')with open(cplistfile,'') as infd:CHECKPLOTLIST = json.load(infd)LOGGER.info('' % cplistfile)elif os.path.exists(os.path.join(CURRENTDIR,'')):cplistfile = os.path.join(CURRENTDIR,'')with open(cplistfile,'') as infd:CHECKPLOTLIST = json.load(infd)LOGGER.info('' % cplistfile)else:helpmsg = (\"\"\"\"\"\"\"\"\"\"\"\")LOGGER.error(helpmsg)sys.exit()HANDLERS = 
[(r''.format(baseurl=BASEURL),cphandlers.IndexHandler,{'':CURRENTDIR,'':ASSETPATH,'':CHECKPLOTLIST,'':cplistfile,'':EXECUTOR,'':READONLY,'':BASEURL}),(r''.format(baseurl=BASEURL),cphandlers.CheckplotHandler,{'':CURRENTDIR,'':ASSETPATH,'':CHECKPLOTLIST,'':cplistfile,'':EXECUTOR,'':READONLY}),(r''.format(baseurl=BASEURL),cphandlers.CheckplotListHandler,{'':CURRENTDIR,'':ASSETPATH,'':CHECKPLOTLIST,'':cplistfile,'':EXECUTOR,'':READONLY}),(r''.format(baseurl=BASEURL),cphandlers.LCToolHandler,{'':CURRENTDIR,'':ASSETPATH,'':CHECKPLOTLIST,'':cplistfile,'':EXECUTOR,'':READONLY}),(r''.format(baseurl=BASEURL),tornado.web.StaticFileHandler, {'': CURRENTDIR})]app = tornado.web.Application(handlers=HANDLERS,static_path=ASSETPATH,template_path=ASSETPATH,static_url_prefix=''.format(baseurl=BASEURL),compress_response=True,debug=DEBUG,)http_server = tornado.httpserver.HTTPServer(app, xheaders=True)portok = Falseserverport = options.portmaxtrys = thistry = while not portok and thistry < maxtrys:try:http_server.listen(serverport, options.serve)portok = Trueexcept socket.error as e:LOGGER.warning('' %(options.serve, serverport, serverport + ))serverport = serverport + if not portok:LOGGER.error('')sys.exit()LOGGER.info('' %(options.serve, serverport, BASEURL))signal.signal(signal.SIGINT,_recv_sigint)signal.signal(signal.SIGTERM,_recv_sigint)try:tornado.ioloop.IOLoop.instance().start()except KeyboardInterrupt:LOGGER.info('')tornado.ioloop.IOLoop.instance().stop()EXECUTOR.shutdown()time.sleep()", "docstring": "This launches the server. The current script args are shown below::\n\n Usage: checkplotserver [OPTIONS]\n\n Options:\n\n --help show this help information\n\n checkplotserver.py options:\n\n --assetpath Sets the asset (server images, css, js, DB)\n path for checkplotserver.\n (default \n /astrobase/cpserver/cps-assets)\n --baseurl Set the base URL of the checkplotserver.\n This is useful when you're running\n checkplotserver on a remote machine and are\n reverse-proxying more than one instances of\n it so you can access them using HTTP from\n outside on different base URLs like\n /cpserver1/, /cpserver2/, etc. If this is\n set, all URLs will take the form\n [baseurl]/..., instead of /... (default /)\n --checkplotlist The path to the checkplot-filelist.json file\n listing checkplots to load and serve. If\n this is not provided, checkplotserver will\n look for a checkplot-pickle-flist.json in\n the directory that it was started in\n --debugmode start up in debug mode if set to 1. (default\n 0)\n --maxprocs Number of background processes to use for\n saving/loading checkplot files and running\n light curves tools (default 2)\n --port Run on the given port. (default 5225)\n --readonly Run the server in readonly mode. This is\n useful for a public-facing instance of\n checkplotserver where you just want to allow\n collaborators to review objects but not edit\n them. (default False)\n --serve Bind to given address and serve content.\n (default 127.0.0.1)\n --sharedsecret a file containing a cryptographically secure\n string that is used to authenticate requests\n that come into the special standalone mode.\n --standalone This starts the server in standalone mode.\n (default 0)\n\n tornado/log.py options:\n\n --log-file-max-size max size of log files before rollover\n (default 100000000)\n --log-file-num-backups number of log files to keep (default 10)\n --log-file-prefix=PATH Path prefix for log files. 
Note that if you\n are running multiple tornado processes,\n log_file_prefix must be different for each\n of them (e.g. include the port number)\n --log-rotate-interval The interval value of timed rotating\n (default 1)\n --log-rotate-mode The mode of rotating files(time or size)\n (default size)\n --log-rotate-when specify the type of TimedRotatingFileHandler\n interval other options:('S', 'M', 'H', 'D',\n 'W0'-'W6') (default midnight)\n --log-to-stderr Send log output to stderr (colorized if\n possible). By default use stderr if\n --log_file_prefix is not set and no other\n logging is configured.\n --logging=debug|info|warning|error|none\n Set the Python log level. If 'none', tornado\n won't touch the logging configuration.\n (default info)", "id": "f14710:m1"} {"signature": "def _time_independent_equals(a, b):", "body": "if len(a) != len(b):return Falseresult = if isinstance(a[], int): for x, y in zip(a, b):result |= x ^ yelse: for x, y in zip(a, b):result |= ord(x) ^ ord(y)return result == ", "docstring": "This compares two values in constant time.\n\nTaken from tornado:\n\nhttps://github.com/tornadoweb/tornado/blob/\nd4eb8eb4eb5cc9a6677e9116ef84ded8efba8859/tornado/web.py#L3060", "id": "f14711:m0"} {"signature": "def default(self, obj):", "body": "if isinstance(obj, np.ndarray):return obj.tolist()elif isinstance(obj, bytes):return obj.decode()elif isinstance(obj, complex):return (obj.real, obj.imag)elif (isinstance(obj, (float, np.float64, np.float_)) andnot np.isfinite(obj)):return Noneelif isinstance(obj, (np.int8, np.int16, np.int32, np.int64)):return int(obj)else:return json.JSONEncoder.default(self, obj)", "docstring": "Overrides the default serializer for `JSONEncoder`.\n\n This can serialize the following objects in addition to what\n `JSONEncoder` can already do.\n\n - `np.array`\n - `bytes`\n - `complex`\n - `np.float64` and other `np.dtype` objects\n\n Parameters\n ----------\n\n obj : object\n A Python object to serialize to JSON.\n\n Returns\n -------\n\n str\n A JSON encoded representation of the input object.", "id": "f14711:c0:m0"} {"signature": "def initialize(self, currentdir, assetpath, cplist,cplistfile, executor, readonly, baseurl):", "body": "self.currentdir = currentdirself.assetpath = assetpathself.currentproject = cplistself.cplistfile = cplistfileself.executor = executorself.readonly = readonlyself.baseurl = baseurl", "docstring": "handles initial setup.", "id": "f14711:c1:m0"} {"signature": "def get(self):", "body": "project_checkplots = self.currentproject['']project_checkplotbasenames = [os.path.basename(x)for x in project_checkplots]project_checkplotindices = range(len(project_checkplots))project_cpsortkey = self.currentproject['']if self.currentproject[''] == '':project_cpsortorder = ''elif self.currentproject[''] == '':project_cpsortorder = ''project_cpfilterstatements = self.currentproject['']self.render('',project_checkplots=project_checkplots,project_cpsortorder=project_cpsortorder,project_cpsortkey=project_cpsortkey,project_cpfilterstatements=project_cpfilterstatements,project_checkplotbasenames=project_checkplotbasenames,project_checkplotindices=project_checkplotindices,project_checkplotfile=self.cplistfile,readonly=self.readonly,baseurl=self.baseurl)", "docstring": "This handles GET requests to the index page.\n\n TODO: provide the correct baseurl from the checkplotserver options dict,\n so the frontend JS can just read that off immediately.", "id": "f14711:c1:m1"} {"signature": "def initialize(self, currentdir, assetpath, cplist,cplistfile, 
executor, readonly):", "body": "self.currentdir = currentdirself.assetpath = assetpathself.currentproject = cplistself.cplistfile = cplistfileself.executor = executorself.readonly = readonly", "docstring": "This handles initial setup of this `RequestHandler`.", "id": "f14711:c2:m0"} {"signature": "@gen.coroutinedef get(self, checkplotfname):", "body": "if checkplotfname:self.checkplotfname = xhtml_escape(base64.b64decode(url_unescape(checkplotfname)))if self.checkplotfname in self.currentproject['']:cpfpath = os.path.join(os.path.abspath(os.path.dirname(self.cplistfile)),self.checkplotfname)LOGGER.info('' % cpfpath)if not os.path.exists(cpfpath):msg = \"\" % cpfpathLOGGER.error(msg)resultdict = {'':'','':msg,'':None}self.write(resultdict)raise tornado.web.Finish()cpdict = yield self.executor.submit(_read_checkplot_picklefile, cpfpath)LOGGER.info('' % cpfpath)objectid = cpdict['']objectinfo = cpdict['']varinfo = cpdict['']if '' in cpdict:pfmethods = cpdict['']else:pfmethods = []for pfm in PFMETHODS:if pfm in cpdict:pfmethods.append(pfm)neighbors = []if ('' in cpdict andcpdict[''] is not None andlen(cpdict[''])) > :nbrlist = cpdict['']for nbr in nbrlist:if '' in nbr:nbrmagdiffs = nbr['']else:nbrmagdiffs = Noneif '' in nbr:nbrcolordiffs = nbr['']else:nbrcolordiffs = Nonethisnbrdict = {'':nbr[''],'':{'':nbr[''],'':nbr[''],'':nbr[''],'':nbr[''],'':nbr[''],'':nbrmagdiffs,'':nbrcolordiffs}}try:nbr_magseries = nbr['']['']thisnbrdict[''] = nbr_magseriesexcept Exception as e:LOGGER.error(\"\"\"\"% (nbr[''],cpdict['']))try:for pfm in pfmethods:if pfm in nbr:thisnbrdict[pfm] = {'':nbr[pfm][][''],'':nbr[pfm][][''],'':nbr[pfm][]['']}except Exception as e:LOGGER.error(\"\"\"\"% (nbr[''],cpdict['']))neighbors.append(thisnbrdict)if '' in cpdict:objectcomments = cpdict['']else:objectcomments = Noneif '' in cpdict:objectxmatch = cpdict['']for xmcat in objectxmatch:if isinstance(objectxmatch[xmcat][''], dict):xminfo = objectxmatch[xmcat]['']for xmek in xminfo:if (isinstance(xminfo[xmek], float) and(not np.isfinite(xminfo[xmek]))):xminfo[xmek] = Noneelse:objectxmatch = Noneif '' in cpdict:colormagdiagram = cpdict['']else:colormagdiagram = Noneif '' in cpdict:finderchart = cpdict['']else:finderchart = Noneif ('' in cpdict andisinstance(cpdict[''], dict) and'' in cpdict['']):magseries = cpdict['']['']time0 = cpdict[''][''].min()magseries_ndet = cpdict[''][''].sizeelse:magseries = Nonetime0 = magseries_ndet = LOGGER.warning(\"\"\"\")if '' in cpdict:cpstatus = cpdict['']else:cpstatus = ''if '' in cpdict:uifilters = cpdict['']else:uifilters = {'':None,'':None,'':None}resultdict = {'':'','':'' % self.checkplotfname,'':self.readonly,'':{'':'' % time0,'':objectid,'':objectinfo,'':colormagdiagram,'':objectcomments,'':varinfo,'':uifilters,'':neighbors,'':objectxmatch,'':finderchart,'':magseries,'':magseries_ndet,'':cpstatus,'':pfmethods}}for key in resultdict['']['']:if (isinstance(resultdict[''][''][key],(float, np.float64, np.float_)) and(not np.isfinite(resultdict[''][''][key]))):resultdict[''][''][key] = Noneelif (isinstance(resultdict[''][''][key],ndarray)):thisval = resultdict[''][''][key]thisval = thisval.tolist()for i, v in enumerate(thisval):if (isinstance(v,(float, np.float64, np.float_)) and(not(np.isfinite(v)))):thisval[i] = Noneresultdict[''][''][key] = thisvalfor key in resultdict['']['']:if (isinstance(resultdict[''][''][key],(float, np.float64, np.float_)) and(not np.isfinite(resultdict[''][''][key]))):resultdict[''][''][key] = Noneelif (isinstance(resultdict[''][''][key],ndarray)):thisval = 
(resultdict[''][''][key])thisval = thisval.tolist()for i, v in enumerate(thisval):if (isinstance(v,(float, np.float64, np.float_)) and(not(np.isfinite(v)))):thisval[i] = Noneresultdict[''][''][key] = (thisval)if ('' in resultdict[''][''] andisinstance(resultdict[''][''][''],dict)):for key in resultdict['']['']['']:if (isinstance(resultdict[''][''][''][key],(float, np.float64, np.float_)) and(not np.isfinite(resultdict[''][''][''][key]))):resultdict[''][''][''][key] = Noneelif (isinstance(resultdict[''][''][''][key],ndarray)):thisval = (resultdict[''][''][''][key])thisval = thisval.tolist()for i, v in enumerate(thisval):if (isinstance(v,(float,np.float64,np.float_)) and(not(np.isfinite(v)))):thisval[i] = Noneresultdict[''][''][''][key] = (thisval)for key in pfmethods:periodogram = cpdict[key]['']if in cpdict[key] and isinstance(cpdict[key][], dict):phasedlc0plot = cpdict[key][]['']phasedlc0period = float(cpdict[key][][''])phasedlc0epoch = float(cpdict[key][][''])else:phasedlc0plot = Nonephasedlc0period = Nonephasedlc0epoch = Noneif ( in cpdict[key] andisinstance(cpdict[key][], dict) and'' in cpdict[key][] andisinstance(cpdict[key][][''], dict)):phasedlc0fit = {'':(cpdict[key][]['']['']),'':(cpdict[key][]['']['']),'':(cpdict[key][]['']['']),'':(cpdict[key][][''][''][''] if'' incpdict[key][][''][''] else None)}else:phasedlc0fit = Noneif in cpdict[key] and isinstance(cpdict[key][], dict):phasedlc1plot = cpdict[key][]['']phasedlc1period = float(cpdict[key][][''])phasedlc1epoch = float(cpdict[key][][''])else:phasedlc1plot = Nonephasedlc1period = Nonephasedlc1epoch = Noneif ( in cpdict[key] andisinstance(cpdict[key][], dict) and'' in cpdict[key][] andisinstance(cpdict[key][][''], dict)):phasedlc1fit = {'':(cpdict[key][]['']['']),'':(cpdict[key][]['']['']),'':(cpdict[key][]['']['']),'':(cpdict[key][][''][''][''] if'' incpdict[key][][''][''] else None)}else:phasedlc1fit = Noneif in cpdict[key] and isinstance(cpdict[key][], dict):phasedlc2plot = cpdict[key][]['']phasedlc2period = float(cpdict[key][][''])phasedlc2epoch = float(cpdict[key][][''])else:phasedlc2plot = Nonephasedlc2period = Nonephasedlc2epoch = Noneif ( in cpdict[key] andisinstance(cpdict[key][], dict) and'' in cpdict[key][] andisinstance(cpdict[key][][''], dict)):phasedlc2fit = {'':(cpdict[key][]['']['']),'':(cpdict[key][]['']['']),'':(cpdict[key][]['']['']),'':(cpdict[key][][''][''][''] if'' incpdict[key][][''][''] else None)}else:phasedlc2fit = Noneresultdict[''][key] = {'':cpdict[key][''],'':periodogram,'':cpdict[key][''],'':{'':phasedlc0plot,'':phasedlc0period,'':phasedlc0epoch,'':phasedlc0fit,},'':{'':phasedlc1plot,'':phasedlc1period,'':phasedlc1epoch,'':phasedlc1fit,},'':{'':phasedlc2plot,'':phasedlc2period,'':phasedlc2epoch,'':phasedlc2fit,},}self.write(resultdict)self.finish()else:LOGGER.error('' % self.checkplotfname)resultdict = {'':'','':\"\",'':self.readonly,'':None}self.write(resultdict)self.finish()else:resultdict = {'':'','':'','':self.readonly,'':None}self.write(resultdict)", "docstring": "This handles GET requests to serve a specific checkplot pickle.\n\n This is an AJAX endpoint; returns JSON that gets converted by the\n frontend into things to render.", "id": "f14711:c2:m1"} {"signature": "@gen.coroutinedef post(self, cpfile):", "body": "if self.readonly:msg = \"\"resultdict = {'':'','':msg,'':self.readonly,'':None}self.write(resultdict)raise tornado.web.Finish()try:self.cpfile = base64.b64decode(url_unescape(cpfile)).decode()cpcontents = self.get_argument('', default=None)savetopng = self.get_argument('', 
default=None)if not self.cpfile or not cpcontents:msg = \"\"resultdict = {'':'','':msg,'':self.readonly,'':None}self.write(resultdict)raise tornado.web.Finish()cpcontents = json.loads(cpcontents)updated = {'': cpcontents[''],'':cpcontents[''],'':cpcontents[''],'':cpcontents['']}cpfpath = os.path.join(os.path.abspath(os.path.dirname(self.cplistfile)),self.cpfile)LOGGER.info('' % cpfpath)if not os.path.exists(cpfpath):msg = \"\" % cpfpathLOGGER.error(msg)resultdict = {'':'','':msg,'':self.readonly,'':None}self.write(resultdict)raise tornado.web.Finish()updated = yield self.executor.submit(checkplot_pickle_update,cpfpath, updated)if updated:LOGGER.info('' % updated)resultdict = {'':'','':'','':self.readonly,'':{'':updated,'':utime.time(),'':cpcontents,'': None}}if savetopng:cpfpng = os.path.abspath(cpfpath.replace('',''))cpfpng = StrIO()pngdone = yield self.executor.submit(checkplot_pickle_to_png,cpfpath, cpfpng)if pngdone is not None:pngdone.seek()pngbin = pngdone.read()pngb64 = base64.b64encode(pngbin)pngdone.close()del pngbinresultdict[''][''] = pngb64else:resultdict[''][''] = ''self.write(resultdict)self.finish()else:LOGGER.error('' %(self.cpfile, cpcontents))msg = \"\"resultdict = {'':'','':msg,'':self.readonly,'':None}self.write(resultdict)self.finish()except Exception as e:LOGGER.exception('' %(self.cpfile, cpcontents))msg = \"\"resultdict = {'':'','':msg,'':self.readonly,'':None}self.write(resultdict)self.finish()", "docstring": "This handles POST requests.\n\n Also an AJAX endpoint. Updates the persistent checkplot dict using the\n changes from the UI, and then saves it back to disk. This could\n definitely be faster by just loading the checkplot into a server-wide\n shared dict or something.", "id": "f14711:c2:m2"} {"signature": "def initialize(self, currentdir, assetpath, cplist,cplistfile, executor, readonly):", "body": "self.currentdir = currentdirself.assetpath = assetpathself.currentproject = cplistself.cplistfile = cplistfileself.executor = executorself.readonly = readonly", "docstring": "This handles initial setup of the `RequestHandler`.", "id": "f14711:c3:m0"} {"signature": "def get(self):", "body": "if '' not in self.currentproject:self.currentproject[''] = {}self.write(self.currentproject)", "docstring": "This handles GET requests for the current checkplot-list.json file.\n\nUsed with AJAX from frontend.", "id": "f14711:c3:m1"} {"signature": "def post(self):", "body": "if self.readonly:msg = \"\"resultdict = {'':'','':msg,'':self.readonly,'':None}self.write(resultdict)raise tornado.web.Finish()objectid = self.get_argument('', None)changes = self.get_argument('',None)if not objectid or not changes:msg = (\"\"\"\")LOGGER.error(msg)resultdict = {'':'','':msg,'':self.readonly,'':None}self.write(resultdict)raise tornado.web.Finish()objectid = xhtml_escape(objectid)changes = json.loads(changes)if '' not in self.currentproject:self.currentproject[''] = {}self.currentproject[''][objectid] = changeswith open(self.cplistfile,'') as outfd:json.dump(self.currentproject, outfd)msg = (\"\"\"\" % objectid)LOGGER.info(msg)resultdict = {'':'','':msg,'':self.readonly,'':{'':objectid,'':changes}}self.write(resultdict)self.finish()", "docstring": "This handles POST requests.\n\n Saves the changes made by the user on the frontend back to the current\n checkplot-list.json file.", "id": "f14711:c3:m2"} {"signature": "def initialize(self, currentdir, assetpath, cplist,cplistfile, executor, readonly):", "body": "self.currentdir = currentdirself.assetpath = assetpathself.currentproject = 
cplistself.cplistfile = cplistfileself.executor = executorself.readonly = readonly", "docstring": "This handles initial setup of the `RequestHandler`.", "id": "f14711:c4:m0"} {"signature": "@gen.coroutinedef get(self, cpfile):", "body": "if cpfile:self.cpfile = (xhtml_escape(base64.b64decode(url_unescape(cpfile))))if self.cpfile in self.currentproject['']:cpfpath = os.path.join(os.path.abspath(os.path.dirname(self.cplistfile)),self.cpfile)if not os.path.exists(cpfpath):msg = \"\" % cpfpathLOGGER.error(msg)resultdict = {'':'','':msg,'':self.readonly,'':None}self.write(resultdict)raise tornado.web.Finish()forcereload = self.get_argument('',False)if forcereload and xhtml_escape(forcereload):forcereload = True if forcereload == '' else Falsecpobjectid = self.get_argument('',None)lctool = self.get_argument('', None)resultdict = {'':None,'':None,'':self.readonly,'':None}if lctool:lctool = xhtml_escape(lctool)lctoolargs = []lctoolkwargs = {}if lctool in CPTOOLMAP:try:for xkwarg, xkwargtype, xkwargdef in zip(CPTOOLMAP[lctool][''],CPTOOLMAP[lctool][''],CPTOOLMAP[lctool]['']):if xkwargtype is list:wbkwarg = self.get_arguments(xkwarg)if len(wbkwarg) > :wbkwarg = [url_unescape(xhtml_escape(x))for x in wbkwarg]else:wbkwarg = Noneelse:wbkwarg = self.get_argument(xkwarg, None)if wbkwarg is not None:wbkwarg = url_unescape(xhtml_escape(wbkwarg))LOGGER.info('' %(xkwarg, repr(wbkwarg)))if wbkwarg is None:wbkwarg = xkwargdefelse:if xkwargtype is list:wbkwarg = [float(x) for x in wbkwarg]elif xkwargtype is bool:if wbkwarg == '':wbkwarg = Falseelif wbkwarg == '':wbkwarg = Trueelse:wbkwarg = xkwargdefelse:wbkwarg = xkwargtype(wbkwarg)if xkwarg.endswith(''):xkwarg = xkwarg.rstrip('')lctoolkwargs.update({xkwarg:wbkwarg})except Exception as e:LOGGER.exception('''' %(lctool, xkwarg))resultdict[''] = ''resultdict[''] = ('''' %(lctool, xkwarg))resultdict[''] = {'':cpobjectid}self.write(resultdict)raise tornado.web.Finish()else:LOGGER.error('' % lctool)resultdict[''] = ''resultdict[''] = ('' % lctool)resultdict[''] = {'':cpobjectid}self.write(resultdict)raise tornado.web.Finish()else:LOGGER.error('')resultdict[''] = ''resultdict[''] = ('')resultdict[''] = {'':cpobjectid}self.write(resultdict)raise tornado.web.Finish()LOGGER.info('' % cpfpath)cpdict = yield self.executor.submit(_read_checkplot_picklefile, cpfpath)tempfpath = cpfpath + ''if os.path.exists(tempfpath):tempcpdict = yield self.executor.submit(_read_checkplot_picklefile, tempfpath)else:tempcpdict = {'':cpdict[''],'':{'':cpdict[''][''],'':cpdict[''][''],'':cpdict[''][''],}}if not forcereload:cptimes, cpmags, cperrs = (tempcpdict[''][''],tempcpdict[''][''],tempcpdict[''][''],)LOGGER.info('')else:cptimes, cpmags, cperrs = (cpdict[''][''],cpdict[''][''],cpdict[''][''])LOGGER.info('')for xarg, xargtype in zip(CPTOOLMAP[lctool][''],CPTOOLMAP[lctool]['']):if xarg is None:lctoolargs.append(None)elif xarg == '':lctoolargs.append(cptimes)elif xarg == '':lctoolargs.append(cpmags)elif xarg == '':lctoolargs.append(cperrs)else:try:if xargtype is list:wbarg = self.get_arguments(xarg)else:wbarg = url_unescape(xhtml_escape(self.get_argument(xarg, None)))if xargtype is list:wbarg = [float(x) for x in wbarg]elif xargtype is float and xarg == '':try:wbarg = xargtype(wbarg)except Exception as e:wbarg = Noneelse:wbarg = xargtype(wbarg)lctoolargs.append(wbarg)except Exception as e:LOGGER.exception('''' %(lctool, xarg))resultdict[''] = ''resultdict[''] = ('''' %(lctool, xarg))resultdict[''] = {'':cpobjectid}self.write(resultdict)raise 
tornado.web.Finish()LOGGER.info(lctool)LOGGER.info(lctoolargs)LOGGER.info(lctoolkwargs)resloc = CPTOOLMAP[lctool]['']objectid = cpdict['']if lctool in ('','','','','','',''):lspmethod = resloc[]if (lspmethod in tempcpdict andisinstance(tempcpdict[lspmethod], dict) and(not forcereload)):bestperiod = (tempcpdict[lspmethod][''])nbestperiods = (tempcpdict[lspmethod][''])nbestlspvals = (tempcpdict[lspmethod][''])periodogram = (tempcpdict[lspmethod][''])phasedlc0plot = (tempcpdict[lspmethod][][''])phasedlc0period = float(tempcpdict[lspmethod][][''])phasedlc0epoch = float(tempcpdict[lspmethod][][''])LOGGER.warning('''' %(lctool, tempfpath))resultdict[''] = ''resultdict[''] = ('''' %lctool)resultdict[''] = {'':objectid,lspmethod:{'':nbestperiods,'':periodogram,'':bestperiod,'':nbestlspvals,'':{'':phasedlc0plot,'':phasedlc0period,'':phasedlc0epoch,}}}self.write(resultdict)self.finish()else:if lctoolkwargs[''] is not None:wtimes, wmags, werrs = lcmath.sigclip_magseries(lctoolargs[],lctoolargs[],lctoolargs[],sigclip=lctoolkwargs[''],magsarefluxes=lctoolkwargs[''])lctoolargs[] = wtimeslctoolargs[] = wmagslctoolargs[] = werrsif lctoolkwargs['']:wtimes, wmags, werrs = (lctoolargs[],lctoolargs[],lctoolargs[])filtermasks = [np.full_like(wtimes, False, dtype=np.bool_)]filterstr = lctoolkwargs['']filters = filterstr.split('')filters = [x.strip().lstrip('').rstrip('').strip()for x in filters]for filt in filters:try:thisfilt = filt.split('')if len(thisfilt) == :filt_lo = float(thisfilt[])filt_hi = float(thisfilt[])filtermasks.append(((wtimes -cptimes.min()) < filt_hi) &((wtimes -cptimes.min()) > filt_lo))elif (len(thisfilt) == andthisfilt[].strip() == ''):filt_lo = float(thisfilt[])filt_hi = float(thisfilt[])filtermasks.append(np.logical_not((((wtimes -cptimes.min()) < filt_hi) &((wtimes -cptimes.min()) > filt_lo))))else:continueexcept Exception as e:continueif len(filtermasks) > :filterind = np.column_stack(filtermasks)filterind = np.any(filterind, axis=)lctoolargs[] = wtimes[filterind]lctoolargs[] = wmags[filterind]lctoolargs[] = werrs[filterind]if lctoolkwargs['']:wtimes, wmags, werrs = (lctoolargs[],lctoolargs[],lctoolargs[])filtermasks = [np.full_like(wtimes, False, dtype=np.bool_)]filterstr = lctoolkwargs['']filters = filterstr.split('')filters = [x.strip().strip()for x in filters]for filt in filters:try:thisfilt = filt.split('')if len(thisfilt) == :filt_lo = float(thisfilt[])filt_hi = float(thisfilt[])filtermasks.append((wmags < filt_hi) &(wmags > filt_lo))elif (len(thisfilt) == andthisfilt[].strip() == ''):filt_lo = float(thisfilt[])filt_hi = float(thisfilt[])filtermasks.append(np.logical_not(((wmags < filt_hi) &(wmags > filt_lo))))else:continueexcept Exception as e:continueif len(filtermasks) > :filterind = np.column_stack(filtermasks)filterind = np.any(filterind, axis=)lctoolargs[] = wtimes[filterind]lctoolargs[] = wmags[filterind]lctoolargs[] = werrs[filterind]del lctoolkwargs['']del lctoolkwargs['']lctoolfunction = CPTOOLMAP[lctool]['']funcresults = yield self.executor.submit(lctoolfunction,*lctoolargs,**lctoolkwargs)nbestperiods = funcresults['']nbestlspvals = funcresults['']bestperiod = funcresults['']pgramres = yield self.executor.submit(_pkl_periodogram,funcresults,)phasedlcargs0 = (None,lspmethod,-,lctoolargs[],lctoolargs[],lctoolargs[],nbestperiods[],'')if len(nbestperiods) > :phasedlcargs1 = (None,lspmethod,-,lctoolargs[],lctoolargs[],lctoolargs[],nbestperiods[],'')else:phasedlcargs1 = Noneif len(nbestperiods) > :phasedlcargs2 = 
(None,lspmethod,-,lctoolargs[],lctoolargs[],lctoolargs[],nbestperiods[],'')else:phasedlcargs2 = Nonephasedlckwargs = {'':False,'':lctoolkwargs[''],'':'',}phasedlc0 = yield self.executor.submit(_pkl_phased_magseries_plot,*phasedlcargs0,**phasedlckwargs)if phasedlcargs1 is not None:phasedlc1 = yield self.executor.submit(_pkl_phased_magseries_plot,*phasedlcargs1,**phasedlckwargs)else:phasedlc1 = Noneif phasedlcargs2 is not None:phasedlc2 = yield self.executor.submit(_pkl_phased_magseries_plot,*phasedlcargs2,**phasedlckwargs)else:phasedlc2 = Noneif not self.readonly:tempcpdict[lspmethod] = {'':funcresults[''],'':funcresults[''],'':funcresults[''],'':funcresults[''],'':funcresults[''],'':(pgramres[lspmethod]['']),:phasedlc0,}if phasedlc1 is not None:tempcpdict[lspmethod][] = phasedlc1if phasedlc2 is not None:tempcpdict[lspmethod][] = phasedlc2savekwargs = {'':tempfpath,'':pickle.HIGHEST_PROTOCOL}savedcpf = yield self.executor.submit(_write_checkplot_picklefile,tempcpdict,**savekwargs)LOGGER.info('''' %(lctool, savedcpf))else:LOGGER.warning('''')periodogram = pgramres[lspmethod]['']phasedlc0plot = phasedlc0['']phasedlc0period = float(phasedlc0[''])phasedlc0epoch = float(phasedlc0[''])if phasedlc1 is not None:phasedlc1plot = phasedlc1['']phasedlc1period = float(phasedlc1[''])phasedlc1epoch = float(phasedlc1[''])if phasedlc2 is not None:phasedlc2plot = phasedlc2['']phasedlc2period = float(phasedlc2[''])phasedlc2epoch = float(phasedlc2[''])resultdict[''] = ''resultdict[''] = ('' %lctool)resultdict[''] = {'':objectid,lspmethod:{'':nbestperiods,'':nbestlspvals,'':periodogram,'':bestperiod,'':{'':phasedlc0plot,'':phasedlc0period,'':phasedlc0epoch,},}}if phasedlc1 is not None:resultdict[''][lspmethod][''] = {'':phasedlc1plot,'':phasedlc1period,'':phasedlc1epoch,}if phasedlc2 is not None:resultdict[''][lspmethod][''] = {'':phasedlc2plot,'':phasedlc2period,'':phasedlc2epoch,}self.write(resultdict)self.finish()elif lctool == '':lspmethod = lctoolargs[]periodind = lctoolargs[]if (not forcereload and lspmethod in tempcpdict andisinstance(tempcpdict[lspmethod], dict) andperiodind in tempcpdict[lspmethod] andisinstance(tempcpdict[lspmethod][periodind], dict)):phasedlc = tempcpdict[lspmethod][periodind]LOGGER.warning('''' %(lctool, tempfpath))resultdict[''] = ''resultdict[''] = ('''' %lctool)retkey = '' % periodindresultdict[''] = {'':objectid,lspmethod:{retkey:phasedlc}}self.write(resultdict)self.finish()else:lctoolkwargs[''] = ''lctoolargs[] = -if lctoolkwargs[''] is not None:stimes, smags, serrs = lcmath.sigclip_magseries(lctoolargs[],lctoolargs[],lctoolargs[],sigclip=lctoolkwargs[''],magsarefluxes=lctoolkwargs[''])else:stimes, smags, serrs = (lctoolargs[],lctoolargs[],lctoolargs[])if lctoolkwargs['']:wtimes, wmags, werrs = stimes, smags, serrsfiltermasks = [np.full_like(wtimes, False, dtype=np.bool_)]filterstr = lctoolkwargs['']filters = filterstr.split('')filters = [x.strip().lstrip('').rstrip('').strip()for x in filters]for filt in filters:try:thisfilt = filt.split('')if len(thisfilt) == :filt_lo = float(thisfilt[])filt_hi = float(thisfilt[])filtermasks.append(((wtimes -cptimes.min()) < filt_hi) &((wtimes -cptimes.min()) > filt_lo))elif (len(thisfilt) == andthisfilt[].strip() == ''):filt_lo = float(thisfilt[])filt_hi = float(thisfilt[])filtermasks.append(np.logical_not((((wtimes -cptimes.min()) < filt_hi) &((wtimes -cptimes.min()) > filt_lo))))else:continueexcept Exception as e:continueif len(filtermasks) > :filterind = np.column_stack(filtermasks)filterind = np.any(filterind, axis=)stimes = 
wtimes[filterind]smags = wmags[filterind]serrs = werrs[filterind]if lctoolkwargs['']:wtimes, wmags, werrs = stimes, smags, serrsfiltermasks = [np.full_like(wtimes, False, dtype=np.bool_)]filterstr = lctoolkwargs['']filters = filterstr.split('')filters = [x.strip().strip()for x in filters]for filt in filters:try:thisfilt = filt.split('')if len(thisfilt) == :filt_lo = float(thisfilt[])filt_hi = float(thisfilt[])filtermasks.append((wmags < filt_hi) &(wmags > filt_lo))elif (len(thisfilt) == andthisfilt[].strip() == ''):filt_lo = float(thisfilt[])filt_hi = float(thisfilt[])filtermasks.append(np.logical_not(((wmags < filt_hi) &(wmags > filt_lo))))else:continueexcept Exception as e:continueif len(filtermasks) > :filterind = np.column_stack(filtermasks)filterind = np.any(filterind, axis=)stimes = wtimes[filterind]smags = wmags[filterind]serrs = werrs[filterind]del lctoolkwargs['']del lctoolkwargs['']if lctoolargs[-] is None:LOGGER.warning('''')try:spfit = lcfit.spline_fit_magseries(stimes, smags, serrs, lctoolargs[], magsarefluxes=lctoolkwargs[''],sigclip=None,verbose=True)lctoolargs[-] = spfit['']['']if len(spfit['']['']) != :lctoolargs[-] = (spfit[''][''][])except Exception as e:LOGGER.exception('''')lctoolargs[-] = np.min(stimes)lctoolargs[] = stimeslctoolargs[] = smagslctoolargs[] = serrsdel lctoolkwargs['']lctoolfunction = CPTOOLMAP[lctool]['']funcresults = yield self.executor.submit(lctoolfunction,*lctoolargs,**lctoolkwargs)if not self.readonly:if (lspmethod in tempcpdict andisinstance(tempcpdict[lspmethod], dict)):if periodind in tempcpdict[lspmethod]:tempcpdict[lspmethod][periodind] = (funcresults)else:tempcpdict[lspmethod].update({periodind: funcresults})else:tempcpdict[lspmethod] = {periodind: funcresults}savekwargs = {'':tempfpath,'':pickle.HIGHEST_PROTOCOL}savedcpf = yield self.executor.submit(_write_checkplot_picklefile,tempcpdict,**savekwargs)LOGGER.info('''' %(lctool, savedcpf))else:LOGGER.warning('''')resultdict[''] = ''resultdict[''] = ('' %lctool)retkey = '' % periodindresultdict[''] = {'':objectid,lspmethod:{retkey:funcresults}}self.write(resultdict)self.finish()elif lctool == '':if (not forcereload and'' in tempcpdict andisinstance(tempcpdict[''], dict) and'' in tempcpdict[''] andisinstance(tempcpdict[''][''], dict)):LOGGER.warning('''' %(lctool, tempfpath))resultdict[''] = ''resultdict[''] = ('''' %lctool)resultdict[''] = {'':objectid,'': {'': (tempcpdict[''][''])}}self.write(resultdict)self.finish()else:lctoolfunction = CPTOOLMAP[lctool]['']funcresults = yield self.executor.submit(lctoolfunction,*lctoolargs,**lctoolkwargs)if not self.readonly:if ('' in tempcpdict andisinstance(tempcpdict[''], dict)):if '' in tempcpdict['']:tempcpdict[''][''] = (funcresults)else:tempcpdict[''].update({'': funcresults})else:tempcpdict[''] = {'':funcresults}savekwargs = {'':tempfpath,'':pickle.HIGHEST_PROTOCOL}savedcpf = yield self.executor.submit(_write_checkplot_picklefile,tempcpdict,**savekwargs)LOGGER.info('''' %(lctool, savedcpf))else:LOGGER.warning('''')resultdict[''] = ''resultdict[''] = ('' %lctool)resultdict[''] = {'':objectid,'':{'':funcresults}}self.write(resultdict)self.finish()elif lctool in ('',''):key1, key2 = reslocif (not forcereload andkey1 in tempcpdict andisinstance(tempcpdict[key1], dict) andkey2 in tempcpdict[key1] andisinstance(tempcpdict[key1][key2], dict)):LOGGER.warning('''' %(lctool, tempfpath))resultdict[''] = ''resultdict[''] = ('''' %lctool)resultdict[''] = {'':objectid,key1: {key2: (tempcpdict[key1][key2])}}self.write(resultdict)self.finish()else:lctoolfunction = 
CPTOOLMAP[lctool]['']lctoolkwargs[''] = StrIO()funcresults = yield self.executor.submit(lctoolfunction,*lctoolargs,**lctoolkwargs)fitfd = funcresults['']fitfd.seek()fitbin = fitfd.read()fitb64 = base64.b64encode(fitbin)fitfd.close()funcresults[''] = fitb64if not self.readonly:if (key1 in tempcpdict andisinstance(tempcpdict[key1], dict)):if key2 in tempcpdict[key1]:tempcpdict[key1][key2] = (funcresults)else:tempcpdict[key1].update({key2: funcresults})else:tempcpdict[key1] = {key2: funcresults}savekwargs = {'':tempfpath,'':pickle.HIGHEST_PROTOCOL}savedcpf = yield self.executor.submit(_write_checkplot_picklefile,tempcpdict,**savekwargs)LOGGER.info('''' %(lctool, savedcpf))else:LOGGER.warning('''')fitreturndict = {'':fitb64}resultdict[''] = ''resultdict[''] = ('' %lctool)resultdict[''] = {'':objectid,key1:{key2:fitreturndict}}self.write(resultdict)self.finish()elif lctool in ('','','',''):key1, key2 = reslocif (not forcereload andkey1 in tempcpdict andisinstance(tempcpdict[key1], dict) andkey2 in tempcpdict[key1] andisinstance(tempcpdict[key1][key2], dict)):LOGGER.warning('''' %(lctool, tempfpath))resultdict[''] = ''resultdict[''] = ('''' %lctool)phasedfitlc = tempcpdict[key1][key2]fitresults = {'':phasedfitlc[''][''],'':phasedfitlc[''][''],'':phasedfitlc[''][''],'':phasedfitlc[''],'':phasedfitlc[''],'':phasedfitlc[''],}if ('' in phasedfitlc[''][''] andphasedfitlc['']['']['']is not None):fitresults[''] = (phasedfitlc[''][''][''])resultdict[''] = {'':objectid,key1: {key2: (fitresults)}}self.write(resultdict)self.finish()else:lctoolfunction = CPTOOLMAP[lctool]['']funcresults = yield self.executor.submit(lctoolfunction,*lctoolargs,**lctoolkwargs)phasedlcargs = (None,'',-,cptimes,cpmags,cperrs,lctoolargs[], '')phasedlckwargs = {'':False,'':lctoolkwargs[''],'':'','':funcresults}phasedlc = yield self.executor.submit(_pkl_phased_magseries_plot,*phasedlcargs,**phasedlckwargs)if not self.readonly:if (key1 in tempcpdict andisinstance(tempcpdict[key1], dict)):if key2 in tempcpdict[key1]:tempcpdict[key1][key2] = (phasedlc)else:tempcpdict[key1].update({key2: phasedlc})else:tempcpdict[key1] = {key2: phasedlc}savekwargs = {'':tempfpath,'':pickle.HIGHEST_PROTOCOL}savedcpf = yield self.executor.submit(_write_checkplot_picklefile,tempcpdict,**savekwargs)LOGGER.info('''' %(lctool, savedcpf))else:LOGGER.warning('''')fitresults = {'':phasedlc[''][''],'':phasedlc[''][''],'':phasedlc[''][''],'':phasedlc[''],'':phasedlc[''],'':phasedlc[''],}if ('' in funcresults[''] andfuncresults[''][''] is not None):fitresults[''] = (funcresults[''][''])resultdict[''] = ''resultdict[''] = ('' %lctool)resultdict[''] = {'':objectid,key1:{key2:fitresults}}self.write(resultdict)self.finish()elif lctool == '':fitmethod, periodind = lctoolargselif lctool == '':if os.path.exists(tempfpath):os.remove(tempfpath)LOGGER.warning('''' %(tempfpath, cpfpath))resultdict[''] = ''else:resultdict[''] = ''LOGGER.warning('''''' %(tempfpath, cpfpath))resultdict[''] = ('')resultdict[''] = {'':cpobjectid}self.write(resultdict)self.finish()elif lctool == '':target = self.get_argument('',None)if target is not None:target = xhtml_escape(target)if (target not in CPTOOLMAP ortarget == '' ortarget == '' ortarget == '' ortarget == ''):LOGGER.error(\"\" % target)resultdict[''] = ''resultdict[''] = (\"\" % target)resultdict[''] = {'':cpobjectid}self.write(resultdict)raise tornado.web.Finish()targetloc = CPTOOLMAP[target]['']else:passelse:LOGGER.error('' % lctool)resultdict[''] = ''resultdict[''] = ('' % lctool)resultdict[''] = 
{'':cpobjectid}self.write(resultdict)raise tornado.web.Finish()else:LOGGER.error('' % self.cpfile)resultdict = {'':'','':\"\",'':self.readonly,'':None}self.write(resultdict)raise tornado.web.Finish()else:resultdict = {'':'','':'','':self.readonly,'':None}self.write(resultdict)raise tornado.web.Finish()", "docstring": "This handles a GET request to run a specified LC tool.\n\n Parameters\n ----------\n\n cpfile : str\n This is the checkplot file to run the tool on.\n\n Returns\n -------\n\n str\n Returns a JSON response.\n\n Notes\n -----\n\n The URI structure is::\n\n /tools/?[args]\n\n where args are::\n\n ?lctool=&argkey1=argval1&argkey2=argval2&...\n\n &forcereload=true <- if this is present, then reload values from\n original checkplot.\n\n &objectid=\n\n `lctool` is one of the strings below\n\n Period search functions::\n\n psearch-gls: run Lomb-Scargle with given params\n psearch-bls: run BLS with given params\n psearch-pdm: run phase dispersion minimization with given params\n psearch-aov: run analysis-of-variance with given params\n psearch-mav: run analysis-of-variance (multi-harm) with given params\n psearch-acf: run ACF period search with given params\n psearch-win: run spectral window function search with given params\n\n Arguments recognized by all period-search functions are::\n\n startp=XX\n endp=XX\n magsarefluxes=True|False\n autofreq=True|False\n stepsize=XX\n\n Variability characterization functions::\n\n var-varfeatures: gets the variability features from the checkplot or\n recalculates if they're not present\n\n var-prewhiten: pre-whitens the light curve with a sinusoidal signal\n\n var-masksig: masks a given phase location with given width from the\n light curve\n\n Light curve manipulation functions ::\n\n phasedlc-newplot: make phased LC with new provided period/epoch\n lcfit-fourier: fit a Fourier function to the phased LC\n lcfit-spline: fit a spline function to the phased LC\n lcfit-legendre: fit a Legendre polynomial to the phased LC\n lcfit-savgol: fit a Savitsky-Golay polynomial to the phased LC\n\n FIXME: figure out how to cache the results of these functions\n temporarily and save them back to the checkplot after we click on save\n in the frontend.\n\n TODO: look for a checkplot-blah-blah.pkl-cps-processing file in the same\n place as the usual pickle file. if this exists and is newer than the pkl\n file, load it instead. Or have a checkplotdict['cpservertemp'] item.", "id": "f14711:c4:m1"} {"signature": "def post(self, cpfile):", "body": "", "docstring": "This handles a POST request.\n\n TODO: implement this.\n\n This will save the results of the previous tool run to the checkplot\n file and the JSON filelist.\n\n This is only called when the user explicitly clicks on the 'permanently\n update checkplot with results' button. 
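(Editor's illustrative sketch for the `/tools/` GET interface documented in the previous record, f14711:c4:m1; this is a hedged client-side example, not part of astrobase. It assumes a checkplotserver instance running locally on the documented default port 5225 with the default base URL of '/', and a hypothetical checkplot pickle filename that is present in the server's current checkplot list.)::

    import base64
    from urllib.parse import quote
    from urllib.request import urlopen

    # hypothetical checkplot pickle filename; the handler expects it
    # base64-encoded and URL-escaped in the /tools/<cpfile> route
    cpfile = 'checkplot-HAT-123-0001234.pkl'
    cpkey = quote(base64.b64encode(cpfile.encode()).decode())

    # run a Lomb-Scargle period search (lctool=psearch-gls) with the
    # period-search arguments listed in the docstring above
    url = ('http://127.0.0.1:5225/tools/%s'
           '?lctool=psearch-gls&startp=0.1&endp=10.0'
           '&magsarefluxes=False&forcereload=true' % cpkey)
    with urlopen(url) as resp:
        lctool_json = resp.read().decode()   # JSON response from the handler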
If the server is in readonly\n mode, this has no effect.\n\n This will copy everything from the '.pkl-cpserver-temp' file to the\n actual checkplot pickle and then remove that file.", "id": "f14711:c4:m2"} {"signature": "def initialize(self, executor, secret):", "body": "self.executor = executorself.secret = secret", "docstring": "This handles initial setup of the `RequestHandler`.", "id": "f14711:c5:m0"} {"signature": "@gen.coroutinedef get(self):", "body": "provided_key = self.get_argument('',default=None)if not provided_key:LOGGER.error('')retdict = {'':'','':(''''),'':None,'':True}self.set_status()self.write(retdict)raise tornado.web.Finish()else:provided_key = xhtml_escape(provided_key)if not _time_independent_equals(provided_key,self.secret):LOGGER.error('')retdict = {'':'','':(''''),'':None,'':True}self.set_status()self.write(retdict)raise tornado.web.Finish()LOGGER.info('')checkplotfname = self.get_argument('', default=None)if checkplotfname:try:cpfpath = xhtml_escape(base64.b64decode(url_unescape(checkplotfname)))except Exception as e:msg = ''LOGGER.error(msg)resultdict = {'':'','':msg,'':None,'':True}self.set_status()self.write(resultdict)raise tornado.web.Finish()LOGGER.info('' % cpfpath)if not os.path.exists(cpfpath):msg = \"\" % cpfpathLOGGER.error(msg)resultdict = {'':'','':msg,'':None,'':True}self.set_status()self.write(resultdict)raise tornado.web.Finish()cpdict = yield self.executor.submit(_read_checkplot_picklefile, cpfpath)LOGGER.info('' % cpfpath)objectid = cpdict['']objectinfo = cpdict['']varinfo = cpdict['']if '' in cpdict:pfmethods = cpdict['']else:pfmethods = []for pfm in PFMETHODS:if pfm in cpdict:pfmethods.append(pfm)neighbors = []if ('' in cpdict andcpdict[''] is not None andlen(cpdict[''])) > :nbrlist = cpdict['']for nbr in nbrlist:if '' in nbr:nbrmagdiffs = nbr['']else:nbrmagdiffs = Noneif '' in nbr:nbrcolordiffs = nbr['']else:nbrcolordiffs = Nonethisnbrdict = {'':nbr[''],'':{'':nbr[''],'':nbr[''],'':nbr[''],'':nbr[''],'':nbr[''],'':nbrmagdiffs,'':nbrcolordiffs}}try:nbr_magseries = nbr['']['']thisnbrdict[''] = nbr_magseriesexcept Exception as e:LOGGER.error(\"\"\"\"% (nbr[''],cpdict['']))try:for pfm in pfmethods:if pfm in nbr:thisnbrdict[pfm] = {'':nbr[pfm][][''],'':nbr[pfm][][''],'':nbr[pfm][]['']}except Exception as e:LOGGER.error(\"\"\"\"% (nbr[''],cpdict['']))neighbors.append(thisnbrdict)if '' in cpdict:objectcomments = cpdict['']else:objectcomments = Noneif '' in cpdict:objectxmatch = cpdict['']else:objectxmatch = Noneif '' in cpdict:colormagdiagram = cpdict['']else:colormagdiagram = Noneif '' in cpdict:finderchart = cpdict['']else:finderchart = Noneif ('' in cpdict andisinstance(cpdict[''], dict) and'' in cpdict['']):magseries = cpdict['']['']time0 = cpdict[''][''].min()magseries_ndet = cpdict[''][''].sizeelse:magseries = Nonetime0 = magseries_ndet = LOGGER.warning(\"\"\"\")if '' in cpdict:cpstatus = cpdict['']else:cpstatus = ''if '' in cpdict:uifilters = cpdict['']else:uifilters = {'':None,'':None,'':None}resultdict = {'':'','':'' % os.path.basename(cpfpath),'':True,'':{'':'' % time0,'':objectid,'':objectinfo,'':colormagdiagram,'':objectcomments,'':varinfo,'':uifilters,'':neighbors,'':objectxmatch,'':finderchart,'':magseries,'':magseries_ndet,'':cpstatus,'':pfmethods}}for key in pfmethods:periodogram = cpdict[key]['']if in cpdict[key] and isinstance(cpdict[key][], dict):phasedlc0plot = cpdict[key][]['']phasedlc0period = float(cpdict[key][][''])phasedlc0epoch = float(cpdict[key][][''])else:phasedlc0plot = Nonephasedlc0period = Nonephasedlc0epoch = Noneif 
( in cpdict[key] andisinstance(cpdict[key][], dict) and'' in cpdict[key][] andisinstance(cpdict[key][][''], dict)):phasedlc0fit = {'':(cpdict[key][]['']['']),'':(cpdict[key][]['']['']),'':(cpdict[key][]['']['']),'':(cpdict[key][][''][''][''] if'' incpdict[key][][''][''] else None)}else:phasedlc0fit = Noneif in cpdict[key] and isinstance(cpdict[key][], dict):phasedlc1plot = cpdict[key][]['']phasedlc1period = float(cpdict[key][][''])phasedlc1epoch = float(cpdict[key][][''])else:phasedlc1plot = Nonephasedlc1period = Nonephasedlc1epoch = Noneif ( in cpdict[key] andisinstance(cpdict[key][], dict) and'' in cpdict[key][] andisinstance(cpdict[key][][''], dict)):phasedlc1fit = {'':(cpdict[key][]['']['']),'':(cpdict[key][]['']['']),'':(cpdict[key][]['']['']),'':(cpdict[key][][''][''][''] if'' incpdict[key][][''][''] else None)}else:phasedlc1fit = Noneif in cpdict[key] and isinstance(cpdict[key][], dict):phasedlc2plot = cpdict[key][]['']phasedlc2period = float(cpdict[key][][''])phasedlc2epoch = float(cpdict[key][][''])else:phasedlc2plot = Nonephasedlc2period = Nonephasedlc2epoch = Noneif ( in cpdict[key] andisinstance(cpdict[key][], dict) and'' in cpdict[key][] andisinstance(cpdict[key][][''], dict)):phasedlc2fit = {'':(cpdict[key][]['']['']),'':(cpdict[key][]['']['']),'':(cpdict[key][]['']['']),'':(cpdict[key][][''][''][''] if'' incpdict[key][][''][''] else None)}else:phasedlc2fit = Noneresultdict[''][key] = {'':cpdict[key][''],'':periodogram,'':cpdict[key][''],'':{'':phasedlc0plot,'':phasedlc0period,'':phasedlc0epoch,'':phasedlc0fit,},'':{'':phasedlc1plot,'':phasedlc1period,'':phasedlc1epoch,'':phasedlc1fit,},'':{'':phasedlc2plot,'':phasedlc2period,'':phasedlc2epoch,'':phasedlc2fit,},}self.set_header('','')self.write(resultdict)self.finish()else:LOGGER.error('')resultdict = {'':'','':\"\",'':True,'':None}self.status()self.write(resultdict)self.finish()", "docstring": "This handles GET requests.\n\n Returns the requested checkplot pickle's information as JSON.\n\n Requires a pre-shared secret `key` argument for the operation to\n complete successfully. This is obtained from a command-line argument.", "id": "f14711:c5:m1"} {"signature": "def _dict_get(datadict, keylist):", "body": "return reduce(getitem, keylist, datadict)", "docstring": "This gets a requested dict key by walking the dict.\n\n Parameters\n ----------\n\n datadict : dict\n The dict to get the specified key from.\n\n keylist : list of str\n This is a list of keys to use to walk the dict and get to the key that\n is provided as the last element in `keylist`. 
For example::\n\n keylist = ['key1','key2','key3']\n\n will walk `datadict` recursively to get to `datadict[key1][key2][key3]`.\n\n Returns\n -------\n\n object\n The dict value of the specified key address.", "id": "f14713:m0"} {"signature": "def checkplot_infokey_worker(task):", "body": "cpf, keys = taskcpd = _read_checkplot_picklefile(cpf)resultkeys = []for k in keys:try:resultkeys.append(_dict_get(cpd, k))except Exception as e:resultkeys.append(np.nan)return resultkeys", "docstring": "This gets the required keys from the requested file.\n\n Parameters\n ----------\n\n task : tuple\n Task is a two element tuple::\n\n - task[0] is the dict to work on\n\n - task[1] is a list of lists of str indicating all the key address to\n extract items from the dict for\n\n Returns\n -------\n\n list\n This is a list of all of the items at the requested key addresses.", "id": "f14713:m1"} {"signature": "def main():", "body": "aparser = argparse.ArgumentParser(epilog=PROGEPILOG,description=PROGDESC,formatter_class=argparse.RawDescriptionHelpFormatter,)aparser.add_argument('',action='',choices=['',''],type=str,help=(\"\"\"\"))aparser.add_argument('',action='',type=str,help=(\"\"))aparser.add_argument('',action='',default='',type=str,help=(\"\"\"\"\"\"))aparser.add_argument('',action='',type=str,help=(\"\"))aparser.add_argument('',action='',type=str,help=(\"\"\"\"\"\"\"\"))aparser.add_argument('',action='',type=int,default=,help=(\"\"\"\"\"\"\"\"))aparser.add_argument('',action='',type=str,help=(\"\"\"\"\"\"\"\"\"\"))aparser.add_argument('',action='',type=int,default=int(CPU_COUNT/),help=(\"\"\"\"\"\"))args = aparser.parse_args()checkplotbasedir = args.cpdirfileglob = args.searchsplitout = args.splitoutoutprefix = args.outprefix if args.outprefix else Noneif args.sortby:sortkey, sortorder = args.sortby.split('')if outprefix is None:outprefix = args.sortbyelse:sortkey, sortorder = '', ''if args.filterby:filterkeys, filterconditions = [], []for filt in args.filterby:f = filt.split('')filterkeys.append(f[])filterconditions.append(f[])if outprefix is None:outprefix = ''.join(args.filterby)else:outprefix = '' % (''.join(args.filterby), outprefix)else:filterkeys, filterconditions = None, Noneif args.cptype == '':checkplotext = ''elif args.cptype == '':checkplotext = ''else:print(\"\"% args.cptype)sys.exit()currdir = os.getcwd()checkplotglob = os.path.join(checkplotbasedir,'' % (fileglob, checkplotext))print('' % checkplotglob)searchresults = glob.glob(checkplotglob)if searchresults:print('' %(len(searchresults), checkplotbasedir))sortdone = Falsefilterok = Falsefilterstatements = []if ((args.cptype == '') and((sortkey and sortorder) or (filterkeys and filterconditions))):keystoget = []if (sortkey and sortorder):print('' %(sortkey, sortorder))sortkeys = sortkey.split('')if sys.version_info[:] < (,):sortkeys = [(int(x) if x.isdigit() else x)for x in sortkeys]else:sortkeys = [(int(x) if x.isdecimal() else x)for x in sortkeys]keystoget.append(sortkeys)if (filterkeys and filterconditions):print('' %(filterkeys, filterconditions))for fdk in filterkeys:fdictkeys = fdk.split('')fdictkeys = [(int(x) if x.isdecimal() else x)for x in fdictkeys]keystoget.append(fdictkeys)print(''% args.maxkeyworkers)pool = mp.Pool(args.maxkeyworkers)tasks = [(x, keystoget) for x in searchresults]keytargets = pool.map(checkplot_infokey_worker, tasks)pool.close()pool.join()if (len(keystoget) > and(sortkey and sortorder) and(filterkeys and filterconditions)):sorttargets = [x[] for x in keytargets]filtertargets = [x[:] for x in 
keytargets]elif (len(keystoget) > and(not (sortkey and sortorder)) and(filterkeys and filterconditions)):sorttargets = Nonefiltertargets = keytargetselif (len(keystoget) == and(sortkey and sortorder) and(not(filterkeys and filterconditions))):sorttargets = keytargetsfiltertargets = Noneelif (len(keystoget) == and(filterkeys and filterconditions) and(not(sortkey and sortorder))):sorttargets = Nonefiltertargets = keytargetssearchresults = np.array(searchresults)if sorttargets:sorttargets = np.ravel(np.array(sorttargets))sortind = np.argsort(sorttargets)if sortorder == '':sortind = sortind[::-]searchresults = searchresults[sortind]sortdone = Trueif filtertargets:finalfilterind = []for ind, fcond in enumerate(filterconditions):thisftarget = np.array([x[ind] for x in filtertargets])if (sortdone):thisftarget = thisftarget[sortind]try:foperator, foperand = fcond.split('')foperator = FILTEROPS[foperator]filterstr = ('' %(foperator, foperand))filterind = eval(filterstr)finalfilterind.append(filterind)filterstatements.append('' % (filterkeys[ind],foperator,foperand))except Exception as e:print('''' %(args.filterby[ind], e))print('')finalfilterind = np.column_stack(finalfilterind)finalfilterind = np.all(finalfilterind, axis=)filterresults = searchresults[finalfilterind]if filterresults.size > :print('' %(repr(args.filterby), filterresults.size))searchresults = filterresultsfilterok = Trueelse:print('' %(repr(args.filterby), ))print('')searchresults = searchresults.tolist()if not(sortkey and sortorder):print('''''''')searchresults = sorted(searchresults)sortkey = ''sortorder = ''nchunks = int(len(searchresults)/splitout) + searchchunks = [searchresults[x*splitout:x*splitout+splitout] for xin range(nchunks)]if nchunks > :print('''' % (splitout, nchunks))if (filterkeys and filterconditions) and not filterok:filterstatements = []for chunkind, chunk in enumerate(searchchunks):outjson = os.path.abspath(os.path.join(currdir,'' % (('' % outprefix if outprefix is not None else ''),('' % chunkind if len(searchchunks) > else ''),)))outjson = outjson.replace('','')outjson = outjson.replace('','')if os.path.exists(outjson):if sys.version_info[:] < (,):answer = raw_input('''''''' %outjson)else:answer = input('''''''' %outjson)if answer and answer == '':with open(outjson,'') as outfd:print('''' % outjson)outdict = {'':chunk,'':len(chunk),'':sortkey,'':sortorder,'':filterstatements}json.dump(outdict,outfd)else:print('''')with open(outjson,'') as infd:indict = json.load(infd)indict[''] = chunkindict[''] = len(chunk)indict[''] = sortkeyindict[''] = sortorderindict[''] = filterstatementswith open(outjson,'') as outfd:json.dump(indict, outfd)else:with open(outjson,'') as outfd:outdict = {'':chunk,'':len(chunk),'':sortkey,'':sortorder,'':filterstatements}json.dump(outdict,outfd)if os.path.exists(outjson):print('' % outjson)else:print('')else:print('' % checkplotbasedir)", "docstring": "This is the main function of this script.\n\n The current script args are shown below ::\n\n Usage: checkplotlist [-h] [--search SEARCH] [--sortby SORTBY]\n [--filterby FILTERBY] [--splitout SPLITOUT]\n [--outprefix OUTPREFIX] [--maxkeyworkers MAXKEYWORKERS]\n {pkl,png} cpdir\n\n This makes a checkplot file list for use with the checkplot-viewer.html\n (for checkplot PNGs) or the checkplotserver.py (for checkplot pickles)\n webapps.\n\n positional arguments:\n {pkl,png} type of checkplot to search for: pkl -> checkplot\n pickles, png -> checkplot PNGs\n cpdir directory containing the checkplots to process\n\n optional arguments:\n -h, 
--help show this help message and exit\n --search SEARCH file glob prefix to use when searching for checkplots,\n default: '*checkplot*', (the extension is added\n automatically - .png or .pkl)\n --sortby SORTBY the sort key and order to use when sorting\n --filterby FILTERBY the filter key and condition to use when filtering.\n you can specify this multiple times to filter by\n several keys at once. all filters are joined with a\n logical AND operation in the order they're given.\n --splitout SPLITOUT if there are more than SPLITOUT objects in the target\n directory (default: 5000), checkplotlist will split\n the output JSON into multiple files. this helps keep\n the checkplotserver webapp responsive.\n --outprefix OUTPREFIX\n a prefix string to use for the output JSON file(s).\n use this to separate out different sort orders or\n filter conditions, for example. if this isn't\n provided, but --sortby or --filterby are, will use\n those to figure out the output files' prefixes\n --maxkeyworkers MAXKEYWORKERS\n the number of parallel workers that will be launched\n to retrieve checkplot key values used for sorting and\n filtering (default: 2)", "id": "f14713:m2"} {"signature": "def collect_nonperiodic_features(featuresdir,magcol,outfile,pklglob='',featurestouse=NONPERIODIC_FEATURES_TO_COLLECT,maxobjects=None,labeldict=None,labeltype='',):", "body": "pklist = glob.glob(os.path.join(featuresdir, pklglob))if maxobjects:pklist = pklist[:maxobjects]if TQDM:listiterator = tqdm(pklist)else:listiterator = pklistfeature_dict = {'':[],'':magcol, '':[]}LOGINFO('' % magcol)for pkl in listiterator:with open(pkl,'') as infd:varf = pickle.load(infd)objectid = varf['']if objectid not in feature_dict['']:feature_dict[''].append(objectid)thisfeatures = varf[magcol]if featurestouse and len(featurestouse) > :featurestoget = featurestouseelse:featurestoget = NONPERIODIC_FEATURES_TO_COLLECTfor feature in featurestoget:if ((feature not in feature_dict['']) and(feature in thisfeatures)):feature_dict[''].append(feature)feature_dict[feature] = []if feature in thisfeatures:feature_dict[feature].append(thisfeatures[feature])for feat in feature_dict['']:feature_dict[feat] = np.array(feature_dict[feat])feature_dict[''] = np.array(feature_dict[''])feature_array = np.column_stack([feature_dict[feat] for feat infeature_dict['']])feature_dict[''] = feature_arrayif isinstance(labeldict, dict):labelarray = np.zeros(feature_dict[''].size, dtype=np.int64)for ind, objectid in enumerate(feature_dict['']):if objectid in labeldict:if labeltype == '':if labeldict[objectid]:labelarray[ind] = elif labeltype == '':labelarray[ind] = labeldict[objectid]feature_dict[''] = labelarrayfeature_dict[''] = {'':pklglob,'':featurestouse,'':maxobjects,'':labeltype}with open(outfile,'') as outfd:pickle.dump(feature_dict, outfd, pickle.HIGHEST_PROTOCOL)return feature_dict", "docstring": "This collects variability features into arrays for use with the classifer.\n\n Parameters\n ----------\n\n featuresdir : str\n This is the directory where all the varfeatures pickles are. Use\n `pklglob` to specify the glob to search for. The `varfeatures` pickles\n contain objectids, a light curve magcol, and features as dict\n key-vals. 
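(Editor's aside: an illustrative call to `collect_nonperiodic_features` using the signature shown in this record. The directory, magcol name, objectids, and the 'binary' labeltype value are assumptions, since the default strings are redacted here; the function is assumed to have been imported from its astrobase module.)::

    # objectid -> label mapping, as described for the `labeldict` kwarg below
    labeldict = {'HAT-123-0001234': 1, 'HAT-123-0005678': 0}

    feature_dict = collect_nonperiodic_features(
        '/path/to/varfeatures-pickles',        # featuresdir with varfeatures pickles
        'aep_000',                             # hypothetical magcol key in each pickle
        'collected-nonperiodic-features.pkl',  # output pickle path
        maxobjects=None,
        labeldict=labeldict,
        labeltype='binary',                    # assumed label-type string
    )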
The :py:mod:`astrobase.lcproc.lcvfeatures` module can be used\n to produce these.\n\n magcol : str\n This is the key in each varfeatures pickle corresponding to the magcol\n of the light curve the variability features were extracted from.\n\n outfile : str\n This is the filename of the output pickle that will be written\n containing a dict of all the features extracted into np.arrays.\n\n pklglob : str\n This is the UNIX file glob to use to search for varfeatures pickle files\n in `featuresdir`.\n\n featurestouse : list of str\n Each varfeatures pickle can contain any combination of non-periodic,\n stellar, and periodic features; these must have the same names as\n elements in the list of strings provided in `featurestouse`. This tries\n to get all the features listed in NONPERIODIC_FEATURES_TO_COLLECT by\n default. If `featurestouse` is provided as a list, gets only the\n features listed in this kwarg instead.\n\n maxobjects : int or None\n The controls how many pickles from the featuresdir to process. If None,\n will process all varfeatures pickles.\n\n labeldict : dict or None\n If this is provided, it must be a dict with the following key:val list::\n\n '':
    or
    .\n\n Call withAttribute with a series of attribute names and values. Specify the list\n of filter attributes names and values as:\n - keyword arguments, as in (class=\"Customer\",align=\"right\"), or\n - a list of name-value tuples, as in ( (\"ns1:class\", \"Customer\"), (\"ns2:align\",\"right\") )\n For attribute names with a namespace prefix, you must use the second form. Attribute\n names are matched insensitive to upper/lower case.\n\n To verify that the attribute exists, but without specifying a value, pass\n withAttribute.ANY_VALUE as the value.", "id": "f17196:m28"} {"signature": "def operatorPrecedence( baseExpr, opList ):", "body": "ret = Forward()lastExpr = baseExpr | ( Suppress('') + ret + Suppress('') )for i,operDef in enumerate(opList):opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:]if arity == :if opExpr is None or len(opExpr) != :raise ValueError(\"\")opExpr1, opExpr2 = opExprthisExpr = Forward()if rightLeftAssoc == opAssoc.LEFT:if arity == :matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )elif arity == :if opExpr is not None:matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )else:matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )elif arity == :matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) +Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )else:raise ValueError(\"\")elif rightLeftAssoc == opAssoc.RIGHT:if arity == :if not isinstance(opExpr, Optional):opExpr = Optional(opExpr)matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )elif arity == :if opExpr is not None:matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )else:matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )elif arity == :matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) +Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )else:raise ValueError(\"\")else:raise ValueError(\"\")if pa:matchExpr.setParseAction( pa )thisExpr << ( matchExpr | lastExpr )lastExpr = thisExprret << lastExprreturn ret", "docstring": "Helper method for constructing grammars of expressions made up of\n operators working in a precedence hierarchy. Operators may be unary or\n binary, left- or right-associative. 
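(Editor's illustration of `operatorPrecedence`, f17196:m29: a two-level arithmetic grammar in which '*' binds more tightly than '+', both left-associative. Precedence levels are listed from highest to lowest.)::

    from pyparsing import operatorPrecedence, opAssoc, Word, nums

    integer = Word(nums)
    arith = operatorPrecedence(integer, [
        ('*', 2, opAssoc.LEFT),   # higher precedence: listed first
        ('+', 2, opAssoc.LEFT),
    ])
    print(arith.parseString('1+2*3').asList())
    # -> [['1', '+', ['2', '*', '3']]]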
Parse actions can also be attached\n to operator expressions.\n\n Parameters:\n - baseExpr - expression representing the most basic element for the nested\n - opList - list of tuples, one for each operator precedence level in the\n expression grammar; each tuple is of the form\n (opExpr, numTerms, rightLeftAssoc, parseAction), where:\n - opExpr is the pyparsing expression for the operator;\n may also be a string, which will be converted to a Literal;\n if numTerms is 3, opExpr is a tuple of two expressions, for the\n two operators separating the 3 terms\n - numTerms is the number of terms for this operator (must\n be 1, 2, or 3)\n - rightLeftAssoc is the indicator whether the operator is\n right or left associative, using the pyparsing-defined\n constants opAssoc.RIGHT and opAssoc.LEFT.\n - parseAction is the parse action to be associated with\n expressions matching this operator expression (the\n parse action tuple member may be omitted)", "id": "f17196:m29"} {"signature": "def nestedExpr(opener=\"\", closer=\"\", content=None, ignoreExpr=quotedString):", "body": "if opener == closer:raise ValueError(\"\")if content is None:if isinstance(opener,str) and isinstance(closer,str):if ignoreExpr is not None:content = (Combine(OneOrMore(~ignoreExpr +CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=))).setParseAction(lambda t:t[].strip()))else:content = (empty+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS).setParseAction(lambda t:t[].strip()))else:raise ValueError(\"\")ret = Forward()if ignoreExpr is not None:ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )else:ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )return ret", "docstring": "Helper method for defining nested lists enclosed in opening and closing\n delimiters (\"(\" and \")\" are the default).\n\n Parameters:\n - opener - opening character for a nested list (default=\"(\"); can also be a pyparsing expression\n - closer - closing character for a nested list (default=\")\"); can also be a pyparsing expression\n - content - expression for items within the nested lists (default=None)\n - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)\n\n If an expression is not provided for the content argument, the nested\n expression will capture all whitespace-delimited content between delimiters\n as a list of separate values.\n\n Use the ignoreExpr argument to define expressions that may contain\n opening or closing characters that should not be treated as opening\n or closing characters for nesting, such as quotedString or a comment\n expression. 
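As an illustrative aside (not one of the extracted records), the operatorPrecedence helper documented just above can be exercised with a small arithmetic grammar. This sketch assumes pyparsing is importable as a standalone module and still exposes the legacy operatorPrecedence name (later renamed infixNotation); the grammar and sample input are invented.

from pyparsing import Word, nums, oneOf, opAssoc, operatorPrecedence

integer = Word(nums)
arith_expr = operatorPrecedence(
    integer,
    [
        ("-", 1, opAssoc.RIGHT),          # unary minus
        (oneOf("* /"), 2, opAssoc.LEFT),  # multiplication / division
        (oneOf("+ -"), 2, opAssoc.LEFT),  # addition / subtraction
    ],
)

print(arith_expr.parseString("9 + 2 * 3"))
# tokens come back grouped by precedence level, e.g. [['9', '+', ['2', '*', '3']]]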
Specify multiple expressions using an Or or MatchFirst.\n The default is quotedString, but if no expressions are to be ignored,\n then pass None for this argument.", "id": "f17196:m30"} {"signature": "def indentedBlock(blockStatementExpr, indentStack, indent=True):", "body": "def checkPeerIndent(s,l,t):if l >= len(s): returncurCol = col(l,s)if curCol != indentStack[-]:if curCol > indentStack[-]:raise ParseFatalException(s,l,\"\")raise ParseException(s,l,\"\")def checkSubIndent(s,l,t):curCol = col(l,s)if curCol > indentStack[-]:indentStack.append( curCol )else:raise ParseException(s,l,\"\")def checkUnindent(s,l,t):if l >= len(s): returncurCol = col(l,s)if not(indentStack and curCol < indentStack[-] and curCol <= indentStack[-]):raise ParseException(s,l,\"\")indentStack.pop()NL = OneOrMore(LineEnd().setWhitespaceChars(\"\").suppress())INDENT = Empty() + Empty().setParseAction(checkSubIndent)PEER = Empty().setParseAction(checkPeerIndent)UNDENT = Empty().setParseAction(checkUnindent)if indent:smExpr = Group( Optional(NL) +FollowedBy(blockStatementExpr) +INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)else:smExpr = Group( Optional(NL) +(OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )blockStatementExpr.ignore(\"\" + LineEnd())return smExpr", "docstring": "Helper method for defining space-delimited indentation blocks, such as\n those used to define block statements in Python source code.\n\n Parameters:\n - blockStatementExpr - expression defining syntax of statement that\n is repeated within the indented block\n - indentStack - list created by caller to manage indentation stack\n (multiple statementWithIndentedBlock expressions within a single grammar\n should share a common indentStack)\n - indent - boolean indicating whether block must be indented beyond the\n the current level; set to False for block of left-most statements\n (default=True)\n\n A valid block must contain at least one blockStatement.", "id": "f17196:m31"} {"signature": "def __getattr__( self, aname ):", "body": "if( aname == \"\" ):return lineno( self.loc, self.pstr )elif( aname in (\"\", \"\") ):return col( self.loc, self.pstr )elif( aname == \"\" ):return line( self.loc, self.pstr )else:raise AttributeError(aname)", "docstring": "supported attributes by name are:\n - lineno - returns the line number of the exception text\n - col - returns the column number of the exception text\n - line - returns the line containing the exception text", "id": "f17196:c1:m1"} {"signature": "def markInputline( self, markerString = \"\" ):", "body": "line_str = self.lineline_column = self.column - if markerString:line_str = \"\".join( [line_str[:line_column],markerString, line_str[line_column:]])return line_str.strip()", "docstring": "Extracts the exception line from the input string, and marks\n the location of the exception with a special symbol.", "id": "f17196:c1:m4"} {"signature": "def keys( self ):", "body": "return list(self.__tokdict.keys())", "docstring": "Returns all named result keys.", "id": "f17196:c7:m10"} {"signature": "def pop( self, index=- ):", "body": "ret = self[index]del self[index]return ret", "docstring": "Removes and returns item at specified index (default=last).\n Will work with either numeric indices or dict-key indicies.", "id": "f17196:c7:m11"} {"signature": "def get(self, key, defaultValue=None):", "body": "if key in self:return self[key]else:return defaultValue", "docstring": "Returns named result matching the given key, or if there is no\n such name, then returns 
the given defaultValue or None if no\n defaultValue is specified.", "id": "f17196:c7:m12"} {"signature": "def items( self ):", "body": "return [(k,self[k]) for k in self.__tokdict]", "docstring": "Returns all named result keys and values as a list of tuples.", "id": "f17196:c7:m14"} {"signature": "def values( self ):", "body": "return [ v[-][] for v in list(self.__tokdict.values()) ]", "docstring": "Returns all named result values.", "id": "f17196:c7:m15"} {"signature": "def asList( self ):", "body": "out = []for res in self.__toklist:if isinstance(res,ParseResults):out.append( res.asList() )else:out.append( res )return out", "docstring": "Returns the parse results as a nested list of matching tokens, all converted to strings.", "id": "f17196:c7:m22"} {"signature": "def asDict( self ):", "body": "return dict( list(self.items()) )", "docstring": "Returns the named parse results as dictionary.", "id": "f17196:c7:m23"} {"signature": "def copy( self ):", "body": "ret = ParseResults( self.__toklist )ret.__tokdict = self.__tokdict.copy()ret.__parent = self.__parentret.__accumNames.update( self.__accumNames )ret.__name = self.__namereturn ret", "docstring": "Returns a new copy of a ParseResults object.", "id": "f17196:c7:m24"} {"signature": "def asXML( self, doctag=None, namedItemsOnly=False, indent=\"\", formatted=True ):", "body": "nl = \"\"out = []namedItems = dict( [ (v[],k) for (k,vlist) in list(self.__tokdict.items())for v in vlist ] )nextLevelIndent = indent + \"\"if not formatted:indent = \"\"nextLevelIndent = \"\"nl = \"\"selfTag = Noneif doctag is not None:selfTag = doctagelse:if self.__name:selfTag = self.__nameif not selfTag:if namedItemsOnly:return \"\"else:selfTag = \"\"out += [ nl, indent, \"\", selfTag, \">\" ]worklist = self.__toklistfor i,res in enumerate(worklist):if isinstance(res,ParseResults):if i in namedItems:out += [ res.asXML(namedItems[i],namedItemsOnly and doctag is None,nextLevelIndent,formatted)]else:out += [ res.asXML(None,namedItemsOnly and doctag is None,nextLevelIndent,formatted)]else:resTag = Noneif i in namedItems:resTag = namedItems[i]if not resTag:if namedItemsOnly:continueelse:resTag = \"\"xmlBodyText = xml.sax.saxutils.escape(_ustr(res))out += [ nl, nextLevelIndent, \"\", resTag, \">\",xmlBodyText,\"\", resTag, \">\" ]out += [ nl, indent, \"\", selfTag, \">\" ]return \"\".join(out)", "docstring": "Returns the parse results as XML. 
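The ParseResults accessors described in the records above (keys, get, items, values, asList, asDict) can be seen together in a short assumed sketch; the grammar and sample input are invented for illustration.

from pyparsing import Word, alphas, nums

entry = Word(alphas)("name") + Word(nums)("age")
result = entry.parseString("alice 42")

print(result.asList())               # ['alice', '42']
print(result.asDict())               # {'name': 'alice', 'age': '42'}
print(list(result.keys()))           # the defined results names, e.g. ['name', 'age']
print(result.get("missing", "n/a"))  # get() falls back to the supplied default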
Tags are created for tokens and lists that have defined results names.", "id": "f17196:c7:m25"} {"signature": "def getName(self):", "body": "if self.__name:return self.__nameelif self.__parent:par = self.__parent()if par:return par.__lookup(self)else:return Noneelif (len(self) == andlen(self.__tokdict) == andlist(self.__tokdict.values())[][][] in (,-)):return list(self.__tokdict.keys())[]else:return None", "docstring": "Returns the results name for this token expression.", "id": "f17196:c7:m27"} {"signature": "def dump(self,indent='',depth=):", "body": "out = []out.append( indent+_ustr(self.asList()) )keys = list(self.items())keys.sort()for k,v in keys:if out:out.append('')out.append( \"\" % (indent,(''*depth), k) )if isinstance(v,ParseResults):if list(v.keys()):out.append( v.dump(indent,depth+) )else:out.append(_ustr(v))else:out.append(_ustr(v))return \"\".join(out)", "docstring": "Diagnostic method for listing out the contents of a ParseResults.\n Accepts an optional indent argument so that this string can be embedded\n in a nested display of other data.", "id": "f17196:c7:m28"} {"signature": "def setDefaultWhitespaceChars( chars ):", "body": "ParserElement.DEFAULT_WHITE_CHARS = chars", "docstring": "Overrides the default whitespace chars", "id": "f17196:c8:m0"} {"signature": "def copy( self ):", "body": "cpy = copy.copy( self )cpy.parseAction = self.parseAction[:]cpy.ignoreExprs = self.ignoreExprs[:]if self.copyDefaultWhiteChars:cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARSreturn cpy", "docstring": "Make a copy of this ParserElement. Useful for defining different parse actions\n for the same parsing pattern, using copies of the original parse element.", "id": "f17196:c8:m2"} {"signature": "def setName( self, name ):", "body": "self.name = nameself.errmsg = \"\" + self.nameif hasattr(self,\"\"):self.exception.msg = self.errmsgreturn self", "docstring": "Define name for this expression, for use in debugging.", "id": "f17196:c8:m3"} {"signature": "def setResultsName( self, name, listAllMatches=False ):", "body": "newself = self.copy()newself.resultsName = namenewself.modalResults = not listAllMatchesreturn newself", "docstring": "Define name for referencing matching tokens as a nested attribute\n of the returned parse results.\n NOTE: this returns a *copy* of the original ParserElement object;\n this is so that the client can define a basic element, such as an\n integer, and reference it in multiple places with different names.", "id": "f17196:c8:m4"} {"signature": "def setBreak(self,breakFlag = True):", "body": "if breakFlag:_parseMethod = self._parsedef breaker(instring, loc, doActions=True, callPreParse=True):import pdbpdb.set_trace()_parseMethod( instring, loc, doActions, callPreParse )breaker._originalParseMethod = _parseMethodself._parse = breakerelse:if hasattr(self._parse,\"\"):self._parse = self._parse._originalParseMethodreturn self", "docstring": "Method to invoke the Python pdb debugger when this element is\n about to be parsed. 
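The dump() diagnostic documented above is useful for checking which results names a grammar actually assigns. A minimal assumed usage sketch (grammar and input are made up):

from pyparsing import Word, alphas, nums, Group

person = Group(Word(alphas)("first") + Word(alphas)("last"))("name") + Word(nums)("zip")
res = person.parseString("Jane Doe 12345")
print(res.dump())   # prints the nested token list, then each named result indented below it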
Set breakFlag to True to enable, False to\n disable.", "id": "f17196:c8:m5"} {"signature": "def _normalizeParseActionArgs( f ):", "body": "STAR_ARGS = try:restore = Noneif isinstance(f,type):restore = ff = f.__init__if not _PY3K:codeObj = f.__code__else:codeObj = f.codeif codeObj.co_flags & STAR_ARGS:return fnumargs = codeObj.co_argcountif not _PY3K:if hasattr(f,\"\"):numargs -= else:if hasattr(f,\"\"):numargs -= if restore:f = restoreexcept AttributeError:try:if not _PY3K:call_im_func_code = f.__call__.__func__.__code__else:call_im_func_code = f.__code__if call_im_func_code.co_flags & STAR_ARGS:return fnumargs = call_im_func_code.co_argcountif not _PY3K:if hasattr(f.__call__,\"\"):numargs -= else:if hasattr(f.__call__,\"\"):numargs -= except AttributeError:if not _PY3K:call_func_code = f.__call__.__code__else:call_func_code = f.__call__.__code__if call_func_code.co_flags & STAR_ARGS:return fnumargs = call_func_code.co_argcountif not _PY3K:if hasattr(f.__call__,\"\"):numargs -= else:if hasattr(f.__call__,\"\"):numargs -= if numargs == :return felse:if numargs > :def tmp(s,l,t):return f(f.__call__.__self__, s,l,t)if numargs == :def tmp(s,l,t):return f(l,t)elif numargs == :def tmp(s,l,t):return f(t)else: def tmp(s,l,t):return f()try:tmp.__name__ = f.__name__except (AttributeError,TypeError):passtry:tmp.__doc__ = f.__doc__except (AttributeError,TypeError):passtry:tmp.__dict__.update(f.__dict__)except (AttributeError,TypeError):passreturn tmp", "docstring": "Internal method used to decorate parse actions that take fewer than 3 arguments,\n so that all parse actions can be called as f(s,l,t).", "id": "f17196:c8:m6"} {"signature": "def setParseAction( self, *fns, **kwargs ):", "body": "self.parseAction = list(map(self._normalizeParseActionArgs, list(fns)))self.callDuringTry = (\"\" in kwargs and kwargs[\"\"])return self", "docstring": "Define action to perform when successfully matching parse element definition.\n Parse action fn is a callable method with 0-3 arguments, called as fn(s,loc,toks),\n fn(loc,toks), fn(toks), or just fn(), where:\n - s = the original string being parsed (see note below)\n - loc = the location of the matching substring\n - toks = a list of the matched tokens, packaged as a ParseResults object\n If the functions in fns modify the tokens, they can return them as the return\n value from fn, and the modified list of tokens will replace the original.\n Otherwise, fn does not need to return any value.\n\n Note: the default parsing behavior is to expand tabs in the input string\n before starting the parsing process. See L{I{parseString}} for more information\n on parsing strings containing s, and suggested methods to maintain a\n consistent view of the parsed string, the parse location, and line and column\n positions within the parsed string.", "id": "f17196:c8:m7"} {"signature": "def addParseAction( self, *fns, **kwargs ):", "body": "self.parseAction += list(map(self._normalizeParseActionArgs, list(fns)))self.callDuringTry = self.callDuringTry or (\"\" in kwargs and kwargs[\"\"])return self", "docstring": "Add parse action to expression's list of parse actions. 
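A common use of setParseAction, sketched here as an assumed example (the conversion function and input are invented): attach a single-argument action that converts matched text into a Python value, so later code receives real ints instead of strings.

from pyparsing import Word, nums

integer = Word(nums)
integer.setParseAction(lambda toks: int(toks[0]))   # replace the matched text with an int

tokens = (integer + "+" + integer).parseString("40 + 2")
print(tokens[0] + tokens[2])   # 42: both operands were converted by the parse action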
See L{I{setParseAction}}.", "id": "f17196:c8:m8"} {"signature": "def setFailAction( self, fn ):", "body": "self.failAction = fnreturn self", "docstring": "Define action to perform if parsing fails at this expression.\n Fail acton fn is a callable function that takes the arguments\n fn(s,loc,expr,err) where:\n - s = string being parsed\n - loc = location where expression match was attempted and failed\n - expr = the parse expression that failed\n - err = the exception thrown\n The function returns no value. It may throw ParseFatalException\n if it is desired to stop parsing immediately.", "id": "f17196:c8:m9"} {"signature": "def enablePackrat():", "body": "if not ParserElement._packratEnabled:ParserElement._packratEnabled = TrueParserElement._parse = ParserElement._parseCache", "docstring": "Enables \"packrat\" parsing, which adds memoizing to the parsing logic.\n Repeated parse attempts at the same string location (which happens\n often in many complex grammars) can immediately return a cached value,\n instead of re-executing parsing/validating code. Memoizing is done of\n both valid results and parsing exceptions.\n\n This speedup may break existing programs that use parse actions that\n have side-effects. For this reason, packrat parsing is disabled when\n you first import pyparsing. To activate the packrat feature, your\n program must call the class method ParserElement.enablePackrat(). If\n your program uses psyco to \"compile as you go\", you must call\n enablePackrat before calling psyco.full(). If you do not do this,\n Python will crash. For best results, call enablePackrat() immediately\n after importing pyparsing.", "id": "f17196:c8:m18"} {"signature": "def parseString( self, instring, parseAll=False ):", "body": "ParserElement.resetCache()if not self.streamlined:self.streamline()for e in self.ignoreExprs:e.streamline()if not self.keepTabs:instring = instring.expandtabs()loc, tokens = self._parse( instring, )if parseAll:StringEnd()._parse( instring, loc )return tokens", "docstring": "Execute the parse expression with the given string.\n This is the main interface to the client code, once the complete\n expression has been built.\n\n If you want the grammar to require that the entire input string be\n successfully parsed, then set parseAll to True (equivalent to ending\n the grammar with StringEnd()).\n\n Note: parseString implicitly calls expandtabs() on the input string,\n in order to report proper column numbers in parse actions.\n If the input string contains tabs and\n the grammar uses parse actions that use the loc argument to index into the\n string being parsed, you can ensure you have a consistent view of the input\n string by:\n - calling parseWithTabs on your grammar before calling parseString\n (see L{I{parseWithTabs}})\n - define your parse action using the full (s,loc,toks) signature, and\n reference the input string using the parse action's s argument\n - explictly expand the tabs in your input string before calling\n parseString", "id": "f17196:c8:m19"} {"signature": "def scanString( self, instring, maxMatches=_MAX_INT ):", "body": "if not self.streamlined:self.streamline()for e in self.ignoreExprs:e.streamline()if not self.keepTabs:instring = _ustr(instring).expandtabs()instrlen = len(instring)loc = preparseFn = self.preParseparseFn = self._parseParserElement.resetCache()matches = while loc <= instrlen and matches < maxMatches:try:preloc = preparseFn( instring, loc )nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )except ParseException:loc = 
preloc+else:matches += yield tokens, preloc, nextLocloc = nextLoc", "docstring": "Scan the input string for expression matches. Each match will return the\n matching tokens, start location, and end location. May be called with optional\n maxMatches argument, to clip scanning after 'n' matches are found.\n\n Note that the start and end locations are reported relative to the string\n being parsed. See L{I{parseString}} for more information on parsing\n strings with embedded tabs.", "id": "f17196:c8:m20"} {"signature": "def transformString( self, instring ):", "body": "out = []lastE = self.keepTabs = Truefor t,s,e in self.scanString( instring ):out.append( instring[lastE:s] )if t:if isinstance(t,ParseResults):out += t.asList()elif isinstance(t,list):out += telse:out.append(t)lastE = eout.append(instring[lastE:])return \"\".join(map(_ustr,out))", "docstring": "Extension to scanString, to modify matching text with modified tokens that may\n be returned from a parse action. To use transformString, define a grammar and\n attach a parse action to it that modifies the returned token list.\n Invoking transformString() on a target string will then scan for matches,\n and replace the matched text patterns according to the logic in the parse\n action. transformString() returns the resulting transformed string.", "id": "f17196:c8:m21"} {"signature": "def searchString( self, instring, maxMatches=_MAX_INT ):", "body": "return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])", "docstring": "Another extension to scanString, simplifying the access to the tokens found\n to match the given parse expression. May be called with optional\n maxMatches argument, to clip searching after 'n' matches are found.", "id": "f17196:c8:m22"} {"signature": "def __add__(self, other ):", "body": "if isinstance( other, str ):other = Literal( other )if not isinstance( other, ParserElement ):warnings.warn(\"\" % type(other),SyntaxWarning, stacklevel=)return Nonereturn And( [ self, other ] )", "docstring": "Implementation of + operator - returns And", "id": "f17196:c8:m23"} {"signature": "def __radd__(self, other ):", "body": "if isinstance( other, str ):other = Literal( other )if not isinstance( other, ParserElement ):warnings.warn(\"\" % type(other),SyntaxWarning, stacklevel=)return Nonereturn other + self", "docstring": "Implementation of + operator when left operand is not a ParserElement", "id": "f17196:c8:m24"} {"signature": "def __sub__(self, other):", "body": "if isinstance( other, str ):other = Literal( other )if not isinstance( other, ParserElement ):warnings.warn(\"\" % type(other),SyntaxWarning, stacklevel=)return Nonereturn And( [ self, And._ErrorStop(), other ] )", "docstring": "Implementation of - operator, returns And with error stop", "id": "f17196:c8:m25"} {"signature": "def __rsub__(self, other ):", "body": "if isinstance( other, str ):other = Literal( other )if not isinstance( other, ParserElement ):warnings.warn(\"\" % type(other),SyntaxWarning, stacklevel=)return Nonereturn other - self", "docstring": "Implementation of - operator when left operand is not a ParserElement", "id": "f17196:c8:m26"} {"signature": "def __or__(self, other ):", "body": "if isinstance( other, str ):other = Literal( other )if not isinstance( other, ParserElement ):warnings.warn(\"\" % type(other),SyntaxWarning, stacklevel=)return Nonereturn MatchFirst( [ self, other ] )", "docstring": "Implementation of | operator - returns MatchFirst", "id": "f17196:c8:m29"} {"signature": "def __ror__(self, other ):", "body": 
"if isinstance( other, str ):other = Literal( other )if not isinstance( other, ParserElement ):warnings.warn(\"\" % type(other),SyntaxWarning, stacklevel=)return Nonereturn other | self", "docstring": "Implementation of | operator when left operand is not a ParserElement", "id": "f17196:c8:m30"} {"signature": "def __xor__(self, other ):", "body": "if isinstance( other, str ):other = Literal( other )if not isinstance( other, ParserElement ):warnings.warn(\"\" % type(other),SyntaxWarning, stacklevel=)return Nonereturn Or( [ self, other ] )", "docstring": "Implementation of ^ operator - returns Or", "id": "f17196:c8:m31"} {"signature": "def __rxor__(self, other ):", "body": "if isinstance( other, str ):other = Literal( other )if not isinstance( other, ParserElement ):warnings.warn(\"\" % type(other),SyntaxWarning, stacklevel=)return Nonereturn other ^ self", "docstring": "Implementation of ^ operator when left operand is not a ParserElement", "id": "f17196:c8:m32"} {"signature": "def __and__(self, other ):", "body": "if isinstance( other, str ):other = Literal( other )if not isinstance( other, ParserElement ):warnings.warn(\"\" % type(other),SyntaxWarning, stacklevel=)return Nonereturn Each( [ self, other ] )", "docstring": "Implementation of & operator - returns Each", "id": "f17196:c8:m33"} {"signature": "def __rand__(self, other ):", "body": "if isinstance( other, str ):other = Literal( other )if not isinstance( other, ParserElement ):warnings.warn(\"\" % type(other),SyntaxWarning, stacklevel=)return Nonereturn other & self", "docstring": "Implementation of & operator when left operand is not a ParserElement", "id": "f17196:c8:m34"} {"signature": "def __invert__( self ):", "body": "return NotAny( self )", "docstring": "Implementation of ~ operator - returns NotAny", "id": "f17196:c8:m35"} {"signature": "def __call__(self, name):", "body": "return self.setResultsName(name)", "docstring": "Shortcut for setResultsName, with listAllMatches=default::\n userdata = Word(alphas).setResultsName(\"name\") + Word(nums+\"-\").setResultsName(\"socsecno\")\n could be written as::\n userdata = Word(alphas)(\"name\") + Word(nums+\"-\")(\"socsecno\")", "id": "f17196:c8:m36"} {"signature": "def suppress( self ):", "body": "return Suppress( self )", "docstring": "Suppresses the output of this ParserElement; useful to keep punctuation from\n cluttering up returned output.", "id": "f17196:c8:m37"} {"signature": "def leaveWhitespace( self ):", "body": "self.skipWhitespace = Falsereturn self", "docstring": "Disables the skipping of whitespace before matching the characters in the\n ParserElement's defined pattern. 
This is normally only used internally by\n the pyparsing module, but may be needed in some whitespace-sensitive grammars.", "id": "f17196:c8:m38"} {"signature": "def setWhitespaceChars( self, chars ):", "body": "self.skipWhitespace = Trueself.whiteChars = charsself.copyDefaultWhiteChars = Falsereturn self", "docstring": "Overrides the default whitespace chars", "id": "f17196:c8:m39"} {"signature": "def parseWithTabs( self ):", "body": "self.keepTabs = Truereturn self", "docstring": "Overrides default behavior to expand s to spaces before parsing the input string.\n Must be called before parseString when the input grammar contains elements that\n match characters.", "id": "f17196:c8:m40"} {"signature": "def ignore( self, other ):", "body": "if isinstance( other, Suppress ):if other not in self.ignoreExprs:self.ignoreExprs.append( other )else:self.ignoreExprs.append( Suppress( other ) )return self", "docstring": "Define expression to be ignored (e.g., comments) while doing pattern\n matching; may be called repeatedly, to define multiple comment or other\n ignorable patterns.", "id": "f17196:c8:m41"} {"signature": "def setDebugActions( self, startAction, successAction, exceptionAction ):", "body": "self.debugActions = (startAction or _defaultStartDebugAction,successAction or _defaultSuccessDebugAction,exceptionAction or _defaultExceptionDebugAction)self.debug = Truereturn self", "docstring": "Enable display of debugging messages while doing pattern matching.", "id": "f17196:c8:m42"} {"signature": "def setDebug( self, flag=True ):", "body": "if flag:self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )else:self.debug = Falsereturn self", "docstring": "Enable display of debugging messages while doing pattern matching.\n Set flag to True to enable, False to disable.", "id": "f17196:c8:m43"} {"signature": "def validate( self, validateTrace=[] ):", "body": "self.checkRecursion( [] )", "docstring": "Check defined expressions for valid structure, check for infinite recursive definitions.", "id": "f17196:c8:m48"} {"signature": "def parseFile( self, file_or_filename ):", "body": "try:file_contents = file_or_filename.read()except AttributeError:f = open(file_or_filename, \"\")file_contents = f.read()f.close()return self.parseString(file_contents)", "docstring": "Execute the parse expression on the given file or filename.\n If a filename is specified (instead of a file object),\n the entire file is opened, read, and closed before parsing.", "id": "f17196:c8:m49"} {"signature": "def setDefaultKeywordChars( chars ):", "body": "Keyword.DEFAULT_KEYWORD_CHARS = chars", "docstring": "Overrides the default Keyword chars", "id": "f17196:c13:m3"} {"signature": "def __init__( self, pattern, flags=):", "body": "super(Regex,self).__init__()if len(pattern) == :warnings.warn(\"\",SyntaxWarning, stacklevel=)self.pattern = patternself.flags = flagstry:self.re = re.compile(self.pattern, self.flags)self.reString = self.patternexcept sre_constants.error:warnings.warn(\"\" % pattern,SyntaxWarning, stacklevel=)raiseself.name = _ustr(self)self.errmsg = \"\" + self.nameself.mayIndexError = Falseself.mayReturnEmpty = True", "docstring": "The parameters pattern and flags are passed to the re.compile() function as-is. 
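The Regex element introduced above can be used roughly as follows; the patterns here are illustrative assumptions only, and the flags argument is handed straight to re.compile() as the docstring states.

import re
from pyparsing import Regex

real_number = Regex(r"[+-]?\d+\.\d*([eE][+-]?\d+)?")
print(real_number.parseString("3.14159")[0])    # '3.14159'

word_ci = Regex(r"hello", flags=re.IGNORECASE)  # case-insensitive matching via re flags
print(word_ci.parseString("HELLO")[0])          # 'HELLO'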
See the Python re module for an explanation of the acceptable patterns and flags.", "id": "f17196:c17:m0"} {"signature": "def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):", "body": "super(QuotedString,self).__init__()quoteChar = quoteChar.strip()if len(quoteChar) == :warnings.warn(\"\",SyntaxWarning,stacklevel=)raise SyntaxError()if endQuoteChar is None:endQuoteChar = quoteCharelse:endQuoteChar = endQuoteChar.strip()if len(endQuoteChar) == :warnings.warn(\"\",SyntaxWarning,stacklevel=)raise SyntaxError()self.quoteChar = quoteCharself.quoteCharLen = len(quoteChar)self.firstQuoteChar = quoteChar[]self.endQuoteChar = endQuoteCharself.endQuoteCharLen = len(endQuoteChar)self.escChar = escCharself.escQuote = escQuoteself.unquoteResults = unquoteResultsif multiline:self.flags = re.MULTILINE | re.DOTALLself.pattern = r'' %( re.escape(self.quoteChar),_escapeRegexRangeChars(self.endQuoteChar[]),(escChar is not None and _escapeRegexRangeChars(escChar) or '') )else:self.flags = self.pattern = r'' %( re.escape(self.quoteChar),_escapeRegexRangeChars(self.endQuoteChar[]),(escChar is not None and _escapeRegexRangeChars(escChar) or '') )if len(self.endQuoteChar) > :self.pattern += ('' + ''.join([\"\" % (re.escape(self.endQuoteChar[:i]),_escapeRegexRangeChars(self.endQuoteChar[i]))for i in range(len(self.endQuoteChar)-,,-)]) + '')if escQuote:self.pattern += (r'' % re.escape(escQuote))if escChar:self.pattern += (r'' % re.escape(escChar))self.escCharReplacePattern = re.escape(self.escChar)+\"\"self.pattern += (r'' % re.escape(self.endQuoteChar))try:self.re = re.compile(self.pattern, self.flags)self.reString = self.patternexcept sre_constants.error:warnings.warn(\"\" % self.pattern,SyntaxWarning, stacklevel=)raiseself.name = _ustr(self)self.errmsg = \"\" + self.nameself.mayIndexError = Falseself.mayReturnEmpty = True", "docstring": "Defined with the following parameters:\n - quoteChar - string of one or more characters defining the quote delimiting string\n - escChar - character to escape quotes, typically backslash (default=None)\n - escQuote - special quote sequence to escape an embedded quote string (such as SQL's \"\" to escape an embedded \") (default=None)\n - multiline - boolean indicating whether quotes can span multiple lines (default=False)\n - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)\n - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)", "id": "f17196:c18:m0"} {"signature": "def leaveWhitespace( self ):", "body": "self.skipWhitespace = Falseself.exprs = [ e.copy() for e in self.exprs ]for e in self.exprs:e.leaveWhitespace()return self", "docstring": "Extends leaveWhitespace defined in base class, and also invokes leaveWhitespace on\n all contained expressions.", "id": "f17196:c29:m3"} {"signature": "def __patch__init__(self,edgecolor=None,facecolor=None,linewidth=None,linestyle=None,antialiased = None,hatch = None,fill=True,**kwargs):", "body": "artist.Artist.__init__(self)if linewidth is None: linewidth = mpl.rcParams['']if linestyle is None: linestyle = \"\"if antialiased is None: antialiased = mpl.rcParams['']self.set_edgecolor(edgecolor)self.set_facecolor(facecolor)self.set_linewidth(linewidth)self.set_linestyle(linestyle)self.set_antialiased(antialiased)self.set_hatch(hatch)self.fill = fillself._combined_transform = transforms.IdentityTransform()if len(kwargs): artist.setp(self, **kwargs)", 
"docstring": "The following kwarg properties are supported\n\n%(Patch)s", "id": "f17197:m0"} {"signature": "def bbox_artist(artist, renderer, props=None, fill=True):", "body": "if props is None: props = {}props = props.copy() pad = props.pop('', )pad = renderer.points_to_pixels(pad)bbox = artist.get_window_extent(renderer)l,b,w,h = bbox.boundsl-=pad/b-=pad/w+=padh+=padr = Rectangle(xy=(l,b),width=w,height=h,fill=fill,)r.set_transform(transforms.IdentityTransform())r.set_clip_on( False )r.update(props)r.draw(renderer)", "docstring": "This is a debug function to draw a rectangle around the bounding\nbox returned by\n:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,\nto test whether the artist is returning the correct bbox.\n\n*props* is a dict of rectangle props with the additional property\n'pad' that sets the padding around the bbox in points.", "id": "f17197:m1"} {"signature": "def draw_bbox(bbox, renderer, color='', trans=None):", "body": "l,b,w,h = bbox.get_bounds()r = Rectangle(xy=(l,b),width=w,height=h,edgecolor=color,fill=False,)if trans is not None: r.set_transform(trans)r.set_clip_on( False )r.draw(renderer)", "docstring": "This is a debug function to draw a rectangle around the bounding\nbox returned by\n:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,\nto test whether the artist is returning the correct bbox.", "id": "f17197:m2"} {"signature": "def _pprint_table(_table, leadingspace=):", "body": "if leadingspace:pad = ''*leadingspaceelse:pad = ''columns = [[] for cell in _table[]]for row in _table:for column, cell in zip(columns, row):column.append(cell)col_len = [max([len(cell) for cell in column]) for column in columns]lines = []table_formatstr = pad + ''.join([('' * cl) for cl in col_len])lines.append('')lines.append(table_formatstr)lines.append(pad + ''.join([cell.ljust(cl) for cell, cl in zip(_table[], col_len)]))lines.append(table_formatstr)lines.extend([(pad + ''.join([cell.ljust(cl) for cell, cl in zip(row, col_len)]))for row in _table[:]])lines.append(table_formatstr)lines.append('')return \"\".join(lines)", "docstring": "Given the list of list of strings, return a string of REST table format.", "id": "f17197:m3"} {"signature": "def _pprint_styles(_styles, leadingspace=):", "body": "if leadingspace:pad = ''*leadingspaceelse:pad = ''names, attrss, clss = [], [], []import inspect_table = [[\"\", \"\", \"\"]]for name, cls in sorted(_styles.items()):args, varargs, varkw, defaults = inspect.getargspec(cls.__init__)if defaults:args = [(argname, argdefault)for argname, argdefault in zip(args[:], defaults)]else:args = Noneif args is None:argstr = ''else:argstr = \"\".join([(\"\" % (an, av)) for an, av in args])_table.append([cls.__name__, \"\"%name, argstr])return _pprint_table(_table)", "docstring": "A helper function for the _Style class. Given the dictionary of\n(stylename : styleclass), return a formatted string listing all the\nstyles. Used to update the documentation.", "id": "f17197:m4"} {"signature": "def get_verts(self):", "body": "trans = self.get_transform()path = self.get_path()polygons = path.to_polygons(trans)if len(polygons):return polygons[]return []", "docstring": "Return a copy of the vertices used in this patch\n\nIf the patch contains B\u00e9zier curves, the curves will be\ninterpolated by line segments. 
To access the curves as\ncurves, use :meth:`get_path`.", "id": "f17197:c0:m1"} {"signature": "def contains(self, mouseevent):", "body": "if callable(self._contains): return self._contains(self,mouseevent)inside = self.get_path().contains_point((mouseevent.x, mouseevent.y), self.get_transform())return inside, {}", "docstring": "Test whether the mouse event occurred in the patch.\n\n Returns T/F, {}", "id": "f17197:c0:m2"} {"signature": "def update_from(self, other):", "body": "artist.Artist.update_from(self, other)self.set_edgecolor(other.get_edgecolor())self.set_facecolor(other.get_facecolor())self.set_fill(other.get_fill())self.set_hatch(other.get_hatch())self.set_linewidth(other.get_linewidth())self.set_linestyle(other.get_linestyle())self.set_transform(other.get_data_transform())self.set_figure(other.get_figure())self.set_alpha(other.get_alpha())", "docstring": "Updates this :class:`Patch` from the properties of *other*.", "id": "f17197:c0:m3"} {"signature": "def get_extents(self):", "body": "return self.get_path().get_extents(self.get_transform())", "docstring": "Return a :class:`~matplotlib.transforms.Bbox` object defining\nthe axis-aligned extents of the :class:`Patch`.", "id": "f17197:c0:m4"} {"signature": "def get_transform(self):", "body": "return self.get_patch_transform() + artist.Artist.get_transform(self)", "docstring": "Return the :class:`~matplotlib.transforms.Transform` applied\nto the :class:`Patch`.", "id": "f17197:c0:m5"} {"signature": "def get_antialiased(self):", "body": "return self._antialiased", "docstring": "Returns True if the :class:`Patch` is to be drawn with antialiasing.", "id": "f17197:c0:m8"} {"signature": "def get_edgecolor(self):", "body": "return self._edgecolor", "docstring": "Return the edge color of the :class:`Patch`.", "id": "f17197:c0:m9"} {"signature": "def get_facecolor(self):", "body": "return self._facecolor", "docstring": "Return the face color of the :class:`Patch`.", "id": "f17197:c0:m10"} {"signature": "def get_linewidth(self):", "body": "return self._linewidth", "docstring": "Return the line width in points.", "id": "f17197:c0:m11"} {"signature": "def get_linestyle(self):", "body": "return self._linestyle", "docstring": "Return the linestyle. 
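The Patch getters and setters listed in this stretch of records can be combined as in the following assumed sketch (geometry and colors are invented); it only constructs a patch and queries it, so no figure is required.

import matplotlib.patches as mpatches

p = mpatches.Rectangle((0, 0), 2, 1, facecolor="lightblue", edgecolor="navy")
print(p.get_facecolor(), p.get_edgecolor(), p.get_linewidth())

p.set_linestyle("dashed")   # one of 'solid' | 'dashed' | 'dashdot' | 'dotted'
p.set_linewidth(2.5)
print(p.get_linestyle())    # 'dashed'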
Will be one of ['solid' | 'dashed' |\n'dashdot' | 'dotted']", "id": "f17197:c0:m12"} {"signature": "def set_antialiased(self, aa):", "body": "if aa is None: aa = mpl.rcParams['']self._antialiased = aa", "docstring": "Set whether to use antialiased rendering\n\nACCEPTS: [True | False] or None for default", "id": "f17197:c0:m13"} {"signature": "def set_aa(self, aa):", "body": "return self.set_antialiased(aa)", "docstring": "alias for set_antialiased", "id": "f17197:c0:m14"} {"signature": "def set_edgecolor(self, color):", "body": "if color is None: color = mpl.rcParams['']self._edgecolor = color", "docstring": "Set the patch edge color\n\nACCEPTS: mpl color spec, or None for default, or 'none' for no color", "id": "f17197:c0:m15"} {"signature": "def set_ec(self, color):", "body": "return self.set_edgecolor(color)", "docstring": "alias for set_edgecolor", "id": "f17197:c0:m16"} {"signature": "def set_facecolor(self, color):", "body": "if color is None: color = mpl.rcParams['']self._facecolor = color", "docstring": "Set the patch face color\n\nACCEPTS: mpl color spec, or None for default, or 'none' for no color", "id": "f17197:c0:m17"} {"signature": "def set_fc(self, color):", "body": "return self.set_facecolor(color)", "docstring": "alias for set_facecolor", "id": "f17197:c0:m18"} {"signature": "def set_linewidth(self, w):", "body": "if w is None: w = mpl.rcParams['']self._linewidth = w", "docstring": "Set the patch linewidth in points\n\nACCEPTS: float or None for default", "id": "f17197:c0:m19"} {"signature": "def set_lw(self, lw):", "body": "return self.set_linewidth(lw)", "docstring": "alias for set_linewidth", "id": "f17197:c0:m20"} {"signature": "def set_linestyle(self, ls):", "body": "if ls is None: ls = \"\"self._linestyle = ls", "docstring": "Set the patch linestyle\n\nACCEPTS: ['solid' | 'dashed' | 'dashdot' | 'dotted']", "id": "f17197:c0:m21"} {"signature": "def set_ls(self, ls):", "body": "return self.set_linestyle(ls)", "docstring": "alias for set_linestyle", "id": "f17197:c0:m22"} {"signature": "def set_fill(self, b):", "body": "self.fill = b", "docstring": "Set whether to fill the patch\n\nACCEPTS: [True | False]", "id": "f17197:c0:m23"} {"signature": "def get_fill(self):", "body": "return self.fill", "docstring": "return whether fill is set", "id": "f17197:c0:m24"} {"signature": "def set_hatch(self, h):", "body": "self._hatch = h", "docstring": "Set the hatching pattern\n\nhatch can be one of::\n\n / - diagonal hatching\n \\ - back diagonal\n | - vertical\n - - horizontal\n # - crossed\n x - crossed diagonal\n\nLetters can be combined, in which case all the specified\nhatchings are done. If same letter repeats, it increases the\ndensity of hatching in that direction.\n\nCURRENT LIMITATIONS:\n\n1. Hatching is supported in the PostScript backend only.\n\n2. 
Hatching is done with solid black lines of width 0.\n\n\nACCEPTS: [ '/' | '\\\\' | '|' | '-' | '#' | 'x' ]", "id": "f17197:c0:m25"} {"signature": "def get_hatch(self):", "body": "return self._hatch", "docstring": "Return the current hatching pattern", "id": "f17197:c0:m26"} {"signature": "def draw(self, renderer):", "body": "if not self.get_visible(): returngc = renderer.new_gc()if cbook.is_string_like(self._edgecolor) and self._edgecolor.lower()=='':gc.set_linewidth()else:gc.set_foreground(self._edgecolor)gc.set_linewidth(self._linewidth)gc.set_linestyle(self._linestyle)gc.set_antialiased(self._antialiased)self._set_gc_clip(gc)gc.set_capstyle('')gc.set_url(self._url)gc.set_snap(self._snap)if (not self.fill or self._facecolor is None or(cbook.is_string_like(self._facecolor) and self._facecolor.lower()=='')):rgbFace = Nonegc.set_alpha()else:r, g, b, a = colors.colorConverter.to_rgba(self._facecolor, self._alpha)rgbFace = (r, g, b)gc.set_alpha(a)if self._hatch:gc.set_hatch(self._hatch )path = self.get_path()transform = self.get_transform()tpath = transform.transform_path_non_affine(path)affine = transform.get_affine()renderer.draw_path(gc, tpath, affine, rgbFace)", "docstring": "Draw the :class:`Patch` to the given *renderer*.", "id": "f17197:c0:m27"} {"signature": "def get_path(self):", "body": "raise NotImplementedError('')", "docstring": "Return the path of this patch", "id": "f17197:c0:m28"} {"signature": "def __init__(self, patch, ox, oy, props=None, **kwargs):", "body": "Patch.__init__(self)self.patch = patchself.props = propsself._ox, self._oy = ox, oyself._update_transform()self._update()", "docstring": "Create a shadow of the given *patch* offset by *ox*, *oy*.\n*props*, if not *None*, is a patch property update dictionary.\nIf *None*, the shadow will have have the same color as the face,\nbut darkened.\n\nkwargs are\n%(Patch)s", "id": "f17197:c1:m1"} {"signature": "def __init__(self, xy, width, height, **kwargs):", "body": "Patch.__init__(self, **kwargs)self._x = xy[]self._y = xy[]self._width = widthself._height = heightself._rect_transform = transforms.IdentityTransform()", "docstring": "*fill* is a boolean indicating whether to fill the rectangle\n\nValid kwargs are:\n%(Patch)s", "id": "f17197:c2:m1"} {"signature": "def get_path(self):", "body": "return Path.unit_rectangle()", "docstring": "Return the vertices of the rectangle", "id": "f17197:c2:m2"} {"signature": "def _update_patch_transform(self):", "body": "x = self.convert_xunits(self._x)y = self.convert_yunits(self._y)width = self.convert_xunits(self._width)height = self.convert_yunits(self._height)bbox = transforms.Bbox.from_bounds(x, y, width, height)self._rect_transform = transforms.BboxTransformTo(bbox)", "docstring": "NOTE: This cannot be called until after this has been added\n to an Axes, otherwise unit conversion will fail. 
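Putting the Rectangle and set_hatch entries above together, a hedged usage sketch follows; the file name, geometry, and backend choice are assumptions, and whether the hatch is actually rendered depends on the backend, per the limitation noted in set_hatch.

import matplotlib
matplotlib.use("Agg")                     # off-screen rendering; backend is an assumption
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches

fig = plt.figure()
ax = fig.add_subplot(111)
rect = mpatches.Rectangle((0.2, 0.2), 0.5, 0.3, facecolor="none")
rect.set_hatch("/")                       # diagonal hatching
ax.add_patch(rect)
rect.set_bounds(0.1, 0.1, 0.6, 0.4)       # reposition/resize through the accessor
fig.savefig("hatched_rect.png")           # hypothetical output file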
This\n maxes it very important to call the accessor method and\n not directly access the transformation member variable.", "id": "f17197:c2:m3"} {"signature": "def get_x(self):", "body": "return self._x", "docstring": "Return the left coord of the rectangle", "id": "f17197:c2:m6"} {"signature": "def get_y(self):", "body": "return self._y", "docstring": "Return the bottom coord of the rectangle", "id": "f17197:c2:m7"} {"signature": "def get_xy(self):", "body": "return self._x, self._y", "docstring": "Return the left and bottom coords of the rectangle", "id": "f17197:c2:m8"} {"signature": "def get_width(self):", "body": "return self._width", "docstring": "Return the width of the rectangle", "id": "f17197:c2:m9"} {"signature": "def get_height(self):", "body": "return self._height", "docstring": "Return the height of the rectangle", "id": "f17197:c2:m10"} {"signature": "def set_x(self, x):", "body": "self._x = x", "docstring": "Set the left coord of the rectangle\n\nACCEPTS: float", "id": "f17197:c2:m11"} {"signature": "def set_y(self, y):", "body": "self._y = y", "docstring": "Set the bottom coord of the rectangle\n\nACCEPTS: float", "id": "f17197:c2:m12"} {"signature": "def set_xy(self, xy):", "body": "self._x, self._y = xy", "docstring": "Set the left and bottom coords of the rectangle\n\nACCEPTS: 2-item sequence", "id": "f17197:c2:m13"} {"signature": "def set_width(self, w):", "body": "self._width = w", "docstring": "Set the width rectangle\n\nACCEPTS: float", "id": "f17197:c2:m14"} {"signature": "def set_height(self, h):", "body": "self._height = h", "docstring": "Set the width rectangle\n\nACCEPTS: float", "id": "f17197:c2:m15"} {"signature": "def set_bounds(self, *args):", "body": "if len(args)==:l,b,w,h = args[]else:l,b,w,h = argsself._x = lself._y = bself._width = wself._height = h", "docstring": "Set the bounds of the rectangle: l,b,w,h\n\nACCEPTS: (left, bottom, width, height)", "id": "f17197:c2:m16"} {"signature": "def __init__(self, xy, numVertices, radius=, orientation=,**kwargs):", "body": "self._xy = xyself._numVertices = numVerticesself._orientation = orientationself._radius = radiusself._path = Path.unit_regular_polygon(numVertices)self._poly_transform = transforms.Affine2D()self._update_transform()Patch.__init__(self, **kwargs)", "docstring": "Constructor arguments:\n\n*xy*\n A length 2 tuple (*x*, *y*) of the center.\n\n*numVertices*\n the number of vertices.\n\n*radius*\n The distance from the center to each of the vertices.\n\n*orientation*\n rotates the polygon (in radians).\n\nValid kwargs are:\n%(Patch)s", "id": "f17197:c3:m1"} {"signature": "def __init__(self, path, **kwargs):", "body": "Patch.__init__(self, **kwargs)self._path = path", "docstring": "*path* is a :class:`matplotlib.path.Path` object.\n\nValid kwargs are:\n%(Patch)s\n\n.. seealso::\n :class:`Patch`:\n For additional kwargs", "id": "f17197:c4:m1"} {"signature": "def __init__(self, xy, closed=True, **kwargs):", "body": "Patch.__init__(self, **kwargs)xy = np.asarray(xy, np.float_)self._path = Path(xy)self.set_closed(closed)", "docstring": "*xy* is a numpy array with shape Nx2.\n\nIf *closed* is *True*, the polygon will be closed so the\nstarting and ending points are the same.\n\nValid kwargs are:\n%(Patch)s\n\n.. 
seealso::\n :class:`Patch`:\n For additional kwargs", "id": "f17197:c5:m1"} {"signature": "def __init__(self, center, r, theta1, theta2, width=None, **kwargs):", "body": "Patch.__init__(self, **kwargs)self.center = centerself.r,self.width = r,widthself.theta1,self.theta2 = theta1,theta2delta=theta2-theta1if abs((theta2-theta1) - ) <= :theta1,theta2 = ,connector = Path.MOVETOelse:connector = Path.LINETOarc = Path.arc(theta1,theta2)if width is not None:v1 = arc.verticesv2 = arc.vertices[::-]*float(r-width)/rv = np.vstack([v1,v2,v1[,:],(,)])c = np.hstack([arc.codes,arc.codes,connector,Path.CLOSEPOLY])c[len(arc.codes)]=connectorelse:v = np.vstack([arc.vertices,[(,),arc.vertices[,:],(,)]])c = np.hstack([arc.codes,[connector,connector,Path.CLOSEPOLY]])v *= rv += np.asarray(center)self._path = Path(v,c)self._patch_transform = transforms.IdentityTransform()", "docstring": "Draw a wedge centered at *x*, *y* center with radius *r* that\nsweeps *theta1* to *theta2* (in degrees). If *width* is given,\nthen a partial wedge is drawn from inner radius *r* - *width*\nto outer radius *r*.\n\nValid kwargs are:\n\n%(Patch)s", "id": "f17197:c6:m1"} {"signature": "def __init__( self, x, y, dx, dy, width=, **kwargs ):", "body": "Patch.__init__(self, **kwargs)L = np.sqrt(dx**+dy**) or cx = float(dx)/Lsx = float(dy)/Ltrans1 = transforms.Affine2D().scale(L, width)trans2 = transforms.Affine2D.from_values(cx, sx, -sx, cx, , )trans3 = transforms.Affine2D().translate(x, y)trans = trans1 + trans2 + trans3self._patch_transform = trans.frozen()", "docstring": "Draws an arrow, starting at (*x*, *y*), direction and length\ngiven by (*dx*, *dy*) the width of the arrow is scaled by *width*.\n\nValid kwargs are:\n%(Patch)s", "id": "f17197:c7:m1"} {"signature": "def __init__(self, x, y, dx, dy, width=, length_includes_head=False,head_width=None, head_length=None, shape='', overhang=,head_starts_at_zero=False,**kwargs):", "body": "if head_width is None:head_width = * widthif head_length is None:head_length = * head_widthdistance = np.sqrt(dx** + dy**)if length_includes_head:length=distanceelse:length=distance+head_lengthif not length:verts = [] else:hw, hl, hs, lw = head_width, head_length, overhang, widthleft_half_arrow = np.array([[,], [-hl, -hw/], [-hl*(-hs), -lw/], [-length, -lw/], [-length, ],])if not length_includes_head:left_half_arrow += [head_length, ]if head_starts_at_zero:left_half_arrow += [head_length/, ]if shape == '':coords = left_half_arrowelse:right_half_arrow = left_half_arrow*[,-]if shape == '':coords = right_half_arrowelif shape == '':coords=np.concatenate([left_half_arrow[:-],right_half_arrow[-::-]])else:raise ValueError(\"\" % shape)cx = float(dx)/distancesx = float(dy)/distanceM = np.array([[cx, sx],[-sx,cx]])verts = np.dot(coords, M) + (x+dx, y+dy)Polygon.__init__(self, list(map(tuple, verts)), **kwargs)", "docstring": "Constructor arguments\n\n *length_includes_head*:\n *True* if head is counted in calculating the length.\n\n *shape*: ['full', 'left', 'right']\n\n *overhang*:\n distance that the arrow is swept back (0 overhang means\n triangular shape).\n\n *head_starts_at_zero*:\n If *True*, the head starts being drawn at coordinate 0\n instead of ending at coordinate 0.\n\nValid kwargs are:\n%(Patch)s", "id": "f17197:c8:m1"} {"signature": "def __init__(self, figure, xytip, xybase, width=, frac=, headwidth=, **kwargs):", "body": "self.figure = figureself.xytip = xytipself.xybase = xybaseself.width = widthself.frac = fracself.headwidth = headwidthPatch.__init__(self, **kwargs)", "docstring": 
"Constructor arguments:\n\n*xytip*\n (*x*, *y*) location of arrow tip\n\n*xybase*\n (*x*, *y*) location the arrow base mid point\n\n*figure*\n The :class:`~matplotlib.figure.Figure` instance\n (fig.dpi)\n\n*width*\n The width of the arrow in points\n\n*frac*\n The fraction of the arrow length occupied by the head\n\n*headwidth*\n The width of the base of the arrow head in points\n\nValid kwargs are:\n%(Patch)s", "id": "f17197:c9:m1"} {"signature": "def getpoints(self, x1,y1,x2,y2, k):", "body": "x1,y1,x2,y2,k = list(map(float, (x1,y1,x2,y2,k)))if y2-y1 == :return x2, y2+k, x2, y2-kelif x2-x1 == :return x2+k, y2, x2-k, y2m = (y2-y1)/(x2-x1)pm = -/ma = b = -*y2c = y2** - k***pm**/( + pm**)y3a = (-b + math.sqrt(b**-*a*c))/(*a)x3a = (y3a - y2)/pm + x2y3b = (-b - math.sqrt(b**-*a*c))/(*a)x3b = (y3b - y2)/pm + x2return x3a, y3a, x3b, y3b", "docstring": "For line segment defined by (*x1*, *y1*) and (*x2*, *y2*)\nreturn the points on the line that is perpendicular to the\nline and intersects (*x2*, *y2*) and the distance from (*x2*,\n*y2*) of the returned points is *k*.", "id": "f17197:c9:m4"} {"signature": "def __init__(self, xy, radius=,resolution=, **kwargs):", "body": "RegularPolygon.__init__(self, xy,resolution,radius,orientation=,**kwargs)", "docstring": "Create a circle at *xy* = (*x*, *y*) with given *radius*.\nThis circle is approximated by a regular polygon with\n*resolution* sides. For a smoother circle drawn with splines,\nsee :class:`~matplotlib.patches.Circle`.\n\nValid kwargs are:\n%(Patch)s", "id": "f17197:c10:m1"} {"signature": "def __init__(self, xy, width, height, angle=, **kwargs):", "body": "Patch.__init__(self, **kwargs)self.center = xyself.width, self.height = width, heightself.angle = angleself._path = Path.unit_circle()self._patch_transform = transforms.IdentityTransform()", "docstring": "*xy*\n center of ellipse\n\n*width*\n length of horizontal axis\n\n*height*\n length of vertical axis\n\n*angle*\n rotation in degrees (anti-clockwise)\n\nValid kwargs are:\n%(Patch)s", "id": "f17197:c11:m1"} {"signature": "def _recompute_transform(self):", "body": "center = (self.convert_xunits(self.center[]),self.convert_yunits(self.center[]))width = self.convert_xunits(self.width)height = self.convert_yunits(self.height)self._patch_transform = transforms.Affine2D().scale(width * , height * ).rotate_deg(self.angle).translate(*center)", "docstring": "NOTE: This cannot be called until after this has been added\n to an Axes, otherwise unit conversion will fail. This\n maxes it very important to call the accessor method and\n not directly access the transformation member variable.", "id": "f17197:c11:m2"} {"signature": "def get_path(self):", "body": "return self._path", "docstring": "Return the vertices of the rectangle", "id": "f17197:c11:m3"} {"signature": "def __init__(self, xy, radius=, **kwargs):", "body": "if '' in kwargs:import warningswarnings.warn('', DeprecationWarning)kwargs.pop('')self.radius = radiusEllipse.__init__(self, xy, radius*, radius*, **kwargs)", "docstring": "Create true circle at center *xy* = (*x*, *y*) with given\n*radius*. 
Unlike :class:`~matplotlib.patches.CirclePolygon`\nwhich is a polygonal approximation, this uses B\u00e9zier splines\nand is much closer to a scale-free circle.\n\nValid kwargs are:\n%(Patch)s", "id": "f17197:c12:m1"} {"signature": "def __init__(self, xy, width, height, angle=, theta1=, theta2=, **kwargs):", "body": "fill = kwargs.pop('')if fill:raise ValueError(\"\")kwargs[''] = FalseEllipse.__init__(self, xy, width, height, angle, **kwargs)self.theta1 = theta1self.theta2 = theta2", "docstring": "The following args are supported:\n\n*xy*\n center of ellipse\n\n*width*\n length of horizontal axis\n\n*height*\n length of vertical axis\n\n*angle*\n rotation in degrees (anti-clockwise)\n\n*theta1*\n starting angle of the arc in degrees\n\n*theta2*\n ending angle of the arc in degrees\n\nIf *theta1* and *theta2* are not provided, the arc will form a\ncomplete ellipse.\n\nValid kwargs are:\n\n%(Patch)s", "id": "f17197:c13:m1"} {"signature": "def draw(self, renderer):", "body": "if not hasattr(self, ''):raise RuntimeError('')self._recompute_transform()width = self.convert_xunits(self.width)height = self.convert_yunits(self.height)width, height = self.get_transform().transform_point((width, height))inv_error = ( / ) * if width < inv_error and height < inv_error:self._path = Path.arc(self.theta1, self.theta2)return Patch.draw(self, renderer)def iter_circle_intersect_on_line(x0, y0, x1, y1):dx = x1 - x0dy = y1 - y0dr2 = dx*dx + dy*dyD = x0*y1 - x1*y0D2 = D*Ddiscrim = dr2 - D2if discrim == :x = (D*dy) / dr2y = (-D*dx) / dr2yield x, yelif discrim > :if dy < :sign_dy = -else:sign_dy = sqrt_discrim = np.sqrt(discrim)for sign in (, -):x = (D*dy + sign * sign_dy * dx * sqrt_discrim) / dr2y = (-D*dx + sign * np.abs(dy) * sqrt_discrim) / dr2yield x, ydef iter_circle_intersect_on_line_seg(x0, y0, x1, y1):epsilon = if x1 < x0:x0e, x1e = x1, x0else:x0e, x1e = x0, x1if y1 < y0:y0e, y1e = y1, y0else:y0e, y1e = y0, y1x0e -= epsilony0e -= epsilonx1e += epsilony1e += epsilonfor x, y in iter_circle_intersect_on_line(x0, y0, x1, y1):if x >= x0e and x <= x1e and y >= y0e and y <= y1e:yield x, ybox_path = Path.unit_rectangle()box_path_transform = transforms.BboxTransformTo(self.axes.bbox) +self.get_transform().inverted()box_path = box_path.transformed(box_path_transform)PI = np.piTWOPI = PI * RAD2DEG = / PIDEG2RAD = PI / theta1 = self.theta1theta2 = self.theta2thetas = {}for p0, p1 in zip(box_path.vertices[:-], box_path.vertices[:]):x0, y0 = p0x1, y1 = p1for x, y in iter_circle_intersect_on_line_seg(x0, y0, x1, y1):theta = np.arccos(x)if y < :theta = TWOPI - thetatheta *= RAD2DEGif theta > theta1 and theta < theta2:thetas[theta] = Nonethetas = list(thetas.keys())thetas.sort()thetas.append(theta2)last_theta = theta1theta1_rad = theta1 * DEG2RADinside = box_path.contains_point((np.cos(theta1_rad), np.sin(theta1_rad)))for theta in thetas:if inside:self._path = Path.arc(last_theta, theta, )Patch.draw(self, renderer)inside = Falseelse:inside = Truelast_theta = theta", "docstring": "Ellipses are normally drawn using an approximation that uses\neight cubic bezier splines. The error of this approximation\nis 1.89818e-6, according to this unverified source:\n\n Lancaster, Don. Approximating a Circle or an Ellipse Using\n Four Bezier Cubic Splines.\n\n http://www.tinaja.com/glib/ellipse4.pdf\n\nThere is a use case where very large ellipses must be drawn\nwith very high accuracy, and it is too expensive to render the\nentire ellipse with enough segments (either splines or line\nsegments). 
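The Arc patch above is always unfilled, so only edge properties apply; a minimal assumed example (geometry and file name invented):

import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.patches import Arc

fig = plt.figure()
ax = fig.add_subplot(111)
ax.add_patch(Arc(xy=(0.5, 0.5), width=0.8, height=0.4, angle=0,
                 theta1=0, theta2=120, edgecolor="black"))
fig.savefig("arc.png")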
Therefore, in the case where either radius of the\nellipse is large enough that the error of the spline\napproximation will be visible (greater than one pixel offset\nfrom the ideal), a different technique is used.\n\nIn that case, only the visible parts of the ellipse are drawn,\nwith each visible arc using a fixed number of spline segments\n(8). The algorithm proceeds as follows:\n\n 1. The points where the ellipse intersects the axes bounding\n box are located. (This is done be performing an inverse\n transformation on the axes bbox such that it is relative\n to the unit circle -- this makes the intersection\n calculation much easier than doing rotated ellipse\n intersection directly).\n\n This uses the \"line intersecting a circle\" algorithm\n from:\n\n Vince, John. Geometry for Computer Graphics: Formulae,\n Examples & Proofs. London: Springer-Verlag, 2005.\n\n 2. The angles of each of the intersection points are\n calculated.\n\n 3. Proceeding counterclockwise starting in the positive\n x-direction, each of the visible arc-segments between the\n pairs of vertices are drawn using the bezier arc\n approximation technique implemented in\n :meth:`matplotlib.path.Path.arc`.", "id": "f17197:c13:m2"} {"signature": "def __new__(self, stylename, **kw):", "body": "_list = stylename.replace(\"\",\"\").split(\"\")_name = _list[].lower()try:_cls = self._style_list[_name]except KeyError:raise ValueError(\"\" % stylename)try:_args_pair = [cs.split(\"\") for cs in _list[:]]_args = dict([(k, float(v)) for k, v in _args_pair])except ValueError:raise ValueError(\"\" % stylename)_args.update(kw)return _cls(**_args)", "docstring": "return the instance of the subclass with the given style name.", "id": "f17197:c14:m0"} {"signature": "@classmethoddef get_styles(klass):", "body": "return klass._style_list", "docstring": "A class method which returns a dictionary of available styles.", "id": "f17197:c14:m1"} {"signature": "@classmethoddef pprint_styles(klass):", "body": "return _pprint_styles(klass._style_list)", "docstring": "A class method which returns a string of the available styles.", "id": "f17197:c14:m2"} {"signature": "def __init__(self, xy, width, height,boxstyle=\"\",bbox_transmuter=None,mutation_scale=,mutation_aspect=None,**kwargs):", "body": "Patch.__init__(self, **kwargs)self._x = xy[]self._y = xy[]self._width = widthself._height = heightif boxstyle == \"\":if bbox_transmuter is None:raise ValueError(\"\")self._bbox_transmuter = bbox_transmuterelse:self.set_boxstyle(boxstyle)self._mutation_scale=mutation_scaleself._mutation_aspect=mutation_aspect", "docstring": "*xy* = lower left corner\n\n*width*, *height*\n\n*boxstyle* determines what kind of fancy box will be drawn. It\ncan be a string of the style name with a comma separated\nattribute, or an instance of :class:`BoxStyle`. Following box\nstyles are available.\n\n%(AvailableBoxstyles)s\n\n*mutation_scale* : a value with which attributes of boxstyle\n(e.g., pad) will be scaled. default=1.\n\n*mutation_aspect* : The height of the rectangle will be\nsqueezed by this value before the mutation and the mutated\nbox will be stretched by the inverse of it. 
default=None.\n\nValid kwargs are:\n%(Patch)s", "id": "f17197:c16:m1"} {"signature": "def set_boxstyle(self, boxstyle=None, **kw):", "body": "if boxstyle==None:return BoxStyle.pprint_styles()if isinstance(boxstyle, BoxStyle._Base):self._bbox_transmuter = boxstyleelif callable(boxstyle):self._bbox_transmuter = boxstyleelse:self._bbox_transmuter = BoxStyle(boxstyle, **kw)", "docstring": "Set the box style.\n\n*boxstyle* can be a string with boxstyle name with optional\ncomma-separated attributes. Alternatively, the attrs can\nbe provided as keywords::\n\n set_boxstyle(\"round,pad=0.2\")\n set_boxstyle(\"round\", pad=0.2)\n\nOld attrs simply are forgotten.\n\nWithout argument (or with *boxstyle* = None), it returns\navailable box styles.\n\nACCEPTS: [ %(AvailableBoxstyles)s ]", "id": "f17197:c16:m2"} {"signature": "def set_mutation_scale(self, scale):", "body": "self._mutation_scale=scale", "docstring": "Set the mutation scale.\n\nACCEPTS: float", "id": "f17197:c16:m3"} {"signature": "def get_mutation_scale(self):", "body": "return self._mutation_scale", "docstring": "Return the mutation scale.", "id": "f17197:c16:m4"} {"signature": "def set_mutation_aspect(self, aspect):", "body": "self._mutation_aspect=aspect", "docstring": "Set the aspect ratio of the bbox mutation.\n\nACCEPTS: float", "id": "f17197:c16:m5"} {"signature": "def get_mutation_aspect(self):", "body": "return self._mutation_aspect", "docstring": "Return the aspect ratio of the bbox mutation.", "id": "f17197:c16:m6"} {"signature": "def get_boxstyle(self):", "body": "return self._bbox_transmuter", "docstring": "Return the boxstyle object", "id": "f17197:c16:m7"} {"signature": "def get_path(self):", "body": "_path = self.get_boxstyle()(self._x, self._y,self._width, self._height,self.get_mutation_scale(),self.get_mutation_aspect())return _path", "docstring": "Return the mutated path of the rectangle", "id": "f17197:c16:m8"} {"signature": "def get_x(self):", "body": "return self._x", "docstring": "Return the left coord of the rectangle", "id": "f17197:c16:m9"} {"signature": "def get_y(self):", "body": "return self._y", "docstring": "Return the bottom coord of the rectangle", "id": "f17197:c16:m10"} {"signature": "def get_width(self):", "body": "return self._width", "docstring": "Return the width of the rectangle", "id": "f17197:c16:m11"} {"signature": "def get_height(self):", "body": "return self._height", "docstring": "Return the height of the rectangle", "id": "f17197:c16:m12"} {"signature": "def set_x(self, x):", "body": "self._x = x", "docstring": "Set the left coord of the rectangle\n\nACCEPTS: float", "id": "f17197:c16:m13"} {"signature": "def set_y(self, y):", "body": "self._y = y", "docstring": "Set the bottom coord of the rectangle\n\nACCEPTS: float", "id": "f17197:c16:m14"} {"signature": "def set_width(self, w):", "body": "self._width = w", "docstring": "Set the width rectangle\n\nACCEPTS: float", "id": "f17197:c16:m15"} {"signature": "def set_height(self, h):", "body": "self._height = h", "docstring": "Set the width rectangle\n\nACCEPTS: float", "id": "f17197:c16:m16"} {"signature": "def set_bounds(self, *args):", "body": "if len(args)==:l,b,w,h = args[]else:l,b,w,h = argsself._x = lself._y = bself._width = wself._height = h", "docstring": "Set the bounds of the rectangle: l,b,w,h\n\nACCEPTS: (left, bottom, width, height)", "id": "f17197:c16:m17"} {"signature": "def __init__(self, posA=None, 
posB=None,path=None,arrowstyle=\"\",arrow_transmuter=None,connectionstyle=\"\",connector=None,patchA=None,patchB=None,shrinkA=,shrinkB=,mutation_scale=,mutation_aspect=None,**kwargs):", "body": "if posA is not None and posB is not None and path is None:self._posA_posB = [posA, posB]if connectionstyle is None:connectionstyle = \"\"self.set_connectionstyle(connectionstyle)elif posA is None and posB is None and path is not None:self._posA_posB = Noneself._connetors = Noneelse:raise ValueError(\"\")self.patchA = patchAself.patchB = patchBself.shrinkA = shrinkAself.shrinkB = shrinkBPatch.__init__(self, **kwargs)self._path_original = pathself.set_arrowstyle(arrowstyle)self._mutation_scale=mutation_scaleself._mutation_aspect=mutation_aspect", "docstring": "If *posA* and *posB* is given, a path connecting two point are\ncreated according to the connectionstyle. The path will be\nclipped with *patchA* and *patchB* and further shirnked by\n*shrinkA* and *shrinkB*. An arrow is drawn along this\nresulting path using the *arrowstyle* parameter. If *path*\nprovided, an arrow is drawn along this path and *patchA*,\n*patchB*, *shrinkA*, and *shrinkB* are ignored.\n\nThe *connectionstyle* describes how *posA* and *posB* are\nconnected. It can be an instance of the ConnectionStyle class\n(matplotlib.patches.ConnectionStlye) or a string of the\nconnectionstyle name, with optional comma-separated\nattributes. The following connection styles are available.\n\n%(AvailableConnectorstyles)s\n\n\nThe *arrowstyle* describes how the fancy arrow will be\ndrawn. It can be string of the available arrowstyle names,\nwith optional comma-separated attributes, or one of the\nArrowStyle instance. The optional attributes are meant to be\nscaled with the *mutation_scale*. The following arrow styles are\navailable.\n\n%(AvailableArrowstyles)s\n\n*mutation_scale* : a value with which attributes of arrowstyle\n (e.g., head_length) will be scaled. default=1.\n\n*mutation_aspect* : The height of the rectangle will be\n squeezed by this value before the mutation and the mutated\n box will be stretched by the inverse of it. default=None.\n\nValid kwargs are:\n%(Patch)s", "id": "f17197:c19:m1"} {"signature": "def set_positions(self, posA, posB):", "body": "if posA is not None: self._posA_posB[] = posAif posB is not None: self._posA_posB[] = posB", "docstring": "set the begin end end positions of the connecting\n path. Use current vlaue if None.", "id": "f17197:c19:m2"} {"signature": "def set_patchA(self, patchA):", "body": "self.patchA = patchA", "docstring": "set the begin patch.", "id": "f17197:c19:m3"} {"signature": "def set_patchB(self, patchB):", "body": "self.patchB = patchB", "docstring": "set the begin patch", "id": "f17197:c19:m4"} {"signature": "def set_connectionstyle(self, connectionstyle, **kw):", "body": "if connectionstyle==None:return ConnectionStyle.pprint_styles()if isinstance(connectionstyle, ConnectionStyle._Base):self._connector = connectionstyleelif callable(connectionstyle):self._connector = connectionstyleelse:self._connector = ConnectionStyle(connectionstyle, **kw)", "docstring": "Set the connection style.\n\n*connectionstyle* can be a string with connectionstyle name with optional\n comma-separated attributes. 
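A sketch of the FancyArrowPatch constructor described above, connecting two points with a curved connector; the coordinates, style strings and mutation_scale are illustrative assumptions:

    import matplotlib.pyplot as plt
    from matplotlib.patches import FancyArrowPatch

    fig = plt.figure()
    ax = fig.add_subplot(111)
    # posA/posB are given in data coordinates and path is None, so the
    # connectionstyle builds the path and the arrowstyle mutates it into an arrow.
    arrow = FancyArrowPatch((0.2, 0.2), (0.8, 0.6),
                            connectionstyle="arc3,rad=0.2",
                            arrowstyle="->",
                            mutation_scale=15)
    ax.add_patch(arrow)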
Alternatively, the attrs can\n be probided as keywords.\n\n set_connectionstyle(\"arc,angleA=0,armA=30,rad=10\")\n set_connectionstyle(\"arc\", angleA=0,armA=30,rad=10)\n\nOld attrs simply are forgotten.\n\nWithout argument (or with connectionstyle=None), return\navailable styles as a list of strings.", "id": "f17197:c19:m5"} {"signature": "def get_connectionstyle(self):", "body": "return self._connector", "docstring": "Return the ConnectionStyle instance", "id": "f17197:c19:m6"} {"signature": "def set_arrowstyle(self, arrowstyle=None, **kw):", "body": "if arrowstyle==None:return ArrowStyle.pprint_styles()if isinstance(arrowstyle, ConnectionStyle._Base):self._arrow_transmuter = arrowstyleelse:self._arrow_transmuter = ArrowStyle(arrowstyle, **kw)", "docstring": "Set the arrow style.\n\n*arrowstyle* can be a string with arrowstyle name with optional\n comma-separated attributes. Alternatively, the attrs can\n be provided as keywords.\n\n set_arrowstyle(\"Fancy,head_length=0.2\")\n set_arrowstyle(\"fancy\", head_length=0.2)\n\nOld attrs simply are forgotten.\n\nWithout argument (or with arrowstyle=None), return\navailable box styles as a list of strings.", "id": "f17197:c19:m7"} {"signature": "def get_arrowstyle(self):", "body": "return self._arrow_transmuter", "docstring": "Return the arrowstyle object", "id": "f17197:c19:m8"} {"signature": "def set_mutation_scale(self, scale):", "body": "self._mutation_scale=scale", "docstring": "Set the mutation scale.\n\nACCEPTS: float", "id": "f17197:c19:m9"} {"signature": "def get_mutation_scale(self):", "body": "return self._mutation_scale", "docstring": "Return the mutation scale.", "id": "f17197:c19:m10"} {"signature": "def set_mutation_aspect(self, aspect):", "body": "self._mutation_aspect=aspect", "docstring": "Set the aspect ratio of the bbox mutation.\n\nACCEPTS: float", "id": "f17197:c19:m11"} {"signature": "def get_mutation_aspect(self):", "body": "return self._mutation_aspect", "docstring": "Return the aspect ratio of the bbox mutation.", "id": "f17197:c19:m12"} {"signature": "def get_path(self):", "body": "_path = self.get_path_in_displaycoord()return self.get_transform().inverted().transform_path(_path)", "docstring": "return the path of the arrow in the data coordinate. 
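The styles can also be changed after construction, as the set_arrowstyle and set_connectionstyle records above describe; a short sketch continuing the FancyArrowPatch example shown earlier, with the attribute values being assumptions:

    # Keyword attributes override the named style's defaults; old attributes
    # are forgotten, per the docstrings above.
    arrow.set_arrowstyle("fancy", head_length=0.4, head_width=0.4, tail_width=0.4)
    arrow.set_connectionstyle("arc3", rad=-0.3)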
Use\nget_path_in_displaycoord() medthod to retrieve the arrow path\nin the disaply coord.", "id": "f17197:c19:m13"} {"signature": "def get_path_in_displaycoord(self):", "body": "if self._posA_posB is not None:posA = self.get_transform().transform_point(self._posA_posB[])posB = self.get_transform().transform_point(self._posA_posB[])_path = self.get_connectionstyle()(posA, posB,patchA=self.patchA,patchB=self.patchB,shrinkA=self.shrinkA,shrinkB=self.shrinkB)else:_path = self.get_transform().transform_path(self._path_original)_path, closed = self.get_arrowstyle()(_path,self.get_mutation_scale(),self.get_linewidth(),self.get_mutation_aspect())if not closed:self.fill = Falsereturn _path", "docstring": "Return the mutated path of the arrow in the display coord", "id": "f17197:c19:m14"} {"signature": "def __init__(self, axes, loc, label,size = None, gridOn = None, tick1On = True,tick2On = True,label1On = True,label2On = False,major = True,):", "body": "artist.Artist.__init__(self)if gridOn is None: gridOn = rcParams['']self.set_figure(axes.figure)self.axes = axesname = self.__name__.lower()if size is None:if major:size = rcParams[''%name]pad = rcParams[''%name]else:size = rcParams[''%name]pad = rcParams[''%name]self._tickdir = rcParams[''%name]if self._tickdir == '':self._xtickmarkers = (mlines.TICKUP, mlines.TICKDOWN)self._ytickmarkers = (mlines.TICKRIGHT, mlines.TICKLEFT)self._pad = padelse:self._xtickmarkers = (mlines.TICKDOWN, mlines.TICKUP)self._ytickmarkers = (mlines.TICKLEFT, mlines.TICKRIGHT)self._pad = pad + sizeself._loc = locself._size = sizeself.tick1line = self._get_tick1line()self.tick2line = self._get_tick2line()self.gridline = self._get_gridline()self.label1 = self._get_text1()self.label = self.label1 self.label2 = self._get_text2()self.gridOn = gridOnself.tick1On = tick1Onself.tick2On = tick2Onself.label1On = label1Onself.label2On = label2Onself.update_position(loc)", "docstring": "bbox is the Bound2D bounding box in display coords of the Axes\nloc is the tick location in data coords\nsize is the tick size in relative, axes coords", "id": "f17198:c0:m0"} {"signature": "def contains(self, mouseevent):", "body": "if callable(self._contains): return self._contains(self,mouseevent)return False,{}", "docstring": "Test whether the mouse event occured in the Tick marks.\n\nThis function always returns false. 
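The Tick constructor above pulls its default size, pad and direction from rcParams; the concrete keys were stripped during extraction, so the names below are the standard matplotlib ones, given here as an assumption:

    import matplotlib
    matplotlib.rcParams['xtick.major.size'] = 6    # tick length in points
    matplotlib.rcParams['xtick.major.pad'] = 4     # label padding in points
    matplotlib.rcParams['xtick.direction'] = 'in'  # 'in' or 'out'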
It is more useful to test if the\naxis as a whole contains the mouse rather than the set of tick marks.", "id": "f17198:c0:m4"} {"signature": "def set_pad(self, val):", "body": "self._pad = val", "docstring": "Set the tick label pad in points\n\nACCEPTS: float", "id": "f17198:c0:m5"} {"signature": "def get_pad(self):", "body": "return self._pad", "docstring": "Get the value of the tick label pad in points", "id": "f17198:c0:m6"} {"signature": "def _get_text1(self):", "body": "pass", "docstring": "Get the default Text 1 instance", "id": "f17198:c0:m7"} {"signature": "def _get_text2(self):", "body": "pass", "docstring": "Get the default Text 2 instance", "id": "f17198:c0:m8"} {"signature": "def _get_tick1line(self):", "body": "pass", "docstring": "Get the default line2D instance for tick1", "id": "f17198:c0:m9"} {"signature": "def _get_tick2line(self):", "body": "pass", "docstring": "Get the default line2D instance for tick2", "id": "f17198:c0:m10"} {"signature": "def _get_gridline(self):", "body": "pass", "docstring": "Get the default grid Line2d instance for this tick", "id": "f17198:c0:m11"} {"signature": "def get_loc(self):", "body": "return self._loc", "docstring": "Return the tick location (data coords) as a scalar", "id": "f17198:c0:m12"} {"signature": "def set_label1(self, s):", "body": "self.label1.set_text(s)", "docstring": "Set the text of ticklabel\n\nACCEPTS: str", "id": "f17198:c0:m14"} {"signature": "def set_label2(self, s):", "body": "self.label2.set_text(s)", "docstring": "Set the text of ticklabel2\n\nACCEPTS: str", "id": "f17198:c0:m15"} {"signature": "def get_view_interval(self):", "body": "raise NotImplementedError('')", "docstring": "return the view Interval instance for the axis this tick is ticking", "id": "f17198:c0:m17"} {"signature": "def _get_text1(self):", "body": "trans, vert, horiz = self.axes.get_xaxis_text1_transform(self._pad)size = rcParams['']t = mtext.Text(x=, y=,fontproperties=font_manager.FontProperties(size=size),color=rcParams[''],verticalalignment=vert,horizontalalignment=horiz,)t.set_transform(trans)self._set_artist_props(t)return t", "docstring": "Get the default Text instance", "id": "f17198:c1:m0"} {"signature": "def _get_text2(self):", "body": "trans, vert, horiz = self.axes.get_xaxis_text2_transform(self._pad)t = mtext.Text(x=, y=,fontproperties=font_manager.FontProperties(size=rcParams['']),color=rcParams[''],verticalalignment=vert,horizontalalignment=horiz,)t.set_transform(trans)self._set_artist_props(t)return t", "docstring": "Get the default Text 2 instance", "id": "f17198:c1:m1"} {"signature": "def _get_tick1line(self):", "body": "l = mlines.Line2D(xdata=(,), ydata=(,),color='',linestyle = '',marker = self._xtickmarkers[],markersize=self._size,)l.set_transform(self.axes.get_xaxis_transform())self._set_artist_props(l)return l", "docstring": "Get the default line2D instance", "id": "f17198:c1:m2"} {"signature": "def _get_tick2line(self):", "body": "l = mlines.Line2D( xdata=(,), ydata=(,),color='',linestyle = '',marker = self._xtickmarkers[],markersize=self._size,)l.set_transform(self.axes.get_xaxis_transform())self._set_artist_props(l)return l", "docstring": "Get the default line2D instance", "id": "f17198:c1:m3"} {"signature": "def _get_gridline(self):", "body": "l = mlines.Line2D(xdata=(, ), ydata=(, ),color=rcParams[''],linestyle=rcParams[''],linewidth=rcParams[''],)l.set_transform(self.axes.get_xaxis_transform())self._set_artist_props(l)return l", "docstring": "Get the default line2D instance", "id": "f17198:c1:m4"} {"signature": "def 
update_position(self, loc):", "body": "x = locnonlinear = (hasattr(self.axes, '') andself.axes.yaxis.get_scale() != '' orhasattr(self.axes, '') andself.axes.xaxis.get_scale() != '')if self.tick1On:self.tick1line.set_xdata((x,))if self.tick2On:self.tick2line.set_xdata((x,))if self.gridOn:self.gridline.set_xdata((x,))if self.label1On:self.label1.set_x(x)if self.label2On:self.label2.set_x(x)if nonlinear:self.tick1line._invalid = Trueself.tick2line._invalid = Trueself.gridline._invalid = Trueself._loc = loc", "docstring": "Set the location of tick in data coords with scalar *loc*", "id": "f17198:c1:m5"} {"signature": "def get_view_interval(self):", "body": "return self.axes.viewLim.intervalx", "docstring": "return the Interval instance for this axis view limits", "id": "f17198:c1:m6"} {"signature": "def get_data_interval(self):", "body": "return self.axes.dataLim.intervalx", "docstring": "return the Interval instance for this axis data limits", "id": "f17198:c1:m9"} {"signature": "def _get_text1(self):", "body": "trans, vert, horiz = self.axes.get_yaxis_text1_transform(self._pad)t = mtext.Text(x=, y=,fontproperties=font_manager.FontProperties(size=rcParams['']),color=rcParams[''],verticalalignment=vert,horizontalalignment=horiz,)t.set_transform(trans)self._set_artist_props(t)return t", "docstring": "Get the default Text instance", "id": "f17198:c2:m0"} {"signature": "def _get_text2(self):", "body": "trans, vert, horiz = self.axes.get_yaxis_text2_transform(self._pad)t = mtext.Text(x=, y=,fontproperties=font_manager.FontProperties(size=rcParams['']),color=rcParams[''],verticalalignment=vert,horizontalalignment=horiz,)t.set_transform(trans)self._set_artist_props(t)return t", "docstring": "Get the default Text instance", "id": "f17198:c2:m1"} {"signature": "def _get_tick1line(self):", "body": "l = mlines.Line2D( (,), (,), color='',marker = self._ytickmarkers[],linestyle = '',markersize=self._size,)l.set_transform(self.axes.get_yaxis_transform())self._set_artist_props(l)return l", "docstring": "Get the default line2D instance", "id": "f17198:c2:m2"} {"signature": "def _get_tick2line(self):", "body": "l = mlines.Line2D( (,), (,), color='',marker = self._ytickmarkers[],linestyle = '',markersize=self._size,)l.set_transform(self.axes.get_yaxis_transform())self._set_artist_props(l)return l", "docstring": "Get the default line2D instance", "id": "f17198:c2:m3"} {"signature": "def _get_gridline(self):", "body": "l = mlines.Line2D( xdata=(,), ydata=(, ),color=rcParams[''],linestyle=rcParams[''],linewidth=rcParams[''],)l.set_transform(self.axes.get_yaxis_transform())self._set_artist_props(l)return l", "docstring": "Get the default line2D instance", "id": "f17198:c2:m4"} {"signature": "def update_position(self, loc):", "body": "y = locnonlinear = (hasattr(self.axes, '') andself.axes.yaxis.get_scale() != '' orhasattr(self.axes, '') andself.axes.xaxis.get_scale() != '')if self.tick1On:self.tick1line.set_ydata((y,))if self.tick2On:self.tick2line.set_ydata((y,))if self.gridOn:self.gridline.set_ydata((y, ))if self.label1On:self.label1.set_y( y )if self.label2On:self.label2.set_y( y )if nonlinear:self.tick1line._invalid = Trueself.tick2line._invalid = Trueself.gridline._invalid = Trueself._loc = loc", "docstring": "Set the location of tick in data coords with scalar loc", "id": "f17198:c2:m5"} {"signature": "def get_view_interval(self):", "body": "return self.axes.viewLim.intervaly", "docstring": "return the Interval instance for this axis view limits", "id": "f17198:c2:m6"} {"signature": "def 
get_data_interval(self):", "body": "return self.axes.dataLim.intervaly", "docstring": "return the Interval instance for this axis data limits", "id": "f17198:c2:m9"} {"signature": "def __init__(self, axes, pickradius=):", "body": "artist.Artist.__init__(self)self.set_figure(axes.figure)self.axes = axesself.major = Ticker()self.minor = Ticker()self.callbacks = cbook.CallbackRegistry(('', ''))self._autolabelpos = Trueself.label = self._get_label()self.offsetText = self._get_offset_text()self.majorTicks = []self.minorTicks = []self.pickradius = pickradiusself.cla()self.set_scale('')", "docstring": "Init the axis with the parent Axes instance", "id": "f17198:c4:m1"} {"signature": "def set_label_coords(self, x, y, transform=None):", "body": "self._autolabelpos = Falseif transform is None:transform = self.axes.transAxesself.label.set_transform(transform)self.label.set_position((x, y))", "docstring": "Set the coordinates of the label. By default, the x\ncoordinate of the y label is determined by the tick label\nbounding boxes, but this can lead to poor alignment of\nmultiple ylabels if there are multiple axes. Ditto for the y\ncoodinate of the x label.\n\nYou can also specify the coordinate system of the label with\nthe transform. If None, the default coordinate system will be\nthe axes coordinate system (0,0) is (left,bottom), (0.5, 0.5)\nis middle, etc", "id": "f17198:c4:m2"} {"signature": "def cla(self):", "body": "self.set_major_locator(mticker.AutoLocator())self.set_major_formatter(mticker.ScalarFormatter())self.set_minor_locator(mticker.NullLocator())self.set_minor_formatter(mticker.NullFormatter())self.callbacks = cbook.CallbackRegistry(('', ''))self._gridOnMajor = rcParams['']self._gridOnMinor = Falseself.label.set_text('')self._set_artist_props(self.label)cbook.popall(self.majorTicks)cbook.popall(self.minorTicks)self.majorTicks.extend([self._get_tick(major=True)])self.minorTicks.extend([self._get_tick(major=False)])self._lastNumMajorTicks = self._lastNumMinorTicks = self.converter = Noneself.units = Noneself.set_units(None)", "docstring": "clear the current axis", "id": "f17198:c4:m8"} {"signature": "def get_view_interval(self):", "body": "raise NotImplementedError('')", "docstring": "return the Interval instance for this axis view limits", "id": "f17198:c4:m10"} {"signature": "def get_data_interval(self):", "body": "raise NotImplementedError('')", "docstring": "return the Interval instance for this axis data limits", "id": "f17198:c4:m12"} {"signature": "def set_data_interval(self):", "body": "raise NotImplementedError('')", "docstring": "Set the axis data limits", "id": "f17198:c4:m13"} {"signature": "def iter_ticks(self):", "body": "majorLocs = self.major.locator()majorTicks = self.get_major_ticks(len(majorLocs))self.major.formatter.set_locs(majorLocs)majorLabels = [self.major.formatter(val, i) for i, val in enumerate(majorLocs)]minorLocs = self.minor.locator()minorTicks = self.get_minor_ticks(len(minorLocs))self.minor.formatter.set_locs(minorLocs)minorLabels = [self.minor.formatter(val, i) for i, val in enumerate(minorLocs)]major_minor = [(majorTicks, majorLocs, majorLabels),(minorTicks, minorLocs, minorLabels)]for group in major_minor:for tick in zip(*group):yield tick", "docstring": "Iterate through all of the major and minor ticks.", "id": "f17198:c4:m15"} {"signature": "def get_ticklabel_extents(self, renderer):", "body": "ticklabelBoxes = []ticklabelBoxes2 = []interval = self.get_view_interval()for tick, loc, label in self.iter_ticks():if tick is None: continueif not 
mtransforms.interval_contains(interval, loc): continuetick.update_position(loc)tick.set_label1(label)tick.set_label2(label)if tick.label1On and tick.label1.get_visible():extent = tick.label1.get_window_extent(renderer)ticklabelBoxes.append(extent)if tick.label2On and tick.label2.get_visible():extent = tick.label2.get_window_extent(renderer)ticklabelBoxes2.append(extent)if len(ticklabelBoxes):bbox = mtransforms.Bbox.union(ticklabelBoxes)else:bbox = mtransforms.Bbox.from_extents(, , , )if len(ticklabelBoxes2):bbox2 = mtransforms.Bbox.union(ticklabelBoxes2)else:bbox2 = mtransforms.Bbox.from_extents(, , , )return bbox, bbox2", "docstring": "Get the extents of the tick labels on either side\nof the axes.", "id": "f17198:c4:m16"} {"signature": "def draw(self, renderer, *args, **kwargs):", "body": "ticklabelBoxes = []ticklabelBoxes2 = []if not self.get_visible(): returnrenderer.open_group(__name__)interval = self.get_view_interval()for tick, loc, label in self.iter_ticks():if tick is None: continueif not mtransforms.interval_contains(interval, loc): continuetick.update_position(loc)tick.set_label1(label)tick.set_label2(label)tick.draw(renderer)if tick.label1On and tick.label1.get_visible():extent = tick.label1.get_window_extent(renderer)ticklabelBoxes.append(extent)if tick.label2On and tick.label2.get_visible():extent = tick.label2.get_window_extent(renderer)ticklabelBoxes2.append(extent)self._update_label_position(ticklabelBoxes, ticklabelBoxes2)self.label.draw(renderer)self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)self.offsetText.set_text( self.major.formatter.get_offset() )self.offsetText.draw(renderer)if : for tick in majorTicks:label = tick.label1mpatches.bbox_artist(label, renderer)mpatches.bbox_artist(self.label, renderer)renderer.close_group(__name__)", "docstring": "Draw the axis lines, grid lines, tick lines and labels", "id": "f17198:c4:m17"} {"signature": "def get_gridlines(self):", "body": "ticks = self.get_major_ticks()return cbook.silent_list('', [tick.gridline for tick in ticks])", "docstring": "Return the grid lines as a list of Line2D instance", "id": "f17198:c4:m20"} {"signature": "def get_label(self):", "body": "return self.label", "docstring": "Return the axis label as a Text instance", "id": "f17198:c4:m21"} {"signature": "def get_offset_text(self):", "body": "return self.offsetText", "docstring": "Return the axis offsetText as a Text instance", "id": "f17198:c4:m22"} {"signature": "def get_pickradius(self):", "body": "return self.pickradius", "docstring": "Return the depth of the axis used by the picker", "id": "f17198:c4:m23"} {"signature": "def get_majorticklabels(self):", "body": "ticks = self.get_major_ticks()labels1 = [tick.label1 for tick in ticks if tick.label1On]labels2 = [tick.label2 for tick in ticks if tick.label2On]return cbook.silent_list('', labels1+labels2)", "docstring": "Return a list of Text instances for the major ticklabels", "id": "f17198:c4:m24"} {"signature": "def get_minorticklabels(self):", "body": "ticks = self.get_minor_ticks()labels1 = [tick.label1 for tick in ticks if tick.label1On]labels2 = [tick.label2 for tick in ticks if tick.label2On]return cbook.silent_list('', labels1+labels2)", "docstring": "Return a list of Text instances for the minor ticklabels", "id": "f17198:c4:m25"} {"signature": "def get_ticklabels(self, minor=False):", "body": "if minor:return self.get_minorticklabels()return self.get_majorticklabels()", "docstring": "Return a list of Text instances for ticklabels", "id": "f17198:c4:m26"} {"signature": "def 
get_majorticklines(self):", "body": "lines = []ticks = self.get_major_ticks()for tick in ticks:lines.append(tick.tick1line)lines.append(tick.tick2line)return cbook.silent_list('', lines)", "docstring": "Return the major tick lines as a list of Line2D instances", "id": "f17198:c4:m27"} {"signature": "def get_minorticklines(self):", "body": "lines = []ticks = self.get_minor_ticks()for tick in ticks:lines.append(tick.tick1line)lines.append(tick.tick2line)return cbook.silent_list('', lines)", "docstring": "Return the minor tick lines as a list of Line2D instances", "id": "f17198:c4:m28"} {"signature": "def get_ticklines(self, minor=False):", "body": "if minor:return self.get_minorticklines()return self.get_majorticklines()", "docstring": "Return the tick lines as a list of Line2D instances", "id": "f17198:c4:m29"} {"signature": "def get_majorticklocs(self):", "body": "return self.major.locator()", "docstring": "Get the major tick locations in data coordinates as a numpy array", "id": "f17198:c4:m30"} {"signature": "def get_minorticklocs(self):", "body": "return self.minor.locator()", "docstring": "Get the minor tick locations in data coordinates as a numpy array", "id": "f17198:c4:m31"} {"signature": "def get_ticklocs(self, minor=False):", "body": "if minor:return self.minor.locator()return self.major.locator()", "docstring": "Get the tick locations in data coordinates as a numpy array", "id": "f17198:c4:m32"} {"signature": "def _get_tick(self, major):", "body": "raise NotImplementedError('')", "docstring": "return the default tick intsance", "id": "f17198:c4:m33"} {"signature": "def _copy_tick_props(self, src, dest):", "body": "if src is None or dest is None: returndest.label1.update_from(src.label1)dest.label2.update_from(src.label2)dest.tick1line.update_from(src.tick1line)dest.tick2line.update_from(src.tick2line)dest.gridline.update_from(src.gridline)dest.tick1On = src.tick1Ondest.tick2On = src.tick2Ondest.label1On = src.label1Ondest.label2On = src.label2On", "docstring": "Copy the props from src tick to dest tick", "id": "f17198:c4:m34"} {"signature": "def get_major_locator(self):", "body": "return self.major.locator", "docstring": "Get the locator of the major ticker", "id": "f17198:c4:m35"} {"signature": "def get_minor_locator(self):", "body": "return self.minor.locator", "docstring": "Get the locator of the minor ticker", "id": "f17198:c4:m36"} {"signature": "def get_major_formatter(self):", "body": "return self.major.formatter", "docstring": "Get the formatter of the major ticker", "id": "f17198:c4:m37"} {"signature": "def get_minor_formatter(self):", "body": "return self.minor.formatter", "docstring": "Get the formatter of the minor ticker", "id": "f17198:c4:m38"} {"signature": "def get_major_ticks(self, numticks=None):", "body": "if numticks is None:numticks = len(self.get_major_locator()())if len(self.majorTicks) < numticks:for i in range(numticks - len(self.majorTicks)):tick = self._get_tick(major=True)self.majorTicks.append(tick)if self._lastNumMajorTicks < numticks:protoTick = self.majorTicks[]for i in range(self._lastNumMajorTicks, len(self.majorTicks)):tick = self.majorTicks[i]if self._gridOnMajor: tick.gridOn = Trueself._copy_tick_props(protoTick, tick)self._lastNumMajorTicks = numticksticks = self.majorTicks[:numticks]return ticks", "docstring": "get the tick instances; grow as necessary", "id": "f17198:c4:m39"} {"signature": "def get_minor_ticks(self, numticks=None):", "body": "if numticks is None:numticks = len(self.get_minor_locator()())if len(self.minorTicks) < 
numticks:for i in range(numticks - len(self.minorTicks)):tick = self._get_tick(major=False)self.minorTicks.append(tick)if self._lastNumMinorTicks < numticks:protoTick = self.minorTicks[]for i in range(self._lastNumMinorTicks, len(self.minorTicks)):tick = self.minorTicks[i]if self._gridOnMinor: tick.gridOn = Trueself._copy_tick_props(protoTick, tick)self._lastNumMinorTicks = numticksticks = self.minorTicks[:numticks]return ticks", "docstring": "get the minor tick instances; grow as necessary", "id": "f17198:c4:m40"} {"signature": "def grid(self, b=None, which='', **kwargs):", "body": "if len(kwargs): b = Trueif which.lower().find('')>=:if b is None: self._gridOnMinor = not self._gridOnMinorelse: self._gridOnMinor = bfor tick in self.minorTicks: if tick is None: continuetick.gridOn = self._gridOnMinorif len(kwargs): artist.setp(tick.gridline,**kwargs)else:if b is None: self._gridOnMajor = not self._gridOnMajorelse: self._gridOnMajor = bfor tick in self.majorTicks: if tick is None: continuetick.gridOn = self._gridOnMajorif len(kwargs): artist.setp(tick.gridline,**kwargs)", "docstring": "Set the axis grid on or off; b is a boolean use *which* =\n'major' | 'minor' to set the grid for major or minor ticks\n\nif *b* is *None* and len(kwargs)==0, toggle the grid state. If\n*kwargs* are supplied, it is assumed you want the grid on and *b*\nwill be set to True\n\n*kwargs* are used to set the line properties of the grids, eg,\n\n xax.grid(color='r', linestyle='-', linewidth=2)", "id": "f17198:c4:m41"} {"signature": "def update_units(self, data):", "body": "converter = munits.registry.get_converter(data)if converter is None: return Falseself.converter = converterdefault = self.converter.default_units(data)if default is not None and self.units is None:self.set_units(default)self._update_axisinfo()return True", "docstring": "introspect *data* for units converter and update the\naxis.converter instance if necessary. 
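A usage sketch for the Axis.grid record above; the which value and line properties are illustrative:

    import matplotlib.pyplot as plt

    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Supplying kwargs implies b=True, so this turns the major x grid on
    # and styles the grid lines in one call.
    ax.xaxis.grid(True, which='major', color='r', linestyle='-', linewidth=2)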
Return *True* is *data* is\nregistered for unit conversion", "id": "f17198:c4:m42"} {"signature": "def _update_axisinfo(self):", "body": "if self.converter is None:returninfo = self.converter.axisinfo(self.units)if info is None:returnif info.majloc is not None and self.major.locator!=info.majloc:self.set_major_locator(info.majloc)if info.minloc is not None and self.minor.locator!=info.minloc:self.set_minor_locator(info.minloc)if info.majfmt is not None and self.major.formatter!=info.majfmt:self.set_major_formatter(info.majfmt)if info.minfmt is not None and self.minor.formatter!=info.minfmt:self.set_minor_formatter(info.minfmt)if info.label is not None:label = self.get_label()label.set_text(info.label)", "docstring": "check the axis converter for the stored units to see if the\naxis info needs to be updated", "id": "f17198:c4:m43"} {"signature": "def set_units(self, u):", "body": "pchanged = Falseif u is None:self.units = Nonepchanged = Trueelse:if u!=self.units:self.units = upchanged = Trueif pchanged:self._update_axisinfo()self.callbacks.process('')self.callbacks.process('')", "docstring": "set the units for axis\n\nACCEPTS: a units tag", "id": "f17198:c4:m46"} {"signature": "def get_units(self):", "body": "return self.units", "docstring": "return the units for axis", "id": "f17198:c4:m47"} {"signature": "def set_major_formatter(self, formatter):", "body": "self.major.formatter = formatterformatter.set_axis(self)", "docstring": "Set the formatter of the major ticker\n\nACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance", "id": "f17198:c4:m48"} {"signature": "def set_minor_formatter(self, formatter):", "body": "self.minor.formatter = formatterformatter.set_axis(self)", "docstring": "Set the formatter of the minor ticker\n\nACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance", "id": "f17198:c4:m49"} {"signature": "def set_major_locator(self, locator):", "body": "self.major.locator = locatorlocator.set_axis(self)", "docstring": "Set the locator of the major ticker\n\nACCEPTS: a :class:`~matplotlib.ticker.Locator` instance", "id": "f17198:c4:m50"} {"signature": "def set_minor_locator(self, locator):", "body": "self.minor.locator = locatorlocator.set_axis(self)", "docstring": "Set the locator of the minor ticker\n\nACCEPTS: a :class:`~matplotlib.ticker.Locator` instance", "id": "f17198:c4:m51"} {"signature": "def set_pickradius(self, pickradius):", "body": "self.pickradius = pickradius", "docstring": "Set the depth of the axis used by the picker\n\nACCEPTS: a distance in points", "id": "f17198:c4:m52"} {"signature": "def set_ticklabels(self, ticklabels, *args, **kwargs):", "body": "minor = kwargs.pop('', False)if minor:self.set_minor_formatter(mticker.FixedFormatter(ticklabels))ticks = self.get_minor_ticks()else:self.set_major_formatter( mticker.FixedFormatter(ticklabels) )ticks = self.get_major_ticks()self.set_major_formatter( mticker.FixedFormatter(ticklabels) )ret = []for i, tick in enumerate(ticks):if itick.label1.set_text(ticklabels[i])ret.append(tick.label1)tick.label1.update(kwargs)return ret", "docstring": "Set the text values of the tick labels. Return a list of Text\ninstances. 
Use *kwarg* *minor=True* to select minor ticks.\n\nACCEPTS: sequence of strings", "id": "f17198:c4:m53"} {"signature": "def set_ticks(self, ticks, minor=False):", "body": "ticks = self.convert_units(ticks)if len(ticks) > :xleft, xright = self.get_view_interval()if xright > xleft:self.set_view_interval(min(ticks), max(ticks))else:self.set_view_interval(max(ticks), min(ticks))if minor:self.set_minor_locator(mticker.FixedLocator(ticks))return self.get_minor_ticks(len(ticks))else:self.set_major_locator( mticker.FixedLocator(ticks) )return self.get_major_ticks(len(ticks))", "docstring": "Set the locations of the tick marks from sequence ticks\n\nACCEPTS: sequence of floats", "id": "f17198:c4:m54"} {"signature": "def _update_label_position(self, bboxes, bboxes2):", "body": "raise NotImplementedError('')", "docstring": "Update the label position based on the sequence of bounding\nboxes of all the ticklabels", "id": "f17198:c4:m55"} {"signature": "def _update_offset_text_postion(self, bboxes, bboxes2):", "body": "raise NotImplementedError('')", "docstring": "Update the label position based on the sequence of bounding\nboxes of all the ticklabels", "id": "f17198:c4:m56"} {"signature": "def pan(self, numsteps):", "body": "self.major.locator.pan(numsteps)", "docstring": "Pan *numsteps* (can be positive or negative)", "id": "f17198:c4:m57"} {"signature": "def zoom(self, direction):", "body": "self.major.locator.zoom(direction)", "docstring": "Zoom in/out on axis; if *direction* is >0 zoom in, else zoom out", "id": "f17198:c4:m58"} {"signature": "def contains(self,mouseevent):", "body": "if callable(self._contains): return self._contains(self,mouseevent)x,y = mouseevent.x,mouseevent.ytry:trans = self.axes.transAxes.inverted()xaxes,yaxes = trans.transform_point((x,y))except ValueError:return False, {}l,b = self.axes.transAxes.transform_point((,))r,t = self.axes.transAxes.transform_point((,))inaxis = xaxes>= and xaxes<= and ((yb-self.pickradius) or(y>t and yreturn inaxis, {}", "docstring": "Test whether the mouse event occured in the x axis.", "id": "f17198:c5:m0"} {"signature": "def get_label_position(self):", "body": "return self.label_position", "docstring": "Return the label position (top or bottom)", "id": "f17198:c5:m4"} {"signature": "def set_label_position(self, position):", "body": "assert position == '' or position == ''if position == '':self.label.set_verticalalignment('')else:self.label.set_verticalalignment('')self.label_position=position", "docstring": "Set the label position (top or bottom)\n\nACCEPTS: [ 'top' | 'bottom' ]", "id": "f17198:c5:m5"} {"signature": "def _update_label_position(self, bboxes, bboxes2):", "body": "if not self._autolabelpos: returnx,y = self.label.get_position()if self.label_position == '':if not len(bboxes):bottom = self.axes.bbox.yminelse:bbox = mtransforms.Bbox.union(bboxes)bottom = bbox.y0self.label.set_position( (x, bottom - self.LABELPAD*self.figure.dpi / ))else:if not len(bboxes2):top = self.axes.bbox.ymaxelse:bbox = mtransforms.Bbox.union(bboxes2)top = bbox.y1self.label.set_position( (x, top+self.LABELPAD*self.figure.dpi / ))", "docstring": "Update the label position based on the sequence of bounding\nboxes of all the ticklabels", "id": "f17198:c5:m6"} {"signature": "def _update_offset_text_position(self, bboxes, bboxes2):", "body": "x,y = self.offsetText.get_position()if not len(bboxes):bottom = self.axes.bbox.yminelse:bbox = mtransforms.Bbox.union(bboxes)bottom = bbox.y0self.offsetText.set_position((x, bottom-self.OFFSETTEXTPAD*self.figure.dpi/))", 
"docstring": "Update the offset_text position based on the sequence of bounding\nboxes of all the ticklabels", "id": "f17198:c5:m7"} {"signature": "def get_text_heights(self, renderer):", "body": "bbox, bbox2 = self.get_ticklabel_extents(renderer)padPixels = self.majorTicks[].get_pad_pixels()above = if bbox2.height:above += bbox2.height + padPixelsbelow = if bbox.height:below += bbox.height + padPixelsif self.get_label_position() == '':above += self.label.get_window_extent(renderer).height + padPixelselse:below += self.label.get_window_extent(renderer).height + padPixelsreturn above, below", "docstring": "Returns the amount of space one should reserve for text\nabove and below the axes. Returns a tuple (above, below)", "id": "f17198:c5:m8"} {"signature": "def set_ticks_position(self, position):", "body": "assert position in ('', '', '', '', '')ticks = list( self.get_major_ticks() ) ticks.extend( self.get_minor_ticks() )if position == '':for t in ticks:t.tick1On = Falset.tick2On = Truet.label1On = Falset.label2On = Trueelif position == '':for t in ticks:t.tick1On = Truet.tick2On = Falset.label1On = Truet.label2On = Falseelif position == '':for t in ticks:t.tick1On = Truet.tick2On = Truet.label1On = Truet.label2On = Falseelif position == '':for t in ticks:t.tick1On = Falset.tick2On = Falseelse:for t in ticks:t.tick1On = Truet.tick2On = Truefor t in ticks:t.update_position(t._loc)", "docstring": "Set the ticks position (top, bottom, both, default or none)\nboth sets the ticks to appear on both positions, but does not\nchange the tick labels. default resets the tick positions to\nthe default: ticks on both positions, labels at bottom. none\ncan be used if you don't want any ticks.\n\nACCEPTS: [ 'top' | 'bottom' | 'both' | 'default' | 'none' ]", "id": "f17198:c5:m9"} {"signature": "def tick_top(self):", "body": "self.set_ticks_position('')", "docstring": "use ticks only on top", "id": "f17198:c5:m10"} {"signature": "def tick_bottom(self):", "body": "self.set_ticks_position('')", "docstring": "use ticks only on bottom", "id": "f17198:c5:m11"} {"signature": "def get_ticks_position(self):", "body": "majt=self.majorTicks[]mT=self.minorTicks[]majorTop=(not majt.tick1On) and majt.tick2On and (not majt.label1On) and majt.label2OnminorTop=(not mT.tick1On) and mT.tick2On and (not mT.label1On) and mT.label2Onif majorTop and minorTop: return ''MajorBottom=majt.tick1On and (not majt.tick2On) and majt.label1On and (not majt.label2On)MinorBottom=mT.tick1On and (not mT.tick2On) and mT.label1On and (not mT.label2On)if MajorBottom and MinorBottom: return ''majorDefault=majt.tick1On and majt.tick2On and majt.label1On and (not majt.label2On)minorDefault=mT.tick1On and mT.tick2On and mT.label1On and (not mT.label2On)if majorDefault and minorDefault: return ''return ''", "docstring": "Return the ticks position (top, bottom, default or unknown)", "id": "f17198:c5:m12"} {"signature": "def get_view_interval(self):", "body": "return self.axes.viewLim.intervalx", "docstring": "return the Interval instance for this axis view limits", "id": "f17198:c5:m13"} {"signature": "def get_data_interval(self):", "body": "return self.axes.dataLim.intervalx", "docstring": "return the Interval instance for this axis data limits", "id": "f17198:c5:m16"} {"signature": "def set_data_interval(self, vmin, vmax, ignore=False):", "body": "if ignore:self.axes.dataLim.intervalx = vmin, vmaxelse:Vmin, Vmax = self.get_data_interval()self.axes.dataLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)", "docstring": "return the Interval instance for 
this axis data limits", "id": "f17198:c5:m17"} {"signature": "def contains(self,mouseevent):", "body": "if callable(self._contains): return self._contains(self,mouseevent)x,y = mouseevent.x,mouseevent.ytry:trans = self.axes.transAxes.inverted()xaxes,yaxes = trans.transform_point((x,y))except ValueError:return False, {}l,b = self.axes.transAxes.transform_point((,))r,t = self.axes.transAxes.transform_point((,))inaxis = yaxes>= and yaxes<= and ((xl-self.pickradius) or(x>r and xreturn inaxis, {}", "docstring": "Test whether the mouse event occurred in the y axis.\n\n Returns *True* | *False*", "id": "f17198:c6:m0"} {"signature": "def get_label_position(self):", "body": "return self.label_position", "docstring": "Return the label position (left or right)", "id": "f17198:c6:m4"} {"signature": "def set_label_position(self, position):", "body": "assert position == '' or position == ''if position == '':self.label.set_horizontalalignment('')else:self.label.set_horizontalalignment('')self.label_position=position", "docstring": "Set the label position (left or right)\n\nACCEPTS: [ 'left' | 'right' ]", "id": "f17198:c6:m5"} {"signature": "def _update_label_position(self, bboxes, bboxes2):", "body": "if not self._autolabelpos: returnx,y = self.label.get_position()if self.label_position == '':if not len(bboxes):left = self.axes.bbox.xminelse:bbox = mtransforms.Bbox.union(bboxes)left = bbox.x0self.label.set_position( (left-self.LABELPAD*self.figure.dpi/, y))else:if not len(bboxes2):right = self.axes.bbox.xmaxelse:bbox = mtransforms.Bbox.union(bboxes2)right = bbox.x1self.label.set_position( (right+self.LABELPAD*self.figure.dpi/, y))", "docstring": "Update the label position based on the sequence of bounding\nboxes of all the ticklabels", "id": "f17198:c6:m6"} {"signature": "def _update_offset_text_position(self, bboxes, bboxes2):", "body": "x,y = self.offsetText.get_position()top = self.axes.bbox.ymaxself.offsetText.set_position((x, top+self.OFFSETTEXTPAD*self.figure.dpi/))", "docstring": "Update the offset_text position based on the sequence of bounding\nboxes of all the ticklabels", "id": "f17198:c6:m7"} {"signature": "def set_ticks_position(self, position):", "body": "assert position in ('', '', '', '', '')ticks = list( self.get_major_ticks() ) ticks.extend( self.get_minor_ticks() )if position == '':self.set_offset_position('')for t in ticks:t.tick1On = Falset.tick2On = Truet.label1On = Falset.label2On = Trueelif position == '':self.set_offset_position('')for t in ticks:t.tick1On = Truet.tick2On = Falset.label1On = Truet.label2On = Falseelif position == '':self.set_offset_position('')for t in ticks:t.tick1On = Truet.tick2On = Truet.label1On = Truet.label2On = Falseelif position == '':for t in ticks:t.tick1On = Falset.tick2On = Falseelse:self.set_offset_position('')for t in ticks:t.tick1On = Truet.tick2On = True", "docstring": "Set the ticks position (left, right, both or default)\nboth sets the ticks to appear on both positions, but\ndoes not change the tick labels.\ndefault resets the tick positions to the default:\nticks on both positions, labels on the left.\n\nACCEPTS: [ 'left' | 'right' | 'both' | 'default' | 'none' ]", "id": "f17198:c6:m10"} {"signature": "def tick_right(self):", "body": "self.set_ticks_position('')", "docstring": "use ticks only on right", "id": "f17198:c6:m11"} {"signature": "def tick_left(self):", "body": "self.set_ticks_position('')", "docstring": "use ticks only on left", "id": "f17198:c6:m12"} {"signature": "def get_ticks_position(self):", "body": 
"majt=self.majorTicks[]mT=self.minorTicks[]majorRight=(not majt.tick1On) and majt.tick2On and (not majt.label1On) and majt.label2OnminorRight=(not mT.tick1On) and mT.tick2On and (not mT.label1On) and mT.label2Onif majorRight and minorRight: return ''majorLeft=majt.tick1On and (not majt.tick2On) and majt.label1On and (not majt.label2On)minorLeft=mT.tick1On and (not mT.tick2On) and mT.label1On and (not mT.label2On)if majorLeft and minorLeft: return ''majorDefault=majt.tick1On and majt.tick2On and majt.label1On and (not majt.label2On)minorDefault=mT.tick1On and mT.tick2On and mT.label1On and (not mT.label2On)if majorDefault and minorDefault: return ''return ''", "docstring": "Return the ticks position (left, right, both or unknown)", "id": "f17198:c6:m13"} {"signature": "def get_view_interval(self):", "body": "return self.axes.viewLim.intervaly", "docstring": "return the Interval instance for this axis view limits", "id": "f17198:c6:m14"} {"signature": "def get_data_interval(self):", "body": "return self.axes.dataLim.intervaly", "docstring": "return the Interval instance for this axis data limits", "id": "f17198:c6:m17"} {"signature": "def set_data_interval(self, vmin, vmax, ignore=False):", "body": "if ignore:self.axes.dataLim.intervaly = vmin, vmaxelse:Vmin, Vmax = self.get_data_interval()self.axes.dataLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)", "docstring": "return the Interval instance for this axis data limits", "id": "f17198:c6:m18"} {"signature": "def slice2gridspec(key):", "body": "if ((len(key) != ) or(not isinstance(key[], slice)) or(not isinstance(key[], slice))):raise ValueError(\"\")x0 = key[].startx1 = key[].stopxstep = key[].stepif not isinstance(xstep, complex) or int(xstep.real) != xstep.real:raise ValueError(\"\")xstep = int(xstep.imag)y0 = key[].starty1 = key[].stopystep = key[].stepif not isinstance(ystep, complex) or int(ystep.real) != ystep.real:raise ValueError(\"\")ystep = int(ystep.imag)return x0, x1, xstep, y0, y1, ystep", "docstring": "Convert a 2-tuple of slices to start,stop,steps for x and y.\n\n key -- (slice(ystart,ystop,ystep), slice(xtart, xstop, xstep))\n\n For now, the only accepted step values are imaginary integers (interpreted\n in the same way numpy.mgrid, etc. 
do).", "id": "f17199:m0"} {"signature": "def quality(func, mesh, interpolator='', n=):", "body": "fz = func(mesh.x, mesh.y)tri = Triangulation(mesh.x, mesh.y)intp = getattr(tri, interpolator+'')(fz, bbox=(,,,))Y, X = np.mgrid[::complex(,n),::complex(,n)]Z = func(X, Y)iz = intp[::complex(,n),::complex(,n)]numgood = n*nSE = (Z - iz)**SSE = np.sum(SE.flat)meanZ = np.sum(Z.flat) / numgoodSM = (Z - meanZ)**SSM = np.sum(SM.flat)r2 = - SSE/SSMprint(func.__name__, r2, SSE, SSM, numgood)return r2", "docstring": "Compute a quality factor (the quantity r**2 from TOMS792).\n\n interpolator must be in ('linear', 'nn').", "id": "f17201:m17"} {"signature": "def _collapse_duplicate_points(self):", "body": "j_sorted = np.lexsort(keys=(self.x, self.y))mask_unique = np.hstack([True,(np.diff(self.x[j_sorted]) != ) | (np.diff(self.y[j_sorted]) != ),])return j_sorted[mask_unique]", "docstring": "Generate index array that picks out unique x,y points.\n\n This appears to be required by the underlying delaunay triangulation\n code.", "id": "f17202:c1:m1"} {"signature": "def _compute_convex_hull(self):", "body": "border = (self.triangle_neighbors == -)edges = {}edges.update(dict(zip(self.triangle_nodes[border[:,]][:,],self.triangle_nodes[border[:,]][:,])))edges.update(dict(zip(self.triangle_nodes[border[:,]][:,],self.triangle_nodes[border[:,]][:,])))edges.update(dict(zip(self.triangle_nodes[border[:,]][:,],self.triangle_nodes[border[:,]][:,])))hull = list(edges.popitem())while edges:hull.append(edges.pop(hull[-]))hull.pop()return hull", "docstring": "Extract the convex hull from the triangulation information.\n\n The output will be a list of point_id's in counter-clockwise order\n forming the convex hull of the data set.", "id": "f17202:c1:m2"} {"signature": "def linear_interpolator(self, z, default_value=np.nan):", "body": "z = np.asarray(z, dtype=np.float64)if z.shape != self.old_shape:raise ValueError(\"\")if self.j_unique is not None:z = z[self.j_unique]return LinearInterpolator(self, z, default_value)", "docstring": "Get an object which can interpolate within the convex hull by\n assigning a plane to each triangle.\n\n z -- an array of floats giving the known function values at each point\n in the triangulation.", "id": "f17202:c1:m3"} {"signature": "def nn_interpolator(self, z, default_value=np.nan):", "body": "z = np.asarray(z, dtype=np.float64)if z.shape != self.old_shape:raise ValueError(\"\")if self.j_unique is not None:z = z[self.j_unique]return NNInterpolator(self, z, default_value)", "docstring": "Get an object which can interpolate within the convex hull by\n the natural neighbors method.\n\n z -- an array of floats giving the known function values at each point\n in the triangulation.", "id": "f17202:c1:m4"} {"signature": "def node_graph(self):", "body": "g = {}for i, j in self.edge_db:s = g.setdefault(i, set())s.add(j)s = g.setdefault(j, set())s.add(i)return g", "docstring": "Return a graph of node_id's pointing to node_id's.\n\n The arcs of the graph correspond to the edges in the triangulation.\n\n {node_id: set([node_id, ...]), ...}", "id": "f17202:c1:m8"} {"signature": "def __init__(self, majloc=None, minloc=None,majfmt=None, minfmt=None, label=None):", "body": "self.majloc = majlocself.minloc = minlocself.majfmt = majfmtself.minfmt = minfmtself.label = label", "docstring": "majloc and minloc: TickLocators for the major and minor ticks\nmajfmt and minfmt: TickFormatters for the major and minor ticks\nlabel: the default axis label\n\nIf any of the above are None, the axis will simply use the 
default", "id": "f17205:c0:m0"} {"signature": "def axisinfo(unit):", "body": "return None", "docstring": "return an units.AxisInfo instance for unit", "id": "f17205:c1:m0"} {"signature": "def default_units(x):", "body": "return None", "docstring": "return the default unit for x or None", "id": "f17205:c1:m1"} {"signature": "def convert(obj, unit):", "body": "return obj", "docstring": "convert obj using unit. If obj is a sequence, return the\nconverted sequence. The ouput must be a sequence of scalars\nthat can be used by the numpy array layer", "id": "f17205:c1:m2"} {"signature": "def is_numlike(x):", "body": "if iterable(x):for thisx in x:return is_numlike(thisx)else:return is_numlike(x)", "docstring": "The matplotlib datalim, autoscaling, locators etc work with\nscalars which are the units converted to floats given the\ncurrent unit. The converter may be passed these floats, or\narrays of them, even when units are set. Derived conversion\ninterfaces may opt to pass plain-ol unitless numbers through\nthe conversion interface and this is a helper function for\nthem.", "id": "f17205:c1:m3"} {"signature": "def get_converter(self, x):", "body": "if not len(self): return None converter = Noneclassx = getattr(x, '', None)if classx is not None:converter = self.get(classx)if converter is None and iterable(x):if isinstance(x, np.ndarray) and x.dtype != np.object:return Nonefor thisx in x:converter = self.get_converter( thisx )return converterreturn converter", "docstring": "get the converter interface instance for x, or None", "id": "f17205:c2:m1"} {"signature": "def _is_writable_dir(p):", "body": "try: p + '' except TypeError: return Falsetry:t = tempfile.TemporaryFile(dir=p)t.write('')t.close()except OSError: return Falseelse: return True", "docstring": "p is a string pointing to a putative writable dir -- return True p\nis such a string, else False", "id": "f17206:m1"} {"signature": "def compare_versions(a, b):", "body": "if a:a = distutils.version.LooseVersion(a)b = distutils.version.LooseVersion(b)if a>=b: return Trueelse: return Falseelse: return False", "docstring": "return True if a is greater than or equal to b", "id": "f17206:m6"} {"signature": "def _get_home():", "body": "path=''try:path=os.path.expanduser(\"\")except:passif not os.path.isdir(path):for evar in ('', '', ''):try:path = os.environ[evar]if os.path.isdir(path):breakexcept: passif path:return pathelse:raise RuntimeError('')", "docstring": "Find user's home directory if possible.\n Otherwise raise error.\n\n :see: http://mail.python.org/pipermail/python-list/2005-February/263921.html", "id": "f17206:m9"} {"signature": "def _get_configdir():", "body": "configdir = os.environ.get('')if configdir is not None:if not _is_writable_dir(configdir):raise RuntimeError(''%configdir)return configdirh = get_home()p = os.path.join(get_home(), '')if os.path.exists(p):if not _is_writable_dir(p):raise RuntimeError(\"\"% (h, h))else:if not _is_writable_dir(h):raise RuntimeError(\"\"%h)os.mkdir(p)return p", "docstring": "Return the string representing the configuration dir.\n\ndefault is HOME/.matplotlib. 
you can override this with the\nMPLCONFIGDIR environment variable", "id": "f17206:m10"} {"signature": "def _get_data_path():", "body": "if '' in os.environ:path = os.environ['']if not os.path.isdir(path):raise RuntimeError('')return pathpath = os.sep.join([os.path.dirname(__file__), ''])if os.path.isdir(path): return pathimport matplotlib.afmpath = os.sep.join([os.path.dirname(matplotlib.afm.__file__), ''])if os.path.isdir(path): return pathif getattr(sys,'',None):path = os.path.join(os.path.split(sys.path[])[], '')if os.path.isdir(path): return pathelse:path = os.path.join(os.path.split(os.path.split(sys.path[])[])[],'')if os.path.isdir(path): return pathelse:path = os.path.join(sys.path[], '')if os.path.isdir(path): return pathraise RuntimeError('')", "docstring": "get the path to matplotlib data", "id": "f17206:m11"} {"signature": "def get_example_data(fname):", "body": "datadir = os.path.join(get_data_path(), '')fullpath = os.path.join(datadir, fname)if not os.path.exists(fullpath):raise IOError(''%(fname, datadir))return file(fullpath, '')", "docstring": "return a filehandle to one of the example files in mpl-data/example\n\n*fname*\n the name of one of the files in mpl-data/example", "id": "f17206:m13"} {"signature": "def matplotlib_fname():", "body": "oldname = os.path.join( os.getcwd(), '')if os.path.exists(oldname):print(\"\"\"\"\"\", file=sys.stderr)shutil.move('', '')home = get_home()oldname = os.path.join( home, '')if os.path.exists(oldname):configdir = get_configdir()newname = os.path.join(configdir, '')print(\"\"\"\"\"\"%(oldname, newname), file=sys.stderr)shutil.move(oldname, newname)fname = os.path.join( os.getcwd(), '')if os.path.exists(fname): return fnameif '' in os.environ:path = os.environ['']if os.path.exists(path):fname = os.path.join(path, '')if os.path.exists(fname):return fnamefname = os.path.join(get_configdir(), '')if os.path.exists(fname): return fnamepath = get_data_path() fname = os.path.join(path, '')if not os.path.exists(fname):warnings.warn('')return fname", "docstring": "Return the path to the rc file\n\nSearch order:\n\n * current working dir\n * environ var MATPLOTLIBRC\n * HOME/.matplotlib/matplotlibrc\n * MATPLOTLIBDATA/matplotlibrc", "id": "f17206:m15"} {"signature": "def rc_params(fail_on_error=False):", "body": "fname = matplotlib_fname()if not os.path.exists(fname):message = ''ret = RcParams([ (key, default) for key, (default, converter) indefaultParams.items() ])warnings.warn(message)return retcnt = rc_temp = {}for line in file(fname):cnt += strippedline = line.split('',)[].strip()if not strippedline: continuetup = strippedline.split('',)if len(tup) !=:warnings.warn(''%(cnt, line, fname))continuekey, val = tupkey = key.strip()val = val.strip()if key in rc_temp:warnings.warn(''%(fname,cnt))rc_temp[key] = (val, line, cnt)ret = RcParams([ (key, default) for key, (default, converter) indefaultParams.items() ])for key in ('', ''):if key in rc_temp:val, line, cnt = rc_temp.pop(key)if fail_on_error:ret[key] = val else:try: ret[key] = val except Exception as msg:warnings.warn('' % (val, cnt, line, fname, msg))verbose.set_level(ret[''])verbose.set_fileo(ret[''])for key, (val, line, cnt) in rc_temp.items():if key in defaultParams:if fail_on_error:ret[key] = val else:try: ret[key] = val except Exception as msg:warnings.warn('' % (val, cnt, line, fname, msg))else:print(\"\"\"\"\"\" % (key, cnt, fname), file=sys.stderr)if ret[''] is None:ret[''] = get_data_path()if not ret[''] == ['']:verbose.report(", "docstring": "Return the default params updated from the 
values in the rc file", "id": "f17206:m16"} {"signature": "def rc(group, **kwargs):", "body": "aliases = {'' : '','' : '','' : '','' : '','' : '','' : '','' : '',}if is_string_like(group):group = (group,)for g in group:for k,v in list(kwargs.items()):name = aliases.get(k) or kkey = '' % (g, name)if key not in rcParams:raise KeyError('' %(key, g, name))rcParams[key] = v", "docstring": "Set the current rc params. Group is the grouping for the rc, eg.\nfor ``lines.linewidth`` the group is ``lines``, for\n``axes.facecolor``, the group is ``axes``, and so on. Group may\nalso be a list or tuple of group names, eg. (*xtick*, *ytick*).\n*kwargs* is a dictionary of attribute name/value pairs, eg::\n\n rc('lines', linewidth=2, color='r')\n\nsets the current rc params and is equivalent to::\n\n rcParams['lines.linewidth'] = 2\n rcParams['lines.color'] = 'r'\n\nThe following aliases are available to save typing for interactive\nusers:\n\n===== =================\nAlias Property\n===== =================\n'lw' 'linewidth'\n'ls' 'linestyle'\n'c' 'color'\n'fc' 'facecolor'\n'ec' 'edgecolor'\n'mew' 'markeredgewidth'\n'aa' 'antialiased'\n===== =================\n\nThus you could abbreviate the above rc command as::\n\n rc('lines', lw=2, c='r')\n\n\nNote you can use python's kwargs dictionary facility to store\ndictionaries of default parameters. Eg, you can customize the\nfont rc as follows::\n\n font = {'family' : 'monospace',\n 'weight' : 'bold',\n 'size' : 'larger'}\n\n rc('font', **font) # pass in the font dict as kwargs\n\nThis enables you to easily switch between several configurations.\nUse :func:`~matplotlib.pyplot.rcdefaults` to restore the default\nrc params after changes.", "id": "f17206:m17"} {"signature": "def rcdefaults():", "body": "rcParams.update(rcParamsDefault)", "docstring": "Restore the default rc params - the ones that were created at\nmatplotlib load time.", "id": "f17206:m18"} {"signature": "def use(arg, warn=True):", "body": "if '' in sys.modules:if warn: warnings.warn(_use_error_msg)returnarg = arg.lower()if arg.startswith(''):name = argelse:be_parts = arg.split('')name = validate_backend(be_parts[])rcParams[''] = nameif name == '' and len(be_parts) > :rcParams[''] = validate_cairo_format(be_parts[])", "docstring": "Set the matplotlib backend to one of the known backends.\n\nThe argument is case-insensitive. For the Cairo backend,\nthe argument can have an extension to indicate the type of\noutput. Example:\n\n use('cairo.pdf')\n\nwill specify a default of pdf output generated by Cairo.\n\nNote: this function must be called *before* importing pylab for\nthe first time; or, if you are not using pylab, it must be called\nbefore importing matplotlib.backends. If warn is True, a warning\nis issued if you try and call this after pylab or pyplot have been\nloaded. 
In certain black magic use cases, eg\npyplot.switch_backends, we are doing the reloading necessary to\nmake the backend switch work (in some cases, eg pure image\nbackends) so one can set warn=False to suppress the warnings", "id": "f17206:m19"} {"signature": "def get_backend():", "body": "return rcParams['']", "docstring": "Returns the current backend", "id": "f17206:m20"} {"signature": "def interactive(b):", "body": "rcParams[''] = b", "docstring": "Set interactive mode to boolean b.\n\nIf b is True, then draw after every plotting command, eg, after xlabel", "id": "f17206:m21"} {"signature": "def is_interactive():", "body": "b = rcParams['']return b", "docstring": "Return true if plot mode is interactive", "id": "f17206:m22"} {"signature": "def tk_window_focus():", "body": "if rcParams[''] != '':return Falsereturn rcParams['']", "docstring": "Return true if focus maintenance under TkAgg on win32 is on.\n This currently works only for python.exe and IPython.exe.\n Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on.", "id": "f17206:m23"} {"signature": "def set_level(self, level):", "body": "if self._commandLineVerbose is not None:level = self._commandLineVerboseif level not in self.levels:raise ValueError(''%(level, self.levels))self.level = level", "docstring": "set the verbosity to one of the Verbose.levels strings", "id": "f17206:c0:m1"} {"signature": "def report(self, s, level=''):", "body": "if self.ge(level):print(s, file=self.fileo)return Truereturn False", "docstring": "print message s to self.fileo if self.level>=level. Return\nvalue indicates whether a message was issued", "id": "f17206:c0:m3"} {"signature": "def wrap(self, fmt, func, level='', always=True):", "body": "assert callable(func)def wrapper(*args, **kwargs):ret = func(*args, **kwargs)if (always or not wrapper._spoke):spoke = self.report(fmt%ret, level)if not wrapper._spoke: wrapper._spoke = spokereturn retwrapper._spoke = Falsewrapper.__doc__ = func.__doc__return wrapper", "docstring": "return a callable function that wraps func and reports its\noutput through the verbose handler if current verbosity level\nis higher than level\n\nif always is True, the report will occur on every function\ncall; otherwise only on the first time the function is called", "id": "f17206:c0:m4"} {"signature": "def ge(self, level):", "body": "return self.vald[self.level]>=self.vald[level]", "docstring": "return true if self.level is >= level", "id": "f17206:c0:m5"} {"signature": "def open_group(self, s):", "body": "pass", "docstring": "Open a grouping element with label *s*. Is only currently used by\n:mod:`~matplotlib.backends.backend_svg`", "id": "f17207:c0:m1"} {"signature": "def close_group(self, s):", "body": "pass", "docstring": "Close a grouping element with label *s*.\nIs only currently used by :mod:`~matplotlib.backends.backend_svg`", "id": "f17207:c0:m2"} {"signature": "def draw_path(self, gc, path, transform, rgbFace=None):", "body": "raise NotImplementedError", "docstring": "Draws a :class:`~matplotlib.path.Path` instance using the\ngiven affine transform.", "id": "f17207:c0:m3"} {"signature": "def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):", "body": "tpath = trans.transform_path(path)for vertices, codes in tpath.iter_segments():if len(vertices):x,y = vertices[-:]self.draw_path(gc, marker_path,marker_trans + transforms.Affine2D().translate(x, y),rgbFace)", "docstring": "Draws a marker at each of the vertices in path. This includes\nall vertices, including control points on curves. 
To avoid\nthat behavior, those vertices should be removed before calling\nthis function.\n\n*gc*\n the :class:`GraphicsContextBase` instance\n\n*marker_trans*\n is an affine transform applied to the marker.\n\n*trans*\n is an affine transform applied to the path.\n\nThis provides a fallback implementation of draw_markers that\nmakes multiple calls to :meth:`draw_path`. Some backends may\nwant to override this method in order to draw the marker only\nonce and reuse it multiple times.", "id": "f17207:c0:m4"} {"signature": "def draw_path_collection(self, master_transform, cliprect, clippath,clippath_trans, paths, all_transforms, offsets,offsetTrans, facecolors, edgecolors, linewidths,linestyles, antialiaseds, urls):", "body": "path_ids = []for path, transform in self._iter_collection_raw_paths(master_transform, paths, all_transforms):path_ids.append((path, transform))for xo, yo, path_id, gc, rgbFace in self._iter_collection(path_ids, cliprect, clippath, clippath_trans,offsets, offsetTrans, facecolors, edgecolors,linewidths, linestyles, antialiaseds, urls):path, transform = path_idtransform = transforms.Affine2D(transform.get_matrix()).translate(xo, yo)self.draw_path(gc, path, transform, rgbFace)", "docstring": "Draws a collection of paths, selecting drawing properties from\nthe lists *facecolors*, *edgecolors*, *linewidths*,\n*linestyles* and *antialiaseds*. *offsets* is a list of\noffsets to apply to each of the paths. The offsets in\n*offsets* are first transformed by *offsetTrans* before\nbeing applied.\n\nThis provides a fallback implementation of\n:meth:`draw_path_collection` that makes multiple calls to\ndraw_path. Some backends may want to override this in order\nto render each set of path data only once, and then reference\nthat path multiple times with the different offsets, colors,\nstyles etc. The generator methods\n:meth:`_iter_collection_raw_paths` and\n:meth:`_iter_collection` are provided to help with (and\nstandardize) the implementation across backends. 
It is highly\nrecommended to use those generators, so that changes to the\nbehavior of :meth:`draw_path_collection` can be made globally.", "id": "f17207:c0:m5"} {"signature": "def draw_quad_mesh(self, master_transform, cliprect, clippath,clippath_trans, meshWidth, meshHeight, coordinates,offsets, offsetTrans, facecolors, antialiased,showedges):", "body": "from matplotlib.collections import QuadMeshpaths = QuadMesh.convert_mesh_to_paths(meshWidth, meshHeight, coordinates)if showedges:edgecolors = np.array([[, , , ]], np.float_)linewidths = np.array([], np.float_)else:edgecolors = facecolorslinewidths = np.array([], np.float_)return self.draw_path_collection(master_transform, cliprect, clippath, clippath_trans,paths, [], offsets, offsetTrans, facecolors, edgecolors,linewidths, [], [antialiased], [None])", "docstring": "This provides a fallback implementation of\n:meth:`draw_quad_mesh` that generates paths and then calls\n:meth:`draw_path_collection`.", "id": "f17207:c0:m6"} {"signature": "def _iter_collection_raw_paths(self, master_transform, paths, all_transforms):", "body": "Npaths = len(paths)Ntransforms = len(all_transforms)N = max(Npaths, Ntransforms)if Npaths == :returntransform = transforms.IdentityTransform()for i in range(N):path = paths[i % Npaths]if Ntransforms:transform = all_transforms[i % Ntransforms]yield path, transform + master_transform", "docstring": "This is a helper method (along with :meth:`_iter_collection`) to make\nit easier to write a space-efficient :meth:`draw_path_collection`\nimplementation in a backend.\n\nThis method yields all of the base path/transform\ncombinations, given a master transform, a list of paths and\nlist of transforms.\n\nThe arguments should be exactly what is passed in to\n:meth:`draw_path_collection`.\n\nThe backend should take each yielded path and transform and\ncreate an object that can be referenced (reused) later.", "id": "f17207:c0:m7"} {"signature": "def _iter_collection(self, path_ids, cliprect, clippath, clippath_trans,offsets, offsetTrans, facecolors, edgecolors,linewidths, linestyles, antialiaseds, urls):", "body": "Npaths = len(path_ids)Noffsets = len(offsets)N = max(Npaths, Noffsets)Nfacecolors = len(facecolors)Nedgecolors = len(edgecolors)Nlinewidths = len(linewidths)Nlinestyles = len(linestyles)Naa = len(antialiaseds)Nurls = len(urls)if (Nfacecolors == and Nedgecolors == ) or Npaths == :returnif Noffsets:toffsets = offsetTrans.transform(offsets)gc = self.new_gc()gc.set_clip_rectangle(cliprect)if clippath is not None:clippath = transforms.TransformedPath(clippath, clippath_trans)gc.set_clip_path(clippath)if Nfacecolors == :rgbFace = Noneif Nedgecolors == :gc.set_linewidth()xo, yo = , for i in range(N):path_id = path_ids[i % Npaths]if Noffsets:xo, yo = toffsets[i % Noffsets]if Nfacecolors:rgbFace = facecolors[i % Nfacecolors]if Nedgecolors:gc.set_foreground(edgecolors[i % Nedgecolors])if Nlinewidths:gc.set_linewidth(linewidths[i % Nlinewidths])if Nlinestyles:gc.set_dashes(*linestyles[i % Nlinestyles])if rgbFace is not None and len(rgbFace)==:gc.set_alpha(rgbFace[-])rgbFace = rgbFace[:]gc.set_antialiased(antialiaseds[i % Naa])if Nurls:gc.set_url(urls[i % Nurls])yield xo, yo, path_id, gc, rgbFace", "docstring": "This is a helper method (along with\n:meth:`_iter_collection_raw_paths`) to make it easier to write\na space-efficient :meth:`draw_path_collection` implementation in a\nbackend.\n\nThis method yields all of the path, offset and graphics\ncontext combinations to draw the path collection. 
The caller\nshould already have looped over the results of\n:meth:`_iter_collection_raw_paths` to draw this collection.\n\nThe arguments should be the same as that passed into\n:meth:`draw_path_collection`, with the exception of\n*path_ids*, which is a list of arbitrary objects that the\nbackend will use to reference one of the paths created in the\n:meth:`_iter_collection_raw_paths` stage.\n\nEach yielded result is of the form::\n\n xo, yo, path_id, gc, rgbFace\n\nwhere *xo*, *yo* is an offset; *path_id* is one of the elements of\n*path_ids*; *gc* is a graphics context and *rgbFace* is a color to\nuse for filling the path.", "id": "f17207:c0:m8"} {"signature": "def get_image_magnification(self):", "body": "return ", "docstring": "Get the factor by which to magnify images passed to :meth:`draw_image`.\nAllows a backend to have images at a different resolution to other\nartists.", "id": "f17207:c0:m9"} {"signature": "def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):", "body": "raise NotImplementedError", "docstring": "Draw the image instance into the current axes;\n\n*x*\n is the distance in pixels from the left hand side of the canvas.\n\n*y*\n the distance from the origin. That is, if origin is\n upper, y is the distance from top. If origin is lower, y\n is the distance from bottom\n\n*im*\n the :class:`matplotlib._image.Image` instance\n\n*bbox*\n a :class:`matplotlib.transforms.Bbox` instance for clipping, or\n None", "id": "f17207:c0:m10"} {"signature": "def option_image_nocomposite(self):", "body": "return False", "docstring": "overwrite this method for renderers that do not necessarily\nwant to rescale and composite raster images. (like SVG)", "id": "f17207:c0:m11"} {"signature": "def draw_text(self, gc, x, y, s, prop, angle, ismath=False):", "body": "raise NotImplementedError", "docstring": "Draw the text instance\n\n*gc*\n the :class:`GraphicsContextBase` instance\n\n*x*\n the x location of the text in display coords\n\n*y*\n the y location of the text in display coords\n\n*s*\n a :class:`matplotlib.text.Text` instance\n\n*prop*\n a :class:`matplotlib.font_manager.FontProperties` instance\n\n*angle*\n the rotation angle in degrees\n\n**backend implementers note**\n\nWhen you are trying to determine if you have gotten your bounding box\nright (which is what enables the text layout/alignment to work\nproperly), it helps to change the line in text.py::\n\n if 0: bbox_artist(self, renderer)\n\nto if 1, and then the actual bounding box will be blotted along with\nyour text.", "id": "f17207:c0:m13"} {"signature": "def flipy(self):", "body": "return True", "docstring": "Return true if y small numbers are top for renderer Is used\nfor drawing text (:mod:`matplotlib.text`) and images\n(:mod:`matplotlib.image`) only", "id": "f17207:c0:m14"} {"signature": "def get_canvas_width_height(self):", "body": "return , ", "docstring": "return the canvas width and height in display coords", "id": "f17207:c0:m15"} {"signature": "def get_texmanager(self):", "body": "if self._texmanager is None:from matplotlib.texmanager import TexManagerself._texmanager = TexManager()return self._texmanager", "docstring": "return the :class:`matplotlib.texmanager.TexManager` instance", "id": "f17207:c0:m16"} {"signature": "def get_text_width_height_descent(self, s, prop, ismath):", "body": "raise NotImplementedError", "docstring": "get the width and height, and the offset from the bottom to the\nbaseline (descent), in display coords of the string s 
with\n:class:`~matplotlib.font_manager.FontProperties` prop", "id": "f17207:c0:m17"} {"signature": "def new_gc(self):", "body": "return GraphicsContextBase()", "docstring": "Return an instance of a :class:`GraphicsContextBase`", "id": "f17207:c0:m18"} {"signature": "def points_to_pixels(self, points):", "body": "return points", "docstring": "Convert points to display units\n\n*points*\n a float or a numpy array of float\n\nreturn points converted to pixels\n\nYou need to override this function (unless your backend\ndoesn't have a dpi, eg, postscript or svg). Some imaging\nsystems assume some value for pixels per inch::\n\n points to pixels = points * pixels_per_inch/72.0 * dpi/72.0", "id": "f17207:c0:m19"} {"signature": "def copy_properties(self, gc):", "body": "self._alpha = gc._alphaself._antialiased = gc._antialiasedself._capstyle = gc._capstyleself._cliprect = gc._cliprectself._clippath = gc._clippathself._dashes = gc._dashesself._joinstyle = gc._joinstyleself._linestyle = gc._linestyleself._linewidth = gc._linewidthself._rgb = gc._rgbself._hatch = gc._hatchself._url = gc._urlself._snap = gc._snap", "docstring": "Copy properties from gc to self", "id": "f17207:c1:m1"} {"signature": "def get_alpha(self):", "body": "return self._alpha", "docstring": "Return the alpha value used for blending - not supported on\nall backends", "id": "f17207:c1:m2"} {"signature": "def get_antialiased(self):", "body": "return self._antialiased", "docstring": "Return true if the object should try to do antialiased rendering", "id": "f17207:c1:m3"} {"signature": "def get_capstyle(self):", "body": "return self._capstyle", "docstring": "Return the capstyle as a string in ('butt', 'round', 'projecting')", "id": "f17207:c1:m4"} {"signature": "def get_clip_rectangle(self):", "body": "return self._cliprect", "docstring": "Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox` instance", "id": "f17207:c1:m5"} {"signature": "def get_clip_path(self):", "body": "if self._clippath is not None:return self._clippath.get_transformed_path_and_affine()return None, None", "docstring": "Return the clip path in the form (path, transform), where path\nis a :class:`~matplotlib.path.Path` instance, and transform is\nan affine transform to apply to the path before clipping.", "id": "f17207:c1:m6"} {"signature": "def get_dashes(self):", "body": "return self._dashes", "docstring": "Return the dash information as an offset dashlist tuple.\n\nThe dash list is a even size list that gives the ink on, ink\noff in pixels.\n\nSee p107 of to PostScript `BLUEBOOK\n`_\nfor more info.\n\nDefault value is None", "id": "f17207:c1:m7"} {"signature": "def get_joinstyle(self):", "body": "return self._joinstyle", "docstring": "Return the line join style as one of ('miter', 'round', 'bevel')", "id": "f17207:c1:m8"} {"signature": "def get_linestyle(self, style):", "body": "return self._linestyle", "docstring": "Return the linestyle: one of ('solid', 'dashed', 'dashdot',\n'dotted').", "id": "f17207:c1:m9"} {"signature": "def get_linewidth(self):", "body": "return self._linewidth", "docstring": "Return the line width in points as a scalar", "id": "f17207:c1:m10"} {"signature": "def get_rgb(self):", "body": "return self._rgb", "docstring": "returns a tuple of three floats from 0-1. 
color can be a\nmatlab format string, a html hex color string, or a rgb tuple", "id": "f17207:c1:m11"} {"signature": "def get_url(self):", "body": "return self._url", "docstring": "returns a url if one is set, None otherwise", "id": "f17207:c1:m12"} {"signature": "def get_snap(self):", "body": "return self._snap", "docstring": "returns the snap setting which may be:\n\n * True: snap vertices to the nearest pixel center\n\n * False: leave vertices as-is\n\n * None: (auto) If the path contains only rectilinear line\n segments, round to the nearest pixel center", "id": "f17207:c1:m13"} {"signature": "def set_alpha(self, alpha):", "body": "self._alpha = alpha", "docstring": "Set the alpha value used for blending - not supported on\nall backends", "id": "f17207:c1:m14"} {"signature": "def set_antialiased(self, b):", "body": "if b: self._antialiased = else: self._antialiased = ", "docstring": "True if object should be drawn with antialiased rendering", "id": "f17207:c1:m15"} {"signature": "def set_capstyle(self, cs):", "body": "if cs in ('', '', ''):self._capstyle = cselse:raise ValueError('' % cs)", "docstring": "Set the capstyle as a string in ('butt', 'round', 'projecting')", "id": "f17207:c1:m16"} {"signature": "def set_clip_rectangle(self, rectangle):", "body": "self._cliprect = rectangle", "docstring": "Set the clip rectangle with sequence (left, bottom, width, height)", "id": "f17207:c1:m17"} {"signature": "def set_clip_path(self, path):", "body": "assert path is None or isinstance(path, transforms.TransformedPath)self._clippath = path", "docstring": "Set the clip path and transformation. Path should be a\n:class:`~matplotlib.transforms.TransformedPath` instance.", "id": "f17207:c1:m18"} {"signature": "def set_dashes(self, dash_offset, dash_list):", "body": "self._dashes = dash_offset, dash_list", "docstring": "Set the dash style for the gc.\n\n*dash_offset*\n is the offset (usually 0).\n\n*dash_list*\n specifies the on-off sequence as points. ``(None, None)`` specifies a solid line", "id": "f17207:c1:m19"} {"signature": "def set_foreground(self, fg, isRGB=False):", "body": "if isRGB:self._rgb = fgelse:self._rgb = colors.colorConverter.to_rgba(fg)", "docstring": "Set the foreground color. fg can be a matlab format string, a\nhtml hex color string, an rgb unit tuple, or a float between 0\nand 1. In the latter case, grayscale is used.\n\nThe :class:`GraphicsContextBase` converts colors to rgb\ninternally. 
If you know the color is rgb already, you can set\n``isRGB=True`` to avoid the performance hit of the conversion", "id": "f17207:c1:m20"} {"signature": "def set_graylevel(self, frac):", "body": "self._rgb = (frac, frac, frac)", "docstring": "Set the foreground color to be a gray level with *frac*", "id": "f17207:c1:m21"} {"signature": "def set_joinstyle(self, js):", "body": "if js in ('', '', ''):self._joinstyle = jselse:raise ValueError('' % js)", "docstring": "Set the join style to be one of ('miter', 'round', 'bevel')", "id": "f17207:c1:m22"} {"signature": "def set_linewidth(self, w):", "body": "self._linewidth = w", "docstring": "Set the linewidth in points", "id": "f17207:c1:m23"} {"signature": "def set_linestyle(self, style):", "body": "try:offset, dashes = self.dashd[style]except:raise ValueError('' % style)self._linestyle = styleself.set_dashes(offset, dashes)", "docstring": "Set the linestyle to be one of ('solid', 'dashed', 'dashdot',\n'dotted').", "id": "f17207:c1:m24"} {"signature": "def set_url(self, url):", "body": "self._url = url", "docstring": "Sets the url for links in compatible backends", "id": "f17207:c1:m25"} {"signature": "def set_snap(self, snap):", "body": "self._snap = snap", "docstring": "Sets the snap setting which may be:\n\n * True: snap vertices to the nearest pixel center\n\n * False: leave vertices as-is\n\n * None: (auto) If the path contains only rectilinear line\n segments, round to the nearest pixel center", "id": "f17207:c1:m26"} {"signature": "def set_hatch(self, hatch):", "body": "self._hatch = hatch", "docstring": "Sets the hatch style for filling", "id": "f17207:c1:m27"} {"signature": "def get_hatch(self):", "body": "return self._hatch", "docstring": "Gets the current hatch style", "id": "f17207:c1:m28"} {"signature": "def __init__(self, name, canvas, x, y,guiEvent=None):", "body": "Event.__init__(self, name, canvas,guiEvent=guiEvent)self.x = xself.y = yif x is None or y is None:self.inaxes = Noneself._update_enter_leave()returnaxes_list = [a for a in self.canvas.figure.get_axes() if a.in_axes(self)]if len(axes_list) == : self.inaxes = Noneself._update_enter_leave()returnelif (len(axes_list) > ): axCmp = lambda _x,_y: cmp(_x.zorder, _y.zorder)axes_list.sort(axCmp)self.inaxes = axes_list[-] else: self.inaxes = axes_list[]try:xdata, ydata = self.inaxes.transData.inverted().transform_point((x, y))except ValueError:self.xdata = Noneself.ydata = Noneelse:self.xdata = xdataself.ydata = ydataself._update_enter_leave()", "docstring": "*x*, *y* in figure coords, 0,0 = bottom, left", "id": "f17207:c6:m0"} {"signature": "def _update_enter_leave(self):", "body": "if LocationEvent.lastevent is not None:last = LocationEvent.lasteventif last.inaxes!=self.inaxes:if last.inaxes is not None:last.canvas.callbacks.process('', last)if self.inaxes is not None:self.canvas.callbacks.process('', self)else:if self.inaxes is not None:self.canvas.callbacks.process('', self)LocationEvent.lastevent = self", "docstring": "process the figure/axes enter leave events", "id": "f17207:c6:m1"} {"signature": "def __init__(self, name, canvas, x, y, button=None, key=None,step=, guiEvent=None):", "body": "LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)self.button = buttonself.key = keyself.step = step", "docstring": "x, y in figure coords, 0,0 = bottom, left\nbutton pressed None, 1, 2, 3, 'up', 'down'", "id": "f17207:c7:m0"} {"signature": "def onRemove(self, ev):", "body": "def sort_artists(artists):L = [ (h.zorder, h) for h in artists ]L.sort()return [ h for zorder, h 
in L ]under = sort_artists(self.figure.hitlist(ev))h = Noneif under: h = under[-]while h:print(\"\",h)if h.remove():self.draw_idle()breakparent = Nonefor p in under:if h in p.get_children():parent = pbreakh = parent", "docstring": "Mouse event processor which removes the top artist\nunder the cursor. Connect this to the 'mouse_press_event'\nusing::\n\n canvas.mpl_connect('mouse_press_event',canvas.onRemove)", "id": "f17207:c10:m1"} {"signature": "def onHilite(self, ev):", "body": "if not hasattr(self,''): self._active = dict()under = self.figure.hitlist(ev)enter = [a for a in under if a not in self._active]leave = [a for a in self._active if a not in under]print(\"\",\"\".join([str(x) for x in under]))for a in leave:if hasattr(a,''):a.set_color(self._active[a])elif hasattr(a,''):a.set_edgecolor(self._active[a][])a.set_facecolor(self._active[a][])del self._active[a]for a in enter:if hasattr(a,''):self._active[a] = a.get_color()elif hasattr(a,''):self._active[a] = (a.get_edgecolor(),a.get_facecolor())else: self._active[a] = Nonefor a in enter:if hasattr(a,''):a.set_color('')elif hasattr(a,''):a.set_edgecolor('')a.set_facecolor('')else: self._active[a] = Noneself.draw_idle()", "docstring": "Mouse event processor which highlights the artists\nunder the cursor. Connect this to the 'motion_notify_event'\nusing::\n\n canvas.mpl_connect('motion_notify_event',canvas.onHilite)", "id": "f17207:c10:m2"} {"signature": "def blit(self, bbox=None):", "body": "pass", "docstring": "blit the canvas in bbox (default entire canvas)", "id": "f17207:c10:m4"} {"signature": "def resize(self, w, h):", "body": "pass", "docstring": "set the canvas size in pixels", "id": "f17207:c10:m5"} {"signature": "def draw_event(self, renderer):", "body": "s = ''event = DrawEvent(s, self, renderer)self.callbacks.process(s, event)", "docstring": "This method will call all functions connected to the\n'draw_event' with a :class:`DrawEvent`", "id": "f17207:c10:m6"} {"signature": "def resize_event(self):", "body": "s = ''event = ResizeEvent(s, self)self.callbacks.process(s, event)", "docstring": "This method will call all functions connected to the\n'resize_event' with a :class:`ResizeEvent`", "id": "f17207:c10:m7"} {"signature": "def key_press_event(self, key, guiEvent=None):", "body": "self._key = keys = ''event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)self.callbacks.process(s, event)", "docstring": "This method will call all functions connected to the\n'key_press_event' with a :class:`KeyEvent`", "id": "f17207:c10:m8"} {"signature": "def key_release_event(self, key, guiEvent=None):", "body": "s = ''event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)self.callbacks.process(s, event)self._key = None", "docstring": "This method will call all functions connected to the\n'key_release_event' with a :class:`KeyEvent`", "id": "f17207:c10:m9"} {"signature": "def pick_event(self, mouseevent, artist, **kwargs):", "body": "s = ''event = PickEvent(s, self, mouseevent, artist, **kwargs)self.callbacks.process(s, event)", "docstring": "This method will be called by artists who are picked and will\nfire off :class:`PickEvent` callbacks to registered listeners", "id": "f17207:c10:m10"} {"signature": "def scroll_event(self, x, y, step, guiEvent=None):", "body": "if step >= :self._button = ''else:self._button = ''s = ''mouseevent = MouseEvent(s, self, x, y, self._button, self._key,step=step, guiEvent=guiEvent)self.callbacks.process(s, mouseevent)", "docstring": "Backend derived classes should 
call this function on any\nscroll wheel event. x,y are the canvas coords: 0,0 is lower,\nleft. button and key are as defined in MouseEvent.\n\nThis method will call all functions connected to the\n'scroll_event' with a :class:`MouseEvent` instance.", "id": "f17207:c10:m11"} {"signature": "def button_press_event(self, x, y, button, guiEvent=None):", "body": "self._button = buttons = ''mouseevent = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)self.callbacks.process(s, mouseevent)", "docstring": "Backend derived classes should call this function on any mouse\nbutton press. x,y are the canvas coords: 0,0 is lower, left.\nbutton and key are as defined in :class:`MouseEvent`.\n\nThis method will call all functions connected to the\n'button_press_event' with a :class:`MouseEvent` instance.", "id": "f17207:c10:m12"} {"signature": "def button_release_event(self, x, y, button, guiEvent=None):", "body": "s = ''event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)self.callbacks.process(s, event)self._button = None", "docstring": "Backend derived classes should call this function on any mouse\nbutton release.\n\n*x*\n the canvas coordinates where 0=left\n\n*y*\n the canvas coordinates where 0=bottom\n\n*guiEvent*\n the native UI event that generated the mpl event\n\n\nThis method will call all functions connected to the\n'button_release_event' with a :class:`MouseEvent` instance.", "id": "f17207:c10:m13"} {"signature": "def motion_notify_event(self, x, y, guiEvent=None):", "body": "self._lastx, self._lasty = x, ys = ''event = MouseEvent(s, self, x, y, self._button, self._key,guiEvent=guiEvent)self.callbacks.process(s, event)", "docstring": "Backend derived classes should call this function on any\nmotion-notify-event.\n\n*x*\n the canvas coordinates where 0=left\n\n*y*\n the canvas coordinates where 0=bottom\n\n*guiEvent*\n the native UI event that generated the mpl event\n\n\nThis method will call all functions connected to the\n'motion_notify_event' with a :class:`MouseEvent` instance.", "id": "f17207:c10:m14"} {"signature": "def leave_notify_event(self, guiEvent=None):", "body": "self.callbacks.process('', LocationEvent.lastevent)LocationEvent.lastevent = None", "docstring": "Backend derived classes should call this function when leaving\ncanvas\n\n*guiEvent*\n the native UI event that generated the mpl event", "id": "f17207:c10:m15"} {"signature": "def enter_notify_event(self, guiEvent=None):", "body": "event = Event('', self, guiEvent)self.callbacks.process('', event)", "docstring": "Backend derived classes should call this function when entering\ncanvas\n\n*guiEvent*\n the native UI event that generated the mpl event", "id": "f17207:c10:m16"} {"signature": "def idle_event(self, guiEvent=None):", "body": "s = ''event = IdleEvent(s, self, guiEvent=guiEvent)self.callbacks.process(s, event)", "docstring": "call when GUI is idle", "id": "f17207:c10:m17"} {"signature": "def draw(self, *args, **kwargs):", "body": "pass", "docstring": "Render the :class:`~matplotlib.figure.Figure`", "id": "f17207:c10:m18"} {"signature": "def draw_idle(self, *args, **kwargs):", "body": "self.draw(*args, **kwargs)", "docstring": ":meth:`draw` only if idle; defaults to draw but backends can override", "id": "f17207:c10:m19"} {"signature": "def draw_cursor(self, event):", "body": "pass", "docstring": "Draw a cursor in the event.axes if inaxes is not None. 
Use\nnative GUI drawing for efficiency if possible", "id": "f17207:c10:m20"} {"signature": "def get_width_height(self):", "body": "return int(self.figure.bbox.width), int(self.figure.bbox.height)", "docstring": "return the figure width and height in points or pixels\n(depending on the backend), truncated to integers", "id": "f17207:c10:m21"} {"signature": "def print_figure(self, filename, dpi=None, facecolor='', edgecolor='',orientation='', format=None, **kwargs):", "body": "if format is None:if cbook.is_string_like(filename):format = os.path.splitext(filename)[][:]if format is None or format == '':format = self.get_default_filetype()if cbook.is_string_like(filename):filename = filename.rstrip('') + '' + formatformat = format.lower()method_name = '' % formatif (format not in self.filetypes ornot hasattr(self, method_name)):formats = list(self.filetypes.keys())formats.sort()raise ValueError('''''' % (format, ''.join(formats)))if dpi is None:dpi = rcParams['']origDPI = self.figure.dpiorigfacecolor = self.figure.get_facecolor()origedgecolor = self.figure.get_edgecolor()self.figure.dpi = dpiself.figure.set_facecolor(facecolor)self.figure.set_edgecolor(edgecolor)try:result = getattr(self, method_name)(filename,dpi=dpi,facecolor=facecolor,edgecolor=edgecolor,orientation=orientation,**kwargs)finally:self.figure.dpi = origDPIself.figure.set_facecolor(origfacecolor)self.figure.set_edgecolor(origedgecolor)self.figure.set_canvas(self)return result", "docstring": "Render the figure to hardcopy. Set the figure patch face and edge\ncolors. This is useful because some of the GUIs have a gray figure\nface color background and you'll probably want to override this on\nhardcopy.\n\nArguments are:\n\n*filename*\n can also be a file object on image backends\n\n*orientation*\n only currently applies to PostScript printing.\n\n*dpi*\n the dots per inch to save the figure in; if None, use savefig.dpi\n\n*facecolor*\n the facecolor of the figure\n\n*edgecolor*\n the edgecolor of the figure\n\n*orientation* '\n landscape' | 'portrait' (not supported on all backends)\n\n*format*\n when set, forcibly set the file format to save to", "id": "f17207:c10:m32"} {"signature": "def set_window_title(self, title):", "body": "if hasattr(self, \"\"):self.manager.set_window_title(title)", "docstring": "Set the title text of the window containing the figure. Note that\nthis has no effect if there is no window (eg, a PS backend).", "id": "f17207:c10:m34"} {"signature": "def switch_backends(self, FigureCanvasClass):", "body": "newCanvas = FigureCanvasClass(self.figure)return newCanvas", "docstring": "instantiate an instance of FigureCanvasClass\n\nThis is used for backend switching, eg, to instantiate a\nFigureCanvasPS from a FigureCanvasGTK. Note, deep copying is\nnot done, so any changes to one of the instances (eg, setting\nfigure size or line props), will be reflected in the other", "id": "f17207:c10:m35"} {"signature": "def mpl_connect(self, s, func):", "body": "return self.callbacks.connect(s, func)", "docstring": "Connect event with string *s* to *func*. The signature of *func* is::\n\n def func(event)\n\nwhere event is a :class:`matplotlib.backend_bases.Event`. 
The\nfollowing events are recognized\n\n- 'button_press_event'\n- 'button_release_event'\n- 'draw_event'\n- 'key_press_event'\n- 'key_release_event'\n- 'motion_notify_event'\n- 'pick_event'\n- 'resize_event'\n- 'scroll_event'\n\nFor the location events (button and key press/release), if the\nmouse is over the axes, the variable ``event.inaxes`` will be\nset to the :class:`~matplotlib.axes.Axes` the event occurs is\nover, and additionally, the variables ``event.xdata`` and\n``event.ydata`` will be defined. This is the mouse location\nin data coords. See\n:class:`~matplotlib.backend_bases.KeyEvent` and\n:class:`~matplotlib.backend_bases.MouseEvent` for more info.\n\nReturn value is a connection id that can be used with\n:meth:`~matplotlib.backend_bases.Event.mpl_disconnect`.\n\nExample usage::\n\n def on_press(event):\n print 'you pressed', event.button, event.xdata, event.ydata\n\n cid = canvas.mpl_connect('button_press_event', on_press)", "id": "f17207:c10:m36"} {"signature": "def mpl_disconnect(self, cid):", "body": "return self.callbacks.disconnect(cid)", "docstring": "disconnect callback id cid\n\nExample usage::\n\n cid = canvas.mpl_connect('button_press_event', on_press)\n #...later\n canvas.mpl_disconnect(cid)", "id": "f17207:c10:m37"} {"signature": "def flush_events(self):", "body": "raise NotImplementedError", "docstring": "Flush the GUI events for the figure. Implemented only for\nbackends with GUIs.", "id": "f17207:c10:m38"} {"signature": "def start_event_loop(self,timeout):", "body": "raise NotImplementedError", "docstring": "Start an event loop. This is used to start a blocking event\nloop so that interactive functions, such as ginput and\nwaitforbuttonpress, can wait for events. This should not be\nconfused with the main GUI event loop, which is always running\nand has nothing to do with this.\n\nThis is implemented only for backends with GUIs.", "id": "f17207:c10:m39"} {"signature": "def stop_event_loop(self):", "body": "raise NotImplementedError", "docstring": "Stop an event loop. This is used to stop a blocking event\nloop so that interactive functions, such as ginput and\nwaitforbuttonpress, can wait for events.\n\nThis is implemented only for backends with GUIs.", "id": "f17207:c10:m40"} {"signature": "def start_event_loop_default(self,timeout=):", "body": "str = \"\"str += \"\"warnings.warn(str,DeprecationWarning)if timeout <= : timeout = np.inftimestep = counter = self._looping = Truewhile self._looping and counter*timestep < timeout:self.flush_events()time.sleep(timestep)counter += ", "docstring": "Start an event loop. This is used to start a blocking event\nloop so that interactive functions, such as ginput and\nwaitforbuttonpress, can wait for events. This should not be\nconfused with the main GUI event loop, which is always running\nand has nothing to do with this.\n\nThis function provides default event loop functionality based\non time.sleep that is meant to be used until event loop\nfunctions for each of the GUI backends can be written. As\nsuch, it throws a deprecated warning.\n\nCall signature::\n\n start_event_loop_default(self,timeout=0)\n\nThis call blocks until a callback function triggers\nstop_event_loop() or *timeout* is reached. If *timeout* is\n<=0, never timeout.", "id": "f17207:c10:m41"} {"signature": "def stop_event_loop_default(self):", "body": "self._looping = False", "docstring": "Stop an event loop. 
This is used to stop a blocking event\nloop so that interactive functions, such as ginput and\nwaitforbuttonpress, can wait for events.\n\nCall signature::\n\n stop_event_loop_default(self)", "id": "f17207:c10:m42"} {"signature": "def resize(self, w, h):", "body": "pass", "docstring": "For gui backends: resize window in pixels", "id": "f17207:c11:m3"} {"signature": "def show_popup(self, msg):", "body": "pass", "docstring": "Display message in a popup -- GUI only", "id": "f17207:c11:m5"} {"signature": "def set_window_title(self, title):", "body": "pass", "docstring": "Set the title text of the window containing the figure. Note that\nthis has no effect if there is no window (eg, a PS backend).", "id": "f17207:c11:m6"} {"signature": "def set_message(self, s):", "body": "pass", "docstring": "display a message on toolbar or in status bar", "id": "f17207:c13:m1"} {"signature": "def back(self, *args):", "body": "self._views.back()self._positions.back()self.set_history_buttons()self._update_view()", "docstring": "move back up the view lim stack", "id": "f17207:c13:m2"} {"signature": "def draw_rubberband(self, event, x0, y0, x1, y1):", "body": "pass", "docstring": "draw a rectangle rubberband to indicate zoom limits", "id": "f17207:c13:m4"} {"signature": "def forward(self, *args):", "body": "self._views.forward()self._positions.forward()self.set_history_buttons()self._update_view()", "docstring": "move forward in the view lim stack", "id": "f17207:c13:m5"} {"signature": "def home(self, *args):", "body": "self._views.home()self._positions.home()self.set_history_buttons()self._update_view()", "docstring": "restore the original view", "id": "f17207:c13:m6"} {"signature": "def _init_toolbar(self):", "body": "raise NotImplementedError", "docstring": "This is where you actually build the GUI widgets (called by\n__init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,\n``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard\nacross backends (there are ppm versions in CVS also).\n\nYou just need to set the callbacks\n\nhome : self.home\nback : self.back\nforward : self.forward\nhand : self.pan\nzoom_to_rect : self.zoom\nfilesave : self.save_figure\n\nYou only need to define the last one - the others are in the base\nclass implementation.", "id": "f17207:c13:m7"} {"signature": "def pan(self,*args):", "body": "if self._active == '':self._active = Noneelse:self._active = ''if self._idPress is not None:self._idPress = self.canvas.mpl_disconnect(self._idPress)self.mode = ''if self._idRelease is not None:self._idRelease = self.canvas.mpl_disconnect(self._idRelease)self.mode = ''if self._active:self._idPress = self.canvas.mpl_connect('', self.press_pan)self._idRelease = self.canvas.mpl_connect('', self.release_pan)self.mode = ''self.canvas.widgetlock(self)else:self.canvas.widgetlock.release(self)for a in self.canvas.figure.get_axes():a.set_navigate_mode(self._active)self.set_message(self.mode)", "docstring": "Activate the pan/zoom tool. 
pan with the left button, zoom with the right", "id": "f17207:c13:m9"} {"signature": "def press(self, event):", "body": "pass", "docstring": "this will be called whenever a mouse button is pressed", "id": "f17207:c13:m10"} {"signature": "def press_pan(self, event):", "body": "if event.button == :self._button_pressed=elif event.button == :self._button_pressed=else:self._button_pressed=Nonereturnx, y = event.x, event.yif self._views.empty(): self.push_current()self._xypress=[]for i, a in enumerate(self.canvas.figure.get_axes()):if x is not None and y is not None and a.in_axes(event) and a.get_navigate():a.start_pan(x, y, event.button)self._xypress.append((a, i))self.canvas.mpl_disconnect(self._idDrag)self._idDrag=self.canvas.mpl_connect('', self.drag_pan)self.press(event)", "docstring": "the press mouse button in pan/zoom mode callback", "id": "f17207:c13:m11"} {"signature": "def press_zoom(self, event):", "body": "if event.button == :self._button_pressed=elif event.button == :self._button_pressed=else:self._button_pressed=Nonereturnx, y = event.x, event.yif self._views.empty(): self.push_current()self._xypress=[]for i, a in enumerate(self.canvas.figure.get_axes()):if x is not None and y is not None and a.in_axes(event)and a.get_navigate() and a.can_zoom():self._xypress.append(( x, y, a, i, a.viewLim.frozen(), a.transData.frozen()))self.press(event)", "docstring": "the press mouse button in zoom to rect mode callback", "id": "f17207:c13:m12"} {"signature": "def push_current(self):", "body": "lims = []; pos = []for a in self.canvas.figure.get_axes():xmin, xmax = a.get_xlim()ymin, ymax = a.get_ylim()lims.append( (xmin, xmax, ymin, ymax) )pos.append( (a.get_position(True).frozen(),a.get_position().frozen() ) )self._views.push(lims)self._positions.push(pos)self.set_history_buttons()", "docstring": "push the current view limits and position onto the stack", "id": "f17207:c13:m13"} {"signature": "def release(self, event):", "body": "pass", "docstring": "this will be called whenever a mouse button is released", "id": "f17207:c13:m14"} {"signature": "def release_pan(self, event):", "body": "self.canvas.mpl_disconnect(self._idDrag)self._idDrag=self.canvas.mpl_connect('', self.mouse_move)for a, ind in self._xypress:a.end_pan()if not self._xypress: returnself._xypress = []self._button_pressed=Noneself.push_current()self.release(event)self.draw()", "docstring": "the release mouse button callback in pan/zoom mode", "id": "f17207:c13:m15"} {"signature": "def drag_pan(self, event):", "body": "for a, ind in self._xypress:a.drag_pan(self._button_pressed, event.key, event.x, event.y)self.dynamic_update()", "docstring": "the drag callback in pan/zoom mode", "id": "f17207:c13:m16"} {"signature": "def release_zoom(self, event):", "body": "if not self._xypress: returnlast_a = []for cur_xypress in self._xypress:x, y = event.x, event.ylastx, lasty, a, ind, lim, trans = cur_xypressif abs(x-lastx)< or abs(y-lasty)<:self._xypress = Noneself.release(event)self.draw()returnx0, y0, x1, y1 = lim.extentsinverse = a.transData.inverted()lastx, lasty = inverse.transform_point( (lastx, lasty) )x, y = inverse.transform_point( (x, y) )Xmin,Xmax=a.get_xlim()Ymin,Ymax=a.get_ylim()twinx, twiny = False, Falseif last_a:for la in last_a:if a.get_shared_x_axes().joined(a,la): twinx=Trueif a.get_shared_y_axes().joined(a,la): twiny=Truelast_a.append(a)if twinx:x0, x1 = Xmin, Xmaxelse:if Xmin < Xmax:if xelse: x0, x1 = lastx, xif x0 < Xmin: x0=Xminif x1 > Xmax: x1=Xmaxelse:if x>lastx: x0, x1 = x, lastxelse: x0, x1 = lastx, xif x0 > Xmin: x0=Xminif x1 
< Xmax: x1=Xmaxif twiny:y0, y1 = Ymin, Ymaxelse:if Ymin < Ymax:if yelse: y0, y1 = lasty, yif y0 < Ymin: y0=Yminif y1 > Ymax: y1=Ymaxelse:if y>lasty: y0, y1 = y, lastyelse: y0, y1 = lasty, yif y0 > Ymin: y0=Yminif y1 < Ymax: y1=Ymaxif self._button_pressed == :a.set_xlim((x0, x1))a.set_ylim((y0, y1))elif self._button_pressed == :if a.get_xscale()=='':alpha=np.log(Xmax/Xmin)/np.log(x1/x0)rx1=pow(Xmin/x0,alpha)*Xminrx2=pow(Xmax/x0,alpha)*Xminelse:alpha=(Xmax-Xmin)/(x1-x0)rx1=alpha*(Xmin-x0)+Xminrx2=alpha*(Xmax-x0)+Xminif a.get_yscale()=='':alpha=np.log(Ymax/Ymin)/np.log(y1/y0)ry1=pow(Ymin/y0,alpha)*Yminry2=pow(Ymax/y0,alpha)*Yminelse:alpha=(Ymax-Ymin)/(y1-y0)ry1=alpha*(Ymin-y0)+Yminry2=alpha*(Ymax-y0)+Ymina.set_xlim((rx1, rx2))a.set_ylim((ry1, ry2))self.draw()self._xypress = Noneself._button_pressed = Noneself.push_current()self.release(event)", "docstring": "the release mouse button callback in zoom to rect mode", "id": "f17207:c13:m17"} {"signature": "def draw(self):", "body": "for a in self.canvas.figure.get_axes():xaxis = getattr(a, '', None)yaxis = getattr(a, '', None)locators = []if xaxis is not None:locators.append(xaxis.get_major_locator())locators.append(xaxis.get_minor_locator())if yaxis is not None:locators.append(yaxis.get_major_locator())locators.append(yaxis.get_minor_locator())for loc in locators:loc.refresh()self.canvas.draw()", "docstring": "redraw the canvases, update the locators", "id": "f17207:c13:m18"} {"signature": "def _update_view(self):", "body": "lims = self._views()if lims is None: returnpos = self._positions()if pos is None: returnfor i, a in enumerate(self.canvas.figure.get_axes()):xmin, xmax, ymin, ymax = lims[i]a.set_xlim((xmin, xmax))a.set_ylim((ymin, ymax))a.set_position( pos[i][], '' )a.set_position( pos[i][], '' )self.draw()", "docstring": "update the viewlim and position from the view and\n position stack for each axes", "id": "f17207:c13:m19"} {"signature": "def save_figure(self, *args):", "body": "raise NotImplementedError", "docstring": "save the current figure", "id": "f17207:c13:m20"} {"signature": "def set_cursor(self, cursor):", "body": "pass", "docstring": "Set the current cursor to one of the :class:`Cursors`\nenums values", "id": "f17207:c13:m21"} {"signature": "def update(self):", "body": "self._views.clear()self._positions.clear()self.set_history_buttons()", "docstring": "reset the axes stack", "id": "f17207:c13:m22"} {"signature": "def zoom(self, *args):", "body": "if self._active == '':self._active = Noneelse:self._active = ''if self._idPress is not None:self._idPress=self.canvas.mpl_disconnect(self._idPress)self.mode = ''if self._idRelease is not None:self._idRelease=self.canvas.mpl_disconnect(self._idRelease)self.mode = ''if self._active:self._idPress = self.canvas.mpl_connect('', self.press_zoom)self._idRelease = self.canvas.mpl_connect('', self.release_zoom)self.mode = ''self.canvas.widgetlock(self)else:self.canvas.widgetlock.release(self)for a in self.canvas.figure.get_axes():a.set_navigate_mode(self._active)self.set_message(self.mode)", "docstring": "activate zoom to rect mode", "id": "f17207:c13:m23"} {"signature": "def set_history_buttons(self):", "body": "pass", "docstring": "enable or disable back/forward button", "id": "f17207:c13:m24"} {"signature": "def _fix2comp(num):", "body": "assert <= num < **if num & **:return num - **else:return num", "docstring": "Convert from two's complement to negative.", "id": "f17208:m0"} {"signature": "def _mul2012(num1, num2):", "body": "return (num1*num2) >> ", "docstring": "Multiply two numbers in 
20.12 fixed point format.", "id": "f17208:m1"} {"signature": "def find_tex_file(filename, format=None):", "body": "cmd = ['']if format is not None:cmd += ['' + format]cmd += [filename]matplotlib.verbose.report(''% (filename,cmd), '')pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)result = pipe.communicate()[].rstrip()matplotlib.verbose.report('' % result,'')return result", "docstring": "Call kpsewhich to find a file in the texmf tree.\nIf format is not None, it is used as the value for the --format option.\nSee the kpathsea documentation for more information.\n\nApparently most existing TeX distributions on Unix-like systems\nuse kpathsea. I hear MikTeX (a popular distribution on Windows)\ndoesn't use kpathsea, so what do we do? (TODO)", "id": "f17208:m2"} {"signature": "def __init__(self, filename, dpi):", "body": "matplotlib.verbose.report('' + filename, '')self.file = open(filename, '')self.dpi = dpiself.fonts = {}self.state = _dvistate.pre", "docstring": "Initialize the object. This takes the filename as input and\nopens the file; actually reading the file happens when\niterating through the pages of the file.", "id": "f17208:c0:m0"} {"signature": "def __iter__(self):", "body": "while True:have_page = self._read()if have_page:yield self._output()else:break", "docstring": "Iterate through the pages of the file.\n\nReturns (text, pages) pairs, where:\n text is a list of (x, y, fontnum, glyphnum, width) tuples\n boxes is a list of (x, y, height, width) tuples\n\nThe coordinates are transformed into a standard Cartesian\ncoordinate system at the dpi value given when initializing.\nThe coordinates are floating point numbers, but otherwise\nprecision is not lost and coordinate values are not clipped to\nintegers.", "id": "f17208:c0:m1"} {"signature": "def close(self):", "body": "if not self.file.closed:self.file.close()", "docstring": "Close the underlying file if it is open.", "id": "f17208:c0:m2"} {"signature": "def _output(self):", "body": "minx, miny, maxx, maxy = np.inf, np.inf, -np.inf, -np.infmaxy_pure = -np.inffor elt in self.text + self.boxes:if len(elt) == : x,y,h,w = elte = else: x,y,font,g,w = elth = _mul2012(font._scale, font._tfm.height[g])e = _mul2012(font._scale, font._tfm.depth[g])minx = min(minx, x)miny = min(miny, y - h)maxx = max(maxx, x + w)maxy = max(maxy, y + e)maxy_pure = max(maxy_pure, y)if self.dpi is None:return mpl_cbook.Bunch(text=self.text, boxes=self.boxes,width=maxx-minx, height=maxy_pure-miny,descent=maxy-maxy_pure)d = self.dpi / ( * **) text = [ ((x-minx)*d, (maxy-y)*d, f, g, w*d)for (x,y,f,g,w) in self.text ]boxes = [ ((x-minx)*d, (maxy-y)*d, h*d, w*d) for (x,y,h,w) in self.boxes ]return mpl_cbook.Bunch(text=text, boxes=boxes,width=(maxx-minx)*d,height=(maxy_pure-miny)*d,descent=(maxy-maxy_pure)*d)", "docstring": "Output the text and boxes belonging to the most recent page.\npage = dvi._output()", "id": "f17208:c0:m3"} {"signature": "def _read(self):", "body": "while True:byte = ord(self.file.read())self._dispatch(byte)if self.state == _dvistate.inpage:matplotlib.verbose.report('' %(byte, self.h, self.v),'')if byte == : return Trueif self.state == _dvistate.post_post: self.close()return False", "docstring": "Read one page from the file. 
Return True if successful,\nFalse if there were no more pages.", "id": "f17208:c0:m4"} {"signature": "def _arg(self, nbytes, signed=False):", "body": "str = self.file.read(nbytes)value = ord(str[])if signed and value >= :value = value - for i in range(, nbytes):value = *value + ord(str[i])return value", "docstring": "Read and return an integer argument \"nbytes\" long.\nSignedness is determined by the \"signed\" keyword.", "id": "f17208:c0:m5"} {"signature": "def _dispatch(self, byte):", "body": "if <= byte <= : self._set_char(byte)elif byte == : self._set_char(self._arg())elif byte == : self._set_char(self._arg())elif byte == : self._set_char(self._arg())elif byte == : self._set_char(self._arg(, True))elif byte == : self._set_rule(self._arg(, True), self._arg(, True))elif byte == : self._put_char(self._arg())elif byte == : self._put_char(self._arg())elif byte == : self._put_char(self._arg())elif byte == : self._put_char(self._arg(, True))elif byte == : self._put_rule(self._arg(, True), self._arg(, True))elif byte == : self._nop()elif byte == : self._bop(*[self._arg(, True) for i in range()])elif byte == : self._eop()elif byte == : self._push()elif byte == : self._pop()elif byte == : self._right(self._arg(, True))elif byte == : self._right(self._arg(, True))elif byte == : self._right(self._arg(, True))elif byte == : self._right(self._arg(, True))elif byte == : self._right_w(None)elif byte == : self._right_w(self._arg(, True))elif byte == : self._right_w(self._arg(, True))elif byte == : self._right_w(self._arg(, True))elif byte == : self._right_w(self._arg(, True))elif byte == : self._right_x(None)elif byte == : self._right_x(self._arg(, True))elif byte == : self._right_x(self._arg(, True))elif byte == : self._right_x(self._arg(, True))elif byte == : self._right_x(self._arg(, True))elif byte == : self._down(self._arg(, True))elif byte == : self._down(self._arg(, True))elif byte == : self._down(self._arg(, True))elif byte == : self._down(self._arg(, True))elif byte == : self._down_y(None)elif byte == : self._down_y(self._arg(, True))elif byte == : self._down_y(self._arg(, True))elif byte == : self._down_y(self._arg(, True))elif byte == : self._down_y(self._arg(, True))elif byte == : self._down_z(None)elif byte == : self._down_z(self._arg(, True))elif byte == : self._down_z(self._arg(, True))elif byte == : self._down_z(self._arg(, True))elif byte == : self._down_z(self._arg(, True))elif <= byte <= : self._fnt_num(byte-)elif byte == : self._fnt_num(self._arg())elif byte == : self._fnt_num(self._arg())elif byte == : self._fnt_num(self._arg())elif byte == : self._fnt_num(self._arg(, True))elif <= byte <= :len = self._arg(byte-)special = self.file.read(len)self._xxx(special)elif <= byte <= :k = self._arg(byte-, byte==)c, s, d, a, l = [ self._arg(x) for x in (, , , , ) ]n = self.file.read(a+l)self._fnt_def(k, c, s, d, a, l, n)elif byte == :i, num, den, mag, k = [ self._arg(x) for x in (, , , , ) ]x = self.file.read(k)self._pre(i, num, den, mag, x)elif byte == : self._post()elif byte == : self._post_post()else:raise ValueError(\"\"%byte)", "docstring": "Based on the opcode \"byte\", read the correct kinds of\narguments from the dvi file and call the method implementing\nthat opcode with those arguments.", "id": "f17208:c0:m6"} {"signature": "def _width_of(self, char):", "body": "width = self._tfm.width.get(char, None)if width is not None:return _mul2012(width, self._scale)matplotlib.verbose.report('' % (char, self.texname),'')return ", "docstring": "Width of char in dvi units. 
For internal use by dviread.py.", "id": "f17208:c1:m3"} {"signature": "def _parse(self, file):", "body": "for line in file:line = line.strip()if line == '' or line.startswith(''):continuewords, pos = [], while pos < len(line):if line[pos] == '': pos += end = line.index('', pos)words.append(line[pos:end])pos = end + else: end = line.find('', pos+)if end == -: end = len(line)words.append(line[pos:end])pos = endwhile pos < len(line) and line[pos] == '':pos += self._register(words)", "docstring": "Parse each line into words.", "id": "f17208:c4:m2"} {"signature": "def _register(self, words):", "body": "texname, psname = words[:]effects, encoding, filename = [], None, Nonefor word in words[:]:if not word.startswith(''):effects.append(word)else:word = word.lstrip('')if word.startswith(''):assert encoding is Noneencoding = word[:]elif word.endswith(''):assert encoding is Noneencoding = wordelse:assert filename is Nonefilename = wordself._font[texname] = mpl_cbook.Bunch(texname=texname, psname=psname, effects=effects,encoding=encoding, filename=filename)", "docstring": "Register a font described by \"words\".\n\n The format is, AFAIK: texname fontname [effects and filenames]\n Effects are PostScript snippets like \".177 SlantFont\",\n filenames begin with one or two less-than signs. A filename\n ending in enc is an encoding file, other filenames are font\n files. This can be overridden with a left bracket: <[foobar\n indicates an encoding file named foobar.\n\n There is some difference between ', r'', r'', r'', r'', '', '', '')s = s[:-]for r in remove: s = s.replace(r,'')return s", "docstring": "remove latex formatting from mathtext", "id": "f17209:m1"} {"signature": "def unique(x):", "body": "return list(dict([ (val, ) for val in x]).keys())", "docstring": "Return a list of unique elements of *x*", "id": "f17209:m2"} {"signature": "def iterable(obj):", "body": "try: len(obj)except: return Falsereturn True", "docstring": "return true if *obj* is iterable", "id": "f17209:m3"} {"signature": "def is_string_like(obj):", "body": "if isinstance(obj, str): return Trueif ma.isMaskedArray(obj):if obj.ndim == and obj.dtype.kind in '':return Trueelse:return Falsetry: obj + ''except (TypeError, ValueError): return Falsereturn True", "docstring": "Return True if *obj* looks like a string", "id": "f17209:m4"} {"signature": "def is_sequence_of_strings(obj):", "body": "if not iterable(obj): return Falseif is_string_like(obj): return Falsefor o in obj:if not is_string_like(o): return Falsereturn True", "docstring": "Returns true if *obj* is iterable and contains strings", "id": "f17209:m5"} {"signature": "def is_writable_file_like(obj):", "body": "return hasattr(obj, '') and callable(obj.write)", "docstring": "return true if *obj* looks like a file object with a *write* method", "id": "f17209:m6"} {"signature": "def is_scalar(obj):", "body": "return not is_string_like(obj) and not iterable(obj)", "docstring": "return true if *obj* is not string like and is not iterable", "id": "f17209:m7"} {"signature": "def is_numlike(obj):", "body": "try: obj+except TypeError: return Falseelse: return True", "docstring": "return true if *obj* looks like a number", "id": "f17209:m8"} {"signature": "def to_filehandle(fname, flag='', return_opened=False):", "body": "if is_string_like(fname):if fname.endswith(''):import gzipfh = gzip.open(fname, flag)else:fh = file(fname, flag)opened = Trueelif hasattr(fname, ''):fh = fnameopened = Falseelse:raise ValueError('')if return_opened:return fh, openedreturn fh", "docstring": "*fname* can 
be a filename or a file handle. Support for gzipped\nfiles is automatic, if the filename ends in .gz. *flag* is a\nread/write flag for :func:`file`", "id": "f17209:m9"} {"signature": "def flatten(seq, scalarp=is_scalar_or_string):", "body": "for item in seq:if scalarp(item): yield itemelse:for subitem in flatten(item, scalarp):yield subitem", "docstring": "this generator flattens nested containers such as\n\n>>> l=( ('John', 'Hunter'), (1,23), [[[[42,(5,23)]]]])\n\nso that\n\n>>> for i in flatten(l): print i,\nJohn Hunter 1 23 42 5 23\n\nBy: Composite of Holger Krekel and Luther Blissett\nFrom: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/121294\nand Recipe 1.12 in cookbook", "id": "f17209:m11"} {"signature": "def soundex(name, len=):", "body": "soundex_digits = ''sndx = ''fc = ''for c in name.upper():if c.isalpha():if not fc: fc = c d = soundex_digits[ord(c)-ord('')]if not sndx or (d != sndx[-]):sndx += dsndx = fc + sndx[:]sndx = sndx.replace('', '')return (sndx + (len * ''))[:len]", "docstring": "soundex module conforming to Odell-Russell algorithm", "id": "f17209:m12"} {"signature": "def mkdirs(newdir, mode=):", "body": "try:if not os.path.exists(newdir):parts = os.path.split(newdir)for i in range(, len(parts)+):thispart = os.path.join(*parts[:i])if not os.path.exists(thispart):os.makedirs(thispart, mode)except OSError as err:if err.errno != errno.EEXIST or not os.path.isdir(newdir):raise", "docstring": "make directory *newdir* recursively, and set *mode*. Equivalent to ::\n\n > mkdir -p NEWDIR\n > chmod MODE NEWDIR", "id": "f17209:m13"} {"signature": "def dict_delall(d, keys):", "body": "for key in keys:try: del d[key]except KeyError: pass", "docstring": "delete all of the *keys* from the :class:`dict` *d*", "id": "f17209:m14"} {"signature": "def get_split_ind(seq, N):", "body": "sLen = for (word, ind) in zip(seq, list(range(len(seq)))):sLen += len(word) + if sLen>=N: return indreturn len(seq)", "docstring": "*seq* is a list of words. Return the index into seq such that::\n\n len(' '.join(seq[:ind])<=N", "id": "f17209:m15"} {"signature": "def wrap(prefix, text, cols):", "body": "pad = ''*len(prefix.expandtabs())available = cols - len(pad)seq = text.split('')Nseq = len(seq)ind = lines = []while indlastInd = indind += get_split_ind(seq[ind:], available)lines.append(seq[lastInd:ind])ret = prefix + ''.join(lines[]) + ''for line in lines[:]:ret += pad + ''.join(line) + ''return ret", "docstring": "wrap *text* with *prefix* at length *cols*", "id": "f17209:m16"} {"signature": "def dedent(s):", "body": "if not s: return ''match = _find_dedent_regex.match(s)if match is None:return snshift = match.end() - match.start()if nshift == :return sunindent = _dedent_regex.get(nshift, None)if unindent is None:unindent = re.compile(\"\" % nshift)_dedent_regex[nshift] = unindentresult = unindent.sub(\"\", s).strip()return result", "docstring": "Remove excess indentation from docstring *s*.\n\nDiscards any leading blank lines, then removes up to n whitespace\ncharacters from each line, where n is the number of leading\nwhitespace characters in the first line. 
It differs from\ntextwrap.dedent in its deletion of leading blank lines and its use\nof the first non-blank line to determine the indentation.\n\nIt is also faster in most cases.", "id": "f17209:m17"} {"signature": "def listFiles(root, patterns='', recurse=, return_folders=):", "body": "import os.path, fnmatchpattern_list = patterns.split('')class Bunch:def __init__(self, **kwds): self.__dict__.update(kwds)arg = Bunch(recurse=recurse, pattern_list=pattern_list,return_folders=return_folders, results=[])def visit(arg, dirname, files):for name in files:fullname = os.path.normpath(os.path.join(dirname, name))if arg.return_folders or os.path.isfile(fullname):for pattern in arg.pattern_list:if fnmatch.fnmatch(name, pattern):arg.results.append(fullname)breakif not arg.recurse: files[:]=[]os.path.walk(root, visit, arg)return arg.results", "docstring": "Recursively list files\n\nfrom Parmar and Martelli in the Python Cookbook", "id": "f17209:m18"} {"signature": "def get_recursive_filelist(args):", "body": "files = []for arg in args:if os.path.isfile(arg):files.append(arg)continueif os.path.isdir(arg):newfiles = listFiles(arg, recurse=, return_folders=)files.extend(newfiles)return [f for f in files if not os.path.islink(f)]", "docstring": "Recurse through all the files and dirs in *args*, ignoring symbolic links,\nand return the files as a list of strings", "id": "f17209:m19"} {"signature": "def pieces(seq, num=):", "body": "start = while :item = seq[start:start+num]if not len(item): breakyield itemstart += num", "docstring": "Break up the *seq* into *num* tuples", "id": "f17209:m20"} {"signature": "def allequal(seq):", "body": "if len(seq)<: return Trueval = seq[]for i in range(, len(seq)):thisval = seq[i]if thisval != val: return Falsereturn True", "docstring": "Return *True* if all elements of *seq* compare equal. If *seq* is\n0 or 1 length, return *True*", "id": "f17209:m22"} {"signature": "def alltrue(seq):", "body": "if not len(seq): return Falsefor val in seq:if not val: return Falsereturn True", "docstring": "Return *True* if all elements of *seq* evaluate to *True*. If\n*seq* is empty, return *False*.", "id": "f17209:m23"} {"signature": "def onetrue(seq):", "body": "if not len(seq): return Falsefor val in seq:if val: return Truereturn False", "docstring": "Return *True* if one element of *seq* is *True*. If *seq* is\nempty, return *False*.", "id": "f17209:m24"} {"signature": "def allpairs(x):", "body": "return [ (s, f) for i, f in enumerate(x) for s in x[i+:] ]", "docstring": "return all possible pairs in sequence *x*\n\nCondensed by Alex Martelli from this thread_ on c.l.python\n\n.. _thread: http://groups.google.com/groups?q=all+pairs+group:*python*&hl=en&lr=&ie=UTF-8&selm=mailman.4028.1096403649.5135.python-list%40python.org&rnum=1", "id": "f17209:m25"} {"signature": "def popd(d, *args):", "body": "warnings.warn(\"\", DeprecationWarning)if len(args)==:key = args[]val = d[key]del d[key]elif len(args)==:key, default = argsval = d.get(key, default)try: del d[key]except KeyError: passreturn val", "docstring": "Should behave like python2.3 :meth:`dict.pop` method; *d* is a\n:class:`dict`::\n\n # returns value for key and deletes item; raises a KeyError if key\n # is not in dict\n val = popd(d, key)\n\n # returns value for key if key exists, else default. Delete key,\n # val item if it exists. 
Will not raise a KeyError\n val = popd(d, key, default)", "id": "f17209:m26"} {"signature": "def popall(seq):", "body": "for i in range(len(seq)): seq.pop()", "docstring": "empty a list", "id": "f17209:m27"} {"signature": "def finddir(o, match, case=False):", "body": "if case:names = [(name,name) for name in dir(o) if is_string_like(name)]else:names = [(name.lower(), name) for name in dir(o) if is_string_like(name)]match = match.lower()return [orig for name, orig in names if name.find(match)>=]", "docstring": "return all attributes of *o* which match string in match. if case\nis True require an exact case match.", "id": "f17209:m28"} {"signature": "def reverse_dict(d):", "body": "return dict([(v,k) for k,v in list(d.items())])", "docstring": "reverse the dictionary -- may lose data if values are not unique!", "id": "f17209:m29"} {"signature": "def report_memory(i=): ", "body": "pid = os.getpid()if sys.platform=='':a2 = os.popen('' % pid).readlines()mem = int(a2[-].strip())elif sys.platform.startswith(''):a2 = os.popen('' % pid).readlines()mem = int(a2[].split()[])elif sys.platform.startswith(''):a2 = os.popen('' % pid).readlines()mem = int(a2[].split()[])return mem", "docstring": "return the memory consumed by process", "id": "f17209:m30"} {"signature": "def safezip(*args):", "body": "Nx = len(args[])for i, arg in enumerate(args[:]):if len(arg) != Nx:raise ValueError(_safezip_msg % (Nx, i+, len(arg)))return list(zip(*args))", "docstring": "make sure *args* are equal len before zipping", "id": "f17209:m31"} {"signature": "def issubclass_safe(x, klass):", "body": "try:return issubclass(x, klass)except TypeError:return False", "docstring": "return issubclass(x, klass) and return False on a TypeError", "id": "f17209:m32"} {"signature": "def print_cycles(objects, outstream=sys.stdout, show_progress=False):", "body": "import gcfrom types import FrameTypedef print_path(path):for i, step in enumerate(path):next = path[(i + ) % len(path)]outstream.write(\"\" % str(type(step)))if isinstance(step, dict):for key, val in list(step.items()):if val is next:outstream.write(\"\" % repr(key))breakif key is next:outstream.write(\"\" % repr(val))breakelif isinstance(step, list):outstream.write(\"\" % step.index(next))elif isinstance(step, tuple):outstream.write(\"\")else:outstream.write(repr(step))outstream.write(\"\")outstream.write(\"\")def recurse(obj, start, all, current_path):if show_progress:outstream.write(\"\" % len(all))all[id(obj)] = Nonereferents = gc.get_referents(obj)for referent in referents:if referent is start:print_path(current_path)elif referent is objects or isinstance(referent, FrameType):continueelif id(referent) not in all:recurse(referent, start, all, current_path + [obj])for obj in objects:outstream.write(\"\" % (obj,))recurse(obj, obj, { }, [])", "docstring": "*objects*\n A list of objects to find cycles in. 
It is often useful to\n pass in gc.garbage to find the cycles that are preventing some\n objects from being garbage collected.\n\n*outstream*\n The stream for output.\n\n*show_progress*\n If True, print the number of objects reached as they are found.", "id": "f17209:m33"} {"signature": "def delete_masked_points(*args):", "body": "if not len(args):return ()if (is_string_like(args[]) or not iterable(args[])):raise ValueError(\"\")nrecs = len(args[])margs = []seqlist = [False] * len(args)for i, x in enumerate(args):if (not is_string_like(x)) and iterable(x) and len(x) == nrecs:seqlist[i] = Trueif ma.isMA(x):if x.ndim > :raise ValueError(\"\")else:x = np.asarray(x)margs.append(x)masks = [] for i, x in enumerate(margs):if seqlist[i]:if x.ndim > :continue if ma.isMA(x):masks.append(~ma.getmaskarray(x)) xd = x.dataelse:xd = xtry:mask = np.isfinite(xd)if isinstance(mask, np.ndarray):masks.append(mask)except: passif len(masks):mask = reduce(np.logical_and, masks)igood = mask.nonzero()[]if len(igood) < nrecs:for i, x in enumerate(margs):if seqlist[i]:margs[i] = x.take(igood, axis=)for i, x in enumerate(margs):if seqlist[i] and ma.isMA(x):margs[i] = x.filled()return margs", "docstring": "Find all masked and/or non-finite points in a set of arguments,\nand return the arguments with only the unmasked points remaining.\n\nArguments can be in any of 5 categories:\n\n1) 1-D masked arrays\n2) 1-D ndarrays\n3) ndarrays with more than one dimension\n4) other non-string iterables\n5) anything else\n\nThe first argument must be in one of the first four categories;\nany argument with a length differing from that of the first\nargument (and hence anything in category 5) then will be\npassed through unchanged.\n\nMasks are obtained from all arguments of the correct length\nin categories 1, 2, and 4; a point is bad if masked in a masked\narray or if it is a nan or inf. No attempt is made to\nextract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`\ndoes not yield a Boolean array.\n\nAll input arguments that are not passed unchanged are returned\nas ndarrays after removing the points or rows corresponding to\nmasks in any of the arguments.\n\nA vastly simpler version of this function was originally\nwritten as a helper for Axes.scatter().", "id": "f17209:m36"} {"signature": "def unmasked_index_ranges(mask, compressed = True):", "body": "mask = mask.reshape(mask.size)m = np.concatenate(((,), mask, (,)))indices = np.arange(len(mask) + )mdif = m[:] - m[:-]i0 = np.compress(mdif == -, indices)i1 = np.compress(mdif == , indices)assert len(i0) == len(i1)if len(i1) == :return None if not compressed:return np.concatenate((i0[:, np.newaxis], i1[:, np.newaxis]), axis=)seglengths = i1 - i0breakpoints = np.cumsum(seglengths)ic0 = np.concatenate(((,), breakpoints[:-]))ic1 = breakpointsreturn np.concatenate((ic0[:, np.newaxis], ic1[:, np.newaxis]), axis=)", "docstring": "Find index ranges where *mask* is *False*.\n\n*mask* will be flattened if it is not already 1-D.\n\nReturns Nx2 :class:`numpy.ndarray` with each row the start and stop\nindices for slices of the compressed :class:`numpy.ndarray`\ncorresponding to each of *N* uninterrupted runs of unmasked\nvalues. If optional argument *compressed* is *False*, it returns\nthe start and stop indices into the original :class:`numpy.ndarray`,\nnot the compressed :class:`numpy.ndarray`. 
Returns *None* if there\nare no unmasked values.\n\nExample::\n\n y = ma.array(np.arange(5), mask = [0,0,1,0,0])\n ii = unmasked_index_ranges(ma.getmaskarray(y))\n # returns array [[0,2,] [2,4,]]\n\n y.compressed()[ii[1,0]:ii[1,1]]\n # returns array [3,4,]\n\n ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)\n # returns array [[0, 2], [3, 5]]\n\n y.filled()[ii[1,0]:ii[1,1]]\n # returns array [3,4,]\n\nPrior to the transforms refactoring, this was used to support\nmasked arrays in Line2D.", "id": "f17209:m37"} {"signature": "def less_simple_linear_interpolation( x, y, xi, extrap=False ):", "body": "warnings.warn('', DeprecationWarning)import matplotlib.mlab as mlabreturn mlab.less_simple_linear_interpolation( x, y, xi, extrap=extrap )", "docstring": "This function has been moved to matplotlib.mlab -- please import\nit from there", "id": "f17209:m38"} {"signature": "def isvector(X):", "body": "warnings.warn('', DeprecationWarning)import matplotlib.mlab as mlabreturn mlab.isvector( x, y, xi, extrap=extrap )", "docstring": "This function has been moved to matplotlib.mlab -- please import\nit from there", "id": "f17209:m39"} {"signature": "def vector_lengths( X, P=, axis=None ):", "body": "warnings.warn('', DeprecationWarning)import matplotlib.mlab as mlabreturn mlab.vector_lengths( X, P=, axis=axis )", "docstring": "This function has been moved to matplotlib.mlab -- please import\nit from there", "id": "f17209:m40"} {"signature": "def distances_along_curve( X ):", "body": "warnings.warn('', DeprecationWarning)import matplotlib.mlab as mlabreturn mlab.distances_along_curve( X )", "docstring": "This function has been moved to matplotlib.mlab -- please import\nit from there", "id": "f17209:m41"} {"signature": "def path_length(X):", "body": "warnings.warn('', DeprecationWarning)import matplotlib.mlab as mlabreturn mlab.path_length(X)", "docstring": "This function has been moved to matplotlib.mlab -- please import\nit from there", "id": "f17209:m42"} {"signature": "def is_closed_polygon(X):", "body": "warnings.warn('', DeprecationWarning)import matplotlib.mlab as mlabreturn mlab.is_closed_polygon(X)", "docstring": "This function has been moved to matplotlib.mlab -- please import\nit from there", "id": "f17209:m43"} {"signature": "def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):", "body": "warnings.warn('', DeprecationWarning)import matplotlib.mlab as mlabreturn mlab.quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y)", "docstring": "This function has been moved to matplotlib.mlab -- please import\nit from there", "id": "f17209:m44"} {"signature": "def __init__(self, fmt='', missing='', missingval=None):", "body": "converter.__init__(self, missing, missingval)self.fmt = fmt", "docstring": "use a :func:`time.strptime` format string for conversion", "id": "f17209:c2:m0"} {"signature": "def __init__(self, fmt='', missing='', missingval=None):", "body": "converter.__init__(self, missing, missingval)self.fmt = fmt", "docstring": "use a :func:`time.strptime` format string for conversion", "id": "f17209:c3:m0"} {"signature": "def __init__(self, signals):", "body": "self.signals = set(signals)self.callbacks = dict([(s, dict()) for s in signals])self._cid = ", "docstring": "*signals* is a sequence of valid signals", "id": "f17209:c6:m0"} {"signature": "def _check_signal(self, s):", "body": "if s not in self.signals:signals = list(self.signals)signals.sort()raise ValueError(''%(s, signals))", "docstring": "make sure *s* is a valid signal or raise a ValueError", "id": "f17209:c6:m1"} {"signature": "def 
connect(self, s, func):", "body": "self._check_signal(s)self._cid +=self.callbacks[s][self._cid] = funcreturn self._cid", "docstring": "register *func* to be called when a signal *s* is generated\nfunc will be called", "id": "f17209:c6:m2"} {"signature": "def disconnect(self, cid):", "body": "for eventname, callbackd in list(self.callbacks.items()):try: del callbackd[cid]except KeyError: continueelse: return", "docstring": "disconnect the callback registered with callback id *cid*", "id": "f17209:c6:m3"} {"signature": "def process(self, s, *args, **kwargs):", "body": "self._check_signal(s)for func in list(self.callbacks[s].values()):func(*args, **kwargs)", "docstring": "process signal *s*. All of the functions registered to receive\ncallbacks on *s* will be called with *\\*args* and *\\*\\*kwargs*", "id": "f17209:c6:m4"} {"signature": "def _make_regex(self):", "body": "return re.compile(\"\".join(map(re.escape, list(self.keys()))))", "docstring": "Build re object based on the keys of the current dictionary", "id": "f17209:c13:m0"} {"signature": "def __call__(self, match):", "body": "return self[match.group()]", "docstring": "Handler invoked for each regex *match*", "id": "f17209:c13:m1"} {"signature": "def xlat(self, text):", "body": "return self._make_regex().sub(self, text)", "docstring": "Translate *text*, returns the modified text.", "id": "f17209:c13:m2"} {"signature": "def append(self,x):", "body": "self.data.append(x)if len(self.data) == self.max:self.cur = self.__class__ = __Full", "docstring": "append an element at the end of the buffer", "id": "f17209:c16:m1"} {"signature": "def get(self):", "body": "return self.data", "docstring": "Return a list of elements from the oldest to the newest.", "id": "f17209:c16:m2"} {"signature": "def __call__(self):", "body": "if not len(self._elements): return self._defaultelse: return self._elements[self._pos]", "docstring": "return the current element, or None", "id": "f17209:c18:m1"} {"signature": "def forward(self):", "body": "N = len(self._elements)if self._pos: self._pos += return self()", "docstring": "move the position forward and return the current element", "id": "f17209:c18:m2"} {"signature": "def back(self):", "body": "if self._pos>: self._pos -= return self()", "docstring": "move the position back and return the current element", "id": "f17209:c18:m3"} {"signature": "def push(self, o):", "body": "self._elements = self._elements[:self._pos+]self._elements.append(o)self._pos = len(self._elements)-return self()", "docstring": "push object onto stack at current position - all elements\noccurring later than the current position are discarded", "id": "f17209:c18:m4"} {"signature": "def home(self):", "body": "if not len(self._elements): returnself.push(self._elements[])return self()", "docstring": "push the first element onto the top of the stack", "id": "f17209:c18:m5"} {"signature": "def clear(self):", "body": "self._pos = -self._elements = []", "docstring": "empty the stack", "id": "f17209:c18:m7"} {"signature": "def bubble(self, o):", "body": "if o not in self._elements:raise ValueError('')old = self._elements[:]self.clear()bubbles = []for thiso in old:if thiso==o: bubbles.append(thiso)else: self.push(thiso)for thiso in bubbles:self.push(o)return o", "docstring": "raise *o* to the top of the stack and return *o*. 
*o* must be\nin the stack", "id": "f17209:c18:m8"} {"signature": "def remove(self, o):", "body": "if o not in self._elements:raise ValueError('')old = self._elements[:]self.clear()for thiso in old:if thiso==o: continueelse: self.push(thiso)", "docstring": "remove element *o* from the stack", "id": "f17209:c18:m9"} {"signature": "def clean(self):", "body": "mapping = self._mappingfor key, val in list(mapping.items()):if key() is None:del mapping[key]val.remove(key)", "docstring": "Clean dead weak references from the dictionary", "id": "f17209:c20:m2"} {"signature": "def join(self, a, *args):", "body": "mapping = self._mappingset_a = mapping.setdefault(ref(a), [ref(a)])for arg in args:set_b = mapping.get(ref(arg))if set_b is None:set_a.append(ref(arg))mapping[ref(arg)] = set_aelif set_b is not set_a:if len(set_b) > len(set_a):set_a, set_b = set_b, set_aset_a.extend(set_b)for elem in set_b:mapping[elem] = set_aself.clean()", "docstring": "Join given arguments into the same set. Accepts one or more\narguments.", "id": "f17209:c20:m3"} {"signature": "def joined(self, a, b):", "body": "self.clean()mapping = self._mappingtry:return mapping[ref(a)] is mapping[ref(b)]except KeyError:return False", "docstring": "Returns True if *a* and *b* are members of the same set.", "id": "f17209:c20:m4"} {"signature": "def __iter__(self):", "body": "self.clean()class Token: passtoken = Token()for group in self._mapping.values():if not group[-] is token:yield [x() for x in group]group.append(token)for group in self._mapping.values():if group[-] is token:del group[-]", "docstring": "Iterate over each of the disjoint sets as a list.\n\nThe iterator is invalid if interleaved with calls to join().", "id": "f17209:c20:m5"} {"signature": "def get_siblings(self, a):", "body": "self.clean()siblings = self._mapping.get(ref(a), [ref(a)])return [x() for x in siblings]", "docstring": "Returns all of the items joined with *a*, including itself.", "id": "f17209:c20:m6"} {"signature": "def figaspect(arg):", "body": "isarray = hasattr(arg, '')figsize_min = np.array((,)) figsize_max = np.array((,)) if isarray:nr,nc = arg.shape[:]arr_ratio = float(nr)/ncelse:arr_ratio = float(arg)fig_height = rcParams[''][]newsize = np.array((fig_height/arr_ratio,fig_height))newsize /= min(,*(newsize/figsize_min))newsize /= max(,*(newsize/figsize_max))newsize = np.clip(newsize,figsize_min,figsize_max)return newsize", "docstring": "Create a figure with specified aspect ratio. If *arg* is a number,\nuse that aspect ratio. If *arg* is an array, figaspect will\ndetermine the width and height for a figure that would fit array\npreserving aspect ratio. The figure width, height in inches are\nreturned. 
Be sure to create an axes with equal with and height,\neg\n\nExample usage::\n\n # make a figure twice as tall as it is wide\n w, h = figaspect(2.)\n fig = Figure(figsize=(w,h))\n ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])\n ax.imshow(A, **kwargs)\n\n\n # make a figure with the proper aspect for an array\n A = rand(5,3)\n w, h = figaspect(A)\n fig = Figure(figsize=(w,h))\n ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])\n ax.imshow(A, **kwargs)\n\nThanks to Fernando Perez for this function", "id": "f17210:m0"} {"signature": "def __init__(self, left=None, bottom=None, right=None, top=None,wspace=None, hspace=None):", "body": "self.validate = Trueself.update(left, bottom, right, top, wspace, hspace)", "docstring": "All dimensions are fraction of the figure width or height.\nAll values default to their rc params\n\nThe following attributes are available\n\n*left* = 0.125\n the left side of the subplots of the figure\n*right* = 0.9\n the right side of the subplots of the figure\n*bottom* = 0.1\n the bottom of the subplots of the figure\n*top* = 0.9\n the top of the subplots of the figure\n*wspace* = 0.2\n the amount of width reserved for blank space between subplots\n*hspace* = 0.2\n the amount of height reserved for white space between subplots\n*validate*\n make sure the params are in a legal state (*left*<*right*, etc)", "id": "f17210:c0:m0"} {"signature": "def update(self,left=None, bottom=None, right=None, top=None,wspace=None, hspace=None):", "body": "thisleft = getattr(self, '', None)thisright = getattr(self, '', None)thistop = getattr(self, '', None)thisbottom = getattr(self, '', None)thiswspace = getattr(self, '', None)thishspace = getattr(self, '', None)self._update_this('', left)self._update_this('', right)self._update_this('', bottom)self._update_this('', top)self._update_this('', wspace)self._update_this('', hspace)def reset():self.left = thisleftself.right = thisrightself.top = thistopself.bottom = thisbottomself.wspace = thiswspaceself.hspace = thishspaceif self.validate:if self.left>=self.right:reset()raise ValueError('')if self.bottom>=self.top:reset()raise ValueError('')", "docstring": "Update the current values. 
If any kwarg is None, default to\nthe current value, if set, otherwise to rc", "id": "f17210:c0:m1"} {"signature": "def __init__(self,figsize = None, dpi = None, facecolor = None, edgecolor = None, linewidth = , frameon = True, subplotpars = None, ):", "body": "Artist.__init__(self)self.callbacks = cbook.CallbackRegistry(('', ))if figsize is None : figsize = rcParams['']if dpi is None : dpi = rcParams['']if facecolor is None: facecolor = rcParams['']if edgecolor is None: edgecolor = rcParams['']self.dpi_scale_trans = Affine2D()self.dpi = dpiself.bbox_inches = Bbox.from_bounds(, , *figsize)self.bbox = TransformedBbox(self.bbox_inches, self.dpi_scale_trans)self.frameon = frameonself.transFigure = BboxTransformTo(self.bbox)self.patch = self.figurePatch = Rectangle(xy=(,), width=, height=,facecolor=facecolor, edgecolor=edgecolor,linewidth=linewidth,)self._set_artist_props(self.patch)self._hold = rcParams['']self.canvas = Noneif subplotpars is None:subplotpars = SubplotParams()self.subplotpars = subplotparsself._axstack = Stack() self.axes = []self.clf()self._cachedRenderer = None", "docstring": "*figsize*\n w,h tuple in inches\n*dpi*\n dots per inch\n*facecolor*\n the figure patch facecolor; defaults to rc ``figure.facecolor``\n*edgecolor*\n the figure patch edge color; defaults to rc ``figure.edgecolor``\n*linewidth*\n the figure patch edge linewidth; the default linewidth of the frame\n*frameon*\n if False, suppress drawing the figure frame\n*subplotpars*\n a :class:`SubplotParams` instance, defaults to rc", "id": "f17210:c1:m1"} {"signature": "def autofmt_xdate(self, bottom=, rotation=, ha=''):", "body": "allsubplots = np.alltrue([hasattr(ax, '') for ax in self.axes])if len(self.axes)==:for label in ax.get_xticklabels():label.set_ha(ha)label.set_rotation(rotation)else:if allsubplots:for ax in self.get_axes():if ax.is_last_row():for label in ax.get_xticklabels():label.set_ha(ha)label.set_rotation(rotation)else:for label in ax.get_xticklabels():label.set_visible(False)ax.set_xlabel('')if allsubplots:self.subplots_adjust(bottom=bottom)", "docstring": "Date ticklabels often overlap, so it is useful to rotate them\nand right align them. Also, a common use case is a number of\nsubplots with shared xaxes where the x-axis is date data. 
The\nticklabels are often long, and it helps to rotate them on the\nbottom subplot and turn them off on other subplots, as well as\nturn off xlabels.\n\n*bottom*\n the bottom of the subplots for :meth:`subplots_adjust`\n*rotation*\n the rotation of the xtick labels\n*ha*\n the horizontal alignment of the xticklabels", "id": "f17210:c1:m4"} {"signature": "def get_children(self):", "body": "children = [self.patch]children.extend(self.artists)children.extend(self.axes)children.extend(self.lines)children.extend(self.patches)children.extend(self.texts)children.extend(self.images)children.extend(self.legends)return children", "docstring": "get a list of artists contained in the figure", "id": "f17210:c1:m5"} {"signature": "def contains(self, mouseevent):", "body": "if callable(self._contains): return self._contains(self,mouseevent)inside = self.bbox.contains(mouseevent.x,mouseevent.y)return inside,{}", "docstring": "Test whether the mouse event occurred on the figure.\n\nReturns True,{}", "id": "f17210:c1:m6"} {"signature": "def get_window_extent(self, *args, **kwargs):", "body": "return self.bbox", "docstring": "get the figure bounding box in display space; kwargs are void", "id": "f17210:c1:m7"} {"signature": "def suptitle(self, t, **kwargs):", "body": "x = kwargs.pop('', )y = kwargs.pop('', )if ('' not in kwargs) and ('' not in kwargs):kwargs[''] = ''if ('' not in kwargs) and ('' not in kwargs):kwargs[''] = ''t = self.text(x, y, t, **kwargs)return t", "docstring": "Add a centered title to the figure.\n\nkwargs are :class:`matplotlib.text.Text` properties. Using figure\ncoordinates, the defaults are:\n\n - *x* = 0.5\n the x location of text in figure coords\n\n - *y* = 0.98\n the y location of the text in figure coords\n\n - *horizontalalignment* = 'center'\n the horizontal alignment of the text\n\n - *verticalalignment* = 'top'\n the vertical alignment of the text\n\nA :class:`matplotlib.text.Text` instance is returned.\n\nExample::\n\n fig.suptitle('this is the figure title', fontsize=12)", "id": "f17210:c1:m8"} {"signature": "def set_canvas(self, canvas):", "body": "self.canvas = canvas", "docstring": "Set the canvas that contains the figure\n\nACCEPTS: a FigureCanvas instance", "id": "f17210:c1:m9"} {"signature": "def hold(self, b=None):", "body": "if b is None: self._hold = not self._holdelse: self._hold = b", "docstring": "Set the hold state. If hold is None (default), toggle the\nhold state. 
Else set the hold state to boolean value b.\n\nEg::\n\n hold() # toggle hold\n hold(True) # hold is on\n hold(False) # hold is off", "id": "f17210:c1:m10"} {"signature": "def figimage(self, X,xo=,yo=,alpha=,norm=None,cmap=None,vmin=None,vmax=None,origin=None):", "body": "if not self._hold: self.clf()im = FigureImage(self, cmap, norm, xo, yo, origin)im.set_array(X)im.set_alpha(alpha)if norm is None:im.set_clim(vmin, vmax)self.images.append(im)return im", "docstring": "call signatures::\n\n figimage(X, **kwargs)\n\nadds a non-resampled array *X* to the figure.\n\n::\n\n figimage(X, xo, yo)\n\nwith pixel offsets *xo*, *yo*,\n\n*X* must be a float array:\n\n* If *X* is MxN, assume luminance (grayscale)\n* If *X* is MxNx3, assume RGB\n* If *X* is MxNx4, assume RGBA\n\nOptional keyword arguments:\n\n ========= ==========================================================\n Keyword Description\n ========= ==========================================================\n xo or yo An integer, the *x* and *y* image offset in pixels\n cmap a :class:`matplotlib.cm.ColorMap` instance, eg cm.jet.\n If None, default to the rc ``image.cmap`` value\n norm a :class:`matplotlib.colors.Normalize` instance. The\n default is normalization(). This scales luminance -> 0-1\n vmin|vmax are used to scale a luminance image to 0-1. If either is\n None, the min and max of the luminance values will be\n used. Note if you pass a norm instance, the settings for\n *vmin* and *vmax* will be ignored.\n alpha the alpha blending value, default is 1.0\n origin [ 'upper' | 'lower' ] Indicates where the [0,0] index of\n the array is in the upper left or lower left corner of\n the axes. Defaults to the rc image.origin value\n ========= ==========================================================\n\nfigimage complements the axes image\n(:meth:`~matplotlib.axes.Axes.imshow`) which will be resampled\nto fit the current axes. If you want a resampled image to\nfill the entire figure, you can define an\n:class:`~matplotlib.axes.Axes` with size [0,1,0,1].\n\nAn :class:`matplotlib.image.FigureImage` instance is returned.\n\n.. 
plot:: mpl_examples/pylab_examples/figimage_demo.py", "id": "f17210:c1:m11"} {"signature": "def set_size_inches(self, *args, **kwargs):", "body": "forward = kwargs.get('', False)if len(args)==:w,h = args[]else:w,h = argsdpival = self.dpiself.bbox_inches.p1 = w, hif forward:dpival = self.dpicanvasw = w*dpivalcanvash = h*dpivalmanager = getattr(self.canvas, '', None)if manager is not None:manager.resize(int(canvasw), int(canvash))", "docstring": "set_size_inches(w,h, forward=False)\n\nSet the figure size in inches\n\nUsage::\n\n fig.set_size_inches(w,h) # OR\n fig.set_size_inches((w,h) )\n\noptional kwarg *forward=True* will cause the canvas size to be\nautomatically updated; eg you can resize the figure window\nfrom the shell\n\nWARNING: forward=True is broken on all backends except GTK*\nand WX*\n\nACCEPTS: a w,h tuple with w,h in inches", "id": "f17210:c1:m13"} {"signature": "def get_edgecolor(self):", "body": "return self.patch.get_edgecolor()", "docstring": "Get the edge color of the Figure rectangle", "id": "f17210:c1:m15"} {"signature": "def get_facecolor(self):", "body": "return self.patch.get_facecolor()", "docstring": "Get the face color of the Figure rectangle", "id": "f17210:c1:m16"} {"signature": "def get_figwidth(self):", "body": "return self.bbox_inches.width", "docstring": "Return the figwidth as a float", "id": "f17210:c1:m17"} {"signature": "def get_figheight(self):", "body": "return self.bbox_inches.height", "docstring": "Return the figheight as a float", "id": "f17210:c1:m18"} {"signature": "def get_dpi(self):", "body": "return self.dpi", "docstring": "Return the dpi as a float", "id": "f17210:c1:m19"} {"signature": "def get_frameon(self):", "body": "return self.frameon", "docstring": "get the boolean indicating frameon", "id": "f17210:c1:m20"} {"signature": "def set_edgecolor(self, color):", "body": "self.patch.set_edgecolor(color)", "docstring": "Set the edge color of the Figure rectangle\n\nACCEPTS: any matplotlib color - see help(colors)", "id": "f17210:c1:m21"} {"signature": "def set_facecolor(self, color):", "body": "self.patch.set_facecolor(color)", "docstring": "Set the face color of the Figure rectangle\n\nACCEPTS: any matplotlib color - see help(colors)", "id": "f17210:c1:m22"} {"signature": "def set_dpi(self, val):", "body": "self.dpi = val", "docstring": "Set the dots-per-inch of the figure\n\nACCEPTS: float", "id": "f17210:c1:m23"} {"signature": "def set_figwidth(self, val):", "body": "self.bbox_inches.x1 = val", "docstring": "Set the width of the figure in inches\n\nACCEPTS: float", "id": "f17210:c1:m24"} {"signature": "def set_figheight(self, val):", "body": "self.bbox_inches.y1 = val", "docstring": "Set the height of the figure in inches\n\nACCEPTS: float", "id": "f17210:c1:m25"} {"signature": "def set_frameon(self, b):", "body": "self.frameon = b", "docstring": "Set whether the figure frame (background) is displayed or invisible\n\nACCEPTS: boolean", "id": "f17210:c1:m26"} {"signature": "def delaxes(self, a):", "body": "self.axes.remove(a)self._axstack.remove(a)keys = []for key, thisax in self._seen.items():if a==thisax: del self._seen[key]for func in self._axobservers: func(self)", "docstring": "remove a from the figure and update the current axes", "id": "f17210:c1:m27"} {"signature": "def _make_key(self, *args, **kwargs):", "body": "def fixitems(items):ret = []for k, v in items:if iterable(v): v = tuple(v)ret.append((k,v))return tuple(ret)def fixlist(args):ret = []for a in args:if iterable(a): a = tuple(a)ret.append(a)return tuple(ret)key = 
fixlist(args), fixitems(kwargs.items())return key", "docstring": "make a hashable key out of args and kwargs", "id": "f17210:c1:m28"} {"signature": "def add_axes(self, *args, **kwargs):", "body": "key = self._make_key(*args, **kwargs)if key in self._seen:ax = self._seen[key]self.sca(ax)return axif not len(args): returnif isinstance(args[], Axes):a = args[]assert(a.get_figure() is self)else:rect = args[]ispolar = kwargs.pop('', False)projection = kwargs.pop('', None)if ispolar:if projection is not None and projection != '':raise ValueError(\"\" +\"\" %projection)projection = ''a = projection_factory(projection, self, rect, **kwargs)self.axes.append(a)self._axstack.push(a)self.sca(a)self._seen[key] = areturn a", "docstring": "Add an a axes with axes rect [*left*, *bottom*, *width*,\n*height*] where all quantities are in fractions of figure\nwidth and height. kwargs are legal\n:class:`~matplotlib.axes.Axes` kwargs plus *projection* which\nsets the projection type of the axes. (For backward\ncompatibility, ``polar=True`` may also be provided, which is\nequivalent to ``projection='polar'``). Valid values for\n*projection* are: %(list)s. Some of these projections support\nadditional kwargs, which may be provided to :meth:`add_axes`::\n\n rect = l,b,w,h\n fig.add_axes(rect)\n fig.add_axes(rect, frameon=False, axisbg='g')\n fig.add_axes(rect, polar=True)\n fig.add_axes(rect, projection='polar')\n fig.add_axes(ax) # add an Axes instance\n\nIf the figure already has an axes with the same parameters,\nthen it will simply make that axes current and return it. If\nyou do not want this behavior, eg. you want to force the\ncreation of a new axes, you must use a unique set of args and\nkwargs. The axes :attr:`~matplotlib.axes.Axes.label`\nattribute has been exposed for this purpose. Eg., if you want\ntwo axes that are otherwise identical to be added to the\nfigure, make sure you give them unique labels::\n\n fig.add_axes(rect, label='axes1')\n fig.add_axes(rect, label='axes2')\n\nThe :class:`~matplotlib.axes.Axes` instance will be returned.\n\nThe following kwargs are supported:\n\n%(Axes)s", "id": "f17210:c1:m29"} {"signature": "def add_subplot(self, *args, **kwargs):", "body": "kwargs = kwargs.copy()if not len(args): returnif isinstance(args[], SubplotBase):a = args[]assert(a.get_figure() is self)else:ispolar = kwargs.pop('', False)projection = kwargs.pop('', None)if ispolar:if projection is not None and projection != '':raise ValueError(\"\" +\"\" %projection)projection = ''projection_class = get_projection_class(projection)key = self._make_key(*args, **kwargs)if key in self._seen:ax = self._seen[key]if isinstance(ax, projection_class):self.sca(ax)return axelse:self.axes.remove(ax)self._axstack.remove(ax)a = subplot_class_factory(projection_class)(self, *args, **kwargs)self._seen[key] = aself.axes.append(a)self._axstack.push(a)self.sca(a)return a", "docstring": "Add a subplot. Examples:\n\n fig.add_subplot(111)\n fig.add_subplot(1,1,1) # equivalent but more general\n fig.add_subplot(212, axisbg='r') # add subplot with red background\n fig.add_subplot(111, polar=True) # add a polar subplot\n fig.add_subplot(sub) # add Subplot instance sub\n\n*kwargs* are legal :class:`!matplotlib.axes.Axes` kwargs plus\n*projection*, which chooses a projection type for the axes.\n(For backward compatibility, *polar=True* may also be\nprovided, which is equivalent to *projection='polar'*). Valid\nvalues for *projection* are: %(list)s. 
Some of these projections\nsupport additional *kwargs*, which may be provided to\n:meth:`add_axes`.\n\nThe :class:`~matplotlib.axes.Axes` instance will be returned.\n\nIf the figure already has a subplot with key (*args*,\n*kwargs*) then it will simply make that subplot current and\nreturn it.\n\nThe following kwargs are supported:\n\n%(Axes)s", "id": "f17210:c1:m30"} {"signature": "def clf(self):", "body": "self.suppressComposite = Noneself.callbacks = cbook.CallbackRegistry(('', ))for ax in tuple(self.axes): ax.cla()self.delaxes(ax) toolbar = getattr(self.canvas, '', None)if toolbar is not None:toolbar.update()self._axstack.clear()self._seen = {}self.artists = []self.lines = []self.patches = []self.texts=[]self.images = []self.legends = []self._axobservers = []", "docstring": "Clear the figure", "id": "f17210:c1:m31"} {"signature": "def clear(self):", "body": "self.clf()", "docstring": "Clear the figure -- synonym for fig.clf", "id": "f17210:c1:m32"} {"signature": "def draw(self, renderer):", "body": "if not self.get_visible(): returnrenderer.open_group('')if self.frameon: self.patch.draw(renderer)for p in self.patches: p.draw(renderer)for l in self.lines: l.draw(renderer)for a in self.artists: a.draw(renderer)composite = renderer.option_image_nocomposite()if self.suppressComposite is not None:composite = self.suppressCompositeif len(self.images)<= or composite or not allequal([im.origin for im in self.images]):for im in self.images:im.draw(renderer)else:mag = renderer.get_image_magnification()ims = [(im.make_image(mag), im.ox, im.oy)for im in self.images]im = _image.from_images(self.bbox.height * mag,self.bbox.width * mag,ims)im.is_grayscale = Falsel, b, w, h = self.bbox.boundsclippath, affine = self.get_transformed_clip_path_and_affine()renderer.draw_image(l, b, im, self.bbox,clippath, affine)for a in self.axes: a.draw(renderer)for t in self.texts: t.draw(renderer)for legend in self.legends:legend.draw(renderer)renderer.close_group('')self._cachedRenderer = rendererself.canvas.draw_event(renderer)", "docstring": "Render the figure using :class:`matplotlib.backend_bases.RendererBase` instance renderer", "id": "f17210:c1:m33"} {"signature": "def draw_artist(self, a):", "body": "assert self._cachedRenderer is not Nonea.draw(self._cachedRenderer)", "docstring": "draw :class:`matplotlib.artist.Artist` instance *a* only --\nthis is available only after the figure is drawn", "id": "f17210:c1:m34"} {"signature": "def legend(self, handles, labels, *args, **kwargs):", "body": "handles = flatten(handles)l = Legend(self, handles, labels, *args, **kwargs)self.legends.append(l)return l", "docstring": "Place a legend in the figure. Labels are a sequence of\nstrings, handles is a sequence of\n:class:`~matplotlib.lines.Line2D` or\n:class:`~matplotlib.patches.Patch` instances, and loc can be a\nstring or an integer specifying the legend location\n\nUSAGE::\n\n legend( (line1, line2, line3),\n ('label1', 'label2', 'label3'),\n 'upper right')\n\nThe *loc* location codes are::\n\n 'best' : 0, (currently not supported for figure legends)\n 'upper right' : 1,\n 'upper left' : 2,\n 'lower left' : 3,\n 'lower right' : 4,\n 'right' : 5,\n 'center left' : 6,\n 'center right' : 7,\n 'lower center' : 8,\n 'upper center' : 9,\n 'center' : 10,\n\n*loc* can also be an (x,y) tuple in figure coords, which\nspecifies the lower left of the legend box. figure coords are\n(0,0) is the left, bottom of the figure and 1,1 is the right,\ntop.\n\nThe legend instance is returned. 
The following kwargs are supported\n\n*loc*\n the location of the legend\n*numpoints*\n the number of points in the legend line\n*prop*\n a :class:`matplotlib.font_manager.FontProperties` instance\n*pad*\n the fractional whitespace inside the legend border\n*markerscale*\n the relative size of legend markers vs. original\n*shadow*\n if True, draw a shadow behind legend\n*labelsep*\n the vertical space between the legend entries\n*handlelen*\n the length of the legend lines\n*handletextsep*\n the space between the legend line and legend text\n*axespad*\n the border between the axes and legend edge\n\n.. plot:: mpl_examples/pylab_examples/figlegend_demo.py", "id": "f17210:c1:m36"} {"signature": "def text(self, x, y, s, *args, **kwargs):", "body": "override = _process_text_args({}, *args, **kwargs)t = Text(x=x, y=y, text=s,)t.update(override)self._set_artist_props(t)self.texts.append(t)return t", "docstring": "Call signature::\n\n figtext(x, y, s, fontdict=None, **kwargs)\n\nAdd text to figure at location *x*, *y* (relative 0-1\ncoords). See :func:`~matplotlib.pyplot.text` for the meaning\nof the other arguments.\n\nkwargs control the :class:`~matplotlib.text.Text` properties:\n\n%(Text)s", "id": "f17210:c1:m37"} {"signature": "def gca(self, **kwargs):", "body": "ax = self._axstack()if ax is not None:ispolar = kwargs.get('', False)projection = kwargs.get('', None)if ispolar:if projection is not None and projection != '':raise ValueError(\"\" +\"\" %projection)projection = ''projection_class = get_projection_class(projection)if isinstance(ax, projection_class):return axreturn self.add_subplot(, **kwargs)", "docstring": "Return the current axes, creating one if necessary\n\nThe following kwargs are supported\n%(Axes)s", "id": "f17210:c1:m39"} {"signature": "def sca(self, a):", "body": "self._axstack.bubble(a)for func in self._axobservers: func(self)return a", "docstring": "Set the current axes to be a and return a", "id": "f17210:c1:m40"} {"signature": "def add_axobserver(self, func):", "body": "self._axobservers.append(func)", "docstring": "whenever the axes state change, func(self) will be called", "id": "f17210:c1:m41"} {"signature": "def savefig(self, *args, **kwargs):", "body": "for key in ('', '', ''):if key not in kwargs:kwargs[key] = rcParams[''%key]transparent = kwargs.pop('', False)if transparent:original_figure_alpha = self.patch.get_alpha()self.patch.set_alpha()original_axes_alpha = []for ax in self.axes:patch = ax.patchoriginal_axes_alpha.append(patch.get_alpha())patch.set_alpha()self.canvas.print_figure(*args, **kwargs)if transparent:self.patch.set_alpha(original_figure_alpha)for ax, alpha in zip(self.axes, original_axes_alpha):ax.patch.set_alpha(alpha)", "docstring": "call signature::\n\n savefig(fname, dpi=None, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format=None,\n transparent=False):\n\nSave the current figure.\n\nThe output formats available depend on the backend being used.\n\nArguments:\n\n *fname*:\n A string containing a path to a filename, or a Python file-like object.\n\n If *format* is *None* and *fname* is a string, the output\n format is deduced from the extension of the filename.\n\nKeyword arguments:\n\n *dpi*: [ None | scalar > 0 ]\n The resolution in dots per inch. 
If *None* it will default to\n the value ``savefig.dpi`` in the matplotlibrc file.\n\n *facecolor*, *edgecolor*:\n the colors of the figure rectangle\n\n *orientation*: [ 'landscape' | 'portrait' ]\n not supported on all backends; currently only on postscript output\n\n *papertype*:\n One of 'letter', 'legal', 'executive', 'ledger', 'a0' through\n 'a10', 'b0' through 'b10'. Only supported for postscript\n output.\n\n *format*:\n One of the file extensions supported by the active\n backend. Most backends support png, pdf, ps, eps and svg.\n\n *transparent*:\n If *True*, the figure patch and axes patches will all be\n transparent. This is useful, for example, for displaying\n a plot on top of a colored background on a web page. The\n transparency of these patches will be restored to their\n original values upon exit of this function.", "id": "f17210:c1:m42"} {"signature": "def subplots_adjust(self, *args, **kwargs):", "body": "self.subplotpars.update(*args, **kwargs)import matplotlib.axesfor ax in self.axes:if not isinstance(ax, matplotlib.axes.SubplotBase):if ax._sharex is not None and isinstance(ax._sharex, matplotlib.axes.SubplotBase):ax._sharex.update_params()ax.set_position(ax._sharex.figbox)elif ax._sharey is not None and isinstance(ax._sharey, matplotlib.axes.SubplotBase):ax._sharey.update_params()ax.set_position(ax._sharey.figbox)else:ax.update_params()ax.set_position(ax.figbox)", "docstring": "fig.subplots_adjust(left=None, bottom=None, right=None, wspace=None, hspace=None)\n\nUpdate the :class:`SubplotParams` with *kwargs* (defaulting to rc where\nNone) and update the subplot locations", "id": "f17210:c1:m44"} {"signature": "def ginput(self, n=, timeout=, show_clicks=True):", "body": "blocking_mouse_input = BlockingMouseInput(self)return blocking_mouse_input(n=n, timeout=timeout,show_clicks=show_clicks)", "docstring": "call signature::\n\n ginput(self, n=1, timeout=30, show_clicks=True)\n\nBlocking call to interact with the figure.\n\nThis will wait for *n* clicks from the user and return a list of the\ncoordinates of each click.\n\nIf *timeout* is zero or negative, does not timeout.\n\nIf *n* is zero or negative, accumulate clicks until a middle click\n(or potentially both mouse buttons at once) terminates the input.\n\nRight clicking cancels last input.\n\nThe keyboard can also be used to select points in case your mouse\ndoes not have one or more of the buttons. The delete and backspace\nkeys act like right clicking (i.e., remove last point), the enter key\nterminates input and any other key (not already used by the window\nmanager) selects a point.", "id": "f17210:c1:m45"} {"signature": "def waitforbuttonpress(self, timeout=-):", "body": "blocking_input = BlockingKeyMouseInput(self)return blocking_input(timeout=timeout)", "docstring": "call signature::\n\n waitforbuttonpress(self, timeout=-1)\n\nBlocking call to interact with the figure.\n\nThis will return True is a key was pressed, False if a mouse\nbutton was pressed and None if *timeout* was reached without\neither being pressed.\n\nIf *timeout* is negative, does not timeout.", "id": "f17210:c1:m46"} {"signature": "def fill(strings, linelen=):", "body": "currpos = lasti = result = []for i, s in enumerate(strings):length = len(s)if currpos + length < linelen:currpos += length + else:result.append(''.join(strings[lasti:i]))lasti = icurrpos = lengthresult.append(''.join(strings[lasti:]))return ''.join(result)", "docstring": "Make one string from sequence of strings, with whitespace\n in between. 
The whitespace is chosen to form lines of at most\n linelen characters, if possible.", "id": "f17211:m0"} {"signature": "def pdfRepr(obj):", "body": "if hasattr(obj, ''):return obj.pdfRepr()elif isinstance(obj, float):if not npy.isfinite(obj):raise ValueError(\"\")r = \"\" % objreturn r.rstrip('').rstrip('')elif isinstance(obj, int):return \"\" % objelif is_string_like(obj):return '' + _string_escape_regex.sub(r'', obj) + ''elif isinstance(obj, dict):r = [\"\"]r.extend([\"\" % (Name(key).pdfRepr(), pdfRepr(val))for key, val in list(obj.items())])r.append(\"\")return fill(r)elif isinstance(obj, (list, tuple)):r = [\"\"]r.extend([pdfRepr(val) for val in obj])r.append(\"\")return fill(r)elif isinstance(obj, bool):return ['', ''][obj]elif obj is None:return ''elif isinstance(obj, datetime):r = obj.strftime('')if time.daylight: z = time.altzoneelse: z = time.timezoneif z == : r += ''elif z < : r += \"\" % ((-z)//, (-z)%)else: r += \"\" % (z//, z%)return pdfRepr(r)elif isinstance(obj, BboxBase):return fill([pdfRepr(val) for val in obj.bounds])else:raise TypeError(\"\"% type(obj))", "docstring": "Map Python objects to PDF syntax.", "id": "f17211:m1"} {"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "FigureClass = kwargs.pop('', Figure)thisFig = FigureClass(*args, **kwargs)canvas = FigureCanvasPdf(thisFig)manager = FigureManagerPdf(canvas, num)return manager", "docstring": "Create a new figure manager instance", "id": "f17211:m2"} {"signature": "def __init__(self, id, len, file, extra=None):", "body": "self.id = id self.len = len self.pdfFile = fileself.file = file.fh self.compressobj = None if extra is None: self.extra = dict()else: self.extra = extraself.pdfFile.recordXref(self.id)if rcParams['']:self.compressobj = zlib.compressobj(rcParams[''])if self.len is None:self.file = StringIO()else:self._writeHeader()self.pos = self.file.tell()", "docstring": "id: object id of stream; len: an unused Reference object for the\n length of the stream, or None (to use a memory buffer); file:\n a PdfFile; extra: a dictionary of extra key-value pairs to\n include in the stream header", "id": "f17211:c3:m0"} {"signature": "def end(self):", "body": "self._flush()if self.len is None:contents = self.file.getvalue()self.len = len(contents)self.file = self.pdfFile.fhself._writeHeader()self.file.write(contents)self.file.write(\"\")else:length = self.file.tell() - self.posself.file.write(\"\")self.pdfFile.writeObject(self.len, length)", "docstring": "Finalize stream.", "id": "f17211:c3:m2"} {"signature": "def write(self, data):", "body": "if self.compressobj is None:self.file.write(data)else:compressed = self.compressobj.compress(data)self.file.write(compressed)", "docstring": "Write some data on the stream.", "id": "f17211:c3:m3"} {"signature": "def _flush(self):", "body": "if self.compressobj is not None:compressed = self.compressobj.flush()self.file.write(compressed)self.compressobj = None", "docstring": "Flush the compression object.", "id": "f17211:c3:m4"} {"signature": "def fontName(self, fontprop):", "body": "if is_string_like(fontprop):filename = fontpropelif rcParams['']:filename = findfont(fontprop, fontext='')else:filename = findfont(fontprop)Fx = self.fontNames.get(filename)if Fx is None:Fx = Name('' % self.nextFont)self.fontNames[filename] = Fxself.nextFont += return Fx", "docstring": "Select a font based on fontprop and return a name suitable for\nOp.selectfont. 
If fontprop is a string, it will be interpreted\nas the filename of the font.", "id": "f17211:c4:m6"} {"signature": "def embedTTF(self, filename, characters):", "body": "font = FT2Font(str(filename))fonttype = rcParams['']def cvt(length, upe=font.units_per_EM, nearest=True):\"\"value = length / upe * if nearest: return round(value)if value < : return floor(value)else: return ceil(value)def embedTTFType3(font, characters, descriptor):\"\"\"\"\"\"widthsObject = self.reserveObject('')fontdescObject = self.reserveObject('')fontdictObject = self.reserveObject('')charprocsObject = self.reserveObject('')differencesArray = []firstchar, lastchar = , bbox = [cvt(x, nearest=False) for x in font.bbox]fontdict = {'' : Name(''),'' : ps_name,'' : firstchar,'' : lastchar,'' : fontdescObject,'' : Name(''),'' : descriptor[''],'' : bbox,'' : [ , , , , , ],'' : charprocsObject,'' : {'' : Name(''),'' : differencesArray},'' : widthsObject}from encodings import cp1252if hasattr(cp1252, ''):def decode_char(charcode):return cp1252.decoding_map[charcode] or else:def decode_char(charcode):return ord(cp1252.decoding_table[charcode])def get_char_width(charcode):str = decode_char(charcode)width = font.load_char(str, flags=LOAD_NO_SCALE|LOAD_NO_HINTING).horiAdvancereturn cvt(width)widths = [ get_char_width(charcode) for charcode in range(firstchar, lastchar+) ]descriptor[''] = max(widths)cmap = font.get_charmap()glyph_ids = []differences = []multi_byte_chars = set()for c in characters:ccode = cgind = cmap.get(ccode) or glyph_ids.append(gind)glyph_name = font.get_glyph_name(gind)if ccode <= :differences.append((ccode, glyph_name))else:multi_byte_chars.add(glyph_name)differences.sort()last_c = -for c, name in differences:if c != last_c + :differencesArray.append(c)differencesArray.append(Name(name))last_c = crawcharprocs = ttconv.get_pdf_charprocs(filename, glyph_ids)charprocs = {}charprocsRef = {}for charname, stream in list(rawcharprocs.items()):charprocDict = { '': len(stream) }if charname in multi_byte_chars:charprocDict[''] = Name('')charprocDict[''] = Name('')charprocDict[''] = bboxstream = stream[stream.find(\"\") + :]charprocObject = self.reserveObject('')self.beginStream(charprocObject.id, None, charprocDict)self.currentstream.write(stream)self.endStream()if charname in multi_byte_chars:name = self._get_xobject_symbol_name(filename, charname)self.multi_byte_charprocs[name] = charprocObjectelse:charprocs[charname] = charprocObjectself.writeObject(fontdictObject, fontdict)self.writeObject(fontdescObject, descriptor)self.writeObject(widthsObject, widths)self.writeObject(charprocsObject, charprocs)return fontdictObjectdef embedTTFType42(font, characters, descriptor):\"\"\"\"\"\"fontdescObject = self.reserveObject('')cidFontDictObject = self.reserveObject('')type0FontDictObject = self.reserveObject('')cidToGidMapObject = self.reserveObject('')fontfileObject = self.reserveObject('')wObject = self.reserveObject('')toUnicodeMapObject = self.reserveObject('')cidFontDict = {'' : Name(''),'' : Name(''),'' : ps_name,'' : {'' : '','' : '','' : },'' : fontdescObject,'' : wObject,'' : cidToGidMapObject}type0FontDict = {'' : Name(''),'' : Name(''),'' : ps_name,'' : Name(''),'' : [cidFontDictObject],'' : toUnicodeMapObject}descriptor[''] = fontfileObjectlength1Object = self.reserveObject('')self.beginStream(fontfileObject.id,self.reserveObject(''),{'': length1Object})fontfile = open(filename, '')length1 = while True:data = fontfile.read()if not data: breaklength1 += 
len(data)self.currentstream.write(data)fontfile.close()self.endStream()self.writeObject(length1Object, length1)cid_to_gid_map = [''] * cmap = font.get_charmap()unicode_mapping = []widths = []max_ccode = for c in characters:ccode = cgind = cmap.get(ccode) or glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)widths.append((ccode, glyph.horiAdvance / ))if ccode < :cid_to_gid_map[ccode] = chr(gind)max_ccode = max(ccode, max_ccode)widths.sort()cid_to_gid_map = cid_to_gid_map[:max_ccode + ]last_ccode = -w = []max_width = unicode_groups = []for ccode, width in widths:if ccode != last_ccode + :w.append(ccode)w.append([width])unicode_groups.append([ccode, ccode])else:w[-].append(width)unicode_groups[-][] = ccodemax_width = max(max_width, width)last_ccode = ccodeunicode_bfrange = []for start, end in unicode_groups:unicode_bfrange.append(\"\" %(start, end,\"\".join([\"\" % x for x in range(start, end+)])))unicode_cmap = (self._identityToUnicodeCMap %(len(unicode_groups),\"\".join(unicode_bfrange)))cid_to_gid_map = \"\".join(cid_to_gid_map).encode(\"\")self.beginStream(cidToGidMapObject.id,None,{'': len(cid_to_gid_map)})self.currentstream.write(cid_to_gid_map)self.endStream()self.beginStream(toUnicodeMapObject.id,None,{'': unicode_cmap})self.currentstream.write(unicode_cmap)self.endStream()descriptor[''] = max_widthself.writeObject(cidFontDictObject, cidFontDict)self.writeObject(type0FontDictObject, type0FontDict)self.writeObject(fontdescObject, descriptor)self.writeObject(wObject, w)return type0FontDictObjectps_name = Name(font.get_sfnt()[(,,,)])pclt = font.get_sfnt_table('')or { '': , '': }post = font.get_sfnt_table('')or { '': (,) }ff = font.face_flagssf = font.style_flagsflags = symbolic = False if ff & FIXED_WIDTH: flags |= << if : flags |= << if symbolic: flags |= << else: flags |= << if sf & ITALIC: flags |= << if : flags |= << if : flags |= << if : flags |= << descriptor = {'' : Name(''),'' : ps_name,'' : flags,'' : [ cvt(x, nearest=False) for x in font.bbox ],'' : cvt(font.ascender, nearest=False),'' : cvt(font.descender, nearest=False),'' : cvt(pclt[''], nearest=False),'' : cvt(pclt['']),'' : post[''][], '' : }if is_opentype_cff_font(filename):fonttype = warnings.warn((\"\" +\"\") %os.path.basename(filename))if fonttype == :return embedTTFType3(font, characters, descriptor)elif fonttype == :return embedTTFType42(font, characters, descriptor)", "docstring": "Embed the TTF font from the named file into the document.", "id": "f17211:c4:m11"} {"signature": "def alphaState(self, alpha):", "body": "state = self.alphaStates.get(alpha, None)if state is not None:return state[]name = Name('' % self.nextAlphaState)self.nextAlphaState += self.alphaStates[alpha] =(name, { '': Name(''),'': alpha, '': alpha })return name", "docstring": "Return name of an ExtGState that sets alpha to the given value", "id": "f17211:c4:m12"} {"signature": "def imageObject(self, image):", "body": "pair = self.images.get(image, None)if pair is not None:return pair[]name = Name('' % self.nextImage)ob = self.reserveObject('' % self.nextImage)self.nextImage += self.images[image] = (name, ob)return name", "docstring": "Return name of an image XObject representing the given image.", "id": "f17211:c4:m15"} {"signature": "def markerObject(self, path, trans, fillp, lw):", "body": "key = (path, trans, fillp is not None, lw)result = self.markers.get(key)if result is None:name = Name('' % len(self.markers))ob = self.reserveObject('' % len(self.markers))self.markers[key] = (name, ob, path, trans, fillp, lw)else:name = result[]return 
name", "docstring": "Return name of a marker XObject representing the given path.", "id": "f17211:c4:m19"} {"signature": "def reserveObject(self, name=''):", "body": "id = self.nextObjectself.nextObject += self.xrefTable.append([None, , name])return Reference(id)", "docstring": "Reserve an ID for an indirect object.\n The name is used for debugging in case we forget to print out\n the object with writeObject.", "id": "f17211:c4:m23"} {"signature": "def writeXref(self):", "body": "self.startxref = self.fh.tell()self.write(\"\" % self.nextObject)i = borken = Falsefor offset, generation, name in self.xrefTable:if offset is None:print('' % (i, name), file=sys.stderr)borken = Trueelse:self.write(\"\" % (offset, generation))i += if borken:raise AssertionError('')", "docstring": "Write out the xref table.", "id": "f17211:c4:m26"} {"signature": "def writeTrailer(self):", "body": "self.write(\"\")self.write(pdfRepr({'': self.nextObject,'': self.rootObject,'': self.infoObject }))self.write(\"\" % self.startxref)", "docstring": "Write out the PDF trailer.", "id": "f17211:c4:m27"} {"signature": "def track_characters(self, font, s):", "body": "if isinstance(font, str):fname = fontelse:fname = font.fnamerealpath, stat_key = get_realpath_and_stat(fname)used_characters = self.used_characters.setdefault(stat_key, (realpath, set()))used_characters[].update([ord(x) for x in s])", "docstring": "Keeps track of which characters are required from\n each font.", "id": "f17211:c5:m4"} {"signature": "def clip_cmd(self, cliprect, clippath):", "body": "cmds = []while (self._cliprect, self._clippath) != (cliprect, clippath)and self.parent is not None:cmds.extend(self.pop())if (self._cliprect, self._clippath) != (cliprect, clippath):cmds.extend(self.push())if self._cliprect != cliprect:cmds.extend([cliprect, Op.rectangle, Op.clip, Op.endpath])if self._clippath != clippath:cmds.extend(PdfFile.pathOperations(*clippath.get_transformed_path_and_affine()) +[Op.clip, Op.endpath])return cmds", "docstring": "Set clip rectangle. 
Calls self.pop() and self.push().", "id": "f17211:c6:m16"} {"signature": "def delta(self, other):", "body": "cmds = []for params, cmd in self.commands:different = Falsefor p in params:ours = getattr(self, p)theirs = getattr(other, p)try:different = bool(ours != theirs)except ValueError:ours = npy.asarray(ours)theirs = npy.asarray(theirs)different = ours.shape != theirs.shape or npy.any(ours != theirs)if different:breakif different:theirs = [getattr(other, p) for p in params]cmds.extend(cmd(self, *theirs))for p in params:setattr(self, p, getattr(other, p))return cmds", "docstring": "Copy properties of other into self and return PDF commands\nneeded to transform self into other.", "id": "f17211:c6:m17"} {"signature": "def copy_properties(self, other):", "body": "GraphicsContextBase.copy_properties(self, other)self._fillcolor = other._fillcolor", "docstring": "Copy properties of other into self.", "id": "f17211:c6:m18"} {"signature": "def finalize(self):", "body": "cmds = []while self.parent is not None:cmds.extend(self.pop())return cmds", "docstring": "Make sure every pushed graphics state is popped.", "id": "f17211:c6:m19"} {"signature": "def draw_if_interactive():", "body": "if matplotlib.is_interactive():figManager = Gcf.get_active()if figManager is not None:figManager.canvas.draw()", "docstring": "Is called after every pylab drawing command", "id": "f17212:m2"} {"signature": "def show(mainloop=True):", "body": "for manager in Gcf.get_all_fig_managers():manager.window.show()if mainloop and gtk.main_level() == andlen(Gcf.get_all_fig_managers())>:gtk.main()", "docstring": "Show all the figures and enter the gtk main loop\nThis should be the last line of your script", "id": "f17212:m3"} {"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "FigureClass = kwargs.pop('', Figure)thisFig = FigureClass(*args, **kwargs)canvas = FigureCanvasGTK(thisFig)manager = FigureManagerGTK(canvas, num)return manager", "docstring": "Create a new figure manager instance", "id": "f17212:m4"} {"signature": "def _renderer_init(self):", "body": "self._renderer = RendererGDK (self, self.figure.dpi)", "docstring": "Override by GTK backends to select a different renderer\n Renderer should provide the methods:\n set_pixmap ()\n set_width_height ()\n that are used by\n _render_figure() / _pixmap_prepare()", "id": "f17212:c0:m14"} {"signature": "def _pixmap_prepare(self, width, height):", "body": "if _debug: print('' % fn_name())create_pixmap = Falseif width > self._pixmap_width:self._pixmap_width = max (int (self._pixmap_width * ),width)create_pixmap = Trueif height > self._pixmap_height:self._pixmap_height = max (int (self._pixmap_height * ),height)create_pixmap = Trueif create_pixmap:self._pixmap = gdk.Pixmap (self.window, self._pixmap_width,self._pixmap_height)self._renderer.set_pixmap (self._pixmap)", "docstring": "Make sure _._pixmap is at least width, height,\ncreate new pixmap if necessary", "id": "f17212:c0:m15"} {"signature": "def _render_figure(self, pixmap, width, height):", "body": "self._renderer.set_width_height (width, height)self.figure.draw (self._renderer)", "docstring": "used by GTK and GTKcairo. 
GTKAgg overrides", "id": "f17212:c0:m16"} {"signature": "def expose_event(self, widget, event):", "body": "if _debug: print('' % fn_name())if GTK_WIDGET_DRAWABLE(self):if self._need_redraw:x, y, w, h = self.allocationself._pixmap_prepare (w, h)self._render_figure(self._pixmap, w, h)self._need_redraw = Falsex, y, w, h = event.areaself.window.draw_drawable (self.style.fg_gc[self.state],self._pixmap, x, y, x, y, w, h)return False", "docstring": "Expose_event for all GTK backends. Should not be overridden.", "id": "f17212:c0:m17"} {"signature": "def resize(self, width, height):", "body": "self.window.resize(width, height)", "docstring": "set the canvas size in pixels", "id": "f17212:c1:m6"} {"signature": "def draw_rubberband(self, event, x0, y0, x1, y1):", "body": "drawable = self.canvas.windowif drawable is None:returngc = drawable.new_gc()height = self.canvas.figure.bbox.heighty1 = height - y1y0 = height - y0w = abs(x1 - x0)h = abs(y1 - y0)rect = [int(val)for val in (min(x0,x1), min(y0, y1), w, h)]try: lastrect, imageBack = self._imageBackexcept AttributeError:if event.inaxes is None:returnax = event.inaxesl,b,w,h = [int(val) for val in ax.bbox.bounds]b = int(height)-(b+h)axrect = l,b,w,hself._imageBack = axrect, drawable.get_image(*axrect)drawable.draw_rectangle(gc, False, *rect)self._idle_draw_id = else:def idle_draw(*args):drawable.draw_image(gc, imageBack, , , *lastrect)drawable.draw_rectangle(gc, False, *rect)self._idle_draw_id = return Falseif self._idle_draw_id == :self._idle_draw_id = gobject.idle_add(idle_draw)", "docstring": "adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744", "id": "f17212:c2:m5"} {"signature": "def __init__(self, canvas, window):", "body": "gtk.Toolbar.__init__(self)self.canvas = canvasself.win = windowself.set_style(gtk.TOOLBAR_ICONS)if gtk.pygtk_version >= (,,):self._create_toolitems_2_4()self.update = self._update_2_4self.fileselect = FileChooserDialog(title='',parent=self.win,filetypes=self.canvas.get_supported_filetypes(),default_filetype=self.canvas.get_default_filetype())else:self._create_toolitems_2_2()self.update = self._update_2_2self.fileselect = FileSelection(title='',parent=self.win)self.show_all()self.update()", "docstring": "figManager is the FigureManagerGTK instance that contains the\ntoolbar, with attributes figure, window and drawingArea", "id": "f17212:c3:m0"} {"signature": "def panx(self, button, direction):", "body": "for a in self._active:a.xaxis.pan(direction)self.canvas.draw()return True", "docstring": "panx in direction", "id": "f17212:c3:m7"} {"signature": "def pany(self, button, direction):", "body": "for a in self._active:a.yaxis.pan(direction)self.canvas.draw()return True", "docstring": "pany in direction", "id": "f17212:c3:m8"} {"signature": "def zoomx(self, button, direction):", "body": "for a in self._active:a.xaxis.zoom(direction)self.canvas.draw()return True", "docstring": "zoomx in direction", "id": "f17212:c3:m9"} {"signature": "def zoomy(self, button, direction):", "body": "for a in self._active:a.yaxis.zoom(direction)self.canvas.draw()return True", "docstring": "zoomy in direction", "id": "f17212:c3:m10"} {"signature": "def show(self):", "body": "self._updateson = Falsecbox = self.cbox_linepropsfor i in range(self._lastcnt-,-,-):cbox.remove_text(i)for line in self.lines:cbox.append_text(line.get_label())cbox.set_active()self._updateson = Trueself._lastcnt = len(self.lines)self.dlg.show()", "docstring": "populate the combo box", "id": "f17212:c4:m1"} {"signature": "def get_active_line(self):", 
"body": "ind = self.cbox_lineprops.get_active()line = self.lines[ind]return line", "docstring": "get the active line", "id": "f17212:c4:m2"} {"signature": "def get_active_linestyle(self):", "body": "ind = self.cbox_linestyles.get_active()ls = self.linestyles[ind]return ls", "docstring": "get the active lineinestyle", "id": "f17212:c4:m3"} {"signature": "def get_active_marker(self):", "body": "ind = self.cbox_markers.get_active()m = self.markers[ind]return m", "docstring": "get the active lineinestyle", "id": "f17212:c4:m4"} {"signature": "def _update(self):", "body": "if not self._inited or not self._updateson: returnline = self.get_active_line()ls = self.get_active_linestyle()marker = self.get_active_marker()line.set_linestyle(ls)line.set_marker(marker)button = self.wtree.get_widget('')color = button.get_color()r, g, b = [val/ for val in (color.red, color.green, color.blue)]line.set_color((r,g,b))button = self.wtree.get_widget('')color = button.get_color()r, g, b = [val/ for val in (color.red, color.green, color.blue)]line.set_markerfacecolor((r,g,b))line.figure.canvas.draw()", "docstring": "update the active line props from the widgets", "id": "f17212:c4:m5"} {"signature": "def on_combobox_lineprops_changed(self, item):", "body": "if not self._inited: returnself._updateson = Falseline = self.get_active_line()ls = line.get_linestyle()if ls is None: ls = ''self.cbox_linestyles.set_active(self.linestyled[ls])marker = line.get_marker()if marker is None: marker = ''self.cbox_markers.set_active(self.markerd[marker])r,g,b = colorConverter.to_rgb(line.get_color())color = gtk.gdk.Color(*[int(val*) for val in (r,g,b)])button = self.wtree.get_widget('')button.set_color(color)r,g,b = colorConverter.to_rgb(line.get_markerfacecolor())color = gtk.gdk.Color(*[int(val*) for val in (r,g,b)])button = self.wtree.get_widget('')button.set_color(color)self._updateson = True", "docstring": "update the widgets from the active line", "id": "f17212:c4:m6"} {"signature": "def on_colorbutton_markerface_color_set(self, button):", "body": "self._update()", "docstring": "called colorbutton marker clicked", "id": "f17212:c4:m10"} {"signature": "def new_figure_manager( num, *args, **kwargs ):", "body": "if DEBUG: print('')FigureClass = kwargs.pop('', Figure)thisFig = FigureClass( *args, **kwargs )canvas = FigureCanvasQTAgg( thisFig )return FigureManagerQT( canvas, num )", "docstring": "Create a new figure manager instance", "id": "f17213:m0"} {"signature": "def paintEvent( self, e ):", "body": "if DEBUG: print('', self,self.get_width_height())if type(self.replot) is bool: if self.replot:FigureCanvasAgg.draw(self)if QtCore.QSysInfo.ByteOrder == QtCore.QSysInfo.LittleEndian:stringBuffer = self.renderer._renderer.tostring_bgra()else:stringBuffer = self.renderer._renderer.tostring_argb()qImage = QtGui.QImage(stringBuffer, self.renderer.width,self.renderer.height,QtGui.QImage.Format_ARGB32)p = QtGui.QPainter(self)p.drawPixmap(QtCore.QPoint(, ), QtGui.QPixmap.fromImage(qImage))if self.drawRect:p.setPen( QtGui.QPen( QtCore.Qt.black, , QtCore.Qt.DotLine ) )p.drawRect( self.rect[], self.rect[], self.rect[], self.rect[] )p.end()else:bbox = self.replotl, b, r, t = bbox.extentsw = int(r) - int(l)h = int(t) - int(b)t = int(b) + hreg = self.copy_from_bbox(bbox)stringBuffer = reg.to_string_argb()qImage = QtGui.QImage(stringBuffer, w, h, QtGui.QImage.Format_ARGB32)pixmap = QtGui.QPixmap.fromImage(qImage)p = QtGui.QPainter( self )p.drawPixmap(QtCore.QPoint(l, self.renderer.height-t), pixmap)p.end()self.replot = Falseself.drawRect = 
False", "docstring": "Draw to the Agg backend and then copy the image to the qt.drawable.\nIn Qt, all drawing should be done inside of here when a widget is\nshown onscreen.", "id": "f17213:c2:m3"} {"signature": "def draw( self ):", "body": "if DEBUG: print(\"\", self)self.replot = TrueFigureCanvasAgg.draw(self)self.update()QtGui.qApp.processEvents()", "docstring": "Draw the figure when xwindows is ready for the update", "id": "f17213:c2:m4"} {"signature": "def blit(self, bbox=None):", "body": "self.replot = bboxl, b, w, h = bbox.boundst = b + hself.update(l, self.renderer.height-t, w, h)", "docstring": "Blit the region in bbox", "id": "f17213:c2:m5"} {"signature": "def new_figure_manager(num, *args, **kwargs): ", "body": "if _debug: print('' % (self.__class__.__name__, _fn_name()))FigureClass = kwargs.pop('', Figure)thisFig = FigureClass(*args, **kwargs)canvas = FigureCanvasCairo(thisFig)manager = FigureManagerBase(canvas, num)return manager", "docstring": "Create a new figure manager instance", "id": "f17214:m1"} {"signature": "def raise_msg_to_str(msg):", "body": "if not is_string_like(msg):msg = ''.join(map(str, msg))return msg", "docstring": "msg is a return arg from a raise. Join with new lines", "id": "f17215:m1"} {"signature": "def show():", "body": "for manager in Gcf.get_all_fig_managers():manager.show()import matplotlibmatplotlib.interactive(True)if rcParams['']:os.environ[''] = ''if show._needmain:Tk.mainloop()show._needmain = False", "docstring": "Show all the figures and enter the gtk mainloop\n\nThis should be the last line of your script. This function sets\ninteractive mode to True, as detailed on\nhttp://matplotlib.sf.net/interactive.html", "id": "f17215:m4"} {"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "_focus = windowing.FocusManager()FigureClass = kwargs.pop('', Figure)figure = FigureClass(*args, **kwargs)window = Tk.Tk()canvas = FigureCanvasTkAgg(figure, master=window)figManager = FigureManagerTkAgg(canvas, num, window)if matplotlib.is_interactive():figManager.show()return figManager", "docstring": "Create a new figure manager instance", "id": "f17215:m5"} {"signature": "def draw_idle(self):", "body": "d = self._idleself._idle = Falsedef idle_draw(*args):self.draw()self._idle = Trueif d:self._tkcanvas.after_idle(idle_draw)", "docstring": "update drawing area only if idle", "id": "f17215:c0:m4"} {"signature": "def get_tk_widget(self):", "body": "return self._tkcanvas", "docstring": "returns the Tk widget used to implement FigureCanvasTkAgg.\n Although the initial implementation uses a Tk canvas, this routine\n is intended to hide that fact.", "id": "f17215:c0:m5"} {"signature": "def scroll_event_windows(self, event):", "body": "w = event.widget.winfo_containing(event.x_root, event.y_root)if w == self._tkcanvas:x = event.x_root - w.winfo_rootx()y = event.y_root - w.winfo_rooty()y = self.figure.bbox.height - ystep = event.delta/FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)", "docstring": "MouseWheel event processor", "id": "f17215:c0:m10"} {"signature": "def show(self):", "body": "def destroy(*args):self.window = NoneGcf.destroy(self._num)if not self._shown:self.canvas._tkcanvas.bind(\"\", destroy)_focus = windowing.FocusManager()if not self._shown:self.window.deiconify()if sys.platform == '':self.window.update()else:self.canvas.draw()self._shown = True", "docstring": "this function doesn't segfault but causes the\nPyEval_RestoreThread: NULL state bug on win32", "id": "f17215:c1:m2"} {"signature": "def dynamic_update(self):", 
"body": "self.canvas.draw_idle()", "docstring": "update drawing area only if idle", "id": "f17215:c4:m12"} {"signature": "def draw_if_interactive():", "body": "pass", "docstring": "For image backends - is not required\nFor GUI backends - this should be overriden if drawing should be done in\ninteractive python mode", "id": "f17216:m0"} {"signature": "def show():", "body": "for manager in Gcf.get_all_fig_managers():pass", "docstring": "For image backends - is not required\nFor GUI backends - show() is usually the last line of a pylab script and\ntells the backend that it is time to draw. In interactive mode, this may\nbe a do nothing func. See the GTK backend for an example of how to handle\ninteractive versus batch mode", "id": "f17216:m1"} {"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "FigureClass = kwargs.pop('', Figure)thisFig = FigureClass(*args, **kwargs)canvas = FigureCanvasTemplate(thisFig)manager = FigureManagerTemplate(canvas, num)return manager", "docstring": "Create a new figure manager instance", "id": "f17216:m2"} {"signature": "def draw(self):", "body": "renderer = RendererTemplate(self.figure.dpi)self.figure.draw(renderer)", "docstring": "Draw the figure using the renderer", "id": "f17216:c2:m0"} {"signature": "def print_foo(self, filename, *args, **kwargs):", "body": "pass", "docstring": "Write out format foo. The dpi, facecolor and edgecolor are restored\nto their original values after this call, so you don't need to\nsave and restore them.", "id": "f17216:c2:m1"} {"signature": "def draw_if_interactive():", "body": "if matplotlib.is_interactive():figManager = Gcf.get_active()if figManager != None:figManager.canvas.draw()", "docstring": "Is called after every pylab drawing command", "id": "f17217:m1"} {"signature": "def _create_qApp():", "body": "if qt.QApplication.startingUp():if DEBUG: print(\"\")global qAppqApp = qt.QApplication( [\"\"] )qt.QObject.connect( qApp, qt.SIGNAL( \"\" ),qApp, qt.SLOT( \"\" ) )_create_qApp.qAppCreatedHere = True", "docstring": "Only one qApp can exist at a time, so check before creating one", "id": "f17217:m2"} {"signature": "def show():", "body": "for manager in Gcf.get_all_fig_managers():manager.window.show()if DEBUG: print('')figManager = Gcf.get_active()if figManager != None:figManager.canvas.draw()if _create_qApp.qAppCreatedHere:qt.qApp.exec_loop()", "docstring": "Show all the figures and enter the qt main loop\nThis should be the last line of your script", "id": "f17217:m3"} {"signature": "def new_figure_manager( num, *args, **kwargs ):", "body": "FigureClass = kwargs.pop('', Figure)thisFig = FigureClass( *args, **kwargs )canvas = FigureCanvasQT( thisFig )manager = FigureManagerQT( canvas, num )return manager", "docstring": "Create a new figure manager instance", "id": "f17217:m4"} {"signature": "def exception_handler( type, value, tb ):", "body": "msg = ''if hasattr(value, '') and value.filename != None:msg = value.filename + ''if hasattr(value, '') and value.strerror != None:msg += value.strerrorelse:msg += str(value)if len( msg ) : error_msg_qt( msg )", "docstring": "Handle uncaught exceptions\n It does not catch SystemExit", "id": "f17217:m6"} {"signature": "def resize(self, width, height):", "body": "self.window.resize(width, height)", "docstring": "set the canvas size in pixels", "id": "f17217:c1:m4"} {"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "if _debug: print('' % fn_name())FigureClass = kwargs.pop('', Figure)thisFig = FigureClass(*args, **kwargs)canvas = 
FigureCanvasGTKCairo(thisFig)return FigureManagerGTK(canvas, num)", "docstring": "Create a new figure manager instance", "id": "f17218:m0"} {"signature": "def _renderer_init(self):", "body": "if _debug: print('' % (self.__class__.__name__, _fn_name()))self._renderer = RendererGTKCairo (self.figure.dpi)", "docstring": "Override to use cairo (rather than GDK) renderer", "id": "f17218:c1:m0"} {"signature": "def __init__(self, width, height, dpi, vector_renderer, raster_renderer_class=None):", "body": "if raster_renderer_class is None:raster_renderer_class = RendererAggself._raster_renderer_class = raster_renderer_classself._width = widthself._height = heightself.dpi = dpiassert not vector_renderer.option_image_nocomposite()self._vector_renderer = vector_rendererself._raster_renderer = Noneself._rasterizing = self._set_current_renderer(vector_renderer)", "docstring": "width: The width of the canvas in logical units\n\nheight: The height of the canvas in logical units\n\ndpi: The dpi of the canvas\n\nvector_renderer: An instance of a subclass of RendererBase\nthat will be used for the vector drawing.\n\nraster_renderer_class: The renderer class to use for the\nraster drawing. If not provided, this will use the Agg\nbackend (which is currently the only viable option anyway.)", "id": "f17219:c0:m0"} {"signature": "def start_rasterizing(self):", "body": "if self._rasterizing == :self._raster_renderer = self._raster_renderer_class(self._width*self.dpi, self._height*self.dpi, self.dpi)self._set_current_renderer(self._raster_renderer)self._rasterizing += ", "docstring": "Enter \"raster\" mode. All subsequent drawing commands (until\nstop_rasterizing is called) will be drawn with the raster\nbackend.\n\nIf start_rasterizing is called multiple times before\nstop_rasterizing is called, this method has no effect.", "id": "f17219:c0:m2"} {"signature": "def stop_rasterizing(self):", "body": "self._rasterizing -= if self._rasterizing == :self._set_current_renderer(self._vector_renderer)width, height = self._width * self.dpi, self._height * self.dpibuffer, bounds = self._raster_renderer.tostring_rgba_minimized()l, b, w, h = boundsif w > and h > :image = frombuffer(buffer, w, h, True)image.is_grayscale = Falseimage.flipud_out()self._renderer.draw_image(l, height - b - h, image, None)self._raster_renderer = Noneself._rasterizing = False", "docstring": "Exit \"raster\" mode. All of the drawing that was done since\nthe last start_rasterizing command will be copied to the\nvector backend by calling draw_image.\n\nIf stop_rasterizing is called multiple times before\nstart_rasterizing is called, this method has no effect.", "id": "f17219:c0:m3"} {"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "DEBUG_MSG(\"\", , None)backend_wx._create_wx_app()FigureClass = kwargs.pop('', Figure)fig = FigureClass(*args, **kwargs)frame = FigureFrameWxAgg(num, fig)figmgr = frame.get_figure_manager()if matplotlib.is_interactive():figmgr.frame.Show()return figmgr", "docstring": "Create a new figure manager instance", "id": "f17220:m0"} {"signature": "def _py_convert_agg_to_wx_image(agg, bbox):", "body": "image = wx.EmptyImage(int(agg.width), int(agg.height))image.SetData(agg.tostring_rgb())if bbox is None:return imageelse:return wx.ImageFromBitmap(_clipped_image_as_bitmap(image, bbox))", "docstring": "Convert the region of the agg buffer bounded by bbox to a wx.Image. 
If\nbbox is None, the entire buffer is converted.\n\nNote: agg must be a backend_agg.RendererAgg instance.", "id": "f17220:m1"} {"signature": "def _py_convert_agg_to_wx_bitmap(agg, bbox):", "body": "if bbox is None:return wx.BitmapFromImage(_py_convert_agg_to_wx_image(agg, None))else:return _clipped_image_as_bitmap(_py_convert_agg_to_wx_image(agg, None),bbox)", "docstring": "Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If\nbbox is None, the entire buffer is converted.\n\nNote: agg must be a backend_agg.RendererAgg instance.", "id": "f17220:m2"} {"signature": "def _clipped_image_as_bitmap(image, bbox):", "body": "l, b, width, height = bbox.get_bounds()r = l + widtht = b + heightsrcBmp = wx.BitmapFromImage(image)srcDC = wx.MemoryDC()srcDC.SelectObject(srcBmp)destBmp = wx.EmptyBitmap(int(width), int(height))destDC = wx.MemoryDC()destDC.SelectObject(destBmp)destDC.BeginDrawing()x = int(l)y = int(image.GetHeight() - t)destDC.Blit(, , int(width), int(height), srcDC, x, y)destDC.EndDrawing()srcDC.SelectObject(wx.NullBitmap)destDC.SelectObject(wx.NullBitmap)return destBmp", "docstring": "Convert the region of a wx.Image bounded by bbox to a wx.Bitmap.", "id": "f17220:m3"} {"signature": "def _py_WX28_convert_agg_to_wx_image(agg, bbox):", "body": "if bbox is None:image = wx.EmptyImage(int(agg.width), int(agg.height))image.SetData(agg.tostring_rgb())return imageelse:return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))", "docstring": "Convert the region of the agg buffer bounded by bbox to a wx.Image. If\nbbox is None, the entire buffer is converted.\n\nNote: agg must be a backend_agg.RendererAgg instance.", "id": "f17220:m4"} {"signature": "def _py_WX28_convert_agg_to_wx_bitmap(agg, bbox):", "body": "if bbox is None:return wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),agg.buffer_rgba(, ))else:return _WX28_clipped_agg_as_bitmap(agg, bbox)", "docstring": "Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. 
If\nbbox is None, the entire buffer is converted.\n\nNote: agg must be a backend_agg.RendererAgg instance.", "id": "f17220:m5"} {"signature": "def _WX28_clipped_agg_as_bitmap(agg, bbox):", "body": "l, b, width, height = bbox.get_bounds()r = l + widtht = b + heightsrcBmp = wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),agg.buffer_rgba(, ))srcDC = wx.MemoryDC()srcDC.SelectObject(srcBmp)destBmp = wx.EmptyBitmap(int(width), int(height))destDC = wx.MemoryDC()destDC.SelectObject(destBmp)destDC.BeginDrawing()x = int(l)y = int(int(agg.height) - t)destDC.Blit(, , int(width), int(height), srcDC, x, y)destDC.EndDrawing()srcDC.SelectObject(wx.NullBitmap)destDC.SelectObject(wx.NullBitmap)return destBmp", "docstring": "Convert the region of a the agg buffer bounded by bbox to a wx.Bitmap.\n\nNote: agg must be a backend_agg.RendererAgg instance.", "id": "f17220:m6"} {"signature": "def _use_accelerator(state):", "body": "global _convert_agg_to_wx_imageglobal _convert_agg_to_wx_bitmapif getattr(wx, '', '')[:] < '':if state and _wxagg is not None:_convert_agg_to_wx_image = _wxagg.convert_agg_to_wx_image_convert_agg_to_wx_bitmap = _wxagg.convert_agg_to_wx_bitmapelse:_convert_agg_to_wx_image = _py_convert_agg_to_wx_image_convert_agg_to_wx_bitmap = _py_convert_agg_to_wx_bitmapelse:_convert_agg_to_wx_image = _py_WX28_convert_agg_to_wx_image_convert_agg_to_wx_bitmap = _py_WX28_convert_agg_to_wx_bitmap", "docstring": "Enable or disable the WXAgg accelerator, if it is present and is also\ncompatible with whatever version of wxPython is in use.", "id": "f17220:m7"} {"signature": "def draw(self, drawDC=None):", "body": "DEBUG_MSG(\"\", , self)FigureCanvasAgg.draw(self)self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)self._isDrawn = Trueself.gui_repaint(drawDC=drawDC)", "docstring": "Render the figure using agg.", "id": "f17220:c1:m0"} {"signature": "def blit(self, bbox=None):", "body": "if bbox is None:self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)self.gui_repaint()returnl, b, w, h = bbox.boundsr = l + wt = b + hx = int(l)y = int(self.bitmap.GetHeight() - t)srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)srcDC = wx.MemoryDC()srcDC.SelectObject(srcBmp)destDC = wx.MemoryDC()destDC.SelectObject(self.bitmap)destDC.BeginDrawing()destDC.Blit(x, y, int(w), int(h), srcDC, x, y)destDC.EndDrawing()destDC.SelectObject(wx.NullBitmap)srcDC.SelectObject(wx.NullBitmap)self.gui_repaint()", "docstring": "Transfer the region of the agg buffer defined by bbox to the display.\nIf bbox is None, the entire buffer is transferred.", "id": "f17220:c1:m1"} {"signature": "def ishow():", "body": "for manager in Gcf.get_all_fig_managers():manager.show()if show._needmain:_thread.start_new_thread(Fltk_run_interactive,())show._needmain = False", "docstring": "Show all the figures and enter the fltk mainloop in another thread\nThis allows to keep hand in interractive python session\nWarning: does not work under windows\nThis should be the last line of your script", "id": "f17221:m3"} {"signature": "def show():", "body": "for manager in Gcf.get_all_fig_managers():manager.show()if show._needmain:Fltk.Fl.run()show._needmain = False", "docstring": "Show all the figures and enter the fltk mainloop\n\nThis should be the last line of your script", "id": "f17221:m4"} {"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "FigureClass = kwargs.pop('', Figure)figure = FigureClass(*args, **kwargs)window = Fltk.Fl_Double_Window(,,,)canvas = 
FigureCanvasFltkAgg(figure)window.end()window.show()window.make_current()figManager = FigureManagerFltkAgg(canvas, num, window)if matplotlib.is_interactive():figManager.show()return figManager", "docstring": "Create a new figure manager instance", "id": "f17221:m5"} {"signature": "def new_figure_manager( num, *args, **kwargs ):", "body": "if DEBUG: print('')FigureClass = kwargs.pop('', Figure)thisFig = FigureClass( *args, **kwargs )canvas = FigureCanvasQTAgg( thisFig )return FigureManagerQTAgg( canvas, num )", "docstring": "Create a new figure manager instance", "id": "f17222:m0"} {"signature": "def paintEvent( self, e ):", "body": "FigureCanvasQT.paintEvent( self, e )if DEBUG: print('', self,self.get_width_height())p = qt.QPainter( self )if type(self.replot) is bool: if self.replot:FigureCanvasAgg.draw( self )if ( qt.QImage.systemByteOrder() == qt.QImage.LittleEndian ):stringBuffer = self.renderer._renderer.tostring_bgra()else:stringBuffer = self.renderer._renderer.tostring_argb()qImage = qt.QImage( stringBuffer, self.renderer.width,self.renderer.height, , None, ,qt.QImage.IgnoreEndian )self.pixmap.convertFromImage( qImage, qt.QPixmap.Color )p.drawPixmap( qt.QPoint( , ), self.pixmap )if ( self.drawRect ):p.setPen( qt.QPen( qt.Qt.black, , qt.Qt.DotLine ) )p.drawRect( self.rect[], self.rect[], self.rect[], self.rect[] )else:bbox = self.replotl, b, r, t = bbox.extentsw = int(r) - int(l)h = int(t) - int(b)reg = self.copy_from_bbox(bbox)stringBuffer = reg.to_string_argb()qImage = qt.QImage(stringBuffer, w, h, , None, , qt.QImage.IgnoreEndian)self.pixmap.convertFromImage(qImage, qt.QPixmap.Color)p.drawPixmap(qt.QPoint(l, self.renderer.height-t), self.pixmap)p.end()self.replot = Falseself.drawRect = False", "docstring": "Draw to the Agg backend and then copy the image to the qt.drawable.\nIn Qt, all drawing should be done inside of here when a widget is\nshown onscreen.", "id": "f17222:c2:m3"} {"signature": "def draw( self ):", "body": "if DEBUG: print(\"\", self)self.replot = TrueFigureCanvasAgg.draw(self)self.repaint(False)", "docstring": "Draw the figure when xwindows is ready for the update", "id": "f17222:c2:m4"} {"signature": "def blit(self, bbox=None):", "body": "self.replot = bboxself.repaint(False)", "docstring": "Blit the region in bbox", "id": "f17222:c2:m5"} {"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "FigureClass = kwargs.pop('', Figure)thisFig = FigureClass(*args, **kwargs)canvas = FigureCanvasGDK(thisFig)manager = FigureManagerBase(canvas, num)return manager", "docstring": "Create a new figure manager instance", "id": "f17223:m1"} {"signature": "def set_width_height (self, width, height):", "body": "self.width, self.height = width, height", "docstring": "w,h is the figure w,h not the pixmap w,h", "id": "f17223:c0:m2"} {"signature": "def _draw_rotated_text(self, gc, x, y, s, prop, angle):", "body": "gdrawable = self.gdkDrawableggc = gc.gdkGClayout, inkRect, logicalRect = self._get_pango_layout(s, prop)l, b, w, h = inkRectx = int(x-h)y = int(y-w)if x < or y < : returnkey = (x,y,s,angle,hash(prop))imageVert = self.rotated.get(key)if imageVert != None:gdrawable.draw_image(ggc, imageVert, , , x, y, h, w)returnimageBack = gdrawable.get_image(x, y, w, h)imageVert = gdrawable.get_image(x, y, h, w)imageFlip = gtk.gdk.Image(type=gdk.IMAGE_FASTEST,visual=gdrawable.get_visual(),width=w, height=h)if imageFlip == None or imageBack == None or imageVert == None:warnings.warn(\"\")returnimageFlip.set_colormap(self._cmap)for i in range(w):for j in 
range(h):imageFlip.put_pixel(i, j, imageVert.get_pixel(j,w-i-) )gdrawable.draw_image(ggc, imageFlip, , , x, y, w, h)gdrawable.draw_layout(ggc, x, y-b, layout)imageIn = gdrawable.get_image(x, y, w, h)for i in range(w):for j in range(h):imageVert.put_pixel(j, i, imageIn.get_pixel(w-i-,j) )gdrawable.draw_image(ggc, imageBack, , , x, y, w, h)gdrawable.draw_image(ggc, imageVert, , , x, y, h, w)self.rotated[key] = imageVert", "docstring": "Draw the text rotated 90 degrees, other angles are not supported", "id": "f17223:c0:m7"} {"signature": "def _get_pango_layout(self, s, prop):", "body": "key = self.dpi, s, hash(prop)value = self.layoutd.get(key)if value != None:return valuesize = prop.get_size_in_points() * self.dpi / size = round(size)font_str = '' % (prop.get_name(), prop.get_style(), size,)font = pango.FontDescription(font_str)font.set_weight(self.fontweights[prop.get_weight()])layout = self.gtkDA.create_pango_layout(s)layout.set_font_description(font)inkRect, logicalRect = layout.get_pixel_extents()self.layoutd[key] = layout, inkRect, logicalRectreturn layout, inkRect, logicalRect", "docstring": "Create a pango layout instance for Text 's' with properties 'prop'.\nReturn - pango layout (from cache if already exists)\n\nNote that pango assumes a logical DPI of 96\nRef: pango/fonts.c/pango_font_description_set_size() manual page", "id": "f17223:c0:m8"} {"signature": "def rgb_to_gdk_color(self, rgb):", "body": "try:return self._cached[tuple(rgb)]except KeyError:color = self._cached[tuple(rgb)] =self._cmap.alloc_color(int(rgb[]*),int(rgb[]*),int(rgb[]*))return color", "docstring": "rgb - an RGB tuple (three 0.0-1.0 values)\nreturn an allocated gtk.gdk.Color", "id": "f17223:c1:m1"} {"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "if DEBUG: print('')FigureClass = kwargs.pop('', Figure)thisFig = FigureClass(*args, **kwargs)canvas = FigureCanvasGTKAgg(thisFig)return FigureManagerGTKAgg(canvas, num)if DEBUG: print('')", "docstring": "Create a new figure manager instance", "id": "f17224:m0"} {"signature": "def pylab_setup():", "body": "if backend.startswith(''):backend_name = backend[:]else:backend_name = ''+backendbackend_name = backend_name.lower() backend_name = ''%backend_name.lower()backend_mod = __import__(backend_name,globals(),locals(),[backend_name])new_figure_manager = backend_mod.new_figure_managerdef do_nothing_show(*args, **kwargs):frame = inspect.currentframe()fname = frame.f_back.f_code.co_filenameif fname in ('', ''):warnings.warn(\"\"\"\"\"\" %(backend, matplotlib.matplotlib_fname()))def do_nothing(*args, **kwargs): passbackend_version = getattr(backend_mod,'', '')show = getattr(backend_mod, '', do_nothing_show)draw_if_interactive = getattr(backend_mod, '', do_nothing)if backend.lower() in ['','']:Toolbar = backend_mod.Toolbar__all__.append('')matplotlib.verbose.report('' % (backend,backend_version))return new_figure_manager, draw_if_interactive, show", "docstring": "return new_figure_manager, draw_if_interactive and show for pylab", "id": "f17225:m0"} {"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "if __debug__: verbose.report('','')FigureClass = kwargs.pop('', Figure)thisFig = FigureClass(*args, **kwargs)canvas = FigureCanvasAgg(thisFig)manager = FigureManagerBase(canvas, num)return manager", "docstring": "Create a new figure manager instance", "id": "f17226:m0"} {"signature": "def draw_mathtext(self, gc, x, y, s, prop, angle):", "body": "if __debug__: verbose.report('','')ox, oy, width, height, descent, font_image, used_characters 
=self.mathtext_parser.parse(s, self.dpi, prop)x = int(x) + oxy = int(y) - oyself._renderer.draw_text_image(font_image, x, y + , angle, gc)", "docstring": "Draw the math text using matplotlib.mathtext", "id": "f17226:c0:m2"} {"signature": "def draw_text(self, gc, x, y, s, prop, angle, ismath):", "body": "if __debug__: verbose.report('', '')if ismath:return self.draw_mathtext(gc, x, y, s, prop, angle)font = self._get_agg_font(prop)if font is None: return Noneif len(s) == and ord(s) > :font.load_char(ord(s), flags=LOAD_FORCE_AUTOHINT)else:font.set_text(s, , flags=LOAD_FORCE_AUTOHINT)font.draw_glyphs_to_bitmap()self._renderer.draw_text_image(font.get_image(), int(x), int(y) + , angle, gc)", "docstring": "Render the text", "id": "f17226:c0:m3"} {"signature": "def get_text_width_height_descent(self, s, prop, ismath):", "body": "if ismath=='':size = prop.get_size_in_points()texmanager = self.get_texmanager()Z = texmanager.get_grey(s, size, self.dpi)m,n = Z.shapereturn n, m, if ismath:ox, oy, width, height, descent, fonts, used_characters =self.mathtext_parser.parse(s, self.dpi, prop)return width, height, descentfont = self._get_agg_font(prop)font.set_text(s, , flags=LOAD_FORCE_AUTOHINT) w, h = font.get_width_height()d = font.get_descent()w /= h /= d /= return w, h, d", "docstring": "get the width and height in display coords of the string s\nwith FontPropertry prop\n\n# passing rgb is a little hack to make cacheing in the\n# texmanager more efficient. It is not meant to be used\n# outside the backend", "id": "f17226:c0:m4"} {"signature": "def get_canvas_width_height(self):", "body": "return self.width, self.height", "docstring": "return the canvas width and height in display coords", "id": "f17226:c0:m6"} {"signature": "def _get_agg_font(self, prop):", "body": "if __debug__: verbose.report('','')key = hash(prop)font = self._fontd.get(key)if font is None:fname = findfont(prop)font = self._fontd.get(fname)if font is None:font = FT2Font(str(fname))self._fontd[fname] = fontself._fontd[key] = fontfont.clear()size = prop.get_size_in_points()font.set_size(size, self.dpi)return font", "docstring": "Get the font for text instance t, cacheing for efficiency", "id": "f17226:c0:m7"} {"signature": "def points_to_pixels(self, points):", "body": "if __debug__: verbose.report('','')return points*self.dpi/", "docstring": "convert point measures to pixes using dpi and the pixels per\ninch of the display", "id": "f17226:c0:m8"} {"signature": "def draw(self):", "body": "if __debug__: verbose.report('', '')self.renderer = self.get_renderer()self.figure.draw(self.renderer)", "docstring": "Draw the figure using the renderer", "id": "f17226:c1:m2"} {"signature": "def debug_on_error(type, value, tb):", "body": "traceback.print_exc(type, value, tb)print()pdb.pm()", "docstring": "Code due to Thomas Heller - published in Python Cookbook (O'Reilley)", "id": "f17227:m1"} {"signature": "def error_msg_wx(msg, parent=None):", "body": "dialog =wx.MessageDialog(parent = parent,message = msg,caption = '',style=wx.OK | wx.CENTRE)dialog.ShowModal()dialog.Destroy()return None", "docstring": "Signal an error condition -- in a GUI, popup a error dialog", "id": "f17227:m2"} {"signature": "def raise_msg_to_str(msg):", "body": "if not is_string_like(msg):msg = ''.join(map(str, msg))return msg", "docstring": "msg is a return arg from a raise. 
Join with new lines", "id": "f17227:m3"} {"signature": "def _create_wx_app():", "body": "wxapp = wx.GetApp()if wxapp is None:wxapp = wx.PySimpleApp()wxapp.SetExitOnFrameDelete(True)_create_wx_app.theWxApp = wxapp", "docstring": "Creates a wx.PySimpleApp instance if a wx.App has not been created.", "id": "f17227:m4"} {"signature": "def draw_if_interactive():", "body": "DEBUG_MSG(\"\", , None)if matplotlib.is_interactive():figManager = Gcf.get_active()if figManager is not None:figManager.canvas.draw()", "docstring": "This should be overriden in a windowing environment if drawing\nshould be done in interactive python mode", "id": "f17227:m5"} {"signature": "def show():", "body": "DEBUG_MSG(\"\", , None)for figwin in Gcf.get_all_fig_managers():figwin.frame.Show()if show._needmain and not matplotlib.is_interactive():wxapp = wx.GetApp()if wxapp is not None:imlr = getattr(wxapp, '', lambda: False)if not imlr():wxapp.MainLoop()show._needmain = False", "docstring": "Current implementation assumes that matplotlib is executed in a PyCrust\nshell. It appears to be possible to execute wxPython applications from\nwithin a PyCrust without having to ensure that wxPython has been created\nin a secondary thread (e.g. SciPy gui_thread).\n\nUnfortunately, gui_thread seems to introduce a number of further\ndependencies on SciPy modules, which I do not wish to introduce\ninto the backend at this point. If there is a need I will look\ninto this in a later release.", "id": "f17227:m6"} {"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "DEBUG_MSG(\"\", , None)_create_wx_app()FigureClass = kwargs.pop('', Figure)fig = FigureClass(*args, **kwargs)frame = FigureFrameWx(num, fig)figmgr = frame.get_figure_manager()if matplotlib.is_interactive():figmgr.frame.Show()return figmgr", "docstring": "Create a new figure manager instance", "id": "f17227:m7"} {"signature": "def _load_bitmap(filename):", "body": "basedir = os.path.join(rcParams[''],'')bmpFilename = os.path.normpath(os.path.join(basedir, filename))if not os.path.exists(bmpFilename):raise IOError(''%bmpFilename)bmp = wx.Bitmap(bmpFilename)return bmp", "docstring": "Load a bitmap file from the backends/images subdirectory in which the\nmatplotlib library is installed. 
The filename parameter should not\ncontain any path information as this is determined automatically.\n\nReturns a wx.Bitmap object", "id": "f17227:m8"} {"signature": "def __init__(self, bitmap, dpi):", "body": "DEBUG_MSG(\"\", , self)if wx.VERSION_STRING < \"\":raise RuntimeError(\"\")self.width = bitmap.GetWidth()self.height = bitmap.GetHeight()self.bitmap = bitmapself.fontd = {}self.dpi = dpiself.gc = None", "docstring": "Initialise a wxWindows renderer instance.", "id": "f17227:c1:m0"} {"signature": "def get_text_width_height_descent(self, s, prop, ismath):", "body": "if ismath: s = self.strip_math(s)if self.gc is None:gc = self.new_gc()else:gc = self.gcgfx_ctx = gc.gfx_ctxfont = self.get_wx_font(s, prop)gfx_ctx.SetFont(font, wx.BLACK)w, h, descent, leading = gfx_ctx.GetFullTextExtent(s)return w, h, descent", "docstring": "get the width and height in display coords of the string s\nwith FontPropertry prop", "id": "f17227:c1:m3"} {"signature": "def get_canvas_width_height(self):", "body": "return self.width, self.height", "docstring": "return the canvas width and height in display coords", "id": "f17227:c1:m4"} {"signature": "def draw_text(self, gc, x, y, s, prop, angle, ismath):", "body": "if ismath: s = self.strip_math(s)DEBUG_MSG(\"\", , self)gc.select()self.handle_clip_rectangle(gc)gfx_ctx = gc.gfx_ctxfont = self.get_wx_font(s, prop)color = gc.get_wxcolour(gc.get_rgb())gfx_ctx.SetFont(font, color)w, h, d = self.get_text_width_height_descent(s, prop, ismath)x = int(x)y = int(y-h)if angle == :gfx_ctx.DrawText(s, x, y)else:rads = angle / * math.pixo = h * math.sin(rads)yo = h * math.cos(rads)gfx_ctx.DrawRotatedText(s, x - xo, y - yo, rads)gc.unselect()", "docstring": "Render the matplotlib.text.Text instance\nNone)", "id": "f17227:c1:m9"} {"signature": "def new_gc(self):", "body": "DEBUG_MSG('', , self)self.gc = GraphicsContextWx(self.bitmap, self)self.gc.select()self.gc.unselect()return self.gc", "docstring": "Return an instance of a GraphicsContextWx, and sets the current gc copy", "id": "f17227:c1:m10"} {"signature": "def get_gc(self):", "body": "assert self.gc != None, \"\"return self.gc", "docstring": "Fetch the locally cached gc.", "id": "f17227:c1:m11"} {"signature": "def get_wx_font(self, s, prop):", "body": "DEBUG_MSG(\"\", , self)key = hash(prop)fontprop = propfontname = fontprop.get_name()font = self.fontd.get(key)if font is not None:return fontwxFontname = self.fontnames.get(fontname, wx.ROMAN)wxFacename = '' size = self.points_to_pixels(fontprop.get_size_in_points())font =wx.Font(int(size+), wxFontname, self.fontangles[fontprop.get_style()], self.fontweights[fontprop.get_weight()], False, wxFacename) self.fontd[key] = fontreturn font", "docstring": "Return a wx font. 
Cache instances in a font dictionary for\nefficiency", "id": "f17227:c1:m12"} {"signature": "def points_to_pixels(self, points):", "body": "return points*(PIXELS_PER_INCH/*self.dpi/)", "docstring": "convert point measures to pixes using dpi and the pixels per\ninch of the display", "id": "f17227:c1:m13"} {"signature": "def select(self):", "body": "if sys.platform=='':self.dc.SelectObject(self.bitmap)self.IsSelected = True", "docstring": "Select the current bitmap into this wxDC instance", "id": "f17227:c2:m1"} {"signature": "def unselect(self):", "body": "if sys.platform=='':self.dc.SelectObject(wx.NullBitmap)self.IsSelected = False", "docstring": "Select a Null bitmasp into this wxDC instance", "id": "f17227:c2:m2"} {"signature": "def set_foreground(self, fg, isRGB=None):", "body": "DEBUG_MSG(\"\", , self)self.select()GraphicsContextBase.set_foreground(self, fg, isRGB)self._pen.SetColour(self.get_wxcolour(self.get_rgb()))self.gfx_ctx.SetPen(self._pen)self.unselect()", "docstring": "Set the foreground color. fg can be a matlab format string, a\nhtml hex color string, an rgb unit tuple, or a float between 0\nand 1. In the latter case, grayscale is used.", "id": "f17227:c2:m3"} {"signature": "def set_graylevel(self, frac):", "body": "DEBUG_MSG(\"\", , self)self.select()GraphicsContextBase.set_graylevel(self, frac)self._pen.SetColour(self.get_wxcolour(self.get_rgb()))self.gfx_ctx.SetPen(self._pen)self.unselect()", "docstring": "Set the foreground color. fg can be a matlab format string, a\nhtml hex color string, an rgb unit tuple, or a float between 0\nand 1. In the latter case, grayscale is used.", "id": "f17227:c2:m4"} {"signature": "def set_linewidth(self, w):", "body": "DEBUG_MSG(\"\", , self)self.select()if w> and w<: w = GraphicsContextBase.set_linewidth(self, w)lw = int(self.renderer.points_to_pixels(self._linewidth))if lw==: lw = self._pen.SetWidth(lw)self.gfx_ctx.SetPen(self._pen)self.unselect()", "docstring": "Set the line width.", "id": "f17227:c2:m5"} {"signature": "def set_capstyle(self, cs):", "body": "DEBUG_MSG(\"\", , self)self.select()GraphicsContextBase.set_capstyle(self, cs)self._pen.SetCap(GraphicsContextWx._capd[self._capstyle])self.gfx_ctx.SetPen(self._pen)self.unselect()", "docstring": "Set the capstyle as a string in ('butt', 'round', 'projecting')", "id": "f17227:c2:m6"} {"signature": "def set_joinstyle(self, js):", "body": "DEBUG_MSG(\"\", , self)self.select()GraphicsContextBase.set_joinstyle(self, js)self._pen.SetJoin(GraphicsContextWx._joind[self._joinstyle])self.gfx_ctx.SetPen(self._pen)self.unselect()", "docstring": "Set the join style to be one of ('miter', 'round', 'bevel')", "id": "f17227:c2:m7"} {"signature": "def set_linestyle(self, ls):", "body": "DEBUG_MSG(\"\", , self)self.select()GraphicsContextBase.set_linestyle(self, ls)try:self._style = GraphicsContextWx._dashd_wx[ls]except KeyError:self._style = wx.LONG_DASHif wx.Platform == '':self.set_linewidth()self._pen.SetStyle(self._style)self.gfx_ctx.SetPen(self._pen)self.unselect()", "docstring": "Set the line style to be one of", "id": "f17227:c2:m8"} {"signature": "def get_wxcolour(self, color):", "body": "DEBUG_MSG(\"\", , self)if len(color) == :r, g, b = colorr *= g *= b *= return wx.Colour(red=int(r), green=int(g), blue=int(b))else:r, g, b, a = colorr *= g *= b *= a *= return wx.Colour(red=int(r), green=int(g), blue=int(b), alpha=int(a))", "docstring": "return a wx.Colour from RGB format", "id": "f17227:c2:m9"} {"signature": "def __init__(self, parent, id, figure):", "body": 
"FigureCanvasBase.__init__(self, figure)l,b,w,h = figure.bbox.boundsw = int(math.ceil(w))h = int(math.ceil(h))wx.Panel.__init__(self, parent, id, size=wx.Size(w, h))def do_nothing(*args, **kwargs):warnings.warn(''%backend_version)passtry:getattr(self, '')except AttributeError:self.SetInitialSize = getattr(self, '', do_nothing)if not hasattr(self,''):self.IsShownOnScreen = getattr(self, '', lambda *args: True)self.bitmap =wx.EmptyBitmap(w, h)DEBUG_MSG(\"\" % (w,h), , self)self._isDrawn = Falsebind(self, wx.EVT_SIZE, self._onSize)bind(self, wx.EVT_PAINT, self._onPaint)bind(self, wx.EVT_ERASE_BACKGROUND, self._onEraseBackground)bind(self, wx.EVT_KEY_DOWN, self._onKeyDown)bind(self, wx.EVT_KEY_UP, self._onKeyUp)bind(self, wx.EVT_RIGHT_DOWN, self._onRightButtonDown)bind(self, wx.EVT_RIGHT_DCLICK, self._onRightButtonDown)bind(self, wx.EVT_RIGHT_UP, self._onRightButtonUp)bind(self, wx.EVT_MOUSEWHEEL, self._onMouseWheel)bind(self, wx.EVT_LEFT_DOWN, self._onLeftButtonDown)bind(self, wx.EVT_LEFT_DCLICK, self._onLeftButtonDown)bind(self, wx.EVT_LEFT_UP, self._onLeftButtonUp)bind(self, wx.EVT_MOTION, self._onMotion)bind(self, wx.EVT_LEAVE_WINDOW, self._onLeave)bind(self, wx.EVT_ENTER_WINDOW, self._onEnter)bind(self, wx.EVT_IDLE, self._onIdle)self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)self.macros = {} self.Printer_Init()", "docstring": "Initialise a FigureWx instance.\n\n- Initialise the FigureCanvasBase and wxPanel parents.\n- Set event handlers for:\n EVT_SIZE (Resize event)\n EVT_PAINT (Paint event)", "id": "f17227:c3:m0"} {"signature": "def Copy_to_Clipboard(self, event=None):", "body": "bmp_obj = wx.BitmapDataObject()bmp_obj.SetBitmap(self.bitmap)wx.TheClipboard.Open()wx.TheClipboard.SetData(bmp_obj)wx.TheClipboard.Close()", "docstring": "copy bitmap of canvas to system clipboard", "id": "f17227:c3:m2"} {"signature": "def Printer_Init(self):", "body": "self.printerData = wx.PrintData()self.printerData.SetPaperId(wx.PAPER_LETTER)self.printerData.SetPrintMode(wx.PRINT_MODE_PRINTER)self.printerPageData= wx.PageSetupDialogData()self.printerPageData.SetMarginBottomRight((,))self.printerPageData.SetMarginTopLeft((,))self.printerPageData.SetPrintData(self.printerData)self.printer_width = self.printer_margin= ", "docstring": "initialize printer settings using wx methods", "id": "f17227:c3:m3"} {"signature": "def Printer_Setup(self, event=None):", "body": "dmsg = \"\"\"\"\"\"dlg = wx.Dialog(self, -, '' , (-,-))df = dlg.GetFont()df.SetWeight(wx.NORMAL)df.SetPointSize()dlg.SetFont(df)x_wid = wx.TextCtrl(dlg,-,value=\"\" % self.printer_width, size=(,-))x_mrg = wx.TextCtrl(dlg,-,value=\"\" % self.printer_margin,size=(,-))sizerAll = wx.BoxSizer(wx.VERTICAL)sizerAll.Add(wx.StaticText(dlg,-,dmsg),, wx.ALL | wx.EXPAND, )sizer = wx.FlexGridSizer(,)sizerAll.Add(sizer, , wx.ALL | wx.EXPAND, )sizer.Add(wx.StaticText(dlg,-,''),, wx.ALIGN_LEFT|wx.ALL, )sizer.Add(x_wid,, wx.ALIGN_LEFT|wx.ALL, )sizer.Add(wx.StaticText(dlg,-,''),, wx.ALIGN_LEFT|wx.ALL, )sizer.Add(wx.StaticText(dlg,-,''),, wx.ALIGN_LEFT|wx.ALL, )sizer.Add(x_mrg,, wx.ALIGN_LEFT|wx.ALL, )sizer.Add(wx.StaticText(dlg,-,''),, wx.ALIGN_LEFT|wx.ALL, )btn = wx.Button(dlg,wx.ID_OK, \"\")btn.SetDefault()sizer.Add(btn, , wx.ALIGN_LEFT, )btn = wx.Button(dlg,wx.ID_CANCEL, \"\")sizer.Add(btn, , wx.ALIGN_LEFT, )dlg.SetSizer(sizerAll)dlg.SetAutoLayout(True)sizerAll.Fit(dlg)if dlg.ShowModal() == wx.ID_OK:try:self.printer_width = float(x_wid.GetValue())self.printer_margin = float(x_mrg.GetValue())except:passif ((self.printer_width + self.printer_margin) > 
):self.printerData.SetOrientation(wx.LANDSCAPE)else:self.printerData.SetOrientation(wx.PORTRAIT)dlg.Destroy()return", "docstring": "set up figure for printing. The standard wx Printer\n Setup Dialog seems to die easily. Therefore, this setup\n simply asks for image width and margin for printing.", "id": "f17227:c3:m4"} {"signature": "def Printer_Setup2(self, event=None):", "body": "if hasattr(self, ''):data = wx.PageSetupDialogData()data.SetPrintData(self.printerData)else:data = wx.PageSetupDialogData()data.SetMarginTopLeft( (, ) )data.SetMarginBottomRight( (, ) )dlg = wx.PageSetupDialog(self, data)if dlg.ShowModal() == wx.ID_OK:data = dlg.GetPageSetupData()tl = data.GetMarginTopLeft()br = data.GetMarginBottomRight()self.printerData = wx.PrintData(data.GetPrintData())dlg.Destroy()", "docstring": "set up figure for printing. Using the standard wx Printer\n Setup Dialog.", "id": "f17227:c3:m5"} {"signature": "def Printer_Preview(self, event=None):", "body": "po1 = PrintoutWx(self, width=self.printer_width,margin=self.printer_margin)po2 = PrintoutWx(self, width=self.printer_width,margin=self.printer_margin)self.preview = wx.PrintPreview(po1,po2,self.printerData)if not self.preview.Ok(): print(\"\")self.preview.SetZoom()frameInst= selfwhile not isinstance(frameInst, wx.Frame):frameInst= frameInst.GetParent()frame = wx.PreviewFrame(self.preview, frameInst, \"\")frame.Initialize()frame.SetPosition(self.GetPosition())frame.SetSize((,))frame.Centre(wx.BOTH)frame.Show(True)self.gui_repaint()", "docstring": "generate Print Preview with wx Print mechanism", "id": "f17227:c3:m6"} {"signature": "def Printer_Print(self, event=None):", "body": "pdd = wx.PrintDialogData()pdd.SetPrintData(self.printerData)pdd.SetToPage()printer = wx.Printer(pdd)printout = PrintoutWx(self, width=int(self.printer_width),margin=int(self.printer_margin))print_ok = printer.Print(self, printout, True)if wx.VERSION_STRING >= '':if not print_ok and not printer.GetLastError() == wx.PRINTER_CANCELLED:wx.MessageBox(\"\"\"\"\"\",\"\", wx.OK)else:if not print_ok:wx.MessageBox(\"\"\"\"\"\",\"\", wx.OK)printout.Destroy()self.gui_repaint()", "docstring": "Print figure using wx Print mechanism", "id": "f17227:c3:m7"} {"signature": "def draw_idle(self):", "body": "DEBUG_MSG(\"\", , self)self._isDrawn = False if hasattr(self,''):self._idletimer.Restart(IDLE_DELAY)else:self._idletimer = wx.FutureCall(IDLE_DELAY,self._onDrawIdle)", "docstring": "Delay rendering until the GUI is idle.", "id": "f17227:c3:m8"} {"signature": "def draw(self, drawDC=None):", "body": "DEBUG_MSG(\"\", , self)self.renderer = RendererWx(self.bitmap, self.figure.dpi)self.figure.draw(self.renderer)self._isDrawn = Trueself.gui_repaint(drawDC=drawDC)", "docstring": "Render the figure using RendererWx instance renderer, or using a\npreviously defined renderer if none is specified.", "id": "f17227:c3:m10"} {"signature": "def start_event_loop(self, timeout=):", "body": "if hasattr(self, ''):raise RuntimeError(\"\")id = wx.NewId()timer = wx.Timer(self, id=id)if timeout > :timer.Start(timeout*, oneShot=True)bind(self, wx.EVT_TIMER, self.stop_event_loop, id=id)self._event_loop = wx.EventLoop()self._event_loop.Run()timer.Stop()", "docstring": "Start an event loop. This is used to start a blocking event\nloop so that interactive functions, such as ginput and\nwaitforbuttonpress, can wait for events. 
This should not be\nconfused with the main GUI event loop, which is always running\nand has nothing to do with this.\n\nCall signature::\n\nstart_event_loop(self,timeout=0)\n\nThis call blocks until a callback function triggers\nstop_event_loop() or *timeout* is reached. If *timeout* is\n<=0, never timeout.\n\nRaises RuntimeError if event loop is already running.", "id": "f17227:c3:m12"} {"signature": "def stop_event_loop(self, event=None):", "body": "if hasattr(self,''):if self._event_loop.IsRunning():self._event_loop.Exit()del self._event_loop", "docstring": "Stop an event loop. This is used to stop a blocking event\nloop so that interactive functions, such as ginput and\nwaitforbuttonpress, can wait for events.\n\nCall signature::\n\nstop_event_loop_default(self)", "id": "f17227:c3:m13"} {"signature": "def _get_imagesave_wildcards(self):", "body": "default_filetype = self.get_default_filetype()filetypes = self.get_supported_filetypes_grouped()sorted_filetypes = list(filetypes.items())sorted_filetypes.sort()wildcards = []extensions = []filter_index = for i, (name, exts) in enumerate(sorted_filetypes):ext_list = ''.join(['' % ext for ext in exts])extensions.append(exts[])wildcard = '' % (name, ext_list, ext_list)if default_filetype in exts:filter_index = iwildcards.append(wildcard)wildcards = ''.join(wildcards)return wildcards, extensions, filter_index", "docstring": "return the wildcard string for the filesave dialog", "id": "f17227:c3:m14"} {"signature": "def gui_repaint(self, drawDC=None):", "body": "DEBUG_MSG(\"\", , self)if self.IsShownOnScreen():if drawDC is None:drawDC=wx.ClientDC(self)drawDC.BeginDrawing()drawDC.DrawBitmap(self.bitmap, , )drawDC.EndDrawing()else:pass", "docstring": "Performs update of the displayed image on the GUI canvas, using the\nsupplied device context. 
If drawDC is None, a ClientDC will be used to\nredraw the image.", "id": "f17227:c3:m15"} {"signature": "def _onPaint(self, evt):", "body": "DEBUG_MSG(\"\", , self)drawDC = wx.PaintDC(self)if not self._isDrawn:self.draw(drawDC=drawDC)else:self.gui_repaint(drawDC=drawDC)evt.Skip()", "docstring": "Called when wxPaintEvt is generated", "id": "f17227:c3:m25"} {"signature": "def _onEraseBackground(self, evt):", "body": "pass", "docstring": "Called when window is redrawn; since we are blitting the entire\nimage, we can leave this blank to suppress flicker.", "id": "f17227:c3:m26"} {"signature": "def _onSize(self, evt):", "body": "DEBUG_MSG(\"\", , self)self._width, self._height = self.GetClientSize()self.bitmap =wx.EmptyBitmap(self._width, self._height)self._isDrawn = Falseif self._width <= or self._height <= : return dpival = self.figure.dpiwinch = self._width/dpivalhinch = self._height/dpivalself.figure.set_size_inches(winch, hinch)self.Refresh(eraseBackground=False)", "docstring": "Called when wxEventSize is generated.\n\nIn this application we attempt to resize to fit the window, so it\nis better to take the performance hit and redraw the whole window.", "id": "f17227:c3:m27"} {"signature": "def _onIdle(self, evt):", "body": "evt.Skip()FigureCanvasBase.idle_event(self, guiEvent=evt)", "docstring": "a GUI idle event", "id": "f17227:c3:m29"} {"signature": "def _onKeyDown(self, evt):", "body": "key = self._get_key(evt)evt.Skip()FigureCanvasBase.key_press_event(self, key, guiEvent=evt)", "docstring": "Capture key press.", "id": "f17227:c3:m30"} {"signature": "def _onKeyUp(self, evt):", "body": "key = self._get_key(evt)evt.Skip()FigureCanvasBase.key_release_event(self, key, guiEvent=evt)", "docstring": "Release key.", "id": "f17227:c3:m31"} {"signature": "def _onRightButtonDown(self, evt):", "body": "x = evt.GetX()y = self.figure.bbox.height - evt.GetY()evt.Skip()self.CaptureMouse()FigureCanvasBase.button_press_event(self, x, y, , guiEvent=evt)", "docstring": "Start measuring on an axis.", "id": "f17227:c3:m32"} {"signature": "def _onRightButtonUp(self, evt):", "body": "x = evt.GetX()y = self.figure.bbox.height - evt.GetY()evt.Skip()if self.HasCapture(): self.ReleaseMouse()FigureCanvasBase.button_release_event(self, x, y, , guiEvent=evt)", "docstring": "End measuring on an axis.", "id": "f17227:c3:m33"} {"signature": "def _onLeftButtonDown(self, evt):", "body": "x = evt.GetX()y = self.figure.bbox.height - evt.GetY()evt.Skip()self.CaptureMouse()FigureCanvasBase.button_press_event(self, x, y, , guiEvent=evt)", "docstring": "Start measuring on an axis.", "id": "f17227:c3:m34"} {"signature": "def _onLeftButtonUp(self, evt):", "body": "x = evt.GetX()y = self.figure.bbox.height - evt.GetY()evt.Skip()if self.HasCapture(): self.ReleaseMouse()FigureCanvasBase.button_release_event(self, x, y, , guiEvent=evt)", "docstring": "End measuring on an axis.", "id": "f17227:c3:m35"} {"signature": "def _onMouseWheel(self, evt):", "body": "x = evt.GetX()y = self.figure.bbox.height - evt.GetY()delta = evt.GetWheelDelta()rotation = evt.GetWheelRotation()rate = evt.GetLinesPerAction()step = rate*float(rotation)/deltaevt.Skip()if wx.Platform == '':if not hasattr(self,''):self._skipwheelevent = Trueelif self._skipwheelevent:self._skipwheelevent = Falsereturn else:self._skipwheelevent = TrueFigureCanvasBase.scroll_event(self, x, y, step, guiEvent=evt)", "docstring": "Translate mouse wheel events into matplotlib events", "id": "f17227:c3:m36"} {"signature": "def _onMotion(self, evt):", "body": "x = evt.GetX()y = 
self.figure.bbox.height - evt.GetY()evt.Skip()FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=evt)", "docstring": "Start measuring on an axis.", "id": "f17227:c3:m37"} {"signature": "def _onLeave(self, evt):", "body": "evt.Skip()FigureCanvasBase.leave_notify_event(self, guiEvent = evt)", "docstring": "Mouse has left the window.", "id": "f17227:c3:m38"} {"signature": "def _onEnter(self, evt):", "body": "FigureCanvasBase.enter_notify_event(self, guiEvent = evt)", "docstring": "Mouse has entered the window.", "id": "f17227:c3:m39"} {"signature": "def GetToolBar(self):", "body": "return self.toolbar", "docstring": "Override wxFrame::GetToolBar as we don't have managed toolbar", "id": "f17227:c4:m5"} {"signature": "def resize(self, width, height):", "body": "self.canvas.SetInitialSize(wx.Size(width, height))self.window.GetSizer().Fit(self.window)", "docstring": "Set the canvas size in pixels", "id": "f17227:c5:m3"} {"signature": "def _onMenuButton(self, evt):", "body": "x, y = self.GetPositionTuple()w, h = self.GetSizeTuple()self.PopupMenuXY(self._menu, x, y+h-)evt.Skip()", "docstring": "Handle menu button pressed.", "id": "f17227:c6:m2"} {"signature": "def _handleSelectAllAxes(self, evt):", "body": "if len(self._axisId) == :returnfor i in range(len(self._axisId)):self._menu.Check(self._axisId[i], True)self._toolbar.set_active(self.getActiveAxes())evt.Skip()", "docstring": "Called when the 'select all axes' menu item is selected.", "id": "f17227:c6:m3"} {"signature": "def _handleInvertAxesSelected(self, evt):", "body": "if len(self._axisId) == : returnfor i in range(len(self._axisId)):if self._menu.IsChecked(self._axisId[i]):self._menu.Check(self._axisId[i], False)else:self._menu.Check(self._axisId[i], True)self._toolbar.set_active(self.getActiveAxes())evt.Skip()", "docstring": "Called when the invert all menu item is selected", "id": "f17227:c6:m4"} {"signature": "def _onMenuItemSelected(self, evt):", "body": "current = self._menu.IsChecked(evt.GetId())if current:new = Falseelse:new = Trueself._menu.Check(evt.GetId(), new)self._toolbar.set_active(self.getActiveAxes())evt.Skip()", "docstring": "Called whenever one of the specific axis menu items is selected", "id": "f17227:c6:m5"} {"signature": "def updateAxes(self, maxAxis):", "body": "if maxAxis > len(self._axisId):for i in range(len(self._axisId) + , maxAxis + , ):menuId =wx.NewId()self._axisId.append(menuId)self._menu.Append(menuId, \"\" % i, \"\" % i, True)self._menu.Check(menuId, True)bind(self, wx.EVT_MENU, self._onMenuItemSelected, id=menuId)self._toolbar.set_active(list(range(len(self._axisId))))", "docstring": "Ensures that there are entries for max_axis axes in the menu\n (selected by default).", "id": "f17227:c6:m6"} {"signature": "def getActiveAxes(self):", "body": "active = []for i in range(len(self._axisId)):if self._menu.IsChecked(self._axisId[i]):active.append(i)return active", "docstring": "Return a list of the selected axes.", "id": "f17227:c6:m7"} {"signature": "def updateButtonText(self, lst):", "body": "axis_txt = ''for e in lst:axis_txt += '' % (e+)self.SetLabel(\"\" % axis_txt[:-])", "docstring": "Update the list of selected axes in the menu button", "id": "f17227:c6:m8"} {"signature": "def draw_rubberband(self, event, x0, y0, x1, y1):", "body": "canvas = self.canvasdc =wx.ClientDC(canvas)dc.SetLogicalFunction(wx.XOR)wbrush =wx.Brush(wx.Colour(,,), wx.TRANSPARENT)wpen =wx.Pen(wx.Colour(, , ), , wx.SOLID)dc.SetBrush(wbrush)dc.SetPen(wpen)dc.ResetBoundingBox()dc.BeginDrawing()height = 
self.canvas.figure.bbox.heighty1 = height - y1y0 = height - y0if y1if x1w = x1 - x0h = y1 - y0rect = int(x0), int(y0), int(w), int(h)try: lastrect = self.lastrectexcept AttributeError: passelse: dc.DrawRectangle(*lastrect) self.lastrect = rectdc.DrawRectangle(*rect)dc.EndDrawing()", "docstring": "adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744", "id": "f17227:c8:m10"} {"signature": "def __init__(self, canvas, can_kill=False):", "body": "wx.ToolBar.__init__(self, canvas.GetParent(), -)DEBUG_MSG(\"\", , self)self.canvas = canvasself._lastControl = Noneself._mouseOnButton = Noneself._parent = canvas.GetParent()self._NTB_BUTTON_HANDLER = {_NTB_X_PAN_LEFT : self.panx,_NTB_X_PAN_RIGHT : self.panx,_NTB_X_ZOOMIN : self.zoomx,_NTB_X_ZOOMOUT : self.zoomy,_NTB_Y_PAN_UP : self.pany,_NTB_Y_PAN_DOWN : self.pany,_NTB_Y_ZOOMIN : self.zoomy,_NTB_Y_ZOOMOUT : self.zoomy }self._create_menu()self._create_controls(can_kill)self.Realize()", "docstring": "figure is the Figure instance that the toolboar controls\n\nwin, if not None, is the wxWindow the Figure is embedded in", "id": "f17227:c9:m0"} {"signature": "def _create_menu(self):", "body": "DEBUG_MSG(\"\", , self)self._menu = MenuButtonWx(self)self.AddControl(self._menu)self.AddSeparator()", "docstring": "Creates the 'menu' - implemented as a button which opens a\npop-up menu since wxPython does not allow a menu as a control", "id": "f17227:c9:m1"} {"signature": "def _create_controls(self, can_kill):", "body": "DEBUG_MSG(\"\", , self)self.SetToolBitmapSize(wx.Size(,))self.AddSimpleTool(_NTB_X_PAN_LEFT, _load_bitmap(''),'', '')self.AddSimpleTool(_NTB_X_PAN_RIGHT, _load_bitmap(''),'', '')self.AddSimpleTool(_NTB_X_ZOOMIN, _load_bitmap(''),'', '')self.AddSimpleTool(_NTB_X_ZOOMOUT, _load_bitmap(''),'', '')self.AddSeparator()self.AddSimpleTool(_NTB_Y_PAN_UP,_load_bitmap(''),'', '')self.AddSimpleTool(_NTB_Y_PAN_DOWN, _load_bitmap(''),'', '')self.AddSimpleTool(_NTB_Y_ZOOMIN, _load_bitmap(''),'', '')self.AddSimpleTool(_NTB_Y_ZOOMOUT, _load_bitmap(''),'', '')self.AddSeparator()self.AddSimpleTool(_NTB_SAVE, _load_bitmap(''),'', '')self.AddSeparator()bind(self, wx.EVT_TOOL, self._onLeftScroll, id=_NTB_X_PAN_LEFT)bind(self, wx.EVT_TOOL, self._onRightScroll, id=_NTB_X_PAN_RIGHT)bind(self, wx.EVT_TOOL, self._onXZoomIn, id=_NTB_X_ZOOMIN)bind(self, wx.EVT_TOOL, self._onXZoomOut, id=_NTB_X_ZOOMOUT)bind(self, wx.EVT_TOOL, self._onUpScroll, id=_NTB_Y_PAN_UP)bind(self, wx.EVT_TOOL, self._onDownScroll, id=_NTB_Y_PAN_DOWN)bind(self, wx.EVT_TOOL, self._onYZoomIn, id=_NTB_Y_ZOOMIN)bind(self, wx.EVT_TOOL, self._onYZoomOut, id=_NTB_Y_ZOOMOUT)bind(self, wx.EVT_TOOL, self._onSave, id=_NTB_SAVE)bind(self, wx.EVT_TOOL_ENTER, self._onEnterTool, id=self.GetId())if can_kill:bind(self, wx.EVT_TOOL, self._onClose, id=_NTB_CLOSE)bind(self, wx.EVT_MOUSEWHEEL, self._onMouseWheel)", "docstring": "Creates the button controls, and links them to event handlers", "id": "f17227:c9:m2"} {"signature": "def set_active(self, ind):", "body": "DEBUG_MSG(\"\", , self)self._ind = indif ind != None:self._active = [ self._axes[i] for i in self._ind ]else:self._active = []self._menu.updateButtonText(ind)", "docstring": "ind is a list of index numbers for the axes which are to be made active", "id": "f17227:c9:m3"} {"signature": "def get_last_control(self):", "body": "return self._lastControl", "docstring": "Returns the identity of the last toolbar button pressed.", "id": "f17227:c9:m4"} {"signature": "def update(self):", "body": "DEBUG_MSG(\"\", , self)self._axes = 
self.canvas.figure.get_axes()self._menu.updateAxes(len(self._axes))", "docstring": "Update the toolbar menu - called when (e.g.) a new subplot or axes are added", "id": "f17227:c9:m9"} {"signature": "def _do_nothing(self, d):", "body": "pass", "docstring": "A NULL event handler - does nothing whatsoever", "id": "f17227:c9:m10"} {"signature": "def draw_if_interactive():", "body": "if matplotlib.is_interactive():figManager = Gcf.get_active()if figManager != None:figManager.canvas.draw()", "docstring": "Is called after every pylab drawing command", "id": "f17228:m1"} {"signature": "def _create_qApp():", "body": "if QtGui.QApplication.startingUp():if DEBUG: print(\"\")global qAppqApp = QtGui.QApplication( [\"\"] )QtCore.QObject.connect( qApp, QtCore.SIGNAL( \"\" ),qApp, QtCore.SLOT( \"\" ) )_create_qApp.qAppCreatedHere = True", "docstring": "Only one qApp can exist at a time, so check before creating one.", "id": "f17228:m2"} {"signature": "def show():", "body": "for manager in Gcf.get_all_fig_managers():manager.window.show()if DEBUG: print('')figManager = Gcf.get_active()if figManager != None:figManager.canvas.draw()if _create_qApp.qAppCreatedHere:QtGui.qApp.exec_()", "docstring": "Show all the figures and enter the qt main loop\nThis should be the last line of your script", "id": "f17228:m3"} {"signature": "def new_figure_manager( num, *args, **kwargs ):", "body": "thisFig = Figure( *args, **kwargs )canvas = FigureCanvasQT( thisFig )manager = FigureManagerQT( canvas, num )return manager", "docstring": "Create a new figure manager instance", "id": "f17228:m4"} {"signature": "def exception_handler( type, value, tb ):", "body": "msg = ''if hasattr(value, '') and value.filename != None:msg = value.filename + ''if hasattr(value, '') and value.strerror != None:msg += value.strerrorelse:msg += str(value)if len( msg ) : error_msg_qt( msg )", "docstring": "Handle uncaught exceptions\n It does not catch SystemExit", "id": "f17228:m6"} {"signature": "def resize(self, width, height):", "body": "self.window.resize(width, height)", "docstring": "set the canvas size in pixels", "id": "f17228:c1:m3"} {"signature": "def __init__(self, canvas, parent, coordinates=True):", "body": "self.canvas = canvasself.coordinates = coordinatesQtGui.QToolBar.__init__( self, parent )NavigationToolbar2.__init__( self, canvas )", "docstring": "coordinates: should we show the coordinates on the right?", "id": "f17228:c2:m0"} {"signature": "def draw_if_interactive():", "body": "pass", "docstring": "For image backends - is not required\nFor GUI backends - this should be overriden if drawing should be done in\ninteractive python mode", "id": "f17229:m0"} {"signature": "def show():", "body": "for manager in Gcf.get_all_fig_managers():pass", "docstring": "For image backends - is not required\nFor GUI backends - show() is usually the last line of a pylab script and\ntells the backend that it is time to draw. In interactive mode, this may\nbe a do nothing func. 
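The GUI backends above all follow the same pattern for draw_if_interactive and show: consult the Gcf figure-manager registry, redraw the active canvas, and (in show) enter the toolkit main loop. A rough sketch of that pattern, assuming matplotlib's internal matplotlib._pylab_helpers.Gcf registry; the main-loop call is toolkit specific and only indicated in a comment.

import matplotlib
from matplotlib._pylab_helpers import Gcf

def draw_if_interactive_sketch():
    # redraw only the active figure, and only in interactive (pylab) mode
    if matplotlib.is_interactive():
        manager = Gcf.get_active()
        if manager is not None:
            manager.canvas.draw()

def show_sketch():
    # draw every registered figure; a GUI backend would also show each
    # manager's window here and then enter the toolkit main loop
    # (e.g. qApp.exec_() for Qt, wx.App.MainLoop() for wx)
    for manager in Gcf.get_all_fig_managers():
        manager.canvas.draw()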
See the GTK backend for an example of how to handle\ninteractive versus batch mode", "id": "f17229:m1"} {"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "FigureClass = kwargs.pop('', Figure)thisFig = FigureClass(*args, **kwargs)canvas = FigureCanvasEMF(thisFig)manager = FigureManagerEMF(canvas, num)return manager", "docstring": "Create a new figure manager instance", "id": "f17229:m2"} {"signature": "def __init__(self, outfile, width, height, dpi):", "body": "self.outfile = outfileself._cached = {}self._fontHandle = {}self.lastHandle = {'':-, '':-, '':-}self.emf=pyemf.EMF(width,height,dpi,'')self.width=int(width*dpi)self.height=int(height*dpi)self.dpi = dpiself.pointstodpi = dpi/self.hackPointsForMathExponent = self.emf.SetBkMode(pyemf.TRANSPARENT)self.emf.SetTextAlign( pyemf.TA_BOTTOM|pyemf.TA_LEFT)if debugPrint: print(\"\" % (self.width,self.height,outfile,dpi))", "docstring": "Initialize the renderer with a gd image instance", "id": "f17229:c3:m0"} {"signature": "def draw_arc(self, gcEdge, rgbFace, x, y, width, height, angle1, angle2, rotation):", "body": "if debugPrint: print(\"\" % (x,y,angle1,angle2,width,height))pen=self.select_pen(gcEdge)brush=self.select_brush(rgbFace)hw=width/hh=height/x1=int(x-width/)y1=int(y-height/)if brush:self.emf.Pie(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/)*hw),int(self.height-(y+math.sin(angle1*math.pi/)*hh)),int(x+math.cos(angle2*math.pi/)*hw),int(self.height-(y+math.sin(angle2*math.pi/)*hh)))else:self.emf.Arc(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/)*hw),int(self.height-(y+math.sin(angle1*math.pi/)*hh)),int(x+math.cos(angle2*math.pi/)*hw),int(self.height-(y+math.sin(angle2*math.pi/)*hh)))", "docstring": "Draw an arc using GraphicsContext instance gcEdge, centered at x,y,\nwith width and height and angles from 0.0 to 360.0\n0 degrees is at 3-o'clock\npositive angles are anti-clockwise\n\nIf the color rgbFace is not None, fill the arc with it.", "id": "f17229:c3:m2"} {"signature": "def draw_image(self, x, y, im, bbox):", "body": "pass", "docstring": "Draw the Image instance into the current axes; x is the\ndistance in pixels from the left hand side of the canvas. y is\nthe distance from the origin. That is, if origin is upper, y\nis the distance from top. 
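Several of the event handlers and drawing methods above convert between the toolkit's top-left origin and matplotlib's bottom-left origin by subtracting y from the canvas height (which is also why flipy() returns True for this renderer). A tiny illustrative helper, with hypothetical names:

def device_to_figure_y(y_device, canvas_height):
    # GUI toolkits and the EMF device put the origin at the top-left with y
    # increasing downwards; matplotlib's figure origin is at the bottom-left,
    # so the renderer mirrors y about the canvas height
    return canvas_height - y_device

def figure_to_device_y(y_figure, canvas_height):
    # the mapping is its own inverse
    return canvas_height - y_figure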
If origin is lower, y is the\ndistance from bottom\n\nbbox is a matplotlib.transforms.BBox instance for clipping, or\nNone", "id": "f17229:c3:m3"} {"signature": "def draw_line(self, gc, x1, y1, x2, y2):", "body": "if debugPrint: print(\"\" % (x1,y1,x2,y2))if self.select_pen(gc):self.emf.Polyline([(int(x1),int(self.height-y1)),(int(x2),int(self.height-y2))])else:if debugPrint: print(\"\" % (x1,y1,x2,y2))", "docstring": "Draw a single line from x1,y1 to x2,y2", "id": "f17229:c3:m4"} {"signature": "def draw_lines(self, gc, x, y):", "body": "if debugPrint: print(\"\" % len(str(x)))if self.select_pen(gc):points = [(int(x[i]), int(self.height-y[i])) for i in range(len(x))]self.emf.Polyline(points)", "docstring": "x and y are equal length arrays, draw lines connecting each\npoint in x, y", "id": "f17229:c3:m5"} {"signature": "def draw_point(self, gc, x, y):", "body": "if debugPrint: print(\"\" % (x,y))pen=EMFPen(self.emf,gc)self.emf.SetPixel(int(x),int(self.height-y),(pen.r,pen.g,pen.b))", "docstring": "Draw a single point at x,y\nWhere 'point' is a device-unit point (or pixel), not a matplotlib point", "id": "f17229:c3:m6"} {"signature": "def draw_polygon(self, gcEdge, rgbFace, points):", "body": "if debugPrint: print(\"\" % len(points))pen=self.select_pen(gcEdge)brush=self.select_brush(rgbFace)if pen or brush:points = [(int(x), int(self.height-y)) for x,y in points]self.emf.Polygon(points)else:points = [(int(x), int(self.height-y)) for x,y in points]if debugPrint: print(\"\" % (len(points),str(points)))", "docstring": "Draw a polygon using the GraphicsContext instance gc.\npoints is a len vertices tuple, each element\ngiving the x,y coords a vertex\n\nIf the color rgbFace is not None, fill the polygon with it", "id": "f17229:c3:m7"} {"signature": "def draw_rectangle(self, gcEdge, rgbFace, x, y, width, height):", "body": "if debugPrint: print(\"\" % (x,y,width,height))pen=self.select_pen(gcEdge)brush=self.select_brush(rgbFace)if pen or brush:self.emf.Rectangle(int(x),int(self.height-y),int(x)+int(width),int(self.height-y)-int(height))else:if debugPrint: print(\"\" % (x,y,width,height))", "docstring": "Draw a non-filled rectangle using the GraphicsContext instance gcEdge,\nwith lower left at x,y with width and height.\n\nIf rgbFace is not None, fill the rectangle with it.", "id": "f17229:c3:m8"} {"signature": "def draw_text(self, gc, x, y, s, prop, angle, ismath=False):", "body": "if debugText: print(\"\" % (x,y,angle,s))if ismath:self.draw_math_text(gc,x,y,s,prop,angle)else:self.draw_plain_text(gc,x,y,s,prop,angle)", "docstring": "Draw the text.Text instance s at x,y (display coords) with font\nproperties instance prop at angle in degrees, using GraphicsContext gc\n\n**backend implementers note**\n\nWhen you are trying to determine if you have gotten your bounding box\nright (which is what enables the text layout/alignment to work\nproperly), it helps to change the line in text.py\n\n if 0: bbox_artist(self, renderer)\n\nto if 1, and then the actual bounding box will be blotted along with\nyour text.", "id": "f17229:c3:m9"} {"signature": "def draw_plain_text(self, gc, x, y, s, prop, angle):", "body": "if debugText: print(\"\" % (x,y,angle,s))if debugText: print(\"\"+str(prop))self.select_font(prop,angle)hackoffsetper300dpi=xhack=math.sin(angle*math.pi/)*hackoffsetper300dpi*self.dpi/yhack=math.cos(angle*math.pi/)*hackoffsetper300dpi*self.dpi/self.emf.TextOut(int(x+xhack),int(y+yhack),s)", "docstring": "Draw a text string verbatim; no conversion is done.", "id": "f17229:c3:m10"} {"signature": "def 
draw_math_text(self, gc, x, y, s, prop, angle):", "body": "if debugText: print(\"\" % (x,y,angle,s))s = s[:-] match=re.match(\"\",s)if match:exp=match.group()if debugText: print(\"\" % exp)font = self._get_font_ttf(prop)font.set_text(\"\", )w, h = font.get_width_height()w /= h /= self.draw_plain_text(gc,x,y,\"\",prop,angle)propexp=prop.copy()propexp.set_size(prop.get_size_in_points()*)self.draw_plain_text(gc,x+w+self.points_to_pixels(self.hackPointsForMathExponent),y-(h/),exp,propexp,angle)else:self.draw_plain_text(gc,x,y,s,prop,angle)", "docstring": "Draw a subset of TeX, currently handles exponents only. Since\npyemf doesn't have any raster functionality yet, the\ntexmanager.get_rgba won't help.", "id": "f17229:c3:m11"} {"signature": "def get_math_text_width_height(self, s, prop):", "body": "if debugText: print(\"\")s = s[:-] match=re.match(\"\",s)if match:exp=match.group()if debugText: print(\"\" % exp)font = self._get_font_ttf(prop)font.set_text(\"\", )w1, h1 = font.get_width_height()propexp=prop.copy()propexp.set_size(prop.get_size_in_points()*)fontexp=self._get_font_ttf(propexp)fontexp.set_text(exp, )w2, h2 = fontexp.get_width_height()w=w1+w2h=h1+(h2/)w /= h /= w+=self.points_to_pixels(self.hackPointsForMathExponent)if debugText: print(\"\" % (s, w, h))else:w,h=self.get_text_width_height(s,prop,False)return w, h", "docstring": "get the width and height in display coords of the string s\nwith FontPropertry prop, ripped right out of backend_ps. This\nmethod must be kept in sync with draw_math_text.", "id": "f17229:c3:m12"} {"signature": "def flipy(self):", "body": "return True", "docstring": "return true if y small numbers are top for renderer\n Is used for drawing text (text.py) and images (image.py) only", "id": "f17229:c3:m13"} {"signature": "def get_canvas_width_height(self):", "body": "return self.width,self.height", "docstring": "return the canvas width and height in display coords", "id": "f17229:c3:m14"} {"signature": "def set_handle(self,type,handle):", "body": "if self.lastHandle[type] != handle:self.emf.SelectObject(handle)self.lastHandle[type]=handle", "docstring": "Update the EMF file with the current handle, but only if it\nisn't the same as the last one. Don't want to flood the file\nwith duplicate info.", "id": "f17229:c3:m15"} {"signature": "def get_font_handle(self, prop, angle):", "body": "prop=EMFFontProperties(prop,angle)size=int(prop.get_size_in_points()*self.pointstodpi)face=prop.get_name()key = hash(prop)handle = self._fontHandle.get(key)if handle is None:handle=self.emf.CreateFont(-size, , int(angle)*, int(angle)*,pyemf.FW_NORMAL, , , ,pyemf.ANSI_CHARSET, pyemf.OUT_DEFAULT_PRECIS,pyemf.CLIP_DEFAULT_PRECIS, pyemf.DEFAULT_QUALITY,pyemf.DEFAULT_PITCH | pyemf.FF_DONTCARE, face);if debugHandle: print(\"\" % (handle,face,size))self._fontHandle[key]=handleif debugHandle: print(\"\" % (handle,face,size))self.set_handle(\"\",handle)return handle", "docstring": "Look up the handle for the font based on the dict of\nproperties *and* the rotation angle, since in EMF the font\nrotation is a part of the font definition.", "id": "f17229:c3:m16"} {"signature": "def select_pen(self, gc):", "body": "pen=EMFPen(self.emf,gc)key=hash(pen)handle=self._fontHandle.get(key)if handle is None:handle=pen.get_handle()self._fontHandle[key]=handleif debugHandle: print(\"\" % handle)self.set_handle(\"\",handle)if pen.style != pyemf.PS_NULL:return penelse:return None", "docstring": "Select a pen that includes the color, line width and line\nstyle. 
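get_font_handle, select_pen and select_brush above share one caching idiom: hash the drawing properties, look the handle up in a dict, and only create (and select) a new EMF handle on a miss, so duplicate object definitions never flood the file. A generic sketch of that idiom; the class and argument names are assumptions for illustration.

class HandleCacheSketch:
    def __init__(self, factory):
        # factory builds the expensive device object (font, pen, brush, ...)
        self._factory = factory
        self._handles = {}

    def get(self, props):
        # props must be hashable; identical properties reuse the same handle
        key = hash(props)
        handle = self._handles.get(key)
        if handle is None:
            handle = self._factory(props)
            self._handles[key] = handle
        return handle

# example: pens keyed by (rgb, linewidth, style)
pens = HandleCacheSketch(lambda p: "pen for %r" % (p,))
assert pens.get(((0, 0, 0), 1.0, "solid")) is pens.get(((0, 0, 0), 1.0, "solid"))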
Return the pen if it will draw a line, or None if the\npen won't produce any output (i.e. the style is PS_NULL)", "id": "f17229:c3:m18"} {"signature": "def select_brush(self, rgb):", "body": "if rgb is not None:brush=EMFBrush(self.emf,rgb)key=hash(brush)handle=self._fontHandle.get(key)if handle is None:handle=brush.get_handle()self._fontHandle[key]=handleif debugHandle: print(\"\" % handle)self.set_handle(\"\",handle)return brushelse:return None", "docstring": "Select a fill color, and return the brush if the color is\nvalid or None if this won't produce a fill operation.", "id": "f17229:c3:m19"} {"signature": "def _get_font_ttf(self, prop):", "body": "key = hash(prop)font = _fontd.get(key)if font is None:fname = findfont(prop)if debugText: print(\"\" % fname)font = FT2Font(str(fname))_fontd[key] = fontfont.clear()size = prop.get_size_in_points()font.set_size(size, self.dpi)return font", "docstring": "get the true type font properties, used because EMFs on\nwindows will use true type fonts.", "id": "f17229:c3:m20"} {"signature": "def get_text_width_height(self, s, prop, ismath):", "body": "if debugText: print(\"\" % (str(ismath),str(prop)))if ismath:if debugText: print(\"\" % str(ismath))w,h = self.get_math_text_width_height(s, prop)return w,hfont = self._get_font_ttf(prop)font.set_text(s, )w, h = font.get_width_height()w /= h /= if debugText: print(\"\" % (s, w, h))return w, h", "docstring": "get the width and height in display coords of the string s\nwith FontPropertry prop, ripped right out of backend_ps", "id": "f17229:c3:m21"} {"signature": "def draw(self):", "body": "pass", "docstring": "Draw the figure using the renderer", "id": "f17229:c5:m0"} {"signature": "def show():", "body": "_macosx.show()", "docstring": "Show all the figures and enter the Cocoa mainloop.\n This function will not return until all windows are closed or\n the interpreter exits.", "id": "f17230:m0"} {"signature": "def draw_if_interactive():", "body": "figManager = Gcf.get_active()if figManager is not None:figManager.canvas.invalidate()", "docstring": "For performance reasons, we don't want to redraw the figure after\neach draw command. 
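The macosx draw_if_interactive described here relies on an invalidate-now, redraw-later scheme: each drawing command only marks the canvas dirty, and the one expensive repaint happens when the event loop regains control. A toolkit-free sketch of that scheme, with illustrative names:

class LazyRedrawCanvasSketch:
    def __init__(self):
        self._invalid = False

    def invalidate(self):
        # cheap: called after every drawing command
        self._invalid = True

    def flush_events(self):
        # called when the event loop (or PyOS_InputHook) gets control back;
        # exactly one real redraw happens no matter how many commands ran
        if self._invalid:
            self.draw()
            self._invalid = False

    def draw(self):
        print("expensive redraw")

canvas = LazyRedrawCanvasSketch()
for _ in range(100):
    canvas.invalidate()
canvas.flush_events()   # prints "expensive redraw" exactly once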
Instead, we mark the figure as invalid, so that\nit will be redrawn as soon as the event loop resumes via PyOS_InputHook.\nThis function should be called after each draw event, even if\nmatplotlib is not running interactively.", "id": "f17230:m1"} {"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "FigureClass = kwargs.pop('', Figure)figure = FigureClass(*args, **kwargs)canvas = FigureCanvasMac(figure)manager = FigureManagerMac(canvas, num)return manager", "docstring": "Create a new figure manager instance", "id": "f17230:m2"} {"signature": "def _get_style(self, gc, rgbFace):", "body": "if rgbFace is None:fill = ''else:fill = rgb2hex(rgbFace[:])offset, seq = gc.get_dashes()if seq is None:dashes = ''else:dashes = '' % (''.join([''%val for val in seq]), offset)linewidth = gc.get_linewidth()if linewidth:return '''' % (fill,rgb2hex(gc.get_rgb()[:]),linewidth,gc.get_joinstyle(),_capstyle_d[gc.get_capstyle()],dashes,gc.get_alpha(),)else:return '' % (fill,gc.get_alpha(),)", "docstring": "return the style string.\nstyle is generated from the GraphicsContext, rgbFace and clippath", "id": "f17232:c0:m3"} {"signature": "def option_image_nocomposite(self):", "body": "return rcParams['']", "docstring": "if svg.image_noscale is True, compositing multiple images into one is prohibited", "id": "f17232:c0:m7"} {"signature": "def _draw_mathtext(self, gc, x, y, s, prop, angle):", "body": "width, height, descent, svg_elements, used_characters =self.mathtext_parser.parse(s, , prop)svg_glyphs = svg_elements.svg_glyphssvg_rects = svg_elements.svg_rectscolor = rgb2hex(gc.get_rgb()[:])write = self._svgwriter.writestyle = \"\" % colorif rcParams['']:new_chars = []for font, fontsize, thetext, new_x, new_y_mtc, metrics in svg_glyphs:path = self._add_char_def(font, thetext)if path is not None:new_chars.append(path)if len(new_chars):write('')for path in new_chars:write(path)write('')svg = ['' % style]if angle != :svg.append(''% (x,y,-angle) )else:svg.append('' % (x, y))svg.append('')for font, fontsize, thetext, new_x, new_y_mtc, metrics in svg_glyphs:charid = self._get_char_def_id(font, thetext)svg.append('' %(charid, new_x, -new_y_mtc, fontsize / self.FONT_SCALE))svg.append('')else: svg = ['' % (style, x, y)]if angle != :svg.append(''% (x,y,-angle,-x,-y) ) svg.append('')curr_x,curr_y = ,for font, fontsize, thetext, new_x, new_y_mtc, metrics in svg_glyphs:new_y = - new_y_mtcstyle = \"\" % (fontsize, font.family_name)svg.append('' % style)xadvance = metrics.advancesvg.append('' % xadvance)dx = new_x - curr_xif dx != :svg.append('' % dx)dy = new_y - curr_yif dy != :svg.append('' % dy)thetext = escape_xml_text(thetext)svg.append('' % thetext)curr_x = new_x + xadvancecurr_y = new_ysvg.append('')if len(svg_rects):style = \"\" % colorsvg.append('' % style)if angle != :svg.append(''% (x,y,-angle) )else:svg.append('' % (x, y))svg.append('')for x, y, width, height in svg_rects:svg.append('' % (x, -y + height, width, height))svg.append(\"\")self.open_group(\"\")write (''.join(svg))self.close_group(\"\")", "docstring": "Draw math text using matplotlib.mathtext", "id": "f17232:c0:m17"} {"signature": "def quote_ps_string(s):", "body": "s=s.replace(\"\", \"\")s=s.replace(\"\", \"\")s=s.replace(\"\", \"\")s=s.replace(\"\", \"\")s=s.replace(\"\", \"\")s=re.sub(r\"\", lambda x: r\"\"%ord(x.group()), s)return s", "docstring": "Quote dangerous characters of S for use in a PostScript string constant.", "id": "f17234:m4"} {"signature": "def seq_allequal(seq1, seq2):", "body": "if seq1 is None:return seq2 is Noneif seq2 
is None:return Falseif len(seq1) != len(seq2): return Falsereturn npy.alltrue(npy.equal(seq1, seq2))", "docstring": "seq1 and seq2 are either None or sequences or numerix arrays\nReturn True if both are None or both are seqs with identical\nelements", "id": "f17234:m5"} {"signature": "def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble,paperWidth, paperHeight, orientation):", "body": "tmpdir = os.path.split(tmpfile)[]epsfile = tmpfile+''shutil.move(tmpfile, epsfile)latexfile = tmpfile+''outfile = tmpfile+''latexh = file(latexfile, '')dvifile = tmpfile+''psfile = tmpfile+''if orientation=='': angle = else: angle = if rcParams['']:unicode_preamble = \"\"\"\"\"\"else:unicode_preamble = ''s =", "docstring": "When we want to use the LaTeX backend with postscript, we write PSFrag tags\nto a temporary postscript file, each one marking a position for LaTeX to\nrender some text. convert_psfrags generates a LaTeX document containing the\ncommands to convert those tags to text. LaTeX/dvips produces the postscript\nfile that includes the actual text.", "id": "f17234:m7"} {"signature": "def gs_distill(tmpfile, eps=False, ptype='', bbox=None):", "body": "paper = ''% ptypepsfile = tmpfile + ''outfile = tmpfile + ''dpi = rcParams['']if sys.platform == '': gs_exe = ''else: gs_exe = ''command = ''% (gs_exe, dpi, paper, psfile, tmpfile, outfile)verbose.report(command, '')exit_status = os.system(command)fh = file(outfile)if exit_status: raise RuntimeError('' + fh.read())else: verbose.report(fh.read(), '')fh.close()os.remove(outfile)os.remove(tmpfile)shutil.move(psfile, tmpfile)if eps:pstoeps(tmpfile, bbox)", "docstring": "Use ghostscript's pswrite or epswrite device to distill a file.\nThis yields smaller files without illegal encapsulated postscript\noperators. The output is low-level, converting text to outlines.", "id": "f17234:m8"} {"signature": "def xpdf_distill(tmpfile, eps=False, ptype='', bbox=None):", "body": "pdffile = tmpfile + ''psfile = tmpfile + ''outfile = tmpfile + ''command = ''%pe, tmpfile, pdffile, outfile)if sys.platform == '': command = command.replace('', '')verbose.report(command, '')exit_status = os.system(command)fh = file(outfile)if exit_status: raise RuntimeError('' + fh.read())else: verbose.report(fh.read(), '')fh.close()os.remove(outfile)command = ''%(pdffile, psfile, outfile)verbose.report(command, '')exit_status = os.system(command)fh = file(outfile)if exit_status: raise RuntimeError('' + fh.read())else: verbose.report(fh.read(), '')fh.close()os.remove(outfile)os.remove(tmpfile)shutil.move(psfile, tmpfile)if eps:pstoeps(tmpfile, bbox)for fname in glob.glob(tmpfile+''):os.remove(fname)", "docstring": "Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file.\nThis yields smaller files without illegal encapsulated postscript\noperators. 
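gs_distill above shells out to Ghostscript with the pswrite/epswrite device to rewrite the PostScript at a fixed resolution. Below is a hedged sketch of the same invocation using subprocess rather than os.system; the executable names, the pswrite device (replaced by ps2write in newer Ghostscript releases) and the option values are assumptions based on the surrounding code, not a verbatim reconstruction.

import shutil
import subprocess
import sys

def gs_distill_sketch(tmpfile, dpi=6000, paper="letter"):
    gs = "gswin32c" if sys.platform == "win32" else "gs"
    psfile = tmpfile + ".ps"
    cmd = [gs, "-dBATCH", "-dNOPAUSE",
           "-r%d" % dpi,
           "-sDEVICE=pswrite",           # ps2write on modern Ghostscript
           "-sPAPERSIZE=%s" % paper,
           "-sOutputFile=%s" % psfile,
           tmpfile]
    subprocess.check_call(cmd)           # raises CalledProcessError on failure
    shutil.move(psfile, tmpfile)         # replace the original with the distilled file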
This distiller is preferred, generating high-level postscript\noutput that treats text as text.", "id": "f17234:m9"} {"signature": "def get_bbox(tmpfile, bbox):", "body": "outfile = tmpfile + ''if sys.platform == '': gs_exe = ''else: gs_exe = ''command = '' %(gs_exe, tmpfile)verbose.report(command, '')stdin, stdout, stderr = os.popen3(command)verbose.report(stdout.read(), '')bbox_info = stderr.read()verbose.report(bbox_info, '')bbox_found = re.search('', bbox_info)if bbox_found:bbox_info = bbox_found.group()else:raise RuntimeError(''% bbox_info)l, b, r, t = [float(i) for i in bbox_info.split()[-:]]if bbox is None:l, b, r, t = (l-, b-, r+, t+)else:x = (l+r)/y = (b+t)/dx = (bbox[]-bbox[])/dy = (bbox[]-bbox[])/l,b,r,t = (x-dx, y-dy, x+dx, y+dy)bbox_info = '' % (l, b, npy.ceil(r), npy.ceil(t))hires_bbox_info = '' % (l, b, r, t)return ''.join([bbox_info, hires_bbox_info])", "docstring": "Use ghostscript's bbox device to find the center of the bounding box. Return\nan appropriately sized bbox centered around that point. A bit of a hack.", "id": "f17234:m10"} {"signature": "def pstoeps(tmpfile, bbox):", "body": "bbox_info = get_bbox(tmpfile, bbox)epsfile = tmpfile + ''epsh = file(epsfile, '')tmph = file(tmpfile)line = tmph.readline()while line:if line.startswith(''):print(\"\", file=epsh)print(bbox_info, file=epsh)elif line.startswith(''):epsh.write(line)print('', file=epsh)print('', file=epsh)print('', file=epsh)print('', file=epsh)print('', file=epsh)print('', file=epsh)print('', file=epsh)print('', file=epsh)print('', file=epsh)breakelif line.startswith('')or line.startswith('')or line.startswith(''):passelse:epsh.write(line)line = tmph.readline()line = tmph.readline()while line:if line.startswith(''):print('', file=epsh)print('', file=epsh)print('', file=epsh)print('', file=epsh)print('', file=epsh)if rcParams[''] == '':line = tmph.readline()else:epsh.write(line)line = tmph.readline()tmph.close()epsh.close()os.remove(tmpfile)shutil.move(epsfile, tmpfile)", "docstring": "Convert the postscript to encapsulated postscript.", "id": "f17234:m11"} {"signature": "def __init__(self, width, height, pswriter, imagedpi=):", "body": "RendererBase.__init__(self)self.width = widthself.height = heightself._pswriter = pswriterif rcParams['']:self.textcnt = self.psfrag = []self.imagedpi = imagedpiif rcParams['']:self.simplify = (width * imagedpi, height * imagedpi)else:self.simplify = Noneself.color = Noneself.linewidth = Noneself.linejoin = Noneself.linecap = Noneself.linedash = Noneself.fontname = Noneself.fontsize = Noneself.hatch = Noneself.image_magnification = imagedpi/self._clip_paths = {}self._path_collection_id = self.used_characters = {}self.mathtext_parser = MathTextParser(\"\")", "docstring": "Although postscript itself is dpi independent, we need to\nimform the image code about a requested dpi to generate high\nres images and them scale them before embeddin them", "id": "f17234:c0:m0"} {"signature": "def track_characters(self, font, s):", "body": "realpath, stat_key = get_realpath_and_stat(font.fname)used_characters = self.used_characters.setdefault(stat_key, (realpath, set()))used_characters[].update([ord(x) for x in s])", "docstring": "Keeps track of which characters are required from\n each font.", "id": "f17234:c0:m1"} {"signature": "def set_hatch(self, hatch):", "body": "hatches = {'':, '':, '':, '':}for letter in hatch:if (letter == ''): hatches[''] += elif (letter == ''): hatches[''] += elif (letter == ''): hatches[''] += elif (letter == ''): hatches[''] += elif (letter == ''):hatches[''] 
+= hatches[''] += elif (letter.lower() == ''):hatches[''] += hatches[''] += def do_hatch(angle, density):if (density == ): return \"\"return", "docstring": "hatch can be one of:\n / - diagonal hatching\n \\ - back diagonal\n | - vertical\n - - horizontal\n + - crossed\n X - crossed diagonal\n\nletters can be combined, in which case all the specified\nhatchings are done\n\nif same letter repeats, it increases the density of hatching\nin that direction", "id": "f17234:c0:m9"} {"signature": "def get_canvas_width_height(self):", "body": "return self.width, self.height", "docstring": "return the canvas width and height in display coords", "id": "f17234:c0:m10"} {"signature": "def get_text_width_height_descent(self, s, prop, ismath):", "body": "if rcParams['']:texmanager = self.get_texmanager()fontsize = prop.get_size_in_points()l,b,r,t = texmanager.get_ps_bbox(s, fontsize)w = (r-l)h = (t-b)return w, h, if ismath:width, height, descent, pswriter, used_characters =self.mathtext_parser.parse(s, , prop)return width, height, descentif rcParams['']:if ismath: s = s[:-]font = self._get_font_afm(prop)l,b,w,h,d = font.get_str_bbox_and_descent(s)fontsize = prop.get_size_in_points()scale = *fontsizew *= scaleh *= scaled *= scalereturn w, h, dfont = self._get_font_ttf(prop)font.set_text(s, , flags=LOAD_NO_HINTING)w, h = font.get_width_height()w /= h /= d = font.get_descent()d /= return w, h, d", "docstring": "get the width and height in display coords of the string s\nwith FontPropertry prop", "id": "f17234:c0:m11"} {"signature": "def flipy(self):", "body": "return False", "docstring": "return true if small y numbers are top for renderer", "id": "f17234:c0:m12"} {"signature": "def get_image_magnification(self):", "body": "return self.image_magnification", "docstring": "Get the factor by which to magnify images passed to draw_image.\nAllows a backend to have images at a different resolution to other\nartists.", "id": "f17234:c0:m19"} {"signature": "def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):", "body": "im.flipud_out()if im.is_grayscale:h, w, bits = self._gray(im)imagecmd = \"\"else:h, w, bits = self._rgb(im)imagecmd = \"\"hexlines = ''.join(self._hex_lines(bits))xscale, yscale = (w/self.image_magnification, h/self.image_magnification)figh = self.height*clip = []if bbox is not None:clipx,clipy,clipw,cliph = bbox.boundsclip.append('' % _nums_to_str(clipw, cliph, clipx, clipy))if clippath is not None:id = self._get_clip_path(clippath, clippath_trans)clip.append('' % id)clip = ''.join(clip)ps =", "docstring": "Draw the Image instance into the current axes; x is the\ndistance in pixels from the left hand side of the canvas and y\nis the distance from bottom\n\nbbox is a matplotlib.transforms.BBox instance for clipping, or\nNone", "id": "f17234:c0:m20"} {"signature": "def draw_path(self, gc, path, transform, rgbFace=None):", "body": "ps = self._convert_path(path, transform, self.simplify)self._draw_ps(ps, gc, rgbFace)", "docstring": "Draws a Path instance using the given affine transform.", "id": "f17234:c0:m23"} {"signature": "def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):", "body": "if debugPS: self._pswriter.write('')write = self._pswriter.writeif rgbFace:if rgbFace[]==rgbFace[] and rgbFace[]==rgbFace[]:ps_color = '' % rgbFace[]else:ps_color = '' % rgbFaceps_cmd = ['', '', '', ''] ps_cmd.append(self._convert_path(marker_path, marker_trans))if rgbFace:ps_cmd.extend(['', ps_color, '', ''])ps_cmd.extend(['', '', ''])tpath = 
trans.transform_path(path)for vertices, code in tpath.iter_segments():if len(vertices):x, y = vertices[-:]ps_cmd.append(\"\" % (x, y))ps = ''.join(ps_cmd)self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False)", "docstring": "Draw the markers defined by path at each of the positions in x\nand y. path coordinates are points, x and y coords will be\ntransformed by the transform", "id": "f17234:c0:m24"} {"signature": "def draw_tex(self, gc, x, y, s, prop, angle, ismath=''):", "body": "w, h, bl = self.get_text_width_height_descent(s, prop, ismath)fontsize = prop.get_size_in_points()corr = pos = _nums_to_str(x-corr, y)thetext = '' % self.textcntcolor = ''% gc.get_rgb()[:]fontcmd = {'' : r'','' : r''}.get(rcParams[''], r'')s = fontcmd % stex = r'' % (color, s)self.psfrag.append(r''%(thetext, angle, fontsize, fontsize*, tex))ps =", "docstring": "draw a Text instance", "id": "f17234:c0:m26"} {"signature": "def draw_text(self, gc, x, y, s, prop, angle, ismath):", "body": "write = self._pswriter.writeif debugPS:write(\"\")if ismath=='':return self.tex(gc, x, y, s, prop, angle)elif ismath:return self.draw_mathtext(gc, x, y, s, prop, angle)elif isinstance(s, str):return self.draw_unicode(gc, x, y, s, prop, angle)elif rcParams['']:font = self._get_font_afm(prop)l,b,w,h = font.get_str_bbox(s)fontsize = prop.get_size_in_points()l *= *fontsizeb *= *fontsizew *= *fontsizeh *= *fontsizeif angle==: l,b = -b, l pos = _nums_to_str(x-l, y-b)thetext = '' % sfontname = font.get_fontname()fontsize = prop.get_size_in_points()rotate = '' % anglesetcolor = '' % gc.get_rgb()[:]ps =", "docstring": "draw a Text instance", "id": "f17234:c0:m27"} {"signature": "def draw_unicode(self, gc, x, y, s, prop, angle):", "body": "if rcParams['']:self.set_color(*gc.get_rgb())font = self._get_font_afm(prop)fontname = font.get_fontname()fontsize = prop.get_size_in_points()scale = *fontsizethisx = thisy = font.get_str_bbox_and_descent(s)[] * scalelast_name = Nonelines = []for c in s:name = uni2type1.get(ord(c), '')try:width = font.get_width_from_char_name(name)except KeyError:name = ''width = font.get_width_char('')if last_name is not None:kern = font.get_kern_dist_from_name(last_name, name)else:kern = last_name = namethisx += kern * scalelines.append(''%(thisx, thisy, name))thisx += width * scalethetext = \"\".join(lines)ps = \"\"\"\"\"\"gsavey)f translatef rotatet)scals()self._pswriter.write(ps)", "docstring": "draw a unicode string. 
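The text measurement done by this renderer (get_text_width_height_descent above) uses FT2Font and divides the results by 64 because FreeType reports metrics in 1/64ths of a pixel. A small standalone sketch of that measurement path, assuming a standard matplotlib install; the font family and sizes are arbitrary example values.

from matplotlib.font_manager import FontProperties, findfont
from matplotlib.ft2font import FT2Font, LOAD_NO_HINTING

def text_extent_sketch(s, family="DejaVu Sans", size=12.0, dpi=72.0):
    fname = findfont(FontProperties(family=family))
    font = FT2Font(str(fname))
    font.set_size(size, dpi)
    font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
    w, h = font.get_width_height()       # reported in 1/64ths of a pixel
    d = font.get_descent()
    return w / 64.0, h / 64.0, d / 64.0

print(text_extent_sketch("matplotlib"))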
ps doesn't have unicode support, so\n we have to do this the hard way", "id": "f17234:c0:m29"} {"signature": "def draw_mathtext(self, gc,x, y, s, prop, angle):", "body": "if debugPS:self._pswriter.write(\"\")width, height, descent, pswriter, used_characters =self.mathtext_parser.parse(s, , prop)self.merge_used_characters(used_characters)self.set_color(*gc.get_rgb())thetext = pswriter.getvalue()ps =", "docstring": "Draw the math text using matplotlib.mathtext", "id": "f17234:c0:m30"} {"signature": "def _draw_ps(self, ps, gc, rgbFace, fill=True, stroke=True, command=None):", "body": "write = self._pswriter.writeif debugPS and command:write(\"\"+command+\"\")mightstroke = (gc.get_linewidth() > and(len(gc.get_rgb()) <= or gc.get_rgb()[] != ))stroke = stroke and mightstrokefill = (fill and rgbFace is not None and(len(rgbFace) <= or rgbFace[] != ))if mightstroke:self.set_linewidth(gc.get_linewidth())jint = gc.get_joinstyle()self.set_linejoin(jint)cint = gc.get_capstyle()self.set_linecap(cint)self.set_linedash(*gc.get_dashes())self.set_color(*gc.get_rgb()[:])write('')cliprect = gc.get_clip_rectangle()if cliprect:x,y,w,h=cliprect.boundswrite('' % (w,h,x,y))clippath, clippath_trans = gc.get_clip_path()if clippath:id = self._get_clip_path(clippath, clippath_trans)write('' % id)write(ps.strip())write(\"\")if fill:if stroke:write(\"\")self.set_color(store=, *rgbFace[:])write(\"\")else:self.set_color(store=, *rgbFace[:])write(\"\")hatch = gc.get_hatch()if hatch:self.set_hatch(hatch)if stroke:write(\"\")write(\"\")", "docstring": "Emit the PostScript sniplet 'ps' with all the attributes from 'gc'\napplied. 'ps' must consist of PostScript commands to construct a path.\n\nThe fill and/or stroke kwargs can be set to False if the\n'ps' string already includes filling and/or stroking, in\nwhich case _draw_ps is just supplying properties and\nclipping.", "id": "f17234:c0:m31"} {"signature": "def _print_figure(self, outfile, format, dpi=, facecolor='', edgecolor='',orientation='', isLandscape=False, papertype=None):", "body": "isEPSF = format == ''passed_in_file_object = Falseif is_string_like(outfile):title = outfiletmpfile = os.path.join(gettempdir(), md5(outfile).hexdigest())elif is_writable_file_like(outfile):title = Nonetmpfile = os.path.join(gettempdir(), md5(str(hash(outfile))).hexdigest())passed_in_file_object = Trueelse:raise ValueError(\"\")fh = file(tmpfile, '')width, height = self.figure.get_size_inches()if papertype == '':if isLandscape: papertype = _get_papertype(height, width)else: papertype = _get_papertype(width, height)if isLandscape: paperHeight, paperWidth = papersize[papertype]else: paperWidth, paperHeight = papersize[papertype]if rcParams[''] and not papertype == '':if width>paperWidth or height>paperHeight:if isLandscape:papertype = _get_papertype(height, width)paperHeight, paperWidth = papersize[papertype]else:papertype = _get_papertype(width, height)paperWidth, paperHeight = papersize[papertype]xo = **(paperWidth - width)yo = **(paperHeight - height)l, b, w, h = self.figure.bbox.boundsllx = xolly = yourx = llx + wury = lly + hrotation = if isLandscape:llx, lly, urx, ury = lly, llx, ury, urxxo, yo = *paperHeight - yo, xorotation = bbox = (llx, lly, urx, ury)origfacecolor = self.figure.get_facecolor()origedgecolor = self.figure.get_edgecolor()self.figure.set_facecolor(facecolor)self.figure.set_edgecolor(edgecolor)self._pswriter = StringIO()renderer = RendererPS(width, height, self._pswriter, 
imagedpi=dpi)self.figure.draw(renderer)self.figure.set_facecolor(origfacecolor)self.figure.set_edgecolor(origedgecolor)if isEPSF: print(\"\", file=fh)else: print(\"\", file=fh)if title: print(\"\"+title, file=fh)print((\"\"+__version__+\"\"), file=fh)print(\"\"+time.ctime(time.time()), file=fh)print(\"\" + orientation, file=fh)if not isEPSF: print(\"\"+papertype, file=fh)print(\"\" % bbox, file=fh)if not isEPSF: print(\"\", file=fh)print(\"\", file=fh)Ndict = len(psDefs)print(\"\", file=fh)if not rcParams['']:Ndict += len(renderer.used_characters)print(\"\"%Ndict, file=fh)print(\"\", file=fh)for d in psDefs:d=d.strip()for l in d.split(''):print(l.strip(), file=fh)if not rcParams['']:for font_filename, chars in list(renderer.used_characters.values()):if len(chars):font = FT2Font(font_filename)cmap = font.get_charmap()glyph_ids = []for c in chars:gind = cmap.get(c) or glyph_ids.append(gind)if is_opentype_cff_font(font_filename):raise RuntimeError(\"\")else:fonttype = rcParams['']convert_ttf_to_ps(font_filename, fh, rcParams[''], glyph_ids)print(\"\", file=fh)print(\"\", file=fh)if not isEPSF: print(\"\", file=fh)print(\"\", file=fh)print(\"\"%_nums_to_str(xo, yo), file=fh)if rotation: print(\"\"%rotation, file=fh)print(\"\"%_nums_to_str(width*, height*, , ), file=fh)print(self._pswriter.getvalue(), file=fh)print(\"\", file=fh)print(\"\", file=fh)if not isEPSF: print(\"\", file=fh)fh.close()if rcParams[''] == '':gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)elif rcParams[''] == '':xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)if passed_in_file_object:fh = file(tmpfile)print(fh.read(), file=outfile)else:shutil.move(tmpfile, outfile)", "docstring": "Render the figure to hardcopy. Set the figure patch face and\nedge colors. This is useful because some of the GUIs have a\ngray figure face color background and you'll probably want to\noverride this on hardcopy\n\nIf outfile is a string, it is interpreted as a file name.\nIf the extension matches .ep* write encapsulated postscript,\notherwise write a stand-alone PostScript file.\n\nIf outfile is a file object, a stand-alone PostScript file is\nwritten into this file object.", "id": "f17234:c2:m5"} {"signature": "def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor,orientation, isLandscape, papertype):", "body": "isEPSF = format == ''title = outfiletmpfile = os.path.join(gettempdir(), md5(outfile).hexdigest())fh = file(tmpfile, '')self.figure.dpi = width, height = self.figure.get_size_inches()xo = yo = l, b, w, h = self.figure.bbox.boundsllx = xolly = yourx = llx + wury = lly + hbbox = (llx, lly, urx, ury)origfacecolor = self.figure.get_facecolor()origedgecolor = self.figure.get_edgecolor()self.figure.set_facecolor(facecolor)self.figure.set_edgecolor(edgecolor)self._pswriter = StringIO()renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)self.figure.draw(renderer)self.figure.set_facecolor(origfacecolor)self.figure.set_edgecolor(origedgecolor)print(\"\", file=fh)if title: print(\"\"+title, file=fh)print((\"\"+__version__+\"\"), file=fh)print(\"\"+time.ctime(time.time()), file=fh)print(\"\" % bbox, file=fh)print(\"\", file=fh)Ndict = len(psDefs)print(\"\", file=fh)print(\"\"%Ndict, file=fh)print(\"\", file=fh)for d in psDefs:d=d.strip()for l in d.split(''):print(l.strip(), file=fh)print(\"\", file=fh)print(\"\", file=fh)print(\"\", file=fh)print(\"\"%_nums_to_str(xo, yo), file=fh)print(\"\"%_nums_to_str(width*, height*, , ), file=fh)print(self._pswriter.getvalue(), file=fh)print(\"\", 
file=fh)print(\"\", file=fh)fh.close()if isLandscape: isLandscape = Truewidth, height = height, widthbbox = (lly, llx, ury, urx)temp_papertype = _get_papertype(width, height)if papertype=='':papertype = temp_papertypepaperWidth, paperHeight = papersize[temp_papertype]else:paperWidth, paperHeight = papersize[papertype]if (width>paperWidth or height>paperHeight) and isEPSF:paperWidth, paperHeight = papersize[temp_papertype]verbose.report(''%(papertype, temp_papertype), '')texmanager = renderer.get_texmanager()font_preamble = texmanager.get_font_preamble()custom_preamble = texmanager.get_custom_preamble()convert_psfrags(tmpfile, renderer.psfrag, font_preamble,custom_preamble, paperWidth, paperHeight,orientation)if rcParams[''] == '':gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)elif rcParams[''] == '':xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)elif rcParams['']:if False: pass else: gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)if isinstance(outfile, file):fh = file(tmpfile)print(fh.read(), file=outfile)else: shutil.move(tmpfile, outfile)", "docstring": "If text.usetex is True in rc, a temporary pair of tex/eps files\nare created to allow tex to manage the text layout via the PSFrags\npackage. These files are processed to yield the final ps or eps file.", "id": "f17234:c2:m6"} {"signature": "def __call__(self, o):", "body": "if not self.available(o):raise ValueError('')self._owner = o", "docstring": "reserve the lock for o", "id": "f17235:c0:m1"} {"signature": "def release(self, o):", "body": "if not self.available(o):raise ValueError('')self._owner = None", "docstring": "release the lock", "id": "f17235:c0:m2"} {"signature": "def available(self, o):", "body": "return not self.locked() or self.isowner(o)", "docstring": "drawing is available to o", "id": "f17235:c0:m3"} {"signature": "def isowner(self, o):", "body": "return self._owner is o", "docstring": "o owns the lock", "id": "f17235:c0:m4"} {"signature": "def locked(self):", "body": "return self._owner is not None", "docstring": "the lock is held", "id": "f17235:c0:m5"} {"signature": "def __init__(self, ax, label, image=None,color='', hovercolor=''):", "body": "if image is not None:ax.imshow(image)self.label = ax.text(, , label,verticalalignment='',horizontalalignment='',transform=ax.transAxes)self.cnt = self.observers = {}self.ax = axax.figure.canvas.mpl_connect('', self._click)ax.figure.canvas.mpl_connect('', self._motion)ax.set_navigate(False)ax.set_axis_bgcolor(color)ax.set_xticks([])ax.set_yticks([])self.color = colorself.hovercolor = hovercolorself._lastcolor = color", "docstring": "ax is the Axes instance the button will be placed into\n\nlabel is a string which is the button text\n\nimage if not None, is an image to place in the button -- can\n be any legal arg to imshow (numpy array, matplotlib Image\n instance, or PIL image)\n\ncolor is the color of the button when not activated\n\nhovercolor is the color of the button when the mouse is over\n it", "id": "f17235:c2:m0"} {"signature": "def on_clicked(self, func):", "body": "cid = self.cntself.observers[cid] = funcself.cnt += return cid", "docstring": "When the button is clicked, call this func with event\n\nA connection id is returned which can be used to disconnect", "id": "f17235:c2:m3"} {"signature": "def disconnect(self, cid):", "body": "try: del self.observers[cid]except KeyError: pass", "docstring": "remove the observer with connection id cid", "id": "f17235:c2:m4"} {"signature": "def __init__(self, ax, label, valmin, valmax, valinit=, 
valfmt='',closedmin=True, closedmax=True, slidermin=None, slidermax=None,dragging=True, **kwargs):", "body": "self.ax = axself.valmin = valminself.valmax = valmaxself.val = valinitself.valinit = valinitself.poly = ax.axvspan(valmin,valinit,,, **kwargs)self.vline = ax.axvline(valinit,,, color='', lw=)self.valfmt=valfmtax.set_yticks([])ax.set_xlim((valmin, valmax))ax.set_xticks([])ax.set_navigate(False)ax.figure.canvas.mpl_connect('', self._update)if dragging:ax.figure.canvas.mpl_connect('', self._update)self.label = ax.text(-, , label, transform=ax.transAxes,verticalalignment='',horizontalalignment='')self.valtext = ax.text(, , valfmt%valinit,transform=ax.transAxes,verticalalignment='',horizontalalignment='')self.cnt = self.observers = {}self.closedmin = closedminself.closedmax = closedmaxself.slidermin = sliderminself.slidermax = slidermax", "docstring": "Create a slider from valmin to valmax in axes ax;\n\nvalinit - the slider initial position\n\nlabel - the slider label\n\nvalfmt - used to format the slider value\n\nclosedmin and closedmax - indicate whether the slider interval is closed\n\nslidermin and slidermax - be used to contrain the value of\n this slider to the values of other sliders.\n\nadditional kwargs are passed on to self.poly which is the\nmatplotlib.patches.Rectangle which draws the slider. See the\nmatplotlib.patches.Rectangle documentation for legal property\nnames (eg facecolor, edgecolor, alpha, ...)", "id": "f17235:c3:m0"} {"signature": "def _update(self, event):", "body": "if event.button !=: returnif event.inaxes != self.ax: returnval = event.xdataif not self.closedmin and val<=self.valmin: returnif not self.closedmax and val>=self.valmax: returnif self.slidermin is not None:if val<=self.slidermin.val: returnif self.slidermax is not None:if val>=self.slidermax.val: returnself.set_val(val)", "docstring": "update the slider position", "id": "f17235:c3:m1"} {"signature": "def on_changed(self, func):", "body": "cid = self.cntself.observers[cid] = funcself.cnt += return cid", "docstring": "When the slider valud is changed, call this func with the new\nslider position\n\nA connection id is returned which can be used to disconnect", "id": "f17235:c3:m3"} {"signature": "def disconnect(self, cid):", "body": "try: del self.observers[cid]except KeyError: pass", "docstring": "remove the observer with connection id cid", "id": "f17235:c3:m4"} {"signature": "def reset(self):", "body": "if (self.val != self.valinit):self.set_val(self.valinit)", "docstring": "reset the slider to the initial value if needed", "id": "f17235:c3:m5"} {"signature": "def __init__(self, ax, labels, actives):", "body": "ax.set_xticks([])ax.set_yticks([])ax.set_navigate(False)if len(labels)>:dy = /(len(labels)+)ys = np.linspace(-dy, dy, len(labels))else:dy = ys = []cnt = axcolor = ax.get_axis_bgcolor()self.labels = []self.lines = []self.rectangles = []lineparams = {'':'', '':, '':ax.transAxes,'':''}for y, label in zip(ys, labels):t = ax.text(, y, label, transform=ax.transAxes,horizontalalignment='',verticalalignment='')w, h = dy/, dy/x, y = , y-h/p = Rectangle(xy=(x,y), width=w, height=h,facecolor=axcolor,transform=ax.transAxes)l1 = Line2D([x, x+w], [y+h, y], **lineparams)l2 = Line2D([x, x+w], [y, y+h], **lineparams)l1.set_visible(actives[cnt])l2.set_visible(actives[cnt])self.labels.append(t)self.rectangles.append(p)self.lines.append((l1,l2))ax.add_patch(p)ax.add_line(l1)ax.add_line(l2)cnt += ax.figure.canvas.mpl_connect('', self._clicked)self.ax = axself.cnt = self.observers = {}", "docstring": "Add check 
buttons to axes.Axes instance ax\n\nlabels is a len(buttons) list of labels as strings\n\nactives is a len(buttons) list of booleans indicating whether\n the button is active", "id": "f17235:c4:m0"} {"signature": "def on_clicked(self, func):", "body": "cid = self.cntself.observers[cid] = funcself.cnt += return cid", "docstring": "When the button is clicked, call this func with button label\n\nA connection id is returned which can be used to disconnect", "id": "f17235:c4:m2"} {"signature": "def disconnect(self, cid):", "body": "try: del self.observers[cid]except KeyError: pass", "docstring": "remove the observer with connection id cid", "id": "f17235:c4:m3"} {"signature": "def __init__(self, ax, labels, active=, activecolor=''):", "body": "self.activecolor = activecolorax.set_xticks([])ax.set_yticks([])ax.set_navigate(False)dy = /(len(labels)+)ys = np.linspace(-dy, dy, len(labels))cnt = axcolor = ax.get_axis_bgcolor()self.labels = []self.circles = []for y, label in zip(ys, labels):t = ax.text(, y, label, transform=ax.transAxes,horizontalalignment='',verticalalignment='')if cnt==active:facecolor = activecolorelse:facecolor = axcolorp = Circle(xy=(, y), radius=, facecolor=facecolor,transform=ax.transAxes)self.labels.append(t)self.circles.append(p)ax.add_patch(p)cnt += ax.figure.canvas.mpl_connect('', self._clicked)self.ax = axself.cnt = self.observers = {}", "docstring": "Add radio buttons to axes.Axes instance ax\n\nlabels is a len(buttons) list of labels as strings\n\nactive is the index into labels for the button that is active\n\nactivecolor is the color of the button when clicked", "id": "f17235:c5:m0"} {"signature": "def on_clicked(self, func):", "body": "cid = self.cntself.observers[cid] = funcself.cnt += return cid", "docstring": "When the button is clicked, call this func with button label\n\nA connection id is returned which can be used to disconnect", "id": "f17235:c5:m2"} {"signature": "def disconnect(self, cid):", "body": "try: del self.observers[cid]except KeyError: pass", "docstring": "remove the observer with connection id cid", "id": "f17235:c5:m3"} {"signature": "def __init__(self, targetfig, toolfig):", "body": "self.targetfig = targetfigtoolfig.subplots_adjust(left=, right=)class toolbarfmt:def __init__(self, slider):self.slider = sliderdef __call__(self, x, y):fmt = ''%(self.slider.label.get_text(), self.slider.valfmt)return fmt%xself.axleft = toolfig.add_subplot()self.axleft.set_title('')self.axleft.set_navigate(False)self.sliderleft = Slider(self.axleft, '', , , targetfig.subplotpars.left, closedmax=False)self.sliderleft.on_changed(self.funcleft)self.axbottom = toolfig.add_subplot()self.axbottom.set_navigate(False)self.sliderbottom = Slider(self.axbottom, '', , , targetfig.subplotpars.bottom, closedmax=False)self.sliderbottom.on_changed(self.funcbottom)self.axright = toolfig.add_subplot()self.axright.set_navigate(False)self.sliderright = Slider(self.axright, '', , , targetfig.subplotpars.right, closedmin=False)self.sliderright.on_changed(self.funcright)self.axtop = toolfig.add_subplot()self.axtop.set_navigate(False)self.slidertop = Slider(self.axtop, '', , , targetfig.subplotpars.top, closedmin=False)self.slidertop.on_changed(self.functop)self.axwspace = toolfig.add_subplot()self.axwspace.set_navigate(False)self.sliderwspace = Slider(self.axwspace, '', , , targetfig.subplotpars.wspace, closedmax=False)self.sliderwspace.on_changed(self.funcwspace)self.axhspace = toolfig.add_subplot()self.axhspace.set_navigate(False)self.sliderhspace = Slider(self.axhspace, '', , , 
targetfig.subplotpars.hspace, closedmax=False)self.sliderhspace.on_changed(self.funchspace)self.sliderleft.slidermax = self.sliderrightself.sliderright.slidermin = self.sliderleftself.sliderbottom.slidermax = self.slidertopself.slidertop.slidermin = self.sliderbottombax = toolfig.add_axes([, , , ])self.buttonreset = Button(bax, '')sliders = (self.sliderleft, self.sliderbottom, self.sliderright,self.slidertop, self.sliderwspace, self.sliderhspace, )def func(event):thisdrawon = self.drawonself.drawon = Falsebs = []for slider in sliders:bs.append(slider.drawon)slider.drawon = Falsefor slider in sliders:slider.reset()for slider, b in zip(sliders, bs):slider.drawon = bself.drawon = thisdrawonif self.drawon:toolfig.canvas.draw()self.targetfig.canvas.draw()validate = toolfig.subplotpars.validatetoolfig.subplotpars.validate = Falseself.buttonreset.on_clicked(func)toolfig.subplotpars.validate = validate", "docstring": "targetfig is the figure to adjust\n\ntoolfig is the figure to embed the the subplot tool into. If\nNone, a default pylab figure will be created. If you are\nusing this from the GUI", "id": "f17235:c6:m0"} {"signature": "def __init__(self, ax, useblit=False, **lineprops):", "body": "self.ax = axself.canvas = ax.figure.canvasself.canvas.mpl_connect('', self.onmove)self.canvas.mpl_connect('', self.clear)self.visible = Trueself.horizOn = Trueself.vertOn = Trueself.useblit = useblitself.lineh = ax.axhline(ax.get_ybound()[], visible=False, **lineprops)self.linev = ax.axvline(ax.get_xbound()[], visible=False, **lineprops)self.background = Noneself.needclear = False", "docstring": "Add a cursor to ax. If useblit=True, use the backend\ndependent blitting features for faster updates (GTKAgg only\nnow). lineprops is a dictionary of line properties. See\nexamples/widgets/cursor.py.", "id": "f17235:c7:m0"} {"signature": "def clear(self, event):", "body": "if self.useblit:self.background = self.canvas.copy_from_bbox(self.ax.bbox)self.linev.set_visible(False)self.lineh.set_visible(False)", "docstring": "clear the cursor", "id": "f17235:c7:m1"} {"signature": "def onmove(self, event):", "body": "if event.inaxes != self.ax:self.linev.set_visible(False)self.lineh.set_visible(False)if self.needclear:self.canvas.draw()self.needclear = Falsereturnself.needclear = Trueif not self.visible: returnself.linev.set_xdata((event.xdata, event.xdata))self.lineh.set_ydata((event.ydata, event.ydata))self.linev.set_visible(self.visible and self.vertOn)self.lineh.set_visible(self.visible and self.horizOn)self._update()", "docstring": "on mouse motion draw the cursor if visible", "id": "f17235:c7:m2"} {"signature": "def clear(self, event):", "body": "if self.useblit:self.background = self.canvas.copy_from_bbox(self.canvas.figure.bbox)for line in self.lines: line.set_visible(False)", "docstring": "clear the cursor", "id": "f17235:c8:m1"} {"signature": "def __init__(self, ax, onselect, direction, minspan=None, useblit=False, rectprops=None, onmove_callback=None):", "body": "if rectprops is None:rectprops = dict(facecolor='', alpha=)assert direction in ['', ''], ''self.direction = directionself.ax = Noneself.canvas = Noneself.visible = Trueself.cids=[]self.rect = Noneself.background = Noneself.pressv = Noneself.rectprops = rectpropsself.onselect = onselectself.onmove_callback = onmove_callbackself.useblit = useblitself.minspan = minspanself.buttonDown = Falseself.prev = (, )self.new_axes(ax)", "docstring": "Create a span selector in ax. 
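Button, Slider, CheckButtons and RadioButtons above all expose the same connect/disconnect machinery: callbacks are stored in a dict keyed by an ever-increasing counter, and that counter is returned as the connection id. A compact sketch of that machinery; the class name is illustrative.

class ObserverRegistrySketch:
    def __init__(self):
        self.cnt = 0
        self.observers = {}

    def connect(self, func):
        # register func and hand back a connection id for later disconnect()
        cid = self.cnt
        self.observers[cid] = func
        self.cnt += 1
        return cid

    def disconnect(self, cid):
        # silently ignore unknown ids, as the widgets above do
        self.observers.pop(cid, None)

    def notify(self, *args):
        for func in list(self.observers.values()):
            func(*args)

events = ObserverRegistrySketch()
cid = events.connect(lambda label: print("clicked", label))
events.notify("ok")        # prints: clicked ok
events.disconnect(cid)
events.notify("ok")        # no output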
When a selection is made, clear\nthe span and call onselect with\n\n onselect(vmin, vmax)\n\nand clear the span.\n\ndirection must be 'horizontal' or 'vertical'\n\nIf minspan is not None, ignore events smaller than minspan\n\nThe span rect is drawn with rectprops; default\n rectprops = dict(facecolor='red', alpha=0.5)\n\nset the visible attribute to False if you want to turn off\nthe functionality of the span selector", "id": "f17235:c9:m0"} {"signature": "def update_background(self, event):", "body": "if self.useblit:self.background = self.canvas.copy_from_bbox(self.ax.bbox)", "docstring": "force an update of the background", "id": "f17235:c9:m2"} {"signature": "def ignore(self, event):", "body": "return event.inaxes!=self.ax or not self.visible or event.button !=", "docstring": "return True if event should be ignored", "id": "f17235:c9:m3"} {"signature": "def press(self, event):", "body": "if self.ignore(event): returnself.buttonDown = Trueself.rect.set_visible(self.visible)if self.direction == '':self.pressv = event.xdataelse:self.pressv = event.ydatareturn False", "docstring": "on button press event", "id": "f17235:c9:m4"} {"signature": "def release(self, event):", "body": "if self.pressv is None or (self.ignore(event) and not self.buttonDown): returnself.buttonDown = Falseself.rect.set_visible(False)self.canvas.draw()vmin = self.pressvif self.direction == '':vmax = event.xdata or self.prev[]else:vmax = event.ydata or self.prev[]if vmin>vmax: vmin, vmax = vmax, vminspan = vmax - vminif self.minspan is not None and spanself.onselect(vmin, vmax)self.pressv = Nonereturn False", "docstring": "on button release event", "id": "f17235:c9:m5"} {"signature": "def update(self):", "body": "if self.useblit:if self.background is not None:self.canvas.restore_region(self.background)self.ax.draw_artist(self.rect)self.canvas.blit(self.ax.bbox)else:self.canvas.draw_idle()return False", "docstring": "draw using newfangled blit or oldfangled draw depending on useblit", "id": "f17235:c9:m6"} {"signature": "def onmove(self, event):", "body": "if self.pressv is None or self.ignore(event): returnx, y = event.xdata, event.ydataself.prev = x, yif self.direction == '':v = xelse:v = yminv, maxv = v, self.pressvif minv>maxv: minv, maxv = maxv, minvif self.direction == '':self.rect.set_x(minv)self.rect.set_width(maxv-minv)else:self.rect.set_y(minv)self.rect.set_height(maxv-minv)if self.onmove_callback is not None:vmin = self.pressvif self.direction == '':vmax = event.xdata or self.prev[]else:vmax = event.ydata or self.prev[]if vmin>vmax: vmin, vmax = vmax, vminself.onmove_callback(vmin, vmax)self.update()return False", "docstring": "on motion notify event", "id": "f17235:c9:m7"} {"signature": "def __init__(self, ax, onselect, drawtype='',minspanx=None, minspany=None, useblit=False,lineprops=None, rectprops=None, spancoords=''):", "body": "self.ax = axself.visible = Trueself.canvas = ax.figure.canvasself.canvas.mpl_connect('', self.onmove)self.canvas.mpl_connect('', self.press)self.canvas.mpl_connect('', self.release)self.canvas.mpl_connect('', self.update_background)self.active = True self.to_draw = Noneself.background = Noneif drawtype == '':drawtype = '' self.visible = False if drawtype == '':if rectprops is None:rectprops = dict(facecolor='', edgecolor = '',alpha=, fill=False)self.rectprops = rectpropsself.to_draw = Rectangle((,), , ,visible=False,**self.rectprops)self.ax.add_patch(self.to_draw)if drawtype == '':if lineprops is None:lineprops = dict(color='', linestyle='',linewidth = , alpha=)self.lineprops = 
linepropsself.to_draw = Line2D([,],[,],visible=False,**self.lineprops)self.ax.add_line(self.to_draw)self.onselect = onselectself.useblit = useblitself.minspanx = minspanxself.minspany = minspanyassert(spancoords in ('', ''))self.spancoords = spancoordsself.drawtype = drawtypeself.eventpress = Noneself.eventrelease = None", "docstring": "Create a selector in ax. When a selection is made, clear\nthe span and call onselect with\n\n onselect(pos_1, pos_2)\n\nand clear the drawn box/line. There pos_i are arrays of length 2\ncontaining the x- and y-coordinate.\n\nIf minspanx is not None then events smaller than minspanx\nin x direction are ignored(it's the same for y).\n\nThe rect is drawn with rectprops; default\n rectprops = dict(facecolor='red', edgecolor = 'black',\n alpha=0.5, fill=False)\n\nThe line is drawn with lineprops; default\n lineprops = dict(color='black', linestyle='-',\n linewidth = 2, alpha=0.5)\n\nUse type if you want the mouse to draw a line, a box or nothing\nbetween click and actual position ny setting\n\ndrawtype = 'line', drawtype='box' or drawtype = 'none'.\n\nspancoords is one of 'data' or 'pixels'. If 'data', minspanx\nand minspanx will be interpreted in the same coordinates as\nthe x and ya axis, if 'pixels', they are in pixels", "id": "f17235:c11:m0"} {"signature": "def update_background(self, event):", "body": "if self.useblit:self.background = self.canvas.copy_from_bbox(self.ax.bbox)", "docstring": "force an update of the background", "id": "f17235:c11:m1"} {"signature": "def ignore(self, event):", "body": "if not self.active:return Trueif not self.canvas.widgetlock.available(self):return Trueif self.eventpress == None:return event.inaxes!= self.axreturn (event.inaxes!=self.ax orevent.button != self.eventpress.button)", "docstring": "return True if event should be ignored", "id": "f17235:c11:m2"} {"signature": "def press(self, event):", "body": "if self.ignore(event): returnself.to_draw.set_visible(self.visible)self.eventpress = eventreturn False", "docstring": "on button press event", "id": "f17235:c11:m3"} {"signature": "def release(self, event):", "body": "if self.eventpress is None or self.ignore(event): returnself.to_draw.set_visible(False)self.canvas.draw()self.eventrelease = eventif self.spancoords=='':xmin, ymin = self.eventpress.xdata, self.eventpress.ydataxmax, ymax = self.eventrelease.xdata, self.eventrelease.ydataelif self.spancoords=='':xmin, ymin = self.eventpress.x, self.eventpress.yxmax, ymax = self.eventrelease.x, self.eventrelease.yelse:raise ValueError('')if xmin>xmax: xmin, xmax = xmax, xminif ymin>ymax: ymin, ymax = ymax, yminspanx = xmax - xminspany = ymax - yminxproblems = self.minspanx is not None and spanxyproblems = self.minspany is not None and spanyif (self.drawtype=='') and (xproblems or yproblems):\"\"\"\"\"\" return if (self.drawtype=='') and (xproblems and yproblems):\"\"\"\"\"\" return self.onselect(self.eventpress, self.eventrelease)self.eventpress = None self.eventrelease = None return False", "docstring": "on button release event", "id": "f17235:c11:m4"} {"signature": "def update(self):", "body": "if self.useblit:if self.background is not None:self.canvas.restore_region(self.background)self.ax.draw_artist(self.to_draw)self.canvas.blit(self.ax.bbox)else:self.canvas.draw_idle()return False", "docstring": "draw using newfangled blit or oldfangled draw depending on useblit", "id": "f17235:c11:m5"} {"signature": "def onmove(self, event):", "body": "if self.eventpress is None or self.ignore(event): returnx,y = event.xdata, event.ydata 
if self.drawtype == '':minx, maxx = self.eventpress.xdata, x miny, maxy = self.eventpress.ydata, y if minx>maxx: minx, maxx = maxx, minx if miny>maxy: miny, maxy = maxy, minyself.to_draw.set_x(minx) self.to_draw.set_y(miny)self.to_draw.set_width(maxx-minx) self.to_draw.set_height(maxy-miny)self.update()return Falseif self.drawtype == '':self.to_draw.set_data([self.eventpress.xdata, x],[self.eventpress.ydata, y])self.update()return False", "docstring": "on motion notify event if box/line is wanted", "id": "f17235:c11:m6"} {"signature": "def set_active(self, active):", "body": "self.active = active", "docstring": "Use this to activate / deactivate the RectangleSelector\n\n from your program with an boolean variable 'active'.", "id": "f17235:c11:m7"} {"signature": "def get_active(self):", "body": "return self.active", "docstring": "to get status of active mode (boolean variable)", "id": "f17235:c11:m8"} {"signature": "def validate_path_exists(s):", "body": "if os.path.exists(s): return selse:raise RuntimeError(''%s)", "docstring": "If s is a path, return s, else False", "id": "f17236:m0"} {"signature": "def validate_bool(b):", "body": "if type(b) is str:b = b.lower()if b in ('', '', '', '', '', '', , True): return Trueelif b in ('', '', '', '', '', '', , False): return Falseelse:raise ValueError('' % b)", "docstring": "Convert b to a boolean or raise", "id": "f17236:m1"} {"signature": "def validate_bool_maybe_none(b):", "body": "if type(b) is str:b = b.lower()if b=='': return Noneif b in ('', '', '', '', '', '', , True): return Trueelif b in ('', '', '', '', '', '', , False): return Falseelse:raise ValueError('' % b)", "docstring": "Convert b to a boolean or raise", "id": "f17236:m2"} {"signature": "def validate_float(s):", "body": "try: return float(s)except ValueError:raise ValueError('' % s)", "docstring": "convert s to float or raise", "id": "f17236:m3"} {"signature": "def validate_int(s):", "body": "try: return int(s)except ValueError:raise ValueError('' % s)", "docstring": "convert s to int or raise", "id": "f17236:m4"} {"signature": "def validate_fonttype(s):", "body": "fonttypes = { '': ,'': }try:fonttype = validate_int(s)except ValueError:if s.lower() in list(fonttypes.keys()):return fonttypes[s.lower()]raise ValueError('' % list(fonttypes.keys()))else:if fonttype not in list(fonttypes.values()):raise ValueError('' % list(fonttypes.values()))return fonttype", "docstring": "confirm that this is a Postscript of PDF font type that we know how to convert to", "id": "f17236:m5"} {"signature": "def validate_color(s):", "body": "if s.lower() == '':return ''if is_color_like(s):return sstmp = '' + sif is_color_like(stmp):return stmpcolorarg = smsg = ''if s.find('')>=:stmp = ''.join([ c for c in s if c.isdigit() or c=='' or c==''])vals = stmp.split('')if len(vals)!=:msg = ''else:try:colorarg = [float(val) for val in vals]except ValueError:msg = ''if not msg and is_color_like(colorarg):return colorargraise ValueError(''%(s, msg))", "docstring": "return a valid color arg", "id": "f17236:m8"} {"signature": "def validate_stringlist(s):", "body": "if type(s) is str:return [ v.strip() for v in s.split('') ]else:assert type(s) in [list,tuple]return [ str(v) for v in s ]", "docstring": "return a list", "id": "f17236:m9"} {"signature": "def __init__(self, key, valid, ignorecase=False):", "body": "self.key = keyself.ignorecase = ignorecasedef func(s):if ignorecase: return s.lower()else: return sself.valid = dict([(func(k),k) for k in valid])", "docstring": "valid is a list of legal strings", "id": 
"f17236:c0:m0"} {"signature": "def __call__(self, s):", "body": "if type(s) is str:ss = s.split('')if len(ss) != self.n:raise ValueError(''%self.n)try:return [float(val) for val in ss]except ValueError:raise ValueError('')else:assert type(s) in (list,tuple)if len(s) != self.n:raise ValueError(''%self.n)return [float(val) for val in s]", "docstring": "return a seq of n floats or raise", "id": "f17236:c1:m1"} {"signature": "def __call__(self, s):", "body": "if type(s) is str:ss = s.split('')if len(ss) != self.n:raise ValueError(''%self.n)try:return [int(val) for val in ss]except ValueError:raise ValueError('')else:assert type(s) in (list,tuple)if len(s) != self.n:raise ValueError(''%self.n)return [int(val) for val in s]", "docstring": "return a seq of n ints or raise", "id": "f17236:c2:m1"} {"signature": "def _norm(x):", "body": "return np.sqrt(np.dot(x,x))", "docstring": "return sqrt(x dot x)", "id": "f17237:m4"} {"signature": "def window_hanning(x):", "body": "return np.hanning(len(x))*x", "docstring": "return x times the hanning window of len(x)", "id": "f17237:m5"} {"signature": "def window_none(x):", "body": "return x", "docstring": "No window function; simply return x", "id": "f17237:m6"} {"signature": "def conv(x, y, mode=):", "body": "warnings.warn(\"\", DeprecationWarning)return np.convolve(x,y,mode)", "docstring": "convolve x with y", "id": "f17237:m7"} {"signature": "def demean(x, axis=):", "body": "x = np.asarray(x)if axis:ind = [slice(None)] * axisind.append(np.newaxis)return x - x.mean(axis)[ind]return x - x.mean(axis)", "docstring": "Return x minus its mean along the specified axis", "id": "f17237:m9"} {"signature": "def detrend_mean(x):", "body": "return x - x.mean()", "docstring": "Return x minus the mean(x)", "id": "f17237:m10"} {"signature": "def detrend_none(x):", "body": "return x", "docstring": "Return x: no detrending", "id": "f17237:m11"} {"signature": "def detrend_linear(y):", "body": "x = np.arange(len(y), dtype=np.float_)C = np.cov(x, y, bias=)b = C[,]/C[,]a = y.mean() - b*x.mean()return y - (b*x + a)", "docstring": "Return y minus best fit line; 'linear' detrending", "id": "f17237:m12"} {"signature": "def psd(x, NFFT=, Fs=, detrend=detrend_none, window=window_hanning,noverlap=, pad_to=None, sides='', scale_by_freq=None):", "body": "Pxx,freqs = csd(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,scale_by_freq)return Pxx.real,freqs", "docstring": "The power spectral density by Welch's average periodogram method.\nThe vector *x* is divided into *NFFT* length blocks. Each block\nis detrended by the function *detrend* and windowed by the function\n*window*. *noverlap* gives the length of the overlap between blocks.\nThe absolute(fft(block))**2 of each segment are averaged to compute\n*Pxx*, with a scaling to correct for power loss due to windowing.\n\nIf len(*x*) < *NFFT*, it will be zero padded to *NFFT*.\n\n*x*\n Array or sequence containing the data\n%(PSD)s\nReturns the tuple (*Pxx*, *freqs*).\n\nRefs:\n Bendat & Piersol -- Random Data: Analysis and Measurement\n Procedures, John Wiley & Sons (1986)", "id": "f17237:m14"} {"signature": "def csd(x, y, NFFT=, Fs=, detrend=detrend_none, window=window_hanning,noverlap=, pad_to=None, sides='', scale_by_freq=None):", "body": "Pxy, freqs, t = _spectral_helper(x, y, NFFT, Fs, detrend, window,noverlap, pad_to, sides, scale_by_freq)if len(Pxy.shape) == and Pxy.shape[]>:Pxy = Pxy.mean(axis=)return Pxy, freqs", "docstring": "The cross power spectral density by Welch's average periodogram\nmethod. 
The vectors *x* and *y* are divided into *NFFT* length\nblocks. Each block is detrended by the function *detrend* and\nwindowed by the function *window*. *noverlap* gives the length\nof the overlap between blocks. The product of the direct FFTs\nof *x* and *y* are averaged over each segment to compute *Pxy*,\nwith a scaling to correct for power loss due to windowing.\n\nIf len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero\npadded to *NFFT*.\n\n*x*, *y*\n Array or sequence containing the data\n%(PSD)s\nReturns the tuple (*Pxy*, *freqs*).\n\nRefs:\n Bendat & Piersol -- Random Data: Analysis and Measurement\n Procedures, John Wiley & Sons (1986)", "id": "f17237:m15"} {"signature": "def specgram(x, NFFT=, Fs=, detrend=detrend_none, window=window_hanning,noverlap=, pad_to=None, sides='', scale_by_freq=None):", "body": "assert(NFFT > noverlap)Pxx, freqs, t = _spectral_helper(x, x, NFFT, Fs, detrend, window,noverlap, pad_to, sides, scale_by_freq)Pxx = Pxx.real if (np.iscomplexobj(x) and sides == '') or sides == '':freqs = np.concatenate((freqs[NFFT/:]-Fs,freqs[:NFFT/]))Pxx = np.concatenate((Pxx[NFFT/:,:],Pxx[:NFFT/,:]),)return Pxx, freqs, t", "docstring": "Compute a spectrogram of data in *x*. Data are split into *NFFT*\nlength segements and the PSD of each section is computed. The\nwindowing function *window* is applied to each segment, and the\namount of overlap of each segment is specified with *noverlap*.\n\nIf *x* is real (i.e. non-complex) only the spectrum of the positive\nfrequencie is returned. If *x* is complex then the complete\nspectrum is returned.\n%(PSD)s\nReturns a tuple (*Pxx*, *freqs*, *t*):\n\n - *Pxx*: 2-D array, columns are the periodograms of\n successive segments\n\n - *freqs*: 1-D array of frequencies corresponding to the rows\n in Pxx\n\n - *t*: 1-D array of times corresponding to midpoints of\n segments.\n\n.. seealso::\n :func:`psd`:\n :func:`psd` differs in the default overlap; in returning\n the mean of the segment periodograms; and in not returning\n times.", "id": "f17237:m16"} {"signature": "def cohere(x, y, NFFT=, Fs=, detrend=detrend_none, window=window_hanning,noverlap=, pad_to=None, sides='', scale_by_freq=None):", "body": "if len(x)<*NFFT:raise ValueError(_coh_error)Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,scale_by_freq)Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,scale_by_freq)Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,scale_by_freq)Cxy = np.divide(np.absolute(Pxy)**, Pxx*Pyy)Cxy.shape = (len(f),)return Cxy, f", "docstring": "The coherence between *x* and *y*. Coherence is the normalized\ncross spectral density:\n\n.. math::\n\n C_{xy} = \\\\frac{|P_{xy}|^2}{P_{xx}P_{yy}}\n\n*x*, *y*\n Array or sequence containing the data\n%(PSD)s\nThe return value is the tuple (*Cxy*, *f*), where *f* are the\nfrequencies of the coherence vector. For cohere, scaling the\nindividual densities by the sampling frequency has no effect, since\nthe factors cancel out.\n\n.. 
seealso::\n :func:`psd` and :func:`csd`:\n For information about the methods used to compute\n :math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.", "id": "f17237:m17"} {"signature": "def corrcoef(*args):", "body": "warnings.warn(\"\", DeprecationWarning)kw = dict(rowvar=False)return np.corrcoef(*args, **kw)", "docstring": "corrcoef(*X*) where *X* is a matrix returns a matrix of correlation\ncoefficients for the columns of *X*\n\ncorrcoef(*x*, *y*) where *x* and *y* are vectors returns the matrix of\ncorrelation coefficients for *x* and *y*.\n\nNumpy arrays can be real or complex.\n\nThe correlation matrix is defined from the covariance matrix *C*\nas\n\n.. math::\n\n r_{ij} = \\\\frac{C_{ij}}{\\\\sqrt{C_{ii}C_{jj}}}", "id": "f17237:m18"} {"signature": "def polyfit(*args, **kwargs):", "body": "warnings.warn(\"\", DeprecationWarning)return np.polyfit(*args, **kwargs)", "docstring": "polyfit(*x*, *y*, *N*)\n\nDo a best fit polynomial of order *N* of *y* to *x*. Return value\nis a vector of polynomial coefficients [pk ... p1 p0]. Eg, for\n*N*=2::\n\n p2*x0^2 + p1*x0 + p0 = y1\n p2*x1^2 + p1*x1 + p0 = y1\n p2*x2^2 + p1*x2 + p0 = y2\n .....\n p2*xk^2 + p1*xk + p0 = yk\n\n\nMethod: if *X* is a the Vandermonde Matrix computed from *x* (see\n`vandermonds\n`_), then the\npolynomial least squares solution is given by the '*p*' in\n\n X*p = y\n\nwhere *X* is a (len(*x*) \\N{MULTIPLICATION SIGN} *N* + 1) matrix,\n*p* is a *N*+1 length vector, and *y* is a (len(*x*)\n\\N{MULTIPLICATION SIGN} 1) vector.\n\nThis equation can be solved as\n\n.. math::\n\n p = (X_t X)^-1 X_t y\n\nwhere :math:`X_t` is the transpose of *X* and -1 denotes the\ninverse. Numerically, however, this is not a good method, so we\nuse :func:`numpy.linalg.lstsq`.\n\nFor more info, see `least squares fitting\n`_,\nbut note that the *k*'s and *n*'s in the superscripts and\nsubscripts on that page. The linear algebra is correct, however.\n\n.. seealso::\n :func:`polyval`", "id": "f17237:m19"} {"signature": "def polyval(*args, **kwargs):", "body": "warnings.warn(\"\", DeprecationWarning)return np.polyval(*args, **kwargs)", "docstring": "*y* = polyval(*p*, *x*)\n\n*p* is a vector of polynomial coeffients and *y* is the polynomial\nevaluated at *x*.\n\nExample code to remove a polynomial (quadratic) trend from y::\n\n p = polyfit(x, y, 2)\n trend = polyval(p, x)\n resid = y - trend\n\n.. seealso::\n :func:`polyfit`", "id": "f17237:m20"} {"signature": "def vander(*args, **kwargs):", "body": "warnings.warn(\"\", DeprecationWarning)return np.vander(*args, **kwargs)", "docstring": "*X* = vander(*x*, *N* = *None*)\n\nThe Vandermonde matrix of vector *x*. The *i*-th column of *X* is the\nthe *i*-th power of *x*. 
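A short sketch of calling the psd record above (f17237:m14) through matplotlib.mlab, assuming that module is importable in your matplotlib version; the test signal, NFFT and noverlap values are illustrative::

    import numpy as np
    from matplotlib import mlab

    fs = 1000.0
    t = np.arange(0, 2, 1.0 / fs)
    x = np.sin(2 * np.pi * 100 * t) + 0.1 * np.random.randn(len(t))

    # Welch's method: 256-sample blocks, Hanning window, 50% overlap
    Pxx, freqs = mlab.psd(x, NFFT=256, Fs=fs, detrend=mlab.detrend_mean,
                          window=mlab.window_hanning, noverlap=128)
    print(freqs[np.argmax(Pxx)])   # the peak should sit near 100 Hz
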
*N* is the maximum power to compute; if *N* is\n*None* it defaults to len(*x*).", "id": "f17237:m21"} {"signature": "def cohere_pairs( X, ij, NFFT=, Fs=, detrend=detrend_none,window=window_hanning, noverlap=,preferSpeedOverMemory=True,progressCallback=donothing_callback,returnPxx=False):", "body": "numRows, numCols = X.shapeif numRows < NFFT:tmp = XX = np.zeros( (NFFT, numCols), X.dtype)X[:numRows,:] = tmpdel tmpnumRows, numCols = X.shapeseen = {}for i,j in ij:seen[i]=; seen[j] = allColumns = list(seen.keys())Ncols = len(allColumns)del seenif np.iscomplexobj(X): numFreqs = NFFTelse: numFreqs = NFFT//+if cbook.iterable(window):assert(len(window) == NFFT)windowVals = windowelse:windowVals = window(np.ones((NFFT,), typecode(X)))ind = list(range(, numRows-NFFT+, NFFT-noverlap))numSlices = len(ind)FFTSlices = {}FFTConjSlices = {}Pxx = {}slices = list(range(numSlices))normVal = norm(windowVals)**for iCol in allColumns:progressCallback(i/Ncols, '')Slices = np.zeros( (numSlices,numFreqs), dtype=np.complex_)for iSlice in slices:thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]thisSlice = windowVals*detrend(thisSlice)Slices[iSlice,:] = fft(thisSlice)[:numFreqs]FFTSlices[iCol] = Slicesif preferSpeedOverMemory:FFTConjSlices[iCol] = conjugate(Slices)Pxx[iCol] = np.divide(np.mean(absolute(Slices)**), normVal)del Slices, ind, windowValsCxy = {}Phase = {}count = N = len(ij)for i,j in ij:count +=if count%==:progressCallback(count/N, '')if preferSpeedOverMemory:Pxy = FFTSlices[i] * FFTConjSlices[j]else:Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])if numSlices>: Pxy = np.mean(Pxy)Pxy = np.divide(Pxy, normVal)Cxy[(i,j)] = np.divide(np.absolute(Pxy)**, Pxx[i]*Pxx[j])Phase[(i,j)] = np.arctan2(Pxy.imag, Pxy.real)freqs = Fs/NFFT*np.arange(numFreqs)if returnPxx:return Cxy, Phase, freqs, Pxxelse:return Cxy, Phase, freqs", "docstring": "Cxy, Phase, freqs = cohere_pairs(X, ij, ...)\n\nCompute the coherence for all pairs in *ij*. *X* is a\n(*numSamples*, *numCols*) numpy array. *ij* is a list of tuples\n(*i*, *j*). Each tuple is a pair of indexes into the columns of *X*\nfor which you want to compute coherence. For example, if *X* has 64\ncolumns, and you want to compute all nonredundant pairs, define *ij*\nas::\n\n ij = []\n for i in range(64):\n for j in range(i+1,64):\n ij.append( (i, j) )\n\nThe other function arguments, except for *preferSpeedOverMemory*\n(see below), are explained in the help string of :func:`psd`.\n\nReturn value is a tuple (*Cxy*, *Phase*, *freqs*).\n\n - *Cxy*: a dictionary of (*i*, *j*) tuples -> coherence vector for that\n pair. I.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``. Number of\n dictionary keys is ``len(ij)``.\n\n - *Phase*: a dictionary of phases of the cross spectral density at\n each frequency for each pair. The keys are ``(i,j)``.\n\n - *freqs*: a vector of frequencies, equal in length to either\n the coherence or phase vectors for any (*i*, *j*) key.. Eg,\n to make a coherence Bode plot::\n\n subplot(211)\n plot( freqs, Cxy[(12,19)])\n subplot(212)\n plot( freqs, Phase[(12,19)])\n\nFor a large number of pairs, :func:`cohere_pairs` can be much more\nefficient than just calling :func:`cohere` for each pair, because\nit caches most of the intensive computations. If *N* is the\nnumber of pairs, this function is O(N) for most of the heavy\nlifting, whereas calling cohere for each pair is\nO(N\\N{SUPERSCRIPT TWO}). 
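The polyfit/polyval records above (f17237:m19, m20) point at the numpy equivalents; a small detrending example in the spirit of the polyval docstring, with illustrative data::

    import numpy as np

    x = np.linspace(0, 10, 50)
    y = 3.0 * x ** 2 - 2.0 * x + 1.0 + np.random.randn(50)

    p = np.polyfit(x, y, 2)    # least-squares quadratic fit, highest power first
    trend = np.polyval(p, x)   # evaluate the fitted polynomial at x
    resid = y - trend          # detrended residual
    print(p, resid.std())
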
However, because of the caching, it is\nalso more memory intensive, making 2 additional complex arrays\nwith approximately the same number of elements as *X*.\n\nThe parameter *preferSpeedOverMemory*, if *False*, limits the\ncaching by only making one, rather than two, complex cache arrays.\nThis is useful if memory becomes critical. Even when\n*preferSpeedOverMemory* is *False*, :func:`cohere_pairs` will\nstill give significant performace gains over calling\n:func:`cohere` for each pair, and will use subtantially less\nmemory than if *preferSpeedOverMemory* is *True*. In my tests\nwith a (43000, 64) array over all non-redundant pairs,\n*preferSpeedOverMemory* = *True* delivered a 33% performace boost\non a 1.7GHZ Athlon with 512MB RAM compared with\n*preferSpeedOverMemory* = *False*. But both solutions were more\nthan 10x faster than naievly crunching all possible pairs through\ncohere.\n\n.. seealso::\n :file:`test/cohere_pairs_test.py` in the src tree:\n For an example script that shows that this\n :func:`cohere_pairs` and :func:`cohere` give the same\n results for a given pair.", "id": "f17237:m23"} {"signature": "def entropy(y, bins):", "body": "n,bins = np.histogram(y, bins)n = n.astype(np.float_)n = np.take(n, np.nonzero(n)[]) p = np.divide(n, len(y))delta = bins[]-bins[]S = -*np.sum(p*log(p)) + log(delta)return S", "docstring": "r\"\"\"\n Return the entropy of the data in *y*.\n\n .. math::\n\n \\sum p_i \\log_2(p_i)\n\n where :math:`p_i` is the probability of observing *y* in the\n :math:`i^{th}` bin of *bins*. *bins* can be a number of bins or a\n range of bins; see :func:`numpy.histogram`.\n\n Compare *S* with analytic calculation for a Gaussian::\n\n x = mu + sigma * randn(200000)\n Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )", "id": "f17237:m24"} {"signature": "def hist(y, bins=, normed=):", "body": "warnings.warn(\"\", DeprecationWarning)return np.histogram(y, bins=bins, range=None, normed=normed)", "docstring": "Return the histogram of *y* with *bins* equally sized bins. If\nbins is an array, use those bins. Return value is (*n*, *x*)\nwhere *n* is the count for each bin in *x*.\n\nIf *normed* is *False*, return the counts in the first element of\nthe returned tuple. If *normed* is *True*, return the probability\ndensity :math:`\\\\frac{n}{(len(y)\\mathrm{dbin}}`.\n\nIf *y* has rank > 1, it will be raveled. 
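A numpy-only sketch of the entropy calculation described in the record above (f17237:m24), compared against the analytic Gaussian value quoted in its docstring; the bin count and sample size are illustrative::

    import numpy as np

    sigma, mu = 2.0, 5.0
    y = mu + sigma * np.random.randn(200000)

    n, bins = np.histogram(y, 100)
    n = n[n > 0].astype(float)
    p = n / len(y)
    delta = bins[1] - bins[0]
    S = -np.sum(p * np.log(p)) + np.log(delta)

    S_analytic = 0.5 * (1.0 + np.log(2 * np.pi * sigma ** 2))
    print(S, S_analytic)   # the two should agree closely
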
If *y* is masked, only the\nunmasked values will be used.\n\nCredits: the Numeric 22 documentation", "id": "f17237:m25"} {"signature": "def normpdf(x, *args):", "body": "mu, sigma = argsreturn /(np.sqrt(*np.pi)*sigma)*np.exp(- * (/sigma*(x - mu))**)", "docstring": "Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*", "id": "f17237:m26"} {"signature": "def levypdf(x, gamma, alpha):", "body": "N = len(x)if N% != :raise ValueError('' +'')dx = x[]-x[]f = /(N*dx)*np.arange(-N/, N/, np.float_)ind = np.concatenate([np.arange(N/, N, int),np.arange(, N/, int)])df = f[]-f[]cfl = exp(-gamma*np.absolute(*pi*f)**alpha)px = np.fft.fft(np.take(cfl,ind)*df).astype(np.float_)return np.take(px, ind)", "docstring": "Returm the levy pdf evaluated at *x* for params *gamma*, *alpha*", "id": "f17237:m27"} {"signature": "def find(condition):", "body": "res, = np.nonzero(np.ravel(condition))return res", "docstring": "Return the indices where ravel(condition) is true", "id": "f17237:m28"} {"signature": "def trapz(x, y):", "body": "warnings.warn(\"\", DeprecationWarning)return np.trapz(y, x)", "docstring": "Trapezoidal integral of *y*(*x*).", "id": "f17237:m29"} {"signature": "def longest_contiguous_ones(x):", "body": "x = np.ravel(x)if len(x)==:return np.array([])ind = (x==).nonzero()[]if len(ind)==:return np.arange(len(x))if len(ind)==len(x):return np.array([])y = np.zeros( (len(x)+,), x.dtype)y[:-] = xdif = np.diff(y)up = (dif == ).nonzero()[];dn = (dif == -).nonzero()[];i = (dn-up == max(dn - up)).nonzero()[][]ind = np.arange(up[i], dn[i])return ind", "docstring": "Return the indices of the longest stretch of contiguous ones in *x*,\nassuming *x* is a vector of zeros and ones. If there are two\nequally long stretches, pick the first.", "id": "f17237:m30"} {"signature": "def longest_ones(x):", "body": "return longest_contiguous_ones(x)", "docstring": "alias for longest_contiguous_ones", "id": "f17237:m31"} {"signature": "def prepca(P, frac=):", "body": "U,s,v = np.linalg.svd(P)varEach = s**/P.shape[]totVar = varEach.sum()fracVar = varEach/totVarind = slice((fracVar>=frac).sum())Trans = U[:,ind].transpose()Pcomponents = np.dot(Trans,P)return Pcomponents, Trans, fracVar[ind]", "docstring": "Compute the principal components of *P*. *P* is a (*numVars*,\n*numObs*) array. *frac* is the minimum fraction of variance that a\ncomponent must contain to be included.\n\nReturn value is a tuple of the form (*Pcomponents*, *Trans*,\n*fracVar*) where:\n\n - *Pcomponents* : a (numVars, numObs) array\n\n - *Trans* : the weights matrix, ie, *Pcomponents* = *Trans* *\n *P*\n\n - *fracVar* : the fraction of the variance accounted for by each\n component returned\n\nA similar function of the same name was in the Matlab (TM)\nR13 Neural Network Toolbox but is not found in later versions;\nits successor seems to be called \"processpcs\".", "id": "f17237:m32"} {"signature": "def prctile(x, p = (, , , , )):", "body": "x = np.array(x).ravel() x.sort()Nx = len(x)if not cbook.iterable(p):return x[int(p*Nx/)]p = np.asarray(p)* Nx/ind = p.astype(int)ind = np.where(ind>=Nx, Nx-, ind)return x.take(ind)", "docstring": "Return the percentiles of *x*. *p* can either be a sequence of\npercentile values or a scalar. 
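The normpdf record above (f17237:m26) is the closed-form Gaussian density; written out with the stripped literals restored (the constants are an assumption based on the standard formula)::

    import numpy as np

    def normpdf(x, mu, sigma):
        return 1.0 / (np.sqrt(2 * np.pi) * sigma) * np.exp(-0.5 * ((x - mu) / sigma) ** 2)

    print(normpdf(0.0, 0.0, 1.0))   # ~0.3989, the standard normal peak
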
If *p* is a sequence, the ith\nelement of the return sequence is the *p*(i)-th percentile of *x*.\nIf *p* is a scalar, the largest value of *x* less than or equal to\nthe *p* percentage point in the sequence is returned.", "id": "f17237:m33"} {"signature": "def prctile_rank(x, p):", "body": "if not cbook.iterable(p):p = np.arange(/p, , /p)else:p = np.asarray(p)if p.max()<= or p.min()< or p.max()>:raise ValueError('')ptiles = prctile(x, p)return np.searchsorted(ptiles, x)", "docstring": "Return the rank for each element in *x*, return the rank\n0..len(*p*). Eg if *p* = (25, 50, 75), the return value will be a\nlen(*x*) array with values in [0,1,2,3] where 0 indicates the\nvalue is less than the 25th percentile, 1 indicates the value is\n>= the 25th and < 50th percentile, ... and 3 indicates the value\nis above the 75th percentile cutoff.\n\n*p* is either an array of percentiles in [0..100] or a scalar which\nindicates how many quantiles of data you want ranked.", "id": "f17237:m34"} {"signature": "def center_matrix(M, dim=):", "body": "M = np.asarray(M, np.float_)if dim:M = (M - M.mean(axis=)) / M.std(axis=)else:M = (M - M.mean(axis=)[:,np.newaxis])M = M / M.std(axis=)[:,np.newaxis]return M", "docstring": "Return the matrix *M* with each row having zero mean and unit std.\n\nIf *dim* = 1 operate on columns instead of rows. (*dim* is\nopposite to the numpy axis kwarg.)", "id": "f17237:m35"} {"signature": "def rk4(derivs, y0, t):", "body": "try: Ny = len(y0)except TypeError:yout = np.zeros( (len(t),), np.float_)else:yout = np.zeros( (len(t), Ny), np.float_)yout[] = y0i = for i in np.arange(len(t)-):thist = t[i]dt = t[i+] - thistdt2 = dt/y0 = yout[i]k1 = np.asarray(derivs(y0, thist))k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2))k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2))k4 = np.asarray(derivs(y0 + dt*k3, thist+dt))yout[i+] = y0 + dt/*(k1 + *k2 + *k3 + k4)return yout", "docstring": "Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.\nThis is a toy implementation which may be useful if you find\nyourself stranded on a system w/o scipy. Otherwise use\n:func:`scipy.integrate`.\n\n*y0*\n initial state vector\n\n*t*\n sample times\n\n*derivs*\n returns the derivative of the system and has the\n signature ``dy = derivs(yi, ti)``\n\n\nExample 1 ::\n\n ## 2D system\n\n def derivs6(x,t):\n d1 = x[0] + 2*x[1]\n d2 = -3*x[0] + 4*x[1]\n return (d1, d2)\n dt = 0.0005\n t = arange(0.0, 2.0, dt)\n y0 = (1,2)\n yout = rk4(derivs6, y0, t)\n\nExample 2::\n\n ## 1D system\n alpha = 2\n def derivs(x,t):\n return -alpha*x + exp(-t)\n\n y0 = 1\n yout = rk4(derivs, y0, t)\n\n\nIf you have access to scipy, you should probably be using the\nscipy.integrate tools rather than this function.", "id": "f17237:m36"} {"signature": "def bivariate_normal(X, Y, sigmax=, sigmay=,mux=, muy=, sigmaxy=):", "body": "Xmu = X-muxYmu = Y-muyrho = sigmaxy/(sigmax*sigmay)z = Xmu**/sigmax** + Ymu**/sigmay** - *rho*Xmu*Ymu/(sigmax*sigmay)denom = *np.pi*sigmax*sigmay*np.sqrt(-rho**)return np.exp( -z/(*(-rho**))) / denom", "docstring": "Bivariate Gaussian distribution for equal shape *X*, *Y*.\n\nSee `bivariate normal\n`_\nat mathworld.", "id": "f17237:m37"} {"signature": "def get_xyz_where(Z, Cond):", "body": "X,Y = np.indices(Z.shape)return X[Cond], Y[Cond], Z[Cond]", "docstring": "*Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is\na boolean matrix where some condition is satisfied. Return value\nis (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and\n*z* are the values of *Z* at those indices. 
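A quick check of the rk4 record above (f17237:m36) against the exact solution of dy/dt = -alpha*y, assuming the old matplotlib.mlab module still exports rk4; the step size is illustrative::

    import numpy as np
    from matplotlib import mlab

    alpha = 2.0
    t = np.arange(0.0, 2.0, 0.01)
    yout = mlab.rk4(lambda y, ti: -alpha * y, 1.0, t)   # y(0) = 1
    # fourth-order truncation error should be tiny at this step size
    print(np.max(np.abs(np.ravel(yout) - np.exp(-alpha * t))))
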
*x*, *y*, and *z* are\n1D arrays.", "id": "f17237:m38"} {"signature": "def get_sparse_matrix(M,N,frac=):", "body": "data = np.zeros((M,N))*for i in range(int(M*N*frac)):x = np.random.randint(,M-)y = np.random.randint(,N-)data[x,y] = np.random.rand()return data", "docstring": "Return a *M* x *N* sparse matrix with *frac* elements randomly\nfilled.", "id": "f17237:m39"} {"signature": "def dist(x,y):", "body": "d = x-yreturn np.sqrt(np.dot(d,d))", "docstring": "Return the distance between two points.", "id": "f17237:m40"} {"signature": "def dist_point_to_segment(p, s0, s1):", "body": "p = np.asarray(p, np.float_)s0 = np.asarray(s0, np.float_)s1 = np.asarray(s1, np.float_)v = s1 - s0w = p - s0c1 = np.dot(w,v);if ( c1 <= ):return dist(p, s0);c2 = np.dot(v,v)if ( c2 <= c1 ):return dist(p, s1);b = c1 / c2pb = s0 + b * v;return dist(p, pb)", "docstring": "Get the distance of a point to a segment.\n\n *p*, *s0*, *s1* are *xy* sequences\n\nThis algorithm from\nhttp://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment", "id": "f17237:m41"} {"signature": "def segments_intersect(s1, s2):", "body": "(x1, y1), (x2, y2) = s1(x3, y3), (x4, y4) = s2den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1))n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3))n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3))if den == :return Falseu1 = n1/denu2 = n2/denreturn <= u1 <= and <= u2 <= ", "docstring": "Return *True* if *s1* and *s2* intersect.\n*s1* and *s2* are defined as::\n\n s1: (x1, y1), (x2, y2)\n s2: (x3, y3), (x4, y4)", "id": "f17237:m42"} {"signature": "def fftsurr(x, detrend=detrend_none, window=window_none):", "body": "if cbook.iterable(window):x=window*detrend(x)else:x = window(detrend(x))z = np.fft.fft(x)a = *np.pi*phase = a * np.random.rand(len(x))z = z*np.exp(phase)return np.fft.ifft(z).real", "docstring": "Compute an FFT phase randomized surrogate of *x*.", "id": "f17237:m43"} {"signature": "def liaupunov(x, fprime):", "body": "return np.mean(np.log(np.absolute(fprime(x))))", "docstring": "*x* is a very long trajectory from a map, and *fprime* returns the\nderivative of *x*.\n\nReturns :\n.. math::\n\n \\lambda = \\\\frac{1}{n}\\\\sum \\\\ln|f^'(x_i)|\n\n.. seealso::\n Sec 10.5 Strogatz (1994) \"Nonlinear Dynamics and Chaos\".\n\n `Wikipedia article on Lyapunov Exponent\n `_.\n\n.. note::\n What the function here calculates may not be what you really want;\n *caveat emptor*.\n\n It also seems that this function's name is badly misspelled.", "id": "f17237:m44"} {"signature": "def movavg(x,n):", "body": "w = np.empty((n,), dtype=np.float_)w[:] = /nreturn np.convolve(x, w, mode='')", "docstring": "Compute the len(*n*) moving average of *x*.", "id": "f17237:m45"} {"signature": "def save(fname, X, fmt='',delimiter=''):", "body": "if cbook.is_string_like(fname):if fname.endswith(''):import gzipfh = gzip.open(fname,'')else:fh = file(fname,'')elif hasattr(fname, ''):fh = fnameelse:raise ValueError('')X = np.asarray(X)origShape = Noneif X.ndim == :origShape = X.shapeX.shape = len(X), for row in X:fh.write(delimiter.join([fmt%val for val in row]) + '')if origShape is not None:X.shape = origShape", "docstring": "Save the data in *X* to file *fname* using *fmt* string to convert the\ndata to strings.\n\n*fname* can be a filename or a file handle. If the filename ends\nin '.gz', the file is automatically saved in compressed gzip\nformat. 
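A hedged reconstruction of segments_intersect from the record above (f17237:m42); the stripped numeric literals are filled in with the standard parametric line-intersection test, which is an assumption::

    def segments_intersect(s1, s2):
        (x1, y1), (x2, y2) = s1
        (x3, y3), (x4, y4) = s2
        den = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
        if den == 0:
            return False    # parallel or degenerate segments
        u1 = ((x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)) / den
        u2 = ((x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)) / den
        return 0 <= u1 <= 1 and 0 <= u2 <= 1

    print(segments_intersect(((0, 0), (1, 1)), ((0, 1), (1, 0))))   # True
    print(segments_intersect(((0, 0), (1, 0)), ((0, 1), (1, 1))))   # False
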
The :func:`load` function understands gzipped files\ntransparently.\n\nExample usage::\n\n save('test.out', X) # X is an array\n save('test1.out', (x,y,z)) # x,y,z equal sized 1D arrays\n save('test2.out', x) # x is 1D\n save('test3.out', x, fmt='%1.4e') # use exponential notation\n\n*delimiter* is used to separate the fields, eg. *delimiter* ','\nfor comma-separated values.", "id": "f17237:m46"} {"signature": "def load(fname,comments='',delimiter=None, converters=None,skiprows=,usecols=None, unpack=False, dtype=np.float_):", "body": "if converters is None: converters = {}fh = cbook.to_filehandle(fname)X = []if delimiter=='':def splitfunc(x):return x.split()else:def splitfunc(x):return x.split(delimiter)converterseq = Nonefor i,line in enumerate(fh):if iline = line.split(comments, )[].strip()if not len(line): continueif converterseq is None:converterseq = [converters.get(j,float)for j,val in enumerate(splitfunc(line))]if usecols is not None:vals = splitfunc(line)row = [converterseq[j](vals[j]) for j in usecols]else:row = [converterseq[j](val)for j,val in enumerate(splitfunc(line))]thisLen = len(row)X.append(row)X = np.array(X, dtype)r,c = X.shapeif r== or c==:X.shape = max(r,c),if unpack: return X.transpose()else: return X", "docstring": "Load ASCII data from *fname* into an array and return the array.\n\nThe data must be regular, same number of values in every row\n\n*fname* can be a filename or a file handle. Support for gzipped\nfiles is automatic, if the filename ends in '.gz'.\n\nmatfile data is not supported; for that, use :mod:`scipy.io.mio`\nmodule.\n\nExample usage::\n\n X = load('test.dat') # data in two columns\n t = X[:,0]\n y = X[:,1]\n\nAlternatively, you can do the same with \"unpack\"; see below::\n\n X = load('test.dat') # a matrix of data\n x = load('test.dat') # a single column of data\n\n- *comments*: the character used to indicate the start of a comment\n in the file\n\n- *delimiter* is a string-like character used to seperate values\n in the file. If *delimiter* is unspecified or *None*, any\n whitespace string is a separator.\n\n- *converters*, if not *None*, is a dictionary mapping column number to\n a function that will convert that column to a float (or the optional\n *dtype* if specified). Eg, if column 0 is a date string::\n\n converters = {0:datestr2num}\n\n- *skiprows* is the number of rows from the top to skip.\n\n- *usecols*, if not *None*, is a sequence of integer column indexes to\n extract where 0 is the first column, eg ``usecols=[1,4,5]`` to extract\n just the 2nd, 5th and 6th columns\n\n- *unpack*, if *True*, will transpose the matrix allowing you to unpack\n into named arguments on the left hand side::\n\n t,y = load('test.dat', unpack=True) # for two column data\n x,y,z = load('somefile.dat', usecols=[3,5,7], unpack=True)\n\n- *dtype*: the array will have this dtype. default: ``numpy.float_``\n\n.. seealso::\n See :file:`examples/pylab_examples/load_converter.py` in the source tree:\n Exercises many of these options.", "id": "f17237:m47"} {"signature": "def slopes(x,y):", "body": "x=np.asarray(x, np.float_)y=np.asarray(y, np.float_)yp=np.zeros(y.shape, np.float_)dx=x[:] - x[:-]dy=y[:] - y[:-]dydx = dy/dxyp[:-] = (dydx[:-] * dx[:] + dydx[:] * dx[:-])/(dx[:] + dx[:-])yp[] = * dy[]/dx[] - yp[]yp[-] = * dy[-]/dx[-] - yp[-]return yp", "docstring": "SLOPES calculate the slope y'(x) Given data vectors X and Y SLOPES\ncalculates Y'(X), i.e the slope of a curve Y(X). 
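The save/load records above (f17237:m46, m47) mirror numpy's savetxt/loadtxt; a round trip with the numpy functions, reusing the 'test.out' file name from the docstring examples::

    import numpy as np

    x = np.linspace(0, 1, 5)
    np.savetxt('test.out', np.column_stack([x, x ** 2]), fmt='%1.4e', delimiter=',')
    t, v = np.loadtxt('test.out', delimiter=',', unpack=True)
    print(np.allclose(v, t ** 2))   # True
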
The slope is\nestimated using the slope obtained from that of a parabola through\nany three consecutive points.\n\nThis method should be superior to that described in the appendix\nof A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel\nW. Stineman (Creative Computing July 1980) in at least one aspect:\n\nCircles for interpolation demand a known aspect ratio between x-\nand y-values. For many functions, however, the abscissa are given\nin different dimensions, so an aspect ratio is completely\narbitrary.\n\nThe parabola method gives very similar results to the circle\nmethod for most regular cases but behaves much better in special\ncases\n\nNorbert Nemec, Institute of Theoretical Physics, University or\nRegensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de\n\n(inspired by a original implementation by Halldor Bjornsson,\nIcelandic Meteorological Office, March 2006 halldor at vedur.is)", "id": "f17237:m48"} {"signature": "def stineman_interp(xi,x,y,yp=None):", "body": "x=np.asarray(x, np.float_)y=np.asarray(y, np.float_)assert x.shape == y.shapeN=len(y)if yp is None:yp = slopes(x,y)else:yp=np.asarray(yp, np.float_)xi=np.asarray(xi, np.float_)yi=np.zeros(xi.shape, np.float_)dx = x[:] - x[:-]dy = y[:] - y[:-]s = dy/dx idx = np.searchsorted(x[:-], xi)sidx = s.take(idx)xidx = x.take(idx)yidx = y.take(idx)xidxp1 = x.take(idx+)yo = yidx + sidx * (xi - xidx)dy1 = (yp.take(idx)- sidx) * (xi - xidx) dy2 = (yp.take(idx+)-sidx) * (xi - xidxp1) dy1dy2 = dy1*dy2yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+,((*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),,/(dy1+dy2),))return yi", "docstring": "STINEMAN_INTERP Well behaved data interpolation. Given data\nvectors X and Y, the slope vector YP and a new abscissa vector XI\nthe function stineman_interp(xi,x,y,yp) uses Stineman\ninterpolation to calculate a vector YI corresponding to XI.\n\nHere's an example that generates a coarse sine curve, then\ninterpolates over a finer abscissa:\n\n x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)\n xi = linspace(0,2*pi,40);\n yi = stineman_interp(xi,x,y,yp);\n plot(x,y,'o',xi,yi)\n\nThe interpolation method is described in the article A\nCONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell\nW. Stineman. The article appeared in the July 1980 issue of\nCreative Computing with a note from the editor stating that while\nthey were\n\n not an academic journal but once in a while something serious\n and original comes in adding that this was\n \"apparently a real solution\" to a well known problem.\n\nFor yp=None, the routine automatically determines the slopes using\nthe \"slopes\" routine.\n\nX is assumed to be sorted in increasing order\n\nFor values xi[j] < x[0] or xi[j] > x[-1], the routine tries a\nextrapolation. 
The relevance of the data obtained from this, of\ncourse, questionable...\n\noriginal implementation by Halldor Bjornsson, Icelandic\nMeteorolocial Office, March 2006 halldor at vedur.is\n\ncompletely reworked and optimized for Python by Norbert Nemec,\nInstitute of Theoretical Physics, University or Regensburg, April\n2006 Norbert.Nemec at physik.uni-regensburg.de", "id": "f17237:m49"} {"signature": "def inside_poly(points, verts):", "body": "res, = np.nonzero(nxutils.points_inside_poly(points, verts))return res", "docstring": "points is a sequence of x,y points\nverts is a sequence of x,y vertices of a poygon\n\nreturn value is a sequence of indices into points for the points\nthat are inside the polygon", "id": "f17237:m50"} {"signature": "def poly_below(ymin, xs, ys):", "body": "return poly_between(xs, ys, xmin)", "docstring": "given a arrays *xs* and *ys*, return the vertices of a polygon\nthat has a scalar lower bound *ymin* and an upper bound at the *ys*.\n\nintended for use with Axes.fill, eg::\n\n xv, yv = poly_below(0, x, y)\n ax.fill(xv, yv)", "id": "f17237:m51"} {"signature": "def poly_between(x, ylower, yupper):", "body": "Nx = len(x)if not cbook.iterable(ylower):ylower = ylower*np.ones(Nx)if not cbook.iterable(yupper):yupper = yupper*np.ones(Nx)x = np.concatenate( (x, x[::-]) )y = np.concatenate( (yupper, ylower[::-]) )return x,y", "docstring": "given a sequence of x, ylower and yupper, return the polygon that\nfills the regions between them. ylower or yupper can be scalar or\niterable. If they are iterable, they must be equal in length to x\n\nreturn value is x, y arrays for use with Axes.fill", "id": "f17237:m52"} {"signature": "def exp_safe(x):", "body": "if type(x) is np.ndarray:return exp(np.clip(x,exp_safe_MIN,exp_safe_MAX))else:return math.exp(x)", "docstring": "Compute exponentials which safely underflow to zero.\n\nSlow, but convenient to use. Note that numpy provides proper\nfloating point exception handling with access to the underlying\nhardware.", "id": "f17237:m53"} {"signature": "def amap(fn,*args):", "body": "return np.array(list(map(fn,*args)))", "docstring": "amap(function, sequence[, sequence, ...]) -> array.\n\nWorks like :func:`map`, but it returns an array. 
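A usage sketch for the poly_between record above (f17237:m52), assuming the old matplotlib.mlab module; the bounds and data are illustrative::

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib import mlab

    x = np.linspace(0, 2 * np.pi, 100)
    xv, yv = mlab.poly_between(x, -1.0, np.sin(x))   # scalar lower bound, array upper bound
    fig, ax = plt.subplots()
    ax.fill(xv, yv, alpha=0.3)
    plt.show()
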
This is just a\nconvenient shorthand for ``numpy.array(map(...))``.", "id": "f17237:m54"} {"signature": "def zeros_like(a):", "body": "warnings.warn(\"\", DeprecationWarning)return np.zeros_like(a)", "docstring": "Return an array of zeros of the shape and typecode of *a*.", "id": "f17237:m55"} {"signature": "def sum_flat(a):", "body": "warnings.warn(\"\", DeprecationWarning)return np.sum(a)", "docstring": "Return the sum of all the elements of *a*, flattened out.\n\nIt uses ``a.flat``, and if *a* is not contiguous, a call to\n``ravel(a)`` is made.", "id": "f17237:m56"} {"signature": "def mean_flat(a):", "body": "warnings.warn(\"\", DeprecationWarning)return np.mean(a)", "docstring": "Return the mean of all the elements of *a*, flattened out.", "id": "f17237:m57"} {"signature": "def rms_flat(a):", "body": "return np.sqrt(np.mean(np.absolute(a)**))", "docstring": "Return the root mean square of all the elements of *a*, flattened out.", "id": "f17237:m58"} {"signature": "def l1norm(a):", "body": "return np.sum(np.absolute(a))", "docstring": "Return the *l1* norm of *a*, flattened out.\n\nImplemented as a separate function (not a call to :func:`norm` for speed).", "id": "f17237:m59"} {"signature": "def l2norm(a):", "body": "return np.sqrt(np.sum(np.absolute(a)**))", "docstring": "Return the *l2* norm of *a*, flattened out.\n\nImplemented as a separate function (not a call to :func:`norm` for speed).", "id": "f17237:m60"} {"signature": "def norm_flat(a,p=):", "body": "if p=='':return np.amax(np.absolute(a))else:return (np.sum(np.absolute(a)**p))**(/p)", "docstring": "norm(a,p=2) -> l-p norm of a.flat\n\nReturn the l-p norm of *a*, considered as a flat array. This is NOT a true\nmatrix norm, since arrays of arbitrary rank are always flattened.\n\n*p* can be a number or the string 'Infinity' to get the L-infinity norm.", "id": "f17237:m61"} {"signature": "def frange(xini,xfin=None,delta=None,**kw):", "body": "kw.setdefault('',)endpoint = kw[''] != if xfin == None:xfin = xini + xini = if delta == None:delta = try:npts=kw['']delta=(xfin-xini)/float(npts-endpoint)except KeyError:npts = int(round((xfin-xini)/delta)) + endpointreturn np.arange(npts)*delta+xini", "docstring": "frange([start,] stop[, step, keywords]) -> array of floats\n\nReturn a numpy ndarray containing a progression of floats. Similar to\n:func:`numpy.arange`, but defaults to a closed interval.\n\n``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start*\ndefaults to 0, and the endpoint *is included*. This behavior is\ndifferent from that of :func:`range` and\n:func:`numpy.arange`. This is deliberate, since :func:`frange`\nwill probably be more useful for generating lists of points for\nfunction evaluation, and endpoints are often desired in this\nuse. The usual behavior of :func:`range` can be obtained by\nsetting the keyword *closed* = 0, in this case, :func:`frange`\nbasically becomes :func:numpy.arange`.\n\nWhen *step* is given, it specifies the increment (or\ndecrement). All arguments can be floating point numbers.\n\n``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where\n*xfin* <= *x1*.\n\n:func:`frange` can also be called with the keyword *npts*. This\nsets the number of points the list should contain (and overrides\nthe value *step* might have been given). 
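The l1norm/l2norm/norm_flat records above (f17237:m59-m61) are thin wrappers over numpy reductions; spelled out with illustrative data::

    import numpy as np

    a = np.arange(6, dtype=float).reshape(2, 3)
    l1 = np.sum(np.abs(a))                    # l1norm of the flattened array
    l2 = np.sqrt(np.sum(np.abs(a) ** 2))      # l2norm of the flattened array
    print(l1, l2, np.linalg.norm(a.ravel()))  # the last two agree
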
:func:`numpy.arange`\ndoesn't offer this option.\n\nExamples::\n\n >>> frange(3)\n array([ 0., 1., 2., 3.])\n >>> frange(3,closed=0)\n array([ 0., 1., 2.])\n >>> frange(1,6,2)\n array([1, 3, 5]) or 1,3,5,7, depending on floating point vagueries\n >>> frange(1,6.5,npts=5)\n array([ 1. , 2.375, 3.75 , 5.125, 6.5 ])", "id": "f17237:m62"} {"signature": "def diagonal_matrix(diag):", "body": "warnings.warn(\"\", DeprecationWarning)return np.diag(diag)", "docstring": "Return square diagonal matrix whose non-zero elements are given by the\ninput array.", "id": "f17237:m63"} {"signature": "def identity(n, rank=, dtype='', typecode=None):", "body": "if typecode is not None:warnings.warn(\"\",DeprecationWarning)dtype = typecodeiden = np.zeros((n,)*rank, dtype)for i in range(n):idx = (i,)*rankiden[idx] = return iden", "docstring": "Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).\n\nFor ranks higher than 2, this object is simply a multi-index Kronecker\ndelta::\n\n / 1 if i0=i1=...=iR,\n id[i0,i1,...,iR] = -|\n \\ 0 otherwise.\n\nOptionally a *dtype* (or typecode) may be given (it defaults to 'l').\n\nSince rank defaults to 2, this function behaves in the default case (when\nonly *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is\nmuch faster.", "id": "f17237:m64"} {"signature": "def base_repr (number, base = , padding = ):", "body": "chars = ''if number < base:return (padding - ) * chars [] + chars [int (number)]max_exponent = int (math.log (number)/math.log (base))max_power = int (base) ** max_exponentlead_digit = int (number/max_power)return chars [lead_digit] +base_repr (number - max_power * lead_digit, base,max (padding - , max_exponent))", "docstring": "Return the representation of a *number* in any given *base*.", "id": "f17237:m65"} {"signature": "def binary_repr(number, max_length = ):", "body": "shifts = list(map (operator.rshift, max_length * [number],list(range(max_length - , -, -))))digits = list(map (operator.mod, shifts, max_length * []))if not digits.count (): return digits = digits [digits.index ():]return ''.join (map (repr, digits)).replace('','')", "docstring": "Return the binary representation of the input *number* as a\nstring.\n\nThis is more efficient than using :func:`base_repr` with base 2.\n\nIncrease the value of max_length for very large numbers. 
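A hedged reconstruction of frange from the record above (f17237:m62), whose literals are stripped; the endpoint handling follows the closed-interval behaviour and the examples in its docstring::

    import numpy as np

    def frange(x0, x1, step=1.0):
        npts = int(round((x1 - x0) / step)) + 1   # endpoint is included
        return np.arange(npts) * step + x0

    print(frange(0, 3))            # [0. 1. 2. 3.], like frange(3) in the docstring
    print(frange(1, 6.5, 1.375))   # [1.    2.375 3.75  5.125 6.5  ]
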
Note that\non 32-bit machines, 2**1023 is the largest integer power of 2\nwhich can be converted to a Python float.", "id": "f17237:m66"} {"signature": "def log2(x,ln2 = math.log()):", "body": "try:bin_n = binary_repr(x)[:]except (AssertionError,TypeError):return math.log(x)/ln2else:if '' in bin_n:return math.log(x)/ln2else:return len(bin_n)", "docstring": "Return the log(*x*) in base 2.\n\nThis is a _slow_ function but which is guaranteed to return the correct\ninteger value if the input is an integer exact power of 2.", "id": "f17237:m67"} {"signature": "def ispower2(n):", "body": "bin_n = binary_repr(n)[:]if '' in bin_n:return else:return len(bin_n)", "docstring": "Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.\n\nNote the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.", "id": "f17237:m68"} {"signature": "def isvector(X):", "body": "return np.prod(X.shape)==np.max(X.shape)", "docstring": "Like the Matlab (TM) function with the same name, returns *True*\nif the supplied numpy array or matrix *X* looks like a vector,\nmeaning it has a one non-singleton axis (i.e., it can have\nmultiple axes, but all must have length 1, except for one of\nthem).\n\nIf you just want to see if the array has 1 axis, use X.ndim == 1.", "id": "f17237:m69"} {"signature": "def fromfunction_kw(function, dimensions, **kwargs):", "body": "warnings.warn(\"\", DeprecationWarning)return np.fromfunction(function, dimensions, **kwargs)", "docstring": "Drop-in replacement for :func:`numpy.fromfunction`.\n\nAllows passing keyword arguments to the desired function.\n\nCall it as (keywords are optional)::\n\n fromfunction_kw(MyFunction, dimensions, keywords)\n\nThe function ``MyFunction`` is responsible for handling the\ndictionary of keywords it will receive.", "id": "f17237:m70"} {"signature": "def rem(x,y):", "body": "raise NotImplementedError('')", "docstring": "Deprecated - see :func:`numpy.remainder`", "id": "f17237:m71"} {"signature": "def norm(x,y=):", "body": "raise NotImplementedError('')", "docstring": "Deprecated - see :func:`numpy.linalg.norm`", "id": "f17237:m72"} {"signature": "def orth(A):", "body": "raise NotImplementedError('')", "docstring": "Deprecated - needs clean room implementation", "id": "f17237:m73"} {"signature": "def rank(x):", "body": "raise NotImplementedError('')", "docstring": "Deprecated - see :func:`numpy.rank`", "id": "f17237:m74"} {"signature": "def sqrtm(x):", "body": "raise NotImplementedError('')", "docstring": "Deprecated - needs clean room implementation", "id": "f17237:m75"} {"signature": "def mfuncC(f, x):", "body": "raise NotImplementedError('')", "docstring": "Deprecated", "id": "f17237:m76"} {"signature": "def approx_real(x):", "body": "raise NotImplementedError('')", "docstring": "Deprecated - needs clean room implementation", "id": "f17237:m77"} {"signature": "def safe_isnan(x):", "body": "if cbook.is_string_like(x):return Falsetry: b = np.isnan(x)except NotImplementedError: return Falseexcept TypeError: return Falseelse: return b", "docstring": ":func:`numpy.isnan` for arbitrary types", "id": "f17237:m78"} {"signature": "def safe_isinf(x):", "body": "if cbook.is_string_like(x):return Falsetry: b = np.isinf(x)except NotImplementedError: return Falseexcept TypeError: return Falseelse: return b", "docstring": ":func:`numpy.isinf` for arbitrary types", "id": "f17237:m79"} {"signature": "def rec_view(rec):", "body": "return rec.view(np.recarray)", "docstring": "Return a view of an ndarray as a recarray\n\n.. 
seealso::\n\n http://projects.scipy.org/pipermail/numpy-discussion/2008-August/036429.html", "id": "f17237:m80"} {"signature": "def rec_append_field(rec, name, arr, dtype=None):", "body": "warnings.warn(\"\", DeprecationWarning)return rec_append_fields(rec, name, arr, dtype)", "docstring": "Return a new record array with field name populated with data from\narray *arr*. This function is Deprecated. Please use\n:func:`rec_append_fields`.", "id": "f17237:m81"} {"signature": "def rec_append_fields(rec, names, arrs, dtypes=None):", "body": "if (not cbook.is_string_like(names) and cbook.iterable(names)and len(names) and cbook.is_string_like(names[])):if len(names) != len(arrs):raise ValueError(\"\")else: names = [names]arrs = [arrs]arrs = list(map(np.asarray, arrs))if dtypes is None:dtypes = [a.dtype for a in arrs]elif not cbook.iterable(dtypes):dtypes = [dtypes]if len(arrs) != len(dtypes):if len(dtypes) == :dtypes = dtypes * len(arrs)else:raise ValueError(\"\")newdtype = np.dtype(rec.dtype.descr + list(zip(names, dtypes)))newrec = np.empty(rec.shape, dtype=newdtype)for field in rec.dtype.fields:newrec[field] = rec[field]for name, arr in zip(names, arrs):newrec[name] = arrreturn rec_view(newrec)", "docstring": "Return a new record array with field names populated with data\nfrom arrays in *arrs*. If appending a single field, then *names*,\n*arrs* and *dtypes* do not have to be lists. They can just be the\nvalues themselves.", "id": "f17237:m82"} {"signature": "def rec_drop_fields(rec, names):", "body": "names = set(names)Nr = len(rec)newdtype = np.dtype([(name, rec.dtype[name]) for name in rec.dtype.namesif name not in names])newrec = np.empty(Nr, dtype=newdtype)for field in newdtype.names:newrec[field] = rec[field]return rec_view(newrec)", "docstring": "Return a new numpy record array with fields in *names* dropped.", "id": "f17237:m83"} {"signature": "def rec_groupby(r, groupby, stats):", "body": "rowd = dict()for i, row in enumerate(r):key = tuple([row[attr] for attr in groupby])rowd.setdefault(key, []).append(i)keys = list(rowd.keys())keys.sort()rows = []for key in keys:row = list(key)ind = rowd[key]thisr = r[ind]row.extend([func(thisr[attr]) for attr, func, outname in stats])rows.append(row)attrs, funcs, outnames = list(zip(*stats))names = list(groupby)names.extend(outnames)return np.rec.fromrecords(rows, names=names)", "docstring": "*r* is a numpy record array\n\n*groupby* is a sequence of record array attribute names that\ntogether form the grouping key. eg ('date', 'productcode')\n\n*stats* is a sequence of (*attr*, *func*, *outname*) tuples which\nwill call ``x = func(attr)`` and assign *x* to the record array\noutput with attribute *outname*. For example::\n\n stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )\n\nReturn record array has *dtype* names for each attribute name in\nthe the *groupby* argument, with the associated group values, and\nfor each outname name in the *stats* argument, with the associated\nstat summary output.", "id": "f17237:m84"} {"signature": "def rec_summarize(r, summaryfuncs):", "body": "names = list(r.dtype.names)arrays = [r[name] for name in names]for attr, func, outname in summaryfuncs:names.append(outname)arrays.append(np.asarray(func(r[attr])))return np.rec.fromarrays(arrays, names=names)", "docstring": "*r* is a numpy record array\n\n*summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples\nwhich will apply *func* to the the array *r*[attr] and assign the\noutput to a new attribute name *outname*. 
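A usage sketch for the rec_groupby record above (f17237:m84), assuming the old matplotlib.mlab module; the record array is illustrative and the stats tuples follow the docstring's own example::

    import numpy as np
    from matplotlib import mlab

    r = np.rec.fromrecords([('a', 1.0), ('a', 3.0), ('b', 2.0)],
                           names='productcode,sales')
    stats = (('sales', len, 'numsales'), ('sales', np.mean, 'avgsale'))
    print(mlab.rec_groupby(r, ('productcode',), stats))   # one row per product code
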
The returned record\narray is identical to *r*, with extra arrays for each element in\n*summaryfuncs*.", "id": "f17237:m85"} {"signature": "def rec_join(key, r1, r2, jointype='', defaults=None, r1postfix='', r2postfix=''):", "body": "if cbook.is_string_like(key):key = (key, )for name in key:if name not in r1.dtype.names:raise ValueError(''%name)if name not in r2.dtype.names:raise ValueError(''%name)def makekey(row):return tuple([row[name] for name in key])r1d = dict([(makekey(row),i) for i,row in enumerate(r1)])r2d = dict([(makekey(row),i) for i,row in enumerate(r2)])r1keys = set(r1d.keys())r2keys = set(r2d.keys())common_keys = r1keys & r2keysr1ind = np.array([r1d[k] for k in common_keys])r2ind = np.array([r2d[k] for k in common_keys])common_len = len(common_keys)left_len = right_len = if jointype == \"\" or jointype == \"\":left_keys = r1keys.difference(r2keys)left_ind = np.array([r1d[k] for k in left_keys])left_len = len(left_ind)if jointype == \"\":right_keys = r2keys.difference(r1keys)right_ind = np.array([r2d[k] for k in right_keys])right_len = len(right_ind)def key_desc(name):''dt1 = r1.dtype[name]if dt1.type != np.string_:return (name, dt1.descr[][])dt2 = r1.dtype[name]assert dt2==dt1if dt1.num>dt2.num:return (name, dt1.descr[][])else:return (name, dt2.descr[][])keydesc = [key_desc(name) for name in key]def mapped_r1field(name):\"\"\"\"\"\"if name in key or name not in r2.dtype.names: return nameelse: return name + r1postfixdef mapped_r2field(name):\"\"\"\"\"\"if name in key or name not in r1.dtype.names: return nameelse: return name + r2postfixr1desc = [(mapped_r1field(desc[]), desc[]) for desc in r1.dtype.descr if desc[] not in key]r2desc = [(mapped_r2field(desc[]), desc[]) for desc in r2.dtype.descr if desc[] not in key]newdtype = np.dtype(keydesc + r1desc + r2desc)newrec = np.empty(common_len + left_len + right_len, dtype=newdtype)if jointype != '' and defaults is not None: newrec_fields = list(newrec.dtype.fields.keys())for k, v in list(defaults.items()):if k in newrec_fields:newrec[k] = vfor field in r1.dtype.names:newfield = mapped_r1field(field)if common_len:newrec[newfield][:common_len] = r1[field][r1ind]if (jointype == \"\" or jointype == \"\") and left_len:newrec[newfield][common_len:(common_len+left_len)] = r1[field][left_ind]for field in r2.dtype.names:newfield = mapped_r2field(field)if field not in key and common_len:newrec[newfield][:common_len] = r2[field][r2ind]if jointype == \"\" and right_len:newrec[newfield][-right_len:] = r2[field][right_ind]newrec.sort(order=key)return rec_view(newrec)", "docstring": "Join record arrays *r1* and *r2* on *key*; *key* is a tuple of\nfield names -- if *key* is a string it is assumed to be a single\nattribute name. If *r1* and *r2* have equal values on all the keys\nin the *key* tuple, then their fields will be merged into a new\nrecord array containing the intersection of the fields of *r1* and\n*r2*.\n\n*r1* (also *r2*) must not have any duplicate keys.\n\nThe *jointype* keyword can be 'inner', 'outer', 'leftouter'. 
To\ndo a rightouter join just reverse *r1* and *r2*.\n\nThe *defaults* keyword is a dictionary filled with\n``{column_name:default_value}`` pairs.\n\nThe keywords *r1postfix* and *r2postfix* are postfixed to column names\n(other than keys) that are both in *r1* and *r2*.", "id": "f17237:m86"} {"signature": "def csv2rec(fname, comments='', skiprows=, checkrows=, delimiter='',converterd=None, names=None, missing='', missingd=None,use_mrecords=True):", "body": "if converterd is None:converterd = dict()if missingd is None:missingd = {}import dateutil.parserimport datetimeparsedate = dateutil.parser.parsefh = cbook.to_filehandle(fname)class FH:\"\"\"\"\"\"def __init__(self, fh):self.fh = fhdef close(self):self.fh.close()def seek(self, arg):self.fh.seek(arg)def fix(self, s):return ''.join(s.split())def __next__(self):return self.fix(next(self.fh))def __iter__(self):for line in self.fh:yield self.fix(line)if delimiter=='':fh = FH(fh)reader = csv.reader(fh, delimiter=delimiter)def process_skiprows(reader):if skiprows:for i, row in enumerate(reader):if i>=(skiprows-): breakreturn fh, readerprocess_skiprows(reader)def ismissing(name, val):\"\"if val == missing or val == missingd.get(name) or val == '':return Trueelse:return Falsedef with_default_value(func, default):def newfunc(name, val):if ismissing(name, val):return defaultelse:return func(val)return newfuncdef mybool(x):if x=='': return Trueelif x=='': return Falseelse: raise ValueError('')dateparser = dateutil.parser.parsemydateparser = with_default_value(dateparser, datetime.date(,,))myfloat = with_default_value(float, np.nan)myint = with_default_value(int, -)mystr = with_default_value(str, '')mybool = with_default_value(mybool, None)def mydate(x):d = dateparser(x)if d.hour> or d.minute> or d.second>:raise ValueError('')return d.date()mydate = with_default_value(mydate, datetime.date(,,))def get_func(name, item, func):funcmap = {mybool:myint,myint:myfloat, myfloat:mydate, mydate:mydateparser, mydateparser:mystr}try: func(name, item)except:if func==mystr:raise ValueError('')else: return get_func(name, item, funcmap[func]) else: return funcitemd = {'' : '','' : '','' : '',}def get_converters(reader):converters = Nonefor i, row in enumerate(reader):if i==:converters = [mybool]*len(row)if checkrows and i>checkrows:breakfor j, (name, item) in enumerate(zip(names, row)):func = converterd.get(j)if func is None:func = converterd.get(name)if func is None:func = converters[j]if len(item.strip()):func = get_func(name, item, func)else:func = with_default_value(func, None)converters[j] = funcreturn convertersneedheader = names is Noneif needheader:for row in reader:if len(row) and row[].startswith(comments):continueheaders = rowbreakdelete = set(\"\"\"\"\"\")delete.add('')names = []seen = dict()for i, item in enumerate(headers):item = item.strip().lower().replace('', '')item = ''.join([c for c in item if c not in delete])if not len(item):item = ''%iitem = itemd.get(item, item)cnt = seen.get(item, )if cnt>:names.append(item + ''%cnt)else:names.append(item)seen[item] = cnt+else:if cbook.is_string_like(names):names = [n.strip() for n in names.split('')]converters = get_converters(reader)if converters is None:raise ValueError('')fh.seek()reader = csv.reader(fh, delimiter=delimiter)process_skiprows(reader)if needheader:skipheader = next(reader)rows = []rowmasks = []for i, row in enumerate(reader):if not len(row): continueif row[].startswith(comments): continuerows.append([func(name, val) for func, name, val in zip(converters, names, 
row)])rowmasks.append([ismissing(name, val) for name, val in zip(names, row)])fh.close()if not len(rows):return Noneif use_mrecords and np.any(rowmasks):try: from numpy.ma import mrecordsexcept ImportError:raise RuntimeError('')else:r = mrecords.fromrecords(rows, names=names, mask=rowmasks)else:r = np.rec.fromrecords(rows, names=names)return r", "docstring": "Load data from comma/space/tab delimited file in *fname* into a\nnumpy record array and return the record array.\n\nIf *names* is *None*, a header row is required to automatically\nassign the recarray names. The headers will be lower cased,\nspaces will be converted to underscores, and illegal attribute\nname characters removed. If *names* is not *None*, it is a\nsequence of names to use for the column names. In this case, it\nis assumed there is no header row.\n\n\n- *fname*: can be a filename or a file handle. Support for gzipped\n files is automatic, if the filename ends in '.gz'\n\n- *comments*: the character used to indicate the start of a comment\n in the file\n\n- *skiprows*: is the number of rows from the top to skip\n\n- *checkrows*: is the number of rows to check to validate the column\n data type. When set to zero all rows are validated.\n\n- *converted*: if not *None*, is a dictionary mapping column number or\n munged column name to a converter function.\n\n- *names*: if not None, is a list of header names. In this case, no\n header will be read from the file\n\n- *missingd* is a dictionary mapping munged column names to field values\n which signify that the field does not contain actual data and should\n be masked, e.g. '0000-00-00' or 'unused'\n\n- *missing*: a string whose value signals a missing field regardless of\n the column it appears in\n\n- *use_mrecords*: if True, return an mrecords.fromrecords record array if any of the data are missing\n\n If no rows are found, *None* is returned -- see :file:`examples/loadrec.py`", "id": "f17237:m87"} {"signature": "def get_formatd(r, formatd=None):", "body": "if formatd is None:formatd = dict()for i, name in enumerate(r.dtype.names):dt = r.dtype[name]format = formatd.get(name)if format is None:format = defaultformatd.get(dt.type, FormatObj())formatd[name] = formatreturn formatd", "docstring": "build a formatd guaranteed to have a key for every dtype name", "id": "f17237:m88"} {"signature": "def rec2txt(r, header=None, padding=, precision=):", "body": "if cbook.is_numlike(precision):precision = [precision]*len(r.dtype)def get_type(item,atype=int):tdict = {None:int, int:float, float:str}try: atype(str(item))except: return get_type(item,tdict[atype])return atypedef get_justify(colname, column, precision):ntype = type(column[])if ntype==np.str or ntype==np.str_ or ntype==np.string0 or ntype==np.string_:length = max(len(colname),column.itemsize)return , length+padding, \"\" if ntype==np.int or ntype==np.int16 or ntype==np.int32 or ntype==np.int64 or ntype==np.int8 or ntype==np.int_:length = max(len(colname),np.max(list(map(len,list(map(str,column))))))return , length+padding, \"\" \"\"\"\"\"\"if ntype==np.float or ntype==np.float32 or ntype==np.float64 or (hasattr(np, '') and (ntype==np.float96)) or ntype==np.float_:fmt = \"\" + str(precision) + \"\"length = max(len(colname),np.max(list(map(len,[fmt%x for x in column]))))return , length+padding, fmt return , max(len(colname),np.max(list(map(len,list(map(str,column))))))+padding, \"\"if header is None:header = r.dtype.namesjustify_pad_prec = [get_justify(header[i],r.__getitem__(colname),precision[i]) for i, colname in 
enumerate(r.dtype.names)]justify_pad_prec_spacer = []for i in range(len(justify_pad_prec)):just,pad,prec = justify_pad_prec[i]if i == :justify_pad_prec_spacer.append((just,pad,prec,))else:pjust,ppad,pprec = justify_pad_prec[i-]if pjust == and just == :justify_pad_prec_spacer.append((just,pad-padding,prec,))elif pjust == and just == :justify_pad_prec_spacer.append((just,pad,prec,padding))else:justify_pad_prec_spacer.append((just,pad,prec,))def format(item, just_pad_prec_spacer):just, pad, prec, spacer = just_pad_prec_spacerif just == :return spacer*'' + str(item).ljust(pad)else:if get_type(item) == float:item = (prec%float(item))elif get_type(item) == int:item = (prec%int(item))return item.rjust(pad)textl = []textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)]))for i, row in enumerate(r):textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)]))if i==:textl[] = textl[].rstrip()text = os.linesep.join(textl)return text", "docstring": "Returns a textual representation of a record array.\n\n*r*: numpy recarray\n\n*header*: list of column headers\n\n*padding*: space between each column\n\n*precision*: number of decimal places to use for floats.\n Set to an integer to apply to all floats. Set to a\n list of integers to apply precision individually.\n Precision for non-floats is simply ignored.\n\nExample::\n\n precision=[0,2,3]\n\nOutput::\n\n ID Price Return\n ABC 12.54 0.234\n XYZ 6.32 -0.076", "id": "f17237:m90"} {"signature": "def rec2csv(r, fname, delimiter='', formatd=None, missing='',missingd=None):", "body": "if missingd is None:missingd = dict()def with_mask(func):def newfunc(val, mask, mval):if mask:return mvalelse:return func(val)return newfuncformatd = get_formatd(r, formatd)funcs = []for i, name in enumerate(r.dtype.names):funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))fh, opened = cbook.to_filehandle(fname, '', return_opened=True)writer = csv.writer(fh, delimiter=delimiter)header = r.dtype.nameswriter.writerow(header)mvals = []for name in header:mvals.append(missingd.get(name, missing))ismasked = Falseif len(r):row = r[]ismasked = hasattr(row, '')for row in r:if ismasked:row, rowmask = row.item(), row._fieldmask.item()else:rowmask = [False] * len(row)writer.writerow([func(val, mask, mval) for func, val, mask, mvalin zip(funcs, row, rowmask, mvals)])if opened:fh.close()", "docstring": "Save the data from numpy recarray *r* into a\ncomma-/space-/tab-delimited file. The record array dtype names\nwill be used for column headers.\n\n*fname*: can be a filename or a file handle. Support for gzipped\n files is automatic, if the filename ends in '.gz'\n\n.. 
seealso::\n :func:`csv2rec`:\n For information about *missing* and *missingd*, which can\n be used to fill in masked values into your CSV file.", "id": "f17237:m91"} {"signature": "def griddata(x,y,z,xi,yi):", "body": "try:from mpl_toolkits.natgrid import _natgrid, __version___use_natgrid = Trueexcept ImportError:import matplotlib.delaunay as delaunayfrom matplotlib.delaunay import __version___use_natgrid = Falseif not griddata._reported:if _use_natgrid:verbose.report('' % __version__)else:verbose.report('' % __version__)griddata._reported = Trueif xi.ndim != yi.ndim:raise TypeError(\"\")if xi.ndim != and xi.ndim != :raise TypeError(\"\")if not len(x)==len(y)==len(z):raise TypeError(\"\")if hasattr(z,''):x = x.compress(z.mask == False)y = y.compress(z.mask == False)z = z.compressed()if _use_natgrid: if xi.ndim == :xi = xi[,:]yi = yi[:,]_natgrid.seti('',)_natgrid.setr('',np.nan)x = x.astype(np.float)y = y.astype(np.float)z = z.astype(np.float)xo = xi.astype(np.float)yo = yi.astype(np.float)if min(xo[:]-xo[:-]) < or min(yo[:]-yo[:-]) < :raise ValueError('')zo = np.empty((yo.shape[],xo.shape[]), np.float)_natgrid.natgridd(x,y,z,xo,yo,zo)else: if xi.ndim != yi.ndim:raise TypeError(\"\")if xi.ndim != and xi.ndim != :raise TypeError(\"\")if xi.ndim == :xi,yi = np.meshgrid(xi,yi)tri = delaunay.Triangulation(x,y)interp = tri.nn_interpolator(z)zo = interp(xi,yi)if np.any(np.isnan(zo)):zo = np.ma.masked_where(np.isnan(zo),zo)return zo", "docstring": "``zi = griddata(x,y,z,xi,yi)`` fits a surface of the form *z* =\n*f*(*x*, *y*) to the data in the (usually) nonuniformly spaced\nvectors (*x*, *y*, *z*). :func:`griddata` interpolates this\nsurface at the points specified by (*xi*, *yi*) to produce\n*zi*. *xi* and *yi* must describe a regular grid, can be either 1D\nor 2D, but must be monotonically increasing.\n\nA masked array is returned if any grid points are outside convex\nhull defined by input data (no extrapolation is done).\n\nUses natural neighbor interpolation based on Delaunay\ntriangulation. By default, this algorithm is provided by the\n:mod:`matplotlib.delaunay` package, written by Robert Kern. The\ntriangulation algorithm in this package is known to fail on some\nnearly pathological cases. For this reason, a separate toolkit\n(:mod:`mpl_tookits.natgrid`) has been created that provides a more\nrobust algorithm fof triangulation and interpolation. This\ntoolkit is based on the NCAR natgrid library, which contains code\nthat is not redistributable under a BSD-compatible license. 
When\ninstalled, this function will use the :mod:`mpl_toolkits.natgrid`\nalgorithm, otherwise it will use the built-in\n:mod:`matplotlib.delaunay` package.\n\nThe natgrid matplotlib toolkit can be downloaded from\nhttp://sourceforge.net/project/showfiles.php?group_id=80706&package_id=142792", "id": "f17237:m92"} {"signature": "def less_simple_linear_interpolation( x, y, xi, extrap=False ):", "body": "if cbook.is_scalar(xi): xi = [xi]x = np.asarray(x)y = np.asarray(y)xi = np.asarray(xi)s = list(y.shape)s[] = len(xi)yi = np.tile( np.nan, s )for ii,xx in enumerate(xi):bb = x == xxif np.any(bb):jj, = np.nonzero(bb)yi[ii] = y[jj[]]elif xx]:if extrap:yi[ii] = y[]elif xx>x[-]:if extrap:yi[ii] = y[-]else:jj, = np.nonzero(xjj = max(jj)yi[ii] = y[jj] + (xx-x[jj])/(x[jj+]-x[jj]) * (y[jj+]-y[jj])return yi", "docstring": "This function provides simple (but somewhat less so than\n:func:`cbook.simple_linear_interpolation`) linear interpolation.\n:func:`simple_linear_interpolation` will give a list of point\nbetween a start and an end, while this does true linear\ninterpolation at an arbitrary set of points.\n\nThis is very inefficient linear interpolation meant to be used\nonly for a small number of points in relatively non-intensive use\ncases. For real linear interpolation, use scipy.", "id": "f17237:m93"} {"signature": "def slopes(x,y):", "body": "x=np.asarray(x, np.float_)y=np.asarray(y, np.float_)yp=np.zeros(y.shape, np.float_)dx=x[:] - x[:-]dy=y[:] - y[:-]dydx = dy/dxyp[:-] = (dydx[:-] * dx[:] + dydx[:] * dx[:-])/(dx[:] + dx[:-])yp[] = * dy[]/dx[] - yp[]yp[-] = * dy[-]/dx[-] - yp[-]return yp", "docstring": ":func:`slopes` calculates the slope *y*'(*x*)\n\nThe slope is estimated using the slope obtained from that of a\nparabola through any three consecutive points.\n\nThis method should be superior to that described in the appendix\nof A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel\nW. Stineman (Creative Computing July 1980) in at least one aspect:\n\n Circles for interpolation demand a known aspect ratio between\n *x*- and *y*-values. 
For many functions, however, the abscissa\n are given in different dimensions, so an aspect ratio is\n completely arbitrary.\n\nThe parabola method gives very similar results to the circle\nmethod for most regular cases but behaves much better in special\ncases.\n\nNorbert Nemec, Institute of Theoretical Physics, University or\nRegensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de\n\n(inspired by a original implementation by Halldor Bjornsson,\nIcelandic Meteorological Office, March 2006 halldor at vedur.is)", "id": "f17237:m94"} {"signature": "def stineman_interp(xi,x,y,yp=None):", "body": "x=np.asarray(x, np.float_)y=np.asarray(y, np.float_)assert x.shape == y.shapeN=len(y)if yp is None:yp = slopes(x,y)else:yp=np.asarray(yp, np.float_)xi=np.asarray(xi, np.float_)yi=np.zeros(xi.shape, np.float_)dx = x[:] - x[:-]dy = y[:] - y[:-]s = dy/dx idx = np.searchsorted(x[:-], xi)sidx = s.take(idx)xidx = x.take(idx)yidx = y.take(idx)xidxp1 = x.take(idx+)yo = yidx + sidx * (xi - xidx)dy1 = (yp.take(idx)- sidx) * (xi - xidx) dy2 = (yp.take(idx+)-sidx) * (xi - xidxp1) dy1dy2 = dy1*dy2yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+,((*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),,/(dy1+dy2),))return yi", "docstring": "Given data vectors *x* and *y*, the slope vector *yp* and a new\nabscissa vector *xi*, the function :func:`stineman_interp` uses\nStineman interpolation to calculate a vector *yi* corresponding to\n*xi*.\n\nHere's an example that generates a coarse sine curve, then\ninterpolates over a finer abscissa::\n\n x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)\n xi = linspace(0,2*pi,40);\n yi = stineman_interp(xi,x,y,yp);\n plot(x,y,'o',xi,yi)\n\nThe interpolation method is described in the article A\nCONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell\nW. Stineman. The article appeared in the July 1980 issue of\nCreative Computing with a note from the editor stating that while\nthey were:\n\n not an academic journal but once in a while something serious\n and original comes in adding that this was\n \"apparently a real solution\" to a well known problem.\n\nFor *yp* = *None*, the routine automatically determines the slopes\nusing the :func:`slopes` routine.\n\n*x* is assumed to be sorted in increasing order.\n\nFor values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine\ntries an extrapolation. 
The relevance of the data obtained from\nthis, of course, is questionable...\n\nOriginal implementation by Halldor Bjornsson, Icelandic\nMeteorolocial Office, March 2006 halldor at vedur.is\n\nCompletely reworked and optimized for Python by Norbert Nemec,\nInstitute of Theoretical Physics, University or Regensburg, April\n2006 Norbert.Nemec at physik.uni-regensburg.de", "id": "f17237:m95"} {"signature": "def inside_poly(points, verts):", "body": "res, = np.nonzero(nxutils.points_inside_poly(points, verts))return res", "docstring": "*points* is a sequence of *x*, *y* points.\n*verts* is a sequence of *x*, *y* vertices of a polygon.\n\nReturn value is a sequence of indices into points for the points\nthat are inside the polygon.", "id": "f17237:m96"} {"signature": "def poly_below(xmin, xs, ys):", "body": "if ma.isMaskedArray(xs) or ma.isMaskedArray(ys):nx = maelse:nx = npxs = nx.asarray(xs)ys = nx.asarray(ys)Nx = len(xs)Ny = len(ys)assert(Nx==Ny)x = xmin*nx.ones(*Nx)y = nx.ones(*Nx)x[:Nx] = xsy[:Nx] = ysy[Nx:] = ys[::-]return x, y", "docstring": "Given a sequence of *xs* and *ys*, return the vertices of a\npolygon that has a horizontal base at *xmin* and an upper bound at\nthe *ys*. *xmin* is a scalar.\n\nIntended for use with :meth:`matplotlib.axes.Axes.fill`, eg::\n\n xv, yv = poly_below(0, x, y)\n ax.fill(xv, yv)", "id": "f17237:m97"} {"signature": "def poly_between(x, ylower, yupper):", "body": "if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x):nx = maelse:nx = npNx = len(x)if not cbook.iterable(ylower):ylower = ylower*nx.ones(Nx)if not cbook.iterable(yupper):yupper = yupper*nx.ones(Nx)x = nx.concatenate( (x, x[::-]) )y = nx.concatenate( (yupper, ylower[::-]) )return x,y", "docstring": "Given a sequence of *x*, *ylower* and *yupper*, return the polygon\nthat fills the regions between them. *ylower* or *yupper* can be\nscalar or iterable. If they are iterable, they must be equal in\nlength to *x*.\n\nReturn value is *x*, *y* arrays for use with\n:meth:`matplotlib.axes.Axes.fill`.", "id": "f17237:m98"} {"signature": "def is_closed_polygon(X):", "body": "return np.all(X[] == X[-])", "docstring": "Tests whether first and last object in a sequence are the same. These are\npresumably coordinates on a polygonal curve, in which case this function\ntests if that curve is closed.", "id": "f17237:m99"} {"signature": "def contiguous_regions(mask):", "body": "in_region = Noneboundaries = []for i, val in enumerate(mask):if in_region is None and val:in_region = ielif in_region is not None and not val:boundaries.append((in_region, i))in_region = Noneif in_region is not None:boundaries.append((in_region, i+))return boundaries", "docstring": "return a list of (ind0, ind1) such that mask[ind0:ind1].all() is\nTrue and we cover all such regions\n\nTODO: this is a pure python implementation which probably has a much faster numpy impl", "id": "f17237:m100"} {"signature": "def vector_lengths( X, P=, axis=None ):", "body": "X = np.asarray(X)return (np.sum(X**(P),axis=axis))**(/P)", "docstring": "Finds the length of a set of vectors in *n* dimensions. This is\nlike the :func:`numpy.norm` function for vectors, but has the ability to\nwork over a particular axis of the supplied array or matrix.\n\nComputes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the\nelements of *X* along the given axis. 
If *axis* is *None*,\ncompute over all elements of *X*.", "id": "f17237:m101"} {"signature": "def distances_along_curve( X ):", "body": "X = np.diff( X, axis= )return vector_lengths(X,axis=)", "docstring": "Computes the distance between a set of successive points in *N* dimensions.\n\nWhere *X* is an *M* x *N* array or matrix. The distances between\nsuccessive rows is computed. Distance is the standard Euclidean\ndistance.", "id": "f17237:m102"} {"signature": "def path_length(X):", "body": "X = distances_along_curve(X)return np.concatenate( (np.zeros(), np.cumsum(X)) )", "docstring": "Computes the distance travelled along a polygonal curve in *N* dimensions.\n\nWhere *X* is an *M* x *N* array or matrix. Returns an array of\nlength *M* consisting of the distance along the curve at each point\n(i.e., the rows of *X*).", "id": "f17237:m103"} {"signature": "def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):", "body": "c1x, c1y = q0x + / * (q1x - q0x), q0y + / * (q1y - q0y)c2x, c2y = c1x + / * (q2x - q0x), c1y + / * (q2y - q0y)return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y", "docstring": "Converts a quadratic Bezier curve to a cubic approximation.\n\nThe inputs are the *x* and *y* coordinates of the three control\npoints of a quadratic curve, and the output is a tuple of *x* and\n*y* coordinates of the four control points of the cubic curve.", "id": "f17237:m104"} {"signature": "def __init__(self, nmax):", "body": "self._xa = np.zeros((nmax,), np.float_)self._ya = np.zeros((nmax,), np.float_)self._xs = np.zeros((nmax,), np.float_)self._ys = np.zeros((nmax,), np.float_)self._ind = self._nmax = nmaxself.dataLim = Noneself.callbackd = {}", "docstring": "Buffer up to *nmax* points.", "id": "f17237:c0:m0"} {"signature": "def register(self, func, N):", "body": "self.callbackd.setdefault(N, []).append(func)", "docstring": "Call *func* every time *N* events are passed; *func* signature\nis ``func(fifo)``.", "id": "f17237:c0:m1"} {"signature": "def add(self, x, y):", "body": "if self.dataLim is not None:xys = ((x,y),)self.dataLim.update(xys, -) ind = self._ind % self._nmaxself._xs[ind] = xself._ys[ind] = yfor N,funcs in list(self.callbackd.items()):if (self._ind%N)==:for func in funcs:func(self)self._ind += ", "docstring": "Add scalar *x* and *y* to the queue.", "id": "f17237:c0:m2"} {"signature": "def last(self):", "body": "if self._ind==: return None, Noneind = (self._ind-) % self._nmaxreturn self._xs[ind], self._ys[ind]", "docstring": "Get the last *x*, *y* or *None*. 
*None* if no data set.", "id": "f17237:c0:m3"} {"signature": "def asarrays(self):", "body": "if self._indreturn self._xs[:self._ind], self._ys[:self._ind]ind = self._ind % self._nmaxself._xa[:self._nmax-ind] = self._xs[ind:]self._xa[self._nmax-ind:] = self._xs[:ind]self._ya[:self._nmax-ind] = self._ys[ind:]self._ya[self._nmax-ind:] = self._ys[:ind]return self._xa, self._ya", "docstring": "Return *x* and *y* as arrays; their length will be the len of\ndata added or *nmax*.", "id": "f17237:c0:m4"} {"signature": "def update_datalim_to_current(self):", "body": "if self.dataLim is None:raise ValueError('')x, y = self.asarrays()self.dataLim.update_numerix(x, y, True)", "docstring": "Update the *datalim* in the current data in the fifo.", "id": "f17237:c0:m5"} {"signature": "def _process_plot_format(fmt):", "body": "linestyle = Nonemarker = Nonecolor = Nonetry:color = mcolors.colorConverter.to_rgb(fmt)return linestyle, marker, color except ValueError:pass if fmt.find('')>=:linestyle = ''fmt = fmt.replace('', '')if fmt.find('')>=:linestyle = ''fmt = fmt.replace('', '')if fmt.find('')>=:linestyle = ''fmt = fmt.replace('', '')chars = [c for c in fmt]for c in chars:if c in mlines.lineStyles:if linestyle is not None:raise ValueError('' % fmt)linestyle = celif c in mlines.lineMarkers:if marker is not None:raise ValueError('' % fmt)marker = celif c in mcolors.colorConverter.colors:if color is not None:raise ValueError('' % fmt)color = celse:raise ValueError('' % c)if linestyle is None and marker is None:linestyle = rcParams['']if linestyle is None:linestyle = ''if marker is None:marker = ''return linestyle, marker, color", "docstring": "Process a matlab(TM) style color/line style format string. Return a\n(*linestyle*, *color*) tuple as a result of the processing. Default\nvalues are ('-', 'b'). Example format strings include:\n\n* 'ko': black circles\n* '.b': blue dots\n* 'r--': red dashed lines\n\n.. seealso::\n :func:`~matplotlib.Line2D.lineStyles` and\n :func:`~matplotlib.pyplot.colors`:\n for all possible styles and color format string.", "id": "f17238:m0"} {"signature": "def set_default_color_cycle(clist):", "body": "_process_plot_var_args.defaultColors = clist[:]rcParams[''] = clist[]", "docstring": "Change the default cycle of colors that will be used by the plot\ncommand. 
This must be called before creating the\n:class:`Axes` to which it will apply; it will\napply to all future axes.\n\n*clist* is a sequence of mpl color specifiers", "id": "f17238:m1"} {"signature": "def get_window_extent(self, *args, **kwargs):", "body": "return self.bbox", "docstring": "get the axes bounding box in display space; *args* and\n*kwargs* are empty", "id": "f17238:c1:m2"} {"signature": "def _init_axis(self):", "body": "self.xaxis = maxis.XAxis(self)self.yaxis = maxis.YAxis(self)self._update_transScale()", "docstring": "move this out of __init__ because non-separable axes don't use it", "id": "f17238:c1:m3"} {"signature": "def set_figure(self, fig):", "body": "martist.Artist.set_figure(self, fig)self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure)self.dataLim = mtransforms.Bbox.unit()self.viewLim = mtransforms.Bbox.unit()self.transScale = mtransforms.TransformWrapper(mtransforms.IdentityTransform())self._set_lim_and_transforms()", "docstring": "Set the class:`~matplotlib.axes.Axes` figure\n\naccepts a class:`~matplotlib.figure.Figure` instance", "id": "f17238:c1:m4"} {"signature": "def _set_lim_and_transforms(self):", "body": "self.transAxes = mtransforms.BboxTransformTo(self.bbox)self.transScale = mtransforms.TransformWrapper(mtransforms.IdentityTransform())self.transLimits = mtransforms.BboxTransformFrom(mtransforms.TransformedBbox(self.viewLim, self.transScale))self.transData = self.transScale + (self.transLimits + self.transAxes)self._xaxis_transform = mtransforms.blended_transform_factory(self.axes.transData, self.axes.transAxes)self._yaxis_transform = mtransforms.blended_transform_factory(self.axes.transAxes, self.axes.transData)", "docstring": "set the *dataLim* and *viewLim*\n:class:`~matplotlib.transforms.Bbox` attributes and the\n*transScale*, *transData*, *transLimits* and *transAxes*\ntransformations.", "id": "f17238:c1:m5"} {"signature": "def get_xaxis_transform(self):", "body": "return self._xaxis_transform", "docstring": "Get the transformation used for drawing x-axis labels, ticks\nand gridlines. The x-direction is in data coordinates and the\ny-direction is in axis coordinates.\n\n.. note::\n This transformation is primarily used by the\n :class:`~matplotlib.axis.Axis` class, and is meant to be\n overridden by new kinds of projections that may need to\n place axis elements in different locations.", "id": "f17238:c1:m6"} {"signature": "def get_xaxis_text1_transform(self, pad_points):", "body": "return (self._xaxis_transform +mtransforms.ScaledTranslation(, - * pad_points / ,self.figure.dpi_scale_trans),\"\", \"\")", "docstring": "Get the transformation used for drawing x-axis labels, which\nwill add the given amount of padding (in points) between the\naxes and the label. The x-direction is in data coordinates\nand the y-direction is in axis coordinates. Returns a\n3-tuple of the form::\n\n (transform, valign, halign)\n\nwhere *valign* and *halign* are requested alignments for the\ntext.\n\n.. 
note::\n This transformation is primarily used by the\n :class:`~matplotlib.axis.Axis` class, and is meant to be\n overridden by new kinds of projections that may need to\n place axis elements in different locations.", "id": "f17238:c1:m7"} {"signature": "def get_xaxis_text2_transform(self, pad_points):", "body": "return (self._xaxis_transform +mtransforms.ScaledTranslation(, pad_points / ,self.figure.dpi_scale_trans),\"\", \"\")", "docstring": "Get the transformation used for drawing the secondary x-axis\nlabels, which will add the given amount of padding (in points)\nbetween the axes and the label. The x-direction is in data\ncoordinates and the y-direction is in axis coordinates.\nReturns a 3-tuple of the form::\n\n (transform, valign, halign)\n\nwhere *valign* and *halign* are requested alignments for the\ntext.\n\n.. note::\n This transformation is primarily used by the\n :class:`~matplotlib.axis.Axis` class, and is meant to be\n overridden by new kinds of projections that may need to\n place axis elements in different locations.", "id": "f17238:c1:m8"} {"signature": "def get_yaxis_transform(self):", "body": "return self._yaxis_transform", "docstring": "Get the transformation used for drawing y-axis labels, ticks\nand gridlines. The x-direction is in axis coordinates and the\ny-direction is in data coordinates.\n\n.. note::\n This transformation is primarily used by the\n :class:`~matplotlib.axis.Axis` class, and is meant to be\n overridden by new kinds of projections that may need to\n place axis elements in different locations.", "id": "f17238:c1:m9"} {"signature": "def get_yaxis_text1_transform(self, pad_points):", "body": "return (self._yaxis_transform +mtransforms.ScaledTranslation(- * pad_points / , ,self.figure.dpi_scale_trans),\"\", \"\")", "docstring": "Get the transformation used for drawing y-axis labels, which\nwill add the given amount of padding (in points) between the\naxes and the label. The x-direction is in axis coordinates\nand the y-direction is in data coordinates. Returns a 3-tuple\nof the form::\n\n (transform, valign, halign)\n\nwhere *valign* and *halign* are requested alignments for the\ntext.\n\n.. note::\n This transformation is primarily used by the\n :class:`~matplotlib.axis.Axis` class, and is meant to be\n overridden by new kinds of projections that may need to\n place axis elements in different locations.", "id": "f17238:c1:m10"} {"signature": "def get_yaxis_text2_transform(self, pad_points):", "body": "return (self._yaxis_transform +mtransforms.ScaledTranslation(pad_points / , ,self.figure.dpi_scale_trans),\"\", \"\")", "docstring": "Get the transformation used for drawing the secondary y-axis\nlabels, which will add the given amount of padding (in points)\nbetween the axes and the label. The x-direction is in axis\ncoordinates and the y-direction is in data coordinates.\nReturns a 3-tuple of the form::\n\n (transform, valign, halign)\n\nwhere *valign* and *halign* are requested alignments for the\ntext.\n\n.. 
note::\n\n This transformation is primarily used by the\n :class:`~matplotlib.axis.Axis` class, and is meant to be\n overridden by new kinds of projections that may need to\n place axis elements in different locations.", "id": "f17238:c1:m11"} {"signature": "def get_position(self, original=False):", "body": "if original:return self._originalPosition.frozen()else:return self._position.frozen()", "docstring": "Return the a copy of the axes rectangle as a Bbox", "id": "f17238:c1:m13"} {"signature": "def set_position(self, pos, which=''):", "body": "if not isinstance(pos, mtransforms.BboxBase):pos = mtransforms.Bbox.from_bounds(*pos)if which in ('', ''):self._position.set(pos)if which in ('', ''):self._originalPosition.set(pos)", "docstring": "Set the axes position with::\n\n pos = [left, bottom, width, height]\n\nin relative 0,1 coords, or *pos* can be a\n:class:`~matplotlib.transforms.Bbox`\n\nThere are two position variables: one which is ultimately\nused, but which may be modified by :meth:`apply_aspect`, and a\nsecond which is the starting point for :meth:`apply_aspect`.\n\n\nOptional keyword arguments:\n *which*\n\n ========== ====================\n value description\n ========== ====================\n 'active' to change the first\n 'original' to change the second\n 'both' to change both\n ========== ====================", "id": "f17238:c1:m14"} {"signature": "def reset_position(self):", "body": "pos = self.get_position(original=True)self.set_position(pos, which='')", "docstring": "Make the original position the active position", "id": "f17238:c1:m15"} {"signature": "def _set_artist_props(self, a):", "body": "a.set_figure(self.figure)if not a.is_transform_set():a.set_transform(self.transData)a.set_axes(self)", "docstring": "set the boilerplate props for artists added to axes", "id": "f17238:c1:m16"} {"signature": "def _gen_axes_patch(self):", "body": "return mpatches.Rectangle((, ), , )", "docstring": "Returns the patch used to draw the background of the axes. It\nis also used as the clipping path for any data elements on the\naxes.\n\nIn the standard axes, this is a rectangle, but in other\nprojections it may not be.\n\n.. 
note::\n Intended to be overridden by new projection types.", "id": "f17238:c1:m17"} {"signature": "def cla(self):", "body": "self.xaxis.cla()self.yaxis.cla()self.ignore_existing_data_limits = Trueself.callbacks = cbook.CallbackRegistry(('',''))if self._sharex is not None:self.xaxis.major = self._sharex.xaxis.majorself.xaxis.minor = self._sharex.xaxis.minorx0, x1 = self._sharex.get_xlim()self.set_xlim(x0, x1, emit=False)self.xaxis.set_scale(self._sharex.xaxis.get_scale())else:self.xaxis.set_scale('')if self._sharey is not None:self.yaxis.major = self._sharey.yaxis.majorself.yaxis.minor = self._sharey.yaxis.minory0, y1 = self._sharey.get_ylim()self.set_ylim(y0, y1, emit=False)self.yaxis.set_scale(self._sharey.yaxis.get_scale())else:self.yaxis.set_scale('')self._autoscaleon = Trueself._update_transScale() self._get_lines = _process_plot_var_args(self)self._get_patches_for_fill = _process_plot_var_args(self, '')self._gridOn = rcParams['']self.lines = []self.patches = []self.texts = []self.tables = []self.artists = []self.images = []self.legend_ = Noneself.collections = [] self.grid(self._gridOn)props = font_manager.FontProperties(size=rcParams[''])self.titleOffsetTrans = mtransforms.ScaledTranslation(, / , self.figure.dpi_scale_trans)self.title = mtext.Text(x=, y=, text='',fontproperties=props,verticalalignment='',horizontalalignment='',)self.title.set_transform(self.transAxes + self.titleOffsetTrans)self.title.set_clip_box(None)self._set_artist_props(self.title)self.patch = self.axesPatch = self._gen_axes_patch()self.patch.set_figure(self.figure)self.patch.set_facecolor(self._axisbg)self.patch.set_edgecolor('')self.patch.set_linewidth()self.patch.set_transform(self.transAxes)self.frame = self.axesFrame = self._gen_axes_patch()self.frame.set_figure(self.figure)self.frame.set_facecolor('')self.frame.set_edgecolor(rcParams[''])self.frame.set_linewidth(rcParams[''])self.frame.set_transform(self.transAxes)self.frame.set_zorder()self.axison = Trueself.xaxis.set_clip_path(self.patch)self.yaxis.set_clip_path(self.patch)self._shared_x_axes.clean()self._shared_y_axes.clean()", "docstring": "Clear the current axes", "id": "f17238:c1:m18"} {"signature": "def clear(self):", "body": "self.cla()", "docstring": "clear the axes", "id": "f17238:c1:m19"} {"signature": "def set_color_cycle(self, clist):", "body": "self._get_lines.set_color_cycle(clist)", "docstring": "Set the color cycle for any future plot commands on this Axes.\n\nclist is a list of mpl color specifiers.", "id": "f17238:c1:m20"} {"signature": "def ishold(self):", "body": "return self._hold", "docstring": "return the HOLD status of the axes", "id": "f17238:c1:m21"} {"signature": "def hold(self, b=None):", "body": "if b is None:self._hold = not self._holdelse:self._hold = b", "docstring": "call signature::\n\n hold(b=None)\n\nSet the hold state. If *hold* is *None* (default), toggle the\n*hold* state. Else set the *hold* state to boolean value *b*.\n\nExamples:\n\n* toggle hold:\n >>> hold()\n* turn hold on:\n >>> hold(True)\n* turn hold off\n >>> hold(False)\n\n\nWhen hold is True, subsequent plot commands will be added to\nthe current axes. 
When hold is False, the current axes and\nfigure will be cleared on the next plot command", "id": "f17238:c1:m22"} {"signature": "def set_aspect(self, aspect, adjustable=None, anchor=None):", "body": "if aspect in ('', ''):self._aspect = ''elif aspect == '':self._aspect = ''else:self._aspect = float(aspect) if adjustable is not None:self.set_adjustable(adjustable)if anchor is not None:self.set_anchor(anchor)", "docstring": "*aspect*\n\n ======== ================================================\n value description\n ======== ================================================\n 'auto' automatic; fill position rectangle with data\n 'normal' same as 'auto'; deprecated\n 'equal' same scaling from data to plot units for x and y\n num a circle will be stretched such that the height\n is num times the width. aspect=1 is the same as\n aspect='equal'.\n ======== ================================================\n\n*adjustable*\n\n ========= ============================\n value description\n ========= ============================\n 'box' change physical size of axes\n 'datalim' change xlim or ylim\n ========= ============================\n\n*anchor*\n\n ===== =====================\n value description\n ===== =====================\n 'C' centered\n 'SW' lower left corner\n 'S' middle of bottom edge\n 'SE' lower right corner\n etc.\n ===== =====================", "id": "f17238:c1:m24"} {"signature": "def set_adjustable(self, adjustable):", "body": "if adjustable in ('', ''):if self in self._shared_x_axes or self in self._shared_y_axes:if adjustable == '':raise ValueError('')self._adjustable = adjustableelse:raise ValueError('')", "docstring": "ACCEPTS: [ 'box' | 'datalim' ]", "id": "f17238:c1:m26"} {"signature": "def set_anchor(self, anchor):", "body": "if anchor in list(mtransforms.Bbox.coefs.keys()) or len(anchor) == :self._anchor = anchorelse:raise ValueError('' %''.join(list(mtransforms.BBox.coefs.keys())))", "docstring": "*anchor*\n\n ===== ============\n value description\n ===== ============\n 'C' Center\n 'SW' bottom left\n 'S' bottom\n 'SE' bottom right\n 'E' right\n 'NE' top right\n 'N' top\n 'NW' top left\n 'W' left\n ===== ============", "id": "f17238:c1:m28"} {"signature": "def get_data_ratio(self):", "body": "xmin,xmax = self.get_xbound()xsize = max(math.fabs(xmax-xmin), )ymin,ymax = self.get_ybound()ysize = max(math.fabs(ymax-ymin), )return ysize/xsize", "docstring": "Returns the aspect ratio of the raw data.\n\nThis method is intended to be overridden by new projection\ntypes.", "id": "f17238:c1:m29"} {"signature": "def apply_aspect(self, position=None):", "body": "if position is None:position = self.get_position(original=True)aspect = self.get_aspect()if aspect == '':self.set_position( position , which='')returnif aspect == '':A = else:A = aspectif self in self._shared_x_axes or self in self._shared_y_axes:if self._adjustable == '':self._adjustable = ''warnings.warn('')figW,figH = self.get_figure().get_size_inches()fig_aspect = figH/figWif self._adjustable == '':box_aspect = A * self.get_data_ratio()pb = position.frozen()pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)self.set_position(pb1.anchored(self.get_anchor(), pb), '')returnself.set_position(position, which='')xmin,xmax = self.get_xbound()xsize = max(math.fabs(xmax-xmin), )ymin,ymax = self.get_ybound()ysize = max(math.fabs(ymax-ymin), )l,b,w,h = position.boundsbox_aspect = fig_aspect * (h/w)data_ratio = box_aspect / Ay_expander = (data_ratio*xsize/ysize - )if abs(y_expander) < :returndL = self.dataLimxr = * dL.widthyr = * 
dL.heightxmarg = xsize - xrymarg = ysize - yrYsize = data_ratio * xsizeXsize = ysize / data_ratioXmarg = Xsize - xrYmarg = Ysize - yrxm = ym = changex = (self in self._shared_y_axesand self not in self._shared_x_axes)changey = (self in self._shared_x_axesand self not in self._shared_y_axes)if changex and changey:warnings.warn(\"\"\"\")returnif changex:adjust_y = Falseelse:if xmarg > xm and ymarg > ym:adjy = ((Ymarg > and y_expander < )or (Xmarg < and y_expander > ))else:adjy = y_expander > adjust_y = changey or adjy if adjust_y:yc = *(ymin+ymax)y0 = yc - Ysize/y1 = yc + Ysize/self.set_ybound((y0, y1))else:xc = *(xmin+xmax)x0 = xc - Xsize/x1 = xc + Xsize/self.set_xbound((x0, x1))", "docstring": "Use :meth:`_aspect` and :meth:`_adjustable` to modify the\naxes box or the view limits.", "id": "f17238:c1:m30"} {"signature": "def axis(self, *v, **kwargs):", "body": "if len(v)== and is_string_like(v[]):s = v[].lower()if s=='': self.set_axis_on()elif s=='': self.set_axis_off()elif s in ('', '', '', '', '', ''):self.set_autoscale_on(True)self.set_aspect('')self.autoscale_view()if s=='':self.set_aspect('', adjustable='')elif s == '':self.set_aspect('', adjustable='', anchor='')self.set_autoscale_on(False) elif s=='':self.autoscale_view(tight=True)self.set_autoscale_on(False)elif s == '':self.autoscale_view(tight=True)self.set_autoscale_on(False)self.set_aspect('', adjustable='', anchor='')else:raise ValueError('''' % s)xmin, xmax = self.get_xlim()ymin, ymax = self.get_ylim()return xmin, xmax, ymin, ymaxtry: v[]except IndexError:emit = kwargs.get('', True)xmin = kwargs.get('', None)xmax = kwargs.get('', None)xmin, xmax = self.set_xlim(xmin, xmax, emit)ymin = kwargs.get('', None)ymax = kwargs.get('', None)ymin, ymax = self.set_ylim(ymin, ymax, emit)return xmin, xmax, ymin, ymaxv = v[]if len(v) != :raise ValueError('')self.set_xlim([v[], v[]])self.set_ylim([v[], v[]])return v", "docstring": "Convenience method for manipulating the x and y view limits\nand the aspect ratio of the plot.\n\n*kwargs* are passed on to :meth:`set_xlim` and\n:meth:`set_ylim`", "id": "f17238:c1:m31"} {"signature": "def get_child_artists(self):", "body": "raise DeprecationWarning('')", "docstring": "Return a list of artists the axes contains.\n\n.. 
deprecated:: 0.98", "id": "f17238:c1:m32"} {"signature": "def get_frame(self):", "body": "warnings.warn('', DeprecationWarning)return self.patch", "docstring": "Return the axes Rectangle frame", "id": "f17238:c1:m33"} {"signature": "def get_legend(self):", "body": "return self.legend_", "docstring": "Return the legend.Legend instance, or None if no legend is defined", "id": "f17238:c1:m34"} {"signature": "def get_images(self):", "body": "return cbook.silent_list('', self.images)", "docstring": "return a list of Axes images contained by the Axes", "id": "f17238:c1:m35"} {"signature": "def get_lines(self):", "body": "return cbook.silent_list('', self.lines)", "docstring": "Return a list of lines contained by the Axes", "id": "f17238:c1:m36"} {"signature": "def get_xaxis(self):", "body": "return self.xaxis", "docstring": "Return the XAxis instance", "id": "f17238:c1:m37"} {"signature": "def get_xgridlines(self):", "body": "return cbook.silent_list('', self.xaxis.get_gridlines())", "docstring": "Get the x grid lines as a list of Line2D instances", "id": "f17238:c1:m38"} {"signature": "def get_xticklines(self):", "body": "return cbook.silent_list('', self.xaxis.get_ticklines())", "docstring": "Get the xtick lines as a list of Line2D instances", "id": "f17238:c1:m39"} {"signature": "def get_yaxis(self):", "body": "return self.yaxis", "docstring": "Return the YAxis instance", "id": "f17238:c1:m40"} {"signature": "def get_ygridlines(self):", "body": "return cbook.silent_list('', self.yaxis.get_gridlines())", "docstring": "Get the y grid lines as a list of Line2D instances", "id": "f17238:c1:m41"} {"signature": "def get_yticklines(self):", "body": "return cbook.silent_list('', self.yaxis.get_ticklines())", "docstring": "Get the ytick lines as a list of Line2D instances", "id": "f17238:c1:m42"} {"signature": "def has_data(self):", "body": "return (len(self.collections) +len(self.images) +len(self.lines) +len(self.patches))>", "docstring": "Return *True* if any artists have been added to axes.\n\n This should not be used to determine whether the *dataLim*\n need to be updated, and may not actually be useful for\n anything.", "id": "f17238:c1:m43"} {"signature": "def add_artist(self, a):", "body": "a.set_axes(self)self.artists.append(a)self._set_artist_props(a)a.set_clip_path(self.patch)a._remove_method = lambda h: self.artists.remove(h)", "docstring": "Add any :class:`~matplotlib.artist.Artist` to the axes", "id": "f17238:c1:m44"} {"signature": "def add_collection(self, collection, autolim=True):", "body": "label = collection.get_label()if not label:collection.set_label(''%len(self.collections))self.collections.append(collection)self._set_artist_props(collection)collection.set_clip_path(self.patch)if autolim:if collection._paths and len(collection._paths):self.update_datalim(collection.get_datalim(self.transData))collection._remove_method = lambda h: self.collections.remove(h)", "docstring": "add a :class:`~matplotlib.collections.Collection` instance\nto the axes", "id": "f17238:c1:m45"} {"signature": "def add_line(self, line):", "body": "self._set_artist_props(line)line.set_clip_path(self.patch)self._update_line_limits(line)if not line.get_label():line.set_label(''%len(self.lines))self.lines.append(line)line._remove_method = lambda h: self.lines.remove(h)", "docstring": "Add a :class:`~matplotlib.lines.Line2D` to the list of plot\nlines", "id": "f17238:c1:m46"} {"signature": "def add_patch(self, p):", "body": 
"self._set_artist_props(p)p.set_clip_path(self.patch)self._update_patch_limits(p)self.patches.append(p)p._remove_method = lambda h: self.patches.remove(h)", "docstring": "Add a :class:`~matplotlib.patches.Patch` *p* to the list of\naxes patches; the clipbox will be set to the Axes clipping\nbox. If the transform is not set, it will be set to\n:attr:`transData`.", "id": "f17238:c1:m48"} {"signature": "def _update_patch_limits(self, patch):", "body": "if (isinstance(patch, mpatches.Rectangle) and(patch.get_width()== or patch.get_height()==)):returnvertices = patch.get_path().verticesif vertices.size > :xys = patch.get_patch_transform().transform(vertices)if patch.get_data_transform() != self.transData:transform = (patch.get_data_transform() +self.transData.inverted())xys = transform.transform(xys)self.update_datalim(xys, updatex=patch.x_isdata,updatey=patch.y_isdata)", "docstring": "update the data limits for patch *p*", "id": "f17238:c1:m49"} {"signature": "def add_table(self, tab):", "body": "self._set_artist_props(tab)self.tables.append(tab)tab.set_clip_path(self.patch)tab._remove_method = lambda h: self.tables.remove(h)", "docstring": "Add a :class:`~matplotlib.tables.Table` instance to the\nlist of axes tables", "id": "f17238:c1:m50"} {"signature": "def relim(self):", "body": "self.dataLim.ignore(True)self.ignore_existing_data_limits = Truefor line in self.lines:self._update_line_limits(line)for p in self.patches:self._update_patch_limits(p)", "docstring": "recompute the data limits based on current artists", "id": "f17238:c1:m51"} {"signature": "def update_datalim(self, xys, updatex=True, updatey=True):", "body": "if iterable(xys) and not len(xys): returnif not ma.isMaskedArray(xys):xys = np.asarray(xys)self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,updatex=updatex, updatey=updatey)self.ignore_existing_data_limits = False", "docstring": "Update the data lim bbox with seq of xy tups or equiv. 
2-D array", "id": "f17238:c1:m52"} {"signature": "def update_datalim_numerix(self, x, y):", "body": "if iterable(x) and not len(x): returnself.dataLim.update_from_data(x, y, self.ignore_existing_data_limits)self.ignore_existing_data_limits = False", "docstring": "Update the data lim bbox with seq of xy tups", "id": "f17238:c1:m53"} {"signature": "def update_datalim_bounds(self, bounds):", "body": "self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))", "docstring": "Update the datalim to include the given\n:class:`~matplotlib.transforms.Bbox` *bounds*", "id": "f17238:c1:m54"} {"signature": "def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):", "body": "if self.xaxis is None or self.yaxis is None: returnif xdata is not None:if not self.xaxis.have_units():self.xaxis.update_units(xdata)if ydata is not None:if not self.yaxis.have_units():self.yaxis.update_units(ydata)if kwargs is not None:xunits = kwargs.pop( '', self.xaxis.units)if xunits!=self.xaxis.units:self.xaxis.set_units(xunits)if xdata is not None:self.xaxis.update_units(xdata)yunits = kwargs.pop('', self.yaxis.units)if yunits!=self.yaxis.units:self.yaxis.set_units(yunits)if ydata is not None:self.yaxis.update_units(ydata)", "docstring": "look for unit *kwargs* and update the axis instances as necessary", "id": "f17238:c1:m55"} {"signature": "def in_axes(self, mouseevent):", "body": "return self.patch.contains(mouseevent)[]", "docstring": "return *True* if the given *mouseevent* (in display coords)\nis in the Axes", "id": "f17238:c1:m56"} {"signature": "def get_autoscale_on(self):", "body": "return self._autoscaleon", "docstring": "Get whether autoscaling is applied on plot commands", "id": "f17238:c1:m57"} {"signature": "def set_autoscale_on(self, b):", "body": "self._autoscaleon = b", "docstring": "Set whether autoscaling is applied on plot commands\n\naccepts: [ *True* | *False* ]", "id": "f17238:c1:m58"} {"signature": "def autoscale_view(self, tight=False, scalex=True, scaley=True):", "body": "if not self._autoscaleon: returnif scalex:xshared = self._shared_x_axes.get_siblings(self)dl = [ax.dataLim for ax in xshared]bb = mtransforms.BboxBase.union(dl)x0, x1 = bb.intervalxif scaley:yshared = self._shared_y_axes.get_siblings(self)dl = [ax.dataLim for ax in yshared]bb = mtransforms.BboxBase.union(dl)y0, y1 = bb.intervalyif (tight or (len(self.images)> andlen(self.lines)== andlen(self.patches)==)):if scalex:self.set_xbound(x0, x1)if scaley:self.set_ybound(y0, y1)returnif scalex:XL = self.xaxis.get_major_locator().view_limits(x0, x1)self.set_xbound(XL)if scaley:YL = self.yaxis.get_major_locator().view_limits(y0, y1)self.set_ybound(YL)", "docstring": "autoscale the view limits using the data limits. You can\nselectively autoscale only a single axis, eg, the xaxis by\nsetting *scaley* to *False*. 
The autoscaling preserves any\naxis direction reversal that has already been done.", "id": "f17238:c1:m59"} {"signature": "def draw(self, renderer=None, inframe=False):", "body": "if renderer is None:renderer = self._cachedRendererif renderer is None:raise RuntimeError('')if not self.get_visible(): returnrenderer.open_group('')self.apply_aspect()if self.axison and self._frameon:self.patch.draw(renderer)artists = []if len(self.images)<= or renderer.option_image_nocomposite():for im in self.images:im.draw(renderer)else:mag = renderer.get_image_magnification()ims = [(im.make_image(mag),,)for im in self.images if im.get_visible()]l, b, r, t = self.bbox.extentswidth = mag*((round(r) + ) - (round(l) - ))height = mag*((round(t) + ) - (round(b) - ))im = mimage.from_images(height,width,ims)im.is_grayscale = Falsel, b, w, h = self.bbox.boundsrenderer.draw_image(round(l), round(b), im, self.bbox,self.patch.get_path(),self.patch.get_transform())artists.extend(self.collections)artists.extend(self.patches)artists.extend(self.lines)artists.extend(self.texts)artists.extend(self.artists)if self.axison and not inframe:if self._axisbelow:self.xaxis.set_zorder()self.yaxis.set_zorder()else:self.xaxis.set_zorder()self.yaxis.set_zorder()artists.extend([self.xaxis, self.yaxis])if not inframe: artists.append(self.title)artists.extend(self.tables)if self.legend_ is not None:artists.append(self.legend_)if self.axison and self._frameon:artists.append(self.frame)dsu = [ (a.zorder, i, a) for i, a in enumerate(artists)if not a.get_animated() ]dsu.sort()for zorder, i, a in dsu:a.draw(renderer)renderer.close_group('')self._cachedRenderer = renderer", "docstring": "Draw everything (plot lines, axes, labels)", "id": "f17238:c1:m60"} {"signature": "def draw_artist(self, a):", "body": "assert self._cachedRenderer is not Nonea.draw(self._cachedRenderer)", "docstring": "This method can only be used after an initial draw which\ncaches the renderer. It is used to efficiently update Axes\ndata (axis ticks, labels, etc are not updated)", "id": "f17238:c1:m61"} {"signature": "def redraw_in_frame(self):", "body": "assert self._cachedRenderer is not Noneself.draw(self._cachedRenderer, inframe=True)", "docstring": "This method can only be used after an initial draw which\ncaches the renderer. It is used to efficiently update Axes\ndata (axis ticks, labels, etc are not updated)", "id": "f17238:c1:m62"} {"signature": "def get_frame_on(self):", "body": "return self._frameon", "docstring": "Get whether the axes rectangle patch is drawn", "id": "f17238:c1:m65"} {"signature": "def set_frame_on(self, b):", "body": "self._frameon = b", "docstring": "Set whether the axes rectangle patch is drawn\n\nACCEPTS: [ *True* | *False* ]", "id": "f17238:c1:m66"} {"signature": "def get_axisbelow(self):", "body": "return self._axisbelow", "docstring": "Get whether axis below is true or not", "id": "f17238:c1:m67"} {"signature": "def set_axisbelow(self, b):", "body": "self._axisbelow = b", "docstring": "Set whether the axis ticks and gridlines are above or below most artists\n\nACCEPTS: [ *True* | *False* ]", "id": "f17238:c1:m68"} {"signature": "def grid(self, b=None, **kwargs):", "body": "if len(kwargs): b = Trueself.xaxis.grid(b, **kwargs)self.yaxis.grid(b, **kwargs)", "docstring": "call signature::\n\n grid(self, b=None, **kwargs)\n\nSet the axes grids on or off; *b* is a boolean\n\nIf *b* is *None* and ``len(kwargs)==0``, toggle the grid state. 
If\n*kwargs* are supplied, it is assumed that you want a grid and *b*\nis thus set to *True*\n\n*kawrgs* are used to set the grid line properties, eg::\n\n ax.grid(color='r', linestyle='-', linewidth=2)\n\nValid :class:`~matplotlib.lines.Line2D` kwargs are\n\n%(Line2D)s", "id": "f17238:c1:m69"} {"signature": "def ticklabel_format(self, **kwargs):", "body": "style = kwargs.pop('', '').lower()scilimits = kwargs.pop('', None)if scilimits is not None:try:m, n = scilimitsm+n+ except (ValueError, TypeError):raise ValueError(\"\")axis = kwargs.pop('', '').lower()if style[:] == '':sb = Trueelif style in ['', '']:sb = Falseif style == '':cb = Falseelse:cb = Trueraise NotImplementedError(\"\")elif style == '':sb = Noneelse:raise ValueError(\"\")try:if sb is not None:if axis == '' or axis == '':self.xaxis.major.formatter.set_scientific(sb)if axis == '' or axis == '':self.yaxis.major.formatter.set_scientific(sb)if scilimits is not None:if axis == '' or axis == '':self.xaxis.major.formatter.set_powerlimits(scilimits)if axis == '' or axis == '':self.yaxis.major.formatter.set_powerlimits(scilimits)except AttributeError:raise AttributeError(\"\")", "docstring": "Convenience method for manipulating the ScalarFormatter\nused by default for linear axes.\n\nOptional keyword arguments:\n\n ============ =====================================\n Keyword Description\n ============ =====================================\n *style* [ 'sci' (or 'scientific') | 'plain' ]\n plain turns off scientific notation\n *scilimits* (m, n), pair of integers; if *style*\n is 'sci', scientific notation will\n be used for numbers outside the range\n 10`-m`:sup: to 10`n`:sup:.\n Use (0,0) to include all numbers.\n *axis* [ 'x' | 'y' | 'both' ]\n ============ =====================================\n\nOnly the major ticks are affected.\nIf the method is called when the\n:class:`~matplotlib.ticker.ScalarFormatter` is not the\n:class:`~matplotlib.ticker.Formatter` being used, an\n:exc:`AttributeError` will be raised.", "id": "f17238:c1:m70"} {"signature": "def set_axis_off(self):", "body": "self.axison = False", "docstring": "turn off the axis", "id": "f17238:c1:m71"} {"signature": "def set_axis_on(self):", "body": "self.axison = True", "docstring": "turn on the axis", "id": "f17238:c1:m72"} {"signature": "def get_axis_bgcolor(self):", "body": "return self._axisbg", "docstring": "Return the axis background color", "id": "f17238:c1:m73"} {"signature": "def set_axis_bgcolor(self, color):", "body": "self._axisbg = colorself.patch.set_facecolor(color)", "docstring": "set the axes background color\n\nACCEPTS: any matplotlib color - see\n:func:`~matplotlib.pyplot.colors`", "id": "f17238:c1:m74"} {"signature": "def invert_xaxis(self):", "body": "left, right = self.get_xlim()self.set_xlim(right, left)", "docstring": "Invert the x-axis.", "id": "f17238:c1:m75"} {"signature": "def xaxis_inverted(self):", "body": "left, right = self.get_xlim()return right < left", "docstring": "Returns True if the x-axis is inverted.", "id": "f17238:c1:m76"} {"signature": "def get_xbound(self):", "body": "left, right = self.get_xlim()if left < right:return left, rightelse:return right, left", "docstring": "Returns the x-axis numerical bounds where::\n\n lowerBound < upperBound", "id": "f17238:c1:m77"} {"signature": "def set_xbound(self, lower=None, upper=None):", "body": "if upper is None and iterable(lower):lower,upper = lowerold_lower,old_upper = self.get_xbound()if lower is None: lower = old_lowerif upper is None: upper = old_upperif self.xaxis_inverted():if lower 
< upper:self.set_xlim(upper, lower)else:self.set_xlim(lower, upper)else:if lower < upper:self.set_xlim(lower, upper)else:self.set_xlim(upper, lower)", "docstring": "Set the lower and upper numerical bounds of the x-axis.\nThis method will honor axes inversion regardless of parameter order.", "id": "f17238:c1:m78"} {"signature": "def get_xlim(self):", "body": "return tuple(self.viewLim.intervalx)", "docstring": "Get the x-axis range [*xmin*, *xmax*]", "id": "f17238:c1:m79"} {"signature": "def set_xlim(self, xmin=None, xmax=None, emit=True, **kwargs):", "body": "if xmax is None and iterable(xmin):xmin,xmax = xminself._process_unit_info(xdata=(xmin, xmax))if xmin is not None:xmin = self.convert_xunits(xmin)if xmax is not None:xmax = self.convert_xunits(xmax)old_xmin,old_xmax = self.get_xlim()if xmin is None: xmin = old_xminif xmax is None: xmax = old_xmaxxmin, xmax = mtransforms.nonsingular(xmin, xmax, increasing=False)xmin, xmax = self.xaxis.limit_range_for_scale(xmin, xmax)self.viewLim.intervalx = (xmin, xmax)if emit:self.callbacks.process('', self)for other in self._shared_x_axes.get_siblings(self):if other is not self:other.set_xlim(self.viewLim.intervalx, emit=False)if (other.figure != self.figure andother.figure.canvas is not None):other.figure.canvas.draw_idle()return xmin, xmax", "docstring": "call signature::\n\n set_xlim(self, *args, **kwargs)\n\nSet the limits for the xaxis\n\nReturns the current xlimits as a length 2 tuple: [*xmin*, *xmax*]\n\nExamples::\n\n set_xlim((valmin, valmax))\n set_xlim(valmin, valmax)\n set_xlim(xmin=1) # xmax unchanged\n set_xlim(xmax=1) # xmin unchanged\n\nKeyword arguments:\n\n *xmin*: scalar\n the min of the xlim\n *xmax*: scalar\n the max of the xlim\n *emit*: [ True | False ]\n notify observers of lim change\n\nACCEPTS: len(2) sequence of floats", "id": "f17238:c1:m80"} {"signature": "def set_xscale(self, value, **kwargs):", "body": "self.xaxis.set_scale(value, **kwargs)self.autoscale_view()self._update_transScale()", "docstring": "call signature::\n\n set_xscale(value)\n\nSet the scaling of the x-axis: %(scale)s\n\nACCEPTS: [%(scale)s]\n\nDifferent kwargs are accepted, depending on the scale:\n%(scale_docs)s", "id": "f17238:c1:m82"} {"signature": "def get_xticks(self, minor=False):", "body": "return self.xaxis.get_ticklocs(minor=minor)", "docstring": "Return the x ticks as a list of locations", "id": "f17238:c1:m83"} {"signature": "def set_xticks(self, ticks, minor=False):", "body": "return self.xaxis.set_ticks(ticks, minor=minor)", "docstring": "Set the x ticks with list of *ticks*\n\nACCEPTS: sequence of floats", "id": "f17238:c1:m84"} {"signature": "def get_xmajorticklabels(self):", "body": "return cbook.silent_list('',self.xaxis.get_majorticklabels())", "docstring": "Get the xtick labels as a list of Text instances", "id": "f17238:c1:m85"} {"signature": "def get_xminorticklabels(self):", "body": "return cbook.silent_list('',self.xaxis.get_minorticklabels())", "docstring": "Get the xtick labels as a list of Text instances", "id": "f17238:c1:m86"} {"signature": "def get_xticklabels(self, minor=False):", "body": "return cbook.silent_list('',self.xaxis.get_ticklabels(minor=minor))", "docstring": "Get the xtick labels as a list of Text instances", "id": "f17238:c1:m87"} {"signature": "def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):", "body": "return self.xaxis.set_ticklabels(labels, fontdict,minor=minor, **kwargs)", "docstring": "call signature::\n\n set_xticklabels(labels, fontdict=None, minor=False, **kwargs)\n\nSet the 
xtick labels with list of strings *labels*. Return a\nlist of axis text instances.\n\n*kwargs* set the :class:`~matplotlib.text.Text` properties.\nValid properties are\n%(Text)s\n\nACCEPTS: sequence of strings", "id": "f17238:c1:m88"} {"signature": "def invert_yaxis(self):", "body": "left, right = self.get_ylim()self.set_ylim(right, left)", "docstring": "Invert the y-axis.", "id": "f17238:c1:m89"} {"signature": "def yaxis_inverted(self):", "body": "left, right = self.get_ylim()return right < left", "docstring": "Returns True if the y-axis is inverted.", "id": "f17238:c1:m90"} {"signature": "def get_ybound(self):", "body": "left, right = self.get_ylim()if left < right:return left, rightelse:return right, left", "docstring": "Return y-axis numerical bounds in the form of lowerBound < upperBound", "id": "f17238:c1:m91"} {"signature": "def set_ybound(self, lower=None, upper=None):", "body": "if upper is None and iterable(lower):lower,upper = lowerold_lower,old_upper = self.get_ybound()if lower is None: lower = old_lowerif upper is None: upper = old_upperif self.yaxis_inverted():if lower < upper:self.set_ylim(upper, lower)else:self.set_ylim(lower, upper)else:if lower < upper:self.set_ylim(lower, upper)else:self.set_ylim(upper, lower)", "docstring": "Set the lower and upper numerical bounds of the y-axis.\n This method will honor axes inversion regardless of parameter order.", "id": "f17238:c1:m92"} {"signature": "def get_ylim(self):", "body": "return tuple(self.viewLim.intervaly)", "docstring": "Get the y-axis range [*ymin*, *ymax*]", "id": "f17238:c1:m93"} {"signature": "def set_ylim(self, ymin=None, ymax=None, emit=True, **kwargs):", "body": "if ymax is None and iterable(ymin):ymin,ymax = yminif ymin is not None:ymin = self.convert_yunits(ymin)if ymax is not None:ymax = self.convert_yunits(ymax)old_ymin,old_ymax = self.get_ylim()if ymin is None: ymin = old_yminif ymax is None: ymax = old_ymaxymin, ymax = mtransforms.nonsingular(ymin, ymax, increasing=False)ymin, ymax = self.yaxis.limit_range_for_scale(ymin, ymax)self.viewLim.intervaly = (ymin, ymax)if emit:self.callbacks.process('', self)for other in self._shared_y_axes.get_siblings(self):if other is not self:other.set_ylim(self.viewLim.intervaly, emit=False)if (other.figure != self.figure andother.figure.canvas is not None):other.figure.canvas.draw_idle()return ymin, ymax", "docstring": "call signature::\n\n set_ylim(self, *args, **kwargs):\n\nSet the limits for the yaxis; v = [ymin, ymax]::\n\n set_ylim((valmin, valmax))\n set_ylim(valmin, valmax)\n set_ylim(ymin=1) # ymax unchanged\n set_ylim(ymax=1) # ymin unchanged\n\nKeyword arguments:\n\n *ymin*: scalar\n the min of the ylim\n *ymax*: scalar\n the max of the ylim\n *emit*: [ True | False ]\n notify observers of lim change\n\nReturns the current ylimits as a length 2 tuple\n\nACCEPTS: len(2) sequence of floats", "id": "f17238:c1:m94"} {"signature": "def set_yscale(self, value, **kwargs):", "body": "self.yaxis.set_scale(value, **kwargs)self.autoscale_view()self._update_transScale()", "docstring": "call signature::\n\n set_yscale(value)\n\nSet the scaling of the y-axis: %(scale)s\n\nACCEPTS: [%(scale)s]\n\nDifferent kwargs are accepted, depending on the scale:\n%(scale_docs)s", "id": "f17238:c1:m96"} {"signature": "def get_yticks(self, minor=False):", "body": "return self.yaxis.get_ticklocs(minor=minor)", "docstring": "Return the y ticks as a list of locations", "id": "f17238:c1:m97"} {"signature": "def set_yticks(self, ticks, minor=False):", "body": "return self.yaxis.set_ticks(ticks, 
minor=minor)", "docstring": "Set the y ticks with list of *ticks*\n\nACCEPTS: sequence of floats\n\nKeyword arguments:\n\n *minor*: [ False | True ]\n Sets the minor ticks if True", "id": "f17238:c1:m98"} {"signature": "def get_ymajorticklabels(self):", "body": "return cbook.silent_list('',self.yaxis.get_majorticklabels())", "docstring": "Get the xtick labels as a list of Text instances", "id": "f17238:c1:m99"} {"signature": "def get_yminorticklabels(self):", "body": "return cbook.silent_list('',self.yaxis.get_minorticklabels())", "docstring": "Get the xtick labels as a list of Text instances", "id": "f17238:c1:m100"} {"signature": "def get_yticklabels(self, minor=False):", "body": "return cbook.silent_list('',self.yaxis.get_ticklabels(minor=minor))", "docstring": "Get the xtick labels as a list of Text instances", "id": "f17238:c1:m101"} {"signature": "def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):", "body": "return self.yaxis.set_ticklabels(labels, fontdict,minor=minor, **kwargs)", "docstring": "call signature::\n\n set_yticklabels(labels, fontdict=None, minor=False, **kwargs)\n\nSet the ytick labels with list of strings *labels*. Return a list of\n:class:`~matplotlib.text.Text` instances.\n\n*kwargs* set :class:`~matplotlib.text.Text` properties for the labels.\nValid properties are\n%(Text)s\n\nACCEPTS: sequence of strings", "id": "f17238:c1:m102"} {"signature": "def xaxis_date(self, tz=None):", "body": "xmin, xmax = self.dataLim.intervalxif xmin==:dmax = today = datetime.date.today()dmin = today-datetime.timedelta(days=)self._process_unit_info(xdata=(dmin, dmax))dmin, dmax = self.convert_xunits([dmin, dmax])self.viewLim.intervalx = dmin, dmaxself.dataLim.intervalx = dmin, dmaxlocator = self.xaxis.get_major_locator()if not isinstance(locator, mdates.DateLocator):locator = mdates.AutoDateLocator(tz)self.xaxis.set_major_locator(locator)if self.viewLim.intervalx[]==:self.viewLim.intervalx = tuple(self.dataLim.intervalx)locator.refresh()formatter = self.xaxis.get_major_formatter()if not isinstance(formatter, mdates.DateFormatter):formatter = mdates.AutoDateFormatter(locator, tz)self.xaxis.set_major_formatter(formatter)", "docstring": "Sets up x-axis ticks and labels that treat the x data as dates.\n\n *tz* is the time zone to use in labeling dates. Defaults to rc value.", "id": "f17238:c1:m103"} {"signature": "def yaxis_date(self, tz=None):", "body": "ymin, ymax = self.dataLim.intervalyif ymin==:dmax = today = datetime.date.today()dmin = today-datetime.timedelta(days=)self._process_unit_info(ydata=(dmin, dmax))dmin, dmax = self.convert_yunits([dmin, dmax])self.viewLim.intervaly = dmin, dmaxself.dataLim.intervaly = dmin, dmaxlocator = self.yaxis.get_major_locator()if not isinstance(locator, mdates.DateLocator):locator = mdates.AutoDateLocator(tz)self.yaxis.set_major_locator(locator)if self.viewLim.intervaly[]==:self.viewLim.intervaly = tuple(self.dataLim.intervaly)locator.refresh()formatter = self.xaxis.get_major_formatter()if not isinstance(formatter, mdates.DateFormatter):formatter = mdates.AutoDateFormatter(locator, tz)self.yaxis.set_major_formatter(formatter)", "docstring": "Sets up y-axis ticks and labels that treat the y data as dates.\n\n *tz* is the time zone to use in labeling dates. Defaults to rc value.", "id": "f17238:c1:m104"} {"signature": "def format_xdata(self, x):", "body": "try: return self.fmt_xdata(x)except TypeError:func = self.xaxis.get_major_formatter().format_data_shortval = func(x)return val", "docstring": "Return *x* string formatted. 
This function will use the attribute\nself.fmt_xdata if it is callable, else will fall back on the xaxis\nmajor formatter", "id": "f17238:c1:m105"} {"signature": "def format_ydata(self, y):", "body": "try: return self.fmt_ydata(y)except TypeError:func = self.yaxis.get_major_formatter().format_data_shortval = func(y)return val", "docstring": "Return y string formatted. This function will use the\n:attr:`fmt_ydata` attribute if it is callable, else will fall\nback on the yaxis major formatter", "id": "f17238:c1:m106"} {"signature": "def format_coord(self, x, y):", "body": "if x is None:x = ''if y is None:y = ''xs = self.format_xdata(x)ys = self.format_ydata(y)return ''%(xs,ys)", "docstring": "return a format string formatting the *x*, *y* coord", "id": "f17238:c1:m107"} {"signature": "def can_zoom(self):", "body": "return True", "docstring": "Return *True* if this axes support the zoom box", "id": "f17238:c1:m108"} {"signature": "def get_navigate(self):", "body": "return self._navigate", "docstring": "Get whether the axes responds to navigation commands", "id": "f17238:c1:m109"} {"signature": "def set_navigate(self, b):", "body": "self._navigate = b", "docstring": "Set whether the axes responds to navigation toolbar commands\n\nACCEPTS: [ True | False ]", "id": "f17238:c1:m110"} {"signature": "def get_navigate_mode(self):", "body": "return self._navigate_mode", "docstring": "Get the navigation toolbar button status: 'PAN', 'ZOOM', or None", "id": "f17238:c1:m111"} {"signature": "def set_navigate_mode(self, b):", "body": "self._navigate_mode = b", "docstring": "Set the navigation toolbar button status;\n\n.. warning::\n this is not a user-API function.", "id": "f17238:c1:m112"} {"signature": "def start_pan(self, x, y, button):", "body": "self._pan_start = cbook.Bunch(lim = self.viewLim.frozen(),trans = self.transData.frozen(),trans_inverse = self.transData.inverted().frozen(),bbox = self.bbox.frozen(),x = x,y = y)", "docstring": "Called when a pan operation has started.\n\n*x*, *y* are the mouse coordinates in display coords.\nbutton is the mouse button number:\n\n* 1: LEFT\n* 2: MIDDLE\n* 3: RIGHT\n\n.. note::\n Intended to be overridden by new projection types.", "id": "f17238:c1:m113"} {"signature": "def end_pan(self):", "body": "del self._pan_start", "docstring": "Called when a pan operation completes (when the mouse button\nis up.)\n\n.. 
note::\n Intended to be overridden by new projection types.", "id": "f17238:c1:m114"} {"signature": "def drag_pan(self, button, key, x, y):", "body": "def format_deltas(key, dx, dy):if key=='':if(abs(dx)>abs(dy)):dy = dxelse:dx = dyelif key=='':dy = elif key=='':dx = elif key=='':if *abs(dx) < abs(dy):dx=elif *abs(dy) < abs(dx):dy=elif(abs(dx)>abs(dy)):dy=dy/abs(dy)*abs(dx)else:dx=dx/abs(dx)*abs(dy)return (dx,dy)p = self._pan_startdx = x - p.xdy = y - p.yif dx == and dy == :returnif button == :dx, dy = format_deltas(key, dx, dy)result = p.bbox.translated(-dx, -dy).transformed(p.trans_inverse)elif button == :try:dx = -dx / float(self.bbox.width)dy = -dy / float(self.bbox.height)dx, dy = format_deltas(key, dx, dy)if self.get_aspect() != '':dx = * (dx + dy)dy = dxalpha = np.power(, (dx, dy))start = p.trans_inverse.transform_point((p.x, p.y))lim_points = p.lim.get_points()result = start + alpha * (lim_points - start)result = mtransforms.Bbox(result)except OverflowError:warnings.warn('')returnself.set_xlim(*result.intervalx)self.set_ylim(*result.intervaly)", "docstring": "Called when the mouse moves during a pan operation.\n\n*button* is the mouse button number:\n\n* 1: LEFT\n* 2: MIDDLE\n* 3: RIGHT\n\n*key* is a \"shift\" key\n\n*x*, *y* are the mouse coordinates in display coords.\n\n.. note::\n Intended to be overridden by new projection types.", "id": "f17238:c1:m115"} {"signature": "def get_cursor_props(self):", "body": "return self._cursorProps", "docstring": "return the cursor propertiess as a (*linewidth*, *color*)\ntuple, where *linewidth* is a float and *color* is an RGBA\ntuple", "id": "f17238:c1:m116"} {"signature": "def set_cursor_props(self, *args):", "body": "if len(args)==:lw, c = args[]elif len(args)==:lw, c = argselse:raise ValueError('')c =mcolors.colorConverter.to_rgba(c)self._cursorProps = lw, c", "docstring": "Set the cursor property as::\n\n ax.set_cursor_props(linewidth, color)\n\nor::\n\n ax.set_cursor_props((linewidth, color))\n\nACCEPTS: a (*float*, *color*) tuple", "id": "f17238:c1:m117"} {"signature": "def connect(self, s, func):", "body": "raise DeprecationWarning('''')", "docstring": "Register observers to be notified when certain events occur. Register\nwith callback functions with the following signatures. 
The function\nhas the following signature::\n\n func(ax) # where ax is the instance making the callback.\n\nThe following events can be connected to:\n\n 'xlim_changed','ylim_changed'\n\nThe connection id is is returned - you can use this with\ndisconnect to disconnect from the axes event", "id": "f17238:c1:m118"} {"signature": "def disconnect(self, cid):", "body": "raise DeprecationWarning('''')", "docstring": "disconnect from the Axes event.", "id": "f17238:c1:m119"} {"signature": "def get_children(self):", "body": "children = []children.append(self.xaxis)children.append(self.yaxis)children.extend(self.lines)children.extend(self.patches)children.extend(self.texts)children.extend(self.tables)children.extend(self.artists)children.extend(self.images)if self.legend_ is not None:children.append(self.legend_)children.extend(self.collections)children.append(self.title)children.append(self.patch)children.append(self.frame)return children", "docstring": "return a list of child artists", "id": "f17238:c1:m120"} {"signature": "def contains(self,mouseevent):", "body": "if callable(self._contains): return self._contains(self,mouseevent)return self.patch.contains(mouseevent)", "docstring": "Test whether the mouse event occured in the axes.\n\n Returns T/F, {}", "id": "f17238:c1:m121"} {"signature": "def pick(self, *args):", "body": "if len(args)>:raise DeprecationWarning('''')martist.Artist.pick(self,args[])", "docstring": "call signature::\n\n pick(mouseevent)\n\neach child artist will fire a pick event if mouseevent is over\nthe artist and the artist has picker set", "id": "f17238:c1:m122"} {"signature": "def __pick(self, x, y, trans=None, among=None):", "body": "if trans is not None:xywin = trans.transform_point((x,y))else:xywin = x,ydef dist_points(p1, p2):''x1, y1 = p1x2, y2 = p2return math.sqrt((x1-x2)**+(y1-y2)**)def dist_x_y(p1, x, y):''x1, y1 = p1return min(np.sqrt((x-x1)**+(y-y1)**))def dist(a):if isinstance(a, Text):bbox = a.get_window_extent()l,b,w,h = bbox.boundsverts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)xt, yt = list(zip(*verts))elif isinstance(a, Patch):path = a.get_path()tverts = a.get_transform().transform_path(path)xt, yt = list(zip(*tverts))elif isinstance(a, mlines.Line2D):xdata = a.get_xdata(orig=False)ydata = a.get_ydata(orig=False)xt, yt = a.get_transform().numerix_x_y(xdata, ydata)return dist_x_y(xywin, np.asarray(xt), np.asarray(yt))artists = self.lines + self.patches + self.textsif callable(among):artists = list(filter(test, artists))elif iterable(among):amongd = dict([(k,) for k in among])artists = [a for a in artists if a in amongd]elif among is None:passelse:raise ValueError('')if not len(artists): return Noneds = [ (dist(a),a) for a in artists]ds.sort()return ds[][]", "docstring": "Return the artist under point that is closest to the *x*, *y*.\nIf *trans* is *None*, *x*, and *y* are in window coords,\n(0,0 = lower left). Otherwise, *trans* is a\n:class:`~matplotlib.transforms.Transform` that specifies the\ncoordinate system of *x*, *y*.\n\nThe selection of artists from amongst which the pick function\nfinds an artist can be narrowed using the optional keyword\nargument *among*. 
If provided, this should be either a sequence\nof permitted artists or a function taking an artist as its\nargument and returning a true value if and only if that artist\ncan be selected.\n\nNote this algorithm calculates distance to the vertices of the\npolygon, so if you want to pick a patch, click on the edge!", "id": "f17238:c1:m123"} {"signature": "def get_title(self):", "body": "return self.title.get_text()", "docstring": "Get the title text string.", "id": "f17238:c1:m124"} {"signature": "def set_title(self, label, fontdict=None, **kwargs):", "body": "default = {'':rcParams[''],'' : '','' : ''}self.title.set_text(label)self.title.update(default)if fontdict is not None: self.title.update(fontdict)self.title.update(kwargs)return self.title", "docstring": "call signature::\n\n set_title(label, fontdict=None, **kwargs):\n\nSet the title for the axes.\n\nkwargs are Text properties:\n%(Text)s\n\nACCEPTS: str\n\n.. seealso::\n :meth:`text`:\n for information on how override and the optional args work", "id": "f17238:c1:m125"} {"signature": "def get_xlabel(self):", "body": "label = self.xaxis.get_label()return label.get_text()", "docstring": "Get the xlabel text string.", "id": "f17238:c1:m126"} {"signature": "def set_xlabel(self, xlabel, fontdict=None, **kwargs):", "body": "label = self.xaxis.get_label()label.set_text(xlabel)if fontdict is not None: label.update(fontdict)label.update(kwargs)return label", "docstring": "call signature::\n\n set_xlabel(xlabel, fontdict=None, **kwargs)\n\nSet the label for the xaxis.\n\nValid kwargs are Text properties:\n%(Text)s\nACCEPTS: str\n\n.. seealso::\n :meth:`text`:\n for information on how override and the optional args work", "id": "f17238:c1:m127"} {"signature": "def get_ylabel(self):", "body": "label = self.yaxis.get_label()return label.get_text()", "docstring": "Get the ylabel text string.", "id": "f17238:c1:m128"} {"signature": "def set_ylabel(self, ylabel, fontdict=None, **kwargs):", "body": "label = self.yaxis.get_label()label.set_text(ylabel)if fontdict is not None: label.update(fontdict)label.update(kwargs)return label", "docstring": "call signature::\n\n set_ylabel(ylabel, fontdict=None, **kwargs)\n\nSet the label for the yaxis\n\nValid kwargs are Text properties:\n%(Text)s\nACCEPTS: str\n\n.. 
seealso::\n :meth:`text`:\n for information on how override and the optional args work", "id": "f17238:c1:m129"} {"signature": "def text(self, x, y, s, fontdict=None,withdash=False, **kwargs):", "body": "default = {'' : '','' : '','' : self.transData,}if withdash:t = mtext.TextWithDash(x=x, y=y, text=s,)else:t = mtext.Text(x=x, y=y, text=s,)self._set_artist_props(t)t.update(default)if fontdict is not None: t.update(fontdict)t.update(kwargs)self.texts.append(t)t._remove_method = lambda h: self.texts.remove(h)if '' in kwargs: t.set_clip_box(self.bbox)return t", "docstring": "call signature::\n\n text(x, y, s, fontdict=None, **kwargs)\n\n Add text in string *s* to axis at location *x*, *y*, data\n coordinates.\n\n Keyword arguments:\n\n *fontdict*:\n A dictionary to override the default text properties.\n If *fontdict* is *None*, the defaults are determined by your rc\n parameters.\n\n *withdash*: [ False | True ]\n Creates a :class:`~matplotlib.text.TextWithDash` instance\n instead of a :class:`~matplotlib.text.Text` instance.\n\n Individual keyword arguments can be used to override any given\n parameter::\n\n text(x, y, s, fontsize=12)\n\n The default transform specifies that text is in data coords,\n alternatively, you can specify text in axis coords (0,0 is\n lower-left and 1,1 is upper-right). The example below places\n text in the center of the axes::\n\n text(0.5, 0.5,'matplotlib',\n horizontalalignment='center',\n verticalalignment='center',\n transform = ax.transAxes)\n\nYou can put a rectangular box around the text instance (eg. to\nset a background color) by using the keyword *bbox*. *bbox* is\na dictionary of :class:`matplotlib.patches.Rectangle`\nproperties. For example::\n\n text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))\n\nValid kwargs are :class:`matplotlib.text.Text` properties:\n\n%(Text)s", "id": "f17238:c1:m130"} {"signature": "def annotate(self, *args, **kwargs):", "body": "a = mtext.Annotation(*args, **kwargs)a.set_transform(mtransforms.IdentityTransform())self._set_artist_props(a)if '' in kwargs: a.set_clip_path(self.patch)self.texts.append(a)return a", "docstring": "call signature::\n\n annotate(s, xy, xytext=None, xycoords='data',\n textcoords='data', arrowprops=None, **kwargs)\n\nKeyword arguments:\n\n%(Annotation)s\n\n.. plot:: mpl_examples/pylab_examples/annotation_demo2.py", "id": "f17238:c1:m131"} {"signature": "def axhline(self, y=, xmin=, xmax=, **kwargs):", "body": "ymin, ymax = self.get_ybound()yy = self.convert_yunits( y )scaley = (yyymax)trans = mtransforms.blended_transform_factory(self.transAxes, self.transData)l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs)l.x_isdata = Falseself.add_line(l)self.autoscale_view(scalex=False, scaley=scaley)return l", "docstring": "call signature::\n\n axhline(y=0, xmin=0, xmax=1, **kwargs)\n\nAxis Horizontal Line\n\nDraw a horizontal line at *y* from *xmin* to *xmax*. With the\ndefault values of *xmin* = 0 and *xmax* = 1, this line will\nalways span the horizontal extent of the axes, regardless of\nthe xlim settings, even if you change them, eg. with the\n:meth:`set_xlim` command. That is, the horizontal extent is\nin axes coords: 0=left, 0.5=middle, 1.0=right but the *y*\nlocation is in data coordinates.\n\nReturn value is the :class:`~matplotlib.lines.Line2D`\ninstance. kwargs are the same as kwargs to plot, and can be\nused to control the line properties. 
Eg.,\n\n* draw a thick red hline at *y* = 0 that spans the xrange\n\n >>> axhline(linewidth=4, color='r')\n\n* draw a default hline at *y* = 1 that spans the xrange\n\n >>> axhline(y=1)\n\n* draw a default hline at *y* = .5 that spans the middle half of\n the xrange\n\n >>> axhline(y=.5, xmin=0.25, xmax=0.75)\n\nValid kwargs are :class:`~matplotlib.lines.Line2D` properties:\n\n%(Line2D)s\n\n.. seealso::\n :meth:`axhspan`:\n for example plot and source code", "id": "f17238:c1:m132"} {"signature": "def axvline(self, x=, ymin=, ymax=, **kwargs):", "body": "xmin, xmax = self.get_xbound()xx = self.convert_xunits( x )scalex = (xxxmax)trans = mtransforms.blended_transform_factory(self.transData, self.transAxes)l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs)l.y_isdata = Falseself.add_line(l)self.autoscale_view(scalex=scalex, scaley=False)return l", "docstring": "call signature::\n\n axvline(x=0, ymin=0, ymax=1, **kwargs)\n\nAxis Vertical Line\n\nDraw a vertical line at *x* from *ymin* to *ymax*. With the\ndefault values of *ymin* = 0 and *ymax* = 1, this line will\nalways span the vertical extent of the axes, regardless of the\nylim settings, even if you change them, eg. with the\n:meth:`set_ylim` command. That is, the vertical extent is in\naxes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location\nis in data coordinates.\n\nReturn value is the :class:`~matplotlib.lines.Line2D`\ninstance. kwargs are the same as kwargs to plot, and can be\nused to control the line properties. Eg.,\n\n* draw a thick red vline at *x* = 0 that spans the yrange\n\n >>> axvline(linewidth=4, color='r')\n\n* draw a default vline at *x* = 1 that spans the yrange\n\n >>> axvline(x=1)\n\n* draw a default vline at *x* = .5 that spans the middle half of\n the yrange\n\n >>> axvline(x=.5, ymin=0.25, ymax=0.75)\n\nValid kwargs are :class:`~matplotlib.lines.Line2D` properties:\n\n%(Line2D)s\n\n.. seealso::\n :meth:`axhspan`:\n for example plot and source code", "id": "f17238:c1:m133"} {"signature": "def axhspan(self, ymin, ymax, xmin=, xmax=, **kwargs):", "body": "trans = mtransforms.blended_transform_factory(self.transAxes, self.transData)self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )xmin, xmax = self.convert_xunits( [xmin, xmax] )ymin, ymax = self.convert_yunits( [ymin, ymax] )verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)p = mpatches.Polygon(verts, **kwargs)p.set_transform(trans)p.x_isdata = Falseself.add_patch(p)return p", "docstring": "call signature::\n\n axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)\n\nAxis Horizontal Span.\n\n*y* coords are in data units and *x* coords are in axes (relative\n0-1) units.\n\nDraw a horizontal span (rectangle) from *ymin* to *ymax*.\nWith the default values of *xmin* = 0 and *xmax* = 1, this\nalways spans the xrange, regardless of the xlim settings, even\nif you change them, eg. with the :meth:`set_xlim` command.\nThat is, the horizontal extent is in axes coords: 0=left,\n0.5=middle, 1.0=right but the *y* location is in data\ncoordinates.\n\nReturn value is a :class:`matplotlib.patches.Polygon`\ninstance.\n\nExamples:\n\n* draw a gray rectangle from *y* = 0.25-0.75 that spans the\n horizontal extent of the axes\n\n >>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)\n\nValid kwargs are :class:`~matplotlib.patches.Polygon` properties:\n\n%(Polygon)s\n\n**Example:**\n\n.. 
plot:: mpl_examples/pylab_examples/axhspan_demo.py", "id": "f17238:c1:m134"} {"signature": "def axvspan(self, xmin, xmax, ymin=, ymax=, **kwargs):", "body": "trans = mtransforms.blended_transform_factory(self.transData, self.transAxes)self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )xmin, xmax = self.convert_xunits( [xmin, xmax] )ymin, ymax = self.convert_yunits( [ymin, ymax] )verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]p = mpatches.Polygon(verts, **kwargs)p.set_transform(trans)p.y_isdata = Falseself.add_patch(p)return p", "docstring": "call signature::\n\n axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)\n\nAxis Vertical Span.\n\n*x* coords are in data units and *y* coords are in axes (relative\n0-1) units.\n\nDraw a vertical span (rectangle) from *xmin* to *xmax*. With\nthe default values of *ymin* = 0 and *ymax* = 1, this always\nspans the yrange, regardless of the ylim settings, even if you\nchange them, eg. with the :meth:`set_ylim` command. That is,\nthe vertical extent is in axes coords: 0=bottom, 0.5=middle,\n1.0=top but the *y* location is in data coordinates.\n\nReturn value is the :class:`matplotlib.patches.Polygon`\ninstance.\n\nExamples:\n\n* draw a vertical green translucent rectangle from x=1.25 to 1.55 that\n spans the yrange of the axes\n\n >>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)\n\nValid kwargs are :class:`~matplotlib.patches.Polygon`\nproperties:\n\n%(Polygon)s\n\n.. seealso::\n :meth:`axhspan`:\n for example plot and source code", "id": "f17238:c1:m135"} {"signature": "def hlines(self, y, xmin, xmax, colors='', linestyles='',label='', **kwargs):", "body": "if kwargs.get('') is not None:raise DeprecationWarning('''''')y = self.convert_yunits( y )xmin = self.convert_xunits( xmin )xmax = self.convert_xunits( xmax )if not iterable(y): y = [y]if not iterable(xmin): xmin = [xmin]if not iterable(xmax): xmax = [xmax]y = np.asarray(y)xmin = np.asarray(xmin)xmax = np.asarray(xmax)if len(xmin)==:xmin = np.resize( xmin, y.shape )if len(xmax)==:xmax = np.resize( xmax, y.shape )if len(xmin)!=len(y):raise ValueError('')if len(xmax)!=len(y):raise ValueError('')verts = [ ((thisxmin, thisy), (thisxmax, thisy))for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]coll = mcoll.LineCollection(verts, colors=colors,linestyles=linestyles, label=label)self.add_collection(coll)coll.update(kwargs)minx = min(xmin.min(), xmax.min())maxx = max(xmin.max(), xmax.max())miny = y.min()maxy = y.max()corners = (minx, miny), (maxx, maxy)self.update_datalim(corners)self.autoscale_view()return coll", "docstring": "call signature::\n\n hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs)\n\nPlot horizontal lines at each *y* from *xmin* to *xmax*.\n\nReturns the :class:`~matplotlib.collections.LineCollection`\nthat was added.\n\nRequired arguments:\n\n *y*:\n a 1-D numpy array or iterable.\n\n *xmin* and *xmax*:\n can be scalars or ``len(x)`` numpy arrays. If they are\n scalars, then the respective values are constant, else the\n widths of the lines are determined by *xmin* and *xmax*.\n\nOptional keyword arguments:\n\n *colors*:\n a line collections color argument, either a single color\n or a ``len(y)`` list of colors\n\n *linestyles*:\n [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]\n\n**Example:**\n\n.. 
plot:: mpl_examples/pylab_examples/hline_demo.py", "id": "f17238:c1:m136"} {"signature": "def vlines(self, x, ymin, ymax, colors='', linestyles='',label='', **kwargs):", "body": "if kwargs.get('') is not None:raise DeprecationWarning('''''')self._process_unit_info(xdata=x, ydata=ymin, kwargs=kwargs)x = self.convert_xunits( x )ymin = self.convert_yunits( ymin )ymax = self.convert_yunits( ymax )if not iterable(x): x = [x]if not iterable(ymin): ymin = [ymin]if not iterable(ymax): ymax = [ymax]x = np.asarray(x)ymin = np.asarray(ymin)ymax = np.asarray(ymax)if len(ymin)==:ymin = np.resize( ymin, x.shape )if len(ymax)==:ymax = np.resize( ymax, x.shape )if len(ymin)!=len(x):raise ValueError('')if len(ymax)!=len(x):raise ValueError('')Y = np.array([ymin, ymax]).Tverts = [ ((thisx, thisymin), (thisx, thisymax))for thisx, (thisymin, thisymax) in zip(x,Y)]coll = mcoll.LineCollection(verts, colors=colors,linestyles=linestyles, label=label)self.add_collection(coll)coll.update(kwargs)minx = min( x )maxx = max( x )miny = min( min(ymin), min(ymax) )maxy = max( max(ymin), max(ymax) )corners = (minx, miny), (maxx, maxy)self.update_datalim(corners)self.autoscale_view()return coll", "docstring": "call signature::\n\n vlines(x, ymin, ymax, color='k', linestyles='solid')\n\nPlot vertical lines at each *x* from *ymin* to *ymax*. *ymin*\nor *ymax* can be scalars or len(*x*) numpy arrays. If they are\nscalars, then the respective values are constant, else the\nheights of the lines are determined by *ymin* and *ymax*.\n\n*colors*\n a line collections color args, either a single color\n or a len(*x*) list of colors\n\n*linestyles*\n\n one of [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]\n\nReturns the :class:`matplotlib.collections.LineCollection`\nthat was added.\n\nkwargs are :class:`~matplotlib.collections.LineCollection` properties:\n\n%(LineCollection)s", "id": "f17238:c1:m137"} {"signature": "def plot(self, *args, **kwargs):", "body": "scalex = kwargs.pop( '', True)scaley = kwargs.pop( '', True)if not self._hold: self.cla()lines = []for line in self._get_lines(*args, **kwargs):self.add_line(line)lines.append(line)self.autoscale_view(scalex=scalex, scaley=scaley)return lines", "docstring": "Plot lines and/or markers to the\n:class:`~matplotlib.axes.Axes`. *args* is a variable length\nargument, allowing for multiple *x*, *y* pairs with an\noptional format string. For example, each of the following is\nlegal::\n\n plot(x, y) # plot x and y using default line style and color\n plot(x, y, 'bo') # plot x and y using blue circle markers\n plot(y) # plot y using x as index array 0..N-1\n plot(y, 'r+') # ditto, but with red plusses\n\nIf *x* and/or *y* is 2-dimensional, then the corresponding columns\nwill be plotted.\n\nAn arbitrary number of *x*, *y*, *fmt* groups can be\nspecified, as in::\n\n a.plot(x1, y1, 'g^', x2, y2, 'g-')\n\nReturn value is a list of lines that were added.\n\nThe following format string characters are accepted to control\nthe line style or marker:\n\n================ ===============================\ncharacter description\n================ ===============================\n'-' solid line style\n'--' dashed line style\n'-.' dash-dot line style\n':' dotted line style\n'.' 
point marker\n',' pixel marker\n'o' circle marker\n'v' triangle_down marker\n'^' triangle_up marker\n'<' triangle_left marker\n'>' triangle_right marker\n'1' tri_down marker\n'2' tri_up marker\n'3' tri_left marker\n'4' tri_right marker\n's' square marker\n'p' pentagon marker\n'*' star marker\n'h' hexagon1 marker\n'H' hexagon2 marker\n'+' plus marker\n'x' x marker\n'D' diamond marker\n'd' thin_diamond marker\n'|' vline marker\n'_' hline marker\n================ ===============================\n\n\nThe following color abbreviations are supported:\n\n========== ========\ncharacter color\n========== ========\n'b' blue\n'g' green\n'r' red\n'c' cyan\n'm' magenta\n'y' yellow\n'k' black\n'w' white\n========== ========\n\nIn addition, you can specify colors in many weird and\nwonderful ways, including full names (``'green'``), hex\nstrings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or\ngrayscale intensities as a string (``'0.8'``). Of these, the\nstring specifications can be used in place of a ``fmt`` group,\nbut the tuple forms can be used only as ``kwargs``.\n\nLine styles and colors are combined in a single format string, as in\n``'bo'`` for blue circles.\n\nThe *kwargs* can be used to set line properties (any property that has\na ``set_*`` method). You can use this to set a line label (for auto\nlegends), linewidth, antialiasing, marker face color, etc. Here is an\nexample::\n\n plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)\n plot([1,2,3], [1,4,9], 'rs', label='line 2')\n axis([0, 4, 0, 10])\n legend()\n\nIf you make multiple lines with one plot command, the kwargs\napply to all those lines, e.g.::\n\n plot(x1, y1, x2, y2, antialiased=False)\n\nNeither line will be antialiased.\n\nYou do not need to use format strings, which are just\nabbreviations. All of the line properties can be controlled\nby keyword arguments. For example, you can set the color,\nmarker, linestyle, and markercolor with::\n\n plot(x, y, color='green', linestyle='dashed', marker='o',\n markerfacecolor='blue', markersize=12). See\n :class:`~matplotlib.lines.Line2D` for details.\n\nThe kwargs are :class:`~matplotlib.lines.Line2D` properties:\n\n%(Line2D)s\n\nkwargs *scalex* and *scaley*, if defined, are passed on to\n:meth:`~matplotlib.axes.Axes.autoscale_view` to determine\nwhether the *x* and *y* axes are autoscaled; the default is\n*True*.", "id": "f17238:c1:m138"} {"signature": "def plot_date(self, x, y, fmt='', tz=None, xdate=True, ydate=False,**kwargs):", "body": "if not self._hold: self.cla()ret = self.plot(x, y, fmt, **kwargs)if xdate:self.xaxis_date(tz)if ydate:self.yaxis_date(tz)self.autoscale_view()return ret", "docstring": "call signature::\n\n plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs)\n\nSimilar to the :func:`~matplotlib.pyplot.plot` command, except\nthe *x* or *y* (or both) data is considered to be dates, and the\naxis is labeled accordingly.\n\n*x* and/or *y* can be a sequence of dates represented as float\ndays since 0001-01-01 UTC.\n\nKeyword arguments:\n\n *fmt*: string\n The plot format string.\n\n *tz*: [ None | timezone string ]\n The time zone to use in labeling dates. 
If *None*, defaults to rc\n value.\n\n *xdate*: [ True | False ]\n If *True*, the *x*-axis will be labeled with dates.\n\n *ydate*: [ False | True ]\n If *True*, the *y*-axis will be labeled with dates.\n\nNote if you are using custom date tickers and formatters, it\nmay be necessary to set the formatters/locators after the call\nto :meth:`plot_date` since :meth:`plot_date` will set the\ndefault tick locator to\n:class:`matplotlib.ticker.AutoDateLocator` (if the tick\nlocator is not already set to a\n:class:`matplotlib.ticker.DateLocator` instance) and the\ndefault tick formatter to\n:class:`matplotlib.ticker.AutoDateFormatter` (if the tick\nformatter is not already set to a\n:class:`matplotlib.ticker.DateFormatter` instance).\n\nValid kwargs are :class:`~matplotlib.lines.Line2D` properties:\n\n%(Line2D)s\n\n.. seealso::\n :mod:`~matplotlib.dates`:\n for helper functions\n\n :func:`~matplotlib.dates.date2num`,\n :func:`~matplotlib.dates.num2date` and\n :func:`~matplotlib.dates.drange`:\n for help on creating the required floating point\n dates.", "id": "f17238:c1:m139"} {"signature": "def loglog(self, *args, **kwargs):", "body": "if not self._hold: self.cla()dx = {'': kwargs.pop('', ),'': kwargs.pop('', None),}dy = {'': kwargs.pop('', ),'': kwargs.pop('', None),}self.set_xscale('', **dx)self.set_yscale('', **dy)b = self._holdself._hold = True l = self.plot(*args, **kwargs)self._hold = b return l", "docstring": "call signature::\n\n loglog(*args, **kwargs)\n\nMake a plot with log scaling on the *x* and *y* axis.\n\n:func:`~matplotlib.pyplot.loglog` supports all the keyword\narguments of :func:`~matplotlib.pyplot.plot` and\n:meth:`matplotlib.axes.Axes.set_xscale` /\n:meth:`matplotlib.axes.Axes.set_yscale`.\n\nNotable keyword arguments:\n\n *basex*/*basey*: scalar > 1\n base of the *x*/*y* logarithm\n\n *subsx*/*subsy*: [ None | sequence ]\n the location of the minor *x*/*y* ticks; *None* defaults\n to autosubs, which depend on the number of decades in the\n plot; see :meth:`matplotlib.axes.Axes.set_xscale` /\n :meth:`matplotlib.axes.Axes.set_yscale` for details\n\nThe remaining valid kwargs are\n:class:`~matplotlib.lines.Line2D` properties:\n\n%(Line2D)s\n\n**Example:**\n\n.. plot:: mpl_examples/pylab_examples/log_demo.py", "id": "f17238:c1:m140"} {"signature": "def semilogx(self, *args, **kwargs):", "body": "if not self._hold: self.cla()d = {'': kwargs.pop( '', ),'': kwargs.pop( '', None),}self.set_xscale('', **d)b = self._holdself._hold = True l = self.plot(*args, **kwargs)self._hold = b return l", "docstring": "call signature::\n\n semilogx(*args, **kwargs)\n\nMake a plot with log scaling on the *x* axis.\n\n:func:`semilogx` supports all the keyword arguments of\n:func:`~matplotlib.pyplot.plot` and\n:meth:`matplotlib.axes.Axes.set_xscale`.\n\nNotable keyword arguments:\n\n *basex*: scalar > 1\n base of the *x* logarithm\n\n *subsx*: [ None | sequence ]\n The location of the minor xticks; *None* defaults to\n autosubs, which depend on the number of decades in the\n plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for\n details.\n\nThe remaining valid kwargs are\n:class:`~matplotlib.lines.Line2D` properties:\n\n%(Line2D)s\n\n.. 
seealso::\n :meth:`loglog`:\n For example code and figure", "id": "f17238:c1:m141"} {"signature": "def semilogy(self, *args, **kwargs):", "body": "if not self._hold: self.cla()d = {'': kwargs.pop('', ),'': kwargs.pop('', None),}self.set_yscale('', **d)b = self._holdself._hold = True l = self.plot(*args, **kwargs)self._hold = b return l", "docstring": "call signature::\n\n semilogy(*args, **kwargs)\n\nMake a plot with log scaling on the *y* axis.\n\n:func:`semilogy` supports all the keyword arguments of\n:func:`~matplotlib.pylab.plot` and\n:meth:`matplotlib.axes.Axes.set_yscale`.\n\nNotable keyword arguments:\n\n *basey*: scalar > 1\n Base of the *y* logarithm\n\n *subsy*: [ None | sequence ]\n The location of the minor yticks; *None* defaults to\n autosubs, which depend on the number of decades in the\n plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for\n details.\n\nThe remaining valid kwargs are\n:class:`~matplotlib.lines.Line2D` properties:\n\n%(Line2D)s\n\n.. seealso::\n :meth:`loglog`:\n For example code and figure", "id": "f17238:c1:m142"} {"signature": "def acorr(self, x, **kwargs):", "body": "return self.xcorr(x, x, **kwargs)", "docstring": "call signature::\n\n acorr(x, normed=False, detrend=mlab.detrend_none, usevlines=False,\n maxlags=None, **kwargs)\n\nPlot the autocorrelation of *x*. If *normed* = *True*,\nnormalize the data by the autocorrelation at 0-th lag. *x* is\ndetrended by the *detrend* callable (default no normalization).\n\nData are plotted as ``plot(lags, c, **kwargs)``\n\nReturn value is a tuple (*lags*, *c*, *line*) where:\n\n - *lags* are a length 2*maxlags+1 lag vector\n\n - *c* is the 2*maxlags+1 auto correlation vector\n\n - *line* is a :class:`~matplotlib.lines.Line2D` instance\n returned by :meth:`plot`\n\nThe default *linestyle* is None and the default *marker* is\n``'o'``, though these can be overridden with keyword args.\nThe cross correlation is performed with\n:func:`numpy.correlate` with *mode* = 2.\n\nIf *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`\nrather than :meth:`~matplotlib.axes.Axes.plot` is used to draw\nvertical lines from the origin to the acorr. Otherwise, the\nplot style is determined by the kwargs, which are\n:class:`~matplotlib.lines.Line2D` properties.\n\n*maxlags* is a positive integer detailing the number of lags\nto show. The default value of *None* will return all\n:math:`2 \\mathrm{len}(x) - 1` lags.\n\nThe return value is a tuple (*lags*, *c*, *linecol*, *b*)\nwhere\n\n- *linecol* is the\n :class:`~matplotlib.collections.LineCollection`\n\n- *b* is the *x*-axis.\n\n.. seealso::\n :meth:`~matplotlib.axes.Axes.plot` or\n :meth:`~matplotlib.axes.Axes.vlines`: For documentation on\n valid kwargs.\n\n**Example:**\n\n:func:`~matplotlib.pyplot.xcorr` above, and\n:func:`~matplotlib.pyplot.acorr` below.\n\n**Example:**\n\n.. 
plot:: mpl_examples/pylab_examples/xcorr_demo.py", "id": "f17238:c1:m143"} {"signature": "def xcorr(self, x, y, normed=False, detrend=mlab.detrend_none,usevlines=False, maxlags=None, **kwargs):", "body": "Nx = len(x)if Nx!=len(y):raise ValueError('')x = detrend(np.asarray(x))y = detrend(np.asarray(y))c = np.correlate(x, y, mode=)if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y))if maxlags is None: maxlags = Nx - if maxlags >= Nx or maxlags < :raise ValueError(''''%Nx)lags = np.arange(-maxlags,maxlags+)c = c[Nx--maxlags:Nx+maxlags]if usevlines:a = self.vlines(lags, [], c, **kwargs)b = self.axhline(**kwargs)else:kwargs.setdefault('', '')kwargs.setdefault('', '')a, = self.plot(lags, c, **kwargs)b = Nonereturn lags, c, a, b", "docstring": "call signature::\n\n xcorr(x, y, normed=False, detrend=mlab.detrend_none,\n usevlines=False, **kwargs):\n\nPlot the cross correlation between *x* and *y*. If *normed* =\n*True*, normalize the data by the cross correlation at 0-th\nlag. *x* and y are detrended by the *detrend* callable\n(default no normalization). *x* and *y* must be equal length.\n\nData are plotted as ``plot(lags, c, **kwargs)``\n\nReturn value is a tuple (*lags*, *c*, *line*) where:\n\n - *lags* are a length ``2*maxlags+1`` lag vector\n\n - *c* is the ``2*maxlags+1`` auto correlation vector\n\n - *line* is a :class:`~matplotlib.lines.Line2D` instance\n returned by :func:`~matplotlib.pyplot.plot`.\n\nThe default *linestyle* is *None* and the default *marker* is\n'o', though these can be overridden with keyword args. The\ncross correlation is performed with :func:`numpy.correlate`\nwith *mode* = 2.\n\nIf *usevlines* is *True*:\n\n :func:`~matplotlib.pyplot.vlines`\n rather than :func:`~matplotlib.pyplot.plot` is used to draw\n vertical lines from the origin to the xcorr. Otherwise the\n plotstyle is determined by the kwargs, which are\n :class:`~matplotlib.lines.Line2D` properties.\n\n The return value is a tuple (*lags*, *c*, *linecol*, *b*)\n where *linecol* is the\n :class:`matplotlib.collections.LineCollection` instance and\n *b* is the *x*-axis.\n\n*maxlags* is a positive integer detailing the number of lags to show.\nThe default value of *None* will return all ``(2*len(x)-1)`` lags.\n\n**Example:**\n\n:func:`~matplotlib.pyplot.xcorr` above, and\n:func:`~matplotlib.pyplot.acorr` below.\n\n**Example:**\n\n.. 
plot:: mpl_examples/pylab_examples/xcorr_demo.py", "id": "f17238:c1:m144"} {"signature": "def legend(self, *args, **kwargs):", "body": "def get_handles():handles = self.lines[:]handles.extend(self.patches)handles.extend([c for c in self.collectionsif isinstance(c, mcoll.LineCollection)])handles.extend([c for c in self.collectionsif isinstance(c, mcoll.RegularPolyCollection)])return handlesif len(args)==:handles = []labels = []for handle in get_handles():label = handle.get_label()if (label is not None andlabel != '' and not label.startswith('')):handles.append(handle)labels.append(label)if len(handles) == :warnings.warn(\"\"\"\")return Noneelif len(args)==:labels = args[]handles = [h for h, label in zip(get_handles(), labels)]elif len(args)==:if is_string_like(args[]) or isinstance(args[], int):labels, loc = argshandles = [h for h, label in zip(get_handles(), labels)]kwargs[''] = locelse:handles, labels = argselif len(args)==:handles, labels, loc = argskwargs[''] = locelse:raise TypeError('')handles = cbook.flatten(handles)self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)return self.legend_", "docstring": "call signature::\n\n legend(*args, **kwargs)\n\nPlace a legend on the current axes at location *loc*. Labels are a\nsequence of strings and *loc* can be a string or an integer specifying\nthe legend location.\n\nTo make a legend with existing lines::\n\n legend()\n\n:meth:`legend` by itself will try and build a legend using the label\nproperty of the lines/patches/collections. You can set the label of\na line by doing::\n\n plot(x, y, label='my data')\n\nor::\n\n line.set_label('my data').\n\nIf label is set to '_nolegend_', the item will not be shown in\nlegend.\n\nTo automatically generate the legend from labels::\n\n legend( ('label1', 'label2', 'label3') )\n\nTo make a legend for a list of lines and labels::\n\n legend( (line1, line2, line3), ('label1', 'label2', 'label3') )\n\nTo make a legend at a given location, using a location argument::\n\n legend( ('label1', 'label2', 'label3'), loc='upper left')\n\nor::\n\n legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2)\n\nThe location codes are\n\n =============== =============\n Location String Location Code\n =============== =============\n 'best' 0\n 'upper right' 1\n 'upper left' 2\n 'lower left' 3\n 'lower right' 4\n 'right' 5\n 'center left' 6\n 'center right' 7\n 'lower center' 8\n 'upper center' 9\n 'center' 10\n =============== =============\n\nIf none of these are locations are suitable, loc can be a 2-tuple\ngiving x,y in axes coords, ie::\n\n loc = 0, 1 # left top\n loc = 0.5, 0.5 # center\n\nKeyword arguments:\n\n *isaxes*: [ True | False ]\n Indicates that this is an axes legend\n\n *numpoints*: integer\n The number of points in the legend line, default is 4\n\n *prop*: [ None | FontProperties ]\n A :class:`matplotlib.font_manager.FontProperties`\n instance, or *None* to use rc settings.\n\n *pad*: [ None | scalar ]\n The fractional whitespace inside the legend border, between 0 and 1.\n If *None*, use rc settings.\n\n *markerscale*: [ None | scalar ]\n The relative size of legend markers vs. original. If *None*, use rc\n settings.\n\n *shadow*: [ None | False | True ]\n If *True*, draw a shadow behind legend. If *None*, use rc settings.\n\n *labelsep*: [ None | scalar ]\n The vertical space between the legend entries. If *None*, use rc\n settings.\n\n *handlelen*: [ None | scalar ]\n The length of the legend lines. 
If *None*, use rc settings.\n\n *handletextsep*: [ None | scalar ]\n The space between the legend line and legend text. If *None*, use rc\n settings.\n\n *axespad*: [ None | scalar ]\n The border between the axes and legend edge. If *None*, use rc\n settings.\n\n**Example:**\n\n.. plot:: mpl_examples/api/legend_demo.py", "id": "f17238:c1:m145"} {"signature": "def step(self, x, y, *args, **kwargs):", "body": "where = kwargs.pop('', '')if where not in ('', '', ''):raise ValueError(\"\"\"\")kwargs[''] = '' + wherereturn self.plot(x, y, *args, **kwargs)", "docstring": "call signature::\n\n step(x, y, *args, **kwargs)\n\nMake a step plot. Additional keyword args to :func:`step` are the same\nas those for :func:`~matplotlib.pyplot.plot`.\n\n*x* and *y* must be 1-D sequences, and it is assumed, but not checked,\nthat *x* is uniformly increasing.\n\nKeyword arguments:\n\n*where*: [ 'pre' | 'post' | 'mid' ]\n If 'pre', the interval from x[i] to x[i+1] has level y[i]\n\n If 'post', that interval has level y[i+1]\n\n If 'mid', the jumps in *y* occur half-way between the\n *x*-values.", "id": "f17238:c1:m146"} {"signature": "def bar(self, left, height, width=, bottom=None,color=None, edgecolor=None, linewidth=None,yerr=None, xerr=None, ecolor=None, capsize=,align='', orientation='', log=False,**kwargs):", "body": "if not self._hold: self.cla()label = kwargs.pop('', '')def make_iterable(x):if not iterable(x):return [x]else:return x_left = leftleft = make_iterable(left)height = make_iterable(height)width = make_iterable(width)_bottom = bottombottom = make_iterable(bottom)linewidth = make_iterable(linewidth)adjust_ylim = Falseadjust_xlim = Falseif orientation == '':self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)if log:self.set_yscale('')if _bottom is None:if self.get_yscale() == '':bottom = []adjust_ylim = Trueelse:bottom = []nbars = len(left)if len(width) == :width *= nbarsif len(bottom) == :bottom *= nbarselif orientation == '':self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)if log:self.set_xscale('')if _left is None:if self.get_xscale() == '':left = []adjust_xlim = Trueelse:left = []nbars = len(bottom)if len(left) == :left *= nbarsif len(height) == :height *= nbarselse:raise ValueError('' % orientation)if len(linewidth) < nbars:linewidth *= nbarsif color is None:color = [None] * nbarselse:color = list(mcolors.colorConverter.to_rgba_array(color))if len(color) < nbars:color *= nbarsif edgecolor is None:edgecolor = [None] * nbarselse:edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))if len(edgecolor) < nbars:edgecolor *= nbarsif yerr is not None:if not iterable(yerr):yerr = [yerr]*nbarsif xerr is not None:if not iterable(xerr):xerr = [xerr]*nbarsassert len(left)==nbars, \"\" % nbarsassert len(height)==nbars, (\"\" %nbars)assert len(width)==nbars, (\"\" %nbars)assert len(bottom)==nbars, (\"\" %nbars)if yerr is not None and len(yerr)!=nbars:raise ValueError(\"\" % nbars)if xerr is not None and len(xerr)!=nbars:raise ValueError(\"\" % nbars)patches = []if self.xaxis is not None:xconv = self.xaxis.converterif xconv is not None:units = self.xaxis.get_units()left = xconv.convert( left, units )width = xconv.convert( width, units )if self.yaxis is not None:yconv = self.yaxis.converterif yconv is not None :units = self.yaxis.get_units()bottom = yconv.convert( bottom, units )height = yconv.convert( height, units )if align == '':passelif align == '':if orientation == '':left = [left[i] - width[i]/ for i in range(len(left))]elif orientation == '':bottom = [bottom[i] - 
height[i]/ for i in range(len(bottom))]else:raise ValueError('' % align)args = list(zip(left, bottom, width, height, color, edgecolor, linewidth))for l, b, w, h, c, e, lw in args:if h<:b += hh = abs(h)if w<:l += ww = abs(w)r = mpatches.Rectangle(xy=(l, b), width=w, height=h,facecolor=c,edgecolor=e,linewidth=lw,label=label)label = ''r.update(kwargs)self.add_patch(r)patches.append(r)holdstate = self._holdself.hold(True) if xerr is not None or yerr is not None:if orientation == '':x = [l+*w for l, w in zip(left, width)]y = [b+h for b,h in zip(bottom, height)]elif orientation == '':x = [l+w for l,w in zip(left, width)]y = [b+*h for b,h in zip(bottom, height)]self.errorbar(x, y,yerr=yerr, xerr=xerr,fmt=None, ecolor=ecolor, capsize=capsize)self.hold(holdstate) if adjust_xlim:xmin, xmax = self.dataLim.intervalxxmin = np.amin(width[width!=]) if xerr is not None:xmin = xmin - np.amax(xerr)xmin = max(xmin*, )self.dataLim.intervalx = (xmin, xmax)if adjust_ylim:ymin, ymax = self.dataLim.intervalyymin = np.amin(height[height!=]) if yerr is not None:ymin = ymin - np.amax(yerr)ymin = max(ymin*, )self.dataLim.intervaly = (ymin, ymax)self.autoscale_view()return patches", "docstring": "call signature::\n\n bar(left, height, width=0.8, bottom=0,\n color=None, edgecolor=None, linewidth=None,\n yerr=None, xerr=None, ecolor=None, capsize=3,\n align='edge', orientation='vertical', log=False)\n\nMake a bar plot with rectangles bounded by:\n\n *left*, *left* + *width*, *bottom*, *bottom* + *height*\n (left, right, bottom and top edges)\n\n*left*, *height*, *width*, and *bottom* can be either scalars\nor sequences\n\nReturn value is a list of\n:class:`matplotlib.patches.Rectangle` instances.\n\nRequired arguments:\n\n ======== ===============================================\n Argument Description\n ======== ===============================================\n *left* the x coordinates of the left sides of the bars\n *height* the heights of the bars\n ======== ===============================================\n\nOptional keyword arguments:\n\n =============== ==========================================\n Keyword Description\n =============== ==========================================\n *width* the widths of the bars\n *bottom* the y coordinates of the bottom edges of\n the bars\n *color* the colors of the bars\n *edgecolor* the colors of the bar edges\n *linewidth* width of bar edges; None means use default\n linewidth; 0 means don't draw edges.\n *xerr* if not None, will be used to generate\n errorbars on the bar chart\n *yerr* if not None, will be used to generate\n errorbars on the bar chart\n *ecolor* specifies the color of any errorbar\n *capsize* (default 3) determines the length in\n points of the error bar caps\n *align* 'edge' (default) | 'center'\n *orientation* 'vertical' | 'horizontal'\n *log* [False|True] False (default) leaves the\n orientation axis as-is; True sets it to\n log scale\n =============== ==========================================\n\nFor vertical bars, *align* = 'edge' aligns bars by their left\nedges in left, while *align* = 'center' interprets these\nvalues as the *x* coordinates of the bar centers. For\nhorizontal bars, *align* = 'edge' aligns bars by their bottom\nedges in bottom, while *align* = 'center' interprets these\nvalues as the *y* coordinates of the bar centers.\n\nThe optional arguments *color*, *edgecolor*, *linewidth*,\n*xerr*, and *yerr* can be either scalars or sequences of\nlength equal to the number of bars. 
This enables you to use\nbar as the basis for stacked bar charts, or candlestick plots.\n\nOther optional kwargs:\n\n%(Rectangle)s\n\n**Example:** A stacked bar chart.\n\n.. plot:: mpl_examples/pylab_examples/bar_stacked.py", "id": "f17238:c1:m147"} {"signature": "def barh(self, bottom, width, height=, left=None, **kwargs):", "body": "patches = self.bar(left=left, height=height, width=width, bottom=bottom,orientation='', **kwargs)return patches", "docstring": "call signature::\n\n barh(bottom, width, height=0.8, left=0, **kwargs)\n\nMake a horizontal bar plot with rectangles bounded by:\n\n *left*, *left* + *width*, *bottom*, *bottom* + *height*\n (left, right, bottom and top edges)\n\n*bottom*, *width*, *height*, and *left* can be either scalars\nor sequences\n\nReturn value is a list of\n:class:`matplotlib.patches.Rectangle` instances.\n\nRequired arguments:\n\n ======== ======================================================\n Argument Description\n ======== ======================================================\n *bottom* the vertical positions of the bottom edges of the bars\n *width* the lengths of the bars\n ======== ======================================================\n\nOptional keyword arguments:\n\n =============== ==========================================\n Keyword Description\n =============== ==========================================\n *height* the heights (thicknesses) of the bars\n *left* the x coordinates of the left edges of the\n bars\n *color* the colors of the bars\n *edgecolor* the colors of the bar edges\n *linewidth* width of bar edges; None means use default\n linewidth; 0 means don't draw edges.\n *xerr* if not None, will be used to generate\n errorbars on the bar chart\n *yerr* if not None, will be used to generate\n errorbars on the bar chart\n *ecolor* specifies the color of any errorbar\n *capsize* (default 3) determines the length in\n points of the error bar caps\n *align* 'edge' (default) | 'center'\n *log* [False|True] False (default) leaves the\n horizontal axis as-is; True sets it to log\n scale\n =============== ==========================================\n\nSetting *align* = 'edge' aligns bars by their bottom edges in\nbottom, while *align* = 'center' interprets these values as\nthe *y* coordinates of the bar centers.\n\nThe optional arguments *color*, *edgecolor*, *linewidth*,\n*xerr*, and *yerr* can be either scalars or sequences of\nlength equal to the number of bars. This enables you to use\nbarh as the basis for stacked bar charts, or candlestick\nplots.\n\nother optional kwargs:\n\n%(Rectangle)s", "id": "f17238:c1:m148"} {"signature": "def broken_barh(self, xranges, yrange, **kwargs):", "body": "col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)self.add_collection(col, autolim=True)self.autoscale_view()return col", "docstring": "call signature::\n\n broken_barh(self, xranges, yrange, **kwargs)\n\nA collection of horizontal bars spanning *yrange* with a sequence of\n*xranges*.\n\nRequired arguments:\n\n ========= ==============================\n Argument Description\n ========= ==============================\n *xranges* sequence of (*xmin*, *xwidth*)\n *yrange* sequence of (*ymin*, *ywidth*)\n ========= ==============================\n\nkwargs are\n:class:`matplotlib.collections.BrokenBarHCollection`\nproperties:\n\n%(BrokenBarHCollection)s\n\nthese can either be a single argument, ie::\n\n facecolors = 'black'\n\nor a sequence of arguments for the various bars, ie::\n\n facecolors = ('black', 'red', 'green')\n\n**Example:**\n\n.. 
plot:: mpl_examples/pylab_examples/broken_barh.py", "id": "f17238:c1:m149"} {"signature": "def stem(self, x, y, linefmt='', markerfmt='', basefmt=''):", "body": "remember_hold=self._holdif not self._hold: self.cla()self.hold(True)markerline, = self.plot(x, y, markerfmt)stemlines = []for thisx, thisy in zip(x, y):l, = self.plot([thisx,thisx], [, thisy], linefmt)stemlines.append(l)baseline, = self.plot([np.amin(x), np.amax(x)], [,], basefmt)self.hold(remember_hold)return markerline, stemlines, baseline", "docstring": "call signature::\n\n stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')\n\nA stem plot plots vertical lines (using *linefmt*) at each *x*\nlocation from the baseline to *y*, and places a marker there\nusing *markerfmt*. A horizontal line at 0 is is plotted using\n*basefmt*.\n\nReturn value is a tuple (*markerline*, *stemlines*,\n*baseline*).\n\n.. seealso::\n `this document`__ for details\n\n :file:`examples/pylab_examples/stem_plot.py`:\n for a demo\n\n__ http://www.mathworks.com/access/helpdesk/help/techdoc/ref/stem.html", "id": "f17238:c1:m150"} {"signature": "def pie(self, x, explode=None, labels=None, colors=None,autopct=None, pctdistance=, shadow=False,labeldistance=):", "body": "self.set_frame_on(False)x = np.asarray(x).astype(np.float32)sx = float(x.sum())if sx>: x = np.divide(x,sx)if labels is None: labels = ['']*len(x)if explode is None: explode = []*len(x)assert(len(x)==len(labels))assert(len(x)==len(explode))if colors is None: colors = ('', '', '', '', '', '', '', '')center = ,radius = theta1 = i = texts = []slices = []autotexts = []for frac, label, expl in cbook.safezip(x,labels, explode):x, y = centertheta2 = theta1 + fracthetam = *math.pi**(theta1+theta2)x += expl*math.cos(thetam)y += expl*math.sin(thetam)w = mpatches.Wedge((x,y), radius, *theta1, *theta2,facecolor=colors[i%len(colors)])slices.append(w)self.add_patch(w)w.set_label(label)if shadow:shad = mpatches.Shadow(w, -, -,)shad.set_zorder(*w.get_zorder())self.add_patch(shad)xt = x + labeldistance*radius*math.cos(thetam)yt = y + labeldistance*radius*math.sin(thetam)label_alignment = xt > and '' or ''t = self.text(xt, yt, label,size=rcParams[''],horizontalalignment=label_alignment,verticalalignment='')texts.append(t)if autopct is not None:xt = x + pctdistance*radius*math.cos(thetam)yt = y + pctdistance*radius*math.sin(thetam)if is_string_like(autopct):s = autopct%(*frac)elif callable(autopct):s = autopct(*frac)else:raise TypeError('')t = self.text(xt, yt, s,horizontalalignment='',verticalalignment='')autotexts.append(t)theta1 = theta2i += self.set_xlim((-, ))self.set_ylim((-, ))self.set_xticks([])self.set_yticks([])if autopct is None: return slices, textselse: return slices, texts, autotexts", "docstring": "r\"\"\"\n call signature::\n\n pie(x, explode=None, labels=None,\n colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),\n autopct=None, pctdistance=0.6, labeldistance=1.1, shadow=False)\n\n Make a pie chart of array *x*. The fractional area of each\n wedge is given by x/sum(x). 
If sum(x) <= 1, then the values\n of x give the fractional area directly and the array will not\n be normalized.\n\n Keyword arguments:\n\n *explode*: [ None | len(x) sequence ]\n If not *None*, is a len(*x*) array which specifies the\n fraction of the radius with which to offset each wedge.\n\n *colors*: [ None | color sequence ]\n A sequence of matplotlib color args through which the pie chart\n will cycle.\n\n *labels*: [ None | len(x) sequence of strings ]\n A sequence of strings providing the labels for each wedge\n\n *autopct*: [ None | format string | format function ]\n If not *None*, is a string or function used to label the\n wedges with their numeric value. The label will be placed inside\n the wedge. If it is a format string, the label will be ``fmt%pct``.\n If it is a function, it will be called.\n\n *pctdistance*: scalar\n The ratio between the center of each pie slice and the\n start of the text generated by *autopct*. Ignored if\n *autopct* is *None*; default is 0.6.\n\n *labeldistance*: scalar\n The radial distance at which the pie labels are drawn\n\n *shadow*: [ False | True ]\n Draw a shadow beneath the pie.\n\n The pie chart will probably look best if the figure and axes are\n square. Eg.::\n\n figure(figsize=(8,8))\n ax = axes([0.1, 0.1, 0.8, 0.8])\n\n Return value:\n If *autopct* is None, return the tuple (*patches*, *texts*):\n\n - *patches* is a sequence of\n :class:`matplotlib.patches.Wedge` instances\n\n - *texts* is a list of the label\n :class:`matplotlib.text.Text` instances.\n\n If *autopct* is not *None*, return the tuple (*patches*,\n *texts*, *autotexts*), where *patches* and *texts* are as\n above, and *autotexts* is a list of\n :class:`~matplotlib.text.Text` instances for the numeric\n labels.", "id": "f17238:c1:m151"} {"signature": "def errorbar(self, x, y, yerr=None, xerr=None,fmt='', ecolor=None, elinewidth=None, capsize=,barsabove=False, lolims=False, uplims=False,xlolims=False, xuplims=False, **kwargs):", "body": "self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)if not self._hold: self.cla()if not iterable(x):x = [x]if not iterable(y):y = [y]if xerr is not None:if not iterable(xerr):xerr = [xerr]*len(x)if yerr is not None:if not iterable(yerr):yerr = [yerr]*len(y)l0 = Noneif barsabove and fmt is not None:l0, = self.plot(x,y,fmt,**kwargs)barcols = []caplines = []lines_kw = {'':''}if elinewidth:lines_kw[''] = elinewidthelse:if '' in kwargs:lines_kw['']=kwargs['']if '' in kwargs:lines_kw['']=kwargs['']if '' in kwargs:lines_kw[''] = kwargs['']if not iterable(lolims):lolims = np.asarray([lolims]*len(x), bool)else: lolims = np.asarray(lolims, bool)if not iterable(uplims): uplims = np.array([uplims]*len(x), bool)else: uplims = np.asarray(uplims, bool)if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool)else: xlolims = np.asarray(xlolims, bool)if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool)else: xuplims = np.asarray(xuplims, bool)def xywhere(xs, ys, mask):\"\"\"\"\"\"assert len(xs)==len(ys)assert len(xs)==len(mask)xs = [thisx for thisx, b in zip(xs, mask) if b]ys = [thisy for thisy, b in zip(ys, mask) if b]return xs, ysif capsize > :plot_kw = {'':*capsize,'':''}if '' in kwargs:plot_kw['']=kwargs['']if '' in kwargs:plot_kw['']=kwargs['']if '' in kwargs:plot_kw[''] = kwargs['']if xerr is not None:if (iterable(xerr) and len(xerr)== anditerable(xerr[]) and iterable(xerr[])):left = [thisx-thiserr for (thisx, thiserr)in cbook.safezip(x,xerr[])]right = [thisx+thiserr for (thisx, thiserr)in 
cbook.safezip(x,xerr[])]else:left = [thisx-thiserr for (thisx, thiserr)in cbook.safezip(x,xerr)]right = [thisx+thiserr for (thisx, thiserr)in cbook.safezip(x,xerr)]barcols.append( self.hlines(y, left, right, **lines_kw ) )if capsize > :if xlolims.any():leftlo, ylo = xywhere(left, y, xlolims)caplines.extend(self.plot(leftlo, ylo, ls='',marker=mlines.CARETLEFT, **plot_kw) )xlolims = ~xlolimsleftlo, ylo = xywhere(left, y, xlolims)caplines.extend( self.plot(leftlo, ylo, '', **plot_kw) )else:caplines.extend( self.plot(left, y, '', **plot_kw) )if xuplims.any():rightup, yup = xywhere(right, y, xuplims)caplines.extend(self.plot(rightup, yup, ls='',marker=mlines.CARETRIGHT, **plot_kw) )xuplims = ~xuplimsrightup, yup = xywhere(right, y, xuplims)caplines.extend( self.plot(rightup, yup, '', **plot_kw) )else:caplines.extend( self.plot(right, y, '', **plot_kw) )if yerr is not None:if (iterable(yerr) and len(yerr)== anditerable(yerr[]) and iterable(yerr[])):lower = [thisy-thiserr for (thisy, thiserr)in cbook.safezip(y,yerr[])]upper = [thisy+thiserr for (thisy, thiserr)in cbook.safezip(y,yerr[])]else:lower = [thisy-thiserr for (thisy, thiserr)in cbook.safezip(y,yerr)]upper = [thisy+thiserr for (thisy, thiserr)in cbook.safezip(y,yerr)]barcols.append( self.vlines(x, lower, upper, **lines_kw) )if capsize > :if lolims.any():xlo, lowerlo = xywhere(x, lower, lolims)caplines.extend(self.plot(xlo, lowerlo, ls='',marker=mlines.CARETDOWN, **plot_kw) )lolims = ~lolimsxlo, lowerlo = xywhere(x, lower, lolims)caplines.extend( self.plot(xlo, lowerlo, '', **plot_kw) )else:caplines.extend( self.plot(x, lower, '', **plot_kw) )if uplims.any():xup, upperup = xywhere(x, upper, uplims)caplines.extend(self.plot(xup, upperup, ls='',marker=mlines.CARETUP, **plot_kw) )uplims = ~uplimsxup, upperup = xywhere(x, upper, uplims)caplines.extend( self.plot(xup, upperup, '', **plot_kw) )else:caplines.extend( self.plot(x, upper, '', **plot_kw) )if not barsabove and fmt is not None:l0, = self.plot(x,y,fmt,**kwargs)if ecolor is None:if l0 is None:ecolor = self._get_lines._get_next_cycle_color()else:ecolor = l0.get_color()for l in barcols:l.set_color(ecolor)for l in caplines:l.set_color(ecolor)self.autoscale_view()return (l0, caplines, barcols)", "docstring": "call signature::\n\n errorbar(x, y, yerr=None, xerr=None,\n fmt='-', ecolor=None, elinewidth=None, capsize=3,\n barsabove=False, lolims=False, uplims=False,\n xlolims=False, xuplims=False)\n\nPlot *x* versus *y* with error deltas in *yerr* and *xerr*.\nVertical errorbars are plotted if *yerr* is not *None*.\nHorizontal errorbars are plotted if *xerr* is not *None*.\n\n*x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a\nsingle error bar at *x*, *y*.\n\nOptional keyword arguments:\n\n *xerr*/*yerr*: [ scalar | N, Nx1, Nx2 array-like ]\n If a scalar number, len(N) array-like object, or an Nx1 array-like\n object, errorbars are drawn +/- value.\n\n If a rank-1, Nx2 Numpy array, errorbars are drawn at -column1 and\n +column2\n\n *fmt*: '-'\n The plot format symbol for *y*. If *fmt* is *None*, just plot the\n errorbars with no line symbols. This can be useful for creating a\n bar plot with errorbars.\n\n *ecolor*: [ None | mpl color ]\n a matplotlib color arg which gives the color the errorbar lines; if\n *None*, use the marker color.\n\n *elinewidth*: scalar\n the linewidth of the errorbar lines. 
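A small sketch of the ``errorbar`` call documented here, assuming an existing Axes ``ax``; the data are illustrative::

    import numpy as np
    x = np.arange(10, dtype=float)
    y = np.sin(x / 3.0)
    yerr = 0.2 * np.ones_like(y)
    # symmetric vertical error bars; per the docstring the return value is a
    # length-3 tuple (line, caplines, barcollections)
    line, caplines, barcols = ax.errorbar(x, y, yerr=yerr, fmt='o-',
                                          ecolor='r', capsize=3)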
If *None*, use the linewidth.\n\n *capsize*: scalar\n the size of the error bar caps in points\n\n *barsabove*: [ True | False ]\n if *True*, will plot the errorbars above the plot\n symbols. Default is below.\n\n *lolims*/*uplims*/*xlolims*/*xuplims*: [ False | True ]\n These arguments can be used to indicate that a value gives\n only upper/lower limits. In that case a caret symbol is\n used to indicate this. lims-arguments may be of the same\n type as *xerr* and *yerr*.\n\nAll other keyword arguments are passed on to the plot command for the\nmarkers, so you can add additional key=value pairs to control the\nerrorbar markers. For example, this code makes big red squares with\nthick green edges::\n\n x,y,yerr = rand(3,10)\n errorbar(x, y, yerr, marker='s',\n mfc='red', mec='green', ms=20, mew=4)\n\nwhere *mfc*, *mec*, *ms* and *mew* are aliases for the longer\nproperty names, *markerfacecolor*, *markeredgecolor*, *markersize*\nand *markeredgewith*.\n\nvalid kwargs for the marker properties are\n\n%(Line2D)s\n\nReturn value is a length 3 tuple. The first element is the\n:class:`~matplotlib.lines.Line2D` instance for the *y* symbol\nlines. The second element is a list of error bar cap lines,\nthe third element is a list of\n:class:`~matplotlib.collections.LineCollection` instances for\nthe horizontal and vertical error ranges.\n\n**Example:**\n\n.. plot:: mpl_examples/pylab_examples/errorbar_demo.py", "id": "f17238:c1:m152"} {"signature": "def boxplot(self, x, notch=, sym='', vert=, whis=,positions=None, widths=None):", "body": "if not self._hold: self.cla()holdStatus = self._holdwhiskers, caps, boxes, medians, fliers = [], [], [], [], []if hasattr(x, ''):if len(x.shape) == :if hasattr(x[], ''):x = list(x)else:x = [x,]elif len(x.shape) == :nr, nc = x.shapeif nr == :x = [x]elif nc == :x = [x.ravel()]else:x = [x[:,i] for i in range(nc)]else:raise ValueError(\"\")if not hasattr(x[], ''):x = [x]col = len(x)if positions is None:positions = list(range(, col + ))if widths is None:distance = max(positions) - min(positions)widths = min(*max(distance,), )if isinstance(widths, float) or isinstance(widths, int):widths = np.ones((col,), float) * widthsself.hold(True)for i,pos in enumerate(positions):d = np.ravel(x[i])row = len(d)q1, med, q3 = mlab.prctile(d,[,,])iq = q3 - q1hi_val = q3 + whis*iqwisk_hi = np.compress( d <= hi_val , d )if len(wisk_hi) == :wisk_hi = q3else:wisk_hi = max(wisk_hi)lo_val = q1 - whis*iqwisk_lo = np.compress( d >= lo_val, d )if len(wisk_lo) == :wisk_lo = q1else:wisk_lo = min(wisk_lo)flier_hi = []flier_lo = []flier_hi_x = []flier_lo_x = []if len(sym) != :flier_hi = np.compress( d > wisk_hi, d )flier_lo = np.compress( d < wisk_lo, d )flier_hi_x = np.ones(flier_hi.shape[]) * posflier_lo_x = np.ones(flier_lo.shape[]) * posbox_x_min = pos - widths[i] * box_x_max = pos + widths[i] * wisk_x = np.ones() * poscap_x_min = pos - widths[i] * cap_x_max = pos + widths[i] * cap_x = [cap_x_min, cap_x_max]med_y = [med, med]if notch == :box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ]box_y = [q1, q1, q3, q3, q1 ]med_x = [box_x_min, box_x_max]else:notch_max = med + *iq/np.sqrt(row)notch_min = med - *iq/np.sqrt(row)if notch_max > q3:notch_max = q3if notch_min < q1:notch_min = q1box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max,box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min,box_x_min ]box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max,med, notch_min, q1]med_x = [cap_x_min, cap_x_max]med_y = [med, med]if vert:def doplot(*args):return 
self.plot(*args)else:def doplot(*args):shuffled = []for i in range(, len(args), ):shuffled.extend([args[i+], args[i], args[i+]])return self.plot(*shuffled)whiskers.extend(doplot(wisk_x, [q1, wisk_lo], '',wisk_x, [q3, wisk_hi], ''))caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], '',cap_x, [wisk_lo, wisk_lo], ''))boxes.extend(doplot(box_x, box_y, ''))medians.extend(doplot(med_x, med_y, ''))fliers.extend(doplot(flier_hi_x, flier_hi, sym,flier_lo_x, flier_lo, sym))if == vert:setticks, setlim = self.set_xticks, self.set_xlimelse:setticks, setlim = self.set_yticks, self.set_ylimnewlimits = min(positions)-, max(positions)+setlim(newlimits)setticks(positions)self.hold(holdStatus)return dict(whiskers=whiskers, caps=caps, boxes=boxes,medians=medians, fliers=fliers)", "docstring": "call signature::\n\n boxplot(x, notch=0, sym='+', vert=1, whis=1.5,\n positions=None, widths=None)\n\nMake a box and whisker plot for each column of *x* or each\nvector in sequence *x*. The box extends from the lower to\nupper quartile values of the data, with a line at the median.\nThe whiskers extend from the box to show the range of the\ndata. Flier points are those past the end of the whiskers.\n\n- *notch* = 0 (default) produces a rectangular box plot.\n- *notch* = 1 will produce a notched box plot\n\n*sym* (default 'b+') is the default symbol for flier points.\nEnter an empty string ('') if you don't want to show fliers.\n\n- *vert* = 1 (default) makes the boxes vertical.\n- *vert* = 0 makes horizontal boxes. This seems goofy, but\n that's how Matlab did it.\n\n*whis* (default 1.5) defines the length of the whiskers as\na function of the inner quartile range. They extend to the\nmost extreme data point within ( ``whis*(75%-25%)`` ) data range.\n\n*positions* (default 1,2,...,n) sets the horizontal positions of\nthe boxes. The ticks and limits are automatically set to match\nthe positions.\n\n*widths* is either a scalar or a vector and sets the width of\neach box. The default is 0.5, or ``0.15*(distance between extreme\npositions)`` if that is smaller.\n\n*x* is an array or a sequence of vectors.\n\nReturns a dictionary mapping each component of the boxplot\nto a list of the :class:`matplotlib.lines.Line2D`\ninstances created.\n\n**Example:**\n\n.. 
plot:: pyplots/boxplot_demo.py", "id": "f17238:c1:m153"} {"signature": "def scatter(self, x, y, s=, c='', marker='', cmap=None, norm=None,vmin=None, vmax=None, alpha=, linewidths=None,faceted=True, verts=None,**kwargs):", "body": "if not self._hold: self.cla()syms = { '' : (,math.pi/,), '' : (,,), '' : (,,), '>' : (,math.pi/,), '' : (,math.pi,), '' : (,*math.pi/,), '' : (,,), '' : (,,), '' : (,,), '' : (,,), '' : (,,), '' : (,math.pi/,) }self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)x, y, s, c = cbook.delete_masked_points(x, y, s, c)if is_string_like(c) or cbook.is_sequence_of_strings(c):colors = mcolors.colorConverter.to_rgba_array(c, alpha)else:sh = np.shape(c)if len(sh) == and sh[] == len(x):colors = None else:colors = mcolors.colorConverter.to_rgba_array(c, alpha)if not iterable(s):scales = (s,)else:scales = sif faceted:edgecolors = Noneelse:edgecolors = ''warnings.warn('''''',DeprecationWarning) sym = Nonesymstyle = if marker is None and not (verts is None):marker = (verts, )verts = Noneif is_string_like(marker):sym = syms.get(marker)if sym is None and verts is None:raise ValueError('')numsides, rotation, symstyle = syms[marker]elif iterable(marker):if len(marker)< or len(marker)>:raise ValueError('')if cbook.is_numlike(marker[]):if len(marker)==:numsides, rotation = marker[], elif len(marker)==:numsides, rotation = marker[], marker[]sym = Trueif marker[] in (,):symstyle = marker[]else:verts = np.asarray(marker[])if sym is not None:if symstyle==:collection = mcoll.RegularPolyCollection(numsides, rotation, scales,facecolors = colors,edgecolors = edgecolors,linewidths = linewidths,offsets = list(zip(x,y)),transOffset = self.transData,)elif symstyle==:collection = mcoll.StarPolygonCollection(numsides, rotation, scales,facecolors = colors,edgecolors = edgecolors,linewidths = linewidths,offsets = list(zip(x,y)),transOffset = self.transData,)elif symstyle==:collection = mcoll.AsteriskPolygonCollection(numsides, rotation, scales,facecolors = colors,edgecolors = edgecolors,linewidths = linewidths,offsets = list(zip(x,y)),transOffset = self.transData,)elif symstyle==:collection = mcoll.CircleCollection(scales,facecolors = colors,edgecolors = edgecolors,linewidths = linewidths,offsets = list(zip(x,y)),transOffset = self.transData,)else:rescale = np.sqrt(max(verts[:,]**+verts[:,]**))verts /= rescalecollection = mcoll.PolyCollection((verts,), scales,facecolors = colors,edgecolors = edgecolors,linewidths = linewidths,offsets = list(zip(x,y)),transOffset = self.transData,)collection.set_transform(mtransforms.IdentityTransform())collection.set_alpha(alpha)collection.update(kwargs)if colors is None:if norm is not None: assert(isinstance(norm, mcolors.Normalize))if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))collection.set_array(np.asarray(c))collection.set_cmap(cmap)collection.set_norm(norm)if vmin is not None or vmax is not None:collection.set_clim(vmin, vmax)else:collection.autoscale_None()temp_x = xtemp_y = yminx = np.amin(temp_x)maxx = np.amax(temp_x)miny = np.amin(temp_y)maxy = np.amax(temp_y)w = maxx-minxh = maxy-minypadx, pady = *w, *hcorners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)self.update_datalim( corners)self.autoscale_view()self.add_collection(collection)return collection", "docstring": "call signatures::\n\n scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None,\n vmin=None, vmax=None, alpha=1.0, linewidths=None,\n verts=None, **kwargs)\n\nMake a scatter plot of *x* versus *y*, where *x*, *y* are 1-D\nsequences of the same length, *N*.\n\nKeyword 
arguments:\n\n *s*:\n size in points^2. It is a scalar or an array of the same\n length as *x* and *y*.\n\n *c*:\n a color. *c* can be a single color format string, or a\n sequence of color specifications of length *N*, or a\n sequence of *N* numbers to be mapped to colors using the\n *cmap* and *norm* specified via kwargs (see below). Note\n that *c* should not be a single numeric RGB or RGBA\n sequence because that is indistinguishable from an array\n of values to be colormapped. *c* can be a 2-D array in\n which the rows are RGB or RGBA, however.\n\n *marker*:\n can be one of:\n\n ===== ==============\n Value Description\n ===== ==============\n 's' square\n 'o' circle\n '^' triangle up\n '>' triangle right\n 'v' triangle down\n '<' triangle left\n 'd' diamond\n 'p' pentagram\n 'h' hexagon\n '8' octagon\n '+' plus\n 'x' cross\n ===== ==============\n\n The marker can also be a tuple (*numsides*, *style*,\n *angle*), which will create a custom, regular symbol.\n\n *numsides*:\n the number of sides\n\n *style*:\n the style of the regular symbol:\n\n ===== =============================================\n Value Description\n ===== =============================================\n 0 a regular polygon\n 1 a star-like symbol\n 2 an asterisk\n 3 a circle (*numsides* and *angle* is ignored)\n ===== =============================================\n\n *angle*:\n the angle of rotation of the symbol\n\n Finally, *marker* can be (*verts*, 0): *verts* is a\n sequence of (*x*, *y*) vertices for a custom scatter\n symbol. Alternatively, use the kwarg combination\n *marker* = *None*, *verts* = *verts*.\n\nAny or all of *x*, *y*, *s*, and *c* may be masked arrays, in\nwhich case all masks will be combined and only unmasked points\nwill be plotted.\n\nOther keyword arguments: the color mapping and normalization\narguments will be used only if *c* is an array of floats.\n\n *cmap*: [ None | Colormap ]\n A :class:`matplotlib.colors.Colormap` instance. If *None*,\n defaults to rc ``image.cmap``. *cmap* is only used if *c*\n is an array of floats.\n\n *norm*: [ None | Normalize ]\n A :class:`matplotlib.colors.Normalize` instance is used to\n scale luminance data to 0, 1. If *None*, use the default\n :func:`normalize`. *norm* is only used if *c* is an array\n of floats.\n\n *vmin*/*vmax*:\n *vmin* and *vmax* are used in conjunction with norm to\n normalize luminance data. If either are None, the min and\n max of the color array *C* is used. Note if you pass a\n *norm* instance, your settings for *vmin* and *vmax* will\n be ignored.\n\n *alpha*: 0 <= scalar <= 1\n The alpha value for the patches\n\n *linewidths*: [ None | scalar | sequence ]\n If *None*, defaults to (lines.linewidth,). 
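A minimal sketch of the ``scatter`` call described above, assuming an Axes ``ax``; the size and color arrays are illustrative and have the same length as the data::

    import numpy as np
    x = np.random.rand(50)
    y = np.random.rand(50)
    sizes = 100 * np.random.rand(50)    # marker areas in points^2
    values = np.random.rand(50)         # floats mapped through cmap/norm
    coll = ax.scatter(x, y, s=sizes, c=values, marker='o', alpha=0.75)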
Note that this\n is a tuple, and if you set the linewidths argument you\n must set it as a sequence of floats, as required by\n :class:`~matplotlib.collections.RegularPolyCollection`.\n\nOptional kwargs control the\n:class:`~matplotlib.collections.Collection` properties; in\nparticular:\n\n *edgecolors*:\n 'none' to plot faces with no outlines\n\n *facecolors*:\n 'none' to plot unfilled outlines\n\nHere are the standard descriptions of all the\n:class:`~matplotlib.collections.Collection` kwargs:\n\n%(Collection)s\n\nA :class:`~matplotlib.collections.Collection` instance is\nreturned.", "id": "f17238:c1:m154"} {"signature": "def hexbin(self, x, y, C = None, gridsize = , bins = None,xscale = '', yscale = '',cmap=None, norm=None, vmin=None, vmax=None,alpha=, linewidths=None, edgecolors='',reduce_C_function = np.mean,**kwargs):", "body": "if not self._hold: self.cla()self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)x, y, C = cbook.delete_masked_points(x, y, C)if iterable(gridsize):nx, ny = gridsizeelse:nx = gridsizeny = int(nx/math.sqrt())x = np.array(x, float)y = np.array(y, float)if xscale=='':x = np.log10(x)if yscale=='':y = np.log10(y)xmin = np.amin(x)xmax = np.amax(x)ymin = np.amin(y)ymax = np.amax(y)padding = * (xmax - xmin)xmin -= paddingxmax += paddingsx = (xmax-xmin) / nxsy = (ymax-ymin) / nyx = (x-xmin)/sxy = (y-ymin)/syix1 = np.round(x).astype(int)iy1 = np.round(y).astype(int)ix2 = np.floor(x).astype(int)iy2 = np.floor(y).astype(int)nx1 = nx + ny1 = ny + nx2 = nxny2 = nyn = nx1*ny1+nx2*ny2d1 = (x-ix1)** + * (y-iy1)**d2 = (x-ix2-)** + * (y-iy2-)**bdist = (d1if C is None:accum = np.zeros(n)lattice1 = accum[:nx1*ny1]lattice2 = accum[nx1*ny1:]lattice1.shape = (nx1,ny1)lattice2.shape = (nx2,ny2)for i in range(len(x)):if bdist[i]:lattice1[ix1[i], iy1[i]]+=else:lattice2[ix2[i], iy2[i]]+=else:lattice1 = np.empty((nx1,ny1),dtype=object)for i in range(nx1):for j in range(ny1):lattice1[i,j] = []lattice2 = np.empty((nx2,ny2),dtype=object)for i in range(nx2):for j in range(ny2):lattice2[i,j] = []for i in range(len(x)):if bdist[i]:lattice1[ix1[i], iy1[i]].append( C[i] )else:lattice2[ix2[i], iy2[i]].append( C[i] )for i in range(nx1):for j in range(ny1):vals = lattice1[i,j]if len(vals):lattice1[i,j] = reduce_C_function( vals )else:lattice1[i,j] = np.nanfor i in range(nx2):for j in range(ny2):vals = lattice2[i,j]if len(vals):lattice2[i,j] = reduce_C_function( vals )else:lattice2[i,j] = np.nanaccum = np.hstack((lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))good_idxs = ~np.isnan(accum)px = xmin + sx * np.array([ , , , -, -, ])py = ymin + sy * np.array([-, , , , -, -]) / polygons = np.zeros((, n, ), float)polygons[:,:nx1*ny1,] = np.repeat(np.arange(nx1), ny1)polygons[:,:nx1*ny1,] = np.tile(np.arange(ny1), nx1)polygons[:,nx1*ny1:,] = np.repeat(np.arange(nx2) + , ny2)polygons[:,nx1*ny1:,] = np.tile(np.arange(ny2), nx2) + if C is not None:polygons = polygons[:,good_idxs,:]accum = accum[good_idxs]polygons = np.transpose(polygons, axes=[,,])polygons[:,:,] *= sxpolygons[:,:,] *= sypolygons[:,:,] += pxpolygons[:,:,] += pyif xscale=='':polygons[:,:,] = **(polygons[:,:,])xmin = **xminxmax = **xmaxself.set_xscale('')if yscale=='':polygons[:,:,] = **(polygons[:,:,])ymin = **yminymax = **ymaxself.set_yscale('')if edgecolors=='':edgecolors = ''collection = mcoll.PolyCollection(polygons,edgecolors = edgecolors,linewidths = linewidths,transOffset = self.transData,)if bins=='':accum = np.log10(accum+)elif bins!=None:if not iterable(bins):minimum, maximum = min(accum), max(accum)bins-= bins = 
minimum + (maximum-minimum)*np.arange(bins)/binsbins = np.sort(bins)accum = bins.searchsorted(accum)if norm is not None: assert(isinstance(norm, mcolors.Normalize))if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))collection.set_array(accum)collection.set_cmap(cmap)collection.set_norm(norm)collection.set_alpha(alpha)collection.update(kwargs)if vmin is not None or vmax is not None:collection.set_clim(vmin, vmax)else:collection.autoscale_None()corners = ((xmin, ymin), (xmax, ymax))self.update_datalim( corners)self.autoscale_view()self.add_collection(collection)return collection", "docstring": "call signature::\n\n hexbin(x, y, C = None, gridsize = 100, bins = None,\n xscale = 'linear', yscale = 'linear',\n cmap=None, norm=None, vmin=None, vmax=None,\n alpha=1.0, linewidths=None, edgecolors='none'\n reduce_C_function = np.mean,\n **kwargs)\n\nMake a hexagonal binning plot of *x* versus *y*, where *x*,\n*y* are 1-D sequences of the same length, *N*. If *C* is None\n(the default), this is a histogram of the number of occurences\nof the observations at (x[i],y[i]).\n\nIf *C* is specified, it specifies values at the coordinate\n(x[i],y[i]). These values are accumulated for each hexagonal\nbin and then reduced according to *reduce_C_function*, which\ndefaults to numpy's mean function (np.mean). (If *C* is\nspecified, it must also be a 1-D sequence of the same length\nas *x* and *y*.)\n\n*x*, *y* and/or *C* may be masked arrays, in which case only\nunmasked points will be plotted.\n\nOptional keyword arguments:\n\n *gridsize*: [ 100 | integer ]\n The number of hexagons in the *x*-direction, default is\n 100. The corresponding number of hexagons in the\n *y*-direction is chosen such that the hexagons are\n approximately regular. Alternatively, gridsize can be a\n tuple with two elements specifying the number of hexagons\n in the *x*-direction and the *y*-direction.\n\n *bins*: [ None | 'log' | integer | sequence ]\n If *None*, no binning is applied; the color of each hexagon\n directly corresponds to its count value.\n\n If 'log', use a logarithmic scale for the color\n map. Internally, :math:`log_{10}(i+1)` is used to\n determine the hexagon color.\n\n If an integer, divide the counts in the specified number\n of bins, and color the hexagons accordingly.\n\n If a sequence of values, the values of the lower bound of\n the bins to be used.\n\n *xscale*: [ 'linear' | 'log' ]\n Use a linear or log10 scale on the horizontal axis.\n\n *scale*: [ 'linear' | 'log' ]\n Use a linear or log10 scale on the vertical axis.\n\nOther keyword arguments controlling color mapping and normalization\narguments:\n\n *cmap*: [ None | Colormap ]\n a :class:`matplotlib.cm.Colormap` instance. If *None*,\n defaults to rc ``image.cmap``.\n\n *norm*: [ None | Normalize ]\n :class:`matplotlib.colors.Normalize` instance is used to\n scale luminance data to 0,1.\n\n *vmin*/*vmax*: scalar\n *vmin* and *vmax* are used in conjunction with *norm* to normalize\n luminance data. If either are *None*, the min and max of the color\n array *C* is used. Note if you pass a norm instance, your settings\n for *vmin* and *vmax* will be ignored.\n\n *alpha*: scalar\n the alpha value for the patches\n\n *linewidths*: [ None | scalar ]\n If *None*, defaults to rc lines.linewidth. 
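A brief usage sketch for ``hexbin`` as documented above, assuming an Axes ``ax`` and synthetic data::

    import numpy as np
    x = np.random.standard_normal(10000)
    y = 2.0 * x + np.random.standard_normal(10000)
    # bins='log' colors each hexagon by log10(count + 1)
    coll = ax.hexbin(x, y, gridsize=50, bins='log')
    counts = coll.get_array()           # per-hexagon values, per the docstring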
Note that this\n is a tuple, and if you set the linewidths argument you\n must set it as a sequence of floats, as required by\n :class:`~matplotlib.collections.RegularPolyCollection`.\n\nOther keyword arguments controlling the Collection properties:\n\n *edgecolors*: [ None | mpl color | color sequence ]\n If 'none', draws the edges in the same color as the fill color.\n This is the default, as it avoids unsightly unpainted pixels\n between the hexagons.\n\n If *None*, draws the outlines in the default color.\n\n If a matplotlib color arg or sequence of rgba tuples, draws the\n outlines in the specified color.\n\nHere are the standard descriptions of all the\n:class:`~matplotlib.collections.Collection` kwargs:\n\n%(Collection)s\n\nThe return value is a\n:class:`~matplotlib.collections.PolyCollection` instance; use\n:meth:`~matplotlib.collection.PolyCollection.get_array` on\nthis :class:`~matplotlib.collections.PolyCollection` to get\nthe counts in each hexagon.\n\n**Example:**\n\n.. plot:: mpl_examples/pylab_examples/hexbin_demo.py", "id": "f17238:c1:m155"} {"signature": "def arrow(self, x, y, dx, dy, **kwargs):", "body": "a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)self.add_artist(a)return a", "docstring": "call signature::\n\n arrow(x, y, dx, dy, **kwargs)\n\nDraws arrow on specified axis from (*x*, *y*) to (*x* + *dx*,\n*y* + *dy*).\n\nOptional kwargs control the arrow properties:\n%(FancyArrow)s\n\n**Example:**\n\n.. plot:: mpl_examples/pylab_examples/arrow_demo.py", "id": "f17238:c1:m156"} {"signature": "def barbs(self, *args, **kw):", "body": "if not self._hold: self.cla()b = mquiver.Barbs(self, *args, **kw)self.add_collection(b)self.update_datalim(b.get_offsets())self.autoscale_view()return b", "docstring": "%(barbs_doc)s\n**Example:**\n\n.. plot:: mpl_examples/pylab_examples/barb_demo.py", "id": "f17238:c1:m159"} {"signature": "def fill(self, *args, **kwargs):", "body": "if not self._hold: self.cla()patches = []for poly in self._get_patches_for_fill(*args, **kwargs):self.add_patch( poly )patches.append( poly )self.autoscale_view()return patches", "docstring": "call signature::\n\n fill(*args, **kwargs)\n\nPlot filled polygons. *args* is a variable length argument,\nallowing for multiple *x*, *y* pairs with an optional color\nformat string; see :func:`~matplotlib.pyplot.plot` for details\non the argument parsing. For example, to plot a polygon with\nvertices at *x*, *y* in blue.::\n\n ax.fill(x,y, 'b' )\n\nAn arbitrary number of *x*, *y*, *color* groups can be specified::\n\n ax.fill(x1, y1, 'g', x2, y2, 'r')\n\nReturn value is a list of :class:`~matplotlib.patches.Patch`\ninstances that were added.\n\nThe same color strings that :func:`~matplotlib.pyplot.plot`\nsupports are supported by the fill format string.\n\nIf you would like to fill below a curve, eg. shade a region\nbetween 0 and *y* along *x*, use :meth:`fill_between`\n\nThe *closed* kwarg will close the polygon when *True* (default).\n\nkwargs control the Polygon properties:\n\n%(Polygon)s\n\n**Example:**\n\n.. 
plot:: mpl_examples/pylab_examples/fill_demo.py", "id": "f17238:c1:m160"} {"signature": "def fill_between(self, x, y1, y2=, where=None, **kwargs):", "body": "self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)self._process_unit_info(ydata=y2)x = np.asarray(self.convert_xunits(x))y1 = np.asarray(self.convert_yunits(y1))y2 = np.asarray(self.convert_yunits(y2))if not cbook.iterable(y1):y1 = np.ones_like(x)*y1if not cbook.iterable(y2):y2 = np.ones_like(x)*y2if where is None:where = np.ones(len(x), np.bool)where = np.asarray(where)assert( (len(x)==len(y1)) and (len(x)==len(y2)) and len(x)==len(where))polys = []for ind0, ind1 in mlab.contiguous_regions(where):theseverts = []xslice = x[ind0:ind1]y1slice = y1[ind0:ind1]y2slice = y2[ind0:ind1]if not len(xslice):continueN = len(xslice)X = np.zeros((*N+, ), np.float)X[] = xslice[], y2slice[]X[N+] = xslice[-], y2slice[-]X[:N+,] = xsliceX[:N+,] = y1sliceX[N+:,] = xslice[::-]X[N+:,] = y2slice[::-]polys.append(X)collection = mcoll.PolyCollection(polys, **kwargs)XY1 = np.array([x[where], y1[where]]).TXY2 = np.array([x[where], y2[where]]).Tself.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,updatex=True, updatey=True)self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,updatex=False, updatey=True)self.add_collection(collection)self.autoscale_view()return collection", "docstring": "call signature::\n\n fill_between(x, y1, y2=0, where=None, **kwargs)\n\nCreate a :class:`~matplotlib.collections.PolyCollection`\nfilling the regions between *y1* and *y2* where\n``where==True``\n\n*x*\n an N length np array of the x data\n\n*y1*\n an N length scalar or np array of the x data\n\n*y2*\n an N length scalar or np array of the x data\n\n*where*\n if None, default to fill between everywhere. If not None,\n it is a a N length numpy boolean array and the fill will\n only happen over the regions where ``where==True``\n\n*kwargs*\n keyword args passed on to the :class:`PolyCollection`\n\nkwargs control the Polygon properties:\n\n%(PolyCollection)s\n\n.. plot:: mpl_examples/pylab_examples/fill_between.py", "id": "f17238:c1:m161"} {"signature": "def imshow(self, X, cmap=None, norm=None, aspect=None,interpolation=None, alpha=, vmin=None, vmax=None,origin=None, extent=None, shape=None, filternorm=,filterrad=, imlim=None, resample=None, url=None, **kwargs):", "body": "if not self._hold: self.cla()if norm is not None: assert(isinstance(norm, mcolors.Normalize))if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))if aspect is None: aspect = rcParams['']self.set_aspect(aspect)im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,filternorm=filternorm,filterrad=filterrad, resample=resample, **kwargs)im.set_data(X)im.set_alpha(alpha)self._set_artist_props(im)im.set_clip_path(self.patch)if vmin is not None or vmax is not None:im.set_clim(vmin, vmax)else:im.autoscale_None()im.set_url(url)xmin, xmax, ymin, ymax = im.get_extent()corners = (xmin, ymin), (xmax, ymax)self.update_datalim(corners)if self._autoscaleon:self.set_xlim((xmin, xmax))self.set_ylim((ymin, ymax))self.images.append(im)return im", "docstring": "call signature::\n\n imshow(X, cmap=None, norm=None, aspect=None, interpolation=None,\n alpha=1.0, vmin=None, vmax=None, origin=None, extent=None,\n **kwargs)\n\nDisplay the image in *X* to current axes. *X* may be a float\narray, a uint8 array or a PIL image. 
If *X* is an array, *X*\ncan have the following shapes:\n\n* MxN -- luminance (grayscale, float array only)\n* MxNx3 -- RGB (float or uint8 array)\n* MxNx4 -- RGBA (float or uint8 array)\n\nThe value for each component of MxNx3 and MxNx4 float arrays should be\nin the range 0.0 to 1.0; MxN float arrays may be normalised.\n\nAn :class:`matplotlib.image.AxesImage` instance is returned.\n\nKeyword arguments:\n\n *cmap*: [ None | Colormap ]\n A :class:`matplotlib.cm.Colormap` instance, eg. cm.jet.\n If *None*, default to rc ``image.cmap`` value.\n\n *cmap* is ignored when *X* has RGB(A) information\n\n *aspect*: [ None | 'auto' | 'equal' | scalar ]\n If 'auto', changes the image aspect ratio to match that of the axes\n\n If 'equal', and *extent* is *None*, changes the axes\n aspect ratio to match that of the image. If *extent* is\n not *None*, the axes aspect ratio is changed to match that\n of the extent.\n\n If *None*, default to rc ``image.aspect`` value.\n\n *interpolation*:\n\n Acceptable values are *None*, 'nearest', 'bilinear',\n 'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',\n 'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',\n 'bessel', 'mitchell', 'sinc', 'lanczos',\n\n\n If *interpolation* is *None*, default to rc\n ``image.interpolation``. See also the *filternorm* and\n *filterrad* parameters\n\n *norm*: [ None | Normalize ]\n An :class:`matplotlib.colors.Normalize` instance; if\n *None*, default is ``normalization()``. This scales\n luminance -> 0-1\n\n *norm* is only used for an MxN float array.\n\n *vmin*/*vmax*: [ None | scalar ]\n Used to scale a luminance image to 0-1. If either is\n *None*, the min and max of the luminance values will be\n used. Note if *norm* is not *None*, the settings for\n *vmin* and *vmax* will be ignored.\n\n *alpha*: scalar\n The alpha blending value, between 0 (transparent) and 1 (opaque)\n\n *origin*: [ None | 'upper' | 'lower' ]\n Place the [0,0] index of the array in the upper left or lower left\n corner of the axes. If *None*, default to rc ``image.origin``.\n\n *extent*: [ None | scalars (left, right, bottom, top) ]\n Eata values of the axes. The default assigns zero-based row,\n column indices to the *x*, *y* centers of the pixels.\n\n *shape*: [ None | scalars (columns, rows) ]\n For raw buffer images\n\n *filternorm*:\n A parameter for the antigrain image resize filter. From the\n antigrain documentation, if *filternorm* = 1, the filter normalizes\n integer values and corrects the rounding errors. It doesn't do\n anything with the source floating point values, it corrects only\n integers according to the rule of 1.0 which means that any sum of\n pixel weights must be equal to 1.0. So, the filter function must\n produce a graph of the proper shape.\n\n *filterrad*:\n The filter radius for filters that have a radius\n parameter, i.e. when interpolation is one of: 'sinc',\n 'lanczos' or 'blackman'\n\nAdditional kwargs are :class:`~matplotlib.artist.Artist` properties:\n\n%(Artist)s\n\n**Example:**\n\n.. 
plot:: mpl_examples/pylab_examples/image_demo.py", "id": "f17238:c1:m162"} {"signature": "def pcolor(self, *args, **kwargs):", "body": "if not self._hold: self.cla()alpha = kwargs.pop('', )norm = kwargs.pop('', None)cmap = kwargs.pop('', None)vmin = kwargs.pop('', None)vmax = kwargs.pop('', None)shading = kwargs.pop('', '')X, Y, C = self._pcolorargs('', *args)Ny, Nx = X.shapeC = ma.asarray(C)X = ma.asarray(X)Y = ma.asarray(Y)mask = ma.getmaskarray(X)+ma.getmaskarray(Y)xymask = mask[:-,:-]+mask[:,:]+mask[:-,:]+mask[:,:-]mask = ma.getmaskarray(C)[:Ny-,:Nx-]+xymasknewaxis = np.newaxiscompress = np.compressravelmask = (mask==).ravel()X1 = compress(ravelmask, ma.filled(X[:-,:-]).ravel())Y1 = compress(ravelmask, ma.filled(Y[:-,:-]).ravel())X2 = compress(ravelmask, ma.filled(X[:,:-]).ravel())Y2 = compress(ravelmask, ma.filled(Y[:,:-]).ravel())X3 = compress(ravelmask, ma.filled(X[:,:]).ravel())Y3 = compress(ravelmask, ma.filled(Y[:,:]).ravel())X4 = compress(ravelmask, ma.filled(X[:-,:]).ravel())Y4 = compress(ravelmask, ma.filled(Y[:-,:]).ravel())npoly = len(X1)xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis],X2[:,newaxis], Y2[:,newaxis],X3[:,newaxis], Y3[:,newaxis],X4[:,newaxis], Y4[:,newaxis],X1[:,newaxis], Y1[:,newaxis]),axis=)verts = xy.reshape((npoly, , ))C = compress(ravelmask, ma.filled(C[:Ny-,:Nx-]).ravel())if shading == '':edgecolors = (,,,),linewidths = (,)else:edgecolors = ''linewidths = (,)kwargs.setdefault('', edgecolors)kwargs.setdefault('', (,))kwargs.setdefault('', linewidths)collection = mcoll.PolyCollection(verts, **kwargs)collection.set_alpha(alpha)collection.set_array(C)if norm is not None: assert(isinstance(norm, mcolors.Normalize))if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))collection.set_cmap(cmap)collection.set_norm(norm)if vmin is not None or vmax is not None:collection.set_clim(vmin, vmax)else:collection.autoscale_None()self.grid(False)x = X.compressed()y = Y.compressed()minx = np.amin(x)maxx = np.amax(x)miny = np.amin(y)maxy = np.amax(y)corners = (minx, miny), (maxx, maxy)self.update_datalim( corners)self.autoscale_view()self.add_collection(collection)return collection", "docstring": "call signatures::\n\n pcolor(C, **kwargs)\n pcolor(X, Y, C, **kwargs)\n\nCreate a pseudocolor plot of a 2-D array.\n\n*C* is the array of color values.\n\n*X* and *Y*, if given, specify the (*x*, *y*) coordinates of\nthe colored quadrilaterals; the quadrilateral for C[i,j] has\ncorners at::\n\n (X[i, j], Y[i, j]),\n (X[i, j+1], Y[i, j+1]),\n (X[i+1, j], Y[i+1, j]),\n (X[i+1, j+1], Y[i+1, j+1]).\n\nIdeally the dimensions of *X* and *Y* should be one greater\nthan those of *C*; if the dimensions are the same, then the\nlast row and column of *C* will be ignored.\n\nNote that the the column index corresponds to the\n*x*-coordinate, and the row index corresponds to *y*; for\ndetails, see the :ref:`Grid Orientation\n` section below.\n\nIf either or both of *X* and *Y* are 1-D arrays or column vectors,\nthey will be expanded as needed into the appropriate 2-D arrays,\nmaking a rectangular grid.\n\n*X*, *Y* and *C* may be masked arrays. If either C[i, j], or one\nof the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],\n[i, j+1],[i+1, j+1]) is masked, nothing is plotted.\n\nKeyword arguments:\n\n *cmap*: [ None | Colormap ]\n A :class:`matplotlib.cm.Colormap` instance. If *None*, use\n rc settings.\n\n norm: [ None | Normalize ]\n An :class:`matplotlib.colors.Normalize` instance is used\n to scale luminance data to 0,1. 
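A minimal sketch of the ``imshow`` entry above, assuming an Axes ``ax`` and an MxN float array used as a luminance image::

    import numpy as np
    img = np.random.rand(64, 64)        # illustrative grayscale data
    im = ax.imshow(img, interpolation='nearest', origin='lower',
                   extent=(0.0, 1.0, 0.0, 1.0))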
If *None*, defaults to\n :func:`normalize`.\n\n *vmin*/*vmax*: [ None | scalar ]\n *vmin* and *vmax* are used in conjunction with *norm* to\n normalize luminance data. If either are *None*, the min\n and max of the color array *C* is used. If you pass a\n *norm* instance, *vmin* and *vmax* will be ignored.\n\n *shading*: [ 'flat' | 'faceted' ]\n If 'faceted', a black grid is drawn around each rectangle; if\n 'flat', edges are not drawn. Default is 'flat', contrary to\n Matlab(TM).\n\n This kwarg is deprecated; please use 'edgecolors' instead:\n * shading='flat' -- edgecolors='None'\n * shading='faceted -- edgecolors='k'\n\n *edgecolors*: [ None | 'None' | color | color sequence]\n If *None*, the rc setting is used by default.\n\n If 'None', edges will not be visible.\n\n An mpl color or sequence of colors will set the edge color\n\n *alpha*: 0 <= scalar <= 1\n the alpha blending value\n\nReturn value is a :class:`matplotlib.collection.Collection`\ninstance.\n\n.. _axes-pcolor-grid-orientation:\n\nThe grid orientation follows the Matlab(TM) convention: an\narray *C* with shape (*nrows*, *ncolumns*) is plotted with\nthe column number as *X* and the row number as *Y*, increasing\nup; hence it is plotted the way the array would be printed,\nexcept that the *Y* axis is reversed. That is, *C* is taken\nas *C*(*y*, *x*).\n\nSimilarly for :func:`~matplotlib.pyplot.meshgrid`::\n\n x = np.arange(5)\n y = np.arange(3)\n X, Y = meshgrid(x,y)\n\nis equivalent to:\n\n X = array([[0, 1, 2, 3, 4],\n [0, 1, 2, 3, 4],\n [0, 1, 2, 3, 4]])\n\n Y = array([[0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2]])\n\nso if you have::\n\n C = rand( len(x), len(y))\n\nthen you need::\n\n pcolor(X, Y, C.T)\n\nor::\n\n pcolor(C.T)\n\nMatlab :func:`pcolor` always discards the last row and column\nof *C*, but matplotlib displays the last row and column if *X* and\n*Y* are not specified, or if *X* and *Y* have one more row and\ncolumn than *C*.\n\nkwargs can be used to control the\n:class:`~matplotlib.collection.PolyCollection` properties:\n\n%(PolyCollection)s", "id": "f17238:c1:m164"} {"signature": "def pcolormesh(self, *args, **kwargs):", "body": "if not self._hold: self.cla()alpha = kwargs.pop('', )norm = kwargs.pop('', None)cmap = kwargs.pop('', None)vmin = kwargs.pop('', None)vmax = kwargs.pop('', None)shading = kwargs.pop('', '')edgecolors = kwargs.pop('', '')antialiased = kwargs.pop('', False)X, Y, C = self._pcolorargs('', *args)Ny, Nx = X.shapeC = ma.ravel(C[:Ny-, :Nx-]) X = X.ravel()Y = Y.ravel()coords = np.zeros(((Nx * Ny), ), dtype=float)coords[:, ] = Xcoords[:, ] = Yif shading == '' or edgecolors != '':showedges = else:showedges = collection = mcoll.QuadMesh(Nx - , Ny - , coords, showedges,antialiased=antialiased) collection.set_alpha(alpha)collection.set_array(C)if norm is not None: assert(isinstance(norm, mcolors.Normalize))if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))collection.set_cmap(cmap)collection.set_norm(norm)if vmin is not None or vmax is not None:collection.set_clim(vmin, vmax)else:collection.autoscale_None()self.grid(False)minx = np.amin(X)maxx = np.amax(X)miny = np.amin(Y)maxy = np.amax(Y)corners = (minx, miny), (maxx, maxy)self.update_datalim( corners)self.autoscale_view()self.add_collection(collection)return collection", "docstring": "call signatures::\n\n pcolormesh(C)\n pcolormesh(X, Y, C)\n pcolormesh(C, **kwargs)\n\n*C* may be a masked array, but *X* and *Y* may not. 
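A small sketch illustrating the pcolor/pcolormesh grid convention described above, assuming an Axes ``ax``; note that *X* and *Y* have one more row and column than *C*::

    import numpy as np
    x = np.arange(6)
    y = np.arange(4)
    X, Y = np.meshgrid(x, y)            # both shape (4, 6)
    C = np.random.rand(3, 5)            # one less in each dimension
    quadmesh = ax.pcolormesh(X, Y, C, vmin=0.0, vmax=1.0)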
Masked\narray support is implemented via *cmap* and *norm*; in\ncontrast, :func:`~matplotlib.pyplot.pcolor` simply does not\ndraw quadrilaterals with masked colors or vertices.\n\nKeyword arguments:\n\n *cmap*: [ None | Colormap ]\n A :class:`matplotlib.cm.Colormap` instance. If None, use\n rc settings.\n\n *norm*: [ None | Normalize ]\n A :class:`matplotlib.colors.Normalize` instance is used to\n scale luminance data to 0,1. If None, defaults to\n :func:`normalize`.\n\n *vmin*/*vmax*: [ None | scalar ]\n *vmin* and *vmax* are used in conjunction with *norm* to\n normalize luminance data. If either are *None*, the min\n and max of the color array *C* is used. If you pass a\n *norm* instance, *vmin* and *vmax* will be ignored.\n\n *shading*: [ 'flat' | 'faceted' ]\n If 'faceted', a black grid is drawn around each rectangle; if\n 'flat', edges are not drawn. Default is 'flat', contrary to\n Matlab(TM).\n\n This kwarg is deprecated; please use 'edgecolors' instead:\n * shading='flat' -- edgecolors='None'\n * shading='faceted -- edgecolors='k'\n\n *edgecolors*: [ None | 'None' | color | color sequence]\n If None, the rc setting is used by default.\n\n If 'None', edges will not be visible.\n\n An mpl color or sequence of colors will set the edge color\n\n *alpha*: 0 <= scalar <= 1\n the alpha blending value\n\nReturn value is a :class:`matplotlib.collection.QuadMesh`\nobject.\n\nkwargs can be used to control the\n:class:`matplotlib.collections.QuadMesh`\nproperties:\n\n%(QuadMesh)s\n\n.. seealso::\n :func:`~matplotlib.pyplot.pcolor`:\n For an explanation of the grid orientation and the\n expansion of 1-D *X* and/or *Y* to 2-D arrays.", "id": "f17238:c1:m165"} {"signature": "def pcolorfast(self, *args, **kwargs):", "body": "if not self._hold: self.cla()alpha = kwargs.pop('', )norm = kwargs.pop('', None)cmap = kwargs.pop('', None)vmin = kwargs.pop('', None)vmax = kwargs.pop('', None)if norm is not None: assert(isinstance(norm, mcolors.Normalize))if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))C = args[-]nr, nc = C.shapeif len(args) == :style = \"\"x = [, nc]y = [, nr]elif len(args) == :x, y = args[:]x = np.asarray(x)y = np.asarray(y)if x.ndim == and y.ndim == :if x.size == and y.size == :style = \"\"else:dx = np.diff(x)dy = np.diff(y)if (np.ptp(dx) < *np.abs(dx.mean()) andnp.ptp(dy) < *np.abs(dy.mean())):style = \"\"else:style = \"\"elif x.ndim == and y.ndim == :style = \"\"else:raise TypeError(\"\")else:raise TypeError(\"\")if style == \"\":C = ma.ravel(C) X = x.ravel()Y = y.ravel()Nx = nc+Ny = nr+coords = np.empty(((Nx * Ny), ), np.float64)coords[:, ] = Xcoords[:, ] = Ycollection = mcoll.QuadMesh(nc, nr, coords, )collection.set_alpha(alpha)collection.set_array(C)collection.set_cmap(cmap)collection.set_norm(norm)self.add_collection(collection)xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()ret = collectionelse:xl, xr, yb, yt = x[], x[-], y[], y[-]if style == \"\":im = mimage.AxesImage(self, cmap, norm,interpolation='',origin='',extent=(xl, xr, yb, yt),**kwargs)im.set_data(C)im.set_alpha(alpha)self.images.append(im)ret = imif style == \"\":im = mimage.PcolorImage(self, x, y, C,cmap=cmap,norm=norm,alpha=alpha,**kwargs)self.images.append(im)ret = imself._set_artist_props(ret)if vmin is not None or vmax is not None:ret.set_clim(vmin, vmax)else:ret.autoscale_None()self.update_datalim(np.array([[xl, yb], [xr, yt]]))self.autoscale_view(tight=True)return ret", "docstring": "pseudocolor plot of a 2-D array\n\nExperimental; this is a version of pcolor that\ndoes not draw lines, that 
provides the fastest\npossible rendering with the Agg backend, and that\ncan handle any quadrilateral grid.\n\nCall signatures::\n\n pcolor(C, **kwargs)\n pcolor(xr, yr, C, **kwargs)\n pcolor(x, y, C, **kwargs)\n pcolor(X, Y, C, **kwargs)\n\nC is the 2D array of color values corresponding to quadrilateral\ncells. Let (nr, nc) be its shape. C may be a masked array.\n\n``pcolor(C, **kwargs)`` is equivalent to\n``pcolor([0,nc], [0,nr], C, **kwargs)``\n\n*xr*, *yr* specify the ranges of *x* and *y* corresponding to the\nrectangular region bounding *C*. If::\n\n xr = [x0, x1]\n\nand::\n\n yr = [y0,y1]\n\nthen *x* goes from *x0* to *x1* as the second index of *C* goes\nfrom 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of\ncell (0,0), and (*x1*, *y1*) is the outermost corner of cell\n(*nr*-1, *nc*-1). All cells are rectangles of the same size.\nThis is the fastest version.\n\n*x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively,\ngiving the x and y boundaries of the cells. Hence the cells are\nrectangular but the grid may be nonuniform. The speed is\nintermediate. (The grid is checked, and if found to be\nuniform the fast version is used.)\n\n*X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify\nthe (x,y) coordinates of the corners of the colored\nquadrilaterals; the quadrilateral for C[i,j] has corners at\n(X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]),\n(X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular.\nThis is the most general, but the slowest to render. It may\nproduce faster and more compact output using ps, pdf, and\nsvg backends, however.\n\nNote that the the column index corresponds to the x-coordinate,\nand the row index corresponds to y; for details, see\nthe \"Grid Orientation\" section below.\n\nOptional keyword arguments:\n\n *cmap*: [ None | Colormap ]\n A cm Colormap instance from cm. If None, use rc settings.\n *norm*: [ None | Normalize ]\n An mcolors.Normalize instance is used to scale luminance data to\n 0,1. If None, defaults to normalize()\n *vmin*/*vmax*: [ None | scalar ]\n *vmin* and *vmax* are used in conjunction with norm to normalize\n luminance data. If either are *None*, the min and max of the color\n array *C* is used. If you pass a norm instance, *vmin* and *vmax*\n will be *None*.\n *alpha*: 0 <= scalar <= 1\n the alpha blending value\n\nReturn value is an image if a regular or rectangular grid\nis specified, and a QuadMesh collection in the general\nquadrilateral case.", "id": "f17238:c1:m166"} {"signature": "def table(self, **kwargs):", "body": "return mtable.table(self, **kwargs)", "docstring": "call signature::\n\n table(cellText=None, cellColours=None,\n cellLoc='right', colWidths=None,\n rowLabels=None, rowColours=None, rowLoc='left',\n colLabels=None, colColours=None, colLoc='center',\n loc='bottom', bbox=None):\n\nAdd a table to the current axes. Returns a\n:class:`matplotlib.table.Table` instance. 
For finer grained\ncontrol over tables, use the :class:`~matplotlib.table.Table`\nclass and add it to the axes with\n:meth:`~matplotlib.axes.Axes.add_table`.\n\nThanks to John Gill for providing the class and table.\n\nkwargs control the :class:`~matplotlib.table.Table`\nproperties:\n\n%(Table)s", "id": "f17238:c1:m170"} {"signature": "def twinx(self):", "body": "ax2 = self.figure.add_axes(self.get_position(True), sharex=self,frameon=False)ax2.yaxis.tick_right()ax2.yaxis.set_label_position('')self.yaxis.tick_left()return ax2", "docstring": "call signature::\n\n ax = twinx()\n\ncreate a twin of Axes for generating a plot with a sharex\nx-axis but independent y axis. The y-axis of self will have\nticks on left and the returned axes will have ticks on the\nright", "id": "f17238:c1:m171"} {"signature": "def twiny(self):", "body": "ax2 = self.figure.add_axes(self.get_position(True), sharey=self,frameon=False)ax2.xaxis.tick_top()ax2.xaxis.set_label_position('')self.xaxis.tick_bottom()return ax2", "docstring": "call signature::\n\n ax = twiny()\n\ncreate a twin of Axes for generating a plot with a shared\ny-axis but independent x axis. The x-axis of self will have\nticks on bottom and the returned axes will have ticks on the\ntop", "id": "f17238:c1:m172"} {"signature": "def get_shared_x_axes(self):", "body": "return self._shared_x_axes", "docstring": "Return a copy of the shared axes Grouper object for x axes", "id": "f17238:c1:m173"} {"signature": "def get_shared_y_axes(self):", "body": "return self._shared_y_axes", "docstring": "Return a copy of the shared axes Grouper object for y axes", "id": "f17238:c1:m174"} {"signature": "def hist(self, x, bins=, range=None, normed=False, cumulative=False,bottom=None, histtype='', align='',orientation='', rwidth=None, log=False, **kwargs):", "body": "if not self._hold: self.cla()if kwargs.get('') is not None:raise DeprecationWarning('''')try:x = np.transpose(np.array(x))if len(x.shape)==:x.shape = (,x.shape[])elif len(x.shape)== and x.shape[]]:warnings.warn('''')except ValueError:if iterable(x[]) and not is_string_like(x[]):tx = []for i in range(len(x)):tx.append( np.array(x[i]) )x = txelse:raise ValueError('')binsgiven = (cbook.iterable(bins) or range != None)if np.__version__ < \"\": hist_kwargs = dict(range=range,normed=bool(normed), new=True)else: hist_kwargs = dict(range=range,normed=bool(normed))n = []for i in range(len(x)):m, bins = np.histogram(x[i], bins, **hist_kwargs)n.append(m)if cumulative:slc = slice(None)if cbook.is_numlike(cumulative) and cumulative < :slc = slice(None,None,-)if normed:n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n]else:n = [m[slc].cumsum()[slc] for m in n]patches = []if histtype.startswith(''):totwidth = np.diff(bins)stacked = Falseif rwidth is not None: dr = min(, max(, rwidth))elif len(n)>: dr = else: dr = if histtype=='':width = dr*totwidth/len(n)dw = widthif len(n)>:boffset = -*dr*totwidth*(-/len(n))else:boffset = elif histtype=='':width = dr*totwidthboffset, dw = , stacked = Trueelse:raise ValueError('' % histtype)if align == '' or align == '':boffset += *totwidthelif align == '':boffset += totwidthelif align != '' and align != '':raise ValueError('' % align)if orientation == '':for m in n:color = self._get_lines._get_next_cycle_color()patch = self.barh(bins[:-]+boffset, m, height=width,left=bottom, align='', log=log,color=color)patches.append(patch)if stacked:if bottom is None: bottom = bottom += mboffset += dwelif orientation == '':for m in n:color = self._get_lines._get_next_cycle_color()patch = 
self.bar(bins[:-]+boffset, m, width=width,bottom=bottom, align='', log=log,color=color)patches.append(patch)if stacked:if bottom is None: bottom = bottom += mboffset += dwelse:raise ValueError('' % orientation)elif histtype.startswith(''):x = np.zeros( *len(bins), np.float )y = np.zeros( *len(bins), np.float )x[::], x[::] = bins, binsif align == '' or align == '':x -= *(bins[]-bins[])elif align == '':x += *(bins[]-bins[])elif align != '' and align != '':raise ValueError('' % align)if log:y[],y[-] = , if orientation == '':self.set_xscale('')elif orientation == '':self.set_yscale('')fill = Falseif histtype == '':fill = Trueelif histtype != '':raise ValueError('' % histtype)for m in n:y[:-:], y[::] = m, mif orientation == '':x,y = y,xelif orientation != '':raise ValueError('' % orientation)color = self._get_lines._get_next_cycle_color()if fill:patches.append( self.fill(x, y,closed=False, facecolor=color) )else:patches.append( self.fill(x, y,closed=False, edgecolor=color, fill=False) )if orientation == '':xmin, xmax = , self.dataLim.intervalx[]for m in n:xmin = np.amin(m[m!=]) xmin = max(xmin*, )self.dataLim.intervalx = (xmin, xmax)elif orientation == '':ymin, ymax = , self.dataLim.intervaly[]for m in n:ymin = np.amin(m[m!=]) ymin = max(ymin*, )self.dataLim.intervaly = (ymin, ymax)self.autoscale_view()else:raise ValueError('' % histtype)label = kwargs.pop('', '')for patch in patches:for p in patch:p.update(kwargs)p.set_label(label)label = ''if binsgiven:self.set_autoscale_on(False)if orientation == '':self.autoscale_view(scalex=False, scaley=True)XL = self.xaxis.get_major_locator().view_limits(bins[], bins[-])self.set_xbound(XL)else:self.autoscale_view(scalex=True, scaley=False)YL = self.yaxis.get_major_locator().view_limits(bins[], bins[-])self.set_ybound(YL)if len(n)==:return n[], bins, cbook.silent_list('', patches[])else:return n, bins, cbook.silent_list('', patches)", "docstring": "call signature::\n\n hist(x, bins=10, range=None, normed=False, cumulative=False,\n bottom=None, histtype='bar', align='mid',\n orientation='vertical', rwidth=None, log=False, **kwargs)\n\nCompute and draw the histogram of *x*. The return value is a\ntuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,\n[*patches0*, *patches1*,...]) if the input contains multiple\ndata.\n\nKeyword arguments:\n\n *bins*:\n Either an integer number of bins or a sequence giving the\n bins. *x* are the data to be binned. *x* can be an array,\n a 2D array with multiple data in its columns, or a list of\n arrays with data of different length. Note, if *bins*\n is an integer input argument=numbins, *bins* + 1 bin edges\n will be returned, compatible with the semantics of\n :func:`numpy.histogram` with the *new* = True argument.\n Unequally spaced bins are supported if *bins* is a sequence.\n\n *range*:\n The lower and upper range of the bins. Lower and upper outliers\n are ignored. If not provided, *range* is (x.min(), x.max()).\n Range has no effect if *bins* is a sequence.\n\n If *bins* is a sequence or *range* is specified, autoscaling is\n set off (*autoscale_on* is set to *False*) and the xaxis limits\n are set to encompass the full specified bin range.\n\n *normed*:\n If *True*, the first element of the return tuple will\n be the counts normalized to form a probability density, i.e.,\n ``n/(len(x)*dbin)``. 
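A minimal sketch of the ``hist`` call documented here, assuming an Axes ``ax``; it mirrors the labelled-histogram example given later in this docstring::

    import numpy as np
    data = 10 + 2 * np.random.randn(1000)
    # returns (counts, bin_edges, rectangle_patches)
    n, bins, patches = ax.hist(data, bins=30, normed=True,
                               histtype='bar', align='mid', label='men')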
In a probability density, the integral of\n the histogram should be 1; you can verify that with a\n trapezoidal integration of the probability density function::\n\n pdf, bins, patches = ax.hist(...)\n print np.sum(pdf * np.diff(bins))\n\n *cumulative*:\n If *True*, then a histogram is computed where each bin\n gives the counts in that bin plus all bins for smaller values.\n The last bin gives the total number of datapoints. If *normed*\n is also *True* then the histogram is normalized such that the\n last bin equals 1. If *cumulative* evaluates to less than 0\n (e.g. -1), the direction of accumulation is reversed. In this\n case, if *normed* is also *True*, then the histogram is normalized\n such that the first bin equals 1.\n\n *histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ]\n The type of histogram to draw.\n\n - 'bar' is a traditional bar-type histogram. If multiple data\n are given the bars are aranged side by side.\n\n - 'barstacked' is a bar-type histogram where multiple\n data are stacked on top of each other.\n\n - 'step' generates a lineplot that is by default\n unfilled.\n\n - 'stepfilled' generates a lineplot that is by default\n filled.\n\n *align*: ['left' | 'mid' | 'right' ]\n Controls how the histogram is plotted.\n\n - 'left': bars are centered on the left bin edges.\n\n - 'mid': bars are centered between the bin edges.\n\n - 'right': bars are centered on the right bin edges.\n\n *orientation*: [ 'horizontal' | 'vertical' ]\n If 'horizontal', :func:`~matplotlib.pyplot.barh` will be\n used for bar-type histograms and the *bottom* kwarg will be\n the left edges.\n\n *rwidth*:\n The relative width of the bars as a fraction of the bin\n width. If *None*, automatically compute the width. Ignored\n if *histtype* = 'step' or 'stepfilled'.\n\n *log*:\n If *True*, the histogram axis will be set to a log scale.\n If *log* is *True* and *x* is a 1D array, empty bins will\n be filtered out and only the non-empty (*n*, *bins*,\n *patches*) will be returned.\n\nkwargs are used to update the properties of the hist\n:class:`~matplotlib.patches.Rectangle` instances:\n\n%(Rectangle)s\n\nYou can use labels for your histogram, and only the first\n:class:`~matplotlib.patches.Rectangle` gets the label (the\nothers get the magic string '_nolegend_'. This will make the\nhistograms work in the intuitive way for bar charts::\n\n ax.hist(10+2*np.random.randn(1000), label='men')\n ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5)\n ax.legend()\n\n**Example:**\n\n.. 
plot:: mpl_examples/pylab_examples/histogram_demo.py", "id": "f17238:c1:m175"} {"signature": "def psd(self, x, NFFT=, Fs=, Fc=, detrend=mlab.detrend_none,window=mlab.window_hanning, noverlap=, pad_to=None,sides='', scale_by_freq=None, **kwargs):", "body": "if not self._hold: self.cla()pxx, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap, pad_to,sides, scale_by_freq)pxx.shape = len(freqs),freqs += Fcif scale_by_freq in (None, True):psd_units = ''else:psd_units = ''self.plot(freqs, *np.log10(pxx), **kwargs)self.set_xlabel('')self.set_ylabel('' % psd_units)self.grid(True)vmin, vmax = self.viewLim.intervalyintv = vmax-vminlogi = int(np.log10(intv))if logi==: logi=step = *logiticks = np.arange(math.floor(vmin), math.ceil(vmax)+, step)self.set_yticks(ticks)return pxx, freqs", "docstring": "call signature::\n\n psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,\n window=mlab.window_hanning, noverlap=0, pad_to=None,\n sides='default', scale_by_freq=None, **kwargs)\n\nThe power spectral density by Welch's average periodogram\nmethod. The vector *x* is divided into *NFFT* length\nsegments. Each segment is detrended by function *detrend* and\nwindowed by function *window*. *noverlap* gives the length of\nthe overlap between segments. The :math:`|\\mathrm{fft}(i)|^2`\nof each segment :math:`i` are averaged to compute *Pxx*, with a\nscaling to correct for power loss due to windowing. *Fs* is the\nsampling frequency.\n\n%(PSD)s\n\n *Fc*: integer\n The center frequency of *x* (defaults to 0), which offsets\n the x extents of the plot to reflect the frequency range used\n when a signal is acquired and then filtered and downsampled to\n baseband.\n\nReturns the tuple (*Pxx*, *freqs*).\n\nFor plotting, the power is plotted as\n:math:`10\\log_{10}(P_{xx})` for decibels, though *Pxx* itself\nis returned.\n\nReferences:\n Bendat & Piersol -- Random Data: Analysis and Measurement\n Procedures, John Wiley & Sons (1986)\n\nkwargs control the :class:`~matplotlib.lines.Line2D` properties:\n\n%(Line2D)s\n\n**Example:**\n\n.. plot:: mpl_examples/pylab_examples/psd_demo.py", "id": "f17238:c1:m176"} {"signature": "def csd(self, x, y, NFFT=, Fs=, Fc=, detrend=mlab.detrend_none,window=mlab.window_hanning, noverlap=, pad_to=None,sides='', scale_by_freq=None, **kwargs):", "body": "if not self._hold: self.cla()pxy, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap,pad_to, sides, scale_by_freq)pxy.shape = len(freqs),freqs += Fcself.plot(freqs, *np.log10(np.absolute(pxy)), **kwargs)self.set_xlabel('')self.set_ylabel('')self.grid(True)vmin, vmax = self.viewLim.intervalyintv = vmax-vminstep = *int(np.log10(intv))ticks = np.arange(math.floor(vmin), math.ceil(vmax)+, step)self.set_yticks(ticks)return pxy, freqs", "docstring": "call signature::\n\n csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,\n window=mlab.window_hanning, noverlap=0, pad_to=None,\n sides='default', scale_by_freq=None, **kwargs)\n\nThe cross spectral density :math:`P_{xy}` by Welch's average\nperiodogram method. The vectors *x* and *y* are divided into\n*NFFT* length segments. Each segment is detrended by function\n*detrend* and windowed by function *window*. The product of\nthe direct FFTs of *x* and *y* are averaged over each segment\nto compute :math:`P_{xy}`, with a scaling to correct for power\nloss due to windowing.\n\nReturns the tuple (*Pxy*, *freqs*). 
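As a usage sketch for the spectral-density methods above: the sampling rate, tone frequency and noise level below are invented, only the ``psd``/``csd`` calls and their ``(Pxx, freqs)`` / ``(Pxy, freqs)`` return values come from the records::

    import numpy as np
    import matplotlib.pyplot as plt

    fs = 500.0                                    # hypothetical sampling frequency
    t = np.arange(0, 10, 1.0 / fs)
    x = np.sin(2 * np.pi * 50 * t) + 0.5 * np.random.randn(len(t))
    y = np.sin(2 * np.pi * 50 * t + 1.0) + 0.5 * np.random.randn(len(t))

    fig, (ax1, ax2) = plt.subplots(2, 1)
    pxx, freqs = ax1.psd(x, NFFT=256, Fs=fs)      # Welch power spectral density, plotted in dB
    pxy, freqs = ax2.csd(x, y, NFFT=256, Fs=fs)   # cross spectral density of x and y
    plt.show()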
*P* is the cross spectrum\n(complex valued), and :math:`10\\log_{10}|P_{xy}|` is\nplotted.\n\n%(PSD)s\n\n *Fc*: integer\n The center frequency of *x* (defaults to 0), which offsets\n the x extents of the plot to reflect the frequency range used\n when a signal is acquired and then filtered and downsampled to\n baseband.\n\nReferences:\n Bendat & Piersol -- Random Data: Analysis and Measurement\n Procedures, John Wiley & Sons (1986)\n\nkwargs control the Line2D properties:\n\n%(Line2D)s\n\n**Example:**\n\n.. plot:: mpl_examples/pylab_examples/csd_demo.py\n\n.. seealso:\n :meth:`psd`\n For a description of the optional parameters.", "id": "f17238:c1:m177"} {"signature": "def cohere(self, x, y, NFFT=, Fs=, Fc=, detrend=mlab.detrend_none,window=mlab.window_hanning, noverlap=, pad_to=None,sides='', scale_by_freq=None, **kwargs):", "body": "if not self._hold: self.cla()cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap,scale_by_freq)freqs += Fcself.plot(freqs, cxy, **kwargs)self.set_xlabel('')self.set_ylabel('')self.grid(True)return cxy, freqs", "docstring": "call signature::\n\n cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,\n window = mlab.window_hanning, noverlap=0, pad_to=None,\n sides='default', scale_by_freq=None, **kwargs)\n\ncohere the coherence between *x* and *y*. Coherence is the normalized\ncross spectral density:\n\n.. math::\n\n C_{xy} = \\\\frac{|P_{xy}|^2}{P_{xx}P_{yy}}\n\n%(PSD)s\n\n *Fc*: integer\n The center frequency of *x* (defaults to 0), which offsets\n the x extents of the plot to reflect the frequency range used\n when a signal is acquired and then filtered and downsampled to\n baseband.\n\nThe return value is a tuple (*Cxy*, *f*), where *f* are the\nfrequencies of the coherence vector.\n\nkwargs are applied to the lines.\n\nReferences:\n\n * Bendat & Piersol -- Random Data: Analysis and Measurement\n Procedures, John Wiley & Sons (1986)\n\nkwargs control the :class:`~matplotlib.lines.Line2D`\nproperties of the coherence plot:\n\n%(Line2D)s\n\n**Example:**\n\n.. plot:: mpl_examples/pylab_examples/cohere_demo.py", "id": "f17238:c1:m178"} {"signature": "def specgram(self, x, NFFT=, Fs=, Fc=, detrend=mlab.detrend_none,window=mlab.window_hanning, noverlap=,cmap=None, xextent=None, pad_to=None, sides='',scale_by_freq=None):", "body": "if not self._hold: self.cla()Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend,window, noverlap, pad_to, sides, scale_by_freq)Z = * np.log10(Pxx)Z = np.flipud(Z)if xextent is None: xextent = , np.amax(bins)xmin, xmax = xextentfreqs += Fcextent = xmin, xmax, freqs[], freqs[-]im = self.imshow(Z, cmap, extent=extent)self.axis('')return Pxx, freqs, bins, im", "docstring": "call signature::\n\n specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,\n window=mlab.window_hanning, noverlap=128,\n cmap=None, xextent=None, pad_to=None, sides='default',\n scale_by_freq=None)\n\nCompute a spectrogram of data in *x*. Data are split into\n*NFFT* length segments and the PSD of each section is\ncomputed. The windowing function *window* is applied to each\nsegment, and the amount of overlap of each segment is\nspecified with *noverlap*.\n\n%(PSD)s\n\n *Fc*: integer\n The center frequency of *x* (defaults to 0), which offsets\n the y extents of the plot to reflect the frequency range used\n when a signal is acquired and then filtered and downsampled to\n baseband.\n\n *cmap*:\n A :class:`matplotlib.cm.Colormap` instance; if *None* use\n default determined by rc\n\n *xextent*:\n The image extent along the x-axis. 
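A minimal sketch of the spectrogram call documented above; the drifting test tone and the ``NFFT``/``Fs``/``noverlap`` values are merely illustrative::

    import numpy as np
    import matplotlib.pyplot as plt

    fs = 1000.0                                    # hypothetical sampling frequency
    t = np.arange(0, 5, 1.0 / fs)
    x = np.sin(2 * np.pi * (50 + 20 * t) * t)      # tone whose frequency drifts upward

    fig, ax = plt.subplots()
    Pxx, freqs, bins, im = ax.specgram(x, NFFT=256, Fs=fs, noverlap=128)
    fig.colorbar(im)                               # im is the AxesImage returned by specgram
    plt.show()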
xextent = (xmin,xmax)\n The default is (0,max(bins)), where bins is the return\n value from :func:`mlab.specgram`\n\nReturn value is (*Pxx*, *freqs*, *bins*, *im*):\n\n - *bins* are the time points the spectrogram is calculated over\n - *freqs* is an array of frequencies\n - *Pxx* is a len(times) x len(freqs) array of power\n - *im* is a :class:`matplotlib.image.AxesImage` instance\n\nNote: If *x* is real (i.e. non-complex), only the positive\nspectrum is shown. If *x* is complex, both positive and\nnegative parts of the spectrum are shown. This can be\noverridden using the *sides* keyword argument.\n\n**Example:**\n\n.. plot:: mpl_examples/pylab_examples/specgram_demo.py", "id": "f17238:c1:m179"} {"signature": "def spy(self, Z, precision=, marker=None, markersize=None,aspect='', **kwargs):", "body": "if precision is None:precision = warnings.DeprecationWarning(\"\")if marker is None and markersize is None and hasattr(Z, ''):marker = ''if marker is None and markersize is None:Z = np.asarray(Z)mask = np.absolute(Z)>precisionif '' not in kwargs:kwargs[''] = mcolors.ListedColormap(['', ''],name='')nr, nc = Z.shapeextent = [-, nc-, nr-, -]ret = self.imshow(mask, interpolation='', aspect=aspect,extent=extent, origin='', **kwargs)else:if hasattr(Z, ''):c = Z.tocoo()if precision == '':y = c.rowx = c.colelse:nonzero = np.absolute(c.data) > precisiony = c.row[nonzero]x = c.col[nonzero]else:Z = np.asarray(Z)nonzero = np.absolute(Z)>precisiony, x = np.nonzero(nonzero)if marker is None: marker = ''if markersize is None: markersize = marks = mlines.Line2D(x, y, linestyle='',marker=marker, markersize=markersize, **kwargs)self.add_line(marks)nr, nc = Z.shapeself.set_xlim(xmin=-, xmax=nc-)self.set_ylim(ymin=nr-, ymax=-)self.set_aspect(aspect)ret = marksself.title.set_y()self.xaxis.tick_top()self.xaxis.set_ticks_position('')self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=,steps=[, , , ],integer=True))self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=,steps=[, , , ],integer=True))return ret", "docstring": "call signature::\n\n spy(Z, precision=0, marker=None, markersize=None,\n aspect='equal', **kwargs)\n\n``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*.\n\nIf *precision* is 0, any non-zero value will be plotted;\nelse, values of :math:`|Z| > precision` will be plotted.\n\nFor :class:`scipy.sparse.spmatrix` instances, there is a\nspecial case: if *precision* is 'present', any value present in\nthe array will be plotted, even if it is identically zero.\n\nThe array will be plotted as it would be printed, with\nthe first index (row) increasing down and the second\nindex (column) increasing to the right.\n\nBy default aspect is 'equal', so that each array element\noccupies a square space; set the aspect kwarg to 'auto'\nto allow the plot to fill the plot box, or to any scalar\nnumber to specify the aspect ratio of an array element\ndirectly.\n\nTwo plotting styles are available: image or marker. Both\nare available for full arrays, but only the marker style\nworks for :class:`scipy.sparse.spmatrix` instances.\n\nIf *marker* and *markersize* are *None*, an image will be\nreturned and any remaining kwargs are passed to\n:func:`~matplotlib.pyplot.imshow`; else, a\n:class:`~matplotlib.lines.Line2D` object will be returned with\nthe value of marker determining the marker type, and any\nremaining kwargs passed to the\n:meth:`~matplotlib.axes.Axes.plot` method.\n\nIf *marker* and *markersize* are *None*, useful kwargs include:\n\n* *cmap*\n* *alpha*\n\n.. 
seealso::\n :func:`~matplotlib.pyplot.imshow`\n\nFor controlling colors, e.g. cyan background and red marks,\nuse::\n\n cmap = mcolors.ListedColormap(['c','r'])\n\nIf *marker* or *markersize* is not *None*, useful kwargs include:\n\n* *marker*\n* *markersize*\n* *color*\n\nUseful values for *marker* include:\n\n* 's' square (default)\n* 'o' circle\n* '.' point\n* ',' pixel\n\n.. seealso::\n :func:`~matplotlib.pyplot.plot`", "id": "f17238:c1:m180"} {"signature": "def matshow(self, Z, **kwargs):", "body": "Z = np.asarray(Z)nr, nc = Z.shapeextent = [-, nc-, nr-, -]kw = {'': extent,'': '','': '','': ''} kw.update(kwargs)im = self.imshow(Z, **kw)self.title.set_y()self.xaxis.tick_top()self.xaxis.set_ticks_position('')self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=,steps=[, , , ],integer=True))self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=,steps=[, , , ],integer=True))return im", "docstring": "Plot a matrix or array as an image.\n\nThe matrix will be shown the way it would be printed,\nwith the first row at the top. Row and column numbering\nis zero-based.\n\nArgument:\n *Z* anything that can be interpreted as a 2-D array\n\nkwargs all are passed to :meth:`~matplotlib.axes.Axes.imshow`.\n:meth:`matshow` sets defaults for *extent*, *origin*,\n*interpolation*, and *aspect*; use care in overriding the\n*extent* and *origin* kwargs, because they interact. (Also,\nif you want to change them, you probably should be using\nimshow directly in your own version of matshow.)\n\nReturns: an :class:`matplotlib.image.AxesImage` instance.", "id": "f17238:c1:m181"} {"signature": "def __init__(self, fig, *args, **kwargs):", "body": "self.figure = figif len(args)==:s = str(args[])if len(s) != :raise ValueError('')rows, cols, num = list(map(int, s))elif len(args)==:rows, cols, num = argselse:raise ValueError( '')total = rows*colsnum -= if num >= total:raise ValueError( '')self._rows = rowsself._cols = colsself._num = numself.update_params()self._axes_class.__init__(self, fig, self.figbox, **kwargs)", "docstring": "*fig* is a :class:`matplotlib.figure.Figure` instance.\n\n*args* is the tuple (*numRows*, *numCols*, *plotNum*), where\nthe array of subplots in the figure has dimensions *numRows*,\n*numCols*, and where *plotNum* is the number of the subplot\nbeing created. *plotNum* starts at 1 in the upper left\ncorner and increases to the right.\n\nIf *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the\ndecimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.", "id": "f17238:c2:m0"} {"signature": "def get_geometry(self):", "body": "return self._rows, self._cols, self._num+", "docstring": "get the subplot geometry, eg 2,2,3", "id": "f17238:c2:m1"} {"signature": "def change_geometry(self, numrows, numcols, num):", "body": "self._rows = numrowsself._cols = numcolsself._num = num-self.update_params()self.set_position(self.figbox)", "docstring": "change subplot geometry, eg. 
from 1,1,1 to 2,2,3", "id": "f17238:c2:m2"} {"signature": "def update_params(self):", "body": "rows = self._rowscols = self._colsnum = self._numpars = self.figure.subplotparsleft = pars.leftright = pars.rightbottom = pars.bottomtop = pars.topwspace = pars.wspacehspace = pars.hspacetotWidth = right-lefttotHeight = top-bottomfigH = totHeight/(rows + hspace*(rows-))sepH = hspace*figHfigW = totWidth/(cols + wspace*(cols-))sepW = wspace*figWrowNum, colNum = divmod(num, cols)figBottom = top - (rowNum+)*figH - rowNum*sepHfigLeft = left + colNum*(figW + sepW)self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,figW, figH)self.rowNum = rowNumself.colNum = colNumself.numRows = rowsself.numCols = colsif :print('', rows, cols, num)print('', left, bottom, right, top)print('', self.figBottom)print('', self.figLeft)print('', self.figW)print('', self.figH)print('', self.rowNum)print('', self.colNum)print('', self.numRows)print('', self.numCols)", "docstring": "update the subplot position from fig.subplotpars", "id": "f17238:c2:m3"} {"signature": "def label_outer(self):", "body": "lastrow = self.is_last_row()firstcol = self.is_first_col()for label in self.get_xticklabels():label.set_visible(lastrow)for label in self.get_yticklabels():label.set_visible(firstcol)", "docstring": "set the visible property on ticklabels so xticklabels are\nvisible only if the subplot is in the last row and yticklabels\nare visible only if the subplot is in the first column", "id": "f17238:c2:m8"} {"signature": "def decade_down(x, base=):", "body": "lx = math.floor(math.log(x)/math.log(base))return base**lx", "docstring": "floor x to the nearest lower decade", "id": "f17239:m2"} {"signature": "def decade_up(x, base=):", "body": "lx = math.ceil(math.log(x)/math.log(base))return base**lx", "docstring": "ceil x to the nearest higher decade", "id": "f17239:m3"} {"signature": "def __call__(self, x, pos=None):", "body": "raise NotImplementedError('')", "docstring": "Return the format for tick val x at position pos; pos=None indicated unspecified", "id": "f17239:c1:m0"} {"signature": "def format_data_short(self,value):", "body": "return self.format_data(value)", "docstring": "return a short string version", "id": "f17239:c1:m2"} {"signature": "def fix_minus(self, s):", "body": "return s", "docstring": "some classes may want to replace a hyphen for minus with the\nproper unicode symbol as described `here\n`_.\nThe default is to do nothing\n\nNote, if you use this method, eg in :meth`format_data` or\ncall, you probably don't want to use it for\n:meth:`format_data_short` since the toolbar uses this for\ninterative coord reporting and I doubt we can expect GUIs\nacross platforms will handle the unicode correctly. So for\nnow the classes that override :meth:`fix_minus` should have an\nexplicit :meth:`format_data_short` method", "id": "f17239:c1:m5"} {"signature": "def __call__(self, x, pos=None):", "body": "return ''", "docstring": "Return the format for tick val *x* at position *pos*", "id": "f17239:c2:m0"} {"signature": "def __init__(self, seq):", "body": "self.seq = seqself.offset_string = ''", "docstring": "seq is a sequence of strings. 
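The subplot geometry arguments described above (*numRows*, *numCols*, *plotNum*, plus the three-digit decimal shorthand) are easiest to see in a short sketch; the figure contents are left empty on purpose::

    import matplotlib.pyplot as plt

    fig = plt.figure()
    ax1 = fig.add_subplot(2, 2, 1)   # 2 rows, 2 columns, panel 1 (upper left)
    ax2 = fig.add_subplot(2, 2, 2)   # panel numbers increase to the right
    ax3 = fig.add_subplot(224)       # shorthand: numRows*100 + numCols*10 + plotNum
    print(ax3.get_geometry())        # -> (2, 2, 4)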
For positions `i=len(self.seq): return ''else: return self.seq[pos]", "docstring": "Return the format for tick val *x* at position *pos*", "id": "f17239:c3:m1"} {"signature": "def __call__(self, x, pos=None):", "body": "return self.func(x, pos)", "docstring": "Return the format for tick val *x* at position *pos*", "id": "f17239:c4:m1"} {"signature": "def __call__(self, x, pos=None):", "body": "return self.fmt % x", "docstring": "Return the format for tick val *x* at position *pos*", "id": "f17239:c5:m1"} {"signature": "def __call__(self, x, pos=None):", "body": "xmin, xmax = self.axis.get_view_interval()d = abs(xmax - xmin)return self.pprint_val(x,d)", "docstring": "Return the format for tick val *x* at position *pos*", "id": "f17239:c6:m0"} {"signature": "def fix_minus(self, s):", "body": "if rcParams[''] or not rcParams['']: return selse: return s.replace('', '')", "docstring": "use a unicode minus rather than hyphen", "id": "f17239:c7:m1"} {"signature": "def __call__(self, x, pos=None):", "body": "if len(self.locs)==:return ''else:s = self.pprint_val(x)return self.fix_minus(s)", "docstring": "Return the format for tick val *x* at position *pos*", "id": "f17239:c7:m2"} {"signature": "def set_scientific(self, b):", "body": "self._scientific = bool(b)", "docstring": "True or False to turn scientific notation on or off\n see also :meth:`set_powerlimits`", "id": "f17239:c7:m3"} {"signature": "def set_powerlimits(self, lims):", "body": "assert len(lims) == , \"\"self._powerlimits = lims", "docstring": "Sets size thresholds for scientific notation.\n\ne.g. ``xaxis.set_powerlimits((-3, 4))`` sets the pre-2007 default in\nwhich scientific notation is used for numbers less than\n1e-3 or greater than 1e4.\nSee also :meth:`set_scientific`.", "id": "f17239:c7:m4"} {"signature": "def format_data_short(self,value):", "body": "return ''%value", "docstring": "return a short formatted string representation of a number", "id": "f17239:c7:m5"} {"signature": "def format_data(self,value):", "body": "s = self._formatSciNotation(''% value)return self.fix_minus(s)", "docstring": "return a formatted string representation of a number", "id": "f17239:c7:m6"} {"signature": "def get_offset(self):", "body": "if len(self.locs)==: return ''s = ''if self.orderOfMagnitude or self.offset:offsetStr = ''sciNotStr = ''if self.offset:offsetStr = self.format_data(self.offset)if self.offset > : offsetStr = '' + offsetStrif self.orderOfMagnitude:if self._usetex or self._useMathText:sciNotStr = self.format_data(**self.orderOfMagnitude)else:sciNotStr = ''% self.orderOfMagnitudeif self._useMathText:if sciNotStr != '':sciNotStr = r'' % sciNotStrs = ''.join(('',sciNotStr,r'',offsetStr,''))elif self._usetex:if sciNotStr != '':sciNotStr = r'' % sciNotStrs = ''.join(('',sciNotStr,offsetStr,''))else:s = ''.join((sciNotStr,offsetStr))return self.fix_minus(s)", "docstring": "Return scientific notation, plus offset", "id": "f17239:c7:m7"} {"signature": "def set_locs(self, locs):", "body": "self.locs = locsif len(self.locs) > :vmin, vmax = self.axis.get_view_interval()d = abs(vmax-vmin)if self._useOffset: self._set_offset(d)self._set_orderOfMagnitude(d)self._set_format()", "docstring": "set the locations of the ticks", "id": "f17239:c7:m8"} {"signature": "def __init__(self, base=, labelOnlyBase = True):", "body": "self._base = base+self.labelOnlyBase=labelOnlyBaseself.decadeOnly = True", "docstring": "*base* is used to locate the decade tick,\nwhich will be the only one to be labeled if *labelOnlyBase*\nis ``False``", "id": "f17239:c8:m0"} 
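A usage sketch for the formatter classes above; the plotted data and the second-based tick label format are invented, only the ``set_powerlimits`` threshold behaviour and the ``func(x, pos)`` calling convention come from the records::

    import matplotlib.pyplot as plt
    from matplotlib.ticker import ScalarFormatter, FuncFormatter

    fig, ax = plt.subplots()
    ax.plot([0, 1, 2, 3], [0.0, 2e-4, 5e-4, 9e-4])

    fmt = ScalarFormatter()
    fmt.set_powerlimits((-3, 4))      # scientific notation outside the 1e-3 .. 1e4 range
    ax.yaxis.set_major_formatter(fmt)

    # a FuncFormatter is called as func(x, pos) and returns the tick string
    ax.xaxis.set_major_formatter(FuncFormatter(lambda x, pos: '%d s' % x))
    plt.show()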
{"signature": "def base(self,base):", "body": "self._base=base", "docstring": "change the *base* for labeling - warning: should always match the base used for :class:`LogLocator`", "id": "f17239:c8:m1"} {"signature": "def label_minor(self,labelOnlyBase):", "body": "self.labelOnlyBase=labelOnlyBase", "docstring": "switch on/off minor ticks labeling", "id": "f17239:c8:m2"} {"signature": "def __call__(self, x, pos=None):", "body": "vmin, vmax = self.axis.get_view_interval()d = abs(vmax - vmin)b=self._baseif x == :return ''sign = np.sign(x)fx = math.log(abs(x))/math.log(b)isDecade = self.is_decade(fx)if not isDecade and self.labelOnlyBase: s = ''elif x>: s= ''%xelif x<: s = ''%xelse : s = self.pprint_val(x,d)if sign == -:s = '' % sreturn self.fix_minus(s)", "docstring": "Return the format for tick val *x* at position *pos*", "id": "f17239:c8:m3"} {"signature": "def format_data_short(self,value):", "body": "return ''%value", "docstring": "return a short formatted string representation of a number", "id": "f17239:c8:m5"} {"signature": "def __call__(self, x, pos=None):", "body": "vmin, vmax = self.axis.get_view_interval()vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = )d = abs(vmax-vmin)b=self._baseif x == :return ''sign = np.sign(x)fx = math.log(abs(x))/math.log(b)isDecade = self.is_decade(fx)if not isDecade and self.labelOnlyBase: s = ''elif fx>: s= ''%fxelif fx<: s = ''%fxelse : s = self.pprint_val(fx,d)if sign == -:s = '' % sreturn self.fix_minus(s)", "docstring": "Return the format for tick val *x* at position *pos*", "id": "f17239:c9:m0"} {"signature": "def __call__(self, x, pos=None):", "body": "b = self._baseif x == :return ''sign = np.sign(x)fx = math.log(abs(x))/math.log(b)isDecade = self.is_decade(fx)usetex = rcParams['']if sign == -:sign_string = ''else:sign_string = ''if not isDecade and self.labelOnlyBase: s = ''elif not isDecade:if usetex:s = r''% (sign_string, b, fx)else:s = ''% (sign_string, b, fx)else:if usetex:s = r''% (sign_string, b, self.nearest_long(fx))else:s = r''% (sign_string, b, self.nearest_long(fx))return s", "docstring": "Return the format for tick val *x* at position *pos*", "id": "f17239:c10:m0"} {"signature": "def __call__(self):", "body": "raise NotImplementedError('')", "docstring": "Return the locations of the ticks", "id": "f17239:c11:m0"} {"signature": "def view_limits(self, vmin, vmax):", "body": "return mtransforms.nonsingular(vmin, vmax)", "docstring": "select a scale for the range from vmin to vmax\n\nNormally This will be overridden.", "id": "f17239:c11:m1"} {"signature": "def autoscale(self):", "body": "return self.view_limits(*self.axis.get_view_interval())", "docstring": "autoscale the view limits", "id": "f17239:c11:m2"} {"signature": "def pan(self, numsteps):", "body": "ticks = self()numticks = len(ticks)vmin, vmax = self.axis.get_view_interval()vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = )if numticks>:step = numsteps*abs(ticks[]-ticks[])else:d = abs(vmax-vmin)step = numsteps*d/vmin += stepvmax += stepself.axis.set_view_interval(vmin, vmax, ignore=True)", "docstring": "Pan numticks (can be positive or negative)", "id": "f17239:c11:m3"} {"signature": "def zoom(self, direction):", "body": "vmin, vmax = self.axis.get_view_interval()vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = )interval = abs(vmax-vmin)step = *interval*directionself.axis.set_view_interval(vmin + step, vmax - step, ignore=True)", "docstring": "Zoom in/out on axis; if direction is >0 zoom in, else zoom out", "id": "f17239:c11:m4"} {"signature": 
"def refresh(self):", "body": "pass", "docstring": "refresh internal information based on current lim", "id": "f17239:c11:m5"} {"signature": "def __init__(self, base, offset):", "body": "self._base = baseself.offset = offset", "docstring": "place ticks on the i-th data points where (i-offset)%base==0", "id": "f17239:c12:m0"} {"signature": "def __call__(self):", "body": "dmin, dmax = self.axis.get_data_interval()return np.arange(dmin + self.offset, dmax+, self._base)", "docstring": "Return the locations of the ticks", "id": "f17239:c12:m1"} {"signature": "def __call__(self):", "body": "if self.nbins is None:return self.locsstep = max(int( + len(self.locs) / float(self.nbins)), )return self.locs[::step]", "docstring": "Return the locations of the ticks", "id": "f17239:c13:m1"} {"signature": "def __call__(self):", "body": "return []", "docstring": "Return the locations of the ticks", "id": "f17239:c14:m0"} {"signature": "def __init__(self, numticks = None, presets=None):", "body": "self.numticks = numticksif presets is None:self.presets = {}else:self.presets = presets", "docstring": "Use presets to set locs based on lom. A dict mapping vmin, vmax->locs", "id": "f17239:c15:m0"} {"signature": "def __call__(self):", "body": "vmin, vmax = self.axis.get_view_interval()vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = )if vmaxvmin, vmax = vmax, vminif (vmin, vmax) in self.presets:return self.presets[(vmin, vmax)]if self.numticks is None:self._set_numticks()if self.numticks==: return []ticklocs = np.linspace(vmin, vmax, self.numticks)return ticklocs", "docstring": "Return the locations of the ticks", "id": "f17239:c15:m1"} {"signature": "def view_limits(self, vmin, vmax):", "body": "if vmaxvmin, vmax = vmax, vminif vmin==vmax:vmin-=vmax+=exponent, remainder = divmod(math.log10(vmax - vmin), )if remainder < :exponent -= scale = **(-exponent)vmin = math.floor(scale*vmin)/scalevmax = math.ceil(scale*vmax)/scalereturn mtransforms.nonsingular(vmin, vmax)", "docstring": "Try to choose the view limits intelligently", "id": "f17239:c15:m3"} {"signature": "def lt(self, x):", "body": "d,m = divmod(x, self._base)if closeto(m,) and not closeto(m/self._base,):return (d-)*self._basereturn d*self._base", "docstring": "return the largest multiple of base < x", "id": "f17239:c16:m1"} {"signature": "def le(self, x):", "body": "d,m = divmod(x, self._base)if closeto(m/self._base,): return (d+)*self._basereturn d*self._base", "docstring": "return the largest multiple of base <= x", "id": "f17239:c16:m2"} {"signature": "def gt(self, x):", "body": "d,m = divmod(x, self._base)if closeto(m/self._base,):return (d+)*self._basereturn (d+)*self._base", "docstring": "return the smallest multiple of base > x", "id": "f17239:c16:m3"} {"signature": "def ge(self, x):", "body": "d,m = divmod(x, self._base)if closeto(m,) and not closeto(m/self._base,):return d*self._basereturn (d+)*self._base", "docstring": "return the smallest multiple of base >= x", "id": "f17239:c16:m4"} {"signature": "def __call__(self):", "body": "vmin, vmax = self.axis.get_view_interval()if vmaxvmin, vmax = vmax, vminvmin = self._base.ge(vmin)base = self._base.get_base()n = (vmax - vmin + *base)//baselocs = vmin + np.arange(n+) * basereturn locs", "docstring": "Return the locations of the ticks", "id": "f17239:c17:m1"} {"signature": "def view_limits(self, dmin, dmax):", "body": "vmin = self._base.le(dmin)vmax = self._base.ge(dmax)if vmin==vmax:vmin -=vmax +=return mtransforms.nonsingular(vmin, vmax)", "docstring": "Set the view limits to the nearest 
multiples of base that\ncontain the data", "id": "f17239:c17:m2"} {"signature": "def __init__(self, base=, subs=[]):", "body": "self.base(base)self.subs(subs)self.numticks = ", "docstring": "place ticks on the location= base**i*subs[j]", "id": "f17239:c19:m0"} {"signature": "def base(self,base):", "body": "self._base=base+", "docstring": "set the base of the log scaling (major tick every base**i, i interger)", "id": "f17239:c19:m1"} {"signature": "def subs(self,subs):", "body": "if subs is None:self._subs = None else:self._subs = np.asarray(subs)+", "docstring": "set the minor ticks the log scaling every base**i*subs[j]", "id": "f17239:c19:m2"} {"signature": "def __call__(self):", "body": "b=self._basevmin, vmax = self.axis.get_view_interval()if vmin <= :vmin = self.axis.get_minpos()if vmin <= :raise ValueError(\"\")vmin = math.log(vmin)/math.log(b)vmax = math.log(vmax)/math.log(b)if vmaxvmin, vmax = vmax, vminnumdec = math.floor(vmax)-math.ceil(vmin)if self._subs is None: if numdec>: subs = np.array([])elif numdec>: subs = np.arange(, b, )else: subs = np.arange(, b)else:subs = self._subsstride = while numdec/stride+ > self.numticks:stride += decades = np.arange(math.floor(vmin),math.ceil(vmax)+stride, stride)if len(subs) > or (len(subs == ) and subs[] != ):ticklocs = []for decadeStart in b**decades:ticklocs.extend( subs*decadeStart )else:ticklocs = b**decadesreturn np.array(ticklocs)", "docstring": "Return the locations of the ticks", "id": "f17239:c19:m4"} {"signature": "def view_limits(self, vmin, vmax):", "body": "if vmaxvmin, vmax = vmax, vminminpos = self.axis.get_minpos()if minpos<=:raise ValueError(\"\")if vmin <= minpos:vmin = minposif not is_decade(vmin,self._base): vmin = decade_down(vmin,self._base)if not is_decade(vmax,self._base): vmax = decade_up(vmax,self._base)if vmin==vmax:vmin = decade_down(vmin,self._base)vmax = decade_up(vmax,self._base)result = mtransforms.nonsingular(vmin, vmax)return result", "docstring": "Try to choose the view limits intelligently", "id": "f17239:c19:m5"} {"signature": "def __init__(self, transform, subs=[]):", "body": "self._transform = transformself._subs = subsself.numticks = ", "docstring": "place ticks on the location= base**i*subs[j]", "id": "f17239:c20:m0"} {"signature": "def __call__(self):", "body": "b = self._transform.basevmin, vmax = self.axis.get_view_interval()vmin, vmax = self._transform.transform((vmin, vmax))if vmaxvmin, vmax = vmax, vminnumdec = math.floor(vmax)-math.ceil(vmin)if self._subs is None:if numdec>: subs = np.array([])elif numdec>: subs = np.arange(, b, )else: subs = np.arange(, b)else:subs = np.asarray(self._subs)stride = while numdec/stride+ > self.numticks:stride += decades = np.arange(math.floor(vmin), math.ceil(vmax)+stride, stride)if len(subs) > or subs[] != :ticklocs = []for decade in decades:ticklocs.extend(subs * (np.sign(decade) * b ** np.abs(decade)))else:ticklocs = np.sign(decades) * b ** np.abs(decades)return np.array(ticklocs)", "docstring": "Return the locations of the ticks", "id": "f17239:c20:m2"} {"signature": "def view_limits(self, vmin, vmax):", "body": "b = self._transform.baseif vmaxvmin, vmax = vmax, vminif not is_decade(abs(vmin), b):if vmin < :vmin = -decade_up(-vmin, b)else:vmin = decade_down(vmin, b)if not is_decade(abs(vmax), b):if vmax < :vmax = -decade_down(-vmax, b)else:vmax = decade_up(vmax, b)if vmin == vmax:if vmin < :vmin = -decade_up(-vmin, b)vmax = -decade_down(-vmax, b)else:vmin = decade_down(vmin, b)vmax = decade_up(vmax, b)result = mtransforms.nonsingular(vmin, vmax)return 
result", "docstring": "Try to choose the view limits intelligently", "id": "f17239:c20:m3"} {"signature": "def __call__(self):", "body": "self.refresh()return self._locator()", "docstring": "Return the locations of the ticks", "id": "f17239:c22:m1"} {"signature": "def refresh(self):", "body": "vmin, vmax = self.axis.get_view_interval()vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = )d = abs(vmax-vmin)self._locator = self.get_locator(d)", "docstring": "refresh internal information based on current lim", "id": "f17239:c22:m2"} {"signature": "def view_limits(self, vmin, vmax):", "body": "d = abs(vmax-vmin)self._locator = self.get_locator(d)return self._locator.view_limits(vmin, vmax)", "docstring": "Try to choose the view limits intelligently", "id": "f17239:c22:m3"} {"signature": "def get_locator(self, d):", "body": "d = abs(d)if d<=:locator = MultipleLocator()else:try: ld = math.log10(d)except OverflowError:raise RuntimeError('')fld = math.floor(ld)base = **fldif d >= *base : ticksize = baseelif d >= *base : ticksize = base/else : ticksize = base/locator = MultipleLocator(ticksize)return locator", "docstring": "pick the best locator based on a distance", "id": "f17239:c22:m4"} {"signature": "def get_intersection(cx1, cy1, cos_t1, sin_t1,cx2, cy2, cos_t2, sin_t2):", "body": "line1_rhs = sin_t1 * cx1 - cos_t1 * cy1line2_rhs = sin_t2 * cx2 - cos_t2 * cy2a, b = sin_t1, -cos_t1c, d = sin_t2, -cos_t2ad_bc = a*d-b*cif ad_bc == :raise ValueError(\"\")a_, b_ = d, -bc_, d_ = -c, aa_, b_, c_, d_ = [k / ad_bc for k in [a_, b_, c_, d_]]x = a_* line1_rhs + b_ * line2_rhsy = c_* line1_rhs + d_ * line2_rhsreturn x, y", "docstring": "return a intersecting point between a line through (cx1, cy1)\n and having angle t1 and a line through (cx2, cy2) and angle t2.", "id": "f17240:m0"} {"signature": "def get_normal_points(cx, cy, cos_t, sin_t, length):", "body": "if length == :return cx, cy, cx, cycos_t1, sin_t1 = sin_t, -cos_tcos_t2, sin_t2 = -sin_t, cos_tx1, y1 = length*cos_t1 + cx, length*sin_t1 + cyx2, y2 = length*cos_t2 + cx, length*sin_t2 + cyreturn x1, y1, x2, y2", "docstring": "For a line passing through (*cx*, *cy*) and having a angle *t*,\nreturn locations of the two points located along its perpendicular line at the distance of *length*.", "id": "f17240:m1"} {"signature": "def split_de_casteljau(beta, t):", "body": "beta = np.asarray(beta)beta_list = [beta]while True:beta = _de_casteljau1(beta, t)beta_list.append(beta)if len(beta) == :breakleft_beta = [beta[] for beta in beta_list]right_beta = [beta[-] for beta in reversed(beta_list)]return left_beta, right_beta", "docstring": "split a bezier segment defined by its controlpoints *beta*\n into two separate segment divided at *t* and return their control points.", "id": "f17240:m3"} {"signature": "def find_bezier_t_intersecting_with_closedpath(bezier_point_at_t, inside_closedpath, t0=, t1=, tolerence=):", "body": "start = bezier_point_at_t(t0)end = bezier_point_at_t(t1)start_inside = inside_closedpath(start)end_inside = inside_closedpath(end)if not xor(start_inside, end_inside):raise ValueError(\"\")while :if (start[]-end[])** + (start[]-end[])** < tolerence**:return t0, t1middle_t = *(t0+t1)middle = bezier_point_at_t(middle_t)middle_inside = inside_closedpath(middle)if xor(start_inside, middle_inside):t1 = middle_tend = middleend_inside = middle_insideelse:t0 = middle_tstart = middlestart_inside = middle_inside", "docstring": "Find a parameter t0 and t1 of the given bezier path which\n bounds the intersecting points with a provided closed\n 
path(*inside_closedpath*). Search starts from *t0* and *t1* and it\n uses a simple bisecting algorithm therefore one of the end point\n must be inside the path while the orther doesn't. The search stop\n when |t0-t1| gets smaller than the given tolerence. \n value for\n\n - bezier_point_at_t : a function which returns x, y coordinates at *t*\n\n - inside_closedpath : return True if the point is insed the path", "id": "f17240:m4"} {"signature": "def split_bezier_intersecting_with_closedpath(bezier,inside_closedpath, tolerence=):", "body": "bz = BezierSegment(bezier)bezier_point_at_t = bz.point_at_tt0, t1 = find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,inside_closedpath, tolerence=tolerence)_left, _right = split_de_casteljau(bezier, (t0+t1)/)return _left, _right", "docstring": "bezier : control points of the bezier segment\ninside_closedpath : a function which returns true if the point is inside the path", "id": "f17240:m5"} {"signature": "def find_r_to_boundary_of_closedpath(inside_closedpath, xy,cos_t, sin_t, rmin=, rmax=, tolerence=):", "body": "cx, cy = xydef _f(r):return cos_t*r + cx, sin_t*r + cyfind_bezier_t_intersecting_with_closedpath(_f, inside_closedpath, t0=rmin, t1=rmax, tolerence=tolerence)", "docstring": "Find a radius r (centered at *xy*) between *rmin* and *rmax* at\nwhich it intersect with the path.\n\ninside_closedpath : function\ncx, cy : center\ncos_t, sin_t : cosine and sine for the angle\nrmin, rmax :", "id": "f17240:m6"} {"signature": "def split_path_inout(path, inside, tolerence=, reorder_inout=False):", "body": "path_iter = path.iter_segments()ctl_points, command = path_iter.next()begin_inside = inside(ctl_points[-:]) bezier_path = Nonectl_points_old = ctl_pointsconcat = np.concatenateiold=i = for ctl_points, command in path_iter:iold=ii += len(ctl_points)/if inside(ctl_points[-:]) != begin_inside:bezier_path = concat([ctl_points_old[-:], ctl_points])breakctl_points_old = ctl_pointsif bezier_path is None:raise ValueError(\"\")bp = zip(bezier_path[::], bezier_path[::])left, right = split_bezier_intersecting_with_closedpath(bp,inside,tolerence)if len(left) == :codes_left = [Path.LINETO]codes_right = [Path.MOVETO, Path.LINETO]elif len(left) == :codes_left = [Path.CURVE3, Path.CURVE3]codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3]elif len(left) == :codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4]codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]else:raise ValueError()verts_left = left[:]verts_right = right[:]if path.codes is None:path_in = Path(concat([path.vertices[:i], verts_left]))path_out = Path(concat([verts_right, path.vertices[i:]]))else:path_in = Path(concat([path.vertices[:iold], verts_left]),concat([path.codes[:iold], codes_left]))path_out = Path(concat([verts_right, path.vertices[i:]]),concat([codes_right, path.codes[i:]]))if reorder_inout and begin_inside == False:path_in, path_out = path_out, path_inreturn path_in, path_out", "docstring": "divide a path into two segment at the point where inside(x, y)\n becomes False.", "id": "f17240:m7"} {"signature": "def get_parallels(bezier2, width):", "body": "c1x, c1y = bezier2[]cmx, cmy = bezier2[]c2x, c2y = bezier2[]cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)c1x_left, c1y_left, c1x_right, c1y_right =get_normal_points(c1x, c1y, cos_t1, sin_t1, width)c2x_left, c2y_left, c2x_right, c2y_right =get_normal_points(c2x, c2y, cos_t2, sin_t2, width)cmx_left, cmy_left = get_intersection(c1x_left, c1y_left, cos_t1, sin_t1,c2x_left, 
c2y_left, cos_t2, sin_t2)cmx_right, cmy_right = get_intersection(c1x_right, c1y_right, cos_t1, sin_t1,c2x_right, c2y_right, cos_t2, sin_t2)path_left = [(c1x_left, c1y_left), (cmx_left, cmy_left), (c2x_left, c2y_left)]path_right = [(c1x_right, c1y_right), (cmx_right, cmy_right), (c2x_right, c2y_right)]return path_left, path_right", "docstring": "Given the quadraitc bezier control points *bezier2*, returns\ncontrol points of quadrativ bezier lines roughly parralel to given\none separated by *width*.", "id": "f17240:m10"} {"signature": "def make_wedged_bezier2(bezier2, length, shrink_factor=):", "body": "xx1, yy1 = bezier2[]xx2, yy2 = bezier2[]xx3, yy3 = bezier2[]cx, cy = xx3, yy3x0, y0 = xx2, yy2dist = sqrt((x0-cx)** + (y0-cy)**)cos_t, sin_t = (x0-cx)/dist, (y0-cy)/dist,x1, y1, x2, y2 = get_normal_points(cx, cy, cos_t, sin_t, length)xx12, yy12 = (xx1+xx2)/, (yy1+yy2)/, xx23, yy23 = (xx2+xx3)/, (yy2+yy3)/, dist = sqrt((xx12-xx23)** + (yy12-yy23)**)cos_t, sin_t = (xx12-xx23)/dist, (yy12-yy23)/dist,xm1, ym1, xm2, ym2 = get_normal_points(xx2, yy2, cos_t, sin_t, length*shrink_factor)l_plus = [(x1, y1), (xm1, ym1), (xx1, yy1)]l_minus = [(x2, y2), (xm2, ym2), (xx1, yy1)]return l_plus, l_minus", "docstring": "Being similar to get_parallels, returns\ncontrol points of two quadrativ bezier lines having a width roughly parralel to given\none separated by *width*.", "id": "f17240:m11"} {"signature": "def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):", "body": "cmx = * (*mmx - (c1x + c2x))cmy = * (*mmy - (c1y + c2y))return [(c1x, c1y), (cmx, cmy), (c2x, c2y)]", "docstring": "Find control points of the bezier line throught c1, mm, c2. We\n simply assume that c1, mm, c2 which have parameteric value 0, 0.5, and 1.", "id": "f17240:m12"} {"signature": "def make_wedged_bezier2(bezier2, width, w1=, wm=, w2=):", "body": "c1x, c1y = bezier2[]cmx, cmy = bezier2[]c3x, c3y = bezier2[]cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)c1x_left, c1y_left, c1x_right, c1y_right =get_normal_points(c1x, c1y, cos_t1, sin_t1, width*w1)c3x_left, c3y_left, c3x_right, c3y_right =get_normal_points(c3x, c3y, cos_t2, sin_t2, width*w2)c12x, c12y = (c1x+cmx)*, (c1y+cmy)*c23x, c23y = (cmx+c3x)*, (cmy+c3y)* c123x, c123y = (c12x+c23x)*, (c12y+c23y)*cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)c123x_left, c123y_left, c123x_right, c123y_right =get_normal_points(c123x, c123y, cos_t123, sin_t123, width*wm)path_left = find_control_points(c1x_left, c1y_left,c123x_left, c123y_left,c3x_left, c3y_left)path_right = find_control_points(c1x_right, c1y_right,c123x_right, c123y_right,c3x_right, c3y_right)return path_left, path_right", "docstring": "Being similar to get_parallels, returns\ncontrol points of two quadrativ bezier lines having a width roughly parralel to given\none separated by *width*.", "id": "f17240:m13"} {"signature": "def __init__(self, control_points):", "body": "_o = len(control_points)self._orders = np.arange(_o)_coeff = BezierSegment._binom_coeff[_o - ]_control_points = np.asarray(control_points)xx = _control_points[:,]yy = _control_points[:,]self._px = xx * _coeffself._py = yy * _coeff", "docstring": "*control_points* : location of contol points. It needs have a\n shpae of n * 2, where n is the order of the bezier line. 
1<=\n n <= 3 is supported.", "id": "f17240:c0:m0"} {"signature": "def point_at_t(self, t):", "body": "one_minus_t_powers = np.power(-t, self._orders)[::-]t_powers = np.power(t, self._orders)tt = one_minus_t_powers * t_powers_x = sum(tt * self._px)_y = sum(tt * self._py)return _x, _y", "docstring": "evaluate a point at t", "id": "f17240:c0:m1"} {"signature": "def is_color_like(c):", "body": "try:colorConverter.to_rgb(c)return Trueexcept ValueError:return False", "docstring": "Return *True* if *c* can be converted to *RGB*", "id": "f17241:m0"} {"signature": "def rgb2hex(rgb):", "body": "return '' % tuple([round(val*) for val in rgb])", "docstring": "Given a len 3 rgb tuple of 0-1 floats, return the hex string", "id": "f17241:m1"} {"signature": "def hex2color(s):", "body": "if not isinstance(s, str):raise TypeError('')if hexColorPattern.match(s) is None:raise ValueError('' % s)return tuple([int(n, )/ for n in (s[:], s[:], s[:])])", "docstring": "Take a hex string *s* and return the corresponding rgb 3-tuple\nExample: #efefef -> (0.93725, 0.93725, 0.93725)", "id": "f17241:m2"} {"signature": "def makeMappingArray(N, data):", "body": "try:adata = np.array(data)except:raise TypeError(\"\")shape = adata.shapeif len(shape) != and shape[] != :raise ValueError(\"\")x = adata[:,]y0 = adata[:,]y1 = adata[:,]if x[] != or x[-] != :raise ValueError(\"\")if np.sometrue(np.sort(x)-x):raise ValueError(\"\")x = x * (N-)lut = np.zeros((N,), np.float)xind = np.arange(float(N))ind = np.searchsorted(x, xind)[:-]lut[:-] = ( ((xind[:-] - x[ind-]) / (x[ind] - x[ind-]))* (y0[ind] - y1[ind-]) + y1[ind-])lut[] = y1[]lut[-] = y0[-]np.clip(lut, , )return lut", "docstring": "Create an *N* -element 1-d lookup table\n\n *data* represented by a list of x,y0,y1 mapping correspondences.\n Each element in this list represents how a value between 0 and 1\n (inclusive) represented by x is mapped to a corresponding value\n between 0 and 1 (inclusive). The two values of y are to allow\n for discontinuous mapping functions (say as might be found in a\n sawtooth) where y0 represents the value of y for values of x\n <= to that given, and y1 is the value to be used for x > than\n that given). The list must start with x=0, end with x=1, and\n all values of x must be in increasing order. 
Values between\n the given mapping points are determined by simple linear interpolation.\n\n The function returns an array \"result\" where ``result[x*(N-1)]``\n gives the closest value for values of x between 0 and 1.", "id": "f17241:m3"} {"signature": "def to_rgb(self, arg):", "body": "try: return self.cache[arg]except KeyError: passexcept TypeError: arg = tuple(arg)try: return self.cache[arg]except KeyError: passexcept TypeError:raise ValueError(''% (str(arg),))try:if cbook.is_string_like(arg):color = self.colors.get(arg, None)if color is None:str1 = cnames.get(arg, arg)if str1.startswith(''):color = hex2color(str1)else:fl = float(arg)if fl < or fl > :raise ValueError('')color = tuple([fl]*)elif cbook.iterable(arg):if len(arg) > or len(arg) < :raise ValueError(''%len(arg))color = tuple(arg[:])if [x for x in color if (float(x) < ) or (x > )]:raise ValueError('')else:raise ValueError('')self.cache[arg] = colorexcept (KeyError, ValueError, TypeError) as exc:raise ValueError('' % (str(arg), exc))return color", "docstring": "Returns an *RGB* tuple of three floats from 0-1.\n\n*arg* can be an *RGB* or *RGBA* sequence or a string in any of\nseveral forms:\n\n 1) a letter from the set 'rgbcmykw'\n 2) a hex color string, like '#00FFFF'\n 3) a standard name, like 'aqua'\n 4) a float, like '0.4', indicating gray on a 0-1 scale\n\nif *arg* is *RGBA*, the *A* will simply be discarded.", "id": "f17241:c0:m0"} {"signature": "def to_rgba(self, arg, alpha=None):", "body": "try:if not cbook.is_string_like(arg) and cbook.iterable(arg):if len(arg) == :if [x for x in arg if (float(x) < ) or (x > )]:raise ValueError('')if alpha is None:return tuple(arg)if alpha < or alpha > :raise ValueError(\"\")return arg[], arg[], arg[], arg[] * alphar,g,b = arg[:]if [x for x in (r,g,b) if (float(x) < ) or (x > )]:raise ValueError('')else:r,g,b = self.to_rgb(arg)if alpha is None:alpha = return r,g,b,alphaexcept (TypeError, ValueError) as exc:raise ValueError('' % (str(arg), exc))", "docstring": "Returns an *RGBA* tuple of four floats from 0-1.\n\nFor acceptable values of *arg*, see :meth:`to_rgb`.\nIf *arg* is an *RGBA* sequence and *alpha* is not *None*,\n*alpha* will replace the original *A*.", "id": "f17241:c0:m1"} {"signature": "def to_rgba_array(self, c, alpha=None):", "body": "try:if c.lower() == '':return np.zeros((,), dtype=np.float_)except AttributeError:passif len(c) == :return np.zeros((,), dtype=np.float_)try:result = np.array([self.to_rgba(c, alpha)], dtype=np.float_)except ValueError:if isinstance(c, np.ndarray):if c.ndim != and c.dtype.kind not in '':raise ValueError(\"\")result = np.zeros((len(c), ))for i, cc in enumerate(c):result[i] = self.to_rgba(cc, alpha) return np.asarray(result, np.float_)", "docstring": "Returns a numpy array of *RGBA* tuples.\n\nAccepts a single mpl color spec or a sequence of specs.\n\nSpecial case to handle \"no color\": if *c* is \"none\" (case-insensitive),\nthen an empty array will be returned. 
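The colour specifications accepted by ``to_rgb`` above can be exercised through the module-level ``colorConverter`` instance; a small sketch (expected return values shown as comments)::

    from matplotlib.colors import colorConverter

    colorConverter.to_rgb('r')                  # letter from 'rgbcmykw'  -> (1.0, 0.0, 0.0)
    colorConverter.to_rgb('#00ffff')            # hex string              -> (0.0, 1.0, 1.0)
    colorConverter.to_rgb('0.4')                # gray level as a string  -> (0.4, 0.4, 0.4)
    colorConverter.to_rgba('red', alpha=0.5)    # named colour plus alpha -> (1.0, 0.0, 0.0, 0.5)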
Same for an empty list.", "id": "f17241:c0:m2"} {"signature": "def __init__(self, name, N=):", "body": "self.name = nameself.N = Nself._rgba_bad = (, , , ) self._rgba_under = Noneself._rgba_over = Noneself._i_under = Nself._i_over = N+self._i_bad = N+self._isinit = False", "docstring": "Public class attributes:\n :attr:`N` : number of rgb quantization levels\n :attr:`name` : name of colormap", "id": "f17241:c1:m0"} {"signature": "def __call__(self, X, alpha=, bytes=False):", "body": "if not self._isinit: self._init()alpha = min(alpha, ) alpha = max(alpha, )self._lut[:-, -] = alphamask_bad = Noneif not cbook.iterable(X):vtype = ''xa = np.array([X])else:vtype = ''xma = ma.asarray(X)xa = xma.filled()mask_bad = ma.getmask(xma)if xa.dtype.char in np.typecodes['']:np.putmask(xa, xa==, ) if NP_CLIP_OUT:np.clip(xa * self.N, -, self.N, out=xa)else:xa = np.clip(xa * self.N, -, self.N)xa = xa.astype(int)np.putmask(xa, xa>self.N-, self._i_over)np.putmask(xa, xa<, self._i_under)if mask_bad is not None and mask_bad.shape == xa.shape:np.putmask(xa, mask_bad, self._i_bad)if bytes:lut = (self._lut * ).astype(np.uint8)else:lut = self._lutrgba = np.empty(shape=xa.shape+(,), dtype=lut.dtype)lut.take(xa, axis=, mode='', out=rgba)if vtype == '':rgba = tuple(rgba[,:])return rgba", "docstring": "*X* is either a scalar or an array (of any dimension).\nIf scalar, a tuple of rgba values is returned, otherwise\nan array with the new shape = oldshape+(4,). If the X-values\nare integers, then they are used as indices into the array.\nIf they are floating point, then they must be in the\ninterval (0.0, 1.0).\nAlpha must be a scalar.\nIf bytes is False, the rgba values will be floats on a\n0-1 scale; if True, they will be uint8, 0-255.", "id": "f17241:c1:m1"} {"signature": "def set_bad(self, color = '', alpha = ):", "body": "self._rgba_bad = colorConverter.to_rgba(color, alpha)if self._isinit: self._set_extremes()", "docstring": "Set color to be used for masked values.", "id": "f17241:c1:m2"} {"signature": "def set_under(self, color = '', alpha = ):", "body": "self._rgba_under = colorConverter.to_rgba(color, alpha)if self._isinit: self._set_extremes()", "docstring": "Set color to be used for low out-of-range values.\n Requires norm.clip = False", "id": "f17241:c1:m3"} {"signature": "def set_over(self, color = '', alpha = ):", "body": "self._rgba_over = colorConverter.to_rgba(color, alpha)if self._isinit: self._set_extremes()", "docstring": "Set color to be used for high out-of-range values.\n Requires norm.clip = False", "id": "f17241:c1:m4"} {"signature": "def _init():", "body": "raise NotImplementedError(\"\")", "docstring": "Generate the lookup table, self._lut", "id": "f17241:c1:m6"} {"signature": "def __init__(self, name, segmentdata, N=):", "body": "self.monochrome = False Colormap.__init__(self, name, N)self._segmentdata = segmentdata", "docstring": "Create color map from linear mapping segments\n\n segmentdata argument is a dictionary with a red, green and blue\n entries. Each entry should be a list of *x*, *y0*, *y1* tuples,\n forming rows in a table.\n\n Example: suppose you want red to increase from 0 to 1 over\n the bottom half, green to do the same over the middle half,\n and blue over the top half. 
Then you would use::\n\n cdict = {'red': [(0.0, 0.0, 0.0),\n (0.5, 1.0, 1.0),\n (1.0, 1.0, 1.0)],\n\n 'green': [(0.0, 0.0, 0.0),\n (0.25, 0.0, 0.0),\n (0.75, 1.0, 1.0),\n (1.0, 1.0, 1.0)],\n\n 'blue': [(0.0, 0.0, 0.0),\n (0.5, 0.0, 0.0),\n (1.0, 1.0, 1.0)]}\n\n Each row in the table for a given color is a sequence of\n *x*, *y0*, *y1* tuples. In each sequence, *x* must increase\n monotonically from 0 to 1. For any input value *z* falling\n between *x[i]* and *x[i+1]*, the output value of a given color\n will be linearly interpolated between *y1[i]* and *y0[i+1]*::\n\n row i: x y0 y1\n /\n /\n row i+1: x y0 y1\n\n Hence y0 in the first row and y1 in the last row are never used.\n\n\n .. seealso::\n :func:`makeMappingArray`", "id": "f17241:c2:m0"} {"signature": "def __init__(self, colors, name = '', N = None):", "body": "self.colors = colorsself.monochrome = False if N is None:N = len(self.colors)else:if cbook.is_string_like(self.colors):self.colors = [self.colors] * Nself.monochrome = Trueelif cbook.iterable(self.colors):self.colors = list(self.colors) if len(self.colors) == :self.monochrome = Trueif len(self.colors) < N:self.colors = list(self.colors) * Ndel(self.colors[N:])else:try: gray = float(self.colors)except TypeError: passelse: self.colors = [gray] * Nself.monochrome = TrueColormap.__init__(self, name, N)", "docstring": "Make a colormap from a list of colors.\n\n*colors*\n a list of matplotlib color specifications,\n or an equivalent Nx3 floating point array (*N* rgb values)\n*name*\n a string to identify the colormap\n*N*\n the number of entries in the map. The default is *None*,\n in which case there is one colormap entry for each\n element in the list of colors. If::\n\n N < len(colors)\n\n the list will be truncated at *N*. If::\n\n N > len(colors)\n\n the list will be extended by repetition.", "id": "f17241:c3:m0"} {"signature": "def __init__(self, vmin=None, vmax=None, clip=False):", "body": "self.vmin = vminself.vmax = vmaxself.clip = clip", "docstring": "If *vmin* or *vmax* is not given, they are taken from the input's\nminimum and maximum value respectively. If *clip* is *True* and\nthe given value falls outside the range, the returned value\nwill be 0 or 1, whichever is closer. Returns 0 if::\n\n vmin==vmax\n\nWorks with scalars or arrays, including masked arrays. If\n*clip* is *True*, masked values are set to 1; otherwise they\nremain masked. 
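The ``set_under``/``set_over``/``set_bad`` colours described above only appear when the norm does not clip; a minimal sketch with a hand-built :class:`ListedColormap` and invented data (one masked entry, one value below *vmin*, one above *vmax*)::

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.colors import ListedColormap

    cmap = ListedColormap(['navy', 'royalblue', 'skyblue'])
    cmap.set_under('green')    # low out-of-range values
    cmap.set_over('red')       # high out-of-range values
    cmap.set_bad('0.5')        # masked values, drawn in gray

    data = np.ma.masked_invalid([[0.1, 0.5, 1.7], [np.nan, -0.2, 0.9]])
    fig, ax = plt.subplots()
    im = ax.imshow(data, cmap=cmap, vmin=0.0, vmax=1.0, interpolation='nearest')
    fig.colorbar(im, extend='both')   # arrows pick up the under/over colours
    plt.show()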
Clipping silently defeats the purpose of setting\nthe over, under, and masked colors in the colormap, so it is\nlikely to lead to surprises; therefore the default is\n*clip* = *False*.", "id": "f17241:c4:m0"} {"signature": "def autoscale(self, A):", "body": "self.vmin = ma.minimum(A)self.vmax = ma.maximum(A)", "docstring": "Set *vmin*, *vmax* to min, max of *A*.", "id": "f17241:c4:m3"} {"signature": "def autoscale_None(self, A):", "body": "if self.vmin is None: self.vmin = ma.minimum(A)if self.vmax is None: self.vmax = ma.maximum(A)", "docstring": "autoscale only None-valued vmin or vmax", "id": "f17241:c4:m4"} {"signature": "def scaled(self):", "body": "return (self.vmin is not None and self.vmax is not None)", "docstring": "return true if vmin and vmax set", "id": "f17241:c4:m5"} {"signature": "def __init__(self, boundaries, ncolors, clip=False):", "body": "self.clip = clipself.vmin = boundaries[]self.vmax = boundaries[-]self.boundaries = np.asarray(boundaries)self.N = len(self.boundaries)self.Ncmap = ncolorsif self.N- == self.Ncmap:self._interp = Falseelse:self._interp = True", "docstring": "*boundaries*\n a monotonically increasing sequence\n*ncolors*\n number of colors in the colormap to be used\n\nIf::\n\n b[i] <= v < b[i+1]\n\nthen v is mapped to color j;\nas i varies from 0 to len(boundaries)-2,\nj goes from 0 to ncolors-1.\n\nOut-of-range values are mapped to -1 if low and ncolors\nif high; these are converted to valid indices by\n:meth:`Colormap.__call__` .", "id": "f17241:c6:m0"} {"signature": "def _get_packed_offsets(wd_list, total, sep, mode=\"\"):", "body": "w_list, d_list = zip(*wd_list)if mode == \"\":offsets_ = np.add.accumulate([]+[w + sep for w in w_list])offsets = offsets_[:-]if total is None:total = offsets_[-] - sepreturn total, offsetselif mode == \"\":sep = (total - sum(w_list))/(len(w_list)-)offsets_ = np.add.accumulate([]+[w + sep for w in w_list])offsets = offsets_[:-]return total, offsetselif mode == \"\":maxh = max(w_list)if total is None:total = (maxh+sep)*len(w_list)else:sep = float(total)/(len(w_list)) - maxhoffsets = np.array([(maxh+sep)*i for i in range(len(w_list))])return total, offsetselse:raise ValueError(\"\" % (mode,))", "docstring": "Geiven a list of (width, xdescent) of each boxes, calculate the\ntotal width and the x-offset positions of each items according to\n*mode*. xdescent is analagous to the usual descent, but along the\nx-direction. xdescent values are currently ignored.\n\n*wd_list* : list of (width, xdescent) of boxes to be packed.\n*sep* : spacing between boxes\n*total* : Intended total length. None if not used.\n*mode* : packing mode. 'fixed', 'expand', or 'equal'.", "id": "f17242:m1"} {"signature": "def _get_aligned_offsets(hd_list, height, align=\"\"):", "body": "if height is None:height = max([h for h, d in hd_list])if align == \"\":height_descent = max([h-d for h, d in hd_list])descent = max([d for h, d in hd_list])height = height_descent + descentoffsets = [ for h, d in hd_list]elif align in [\"\",\"\"]:descent=offsets = [d for h, d in hd_list]elif align in [\"\",\"\"]:descent=offsets = [height-h+d for h, d in hd_list]elif align == \"\":descent=offsets = [(height-h)*+d for h, d in hd_list]else:raise ValueError(\"\" % (align,))return height, descent, offsets", "docstring": "Geiven a list of (height, descent) of each boxes, align the boxes\nwith *align* and calculate the y-offsets of each boxes.\ntotal width and the offset positions of each items according to\n*mode*. 
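A short sketch of the :class:`BoundaryNorm` mapping described above; the boundary values, colours and random data are invented, and ``ncolors`` is chosen as ``len(boundaries) - 1`` so each interval maps to exactly one colour::

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.colors import BoundaryNorm, ListedColormap

    boundaries = [0, 1, 2, 4, 8]                          # monotonically increasing
    cmap = ListedColormap(['blue', 'green', 'yellow', 'red'])
    norm = BoundaryNorm(boundaries, cmap.N)               # cmap.N == len(boundaries) - 1

    data = np.random.uniform(0, 8, size=(10, 10))
    fig, ax = plt.subplots()
    im = ax.pcolormesh(data, cmap=cmap, norm=norm)
    fig.colorbar(im, ticks=boundaries)
    plt.show()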
xdescent is analagous to the usual descent, but along the\nx-direction. xdescent values are currently ignored.\n\n*hd_list* : list of (width, xdescent) of boxes to be aligned.\n*sep* : spacing between boxes\n*height* : Intended total length. None if not used.\n*align* : align mode. 'baseline', 'top', 'bottom', or 'center'.", "id": "f17242:m2"} {"signature": "def set_figure(self, fig):", "body": "martist.Artist.set_figure(self, fig)for c in self.get_children():c.set_figure(fig)", "docstring": "Set the figure\n\naccepts a class:`~matplotlib.figure.Figure` instance", "id": "f17242:c0:m1"} {"signature": "def set_offset(self, xy):", "body": "self._offset = xy", "docstring": "Set the offset\n\naccepts x, y, tuple, or a callable object.", "id": "f17242:c0:m2"} {"signature": "def get_offset(self, width, height, xdescent, ydescent):", "body": "if callable(self._offset):return self._offset(width, height, xdescent, ydescent)else:return self._offset", "docstring": "Get the offset\n\naccepts extent of the box", "id": "f17242:c0:m3"} {"signature": "def set_width(self, width):", "body": "self.width = width", "docstring": "Set the width\n\naccepts float", "id": "f17242:c0:m4"} {"signature": "def set_height(self, height):", "body": "self.height = height", "docstring": "Set the height\n\naccepts float", "id": "f17242:c0:m5"} {"signature": "def get_children(self):", "body": "return self._children", "docstring": "Return a list of artists it contains.", "id": "f17242:c0:m6"} {"signature": "def get_extent(self, renderer):", "body": "w, h, xd, yd, offsets = self.get_extent_offsets(renderer)return w, h, xd, yd", "docstring": "Return with, height, xdescent, ydescent of box", "id": "f17242:c0:m8"} {"signature": "def get_window_extent(self, renderer):", "body": "w, h, xd, yd, offsets = self.get_extent_offsets(renderer)px, py = self.get_offset(w, h, xd, yd)return mtransforms.Bbox.from_bounds(px-xd, py-yd, w, h)", "docstring": "get the bounding box in display space.", "id": "f17242:c0:m9"} {"signature": "def draw(self, renderer):", "body": "width, height, xdescent, ydescent, offsets = self.get_extent_offsets(renderer)px, py = self.get_offset(width, height, xdescent, ydescent)for c, (ox, oy) in zip(self.get_children(), offsets):c.set_offset((px+ox, py+oy))c.draw(renderer)bbox_artist(self, renderer, fill=False, props=dict(pad=))", "docstring": "Update the location of children if necessary and draw them\nto the given *renderer*.", "id": "f17242:c0:m10"} {"signature": "def __init__(self, pad=None, sep=None, width=None, height=None,align=None, mode=None,children=None):", "body": "super(PackerBase, self).__init__()self.height = heightself.width = widthself.sep = sepself.pad = padself.mode = modeself.align = alignself._children = children", "docstring": "*pad* : boundary pad\n*sep* : spacing between items\n*width*, *height* : width and height of the container box.\n calculated if None.\n*align* : alignment of boxes\n*mode* : packing mode", "id": "f17242:c1:m0"} {"signature": "def __init__(self, pad=None, sep=None, width=None, height=None,align=\"\", mode=\"\",children=None):", "body": "super(VPacker, self).__init__(pad, sep, width, height,align, mode, children)", "docstring": "*pad* : boundary pad\n*sep* : spacing between items\n*width*, *height* : width and height of the container box.\n calculated if None.\n*align* : alignment of boxes\n*mode* : packing mode", "id": "f17242:c2:m0"} {"signature": "def get_extent_offsets(self, renderer):", "body": "whd_list = [c.get_extent(renderer) for c in self.get_children()]whd_list = 
[(w, h, xd, (h-yd)) for w, h, xd, yd in whd_list]wd_list = [(w, xd) for w, h, xd, yd in whd_list]width, xdescent, xoffsets = _get_aligned_offsets(wd_list,self.width,self.align)pack_list = [(h, yd) for w,h,xd,yd in whd_list]height, yoffsets_ = _get_packed_offsets(pack_list, self.height,self.sep, self.mode)yoffsets = yoffsets_ + [yd for w,h,xd,yd in whd_list]ydescent = height - yoffsets[]yoffsets = height - yoffsetsyoffsets = yoffsets - ydescentreturn width + *self.pad, height + *self.pad,xdescent+self.pad, ydescent+self.pad,zip(xoffsets, yoffsets)", "docstring": "Update the offsets of the children and return the extents of the box", "id": "f17242:c2:m1"} {"signature": "def __init__(self, pad=None, sep=None, width=None, height=None, align=\"\", mode=\"\",children=None):", "body": "super(HPacker, self).__init__(pad, sep, width, height,align, mode, children)", "docstring": "*pad* : boundary pad\n*sep* : spacing between items\n*width*, *height* : width and height of the container box.\n   calculated if None.\n*align* : alignment of boxes\n*mode* : packing mode", "id": "f17242:c3:m0"} {"signature": "def get_extent_offsets(self, renderer):", "body": "whd_list = [c.get_extent(renderer) for c in self.get_children()]if self.height is None:height_descent = max([h-yd for w,h,xd,yd in whd_list]) ydescent = max([yd for w,h,xd,yd in whd_list])height = height_descent + ydescentelse:height = self.height - *self._pad hd_list = [(h, yd) for w, h, xd, yd in whd_list]height, ydescent, yoffsets = _get_aligned_offsets(hd_list,self.height,self.align)pack_list = [(w, xd) for w,h,xd,yd in whd_list]width, xoffsets_ = _get_packed_offsets(pack_list, self.width,self.sep, self.mode)xoffsets = xoffsets_ + [xd for w,h,xd,yd in whd_list]xdescent=whd_list[][]xoffsets = xoffsets - xdescentreturn width + *self.pad, height + *self.pad,xdescent + self.pad, ydescent + self.pad,zip(xoffsets, yoffsets)", "docstring": "Update the offsets of the children and return the extents of the box", "id": "f17242:c3:m1"} {"signature": "def __init__(self, width, height, xdescent=,ydescent=, clip=True):", "body": "super(DrawingArea, self).__init__()self.width = widthself.height = heightself.xdescent = xdescentself.ydescent = ydescentself.offset_transform = mtransforms.Affine2D()self.offset_transform.clear()self.offset_transform.translate(, )", "docstring": "*width*, *height* : width and height of the container box.\n*xdescent*, *ydescent* : descent of the box in x- and y-direction.", "id": "f17242:c4:m0"} {"signature": "def get_transform(self):", "body": "return self.offset_transform", "docstring": "Return the :class:`~matplotlib.transforms.Transform` applied\nto the children", "id": "f17242:c4:m1"} {"signature": "def set_transform(self, t):", "body": "pass", "docstring": "set_transform is ignored.", "id": "f17242:c4:m2"} {"signature": "def set_offset(self, xy):", "body": "self._offset = xyself.offset_transform.clear()self.offset_transform.translate(xy[], xy[])", "docstring": "Set the offset of the container.\n\nAccepts: a tuple of x, y coordinates in display units.", "id": "f17242:c4:m3"} {"signature": "def get_offset(self):", "body": "return self._offset", "docstring": "return offset of the container.", "id": "f17242:c4:m4"} {"signature": "def get_window_extent(self, renderer):", "body": "w, h, xd, yd = self.get_extent(renderer)ox, oy = self.get_offset() return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)", "docstring": "get the bounding box in display space.", "id": "f17242:c4:m5"} {"signature": "def get_extent(self, renderer):", "body": "return self.width, 
self.height, self.xdescent, self.ydescent", "docstring": "Return width, height, xdescent, ydescent of box", "id": "f17242:c4:m6"} {"signature": "def add_artist(self, a):", "body": "self._children.append(a)a.set_transform(self.get_transform())", "docstring": "Add any :class:`~matplotlib.artist.Artist` to the container box", "id": "f17242:c4:m7"} {"signature": "def draw(self, renderer):", "body": "for c in self._children:c.draw(renderer)bbox_artist(self, renderer, fill=False, props=dict(pad=))", "docstring": "Draw the children", "id": "f17242:c4:m8"} {"signature": "def __init__(self, s,textprops=None,multilinebaseline=None,minimumdescent=True,):", "body": "if textprops is None:textprops = {}if not textprops.has_key(\"\"):textprops[\"\"]=\"\"self._text = mtext.Text(, , s, **textprops)OffsetBox.__init__(self)self._children = [self._text]self.offset_transform = mtransforms.Affine2D()self.offset_transform.clear()self.offset_transform.translate(, )self._baseline_transform = mtransforms.Affine2D()self._text.set_transform(self.offset_transform+self._baseline_transform)self._multilinebaseline = multilinebaselineself._minimumdescent = minimumdescent", "docstring": "*s* : a string to be displayed.\n*textprops* : property dictionary for the text\n*multilinebaseline* : If True, baseline for multiline text is\n                      adjusted so that it is (approximately)\n                      center-aligned with singleline text.\n*minimumdescent* : If True, the box has a minimum descent of \"p\".", "id": "f17242:c5:m0"} {"signature": "def set_multilinebaseline(self, t):", "body": "self._multilinebaseline = t", "docstring": "Set multilinebaseline.\n\nIf True, baseline for multiline text is\nadjusted so that it is (approximately) center-aligned with\nsingleline text.", "id": "f17242:c5:m1"} {"signature": "def get_multilinebaseline(self):", "body": "return self._multilinebaseline", "docstring": "get multilinebaseline.", "id": "f17242:c5:m2"} {"signature": "def set_minimumdescent(self, t):", "body": "self._minimumdescent = t", "docstring": "Set minimumdescent.\n\nIf True, extent of the single line text is adjusted so that\nit has a minimum descent of \"p\"", "id": "f17242:c5:m3"} {"signature": "def get_minimumdescent(self):", "body": "return self._minimumdescent", "docstring": "get minimumdescent.", "id": "f17242:c5:m4"} {"signature": "def set_transform(self, t):", "body": "pass", "docstring": "set_transform is ignored.", "id": "f17242:c5:m5"} {"signature": "def set_offset(self, xy):", "body": "self._offset = xyself.offset_transform.clear()self.offset_transform.translate(xy[], xy[])", "docstring": "Set the offset of the container.\n\nAccepts: a tuple of x, y coordinates in display units.", "id": "f17242:c5:m6"} {"signature": "def get_offset(self):", "body": "return self._offset", "docstring": "return offset of the container.", "id": "f17242:c5:m7"} {"signature": "def get_window_extent(self, renderer):", "body": "w, h, xd, yd = self.get_extent(renderer)ox, oy = self.get_offset() return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)", "docstring": "get the bounding box in display space.", "id": "f17242:c5:m8"} {"signature": "def draw(self, renderer):", "body": "self._text.draw(renderer)bbox_artist(self, renderer, fill=False, props=dict(pad=))", "docstring": "Draw the children", "id": "f17242:c5:m10"} {"signature": "def get_fontext_synonyms(fontext):", "body": "return {'': ('', ''),'': ('', ''),'': ('',)}[fontext]", "docstring": "Return a list of file extensions that are synonyms for\nthe given file extension *fontext*.", "id": "f17243:m0"} 
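The packing helpers recorded above (``_get_packed_offsets``, f17242:m1, and ``_get_aligned_offsets``, f17242:m2) are easiest to follow with concrete numbers. The following standalone sketch reproduces only the fixed-mode arithmetic that the f17242:m1 docstring describes; it is an illustration written for this note, not the library function itself, and it assumes the masked mode string in that record is 'fixed'::

    import numpy as np

    def packed_offsets_fixed(widths, sep):
        # Each box starts where the previous one ended plus the separation.
        ends = np.add.accumulate([0] + [w + sep for w in widths])
        offsets = ends[:-1]      # left edge of each box
        total = ends[-1] - sep   # drop the trailing separator
        return total, offsets

    total, offsets = packed_offsets_fixed([10, 20, 30], sep=2)
    print(total, offsets)        # 64 [ 0 12 34]

With three boxes of widths 10, 20 and 30 and a separation of 2, the boxes land at offsets 0, 12 and 34 and the packed row is 64 units wide.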
{"signature": "def win32FontDirectory():", "body": "try:import winregexcept ImportError:pass else:try:user = winreg.OpenKey(winreg.HKEY_CURRENT_USER, MSFolders)try:try:return winreg.QueryValueEx(user, '')[]except OSError:pass finally:winreg.CloseKey(user)except OSError:pass return os.path.join(os.environ[''], '')", "docstring": "Return the user-specified font directory for Win32. This is\nlooked up from the registry key::\n\n \\\\HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\\Fonts\n\nIf the key is not found, $WINDIR/Fonts will be returned.", "id": "f17243:m1"} {"signature": "def win32InstalledFonts(directory=None, fontext=''):", "body": "import winregif directory is None:directory = win32FontDirectory()fontext = get_fontext_synonyms(fontext)key, items = None, {}for fontdir in MSFontDirectories:try:local = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, fontdir)except OSError:continueif not local:files = []for ext in fontext:files.extend(glob.glob(os.path.join(directory, ''+ext)))return filestry:for j in range(winreg.QueryInfoKey(local)[]):try:key, direc, any = winreg.EnumValue( local, j)if not os.path.dirname(direc):direc = os.path.join(directory, direc)direc = os.path.abspath(direc).lower()if os.path.splitext(direc)[][:] in fontext:items[direc] = except EnvironmentError:continueexcept WindowsError:continuereturn list(items.keys())finally:winreg.CloseKey(local)return None", "docstring": "Search for fonts in the specified font directory, or use the\nsystem directories if none given. A list of TrueType font\nfilenames are returned by default, or AFM fonts if *fontext* ==\n'afm'.", "id": "f17243:m2"} {"signature": "def OSXFontDirectory():", "body": "fontpaths = []def add(arg,directory,files):fontpaths.append(directory)for fontdir in OSXFontDirectories:try:if os.path.isdir(fontdir):os.path.walk(fontdir, add, None)except (IOError, OSError, TypeError, ValueError):passreturn fontpaths", "docstring": "Return the system font directories for OS X. This is done by\nstarting at the list of hardcoded paths in\n:attr:`OSXFontDirectories` and returning all nested directories\nwithin them.", "id": "f17243:m3"} {"signature": "def OSXInstalledFonts(directory=None, fontext=''):", "body": "if directory is None:directory = OSXFontDirectory()fontext = get_fontext_synonyms(fontext)files = []for path in directory:if fontext is None:files.extend(glob.glob(os.path.join(path,'')))else:for ext in fontext:files.extend(glob.glob(os.path.join(path, ''+ext)))files.extend(glob.glob(os.path.join(path, ''+ext.upper())))return files", "docstring": "Get list of font files on OS X - ignores font suffix by default.", "id": "f17243:m4"} {"signature": "def x11FontDirectory():", "body": "fontpaths = []def add(arg,directory,files):fontpaths.append(directory)for fontdir in X11FontDirectories:try:if os.path.isdir(fontdir):os.path.walk(fontdir, add, None)except (IOError, OSError, TypeError, ValueError):passreturn fontpaths", "docstring": "Return the system font directories for X11. 
This is done by\nstarting at the list of hardcoded paths in\n:attr:`X11FontDirectories` and returning all nested directories\nwithin them.", "id": "f17243:m5"} {"signature": "def get_fontconfig_fonts(fontext=''):", "body": "try:import subprocessexcept ImportError:return {}fontext = get_fontext_synonyms(fontext)fontfiles = {}status, output = subprocess.getstatusoutput(\"\")if status == :for line in output.split(''):fname = line.split('')[]if (os.path.splitext(fname)[][:] in fontext andos.path.exists(fname)):fontfiles[fname] = return fontfiles", "docstring": "Grab a list of all the fonts that are being tracked by fontconfig\nby making a system call to ``fc-list``. This is an easy way to\ngrab all of the fonts the user wants to be made available to\napplications, without needing knowing where all of them reside.", "id": "f17243:m6"} {"signature": "def findSystemFonts(fontpaths=None, fontext=''):", "body": "fontfiles = {}fontexts = get_fontext_synonyms(fontext)if fontpaths is None:if sys.platform == '':fontdir = win32FontDirectory()fontpaths = [fontdir]for f in win32InstalledFonts(fontdir):base, ext = os.path.splitext(f)if len(ext)> and ext[:].lower() in fontexts:fontfiles[f] = else:fontpaths = x11FontDirectory()if sys.platform == '':for f in OSXInstalledFonts(fontext=fontext):fontfiles[f] = for f in get_fontconfig_fonts(fontext):fontfiles[f] = elif isinstance(fontpaths, str):fontpaths = [fontpaths]for path in fontpaths:files = []for ext in fontexts:files.extend(glob.glob(os.path.join(path, ''+ext)))files.extend(glob.glob(os.path.join(path, ''+ext.upper())))for fname in files:fontfiles[os.path.abspath(fname)] = return [fname for fname in list(fontfiles.keys()) if os.path.exists(fname)]", "docstring": "Search for fonts in the specified font paths. If no paths are\ngiven, will use a standard set of system paths, as well as the\nlist of fonts tracked by fontconfig if fontconfig is installed and\navailable. A list of TrueType fonts are returned by default with\nAFM fonts as an option.", "id": "f17243:m7"} {"signature": "def weight_as_number(weight):", "body": "if isinstance(weight, str):try:weight = weight_dict[weight.lower()]except KeyError:weight = elif weight in range(, , ):passelse:raise ValueError('')return weight", "docstring": "Return the weight property as a numeric value. 
String values\nare converted to their corresponding numeric value.", "id": "f17243:m8"} {"signature": "def ttfFontProperty(font):", "body": "name = font.family_namesfnt = font.get_sfnt()sfnt2 = sfnt.get((,,,))sfnt4 = sfnt.get((,,,))if sfnt2:sfnt2 = sfnt2.lower()else:sfnt2 = ''if sfnt4:sfnt4 = sfnt4.lower()else:sfnt4 = ''if sfnt4.find('') >= :style = ''elif sfnt4.find('') >= :style = ''elif sfnt2.find('') >= :style = ''elif font.style_flags & ft2font.ITALIC:style = ''else:style = ''if name.lower() in ['', '']:variant = ''else:variant = ''weight = Nonefor w in list(weight_dict.keys()):if sfnt4.find(w) >= :weight = wbreakif not weight:if font.style_flags & ft2font.BOLD:weight = else:weight = weight = weight_as_number(weight)if sfnt4.find('') >= or sfnt4.find('') >= orsfnt4.find('') >= :stretch = ''elif sfnt4.find('') >= :stretch = ''elif sfnt4.find('') >= or sfnt4.find('') >= :stretch = ''else:stretch = ''if font.scalable:size = ''else:size = str(float(font.get_fontsize()))size_adjust = Nonereturn FontEntry(font.fname, name, style, variant, weight, stretch, size)", "docstring": "A function for populating the :class:`FontKey` by extracting\ninformation from the TrueType font file.\n\n*font* is a :class:`FT2Font` instance.", "id": "f17243:m9"} {"signature": "def afmFontProperty(fontpath, font):", "body": "name = font.get_familyname()if font.get_angle() != or name.lower().find('') >= :style = ''elif name.lower().find('') >= :style = ''else:style = ''if name.lower() in ['', '']:variant = ''else:variant = ''weight = weight_as_number(font.get_weight().lower())stretch = ''size = ''size_adjust = Nonereturn FontEntry(fontpath, name, style, variant, weight, stretch, size)", "docstring": "A function for populating a :class:`FontKey` instance by\nextracting information from the AFM font file.\n\n*font* is a class:`AFM` instance.", "id": "f17243:m10"} {"signature": "def createFontList(fontfiles, fontext=''):", "body": "fontlist = []seen = {}for fpath in fontfiles:verbose.report('' % (fpath), '')fname = os.path.split(fpath)[]if fname in seen: continueelse: seen[fname] = if fontext == '':try:fh = open(fpath, '')except:verbose.report(\"\" % fpath)continuetry:try:font = afm.AFM(fh)finally:fh.close()except RuntimeError:verbose.report(\"\"%fpath)continueprop = afmFontProperty(fpath, font)else:try:font = ft2font.FT2Font(str(fpath))except RuntimeError:verbose.report(\"\"%fpath)continueexcept UnicodeError:verbose.report(\"\")continuetry: prop = ttfFontProperty(font)except: continuefontlist.append(prop)return fontlist", "docstring": "A function to create a font lookup list. The default is to create\na list of TrueType fonts. 
An AFM font list can optionally be\ncreated.", "id": "f17243:m11"} {"signature": "def ttfdict_to_fnames(d):", "body": "fnames = []for named in list(d.values()):for styled in list(named.values()):for variantd in list(styled.values()):for weightd in list(variantd.values()):for stretchd in list(weightd.values()):for fname in list(stretchd.values()):fnames.append(fname)return fnames", "docstring": "flatten a ttfdict to all the filenames it contains", "id": "f17243:m12"} {"signature": "def pickle_dump(data, filename):", "body": "fh = open(filename, '')try:pickle.dump(data, fh)finally:fh.close()", "docstring": "Equivalent to pickle.dump(data, open(filename, 'w'))\nbut closes the file to prevent filehandle leakage.", "id": "f17243:m13"} {"signature": "def pickle_load(filename):", "body": "fh = open(filename, '')try:data = pickle.load(fh)finally:fh.close()return data", "docstring": "Equivalent to pickle.load(open(filename, 'r'))\nbut closes the file to prevent filehandle leakage.", "id": "f17243:m14"} {"signature": "def is_opentype_cff_font(filename):", "body": "if os.path.splitext(filename)[].lower() == '':result = _is_opentype_cff_font_cache.get(filename)if result is None:fd = open(filename, '')tag = fd.read()fd.close()result = (tag == '')_is_opentype_cff_font_cache[filename] = resultreturn resultreturn False", "docstring": "Returns True if the given font is a Postscript Compact Font Format\nFont embedded in an OpenType wrapper. Used by the PostScript and\nPDF backends that can not subset these fonts.", "id": "f17243:m15"} {"signature": "def get_family(self):", "body": "if self._family is None:family = rcParams['']if is_string_like(family):return [family]return familyreturn self._family", "docstring": "Return a list of font names that comprise the font family.", "id": "f17243:c1:m4"} {"signature": "def get_name(self):", "body": "return ft2font.FT2Font(str(findfont(self))).family_name", "docstring": "Return the name of the font that best matches the font\nproperties.", "id": "f17243:c1:m5"} {"signature": "def get_style(self):", "body": "if self._slant is None:return rcParams['']return self._slant", "docstring": "Return the font style. Values are: 'normal', 'italic' or\n'oblique'.", "id": "f17243:c1:m6"} {"signature": "def get_variant(self):", "body": "if self._variant is None:return rcParams['']return self._variant", "docstring": "Return the font variant. Values are: 'normal' or\n'small-caps'.", "id": "f17243:c1:m7"} {"signature": "def get_weight(self):", "body": "if self._weight is None:return rcParams['']return self._weight", "docstring": "Set the font weight. Options are: A numeric value in the\nrange 0-1000 or one of 'light', 'normal', 'regular', 'book',\n'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold',\n'heavy', 'extra bold', 'black'", "id": "f17243:c1:m8"} {"signature": "def get_stretch(self):", "body": "if self._stretch is None:return rcParams['']return self._stretch", "docstring": "Return the font stretch or width. 
Options are: 'ultra-condensed',\n'extra-condensed', 'condensed', 'semi-condensed', 'normal',\n'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.", "id": "f17243:c1:m9"} {"signature": "def get_size(self):", "body": "if self._size is None:return rcParams['']return self._size", "docstring": "Return the font size.", "id": "f17243:c1:m10"} {"signature": "def get_file(self):", "body": "return self._file", "docstring": "Return the filename of the associated font.", "id": "f17243:c1:m12"} {"signature": "def get_fontconfig_pattern(self):", "body": "return generate_fontconfig_pattern(self)", "docstring": "Get a fontconfig pattern suitable for looking up the font as\nspecified with fontconfig's ``fc-match`` utility.\n\nSee the documentation on `fontconfig patterns\n`_.\n\nThis support does not require fontconfig to be installed or\nsupport for it to be enabled. We are merely borrowing its\npattern syntax for use here.", "id": "f17243:c1:m13"} {"signature": "def set_family(self, family):", "body": "if family is None:self._family = Noneelse:if is_string_like(family):family = [family]self._family = family", "docstring": "Change the font family. May be either an alias (generic name\nis CSS parlance), such as: 'serif', 'sans-serif', 'cursive',\n'fantasy', or 'monospace', or a real font name.", "id": "f17243:c1:m14"} {"signature": "def set_style(self, style):", "body": "if style not in ('', '', '', None):raise ValueError(\"\")self._slant = style", "docstring": "Set the font style. Values are: 'normal', 'italic' or\n'oblique'.", "id": "f17243:c1:m15"} {"signature": "def set_variant(self, variant):", "body": "if variant not in ('', '', None):raise ValueError(\"\")self._variant = variant", "docstring": "Set the font variant. Values are: 'normal' or 'small-caps'.", "id": "f17243:c1:m16"} {"signature": "def set_weight(self, weight):", "body": "if weight is not None:try:weight = int(weight)if weight < or weight > :raise ValueError()except ValueError:if weight not in weight_dict:raise ValueError(\"\")self._weight = weight", "docstring": "Set the font weight. May be either a numeric value in the\nrange 0-1000 or one of 'ultralight', 'light', 'normal',\n'regular', 'book', 'medium', 'roman', 'semibold', 'demibold',\n'demi', 'bold', 'heavy', 'extra bold', 'black'", "id": "f17243:c1:m17"} {"signature": "def set_stretch(self, stretch):", "body": "if stretch is not None:try:stretch = int(stretch)if stretch < or stretch > :raise ValueError()except ValueError:if stretch not in stretch_dict:raise ValueError(\"\")self._stretch = stretch", "docstring": "Set the font stretch or width. Options are: 'ultra-condensed',\n'extra-condensed', 'condensed', 'semi-condensed', 'normal',\n'semi-expanded', 'expanded', 'extra-expanded' or\n'ultra-expanded', or a numeric value in the range 0-1000.", "id": "f17243:c1:m18"} {"signature": "def set_size(self, size):", "body": "if size is not None:try:size = float(size)except ValueError:if size is not None and size not in font_scalings:raise ValueError(\"\")self._size = size", "docstring": "Set the font size. Either an relative value of 'xx-small',\n'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'\nor an absolute font size, e.g. 12.", "id": "f17243:c1:m19"} {"signature": "def set_file(self, file):", "body": "self._file = file", "docstring": "Set the filename of the fontfile to use. 
In this case, all\nother properties will be ignored.", "id": "f17243:c1:m20"} {"signature": "def set_fontconfig_pattern(self, pattern):", "body": "for key, val in list(self._parse_fontconfig_pattern(pattern).items()):if type(val) == list:getattr(self, \"\" + key)(val[])else:getattr(self, \"\" + key)(val)", "docstring": "Set the properties by parsing a fontconfig *pattern*.\n\nSee the documentation on `fontconfig patterns\n`_.\n\nThis support does not require fontconfig to be installed or\nsupport for it to be enabled. We are merely borrowing its\npattern syntax for use here.", "id": "f17243:c1:m21"} {"signature": "def copy(self):", "body": "return FontProperties(_init = self)", "docstring": "Return a deep copy of self", "id": "f17243:c1:m22"} {"signature": "def get_default_weight(self):", "body": "return self.__default_weight", "docstring": "Return the default font weight.", "id": "f17243:c2:m1"} {"signature": "def get_default_size(self):", "body": "if self.default_size is None:return rcParams['']return self.default_size", "docstring": "Return the default font size.", "id": "f17243:c2:m2"} {"signature": "def set_default_weight(self, weight):", "body": "self.__default_weight = weight", "docstring": "Set the default font weight. The initial value is 'normal'.", "id": "f17243:c2:m3"} {"signature": "def set_default_size(self, size):", "body": "self.default_size = size", "docstring": "Set the default font size in points. The initial value is set\nby ``font.size`` in rc.", "id": "f17243:c2:m4"} {"signature": "def update_fonts(self, filenames):", "body": "raise NotImplementedError", "docstring": "Update the font dictionary with new font files.\nCurrently not implemented.", "id": "f17243:c2:m5"} {"signature": "def score_family(self, families, family2):", "body": "for i, family1 in enumerate(families):if family1.lower() in font_family_aliases:if family1 == '':family1 == ''options = rcParams['' + family1]if family2 in options:idx = options.index(family2)return * (float(idx) / len(options))elif family1.lower() == family2.lower():return return ", "docstring": "Returns a match score between the list of font families in\n*families* and the font family name *family2*.\n\nAn exact match anywhere in the list returns 0.0.\n\nA match by generic font name will return 0.1.\n\nNo match will return 1.0.", "id": "f17243:c2:m6"} {"signature": "def score_style(self, style1, style2):", "body": "if style1 == style2:return elif style1 in ('', '') andstyle2 in ('', ''):return return ", "docstring": "Returns a match score between *style1* and *style2*.\n\nAn exact match returns 0.0.\n\nA match between 'italic' and 'oblique' returns 0.1.\n\nNo match returns 1.0.", "id": "f17243:c2:m7"} {"signature": "def score_variant(self, variant1, variant2):", "body": "if variant1 == variant2:return else:return ", "docstring": "Returns a match score between *variant1* and *variant2*.\n\nAn exact match returns 0.0, otherwise 1.0.", "id": "f17243:c2:m8"} {"signature": "def score_stretch(self, stretch1, stretch2):", "body": "try:stretchval1 = int(stretch1)except ValueError:stretchval1 = stretch_dict.get(stretch1, )try:stretchval2 = int(stretch2)except ValueError:stretchval2 = stretch_dict.get(stretch2, )return abs(stretchval1 - stretchval2) / ", "docstring": "Returns a match score between *stretch1* and *stretch2*.\n\nThe result is the absolute value of the difference between the\nCSS numeric values of *stretch1* and *stretch2*, normalized\nbetween 0.0 and 1.0.", "id": "f17243:c2:m9"} {"signature": "def score_weight(self, weight1, 
weight2):", "body": "try:weightval1 = int(weight1)except ValueError:weightval1 = weight_dict.get(weight1, )try:weightval2 = int(weight2)except ValueError:weightval2 = weight_dict.get(weight2, )return abs(weightval1 - weightval2) / ", "docstring": "Returns a match score between *weight1* and *weight2*.\n\nThe result is the absolute value of the difference between the\nCSS numeric values of *weight1* and *weight2*, normalized\nbetween 0.0 and 1.0.", "id": "f17243:c2:m10"} {"signature": "def score_size(self, size1, size2):", "body": "if size2 == '':return try:sizeval1 = float(size1)except ValueError:sizeval1 = self.default_size * font_scalings(size1)try:sizeval2 = float(size2)except ValueError:return return abs(sizeval1 - sizeval2) / ", "docstring": "Returns a match score between *size1* and *size2*.\n\nIf *size2* (the size specified in the font file) is 'scalable', this\nfunction always returns 0.0, since any font size can be generated.\n\nOtherwise, the result is the absolute distance between *size1* and\n*size2*, normalized so that the usual range of font sizes (6pt -\n72pt) will lie between 0.0 and 1.0.", "id": "f17243:c2:m11"} {"signature": "def findfont(self, prop, fontext=''):", "body": "debug = Falseif prop is None:return self.defaultFontif is_string_like(prop):prop = FontProperties(prop)fname = prop.get_file()if fname is not None:verbose.report(''%fname, '')return fnameif fontext == '':font_cache = self.afm_lookup_cachefontlist = self.afmlistelse:font_cache = self.ttf_lookup_cachefontlist = self.ttflistcached = font_cache.get(hash(prop))if cached:return cachedbest_score = best_font = Nonefor font in fontlist:score =self.score_family(prop.get_family(), font.name) * +self.score_style(prop.get_style(), font.style) +self.score_variant(prop.get_variant(), font.variant) +self.score_weight(prop.get_weight(), font.weight) +self.score_stretch(prop.get_stretch(), font.stretch) +self.score_size(prop.get_size(), font.size)if score < best_score:best_score = scorebest_font = fontif score == :breakif best_font is None or best_score >= :verbose.report('' %(prop, self.defaultFont))result = self.defaultFontelse:verbose.report('' %(prop, best_font.name, best_font.fname, best_score))result = best_font.fnamefont_cache[hash(prop)] = resultreturn result", "docstring": "Search the font list for the font that most closely matches\nthe :class:`FontProperties` *prop*.\n\n:meth:`findfont` performs a nearest neighbor search. Each\nfont is given a similarity score to the target font\nproperties. The first font with the highest score is\nreturned. 
If no matches below a certain threshold are found,\nthe default font (usually Vera Sans) is returned.\n\nThe result is cached, so subsequent lookups don't have to\nperform the O(n) nearest neighbor search.\n\nSee the `W3C Cascading Style Sheet, Level 1\n`_ documentation\nfor a description of the font finding algorithm.", "id": "f17243:c2:m12"} {"signature": "def isnan(a):", "body": "return reshape(array([_isnan(i) for i in ravel(a)],''), shape(a))", "docstring": "y = isnan(x) returns True where x is Not-A-Number", "id": "f17245:m0"} {"signature": "def all(a, axis=None):", "body": "if axis is None:return alltrue(ravel(a))else:return alltrue(a, axis)", "docstring": "Numpy-compatible version of all()", "id": "f17245:m1"} {"signature": "def _import_fail_message(module, version):", "body": "_dict = { \"\" : which[],\"\" : module,\"\" : version + module}print(\"\"\"\"\"\" % _dict)", "docstring": "Prints a message when the array package specific version of an extension\n fails to import correctly.", "id": "f17251:m0"} {"signature": "def all(a, axis=None):", "body": "if axis is None:return _all(a)return alltrue(a, axis)", "docstring": "Numpy-compatible version of all()", "id": "f17253:m0"} {"signature": "def Matrix(data, typecode=None, copy=, savespace=):", "body": "if isinstance(data, type(\"\")):raise TypeError(\"\")a = fromlist(data, type=typecode)if a.rank == :a.shape = (,)elif a.rank == :a.shape = (,) + a.shapea.__class__ = _Matrixreturn a", "docstring": "Matrix constructs new matrices from 2D nested lists of numbers", "id": "f17253:m1"} {"signature": "def getp(o, property=None):", "body": "insp = ArtistInspector(o)if property is None:ret = insp.pprint_getters()print(''.join(ret))returnfunc = getattr(o, '' + property)return func()", "docstring": "Return the value of handle property. property is an optional string\nfor the property you want to return\n\nExample usage::\n\n getp(o) # get all the object properties\n getp(o, 'linestyle') # get the linestyle property\n\n*o* is a :class:`Artist` instance, eg\n:class:`~matplotllib.lines.Line2D` or an instance of a\n:class:`~matplotlib.axes.Axes` or :class:`matplotlib.text.Text`.\nIf the *property* is 'somename', this function returns\n\n o.get_somename()\n\n:func:`getp` can be used to query all the gettable properties with\n``getp(o)``. Many properties have aliases for shorter typing, e.g.\n'lw' is an alias for 'linewidth'. In the output, aliases and full\nproperty names will be listed as:\n\n property or alias = value\n\ne.g.:\n\n linewidth or lw = 2", "id": "f17255:m0"} {"signature": "def setp(h, *args, **kwargs):", "body": "insp = ArtistInspector(h)if len(kwargs)== and len(args)==:print(''.join(insp.pprint_setters()))returnif len(kwargs)== and len(args)==:print(insp.pprint_setters(prop=args[]))returnif not cbook.iterable(h): h = [h]else: h = cbook.flatten(h)if len(args)%:raise ValueError('')funcvals = []for i in range(, len(args)-, ):funcvals.append((args[i], args[i+]))funcvals.extend(list(kwargs.items()))ret = []for o in h:for s, val in funcvals:s = s.lower()funcName = \"\"%sfunc = getattr(o,funcName)ret.extend( [func(val)] )return [x for x in cbook.flatten(ret)]", "docstring": "matplotlib supports the use of :func:`setp` (\"set property\") and\n:func:`getp` to set and get object properties, as well as to do\nintrospection on the object. 
For example, to set the linestyle of a\nline to be dashed, you can do::\n\n >>> line, = plot([1,2,3])\n >>> setp(line, linestyle='--')\n\nIf you want to know the valid types of arguments, you can provide the\nname of the property you want to set without a value::\n\n >>> setp(line, 'linestyle')\n linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]\n\nIf you want to see all the properties that can be set, and their\npossible values, you can do::\n\n >>> setp(line)\n ... long output listing omitted\n\n:func:`setp` operates on a single instance or a list of instances.\nIf you are in query mode introspecting the possible values, only\nthe first instance in the sequence is used. When actually setting\nvalues, all the instances will be set. E.g., suppose you have a\nlist of two lines, the following will make both lines thicker and\nred::\n\n >>> x = arange(0,1.0,0.01)\n >>> y1 = sin(2*pi*x)\n >>> y2 = sin(4*pi*x)\n >>> lines = plot(x, y1, x, y2)\n >>> setp(lines, linewidth=2, color='r')\n\n:func:`setp` works with the matlab(TM) style string/value pairs or\nwith python kwargs. For example, the following are equivalent::\n\n >>> setp(lines, 'linewidth', 2, 'color', r') # matlab style\n\n >>> setp(lines, linewidth=2, color='r') # python style", "id": "f17255:m1"} {"signature": "def remove(self):", "body": "if self._remove_method != None:self._remove_method(self)else:raise NotImplementedError('')", "docstring": "Remove the artist from the figure if possible. The effect\nwill not be visible until the figure is redrawn, e.g., with\n:meth:`matplotlib.axes.Axes.draw_idle`. Call\n:meth:`matplotlib.axes.Axes.relim` to update the axes limits\nif desired.\n\nNote: :meth:`~matplotlib.axes.Axes.relim` will not see\ncollections even if the collection was added to axes with\n*autolim* = True.\n\nNote: there is no support for removing the artist's legend entry.", "id": "f17255:c0:m1"} {"signature": "def have_units(self):", "body": "ax = self.axesif ax is None or ax.xaxis is None:return Falsereturn ax.xaxis.have_units() or ax.yaxis.have_units()", "docstring": "Return *True* if units are set on the *x* or *y* axes", "id": "f17255:c0:m2"} {"signature": "def convert_xunits(self, x):", "body": "ax = getattr(self, '', None)if ax is None or ax.xaxis is None:return xreturn ax.xaxis.convert_units(x)", "docstring": "For artists in an axes, if the xaxis has units support,\n convert *x* using xaxis unit type", "id": "f17255:c0:m3"} {"signature": "def convert_yunits(self, y):", "body": "ax = getattr(self, '', None)if ax is None or ax.yaxis is None: return yreturn ax.yaxis.convert_units(y)", "docstring": "For artists in an axes, if the yaxis has units support,\n convert *y* using yaxis unit type", "id": "f17255:c0:m4"} {"signature": "def set_axes(self, axes):", "body": "self.axes = axes", "docstring": "Set the :class:`~matplotlib.axes.Axes` instance in which the\nartist resides, if any.\n\nACCEPTS: an :class:`~matplotlib.axes.Axes` instance", "id": "f17255:c0:m5"} {"signature": "def get_axes(self):", "body": "return self.axes", "docstring": "Return the :class:`~matplotlib.axes.Axes` instance the artist\nresides in, or *None*", "id": "f17255:c0:m6"} {"signature": "def add_callback(self, func):", "body": "oid = self._oidself._propobservers[oid] = funcself._oid += return oid", "docstring": "Adds a callback function that will be called whenever one of\nthe :class:`Artist`'s properties changes.\n\nReturns an *id* that is useful for removing the callback with\n:meth:`remove_callback` later.", "id": "f17255:c0:m7"} 
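The ``add_callback`` record just above (f17255:c0:m7), together with ``remove_callback`` and ``pchanged`` below, describes a small observer registry keyed by an incrementing id. A minimal standalone sketch of that pattern (an illustration, not the ``Artist`` class itself)::

    class Observable:
        def __init__(self):
            self._propobservers = {}   # oid -> callback
            self._oid = 0

        def add_callback(self, func):
            oid = self._oid
            self._propobservers[oid] = func
            self._oid += 1
            return oid                 # handle used for later removal

        def remove_callback(self, oid):
            self._propobservers.pop(oid, None)

        def pchanged(self):
            # Notify every registered observer that a property changed.
            for func in list(self._propobservers.values()):
                func(self)

    obs = Observable()
    oid = obs.add_callback(lambda o: print("property changed"))
    obs.pchanged()               # prints "property changed"
    obs.remove_callback(oid)
    obs.pchanged()               # prints nothing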
{"signature": "def remove_callback(self, oid):", "body": "try: del self._propobservers[oid]except KeyError: pass", "docstring": "Remove a callback based on its *id*.\n\n.. seealso::\n :meth:`add_callback`", "id": "f17255:c0:m8"} {"signature": "def pchanged(self):", "body": "for oid, func in list(self._propobservers.items()):func(self)", "docstring": "Fire an event when property changed, calling all of the\nregistered callbacks.", "id": "f17255:c0:m9"} {"signature": "def is_transform_set(self):", "body": "return self._transformSet", "docstring": "Returns *True* if :class:`Artist` has a transform explicitly\nset.", "id": "f17255:c0:m10"} {"signature": "def set_transform(self, t):", "body": "self._transform = tself._transformSet = Trueself.pchanged()", "docstring": "Set the :class:`~matplotlib.transforms.Transform` instance\nused by this artist.\n\nACCEPTS: :class:`~matplotlib.transforms.Transform` instance", "id": "f17255:c0:m11"} {"signature": "def get_transform(self):", "body": "if self._transform is None:self._transform = IdentityTransform()return self._transform", "docstring": "Return the :class:`~matplotlib.transforms.Transform`\ninstance used by this artist.", "id": "f17255:c0:m12"} {"signature": "def hitlist(self, event):", "body": "import tracebackL = []try:hascursor,info = self.contains(event)if hascursor:L.append(self)except:traceback.print_exc()print(\"\",self.__class__)for a in self.get_children():L.extend(a.hitlist(event))return L", "docstring": "List the children of the artist which contain the mouse event *event*.", "id": "f17255:c0:m13"} {"signature": "def get_children(self):", "body": "return []", "docstring": "Return a list of the child :class:`Artist`s this\n:class:`Artist` contains.", "id": "f17255:c0:m14"} {"signature": "def contains(self, mouseevent):", "body": "if callable(self._contains): return self._contains(self,mouseevent)warnings.warn(\"\" % self.__class__.__name__)return False,{}", "docstring": "Test whether the artist contains the mouse event.\n\n Returns the truth value and a dictionary of artist specific details of\n selection, such as which points are contained in the pick radius. See\n individual artists for details.", "id": "f17255:c0:m15"} {"signature": "def set_contains(self,picker):", "body": "self._contains = picker", "docstring": "Replace the contains test used by this artist. 
The new picker\nshould be a callable function which determines whether the\nartist is hit by the mouse event::\n\n hit, props = picker(artist, mouseevent)\n\nIf the mouse event is over the artist, return *hit* = *True*\nand *props* is a dictionary of properties you want returned\nwith the contains test.\n\nACCEPTS: a callable function", "id": "f17255:c0:m16"} {"signature": "def get_contains(self):", "body": "return self._contains", "docstring": "Return the _contains test used by the artist, or *None* for default.", "id": "f17255:c0:m17"} {"signature": "def pickable(self):", "body": "return (self.figure is not None andself.figure.canvas is not None andself._picker is not None)", "docstring": "Return *True* if :class:`Artist` is pickable.", "id": "f17255:c0:m18"} {"signature": "def pick(self, mouseevent):", "body": "if self.pickable():picker = self.get_picker()if callable(picker):inside,prop = picker(self,mouseevent)else:inside,prop = self.contains(mouseevent)if inside:self.figure.canvas.pick_event(mouseevent, self, **prop)for a in self.get_children():a.pick(mouseevent)", "docstring": "call signature::\n\n pick(mouseevent)\n\neach child artist will fire a pick event if *mouseevent* is over\nthe artist and the artist has picker set", "id": "f17255:c0:m19"} {"signature": "def set_picker(self, picker):", "body": "self._picker = picker", "docstring": "Set the epsilon for picking used by this artist\n\n*picker* can be one of the following:\n\n * *None*: picking is disabled for this artist (default)\n\n * A boolean: if *True* then picking will be enabled and the\n artist will fire a pick event if the mouse event is over\n the artist\n\n * A float: if picker is a number it is interpreted as an\n epsilon tolerance in points and the artist will fire\n off an event if it's data is within epsilon of the mouse\n event. For some artists like lines and patch collections,\n the artist may provide additional data to the pick event\n that is generated, e.g. the indices of the data within\n epsilon of the pick event\n\n * A function: if picker is callable, it is a user supplied\n function which determines whether the artist is hit by the\n mouse event::\n\n hit, props = picker(artist, mouseevent)\n\n to determine the hit test. 
if the mouse event is over the\n artist, return *hit=True* and props is a dictionary of\n properties you want added to the PickEvent attributes.\n\nACCEPTS: [None|float|boolean|callable]", "id": "f17255:c0:m20"} {"signature": "def get_picker(self):", "body": "return self._picker", "docstring": "Return the picker object used by this artist", "id": "f17255:c0:m21"} {"signature": "def is_figure_set(self):", "body": "return self.figure is not None", "docstring": "Returns True if the artist is assigned to a\n:class:`~matplotlib.figure.Figure`.", "id": "f17255:c0:m22"} {"signature": "def get_url(self):", "body": "return self._url", "docstring": "Returns the url", "id": "f17255:c0:m23"} {"signature": "def set_url(self, url):", "body": "self._url = url", "docstring": "Sets the url for the artist", "id": "f17255:c0:m24"} {"signature": "def get_snap(self):", "body": "return self._snap", "docstring": "Returns the snap setting which may be:\n\n * True: snap vertices to the nearest pixel center\n\n * False: leave vertices as-is\n\n * None: (auto) If the path contains only rectilinear line\n segments, round to the nearest pixel center\n\nOnly supported by the Agg backends.", "id": "f17255:c0:m25"} {"signature": "def set_snap(self, snap):", "body": "self._snap = snap", "docstring": "Sets the snap setting which may be:\n\n * True: snap vertices to the nearest pixel center\n\n * False: leave vertices as-is\n\n * None: (auto) If the path contains only rectilinear line\n segments, round to the nearest pixel center\n\nOnly supported by the Agg backends.", "id": "f17255:c0:m26"} {"signature": "def get_figure(self):", "body": "return self.figure", "docstring": "Return the :class:`~matplotlib.figure.Figure` instance the\nartist belongs to.", "id": "f17255:c0:m27"} {"signature": "def set_figure(self, fig):", "body": "self.figure = figself.pchanged()", "docstring": "Set the :class:`~matplotlib.figure.Figure` instance the artist\nbelongs to.\n\nACCEPTS: a :class:`matplotlib.figure.Figure` instance", "id": "f17255:c0:m28"} {"signature": "def set_clip_box(self, clipbox):", "body": "self.clipbox = clipboxself.pchanged()", "docstring": "Set the artist's clip :class:`~matplotlib.transforms.Bbox`.\n\nACCEPTS: a :class:`matplotlib.transforms.Bbox` instance", "id": "f17255:c0:m29"} {"signature": "def set_clip_path(self, path, transform=None):", "body": "from patches import Patch, Rectanglesuccess = Falseif transform is None:if isinstance(path, Rectangle):self.clipbox = TransformedBbox(Bbox.unit(), path.get_transform())self._clippath = Nonesuccess = Trueelif isinstance(path, Patch):self._clippath = TransformedPath(path.get_path(),path.get_transform())success = Trueif path is None:self._clippath = Nonesuccess = Trueelif isinstance(path, Path):self._clippath = TransformedPath(path, transform)success = Trueif not success:print(type(path), type(transform))raise TypeError(\"\")self.pchanged()", "docstring": "Set the artist's clip path, which may be:\n\n * a :class:`~matplotlib.patches.Patch` (or subclass) instance\n\n * a :class:`~matplotlib.path.Path` instance, in which case\n an optional :class:`~matplotlib.transforms.Transform`\n instance may be provided, which will be applied to the\n path before using it for clipping.\n\n * *None*, to remove the clipping path\n\nFor efficiency, if the path happens to be an axis-aligned\nrectangle, this method will set the clipping box to the\ncorresponding rectangle and set the clipping path to *None*.\n\nACCEPTS: [ 
(:class:`~matplotlib.path.Path`,\n:class:`~matplotlib.transforms.Transform`) |\n:class:`~matplotlib.patches.Patch` | None ]", "id": "f17255:c0:m30"} {"signature": "def get_alpha(self):", "body": "return self._alpha", "docstring": "Return the alpha value used for blending - not supported on all\nbackends", "id": "f17255:c0:m31"} {"signature": "def get_visible(self):", "body": "return self._visible", "docstring": "Return the artist's visiblity", "id": "f17255:c0:m32"} {"signature": "def get_animated(self):", "body": "return self._animated", "docstring": "Return the artist's animated state", "id": "f17255:c0:m33"} {"signature": "def get_clip_on(self):", "body": "return self._clipon", "docstring": "Return whether artist uses clipping", "id": "f17255:c0:m34"} {"signature": "def get_clip_box(self):", "body": "return self.clipbox", "docstring": "Return artist clipbox", "id": "f17255:c0:m35"} {"signature": "def get_clip_path(self):", "body": "return self._clippath", "docstring": "Return artist clip path", "id": "f17255:c0:m36"} {"signature": "def get_transformed_clip_path_and_affine(self):", "body": "if self._clippath is not None:return self._clippath.get_transformed_path_and_affine()return None, None", "docstring": "Return the clip path with the non-affine part of its\ntransformation applied, and the remaining affine part of its\ntransformation.", "id": "f17255:c0:m37"} {"signature": "def set_clip_on(self, b):", "body": "self._clipon = bself.pchanged()", "docstring": "Set whether artist uses clipping.\n\nACCEPTS: [True | False]", "id": "f17255:c0:m38"} {"signature": "def _set_gc_clip(self, gc):", "body": "if self._clipon:if self.clipbox is not None:gc.set_clip_rectangle(self.clipbox)gc.set_clip_path(self._clippath)else:gc.set_clip_rectangle(None)gc.set_clip_path(None)", "docstring": "Set the clip properly for the gc", "id": "f17255:c0:m39"} {"signature": "def draw(self, renderer, *args, **kwargs):", "body": "if not self.get_visible(): return", "docstring": "Derived classes drawing method", "id": "f17255:c0:m40"} {"signature": "def set_alpha(self, alpha):", "body": "self._alpha = alphaself.pchanged()", "docstring": "Set the alpha value used for blending - not supported on\nall backends\n\nACCEPTS: float (0.0 transparent through 1.0 opaque)", "id": "f17255:c0:m41"} {"signature": "def set_lod(self, on):", "body": "self._lod = onself.pchanged()", "docstring": "Set Level of Detail on or off. 
If on, the artists may examine\nthings like the pixel width of the axes and draw a subset of\ntheir contents accordingly\n\nACCEPTS: [True | False]", "id": "f17255:c0:m42"} {"signature": "def set_visible(self, b):", "body": "self._visible = bself.pchanged()", "docstring": "Set the artist's visiblity.\n\nACCEPTS: [True | False]", "id": "f17255:c0:m43"} {"signature": "def set_animated(self, b):", "body": "self._animated = bself.pchanged()", "docstring": "Set the artist's animation state.\n\nACCEPTS: [True | False]", "id": "f17255:c0:m44"} {"signature": "def update(self, props):", "body": "store = self.eventsonself.eventson = Falsechanged = Falsefor k,v in list(props.items()):func = getattr(self, ''+k, None)if func is None or not callable(func):raise AttributeError(''%k)func(v)changed = Trueself.eventson = storeif changed: self.pchanged()", "docstring": "Update the properties of this :class:`Artist` from the\ndictionary *prop*.", "id": "f17255:c0:m45"} {"signature": "def get_label(self):", "body": "return self._label", "docstring": "Get the label used for this artist in the legend.", "id": "f17255:c0:m46"} {"signature": "def set_label(self, s):", "body": "self._label = sself.pchanged()", "docstring": "Set the label to *s* for auto legend.\n\nACCEPTS: any string", "id": "f17255:c0:m47"} {"signature": "def get_zorder(self):", "body": "return self.zorder", "docstring": "Return the :class:`Artist`'s zorder.", "id": "f17255:c0:m48"} {"signature": "def set_zorder(self, level):", "body": "self.zorder = levelself.pchanged()", "docstring": "Set the zorder for the artist. Artists with lower zorder\nvalues are drawn first.\n\nACCEPTS: any number", "id": "f17255:c0:m49"} {"signature": "def update_from(self, other):", "body": "self._transform = other._transformself._transformSet = other._transformSetself._visible = other._visibleself._alpha = other._alphaself.clipbox = other.clipboxself._clipon = other._cliponself._clippath = other._clippathself._lod = other._lodself._label = other._labelself.pchanged()", "docstring": "Copy properties from *other* to *self*.", "id": "f17255:c0:m50"} {"signature": "def set(self, **kwargs):", "body": "ret = []for k,v in list(kwargs.items()):k = k.lower()funcName = \"\"%kfunc = getattr(self,funcName)ret.extend( [func(v)] )return ret", "docstring": "A tkstyle set command, pass *kwargs* to set properties", "id": "f17255:c0:m51"} {"signature": "def findobj(self, match=None):", "body": "if match is None: def matchfunc(x): return Trueelif cbook.issubclass_safe(match, Artist):def matchfunc(x):return isinstance(x, match)elif callable(match):matchfunc = matchelse:raise ValueError('')artists = []for c in self.get_children():if matchfunc(c):artists.append(c)artists.extend([thisc for thisc in c.findobj(matchfunc) if matchfunc(thisc)])if matchfunc(self):artists.append(self)return artists", "docstring": "pyplot signature:\n findobj(o=gcf(), match=None)\n\nRecursively find all :class:matplotlib.artist.Artist instances\ncontained in self.\n\n*match* can be\n\n - None: return all objects contained in artist (including artist)\n\n - function with signature ``boolean = match(artist)`` used to filter matches\n\n - class instance: eg Line2D. Only return artists of class type\n\n.. 
plot:: mpl_examples/pylab_examples/findobj_demo.py", "id": "f17255:c0:m52"} {"signature": "def __init__(self, o):", "body": "if cbook.iterable(o) and len(o): o = o[]self.oorig = oif not isinstance(o, type):o = type(o)self.o = oself.aliasd = self.get_aliases()", "docstring": "Initialize the artist inspector with an\n:class:`~matplotlib.artist.Artist` or sequence of\n:class:`Artists`. If a sequence is used, we assume it is a\nhomogeneous sequence (all :class:`Artists` are of the same\ntype) and it is your responsibility to make sure this is so.", "id": "f17255:c1:m0"} {"signature": "def get_aliases(self):", "body": "names = [name for name in dir(self.o) if(name.startswith('') or name.startswith(''))and callable(getattr(self.o,name))]aliases = {}for name in names:func = getattr(self.o, name)if not self.is_alias(func): continuedocstring = func.__doc__fullname = docstring[:]aliases.setdefault(fullname[:], {})[name[:]] = Nonereturn aliases", "docstring": "Get a dict mapping *fullname* -> *alias* for each *alias* in\nthe :class:`~matplotlib.artist.ArtistInspector`.\n\nEg., for lines::\n\n {'markerfacecolor': 'mfc',\n 'linewidth' : 'lw',\n }", "id": "f17255:c1:m1"} {"signature": "def get_valid_values(self, attr):", "body": "name = ''%attrif not hasattr(self.o, name):raise AttributeError(''%(self.o,name))func = getattr(self.o, name)docstring = func.__doc__if docstring is None: return ''if docstring.startswith(''):return Nonematch = self._get_valid_values_regex.search(docstring)if match is not None:return match.group().replace('', '')return ''", "docstring": "Get the legal arguments for the setter associated with *attr*.\n\nThis is done by querying the docstring of the function *set_attr*\nfor a line that begins with ACCEPTS:\n\nEg., for a line linestyle, return\n[ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]", "id": "f17255:c1:m2"} {"signature": "def _get_setters_and_targets(self):", "body": "setters = []for name in dir(self.o):if not name.startswith(''): continueo = getattr(self.o, name)if not callable(o): continuefunc = oif self.is_alias(func): continuesource_class = self.o.__module__ + \"\" + self.o.__name__for cls in self.o.mro():if name in cls.__dict__:source_class = cls.__module__ + \"\" + cls.__name__breaksetters.append((name[:], source_class + \"\" + name))return setters", "docstring": "Get the attribute strings and a full path to where the setter\nis defined for all setters in an object.", "id": "f17255:c1:m3"} {"signature": "def get_setters(self):", "body": "return [prop for prop, target in self._get_setters_and_targets()]", "docstring": "Get the attribute strings with setters for object. Eg., for a line,\nreturn ``['markerfacecolor', 'linewidth', ....]``.", "id": "f17255:c1:m4"} {"signature": "def is_alias(self, o):", "body": "ds = o.__doc__if ds is None: return Falsereturn ds.startswith('')", "docstring": "Return *True* if method object *o* is an alias for another\nfunction.", "id": "f17255:c1:m5"} {"signature": "def aliased_name(self, s):", "body": "if s in self.aliasd:return s + ''.join(['' % x for x in list(self.aliasd[s].keys())])else:return s", "docstring": "return 'PROPNAME or alias' if *s* has an alias, else return\nPROPNAME.\n\nE.g. 
for the line markerfacecolor property, which has an\nalias, return 'markerfacecolor or mfc' and for the transform\nproperty, which does not, return 'transform'", "id": "f17255:c1:m6"} {"signature": "def aliased_name_rest(self, s, target):", "body": "if s in self.aliasd:aliases = ''.join(['' % x for x in list(self.aliasd[s].keys())])else:aliases = ''return '' % (s, target, aliases)", "docstring": "return 'PROPNAME or alias' if *s* has an alias, else return\nPROPNAME formatted for ReST\n\nE.g. for the line markerfacecolor property, which has an\nalias, return 'markerfacecolor or mfc' and for the transform\nproperty, which does not, return 'transform'", "id": "f17255:c1:m7"} {"signature": "def pprint_setters(self, prop=None, leadingspace=):", "body": "if leadingspace:pad = ''*leadingspaceelse:pad = ''if prop is not None:accepts = self.get_valid_values(prop)return '' %(pad, prop, accepts)attrs = self._get_setters_and_targets()attrs.sort()lines = []for prop, path in attrs:accepts = self.get_valid_values(prop)name = self.aliased_name(prop)lines.append('' %(pad, name, accepts))return lines", "docstring": "If *prop* is *None*, return a list of strings of all settable properies\nand their valid values.\n\nIf *prop* is not *None*, it is a valid property name and that\nproperty will be returned as a string of property : valid\nvalues.", "id": "f17255:c1:m8"} {"signature": "def pprint_setters_rest(self, prop=None, leadingspace=):", "body": "if leadingspace:pad = ''*leadingspaceelse:pad = ''if prop is not None:accepts = self.get_valid_values(prop)return '' %(pad, prop, accepts)attrs = self._get_setters_and_targets()attrs.sort()lines = []names = [self.aliased_name_rest(prop, target) for prop, target in attrs]accepts = [self.get_valid_values(prop) for prop, target in attrs]col0_len = max([len(n) for n in names])col1_len = max([len(a) for a in accepts])table_formatstr = pad + ''*col0_len + '' + ''*col1_lenlines.append('')lines.append(table_formatstr)lines.append(pad + ''.ljust(col0_len+) +''.ljust(col1_len))lines.append(table_formatstr)lines.extend([pad + n.ljust(col0_len+) + a.ljust(col1_len)for n, a in zip(names, accepts)])lines.append(table_formatstr)lines.append('')return linesfor prop, path in attrs:accepts = self.get_valid_values(prop)name = self.aliased_name_rest(prop, path)lines.append('' %(pad, name, accepts))return lines", "docstring": "If *prop* is *None*, return a list of strings of all settable properies\nand their valid values. 
Format the output for ReST\n\nIf *prop* is not *None*, it is a valid property name and that\nproperty will be returned as a string of property : valid\nvalues.", "id": "f17255:c1:m9"} {"signature": "def pprint_getters(self):", "body": "o = self.ooriggetters = [name for name in dir(o)if name.startswith('')and callable(getattr(o, name))]getters.sort()lines = []for name in getters:func = getattr(o, name)if self.is_alias(func): continuetry: val = func()except: continueif getattr(val, '', ()) != () and len(val)>:s = str(val[:]) + ''else:s = str(val)s = s.replace('', '')if len(s)>:s = s[:] + ''name = self.aliased_name(name[:])lines.append('' %(name, s))return lines", "docstring": "Return the getters and actual values as list of strings.", "id": "f17255:c1:m10"} {"signature": "def findobj(self, match=None):", "body": "if match is None: def matchfunc(x): return Trueelif issubclass(match, Artist):def matchfunc(x):return isinstance(x, match)elif callable(match):matchfunc = funcelse:raise ValueError('')artists = []for c in self.get_children():if matchfunc(c):artists.append(c)artists.extend([thisc for thisc in c.findobj(matchfunc) if matchfunc(thisc)])if matchfunc(self):artists.append(self)return artists", "docstring": "Recursively find all :class:`matplotlib.artist.Artist`\ninstances contained in *self*.\n\nIf *match* is not None, it can be\n\n - function with signature ``boolean = match(artist)``\n\n - class instance: eg :class:`~matplotlib.lines.Line2D`\n\nused to filter matches.", "id": "f17255:c1:m11"} {"signature": "def _allSubclasses(cls):", "body": "return cls.__subclasses__() + [g for s in cls.__subclasses__() for g in _allSubclasses(s)]", "docstring": "Get all subclasses\n:param cls: The class to get subclasses from\n:return: list with all subclasses", "id": "f17263:m0"} {"signature": "def _getAttributes(obj):", "body": "if isinstance(obj, dict):attrs = objelif hasattr(obj, \"\"):attrs = {attr: getattr(obj, attr) for attr in obj.__slots__}elif hasattr(obj, \"\"):attrs = obj.__dict__testParams = SERIALIZABLE_SUBCLASSES[obj.__class__.__name__]if \"\" in testParams:for f in testParams[\"\"]:if f in attrs:del attrs[f]return attrs", "docstring": "Get all attributes of the given object", "id": "f17263:m1"} {"signature": "def _remove(fname):", "body": "if os.path.isfile(fname):os.remove(fname)", "docstring": "Clean up function used to delete files created by the test\n:param fname: File to be deleted\n:return:", "id": "f17263:m2"} {"signature": "def customAssertArrayEquals(self, a1, a2, msg=None):", "body": "numpy.testing.assert_equal(a1, a2, msg)", "docstring": "Function used by `addTypeEqualityFunc` comparing numpy arrays", "id": "f17263:c0:m0"} {"signature": "def customAssertSequenceEquals(self, l1, l2, msg=None):", "body": "self.assertEqual(len(l1), len(l2), msg)for i in range(len(l1)):first = l1[i]second = l2[i]if type(first).__name__ in SERIALIZABLE_SUBCLASSES:first = _getAttributes(first)second = _getAttributes(second)self.assertEqual(first, second, msg)", "docstring": "Function used by `addTypeEqualityFunc` comparing sequences", "id": "f17263:c0:m1"} {"signature": "def customAssertDictEquals(self, d1, d2, msg=None):", "body": "self.assertIsInstance(d1, dict, '')self.assertIsInstance(d2, dict, '')self.assertEqual(len(d1), len(d2), msg + str(d1) + '' + str(d2))for k, _ in list(d1.items()):if k not in d2:raise AssertionError(repr(k))first = d1[k]second = d2[k]if type(first).__name__ in SERIALIZABLE_SUBCLASSES:first = _getAttributes(first)second = _getAttributes(second)self.assertEqual(first, 
second, '' % (k, msg))", "docstring": "Function used by `addTypeEqualityFunc` comparing dicts", "id": "f17263:c0:m2"} {"signature": "def overlapsForRelativeAreas(n, w, initPosition, initRadius, dPosition=None,dRadius=, num=, verbose=False):", "body": "encoder = CoordinateEncoder(name=\"\", n=n, w=w)overlaps = np.empty(num)outputA = encode(encoder, np.array(initPosition), initRadius)for i in range(num):newPosition = initPosition if dPosition is None else (initPosition + (i + ) * dPosition)newRadius = initRadius + (i + ) * dRadiusoutputB = encode(encoder, newPosition, newRadius)overlaps[i] = overlap(outputA, outputB)if verbose:print()print((\"\"\"\"\"\").format(n, w, initPosition, initRadius, dPosition, dRadius))print(\"\".format(np.average(overlaps)))print(\"\".format(np.max(overlaps)))return overlaps", "docstring": "Return overlaps between an encoding and other encodings relative to it\n\n:param n: the size of the encoder output\n:param w: the number of active bits in the encoder output\n:param initPosition: the position of the first encoding\n:param initRadius: the radius of the first encoding\n:param dPosition: the offset to apply to each subsequent position\n:param dRadius: the offset to apply to each subsequent radius\n:param num: the number of encodings to generate\n:param verbose: whether to print verbose output", "id": "f17266:m2"} {"signature": "def overlapsForUnrelatedAreas(n, w, radius, repetitions=, verbose=False):", "body": "return overlapsForRelativeAreas(n, w, np.array([, ]), radius,dPosition=np.array([, radius * ]),num=repetitions, verbose=verbose)", "docstring": "Return overlaps between an encoding and other, unrelated encodings", "id": "f17266:m3"} {"signature": "def computeOverlap(x, y):", "body": "return (x & y).sum()", "docstring": "Given two binary arrays, compute their overlap. The overlap is the number\nof bits where x[i] and y[i] are both 1", "id": "f17269:m0"} {"signature": "def validateEncoder(encoder, subsampling):", "body": "for i in range(encoder.minIndex, encoder.maxIndex+, ):for j in range(i+, encoder.maxIndex+, subsampling):if not encoder._overlapOK(i, j):return Falsereturn True", "docstring": "Given an encoder, calculate overlaps statistics and ensure everything is ok.\nWe don't check every possible combination for speed reasons.", "id": "f17269:m1"} {"signature": "def _printOneTrainingVector(x):", "body": "print(''.join('' if k != else '' for k in x))", "docstring": "Print a single vector succinctly.", "id": "f17280:m0"} {"signature": "def _getSimplePatterns(numOnes, numPatterns):", "body": "numCols = numOnes * numPatternsp = []for i in range(numPatterns):x = np.zeros(numCols, dtype='')x[i*numOnes:(i + )*numOnes] = p.append(x)return p", "docstring": "Very simple patterns. Each pattern has numOnes consecutive\n bits on. There are numPatterns*numOnes bits in the vector. 
These patterns\n are used as elements of sequences when building up a training set.", "id": "f17280:m1"} {"signature": "def _createTms(numCols):", "body": "minThreshold = activationThreshold = newSynapseCount = initialPerm = connectedPerm = permanenceInc = permanenceDec = globalDecay = cellsPerColumn = cppTm = BacktrackingTMCPP(numberOfCols=numCols,cellsPerColumn=cellsPerColumn,initialPerm=initialPerm,connectedPerm=connectedPerm,minThreshold=minThreshold,newSynapseCount=newSynapseCount,permanenceInc=permanenceInc,permanenceDec=permanenceDec,activationThreshold=activationThreshold,globalDecay=globalDecay, burnIn=,seed=_SEED, verbosity=VERBOSITY,checkSynapseConsistency=True,pamLength=)cppTm.retrieveLearningStates = TruepyTm = BacktrackingTM(numberOfCols=numCols,cellsPerColumn=cellsPerColumn,initialPerm=initialPerm,connectedPerm=connectedPerm,minThreshold=minThreshold,newSynapseCount=newSynapseCount,permanenceInc=permanenceInc,permanenceDec=permanenceDec,activationThreshold=activationThreshold,globalDecay=globalDecay, burnIn=,seed=_SEED, verbosity=VERBOSITY,pamLength=)return cppTm, pyTm", "docstring": "Create two instances of temporal poolers (backtracking_tm.py\n and backtracking_tm_cpp.py) with identical parameter settings.", "id": "f17280:m2"} {"signature": "def _basicTest(self, tm=None):", "body": "trainingSet = _getSimplePatterns(, )for _ in range():for seq in trainingSet[:]:for _ in range():tm.learn(seq)tm.reset()print(\"\")print(\"\")tm.collectStats = Truefor seq in trainingSet[:]:tm.reset()tm.resetStats()for _ in range():tm.infer(seq)if VERBOSITY > :print()_printOneTrainingVector(seq)tm.printStates(False, False)print()print()if VERBOSITY > :print(tm.getStats())self.assertGreater(tm.getStats()[''], )print((\"\",tm.getStats()['']))print(\"\")", "docstring": "Test creation, pickling, and basic run of learning and inference.", "id": "f17280:c0:m1"} {"signature": "def _getDateList(numSamples, startDatetime):", "body": "dateList = []td = datetime.timedelta(minutes=)curDate = startDatetime + tdfor _ in range(numSamples):dateList.append(curDate)curDate = curDate + tdreturn dateList", "docstring": "Generate a sequence of sample dates starting at startDatetime and incrementing\nevery minute.", "id": "f17282:m0"} {"signature": "@staticmethoddef _addSampleData(origData=None, numSamples=, spikeValue=,spikePeriod=):", "body": "if origData is None:origData = []if len(origData) > :lastDate = origData[-][]else:lastDate = datetime.datetime(, , )dateList = _getDateList(numSamples, lastDate)data = copy.copy(origData)for idx, date in enumerate(dateList):if (spikePeriod > ) and ( (idx + ) % spikePeriod == ):data.append([date, idx, spikeValue])else:data.append([date, idx, ])return data", "docstring": "Add sample anomaly data to the existing data list and return it.\nNote: this does not modify the original data list\nNote 2: here we just add in increasing integers as the metric value", "id": "f17282:c0:m1"} {"signature": "def runSideBySide(self, params, seed = None,learnMode = None,convertEveryIteration = False):", "body": "randomState = getNumpyRandomGenerator(seed)cppSp = CreateSP(\"\", params)pySp = CreateSP(\"\", params)self.compare(pySp, cppSp)numColumns = pySp.getNumColumns()numInputs = pySp.getNumInputs()threshold = inputMatrix = (randomState.rand(numRecords,numInputs) > threshold).astype(uintType)for i in range(numRecords):if learnMode is None:learn = (randomState.rand() > )else:learn = learnModeif self.verbosity > :print(\"\",i,\"\",learn)PyActiveArray = 
numpy.zeros(numColumns).astype(uintType)CppActiveArray = numpy.zeros(numColumns).astype(uintType)inputVector = inputMatrix[i,:]pySp.compute(inputVector, learn, PyActiveArray)cppSp.compute(inputVector, learn, CppActiveArray)self.assertListEqual(list(PyActiveArray), list(CppActiveArray))self.compare(pySp,cppSp)cppBoostFactors = numpy.zeros(numColumns, dtype=realType)cppSp.getBoostFactors(cppBoostFactors)pySp.setBoostFactors(cppBoostFactors)if convertEveryIteration or ((i+)% == ):convertPermanences(pySp, cppSp)", "docstring": "Run the PY and CPP implementations side by side on random inputs.\nIf seed is None a random seed will be chosen based on time, otherwise\nthe fixed seed will be used.\n\nIf learnMode is None learning will be randomly turned on and off.\nIf it is False or True then set it accordingly.\n\nIf convertEveryIteration is True, the CPP will be copied from the PY\ninstance on every iteration just before each compute.", "id": "f17283:c0:m3"} {"signature": "def frequency(self,n=,w=,columnDimensions = ,numActiveColumnsPerInhArea = ,stimulusThreshold = ,spSeed = ,spVerbosity = ,numColors = ,seed=,minVal=,maxVal=,encoder = '',forced=True):", "body": "print(\"\")print(encoder, '', '', seed, '', numColors, '')spImpl = SpatialPooler(columnDimensions=(columnDimensions, ),inputDimensions=(, n),potentialRadius=n/,numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,spVerbosity=spVerbosity,stimulusThreshold=stimulusThreshold,potentialPct=,seed=spSeed,globalInhibition=True,)rnd.seed(seed)numpy.random.seed(seed)colors = []coincs = []reUsedCoincs = []spOutput = []patterns = set([])if encoder=='':enc = scalar.ScalarEncoder(name='', w=w, n=n, minval=minVal,maxval=maxVal, periodic=False, forced=True) for y in range(numColors):temp = enc.encode(rnd.random()*maxVal)colors.append(numpy.array(temp, dtype=numpy.uint32))else:for y in range(numColors):sdr = numpy.zeros(n, dtype=numpy.uint32)sdr[rnd.sample(range(n), w)] = colors.append(sdr)print('', numColors, '')startTime = time.time()for i in range(numColors):spInput = colors[i]onCells = numpy.zeros(columnDimensions, dtype=numpy.uint32)spImpl.compute(spInput, True, onCells)spOutput.append(onCells.tolist())activeCoincIndices = set(onCells.nonzero()[])reUsed = activeCoincIndices.intersection(patterns)if len(reUsed) == :coincs.append((i, activeCoincIndices, colors[i]))else:reUsedCoincs.append((i, activeCoincIndices, colors[i]))patterns.update(activeCoincIndices)if (i + ) % == :print('', i + )print(\"\" % (time.time() - startTime))print(len(reUsedCoincs), \"\")summ = []for z in coincs:summ.append(sum([len(z[].intersection(y[])) for y in reUsedCoincs]))zeros = len([x for x in summ if x==])factor = max(summ)*len(summ)/sum(summ)if len(reUsed) < :self.assertLess(factor, ,\"\" % (factor, ))self.assertLess(zeros, *len(summ),\"\" % (zeros, *len(summ)))else:self.assertLess(factor, ,\"\" % (factor, ))self.assertLess(zeros, ,\"\" % (zeros, ))", "docstring": "Helper function that tests whether the SP predicts the most\n frequent record", "id": "f17285:c0:m4"} {"signature": "def basicComputeLoop(self, imp, params, inputSize, columnDimensions,seed = None):", "body": "sp = CreateSP(imp,params)numRecords = randomState = getNumpyRandomGenerator(seed)inputMatrix = (randomState.rand(numRecords,inputSize) > ).astype(uintType)y = numpy.zeros(columnDimensions, dtype = uintType)dutyCycles = numpy.zeros(columnDimensions, dtype = uintType)for v in inputMatrix:y.fill()sp.compute(v, True, 
y)self.assertEqual(sp.getNumActiveColumnsPerInhArea(),y.sum())self.assertEqual(,y.min())self.assertEqual(,y.max())for v in inputMatrix:y.fill()sp.compute(v, False, y)self.assertEqual(sp.getNumActiveColumnsPerInhArea(),y.sum())self.assertEqual(,y.min())self.assertEqual(,y.max())", "docstring": "Feed in some vectors and retrieve outputs. Ensure the right number of\ncolumns win, that we always get binary outputs, and that nothing crashes.", "id": "f17286:c0:m0"} {"signature": "def assertTMsEqual(self, tm1, tm2):", "body": "self.assertEqual(tm1, tm2, tm1.diff(tm2))self.assertTrue(fdrutilities.tmDiff2(tm1, tm2, , False))", "docstring": "Asserts that two TM instances are the same.\n\n This is temporarily disabled since it does not work with the C++\n implementation of the TM.", "id": "f17287:c0:m9"} {"signature": "@staticmethoddef generateSequence(n=, numCols=, minOnes=, maxOnes=):", "body": "return [None] + [BacktrackingTMTest.generatePattern(numCols, minOnes,maxOnes)for _ in range(n)]", "docstring": "Generates a sequence of n patterns.", "id": "f17287:c0:m10"} {"signature": "@staticmethoddef generatePattern(numCols=, minOnes=, maxOnes=):", "body": "assert minOnes < maxOnesassert maxOnes < numColsnOnes = random.randint(minOnes, maxOnes)ind = random.sample(range(numCols), nOnes)x = numpy.zeros(numCols, dtype='')x[ind] = return x", "docstring": "Generate a single test pattern with given parameters.\n\n Parameters:\n numCols: Number of columns in each pattern.\n minOnes: The minimum number of 1's in each pattern.\n maxOnes: The maximum number of 1's in each pattern.", "id": "f17287:c0:m11"} {"signature": "def _computeOverlap(x, y):", "body": "return ((x + y) == ).sum()", "docstring": "Given two binary arrays, compute their overlap. The overlap is the number\nof bits where x[i] and y[i] are both 1", "id": "f17289:m0"} {"signature": "def _areAllSDRsUnique(sdrDict):", "body": "for k1, v1 in sdrDict.items():for k2, v2 in sdrDict.items():if (k2 != k1) and ((v1 == v2).sum() == v1.size):return Falsereturn True", "docstring": "Return True iff all the SDR's in the dict are unique.", "id": "f17289:m1"} {"signature": "def setUp(self):", "body": "self.inputSize = self.columnDimensions = self.x = numpy.zeros((, self.inputSize), dtype=uintType)self.x[, :] = self.x[, :] = self.x[, :] = self.x[, :] = self.x[, :] = self.winningIteration = numpy.zeros(self.columnDimensions)self.lastSDR = {}self.spImplementation = \"\"self.sp = Noneself.params = {'': [self.inputSize],'': [self.columnDimensions],'': self.inputSize,'': ,'': True,'': ,'': ,'': ,'': ,'': ,'': SEED,}print(\"\", self.params[''])", "docstring": "Set various constants. 
Create the input patterns and the spatial pooler", "id": "f17289:c0:m0"} {"signature": "def debugPrint(self):", "body": "activeDutyCycle = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())self.sp.getActiveDutyCycles(activeDutyCycle)boost = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())self.sp.getBoostFactors(boost)print(\"\", (self.sp.getIterationNum() ),\"\")print(\"\", self.spImplementation)print(\"\", end='')print(\"\", (activeDutyCycle.max(), activeDutyCycle.min() ))print(\"\", (activeDutyCycle[activeDutyCycle>].mean() ))print(\"\", activeDutyCycle)print()print(\"\", boost)print()print(\"\")print(self.winningIteration)print(\"\", (self.columnDimensions - (self.winningIteration==).sum() ))", "docstring": "Helpful debug print statements while debugging this test.", "id": "f17289:c0:m1"} {"signature": "def verifySDRProperties(self):", "body": "self.assertTrue(_areAllSDRsUnique(self.lastSDR), \"\")self.assertGreater(_computeOverlap(self.lastSDR[], self.lastSDR[]), ,\"\")for i in [, , ]:for j in range():if (i!=j):self.assertLess(_computeOverlap(self.lastSDR[i], self.lastSDR[j]),, \"\")", "docstring": "Verify that all SDRs have the properties desired for this test.\n\nThe bounds for checking overlap are set fairly loosely here since there is\nsome variance due to randomness and the artificial parameters used in this\ntest.", "id": "f17289:c0:m2"} {"signature": "def boostTestLoop(self, imp):", "body": "self.sp = CreateSP(imp, self.params)self.spImplementation = impself.winningIteration.fill()self.lastSDR = {}self.boostTestPhase1()self.boostTestPhase2()self.boostTestPhase3()self.boostTestPhase4()", "docstring": "Main test loop.", "id": "f17289:c0:m7"} {"signature": "def _sampleDistribution(params, numSamples, verbosity=):", "body": "if \"\" in params:if params[\"\"] == \"\":samples = numpy.random.normal(loc=params[\"\"],scale=math.sqrt(params[\"\"]),size=numSamples)elif params[\"\"] == \"\":samples = numpy.random.pareto(params[\"\"], size=numSamples)elif params[\"\"] == \"\":samples = numpy.random.beta(a=params[\"\"], b=params[\"\"],size=numSamples)else:raise ValueError(\"\" + params[\"\"])else:raise ValueError(\"\" + str(params))if verbosity > :print(\"\", params)print(\"\", numpy.mean(samples),\"\", numpy.var(samples), \"\", math.sqrt(numpy.var(samples)))return samples", "docstring": "Given the parameters of a distribution, generate numSamples points from it.\nThis routine is mostly for testing.\n\n:returns: A numpy array of samples.", "id": "f17291:m0"} {"signature": "def _generateSampleData(mean=, variance=, metricMean=,metricVariance=):", "body": "data = []p = {\"\": mean,\"\": \"\",\"\": math.sqrt(variance),\"\": variance}samples = _sampleDistribution(p, )p = {\"\": metricMean,\"\": \"\",\"\": math.sqrt(metricVariance),\"\": metricVariance}metricValues = _sampleDistribution(p, )for hour in range(, ):for minute in range(, ):data.append([datetime.datetime(, , , hour, minute, ),metricValues[hour * + minute],samples[hour * + minute],])return data", "docstring": "Generate 1440 samples of fake metrics data with a particular distribution\nof anomaly scores and metric values. 
Here we generate values every minute.", "id": "f17291:m1"} {"signature": "def checkCell0(tm):", "body": "for c in range(tm.numberOfCols):assert tm.getNumSegmentsInCell(c, ) == ", "docstring": "Check that cell 0 has no incoming segments", "id": "f17295:m0"} {"signature": "def setVerbosity(verbosity, tm, tmPy):", "body": "tm.cells4.setVerbosity(verbosity)tm.verbosity = verbositytmPy.verbosity = verbosity", "docstring": "Set verbosity levels of the TM's", "id": "f17295:m1"} {"signature": "def basicTest(self):", "body": "tm = BacktrackingTMCPP(numberOfCols=, cellsPerColumn=,initialPerm=, connectedPerm= ,minThreshold=, newSynapseCount=,permanenceInc=, permanenceDec= ,permanenceMax=, globalDecay=,activationThreshold=, doPooling=False,segUpdateValidDuration=, seed=SEED,verbosity=VERBOSITY)tm.retrieveLearningStates = Truetm.makeCells4Ephemeral = Falsepickle.dump(tm, open(\"\", \"\"))tm2 = pickle.load(open(\"\"))self.assertTrue(fdrutils.tmDiff2(tm, tm2, VERBOSITY, checkStates=False))for i in range():x = numpy.zeros(tm.numberOfCols, dtype='')_RGEN.initializeUInt32Array(x, )tm.learn(x)tm.reset()tm.makeCells4Ephemeral = Falsepickle.dump(tm, open(\"\", \"\"))tm2 = pickle.load(open(\"\"))self.assertTrue(fdrutils.tmDiff2(tm, tm2, VERBOSITY))patterns = numpy.zeros((, tm.numberOfCols), dtype='')for i in range():_RGEN.initializeUInt32Array(patterns[i], )for i in range():x = numpy.zeros(tm.numberOfCols, dtype='')_RGEN.initializeUInt32Array(x, )tm.infer(x)if i > :tm._checkPrediction(patterns)", "docstring": "Basic test (creation, pickling, basic run of learning and inference)", "id": "f17295:c0:m0"} {"signature": "def _createNetwork():", "body": "network = Network()network.addRegion('', '', '')network.addRegion('', '', '')_createSensorToClassifierLinks(network, '', '')sensorRegion = network.regions[''].getSelf()encoderParams = {'': {'': '','': ,'': ,'': '','': ''}}encoder = MultiEncoder()encoder.addMultipleEncoders(encoderParams)sensorRegion.encoder = encodertestDir = os.path.dirname(os.path.abspath(__file__))inputFile = os.path.join(testDir, '', '')dataSource = FileRecordStream(streamID=inputFile)sensorRegion.dataSource = dataSourcenetwork.regions[''].setParameter('', '')return network", "docstring": "Create a network with a RecordSensor region and a SDRClassifier region", "id": "f17305:m0"} {"signature": "def _createSensorToClassifierLinks(network, sensorRegionName,classifierRegionName):", "body": "network.link(sensorRegionName, classifierRegionName, '', '',srcOutput='', destInput='')network.link(sensorRegionName, classifierRegionName, '', '',srcOutput='', destInput='')network.link(sensorRegionName, classifierRegionName, '', '',srcOutput='', destInput='')network.link(sensorRegionName, classifierRegionName, '', '',srcOutput='', destInput='')", "docstring": "Create links from sensor region to classifier region.", "id": "f17305:m1"} {"signature": "def _createNetwork():", "body": "network = Network()network.addRegion('', '', '')sensorRegion = network.regions[''].getSelf()encoderParams = {'': {'': '','': ,'': ,'': '','': ''}}encoder = MultiEncoder()encoder.addMultipleEncoders(encoderParams)sensorRegion.encoder = encodertestDir = os.path.dirname(os.path.abspath(__file__))inputFile = os.path.join(testDir, '', '')dataSource = FileRecordStream(streamID=inputFile)sensorRegion.dataSource = dataSourcenetwork.regions[''].setParameter('', '')return network", "docstring": "Create network with one RecordSensor region.", "id": "f17306:m0"} {"signature": "def setUp(self):", "body": "self.files = {}with 
tempfile.NamedTemporaryFile(prefix='', delete=False) as outp:self.addCleanup(os.remove, outp.name)with open(resource_filename(__name__, '')) as inp:outp.write(inp.read())self.files[''] = outp.namewith tempfile.NamedTemporaryFile(prefix='', delete=False) as outp:self.addCleanup(os.remove, outp.name)with open(resource_filename(__name__, '')) as inp:outp.write(inp.read())self.files[''] = outp.name", "docstring": "configuration.Configuration relies on static methods\n which load files by name. Since we need to be able to run tests and\n potentially change the content of those files between tests without\n interfering with one another and with the system configuration, this\n setUp() function will allocate temporary files used only during the using\n conf/nupic-default.xml and conf/nupic-site.xml (relative to the unit tests)\n as templates.", "id": "f17311:c0:m0"} {"signature": "def mockSleepTime(self, mockTime, mockSleep):", "body": "class _TimeContainer(object):accumulatedTime = def testTime():return _TimeContainer.accumulatedTimedef testSleep(duration):_TimeContainer.accumulatedTime += durationmockTime.side_effect = testTimemockSleep.side_effect = testSleep", "docstring": "Configures mocks for time.time and time.sleep such that every call\n to time.sleep(x) increments the return value of time.time() by x.\n\n mockTime: time.time mock\n mockSleep: time.sleep mock", "id": "f17312:c2:m0"} {"signature": "def _getTempFileName():", "body": "handle = tempfile.NamedTemporaryFile(prefix='', suffix='', dir='')filename = handle.namehandle.close()return filename", "docstring": "Creates unique file name that starts with 'test' and ends with '.txt'.", "id": "f17319:m0"} {"signature": "def setUp(self):", "body": "self.interpreter = SafeInterpreter(writer=io.BytesIO())", "docstring": "Set up an interpreter directing output to a BytesIO stream.", "id": "f17337:c0:m0"} {"signature": "def _getPredictionsGenerator(examplesDir, exampleName):", "body": "sys.path.insert(, os.path.join(examplesDir, exampleName))modName = \"\" % exampleNamemod = __import__(modName, fromlist=[\"\"])return getattr(mod, \"\")", "docstring": "Get predictions generator for one of the quick-start example. \n\n.. note::\n\n The examples are not part of the nupic package so we need to manually \n append the example module path to syspath.\n\n:param examplesDir: \n (str) path to the example parent directory.\n:param exampleName: \n (str) name of the example. 
E.g: \"opf\", \"network\", \"algo\".\n:return predictionsGenerator: \n (function) predictions generator functions.", "id": "f17338:m0"} {"signature": "@classmethoddef setUpClass(cls):", "body": "for example in cls.examples:predictionGenerator = _getPredictionsGenerator(cls.examplesDir, example)for prediction in predictionGenerator(MAX_PREDICTIONS):cls.oneStepPredictions[example].append(prediction[])cls.oneStepConfidences[example].append(prediction[])cls.fiveStepPredictions[example].append(prediction[])cls.fiveStepConfidences[example].append(prediction[])", "docstring": "Get the predictions and prediction confidences for all examples.", "id": "f17338:c0:m0"} {"signature": "def createEncoder():", "body": "consumption_encoder = ScalarEncoder(, , , n=, name=\"\",clipInput=True)time_encoder = DateEncoder(timeOfDay=(, ), name=\"\")encoder = MultiEncoder()encoder.addEncoder(\"\", consumption_encoder)encoder.addEncoder(\"\", time_encoder)return encoder", "docstring": "Create the encoder instance for our test and return it.", "id": "f17342:m0"} {"signature": "def createNetwork(dataSource, enableTP=False, temporalImp=\"\"):", "body": "network = Network()network.addRegion(\"\", \"\",json.dumps({\"\": _VERBOSITY}))sensor = network.regions[\"\"].getSelf()sensor.encoder = createEncoder()sensor.dataSource = dataSourceSP_PARAMS[\"\"] = sensor.encoder.getWidth()network.addRegion(\"\", \"\", json.dumps(SP_PARAMS))network.link(\"\", \"\", \"\", \"\")network.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")network.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")network.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")if enableTP:TM_PARAMS[\"\"] = temporalImpnetwork.addRegion(\"\", \"\",json.dumps(TM_PARAMS))network.link(\"\", \"\", \"\", \"\")network.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")spatialPoolerRegion = network.regions[\"\"]spatialPoolerRegion.setParameter(\"\", True)spatialPoolerRegion.setParameter(\"\", False)if enableTP:temporalPoolerRegion = network.regions[\"\"]temporalPoolerRegion.setParameter(\"\", True)temporalPoolerRegion.setParameter(\"\", True)temporalPoolerRegion.setParameter(\"\", True)temporalPoolerRegion.setParameter(\"\", True)return network", "docstring": "Create the Network instance.\n\n The network has a sensor region reading data from `dataSource` and passing\n the encoded representation to an SPRegion. 
The SPRegion output is passed to\n a TMRegion.\n\n :param dataSource: a RecordStream instance to get data from\n :returns: a Network instance ready to run", "id": "f17342:m1"} {"signature": "def generatePattern(numCols = ,minOnes =,maxOnes =,colSet = [],prevPattern =numpy.array([])):", "body": "assert minOnes < maxOnesassert maxOnes < numColsnOnes = rgen.randint(minOnes, maxOnes)candidates = list(colSet.difference(set(prevPattern.nonzero()[])))rgen.shuffle(candidates)ind = candidates[:nOnes]x = numpy.zeros(numCols, dtype='')x[ind] = return x", "docstring": "Generate a single test pattern with given parameters.\n\n Parameters:\n --------------------------------------------\n numCols: Number of columns in each pattern.\n minOnes: The minimum number of 1's in each pattern.\n maxOnes: The maximum number of 1's in each pattern.\n colSet: The set of column indices for the pattern.\n prevPattern: Pattern to avoid (null intersection).", "id": "f17345:m2"} {"signature": "def buildTrainingSet(numSequences = ,sequenceLength = ,pctShared = ,seqGenMode = '',subsequenceStartPos = ,numCols = ,minOnes=,maxOnes = ,disjointConsecutive =True):", "body": "colSet = set(range(numCols))if '' in seqGenMode:assert '' in seqGenMode and '' not in seqGenModeif '' in seqGenMode or numSequences == :pctShared = if '' not in seqGenMode and '' not in seqGenMode:sharedSequenceLength = int(pctShared*sequenceLength)elif '' in seqGenMode:sharedSequenceLength = else:sharedSequenceLength = assert sharedSequenceLength + subsequenceStartPos < sequenceLengthsharedSequence = []for i in range(sharedSequenceLength):if disjointConsecutive and i > :x = generatePattern(numCols, minOnes, maxOnes, colSet, sharedSequence[i-])else:x = generatePattern(numCols, minOnes, maxOnes, colSet)sharedSequence.append(x)trainingSequences = []if '' not in seqGenMode:trailingLength = sequenceLength - sharedSequenceLength - subsequenceStartPoselse:trailingLength = sequenceLength - sharedSequenceLengthfor k,s in enumerate(range(numSequences)):if len(trainingSequences) > and '' in seqGenMode:r = list(range(subsequenceStartPos))+ list(range(subsequenceStartPos + sharedSequenceLength, sequenceLength))rgen.shuffle(r)r = r[:subsequenceStartPos]+ list(range(subsequenceStartPos, subsequenceStartPos + sharedSequenceLength))+ r[subsequenceStartPos:]sequence = [trainingSequences[k-][j] for j in r]else:sequence = []if '' not in seqGenMode:for i in range(subsequenceStartPos):if disjointConsecutive and i > :x = generatePattern(numCols, minOnes, maxOnes, colSet, sequence[i-])else:x = generatePattern(numCols, minOnes, maxOnes, colSet)sequence.append(x)if '' in seqGenMode and '' not in seqGenMode:sequence.extend(sharedSequence)for i in range(trailingLength):if disjointConsecutive and i > :x = generatePattern(numCols, minOnes, maxOnes, colSet, sequence[i-])else:x = generatePattern(numCols, minOnes, maxOnes, colSet)sequence.append(x)assert len(sequence) == sequenceLengthtrainingSequences.append(sequence)assert len(trainingSequences) == numSequencesif VERBOSITY >= :print(\"\")pprint.pprint(trainingSequences)if sharedSequenceLength > :return (trainingSequences, subsequenceStartPos + sharedSequenceLength)else:return (trainingSequences, -)", "docstring": "Build random high order test sequences.\n\n Parameters:\n --------------------------------------------\n numSequences: The number of sequences created.\n sequenceLength: The length of each sequence.\n pctShared: The percentage of sequenceLength that is shared across\n every sequence. 
If sequenceLength is 100 and pctShared\n is 0.2, then a subsequence consisting of 20 patterns\n will be in every sequence. Can also be the keyword\n 'one pattern', in which case a single time step is shared.\n seqGenMode: What kind of sequence to generate. If contains 'shared'\n generates shared subsequence. If contains 'no shared',\n does not generate any shared subsequence. If contains\n 'shuffle', will use common patterns shuffle among the\n different sequences. If contains 'beginning', will\n place shared subsequence at the beginning.\n subsequenceStartPos: The position where the shared subsequence starts\n numCols: Number of columns in each pattern.\n minOnes: The minimum number of 1's in each pattern.\n maxOnes: The maximum number of 1's in each pattern.\n disjointConsecutive: Whether to generate disjoint consecutive patterns or not.", "id": "f17345:m3"} {"signature": "def getSimplePatterns(numOnes, numPatterns):", "body": "numCols = numOnes * numPatternsp = []for i in range(numPatterns):x = numpy.zeros(numCols, dtype='')x[i*numOnes:(i+)*numOnes] = p.append(x)return p", "docstring": "Very simple patterns. Each pattern has numOnes consecutive\n bits on. There are numPatterns*numOnes bits in the vector.", "id": "f17345:m4"} {"signature": "def buildSimpleTrainingSet(numOnes=):", "body": "numPatterns = p = getSimplePatterns(numOnes, numPatterns)s1 = [p[], p[], p[], p[], p[], p[], p[] ]s2 = [p[], p[], p[], p[], p[], p[], p[]]trainingSequences = [s1, s2]return (trainingSequences, )", "docstring": "Two very simple high order sequences for debugging. Each pattern in the\n sequence has a series of 1's in a specific set of columns.", "id": "f17345:m5"} {"signature": "def buildAlternatingTrainingSet(numOnes=):", "body": "numPatterns = p = getSimplePatterns(numOnes, numPatterns)s1 = [p[], p[], p[], p[], p[], p[]]s2 = [p[], p[], p[], p[], p[], p[]]s3 = [p[], p[], p[], p[], p[], p[]]s4 = [p[], p[], p[], p[], p[], p[]]trainingSequences = [s1, s2, s3, s4]return (trainingSequences, )", "docstring": "High order sequences that alternate elements. Pattern i has one's in\n i*numOnes to (i+1)*numOnes.\n\n The sequences are:\n A B A B A C\n A B A B D E\n A B F G H I\n A J K L M N", "id": "f17345:m6"} {"signature": "def buildHL0aTrainingSet(numOnes=):", "body": "numPatterns = p = getSimplePatterns(numOnes, numPatterns)s = []s.append(p[rgen.randint(,)])for i in range():s.append(p[rgen.randint(,)])s.append(p[])s.append(p[])s.append(p[])s.append(p[rgen.randint(,)])return ([s], [[p[], p[], p[]]])", "docstring": "Simple sequences for HL0. Each pattern in the sequence has a series of 1's\n in a specific set of columns.\n There are 23 patterns, p0 to p22.\n The sequence we want to learn is p0->p1->p2\n We create a very long sequence consisting of N N p0 p1 p2 N N p0 p1 p2\n N is randomly chosen from p3 to p22", "id": "f17345:m7"} {"signature": "def buildHL0bTrainingSet(numOnes=):", "body": "numPatterns = p = getSimplePatterns(numOnes, numPatterns)s = []s.append(p[rgen.randint(,numPatterns)])for i in range():r = rgen.randint(,numPatterns)print(r, end='')s.append(p[r])if rgen.binomial(, ) > :print(\"\", end='')s.append(p[])s.append(p[])s.append(p[])s.append(p[])else:print(\"\", end='')s.append(p[])s.append(p[])s.append(p[])r = rgen.randint(,numPatterns)s.append(p[r])print(r, end='')print()return ([s], [ [p[], p[], p[], p[]], [p[], p[], p[]] ])", "docstring": "Simple sequences for HL0b. 
Each pattern in the sequence has a series of 1's\n in a specific set of columns.\n There are 23 patterns, p0 to p22.\n The sequences we want to learn are p1->p2->p3 and p0->p1->p2->p4.\n We create a very long sequence consisting of these two sub-sequences\n intermixed with noise, such as:\n N N p0 p1 p2 p4 N N p1 p2 p3 N N p1 p2 p3\n N is randomly chosen from p5 to p22", "id": "f17345:m8"} {"signature": "def findAcceptablePatterns(tm, t, whichSequence, trainingSequences, nAcceptable = ):", "body": "upTo = t + if tm.doPooling:upTo += min(tm.segUpdateValidDuration, nAcceptable)assert upTo <= len(trainingSequences[whichSequence])acceptablePatterns = []if len(trainingSequences) == and(trainingSequences[][] == trainingSequences[][]).all():if (trainingSequences[][t] == trainingSequences[][t]).all()and (trainingSequences[][t+] != trainingSequences[][t+]).any():acceptablePatterns.append(trainingSequences[][t+])acceptablePatterns.append(trainingSequences[][t+])acceptablePatterns += [trainingSequences[whichSequence][t]for t in range(t,upTo)]return acceptablePatterns", "docstring": "Tries to infer the set of acceptable patterns for prediction at the given\ntime step and for the give sequence. Acceptable patterns are: the current one,\nplus a certain number of patterns after timeStep, in the sequence that the TM\nis currently tracking. Any other pattern is not acceptable.\n\nTODO:\n====\n- Doesn't work for noise cases.\n- Might run in trouble if shared subsequence at the beginning.\n\nParameters:\n==========\ntm the whole TM, so that we can look at its parameters\nt the current time step\nwhichSequence the sequence we are currently tracking\ntrainingSequences all the training sequences\nnAcceptable the number of steps forward from the current timeStep\n we are willing to consider acceptable. 
In the case of\n pooling, it is less than or equal to the min of the\n number of training reps and the segUpdateValidDuration\n parameter of the TM, depending on the test case.\n The default value is 1, because by default, the pattern\n after the current one should always be predictable.\n\nReturn value:\n============\nacceptablePatterns A list of acceptable patterns for prediction.", "id": "f17345:m10"} {"signature": "def TestH2a(sequenceLength, nTests, cellsPerColumn, numCols =, nSequences =[],pctShared = , seqGenMode = '',shouldFail = False):", "body": "print(\"\")nFailed = subsequenceStartPos = assert subsequenceStartPos < sequenceLengthfor numSequences in nSequences:print(\"\",sequenceLength, end='')print(\"\",cellsPerColumn,\"\",nTests,\"\", numCols)print(\"\",numSequences, \"\", pctShared, end='')print(\"\", seqGenMode)for k in range(nTests): trainingSet = buildTrainingSet(numSequences = numSequences,sequenceLength = sequenceLength,pctShared = pctShared, seqGenMode = seqGenMode,subsequenceStartPos = subsequenceStartPos,numCols = numCols,minOnes = , maxOnes = )print(\"\")numFailures3, numStrictErrors3, numPerfect3, tm3 =_testSequence(trainingSet,nTrainingReps = ,numberOfCols = numCols,cellsPerColumn = cellsPerColumn,initialPerm = ,connectedPerm = ,minThreshold = ,permanenceInc = ,permanenceDec = ,permanenceMax = ,globalDecay = ,newSynapseCount = ,activationThreshold = ,doPooling = False,shouldFail = shouldFail)print(\"\")numFailures, numStrictErrors, numPerfect, tm2 =_testSequence(trainingSet,nTrainingReps = ,numberOfCols = numCols,cellsPerColumn = cellsPerColumn,initialPerm = ,connectedPerm = ,minThreshold = ,permanenceInc = ,permanenceDec = ,permanenceMax = ,globalDecay = ,newSynapseCount = ,activationThreshold = ,doPooling = False,shouldFail = shouldFail)print(\"\")numFailures1, numStrictErrors1, numPerfect1, tm1 =_testSequence(trainingSet,nTrainingReps = ,numberOfCols = numCols,cellsPerColumn = cellsPerColumn,initialPerm = ,connectedPerm = ,minThreshold = ,permanenceInc = ,permanenceDec = ,permanenceMax = ,globalDecay = ,newSynapseCount = ,activationThreshold = ,doPooling = False,shouldFail = shouldFail)segmentInfo1 = tm1.getSegmentInfo()segmentInfo2 = tm2.getSegmentInfo()if (abs(segmentInfo1[] - segmentInfo2[]) > ) or(abs(segmentInfo1[] - segmentInfo2[]) > *) :print(\"\")print(segmentInfo1)print(segmentInfo2)print(tm3.getSegmentInfo())tm3.trimSegments()print(tm3.getSegmentInfo())print(\"\")print(numFailures1, numStrictErrors1, numPerfect1)print(numFailures, numStrictErrors, numPerfect)print(numFailures3, numStrictErrors3, numPerfect3)numFailures += if numFailures == and not shouldFailor numFailures > and shouldFail:print(\"\", end='')if shouldFail:print('')else:print()else:print(\"\")nFailed = nFailed + print(\"\", numFailures)print(\"\", numStrictErrors)print(\"\", numPerfect)return nFailed", "docstring": "Still need to test:\n Two overlapping sequences. 
OK to get new segments but check that we can\n get correct high order prediction after multiple reps.", "id": "f17345:m19"} {"signature": "def worker(x):", "body": "cellsPerColumn, numSequences = x[], x[]nTrainingReps = sequenceLength = numCols = print('', cellsPerColumn, numSequences)seqGenMode = ''subsequenceStartPos = trainingSet = buildTrainingSet(numSequences = numSequences,sequenceLength = sequenceLength,pctShared = , seqGenMode = seqGenMode,subsequenceStartPos = subsequenceStartPos,numCols = numCols,minOnes = , maxOnes = )numFailures1, numStrictErrors1, numPerfect1, atHub, tm =_testSequence(trainingSet,nTrainingReps = nTrainingReps,numberOfCols = numCols,cellsPerColumn = cellsPerColumn,initialPerm = ,connectedPerm = ,minThreshold = ,permanenceInc = ,permanenceDec = ,permanenceMax = ,globalDecay = ,newSynapseCount = ,activationThreshold = ,doPooling = False,shouldFail = False,predJustAfterHubOnly = )seqGenMode = ''trainingSet = buildTrainingSet(numSequences = numSequences,sequenceLength = sequenceLength,pctShared = , seqGenMode = seqGenMode,subsequenceStartPos = ,numCols = numCols,minOnes = , maxOnes = )numFailures2, numStrictErrors2, numPerfect2, tm =_testSequence(trainingSet,nTrainingReps = nTrainingReps,numberOfCols = numCols,cellsPerColumn = cellsPerColumn,initialPerm = ,connectedPerm = ,minThreshold = ,permanenceInc = ,permanenceDec = ,permanenceMax = ,globalDecay = ,newSynapseCount = ,activationThreshold = ,doPooling = False,shouldFail = False)print('', end='')print(cellsPerColumn, numSequences, numFailures1, numStrictErrors1, numPerfect1, atHub,numFailures2, numStrictErrors2, numPerfect2)return cellsPerColumn, numSequences, numFailures1, numStrictErrors1, numPerfect1, atHub,numFailures2, numStrictErrors2, numPerfect2", "docstring": "Worker function to use in parallel hub capacity test below.", "id": "f17345:m24"} {"signature": "def hubCapacity():", "body": "from multiprocessing import Poolimport itertoolsprint(\"\")p = Pool()results = p.map(worker, itertools.product([,,,,,,,], range(,,)))f = open('', '')for i,r in enumerate(results):print('' % r, file=f)f.close()", "docstring": "Study hub capacity. Figure out how many sequences can share a pattern\nfor a given number of cells per column till we the system fails.\nDON'T RUN IN BUILD SYSTEM!!! (takes too long)", "id": "f17345:m25"} {"signature": "def printOneTrainingVector(x):", "body": "print(''.join('' if k != else '' for k in x))", "docstring": "Print a single vector succinctly.", "id": "f17347:m0"} {"signature": "def getSimplePatterns(numOnes, numPatterns, patternOverlap=):", "body": "assert (patternOverlap < numOnes)numNewBitsInEachPattern = numOnes - patternOverlapnumCols = numNewBitsInEachPattern * numPatterns + patternOverlapp = []for i in range(numPatterns):x = numpy.zeros(numCols, dtype='')startBit = i*numNewBitsInEachPatternnextStartBit = startBit + numOnesx[startBit:nextStartBit] = p.append(x)return p", "docstring": "Very simple patterns. Each pattern has numOnes consecutive\n bits on. 
The amount of overlap between consecutive patterns is\n configurable, via the patternOverlap parameter.\n\n Parameters:\n -----------------------------------------------------------------------\n numOnes: Number of bits ON in each pattern\n numPatterns: Number of unique patterns to generate\n patternOverlap: Number of bits of overlap between each successive pattern\n retval: patterns", "id": "f17347:m2"} {"signature": "def buildOverlappedSequences( numSequences = ,seqLen = ,sharedElements = [,],numOnBitsPerPattern = ,patternOverlap = ,seqOverlap = ,**kwargs):", "body": "numSharedElements = len(sharedElements)numUniqueElements = seqLen - numSharedElementsnumPatterns = numSharedElements + numUniqueElements * numSequencespatterns = getSimplePatterns(numOnBitsPerPattern, numPatterns, patternOverlap)numCols = len(patterns[])trainingSequences = []uniquePatternIndices = list(range(numSharedElements, numPatterns))for i in range(numSequences):sequence = []sharedPatternIndices = list(range(numSharedElements))for j in range(seqLen):if j in sharedElements:patIdx = sharedPatternIndices.pop()else:patIdx = uniquePatternIndices.pop()sequence.append(patterns[patIdx])trainingSequences.append(sequence)if VERBOSITY >= :print(\"\")printAllTrainingSequences(trainingSequences)return (numCols, trainingSequences)", "docstring": "Create training sequences that share some elements in the middle.\n\n Parameters:\n -----------------------------------------------------\n numSequences: Number of unique training sequences to generate\n seqLen: Overall length of each sequence\n sharedElements: Which element indices of each sequence are shared. These\n will be in the range between 0 and seqLen-1\n numOnBitsPerPattern: Number of ON bits in each TM input pattern\n patternOverlap: Max number of bits of overlap between any 2 patterns\n retval: (numCols, trainingSequences)\n numCols - width of the patterns\n trainingSequences - a list of training sequences", "id": "f17347:m3"} {"signature": "def buildSequencePool(numSequences = ,seqLen = [,,],numPatterns = ,numOnBitsPerPattern = ,patternOverlap = ,**kwargs):", "body": "patterns = getSimplePatterns(numOnBitsPerPattern, numPatterns, patternOverlap)numCols = len(patterns[])trainingSequences = []for i in range(numSequences):sequence = []length = random.choice(seqLen)for j in range(length):patIdx = random.choice(range(numPatterns))sequence.append(patterns[patIdx])trainingSequences.append(sequence)if VERBOSITY >= :print(\"\")printAllTrainingSequences(trainingSequences)return (numCols, trainingSequences)", "docstring": "Create a bunch of sequences of various lengths, all built from\n a fixed set of patterns.\n\n Parameters:\n -----------------------------------------------------\n numSequences: Number of training sequences to generate\n seqLen: List of possible sequence lengths\n numPatterns: How many possible patterns there are to use within\n sequences\n numOnBitsPerPattern: Number of ON bits in each TM input pattern\n patternOverlap: Max number of bits of overlap between any 2 patterns\n retval: (numCols, trainingSequences)\n numCols - width of the patterns\n trainingSequences - a list of training sequences", "id": "f17347:m4"} {"signature": "def createTMs(includeCPP = True,includePy = True,numCols = ,cellsPerCol = ,activationThreshold = ,minThreshold = ,newSynapseCount = ,initialPerm = ,permanenceInc = ,permanenceDec = ,globalDecay = ,pamLength = ,checkSynapseConsistency = True,maxInfBacktrack = ,maxLrnBacktrack = ,**kwargs):", "body": "connectedPerm = tms = dict()if includeCPP:if 
VERBOSITY >= :print(\"\")cpp_tm = BacktrackingTMCPP(numberOfCols = numCols, cellsPerColumn = cellsPerCol,initialPerm = initialPerm, connectedPerm = connectedPerm,minThreshold = minThreshold, newSynapseCount = newSynapseCount,permanenceInc = permanenceInc, permanenceDec = permanenceDec,activationThreshold = activationThreshold,globalDecay = globalDecay, burnIn = ,seed=SEED, verbosity=VERBOSITY,checkSynapseConsistency = checkSynapseConsistency,collectStats = True,pamLength = pamLength,maxInfBacktrack = maxInfBacktrack,maxLrnBacktrack = maxLrnBacktrack,)cpp_tm.retrieveLearningStates = Truetms[''] = cpp_tmif includePy:if VERBOSITY >= :print(\"\")py_tm = BacktrackingTM(numberOfCols = numCols, cellsPerColumn = cellsPerCol,initialPerm = initialPerm, connectedPerm = connectedPerm,minThreshold = minThreshold, newSynapseCount = newSynapseCount,permanenceInc = permanenceInc, permanenceDec = permanenceDec,activationThreshold = activationThreshold,globalDecay = globalDecay, burnIn = ,seed=SEED, verbosity=VERBOSITY,collectStats = True,pamLength = pamLength,maxInfBacktrack = maxInfBacktrack,maxLrnBacktrack = maxLrnBacktrack,)tms[''] = py_tmreturn tms", "docstring": "Create one or more TM instances, placing each into a dict keyed by\n name.\n\n Parameters:\n ------------------------------------------------------------------\n retval: tms - dict of TM instances", "id": "f17347:m5"} {"signature": "def assertNoTMDiffs(tms):", "body": "if len(tms) == :returnif len(tms) > :raise \"\"same = fdrutils.tmDiff2(*list(tms.values()), verbosity=VERBOSITY)assert(same)return", "docstring": "Check for diffs among the TM instances in the passed in tms dict and\nraise an assert if any are detected\n\nParameters:\n---------------------------------------------------------------------\ntms: dict of TM instances", "id": "f17347:m6"} {"signature": "def _getSimplePatterns(numOnes, numPatterns):", "body": "numCols = numOnes * numPatternsp = []for i in range(numPatterns):x = numpy.zeros(numCols, dtype='')x[i*numOnes:(i+)*numOnes] = p.append(x)return p", "docstring": "Very simple patterns. Each pattern has numOnes consecutive\n bits on. There are numPatterns*numOnes bits in the vector.", "id": "f17350:m0"} {"signature": "def _buildLikelihoodTrainingSet(numOnes=, relativeFrequencies=None):", "body": "numPatterns = p = _getSimplePatterns(numOnes, numPatterns)s1 = [p[], p[], p[], p[], p[]]s2 = [p[], p[], p[], p[], p[]]s3 = [p[], p[], p[], p[], p[]]trainingSequences = [s1, s2, s3]allPatterns = preturn (trainingSequences, relativeFrequencies, allPatterns)", "docstring": "Two very simple high order sequences for debugging. 
Each pattern in the\n sequence has a series of 1's in a specific set of columns.", "id": "f17350:m1"} {"signature": "def _createTMs(numCols, cellsPerColumn=, checkSynapseConsistency=True):", "body": "minThreshold = activationThreshold = newSynapseCount = initialPerm = connectedPerm = permanenceInc = permanenceDec = globalDecay = if VERBOSITY > :print(\"\")cppTm = BacktrackingTMCPP(numberOfCols=numCols, cellsPerColumn=cellsPerColumn,initialPerm=initialPerm, connectedPerm=connectedPerm,minThreshold=minThreshold, newSynapseCount=newSynapseCount,permanenceInc=permanenceInc, permanenceDec=permanenceDec,activationThreshold=activationThreshold,globalDecay=globalDecay, burnIn=,seed=SEED, verbosity=VERBOSITY,checkSynapseConsistency=checkSynapseConsistency,pamLength=)if VERBOSITY > :print(\"\")pyTm = BacktrackingTM(numberOfCols=numCols, cellsPerColumn=cellsPerColumn,initialPerm=initialPerm, connectedPerm=connectedPerm,minThreshold=minThreshold, newSynapseCount=newSynapseCount,permanenceInc=permanenceInc, permanenceDec=permanenceDec,activationThreshold=activationThreshold,globalDecay=globalDecay, burnIn=,seed=SEED, verbosity=VERBOSITY,pamLength=)return cppTm, pyTm", "docstring": "Create TM and BacktrackingTMCPP instances with identical parameters.", "id": "f17350:m2"} {"signature": "def _computeTMMetric(tm=None, sequences=None, useResets=True, verbosity=):", "body": "datasetScore = numPredictions = tm.resetStats()for seqIdx, seq in enumerate(sequences):if useResets:tm.reset()seq = numpy.array(seq, dtype='')if verbosity > :print(\"\")for i, inputPattern in enumerate(seq):if verbosity > :print(\"\" % (seqIdx, i), end='')print(\"\", inputPattern)y = tm.infer(inputPattern)if verbosity > :stats = tm.getStats()if stats[''] > :print(\"\", stats[''])if verbosity > :print(\"\")predOut = numpy.sum(tm.predictedState[''], axis=)actOut = numpy.sum(tm.activeState[''], axis=)outout = numpy.sum(y.reshape(tm.activeState[''].shape), axis=)print(\"\", predOut.nonzero())print(\"\", actOut.nonzero())print(\"\", inputPattern.nonzero())print(\"\", outout.nonzero())stats = tm.getStats()datasetScore = stats['']numPredictions = stats['']print(\"\", datasetScore, end='')print(\"\", numPredictions)return datasetScore, numPredictions", "docstring": "Given a trained TM and a list of sequences, compute the temporal memory\n performance metric on those sequences.\n\n Parameters:\n ===========\n tm: A trained temporal memory.\n sequences: A list of sequences. Each sequence is a list of numpy\n vectors.\n useResets: If True, the TM's reset method will be called before the\n the start of each new sequence.\n verbosity: An integer controlling the level of printouts. The higher\n the number the more debug printouts.\n\n Return value:\n ============\n The following pair is returned: (score, numPredictions)\n\n score: The average prediction score per pattern.\n numPredictions: The total number of predictions that were made.", "id": "f17350:m3"} {"signature": "def _createDataset(numSequences, originalSequences, relativeFrequencies):", "body": "dataSet = []trainingCummulativeFrequencies = numpy.cumsum(relativeFrequencies)for _ in range(numSequences):whichSequence = numpy.searchsorted(trainingCummulativeFrequencies,_RGEN.random_sample())dataSet.append(originalSequences[whichSequence])return dataSet", "docstring": "Given a set of sequences, create a dataset consisting of numSequences\n sequences. 
The i'th pattern in this dataset is chosen from originalSequences\n according to the relative frequencies specified in relativeFrequencies.", "id": "f17350:m4"} {"signature": "def _printOneTrainingVector(self, x):", "body": "print(''.join('' if k != else '' for k in x))", "docstring": "Print a single vector succinctly.", "id": "f17351:c0:m1"} {"signature": "def _printAllTrainingSequences(self, trainingSequences):", "body": "for i, trainingSequence in enumerate(trainingSequences):print(\"\", i, \"\")for pattern in trainingSequence:self._printOneTrainingVector(pattern)", "docstring": "Print all vectors", "id": "f17351:c0:m2"} {"signature": "def _setVerbosity(self, verbosity, tm, tmPy):", "body": "tm.cells4.setVerbosity(verbosity)tm.verbosity = verbositytmPy.verbosity = verbosity", "docstring": "Set verbosity level on the TM", "id": "f17351:c0:m3"} {"signature": "def _createTMs(self, numCols, fixedResources=False,checkSynapseConsistency = True):", "body": "minThreshold = activationThreshold = newSynapseCount = initialPerm = connectedPerm = permanenceInc = permanenceDec = if fixedResources:permanenceDec = maxSegmentsPerCell = maxSynapsesPerSegment = globalDecay = maxAge = else:permanenceDec = maxSegmentsPerCell = -maxSynapsesPerSegment = -globalDecay = maxAge = if g_testCPPTM:if g_options.verbosity > :print(\"\")cppTM = BacktrackingTMCPP(numberOfCols = numCols, cellsPerColumn = ,initialPerm = initialPerm, connectedPerm = connectedPerm,minThreshold = minThreshold,newSynapseCount = newSynapseCount,permanenceInc = permanenceInc,permanenceDec = permanenceDec,activationThreshold = activationThreshold,globalDecay = globalDecay, maxAge=maxAge, burnIn = ,seed=g_options.seed, verbosity=g_options.verbosity,checkSynapseConsistency = checkSynapseConsistency,pamLength = ,maxSegmentsPerCell = maxSegmentsPerCell,maxSynapsesPerSegment = maxSynapsesPerSegment,)cppTM.retrieveLearningStates = Trueelse:cppTM = Noneif g_options.verbosity > :print(\"\")pyTM = BacktrackingTM(numberOfCols = numCols, cellsPerColumn = ,initialPerm = initialPerm,connectedPerm = connectedPerm,minThreshold = minThreshold,newSynapseCount = newSynapseCount,permanenceInc = permanenceInc,permanenceDec = permanenceDec,activationThreshold = activationThreshold,globalDecay = globalDecay, maxAge=maxAge, burnIn = ,seed=g_options.seed, verbosity=g_options.verbosity,pamLength = ,maxSegmentsPerCell = maxSegmentsPerCell,maxSynapsesPerSegment = maxSynapsesPerSegment,)return cppTM, pyTM", "docstring": "Create an instance of the appropriate temporal memory. We isolate\n all parameters as constants specified here.", "id": "f17351:c0:m4"} {"signature": "def _getSimplePatterns(self, numOnes, numPatterns):", "body": "numCols = numOnes * numPatternsp = []for i in range(numPatterns):x = numpy.zeros(numCols, dtype='')x[i*numOnes:(i+)*numOnes] = p.append(x)return p", "docstring": "Very simple patterns. Each pattern has numOnes consecutive\n bits on. There are numPatterns*numOnes bits in the vector. 
These patterns\n are used as elements of sequences when building up a training set.", "id": "f17351:c0:m5"} {"signature": "def _buildSegmentLearningTrainingSet(self, numOnes=, numRepetitions= ):", "body": "numPatterns = numCols = * numPatterns * numOneshalfCols = numPatterns * numOnesnumNoiseBits = numOnesp = self._getSimplePatterns(numOnes, numPatterns)trainingSequences = []for i in range(numRepetitions):sequence = []for j in range(numPatterns):v = numpy.zeros(numCols)v[:halfCols] = p[j]noiseIndices = (self._rgen.permutation(halfCols)+ halfCols)[:numNoiseBits]v[noiseIndices] = sequence.append(v)trainingSequences.append(sequence)testSequence = []for j in range(numPatterns):v = numpy.zeros(numCols, dtype='')v[:halfCols] = p[j]testSequence.append(v)if g_options.verbosity > :print(\"\")self.printAllTrainingSequences(trainingSequences)print(\"\")self.printAllTrainingSequences([testSequence])return (trainingSequences, [testSequence])", "docstring": "A simple sequence of 5 patterns. The left half of the vector contains\n the pattern elements, each with numOnes consecutive bits. The right half\n contains numOnes random bits. The function returns a pair:\n\n trainingSequences: A list containing numRepetitions instances of the\n above sequence\n testSequence: A single clean test sequence containing the 5 patterns\n but with no noise on the right half", "id": "f17351:c0:m6"} {"signature": "def _buildSL2TrainingSet(self, numOnes=, numRepetitions= ):", "body": "numPatterns = numCols = * numPatterns * numOneshalfCols = numPatterns * numOnesnumNoiseBits = numOnesp = self._getSimplePatterns(numOnes, numPatterns)numSequences = indices = [[, , , , ],[, , , , ],[, , , , ],]trainingSequences = []for i in range(numRepetitions*numSequences):sequence = []for j in range(numPatterns):v = numpy.zeros(numCols, dtype='')v[:halfCols] = p[indices[i % numSequences][j]]noiseIndices = (self._rgen.permutation(halfCols)+ halfCols)[:numNoiseBits]v[noiseIndices] = sequence.append(v)trainingSequences.append(sequence)testSequences = []for i in range(numSequences):sequence = []for j in range(numPatterns):v = numpy.zeros(numCols, dtype='')v[:halfCols] = p[indices[i % numSequences][j]]sequence.append(v)testSequences.append(sequence)if g_options.verbosity > :print(\"\")self.printAllTrainingSequences(trainingSequences)print(\"\")self.printAllTrainingSequences(testSequences)return (trainingSequences, testSequences)", "docstring": "Three simple sequences, composed of the same 5 static patterns. The left\n half of the vector contains the pattern elements, each with numOnes\n consecutive bits. 
The right half contains numOnes random bits.\n\n Sequence 1 is: p0, p1, p2, p3, p4\n Sequence 2 is: p4, p3, p2, p1, p0\n Sequence 3 is: p2, p0, p4, p1, p3\n\n The function returns a pair:\n\n trainingSequences: A list containing numRepetitions instances of the\n above sequences\n testSequence: Clean test sequences with no noise on the right half", "id": "f17351:c0:m7"} {"signature": "def simulateCategories(numSamples=, numDimensions=):", "body": "failures = \"\"LOGGER.info(\"\")knn = KNNClassifier(k=, distanceNorm=, useSparseMemory=True)for i in range(, numSamples):c = *numpy.random.randint(, ) + v = createPattern(c, numDimensions)knn.learn(v, c)for i in range(, ):c = *i+v = createPattern(c, numDimensions)knn.learn(v, c)errors = for i in range(, numSamples):c = *numpy.random.randint(, ) + v = createPattern(c, numDimensions)inferCat, _kir, _kd, _kcd = knn.infer(v)if inferCat != c:LOGGER.info(\"\", v[v.nonzero()],\"\", inferCat, \"\", c)LOGGER.info(\"\", v.nonzero())errors += if errors != :failures += \"\"errors = for i in range(, ):c = *numpy.random.randint(, ) + v = createPattern(c, numDimensions)p = knn.closestTrainingPattern(v, c)if not (c in p.nonzero()[]):LOGGER.info(\"\", p.nonzero(), v.nonzero())LOGGER.info(\"\", p[p.nonzero()], v[v.nonzero()])errors += if errors != :failures += \"\"return failures, knn", "docstring": "Simulate running KNN classifier on many disjoint categories", "id": "f17353:m0"} {"signature": "def createPattern(c, numDimensions):", "body": "v = numpy.zeros(numDimensions)v[c] = *numpy.random.random() + v[c+] = numpy.random.random()if c > :v[c-] = numpy.random.random()return v", "docstring": "Create a sparse pattern from category c with the given number of dimensions.\nThe pattern is created by setting element c to be a high random number.\nElement c-1 and c+1 are set to low random numbers. 
numDimensions must be > c.", "id": "f17353:m1"} {"signature": "def simulateKMoreThanOne():", "body": "failures = \"\"LOGGER.info(\"\")knn = KNNClassifier(k=)v = numpy.zeros((, ))v[] = [, ]v[] = [, ]v[] = [, ]v[] = [, ]v[] = [, ]v[] = [, ]knn.learn(v[], )knn.learn(v[], )knn.learn(v[], )knn.learn(v[], )knn.learn(v[], )knn.learn(v[], )winner, _inferenceResult, _dist, _categoryDist = knn.infer(v[])if winner != :failures += \"\"winner, _inferenceResult, _dist, _categoryDist = knn.infer(v[])if winner != :failures += \"\"winner, _inferenceResult, _dist, _categoryDist = knn.infer(v[])if winner != :failures += \"\"winner, _inferenceResult, _dist, _categoryDist = knn.infer(v[])if winner != :failures += \"\"if len(failures) == :LOGGER.info(\"\")return failures", "docstring": "A small test with k=3", "id": "f17354:m0"} {"signature": "def getNumTestPatterns(short=):", "body": "if short==:LOGGER.info(\"\")numPatterns = numpy.random.randint(, )numClasses = numpy.random.randint(, )elif short==:LOGGER.info(\"\")numPatterns = numpy.random.randint(, )numClasses = numpy.random.randint(, )else:LOGGER.info(\"\")numPatterns = numpy.random.randint(, )numClasses = numpy.random.randint(, )LOGGER.info(\"\", numPatterns)LOGGER.info(\"\", numClasses)return numPatterns, numClasses", "docstring": "Return the number of patterns and classes the test should use.", "id": "f17354:m2"} {"signature": "def runTestKNNClassifier(self, short = ):", "body": "failures = \"\"if short != :numpy.random.seed()else:seed_value = int(time.time())numpy.random.seed(seed_value)LOGGER.info('', seed_value)f = open('', '')f.write(str(seed_value))f.write('')f.close()failures += simulateKMoreThanOne()LOGGER.info(\"\")numPatterns, numClasses = getNumTestPatterns(short)patternSize = patterns = numpy.random.rand(numPatterns, patternSize)patternDict = dict()testDict = dict()for i in xrange(numPatterns):patternDict[i] = dict()patternDict[i][''] = patterns[i]patternDict[i][''] = numpy.random.randint(, numClasses-)testDict[i] = copy.deepcopy(patternDict[i])testDict[i][''][:int(*patternSize)] = numpy.random.rand()testDict[i][''] = NoneLOGGER.info(\"\")knn = KNNClassifier(k=)failures += simulateClassifier(knn, patternDict,\"\")LOGGER.info(\"\")knnL1 = KNNClassifier(k=, distanceNorm=)failures += simulateClassifier(knnL1, patternDict,\"\")LOGGER.info(\"\"\"\"\"\")knnExact = KNNClassifier(k=, exact=True)failures += simulateClassifier(knnExact,patternDict,\"\",testDict=testDict)numPatterns, numClasses = getNumTestPatterns(short)patterns = (numpy.random.rand(numPatterns, ) > ).astype(RealNumpyDType)patternDict = dict()for i in patterns:iString = str(i.tolist())if not patternDict.has_key(iString):randCategory = numpy.random.randint(, numClasses-)patternDict[iString] = dict()patternDict[iString][''] = ipatternDict[iString][''] = randCategoryLOGGER.info(\"\")knnDense = KNNClassifier(k=)failures += simulateClassifier(knnDense, patternDict,\"\")self.assertEqual(len(failures), ,\"\" + failures)if short == :f = open('', '')f.write('')f.close()", "docstring": "Test the KNN classifier in this module. 
short can be:\n 0 (short), 1 (medium), or 2 (long)", "id": "f17354:c0:m0"} {"signature": "def _setupTempDirectory(filename):", "body": "tmpDir = tempfile.mkdtemp()tmpFileName = os.path.join(tmpDir, os.path.basename(filename))return tmpDir, tmpFileName", "docstring": "Create a temp directory, and return path to filename in that directory", "id": "f17359:m1"} {"signature": "def _createEncoder():", "body": "encoder = MultiEncoder()encoder.addMultipleEncoders({'': dict(fieldname='', type='',timeOfDay=(,), forced=True),'': dict(fieldname='', type='',name='', minval=, maxval=,clipInput=True, w=, resolution=, forced=True),'': dict(fieldname='',type='',name='', minval=,maxval=,clipInput=True, w=, resolution=, forced=True),})return encoder", "docstring": "Create the encoder instance for our test and return it.", "id": "f17359:m2"} {"signature": "def _createOPFNetwork(addSP = True, addTP = False):", "body": "sensorParams = dict(verbosity = _VERBOSITY)encoder = _createEncoder()trainFile = resource_filename(\"\", \"\")dataSource = FileRecordStream(streamID=trainFile)dataSource.setAutoRewind(True)n = Network()n.addRegion(\"\", \"\", json.dumps(sensorParams))sensor = n.regions[''].getSelf()sensor.encoder = encodersensor.dataSource = dataSourceif addSP:print(\"\")g_spRegionConfig[''] = encoder.getWidth()n.addRegion(\"\", \"\", json.dumps(g_spRegionConfig))n.link(\"\", \"\", \"\", \"\")n.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")n.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")n.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")if addTP and addSP:print(\"\")g_tpRegionConfig[''] = g_spRegionConfig['']n.addRegion(\"\", \"\", json.dumps(g_tpRegionConfig))n.link(\"\", \"\", \"\", \"\")n.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")n.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")elif addTP:print(\"\")g_tpRegionConfig[''] = encoder.getWidth()n.addRegion(\"\", \"\", json.dumps(g_tpRegionConfig))n.link(\"\", \"\", \"\", \"\")n.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")return n", "docstring": "Create a 'new-style' network ala OPF and return it.\n If addSP is true, an SPRegion will be added named 'level1SP'.\n If addTP is true, a TMRegion will be added named 'level1TP'", "id": "f17359:m3"} {"signature": "def _executeExternalCmdAndReapStdout(args):", "body": "_debugOut((\"\") %(args,))p = subprocess.Popen(args,env=os.environ,stdout=subprocess.PIPE,stderr=subprocess.PIPE)_debugOut((\"\") % (args,))(stdoutData, stderrData) = p.communicate()_debugOut((\"\" +\"\") %(args, p.returncode, type(stdoutData), stdoutData, stderrData))result = dict(exitStatus = p.returncode,stdoutData = stdoutData,stderrData = stderrData,)_debugOut((\"\") %(args, pprint.pformat(result, indent=)))return result", "docstring": "args: Args list as defined for the args parameter in subprocess.Popen()\n\nReturns: result dicionary:\n {\n 'exitStatus':,\n 'stdoutData':\"string\",\n 'stderrData':\"string\"\n }", "id": "f17360:m0"} {"signature": "def _getTestList():", "body": "suiteNames = ['']testNames = []for suite in suiteNames:for f in dir(eval(suite)):if f.startswith(''):testNames.append('' % (suite, f))return testNames", "docstring": "Get the list of tests that can be run from this module", "id": "f17360:m2"} {"signature": "def setUp(self):", "body": "global g_myEnvif not g_myEnv:params = type('', (object,), {'' : resource_filename(\"\", \"\")})g_myEnv = MyTestEnvironment(params)", "docstring": "Method called to prepare the test fixture. 
This is called by the\n unittest framework immediately before calling the test method; any exception\n raised by this method will be considered an error rather than a test\n failure. The default implementation does nothing.", "id": "f17360:c1:m1"} {"signature": "def tearDown(self):", "body": "self.resetExtraLogItems()g_myEnv.cleanUp()", "docstring": "Method called immediately after the test method has been called and the\n result recorded. This is called even if the test method raised an exception,\n so the implementation in subclasses may need to be particularly careful\n about checking internal state. Any exception raised by this method will be\n considered an error rather than a test failure. This method will only be\n called if the setUp() succeeds, regardless of the outcome of the test\n method. The default implementation does nothing.", "id": "f17360:c1:m2"} {"signature": "def shortDescription(self):", "body": "return None", "docstring": "Override to force unittest framework to use test method names instead\n of docstrings in the report.", "id": "f17360:c1:m3"} {"signature": "def getModules(self, expDesc, hsVersion=''):", "body": "shutil.rmtree(g_myEnv.testOutDir, ignore_errors=True)args = [\"\" % (json.dumps(expDesc)),\"\" % (g_myEnv.testOutDir),\"\" % (hsVersion)]self.addExtraLogItem({'':args})experiment_generator.expGenerator(args)descriptionPyPath = os.path.join(g_myEnv.testOutDir, \"\")permutationsPyPath = os.path.join(g_myEnv.testOutDir, \"\")return (self.checkPythonScript(descriptionPyPath),self.checkPythonScript(permutationsPyPath))", "docstring": "This does the following:\n\n 1.) Calls ExpGenerator to generate a base description file and permutations\n file from expDescription.\n\n 2.) Verifies that description.py and permutations.py are valid python\n modules that can be loaded\n\n 3.) Returns the loaded base description module and permutations module\n\n Parameters:\n -------------------------------------------------------------------\n expDesc: JSON format experiment description\n hsVersion: which version of hypersearch to use ('v2'; 'v1' was dropped)\n retval: (baseModule, permutationsModule)", "id": "f17360:c1:m5"} {"signature": "def runBaseDescriptionAndPermutations(self, expDesc, hsVersion, maxModels=):", "body": "self.getModules(expDesc, hsVersion=hsVersion)permutationsPyPath = os.path.join(g_myEnv.testOutDir, \"\")args = [g_myEnv.testOutDir]from nupic.frameworks.opf.experiment_runner import runExperimentLOGGER.info(\"\")LOGGER.info(\"\")LOGGER.info(\"\")LOGGER.info(\"\")runExperiment(args)jobParams = {'' : generatePersistentJobGUID(),'': permutationsPyPath,'': hsVersion,}if maxModels is not None:jobParams[''] = maxModelsargs = ['', '' % (json.dumps(jobParams))]self.resetExtraLogItems()self.addExtraLogItem({'':jobParams})LOGGER.info(\"\")LOGGER.info(\"\")LOGGER.info(\"\")LOGGER.info(\"\")jobID = hypersearch_worker.main(args)cjDAO = ClientJobsDAO.get()models = cjDAO.modelsGetUpdateCounters(jobID)modelIDs = [model.modelId for model in models]results = cjDAO.modelsGetResultAndStatus(modelIDs)if maxModels is not None:self.assertEqual(len(results), maxModels, \"\"\"\" % (maxModels, len(results)))for result in results:self.assertEqual(result.completionReason, cjDAO.CMPL_REASON_EOF,\"\" % (result.completionMsg))return results", "docstring": "This does the following:\n\n 1.) Calls ExpGenerator to generate a base description file and permutations\n file from expDescription.\n\n 2.) 
Verifies that description.py and permutations.py are valid python\n modules that can be loaded\n\n 3.) Runs the base description.py as an experiment using OPF RunExperiment.\n\n 4.) Runs a Hypersearch using the generated permutations.py by passing it\n to HypersearchWorker.\n\n Parameters:\n -------------------------------------------------------------------\n expDesc: JSON format experiment description\n hsVersion: which version of hypersearch to use ('v2'; 'v1' was dropped)\n retval: list of model results", "id": "f17360:c1:m6"} {"signature": "def assertValidSwarmingAggregations(self, expDesc, expectedAttempts):", "body": "minAggregation = dict(expDesc[''][''])minAggregation.pop('')(base, perms) = self.getModules(expDesc)predictionSteps = expDesc[''][''][]self.assertEqual(base.control[''][''],expDesc[''][''])tmpAggregationInfo = rCopy(base.config[''],lambda value, _: value)tmpAggregationInfo.pop('')self.assertDictEqual(tmpAggregationInfo, minAggregation)predictAheadTime = dict(minAggregation)for key in predictAheadTime.keys():predictAheadTime[key] *= predictionStepsself.assertEqual(base.config[''],predictAheadTime)self.assertEqual(perms.minimize,(\"\"\"\"))metrics = base.control['']metricTuples = [(metric.metric, metric.inferenceElement, metric.params)for metric in metrics]self.assertIn(('','',{'': , '': [predictionSteps],'': ''}),metricTuples)aggPeriods = perms.permutations['']aggAttempts = []for agg in aggPeriods.choices:multipleOfMinAgg = aggregationDivide(agg, minAggregation)self.assertIsInt(multipleOfMinAgg,\"\"\"\" % (agg, minAggregation))self.assertGreaterEqual(int(round(multipleOfMinAgg)), ,\"\" %(agg, minAggregation))requiredSteps = aggregationDivide(predictAheadTime, agg)self.assertIsInt(requiredSteps,\"\"\"\" % (agg, predictAheadTime))self.assertGreaterEqual(int(round(requiredSteps)), ,\"\"\"\" % (agg, predictAheadTime))quotient = aggregationDivide(expDesc[''], agg)self.assertIsInt(quotient,\"\"\"\" % (agg, expDesc['']))self.assertGreaterEqual(int(round(quotient)), ,\"\"\"\" % (agg, expDesc['']))aggAttempts.append((int(round(multipleOfMinAgg)), int(requiredSteps)))LOGGER.info(\"\", aggAttempts)aggAttempts.sort()expectedAttempts.sort()self.assertEqual(aggAttempts, expectedAttempts, \"\"\"\"\"\" %(expectedAttempts, aggAttempts))", "docstring": "Test that the set of aggregations produced for a swarm are correct\n\n Parameters:\n -----------------------------------------------------------------------\n expDesc: JSON experiment description\n expectedAttempts: list of (minAggregationMultiple, predictionSteps) pairs\n that we expect to find in the aggregation choices.", "id": "f17360:c1:m8"} {"signature": "def shortDescription(self):", "body": "return None", "docstring": "Override to force unittest framework to use test method names instead\n of docstrings in the report.", "id": "f17363:c0:m0"} {"signature": "def compareOPFPredictionFiles(self, path1, path2, temporal,maxMismatches=None):", "body": "experimentLabel = \"\" %(\"\" if temporal else \"\")print(\"\" % (experimentLabel, path1, path2))self.assertTrue(os.path.isfile(path1),msg=\"\" % (path1))(opf1CsvReader, opf1FieldNames) = self._openOpfPredictionCsvFile(path1)self.assertTrue(os.path.isfile(path2),msg=\"\" % (path2))(opf2CsvReader, opf2FieldNames) = self._openOpfPredictionCsvFile(path2)self.assertEqual(len(opf1FieldNames), len(opf2FieldNames),(\"\"\"\") % (experimentLabel, len(opf1FieldNames),len(opf2FieldNames)))self.assertEqual(opf1FieldNames, opf2FieldNames)opf1EOF = Falseopf2EOF = Falseopf1CurrentDataRowIndex = 
-opf2CurrentDataRowIndex = -if temporal:_skipOpf1Row = next(opf1CsvReader)opf1CurrentDataRowIndex += _skipOpf2Row = next(opf2CsvReader)opf2CurrentDataRowIndex += fieldsIndexesToCompare = tuple(range(, len(opf1FieldNames), ))self.assertGreater(len(fieldsIndexesToCompare), )print((\"\"\"\") % (experimentLabel,fieldsIndexesToCompare,[opf1FieldNames[i] for i in fieldsIndexesToCompare],[opf2FieldNames[i] for i in fieldsIndexesToCompare]))for i in fieldsIndexesToCompare:self.assertTrue(opf1FieldNames[i].endswith(\"\"),msg=\"\" % opf1FieldNames[i])self.assertTrue(opf2FieldNames[i].endswith(\"\"),msg=\"\" % opf2FieldNames[i])mismatchCount = while True:try:opf1Row = next(opf1CsvReader)except StopIteration:opf1EOF = Trueelse:opf1CurrentDataRowIndex += try:opf2Row = next(opf2CsvReader)except StopIteration:opf2EOF = Trueelse:opf2CurrentDataRowIndex += if opf1EOF != opf2EOF:print((\"\"\"\"\"\") % (experimentLabel,opf1EOF, opf1CurrentDataRowIndex,opf2EOF, opf2CurrentDataRowIndex))return Falseif opf1EOF and opf2EOF:breakself.assertEqual(len(opf1Row), len(opf2Row))for i in fieldsIndexesToCompare:opf1FloatValue = float(opf1Row[i])opf2FloatValue = float(opf2Row[i])if opf1FloatValue != opf2FloatValue:mismatchCount += print((\"\"\"\"\"\"\"\"\"\") % (experimentLabel,opf1CurrentDataRowIndex,i,opf1FieldNames[i],opf1Row[i],opf2Row[i],opf1FloatValue,opf2FloatValue,opf1Row,opf2Row))if maxMismatches is not None and mismatchCount >= maxMismatches:breakif mismatchCount != :print(\"\" % (experimentLabel, mismatchCount, path1, path2))return Falseself.assertEqual(opf1CurrentDataRowIndex, opf2CurrentDataRowIndex)print((\"\"\"\"\"\") %(experimentLabel,opf1CurrentDataRowIndex + ,path1,path2))return True", "docstring": "Compare temporal or non-temporal predictions for the given experiment\n that just finished executing\n\n experimentName: e.g., \"gym\"; this string will be used to form\n a directory path to the experiments.\n\n maxMismatches: Maximum number of row mismatches to report before\n terminating the comparison; None means: report all\n mismatches\n\n Returns: True if equal; False if different", "id": "f17363:c0:m1"} {"signature": "def _openOpfPredictionCsvFile(self, filepath):", "body": "csvReader = self._openCsvFile(filepath)names = next(csvReader)_types = next(csvReader)_specials = next(csvReader)return (csvReader, names)", "docstring": "Open an OPF prediction CSV file and advance it to the first data row\n\n Returns: the tuple (csvReader, fieldNames), where 'csvReader' is the\n csv reader object, and 'fieldNames' is a sequence of field\n names.", "id": "f17363:c0:m2"} {"signature": "def _executeExternalCmdAndReapOutputs(args):", "body": "import subprocess_debugOut((\"\") %(args,))p = subprocess.Popen(args,env=os.environ,stdout=subprocess.PIPE,stderr=subprocess.PIPE)_debugOut((\"\") % (args,))(stdoutData, stderrData) = p.communicate()_debugOut((\"\" +\"\") %(args, p.returncode, type(stdoutData), stdoutData, stderrData))result = dict(exitStatus = p.returncode,stdoutData = stdoutData,stderrData = stderrData,)_debugOut((\"\") %(args, pprint.pformat(result, indent=)))return result", "docstring": "args: Args list as defined for the args parameter in subprocess.Popen()\n\nReturns: result dicionary:\n {\n 'exitStatus':,\n 'stdoutData':\"string\",\n 'stderrData':\"string\"\n }", "id": "f17372:m0"} {"signature": "def whoisCallersCaller():", "body": "import inspectframeObj = inspect.stack()[][]return inspect.getframeinfo(frameObj)", "docstring": "Returns: Traceback namedtuple for our caller's caller", "id": "f17372:m2"} 
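Note on the pattern above: _executeExternalCmdAndReapOutputs (and the matching _executeExternalCmdAndReapStdout helper) wrap the idiom their docstrings describe: launch the command with subprocess.Popen, drain stdout/stderr with communicate(), and return a plain dict with 'exitStatus', 'stdoutData', and 'stderrData'. A minimal, self-contained sketch of that idiom follows; the helper name run_and_capture is illustrative only, and the _debugOut tracing hooks are omitted.

import os
import subprocess

def run_and_capture(args):
    # Launch the command with pipes attached, mirroring the helpers above.
    p = subprocess.Popen(args, env=os.environ,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() waits for the process to exit and reads both streams fully.
    stdout_data, stderr_data = p.communicate()
    # Package the results in the shape the docstrings describe.
    return dict(exitStatus=p.returncode,
                stdoutData=stdout_data,
                stderrData=stderr_data)

# Example usage (any argv-style command list works):
# result = run_and_capture(['python', '--version'])
# print(result['exitStatus'], result['stdoutData'])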
{"signature": "def getOpfExperimentPath(self, experimentName):", "body": "path = os.path.join(self.__opfExperimentsParentDir, experimentName)assert os.path.isdir(path),\"\" % (path,)return path", "docstring": "experimentName: e.g., \"gym\"; this string will be used to form\n a directory path to the experiment.\n\nReturns: absolute path to the experiment directory", "id": "f17372:c0:m2"} {"signature": "def setUp(self):", "body": "global g_myEnvif not g_myEnv:g_myEnv = MyTestEnvironment()", "docstring": "Method called to prepare the test fixture. This is called immediately\n before calling the test method; any exception raised by this method will be\n considered an error rather than a test failure. The default implementation\n does nothing.", "id": "f17372:c1:m0"} {"signature": "def tearDown(self):", "body": "self.resetExtraLogItems()", "docstring": "Method called immediately after the test method has been called and the\n result recorded. This is called even if the test method raised an exception,\n so the implementation in subclasses may need to be particularly careful\n about checking internal state. Any exception raised by this method will be\n considered an error rather than a test failure. This method will only be\n called if the setUp() succeeds, regardless of the outcome of the test\n method. The default implementation does nothing.", "id": "f17372:c1:m1"} {"signature": "def shortDescription(self):", "body": "return None", "docstring": "Override to force unittest framework to use test method names instead\n of docstrings in the report.", "id": "f17372:c1:m2"} {"signature": "def executePositiveOpfExperiment(self, experimentName, short=False):", "body": "opfRunner = g_myEnv.getOpfRunExperimentPyPath()opfExpDir = g_myEnv.getOpfExperimentPath(experimentName)r = self.__executePositiveRunExperimentTest(runnerPath=opfRunner,experimentDirPath=opfExpDir,short=short)return r", "docstring": "Executes a positive OPF RunExperiment test as a subprocess and validates\n its exit status.\n\n experimentName: e.g., \"gym\"; this string will be used to form\n a directory path to the experiment.\n\n short: if True, attempt to run the experiment with --testMode\n flag turned on, which causes all inference and training\n iteration counts to be overridden with small counts.\n\n Returns: result from _executeExternalCmdAndReapOutputs", "id": "f17372:c1:m3"} {"signature": "def __executePositiveRunExperimentTest(self,runnerPath,experimentDirPath,customOptions=[],short=False):", "body": "command = [\"\",runnerPath,experimentDirPath,]command.extend(customOptions)if short:command.append(\"\")self.addExtraLogItem({'':command})r = _executeExternalCmdAndReapOutputs(command)self.addExtraLogItem({'':r})_debugOut((\"\") % (command, r))self.assertEqual(r[''], ,(\"\") %(runnerPath, r[''],))self.resetExtraLogItems()return r", "docstring": "Executes a positive RunExperiment.py test and performs\n basic validation\n\n runnerPath: experiment running (LPF or OPF RunExperiment.py path)\n\n experimentDirPath: directory containing the description.py file of interest\n\n short: if True, attempt to run the experiment with --testMode\n flag turned on, which causes all inference and training\n iteration counts to be overridden with small counts.\n NOTE: if the (possibly aggregated) dataset has fewer\n rows than the count overrides, then an LPF experiment\n will fail.\n\n Returns: result from _executeExternalCmdAndReapOutputs", "id": "f17372:c1:m4"} {"signature": "def getAllDirectoriesWithFile(path, filename, excludeDirs):", "body": 
"directoryList = []for dirpath, dirnames, filenames in os.walk(path):for d in dirnames[:]:if d in excludeDirs:dirnames.remove(d)print(\"\" % (os.path.join(dirpath, d)))elif '' in os.listdir(os.path.join(dirpath, d)):dirnames.remove(d)print(\"\" % (os.path.join(dirpath, d)))for f in filenames:if f==filename:directoryList.append(dirpath)return directoryList", "docstring": "Returns a list of directories in the with a given , excluding\n", "id": "f17376:m0"} {"signature": "def getAllExperimentDirectories(excludedExperiments=[]):", "body": "excludedDirectories = ['', '', '', '']excludedDirectories.extend(excludedExperiments)return getAllDirectoriesWithFile(path=\"\",filename=\"\",excludeDirs=excludedDirectories)", "docstring": "Experiment directories are the directories with a description.py file", "id": "f17376:m1"} {"signature": "def runReducedExperiment(path, reduced=True):", "body": "initExperimentPrng()if reduced:args = [path, '']else:args = [path]runExperiment(args)", "docstring": "Run the experiment in the with a reduced iteration count", "id": "f17376:m2"} {"signature": "def _getTempFileName():", "body": "handle = tempfile.NamedTemporaryFile(prefix=\"\", suffix=\"\", dir=\"\")filename = handle.namehandle.close()return filename", "docstring": "Creates unique test csv file name.", "id": "f17377:m0"} {"signature": "def _getTempFileName():", "body": "handle = tempfile.NamedTemporaryFile(prefix=\"\", suffix=\"\", dir=\"\")filename = handle.namehandle.close()return filename", "docstring": "Creates unique test csv file name.", "id": "f17378:m0"} {"signature": "def _aggregate(input, options, output, timeFieldName):", "body": "aggregator = Aggregator(aggregationInfo=options, inputFields=input.getFields(),timeFieldName=timeFieldName)while True:inRecord = input.getNextRecord()print(\"\", inRecord)(outRecord, aggBookmark) = aggregator.next(record = inRecord, curInputBookmark = None)print(\"\", outRecord)if outRecord is not None:output.appendRecord(outRecord, None)if inRecord is None and outRecord is None:break", "docstring": "Aggregate the input stream and write aggregated records to the output\n stream", "id": "f17379:m0"} {"signature": "def setUp(self):", "body": "print()return", "docstring": "Method called to prepare the test fixture. This is called immediately\n before calling the test method; any exception raised by this method will be\n considered an error rather than a test failure. The default implementation\n does nothing.\n\n NOTE: this is called once for every sub-test and a new AggregationTests\n instance is constructed for every sub-test.", "id": "f17379:c3:m0"} {"signature": "def dummyModelParams(perm):", "body": "errScore = if perm[''][''][''][''] is not None:errScore -= if perm[''][''][''][''] is not None:errScore -= if perm['']['']['']['']is not None:errScore -= if perm['']['']['']['']is not None:errScore -= dummyModelParams = dict(metricValue = errScore,metricFunctions = None,)return dummyModelParams", "docstring": "This function can be used for Hypersearch algorithm development. When\n present, Hypersearch doesn't actually run the CLA model in the OPF, but instead run\n a dummy model. This function returns the dummy model params that will be\n used. 
See the OPFDummyModelRunner class source code (in\n nupic.swarming.ModelRunner) for a description of the schema for\n the dummy model params.", "id": "f17381:m0"} {"signature": "def permutationFilter(perm):", "body": "limit = int(os.environ.get('', ))if perm[''][''][''][''][''] > limit:return False;return True", "docstring": "This function can be used to selectively filter out specific permutation\n combinations. It is called by RunPermutations for every possible permutation\n of the variables in the permutations dict. It should return True for valid a\n combination of permutation values and False for an invalid one.\n\n Parameters:\n ---------------------------------------------------------\n perm: dict of one possible combination of name:value\n pairs chosen from permutations.", "id": "f17381:m1"} {"signature": "def dummyModelParams(perm):", "body": "errScore = if not perm['']['']['']['']is None:errScore -= if not perm['']['']['']['']is None:errScore -= if not perm[''][''][''][''] is None:errScore -= if not perm[''][''][''][''] is None:errScore -= if not perm[''][''][''][''] is None:errScore -= if not perm[''][''][''][''] is None:errScore += dummyModelParams = dict(metricValue = errScore,metricFunctions = None,)return dummyModelParams", "docstring": "This function can be used for Hypersearch algorithm development. When\n present, we don't actually run the CLA model in the OPF, but instead run\n a dummy model. This function returns the dummy model params that will be\n used. See the OPFDummyModelRunner class source code (in\n nupic.swarming.ModelRunner) for a description of the schema for\n the dummy model params.", "id": "f17383:m0"} {"signature": "def permutationFilter(perm):", "body": "return True", "docstring": "This function can be used to selectively filter out specific permutation\n combinations. It is called by RunPermutations for every possible permutation\n of the variables in the permutations dict. It should return True for valid a\n combination of permutation values and False for an invalid one.\n\n Parameters:\n ---------------------------------------------------------\n perm: dict of one possible combination of name:value\n pairs chosen from permutations.", "id": "f17383:m1"} {"signature": "def dummyModelParams(perm):", "body": "errScore = errScore += abs(perm[''][''][''][''][''] - )errScore += abs(perm[''][''][''][''][''] - )if perm[''][''][''][''] is not None:errScore -= if perm[''][''][''][''] is not None:errScore -= waitTime = Noneif eval(os.environ.get('', '')):if perm[''][''][''][''] is not None:waitTime = dummyModelParams = dict(metricValue = errScore,iterations = int(os.environ.get('', '')),waitTime = waitTime,sysExitModelRange = os.environ.get('',None),errModelRange = os.environ.get('',None),jobFailErr = bool(os.environ.get('', False)))return dummyModelParams", "docstring": "This function can be used for Hypersearch algorithm development. When\n present, Hypersearch doesn't actually run the CLA model in the OPF, but\n instead runs a dummy model. This function returns the dummy model params that\n will be used. See the OPFDummyModelRunner class source code (in\n nupic.swarming.ModelRunner) for a description of the schema\n for the dummy model params.", "id": "f17385:m0"} {"signature": "def permutationFilter(perm):", "body": "limit = int(os.environ.get('', ))if perm[''][''][''][''][''] > limit:return False;return True", "docstring": "This function can be used to selectively filter out specific permutation\n combinations. 
It is called by RunPermutations for every possible permutation\n of the variables in the permutations dict. It should return True for valid a\n combination of permutation values and False for an invalid one.\n\n Parameters:\n ---------------------------------------------------------\n perm: dict of one possible combination of name:value\n pairs chosen from permutations.", "id": "f17385:m1"} {"signature": "def permutationFilter(perm):", "body": "return True", "docstring": "This function can be used to selectively filter out specific permutation\n combinations. It is called by RunPermutations for every possible permutation\n of the variables in the permutations dict. It should return True for valid a\n combination of permutation values and False for an invalid one.\n\n Parameters:\n ---------------------------------------------------------\n perm: dict of one possible combination of name:value\n pairs chosen from permutations.", "id": "f17387:m0"} {"signature": "def dummyModelParams(perm):", "body": "errScore = if not perm[''][''][''][''] is None:errScore -= if not perm[''][''][''][''] is None:errScore -= if not perm[''][''][''][''] is None:errScore -= delay = encoderCount = encoders = perm['']['']['']for field,encoder in encoders.items():if encoder is not None:encoderCount += if encoderCount == :delay = elif encoderCount == and perm[''][''][''][\"\"] is None:delay = elif encoderCount == :delay = dummyModelParams = dict(metricValue = errScore,metricFunctions = None,delay=delay,)return dummyModelParams", "docstring": "This function can be used for Hypersearch algorithm development. When\n present, we don't actually run the CLA model in the OPF, but instead run\n a dummy model. This function returns the dummy model params that will be\n used. See the OPFDummyModelRunner class source code (in\n nupic.swarming.ModelRunner) for a description of the schema for\n the dummy model params.", "id": "f17389:m0"} {"signature": "def permutationFilter(perm):", "body": "return True", "docstring": "This function can be used to selectively filter out specific permutation\n combinations. It is called by RunPermutations for every possible permutation\n of the variables in the permutations dict. It should return True for valid a\n combination of permutation values and False for an invalid one.\n\n Parameters:\n ---------------------------------------------------------\n perm: dict of one possible combination of name:value\n pairs chosen from permutations.", "id": "f17389:m1"} {"signature": "def dummyModelParams(perm):", "body": "errScore = if not perm['']['']['']['']is None:errScore -= if not perm['']['']['']['']is None:errScore -= if not perm['']['']['']['']is None:errScore -= if not perm[''][''][''][''] is None:errScore -= if not perm[''][''][''][''] is None:errScore -= if not perm['']['']['']['']is None:errScore -= if not perm[''][''][''][''] is None:errScore -= if not perm[''][''][''][''] is None:errScore -= if not perm[''][''][''][''] is None:errScore -= if not perm[''][''][''][''] is None:errScore += dummyModelParams = dict(metricValue = errScore,metricFunctions = None,)return dummyModelParams", "docstring": "This function can be used for Hypersearch algorithm development. When\n present, we don't actually run the CLA model in the OPF, but instead run\n a dummy model. This function returns the dummy model params that will be\n used. 
See the OPFDummyModelRunner class source code (in\n nupic.swarming.ModelRunner) for a description of the schema for\n the dummy model params.", "id": "f17391:m0"} {"signature": "def permutationFilter(perm):", "body": "return True", "docstring": "This function can be used to selectively filter out specific permutation\n combinations. It is called by RunPermutations for every possible permutation\n of the variables in the permutations dict. It should return True for valid a\n combination of permutation values and False for an invalid one.\n\n Parameters:\n ---------------------------------------------------------\n perm: dict of one possible combination of name:value\n pairs chosen from permutations.", "id": "f17391:m1"} {"signature": "def permutationFilter(perm):", "body": "if perm[''][''][''][''][''] > :return False;return True", "docstring": "This function can be used to selectively filter out specific permutation\n combinations. It is called by RunPermutations for every possible permutation\n of the variables in the permutations dict. It should return True for valid a\n combination of permutation values and False for an invalid one.\n\n Parameters:\n ---------------------------------------------------------\n perm: dict of one possible combination of name:value\n pairs chosen from permutations.", "id": "f17393:m0"} {"signature": "def dummyModelParams(perm):", "body": "errScore = if not perm[''][''][''][''] is None:errScore -= if not perm[''][''][''][''] is None:errScore -= if not perm[''][''][''][''] is None:errScore -= if not perm[''][''][''][''] is None:errScore -= if not perm[''][''][''][''] is None:errScore -= if not perm[''][''][''][''] is None:errScore -= if not perm[''][''][''][''] is None:errScore -= delay = encoderCount = for key in perm.keys():if '' in key and not perm[key] is None:encoderCount += delay = encoderCount*encoderCount*dummyModelParams = dict(metricValue=errScore,metricFunctions=None,delay=delay,)return dummyModelParams", "docstring": "This function can be used for Hypersearch algorithm development. When\n present, we don't actually run the CLA model in the OPF, but instead run\n a dummy model. This function returns the dummy model params that will be\n used. See the OPFDummyModelRunner class source code (in\n nupic.swarming.ModelRunner) for a description of the schema for\n the dummy model params.", "id": "f17395:m0"} {"signature": "def permutationFilter(perm):", "body": "return True", "docstring": "This function can be used to selectively filter out specific permutation\n combinations. It is called by RunPermutations for every possible permutation\n of the variables in the permutations dict. It should return True for valid a\n combination of permutation values and False for an invalid one.\n\n Parameters:\n ---------------------------------------------------------\n perm: dict of one possible combination of name:value\n pairs chosen from permutations.", "id": "f17395:m1"} {"signature": "def permutationFilter(perm):", "body": "return True", "docstring": "This function can be used to selectively filter out specific permutation\n combinations. It is called by RunPermutations for every possible permutation\n of the variables in the permutations dict. 
It should return True for valid a\n combination of permutation values and False for an invalid one.\n\n Parameters:\n ---------------------------------------------------------\n perm: dict of one possible combination of name:value\n pairs chosen from permutations.", "id": "f17397:m0"} {"signature": "def dummyModelParams(perm):", "body": "errScore = if perm[''][''][''][''] is not None:errScore -= if perm[''][''][''][''] is not None:errScore -= if perm['']['']['']['']is not None:errScore += if perm['']['']['']['']is not None:errScore += dummyModelParams = dict(metricValue = errScore,iterations = int(os.environ.get('', '')),waitTime = None,sysExitModelRange = os.environ.get('',None),errModelRange = os.environ.get('',None),jobFailErr = bool(os.environ.get('', False)))return dummyModelParams", "docstring": "This function can be used for Hypersearch algorithm development. When\n present, Hypersearch doesn't actually run the CLA model in the OPF, but\n instead runs a dummy model. This function returns the dummy model params that\n will be used. See the OPFDummyModelRunner class source code (in\n nupic.swarming.ModelRunner) for a description of the schema\n for the dummy model params.", "id": "f17399:m0"} {"signature": "def permutationFilter(perm):", "body": "return True", "docstring": "This function can be used to selectively filter out specific permutation\n combinations. It is called by RunPermutations for every possible permutation\n of the variables in the permutations dict. It should return True for valid a\n combination of permutation values and False for an invalid one.\n\n Parameters:\n ---------------------------------------------------------\n perm: dict of one possible combination of name:value\n pairs chosen from permutations.", "id": "f17399:m1"} {"signature": "def permutationFilter(perm):", "body": "return True", "docstring": "This function can be used to selectively filter out specific permutation\n combinations. It is called by RunPermutations for every possible permutation\n of the variables in the permutations dict. It should return True for valid a\n combination of permutation values and False for an invalid one.\n\n Parameters:\n ---------------------------------------------------------\n perm: dict of one possible combination of name:value\n pairs chosen from permutations.", "id": "f17401:m0"} {"signature": "def dummyModelParams(perm):", "body": "errScore = if perm[''][''][''][''] is not None:errScore += if perm[''][''][''][''] is not None:errScore -= if perm[''][''][''][''] is not None:errScore -= if perm['']['']['']['']is not None:errScore -= if perm['']['']['']['']is not None:errScore -= dummyModelParams = dict(metricValue = errScore,iterations = int(os.environ.get('', '')),waitTime = None,sysExitModelRange = os.environ.get('',None),errModelRange = os.environ.get('',None),jobFailErr = bool(os.environ.get('', False)))return dummyModelParams", "docstring": "This function can be used for Hypersearch algorithm development. When\n present, Hypersearch doesn't actually run the CLA model in the OPF, but\n instead runs a dummy model. This function returns the dummy model params that\n will be used. See the OPFDummyModelRunner class source code (in\n nupic.swarming.ModelRunner) for a description of the schema\n for the dummy model params.", "id": "f17403:m0"} {"signature": "def permutationFilter(perm):", "body": "return True", "docstring": "This function can be used to selectively filter out specific permutation\n combinations. 
It is called by RunPermutations for every possible permutation\n of the variables in the permutations dict. It should return True for a valid\n combination of permutation values and False for an invalid one.\n\n Parameters:\n ---------------------------------------------------------\n perm: dict of one possible combination of name:value\n pairs chosen from permutations.", "id": "f17403:m1"} {"signature": "def permutationFilter(perm):", "body": "limit = int(os.environ.get('', ))if perm[''][''][''][''][''] > limit:return False;return True", "docstring": "This function can be used to selectively filter out specific permutation\n combinations. It is called by RunPermutations for every possible permutation\n of the variables in the permutations dict. It should return True for a valid\n combination of permutation values and False for an invalid one.\n\n Parameters:\n ---------------------------------------------------------\n perm: dict of one possible combination of name:value\n pairs chosen from permutations.", "id": "f17405:m0"} {"signature": "def dummyModelParams(perm):", "body": "errScore = waitTime = dummyModelParams = dict(metricValue = errScore,iterations = int(os.environ.get('', '')),waitTime = waitTime,sysExitModelRange = os.environ.get('',None),delayModelRange = os.environ.get('',None),errModelRange = os.environ.get('',None),jobFailErr = bool(os.environ.get('', False)))return dummyModelParams", "docstring": "This function can be used for Hypersearch algorithm development. When\n present, Hypersearch doesn't actually run the CLA model in the OPF, but instead runs\n a dummy model. This function returns the dummy model params that will be\n used. See the OPFDummyModelRunner class source code (in\n nupic.swarming.ModelRunner) for a description of the schema for\n the dummy model params.", "id": "f17407:m0"} {"signature": "def permutationFilter(perm):", "body": "limit = int(os.environ.get('', ))if perm[''][''][''][''][''] > limit:return False;return True", "docstring": "This function can be used to selectively filter out specific permutation\n combinations. It is called by RunPermutations for every possible permutation\n of the variables in the permutations dict. 
It should return True for a valid\n combination of permutation values and False for an invalid one.\n\n Parameters:\n ---------------------------------------------------------\n perm: dict of one possible combination of name:value\n pairs chosen from permutations.", "id": "f17407:m1"} {"signature": "def getHypersearchWinningModelID(jobID):", "body": "cjDAO = ClientJobsDAO.get()jobResults = cjDAO.jobGetFields(jobID, [''])[]print(\"\" % (jobResults,))jobResults = json.loads(jobResults)return jobResults['']", "docstring": "Parameters:\n-------------------------------------------------------------------\njobID: jobID of successfully-completed Hypersearch job\n\nretval: modelID of the winning model", "id": "f17408:m0"} {"signature": "def _executeExternalCmdAndReapStdout(args):", "body": "_debugOut((\"\") %(args,))p = subprocess.Popen(args,env=os.environ,stdout=subprocess.PIPE,stderr=subprocess.PIPE)_debugOut((\"\") % (args,))(stdoutData, stderrData) = p.communicate()_debugOut((\"\" +\"\") %(args, p.returncode, type(stdoutData), stdoutData, stderrData))result = dict(exitStatus = p.returncode,stdoutData = stdoutData,stderrData = stderrData,)_debugOut((\"\") %(args, pprint.pformat(result, indent=)))return result", "docstring": "args: Args list as defined for the args parameter in subprocess.Popen()\n\nReturns: result dictionary:\n {\n 'exitStatus':,\n 'stdoutData':\"string\",\n 'stderrData':\"string\"\n }", "id": "f17408:m1"} {"signature": "def _getTestList():", "body": "suiteNames = ['','','','',]testNames = []for suite in suiteNames:for f in dir(eval(suite)):if f.startswith(''):testNames.append('' % (suite, f))return testNames", "docstring": "Get the list of tests that can be run from this module", "id": "f17408:m3"} {"signature": "def setUp(self):", "body": "pass", "docstring": "Method called to prepare the test fixture. This is called by the\n unittest framework immediately before calling the test method; any exception\n raised by this method will be considered an error rather than a test\n failure. The default implementation does nothing.", "id": "f17408:c1:m0"} {"signature": "def tearDown(self):", "body": "self.resetExtraLogItems()", "docstring": "Method called immediately after the test method has been called and the\n result recorded. This is called even if the test method raised an exception,\n so the implementation in subclasses may need to be particularly careful\n about checking internal state. Any exception raised by this method will be\n considered an error rather than a test failure. This method will only be\n called if the setUp() succeeds, regardless of the outcome of the test\n method. 
The default implementation does nothing.", "id": "f17408:c1:m1"} {"signature": "def shortDescription(self):", "body": "return None", "docstring": "Override to force unittest framework to use test method names instead\n of docstrings in the report.", "id": "f17408:c1:m2"} {"signature": "def _printTestHeader(self):", "body": "print(\"\")print(\"\" % (self.__class__, self._testMethodName))", "docstring": "Print out what test we are running", "id": "f17408:c1:m3"} {"signature": "def _setDataPath(self, env):", "body": "assert env is not Noneif \"\" in env:newPath = \"\" % (env[\"\"], os.pathsep, g_myEnv.testSrcDataDir)else:newPath = g_myEnv.testSrcDataDirenv[\"\"] = newPath", "docstring": "Put the path to our datasets int the NTA_DATA_PATH variable which\n will be used to set the environment for each of the workers\n\n Parameters:\n ---------------------------------------------------------------------\n env: The current environment dict", "id": "f17408:c1:m4"} {"signature": "def _launchWorkers(self, cmdLine, numWorkers):", "body": "workers = []for i in range(numWorkers):stdout = tempfile.TemporaryFile()stderr = tempfile.TemporaryFile()p = subprocess.Popen(cmdLine, bufsize=, env=os.environ, shell=True,stdin=None, stdout=stdout, stderr=stderr)workers.append(p)return workers", "docstring": "Launch worker processes to execute the given command line\n\n Parameters:\n -----------------------------------------------\n cmdLine: The command line for each worker\n numWorkers: number of workers to launch\n retval: list of workers", "id": "f17408:c1:m5"} {"signature": "def _getJobInfo(self, cjDAO, workers, jobID):", "body": "jobInfo = cjDAO.jobInfo(jobID)runningCount = for worker in workers:retCode = worker.poll()if retCode is None:runningCount += if runningCount > :status = ClientJobsDAO.STATUS_RUNNINGelse:status = ClientJobsDAO.STATUS_COMPLETEDjobInfo = jobInfo._replace(status=status)if status == ClientJobsDAO.STATUS_COMPLETED:jobInfo = jobInfo._replace(completionReason=ClientJobsDAO.CMPL_REASON_SUCCESS)return jobInfo", "docstring": "Return the job info for a job\n\n Parameters:\n -----------------------------------------------\n cjDAO: client jobs database instance\n workers: list of workers for this job\n jobID: which job ID\n\n retval: job info", "id": "f17408:c1:m6"} {"signature": "def _generateHSJobParams(self,expDirectory=None,hsImp='',maxModels=,predictionCacheMaxRecords=None,dataPath=None,maxRecords=):", "body": "if expDirectory is not None:descriptionPyPath = os.path.join(expDirectory, \"\")permutationsPyPath = os.path.join(expDirectory, \"\")permutationsPyContents = open(permutationsPyPath, '').read()descriptionPyContents = open(descriptionPyPath, '').read()jobParams = {'' : generatePersistentJobGUID(),'': permutationsPyContents,'': descriptionPyContents,'': maxModels,'': hsImp}if predictionCacheMaxRecords is not None:jobParams[''] = predictionCacheMaxRecordselse:if dataPath is None:dataPath = resource_filename(\"\",os.path.join(\"\", \"\", \"\",\"\"))streamDef = dict(version = ,info = \"\",streams = [dict(source=\"\" % (dataPath),info=dataPath,columns=[\"\"],first_record=,last_record=maxRecords),],)expDesc = {\"\": \"\",\"\": streamDef,\"\": [{ \"\": \"\",\"\": \"\"},{ \"\": \"\",\"\": \"\",\"\": ,\"\": ,},],\"\": maxRecords,\"\": {'': ,'': ,'': ,'': ,'': ,'': ,'': ,},}jobParams = {\"\": generatePersistentJobGUID(),\"\":expDesc,\"\": maxModels,\"\": hsImp,}if predictionCacheMaxRecords is not None:jobParams[''] = predictionCacheMaxRecordsreturn jobParams", "docstring": "This method generates a 
canned Hypersearch Job Params structure based\non some high level options\n\nParameters:\n---------------------------------------------------------------------\npredictionCacheMaxRecords:\n If specified, determine the maximum number of records in\n the prediction cache.\ndataPath: When expDirectory is not specified, this is the data file\n to be used for the operation. If this value is not specified,\n it will use the /extra/qa/hotgym/qa_hotgym.csv.", "id": "f17408:c1:m7"} {"signature": "def _runPermutationsLocal(self, jobParams, loggingLevel=logging.INFO,env=None, waitForCompletion=True,continueJobId=None, ignoreErrModels=False):", "body": "print()print(\"\")print(\"\")print(\"\")if env is not None:saveEnvState = copy.deepcopy(os.environ)os.environ.update(env)cjDAO = ClientJobsDAO.get()if continueJobId is None:jobID = cjDAO.jobInsert(client='', cmdLine='',params=json.dumps(jobParams),alreadyRunning=True, minimumWorkers=, maximumWorkers=,jobType = cjDAO.JOB_TYPE_HS)else:jobID = continueJobIdargs = ['', '' % (jobID),'' % (loggingLevel)]if continueJobId is None:args.append('')try:hypersearch_worker.main(args)except SystemExit:passexcept:raiseif env is not None:os.environ = saveEnvStatemodels = cjDAO.modelsGetUpdateCounters(jobID)modelIDs = [model.modelId for model in models]if len(modelIDs) > :results = cjDAO.modelsGetResultAndStatus(modelIDs)else:results = []metricResults = []for result in results:if result.results is not None:metricResults.append(list(json.loads(result.results)[].values())[])else:metricResults.append(None)if not ignoreErrModels:self.assertNotEqual(result.completionReason, cjDAO.CMPL_REASON_ERROR,\"\" % (result.completionMsg))jobInfo = cjDAO.jobInfo(jobID)return (jobID, jobInfo, results, metricResults)", "docstring": "This runs permutations on the given experiment using just 1 worker\n in the current process\n\n Parameters:\n -------------------------------------------------------------------\n jobParams: filled in job params for a hypersearch\n loggingLevel: logging level to use in the Hypersearch worker\n env: if not None, this is a dict of environment variables\n that should be sent to each worker process. 
These can\n aid in re-using the same description/permutations file\n for different tests.\n waitForCompletion: If True, wait for job to complete before returning\n If False, then return resultsInfoForAllModels and\n metricResults will be None\n continueJobId: If not None, then this is the JobId of a job we want\n to continue working on with another worker.\n ignoreErrModels: If true, ignore erred models\n retval: (jobId, jobInfo, resultsInfoForAllModels, metricResults)", "id": "f17408:c1:m8"} {"signature": "def _runPermutationsCluster(self, jobParams, loggingLevel=logging.INFO,maxNumWorkers=, env=None,waitForCompletion=True, ignoreErrModels=False,timeoutSec=DEFAULT_JOB_TIMEOUT_SEC):", "body": "print()print(\"\")print(\"\")print(\"\")if env is not None and len(env) > :envItems = []for (key, value) in env.items():if (sys.platform.startswith('')):envItems.append(\"\" % (key, value))else:envItems.append(\"\" % (key, value))if (sys.platform.startswith('')):envStr = \"\" % (''.join(envItems))else:envStr = \"\" % (''.join(envItems))else:envStr = ''cmdLine = ''''% (envStr, loggingLevel)cjDAO = ClientJobsDAO.get()jobID = cjDAO.jobInsert(client='', cmdLine=cmdLine,params=json.dumps(jobParams),minimumWorkers=, maximumWorkers=maxNumWorkers,jobType = cjDAO.JOB_TYPE_HS)workerCmdLine = ''''% (envStr, jobID, loggingLevel)workers = self._launchWorkers(cmdLine=workerCmdLine, numWorkers=maxNumWorkers)print(\"\" % (jobID))print(\"\" % (maxNumWorkers),cmdLine)if not waitForCompletion:return (jobID, None, None)if timeoutSec is None:timeout=DEFAULT_JOB_TIMEOUT_SECelse:timeout=timeoutSecstartTime = time.time()lastUpdate = time.time()lastCompleted = lastCompletedWithError = lastCompletedAsOrphan = lastStarted = lastJobStatus = \"\"lastJobResults = NonelastActiveSwarms = NonelastEngStatus = NonemodelIDs = []print(\"\" % (\"\", \"\",\"\", \"\", \"\"))print(\"\")while (lastJobStatus != ClientJobsDAO.STATUS_COMPLETED)and (time.time() - lastUpdate < timeout):printUpdate = Falseif g_myEnv.options.verbosity == :time.sleep()jobInfo = self._getJobInfo(cjDAO, workers, jobID)if jobInfo.status != lastJobStatus:if jobInfo.status == ClientJobsDAO.STATUS_RUNNINGand lastJobStatus != ClientJobsDAO.STATUS_RUNNING:print(\"\"% (jobInfo.jobId))lastJobStatus = jobInfo.statusprintUpdate = Trueif g_myEnv.options.verbosity >= :if jobInfo.engWorkerState is not None:activeSwarms = json.loads(jobInfo.engWorkerState)['']if activeSwarms != lastActiveSwarms:print(\"\", ''.join(activeSwarms))lastActiveSwarms = activeSwarmsprint()if jobInfo.results != lastJobResults:print(\"\", jobInfo.results, \"\")lastJobResults = jobInfo.resultsif jobInfo.engStatus != lastEngStatus:print('' % jobInfo.engStatus)print()lastEngStatus = jobInfo.engStatusmodelCounters = cjDAO.modelsGetUpdateCounters(jobID)if len(modelCounters) != lastStarted:modelIDs = [x.modelId for x in modelCounters]lastStarted = len(modelCounters)printUpdate = Trueif len(modelIDs) > :completed = completedWithError = completedAsOrphan = infos = cjDAO.modelsGetResultAndStatus(modelIDs)for info in infos:if info.status == ClientJobsDAO.STATUS_COMPLETED:completed += if info.completionReason == ClientJobsDAO.CMPL_REASON_ERROR:completedWithError += if info.completionReason == ClientJobsDAO.CMPL_REASON_ORPHAN:completedAsOrphan += if completed != lastCompletedor completedWithError != lastCompletedWithErroror completedAsOrphan != lastCompletedAsOrphan:lastCompleted = completedlastCompletedWithError = completedWithErrorlastCompletedAsOrphan = completedAsOrphanprintUpdate = Trueif printUpdate:lastUpdate = 
time.time()if g_myEnv.options.verbosity >= :print(\"\", end='')print(\"\" % (lastJobStatus, lastStarted,lastCompleted,lastCompletedWithError,lastCompletedAsOrphan))print(\"\" % (lastJobStatus, lastStarted,lastCompleted,lastCompletedWithError,lastCompletedAsOrphan))jobInfo = self._getJobInfo(cjDAO, workers, jobID)if not ignoreErrModels:self.assertEqual (jobInfo.completionReason,ClientJobsDAO.CMPL_REASON_SUCCESS)models = cjDAO.modelsGetUpdateCounters(jobID)modelIDs = [model.modelId for model in models]if len(modelIDs) > :results = cjDAO.modelsGetResultAndStatus(modelIDs)else:results = []metricResults = []for result in results:if result.results is not None:metricResults.append(list(json.loads(result.results)[].values())[])else:metricResults.append(None)if not ignoreErrModels:self.assertNotEqual(result.completionReason, cjDAO.CMPL_REASON_ERROR,\"\" % (result.completionMsg))return (jobID, jobInfo, results, metricResults)", "docstring": "Given a prepared, filled in jobParams for a hypersearch, this starts\n the job, waits for it to complete, and returns the results for all\n models.\n\n Parameters:\n -------------------------------------------------------------------\n jobParams: filled in job params for a hypersearch\n loggingLevel: logging level to use in the Hypersearch worker\n maxNumWorkers: max # of worker processes to use\n env: if not None, this is a dict of environment variables\n that should be sent to each worker process. These can\n aid in re-using the same description/permutations file\n for different tests.\n waitForCompletion: If True, wait for job to complete before returning\n If False, then return resultsInfoForAllModels and\n metricResults will be None\n ignoreErrModels: If true, ignore erred models\n retval: (jobID, jobInfo, resultsInfoForAllModels, metricResults)", "id": "f17408:c1:m9"} {"signature": "def runPermutations(self, expDirectory, hsImp='', maxModels=,maxNumWorkers=, loggingLevel=logging.INFO,onCluster=False, env=None, waitForCompletion=True,continueJobId=None, dataPath=None, maxRecords=None,timeoutSec=None, ignoreErrModels=False,predictionCacheMaxRecords=None, **kwargs):", "body": "if env is None:env = dict()self._setDataPath(env)jobParams = self._generateHSJobParams(expDirectory=expDirectory,hsImp=hsImp, maxModels=maxModels,maxRecords=maxRecords,dataPath=dataPath,predictionCacheMaxRecords=predictionCacheMaxRecords)jobParams.update(kwargs)if onCluster:(jobID, jobInfo, resultInfos, metricResults)= self._runPermutationsCluster(jobParams=jobParams,loggingLevel=loggingLevel,maxNumWorkers=maxNumWorkers,env=env,waitForCompletion=waitForCompletion,ignoreErrModels=ignoreErrModels,timeoutSec=timeoutSec)else:(jobID, jobInfo, resultInfos, metricResults)= self._runPermutationsLocal(jobParams=jobParams,loggingLevel=loggingLevel,env=env,waitForCompletion=waitForCompletion,continueJobId=continueJobId,ignoreErrModels=ignoreErrModels)if not waitForCompletion:return (jobID, jobInfo, resultInfos, metricResults, None)print(\"\")print(\"\" % (jobInfo.completionReason))print(\"\" % (jobInfo.workerCompletionReason))print(\"\" % (jobInfo.workerCompletionMsg))if jobInfo.engWorkerState is not None:print(\"\")print(\"\")pprint.pprint(json.loads(jobInfo.engWorkerState))minErrScore=NonemetricAmts = []for result in metricResults:if result is None:metricAmts.append(numpy.inf)else:metricAmts.append(result)metricAmts = numpy.array(metricAmts)if len(metricAmts) > :minErrScore = metricAmts.min()minModelID = resultInfos[metricAmts.argmin()].modelIdcjDAO = ClientJobsDAO.get()modelParams = 
cjDAO.modelsGetParams([minModelID])[].paramsprint(\"\"% (pprint.pformat(json.loads(modelParams))))print(\"\" % (minErrScore))else:print(\"\")return (jobID, jobInfo, resultInfos, metricResults, minErrScore)", "docstring": "This runs permutations on the given experiment using just 1 worker\n\n Parameters:\n -------------------------------------------------------------------\n expDirectory: directory containing the description.py and permutations.py\n hsImp: which implementation of Hypersearch to use\n maxModels: max # of models to generate\n maxNumWorkers: max # of workers to use, N/A if onCluster is False\n loggingLevel: logging level to use in the Hypersearch worker\n onCluster: if True, run on the Hadoop cluster\n env: if not None, this is a dict of environment variables\n that should be sent to each worker process. These can\n aid in re-using the same description/permutations file\n for different tests.\n waitForCompletion: If True, wait for job to complete before returning\n If False, then return resultsInfoForAllModels and\n metricResults will be None\n continueJobId: If not None, then this is the JobId of a job we want\n to continue working on with another worker.\n ignoreErrModels: If true, ignore erred models\n maxRecords: This value is passed to the function, _generateHSJobParams(),\n to represent the maximum number of records to generate for\n the operation.\n dataPath: This value is passed to the function, _generateHSJobParams(),\n which points to the data file for the operation.\n predictionCacheMaxRecords:\n If specified, determine the maximum number of records in\n the prediction cache.\n\n retval: (jobID, jobInfo, resultsInfoForAllModels, metricResults,\n minErrScore)", "id": "f17408:c1:m10"} {"signature": "@classmethoddef _processArgs(cls):", "body": "helpString =\"\"\"\"\"\"allTests = _getTestList()for test in allTests:helpString += \"\" % (test)parser = OptionParser(helpString,conflict_handler=\"\")parser.add_option(\"\", default=, type=\"\",help=\"\")parser.add_option(\"\", action=\"\", default=False,help=\"\"\"\")parser.add_option(\"\", action=\"\", type=\"\",default=logging.INFO,help=\"\"\"\"\"\")parser.add_option(\"\", dest=\"\", default=, type='',help=(\"\"\"\"))return parser.parse_args(args=cls.args)", "docstring": "Parse our command-line args/options and strip them from sys.argv\nReturns the tuple (parsedOptions, remainingArgs)", "id": "f17408:c6:m0"} {"signature": "@classmethoddef parseArgs(cls):", "body": "return cls._processArgs()[]", "docstring": "Returns the test arguments after parsing", "id": "f17408:c6:m1"} {"signature": "@classmethoddef consumeArgs(cls):", "body": "return cls._processArgs()[]", "docstring": "Consumes the test arguments and returns the remaining arguments meant\n for unittest.man", "id": "f17408:c6:m2"} {"signature": "def __updateProcessCounter(self):", "body": "newcounter = for job in self.__procs:if job.is_alive():newcounter+=self.__numRunningProcs = newcounterreturn newcounter", "docstring": "Function that iterates through the running Processes\n and counts the number of processes that are currently alive.\n Sets numRunningProcs to this count", "id": "f17409:c0:m2"} {"signature": "def cancelJobs(self):", "body": "print(\"\")for proc in self.__procs:if not proc.is_alive():proc.terminate()print(\"\")", "docstring": "Function that cancels all the jobs in the\n process queue.", "id": "f17409:c0:m3"} {"signature": "def runJobs(self, maxJobs):", "body": "jobsrunning = self.__numRunningProcsif(maxJobs > ):jobsindx = 
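The tail of runPermutations (f17408:c1:m10) reduces the per-model metric results to a single best score: models with no result are treated as infinitely bad so they can never win, and numpy's argmin picks the winning model ID. A minimal, self-contained sketch of that reduction, with made-up model IDs and error values standing in for the stripped literals:

import numpy as np

# Hypothetical per-model error metrics; None means the model produced no result.
metricResults = [0.42, None, 0.17, 0.55, None]
modelIds = [101, 102, 103, 104, 105]

# Treat missing results as infinite error so they are never selected as best.
metricAmts = np.array([np.inf if m is None else m for m in metricResults])

if len(metricAmts) > 0:
    minErrScore = metricAmts.min()
    bestModelId = modelIds[metricAmts.argmin()]
    print("best model:", bestModelId, "error:", minErrScore)  # best model: 103 error: 0.17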
while(jobsindx):if(jobsindxcurJob = self.testQ[jobsindx]p = Process(target = curJob[], args = curJob[])p.start()self.__procs.append(p)jobsindx+=if jobsrunning >= maxJobs:time.sleep()print (\"\"\"\")elif jobsindx == len(self.testQ):time.sleep()print(\"\")jobsrunning = self.__updateProcessCounter()for proc in self.__procs:if proc.exitcode == :self.cancelJobs()assert False, (\"\"\"\")try:while True:result = self.__resultQ.get(True, )self.assertBenchmarks(result)except Empty:pass", "docstring": "Function that launched Hypersearch benchmark jobs.\n Runs jobs contained in self.testQ, until maxJobs are running\n in parallel at which point it waits until some jobs finish.", "id": "f17409:c0:m4"} {"signature": "def setUpExportDicts(self):", "body": "ret = []if self.maxBranchings is None:self.maxBranchings = [None]else:self.maxBranchings = self.maxBranchings.split('')if self.maxParticles is None:self.maxParticles = [None]else:self.maxParticles = self.maxParticles.split(\"\")for branch in self.maxBranchings:for part in self.maxParticles:curdict = dict()if not branch is None:curdict[self.BRANCHING_PROP] = branchif not part is None:curdict[self.PARTICLE_PROP] = partret+=[curdict]return ret", "docstring": "Setup up a dict of branchings and particles", "id": "f17409:c0:m19"} {"signature": "def benchmarkHotGym(self):", "body": "dataPath = os.path.join(self.datadir, \"\", \"\")streamDef = dict(version=,info=\"\",streams=[dict(source=\"\" % (dataPath),info=\"\",columns=[\"\", \"\", \"\"],last_record=self.splits[''],)],aggregation={'' : ,'' : [('', ''),('', ''),]},)expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()expDesc[\"\"][\"\"] = \"\"expDesc.update({\"\": streamDef,\"\": [{ \"\": \"\",\"\": \"\"},{ \"\": \"\",\"\": \"\",\"\": ,\"\": ,},{ \"\": \"\",\"\": \"\",},],\"\": self.__recordsToProcess,})expdir = os.path.join(self.outdir, \"\")self.generateModules(expDesc, expdir)self.descriptions[\"\"]=(expdir, expDesc)return expdir", "docstring": "Try running a basic experiment and permutations.", "id": "f17409:c0:m31"} {"signature": "def benchmarkSine(self):", "body": "dataPath = os.path.join(self.datadir, \"\", \"\")streamDef = dict(version=,info=\"\",streams=[dict(source=\"\" % (dataPath),info=\"\",columns=[\"\",\"\"],last_record=self.splits['']),],)expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()expDesc[\"\"][\"\"] = \"\"expDesc.update({\"\": streamDef,\"\": [{ \"\": \"\",\"\": \"\",\"\": -,\"\": ,},{ \"\": \"\",\"\": \"\",\"\": ,\"\": ,},],\"\": self.__recordsToProcess,})expdir = os.path.join(self.outdir, \"\")self.generateModules(expDesc, expdir)self.descriptions[\"\"]=(expdir, expDesc)return expdir", "docstring": "Try running a basic experiment and permutations", "id": "f17409:c0:m32"} {"signature": "def benchmarkTwoVars(self):", "body": "dataPath = os.path.join(self.datadir, \"\", \"\",\"\", \"\")streamDef = dict(version=,info=\"\",streams=[dict(source=\"\" % (dataPath),info=\"\",columns=[\"\",\"\"],last_record=self.splits[''],),],)expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()expDesc[\"\"][\"\"] = \"\"expDesc.update({\"\": streamDef,\"\": [{ \"\": \"\",\"\": \"\",\"\": -,\"\": ,},{ \"\": \"\",\"\": \"\",\"\": -,\"\": ,},],\"\": self.__recordsToProcess,})expdir = os.path.join(self.outdir, \"\")self.generateModules(expDesc, expdir)self.descriptions[\"\"]=(expdir, expDesc)return expdir", "docstring": "Try running a basic experiment and permutations", "id": "f17409:c0:m33"} {"signature": "def benchmarkThreeVars(self):", "body": "dataPath = os.path.join(self.datadir, \"\", \"\",\"\", \"\")streamDef = 
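runJobs (f17409:c0:m4) throttles Hypersearch benchmark jobs: it launches multiprocessing Processes until maxJobs are alive, sleeps when the slots are full, and finally drains a result queue until it raises Empty. The names _worker and runThrottled below are hypothetical; this is only a sketch of that throttling pattern, not the original benchmark runner:

import time
from multiprocessing import Process, Queue
from queue import Empty

def _worker(jobIdx, resultQ):
    # Stand-in for one benchmark job.
    time.sleep(0.1)
    resultQ.put((jobIdx, "ok"))

def runThrottled(numJobs, maxJobs):
    resultQ = Queue()
    procs = []
    nextJob = 0
    while nextJob < numJobs or any(p.is_alive() for p in procs):
        running = sum(1 for p in procs if p.is_alive())
        if nextJob < numJobs and running < maxJobs:
            p = Process(target=_worker, args=(nextJob, resultQ))
            p.start()
            procs.append(p)
            nextJob += 1
        else:
            time.sleep(0.05)  # wait for a worker slot to free up
    results = []
    try:
        while True:  # drain everything the workers reported
            results.append(resultQ.get(True, 1))
    except Empty:
        pass
    return results

if __name__ == "__main__":
    print(runThrottled(numJobs=5, maxJobs=2))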
dict(version=,info=\"\",streams=[dict(source=\"\" % (dataPath),info=\"\",columns=[\"\",\"\",\"\"],last_record=self.splits['']),],)expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()expDesc[\"\"][\"\"] = \"\"expDesc.update({\"\": streamDef,\"\": [{ \"\": \"\",\"\": \"\",\"\": -,\"\": ,},{ \"\": \"\",\"\": \"\",\"\": -,\"\": ,},{ \"\": \"\",\"\": \"\",\"\": -,\"\": ,}],\"\": self.__recordsToProcess,})expdir = os.path.join(self.outdir, \"\")self.generateModules(expDesc, expdir)self.descriptions[\"\"]=(expdir, expDesc)return expdir", "docstring": "Try running a basic experiment and permutations", "id": "f17409:c0:m34"} {"signature": "def benchmarkFourVars(self):", "body": "dataPath = os.path.join(self.datadir, \"\", \"\",\"\", \"\")streamDef = dict(version=,info=\"\",streams=[dict(source=\"\" % (dataPath),info=\"\",columns=[\"\",\"\",\"\",\"\"],last_record=self.splits['']),],)expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()expDesc[\"\"][\"\"] = \"\"expDesc.update({\"\": streamDef,\"\": [{ \"\": \"\",\"\": \"\",\"\": -,\"\": ,},{ \"\": \"\",\"\": \"\",\"\": -,\"\": ,},{ \"\": \"\",\"\": \"\",\"\": -,\"\": ,},{ \"\": \"\",\"\": \"\",\"\": -,\"\": ,}],\"\": self.__recordsToProcess,})expdir = os.path.join(self.outdir, \"\")self.generateModules(expDesc, expdir)self.descriptions[\"\"]=(expdir, expDesc)return expdir", "docstring": "Try running a basic experiment and permutations", "id": "f17409:c0:m35"} {"signature": "def benchmarkCategories(self):", "body": "dataPath = os.path.join(self.datadir, \"\", \"\",\"\", \"\")streamDef = dict(version=,info=\"\",streams=[dict(source=\"\" % (dataPath),info=\"\",columns=[\"\",\"\"],last_record=self.splits['']),],)expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()expDesc[\"\"][\"\"] = \"\"expDesc.update({\"\": streamDef,\"\": [{ \"\": \"\",\"\": \"\",},{ \"\": \"\",\"\": \"\",}],\"\": self.__recordsToProcess,})expdir = os.path.join(self.outdir, \"\")self.generateModules(expDesc, expdir)self.descriptions[\"\"]=(expdir, expDesc)return expdir", "docstring": "Try running a basic experiment and permutations", "id": "f17409:c0:m36"} {"signature": "def benchmarkTwoVarsSquare(self):", "body": "dataPath = os.path.join(self.datadir, \"\", \"\",\"\", \"\")streamDef = dict(version=,info=\"\",streams=[dict(source=\"\" % (dataPath),info=\"\",columns=[\"\",\"\"],last_record=self.splits['']),],)expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()expDesc[\"\"][\"\"] = \"\"expDesc.update({\"\": streamDef,\"\": [{ \"\": \"\",\"\": \"\",\"\": -,\"\": ,},{ \"\": \"\",\"\": \"\",\"\": -,\"\": ,}],\"\": self.__recordsToProcess,})expdir = os.path.join(self.outdir, \"\")self.generateModules(expDesc, expdir)self.descriptions[\"\"]=(expdir, expDesc)return expdir", "docstring": "Try running a basic experiment and permutations", "id": "f17409:c0:m37"} {"signature": "def benchmarkSawtooth(self):", "body": "dataPath = os.path.join(self.datadir, \"\", \"\")streamDef = dict(version=,info=\"\",streams=[dict(source=\"\" % (dataPath),info=\"\",columns=[\"\"],last_record=self.splits[''],),],)expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()expDesc[\"\"][\"\"] = \"\"expDesc.update({\"\": streamDef,\"\": [{ \"\": \"\",\"\": \"\",\"\":True,},],\"\": self.__recordsToProcess,})expdir = os.path.join(self.outdir, \"\")self.generateModules(expDesc, expdir)self.descriptions[\"\"]=(expdir, expDesc)return expdir", "docstring": "Try running a basic experiment and permutations", "id": "f17409:c0:m38"} {"signature": "def benchmarkHotGymSC(self):", "body": "dataPath = os.path.join(self.datadir, \"\", \"\")streamDef = 
dict(version=,info=\"\",streams=[dict(source=\"\" % (dataPath),info=\"\",columns=[\"\", \"\", \"\"],last_record=self.splits[''],)],aggregation={'' : ,'' : [('', ''),('', ''),]},)expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()expDesc[\"\"][\"\"] = \"\"expDesc[\"\"][\"\"] = []expDesc.update({\"\": streamDef,\"\": [{ \"\": \"\",\"\": \"\"},{ \"\": \"\",\"\": \"\",\"\": ,\"\": ,},{ \"\": \"\",\"\": \"\",},],\"\": self.__recordsToProcess,})expdir = os.path.join(self.outdir, \"\")self.generateModules(expDesc, expdir)self.descriptions[\"\"]=(expdir, expDesc)return expdir", "docstring": "The HotGym dataset, only the first gym, solved using spatial\n classification. This model learns the association between the date/time\n stamp and the consumption - the model does not get consumption fed in at\n the bottom.", "id": "f17409:c0:m39"} {"signature": "def generateModules(self, expDesc, outdir):", "body": "jobParams = dict(desription=expDesc)shutil.rmtree(outdir, ignore_errors=True)outdirv2term = os.path.join(outdir, \"\", \"\")outdirv2noterm = os.path.join(outdir, \"\", \"\")outdirdef = os.path.join(outdir, \"\", \"\")if self.__doV2Term:experiment_generator.expGenerator(args)args = [\"\" % (json.dumps(expDesc)),\"\",\"\" % (outdirv2noterm)]if self.__doV2noTerm:experiment_generator.expGenerator(args)args = [\"\" % (json.dumps(expDesc)),\"\",\"\" % (outdirdef)]if self.__doClusterDef:experiment_generator.expGenerator(args)", "docstring": "This calls ExpGenerator to generate a base description file and\n permutations file from expDesc.\n\n Parameters:\n -------------------------------------------------------------------\n expDesc: Experiment description dict\n outDir: Which output directory to use", "id": "f17409:c0:m40"} {"signature": "def createEncoder():", "body": "encoder = MultiEncoder()encoder.addMultipleEncoders({\"\": {\"\": \"\",\"\": \"\",\"\": \"\",\"\": ,\"\": ,\"\": True,\"\": ,\"\": },\"\": {\"\": \"\",\"\": \"\",\"\": \"\",\"\": (, )}})return encoder", "docstring": "Creates and returns a #MultiEncoder including a ScalarEncoder for\nenergy consumption and a DateEncoder for the time of the day.\n\n@see nupic/encoders/__init__.py for type to file-name mapping\n@see nupic/encoders for encoder source files", "id": "f17412:m0"} {"signature": "def createRecordSensor(network, name, dataSource):", "body": "regionType = \"\"regionParams = json.dumps({\"\": _VERBOSITY})network.addRegion(name, regionType, regionParams)sensorRegion = network.regions[name].getSelf()sensorRegion.encoder = createEncoder()network.regions[name].setParameter(\"\", \"\")sensorRegion.dataSource = dataSourcereturn sensorRegion", "docstring": "Creates a RecordSensor region that allows us to specify a file record\nstream as the input source.", "id": "f17412:m1"} {"signature": "def createNetwork(dataSource):", "body": "network = Network()sensor = createRecordSensor(network, name=_RECORD_SENSOR,dataSource=dataSource)createSpatialPooler(network, name=_L1_SPATIAL_POOLER,inputWidth=sensor.encoder.getWidth())linkType = \"\"linkParams = \"\"network.link(_RECORD_SENSOR, _L1_SPATIAL_POOLER, linkType, linkParams)l1temporalMemory = createTemporalMemory(network, _L1_TEMPORAL_MEMORY)network.link(_L1_SPATIAL_POOLER, _L1_TEMPORAL_MEMORY, linkType, linkParams)classifierParams = { '': ,'': '','': '','': }l1Classifier = network.addRegion(_L1_CLASSIFIER, \"\",json.dumps(classifierParams))l1Classifier.setParameter('', True)l1Classifier.setParameter('', True)network.link(_L1_TEMPORAL_MEMORY, _L1_CLASSIFIER, linkType, linkParams,srcOutput=\"\", 
destInput=\"\")network.link(_RECORD_SENSOR, _L1_CLASSIFIER, linkType, linkParams,srcOutput=\"\", destInput=\"\")network.link(_RECORD_SENSOR, _L1_CLASSIFIER, linkType, linkParams,srcOutput=\"\", destInput=\"\")network.link(_RECORD_SENSOR, _L1_CLASSIFIER, linkType, linkParams,srcOutput=\"\", destInput=\"\")l2inputWidth = l1temporalMemory.getSelf().getOutputElementCount(\"\")createSpatialPooler(network, name=_L2_SPATIAL_POOLER, inputWidth=l2inputWidth)network.link(_L1_TEMPORAL_MEMORY, _L2_SPATIAL_POOLER, linkType, linkParams)createTemporalMemory(network, _L2_TEMPORAL_MEMORY)network.link(_L2_SPATIAL_POOLER, _L2_TEMPORAL_MEMORY, linkType, linkParams)l2Classifier = network.addRegion(_L2_CLASSIFIER, \"\",json.dumps(classifierParams))l2Classifier.setParameter('', True)l2Classifier.setParameter('', True)network.link(_L2_TEMPORAL_MEMORY, _L2_CLASSIFIER, linkType, linkParams,srcOutput=\"\", destInput=\"\")network.link(_RECORD_SENSOR, _L2_CLASSIFIER, linkType, linkParams,srcOutput=\"\", destInput=\"\")network.link(_RECORD_SENSOR, _L2_CLASSIFIER, linkType, linkParams,srcOutput=\"\", destInput=\"\")network.link(_RECORD_SENSOR, _L2_CLASSIFIER, linkType, linkParams,srcOutput=\"\", destInput=\"\")return network", "docstring": "Creates and returns a new Network with a sensor region reading data from\n 'dataSource'. There are two hierarchical levels, each with one SP and one TM.\n @param dataSource - A RecordStream containing the input data\n @returns a Network ready to run", "id": "f17412:m4"} {"signature": "def runNetwork(network, numRecords, writer):", "body": "sensorRegion = network.regions[_RECORD_SENSOR]l1SpRegion = network.regions[_L1_SPATIAL_POOLER]l1TpRegion = network.regions[_L1_TEMPORAL_MEMORY]l1Classifier = network.regions[_L1_CLASSIFIER]l2SpRegion = network.regions[_L2_SPATIAL_POOLER]l2TpRegion = network.regions[_L2_TEMPORAL_MEMORY]l2Classifier = network.regions[_L2_CLASSIFIER]l1PreviousPredictedColumns = []l2PreviousPredictedColumns = []l1PreviousPrediction = Nonel2PreviousPrediction = Nonel1ErrorSum = l2ErrorSum = for record in range(numRecords):network.run()actual = float(sensorRegion.getOutputData(\"\")[])l1Predictions = l1Classifier.getOutputData(\"\")l1Probabilities = l1Classifier.getOutputData(\"\")l1Prediction = l1Predictions[l1Probabilities.argmax()]if l1PreviousPrediction is not None:l1ErrorSum += math.fabs(l1PreviousPrediction - actual)l1PreviousPrediction = l1Predictionl2Predictions = l2Classifier.getOutputData(\"\")l2Probabilities = l2Classifier.getOutputData(\"\")l2Prediction = l2Predictions[l2Probabilities.argmax()]if l2PreviousPrediction is not None:l2ErrorSum += math.fabs(l2PreviousPrediction - actual)l2PreviousPrediction = l2Predictionl1AnomalyScore = l1TpRegion.getOutputData(\"\")[]l2AnomalyScore = l2TpRegion.getOutputData(\"\")[]writer.writerow((record, actual, l1PreviousPrediction, l1AnomalyScore, l2PreviousPrediction, l2AnomalyScore))l1PredictedColumns = l1TpRegion.getOutputData(\"\").nonzero()[]l1PreviousPredictedColumns = copy.deepcopy(l1PredictedColumns)l2PredictedColumns = l2TpRegion.getOutputData(\"\").nonzero()[]l2PreviousPredictedColumns = copy.deepcopy(l2PredictedColumns)if numRecords > :print(\"\" % (l1ErrorSum / (numRecords - )))print(\"\" % (l2ErrorSum / (numRecords - )))", "docstring": "Runs specified Network writing the ensuing anomaly\nscores to writer.\n\n@param network: The Network instance to be run\n@param writer: A csv.writer used to write to output file.", "id": "f17412:m5"} {"signature": "def createEncoder():", "body": "consumption_encoder = 
ScalarEncoder(, , , n=, name=\"\",clipInput=True)time_encoder = DateEncoder(timeOfDay=(, ), name=\"\")encoder = MultiEncoder()encoder.addEncoder(\"\", consumption_encoder)encoder.addEncoder(\"\", time_encoder)return encoder", "docstring": "Create the encoder instance for our test and return it.", "id": "f17413:m0"} {"signature": "def createNetwork(dataSource):", "body": "network = Network()network.addRegion(\"\", \"\",json.dumps({\"\": _VERBOSITY}))sensor = network.regions[\"\"].getSelf()sensor.encoder = createEncoder()sensor.dataSource = dataSourceSP_PARAMS[\"\"] = sensor.encoder.getWidth()network.addRegion(\"\", \"\", json.dumps(SP_PARAMS))network.link(\"\", \"\", \"\", \"\")network.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")network.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")network.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")network.addRegion(\"\", \"\",json.dumps(TM_PARAMS))network.link(\"\", \"\", \"\", \"\")network.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")network.addRegion(\"\", \"\",json.dumps({}))network.link(\"\", \"\", \"\",\"\", srcOutput=\"\", destInput=\"\")network.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")spatialPoolerRegion = network.regions[\"\"]spatialPoolerRegion.setParameter(\"\", True)spatialPoolerRegion.setParameter(\"\", False)temporalPoolerRegion = network.regions[\"\"]temporalPoolerRegion.setParameter(\"\", True)temporalPoolerRegion.setParameter(\"\", True)temporalPoolerRegion.setParameter(\"\", True)temporalPoolerRegion.setParameter(\"\", True)return network", "docstring": "Create the Network instance.\n\n The network has a sensor region reading data from `dataSource` and passing\n the encoded representation to an SPRegion. The SPRegion output is passed to\n a TMRegion.\n\n :param dataSource: a RecordStream instance to get data from\n :returns: a Network instance ready to run", "id": "f17413:m1"} {"signature": "def runNetwork(network, writer):", "body": "sensorRegion = network.regions[\"\"]spatialPoolerRegion = network.regions[\"\"]temporalPoolerRegion = network.regions[\"\"]anomalyLikelihoodRegion = network.regions[\"\"]prevPredictedColumns = []for i in range(_NUM_RECORDS):network.run()consumption = sensorRegion.getOutputData(\"\")[]anomalyScore = temporalPoolerRegion.getOutputData(\"\")[]anomalyLikelihood = anomalyLikelihoodRegion.getOutputData(\"\")[]writer.writerow((i, consumption, anomalyScore, anomalyLikelihood))", "docstring": "Run the network and write output to writer.\n\n :param network: a Network instance to run\n :param writer: a csv.writer instance to write output to", "id": "f17413:m2"} {"signature": "def compute(self, inputs, outputs):", "body": "outputs[\"\"][:] = inputs[\"\"]", "docstring": "Run one iteration of IdentityRegion's compute", "id": "f17414:c0:m2"} {"signature": "@classmethoddef getSpec(cls):", "body": "spec = {\"\":IdentityRegion.__doc__,\"\":True,\"\":{\"\":{\"\":\"\",\"\":\"\",\"\":,\"\":True,\"\":False,\"\":True,\"\":False},},\"\":{\"\":{\"\":\"\",\"\":\"\",\"\":,\"\":True,\"\":True},},\"\":{\"\":{\"\":\"\",\"\":\"\",\"\":\"\",\"\":,\"\":\"\"},},}return spec", "docstring": "Return the Spec for IdentityRegion.", "id": "f17414:c0:m3"} {"signature": "def createTemporalAnomaly(recordParams, spatialParams=_SP_PARAMS,temporalParams=_TM_PARAMS,verbosity=_VERBOSITY):", "body": "inputFilePath = recordParams[\"\"]scalarEncoderArgs = recordParams[\"\"]dateEncoderArgs = recordParams[\"\"]scalarEncoder = ScalarEncoder(**scalarEncoderArgs)dateEncoder = 
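Both createEncoder variants follow the same pattern: a ScalarEncoder for the consumption value and a DateEncoder for time of day, combined in a MultiEncoder. The literal parameters are stripped in the extract above, so the values below are illustrative assumptions rather than the original settings:

from nupic.encoders import ScalarEncoder, DateEncoder, MultiEncoder

# Parameter values here are assumed for illustration only.
consumption_encoder = ScalarEncoder(w=21, minval=0.0, maxval=100.0, n=50,
                                    name="consumption", clipInput=True)
time_encoder = DateEncoder(timeOfDay=(21, 9.5), name="timestamp_timeOfDay")

encoder = MultiEncoder()
encoder.addEncoder("consumption", consumption_encoder)
encoder.addEncoder("timestamp", time_encoder)

print(encoder.getWidth())  # total width of the combined binary encoding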
DateEncoder(**dateEncoderArgs)encoder = MultiEncoder()encoder.addEncoder(scalarEncoderArgs[\"\"], scalarEncoder)encoder.addEncoder(dateEncoderArgs[\"\"], dateEncoder)network = Network()network.addRegion(\"\", \"\",json.dumps({\"\": verbosity}))sensor = network.regions[\"\"].getSelf()sensor.encoder = encodersensor.dataSource = FileRecordStream(streamID=inputFilePath)spatialParams[\"\"] = sensor.encoder.getWidth()network.addRegion(\"\", \"\",json.dumps(spatialParams))network.link(\"\", \"\", \"\", \"\")network.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")network.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")network.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")network.addRegion(\"\", \"\",json.dumps(temporalParams))network.link(\"\", \"\", \"\", \"\")network.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")spatialPoolerRegion = network.regions[\"\"]spatialPoolerRegion.setParameter(\"\", True)spatialPoolerRegion.setParameter(\"\", False)temporalPoolerRegion = network.regions[\"\"]temporalPoolerRegion.setParameter(\"\", True)temporalPoolerRegion.setParameter(\"\", True)temporalPoolerRegion.setParameter(\"\", True)temporalPoolerRegion.setParameter(\"\", True)return network", "docstring": "Generates a Network with connected RecordSensor, SP, TM.\n\n This function takes care of generating regions and the canonical links.\n The network has a sensor region reading data from a specified input and\n passing the encoded representation to an SPRegion.\n The SPRegion output is passed to a TMRegion.\n\n Note: this function returns a network that needs to be initialized. This\n allows the user to extend the network by adding further regions and\n connections.\n\n :param recordParams: a dict with parameters for creating RecordSensor region.\n :param spatialParams: a dict with parameters for creating SPRegion.\n :param temporalParams: a dict with parameters for creating TMRegion.\n :param verbosity: an integer representing how chatty the network will be.", "id": "f17415:m0"} {"signature": "def runNetwork(network, writer):", "body": "sensorRegion = network.regions[\"\"]temporalPoolerRegion = network.regions[\"\"]for i in range(_NUM_RECORDS):network.run()anomalyScore = temporalPoolerRegion.getOutputData(\"\")[]consumption = sensorRegion.getOutputData(\"\")[]writer.writerow((i, consumption, anomalyScore))", "docstring": "Run the network and write output to writer.\n\n :param network: a Network instance to run\n :param writer: a csv.writer instance to write output to", "id": "f17415:m1"} {"signature": "def createEncoder():", "body": "consumption_encoder = ScalarEncoder(, , , n=, name=\"\",clipInput=True)time_encoder = DateEncoder(timeOfDay=(, ), name=\"\")encoder = MultiEncoder()encoder.addEncoder(\"\", consumption_encoder)encoder.addEncoder(\"\", time_encoder)return encoder", "docstring": "Create the encoder instance for our test and return it.", "id": "f17416:m0"} {"signature": "def createNetwork(dataSource):", "body": "network = Network()network.addRegion(\"\", \"\",json.dumps({\"\": _VERBOSITY}))sensor = network.regions[\"\"].getSelf()sensor.encoder = createEncoder()sensor.dataSource = dataSourcesys.path.append(os.path.dirname(os.path.abspath(__file__)))from custom_region.identity_region import IdentityRegionNetwork.registerRegion(IdentityRegion)network.addRegion(\"\", \"\",json.dumps({\"\": sensor.encoder.getWidth(),}))network.link(\"\", \"\", \"\", \"\")network.initialize()return network", "docstring": "Create the Network instance.\n\n The network has a 
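createTemporalAnomaly and the other createNetwork helpers all build the canonical sensor -> SP -> TM wiring with the Network API. Region names, parameter dicts, and some link endpoints below are assumptions standing in for the stripped literals; this is a sketch of the wiring pattern, not the exact original network:

import json
from nupic.engine import Network

def buildSketchNetwork(dataSource, encoder, spParams, tmParams):
    network = Network()

    # Sensor region fed by a RecordStream and an encoder.
    network.addRegion("sensor", "py.RecordSensor", json.dumps({"verbosity": 0}))
    sensor = network.regions["sensor"].getSelf()
    sensor.encoder = encoder
    sensor.dataSource = dataSource

    # Spatial pooler sized to the encoder output.
    spParams["inputWidth"] = sensor.encoder.getWidth()
    network.addRegion("spatialPoolerRegion", "py.SPRegion", json.dumps(spParams))
    network.link("sensor", "spatialPoolerRegion", "UniformLink", "")
    network.link("sensor", "spatialPoolerRegion", "UniformLink", "",
                 srcOutput="resetOut", destInput="resetIn")

    # Temporal memory on top of the SP output, with the top-down feedback link.
    network.addRegion("temporalPoolerRegion", "py.TMRegion", json.dumps(tmParams))
    network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink", "")
    network.link("temporalPoolerRegion", "spatialPoolerRegion", "UniformLink", "",
                 srcOutput="topDownOut", destInput="topDownIn")

    # Enable learning and anomaly output, as the original helpers do.
    network.regions["spatialPoolerRegion"].setParameter("learningMode", True)
    network.regions["temporalPoolerRegion"].setParameter("learningMode", True)
    network.regions["temporalPoolerRegion"].setParameter("anomalyMode", True)
    # As the original docstring notes, the returned network still needs initialize().
    return network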
sensor region reading data from `dataSource` and passing\n the encoded representation to an Identity Region.\n\n :param dataSource: a RecordStream instance to get data from\n :returns: a Network instance ready to run", "id": "f17416:m1"} {"signature": "def runNetwork(network, writer):", "body": "identityRegion = network.regions[\"\"]for i in range(_NUM_RECORDS):network.run()encoding = identityRegion.getOutputData(\"\")writer.writerow((i, encoding))", "docstring": "Run the network and write output to writer.\n\n :param network: a Network instance to run\n :param writer: a csv.writer instance to write output to", "id": "f17416:m2"} {"signature": "def computeAccuracy(model, size, top):", "body": "accuracy = []filename = os.path.join(os.path.dirname(__file__), \"\")with zipfile.ZipFile(filename) as archive:with archive.open(\"\") as datafile:for _ in range():next(datafile)for _ in range(LEARNING_RECORDS):next(datafile)for _ in range(size):pages = readUserSession(datafile)model.resetSequenceStates()for i in range(len(pages) - ):result = model.run({\"\": pages[i]})inferences = result.inferences[\"\"][]predicted = sorted(list(inferences.items()), key=itemgetter(), reverse=True)[:top]accuracy.append( if pages[i + ] in list(zip(*predicted))[] else )return np.mean(accuracy)", "docstring": "Compute prediction accuracy by checking if the next page in the sequence is\nwithin the top N predictions calculated by the model\nArgs:\n model: HTM model\n size: Sample size\n top: top N predictions to use\n\nReturns: Probability the next page in the sequence is within the top N\n predicted pages", "id": "f17417:m0"} {"signature": "def readUserSession(datafile):", "body": "for line in datafile:pages = line.split()total = len(pages)if total < :continueif total > :continuereturn [PAGE_CATEGORIES[int(i) - ] for i in pages]return []", "docstring": "Reads the user session record from the file's cursor position\nArgs:\n datafile: Data file whose cursor points at the beginning of the record\n\nReturns:\n list of pages in the order clicked by the user", "id": "f17417:m1"} {"signature": "def filter(perm):", "body": "if perm[''] != perm['']:return Falsereturn True", "docstring": "This function can be used to selectively filter out specific permutation\n combinations. It is called for every possible permutation of the variables\n in the permutations dict. It should return True for valid a combination of \n permutation values and False for an invalid one. 
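computeAccuracy (f17417:m0) scores a prediction as correct when the actual next page is among the model's top N predicted pages. Isolated from the HTM model, the ranking step is just a sort of the inference dictionary by probability; the page labels and probabilities below are invented for illustration:

from operator import itemgetter
import numpy as np

def topNHit(inferences, actualNext, top):
    # Rank candidate pages by predicted probability and keep the best `top`.
    predicted = sorted(inferences.items(), key=itemgetter(1), reverse=True)[:top]
    return 1 if actualNext in list(zip(*predicted))[0] else 0

inferences = {"news": 0.40, "sports": 0.25, "weather": 0.20, "local": 0.15}
hits = [topNHit(inferences, "weather", top=3), topNHit(inferences, "local", top=3)]
print(np.mean(hits))  # 0.5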
\n\n Parameters:\n ---------------------------------------------------------\n perm: dict of one possible combination of name:value \n pairs chosen from permutations.", "id": "f17432:m0"} {"signature": "def analyzeOverlaps(activeCoincsFile, encodingsFile, dataset):", "body": "lines = activeCoincsFile.readlines()inputs = encodingsFile.readlines()w = len(inputs[].split(''))-patterns = set([])encodings = set([])coincs = [] reUsedCoincs = []firstLine = inputs[].split('')size = int(firstLine.pop())spOutput = np.zeros((len(lines),))inputBits = np.zeros((len(lines),w))print('', size)print('', len(lines), '')print('', w)count = for x in range(len(lines)):inputSpace = [] spBUout = [int(z) for z in lines[x].split('')] spBUout.pop() temp = set(spBUout)spOutput[x]=spBUoutinput = [int(z) for z in inputs[x].split('')] input.pop() tempInput = set(input)inputBits[x]=inputfor m in range(size):if m in tempInput:inputSpace.append(m)else:inputSpace.append('') repeatedBits = tempInput.intersection(encodings) reUsed = temp.intersection(patterns) if len(reUsed)==:coincs.append((count,temp,repeatedBits,inputSpace, tempInput)) else:reUsedCoincs.append((count,temp,repeatedBits,inputSpace, tempInput))patterns=patterns.union(temp) encodings = encodings.union(tempInput)count +=overlap = {}overlapVal = seen = []seen = (printOverlaps(coincs, coincs, seen))print(len(seen), '')seen = printOverlaps(reUsedCoincs, coincs, seen)Summ=[]for z in coincs:c=for y in reUsedCoincs:c += len(z[].intersection(y[]))Summ.append(c)print('', Summ)for m in range():displayLimit = min(, len(spOutput[m*:]))if displayLimit>:drawFile(dataset, np.zeros([len(inputBits[:(m+)*displayLimit]),len(inputBits[:(m+)*displayLimit])]), inputBits[:(m+)*displayLimit], spOutput[:(m+)*displayLimit], w, m+)else: print('')pyl.show()", "docstring": "Mirror Image Visualization: Shows the encoding space juxtaposed against the\n coincidence space. The encoding space is the bottom-up sensory encoding and\n the coincidence space depicts the corresponding activation of coincidences in\n the SP. Hence, the mirror image visualization is a visual depiction of the\n mapping of SP cells to the input representations.\n\n Note:\n * The files spBUOut and sensorBUOut are assumed to be in the output format\n used for LPF experiment outputs.\n * BU outputs for some sample datasets are provided. 
Specify the name of the\n dataset as an option while running this script.", "id": "f17441:m0"} {"signature": "def drawFile(dataset, matrix, patterns, cells, w, fnum):", "body": "score=count = assert len(patterns)==len(cells)for p in range(len(patterns)-):matrix[p+:,p] = [len(set(patterns[p]).intersection(set(q)))*/w for q in patterns[p+:]]matrix[p,p+:] = [len(set(cells[p]).intersection(set(r)))*/ for r in cells[p+:]]score += sum(abs(np.array(matrix[p+:,p])-np.array(matrix[p,p+:])))count += len(matrix[p+:,p])print('', score/count)fig = pyl.figure(figsize = (,), num = fnum)pyl.matshow(matrix, fignum = fnum)pyl.colorbar()pyl.title('', verticalalignment='', fontsize=)pyl.xlabel(''+dataset, fontsize=)pyl.ylabel('', fontsize=)", "docstring": "The similarity of two patterns in the bit-encoding space is displayed alongside\n their similarity in the sp-coinc space.", "id": "f17441:m1"} {"signature": "def printOverlaps(comparedTo, coincs, seen):", "body": "inputOverlap = cellOverlap = for y in comparedTo:closestInputs = []closestCells = []if len(seen)>:inputOverlap = max([len(seen[m][].intersection(y[])) for m in range(len(seen))])cellOverlap = max([len(seen[m][].intersection(y[])) for m in range(len(seen))])for m in range( len(seen) ):if len(seen[m][].intersection(y[]))==inputOverlap:closestInputs.append(seen[m][])if len(seen[m][].intersection(y[]))==cellOverlap:closestCells.append(seen[m][])seen.append((y[], y[], y[]))print('',y[]+,'',''.join(str(len(z[].intersection(y[]))).rjust() for z in coincs),'', inputOverlap, '', len(closestInputs), '',''.join(str(m+) for m in closestInputs).ljust(),'', cellOverlap, '', len(closestCells), '',''.join(str(m+) for m in closestCells))return seen", "docstring": "Compare the results and return True if success, False if failure\n\n Parameters:\n --------------------------------------------------------------------\n coincs: Which cells are we comparing?\n comparedTo: The set of 40 cells we being compared to (they have no overlap with seen)\n seen: Which of the cells we are comparing to have already been encountered.\n This helps glue together the unique and reused coincs", "id": "f17441:m2"} {"signature": "def generatePlot(outputs, origData):", "body": "PLOT_PRECISION = distribMatrix = np.zeros((PLOT_PRECISION+,PLOT_PRECISION+))outputSize = len(outputs)for i in range(,outputSize):for j in range(i+,outputSize):in1 = outputs[i]in2 = outputs[j]dist = (abs(in1-in2) > )intDist = int(dist.sum()/+)orig1 = origData[i]orig2 = origData[j]origDist = (abs(orig1-orig2) > )intOrigDist = int(origDist.sum()/+)if intDist < and intOrigDist > :print('' % (i, j, intDist))print('' % intOrigDist)x = int(PLOT_PRECISION*intDist/)y = int(PLOT_PRECISION*intOrigDist/)if distribMatrix[x, y] < :distribMatrix[x, y] = else:if distribMatrix[x, y] < :distribMatrix[x, y] += distribMatrix[, ] = distribMatrix[, ] = distribMatrix[, ] = distribMatrix[, ] = distribMatrix[, ] = distribMatrix[, ] = distribMatrix[, ] = distribMatrix[, ] = return distribMatrix", "docstring": "Generates a table where each cell represent a frequency of pairs\n as described below.\n x coordinate is the % difference between input records (origData list),\n y coordinate is the % difference between corresponding output records.", "id": "f17442:m0"} {"signature": "def generateRandomInput(numRecords, elemSize = , numSet = ):", "body": "inputs = []for _ in range(numRecords):input = np.zeros(elemSize, dtype=realDType)for _ in range(,numSet):ind = np.random.random_integers(, elemSize-, )[]input[ind] = while abs(input.sum() - numSet) > 
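drawFile fills a matrix whose lower triangle holds pairwise similarity in the input-bit space and whose upper triangle holds similarity in the SP output space, so a faithful input-to-column mapping shows up as a mirror image. A small stand-alone version of that pairwise-overlap computation, with tiny made-up patterns and assumed on-bit counts:

import numpy as np

def overlapPercent(a, b, activeCount):
    # Fraction (as a percentage) of the `activeCount` on-bits shared by two patterns.
    return len(set(a) & set(b)) * 100.0 / activeCount

# Each pattern is the list of indices of its on-bits; w on-bits per input,
# numActive active cells per SP output (sizes assumed here).
inputPatterns = [[0, 1, 2], [1, 2, 3], [6, 7, 8]]
spOutputs = [[10, 11], [10, 12], [20, 21]]
w, numActive = 3, 2

n = len(inputPatterns)
matrix = np.zeros((n, n))
for p in range(n - 1):
    for q in range(p + 1, n):
        matrix[q, p] = overlapPercent(inputPatterns[p], inputPatterns[q], w)   # input space
        matrix[p, q] = overlapPercent(spOutputs[p], spOutputs[q], numActive)   # SP space
print(matrix)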
:ind = np.random.random_integers(, elemSize-, )[]input[ind] = inputs.append(input)return inputs", "docstring": "Generates a set of input record\n\n Params:\n numRecords - how many records to generate\n elemSize - the size of each record (num 0s or 1s)\n numSet - how many 1s in each record\n\n Returns: a list of inputs", "id": "f17442:m1"} {"signature": "def appendInputWithSimilarValues(inputs):", "body": "numInputs = len(inputs)for i in range(numInputs):input = inputs[i]for j in range(len(input)-):if input[j] == and input[j+] == :newInput = copy.deepcopy(input)newInput[j] = newInput[j+] = inputs.append(newInput)break", "docstring": "Creates an 'one-off' record for each record in the inputs. Appends new\n records to the same inputs list.", "id": "f17442:m2"} {"signature": "def appendInputWithNSimilarValues(inputs, numNear = ):", "body": "numInputs = len(inputs)skipOne = Falsefor i in range(numInputs):input = inputs[i]numChanged = newInput = copy.deepcopy(input)for j in range(len(input)-):if skipOne:skipOne = Falsecontinueif input[j] == and input[j+] == :newInput[j] = newInput[j+] = inputs.append(newInput)newInput = copy.deepcopy(newInput)numChanged += skipOne = Trueif numChanged == numNear:break", "docstring": "Creates a neighboring record for each record in the inputs and adds\n new records at the end of the inputs list", "id": "f17442:m3"} {"signature": "def modifyBits(inputVal, maxChanges):", "body": "changes = np.random.random_integers(, maxChanges, )[]if changes == :return inputValinputWidth = len(inputVal)whatToChange = np.random.random_integers(, , changes)runningIndex = -numModsDone = for i in range(inputWidth):if numModsDone >= changes:breakif inputVal[i] == :runningIndex += if runningIndex in whatToChange:if i != and inputVal[i-] == :inputVal[i-] = inputVal[i] = return inputVal", "docstring": "Modifies up to maxChanges number of bits in the inputVal", "id": "f17442:m4"} {"signature": "def getRandomWithMods(inputSpace, maxChanges):", "body": "size = len(inputSpace)ind = np.random.random_integers(, size-, )[]value = copy.deepcopy(inputSpace[ind])if maxChanges == :return valuereturn modifyBits(value, maxChanges)", "docstring": "Returns a random selection from the inputSpace with randomly modified\n up to maxChanges number of bits.", "id": "f17442:m5"} {"signature": "def printMatrix(inputs, spOutput):", "body": "from pylab import matplotlib as matw=len(np.nonzero(inputs[])[])numActive=len(np.nonzero(spOutput[])[])matrix = np.zeros([*w+,*numActive+])for x in xrange(len(inputs)):i = [_hammingDistance(inputs[x], z) for z in inputs[x:]]j = [_hammingDistance(spOutput[x], a) for a in spOutput[x:]]for p, q in zip(i,j):matrix[p,q]+=for y in xrange(len(matrix)) :matrix[y]=[max(*x, ) if (x< and x>) else x for x in matrix[y]]cdict = {'':((,,),(,,),(,,),(,,)),'': ((,,),(,,),(,,),(,,)),'': ((,,),(,,),(,,),(,,))}my_cmap = mat.colors.LinearSegmentedColormap('',cdict,)pyl=mat.pyplotpyl.matshow(matrix, cmap = my_cmap)pyl.colorbar()pyl.ylabel('') pyl.xlabel('')pyl.title('')pyl.show()", "docstring": "(i,j)th cell of the diff matrix will have the number of inputs for which the input and output\n pattern differ by i bits and the cells activated differ at j places.\n Parameters:\n --------------------------------------------------------------------\n inputs: the input encodings\n spOutput: the coincidences activated in response to each input", "id": "f17443:m0"} {"signature": "def _hammingDistance(s1, s2):", "body": "return sum(abs(s1-s2))", "docstring": "Hamming distance between two numpy arrays s1 and 
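generateRandomInput (f17442:m1) builds each record as a mostly-zero vector with a fixed number of randomly placed ones. Since elemSize and numSet are stripped above, the sketch below assumes a 400-bit vector with 40 on-bits and uses numpy's modern Generator API instead of the original retry loop:

import numpy as np

def makeRandomInput(elemSize=400, numSet=40, rng=None):
    rng = rng or np.random.default_rng()
    vec = np.zeros(elemSize, dtype=np.uint32)
    # Choose numSet distinct positions to turn on.
    vec[rng.choice(elemSize, size=numSet, replace=False)] = 1
    return vec

inputs = [makeRandomInput() for _ in range(5)]
print([int(v.sum()) for v in inputs])  # [40, 40, 40, 40, 40]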
s2", "id": "f17443:m1"} {"signature": "def createModel(modelParams):", "body": "model = ModelFactory.create(modelParams)model.enableInference({\"\": \"\"})return model", "docstring": "Given a model params dictionary, create a CLA Model. Automatically enables\ninference for kw_energy_consumption.\n:param modelParams: Model params dict\n:return: OPF Model object", "id": "f17447:m0"} {"signature": "def getModelParamsFromName(gymName):", "body": "importName = \"\" % (gymName.replace(\"\", \"\").replace(\"\", \"\"))print(\"\" % importName)try:importedModelParams = importlib.import_module(importName).MODEL_PARAMSexcept ImportError:raise Exception(\"\"% gymName)return importedModelParams", "docstring": "Given a gym name, assumes a matching model params python module exists within\nthe model_params directory and attempts to import it.\n:param gymName: Gym name, used to guess the model params module name.\n:return: OPF Model params dictionary", "id": "f17447:m1"} {"signature": "def runIoThroughNupic(inputData, model, gymName, plot):", "body": "inputFile = open(inputData, \"\")csvReader = csv.reader(inputFile)next(csvReader)next(csvReader)next(csvReader)shifter = InferenceShifter()if plot:output = nupic_anomaly_output.NuPICPlotOutput(gymName)else:output = nupic_anomaly_output.NuPICFileOutput(gymName)counter = for row in csvReader:counter += if (counter % == ):print(\"\" % counter)timestamp = datetime.datetime.strptime(row[], DATE_FORMAT)consumption = float(row[])result = model.run({\"\": timestamp,\"\": consumption})if plot:result = shifter.shift(result)prediction = result.inferences[\"\"][]anomalyScore = result.inferences[\"\"]output.write(timestamp, consumption, prediction, anomalyScore)inputFile.close()output.close()", "docstring": "Handles looping over the input data and passing each row into the given model\nobject, as well as extracting the result object and passing it into an output\nhandler.\n:param inputData: file path to input data CSV\n:param model: OPF Model object\n:param gymName: Gym name, used for output handler naming\n:param plot: Whether to use matplotlib or not. If false, uses file output.", "id": "f17447:m2"} {"signature": "def runModel(gymName, plot=False):", "body": "print(\"\" % gymName)model = createModel(getModelParamsFromName(gymName))inputData = \"\" % (DATA_DIR, gymName.replace(\"\", \"\"))runIoThroughNupic(inputData, model, gymName, plot)", "docstring": "Assumes the gynName corresponds to both a like-named model_params file in the\nmodel_params directory, and that the data exists in a like-named CSV file in\nthe current directory.\n:param gymName: Important for finding model params and input CSV file\n:param plot: Plot in matplotlib? Don't use this unless matplotlib is\ninstalled.", "id": "f17447:m3"} {"signature": "def refreshGUI(self):", "body": "plt.pause()", "docstring": "Give plot a pause, so data is drawn and GUI's event loop can run.", "id": "f17452:c2:m3"} {"signature": "def _setRandomEncoderResolution(minResolution=):", "body": "encoder = (model_params.MODEL_PARAMS[\"\"][\"\"][\"\"][\"\"])if encoder[\"\"] == \"\":rangePadding = abs(_INPUT_MAX - _INPUT_MIN) * minValue = _INPUT_MIN - rangePaddingmaxValue = _INPUT_MAX + rangePaddingresolution = max(minResolution,(maxValue - minValue) / encoder.pop(\"\"))encoder[\"\"] = resolution", "docstring": "Given model params, figure out the correct resolution for the\nRandomDistributed encoder. 
Modifies params in place.", "id": "f17459:m0"} {"signature": "def runCPU():", "body": "model = ModelFactory.create(model_params.MODEL_PARAMS)model.enableInference({'': ''})shifter = InferenceShifter()actHistory = deque([] * WINDOW, maxlen=)predHistory = deque([] * WINDOW, maxlen=)actline, = plt.plot(range(WINDOW), actHistory)predline, = plt.plot(range(WINDOW), predHistory)actline.axes.set_ylim(, )predline.axes.set_ylim(, )while True:s = time.time()cpu = psutil.cpu_percent()modelInput = {'': cpu}result = shifter.shift(model.run(modelInput))inference = result.inferences[''][]if inference is not None:actHistory.append(result.rawInput[''])predHistory.append(inference)actline.set_ydata(actHistory) predline.set_ydata(predHistory) plt.draw()plt.legend( ('','') )try:plt.pause(SECONDS_PER_STEP)except:pass", "docstring": "Poll CPU usage, make predictions, and plot the results. Runs forever.", "id": "f17462:m0"} {"signature": "def _generateCategory(filename=\"\", numSequences=, elementsPerSeq=, numRepeats=, resets=False):", "body": "scriptDir = os.path.dirname(__file__)pathname = os.path.join(scriptDir, '', filename)print(\"\" % (pathname))fields = [('', '', ''), ('', '', ''),('', '', '')] outFile = FileRecordStream(pathname, write=True, fields=fields)sequences = []for i in range(numSequences):seq = [x for x in range(i*elementsPerSeq, (i+)*elementsPerSeq)]sequences.append(seq)seqIdxs = []for i in range(numRepeats):seqIdxs += list(range(numSequences))random.shuffle(seqIdxs)for seqIdx in seqIdxs:reset = int(resets)seq = sequences[seqIdx]for x in seq:outFile.appendRecord([reset, str(seqIdx), str(x)])reset = outFile.close()", "docstring": "Generate a simple dataset. This contains a bunch of non-overlapping\n sequences. \n\n Parameters:\n ----------------------------------------------------\n filename: name of the file to produce, including extension. It will\n be created in a 'datasets' sub-directory within the \n directory containing this script. \n numSequences: how many sequences to generate\n elementsPerSeq: length of each sequence\n numRepeats: how many times to repeat each sequence in the output \n resets: if True, turn on reset at start of each sequence", "id": "f17465:m0"} {"signature": "def _generateScalar(filename=\"\", numSequences=, elementsPerSeq=, numRepeats=, stepSize=, resets=False):", "body": "scriptDir = os.path.dirname(__file__)pathname = os.path.join(scriptDir, '', filename)print(\"\" % (pathname))fields = [('', '', ''), ('', '', ''),('', '', '')] outFile = FileRecordStream(pathname, write=True, fields=fields)sequences = []for i in range(numSequences):seq = [x for x in range(i*elementsPerSeq, (i+)*elementsPerSeq)]sequences.append(seq)seqIdxs = []for i in range(numRepeats):seqIdxs += list(range(numSequences))random.shuffle(seqIdxs)for seqIdx in seqIdxs:reset = int(resets)seq = sequences[seqIdx]for x in seq:outFile.appendRecord([reset, str(seqIdx), x*stepSize])reset = outFile.close()", "docstring": "Generate a simple dataset. This contains a bunch of non-overlapping\n sequences of scalar values. \n\n Parameters:\n ----------------------------------------------------\n filename: name of the file to produce, including extension. It will\n be created in a 'datasets' sub-directory within the \n directory containing this script. 
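_setRandomEncoderResolution (f17459:m0) derives a resolution for the RandomDistributedScalarEncoder from the expected input range: pad the range, divide by the desired bucket count, and never go below a minimum resolution. The constants are stripped above, so the 20% padding and 130 buckets below are assumed values used only to show the arithmetic:

def rdseResolution(inputMin, inputMax, numBuckets=130, minResolution=0.001,
                   paddingFraction=0.2):
    # Pad the observed range so values slightly outside it still encode cleanly.
    rangePadding = abs(inputMax - inputMin) * paddingFraction
    minValue = inputMin - rangePadding
    maxValue = inputMax + rangePadding
    return max(minResolution, (maxValue - minValue) / numBuckets)

print(rdseResolution(0.0, 100.0))  # 1.0769..., i.e. (140 units of padded range) / 130 buckets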
\n numSequences: how many sequences to generate\n elementsPerSeq: length of each sequence\n numRepeats: how many times to repeat each sequence in the output\n stepSize: how far apart each scalar is \n resets: if True, turn on reset at start of each sequence", "id": "f17465:m1"} {"signature": "def _generateOverlapping(filename=\"\", numSequences=, elementsPerSeq=, numRepeats=, hub=[,], hubOffset=, resets=False):", "body": "assert (hubOffset + len(hub) <= elementsPerSeq)scriptDir = os.path.dirname(__file__)pathname = os.path.join(scriptDir, '', filename)print(\"\" % (pathname))fields = [('', '', ''), ('', '', ''),('', '', '')] outFile = FileRecordStream(pathname, write=True, fields=fields)sequences = []nextElemIdx = max(hub)+for _ in range(numSequences):seq = []for j in range(hubOffset):seq.append(nextElemIdx)nextElemIdx += for j in hub:seq.append(j)j = hubOffset + len(hub)while j < elementsPerSeq:seq.append(nextElemIdx)nextElemIdx += j += sequences.append(seq)seqIdxs = []for _ in range(numRepeats):seqIdxs += list(range(numSequences))random.shuffle(seqIdxs)for seqIdx in seqIdxs:reset = int(resets)seq = sequences[seqIdx]for (x) in seq:outFile.appendRecord([reset, str(seqIdx), str(x)])reset = outFile.close()", "docstring": "Generate a temporal dataset containing sequences that overlap one or more\n elements with other sequences. \n\n Parameters:\n ----------------------------------------------------\n filename: name of the file to produce, including extension. It will\n be created in a 'datasets' sub-directory within the \n directory containing this script. \n numSequences: how many sequences to generate\n elementsPerSeq: length of each sequence\n numRepeats: how many times to repeat each sequence in the output \n hub: sub-sequence to place within each other sequence \n hubOffset: where, within each sequence, to place the hub\n resets: if True, turn on reset at start of each sequence", "id": "f17465:m2"} {"signature": "def _generateSimple(filename=\"\", numSequences=, elementsPerSeq=, numRepeats=):", "body": "scriptDir = os.path.dirname(__file__)pathname = os.path.join(scriptDir, '', filename)print(\"\" % (pathname))fields = [('', '', ''), ('', '', ''), ('', '', '')] outFile = FileRecordStream(pathname, write=True, fields=fields)sequences = []for i in range(numSequences):seq = [x for x in range(i*elementsPerSeq, (i+)*elementsPerSeq)]sequences.append(seq)seqIdxs = []for i in range(numRepeats):seqIdxs += list(range(numSequences))random.shuffle(seqIdxs)timestamp = datetime.datetime(year=, month=, day=, hour=, minute=,second=)timeDelta = datetime.timedelta(hours=)for seqIdx in seqIdxs:seq = sequences[seqIdx]for x in seq:outFile.appendRecord([timestamp, str(x), x])timestamp += timeDeltafor seqIdx in seqIdxs:seq = sequences[seqIdx]for i,x in enumerate(seq):if i != :outFile.appendRecord([timestamp, str(x), x])timestamp += timeDeltafor seqIdx in seqIdxs:seq = sequences[seqIdx]for i,x in enumerate(seq):if i != :outFile.appendRecord([timestamp, str(x), x])timestamp += timeDeltafor seqIdx in seqIdxs:seq = sequences[seqIdx]for x in seq:outFile.appendRecord([timestamp, str(x), x])timestamp += timeDeltaoutFile.close()", "docstring": "Generate a simple dataset. This contains a bunch of non-overlapping\n sequences. \n\n At the end of the dataset, we introduce missing records so that test\n code can insure that the model didn't get confused by them. \n\n Parameters:\n ----------------------------------------------------\n filename: name of the file to produce, including extension. 
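The _generate* dataset helpers all share the same skeleton: build the element sequences, shuffle repeated sequence indices, and stream the rows out through a FileRecordStream. A trimmed sketch of that skeleton; the field names, types, and sizes are assumptions standing in for the stripped values:

import random
from nupic.data.file_record_stream import FileRecordStream

def generateSimpleDataset(pathname, numSequences=2, elementsPerSeq=3, numRepeats=10):
    # (name, type, special) field triples; "R" flags the reset field.
    fields = [("reset", "int", "R"), ("category", "string", ""), ("field1", "int", "")]
    outFile = FileRecordStream(pathname, write=True, fields=fields)

    # Non-overlapping sequences: [0,1,2], [3,4,5], ...
    sequences = [list(range(i * elementsPerSeq, (i + 1) * elementsPerSeq))
                 for i in range(numSequences)]

    # Repeat every sequence numRepeats times, in shuffled order.
    seqIdxs = list(range(numSequences)) * numRepeats
    random.shuffle(seqIdxs)

    for seqIdx in seqIdxs:
        reset = 1  # mark the first element of each sequence
        for x in sequences[seqIdx]:
            outFile.appendRecord([reset, str(seqIdx), x])
            reset = 0
    outFile.close()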
It will\n be created in a 'datasets' sub-directory within the \n directory containing this script. \n numSequences: how many sequences to generate\n elementsPerSeq: length of each sequence\n numRepeats: how many times to repeat each sequence in the output", "id": "f17493:m0"} {"signature": "def permutationFilter(perm):", "body": "return True", "docstring": "This function can be used to selectively filter out specific permutation\n combinations. It is called by RunPermutations for every possible permutation\n of the variables in the permutations dict. It should return True for valid a\n combination of permutation values and False for an invalid one.\n\n Parameters:\n ---------------------------------------------------------\n perm: dict of one possible combination of name:value\n pairs chosen from permutations.", "id": "f17497:m0"} {"signature": "def permutationFilter(perm):", "body": "return True", "docstring": "This function can be used to selectively filter out specific permutation\n combinations. It is called by RunPermutations for every possible permutation\n of the variables in the permutations dict. It should return True for valid a\n combination of permutation values and False for an invalid one.\n\n Parameters:\n ---------------------------------------------------------\n perm: dict of one possible combination of name:value\n pairs chosen from permutations.", "id": "f17499:m0"} {"signature": "def _generateSimple(filename=\"\", numSequences=, elementsPerSeq=, numRepeats=, resets=False):", "body": "scriptDir = os.path.dirname(__file__)pathname = os.path.join(scriptDir, '', filename)print(\"\" % (pathname))fields = [('', '', ''), ('', '', ''), ('', '', '')] outFile = FileRecordStream(pathname, write=True, fields=fields)sequences = []for i in range(numSequences):seq = [x for x in range(i*elementsPerSeq, (i+)*elementsPerSeq)]sequences.append(seq)seqIdxs = []for i in range(numRepeats):seqIdxs += list(range(numSequences))random.shuffle(seqIdxs)for seqIdx in seqIdxs:reset = int(resets)seq = sequences[seqIdx]for x in seq:outFile.appendRecord([reset, str(x), x])reset = outFile.close()", "docstring": "Generate a simple dataset. This contains a bunch of non-overlapping\n sequences. \n\n Parameters:\n ----------------------------------------------------\n filename: name of the file to produce, including extension. It will\n be created in a 'datasets' sub-directory within the \n directory containing this script. 
\n numSequences: how many sequences to generate\n elementsPerSeq: length of each sequence\n numRepeats: how many times to repeat each sequence in the output \n resets: if True, turn on reset at start of each sequence", "id": "f17515:m0"} {"signature": "def _generateOverlapping(filename=\"\", numSequences=, elementsPerSeq=, numRepeats=, hub=[,], hubOffset=, resets=False):", "body": "assert (hubOffset + len(hub) <= elementsPerSeq)scriptDir = os.path.dirname(__file__)pathname = os.path.join(scriptDir, '', filename)print(\"\" % (pathname))fields = [('', '', ''), ('', '', ''), ('', '', '')] outFile = FileRecordStream(pathname, write=True, fields=fields)sequences = []nextElemIdx = max(hub)+for _ in range(numSequences):seq = []for j in range(hubOffset):seq.append(nextElemIdx)nextElemIdx += for j in hub:seq.append(j)j = hubOffset + len(hub)while j < elementsPerSeq:seq.append(nextElemIdx)nextElemIdx += j += sequences.append(seq)seqIdxs = []for _ in range(numRepeats):seqIdxs += list(range(numSequences))random.shuffle(seqIdxs)for seqIdx in seqIdxs:reset = int(resets)seq = sequences[seqIdx]for (x) in seq:outFile.appendRecord([reset, str(x), x])reset = outFile.close()", "docstring": "Generate a temporal dataset containing sequences that overlap one or more\n elements with other sequences. \n\n Parameters:\n ----------------------------------------------------\n filename: name of the file to produce, including extension. It will\n be created in a 'datasets' sub-directory within the \n directory containing this script. \n numSequences: how many sequences to generate\n elementsPerSeq: length of each sequence\n numRepeats: how many times to repeat each sequence in the output \n hub: sub-sequence to place within each other sequence \n hubOffset: where, within each sequence, to place the hub\n resets: if True, turn on reset at start of each sequence", "id": "f17515:m1"} {"signature": "def _generateFirstOrder0():", "body": "numCategories = initProb = numpy.zeros(numCategories)initProb[] = firstOrder = dict()firstOrder[''] = numpy.array([, , , , ])firstOrder[''] = numpy.array([, , , , ])firstOrder[''] = numpy.array([, , , , ])firstOrder[''] = numpy.array([, , , , ])firstOrder[''] = numpy.array([, , , , ])secondOrder = NonecategoryList = ['' % x for x in range()]return (initProb, firstOrder, secondOrder, , categoryList)", "docstring": "Generate the initial, first order, and second order transition\n probabilities for 'probability0'. For this model, we generate the following\n set of sequences:\n\n .1 .75\n 0----1-----2\n \\ \\ \n \\ \\ .25\n \\ \\-----3\n \\\n \\ .9 .5 \n \\--- 4--------- 2\n \\\n \\ .5\n \\---------3 \n\n\n\n\n Parameters:\n ----------------------------------------------------------------------\n retval: (initProb, firstOrder, secondOrder, seqLen)\n initProb: Initial probability for each category. This is a vector\n of length len(categoryList).\n firstOrder: A dictionary of the 1st order probabilities. The key\n is the 1st element of the sequence, the value is\n the probability of each 2nd element given the first. \n secondOrder: A dictionary of the 2nd order probabilities. The key\n is the first 2 elements of the sequence, the value is\n the probability of each possible 3rd element given the \n first two. \n seqLen: Desired length of each sequence. The 1st element will\n be generated using the initProb, the 2nd element by the\n firstOrder table, and the 3rd and all successive \n elements by the secondOrder table. 
\n categoryList: list of category names to use\n\n\n Here is an example of some return values when there are 3 categories\n initProb: [0.7, 0.2, 0.1]\n\n firstOrder: {'[0]': [0.3, 0.3, 0.4],\n '[1]': [0.3, 0.3, 0.4],\n '[2]': [0.3, 0.3, 0.4]}\n\n secondOrder: {'[0,0]': [0.3, 0.3, 0.4],\n '[0,1]': [0.3, 0.3, 0.4],\n '[0,2]': [0.3, 0.3, 0.4],\n '[1,0]': [0.3, 0.3, 0.4],\n '[1,1]': [0.3, 0.3, 0.4],\n '[1,2]': [0.3, 0.3, 0.4],\n '[2,0]': [0.3, 0.3, 0.4],\n '[2,1]': [0.3, 0.3, 0.4],\n '[2,2]': [0.3, 0.3, 0.4]}", "id": "f17515:m2"} {"signature": "def _generateFileFromProb(filename, numRecords, categoryList, initProb, firstOrderProb, secondOrderProb, seqLen, numNoise=, resetsEvery=None):", "body": "print(\"\" % (filename))fields = [('', '', ''), ('', '', ''),('', '', '')]scriptDir = os.path.dirname(__file__)pathname = os.path.join(scriptDir, '', filename)outFile = FileRecordStream(pathname, write=True, fields=fields)initCumProb = initProb.cumsum()firstOrderCumProb = dict()for (key,value) in firstOrderProb.items():firstOrderCumProb[key] = value.cumsum()if secondOrderProb is not None:secondOrderCumProb = dict()for (key,value) in secondOrderProb.items():secondOrderCumProb[key] = value.cumsum()else:secondOrderCumProb = NoneelementsInSeq = []numElementsSinceReset = maxCatIdx = len(categoryList) - for _ in range(numRecords):if numElementsSinceReset == :reset = else:reset = rand = numpy.random.rand()if secondOrderCumProb is None:if len(elementsInSeq) == :catIdx = numpy.searchsorted(initCumProb, rand)elif len(elementsInSeq) >= and(seqLen is None or len(elementsInSeq) < seqLen-numNoise):catIdx = numpy.searchsorted(firstOrderCumProb[str(elementsInSeq[-])], rand)else: catIdx = numpy.random.randint(len(categoryList))else:if len(elementsInSeq) == :catIdx = numpy.searchsorted(initCumProb, rand)elif len(elementsInSeq) == :catIdx = numpy.searchsorted(firstOrderCumProb[str(elementsInSeq)], rand)elif (len(elementsInSeq) >=) and(seqLen is None or len(elementsInSeq) < seqLen-numNoise):catIdx = numpy.searchsorted(secondOrderCumProb[str(elementsInSeq[-:])], rand)else: catIdx = numpy.random.randint(len(categoryList))catIdx = min(maxCatIdx, catIdx)outFile.appendRecord([reset, categoryList[catIdx], catIdx]) elementsInSeq.append(catIdx)numElementsSinceReset += if resetsEvery is not None and numElementsSinceReset == resetsEvery:numElementsSinceReset = elementsInSeq = []if seqLen is not None and (len(elementsInSeq) == seqLen+numNoise):elementsInSeq = []outFile.close()", "docstring": "Generate a set of records reflecting a set of probabilities.\n\n Parameters:\n ----------------------------------------------------------------\n filename: name of .csv file to generate\n numRecords: number of records to generate\n categoryList: list of category names\n initProb: Initial probability for each category. This is a vector\n of length len(categoryList).\n firstOrderProb: A dictionary of the 1st order probabilities. The key\n is the 1st element of the sequence, the value is\n the probability of each 2nd element given the first. \n secondOrderProb: A dictionary of the 2nd order probabilities. The key\n is the first 2 elements of the sequence, the value is\n the probability of each possible 3rd element given the \n first two. If this is None, then the sequences will be\n first order only. \n seqLen: Desired length of each sequence. The 1st element will\n be generated using the initProb, the 2nd element by the\n firstOrder table, and the 3rd and all successive \n elements by the secondOrder table. None means infinite\n length. 
\n numNoise: Number of noise elements to place between each \n sequence. The noise elements are evenly distributed from \n all categories. \n resetsEvery: If not None, generate a reset every N records\n\n\n Here is an example of some parameters:\n\n categoryList: ['cat1', 'cat2', 'cat3']\n\n initProb: [0.7, 0.2, 0.1]\n\n firstOrderProb: {'[0]': [0.3, 0.3, 0.4],\n '[1]': [0.3, 0.3, 0.4],\n '[2]': [0.3, 0.3, 0.4]}\n\n secondOrderProb: {'[0,0]': [0.3, 0.3, 0.4],\n '[0,1]': [0.3, 0.3, 0.4],\n '[0,2]': [0.3, 0.3, 0.4],\n '[1,0]': [0.3, 0.3, 0.4],\n '[1,1]': [0.3, 0.3, 0.4],\n '[1,2]': [0.3, 0.3, 0.4],\n '[2,0]': [0.3, 0.3, 0.4],\n '[2,1]': [0.3, 0.3, 0.4],\n '[2,2]': [0.3, 0.3, 0.4]}", "id": "f17515:m3"} {"signature": "def chunk(l, n):", "body": "newn = int( * len(l) / n + )for i in range(, n-):yield l[i*newn:i*newn+newn]yield l[n*newn-newn:]", "docstring": "Yield n successive chunks from l.", "id": "f17518:m3"} {"signature": "def slice_sampler(px, N = , x = None):", "body": "values = np.zeros(N, dtype=np.int)samples = np.arange(len(px))px = np.array(px) / (*sum(px))u = uniform(, max(px))for n in range(N):included = px>=uchoice = random.sample(list(range(np.sum(included))), )[]values[n] = samples[included][choice]u = uniform(, px[included][choice])if x:if len(x) == len(px):x=np.array(x)values = x[values]else:print(\"\")return values", "docstring": "Provides samples from a user-defined distribution.\n\nslice_sampler(px, N = 1, x = None)\n\nInputs:\npx = A discrete probability distribution.\nN = Number of samples to return, default is 1\nx = Optional list/array of observation values to return, where prob(x) = px.\n\nOutputs:\nIf x=None (default) or if len(x) != len(px), it will return an array of integers\nbetween 0 and len(px)-1. If x is supplied, it will return the\nsamples from x according to the distribution px.", "id": "f17518:m6"} {"signature": "def getSearch(rootDir):", "body": "dataPath = os.path.abspath(os.path.join(rootDir, '', ''))streamDef = dict(version = , info = \"\",streams = [dict(source=\"\" % (dataPath), info=\"\", columns=[\"\"],),],)expDesc = {\"\": '',\"\":{\"\":\"\",\"\": [],},\"\": \"\",\"\": streamDef,\"\": [{ \"\": \"\",\"\": \"\",},{ \"\": \"\",\"\": \"\",},{ \"\": \"\",\"\": \"\",},],\"\": -,}return expDesc", "docstring": "This method returns search description. See the following file for the\n schema of the dictionary this method returns:\n py/nupic/swarming/exp_generator/experimentDescriptionSchema.json\n\n The streamDef element defines the stream for this model. The schema for this\n element can be found at:\n py/nupicengine/cluster/database/StreamDef.json", "id": "f17520:m0"} {"signature": "def _generateCategory(filename=\"\", numSequences=, elementsPerSeq=, numRepeats=):", "body": "scriptDir = os.path.dirname(__file__)pathname = os.path.join(scriptDir, '', filename)print(\"\" % (pathname))fields = [('', '', ''), ('', '', '')] outFile = FileRecordStream(pathname, write=True, fields=fields)sequences = []for i in range(numSequences):seq = [x for x in range(i*elementsPerSeq, (i+)*elementsPerSeq)]sequences.append(seq)seqIdxs = []for i in range(numRepeats):seqIdxs += list(range(numSequences))random.shuffle(seqIdxs)for seqIdx in seqIdxs:seq = sequences[seqIdx]for x in seq:outFile.appendRecord([str(seqIdx), str(x)])outFile.close()", "docstring": "Generate a simple dataset. This contains a bunch of non-overlapping\n sequences. \n\n Parameters:\n ----------------------------------------------------\n filename: name of the file to produce, including extension. 
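_generateFileFromProb and slice_sampler both come down to drawing from a discrete distribution; the transition-table version does it with a cumulative sum and numpy.searchsorted, which is the part worth seeing in isolation. The category names and probability table below are invented:

import numpy as np

categoryList = ["cat0", "cat1", "cat2"]
firstOrderProb = {"[0]": np.array([0.1, 0.6, 0.3]),
                  "[1]": np.array([0.5, 0.2, 0.3]),
                  "[2]": np.array([0.3, 0.3, 0.4])}

# Pre-compute cumulative distributions once; sampling is then a binary search.
firstOrderCumProb = {k: v.cumsum() for k, v in firstOrderProb.items()}

prevIdx = 0
sequence = [prevIdx]
for _ in range(10):
    rand = np.random.rand()
    catIdx = int(np.searchsorted(firstOrderCumProb[str([prevIdx])], rand))
    catIdx = min(catIdx, len(categoryList) - 1)  # guard against the rand == 1.0 edge
    sequence.append(catIdx)
    prevIdx = catIdx
print([categoryList[i] for i in sequence])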
It will\n be created in a 'datasets' sub-directory within the \n directory containing this script. \n numSequences: how many sequences to generate\n elementsPerSeq: length of each sequence\n numRepeats: how many times to repeat each sequence in the output", "id": "f17524:m0"} {"signature": "def _generateScalar(filename=\"\", numSequences=, elementsPerSeq=, numRepeats=, stepSize=, includeRandom=False):", "body": "scriptDir = os.path.dirname(__file__)pathname = os.path.join(scriptDir, '', filename)print(\"\" % (pathname))fields = [('', '', ''), ('', '', '')] if includeRandom:fields += [('', '', '')] outFile = FileRecordStream(pathname, write=True, fields=fields)sequences = []for i in range(numSequences):seq = [x for x in range(i*elementsPerSeq, (i+)*elementsPerSeq)]sequences.append(seq)random.seed()seqIdxs = []for i in range(numRepeats):seqIdxs += list(range(numSequences))random.shuffle(seqIdxs)for seqIdx in seqIdxs:seq = sequences[seqIdx]for x in seq:if includeRandom:outFile.appendRecord([seqIdx, x*stepSize, random.random()])else:outFile.appendRecord([seqIdx, x*stepSize])outFile.close()", "docstring": "Generate a simple dataset. This contains a bunch of non-overlapping\n sequences of scalar values. \n\n Parameters:\n ----------------------------------------------------\n filename: name of the file to produce, including extension. It will\n be created in a 'datasets' sub-directory within the \n directory containing this script. \n numSequences: how many sequences to generate\n elementsPerSeq: length of each sequence\n numRepeats: how many times to repeat each sequence in the output\n stepSize: how far apart each scalar is \n includeRandom: if true, include another random field", "id": "f17524:m1"} {"signature": "def getFileUsed():", "body": "output = Configuration._readConfigFile(USER_CONFIG) if output != {}:return USER_CONFIGreturn DEFAULT_CONFIG", "docstring": "Determine which NuPIC configuration file is being used and returns the\nname of the configuration file it is using. Either DEFAULT_CONFIG or\nUSER_CONFIG.", "id": "f17527:m0"} {"signature": "def dbValidator():", "body": "fileused = getFileUsed()host = Configuration.get(\"\")port = int(Configuration.get(\"\"))user = Configuration.get(\"\")passwd = Configuration.get(\"\")print(\"\")print(\"\")print(\"\")print(\"\")print(\"\")print()print(\"\")print(\"\")print(\"\")print(\"\")print()print(\"\")print()print(\"\", fileused)print(\"\", host)print(\"\", port)print(\"\", user)print(\"\", \"\" * len(passwd))testDbConnection(host, port, user, passwd)print(\"\")", "docstring": "Let the user know what NuPIC config file is being used\nand whether or not they have mysql set up correctly for\nswarming.", "id": "f17527:m2"} {"signature": "def __init__(self, inputDimensions, columnDimensions):", "body": "self.inputDimensions = inputDimensionsself.columnDimensions = columnDimensionsself.inputSize = np.array(inputDimensions).prod()self.columnNumber = np.array(columnDimensions).prod()self.inputArray = np.zeros(self.inputSize, dtype=uintType)self.activeArray = np.zeros(self.columnNumber, dtype=uintType)random.seed()self.sp = SP(self.inputDimensions,self.columnDimensions,potentialRadius = self.inputSize,numActiveColumnsPerInhArea = int(*self.columnNumber),globalInhibition = True,seed = ,synPermActiveInc = ,synPermInactiveDec = )", "docstring": "Parameters:\n----------\n_inputDimensions: The size of the input. 
(m,n) will give a size m x n\n_columnDimensions: The size of the 2 dimensional array of columns", "id": "f17528:c0:m0"} {"signature": "def createInput(self):", "body": "print(\"\" * + \"\" + \"\" * )self.inputArray[:] = for i in range(self.inputSize):self.inputArray[i] = random.randrange()", "docstring": "create a random input vector", "id": "f17528:c0:m1"} {"signature": "def run(self):", "body": "print(\"\" * + \"\" + \"\" * )self.sp.compute(self.inputArray, True, self.activeArray)print(self.activeArray.nonzero())", "docstring": "Run the spatial pooler with the input vector", "id": "f17528:c0:m2"} {"signature": "def addNoise(self, noiseLevel):", "body": "for _ in range(int(noiseLevel * self.inputSize)):randomPosition = int(random.random() * self.inputSize)if self.inputArray[randomPosition] == :self.inputArray[randomPosition] = else:self.inputArray[randomPosition] = ", "docstring": "Flip the value of 10% of input bits (add noise)\n\n :param noiseLevel: The percentage of total input bits that should be flipped", "id": "f17528:c0:m3"} {"signature": "def percentOverlap(x1, x2, size):", "body": "nonZeroX1 = np.count_nonzero(x1)nonZeroX2 = np.count_nonzero(x2)minX1X2 = min(nonZeroX1, nonZeroX2)percentOverlap = if minX1X2 > :percentOverlap = float(np.dot(x1, x2))/float(minX1X2)return percentOverlap", "docstring": "Computes the percentage of overlap between vectors x1 and x2.\n\n@param x1 (array) binary vector\n@param x2 (array) binary vector\n@param size (int) length of binary vectors\n\n@return percentOverlap (float) percentage overlap between x1 and x2", "id": "f17529:m0"} {"signature": "def corruptVector(vector, noiseLevel):", "body": "size = len(vector)for i in range(size):rnd = random.random()if rnd < noiseLevel:if vector[i] == :vector[i] = else:vector[i] = ", "docstring": "Corrupts a binary vector by inverting noiseLevel percent of its bits.\n\n@param vector (array) binary vector to be corrupted\n@param noiseLevel (float) amount of noise to be applied on the vector.", "id": "f17529:m1"} {"signature": "def resetVector(x1, x2):", "body": "size = len(x1)for i in range(size):x2[i] = x1[i]", "docstring": "Copies the contents of vector x1 into vector x2.\n\n@param x1 (array) binary vector to be copied\n@param x2 (array) binary vector where x1 is copied", "id": "f17529:m2"} {"signature": "def _printOneTrainingVector(x):", "body": "print(''.join('' if k != else '' for k in x))", "docstring": "Print a single vector succinctly.", "id": "f17530:m0"} {"signature": "def _getSimplePatterns(numOnes, numPatterns):", "body": "numCols = numOnes * numPatternsp = []for i in range(numPatterns):x = np.zeros(numCols, dtype='')x[i*numOnes:(i + )*numOnes] = p.append(x)return p", "docstring": "Very simple patterns. Each pattern has numOnes consecutive\n bits on. There are numPatterns*numOnes bits in the vector. 
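Annotation for f17529:m0 and f17529:m1 above: percentOverlap normalises the dot product of two binary vectors by the smaller ON-bit count, and corruptVector flips each bit independently with probability noiseLevel. A small numpy-only sketch of both, with arbitrary vector sizes chosen for illustration.

import random
import numpy as np

def percent_overlap(x1, x2):
    # Dot product normalised by the smaller ON-bit count (0 if either is empty).
    minOnes = min(np.count_nonzero(x1), np.count_nonzero(x2))
    return float(np.dot(x1, x2)) / minOnes if minOnes > 0 else 0.0

def corrupt(vector, noiseLevel):
    # Flip each bit independently with probability noiseLevel, in place.
    for i in range(len(vector)):
        if random.random() < noiseLevel:
            vector[i] = 1 - vector[i]

v1 = np.zeros(40, dtype=np.uint8)
v1[:8] = 1                       # 8 consecutive ON bits
v2 = v1.copy()
corrupt(v2, 0.1)
print(percent_overlap(v1, v2))   # overlap falls as more bits are flipped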
These patterns\n are used as elements of sequences when building up a training set.", "id": "f17530:m1"} {"signature": "def _createTms(numCols):", "body": "minThreshold = activationThreshold = newSynapseCount = initialPerm = connectedPerm = permanenceInc = permanenceDec = globalDecay = cellsPerColumn = cppTm = BacktrackingTMCPP(numberOfCols=numCols,cellsPerColumn=cellsPerColumn,initialPerm=initialPerm,connectedPerm=connectedPerm,minThreshold=minThreshold,newSynapseCount=newSynapseCount,permanenceInc=permanenceInc,permanenceDec=permanenceDec,activationThreshold=activationThreshold,globalDecay=globalDecay, burnIn=,seed=SEED, verbosity=VERBOSITY,checkSynapseConsistency=True,pamLength=)cppTm.retrieveLearningStates = TruepyTm = BacktrackingTM(numberOfCols=numCols,cellsPerColumn=cellsPerColumn,initialPerm=initialPerm,connectedPerm=connectedPerm,minThreshold=minThreshold,newSynapseCount=newSynapseCount,permanenceInc=permanenceInc,permanenceDec=permanenceDec,activationThreshold=activationThreshold,globalDecay=globalDecay, burnIn=,seed=SEED, verbosity=VERBOSITY,pamLength=)return cppTm, pyTm", "docstring": "Create two instances of temporal poolers (backtracking_tm.py\n and backtracking_tm_cpp.py) with identical parameter settings.", "id": "f17530:m2"} {"signature": "def _basicTest(self, tm=None):", "body": "trainingSet = _getSimplePatterns(, )for _ in range():for seq in trainingSet[:]:for _ in range():tm.learn(seq)tm.reset()print(\"\")print(\"\")tm.collectStats = Truefor seq in trainingSet[:]:tm.reset()tm.resetStats()for _ in range():tm.infer(seq)if VERBOSITY > :print()_printOneTrainingVector(seq)tm.printStates(False, False)print()print()if VERBOSITY > :print(tm.getStats())self.assertGreater(tm.getStats()[''], )print((\"\",tm.getStats()['']))print(\"\")", "docstring": "Test creation, pickling, and basic run of learning and inference.", "id": "f17530:c0:m1"} {"signature": "def generatePattern(numCols = ,minOnes =,maxOnes =,colSet = [],prevPattern =numpy.array([])):", "body": "assert minOnes < maxOnesassert maxOnes < numColsnOnes = rgen.randint(minOnes, maxOnes)candidates = list(colSet.difference(set(prevPattern.nonzero()[])))rgen.shuffle(candidates)ind = candidates[:nOnes]x = numpy.zeros(numCols, dtype='')x[ind] = return x", "docstring": "Generate a single test pattern with given parameters.\n\n Parameters:\n --------------------------------------------\n numCols: Number of columns in each pattern.\n minOnes: The minimum number of 1's in each pattern.\n maxOnes: The maximum number of 1's in each pattern.\n colSet: The set of column indices for the pattern.\n prevPattern: Pattern to avoid (null intersection).", "id": "f17531:m2"} {"signature": "def buildTrainingSet(numSequences = ,sequenceLength = ,pctShared = ,seqGenMode = '',subsequenceStartPos = ,numCols = ,minOnes=,maxOnes = ,disjointConsecutive =True):", "body": "colSet = set(range(numCols))if '' in seqGenMode:assert '' in seqGenMode and '' not in seqGenModeif '' in seqGenMode or numSequences == :pctShared = if '' not in seqGenMode and '' not in seqGenMode:sharedSequenceLength = int(pctShared*sequenceLength)elif '' in seqGenMode:sharedSequenceLength = else:sharedSequenceLength = assert sharedSequenceLength + subsequenceStartPos < sequenceLengthsharedSequence = []for i in range(sharedSequenceLength):if disjointConsecutive and i > :x = generatePattern(numCols, minOnes, maxOnes, colSet,sharedSequence[i-])else:x = generatePattern(numCols, minOnes, maxOnes, colSet)sharedSequence.append(x)trainingSequences = []if '' not in seqGenMode:trailingLength = 
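Annotation for f17531:m2 above: generatePattern picks a random number of ON bits from a column set while excluding every column active in the previous pattern, so consecutive patterns are disjoint. A stand-alone version; the numpy RandomState, seed, and float32 dtype stand in for elided details of the original module.

import numpy as np

rgen = np.random.RandomState(42)   # stand-in for the module-level generator

def generate_pattern(numCols, minOnes, maxOnes, colSet, prevPattern):
    nOnes = rgen.randint(minOnes, maxOnes)
    # Candidate columns are those not active in the previous pattern,
    # giving consecutive patterns a null intersection.
    candidates = list(colSet.difference(set(prevPattern.nonzero()[0])))
    rgen.shuffle(candidates)
    x = np.zeros(numCols, dtype='float32')
    x[candidates[:nOnes]] = 1
    return x

colSet = set(range(100))
prev = np.zeros(100)
a = generate_pattern(100, 21, 25, colSet, prev)
b = generate_pattern(100, 21, 25, colSet, a)
print(np.dot(a, b))   # 0.0, disjoint by construction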
sequenceLength - sharedSequenceLength - subsequenceStartPoselse:trailingLength = sequenceLength - sharedSequenceLengthfor k,s in enumerate(range(numSequences)):if len(trainingSequences) > and '' in seqGenMode:r = list(range(subsequenceStartPos))+ list(range(subsequenceStartPos + sharedSequenceLength, sequenceLength))rgen.shuffle(r)r = r[:subsequenceStartPos]+ list(range(subsequenceStartPos, subsequenceStartPos + sharedSequenceLength))+ r[subsequenceStartPos:]sequence = [trainingSequences[k-][j] for j in r]else:sequence = []if '' not in seqGenMode:for i in range(subsequenceStartPos):if disjointConsecutive and i > :x = generatePattern(numCols, minOnes, maxOnes, colSet, sequence[i-])else:x = generatePattern(numCols, minOnes, maxOnes, colSet)sequence.append(x)if '' in seqGenMode and '' not in seqGenMode:sequence.extend(sharedSequence)for i in range(trailingLength):if disjointConsecutive and i > :x = generatePattern(numCols, minOnes, maxOnes, colSet, sequence[i-])else:x = generatePattern(numCols, minOnes, maxOnes, colSet)sequence.append(x)assert len(sequence) == sequenceLengthtrainingSequences.append(sequence)assert len(trainingSequences) == numSequencesif VERBOSITY >= :print(\"\")pprint.pprint(trainingSequences)if sharedSequenceLength > :return (trainingSequences, subsequenceStartPos + sharedSequenceLength)else:return (trainingSequences, -)", "docstring": "Build random high order test sequences.\n\n Parameters:\n --------------------------------------------\n numSequences: The number of sequences created.\n sequenceLength: The length of each sequence.\n pctShared: The percentage of sequenceLength that is shared across\n every sequence. If sequenceLength is 100 and pctShared\n is 0.2, then a subsequence consisting of 20 patterns\n will be in every sequence. Can also be the keyword\n 'one pattern', in which case a single time step is\n shared.\n seqGenMode: What kind of sequence to generate. If contains 'shared'\n generates shared subsequence. If contains 'no shared',\n does not generate any shared subsequence. If contains\n 'shuffle', will use common patterns shuffle among the\n different sequences. If contains 'beginning', will\n place shared subsequence at the beginning.\n subsequenceStartPos: The position where the shared subsequence starts\n numCols: Number of columns in each pattern.\n minOnes: The minimum number of 1's in each pattern.\n maxOnes: The maximum number of 1's in each pattern.\n disjointConsecutive: Whether to generate disjoint consecutive patterns or not.", "id": "f17531:m3"} {"signature": "def getSimplePatterns(numOnes, numPatterns):", "body": "numCols = numOnes * numPatternsp = []for i in range(numPatterns):x = numpy.zeros(numCols, dtype='')x[i*numOnes:(i+)*numOnes] = p.append(x)return p", "docstring": "Very simple patterns. Each pattern has numOnes consecutive\n bits on. There are numPatterns*numOnes bits in the vector.", "id": "f17531:m4"} {"signature": "def buildSimpleTrainingSet(numOnes=):", "body": "numPatterns = p = getSimplePatterns(numOnes, numPatterns)s1 = [p[], p[], p[], p[], p[], p[], p[] ]s2 = [p[], p[], p[], p[], p[], p[], p[]]trainingSequences = [s1, s2]return (trainingSequences, )", "docstring": "Two very simple high order sequences for debugging. 
Each pattern in the\n sequence has a series of 1's in a specific set of columns.", "id": "f17531:m5"} {"signature": "def buildAlternatingTrainingSet(numOnes=):", "body": "numPatterns = p = getSimplePatterns(numOnes, numPatterns)s1 = [p[], p[], p[], p[], p[], p[]]s2 = [p[], p[], p[], p[], p[], p[]]s3 = [p[], p[], p[], p[], p[], p[]]s4 = [p[], p[], p[], p[], p[], p[]]trainingSequences = [s1, s2, s3, s4]return (trainingSequences, )", "docstring": "High order sequences that alternate elements. Pattern i has one's in\n i*numOnes to (i+1)*numOnes.\n\n The sequences are:\n A B A B A C\n A B A B D E\n A B F G H I\n A J K L M N", "id": "f17531:m6"} {"signature": "def buildHL0aTrainingSet(numOnes=):", "body": "numPatterns = p = getSimplePatterns(numOnes, numPatterns)s = []s.append(p[rgen.randint(,)])for _ in range():s.append(p[rgen.randint(,)])s.append(p[])s.append(p[])s.append(p[])s.append(p[rgen.randint(,)])return ([s], [[p[], p[], p[]]])", "docstring": "Simple sequences for HL0. Each pattern in the sequence has a series of 1's\n in a specific set of columns.\n There are 23 patterns, p0 to p22.\n The sequence we want to learn is p0->p1->p2\n We create a very long sequence consisting of N N p0 p1 p2 N N p0 p1 p2\n N is randomly chosen from p3 to p22", "id": "f17531:m7"} {"signature": "def buildHL0bTrainingSet(numOnes=):", "body": "numPatterns = p = getSimplePatterns(numOnes, numPatterns)s = []s.append(p[rgen.randint(,numPatterns)])for _ in range():r = rgen.randint(,numPatterns)print(r, end='')s.append(p[r])if rgen.binomial(, ) > :print(\"\", end='')s.append(p[])s.append(p[])s.append(p[])s.append(p[])else:print(\"\", end='')s.append(p[])s.append(p[])s.append(p[])r = rgen.randint(,numPatterns)s.append(p[r])print(r, end='')print()return ([s], [ [p[], p[], p[], p[]], [p[], p[], p[]] ])", "docstring": "Simple sequences for HL0b. Each pattern in the sequence has a series of 1's\n in a specific set of columns.\n There are 23 patterns, p0 to p22.\n The sequences we want to learn are p1->p2->p3 and p0->p1->p2->p4.\n We create a very long sequence consisting of these two sub-sequences\n intermixed with noise, such as:\n N N p0 p1 p2 p4 N N p1 p2 p3 N N p1 p2 p3\n N is randomly chosen from p5 to p22", "id": "f17531:m8"} {"signature": "def findAcceptablePatterns(tm, t, whichSequence, trainingSequences, nAcceptable = ):", "body": "upTo = t + if tm.doPooling:upTo += min(tm.segUpdateValidDuration, nAcceptable)assert upTo <= len(trainingSequences[whichSequence])acceptablePatterns = []if len(trainingSequences) == and(trainingSequences[][] == trainingSequences[][]).all():if (trainingSequences[][t] == trainingSequences[][t]).all()and (trainingSequences[][t+] != trainingSequences[][t+]).any():acceptablePatterns.append(trainingSequences[][t+])acceptablePatterns.append(trainingSequences[][t+])acceptablePatterns += [trainingSequences[whichSequence][t]for t in range(t,upTo)]return acceptablePatterns", "docstring": "Tries to infer the set of acceptable patterns for prediction at the given\ntime step and for the give sequence. Acceptable patterns are: the current one,\nplus a certain number of patterns after timeStep, in the sequence that the TM\nis currently tracking. 
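Annotation for f17531:m6 above: buildAlternatingTrainingSet composes its four high-order sequences "A B A B A C", "A B A B D E", "A B F G H I", "A J K L M N" from simple consecutive-bit patterns. A sketch of that composition, mapping A..N to pattern indices 0..13; the pattern width and dtype here are arbitrary choices.

import numpy as np

def get_simple_patterns(numOnes, numPatterns):
    # Pattern i has numOnes consecutive ON bits starting at i * numOnes.
    numCols = numOnes * numPatterns
    patterns = []
    for i in range(numPatterns):
        x = np.zeros(numCols, dtype='float32')
        x[i * numOnes:(i + 1) * numOnes] = 1
        patterns.append(x)
    return patterns

p = get_simple_patterns(5, 14)

# A=0, B=1, C=2, ..., N=13, following the layout described in the docstring.
s1 = [p[0], p[1], p[0], p[1], p[0], p[2]]       # A B A B A C
s2 = [p[0], p[1], p[0], p[1], p[3], p[4]]       # A B A B D E
s3 = [p[0], p[1], p[5], p[6], p[7], p[8]]       # A B F G H I
s4 = [p[0], p[9], p[10], p[11], p[12], p[13]]   # A J K L M N
trainingSequences = [s1, s2, s3, s4]
print(len(trainingSequences), len(trainingSequences[0]))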
Any other pattern is not acceptable.\n\nTODO:\n====\n- Doesn't work for noise cases.\n- Might run in trouble if shared subsequence at the beginning.\n\nParameters:\n==========\ntm the whole TM, so that we can look at its parameters\nt the current time step\nwhichSequence the sequence we are currently tracking\ntrainingSequences all the training sequences\nnAcceptable the number of steps forward from the current timeStep\n we are willing to consider acceptable. In the case of\n pooling, it is less than or equal to the min of the\n number of training reps and the segUpdateValidDuration\n parameter of the TM, depending on the test case.\n The default value is 1, because by default, the pattern\n after the current one should always be predictable.\n\nReturn value:\n============\nacceptablePatterns A list of acceptable patterns for prediction.", "id": "f17531:m10"} {"signature": "def TestH2a(sequenceLength, nTests, cellsPerColumn, numCols =, nSequences =[],pctShared = , seqGenMode = '',shouldFail = False):", "body": "print(\"\")nFailed = subsequenceStartPos = assert subsequenceStartPos < sequenceLengthfor numSequences in nSequences:print(\"\",sequenceLength, end='')print(\"\",cellsPerColumn,\"\",nTests,\"\", numCols)print(\"\",numSequences, \"\", pctShared, end='')print(\"\", seqGenMode)for _ in range(nTests): trainingSet = buildTrainingSet(numSequences = numSequences,sequenceLength = sequenceLength,pctShared = pctShared, seqGenMode = seqGenMode,subsequenceStartPos = subsequenceStartPos,numCols = numCols,minOnes = , maxOnes = )print(\"\")numFailures3, numStrictErrors3, numPerfect3, tm3 =testSequence(trainingSet,nTrainingReps = ,numberOfCols = numCols,cellsPerColumn = cellsPerColumn,initialPerm = ,connectedPerm = ,minThreshold = ,permanenceInc = ,permanenceDec = ,permanenceMax = ,globalDecay = ,newSynapseCount = ,activationThreshold = ,doPooling = False,shouldFail = shouldFail)print(\"\")numFailures, numStrictErrors, numPerfect, tm2 =testSequence(trainingSet,nTrainingReps = ,numberOfCols = numCols,cellsPerColumn = cellsPerColumn,initialPerm = ,connectedPerm = ,minThreshold = ,permanenceInc = ,permanenceDec = ,permanenceMax = ,globalDecay = ,newSynapseCount = ,activationThreshold = ,doPooling = False,shouldFail = shouldFail)print(\"\")numFailures1, numStrictErrors1, numPerfect1, tm1 =testSequence(trainingSet,nTrainingReps = ,numberOfCols = numCols,cellsPerColumn = cellsPerColumn,initialPerm = ,connectedPerm = ,minThreshold = ,permanenceInc = ,permanenceDec = ,permanenceMax = ,globalDecay = ,newSynapseCount = ,activationThreshold = ,doPooling = False,shouldFail = shouldFail)segmentInfo1 = tm1.getSegmentInfo()segmentInfo2 = tm2.getSegmentInfo()if (abs(segmentInfo1[] - segmentInfo2[]) > ) or(abs(segmentInfo1[] - segmentInfo2[]) > *) :print(\"\")print(segmentInfo1)print(segmentInfo2)print(tm3.getSegmentInfo())tm3.trimSegments()print(tm3.getSegmentInfo())print(\"\")print(numFailures1, numStrictErrors1, numPerfect1)print(numFailures, numStrictErrors, numPerfect)print(numFailures3, numStrictErrors3, numPerfect3)numFailures += if numFailures == and not shouldFailor numFailures > and shouldFail:print(\"\", end='')if shouldFail:print('')else:print()else:print(\"\")nFailed = nFailed + print(\"\", numFailures)print(\"\", numStrictErrors)print(\"\", numPerfect)return nFailed", "docstring": "Still need to test:\n Two overlapping sequences. 
OK to get new segments but check that we can\n get correct high order prediction after multiple reps.", "id": "f17531:m19"} {"signature": "def worker(x):", "body": "cellsPerColumn, numSequences = x[], x[]nTrainingReps = sequenceLength = numCols = print('', cellsPerColumn, numSequences)seqGenMode = ''subsequenceStartPos = trainingSet = buildTrainingSet(numSequences = numSequences,sequenceLength = sequenceLength,pctShared = , seqGenMode = seqGenMode,subsequenceStartPos = subsequenceStartPos,numCols = numCols,minOnes = , maxOnes = )numFailures1, numStrictErrors1, numPerfect1, atHub, tm =testSequence(trainingSet,nTrainingReps = nTrainingReps,numberOfCols = numCols,cellsPerColumn = cellsPerColumn,initialPerm = ,connectedPerm = ,minThreshold = ,permanenceInc = ,permanenceDec = ,permanenceMax = ,globalDecay = ,newSynapseCount = ,activationThreshold = ,doPooling = False,shouldFail = False,predJustAfterHubOnly = )seqGenMode = ''trainingSet = buildTrainingSet(numSequences = numSequences,sequenceLength = sequenceLength,pctShared = , seqGenMode = seqGenMode,subsequenceStartPos = ,numCols = numCols,minOnes = , maxOnes = )numFailures2, numStrictErrors2, numPerfect2, tm =testSequence(trainingSet,nTrainingReps = nTrainingReps,numberOfCols = numCols,cellsPerColumn = cellsPerColumn,initialPerm = ,connectedPerm = ,minThreshold = ,permanenceInc = ,permanenceDec = ,permanenceMax = ,globalDecay = ,newSynapseCount = ,activationThreshold = ,doPooling = False,shouldFail = False)print('', end='')print(cellsPerColumn, numSequences, numFailures1, numStrictErrors1, numPerfect1, atHub,numFailures2, numStrictErrors2, numPerfect2)return cellsPerColumn, numSequences, numFailures1, numStrictErrors1, numPerfect1, atHub,numFailures2, numStrictErrors2, numPerfect2", "docstring": "Worker function to use in parallel hub capacity test below.", "id": "f17531:m24"} {"signature": "def hubCapacity():", "body": "from multiprocessing import Poolimport itertoolsprint(\"\")p = Pool()results = p.map(worker, itertools.product([,,,,,,,], range(,,)))f = open('', '')for i,r in enumerate(results):print('' % r, file=f)f.close()", "docstring": "Study hub capacity. Figure out how many sequences can share a pattern\nfor a given number of cells per column till we the system fails.\nDON'T RUN IN BUILD SYSTEM!!! (takes too long)", "id": "f17531:m25"} {"signature": "def printOneTrainingVector(x):", "body": "print(''.join('' if k != else '' for k in x))", "docstring": "Print a single vector succinctly.", "id": "f17532:m0"} {"signature": "def getSimplePatterns(numOnes, numPatterns, patternOverlap=):", "body": "assert (patternOverlap < numOnes)numNewBitsInEachPattern = numOnes - patternOverlapnumCols = numNewBitsInEachPattern * numPatterns + patternOverlapp = []for i in range(numPatterns):x = numpy.zeros(numCols, dtype='')startBit = i*numNewBitsInEachPatternnextStartBit = startBit + numOnesx[startBit:nextStartBit] = p.append(x)return p", "docstring": "Very simple patterns. Each pattern has numOnes consecutive\n bits on. 
The amount of overlap between consecutive patterns is\n configurable, via the patternOverlap parameter.\n\n Parameters:\n -----------------------------------------------------------------------\n numOnes: Number of bits ON in each pattern\n numPatterns: Number of unique patterns to generate\n patternOverlap: Number of bits of overlap between each successive pattern\n retval: patterns", "id": "f17532:m2"} {"signature": "def buildOverlappedSequences( numSequences = ,seqLen = ,sharedElements = [,],numOnBitsPerPattern = ,patternOverlap = ,seqOverlap = ,**kwargs):", "body": "numSharedElements = len(sharedElements)numUniqueElements = seqLen - numSharedElementsnumPatterns = numSharedElements + numUniqueElements * numSequencespatterns = getSimplePatterns(numOnBitsPerPattern, numPatterns, patternOverlap)numCols = len(patterns[])trainingSequences = []uniquePatternIndices = list(range(numSharedElements, numPatterns))for _ in range(numSequences):sequence = []sharedPatternIndices = list(range(numSharedElements))for j in range(seqLen):if j in sharedElements:patIdx = sharedPatternIndices.pop()else:patIdx = uniquePatternIndices.pop()sequence.append(patterns[patIdx])trainingSequences.append(sequence)if VERBOSITY >= :print(\"\")printAllTrainingSequences(trainingSequences)return (numCols, trainingSequences)", "docstring": "Create training sequences that share some elements in the middle.\n\n Parameters:\n -----------------------------------------------------\n numSequences: Number of unique training sequences to generate\n seqLen: Overall length of each sequence\n sharedElements: Which element indices of each sequence are shared. These\n will be in the range between 0 and seqLen-1\n numOnBitsPerPattern: Number of ON bits in each TM input pattern\n patternOverlap: Max number of bits of overlap between any 2 patterns\n retval: (numCols, trainingSequences)\n numCols - width of the patterns\n trainingSequences - a list of training sequences", "id": "f17532:m3"} {"signature": "def buildSequencePool(numSequences = ,seqLen = [,,],numPatterns = ,numOnBitsPerPattern = ,patternOverlap = ,**kwargs):", "body": "patterns = getSimplePatterns(numOnBitsPerPattern, numPatterns, patternOverlap)numCols = len(patterns[])trainingSequences = []for _ in range(numSequences):sequence = []length = random.choice(seqLen)for _ in range(length):patIdx = random.choice(range(numPatterns))sequence.append(patterns[patIdx])trainingSequences.append(sequence)if VERBOSITY >= :print(\"\")printAllTrainingSequences(trainingSequences)return (numCols, trainingSequences)", "docstring": "Create a bunch of sequences of various lengths, all built from\n a fixed set of patterns.\n\n Parameters:\n -----------------------------------------------------\n numSequences: Number of training sequences to generate\n seqLen: List of possible sequence lengths\n numPatterns: How many possible patterns there are to use within\n sequences\n numOnBitsPerPattern: Number of ON bits in each TM input pattern\n patternOverlap: Max number of bits of overlap between any 2 patterns\n retval: (numCols, trainingSequences)\n numCols - width of the patterns\n trainingSequences - a list of training sequences", "id": "f17532:m4"} {"signature": "def createTMs(includeCPP = True,includePy = True,numCols = ,cellsPerCol = ,activationThreshold = ,minThreshold = ,newSynapseCount = ,initialPerm = ,permanenceInc = ,permanenceDec = ,globalDecay = ,pamLength = ,checkSynapseConsistency = True,maxInfBacktrack = ,maxLrnBacktrack = ,**kwargs):", "body": "connectedPerm = tms = dict()if includeCPP:if 
VERBOSITY >= :print(\"\")cpp_tm = BacktrackingTMCPP(numberOfCols = numCols, cellsPerColumn = cellsPerCol,initialPerm = initialPerm, connectedPerm = connectedPerm,minThreshold = minThreshold, newSynapseCount = newSynapseCount,permanenceInc = permanenceInc, permanenceDec = permanenceDec,activationThreshold = activationThreshold,globalDecay = globalDecay, burnIn = ,seed=SEED, verbosity=VERBOSITY,checkSynapseConsistency = checkSynapseConsistency,collectStats = True,pamLength = pamLength,maxInfBacktrack = maxInfBacktrack,maxLrnBacktrack = maxLrnBacktrack,)cpp_tm.retrieveLearningStates = Truetms[''] = cpp_tmif includePy:if VERBOSITY >= :print(\"\")py_tm = BacktrackingTM(numberOfCols = numCols,cellsPerColumn = cellsPerCol,initialPerm = initialPerm,connectedPerm = connectedPerm,minThreshold = minThreshold,newSynapseCount = newSynapseCount,permanenceInc = permanenceInc,permanenceDec = permanenceDec,activationThreshold = activationThreshold,globalDecay = globalDecay, burnIn = ,seed=SEED, verbosity=VERBOSITY,collectStats = True,pamLength = pamLength,maxInfBacktrack = maxInfBacktrack,maxLrnBacktrack = maxLrnBacktrack,)tms[''] = py_tmreturn tms", "docstring": "Create one or more TM instances, placing each into a dict keyed by\n name.\n\n Parameters:\n ------------------------------------------------------------------\n retval: tms - dict of TM instances", "id": "f17532:m5"} {"signature": "def assertNoTMDiffs(tms):", "body": "if len(tms) == :returnif len(tms) > :raise \"\"same = fdrutils.tmDiff2(list(tms.values()), verbosity=VERBOSITY)assert(same)return", "docstring": "Check for diffs among the TM instances in the passed in tms dict and\nraise an assert if any are detected\n\nParameters:\n---------------------------------------------------------------------\ntms: dict of TM instances", "id": "f17532:m6"} {"signature": "def _printOneTrainingVector(self, x):", "body": "print(''.join('' if k != else '' for k in x))", "docstring": "Print a single vector succinctly.", "id": "f17534:c0:m1"} {"signature": "def _printAllTrainingSequences(self, trainingSequences):", "body": "for i, trainingSequence in enumerate(trainingSequences):print(\"\", i, \"\")for pattern in trainingSequence:self._printOneTrainingVector(pattern)", "docstring": "Print all vectors", "id": "f17534:c0:m2"} {"signature": "def _setVerbosity(self, verbosity, tm, tmPy):", "body": "tm.cells4.setVerbosity(verbosity)tm.verbosity = verbositytmPy.verbosity = verbosity", "docstring": "Set verbosity level on the TM", "id": "f17534:c0:m3"} {"signature": "def _createTMs(self, numCols, fixedResources=False,checkSynapseConsistency = True):", "body": "minThreshold = activationThreshold = newSynapseCount = initialPerm = connectedPerm = permanenceInc = permanenceDec = if fixedResources:permanenceDec = maxSegmentsPerCell = maxSynapsesPerSegment = globalDecay = maxAge = else:permanenceDec = maxSegmentsPerCell = -maxSynapsesPerSegment = -globalDecay = maxAge = if g_testCPPTM:if g_options.verbosity > :print(\"\")cppTM = BacktrackingTMCPP(numberOfCols = numCols, cellsPerColumn = ,initialPerm = initialPerm, connectedPerm = connectedPerm,minThreshold = minThreshold,newSynapseCount = newSynapseCount,permanenceInc = permanenceInc,permanenceDec = permanenceDec,activationThreshold = activationThreshold,globalDecay = globalDecay, maxAge=maxAge, burnIn = ,seed=g_options.seed, verbosity=g_options.verbosity,checkSynapseConsistency = checkSynapseConsistency,pamLength = ,maxSegmentsPerCell = maxSegmentsPerCell,maxSynapsesPerSegment = 
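Annotation for f17532:m5 and f17534:c0:m4 above: both helpers build BacktrackingTM (and optionally BacktrackingTMCPP) instances with matching keyword arguments, but the numeric constants are elided in the record bodies. A sketch of the pure-Python construction with placeholder values; the import path follows the backtracking_tm module named in f17530:m2 and may need adjusting for a given NuPIC checkout.

from nupic.algorithms.backtracking_tm import BacktrackingTM

# Placeholder values; the real constants are elided in the bodies above.
tm = BacktrackingTM(numberOfCols=100,
                    cellsPerColumn=4,
                    initialPerm=0.2,
                    connectedPerm=0.5,
                    minThreshold=4,
                    newSynapseCount=5,
                    permanenceInc=0.1,
                    permanenceDec=0.05,
                    activationThreshold=8,
                    globalDecay=0,
                    burnIn=1,
                    seed=42,
                    verbosity=0,
                    pamLength=3)
# tm is now ready for the tm.learn(...) / tm.infer(...) calls used by the
# test harnesses above.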
maxSynapsesPerSegment,)cppTM.retrieveLearningStates = Trueelse:cppTM = Noneif g_options.verbosity > :print(\"\")pyTM = BacktrackingTM(numberOfCols = numCols,cellsPerColumn = ,initialPerm = initialPerm,connectedPerm = connectedPerm,minThreshold = minThreshold,newSynapseCount = newSynapseCount,permanenceInc = permanenceInc,permanenceDec = permanenceDec,activationThreshold = activationThreshold,globalDecay = globalDecay, maxAge=maxAge, burnIn = ,seed=g_options.seed, verbosity=g_options.verbosity,pamLength = ,maxSegmentsPerCell = maxSegmentsPerCell,maxSynapsesPerSegment = maxSynapsesPerSegment,)return cppTM, pyTM", "docstring": "Create an instance of the appropriate temporal memory. We isolate\n all parameters as constants specified here.", "id": "f17534:c0:m4"} {"signature": "def _getSimplePatterns(self, numOnes, numPatterns):", "body": "numCols = numOnes * numPatternsp = []for i in range(numPatterns):x = numpy.zeros(numCols, dtype='')x[i*numOnes:(i+)*numOnes] = p.append(x)return p", "docstring": "Very simple patterns. Each pattern has numOnes consecutive\n bits on. There are numPatterns*numOnes bits in the vector. These patterns\n are used as elements of sequences when building up a training set.", "id": "f17534:c0:m5"} {"signature": "def _buildSegmentLearningTrainingSet(self, numOnes=, numRepetitions= ):", "body": "numPatterns = numCols = * numPatterns * numOneshalfCols = numPatterns * numOnesnumNoiseBits = numOnesp = self._getSimplePatterns(numOnes, numPatterns)trainingSequences = []for _ in range(numRepetitions):sequence = []for j in range(numPatterns):v = numpy.zeros(numCols)v[:halfCols] = p[j]noiseIndices = (self._rgen.permutation(halfCols)+ halfCols)[:numNoiseBits]v[noiseIndices] = sequence.append(v)trainingSequences.append(sequence)testSequence = []for j in range(numPatterns):v = numpy.zeros(numCols, dtype='')v[:halfCols] = p[j]testSequence.append(v)if g_options.verbosity > :print(\"\")self._printAllTrainingSequences(trainingSequences)print(\"\")self._printAllTrainingSequences([testSequence])return (trainingSequences, [testSequence])", "docstring": "A simple sequence of 5 patterns. The left half of the vector contains\n the pattern elements, each with numOnes consecutive bits. The right half\n contains numOnes random bits. 
The function returns a pair:\n\n trainingSequences: A list containing numRepetitions instances of the\n above sequence\n testSequence: A single clean test sequence containing the 5 patterns\n but with no noise on the right half", "id": "f17534:c0:m6"} {"signature": "def _buildSL2TrainingSet(self, numOnes=, numRepetitions= ):", "body": "numPatterns = numCols = * numPatterns * numOneshalfCols = numPatterns * numOnesnumNoiseBits = numOnesp = self._getSimplePatterns(numOnes, numPatterns)numSequences = indices = [[, , , , ],[, , , , ],[, , , , ],]trainingSequences = []for i in range(numRepetitions*numSequences):sequence = []for j in range(numPatterns):v = numpy.zeros(numCols, dtype='')v[:halfCols] = p[indices[i % numSequences][j]]noiseIndices = (self._rgen.permutation(halfCols)+ halfCols)[:numNoiseBits]v[noiseIndices] = sequence.append(v)trainingSequences.append(sequence)testSequences = []for i in range(numSequences):sequence = []for j in range(numPatterns):v = numpy.zeros(numCols, dtype='')v[:halfCols] = p[indices[i % numSequences][j]]sequence.append(v)testSequences.append(sequence)if g_options.verbosity > :print(\"\")self._printAllTrainingSequences(trainingSequences)print(\"\")self._printAllTrainingSequences(testSequences)return (trainingSequences, testSequences)", "docstring": "Three simple sequences, composed of the same 5 static patterns. The left\n half of the vector contains the pattern elements, each with numOnes\n consecutive bits. The right half contains numOnes random bits.\n\n Sequence 1 is: p0, p1, p2, p3, p4\n Sequence 2 is: p4, p3, p2, p1, p0\n Sequence 3 is: p2, p0, p4, p1, p3\n\n The function returns a pair:\n\n trainingSequences: A list containing numRepetitions instances of the\n above sequences\n testSequence: Clean test sequences with no noise on the right half", "id": "f17534:c0:m7"} {"signature": "def accuracy(current, predicted):", "body": "accuracy = if np.count_nonzero(predicted) > :accuracy = float(np.dot(current, predicted))/float(np.count_nonzero(predicted))return accuracy", "docstring": "Computes the accuracy of the TM at time-step t based on the prediction\nat time-step t-1 and the current active columns at time-step t.\n\n@param current (array) binary vector containing current active columns\n@param predicted (array) binary vector containing predicted active columns\n\n@return acc (float) prediction accuracy of the TM at time-step t", "id": "f17535:m0"} {"signature": "def corruptVector(v1, noiseLevel, numActiveCols):", "body": "size = len(v1)v2 = np.zeros(size, dtype=\"\")bitsToSwap = int(noiseLevel * numActiveCols)for i in range(size):v2[i] = v1[i]for _ in range(bitsToSwap):i = random.randrange(size)if v2[i] == :v2[i] = else:v2[i] = return v2", "docstring": "Corrupts a copy of a binary vector by inverting noiseLevel percent of its bits.\n\n@param v1 (array) binary vector whose copy will be corrupted\n@param noiseLevel (float) amount of noise to be applied on the new vector\n@param numActiveCols (int) number of sparse columns that represent an input\n\n@return v2 (array) corrupted binary vector", "id": "f17535:m1"} {"signature": "def showPredictions():", "body": "for k in range():tm.reset()print(\"\" + \"\"[k] + \"\")tm.compute(set(seqT[k][:].nonzero()[].tolist()), learn=False)activeColumnsIndices = [tm.columnForCell(i) for i in tm.getActiveCells()]predictedColumnIndices = [tm.columnForCell(i) for i in tm.getPredictiveCells()] currentColumns = [ if i in activeColumnsIndices else for i in range(tm.numberOfColumns())]predictedColumns = [ if i in 
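Annotation for f17535:m0 above: accuracy scores a prediction as the fraction of predicted columns that turn out to be active on the next step. A self-contained check of that formula with made-up column vectors.

import numpy as np

def accuracy(current, predicted):
    # Fraction of predicted columns that were actually active.
    nPredicted = np.count_nonzero(predicted)
    if nPredicted == 0:
        return 0.0
    return float(np.dot(current, predicted)) / nPredicted

current   = np.array([1, 1, 0, 1, 0, 0])
predicted = np.array([1, 0, 0, 1, 1, 0])   # 2 of the 3 predicted columns hit
print(accuracy(current, predicted))        # 0.666...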
predictedColumnIndices else for i in range(tm.numberOfColumns())]print((\"\" + str(np.nonzero(currentColumns)[])))print((\"\" + str(np.nonzero(predictedColumns)[])))print(\"\")", "docstring": "Shows predictions of the TM when presented with the characters A, B, C, D, X, and\nY without any contextual information, that is, not embedded within a sequence.", "id": "f17535:m2"} {"signature": "def trainTM(sequence, timeSteps, noiseLevel):", "body": "currentColumns = np.zeros(tm.numberOfColumns(), dtype=\"\")predictedColumns = np.zeros(tm.numberOfColumns(), dtype=\"\")ts = for t in range(timeSteps):tm.reset()for k in range():v = corruptVector(sequence[k][:], noiseLevel, sparseCols)tm.compute(set(v[:].nonzero()[].tolist()), learn=True)activeColumnsIndices = [tm.columnForCell(i) for i in tm.getActiveCells()]predictedColumnIndices = [tm.columnForCell(i) for i in tm.getPredictiveCells()]currentColumns = [ if i in activeColumnsIndices else for i in range(tm.numberOfColumns())]acc = accuracy(currentColumns, predictedColumns)x.append(ts)y.append(acc)ts += predictedColumns = [ if i in predictedColumnIndices else for i in range(tm.numberOfColumns())]", "docstring": "Trains the TM with given sequence for a given number of time steps and level of input\ncorruption\n\n@param sequence (array) array whose rows are the input characters\n@param timeSteps (int) number of time steps in which the TM will be presented with sequence\n@param noiseLevel (float) amount of noise to be applied on the characters in the sequence", "id": "f17535:m3"} {"signature": "def Array(dtype, size=None, ref=False):", "body": "def getArrayType(self):\"\"\"\"\"\"return self._dtypeif ref:assert size is Noneindex = basicTypes.index(dtype)if index == -:raise Exception('' + dtype)if size and size <= :raise Exception('')suffix = '' if ref else ''arrayFactory = getattr(engine_internal, dtype + suffix)arrayFactory.getType = getArrayTypeif size:a = arrayFactory(size)else:a = arrayFactory()a._dtype = basicTypes[index]return a", "docstring": "Factory function that creates typed Array or ArrayRef objects\n\n dtype - the data type of the array (as string).\n Supported types are: Byte, Int16, UInt16, Int32, UInt32, Int64, UInt64, Real32, Real64\n\n size - the size of the array. Must be positive integer.", "id": "f17536:m1"} {"signature": "def __init__(self, *args):", "body": "engine_internal.Dimensions.__init__(self, *args)", "docstring": "Construct a Dimensions object\n\n The constructor can be called with no arguments or with a list\n of integers", "id": "f17536:c0:m0"} {"signature": "def __init__(self, region, network):", "body": "self._network = networkself._region = regionself.__class__.__doc__ == region.__class__.__doc__self._paramTypeCache = {}", "docstring": "Store the wraped region and hosting network\n\n The network is the high-level Network and not the internal\n Network. 
This is important in case the user requests the network\n from the region (never leak a engine object, remember)", "id": "f17536:c5:m0"} {"signature": "@staticmethoddef getSpecFromType(nodeType):", "body": "return Spec(engine_internal.Region.getSpecFromType(nodeType))", "docstring": "@doc:place_holder(Region.getSpecFromType)", "id": "f17536:c5:m3"} {"signature": "def compute(self):", "body": "return self._region.compute()", "docstring": "@doc:place_holder(Region.compute)\n\n** This line comes from the original docstring (not generated by Documentor)", "id": "f17536:c5:m4"} {"signature": "def getInputData(self, inputName):", "body": "return self._region.getInputArray(inputName)", "docstring": "@doc:place_holder(Region.getInputData)", "id": "f17536:c5:m5"} {"signature": "def getOutputData(self, outputName):", "body": "return self._region.getOutputArray(outputName)", "docstring": "@doc:place_holder(Region.getOutputData)", "id": "f17536:c5:m6"} {"signature": "def getInputNames(self):", "body": "inputs = self.getSpec().inputsreturn [inputs.getByIndex(i)[] for i in range(inputs.getCount())]", "docstring": "Returns list of input names in spec.", "id": "f17536:c5:m7"} {"signature": "def getOutputNames(self):", "body": "outputs = self.getSpec().outputsreturn [outputs.getByIndex(i)[] for i in range(outputs.getCount())]", "docstring": "Returns list of output names in spec.", "id": "f17536:c5:m8"} {"signature": "def executeCommand(self, args):", "body": "return self._region.executeCommand(args)", "docstring": "@doc:place_holder(Region.executeCommand)", "id": "f17536:c5:m9"} {"signature": "def _getSpec(self):", "body": "return Spec(self._region.getSpec())", "docstring": "Spec of the region", "id": "f17536:c5:m10"} {"signature": "def _getDimensions(self):", "body": "return Dimensions(tuple(self._region.getDimensions()))", "docstring": "Dimensions of the region", "id": "f17536:c5:m11"} {"signature": "def _getNetwork(self):", "body": "return self._network", "docstring": "Network for the region", "id": "f17536:c5:m12"} {"signature": "def __hash__(self):", "body": "return self._region.__hash__()", "docstring": "Hash a region", "id": "f17536:c5:m13"} {"signature": "def __eq__(self, other):", "body": "return self._region == other._region", "docstring": "Compare regions", "id": "f17536:c5:m14"} {"signature": "def __ne__(self, other):", "body": "return self._region != other._region", "docstring": "Compare regions", "id": "f17536:c5:m15"} {"signature": "def _getParameterMethods(self, paramName):", "body": "if paramName in self._paramTypeCache:return self._paramTypeCache[paramName]try:paramSpec = self.getSpec().parameters.getByName(paramName)except:return (None, None)dataType = paramSpec.dataTypedataTypeName = basicTypes[dataType]count = paramSpec.countif count == :x = '' + dataTypeNametry:g = getattr(self, '' + x) s = getattr(self, '' + x) except AttributeError:raise Exception(\"\" %dataTypeName)info = (s, g)else:if dataTypeName == \"\":info = (self.setParameterString, self.getParameterString)else:helper = _ArrayParameterHelper(self, dataType)info = (self.setParameterArray, helper.getParameterArray)self._paramTypeCache[paramName] = inforeturn info", "docstring": "Returns functions to set/get the parameter. 
These are\n the strongly typed functions get/setParameterUInt32, etc.\n The return value is a pair:\n setfunc, getfunc\n If the parameter is not available on this region, setfunc/getfunc\n are None.", "id": "f17536:c5:m16"} {"signature": "def getParameter(self, paramName):", "body": "(setter, getter) = self._getParameterMethods(paramName)if getter is None:import exceptionsraise exceptions.Exception(\"\"% (paramName, self.name, self.type))return getter(paramName)", "docstring": "Get parameter value", "id": "f17536:c5:m17"} {"signature": "def setParameter(self, paramName, value):", "body": "(setter, getter) = self._getParameterMethods(paramName)if setter is None:import exceptionsraise exceptions.Exception(\"\"% (paramName, self.name, self.type))setter(paramName, value)", "docstring": "Set parameter value", "id": "f17536:c5:m18"} {"signature": "def _get(self, method):", "body": "return getattr(self._region, method)()", "docstring": "Auto forwarding of properties to get methods of internal region", "id": "f17536:c5:m19"} {"signature": "def __init__(self, *args):", "body": "engine_internal.Network.__init__(self, *args)docTable = ((engine_internal.Network.getRegions, ''),)for obj, docString in docTable:if isinstance(obj, str):prop = getattr(Network, obj)assert isinstance(prop, property)setattr(Network, obj, property(prop.fget, prop.fset, prop.fdel,docString))else:obj.__func__.__doc__ = docString", "docstring": "Constructor\n\n - Initialize the internal engine_internal.Network class generated by Swig\n - Attach docstrings to selected methods", "id": "f17536:c6:m0"} {"signature": "def _getRegions(self):", "body": "def makeRegion(name, r):\"\"\"\"\"\"r = Region(r, self)return rregions = CollectionWrapper(engine_internal.Network.getRegions(self), makeRegion)return regions", "docstring": "Get the collection of regions in a network\n\n This is a tricky one. The collection of regions returned from\n from the internal network is a collection of internal regions.\n The desired collection is a collelcion of net.Region objects\n that also points to this network (net.network) and not to\n the internal network. To achieve that a CollectionWrapper\n class is used with a custom makeRegion() function (see bellow)\n as a value wrapper. 
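Annotation for f17536:c5:m16 above: _getParameterMethods resolves the strongly typed get/setParameter<Type> accessors by name with getattr and caches the resulting pair per parameter. A toy illustration of that dispatch; unlike the real version, which reads the data type from the region's Spec, this sketch takes the type name as an argument.

class ToyRegion(object):
    """Toy stand-in showing the typed-accessor dispatch pattern."""

    def __init__(self):
        self._params = {}
        self._paramTypeCache = {}

    # Strongly typed accessors, analogous to get/setParameterReal32 etc.
    def getParameterReal32(self, name):
        return float(self._params[name])

    def setParameterReal32(self, name, value):
        self._params[name] = float(value)

    def _getParameterMethods(self, name, dataTypeName):
        # Resolve and cache the typed setter/getter pair for this parameter.
        if name in self._paramTypeCache:
            return self._paramTypeCache[name]
        setter = getattr(self, 'setParameter' + dataTypeName)
        getter = getattr(self, 'getParameter' + dataTypeName)
        self._paramTypeCache[name] = (setter, getter)
        return setter, getter

r = ToyRegion()
setter, getter = r._getParameterMethods('sparsity', 'Real32')
setter('sparsity', 0.02)
print(getter('sparsity'))   # 0.02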
The CollectionWrapper class wraps each value in the\n original collection with the result of the valueWrapper.", "id": "f17536:c6:m1"} {"signature": "def addRegion(self, name, nodeType, nodeParams):", "body": "engine_internal.Network.addRegion(self, name, nodeType, nodeParams)return self._getRegions()[name]", "docstring": "@doc:place_holder(Network.addRegion)", "id": "f17536:c6:m2"} {"signature": "def addRegionFromBundle(self, name, nodeType, dimensions, bundlePath, label):", "body": "engine_internal.Network.addRegionFromBundle(self, name, nodeType, dimensions,bundlePath, label)return self._getRegions()[name]", "docstring": "@doc:place_holder(Network.addRegionFromBundle)", "id": "f17536:c6:m3"} {"signature": "def setPhases(self, name, phases):", "body": "phases = engine_internal.UInt32Set(phases)engine_internal.Network.setPhases(self, name, phases)", "docstring": "@doc:place_holder(Network.setPhases)", "id": "f17536:c6:m4"} {"signature": "def run(self, n):", "body": "engine_internal.Network.run(self, n)", "docstring": "@doc:place_holder(Network.run)", "id": "f17536:c6:m5"} {"signature": "def disableProfiling(self, *args, **kwargs):", "body": "engine_internal.Network.disableProfiling(self, *args, **kwargs)", "docstring": "@doc:place_holder(Network.disableProfiling)", "id": "f17536:c6:m6"} {"signature": "def enableProfiling(self, *args, **kwargs):", "body": "engine_internal.Network.enableProfiling(self, *args, **kwargs)", "docstring": "@doc:place_holder(Network.enableProfiling)", "id": "f17536:c6:m7"} {"signature": "def getCallbacks(self, *args, **kwargs):", "body": "engine_internal.Network.getCallbacks(self, *args, **kwargs)", "docstring": "@doc:place_holder(Network.getCallbacks)", "id": "f17536:c6:m8"} {"signature": "def initialize(self, *args, **kwargs):", "body": "engine_internal.Network.initialize(self, *args, **kwargs)", "docstring": "@doc:place_holder(Network.initialize)", "id": "f17536:c6:m9"} {"signature": "def link(self, *args, **kwargs):", "body": "engine_internal.Network.link(self, *args, **kwargs)", "docstring": "@doc:place_holder(Network.link)", "id": "f17536:c6:m10"} {"signature": "def removeLink(self, *args, **kwargs):", "body": "engine_internal.Network.removeLink(self, *args, **kwargs)", "docstring": "@doc:place_holder(Network.removeLink)", "id": "f17536:c6:m11"} {"signature": "def removeRegion(self, *args, **kwargs):", "body": "engine_internal.Network.removeRegion(self, *args, **kwargs)", "docstring": "@doc:place_holder(Network.removeRegion)", "id": "f17536:c6:m12"} {"signature": "def resetProfiling(self, *args, **kwargs):", "body": "engine_internal.Network.resetProfiling(self, *args, **kwargs)", "docstring": "@doc:place_holder(Network.resetProfiling)", "id": "f17536:c6:m13"} {"signature": "def save(self, *args, **kwargs):", "body": "if len(args) > and not isinstance(args[], str):raise TypeError(\"\".format(str))engine_internal.Network.save(self, *args, **kwargs)", "docstring": "@doc:place_holder(Network.save)", "id": "f17536:c6:m14"} {"signature": "def getRegionsByType(self, regionClass):", "body": "regions = []for region in list(self.regions.values()):if type(region.getSelf()) is regionClass:regions.append(region)return regions", "docstring": "Gets all region instances of a given class\n(for example, nupic.regions.sp_region.SPRegion).", "id": "f17536:c6:m15"} {"signature": "@staticmethoddef registerRegion(regionClass):", "body": "engine_internal.Network.registerPyRegion(regionClass.__module__,regionClass.__name__)", "docstring": "Adds the module and class name for the 
region to the list of classes the network can use\nregionClass: a pointer to a subclass of PyRegion", "id": "f17536:c6:m16"} {"signature": "@staticmethoddef unregisterRegion(regionName):", "body": "engine_internal.Network.unregisterPyRegion(regionName)", "docstring": "Unregisters a region from the internal list of regions\n\n:param str regionName: The name of the region to unregister\n (ex: regionName=regionClass.__name__)", "id": "f17536:c6:m17"} {"signature": "def getWidth(self):", "body": "return self.n", "docstring": "See `nupic.encoders.base.Encoder` for more information.", "id": "f17537:c0:m1"} {"signature": "def getDescription(self):", "body": "return [('', ), ('', )]", "docstring": "See `nupic.encoders.base.Encoder` for more information.", "id": "f17537:c0:m2"} {"signature": "def getScalars(self, inputData):", "body": "return numpy.array([]*len(inputData))", "docstring": "See `nupic.encoders.base.Encoder` for more information.", "id": "f17537:c0:m3"} {"signature": "def encodeIntoArray(self, inputData, output):", "body": "(coordinate, radius) = inputDataassert isinstance(radius, int), (\"\".format(radius, type(radius)))neighbors = self._neighbors(coordinate, radius)winners = self._topWCoordinates(neighbors, self.w)bitFn = lambda coordinate: self._bitForCoordinate(coordinate, self.n)indices = numpy.array([bitFn(w) for w in winners])output[:] = output[indices] = ", "docstring": "See `nupic.encoders.base.Encoder` for more information.\n\n@param inputData (tuple) Contains coordinate (numpy.array, N-dimensional\n integer coordinate) and radius (int)\n@param output (numpy.array) Stores encoded SDR in this numpy array", "id": "f17537:c0:m4"} {"signature": "@staticmethoddef _neighbors(coordinate, radius):", "body": "ranges = (xrange(n-radius, n+radius+) for n in coordinate.tolist())return numpy.array(list(itertools.product(*ranges)))", "docstring": "Returns coordinates around given coordinate, within given radius.\nIncludes given coordinate.\n\n@param coordinate (numpy.array) N-dimensional integer coordinate\n@param radius (int) Radius around `coordinate`\n\n@return (numpy.array) List of coordinates", "id": "f17537:c0:m5"} {"signature": "@classmethoddef _topWCoordinates(cls, coordinates, w):", "body": "orders = numpy.array([cls._orderForCoordinate(c)for c in coordinates.tolist()])indices = numpy.argsort(orders)[-w:]return coordinates[indices]", "docstring": "Returns the top W coordinates by order.\n\n@param coordinates (numpy.array) A 2D numpy array, where each element\n is a coordinate\n@param w (int) Number of top coordinates to return\n@return (numpy.array) A subset of `coordinates`, containing only the\n top ones by order", "id": "f17537:c0:m6"} {"signature": "@staticmethoddef _hashCoordinate(coordinate):", "body": "coordinateStr = \"\".join(str(v) for v in coordinate)hash = int(int(hashlib.md5(coordinateStr).hexdigest(), ) % ( ** ))return hash", "docstring": "Hash a coordinate to a 64 bit integer.", "id": "f17537:c0:m7"} {"signature": "@classmethoddef _orderForCoordinate(cls, coordinate):", "body": "seed = cls._hashCoordinate(coordinate)rng = Random(seed)return rng.getReal64()", "docstring": "Returns the order for a coordinate.\n\n@param coordinate (numpy.array) Coordinate\n@return (float) A value in the interval [0, 1), representing the\n order of the coordinate", "id": "f17537:c0:m8"} {"signature": "@classmethoddef _bitForCoordinate(cls, coordinate, n):", "body": "seed = cls._hashCoordinate(coordinate)rng = Random(seed)return rng.getUInt32(n)", "docstring": "Maps the coordinate to a 
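Annotation for f17537:c0:m5 through m9 above: the coordinate encoder derives a deterministic pseudo-random order and bit index for every integer coordinate by hashing it, then keeps the top-w neighbours by order. A stdlib re-creation of that idea, substituting Python's random.Random for nupic's Random and an arbitrary ',' separator for the elided join string.

import hashlib
import itertools
import random
import numpy as np

def hash_coordinate(coordinate):
    # Deterministic 64-bit hash of the coordinate's string form.
    s = ','.join(str(v) for v in coordinate)
    return int(hashlib.md5(s.encode('utf-8')).hexdigest(), 16) % (2 ** 64)

def order_for(coordinate):
    # Pseudo-random order in [0, 1), seeded by the coordinate hash.
    return random.Random(hash_coordinate(coordinate)).random()

def neighbors(coordinate, radius):
    ranges = (range(v - radius, v + radius + 1) for v in coordinate)
    return np.array(list(itertools.product(*ranges)))

def top_w(coordinates, w):
    orders = np.array([order_for(c) for c in coordinates.tolist()])
    return coordinates[np.argsort(orders)[-w:]]

winners = top_w(neighbors(np.array([10, 20]), radius=2), w=5)
print(winners)   # 5 coordinates, stable across runs for the same input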
bit in the SDR.\n\n@param coordinate (numpy.array) Coordinate\n@param n (int) The number of available bits in the SDR\n@return (int) The index to a bit in the SDR", "id": "f17537:c0:m9"} {"signature": "def getScalarNames(self, parentFieldName=''):", "body": "names = []def _formFieldName(encoder):if parentFieldName == '':return encoder.nameelse:return '' % (parentFieldName, encoder.name)if self.seasonEncoder is not None:names.append(_formFieldName(self.seasonEncoder))if self.dayOfWeekEncoder is not None:names.append(_formFieldName(self.dayOfWeekEncoder))if self.customDaysEncoder is not None:names.append(_formFieldName(self.customDaysEncoder))if self.weekendEncoder is not None:names.append(_formFieldName(self.weekendEncoder))if self.holidayEncoder is not None:names.append(_formFieldName(self.holidayEncoder))if self.timeOfDayEncoder is not None:names.append(_formFieldName(self.timeOfDayEncoder))return names", "docstring": "See method description in base.py", "id": "f17539:c0:m2"} {"signature": "def getEncodedValues(self, input):", "body": "if input == SENTINEL_VALUE_FOR_MISSING_DATA:return numpy.array([None])assert isinstance(input, datetime.datetime)values = []timetuple = input.timetuple()timeOfDay = timetuple.tm_hour + float(timetuple.tm_min)/if self.seasonEncoder is not None:dayOfYear = timetuple.tm_ydayvalues.append(dayOfYear-)if self.dayOfWeekEncoder is not None:dayOfWeek = timetuple.tm_wday + timeOfDay / values.append(dayOfWeek)if self.weekendEncoder is not None:if timetuple.tm_wday == or timetuple.tm_wday == or (timetuple.tm_wday == and timeOfDay > ):weekend = else:weekend = values.append(weekend)if self.customDaysEncoder is not None:if timetuple.tm_wday in self.customDays:customDay = else:customDay = values.append(customDay)if self.holidayEncoder is not None:if len(self.holidays) == :holidays = [(, )]else:holidays = self.holidaysval = for h in holidays:if len(h) == :hdate = datetime.datetime(h[], h[], h[], , , )else:hdate = datetime.datetime(timetuple.tm_year, h[], h[], , , )if input > hdate:diff = input - hdateif diff.days == :val = breakelif diff.days == :val = - (float(diff.seconds) / )breakelse:diff = hdate - inputif diff.days == :val = - (float(diff.seconds) / )values.append(val)if self.timeOfDayEncoder is not None:values.append(timeOfDay)return values", "docstring": "See method description in base.py", "id": "f17539:c0:m3"} {"signature": "def getScalars(self, input):", "body": "return numpy.array(self.getEncodedValues(input))", "docstring": "See method description in :meth:`~.nupic.encoders.base.Encoder.getScalars`.\n\n:param input: (datetime) representing the time being encoded\n\n:returns: A numpy array of the corresponding scalar values in the following\n order: season, dayOfWeek, weekend, holiday, timeOfDay. 
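Annotation for f17539:c0:m3 and m4 above: getEncodedValues reduces a datetime to sub-field scalars such as day of year, fractional day of week, a weekend flag, and time of day. A plain-Python rendering of that arithmetic; the cut-offs used here (minutes divided by 60, day-of-week scaled by 24, weekend covering Saturday, Sunday, and Friday after 18:00) are assumptions, since the constants are elided above.

import datetime

def date_scalars(dt):
    tt = dt.timetuple()
    timeOfDay = tt.tm_hour + float(tt.tm_min) / 60.0
    dayOfYear = tt.tm_yday - 1
    dayOfWeek = tt.tm_wday + timeOfDay / 24.0
    # Weekend flag: Saturday (5), Sunday (6), or Friday evening.
    weekend = 1 if (tt.tm_wday in (5, 6) or
                    (tt.tm_wday == 4 and timeOfDay > 18.0)) else 0
    return dayOfYear, dayOfWeek, weekend, timeOfDay

# Friday 2014-07-04 at 19:30 falls inside the assumed weekend window.
print(date_scalars(datetime.datetime(2014, 7, 4, 19, 30)))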
Some of\n these fields might be omitted if they were not specified in the\n encoder.", "id": "f17539:c0:m4"} {"signature": "def getBucketIndices(self, input):", "body": "if input == SENTINEL_VALUE_FOR_MISSING_DATA:return [None] * len(self.encoders)else:assert isinstance(input, datetime.datetime)scalars = self.getScalars(input)result = []for i in xrange(len(self.encoders)):(name, encoder, offset) = self.encoders[i]result.extend(encoder.getBucketIndices(scalars[i]))return result", "docstring": "See method description in base.py", "id": "f17539:c0:m5"} {"signature": "def encodeIntoArray(self, input, output):", "body": "if input == SENTINEL_VALUE_FOR_MISSING_DATA:output[:] = else:if not isinstance(input, datetime.datetime):raise ValueError(\"\" % (type(input), str(input)))scalars = self.getScalars(input)for i in xrange(len(self.encoders)):(name, encoder, offset) = self.encoders[i]encoder.encodeIntoArray(scalars[i], output[offset:])", "docstring": "See method description in base.py", "id": "f17539:c0:m6"} {"signature": "def __init__(self, n, w=None, name=\"\", forced=False, verbosity=):", "body": "super(SparsePassThroughEncoder, self).__init__(n, w, name, forced, verbosity)", "docstring": "n is the total bits in input\nw is the number of bits used to encode each input bit", "id": "f17540:c0:m0"} {"signature": "def encodeIntoArray(self, value, output):", "body": "denseInput = numpy.zeros(output.shape)try:denseInput[value] = except IndexError:if isinstance(value, numpy.ndarray):raise ValueError(\"\".format(value.dtype))raisesuper(SparsePassThroughEncoder, self).encodeIntoArray(denseInput, output)", "docstring": "See method description in base.py", "id": "f17540:c0:m1"} {"signature": "def getDecoderOutputFieldTypes(self):", "body": "return (FieldMetaType.integer,)", "docstring": "[Encoder class virtual method override]", "id": "f17541:c0:m1"} {"signature": "def getScalars(self, input):", "body": "if input == SENTINEL_VALUE_FOR_MISSING_DATA:return numpy.array([None])else:return numpy.array([self.categoryToIndex.get(input, )])", "docstring": "See method description in base.py", "id": "f17541:c0:m4"} {"signature": "def getBucketIndices(self, input):", "body": "if input == SENTINEL_VALUE_FOR_MISSING_DATA:return [None]else:return self.encoder.getBucketIndices(self.categoryToIndex.get(input, ))", "docstring": "See method description in base.py", "id": "f17541:c0:m5"} {"signature": "def decode(self, encoded, parentFieldName=''):", "body": "(fieldsDict, fieldNames) = self.encoder.decode(encoded)if len(fieldsDict) == :return (fieldsDict, fieldNames)assert(len(fieldsDict) == )(inRanges, inDesc) = list(fieldsDict.values())[]outRanges = []desc = \"\"for (minV, maxV) in inRanges:minV = int(round(minV))maxV = int(round(maxV))outRanges.append((minV, maxV))while minV <= maxV:if len(desc) > :desc += \"\"desc += self.indexToCategory[minV]minV += if parentFieldName != '':fieldName = \"\" % (parentFieldName, self.name)else:fieldName = self.namereturn ({fieldName: (outRanges, desc)}, [fieldName])", "docstring": "See the function description in base.py", "id": "f17541:c0:m7"} {"signature": "def closenessScores(self, expValues, actValues, fractional=True,):", "body": "expValue = expValues[]actValue = actValues[]if expValue == actValue:closeness = else:closeness = if not fractional:closeness = - closenessreturn numpy.array([closeness])", "docstring": "See the function description in base.py\n\n kwargs will have the keyword \"fractional\", which is ignored by this encoder", "id": "f17541:c0:m8"} {"signature": "def 
getBucketValues(self):", "body": "if self._bucketValues is None:numBuckets = len(self.encoder.getBucketValues())self._bucketValues = []for bucketIndex in range(numBuckets):self._bucketValues.append(self.getBucketInfo([bucketIndex])[].value)return self._bucketValues", "docstring": "See the function description in base.py", "id": "f17541:c0:m9"} {"signature": "def getBucketInfo(self, buckets):", "body": "bucketInfo = self.encoder.getBucketInfo(buckets)[]categoryIndex = int(round(bucketInfo.value))category = self.indexToCategory[categoryIndex]return [EncoderResult(value=category, scalar=categoryIndex,encoding=bucketInfo.encoding)]", "docstring": "See the function description in base.py", "id": "f17541:c0:m10"} {"signature": "def topDownCompute(self, encoded):", "body": "encoderResult = self.encoder.topDownCompute(encoded)[]value = encoderResult.valuecategoryIndex = int(round(value))category = self.indexToCategory[categoryIndex]return EncoderResult(value=category, scalar=categoryIndex,encoding=encoderResult.encoding)", "docstring": "See the function description in base.py", "id": "f17541:c0:m11"} {"signature": "def _isSequence(obj):", "body": "mType = type(obj)return mType is list or mType is tuple", "docstring": "Helper function to determine if a function is a list or sequence.", "id": "f17542:m0"} {"signature": "def getWidth(self):", "body": "raise NotImplementedError()", "docstring": "Should return the output width, in bits.\n\n :return: (int) output width in bits", "id": "f17542:c0:m0"} {"signature": "def encodeIntoArray(self, inputData, output):", "body": "raise NotImplementedError()", "docstring": "Encodes inputData and puts the encoded value into the numpy output array,\nwhich is a 1-D array of length returned by :meth:`.getWidth`.\n\n.. note:: The numpy output array is reused, so clear it before updating it.\n\n:param inputData: Data to encode. 
This should be validated by the encoder.\n:param output: numpy 1-D array of same length returned by\n :meth:`.getWidth`.", "id": "f17542:c0:m1"} {"signature": "def setLearning(self, learningEnabled):", "body": "if hasattr(self, \"\"):self._learningEnabled = learningEnabled", "docstring": "Set whether learning is enabled.\n\n :param learningEnabled: (bool) whether learning should be enabled", "id": "f17542:c0:m2"} {"signature": "def setFieldStats(self, fieldName, fieldStatistics):", "body": "pass", "docstring": "This method is called by the model to set the statistics like min and\nmax for the underlying encoders if this information is available.\n\n:param fieldName: name of the field this encoder is encoding, provided by\n :class:`~.nupic.encoders.multi.MultiEncoder`.\n\n:param fieldStatistics: dictionary of dictionaries with the first level being\n the fieldname and the second index the statistic ie:\n ``fieldStatistics['pounds']['min']``", "id": "f17542:c0:m3"} {"signature": "def encode(self, inputData):", "body": "output = numpy.zeros((self.getWidth(),), dtype=defaultDtype)self.encodeIntoArray(inputData, output)return output", "docstring": "Convenience wrapper for :meth:`.encodeIntoArray`.\n\n This may be less efficient because it allocates a new numpy array every\n call.\n\n :param inputData: input data to be encoded\n :return: a numpy array with the encoded representation of inputData", "id": "f17542:c0:m4"} {"signature": "def getScalarNames(self, parentFieldName=''):", "body": "names = []if self.encoders is not None:for (name, encoder, offset) in self.encoders:subNames = encoder.getScalarNames(parentFieldName=name)if parentFieldName != '':subNames = ['' % (parentFieldName, name) for name in subNames]names.extend(subNames)else:if parentFieldName != '':names.append(parentFieldName)else:names.append(self.name)return names", "docstring": "Return the field names for each of the scalar values returned by\ngetScalars.\n\n:param parentFieldName: The name of the encoder which is our parent. This\n name is prefixed to each of the field names within this encoder to\n form the keys of the dict() in the retval.\n\n:return: array of field names", "id": "f17542:c0:m5"} {"signature": "def getDecoderOutputFieldTypes(self):", "body": "if hasattr(self, '') andself._flattenedFieldTypeList is not None:return self._flattenedFieldTypeListfieldTypes = []for (name, encoder, offset) in self.encoders:subTypes = encoder.getDecoderOutputFieldTypes()fieldTypes.extend(subTypes)self._flattenedFieldTypeList = fieldTypesreturn fieldTypes", "docstring": "Returns a sequence of field types corresponding to the elements in the\ndecoded output field array. 
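For a composite encoder this is just the concatenation of each sub-encoder's tuple of types, computed once and then cached; a rough sketch with placeholder type names:

.. code-block:: python

    scalar_types = ("float",)      # e.g. from a scalar sub-encoder
    category_types = ("string",)   # e.g. from a category sub-encoder

    flattened = []
    for sub_types in (scalar_types, category_types):
        flattened.extend(sub_types)
    print(flattened)               # ['float', 'string']
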
The types are defined by\n:class:`~nupic.data.field_meta.FieldMetaType`.\n\n:return: list of :class:`~nupic.data.field_meta.FieldMetaType` objects", "id": "f17542:c0:m6"} {"signature": "def setStateLock(self,lock):", "body": "pass", "docstring": "Setting this to true freezes the state of the encoder\nThis is separate from the learning state which affects changing parameters.\nImplemented in subclasses.", "id": "f17542:c0:m7"} {"signature": "def _getInputValue(self, obj, fieldName):", "body": "if isinstance(obj, dict):if not fieldName in obj:knownFields = \"\".join(key for key in list(obj.keys()) if not key.startswith(\"\"))raise ValueError(\"\"\"\"\"\" % (fieldName, knownFields, fieldName))return obj[fieldName]else:return getattr(obj, fieldName)", "docstring": "Gets the value of a given field from the input record", "id": "f17542:c0:m8"} {"signature": "def getEncoderList(self):", "body": "if hasattr(self, '') andself._flattenedEncoderList is not None:return self._flattenedEncoderListencoders = []if self.encoders is not None:for (name, encoder, offset) in self.encoders:subEncoders = encoder.getEncoderList()encoders.extend(subEncoders)else:encoders.append(self)self._flattenedEncoderList = encodersreturn encoders", "docstring": ":return: a reference to each sub-encoder in this encoder. They are\n returned in the same order as they are for :meth:`.getScalarNames`\n and :meth:`.getScalars`.", "id": "f17542:c0:m9"} {"signature": "def getScalars(self, inputData):", "body": "retVals = numpy.array([])if self.encoders is not None:for (name, encoder, offset) in self.encoders:values = encoder.getScalars(self._getInputValue(inputData, name))retVals = numpy.hstack((retVals, values))else:retVals = numpy.hstack((retVals, inputData))return retVals", "docstring": "Returns a numpy array containing the sub-field scalar value(s) for\neach sub-field of the ``inputData``. To get the associated field names for\neach of the scalar values, call :meth:`.getScalarNames()`.\n\nFor a simple scalar encoder, the scalar value is simply the input unmodified.\nFor category encoders, it is the scalar representing the category string\nthat is passed in. For the datetime encoder, the scalar value is the\nthe number of seconds since epoch.\n\nThe intent of the scalar representation of a sub-field is to provide a\nbaseline for measuring error differences. You can compare the scalar value\nof the inputData with the scalar value returned from :meth:`.topDownCompute`\non a top-down representation to evaluate prediction accuracy, for example.\n\n:param inputData: The data from the source. This is typically an object with\n members\n:return: array of scalar values", "id": "f17542:c0:m10"} {"signature": "def getEncodedValues(self, inputData):", "body": "retVals = []if self.encoders is not None:for name, encoders, offset in self.encoders:values = encoders.getEncodedValues(self._getInputValue(inputData, name))if _isSequence(values):retVals.extend(values)else:retVals.append(values)else:if _isSequence(inputData):retVals.extend(inputData)else:retVals.append(inputData)return tuple(retVals)", "docstring": "Returns the input in the same format as is returned by\n:meth:`.topDownCompute`. For most encoder types, this is the same as the\ninput data. For instance, for scalar and category types, this corresponds to\nthe numeric and string values, respectively, from the inputs. 
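In the multi-field case the per-field values are simply collected in sub-encoder order, mirroring what :meth:`.getScalars` does with ``numpy.hstack``; a sketch with made-up field names and values:

.. code-block:: python

    import numpy

    names = ["consumption", "dayOfWeek", "weekend"]
    values = numpy.hstack((numpy.array([7.3]),        # scalar sub-field
                           numpy.array([2.5, 0.0])))  # date sub-fields
    for name, value in zip(names, values):
        print("%s: %s" % (name, value))
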
For datetime\nencoders, this returns the list of scalars for each of the sub-fields\n(timeOfDay, dayOfWeek, etc.)\n\nThis method is essentially the same as :meth:`.getScalars` except that it\nreturns strings.\n\n:param inputData: The input data in the format it is received from the data\n source\n\n:return: A list of values, in the same format and in the same order as they\n are returned by :meth:`.topDownCompute`.", "id": "f17542:c0:m11"} {"signature": "def getBucketIndices(self, inputData):", "body": "retVals = []if self.encoders is not None:for (name, encoder, offset) in self.encoders:values = encoder.getBucketIndices(self._getInputValue(inputData, name))retVals.extend(values)else:assert False, \"\"\"\"return retVals", "docstring": "Returns an array containing the sub-field bucket indices for each sub-field\nof the inputData. To get the associated field names for each of the buckets,\ncall :meth:`.getScalarNames`.\n\n:param inputData: The data from the source. This is typically an object with\n members.\n:return: array of bucket indices", "id": "f17542:c0:m12"} {"signature": "def scalarsToStr(self, scalarValues, scalarNames=None):", "body": "if scalarNames is None:scalarNames = self.getScalarNames()desc = ''for (name, value) in zip(scalarNames, scalarValues):if len(desc) > :desc += \"\" % (name, value)else:desc += \"\" % (name, value)return desc", "docstring": "Return a pretty print string representing the return values from\n:meth:`.getScalars` and :meth:`.getScalarNames`.\n\n:param scalarValues: input values to encode to string\n:param scalarNames: optional input of scalar names to convert. If None, gets\n scalar names from :meth:`.getScalarNames`\n:return: string representation of scalar values", "id": "f17542:c0:m13"} {"signature": "def getDescription(self):", "body": "raise Exception(\"\")", "docstring": "**Must be overridden by subclasses.**\n\nThis returns a list of tuples, each containing (``name``, ``offset``).\nThe ``name`` is a string description of each sub-field, and ``offset`` is\nthe bit offset of the sub-field for that encoder.\n\nFor now, only the 'multi' and 'date' encoders have multiple (name, offset)\npairs. 
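A sub-field's width can be recovered from consecutive offsets, which is how :meth:`.getFieldDescription` works; a sketch with made-up names, offsets, and total width:

.. code-block:: python

    description = [("dayOfWeek", 0), ("timeOfDay", 12)]
    total_width = 33
    bounds = description + [("end", total_width)]

    for i in range(len(description)):
        name, offset = bounds[i]
        width = bounds[i + 1][1] - offset
        print(name, offset, width)   # dayOfWeek 0 12, then timeOfDay 12 21
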
All other encoders have a single pair, where the offset is 0.\n\n:return: list of tuples containing (name, offset)", "id": "f17542:c0:m14"} {"signature": "def getFieldDescription(self, fieldName):", "body": "description = self.getDescription() + [(\"\", self.getWidth())]for i in range(len(description)):(name, offset) = description[i]if (name == fieldName):breakif i >= len(description)-:raise RuntimeError(\"\" % fieldName)return (offset, description[i+][] - offset)", "docstring": "Return the offset and length of a given field within the encoded output.\n\n:param fieldName: Name of the field\n:return: tuple(``offset``, ``width``) of the field within the encoded output", "id": "f17542:c0:m15"} {"signature": "def encodedBitDescription(self, bitOffset, formatted=False):", "body": "(prevFieldName, prevFieldOffset) = (None, None)description = self.getDescription()for i in range(len(description)):(name, offset) = description[i]if formatted:offset = offset + iif bitOffset == offset-:prevFieldName = \"\"prevFieldOffset = bitOffsetbreakif bitOffset < offset:break(prevFieldName, prevFieldOffset) = (name, offset)width = self.getDisplayWidth() if formatted else self.getWidth()if prevFieldOffset is None or bitOffset > self.getWidth():raise IndexError(\"\" % width)return (prevFieldName, bitOffset - prevFieldOffset)", "docstring": "Return a description of the given bit in the encoded output.\nThis will include the field name and the offset within the field.\n\n:param bitOffset: Offset of the bit to get the description of\n:param formatted: If True, the bitOffset is w.r.t. formatted output,\n which includes separators\n:return: tuple(``fieldName``, ``offsetWithinField``)", "id": "f17542:c0:m16"} {"signature": "def pprintHeader(self, prefix=\"\"):", "body": "print(prefix, end='')description = self.getDescription() + [(\"\", self.getWidth())]for i in range(len(description) - ):name = description[i][]width = description[i+][] - description[i][]formatStr = \"\" % widthif len(name) > width:pname = name[:width]else:pname = nameprint(formatStr % pname, end='')print()print(prefix, \"\" * (self.getWidth() + (len(description) - )* - ))", "docstring": "Pretty-print a header that labels the sub-fields of the encoded\noutput. 
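The effect is a line of sub-field names printed over the bit string, with a blank column between fields; a stand-alone sketch of the same idea (the field names, widths, and 0/1 rendering are made up):

.. code-block:: python

    import numpy

    output = numpy.array([0, 1, 1, 1, 0, 1, 0, 0], dtype="uint8")
    bits = "".join("1" if b else "." for b in output)
    print("%-5s %-3s" % ("amt", "cat"))   # header row
    print(bits[:5] + " " + bits[5:])      # .111. 1..
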
This can be used in conjuction with :meth:`.pprint`.\n\n:param prefix: printed before the header if specified", "id": "f17542:c0:m17"} {"signature": "def pprint(self, output, prefix=\"\"):", "body": "print(prefix, end='')description = self.getDescription() + [(\"\", self.getWidth())]for i in range(len(description) - ):offset = description[i][]nextoffset = description[i+][]print(\"\" % bitsToString(output[offset:nextoffset]), end='')print()", "docstring": "Pretty-print the encoded output using ascii art.\n\n:param output: to print\n:param prefix: printed before the header if specified", "id": "f17542:c0:m18"} {"signature": "def decode(self, encoded, parentFieldName=''):", "body": "fieldsDict = dict()fieldsOrder = []if parentFieldName == '':parentName = self.nameelse:parentName = \"\" % (parentFieldName, self.name)if self.encoders is not None:for i in range(len(self.encoders)):(name, encoder, offset) = self.encoders[i]if i < len(self.encoders)-:nextOffset = self.encoders[i+][]else:nextOffset = self.widthfieldOutput = encoded[offset:nextOffset](subFieldsDict, subFieldsOrder) = encoder.decode(fieldOutput,parentFieldName=parentName)fieldsDict.update(subFieldsDict)fieldsOrder.extend(subFieldsOrder)return (fieldsDict, fieldsOrder)", "docstring": "Takes an encoded output and does its best to work backwards and generate\nthe input that would have generated it.\n\nIn cases where the encoded output contains more ON bits than an input\nwould have generated, this routine will return one or more ranges of inputs\nwhich, if their encoded outputs were ORed together, would produce the\ntarget output. This behavior makes this method suitable for doing things\nlike generating a description of a learned coincidence in the SP, which\nin many cases might be a union of one or more inputs.\n\nIf instead, you want to figure the *most likely* single input scalar value\nthat would have generated a specific encoded output, use the\n:meth:`.topDownCompute` method.\n\nIf you want to pretty print the return value from this method, use the\n:meth:`.decodedToStr` method.\n\n:param encoded: The encoded output that you want decode\n:param parentFieldName: The name of the encoder which is our parent. This name\n is prefixed to each of the field names within this encoder to form the\n keys of the dict() in the retval.\n\n:return: tuple(``fieldsDict``, ``fieldOrder``)\n\n ``fieldsDict`` is a dict() where the keys represent field names\n (only 1 if this is a simple encoder, > 1 if this is a multi\n or date encoder) and the values are the result of decoding each\n field. If there are no bits in encoded that would have been\n generated by a field, it won't be present in the dict. The\n key of each entry in the dict is formed by joining the passed in\n parentFieldName with the child encoder name using a '.'.\n\n Each 'value' in ``fieldsDict`` consists of (ranges, desc), where\n ranges is a list of one or more (minVal, maxVal) ranges of\n input that would generate bits in the encoded output and 'desc'\n is a pretty print description of the ranges. For encoders like\n the category encoder, the 'desc' will contain the category\n names that correspond to the scalar values included in the\n ranges.\n\n ``fieldOrder`` is a list of the keys from ``fieldsDict``, in the\n same order as the fields appear in the encoded output.\n\n TODO: when we switch to Python 2.7 or 3.x, use OrderedDict\n\nExample retvals for a scalar encoder:\n\n.. 
code-block:: python\n\n {'amount': ( [[1,3], [7,10]], '1-3, 7-10' )}\n {'amount': ( [[2.5,2.5]], '2.5' )}\n\nExample retval for a category encoder:\n\n.. code-block:: python\n\n {'country': ( [[1,1], [5,6]], 'US, GB, ES' )}\n\nExample retval for a multi encoder:\n\n.. code-block:: python\n\n {'amount': ( [[2.5,2.5]], '2.5' ),\n 'country': ( [[1,1], [5,6]], 'US, GB, ES' )}", "id": "f17542:c0:m19"} {"signature": "def decodedToStr(self, decodeResults):", "body": "(fieldsDict, fieldsOrder) = decodeResultsdesc = ''for fieldName in fieldsOrder:(ranges, rangesStr) = fieldsDict[fieldName]if len(desc) > :desc += \"\" % (fieldName)else:desc += \"\" % (fieldName)desc += \"\" % (rangesStr)return desc", "docstring": "Return a pretty print string representing the return value from\n:meth:`.decode`.", "id": "f17542:c0:m20"} {"signature": "def getBucketValues(self):", "body": "raise Exception(\"\")", "docstring": "**Must be overridden by subclasses.**\n\nReturns a list of items, one for each bucket defined by this encoder.\nEach item is the value assigned to that bucket, this is the same as the\n:attr:`.EncoderResult.value` that would be returned by\n:meth:`.getBucketInfo` for that bucket and is in the same format as the\ninput that would be passed to :meth:`.encode`.\n\nThis call is faster than calling :meth:`.getBucketInfo` on each bucket\nindividually if all you need are the bucket values.\n\n:return: list of items, each item representing the bucket value for that\n bucket.", "id": "f17542:c0:m21"} {"signature": "def getBucketInfo(self, buckets):", "body": "if self.encoders is None:raise RuntimeError(\"\")retVals = []bucketOffset = for i in range(len(self.encoders)):(name, encoder, offset) = self.encoders[i]if encoder.encoders is not None:nextBucketOffset = bucketOffset + len(encoder.encoders)else:nextBucketOffset = bucketOffset + bucketIndices = buckets[bucketOffset:nextBucketOffset]values = encoder.getBucketInfo(bucketIndices)retVals.extend(values)bucketOffset = nextBucketOffsetreturn retVals", "docstring": "Returns a list of :class:`.EncoderResult` namedtuples describing the inputs\nfor each sub-field that correspond to the bucket indices passed in\n``buckets``. To get the associated field names for each of the values, call\n:meth:`.getScalarNames`.\n\n:param buckets: The list of bucket indices, one for each sub-field encoder.\n These bucket indices for example may have been retrieved\n from the :meth:`.getBucketIndices` call.\n:return: A list of :class:`.EncoderResult`.", "id": "f17542:c0:m22"} {"signature": "def topDownCompute(self, encoded):", "body": "if self.encoders is None:raise RuntimeError(\"\")retVals = []for i in range(len(self.encoders)):(name, encoder, offset) = self.encoders[i]if i < len(self.encoders)-:nextOffset = self.encoders[i+][]else:nextOffset = self.widthfieldOutput = encoded[offset:nextOffset]values = encoder.topDownCompute(fieldOutput)if _isSequence(values):retVals.extend(values)else:retVals.append(values)return retVals", "docstring": "Returns a list of :class:`.EncoderResult` namedtuples describing the\ntop-down best guess inputs for each sub-field given the encoded output.\nThese are the values which are most likely to generate the given encoded\noutput. To get the associated field names for each of the values, call\n:meth:`.getScalarNames`.\n\n:param encoded: The encoded output. 
Typically received from the topDown\n outputs from the spatial pooler just above us.\n\n:return: A list of :class:`.EncoderResult`", "id": "f17542:c0:m23"} {"signature": "def closenessScores(self, expValues, actValues, fractional=True):", "body": "if self.encoders is None:err = abs(expValues[] - actValues[])if fractional:denom = max(expValues[], actValues[])if denom == :denom = closeness = - float(err)/denomif closeness < :closeness = else:closeness = errreturn numpy.array([closeness])scalarIdx = retVals = numpy.array([])for (name, encoder, offset) in self.encoders:values = encoder.closenessScores(expValues[scalarIdx:], actValues[scalarIdx:],fractional=fractional)scalarIdx += len(values)retVals = numpy.hstack((retVals, values))return retVals", "docstring": "Compute closeness scores between the expected scalar value(s) and actual\nscalar value(s). The expected scalar values are typically those obtained\nfrom the :meth:`.getScalars` method. The actual scalar values are typically\nthose returned from :meth:`.topDownCompute`.\n\nThis method returns one closeness score for each value in expValues (or\nactValues which must be the same length). The closeness score ranges from\n0 to 1.0, 1.0 being a perfect match and 0 being the worst possible match.\n\nIf this encoder is a simple, single field encoder, then it will expect\njust 1 item in each of the ``expValues`` and ``actValues`` arrays.\nMulti-encoders will expect 1 item per sub-encoder.\n\nEach encoder type can define it's own metric for closeness. For example,\na category encoder may return either 1 or 0, if the scalar matches exactly\nor not. A scalar encoder might return a percentage match, etc.\n\n:param expValues: Array of expected scalar values, typically obtained from\n :meth:`.getScalars`\n:param actValues: Array of actual values, typically obtained from\n :meth:`.topDownCompute`\n\n:return: Array of closeness scores, one per item in expValues (or\n actValues).", "id": "f17542:c0:m24"} {"signature": "def getDisplayWidth(self):", "body": "width = self.getWidth() + len(self.getDescription()) - return width", "docstring": "Calculate width of display for bits plus blanks between fields.\n\n:return: (int) width of display for bits plus blanks between fields", "id": "f17542:c0:m25"} {"signature": "def getDecoderOutputFieldTypes(self):", "body": "return (FieldMetaType.float, )", "docstring": "Encoder class virtual method override", "id": "f17543:c0:m3"} {"signature": "def _getScaledValue(self, inpt):", "body": "if inpt == SENTINEL_VALUE_FOR_MISSING_DATA:return Noneelse:val = inptif val < self.minval:val = self.minvalelif val > self.maxval:val = self.maxvalscaledVal = math.log10(val)return scaledVal", "docstring": "Convert the input, which is in normal space, into log space", "id": "f17543:c0:m4"} {"signature": "def getBucketIndices(self, inpt):", "body": "scaledVal = self._getScaledValue(inpt)if scaledVal is None:return [None]else:return self.encoder.getBucketIndices(scaledVal)", "docstring": "See the function description in base.py", "id": "f17543:c0:m5"} {"signature": "def encodeIntoArray(self, inpt, output):", "body": "scaledVal = self._getScaledValue(inpt)if scaledVal is None:output[:] = else:self.encoder.encodeIntoArray(scaledVal, output)if self.verbosity >= :print(\"\", inpt, \"\", scaledVal, \"\", output)print(\"\", self.decodedToStr(self.decode(output)))", "docstring": "See the function description in base.py", "id": "f17543:c0:m6"} {"signature": "def decode(self, encoded, parentFieldName=''):", "body": "(fieldsDict, fieldNames) = 
self.encoder.decode(encoded)if len(fieldsDict) == :return (fieldsDict, fieldNames)assert(len(fieldsDict) == )(inRanges, inDesc) = list(fieldsDict.values())[]outRanges = []for (minV, maxV) in inRanges:outRanges.append((math.pow(, minV),math.pow(, maxV)))desc = \"\"numRanges = len(outRanges)for i in range(numRanges):if outRanges[i][] != outRanges[i][]:desc += \"\" % (outRanges[i][], outRanges[i][])else:desc += \"\" % (outRanges[i][])if i < numRanges-:desc += \"\"if parentFieldName != '':fieldName = \"\" % (parentFieldName, self.name)else:fieldName = self.namereturn ({fieldName: (outRanges, desc)}, [fieldName])", "docstring": "See the function description in base.py", "id": "f17543:c0:m7"} {"signature": "def getBucketValues(self):", "body": "if self._bucketValues is None:scaledValues = self.encoder.getBucketValues()self._bucketValues = []for scaledValue in scaledValues:value = math.pow(, scaledValue)self._bucketValues.append(value)return self._bucketValues", "docstring": "See the function description in base.py", "id": "f17543:c0:m8"} {"signature": "def getBucketInfo(self, buckets):", "body": "scaledResult = self.encoder.getBucketInfo(buckets)[]scaledValue = scaledResult.valuevalue = math.pow(, scaledValue)return [EncoderResult(value=value, scalar=value,encoding = scaledResult.encoding)]", "docstring": "See the function description in base.py", "id": "f17543:c0:m9"} {"signature": "def topDownCompute(self, encoded):", "body": "scaledResult = self.encoder.topDownCompute(encoded)[]scaledValue = scaledResult.valuevalue = math.pow(, scaledValue)return EncoderResult(value=value, scalar=value,encoding = scaledResult.encoding)", "docstring": "See the function description in base.py", "id": "f17543:c0:m10"} {"signature": "def closenessScores(self, expValues, actValues, fractional=True):", "body": "if expValues[] > :expValue = math.log10(expValues[])else:expValue = self.minScaledValueif actValues [] > :actValue = math.log10(actValues[])else:actValue = self.minScaledValueif fractional:err = abs(expValue - actValue)pctErr = err / (self.maxScaledValue - self.minScaledValue)pctErr = min(, pctErr)closeness = - pctErrelse:err = abs(expValue - actValue)closeness = errreturn numpy.array([closeness])", "docstring": "See the function description in base.py", "id": "f17543:c0:m11"} {"signature": "def addEncoder(self, name, encoder):", "body": "self.encoders.append((name, encoder, self.width))for d in encoder.getDescription():self.description.append((d[], d[] + self.width))self.width += encoder.getWidth()", "docstring": "Adds one encoder.\n\n:param name: (string) name of encoder, should be unique\n:param encoder: (:class:`.Encoder`) the encoder to add", "id": "f17544:c0:m2"} {"signature": "def getWidth(self):", "body": "return self.width", "docstring": "Represents the sum of the widths of each fields encoding.", "id": "f17544:c0:m5"} {"signature": "def addMultipleEncoders(self, fieldEncodings):", "body": "encoderList = sorted(fieldEncodings.items())for key, fieldParams in encoderList:if '' not in key and fieldParams is not None:fieldParams = fieldParams.copy()fieldName = fieldParams.pop('')encoderName = fieldParams.pop('')try:self.addEncoder(fieldName, eval(encoderName)(**fieldParams))except TypeError as e:print((\"\"\"\"\"\" % (encoderName, fieldParams)))raise", "docstring": ":param fieldEncodings: dict of dicts, mapping field names to the field\n params dict.\n\n Each field params dict has the following keys:\n\n 1. ``fieldname``: data field name\n 2. ``type`` an encoder type\n 3. 
All other keys are encoder parameters\n\nFor example,\n\n.. code-block:: python\n\n fieldEncodings={\n 'dateTime': dict(fieldname='dateTime', type='DateEncoder',\n timeOfDay=(5,5)),\n 'attendeeCount': dict(fieldname='attendeeCount', type='ScalarEncoder',\n name='attendeeCount', minval=0, maxval=250,\n clipInput=True, w=5, resolution=10),\n 'consumption': dict(fieldname='consumption',type='ScalarEncoder',\n name='consumption', minval=0,maxval=110,\n clipInput=True, w=5, resolution=5),\n }\n\nwould yield a vector with a part encoded by the :class:`.DateEncoder`, and\nto parts seperately taken care of by the :class:`.ScalarEncoder` with the\nspecified parameters. The three seperate encodings are then merged together\nto the final vector, in such a way that they are always at the same location\nwithin the vector.", "id": "f17544:c0:m9"} {"signature": "def getDecoderOutputFieldTypes(self):", "body": "return (FieldMetaType.string,)", "docstring": "[Encoder class virtual method override]", "id": "f17546:c0:m1"} {"signature": "def getScalars(self, input):", "body": "return numpy.array([])", "docstring": "See method description in base.py", "id": "f17546:c0:m4"} {"signature": "def getBucketIndices(self, input):", "body": "return []", "docstring": "See method description in base.py", "id": "f17546:c0:m5"} {"signature": "def encodeIntoArray(self, inputVal, outputVal):", "body": "if len(inputVal) != len(outputVal):raise ValueError(\"\" % (len(inputVal), len(outputVal)))if self.w is not None and sum(inputVal) != self.w:raise ValueError(\"\" % (sum(inputVal), self.w))outputVal[:] = inputVal[:]if self.verbosity >= :print(\"\", inputVal, \"\", outputVal)print(\"\", self.decodedToStr(self.decode(outputVal)))", "docstring": "See method description in base.py", "id": "f17546:c0:m6"} {"signature": "def decode(self, encoded, parentFieldName=\"\"):", "body": "if parentFieldName != \"\":fieldName = \"\" % (parentFieldName, self.name)else:fieldName = self.namereturn ({fieldName: ([[, ]], \"\")}, [fieldName])", "docstring": "See the function description in base.py", "id": "f17546:c0:m7"} {"signature": "def getBucketInfo(self, buckets):", "body": "return [EncoderResult(value=, scalar=, encoding=numpy.zeros(self.n))]", "docstring": "See the function description in base.py", "id": "f17546:c0:m8"} {"signature": "def topDownCompute(self, encoded):", "body": "return EncoderResult(value=, scalar=,encoding=numpy.zeros(self.n))", "docstring": "See the function description in base.py", "id": "f17546:c0:m9"} {"signature": "def closenessScores(self, expValues, actValues, **kwargs):", "body": "ratio = esum = int(expValues.sum())asum = int(actValues.sum())if asum > esum:diff = asum - esumif diff < esum:ratio = - diff/float(esum)else:ratio = /float(diff)olap = expValues & actValuesosum = int(olap.sum())if esum == :r = else:r = osum/float(esum)r = r * ratioreturn numpy.array([r])", "docstring": "Does a bitwise compare of the two bitmaps and returns a fractonal\nvalue between 0 and 1 of how similar they are.\n\n- ``1`` => identical\n- ``0`` => no overlaping bits\n\n``kwargs`` will have the keyword \"fractional\", which is assumed by this\nencoder.", "id": "f17546:c0:m10"} {"signature": "def getDescription(self):", "body": "return [('', ), ('', ), ('', ), ('', )]", "docstring": "See `nupic.encoders.base.Encoder` for more information.", "id": "f17547:c0:m1"} {"signature": "def getScalars(self, inputData):", "body": "return numpy.array([] * len(self.getDescription()))", "docstring": "See `nupic.encoders.base.Encoder` for more 
information.", "id": "f17547:c0:m2"} {"signature": "def encodeIntoArray(self, inputData, output):", "body": "altitude = Noneif len(inputData) == :(speed, longitude, latitude, altitude) = inputDataelse:(speed, longitude, latitude) = inputDatacoordinate = self.coordinateForPosition(longitude, latitude, altitude)radius = self.radiusForSpeed(speed)super(GeospatialCoordinateEncoder, self).encodeIntoArray((coordinate, radius), output)", "docstring": "See `nupic.encoders.base.Encoder` for more information.\n\n:param: inputData (tuple) Contains speed (float), longitude (float),\n latitude (float), altitude (float)\n:param: output (numpy.array) Stores encoded SDR in this numpy array", "id": "f17547:c0:m3"} {"signature": "def coordinateForPosition(self, longitude, latitude, altitude=None):", "body": "coords = PROJ(longitude, latitude)if altitude is not None:coords = transform(PROJ, geocentric, coords[], coords[], altitude)coordinate = numpy.array(coords)coordinate = coordinate / self.scalereturn coordinate.astype(int)", "docstring": "Returns coordinate for given GPS position.\n\n:param: longitude (float) Longitude of position\n:param: latitude (float) Latitude of position\n:param: altitude (float) Altitude of position\n:returns: (numpy.array) Coordinate that the given GPS position\n maps to", "id": "f17547:c0:m4"} {"signature": "def radiusForSpeed(self, speed):", "body": "overlap = coordinatesPerTimestep = speed * self.timestep / self.scaleradius = int(round(float(coordinatesPerTimestep) / * overlap))minRadius = int(math.ceil((math.sqrt(self.w) - ) / ))return max(radius, minRadius)", "docstring": "Returns radius for given speed.\n\nTries to get the encodings of consecutive readings to be\nadjacent with some overlap.\n\n:param: speed (float) Speed (in meters per second)\n:returns: (int) Radius for given speed", "id": "f17547:c0:m5"} {"signature": "def _initEncoder(self, w, minval, maxval, n, radius, resolution):", "body": "if n != :if (radius != or resolution != ):raise ValueError(\"\")assert n > wself.n = nif (minval is not None and maxval is not None):if not self.periodic:self.resolution = float(self.rangeInternal) / (self.n - self.w)else:self.resolution = float(self.rangeInternal) / (self.n)self.radius = self.w * self.resolutionif self.periodic:self.range = self.rangeInternalelse:self.range = self.rangeInternal + self.resolutionelse:if radius != :if (resolution != ):raise ValueError(\"\")self.radius = radiusself.resolution = float(self.radius) / welif resolution != :self.resolution = float(resolution)self.radius = self.resolution * self.welse:raise Exception(\"\")if (minval is not None and maxval is not None):if self.periodic:self.range = self.rangeInternalelse:self.range = self.rangeInternal + self.resolutionnfloat = self.w * (self.range / self.radius) + * self.paddingself.n = int(math.ceil(nfloat))", "docstring": "(helper function) There are three different ways of thinking about the representation.\n Handle each case here.", "id": "f17548:c0:m1"} {"signature": "def _checkReasonableSettings(self):", "body": "if self.w < :raise ValueError(\"\"\"\" % self.w)", "docstring": "(helper function) check if the settings are reasonable for SP to work", "id": "f17548:c0:m2"} {"signature": "def getDecoderOutputFieldTypes(self):", "body": "return (FieldMetaType.float, )", "docstring": "[Encoder class virtual method override]", "id": "f17548:c0:m3"} {"signature": "def _getFirstOnBit(self, input):", "body": "if input == SENTINEL_VALUE_FOR_MISSING_DATA:return [None]else:if input < self.minval:if self.clipInput 
and not self.periodic:if self.verbosity > :print(\"\" % (self.name, input,self.minval))input = self.minvalelse:raise Exception('' %(str(input), str(self.minval), str(self.maxval)))if self.periodic:if input >= self.maxval:raise Exception('' %(str(input), str(self.minval), str(self.maxval)))else:if input > self.maxval:if self.clipInput:if self.verbosity > :print(\"\" % (self.name, input,self.maxval))input = self.maxvalelse:raise Exception('' %(str(input), str(self.minval), str(self.maxval)))if self.periodic:centerbin = int((input - self.minval) * self.nInternal / self.range)+ self.paddingelse:centerbin = int(((input - self.minval) + self.resolution/)/ self.resolution ) + self.paddingminbin = centerbin - self.halfwidthreturn [minbin]", "docstring": "Return the bit offset of the first bit to be set in the encoder output.\n For periodic encoders, this can be a negative number when the encoded output\n wraps around.", "id": "f17548:c0:m7"} {"signature": "def getBucketIndices(self, input):", "body": "if type(input) is float and math.isnan(input):input = SENTINEL_VALUE_FOR_MISSING_DATAif input == SENTINEL_VALUE_FOR_MISSING_DATA:return [None]minbin = self._getFirstOnBit(input)[]if self.periodic:bucketIdx = minbin + self.halfwidthif bucketIdx < :bucketIdx += self.nelse:bucketIdx = minbinreturn [bucketIdx]", "docstring": "See method description in base.py", "id": "f17548:c0:m8"} {"signature": "def encodeIntoArray(self, input, output, learn=True):", "body": "if input is not None and not isinstance(input, numbers.Number):raise TypeError(\"\" % type(input))if type(input) is float and math.isnan(input):input = SENTINEL_VALUE_FOR_MISSING_DATAbucketIdx = self._getFirstOnBit(input)[]if bucketIdx is None:output[:self.n] = else:output[:self.n] = minbin = bucketIdxmaxbin = minbin + *self.halfwidthif self.periodic:if maxbin >= self.n:bottombins = maxbin - self.n + output[:bottombins] = maxbin = self.n - if minbin < :topbins = -minbinoutput[self.n - topbins:self.n] = minbin = assert minbin >= assert maxbin < self.noutput[minbin:maxbin + ] = if self.verbosity >= :print()print(\"\", input)print(\"\", self.minval, \"\", self.maxval)print(\"\", self.n, \"\", self.w, \"\", self.resolution,\"\", self.radius, \"\", self.periodic)print(\"\", end='')self.pprint(output)print(\"\", self.decodedToStr(self.decode(output)))", "docstring": "See method description in base.py", "id": "f17548:c0:m9"} {"signature": "def decode(self, encoded, parentFieldName=''):", "body": "tmpOutput = numpy.array(encoded[:self.n] > ).astype(encoded.dtype)if not tmpOutput.any():return (dict(), [])maxZerosInARow = self.halfwidthfor i in range(maxZerosInARow):searchStr = numpy.ones(i + , dtype=encoded.dtype)searchStr[:-] = subLen = len(searchStr)if self.periodic:for j in range(self.n):outputIndices = numpy.arange(j, j + subLen)outputIndices %= self.nif numpy.array_equal(searchStr, tmpOutput[outputIndices]):tmpOutput[outputIndices] = else:for j in range(self.n - subLen + ):if numpy.array_equal(searchStr, tmpOutput[j:j + subLen]):tmpOutput[j:j + subLen] = if self.verbosity >= :print(\"\", encoded[:self.n])print(\"\", tmpOutput)nz = tmpOutput.nonzero()[]runs = [] run = [nz[], ]i = while (i < len(nz)):if nz[i] == run[] + run[]:run[] += else:runs.append(run)run = [nz[i], ]i += runs.append(run)if self.periodic and len(runs) > :if runs[][] == and runs[-][] + runs[-][] == self.n:runs[-][] += runs[][]runs = runs[:]ranges = []for run in runs:(start, runLen) = runif runLen <= self.w:left = right = start + runLen / else:left = start + self.halfwidthright = start 
+ runLen - - self.halfwidthif not self.periodic:inMin = (left - self.padding) * self.resolution + self.minvalinMax = (right - self.padding) * self.resolution + self.minvalelse:inMin = (left - self.padding) * self.range / self.nInternal + self.minvalinMax = (right - self.padding) * self.range / self.nInternal + self.minvalif self.periodic:if inMin >= self.maxval:inMin -= self.rangeinMax -= self.rangeif inMin < self.minval:inMin = self.minvalif inMax < self.minval:inMax = self.minvalif self.periodic and inMax >= self.maxval:ranges.append([inMin, self.maxval])ranges.append([self.minval, inMax - self.range])else:if inMax > self.maxval:inMax = self.maxvalif inMin > self.maxval:inMin = self.maxvalranges.append([inMin, inMax])desc = self._generateRangeDescription(ranges)if parentFieldName != '':fieldName = \"\" % (parentFieldName, self.name)else:fieldName = self.namereturn ({fieldName: (ranges, desc)}, [fieldName])", "docstring": "See the function description in base.py", "id": "f17548:c0:m10"} {"signature": "def _generateRangeDescription(self, ranges):", "body": "desc = \"\"numRanges = len(ranges)for i in range(numRanges):if ranges[i][] != ranges[i][]:desc += \"\" % (ranges[i][], ranges[i][])else:desc += \"\" % (ranges[i][])if i < numRanges - :desc += \"\"return desc", "docstring": "generate description from a text description of the ranges", "id": "f17548:c0:m11"} {"signature": "def _getTopDownMapping(self):", "body": "if self._topDownMappingM is None:if self.periodic:self._topDownValues = numpy.arange(self.minval + self.resolution / ,self.maxval,self.resolution)else:self._topDownValues = numpy.arange(self.minval,self.maxval + self.resolution / ,self.resolution)numCategories = len(self._topDownValues)self._topDownMappingM = SM32(numCategories, self.n)outputSpace = numpy.zeros(self.n, dtype=GetNTAReal())for i in range(numCategories):value = self._topDownValues[i]value = max(value, self.minval)value = min(value, self.maxval)self.encodeIntoArray(value, outputSpace, learn=False)self._topDownMappingM.setRowFromDense(i, outputSpace)return self._topDownMappingM", "docstring": "Return the interal _topDownMappingM matrix used for handling the\n bucketInfo() and topDownCompute() methods. 
This is a matrix, one row per\n category (bucket) where each row contains the encoded output for that\n category.", "id": "f17548:c0:m12"} {"signature": "def getBucketValues(self):", "body": "if self._bucketValues is None:topDownMappingM = self._getTopDownMapping()numBuckets = topDownMappingM.nRows()self._bucketValues = []for bucketIdx in range(numBuckets):self._bucketValues.append(self.getBucketInfo([bucketIdx])[].value)return self._bucketValues", "docstring": "See the function description in base.py", "id": "f17548:c0:m13"} {"signature": "def getBucketInfo(self, buckets):", "body": "topDownMappingM = self._getTopDownMapping()category = buckets[]encoding = self._topDownMappingM.getRow(category)if self.periodic:inputVal = (self.minval + (self.resolution / ) +(category * self.resolution))else:inputVal = self.minval + (category * self.resolution)return [EncoderResult(value=inputVal, scalar=inputVal, encoding=encoding)]", "docstring": "See the function description in base.py", "id": "f17548:c0:m14"} {"signature": "def topDownCompute(self, encoded):", "body": "topDownMappingM = self._getTopDownMapping()category = topDownMappingM.rightVecProd(encoded).argmax()return self.getBucketInfo([category])", "docstring": "See the function description in base.py", "id": "f17548:c0:m15"} {"signature": "def closenessScores(self, expValues, actValues, fractional=True):", "body": "expValue = expValues[]actValue = actValues[]if self.periodic:expValue = expValue % self.maxvalactValue = actValue % self.maxvalerr = abs(expValue - actValue)if self.periodic:err = min(err, self.maxval - err)if fractional:pctErr = float(err) / (self.maxval - self.minval)pctErr = min(, pctErr)closeness = - pctErrelse:closeness = errreturn numpy.array([closeness])", "docstring": "See the function description in base.py", "id": "f17548:c0:m16"} {"signature": "def __init__(self, w, minval=None, maxval=None, periodic=False, n=, radius=,resolution=, name=None, verbosity=, clipInput=True, forced=False):", "body": "self._learningEnabled = Trueself._stateLock = Falseself.width = self.encoders = Noneself.description = []self.name = nameif periodic:raise Exception('')assert n!= self._adaptiveScalarEnc = AdaptiveScalarEncoder(w=w, n=n, minval=minval,maxval=maxval, clipInput=True, name=name, verbosity=verbosity, forced=forced)self.width+=self._adaptiveScalarEnc.getWidth()self.n = self._adaptiveScalarEnc.nself._prevAbsolute = None self._prevDelta = None", "docstring": "[ScalarEncoder class method override]", "id": "f17549:c0:m0"} {"signature": "def topDownCompute(self, encoded):", "body": "if self._prevAbsolute==None or self._prevDelta==None:return [EncoderResult(value=, scalar=,encoding=numpy.zeros(self.n))]ret = self._adaptiveScalarEnc.topDownCompute(encoded)if self._prevAbsolute != None:ret = [EncoderResult(value=ret[].value+self._prevAbsolute,scalar=ret[].scalar+self._prevAbsolute,encoding=ret[].encoding)]et[].value+=self._prevAbsoluteet[].scalar+=self._prevAbsolutereturn ret", "docstring": "[ScalarEncoder class method override]", "id": "f17549:c0:m6"} {"signature": "def bitsToString(arr):", "body": "s = array('',''*len(arr))for i in xrange(len(arr)):if arr[i] == :s[i]=''return s", "docstring": "Returns a string representing a numpy array of 0's and 1's", "id": "f17550:m0"} {"signature": "def _seed(self, seed=-):", "body": "if seed != -:self.random = NupicRandom(seed)else:self.random = NupicRandom()", "docstring": "Initialize the random seed", "id": "f17551:c0:m3"} {"signature": "def getDecoderOutputFieldTypes(self):", "body": "return 
(FieldMetaType.string,)", "docstring": "[Encoder class virtual method override]", "id": "f17551:c0:m4"} {"signature": "def _newRep(self):", "body": "maxAttempts = for _ in range(maxAttempts):foundUnique = Truepopulation = numpy.arange(self.n, dtype=numpy.uint32)choices = numpy.arange(self.w, dtype=numpy.uint32)oneBits = sorted(self.random.sample(population, choices))sdr = numpy.zeros(self.n, dtype='')sdr[oneBits] = for i in range(self.ncategories):if (sdr == self.sdrs[i]).all():foundUnique = Falsebreakif foundUnique:break;if not foundUnique:raise RuntimeError(\"\"\"\" % (self.ncategories, maxAttempts))return sdr", "docstring": "Generate a new and unique representation. Returns a numpy array\n of shape (n,).", "id": "f17551:c0:m6"} {"signature": "def getScalars(self, input):", "body": "if input == SENTINEL_VALUE_FOR_MISSING_DATA:return numpy.array([])index = self.categoryToIndex.get(input, None)if index is None:if self._learningEnabled:self._addCategory(input)index = self.ncategories - else:index = return numpy.array([index])", "docstring": "See method description in base.py", "id": "f17551:c0:m9"} {"signature": "def getBucketIndices(self, input):", "body": "return self.getScalars(input)", "docstring": "See method description in base.py", "id": "f17551:c0:m10"} {"signature": "def decode(self, encoded, parentFieldName=''):", "body": "assert (encoded[:self.n] <= ).all()resultString = \"\"resultRanges = []overlaps = (self.sdrs * encoded[:self.n]).sum(axis=)if self.verbosity >= :print(\"\")for i in range(, self.ncategories):print(\"\" % (overlaps[i], self.categories[i]))matchingCategories = (overlaps > self.thresholdOverlap).nonzero()[]for index in matchingCategories:if resultString != \"\":resultString += \"\"resultString += str(self.categories[index])resultRanges.append([int(index),int(index)])if parentFieldName != '':fieldName = \"\" % (parentFieldName, self.name)else:fieldName = self.namereturn ({fieldName: (resultRanges, resultString)}, [fieldName])", "docstring": "See the function description in base.py", "id": "f17551:c0:m12"} {"signature": "def _getTopDownMapping(self):", "body": "if self._topDownMappingM is None:self._topDownMappingM = SM32(self.ncategories, self.n)outputSpace = numpy.zeros(self.n, dtype=GetNTAReal())for i in range(self.ncategories):self.encodeIntoArray(self.categories[i], outputSpace)self._topDownMappingM.setRowFromDense(i, outputSpace)return self._topDownMappingM", "docstring": "Return the interal _topDownMappingM matrix used for handling the\n bucketInfo() and topDownCompute() methods. 
This is a matrix, one row per\n category (bucket) where each row contains the encoded output for that\n category.", "id": "f17551:c0:m13"} {"signature": "def getBucketValues(self):", "body": "return self.categories", "docstring": "See the function description in base.py", "id": "f17551:c0:m14"} {"signature": "def getBucketInfo(self, buckets):", "body": "if self.ncategories==:return topDownMappingM = self._getTopDownMapping()categoryIndex = buckets[]category = self.categories[categoryIndex]encoding = topDownMappingM.getRow(categoryIndex)return [EncoderResult(value=category, scalar=categoryIndex,encoding=encoding)]", "docstring": "See the function description in base.py", "id": "f17551:c0:m15"} {"signature": "def topDownCompute(self, encoded):", "body": "if self.ncategories==:return topDownMappingM = self._getTopDownMapping()categoryIndex = topDownMappingM.rightVecProd(encoded).argmax()category = self.categories[categoryIndex]encoding = topDownMappingM.getRow(categoryIndex)return EncoderResult(value=category, scalar=categoryIndex, encoding=encoding)", "docstring": "See the function description in base.py", "id": "f17551:c0:m16"} {"signature": "def closenessScores(self, expValues, actValues, fractional=True):", "body": "expValue = expValues[]actValue = actValues[]if expValue == actValue:closeness = else:closeness = if not fractional:closeness = - closenessreturn numpy.array([closeness])", "docstring": "See the function description in base.py\n\n kwargs will have the keyword \"fractional\", which is ignored by this encoder", "id": "f17551:c0:m17"} {"signature": "def _seed(self, seed=-):", "body": "if seed != -:self.random = NupicRandom(seed)else:self.random = NupicRandom()", "docstring": "Initialize the random seed", "id": "f17552:c0:m2"} {"signature": "def getDecoderOutputFieldTypes(self):", "body": "return (FieldMetaType.float, )", "docstring": "See method description in base.py", "id": "f17552:c0:m3"} {"signature": "def getWidth(self):", "body": "return self.n", "docstring": "See method description in base.py", "id": "f17552:c0:m4"} {"signature": "def getBucketIndices(self, x):", "body": "if ((isinstance(x, float) and math.isnan(x)) orx == SENTINEL_VALUE_FOR_MISSING_DATA):return [None]if self._offset is None:self._offset = xbucketIdx = ((self._maxBuckets/) + int(round((x - self._offset) / self.resolution)))if bucketIdx < :bucketIdx = elif bucketIdx >= self._maxBuckets:bucketIdx = self._maxBuckets-return [bucketIdx]", "docstring": "See method description in base.py", "id": "f17552:c0:m6"} {"signature": "def mapBucketIndexToNonZeroBits(self, index):", "body": "if index < :index = if index >= self._maxBuckets:index = self._maxBuckets-if index not in self.bucketMap:if self.verbosity >= :print(\"\", index)self._createBucket(index)return self.bucketMap[index]", "docstring": "Given a bucket index, return the list of non-zero bits. If the bucket\nindex does not exist, it is created. 
If the index falls outside our range\nwe clip it.\n\n:param index The bucket index to get non-zero bits for.\n@returns numpy array of indices of non-zero bits for specified index.", "id": "f17552:c0:m7"} {"signature": "def encodeIntoArray(self, x, output):", "body": "if x is not None and not isinstance(x, numbers.Number):raise TypeError(\"\" % type(x))bucketIdx = self.getBucketIndices(x)[]output[:self.n] = if bucketIdx is not None:output[self.mapBucketIndexToNonZeroBits(bucketIdx)] = ", "docstring": "See method description in base.py", "id": "f17552:c0:m8"} {"signature": "def _createBucket(self, index):", "body": "if index < self.minIndex:if index == self.minIndex - :self.bucketMap[index] = self._newRepresentation(self.minIndex,index)self.minIndex = indexelse:self._createBucket(index+)self._createBucket(index)else:if index == self.maxIndex + :self.bucketMap[index] = self._newRepresentation(self.maxIndex,index)self.maxIndex = indexelse:self._createBucket(index-)self._createBucket(index)", "docstring": "Create the given bucket index. Recursively create as many in-between\nbucket indices as necessary.", "id": "f17552:c0:m9"} {"signature": "def _newRepresentation(self, index, newIndex):", "body": "newRepresentation = self.bucketMap[index].copy()ri = newIndex % self.wnewBit = self.random.getUInt32(self.n)newRepresentation[ri] = newBitwhile newBit in self.bucketMap[index] ornot self._newRepresentationOK(newRepresentation, newIndex):self.numTries += newBit = self.random.getUInt32(self.n)newRepresentation[ri] = newBitreturn newRepresentation", "docstring": "Return a new representation for newIndex that overlaps with the\nrepresentation at index by exactly w-1 bits", "id": "f17552:c0:m10"} {"signature": "def _newRepresentationOK(self, newRep, newIndex):", "body": "if newRep.size != self.w:return Falseif (newIndex < self.minIndex-) or (newIndex > self.maxIndex+):raise ValueError(\"\")newRepBinary = numpy.array([False]*self.n)newRepBinary[newRep] = TruemidIdx = self._maxBuckets/runningOverlap = self._countOverlap(self.bucketMap[self.minIndex], newRep)if not self._overlapOK(self.minIndex, newIndex, overlap=runningOverlap):return Falsefor i in range(self.minIndex+, midIdx+):newBit = (i-)%self.wif newRepBinary[self.bucketMap[i-][newBit]]:runningOverlap -= if newRepBinary[self.bucketMap[i][newBit]]:runningOverlap += if not self._overlapOK(i, newIndex, overlap=runningOverlap):return Falsefor i in range(midIdx+, self.maxIndex+):newBit = i%self.wif newRepBinary[self.bucketMap[i-][newBit]]:runningOverlap -= if newRepBinary[self.bucketMap[i][newBit]]:runningOverlap += if not self._overlapOK(i, newIndex, overlap=runningOverlap):return Falsereturn True", "docstring": "Return True if this new candidate representation satisfies all our overlap\nrules. Since we know that neighboring representations differ by at most\none bit, we compute running overlaps.", "id": "f17552:c0:m11"} {"signature": "def _countOverlapIndices(self, i, j):", "body": "if i in self.bucketMap and j in self.bucketMap:iRep = self.bucketMap[i]jRep = self.bucketMap[j]return self._countOverlap(iRep, jRep)else:raise ValueError(\"\")", "docstring": "Return the overlap between bucket indices i and j", "id": "f17552:c0:m12"} {"signature": "@staticmethoddef _countOverlap(rep1, rep2):", "body": "overlap = for e in rep1:if e in rep2:overlap += return overlap", "docstring": "Return the overlap between two representations. 
rep1 and rep2 are lists of\nnon-zero indices.", "id": "f17552:c0:m13"} {"signature": "def _overlapOK(self, i, j, overlap=None):", "body": "if overlap is None:overlap = self._countOverlapIndices(i, j)if abs(i-j) < self.w:if overlap == (self.w - abs(i-j)):return Trueelse:return Falseelse:if overlap <= self._maxOverlap:return Trueelse:return False", "docstring": "Return True if the given overlap between bucket indices i and j are\nacceptable. If overlap is not specified, calculate it from the bucketMap", "id": "f17552:c0:m14"} {"signature": "def _initializeBucketMap(self, maxBuckets, offset):", "body": "self._maxBuckets = maxBucketsself.minIndex = self._maxBuckets / self.maxIndex = self._maxBuckets / self._offset = offsetself.bucketMap = {}def _permutation(n):r = numpy.arange(n, dtype=numpy.uint32)self.random.shuffle(r)return rself.bucketMap[self.minIndex] = _permutation(self.n)[:self.w]self.numTries = ", "docstring": "Initialize the bucket map assuming the given number of maxBuckets.", "id": "f17552:c0:m15"} {"signature": "def _setEncoderParams(self):", "body": "self.rangeInternal = float(self.maxval - self.minval)self.resolution = float(self.rangeInternal) / (self.n - self.w)self.radius = self.w * self.resolutionself.range = self.rangeInternal + self.resolutionself.nInternal = self.n - * self.paddingself._bucketValues = None", "docstring": "Set the radius, resolution and range. These values are updated when minval\nand/or maxval change.", "id": "f17553:c0:m1"} {"signature": "def setFieldStats(self, fieldName, fieldStats):", "body": "if fieldStats[fieldName][''] == None orfieldStats[fieldName][''] == None:returnself.minval = fieldStats[fieldName]['']self.maxval = fieldStats[fieldName]['']if self.minval == self.maxval:self.maxval+=self._setEncoderParams()", "docstring": "TODO: document", "id": "f17553:c0:m2"} {"signature": "def _setMinAndMax(self, input, learn):", "body": "self.slidingWindow.next(input)if self.minval is None and self.maxval is None:self.minval = inputself.maxval = input+ self._setEncoderParams()elif learn:sorted = self.slidingWindow.getSlidingWindow()sorted.sort()minOverWindow = sorted[]maxOverWindow = sorted[len(sorted)-]if minOverWindow < self.minval:if self.verbosity >= :print(\"\"% (self.name, input, self.minval, minOverWindow))self.minval = minOverWindow self._setEncoderParams()if maxOverWindow > self.maxval:if self.verbosity >= :print(\"\"% (self.name, input, self.maxval, maxOverWindow))self.maxval = maxOverWindow self._setEncoderParams()", "docstring": "Potentially change the minval and maxval using input.\n**The learn flag is currently not supported by cla regions.**", "id": "f17553:c0:m3"} {"signature": "def getBucketIndices(self, input, learn=None):", "body": "self.recordNum +=if learn is None:learn = self._learningEnabledif type(input) is float and math.isnan(input):input = SENTINEL_VALUE_FOR_MISSING_DATAif input == SENTINEL_VALUE_FOR_MISSING_DATA:return [None]else:self._setMinAndMax(input, learn)return super(AdaptiveScalarEncoder, self).getBucketIndices(input)", "docstring": "[overrides nupic.encoders.scalar.ScalarEncoder.getBucketIndices]", "id": "f17553:c0:m4"} {"signature": "def encodeIntoArray(self, input, output,learn=None):", "body": "self.recordNum +=if learn is None:learn = self._learningEnabledif input == SENTINEL_VALUE_FOR_MISSING_DATA:output[:self.n] = elif not math.isnan(input):self._setMinAndMax(input, learn)super(AdaptiveScalarEncoder, self).encodeIntoArray(input, output)", "docstring": "[overrides 
nupic.encoders.scalar.ScalarEncoder.encodeIntoArray]", "id": "f17553:c0:m5"} {"signature": "def getBucketInfo(self, buckets):", "body": "if self.minval is None or self.maxval is None:return [EncoderResult(value=, scalar=,encoding=numpy.zeros(self.n))]return super(AdaptiveScalarEncoder, self).getBucketInfo(buckets)", "docstring": "[overrides nupic.encoders.scalar.ScalarEncoder.getBucketInfo]", "id": "f17553:c0:m6"} {"signature": "def topDownCompute(self, encoded):", "body": "if self.minval is None or self.maxval is None:return [EncoderResult(value=, scalar=,encoding=numpy.zeros(self.n))]return super(AdaptiveScalarEncoder, self).topDownCompute(encoded)", "docstring": "[overrides nupic.encoders.scalar.ScalarEncoder.topDownCompute]", "id": "f17553:c0:m7"} {"signature": "def enableConcurrencyChecks(maxConcurrency, raiseException=True):", "body": "global g_max_concurrency, g_max_concurrency_raise_exceptionassert maxConcurrency >= g_max_concurrency = maxConcurrencyg_max_concurrency_raise_exception = raiseExceptionreturn", "docstring": "Enable the diagnostic feature for debugging unexpected concurrency in\n acquiring ConnectionWrapper instances.\n\n NOTE: This MUST be done early in your application's execution, BEFORE any\n accesses to ConnectionFactory or connection policies from your application\n (including imports and sub-imports of your app).\n\n Parameters:\n ----------------------------------------------------------------\n maxConcurrency: A non-negative integer that represents the maximum expected\n number of outstanding connections. When this value is\n exceeded, useful information will be logged and, depending\n on the value of the raiseException arg,\n ConcurrencyExceededError may be raised.\n raiseException: If true, ConcurrencyExceededError will be raised when\n maxConcurrency is exceeded.", "id": "f17554:m0"} {"signature": "def _getCommonSteadyDBArgsDict():", "body": "return dict(creator = pymysql,host = Configuration.get(''),port = int(Configuration.get('')),user = Configuration.get(''),passwd = Configuration.get(''),charset = '',use_unicode = True,setsession = [''])", "docstring": "Returns a dictionary of arguments for DBUtils.SteadyDB.SteadyDBConnection\n constructor.", "id": "f17554:m2"} {"signature": "def _getLogger(cls, logLevel=None):", "body": "logger = logging.getLogger(\"\".join(['', _MODULE_NAME, cls.__name__]))if logLevel is not None:logger.setLevel(logLevel)return logger", "docstring": "Gets a logger for the given class in this module", "id": "f17554:m3"} {"signature": "@classmethoddef get(cls):", "body": "if cls._connectionPolicy is None:logger = _getLogger(cls)logger.info(\"\",cls._connectionPolicyInstanceProvider)cls._connectionPolicy = cls._connectionPolicyInstanceProvider()logger.debug(\"\", cls._connectionPolicy)return cls._connectionPolicy.acquireConnection()", "docstring": "Acquire a ConnectionWrapper instance that represents a connection\n to the SQL server per nupic.cluster.database.* configuration settings.\n\n NOTE: caller is responsible for calling the ConnectionWrapper instance's\n release() method after using the connection in order to release resources.\n Better yet, use the returned ConnectionWrapper instance in a Context Manager\n statement for automatic invocation of release():\n Example:\n # If using Jython 2.5.x, first import with_statement at the very top of\n your script (don't need this import for Jython/Python 2.6.x and later):\n from __future__ import with_statement\n # Then:\n from nupic.database.Connection import ConnectionFactory\n # Then use 
it like this\n with ConnectionFactory.get() as conn:\n conn.cursor.execute(\"SELECT ...\")\n conn.cursor.fetchall()\n conn.cursor.execute(\"INSERT ...\")\n\n WARNING: DO NOT close the underlying connection or cursor as it may be\n shared by other modules in your process. ConnectionWrapper's release()\n method will do the right thing.\n\n Parameters:\n ----------------------------------------------------------------\n retval: A ConnectionWrapper instance. NOTE: Caller is responsible\n for releasing resources as described above.", "id": "f17554:c1:m0"} {"signature": "@classmethoddef close(cls):", "body": "if cls._connectionPolicy is not None:cls._connectionPolicy.close()cls._connectionPolicy = Nonereturn", "docstring": "Close ConnectionFactory's connection policy. Typically, there is no need\n to call this method as the system will automatically close the connections\n when the process exits.\n\n NOTE: This method should be used with CAUTION. It is designed to be\n called ONLY by the code responsible for startup and shutdown of the process\n since it closes the connection(s) used by ALL clients in this process.", "id": "f17554:c1:m1"} {"signature": "@classmethoddef setConnectionPolicyProvider(cls, provider):", "body": "cls._connectionPolicyInstanceProvider = providerreturn", "docstring": "Set the method for ConnectionFactory to use when it needs to\n instantiate its database connection policy.\n\n NOTE: This method should be used with CAUTION. ConnectionFactory's default\n behavior should be adequate for all NuPIC code, and this method is provided\n primarily for diagnostics. It is designed to only be called by the code\n responsible for startup of the process since the provider method has no\n impact after ConnectionFactory's connection policy instance is instantiated.\n\n See ConnectionFactory._createDefaultPolicy\n\n Parameters:\n ----------------------------------------------------------------\n provider: The method that instantiates the singleton database\n connection policy to be used by ConnectionFactory class.\n The method must be compatible with the following signature:\n provider()", "id": "f17554:c1:m2"} {"signature": "@classmethoddef _createDefaultPolicy(cls):", "body": "logger = _getLogger(cls)logger.debug(\"\",platform.system(), pymysql.VERSION)if platform.system() == \"\":policy = SingleSharedConnectionPolicy()else:policy = PooledConnectionPolicy()return policy", "docstring": "[private] Create the default database connection policy instance\n\n Parameters:\n ----------------------------------------------------------------\n retval: The default database connection policy instance", "id": "f17554:c1:m3"} {"signature": "def __init__(self, dbConn, cursor, releaser, logger):", "body": "global g_max_concurrencytry:self._logger = loggerself.dbConn = dbConn\"\"\"\"\"\"self.cursor = cursor\"\"\"\"\"\" True if we added self to _clsOutstandingInstances \"\"\"\"\"\" Instance creation traceback string (if g_max_concurrency is enabled)", "docstring": "Parameters:\n----------------------------------------------------------------\ndbConn: the underlying database connection instance\ncursor: database cursor\nreleaser: a method to call to release the connection and cursor;\n method signature:\n None dbConnReleaser(dbConn, cursor)", "id": "f17554:c2:m0"} {"signature": "def __enter__(self):", "body": "return self", "docstring": "[Context Manager protocol method] Permit a ConnectionWrapper instance\n to be used in a context manager expression (with ... 
as:) to facilitate\n robust release of resources (instead of try:/finally:/release()). See\n examples in ConnectionFactory docstring.", "id": "f17554:c2:m2"} {"signature": "def __exit__(self, exc_type, exc_val, exc_tb):", "body": "self.release()return False", "docstring": "[Context Manager protocol method] Release resources.", "id": "f17554:c2:m3"} {"signature": "def release(self):", "body": "self._logger.debug(\"\", self)if self._addedToInstanceSet:try:self._clsOutstandingInstances.remove(self)except:self._logger.exception(\"\", self)raiseself._releaser(dbConn=self.dbConn, cursor=self.cursor)self.__class__._clsNumOutstanding -= assert self._clsNumOutstanding >= ,\"\" % (self._clsNumOutstanding,)self._releaser = Noneself.cursor = Noneself.dbConn = Noneself._creationTracebackString = Noneself._addedToInstanceSet = Falseself._logger = Nonereturn", "docstring": "Release the database connection and cursor\n\n The receiver of the Connection instance MUST call this method in order\n to reclaim resources", "id": "f17554:c2:m4"} {"signature": "def _trackInstanceAndCheckForConcurrencyViolation(self):", "body": "global g_max_concurrency, g_max_concurrency_raise_exceptionassert g_max_concurrency is not Noneassert self not in self._clsOutstandingInstances, repr(self)self._creationTracebackString = traceback.format_stack()if self._clsNumOutstanding >= g_max_concurrency:errorMsg = (\"\"\"\"\"\") % (self._clsNumOutstanding, g_max_concurrency, self,len(self._clsOutstandingInstances), self._clsOutstandingInstances,)self._logger.error(errorMsg)if g_max_concurrency_raise_exception:raise ConcurrencyExceededError(errorMsg)self._clsOutstandingInstances.add(self)self._addedToInstanceSet = Truereturn", "docstring": "Check for concurrency violation and add self to\n _clsOutstandingInstances.\n\n ASSUMPTION: Called from constructor BEFORE _clsNumOutstanding is\n incremented", "id": "f17554:c2:m5"} {"signature": "def close(self):", "body": "raise NotImplementedError()", "docstring": "Close the policy instance and its shared database connection.", "id": "f17554:c3:m0"} {"signature": "def acquireConnection(self):", "body": "raise NotImplementedError()", "docstring": "Get a Connection instance.\n\n Parameters:\n ----------------------------------------------------------------\n retval: A ConnectionWrapper instance.\n Caller is responsible for calling the ConnectionWrapper\n instance's release() method to release resources.", "id": "f17554:c3:m1"} {"signature": "def __init__(self):", "body": "self._logger = _getLogger(self.__class__)self._conn = SteadyDB.connect(** _getCommonSteadyDBArgsDict())self._logger.debug(\"\", self.__class__.__name__)return", "docstring": "Consruct an instance. The instance's open() method must be\n called to make it ready for acquireConnection() calls.", "id": "f17554:c4:m0"} {"signature": "def close(self):", "body": "self._logger.info(\"\")if self._conn is not None:self._conn.close()self._conn = Noneelse:self._logger.warning(\"\")return", "docstring": "Close the policy instance and its shared database connection.", "id": "f17554:c4:m1"} {"signature": "def acquireConnection(self):", "body": "self._logger.debug(\"\")self._conn._ping_check()connWrap = ConnectionWrapper(dbConn=self._conn,cursor=self._conn.cursor(),releaser=self._releaseConnection,logger=self._logger)return connWrap", "docstring": "Get a Connection instance.\n\n Parameters:\n ----------------------------------------------------------------\n retval: A ConnectionWrapper instance. 
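The docstrings above describe acquiring a ConnectionWrapper and releasing it either by calling release() or by using it in a with-statement. Below is a minimal, self-contained sketch of that acquire/use/release pattern only; the _Demo* names are hypothetical stand-ins (not the NuPIC classes) and sqlite3 stands in for the real MySQL/SteadyDB connection.

import sqlite3  # stand-in for the real database driver

class _DemoConnectionWrapper(object):
    def __init__(self, dbConn, cursor, releaser):
        self.dbConn = dbConn
        self.cursor = cursor
        self._releaser = releaser

    def __enter__(self):
        return self

    def __exit__(self, excType, excVal, excTb):
        self.release()
        return False  # never suppress the caller's exception

    def release(self):
        # hand the connection and cursor back to the owning policy
        self._releaser(dbConn=self.dbConn, cursor=self.cursor)
        self.dbConn = None
        self.cursor = None

def _demoReleaser(dbConn, cursor):
    cursor.close()  # a pooled policy would also return dbConn to its pool

conn = sqlite3.connect(":memory:")
with _DemoConnectionWrapper(conn, conn.cursor(), _demoReleaser) as wrapped:
    wrapped.cursor.execute("SELECT 1")
    print(wrapped.cursor.fetchall())  # [(1,)]

The point of the wrapper is that __exit__ always runs release(), so the underlying connection is handed back even if the query raises.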
NOTE: Caller\n is responsible for calling the ConnectionWrapper\n instance's release() method or use it in a context manager\n expression (with ... as:) to release resources.", "id": "f17554:c4:m2"} {"signature": "def _releaseConnection(self, dbConn, cursor):", "body": "self._logger.debug(\"\")cursor.close()return", "docstring": "Release database connection and cursor; passed as a callback to\n ConnectionWrapper", "id": "f17554:c4:m3"} {"signature": "def __init__(self):", "body": "self._logger = _getLogger(self.__class__)self._logger.debug(\"\")self._pool = PooledDB(**_getCommonSteadyDBArgsDict())self._logger.info(\"\", self.__class__.__name__)return", "docstring": "Consruct an instance. The instance's open() method must be\n called to make it ready for acquireConnection() calls.", "id": "f17554:c5:m0"} {"signature": "def close(self):", "body": "self._logger.info(\"\")if self._pool is not None:self._pool.close()self._pool = Noneelse:self._logger.warning(\"\")return", "docstring": "Close the policy instance and its database connection pool.", "id": "f17554:c5:m1"} {"signature": "def acquireConnection(self):", "body": "self._logger.debug(\"\")dbConn = self._pool.connection(shareable=False)connWrap = ConnectionWrapper(dbConn=dbConn,cursor=dbConn.cursor(),releaser=self._releaseConnection,logger=self._logger)return connWrap", "docstring": "Get a connection from the pool.\n\n Parameters:\n ----------------------------------------------------------------\n retval: A ConnectionWrapper instance. NOTE: Caller\n is responsible for calling the ConnectionWrapper\n instance's release() method or use it in a context manager\n expression (with ... as:) to release resources.", "id": "f17554:c5:m2"} {"signature": "def _releaseConnection(self, dbConn, cursor):", "body": "self._logger.debug(\"\")cursor.close()dbConn.close()return", "docstring": "Release database connection and cursor; passed as a callback to\n ConnectionWrapper", "id": "f17554:c5:m3"} {"signature": "def __init__(self):", "body": "self._logger = _getLogger(self.__class__)self._opened = Trueself._logger.info(\"\", self.__class__.__name__)return", "docstring": "Consruct an instance. The instance's open() method must be\n called to make it ready for acquireConnection() calls.", "id": "f17554:c6:m0"} {"signature": "def close(self):", "body": "self._logger.info(\"\")if self._opened:self._opened = Falseelse:self._logger.warning(\"\")return", "docstring": "Close the policy instance.", "id": "f17554:c6:m1"} {"signature": "def acquireConnection(self):", "body": "self._logger.debug(\"\")dbConn = SteadyDB.connect(** _getCommonSteadyDBArgsDict())connWrap = ConnectionWrapper(dbConn=dbConn,cursor=dbConn.cursor(),releaser=self._releaseConnection,logger=self._logger)return connWrap", "docstring": "Create a Connection instance.\n\n Parameters:\n ----------------------------------------------------------------\n retval: A ConnectionWrapper instance. NOTE: Caller\n is responsible for calling the ConnectionWrapper\n instance's release() method or use it in a context manager\n expression (with ... 
as:) to release resources.", "id": "f17554:c6:m2"} {"signature": "def _releaseConnection(self, dbConn, cursor):", "body": "self._logger.debug(\"\")cursor.close()dbConn.close()return", "docstring": "Release database connection and cursor; passed as a callback to\n ConnectionWrapper", "id": "f17554:c6:m3"} {"signature": "def _abbreviate(text, threshold):", "body": "if text is not None and len(text) > threshold:text = text[:threshold] + \"\"return text", "docstring": "Abbreviate the given text to threshold chars and append an ellipsis if its\n length exceeds threshold; used for logging;\n\n NOTE: the resulting text could be longer than threshold due to the ellipsis", "id": "f17555:m0"} {"signature": "@classmethoddef dbNamePrefix(cls):", "body": "return cls.__getDBNamePrefixForVersion(cls._DB_VERSION)", "docstring": "Get the beginning part of the database name for the current version\n of the database. This, concatenated with\n '_' + Configuration.get('nupic.cluster.database.nameSuffix') will\n produce the actual database name used.", "id": "f17555:c1:m0"} {"signature": "@classmethoddef __getDBNamePrefixForVersion(cls, dbVersion):", "body": "return '' % (cls._DB_ROOT_NAME, dbVersion)", "docstring": "Get the beginning part of the database name for the given database\n version. This, concatenated with\n '_' + Configuration.get('nupic.cluster.database.nameSuffix') will\n produce the actual database name used.\n\n Parameters:\n ----------------------------------------------------------------\n dbVersion: ClientJobs database version number\n\n retval: the ClientJobs database name prefix for the given DB version", "id": "f17555:c1:m1"} {"signature": "@classmethoddef _getDBName(cls):", "body": "return cls.__getDBNameForVersion(cls._DB_VERSION)", "docstring": "Generates the ClientJobs database name for the current version of the\n database; \"semi-private\" class method for use by friends of the class.\n\n Parameters:\n ----------------------------------------------------------------\n retval: the ClientJobs database name", "id": "f17555:c1:m2"} {"signature": "@classmethoddef __getDBNameForVersion(cls, dbVersion):", "body": "prefix = cls.__getDBNamePrefixForVersion(dbVersion)suffix = Configuration.get('')suffix = suffix.replace(\"\", \"\")suffix = suffix.replace(\"\", \"\")dbName = '' % (prefix, suffix)return dbName", "docstring": "Generates the ClientJobs database name for the given version of the\n database\n\n Parameters:\n ----------------------------------------------------------------\n dbVersion: ClientJobs database version number\n\n retval: the ClientJobs database name for the given DB version", "id": "f17555:c1:m3"} {"signature": "@staticmethod@logExceptions(_LOGGER)def get():", "body": "if ClientJobsDAO._instance is None:cjDAO = ClientJobsDAO()cjDAO.connect()ClientJobsDAO._instance = cjDAOreturn ClientJobsDAO._instance", "docstring": "Get the instance of the ClientJobsDAO created for this process (or\n perhaps at some point in the future, for this thread).\n\n Parameters:\n ----------------------------------------------------------------\n retval: instance of ClientJobsDAO", "id": "f17555:c1:m4"} {"signature": "@logExceptions(_LOGGER)def __init__(self):", "body": "self._logger = _LOGGERassert (ClientJobsDAO._instance is None)self.dbName = self._getDBName()self._jobs = self._JobsTableInfo()self._jobs.tableName = '' % (self.dbName)self._models = self._ModelsTableInfo()self._models.tableName = '' % (self.dbName)self._connectionID = None", "docstring": "Instantiate a ClientJobsDAO instance.\n\n 
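ClientJobsDAO.get() lazily constructs, connects, and caches a single DAO instance per process. A toy sketch of that accessor pattern, using a hypothetical _DemoDAO class rather than the real one:

class _DemoDAO(object):
    _instance = None

    @staticmethod
    def get():
        # construct and connect exactly once, then reuse the cached instance
        if _DemoDAO._instance is None:
            dao = _DemoDAO()
            dao.connect()
            _DemoDAO._instance = dao
        return _DemoDAO._instance

    def connect(self):
        # stand-in for ClientJobsDAO.connect(): locate/create the jobs tables
        self.connected = True

assert _DemoDAO.get() is _DemoDAO.get()  # every caller sees the same instance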
Parameters:\n ----------------------------------------------------------------", "id": "f17555:c1:m5"} {"signature": "def _columnNameDBToPublic(self, dbName):", "body": "words = dbName.split('')if dbName.startswith(''):words = words[:]pubWords = [words[]]for word in words[:]:pubWords.append(word[].upper() + word[:])return ''.join(pubWords)", "docstring": "Convert a database internal column name to a public name. This\n takes something of the form word1_word2_word3 and converts it to:\n word1Word2Word3. If the db field name starts with '_', it is stripped out\n so that the name is compatible with collections.namedtuple.\n for example: _word1_word2_word3 => word1Word2Word3\n\n Parameters:\n --------------------------------------------------------------\n dbName: database internal field name\n retval: public name", "id": "f17555:c1:m8"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef connect(self, deleteOldVersions=False, recreate=False):", "body": "with ConnectionFactory.get() as conn:self._initTables(cursor=conn.cursor, deleteOldVersions=deleteOldVersions,recreate=recreate)conn.cursor.execute('')self._connectionID = conn.cursor.fetchall()[][]self._logger.info(\"\", self._connectionID)return", "docstring": "Locate the current version of the jobs DB or create a new one, and\n optionally delete old versions laying around. If desired, this method\n can be called at any time to re-create the tables from scratch, delete\n old versions of the database, etc.\n\n Parameters:\n ----------------------------------------------------------------\n deleteOldVersions: if true, delete any old versions of the DB left\n on the server\n recreate: if true, recreate the database from scratch even\n if it already exists.", "id": "f17555:c1:m9"} {"signature": "@logExceptions(_LOGGER)def _initTables(self, cursor, deleteOldVersions, recreate):", "body": "if deleteOldVersions:self._logger.info(\"\",traceback.format_stack())for i in range(self._DB_VERSION):cursor.execute('' %(self.__getDBNameForVersion(i),))if recreate:self._logger.info(\"\",self.dbName, traceback.format_stack())cursor.execute('' % (self.dbName))cursor.execute('' % (self.dbName))cursor.execute('' % (self.dbName))output = cursor.fetchall()tableNames = [x[] for x in output]if '' not in tableNames:self._logger.info(\"\", self.jobsTableName)fields = ['','' % (self.CLIENT_MAX_LEN),'','','','','' % (self.HASH_MAX_LEN),'','','','' %self.CMPL_REASON_SUCCESS,'','','','','','','','','' % self.DEFAULT_JOB_PRIORITY,'','','','','' %self.CLEAN_NOT_DONE,'','','','','','','','','','','']options = ['',]query = '' %(self.jobsTableName, ''.join(fields), ''.join(options))cursor.execute(query)if '' not in tableNames:self._logger.info(\"\", self.modelsTableName)fields = ['','','','','','','','','','','','','','','','' % (self.HASH_MAX_LEN),'' % (self.HASH_MAX_LEN),'','','','','','','','','','','',]options = ['',]query = '' %(self.modelsTableName, ''.join(fields), ''.join(options))cursor.execute(query)cursor.execute('' % (self.jobsTableName))fields = cursor.fetchall()self._jobs.dbFieldNames = [str(field[]) for field in fields]cursor.execute('' % (self.modelsTableName))fields = cursor.fetchall()self._models.dbFieldNames = [str(field[]) for field in fields]self._jobs.publicFieldNames = [self._columnNameDBToPublic(x)for x in self._jobs.dbFieldNames]self._models.publicFieldNames = [self._columnNameDBToPublic(x)for x in self._models.dbFieldNames]self._jobs.pubToDBNameDict = dict(list(zip(self._jobs.publicFieldNames, self._jobs.dbFieldNames)))self._jobs.dbToPubNameDict = 
dict(list(zip(self._jobs.dbFieldNames, self._jobs.publicFieldNames)))self._models.pubToDBNameDict = dict(list(zip(self._models.publicFieldNames, self._models.dbFieldNames)))self._models.dbToPubNameDict = dict(list(zip(self._models.dbFieldNames, self._models.publicFieldNames)))self._models.modelInfoNamedTuple = collections.namedtuple('', self._models.publicFieldNames)self._jobs.jobInfoNamedTuple = collections.namedtuple('', self._jobs.publicFieldNames)return", "docstring": "Initialize tables, if needed\n\n Parameters:\n ----------------------------------------------------------------\n cursor: SQL cursor\n deleteOldVersions: if true, delete any old versions of the DB left\n on the server\n recreate: if true, recreate the database from scratch even\n if it already exists.", "id": "f17555:c1:m10"} {"signature": "def _getMatchingRowsNoRetries(self, tableInfo, conn, fieldsToMatch,selectFieldNames, maxRows=None):", "body": "assert fieldsToMatch, repr(fieldsToMatch)assert all(k in tableInfo.dbFieldNamesfor k in fieldsToMatch.keys()), repr(fieldsToMatch)assert selectFieldNames, repr(selectFieldNames)assert all(f in tableInfo.dbFieldNames for f in selectFieldNames), repr(selectFieldNames)matchPairs = list(fieldsToMatch.items())matchExpressionGen = (p[] +('' + {True:'', False:''}[p[]] if isinstance(p[], bool)else '' if p[] is Noneelse '' if isinstance(p[], self._SEQUENCE_TYPES)else '')for p in matchPairs)matchFieldValues = [p[] for p in matchPairsif (not isinstance(p[], (bool)) and p[] is not None)]query = '' % (''.join(selectFieldNames), tableInfo.tableName,''.join(matchExpressionGen))sqlParams = matchFieldValuesif maxRows is not None:query += ''sqlParams.append(maxRows)conn.cursor.execute(query, sqlParams)rows = conn.cursor.fetchall()if rows:assert maxRows is None or len(rows) <= maxRows, \"\" % (len(rows), maxRows)assert len(rows[]) == len(selectFieldNames), \"\" % (len(rows[]), len(selectFieldNames))else:rows = tuple()return rows", "docstring": "Return a sequence of matching rows with the requested field values from\n a table or empty sequence if nothing matched.\n\n tableInfo: Table information: a ClientJobsDAO._TableInfoBase instance\n conn: Owned connection acquired from ConnectionFactory.get()\n fieldsToMatch: Dictionary of internal fieldName/value mappings that\n identify the desired rows. If a value is an instance of\n ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the\n operator 'IN' will be used in the corresponding SQL\n predicate; if the value is bool: \"IS TRUE/FALSE\"; if the\n value is None: \"IS NULL\"; '=' will be used for all other\n cases.\n selectFieldNames:\n list of fields to return, using internal field names\n maxRows: maximum number of rows to return; unlimited if maxRows\n is None\n\n retval: A sequence of matching rows, each row consisting of field\n values in the order of the requested field names. 
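The fieldsToMatch semantics described above (a sequence value maps to IN, a bool to IS TRUE/IS FALSE, None to IS NULL, anything else to =) can be sketched as a small predicate builder. _demoBuildWhere is a hypothetical helper for illustration, not the actual query construction code:

def _demoBuildWhere(fieldsToMatch):
    clauses, params = [], []
    for name, value in fieldsToMatch.items():
        if isinstance(value, bool):
            clauses.append("%s IS %s" % (name, "TRUE" if value else "FALSE"))
        elif value is None:
            clauses.append("%s IS NULL" % name)
        elif isinstance(value, (list, set, tuple)):
            placeholders = ",".join(["%s"] * len(value))
            clauses.append("%s IN (%s)" % (name, placeholders))
            params.extend(value)
        else:
            clauses.append("%s=%%s" % name)
            params.append(value)
    return " AND ".join(clauses), params

print(_demoBuildWhere({"status": "running", "job_id": [1, 2], "cancel": False}))
# ('status=%s AND job_id IN (%s,%s) AND cancel IS FALSE', ['running', 1, 2])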
Empty\n sequence is returned when not match exists.", "id": "f17555:c1:m11"} {"signature": "@g_retrySQLdef _getMatchingRowsWithRetries(self, tableInfo, fieldsToMatch,selectFieldNames, maxRows=None):", "body": "with ConnectionFactory.get() as conn:return self._getMatchingRowsNoRetries(tableInfo, conn, fieldsToMatch,selectFieldNames, maxRows)", "docstring": "Like _getMatchingRowsNoRetries(), but with retries on transient MySQL\n failures", "id": "f17555:c1:m12"} {"signature": "def _getOneMatchingRowNoRetries(self, tableInfo, conn, fieldsToMatch,selectFieldNames):", "body": "rows = self._getMatchingRowsNoRetries(tableInfo, conn, fieldsToMatch,selectFieldNames, maxRows=)if rows:assert len(rows) == , repr(len(rows))result = rows[]else:result = Nonereturn result", "docstring": "Return a single matching row with the requested field values from the\n the requested table or None if nothing matched.\n\n tableInfo: Table information: a ClientJobsDAO._TableInfoBase instance\n conn: Owned connection acquired from ConnectionFactory.get()\n fieldsToMatch: Dictionary of internal fieldName/value mappings that\n identify the desired rows. If a value is an instance of\n ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the\n operator 'IN' will be used in the corresponding SQL\n predicate; if the value is bool: \"IS TRUE/FALSE\"; if the\n value is None: \"IS NULL\"; '=' will be used for all other\n cases.\n selectFieldNames:\n list of fields to return, using internal field names\n\n retval: A sequence of field values of the matching row in the order\n of the given field names; or None if there was no match.", "id": "f17555:c1:m13"} {"signature": "@g_retrySQLdef _getOneMatchingRowWithRetries(self, tableInfo, fieldsToMatch,selectFieldNames):", "body": "with ConnectionFactory.get() as conn:return self._getOneMatchingRowNoRetries(tableInfo, conn, fieldsToMatch,selectFieldNames)", "docstring": "Like _getOneMatchingRowNoRetries(), but with retries on transient MySQL\n failures", "id": "f17555:c1:m14"} {"signature": "def _insertOrGetUniqueJobNoRetries(self, conn, client, cmdLine, jobHash, clientInfo, clientKey, params,minimumWorkers, maximumWorkers, jobType, priority, alreadyRunning):", "body": "assert len(client) <= self.CLIENT_MAX_LEN, \"\" + repr(client)assert cmdLine, \"\" + repr(cmdLine)assert len(jobHash) == self.HASH_MAX_LEN, \"\" % len(jobHash)if alreadyRunning:initStatus = self.STATUS_TESTMODEelse:initStatus = self.STATUS_NOTSTARTEDquery = ''''''''''% (self.jobsTableName,)sqlParams = (initStatus, client, clientInfo, clientKey, cmdLine, params,jobHash, minimumWorkers, maximumWorkers, priority, jobType)numRowsInserted = conn.cursor.execute(query, sqlParams)jobID = if numRowsInserted == :conn.cursor.execute('')jobID = conn.cursor.fetchall()[][]if jobID == :self._logger.warn('''''''',jobType, client, _abbreviate(clientInfo, ), clientKey, jobHash,cmdLine)else:assert numRowsInserted == , repr(numRowsInserted)if jobID == :row = self._getOneMatchingRowNoRetries(self._jobs, conn, dict(client=client, job_hash=jobHash), [''])assert row is not Noneassert len(row) == , '' + repr(len(row))jobID = row[]if alreadyRunning:query = ''''''''% (self.jobsTableName,)conn.cursor.execute(query, (self._connectionID, jobID))return jobID", "docstring": "Attempt to insert a row with the given parameters into the jobs table.\n Return jobID of the inserted row, or of an existing row with matching\n client/jobHash key.\n\n The combination of client and jobHash are expected to be unique (enforced\n by a unique index on the two 
columns).\n\n NOTE: It's possibe that this or another process (on this or another machine)\n already inserted a row with matching client/jobHash key (e.g.,\n StreamMgr). This may also happen undetected by this function due to a\n partially-successful insert operation (e.g., row inserted, but then\n connection was lost while reading response) followed by retries either of\n this function or in SteadyDB module.\n\n Parameters:\n ----------------------------------------------------------------\n conn: Owned connection acquired from ConnectionFactory.get()\n client: Name of the client submitting the job\n cmdLine: Command line to use to launch each worker process; must be\n a non-empty string\n jobHash: unique hash of this job. The caller must insure that this,\n together with client, uniquely identifies this job request\n for the purposes of detecting duplicates.\n clientInfo: JSON encoded dict of client specific information.\n clientKey: Foreign key.\n params: JSON encoded dict of the parameters for the job. This\n can be fetched out of the database by the worker processes\n based on the jobID.\n minimumWorkers: minimum number of workers design at a time.\n maximumWorkers: maximum number of workers desired at a time.\n priority: Job scheduling priority; 0 is the default priority (\n ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are\n higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY),\n and negative values are lower priority (down to\n ClientJobsDAO.MIN_JOB_PRIORITY). Higher-priority jobs will\n be scheduled to run at the expense of the lower-priority\n jobs, and higher-priority job tasks will preempt those\n with lower priority if there is inadequate supply of\n scheduling slots. Excess lower priority job tasks will\n starve as long as slot demand exceeds supply. Most jobs\n should be scheduled with DEFAULT_JOB_PRIORITY. System jobs\n that must run at all cost, such as Multi-Model-Master,\n should be scheduled with MAX_JOB_PRIORITY.\n alreadyRunning: Used for unit test purposes only. This inserts the job\n in the running state. It is used when running a worker\n in standalone mode without hadoop- it gives it a job\n record to work with.\n\n retval: jobID of the inserted jobs row, or of an existing jobs row\n with matching client/jobHash key", "id": "f17555:c1:m16"} {"signature": "def _resumeJobNoRetries(self, conn, jobID, alreadyRunning):", "body": "if alreadyRunning:initStatus = self.STATUS_TESTMODEelse:initStatus = self.STATUS_NOTSTARTEDassignments = ['','','','','','','','','','','','','',]assignmentValues = [initStatus]if alreadyRunning:assignments += ['', '','']assignmentValues.append(self._connectionID)else:assignments += ['', '']assignments = ''.join(assignments)query = ''''% (self.jobsTableName, assignments)sqlParams = assignmentValues + [jobID, self.STATUS_COMPLETED]numRowsAffected = conn.cursor.execute(query, sqlParams)assert numRowsAffected <= , repr(numRowsAffected)if numRowsAffected == :self._logger.info(\"\"\"\"\"\", jobID)return", "docstring": "Resumes processing of an existing job that is presently in the\n STATUS_COMPLETED state.\n\n NOTE: this is primarily for resuming suspended Production and Stream Jobs; DO\n NOT use it on Hypersearch jobs.\n\n This prepares an existing job entry to resume processing. 
The CJM is always\n periodically sweeping the jobs table and when it finds a job that is ready\n to run, it will proceed to start it up on Hadoop.\n\n Parameters:\n ----------------------------------------------------------------\n conn: Owned connection acquired from ConnectionFactory.get()\n jobID: jobID of the job to resume\n alreadyRunning: Used for unit test purposes only. This inserts the job\n in the running state. It is used when running a worker\n in standalone mode without hadoop.\n\n raises: Throws a RuntimeError if no rows are affected. This could\n either be because:\n 1) Because there was not matching jobID\n 2) or if the status of the job was not STATUS_COMPLETED.\n\n retval: nothing", "id": "f17555:c1:m17"} {"signature": "def getConnectionID(self):", "body": "return self._connectionID", "docstring": "Return our connection ID. This can be used for worker identification\n purposes.\n\n NOTE: the actual MySQL connection ID used in queries may change from time\n to time if connection is re-acquired (e.g., upon MySQL server restart) or\n when more than one entry from the connection pool has been used (e.g.,\n multi-threaded apps)", "id": "f17555:c1:m18"} {"signature": "@logExceptions(_LOGGER)def jobSuspend(self, jobID):", "body": "self.jobCancel(jobID)return", "docstring": "Requests a job to be suspended\n\n NOTE: this is primarily for suspending Production Jobs; DO NOT use\n it on Hypersearch jobs. For canceling any job type, use jobCancel() instead!\n\n Parameters:\n ----------------------------------------------------------------\n jobID: jobID of the job to resume\n\n retval: nothing", "id": "f17555:c1:m19"} {"signature": "@logExceptions(_LOGGER)def jobResume(self, jobID, alreadyRunning=False):", "body": "row = self.jobGetFields(jobID, [''])(jobStatus,) = rowif jobStatus != self.STATUS_COMPLETED:raise RuntimeError((\"\"\"\") % (jobID, jobStatus))@g_retrySQLdef resumeWithRetries():with ConnectionFactory.get() as conn:self._resumeJobNoRetries(conn, jobID, alreadyRunning)resumeWithRetries()return", "docstring": "Resumes processing of an existing job that is presently in the\n STATUS_COMPLETED state.\n\n NOTE: this is primarily for resuming suspended Production Jobs; DO NOT use\n it on Hypersearch jobs.\n\n NOTE: The job MUST be in the STATUS_COMPLETED state at the time of this\n call, otherwise an exception will be raised.\n\n This prepares an existing job entry to resume processing. The CJM is always\n periodically sweeping the jobs table and when it finds a job that is ready\n to run, will proceed to start it up on Hadoop.\n\n Parameters:\n ----------------------------------------------------------------\n job: jobID of the job to resume\n alreadyRunning: Used for unit test purposes only. This inserts the job\n in the running state. It is used when running a worker\n in standalone mode without hadoop.\n\n raises: Throws a RuntimeError if no rows are affected. 
This could\n either be because:\n 1) Because there was not matching jobID\n 2) or if the status of the job was not STATUS_COMPLETED.\n\n retval: nothing", "id": "f17555:c1:m20"} {"signature": "@logExceptions(_LOGGER)def jobInsert(self, client, cmdLine, clientInfo='', clientKey='', params='',alreadyRunning=False, minimumWorkers=, maximumWorkers=,jobType='', priority=DEFAULT_JOB_PRIORITY):", "body": "jobHash = self._normalizeHash(uuid.uuid1().bytes)@g_retrySQLdef insertWithRetries():with ConnectionFactory.get() as conn:return self._insertOrGetUniqueJobNoRetries(conn, client=client, cmdLine=cmdLine, jobHash=jobHash,clientInfo=clientInfo, clientKey=clientKey, params=params,minimumWorkers=minimumWorkers, maximumWorkers=maximumWorkers,jobType=jobType, priority=priority, alreadyRunning=alreadyRunning)try:jobID = insertWithRetries()except:self._logger.exception('''',jobType, client, _abbreviate(clientInfo, ), clientKey, jobHash,cmdLine)raiseelse:self._logger.info('''',jobID, jobType, client, _abbreviate(clientInfo, ), clientKey,jobHash, cmdLine)return jobID", "docstring": "Add an entry to the jobs table for a new job request. This is called by\n clients that wish to startup a new job, like a Hypersearch, stream job, or\n specific model evaluation from the engine.\n\n This puts a new entry into the jobs table. The CJM is always periodically\n sweeping the jobs table and when it finds a new job, will proceed to start it\n up on Hadoop.\n\n Parameters:\n ----------------------------------------------------------------\n client: Name of the client submitting the job\n cmdLine: Command line to use to launch each worker process; must be\n a non-empty string\n clientInfo: JSON encoded dict of client specific information.\n clientKey: Foreign key.\n params: JSON encoded dict of the parameters for the job. This\n can be fetched out of the database by the worker processes\n based on the jobID.\n alreadyRunning: Used for unit test purposes only. This inserts the job\n in the running state. It is used when running a worker\n in standalone mode without hadoop - it gives it a job\n record to work with.\n minimumWorkers: minimum number of workers design at a time.\n maximumWorkers: maximum number of workers desired at a time.\n jobType: The type of job that this is. This should be one of the\n JOB_TYPE_XXXX enums. This is needed to allow a standard\n way of recognizing a job's function and capabilities.\n priority: Job scheduling priority; 0 is the default priority (\n ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are\n higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY),\n and negative values are lower priority (down to\n ClientJobsDAO.MIN_JOB_PRIORITY). Higher-priority jobs will\n be scheduled to run at the expense of the lower-priority\n jobs, and higher-priority job tasks will preempt those\n with lower priority if there is inadequate supply of\n scheduling slots. Excess lower priority job tasks will\n starve as long as slot demand exceeds supply. Most jobs\n should be scheduled with DEFAULT_JOB_PRIORITY. 
System jobs\n that must run at all cost, such as Multi-Model-Master,\n should be scheduled with MAX_JOB_PRIORITY.\n\n retval: jobID - unique ID assigned to this job", "id": "f17555:c1:m21"} {"signature": "@logExceptions(_LOGGER)def jobInsertUnique(self, client, cmdLine, jobHash, clientInfo='',clientKey='', params='', minimumWorkers=,maximumWorkers=, jobType='',priority=DEFAULT_JOB_PRIORITY):", "body": "assert cmdLine, \"\" + repr(cmdLine)@g_retrySQLdef insertUniqueWithRetries():jobHashValue = self._normalizeHash(jobHash)jobID = Nonewith ConnectionFactory.get() as conn:row = self._getOneMatchingRowNoRetries(self._jobs, conn, dict(client=client, job_hash=jobHashValue),['', ''])if row is not None:(jobID, status) = rowif status == self.STATUS_COMPLETED:query = ''''''''''''''''''% (self.jobsTableName,)sqlParams = (clientInfo, clientKey, cmdLine, params,minimumWorkers, maximumWorkers, priority,jobType, jobID, self.STATUS_COMPLETED)numRowsUpdated = conn.cursor.execute(query, sqlParams)assert numRowsUpdated <= , repr(numRowsUpdated)if numRowsUpdated == :self._logger.info(\"\"\"\"\"\", jobID)self._resumeJobNoRetries(conn, jobID, alreadyRunning=False)else:jobID = self._insertOrGetUniqueJobNoRetries(conn, client=client, cmdLine=cmdLine, jobHash=jobHashValue,clientInfo=clientInfo, clientKey=clientKey, params=params,minimumWorkers=minimumWorkers, maximumWorkers=maximumWorkers,jobType=jobType, priority=priority, alreadyRunning=False)return jobIDtry:jobID = insertUniqueWithRetries()except:self._logger.exception('''',jobType, client, _abbreviate(clientInfo, ), clientKey, jobHash,cmdLine)raiseelse:self._logger.info('''',jobID, jobType, client, _abbreviate(clientInfo, ), clientKey,jobHash, cmdLine)return jobID", "docstring": "Add an entry to the jobs table for a new job request, but only if the\n same job, by the same client is not already running. If the job is already\n running, or queued up to run, this call does nothing. If the job does not\n exist in the jobs table or has completed, it will be inserted and/or started\n up again.\n\n This method is called by clients, like StreamMgr, that wish to only start up\n a job if it hasn't already been started up.\n\n Parameters:\n ----------------------------------------------------------------\n client: Name of the client submitting the job\n cmdLine: Command line to use to launch each worker process; must be\n a non-empty string\n jobHash: unique hash of this job. The client must insure that this\n uniquely identifies this job request for the purposes\n of detecting duplicates.\n clientInfo: JSON encoded dict of client specific information.\n clientKey: Foreign key.\n params: JSON encoded dict of the parameters for the job. This\n can be fetched out of the database by the worker processes\n based on the jobID.\n minimumWorkers: minimum number of workers design at a time.\n maximumWorkers: maximum number of workers desired at a time.\n jobType: The type of job that this is. This should be one of the\n JOB_TYPE_XXXX enums. This is needed to allow a standard\n way of recognizing a job's function and capabilities.\n priority: Job scheduling priority; 0 is the default priority (\n ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are\n higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY),\n and negative values are lower priority (down to\n ClientJobsDAO.MIN_JOB_PRIORITY). 
Higher-priority jobs will\n be scheduled to run at the expense of the lower-priority\n jobs, and higher-priority job tasks will preempt those\n with lower priority if there is inadequate supply of\n scheduling slots. Excess lower priority job tasks will\n starve as long as slot demand exceeds supply. Most jobs\n should be scheduled with DEFAULT_JOB_PRIORITY. System jobs\n that must run at all cost, such as Multi-Model-Master,\n should be scheduled with MAX_JOB_PRIORITY.\n\n retval: jobID of the newly inserted or existing job.", "id": "f17555:c1:m22"} {"signature": "@g_retrySQLdef _startJobWithRetries(self, jobID):", "body": "with ConnectionFactory.get() as conn:query = ''''''''''% (self.jobsTableName,)sqlParams = [self.STATUS_RUNNING, self._connectionID,jobID, self.STATUS_NOTSTARTED]numRowsUpdated = conn.cursor.execute(query, sqlParams)if numRowsUpdated != :self._logger.warn('''''', numRowsUpdated)return", "docstring": "Place the given job in STATUS_RUNNING mode; the job is expected to be\n STATUS_NOTSTARTED.\n\n NOTE: this function was factored out of jobStartNext because it's also\n needed for testing (e.g., test_client_jobs_dao.py)", "id": "f17555:c1:m23"} {"signature": "@logExceptions(_LOGGER)def jobStartNext(self):", "body": "row = self._getOneMatchingRowWithRetries(self._jobs, dict(status=self.STATUS_NOTSTARTED), [''])if row is None:return None(jobID,) = rowself._startJobWithRetries(jobID)return jobID", "docstring": "For use only by Nupic Scheduler (also known as ClientJobManager) Look\n through the jobs table and see if any new job requests have been\n queued up. If so, pick one and mark it as starting up and create the\n model table to hold the results\n\n Parameters:\n ----------------------------------------------------------------\n retval: jobID of the job we are starting up, if found; None if not found", "id": "f17555:c1:m24"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef jobReactivateRunningJobs(self):", "body": "with ConnectionFactory.get() as conn:query = ''''''% (self.jobsTableName,)conn.cursor.execute(query, [self._connectionID, self.STATUS_RUNNING])return", "docstring": "Look through the jobs table and reactivate all that are already in the\n running state by setting their _eng_allocate_new_workers fields to True;\n used by Nupic Scheduler as part of its failure-recovery procedure.", "id": "f17555:c1:m25"} {"signature": "@logExceptions(_LOGGER)def jobGetDemand(self,):", "body": "rows = self._getMatchingRowsWithRetries(self._jobs, dict(status=self.STATUS_RUNNING),[self._jobs.pubToDBNameDict[f]for f in self._jobs.jobDemandNamedTuple._fields])return [self._jobs.jobDemandNamedTuple._make(r) for r in rows]", "docstring": "Look through the jobs table and get the demand - minimum and maximum\n number of workers requested, if new workers are to be allocated, if there\n are any untended dead workers, for all running jobs.\n\n Parameters:\n ----------------------------------------------------------------\n retval: list of ClientJobsDAO._jobs.jobDemandNamedTuple nametuples\n containing the demand - min and max workers,\n allocate_new_workers, untended_dead_workers, num_failed_workers\n for each running (STATUS_RUNNING) job. 
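Many of the DAO methods above are wrapped with a retry decorator (g_retrySQL) or define nested helpers such as resumeWithRetries/insertWithRetries so that transient MySQL failures are retried before giving up. A generic sketch of that decorator pattern, assuming a simple fixed-delay policy (the real decorator's retry policy is not shown in this extract):

import functools
import time

def _demoRetryOnTransient(retries=3, delaySec=0.1, transient=(ConnectionError,)):
    def decorate(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(retries):
                try:
                    return func(*args, **kwargs)
                except transient:
                    if attempt == retries - 1:
                        raise  # exhausted all attempts
                    time.sleep(delaySec)
        return wrapper
    return decorate

@_demoRetryOnTransient(retries=3)
def _demoQuery():
    return "ok"

print(_demoQuery())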
Empty list when there\n isn't any demand.", "id": "f17555:c1:m26"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef jobCancelAllRunningJobs(self):", "body": "with ConnectionFactory.get() as conn:query = ''% (self.jobsTableName,)conn.cursor.execute(query, [self.STATUS_COMPLETED])return", "docstring": "Set cancel field of all currently-running jobs to true.", "id": "f17555:c1:m27"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef jobCountCancellingJobs(self,):", "body": "with ConnectionFactory.get() as conn:query = ''''''% (self.jobsTableName,)conn.cursor.execute(query, [self.STATUS_COMPLETED])rows = conn.cursor.fetchall()return rows[][]", "docstring": "Look through the jobs table and count the running jobs whose\n cancel field is true.\n\n Parameters:\n ----------------------------------------------------------------\n retval: A count of running jobs with the cancel field set to true.", "id": "f17555:c1:m28"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef jobGetCancellingJobs(self,):", "body": "with ConnectionFactory.get() as conn:query = ''''''% (self.jobsTableName,)conn.cursor.execute(query, [self.STATUS_COMPLETED])rows = conn.cursor.fetchall()return tuple(r[] for r in rows)", "docstring": "Look through the jobs table and get the list of running jobs whose\n cancel field is true.\n\n Parameters:\n ----------------------------------------------------------------\n retval: A (possibly empty) sequence of running job IDs with cancel field\n set to true", "id": "f17555:c1:m29"} {"signature": "@staticmethod@logExceptions(_LOGGER)def partitionAtIntervals(data, intervals):", "body": "assert sum(intervals) <= len(data)start = for interval in intervals:end = start + intervalyield data[start:end]start = endraise StopIteration", "docstring": "Generator to allow iterating slices at dynamic intervals\n\n Parameters:\n ----------------------------------------------------------------\n data: Any data structure that supports slicing (i.e. list or tuple)\n *intervals: Iterable of intervals. The sum of intervals should be less\n than, or equal to the length of data.", "id": "f17555:c1:m30"} {"signature": "@staticmethod@logExceptions(_LOGGER)def _combineResults(result, *namedTuples):", "body": "results = ClientJobsDAO.partitionAtIntervals(result, [len(nt._fields) for nt in namedTuples])return [nt._make(result) for nt, result in zip(namedTuples, results)]", "docstring": "Return a list of namedtuples from the result of a join query. A\n single database result is partitioned at intervals corresponding to the\n fields in namedTuples. 
The return value is the result of applying\n namedtuple._make() to each of the partitions, for each of the namedTuples.\n\n Parameters:\n ----------------------------------------------------------------\n result: Tuple representing a single result from a database query\n *namedTuples: List of named tuples.", "id": "f17555:c1:m31"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef jobInfoWithModels(self, jobID):", "body": "combinedResults = Nonewith ConnectionFactory.get() as conn:query = ''.join(['' % (self.jobsTableName, self.modelsTableName),'' % self.jobsTableName,'' % self.modelsTableName,''])conn.cursor.execute(query, (jobID,))if conn.cursor.rowcount > :combinedResults = [ClientJobsDAO._combineResults(result, self._jobs.jobInfoNamedTuple,self._models.modelInfoNamedTuple) for result in conn.cursor.fetchall()]if combinedResults is not None:return combinedResultsraise RuntimeError(\"\" % (jobID))", "docstring": "Get all info about a job, with model details, if available.\n\n Parameters:\n ----------------------------------------------------------------\n job: jobID of the job to query\n retval: A sequence of two-tuples if the jobID exists in the jobs\n table (exeption is raised if it doesn't exist). Each two-tuple\n contains an instance of jobInfoNamedTuple as the first element and\n an instance of modelInfoNamedTuple as the second element. NOTE: In\n the case where there are no matching model rows, a sequence of one\n two-tuple will still be returned, but the modelInfoNamedTuple\n fields will be None, and the jobInfoNamedTuple fields will be\n populated.", "id": "f17555:c1:m32"} {"signature": "@logExceptions(_LOGGER)def jobInfo(self, jobID):", "body": "row = self._getOneMatchingRowWithRetries(self._jobs, dict(job_id=jobID),[self._jobs.pubToDBNameDict[n]for n in self._jobs.jobInfoNamedTuple._fields])if row is None:raise RuntimeError(\"\" % (jobID))return self._jobs.jobInfoNamedTuple._make(row)", "docstring": "Get all info about a job\n\n Parameters:\n ----------------------------------------------------------------\n job: jobID of the job to query\n retval: namedtuple containing the job info.", "id": "f17555:c1:m33"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef jobSetStatus(self, jobID, status, useConnectionID=True,):", "body": "with ConnectionFactory.get() as conn:query = ''''''% (self.jobsTableName,)sqlParams = [status, jobID]if useConnectionID:query += ''sqlParams.append(self._connectionID)result = conn.cursor.execute(query, sqlParams)if result != :raise RuntimeError(\"\"\"\" % (jobID, status))", "docstring": "Change the status on the given job\n\n Parameters:\n ----------------------------------------------------------------\n job: jobID of the job to change status\n status: new status string (ClientJobsDAO.STATUS_xxxxx)\n\n useConnectionID: True if the connection id of the calling function\n must be the same as the connection that created the job. 
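partitionAtIntervals() and _combineResults() above split one joined database row into per-table namedtuples: the row is cut into consecutive slices whose lengths match each namedtuple's field count, and each slice is passed to namedtuple._make(). A small runnable illustration of the same idea, with hypothetical JobDemo/ModelDemo tuples standing in for jobInfoNamedTuple/modelInfoNamedTuple:

import collections

JobDemo = collections.namedtuple("JobDemo", ["jobId", "status"])
ModelDemo = collections.namedtuple("ModelDemo", ["modelId", "params"])

def _demoPartition(data, intervals):
    start = 0
    for interval in intervals:
        yield data[start:start + interval]
        start += interval

def _demoCombine(row, *namedTuples):
    slices = _demoPartition(row, [len(nt._fields) for nt in namedTuples])
    return [nt._make(s) for nt, s in zip(namedTuples, slices)]

row = (17, "running", 42, "{}")  # jobs columns followed by models columns
print(_demoCombine(row, JobDemo, ModelDemo))
# [JobDemo(jobId=17, status='running'), ModelDemo(modelId=42, params='{}')]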
Set\n to False for hypersearch workers", "id": "f17555:c1:m34"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef jobSetCompleted(self, jobID, completionReason, completionMsg,useConnectionID = True):", "body": "with ConnectionFactory.get() as conn:query = ''''''''''''% (self.jobsTableName,)sqlParams = [self.STATUS_COMPLETED, completionReason, completionMsg,jobID]if useConnectionID:query += ''sqlParams.append(self._connectionID)result = conn.cursor.execute(query, sqlParams)if result != :raise RuntimeError(\"\"\"\"\"\" % (jobID))", "docstring": "Change the status on the given job to completed\n\n Parameters:\n ----------------------------------------------------------------\n job: jobID of the job to mark as completed\n completionReason: completionReason string\n completionMsg: completionMsg string\n\n useConnectionID: True if the connection id of the calling function\n must be the same as the connection that created the job. Set\n to False for hypersearch workers", "id": "f17555:c1:m35"} {"signature": "@logExceptions(_LOGGER)def jobCancel(self, jobID):", "body": "self._logger.info('', jobID)self.jobSetFields(jobID, {\"\" : True}, useConnectionID=False)", "docstring": "Cancel the given job. This will update the cancel field in the\n jobs table and will result in the job being cancelled.\n\n Parameters:\n ----------------------------------------------------------------\n jobID: jobID of the job to mark as completed\n\n to False for hypersearch workers", "id": "f17555:c1:m36"} {"signature": "@logExceptions(_LOGGER)def jobGetModelIDs(self, jobID):", "body": "rows = self._getMatchingRowsWithRetries(self._models, dict(job_id=jobID),[''])return [r[] for r in rows]", "docstring": "Fetch all the modelIDs that correspond to a given jobID; empty sequence\n if none", "id": "f17555:c1:m37"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef getActiveJobCountForClientInfo(self, clientInfo):", "body": "with ConnectionFactory.get() as conn:query = '''''''' % self.jobsTableNameconn.cursor.execute(query, [clientInfo, self.STATUS_COMPLETED])activeJobCount = conn.cursor.fetchone()[]return activeJobCount", "docstring": "Return the number of jobs for the given clientInfo and a status that is\n not completed.", "id": "f17555:c1:m38"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef getActiveJobCountForClientKey(self, clientKey):", "body": "with ConnectionFactory.get() as conn:query = '''''''' % self.jobsTableNameconn.cursor.execute(query, [clientKey, self.STATUS_COMPLETED])activeJobCount = conn.cursor.fetchone()[]return activeJobCount", "docstring": "Return the number of jobs for the given clientKey and a status that is\n not completed.", "id": "f17555:c1:m39"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef getActiveJobsForClientInfo(self, clientInfo, fields=[]):", "body": "dbFields = [self._jobs.pubToDBNameDict[x] for x in fields]dbFieldsStr = ''.join([''] + dbFields)with ConnectionFactory.get() as conn:query = '''''' % (dbFieldsStr, self.jobsTableName)conn.cursor.execute(query, [clientInfo, self.STATUS_COMPLETED])rows = conn.cursor.fetchall()return rows", "docstring": "Fetch jobIDs for jobs in the table with optional fields given a\n specific clientInfo", "id": "f17555:c1:m40"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef getActiveJobsForClientKey(self, clientKey, fields=[]):", "body": "dbFields = [self._jobs.pubToDBNameDict[x] for x in fields]dbFieldsStr = ''.join([''] + dbFields)with ConnectionFactory.get() as conn:query = '''''' % (dbFieldsStr, 
self.jobsTableName)conn.cursor.execute(query, [clientKey, self.STATUS_COMPLETED])rows = conn.cursor.fetchall()return rows", "docstring": "Fetch jobIDs for jobs in the table with optional fields given a\n specific clientKey", "id": "f17555:c1:m41"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef getJobs(self, fields=[]):", "body": "dbFields = [self._jobs.pubToDBNameDict[x] for x in fields]dbFieldsStr = ''.join([''] + dbFields)with ConnectionFactory.get() as conn:query = '' % (dbFieldsStr, self.jobsTableName)conn.cursor.execute(query)rows = conn.cursor.fetchall()return rows", "docstring": "Fetch jobIDs for jobs in the table with optional fields", "id": "f17555:c1:m42"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef getFieldsForActiveJobsOfType(self, jobType, fields=[]):", "body": "dbFields = [self._jobs.pubToDBNameDict[x] for x in fields]dbFieldsStr = ''.join([''] + dbFields)with ConnectionFactory.get() as conn:query ='''''''''' % (dbFieldsStr, self.jobsTableName,self.modelsTableName)conn.cursor.execute(query, [self.STATUS_COMPLETED, jobType])return conn.cursor.fetchall()", "docstring": "Helper function for querying the models table including relevant job\n info where the job type matches the specified jobType. Only records for\n which there is a matching jobId in both tables is returned, and only the\n requested fields are returned in each result, assuming that there is not\n a conflict. This function is useful, for example, in querying a cluster\n for a list of actively running production models (according to the state\n of the client jobs database). jobType must be one of the JOB_TYPE_XXXX\n enumerations.\n\n Parameters:\n ----------------------------------------------------------------\n jobType: jobType enum\n fields: list of fields to return\n\n Returns: List of tuples containing the jobId and requested field values", "id": "f17555:c1:m43"} {"signature": "@logExceptions(_LOGGER)def jobGetFields(self, jobID, fields):", "body": "return self.jobsGetFields([jobID], fields, requireAll=True)[][]", "docstring": "Fetch the values of 1 or more fields from a job record. Here, 'fields'\n is a list with the names of the fields to fetch. The names are the public\n names of the fields (camelBack, not the lower_case_only form as stored in\n the DB).\n\n Parameters:\n ----------------------------------------------------------------\n jobID: jobID of the job record\n fields: list of fields to return\n\n Returns: A sequence of field values in the same order as the requested\n field list -> [field1, field2, ...]", "id": "f17555:c1:m44"} {"signature": "@logExceptions(_LOGGER)def jobsGetFields(self, jobIDs, fields, requireAll=True):", "body": "assert isinstance(jobIDs, self._SEQUENCE_TYPES)assert len(jobIDs) >=rows = self._getMatchingRowsWithRetries(self._jobs, dict(job_id=jobIDs),[''] + [self._jobs.pubToDBNameDict[x] for x in fields])if requireAll and len(rows) < len(jobIDs):raise RuntimeError(\"\" % ((set(jobIDs) - set(r[] for r in rows)),))return [(r[], list(r[:])) for r in rows]", "docstring": "Fetch the values of 1 or more fields from a sequence of job records.\n Here, 'fields' is a sequence (list or tuple) with the names of the fields to\n fetch. 
The names are the public names of the fields (camelBack, not the\n lower_case_only form as stored in the DB).\n\n WARNING!!!: The order of the results are NOT necessarily in the same order as\n the order of the job IDs passed in!!!\n\n Parameters:\n ----------------------------------------------------------------\n jobIDs: A sequence of jobIDs\n fields: A list of fields to return for each jobID\n\n Returns: A list of tuples->(jobID, [field1, field2,...])", "id": "f17555:c1:m45"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef jobSetFields(self, jobID, fields, useConnectionID=True,ignoreUnchanged=False):", "body": "assignmentExpressions = ''.join([\"\" % (self._jobs.pubToDBNameDict[f],) for f in fields.keys()])assignmentValues = list(fields.values())query = ''''% (self.jobsTableName, assignmentExpressions,)sqlParams = assignmentValues + [jobID]if useConnectionID:query += ''sqlParams.append(self._connectionID)with ConnectionFactory.get() as conn:result = conn.cursor.execute(query, sqlParams)if result != and not ignoreUnchanged:raise RuntimeError(\"\"\"\" % (assignmentExpressions, jobID, self._connectionID, result, query))", "docstring": "Change the values of 1 or more fields in a job. Here, 'fields' is a\n dict with the name/value pairs to change. The names are the public names of\n the fields (camelBack, not the lower_case_only form as stored in the DB).\n This method is for private use by the ClientJobManager only.\n\n Parameters:\n ----------------------------------------------------------------\n jobID: jobID of the job record\n\n fields: dictionary of fields to change\n\n useConnectionID: True if the connection id of the calling function\n must be the same as the connection that created the job. Set\n to False for hypersearch workers\n\n ignoreUnchanged: The default behavior is to throw a\n RuntimeError if no rows are affected. This could either be\n because:\n 1) Because there was not matching jobID\n 2) or if the data to update matched the data in the DB exactly.\n\n Set this parameter to True if you expect case 2 and wish to\n supress the error.", "id": "f17555:c1:m46"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef jobSetFieldIfEqual(self, jobID, fieldName, newValue, curValue):", "body": "dbFieldName = self._jobs.pubToDBNameDict[fieldName]conditionValue = []if isinstance(curValue, bool):conditionExpression = '' % (dbFieldName, {True:'', False:''}[curValue])elif curValue is None:conditionExpression = '' % (dbFieldName,)else:conditionExpression = '' % (dbFieldName,)conditionValue.append(curValue)query = ''''% (self.jobsTableName, dbFieldName, conditionExpression)sqlParams = [newValue, jobID] + conditionValuewith ConnectionFactory.get() as conn:result = conn.cursor.execute(query, sqlParams)return (result == )", "docstring": "Change the value of 1 field in a job to 'newValue', but only if the\n current value matches 'curValue'. The 'fieldName' is the public name of\n the field (camelBack, not the lower_case_only form as stored in the DB).\n\n This method is used for example by HypersearcWorkers to update the\n engWorkerState field periodically. 
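The "public" (camelBack) field names used by these getters come from the snake_case database column names via _columnNameDBToPublic(), whose docstring gives the example _word1_word2_word3 => word1Word2Word3. A sketch of that conversion, assuming the leading underscore of engine-internal columns is simply stripped so the result is a valid namedtuple field name:

def _demoColumnNameDBToPublic(dbName):
    words = dbName.lstrip("_").split("_")
    return words[0] + "".join(w[:1].upper() + w[1:] for w in words[1:])

for name in ("job_id", "_eng_last_update_time", "completion_reason"):
    print(name, "->", _demoColumnNameDBToPublic(name))
# job_id -> jobId
# _eng_last_update_time -> engLastUpdateTime
# completion_reason -> completionReason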
By qualifying on curValue, it insures\n that only 1 worker at a time is elected to perform the next scheduled\n periodic sweep of the models.\n\n Parameters:\n ----------------------------------------------------------------\n jobID: jobID of the job record to modify\n fieldName: public field name of the field\n newValue: new value of the field to set\n curValue: current value to qualify against\n\n retval: True if we successfully modified the field\n False if curValue did not match", "id": "f17555:c1:m47"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef jobIncrementIntField(self, jobID, fieldName, increment=,useConnectionID=False):", "body": "dbFieldName = self._jobs.pubToDBNameDict[fieldName]with ConnectionFactory.get() as conn:query = ''''% (self.jobsTableName, dbFieldName, dbFieldName)sqlParams = [increment, jobID]if useConnectionID:query += ''sqlParams.append(self._connectionID)result = conn.cursor.execute(query, sqlParams)if result != :raise RuntimeError(\"\"\"\" % (dbFieldName, jobID, self._connectionID, result, query))", "docstring": "Incremet the value of 1 field in a job by increment. The 'fieldName' is\n the public name of the field (camelBack, not the lower_case_only form as\n stored in the DB).\n\n This method is used for example by HypersearcWorkers to update the\n engWorkerState field periodically. By qualifying on curValue, it insures\n that only 1 worker at a time is elected to perform the next scheduled\n periodic sweep of the models.\n\n Parameters:\n ----------------------------------------------------------------\n jobID: jobID of the job record to modify\n fieldName: public field name of the field\n increment: increment is added to the current value of the field", "id": "f17555:c1:m48"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef jobUpdateResults(self, jobID, results):", "body": "with ConnectionFactory.get() as conn:query = '''''' % (self.jobsTableName,)conn.cursor.execute(query, [results, jobID])", "docstring": "Update the results string and last-update-time fields of a model.\n\n Parameters:\n ----------------------------------------------------------------\n jobID: job ID of model to modify\n results: new results (json dict string)", "id": "f17555:c1:m49"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef modelsClearAll(self):", "body": "self._logger.info('',self.modelsTableName)with ConnectionFactory.get() as conn:query = '' % (self.modelsTableName)conn.cursor.execute(query)", "docstring": "Delete all models from the models table\n\n Parameters:\n ----------------------------------------------------------------", "id": "f17555:c1:m50"} {"signature": "@logExceptions(_LOGGER)def modelInsertAndStart(self, jobID, params, paramsHash, particleHash=None):", "body": "if particleHash is None:particleHash = paramsHashparamsHash = self._normalizeHash(paramsHash)particleHash = self._normalizeHash(particleHash)def findExactMatchNoRetries(conn):return self._getOneMatchingRowNoRetries(self._models, conn,{'':jobID, '':paramsHash,'':particleHash},['', ''])@g_retrySQLdef findExactMatchWithRetries():with ConnectionFactory.get() as conn:return findExactMatchNoRetries(conn)row = findExactMatchWithRetries()if row is not None:return (row[], False)@g_retrySQLdef insertModelWithRetries():\"\"\"\"\"\"with ConnectionFactory.get() as conn:query = ''''''''''% (self.modelsTableName,)sqlParams = (jobID, params, self.STATUS_RUNNING, paramsHash,particleHash, self._connectionID)try:numRowsAffected = conn.cursor.execute(query, sqlParams)except Exception as e:if \"\" not in 
str(e):raiseself._logger.info('''',jobID, paramsHash.encode(''),particleHash.encode(''), e)else:if numRowsAffected == :conn.cursor.execute('')modelID = conn.cursor.fetchall()[][]if modelID != :return (modelID, True)else:self._logger.warn('''',jobID, paramsHash, particleHash)else:self._logger.error('''''',numRowsAffected, jobID, paramsHash, particleHash)row = findExactMatchNoRetries(conn)if row is not None:(modelID, connectionID) = rowreturn (modelID, connectionID == self._connectionID)query = ''''''''''% (self.modelsTableName,)sqlParams = [jobID, paramsHash, particleHash]numRowsFound = conn.cursor.execute(query, sqlParams)assert numRowsFound == , ('''') % (jobID, paramsHash, particleHash, numRowsFound)(modelID,) = conn.cursor.fetchall()[]return (modelID, False)return insertModelWithRetries()", "docstring": "Insert a new unique model (based on params) into the model table in the\n \"running\" state. This will return two things: whether or not the model was\n actually inserted (i.e. that set of params isn't already in the table) and\n the modelID chosen for that set of params. Even if the model was not\n inserted by this call (it was already there) the modelID of the one already\n inserted is returned.\n\n Parameters:\n ----------------------------------------------------------------\n jobID: jobID of the job to add models for\n params: params for this model\n paramsHash hash of the params, generated by the worker\n particleHash hash of the particle info (for PSO). If not provided,\n then paramsHash will be used.\n\n retval: (modelID, wasInserted)\n modelID: the model ID for this set of params\n wasInserted: True if this call ended up inserting the\n new model. False if this set of params was already in\n the model table.", "id": "f17555:c1:m51"} {"signature": "@logExceptions(_LOGGER)def modelsInfo(self, modelIDs):", "body": "assert isinstance(modelIDs, self._SEQUENCE_TYPES), (\"\") % (type(modelIDs),)assert modelIDs, \"\"rows = self._getMatchingRowsWithRetries(self._models, dict(model_id=modelIDs),[self._models.pubToDBNameDict[f]for f in self._models.modelInfoNamedTuple._fields])results = [self._models.modelInfoNamedTuple._make(r) for r in rows]assert len(results) == len(modelIDs), \"\" % (set(modelIDs) - set(r.modelId for r in results))return results", "docstring": "Get ALL info for a set of models\n\n WARNING!!!: The order of the results are NOT necessarily in the same order as\n the order of the model IDs passed in!!!\n\n Parameters:\n ----------------------------------------------------------------\n modelIDs: list of model IDs\n retval: list of nametuples containing all the fields stored for each\n model.", "id": "f17555:c1:m52"} {"signature": "@logExceptions(_LOGGER)def modelsGetFields(self, modelIDs, fields):", "body": "assert len(fields) >= , ''isSequence = isinstance(modelIDs, self._SEQUENCE_TYPES)if isSequence:assert len(modelIDs) >=, ''else:modelIDs = [modelIDs]rows = self._getMatchingRowsWithRetries(self._models, dict(model_id=modelIDs),[''] + [self._models.pubToDBNameDict[f] for f in fields])if len(rows) < len(modelIDs):raise RuntimeError(\"\" % ((set(modelIDs) - set(r[] for r in rows)),))if not isSequence:return list(rows[][:])return [(r[], list(r[:])) for r in rows]", "docstring": "Fetch the values of 1 or more fields from a sequence of model records.\n Here, 'fields' is a list with the names of the fields to fetch. 
The names\n are the public names of the fields (camelBack, not the lower_case_only form\n as stored in the DB).\n\n WARNING!!!: The order of the results are NOT necessarily in the same order\n as the order of the model IDs passed in!!!\n\n\n Parameters:\n ----------------------------------------------------------------\n modelIDs: A single modelID or sequence of modelIDs\n fields: A list of fields to return\n\n Returns: If modelIDs is a sequence:\n a list of tuples->(modelID, [field1, field2,...])\n If modelIDs is a single modelID:\n a list of field values->[field1, field2,...]", "id": "f17555:c1:m53"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef modelsGetFieldsForJob(self, jobID, fields, ignoreKilled=False):", "body": "assert len(fields) >= , ''dbFields = [self._models.pubToDBNameDict[x] for x in fields]dbFieldsStr = ''.join(dbFields)query = ''''% (dbFieldsStr, self.modelsTableName)sqlParams = [jobID]if ignoreKilled:query += ''sqlParams.append(self.CMPL_REASON_KILLED)with ConnectionFactory.get() as conn:conn.cursor.execute(query, sqlParams)rows = conn.cursor.fetchall()if rows is None:self._logger.error(\"\"\"\",query, traceback.format_exc())return [(r[], list(r[:])) for r in rows]", "docstring": "Gets the specified fields for all the models for a single job. This is\n similar to modelsGetFields\n\n Parameters:\n ----------------------------------------------------------------\n jobID: jobID for the models to be searched\n fields: A list of fields to return\n ignoreKilled: (True/False). If True, this will ignore models that\n have been killed\n\n Returns: a (possibly empty) list of tuples as follows\n [\n (model_id1, [field1, ..., fieldn]),\n (model_id2, [field1, ..., fieldn]),\n (model_id3, [field1, ..., fieldn])\n ...\n ]\n\n NOTE: since there is a window of time between a job getting inserted into\n jobs table and the job's worker(s) starting up and creating models, an\n empty-list result is one of the normal outcomes.", "id": "f17555:c1:m54"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef modelsGetFieldsForCheckpointed(self, jobID, fields):", "body": "assert len(fields) >= , \"\"with ConnectionFactory.get() as conn:dbFields = [self._models.pubToDBNameDict[f] for f in fields]dbFieldStr = \"\".join(dbFields)query = ''''.format(fields=dbFieldStr, models=self.modelsTableName)conn.cursor.execute(query, [jobID])rows = conn.cursor.fetchall()return [(r[], list(r[:])) for r in rows]", "docstring": "Gets fields from all models in a job that have been checkpointed. 
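A hedged usage sketch for modelsGetFieldsForJob above; the dao handle, jobID, and the public field names 'results' and 'numRecords' are assumptions for illustration only.

    # Fetch selected fields for every non-killed model of a job (hypothetical call).
    rows = dao.modelsGetFieldsForJob(jobID, ['results', 'numRecords'],
                                     ignoreKilled=True)
    for modelID, (results, numRecords) in rows:
        pass  # each row pairs a model id with the requested field values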
This is\nused to figure out whether or not a new model should be checkpointed.\n\nParameters:\n-----------------------------------------------------------------------\njobID: The jobID for the models to be searched\nfields: A list of fields to return\n\nReturns: a (possibly-empty) list of tuples as follows\n [\n (model_id1, [field1, ..., fieldn]),\n (model_id2, [field1, ..., fieldn]),\n (model_id3, [field1, ..., fieldn])\n ...\n ]", "id": "f17555:c1:m55"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef modelSetFields(self, modelID, fields, ignoreUnchanged = False):", "body": "assignmentExpressions = ''.join('' % (self._models.pubToDBNameDict[f],) for f in fields.keys())assignmentValues = list(fields.values())query = ''''% (self.modelsTableName, assignmentExpressions)sqlParams = assignmentValues + [modelID]with ConnectionFactory.get() as conn:numAffectedRows = conn.cursor.execute(query, sqlParams)self._logger.debug(\"\",numAffectedRows, query, sqlParams)if numAffectedRows != and not ignoreUnchanged:raise RuntimeError((\"\"\"\") % (fields, modelID, self._connectionID, numAffectedRows, query,sqlParams,))", "docstring": "Change the values of 1 or more fields in a model. Here, 'fields' is a\n dict with the name/value pairs to change. The names are the public names of\n the fields (camelBack, not the lower_case_only form as stored in the DB).\n\n Parameters:\n ----------------------------------------------------------------\n modelID: modelID of the model record\n\n fields: dictionary of fields to change\n\n ignoreUnchanged: The default behavior is to throw a\n RuntimeError if no rows are affected. This could either be\n because:\n 1) there was no matching modelID\n 2) or if the data to update matched the data in the DB exactly.\n\n Set this parameter to True if you expect case 2 and wish to\n suppress the error.", "id": "f17555:c1:m56"} {"signature": "@logExceptions(_LOGGER)def modelsGetParams(self, modelIDs):", "body": "assert isinstance(modelIDs, self._SEQUENCE_TYPES), (\"\") % (type(modelIDs),)assert len(modelIDs) >= , \"\"rows = self._getMatchingRowsWithRetries(self._models, {'' : modelIDs},[self._models.pubToDBNameDict[f]for f in self._models.getParamsNamedTuple._fields])assert len(rows) == len(modelIDs), \"\" % ((set(modelIDs) - set(r[] for r in rows)),)return [self._models.getParamsNamedTuple._make(r) for r in rows]", "docstring": "Get the params and paramsHash for a set of models.\n\n WARNING!!!: The order of the results are NOT necessarily in the same order as\n the order of the model IDs passed in!!!\n\n Parameters:\n ----------------------------------------------------------------\n modelIDs: list of model IDs\n retval: list of result namedtuples defined in\n ClientJobsDAO._models.getParamsNamedTuple. 
Each tuple\n contains: (modelId, params, engParamsHash)", "id": "f17555:c1:m57"} {"signature": "@logExceptions(_LOGGER)def modelsGetResultAndStatus(self, modelIDs):", "body": "assert isinstance(modelIDs, self._SEQUENCE_TYPES), (\"\") % type(modelIDs)assert len(modelIDs) >= , \"\"rows = self._getMatchingRowsWithRetries(self._models, {'' : modelIDs},[self._models.pubToDBNameDict[f]for f in self._models.getResultAndStatusNamedTuple._fields])assert len(rows) == len(modelIDs), \"\" % ((set(modelIDs) - set(r[] for r in rows)),)return [self._models.getResultAndStatusNamedTuple._make(r) for r in rows]", "docstring": "Get the results string and other status fields for a set of models.\n\n WARNING!!!: The order of the results are NOT necessarily in the same order\n as the order of the model IDs passed in!!!\n\n For each model, this returns a tuple containing:\n (modelID, results, status, updateCounter, numRecords, completionReason,\n completionMsg, engParamsHash\n\n Parameters:\n ----------------------------------------------------------------\n modelIDs: list of model IDs\n retval: list of result tuples. Each tuple contains:\n (modelID, results, status, updateCounter, numRecords,\n completionReason, completionMsg, engParamsHash)", "id": "f17555:c1:m58"} {"signature": "@logExceptions(_LOGGER)def modelsGetUpdateCounters(self, jobID):", "body": "rows = self._getMatchingRowsWithRetries(self._models, {'' : jobID},[self._models.pubToDBNameDict[f]for f in self._models.getUpdateCountersNamedTuple._fields])return [self._models.getUpdateCountersNamedTuple._make(r) for r in rows]", "docstring": "Return info on all of the models that are in already in the models\n table for a given job. For each model, this returns a tuple\n containing: (modelID, updateCounter).\n\n Note that we don't return the results for all models, since the results\n string could be quite large. The information we are returning is\n just 2 integer fields.\n\n Parameters:\n ----------------------------------------------------------------\n jobID: jobID to query\n retval: (possibly empty) list of tuples. Each tuple contains:\n (modelID, updateCounter)", "id": "f17555:c1:m59"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef modelUpdateResults(self, modelID, results=None, metricValue =None,numRecords=None):", "body": "assignmentExpressions = ['','']assignmentValues = []if results is not None:assignmentExpressions.append('')assignmentValues.append(results)if numRecords is not None:assignmentExpressions.append('')assignmentValues.append(numRecords)if metricValue is not None and (metricValue==metricValue):assignmentExpressions.append('')assignmentValues.append(float(metricValue))query = ''''% (self.modelsTableName, ''.join(assignmentExpressions))sqlParams = assignmentValues + [modelID, self._connectionID]with ConnectionFactory.get() as conn:numRowsAffected = conn.cursor.execute(query, sqlParams)if numRowsAffected != :raise InvalidConnectionException((\"\"\"\"\"\") % (modelID,self._connectionID, numRowsAffected,))", "docstring": "Update the results string, and/or num_records fields of\n a model. 
This will fail if the model does not currently belong to this\n client (connection_id doesn't match).\n\n Parameters:\n ----------------------------------------------------------------\n modelID: model ID of model to modify\n results: new results, or None to ignore\n metricValue: the value of the metric being optimized, or None to ignore\n numRecords: new numRecords, or None to ignore", "id": "f17555:c1:m60"} {"signature": "@logExceptions(_LOGGER)@g_retrySQLdef modelSetCompleted(self, modelID, completionReason, completionMsg,cpuTime=, useConnectionID=True):", "body": "if completionMsg is None:completionMsg = ''query = ''''''''''''''''% (self.modelsTableName,)sqlParams = [self.STATUS_COMPLETED, completionReason, completionMsg,cpuTime, modelID]if useConnectionID:query += \"\"sqlParams.append(self._connectionID)with ConnectionFactory.get() as conn:numRowsAffected = conn.cursor.execute(query, sqlParams)if numRowsAffected != :raise InvalidConnectionException((\"\"\"\"\"\") % (modelID, self._connectionID, numRowsAffected))", "docstring": "Mark a model as completed, with the given completionReason and\n completionMsg. This will fail if the model does not currently belong to this\n client (connection_id doesn't match).\n\n Parameters:\n ----------------------------------------------------------------\n modelID: model ID of model to modify\n completionReason: completionReason string\n completionMsg: completionMsg string\n cpuTime: amount of CPU time spent on this model\n useConnectionID: True if the connection id of the calling function\n must be the same as the connection that created the\n job. Set to True for hypersearch workers, which use\n this mechanism for orphaned model detection.", "id": "f17555:c1:m62"} {"signature": "@logExceptions(_LOGGER)def modelAdoptNextOrphan(self, jobId, maxUpdateInterval):", "body": "@g_retrySQLdef findCandidateModelWithRetries():modelID = Nonewith ConnectionFactory.get() as conn:query = ''''''''''''''% (self.modelsTableName,)sqlParams = [self.STATUS_RUNNING, jobId, maxUpdateInterval]numRows = conn.cursor.execute(query, sqlParams)rows = conn.cursor.fetchall()assert numRows <= , \"\" % numRowsif numRows == :(modelID,) = rows[]return modelID@g_retrySQLdef adoptModelWithRetries(modelID):adopted = Falsewith ConnectionFactory.get() as conn:query = ''''''''''''''''% (self.modelsTableName,)sqlParams = [self._connectionID, modelID, self.STATUS_RUNNING,maxUpdateInterval]numRowsAffected = conn.cursor.execute(query, sqlParams)assert numRowsAffected <= , '' % (numRowsAffected,)if numRowsAffected == :adopted = Trueelse:(status, connectionID) = self._getOneMatchingRowNoRetries(self._models, conn, {'':modelID},['', ''])adopted = (status == self.STATUS_RUNNING andconnectionID == self._connectionID)return adoptedadoptedModelID = Nonewhile True:modelID = findCandidateModelWithRetries()if modelID is None:breakif adoptModelWithRetries(modelID):adoptedModelID = modelIDbreakreturn adoptedModelID", "docstring": "Look through the models table for an orphaned model, which is a model\n that is not completed yet, whose _eng_last_update_time is more than\n maxUpdateInterval seconds ago.\n\n If one is found, change its _eng_worker_conn_id to the current worker's\n and return the model id.\n\n Parameters:\n ----------------------------------------------------------------\n retval: modelId of the model we adopted, or None if none found", "id": "f17555:c1:m63"} {"signature": "def estimateAnomalyLikelihoods(anomalyScores,averagingWindow=,skipRecords=,verbosity=):", "body": "if verbosity > 
:print(\"\")print(\"\", len(anomalyScores))print(\"\", skipRecords)print(\"\", anomalyScores[:min(, len(anomalyScores))])if len(anomalyScores) == :raise ValueError(\"\")aggRecordList, historicalValues, total = _anomalyScoreMovingAverage(anomalyScores,windowSize = averagingWindow,verbosity = verbosity)s = [r[] for r in aggRecordList]dataValues = numpy.array(s)if len(aggRecordList) <= skipRecords:distributionParams = nullDistribution(verbosity = verbosity)else:distributionParams = estimateNormal(dataValues[skipRecords:])s = [r[] for r in aggRecordList]if all([isinstance(r[], numbers.Number) for r in aggRecordList]):metricValues = numpy.array(s)metricDistribution = estimateNormal(metricValues[skipRecords:],performLowerBoundCheck=False)if metricDistribution[\"\"] < :distributionParams = nullDistribution(verbosity = verbosity)likelihoods = numpy.array(dataValues, dtype=float)for i, s in enumerate(dataValues):likelihoods[i] = tailProbability(s, distributionParams)filteredLikelihoods = numpy.array(_filterLikelihoods(likelihoods) )params = {\"\": distributionParams,\"\": {\"\": historicalValues,\"\": total,\"\": averagingWindow,},\"\":list(likelihoods[-min(averagingWindow, len(likelihoods)):]),}if verbosity > :print(\"\")print(params)print(\"\", len(likelihoods))print(\"\", (filteredLikelihoods[:min(, len(filteredLikelihoods))] ))print(\"\")return (filteredLikelihoods, aggRecordList, params)", "docstring": "Given a series of anomaly scores, compute the likelihood for each score. This\nfunction should be called once on a bunch of historical anomaly scores for an\ninitial estimate of the distribution. It should be called again every so often\n(say every 50 records) to update the estimate.\n\n:param anomalyScores: a list of records. Each record is a list with the\n following three elements: [timestamp, value, score]\n\n Example::\n\n [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]\n\n For best results, the list should be between 1000\n and 10,000 records\n:param averagingWindow: integer number of records to average over\n:param skipRecords: integer specifying number of records to skip when\n estimating distributions. 
If skip records are >=\n len(anomalyScores), a very broad distribution is returned\n that makes everything pretty likely.\n:param verbosity: integer controlling extent of printouts for debugging\n\n 0 = none\n 1 = occasional information\n 2 = print every record\n\n:returns: 3-tuple consisting of:\n\n - likelihoods\n\n numpy array of likelihoods, one for each aggregated point\n\n - avgRecordList\n\n list of averaged input records\n\n - params\n\n a small JSON dict that contains the state of the estimator", "id": "f17556:m0"} {"signature": "def updateAnomalyLikelihoods(anomalyScores,params,verbosity=):", "body": "if verbosity > :print(\"\")print(\"\", len(anomalyScores))print(\"\", anomalyScores[:min(, len(anomalyScores))])print(\"\", params)if len(anomalyScores) == :raise ValueError(\"\")if not isValidEstimatorParams(params):raise ValueError(\"\")if \"\" not in params:params[\"\"] = []historicalValues = params[\"\"][\"\"]total = params[\"\"][\"\"]windowSize = params[\"\"][\"\"]aggRecordList = numpy.zeros(len(anomalyScores), dtype=float)likelihoods = numpy.zeros(len(anomalyScores), dtype=float)for i, v in enumerate(anomalyScores):newAverage, historicalValues, total = (MovingAverage.compute(historicalValues, total, v[], windowSize))aggRecordList[i] = newAveragelikelihoods[i] = tailProbability(newAverage, params[\"\"])likelihoods2 = params[\"\"] + list(likelihoods)filteredLikelihoods = _filterLikelihoods(likelihoods2)likelihoods[:] = filteredLikelihoods[-len(likelihoods):]historicalLikelihoods = likelihoods2[-min(windowSize, len(likelihoods2)):]newParams = {\"\": params[\"\"],\"\": {\"\": historicalValues,\"\": total,\"\": windowSize,},\"\": historicalLikelihoods,}assert len(newParams[\"\"]) <= windowSizeif verbosity > :print(\"\", len(likelihoods))print(\"\", likelihoods[:min(, len(likelihoods))])print(\"\")return (likelihoods, aggRecordList, newParams)", "docstring": "Compute updated probabilities for anomalyScores using the given params.\n\n:param anomalyScores: a list of records. Each record is a list with the\n following three elements: [timestamp, value, score]\n\n Example::\n\n [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]\n\n:param params: the JSON dict returned by estimateAnomalyLikelihoods\n:param verbosity: integer controlling extent of printouts for debugging\n:type verbosity: int\n\n:returns: 3-tuple consisting of:\n\n - likelihoods\n\n numpy array of likelihoods, one for each aggregated point\n\n - avgRecordList\n\n list of averaged input records\n\n - params\n\n an updated JSON object containing the state of this metric.", "id": "f17556:m1"} {"signature": "def _filterLikelihoods(likelihoods,redThreshold=, yellowThreshold=):", "body": "redThreshold = - redThresholdyellowThreshold = - yellowThresholdfilteredLikelihoods = [likelihoods[]]for i, v in enumerate(likelihoods[:]):if v <= redThreshold:if likelihoods[i] > redThreshold:filteredLikelihoods.append(v)else:filteredLikelihoods.append(yellowThreshold)else:filteredLikelihoods.append(v)return filteredLikelihoods", "docstring": "Filter the list of raw (pre-filtered) likelihoods so that we only preserve\nsharp increases in likelihood. 
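The two likelihood functions above are designed to be used together: estimateAnomalyLikelihoods builds the initial distribution from historical [timestamp, value, anomalyScore] records, and updateAnomalyLikelihoods folds newer records into the returned params. A minimal usage sketch with made-up records:

    import datetime

    # Made-up records in the documented [timestamp, value, anomalyScore] form;
    # ideally 1000-10000 historical records are used for the initial estimate.
    history = [
        [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0],
        [datetime.datetime(2013, 8, 10, 23, 5), 6.5, 0.1],
        [datetime.datetime(2013, 8, 10, 23, 10), 6.4, 0.2],
    ]
    likelihoods, avgRecords, params = estimateAnomalyLikelihoods(history)

    # Later, score a new record against the saved estimator state.
    newRecord = [datetime.datetime(2013, 8, 10, 23, 15), 25.0, 0.9]
    newLikelihoods, _, params = updateAnomalyLikelihoods([newRecord], params)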
'likelihoods' can be a numpy array of floats or\na list of floats.\n\n:returns: A new list of floats likelihoods containing the filtered values.", "id": "f17556:m2"} {"signature": "def _anomalyScoreMovingAverage(anomalyScores,windowSize=,verbosity=,):", "body": "historicalValues = []total = averagedRecordList = [] for record in anomalyScores:if not isinstance(record, (list, tuple)) or len(record) != :if verbosity >= :print(\"\", record)continueavg, historicalValues, total = (MovingAverage.compute(historicalValues, total, record[], windowSize))averagedRecordList.append( [record[], record[], avg] )if verbosity > :print(\"\", record)print(\"\", [record[], record[], avg])return averagedRecordList, historicalValues, total", "docstring": "Given a list of anomaly scores return a list of averaged records.\nanomalyScores is assumed to be a list of records of the form:\n [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]\n\nEach record in the returned list list contains:\n [datetime, value, averagedScore]\n\n*Note:* we only average the anomaly score.", "id": "f17556:m3"} {"signature": "def estimateNormal(sampleData, performLowerBoundCheck=True):", "body": "params = {\"\": \"\",\"\": numpy.mean(sampleData),\"\": numpy.var(sampleData),}if performLowerBoundCheck:if params[\"\"] < :params[\"\"] = if params[\"\"] < :params[\"\"] = if params[\"\"] > :params[\"\"] = math.sqrt(params[\"\"])else:params[\"\"] = return params", "docstring": ":param sampleData:\n:type sampleData: Numpy array.\n:param performLowerBoundCheck:\n:type performLowerBoundCheck: bool\n:returns: A dict containing the parameters of a normal distribution based on\n the ``sampleData``.", "id": "f17556:m4"} {"signature": "def nullDistribution(verbosity=):", "body": "if verbosity>:print(\"\")return {\"\": \"\",\"\": ,\"\": ,\"\": ,}", "docstring": ":param verbosity: integer controlling extent of printouts for debugging\n:type verbosity: int\n:returns: A distribution that is very broad and makes every anomaly score\n between 0 and 1 pretty likely.", "id": "f17556:m5"} {"signature": "def tailProbability(x, distributionParams):", "body": "if \"\" not in distributionParams or \"\" not in distributionParams:raise RuntimeError(\"\")if x < distributionParams[\"\"]:xp = * distributionParams[\"\"] - xreturn tailProbability(xp, distributionParams)z = (x - distributionParams[\"\"]) / distributionParams[\"\"]return * math.erfc(z/)", "docstring": "Given the normal distribution specified by the mean and standard deviation\nin distributionParams, return the probability of getting samples further\nfrom the mean. For values above the mean, this is the probability of getting\nsamples > x and for values below the mean, the probability of getting\nsamples < x. This is the Q-function: the tail probability of the normal distribution.\n\n:param distributionParams: dict with 'mean' and 'stdev' of the distribution", "id": "f17556:m6"} {"signature": "def isValidEstimatorParams(p):", "body": "if not isinstance(p, dict):return Falseif \"\" not in p:return Falseif \"\" not in p:return Falsedist = p[\"\"]if not (\"\" in dist and \"\" in distand \"\" in dist and \"\" in dist):return Falsereturn True", "docstring": ":returns: ``True`` if ``p`` is a valid estimator params as might be returned\n by ``estimateAnomalyLikelihoods()`` or ``updateAnomalyLikelihoods``,\n ``False`` otherwise. 
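For reference, the tail probability described above is the normal Q-function; a standalone sketch of that calculation (assumed form, not the stripped body above):

    import math

    def tail_probability_sketch(x, mean, stdev):
        # Values below the mean are reflected so the result is always the
        # probability of drawing a sample farther from the mean than x.
        if x < mean:
            return tail_probability_sketch(2 * mean - x, mean, stdev)
        z = (x - mean) / stdev
        # Q(z) = P(Z > z) for a standard normal variable.
        return 0.5 * math.erfc(z / math.sqrt(2))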
Just does some basic validation.", "id": "f17556:m7"} {"signature": "def __init__(self,claLearningPeriod=None,learningPeriod=,estimationSamples=,historicWindowSize=,reestimationPeriod=):", "body": "if historicWindowSize < estimationSamples:raise ValueError(\"\")self._iteration = self._historicalScores = collections.deque(maxlen=historicWindowSize)self._distribution = Noneif claLearningPeriod != None:print(\"\")self._learningPeriod = claLearningPeriodelse:self._learningPeriod = learningPeriodself._probationaryPeriod = self._learningPeriod + estimationSamplesself._reestimationPeriod = reestimationPeriod", "docstring": "NOTE: Anomaly likelihood scores are reported at a flat 0.5 for\nlearningPeriod + estimationSamples iterations.\n\nclaLearningPeriod and learningPeriod are specifying the same variable,\nalthough claLearningPeriod is a deprecated name for it.\n\n:param learningPeriod: (claLearningPeriod: deprecated) - (int) the number of\n iterations required for the algorithm to learn the basic patterns in the\n dataset and for the anomaly score to 'settle down'. The default is based\n on empirical observations but in reality this could be larger for more\n complex domains. The downside if this is too large is that real anomalies\n might get ignored and not flagged.\n\n:param estimationSamples: (int) the number of reasonable anomaly scores\n required for the initial estimate of the Gaussian. The default of 100\n records is reasonable - we just need sufficient samples to get a decent\n estimate for the Gaussian. It's unlikely you will need to tune this since\n the Gaussian is re-estimated every 10 iterations by default.\n\n:param historicWindowSize: (int) size of sliding window of historical\n data points to maintain for periodic reestimation of the Gaussian. Note:\n the default of 8640 is based on a month's worth of history at 5-minute\n intervals.\n\n:param reestimationPeriod: (int) how often we re-estimate the Gaussian\n distribution. The ideal is to re-estimate every iteration but this is a\n performance hit. In general the system is not very sensitive to this\n number as long as it is small relative to the total number of records\n processed.", "id": "f17556:c0:m0"} {"signature": "@staticmethoddef computeLogLikelihood(likelihood):", "body": "return math.log( - likelihood) / -", "docstring": "Compute a log scale representation of the likelihood value. Since the\nlikelihood computations return low probabilities that often go into four 9's\nor five 9's, a log value is more useful for visualization, thresholding,\netc.", "id": "f17556:c0:m3"} {"signature": "@staticmethoddef _calcSkipRecords(numIngested, windowSize, learningPeriod):", "body": "numShiftedOut = max(, numIngested - windowSize)return min(numIngested, max(, learningPeriod - numShiftedOut))", "docstring": "Return the value of skipRecords for passing to estimateAnomalyLikelihoods\n\n If `windowSize` is very large (bigger than the amount of data) then this\n could just return `learningPeriod`. 
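A plain-Python sketch of the skip-record arithmetic this docstring describes; the numeric constants stripped from the body above are assumed to be 0.

    def calc_skip_records_sketch(num_ingested, window_size, learning_period):
        # Points that have already slid out of the historical window no longer
        # need to be skipped.
        num_shifted_out = max(0, num_ingested - window_size)
        # Skip whatever remains of the learning period, but never more points
        # than were actually ingested.
        return min(num_ingested, max(0, learning_period - num_shifted_out))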
But when some values have fallen out of\n the historical sliding window of anomaly records, then we have to take those\n into account as well so we return the `learningPeriod` minus the number\n shifted out.\n\n :param numIngested - (int) number of data points that have been added to the\n sliding window of historical data points.\n :param windowSize - (int) size of sliding window of historical data points.\n :param learningPeriod - (int) the number of iterations required for the\n algorithm to learn the basic patterns in the dataset and for the anomaly\n score to 'settle down'.", "id": "f17556:c0:m4"} {"signature": "@classmethoddef read(cls, proto):", "body": "anomalyLikelihood = object.__new__(cls)anomalyLikelihood._iteration = proto.iterationanomalyLikelihood._historicalScores = collections.deque(maxlen=proto.historicWindowSize)for i, score in enumerate(proto.historicalScores):anomalyLikelihood._historicalScores.append((i, score.value,score.anomalyScore))if proto.distribution.name: anomalyLikelihood._distribution = dict()anomalyLikelihood._distribution[''] = dict()anomalyLikelihood._distribution[''][\"\"] = proto.distribution.nameanomalyLikelihood._distribution[''][\"\"] = proto.distribution.meananomalyLikelihood._distribution[''][\"\"] = proto.distribution.varianceanomalyLikelihood._distribution[''][\"\"] = proto.distribution.stdevanomalyLikelihood._distribution[\"\"] = {}anomalyLikelihood._distribution[\"\"][\"\"] = proto.distribution.movingAverage.windowSizeanomalyLikelihood._distribution[\"\"][\"\"] = []for value in proto.distribution.movingAverage.historicalValues:anomalyLikelihood._distribution[\"\"][\"\"].append(value)anomalyLikelihood._distribution[\"\"][\"\"] = proto.distribution.movingAverage.totalanomalyLikelihood._distribution[\"\"] = []for likelihood in proto.distribution.historicalLikelihoods:anomalyLikelihood._distribution[\"\"].append(likelihood)else:anomalyLikelihood._distribution = NoneanomalyLikelihood._probationaryPeriod = proto.probationaryPeriodanomalyLikelihood._learningPeriod = proto.learningPeriodanomalyLikelihood._reestimationPeriod = proto.reestimationPeriodreturn anomalyLikelihood", "docstring": "capnp deserialization method for the anomaly likelihood object\n\n :param proto: (Object) capnp proto object specified in\n nupic.regions.anomaly_likelihood.capnp\n\n :returns: (Object) the deserialized AnomalyLikelihood object", "id": "f17556:c0:m6"} {"signature": "def write(self, proto):", "body": "proto.iteration = self._iterationpHistScores = proto.init('', len(self._historicalScores))for i, score in enumerate(list(self._historicalScores)):_, value, anomalyScore = scorerecord = pHistScores[i]record.value = float(value)record.anomalyScore = float(anomalyScore)if self._distribution:proto.distribution.name = self._distribution[\"\"][\"\"]proto.distribution.mean = float(self._distribution[\"\"][\"\"])proto.distribution.variance = float(self._distribution[\"\"][\"\"])proto.distribution.stdev = float(self._distribution[\"\"][\"\"])proto.distribution.movingAverage.windowSize = float(self._distribution[\"\"][\"\"])historicalValues = self._distribution[\"\"][\"\"]pHistValues = proto.distribution.movingAverage.init(\"\", len(historicalValues))for i, value in enumerate(historicalValues):pHistValues[i] = float(value)proto.distribution.movingAverage.total = float(self._distribution[\"\"][\"\"])historicalLikelihoods = self._distribution[\"\"]pHistLikelihoods = proto.distribution.init(\"\",len(historicalLikelihoods))for i, likelihood in 
enumerate(historicalLikelihoods):pHistLikelihoods[i] = float(likelihood)proto.probationaryPeriod = self._probationaryPeriodproto.learningPeriod = self._learningPeriodproto.reestimationPeriod = self._reestimationPeriodproto.historicWindowSize = self._historicalScores.maxlen", "docstring": "capnp serialization method for the anomaly likelihood object\n\n :param proto: (Object) capnp proto object specified in\n nupic.regions.anomaly_likelihood.capnp", "id": "f17556:c0:m7"} {"signature": "def anomalyProbability(self, value, anomalyScore, timestamp=None):", "body": "if timestamp is None:timestamp = self._iterationdataPoint = (timestamp, value, anomalyScore)if self._iteration < self._probationaryPeriod:likelihood = else:if ( (self._distribution is None) or(self._iteration % self._reestimationPeriod == ) ):numSkipRecords = self._calcSkipRecords(numIngested=self._iteration,windowSize=self._historicalScores.maxlen,learningPeriod=self._learningPeriod)_, _, self._distribution = estimateAnomalyLikelihoods(self._historicalScores,skipRecords=numSkipRecords)likelihoods, _, self._distribution = updateAnomalyLikelihoods([dataPoint],self._distribution)likelihood = - likelihoods[]self._historicalScores.append(dataPoint)self._iteration += return likelihood", "docstring": "Compute the probability that the current value plus anomaly score represents\nan anomaly given the historical distribution of anomaly scores. The closer\nthe number is to 1, the higher the chance it is an anomaly.\n\n:param value: the current metric (\"raw\") input value, eg. \"orange\", or\n '21.2' (deg. Celsius), ...\n:param anomalyScore: the current anomaly score\n:param timestamp: [optional] timestamp of the ocurrence,\n default (None) results in using iteration step.\n:returns: the anomalyLikelihood for this record.", "id": "f17556:c0:m8"} {"signature": "def setRandomSeed(seed):", "body": "random.seed(seed)numpy.random.seed(seed)", "docstring": "Set the random seeds. Helpful to make unit tests repeatable", "id": "f17557:m0"} {"signature": "def addNoise(input, noise=, doForeground=True, doBackground=True):", "body": "if doForeground and doBackground:return numpy.abs(input - (numpy.random.random(input.shape) < noise))else:if doForeground:return numpy.logical_and(input, numpy.random.random(input.shape) > noise)if doBackground:return numpy.logical_or(input, numpy.random.random(input.shape) < noise)return input", "docstring": "Add noise to the given input.\n\nParameters:\n-----------------------------------------------\ninput: the input to add noise to\nnoise: how much noise to add\ndoForeground: If true, turn off some of the 1 bits in the input\ndoBackground: If true, turn on some of the 0 bits in the input", "id": "f17557:m1"} {"signature": "def generateCoincMatrix(nCoinc=, length=, activity=):", "body": "coincMatrix0 = SM32(int(nCoinc), int(length))theOnes = numpy.array([] * activity, dtype=numpy.float32)for rowIdx in range(nCoinc):coinc = numpy.array(random.sample(range(length),activity), dtype=numpy.uint32)coinc.sort()coincMatrix0.setRowFromSparse(rowIdx, coinc, theOnes)coincMatrix = SM32(int(nCoinc), int(length))coincMatrix.initializeWithFixedNNZR(activity)return coincMatrix0", "docstring": "Generate a coincidence matrix. 
This is used to generate random inputs to the\ntemporal learner and to compare the predicted output against.\n\nIt generates a matrix of nCoinc rows, each row has length 'length' and has\na total of 'activity' bits on.\n\nParameters:\n-----------------------------------------------\nnCoinc: the number of rows to generate\nlength: the length of each row\nactivity: the number of ones to put into each row.", "id": "f17557:m2"} {"signature": "def generateVectors(numVectors=, length=, activity=):", "body": "vectors = []coinc = numpy.zeros(length, dtype='')indexList = list(range(length))for i in range(numVectors):coinc[:] = coinc[random.sample(indexList, activity)] = vectors.append(coinc.copy())return vectors", "docstring": "Generate a list of random sparse distributed vectors. This is used to generate\ntraining vectors to the spatial or temporal learner and to compare the predicted\noutput against.\n\nIt generates a list of 'numVectors' elements, each element has length 'length'\nand has a total of 'activity' bits on.\n\nParameters:\n-----------------------------------------------\nnumVectors: the number of vectors to generate\nlength: the length of each row\nactivity: the number of ones to put into each row.", "id": "f17557:m3"} {"signature": "def generateSimpleSequences(nCoinc=, seqLength=[,,], nSeq=):", "body": "coincList = list(range(nCoinc))seqList = []for i in range(nSeq):if max(seqLength) <= nCoinc:seqList.append(random.sample(coincList, random.choice(seqLength)))else:len = random.choice(seqLength)seq = []for x in range(len):seq.append(random.choice(coincList))seqList.append(seq)return seqList", "docstring": "Generate a set of simple sequences. The elements of the sequences will be\nintegers from 0 to 'nCoinc'-1. The length of each sequence will be\nrandomly chosen from the 'seqLength' list.\n\nParameters:\n-----------------------------------------------\nnCoinc: the number of elements available to use in the sequences\nseqLength: a list of possible sequence lengths. The length of each\n sequence will be randomly chosen from here.\nnSeq: The number of sequences to generate\n\nretval: a list of sequences. Each sequence is itself a list\n containing the coincidence indices for that sequence.", "id": "f17557:m4"} {"signature": "def generateHubSequences(nCoinc=, hubs = [,], seqLength=[,,], nSeq=):", "body": "coincList = list(range(nCoinc))for hub in hubs:coincList.remove(hub)seqList = []for i in range(nSeq):length = random.choice(seqLength)-seq = random.sample(coincList,length)seq.insert(length//, random.choice(hubs))seqList.append(seq)return seqList", "docstring": "Generate a set of hub sequences. These are sequences which contain a hub\nelement in the middle. The elements of the sequences will be integers\nfrom 0 to 'nCoinc'-1. The hub elements will only appear in the middle of\neach sequence. The length of each sequence will be randomly chosen from the\n'seqLength' list.\n\nParameters:\n-----------------------------------------------\nnCoinc: the number of elements available to use in the sequences\nhubs: which of the elements will be used as hubs.\nseqLength: a list of possible sequence lengths. The length of each\n sequence will be randomly chosen from here.\nnSeq: The number of sequences to generate\n\nretval: a list of sequences. 
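A small usage sketch for the two sequence generators above; the argument values are arbitrary and the outputs are random, so the commented result is only indicative.

    import random

    random.seed(42)  # make the sketch repeatable
    simple = generateSimpleSequences(nCoinc=10, seqLength=[5, 6, 7], nSeq=3)
    hub = generateHubSequences(nCoinc=10, hubs=[2, 6], seqLength=[5, 7], nSeq=2)
    # e.g. simple -> [[3, 9, 0, 4, 1], ...]; each hub sequence contains 2 or 6
    # near the middle of the sequence.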
Each sequence is itself a list\n containing the coincidence indices for that sequence.", "id": "f17557:m5"} {"signature": "def genTestSeqsForLookback(nPatterns=, patternLen=, patternActivity=,seqLength=[,,], nSequences=):", "body": "patterns = generateCoincMatrix(nCoinc=nPatterns, length=patternLen,activity=patternActivity)similarity = []for i in range(nPatterns):similarity.append(patterns.rightVecProd(patterns.getRow(i)))similarity = numpy.array(similarity, dtype='')print(similarity)seqList1 = generateSimpleSequences(nCoinc=nPatterns, seqLength=seqLength,nSeq=nSequences)seqList2 = copy.deepcopy(seqList1)for i in range(,len(seqList2)):seqList2[i][] = random.randint(,nPatterns-)return (seqList1, seqList2, patterns)", "docstring": "Generate two sets of sequences. The first set of sequences is used to train\nthe sequence learner till it fills up capacity. The second set is then used\nto further train the system to test its generalization capability using the\none step look back idea. The second set of sequences are generated by modifying\nthe first set\n\nParameters:\n-----------------------------------------------\nnPatterns: the number of patterns to use in the sequences.\npatternLen: The number of elements in each pattern\npatternActivity: The number of elements that should be active in\n each pattern\nseqLength: a list of possible sequence lengths. The length of each\n sequence will be randomly chosen from here.\nnSequences: The number of simple sequences in the first set\n\nretval: (seqList1, seqList2, patterns)\n seqList1, seqList2: a list of sequences. Each sequence is itself a list\n containing the input pattern indices for that sequence.\n patterns: the input patterns used in the seqList.", "id": "f17557:m6"} {"signature": "def generateSimpleCoincMatrix(nCoinc=, length=, activity=):", "body": "assert nCoinc*activity<=length, \"\"coincMatrix = SM32(, length)coinc = numpy.zeros(length, dtype='')for i in range(nCoinc):coinc[:] = coinc[i*activity:(i+)*activity] = coincMatrix.addRow(coinc)return coincMatrix", "docstring": "Generate a non overlapping coincidence matrix. This is used to generate random\ninputs to the temporal learner and to compare the predicted output against.\n\nIt generates a matrix of nCoinc rows, each row has length 'length' and has\na total of 'activity' bits on.\n\nParameters:\n-----------------------------------------------\nnCoinc: the number of rows to generate\nlength: the length of each row\nactivity: the number of ones to put into each row.", "id": "f17557:m7"} {"signature": "def generateSequences(nPatterns=, patternLen=, patternActivity=,hubs=[,], seqLength=[,,],nSimpleSequences=, nHubSequences=):", "body": "patterns = generateCoincMatrix(nCoinc=nPatterns, length=patternLen,activity=patternActivity)seqList = generateSimpleSequences(nCoinc=nPatterns, seqLength=seqLength,nSeq=nSimpleSequences) +generateHubSequences(nCoinc=nPatterns, hubs=hubs, seqLength=seqLength,nSeq=nHubSequences)return (seqList, patterns)", "docstring": "Generate a set of simple and hub sequences. A simple sequence contains\na randomly chosen set of elements from 0 to 'nCoinc-1'. 
A hub sequence\nalways contains a hub element in the middle of it.\n\nParameters:\n-----------------------------------------------\nnPatterns: the number of patterns to use in the sequences.\npatternLen: The number of elements in each pattern\npatternActivity: The number of elements that should be active in\n each pattern\nhubs: which of the elements will be used as hubs.\nseqLength: a list of possible sequence lengths. The length of each\n sequence will be randomly chosen from here.\nnSimpleSequences: The number of simple sequences to generate\nnHubSequences: The number of hub sequences to generate\n\nretval: (seqList, patterns)\n seqList: a list of sequences. Each sequence is itself a list\n containing the input pattern indices for that sequence.\n patterns: the input patterns used in the seqList.", "id": "f17557:m8"} {"signature": "def generateL2Sequences(nL1Patterns=, l1Hubs=[,], l1SeqLength=[,,],nL1SimpleSequences=, nL1HubSequences=,l1Pooling=, perfectStability=False, spHysteresisFactor=,patternLen=, patternActivity=):", "body": "l1SeqList = generateSimpleSequences(nCoinc=nL1Patterns, seqLength=l1SeqLength,nSeq=nL1SimpleSequences) +generateHubSequences(nCoinc=nL1Patterns, hubs=l1Hubs,seqLength=l1SeqLength, nSeq=nL1HubSequences)spOutput = generateSlowSPOutput(seqListBelow = l1SeqList,poolingTimeBelow=l1Pooling, outputWidth=patternLen,activity=patternActivity, perfectStability=perfectStability,spHysteresisFactor=spHysteresisFactor)outSeq = NoneoutSeqList = []outPatterns = SM32(, patternLen)for pattern in spOutput:if pattern.sum() == :if outSeq is not None:outSeqList.append(outSeq)outSeq = []continuepatternIdx = Noneif outPatterns.nRows() > :matches = outPatterns.rightVecSumAtNZ(pattern)outCoinc = matches.argmax().astype('')numOnes = pattern.sum()if matches[outCoinc] == numOnesand outPatterns.getRow(int(outCoinc)).sum() == numOnes:patternIdx = outCoincif patternIdx is None:outPatterns.addRow(pattern)patternIdx = outPatterns.nRows() - outSeq.append(patternIdx)if outSeq is not None:outSeqList.append(outSeq)return (outSeqList, outPatterns)", "docstring": "Generate the simulated output from a spatial pooler that's sitting\non top of another spatial pooler / temporal memory pair. The average on-time\nof the outputs from the simulated TM is given by the l1Pooling argument.\n\nIn this routine, L1 refers to the first spatial and temporal memory and L2\nrefers to the spatial pooler above that.\n\nParameters:\n-----------------------------------------------\nnL1Patterns: the number of patterns to use in the L1 sequences.\nl1Hubs: which of the elements will be used as hubs.\nl1SeqLength: a list of possible sequence lengths. The length of each\n sequence will be randomly chosen from here.\nnL1SimpleSequences: The number of simple sequences to generate for L1\nnL1HubSequences: The number of hub sequences to generate for L1\nl1Pooling: The number of time steps to pool over in the L1 temporal\n pooler\nperfectStability: If true, then the input patterns represented by the\n sequences generated will have perfect stability over\n l1Pooling time steps. This is the best case ideal input\n to a TM. 
In actual situations, with an actual SP\n providing input, the stability will always be less than\n this.\nspHysteresisFactor: The hysteresisFactor to use in the L2 spatial pooler.\n Only used when perfectStability is False\npatternLen: The number of elements in each pattern output by L2\npatternActivity: The number of elements that should be active in\n each pattern\n\n@retval: (seqList, patterns)\n seqList: a list of sequences output from L2. Each sequence is\n itself a list containing the input pattern indices for that\n sequence.\n patterns: the input patterns used in the L2 seqList.", "id": "f17557:m9"} {"signature": "def vectorsFromSeqList(seqList, patternMatrix):", "body": "totalLen = for seq in seqList:totalLen += len(seq)vectors = numpy.zeros((totalLen, patternMatrix.shape[]), dtype='')vecOffset = for seq in seqList:seq = numpy.array(seq, dtype='')for idx,coinc in enumerate(seq):vectors[vecOffset] = patternMatrix.getRow(int(coinc))vecOffset += return vectors", "docstring": "Convert a list of sequences of pattern indices, and a pattern lookup table\n into an array of patterns\n\nParameters:\n-----------------------------------------------\nseq: the sequence, given as indices into the patternMatrix\npatternMatrix: a SparseMatrix containing the possible patterns used in\n the sequence.", "id": "f17557:m10"} {"signature": "def sameTMParams(tp1, tp2):", "body": "result = Truefor param in [\"\", \"\", \"\", \"\",\"\", \"\", \"\", \"\",\"\", \"\", \"\",\"\", \"\",\"\", \"\", \"\"]:if getattr(tp1, param) != getattr(tp2,param):print(param,\"\")print(getattr(tp1, param), \"\", getattr(tp2,param))result = Falsereturn result", "docstring": "Given two TM instances, see if any parameters are different.", "id": "f17557:m11"} {"signature": "def sameSynapse(syn, synapses):", "body": "for s in synapses:if (s[]==syn[]) and (s[]==syn[]) and (abs(s[]-syn[]) <= ):return Truereturn False", "docstring": "Given a synapse and a list of synapses, check whether this synapse\n exists in the list. 
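An ungarbled sketch of the synapse-matching test this docstring describes, assuming the 0.001 permanence tolerance it mentions:

    def same_synapse_sketch(syn, synapses, tol=0.001):
        # syn and every entry of synapses are [column, cell, permanence] triples;
        # a match needs identical column and cell and a permanence within tol.
        return any(s[0] == syn[0] and s[1] == syn[1] and abs(s[2] - syn[2]) <= tol
                   for s in synapses)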
A synapse is represented as [col, cell, permanence].\n A synapse matches if col and cell are identical and the permanence value is\n within 0.001.", "id": "f17557:m12"} {"signature": "def sameSegment(seg1, seg2):", "body": "result = Truefor field in [, , , , , ]:if abs(seg1[][field] - seg2[][field]) > :result = Falseif len(seg1[:]) != len(seg2[:]):result = Falsefor syn in seg2[:]:if syn[] <= :print(\"\")result = Falseif result == True:for syn in seg1[:]:if syn[] <= :print(\"\")result = Falseres = sameSynapse(syn, seg2[:])if res == False:result = Falsereturn result", "docstring": "Return True if seg1 and seg2 are identical, ignoring order of synapses", "id": "f17557:m13"} {"signature": "def tmDiff(tm1, tm2, verbosity = , relaxSegmentTests =True):", "body": "if sameTMParams(tm1, tm2) == False:print(\"\")return Falseresult = Trueif (tm1.activeState[''] != tm2.activeState['']).any():print('', numpy.where(tm1.activeState[''] != tm2.activeState['']))result = Falseif (tm1.predictedState[''] - tm2.predictedState['']).any():print('', numpy.where(tm1.predictedState[''] != tm2.predictedState['']))result = Falseif tm1.getNumSegments() != tm2.getNumSegments():print(\"\", tm1.getNumSegments(), tm2.getNumSegments())result = Falseif tm1.getNumSynapses() != tm2.getNumSynapses():print(\"\", tm1.getNumSynapses(), tm2.getNumSynapses())tm1.printCells()tm2.printCells()result = Falsefor c in range(tm1.numberOfCols):for i in range(tm2.cellsPerColumn):if tm1.getNumSegmentsInCell(c, i) != tm2.getNumSegmentsInCell(c, i):print(\"\",c,i, end='')print(tm1.getNumSegmentsInCell(c, i), tm2.getNumSegmentsInCell(c, i))result = Falseif result == True and not relaxSegmentTests:for c in range(tm1.numberOfCols):for i in range(tm2.cellsPerColumn):nSegs = tm1.getNumSegmentsInCell(c, i)for segIdx in range(nSegs):tm1seg = tm1.getSegmentOnCell(c, i, segIdx)res = Falsefor tm2segIdx in range(nSegs):tm2seg = tm2.getSegmentOnCell(c, i, tm2segIdx)if sameSegment(tm1seg, tm2seg) == True:res = Truebreakif res == False:print(\"\",c,i)if verbosity >= :print(\"\")tm1.printCell(c, i)print(\"\")tm2.printCell(c, i)result = Falseif result == True and (verbosity > ):print(\"\")return result", "docstring": "Given two TM instances, list the difference between them and returns False\nif there is a difference. This function checks the major parameters. If this\npasses (and checkLearn is true) it checks the number of segments on\neach cell. If this passes, checks each synapse on each segment.\nWhen comparing C++ and Py, the segments are usually in different orders in the\ncells. 
tmDiff ignores segment order when comparing TM's.", "id": "f17557:m14"} {"signature": "def tmDiff2(tm1, tm2, verbosity = , relaxSegmentTests =True,checkLearn = True, checkStates = True):", "body": "if sameTMParams(tm1, tm2) == False:print(\"\")return Falsetm1Label = \"\" % tm1.__class__.__name__tm2Label = \"\" % tm2.__class__.__name__result = Trueif checkStates:if (tm1.infActiveState[''] != tm2.infActiveState['']).any():print('', numpy.where(tm1.infActiveState[''] != tm2.infActiveState['']))result = Falseif (tm1.infPredictedState[''] - tm2.infPredictedState['']).any():print('', numpy.where(tm1.infPredictedState[''] != tm2.infPredictedState['']))result = Falseif checkLearn and (tm1.lrnActiveState[''] - tm2.lrnActiveState['']).any():print('', numpy.where(tm1.lrnActiveState[''] != tm2.lrnActiveState['']))result = Falseif checkLearn and (tm1.lrnPredictedState[''] - tm2.lrnPredictedState['']).any():print('', numpy.where(tm1.lrnPredictedState[''] != tm2.lrnPredictedState['']))result = Falseif checkLearn and abs(tm1.getAvgLearnedSeqLength() - tm2.getAvgLearnedSeqLength()) > :print(\"\", end='')print(tm1.getAvgLearnedSeqLength(), \"\", tm2.getAvgLearnedSeqLength())result = Falseif tm1.getNumSegments() != tm2.getNumSegments():print(\"\", tm1.getNumSegments(), tm2.getNumSegments())result = Falseif tm1.getNumSynapses() != tm2.getNumSynapses():print(\"\", tm1.getNumSynapses(), tm2.getNumSynapses())if verbosity >= :print(\"\" % tm1Label, end='')tm1.printCells()print(\"\" % tm2Label, end='')tm2.printCells()for c in range(tm1.numberOfCols):for i in range(tm2.cellsPerColumn):if tm1.getNumSegmentsInCell(c, i) != tm2.getNumSegmentsInCell(c, i):print(\"\",c,i, end='')print(tm1.getNumSegmentsInCell(c, i), tm2.getNumSegmentsInCell(c, i))result = Falseif result == True and not relaxSegmentTests and checkLearn:for c in range(tm1.numberOfCols):for i in range(tm2.cellsPerColumn):nSegs = tm1.getNumSegmentsInCell(c, i)for segIdx in range(nSegs):tm1seg = tm1.getSegmentOnCell(c, i, segIdx)res = Falsefor tm2segIdx in range(nSegs):tm2seg = tm2.getSegmentOnCell(c, i, tm2segIdx)if sameSegment(tm1seg, tm2seg) == True:res = Truebreakif res == False:print(\"\",c,i)result = Falseif verbosity >= :print(\"\" % tm1Label, end='')tm1.printCell(c, i)print(\"\" % tm2Label, end='')tm2.printCell(c, i)if result == True and (verbosity > ):print(\"\")return result", "docstring": "Given two TM instances, list the difference between them and returns False\nif there is a difference. This function checks the major parameters. If this\npasses (and checkLearn is true) it checks the number of segments on each cell.\nIf this passes, checks each synapse on each segment.\nWhen comparing C++ and Py, the segments are usually in different orders in the\ncells. 
tmDiff ignores segment order when comparing TM's.\n\nIf checkLearn is True, will check learn states as well as all the segments\n\nIf checkStates is True, will check the various state arrays", "id": "f17557:m15"} {"signature": "def spDiff(SP1,SP2):", "body": "if(len(SP1._masterConnectedM)!=len(SP2._masterConnectedM)):print(\"\")return Falseif(len(SP1._masterPotentialM)!=len(SP2._masterPotentialM)):print(\"\")return Falseif(len(SP1._masterPermanenceM)!=len(SP2._masterPermanenceM)):print(\"\")return Falsefor i in range(,len(SP1._masterConnectedM)):connected1 = SP1._masterConnectedM[i]connected2 = SP2._masterConnectedM[i]if(connected1!=connected2):print(\"\" % (i))return Falsepermanences1 = SP1._masterPermanenceM[i];permanences2 = SP2._masterPermanenceM[i];if(permanences1!=permanences2):print(\"\" % (i))return Falsepotential1 = SP1._masterPotentialM[i];potential2 = SP2._masterPotentialM[i];if(potential1!=potential2):print(\"\" % (i))return Falseif(not numpy.array_equal(SP1._firingBoostFactors,SP2._firingBoostFactors)):print(\"\")return Falseif(not numpy.array_equal(SP1._dutyCycleAfterInh,SP2._dutyCycleAfterInh)):print(\"\")return Falseif(not numpy.array_equal(SP1._dutyCycleBeforeInh,SP2._dutyCycleBeforeInh)):print(\"\")return Falseprint(\"\")return True", "docstring": "Function that compares two spatial pooler instances. Compares the\nstatic variables between the two poolers to make sure that they are equivalent.\n\nParameters\n-----------------------------------------\nSP1 first spatial pooler to be compared\n\nSP2 second spatial pooler to be compared\n\nTo establish equality, this function does the following:\n\n1.Compares the connected synapse matrices for each coincidence\n\n2.Compare the potential synapse matrices for each coincidence\n\n3.Compare the permanence matrices for each coincidence\n\n4.Compare the firing boosts between the two poolers.\n\n5.Compare the duty cycles before and after inhibition for both poolers", "id": "f17557:m16"} {"signature": "def removeSeqStarts(vectors, resets, numSteps=):", "body": "if numSteps == :return vectorsresetIndices = resets.nonzero()[]removeRows = resetIndicesfor i in range(numSteps-):removeRows = numpy.hstack((removeRows, resetIndices+i+))return numpy.delete(vectors, removeRows, axis=)", "docstring": "Convert a list of sequences of pattern indices, and a pattern lookup table\n into a an array of patterns\n\nParameters:\n-----------------------------------------------\nvectors: the data vectors. Row 0 contains the outputs from time\n step 0, row 1 from time step 1, etc.\nresets: the reset signal. This is a vector of booleans\n the same length as the number of rows in 'vectors'. It\n has a 1 where a sequence started and a 0 otherwise. 
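A usage sketch for removeSeqStarts, following the reset-signal convention in its docstring; the data are made up.

    import numpy

    vectors = numpy.arange(12).reshape(6, 2)   # 6 time steps, 2 outputs each
    resets = numpy.array([1, 0, 0, 1, 0, 0])   # two sequences of length 3
    trimmed = removeSeqStarts(vectors, resets, numSteps=1)
    # Rows 0 and 3 (the first step of each sequence) are dropped, leaving 4 rows.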
The\n first 'numSteps' rows of 'vectors' of each sequence will\n not be included in the return result.\nnumSteps Number of samples to remove from the start of each sequence\n\nretval: copy of vectors, with the first 'numSteps' samples at the\n start of each sequence removed.", "id": "f17557:m17"} {"signature": "def _accumulateFrequencyCounts(values, freqCounts=None):", "body": "values = numpy.array(values)numEntries = values.max() + if freqCounts is not None:numEntries = max(numEntries, freqCounts.size)if freqCounts is not None:if freqCounts.size != numEntries:newCounts = numpy.zeros(numEntries, dtype='')newCounts[:freqCounts.size] = freqCountselse:newCounts = freqCountselse:newCounts = numpy.zeros(numEntries, dtype='')for v in values:newCounts[v] += return newCounts", "docstring": "Accumulate a list of values 'values' into the frequency counts 'freqCounts',\nand return the updated frequency counts\n\nFor example, if values contained the following: [1,1,3,5,1,3,5], and the initial\nfreqCounts was None, then the return value would be:\n[0,3,0,2,0,2]\nwhich corresponds to how many of each value we saw in the input, i.e. there\nwere 0 0's, 3 1's, 0 2's, 2 3's, 0 4's, and 2 5's.\n\nIf freqCounts is not None, the values will be added to the existing counts and\nthe length of the frequency Counts will be automatically extended as necessary\n\nParameters:\n-----------------------------------------------\nvalues: The values to accumulate into the frequency counts\nfreqCounts: Accumulated frequency counts so far, or none", "id": "f17557:m18"} {"signature": "def _listOfOnTimesInVec(vector):", "body": "durations = []numOnTimes = totalOnTime = nonzeros = numpy.array(vector).nonzero()[]if len(nonzeros) == :return (, , [])if len(nonzeros) == :return (, , [])prev = nonzeros[]onTime = endIdx = nonzeros[-]for idx in nonzeros[:]:if idx != prev+:totalOnTime += onTimenumOnTimes += durations.append(onTime)onTime = else:onTime += prev = idxtotalOnTime += onTimenumOnTimes += durations.append(onTime)return (totalOnTime, numOnTimes, durations)", "docstring": "Returns 3 things for a vector:\n * the total on time\n * the number of runs\n * a list of the durations of each run.\n\nParameters:\n-----------------------------------------------\ninput stream: 11100000001100000000011111100000\nreturn value: (11, 3, [3, 2, 6])", "id": "f17557:m19"} {"signature": "def _fillInOnTimes(vector, durations):", "body": "nonzeros = numpy.array(vector).nonzero()[]if len(nonzeros) == :returnif len(nonzeros) == :durations[nonzeros[]] = returnprev = nonzeros[]onTime = onStartIdx = prevendIdx = nonzeros[-]for idx in nonzeros[:]:if idx != prev+:durations[onStartIdx:onStartIdx+onTime] = list(range(,onTime+))onTime = onStartIdx = idxelse:onTime += prev = idxdurations[onStartIdx:onStartIdx+onTime] = list(range(,onTime+))", "docstring": "Helper function used by averageOnTimePerTimestep. 'durations' is a vector\nwhich must be the same len as vector. 
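The worked examples in the docstrings of _listOfOnTimesInVec and _accumulateFrequencyCounts above can be exercised directly; a short sketch:

    import numpy

    # Run lengths of the docstring's example stream.
    vec = numpy.array([int(c) for c in "11100000001100000000011111100000"])
    totalOnTime, numRuns, durations = _listOfOnTimesInVec(vec)
    # -> (11, 3, [3, 2, 6]) per the docstring

    # Frequency counts of a list of values.
    counts = _accumulateFrequencyCounts([1, 1, 3, 5, 1, 3, 5])
    # -> [0, 3, 0, 2, 0, 2] per the docstring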
For each \"on\" in vector, it fills in\nthe corresponding element of duration with the duration of that \"on\" signal\nup until that time\n\nParameters:\n-----------------------------------------------\nvector: vector of output values over time\ndurations: vector same length as 'vector', initialized to 0's.\n This is filled in with the durations of each \"on\" signal.\n\nExample:\nvector: 11100000001100000000011111100000\ndurations: 12300000001200000000012345600000", "id": "f17557:m20"} {"signature": "def averageOnTimePerTimestep(vectors, numSamples=None):", "body": "if vectors.ndim == :vectors.shape = (-,)numTimeSteps = len(vectors)numElements = len(vectors[])if numSamples is not None:import pdb; pdb.set_trace() countOn = numpy.random.randint(, numElements, numSamples)vectors = vectors[:, countOn]durations = numpy.zeros(vectors.shape, dtype='')for col in range(vectors.shape[]):_fillInOnTimes(vectors[:,col], durations[:,col])sums = vectors.sum(axis=)sums.clip(min=, max=numpy.inf, out=sums)avgDurations = durations.sum(axis=, dtype='') / sumsavgOnTime = avgDurations.sum() / (avgDurations > ).sum()freqCounts = _accumulateFrequencyCounts(avgDurations)return (avgOnTime, freqCounts)", "docstring": "Computes the average on-time of the outputs that are on at each time step, and\nthen averages this over all time steps.\n\nThis metric is resilient to the number of outputs that are on at each time\nstep. That is, if time step 0 has many more outputs on than time step 100, it\nwon't skew the results. This is particularly useful when measuring the\naverage on-time of things like the temporal memory output where you might\nhave many columns bursting at the start of a sequence - you don't want those\nstart of sequence bursts to over-influence the calculated average on-time.\n\nParameters:\n-----------------------------------------------\nvectors: the vectors for which the onTime is calculated. Row 0\n contains the outputs from time step 0, row 1 from time step\n 1, etc.\nnumSamples: the number of elements for which on-time is calculated.\n If not specified, then all elements are looked at.\n\nReturns (scalar average on-time over all time steps,\n list containing frequency counts of each encountered on-time)", "id": "f17557:m21"} {"signature": "def averageOnTime(vectors, numSamples=None):", "body": "if vectors.ndim == :vectors.shape = (-,)numTimeSteps = len(vectors)numElements = len(vectors[])if numSamples is None:numSamples = numElementscountOn = list(range(numElements))else:countOn = numpy.random.randint(, numElements, numSamples)sumOfLengths = onTimeFreqCounts = Nonen = for i in countOn:(onTime, segments, durations) = _listOfOnTimesInVec(vectors[:,i])if onTime != :sumOfLengths += onTimen += segmentsonTimeFreqCounts = _accumulateFrequencyCounts(durations, onTimeFreqCounts)if n > :return (sumOfLengths/n, onTimeFreqCounts)else:return (, onTimeFreqCounts)", "docstring": "Returns the average on-time, averaged over all on-time runs.\n\nParameters:\n-----------------------------------------------\nvectors: the vectors for which the onTime is calculated. 
Row 0\n contains the outputs from time step 0, row 1 from time step\n 1, etc.\nnumSamples: the number of elements for which on-time is calculated.\n If not specified, then all elements are looked at.\n\nReturns: (scalar average on-time of all outputs,\n list containing frequency counts of each encountered on-time)", "id": "f17557:m22"} {"signature": "def plotOutputsOverTime(vectors, buVectors=None, title=''):", "body": "import pylabpylab.ion()pylab.figure()imData = vectors.transpose()if buVectors is not None:assert(buVectors.shape == vectors.shape)imData = imData.copy()imData[buVectors.transpose().astype('')] = pylab.imshow(imData, aspect='', cmap=pylab.cm.gray_r,interpolation='')pylab.title(title)", "docstring": "Generate a figure that shows each output over time. Time goes left to right,\nand each output is plotted on a different line, allowing you to see the overlap\nin the outputs, when they turn on/off, etc.\n\nParameters:\n------------------------------------------------------------\nvectors: the vectors to plot\nbuVectors: These are normally specified when plotting the pooling\n outputs of the temporal memory over time. The 'buVectors'\n are the sequence outputs and the 'vectors' are the\n pooling outputs. The buVector (sequence) outputs will be drawn\n in a darker color than the vector (pooling) outputs to\n distinguish where the cell is outputting due to pooling vs.\n sequence memory.\ntitle: title for the plot\navgOnTime: The average on-time measurement. If not supplied,\n then it will be calculated from the passed in vectors.", "id": "f17557:m23"} {"signature": "def plotHistogram(freqCounts, title='', xLabel=''):", "body": "import pylabpylab.ion()pylab.figure()pylab.bar(numpy.arange(len(freqCounts)) - , freqCounts)pylab.title(title)pylab.xlabel(xLabel)", "docstring": "This is usually used to display a histogram of the on-times encountered\nin a particular output.\n\nThe freqCounts is a vector containing the frequency counts of each on-time\n(starting at an on-time of 0 and going to an on-time = len(freqCounts)-1)\n\nThe freqCounts are typically generated from the averageOnTimePerTimestep\nor averageOnTime methods of this module.\n\nParameters:\n-----------------------------------------------\nfreqCounts: The frequency counts to plot\ntitle: Title of the plot", "id": "f17557:m24"} {"signature": "def populationStability(vectors, numSamples=None):", "body": "numVectors = len(vectors)if numSamples is None:numSamples = numVectors-countOn = list(range(numVectors-))else:countOn = numpy.random.randint(, numVectors-, numSamples)sigmap = for i in countOn:match = checkMatch(vectors[i], vectors[i+], sparse=False)if match[] != :sigmap += float(match[])/match[]return sigmap / numSamples", "docstring": "Returns the stability for the population averaged over multiple time steps\n\nParameters:\n-----------------------------------------------\nvectors: the vectors for which the stability is calculated\nnumSamples: the number of time steps where stability is counted\n\nAt each time step, count the fraction of the active elements which are stable\nfrom the previous step.\nAverage all the fractions.", "id": "f17557:m25"} {"signature": "def percentOutputsStableOverNTimeSteps(vectors, numSamples=None):", "body": "totalSamples = len(vectors)windowSize = numSamplesnumWindows = pctStable = for wStart in range(, totalSamples-windowSize+):data = vectors[wStart:wStart+windowSize]outputSums = data.sum(axis=)stableOutputs = (outputSums == windowSize).sum()samplePctStable = float(stableOutputs) / 
data[].sum()print(samplePctStable)pctStable += samplePctStablenumWindows += return float(pctStable) / numWindows", "docstring": "Returns the percent of the outputs that remain completely stable over\nN time steps.\n\nParameters:\n-----------------------------------------------\nvectors: the vectors for which the stability is calculated\nnumSamples: the number of time steps where stability is counted\n\nFor each window of numSamples, count how many outputs are active during\nthe entire window.", "id": "f17557:m26"} {"signature": "def computeSaturationLevels(outputs, outputsShape, sparseForm=False):", "body": "if not sparseForm:outputs = outputs.reshape(outputsShape)spOut = SM32(outputs)else:if len(outputs) > :assert (outputs.max() < outputsShape[] * outputsShape[])spOut = SM32(, outputsShape[] * outputsShape[])spOut.setRowFromSparse(, outputs, []*len(outputs))spOut.reshape(outputsShape[], outputsShape[])regionSize = rows = range(regionSize+, outputsShape[]+, regionSize)cols = range(regionSize+, outputsShape[]+, regionSize)regionSums = spOut.nNonZerosPerBox(rows, cols)(locations, values) = regionSums.tolist()values /= float(regionSize * regionSize)sat = list(values)innerSat = []locationSet = set(locations)for (location, value) in zip(locations, values):(row, col) = locationif (row-,col) in locationSet and (row, col-) in locationSetand (row+, col) in locationSet and (row, col+) in locationSet:innerSat.append(value)return (sat, innerSat)", "docstring": "Compute the saturation for a continuous level. This breaks the level into\nmultiple regions and computes the saturation level for each region.\n\nParameters:\n--------------------------------------------\noutputs: output of the level. If sparseForm is True, this is a list of\n the non-zeros. If sparseForm is False, it is the dense\n representation\noutputsShape: The shape of the outputs of the level (height, width)\nretval: (sat, innerSat):\n sat: list of the saturation levels of each non-empty\n region of the level (each 0 -> 1.0)\n innerSat: list of the saturation level of each non-empty region\n that is not near an edge (each 0 -> 1.0)", "id": "f17557:m27"} {"signature": "def checkMatch(input, prediction, sparse=True, verbosity=):", "body": "if sparse:activeElementsInInput = set(input)activeElementsInPrediction = set(prediction)else:activeElementsInInput = set(input.nonzero()[])activeElementsInPrediction = set(prediction.nonzero()[])totalActiveInPrediction = len(activeElementsInPrediction)totalActiveInInput = len(activeElementsInInput)foundInInput = len(activeElementsInPrediction.intersection(activeElementsInInput))missingFromInput = len(activeElementsInPrediction.difference(activeElementsInInput))missingFromPrediction = len(activeElementsInInput.difference(activeElementsInPrediction))if verbosity >= :print(\"\", foundInInput, \"\", totalActiveInPrediction, end='')print(\"\", missingFromInput, \"\",totalActiveInPrediction, end='')print(\"\", missingFromPrediction, \"\",totalActiveInInput)return (foundInInput, totalActiveInInput, missingFromInput,totalActiveInPrediction)", "docstring": "Compares the actual input with the predicted input and returns results\n\nParameters:\n-----------------------------------------------\ninput: The actual input\nprediction: the predicted input\nverbosity: If > 0, print debugging messages\nsparse: If true, they are in sparse form (list of\n active indices)\n\nretval (foundInInput, totalActiveInInput, missingFromInput,\n totalActiveInPrediction)\n foundInInput: The number of predicted active elements that were\n 
found in the actual input\n totalActiveInInput: The total number of active elements in the input.\n missingFromInput: The number of predicted active elements that were not\n found in the actual input\n totalActiveInPrediction: The total number of active elements in the prediction", "id": "f17557:m28"} {"signature": "def predictionExtent(inputs, resets, outputs, minOverlapPct=):", "body": "predCounts = NonepredTotal = nSamples = len(outputs)predTotalNotLimited = nSamplesNotLimited = nCols = len(inputs[])nCellsPerCol = len(outputs[]) // nColsfor idx in range(nSamples):activeCols = outputs[idx].reshape(nCols, nCellsPerCol).max(axis=)steps = while (idx+steps+ < nSamples) and (resets[idx+steps+] == ):overlap = numpy.logical_and(inputs[idx+steps+], activeCols)overlapPct = * float(overlap.sum()) / inputs[idx+steps+].sum()if overlapPct >= minOverlapPct:steps += else:breakpredCounts = _accumulateFrequencyCounts([steps], predCounts)predTotal += stepsif resets[idx] or((idx+steps+ < nSamples) and (not resets[idx+steps+])):predTotalNotLimited += stepsnSamplesNotLimited += return (float(predTotal) / nSamples,float(predTotalNotLimited) / nSamplesNotLimited,predCounts)", "docstring": "Computes the predictive ability of a temporal memory (TM). This routine returns\na value which is the average number of time steps of prediction provided\nby the TM. It accepts as input the inputs, outputs, and resets provided to\nthe TM as well as a 'minOverlapPct' used to evaluate whether or not a\nprediction is a good enough match to the actual input.\n\nThe 'outputs' are the pooling outputs of the TM. This routine treats each output\nas a \"manifold\" that includes the active columns that should be present in the\nnext N inputs. It then looks at each successive input and sees if its active\ncolumns are within the manifold. For each output sample, it computes how\nmany time steps it can go forward on the input before the input overlap with\nthe manifold is less than 'minOverlapPct'. It returns the average number of\ntime steps calculated for each output.\n\nParameters:\n-----------------------------------------------\ninputs: The inputs to the TM. Row 0 contains the inputs from time\n step 0, row 1 from time step 1, etc.\nresets: The reset input to the TM. Element 0 contains the reset from\n time step 0, element 1 from time step 1, etc.\noutputs: The pooling outputs from the TM. 
Row 0 contains the outputs\n from time step 0, row 1 from time step 1, etc.\nminOverlapPct: How much each input's columns must overlap with the pooling\n output's columns to be considered a valid prediction.\n\nretval: (Average number of time steps of prediction over all output\n samples,\n Average number of time steps of prediction when we aren't\n cut short by the end of the sequence,\n List containing frequency counts of each encountered\n prediction time)", "id": "f17557:m29"} {"signature": "def getCentreAndSpreadOffsets(spaceShape,spreadShape,stepSize=):", "body": "from nupic.math.cross import crossshape = spaceShapeif shape[] == and shape[] == :centerOffsets = [(,)]else:xMin = - * (shape[] // )xMax = xMin + shape[] - xPositions = list(range(stepSize * xMin, stepSize * xMax + , stepSize))yMin = - * (shape[] // )yMax = yMin + shape[] - yPositions = list(range(stepSize * yMin, stepSize * yMax + , stepSize))centerOffsets = list(cross(yPositions, xPositions))numCenterOffsets = len(centerOffsets)print(\"\", centerOffsets)shape = spreadShapeif shape[] == and shape[] == :spreadOffsets = [(,)]else:xMin = - * (shape[] // )xMax = xMin + shape[] - xPositions = list(range(stepSize * xMin, stepSize * xMax + , stepSize))yMin = - * (shape[] // )yMax = yMin + shape[] - yPositions = list(range(stepSize * yMin, stepSize * yMax + , stepSize))spreadOffsets = list(cross(yPositions, xPositions))spreadOffsets.remove((,))spreadOffsets.insert(, (,))numSpreadOffsets = len(spreadOffsets)print(\"\", spreadOffsets)return centerOffsets, spreadOffsets", "docstring": "Generates centre offsets and spread offsets for block-mode based training\nregimes - star, cross, block.\n\n Parameters:\n -----------------------------------------------\n spaceShape: The (height, width) of the 2-D space to explore. This\n sets the number of center-points.\n spreadShape: The shape (height, width) of the area around each center-point\n to explore.\n stepSize: The step size. How big each step is, in pixels. This controls\n *both* the spacing of the center-points within the block and the\n points we explore around each center-point\n retval: (centreOffsets, spreadOffsets)", "id": "f17557:m30"} {"signature": "def makeCloneMap(columnsShape, outputCloningWidth, outputCloningHeight=-):", "body": "if outputCloningHeight < :outputCloningHeight = outputCloningWidthcolumnsHeight, columnsWidth = columnsShapenumDistinctMasters = outputCloningWidth * outputCloningHeighta = numpy.empty((columnsHeight, columnsWidth), '')for row in range(columnsHeight):for col in range(columnsWidth):a[row, col] = (col % outputCloningWidth) +(row % outputCloningHeight) * outputCloningWidthreturn a, numDistinctMasters", "docstring": "Make a two-dimensional clone map mapping columns to clone master.\n\n This makes a map that is (numColumnsHigh, numColumnsWide) big that can\n be used to figure out which clone master to use for each column. 
Here are\n a few sample calls\n\n >>> makeCloneMap(columnsShape=(10, 6), outputCloningWidth=4)\n (array([[ 0, 1, 2, 3, 0, 1],\n [ 4, 5, 6, 7, 4, 5],\n [ 8, 9, 10, 11, 8, 9],\n [12, 13, 14, 15, 12, 13],\n [ 0, 1, 2, 3, 0, 1],\n [ 4, 5, 6, 7, 4, 5],\n [ 8, 9, 10, 11, 8, 9],\n [12, 13, 14, 15, 12, 13],\n [ 0, 1, 2, 3, 0, 1],\n [ 4, 5, 6, 7, 4, 5]], dtype=uint32), 16)\n\n >>> makeCloneMap(columnsShape=(7, 8), outputCloningWidth=3)\n (array([[0, 1, 2, 0, 1, 2, 0, 1],\n [3, 4, 5, 3, 4, 5, 3, 4],\n [6, 7, 8, 6, 7, 8, 6, 7],\n [0, 1, 2, 0, 1, 2, 0, 1],\n [3, 4, 5, 3, 4, 5, 3, 4],\n [6, 7, 8, 6, 7, 8, 6, 7],\n [0, 1, 2, 0, 1, 2, 0, 1]], dtype=uint32), 9)\n\n >>> makeCloneMap(columnsShape=(7, 11), outputCloningWidth=5)\n (array([[ 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0],\n [ 5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5],\n [10, 11, 12, 13, 14, 10, 11, 12, 13, 14, 10],\n [15, 16, 17, 18, 19, 15, 16, 17, 18, 19, 15],\n [20, 21, 22, 23, 24, 20, 21, 22, 23, 24, 20],\n [ 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0],\n [ 5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5]], dtype=uint32), 25)\n\n >>> makeCloneMap(columnsShape=(7, 8), outputCloningWidth=3, outputCloningHeight=4)\n (array([[ 0, 1, 2, 0, 1, 2, 0, 1],\n [ 3, 4, 5, 3, 4, 5, 3, 4],\n [ 6, 7, 8, 6, 7, 8, 6, 7],\n [ 9, 10, 11, 9, 10, 11, 9, 10],\n [ 0, 1, 2, 0, 1, 2, 0, 1],\n [ 3, 4, 5, 3, 4, 5, 3, 4],\n [ 6, 7, 8, 6, 7, 8, 6, 7]], dtype=uint32), 12)\n\n The basic idea with this map is that, if you imagine things stretching off\n to infinity, every instance of a given clone master is seeing the exact\n same thing in all directions. That includes:\n - All neighbors must be the same\n - The \"meaning\" of the input to each of the instances of the same clone\n master must be the same. If input is pixels and we have translation\n invariance--this is easy. At higher levels where input is the output\n of lower levels, this can be much harder.\n - The \"meaning\" of the inputs to neighbors of a clone master must be the\n same for each instance of the same clone master.\n\n\n The best way to think of this might be in terms of 'inputCloningWidth' and\n 'outputCloningWidth'.\n - The 'outputCloningWidth' is the number of columns you'd have to move\n horizontally (or vertically) before you get back to the same the same\n clone that you started with. MUST BE INTEGRAL!\n - The 'inputCloningWidth' is the 'outputCloningWidth' of the node below us.\n If we're getting input from an sensor where every element just represents\n a shift of every other element, this is 1.\n At a conceptual level, it means that if two different inputs are shown\n to the node and the only difference between them is that one is shifted\n horizontally (or vertically) by this many pixels, it means we are looking\n at the exact same real world input, but shifted by some number of pixels\n (doesn't have to be 1). MUST BE INTEGRAL!\n\n At level 1, I think you could have this:\n * inputCloningWidth = 1\n * sqrt(coincToInputRatio^2) = 2.5\n * outputCloningWidth = 5\n ...in this case, you'd end up with 25 masters.\n\n\n Let's think about this case:\n input: - - - 0 1 2 3 4 5 - - - - -\n columns: 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4\n\n ...in other words, input 0 is fed to both column 0 and column 1. Input 1\n is fed to columns 2, 3, and 4, etc. 
Hopefully, you can see that you'll\n get the exact same output (except shifted) with:\n input: - - - - - 0 1 2 3 4 5 - - -\n columns: 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4\n\n ...in other words, we've shifted the input 2 spaces and the output shifted\n 5 spaces.\n\n\n *** The outputCloningWidth MUST ALWAYS be an integral multiple of the ***\n *** inputCloningWidth in order for all of our rules to apply. ***\n *** NOTE: inputCloningWidth isn't passed here, so it's the caller's ***\n *** responsibility to ensure that this is true. ***\n\n *** The outputCloningWidth MUST ALWAYS be an integral multiple of ***\n *** sqrt(coincToInputRatio^2), too. ***\n\n @param columnsShape The shape (height, width) of the columns.\n @param outputCloningWidth See docstring above.\n @param outputCloningHeight If non-negative, can be used to make\n rectangular (instead of square) cloning fields.\n @return cloneMap An array (numColumnsHigh, numColumnsWide) that\n contains the clone index to use for each\n column.\n @return numDistinctClones The number of distinct clones in the map. This\n is just outputCloningWidth*outputCloningHeight.", "id": "f17557:m31"} {"signature": "def numpyStr(array, format='', includeIndices=False, includeZeros=True):", "body": "shape = array.shapeassert (len(shape) <= )items = ['']if len(shape) == :if includeIndices:format = '' + formatif includeZeros:rowItems = [format % (c,x) for (c,x) in enumerate(array)]else:rowItems = [format % (c,x) for (c,x) in enumerate(array) if x != ]else:rowItems = [format % (x) for x in array]items.extend(rowItems)else:(rows, cols) = shapeif includeIndices:format = '' + formatfor r in range(rows):if includeIndices:rowItems = [format % (r,c,x) for c,x in enumerate(array[r])]else:rowItems = [format % (x) for x in array[r]]if r > :items.append('')items.append('')items.extend(rowItems)if r < rows-:items.append('')else:items.append('')items.append('')return ''.join(items)", "docstring": "Pretty print a numpy matrix using the given format string for each\n value. Return the string representation\n\n Parameters:\n ------------------------------------------------------------\n array: The numpy array to print. 
This can be either a 1D vector or 2D matrix\n format: The format string to use for each value\n includeIndices: If true, include [row,col] label for each value\n includeZeros: Can only be set to False if includeIndices is on.\n If True, include 0 values in the print-out\n If False, exclude 0 values from the print-out.", "id": "f17557:m32"} {"signature": "@classmethoddef read(cls, proto):", "body": "tm = super(TemporalMemoryMonitorMixin, cls).read(proto)tm.mmName = Nonetm._mmTraces = Nonetm._mmData = Nonetm.mmClearHistory()tm._mmResetActive = Truereturn tm", "docstring": "Intercepts TemporalMemory deserialization request in order to initialize\n`TemporalMemoryMonitorMixin` state\n\n@param proto (DynamicStructBuilder) Proto object\n\n@return (TemporalMemory) TemporalMemory shim instance", "id": "f17559:c0:m1"} {"signature": "def __init__(self,numberOfCols=,cellsPerColumn=,initialPerm=,connectedPerm=,minThreshold=,newSynapseCount=,permanenceInc=,permanenceDec=,permanenceMax=,activationThreshold=,predictedSegmentDecrement=,maxSegmentsPerCell=,maxSynapsesPerSegment=,globalDecay=,maxAge=,pamLength=,verbosity=,outputType=\"\",seed=):", "body": "super(TMShimMixin, self).__init__(columnDimensions=(numberOfCols,),cellsPerColumn=cellsPerColumn,activationThreshold=activationThreshold,initialPermanence=initialPerm,connectedPermanence=connectedPerm,minThreshold=minThreshold,maxNewSynapseCount=newSynapseCount,permanenceIncrement=permanenceInc,permanenceDecrement=permanenceDec,predictedSegmentDecrement=predictedSegmentDecrement,maxSegmentsPerCell=maxSegmentsPerCell,maxSynapsesPerSegment=maxSynapsesPerSegment,seed=seed)self.infActiveState = {\"\": None}", "docstring": "Translate parameters and initialize member variables specific to `backtracking_tm.py`.", "id": "f17559:c1:m0"} {"signature": "@classmethoddef read(cls, proto):", "body": "tm = super(TMShimMixin, cls).read(proto)tm.infActiveState = {\"\": None}return tm", "docstring": "Intercepts TemporalMemory deserialization request in order to initialize\n`self.infActiveState`\n\n@param proto (DynamicStructBuilder) Proto object\n\n@return (TemporalMemory) TemporalMemory shim instance", "id": "f17559:c1:m1"} {"signature": "def compute(self, bottomUpInput, enableLearn, computeInfOutput=None):", "body": "super(TMShimMixin, self).compute(set(bottomUpInput.nonzero()[]),learn=enableLearn)numberOfCells = self.numberOfCells()activeState = numpy.zeros(numberOfCells)activeState[self.getActiveCells()] = self.infActiveState[\"\"] = activeStateoutput = numpy.zeros(numberOfCells)output[self.getPredictiveCells()] = output[self.getActiveCells()] = return output", "docstring": "(From `backtracking_tm.py`)\nHandle one compute, possibly learning.\n\n@param bottomUpInput The bottom-up input, typically from a spatial pooler\n@param enableLearn If true, perform learning\n@param computeInfOutput If None, default behavior is to disable the inference\n output when enableLearn is on.\n If true, compute the inference output\n If false, do not compute the inference output", "id": "f17559:c1:m2"} {"signature": "def topDownCompute(self, topDownIn=None):", "body": "output = numpy.zeros(self.numberOfColumns())columns = [self.columnForCell(idx) for idx in self.getPredictiveCells()]output[columns] = return output", "docstring": "(From `backtracking_tm.py`)\nTop-down compute - generate expected input given output of the TM\n\n@param topDownIn top down input from the level above us\n\n@returns best estimate of the TM input that would have generated bottomUpOut.", "id": "f17559:c1:m3"} 
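Note: the TM shim records above (compute, topDownCompute) describe how sparse cell indices from the TemporalMemory are expanded into dense output vectors, but the serialized bodies have their literals stripped. The following is a minimal illustrative sketch of that conversion, not the shim's exact implementation; it assumes only a TM-like object exposing numberOfCells(), numberOfColumns(), columnForCell(), getActiveCells() and getPredictiveCells(), all of which appear in the records above.

    import numpy

    def denseCellOutput(tm):
        # Mark every active and predictive cell with 1.0 in a dense,
        # cell-sized vector, mirroring the output built in compute().
        output = numpy.zeros(tm.numberOfCells())
        output[list(tm.getActiveCells())] = 1.0
        output[list(tm.getPredictiveCells())] = 1.0
        return output

    def denseColumnPrediction(tm):
        # Mark every column containing a predictive cell with 1.0,
        # mirroring the column-level estimate from topDownCompute().
        output = numpy.zeros(tm.numberOfColumns())
        predictedColumns = [tm.columnForCell(cell)
                            for cell in tm.getPredictiveCells()]
        output[predictedColumns] = 1.0
        return output
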
{"signature": "def __init__(self,numberOfCols=,cellsPerColumn=,initialPerm=,connectedPerm=,minThreshold=,newSynapseCount=,permanenceInc=,permanenceDec=,permanenceMax=,activationThreshold=,predictedSegmentDecrement=,maxSegmentsPerCell=,maxSynapsesPerSegment=,globalDecay=,maxAge=,pamLength=,verbosity=,outputType=\"\",seed=):", "body": "super(MonitoredTMShim, self).__init__(columnDimensions=(numberOfCols,),cellsPerColumn=cellsPerColumn,activationThreshold=activationThreshold,initialPermanence=initialPerm,connectedPermanence=connectedPerm,minThreshold=minThreshold,maxNewSynapseCount=newSynapseCount,permanenceIncrement=permanenceInc,permanenceDecrement=permanenceDec,predictedSegmentDecrement=predictedSegmentDecrement,maxSegmentsPerCell=maxSegmentsPerCell,maxSynapsesPerSegment=maxSynapsesPerSegment,seed=seed)self.infActiveState = {\"\": None}", "docstring": "Translate parameters and initialize member variables specific to `backtracking_tm.py`.", "id": "f17559:c4:m0"} {"signature": "@classmethoddef read(cls, proto):", "body": "tm = super(MonitoredTMShim, cls).read(proto)tm.infActiveState = {\"\": None}return tm", "docstring": "Intercepts TemporalMemory deserialization request in order to initialize\n`self.infActiveState`\n\n@param proto (DynamicStructBuilder) Proto object\n\n@return (TemporalMemory) TemporalMemory shim instance", "id": "f17559:c4:m1"} {"signature": "def compute(self, bottomUpInput, enableLearn, computeInfOutput=None):", "body": "super(MonitoredTMShim, self).compute(set(bottomUpInput.nonzero()[]),learn=enableLearn)numberOfCells = self.numberOfCells()activeState = numpy.zeros(numberOfCells)activeState[self.getActiveCells()] = self.infActiveState[\"\"] = activeStateoutput = numpy.zeros(numberOfCells)output[self.getPredictiveCells() + self.getActiveCells()] = return output", "docstring": "(From `backtracking_tm.py`)\nHandle one compute, possibly learning.\n\n@param bottomUpInput The bottom-up input, typically from a spatial pooler\n@param enableLearn If true, perform learning\n@param computeInfOutput If None, default behavior is to disable the inference\n output when enableLearn is on.\n If true, compute the inference output\n If false, do not compute the inference output", "id": "f17559:c4:m2"} {"signature": "def topDownCompute(self, topDownIn=None):", "body": "output = numpy.zeros(self.numberOfColumns())columns = [self.columnForCell(idx) for idx in self.getPredictiveCells()]output[columns] = return output", "docstring": "(From `backtracking_tm.py`)\nTop-down compute - generate expected input given output of the TM\n\n@param topDownIn top down input from the level above us\n\n@returns best estimate of the TM input that would have generated bottomUpOut.", "id": "f17559:c4:m3"} {"signature": "def _pFormatArray(array_, fmt=\"\"):", "body": "return \"\" + \"\".join(fmt % x for x in array_) + \"\"", "docstring": "Return a string with pretty-print of a numpy array using the given format\n for each element", "id": "f17560:m0"} {"signature": "def compute(self, recordNum, patternNZ, classification, learn, infer):", "body": "if self.verbosity >= :print(\"\", learn)print(\"\", recordNum)print(\"\" % len(patternNZ), patternNZ)print(\"\", classification)if len(self._patternNZHistory) > :if recordNum < self._patternNZHistory[-][]:raise ValueError(\"\")if len(self._patternNZHistory) == orrecordNum > self._patternNZHistory[-][]:self._patternNZHistory.append((recordNum, patternNZ))retval = {}if max(patternNZ) > self._maxInputIdx:newMaxInputIdx = max(patternNZ)for nSteps in 
self.steps:self._weightMatrix[nSteps] = numpy.concatenate((self._weightMatrix[nSteps],numpy.zeros(shape=(newMaxInputIdx-self._maxInputIdx,self._maxBucketIdx+))), axis=)self._maxInputIdx = int(newMaxInputIdx)if classification is not None:if type(classification[\"\"]) is not list:bucketIdxList = [classification[\"\"]]actValueList = [classification[\"\"]]numCategory = else:bucketIdxList = classification[\"\"]actValueList = classification[\"\"]numCategory = len(classification[\"\"])else:if learn:raise ValueError(\"\")actValueList = NonebucketIdxList = Noneif infer:retval = self.infer(patternNZ, actValueList)if learn and classification[\"\"] is not None:for categoryI in range(numCategory):bucketIdx = bucketIdxList[categoryI]actValue = actValueList[categoryI]if bucketIdx > self._maxBucketIdx:for nSteps in self.steps:self._weightMatrix[nSteps] = numpy.concatenate((self._weightMatrix[nSteps],numpy.zeros(shape=(self._maxInputIdx+,bucketIdx-self._maxBucketIdx))), axis=)self._maxBucketIdx = int(bucketIdx)while self._maxBucketIdx > len(self._actualValues) - :self._actualValues.append(None)if self._actualValues[bucketIdx] is None:self._actualValues[bucketIdx] = actValueelse:if (isinstance(actValue, int) orisinstance(actValue, float) orisinstance(actValue, int)):self._actualValues[bucketIdx] = (( - self.actValueAlpha)* self._actualValues[bucketIdx]+ self.actValueAlpha * actValue)else:self._actualValues[bucketIdx] = actValuefor (learnRecordNum, learnPatternNZ) in self._patternNZHistory:error = self._calculateError(recordNum, bucketIdxList)nSteps = recordNum - learnRecordNumif nSteps in self.steps:for bit in learnPatternNZ:self._weightMatrix[nSteps][bit, :] += self.alpha * error[nSteps]if infer and self.verbosity >= :print(\"\")print(\"\", retval[\"\"])for (nSteps, votes) in list(retval.items()):if nSteps == \"\":continueprint(\"\" % (nSteps), _pFormatArray(votes))bestBucketIdx = votes.argmax()print((\"\"\"\" % (bestBucketIdx,retval[\"\"][bestBucketIdx])))print()return retval", "docstring": "Process one input sample.\n\nThis method is called by outer loop code outside the nupic-engine. We\nuse this instead of the nupic engine compute() because our inputs and\noutputs aren't fixed size vectors of reals.\n\n\n:param recordNum: Record number of this input pattern. Record numbers\n normally increase sequentially by 1 each time unless there are missing\n records in the dataset. Knowing this information insures that we don't get\n confused by missing records.\n\n:param patternNZ: List of the active indices from the output below. When the\n input is from TemporalMemory, this list should be the indices of the\n active cells.\n\n:param classification: Dict of the classification information where:\n\n - bucketIdx: list of indices of the encoder bucket\n - actValue: list of actual values going into the encoder\n\n Classification could be None for inference mode.\n:param learn: (bool) if true, learn this sample\n:param infer: (bool) if true, perform inference\n\n:return: Dict containing inference results, there is one entry for each\n step in self.steps, where the key is the number of steps, and\n the value is an array containing the relative likelihood for\n each bucketIdx starting from bucketIdx 0.\n\n There is also an entry containing the average actual value to\n use for each bucket. The key is 'actualValues'.\n\n for example:\n\n .. 
code-block:: python\n\n {1 : [0.1, 0.3, 0.2, 0.7],\n 4 : [0.2, 0.4, 0.3, 0.5],\n 'actualValues': [1.5, 3,5, 5,5, 7.6],\n }", "id": "f17560:c0:m1"} {"signature": "def infer(self, patternNZ, actValueList):", "body": "if self.steps[] == or actValueList is None:defaultValue = else:defaultValue = actValueList[]actValues = [x if x is not None else defaultValuefor x in self._actualValues]retval = {\"\": actValues}for nSteps in self.steps:predictDist = self.inferSingleStep(patternNZ, self._weightMatrix[nSteps])retval[nSteps] = predictDistreturn retval", "docstring": "Return the inference value from one input sample. The actual\nlearning happens in compute().\n\n:param patternNZ: list of the active indices from the output below\n:param classification: dict of the classification information:\n bucketIdx: index of the encoder bucket\n actValue: actual value going into the encoder\n\n:return: dict containing inference results, one entry for each step in\n self.steps. The key is the number of steps, the value is an\n array containing the relative likelihood for each bucketIdx\n starting from bucketIdx 0.\n\n for example:\n\n .. code-block:: python\n\n {'actualValues': [0.0, 1.0, 2.0, 3.0]\n 1 : [0.1, 0.3, 0.2, 0.7]\n 4 : [0.2, 0.4, 0.3, 0.5]}", "id": "f17560:c0:m2"} {"signature": "def inferSingleStep(self, patternNZ, weightMatrix):", "body": "outputActivation = weightMatrix[patternNZ].sum(axis=)outputActivation = outputActivation - numpy.max(outputActivation)expOutputActivation = numpy.exp(outputActivation)predictDist = expOutputActivation / numpy.sum(expOutputActivation)return predictDist", "docstring": "Perform inference for a single step. Given an SDR input and a weight\nmatrix, return a predicted distribution.\n\n:param patternNZ: list of the active indices from the output below\n:param weightMatrix: numpy array of the weight matrix\n:return: numpy array of the predicted class label distribution", "id": "f17560:c0:m3"} {"signature": "def _calculateError(self, recordNum, bucketIdxList):", "body": "error = dict()targetDist = numpy.zeros(self._maxBucketIdx + )numCategories = len(bucketIdxList)for bucketIdx in bucketIdxList:targetDist[bucketIdx] = /numCategoriesfor (learnRecordNum, learnPatternNZ) in self._patternNZHistory:nSteps = recordNum - learnRecordNumif nSteps in self.steps:predictDist = self.inferSingleStep(learnPatternNZ,self._weightMatrix[nSteps])error[nSteps] = targetDist - predictDistreturn error", "docstring": "Calculate error signal\n\n:param bucketIdxList: list of encoder buckets\n\n:return: dict containing error. 
The key is the number of steps\n The value is a numpy array of error at the output layer", "id": "f17560:c0:m7"} {"signature": "def __getitem__(self, columnIndex):", "body": "return super(_SparseMatrixCorticalColumnAdapter, self).getRow(columnIndex)", "docstring": "Wraps getRow() such that instances may be indexed by columnIndex.", "id": "f17561:c1:m0"} {"signature": "def replace(self, columnIndex, bitmap):", "body": "return super(_SparseMatrixCorticalColumnAdapter, self).replaceSparseRow(columnIndex, bitmap)", "docstring": "Wraps replaceSparseRow()", "id": "f17561:c1:m1"} {"signature": "def update(self, columnIndex, vector):", "body": "return super(_SparseMatrixCorticalColumnAdapter, self).setRowFromDense(columnIndex, vector)", "docstring": "Wraps setRowFromDense()", "id": "f17561:c1:m2"} {"signature": "def getColumnDimensions(self):", "body": "return self._columnDimensions", "docstring": ":returns: (iter) the dimensions of the columns in the region", "id": "f17561:c4:m1"} {"signature": "def getInputDimensions(self):", "body": "return self._inputDimensions", "docstring": ":returns: (iter) the dimensions of the input vector", "id": "f17561:c4:m2"} {"signature": "def getNumColumns(self):", "body": "return self._numColumns", "docstring": ":returns: (int) the total number of columns", "id": "f17561:c4:m3"} {"signature": "def getNumInputs(self):", "body": "return self._numInputs", "docstring": ":returns: (int) the total number of inputs.", "id": "f17561:c4:m4"} {"signature": "def getPotentialRadius(self):", "body": "return self._potentialRadius", "docstring": ":returns: (float) the potential radius", "id": "f17561:c4:m5"} {"signature": "def setPotentialRadius(self, potentialRadius):", "body": "self._potentialRadius = potentialRadius", "docstring": ":param potentialRadius: (float) value to set", "id": "f17561:c4:m6"} {"signature": "def getPotentialPct(self):", "body": "return self._potentialPct", "docstring": ":returns: (float) the potential percent", "id": "f17561:c4:m7"} {"signature": "def setPotentialPct(self, potentialPct):", "body": "self._potentialPct = potentialPct", "docstring": ":param potentialPct: (float) value to set", "id": "f17561:c4:m8"} {"signature": "def getGlobalInhibition(self):", "body": "return self._globalInhibition", "docstring": ":returns: (bool) whether global inhibition is enabled.", "id": "f17561:c4:m9"} {"signature": "def setGlobalInhibition(self, globalInhibition):", "body": "self._globalInhibition = globalInhibition", "docstring": ":param globalInhibition: (bool) value to set.", "id": "f17561:c4:m10"} {"signature": "def getNumActiveColumnsPerInhArea(self):", "body": "return self._numActiveColumnsPerInhArea", "docstring": ":returns: (float) the number of active columns per inhibition area. Returns \n a value less than 0 if parameter is unused.", "id": "f17561:c4:m11"} {"signature": "def setNumActiveColumnsPerInhArea(self, numActiveColumnsPerInhArea):", "body": "assert(numActiveColumnsPerInhArea > )self._numActiveColumnsPerInhArea = numActiveColumnsPerInhAreaself._localAreaDensity = ", "docstring": "Sets the number of active columns per inhibition area. Invalidates the\n``localAreaDensity`` parameter\n\n:param numActiveColumnsPerInhArea: (float) value to set", "id": "f17561:c4:m12"} {"signature": "def getLocalAreaDensity(self):", "body": "return self._localAreaDensity", "docstring": ":returns: (float) the local area density. 
Returns a value less than 0 if \n parameter is unused.", "id": "f17561:c4:m13"} {"signature": "def setLocalAreaDensity(self, localAreaDensity):", "body": "assert(localAreaDensity > and localAreaDensity <= )self._localAreaDensity = localAreaDensityself._numActiveColumnsPerInhArea = ", "docstring": "Sets the local area density. Invalidates the 'numActiveColumnsPerInhArea'\nparameter\n\n:param localAreaDensity: (float) value to set", "id": "f17561:c4:m14"} {"signature": "def getStimulusThreshold(self):", "body": "return self._stimulusThreshold", "docstring": ":returns: (int) the stimulus threshold", "id": "f17561:c4:m15"} {"signature": "def setStimulusThreshold(self, stimulusThreshold):", "body": "self._stimulusThreshold = stimulusThreshold", "docstring": ":param stimulusThreshold: (float) value to set.", "id": "f17561:c4:m16"} {"signature": "def getInhibitionRadius(self):", "body": "return self._inhibitionRadius", "docstring": ":returns: (int) the inhibition radius", "id": "f17561:c4:m17"} {"signature": "def setInhibitionRadius(self, inhibitionRadius):", "body": "self._inhibitionRadius = inhibitionRadius", "docstring": ":param inhibitionRadius: (int) value to set", "id": "f17561:c4:m18"} {"signature": "def getDutyCyclePeriod(self):", "body": "return self._dutyCyclePeriod", "docstring": ":returns: (int) the duty cycle period", "id": "f17561:c4:m19"} {"signature": "def setDutyCyclePeriod(self, dutyCyclePeriod):", "body": "self._dutyCyclePeriod = dutyCyclePeriod", "docstring": ":param dutyCyclePeriod: (int) value to set.", "id": "f17561:c4:m20"} {"signature": "def getBoostStrength(self):", "body": "return self._boostStrength", "docstring": ":returns: (float) the maximum boost value used.", "id": "f17561:c4:m21"} {"signature": "def setBoostStrength(self, boostStrength):", "body": "self._boostStrength = boostStrength", "docstring": "Sets the maximum boost value.\n:param boostStrength: (float) value to set", "id": "f17561:c4:m22"} {"signature": "def getIterationNum(self):", "body": "return self._iterationNum", "docstring": ":returns: the iteration number", "id": "f17561:c4:m23"} {"signature": "def setIterationNum(self, iterationNum):", "body": "self._iterationNum = iterationNum", "docstring": ":param iterationNum: (int) value to set", "id": "f17561:c4:m24"} {"signature": "def getIterationLearnNum(self):", "body": "return self._iterationLearnNum", "docstring": ":returns: (int) The number of iterations that have been learned.", "id": "f17561:c4:m25"} {"signature": "def setIterationLearnNum(self, iterationLearnNum):", "body": "self._iterationLearnNum = iterationLearnNum", "docstring": ":param iterationLearnNum: (int) value to set", "id": "f17561:c4:m26"} {"signature": "def getSpVerbosity(self):", "body": "return self._spVerbosity", "docstring": ":returns: (int) the verbosity level, larger is more verbose.", "id": "f17561:c4:m27"} {"signature": "def setSpVerbosity(self, spVerbosity):", "body": "self._spVerbosity = spVerbosity", "docstring": ":param spVerbosity: (int) value to set, larger is more verbose.", "id": "f17561:c4:m28"} {"signature": "def getUpdatePeriod(self):", "body": "return self._updatePeriod", "docstring": ":returns: (int) The period at which active duty cycles are updated.", "id": "f17561:c4:m29"} {"signature": "def setUpdatePeriod(self, updatePeriod):", "body": "self._updatePeriod = updatePeriod", "docstring": ":param updatePeriod: (int) The period at which active duty cycles are \n updated.", "id": "f17561:c4:m30"} {"signature": "def getSynPermTrimThreshold(self):", "body": "return 
self._synPermTrimThreshold", "docstring": "Sparsity is enforced by trimming out all permanence values below this value.\n\n:returns: (float) the permanence trim threshold", "id": "f17561:c4:m31"} {"signature": "def setSynPermTrimThreshold(self, synPermTrimThreshold):", "body": "self._synPermTrimThreshold = synPermTrimThreshold", "docstring": "Sparsity is enforced by trimming out all permanence values below this value.\n\n:param synPermTrimThreshold: (float) the permanence trim threshold", "id": "f17561:c4:m32"} {"signature": "def getSynPermActiveInc(self):", "body": "return self._synPermActiveInc", "docstring": ":returns: (float) the permanence increment amount for active synapses inputs", "id": "f17561:c4:m33"} {"signature": "def setSynPermActiveInc(self, synPermActiveInc):", "body": "self._synPermActiveInc = synPermActiveInc", "docstring": "Sets the permanence increment amount for active synapses.\n\n:param synPermActiveInc: (float) value to set.", "id": "f17561:c4:m34"} {"signature": "def getSynPermInactiveDec(self):", "body": "return self._synPermInactiveDec", "docstring": ":returns: (float) the permanence decrement amount for inactive synapses.", "id": "f17561:c4:m35"} {"signature": "def setSynPermInactiveDec(self, synPermInactiveDec):", "body": "self._synPermInactiveDec = synPermInactiveDec", "docstring": "Sets the permanence decrement amount for inactive synapses.\n\n:param synPermInactiveDec: (float) value to set.", "id": "f17561:c4:m36"} {"signature": "def getSynPermBelowStimulusInc(self):", "body": "return self._synPermBelowStimulusInc", "docstring": ":returns: (float) the permanence increment amount for columns that have not \n been recently active.", "id": "f17561:c4:m37"} {"signature": "def setSynPermBelowStimulusInc(self, synPermBelowStimulusInc):", "body": "self._synPermBelowStimulusInc = synPermBelowStimulusInc", "docstring": "Sets the permanence increment amount for columns that have not been\nrecently active.\n\n:param synPermBelowStimulusInc: (float) value to set.", "id": "f17561:c4:m38"} {"signature": "def getSynPermConnected(self):", "body": "return self._synPermConnected", "docstring": ":returns: (float) the permanence amount that qualifies a synapse as being \n connected.", "id": "f17561:c4:m39"} {"signature": "def setSynPermConnected(self, synPermConnected):", "body": "self._synPermConnected = synPermConnected", "docstring": "Sets the permanence amount that qualifies a synapse as being\nconnected.\n\n:param synPermConnected: (float) value to set.", "id": "f17561:c4:m40"} {"signature": "def getMinPctOverlapDutyCycles(self):", "body": "return self._minPctOverlapDutyCycles", "docstring": ":returns: (float) the minimum tolerated overlaps, given as percent of\n neighbors overlap score", "id": "f17561:c4:m41"} {"signature": "def setMinPctOverlapDutyCycles(self, minPctOverlapDutyCycles):", "body": "self._minPctOverlapDutyCycles = minPctOverlapDutyCycles", "docstring": "Sets the minimum tolerated activity duty cycle, given as percent of\nneighbors' activity duty cycle.\n\n:param minPctOverlapDutyCycles: (float) value to set.", "id": "f17561:c4:m42"} {"signature": "def getBoostFactors(self, boostFactors):", "body": "boostFactors[:] = self._boostFactors[:]", "docstring": "Gets the boost factors for all columns. 
Input list will be overwritten.\n\n:param boostFactors: (list) size must match number of columns.", "id": "f17561:c4:m43"} {"signature": "def setBoostFactors(self, boostFactors):", "body": "self._boostFactors[:] = boostFactors[:]", "docstring": "Sets the boost factors for all columns. ``boostFactors`` size must match\nthe number of columns.\n\n:param boostFactors: (iter) value to set.", "id": "f17561:c4:m44"} {"signature": "def getOverlapDutyCycles(self, overlapDutyCycles):", "body": "overlapDutyCycles[:] = self._overlapDutyCycles[:]", "docstring": "Gets the overlap duty cycles for all columns. ``overlapDutyCycles``\nsize must match the number of columns.\n\n:param overlapDutyCycles: (list) will be overwritten.", "id": "f17561:c4:m45"} {"signature": "def setOverlapDutyCycles(self, overlapDutyCycles):", "body": "self._overlapDutyCycles[:] = overlapDutyCycles", "docstring": "Sets the overlap duty cycles for all columns. ``overlapDutyCycles``\nsize must match the number of columns.\n\n:param overlapDutyCycles: (list) value to set.", "id": "f17561:c4:m46"} {"signature": "def getActiveDutyCycles(self, activeDutyCycles):", "body": "activeDutyCycles[:] = self._activeDutyCycles[:]", "docstring": "Gets the activity duty cycles for all columns. Input list will be \noverwritten.\n\n:param activeDutyCycles: (list) size must match number of columns.", "id": "f17561:c4:m47"} {"signature": "def setActiveDutyCycles(self, activeDutyCycles):", "body": "self._activeDutyCycles[:] = activeDutyCycles", "docstring": "Sets the activity duty cycles for all columns. ``activeDutyCycles`` size \nmust match the number of columns.\n\n:param activeDutyCycles: (list) value to set.", "id": "f17561:c4:m48"} {"signature": "def getMinOverlapDutyCycles(self, minOverlapDutyCycles):", "body": "minOverlapDutyCycles[:] = self._minOverlapDutyCycles[:]", "docstring": ":returns: (list) the minimum overlap duty cycles for all columns. \n ``minOverlapDutyCycles`` size must match the number of columns.", "id": "f17561:c4:m49"} {"signature": "def setMinOverlapDutyCycles(self, minOverlapDutyCycles):", "body": "self._minOverlapDutyCycles[:] = minOverlapDutyCycles[:]", "docstring": "Sets the minimum overlap duty cycles for all columns. \n``minOverlapDutyCycles`` size must match the number of columns.\n\n:param minOverlapDutyCycles: (iter) value to set.", "id": "f17561:c4:m50"} {"signature": "def getPotential(self, columnIndex, potential):", "body": "assert(columnIndex < self._numColumns)potential[:] = self._potentialPools[columnIndex]", "docstring": ":param columnIndex: (int) column index to get potential for.\n:param potential: (list) will be overwritten with column potentials. Must \n match the number of inputs.", "id": "f17561:c4:m51"} {"signature": "def setPotential(self, columnIndex, potential):", "body": "assert(columnIndex < self._numColumns)potentialSparse = numpy.where(potential > )[]if len(potentialSparse) < self._stimulusThreshold:raise Exception(\"\" +\"\" +\"\")self._potentialPools.replace(columnIndex, potentialSparse)", "docstring": "Sets the potential mapping for a given column. 
``potential`` size must match \nthe number of inputs, and must be greater than ``stimulusThreshold``.\n\n:param columnIndex: (int) column index to set potential for.\n:param potential: (list) value to set.", "id": "f17561:c4:m52"} {"signature": "def getPermanence(self, columnIndex, permanence):", "body": "assert(columnIndex < self._numColumns)permanence[:] = self._permanences[columnIndex]", "docstring": "Returns the permanence values for a given column. ``permanence`` size\nmust match the number of inputs.\n\n:param columnIndex: (int) column index to get permanence for.\n:param permanence: (list) will be overwritten with permanences.", "id": "f17561:c4:m53"} {"signature": "def setPermanence(self, columnIndex, permanence):", "body": "assert(columnIndex < self._numColumns)self._updatePermanencesForColumn(permanence, columnIndex, raisePerm=False)", "docstring": "Sets the permanence values for a given column. ``permanence`` size must \nmatch the number of inputs.\n\n:param columnIndex: (int) column index to set permanence for.\n:param permanence: (list) value to set.", "id": "f17561:c4:m54"} {"signature": "def getConnectedSynapses(self, columnIndex, connectedSynapses):", "body": "assert(columnIndex < self._numColumns)connectedSynapses[:] = self._connectedSynapses[columnIndex]", "docstring": ":param connectedSynapses: (list) will be overwritten\n:returns: (iter) the connected synapses for a given column.\n ``connectedSynapses`` size must match the number of inputs", "id": "f17561:c4:m55"} {"signature": "def getConnectedCounts(self, connectedCounts):", "body": "connectedCounts[:] = self._connectedCounts[:]", "docstring": ":param connectedCounts: (list) will be overwritten\n:returns: (int) the number of connected synapses for all columns.\n ``connectedCounts`` size must match the number of columns.", "id": "f17561:c4:m56"} {"signature": "def getOverlaps(self):", "body": "return self._overlaps", "docstring": ":returns: (iter) the overlap score for each column.", "id": "f17561:c4:m57"} {"signature": "def getBoostedOverlaps(self):", "body": "return self._boostedOverlaps", "docstring": ":returns: (list) the boosted overlap score for each column.", "id": "f17561:c4:m58"} {"signature": "def compute(self, inputVector, learn, activeArray):", "body": "if not isinstance(inputVector, numpy.ndarray):raise TypeError(\"\" %str(type(inputVector)))if inputVector.size != self._numInputs:raise ValueError(\"\" % (inputVector.size, self._numInputs))self._updateBookeepingVars(learn)inputVector = numpy.array(inputVector, dtype=realDType)inputVector.reshape(-)self._overlaps = self._calculateOverlap(inputVector)if learn:self._boostedOverlaps = self._boostFactors * self._overlapselse:self._boostedOverlaps = self._overlapsactiveColumns = self._inhibitColumns(self._boostedOverlaps)if learn:self._adaptSynapses(inputVector, activeColumns)self._updateDutyCycles(self._overlaps, activeColumns)self._bumpUpWeakColumns()self._updateBoostFactors()if self._isUpdateRound():self._updateInhibitionRadius()self._updateMinDutyCycles()activeArray.fill()activeArray[activeColumns] = ", "docstring": "This is the primary public method of the SpatialPooler class. This\nfunction takes a input vector and outputs the indices of the active columns.\nIf 'learn' is set to True, this method also updates the permanences of the\ncolumns.\n\n:param inputVector: A numpy array of 0's and 1's that comprises the input\n to the spatial pooler. 
The array will be treated as a one dimensional\n array, therefore the dimensions of the array do not have to match the\n exact dimensions specified in the class constructor. In fact, even a\n list would suffice. The number of input bits in the vector must,\n however, match the number of bits specified by the call to the\n constructor. Therefore there must be a '0' or '1' in the array for\n every input bit.\n:param learn: A boolean value indicating whether learning should be\n performed. Learning entails updating the permanence values of the\n synapses, and hence modifying the 'state' of the model. Setting\n learning to 'off' freezes the SP and has many uses. For example, you\n might want to feed in various inputs and examine the resulting SDR's.\n:param activeArray: An array whose size is equal to the number of columns.\n Before the function returns this array will be populated with 1's at\n the indices of the active columns, and 0's everywhere else.", "id": "f17561:c4:m59"} {"signature": "def stripUnlearnedColumns(self, activeArray):", "body": "neverLearned = numpy.where(self._activeDutyCycles == )[]activeArray[neverLearned] = ", "docstring": "Removes the set of columns who have never been active from the set of\nactive columns selected in the inhibition round. Such columns cannot\nrepresent learned pattern and are therefore meaningless if only inference\nis required. This should not be done when using a random, unlearned SP\nsince you would end up with no active columns.\n\n:param activeArray: An array whose size is equal to the number of columns.\n Any columns marked as active with an activeDutyCycle of 0 have\n never been activated before and therefore are not active due to\n learning. Any of these (unlearned) columns will be disabled (set to 0).", "id": "f17561:c4:m60"} {"signature": "def _updateMinDutyCycles(self):", "body": "if self._globalInhibition or self._inhibitionRadius > self._numInputs:self._updateMinDutyCyclesGlobal()else:self._updateMinDutyCyclesLocal()", "docstring": "Updates the minimum duty cycles defining normal activity for a column. A\ncolumn with activity duty cycle below this minimum threshold is boosted.", "id": "f17561:c4:m61"} {"signature": "def _updateMinDutyCyclesGlobal(self):", "body": "self._minOverlapDutyCycles.fill(self._minPctOverlapDutyCycles * self._overlapDutyCycles.max())", "docstring": "Updates the minimum duty cycles in a global fashion. Sets the minimum duty\ncycles for the overlap all columns to be a percent of the maximum in the\nregion, specified by minPctOverlapDutyCycle. Functionality it is equivalent\nto _updateMinDutyCyclesLocal, but this function exploits the globality of\nthe computation to perform it in a straightforward, and efficient manner.", "id": "f17561:c4:m62"} {"signature": "def _updateMinDutyCyclesLocal(self):", "body": "for column in range(self._numColumns):neighborhood = self._getColumnNeighborhood(column)maxActiveDuty = self._activeDutyCycles[neighborhood].max()maxOverlapDuty = self._overlapDutyCycles[neighborhood].max()self._minOverlapDutyCycles[column] = (maxOverlapDuty *self._minPctOverlapDutyCycles)", "docstring": "Updates the minimum duty cycles. The minimum duty cycles are determined\nlocally. Each column's minimum duty cycles are set to be a percent of the\nmaximum duty cycles in the column's neighborhood. 
Unlike\n_updateMinDutyCyclesGlobal, here the values can be quite different for\ndifferent columns.", "id": "f17561:c4:m63"} {"signature": "def _updateDutyCycles(self, overlaps, activeColumns):", "body": "overlapArray = numpy.zeros(self._numColumns, dtype=realDType)activeArray = numpy.zeros(self._numColumns, dtype=realDType)overlapArray[overlaps > ] = activeArray[activeColumns] = period = self._dutyCyclePeriodif (period > self._iterationNum):period = self._iterationNumself._overlapDutyCycles = self._updateDutyCyclesHelper(self._overlapDutyCycles,overlapArray,period)self._activeDutyCycles = self._updateDutyCyclesHelper(self._activeDutyCycles,activeArray,period)", "docstring": "Updates the duty cycles for each column. The OVERLAP duty cycle is a moving\naverage of the number of inputs which overlapped with each column. The\nACTIVITY duty cycle is a moving average of the frequency of activation for\neach column.\n\nParameters:\n----------------------------\n:param overlaps:\n An array containing the overlap score for each column.\n The overlap score for a column is defined as the number\n of synapses in a \"connected state\" (connected synapses)\n that are connected to input bits which are turned on.\n:param activeColumns:\n An array containing the indices of the active columns,\n the sparse set of columns which survived inhibition", "id": "f17561:c4:m64"} {"signature": "def _updateInhibitionRadius(self):", "body": "if self._globalInhibition:self._inhibitionRadius = int(self._columnDimensions.max())returnavgConnectedSpan = numpy.average([self._avgConnectedSpanForColumnND(i)for i in range(self._numColumns)])columnsPerInput = self._avgColumnsPerInput()diameter = avgConnectedSpan * columnsPerInputradius = (diameter - ) / radius = max(, radius)self._inhibitionRadius = int(radius + )", "docstring": "Update the inhibition radius. The inhibition radius is a measure of the\nsquare (or hypersquare) of columns that each column is \"connected to\"\non average. Since columns are not connected to each other directly, we\ndetermine this quantity by first figuring out how many *inputs* a column is\nconnected to, and then multiplying it by the total number of columns that\nexist for each input. For multiple dimensions the aforementioned\ncalculations are averaged over all dimensions of inputs and columns. This\nvalue is meaningless if global inhibition is enabled.", "id": "f17561:c4:m65"} {"signature": "def _avgColumnsPerInput(self):", "body": "numDim = max(self._columnDimensions.size, self._inputDimensions.size)colDim = numpy.ones(numDim)colDim[:self._columnDimensions.size] = self._columnDimensionsinputDim = numpy.ones(numDim)inputDim[:self._inputDimensions.size] = self._inputDimensionscolumnsPerInput = colDim.astype(realDType) / inputDimreturn numpy.average(columnsPerInput)", "docstring": "The average number of columns per input, taking into account the topology\nof the inputs and columns. This value is used to calculate the inhibition\nradius. This function supports an arbitrary number of dimensions. If the\nnumber of column dimensions does not match the number of input dimensions,\nwe treat the missing, or phantom dimensions as 'ones'.", "id": "f17561:c4:m66"} {"signature": "def _avgConnectedSpanForColumn1D(self, columnIndex):", "body": "assert(self._inputDimensions.size == )connected = self._connectedSynapses[columnIndex].nonzero()[]if connected.size == :return else:return max(connected) - min(connected) + ", "docstring": "The range of connected synapses for a column. 
This is used to\ncalculate the inhibition radius. This variation of the function only\nsupports a 1 dimensional column topology.\n\nParameters:\n----------------------------\n:param columnIndex: The index identifying a column in the permanence,\n potential and connectivity matrices", "id": "f17561:c4:m67"} {"signature": "def _avgConnectedSpanForColumn2D(self, columnIndex):", "body": "assert(self._inputDimensions.size == )connected = self._connectedSynapses[columnIndex](rows, cols) = connected.reshape(self._inputDimensions).nonzero()if rows.size == and cols.size == :return rowSpan = rows.max() - rows.min() + colSpan = cols.max() - cols.min() + return numpy.average([rowSpan, colSpan])", "docstring": "The range of connectedSynapses per column, averaged for each dimension.\nThis value is used to calculate the inhibition radius. This variation of\nthe function only supports a 2 dimensional column topology.\n\nParameters:\n----------------------------\n:param columnIndex: The index identifying a column in the permanence,\n potential and connectivity matrices", "id": "f17561:c4:m68"} {"signature": "def _avgConnectedSpanForColumnND(self, columnIndex):", "body": "dimensions = self._inputDimensionsconnected = self._connectedSynapses[columnIndex].nonzero()[]if connected.size == :return maxCoord = numpy.empty(self._inputDimensions.size)minCoord = numpy.empty(self._inputDimensions.size)maxCoord.fill(-)minCoord.fill(max(self._inputDimensions))for i in connected:maxCoord = numpy.maximum(maxCoord, numpy.unravel_index(i, dimensions))minCoord = numpy.minimum(minCoord, numpy.unravel_index(i, dimensions))return numpy.average(maxCoord - minCoord + )", "docstring": "The range of connectedSynapses per column, averaged for each dimension.\nThis value is used to calculate the inhibition radius. This variation of\nthe function supports arbitrary column dimensions.\n\nParameters:\n----------------------------\n:param index: The index identifying a column in the permanence, potential\n and connectivity matrices.", "id": "f17561:c4:m69"} {"signature": "def _adaptSynapses(self, inputVector, activeColumns):", "body": "inputIndices = numpy.where(inputVector > )[]permChanges = numpy.zeros(self._numInputs, dtype=realDType)permChanges.fill(- * self._synPermInactiveDec)permChanges[inputIndices] = self._synPermActiveIncfor columnIndex in activeColumns:perm = self._permanences[columnIndex]maskPotential = numpy.where(self._potentialPools[columnIndex] > )[]perm[maskPotential] += permChanges[maskPotential]self._updatePermanencesForColumn(perm, columnIndex, raisePerm=True)", "docstring": "The primary method in charge of learning. Adapts the permanence values of\nthe synapses based on the input vector, and the chosen columns after\ninhibition round. Permanence values are increased for synapses connected to\ninput bits that are turned on, and decreased for synapses connected to\ninputs bits that are turned off.\n\nParameters:\n----------------------------\n:param inputVector:\n A numpy array of 0's and 1's that comprises the input to\n the spatial pooler. 
There exists an entry in the array\n for every input bit.\n:param activeColumns:\n An array containing the indices of the columns that\n survived inhibition.", "id": "f17561:c4:m70"} {"signature": "def _bumpUpWeakColumns(self):", "body": "weakColumns = numpy.where(self._overlapDutyCycles< self._minOverlapDutyCycles)[]for columnIndex in weakColumns:perm = self._permanences[columnIndex].astype(realDType)maskPotential = numpy.where(self._potentialPools[columnIndex] > )[]perm[maskPotential] += self._synPermBelowStimulusIncself._updatePermanencesForColumn(perm, columnIndex, raisePerm=False)", "docstring": "This method increases the permanence values of synapses of columns whose\nactivity level has been too low. Such columns are identified by having an\noverlap duty cycle that drops too much below those of their peers. The\npermanence values for such columns are increased.", "id": "f17561:c4:m71"} {"signature": "def _raisePermanenceToThreshold(self, perm, mask):", "body": "if len(mask) < self._stimulusThreshold:raise Exception(\"\" +\"\" +\"\")numpy.clip(perm, self._synPermMin, self._synPermMax, out=perm)while True:numConnected = numpy.nonzero(perm > self._synPermConnected - PERMANENCE_EPSILON)[].sizeif numConnected >= self._stimulusThreshold:returnperm[mask] += self._synPermBelowStimulusInc", "docstring": "This method ensures that each column has enough connections to input bits\nto allow it to become active. Since a column must have at least\n'self._stimulusThreshold' overlaps in order to be considered during the\ninhibition phase, columns without such minimal number of connections, even\nif all the input bits they are connected to turn on, have no chance of\nobtaining the minimum threshold. For such columns, the permanence values\nare increased until the minimum number of connections are formed.\n\n\nParameters:\n----------------------------\n:param perm: An array of permanence values for a column. The array is\n \"dense\", i.e. it contains an entry for each input bit, even\n if the permanence value is 0.\n:param mask: the indices of the columns whose permanences need to be\n raised.", "id": "f17561:c4:m72"} {"signature": "def _updatePermanencesForColumn(self, perm, columnIndex, raisePerm=True):", "body": "maskPotential = numpy.where(self._potentialPools[columnIndex] > )[]if raisePerm:self._raisePermanenceToThreshold(perm, maskPotential)perm[perm < self._synPermTrimThreshold] = numpy.clip(perm, self._synPermMin, self._synPermMax, out=perm)newConnected = numpy.where(perm >=self._synPermConnected - PERMANENCE_EPSILON)[]self._permanences.update(columnIndex, perm)self._connectedSynapses.replace(columnIndex, newConnected)self._connectedCounts[columnIndex] = newConnected.size", "docstring": "This method updates the permanence matrix with a column's new permanence\nvalues. The column is identified by its index, which reflects the row in\nthe matrix, and the permanence is given in 'dense' form, i.e. a full\narray containing all the zeros as well as the non-zero values. It is in\ncharge of implementing 'clipping' - ensuring that the permanence values are\nalways between 0 and 1 - and 'trimming' - enforcing sparsity by zeroing out\nall permanence values below '_synPermTrimThreshold'. It also maintains\nthe consistency between 'self._permanences' (the matrix storing the\npermanence values), 'self._connectedSynapses', (the matrix storing the bits\neach column is connected to), and 'self._connectedCounts' (an array storing\nthe number of input bits each column is connected to). 
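A rough sketch of the learning rule described for _adaptSynapses: one vector of permanence deltas is built over the whole input (increment where the bit is on, decrement where it is off) and applied to each active column's potential synapses. The increment/decrement values and the clipping to [0, 1] below are assumptions for illustration.

import numpy as np

def adapt_synapses(input_vector, active_columns, permanences, potential_pools,
                   syn_perm_active_inc=0.05, syn_perm_inactive_dec=0.008):
    # Default delta: decrement; overwrite with an increment for "on" input bits.
    perm_changes = np.full(input_vector.size, -syn_perm_inactive_dec)
    perm_changes[input_vector > 0] = syn_perm_active_inc
    for col in active_columns:
        mask = np.where(potential_pools[col] > 0)[0]
        permanences[col, mask] += perm_changes[mask]
        np.clip(permanences[col], 0.0, 1.0, out=permanences[col])
    return permanences

input_vec = np.array([1, 0, 1, 0])
perms = np.full((2, 4), 0.10)
pools = np.ones((2, 4), dtype=int)
print(adapt_synapses(input_vec, [0], perms, pools)[0])
# -> approximately [0.15 0.092 0.15 0.092]; column 1 is left untouched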
Every method wishing\nto modify the permanence matrix should do so through this method.\n\nParameters:\n----------------------------\n:param perm: An array of permanence values for a column. The array is\n \"dense\", i.e. it contains an entry for each input bit, even\n if the permanence value is 0.\n:param index: The index identifying a column in the permanence, potential\n and connectivity matrices\n:param raisePerm: A boolean value indicating whether the permanence values\n should be raised until a minimum number are synapses are in\n a connected state. Should be set to 'false' when a direct\n assignment is required.", "id": "f17561:c4:m73"} {"signature": "def _initPermConnected(self):", "body": "p = self._synPermConnected + (self._synPermMax - self._synPermConnected)*self._random.getReal64()p = int(p*) / return p", "docstring": "Returns a randomly generated permanence value for a synapses that is\ninitialized in a connected state. The basic idea here is to initialize\npermanence values very close to synPermConnected so that a small number of\nlearning steps could make it disconnected or connected.\n\nNote: experimentation was done a long time ago on the best way to initialize\npermanence values, but the history for this particular scheme has been lost.", "id": "f17561:c4:m74"} {"signature": "def _initPermNonConnected(self):", "body": "p = self._synPermConnected * self._random.getReal64()p = int(p*) / return p", "docstring": "Returns a randomly generated permanence value for a synapses that is to be\ninitialized in a non-connected state.", "id": "f17561:c4:m75"} {"signature": "def _initPermanence(self, potential, connectedPct):", "body": "perm = numpy.zeros(self._numInputs, dtype=realDType)for i in range(self._numInputs):if (potential[i] < ):continueif (self._random.getReal64() <= connectedPct):perm[i] = self._initPermConnected()else:perm[i] = self._initPermNonConnected()perm[perm < self._synPermTrimThreshold] = return perm", "docstring": "Initializes the permanences of a column. The method\nreturns a 1-D array the size of the input, where each entry in the\narray represents the initial permanence value between the input bit\nat the particular index in the array, and the column represented by\nthe 'index' parameter.\n\nParameters:\n----------------------------\n:param potential: A numpy array specifying the potential pool of the column.\n Permanence values will only be generated for input bits\n corresponding to indices for which the mask value is 1.\n:param connectedPct: A value between 0 or 1 governing the chance, for each\n permanence, that the initial permanence value will\n be a value that is considered connected.", "id": "f17561:c4:m76"} {"signature": "def _mapColumn(self, index):", "body": "columnCoords = numpy.unravel_index(index, self._columnDimensions)columnCoords = numpy.array(columnCoords, dtype=realDType)ratios = columnCoords / self._columnDimensionsinputCoords = self._inputDimensions * ratiosinputCoords += * self._inputDimensions / self._columnDimensionsinputCoords = inputCoords.astype(int)inputIndex = numpy.ravel_multi_index(inputCoords, self._inputDimensions)return inputIndex", "docstring": "Maps a column to its respective input index, keeping to the topology of\nthe region. It takes the index of the column as an argument and determines\nwhat is the index of the flattened input vector that is to be the center of\nthe column's potential pool. It distributes the columns over the inputs\nuniformly. The return value is an integer representing the index of the\ninput bit. 
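An illustrative version of the raise-to-threshold loop described for _raisePermanenceToThreshold: keep bumping a column's potential synapses until at least stimulusThreshold of them reach the connected permanence. The threshold and increment values used here are assumptions.

import numpy as np

def raise_permanence_to_threshold(perm, potential_mask,
                                  syn_perm_connected=0.10,
                                  syn_perm_below_stimulus_inc=0.01,
                                  stimulus_threshold=3):
    # Clip first, then bump only the potential synapses until enough of them
    # are in the connected state.
    np.clip(perm, 0.0, 1.0, out=perm)
    while np.count_nonzero(perm >= syn_perm_connected) < stimulus_threshold:
        perm[potential_mask] += syn_perm_below_stimulus_inc
    return perm

perm = np.array([0.02, 0.05, 0.09, 0.0])
mask = np.array([0, 1, 2])   # only the first three inputs are potential synapses
print(raise_permanence_to_threshold(perm, mask))
# after enough bumps, at least 3 of the potential synapses are connected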
Examples of the expected output of this method:\n* If the topology is one dimensional, and the column index is 0, this\n method will return the input index 0. If the column index is 1, and there\n are 3 columns over 7 inputs, this method will return the input index 3.\n* If the topology is two dimensional, with column dimensions [3, 5] and\n input dimensions [7, 11], and the column index is 3, the method\n returns input index 8.\n\nParameters:\n----------------------------\n:param index: The index identifying a column in the permanence, potential\n and connectivity matrices.\n:param wrapAround: A boolean value indicating that boundaries should be\n ignored.", "id": "f17561:c4:m77"} {"signature": "def _mapPotential(self, index):", "body": "centerInput = self._mapColumn(index)columnInputs = self._getInputNeighborhood(centerInput).astype(uintType)numPotential = int(columnInputs.size * self._potentialPct + )selectedInputs = numpy.empty(numPotential, dtype=uintType)self._random.sample(columnInputs, selectedInputs)potential = numpy.zeros(self._numInputs, dtype=uintType)potential[selectedInputs] = return potential", "docstring": "Maps a column to its input bits. This method encapsulates the topology of\nthe region. It takes the index of the column as an argument and determines\nwhat are the indices of the input vector that are located within the\ncolumn's potential pool. The return value is a list containing the indices\nof the input bits. The current implementation of the base class only\nsupports a 1 dimensional topology of columns with a 1 dimensional topology\nof inputs. To extend this class to support 2-D topology you will need to\noverride this method. Examples of the expected output of this method:\n* If the potentialRadius is greater than or equal to the largest input\n dimension then each column connects to all of the inputs.\n* If the topology is one dimensional, the input space is divided up evenly\n among the columns and each column is centered over its share of the\n inputs. If the potentialRadius is 5, then each column connects to the\n input it is centered above as well as the 5 inputs to the left of that\n input and the five inputs to the right of that input, wrapping around if\n wrapAround=True.\n* If the topology is two dimensional, the input space is again divided up\n evenly among the columns and each column is centered above its share of\n the inputs. If the potentialRadius is 5, the column connects to a square\n that has 11 inputs on a side and is centered on the input that the column\n is centered above.\n\nParameters:\n----------------------------\n:param index: The index identifying a column in the permanence, potential\n and connectivity matrices.", "id": "f17561:c4:m78"} {"signature": "@staticmethoddef _updateDutyCyclesHelper(dutyCycles, newInput, period):", "body": "assert(period >= )return (dutyCycles * (period -) + newInput) / period", "docstring": "Updates a duty cycle estimate with a new value. This is a helper\nfunction that is used to update several duty cycle variables in\nthe Column class, such as: overlapDutyCucle, activeDutyCycle,\nminPctDutyCycleBeforeInh, minPctDutyCycleAfterInh, etc. returns\nthe updated duty cycle. 
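A small sketch of the column-to-input mapping described for _mapColumn: convert the column's coordinates to fractional positions in column space, scale them into input space, and add a half-share offset so the column sits over the center of its portion of the inputs. The helper below is illustrative only; it reproduces the docstring's one-dimensional example.

import numpy as np

def map_column(column_index, column_dims, input_dims):
    column_dims = np.asarray(column_dims, dtype=float)
    input_dims = np.asarray(input_dims, dtype=float)
    coords = np.array(np.unravel_index(column_index,
                                       tuple(int(d) for d in column_dims)),
                      dtype=float)
    ratios = coords / column_dims
    input_coords = input_dims * ratios + 0.5 * input_dims / column_dims
    return int(np.ravel_multi_index(tuple(input_coords.astype(int)),
                                    tuple(int(d) for d in input_dims)))

# Matches the 1-D example in the docstring: column 1 of 3 columns over 7 inputs
print(map_column(1, [3], [7]))   # -> 3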
Duty cycles are updated according to the following\nformula:\n\n (period - 1)*dutyCycle + newValue\n dutyCycle := ----------------------------------\n period\n\nParameters:\n----------------------------\n:param dutyCycles: An array containing one or more duty cycle values that need\n to be updated\n:param newInput: A new numerical value used to update the duty cycle\n:param period: The period of the duty cycle", "id": "f17561:c4:m79"} {"signature": "def _updateBoostFactors(self):", "body": "if self._globalInhibition:self._updateBoostFactorsGlobal()else:self._updateBoostFactorsLocal()", "docstring": "Update the boost factors for all columns. The boost factors are used to\nincrease the overlap of inactive columns to improve their chances of\nbecoming active, and hence encourage participation of more columns in the\nlearning process. The boosting function is a curve defined as:\nboostFactors = exp[ - boostStrength * (dutyCycle - targetDensity)]\nIntuitively this means that columns that have been active at the target\nactivation level have a boost factor of 1, meaning their overlap is not\nboosted. Columns whose active duty cycle drops too much below that of their\nneighbors are boosted depending on how infrequently they have been active.\nColumns that has been active more than the target activation level have\na boost factor below 1, meaning their overlap is suppressed\n\nThe boostFactor depends on the activeDutyCycle via an exponential function:\n\n boostFactor\n ^\n |\n |\\\n | \\\n 1 _ | \\\n | _\n | _ _\n | _ _ _ _\n +--------------------> activeDutyCycle\n |\n targetDensity", "id": "f17561:c4:m80"} {"signature": "def _updateBoostFactorsGlobal(self):", "body": "if (self._localAreaDensity > ):targetDensity = self._localAreaDensityelse:inhibitionArea = (( * self._inhibitionRadius + )** self._columnDimensions.size)inhibitionArea = min(self._numColumns, inhibitionArea)targetDensity = float(self._numActiveColumnsPerInhArea) / inhibitionAreatargetDensity = min(targetDensity, )self._boostFactors = numpy.exp((targetDensity - self._activeDutyCycles) * self._boostStrength)", "docstring": "Update boost factors when global inhibition is used", "id": "f17561:c4:m81"} {"signature": "def _updateBoostFactorsLocal(self):", "body": "targetDensity = numpy.zeros(self._numColumns, dtype=realDType)for i in range(self._numColumns):maskNeighbors = self._getColumnNeighborhood(i)targetDensity[i] = numpy.mean(self._activeDutyCycles[maskNeighbors])self._boostFactors = numpy.exp((targetDensity - self._activeDutyCycles) * self._boostStrength)", "docstring": "Update boost factors when local inhibition is used", "id": "f17561:c4:m82"} {"signature": "def _updateBookeepingVars(self, learn):", "body": "self._iterationNum += if learn:self._iterationLearnNum += ", "docstring": "Updates counter instance variables each round.\n\nParameters:\n----------------------------\n:param learn: a boolean value indicating whether learning should be\n performed. Learning entails updating the permanence\n values of the synapses, and hence modifying the 'state'\n of the model. setting learning to 'off' might be useful\n for indicating separate training vs. testing sets.", "id": "f17561:c4:m83"} {"signature": "def _calculateOverlap(self, inputVector):", "body": "overlaps = numpy.zeros(self._numColumns, dtype=realDType)self._connectedSynapses.rightVecSumAtNZ_fast(inputVector.astype(realDType),overlaps)return overlaps", "docstring": "This function determines each column's overlap with the current input\nvector. 
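The duty-cycle update formula and the exponential boosting rule quoted above reduce to two one-line numpy expressions; the target density and boost strength below are arbitrary illustrative values.

import numpy as np

def update_duty_cycles(duty_cycles, new_input, period):
    # dutyCycle := ((period - 1) * dutyCycle + newValue) / period
    return (duty_cycles * (period - 1) + new_input) / period

def boost_factors(active_duty_cycles, target_density, boost_strength):
    # exp[-boostStrength * (dutyCycle - targetDensity)]: factor 1 at the target
    # density, > 1 for under-active columns, < 1 for over-active ones.
    return np.exp((target_density - active_duty_cycles) * boost_strength)

duty = update_duty_cycles(np.array([0.0, 0.5, 1.0]), np.array([1, 1, 0]), period=10)
print(duty)                            # -> [0.1  0.55 0.9 ]
print(boost_factors(duty, 0.5, 2.0))   # the under-active column gets the largest boost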
The overlap of a column is the number of synapses for that column\nthat are connected (permanence value is greater than '_synPermConnected')\nto input bits which are turned on. The implementation takes advantage of\nthe SparseBinaryMatrix class to perform this calculation efficiently.\n\nParameters:\n----------------------------\n:param inputVector: a numpy array of 0's and 1's that comprises the input to\n the spatial pooler.", "id": "f17561:c4:m84"} {"signature": "def _inhibitColumns(self, overlaps):", "body": "if (self._localAreaDensity > ):density = self._localAreaDensityelse:inhibitionArea = ((*self._inhibitionRadius + )** self._columnDimensions.size)inhibitionArea = min(self._numColumns, inhibitionArea)density = float(self._numActiveColumnsPerInhArea) / inhibitionAreadensity = min(density, )if self._globalInhibition orself._inhibitionRadius > max(self._columnDimensions):return self._inhibitColumnsGlobal(overlaps, density)else:return self._inhibitColumnsLocal(overlaps, density)", "docstring": "Performs inhibition. This method calculates the necessary values needed to\nactually perform inhibition and then delegates the task of picking the\nactive columns to helper functions.\n\nParameters:\n----------------------------\n:param overlaps: an array containing the overlap score for each column.\n The overlap score for a column is defined as the number\n of synapses in a \"connected state\" (connected synapses)\n that are connected to input bits which are turned on.", "id": "f17561:c4:m86"} {"signature": "def _inhibitColumnsGlobal(self, overlaps, density):", "body": "numActive = int(density * self._numColumns)sortedWinnerIndices = numpy.argsort(overlaps, kind='')start = len(sortedWinnerIndices) - numActivewhile start < len(sortedWinnerIndices):i = sortedWinnerIndices[start]if overlaps[i] >= self._stimulusThreshold:breakelse:start += return sortedWinnerIndices[start:][::-]", "docstring": "Perform global inhibition. Performing global inhibition entails picking the\ntop 'numActive' columns with the highest overlap score in the entire\nregion. At most half of the columns in a local neighborhood are allowed to\nbe active. Columns with an overlap score below the 'stimulusThreshold' are\nalways inhibited.\n\n:param overlaps: an array containing the overlap score for each column.\n The overlap score for a column is defined as the number\n of synapses in a \"connected state\" (connected synapses)\n that are connected to input bits which are turned on.\n:param density: The fraction of columns to survive inhibition.\n@return list with indices of the winning columns", "id": "f17561:c4:m87"} {"signature": "def _inhibitColumnsLocal(self, overlaps, density):", "body": "activeArray = numpy.zeros(self._numColumns, dtype=\"\")for column, overlap in enumerate(overlaps):if overlap >= self._stimulusThreshold:neighborhood = self._getColumnNeighborhood(column)neighborhoodOverlaps = overlaps[neighborhood]numBigger = numpy.count_nonzero(neighborhoodOverlaps > overlap)ties = numpy.where(neighborhoodOverlaps == overlap)tiedNeighbors = neighborhood[ties]numTiesLost = numpy.count_nonzero(activeArray[tiedNeighbors])numActive = int( + density * len(neighborhood))if numBigger + numTiesLost < numActive:activeArray[column] = Truereturn activeArray.nonzero()[]", "docstring": "Performs local inhibition. Local inhibition is performed on a column by\ncolumn basis. Each column observes the overlaps of its neighbors and is\nselected if its overlap score is within the top 'numActive' in its local\nneighborhood. 
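Global inhibition as described for _inhibitColumnsGlobal amounts to taking the top-N columns by overlap while discarding anything below the stimulus threshold; a compact, argsort-based restatement with assumed values:

import numpy as np

def inhibit_columns_global(overlaps, density, stimulus_threshold=1):
    num_active = int(density * overlaps.size)
    order = np.argsort(overlaps, kind="mergesort")   # ascending, stable sort
    start = overlaps.size - num_active
    # Skip would-be winners that fall below the stimulus threshold.
    while start < overlaps.size and overlaps[order[start]] < stimulus_threshold:
        start += 1
    return order[start:][::-1]                       # winning columns, best first

overlaps = np.array([3.0, 0.0, 7.0, 2.0, 5.0])
print(inhibit_columns_global(overlaps, density=0.6))   # -> [2 4 0]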
At most half of the columns in a local neighborhood are\nallowed to be active. Columns with an overlap score below the\n'stimulusThreshold' are always inhibited.\n\n:param overlaps: an array containing the overlap score for each column.\n The overlap score for a column is defined as the number\n of synapses in a \"connected state\" (connected synapses)\n that are connected to input bits which are turned on.\n:param density: The fraction of columns to survive inhibition. This\n value is only an intended target. Since the surviving\n columns are picked in a local fashion, the exact fraction\n of surviving columns is likely to vary.\n@return list with indices of the winning columns", "id": "f17561:c4:m88"} {"signature": "def _isUpdateRound(self):", "body": "return (self._iterationNum % self._updatePeriod) == ", "docstring": "returns true if enough rounds have passed to warrant updates of\nduty cycles", "id": "f17561:c4:m89"} {"signature": "def _getColumnNeighborhood(self, centerColumn):", "body": "if self._wrapAround:return topology.wrappingNeighborhood(centerColumn,self._inhibitionRadius,self._columnDimensions)else:return topology.neighborhood(centerColumn,self._inhibitionRadius,self._columnDimensions)", "docstring": "Gets a neighborhood of columns.\n\nSimply calls topology.neighborhood or topology.wrappingNeighborhood\n\nA subclass can insert different topology behavior by overriding this method.\n\n:param centerColumn (int)\nThe center of the neighborhood.\n\n@returns (1D numpy array of integers)\nThe columns in the neighborhood.", "id": "f17561:c4:m90"} {"signature": "def _getInputNeighborhood(self, centerInput):", "body": "if self._wrapAround:return topology.wrappingNeighborhood(centerInput,self._potentialRadius,self._inputDimensions)else:return topology.neighborhood(centerInput,self._potentialRadius,self._inputDimensions)", "docstring": "Gets a neighborhood of inputs.\n\nSimply calls topology.wrappingNeighborhood or topology.neighborhood.\n\nA subclass can insert different topology behavior by overriding this method.\n\n:param centerInput (int)\nThe center of the neighborhood.\n\n@returns (1D numpy array of integers)\nThe inputs in the neighborhood.", "id": "f17561:c4:m91"} {"signature": "def _seed(self, seed=-):", "body": "if seed != -:self._random = NupicRandom(seed)else:self._random = NupicRandom()", "docstring": "Initialize the random seed", "id": "f17561:c4:m92"} {"signature": "def __setstate__(self, state):", "body": "if state[''] < :state[''] = Trueif state[''] < :state[''] = numpy.zeros(self._numColumns, dtype=realDType)state[''] = numpy.zeros(self._numColumns, dtype=realDType)state[''] = VERSIONself.__dict__.update(state)", "docstring": "Initialize class properties from stored values.", "id": "f17561:c4:m93"} {"signature": "def printParameters(self):", "body": "print(\"\")print(\"\", self.getNumInputs())print(\"\", self.getNumColumns())print(\"\", self._columnDimensions)print(\"\", self.getNumActiveColumnsPerInhArea())print(\"\", self.getPotentialPct())print(\"\", self.getGlobalInhibition())print(\"\", self.getLocalAreaDensity())print(\"\", self.getStimulusThreshold())print(\"\", self.getSynPermActiveInc())print(\"\", self.getSynPermInactiveDec())print(\"\", self.getSynPermConnected())print(\"\", self.getMinPctOverlapDutyCycles())print(\"\", self.getDutyCyclePeriod())print(\"\", self.getBoostStrength())print(\"\", self.getSpVerbosity())print(\"\", self._version)", "docstring": "Useful for debugging.", "id": "f17561:c4:m97"} {"signature": "def _extractCallingMethodArgs():", 
"body": "import inspectimport copycallingFrame = inspect.stack()[][]argNames, _, _, frameLocalVarDict = inspect.getargvalues(callingFrame)argNames.remove(\"\")args = copy.copy(frameLocalVarDict)for varName in frameLocalVarDict:if varName not in argNames:args.pop(varName)return args", "docstring": "Returns args dictionary from the calling method", "id": "f17562:m0"} {"signature": "def write(self, proto):", "body": "super(BacktrackingTMCPP, self).write(proto.baseTM)self.cells4.write(proto.cells4)proto.makeCells4Ephemeral = self.makeCells4Ephemeralproto.seed = self.seedproto.checkSynapseConsistency = self.checkSynapseConsistencyproto.initArgs = json.dumps(self._initArgsDict)", "docstring": "Populate serialization proto instance.\n\n :param proto: (BacktrackingTMCppProto) the proto instance to populate", "id": "f17562:c0:m2"} {"signature": "@classmethoddef read(cls, proto):", "body": "obj = BacktrackingTM.read(proto.baseTM)obj.__class__ = clsnewCells4 = Cells4.read(proto.cells4)print(newCells4)obj.cells4 = newCells4obj.makeCells4Ephemeral = proto.makeCells4Ephemeralobj.seed = proto.seedobj.checkSynapseConsistency = proto.checkSynapseConsistencyobj._initArgsDict = json.loads(proto.initArgs)obj._initArgsDict[\"\"] = str(obj._initArgsDict[\"\"])obj.allocateStatesInCPP = Falseobj.retrieveLearningStates = Falseobj._setStatePointers()return obj", "docstring": "Deserialize from proto instance.\n\n :param proto: (BacktrackingTMCppProto) the proto instance to read from", "id": "f17562:c0:m3"} {"signature": "def __setstate__(self, state):", "body": "super(BacktrackingTMCPP, self).__setstate__(state)if self.makeCells4Ephemeral:self._initCells4()", "docstring": "Set the state of ourself from a serialized state.", "id": "f17562:c0:m5"} {"signature": "def _getEphemeralMembers(self):", "body": "e = BacktrackingTM._getEphemeralMembers(self)if self.makeCells4Ephemeral:e.extend([''])return e", "docstring": "List of our member variables that we don't need to be saved", "id": "f17562:c0:m6"} {"signature": "def _initEphemerals(self):", "body": "BacktrackingTM._initEphemerals(self)self.allocateStatesInCPP = Falseself.retrieveLearningStates = Falseif self.makeCells4Ephemeral:self._initCells4()", "docstring": "Initialize all ephemeral members after being restored to a pickled state.", "id": "f17562:c0:m7"} {"signature": "def saveToFile(self, filePath):", "body": "self.cells4.saveToFile(filePath)", "docstring": "Save Cells4 state to a file. File can be loaded with :meth:`loadFromFile`.", "id": "f17562:c0:m8"} {"signature": "def loadFromFile(self, filePath):", "body": "self._setStatePointers()self.cells4.loadFromFile(filePath)", "docstring": "Load Cells4 state from a file saved with :meth:`saveToFile`.", "id": "f17562:c0:m9"} {"signature": "def __getattr__(self, name):", "body": "try:return super(BacktrackingTMCPP, self).__getattr__(name)except AttributeError:raise AttributeError(\"\" % name)", "docstring": "Patch __getattr__ so that we can catch the first access to 'cells' and load.\n\nThis function is only called when we try to access an attribute that doesn't\nexist. 
We purposely make sure that \"self.cells\" doesn't exist after\nunpickling so that we'll hit this, then we can load it on the first access.\n\nIf this is called at any other time, it will raise an AttributeError.\nThat's because:\n- If 'name' is \"cells\", after the first call, self._realCells won't exist\n so we'll get an implicit AttributeError.\n- If 'name' isn't \"cells\", I'd expect our super wouldn't have __getattr__,\n so we'll raise our own Attribute error. If the super did get __getattr__,\n we'll just return what it gives us.", "id": "f17562:c0:m10"} {"signature": "def compute(self, bottomUpInput, enableLearn, enableInference=None):", "body": "assert (bottomUpInput.dtype == numpy.dtype('')) or(bottomUpInput.dtype == numpy.dtype('')) or(bottomUpInput.dtype == numpy.dtype(''))self.iterationIdx = self.iterationIdx + if enableInference is None:if enableLearn:enableInference = Falseelse:enableInference = Trueself._setStatePointers()y = self.cells4.compute(bottomUpInput, enableInference, enableLearn)self.currentOutput = y.reshape((self.numberOfCols, self.cellsPerColumn))self.avgLearnedSeqLength = self.cells4.getAvgLearnedSeqLength()self._copyAllocatedStates()if self.collectStats:activeColumns = bottomUpInput.nonzero()[]if enableInference:predictedState = self.infPredictedState['']else:predictedState = self.lrnPredictedState['']self._updateStatsInferEnd(self._internalStats,activeColumns,predictedState,self.colConfidence[''])output = self._computeOutput()self.printComputeEnd(output, learn=enableLearn)self.resetCalled = Falsereturn output", "docstring": "Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.compute`.", "id": "f17562:c0:m11"} {"signature": "def _inferPhase2(self):", "body": "self._setStatePointers()self.cells4.inferPhase2()self._copyAllocatedStates()", "docstring": "This calls phase 2 of inference (used in multistep prediction).", "id": "f17562:c0:m12"} {"signature": "def _copyAllocatedStates(self):", "body": "if self.verbosity > or self.retrieveLearningStates:(activeT, activeT1, predT, predT1) = self.cells4.getLearnStates()self.lrnActiveState[''] = activeT1.reshape((self.numberOfCols, self.cellsPerColumn))self.lrnActiveState[''] = activeT.reshape((self.numberOfCols, self.cellsPerColumn))self.lrnPredictedState[''] = predT1.reshape((self.numberOfCols, self.cellsPerColumn))self.lrnPredictedState[''] = predT.reshape((self.numberOfCols, self.cellsPerColumn))if self.allocateStatesInCPP:assert False(activeT, activeT1, predT, predT1, colConfidenceT, colConfidenceT1, confidenceT,confidenceT1) = self.cells4.getStates()self.cellConfidence[''] = confidenceT.reshape((self.numberOfCols, self.cellsPerColumn))self.cellConfidence[''] = confidenceT1.reshape((self.numberOfCols, self.cellsPerColumn))self.colConfidence[''] = colConfidenceT.reshape(self.numberOfCols)self.colConfidence[''] = colConfidenceT1.reshape(self.numberOfCols)self.infActiveState[''] = activeT1.reshape((self.numberOfCols, self.cellsPerColumn))self.infActiveState[''] = activeT.reshape((self.numberOfCols, self.cellsPerColumn))self.infPredictedState[''] = predT1.reshape((self.numberOfCols, self.cellsPerColumn))self.infPredictedState[''] = predT.reshape((self.numberOfCols, self.cellsPerColumn))", "docstring": "If state is allocated in CPP, copy over the data into our numpy arrays.", "id": "f17562:c0:m14"} {"signature": "def _setStatePointers(self):", "body": "if not self.allocateStatesInCPP:self.cells4.setStatePointers(self.infActiveState[\"\"], self.infActiveState[\"\"],self.infPredictedState[\"\"], 
self.infPredictedState[\"\"],self.colConfidence[\"\"], self.colConfidence[\"\"],self.cellConfidence[\"\"], self.cellConfidence[\"\"])", "docstring": "If we are having CPP use numpy-allocated buffers, set these buffer\n pointers. This is a relatively fast operation and, for safety, should be\n done before every call to the cells4 compute methods. This protects us\n in situations where code can cause Python or numpy to create copies.", "id": "f17562:c0:m15"} {"signature": "def reset(self):", "body": "if self.verbosity >= :print(\"\")self._setStatePointers()self.cells4.reset()BacktrackingTM.reset(self)", "docstring": "Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.reset`.", "id": "f17562:c0:m16"} {"signature": "def finishLearning(self):", "body": "self.trimSegments(minPermanence=)", "docstring": "Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.finishLearning`.", "id": "f17562:c0:m17"} {"signature": "def trimSegments(self, minPermanence=None, minNumSyns=None):", "body": "if minPermanence is None:minPermanence = if minNumSyns is None:minNumSyns = if self.verbosity >= :print(\"\")self.printCells(predictedOnly=False)return self.cells4.trimSegments(minPermanence=minPermanence, minNumSyns=minNumSyns)", "docstring": "Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.trimSegments`.", "id": "f17562:c0:m18"} {"signature": "def printSegmentUpdates(self):", "body": "assert Falseprint(\"\", len(self.segmentUpdates))for key, updateList in self.segmentUpdates.items():c,i = key[],key[]print(c,i,updateList)", "docstring": "Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.printSegmentUpdates`.", "id": "f17562:c0:m20"} {"signature": "def _slowIsSegmentActive(self, seg, timeStep):", "body": "numSyn = seg.size()numActiveSyns = for synIdx in range(numSyn):if seg.getPermanence(synIdx) < self.connectedPerm:continuesc, si = self.getColCellIdx(seg.getSrcCellIdx(synIdx))if self.infActiveState[timeStep][sc, si]:numActiveSyns += if numActiveSyns >= self.activationThreshold:return Truereturn numActiveSyns >= self.activationThreshold", "docstring": "A segment is active if it has >= activationThreshold connected\nsynapses that are active due to infActiveState.", "id": "f17562:c0:m21"} {"signature": "def printCell(self, c, i, onlyActiveSegments=False):", "body": "nSegs = self.cells4.nSegmentsOnCell(c,i)if nSegs > :segList = self.cells4.getNonEmptySegList(c,i)gidx = c * self.cellsPerColumn + iprint(\"\", c, \"\", i, \"\"%(gidx),\"\", nSegs, \"\")for k,segIdx in enumerate(segList):seg = self.cells4.getSegment(c, i, segIdx)isActive = self._slowIsSegmentActive(seg, '')if onlyActiveSegments and not isActive:continueisActiveStr = \"\" if isActive else \"\"print(\"\" % (isActiveStr, segIdx), end='')print(seg.size(), end='')print(seg.isSequenceSegment(), \"\" % (seg.dutyCycle(self.cells4.getNLrnIterations(), False, True)), end='')print(\"\" % (seg.getPositiveActivations(),seg.getTotalActivations()), end='')print(\"\" % (self.cells4.getNLrnIterations()- seg.getLastActiveIteration()), end='')numSyn = seg.size()for s in range(numSyn):sc, si = self.getColCellIdx(seg.getSrcCellIdx(s))print(\"\"%(sc, si, seg.getPermanence(s)), end='')print()", "docstring": "Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.printCell`.", "id": "f17562:c0:m22"} {"signature": "def getAvgLearnedSeqLength(self):", "body": "return self.cells4.getAvgLearnedSeqLength()", "docstring": "Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getAvgLearnedSeqLength`.", "id": 
"f17562:c0:m23"} {"signature": "def getColCellIdx(self, idx):", "body": "c = idx//self.cellsPerColumni = idx - c*self.cellsPerColumnreturn c,i", "docstring": "Get column and cell within column from a global cell index.\nThe global index is ``idx = colIdx * nCellsPerCol() + cellIdxInCol``\n\n:param idx: (int) global cell index\n:returns: (tuple) (colIdx, cellIdxInCol)", "id": "f17562:c0:m24"} {"signature": "def getSegmentOnCell(self, c, i, segIdx):", "body": "segList = self.cells4.getNonEmptySegList(c,i)seg = self.cells4.getSegment(c, i, segList[segIdx])numSyn = seg.size()assert numSyn != result = []result.append([int(segIdx), bool(seg.isSequenceSegment()),seg.getPositiveActivations(),seg.getTotalActivations(), seg.getLastActiveIteration(),seg.getLastPosDutyCycle(),seg.getLastPosDutyCycleIteration()])for s in range(numSyn):sc, si = self.getColCellIdx(seg.getSrcCellIdx(s))result.append([int(sc), int(si), seg.getPermanence(s)])return result", "docstring": "Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentOnCell`.", "id": "f17562:c0:m25"} {"signature": "def getNumSegments(self):", "body": "return self.cells4.nSegments()", "docstring": "Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getNumSegments`.", "id": "f17562:c0:m26"} {"signature": "def getNumSynapses(self):", "body": "return self.cells4.nSynapses()", "docstring": "Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getNumSynapses`.", "id": "f17562:c0:m27"} {"signature": "def getNumSegmentsInCell(self, c, i):", "body": "return self.cells4.nSegmentsOnCell(c,i)", "docstring": "Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getNumSegmentsInCell`.", "id": "f17562:c0:m28"} {"signature": "def getSegmentInfo(self, collectActiveData = False):", "body": "assert collectActiveData == FalsenSegments, nSynapses = self.getNumSegments(), self.cells4.nSynapses()distSegSizes, distNSegsPerCell = {}, {}nActiveSegs, nActiveSynapses = , distPermValues = {} numAgeBuckets = distAges = []ageBucketSize = int((self.iterationIdx+) / )for i in range(numAgeBuckets):distAges.append(['' % (i*ageBucketSize, (i+)*ageBucketSize-), ])for c in range(self.numberOfCols):for i in range(self.cellsPerColumn):nSegmentsThisCell = self.getNumSegmentsInCell(c,i)if nSegmentsThisCell > :if nSegmentsThisCell in distNSegsPerCell:distNSegsPerCell[nSegmentsThisCell] += else:distNSegsPerCell[nSegmentsThisCell] = segList = self.cells4.getNonEmptySegList(c,i)for segIdx in range(nSegmentsThisCell):seg = self.getSegmentOnCell(c, i, segIdx)nSynapsesThisSeg = len(seg) - if nSynapsesThisSeg > :if nSynapsesThisSeg in distSegSizes:distSegSizes[nSynapsesThisSeg] += else:distSegSizes[nSynapsesThisSeg] = for syn in seg[:]:p = int(syn[]*)if p in distPermValues:distPermValues[p] += else:distPermValues[p] = segObj = self.cells4.getSegment(c, i, segList[segIdx])age = self.iterationIdx - segObj.getLastActiveIteration()ageBucket = int(age/ageBucketSize)distAges[ageBucket][] += return (nSegments, nSynapses, nActiveSegs, nActiveSynapses,distSegSizes, distNSegsPerCell, distPermValues, distAges)", "docstring": "Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentInfo`.", "id": "f17562:c0:m29"} {"signature": "def computeRawAnomalyScore(activeColumns, prevPredictedColumns):", "body": "nActiveColumns = len(activeColumns)if nActiveColumns > :score = numpy.in1d(activeColumns, prevPredictedColumns).sum()score = (nActiveColumns - score) / float(nActiveColumns)else:score = return score", "docstring": "Computes the raw 
anomaly score.\n\n The raw anomaly score is the fraction of active columns not predicted.\n\n :param activeColumns: array of active column indices\n :param prevPredictedColumns: array of columns indices predicted in prev step\n :returns: anomaly score 0..1 (float)", "id": "f17563:m0"} {"signature": "def compute(self, activeColumns, predictedColumns,inputValue=None, timestamp=None):", "body": "anomalyScore = computeRawAnomalyScore(activeColumns, predictedColumns)if self._mode == Anomaly.MODE_PURE:score = anomalyScoreelif self._mode == Anomaly.MODE_LIKELIHOOD:if inputValue is None:raise ValueError(\"\"\"\")probability = self._likelihood.anomalyProbability(inputValue, anomalyScore, timestamp)score = - probabilityelif self._mode == Anomaly.MODE_WEIGHTED:probability = self._likelihood.anomalyProbability(inputValue, anomalyScore, timestamp)score = anomalyScore * ( - probability)if self._movingAverage is not None:score = self._movingAverage.next(score)if self._binaryThreshold is not None:if score >= self._binaryThreshold:score = else:score = return score", "docstring": "Compute the anomaly score as the percent of active columns not predicted.\n\n :param activeColumns: array of active column indices\n :param predictedColumns: array of columns indices predicted in this step\n (used for anomaly in step T+1)\n :param inputValue: (optional) value of current input to encoders\n (eg \"cat\" for category encoder)\n (used in anomaly-likelihood)\n :param timestamp: (optional) date timestamp when the sample occured\n (used in anomaly-likelihood)\n :returns: the computed anomaly score; float 0..1", "id": "f17563:c0:m1"} {"signature": "def __setstate__(self, state):", "body": "self.__dict__.update(state)if not hasattr(self, ''):self._mode = Anomaly.MODE_PUREif not hasattr(self, ''):self._movingAverage = Noneif not hasattr(self, ''):self._binaryThreshold = None", "docstring": "deserialization", "id": "f17563:c0:m4"} {"signature": "@staticmethoddef create(*args, **kwargs):", "body": "impl = kwargs.pop('', None)if impl is None:impl = Configuration.get('')if impl == '':return SDRClassifier(*args, **kwargs)elif impl == '':return FastSDRClassifier(*args, **kwargs)elif impl == '':return SDRClassifierDiff(*args, **kwargs)else:raise ValueError('''' % impl)", "docstring": "Create a SDR classifier factory.\nThe implementation of the SDR Classifier can be specified with\nthe \"implementation\" keyword argument.\n\nThe SDRClassifierFactory uses the implementation as specified in\n `Default NuPIC Configuration `_.", "id": "f17564:c0:m0"} {"signature": "@staticmethoddef read(proto):", "body": "impl = proto.implementationif impl == '':return SDRClassifier.read(proto.sdrClassifier)elif impl == '':return FastSDRClassifier.read(proto.sdrClassifier)elif impl == '':return SDRClassifierDiff.read(proto.sdrClassifier)else:raise ValueError('''' % impl)", "docstring": ":param proto: SDRClassifierRegionProto capnproto object", "id": "f17564:c0:m1"} {"signature": "def _getEphemeralMembers(self):", "body": "return []", "docstring": "List of our member variables that we don't need to be saved.", "id": "f17565:c0:m1"} {"signature": "def _initEphemerals(self):", "body": "self.segmentUpdates = {}self.resetStats()self._prevInfPatterns = []self._prevLrnPatterns = []stateShape = (self.numberOfCols, self.cellsPerColumn)self.lrnActiveState = {}self.lrnActiveState[\"\"] = numpy.zeros(stateShape, dtype=\"\")self.lrnActiveState[\"\"] = numpy.zeros(stateShape, dtype=\"\")self.lrnPredictedState = {}self.lrnPredictedState[\"\"] = 
numpy.zeros(stateShape, dtype=\"\")self.lrnPredictedState[\"\"] = numpy.zeros(stateShape, dtype=\"\")self.infActiveState = {}self.infActiveState[\"\"] = numpy.zeros(stateShape, dtype=\"\")self.infActiveState[\"\"] = numpy.zeros(stateShape, dtype=\"\")self.infActiveState[\"\"] = numpy.zeros(stateShape, dtype=\"\")self.infActiveState[\"\"] = numpy.zeros(stateShape, dtype=\"\")self.infPredictedState = {}self.infPredictedState[\"\"] = numpy.zeros(stateShape, dtype=\"\")self.infPredictedState[\"\"] = numpy.zeros(stateShape, dtype=\"\")self.infPredictedState[\"\"] = numpy.zeros(stateShape, dtype=\"\")self.infPredictedState[\"\"] = numpy.zeros(stateShape, dtype=\"\")self.cellConfidence = {}self.cellConfidence[\"\"] = numpy.zeros(stateShape, dtype=\"\")self.cellConfidence[\"\"] = numpy.zeros(stateShape, dtype=\"\")self.cellConfidence[\"\"] = numpy.zeros(stateShape, dtype=\"\")self.colConfidence = {}self.colConfidence[\"\"] = numpy.zeros(self.numberOfCols, dtype=\"\")self.colConfidence[\"\"] = numpy.zeros(self.numberOfCols, dtype=\"\")self.colConfidence[\"\"] = numpy.zeros(self.numberOfCols,dtype=\"\")", "docstring": "Initialize all ephemeral members after being restored to a pickled state.", "id": "f17565:c0:m2"} {"signature": "def __getstate__(self):", "body": "state = self.__dict__.copy()for ephemeralMemberName in self._getEphemeralMembers():state.pop(ephemeralMemberName, None)state[''] = self._getRandomState()state[''] = TM_VERSIONreturn state", "docstring": "@internal\n Return serializable state. This function will return a version of the\n __dict__ with all \"ephemeral\" members stripped out. \"Ephemeral\" members\n are defined as those that do not need to be (nor should be) stored\n in any kind of persistent file (e.g., NuPIC network XML file.)", "id": "f17565:c0:m3"} {"signature": "def __setstate__(self, state):", "body": "self._setRandomState(state[''])del state['']version = state.pop('')assert version == TM_VERSIONself.__dict__.update(state)", "docstring": "@internal\n Set the state of ourself from a serialized state.", "id": "f17565:c0:m4"} {"signature": "def write(self, proto):", "body": "proto.version = TM_VERSIONself._random.write(proto.random)proto.numberOfCols = self.numberOfColsproto.cellsPerColumn = self.cellsPerColumnproto.initialPerm = float(self.initialPerm)proto.connectedPerm = float(self.connectedPerm)proto.minThreshold = self.minThresholdproto.newSynapseCount = self.newSynapseCountproto.permanenceInc = float(self.permanenceInc)proto.permanenceDec = float(self.permanenceDec)proto.permanenceMax = float(self.permanenceMax)proto.globalDecay = float(self.globalDecay)proto.activationThreshold = self.activationThresholdproto.doPooling = self.doPoolingproto.segUpdateValidDuration = self.segUpdateValidDurationproto.burnIn = self.burnInproto.collectStats = self.collectStatsproto.verbosity = self.verbosityproto.pamLength = self.pamLengthproto.maxAge = self.maxAgeproto.maxInfBacktrack = self.maxInfBacktrackproto.maxLrnBacktrack = self.maxLrnBacktrackproto.maxSeqLength = self.maxSeqLengthproto.maxSegmentsPerCell = self.maxSegmentsPerCellproto.maxSynapsesPerSegment = self.maxSynapsesPerSegmentproto.outputType = self.outputTypeproto.activeColumns = self.activeColumnscellListProto = proto.init(\"\", len(self.cells))for i, columnSegments in enumerate(self.cells):columnSegmentsProto = cellListProto.init(i, len(columnSegments))for j, cellSegments in enumerate(columnSegments):cellSegmentsProto = columnSegmentsProto.init(j, len(cellSegments))for k, segment in 
enumerate(cellSegments):segment.write(cellSegmentsProto[k])proto.lrnIterationIdx = self.lrnIterationIdxproto.iterationIdx = self.iterationIdxproto.segID = self.segIDif self.currentOutput is None:proto.currentOutput.none = Noneelse:proto.currentOutput.list = self.currentOutput.tolist()proto.pamCounter = self.pamCounterproto.collectSequenceStats = self.collectSequenceStatsproto.resetCalled = self.resetCalledproto.avgInputDensity = self.avgInputDensity or -proto.learnedSeqLength = self.learnedSeqLengthproto.avgLearnedSeqLength = self.avgLearnedSeqLengthproto.prevLrnPatterns = self._prevLrnPatternsproto.prevInfPatterns = self._prevInfPatternssegmentUpdatesListProto = proto.init(\"\",len(self.segmentUpdates))for i, (key, updates) in enumerate(self.segmentUpdates.items()):cellSegmentUpdatesProto = segmentUpdatesListProto[i]cellSegmentUpdatesProto.columnIdx = key[]cellSegmentUpdatesProto.cellIdx = key[]segmentUpdatesProto = cellSegmentUpdatesProto.init(\"\",len(updates))for j, (lrnIterationIdx, segmentUpdate) in enumerate(updates):segmentUpdateWrapperProto = segmentUpdatesProto[j]segmentUpdateWrapperProto.lrnIterationIdx = lrnIterationIdxsegmentUpdate.write(segmentUpdateWrapperProto.segmentUpdate)proto.cellConfidenceT = self.cellConfidence[\"\"].tolist()proto.cellConfidenceT1 = self.cellConfidence[\"\"].tolist()proto.cellConfidenceCandidate = self.cellConfidence[\"\"].tolist()proto.colConfidenceT = self.colConfidence[\"\"].tolist()proto.colConfidenceT1 = self.colConfidence[\"\"].tolist()proto.colConfidenceCandidate = self.colConfidence[\"\"].tolist()proto.lrnActiveStateT = self.lrnActiveState[\"\"].tolist()proto.lrnActiveStateT1 = self.lrnActiveState[\"\"].tolist()proto.infActiveStateT = self.infActiveState[\"\"].tolist()proto.infActiveStateT1 = self.infActiveState[\"\"].tolist()proto.infActiveStateBackup = self.infActiveState[\"\"].tolist()proto.infActiveStateCandidate = self.infActiveState[\"\"].tolist()proto.lrnPredictedStateT = self.lrnPredictedState[\"\"].tolist()proto.lrnPredictedStateT1 = self.lrnPredictedState[\"\"].tolist()proto.infPredictedStateT = self.infPredictedState[\"\"].tolist()proto.infPredictedStateT1 = self.infPredictedState[\"\"].tolist()proto.infPredictedStateBackup = self.infPredictedState[\"\"].tolist()proto.infPredictedStateCandidate = self.infPredictedState[\"\"].tolist()proto.consolePrinterVerbosity = self.consolePrinterVerbosity", "docstring": "Populate serialization proto instance.\n\n :param proto: (BacktrackingTMProto) the proto instance to populate", "id": "f17565:c0:m6"} {"signature": "@classmethoddef read(cls, proto):", "body": "assert proto.version == TM_VERSIONobj = object.__new__(cls)obj._random = Random()obj._random.read(proto.random)obj.numberOfCols = int(proto.numberOfCols)obj.cellsPerColumn = int(proto.cellsPerColumn)obj._numberOfCells = obj.numberOfCols * obj.cellsPerColumnobj.initialPerm = numpy.float32(proto.initialPerm)obj.connectedPerm = numpy.float32(proto.connectedPerm)obj.minThreshold = int(proto.minThreshold)obj.newSynapseCount = int(proto.newSynapseCount)obj.permanenceInc = numpy.float32(proto.permanenceInc)obj.permanenceDec = numpy.float32(proto.permanenceDec)obj.permanenceMax = numpy.float32(proto.permanenceMax)obj.globalDecay = numpy.float32(proto.globalDecay)obj.activationThreshold = int(proto.activationThreshold)obj.doPooling = proto.doPoolingobj.segUpdateValidDuration = int(proto.segUpdateValidDuration)obj.burnIn = int(proto.burnIn)obj.collectStats = proto.collectStatsobj.verbosity = int(proto.verbosity)obj.pamLength = 
int(proto.pamLength)obj.maxAge = int(proto.maxAge)obj.maxInfBacktrack = int(proto.maxInfBacktrack)obj.maxLrnBacktrack = int(proto.maxLrnBacktrack)obj.maxSeqLength = int(proto.maxSeqLength)obj.maxSegmentsPerCell = proto.maxSegmentsPerCellobj.maxSynapsesPerSegment = proto.maxSynapsesPerSegmentobj.outputType = proto.outputTypeobj.activeColumns = [int(col) for col in proto.activeColumns]obj.cells = [[] for _ in range(len(proto.cells))]for columnSegments, columnSegmentsProto in zip(obj.cells, proto.cells):columnSegments.extend([[] for _ in range(len(columnSegmentsProto))])for cellSegments, cellSegmentsProto in zip(columnSegments,columnSegmentsProto):for segmentProto in cellSegmentsProto:segment = Segment.read(segmentProto, obj)cellSegments.append(segment)obj.lrnIterationIdx = int(proto.lrnIterationIdx)obj.iterationIdx = int(proto.iterationIdx)obj.segID = int(proto.segID)obj.pamCounter = int(proto.pamCounter)obj.collectSequenceStats = proto.collectSequenceStatsobj.resetCalled = proto.resetCalledavgInputDensity = proto.avgInputDensityif avgInputDensity < :obj.avgInputDensity = Noneelse:obj.avgInputDensity = avgInputDensityobj.learnedSeqLength = int(proto.learnedSeqLength)obj.avgLearnedSeqLength = proto.avgLearnedSeqLengthobj._initEphemerals()if proto.currentOutput.which() == \"\":obj.currentOutput = Noneelse:obj.currentOutput = numpy.array(proto.currentOutput.list,dtype='')for pattern in proto.prevLrnPatterns:obj.prevLrnPatterns.append([v for v in pattern])for pattern in proto.prevInfPatterns:obj.prevInfPatterns.append([v for v in pattern])for cellWrapperProto in proto.segmentUpdates:key = (cellWrapperProto.columnIdx, cellWrapperProto.cellIdx)value = []for updateWrapperProto in cellWrapperProto.segmentUpdates:segmentUpdate = SegmentUpdate.read(updateWrapperProto.segmentUpdate, obj)value.append((int(updateWrapperProto.lrnIterationIdx), segmentUpdate))obj.segmentUpdates[key] = valuenumpy.copyto(obj.cellConfidence[\"\"], proto.cellConfidenceT)numpy.copyto(obj.cellConfidence[\"\"], proto.cellConfidenceT1)numpy.copyto(obj.cellConfidence[\"\"],proto.cellConfidenceCandidate)numpy.copyto(obj.colConfidence[\"\"], proto.colConfidenceT)numpy.copyto(obj.colConfidence[\"\"], proto.colConfidenceT1)numpy.copyto(obj.colConfidence[\"\"], proto.colConfidenceCandidate)numpy.copyto(obj.lrnActiveState[\"\"], proto.lrnActiveStateT)numpy.copyto(obj.lrnActiveState[\"\"], proto.lrnActiveStateT1)numpy.copyto(obj.infActiveState[\"\"], proto.infActiveStateT)numpy.copyto(obj.infActiveState[\"\"], proto.infActiveStateT1)numpy.copyto(obj.infActiveState[\"\"], proto.infActiveStateBackup)numpy.copyto(obj.infActiveState[\"\"],proto.infActiveStateCandidate)numpy.copyto(obj.lrnPredictedState[\"\"], proto.lrnPredictedStateT)numpy.copyto(obj.lrnPredictedState[\"\"], proto.lrnPredictedStateT1)numpy.copyto(obj.infPredictedState[\"\"], proto.infPredictedStateT)numpy.copyto(obj.infPredictedState[\"\"], proto.infPredictedStateT1)numpy.copyto(obj.infPredictedState[\"\"],proto.infPredictedStateBackup)numpy.copyto(obj.infPredictedState[\"\"],proto.infPredictedStateCandidate)obj.consolePrinterVerbosity = int(proto.consolePrinterVerbosity)return obj", "docstring": "Deserialize from proto instance.\n\n :param proto: (BacktrackingTMProto) the proto instance to read from", "id": "f17565:c0:m7"} {"signature": "def __getattr__(self, name):", "body": "try:return super(BacktrackingTM, self).__getattr__(name)except AttributeError:raise AttributeError(\"\" % name)", "docstring": "@internal\n Patch __getattr__ so that we can catch the first access to 
'cells' and load.\n\n This function is only called when we try to access an attribute that doesn't\n exist. We purposely make sure that \"self.cells\" doesn't exist after\n unpickling so that we'll hit this, then we can load it on the first access.\n\n If this is called at any other time, it will raise an AttributeError.\n That's because:\n - If 'name' is \"cells\", after the first call, self._realCells won't exist\n so we'll get an implicit AttributeError.\n - If 'name' isn't \"cells\", I'd expect our super wouldn't have __getattr__,\n so we'll raise our own Attribute error. If the super did get __getattr__,\n we'll just return what it gives us.", "id": "f17565:c0:m8"} {"signature": "def saveToFile(self, filePath):", "body": "pass", "docstring": "Implemented in \n:meth:`nupic.algorithms.backtracking_tm_cpp.BacktrackingTMCPP.saveToFile`.", "id": "f17565:c0:m14"} {"signature": "def loadFromFile(self, filePath):", "body": "pass", "docstring": "Implemented in \n:meth:`nupic.algorithms.backtracking_tm_cpp.BacktrackingTMCPP.loadFromFile`.", "id": "f17565:c0:m15"} {"signature": "def _getRandomState(self):", "body": "return pickle.dumps(self._random)", "docstring": "@internal\n Return the random number state.\n\n This is used during unit testing to generate repeatable results.", "id": "f17565:c0:m16"} {"signature": "def _setRandomState(self, state):", "body": "self._random = pickle.loads(state)", "docstring": "@internal Set the random number state.\n\n This is used during unit testing to generate repeatable results.", "id": "f17565:c0:m17"} {"signature": "def reset(self,):", "body": "if self.verbosity >= :print(\"\")self.lrnActiveState[''].fill()self.lrnActiveState[''].fill()self.lrnPredictedState[''].fill()self.lrnPredictedState[''].fill()self.infActiveState[''].fill()self.infActiveState[''].fill()self.infPredictedState[''].fill()self.infPredictedState[''].fill()self.cellConfidence[''].fill()self.cellConfidence[''].fill()self.segmentUpdates = {}self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = Noneif self.collectSequenceStats:if self._internalStats[''].sum() > :sig = self._internalStats[''].copy()sig.reshape(self.numberOfCols * self.cellsPerColumn)self._internalStats[''] = sigself._internalStats[''].fill()self.resetCalled = Trueself._prevInfPatterns = []self._prevLrnPatterns = []", "docstring": "Reset the state of all cells.\n\nThis is normally used between sequences while training. All internal states\nare reset to 0.", "id": "f17565:c0:m18"} {"signature": "def resetStats(self):", "body": "self._stats = dict()self._internalStats = dict()self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = self._internalStats[''] = Noneif self.collectSequenceStats:self._internalStats[''] = (numpy.zeros((self.numberOfCols, self.cellsPerColumn),dtype=\"\"))", "docstring": "Reset the learning and inference stats. 
This will usually be called by\nuser code at the start of each inference run (for a particular data set).", "id": "f17565:c0:m19"} {"signature": "def getStats(self):", "body": "if not self.collectStats:return Noneself._stats[''] = self._internalStats['']self._stats[''] = self._internalStats['']self._stats[''] = self._internalStats['']self._stats[''] = self._internalStats['']self._stats[''] = self._internalStats['']nPredictions = max(, self._stats[''])self._stats[''] = (self._internalStats[''])self._stats[''] = (self._internalStats[''] / nPredictions)self._stats[''] = (self._internalStats[''])self._stats[''] = (self._internalStats[''] / nPredictions)self._stats[''] = (self._internalStats[''])self._stats[''] = (self._internalStats[''] / nPredictions)self._stats[''] = (self._internalStats[''] /nPredictions)self._stats[''] = (self._internalStats[''] /nPredictions)self._stats[''] = (self._internalStats[''])return self._stats", "docstring": "Return the current learning and inference stats. This returns a dict\ncontaining all the learning and inference stats we have collected since the\nlast :meth:`resetStats` call. If :class:`BacktrackingTM` ``collectStats`` \nparameter is False, then None is returned.\n\n:returns: (dict) The following keys are returned in the dict when \n ``collectStats`` is True:\n\n - ``nPredictions``: the number of predictions. This is the total \n number of inferences excluding burn-in and the last inference.\n - ``curPredictionScore``: the score for predicting the current input\n (predicted during the previous inference)\n - ``curMissing``: the number of bits in the current input that were \n not predicted to be on.\n - ``curExtra``: the number of bits in the predicted output that are \n not in the next input\n - ``predictionScoreTotal``: the sum of every prediction score to date\n - ``predictionScoreAvg``: ``predictionScoreTotal / nPredictions``\n - ``pctMissingTotal``: the total number of bits that were missed over \n all predictions\n - ``pctMissingAvg``: ``pctMissingTotal / nPredictions``\n - ``prevSequenceSignature``: signature for the sequence immediately \n preceding the last reset. 
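The computeRawAnomalyScore entry earlier in this section defines the raw anomaly score as the fraction of active columns that were not predicted; a direct, illustrative restatement:

import numpy as np

def compute_raw_anomaly_score(active_columns, prev_predicted_columns):
    active_columns = np.asarray(active_columns)
    if active_columns.size == 0:
        return 0.0
    n_predicted = np.in1d(active_columns, prev_predicted_columns).sum()
    return (active_columns.size - n_predicted) / float(active_columns.size)

# Four active columns, two of which were predicted on the previous step.
print(compute_raw_anomaly_score([2, 5, 9, 11], [5, 9]))   # -> 0.5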
'None' if ``collectSequenceStats`` is \n False.", "id": "f17565:c0:m20"} {"signature": "def _updateStatsInferEnd(self, stats, bottomUpNZ, predictedState,colConfidence):", "body": "if not self.collectStats:returnstats[''] += (numExtra2, numMissing2, confidences2) = self._checkPrediction(patternNZs=[bottomUpNZ], output=predictedState,colConfidence=colConfidence)predictionScore, positivePredictionScore, negativePredictionScore = (confidences2[])stats[''] = float(predictionScore)stats[''] = - float(positivePredictionScore)stats[''] = float(negativePredictionScore)stats[''] = numMissing2stats[''] = numExtra2if stats[''] <= self.burnIn:returnstats[''] += numExpected = max(, float(len(bottomUpNZ)))stats[''] += numMissing2stats[''] += numExtra2stats[''] += * numExtra2 / numExpectedstats[''] += * numMissing2 / numExpectedstats[''] += float(predictionScore)stats[''] += - float(positivePredictionScore)stats[''] += float(negativePredictionScore)if self.collectSequenceStats:cc = self.cellConfidence[''] * self.infActiveState['']sconf = cc.sum(axis=)for c in range(self.numberOfCols):if sconf[c] > :cc[c, :] /= sconf[c]self._internalStats[''] += cc", "docstring": "Called at the end of learning and inference, this routine will update\na number of stats in our _internalStats dictionary, including our computed\nprediction score.\n\n:param stats internal stats dictionary\n:param bottomUpNZ list of the active bottom-up inputs\n:param predictedState The columns we predicted on the last time step (should\n match the current bottomUpNZ in the best case)\n:param colConfidence Column confidences we determined on the last time step", "id": "f17565:c0:m21"} {"signature": "def printState(self, aState):", "body": "def formatRow(var, i):s = ''for c in range(self.numberOfCols):if c > and c % == :s += ''s += str(var[c, i])s += ''return sfor i in range(self.cellsPerColumn):print(formatRow(aState, i))", "docstring": "Print an integer array that is the same shape as activeState.\n\n:param aState: TODO: document", "id": "f17565:c0:m22"} {"signature": "def printConfidence(self, aState, maxCols = ):", "body": "def formatFPRow(var, i):s = ''for c in range(min(maxCols, self.numberOfCols)):if c > and c % == :s += ''s += '' % var[c, i]s += ''return sfor i in range(self.cellsPerColumn):print(formatFPRow(aState, i))", "docstring": "Print a floating point array that is the same shape as activeState.\n\n:param aState: TODO: document\n:param maxCols: TODO: document", "id": "f17565:c0:m23"} {"signature": "def printColConfidence(self, aState, maxCols = ):", "body": "def formatFPRow(var):s = ''for c in range(min(maxCols, self.numberOfCols)):if c > and c % == :s += ''s += '' % var[c]s += ''return sprint(formatFPRow(aState))", "docstring": "Print up to maxCols number from a flat floating point array.\n\n:param aState: TODO: document\n:param maxCols: TODO: document", "id": "f17565:c0:m24"} {"signature": "def printStates(self, printPrevious = True, printLearnState = True):", "body": "def formatRow(var, i):s = ''for c in range(self.numberOfCols):if c > and c % == :s += ''s += str(var[c, i])s += ''return sprint(\"\")for i in range(self.cellsPerColumn):if printPrevious:print(formatRow(self.infActiveState[''], i), end='')print(formatRow(self.infActiveState[''], i))print(\"\")for i in range(self.cellsPerColumn):if printPrevious:print(formatRow(self.infPredictedState[''], i), end='')print(formatRow(self.infPredictedState[''], i))if printLearnState:print(\"\")for i in range(self.cellsPerColumn):if printPrevious:print(formatRow(self.lrnActiveState[''], 
i), end='')print(formatRow(self.lrnActiveState[''], i))print(\"\")for i in range(self.cellsPerColumn):if printPrevious:print(formatRow(self.lrnPredictedState[''], i), end='')print(formatRow(self.lrnPredictedState[''], i))", "docstring": "TODO: document\n\n:param printPrevious: \n:param printLearnState: \n:return:", "id": "f17565:c0:m25"} {"signature": "def printOutput(self, y):", "body": "print(\"\")for i in range(self.cellsPerColumn):for c in range(self.numberOfCols):print(int(y[c, i]), end='')print()", "docstring": "TODO: document\n\n:param y: \n:return:", "id": "f17565:c0:m26"} {"signature": "def printInput(self, x):", "body": "print(\"\")for c in range(self.numberOfCols):print(int(x[c]), end='')print()", "docstring": "TODO: document\n\n:param x: \n:return:", "id": "f17565:c0:m27"} {"signature": "def printParameters(self):", "body": "print(\"\", self.numberOfCols)print(\"\", self.cellsPerColumn)print(\"\", self.minThreshold)print(\"\", self.newSynapseCount)print(\"\", self.activationThreshold)print()print(\"\", self.initialPerm)print(\"\", self.connectedPerm)print(\"\", self.permanenceInc)print(\"\", self.permanenceDec)print(\"\", self.permanenceMax)print(\"\", self.globalDecay)print()print(\"\", self.doPooling)print(\"\", self.segUpdateValidDuration)print(\"\", self.pamLength)", "docstring": "Print the parameter settings for the TM.", "id": "f17565:c0:m28"} {"signature": "def printActiveIndices(self, state, andValues=False):", "body": "if len(state.shape) == :(cols, cellIdxs) = state.nonzero()else:cols = state.nonzero()[]cellIdxs = numpy.zeros(len(cols))if len(cols) == :print(\"\")returnprevCol = -for (col, cellIdx) in zip(cols, cellIdxs):if col != prevCol:if prevCol != -:print(\"\", end='')print(\"\" % (col), end='')prevCol = colif andValues:if len(state.shape) == :value = state[col, cellIdx]else:value = state[col]print(\"\" % (cellIdx, value), end='')else:print(\"\" % (cellIdx), end='')print(\"\")", "docstring": "Print the list of ``[column, cellIdx]`` indices for each of the active cells \nin state.\n\n:param state: TODO: document\n:param andValues: TODO: document", "id": "f17565:c0:m29"} {"signature": "def printComputeEnd(self, output, learn=False):", "body": "if self.verbosity >= :print(\"\")print(\"\", learn)print(\"\" % (self.infActiveState[''].min(axis=).sum()), end='')print(\"\" % (self._internalStats['']), end='')print(\"\" % (self._internalStats['']), end='')print(\"\" % ( - self._internalStats['']))print(\"\", self.getNumSegments(), end='')print(\"\", self.avgLearnedSeqLength)print(\"\" % (self.infActiveState[''].sum()))self.printActiveIndices(self.infActiveState[''])if self.verbosity >= :self.printState(self.infActiveState[''])print(\"\" % (self.infPredictedState[''].sum()))self.printActiveIndices(self.infPredictedState[''])if self.verbosity >= :self.printState(self.infPredictedState[''])print(\"\" % (self.lrnActiveState[''].sum()))self.printActiveIndices(self.lrnActiveState[''])if self.verbosity >= :self.printState(self.lrnActiveState[''])print(\"\" % (self.lrnPredictedState[''].sum()))self.printActiveIndices(self.lrnPredictedState[''])if self.verbosity >= :self.printState(self.lrnPredictedState[''])print(\"\")self.printActiveIndices(self.cellConfidence[''], andValues=True)if self.verbosity >= :self.printConfidence(self.cellConfidence[''])print(\"\")self.printActiveIndices(self.colConfidence[''], andValues=True)print(\"\")cc = self.cellConfidence[''] * self.infActiveState['']self.printActiveIndices(cc, andValues=True)if self.verbosity == 
:print(\"\")self.printCells(predictedOnly=True)elif self.verbosity >= :print(\"\")self.printCells(predictedOnly=False)print()elif self.verbosity >= :print(\"\", learn)print(\"\" % len(output.nonzero()[]), end='')self.printActiveIndices(output.reshape(self.numberOfCols,self.cellsPerColumn))", "docstring": "Called at the end of inference to print out various diagnostic\ninformation based on the current verbosity level.\n\n:param output: TODO: document\n:param learn: TODO: document", "id": "f17565:c0:m30"} {"signature": "def printSegmentUpdates(self):", "body": "print(\"\", len(self.segmentUpdates))for key, updateList in self.segmentUpdates.items():c, i = key[], key[]print(c, i, updateList)", "docstring": "TODO: document\n\n:return:", "id": "f17565:c0:m31"} {"signature": "def printCell(self, c, i, onlyActiveSegments=False):", "body": "if len(self.cells[c][i]) > :print(\"\", c, \"\", i, \"\", end='')print(len(self.cells[c][i]), \"\")for j, s in enumerate(self.cells[c][i]):isActive = self._isSegmentActive(s, self.infActiveState[''])if not onlyActiveSegments or isActive:isActiveStr = \"\" if isActive else \"\"print(\"\" % (isActiveStr, j), end='')s.debugPrint()", "docstring": "TODO: document\n\n:param c: \n:param i: \n:param onlyActiveSegments: \n:return:", "id": "f17565:c0:m32"} {"signature": "def printCells(self, predictedOnly=False):", "body": "if predictedOnly:print(\"\")else:print(\"\")print(\"\", self.activationThreshold, end='')print(\"\", self.minThreshold, end='')print(\"\", self.connectedPerm)for c in range(self.numberOfCols):for i in range(self.cellsPerColumn):if not predictedOnly or self.infPredictedState[''][c, i]:self.printCell(c, i, predictedOnly)", "docstring": "TODO: document\n\n:param predictedOnly: \n:return:", "id": "f17565:c0:m33"} {"signature": "def getNumSegmentsInCell(self, c, i):", "body": "return len(self.cells[c][i])", "docstring": ":param c: (int) column index\n:param i: (int) cell index within column\n:returns: (int) the total number of synapses in cell (c, i)", "id": "f17565:c0:m34"} {"signature": "def getNumSynapses(self):", "body": "nSyns = self.getSegmentInfo()[]return nSyns", "docstring": ":returns: (int) the total number of synapses", "id": "f17565:c0:m35"} {"signature": "def getNumSynapsesPerSegmentAvg(self):", "body": "return float(self.getNumSynapses()) / max(, self.getNumSegments())", "docstring": ":returns: (int) the average number of synapses per segment", "id": "f17565:c0:m36"} {"signature": "def getNumSegments(self):", "body": "nSegs = self.getSegmentInfo()[]return nSegs", "docstring": ":returns: (int) the total number of segments", "id": "f17565:c0:m37"} {"signature": "def getNumCells(self):", "body": "return self.numberOfCols * self.cellsPerColumn", "docstring": ":returns: (int) the total number of cells", "id": "f17565:c0:m38"} {"signature": "def getSegmentOnCell(self, c, i, segIdx):", "body": "seg = self.cells[c][i][segIdx]retlist = [[seg.segID, seg.isSequenceSeg, seg.positiveActivations,seg.totalActivations, seg.lastActiveIteration,seg._lastPosDutyCycle, seg._lastPosDutyCycleIteration]]retlist += seg.synsreturn retlist", "docstring": ":param c: (int) column index\n:param i: (int) cell index in column\n:param segIdx: (int) segment index to match\n\n:returns: (list) representing the the segment on cell (c, i) with index \n ``segIdx``.\n ::\n\n [ [segmentID, sequenceSegmentFlag, positiveActivations,\n totalActivations, lastActiveIteration,\n lastPosDutyCycle, lastPosDutyCycleIteration],\n [col1, idx1, perm1],\n [col2, idx2, perm2], ...\n ]", "id": 
"f17565:c0:m39"} {"signature": "def _addToSegmentUpdates(self, c, i, segUpdate):", "body": "if segUpdate is None or len(segUpdate.activeSynapses) == :returnkey = (c, i) if key in self.segmentUpdates:self.segmentUpdates[key] += [(self.lrnIterationIdx, segUpdate)]else:self.segmentUpdates[key] = [(self.lrnIterationIdx, segUpdate)]", "docstring": "Store a dated potential segment update. The \"date\" (iteration index) is used\nlater to determine whether the update is too old and should be forgotten.\nThis is controlled by parameter ``segUpdateValidDuration``.\n\n:param c: TODO: document\n:param i: TODO: document\n:param segUpdate: TODO: document", "id": "f17565:c0:m40"} {"signature": "def _removeSegmentUpdate(self, updateInfo):", "body": "(creationDate, segUpdate) = updateInfokey = (segUpdate.columnIdx, segUpdate.cellIdx)self.segmentUpdates[key].remove(updateInfo)", "docstring": "Remove a segment update (called when seg update expires or is processed)\n\n:param updateInfo: (tuple) (creationDate, SegmentUpdate)", "id": "f17565:c0:m41"} {"signature": "def _computeOutput(self):", "body": "if self.outputType == '':mostActiveCellPerCol = self.cellConfidence[''].argmax(axis=)self.currentOutput = numpy.zeros(self.infActiveState[''].shape,dtype='')numCols = self.currentOutput.shape[]self.currentOutput[(range(numCols), mostActiveCellPerCol)] = activeCols = self.infActiveState[''].max(axis=)inactiveCols = numpy.where(activeCols==)[]self.currentOutput[inactiveCols, :] = elif self.outputType == '':self.currentOutput = self.infActiveState['']elif self.outputType == '':self.currentOutput = numpy.logical_or(self.infPredictedState[''],self.infActiveState[''])else:raise RuntimeError(\"\")return self.currentOutput.reshape(-).astype('')", "docstring": "Computes output for both learning and inference. In both cases, the\noutput is the boolean OR of ``activeState`` and ``predictedState`` at ``t``.\nStores ``currentOutput`` for ``checkPrediction``.\n\n:returns: TODO: document", "id": "f17565:c0:m42"} {"signature": "def _getActiveState(self):", "body": "return self.infActiveState[''].reshape(-).astype('')", "docstring": "Return the current active state. This is called by the node to\nobtain the sequence output of the TM.\n\n:returns: TODO: document", "id": "f17565:c0:m43"} {"signature": "def getPredictedState(self):", "body": "return self.infPredictedState['']", "docstring": ":returns: numpy array of predicted cells, representing the current predicted\n state. ``predictedCells[c][i]`` represents the state of the i'th cell in \n the c'th column.", "id": "f17565:c0:m44"} {"signature": "def predict(self, nSteps):", "body": "pristineTPDynamicState = self._getTPDynamicState()assert (nSteps>)multiStepColumnPredictions = numpy.zeros((nSteps, self.numberOfCols),dtype=\"\")step = while True:multiStepColumnPredictions[step, :] = self.topDownCompute()if step == nSteps-:breakstep += self.infActiveState[''][:, :] = self.infActiveState[''][:, :]self.infPredictedState[''][:, :] = self.infPredictedState[''][:, :]self.cellConfidence[''][:, :] = self.cellConfidence[''][:, :]self.infActiveState[''][:, :] = self.infPredictedState[''][:, :]self.infPredictedState[''].fill()self.cellConfidence[''].fill()self._inferPhase2()self._setTPDynamicState(pristineTPDynamicState)return multiStepColumnPredictions", "docstring": "This function gives the future predictions for timesteps starting\nfrom the current TM state. The TM is returned to its original state at the\nend before returning.\n\n1. We save the TM state.\n2. Loop for nSteps\n\n a. 
Turn-on with lateral support from the current active cells\n b. Set the predicted cells as the next step's active cells. This step\n in learn and infer methods use input here to correct the predictions.\n We don't use any input here.\n\n3. Revert back the TM state to the time before prediction\n\n:param nSteps: (int) The number of future time steps to be predicted\n:returns: all the future predictions - a numpy array of type \"float32\" and\n shape (nSteps, numberOfCols). The ith row gives the tm prediction for \n each column at a future timestep (t+i+1).", "id": "f17565:c0:m45"} {"signature": "def _getTPDynamicStateVariableNames(self):", "body": "return [\"\",\"\",\"\",\"\",\"\",\"\",]", "docstring": "Any newly added dynamic states in the TM should be added to this list.\n\nParameters:\n--------------------------------------------\nretval: The list of names of TM dynamic state variables.", "id": "f17565:c0:m46"} {"signature": "def _getTPDynamicState(self,):", "body": "tpDynamicState = dict()for variableName in self._getTPDynamicStateVariableNames():tpDynamicState[variableName] = copy.deepcopy(self.__dict__[variableName])return tpDynamicState", "docstring": "Parameters:\n--------------------------------------------\nretval: A dict with all the dynamic state variable names as keys and\n their values at this instant as values.", "id": "f17565:c0:m47"} {"signature": "def _setTPDynamicState(self, tpDynamicState):", "body": "for variableName in self._getTPDynamicStateVariableNames():self.__dict__[variableName] = tpDynamicState.pop(variableName)", "docstring": "Set all the dynamic state variables from the dict.\n\n dict has all the dynamic state variable names as keys and\ntheir values at this instant as values.\n\nWe set the dynamic state variables in the tm object with these items.", "id": "f17565:c0:m48"} {"signature": "def _updateAvgLearnedSeqLength(self, prevSeqLength):", "body": "if self.lrnIterationIdx < :alpha = else:alpha = self.avgLearnedSeqLength = (( - alpha) * self.avgLearnedSeqLength +(alpha * prevSeqLength))", "docstring": "Update our moving average of learned sequence length.", "id": "f17565:c0:m49"} {"signature": "def getAvgLearnedSeqLength(self):", "body": "return self.avgLearnedSeqLength", "docstring": ":returns: Moving average of learned sequence length", "id": "f17565:c0:m50"} {"signature": "def _inferBacktrack(self, activeColumns):", "body": "numPrevPatterns = len(self._prevInfPatterns)if numPrevPatterns <= :returncurrentTimeStepsOffset = numPrevPatterns - self.infActiveState[''][:, :] = self.infActiveState[''][:, :]self.infPredictedState[''][:, :] = self.infPredictedState[''][:, :]badPatterns = []inSequence = FalsecandConfidence = NonecandStartOffset = Nonefor startOffset in range(, numPrevPatterns):if startOffset == currentTimeStepsOffset and candConfidence is not None:breakif self.verbosity >= :print((\"\" % (numPrevPatterns - - startOffset),self._prevInfPatterns[startOffset]))inSequence = Falsefor offset in range(startOffset, numPrevPatterns):if offset == currentTimeStepsOffset:totalConfidence = self.colConfidence[''][activeColumns].sum()self.infPredictedState[''][:, :] = self.infPredictedState[''][:, :]inSequence = self._inferPhase1(self._prevInfPatterns[offset],useStartCells = (offset == startOffset))if not inSequence:breakif self.verbosity >= :print((\"\",self._prevInfPatterns[offset]))inSequence = self._inferPhase2()if not inSequence:breakif not inSequence:badPatterns.append(startOffset)continuecandConfidence = totalConfidencecandStartOffset = startOffsetif 
self.verbosity >= and startOffset != currentTimeStepsOffset:print((\"\"\"\" % (numPrevPatterns - - startOffset),totalConfidence))if candStartOffset == currentTimeStepsOffset: breakself.infActiveState[''][:, :] = self.infActiveState[''][:, :]self.infPredictedState[''][:, :] = (self.infPredictedState[''][:, :])self.cellConfidence[''][:, :] = self.cellConfidence[''][:, :]self.colConfidence[''][:] = self.colConfidence[''][:]breakif candStartOffset is None:if self.verbosity >= :print(\"\")self.infActiveState[''][:, :] = self.infActiveState[''][:, :]self._inferPhase2()else:if self.verbosity >= :print((\"\"\"\" % (numPrevPatterns - - candStartOffset),self._prevInfPatterns[candStartOffset]))if candStartOffset != currentTimeStepsOffset:self.infActiveState[''][:, :] = self.infActiveState[''][:, :]self.infPredictedState[''][:, :] = (self.infPredictedState[''][:, :])self.cellConfidence[''][:, :] = self.cellConfidence[''][:, :]self.colConfidence[''][:] = self.colConfidence[''][:]for i in range(numPrevPatterns):if (i in badPatterns or(candStartOffset is not None and i <= candStartOffset)):if self.verbosity >= :print((\"\",self._prevInfPatterns[]))self._prevInfPatterns.pop()else:breakself.infPredictedState[''][:, :] = self.infPredictedState[''][:, :]", "docstring": "This \"backtracks\" our inference state, trying to see if we can lock onto\nthe current set of inputs by assuming the sequence started up to N steps\nago on start cells.\n\nThis will adjust @ref infActiveState['t'] if it does manage to lock on to a\nsequence that started earlier. It will also compute infPredictedState['t']\nbased on the possibly updated @ref infActiveState['t'], so there is no need to\ncall inferPhase2() after calling inferBacktrack().\n\nThis looks at:\n - ``infActiveState['t']``\n\nThis updates/modifies:\n - ``infActiveState['t']``\n - ``infPredictedState['t']``\n - ``colConfidence['t']``\n - ``cellConfidence['t']``\n\nHow it works:\n\nThis method gets called from :meth:`updateInferenceState` when we detect \neither of the following two conditions:\n\n#. The current bottom-up input had too many un-expected columns\n#. We fail to generate a sufficient number of predicted columns for the\n next time step.\n\nEither of these two conditions indicate that we have fallen out of a\nlearned sequence.\n\nRather than simply \"giving up\" and bursting on the unexpected input\ncolumns, a better approach is to see if perhaps we are in a sequence that\nstarted a few steps ago. The real world analogy is that you are driving\nalong and suddenly hit a dead-end, you will typically go back a few turns\nago and pick up again from a familiar intersection.\n\nThis back-tracking goes hand in hand with our learning methodology, which\nalways tries to learn again from start cells after it loses context. This\nresults in a network that has learned multiple, overlapping paths through\nthe input data, each starting at different points. The lower the global\ndecay and the more repeatability in the data, the longer each of these\npaths will end up being.\n\nThe goal of this function is to find out which starting point in the past\nleads to the current input with the most context as possible. This gives us\nthe best chance of predicting accurately going forward. Consider the\nfollowing example, where you have learned the following sub-sequences which\nhave the given frequencies:\n\n::\n\n ? - Q - C - D - E 10X seq 0\n ? - B - C - D - F 1X seq 1\n ? - B - C - H - I 2X seq 2\n ? - B - C - D - F 3X seq 3\n ? - Z - A - B - C - D - J 2X seq 4\n ? 
- Z - A - B - C - H - I 1X seq 5\n ? - Y - A - B - C - D - F 3X seq 6\n\n ----------------------------------------\n W - X - Z - A - B - C - D <= input history\n ^\n current time step\n\nSuppose, in the current time step, the input pattern is D and you have not\npredicted D, so you need to backtrack. Suppose we can backtrack up to 6\nsteps in the past, which path should we choose? From the table above, we can\nsee that the correct answer is to assume we are in seq 4. How do we\nimplement the backtrack to give us this right answer? The current\nimplementation takes the following approach:\n\n#. Start from the farthest point in the past.\n#. For each starting point S, calculate the confidence of the current\n input, conf(startingPoint=S), assuming we followed that sequence.\n Note that we must have learned at least one sequence that starts at\n point S.\n#. If conf(startingPoint=S) is significantly different from\n conf(startingPoint=S-1), then choose S-1 as the starting point.\n\nThe assumption here is that starting point S-1 is the starting point of\na learned sub-sequence that includes the current input in its path and\nthat started the longest ago. It thus has the most context and will be\nthe best predictor going forward.\n\nFrom the statistics in the above table, we can compute what the confidences\nwill be for each possible starting point:\n\n::\n\n startingPoint confidence of D\n -----------------------------------------\n B (t-2) 4/6 = 0.667 (seq 1,3)/(seq 1,2,3)\n Z (t-4) 2/3 = 0.667 (seq 4)/(seq 4,5)\n\nFirst of all, we do not compute any confidences at starting points t-1, t-3,\nt-5, t-6 because there are no learned sequences that start at those points.\n\nNotice here that Z is the starting point of the longest sub-sequence leading\nup to the current input. Even though starting at t-2 and starting at t-4\ngive the same confidence value, we choose the sequence starting at t-4\nbecause it gives the most context, and it mirrors the way that learning\nextends sequences.\n\n:param activeColumns: (list) of active column indices", "id": "f17565:c0:m51"} {"signature": "def _inferPhase1(self, activeColumns, useStartCells):", "body": "self.infActiveState[''].fill()numPredictedColumns = if useStartCells:for c in activeColumns:self.infActiveState[''][c, ] = else:for c in activeColumns:predictingCells = numpy.where(self.infPredictedState[''][c] == )[]numPredictingCells = len(predictingCells)if numPredictingCells > :self.infActiveState[''][c, predictingCells] = numPredictedColumns += else:self.infActiveState[''][c, :] = if useStartCells or numPredictedColumns >= * len(activeColumns):return Trueelse:return False", "docstring": "Update the inference active state from the last set of predictions\nand the current bottom-up.\n\nThis looks at:\n - ``infPredictedState['t-1']``\nThis modifies:\n - ``infActiveState['t']``\n\n:param activeColumns: (list) active bottom-ups\n:param useStartCells: (bool) If true, ignore previous predictions and simply \n turn on the start cells in the active columns\n:returns: (bool) True if the current input was sufficiently predicted, OR if \n we started over on startCells. 
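As an illustration of the phase-1 rule implemented in the ``_inferPhase1`` record above, the following hedged Python sketch restates it in standalone form: predicted cells in an active column become active, columns with no prediction burst, and the input counts as in-sequence when enough active columns were predicted. The 50% threshold and the use of cell 0 as the start cell are assumptions here, since the literal constants are elided in this record::

    import numpy

    def infer_phase1_sketch(activeColumns, prevPredicted, cellsPerColumn,
                            useStartCells, threshold=0.5):
        # prevPredicted: (numCols, cellsPerColumn) binary array of the previous
        # step's predictions; activeColumns: indices of currently active columns.
        numCols = prevPredicted.shape[0]
        activeState = numpy.zeros((numCols, cellsPerColumn), dtype="int8")
        numPredictedColumns = 0
        for c in activeColumns:
            if useStartCells:
                activeState[c, 0] = 1              # start cell only (assumed index 0)
                continue
            predicted = numpy.where(prevPredicted[c] == 1)[0]
            if len(predicted) > 0:
                activeState[c, predicted] = 1      # confirm the prediction
                numPredictedColumns += 1
            else:
                activeState[c, :] = 1              # burst the whole column
        inSequence = useStartCells or (
            numPredictedColumns >= threshold * len(activeColumns))
        return activeState, inSequence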
False indicates that the current input \n was NOT predicted, and we are now bursting on most columns.", "id": "f17565:c0:m52"} {"signature": "def _inferPhase2(self):", "body": "self.infPredictedState[''].fill()self.cellConfidence[''].fill()self.colConfidence[''].fill()for c in range(self.numberOfCols):for i in range(self.cellsPerColumn):for s in self.cells[c][i]:numActiveSyns = self._getSegmentActivityLevel(s, self.infActiveState[''], connectedSynapsesOnly=False)if numActiveSyns < self.activationThreshold:continueif self.verbosity >= :print(\"\" % (c, i), end='')s.debugPrint()dc = s.dutyCycle()self.cellConfidence[''][c, i] += dcself.colConfidence[''][c] += dcif self._isSegmentActive(s, self.infActiveState['']):self.infPredictedState[''][c, i] = sumConfidences = self.colConfidence[''].sum()if sumConfidences > :self.colConfidence[''] /= sumConfidencesself.cellConfidence[''] /= sumConfidencesnumPredictedCols = self.infPredictedState[''].max(axis=).sum()if numPredictedCols >= * self.avgInputDensity:return Trueelse:return False", "docstring": "Phase 2 of the inference state. This computes the predicted state, then\nchecks to ensure that the predicted state is not over-saturated, i.e. does\nnot look too much like a burst. This indicates that there were so many\nseparate paths learned from the current input columns to the predicted\ninput columns that bursting on the current input columns most likely\ngenerated mix-and-match errors on cells in the predicted columns. If\nwe detect this situation, we instead turn on only the start cells in the\ncurrent active columns and re-generate the predicted state from those.\n\nThis looks at:\n - ``infActiveState['t']``\n\nThis modifies:\n - ``infPredictedState['t']``\n - ``colConfidence['t']``\n - ``cellConfidence['t']``\n\n:returns: (bool) True if we have a decent guess as to the next input.\n Returning False from here indicates to the caller that we have\n reached the end of a learned sequence.", "id": "f17565:c0:m53"} {"signature": "def _updateInferenceState(self, activeColumns):", "body": "self.infActiveState[''][:, :] = self.infActiveState[''][:, :]self.infPredictedState[''][:, :] = self.infPredictedState[''][:, :]self.cellConfidence[''][:, :] = self.cellConfidence[''][:, :]self.colConfidence[''][:] = self.colConfidence[''][:]if self.maxInfBacktrack > :if len(self._prevInfPatterns) > self.maxInfBacktrack:self._prevInfPatterns.pop()self._prevInfPatterns.append(activeColumns)inSequence = self._inferPhase1(activeColumns, self.resetCalled)if not inSequence:if self.verbosity >= :print (\"\"\"\")self._inferBacktrack(activeColumns)returninSequence = self._inferPhase2()if not inSequence:if self.verbosity >= :print (\"\"\"\")self._inferBacktrack(activeColumns)", "docstring": "Update the inference state. 
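The tail of ``_inferPhase2`` above can be summarized as: normalize the accumulated duty-cycle confidences by the total column confidence, then declare a decent guess when the number of predicted columns is comparable to the average input density. A hedged sketch, in which the fraction of ``avgInputDensity`` required is an assumption (the literal constant is elided in the record)::

    def finish_phase2_sketch(cellConfidence, colConfidence, predictedState,
                             avgInputDensity, fraction=0.5):
        # cellConfidence: (numCols, cellsPerColumn) float array,
        # colConfidence: (numCols,) float array, predictedState: binary array.
        total = colConfidence.sum()
        if total > 0:
            colConfidence /= total        # normalize so column confidences sum to 1
            cellConfidence /= total
        numPredictedCols = predictedState.max(axis=1).sum()
        # A "decent guess" means we predicted roughly as many columns as an
        # average input activates.
        return numPredictedCols >= fraction * avgInputDensity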
Called from :meth:`compute` on every iteration.\n\n:param activeColumns: (list) active column indices.", "id": "f17565:c0:m54"} {"signature": "def _learnBacktrackFrom(self, startOffset, readOnly=True):", "body": "numPrevPatterns = len(self._prevLrnPatterns)currentTimeStepsOffset = numPrevPatterns - if not readOnly:self.segmentUpdates = {}if self.verbosity >= :if readOnly:print((\"\" % (numPrevPatterns - - startOffset),self._prevLrnPatterns[startOffset]))else:print((\"\" % (numPrevPatterns - - startOffset),self._prevLrnPatterns[startOffset]))inSequence = Truefor offset in range(startOffset, numPrevPatterns):self.lrnPredictedState[''][:, :] = self.lrnPredictedState[''][:, :]self.lrnActiveState[''][:, :] = self.lrnActiveState[''][:, :]inputColumns = self._prevLrnPatterns[offset]if not readOnly:self._processSegmentUpdates(inputColumns)if offset == startOffset:self.lrnActiveState[''].fill()for c in inputColumns:self.lrnActiveState[''][c, ] = inSequence = Trueelse:inSequence = self._learnPhase1(inputColumns, readOnly=readOnly)if not inSequence or offset == currentTimeStepsOffset:breakif self.verbosity >= :print(\"\", inputColumns)self._learnPhase2(readOnly=readOnly)return inSequence", "docstring": "A utility method called from learnBacktrack. This will backtrack\nstarting from the given startOffset in our prevLrnPatterns queue.\n\nIt returns True if the backtrack was successful and we managed to get\npredictions all the way up to the current time step.\n\nIf readOnly, then no segments are updated or modified, otherwise, all\nsegment updates that belong to the given path are applied.\n\nThis updates/modifies:\n\n - lrnActiveState['t']\n\nThis trashes:\n\n - lrnPredictedState['t']\n - lrnPredictedState['t-1']\n - lrnActiveState['t-1']\n\n:param startOffset: Start offset within the prevLrnPatterns input history\n:param readOnly: \n:return: True if we managed to lock on to a sequence that started\n earlier.\n If False, we lost predictions somewhere along the way\n leading up to the current time.", "id": "f17565:c0:m55"} {"signature": "def _learnBacktrack(self):", "body": "numPrevPatterns = len(self._prevLrnPatterns) - if numPrevPatterns <= :if self.verbosity >= :print(\"\")return FalsebadPatterns = []inSequence = Falsefor startOffset in range(, numPrevPatterns):inSequence = self._learnBacktrackFrom(startOffset, readOnly=True)if inSequence:breakbadPatterns.append(startOffset)if not inSequence:if self.verbosity >= :print (\"\"\"\")self._prevLrnPatterns = []return Falseif self.verbosity >= :print((\"\"\"\" % (numPrevPatterns - startOffset),self._prevLrnPatterns[startOffset]))self._learnBacktrackFrom(startOffset, readOnly=False)for i in range(numPrevPatterns):if i in badPatterns or i <= startOffset:if self.verbosity >= :print((\"\",self._prevLrnPatterns[]))self._prevLrnPatterns.pop()else:breakreturn numPrevPatterns - startOffset", "docstring": "This \"backtracks\" our learning state, trying to see if we can lock onto\nthe current set of inputs by assuming the sequence started up to N steps\nago on start cells.\n\nThis will adjust @ref lrnActiveState['t'] if it does manage to lock on to a\nsequence that started earlier.\n\n:returns: >0 if we managed to lock on to a sequence that started\n earlier. 
The value returned is how many steps in the\n past we locked on.\n If 0 is returned, the caller needs to change active\n state to start on start cells.\n\nHow it works:\n-------------------------------------------------------------------\nThis method gets called from updateLearningState when we detect either of\nthe following two conditions:\n\n#. Our PAM counter (@ref pamCounter) expired\n#. We reached the max allowed learned sequence length\n\nEither of these two conditions indicate that we want to start over on start\ncells.\n\nRather than start over on start cells on the current input, we can\naccelerate learning by backtracking a few steps ago and seeing if perhaps\na sequence we already at least partially know already started.\n\nThis updates/modifies:\n - @ref lrnActiveState['t']\n\nThis trashes:\n - @ref lrnActiveState['t-1']\n - @ref lrnPredictedState['t']\n - @ref lrnPredictedState['t-1']", "id": "f17565:c0:m56"} {"signature": "def _learnPhase1(self, activeColumns, readOnly=False):", "body": "self.lrnActiveState[''].fill()numUnpredictedColumns = for c in activeColumns:predictingCells = numpy.where(self.lrnPredictedState[''][c] == )[]numPredictedCells = len(predictingCells)assert numPredictedCells <= if numPredictedCells == :i = predictingCells[]self.lrnActiveState[''][c, i] = continuenumUnpredictedColumns += if readOnly:continuei, s, numActive = self._getBestMatchingCell(c, self.lrnActiveState[''], self.minThreshold)if s is not None and s.isSequenceSegment():if self.verbosity >= :print(\"\", c)self.lrnActiveState[''][c, i] = segUpdate = self._getSegmentActiveSynapses(c, i, s, self.lrnActiveState[''], newSynapses = True)s.totalActivations += trimSegment = self._adaptSegment(segUpdate)if trimSegment:self._trimSegmentsInCell(c, i, [s], minPermanence = ,minNumSyns = )else:i = self._getCellForNewSegment(c)if (self.verbosity >= ):print(\"\", c, end='')print(\"\", i)self.lrnActiveState[''][c, i] = segUpdate = self._getSegmentActiveSynapses(c, i, None, self.lrnActiveState[''], newSynapses=True)segUpdate.sequenceSegment = True self._adaptSegment(segUpdate) numBottomUpColumns = len(activeColumns)if numUnpredictedColumns < numBottomUpColumns / :return True else:return False", "docstring": "Compute the learning active state given the predicted state and\nthe bottom-up input.\n\n:param activeColumns list of active bottom-ups\n:param readOnly True if being called from backtracking logic.\n This tells us not to increment any segment\n duty cycles or queue up any updates.\n:returns: True if the current input was sufficiently predicted, OR\n if we started over on startCells. 
False indicates that the current\n input was NOT predicted, well enough to consider it as \"inSequence\"\n\nThis looks at:\n - @ref lrnActiveState['t-1']\n - @ref lrnPredictedState['t-1']\n\nThis modifies:\n - @ref lrnActiveState['t']\n - @ref lrnActiveState['t-1']", "id": "f17565:c0:m57"} {"signature": "def _learnPhase2(self, readOnly=False):", "body": "self.lrnPredictedState[''].fill()for c in range(self.numberOfCols):i, s, numActive = self._getBestMatchingCell(c, self.lrnActiveState[''], minThreshold = self.activationThreshold)if i is None:continueself.lrnPredictedState[''][c, i] = if readOnly:continuesegUpdate = self._getSegmentActiveSynapses(c, i, s, activeState=self.lrnActiveState[''],newSynapses=(numActive < self.newSynapseCount))s.totalActivations += self._addToSegmentUpdates(c, i, segUpdate)if self.doPooling:predSegment = self._getBestMatchingSegment(c, i,self.lrnActiveState[''])segUpdate = self._getSegmentActiveSynapses(c, i, predSegment,self.lrnActiveState[''], newSynapses=True)self._addToSegmentUpdates(c, i, segUpdate)", "docstring": "Compute the predicted segments given the current set of active cells.\n\n:param readOnly True if being called from backtracking logic.\n This tells us not to increment any segment\n duty cycles or queue up any updates.\n\nThis computes the lrnPredictedState['t'] and queues up any segments that\nbecame active (and the list of active synapses for each segment) into\nthe segmentUpdates queue\n\nThis looks at:\n - @ref lrnActiveState['t']\n\nThis modifies:\n - @ref lrnPredictedState['t']\n - @ref segmentUpdates", "id": "f17565:c0:m58"} {"signature": "def _updateLearningState(self, activeColumns):", "body": "self.lrnPredictedState[''][:, :] = self.lrnPredictedState[''][:, :]self.lrnActiveState[''][:, :] = self.lrnActiveState[''][:, :]if self.maxLrnBacktrack > :if len(self._prevLrnPatterns) > self.maxLrnBacktrack:self._prevLrnPatterns.pop()self._prevLrnPatterns.append(activeColumns)if self.verbosity >= :print(\"\")print(self._prevLrnPatterns)self._processSegmentUpdates(activeColumns)if self.pamCounter > :self.pamCounter -= self.learnedSeqLength += if not self.resetCalled:inSequence = self._learnPhase1(activeColumns)if inSequence:self.pamCounter = self.pamLengthif self.verbosity >= :print(\"\", self.pamCounter, \"\",self.learnedSeqLength)if (self.resetCalled or self.pamCounter == or(self.maxSeqLength != andself.learnedSeqLength >= self.maxSeqLength)):if self.verbosity >= :if self.resetCalled:print(\"\", activeColumns, \"\")elif self.pamCounter == :print(\"\", activeColumns, \"\")else:print(\"\", activeColumns, \"\")if self.pamCounter == :seqLength = self.learnedSeqLength - self.pamLengthelse:seqLength = self.learnedSeqLengthif self.verbosity >= :print(\"\", seqLength)self._updateAvgLearnedSeqLength(seqLength)backSteps = if not self.resetCalled:backSteps = self._learnBacktrack()if self.resetCalled or backSteps is None or backSteps == :backSteps = self.lrnActiveState[''].fill()for c in activeColumns:self.lrnActiveState[''][c, ] = self._prevLrnPatterns = []self.pamCounter = self.pamLengthself.learnedSeqLength = backStepsself.segmentUpdates = {}self._learnPhase2()", "docstring": "Update the learning state. 
Called from compute() on every iteration\n:param activeColumns List of active column indices", "id": "f17565:c0:m59"} {"signature": "def compute(self, bottomUpInput, enableLearn, enableInference=None):", "body": "if enableInference is None:if enableLearn:enableInference = Falseelse:enableInference = Trueassert (enableLearn or enableInference)activeColumns = bottomUpInput.nonzero()[]if enableLearn:self.lrnIterationIdx += self.iterationIdx += if self.verbosity >= :print(\"\" % (self.iterationIdx))print(\"\", activeColumns)if enableLearn:if self.lrnIterationIdx in Segment.dutyCycleTiers:for c, i in itertools.product(range(self.numberOfCols),range(self.cellsPerColumn)):for segment in self.cells[c][i]:segment.dutyCycle()if self.avgInputDensity is None:self.avgInputDensity = len(activeColumns)else:self.avgInputDensity = ( * self.avgInputDensity + * len(activeColumns))if enableInference:self._updateInferenceState(activeColumns)if enableLearn:self._updateLearningState(activeColumns)if self.globalDecay > and ((self.lrnIterationIdx % self.maxAge) == ):for c, i in itertools.product(range(self.numberOfCols),range(self.cellsPerColumn)):segsToDel = [] for segment in self.cells[c][i]:age = self.lrnIterationIdx - segment.lastActiveIterationif age <= self.maxAge:continuesynsToDel = [] for synapse in segment.syns:synapse[] = synapse[] - self.globalDecay if synapse[] <= :synsToDel.append(synapse) if len(synsToDel) == segment.getNumSynapses():segsToDel.append(segment) elif len(synsToDel) > :for syn in synsToDel: segment.syns.remove(syn)for seg in segsToDel: self._cleanUpdatesList(c, i, seg)self.cells[c][i].remove(seg)if self.collectStats:if enableInference:predictedState = self.infPredictedState['']else:predictedState = self.lrnPredictedState['']self._updateStatsInferEnd(self._internalStats,activeColumns,predictedState,self.colConfidence[''])output = self._computeOutput()self.printComputeEnd(output, learn=enableLearn)self.resetCalled = Falsereturn output", "docstring": "Handle one compute, possibly learning.\n\n.. note:: It is an error to have both ``enableLearn`` and \n ``enableInference`` set to False\n\n.. note:: By default, we don't compute the inference output when learning \n because it slows things down, but you can override this by passing \n in True for ``enableInference``.\n\n:param bottomUpInput: The bottom-up input as numpy list, typically from a \n spatial pooler.\n:param enableLearn: (bool) If true, perform learning\n:param enableInference: (bool) If None, default behavior is to disable the \n inference output when ``enableLearn`` is on. If true, compute the \n inference output. 
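A hedged usage sketch of :meth:`compute` and its :meth:`learn`/:meth:`infer` wrappers. The import path and the constructor keyword arguments (``numberOfCols``, ``cellsPerColumn``) are assumptions based on the attribute names used throughout this class, not something shown in this excerpt::

    import numpy
    from nupic.algorithms.backtracking_tm import BacktrackingTM  # assumed path

    tm = BacktrackingTM(numberOfCols=50, cellsPerColumn=2)        # assumed kwargs

    x = numpy.zeros(50, dtype="uint32")
    x[[1, 5, 9, 13, 21]] = 1          # bottom-up input: 1s mark active columns

    out = tm.learn(x)                 # learning only; inference output disabled
    out = tm.infer(x)                 # inference only
    out = tm.compute(x, enableLearn=True, enableInference=True)  # both at once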
If false, do not compute the inference output.\n\n:returns: TODO: document", "id": "f17565:c0:m60"} {"signature": "def infer(self, bottomUpInput):", "body": "return self.compute(bottomUpInput, enableLearn=False)", "docstring": "TODO: document\n\n:param bottomUpInput: \n:return:", "id": "f17565:c0:m61"} {"signature": "def learn(self, bottomUpInput, enableInference=None):", "body": "return self.compute(bottomUpInput, enableLearn=True,enableInference=enableInference)", "docstring": "TODO: document\n\n:param bottomUpInput: \n:param enableInference: \n:return:", "id": "f17565:c0:m62"} {"signature": "def _columnConfidences(self):", "body": "return self.colConfidence['']", "docstring": "Returns the stored column confidences from the last compute.\n\n:returns: Column confidence scores", "id": "f17565:c0:m63"} {"signature": "def topDownCompute(self):", "body": "return self._columnConfidences()", "docstring": "For now, we will assume there is no one above us and that bottomUpOut is\nsimply the output that corresponds to our currently stored column\nconfidences.\n\n:returns: the same thing as :meth:`columnConfidences`", "id": "f17565:c0:m64"} {"signature": "def _trimSegmentsInCell(self, colIdx, cellIdx, segList, minPermanence,minNumSyns):", "body": "if minPermanence is None:minPermanence = self.connectedPermif minNumSyns is None:minNumSyns = self.activationThresholdnSegsRemoved, nSynsRemoved = , segsToDel = [] for segment in segList:synsToDel = [syn for syn in segment.syns if syn[] < minPermanence]if len(synsToDel) == len(segment.syns):segsToDel.append(segment) else:if len(synsToDel) > :for syn in synsToDel: segment.syns.remove(syn)nSynsRemoved += if len(segment.syns) < minNumSyns:segsToDel.append(segment)nSegsRemoved += len(segsToDel)for seg in segsToDel: self._cleanUpdatesList(colIdx, cellIdx, seg)self.cells[colIdx][cellIdx].remove(seg)nSynsRemoved += len(seg.syns)return nSegsRemoved, nSynsRemoved", "docstring": "This method goes through a list of segments for a given cell and\ndeletes all synapses whose permanence is less than minPermanence and deletes\nany segments that have less than minNumSyns synapses remaining.\n\n:param colIdx Column index\n:param cellIdx Cell index within the column\n:param segList List of segment references\n:param minPermanence Any syn whose permanence is 0 or < minPermanence will\n be deleted.\n:param minNumSyns Any segment with less than minNumSyns synapses remaining\n in it will be deleted.\n\n:returns: tuple (numSegsRemoved, numSynsRemoved)", "id": "f17565:c0:m65"} {"signature": "def trimSegments(self, minPermanence=None, minNumSyns=None):", "body": "if minPermanence is None:minPermanence = self.connectedPermif minNumSyns is None:minNumSyns = self.activationThresholdtotalSegsRemoved, totalSynsRemoved = , for c, i in itertools.product(range(self.numberOfCols),range(self.cellsPerColumn)):(segsRemoved, synsRemoved) = self._trimSegmentsInCell(colIdx=c, cellIdx=i, segList=self.cells[c][i],minPermanence=minPermanence, minNumSyns=minNumSyns)totalSegsRemoved += segsRemovedtotalSynsRemoved += synsRemovedif self.verbosity >= :print(\"\")self.printCells(predictedOnly=False)return totalSegsRemoved, totalSynsRemoved", "docstring": "This method deletes all synapses whose permanence is less than\nminPermanence and deletes any segments that have less than\nminNumSyns synapses remaining.\n\n:param minPermanence: (float) Any syn whose permanence is 0 or < \n ``minPermanence`` will be deleted. 
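For completeness, a usage sketch of :meth:`trimSegments`; ``tm`` is assumed to be an already-trained instance and the explicit thresholds are illustrative values only::

    # Prune with the instance defaults (self.connectedPerm / self.activationThreshold).
    numSegsRemoved, numSynsRemoved = tm.trimSegments()

    # Or prune more aggressively with explicit thresholds (illustrative values).
    numSegsRemoved, numSynsRemoved = tm.trimSegments(minPermanence=0.3, minNumSyns=5)
    print(numSegsRemoved, numSynsRemoved)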
If None is passed in, then \n ``self.connectedPerm`` is used.\n:param minNumSyns: (int) Any segment with less than ``minNumSyns`` synapses \n remaining in it will be deleted. If None is passed in, then \n ``self.activationThreshold`` is used.\n:returns: (tuple) ``numSegsRemoved``, ``numSynsRemoved``", "id": "f17565:c0:m66"} {"signature": "def _cleanUpdatesList(self, col, cellIdx, seg):", "body": "for key, updateList in self.segmentUpdates.items():c, i = key[], key[]if c == col and i == cellIdx:for update in updateList:if update[].segment == seg:self._removeSegmentUpdate(update)", "docstring": "Removes any update that would be for the given col, cellIdx, segIdx.\n\nNOTE: logically, we need to do this when we delete segments, so that if\nan update refers to a segment that was just deleted, we also remove\nthat update from the update list. However, I haven't seen it trigger\nin any of the unit tests yet, so it might mean that it's not needed\nand that situation doesn't occur, by construction.", "id": "f17565:c0:m67"} {"signature": "def finishLearning(self):", "body": "self.trimSegments(minPermanence=)for c, i in itertools.product(range(self.numberOfCols),range(self.cellsPerColumn)):for segment in self.cells[c][i]:segment.dutyCycle()if self.cellsPerColumn > :for c in range(self.numberOfCols):assert self.getNumSegmentsInCell(c, ) == ", "docstring": "Called when learning has been completed. This method just calls\n:meth:`trimSegments` and then clears out caches.", "id": "f17565:c0:m68"} {"signature": "def _checkPrediction(self, patternNZs, output=None, colConfidence=None,details=False):", "body": "numPatterns = len(patternNZs)orAll = set()orAll = orAll.union(*patternNZs)if output is None:assert self.currentOutput is not Noneoutput = self.currentOutputoutput = set(output.sum(axis=).nonzero()[])totalExtras = len(output.difference(orAll))totalMissing = len(orAll.difference(output))if colConfidence is None:colConfidence = self.colConfidence['']confidences = []for i in range(numPatterns):positivePredictionSum = colConfidence[patternNZs[i]].sum()positiveColumnCount = len(patternNZs[i])totalPredictionSum = colConfidence.sum()totalColumnCount = len(colConfidence)negativePredictionSum = totalPredictionSum - positivePredictionSumnegativeColumnCount = totalColumnCount - positiveColumnCountif positiveColumnCount != :positivePredictionScore = positivePredictionSumelse:positivePredictionScore = if negativeColumnCount != :negativePredictionScore = negativePredictionSumelse:negativePredictionScore = currentSum = negativePredictionScore + positivePredictionScoreif currentSum > :positivePredictionScore *= /currentSumnegativePredictionScore *= /currentSumpredictionScore = positivePredictionScore - negativePredictionScoreconfidences.append((predictionScore,positivePredictionScore,negativePredictionScore))if details:missingPatternBits = [set(pattern).difference(output)for pattern in patternNZs]return (totalExtras, totalMissing, confidences, missingPatternBits)else:return (totalExtras, totalMissing, confidences)", "docstring": "This function produces goodness-of-match scores for a set of input patterns,\nby checking for their presence in the current and predicted output of the\nTM. Returns a global count of the number of extra and missing bits, the\nconfidence scores for each input pattern, and (if requested) the\nbits in each input pattern that were not present in the TM's prediction.\n\n:param patternNZs a list of input patterns that we want to check for. 
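The per-pattern confidence computed inside ``_checkPrediction`` above condenses to a normalized difference between the column confidence that falls on the pattern and the confidence that falls elsewhere. A hedged restatement of that arithmetic, with an illustrative example in the comments::

    import numpy

    def prediction_score_sketch(colConfidence, patternNZ):
        # patternNZ: indices of the columns that are on in the target pattern.
        positive = colConfidence[patternNZ].sum()
        negative = colConfidence.sum() - positive
        total = positive + negative
        if total > 0:
            positive /= total
            negative /= total
        return positive - negative   # in [-1, 1]; 1 means all confidence on the pattern

    # e.g. colConfidence = numpy.array([0.5, 0.3, 0.2, 0.0]) and patternNZ = [0, 1]:
    # positive = 0.8, negative = 0.2, score = 0.6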
Each\n element is a list of the non-zeros in that pattern.\n:param output The output of the TM. If not specified, then use the\n TM's current output. This can be specified if you are\n trying to check the prediction metric for an output from\n the past.\n:param colConfidence The column confidences. If not specified, then use the\n TM's current self.colConfidence. This can be specified if you\n are trying to check the prediction metrics for an output\n from the past.\n:param details if True, also include details of missing bits per pattern.\n\n:returns: list containing:\n\n [\n totalExtras,\n totalMissing,\n [conf_1, conf_2, ...],\n [missing1, missing2, ...]\n ]\n\n@retval totalExtras a global count of the number of 'extras', i.e. bits that\n are on in the current output but not in the or of all the\n passed in patterns\n@retval totalMissing a global count of all the missing bits, i.e. the bits\n that are on in the or of the patterns, but not in the\n current output\n@retval conf_i the confidence score for the i'th pattern inpatternsToCheck\n This consists of 3 items as a tuple:\n (predictionScore, posPredictionScore, negPredictionScore)\n@retval missing_i the bits in the i'th pattern that were missing\n in the output. This list is only returned if details is\n True.", "id": "f17565:c0:m69"} {"signature": "def _isSegmentActive(self, seg, activeState):", "body": "return isSegmentActive(seg.syns, activeState,self.connectedPerm, self.activationThreshold)", "docstring": "A segment is active if it has >= activationThreshold connected\nsynapses that are active due to activeState.\n\nNotes: studied various cutoffs, none of which seem to be worthwhile\n list comprehension didn't help either\n\n:param seg TODO: document\n:param activeState TODO: document", "id": "f17565:c0:m70"} {"signature": "def _getSegmentActivityLevel(self, seg, activeState,connectedSynapsesOnly=False):", "body": "return getSegmentActivityLevel(seg.syns, activeState, connectedSynapsesOnly,self.connectedPerm)", "docstring": "This routine computes the activity level of a segment given activeState.\nIt can tally up only connected synapses (permanence >= connectedPerm), or\nall the synapses of the segment, at either t or t-1.\n\n:param seg TODO: document\n:param activeState TODO: document\n:param connectedSynapsesOnly TODO: document", "id": "f17565:c0:m71"} {"signature": "def _getBestMatchingCell(self, c, activeState, minThreshold):", "body": "bestActivityInCol = minThresholdbestSegIdxInCol = -bestCellInCol = -for i in range(self.cellsPerColumn):maxSegActivity = maxSegIdx = for j, s in enumerate(self.cells[c][i]):activity = self._getSegmentActivityLevel(s, activeState)if activity > maxSegActivity:maxSegActivity = activitymaxSegIdx = jif maxSegActivity >= bestActivityInCol:bestActivityInCol = maxSegActivitybestSegIdxInCol = maxSegIdxbestCellInCol = iif bestCellInCol == -:return (None, None, None)else:return (bestCellInCol, self.cells[c][bestCellInCol][bestSegIdxInCol],bestActivityInCol)", "docstring": "Find weakly activated cell in column with at least minThreshold active\nsynapses.\n\n:param c which column to look at\n:param activeState the active cells\n:param minThreshold minimum number of synapses required\n\n:returns: tuple (cellIdx, segment, numActiveSynapses)", "id": "f17565:c0:m72"} {"signature": "def _getBestMatchingSegment(self, c, i, activeState):", "body": "maxActivity, which = self.minThreshold, -for j, s in enumerate(self.cells[c][i]):activity = self._getSegmentActivityLevel(s, activeState,connectedSynapsesOnly=False)if 
activity >= maxActivity:maxActivity, which = activity, jif which == -:return Noneelse:return self.cells[c][i][which]", "docstring": "For the given cell, find the segment with the largest number of active\nsynapses. This routine is aggressive in finding the best match. The\npermanence value of synapses is allowed to be below connectedPerm. The number\nof active synapses is allowed to be below activationThreshold, but must be\nabove minThreshold. The routine returns the segment index. If no segments are\nfound, then an index of -1 is returned.\n\n:param c TODO: document\n:param i TODO: document\n:param activeState TODO: document", "id": "f17565:c0:m73"} {"signature": "def _getCellForNewSegment(self, colIdx):", "body": "if self.maxSegmentsPerCell < :if self.cellsPerColumn > :i = self._random.getUInt32(self.cellsPerColumn-) + else:i = return icandidateCellIdxs = []if self.cellsPerColumn == :minIdx = maxIdx = else:minIdx = maxIdx = self.cellsPerColumn-for i in range(minIdx, maxIdx+):numSegs = len(self.cells[colIdx][i])if numSegs < self.maxSegmentsPerCell:candidateCellIdxs.append(i)if len(candidateCellIdxs) > :candidateCellIdx = (candidateCellIdxs[self._random.getUInt32(len(candidateCellIdxs))])if self.verbosity >= :print(\"\" % (colIdx, candidateCellIdx, len(self.cells[colIdx][candidateCellIdx])))return candidateCellIdxcandidateSegment = NonecandidateSegmentDC = for i in range(minIdx, maxIdx+):for s in self.cells[colIdx][i]:dc = s.dutyCycle()if dc < candidateSegmentDC:candidateCellIdx = icandidateSegmentDC = dccandidateSegment = sif self.verbosity >= :print((\"\"\"\" % (candidateSegment.segID, colIdx, candidateCellIdx)))candidateSegment.debugPrint()self._cleanUpdatesList(colIdx, candidateCellIdx, candidateSegment)self.cells[colIdx][candidateCellIdx].remove(candidateSegment)return candidateCellIdx", "docstring": "Return the index of a cell in this column which is a good candidate\nfor adding a new segment.\n\nWhen we have fixed size resources in effect, we insure that we pick a\ncell which does not already have the max number of allowed segments. If\nnone exists, we choose the least used segment in the column to re-allocate.\n\n:param colIdx which column to look at\n:returns: cell index", "id": "f17565:c0:m74"} {"signature": "def _getSegmentActiveSynapses(self, c, i, s, activeState, newSynapses=False):", "body": "activeSynapses = []if s is not None: activeSynapses = [idx for idx, syn in enumerate(s.syns)if activeState[syn[], syn[]]]if newSynapses: nSynapsesToAdd = self.newSynapseCount - len(activeSynapses)activeSynapses += self._chooseCellsToLearnFrom(c, i, s, nSynapsesToAdd,activeState)update = BacktrackingTM._SegmentUpdate(c, i, s, activeSynapses)return update", "docstring": "Return a segmentUpdate data structure containing a list of proposed\nchanges to segment s. Let activeSynapses be the list of active synapses\nwhere the originating cells have their activeState output = 1 at time step\nt. (This list is empty if s is None since the segment doesn't exist.)\nnewSynapses is an optional argument that defaults to false. If newSynapses\nis true, then newSynapseCount - len(activeSynapses) synapses are added to\nactiveSynapses. 
These synapses are randomly chosen from the set of cells\nthat have learnState = 1 at timeStep.\n\n:param c TODO: document\n:param i TODO: document\n:param s TODO: document\n:param activeState TODO: document\n:param newSynapses TODO: document", "id": "f17565:c0:m75"} {"signature": "def _chooseCellsToLearnFrom(self, c, i, s, n, activeState):", "body": "if n <= :return []tmpCandidates = numpy.where(activeState == )if len(tmpCandidates[]) == :return []if s is None: cands = [syn for syn in zip(tmpCandidates[], tmpCandidates[])]else:synapsesAlreadyInSegment = set((syn[], syn[]) for syn in s.syns)cands = [syn for syn in zip(tmpCandidates[], tmpCandidates[])if (syn[], syn[]) not in synapsesAlreadyInSegment]if len(cands) <= n:return candsif n == : idx = self._random.getUInt32(len(cands))return [cands[idx]] indices = numpy.array([j for j in range(len(cands))], dtype='')tmp = numpy.zeros(min(n, len(indices)), dtype='')self._random.sample(indices, tmp)return sorted([cands[j] for j in tmp])", "docstring": "Choose n random cells to learn from.\n\nThis function is called several times while learning with timeStep = t-1, so\nwe cache the set of candidates for that case. It's also called once with\ntimeStep = t, and we cache that set of candidates.\n\n:returns: tuple (column index, cell index).", "id": "f17565:c0:m76"} {"signature": "def _processSegmentUpdates(self, activeColumns):", "body": "removeKeys = []trimSegments = []for key, updateList in self.segmentUpdates.items():c, i = key[], key[]if c in activeColumns:action = ''else:if self.doPooling and self.lrnPredictedState[''][c, i] == :action = ''else:action = ''updateListKeep = []if action != '':for (createDate, segUpdate) in updateList:if self.verbosity >= :print(\"\", self.lrnIterationIdx, end='')print(segUpdate)if self.lrnIterationIdx - createDate > self.segUpdateValidDuration:continueif action == '':trimSegment = self._adaptSegment(segUpdate)if trimSegment:trimSegments.append((segUpdate.columnIdx, segUpdate.cellIdx,segUpdate.segment))else:updateListKeep.append((createDate, segUpdate))self.segmentUpdates[key] = updateListKeepif len(updateListKeep) == :removeKeys.append(key)for key in removeKeys:self.segmentUpdates.pop(key)for (c, i, segment) in trimSegments:self._trimSegmentsInCell(c, i, [segment], minPermanence = ,minNumSyns = )", "docstring": "Go through the list of accumulated segment updates and process them\nas follows:\n\nif the segment update is too old, remove the update\nelse if the cell received bottom-up, update its permanences\nelse if it's still being predicted, leave it in the queue\nelse remove it.\n\n:param activeColumns TODO: document", "id": "f17565:c0:m77"} {"signature": "def _adaptSegment(self, segUpdate):", "body": "trimSegment = Falsec, i, segment = segUpdate.columnIdx, segUpdate.cellIdx, segUpdate.segmentactiveSynapses = segUpdate.activeSynapsessynToUpdate = set([syn for syn in activeSynapses if type(syn) == int])if segment is not None:if self.verbosity >= :print(\"\" % (segment.segID, c, i))print(\"\", end='')segment.debugPrint()segment.lastActiveIteration = self.lrnIterationIdxsegment.positiveActivations += segment.dutyCycle(active=True)lastSynIndex = len(segment.syns) - inactiveSynIndices = [s for s in range(, lastSynIndex+)if s not in synToUpdate]trimSegment = segment.updateSynapses(inactiveSynIndices,-self.permanenceDec)activeSynIndices = [syn for syn in synToUpdate if syn <= lastSynIndex]segment.updateSynapses(activeSynIndices, self.permanenceInc)synsToAdd = [syn for syn in activeSynapses if type(syn) != int]if 
self.maxSynapsesPerSegment > and len(synsToAdd) + len(segment.syns) > self.maxSynapsesPerSegment:numToFree = (len(segment.syns) + len(synsToAdd) -self.maxSynapsesPerSegment)segment.freeNSynapses(numToFree, inactiveSynIndices, self.verbosity)for newSyn in synsToAdd:segment.addSynapse(newSyn[], newSyn[], self.initialPerm)if self.verbosity >= :print(\"\", end='')segment.debugPrint()else:newSegment = Segment(tm=self, isSequenceSeg=segUpdate.sequenceSegment)for synapse in activeSynapses:newSegment.addSynapse(synapse[], synapse[], self.initialPerm)if self.verbosity >= :print(\"\" % (self.segID-, c, i), end='')newSegment.debugPrint()self.cells[c][i].append(newSegment)return trimSegment", "docstring": "This function applies segment update information to a segment in a\ncell.\n\nSynapses on the active list get their permanence counts incremented by\npermanenceInc. All other synapses get their permanence counts decremented\nby permanenceDec.\n\nWe also increment the positiveActivations count of the segment.\n\n:param segUpdate SegmentUpdate instance\n:returns: True if some synapses were decremented to 0 and the segment is a\n candidate for trimming", "id": "f17565:c0:m78"} {"signature": "def getSegmentInfo(self, collectActiveData = False):", "body": "nSegments, nSynapses = , nActiveSegs, nActiveSynapses = , distSegSizes, distNSegsPerCell = {}, {}distPermValues = {} numAgeBuckets = distAges = []ageBucketSize = int((self.lrnIterationIdx+) / )for i in range(numAgeBuckets):distAges.append(['' % (i*ageBucketSize, (i+)*ageBucketSize-), ])for c in range(self.numberOfCols):for i in range(self.cellsPerColumn):if len(self.cells[c][i]) > :nSegmentsThisCell = len(self.cells[c][i])nSegments += nSegmentsThisCellif nSegmentsThisCell in distNSegsPerCell:distNSegsPerCell[nSegmentsThisCell] += else:distNSegsPerCell[nSegmentsThisCell] = for seg in self.cells[c][i]:nSynapsesThisSeg = seg.getNumSynapses()nSynapses += nSynapsesThisSegif nSynapsesThisSeg in distSegSizes:distSegSizes[nSynapsesThisSeg] += else:distSegSizes[nSynapsesThisSeg] = for syn in seg.syns:p = int(syn[]*)if p in distPermValues:distPermValues[p] += else:distPermValues[p] = age = self.lrnIterationIdx - seg.lastActiveIterationageBucket = int(age/ageBucketSize)distAges[ageBucket][] += if collectActiveData:if self._isSegmentActive(seg, self.infActiveState['']):nActiveSegs += for syn in seg.syns:if self.activeState[''][syn[]][syn[]] == :nActiveSynapses += return (nSegments, nSynapses, nActiveSegs, nActiveSynapses,distSegSizes, distNSegsPerCell, distPermValues, distAges)", "docstring": "Returns information about the distribution of segments, synapses and\n permanence values in the current TM. 
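The per-synapse arithmetic that ``_adaptSegment`` applies (and that ``updateSynapses`` implements further down) amounts to a bounded reinforce/punish rule. A minimal sketch, with illustrative values in the comments::

    def adapt_permanence(perm, active, permanenceInc, permanenceDec, permanenceMax):
        # Active synapses are reinforced, all others are punished; values are
        # clamped to the range [0, permanenceMax].
        if active:
            return min(perm + permanenceInc, permanenceMax)
        return max(perm - permanenceDec, 0.0)

    # adapt_permanence(0.30, True,  0.10, 0.10, 1.0)  -> 0.40
    # adapt_permanence(0.05, False, 0.10, 0.10, 1.0)  -> 0.0  (candidate for trimming)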
If requested, also returns information\n regarding the number of currently active segments and synapses.\n\n :returns: tuple described below:\n\n ::\n\n (\n nSegments,\n nSynapses,\n nActiveSegs,\n nActiveSynapses,\n distSegSizes,\n distNSegsPerCell,\n distPermValues,\n distAges\n )\n\n - ``nSegments``: (int) total number of segments\n - ``nSynapses``: (int) total number of synapses\n - ``nActiveSegs``: (int) total number of active segments (0 if \n ``collectActiveData`` is False)\n - ``nActiveSynapses``: (int) total number of active synapses 0 if \n ``collectActiveData`` is False\n - ``distSegSizes``: (dict) where d[n] = number of segments with n synapses\n - ``distNSegsPerCell``: (dict) where d[n] = number of cells with n segments\n - ``distPermValues``: (dict) where d[p] = number of synapses with perm = p/10\n - ``distAges``: (list) of tuples (``ageRange``, ``numSegments``)", "id": "f17565:c0:m79"} {"signature": "def dutyCycle(self, active=False, readOnly=False):", "body": "if self.tm.lrnIterationIdx <= self.dutyCycleTiers[]:dutyCycle = float(self.positiveActivations)/ self.tm.lrnIterationIdxif not readOnly:self._lastPosDutyCycleIteration = self.tm.lrnIterationIdxself._lastPosDutyCycle = dutyCyclereturn dutyCycleage = self.tm.lrnIterationIdx - self._lastPosDutyCycleIterationif age == and not active:return self._lastPosDutyCyclefor tierIdx in range(len(self.dutyCycleTiers)-, , -):if self.tm.lrnIterationIdx > self.dutyCycleTiers[tierIdx]:alpha = self.dutyCycleAlphas[tierIdx]breakdutyCycle = pow(-alpha, age) * self._lastPosDutyCycleif active:dutyCycle += alphaif not readOnly:self._lastPosDutyCycleIteration = self.tm.lrnIterationIdxself._lastPosDutyCycle = dutyCyclereturn dutyCycle", "docstring": "Compute/update and return the positive activations duty cycle of\n this segment. This is a measure of how often this segment is\n providing good predictions.\n\n :param active True if segment just provided a good prediction\n\n :param readOnly If True, compute the updated duty cycle, but don't change\n the cached value. This is used by debugging print statements.\n\n :returns: The duty cycle, a measure of how often this segment is\n providing good predictions.\n\n **NOTE:** This method relies on different schemes to compute the duty cycle\n based on how much history we have. 
In order to support this tiered\n approach **IT MUST BE CALLED ON EVERY SEGMENT AT EACH DUTY CYCLE TIER**\n (@ref dutyCycleTiers).\n\n When we don't have a lot of history yet (first tier), we simply return\n number of positive activations / total number of iterations\n\n After a certain number of iterations have accumulated, it converts into\n a moving average calculation, which is updated only when requested\n since it can be a bit expensive to compute on every iteration (it uses\n the pow() function).\n\n The duty cycle is computed as follows:\n\n dc[t] = (1-alpha) * dc[t-1] + alpha * value[t]\n\n If the value[t] has been 0 for a number of steps in a row, you can apply\n all of the updates at once using:\n\n dc[t] = (1-alpha)^(t-lastT) * dc[lastT]\n\n We use the alphas and tiers as defined in @ref dutyCycleAlphas and\n @ref dutyCycleTiers.", "id": "f17565:c1:m6"} {"signature": "def debugPrint(self):", "body": "print(\"\" % (self.segID), end='')if self.isSequenceSeg:print(\"\", end='')else:print(\"\", end='')print(\"\" % (self.dutyCycle(readOnly=True)), end='')print(\"\" % (self.positiveActivations,self.totalActivations), end='')print(\"\" % (self.tm.lrnIterationIdx - self.lastActiveIteration), end='')sortedSyns = sorted(self.syns)for _, synapse in enumerate(sortedSyns):print(\"\" % (synapse[], synapse[], synapse[]), end='')print()", "docstring": "Print segment information for verbose messaging and debugging.\n This uses the following format:\n\n ID:54413 True 0.64801 (24/36) 101 [9,1]0.75 [10,1]0.75 [11,1]0.75\n\n where:\n 54413 - is the unique segment id\n True - is sequence segment\n 0.64801 - moving average duty cycle\n (24/36) - (numPositiveActivations / numTotalActivations)\n 101 - age, number of iterations since last activated\n [9,1]0.75 - synapse from column 9, cell #1, strength 0.75\n [10,1]0.75 - synapse from column 10, cell #1, strength 0.75\n [11,1]0.75 - synapse from column 11, cell #1, strength 0.75", "id": "f17565:c1:m7"} {"signature": "def freeNSynapses(self, numToFree, inactiveSynapseIndices, verbosity= ):", "body": "assert (numToFree <= len(self.syns))if (verbosity >= ):print(\"\", numToFree, end='')print(\"\", end='')for i in inactiveSynapseIndices:print(self.syns[i][:], end='')print()if len(inactiveSynapseIndices) > :perms = numpy.array([self.syns[i][] for i in inactiveSynapseIndices])candidates = numpy.array(inactiveSynapseIndices)[perms.argsort()[:numToFree]]candidates = list(candidates)else:candidates = []if len(candidates) < numToFree:activeSynIndices = [i for i in range(len(self.syns))if i not in inactiveSynapseIndices]perms = numpy.array([self.syns[i][] for i in activeSynIndices])moreToFree = numToFree - len(candidates)moreCandidates = numpy.array(activeSynIndices)[perms.argsort()[:moreToFree]]candidates += list(moreCandidates)if verbosity >= :print(\"\" % (len(candidates)), candidates)print(\"\", end='')self.debugPrint()synsToDelete = [self.syns[i] for i in candidates]for syn in synsToDelete:self.syns.remove(syn)if verbosity >= :print(\"\", end='')self.debugPrint()", "docstring": "Free up some synapses in this segment. 
We always free up inactive\n synapses (lowest permanence freed up first) before we start to free up\n active ones.\n\n :param numToFree number of synapses to free up\n :param inactiveSynapseIndices list of the inactive synapse indices.", "id": "f17565:c1:m10"} {"signature": "def addSynapse(self, srcCellCol, srcCellIdx, perm):", "body": "self.syns.append([int(srcCellCol), int(srcCellIdx), numpy.float32(perm)])", "docstring": "Add a new synapse\n\n :param srcCellCol source cell column\n :param srcCellIdx source cell index within the column\n :param perm initial permanence", "id": "f17565:c1:m11"} {"signature": "def updateSynapses(self, synapses, delta):", "body": "reached0 = Falseif delta > :for synapse in synapses:self.syns[synapse][] = newValue = self.syns[synapse][] + deltaif newValue > self.tm.permanenceMax:self.syns[synapse][] = self.tm.permanenceMaxelse:for synapse in synapses:self.syns[synapse][] = newValue = self.syns[synapse][] + deltaif newValue <= :self.syns[synapse][] = reached0 = Truereturn reached0", "docstring": "Update a set of synapses in the segment.\n\n :param tm The owner TM\n :param synapses List of synapse indices to update\n :param delta How much to add to each permanence\n\n :returns: True if synapse reached 0", "id": "f17565:c1:m12"} {"signature": "def binSearch(arr, val):", "body": "i = bisect_left(arr, val)if i != len(arr) and arr[i] == val:return ireturn -", "docstring": "Function for running binary search on a sorted list.\n\n:param arr: (list) a sorted list of integers to search\n:param val: (int) a integer to search for in the sorted array\n:returns: (int) the index of the element if it is found and -1 otherwise.", "id": "f17567:m0"} {"signature": "def __eq__(self, other):", "body": "return (self.cell == other.cell and(sorted(self._synapses, key=lambda x: x._ordinal) ==sorted(other._synapses, key=lambda x: x._ordinal)))", "docstring": "Explicitly implement this for unit testing. The flatIdx is not designed\n to be consistent after serialize / deserialize, and the synapses might not\n enumerate in the same order.", "id": "f17567:c0:m1"} {"signature": "def __eq__(self, other):", "body": "return (self.segment.cell == other.segment.cell andself.presynapticCell == other.presynapticCell andabs(self.permanence - other.permanence) < EPSILON)", "docstring": "Explicitly implement this for unit testing. Allow floating point\n differences for synapse permanence.", "id": "f17567:c1:m1"} {"signature": "def segmentsForCell(self, cell):", "body": "return self._cells[cell]._segments", "docstring": "Returns the segments that belong to a cell.\n\n:param cell: (int) Cell index\n:returns: (list) Segment objects representing segments on the given cell.", "id": "f17567:c3:m1"} {"signature": "def synapsesForSegment(self, segment):", "body": "return segment._synapses", "docstring": "Returns the synapses on a segment.\n\n:param segment: (int) Segment index\n:returns: (set) Synapse objects representing synapses on the given segment.", "id": "f17567:c3:m2"} {"signature": "def dataForSynapse(self, synapse):", "body": "return synapse", "docstring": "Returns the data for a synapse.\n\n.. note:: This method exists to match the interface of the C++ Connections. \n This allows tests and tools to inspect the connections using a common \n interface.\n\n:param synapse: (:class:`Synapse`)\n:returns: Synapse data", "id": "f17567:c3:m3"} {"signature": "def dataForSegment(self, segment):", "body": "return segment", "docstring": "Returns the data for a segment.\n\n.. 
note:: This method exists to match the interface of the C++ Connections. \n This allows tests and tools to inspect the connections using a common \n interface.\n\n:param segment (:class:`Segment`)\n:returns: segment data", "id": "f17567:c3:m4"} {"signature": "def getSegment(self, cell, idx):", "body": "return self._cells[cell]._segments[idx]", "docstring": "Returns a :class:`Segment` object of the specified segment using data from \nthe ``self._cells`` array.\n\n:param cell: (int) cell index\n:param idx: (int) segment index on a cell\n:returns: (:class:`Segment`) Segment object with index idx on the specified cell", "id": "f17567:c3:m5"} {"signature": "def segmentForFlatIdx(self, flatIdx):", "body": "return self._segmentForFlatIdx[flatIdx]", "docstring": "Get the segment with the specified flatIdx.\n\n:param flatIdx: (int) The segment's flattened list index.\n\n:returns: (:class:`Segment`)", "id": "f17567:c3:m6"} {"signature": "def segmentFlatListLength(self):", "body": "return self._nextFlatIdx", "docstring": "Get the needed length for a list to hold a value for every segment's \nflatIdx.\n\n:returns: (int) Required list length", "id": "f17567:c3:m7"} {"signature": "def synapsesForPresynapticCell(self, presynapticCell):", "body": "return self._synapsesForPresynapticCell[presynapticCell]", "docstring": "Returns the synapses for the source cell that they synapse on.\n\n:param presynapticCell: (int) Source cell index\n\n:returns: (set) :class:`Synapse` objects", "id": "f17567:c3:m8"} {"signature": "def createSegment(self, cell):", "body": "cellData = self._cells[cell]if len(self._freeFlatIdxs) > :flatIdx = self._freeFlatIdxs.pop()else:flatIdx = self._nextFlatIdxself._segmentForFlatIdx.append(None)self._nextFlatIdx += ordinal = self._nextSegmentOrdinalself._nextSegmentOrdinal += segment = Segment(cell, flatIdx, ordinal)cellData._segments.append(segment)self._segmentForFlatIdx[flatIdx] = segmentreturn segment", "docstring": "Adds a new segment on a cell.\n\n:param cell: (int) Cell index\n:returns: (int) New segment index", "id": "f17567:c3:m9"} {"signature": "def destroySegment(self, segment):", "body": "for synapse in segment._synapses:self._removeSynapseFromPresynapticMap(synapse)self._numSynapses -= len(segment._synapses)segments = self._cells[segment.cell]._segmentsi = segments.index(segment)del segments[i]self._freeFlatIdxs.append(segment.flatIdx)self._segmentForFlatIdx[segment.flatIdx] = None", "docstring": "Destroys a segment.\n\n:param segment: (:class:`Segment`) representing the segment to be destroyed.", "id": "f17567:c3:m10"} {"signature": "def createSynapse(self, segment, presynapticCell, permanence):", "body": "idx = len(segment._synapses)synapse = Synapse(segment, presynapticCell, permanence,self._nextSynapseOrdinal)self._nextSynapseOrdinal += segment._synapses.add(synapse)self._synapsesForPresynapticCell[presynapticCell].add(synapse)self._numSynapses += return synapse", "docstring": "Creates a new synapse on a segment.\n\n:param segment: (:class:`Segment`) Segment object for synapse to be synapsed \n to.\n:param presynapticCell: (int) Source cell index.\n:param permanence: (float) Initial permanence of synapse.\n:returns: (:class:`Synapse`) created synapse", "id": "f17567:c3:m11"} {"signature": "def destroySynapse(self, synapse):", "body": "self._numSynapses -= self._removeSynapseFromPresynapticMap(synapse)synapse.segment._synapses.remove(synapse)", "docstring": "Destroys a synapse.\n\n:param synapse: (:class:`Synapse`) synapse to destroy", "id": "f17567:c3:m13"} {"signature": 
"def updateSynapsePermanence(self, synapse, permanence):", "body": "synapse.permanence = permanence", "docstring": "Updates the permanence for a synapse.\n\n:param synapse: (class:`Synapse`) to be updated.\n:param permanence: (float) New permanence.", "id": "f17567:c3:m14"} {"signature": "def computeActivity(self, activePresynapticCells, connectedPermanence):", "body": "numActiveConnectedSynapsesForSegment = [] * self._nextFlatIdxnumActivePotentialSynapsesForSegment = [] * self._nextFlatIdxthreshold = connectedPermanence - EPSILONfor cell in activePresynapticCells:for synapse in self._synapsesForPresynapticCell[cell]:flatIdx = synapse.segment.flatIdxnumActivePotentialSynapsesForSegment[flatIdx] += if synapse.permanence > threshold:numActiveConnectedSynapsesForSegment[flatIdx] += return (numActiveConnectedSynapsesForSegment,numActivePotentialSynapsesForSegment)", "docstring": "Compute each segment's number of active synapses for a given input.\nIn the returned lists, a segment's active synapse count is stored at index\n``segment.flatIdx``.\n\n:param activePresynapticCells: (iter) Active cells.\n:param connectedPermanence: (float) Permanence threshold for a synapse to be \n considered connected\n\n:returns: (tuple) (``numActiveConnectedSynapsesForSegment`` [list],\n ``numActivePotentialSynapsesForSegment`` [list])", "id": "f17567:c3:m15"} {"signature": "def numSegments(self, cell=None):", "body": "if cell is not None:return len(self._cells[cell]._segments)return self._nextFlatIdx - len(self._freeFlatIdxs)", "docstring": "Returns the number of segments.\n\n:param cell: (int) Optional parameter to get the number of segments on a \n cell.\n:returns: (int) Number of segments on all cells if cell is not specified, or \n on a specific specified cell", "id": "f17567:c3:m16"} {"signature": "def numSynapses(self, segment=None):", "body": "if segment is not None:return len(segment._synapses)return self._numSynapses", "docstring": "Returns the number of Synapses.\n\n:param segment: (:class:`Segment`) Optional parameter to get the number of \n synapses on a segment.\n\n:returns: (int) Number of synapses on all segments if segment is not \n specified, or on a specified segment.", "id": "f17567:c3:m17"} {"signature": "def segmentPositionSortKey(self, segment):", "body": "return segment.cell + (segment._ordinal / float(self._nextSegmentOrdinal))", "docstring": "Return a numeric key for sorting this segment. 
This can be used with the \npython built-in ``sorted()`` function.\n\n:param segment: (:class:`Segment`) within this :class:`Connections` \n instance.\n:returns: (float) A numeric key for sorting.", "id": "f17567:c3:m18"} {"signature": "def write(self, proto):", "body": "protoCells = proto.init('', self.numCells)for i in xrange(self.numCells):segments = self._cells[i]._segmentsprotoSegments = protoCells[i].init('', len(segments))for j, segment in enumerate(segments):synapses = segment._synapsesprotoSynapses = protoSegments[j].init('', len(synapses))for k, synapse in enumerate(sorted(synapses, key=lambda s: s._ordinal)):protoSynapses[k].presynapticCell = synapse.presynapticCellprotoSynapses[k].permanence = synapse.permanence", "docstring": "Writes serialized data to proto object.\n\n:param proto: (DynamicStructBuilder) Proto object", "id": "f17567:c3:m19"} {"signature": "@classmethoddef read(cls, proto):", "body": "protoCells = proto.cellsconnections = cls(len(protoCells))for cellIdx, protoCell in enumerate(protoCells):protoCell = protoCells[cellIdx]protoSegments = protoCell.segmentsconnections._cells[cellIdx] = CellData()segments = connections._cells[cellIdx]._segmentsfor segmentIdx, protoSegment in enumerate(protoSegments):segment = Segment(cellIdx, connections._nextFlatIdx,connections._nextSegmentOrdinal)segments.append(segment)connections._segmentForFlatIdx.append(segment)connections._nextFlatIdx += connections._nextSegmentOrdinal += synapses = segment._synapsesprotoSynapses = protoSegment.synapsesfor synapseIdx, protoSynapse in enumerate(protoSynapses):presynapticCell = protoSynapse.presynapticCellsynapse = Synapse(segment, presynapticCell, protoSynapse.permanence,ordinal=connections._nextSynapseOrdinal)connections._nextSynapseOrdinal += synapses.add(synapse)connections._synapsesForPresynapticCell[presynapticCell].add(synapse)connections._numSynapses += return connections", "docstring": "Reads deserialized data from proto object\n\n:param proto: (DynamicStructBuilder) Proto object\n\n:returns: (:class:`Connections`) instance", "id": "f17567:c3:m21"} {"signature": "def __eq__(self, other):", "body": "for i in xrange(self.numCells):segments = self._cells[i]._segmentsotherSegments = other._cells[i]._segmentsif len(segments) != len(otherSegments):return Falsefor j in xrange(len(segments)):segment = segments[j]otherSegment = otherSegments[j]synapses = segment._synapsesotherSynapses = otherSegment._synapsesif len(synapses) != len(otherSynapses):return Falsefor synapse in synapses:found = Falsefor candidate in otherSynapses:if synapse == candidate:found = Truebreakif not found:return Falseif (len(self._synapsesForPresynapticCell) !=len(self._synapsesForPresynapticCell)):return Falsefor i in self._synapsesForPresynapticCell.keys():synapses = self._synapsesForPresynapticCell[i]otherSynapses = other._synapsesForPresynapticCell[i]if len(synapses) != len(otherSynapses):return Falsefor synapse in synapses:found = Falsefor candidate in otherSynapses:if synapse == candidate:found = Truebreakif not found:return Falseif self._numSynapses != other._numSynapses:return Falsereturn True", "docstring": "Equality operator for Connections instances.\n Checks if two instances are functionally identical\n\n :param other: (:class:`Connections`) Connections instance to compare to", "id": "f17567:c3:m22"} {"signature": "def __ne__(self, other):", "body": "return not self.__eq__(other)", "docstring": "Non-equality operator for Connections instances.\nChecks if two instances are not functionally identical\n\n:param 
other: (:class:`Connections`) Connections instance to compare to", "id": "f17567:c3:m23"} {"signature": "def _labeledInput(activeInputs, cellsPerCol=):", "body": "if cellsPerCol == :cellsPerCol = cols = activeInputs.size / cellsPerColactiveInputs = activeInputs.reshape(cols, cellsPerCol)(cols, cellIdxs) = activeInputs.nonzero()if len(cols) == :return \"\"items = [\"\" % (len(cols))]prevCol = -for (col,cellIdx) in zip(cols, cellIdxs):if col != prevCol:if prevCol != -:items.append(\"\")items.append(\"\" % col)prevCol = colitems.append(\"\" % cellIdx)items.append(\"\")return \"\".join(items)", "docstring": "Print the list of [column, cellIdx] indices for each of the active\n cells in activeInputs.", "id": "f17568:m0"} {"signature": "def clear(self):", "body": "self._Memory = Noneself._numPatterns = self._M = Noneself._categoryList = []self._partitionIdList = []self._partitionIdMap = {}self._finishedLearning = Falseself._iterationIdx = -if self.maxStoredPatterns > :assert self.useSparseMemory, (\"\"\"\")self.fixedCapacity = Trueself._categoryRecencyList = []else:self.fixedCapacity = Falseself._protoSizes = Noneself._s = Noneself._vt = Noneself._nc = Noneself._mean = Noneself._specificIndexTraining = Falseself._nextTrainingIndices = None", "docstring": "Clears the state of the KNNClassifier.", "id": "f17568:c0:m1"} {"signature": "def prototypeSetCategory(self, idToCategorize, newCategory):", "body": "if idToCategorize not in self._categoryRecencyList:returnrecordIndex = self._categoryRecencyList.index(idToCategorize)self._categoryList[recordIndex] = newCategory", "docstring": "Allows ids to be assigned a category and subsequently enables users to use:\n\n - :meth:`~.KNNClassifier.KNNClassifier.removeCategory`\n - :meth:`~.KNNClassifier.KNNClassifier.closestTrainingPattern`\n - :meth:`~.KNNClassifier.KNNClassifier.closestOtherTrainingPattern`", "id": "f17568:c0:m4"} {"signature": "def removeIds(self, idsToRemove):", "body": "rowsToRemove = [k for k, rowID in enumerate(self._categoryRecencyList)if rowID in idsToRemove]self._removeRows(rowsToRemove)", "docstring": "There are two caveats. First, this is a potentially slow operation. Second,\npattern indices will shift if patterns before them are removed.\n\n:param idsToRemove: A list of row indices to remove.", "id": "f17568:c0:m5"} {"signature": "def removeCategory(self, categoryToRemove):", "body": "removedRows = if self._Memory is None:return removedRowscatToRemove = float(categoryToRemove)rowsToRemove = [k for k, catID in enumerate(self._categoryList)if catID == catToRemove]self._removeRows(rowsToRemove)assert catToRemove not in self._categoryList", "docstring": "There are two caveats. First, this is a potentially slow operation. 
Second,\npattern indices will shift if patterns before them are removed.\n\n:param categoryToRemove: Category label to remove", "id": "f17568:c0:m6"} {"signature": "def _removeRows(self, rowsToRemove):", "body": "removalArray = numpy.array(rowsToRemove)self._categoryList = numpy.delete(numpy.array(self._categoryList),removalArray).tolist()if self.fixedCapacity:self._categoryRecencyList = numpy.delete(numpy.array(self._categoryRecencyList), removalArray).tolist()for row in reversed(rowsToRemove): self._partitionIdList.pop(row)self._rebuildPartitionIdMap(self._partitionIdList)if self.useSparseMemory:for rowIndex in rowsToRemove[::-]:self._Memory.deleteRow(rowIndex)else:self._M = numpy.delete(self._M, removalArray, )numRemoved = len(rowsToRemove)numRowsExpected = self._numPatterns - numRemovedif self.useSparseMemory:if self._Memory is not None:assert self._Memory.nRows() == numRowsExpectedelse:assert self._M.shape[] == numRowsExpectedassert len(self._categoryList) == numRowsExpectedself._numPatterns -= numRemovedreturn numRemoved", "docstring": "A list of row indices to remove. There are two caveats. First, this is\na potentially slow operation. Second, pattern indices will shift if\npatterns before them are removed.", "id": "f17568:c0:m7"} {"signature": "def doIteration(self):", "body": "self._iterationIdx += ", "docstring": "Utility method to increment the iteration index. Intended for models that\ndon't learn each timestep.", "id": "f17568:c0:m8"} {"signature": "def learn(self, inputPattern, inputCategory, partitionId=None, isSparse=,rowID=None):", "body": "if self.verbosity >= :print(\"\" % g_debugPrefix)print(\"\", int(inputCategory))print(\"\", _labeledInput(inputPattern,cellsPerCol=self.cellsPerCol))if isSparse > :assert all(inputPattern[i] <= inputPattern[i+]for i in range(len(inputPattern)-)),\"\"assert all(bit < isSparse for bit in inputPattern),(\"\"\"\")if rowID is None:rowID = self._iterationIdxif not self.useSparseMemory:assert self.cellsPerCol == , \"\"if isSparse > :denseInput = numpy.zeros(isSparse)denseInput[inputPattern] = inputPattern = denseInputif self._specificIndexTraining and not self._nextTrainingIndices:return self._numPatternsif self._Memory is None:inputWidth = len(inputPattern)self._Memory = numpy.zeros((,inputWidth))self._numPatterns = self._M = self._Memory[:self._numPatterns]addRow = Trueif self._vt is not None:inputPattern = numpy.dot(self._vt, inputPattern - self._mean)if self.distThreshold > :dist = self._calcDistance(inputPattern)minDist = dist.min()addRow = (minDist >= self.distThreshold)if addRow:self._protoSizes = None if self._numPatterns == self._Memory.shape[]:self._doubleMemoryNumRows()if not self._specificIndexTraining:self._Memory[self._numPatterns] = inputPatternself._numPatterns += self._categoryList.append(int(inputCategory))else:vectorIndex = self._nextTrainingIndices.pop()while vectorIndex >= self._Memory.shape[]:self._doubleMemoryNumRows()self._Memory[vectorIndex] = inputPatternself._numPatterns = max(self._numPatterns, vectorIndex + )if vectorIndex >= len(self._categoryList):self._categoryList += [-] * (vectorIndex -len(self._categoryList) + )self._categoryList[vectorIndex] = int(inputCategory)self._M = self._Memory[:self._numPatterns]self._addPartitionId(self._numPatterns-, partitionId)else:if isSparse > and (self._vt is not None or self.distThreshold > or self.numSVDDims is not None or self.numSVDSamples > or self.numWinners > ):denseInput = numpy.zeros(isSparse)denseInput[inputPattern] = inputPattern = denseInputisSparse = if isSparse > 
:inputWidth = isSparseelse:inputWidth = len(inputPattern)if self._Memory is None:self._Memory = NearestNeighbor(, inputWidth)if self._vt is not None:inputPattern = numpy.dot(self._vt, inputPattern - self._mean)if isSparse == :thresholdedInput = self._sparsifyVector(inputPattern, True)addRow = Trueif self.cellsPerCol >= :burstingCols = thresholdedInput.reshape(-,self.cellsPerCol).min(axis=).nonzero()[]for col in burstingCols:thresholdedInput[(col * self.cellsPerCol) + :(col * self.cellsPerCol) + self.cellsPerCol] = if self._Memory.nRows() > :dist = Noneif self.replaceDuplicates:dist = self._calcDistance(thresholdedInput, distanceNorm=)if dist.min() == :rowIdx = dist.argmin()self._categoryList[rowIdx] = int(inputCategory)if self.fixedCapacity:self._categoryRecencyList[rowIdx] = rowIDaddRow = Falseif self.distThreshold > :if dist is None or self.distanceNorm != :dist = self._calcDistance(thresholdedInput)minDist = dist.min()addRow = (minDist >= self.distThreshold)if not addRow:if self.fixedCapacity:rowIdx = dist.argmin()self._categoryRecencyList[rowIdx] = rowIDif addRow and self.minSparsity > :if isSparse==:sparsity = ( float(len(thresholdedInput.nonzero()[])) /len(thresholdedInput) )else:sparsity = float(len(inputPattern)) / isSparseif sparsity < self.minSparsity:addRow = Falseif addRow:self._protoSizes = None if isSparse == :self._Memory.addRow(thresholdedInput)else:self._Memory.addRowNZ(inputPattern, []*len(inputPattern))self._numPatterns += self._categoryList.append(int(inputCategory))self._addPartitionId(self._numPatterns-, partitionId)if self.fixedCapacity:self._categoryRecencyList.append(rowID)if self._numPatterns > self.maxStoredPatterns andself.maxStoredPatterns > :leastRecentlyUsedPattern = numpy.argmin(self._categoryRecencyList)self._Memory.deleteRow(leastRecentlyUsedPattern)self._categoryList.pop(leastRecentlyUsedPattern)self._categoryRecencyList.pop(leastRecentlyUsedPattern)self._numPatterns -= if self.numSVDDims is not None and self.numSVDSamples > and self._numPatterns == self.numSVDSamples:self.computeSVD()return self._numPatterns", "docstring": "Train the classifier to associate specified input pattern with a\nparticular category.\n\n:param inputPattern: (list) The pattern to be assigned a category. If\n isSparse is 0, this should be a dense array (both ON and OFF bits\n present). Otherwise, if isSparse > 0, this should be a list of the\n indices of the non-zero bits in sorted order\n\n:param inputCategory: (int) The category to be associated to the training\n pattern\n\n:param partitionId: (int) partitionID allows you to associate an id with each\n input vector. It can be used to associate input patterns stored in the\n classifier with an external id. This can be useful for debugging or\n visualizing. Another use case is to ignore vectors with a specific id\n during inference (see description of infer() for details). There can be\n at most one partitionId per stored pattern (i.e. if two patterns are\n within distThreshold, only the first partitionId will be stored). This\n is an optional parameter.\n\n:param isSparse: (int) 0 if the input pattern is a dense representation.\n When the input pattern is a list of non-zero indices, then isSparse\n is the number of total bits (n). E.g. for the dense array\n [0, 1, 1, 0, 0, 1], isSparse should be `0`. 
For the equivalent sparse\n representation [1, 2, 5] (which specifies the indices of active bits),\n isSparse should be `6`, which is the total number of bits in the input\n space.\n\n:param rowID: (int) UNKNOWN\n\n:returns: The number of patterns currently stored in the classifier", "id": "f17568:c0:m9"} {"signature": "def getOverlaps(self, inputPattern):", "body": "assert self.useSparseMemory, \"\"overlaps = self._Memory.rightVecSumAtNZ(inputPattern)return (overlaps, self._categoryList)", "docstring": "Return the degree of overlap between an input pattern and each category\nstored in the classifier. The overlap is computed by computing:\n\n.. code-block:: python\n\n logical_and(inputPattern != 0, trainingPattern != 0).sum()\n\n:param inputPattern: pattern to check overlap of\n\n:returns: (overlaps, categories) Two numpy arrays of the same length, where:\n\n * overlaps: an integer overlap amount for each category\n * categories: category index for each element of overlaps", "id": "f17568:c0:m10"} {"signature": "def getDistances(self, inputPattern):", "body": "dist = self._getDistances(inputPattern)return (dist, self._categoryList)", "docstring": "Return the distances between the input pattern and all other\n stored patterns.\n\n :param inputPattern: pattern to check distance with\n\n :returns: (distances, categories) numpy arrays of the same length.\n - overlaps: an integer overlap amount for each category\n - categories: category index for each element of distances", "id": "f17568:c0:m11"} {"signature": "def infer(self, inputPattern, computeScores=True, overCategories=True,partitionId=None):", "body": "sparsity = if self.minSparsity > :sparsity = ( float(len(inputPattern.nonzero()[])) /len(inputPattern) )if len(self._categoryList) == or sparsity < self.minSparsity:winner = NoneinferenceResult = numpy.zeros()dist = numpy.ones()categoryDist = numpy.ones()else:maxCategoryIdx = max(self._categoryList)inferenceResult = numpy.zeros(maxCategoryIdx+)dist = self._getDistances(inputPattern, partitionId=partitionId)validVectorCount = len(self._categoryList) - self._categoryList.count(-)if self.exact:exactMatches = numpy.where(dist<)[]if len(exactMatches) > :for i in exactMatches[:min(self.k, validVectorCount)]:inferenceResult[self._categoryList[i]] += else:sorted = dist.argsort()for j in sorted[:min(self.k, validVectorCount)]:inferenceResult[self._categoryList[j]] += if inferenceResult.any():winner = inferenceResult.argmax()inferenceResult /= inferenceResult.sum()else:winner = NonecategoryDist = min_score_per_category(maxCategoryIdx,self._categoryList, dist)categoryDist.clip(, , categoryDist)if self.verbosity >= :print(\"\" % (g_debugPrefix))print(\"\", _labeledInput(inputPattern,cellsPerCol=self.cellsPerCol))print(\"\", winner)print(\"\", inferenceResult)print(\"\", dist)print(\"\", categoryDist)result = (winner, inferenceResult, dist, categoryDist)return result", "docstring": "Finds the category that best matches the input pattern. Returns the\n winning category index as well as a distribution over all categories.\n\n :param inputPattern: (list or array) The pattern to be classified. This\n must be a dense representation of the array (e.g. [0, 0, 1, 1, 0, 1]).\n\n :param computeScores: NO EFFECT\n\n :param overCategories: NO EFFECT\n\n :param partitionId: (int) If provided, all training vectors with partitionId\n equal to that of the input pattern are ignored.\n For example, this may be used to perform k-fold cross validation\n without repopulating the classifier. 
First partition all the data into\n k equal partitions numbered 0, 1, 2, ... and then call learn() for each\n vector passing in its partitionId. Then, during inference, by passing\n in the partition ID in the call to infer(), all other vectors with the\n same partitionId are ignored simulating the effect of repopulating the\n classifier while ommitting the training vectors in the same partition.\n\n :returns: 4-tuple with these keys:\n\n - ``winner``: The category with the greatest number of nearest neighbors\n within the kth nearest neighbors. If the inferenceResult contains no\n neighbors, the value of winner is None. This can happen, for example,\n in cases of exact matching, if there are no stored vectors, or if\n minSparsity is not met.\n - ``inferenceResult``: A list of length numCategories, each entry contains\n the number of neighbors within the top k neighbors that are in that\n category.\n - ``dist``: A list of length numPrototypes. Each entry is the distance\n from the unknown to that prototype. All distances are between 0.0 and\n 1.0.\n - ``categoryDist``: A list of length numCategories. Each entry is the\n distance from the unknown to the nearest prototype of\n that category. All distances are between 0 and 1.0.", "id": "f17568:c0:m12"} {"signature": "def getClosest(self, inputPattern, topKCategories=):", "body": "inferenceResult = numpy.zeros(max(self._categoryList)+)dist = self._getDistances(inputPattern)sorted = dist.argsort()validVectorCount = len(self._categoryList) - self._categoryList.count(-)for j in sorted[:min(self.k, validVectorCount)]:inferenceResult[self._categoryList[j]] += winner = inferenceResult.argmax()topNCats = []for i in range(topKCategories):topNCats.append((self._categoryList[sorted[i]], dist[sorted[i]] ))return winner, dist, topNCats", "docstring": "Returns the index of the pattern that is closest to inputPattern,\n the distances of all patterns to inputPattern, and the indices of the k\n closest categories.", "id": "f17568:c0:m13"} {"signature": "def closestTrainingPattern(self, inputPattern, cat):", "body": "dist = self._getDistances(inputPattern)sorted = dist.argsort()for patIdx in sorted:patternCat = self._categoryList[patIdx]if patternCat == cat:if self.useSparseMemory:closestPattern = self._Memory.getRow(int(patIdx))else:closestPattern = self._M[patIdx]return closestPatternreturn None", "docstring": "Returns the closest training pattern to inputPattern that belongs to\n category \"cat\".\n\n :param inputPattern: The pattern whose closest neighbor is sought\n\n :param cat: The required category of closest neighbor\n\n :returns: A dense version of the closest training pattern, or None if no\n such patterns exist", "id": "f17568:c0:m14"} {"signature": "def closestOtherTrainingPattern(self, inputPattern, cat):", "body": "dist = self._getDistances(inputPattern)sorted = dist.argsort()for patIdx in sorted:patternCat = self._categoryList[patIdx]if patternCat != cat:if self.useSparseMemory:closestPattern = self._Memory.getRow(int(patIdx))else:closestPattern = self._M[patIdx]return closestPatternreturn None", "docstring": "Return the closest training pattern that is *not* of the given\n category \"cat\".\n\n :param inputPattern: The pattern whose closest neighbor is sought\n\n :param cat: Training patterns of this category will be ignored no matter\n their distance to inputPattern\n\n :returns: A dense version of the closest training pattern, or None if no\n such patterns exist", "id": "f17568:c0:m15"} {"signature": "def getPattern(self, idx, 
sparseBinaryForm=False, cat=None):", "body": "if cat is not None:assert idx is Noneidx = self._categoryList.index(cat)if not self.useSparseMemory:pattern = self._Memory[idx]if sparseBinaryForm:pattern = pattern.nonzero()[]else:(nz, values) = self._Memory.rowNonZeros(idx)if not sparseBinaryForm:pattern = numpy.zeros(self._Memory.nCols())numpy.put(pattern, nz, )else:pattern = nzreturn pattern", "docstring": "Gets a training pattern either by index or category number.\n\n :param idx: Index of the training pattern\n\n :param sparseBinaryForm: If true, returns a list of the indices of the\n non-zero bits in the training pattern\n\n :param cat: If not None, get the first pattern belonging to category cat. If\n this is specified, idx must be None.\n\n :returns: The training pattern with specified index", "id": "f17568:c0:m16"} {"signature": "def getPartitionId(self, i):", "body": "if (i < ) or (i >= self._numPatterns):raise RuntimeError(\"\")partitionId = self._partitionIdList[i]if partitionId == numpy.inf:return Noneelse:return partitionId", "docstring": "Gets the partition id given an index.\n\n:param i: index of partition\n:returns: the partition id associated with pattern i. Returns None if no id\n is associated with it.", "id": "f17568:c0:m17"} {"signature": "def getPartitionIdList(self):", "body": "return self._partitionIdList", "docstring": ":returns: a list of complete partition id objects", "id": "f17568:c0:m18"} {"signature": "def getNumPartitionIds(self):", "body": "return len(self._partitionIdMap)", "docstring": ":returns: the number of unique partition Ids stored.", "id": "f17568:c0:m19"} {"signature": "def getPartitionIdKeys(self):", "body": "return list(self._partitionIdMap.keys())", "docstring": ":returns: a list containing unique (non-None) partition Ids (just the keys)", "id": "f17568:c0:m20"} {"signature": "def getPatternIndicesWithPartitionId(self, partitionId):", "body": "return self._partitionIdMap.get(partitionId, [])", "docstring": ":returns: a list of pattern indices corresponding to this partitionId.\n Return an empty list if there are none.", "id": "f17568:c0:m21"} {"signature": "def _addPartitionId(self, index, partitionId=None):", "body": "if partitionId is None:self._partitionIdList.append(numpy.inf)else:self._partitionIdList.append(partitionId)indices = self._partitionIdMap.get(partitionId, [])indices.append(index)self._partitionIdMap[partitionId] = indices", "docstring": "Adds partition id for pattern index", "id": "f17568:c0:m22"} {"signature": "def _rebuildPartitionIdMap(self, partitionIdList):", "body": "self._partitionIdMap = {}for row, partitionId in enumerate(partitionIdList):indices = self._partitionIdMap.get(partitionId, [])indices.append(row)self._partitionIdMap[partitionId] = indices", "docstring": "Rebuilds the partition Id map using the given partitionIdList", "id": "f17568:c0:m23"} {"signature": "def _calcDistance(self, inputPattern, distanceNorm=None):", "body": "if distanceNorm is None:distanceNorm = self.distanceNormif self.useSparseMemory:if self._protoSizes is None:self._protoSizes = self._Memory.rowSums()overlapsWithProtos = self._Memory.rightVecSumAtNZ(inputPattern)inputPatternSum = inputPattern.sum()if self.distanceMethod == \"\":dist = inputPattern.sum() - overlapsWithProtoselif self.distanceMethod == \"\":dist = inputPatternSum - overlapsWithProtosif inputPatternSum > :dist /= inputPatternSumelif self.distanceMethod == \"\":overlapsWithProtos /= self._protoSizesdist = - overlapsWithProtoselif self.distanceMethod == \"\":maxVal = 
numpy.maximum(self._protoSizes, inputPatternSum)if maxVal.all() > :overlapsWithProtos /= maxValdist = - overlapsWithProtoselif self.distanceMethod == \"\":dist = self._Memory.vecLpDist(self.distanceNorm, inputPattern)distMax = dist.max()if distMax > :dist /= distMaxelse:raise RuntimeError(\"\" %self.distanceMethod)else:if self.distanceMethod == \"\":dist = numpy.power(numpy.abs(self._M - inputPattern), self.distanceNorm)dist = dist.sum()dist = numpy.power(dist, /self.distanceNorm)dist /= dist.max()else:raise RuntimeError (\"\")return dist", "docstring": "Calculate the distances from inputPattern to all stored patterns. All\n distances are between 0.0 and 1.0\n\n :param inputPattern The pattern from which distances to all other patterns\n are calculated\n\n :param distanceNorm Degree of the distance norm", "id": "f17568:c0:m24"} {"signature": "def _getDistances(self, inputPattern, partitionId=None):", "body": "if not self._finishedLearning:self.finishLearning()self._finishedLearning = Trueif self._vt is not None and len(self._vt) > :inputPattern = numpy.dot(self._vt, inputPattern - self._mean)sparseInput = self._sparsifyVector(inputPattern)dist = self._calcDistance(sparseInput)if self._specificIndexTraining:dist[numpy.array(self._categoryList) == -] = numpy.infif partitionId is not None:dist[self._partitionIdMap.get(partitionId, [])] = numpy.infreturn dist", "docstring": "Return the distances from inputPattern to all stored patterns.\n\n :param inputPattern The pattern from which distances to all other patterns\n are returned\n\n :param partitionId If provided, ignore all training vectors with this\n partitionId.", "id": "f17568:c0:m25"} {"signature": "def finishLearning(self):", "body": "if self.numSVDDims is not None and self._vt is None:self.computeSVD()", "docstring": "Used for batch scenarios. This method needs to be called between learning\nand inference.", "id": "f17568:c0:m26"} {"signature": "def computeSVD(self, numSVDSamples=, finalize=True):", "body": "if numSVDSamples == :numSVDSamples = self._numPatternsif not self.useSparseMemory:self._a = self._Memory[:self._numPatterns]else:self._a = self._Memory.toDense()[:self._numPatterns]self._mean = numpy.mean(self._a, axis=)self._a -= self._meanu,self._s,self._vt = numpy.linalg.svd(self._a[:numSVDSamples])if finalize:self._finalizeSVD()return self._s", "docstring": "Compute the singular value decomposition (SVD). The SVD is a factorization\nof a real or complex matrix. It factors the matrix `a` as\n`u * np.diag(s) * v`, where `u` and `v` are unitary and `s` is a 1-d array\nof `a`'s singular values.\n\n**Reason for computing the SVD:**\n\nThere are cases where you want to feed a lot of vectors to the\nKNNClassifier. However, this can be slow. You can speed up training by (1)\ncomputing the SVD of the input patterns which will give you the\neigenvectors, (2) only keeping a fraction of the eigenvectors, and (3)\nprojecting the input patterns onto the remaining eigenvectors.\n\nNote that all input patterns are projected onto the eigenvectors in the same\nfashion. 
Keeping only the highest eigenvectors increases training\nperformance since it reduces the dimensionality of the input.\n\n:param numSVDSamples: (int) the number of samples to use for the SVD\n computation.\n\n:param finalize: (bool) whether to apply SVD to the input patterns.\n\n:returns: (array) The singular values for every matrix, sorted in\n descending order.", "id": "f17568:c0:m27"} {"signature": "def getAdaptiveSVDDims(self, singularValues, fractionOfMax=):", "body": "v = singularValues/singularValues[]idx = numpy.where(v]if len(idx):print(\"\", idx[], \"\", len(v))return idx[]else:print(\"\", len(v)-, \"\", len(v))return len(v)-", "docstring": "Compute the number of eigenvectors (singularValues) to keep.\n\n:param singularValues:\n:param fractionOfMax:\n:return:", "id": "f17568:c0:m28"} {"signature": "def _finalizeSVD(self, numSVDDims=None):", "body": "if numSVDDims is not None:self.numSVDDims = numSVDDimsif self.numSVDDims==\"\":if self.fractionOfMax is not None:self.numSVDDims = self.getAdaptiveSVDDims(self._s, self.fractionOfMax)else:self.numSVDDims = self.getAdaptiveSVDDims(self._s)if self._vt.shape[] < self.numSVDDims:print(\"\")print (\"\"\"\")print(\"\", self._vt.shape[])print(\"\")self.numSVDDims = self._vt.shape[]self._vt = self._vt[:self.numSVDDims]if len(self._vt) == :returnself._Memory = numpy.zeros((self._numPatterns,self.numSVDDims))self._M = self._Memoryself.useSparseMemory = Falsefor i in range(self._numPatterns):self._Memory[i] = numpy.dot(self._vt, self._a[i])self._a = None", "docstring": "Called by finalizeLearning(). This will project all the patterns onto the\nSVD eigenvectors.\n:param numSVDDims: (int) number of egeinvectors used for projection.\n:return:", "id": "f17568:c0:m29"} {"signature": "def remapCategories(self, mapping):", "body": "categoryArray = numpy.array(self._categoryList)newCategoryArray = numpy.zeros(categoryArray.shape[])newCategoryArray.fill(-)for i in range(len(mapping)):newCategoryArray[categoryArray==i] = mapping[i]self._categoryList = list(newCategoryArray)", "docstring": "Change the category indices.\n\n Used by the Network Builder to keep the category indices in sync with the\n ImageSensor categoryInfo when the user renames or removes categories.\n\n :param mapping: List of new category indices. For example, mapping=[2,0,1]\n would change all vectors of category 0 to be category 2, category 1 to\n 0, and category 2 to 1", "id": "f17568:c0:m30"} {"signature": "def setCategoryOfVectors(self, vectorIndices, categoryIndices):", "body": "if not hasattr(vectorIndices, \"\"):vectorIndices = [vectorIndices]categoryIndices = [categoryIndices]elif not hasattr(categoryIndices, \"\"):categoryIndices = [categoryIndices] * len(vectorIndices)for i in range(len(vectorIndices)):vectorIndex = vectorIndices[i]categoryIndex = categoryIndices[i]if vectorIndex < len(self._categoryList):self._categoryList[vectorIndex] = categoryIndex", "docstring": "Change the category associated with this vector(s).\n\n Used by the Network Builder to move vectors between categories, to enable\n categories, and to invalidate vectors by setting the category to -1.\n\n :param vectorIndices: Single index or list of indices\n\n :param categoryIndices: Single index or list of indices. 
Can also be a\n single index when vectorIndices is a list, in which case the same\n category will be used for all vectors", "id": "f17568:c0:m31"} {"signature": "def __getstate__(self):", "body": "state = self.__dict__.copy()return state", "docstring": "Return serializable state.\n\n This function will return a version of the __dict__.", "id": "f17568:c0:m35"} {"signature": "def __setstate__(self, state):", "body": "if \"\" not in state:passelif state[\"\"] == :passelif state[\"\"] == :raise RuntimeError(\"\"\"\")if \"\" in state:state.pop(\"\")if \"\" not in state:state[\"\"] = self.__dict__.update(state)if \"\" not in state:self._rebuildPartitionIdMap(self._partitionIdList)self.version = KNNCLASSIFIER_VERSION", "docstring": "Set the state of this object from a serialized state.", "id": "f17568:c0:m36"} {"signature": "def importAndRunFunction(path,moduleName,funcName,**keywords):", "body": "import sysoriginalPath = sys.pathtry:augmentedPath = [path] + sys.pathsys.path = augmentedPathfunc = getattr(__import__(moduleName, fromlist=[funcName]), funcName)sys.path = originalPathexcept:sys.path = originalPathraisereturn func(**keywords)", "docstring": "Run a named function specified by a filesystem path, module name\nand function name.\n\nReturns the value returned by the imported function.\n\nUse this when access is needed to code that has\nnot been added to a package accessible from the ordinary Python\npath. Encapsulates the multiple lines usually needed to\nsafely manipulate and restore the Python path.\n\nParameters\n----------\npath: filesystem path\nPath to the directory where the desired module is stored.\nThis will be used to temporarily augment the Python path.\n\nmoduleName: basestring\nName of the module, without trailing extension, where the desired\nfunction is stored. 
This module should be in the directory specified\nwith path.\n\nfuncName: basestring\nName of the function to import and call.\n\nkeywords:\nKeyword arguments to be passed to the imported function.", "id": "f17569:m0"} {"signature": "def getLockedHandle(runtimeElement, expression):", "body": "fullExpression = '''' + expression + ''return runtimeElement.interpret(fullExpression)", "docstring": "Calls runtimeElement.interpret(expression) and wraps the result\nin a call to nupic.bindings.research.lockHandle().", "id": "f17569:m1"} {"signature": "def transferCoincidences(network, fromElementName, toElementName):", "body": "coincidenceHandle = getLockedHandle(runtimeElement=network.getElement(fromElementName),expression=\"\")network.getElement(toElementName).setParameter(\"\",coincidenceHandle)", "docstring": "Gets the coincidence matrix from one element and sets it on\nanother element\n(using locked handles, a la nupic.bindings.research.lockHandle).\n\nTODO: Generalize to more node types, parameter name pairs, etc.\n\nDoes not work across processes.", "id": "f17569:m2"} {"signature": "def __init__(self,columnDimensions=(,),cellsPerColumn=,activationThreshold=,initialPermanence=,connectedPermanence=,minThreshold=,maxNewSynapseCount=,permanenceIncrement=,permanenceDecrement=,seed=):", "body": "numberOfCols = for n in columnDimensions:numberOfCols *= nsuper(TemporalMemoryShim, self).__init__(numberOfCols=numberOfCols,cellsPerColumn=cellsPerColumn,initialPerm=initialPermanence,connectedPerm=connectedPermanence,minThreshold=minThreshold,newSynapseCount=maxNewSynapseCount,permanenceInc=permanenceIncrement,permanenceDec=permanenceDecrement,permanenceMax=,globalDecay=,activationThreshold=activationThreshold,seed=seed)self.connections = Connections(numberOfCols * cellsPerColumn)self.predictiveCells = set()", "docstring": "Translate parameters and initialize member variables\nspecific to TemporalMemory", "id": "f17570:c0:m0"} {"signature": "def compute(self, activeColumns, learn=True):", "body": "bottomUpInput = numpy.zeros(self.numberOfCols, dtype=dtype)bottomUpInput[list(activeColumns)] = super(TemporalMemoryShim, self).compute(bottomUpInput,enableLearn=learn,enableInference=True)predictedState = self.getPredictedState()self.predictiveCells = set(numpy.flatnonzero(predictedState))", "docstring": "Feeds input record through TM, performing inference and learning.\nUpdates member variables with new state.\n\n@param activeColumns (set) Indices of active columns in `t`", "id": "f17570:c0:m1"} {"signature": "@classmethoddef read(cls, proto):", "body": "tm = super(TemporalMemoryShim, cls).read(proto.baseTM)tm.predictiveCells = set(proto.predictedState)tm.connections = Connections.read(proto.conncetions)", "docstring": "Deserialize from proto instance.\n\n :param proto: (TemporalMemoryShimProto) the proto instance to read from", "id": "f17570:c0:m3"} {"signature": "def write(self, proto):", "body": "super(TemporalMemoryShim, self).write(proto.baseTM)proto.connections.write(self.connections)proto.predictiveCells = self.predictiveCells", "docstring": "Populate serialization proto instance.\n\n :param proto: (TemporalMemoryShimProto) the proto instance to populate", "id": "f17570:c0:m4"} {"signature": "@staticmethoddef connectionsFactory(*args, **kwargs):", "body": "return Connections(*args, **kwargs)", "docstring": "Create a :class:`~nupic.algorithms.connections.Connections` instance. 
\n:class:`TemporalMemory` subclasses may override this method to choose a \ndifferent :class:`~nupic.algorithms.connections.Connections` implementation, \nor to augment the instance otherwise returned by the default \n:class:`~nupic.algorithms.connections.Connections` implementation.\n\nSee :class:`~nupic.algorithms.connections.Connections` for constructor \nsignature and usage.\n\n:returns: :class:`~nupic.algorithms.connections.Connections` instance", "id": "f17571:c0:m1"} {"signature": "def compute(self, activeColumns, learn=True):", "body": "self.activateCells(sorted(activeColumns), learn)self.activateDendrites(learn)", "docstring": "Perform one time step of the Temporal Memory algorithm.\n\nThis method calls :meth:`activateCells`, then calls \n:meth:`activateDendrites`. Using :class:`TemporalMemory` via its \n:meth:`compute` method ensures that you'll always be able to call \n:meth:`getPredictiveCells` to get predictions for the next time step.\n\n:param activeColumns: (iter) Indices of active columns.\n\n:param learn: (bool) Whether or not learning is enabled.", "id": "f17571:c0:m2"} {"signature": "def activateCells(self, activeColumns, learn=True):", "body": "prevActiveCells = self.activeCellsprevWinnerCells = self.winnerCellsself.activeCells = []self.winnerCells = []segToCol = lambda segment: int(segment.cell / self.cellsPerColumn)identity = lambda x: xfor columnData in groupby2(activeColumns, identity,self.activeSegments, segToCol,self.matchingSegments, segToCol):(column,activeColumns,columnActiveSegments,columnMatchingSegments) = columnDataif activeColumns is not None:if columnActiveSegments is not None:cellsToAdd = self.activatePredictedColumn(column,columnActiveSegments,columnMatchingSegments,prevActiveCells,prevWinnerCells,learn)self.activeCells += cellsToAddself.winnerCells += cellsToAddelse:(cellsToAdd,winnerCell) = self.burstColumn(column,columnMatchingSegments,prevActiveCells,prevWinnerCells,learn)self.activeCells += cellsToAddself.winnerCells.append(winnerCell)else:if learn:self.punishPredictedColumn(column,columnActiveSegments,columnMatchingSegments,prevActiveCells,prevWinnerCells)", "docstring": "Calculate the active cells, using the current active columns and dendrite\nsegments. 
Grow and reinforce synapses.\n\n:param activeColumns: (iter) A sorted list of active column indices.\n\n:param learn: (bool) If true, reinforce / punish / grow synapses.\n\n **Pseudocode:**\n\n ::\n\n for each column\n if column is active and has active distal dendrite segments\n call activatePredictedColumn\n if column is active and doesn't have active distal dendrite segments\n call burstColumn\n if column is inactive and has matching distal dendrite segments\n call punishPredictedColumn", "id": "f17571:c0:m3"} {"signature": "def activateDendrites(self, learn=True):", "body": "(numActiveConnected,numActivePotential) = self.connections.computeActivity(self.activeCells,self.connectedPermanence)activeSegments = (self.connections.segmentForFlatIdx(i)for i in xrange(len(numActiveConnected))if numActiveConnected[i] >= self.activationThreshold)matchingSegments = (self.connections.segmentForFlatIdx(i)for i in xrange(len(numActivePotential))if numActivePotential[i] >= self.minThreshold)self.activeSegments = sorted(activeSegments,key=self.connections.segmentPositionSortKey)self.matchingSegments = sorted(matchingSegments,key=self.connections.segmentPositionSortKey)self.numActiveConnectedSynapsesForSegment = numActiveConnectedself.numActivePotentialSynapsesForSegment = numActivePotentialif learn:for segment in self.activeSegments:self.lastUsedIterationForSegment[segment.flatIdx] = self.iterationself.iteration += ", "docstring": "Calculate dendrite segment activity, using the current active cells.\n\n:param learn: (bool) If true, segment activations will be recorded. This \n information is used during segment cleanup.\n\n**Pseudocode:**\n\n::\n\n for each distal dendrite segment with activity >= activationThreshold\n mark the segment as active\n for each distal dendrite segment with unconnected activity >= minThreshold\n mark the segment as matching", "id": "f17571:c0:m4"} {"signature": "def reset(self):", "body": "self.activeCells = []self.winnerCells = []self.activeSegments = []self.matchingSegments = []", "docstring": "Indicates the start of a new sequence. 
Clears any predictions and makes sure\nsynapses don't grow to the currently active cells in the next time step.", "id": "f17571:c0:m5"} {"signature": "def activatePredictedColumn(self, column, columnActiveSegments,columnMatchingSegments, prevActiveCells,prevWinnerCells, learn):", "body": "return self._activatePredictedColumn(self.connections, self._random,columnActiveSegments, prevActiveCells, prevWinnerCells,self.numActivePotentialSynapsesForSegment,self.maxNewSynapseCount, self.initialPermanence,self.permanenceIncrement, self.permanenceDecrement,self.maxSynapsesPerSegment, learn)", "docstring": "Determines which cells in a predicted column should be added to winner cells\nlist, and learns on the segments that correctly predicted this column.\n\n:param column: (int) Index of bursting column.\n\n:param columnActiveSegments: (iter) Active segments in this column.\n\n:param columnMatchingSegments: (iter) Matching segments in this column.\n\n:param prevActiveCells: (list) Active cells in ``t-1``.\n\n:param prevWinnerCells: (list) Winner cells in ``t-1``.\n\n:param learn: (bool) If true, grow and reinforce synapses.\n\n:returns: (list) A list of predicted cells that will be added to \n active cells and winner cells.", "id": "f17571:c0:m6"} {"signature": "def burstColumn(self, column, columnMatchingSegments, prevActiveCells,prevWinnerCells, learn):", "body": "start = self.cellsPerColumn * columncellsForColumn = xrange(start, start + self.cellsPerColumn)return self._burstColumn(self.connections, self._random, self.lastUsedIterationForSegment, column,columnMatchingSegments, prevActiveCells, prevWinnerCells, cellsForColumn,self.numActivePotentialSynapsesForSegment, self.iteration,self.maxNewSynapseCount, self.initialPermanence, self.permanenceIncrement,self.permanenceDecrement, self.maxSegmentsPerCell,self.maxSynapsesPerSegment, learn)", "docstring": "Activates all of the cells in an unpredicted active column, chooses a winner\ncell, and, if learning is turned on, learns on one segment, growing a new\nsegment if necessary.\n\n:param column: (int) Index of bursting column.\n\n:param columnMatchingSegments: (iter) Matching segments in this column, or \n None if there aren't any.\n\n:param prevActiveCells: (list) Active cells in ``t-1``.\n\n:param prevWinnerCells: (list) Winner cells in ``t-1``.\n\n:param learn: (bool) Whether or not learning is enabled.\n\n:returns: (tuple) Contains (``cells`` [iter], ``winnerCell`` [int])", "id": "f17571:c0:m7"} {"signature": "def punishPredictedColumn(self, column, columnActiveSegments,columnMatchingSegments, prevActiveCells,prevWinnerCells):", "body": "self._punishPredictedColumn(self.connections, columnMatchingSegments, prevActiveCells,self.predictedSegmentDecrement)", "docstring": "Punishes the Segments that incorrectly predicted a column to be active.\n\n:param column: (int) Index of bursting column.\n\n:param columnActiveSegments: (iter) Active segments for this column, or None \n if there aren't any.\n\n:param columnMatchingSegments: (iter) Matching segments for this column, or \n None if there aren't any.\n\n:param prevActiveCells: (list) Active cells in ``t-1``.\n\n:param prevWinnerCells: (list) Winner cells in ``t-1``.", "id": "f17571:c0:m8"} {"signature": "def createSegment(self, cell):", "body": "return self._createSegment(self.connections, self.lastUsedIterationForSegment, cell, self.iteration,self.maxSegmentsPerCell)", "docstring": "Create a :class:`~nupic.algorithms.connections.Segment` on the specified \ncell. 
This method calls \n:meth:`~nupic.algorithms.connections.Connections.createSegment` on the \nunderlying :class:`~nupic.algorithms.connections.Connections`, and it does \nsome extra bookkeeping. Unit tests should call this method, and not \n:meth:`~nupic.algorithms.connections.Connections.createSegment`.\n\n:param cell: (int) Index of cell to create a segment on.\n\n:returns: (:class:`~nupic.algorithms.connections.Segment`) The created \n segment.", "id": "f17571:c0:m9"} {"signature": "@classmethoddef _activatePredictedColumn(cls, connections, random, columnActiveSegments,prevActiveCells, prevWinnerCells,numActivePotentialSynapsesForSegment,maxNewSynapseCount, initialPermanence,permanenceIncrement, permanenceDecrement,maxSynapsesPerSegment, learn):", "body": "cellsToAdd = []previousCell = Nonefor segment in columnActiveSegments:if segment.cell != previousCell:cellsToAdd.append(segment.cell)previousCell = segment.cellif learn:cls._adaptSegment(connections, segment, prevActiveCells,permanenceIncrement, permanenceDecrement)active = numActivePotentialSynapsesForSegment[segment.flatIdx]nGrowDesired = maxNewSynapseCount - activeif nGrowDesired > :cls._growSynapses(connections, random, segment, nGrowDesired,prevWinnerCells, initialPermanence,maxSynapsesPerSegment)return cellsToAdd", "docstring": ":param connections: (Object)\nConnections for the TM. Gets mutated.\n\n:param random: (Object)\nRandom number generator. Gets mutated.\n\n:param columnActiveSegments: (iter)\nActive segments in this column.\n\n:param prevActiveCells: (list)\nActive cells in `t-1`.\n\n:param prevWinnerCells: (list)\nWinner cells in `t-1`.\n\n:param numActivePotentialSynapsesForSegment: (list)\nNumber of active potential synapses per segment, indexed by the segment's\nflatIdx.\n\n:param maxNewSynapseCount: (int)\nThe maximum number of synapses added to a segment during learning\n\n:param initialPermanence: (float)\nInitial permanence of a new synapse.\n\n@permanenceIncrement (float)\nAmount by which permanences of synapses are incremented during learning.\n\n@permanenceDecrement (float)\nAmount by which permanences of synapses are decremented during learning.\n\n:param maxSynapsesPerSegment: (int)\nThe maximum number of synapses per segment.\n\n:param learn: (bool)\nIf true, grow and reinforce synapses.\n\n:returns: cellsToAdd (list)\nA list of predicted cells that will be added to active cells and winner\ncells.\n\nPseudocode:\nfor each cell in the column that has an active distal dendrite segment\n mark the cell as active\n mark the cell as a winner cell\n (learning) for each active distal dendrite segment\n strengthen active synapses\n weaken inactive synapses\n grow synapses to previous winner cells", "id": "f17571:c0:m10"} {"signature": "@classmethoddef _burstColumn(cls, connections, random, lastUsedIterationForSegment,column, columnMatchingSegments, prevActiveCells,prevWinnerCells, cellsForColumn,numActivePotentialSynapsesForSegment, iteration,maxNewSynapseCount, initialPermanence, permanenceIncrement,permanenceDecrement, maxSegmentsPerCell,maxSynapsesPerSegment, learn):", "body": "if columnMatchingSegments is not None:numActive = lambda s: numActivePotentialSynapsesForSegment[s.flatIdx]bestMatchingSegment = max(columnMatchingSegments, key=numActive)winnerCell = bestMatchingSegment.cellif learn:cls._adaptSegment(connections, bestMatchingSegment, prevActiveCells,permanenceIncrement, permanenceDecrement)nGrowDesired = maxNewSynapseCount - numActive(bestMatchingSegment)if nGrowDesired > :cls._growSynapses(connections, 
random, bestMatchingSegment,nGrowDesired, prevWinnerCells, initialPermanence,maxSynapsesPerSegment)else:winnerCell = cls._leastUsedCell(random, cellsForColumn, connections)if learn:nGrowExact = min(maxNewSynapseCount, len(prevWinnerCells))if nGrowExact > :segment = cls._createSegment(connections,lastUsedIterationForSegment, winnerCell,iteration, maxSegmentsPerCell)cls._growSynapses(connections, random, segment, nGrowExact,prevWinnerCells, initialPermanence,maxSynapsesPerSegment)return cellsForColumn, winnerCell", "docstring": ":param connections: (Object)\nConnections for the TM. Gets mutated.\n\n:param random: (Object)\nRandom number generator. Gets mutated.\n\n:param lastUsedIterationForSegment: (list)\nLast used iteration for each segment, indexed by the segment's flatIdx.\nGets mutated.\n\n:param column: (int)\nIndex of bursting column.\n\n:param columnMatchingSegments: (iter)\nMatching segments in this column.\n\n:param prevActiveCells: (list)\nActive cells in `t-1`.\n\n:param prevWinnerCells: (list)\nWinner cells in `t-1`.\n\n:param cellsForColumn: (sequence)\nRange of cell indices on which to operate.\n\n:param numActivePotentialSynapsesForSegment: (list)\nNumber of active potential synapses per segment, indexed by the segment's\nflatIdx.\n\n:param iteration: (int)\nThe current timestep.\n\n:param maxNewSynapseCount: (int)\nThe maximum number of synapses added to a segment during learning.\n\n:param initialPermanence: (float)\nInitial permanence of a new synapse.\n\n:param permanenceIncrement: (float)\nAmount by which permanences of synapses are incremented during learning.\n\n:param permanenceDecrement: (float)\nAmount by which permanences of synapses are decremented during learning.\n\n:param maxSegmentsPerCell: (int)\nThe maximum number of segments per cell.\n\n:param maxSynapsesPerSegment: (int)\nThe maximum number of synapses per segment.\n\n:param learn: (bool)\nWhether or not learning is enabled.\n\n:returns: (tuple) Contains:\n `cells` (iter),\n `winnerCell` (int),\n\nPseudocode:\nmark all cells as active\nif there are any matching distal dendrite segments\n find the most active matching segment\n mark its cell as a winner cell\n (learning)\n grow and reinforce synapses to previous winner cells\nelse\n find the cell with the least segments, mark it as a winner cell\n (learning)\n (optimization) if there are prev winner cells\n add a segment to this winner cell\n grow synapses to previous winner cells", "id": "f17571:c0:m11"} {"signature": "@classmethoddef _punishPredictedColumn(cls, connections, columnMatchingSegments,prevActiveCells, predictedSegmentDecrement):", "body": "if predictedSegmentDecrement > and columnMatchingSegments is not None:for segment in columnMatchingSegments:cls._adaptSegment(connections, segment, prevActiveCells,-predictedSegmentDecrement, )", "docstring": ":param connections: (Object)\nConnections for the TM. 
Gets mutated.\n\n:param columnMatchingSegments: (iter)\nMatching segments for this column.\n\n:param prevActiveCells: (list)\nActive cells in `t-1`.\n\n:param predictedSegmentDecrement: (float)\nAmount by which segments are punished for incorrect predictions.\n\nPseudocode:\nfor each matching segment in the column\n weaken active synapses", "id": "f17571:c0:m12"} {"signature": "@classmethoddef _createSegment(cls, connections, lastUsedIterationForSegment, cell,iteration, maxSegmentsPerCell):", "body": "while connections.numSegments(cell) >= maxSegmentsPerCell:leastRecentlyUsedSegment = min(connections.segmentsForCell(cell),key=lambda segment : lastUsedIterationForSegment[segment.flatIdx])connections.destroySegment(leastRecentlyUsedSegment)segment = connections.createSegment(cell)if segment.flatIdx == len(lastUsedIterationForSegment):lastUsedIterationForSegment.append(iteration)elif segment.flatIdx < len(lastUsedIterationForSegment):lastUsedIterationForSegment[segment.flatIdx] = iterationelse:raise AssertionError(\"\")return segment", "docstring": "Create a segment on the connections, enforcing the maxSegmentsPerCell\nparameter.", "id": "f17571:c0:m13"} {"signature": "@classmethoddef _destroyMinPermanenceSynapses(cls, connections, random, segment,nDestroy, excludeCells):", "body": "destroyCandidates = sorted((synapse for synapse in connections.synapsesForSegment(segment)if synapse.presynapticCell not in excludeCells),key=lambda s: s._ordinal)for _ in xrange(nDestroy):if len(destroyCandidates) == :breakminSynapse = NoneminPermanence = float(\"\")for synapse in destroyCandidates:if synapse.permanence < minPermanence - EPSILON:minSynapse = synapseminPermanence = synapse.permanenceconnections.destroySynapse(minSynapse)destroyCandidates.remove(minSynapse)", "docstring": "Destroy nDestroy synapses on the specified segment, but don't destroy\nsynapses to the \"excludeCells\".", "id": "f17571:c0:m14"} {"signature": "@classmethoddef _leastUsedCell(cls, random, cells, connections):", "body": "leastUsedCells = []minNumSegments = float(\"\")for cell in cells:numSegments = connections.numSegments(cell)if numSegments < minNumSegments:minNumSegments = numSegmentsleastUsedCells = []if numSegments == minNumSegments:leastUsedCells.append(cell)i = random.getUInt32(len(leastUsedCells))return leastUsedCells[i]", "docstring": "Gets the cell with the smallest number of segments.\nBreak ties randomly.\n\n:param random: (Object)\nRandom number generator. 
Gets mutated.\n\n:param cells: (list)\nIndices of cells.\n\n:param connections: (Object)\nConnections instance for the TM.\n\n:returns: (int) Cell index.", "id": "f17571:c0:m15"} {"signature": "@classmethoddef _growSynapses(cls, connections, random, segment, nDesiredNewSynapes,prevWinnerCells, initialPermanence, maxSynapsesPerSegment):", "body": "candidates = list(prevWinnerCells)for synapse in connections.synapsesForSegment(segment):i = binSearch(candidates, synapse.presynapticCell)if i != -:del candidates[i]nActual = min(nDesiredNewSynapes, len(candidates))overrun = connections.numSynapses(segment) + nActual - maxSynapsesPerSegmentif overrun > :cls._destroyMinPermanenceSynapses(connections, random, segment, overrun,prevWinnerCells)nActual = min(nActual,maxSynapsesPerSegment - connections.numSynapses(segment))for _ in range(nActual):i = random.getUInt32(len(candidates))connections.createSynapse(segment, candidates[i], initialPermanence)del candidates[i]", "docstring": "Creates nDesiredNewSynapes synapses on the segment passed in if\npossible, choosing random cells from the previous winner cells that are\nnot already on the segment.\n\n:param connections: (Object) Connections instance for the tm\n:param random: (Object) TM object used to generate random\n numbers\n:param segment: (int) Segment to grow synapses on.\n:param nDesiredNewSynapes: (int) Desired number of synapses to grow\n:param prevWinnerCells: (list) Winner cells in `t-1`\n:param initialPermanence: (float) Initial permanence of a new synapse.", "id": "f17571:c0:m16"} {"signature": "@classmethoddef _adaptSegment(cls, connections, segment, prevActiveCells,permanenceIncrement, permanenceDecrement):", "body": "synapsesToDestroy = []for synapse in connections.synapsesForSegment(segment):permanence = synapse.permanenceif binSearch(prevActiveCells, synapse.presynapticCell) != -:permanence += permanenceIncrementelse:permanence -= permanenceDecrementpermanence = max(, min(, permanence))if permanence < EPSILON:synapsesToDestroy.append(synapse)else:connections.updateSynapsePermanence(synapse, permanence)for synapse in synapsesToDestroy:connections.destroySynapse(synapse)if connections.numSynapses(segment) == :connections.destroySegment(segment)", "docstring": "Updates synapses on segment.\nStrengthens active synapses; weakens inactive synapses.\n\n:param connections: (Object) Connections instance for the tm\n:param segment: (int) Segment to adapt\n:param prevActiveCells: (list) Active cells in `t-1`\n:param permanenceIncrement: (float) Amount to increment active synapses\n:param permanenceDecrement: (float) Amount to decrement inactive synapses", "id": "f17571:c0:m17"} {"signature": "def columnForCell(self, cell):", "body": "self._validateCell(cell)return int(cell / self.cellsPerColumn)", "docstring": "Returns the index of the column that a cell belongs to.\n\n:param cell: (int) Cell index\n\n:returns: (int) Column index", "id": "f17571:c0:m18"} {"signature": "def cellsForColumn(self, column):", "body": "self._validateColumn(column)start = self.cellsPerColumn * columnend = start + self.cellsPerColumnreturn range(start, end)", "docstring": "Returns the indices of cells that belong to a column.\n\n:param column: (int) Column index\n\n:returns: (list) Cell indices", "id": "f17571:c0:m19"} {"signature": "def numberOfColumns(self):", "body": "return reduce(mul, self.columnDimensions, )", "docstring": "Returns the number of columns in this layer.\n\n:returns: (int) Number of columns", "id": "f17571:c0:m20"} {"signature": "def 
numberOfCells(self):", "body": "return self.numberOfColumns() * self.cellsPerColumn", "docstring": "Returns the number of cells in this layer.\n\n:returns: (int) Number of cells", "id": "f17571:c0:m21"} {"signature": "def mapCellsToColumns(self, cells):", "body": "cellsForColumns = defaultdict(set)for cell in cells:column = self.columnForCell(cell)cellsForColumns[column].add(cell)return cellsForColumns", "docstring": "Maps cells to the columns they belong to.\n\n:param cells: (set) Cells\n\n:returns: (dict) Mapping from columns to their cells in `cells`", "id": "f17571:c0:m22"} {"signature": "def getActiveCells(self):", "body": "return self.getCellIndices(self.activeCells)", "docstring": "Returns the indices of the active cells.\n\n:returns: (list) Indices of active cells.", "id": "f17571:c0:m23"} {"signature": "def getPredictiveCells(self):", "body": "previousCell = NonepredictiveCells = []for segment in self.activeSegments:if segment.cell != previousCell:predictiveCells.append(segment.cell)previousCell = segment.cellreturn predictiveCells", "docstring": "Returns the indices of the predictive cells.\n\n :returns: (list) Indices of predictive cells.", "id": "f17571:c0:m24"} {"signature": "def getWinnerCells(self):", "body": "return self.getCellIndices(self.winnerCells)", "docstring": "Returns the indices of the winner cells.\n\n:returns: (list) Indices of winner cells.", "id": "f17571:c0:m25"} {"signature": "def getActiveSegments(self):", "body": "return self.activeSegments", "docstring": "Returns the active segments.\n\n:returns: (list) Active segments", "id": "f17571:c0:m26"} {"signature": "def getMatchingSegments(self):", "body": "return self.matchingSegments", "docstring": "Returns the matching segments.\n\n:returns: (list) Matching segments", "id": "f17571:c0:m27"} {"signature": "def getCellsPerColumn(self):", "body": "return self.cellsPerColumn", "docstring": "Returns the number of cells per column.\n\n:returns: (int) The number of cells per column.", "id": "f17571:c0:m28"} {"signature": "def getColumnDimensions(self):", "body": "return self.columnDimensions", "docstring": "Returns the dimensions of the columns in the region.\n\n:returns: (tuple) Column dimensions", "id": "f17571:c0:m29"} {"signature": "def getActivationThreshold(self):", "body": "return self.activationThreshold", "docstring": "Returns the activation threshold.\n\n:returns: (int) The activation threshold.", "id": "f17571:c0:m30"} {"signature": "def setActivationThreshold(self, activationThreshold):", "body": "self.activationThreshold = activationThreshold", "docstring": "Sets the activation threshold.\n\n:param activationThreshold: (int) activation threshold.", "id": "f17571:c0:m31"} {"signature": "def getInitialPermanence(self):", "body": "return self.initialPermanence", "docstring": "Get the initial permanence.\n\n:returns: (float) The initial permanence.", "id": "f17571:c0:m32"} {"signature": "def setInitialPermanence(self, initialPermanence):", "body": "self.initialPermanence = initialPermanence", "docstring": "Sets the initial permanence.\n\n:param initialPermanence: (float) The initial permanence.", "id": "f17571:c0:m33"} {"signature": "def getMinThreshold(self):", "body": "return self.minThreshold", "docstring": "Returns the min threshold.\n\n:returns: (int) The min threshold.", "id": "f17571:c0:m34"} {"signature": "def setMinThreshold(self, minThreshold):", "body": "self.minThreshold = minThreshold", "docstring": "Sets the min threshold.\n\n:param minThreshold: (int) min threshold.", "id": "f17571:c0:m35"} 
{"signature": "def getMaxNewSynapseCount(self):", "body": "return self.maxNewSynapseCount", "docstring": "Returns the max new synapse count.\n\n:returns: (int) The max new synapse count.", "id": "f17571:c0:m36"} {"signature": "def setMaxNewSynapseCount(self, maxNewSynapseCount):", "body": "self.maxNewSynapseCount = maxNewSynapseCount", "docstring": "Sets the max new synapse count.\n\n:param maxNewSynapseCount: (int) Max new synapse count.", "id": "f17571:c0:m37"} {"signature": "def getPermanenceIncrement(self):", "body": "return self.permanenceIncrement", "docstring": "Get the permanence increment.\n\n:returns: (float) The permanence increment.", "id": "f17571:c0:m38"} {"signature": "def setPermanenceIncrement(self, permanenceIncrement):", "body": "self.permanenceIncrement = permanenceIncrement", "docstring": "Sets the permanence increment.\n\n:param permanenceIncrement: (float) The permanence increment.", "id": "f17571:c0:m39"} {"signature": "def getPermanenceDecrement(self):", "body": "return self.permanenceDecrement", "docstring": "Get the permanence decrement.\n\n:returns: (float) The permanence decrement.", "id": "f17571:c0:m40"} {"signature": "def setPermanenceDecrement(self, permanenceDecrement):", "body": "self.permanenceDecrement = permanenceDecrement", "docstring": "Sets the permanence decrement.\n\n:param permanenceDecrement: (float) The permanence decrement.", "id": "f17571:c0:m41"} {"signature": "def getPredictedSegmentDecrement(self):", "body": "return self.predictedSegmentDecrement", "docstring": "Get the predicted segment decrement.\n\n:returns: (float) The predicted segment decrement.", "id": "f17571:c0:m42"} {"signature": "def setPredictedSegmentDecrement(self, predictedSegmentDecrement):", "body": "self.predictedSegmentDecrement = predictedSegmentDecrement", "docstring": "Sets the predicted segment decrement.\n\n:param predictedSegmentDecrement: (float) The predicted segment decrement.", "id": "f17571:c0:m43"} {"signature": "def getConnectedPermanence(self):", "body": "return self.connectedPermanence", "docstring": "Get the connected permanence.\n\n:returns: (float) The connected permanence.", "id": "f17571:c0:m44"} {"signature": "def setConnectedPermanence(self, connectedPermanence):", "body": "self.connectedPermanence = connectedPermanence", "docstring": "Sets the connected permanence.\n\n:param connectedPermanence: (float) The connected permanence.", "id": "f17571:c0:m45"} {"signature": "def getMaxSegmentsPerCell(self):", "body": "return self.maxSegmentsPerCell", "docstring": "Get the maximum number of segments per cell\n\n:returns: (int) max number of segments per cell", "id": "f17571:c0:m46"} {"signature": "def getMaxSynapsesPerSegment(self):", "body": "return self.maxSynapsesPerSegment", "docstring": "Get the maximum number of synapses per segment.\n\n:returns: (int) max number of synapses per segment", "id": "f17571:c0:m47"} {"signature": "def write(self, proto):", "body": "proto.columnDimensions = list(self.columnDimensions)proto.cellsPerColumn = self.cellsPerColumnproto.activationThreshold = self.activationThresholdproto.initialPermanence = round(self.initialPermanence, EPSILON_ROUND)proto.connectedPermanence = round(self.connectedPermanence, EPSILON_ROUND)proto.minThreshold = self.minThresholdproto.maxNewSynapseCount = self.maxNewSynapseCountproto.permanenceIncrement = round(self.permanenceIncrement, EPSILON_ROUND)proto.permanenceDecrement = round(self.permanenceDecrement, EPSILON_ROUND)proto.predictedSegmentDecrement = 
self.predictedSegmentDecrementproto.maxSegmentsPerCell = self.maxSegmentsPerCellproto.maxSynapsesPerSegment = self.maxSynapsesPerSegmentself.connections.write(proto.connections)self._random.write(proto.random)proto.activeCells = list(self.activeCells)proto.winnerCells = list(self.winnerCells)protoActiveSegments = proto.init(\"\", len(self.activeSegments))for i, segment in enumerate(self.activeSegments):protoActiveSegments[i].cell = segment.cellidx = self.connections.segmentsForCell(segment.cell).index(segment)protoActiveSegments[i].idxOnCell = idxprotoMatchingSegments = proto.init(\"\",len(self.matchingSegments))for i, segment in enumerate(self.matchingSegments):protoMatchingSegments[i].cell = segment.cellidx = self.connections.segmentsForCell(segment.cell).index(segment)protoMatchingSegments[i].idxOnCell = idxprotoNumActivePotential = proto.init(\"\",len(self.numActivePotentialSynapsesForSegment))for i, numActivePotentialSynapses in enumerate(self.numActivePotentialSynapsesForSegment):segment = self.connections.segmentForFlatIdx(i)if segment is not None:protoNumActivePotential[i].cell = segment.cellidx = self.connections.segmentsForCell(segment.cell).index(segment)protoNumActivePotential[i].idxOnCell = idxprotoNumActivePotential[i].number = numActivePotentialSynapsesproto.iteration = self.iterationprotoLastUsedIteration = proto.init(\"\",len(self.numActivePotentialSynapsesForSegment))for i, lastUsed in enumerate(self.lastUsedIterationForSegment):segment = self.connections.segmentForFlatIdx(i)if segment is not None:protoLastUsedIteration[i].cell = segment.cellidx = self.connections.segmentsForCell(segment.cell).index(segment)protoLastUsedIteration[i].idxOnCell = idxprotoLastUsedIteration[i].number = lastUsed", "docstring": "Writes serialized data to proto object.\n\n:param proto: (DynamicStructBuilder) Proto object", "id": "f17571:c0:m48"} {"signature": "@classmethoddef read(cls, proto):", "body": "tm = object.__new__(cls)tm.columnDimensions = tuple(proto.columnDimensions)tm.cellsPerColumn = int(proto.cellsPerColumn)tm.activationThreshold = int(proto.activationThreshold)tm.initialPermanence = round(proto.initialPermanence, EPSILON_ROUND)tm.connectedPermanence = round(proto.connectedPermanence, EPSILON_ROUND)tm.minThreshold = int(proto.minThreshold)tm.maxNewSynapseCount = int(proto.maxNewSynapseCount)tm.permanenceIncrement = round(proto.permanenceIncrement, EPSILON_ROUND)tm.permanenceDecrement = round(proto.permanenceDecrement, EPSILON_ROUND)tm.predictedSegmentDecrement = round(proto.predictedSegmentDecrement,EPSILON_ROUND)tm.maxSegmentsPerCell = int(proto.maxSegmentsPerCell)tm.maxSynapsesPerSegment = int(proto.maxSynapsesPerSegment)tm.connections = Connections.read(proto.connections)tm._random = Random()tm._random.read(proto.random)tm.activeCells = [int(x) for x in proto.activeCells]tm.winnerCells = [int(x) for x in proto.winnerCells]flatListLength = tm.connections.segmentFlatListLength()tm.numActiveConnectedSynapsesForSegment = [] * flatListLengthtm.numActivePotentialSynapsesForSegment = [] * flatListLengthtm.lastUsedIterationForSegment = [] * flatListLengthtm.activeSegments = []tm.matchingSegments = []for protoSegment in proto.activeSegments:tm.activeSegments.append(tm.connections.getSegment(protoSegment.cell,protoSegment.idxOnCell))for protoSegment in proto.matchingSegments:tm.matchingSegments.append(tm.connections.getSegment(protoSegment.cell,protoSegment.idxOnCell))for protoSegment in proto.numActivePotentialSynapsesForSegment:segment = 
tm.connections.getSegment(protoSegment.cell,protoSegment.idxOnCell)tm.numActivePotentialSynapsesForSegment[segment.flatIdx] = (int(protoSegment.number))tm.iteration = long(proto.iteration)for protoSegment in proto.lastUsedIterationForSegment:segment = tm.connections.getSegment(protoSegment.cell,protoSegment.idxOnCell)tm.lastUsedIterationForSegment[segment.flatIdx] = (long(protoSegment.number))return tm", "docstring": "Reads deserialized data from proto object.\n\n:param proto: (DynamicStructBuilder) Proto object\n\n:returns: (:class:TemporalMemory) TemporalMemory instance", "id": "f17571:c0:m50"} {"signature": "def __eq__(self, other):", "body": "if self.columnDimensions != other.columnDimensions:return Falseif self.cellsPerColumn != other.cellsPerColumn:return Falseif self.activationThreshold != other.activationThreshold:return Falseif abs(self.initialPermanence - other.initialPermanence) > EPSILON:return Falseif abs(self.connectedPermanence - other.connectedPermanence) > EPSILON:return Falseif self.minThreshold != other.minThreshold:return Falseif self.maxNewSynapseCount != other.maxNewSynapseCount:return Falseif abs(self.permanenceIncrement - other.permanenceIncrement) > EPSILON:return Falseif abs(self.permanenceDecrement - other.permanenceDecrement) > EPSILON:return Falseif abs(self.predictedSegmentDecrement -other.predictedSegmentDecrement) > EPSILON:return Falseif self.connections != other.connections:return Falseif self.activeCells != other.activeCells:return Falseif self.winnerCells != other.winnerCells:return Falseif self.matchingSegments != other.matchingSegments:return Falseif self.activeSegments != other.activeSegments:return Falsereturn True", "docstring": "Equality operator for TemporalMemory instances.\nChecks if two instances are functionally identical\n(might have different internal state).\n\n:param other: (TemporalMemory) TemporalMemory instance to compare to", "id": "f17571:c0:m51"} {"signature": "def __ne__(self, other):", "body": "return not self.__eq__(other)", "docstring": "Non-equality operator for TemporalMemory instances.\nChecks if two instances are not functionally identical\n(might have different internal state).\n\n:param other: (TemporalMemory) TemporalMemory instance to compare to", "id": "f17571:c0:m52"} {"signature": "def _validateColumn(self, column):", "body": "if column >= self.numberOfColumns() or column < :raise IndexError(\"\")", "docstring": "Raises an error if column index is invalid.\n\n:param column: (int) Column index", "id": "f17571:c0:m53"} {"signature": "def _validateCell(self, cell):", "body": "if cell >= self.numberOfCells() or cell < :raise IndexError(\"\")", "docstring": "Raises an error if cell index is invalid.\n\n:param cell: (int) Cell index", "id": "f17571:c0:m54"} {"signature": "@classmethoddef getCellIndices(cls, cells):", "body": "return [cls.getCellIndex(c) for c in cells]", "docstring": "Returns the indices of the cells passed in.\n\n:param cells: (list) cells to find the indices of", "id": "f17571:c0:m55"} {"signature": "@staticmethoddef getCellIndex(cell):", "body": "return cell", "docstring": "Returns the index of the cell.\n\n:param cell: (int) cell to find the index of", "id": "f17571:c0:m56"} {"signature": "def __init__(self, *args, **kwargs):", "body": "self.mmName = kwargs.get(\"\")if \"\" in kwargs:del kwargs[\"\"]super(MonitorMixinBase, self).__init__(*args, **kwargs)self._mmTraces = Noneself._mmData = Noneself.mmClearHistory()", "docstring": "Note: If you set the kwarg \"mmName\", then pretty-printing of traces 
and\n metrics will include the name you specify as a tag before every title.", "id": "f17572:c0:m0"} {"signature": "def mmClearHistory(self):", "body": "self._mmTraces = {}self._mmData = {}", "docstring": "Clears the stored history.", "id": "f17572:c0:m1"} {"signature": "@staticmethoddef mmPrettyPrintTraces(traces, breakOnResets=None):", "body": "assert len(traces) > , \"\"table = PrettyTable([\"\"] + [trace.prettyPrintTitle() for trace in traces])for i in xrange(len(traces[].data)):if breakOnResets and breakOnResets.data[i]:table.add_row([\"\"] * (len(traces) + ))table.add_row([i] +[trace.prettyPrintDatum(trace.data[i]) for trace in traces])return table.get_string().encode(\"\")", "docstring": "Returns pretty-printed table of traces.\n\n@param traces (list) Traces to print in table\n@param breakOnResets (BoolsTrace) Trace of resets to break table on\n\n@return (string) Pretty-printed table of traces.", "id": "f17572:c0:m2"} {"signature": "@staticmethoddef mmPrettyPrintMetrics(metrics, sigFigs=):", "body": "assert len(metrics) > , \"\"table = PrettyTable([\"\", \"\", \"\",\"\", \"\", \"\", ])for metric in metrics:table.add_row([metric.prettyPrintTitle()] + metric.getStats())return table.get_string().encode(\"\")", "docstring": "Returns pretty-printed table of metrics.\n\n@param metrics (list) Traces to print in table\n@param sigFigs (int) Number of significant figures to print\n\n@return (string) Pretty-printed table of metrics.", "id": "f17572:c0:m3"} {"signature": "def mmGetDefaultTraces(self, verbosity=):", "body": "return []", "docstring": "Returns list of default traces. (To be overridden.)\n\n@param verbosity (int) Verbosity level\n\n@return (list) Default traces", "id": "f17572:c0:m4"} {"signature": "def mmGetDefaultMetrics(self, verbosity=):", "body": "return []", "docstring": "Returns list of default metrics. (To be overridden.)\n\n@param verbosity (int) Verbosity level\n\n@return (list) Default metrics", "id": "f17572:c0:m5"} {"signature": "def mmGetCellTracePlot(self, cellTrace, cellCount, activityType, title=\"\",showReset=False, resetShading=):", "body": "plot = Plot(self, title)resetTrace = self.mmGetTraceResets().datadata = numpy.zeros((cellCount, ))for i in xrange(len(cellTrace)):if showReset and resetTrace[i]:activity = numpy.ones((cellCount, )) * resetShadingelse:activity = numpy.zeros((cellCount, ))activeIndices = cellTrace[i]activity[list(activeIndices)] = data = numpy.concatenate((data, activity), )plot.add2DArray(data, xlabel=\"\", ylabel=activityType, name=title)return plot", "docstring": "Returns plot of the cell activity. 
Note that if many timesteps of\nactivities are input, matplotlib's image interpolation may omit activities\n(columns in the image).\n\n@param cellTrace (list) a temporally ordered list of sets of cell\n activities\n\n@param cellCount (int) number of cells in the space being rendered\n\n@param activityType (string) type of cell activity being displayed\n\n@param title (string) an optional title for the figure\n\n@param showReset (bool) if true, the first set of cell activities\n after a reset will have a grayscale background\n\n@param resetShading (float) applicable if showReset is true, specifies the\n intensity of the reset background with 0.0\n being white and 1.0 being black\n\n@return (Plot) plot", "id": "f17572:c0:m6"} {"signature": "def __init__(self, monitor, title, show=True):", "body": "self._monitor = monitorself._title = titleself._fig = self._initFigure()self._show = showif self._show:plt.ion()plt.show()", "docstring": "@param monitor (MonitorMixinBase) Monitor Mixin instance that generated\n this plot\n\n@param title (string) Plot title", "id": "f17573:c0:m0"} {"signature": "def addGraph(self, data, position=, xlabel=None, ylabel=None):", "body": "ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)ax.plot(data)plt.draw()", "docstring": "Adds a graph to the plot's figure.\n\n @param data See matplotlib.Axes.plot documentation.\n @param position A 3-digit number. The first two digits define a 2D grid\n where subplots may be added. The final digit specifies the nth grid\n location for the added subplot\n @param xlabel text to be displayed on the x-axis\n @param ylabel text to be displayed on the y-axis", "id": "f17573:c0:m3"} {"signature": "def addHistogram(self, data, position=, xlabel=None, ylabel=None,bins=None):", "body": "ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)ax.hist(data, bins=bins, color=\"\", alpha=)plt.draw()", "docstring": "Adds a histogram to the plot's figure.\n\n @param data See matplotlib.Axes.hist documentation.\n @param position A 3-digit number. The first two digits define a 2D grid\n where subplots may be added. The final digit specifies the nth grid\n location for the added subplot\n @param xlabel text to be displayed on the x-axis\n @param ylabel text to be displayed on the y-axis", "id": "f17573:c0:m4"} {"signature": "def add2DArray(self, data, position=, xlabel=None, ylabel=None, cmap=None,aspect=\"\", interpolation=\"\", name=None):", "body": "if cmap is None:cmap = cm.Greysax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)ax.imshow(data, cmap=cmap, aspect=aspect, interpolation=interpolation)if self._show:plt.draw()if name is not None:if not os.path.exists(\"\"):os.mkdir(\"\")plt.savefig(\"\".format(name=name), bbox_inches=\"\",figsize=(, ), dpi=)", "docstring": "Adds an image to the plot's figure.\n\n @param data a 2D array. See matplotlib.Axes.imshow documentation.\n @param position A 3-digit number. The first two digits define a 2D grid\n where subplots may be added. 
The final digit specifies the nth grid\n location for the added subplot\n @param xlabel text to be displayed on the x-axis\n @param ylabel text to be displayed on the y-axis\n @param cmap color map used in the rendering\n @param aspect how aspect ratio is handled during resize\n @param interpolation interpolation method", "id": "f17573:c0:m5"} {"signature": "def _addBase(self, position, xlabel=None, ylabel=None):", "body": "ax = self._fig.add_subplot(position)ax.set_xlabel(xlabel)ax.set_ylabel(ylabel)return ax", "docstring": "Adds a subplot to the plot's figure at specified position.\n\n @param position A 3-digit number. The first two digits define a 2D grid\n where subplots may be added. The final digit specifies the nth grid\n location for the added subplot\n @param xlabel text to be displayed on the x-axis\n @param ylabel text to be displayed on the y-axis\n @returns (matplotlib.Axes) Axes instance", "id": "f17573:c0:m6"} {"signature": "def mmGetTraceActiveColumns(self):", "body": "return self._mmTraces[\"\"]", "docstring": "@return (Trace) Trace of active columns", "id": "f17574:c0:m1"} {"signature": "def mmGetTracePredictiveCells(self):", "body": "return self._mmTraces[\"\"]", "docstring": "@return (Trace) Trace of predictive cells", "id": "f17574:c0:m2"} {"signature": "def mmGetTraceNumSegments(self):", "body": "return self._mmTraces[\"\"]", "docstring": "@return (Trace) Trace of # segments", "id": "f17574:c0:m3"} {"signature": "def mmGetTraceNumSynapses(self):", "body": "return self._mmTraces[\"\"]", "docstring": "@return (Trace) Trace of # synapses", "id": "f17574:c0:m4"} {"signature": "def mmGetTraceSequenceLabels(self):", "body": "return self._mmTraces[\"\"]", "docstring": "@return (Trace) Trace of sequence labels", "id": "f17574:c0:m5"} {"signature": "def mmGetTraceResets(self):", "body": "return self._mmTraces[\"\"]", "docstring": "@return (Trace) Trace of resets", "id": "f17574:c0:m6"} {"signature": "def mmGetTracePredictedActiveCells(self):", "body": "self._mmComputeTransitionTraces()return self._mmTraces[\"\"]", "docstring": "@return (Trace) Trace of predicted => active cells", "id": "f17574:c0:m7"} {"signature": "def mmGetTracePredictedInactiveCells(self):", "body": "self._mmComputeTransitionTraces()return self._mmTraces[\"\"]", "docstring": "@return (Trace) Trace of predicted => inactive cells", "id": "f17574:c0:m8"} {"signature": "def mmGetTracePredictedActiveColumns(self):", "body": "self._mmComputeTransitionTraces()return self._mmTraces[\"\"]", "docstring": "@return (Trace) Trace of predicted => active columns", "id": "f17574:c0:m9"} {"signature": "def mmGetTracePredictedInactiveColumns(self):", "body": "self._mmComputeTransitionTraces()return self._mmTraces[\"\"]", "docstring": "@return (Trace) Trace of predicted => inactive columns", "id": "f17574:c0:m10"} {"signature": "def mmGetTraceUnpredictedActiveColumns(self):", "body": "self._mmComputeTransitionTraces()return self._mmTraces[\"\"]", "docstring": "@return (Trace) Trace of unpredicted => active columns", "id": "f17574:c0:m11"} {"signature": "def mmGetMetricFromTrace(self, trace):", "body": "return Metric.createFromTrace(trace.makeCountsTrace(),excludeResets=self.mmGetTraceResets())", "docstring": "Convenience method to compute a metric over an indices trace, excluding\nresets.\n\n@param (IndicesTrace) Trace of indices\n\n@return (Metric) Metric over trace excluding resets", "id": "f17574:c0:m12"} {"signature": "def mmGetMetricSequencesPredictedActiveCellsPerColumn(self):", "body": 
"self._mmComputeTransitionTraces()numCellsPerColumn = []for predictedActiveCells in (self._mmData[\"\"].values()):cellsForColumn = self.mapCellsToColumns(predictedActiveCells)numCellsPerColumn += [len(x) for x in cellsForColumn.values()]return Metric(self,\"\",numCellsPerColumn)", "docstring": "Metric for number of predicted => active cells per column for each sequence\n\n@return (Metric) metric", "id": "f17574:c0:m13"} {"signature": "def mmGetMetricSequencesPredictedActiveCellsShared(self):", "body": "self._mmComputeTransitionTraces()numSequencesForCell = defaultdict(lambda: )for predictedActiveCells in (self._mmData[\"\"].values()):for cell in predictedActiveCells:numSequencesForCell[cell] += return Metric(self,\"\",numSequencesForCell.values())", "docstring": "Metric for number of sequences each predicted => active cell appears in\n\nNote: This metric is flawed when it comes to high-order sequences.\n\n@return (Metric) metric", "id": "f17574:c0:m14"} {"signature": "def mmPrettyPrintConnections(self):", "body": "text = \"\"text += (\"\"\"\")text += \"\"columns = range(self.numberOfColumns())for column in columns:cells = self.cellsForColumn(column)for cell in cells:segmentDict = dict()for seg in self.connections.segmentsForCell(cell):synapseList = []for synapse in self.connections.synapsesForSegment(seg):synapseData = self.connections.dataForSynapse(synapse)synapseList.append((synapseData.presynapticCell, synapseData.permanence))synapseList.sort()synapseStringList = [\"\".format(sourceCell, permanence) forsourceCell, permanence in synapseList]segmentDict[seg] = \"\".format(\"\".join(synapseStringList))text += (\"\".format(column, cell,len(segmentDict.values()),\"\".format(\"\".join(segmentDict.values()))))if column < len(columns) - : text += \"\"text += \"\"return text", "docstring": "Pretty print the connections in the temporal memory.\n\nTODO: Use PrettyTable.\n\n@return (string) Pretty-printed text", "id": "f17574:c0:m15"} {"signature": "def mmPrettyPrintSequenceCellRepresentations(self, sortby=\"\"):", "body": "self._mmComputeTransitionTraces()table = PrettyTable([\"\", \"\", \"\"])for sequenceLabel, predictedActiveCells in (self._mmData[\"\"].iteritems()):cellsForColumn = self.mapCellsToColumns(predictedActiveCells)for column, cells in cellsForColumn.iteritems():table.add_row([sequenceLabel, column, list(cells)])return table.get_string(sortby=sortby).encode(\"\")", "docstring": "Pretty print the cell representations for sequences in the history.\n\n@param sortby (string) Column of table to sort by\n\n@return (string) Pretty-printed text", "id": "f17574:c0:m16"} {"signature": "def _mmComputeTransitionTraces(self):", "body": "if not self._mmTransitionTracesStale:returnself._mmData[\"\"] = defaultdict(set)self._mmTraces[\"\"] = IndicesTrace(self,\"\")self._mmTraces[\"\"] = IndicesTrace(self,\"\")self._mmTraces[\"\"] = IndicesTrace(self,\"\")self._mmTraces[\"\"] = IndicesTrace(self,\"\")self._mmTraces[\"\"] = IndicesTrace(self,\"\")predictedCellsTrace = self._mmTraces[\"\"]for i, activeColumns in enumerate(self.mmGetTraceActiveColumns().data):predictedActiveCells = set()predictedInactiveCells = set()predictedActiveColumns = set()predictedInactiveColumns = set()for predictedCell in predictedCellsTrace.data[i]:predictedColumn = self.columnForCell(predictedCell)if predictedColumn in activeColumns:predictedActiveCells.add(predictedCell)predictedActiveColumns.add(predictedColumn)sequenceLabel = self.mmGetTraceSequenceLabels().data[i]if sequenceLabel is not 
None:self._mmData[\"\"][sequenceLabel].add(predictedCell)else:predictedInactiveCells.add(predictedCell)predictedInactiveColumns.add(predictedColumn)unpredictedActiveColumns = activeColumns - predictedActiveColumnsself._mmTraces[\"\"].data.append(predictedActiveCells)self._mmTraces[\"\"].data.append(predictedInactiveCells)self._mmTraces[\"\"].data.append(predictedActiveColumns)self._mmTraces[\"\"].data.append(predictedInactiveColumns)self._mmTraces[\"\"].data.append(unpredictedActiveColumns)self._mmTransitionTracesStale = False", "docstring": "Computes the transition traces, if necessary.\n\nTransition traces are the following:\n\n predicted => active cells\n predicted => inactive cells\n predicted => active columns\n predicted => inactive columns\n unpredicted => active columns", "id": "f17574:c0:m17"} {"signature": "def mmGetCellActivityPlot(self, title=\"\", showReset=False,resetShading=, activityType=\"\"):", "body": "if activityType == \"\":self._mmComputeTransitionTraces()cellTrace = copy.deepcopy(self._mmTraces[activityType].data)for i in xrange(len(cellTrace)):cellTrace[i] = self.getCellIndices(cellTrace[i])return self.mmGetCellTracePlot(cellTrace, self.numberOfCells(),activityType, title, showReset,resetShading)", "docstring": "Returns plot of the cell activity.\n\n@param title (string) an optional title for the figure\n\n@param showReset (bool) if true, the first set of cell activities\n after a reset will have a gray background\n\n@param resetShading (float) if showReset is true, this float specifies the\n intensity of the reset background with 0.0\n being white and 1.0 being black\n\n@param activityType (string) The type of cell activity to display. Valid\n types include \"activeCells\",\n \"predictiveCells\", \"predictedCells\",\n and \"predictedActiveCells\"\n\n@return (Plot) plot", "id": "f17574:c0:m23"} {"signature": "def __init__(self, monitor, title, data):", "body": "self.monitor = monitorself.title = titleself.min = Noneself.max = Noneself.sum = Noneself.mean = Noneself.standardDeviation = Noneself._computeStats(data)", "docstring": "@param monitor (MonitorMixinBase) Monitor Mixin instance that generated\n this trace\n@param title (string) Title\n@param data (list) List of numbers to compute metric from", "id": "f17575:c0:m0"} {"signature": "def __init__(self, monitor, title):", "body": "self.monitor = monitorself.title = titleself.data = []", "docstring": "@param monitor (MonitorMixinBase) Monitor Mixin instance that generated\n this trace\n@param title (string) Title", "id": "f17576:c0:m0"} {"signature": "@staticmethoddef prettyPrintDatum(datum):", "body": "return str(datum) if datum is not None else \"\"", "docstring": "@param datum (object) Datum from `self.data` to pretty-print\n\n@return (string) Pretty-printed datum", "id": "f17576:c0:m2"} {"signature": "def makeCountsTrace(self):", "body": "trace = CountsTrace(self.monitor, \"\".format(self.title))trace.data = [len(indices) for indices in self.data]return trace", "docstring": "@return (CountsTrace) A new Trace made up of counts of this trace's indices.", "id": "f17576:c1:m0"} {"signature": "def makeCumCountsTrace(self):", "body": "trace = CountsTrace(self.monitor, \"\".format(self.title))countsTrace = self.makeCountsTrace()def accumulate(iterator):total = for item in iterator:total += itemyield totaltrace.data = list(accumulate(countsTrace.data))return trace", "docstring": "@return (CountsTrace) A new Trace made up of cumulative counts of this\ntrace's indices.", "id": "f17576:c1:m1"} {"signature": "def 
GET(self):", "body": "global g_modelsreturn json.dumps({\"\": g_models.keys()})", "docstring": "/models\n\nreturns:\n[model1, model2, model3, ...] list of model names", "id": "f17578:c0:m0"} {"signature": "def POST(self, name):", "body": "global g_modelsdata = json.loads(web.data())modelParams = data[\"\"]predictedFieldName = data[\"\"]if name in g_models.keys():raise web.badrequest(\"\" % name)model = ModelFactory.create(modelParams)model.enableInference({'': predictedFieldName})g_models[name] = modelreturn json.dumps({\"\": name})", "docstring": "/models/{name}\n\nschema:\n{\n \"modelParams\": dict containing model parameters\n \"predictedFieldName\": str\n}\n\nreturns:\n{\"success\":name}", "id": "f17578:c0:m1"} {"signature": "def POST(self, name):", "body": "global g_modelsdata = json.loads(web.data())data[\"\"] = datetime.datetime.strptime(data[\"\"], \"\")if name not in g_models.keys():raise web.notfound(\"\" % name)modelResult = g_models[name].run(data)predictionNumber = modelResult.predictionNumberanomalyScore = modelResult.inferences[\"\"]return json.dumps({\"\": predictionNumber,\"\": anomalyScore})", "docstring": "/models/{name}/run\n\nschema:\n {\n predictedFieldName: value\n timestamp: %m/%d/%y %H:%M\n }\n NOTE: predictedFieldName MUST be the same name specified when\n creating the model.\n\nreturns:\n{\n \"predictionNumber\":,\n \"anomalyScore\":anomalyScore\n}", "id": "f17578:c1:m0"} {"signature": "def __init__(self, hsObj):", "body": "self._hsObj = hsObjself._allResults = []self._errModels = set()self._numErrModels = self._completedModels = set()self._numCompletedModels = self._modelIDToIdx = dict()self._bestResult = numpy.infself._bestModelID = Noneself._swarmBestOverall = dict()self._swarmNumParticlesPerGeneration = dict()self._modifiedSwarmGens = set()self._maturedSwarmGens = set()self._particleBest = dict()self._particleLatestGenIdx = dict()self._swarmIdToIndexes = dict()self._paramsHashToIndexes = dict()", "docstring": "Instantiate our results database\n\n Parameters:\n --------------------------------------------------------------------\n hsObj: Reference to the HypersearchV2 instance", "id": "f17579:c0:m0"} {"signature": "def update(self, modelID, modelParams, modelParamsHash, metricResult,completed, completionReason, matured, numRecords):", "body": "assert (modelParamsHash is not None)if completed:matured = Trueif metricResult is not None and matured andcompletionReason in [ClientJobsDAO.CMPL_REASON_EOF,ClientJobsDAO.CMPL_REASON_STOPPED]:if self._hsObj._maximize:errScore = - * metricResultelse:errScore = metricResultif errScore < self._bestResult:self._bestResult = errScoreself._bestModelID = modelIDself._hsObj.logger.info(\"\"\"\" % (len(self._allResults), self._bestResult,self._bestModelID))else:errScore = numpy.infif completed and completionReason in [ClientJobsDAO.CMPL_REASON_ORPHAN]:errScore = numpy.infhidden = Trueelse:hidden = Falseif completed:self._completedModels.add(modelID)self._numCompletedModels = len(self._completedModels)if completionReason == ClientJobsDAO.CMPL_REASON_ERROR:self._errModels.add(modelID)self._numErrModels = len(self._errModels)wasHidden = Falseif modelID not in self._modelIDToIdx:assert (modelParams is not None)entry = dict(modelID=modelID, modelParams=modelParams,modelParamsHash=modelParamsHash,errScore=errScore, completed=completed,matured=matured, numRecords=numRecords, hidden=hidden)self._allResults.append(entry)entryIdx = len(self._allResults) - self._modelIDToIdx[modelID] = entryIdxself._paramsHashToIndexes[modelParamsHash] = 
entryIdxswarmId = modelParams['']['']if not hidden:if swarmId in self._swarmIdToIndexes:self._swarmIdToIndexes[swarmId].append(entryIdx)else:self._swarmIdToIndexes[swarmId] = [entryIdx]genIdx = modelParams['']['']numPsEntry = self._swarmNumParticlesPerGeneration.get(swarmId, [])while genIdx >= len(numPsEntry):numPsEntry.append()numPsEntry[genIdx] += self._swarmNumParticlesPerGeneration[swarmId] = numPsEntryelse:entryIdx = self._modelIDToIdx.get(modelID, None)assert (entryIdx is not None)entry = self._allResults[entryIdx]wasHidden = entry['']if entry[''] != modelParamsHash:self._paramsHashToIndexes.pop(entry[''])self._paramsHashToIndexes[modelParamsHash] = entryIdxentry[''] = modelParamsHashmodelParams = entry['']swarmId = modelParams['']['']genIdx = modelParams['']['']if hidden and not wasHidden:assert (entryIdx in self._swarmIdToIndexes[swarmId])self._swarmIdToIndexes[swarmId].remove(entryIdx)self._swarmNumParticlesPerGeneration[swarmId][genIdx] -= entry[''] = errScoreentry[''] = completedentry[''] = maturedentry[''] = numRecordsentry[''] = hiddenparticleId = modelParams['']['']genIdx = modelParams['']['']if matured and not hidden:(oldResult, pos) = self._particleBest.get(particleId, (numpy.inf, None))if errScore < oldResult:pos = Particle.getPositionFromState(modelParams[''])self._particleBest[particleId] = (errScore, pos)prevGenIdx = self._particleLatestGenIdx.get(particleId, -)if not hidden and genIdx > prevGenIdx:self._particleLatestGenIdx[particleId] = genIdxelif hidden and not wasHidden and genIdx == prevGenIdx:self._particleLatestGenIdx[particleId] = genIdx-if not hidden:swarmId = modelParams['']['']if not swarmId in self._swarmBestOverall:self._swarmBestOverall[swarmId] = []bestScores = self._swarmBestOverall[swarmId]while genIdx >= len(bestScores):bestScores.append((None, numpy.inf))if errScore < bestScores[genIdx][]:bestScores[genIdx] = (modelID, errScore)if not hidden:key = (swarmId, genIdx)if not key in self._maturedSwarmGens:self._modifiedSwarmGens.add(key)return errScore", "docstring": "Insert a new entry or update an existing one. 
If this is an update\n of an existing entry, then modelParams will be None\n\n Parameters:\n --------------------------------------------------------------------\n modelID: globally unique modelID of this model\n modelParams: params dict for this model, or None if this is just an update\n of a model that it already previously reported on.\n\n See the comments for the createModels() method for\n a description of this dict.\n\n modelParamsHash: hash of the modelParams dict, generated by the worker\n that put it into the model database.\n metricResult: value on the optimizeMetric for this model.\n May be None if we have no results yet.\n completed: True if the model has completed evaluation, False if it\n is still running (and these are online results)\n completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates\n matured: True if this model has matured\n numRecords: Number of records that have been processed so far by this\n model.\n\n retval: Canonicalized result on the optimize metric", "id": "f17579:c0:m1"} {"signature": "def getNumErrModels(self):", "body": "return self._numErrModels", "docstring": "Return number of models that completed with errors.\n\n Parameters:\n ---------------------------------------------------------------------\n retval: # of models", "id": "f17579:c0:m2"} {"signature": "def getErrModelIds(self):", "body": "return list(self._errModels)", "docstring": "Return list of model IDs that completed with errors.\n\n Parameters:\n ---------------------------------------------------------------------\n retval: # of models", "id": "f17579:c0:m3"} {"signature": "def getNumCompletedModels(self):", "body": "return self._numCompletedModels", "docstring": "Return total number of models that completed.\n\n Parameters:\n ---------------------------------------------------------------------\n retval: # of models that completed", "id": "f17579:c0:m4"} {"signature": "def getModelIDFromParamsHash(self, paramsHash):", "body": "entryIdx = self. _paramsHashToIndexes.get(paramsHash, None)if entryIdx is not None:return self._allResults[entryIdx]['']else:return None", "docstring": "Return the modelID of the model with the given paramsHash, or\n None if not found.\n\n Parameters:\n ---------------------------------------------------------------------\n paramsHash: paramsHash to look for\n retval: modelId, or None if not found", "id": "f17579:c0:m5"} {"signature": "def numModels(self, swarmId=None, includeHidden=False):", "body": "if includeHidden:if swarmId is None:return len(self._allResults)else:return len(self._swarmIdToIndexes.get(swarmId, []))else:if swarmId is None:entries = self._allResultselse:entries = [self._allResults[entryIdx]for entryIdx in self._swarmIdToIndexes.get(swarmId,[])]return len([entry for entry in entries if not entry['']])", "docstring": "Return the total # of models we have in our database (if swarmId is\n None) or in a specific swarm.\n\n Parameters:\n ---------------------------------------------------------------------\n swarmId: A string representation of the sorted list of encoders\n in this swarm. For example '__address_encoder.__gym_encoder'\n includeHidden: If False, this will only return the number of models\n that are not hidden (i.e. 
orphaned, etc.)\n retval: numModels", "id": "f17579:c0:m6"} {"signature": "def bestModelIdAndErrScore(self, swarmId=None, genIdx=None):", "body": "if swarmId is None:return (self._bestModelID, self._bestResult)else:if swarmId not in self._swarmBestOverall:return (None, numpy.inf)genScores = self._swarmBestOverall[swarmId]bestModelId = NonebestScore = numpy.inffor (i, (modelId, errScore)) in enumerate(genScores):if genIdx is not None and i > genIdx:breakif errScore < bestScore:bestScore = errScorebestModelId = modelIdreturn (bestModelId, bestScore)", "docstring": "Return the model ID of the model with the best result so far and\n its score on the optimize metric. If swarm is None, then it returns\n the global best, otherwise it returns the best for the given swarm\n for all generations up to and including genIdx.\n\n Parameters:\n ---------------------------------------------------------------------\n swarmId: A string representation of the sorted list of encoders in this\n swarm. For example '__address_encoder.__gym_encoder'\n genIdx: consider the best in all generations up to and including this\n generation if not None.\n retval: (modelID, result)", "id": "f17579:c0:m7"} {"signature": "def getParticleInfo(self, modelId):", "body": "entry = self._allResults[self._modelIDToIdx[modelId]]return (entry[''][''], modelId, entry[''],entry[''], entry[''])", "docstring": "Return particle info for a specific modelId.\n\n Parameters:\n ---------------------------------------------------------------------\n modelId: which model Id\n\n retval: (particleState, modelId, errScore, completed, matured)", "id": "f17579:c0:m8"} {"signature": "def getParticleInfos(self, swarmId=None, genIdx=None, completed=None,matured=None, lastDescendent=False):", "body": "if swarmId is not None:entryIdxs = self._swarmIdToIndexes.get(swarmId, [])else:entryIdxs = list(range(len(self._allResults)))if len(entryIdxs) == :return ([], [], [], [], [])particleStates = []modelIds = []errScores = []completedFlags = []maturedFlags = []for idx in entryIdxs:entry = self._allResults[idx]if swarmId is not None:assert (not entry[''])modelParams = entry['']isCompleted = entry['']isMatured = entry['']particleState = modelParams['']particleGenIdx = particleState['']particleId = particleState['']if genIdx is not None and particleGenIdx != genIdx:continueif completed is not None and (completed != isCompleted):continueif matured is not None and (matured != isMatured):continueif lastDescendentand (self._particleLatestGenIdx[particleId] != particleGenIdx):continueparticleStates.append(particleState)modelIds.append(entry[''])errScores.append(entry[''])completedFlags.append(isCompleted)maturedFlags.append(isMatured)return (particleStates, modelIds, errScores, completedFlags, maturedFlags)", "docstring": "Return a list of particleStates for all particles we know about in\n the given swarm, their model Ids, and metric results.\n\n Parameters:\n ---------------------------------------------------------------------\n swarmId: A string representation of the sorted list of encoders in this\n swarm. For example '__address_encoder.__gym_encoder'\n\n genIdx: If not None, only return particles at this specific generation\n index.\n\n completed: If not None, only return particles of the given state (either\n completed if 'completed' is True, or running if 'completed'\n is false\n\n matured: If not None, only return particles of the given state (either\n matured if 'matured' is True, or not matured if 'matured'\n is false. 
Note that any model which has completed is also\n considered matured.\n\n lastDescendent: If True, only return particles that are the last descendent,\n that is, the highest generation index for a given particle Id\n\n retval: (particleStates, modelIds, errScores, completed, matured)\n particleStates: list of particleStates\n modelIds: list of modelIds\n errScores: list of errScores, numpy.inf is plugged in\n if we don't have a result yet\n completed: list of completed booleans\n matured: list of matured booleans", "id": "f17579:c0:m9"} {"signature": "def getOrphanParticleInfos(self, swarmId, genIdx):", "body": "entryIdxs = list(range(len(self._allResults)))if len(entryIdxs) == :return ([], [], [], [], [])particleStates = []modelIds = []errScores = []completedFlags = []maturedFlags = []for idx in entryIdxs:entry = self._allResults[idx]if not entry['']:continuemodelParams = entry['']if modelParams[''][''] != swarmId:continueisCompleted = entry['']isMatured = entry['']particleState = modelParams['']particleGenIdx = particleState['']particleId = particleState['']if genIdx is not None and particleGenIdx != genIdx:continueparticleStates.append(particleState)modelIds.append(entry[''])errScores.append(entry[''])completedFlags.append(isCompleted)maturedFlags.append(isMatured)return (particleStates, modelIds, errScores, completedFlags, maturedFlags)", "docstring": "Return a list of particleStates for all particles in the given\n swarm generation that have been orphaned.\n\n Parameters:\n ---------------------------------------------------------------------\n swarmId: A string representation of the sorted list of encoders in this\n swarm. For example '__address_encoder.__gym_encoder'\n\n genIdx: If not None, only return particles at this specific generation\n index.\n\n retval: (particleStates, modelIds, errScores, completed, matured)\n particleStates: list of particleStates\n modelIds: list of modelIds\n errScores: list of errScores, numpy.inf is plugged in\n if we don't have a result yet\n completed: list of completed booleans\n matured: list of matured booleans", "id": "f17579:c0:m10"} {"signature": "def getMaturedSwarmGenerations(self):", "body": "result = []modifiedSwarmGens = sorted(self._modifiedSwarmGens)for key in modifiedSwarmGens:(swarmId, genIdx) = keyif key in self._maturedSwarmGens:self._modifiedSwarmGens.remove(key)continueif (genIdx >= ) and not (swarmId, genIdx-) in self._maturedSwarmGens:continue(_, _, errScores, completedFlags, maturedFlags) =self.getParticleInfos(swarmId, genIdx)maturedFlags = numpy.array(maturedFlags)numMatured = maturedFlags.sum()if numMatured >= self._hsObj._minParticlesPerSwarmand numMatured == len(maturedFlags):errScores = numpy.array(errScores)bestScore = errScores.min()self._maturedSwarmGens.add(key)self._modifiedSwarmGens.remove(key)result.append((swarmId, genIdx, bestScore))return result", "docstring": "Return a list of swarm generations that have completed and the\n best (minimal) errScore seen for each of them.\n\n Parameters:\n ---------------------------------------------------------------------\n retval: list of tuples. 
Each tuple is of the form:\n (swarmId, genIdx, bestErrScore)", "id": "f17579:c0:m11"} {"signature": "def firstNonFullGeneration(self, swarmId, minNumParticles):", "body": "if not swarmId in self._swarmNumParticlesPerGeneration:return NonenumPsPerGen = self._swarmNumParticlesPerGeneration[swarmId]numPsPerGen = numpy.array(numPsPerGen)firstNonFull = numpy.where(numPsPerGen < minNumParticles)[]if len(firstNonFull) == :return len(numPsPerGen)else:return firstNonFull[]", "docstring": "Return the generation index of the first generation in the given\n swarm that does not have minNumParticles particles in it, either still in the\n running state or completed. This does not include orphaned particles.\n\n Parameters:\n ---------------------------------------------------------------------\n swarmId: A string representation of the sorted list of encoders in this\n swarm. For example '__address_encoder.__gym_encoder'\n minNumParticles: minimum number of particles required for a full\n generation.\n\n retval: generation index, or None if no particles at all.", "id": "f17579:c0:m12"} {"signature": "def highestGeneration(self, swarmId):", "body": "numPsPerGen = self._swarmNumParticlesPerGeneration[swarmId]return len(numPsPerGen)-", "docstring": "Return the generation index of the highest generation in the given\n swarm.\n\n Parameters:\n ---------------------------------------------------------------------\n swarmId: A string representation of the sorted list of encoders in this\n swarm. For example '__address_encoder.__gym_encoder'\n retval: generation index", "id": "f17579:c0:m13"} {"signature": "def getParticleBest(self, particleId):", "body": "return self._particleBest.get(particleId, (None, None))", "docstring": "Return the best score and position for a given particle. The position\n is given as a dict, with varName:varPosition items in it.\n\n Parameters:\n ---------------------------------------------------------------------\n particleId: which particle\n retval: (bestResult, bestPosition)", "id": "f17579:c0:m14"} {"signature": "def getResultsPerChoice(self, swarmId, maxGenIdx, varName):", "body": "results = dict()(allParticles, _, resultErrs, _, _) = self.getParticleInfos(swarmId,genIdx=None, matured=True)for particleState, resultErr in zip(allParticles, resultErrs):if maxGenIdx is not None:if particleState[''] > maxGenIdx:continueif resultErr == numpy.inf:continueposition = Particle.getPositionFromState(particleState)varPosition = position[varName]varPositionStr = str(varPosition)if varPositionStr in results:results[varPositionStr][].append(resultErr)else:results[varPositionStr] = (varPosition, [resultErr])return results", "docstring": "Return a dict of the errors obtained on models that were run with\n each value from a PermuteChoice variable.\n\n For example, if a PermuteChoice variable has the following choices:\n ['a', 'b', 'c']\n\n The dict will have 3 elements. 
The keys are the stringified choiceVars,\n and each value is tuple containing (choiceVar, errors) where choiceVar is\n the original form of the choiceVar (before stringification) and errors is\n the list of errors received from models that used the specific choice:\n retval:\n ['a':('a', [0.1, 0.2, 0.3]), 'b':('b', [0.5, 0.1, 0.6]), 'c':('c', [])]\n\n\n Parameters:\n ---------------------------------------------------------------------\n swarmId: swarm Id of the swarm to retrieve info from\n maxGenIdx: max generation index to consider from other models, ignored\n if None\n varName: which variable to retrieve\n\n retval: list of the errors obtained from each choice.", "id": "f17579:c0:m15"} {"signature": "def __init__(self, searchParams, workerID=None, cjDAO=None, jobID=None,logLevel=None):", "body": "self.logger = logging.getLogger(\"\".join( ['',self.__class__.__module__, self.__class__.__name__]))if logLevel is not None:self.logger.setLevel(logLevel)random.seed()self._searchParams = searchParamsself._workerID = workerIDself._cjDAO = cjDAOself._jobID = jobIDself.logger.info(\"\" % (pprint.pformat(clippedObj(searchParams))))self._createCheckpoints = self._searchParams.get('',False)self._maxModels = self._searchParams.get('', None)if self._maxModels == -:self._maxModels = Noneself._predictionCacheMaxRecords = self._searchParams.get('', None)self._speculativeParticles = self._searchParams.get('',bool(int(Configuration.get(''))))self._speculativeWaitSecondsMax = float(Configuration.get(''))self._maxBranching= int(Configuration.get(''))self._minFieldContribution= float(Configuration.get(''))self._jobCancelled = Falseif '' in self._searchParams:useTerminators = self._searchParams['']useTerminators = str(int(useTerminators))Configuration.set('', useTerminators)Configuration.set('', useTerminators)Configuration.set('', useTerminators)if '' in os.environ:self._maxModels = int(os.environ[''])self._dummyModel = self._searchParams.get('', None)self._tempDir = Nonetry:if '' in self._searchParams:if ('' in self._searchParams or'' in self._searchParams or'' in self._searchParams):raise RuntimeError(\"\"\"\"\"\")searchParamObj = self._searchParamsanomalyParams = searchParamObj[''].get('',dict())if anomalyParams is None:anomalyParams = dict()if (('' not in anomalyParams) or(anomalyParams[''] is None)):streamDef = self._getStreamDef(searchParamObj[''])from nupic.data.stream_reader import StreamReadertry:streamReader = StreamReader(streamDef, isBlocking=False,maxTimeout=, eofOnTimeout=True)anomalyParams[''] =streamReader.getDataRowCount()except Exception:anomalyParams[''] = Noneself._searchParams[''][''] = anomalyParamsoutDir = self._tempDir = tempfile.mkdtemp()expGenerator(['' % (json.dumps(self._searchParams[''])),'','' % (outDir)])permutationsScript = os.path.join(outDir, '')elif '' in self._searchParams:if ('' in self._searchParams or'' in self._searchParams or'' in self._searchParams):raise RuntimeError(\"\"\"\"\"\")permutationsScript = self._searchParams['']elif '' in self._searchParams:if ('' in self._searchParams or'' in self._searchParams):raise RuntimeError(\"\"\"\"\"\")assert ('' in self._searchParams)outDir = self._tempDir = tempfile.mkdtemp()permutationsScript = os.path.join(outDir, '')fd = open(permutationsScript, '')fd.write(self._searchParams[''])fd.close()fd = open(os.path.join(outDir, ''), '')fd.write(self._searchParams[''])fd.close()else:raise RuntimeError (\"\"\"\")self._basePath = os.path.dirname(permutationsScript)self._baseDescription = 
open(os.path.join(self._basePath,'')).read()self._baseDescriptionHash = hashlib.md5(self._baseDescription).digest()modelDescription, _ = helpers.loadExperiment(self._basePath)self._readPermutationsFile(permutationsScript, modelDescription)if self._cjDAO is not None:updated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,fieldName='',curValue=None,newValue = self._baseDescription)if updated:permContents = open(permutationsScript).read()self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,fieldName='',curValue=None,newValue = permContents)if self._dummyModelParamsFunc is not None:if self._dummyModel is None:self._dummyModel = dict()if self.logger.getEffectiveLevel() <= logging.DEBUG:msg = io.StringIO()print(\"\", file=msg)info = dict()for key in ['', '','', '','', '', '']:info[key] = getattr(self, key)print(pprint.pformat(info), file=msg)self.logger.debug(msg.getvalue())msg.close()self._resultsDB = ResultsDB(self)self._swarmTerminator = SwarmTerminator()self._hsState = Noneself._maxUniqueModelAttempts = int(Configuration.get(''))self._modelOrphanIntervalSecs = float(Configuration.get(''))self._maxPctErrModels = float(Configuration.get(''))except:if self._tempDir is not None:shutil.rmtree(self._tempDir)self._tempDir = Noneraisereturn", "docstring": "Instantiate the HyperseachV2 instance.\n\n Parameters:\n ----------------------------------------------------------------------\n searchParams: a dict of the job's search parameters. The format is:\n\n persistentJobGUID: REQUIRED.\n Persistent, globally-unique identifier for this job\n for use in constructing persistent model checkpoint\n keys. MUST be compatible with S3 key-naming rules, but\n MUST NOT contain forward slashes. This GUID is\n expected to retain its global uniqueness across\n clusters and cluster software updates (unlike the\n record IDs in the Engine's jobs table, which recycle\n upon table schema change and software update). In the\n future, this may also be instrumental for checkpoint\n garbage collection.\n\n permutationsPyFilename:\n OPTIONAL - path to permutations.py file\n permutationsPyContents:\n OPTIONAL - JSON encoded string with\n contents of permutations.py file\n descriptionPyContents:\n OPTIONAL - JSON encoded string with\n contents of base description.py file\n description: OPTIONAL - JSON description of the search\n createCheckpoints: OPTIONAL - Whether to create checkpoints\n useTerminators OPTIONAL - True of False (default config.xml). When set\n to False, the model and swarm terminators\n are disabled\n maxModels: OPTIONAL - max # of models to generate\n NOTE: This is a deprecated location for this\n setting. Now, it should be specified through\n the maxModels variable within the permutations\n file, or maxModels in the JSON description\n dummyModel: OPTIONAL - Either (True/False) or a dict of parameters\n for a dummy model. If this key is absent,\n a real model is trained.\n See utils.py/OPFDummyModel runner for the\n schema of the dummy parameters\n speculativeParticles OPTIONAL - True or False (default obtained from\n nupic.hypersearch.speculative.particles.default\n configuration property). See note below.\n\n NOTE: The caller must provide just ONE of the following to describe the\n hypersearch:\n 1.) permutationsPyFilename\n OR 2.) permutationsPyContents & permutationsPyContents\n OR 3.) 
description\n\n The schema for the description element can be found at:\n \"py/nupic/frameworks/opf/expGenerator/experimentDescriptionSchema.json\"\n\n NOTE about speculativeParticles: If true (not 0), hypersearch workers will\n go ahead and create and run particles in subsequent sprints and\n generations before the current generation or sprint has been completed. If\n false, a worker will wait in a sleep loop until the current generation or\n sprint has finished before choosing the next particle position or going\n into the next sprint. When true, the best model can be found faster, but\n results are less repeatable due to the randomness of when each worker\n completes each particle. This property can be overridden via the\n speculativeParticles element of the Hypersearch job params.\n\n\n workerID: our unique Hypersearch worker ID\n\n cjDAO: ClientJobsDB Data Access Object\n jobID: job ID for this hypersearch job\n logLevel: override logging level to this value, if not None", "id": "f17579:c1:m0"} {"signature": "def _getStreamDef(self, modelDescription):", "body": "aggregationPeriod = {'': ,'': ,'': ,'': ,'': ,'': ,'': ,'': ,'': ,}aggFunctionsDict = {}if '' in modelDescription['']:for key in list(aggregationPeriod.keys()):if key in modelDescription['']['']:aggregationPeriod[key] = modelDescription[''][''][key]if '' in modelDescription['']['']:for (fieldName, func) in modelDescription['']['']['']:aggFunctionsDict[fieldName] = str(func)hasAggregation = Falsefor v in list(aggregationPeriod.values()):if v != :hasAggregation = TruebreakaggFunctionList = list(aggFunctionsDict.items())aggregationInfo = dict(aggregationPeriod)aggregationInfo[''] = aggFunctionListstreamDef = copy.deepcopy(modelDescription[''])streamDef[''] = copy.deepcopy(aggregationInfo)return streamDef", "docstring": "Generate stream definition based on", "id": "f17579:c1:m1"} {"signature": "def __del__(self):", "body": "self.close()return", "docstring": "Destructor; NOTE: this is not guaranteed to be called (bugs like\n circular references could prevent it from being called).", "id": "f17579:c1:m2"} {"signature": "def close(self):", "body": "if self._tempDir is not None and os.path.isdir(self._tempDir):self.logger.debug(\"\", self._tempDir)shutil.rmtree(self._tempDir)self._tempDir = Nonereturn", "docstring": "Deletes temporary system objects/files.", "id": "f17579:c1:m3"} {"signature": "def _readPermutationsFile(self, filename, modelDescription):", "body": "vars = {}permFile = exec(compile(open(filename, \"\").read(), filename, ''), globals(), vars)self._reportKeys = vars.get('', [])self._filterFunc = vars.get('', None)self._dummyModelParamsFunc = vars.get('', None)self._predictedField = None self._predictedFieldEncoder = None self._fixedFields = None self._fastSwarmModelParams = vars.get('', None)if self._fastSwarmModelParams is not None:encoders = self._fastSwarmModelParams['']['']['']['']self._fixedFields = []for fieldName in encoders:if encoders[fieldName] is not None:self._fixedFields.append(fieldName)if '' in vars:self._fixedFields = vars['']self._minParticlesPerSwarm = vars.get('')if self._minParticlesPerSwarm == None:self._minParticlesPerSwarm = Configuration.get('')self._minParticlesPerSwarm = int(self._minParticlesPerSwarm)self._killUselessSwarms = vars.get('', True)self._inputPredictedField = vars.get(\"\", \"\")self._tryAll3FieldCombinations = vars.get('', False)self._tryAll3FieldCombinationsWTimestamps = vars.get('', False)minFieldContribution = vars.get('', None)if minFieldContribution is not 
None:self._minFieldContribution = minFieldContributionmaxBranching = vars.get('', None)if maxBranching is not None:self._maxBranching = maxBranchingif '' in vars:self._optimizeKey = vars['']self._maximize = Trueelif '' in vars:self._optimizeKey = vars['']self._maximize = Falseelse:raise RuntimeError(\"\"\"\")maxModels = vars.get('')if maxModels is not None:if self._maxModels is None:self._maxModels = maxModelselse:raise RuntimeError('''')inferenceType = modelDescription['']['']if not InferenceType.validate(inferenceType):raise ValueError(\"\" %inferenceType)if inferenceType in [InferenceType.TemporalMultiStep,InferenceType.NontemporalMultiStep]:classifierOnlyEncoder = Nonefor encoder in list(modelDescription[\"\"][\"\"][\"\"].values()):if encoder.get(\"\", False)and encoder[\"\"] == vars.get('', None):classifierOnlyEncoder = encoderbreakif classifierOnlyEncoder is None or self._inputPredictedField==\"\":self._searchType = HsSearchType.legacyTemporalelse:self._searchType = HsSearchType.temporalelif inferenceType in [InferenceType.TemporalNextStep,InferenceType.TemporalAnomaly]:self._searchType = HsSearchType.legacyTemporalelif inferenceType in (InferenceType.TemporalClassification,InferenceType.NontemporalClassification):self._searchType = HsSearchType.classificationelse:raise RuntimeError(\"\" % inferenceType)self._predictedField = vars.get('', None)if self._predictedField is None:raise RuntimeError(\"\"\"\" % filename)if '' not in vars:raise RuntimeError(\"\" % filename)if not isinstance(vars[''], dict):raise RuntimeError(\"\"\"\")self._encoderNames = []self._permutations = vars['']self._flattenedPermutations = dict()def _flattenPermutations(value, keys):if '' in keys[-]:raise RuntimeError(\"\"\"\")flatKey = _flattenKeys(keys)if isinstance(value, PermuteEncoder):self._encoderNames.append(flatKey)if value.fieldName == self._predictedField:self._predictedFieldEncoder = flatKeyfor encKey, encValue in value.kwArgs.items():if isinstance(encValue, PermuteVariable):self._flattenedPermutations['' % (flatKey, encKey)] = encValueelif isinstance(value, PermuteVariable):self._flattenedPermutations[flatKey] = valueelse:if isinstance(value, PermuteVariable):self._flattenedPermutations[key] = valuerApply(self._permutations, _flattenPermutations)", "docstring": "Read the permutations file and initialize the following member variables:\n _predictedField: field name of the field we are trying to\n predict\n _permutations: Dict containing the full permutations dictionary.\n _flattenedPermutations: Dict containing the flattened version of\n _permutations. The keys leading to the value in the dict are joined\n with a period to create the new key and permute variables within\n encoders are pulled out of the encoder.\n _encoderNames: keys from self._permutations of only the encoder\n variables.\n _reportKeys: The 'report' list from the permutations file.\n This is a list of the items from each experiment's pickled\n results file that should be included in the final report. The\n format of each item is a string of key names separated by colons,\n each key being one level deeper into the experiment results\n dict. For example, 'key1:key2'.\n _filterFunc: a user-supplied function that can be used to\n filter out specific permutation combinations.\n _optimizeKey: which report key to optimize for\n _maximize: True if we should try and maximize the optimizeKey\n metric. 
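The _flattenedPermutations member described above joins nested dictionary keys with periods. A simplified sketch of that flattening, assuming a plain nested dict (the real code walks the structure with an rApply-style helper and special-cases encoder variables):

def flatten_permutations(nested, prefix=()):
    # Simplified sketch: join nested keys with '.' to build flat variable names.
    flat = {}
    for key, value in nested.items():
        keys = prefix + (key,)
        if isinstance(value, dict):
            flat.update(flatten_permutations(value, keys))
        else:
            flat['.'.join(keys)] = value
    return flat

# flatten_permutations({'modelParams': {'clParams': {'alpha': 0.1}}})
#   -> {'modelParams.clParams.alpha': 0.1}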
False if we should minimize it.\n _dummyModelParamsFunc: a user-supplied function that can be used to\n artificially generate HTMPredictionModel results. When supplied,\n the model is not actually run through the OPF, but instead is run\n through a \"Dummy Model\" (nupic.swarming.ModelRunner.\n OPFDummyModelRunner). This function returns the params dict used\n to control various options in the dummy model (the returned metric,\n the execution time, etc.). This is used for hypersearch algorithm\n development.\n\nParameters:\n---------------------------------------------------------\nfilename: Name of permutations file\nretval: None", "id": "f17579:c1:m4"} {"signature": "def getExpectedNumModels(self):", "body": "return -", "docstring": "Computes the number of models that are expected to complete as part of\n this instances's HyperSearch.\n\n NOTE: This is compute-intensive for HyperSearches with a huge number of\n combinations.\n\n NOTE/TODO: THIS ONLY WORKS FOR RONOMATIC: This method is exposed for the\n benefit of perutations_runner.py for use in progress\n reporting.\n\n Parameters:\n ---------------------------------------------------------\n retval: The total number of expected models, if known; -1 if unknown", "id": "f17579:c1:m5"} {"signature": "def getModelNames(self):", "body": "return None", "docstring": "Generates a list of model names that are expected to complete as part of\n this instances's HyperSearch.\n\n NOTE: This is compute-intensive for HyperSearches with a huge number of\n combinations.\n\n NOTE/TODO: THIS ONLY WORKS FOR RONOMATIC: This method is exposed for the\n benefit of perutations_runner.py.\n\n Parameters:\n ---------------------------------------------------------\n retval: List of model names for this HypersearchV2 instance, or\n None of not applicable", "id": "f17579:c1:m6"} {"signature": "def getPermutationVariables(self):", "body": "return self._flattenedPermutations", "docstring": "Returns a dictionary of permutation variables.\n\n Parameters:\n ---------------------------------------------------------\n retval: A dictionary of permutation variables; keys are\n flat permutation variable names and each value is\n a sub-class of PermuteVariable.", "id": "f17579:c1:m7"} {"signature": "def getComplexVariableLabelLookupDict(self):", "body": "raise NotImplementedError", "docstring": "Generates a lookup dictionary of permutation variables whose values\n are too complex for labels, so that artificial labels have to be generated\n for them.\n\n Parameters:\n ---------------------------------------------------------\n retval: A look-up dictionary of permutation\n variables whose values are too complex for labels, so\n artificial labels were generated instead (e.g., \"Choice0\",\n \"Choice1\", etc.); the key is the name of the complex variable\n and the value is:\n dict(labels=, values=).", "id": "f17579:c1:m8"} {"signature": "def getOptimizationMetricInfo(self):", "body": "return (self._optimizeKey, self._maximize)", "docstring": "Retrives the optimization key name and optimization function.\n\n Parameters:\n ---------------------------------------------------------\n retval: (optimizationMetricKey, maximize)\n optimizationMetricKey: which report key to optimize for\n maximize: True if we should try and maximize the optimizeKey\n metric. 
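getOptimizationMetricInfo above exposes the pair (optimizationMetricKey, maximize). A hedged sketch of how a caller could fold that pair into a single "smaller is better" error score; the function name and the sign-flip convention are illustrative, not the library's exact bookkeeping:

def optimization_err_score(all_metrics, optimize_key, maximize):
    # Flip the sign when maximizing so the search can always minimize.
    value = all_metrics[optimize_key]
    return -value if maximize else value

# optimization_err_score({'accuracy': 0.92}, 'accuracy', maximize=True) -> -0.92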
False if we should minimize it.", "id": "f17579:c1:m9"} {"signature": "def _checkForOrphanedModels (self):", "body": "self.logger.debug(\"\" %(self._modelOrphanIntervalSecs))while True:orphanedModelId = self._cjDAO.modelAdoptNextOrphan(self._jobID,self._modelOrphanIntervalSecs)if orphanedModelId is None:returnself.logger.info(\"\" % (orphanedModelId))for attempt in range():paramsHash = hashlib.md5(\"\" % (orphanedModelId,attempt)).digest()particleHash = hashlib.md5(\"\" % (orphanedModelId,attempt)).digest()try:self._cjDAO.modelSetFields(orphanedModelId,dict(engParamsHash=paramsHash,engParticleHash=particleHash))success = Trueexcept:success = Falseif success:breakif not success:raise RuntimeError(\"\"\"\")self._cjDAO.modelSetCompleted(modelID=orphanedModelId,completionReason=ClientJobsDAO.CMPL_REASON_ORPHAN,completionMsg=\"\")self._resultsDB.update(modelID=orphanedModelId,modelParams=None,modelParamsHash=paramsHash,metricResult=None,completed = True,completionReason = ClientJobsDAO.CMPL_REASON_ORPHAN,matured = True,numRecords = )", "docstring": "If there are any models that haven't been updated in a while, consider\n them dead, and mark them as hidden in our resultsDB. We also change the\n paramsHash and particleHash of orphaned models so that we can\n re-generate that particle and/or model again if we desire.\n\n Parameters:\n ----------------------------------------------------------------------\n retval:", "id": "f17579:c1:m10"} {"signature": "def _hsStatePeriodicUpdate(self, exhaustedSwarmId=None):", "body": "if self._hsState is None:self._hsState = HsState(self)self._hsState.readStateFromDB()completedSwarms = set()if exhaustedSwarmId is not None:self.logger.info(\"\"\"\"\"\" % (exhaustedSwarmId))(particles, _, _, _, _) = self._resultsDB.getParticleInfos(swarmId=exhaustedSwarmId, matured=False)if len(particles) > :exhaustedSwarmStatus = ''else:exhaustedSwarmStatus = ''if self._killUselessSwarms:self._hsState.killUselessSwarms()completingSwarms = self._hsState.getCompletingSwarms()for swarmId in completingSwarms:(particles, _, _, _, _) = self._resultsDB.getParticleInfos(swarmId=swarmId, matured=False)if len(particles) == :completedSwarms.add(swarmId)completedSwarmGens = self._resultsDB.getMaturedSwarmGenerations()priorCompletedSwarms = self._hsState.getCompletedSwarms()for (swarmId, genIdx, errScore) in completedSwarmGens:if swarmId in priorCompletedSwarms:continuecompletedList = self._swarmTerminator.recordDataPoint(swarmId=swarmId, generation=genIdx, errScore=errScore)statusMsg = \"\"\"\" % (genIdx, swarmId, errScore)if len(completedList) > :statusMsg = \"\" % (statusMsg, completedList)self.logger.info(statusMsg)self._cjDAO.jobSetFields (jobID=self._jobID,fields=dict(engStatus=statusMsg),useConnectionID=False,ignoreUnchanged=True)if '' in os.environ:while True:resultsStr = self._cjDAO.jobGetFields(self._jobID, [''])[]if resultsStr is None:results = {}else:results = json.loads(resultsStr)if not '' in results:results[''] = {}for swarm in completedList:if swarm not in results['']:results[''][swarm] = (genIdx,self._swarmTerminator.swarmScores[swarm])newResultsStr = json.dumps(results)if newResultsStr == resultsStr:breakupdated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,fieldName='',curValue=resultsStr,newValue = json.dumps(results))if updated:breakif len(completedList) > :for name in completedList:self.logger.info(\"\"\"\" % (name, genIdx, errScore))completedSwarms = completedSwarms.union(completedList)if len(completedSwarms)== and (exhaustedSwarmId is None):returnwhile True:if 
exhaustedSwarmId is not None:self._hsState.setSwarmState(exhaustedSwarmId, exhaustedSwarmStatus)for swarmId in completedSwarms:self._hsState.setSwarmState(swarmId, '')if not self._hsState.isDirty():returnsuccess = self._hsState.writeStateToDB()if success:jobResultsStr = self._cjDAO.jobGetFields(self._jobID, [''])[]if jobResultsStr is not None:jobResults = json.loads(jobResultsStr)bestModelId = jobResults.get('', None)else:bestModelId = Nonefor swarmId in list(completedSwarms):(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(swarmId=swarmId, completed=False)if bestModelId in modelIds:modelIds.remove(bestModelId)if len(modelIds) == :continueself.logger.info(\"\"\"\" % (swarmId,str(modelIds)))for modelId in modelIds:self._cjDAO.modelSetFields(modelId,dict(engStop=ClientJobsDAO.STOP_REASON_KILLED),ignoreUnchanged = True)returnself._hsState.readStateFromDB()self.logger.debug(\"\"\"\" % (pprint.pformat(self._hsState._state, indent=)))", "docstring": "Periodically, check to see if we should remove a certain field combination\nfrom evaluation (because it is doing so poorly) or move on to the next\nsprint (add in more fields).\n\nThis method is called from _getCandidateParticleAndSwarm(), which is called\nright before we try and create a new model to run.\n\nParameters:\n-----------------------------------------------------------------------\nremoveSwarmId: If not None, force a change to the current set of active\n swarms by removing this swarm. This is used in situations\n where we can't find any new unique models to create in\n this swarm. In these situations, we update the hypersearch\n state regardless of the timestamp of the last time another\n worker updated it.", "id": "f17579:c1:m11"} {"signature": "def _getCandidateParticleAndSwarm (self, exhaustedSwarmId=None):", "body": "jobCancel = self._cjDAO.jobGetFields(self._jobID, [''])[]if jobCancel:self._jobCancelled = True(workerCmpReason, workerCmpMsg) = self._cjDAO.jobGetFields(self._jobID,['', ''])if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:self.logger.info(\"\")self._cjDAO.jobSetFields(self._jobID,dict(workerCompletionMsg=\"\"),useConnectionID=False, ignoreUnchanged=True)else:self.logger.error(\"\"\"\" %(workerCmpReason, workerCmpMsg))return (True, None, None)if self._hsState is not None:priorActiveSwarms = self._hsState.getActiveSwarms()else:priorActiveSwarms = Noneself._hsStatePeriodicUpdate(exhaustedSwarmId=exhaustedSwarmId)activeSwarms = self._hsState.getActiveSwarms()if activeSwarms != priorActiveSwarms:self.logger.info(\"\" % (activeSwarms,priorActiveSwarms))self.logger.debug(\"\" % (activeSwarms))totalCmpModels = self._resultsDB.getNumCompletedModels()if totalCmpModels > :numErrs = self._resultsDB.getNumErrModels()if (float(numErrs) / totalCmpModels) > self._maxPctErrModels:errModelIds = self._resultsDB.getErrModelIds()resInfo = self._cjDAO.modelsGetResultAndStatus([errModelIds[]])[]modelErrMsg = resInfo.completionMsgcmpMsg = \"\"\"\" %(ErrorCodes.tooManyModelErrs, numErrs, totalCmpModels,modelErrMsg)self.logger.error(cmpMsg)workerCmpReason = self._cjDAO.jobGetFields(self._jobID,[''])[]if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:self._cjDAO.jobSetFields(self._jobID,fields=dict(cancel=True,workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,workerCompletionMsg = cmpMsg),useConnectionID=False,ignoreUnchanged=True)return (True, None, None)if self._hsState.isSearchOver():cmpMsg = 
\"\"\"\"self.logger.info(cmpMsg)self._cjDAO.jobSetFields(self._jobID,dict(workerCompletionMsg=cmpMsg),useConnectionID=False, ignoreUnchanged=True)return (True, None, None)sprintIdx = -while True:sprintIdx += (active, eos) = self._hsState.isSprintActive(sprintIdx)if eos:if self._hsState.anyGoodSprintsActive():self.logger.info(\"\"\"\")return (False, None, None)else:cmpMsg = \"\"\"\"self._cjDAO.jobSetFields(self._jobID,dict(workerCompletionMsg=cmpMsg),useConnectionID=False, ignoreUnchanged=True)self.logger.info(cmpMsg)return (True, None, None)if not active:if not self._speculativeParticles:if not self._hsState.isSprintCompleted(sprintIdx):self.logger.info(\"\"\"\" % (sprintIdx))return (False, None, None)continueswarmIds = self._hsState.getActiveSwarms(sprintIdx)for swarmId in swarmIds:firstNonFullGenIdx = self._resultsDB.firstNonFullGeneration(swarmId=swarmId,minNumParticles=self._minParticlesPerSwarm)if firstNonFullGenIdx is None:continueif firstNonFullGenIdx < self._resultsDB.highestGeneration(swarmId):self.logger.info(\"\"\"\" % (firstNonFullGenIdx, swarmId, sprintIdx))(allParticles, allModelIds, errScores, completed, matured) =self._resultsDB.getOrphanParticleInfos(swarmId, firstNonFullGenIdx)if len(allModelIds) > :newParticleId = Trueself.logger.info(\"\")else:newParticleId = Trueself.logger.info(\"\")(allParticles, allModelIds, errScores, completed, matured) =self._resultsDB.getParticleInfos(swarmId=swarmId,genIdx=firstNonFullGenIdx)modelId = random.choice(allModelIds)self.logger.info(\"\" % (modelId))(particleState, _, _, _, _) = self._resultsDB.getParticleInfo(modelId)particle = Particle(hsObj = self,resultsDB = self._resultsDB,flattenedPermuteVars=self._flattenedPermutations,newFromClone=particleState,newParticleId=newParticleId)return (False, particle, swarmId)swarmSizes = numpy.array([self._resultsDB.numModels(x) for x in swarmIds])swarmSizeAndIdList = list(zip(swarmSizes, swarmIds))swarmSizeAndIdList.sort()for (_, swarmId) in swarmSizeAndIdList:(allParticles, allModelIds, errScores, completed, matured) = (self._resultsDB.getParticleInfos(swarmId))if len(allParticles) < self._minParticlesPerSwarm:particle = Particle(hsObj=self,resultsDB=self._resultsDB,flattenedPermuteVars=self._flattenedPermutations,swarmId=swarmId,newFarFrom=allParticles)bestPriorModel = Noneif sprintIdx >= :(bestPriorModel, errScore) = self._hsState.bestModelInSprint()if bestPriorModel is not None:self.logger.info(\"\"\"\" % (, str(bestPriorModel), errScore))(baseState, modelId, errScore, completed, matured)= self._resultsDB.getParticleInfo(bestPriorModel)particle.copyEncoderStatesFrom(baseState)particle.copyVarStatesFrom(baseState, [''])whichVars = []for varName in baseState['']:if '' in varName:whichVars.append(varName)particle.newPosition(whichVars)self.logger.debug(\"\"\"\" % (str(particle)))return (False, particle, swarmId)(readyParticles, readyModelIds, readyErrScores, _, _) = (self._resultsDB.getParticleInfos(swarmId, genIdx=None,matured=True, lastDescendent=True))if len(readyParticles) > :readyGenIdxs = [x[''] for x in readyParticles]sortedGenIdxs = sorted(set(readyGenIdxs))genIdx = sortedGenIdxs[]useParticle = Nonefor particle in readyParticles:if particle[''] == genIdx:useParticle = particlebreakif not self._speculativeParticles:(particles, _, _, _, _) = self._resultsDB.getParticleInfos(swarmId, genIdx=genIdx, matured=False)if len(particles) > :continueparticle = Particle(hsObj=self,resultsDB=self._resultsDB,flattenedPermuteVars=self._flattenedPermutations,evolveFromState=useParticle)return (False, 
particle, swarmId)if not self._speculativeParticles:self.logger.info(\"\"\"\"% (str(swarmIds)))return (False, None, None)", "docstring": "Find or create a candidate particle to produce a new model.\n\n At any one time, there is an active set of swarms in the current sprint, where\n each swarm in the sprint represents a particular combination of fields.\n Ideally, we should try to balance the number of models we have evaluated for\n each swarm at any time.\n\n This method will see how many models have been evaluated for each active\n swarm in the current active sprint(s) and then try and choose a particle\n from the least represented swarm in the first possible active sprint, with\n the following constraints/rules:\n\n for each active sprint:\n for each active swarm (preference to those with least# of models so far):\n 1.) The particle will be created from new (generation #0) if there are not\n already self._minParticlesPerSwarm particles in the swarm.\n\n 2.) Find the first gen that has a completed particle and evolve that\n particle to the next generation.\n\n 3.) If we got to here, we know that we have satisfied the min# of\n particles for the swarm, and they are all currently running (probably at\n various generation indexes). Go onto the next swarm\n\n If we couldn't find a swarm to allocate a particle in, go onto the next\n sprint and start allocating particles there....\n\n\n Parameters:\n ----------------------------------------------------------------\n exhaustedSwarmId: If not None, force a change to the current set of active\n swarms by marking this swarm as either 'completing' or\n 'completed'. If there are still models being evaluaed in\n it, mark it as 'completing', else 'completed. This is\n used in situations where we can't find any new unique\n models to create in this swarm. In these situations, we\n force an update to the hypersearch state so no other\n worker wastes time try to use this swarm.\n\n retval: (exit, particle, swarm)\n exit: If true, this worker is ready to exit (particle and\n swarm will be None)\n particle: Which particle to run\n swarm: which swarm the particle is in\n\n NOTE: When particle and swarm are None and exit is False, it\n means that we need to wait for one or more other worker(s) to\n finish their respective models before we can pick a particle\n to run. 
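The docstring above says particles are preferentially allocated to the active swarm with the fewest evaluated models in the current sprint. A tiny sketch of that ordering, with made-up swarm names and counts (the real counts come from the results database):

# Hypothetical model counts per active swarm in the current sprint.
swarm_sizes = {'fieldA_enc': 7, 'fieldA_enc.fieldB_enc': 3, 'fieldC_enc': 5}

ordered = sorted(swarm_sizes.items(), key=lambda kv: kv[1])
next_swarm = ordered[0][0]      # -> 'fieldA_enc.fieldB_enc'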
This will generally only happen when speculativeParticles\n is set to False.", "id": "f17579:c1:m12"} {"signature": "def _okToExit(self):", "body": "print(\"\", file=sys.stderr)if not self._jobCancelled:(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(matured=False)if len(modelIds) > :self.logger.info(\"\"\"\"\"\")time.sleep( * random.random())return False(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(completed=False)for modelId in modelIds:self.logger.info(\"\"% (modelId))self._cjDAO.modelSetFields(modelId,dict(engStop=ClientJobsDAO.STOP_REASON_STOPPED),ignoreUnchanged = True)self._hsStatePeriodicUpdate()pctFieldContributions, absFieldContributions =self._hsState.getFieldContributions()jobResultsStr = self._cjDAO.jobGetFields(self._jobID, [''])[]if jobResultsStr is not None:jobResults = json.loads(jobResultsStr)else:jobResults = {}if pctFieldContributions != jobResults.get('', None):jobResults[''] = pctFieldContributionsjobResults[''] = absFieldContributionsisUpdated = self._cjDAO.jobSetFieldIfEqual(self._jobID,fieldName='',curValue=jobResultsStr,newValue=json.dumps(jobResults))if isUpdated:self.logger.info('',pctFieldContributions)else:self.logger.info('''')return True", "docstring": "Test if it's OK to exit this worker. This is only called when we run\n out of prospective new models to evaluate. This method sees if all models\n have matured yet. If not, it will sleep for a bit and return False. This\n will indicate to the hypersearch worker that we should keep running, and\n check again later. This gives this worker a chance to pick up and adopt any\n model which may become orphaned by another worker before it matures.\n\n If all models have matured, this method will send a STOP message to all\n matured, running models (presummably, there will be just one - the model\n which thinks it's the best) before returning True.", "id": "f17579:c1:m13"} {"signature": "def createModels(self, numModels=):", "body": "self._checkForOrphanedModels()modelResults = []for _ in range(numModels):candidateParticle = Noneif (self._maxModels is not None and(self._resultsDB.numModels() - self._resultsDB.getNumErrModels()) >=self._maxModels):return (self._okToExit(), [])if candidateParticle is None:(exitNow, candidateParticle, candidateSwarm) = (self._getCandidateParticleAndSwarm())if candidateParticle is None:if exitNow:return (self._okToExit(), [])else:print(\"\", file=sys.stderr)time.sleep(self._speculativeWaitSecondsMax * random.random())return (False, [])useEncoders = candidateSwarm.split('')numAttempts = while True:if numAttempts >= :self.logger.debug(\"\"\"\" % (numAttempts))candidateParticle.agitate()position = candidateParticle.getPosition()structuredParams = dict()def _buildStructuredParams(value, keys):flatKey = _flattenKeys(keys)if flatKey in self._encoderNames:if flatKey in useEncoders:return value.getDict(flatKey, position)else:return Noneelif flatKey in position:return position[flatKey]else:return valuestructuredParams = rCopy(self._permutations,_buildStructuredParams,discardNoneKeys=False)modelParams = dict(structuredParams=structuredParams,particleState = candidateParticle.getState())m = hashlib.md5()m.update(sortedJSONDumpS(structuredParams))m.update(self._baseDescriptionHash)paramsHash = m.digest()particleInst = \"\" % (modelParams[''][''],modelParams[''][''])particleHash = hashlib.md5(particleInst).digest()numAttempts += if self._filterFunc and not self._filterFunc(structuredParams):valid = Falseelse:valid = Trueif valid and 
self._resultsDB.getModelIDFromParamsHash(paramsHash) is None:breakif numAttempts >= self._maxUniqueModelAttempts:(exitNow, candidateParticle, candidateSwarm)= self._getCandidateParticleAndSwarm(exhaustedSwarmId=candidateSwarm)if candidateParticle is None:if exitNow:return (self._okToExit(), [])else:time.sleep(self._speculativeWaitSecondsMax * random.random())return (False, [])numAttempts = useEncoders = candidateSwarm.split('')if self.logger.getEffectiveLevel() <= logging.DEBUG:self.logger.debug(\"\"% (pprint.pformat(modelParams, indent=)))modelResults.append((modelParams, paramsHash, particleHash))return (False, modelResults)", "docstring": "Create one or more new models for evaluation. These should NOT be models\n that we already know are in progress (i.e. those that have been sent to us\n via recordModelProgress). We return a list of models to the caller\n (HypersearchWorker) and if one can be successfully inserted into\n the models table (i.e. it is not a duplicate) then HypersearchWorker will\n turn around and call our runModel() method, passing in this model. If it\n is a duplicate, HypersearchWorker will call this method again. A model\n is a duplicate if either the modelParamsHash or particleHash is\n identical to another entry in the model table.\n\n The numModels is provided by HypersearchWorker as a suggestion as to how\n many models to generate. This particular implementation only ever returns 1\n model.\n\n Before choosing some new models, we first do a sweep for any models that\n may have been abandonded by failed workers. If/when we detect an abandoned\n model, we mark it as complete and orphaned and hide it from any subsequent\n queries to our ResultsDB. This effectively considers it as if it never\n existed. We also change the paramsHash and particleHash in the model record\n of the models table so that we can create another model with the same\n params and particle status and run it (which we then do immediately).\n\n The modelParamsHash returned for each model should be a hash (max allowed\n size of ClientJobsDAO.hashMaxSize) that uniquely identifies this model by\n it's params and the optional particleHash should be a hash of the particleId\n and generation index. Every model that gets placed into the models database,\n either by this worker or another worker, will have these hashes computed for\n it. The recordModelProgress gets called for every model in the database and\n the hash is used to tell which, if any, are the same as the ones this worker\n generated.\n\n NOTE: We check first ourselves for possible duplicates using the paramsHash\n before we return a model. If HypersearchWorker failed to insert it (because\n some other worker beat us to it), it will turn around and call our\n recordModelProgress with that other model so that we now know about it. It\n will then call createModels() again.\n\n This methods returns an exit boolean and the model to evaluate. If there is\n no model to evalulate, we may return False for exit because we want to stay\n alive for a while, waiting for all other models to finish. This gives us\n a chance to detect and pick up any possibly orphaned model by another\n worker.\n\n Parameters:\n ----------------------------------------------------------------------\n numModels: number of models to generate\n retval: (exit, models)\n exit: true if this worker should exit.\n models: list of tuples, one for each model. 
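createModels above detects duplicate models by hashing the structured params, mixed with a hash of the base description, using MD5. A minimal sketch of that dedup key, assuming json.dumps with sorted keys as the deterministic serializer (the real code uses its own sortedJSONDumpS helper):

import hashlib
import json

def params_hash(structured_params, base_description_hash=b''):
    # Deterministic serialization + md5 digest as the model's identity key.
    m = hashlib.md5()
    m.update(json.dumps(structured_params, sort_keys=True).encode('utf-8'))
    m.update(base_description_hash)
    return m.digest()

# Two identical param dicts map to the same digest, so a second insert attempt
# with the same params can be recognized as a duplicate.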
Each tuple contains:\n (modelParams, modelParamsHash, particleHash)\n\n modelParams is a dictionary containing the following elements:\n\n structuredParams: dictionary containing all variables for\n this model, with encoders represented as a dict within\n this dict (or None if they are not included.\n\n particleState: dictionary containing the state of this\n particle. This includes the position and velocity of\n each of it's variables, the particleId, and the particle\n generation index. It contains the following keys:\n\n id: The particle Id of the particle we are using to\n generate/track this model. This is a string of the\n form .\n genIdx: the particle's generation index. This starts at 0\n and increments every time we move the particle to a\n new position.\n swarmId: The swarmId, which is a string of the form\n .... that describes this swarm\n varStates: dict of the variable states. The key is the\n variable name, the value is a dict of the variable's\n position, velocity, bestPosition, bestResult, etc.", "id": "f17579:c1:m15"} {"signature": "def recordModelProgress(self, modelID, modelParams, modelParamsHash, results,completed, completionReason, matured, numRecords):", "body": "if results is None:metricResult = Noneelse:metricResult = list(results[].values())[]errScore = self._resultsDB.update(modelID=modelID,modelParams=modelParams,modelParamsHash=modelParamsHash,metricResult=metricResult, completed=completed,completionReason=completionReason, matured=matured,numRecords=numRecords)self.logger.debug('''' ,modelID, completed, completionReason, numRecords, errScore)(bestModelID, bestResult) = self._resultsDB.bestModelIdAndErrScore()self.logger.debug('' %(bestResult, bestModelID))", "docstring": "Record or update the results for a model. This is called by the\n HSW whenever it gets results info for another model, or updated results\n on a model that is still running.\n\n The first time this is called for a given modelID, the modelParams will\n contain the params dict for that model and the modelParamsHash will contain\n the hash of the params. Subsequent updates of the same modelID will\n have params and paramsHash values of None (in order to save overhead).\n\n The Hypersearch object should save these results into it's own working\n memory into some table, which it then uses to determine what kind of\n new models to create next time createModels() is called.\n\n Parameters:\n ----------------------------------------------------------------------\n modelID: ID of this model in models table\n modelParams: params dict for this model, or None if this is just an update\n of a model that it already previously reported on.\n\n See the comments for the createModels() method for a\n description of this dict.\n\n modelParamsHash: hash of the modelParams dict, generated by the worker\n that put it into the model database.\n results: tuple containing (allMetrics, optimizeMetric). Each is a\n dict containing metricName:result pairs. .\n May be none if we have no results yet.\n completed: True if the model has completed evaluation, False if it\n is still running (and these are online results)\n completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates\n matured: True if this model has matured. In most cases, once a\n model matures, it will complete as well. 
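The docstring above spells out the shape of each returned modelParams dict. A hypothetical literal with made-up values; only the nesting mirrors the description, and the exact particle-id format is elided in the source:

model_params = {
    'structuredParams': {'modelParams': {'clParams': {'alpha': 0.05}}},
    'particleState': {
        'id': '0.3',                 # particle id (exact format elided above)
        'genIdx': 2,
        'swarmId': 'fieldA_enc.fieldB_enc',
        'varStates': {
            'modelParams.clParams.alpha': {
                'position': 0.05,
                'velocity': 0.01,
                'bestPosition': 0.04,
                'bestResult': 0.37,
            },
        },
    },
}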
The only time a\n model matures and does not complete is if it's currently\n the best model and we choose to keep it running to generate\n predictions.\n numRecords: Number of records that have been processed so far by this\n model.", "id": "f17579:c1:m16"} {"signature": "def runModel(self, modelID, jobID, modelParams, modelParamsHash,jobsDAO, modelCheckpointGUID):", "body": "if not self._createCheckpoints:modelCheckpointGUID = Noneself._resultsDB.update(modelID=modelID,modelParams=modelParams,modelParamsHash=modelParamsHash,metricResult = None,completed = False,completionReason = None,matured = False,numRecords = )structuredParams = modelParams['']if self.logger.getEffectiveLevel() <= logging.DEBUG:self.logger.debug(\"\" %(pprint.pformat(modelParams, indent=), modelID))cpuTimeStart = time.clock()logLevel = self.logger.getEffectiveLevel()try:if self._dummyModel is None or self._dummyModel is False:(cmpReason, cmpMsg) = runModelGivenBaseAndParams(modelID=modelID,jobID=jobID,baseDescription=self._baseDescription,params=structuredParams,predictedField=self._predictedField,reportKeys=self._reportKeys,optimizeKey=self._optimizeKey,jobsDAO=jobsDAO,modelCheckpointGUID=modelCheckpointGUID,logLevel=logLevel,predictionCacheMaxRecords=self._predictionCacheMaxRecords)else:dummyParams = dict(self._dummyModel)dummyParams[''] = structuredParamsif self._dummyModelParamsFunc is not None:permInfo = dict(structuredParams)permInfo [''] = modelParams['']['']dummyParams.update(self._dummyModelParamsFunc(permInfo))(cmpReason, cmpMsg) = runDummyModel(modelID=modelID,jobID=jobID,params=dummyParams,predictedField=self._predictedField,reportKeys=self._reportKeys,optimizeKey=self._optimizeKey,jobsDAO=jobsDAO,modelCheckpointGUID=modelCheckpointGUID,logLevel=logLevel,predictionCacheMaxRecords=self._predictionCacheMaxRecords)jobsDAO.modelSetCompleted(modelID,completionReason = cmpReason,completionMsg = cmpMsg,cpuTime = time.clock() - cpuTimeStart)except InvalidConnectionException as e:self.logger.warn(\"\", e)", "docstring": "Run the given model.\n\n This runs the model described by 'modelParams'. Periodically, it updates\n the results seen on the model to the model database using the databaseAO\n (database Access Object) methods.\n\n Parameters:\n -------------------------------------------------------------------------\n modelID: ID of this model in models table\n\n jobID: ID for this hypersearch job in the jobs table\n\n modelParams: parameters of this specific model\n modelParams is a dictionary containing the name/value\n pairs of each variable we are permuting over. Note that\n variables within an encoder spec have their name\n structure as:\n .\n\n modelParamsHash: hash of modelParamValues\n\n jobsDAO jobs data access object - the interface to the jobs\n database where model information is stored\n\n modelCheckpointGUID: A persistent, globally-unique identifier for\n constructing the model checkpoint key", "id": "f17579:c1:m17"} {"signature": "def getState(self):", "body": "raise NotImplementedError", "docstring": "Return the current state of this particle. This is used for\n communicating our state into a model record entry so that it can be\n instantiated on another worker.", "id": "f17581:c0:m1"} {"signature": "def setState(self, state):", "body": "raise NotImplementedError", "docstring": "Set the current state of this particle. 
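The recordModelProgress record above folds each incoming result into the results bookkeeping and then asks for the best (modelID, errScore) seen so far. A toy stand-in for that tracking, treating "no result yet" as numpy.inf; this is a sketch, not the ResultsDB implementation:

import numpy

class BestModelTracker:
    def __init__(self):
        self.best_model_id = None
        self.best_err = numpy.inf

    def update(self, model_id, err_score):
        # Keep the lowest error score and remember which model produced it.
        if err_score is not None and err_score < self.best_err:
            self.best_model_id, self.best_err = model_id, err_score
        return self.best_model_id, self.best_err

tracker = BestModelTracker()
tracker.update(101, 0.42)
tracker.update(102, None)        # still running, no score yet
tracker.update(103, 0.31)        # -> (103, 0.31)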
This is counterpart to getState.", "id": "f17581:c0:m2"} {"signature": "def getPosition(self):", "body": "raise NotImplementedError", "docstring": "for int vars, returns position to nearest int\n\n Parameters:\n --------------------------------------------------------------\n retval: current position", "id": "f17581:c0:m3"} {"signature": "def agitate(self):", "body": "raise NotImplementedError", "docstring": "This causes the variable to jiggle away from its current position.\n It does this by increasing its velocity by a multiplicative factor.\n Every time agitate() is called, the velocity will increase. In this way,\n you can call agitate over and over again until the variable reaches a\n new position.", "id": "f17581:c0:m4"} {"signature": "def newPosition(self, globalBestPosition, rng):", "body": "raise NotImplementedError", "docstring": "Choose a new position based on results obtained so far from other\n particles and the passed in globalBestPosition.\n\n Parameters:\n --------------------------------------------------------------\n globalBestPosition: global best position for this colony\n rng: instance of random.Random() used for generating\n random numbers\n retval: new position", "id": "f17581:c0:m5"} {"signature": "def pushAwayFrom(self, otherVars, rng):", "body": "raise NotImplementedError", "docstring": "Choose a new position that is as far away as possible from all\n 'otherVars', where 'otherVars' is a list of PermuteVariable instances.\n\n Parameters:\n --------------------------------------------------------------\n otherVars: list of other PermuteVariables to push away from\n rng: instance of random.Random() used for generating\n random numbers", "id": "f17581:c0:m6"} {"signature": "def resetVelocity(self, rng):", "body": "raise NotImplementedError", "docstring": "Reset the velocity to be some fraction of the total distance. This\n is called usually when we start a new swarm and want to start at the\n previous best position found in the previous swarm but with a\n velocity which is a known fraction of the total distance between min\n and max.\n\n Parameters:\n --------------------------------------------------------------\n rng: instance of random.Random() used for generating\n random numbers", "id": "f17581:c0:m7"} {"signature": "def __init__(self, min, max, stepSize=None, inertia=None, cogRate=None,socRate=None):", "body": "super(PermuteFloat, self).__init__()self.min = minself.max = maxself.stepSize = stepSizeself._position = (self.max + self.min) / self._velocity = (self.max - self.min) / self._inertia = (float(Configuration.get(\"\"))if inertia is None else inertia)self._cogRate = (float(Configuration.get(\"\"))if cogRate is None else cogRate)self._socRate = (float(Configuration.get(\"\"))if socRate is None else socRate)self._bestPosition = self.getPosition()self._bestResult = None", "docstring": "Construct a variable that permutes over floating point values using\n the Particle Swarm Optimization (PSO) algorithm. See descriptions of\n PSO (i.e. 
http://en.wikipedia.org/wiki/Particle_swarm_optimization)\n for references to the inertia, cogRate, and socRate parameters.\n\n Parameters:\n -----------------------------------------------------------------------\n min: min allowed value of position\n max: max allowed value of position\n stepSize: if not None, the position must be at min + N * stepSize,\n where N is an integer\n inertia: The inertia for the particle.\n cogRate: This parameter controls how much the particle is affected\n by its distance from it's local best position\n socRate: This parameter controls how much the particle is affected\n by its distance from the global best position", "id": "f17581:c1:m0"} {"signature": "def __repr__(self):", "body": "return (\"\"\"\" % (self.min, self.max, self.stepSize, self.getPosition(),self._position, self._velocity, self._bestPosition,self._bestResult))", "docstring": "See comments in base class.", "id": "f17581:c1:m1"} {"signature": "def getState(self):", "body": "return dict(_position = self._position,position = self.getPosition(),velocity = self._velocity,bestPosition = self._bestPosition,bestResult = self._bestResult)", "docstring": "See comments in base class.", "id": "f17581:c1:m2"} {"signature": "def setState(self, state):", "body": "self._position = state['']self._velocity = state['']self._bestPosition = state['']self._bestResult = state['']", "docstring": "See comments in base class.", "id": "f17581:c1:m3"} {"signature": "def getPosition(self):", "body": "if self.stepSize is None:return self._positionnumSteps = (self._position - self.min) / self.stepSizenumSteps = int(round(numSteps))position = self.min + (numSteps * self.stepSize)position = max(self.min, position)position = min(self.max, position)return position", "docstring": "See comments in base class.", "id": "f17581:c1:m4"} {"signature": "def agitate(self):", "body": "self._velocity *= / self._inertiamaxV = (self.max - self.min)/if self._velocity > maxV:self._velocity = maxVelif self._velocity < -maxV:self._velocity = -maxVif self._position == self.max and self._velocity > :self._velocity *= -if self._position == self.min and self._velocity < :self._velocity *= -", "docstring": "See comments in base class.", "id": "f17581:c1:m5"} {"signature": "def newPosition(self, globalBestPosition, rng):", "body": "lb=float(Configuration.get(\"\"))ub=float(Configuration.get(\"\"))self._velocity = (self._velocity * self._inertia + rng.uniform(lb, ub) *self._cogRate * (self._bestPosition - self.getPosition()))if globalBestPosition is not None:self._velocity += rng.uniform(lb, ub) * self._socRate * (globalBestPosition - self.getPosition())self._position += self._velocityself._position = max(self.min, self._position)self._position = min(self.max, self._position)return self.getPosition()", "docstring": "See comments in base class.", "id": "f17581:c1:m6"} {"signature": "def pushAwayFrom(self, otherPositions, rng):", "body": "if self.max == self.min:returnnumPositions = len(otherPositions) * if numPositions == :returnstepSize = float(self.max-self.min) / numPositionspositions = numpy.arange(self.min, self.max + stepSize, stepSize)numPositions = len(positions)weights = numpy.zeros(numPositions)maxDistanceSq = - * (stepSize ** )for pos in otherPositions:distances = pos - positionsvarWeights = numpy.exp(numpy.power(distances, ) / maxDistanceSq)weights += varWeightspositionIdx = weights.argmin()self._position = positions[positionIdx]self._bestPosition = self.getPosition()self._velocity *= rng.choice([, -])", "docstring": "See comments in base 
class.", "id": "f17581:c1:m7"} {"signature": "def resetVelocity(self, rng):", "body": "maxVelocity = (self.max - self.min) / self._velocity = maxVelocity self._velocity *= rng.choice([, -])", "docstring": "See comments in base class.", "id": "f17581:c1:m8"} {"signature": "def __repr__(self):", "body": "return (\"\"\"\" % (self.min, self.max, self.stepSize, self.getPosition(),self._position, self._velocity, self._bestPosition,self._bestResult))", "docstring": "See comments in base class.", "id": "f17581:c2:m1"} {"signature": "def getPosition(self):", "body": "position = super(PermuteInt, self).getPosition()position = int(round(position))return position", "docstring": "See comments in base class.", "id": "f17581:c2:m2"} {"signature": "def __repr__(self):", "body": "return \"\" % (self.choices,self.choices[self._positionIdx])", "docstring": "See comments in base class.", "id": "f17581:c3:m1"} {"signature": "def getState(self):", "body": "return dict(_position = self.getPosition(),position = self.getPosition(),velocity = None,bestPosition = self.choices[self._bestPositionIdx],bestResult = self._bestResult)", "docstring": "See comments in base class.", "id": "f17581:c3:m2"} {"signature": "def setState(self, state):", "body": "self._positionIdx = self.choices.index(state[''])self._bestPositionIdx = self.choices.index(state[''])self._bestResult = state['']", "docstring": "See comments in base class.", "id": "f17581:c3:m3"} {"signature": "def setResultsPerChoice(self, resultsPerChoice):", "body": "self._resultsPerChoice = [[]] * len(self.choices)for (choiceValue, values) in resultsPerChoice:choiceIndex = self.choices.index(choiceValue)self._resultsPerChoice[choiceIndex] = list(values)", "docstring": "Setup our resultsPerChoice history based on the passed in\n resultsPerChoice.\n\n For example, if this variable has the following choices:\n ['a', 'b', 'c']\n\n resultsPerChoice will have up to 3 elements, each element is a tuple\n containing (choiceValue, errors) where errors is the list of errors\n received from models that used the specific choice:\n retval:\n [('a', [0.1, 0.2, 0.3]), ('b', [0.5, 0.1, 0.6]), ('c', [0.2])]", "id": "f17581:c3:m4"} {"signature": "def getPosition(self):", "body": "return self.choices[self._positionIdx]", "docstring": "See comments in base class.", "id": "f17581:c3:m5"} {"signature": "def agitate(self):", "body": "pass", "docstring": "See comments in base class.", "id": "f17581:c3:m6"} {"signature": "def newPosition(self, globalBestPosition, rng):", "body": "numChoices = len(self.choices)meanScorePerChoice = []overallSum = numResults = for i in range(numChoices):if len(self._resultsPerChoice[i]) > :data = numpy.array(self._resultsPerChoice[i])meanScorePerChoice.append(data.mean())overallSum += data.sum()numResults += data.sizeelse:meanScorePerChoice.append(None)if numResults == :overallSum = numResults = for i in range(numChoices):if meanScorePerChoice[i] is None:meanScorePerChoice[i] = overallSum / numResultsmeanScorePerChoice = numpy.array(meanScorePerChoice)meanScorePerChoice = ( * meanScorePerChoice.max()) - meanScorePerChoiceif self._fixEarly:meanScorePerChoice **= (numResults * self._fixEarlyFactor / numChoices)total = meanScorePerChoice.sum()if total == :total = meanScorePerChoice /= totaldistribution = meanScorePerChoice.cumsum()r = rng.random() * distribution[-]choiceIdx = numpy.where(r <= distribution)[][]self._positionIdx = choiceIdxreturn self.getPosition()", "docstring": "See comments in base class.", "id": "f17581:c3:m7"} {"signature": "def 
pushAwayFrom(self, otherPositions, rng):", "body": "positions = [self.choices.index(x) for x in otherPositions]positionCounts = [] * len(self.choices)for pos in positions:positionCounts[pos] += self._positionIdx = numpy.array(positionCounts).argmin()self._bestPositionIdx = self._positionIdx", "docstring": "See comments in base class.", "id": "f17581:c3:m8"} {"signature": "def resetVelocity(self, rng):", "body": "pass", "docstring": "See comments in base class.", "id": "f17581:c3:m9"} {"signature": "def __repr__(self):", "body": "suffix = \"\"for key, value in list(self.kwArgs.items()):suffix += \"\" % (key, value)return \"\" % ((self.fieldName, self.encoderClass, self.name, suffix))", "docstring": "See comments in base class.", "id": "f17581:c4:m1"} {"signature": "def getDict(self, encoderName, flattenedChosenValues):", "body": "encoder = dict(fieldname=self.fieldName,name=self.name)for encoderArg, value in self.kwArgs.items():if isinstance(value, PermuteVariable):value = flattenedChosenValues[\"\" % (encoderName, encoderArg)]encoder[encoderArg] = valueif '' in self.encoderClass:(encoder[''], argName) = self.encoderClass.split('')argValue = (encoder[''], encoder[''])encoder[argName] = argValueencoder.pop('')encoder.pop('')else:encoder[''] = self.encoderClassreturn encoder", "docstring": "Return a dict that can be used to construct this encoder. This dict\n can be passed directly to the addMultipleEncoders() method of the\n multi encoder.\n\n Parameters:\n ----------------------------------------------------------------------\n encoderName: name of the encoder\n flattenedChosenValues: dict of the flattened permutation variables. Any\n variables within this dict whose key starts\n with encoderName will be substituted for\n encoder constructor args which are being\n permuted over.", "id": "f17581:c4:m2"} {"signature": "def run(self):", "body": "self.verbosity = self._testValidPositions(varClass=PermuteFloat, minValue=,maxValue=, stepSize=)self._testValidPositions(varClass=PermuteInt, minValue=,maxValue=, stepSize=)self._testValidPositions(varClass=PermuteInt, minValue=,maxValue=, stepSize=)self._testConvergence(varClass=PermuteFloat, minValue=,maxValue=, targetValue=)self._testConvergence(varClass=PermuteFloat, minValue=,maxValue=, targetValue=)self._testConvergence(varClass=PermuteFloat, minValue=,maxValue=, targetValue=)self._testConvergence(varClass=PermuteInt, minValue=,maxValue=, targetValue=)self._testConvergence(varClass=PermuteInt, minValue=,maxValue=, targetValue=)self._testChoices()", "docstring": "Run unit tests on this module.", "id": "f17581:c5:m3"} {"signature": "def clean(s):", "body": "lines = [l.rstrip() for l in s.split('')]return ''.join(lines)", "docstring": "Removes trailing whitespace on each line.", "id": "f17583:m5"} {"signature": "def __init__(self, hsObj, resultsDB, flattenedPermuteVars,swarmId=None, newFarFrom=None, evolveFromState=None,newFromClone=None, newParticleId=False):", "body": "self._hsObj = hsObjself.logger = hsObj.loggerself._resultsDB = resultsDBself._rng = random.Random()self._rng.seed()def _setupVars(flattenedPermuteVars):allowedEncoderNames = self.swarmId.split('')self.permuteVars = copy.deepcopy(flattenedPermuteVars)varNames = self.permuteVars.keys()for varName in varNames:if '' in varName: if varName.split('')[] not in allowedEncoderNames:self.permuteVars.pop(varName)continueif isinstance(self.permuteVars[varName], PermuteChoices):if self._hsObj._speculativeParticles:maxGenIdx = Noneelse:maxGenIdx = self.genIdx - resultsPerChoice = 
self._resultsDB.getResultsPerChoice(swarmId=self.swarmId, maxGenIdx=maxGenIdx, varName=varName)self.permuteVars[varName].setResultsPerChoice(resultsPerChoice.values())if swarmId is not None:assert (evolveFromState is None)assert (newFromClone is None)self.swarmId = swarmIdself.particleId = \"\" % (str(self._hsObj._workerID),str(Particle._nextParticleID))Particle._nextParticleID += self.genIdx = _setupVars(flattenedPermuteVars)if newFarFrom is not None:for varName in self.permuteVars.iterkeys():otherPositions = []for particleState in newFarFrom:otherPositions.append(particleState[''][varName][''])self.permuteVars[varName].pushAwayFrom(otherPositions, self._rng)self._rng.seed(str(otherPositions))elif evolveFromState is not None:assert (swarmId is None)assert (newFarFrom is None)assert (newFromClone is None)self.particleId = evolveFromState['']self.genIdx = evolveFromState[''] + self.swarmId = evolveFromState['']_setupVars(flattenedPermuteVars)self.initStateFrom(self.particleId, evolveFromState, newBest=True)self.newPosition()elif newFromClone is not None:assert (swarmId is None)assert (newFarFrom is None)assert (evolveFromState is None)self.particleId = newFromClone['']if newParticleId:self.particleId = \"\" % (str(self._hsObj._workerID),str(Particle._nextParticleID))Particle._nextParticleID += self.genIdx = newFromClone['']self.swarmId = newFromClone['']_setupVars(flattenedPermuteVars)self.initStateFrom(self.particleId, newFromClone, newBest=False)else:assert False, \"\"self.logger.debug(\"\" % (str(self)))", "docstring": "Create a particle.\n\n There are 3 fundamentally different methods of instantiating a particle:\n 1.) You can instantiate a new one from scratch, at generation index #0. This\n particle gets a new particleId.\n required: swarmId\n optional: newFarFrom\n must be None: evolveFromState, newFromClone\n\n 2.) You can instantiate one from savedState, in which case it's generation\n index is incremented (from the value stored in the saved state) and\n its particleId remains the same.\n required: evolveFromState\n optional:\n must be None: flattenedPermuteVars, swarmId, newFromClone\n\n 3.) You can clone another particle, creating a new particle at the same\n generationIdx but a different particleId. This new particle will end\n up at exactly the same position as the one it was cloned from. If\n you want to move it to the next position, or just jiggle it a bit, call\n newPosition() or agitate() after instantiation.\n required: newFromClone\n optional:\n must be None: flattenedPermuteVars, swarmId, evolveFromState\n\n\n Parameters:\n --------------------------------------------------------------------\n hsObj: The HypersearchV2 instance\n\n resultsDB: the ResultsDB instance that holds all the model results\n\n flattenedPermuteVars: dict() containing the (key, PermuteVariable) pairs\n of the flattened permutation variables as read from the permutations\n file.\n\n swarmId: String that represents the encoder names of the encoders that are\n to be included in this particle's model. Of the form\n 'encoder1.encoder2'.\n Required for creation method #1.\n\n newFarFrom: If not None, this is a list of other particleState dicts in the\n swarm that we want to be as far away from as possible. Optional\n argument for creation method #1.\n\n evolveFromState: If not None, evolve an existing particle. This is a\n dict containing the particle's state. Preserve the particleId, but\n increment the generation index. 
Required for creation method #2.\n\n newFromClone: If not None, clone this other particle's position and generation\n      index, with small random perturbations. This is a dict containing the\n      particle's state. Required for creation method #3.\n\n newParticleId: Only applicable when newFromClone is True. Give the clone\n      a new particle ID.", "id": "f17585:c0:m0"} {"signature": "def getState(self):", "body": "varStates = dict()for varName, var in self.permuteVars.iteritems():varStates[varName] = var.getState()return dict(id=self.particleId,genIdx=self.genIdx,swarmId=self.swarmId,varStates=varStates)", "docstring": "Get the particle state as a dict. This is enough information to\n instantiate this particle on another worker.", "id": "f17585:c0:m2"} {"signature": "def initStateFrom(self, particleId, particleState, newBest):", "body": "if newBest:(bestResult, bestPosition) = self._resultsDB.getParticleBest(particleId)else:bestResult = bestPosition = NonevarStates = particleState['']for varName in varStates.keys():varState = copy.deepcopy(varStates[varName])if newBest:varState[''] = bestResultif bestPosition is not None:varState[''] = bestPosition[varName]self.permuteVars[varName].setState(varState)", "docstring": "Init all of our variable positions, velocities, and optionally the best\n result and best position from the given particle.\n\n If newBest is true, we get the best result and position for this new\n generation from the resultsDB. This is used when evolving a particle\n because the bestResult and position as stored in the particle state were the best AT THE TIME\n THAT PARTICLE STARTED TO RUN and do not include the best since that\n particle completed.", "id": "f17585:c0:m3"} {"signature": "def copyEncoderStatesFrom(self, particleState):", "body": "allowedToMove = Truefor varName in particleState['']:if '' in varName: if varName not in self.permuteVars:continuestate = copy.deepcopy(particleState[''][varName])state[''] = state['']state[''] = state['']if not allowedToMove:state[''] = self.permuteVars[varName].setState(state)if allowedToMove:self.permuteVars[varName].resetVelocity(self._rng)", "docstring": "Copy all encoder variables from particleState into this particle.\n\n Parameters:\n --------------------------------------------------------------\n particleState: dict produced by a particle's getState() method", "id": "f17585:c0:m4"} {"signature": "def copyVarStatesFrom(self, particleState, varNames):", "body": "allowedToMove = Truefor varName in particleState['']:if varName in varNames:if varName not in self.permuteVars:continuestate = copy.deepcopy(particleState[''][varName])state[''] = state['']state[''] = state['']if not allowedToMove:state[''] = self.permuteVars[varName].setState(state)if allowedToMove:self.permuteVars[varName].resetVelocity(self._rng)", "docstring": "Copy specific variables from particleState into this particle.\n\n Parameters:\n --------------------------------------------------------------\n particleState: dict produced by a particle's getState() method\n varNames: which variables to copy", "id": "f17585:c0:m5"} {"signature": "def getPosition(self):", "body": "result = dict()for (varName, value) in self.permuteVars.iteritems():result[varName] = value.getPosition()return result", "docstring": "Return the position of this particle. 
This returns a dict() of key\n value pairs where each key is the name of the flattened permutation\n variable and the value is its chosen value.\n\n Parameters:\n --------------------------------------------------------------\n retval: dict() of flattened permutation choices", "id": "f17585:c0:m6"} {"signature": "@staticmethoddef getPositionFromState(pState):", "body": "result = dict()for (varName, value) in pState[''].iteritems():result[varName] = value['']return result", "docstring": "Return the position of a particle given its state dict.\n\n Parameters:\n --------------------------------------------------------------\n retval: dict() of particle position, keys are the variable names,\n values are their positions", "id": "f17585:c0:m7"} {"signature": "def agitate(self):", "body": "for (varName, var) in self.permuteVars.iteritems():var.agitate()self.newPosition()", "docstring": "Agitate this particle so that it is likely to go to a new position.\n Every time agitate is called, the particle is jiggled an even greater\n amount.\n\n Parameters:\n --------------------------------------------------------------\n retval: None", "id": "f17585:c0:m8"} {"signature": "def newPosition(self, whichVars=None):", "body": "globalBestPosition = Noneif self._hsObj._speculativeParticles:genIdx = self.genIdxelse:genIdx = self.genIdx - if genIdx >= :(bestModelId, _) = self._resultsDB.bestModelIdAndErrScore(self.swarmId,genIdx)if bestModelId is not None:(particleState, _, _, _, _) = self._resultsDB.getParticleInfo(bestModelId)globalBestPosition = Particle.getPositionFromState(particleState)for (varName, var) in self.permuteVars.iteritems():if whichVars is not None and varName not in whichVars:continueif globalBestPosition is None:var.newPosition(None, self._rng)else:var.newPosition(globalBestPosition[varName], self._rng)position = self.getPosition()if self.logger.getEffectiveLevel() <= logging.DEBUG:msg = StringIO.StringIO()print >> msg, \"\" % (pprint.pformat(position,indent=))print >> msg, \"\"for (varName, var) in self.permuteVars.iteritems():print >> msg, \"\" % (varName, str(var))self.logger.debug(msg.getvalue())msg.close()return position", "docstring": "Choose a new position based on results obtained so far from all other\n particles.\n\n Parameters:\n --------------------------------------------------------------\n whichVars: If not None, only move these variables\n retval: new position", "id": "f17585:c0:m9"} {"signature": "def __init__(self, hsObj):", "body": "self._hsObj = hsObjself.logger = self._hsObj.loggerself._state = Noneself._priorStateJSON = Noneself._dirty = Falseself.readStateFromDB()", "docstring": "Create our state object.\n\n Parameters:\n ---------------------------------------------------------------------\n hsObj: Reference to the HypersesarchV2 instance\n cjDAO: ClientJobsDAO instance\n logger: logger to use\n jobID: our JobID", "id": "f17586:c0:m0"} {"signature": "def isDirty(self):", "body": "return self._dirty", "docstring": "Return true if our local copy of the state has changed since the\n last time we read from the DB.", "id": "f17586:c0:m1"} {"signature": "def isSearchOver(self):", "body": "return self._state['']", "docstring": "Return true if the search should be considered over.", "id": "f17586:c0:m2"} {"signature": "def readStateFromDB(self):", "body": "self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID,[''])[]if self._priorStateJSON is None:swarms = dict()if self._hsObj._fixedFields is not None:print(self._hsObj._fixedFields)encoderSet = []for field 
in self._hsObj._fixedFields:if field =='':continueencoderName = self.getEncoderKeyFromName(field)assert encoderName in self._hsObj._encoderNames, \"\"\"\"\"\" % (field)encoderSet.append(encoderName)encoderSet.sort()swarms[''.join(encoderSet)] = {'': '','': None,'': None,'': ,}elif self._hsObj._searchType == HsSearchType.temporal:for encoderName in self._hsObj._encoderNames:swarms[encoderName] = {'': '','': None,'': None,'': ,}elif self._hsObj._searchType == HsSearchType.classification:for encoderName in self._hsObj._encoderNames:if encoderName == self._hsObj._predictedFieldEncoder:continueswarms[encoderName] = {'': '','': None,'': None,'': ,}elif self._hsObj._searchType == HsSearchType.legacyTemporal:swarms[self._hsObj._predictedFieldEncoder] = {'': '','': None,'': None,'': ,}else:raise RuntimeError(\"\" %(self._hsObj._searchType))self._state = dict(lastUpdateTime = time.time(),lastGoodSprint = None,searchOver = False,activeSwarms = list(swarms.keys()),swarms = swarms,sprints = [{'': '','': None,'': None}],blackListedEncoders = [],)self._hsObj._cjDAO.jobSetFieldIfEqual(self._hsObj._jobID, '', json.dumps(self._state), None)self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID, [''])[]assert (self._priorStateJSON is not None)self._state = json.loads(self._priorStateJSON)self._dirty = False", "docstring": "Set our state to that obtained from the engWorkerState field of the\n job record.\n\n\n Parameters:\n ---------------------------------------------------------------------\n stateJSON: JSON encoded state from job record", "id": "f17586:c0:m3"} {"signature": "def writeStateToDB(self):", "body": "if not self._dirty:return Trueself._state[''] = time.time()newStateJSON = json.dumps(self._state)success = self._hsObj._cjDAO.jobSetFieldIfEqual(self._hsObj._jobID,'', str(newStateJSON), str(self._priorStateJSON))if success:self.logger.debug(\"\" %(pprint.pformat(self._state, indent=)))self._priorStateJSON = newStateJSONelse:self.logger.debug(\"\" %(pprint.pformat(self._state, indent=)))self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID,[''])[]self._state = json.loads(self._priorStateJSON)self.logger.info(\"\"\"\" % (pprint.pformat(self._state, indent=)))return success", "docstring": "Update the state in the job record with our local changes (if any).\n If we don't have the latest state in our priorStateJSON, then re-load\n in the latest state and return False. 
If we were successful writing out\n our changes, return True\n\n Parameters:\n ---------------------------------------------------------------------\n retval: True if we were successful writing out our changes\n False if our priorState is not the latest that was in the DB.\n In this case, we will re-load our state from the DB", "id": "f17586:c0:m4"} {"signature": "def getEncoderNameFromKey(self, key):", "body": "return key.split('')[-]", "docstring": "Given an encoder dictionary key, get the encoder name.\n\n Encoders are a sub-dict within model params, and in HSv2, their key\n is structured like this for example:\n 'modelParams|sensorParams|encoders|home_winloss'\n\n The encoderName is the last word in the | separated key name", "id": "f17586:c0:m5"} {"signature": "def getEncoderKeyFromName(self, name):", "body": "return '' % (name)", "docstring": "Given an encoder name, get the key.\n\n Encoders are a sub-dict within model params, and in HSv2, their key\n is structured like this for example:\n 'modelParams|sensorParams|encoders|home_winloss'\n\n The encoderName is the last word in the | separated key name", "id": "f17586:c0:m6"} {"signature": "def getFieldContributions(self):", "body": "if self._hsObj._fixedFields is not None:return dict(), dict()predictedEncoderName = self._hsObj._predictedFieldEncoderfieldScores = []for swarmId, info in self._state[''].items():encodersUsed = swarmId.split('')if len(encodersUsed) != :continuefield = self.getEncoderNameFromKey(encodersUsed[])bestScore = info['']if bestScore is None:(_modelId, bestScore) =self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)fieldScores.append((bestScore, field))if self._hsObj._searchType == HsSearchType.legacyTemporal:assert(len(fieldScores)==)(baseErrScore, baseField) = fieldScores[]for swarmId, info in self._state[''].items():encodersUsed = swarmId.split('')if len(encodersUsed) != :continuefields = [self.getEncoderNameFromKey(name) for name in encodersUsed]fields.remove(baseField)fieldScores.append((info[''], fields[]))else:fieldScores.sort(reverse=True)if self._hsObj._maxBranching > and len(fieldScores) > self._hsObj._maxBranching:baseErrScore = fieldScores[-self._hsObj._maxBranching-][]else:baseErrScore = fieldScores[][]pctFieldContributionsDict = dict()absFieldContributionsDict = dict()if baseErrScore is not None:if abs(baseErrScore) < :baseErrScore = for (errScore, field) in fieldScores:if errScore is not None:pctBetter = (baseErrScore - errScore) * / baseErrScoreelse:pctBetter = errScore = baseErrScore pctFieldContributionsDict[field] = pctBetterabsFieldContributionsDict[field] = baseErrScore - errScoreself.logger.debug(\"\" % (pctFieldContributionsDict))return pctFieldContributionsDict, absFieldContributionsDict", "docstring": "Return the field contributions statistics.\n\n Parameters:\n ---------------------------------------------------------------------\n retval: Dictionary where the keys are the field names and the values\n are how much each field contributed to the best score.", "id": "f17586:c0:m7"} {"signature": "def getAllSwarms(self, sprintIdx):", "body": "swarmIds = []for swarmId, info in self._state[''].items():if info[''] == sprintIdx:swarmIds.append(swarmId)return swarmIds", "docstring": "Return the list of all swarms in the given sprint.\n\n Parameters:\n ---------------------------------------------------------------------\n retval: list of active swarm Ids in the given sprint", "id": "f17586:c0:m8"} {"signature": "def getActiveSwarms(self, sprintIdx=None):", "body": "swarmIds = []for swarmId, info 
in self._state[''].items():if sprintIdx is not None and info[''] != sprintIdx:continueif info[''] == '':swarmIds.append(swarmId)return swarmIds", "docstring": "Return the list of active swarms in the given sprint. These are swarms\n which still need new particles created in them.\n\n Parameters:\n ---------------------------------------------------------------------\n sprintIdx: which sprint to query. If None, get active swarms from all\n sprints\n retval: list of active swarm Ids in the given sprint", "id": "f17586:c0:m9"} {"signature": "def getNonKilledSwarms(self, sprintIdx):", "body": "swarmIds = []for swarmId, info in self._state[''].items():if info[''] == sprintIdx and info[''] != '':swarmIds.append(swarmId)return swarmIds", "docstring": "Return the list of swarms in the given sprint that were not killed.\n This is called when we are trying to figure out which encoders to carry\n forward to the next sprint. We don't want to carry forward encoder\n combinations which were obviously bad (in killed swarms).\n\n Parameters:\n ---------------------------------------------------------------------\n retval: list of non-killed swarm Ids in the given sprint", "id": "f17586:c0:m10"} {"signature": "def getCompletedSwarms(self):", "body": "swarmIds = []for swarmId, info in self._state[''].items():if info[''] == '':swarmIds.append(swarmId)return swarmIds", "docstring": "Return the list of all completed swarms.\n\n Parameters:\n ---------------------------------------------------------------------\n retval: list of completed swarm Ids", "id": "f17586:c0:m11"} {"signature": "def getCompletingSwarms(self):", "body": "swarmIds = []for swarmId, info in self._state[''].items():if info[''] == '':swarmIds.append(swarmId)return swarmIds", "docstring": "Return the list of all completing swarms.\n\n Parameters:\n ---------------------------------------------------------------------\n retval: list of completing swarm Ids", "id": "f17586:c0:m12"} {"signature": "def bestModelInCompletedSwarm(self, swarmId):", "body": "swarmInfo = self._state[''][swarmId]return (swarmInfo[''],swarmInfo[''])", "docstring": "Return the best model ID and its errScore from the given swarm.\n If the swarm has not completed yet, the bestModelID will be None.\n\n Parameters:\n ---------------------------------------------------------------------\n retval: (modelId, errScore)", "id": "f17586:c0:m13"} {"signature": "def bestModelInCompletedSprint(self, sprintIdx):", "body": "sprintInfo = self._state[''][sprintIdx]return (sprintInfo[''],sprintInfo[''])", "docstring": "Return the best model ID and its errScore from the given sprint.\n If the sprint has not completed yet, the bestModelID will be None.\n\n Parameters:\n ---------------------------------------------------------------------\n retval: (modelId, errScore)", "id": "f17586:c0:m14"} {"signature": "def bestModelInSprint(self, sprintIdx):", "body": "swarms = self.getAllSwarms(sprintIdx)bestModelId = NonebestErrScore = numpy.inffor swarmId in swarms:(modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)if errScore < bestErrScore:bestModelId = modelIdbestErrScore = errScorereturn (bestModelId, bestErrScore)", "docstring": "Return the best model ID and its errScore from the given sprint,\n which may still be in progress. 
This returns the best score from all models\n in the sprint which have matured so far.\n\n Parameters:\n ---------------------------------------------------------------------\n retval: (modelId, errScore)", "id": "f17586:c0:m15"} {"signature": "def setSwarmState(self, swarmId, newStatus):", "body": "assert (newStatus in ['', '', '', ''])swarmInfo = self._state[''][swarmId]if swarmInfo[''] == newStatus:returnif swarmInfo[''] == '' and newStatus == '':returnself._dirty = TrueswarmInfo[''] = newStatusif newStatus == '':(modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)swarmInfo[''] = modelIdswarmInfo[''] = errScoreif newStatus != '' and swarmId in self._state['']:self._state[''].remove(swarmId)if newStatus=='':self._hsObj.killSwarmParticles(swarmId)sprintIdx = swarmInfo['']self.isSprintActive(sprintIdx)sprintInfo = self._state[''][sprintIdx]statusCounts = dict(active=, completing=, completed=, killed=)bestModelIds = []bestErrScores = []for info in self._state[''].values():if info[''] != sprintIdx:continuestatusCounts[info['']] += if info[''] == '':bestModelIds.append(info[''])bestErrScores.append(info[''])if statusCounts[''] > :sprintStatus = ''elif statusCounts[''] > :sprintStatus = ''else:sprintStatus = ''sprintInfo[''] = sprintStatusif sprintStatus == '':if len(bestErrScores) > :whichIdx = numpy.array(bestErrScores).argmin()sprintInfo[''] = bestModelIds[whichIdx]sprintInfo[''] = bestErrScores[whichIdx]else:sprintInfo[''] = sprintInfo[''] = numpy.infbestPrior = numpy.inffor idx in range(sprintIdx):if self._state[''][idx][''] == '':(_, errScore) = self.bestModelInCompletedSprint(idx)if errScore is None:errScore = numpy.infelse:errScore = numpy.infif errScore < bestPrior:bestPrior = errScoreif sprintInfo[''] >= bestPrior:self._state[''] = sprintIdx-if self._state[''] is not Noneand not self.anyGoodSprintsActive():self._state[''] = True", "docstring": "Change the given swarm's state to 'newState'. 
If 'newState' is\n 'completed', then bestModelId and bestErrScore must be provided.\n\n Parameters:\n ---------------------------------------------------------------------\n swarmId: swarm Id\n newStatus: new status, either 'active', 'completing', 'completed', or\n 'killed'", "id": "f17586:c0:m16"} {"signature": "def anyGoodSprintsActive(self):", "body": "if self._state[''] is not None:goodSprints = self._state[''][:self._state['']+]else:goodSprints = self._state['']for sprint in goodSprints:if sprint[''] == '':anyActiveSprints = Truebreakelse:anyActiveSprints = Falsereturn anyActiveSprints", "docstring": "Return True if there are any more good sprints still being explored.\n A 'good' sprint is one that is earlier than where we detected an increase\n in error from sprint to subsequent sprint.", "id": "f17586:c0:m17"} {"signature": "def isSprintCompleted(self, sprintIdx):", "body": "numExistingSprints = len(self._state[''])if sprintIdx >= numExistingSprints:return Falsereturn (self._state[''][sprintIdx][''] == '')", "docstring": "Return True if the given sprint has completed.", "id": "f17586:c0:m18"} {"signature": "def killUselessSwarms(self):", "body": "numExistingSprints = len(self._state[''])if self._hsObj._searchType == HsSearchType.legacyTemporal:if numExistingSprints <= :returnelse:if numExistingSprints <= :returncompletedSwarms = self.getCompletedSwarms()completedSwarms = [(swarm, self._state[\"\"][swarm],self._state[\"\"][swarm][\"\"])for swarm in completedSwarms]completedMatrix = [[] for i in range(numExistingSprints)]for swarm in completedSwarms:completedMatrix[swarm[][\"\"]].append(swarm)for sprint in completedMatrix:sprint.sort(key=itemgetter())activeSwarms = self.getActiveSwarms()activeSwarms.extend(self.getCompletingSwarms())activeSwarms = [(swarm, self._state[\"\"][swarm],self._state[\"\"][swarm][\"\"])for swarm in activeSwarms]activeMatrix = [[] for i in range(numExistingSprints)]for swarm in activeSwarms:activeMatrix[swarm[][\"\"]].append(swarm)for sprint in activeMatrix:sprint.sort(key=itemgetter())toKill = []for i in range(, numExistingSprints):for swarm in activeMatrix[i]:curSwarmEncoders = swarm[].split(\"\")if(len(activeMatrix[i-])==):if i== and (self._hsObj._tryAll3FieldCombinations orself._hsObj._tryAll3FieldCombinationsWTimestamps):passelse:bestInPrevious = completedMatrix[i-][]bestEncoders = bestInPrevious[].split('')for encoder in bestEncoders:if not encoder in curSwarmEncoders:toKill.append(swarm)if len(toKill) > :print(\"\" + str(toKill))for swarm in toKill:self.setSwarmState(swarm[], \"\")return", "docstring": "See if we can kill off some speculative swarms. 
If an earlier sprint\n has finally completed, we can now tell which fields should *really* be present\n in the sprints we've already started due to speculation, and kill off the\n swarms that should not have been included.", "id": "f17586:c0:m19"} {"signature": "def isSprintActive(self, sprintIdx):", "body": "while True:numExistingSprints = len(self._state[''])if sprintIdx <= numExistingSprints-:if not self._hsObj._speculativeParticles:active = (self._state[''][sprintIdx][''] == '')return (active, False)else:active = (self._state[''][sprintIdx][''] == '')if not active:return (active, False)activeSwarmIds = self.getActiveSwarms(sprintIdx)swarmSizes = [self._hsObj._resultsDB.getParticleInfos(swarmId,matured=False)[] for swarmId in activeSwarmIds]notFullSwarms = [len(swarm) for swarm in swarmSizesif len(swarm) < self._hsObj._minParticlesPerSwarm]if len(notFullSwarms) > :return (True, False)if self._state[''] is not None:return (False, True)if self._hsObj._fixedFields is not None:return (False, True)if sprintIdx > and self._state[''][sprintIdx-][''] == '':(bestModelId, _) = self.bestModelInCompletedSprint(sprintIdx-)(particleState, _, _, _, _) = self._hsObj._resultsDB.getParticleInfo(bestModelId)bestSwarmId = particleState['']baseEncoderSets = [bestSwarmId.split('')]else:bestSwarmId = NoneparticleState = NonebaseEncoderSets = []for swarmId in self.getNonKilledSwarms(sprintIdx-):baseEncoderSets.append(swarmId.split(''))encoderAddSet = []limitFields = Falseif self._hsObj._maxBranching > or self._hsObj._minFieldContribution >= :if self._hsObj._searchType == HsSearchType.temporal orself._hsObj._searchType == HsSearchType.classification:if sprintIdx >= :limitFields = TruebaseSprintIdx = elif self._hsObj._searchType == HsSearchType.legacyTemporal:if sprintIdx >= :limitFields = TruebaseSprintIdx = else:raise RuntimeError(\"\" %(self._hsObj._searchType))if limitFields:pctFieldContributions, absFieldContributions =self.getFieldContributions()toRemove = []self.logger.debug(\"\" %(self._hsObj._minFieldContribution))for fieldname in pctFieldContributions:if pctFieldContributions[fieldname] < self._hsObj._minFieldContribution:self.logger.debug(\"\" % (fieldname))toRemove.append(self.getEncoderKeyFromName(fieldname))else:self.logger.debug(\"\" % (fieldname))swarms = self._state[\"\"]sprintSwarms = [(swarm, swarms[swarm][\"\"])for swarm in swarms if swarms[swarm][\"\"] == baseSprintIdx]sprintSwarms = sorted(sprintSwarms, key=itemgetter())if self._hsObj._maxBranching > :sprintSwarms = sprintSwarms[:self._hsObj._maxBranching]for swarm in sprintSwarms:swarmEncoders = swarm[].split(\"\")for encoder in swarmEncoders:if not encoder in encoderAddSet:encoderAddSet.append(encoder)encoderAddSet = [encoder for encoder in encoderAddSetif not str(encoder) in toRemove]else:encoderAddSet = self._hsObj._encoderNamesnewSwarmIds = set()if (self._hsObj._searchType == HsSearchType.temporalor self._hsObj._searchType == HsSearchType.legacyTemporal)and sprintIdx == and (self._hsObj._tryAll3FieldCombinations orself._hsObj._tryAll3FieldCombinationsWTimestamps):if self._hsObj._tryAll3FieldCombinations:newEncoders = set(self._hsObj._encoderNames)if self._hsObj._predictedFieldEncoder in newEncoders:newEncoders.remove(self._hsObj._predictedFieldEncoder)else:newEncoders = set(encoderAddSet)if self._hsObj._predictedFieldEncoder in newEncoders:newEncoders.remove(self._hsObj._predictedFieldEncoder)for encoder in self._hsObj._encoderNames:if encoder.endswith('') or encoder.endswith('')or encoder.endswith(''):newEncoders.add(encoder)allCombos 
= list(itertools.combinations(newEncoders, ))for combo in allCombos:newSet = list(combo)newSet.append(self._hsObj._predictedFieldEncoder)newSet.sort()newSwarmId = ''.join(newSet)if newSwarmId not in self._state['']:newSwarmIds.add(newSwarmId)if (len(self.getActiveSwarms(sprintIdx-)) > ):breakelse:for baseEncoderSet in baseEncoderSets:for encoder in encoderAddSet:if encoder not in self._state['']and encoder not in baseEncoderSet:newSet = list(baseEncoderSet)newSet.append(encoder)newSet.sort()newSwarmId = ''.join(newSet)if newSwarmId not in self._state['']:newSwarmIds.add(newSwarmId)if (len(self.getActiveSwarms(sprintIdx-)) > ):breaknewSwarmIds = sorted(newSwarmIds)if len(newSwarmIds) == :if len(self.getAllSwarms(sprintIdx)) > :return (True, False)else:return (False, True)self._dirty = Trueif len(self._state[\"\"]) == sprintIdx:self._state[''].append({'': '','': None,'': None})for swarmId in newSwarmIds:self._state[''][swarmId] = {'': '','': None,'': None,'': sprintIdx}self._state[''] = self.getActiveSwarms()success = self.writeStateToDB()if success:return (True, False)", "docstring": "If the given sprint exists and is active, return active=True.\n\n If the sprint does not exist yet, this call will create it (and return\n active=True). If it already exists, but is completing or complete, return\n active=False.\n\n If sprintIdx is past the end of the possible sprints, return\n active=False, noMoreSprints=True\n\n IMPORTANT: When speculative particles are enabled, this call has some\n special processing to handle speculative sprints:\n\n * When creating a new speculative sprint (creating sprint N before\n sprint N-1 has completed), it initially only puts in only ONE swarm into\n the sprint.\n\n * Every time it is asked if sprint N is active, it also checks to see if\n it is time to add another swarm to the sprint, and adds a new swarm if\n appropriate before returning active=True\n\n * We decide it is time to add a new swarm to a speculative sprint when ALL\n of the currently active swarms in the sprint have all the workers they\n need (number of running (not mature) particles is _minParticlesPerSwarm).\n This means that we have capacity to run additional particles in a new\n swarm.\n\n It is expected that the sprints will be checked IN ORDER from 0 on up. (It\n is an error not to) The caller should always try to allocate from the first\n active sprint it finds. 
If it can't, then it can call this again to\n find/create the next active sprint.\n\n Parameters:\n ---------------------------------------------------------------------\n retval: (active, noMoreSprints)\n active: True if the given sprint is active\n noMoreSprints: True if there are no more sprints possible", "id": "f17586:c0:m20"} {"signature": "def debug(self, msg, *args, **kwargs):", "body": "self._baseLogger.debug(self, self.getExtendedMsg(msg), *args, **kwargs)", "docstring": "Log 'msg % args' with severity 'DEBUG'.\n\nTo pass exception information, use the keyword argument exc_info with\na true value, e.g.\n\nlogger.debug(\"Houston, we have a %s\", \"thorny problem\", exc_info=1)", "id": "f17587:c0:m3"} {"signature": "def info(self, msg, *args, **kwargs):", "body": "self._baseLogger.info(self, self.getExtendedMsg(msg), *args, **kwargs)", "docstring": "Log 'msg % args' with severity 'INFO'.\n\nTo pass exception information, use the keyword argument exc_info with\na true value, e.g.\n\nlogger.info(\"Houston, we have a %s\", \"interesting problem\", exc_info=1)", "id": "f17587:c0:m4"} {"signature": "def warning(self, msg, *args, **kwargs):", "body": "self._baseLogger.warning(self, self.getExtendedMsg(msg), *args, **kwargs)", "docstring": "Log 'msg % args' with severity 'WARNING'.\n\nTo pass exception information, use the keyword argument exc_info with\na true value, e.g.\n\nlogger.warning(\"Houston, we have a %s\", \"bit of a problem\", exc_info=1)", "id": "f17587:c0:m5"} {"signature": "def error(self, msg, *args, **kwargs):", "body": "self._baseLogger.error(self, self.getExtendedMsg(msg), *args, **kwargs)", "docstring": "Log 'msg % args' with severity 'ERROR'.\n\nTo pass exception information, use the keyword argument exc_info with\na true value, e.g.\n\nlogger.error(\"Houston, we have a %s\", \"major problem\", exc_info=1)", "id": "f17587:c0:m6"} {"signature": "def critical(self, msg, *args, **kwargs):", "body": "self._baseLogger.critical(self, self.getExtendedMsg(msg), *args, **kwargs)", "docstring": "Log 'msg % args' with severity 'CRITICAL'.\n\nTo pass exception information, use the keyword argument exc_info with\na true value, e.g.\n\nlogger.critical(\"Houston, we have a %s\", \"major disaster\", exc_info=1)", "id": "f17587:c0:m7"} {"signature": "def log(self, level, msg, *args, **kwargs):", "body": "self._baseLogger.log(self, level, self.getExtendedMsg(msg), *args,**kwargs)", "docstring": "Log 'msg % args' with the integer severity 'level'.\n\nTo pass exception information, use the keyword argument exc_info with\na true value, e.g.\n\nlogger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)", "id": "f17587:c0:m8"} {"signature": "def getTerminationCallbacks(self, terminationFunc):", "body": "activities = [None] * len(ModelTerminator._MILESTONES)for index, (iteration, _) in enumerate(ModelTerminator._MILESTONES):cb = functools.partial(terminationFunc, index=index)activities[index] = PeriodicActivityRequest(repeating =False,period = iteration,cb=cb)", "docstring": "Returns the periodic checks to see if the model should\n continue running.\n\n Parameters:\n -----------------------------------------------------------------------\n terminationFunc: The function that will be called in the model main loop\n as a wrapper around this function. 
Must have a parameter\n called 'index'\n\n Returns: A list of PeriodicActivityRequest objects.", "id": "f17589:c0:m1"} {"signature": "def recordDataPoint(self, swarmId, generation, errScore):", "body": "terminatedSwarms = []if swarmId in self.swarmScores:entry = self.swarmScores[swarmId]assert(len(entry) == generation)entry.append(errScore)entry = self.swarmBests[swarmId]entry.append(min(errScore, entry[-]))assert(len(self.swarmBests[swarmId]) == len(self.swarmScores[swarmId]))else:assert (generation == )self.swarmScores[swarmId] = [errScore]self.swarmBests[swarmId] = [errScore]if generation + < self.MATURITY_WINDOW:return terminatedSwarmsif self.MAX_GENERATIONS is not None and generation > self.MAX_GENERATIONS:self._logger.info('' %(swarmId, self.MAX_GENERATIONS))terminatedSwarms.append(swarmId)if self._isTerminationEnabled:terminatedSwarms.extend(self._getTerminatedSwarms(generation))cumulativeBestScores = self.swarmBests[swarmId]if cumulativeBestScores[-] == cumulativeBestScores[-self.MATURITY_WINDOW]:self._logger.info(''''% (swarmId, self.MATURITY_WINDOW))terminatedSwarms.append(swarmId)self.terminatedSwarms = self.terminatedSwarms.union(terminatedSwarms)return terminatedSwarms", "docstring": "Record the best score for a swarm's generation index (x)\n Returns list of swarmIds to terminate.", "id": "f17590:c0:m1"} {"signature": "def Enum(*args, **kwargs):", "body": "def getLabel(cls, val):\"\"\"\"\"\"return cls.__labels[val]def validate(cls, val):\"\"\"\"\"\"return val in cls.__valuesdef getValues(cls):\"\"\"\"\"\"return list(cls.__values)def getLabels(cls):\"\"\"\"\"\"return list(cls.__labels.values())def getValue(cls, label):\"\"\"\"\"\"return cls.__labels[label]for arg in list(args)+list(kwargs.keys()):if type(arg) is not str:raise TypeError(\"\".format(arg))if not __isidentifier(arg):raise ValueError(\"\"\"\".format(arg))kwargs.update(list(zip(args, args)))newType = type(\"\", (object,), kwargs)newType.__labels = dict( (v,k) for k,v in kwargs.items())newType.__values = set(newType.__labels.keys())newType.getLabel = functools.partial(getLabel, newType)newType.validate = functools.partial(validate, newType)newType.getValues = functools.partial(getValues, newType)newType.getLabels = functools.partial(getLabels, newType)newType.getValue = functools.partial(getValue, newType)return newType", "docstring": "Utility function for creating enumerations in python\n\nExample Usage:\n >> Color = Enum(\"Red\", \"Green\", \"Blue\", \"Magenta\")\n >> print Color.Red\n >> 0\n >> print Color.Green\n >> 1\n >> print Color.Blue\n >> 2\n >> print Color.Magenta\n >> 3\n >> Color.Violet\n >> 'violet'\n >> Color.getLabel(Color.Red)\n >> 'Red'\n >> Color.getLabel(2)\n >> 'Blue'", "id": "f17591:m1"} {"signature": "def makeDirectoryFromAbsolutePath(absDirPath):", "body": "assert os.path.isabs(absDirPath)try:os.makedirs(absDirPath)except OSError as e:if e.errno != os.errno.EEXIST:raisereturn absDirPath", "docstring": "Makes directory for the given directory path with default permissions.\n If the directory already exists, it is treated as success.\n\n absDirPath: absolute path of the directory to create.\n\n Returns: absDirPath arg\n\n Exceptions: OSError if directory creation fails", "id": "f17591:m2"} {"signature": "@classmethoddef getString(cls, prop):", "body": "if cls._properties is None:cls._readStdConfigFiles()envValue = os.environ.get(\"\" % (cls.envPropPrefix,prop.replace('', '')), None)if envValue is not None:return envValuereturn cls._properties[prop]", "docstring": "Retrieve the requested property 
as a string. If property does not exist,\n then KeyError will be raised.\n\n Parameters:\n ----------------------------------------------------------------\n prop: name of the property\n retval: property value as a string", "id": "f17591:c0:m0"} {"signature": "@classmethoddef getBool(cls, prop):", "body": "value = cls.getInt(prop)if value not in (, ):raise ValueError(\"\" % (value, prop))return bool(value)", "docstring": "Retrieve the requested property and return it as a bool. If property\n does not exist, then KeyError will be raised. If the property value is\n neither 0 nor 1, then ValueError will be raised\n\n Parameters:\n ----------------------------------------------------------------\n prop: name of the property\n retval: property value as bool", "id": "f17591:c0:m1"} {"signature": "@classmethoddef getInt(cls, prop):", "body": "return int(cls.getString(prop))", "docstring": "Retrieve the requested property and return it as an int. If property\n does not exist, then KeyError will be raised.\n\n Parameters:\n ----------------------------------------------------------------\n prop: name of the property\n retval: property value as int", "id": "f17591:c0:m2"} {"signature": "@classmethoddef getFloat(cls, prop):", "body": "return float(cls.getString(prop))", "docstring": "Retrieve the requested property and return it as a float. If property\n does not exist, then KeyError will be raised.\n\n Parameters:\n ----------------------------------------------------------------\n prop: name of the property\n retval: property value as float", "id": "f17591:c0:m3"} {"signature": "@classmethoddef get(cls, prop, default=None):", "body": "try:return cls.getString(prop)except KeyError:return default", "docstring": "Get the value of the given configuration property as string. This\n returns a string which is the property value, or the value of \"default\" arg\n if the property is not found. Use Configuration.getString() instead.\n\n NOTE: it's atypical for our configuration properties to be missing - a\n missing configuration property is usually a very serious error. Because\n of this, it's preferable to use one of the getString, getInt, getFloat,\n etc. variants instead of get(). 
Those variants will raise KeyError when\n an expected property is missing.\n\n Parameters:\n ----------------------------------------------------------------\n prop: name of the property\n default: default value to return if property does not exist\n retval: property value (as a string), or default if the property does\n not exist.", "id": "f17591:c0:m4"} {"signature": "@classmethoddef set(cls, prop, value):", "body": "if cls._properties is None:cls._readStdConfigFiles()cls._properties[prop] = str(value)", "docstring": "Set the value of the given configuration property.\n\n Parameters:\n ----------------------------------------------------------------\n prop: name of the property\n value: value to set", "id": "f17591:c0:m5"} {"signature": "@classmethoddef dict(cls):", "body": "if cls._properties is None:cls._readStdConfigFiles()result = dict(cls._properties)keys = list(os.environ.keys())replaceKeys = [x for x in keys if x.startswith(cls.envPropPrefix)]for envKey in replaceKeys:key = envKey[len(cls.envPropPrefix):]key = key.replace('', '')result[key] = os.environ[envKey]return result", "docstring": "Return a dict containing all of the configuration properties\n\n Parameters:\n ----------------------------------------------------------------\n retval: dict containing all configuration properties.", "id": "f17591:c0:m6"} {"signature": "@classmethoddef readConfigFile(cls, filename, path=None):", "body": "properties = cls._readConfigFile(filename, path)if cls._properties is None:cls._properties = dict()for name in properties:if '' in properties[name]:cls._properties[name] = properties[name]['']", "docstring": "Parse the given XML file and store all properties it describes.\n\n Parameters:\n ----------------------------------------------------------------\n filename: name of XML file to parse (no path)\n path: path of the XML file. 
If None, then use the standard\n configuration search path.", "id": "f17591:c0:m7"} {"signature": "@classmethoddef _readConfigFile(cls, filename, path=None):", "body": "outputProperties = dict()if path is None:filePath = cls.findConfigFile(filename)else:filePath = os.path.join(path, filename)try:if filePath is not None:try:_getLoggerBase().debug(\"\", filePath)with open(filePath, '') as inp:contents = inp.read()except Exception:raise RuntimeError(\"\" % filePath)else:try:contents = resource_string(\"\", filename)except Exception as resourceException:if filename in [USER_CONFIG, CUSTOM_CONFIG]:contents = ''else:raise resourceExceptionelements = ElementTree.XML(contents)if elements.tag != '':raise RuntimeError(\"\"\"\" % (elements.tag))propertyElements = elements.findall('')for propertyItem in propertyElements:propInfo = dict()propertyAttributes = list(propertyItem)for propertyAttribute in propertyAttributes:propInfo[propertyAttribute.tag] = propertyAttribute.textname = propInfo.get('', None)if '' in propInfo and propInfo[''] is None:value = ''else:value = propInfo.get('', None)if value is None:if '' in propInfo:continueelse:raise RuntimeError(\"\"\"\" % (str(propInfo)))restOfValue = valuevalue = ''while True:pos = restOfValue.find('')if pos == -:value += restOfValuebreakvalue += restOfValue[:pos]varTailPos = restOfValue.find('', pos)if varTailPos == -:raise RuntimeError(\"\"\"\" % (restOfValue))varname = restOfValue[pos + :varTailPos]if varname not in os.environ:raise RuntimeError(\"\"\"\" % (varname))envVarValue = os.environ[varname]value += envVarValuerestOfValue = restOfValue[varTailPos + :]if name is None:raise RuntimeError(\"\"\"\" % (str(propInfo)))propInfo[''] = valueoutputProperties[name] = propInforeturn outputPropertiesexcept Exception:_getLoggerBase().exception(\"\",filePath)raise", "docstring": "Parse the given XML file and return a dict describing the file.\n\n Parameters:\n ----------------------------------------------------------------\n filename: name of XML file to parse (no path)\n path: path of the XML file. If None, then use the standard\n configuration search path.\n retval: returns a dict with each property as a key and a dict of all\n the property's attributes as value", "id": "f17591:c0:m8"} {"signature": "@classmethoddef clear(cls):", "body": "cls._properties = Nonecls._configPaths = None", "docstring": "Clear out the entire configuration.", "id": "f17591:c0:m9"} {"signature": "@classmethoddef findConfigFile(cls, filename):", "body": "paths = cls.getConfigPaths()for p in paths:testPath = os.path.join(p, filename)if os.path.isfile(testPath):return os.path.join(p, filename)", "docstring": "Search the configuration path (specified via the NTA_CONF_PATH\n environment variable) for the given filename. 
If found, return the complete\n path to the file.\n\n Parameters:\n ----------------------------------------------------------------\n filename: name of file to locate", "id": "f17591:c0:m10"} {"signature": "@classmethoddef getConfigPaths(cls):", "body": "configPaths = []if cls._configPaths is not None:return cls._configPathselse:if '' in os.environ:configVar = os.environ['']configPaths = configVar.split(os.pathsep)return configPaths", "docstring": "Return the list of paths to search for configuration files.\n\n Parameters:\n ----------------------------------------------------------------\n retval: list of paths.", "id": "f17591:c0:m11"} {"signature": "@classmethoddef setConfigPaths(cls, paths):", "body": "cls._configPaths = list(paths)", "docstring": "Modify the paths we use to search for configuration files.\n\n Parameters:\n ----------------------------------------------------------------\n paths: list of paths to search for config files.", "id": "f17591:c0:m12"} {"signature": "@classmethoddef _readStdConfigFiles(cls):", "body": "cls.readConfigFile(DEFAULT_CONFIG)cls.readConfigFile(USER_CONFIG)", "docstring": "Read in all standard configuration files", "id": "f17591:c0:m13"} {"signature": "@classmethoddef getCustomDict(cls):", "body": "return _CustomConfigurationFileWrapper.getCustomDict()", "docstring": "Return a dict containing all custom configuration properties\n\n Parameters:\n ----------------------------------------------------------------\n retval: dict containing all custom configuration properties.", "id": "f17591:c1:m0"} {"signature": "@classmethoddef setCustomProperty(cls, propertyName, value):", "body": "cls.setCustomProperties({propertyName: value})", "docstring": "Set a single custom setting and persist it to the custom\n configuration store.\n\n Parameters:\n ----------------------------------------------------------------\n propertyName: string containing the name of the property to get\n value: value to set the property to", "id": "f17591:c1:m1"} {"signature": "@classmethoddef setCustomProperties(cls, properties):", "body": "_getLogger().info(\"\",properties, traceback.format_stack())_CustomConfigurationFileWrapper.edit(properties)for propertyName, value in properties.items():cls.set(propertyName, value)", "docstring": "Set multiple custom properties and persist them to the custom\n configuration store.\n\n Parameters:\n ----------------------------------------------------------------\n properties: a dict of property name/value pairs to set", "id": "f17591:c1:m2"} {"signature": "@classmethoddef clear(cls):", "body": "super(Configuration, cls).clear()_CustomConfigurationFileWrapper.clear(persistent=False)", "docstring": "Clear all configuration properties from in-memory cache, but do NOT\n alter the custom configuration file. 
Used in unit-testing.", "id": "f17591:c1:m3"} {"signature": "@classmethoddef resetCustomConfig(cls):", "body": "_getLogger().info(\"\"\"\", traceback.format_stack())super(Configuration, cls).clear()_CustomConfigurationFileWrapper.clear(persistent=True)", "docstring": "Clear all custom configuration settings and delete the persistent\n custom configuration store.", "id": "f17591:c1:m4"} {"signature": "@classmethoddef loadCustomConfig(cls):", "body": "cls.readConfigFile(_CustomConfigurationFileWrapper.customFileName)", "docstring": "Loads custom configuration settings from their persistent storage.\n DO NOT CALL THIS: It's typically not necessary to call this method\n directly - see NOTE below.\n\n NOTE: this method exists *solely* for the benefit of prepare_conf.py, which\n needs to load configuration files selectively.", "id": "f17591:c1:m5"} {"signature": "@classmethoddef _readStdConfigFiles(cls):", "body": "super(Configuration, cls)._readStdConfigFiles()cls.loadCustomConfig()", "docstring": "Intercept the _readStdConfigFiles call from our base config class to\n read in base and custom configuration settings.", "id": "f17591:c1:m6"} {"signature": "@classmethoddef clear(cls, persistent=False):", "body": "if persistent:try:os.unlink(cls.getPath())except OSError as e:if e.errno != errno.ENOENT:_getLogger().exception(\"\"\"\", e.errno,cls.getPath())raisecls._path = None", "docstring": "If persistent is True, delete the temporary file\n\n Parameters:\n ----------------------------------------------------------------\n persistent: if True, custom configuration file is deleted", "id": "f17591:c2:m0"} {"signature": "@classmethoddef getCustomDict(cls):", "body": "if not os.path.exists(cls.getPath()):return dict()properties = Configuration._readConfigFile(os.path.basename(cls.getPath()), os.path.dirname(cls.getPath()))values = dict()for propName in properties:if '' in properties[propName]:values[propName] = properties[propName]['']return values", "docstring": "Returns a dict of all temporary values in custom configuration file", "id": "f17591:c2:m1"} {"signature": "@classmethoddef edit(cls, properties):", "body": "copyOfProperties = copy(properties)configFilePath = cls.getPath()try:with open(configFilePath, '') as fp:contents = fp.read()except IOError as e:if e.errno != errno.ENOENT:_getLogger().exception(\"\"\"\",e.errno, configFilePath, properties)raisecontents = ''try:elements = ElementTree.XML(contents)ElementTree.tostring(elements)except Exception as e:msg = \"\"\"\" %(configFilePath, contents, type(e), e)_getLogger().exception(msg)raise RuntimeError(msg).with_traceback(sys.exc_info()[])if elements.tag != '':e = \"\" %(elements.tag)_getLogger().error(e)raise RuntimeError(e)for propertyItem in elements.findall(''):propInfo = dict((attr.tag, attr.text) for attr in propertyItem)name = propInfo['']if name in copyOfProperties:foundValues = propertyItem.findall('')if len(foundValues) > :foundValues[].text = str(copyOfProperties.pop(name))if not copyOfProperties:breakelse:e = \"\" % (name,)_getLogger().error(e)raise RuntimeError(e)for propertyName, value in copyOfProperties.items():newProp = ElementTree.Element('')nameTag = ElementTree.Element('')nameTag.text = propertyNamenewProp.append(nameTag)valueTag = ElementTree.Element('')valueTag.text = str(value)newProp.append(valueTag)elements.append(newProp)try:makeDirectoryFromAbsolutePath(os.path.dirname(configFilePath))with open(configFilePath, '') as fp:fp.write(ElementTree.tostring(elements))except Exception as e:_getLogger().exception(\"\"\"\", 
properties,configFilePath)raise", "docstring": "Edits the XML configuration file with the parameters specified by\n properties\n\n Parameters:\n ----------------------------------------------------------------\n properties: dict of settings to be applied to the custom configuration store\n (key is property name, value is value)", "id": "f17591:c2:m2"} {"signature": "@classmethoddef _setPath(cls):", "body": "cls._path = os.path.join(os.environ[''],cls.customFileName)", "docstring": "Sets the path of the custom configuration file", "id": "f17591:c2:m3"} {"signature": "@classmethoddef getPath(cls):", "body": "if cls._path is None:cls._setPath()return cls._path", "docstring": "Get the path of the custom configuration file", "id": "f17591:c2:m4"} {"signature": "def __init__(self,modelID,jobID,params,predictedField,reportKeyPatterns,optimizeKeyPattern,jobsDAO,modelCheckpointGUID,logLevel=None,predictionCacheMaxRecords=None):", "body": "super(OPFDummyModelRunner, self).__init__(modelID=modelID,jobID=jobID,predictedField=predictedField,experimentDir=None,reportKeyPatterns=reportKeyPatterns,optimizeKeyPattern=optimizeKeyPattern,jobsDAO=jobsDAO,modelCheckpointGUID=modelCheckpointGUID,logLevel=logLevel,predictionCacheMaxRecords=None)self._predictionCacheMaxRecords = predictionCacheMaxRecordsself._streamDef = copy.deepcopy(self._DUMMY_STREAMDEF)self._params = copy.deepcopy(self._DEFAULT_PARAMS)if '' in paramsand '' in params['']:self.modelIndex=params['']['']else:self.modelIndex = OPFDummyModelRunner.modelIndexOPFDummyModelRunner.modelIndex += self._loadDummyModelParameters(params)self._logger.debug(\"\", self._params)self._busyWaitTime = self._params['']self._iterations = self._params['']self._doFinalize = self._params['']self._delay = self._params['']self._sleepModelRange = self._params['']self._makeCheckpoint = self._params['']self._finalDelay = self._params['']self._exitAfter = self._params['']self.randomizeWait = self._params['']if self._busyWaitTime is not None:self.__computeWaitTime()if self._params[''] is not Noneand self._params[''] is not None:raise RuntimeError(\"\"\"\")self.metrics = Noneself.metricValue = Noneif self._params[''] is not None:self.metrics = eval(self._params[''])elif self._params[''] is not None:self.metricValue = float(self._params[''])else:self.metrics = OPFDummyModelRunner.metrics[]if self._params[''] is not None:self._model = self.__createModel(self._params[''])self.__fieldInfo = self._model.getFieldInfo()self._sysExitModelRange = self._params['']if self._sysExitModelRange is not None:self._sysExitModelRange = [int(x) for x in self._sysExitModelRange.split('')]self._delayModelRange = self._params['']if self._delayModelRange is not None:self._delayModelRange = [int(x) for x in self._delayModelRange.split('')]self._errModelRange = self._params['']if self._errModelRange is not None:self._errModelRange = [int(x) for x in self._errModelRange.split('')]self._computModelDelay()self._jobFailErr = self._params['']self._logger.debug(\"\", self._modelID, self._params)", "docstring": "Parameters:\n-------------------------------------------------------------------------\nmodelID: ID of this model in the models table\n\njobID:\n\nparams: a dictionary of parameters for this dummy model. The\n possible keys are:\n\n delay: OPTIONAL-This specifies the amount of time\n (in seconds) that the experiment should wait\n before STARTING to process records. 
This is\n useful for simulating workers that start/end\n at different times\n\n finalDelay: OPTIONAL-This specifies the amount of time\n (in seconds) that the experiment should wait\n before it conducts its finalization operations.\n These operations include checking if the model\n is the best model, and writing out checkpoints.\n\n waitTime: OPTIONAL-The amount of time (in seconds)\n to wait in a busy loop to simulate\n computation time on EACH ITERATION\n\n randomizeWait: OPTIONAL-([0.0-1.0] ). Default:None\n If set to a value, the above specified\n wait time will be randomly dithered by\n +/- % of the specified value.\n For example, if randomizeWait=0.2, the wait\n time will be dithered by +/- 20% of its value.\n\n iterations: OPTIONAL-How many iterations to run the model\n for. -1 means run forever (default=1)\n\n metricFunctions: OPTIONAL-A list of single argument functions\n serialized as strings, which return the metric\n value given the record number.\n\n Mutually exclusive with metricValue\n\n metricValue: OPTIONAL-A single value to use for the metric\n value (used to debug hypersearch).\n\n Mutually exclusive with metricFunctions\n\n finalize: OPTIONAL-(True/False). Default:True\n When False, this will prevent the model from\n recording its metrics and performing other\n functions that it usually performs after the\n model has finished running\n\n permutationParams: A dict containing the instances of all the\n variables being permuted over\n\n experimentDirectory: REQUIRED-An absolute path to a directory\n with a valid description.py file.\n\n NOTE: This does not actually affect the\n running of the model or the metrics\n produced. It is required to create certain\n objects (such as the output stream)\n\n makeCheckpoint: True to actually write a checkpoint out to\n disk (default: False)\n\n sysExitModelRange: A string containing two integers 'firstIdx,\n endIdx'. When present, if we are running the\n firstIdx'th model up to but not including the\n endIdx'th model, then do a sys.exit() while\n running the model. This causes the worker to\n exit, simulating an orphaned model.\n\n delayModelRange: A string containing two integers 'firstIdx,\n endIdx'. When present, if we are running the\n firstIdx'th model up to but not including the\n endIdx'th model, then do a delay of 10 sec.\n while running the model. This causes the\n worker to run slower and for some other worker\n to think the model should be orphaned.\n\n exitAfter: The number of iterations after which the model\n should perform a sys exit. This is an\n alternative way of creating an orphaned model\n that uses the dummy model's modelIndex\n instead of the modelID\n\n errModelRange: A string containing two integers 'firstIdx,\n endIdx'. When present, if we are running the\n firstIdx'th model up to but not including the\n endIdx'th model, then raise an exception while\n running the model. This causes the model to\n fail with a CMPL_REASON_ERROR reason\n\n sleepModelRange: A string containing 3 integers 'firstIdx,\n endIdx: delay'. When present, if we are running\n the firstIdx'th model up to but not including\n the endIdx'th model, then sleep for delay\n seconds at the beginning of the run.\n\n jobFailErr: If true, the model will raise a JobFailException\n which should cause the job to be marked as\n failed and immediately cancel all other workers.\n\npredictedField: Name of the input field for which this model is being\n optimized\n\nreportKeyPatterns: list of items from the results dict to include in\n the report. 
These can be regular expressions.\noptimizeKeyPattern: Which report item, if any, we will be optimizing for.\n This can also be a regular expression, but is an error\n if it matches more than one key from the experiment's\n results.\n\njobsDAO: Jobs data access object - the interface to the\n jobs database which has the model's table.\n\nmodelCheckpointGUID:\n A persistent, globally-unique identifier for\n constructing the model checkpoint key\n\nlogLevel: override logging level to this value, if not None\npredictionCacheMaxRecords:\n Maximum number of records for the prediction output cache.\n Pass None for the default value.", "id": "f17592:c0:m0"} {"signature": "def _loadDummyModelParameters(self, params):", "body": "for key, value in params.iteritems():if type(value) == list:index = self.modelIndex % len(params[key])self._params[key] = params[key][index]else:self._params[key] = params[key]", "docstring": "Loads all the parameters for this dummy model. For any paramters\n specified as lists, read the appropriate value for this model using the model\n index", "id": "f17592:c0:m1"} {"signature": "def _computModelDelay(self):", "body": "if self._params[''] is not Noneand self._params[''] is not None:raise RuntimeError(\"\"\"\")if self._sleepModelRange is not None:range, delay = self._sleepModelRange.split('')delay = float(delay)range = map(int, range.split(''))modelIDs = self._jobsDAO.jobGetModelIDs(self._jobID)modelIDs.sort()range[] = min(range[], len(modelIDs))if self._modelID in modelIDs[range[]:range[]]:self._delay = delayelse:self._delay = self._params['']", "docstring": "Computes the amount of time (if any) to delay the run of this model.\n This can be determined by two mutually exclusive parameters:\n delay and sleepModelRange.\n\n 'delay' specifies the number of seconds a model should be delayed. If a list\n is specified, the appropriate amount of delay is determined by using the\n model's modelIndex property.\n\n However, this doesn't work when testing orphaned models, because the\n modelIndex will be the same for every recovery attempt. Therefore, every\n recovery attempt will also be delayed and potentially orphaned.\n\n 'sleepModelRange' doesn't use the modelIndex property for a model, but rather\n sees which order the model is in the database, and uses that to determine\n whether or not a model should be delayed.", "id": "f17592:c0:m2"} {"signature": "def _getMetrics(self):", "body": "metric = Noneif self.metrics is not None:metric = self.metrics(self._currentRecordIndex+)elif self.metricValue is not None:metric = self.metricValueelse:raise RuntimeError('')return {self._optimizeKeyPattern:metric}", "docstring": "Protected function that can be overridden by subclasses. 
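The list-valued dummy-model parameters above are resolved per model: the loader picks the modelIndex-th entry (modulo the list length) from any list-valued parameter and copies scalar values through unchanged. A minimal stand-alone sketch of that selection rule, with illustrative names rather than the real class:

    def resolveDummyParams(params, modelIndex, defaults):
        # Start from the defaults, then overlay the per-model selections.
        resolved = dict(defaults)
        for key, value in params.items():
            if isinstance(value, list):
                # List-valued parameter: each model index picks its own entry.
                resolved[key] = value[modelIndex % len(value)]
            else:
                resolved[key] = value
        return resolved

    # Model index 3 with a two-element waitTime list gets the second entry.
    print(resolveDummyParams({"waitTime": [0.1, 0.25], "iterations": 10},
                             modelIndex=3, defaults={"delay": None}))
    # -> {'delay': None, 'waitTime': 0.25, 'iterations': 10}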
Its main purpose\n is to allow the the OPFDummyModelRunner to override this with deterministic\n values\n\n Returns: All the metrics being computed for this model", "id": "f17592:c0:m3"} {"signature": "def run(self):", "body": "self._logger.debug(\"\" % (self._modelID))periodic = self._initPeriodicActivities()self._optimizedMetricLabel = self._optimizeKeyPatternself._reportMetricLabels = [self._optimizeKeyPattern]if self._iterations >= :iterTracker = iter(xrange(self._iterations))else:iterTracker = iter(itertools.count())doSysExit = Falseif self._sysExitModelRange is not None:modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)modelIDs = [x[] for x in modelAndCounters]modelIDs.sort()(beg,end) = self._sysExitModelRangeif self._modelID in modelIDs[int(beg):int(end)]:doSysExit = Trueif self._delayModelRange is not None:modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)modelIDs = [x[] for x in modelAndCounters]modelIDs.sort()(beg,end) = self._delayModelRangeif self._modelID in modelIDs[int(beg):int(end)]:time.sleep()if self._errModelRange is not None:modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)modelIDs = [x[] for x in modelAndCounters]modelIDs.sort()(beg,end) = self._errModelRangeif self._modelID in modelIDs[int(beg):int(end)]:raise RuntimeError(\"\")if self._delay is not None:time.sleep(self._delay)self._currentRecordIndex = while True:if self._isKilled:breakif self._isCanceled:breakif self._isMature:if not self._isBestModel:self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPEDbreakelse:self._cmpReason = self._jobsDAO.CMPL_REASON_EOFtry:self._currentRecordIndex = next(iterTracker)except StopIteration:breakself._writePrediction(ModelResult(None, None, None, None))periodic.tick()if self.__shouldSysExit(self._currentRecordIndex):sys.exit()if self._busyWaitTime is not None:time.sleep(self._busyWaitTime)self.__computeWaitTime()if doSysExit:sys.exit()if self._jobFailErr:raise utils.JobFailException(\"\",\"\")if self._doFinalize:if not self._makeCheckpoint:self._model = Noneif self._finalDelay is not None:time.sleep(self._finalDelay)self._finalize()self._logger.info(\"\"% (self._modelID))return (self._cmpReason, None)", "docstring": "Runs the given OPF task against the given Model instance", "id": "f17592:c0:m4"} {"signature": "def _createPredictionLogger(self):", "body": "class DummyLogger:def writeRecord(self, record): passdef writeRecords(self, records, progressCB): passdef close(self): passself._predictionLogger = DummyLogger()", "docstring": "Creates the model's PredictionLogger object, which is an interface to write\nmodel results to a permanent storage location", "id": "f17592:c0:m7"} {"signature": "def __shouldSysExit(self, iteration):", "body": "if self._exitAfter is Noneor iteration < self._exitAfter:return Falseresults = self._jobsDAO.modelsGetFieldsForJob(self._jobID, [''])modelIDs = [e[] for e in results]modelNums = [json.loads(e[][])[''][''] for e in results]sameModelNumbers = filter(lambda x: x[] == self.modelIndex,zip(modelIDs, modelNums))firstModelID = min(zip(*sameModelNumbers)[])return firstModelID == self._modelID", "docstring": "Checks to see if the model should exit based on the exitAfter dummy\nparameter", "id": "f17592:c0:m8"} {"signature": "def __init__(self,modelID,jobID,predictedField,experimentDir,reportKeyPatterns,optimizeKeyPattern,jobsDAO,modelCheckpointGUID,logLevel=None,predictionCacheMaxRecords=None):", "body": "self._MIN_RECORDS_TO_BE_BEST = int(Configuration.get(''))self._MATURITY_MAX_CHANGE = 
float(Configuration.get(''))self._MATURITY_NUM_POINTS = int(Configuration.get(''))self._modelID = modelIDself._jobID = jobIDself._predictedField = predictedFieldself._experimentDir = experimentDirself._reportKeyPatterns = reportKeyPatternsself._optimizeKeyPattern = optimizeKeyPatternself._jobsDAO = jobsDAOself._modelCheckpointGUID = modelCheckpointGUIDself._predictionCacheMaxRecords = predictionCacheMaxRecordsself._isMaturityEnabled = bool(int(Configuration.get('')))self._logger = logging.getLogger(\"\".join( ['',self.__class__.__module__, self.__class__.__name__]))self._optimizedMetricLabel = Noneself._reportMetricLabels = []self._cmpReason = ClientJobsDAO.CMPL_REASON_EOFif logLevel is not None:self._logger.setLevel(logLevel)self.__metricMgr = Noneself.__task = Noneself._periodic = Noneself._streamDef = Noneself._model = Noneself._inputSource = Noneself._currentRecordIndex = Noneself._predictionLogger = Noneself.__predictionCache = deque()self._isBestModel = Falseself._isBestModelStored = Falseself._isCanceled = Falseself._isKilled = Falseself._isMature = Falseself._isInterrupted = threading.Event()self._metricRegression = regression.AveragePctChange(windowSize=self._MATURITY_NUM_POINTS)self.__loggedMetricPatterns = []", "docstring": "Parameters:\n-------------------------------------------------------------------------\nmodelID: ID for this model in the models table\n\njobID: ID for this hypersearch job in the jobs table\npredictedField: Name of the input field for which this model is being\n optimized\nexperimentDir: Directory path containing the experiment's\n description.py script\nreportKeyPatterns: list of items from the results dict to include in\n the report. These can be regular expressions.\noptimizeKeyPattern: Which report item, if any, we will be optimizing for.\n This can also be a regular expression, but is an error\n if it matches more than one key from the experiment's\n results.\njobsDAO: Jobs data access object - the interface to the\n jobs database which has the model's table.\nmodelCheckpointGUID:\n A persistent, globally-unique identifier for\n constructing the model checkpoint key. 
If None, then\n don't bother creating a model checkpoint.\nlogLevel: override logging level to this value, if not None\npredictionCacheMaxRecords:\n Maximum number of records for the prediction output cache.\n Pass None for default value.", "id": "f17593:c0:m0"} {"signature": "def run(self):", "body": "descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(self._experimentDir)expIface = helpers.getExperimentDescriptionInterfaceFromModule(descriptionPyModule)expIface.normalizeStreamSources()modelDescription = expIface.getModelDescription()self._modelControl = expIface.getModelControl()streamDef = self._modelControl['']from nupic.data.stream_reader import StreamReaderreadTimeout = self._inputSource = StreamReader(streamDef, isBlocking=False,maxTimeout=readTimeout)fieldStats = self._getFieldStats()self._model = ModelFactory.create(modelDescription)self._model.setFieldStatistics(fieldStats)self._model.enableLearning()self._model.enableInference(self._modelControl.get(\"\", None))self.__metricMgr = MetricsManager(self._modelControl.get('',None),self._model.getFieldInfo(),self._model.getInferenceType())self.__loggedMetricPatterns = self._modelControl.get(\"\", [])self._optimizedMetricLabel = self.__getOptimizedMetricLabel()self._reportMetricLabels = matchPatterns(self._reportKeyPatterns,self._getMetricLabels())self._periodic = self._initPeriodicActivities()numIters = self._modelControl.get('', -)learningOffAt = NoneiterationCountInferOnly = self._modelControl.get('', )if iterationCountInferOnly == -:self._model.disableLearning()elif iterationCountInferOnly > :assert numIters > iterationCountInferOnly, \"\"\"\"\"\"learningOffAt = numIters - iterationCountInferOnlyself.__runTaskMainLoop(numIters, learningOffAt=learningOffAt)self._finalize()return (self._cmpReason, None)", "docstring": "Runs the OPF Model\n\n Parameters:\n -------------------------------------------------------------------------\n retval: (completionReason, completionMsg)\n where completionReason is one of the ClientJobsDAO.CMPL_REASON_XXX\n equates.", "id": "f17593:c0:m1"} {"signature": "def __runTaskMainLoop(self, numIters, learningOffAt=None):", "body": "self._model.resetSequenceStates()self._currentRecordIndex = -while True:if self._isKilled:breakif self._isCanceled:breakif self._isInterrupted.isSet():self.__setAsOrphaned()breakif self._isMature:if not self._isBestModel:self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPEDbreakelse:self._cmpReason = self._jobsDAO.CMPL_REASON_EOFif learningOffAt is not Noneand self._currentRecordIndex == learningOffAt:self._model.disableLearning()try:inputRecord = self._inputSource.getNextRecordDict()if self._currentRecordIndex < :self._inputSource.setTimeout()except Exception as e:raise utils.JobFailException(ErrorCodes.streamReading, str(e.args),traceback.format_exc())if inputRecord is None:self._cmpReason = self._jobsDAO.CMPL_REASON_EOFbreakif inputRecord:self._currentRecordIndex += result = self._model.run(inputRecord=inputRecord)result.metrics = self.__metricMgr.update(result)if not result.metrics:result.metrics = self.__metricMgr.getMetrics()if InferenceElement.encodings in result.inferences:result.inferences.pop(InferenceElement.encodings)result.sensorInput.dataEncodings = Noneself._writePrediction(result)self._periodic.tick()if numIters >= and self._currentRecordIndex >= numIters-:breakelse:raise ValueError(\"\" %inputRecord)", "docstring": "Main loop of the OPF Model Runner.\n\n Parameters:\n -----------------------------------------------------------------------\n\n 
recordIterator: Iterator for counting number of records (see _runTask)\n learningOffAt: If not None, learning is turned off when we reach this\n iteration number", "id": "f17593:c0:m2"} {"signature": "def _finalize(self):", "body": "self._logger.info(\"\",self._modelID, self._currentRecordIndex + )self._updateModelDBResults()if not self._isKilled:self.__updateJobResults()else:self.__deleteOutputCache(self._modelID)if self._predictionLogger:self._predictionLogger.close()if self._inputSource: self._inputSource.close()", "docstring": "Run final activities after a model has run. These include recording and\n logging the final score", "id": "f17593:c0:m3"} {"signature": "def __createModelCheckpoint(self):", "body": "if self._model is None or self._modelCheckpointGUID is None:returnif self._predictionLogger is None:self._createPredictionLogger()predictions = io.StringIO()self._predictionLogger.checkpoint(checkpointSink=predictions,maxRows=int(Configuration.get('')))self._model.save(os.path.join(self._experimentDir, str(self._modelCheckpointGUID)))self._jobsDAO.modelSetFields(modelID,{'':str(self._modelCheckpointGUID)},ignoreUnchanged=True)self._logger.info(\"\"\"\", self._modelID, checkpointID)return", "docstring": "Create a checkpoint from the current model, and store it in a dir named\n after checkpoint GUID, and finally store the GUID in the Models DB", "id": "f17593:c0:m4"} {"signature": "def __deleteModelCheckpoint(self, modelID):", "body": "checkpointID =self._jobsDAO.modelsGetFields(modelID, [''])[]if checkpointID is None:returntry:shutil.rmtree(os.path.join(self._experimentDir, str(self._modelCheckpointGUID)))except:self._logger.warn(\"\"\"\",checkpointID)returnself._jobsDAO.modelSetFields(modelID,{'':None},ignoreUnchanged=True)return", "docstring": "Delete the stored checkpoint for the specified modelID. This function is\ncalled if the current model is now the best model, making the old model's\ncheckpoint obsolete\n\nParameters:\n-----------------------------------------------------------------------\nmodelID: The modelID for the checkpoint to delete. This is NOT the\n unique checkpointID", "id": "f17593:c0:m5"} {"signature": "def _createPredictionLogger(self):", "body": "self._predictionLogger = BasicPredictionLogger(fields=self._model.getFieldInfo(),experimentDir=self._experimentDir,label = \"\",inferenceType=self._model.getInferenceType())if self.__loggedMetricPatterns:metricLabels = self.__metricMgr.getMetricLabels()loggedMetrics = matchPatterns(self.__loggedMetricPatterns, metricLabels)self._predictionLogger.setLoggedMetrics(loggedMetrics)", "docstring": "Creates the model's PredictionLogger object, which is an interface to write\nmodel results to a permanent storage location", "id": "f17593:c0:m6"} {"signature": "def __getOptimizedMetricLabel(self):", "body": "matchingKeys = matchPatterns([self._optimizeKeyPattern],self._getMetricLabels())if len(matchingKeys) == :raise Exception(\"\"\"\" %(self._optimizeKeyPattern, self._getMetricLabels()))elif len(matchingKeys) > :raise Exception(\"\"\"\" % (self._optimizeKeyPattern, matchingKeys))return matchingKeys[]", "docstring": "Get the label for the metric being optimized. 
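When the optimized metric label is resolved, the optimize pattern is matched against every computed metric label, and anything other than exactly one full match is treated as an error. A hedged re-implementation of that rule (the real code goes through a matchPatterns helper; this loop is only illustrative):

    import re

    def resolveOptimizedLabel(optimizePattern, metricLabels):
        regex = re.compile(optimizePattern)
        matches = []
        for label in metricLabels:
            m = regex.match(label)
            # Only count matches that cover the whole label.
            if m is not None and m.end() == len(label):
                matches.append(label)
        if not matches:
            raise Exception("No metric label matched %r (labels: %s)"
                            % (optimizePattern, metricLabels))
        if len(matches) > 1:
            raise Exception("Pattern %r is ambiguous; it matched %s"
                            % (optimizePattern, matches))
        return matches[0]

    labels = ["prediction:aae:window=1000:field=consumption",
              "prediction:anomalyScore"]
    print(resolveOptimizedLabel(r".*aae.*", labels))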
This function also caches\n the label in the instance variable self._optimizedMetricLabel\n\n Parameters:\n -----------------------------------------------------------------------\n metricLabels: A sequence of all the labels being computed for this model\n\n Returns: The label for the metric being optmized over", "id": "f17593:c0:m7"} {"signature": "def _getMetricLabels(self):", "body": "return self.__metricMgr.getMetricLabels()", "docstring": "Returns: A list of labels that correspond to metrics being computed", "id": "f17593:c0:m8"} {"signature": "def _getFieldStats(self):", "body": "fieldStats = dict()fieldNames = self._inputSource.getFieldNames()for field in fieldNames:curStats = dict()curStats[''] = self._inputSource.getFieldMin(field)curStats[''] = self._inputSource.getFieldMax(field)fieldStats[field] = curStatsreturn fieldStats", "docstring": "Method which returns a dictionary of field statistics received from the\ninput source.\n\nReturns:\n\n fieldStats: dict of dicts where the first level is the field name and\n the second level is the statistic. ie. fieldStats['pounds']['min']", "id": "f17593:c0:m9"} {"signature": "def _getMetrics(self):", "body": "return self.__metricMgr.getMetrics()", "docstring": "Protected function that can be overriden by subclasses. Its main purpose\n is to allow the the OPFDummyModelRunner to override this with deterministic\n values\n\n Returns: All the metrics being computed for this model", "id": "f17593:c0:m10"} {"signature": "def _updateModelDBResults(self):", "body": "metrics = self._getMetrics()reportDict = dict([(k,metrics[k]) for k in self._reportMetricLabels])metrics = self._getMetrics()optimizeDict = dict()if self._optimizeKeyPattern is not None:optimizeDict[self._optimizedMetricLabel] =metrics[self._optimizedMetricLabel]results = json.dumps((metrics , optimizeDict))self._jobsDAO.modelUpdateResults(self._modelID, results=results,metricValue=list(optimizeDict.values())[],numRecords=(self._currentRecordIndex + ))self._logger.debug(\"\" %(self._modelID, self._currentRecordIndex + , results))return", "docstring": "Retrieves the current results and updates the model's record in\n the Model database.", "id": "f17593:c0:m11"} {"signature": "def __updateJobResultsPeriodic(self):", "body": "if self._isBestModelStored and not self._isBestModel:returnwhile True:jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, [''])[]if jobResultsStr is None:jobResults = {}else:self._isBestModelStored = Trueif not self._isBestModel:returnjobResults = json.loads(jobResultsStr)bestModel = jobResults.get('', None)bestMetric = jobResults.get('', None)isSaved = jobResults.get('', False)if (bestModel is not None) and (self._modelID != bestModel):self._isBestModel = Falsereturnself.__flushPredictionCache()self._jobsDAO.modelUpdateTimestamp(self._modelID)metrics = self._getMetrics()jobResults[''] = self._modelIDjobResults[''] = metrics[self._optimizedMetricLabel]jobResults[''] = metricsjobResults[''] = FalsenewResults = json.dumps(jobResults)isUpdated = self._jobsDAO.jobSetFieldIfEqual(self._jobID,fieldName='',curValue=jobResultsStr,newValue=newResults)if isUpdated or (not isUpdated and newResults==jobResultsStr):self._isBestModel = Truebreak", "docstring": "Periodic check to see if this is the best model. 
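The periodic best-model bookkeeping relies on an optimistic read-modify-write: the job's results field is read, modified, and written back only if it is still unchanged, retrying when another worker wins the race. A generic sketch of that loop, assuming a set-if-equal style primitive; the field names used here are illustrative, not the stored schema:

    import json

    def claimBestModel(readResults, setResultsIfEqual, modelID, metricValue):
        """Sketch of an optimistic-concurrency update of a shared results blob."""
        while True:
            current = readResults()                     # raw JSON string or None
            results = json.loads(current) if current else {}
            best = results.get("bestModel")
            if best is not None and best != modelID:
                return False                            # another model already won
            results["bestModel"] = modelID              # illustrative key names
            results["bestValue"] = metricValue
            if setResultsIfEqual(curValue=current, newValue=json.dumps(results)):
                return True                             # write landed un-raced
            # Someone else updated the field first: loop and re-read.

    # Tiny in-memory stand-in for the DB field, just to exercise the loop.
    state = {"value": None}
    readResults = lambda: state["value"]
    def setResultsIfEqual(curValue, newValue):
        if state["value"] == curValue:
            state["value"] = newValue
            return True
        return False

    print(claimBestModel(readResults, setResultsIfEqual, modelID=42, metricValue=0.17))
    print(state["value"])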
This should only have an\neffect if this is the *first* model to report its progress", "id": "f17593:c0:m12"} {"signature": "def __checkIfBestCompletedModel(self):", "body": "jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, [''])[]if jobResultsStr is None:jobResults = {}else:jobResults = json.loads(jobResultsStr)isSaved = jobResults.get('', False)bestMetric = jobResults.get('', None)currentMetric = self._getMetrics()[self._optimizedMetricLabel]self._isBestModel = (not isSaved)or (currentMetric < bestMetric)return self._isBestModel, jobResults, jobResultsStr", "docstring": "Reads the current \"best model\" for the job and returns whether or not the\ncurrent model is better than the \"best model\" stored for the job\n\nReturns: (isBetter, storedBest, origResultsStr)\n\nisBetter:\n True if the current model is better than the stored \"best model\"\nstoredResults:\n A dict of the currently stored results in the jobs table record\norigResultsStr:\n The json-encoded string that currently resides in the \"results\" field\n of the jobs record (used to create atomicity)", "id": "f17593:c0:m13"} {"signature": "def __updateJobResults(self):", "body": "isSaved = Falsewhile True:self._isBestModel, jobResults, jobResultsStr =self.__checkIfBestCompletedModel()if self._isBestModel:if not isSaved:self.__flushPredictionCache()self._jobsDAO.modelUpdateTimestamp(self._modelID)self.__createModelCheckpoint()self._jobsDAO.modelUpdateTimestamp(self._modelID)isSaved = TrueprevBest = jobResults.get('', None)prevWasSaved = jobResults.get('', False)if prevBest == self._modelID:assert not prevWasSavedmetrics = self._getMetrics()jobResults[''] = self._modelIDjobResults[''] = metrics[self._optimizedMetricLabel]jobResults[''] = metricsjobResults[''] = TrueisUpdated = self._jobsDAO.jobSetFieldIfEqual(self._jobID,fieldName='',curValue=jobResultsStr,newValue=json.dumps(jobResults))if isUpdated:if prevWasSaved:self.__deleteOutputCache(prevBest)self._jobsDAO.modelUpdateTimestamp(self._modelID)self.__deleteModelCheckpoint(prevBest)self._jobsDAO.modelUpdateTimestamp(self._modelID)self._logger.info(\"\", self._modelID)breakelse:self.__deleteOutputCache(self._modelID)self._jobsDAO.modelUpdateTimestamp(self._modelID)self.__deleteModelCheckpoint(self._modelID)self._jobsDAO.modelUpdateTimestamp(self._modelID)break", "docstring": "Check if this is the best model\nIf so:\n 1) Write it's checkpoint\n 2) Record this model as the best\n 3) Delete the previous best's output cache\nOtherwise:\n 1) Delete our output cache", "id": "f17593:c0:m14"} {"signature": "def _writePrediction(self, result):", "body": "self.__predictionCache.append(result)if self._isBestModel:self.__flushPredictionCache()", "docstring": "Writes the results of one iteration of a model. The results are written to\nthis ModelRunner's in-memory cache unless this model is the \"best model\" for\nthe job. If this model is the \"best model\", the predictions are written out\nto a permanent store via a prediction output stream instance\n\n\nParameters:\n-----------------------------------------------------------------------\nresult: A opf_utils.ModelResult object, which contains the input and\n output for this iteration", "id": "f17593:c0:m15"} {"signature": "def __writeRecordsCallback(self):", "body": "self._jobsDAO.modelUpdateResults(self._modelID)", "docstring": "This callback is called by self.__predictionLogger.writeRecords()\n between each batch of records it writes. 
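The prediction-writing path described here buffers results in memory and only flushes them to durable storage while the model is the current best, pinging a progress callback between batches so the model still looks alive during long writes. A simplified sketch of that buffering pattern, with illustrative names:

    from collections import deque

    class PredictionBuffer(object):
        def __init__(self, writeBatch, keepAlive, batchSize=100):
            self._cache = deque()
            self._writeBatch = writeBatch    # writes a list of results durably
            self._keepAlive = keepAlive      # e.g. touches a timestamp in the jobs DB
            self._batchSize = batchSize

        def write(self, result, isBestModel):
            self._cache.append(result)
            if isBestModel:
                self.flush()

        def flush(self):
            # Drain the cache in batches, signalling liveness between batches.
            while self._cache:
                batch = [self._cache.popleft()
                         for _ in range(min(self._batchSize, len(self._cache)))]
                self._writeBatch(batch)
                self._keepAlive()

    buf = PredictionBuffer(writeBatch=lambda b: print("wrote", len(b), "records"),
                           keepAlive=lambda: None, batchSize=2)
    for i in range(5):
        buf.write({"iteration": i}, isBestModel=False)
    buf.flush()   # e.g. once the model becomes the best, flush everything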
It gives us a chance to say that\n the model is 'still alive' during long write operations.", "id": "f17593:c0:m16"} {"signature": "def __flushPredictionCache(self):", "body": "if not self.__predictionCache:returnif self._predictionLogger is None:self._createPredictionLogger()startTime = time.time()self._predictionLogger.writeRecords(self.__predictionCache,progressCB=self.__writeRecordsCallback)self._logger.info(\"\",len(self.__predictionCache), time.time() - startTime)self.__predictionCache.clear()", "docstring": "Writes the contents of this model's in-memory prediction cache to a permanent\nstore via the prediction output stream instance", "id": "f17593:c0:m17"} {"signature": "def __deleteOutputCache(self, modelID):", "body": "if modelID == self._modelID and self._predictionLogger is not None:self._predictionLogger.close()del self.__predictionCacheself._predictionLogger = Noneself.__predictionCache = None", "docstring": "Delete's the output cache associated with the given modelID. This actually\nclears up the resources associated with the cache, rather than deleting al\nthe records in the cache\n\nParameters:\n-----------------------------------------------------------------------\nmodelID: The id of the model whose output cache is being deleted", "id": "f17593:c0:m18"} {"signature": "def _initPeriodicActivities(self):", "body": "updateModelDBResults = PeriodicActivityRequest(repeating=True,period=,cb=self._updateModelDBResults)updateJobResults = PeriodicActivityRequest(repeating=True,period=,cb=self.__updateJobResultsPeriodic)checkCancelation = PeriodicActivityRequest(repeating=True,period=,cb=self.__checkCancelation)checkMaturity = PeriodicActivityRequest(repeating=True,period=,cb=self.__checkMaturity)updateJobResultsFirst = PeriodicActivityRequest(repeating=False,period=,cb=self.__updateJobResultsPeriodic)periodicActivities = [updateModelDBResults,updateJobResultsFirst,updateJobResults,checkCancelation]if self._isMaturityEnabled:periodicActivities.append(checkMaturity)return PeriodicActivityMgr(requestedActivities=periodicActivities)", "docstring": "Creates and returns a PeriodicActivityMgr instance initialized with\n our periodic activities\n\n Parameters:\n -------------------------------------------------------------------------\n retval: a PeriodicActivityMgr instance", "id": "f17593:c0:m19"} {"signature": "def __checkCancelation(self):", "body": "print(\"\", file=sys.stderr)jobCancel = self._jobsDAO.jobGetFields(self._jobID, [''])[]if jobCancel:self._cmpReason = ClientJobsDAO.CMPL_REASON_KILLEDself._isCanceled = Trueself._logger.info(\"\",self._modelID, self._jobID)else:stopReason = self._jobsDAO.modelsGetFields(self._modelID, [''])[]if stopReason is None:passelif stopReason == ClientJobsDAO.STOP_REASON_KILLED:self._cmpReason = ClientJobsDAO.CMPL_REASON_KILLEDself._isKilled = Trueself._logger.info(\"\",self._modelID)elif stopReason == ClientJobsDAO.STOP_REASON_STOPPED:self._cmpReason = ClientJobsDAO.CMPL_REASON_STOPPEDself._isCanceled = Trueself._logger.info(\"\", self._modelID)else:raise RuntimeError (\"\" % (stopReason))", "docstring": "Check if the cancelation flag has been set for this model\n in the Model DB", "id": "f17593:c0:m20"} {"signature": "def __checkMaturity(self):", "body": "if self._currentRecordIndex+ < self._MIN_RECORDS_TO_BE_BEST:returnif self._isMature:returnmetric = self._getMetrics()[self._optimizedMetricLabel]self._metricRegression.addPoint(x=self._currentRecordIndex, y=metric)Perform a linear regression to see if the error is leveled offpctChange, 
absPctChange = self._metricRegression.getPctChanges()if pctChange is not None and absPctChange <= self._MATURITY_MAX_CHANGE:self._jobsDAO.modelSetFields(self._modelID,{'':True})self._cmpReason = ClientJobsDAO.CMPL_REASON_STOPPEDself._isMature = Trueself._logger.info(\"\"\"\"\"\",self._modelID, pctChange,self._MATURITY_NUM_POINTS,self._metricRegression._window)", "docstring": "Save the current metric value and see if the model's performance has\n 'leveled off.' We do this by looking at some number of previous number of\n recordings", "id": "f17593:c0:m21"} {"signature": "def handleWarningSignal(self, signum, frame):", "body": "self._isInterrupted.set()", "docstring": "Handles a \"warning signal\" from the scheduler. This is received when the\nscheduler is about to kill the the current process so that the worker can be\nallocated to another job.\n\nRight now, this function just sets the current model to the \"Orphaned\" state\nin the models table so that another worker can eventually re-run this model\n\nParameters:\n-----------------------------------------------------------------------", "id": "f17593:c0:m22"} {"signature": "def __setAsOrphaned(self):", "body": "cmplReason = ClientJobsDAO.CMPL_REASON_ORPHANcmplMessage = \"\"self._jobsDAO.modelSetCompleted(self._modelID, cmplReason, cmplMessage)", "docstring": "Sets the current model as orphaned. This is called when the scheduler is\nabout to kill the process to reallocate the worker to a different process.", "id": "f17593:c0:m23"} {"signature": "@staticmethoddef getInputElement(inferenceElement):", "body": "return InferenceElement.__inferenceInputMap.get(inferenceElement, None)", "docstring": "Get the sensor input element that corresponds to the given inference\n element. This is mainly used for metrics and prediction logging", "id": "f17595:c0:m0"} {"signature": "@staticmethoddef isTemporal(inferenceElement):", "body": "if InferenceElement.__temporalInferenceElements is None:InferenceElement.__temporalInferenceElements =set([InferenceElement.prediction])return inferenceElement in InferenceElement.__temporalInferenceElements", "docstring": "Returns True if the inference from this timestep is predicted the input\n for the NEXT timestep.\n\n NOTE: This should only be checked IF THE MODEL'S INFERENCE TYPE IS ALSO\n TEMPORAL. That is, a temporal model CAN have non-temporal inference elements,\n but a non-temporal model CANNOT have temporal inference elements", "id": "f17595:c0:m1"} {"signature": "@staticmethoddef getTemporalDelay(inferenceElement, key=None):", "body": "if inferenceElement in (InferenceElement.prediction,InferenceElement.encodings):return if inferenceElement in (InferenceElement.anomalyScore,InferenceElement.anomalyLabel,InferenceElement.classification,InferenceElement.classConfidences):return if inferenceElement in (InferenceElement.multiStepPredictions,InferenceElement.multiStepBestPredictions):return int(key)return ", "docstring": "Returns the number of records that elapse between when an inference is\n made and when the corresponding input record will appear. 
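Because an inference made at record t may refer to record t+delay, scoring has to hold each prediction back until its matching input arrives. A small illustrative buffer that applies the delay semantics described here (names are not from the source):

    from collections import deque

    def alignedPairs(records, predictions, delay):
        """Yield (actualRecord, predictionMadeDelayRecordsAgo) pairs."""
        pending = deque()
        for record, prediction in zip(records, predictions):
            pending.append(prediction)
            if len(pending) > delay:
                yield record, pending.popleft()

    # A 1-step-ahead prediction (delay=1) is scored against the *next* record.
    for actual, predicted in alignedPairs([10, 11, 12, 13], [11, 12, 13, 14], delay=1):
        print(actual, predicted)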
For example, a\n multistep prediction for 3 timesteps out will have a delay of 3\n\n\n Parameters:\n -----------------------------------------------------------------------\n\n inferenceElement: The InferenceElement value being delayed\n key: If the inference is a dictionary type, this specifies\n key for the sub-inference that is being delayed", "id": "f17595:c0:m2"} {"signature": "@staticmethoddef getMaxDelay(inferences):", "body": "maxDelay = for inferenceElement, inference in inferences.iteritems():if isinstance(inference, dict):for key in inference.iterkeys():maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement,key),maxDelay)else:maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement),maxDelay)return maxDelay", "docstring": "Returns the maximum delay for the InferenceElements in the inference\ndictionary\n\nParameters:\n-----------------------------------------------------------------------\ninferences: A dictionary where the keys are InferenceElements", "id": "f17595:c0:m3"} {"signature": "@staticmethoddef isTemporal(inferenceType):", "body": "if InferenceType.__temporalInferenceTypes is None:InferenceType.__temporalInferenceTypes =set([InferenceType.TemporalNextStep,InferenceType.TemporalClassification,InferenceType.TemporalAnomaly,InferenceType.TemporalMultiStep,InferenceType.NontemporalMultiStep])return inferenceType in InferenceType.__temporalInferenceTypes", "docstring": "Returns True if the inference type is 'temporal', i.e. requires a\n temporal memory in the network.", "id": "f17595:c1:m0"} {"signature": "def __init__(self, jobID, jobsDAO, logLevel = None):", "body": "self._jobID = jobIDself._cjDB = jobsDAOself._lastUpdateAttemptTime = initLogging(verbose = True)self.logger = logging.getLogger(\"\".join( ['',self.__class__.__module__, self.__class__.__name__]))if logLevel is not None:self.logger.setLevel(logLevel)self.logger.info(\"\" % str(jobID))", "docstring": "TODO: Documentation", "id": "f17596:c0:m0"} {"signature": "def updateResultsForJob(self, forceUpdate=True):", "body": "updateInterval = time.time() - self._lastUpdateAttemptTimeif updateInterval < self._MIN_UPDATE_INTERVAL and not forceUpdate:returnself.logger.info(\"\"\"\"%(self._jobID,time.time(),self._lastUpdateAttemptTime))timestampUpdated = self._cjDB.jobUpdateSelectionSweep(self._jobID,self._MIN_UPDATE_INTERVAL)if not timestampUpdated:self.logger.info(\"\"\"\"%(self._jobID, self._lastUpdateAttemptTime))if not forceUpdate:returnself._lastUpdateAttemptTime = time.time()self.logger.info(\"\"%(self._jobID, self._lastUpdateAttemptTime))minUpdateRecords = self._MIN_UPDATE_THRESHOLDjobResults = self._getJobResults()if forceUpdate or jobResults is None:minUpdateRecords = candidateIDs, bestMetric = self._cjDB.modelsGetCandidates(self._jobID, minUpdateRecords)self.logger.info(\"\"%(candidateIDs, bestMetric, self._jobID))if len(candidateIDs) == :returnself._jobUpdateCandidate(candidateIDs[], bestMetric, results=jobResults)", "docstring": "Chooses the best model for a given job.\n\n Parameters\n -----------------------------------------------------------------------\n forceUpdate: (True/False). If True, the update will ignore all the\n restrictions on the minimum time to update and the minimum\n number of records to update. 
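The model chooser only re-evaluates candidates when enough time has passed since the last attempt, unless forceUpdate overrides the throttle (typically once the job has finished). A bare-bones version of that guard, with illustrative names and interval:

    import time

    class ThrottledUpdater(object):
        MIN_UPDATE_INTERVAL = 5.0           # seconds; illustrative value

        def __init__(self, doUpdate):
            self._doUpdate = doUpdate
            self._lastAttempt = 0.0

        def update(self, forceUpdate=False):
            elapsed = time.time() - self._lastAttempt
            if elapsed < self.MIN_UPDATE_INTERVAL and not forceUpdate:
                return False                # too soon, and not forced: skip
            self._lastAttempt = time.time()
            self._doUpdate()
            return True

    updater = ThrottledUpdater(doUpdate=lambda: print("choosing best model"))
    updater.update()                  # runs
    updater.update()                  # skipped: inside the minimum interval
    updater.update(forceUpdate=True)  # runs anyway, e.g. at job completion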
This should typically only be\n set to true if the model has completed running", "id": "f17596:c0:m1"} {"signature": "def _paramsFileHead():", "body": "str = getCopyrightHead() +\"his file defines parameters for a prediction experiment.IMPORTANT!!!is params file is dynamically generated by the RunExperimentPermutationsript. Any changes made manually will be over-written the next timenExperimentPermutations is run!!!nupic.frameworks.opf.exp_description_helpers import importBaseDescriptione sub-experiment configurationig ={return str", "docstring": "This is the first portion of every sub-experiment params file we generate. Between\nthe head and the tail are the experiment specific options.", "id": "f17597:m1"} {"signature": "def _paramsFileTail():", "body": "str =\"= importBaseDescription('', config)ls().update(mod.__dict__)return str", "docstring": "This is the tail of every params file we generate. Between the head and the tail\nare the experiment specific options.", "id": "f17597:m2"} {"signature": "def _appendReportKeys(keys, prefix, results):", "body": "allKeys = list(results.keys())allKeys.sort()for key in allKeys:if hasattr(results[key], ''):_appendReportKeys(keys, \"\" % (prefix, key), results[key])else:keys.add(\"\" % (prefix, key))", "docstring": "Generate a set of possible report keys for an experiment's results.\nA report key is a string of key names separated by colons, each key being one\nlevel deeper into the experiment results dict. For example, 'key1:key2'.\n\nThis routine is called recursively to build keys that are multiple levels\ndeep from the results dict.\n\nParameters:\n-----------------------------------------------------------\nkeys: Set of report keys accumulated so far\nprefix: prefix formed so far, this is the colon separated list of key\n names that led up to the dict passed in results\nresults: dictionary of results at this level.", "id": "f17597:m3"} {"signature": "def _matchReportKeys(reportKeyREs=[], allReportKeys=[]):", "body": "matchingReportKeys = []for keyRE in reportKeyREs:matchObj = re.compile(keyRE)found = Falsefor keyName in allReportKeys:match = matchObj.match(keyName)if match and match.end() == len(keyName):matchingReportKeys.append(keyName)found = Trueif not found:raise _BadKeyError(keyRE)return matchingReportKeys", "docstring": "Extract all items from the 'allKeys' list whose key matches one of the regular\nexpressions passed in 'reportKeys'.\n\nParameters:\n----------------------------------------------------------------------------\nreportKeyREs: List of regular expressions\nallReportKeys: List of all keys\n\nretval: list of keys from allReportKeys that match the regular expressions\n in 'reportKeyREs'\n If an invalid regular expression was included in 'reportKeys',\n then BadKeyError() is raised", "id": "f17597:m4"} {"signature": "def _getReportItem(itemName, results):", "body": "subKeys = itemName.split('')subResults = resultsfor subKey in subKeys:subResults = subResults[subKey]return subResults", "docstring": "Get a specific item by name out of the results dict.\n\nThe format of itemName is a string of dictionary keys separated by colons,\neach key being one level deeper into the results dict. 
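The report machinery flattens the nested results dict into colon-separated key names and later walks those same keys back down to fetch values. A compact stand-alone illustration of both directions (the real helpers are _appendReportKeys and _getReportItem; these are simplified re-implementations):

    def flattenReportKeys(results, prefix=""):
        """Collect 'a:b:c'-style names for every leaf of a nested dict."""
        keys = set()
        for key in sorted(results):
            name = prefix + str(key)
            value = results[key]
            if hasattr(value, "keys"):
                keys.update(flattenReportKeys(value, prefix=name + ":"))
            else:
                keys.add(name)
        return keys

    def getReportItem(itemName, results):
        """Fetch a value by walking the colon-separated key path."""
        node = results
        for subKey in itemName.split(":"):
            node = node[subKey]
        return node

    results = {"key1": {"key2": 0.5, "key3": 0.9}, "numRecords": 100}
    print(sorted(flattenReportKeys(results)))   # ['key1:key2', 'key1:key3', 'numRecords']
    print(getReportItem("key1:key2", results))  # 0.5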
For example,\n'key1:key2' would fetch results['key1']['key2'].\n\nIf itemName is not found in results, then None is returned", "id": "f17597:m5"} {"signature": "def filterResults(allResults, reportKeys, optimizeKey=None):", "body": "optimizeDict = dict()allReportKeys = set()_appendReportKeys(keys=allReportKeys, prefix='', results=allResults)matchingKeys = _matchReportKeys(reportKeys, allReportKeys)reportDict = dict()for keyName in matchingKeys:value = _getReportItem(keyName, allResults)reportDict[keyName] = valueif optimizeKey is not None:matchingKeys = _matchReportKeys([optimizeKey], allReportKeys)if len(matchingKeys) == :raise _BadKeyError(optimizeKey)elif len(matchingKeys) > :raise _BadOptimizeKeyError(optimizeKey, matchingKeys)optimizeKeyFullName = matchingKeys[]value = _getReportItem(optimizeKeyFullName, allResults)optimizeDict[optimizeKeyFullName] = valuereportDict[optimizeKeyFullName] = valuereturn(reportDict, optimizeDict)", "docstring": "Given the complete set of results generated by an experiment (passed in\n 'results'), filter out and return only the ones the caller wants, as\n specified through 'reportKeys' and 'optimizeKey'.\n\n A report key is a string of key names separated by colons, each key being one\n level deeper into the experiment results dict. For example, 'key1:key2'.\n\n\n Parameters:\n -------------------------------------------------------------------------\n results: dict of all results generated by an experiment\n reportKeys: list of items from the results dict to include in\n the report. These can be regular expressions.\n optimizeKey: Which report item, if any, we will be optimizing for. This can\n also be a regular expression, but is an error if it matches\n more than one key from the experiment's results.\n retval: (reportDict, optimizeDict)\n reportDict: a dictionary of the metrics named by desiredReportKeys\n optimizeDict: A dictionary containing 1 item: the full name and\n value of the metric identified by the optimizeKey", "id": "f17597:m6"} {"signature": "def _quoteAndEscape(string):", "body": "assert type(string) in (str,)return pprint.pformat(string)", "docstring": "string: input string (ascii or unicode)\n\nReturns: a quoted string with characters that are represented in python via\n escape sequences converted to those escape sequences", "id": "f17597:m7"} {"signature": "def _handleModelRunnerException(jobID, modelID, jobsDAO, experimentDir, logger,e):", "body": "msg = io.StringIO()print(\"\" % (modelID, e, type(e)), file=msg)traceback.print_exc(None, msg)completionReason = jobsDAO.CMPL_REASON_ERRORcompletionMsg = msg.getvalue()logger.error(completionMsg)if type(e) is not InvalidConnectionException:jobsDAO.modelUpdateResults(modelID, results=None, numRecords=)if type(e) == JobFailException:workerCmpReason = jobsDAO.jobGetFields(jobID,[''])[]if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:jobsDAO.jobSetFields(jobID, fields=dict(cancel=True,workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,workerCompletionMsg = \"\".join(str(i) for i in e.args)),useConnectionID=False,ignoreUnchanged=True)return (completionReason, completionMsg)", "docstring": "Perform standard handling of an exception that occurs while running\n a model.\n\n Parameters:\n -------------------------------------------------------------------------\n jobID: ID for this hypersearch job in the jobs table\n modelID: model ID\n jobsDAO: ClientJobsDAO instance\n experimentDir: directory containing the experiment\n logger: the logger to use\n e: the exception that occurred\n 
retval: (completionReason, completionMsg)", "id": "f17597:m8"} {"signature": "def runModelGivenBaseAndParams(modelID, jobID, baseDescription, params,predictedField, reportKeys, optimizeKey, jobsDAO,modelCheckpointGUID, logLevel=None, predictionCacheMaxRecords=None):", "body": "from nupic.swarming.ModelRunner import OPFModelRunnerlogger = logging.getLogger('')experimentDir = tempfile.mkdtemp()try:logger.info(\"\" % (experimentDir))paramsFilePath = os.path.join(experimentDir, '')paramsFile = open(paramsFilePath, '')paramsFile.write(_paramsFileHead())items = list(params.items())items.sort()for (key,value) in items:quotedKey = _quoteAndEscape(key)if isinstance(value, str):paramsFile.write(\"\" % (quotedKey , value))else:paramsFile.write(\"\" % (quotedKey , value))paramsFile.write(_paramsFileTail())paramsFile.close()baseParamsFile = open(os.path.join(experimentDir, ''), '')baseParamsFile.write(baseDescription)baseParamsFile.close()fd = open(paramsFilePath)expDescription = fd.read()fd.close()jobsDAO.modelSetFields(modelID, {'': expDescription})try:runner = OPFModelRunner(modelID=modelID,jobID=jobID,predictedField=predictedField,experimentDir=experimentDir,reportKeyPatterns=reportKeys,optimizeKeyPattern=optimizeKey,jobsDAO=jobsDAO,modelCheckpointGUID=modelCheckpointGUID,logLevel=logLevel,predictionCacheMaxRecords=predictionCacheMaxRecords)signal.signal(signal.SIGINT, runner.handleWarningSignal)(completionReason, completionMsg) = runner.run()except InvalidConnectionException:raiseexcept Exception as e:(completionReason, completionMsg) = _handleModelRunnerException(jobID,modelID, jobsDAO, experimentDir, logger, e)finally:shutil.rmtree(experimentDir)signal.signal(signal.SIGINT, signal.default_int_handler)return (completionReason, completionMsg)", "docstring": "This creates an experiment directory with a base.py description file\n created from 'baseDescription' and a description.py generated from the\n given params dict and then runs the experiment.\n\n Parameters:\n -------------------------------------------------------------------------\n modelID: ID for this model in the models table\n jobID: ID for this hypersearch job in the jobs table\n baseDescription: Contents of a description.py with the base experiment\n description\n params: Dictionary of specific parameters to override within\n the baseDescriptionFile.\n predictedField: Name of the input field for which this model is being\n optimized\n reportKeys: Which metrics of the experiment to store into the\n results dict of the model's database entry\n optimizeKey: Which metric we are optimizing for\n jobsDAO Jobs data access object - the interface to the\n jobs database which has the model's table.\n modelCheckpointGUID: A persistent, globally-unique identifier for\n constructing the model checkpoint key\n logLevel: override logging level to this value, if not None\n\n retval: (completionReason, completionMsg)", "id": "f17597:m9"} {"signature": "def generatePersistentJobGUID():", "body": "return \"\" + str(uuid.uuid1())", "docstring": "Generates a \"persistentJobGUID\" value.\n\n Parameters:\n ----------------------------------------------------------------------\n retval: A persistentJobGUID value", "id": "f17597:m11"} {"signature": "def rCopy(d, f=identityConversion, discardNoneKeys=True, deepCopy=True):", "body": "if deepCopy:d = copy.deepcopy(d)newDict = {}toCopy = [(k, v, newDict, ()) for k, v in d.items()]while len(toCopy) > :k, v, d, prevKeys = toCopy.pop()prevKeys = prevKeys + (k,)if isinstance(v, dict):d[k] = dict()toCopy[:] = [(innerK, 
innerV, d[k], prevKeys)for innerK, innerV in v.items()]else:newV = f(v, prevKeys)if not discardNoneKeys or newV is not None:d[k] = newVreturn newDict", "docstring": "Recursively copies a dict and returns the result.\n\n Args:\n d: The dict to copy.\n f: A function to apply to values when copying that takes the value and the\n list of keys from the root of the dict to the value and returns a value\n for the new dict.\n discardNoneKeys: If True, discard key-value pairs when f returns None for\n the value.\n deepCopy: If True, all values in returned dict are true copies (not the\n same object).\n Returns:\n A new dict with keys and values from d replaced with the result of f.", "id": "f17597:m13"} {"signature": "def rApply(d, f):", "body": "remainingDicts = [(d, ())]while len(remainingDicts) > :current, prevKeys = remainingDicts.pop()for k, v in current.items():keys = prevKeys + (k,)if isinstance(v, dict):remainingDicts.insert(, (v, keys))else:f(v, keys)", "docstring": "Recursively applies f to the values in dict d.\n\n Args:\n d: The dict to recurse over.\n f: A function to apply to values in d that takes the value and a list of\n keys from the root of the dict to the value.", "id": "f17597:m14"} {"signature": "def clippedObj(obj, maxElementSize=):", "body": "if hasattr(obj, ''):obj = obj._asdict()if isinstance(obj, dict):objOut = dict()for key,val in obj.items():objOut[key] = clippedObj(val)elif hasattr(obj, ''):objOut = []for val in obj:objOut.append(clippedObj(val))else:objOut = str(obj)if len(objOut) > maxElementSize:objOut = objOut[:maxElementSize] + ''return objOut", "docstring": "Return a clipped version of obj suitable for printing, This\nis useful when generating log messages by printing data structures, but\ndon't want the message to be too long.\n\nIf passed in a dict, list, or namedtuple, each element of the structure's\nstring representation will be limited to 'maxElementSize' characters. 
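clippedObj keeps log messages readable by truncating the string form of every element in a nested structure. A hedged, simplified version of the same idea (not the module's own helper):

    def clip(obj, maxElementSize=32):
        """Return a copy of obj whose leaf string representations are truncated."""
        if isinstance(obj, dict):
            return {key: clip(value, maxElementSize) for key, value in obj.items()}
        if isinstance(obj, (list, tuple)):
            return [clip(value, maxElementSize) for value in obj]
        text = str(obj)
        return text if len(text) <= maxElementSize else text[:maxElementSize] + "..."

    print(clip({"params": "x" * 100, "scores": list(range(5))}, maxElementSize=10))
    # -> {'params': 'xxxxxxxxxx...', 'scores': ['0', '1', '2', '3', '4']}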
This\nwill return a new object where the string representation of each element\nhas been truncated to fit within maxElementSize.", "id": "f17597:m15"} {"signature": "def validate(value, **kwds):", "body": "assert len(list(kwds.keys())) >= assert '' in kwds or '' in kwdsschemaDict = Noneif '' in kwds:schemaPath = kwds.pop('')schemaDict = loadJsonValueFromFile(schemaPath)elif '' in kwds:schemaDict = kwds.pop('')try:validictory.validate(value, schemaDict, **kwds)except validictory.ValidationError as e:raise ValidationError(e)", "docstring": "Validate a python value against json schema:\n validate(value, schemaPath)\n validate(value, schemaDict)\n\n value: python object to validate against the schema\n\n The json schema may be specified either as a path of the file containing\n the json schema or as a python dictionary using one of the\n following keywords as arguments:\n schemaPath: Path of file containing the json schema object.\n schemaDict: Python dictionary containing the json schema object\n\n Returns: nothing\n\n Raises:\n ValidationError when value fails json validation", "id": "f17597:m16"} {"signature": "def loadJsonValueFromFile(inputFilePath):", "body": "with open(inputFilePath) as fileObj:value = json.load(fileObj)return value", "docstring": "Loads a json value from a file and converts it to the corresponding python\n object.\n\n inputFilePath:\n Path of the json file;\n\n Returns:\n python value that represents the loaded json value", "id": "f17597:m17"} {"signature": "def sortedJSONDumpS(obj):", "body": "itemStrs = []if isinstance(obj, dict):items = list(obj.items())items.sort()for key, value in items:itemStrs.append('' % (json.dumps(key), sortedJSONDumpS(value)))return '' % (''.join(itemStrs))elif hasattr(obj, ''):for val in obj:itemStrs.append(sortedJSONDumpS(val))return '' % (''.join(itemStrs))else:return json.dumps(obj)", "docstring": "Return a JSON representation of obj with sorted keys on any embedded dicts.\nThis insures that the same object will always be represented by the same\nstring even if it contains dicts (where the sort order of the keys is\nnormally undefined).", "id": "f17597:m18"} {"signature": "def __init__(self, requestedActivities):", "body": "self.__activities = []for req in requestedActivities:act = self.Activity(repeating=req.repeating,period=req.period,cb=req.cb,iteratorHolder=[iter(range(req.period))])self.__activities.append(act)return", "docstring": "requestedActivities: a sequence of PeriodicActivityRequest elements", "id": "f17597:c2:m0"} {"signature": "def tick(self):", "body": "for act in self.__activities:if not act.iteratorHolder[]:continuetry:next(act.iteratorHolder[])except StopIteration:act.cb()if act.repeating:act.iteratorHolder[] = iter(range(act.period))else:act.iteratorHolder[] = Nonereturn True", "docstring": "Activity tick handler; services all activities\n\n Returns: True if controlling iterator says it's okay to keep going;\n False to stop", "id": "f17597:c2:m1"} {"signature": "def _escape(s):", "body": "assert isinstance(s, str),\"\" % (type(str), type(s), s)s = s.replace(\"\", \"\")s = s.replace(\"\", \"\")s = s.replace(\"\", \"\")s = s.replace(\"\", \"\")return s", "docstring": "Escape commas, tabs, newlines and dashes in a string\n\n Commas are encoded as tabs", "id": "f17598:m4"} {"signature": "def _engineServicesRunning():", "body": "process = subprocess.Popen([\"\", \"\"], stdout=subprocess.PIPE)stdout = process.communicate()[]result = process.returncodeif result != :raise RuntimeError(\"\")running = Falsefor line in 
stdout.split(\"\"):if \"\" in line and \"\" in line:running = Truebreakreturn running", "docstring": "Return true if the engine services are running", "id": "f17598:m5"} {"signature": "def runWithConfig(swarmConfig, options,outDir=None, outputLabel=\"\",permWorkDir=None, verbosity=):", "body": "global g_currentVerbosityLevelg_currentVerbosityLevel = verbosityif outDir is None:outDir = os.getcwd()if permWorkDir is None:permWorkDir = os.getcwd()_checkOverwrite(options, outDir)_generateExpFilesFromSwarmDescription(swarmConfig, outDir)options[\"\"] = swarmConfigoptions[\"\"] = outputLabeloptions[\"\"] = outDiroptions[\"\"] = permWorkDirrunOptions = _injectDefaultOptions(options)_validateOptions(runOptions)return _runAction(runOptions)", "docstring": "Starts a swarm, given an dictionary configuration.\n@param swarmConfig {dict} A complete [swarm description](http://nupic.docs.numenta.org/0.7.0.dev0/guides/swarming/running.html#the-swarm-description) object.\n@param outDir {string} Optional path to write swarm details (defaults to\n current working directory).\n@param outputLabel {string} Optional label for output (defaults to \"default\").\n@param permWorkDir {string} Optional location of working directory (defaults\n to current working directory).\n@param verbosity {int} Optional (1,2,3) increasing verbosity of output.\n\n@returns {object} Model parameters", "id": "f17598:m12"} {"signature": "def runWithJsonFile(expJsonFilePath, options, outputLabel, permWorkDir):", "body": "if \"\" in options:verbosity = options[\"\"]del options[\"\"]else:verbosity = _setupInterruptHandling()with open(expJsonFilePath, \"\") as jsonFile:expJsonConfig = json.loads(jsonFile.read())outDir = os.path.dirname(expJsonFilePath)return runWithConfig(expJsonConfig, options, outDir=outDir,outputLabel=outputLabel, permWorkDir=permWorkDir,verbosity=verbosity)", "docstring": "Starts a swarm, given a path to a JSON file containing configuration.\n\nThis function is meant to be used with a CLI wrapper that passes command line\narguments in through the options parameter.\n\n@param expJsonFilePath {string} Path to a JSON file containing the complete\n [swarm description](http://nupic.docs.numenta.org/0.7.0.dev0/guides/swarming/running.html#the-swarm-description).\n@param options {dict} CLI options.\n@param outputLabel {string} Label for output.\n@param permWorkDir {string} Location of working directory.\n\n@returns {int} Swarm job id.", "id": "f17598:m13"} {"signature": "def runWithPermutationsScript(permutationsFilePath, options,outputLabel, permWorkDir):", "body": "global g_currentVerbosityLevelif \"\" in options:g_currentVerbosityLevel = options[\"\"]del options[\"\"]else:g_currentVerbosityLevel = _setupInterruptHandling()options[\"\"] = permutationsFilePathoptions[\"\"] = outputLabeloptions[\"\"] = permWorkDiroptions[\"\"] = permWorkDirrunOptions = _injectDefaultOptions(options)_validateOptions(runOptions)return _runAction(runOptions)", "docstring": "Starts a swarm, given a path to a permutations.py script.\n\nThis function is meant to be used with a CLI wrapper that passes command line\narguments in through the options parameter.\n\n@param permutationsFilePath {string} Path to permutations.py.\n@param options {dict} CLI options.\n@param outputLabel {string} Label for output.\n@param permWorkDir {string} Location of working directory.\n\n@returns {object} Model parameters.", "id": "f17598:m14"} {"signature": "def runPermutations(_):", "body": "raise DeprecationWarning(\"\"\"\"\"\"\"\"\"\")", "docstring": "DEPRECATED. 
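A hedged usage sketch of the swarm entry points above: a swarm description dict plus an options dict go to runWithConfig, which returns the chosen model parameters. The import path and option keys below are assumptions based on the usual NuPIC layout, and the empty description is a placeholder, not a working configuration:

    import pprint

    # Assumed import path for the functions defined in this module.
    from nupic.swarming import permutations_runner

    # A complete swarm description dict goes here; its contents are out of
    # scope for this sketch.
    SWARM_DESCRIPTION = {}

    model_params = permutations_runner.runWithConfig(
        SWARM_DESCRIPTION,
        {"maxWorkers": 4, "overwrite": True},   # assumed option keys
        outDir="/tmp/swarm-out",                # where swarm artifacts are written
        outputLabel="example",
        permWorkDir="/tmp/swarm-work",
        verbosity=1,
    )
    pprint.pprint(model_params)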
Use @ref runWithConfig.", "id": "f17598:m15"} {"signature": "def _clientJobsDB():", "body": "return cjdao.ClientJobsDAO.get()", "docstring": "Returns: The shared cjdao.ClientJobsDAO instance", "id": "f17598:m17"} {"signature": "def _nupicHyperSearchHasErrors(hyperSearchJob):", "body": "return False", "docstring": "Check whether any experiments failed in our latest hypersearch\n\n Parameters:\n hyperSearchJob: _HyperSearchJob instance; if None, will get it from saved\n jobID, if any\n\n Returns: False if all models succeeded, True if one or more had errors", "id": "f17598:m18"} {"signature": "def _backupFile(filePath):", "body": "assert os.path.exists(filePath)stampNum = (prefix, suffix) = os.path.splitext(filePath)while True:backupPath = \"\" % (prefix, stampNum, suffix)stampNum += if not os.path.exists(backupPath):breakshutil.copyfile(filePath, backupPath)return backupPath", "docstring": "Back up a file\n\n Parameters:\n ----------------------------------------------------------------------\n retval: Filepath of the back-up", "id": "f17598:m19"} {"signature": "def _getOneModelInfo(nupicModelID):", "body": "return next(_iterModels([nupicModelID]))", "docstring": "A convenience function that retrieves inforamtion about a single model\n\n See also: _iterModels()\n\n Parameters:\n ----------------------------------------------------------------------\n nupicModelID: Nupic modelID\n retval: _NupicModelInfo instance for the given nupicModelID.", "id": "f17598:m20"} {"signature": "def _iterModels(modelIDs):", "body": "class ModelInfoIterator(object):\"\"\"\"\"\"__CACHE_LIMIT = debug=Falsedef __init__(self, modelIDs):\"\"\"\"\"\"self.__modelIDs = tuple(modelIDs)if self.debug:_emit(Verbosity.DEBUG,\"\" % len(self.__modelIDs))self.__nextIndex = self.__modelCache = collections.deque()returndef __iter__(self):\"\"\"\"\"\"return selfdef __next__(self):\"\"\"\"\"\"return self.__getNext()def __getNext(self):\"\"\"\"\"\"if self.debug:_emit(Verbosity.DEBUG,\"\" % (len(self.__modelCache)))if not self.__modelCache:self.__fillCache()if not self.__modelCache:raise StopIteration()return self.__modelCache.popleft()def __fillCache(self):\"\"\"\"\"\"assert (not self.__modelCache)numModelIDs = len(self.__modelIDs) if self.__modelIDs else if self.__nextIndex >= numModelIDs:returnidRange = self.__nextIndex + self.__CACHE_LIMITif idRange > numModelIDs:idRange = numModelIDslookupIDs = self.__modelIDs[self.__nextIndex:idRange]self.__nextIndex += (idRange - self.__nextIndex)infoList = _clientJobsDB().modelsInfo(lookupIDs)assert len(infoList) == len(lookupIDs),\"\" %(len(infoList), len(lookupIDs))for rawInfo in infoList:modelInfo = _NupicModelInfo(rawInfo=rawInfo)self.__modelCache.append(modelInfo)assert len(self.__modelCache) == len(lookupIDs),\"\" %(len(self.__modelCache), len(lookupIDs))if self.debug:_emit(Verbosity.DEBUG,\"\" %(len(self.__modelCache),))return ModelInfoIterator(modelIDs)", "docstring": "Creates an iterator that returns ModelInfo elements for the given modelIDs\n\n WARNING: The order of ModelInfo elements returned by the iterator\n may not match the order of the given modelIDs\n\n Parameters:\n ----------------------------------------------------------------------\n modelIDs: A sequence of model identifiers (e.g., as returned by\n _HyperSearchJob.queryModelIDs()).\n retval: Iterator that returns ModelInfo elements for the given\n modelIDs (NOTE:possibly in a different order)", "id": "f17598:m21"} {"signature": "def __init__(self, options):", "body": "self.__cjDAO = _clientJobsDB()self._options = 
optionsself.__searchJob = Noneself.__foundMetrcsKeySet = set()self._workers = Nonereturn", "docstring": "Parameters:\n----------------------------------------------------------------------\noptions: NupicRunPermutations options dict\nretval: nothing", "id": "f17598:c1:m0"} {"signature": "def runNewSearch(self):", "body": "self.__searchJob = self.__startSearch()self.monitorSearchJob()", "docstring": "Start a new hypersearch job and monitor it to completion\n Parameters:\n ----------------------------------------------------------------------\n retval: nothing", "id": "f17598:c1:m1"} {"signature": "def pickupSearch(self):", "body": "self.__searchJob = self.loadSavedHyperSearchJob(permWorkDir=self._options[\"\"],outputLabel=self._options[\"\"])self.monitorSearchJob()", "docstring": "Pick up the latest search from a saved jobID and monitor it to completion\n Parameters:\n ----------------------------------------------------------------------\n retval: nothing", "id": "f17598:c1:m2"} {"signature": "def monitorSearchJob(self):", "body": "assert self.__searchJob is not NonejobID = self.__searchJob.getJobID()startTime = time.time()lastUpdateTime = datetime.now()expectedNumModels = self.__searchJob.getExpectedNumModels(searchMethod = self._options[\"\"])lastNumFinished = finishedModelIDs = set()finishedModelStats = _ModelStats()lastWorkerState = NonelastJobResults = NonelastModelMilestones = NonelastEngStatus = NonehyperSearchFinished = Falsewhile not hyperSearchFinished:jobInfo = self.__searchJob.getJobStatus(self._workers)hyperSearchFinished = jobInfo.isFinished()modelIDs = self.__searchJob.queryModelIDs()_emit(Verbosity.DEBUG,\"\" % (len(modelIDs), len(finishedModelIDs)))if len(modelIDs) > :checkModelIDs = []for modelID in modelIDs:if modelID not in finishedModelIDs:checkModelIDs.append(modelID)del modelIDsif checkModelIDs:_emit(Verbosity.DEBUG,\"\" % (len(checkModelIDs)))errorCompletionMsg = Nonefor (i, modelInfo) in enumerate(_iterModels(checkModelIDs)):_emit(Verbosity.DEBUG,\"\" % (i, modelInfo))if modelInfo.isFinished():finishedModelIDs.add(modelInfo.getModelID())finishedModelStats.update(modelInfo)if (modelInfo.getCompletionReason().isError() andnot errorCompletionMsg):errorCompletionMsg = modelInfo.getCompletionMsg()metrics = modelInfo.getReportMetrics()self.__foundMetrcsKeySet.update(list(metrics.keys()))numFinished = len(finishedModelIDs)if numFinished != lastNumFinished:lastNumFinished = numFinishedif expectedNumModels is None:expModelsStr = \"\"else:expModelsStr = \"\" % (expectedNumModels)stats = finishedModelStatsprint((\"\"\"\" % (jobID,numFinished,expModelsStr,(stats.numCompletedEOF+stats.numCompletedStopped),\"\" if stats.numCompletedEOF else \"\",stats.numCompletedEOF,\"\" if stats.numCompletedStopped else \"\",stats.numCompletedStopped,\"\" if stats.numCompletedKilled else \"\",stats.numCompletedKilled,\"\" if stats.numCompletedError else \"\",stats.numCompletedError,\"\" if stats.numCompletedError else \"\",stats.numCompletedOrphaned,\"\" if stats.numCompletedOther else \"\",stats.numCompletedOther)))if errorCompletionMsg:print(\"\" % errorCompletionMsg)workerState = jobInfo.getWorkerState()if workerState != lastWorkerState:print(\"\" % (pprint.pformat(workerState,indent=)))lastWorkerState = workerStatejobResults = jobInfo.getResults()if jobResults != lastJobResults:print(\"\"% (pprint.pformat(jobResults, indent=), time.time()-startTime))lastJobResults = jobResultsmodelMilestones = jobInfo.getModelMilestones()if modelMilestones != lastModelMilestones:print(\"\" % 
(pprint.pformat(modelMilestones, indent=)))lastModelMilestones = modelMilestonesengStatus = jobInfo.getEngStatus()if engStatus != lastEngStatus:print(\"\" % (engStatus))lastEngStatus = engStatusif not hyperSearchFinished:if self._options[\"\"] != None:if ((datetime.now() - lastUpdateTime) >timedelta(minutes=self._options[\"\"])):print(\"\")self.__cjDAO.jobCancel(jobID)sys.exit()time.sleep()modelIDs = self.__searchJob.queryModelIDs()print(\"\" % len(modelIDs))print(\"\")jobInfo = self.__searchJob.getJobStatus(self._workers)print(\"\" % (jobInfo.getWorkerCompletionMsg()))", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval: nothing", "id": "f17598:c1:m3"} {"signature": "def _launchWorkers(self, cmdLine, numWorkers):", "body": "self._workers = []for i in range(numWorkers):stdout = tempfile.NamedTemporaryFile(delete=False)stderr = tempfile.NamedTemporaryFile(delete=False)p = subprocess.Popen(cmdLine, bufsize=, env=os.environ, shell=True,stdin=None, stdout=stdout, stderr=stderr)p._stderr_file = stderrp._stdout_file = stdoutself._workers.append(p)", "docstring": "Launch worker processes to execute the given command line\n\n Parameters:\n -----------------------------------------------\n cmdLine: The command line for each worker\n numWorkers: number of workers to launch", "id": "f17598:c1:m4"} {"signature": "def __startSearch(self):", "body": "params = _ClientJobUtils.makeSearchJobParamsDict(options=self._options,forRunning=True)if self._options[\"\"] == \"\":args = [sys.argv[], \"\" % (json.dumps(params))]print()print(\"\")print(\"\")print(\"\")jobID = hypersearch_worker.main(args)else:cmdLine = _setUpExports(self._options[\"\"])cmdLine += \"\"maxWorkers = self._options[\"\"]jobID = self.__cjDAO.jobInsert(client=\"\",cmdLine=cmdLine,params=json.dumps(params),minimumWorkers=,maximumWorkers=maxWorkers,jobType=self.__cjDAO.JOB_TYPE_HS)cmdLine = \"\"\"\" % (jobID)self._launchWorkers(cmdLine, maxWorkers)searchJob = _HyperSearchJob(jobID)self.__saveHyperSearchJobID(permWorkDir=self._options[\"\"],outputLabel=self._options[\"\"],hyperSearchJob=searchJob)if self._options[\"\"] == \"\":print(\"\" % (jobID))else:print(\"\" % (jobID))_emit(Verbosity.DEBUG,\"\" % (cmdLine,))return searchJob", "docstring": "Starts HyperSearch as a worker or runs it inline for the \"dryRun\" action\n\n Parameters:\n ----------------------------------------------------------------------\n retval: the new _HyperSearchJob instance representing the\n HyperSearch job", "id": "f17598:c1:m5"} {"signature": "def peekSearchJob(self):", "body": "assert self.__searchJob is not Nonereturn self.__searchJob", "docstring": "Retrieves the runner's _HyperSearchJob instance; NOTE: only available\n after run().\n\n Parameters:\n ----------------------------------------------------------------------\n retval: _HyperSearchJob instance or None", "id": "f17598:c1:m6"} {"signature": "def getDiscoveredMetricsKeys(self):", "body": "return tuple(self.__foundMetrcsKeySet)", "docstring": "Returns a tuple of all metrics keys discovered while running HyperSearch.\n\n NOTE: This is an optimization so that our client may\n use this info for generating the report csv file without having\n to pre-scan all modelInfos\n\n Parameters:\n ----------------------------------------------------------------------\n retval: Tuple of metrics keys discovered while running\n HyperSearch;", "id": "f17598:c1:m7"} {"signature": "@classmethoddef printModels(cls, options):", "body": "print(\"\")searchParams = 
_ClientJobUtils.makeSearchJobParamsDict(options=options)", "docstring": "Prints a listing of experiments that would take place without\n actually executing them.\n\n Parameters:\n ----------------------------------------------------------------------\n options: NupicRunPermutations options dict\n retval: nothing", "id": "f17598:c1:m8"} {"signature": "@classmethoddef generateReport(cls,options,replaceReport,hyperSearchJob,metricsKeys):", "body": "if hyperSearchJob is None:hyperSearchJob = cls.loadSavedHyperSearchJob(permWorkDir=options[\"\"],outputLabel=options[\"\"])modelIDs = hyperSearchJob.queryModelIDs()bestModel = Nonemetricstmp = set()searchVar = set()for modelInfo in _iterModels(modelIDs):if modelInfo.isFinished():vars = list(modelInfo.getParamLabels().keys())searchVar.update(vars)metrics = modelInfo.getReportMetrics()metricstmp.update(list(metrics.keys()))if metricsKeys is None:metricsKeys = metricstmpreportWriter = _ReportCSVWriter(hyperSearchJob=hyperSearchJob,metricsKeys=metricsKeys,searchVar=searchVar,outputDirAbsPath=options[\"\"],outputLabel=options[\"\"],replaceReport=replaceReport)modelStats = _ModelStats()print(\"\")print(\"\")searchParams = hyperSearchJob.getParams()(optimizationMetricKey, maximizeMetric) = (_PermutationUtils.getOptimizationMetricInfo(searchParams))formatStr = NonefoundMetricsKeySet = set(metricsKeys)sortedMetricsKeys = []jobInfo = _clientJobsDB().jobInfo(hyperSearchJob.getJobID())if jobInfo.cancel == :raise Exception(jobInfo.workerCompletionMsg)try:results = json.loads(jobInfo.results)except Exception as e:print(\"\"\"\")print(\"\", jobInfo)print(\"\", jobInfo.results)print(\"\", e)raisebestModelNum = results[\"\"]bestModelIterIndex = NonetotalWallTime = totalRecords = scoreModelIDDescList = []for (i, modelInfo) in enumerate(_iterModels(modelIDs)):reportWriter.emit(modelInfo)totalRecords+=modelInfo.getNumRecords()format = \"\"startTime = modelInfo.getStartTime()if modelInfo.isFinished():endTime = modelInfo.getEndTime()st = datetime.strptime(startTime, format)et = datetime.strptime(endTime, format)totalWallTime+=(et-st).secondsmodelStats.update(modelInfo)expDesc = modelInfo.getModelDescription()reportMetrics = modelInfo.getReportMetrics()optimizationMetrics = modelInfo.getOptimizationMetrics()if modelInfo.getModelID() == bestModelNum:bestModel = modelInfobestModelIterIndex=ibestMetric = list(optimizationMetrics.values())[]if optimizationMetrics:assert len(optimizationMetrics) == , (\"\" % (len(optimizationMetrics), optimizationMetrics, modelInfo))if modelInfo.getCompletionReason().isEOF():scoreModelIDDescList.append((list(optimizationMetrics.values())[],modelInfo.getModelID(),modelInfo.getGeneratedDescriptionFile(),modelInfo.getParamLabels()))print(\"\" % (i, modelInfo, expDesc))if (modelInfo.isFinished() andnot (modelInfo.getCompletionReason().isStopped ormodelInfo.getCompletionReason().isEOF())):print(\"\" % modelInfo.getCompletionMsg())if reportMetrics:foundMetricsKeySet.update(iter(reportMetrics.keys()))if len(sortedMetricsKeys) != len(foundMetricsKeySet):sortedMetricsKeys = sorted(foundMetricsKeySet)maxKeyLen = max([len(k) for k in sortedMetricsKeys])formatStr = \"\" % (maxKeyLen+)for key in sortedMetricsKeys:if key in reportMetrics:if key == optimizationMetricKey:m = \"\" % reportMetrics[key]else:m = \"\" % reportMetrics[key]print(formatStr % (key+\"\"), m)print()print(\"\")if len(modelIDs) > :print(\"\" % (len(modelIDs),(\"\"if (modelStats.numCompletedKilled + modelStats.numCompletedEOF) ==len(modelIDs)else \"\" % (len(modelIDs) - 
(modelStats.numCompletedKilled + modelStats.numCompletedEOF +modelStats.numCompletedStopped)))))if modelStats.numStatusOther > :print(\"\" % (modelStats.numStatusOther))print(\"\" % modelStats.numStatusWaitingToStart)print(\"\" % modelStats.numStatusRunning)print(\"\" % modelStats.numStatusCompleted)if modelStats.numCompletedOther > :print(\"\" % (modelStats.numCompletedOther))print(\"\" % modelStats.numCompletedEOF)print(\"\" % modelStats.numCompletedStopped)print(\"\" % modelStats.numCompletedOrphaned)print(\"\" % modelStats.numCompletedKilled)print(\"\" % modelStats.numCompletedError)assert modelStats.numStatusOther == , \"\" % (modelStats.numStatusOther)assert modelStats.numCompletedOther == , \"\" % (modelStats.numCompletedOther)else:print(\"\")print()global gCurrentSearchjobStatus = hyperSearchJob.getJobStatus(gCurrentSearch._workers)jobResults = jobStatus.getResults()if \"\" in jobResults:print(\"\")pprint.pprint(jobResults[\"\"], indent=)else:print(\"\")if bestModel is not None:maxKeyLen = max([len(k) for k in sortedMetricsKeys])maxKeyLen = max(maxKeyLen, len(optimizationMetricKey))formatStr = \"\" % (maxKeyLen+)bestMetricValue = list(bestModel.getOptimizationMetrics().values())[]optimizationMetricName = list(bestModel.getOptimizationMetrics().keys())[]print()print(\"\" % (optimizationMetricName, maximizeMetric))print(\"\" % (bestModelIterIndex, bestModel, bestModel.getModelDescription()))print(formatStr % (optimizationMetricName+\"\"), bestMetricValue)print()print(\"\" % totalRecords)print()print(\"\" % totalWallTime)hsJobParams = hyperSearchJob.getParams()if options[\"\"] > :print(\"\" % (options[\"\"]))scoreModelIDDescList.sort()scoreModelIDDescList = scoreModelIDDescList[:options[\"\"]]i = -for (score, modelID, description, paramLabels) in scoreModelIDDescList:i += outDir = os.path.join(options[\"\"], \"\" % (i))print(\"\" %(modelID, outDir))if not os.path.exists(outDir):os.makedirs(outDir)base_description_path = os.path.join(options[\"\"],\"\")base_description_relpath = os.path.relpath(base_description_path,start=outDir)description = description.replace(\"\",\"\" % base_description_relpath)fd = open(os.path.join(outDir, \"\"), \"\")fd.write(description)fd.close()fd = open(os.path.join(outDir, \"\"), \"\")writer = csv.writer(fd)colNames = list(paramLabels.keys())colNames.sort()writer.writerow(colNames)row = [paramLabels[x] for x in colNames]writer.writerow(row)fd.close()print(\"\")mod = imp.load_source(\"\", os.path.join(outDir,\"\"))model_description = mod.descriptionInterface.getModelDescription()fd = open(os.path.join(outDir, \"\"), \"\")fd.write(\"\" % (getCopyrightHead(),pprint.pformat(model_description)))fd.close()print()reportWriter.finalize()return model_description", "docstring": "Prints all available results in the given HyperSearch job and emits\n model information to the permutations report csv.\n\n The job may be completed or still in progress.\n\n Parameters:\n ----------------------------------------------------------------------\n options: NupicRunPermutations options dict\n replaceReport: True to replace existing report csv, if any; False to\n append to existing report csv, if any\n hyperSearchJob: _HyperSearchJob instance; if None, will get it from saved\n jobID, if any\n metricsKeys: sequence of report metrics key names to include in report;\n if None, will pre-scan all modelInfos to generate a complete\n list of metrics key names.\n retval: model parameters", "id": "f17598:c1:m9"} {"signature": "@classmethoddef loadSavedHyperSearchJob(cls, permWorkDir, 
outputLabel):", "body": "jobID = cls.__loadHyperSearchJobID(permWorkDir=permWorkDir,outputLabel=outputLabel)searchJob = _HyperSearchJob(nupicJobID=jobID)return searchJob", "docstring": "Instantiates a _HyperSearchJob instance from info saved in file\n\n Parameters:\n ----------------------------------------------------------------------\n permWorkDir: Directory path for saved jobID file\n outputLabel: Label string for incorporating into file name for saved jobID\n retval: _HyperSearchJob instance; raises exception if not found", "id": "f17598:c1:m10"} {"signature": "@classmethoddef __saveHyperSearchJobID(cls, permWorkDir, outputLabel, hyperSearchJob):", "body": "jobID = hyperSearchJob.getJobID()filePath = cls.__getHyperSearchJobIDFilePath(permWorkDir=permWorkDir,outputLabel=outputLabel)if os.path.exists(filePath):_backupFile(filePath)d = dict(hyperSearchJobID = jobID)with open(filePath, \"\") as jobIdPickleFile:pickle.dump(d, jobIdPickleFile)", "docstring": "Saves the given _HyperSearchJob instance's jobID to file\n\n Parameters:\n ----------------------------------------------------------------------\n permWorkDir: Directory path for saved jobID file\n outputLabel: Label string for incorporating into file name for saved jobID\n hyperSearchJob: _HyperSearchJob instance\n retval: nothing", "id": "f17598:c1:m11"} {"signature": "@classmethoddef __loadHyperSearchJobID(cls, permWorkDir, outputLabel):", "body": "filePath = cls.__getHyperSearchJobIDFilePath(permWorkDir=permWorkDir,outputLabel=outputLabel)jobID = Nonewith open(filePath, \"\") as jobIdPickleFile:jobInfo = pickle.load(jobIdPickleFile)jobID = jobInfo[\"\"]return jobID", "docstring": "Loads a saved jobID from file\n\n Parameters:\n ----------------------------------------------------------------------\n permWorkDir: Directory path for saved jobID file\n outputLabel: Label string for incorporating into file name for saved jobID\n retval: HyperSearch jobID; raises exception if not found.", "id": "f17598:c1:m12"} {"signature": "@classmethoddef __getHyperSearchJobIDFilePath(cls, permWorkDir, outputLabel):", "body": "basePath = permWorkDirfilename = \"\" % (outputLabel,)filepath = os.path.join(basePath, filename)return filepath", "docstring": "Returns filepath where to store HyperSearch JobID\n\n Parameters:\n ----------------------------------------------------------------------\n permWorkDir: Directory path for saved jobID file\n outputLabel: Label string for incorporating into file name for saved jobID\n retval: Filepath where to store HyperSearch JobID", "id": "f17598:c1:m13"} {"signature": "def __init__(self,hyperSearchJob,metricsKeys,searchVar,outputDirAbsPath,outputLabel,replaceReport):", "body": "self.__searchJob = hyperSearchJobself.__searchJobID = hyperSearchJob.getJobID()self.__sortedMetricsKeys = sorted(metricsKeys)self.__outputDirAbsPath = os.path.abspath(outputDirAbsPath)self.__outputLabel = outputLabelself.__replaceReport = replaceReportself.__sortedVariableNames=searchVarself.__csvFileObj = Noneself.__reportCSVPath = Noneself.__backupCSVPath = None", "docstring": "Parameters:\n----------------------------------------------------------------------\nhyperSearchJob: _HyperSearchJob instance\nmetricsKeys: sequence of report metrics key names to include in report\noutputDirAbsPath:\n Directory for creating report CSV file (absolute path)\noutputLabel: A string label to incorporate into report CSV file name\nreplaceReport: True to replace existing report csv, if any; False to\n append to existing report csv, if any\nretval: 
nothing", "id": "f17598:c3:m0"} {"signature": "def emit(self, modelInfo):", "body": "if self.__csvFileObj is None:self.__openAndInitCSVFile(modelInfo)csv = self.__csvFileObjprint(\"\" % (self.__searchJobID), end='', file=csv)print(\"\" % (modelInfo.getModelID()), end='', file=csv)print(\"\" % (modelInfo.statusAsString()), end='', file=csv)if modelInfo.isFinished():print(\"\" % (modelInfo.getCompletionReason()), end='', file=csv)else:print(\"\", end='', file=csv)if not modelInfo.isWaitingToStart():print(\"\" % (modelInfo.getStartTime()), end='', file=csv)else:print(\"\", end='', file=csv)if modelInfo.isFinished():dateFormat = \"\"startTime = modelInfo.getStartTime()endTime = modelInfo.getEndTime()print(\"\" % endTime, end='', file=csv)st = datetime.strptime(startTime, dateFormat)et = datetime.strptime(endTime, dateFormat)print(\"\" % (str((et - st).seconds)), end='', file=csv)else:print(\"\", end='', file=csv)print(\"\", end='', file=csv)print(\"\" % str(modelInfo.getModelDescription()), end='', file=csv)print(\"\" % str(modelInfo.getNumRecords()), end='', file=csv)paramLabelsDict = modelInfo.getParamLabels()for key in self.__sortedVariableNames:if key in paramLabelsDict:print(\"\" % (paramLabelsDict[key]), end='', file=csv)else:print(\"\", end='', file=csv)metrics = modelInfo.getReportMetrics()for key in self.__sortedMetricsKeys:value = metrics.get(key, \"\")value = str(value)value = value.replace(\"\", \"\")print(\"\" % (value), end='', file=csv)print(file=csv)", "docstring": "Emit model info to csv file\n\n Parameters:\n ----------------------------------------------------------------------\n modelInfo: _NupicModelInfo instance\n retval: nothing", "id": "f17598:c3:m1"} {"signature": "def finalize(self):", "body": "if self.__csvFileObj is not None:self.__csvFileObj.close()self.__csvFileObj = Noneprint(\"\" % (self.__reportCSVPath,))if self.__backupCSVPath:print(\"\" %(self.__backupCSVPath,))else:print(\"\")", "docstring": "Close file and print report/backup csv file paths\n\n Parameters:\n ----------------------------------------------------------------------\n retval: nothing", "id": "f17598:c3:m2"} {"signature": "def __openAndInitCSVFile(self, modelInfo):", "body": "basePath = self.__outputDirAbsPathreportCSVName = \"\" % (self.__outputLabel,)reportCSVPath = self.__reportCSVPath = os.path.join(basePath, reportCSVName)backupCSVPath = Noneif os.path.exists(reportCSVPath):backupCSVPath = self.__backupCSVPath = _backupFile(reportCSVPath)if self.__replaceReport:mode = \"\"else:mode = \"\"csv = self.__csvFileObj = open(reportCSVPath, mode)if not self.__replaceReport and backupCSVPath:print(file=csv)print(file=csv)print(\"\", end='', file=csv)print(\"\", end='', file=csv)print(\"\", end='', file=csv)print(\"\", end='', file=csv)print(\"\", end='', file=csv)print(\"\", end='', file=csv)print(\"\", end='', file=csv)print(\"\", end='', file=csv)print(\"\", end='', file=csv)for key in self.__sortedVariableNames:print(\"\" % key, end='', file=csv)for key in self.__sortedMetricsKeys:print(\"\" % key, end='', file=csv)print(file=csv)", "docstring": "- Backs up old report csv file;\n- opens the report csv file in append or overwrite mode (per\n self.__replaceReport);\n- emits column fields;\n- sets up self.__sortedVariableNames, self.__csvFileObj,\n self.__backupCSVPath, and self.__reportCSVPath\n\nParameters:\n----------------------------------------------------------------------\nmodelInfo: First _NupicModelInfo instance passed to emit()\nretval: nothing", "id": "f17598:c3:m3"} {"signature": "def 
__init__(self, nupicJobID):", "body": "self.__nupicJobID = nupicJobIDjobInfo = _clientJobsDB().jobInfo(nupicJobID)assert jobInfo is not None, \"\" % nupicJobIDassert jobInfo.jobId == nupicJobID, \"\" % (jobInfo.jobId, nupicJobID)_emit(Verbosity.DEBUG, \"\" % pprint.pformat(jobInfo, indent=))if jobInfo.params is not None:self.__params = json.loads(jobInfo.params)else:self.__params = None", "docstring": "_NupicJob constructor\n\n Parameters:\n ----------------------------------------------------------------------\n retval: Nupic Client JobID of the job", "id": "f17598:c4:m0"} {"signature": "def __repr__(self):", "body": "return \"\" % (self.__class__.__name__, self.__nupicJobID)", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval: representation of this _NupicJob instance", "id": "f17598:c4:m1"} {"signature": "def getJobStatus(self, workers):", "body": "jobInfo = self.JobStatus(self.__nupicJobID, workers)return jobInfo", "docstring": "Parameters:\n----------------------------------------------------------------------\nworkers: If this job was launched outside of the nupic job engine, then this\n is an array of subprocess Popen instances, one for each worker\nretval: _NupicJob.JobStatus instance", "id": "f17598:c4:m2"} {"signature": "def getJobID(self):", "body": "return self.__nupicJobID", "docstring": "Semi-private method for retrieving the jobId\n\n Parameters:\n ----------------------------------------------------------------------\n retval: Nupic Client JobID of this _NupicJob instance", "id": "f17598:c4:m3"} {"signature": "def getParams(self):", "body": "return self.__params", "docstring": "Semi-private method for retrieving the job-specific params\n\n Parameters:\n ----------------------------------------------------------------------\n retval: Job params dict corresponding to the JSON params value\n returned by ClientJobsDAO.jobInfo()", "id": "f17598:c4:m4"} {"signature": "def __init__(self, reason):", "body": "self.__reason = reason", "docstring": "Parameters:\n----------------------------------------------------------------------\nreason: completion reason value from ClientJobsDAO.jobInfo()", "id": "f17598:c5:m0"} {"signature": "def __init__(self, nupicJobID):", "body": "super(_HyperSearchJob, self).__init__(nupicJobID)self.__expectedNumModels = None", "docstring": "Parameters:\n----------------------------------------------------------------------\nnupicJobID: Nupic Client JobID of a HyperSearch job\nretval: nothing", "id": "f17598:c6:m0"} {"signature": "def queryModelIDs(self):", "body": "jobID = self.getJobID()modelCounterPairs = _clientJobsDB().modelsGetUpdateCounters(jobID)modelIDs = tuple(x[] for x in modelCounterPairs)return modelIDs", "docstring": "Queuries DB for model IDs of all currently instantiated models\n associated with this HyperSearch job.\n\n See also: _iterModels()\n\n Parameters:\n ----------------------------------------------------------------------\n retval: A sequence of Nupic modelIDs", "id": "f17598:c6:m1"} {"signature": "def getExpectedNumModels(self, searchMethod):", "body": "return self.__expectedNumModels", "docstring": "Returns: the total number of expected models if known, -1 if it can't\n be determined.\n\n NOTE: this can take a LONG time to complete for HyperSearches with a huge\n number of possible permutations.\n\n Parameters:\n ----------------------------------------------------------------------\n searchMethod: \"v2\" is the only method currently supported\n retval: The total number of 
expected models, if known; -1 if unknown", "id": "f17598:c6:m2"} {"signature": "@classmethoddef makeSearchJobParamsDict(cls, options, forRunning=False):", "body": "if options[\"\"] == \"\":hsVersion = \"\"else:raise Exception(\"\" % options[\"\"])maxModels = options[\"\"]if options[\"\"] == \"\" and maxModels is None:maxModels = useTerminators = options[\"\"]if useTerminators is None:params = {\"\": hsVersion,\"\": maxModels,}else:params = {\"\": hsVersion,\"\": useTerminators,\"\": maxModels,}if forRunning:params[\"\"] = str(uuid.uuid1())if options[\"\"]:params[\"\"] = options[\"\"]elif options[\"\"]:params[\"\"] = options[\"\"]else:with open(options[\"\"], mode=\"\") as fp:params[\"\"] = json.load(fp)return params", "docstring": "Constructs a dictionary of HyperSearch parameters suitable for converting\n to json and passing as the params argument to ClientJobsDAO.jobInsert()\n Parameters:\n ----------------------------------------------------------------------\n options: NupicRunPermutations options dict\n forRunning: True if the params are for running a Hypersearch job; False\n if params are for introspection only.\n\n retval: A dictionary of HyperSearch parameters for\n ClientJobsDAO.jobInsert()", "id": "f17598:c7:m0"} {"signature": "@classmethoddef getOptimizationMetricInfo(cls, searchJobParams):", "body": "if searchJobParams[\"\"] == \"\":search = HypersearchV2(searchParams=searchJobParams)else:raise RuntimeError(\"\" %(searchJobParams[\"\"]))info = search.getOptimizationMetricInfo()return info", "docstring": "Retrives the optimization key name and optimization function.\n\n Parameters:\n ---------------------------------------------------------\n searchJobParams:\n Parameter for passing as the searchParams arg to\n Hypersearch constructor.\n retval: (optimizationMetricKey, maximize)\n optimizationMetricKey: which report key to optimize for\n maximize: True if we should try and maximize the optimizeKey\n metric. 
False if we should minimize it.", "id": "f17598:c8:m0"} {"signature": "def __init__(self, rawInfo):", "body": "self.__rawInfo = rawInfoself.__cachedResults = Noneassert self.__rawInfo.params is not Noneself.__cachedParams = None", "docstring": "Parameters:\n----------------------------------------------------------------------\nrawInfo: A single model information element as returned by\n ClientJobsDAO.modelsInfo()\nretval: nothing.", "id": "f17598:c9:m0"} {"signature": "def __repr__(self):", "body": "return (\"\"\"\" % (\"\",self.__rawInfo.jobId,self.__rawInfo.modelId,self.__rawInfo.status,self.__rawInfo.completionReason,self.__rawInfo.updateCounter,self.__rawInfo.numRecords))", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval: Representation of this _NupicModelInfo instance.", "id": "f17598:c9:m1"} {"signature": "def getModelID(self):", "body": "return self.__rawInfo.modelId", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval: Nupic modelID associated with this model info.", "id": "f17598:c9:m2"} {"signature": "def statusAsString(self):", "body": "return \"\" % self.__rawInfo.status", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval: Human-readable string representation of the model's status.", "id": "f17598:c9:m3"} {"signature": "def getModelDescription(self):", "body": "params = self.__unwrapParams()if \"\" in params:return params[\"\"]else:paramSettings = self.getParamLabels()items = []for key, value in list(paramSettings.items()):items.append(\"\" % (key, value))return \"\".join(items)", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval: Printable description of the model.", "id": "f17598:c9:m4"} {"signature": "def getGeneratedDescriptionFile(self):", "body": "return self.__rawInfo.genDescription", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval: Contents of the sub-experiment description file for\n this model", "id": "f17598:c9:m5"} {"signature": "def getNumRecords(self):", "body": "return self.__rawInfo.numRecords", "docstring": "Paramets:\n----------------------------------------------------------------------\nretval: The number of records processed by the model.", "id": "f17598:c9:m6"} {"signature": "def getParamLabels(self):", "body": "params = self.__unwrapParams()if \"\" in params:retval = dict()queue = [(pair, retval) for pair inparams[\"\"][\"\"].items()]while len(queue) > :pair, output = queue.pop()k, v = pairif (\"\" in v and \"\" in v and\"\" in v):output[k] = v[\"\"]else:if k not in output:output[k] = dict()queue.extend((pair, output[k]) for pair in v.items())return retval", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval: a dictionary of model parameter labels. For each entry\n the key is the name of the parameter and the value\n is the value chosen for it.", "id": "f17598:c9:m7"} {"signature": "def __unwrapParams(self):", "body": "if self.__cachedParams is None:self.__cachedParams = json.loads(self.__rawInfo.params)assert self.__cachedParams is not None,\"\" % self.__rawInfo.paramsreturn self.__cachedParams", "docstring": "Unwraps self.__rawInfo.params into the equivalent python dictionary\n and caches it in self.__cachedParams. 
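A simplified, runnable sketch of the queue-based traversal performed by getParamLabels above: walk the nested params dict and copy chosen leaf values into a parallel output dict. Leaf detection is simplified to "anything that is not a dict", because the marker keys the real code checks for are elided in this dump, and the real method also unwraps each leaf from its descriptor dict, which is not reproduced here.

    def param_labels_sketch(nested_params):
        # Walk the nested dict with an explicit work queue, mirroring the shape
        # of getParamLabels(); non-dict values are treated as leaves (a
        # simplification of the elided marker-key check).
        result = dict()
        queue = [(pair, result) for pair in nested_params.items()]
        while queue:
            (key, value), output = queue.pop()
            if isinstance(value, dict):
                output.setdefault(key, dict())
                queue.extend((pair, output[key]) for pair in value.items())
            else:
                output[key] = value
        return result
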
Returns the unwrapped params\n\n Parameters:\n ----------------------------------------------------------------------\n retval: Model params dictionary as correpsonding to the json\n as returned in ClientJobsDAO.modelsInfo()[x].params", "id": "f17598:c9:m8"} {"signature": "def getReportMetrics(self):", "body": "return self.__unwrapResults().reportMetrics", "docstring": "Retrives a dictionary of metrics designated for report\n Parameters:\n ----------------------------------------------------------------------\n retval: a dictionary of metrics that were collected for the model or\n an empty dictionary if there aren't any.", "id": "f17598:c9:m9"} {"signature": "def getOptimizationMetrics(self):", "body": "return self.__unwrapResults().optimizationMetrics", "docstring": "Retrives a dictionary of metrics designagted for optimization\n Parameters:\n ----------------------------------------------------------------------\n retval: a dictionary of optimization metrics that were collected\n for the model or an empty dictionary if there aren't any.", "id": "f17598:c9:m10"} {"signature": "def getAllMetrics(self):", "body": "result = self.getReportMetrics()result.update(self.getOptimizationMetrics())return result", "docstring": "Retrives a dictionary of metrics that combines all report and\n optimization metrics\n\n Parameters:\n ----------------------------------------------------------------------\n retval: a dictionary of optimization metrics that were collected\n for the model; an empty dictionary if there aren't any.", "id": "f17598:c9:m11"} {"signature": "def __unwrapResults(self):", "body": "if self.__cachedResults is None:if self.__rawInfo.results is not None:resultList = json.loads(self.__rawInfo.results)assert len(resultList) == ,\"\" % (len(resultList), resultList)self.__cachedResults = self.ModelResults(reportMetrics=resultList[],optimizationMetrics=resultList[])else:self.__cachedResults = self.ModelResults(reportMetrics={},optimizationMetrics={})return self.__cachedResults", "docstring": "Unwraps self.__rawInfo.results and caches it in self.__cachedResults;\n Returns the unwrapped params\n\n Parameters:\n ----------------------------------------------------------------------\n retval: ModelResults namedtuple instance", "id": "f17598:c9:m12"} {"signature": "def isWaitingToStart(self):", "body": "waiting = (self.__rawInfo.status == self.__nupicModelStatus_notStarted)return waiting", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval: True if the job has not been started yet", "id": "f17598:c9:m13"} {"signature": "def isRunning(self):", "body": "running = (self.__rawInfo.status == self.__nupicModelStatus_running)return running", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval: True if the job has not been started yet", "id": "f17598:c9:m14"} {"signature": "def isFinished(self):", "body": "finished = (self.__rawInfo.status == self.__nupicModelStatus_completed)return finished", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval: True if the model's processing has completed (either with\n success or failure).", "id": "f17598:c9:m15"} {"signature": "def getCompletionReason(self):", "body": "assert self.isFinished(), \"\" % selfreturn _ModelCompletionReason(self.__rawInfo.completionReason)", "docstring": "Returns _ModelCompletionReason.\n\n NOTE: it's an error to call this method if isFinished() would return False.\n\n 
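The __unwrapParams/__unwrapResults pair above share a parse-once-and-cache pattern, presumably so the metrics accessors do not re-decode the same JSON on every call. A hypothetical standalone illustration follows (the class and names are invented for illustration and are not part of the source):

    import json

    class RawJSONField(object):
        # Hypothetical illustration of the unwrap-and-cache pattern used by
        # __unwrapParams() and __unwrapResults(): the raw JSON string is decoded
        # at most once, and later calls reuse the cached value.
        def __init__(self, raw_json):
            self._raw = raw_json
            self._cached = None

        def get(self):
            if self._cached is None:
                self._cached = json.loads(self._raw)
            return self._cached
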
Parameters:\n ----------------------------------------------------------------------\n retval: _ModelCompletionReason instance", "id": "f17598:c9:m16"} {"signature": "def getCompletionMsg(self):", "body": "assert self.isFinished(), \"\" % selfreturn self.__rawInfo.completionMsg", "docstring": "Returns model completion message.\n\n NOTE: it's an error to call this method if isFinished() would return False.\n\n Parameters:\n ----------------------------------------------------------------------\n retval: completion message", "id": "f17598:c9:m17"} {"signature": "def getStartTime(self):", "body": "assert not self.isWaitingToStart(), \"\" % selfreturn \"\" % self.__rawInfo.startTime", "docstring": "Returns model evaluation start time.\n\n NOTE: it's an error to call this method if isWaitingToStart() would\n return True.\n\n Parameters:\n ----------------------------------------------------------------------\n retval: model evaluation start time", "id": "f17598:c9:m18"} {"signature": "def getEndTime(self):", "body": "assert self.isFinished(), \"\" % selfreturn \"\" % self.__rawInfo.endTime", "docstring": "Returns mode evaluation end time.\n\n NOTE: it's an error to call this method if isFinished() would return False.\n\n Parameters:\n ----------------------------------------------------------------------\n retval: model evaluation end time", "id": "f17598:c9:m19"} {"signature": "def main(argv):", "body": "parser = OptionParser(helpString)parser.add_option(\"\", action=\"\", type=\"\", default=None,help=\"\")parser.add_option(\"\", action=\"\", type=\"\", default=None,help=(\"\"\"\"))parser.add_option(\"\", action=\"\", type=\"\", default=None,help=(\"\"\"\"))parser.add_option(\"\", action=\"\", default=None,help=\"\"\"\"\"\")parser.add_option(\"\", action=\"\", default=False,help=\"\")parser.add_option(\"\", action=\"\", default=False,help=\"\")parser.add_option(\"\", action=\"\", type=\"\", default=None,help=\"\"\"\"\"\")(options, args) = parser.parse_args(argv[:])if len(args) != :raise RuntimeError(\"\" %(args))if (options.jobID and options.params):raise RuntimeError(\"\")if (options.jobID is None and options.params is None):raise RuntimeError(\"\")initLogging(verbose=True)hst = HypersearchWorker(options, argv[:])if options.params is None:try:jobID = hst.run()except Exception as e:jobID = options.jobIDmsg = io.StringIO()print(\"\" %(ErrorCodes.hypersearchLogicErr, e), file=msg)traceback.print_exc(None, msg)completionReason = ClientJobsDAO.CMPL_REASON_ERRORcompletionMsg = msg.getvalue()hst.logger.error(completionMsg)jobsDAO = ClientJobsDAO.get()workerCmpReason = jobsDAO.jobGetFields(options.jobID,[''])[]if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:jobsDAO.jobSetFields(options.jobID, fields=dict(cancel=True,workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,workerCompletionMsg = completionMsg),useConnectionID=False,ignoreUnchanged=True)else:jobID = NonecompletionReason = ClientJobsDAO.CMPL_REASON_SUCCESScompletionMsg = \"\"try:jobID = hst.run()except Exception as e:jobID = hst._options.jobIDcompletionReason = ClientJobsDAO.CMPL_REASON_ERRORcompletionMsg = \"\" % (e,)raisefinally:if jobID is not None:cjDAO = ClientJobsDAO.get()cjDAO.jobSetCompleted(jobID=jobID,completionReason=completionReason,completionMsg=completionMsg)return jobID", "docstring": "The main function of the HypersearchWorker script. 
This parses the command\nline arguments, instantiates a HypersearchWorker instance, and then\nruns it.\n\nParameters:\n----------------------------------------------------------------------\nretval: jobID of the job we ran. This is used by unit test code\n when calling this working using the --params command\n line option (which tells this worker to insert the job\n itself).", "id": "f17599:m0"} {"signature": "def __init__(self, options, cmdLineArgs):", "body": "self._options = optionsself.logger = logging.getLogger(\"\".join(['', self.__class__.__name__]))if options.logLevel is not None:self.logger.setLevel(options.logLevel)self.logger.info(\"\" %str(cmdLineArgs))self.logger.debug(\"\" % (pprint.pformat(os.environ)))random.seed()self._hs = Noneself._modelIDCtrDict = dict()self._modelIDCtrList = []self._modelIDSet = set()self._workerID = None", "docstring": "Instantiate the Hypersearch worker\n\n Parameters:\n ---------------------------------------------------------------------\n options: The command line options. See the main() method for a\n description of these options\n cmdLineArgs: Copy of the command line arguments, so we can place them\n in the log", "id": "f17599:c0:m0"} {"signature": "def _processUpdatedModels(self, cjDAO):", "body": "curModelIDCtrList = cjDAO.modelsGetUpdateCounters(self._options.jobID)if len(curModelIDCtrList) == :returnself.logger.debug(\"\"% (str(curModelIDCtrList)))self.logger.debug(\"\"% (str(self._modelIDCtrList)))curModelIDCtrList = sorted(curModelIDCtrList)numItems = len(curModelIDCtrList)changedEntries = [x for x in zip(range(numItems), curModelIDCtrList,self._modelIDCtrList) if x[][] != x[][]]if len(changedEntries) > :self.logger.debug(\"\", str(changedEntries))for entry in changedEntries:(idx, (modelID, curCtr), (_, oldCtr)) = entryself._modelIDCtrDict[modelID] = curCtrassert (self._modelIDCtrList[idx][] == modelID)assert (curCtr != oldCtr)self._modelIDCtrList[idx][] = curCtrchangedModelIDs = [x[][] for x in changedEntries]modelResults = cjDAO.modelsGetResultAndStatus(changedModelIDs)for mResult in modelResults:results = mResult.resultsif results is not None:results = json.loads(results)self._hs.recordModelProgress(modelID=mResult.modelId,modelParams = None,modelParamsHash = mResult.engParamsHash,results = results,completed = (mResult.status == cjDAO.STATUS_COMPLETED),completionReason = mResult.completionReason,matured = mResult.engMatured,numRecords = mResult.numRecords)curModelIDSet = set([x[] for x in curModelIDCtrList])newModelIDs = curModelIDSet.difference(self._modelIDSet)if len(newModelIDs) > :self._modelIDSet.update(newModelIDs)curModelIDCtrDict = dict(curModelIDCtrList)modelInfos = cjDAO.modelsGetResultAndStatus(newModelIDs)modelInfos.sort()modelParamsAndHashs = cjDAO.modelsGetParams(newModelIDs)modelParamsAndHashs.sort()for (mResult, mParamsAndHash) in zip(modelInfos,modelParamsAndHashs):modelID = mResult.modelIdassert (modelID == mParamsAndHash.modelId)self._modelIDCtrDict[modelID] = curModelIDCtrDict[modelID]self._modelIDCtrList.append([modelID, curModelIDCtrDict[modelID]])results = mResult.resultsif results is not None:results = json.loads(mResult.results)self._hs.recordModelProgress(modelID = modelID,modelParams = json.loads(mParamsAndHash.params),modelParamsHash = mParamsAndHash.engParamsHash,results = results,completed = (mResult.status == cjDAO.STATUS_COMPLETED),completionReason = (mResult.completionReason),matured = mResult.engMatured,numRecords = mResult.numRecords)self._modelIDCtrList.sort()", "docstring": "For all models that 
modified their results since last time this method\n was called, send their latest results to the Hypersearch implementation.", "id": "f17599:c0:m1"} {"signature": "def run(self):", "body": "options = self._optionsself.logger.info(\"\")cjDAO = ClientJobsDAO.get()self._workerID = cjDAO.getConnectionID()if options.clearModels:cjDAO.modelsClearAll()if options.params is not None:options.jobID = cjDAO.jobInsert(client='', cmdLine=\"\",params=options.params, alreadyRunning=True,minimumWorkers=, maximumWorkers=,jobType = cjDAO.JOB_TYPE_HS)if options.workerID is not None:wID = options.workerIDelse:wID = self._workerIDbuildID = Configuration.get('', '')logPrefix = '' %(buildID, wID, options.jobID)ExtendedLogger.setLogPrefix(logPrefix)if options.resetJobStatus:cjDAO.jobSetFields(options.jobID,fields={'': ClientJobsDAO.CMPL_REASON_SUCCESS,'': False,},useConnectionID=False,ignoreUnchanged=True)jobInfo = cjDAO.jobInfo(options.jobID)self.logger.info(\"\" % (str(clippedObj(jobInfo))))jobParams = json.loads(jobInfo.params)jsonSchemaPath = os.path.join(os.path.dirname(__file__),\"\",\"\")validate(jobParams, schemaPath=jsonSchemaPath)hsVersion = jobParams.get('', None)if hsVersion == '':self._hs = HypersearchV2(searchParams=jobParams, workerID=self._workerID,cjDAO=cjDAO, jobID=options.jobID, logLevel=options.logLevel)else:raise RuntimeError(\"\"% (hsVersion))try:exit = FalsenumModelsTotal = print(\"\", file=sys.stderr)while not exit:batchSize = modelIDToRun = Nonewhile modelIDToRun is None:if options.modelID is None:self._processUpdatedModels(cjDAO)(exit, newModels) = self._hs.createModels(numModels = batchSize)if exit:breakif len(newModels) == :continuefor (modelParams, modelParamsHash, particleHash) in newModels:jsonModelParams = json.dumps(modelParams)(modelID, ours) = cjDAO.modelInsertAndStart(options.jobID,jsonModelParams, modelParamsHash, particleHash)if not ours:mParamsAndHash = cjDAO.modelsGetParams([modelID])[]mResult = cjDAO.modelsGetResultAndStatus([modelID])[]results = mResult.resultsif results is not None:results = json.loads(results)modelParams = json.loads(mParamsAndHash.params)particleHash = cjDAO.modelsGetFields(modelID,[''])[]particleInst = \"\" % (modelParams[''][''],modelParams[''][''])self.logger.info(\"\"\"\"\"\", modelID,mParamsAndHash.engParamsHash.encode(''),particleHash.encode(''), particleInst)self._hs.recordModelProgress(modelID = modelID,modelParams = modelParams,modelParamsHash = mParamsAndHash.engParamsHash,results = results,completed = (mResult.status == cjDAO.STATUS_COMPLETED),completionReason = mResult.completionReason,matured = mResult.engMatured,numRecords = mResult.numRecords)else:modelIDToRun = modelIDbreakelse:modelIDToRun = int(options.modelID)mParamsAndHash = cjDAO.modelsGetParams([modelIDToRun])[]modelParams = json.loads(mParamsAndHash.params)modelParamsHash = mParamsAndHash.engParamsHashcjDAO.modelSetFields(modelIDToRun,dict(engWorkerConnId=self._workerID))if False:for attempt in range():paramsHash = hashlib.md5(\"\" % (modelIDToRun,attempt)).digest()particleHash = hashlib.md5(\"\" % (modelIDToRun,attempt)).digest()try:cjDAO.modelSetFields(modelIDToRun,dict(engParamsHash=paramsHash,engParticleHash=particleHash))success = Trueexcept:success = Falseif success:breakif not success:raise RuntimeError(\"\"\"\")(modelIDToRun, ours) = cjDAO.modelInsertAndStart(options.jobID,mParamsAndHash.params, modelParamsHash)if exit:breakself.logger.info(\"\",modelIDToRun, modelParamsHash.encode(''), modelParams)persistentJobGUID = jobParams['']assert persistentJobGUID, \"\" % 
(persistentJobGUID,)modelCheckpointGUID = jobInfo.client + \"\" + persistentJobGUID + ('' + str(modelIDToRun))self._hs.runModel(modelID=modelIDToRun, jobID = options.jobID,modelParams=modelParams, modelParamsHash=modelParamsHash,jobsDAO=cjDAO, modelCheckpointGUID=modelCheckpointGUID)numModelsTotal += self.logger.info(\"\",modelIDToRun, numModelsTotal)print(\"\" %(numModelsTotal), file=sys.stderr)print(\"\", file=sys.stderr)if options.modelID is not None:exit = Truefinally:self._hs.close()self.logger.info(\"\" % (numModelsTotal))print(\"\" % (numModelsTotal), file=sys.stderr)return options.jobID", "docstring": "Run this worker.\n\n Parameters:\n ----------------------------------------------------------------------\n retval: jobID of the job we ran. This is used by unit test code\n when calling this working using the --params command\n line option (which tells this worker to insert the job\n itself).", "id": "f17599:c0:m2"} {"signature": "def createAndStartSwarm(client, clientInfo=\"\", clientKey=\"\", params=\"\",minimumWorkers=None, maximumWorkers=None,alreadyRunning=False):", "body": "if minimumWorkers is None:minimumWorkers = Configuration.getInt(\"\")if maximumWorkers is None:maximumWorkers = Configuration.getInt(\"\")return ClientJobsDAO.get().jobInsert(client=client,cmdLine=\"\",clientInfo=clientInfo,clientKey=clientKey,alreadyRunning=alreadyRunning,params=params,minimumWorkers=minimumWorkers,maximumWorkers=maximumWorkers,jobType=ClientJobsDAO.JOB_TYPE_HS)", "docstring": "Create and start a swarm job.\n\n Args:\n client - A string identifying the calling client. There is a small limit\n for the length of the value. See ClientJobsDAO.CLIENT_MAX_LEN.\n clientInfo - JSON encoded dict of client specific information.\n clientKey - Foreign key. Limited in length, see ClientJobsDAO._initTables.\n params - JSON encoded dict of the parameters for the job. This can be\n fetched out of the database by the worker processes based on the jobID.\n minimumWorkers - The minimum workers to allocate to the swarm. Set to None\n to use the default.\n maximumWorkers - The maximum workers to allocate to the swarm. Set to None\n to use the swarm default. Set to 0 to use the maximum scheduler value.\n alreadyRunning - Insert a job record for an already running process. 
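A hypothetical invocation of createAndStartSwarm (defined just above), assuming the surrounding module is importable and a ClientJobs database is reachable; the params payload below is a stand-in purely to show the call shape, since the real key names are elided in this dump.

    import json

    # Stand-in payload; real jobs pass the JSON-encoded parameter dict
    # described in the docstring above.
    swarm_params_json = json.dumps({})

    job_id = createAndStartSwarm(
        client="exampleClient",    # short caller identifier (length-limited per ClientJobsDAO.CLIENT_MAX_LEN)
        params=swarm_params_json,  # JSON-encoded dict of job parameters
        minimumWorkers=None,       # None -> use the configured default
        maximumWorkers=0,          # 0 -> use the maximum scheduler value
    )
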
Used\n for testing.", "id": "f17600:m0"} {"signature": "def getSwarmModelParams(modelID):", "body": "cjDAO = ClientJobsDAO.get()(jobID, description) = cjDAO.modelsGetFields(modelID,[\"\", \"\"])(baseDescription,) = cjDAO.jobGetFields(jobID, [\"\"])descriptionDirectory = tempfile.mkdtemp()try:baseDescriptionFilePath = os.path.join(descriptionDirectory, \"\")with open(baseDescriptionFilePath, mode=\"\") as f:f.write(baseDescription)descriptionFilePath = os.path.join(descriptionDirectory, \"\")with open(descriptionFilePath, mode=\"\") as f:f.write(description)expIface = helpers.getExperimentDescriptionInterfaceFromModule(helpers.loadExperimentDescriptionScriptFromDir(descriptionDirectory))return json.dumps(dict(modelConfig=expIface.getModelDescription(),inferenceArgs=expIface.getModelControl().get(\"\", None)))finally:shutil.rmtree(descriptionDirectory, ignore_errors=True)", "docstring": "Retrieve the Engine-level model params from a Swarm model\n\n Args:\n modelID - Engine-level model ID of the Swarm model\n\n Returns:\n JSON-encoded string containing Model Params", "id": "f17600:m1"} {"signature": "def _makeUsageErrorStr(errorString, usageString):", "body": "return \"\" % (errorString, usageString)", "docstring": "Combines an error string and usage string into a regular format, so they\n all look consistent.", "id": "f17601:m0"} {"signature": "def _handleShowSchemaOption():", "body": "print(\"\")print((json.dumps(_getExperimentDescriptionSchema(), indent=_INDENT_STEP*)))print(\"\")return", "docstring": "Displays command schema to stdout and exit program", "id": "f17601:m1"} {"signature": "def _handleDescriptionOption(cmdArgStr, outDir, usageStr, hsVersion,claDescriptionTemplateFile):", "body": "try:args = json.loads(cmdArgStr)except Exception as e:raise _InvalidCommandArgException(_makeUsageErrorStr((\"\" +\"\") % (str(e), cmdArgStr), usageStr))filesDescription = _generateExperiment(args, outDir, hsVersion=hsVersion,claDescriptionTemplateFile = claDescriptionTemplateFile)pprint.pprint(filesDescription)return", "docstring": "Parses and validates the --description option args and executes the\nrequest\n\nParameters:\n-----------------------------------------------------------------------\ncmdArgStr: JSON string compatible with _gExperimentDescriptionSchema\noutDir: where to place generated experiment files\nusageStr: program usage string\nhsVersion: which version of hypersearch permutations file to generate, can\n be 'v1' or 'v2'\nclaDescriptionTemplateFile: Filename containing the template description\nretval: nothing", "id": "f17601:m2"} {"signature": "def _handleDescriptionFromFileOption(filename, outDir, usageStr, hsVersion,claDescriptionTemplateFile):", "body": "try:fileHandle = open(filename, '')JSONStringFromFile = fileHandle.read().splitlines()JSONStringFromFile = ''.join(JSONStringFromFile)except Exception as e:raise _InvalidCommandArgException(_makeUsageErrorStr((\"\" +\"\") % (str(e), filename), usageStr))_handleDescriptionOption(JSONStringFromFile, outDir, usageStr,hsVersion=hsVersion,claDescriptionTemplateFile = claDescriptionTemplateFile)return", "docstring": "Parses and validates the --descriptionFromFile option and executes the\nrequest\n\nParameters:\n-----------------------------------------------------------------------\nfilename: File from which we'll extract description JSON\noutDir: where to place generated experiment files\nusageStr: program usage string\nhsVersion: which version of hypersearch permutations file to generate, can\n be 'v1' or 
'v2'\nclaDescriptionTemplateFile: Filename containing the template description\nretval: nothing", "id": "f17601:m3"} {"signature": "def _isInt(x, precision = ):", "body": "xInt = int(round(x))return (abs(x - xInt) < precision * x, xInt)", "docstring": "Return (isInt, intValue) for a given floating point number.\n\nParameters:\n----------------------------------------------------------------------\nx: floating point number to evaluate\nprecision: desired precision\nretval: (isInt, intValue)\n isInt: True if x is close enough to an integer value\n intValue: x as an integer", "id": "f17601:m4"} {"signature": "def _isString(obj):", "body": "return type(obj) in (str,)", "docstring": "returns whether or not the object is a string", "id": "f17601:m5"} {"signature": "def _quoteAndEscape(string):", "body": "assert _isString(string)return pprint.pformat(string)", "docstring": "string: input string (ascii or unicode)\n\nReturns: a quoted string with characters that are represented in python via\n escape sequences converted to those escape sequences", "id": "f17601:m6"} {"signature": "def _indentLines(str, indentLevels = , indentFirstLine=True):", "body": "indent = _ONE_INDENT * indentLevelslines = str.splitlines(True)result = ''if len(lines) > and not indentFirstLine:first = result += lines[]else:first = for line in lines[first:]:result += indent + linereturn result", "docstring": "Indent all lines in the given string\n\n str: input string\n indentLevels: number of levels of indentation to apply\n indentFirstLine: if False, the 1st line will not be indented\n\n Returns: The result string with all lines indented", "id": "f17601:m7"} {"signature": "def _isCategory(fieldType):", "body": "if fieldType == '':return Trueif fieldType == '' or fieldType=='':return False", "docstring": "Prediction function for determining whether a function is a categorical\n variable or a scalar variable. Mainly used for determining the appropriate\n metrics.", "id": "f17601:m8"} {"signature": "def _generateMetricSpecString(inferenceElement, metric,params=None, field=None,returnLabel=False):", "body": "metricSpecArgs = dict(metric=metric,field=field,params=params,inferenceElement=inferenceElement)metricSpecAsString = \"\" %''.join(['' % (item[],item[])for item in metricSpecArgs.items()])if not returnLabel:return metricSpecAsStringspec = MetricSpec(**metricSpecArgs)metricLabel = spec.getLabel()return metricSpecAsString, metricLabel", "docstring": "Generates the string representation of a MetricSpec object, and returns\n the metric key associated with the metric.\n\n\n Parameters:\n -----------------------------------------------------------------------\n inferenceElement:\n An InferenceElement value that indicates which part of the inference this\n metric is computed on\n\n metric:\n The type of the metric being computed (e.g. aae, avg_error)\n\n params:\n A dictionary of parameters for the metric. The keys are the parameter names\n and the values should be the parameter values (e.g. 
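A runnable sketch of the relative-tolerance test described by _isInt above; the default precision literal is elided in the source, so the value below is an assumption, and abs() is added so the comparison also behaves for negative inputs.

    def is_int_sketch(x, precision=0.0001):
        # Returns (isInt, intValue): isInt is True when x falls within a relative
        # tolerance of the nearest integer. The tolerance default is an
        # assumption; note that exactly 0.0 fails the strict '<' test, mirroring
        # the original formula.
        x_int = int(round(x))
        return (abs(x - x_int) < precision * abs(x), x_int)
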
window=200)\n\n field:\n The name of the field for which this metric is being computed\n\n returnLabel:\n If True, returns the label of the MetricSpec that was generated", "id": "f17601:m9"} {"signature": "def _generateFileFromTemplates(templateFileNames, outputFilePath,replacementDict):", "body": "installPath = os.path.dirname(__file__)outputFile = open(outputFilePath, \"\")outputLines = []inputLines = []firstFile = Truefor templateFileName in templateFileNames:if not firstFile:inputLines.extend([os.linesep]*)firstFile = FalseinputFilePath = os.path.join(installPath, templateFileName)inputFile = open(inputFilePath)inputLines.extend(inputFile.readlines())inputFile.close()print(\"\", len(inputLines), \"\")for line in inputLines:tempLine = linefor k, v in replacementDict.items():if v is None:v = \"\"tempLine = re.sub(k, v, tempLine)outputFile.write(tempLine)outputFile.close()", "docstring": "Generates a file by applying token replacements to the given template\n file\n\n templateFileName:\n A list of template file names; these files are assumed to be in\n the same directory as the running experiment_generator.py script.\n ExpGenerator will perform the substitution and concanetate\n the files in the order they are specified\n\n outputFilePath: Absolute path of the output file\n\n replacementDict:\n A dictionary of token/replacement pairs", "id": "f17601:m10"} {"signature": "def _generateEncoderChoicesV1(fieldInfo):", "body": "width = fieldName = fieldInfo['']fieldType = fieldInfo['']encoderChoicesList = []if fieldType in ['', '']:aggFunction = ''encoders = [None]for n in (, , , ):encoder = dict(type='', name=fieldName, fieldname=fieldName,n=n, w=width, clipInput=True,space=\"\")if '' in fieldInfo:encoder[''] = fieldInfo['']if '' in fieldInfo:encoder[''] = fieldInfo['']encoders.append(encoder)encoderChoicesList.append(encoders)elif fieldType == '':aggFunction = ''encoders = [None]encoder = dict(type='', name=fieldName,fieldname=fieldName, n=, w=width)encoders.append(encoder)encoderChoicesList.append(encoders)elif fieldType == '':aggFunction = ''encoders = [None]for radius in (, ):encoder = dict(type='', name='' % (fieldName),fieldname=fieldName, timeOfDay=(width, radius))encoders.append(encoder)encoderChoicesList.append(encoders)encoders = [None]for radius in (, ):encoder = dict(type='', name='' % (fieldName),fieldname=fieldName, dayOfWeek=(width, radius))encoders.append(encoder)encoderChoicesList.append(encoders)else:raise RuntimeError(\"\" % (fieldType))return (encoderChoicesList, aggFunction)", "docstring": "Return a list of possible encoder parameter combinations for the given\n field and the default aggregation function to use. Each parameter combination\n is a dict defining the parameters for the encoder. 
Here is an example\n return value for the encoderChoicesList:\n\n [\n None,\n {'fieldname':'timestamp',\n 'name': 'timestamp_timeOfDay',\n 'type':'DateEncoder'\n 'dayOfWeek': (7,1)\n },\n {'fieldname':'timestamp',\n 'name': 'timestamp_timeOfDay',\n 'type':'DateEncoder'\n 'dayOfWeek': (7,3)\n },\n ],\n\n Parameters:\n --------------------------------------------------\n fieldInfo: item from the 'includedFields' section of the\n description JSON object\n\n retval: (encoderChoicesList, aggFunction)\n encoderChoicesList: a list of encoder choice lists for this field.\n Most fields will generate just 1 encoder choice list.\n DateTime fields can generate 2 or more encoder choice lists,\n one for dayOfWeek, one for timeOfDay, etc.\n aggFunction: name of aggregation function to use for this\n field type", "id": "f17601:m11"} {"signature": "def _generateEncoderStringsV1(includedFields):", "body": "encoderChoicesList = []for fieldInfo in includedFields:fieldName = fieldInfo[''](choicesList, aggFunction) = _generateEncoderChoicesV1(fieldInfo)encoderChoicesList.extend(choicesList)encoderSpecsList = []for encoderChoices in encoderChoicesList:encoder = encoderChoices[-]for c in _ILLEGAL_FIELDNAME_CHARACTERS:if encoder[''].find(c) >= :raise _ExpGeneratorException(\"\" % (c, encoder['']))encoderSpecsList.append(\"\" % (_quoteAndEscape(encoder['']),*_ONE_INDENT,pprint.pformat(encoder, indent=*_INDENT_STEP)))encoderSpecsStr = ''.join(encoderSpecsList)permEncoderChoicesList = []for encoderChoices in encoderChoicesList:permEncoderChoicesList.append(\"\" % (_quoteAndEscape(encoderChoices[-]['']),pprint.pformat(encoderChoices, indent=*_INDENT_STEP)))permEncoderChoicesStr = ''.join(permEncoderChoicesList)permEncoderChoicesStr = _indentLines(permEncoderChoicesStr, ,indentFirstLine=False)return (encoderSpecsStr, permEncoderChoicesStr)", "docstring": "Generate and return the following encoder related substitution variables:\n\n encoderSpecsStr:\n For the base description file, this string defines the default\n encoding dicts for each encoder. For example:\n '__gym_encoder' : { 'fieldname': 'gym',\n 'n': 13,\n 'name': 'gym',\n 'type': 'SDRCategoryEncoder',\n 'w': 7},\n '__address_encoder' : { 'fieldname': 'address',\n 'n': 13,\n 'name': 'address',\n 'type': 'SDRCategoryEncoder',\n 'w': 7}\n\n encoderSchemaStr:\n For the base description file, this is a list containing a\n DeferredDictLookup entry for each encoder. For example:\n [DeferredDictLookup('__gym_encoder'),\n DeferredDictLookup('__address_encoder'),\n DeferredDictLookup('__timestamp_timeOfDay_encoder'),\n DeferredDictLookup('__timestamp_dayOfWeek_encoder'),\n DeferredDictLookup('__consumption_encoder')],\n\n permEncoderChoicesStr:\n For the permutations file, this defines the possible\n encoder dicts for each encoder. For example:\n '__timestamp_dayOfWeek_encoder': [\n None,\n {'fieldname':'timestamp',\n 'name': 'timestamp_timeOfDay',\n 'type':'DateEncoder'\n 'dayOfWeek': (7,1)\n },\n {'fieldname':'timestamp',\n 'name': 'timestamp_timeOfDay',\n 'type':'DateEncoder'\n 'dayOfWeek': (7,3)\n },\n ],\n\n '__field_consumption_encoder': [\n None,\n {'fieldname':'consumption',\n 'name': 'consumption',\n 'type':'AdaptiveScalarEncoder',\n 'n': 13,\n 'w': 7,\n }\n ]\n\n\n\n Parameters:\n --------------------------------------------------\n includedFields: item from the 'includedFields' section of the\n description JSON object. 
This is a list of dicts, each\n dict defining the field name, type, and optional min\n and max values.\n\n retval: (encoderSpecsStr, encoderSchemaStr permEncoderChoicesStr)", "id": "f17601:m12"} {"signature": "def _generatePermEncoderStr(options, encoderDict):", "body": "permStr = \"\"if encoderDict.get('', False):permStr = \"\"for key, value in list(encoderDict.items()):if key == \"\":continueif key == '' and encoderDict[''] != '':permStr += \"\" % (encoderDict[\"\"] + ,encoderDict[\"\"] + )else:if issubclass(type(value), str):permStr += \"\" % (key, value)else:permStr += \"\" % (key, value)permStr += \"\"else:if encoderDict[\"\"] in [\"\", \"\",\"\", \"\"]:permStr = \"\"for key, value in list(encoderDict.items()):if key == \"\":key = \"\"elif key == \"\":key = \"\"elif key == \"\":continueif key == \"\":permStr += \"\" % (encoderDict[\"\"] + ,encoderDict[\"\"] + )elif key == \"\":if value and not \"\" in encoderDict:permStr += \"\"% (_quoteAndEscape(\"\"), _quoteAndEscape(\"\"))encoderDict.pop(\"\")else:if issubclass(type(value), str):permStr += \"\" % (key, value)else:permStr += \"\" % (key, value)permStr += \"\"elif encoderDict[\"\"] in [\"\"]:permStr = \"\"for key, value in list(encoderDict.items()):if key == \"\":key = \"\"elif key == \"\":key = \"\"elif key == \"\":continueif issubclass(type(value), str):permStr += \"\" % (key, value)else:permStr += \"\" % (key, value)permStr += \"\"elif encoderDict[\"\"] in [\"\"]:permStr = \"\"for key, value in list(encoderDict.items()):if key == \"\":key = \"\"elif key == \"\":continueelif key == \"\":continueif key == \"\":permStr += \"\" % (encoderDict[\"\"])permStr += \"\"permStr += \"\" % (value[])elif key == \"\":permStr += \"\" % (encoderDict[\"\"])permStr += \"\"permStr += \"\" % (value[])elif key == \"\":permStr += \"\" % (encoderDict[\"\"])permStr += \"\"permStr += \"\" % (value)else:if issubclass(type(value), str):permStr += \"\" % (key, value)else:permStr += \"\" % (key, value)permStr += \"\"else:raise RuntimeError(\"\" %(encoderDict[\"\"]))return permStr", "docstring": "Generate the string that defines the permutations to apply for a given\n encoder.\n\n Parameters:\n -----------------------------------------------------------------------\n options: experiment params\n encoderDict: the encoder dict, which gets placed into the description.py\n\n\n For example, if the encoderDict contains:\n 'consumption': {\n 'clipInput': True,\n 'fieldname': u'consumption',\n 'n': 100,\n 'name': u'consumption',\n 'type': 'AdaptiveScalarEncoder',\n 'w': 21},\n\n The return string will contain:\n \"PermuteEncoder(fieldName='consumption',\n encoderClass='AdaptiveScalarEncoder',\n w=21,\n n=PermuteInt(28, 521),\n clipInput=True)\"", "id": "f17601:m13"} {"signature": "def _generateEncoderStringsV2(includedFields, options):", "body": "width = encoderDictsList = []if options[''] in [\"\",\"\",\"\",\"\"]:classifierOnlyField = options['']['']else:classifierOnlyField = Nonefor fieldInfo in includedFields:fieldName = fieldInfo['']fieldType = fieldInfo['']if fieldType in ['', '']:runDelta = fieldInfo.get(\"\", False)if runDelta or \"\" in fieldInfo:encoderDict = dict(type='', name=fieldName,fieldname=fieldName, n=, w=width, clipInput=True)if runDelta:encoderDict[\"\"] = Trueelse:encoderDict = dict(type='', name=fieldName,fieldname=fieldName, n=, w=width, clipInput=True)if '' in fieldInfo:encoderDict[''] = fieldInfo['']if '' in fieldInfo:encoderDict[''] = fieldInfo['']if ('' in fieldInfo and '' in fieldInfo)and (encoderDict[''] == ''):encoderDict[''] = ''if '' in 
fieldInfo:encoderDict[''] = fieldInfo['']if '' in fieldInfo:encoderDict[''] = fieldInfo['']encoderDictsList.append(encoderDict)elif fieldType == '':encoderDict = dict(type='', name=fieldName,fieldname=fieldName, n=+width, w=width)if '' in fieldInfo:encoderDict[''] = fieldInfo['']encoderDictsList.append(encoderDict)elif fieldType == '':encoderDict = dict(type='', name='' % (fieldName),fieldname=fieldName, timeOfDay=(width, ))if '' in fieldInfo:encoderDict[''] = fieldInfo['']encoderDictsList.append(encoderDict)encoderDict = dict(type='', name='' % (fieldName),fieldname=fieldName, dayOfWeek=(width, ))if '' in fieldInfo:encoderDict[''] = fieldInfo['']encoderDictsList.append(encoderDict)encoderDict = dict(type='', name='' % (fieldName),fieldname=fieldName, weekend=(width))if '' in fieldInfo:encoderDict[''] = fieldInfo['']encoderDictsList.append(encoderDict)else:raise RuntimeError(\"\" % (fieldType))if fieldName == classifierOnlyField:clEncoderDict = dict(encoderDict)clEncoderDict[''] = TrueclEncoderDict[''] = ''encoderDictsList.append(clEncoderDict)if options[\"\"][\"\"] == \"\":encoderDictsList.remove(encoderDict)if options.get('') is not None:tempList=[]for encoderDict in encoderDictsList:if encoderDict[''] in options['']:tempList.append(encoderDict)encoderDictsList = tempListencoderSpecsList = []permEncoderChoicesList = []for encoderDict in encoderDictsList:if encoderDict[''].find('') >= :raise _ExpGeneratorException(\"\")for c in _ILLEGAL_FIELDNAME_CHARACTERS:if encoderDict[''].find(c) >= :raise _ExpGeneratorException(\"\" %(c, encoderDict['']))constructorStr = _generatePermEncoderStr(options, encoderDict)encoderKey = _quoteAndEscape(encoderDict[''])encoderSpecsList.append(\"\" % (encoderKey,*_ONE_INDENT,pprint.pformat(encoderDict, indent=*_INDENT_STEP)))permEncoderChoicesList.append(\"\" % (encoderKey, constructorStr))encoderSpecsStr = ''.join(encoderSpecsList)permEncoderChoicesStr = ''.join(permEncoderChoicesList)permEncoderChoicesStr = _indentLines(permEncoderChoicesStr, ,indentFirstLine=True)return (encoderSpecsStr, permEncoderChoicesStr)", "docstring": "Generate and return the following encoder related substitution variables:\n\n encoderSpecsStr:\n For the base description file, this string defines the default\n encoding dicts for each encoder. For example:\n\n __gym_encoder = { 'fieldname': 'gym',\n 'n': 13,\n 'name': 'gym',\n 'type': 'SDRCategoryEncoder',\n 'w': 7},\n __address_encoder = { 'fieldname': 'address',\n 'n': 13,\n 'name': 'address',\n 'type': 'SDRCategoryEncoder',\n 'w': 7}\n\n\n permEncoderChoicesStr:\n For the permutations file, this defines the possible\n encoder dicts for each encoder. For example:\n\n '__gym_encoder' : PermuteEncoder('gym', 'SDRCategoryEncoder', w=7,\n n=100),\n\n '__address_encoder' : PermuteEncoder('address', 'SDRCategoryEncoder',\n w=7, n=100),\n\n '__timestamp_dayOfWeek_encoder' : PermuteEncoder('timestamp',\n 'DateEncoder.timeOfDay', w=7, radius=PermuteChoices([1, 8])),\n\n '__consumption_encoder': PermuteEncoder('consumption', 'AdaptiveScalarEncoder',\n w=7, n=PermuteInt(13, 500, 20), minval=0,\n maxval=PermuteInt(100, 300, 25)),\n\n\n\n Parameters:\n --------------------------------------------------\n includedFields: item from the 'includedFields' section of the\n description JSON object. 
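For a datetime field, the V2 generator above appends three sub-encoder dicts keyed on timeOfDay, dayOfWeek and weekend. A hedged sketch of that expansion follows; the DateEncoder class name, the name suffixes, and width=21 are assumptions made for illustration.

# Sketch only: the three date sub-encoder dicts described above.
def dateEncoderDicts(fieldName, width=21):
  return [
    dict(type="DateEncoder", name=fieldName + "_timeOfDay",
         fieldname=fieldName, timeOfDay=(width, 1)),
    dict(type="DateEncoder", name=fieldName + "_dayOfWeek",
         fieldname=fieldName, dayOfWeek=(width, 1)),
    dict(type="DateEncoder", name=fieldName + "_weekend",
         fieldname=fieldName, weekend=width),
  ]

print(len(dateEncoderDicts("timestamp")))   # 3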
This is a list of dicts, each\n dict defining the field name, type, and optional min\n and max values.\n\n retval: (encoderSpecsStr permEncoderChoicesStr)", "id": "f17601:m14"} {"signature": "def _handleJAVAParameters(options):", "body": "if '' not in options:prediction = options.get('', {InferenceType.TemporalNextStep:{'':True}})inferenceType = Nonefor infType, value in prediction.items():if value['']:inferenceType = infTypebreakif inferenceType == '':inferenceType = InferenceType.TemporalNextStepif inferenceType != InferenceType.TemporalNextStep:raise _ExpGeneratorException(\"\" %(inferenceType))options[''] = inferenceTypeif '' in options:if '' not in options:options[''] = {'': options['']}elif '' not in options['']:options[''][''] = options['']", "docstring": "Handle legacy options (TEMPORARY)", "id": "f17601:m15"} {"signature": "def _getPropertyValue(schema, propertyName, options):", "body": "if propertyName not in options:paramsSchema = schema[''][propertyName]if '' in paramsSchema:options[propertyName] = paramsSchema['']else:options[propertyName] = None", "docstring": "Checks to see if property is specified in 'options'. If not, reads the\n default value from the schema", "id": "f17601:m16"} {"signature": "def _getExperimentDescriptionSchema():", "body": "installPath = os.path.dirname(os.path.abspath(__file__))schemaFilePath = os.path.join(installPath, \"\")return json.loads(open(schemaFilePath, '').read())", "docstring": "Returns the experiment description schema. This implementation loads it in\nfrom file experimentDescriptionSchema.json.\n\nParameters:\n--------------------------------------------------------------------------\nReturns: returns a dict representing the experiment description schema.", "id": "f17601:m17"} {"signature": "def _generateExperiment(options, outputDirPath, hsVersion,claDescriptionTemplateFile):", "body": "_gExperimentDescriptionSchema = _getExperimentDescriptionSchema()try:validictory.validate(options, _gExperimentDescriptionSchema)except Exception as e:raise _InvalidCommandArgException((\"\" +\"\") % (str(e), pprint.pformat(options)))streamSchema = json.load(resource_stream(jsonschema.__name__,''))try:validictory.validate(options[''], streamSchema)except Exception as e:raise _InvalidCommandArgException((\"\" +\"\") % (str(e), json.dumps(options)))_handleJAVAParameters(options)for propertyName in _gExperimentDescriptionSchema['']:_getPropertyValue(_gExperimentDescriptionSchema, propertyName, options)if options[''] is not None:infArgs = _gExperimentDescriptionSchema['']['']for schema in infArgs['']:if isinstance(schema, dict):for propertyName in schema['']:_getPropertyValue(schema, propertyName, options[''])if options[''] is not None:anomalyArgs = _gExperimentDescriptionSchema['']['']for schema in anomalyArgs['']:if isinstance(schema, dict):for propertyName in schema['']:_getPropertyValue(schema, propertyName, options[''])predictionSteps = options[''].get('', None)if options[''] == InferenceType.NontemporalClassification:if predictionSteps is not None and predictionSteps != []:raise RuntimeError(\"\"\"\")if predictionSteps == []and options[''] in ['','','']:options[''] = InferenceType.NontemporalClassificationif options[\"\"] == InferenceType.NontemporalClassification:if options[\"\"][\"\"] == \"\"or options[\"\"][\"\"] == \"\":raise RuntimeError(\"\"\"\")options[\"\"][\"\"] = \"\"swarmSize = options['']if swarmSize is None:if options[\"\"][\"\"] is None:options[\"\"][\"\"] = \"\"elif swarmSize == '':if options[''] is None:options[''] = if options[''] is 
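_getPropertyValue above backfills a missing option with the schema's default, or None when the schema has no default. The following self-contained sketch shows the same behaviour; the 'properties' and 'default' key names follow JSON-schema convention and are assumptions here.

# Sketch of the default-backfill step described for _getPropertyValue.
def fillDefault(schema, propertyName, options):
  if propertyName not in options:
    propSchema = schema["properties"][propertyName]
    options[propertyName] = propSchema.get("default", None)

opts = {"swarmSize": "medium"}
schema = {"properties": {"metricWindow": {"default": 1000}, "swarmSize": {}}}
fillDefault(schema, "metricWindow", opts)
assert opts["metricWindow"] == 1000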
None:options[''] = if options[''] is None:options[''] = if options[\"\"][\"\"] is None:options[\"\"][\"\"] = \"\"elif swarmSize == '':if options[''] is None:options[''] = if options[''] is None:options[''] = if options[''] is None:options[''] = if options[\"\"][\"\"] is None:options[\"\"][\"\"] = \"\"elif swarmSize == '':if options[''] is None:options[''] = options[''] = Trueif options[\"\"][\"\"] is None:options[\"\"][\"\"] = \"\"else:raise RuntimeError(\"\" % (swarmSize))tokenReplacements = dict()includedFields = options['']if hsVersion == '':(encoderSpecsStr, permEncoderChoicesStr) =_generateEncoderStringsV1(includedFields)elif hsVersion in ['', '']:(encoderSpecsStr, permEncoderChoicesStr) =_generateEncoderStringsV2(includedFields, options)else:raise RuntimeError(\"\" % (hsVersion))if options[''] is not None:sensorAutoResetStr = pprint.pformat(options[''],indent=*_INDENT_STEP)else:sensorAutoResetStr = ''aggregationPeriod = {'': ,'': ,'': ,'': ,'': ,'': ,'': ,'': ,'': ,}aggFunctionsDict = {}if '' in options['']:for key in list(aggregationPeriod.keys()):if key in options['']['']:aggregationPeriod[key] = options[''][''][key]if '' in options['']['']:for (fieldName, func) in options['']['']['']:aggFunctionsDict[fieldName] = str(func)hasAggregation = Falsefor v in list(aggregationPeriod.values()):if v != :hasAggregation = TruebreakaggFunctionList = list(aggFunctionsDict.items())aggregationInfo = dict(aggregationPeriod)aggregationInfo[''] = aggFunctionListaggregationInfoStr = \"\" % (pprint.pformat(aggregationInfo,indent=*_INDENT_STEP))datasetSpec = options['']if '' in datasetSpec:datasetSpec.pop('')if hasAggregation:datasetSpec[''] = ''datasetSpecStr = pprint.pformat(datasetSpec, indent=*_INDENT_STEP)datasetSpecStr = datasetSpecStr.replace(\"\", \"\")datasetSpecStr = _indentLines(datasetSpecStr, , indentFirstLine=False)computeInterval = options['']if computeInterval is not Noneand options[''] in ['','','']:predictionSteps = options[''].get('', [])if len(predictionSteps) > :raise _InvalidCommandArgException(\"\"\"\"\"\" % predictionSteps)if max(aggregationInfo.values()) == :raise _InvalidCommandArgException(\"\"\"\"\"\")numSteps = predictionSteps[]predictAheadTime = dict(aggregationPeriod)for key in predictAheadTime.keys():predictAheadTime[key] *= numStepspredictAheadTimeStr = pprint.pformat(predictAheadTime,indent=*_INDENT_STEP)options[''] = Trueelse:options[''] = FalsepredictAheadTimeStr = \"\"tokenReplacements[''] =_quoteAndEscape(os.path.abspath(__file__))inferenceType = options['']if inferenceType == '':inferenceType = InferenceType.TemporalMultiSteptokenReplacements[''] = \"\" % inferenceTypeif inferenceType == InferenceType.NontemporalClassification:tokenReplacements[''] = \"\"tokenReplacements[''] = \"\"else:tokenReplacements[''] = \"\"tokenReplacements[''] = \"\"tokenReplacements[''] = \"\"tokenReplacements[''] = pprint.pformat(options[''], indent=*_INDENT_STEP)tokenReplacements[''] = encoderSpecsStrtokenReplacements[''] = sensorAutoResetStrtokenReplacements[''] = aggregationInfoStrtokenReplacements[''] = datasetSpecStrif options[''] is None:options[''] = -tokenReplacements['']= str(options[''])tokenReplacements['']= str(options[''])tokenReplacements['']= str(options[''])tokenReplacements['']= str(options[''])tokenReplacements['']= str(options[''])tokenReplacements['']= permEncoderChoicesStrpredictionSteps = options[''].get('', [])predictionStepsStr = ''.join([str(x) for x in predictionSteps])tokenReplacements[''] = \"\" % (predictionStepsStr)tokenReplacements[''] = 
predictAheadTimeStrtokenReplacements[''] = \"\"if options['']and options[''] != '':tokenReplacements[''] =_ONE_INDENT +\"\"if options[''] in ['','']:tokenReplacements[''] = \"\"else:tokenReplacements[''] =\"\"+ \"\"+ \"\"if options[''] == '':tokenReplacements[''] =\"\"+ \"\"else:tokenReplacements[''] = \"\"if options[''] in ['', '','', '','']:tokenReplacements[''] =\"\"else:tokenReplacements[''] = \"\"tokenReplacements[''] =\"\" %(options[\"\"][\"\"])if options.get('', None) is not None:tokenReplacements[''] =\"\" % (options[''])else:tokenReplacements[''] = \"\"if options.get('', None) is not None:tokenReplacements[''] =\"\" % (options[''])else:tokenReplacements[''] = \"\"if options.get('', None) is not None:tokenReplacements[''] =\"\" % (options[''])else:tokenReplacements[''] = \"\"if options.get('', None) is not None:tokenReplacements[''] =\"\" % (options[''])else:tokenReplacements[''] = \"\"if options.get('', None) is not None:tokenReplacements[''] =\"\" %(options[''])else:tokenReplacements[''] = \"\"if options.get('', None) is not None:tokenReplacements[''] =\"\" % (options[''])else:tokenReplacements[''] = \"\"if options.get('', None) is not None:tokenReplacements[''] =\"\" % (options[''])else:tokenReplacements[''] = \"\"if options.get('', None) is not None:tokenReplacements[''] =\"\" % (options[''])else:tokenReplacements[''] = \"\"if options['']:debugAgg = Truequotient = aggregationDivide(computeInterval, aggregationPeriod)(isInt, multiple) = _isInt(quotient)if not isInt or multiple < :raise _InvalidCommandArgException(\"\"\"\"\"\" % (computeInterval, aggregationPeriod))mTimesN = float(predictionSteps[])possibleNs = []for n in range(, int(mTimesN)+):m = mTimesN / nmInt = int(round(m))if mInt < :breakif abs(m - mInt) > * m:continuepossibleNs.append(n)if debugAgg:print(\"\" % (mTimesN, possibleNs))aggChoices = []for n in possibleNs:agg = dict(aggregationPeriod)for key in agg.keys():agg[key] *= nquotient = aggregationDivide(computeInterval, agg)(isInt, multiple) = _isInt(quotient)if not isInt or multiple < :continueaggChoices.append(agg)aggChoices = aggChoices[-:]if debugAgg:print(\"\")for agg in aggChoices:print(\"\", agg)print()tokenReplacements[''] = (\"\" % (pprint.pformat(aggChoices, indent=*_INDENT_STEP)))else:tokenReplacements[''] = aggregationInfoStr_generateInferenceArgs(options, tokenReplacements)_generateMetricsSubstitutions(options, tokenReplacements)environment = options['']if environment == OpfEnvironment.Nupic:tokenReplacements[''] = \"\"%OpfEnvironment.NupiccontrolTemplate = \"\"elif environment == OpfEnvironment.Experiment:tokenReplacements[''] = \"\"%OpfEnvironment.ExperimentcontrolTemplate = \"\"else:raise _InvalidCommandArgException(\"\"% environment)if outputDirPath is None:outputDirPath = tempfile.mkdtemp()if not os.path.exists(outputDirPath):os.makedirs(outputDirPath)print(\"\" % (outputDirPath))descriptionPyPath = os.path.join(outputDirPath, \"\")_generateFileFromTemplates([claDescriptionTemplateFile, controlTemplate],descriptionPyPath,tokenReplacements)permutationsPyPath = os.path.join(outputDirPath, \"\")if hsVersion == '':_generateFileFromTemplates([''],permutationsPyPath,tokenReplacements)elif hsVersion == '':_generateFileFromTemplates([''],permutationsPyPath,tokenReplacements)elif hsVersion == '':_generateFileFromTemplates([''],permutationsPyPath,tokenReplacements)else:raise ValueErrorprint(\"\")", "docstring": "Executes the --description option, which includes:\n\n 1. Perform provider compatibility checks\n 2. 
Preprocess the training and testing datasets (filter, join providers)\n 3. If test dataset omitted, split the training dataset into training\n and testing datasets.\n 4. Gather statistics about the training and testing datasets.\n 5. Generate experiment scripts (description.py, permutaions.py)\n\n Parameters:\n --------------------------------------------------------------------------\n options: dictionary that matches the schema defined by the return value of\n _getExperimentDescriptionSchema(); NOTE: this arg may be modified\n by this function.\n\n outputDirPath: where to place generated files\n\n hsVersion: which version of hypersearch permutations file to generate, can\n be 'v1' or 'v2'\n claDescriptionTemplateFile: Filename containing the template description\n\n\n Returns: on success, returns a dictionary per _experimentResultsJSONSchema;\n raises exception on error\n\n Assumption1: input train and test files have identical field metadata", "id": "f17601:m18"} {"signature": "def _generateMetricsSubstitutions(options, tokenReplacements):", "body": "options[''] = [\"\"]metricList, optimizeMetricLabel = _generateMetricSpecs(options)metricListString = \"\".join(metricList)metricListString = _indentLines(metricListString, , indentFirstLine=False)permOptimizeSettingStr = '' % optimizeMetricLabelloggedMetricsListAsStr = \"\" % (\"\".join([\"\"% ptrnfor ptrn in options['']]))tokenReplacements['']= loggedMetricsListAsStrtokenReplacements[''] = metricListStringtokenReplacements['']= permOptimizeSettingStr", "docstring": "Generate the token substitution for metrics related fields.\n This includes:\n \\$METRICS\n \\$LOGGED_METRICS\n \\$PERM_OPTIMIZE_SETTING", "id": "f17601:m19"} {"signature": "def _generateMetricSpecs(options):", "body": "inferenceType = options['']inferenceArgs = options['']predictionSteps = inferenceArgs['']metricWindow = options['']if metricWindow is None:metricWindow = int(Configuration.get(\"\"))metricSpecStrings = []optimizeMetricLabel = \"\"metricSpecStrings.extend(_generateExtraMetricSpecs(options))optimizeMetricSpec = Noneif options['']:assert len(predictionSteps) == predictionSteps = ['']if inferenceType in (InferenceType.TemporalNextStep,InferenceType.TemporalAnomaly,InferenceType.TemporalMultiStep,InferenceType.NontemporalMultiStep,InferenceType.NontemporalClassification,''):predictedFieldName, predictedFieldType = _getPredictedField(options)isCategory = _isCategory(predictedFieldType)metricNames = ('',) if isCategory else ('', '')trivialErrorMetric = '' if isCategory else ''oneGramErrorMetric = '' if isCategory else ''movingAverageBaselineName = '' if isCategory else ''for metricName in metricNames:metricSpec, metricLabel =_generateMetricSpecString(field=predictedFieldName,inferenceElement=InferenceElement.multiStepBestPredictions,metric='',params={'': metricName,'':metricWindow,'': predictionSteps},returnLabel=True)metricSpecStrings.append(metricSpec)if options[\"\"] is not None :metricParams = dict(options[\"\"])metricParams[''] = ''metricParams[''] = predictionStepsif not \"\" in metricParams:metricParams[\"\"] = metricWindowmetricSpec, metricLabel =_generateMetricSpecString(field=predictedFieldName,inferenceElement=InferenceElement.multiStepPredictions,metric=\"\",params=metricParams,returnLabel=True)metricSpecStrings.append(metricSpec)optimizeMetricSpec = metricSpecmetricLabel = metricLabel.replace('', '')metricLabel = metricLabel.replace('', '')optimizeMetricLabel = metricLabelif options[\"\"] is not None :optimizeMetricLabel = \"\"if options[\"\"]and 
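_generateMetricsSubstitutions above fills template tokens such as $METRICS, $LOGGED_METRICS and $PERM_OPTIMIZE_SETTING that the template expansion step later consumes. The tiny sketch below illustrates that kind of placeholder substitution; the template text and the replacement values are invented for demonstration.

# Sketch only: token substitution of the sort the experiment generator relies on.
template = "control = {'metrics': [$METRICS], 'loggedMetrics': $LOGGED_METRICS}"
tokenReplacements = {
    "$METRICS": "MetricSpec(field='consumption', metric='aae', params={'window': 1000})",
    "$LOGGED_METRICS": "['.*']",
}
out = template
for token, value in tokenReplacements.items():
  out = out.replace(token, value)
print(out)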
inferenceType != InferenceType.NontemporalClassification:for steps in predictionSteps:metricSpecStrings.append(_generateMetricSpecString(field=predictedFieldName,inferenceElement=InferenceElement.prediction,metric=\"\",params={'':metricWindow,\"\":trivialErrorMetric,'': steps}))if isCategory:metricSpecStrings.append(_generateMetricSpecString(field=predictedFieldName,inferenceElement=InferenceElement.prediction,metric=movingAverageBaselineName,params={'':metricWindow,\"\":\"\",\"\":,\"\": steps}))else :metricSpecStrings.append(_generateMetricSpecString(field=predictedFieldName,inferenceElement=InferenceElement.prediction,metric=movingAverageBaselineName,params={'':metricWindow,\"\":\"\",\"\":,\"\": steps}))elif inferenceType in (InferenceType.TemporalClassification):metricName = ''trivialErrorMetric = ''oneGramErrorMetric = ''movingAverageBaselineName = ''optimizeMetricSpec, optimizeMetricLabel =_generateMetricSpecString(inferenceElement=InferenceElement.classification,metric=metricName,params={'':metricWindow},returnLabel=True)metricSpecStrings.append(optimizeMetricSpec)if options[\"\"]:if inferenceType == InferenceType.TemporalClassification:metricSpecStrings.append(_generateMetricSpecString(inferenceElement=InferenceElement.classification,metric=\"\",params={'':metricWindow,\"\":trivialErrorMetric}))metricSpecStrings.append(_generateMetricSpecString(inferenceElement=InferenceElement.classification,metric=\"\",params={'':metricWindow,\"\":oneGramErrorMetric}))metricSpecStrings.append(_generateMetricSpecString(inferenceElement=InferenceElement.classification,metric=movingAverageBaselineName,params={'':metricWindow,\"\":\"\",\"\":}))if not options[\"\"] == None :if not \"\" in options[\"\"]:options[\"\"][\"\"] = metricWindowoptimizeMetricSpec = _generateMetricSpecString(inferenceElement=InferenceElement.classification,metric=\"\",params=options[\"\"])optimizeMetricLabel = \"\"metricSpecStrings.append(optimizeMetricSpec)if options['']:for i in range(len(metricSpecStrings)):metricSpecStrings[i] = metricSpecStrings[i].replace(\"\", \"\")optimizeMetricLabel = optimizeMetricLabel.replace(\"\", \"\")return metricSpecStrings, optimizeMetricLabel", "docstring": "Generates the Metrics for a given InferenceType\n\n Parameters:\n -------------------------------------------------------------------------\n options: ExpGenerator options\n retval: (metricsList, optimizeMetricLabel)\n metricsList: list of metric string names\n optimizeMetricLabel: Name of the metric which to optimize over", "id": "f17601:m20"} {"signature": "def _generateExtraMetricSpecs(options):", "body": "_metricSpecSchema = {'': {}}results = []for metric in options['']:for propertyName in list(_metricSpecSchema[''].keys()):_getPropertyValue(_metricSpecSchema, propertyName, metric)specString, label = _generateMetricSpecString(field=metric[''],metric=metric[''],params=metric[''],inferenceElement=metric[''],returnLabel=True)if metric['']:options[''].append(label)results.append(specString)return results", "docstring": "Generates the non-default metrics specified by the expGenerator params", "id": "f17601:m21"} {"signature": "def _getPredictedField(options):", "body": "if not options[''] ornot options['']['']:return None, NonepredictedField = options['']['']predictedFieldInfo = NoneincludedFields = options['']for info in includedFields:if info[''] == predictedField:predictedFieldInfo = infobreakif predictedFieldInfo is None:raise ValueError(\"\" % predictedField)predictedFieldType = predictedFieldInfo['']return predictedField, 
predictedFieldType", "docstring": "Gets the predicted field and it's datatype from the options dictionary\n\n Returns: (predictedFieldName, predictedFieldType)", "id": "f17601:m22"} {"signature": "def _generateInferenceArgs(options, tokenReplacements):", "body": "inferenceType = options['']optionInferenceArgs = options.get('', None)resultInferenceArgs = {}predictedField = _getPredictedField(options)[]if inferenceType in (InferenceType.TemporalNextStep,InferenceType.TemporalAnomaly):assert predictedField, \"\"\"\"% inferenceTypeif optionInferenceArgs:if options['']:altOptionInferenceArgs = copy.deepcopy(optionInferenceArgs)altOptionInferenceArgs[''] = ''resultInferenceArgs = pprint.pformat(altOptionInferenceArgs)resultInferenceArgs = resultInferenceArgs.replace(\"\",'')else:resultInferenceArgs = pprint.pformat(optionInferenceArgs)tokenReplacements[''] = resultInferenceArgstokenReplacements[''] = predictedField", "docstring": "Generates the token substitutions related to the predicted field\n and the supplemental arguments for prediction", "id": "f17601:m23"} {"signature": "def expGenerator(args):", "body": "parser = OptionParser()parser.set_usage(\"\" +\"\" +\"\")parser.add_option(\"\", dest = \"\",help = \"\"\"\"\"\")parser.add_option(\"\", dest = '',help = \"\"\"\")parser.add_option(\"\",dest = '',default = '',help = \"\"\"\")parser.add_option(\"\",action=\"\", dest=\"\",help=\"\")parser.add_option(\"\", dest = '', default='',help = \"\"\"\")parser.add_option(\"\",dest = \"\", default=None,help = \"\"\"\")(options, remainingArgs) = parser.parse_args(args)if len(remainingArgs) > :raise _InvalidCommandArgException(_makeUsageErrorStr(\"\" %(''.join(remainingArgs),), parser.get_usage()))activeOptions = [x for x in ('', '') if getattr(options, x) != None]if len(activeOptions) > :raise _InvalidCommandArgException(_makeUsageErrorStr((\"\" +\"\") % (activeOptions,),parser.get_usage()))if options.showSchema:_handleShowSchemaOption()elif options.description:_handleDescriptionOption(options.description, options.outDir,parser.get_usage(), hsVersion=options.version,claDescriptionTemplateFile = options.claDescriptionTemplateFile)elif options.descriptionFromFile:_handleDescriptionFromFileOption(options.descriptionFromFile,options.outDir, parser.get_usage(), hsVersion=options.version,claDescriptionTemplateFile = options.claDescriptionTemplateFile)else:raise _InvalidCommandArgException(_makeUsageErrorStr(\"\"\"\", parser.get_usage()))", "docstring": "Parses, validates, and executes command-line options;\n\n On success: Performs requested operation and exits program normally\n\n On Error: Dumps exception/error info in JSON format to stdout and exits the\n program with non-zero status.", "id": "f17601:m24"} {"signature": "def __init__(self, dirPath, reason):", "body": "super(_CreateDirectoryException, self).__init__((\"\" +\"\") % (str(dirPath), str(reason)))self.reason = reason", "docstring": "dirPath: the path that we attempted to create for experiment files\n\nreason: any object that can be converted to a string that explains\n the reason (may be an exception)", "id": "f17601:c1:m0"} {"signature": "def __init__(self, problem, precursor):", "body": "super(_ErrorReportingException, self).__init__((\"\" +\"\")% (problem, precursor))", "docstring": "problem: a string-convertible object that describes the problem\n experienced by the error-reporting funciton.\n\nprecursor: a string-convertible object that explains\n the original error that the error-reporting function\n was attempting to report when it 
encountered its own failure.", "id": "f17601:c4:m0"} {"signature": "def coordinatesFromIndex(index, dimensions):", "body": "coordinates = [] * len(dimensions)shifted = indexfor i in xrange(len(dimensions) - , , -):coordinates[i] = shifted % dimensions[i]shifted = shifted / dimensions[i]coordinates[] = shiftedreturn coordinates", "docstring": "Translate an index into coordinates, using the given coordinate system.\n\nSimilar to ``numpy.unravel_index``.\n\n:param index: (int) The index of the point. The coordinates are expressed as a \n single index by using the dimensions as a mixed radix definition. For \n example, in dimensions 42x10, the point [1, 4] is index \n 1*420 + 4*10 = 460.\n\n:param dimensions (list of ints) The coordinate system.\n\n:returns: (list) of coordinates of length ``len(dimensions)``.", "id": "f17602:m0"} {"signature": "def indexFromCoordinates(coordinates, dimensions):", "body": "index = for i, dimension in enumerate(dimensions):index *= dimensionindex += coordinates[i]return index", "docstring": "Translate coordinates into an index, using the given coordinate system.\n\nSimilar to ``numpy.ravel_multi_index``.\n\n:param coordinates: (list of ints) A list of coordinates of length \n ``dimensions.size()``.\n\n:param dimensions: (list of ints) The coordinate system.\n\n:returns: (int) The index of the point. The coordinates are expressed as a \n single index by using the dimensions as a mixed radix definition. \n For example, in dimensions 42x10, the point [1, 4] is index \n 1*420 + 4*10 = 460.", "id": "f17602:m1"} {"signature": "def neighborhood(centerIndex, radius, dimensions):", "body": "centerPosition = coordinatesFromIndex(centerIndex, dimensions)intervals = []for i, dimension in enumerate(dimensions):left = max(, centerPosition[i] - radius)right = min(dimension - , centerPosition[i] + radius)intervals.append(xrange(left, right + ))coords = numpy.array(list(itertools.product(*intervals)))return numpy.ravel_multi_index(coords.T, dimensions)", "docstring": "Get the points in the neighborhood of a point.\n\nA point's neighborhood is the n-dimensional hypercube with sides ranging\n[center - radius, center + radius], inclusive. For example, if there are two\ndimensions and the radius is 3, the neighborhood is 6x6. Neighborhoods are\ntruncated when they are near an edge.\n\nThis is designed to be fast. In C++ it's fastest to iterate through neighbors\none by one, calculating them on-demand rather than creating a list of them.\nBut in Python it's faster to build up the whole list in batch via a few calls\nto C code rather than calculating them on-demand with lots of calls to Python\ncode.\n\n:param centerIndex: (int) The index of the point. The coordinates are \n expressed as a single index by using the dimensions as a mixed radix \n definition. 
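coordinatesFromIndex and indexFromCoordinates above are inverse mixed-radix conversions, equivalent to numpy.unravel_index and numpy.ravel_multi_index. A small round-trip sketch:

# Round-trip sketch of the mixed-radix conversions described above.
import numpy

def coordinates_from_index(index, dimensions):
  coords = []
  for dim in reversed(dimensions):
    coords.append(index % dim)
    index //= dim
  return list(reversed(coords))

def index_from_coordinates(coords, dimensions):
  index = 0
  for coord, dim in zip(coords, dimensions):
    index = index * dim + coord
  return index

dims = [42, 10]
assert coordinates_from_index(14, dims) == [1, 4]
assert index_from_coordinates([1, 4], dims) == 14
assert list(numpy.unravel_index(14, dims)) == [1, 4]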
For example, in dimensions 42x10, the point [1, 4] is index \n 1*420 + 4*10 = 460.\n\n:param radius: (int) The radius of this neighborhood about the \n ``centerIndex``.\n\n:param dimensions: (indexable sequence) The dimensions of the world outside \n this neighborhood.\n\n:returns: (numpy array) The points in the neighborhood, including \n ``centerIndex``.", "id": "f17602:m2"} {"signature": "def wrappingNeighborhood(centerIndex, radius, dimensions):", "body": "centerPosition = coordinatesFromIndex(centerIndex, dimensions)intervals = []for i, dimension in enumerate(dimensions):left = centerPosition[i] - radiusright = min(centerPosition[i] + radius,left + dimensions[i] - )interval = [v % dimension for v in xrange(left, right + )]intervals.append(interval)coords = numpy.array(list(itertools.product(*intervals)))return numpy.ravel_multi_index(coords.T, dimensions)", "docstring": "Like :meth:`neighborhood`, except that the neighborhood isn't truncated when \nit's near an edge. It wraps around to the other side.\n\n:param centerIndex: (int) The index of the point. The coordinates are \n expressed as a single index by using the dimensions as a mixed radix \n definition. For example, in dimensions 42x10, the point [1, 4] is index \n 1*420 + 4*10 = 460.\n\n:param radius: (int) The radius of this neighborhood about the \n ``centerIndex``.\n\n:param dimensions: (indexable sequence) The dimensions of the world outside \n this neighborhood.\n\n:returns: (numpy array) The points in the neighborhood, including \n ``centerIndex``.", "id": "f17602:m3"} {"signature": "def propose(self, current, r):", "body": "stay = (r.uniform(, ) < self.kernel)if stay:logKernel = numpy.log(self.kernel)return current, logKernel, logKernelelse: curIndex = self.keyMap[current]ri = r.randint(, self.nKeys-)logKernel = numpy.log( - self.kernel)lp = logKernel + self.logpif ri < curIndex: return self.keys[ri], lp, lpelse: return self.keys[ri+], lp, lp", "docstring": "Generates a random sample from the discrete probability distribution and\n returns its value, the log of the probability of sampling that value and the\n log of the probability of sampling the current value (passed in).", "id": "f17603:c5:m1"} {"signature": "def propose(self, current, r):", "body": "curLambda = current + self.offsetx, logProb = PoissonDistribution(curLambda).sample(r)logBackward = PoissonDistribution(x+self.offset).logDensity(current)return x, logProb, logBackward", "docstring": "Generates a random sample from the Poisson probability distribution with\n with location and scale parameter equal to the current value (passed in).\n Returns the value of the random sample, the log of the probability of\n sampling that value, and the log of the probability of sampling the current\n value if the roles of the new sample and the current sample were reversed\n (the log of the backward proposal probability).", "id": "f17603:c7:m1"} {"signature": "def lscsum(lx, epsilon=None):", "body": "lx = numpy.asarray(lx)base = lx.max()if numpy.isinf(base):return baseif (epsilon is not None) and (base < epsilon):return epsilonx = numpy.exp(lx - base)ssum = x.sum()result = numpy.log(ssum) + basereturn result", "docstring": "Accepts log-values as input, exponentiates them, computes the sum,\nthen converts the sum back to log-space and returns the result.\nHandles underflow by rescaling so that the largest values is exactly 1.0.", "id": "f17604:m1"} {"signature": "def lscsum0(lx):", "body": "lx = numpy.asarray(lx)bases = lx.max() x = numpy.exp(lx - bases)ssum = x.sum()result = 
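The neighborhood helpers above build one index interval per dimension, take the Cartesian product, and ravel the coordinates back into indices. A compact sketch of the truncating (non-wrapping) variant:

# Sketch of the truncated-neighborhood construction described above.
import itertools
import numpy

def neighborhood(centerIndex, radius, dimensions):
  center = numpy.unravel_index(centerIndex, dimensions)
  intervals = [range(max(0, c - radius), min(d - 1, c + radius) + 1)
               for c, d in zip(center, dimensions)]
  coords = numpy.array(list(itertools.product(*intervals)))
  return numpy.ravel_multi_index(coords.T, dimensions)

# 2x3 world, radius 1 around index 0: the 2x2 corner block.
print(neighborhood(0, 1, (2, 3)))   # [0 1 3 4]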
numpy.log(ssum) + basestry:conventional = numpy.log(numpy.exp(lx).sum())if not similar(result, conventional):if numpy.isinf(conventional).any() and not numpy.isinf(result).any():passelse:import sysprint(\"\", file=sys.stderr)print(\"\", file=sys.stderr)print(result, file=sys.stderr)print(\"\", file=sys.stderr)print(conventional, file=sys.stderr)except FloatingPointError as e:passreturn result", "docstring": "Accepts log-values as input, exponentiates them, sums down the rows\n(first dimension), then converts the sum back to log-space and returns the result.\nHandles underflow by rescaling so that the largest values is exactly 1.0.", "id": "f17604:m2"} {"signature": "def normalize(lx):", "body": "lx = numpy.asarray(lx)base = lx.max()x = numpy.exp(lx - base)result = x / x.sum()conventional = (numpy.exp(lx) / numpy.exp(lx).sum())assert similar(result, conventional)return result", "docstring": "Accepts log-values as input, exponentiates them,\nnormalizes and returns the result.\nHandles underflow by rescaling so that the largest values is exactly 1.0.", "id": "f17604:m3"} {"signature": "def nsum0(lx):", "body": "lx = numpy.asarray(lx)base = lx.max()x = numpy.exp(lx - base)ssum = x.sum()result = ssum / ssum.sum()conventional = (numpy.exp(lx).sum() / numpy.exp(lx).sum())assert similar(result, conventional)return result", "docstring": "Accepts log-values as input, exponentiates them, sums down the rows\n(first dimension), normalizes and returns the result.\nHandles underflow by rescaling so that the largest values is exactly 1.0.", "id": "f17604:m4"} {"signature": "def lnsum0(lx):", "body": "lx = numpy.asarray(lx)base = lx.max()x = numpy.exp(lx - base)ssum = x.sum()normalized = nsum0(lx)result = numpy.log(normalized)conventional = numpy.log(numpy.exp(lx).sum() / numpy.exp(lx).sum())assert similar(result, conventional)return result", "docstring": "Accepts log-values as input, exponentiates them, sums down the rows\n(first dimension), normalizes, then converts the sum back to\nlog-space and returns the result.\nHandles underflow by rescaling so that the largest values is exactly 1.0.", "id": "f17604:m5"} {"signature": "def logSumExp(A, B, out=None):", "body": "if out is None:out = numpy.zeros(A.shape)indicator1 = A >= Bindicator2 = numpy.logical_not(indicator1)out[indicator1] = A[indicator1] + numpy.log1p(numpy.exp(B[indicator1]-A[indicator1]))out[indicator2] = B[indicator2] + numpy.log1p(numpy.exp(A[indicator2]-B[indicator2]))return out", "docstring": "returns log(exp(A) + exp(B)). A and B are numpy arrays", "id": "f17604:m6"} {"signature": "def logDiffExp(A, B, out=None):", "body": "if out is None:out = numpy.zeros(A.shape)indicator1 = A >= Bassert indicator1.all(), \"\"out[indicator1] = A[indicator1] + numpy.log( - numpy.exp(B[indicator1]-A[indicator1]))return out", "docstring": "returns log(exp(A) - exp(B)). A and B are numpy arrays. values in A should be\n greater than or equal to corresponding values in B", "id": "f17604:m7"} {"signature": "def pickByDistribution(distribution, r=None):", "body": "if r is None:r = randomx = r.uniform(, sum(distribution))for i, d in enumerate(distribution):if x <= d:return ix -= d", "docstring": "Pick a value according to the provided distribution.\n\nExample:\n\n::\n\n pickByDistribution([.2, .1])\n\nReturns 0 two thirds of the time and 1 one third of the time.\n\n:param distribution: Probability distribution. Need not be normalized.\n:param r: Instance of random.Random. 
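Every log-space helper above uses the same underflow protection: subtract the maximum before exponentiating so the largest term becomes exactly 1.0. A minimal stand-alone log-sum-exp illustration:

# Minimal log-sum-exp sketch matching the rescaling trick described above.
import numpy

def log_sum_exp(lx):
  lx = numpy.asarray(lx, dtype=float)
  base = lx.max()
  if numpy.isinf(base):
    return base
  return base + numpy.log(numpy.exp(lx - base).sum())

lx = numpy.array([-1000.0, -1000.0 + numpy.log(2.0)])
print(log_sum_exp(lx))                    # -1000 + log(3), about -998.901
with numpy.errstate(divide='ignore'):
  print(numpy.log(numpy.exp(lx).sum()))   # -inf: the naive form underflows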
Uses the system instance if one is\n not provided.", "id": "f17606:m0"} {"signature": "def Indicator(pos, size, dtype):", "body": "x = numpy.zeros(size, dtype=dtype)x[pos] = return x", "docstring": "Returns an array of length size and type dtype that is everywhere 0,\nexcept in the index in pos.\n\n:param pos: (int) specifies the position of the one entry that will be set.\n:param size: (int) The total size of the array to be returned.\n:param dtype: The element type (compatible with NumPy array())\n of the array to be returned.\n:returns: (list) of length ``size`` and element type ``dtype``.", "id": "f17606:m1"} {"signature": "def MultiArgMax(x):", "body": "m = x.max()return (i for i, v in enumerate(x) if v == m)", "docstring": "Get tuple (actually a generator) of indices where the max value of\narray x occurs. Requires that x have a max() method, as x.max()\n(in the case of NumPy) is much faster than max(x).\nFor a simpler, faster argmax when there is only a single maximum entry,\nor when knowing only the first index where the maximum occurs,\ncall argmax() on a NumPy array.\n\n:param x: Any sequence that has a max() method.\n:returns: Generator with the indices where the max value occurs.", "id": "f17606:m2"} {"signature": "def Any(sequence):", "body": "return bool(reduce(lambda x, y: x or y, sequence, False))", "docstring": "Tests much faster (30%) than bool(sum(bool(x) for x in sequence)).\n\n:returns: (bool) true if any element of the sequence satisfies True. \n\n:param sequence: Any sequence whose elements can be evaluated as booleans.", "id": "f17606:m3"} {"signature": "def All(sequence):", "body": "return bool(reduce(lambda x, y: x and y, sequence, True))", "docstring": ":param sequence: Any sequence whose elements can be evaluated as booleans.\n:returns: true if all elements of the sequence satisfy True and x.", "id": "f17606:m4"} {"signature": "def Product(sequence):", "body": "return reduce(lambda x, y: x * y, sequence)", "docstring": "Returns the product of the elements of the sequence.\nUse numpy.prod() if the sequence is an array, as it will be faster.\nRemember that the product of many numbers may rapidly overflow or\nunderflow the numeric precision of the computer.\nUse a sum of the logs of the sequence elements instead when precision\nshould be maintained.\n\n:param sequence: Any sequence whose elements can be multiplied by their\n neighbors.\n:returns: A single value that is the product of all the sequence elements.", "id": "f17606:m5"} {"signature": "def MultiIndicator(pos, size, dtype):", "body": "x = numpy.zeros(size, dtype=dtype)if hasattr(pos, ''):for i in pos: x[i] = else: x[pos] = return x", "docstring": "Returns an array of length size and type dtype that is everywhere 0,\nexcept in the indices listed in sequence pos.\n\n:param pos: A single integer or sequence of integers that specify\n the position of ones to be set.\n:param size: The total size of the array to be returned.\n:param dtype: The element type (compatible with NumPy array())\n of the array to be returned.\n:returns: An array of length size and element type dtype.", "id": "f17606:m6"} {"signature": "def Distribution(pos, size, counts, dtype):", "body": "x = numpy.zeros(size, dtype=dtype)if hasattr(pos, ''):total = for i in pos:total += counts[i]total = float(total)for i in pos:x[i] = counts[i]/totalelse: x[pos] = return x", "docstring": "Returns an array of length size and type dtype that is everywhere 0,\nexcept in the indices listed in sequence pos. 
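pickByDistribution above draws an index with probability proportional to unnormalized weights by walking the running total. An equivalent self-contained sketch, with a quick frequency check against the docstring's [.2, .1] example:

# Sketch of distribution-proportional index picking as described above.
import random

def pick_by_distribution(weights, rng=random):
  x = rng.uniform(0, sum(weights))
  for i, w in enumerate(weights):
    if x <= w:
      return i
    x -= w
  return len(weights) - 1   # guard against floating-point rounding at the top end

rng = random.Random(42)
counts = [0, 0]
for _ in range(30000):
  counts[pick_by_distribution([.2, .1], rng)] += 1
print(counts[0] / counts[1])   # roughly 2.0: index 0 comes up two thirds of the time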
The non-zero indices\ncontain a normalized distribution based on the counts.\n\n\n:param pos: A single integer or sequence of integers that specify\n the position of ones to be set.\n:param size: The total size of the array to be returned.\n:param counts: The number of times we have observed each index.\n:param dtype: The element type (compatible with NumPy array())\n of the array to be returned.\n:returns: An array of length size and element type dtype.", "id": "f17606:m7"} {"signature": "def numRows(self):", "body": "if self.hist_: return self.hist_.nRows()else: return ", "docstring": "Gets the number of rows in the histogram.\n\n :returns: Integer number of rows.", "id": "f17606:c0:m1"} {"signature": "def numColumns(self):", "body": "if self.hist_: return self.hist_.nCols()else: return ", "docstring": ":return: (int) number of columns", "id": "f17606:c0:m2"} {"signature": "def grow(self, rows, cols):", "body": "if not self.hist_:self.hist_ = SparseMatrix(rows, cols)self.rowSums_ = numpy.zeros(rows, dtype=dtype)self.colSums_ = numpy.zeros(cols, dtype=dtype)self.hack_ = Noneelse:oldRows = self.hist_.nRows()oldCols = self.hist_.nCols()nextRows = max(oldRows, rows)nextCols = max(oldCols, cols)if (oldRows < nextRows) or (oldCols < nextCols):self.hist_.resize(nextRows, nextCols)if oldRows < nextRows:oldSums = self.rowSums_self.rowSums_ = numpy.zeros(nextRows, dtype=dtype)self.rowSums_[:len(oldSums)] = oldSumsself.hack_ = Noneif oldCols < nextCols:oldSums = self.colSums_self.colSums_ = numpy.zeros(nextCols, dtype=dtype)self.colSums_[:len(oldSums)] = oldSumsself.hack_ = None", "docstring": "Grows the histogram to have rows rows and cols columns.\nMust not have been initialized before, or already have the same\nnumber of columns.\nIf rows is smaller than the current number of rows,\ndoes not shrink.\nAlso updates the sizes of the row and column sums.\n\n:param rows: Integer number of rows.\n:param cols: Integer number of columns.", "id": "f17606:c0:m3"} {"signature": "def updateRow(self, row, distribution):", "body": "self.grow(row+, len(distribution))self.hist_.axby(row, , , distribution)self.rowSums_[row] += distribution.sum()self.colSums_ += distributionself.hack_ = None", "docstring": "Add distribution to row row.\nDistribution should be an array of probabilities or counts.\n\n:param row: Integer index of the row to add to.\n May be larger than the current number of rows, in which case\n the histogram grows.\n:param distribution: Array of length equal to the number of columns.", "id": "f17606:c0:m4"} {"signature": "def inferRow(self, distribution):", "body": "return self.hist_ * (distribution / self.colSums_)", "docstring": "Computes the sumProp probability of each row given the input probability\nof each column. Normalizes the distribution in each column on the fly.\n\nThe semantics are as follows: If the distribution is P(col|e) where e is\nthe evidence is col is the column, and the CPD represents P(row|col), then\nthis calculates sum(P(col|e) P(row|col)) = P(row|e).\n\n:param distribution: Array of length equal to the number of columns.\n:returns: array of length equal to the number of rows.", "id": "f17606:c0:m5"} {"signature": "def inferRowEvidence(self, distribution):", "body": "return (self.hist_ * distribution) / self.rowSums_", "docstring": "Computes the probability of evidence given each row from the probability\nof evidence given each column. Essentially, this just means that it sums\nprobabilities over (normalized) rows. 
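inferRow above computes P(row|e) = sum over columns of P(col|e) P(row|col) by dividing the evidence vector by the column sums and multiplying by the count matrix. Below is a dense numpy sketch; the SparseMatrix is replaced by a plain array purely for illustration.

# Dense sketch of the inferRow computation described above.
import numpy

hist = numpy.array([[4., 1.],     # rows: hidden categories
                    [1., 4.]])    # columns: observed symbols
colSums = hist.sum(axis=0)
colEvidence = numpy.array([0.9, 0.1])           # P(col | e)
rowPosterior = hist.dot(colEvidence / colSums)  # sum_col P(col|e) P(row|col)
print(rowPosterior)                             # [0.74 0.26]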
Normalizes the distribution over\neach row on the fly.\n\nThe semantics are as follows: If the distribution is P(e|col) where e is\nevidence and col is the column, and the CPD is of P(col|row), then this\ncalculates sum(P(e|col) P(col|row)) = P(e|row).\n\n:param distribution: Array of length equal to the number of columns.\n:returns: array of length equal to the number of rows.", "id": "f17606:c0:m6"} {"signature": "def inferRowCompat(self, distribution):", "body": "if self.hack_ is None:self.clean_outcpd()return self.hack_.vecMaxProd(distribution)", "docstring": "Equivalent to the category inference of zeta1.TopLevel.\nComputes the max_prod (maximum component of a component-wise multiply)\nbetween the rows of the histogram and the incoming distribution.\nMay be slow if the result of clean_outcpd() is not valid.\n\n:param distribution: Array of length equal to the number of columns.\n:returns: array of length equal to the number of rows.", "id": "f17606:c0:m8"} {"signature": "def clean_outcpd(self):", "body": "m = self.hist_.toDense()for j in xrange(m.shape[]): cmax = m[:,j].max()if cmax:m[:,j] = numpy.array(m[:,j] == cmax, dtype=dtype)self.hack_ = SparseMatrix(, self.hist_.nCols())for i in xrange(m.shape[]):self.hack_.addRow(m[i,:])", "docstring": "Hack to act like clean_outcpd on zeta1.TopLevelNode.\n Take the max element in each to column, set it to 1, and set all the\n other elements to 0.\n Only called by inferRowMaxProd() and only needed if an updateRow()\n has been called since the last clean_outcpd().", "id": "f17606:c0:m9"} {"signature": "def logFactorial(x):", "body": "return lgamma(x + )", "docstring": "Approximation to the log of the factorial function.", "id": "f17608:m0"} {"signature": "def sample(self, rgen):", "body": "rf = rgen.uniform(, self.sum)index = bisect.bisect(self.cdf, rf)return self.keys[index], numpy.log(self.pmf[index])", "docstring": "Generates a random sample from the discrete probability distribution\n and returns its value and the log of the probability of sampling that value.", "id": "f17608:c2:m1"} {"signature": "def logProbability(self, distn):", "body": "x = numpy.asarray(distn)n = x.sum()return (logFactorial(n) - numpy.sum([logFactorial(k) for k in x]) +numpy.sum(x * numpy.log(self.dist.pmf)))", "docstring": "Form of distribution must be an array of counts in order of self.keys.", "id": "f17608:c3:m4"} {"signature": "def sample(self, rgen):", "body": "x = rgen.poisson(self.lambdaParameter)return x, self.logDensity(x)", "docstring": "Generates a random sample from the Poisson probability distribution and\n returns its value and the log of the probability of sampling that value.", "id": "f17608:c4:m1"} {"signature": "def ROCCurve(y_true, y_score):", "body": "y_true = np.ravel(y_true)classes = np.unique(y_true)if classes.shape[] != :raise ValueError(\"\")y_score = np.ravel(y_score)n_pos = float(np.sum(y_true == classes[])) n_neg = float(np.sum(y_true == classes[])) thresholds = np.unique(y_score)neg_value, pos_value = classes[], classes[]tpr = np.empty(thresholds.size, dtype=np.float) fpr = np.empty(thresholds.size, dtype=np.float) current_pos_count = current_neg_count = sum_pos = sum_neg = idx = signal = np.c_[y_score, y_true]sorted_signal = signal[signal[:, ].argsort(), :][::-]last_score = sorted_signal[][]for score, value in sorted_signal:if score == last_score:if value == pos_value:current_pos_count += else:current_neg_count += else:tpr[idx] = (sum_pos + current_pos_count) / n_posfpr[idx] = (sum_neg + current_neg_count) / n_negsum_pos += 
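logFactorial and the multinomial logProbability above combine into the standard multinomial log-likelihood: log n! - sum_i log k_i! + sum_i k_i log p_i. A small self-checking sketch:

# Multinomial log-likelihood sketch built from lgamma, as described above.
from math import lgamma, log, exp

def log_factorial(x):
  return lgamma(x + 1)

def multinomial_log_prob(counts, probs):
  n = sum(counts)
  return (log_factorial(n)
          - sum(log_factorial(k) for k in counts)
          + sum(k * log(p) for k, p in zip(counts, probs) if k > 0))

# Two fair coin flips, one head and one tail: probability 0.5.
print(exp(multinomial_log_prob([1, 1], [0.5, 0.5])))   # 0.5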
current_pos_countsum_neg += current_neg_countcurrent_pos_count = if value == pos_value else current_neg_count = if value == neg_value else idx += last_score = scoreelse:tpr[-] = (sum_pos + current_pos_count) / n_posfpr[-] = (sum_neg + current_neg_count) / n_negif fpr.shape[] == :fpr = np.array([, fpr[], fpr[]])tpr = np.array([, tpr[], tpr[]])elif fpr.shape[] == :fpr = np.array([, fpr[], ])tpr = np.array([, tpr[], ])return fpr, tpr, thresholds", "docstring": "compute Receiver operating characteristic (ROC)\n\n Note: this implementation is restricted to the binary classification task.\n\n Parameters\n ----------\n\n y_true : array, shape = [n_samples]\n true binary labels\n\n y_score : array, shape = [n_samples]\n target scores, can either be probability estimates of\n the positive class, confidence values, or binary decisions.\n\n Returns\n -------\n fpr : array, shape = [>2]\n False Positive Rates\n\n tpr : array, shape = [>2]\n True Positive Rates\n\n thresholds : array, shape = [>2]\n Thresholds on y_score used to compute fpr and tpr\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn import metrics\n >>> y = np.array([1, 1, 2, 2])\n >>> scores = np.array([0.1, 0.4, 0.35, 0.8])\n >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores)\n >>> fpr\n array([ 0. , 0.5, 0.5, 1. ])\n\n References\n ----------\n http://en.wikipedia.org/wiki/Receiver_operating_characteristic", "id": "f17609:m0"} {"signature": "def AreaUnderCurve(x, y):", "body": "if x.shape[] != y.shape[]:raise ValueError(''''''% (x.shape, y.shape))if x.shape[] < :raise ValueError('''' % x.shape)order = np.argsort(x)x = x[order]y = y[order]h = np.diff(x)area = np.sum(h * (y[:] + y[:-])) / return area", "docstring": "Compute Area Under the Curve (AUC) using the trapezoidal rule\n\n Parameters\n ----------\n x : array, shape = [n]\n x coordinates\n\n y : array, shape = [n]\n y coordinates\n\n Returns\n -------\n auc : float\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn import metrics\n >>> y = np.array([1, 1, 2, 2])\n >>> pred = np.array([0.1, 0.4, 0.35, 0.8])\n >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred)\n >>> metrics.auc(fpr, tpr)\n 0.75", "id": "f17609:m1"} {"signature": "def cross_list(*sequences):", "body": "result = [[ ]]for seq in sequences:result = [sublist+[item] for sublist in result for item in seq]return result", "docstring": "From: http://book.opensourceproject.org.cn/lamp/python/pythoncook2/opensource/0596007973/pythoncook2-chp-19-sect-9.html", "id": "f17610:m0"} {"signature": "def cross(*sequences):", "body": "wheels = map(iter, sequences)digits = [it.next( ) for it in wheels]while True:yield tuple(digits)for i in range(len(digits)-, -, -):try:digits[i] = wheels[i].next( )breakexcept StopIteration:wheels[i] = iter(sequences[i])digits[i] = wheels[i].next( )else:break", "docstring": "From: http://book.opensourceproject.org.cn/lamp/python/pythoncook2/opensource/0596007973/pythoncook2-chp-19-sect-9.html", "id": "f17610:m1"} {"signature": "def dcross(**keywords):", "body": "keys = keywords.keys()sequences = [keywords[key] for key in keys]wheels = map(iter, sequences)digits = [it.next( ) for it in wheels]while True:yield dict(zip(keys, digits))for i in range(len(digits)-, -, -):try:digits[i] = wheels[i].next( )breakexcept StopIteration:wheels[i] = iter(sequences[i])digits[i] = wheels[i].next( )else:break", "docstring": "Similar to cross(), but generates output dictionaries instead of tuples.", "id": "f17610:m2"} {"signature": "def __init__(self, windowSize, 
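AreaUnderCurve above sorts the points by x and applies the trapezoidal rule. The short sketch below reproduces the 0.75 result from the docstring's ROC example; the fpr values and the final AUC come from that example, while the tpr values are filled in by hand for illustration.

# Trapezoidal-rule AUC sketch matching the computation described above.
import numpy

def area_under_curve(x, y):
  order = numpy.argsort(x)
  x, y = numpy.asarray(x)[order], numpy.asarray(y)[order]
  h = numpy.diff(x)
  return float(numpy.sum(h * (y[1:] + y[:-1])) / 2.0)

fpr = [0.0, 0.5, 0.5, 1.0]
tpr = [0.5, 0.5, 1.0, 1.0]
print(area_under_curve(fpr, tpr))   # 0.75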
existingHistoricalValues=None):", "body": "if not isinstance(windowSize, numbers.Integral):raise TypeError(\"\")if windowSize <= :raise ValueError(\"\")self.windowSize = windowSizeif existingHistoricalValues is not None:self.slidingWindow = existingHistoricalValues[len(existingHistoricalValues)-windowSize:]else:self.slidingWindow = []self.total = float(sum(self.slidingWindow))", "docstring": "new instance of MovingAverage, so method .next() can be used\n@param windowSize - length of sliding window\n@param existingHistoricalValues - construct the object with already\n some values in it.", "id": "f17611:c0:m0"} {"signature": "@staticmethoddef compute(slidingWindow, total, newVal, windowSize):", "body": "if len(slidingWindow) == windowSize:total -= slidingWindow.pop()slidingWindow.append(newVal)total += newValreturn float(total) / len(slidingWindow), slidingWindow, total", "docstring": "Routine for computing a moving average.\n\n @param slidingWindow a list of previous values to use in computation that\n will be modified and returned\n @param total the sum of the values in slidingWindow to be used in the\n calculation of the moving average\n @param newVal a new number compute the new windowed average\n @param windowSize how many values to use in the moving window\n\n @returns an updated windowed average, the modified input slidingWindow list,\n and the new total sum of the sliding window", "id": "f17611:c0:m1"} {"signature": "def next(self, newValue):", "body": "newAverage, self.slidingWindow, self.total = self.compute(self.slidingWindow, self.total, newValue, self.windowSize)return newAverage", "docstring": "Instance method wrapper around compute.", "id": "f17611:c0:m2"} {"signature": "def getCurrentAvg(self):", "body": "return float(self.total) / len(self.slidingWindow)", "docstring": "get current average", "id": "f17611:c0:m4"} {"signature": "def __setstate__(self, state):", "body": "self.__dict__.update(state)if not hasattr(self, \"\"):self.slidingWindow = []if not hasattr(self, \"\"):self.total = self.slidingWindow = sum(self.slidingWindow)", "docstring": "for loading this object", "id": "f17611:c0:m5"} {"signature": "def processClubAttendance(f, clubs):", "body": "try:line = next(f)while line == '':line = next(f)name = line.split('')[]if name not in clubs:clubs[name] = Club(name)c = clubs[name]c.processAttendance(f) return Trueexcept StopIteration:return False", "docstring": "Process the attendance data of one club\n\n If the club already exists in the list update its data.\n If the club is new create a new Club object and add it to the dict\n\n The next step is to iterate over all the lines and add a record for each line.\n When reaching an empty line it means there are no more records for this club.\n\n Along the way some redundant lines are skipped. 
When the file ends the f.next()\n call raises a StopIteration exception and that's the sign to return False,\n which indicates to the caller that there are no more clubs to process.", "id": "f17614:m0"} {"signature": "def processClubConsumption(f, clubs):", "body": "try:line = next(f)assert line.endswith('')valid_times = list(range())t = club = NoneclubName = NonelastDate = Nonewhile True:assert t in valid_timesconsumption = for x in range():line = f.next()[:-]fields = line.split('')assert len(fields) == for i, field in enumerate(fields):assert field[] == '' and field[-] == ''fields[i] = field[:-]name = fields[]partialNames = ('', '', '', '')for pn in partialNames:if pn in name:name = pnif name != clubName:clubName = nameclub = clubs[name]tokens = fields[].split()if len(tokens) == :assert consumption == and t == date = tokens[]consumption += float(fields[])club.updateRecord(date, t, consumption)t += t %= except StopIteration:return", "docstring": "Process the consumption a club\n\n - Skip the header line\n - Iterate over lines\n - Read 4 records at a time\n - Parse each line: club, date, time, consumption\n - Get club object from dictionary if needed\n - Aggregate consumption\n - Call club.processConsumption() with data", "id": "f17614:m1"} {"signature": "def _generateModel0(numCategories):", "body": "initProb = numpy.zeros(numCategories)initProb[] = initProb[] = firstOrder = dict()for catIdx in range(numCategories):key = str([catIdx])probs = numpy.ones(numCategories) / numCategoriesif catIdx == or catIdx == :probs.fill()probs[] = firstOrder[key] = probssecondOrder = dict()for firstIdx in range(numCategories):for secondIdx in range(numCategories):key = str([firstIdx, secondIdx])probs = numpy.ones(numCategories) / numCategoriesif key == str([,]):probs.fill()probs[] = probs[] = elif key == str([,]):probs.fill()probs[] = probs[] = secondOrder[key] = probsreturn (initProb, firstOrder, secondOrder, )", "docstring": "Generate the initial, first order, and second order transition\n probabilities for 'model0'. For this model, we generate the following\n set of sequences:\n\n 1-2-3 (4X)\n 1-2-4 (1X)\n 5-2-3 (1X)\n 5-2-4 (4X)\n\n\n Parameters:\n ----------------------------------------------------------------------\n numCategories: Number of categories\n retval: (initProb, firstOrder, secondOrder, seqLen)\n initProb: Initial probability for each category. This is a vector\n of length len(categoryList).\n firstOrder: A dictionary of the 1st order probabilities. The key\n is the 1st element of the sequence, the value is\n the probability of each 2nd element given the first. \n secondOrder: A dictionary of the 2nd order probabilities. The key\n is the first 2 elements of the sequence, the value is\n the probability of each possible 3rd element given the \n first two. \n seqLen: Desired length of each sequence. The 1st element will\n be generated using the initProb, the 2nd element by the\n firstOrder table, and the 3rd and all successive \n elements by the secondOrder table. 
\n\n\n Here is an example of some return values:\n initProb: [0.7, 0.2, 0.1]\n\n firstOrder: {'[0]': [0.3, 0.3, 0.4],\n '[1]': [0.3, 0.3, 0.4],\n '[2]': [0.3, 0.3, 0.4]}\n\n secondOrder: {'[0,0]': [0.3, 0.3, 0.4],\n '[0,1]': [0.3, 0.3, 0.4],\n '[0,2]': [0.3, 0.3, 0.4],\n '[1,0]': [0.3, 0.3, 0.4],\n '[1,1]': [0.3, 0.3, 0.4],\n '[1,2]': [0.3, 0.3, 0.4],\n '[2,0]': [0.3, 0.3, 0.4],\n '[2,1]': [0.3, 0.3, 0.4],\n '[2,2]': [0.3, 0.3, 0.4]}", "id": "f17616:m0"} {"signature": "def _generateModel1(numCategories):", "body": "initProb = numpy.zeros(numCategories)initProb[] = initProb[] = firstOrder = dict()for catIdx in range(numCategories):key = str([catIdx])probs = numpy.ones(numCategories) / numCategoriesif catIdx == or catIdx == :indices = numpy.array([,,,,])probs.fill()probs[indices] = probs /= probs.sum()firstOrder[key] = probssecondOrder = dict()for firstIdx in range(numCategories):for secondIdx in range(numCategories):key = str([firstIdx, secondIdx])probs = numpy.ones(numCategories) / numCategoriesif key == str([,]):probs.fill()probs[] = elif key == str([,]):probs.fill()probs[] = elif key == str([,]):probs.fill()probs[] = elif key == str([,]):probs.fill()probs[] = elif key == str([,]):probs.fill()probs[] = elif key == str([,]):probs.fill()probs[] = elif key == str([,]):probs.fill()probs[] = elif key == str([,]):probs.fill()probs[] = elif key == str([,]):probs.fill()probs[] = elif key == str([,]):probs.fill()probs[] = secondOrder[key] = probsreturn (initProb, firstOrder, secondOrder, )", "docstring": "Generate the initial, first order, and second order transition\n probabilities for 'model1'. For this model, we generate the following\n set of sequences:\n\n 0-10-15 (1X)\n 0-11-16 (1X)\n 0-12-17 (1X)\n 0-13-18 (1X)\n 0-14-19 (1X)\n\n 1-10-20 (1X)\n 1-11-21 (1X)\n 1-12-22 (1X)\n 1-13-23 (1X)\n 1-14-24 (1X)\n\n\n Parameters:\n ----------------------------------------------------------------------\n numCategories: Number of categories\n retval: (initProb, firstOrder, secondOrder, seqLen)\n initProb: Initial probability for each category. This is a vector\n of length len(categoryList).\n firstOrder: A dictionary of the 1st order probabilities. The key\n is the 1st element of the sequence, the value is\n the probability of each 2nd element given the first. \n secondOrder: A dictionary of the 2nd order probabilities. The key\n is the first 2 elements of the sequence, the value is\n the probability of each possible 3rd element given the \n first two. \n seqLen: Desired length of each sequence. The 1st element will\n be generated using the initProb, the 2nd element by the\n firstOrder table, and the 3rd and all successive \n elements by the secondOrder table. 
\n\n\n Here is an example of some return values:\n initProb: [0.7, 0.2, 0.1]\n\n firstOrder: {'[0]': [0.3, 0.3, 0.4],\n '[1]': [0.3, 0.3, 0.4],\n '[2]': [0.3, 0.3, 0.4]}\n\n secondOrder: {'[0,0]': [0.3, 0.3, 0.4],\n '[0,1]': [0.3, 0.3, 0.4],\n '[0,2]': [0.3, 0.3, 0.4],\n '[1,0]': [0.3, 0.3, 0.4],\n '[1,1]': [0.3, 0.3, 0.4],\n '[1,2]': [0.3, 0.3, 0.4],\n '[2,0]': [0.3, 0.3, 0.4],\n '[2,1]': [0.3, 0.3, 0.4],\n '[2,2]': [0.3, 0.3, 0.4]}", "id": "f17616:m1"} {"signature": "def _generateModel2(numCategories, alpha=):", "body": "initProb = numpy.ones(numCategories)/numCategoriesdef generatePeakedProbabilities(lastIdx,numCategories=numCategories, alpha=alpha):probs = numpy.random.dirichlet(alpha=[alpha]*numCategories)probs[lastIdx] = probs /= probs.sum()return probs firstOrder = dict()for catIdx in range(numCategories):key = str([catIdx])probs = generatePeakedProbabilities(catIdx) firstOrder[key] = probssecondOrder = dict()for firstIdx in range(numCategories):for secondIdx in range(numCategories):key = str([firstIdx, secondIdx])probs = generatePeakedProbabilities(secondIdx) secondOrder[key] = probsreturn (initProb, firstOrder, secondOrder, None)", "docstring": "Generate the initial, first order, and second order transition\n probabilities for 'model2'. For this model, we generate peaked random \n transitions using dirichlet distributions. \n\n Parameters:\n ----------------------------------------------------------------------\n numCategories: Number of categories\n alpha: Determines the peakedness of the transitions. Low alpha \n values (alpha=0.01) place the entire weight on a single \n transition. Large alpha values (alpha=10) distribute the \n evenly among all transitions. Intermediate values (alpha=0.5)\n give a moderately peaked transitions. \n retval: (initProb, firstOrder, secondOrder, seqLen)\n initProb: Initial probability for each category. This is a vector\n of length len(categoryList).\n firstOrder: A dictionary of the 1st order probabilities. The key\n is the 1st element of the sequence, the value is\n the probability of each 2nd element given the first. \n secondOrder: A dictionary of the 2nd order probabilities. The key\n is the first 2 elements of the sequence, the value is\n the probability of each possible 3rd element given the \n first two. \n seqLen: Desired length of each sequence. The 1st element will\n be generated using the initProb, the 2nd element by the\n firstOrder table, and the 3rd and all successive \n elements by the secondOrder table. None means infinite\n length. 
\n\n\n Here is an example of some return values for an intermediate alpha value:\n initProb: [0.33, 0.33, 0.33]\n\n firstOrder: {'[0]': [0.2, 0.7, 0.1],\n '[1]': [0.1, 0.1, 0.8],\n '[2]': [0.1, 0.0, 0.9]}\n\n secondOrder: {'[0,0]': [0.1, 0.0, 0.9],\n '[0,1]': [0.0, 0.2, 0.8],\n '[0,2]': [0.1, 0.8, 0.1],\n ...\n '[2,2]': [0.8, 0.2, 0.0]}", "id": "f17616:m2"} {"signature": "def _generateFile(filename, numRecords, categoryList, initProb, firstOrderProb, secondOrderProb, seqLen, numNoise=, resetsEvery=None):", "body": "print(\"\" % (filename))fields = [('', '', ''), ('', '', '')]outFile = FileRecordStream(filename, write=True, fields=fields)initCumProb = initProb.cumsum()firstOrderCumProb = dict()for (key,value) in firstOrderProb.items():firstOrderCumProb[key] = value.cumsum()secondOrderCumProb = dict()for (key,value) in secondOrderProb.items():secondOrderCumProb[key] = value.cumsum()elementsInSeq = []numElementsSinceReset = maxCatIdx = len(categoryList) - for i in range(numRecords):if numElementsSinceReset == :reset = else:reset = rand = numpy.random.rand()if len(elementsInSeq) == :catIdx = numpy.searchsorted(initCumProb, rand)elif len(elementsInSeq) == :catIdx = numpy.searchsorted(firstOrderCumProb[str(elementsInSeq)], rand)elif (len(elementsInSeq) >=) and(seqLen is None or len(elementsInSeq) < seqLen-numNoise):catIdx = numpy.searchsorted(secondOrderCumProb[str(elementsInSeq[-:])], rand)else: catIdx = numpy.random.randint(len(categoryList))catIdx = min(maxCatIdx, catIdx)outFile.appendRecord([reset,categoryList[catIdx]]) elementsInSeq.append(catIdx)numElementsSinceReset += if resetsEvery is not None and numElementsSinceReset == resetsEvery:numElementsSinceReset = elementsInSeq = []if seqLen is not None and (len(elementsInSeq) == seqLen+numNoise):elementsInSeq = []outFile.close()", "docstring": "Generate a set of records reflecting a set of probabilities.\n\n Parameters:\n ----------------------------------------------------------------\n filename: name of .csv file to generate\n numRecords: number of records to generate\n categoryList: list of category names\n initProb: Initial probability for each category. This is a vector\n of length len(categoryList).\n firstOrderProb: A dictionary of the 1st order probabilities. The key\n is the 1st element of the sequence, the value is\n the probability of each 2nd element given the first. \n secondOrderProb: A dictionary of the 2nd order probabilities. The key\n is the first 2 elements of the sequence, the value is\n the probability of each possible 3rd element given the \n first two. \n seqLen: Desired length of each sequence. The 1st element will\n be generated using the initProb, the 2nd element by the\n firstOrder table, and the 3rd and all successive \n elements by the secondOrder table. None means infinite\n length. \n numNoise: Number of noise elements to place between each \n sequence. The noise elements are evenly distributed from \n all categories. 
\n resetsEvery: If not None, generate a reset every N records\n\n\n Here is an example of some parameters:\n\n categoryList: ['cat1', 'cat2', 'cat3']\n\n initProb: [0.7, 0.2, 0.1]\n\n firstOrderProb: {'[0]': [0.3, 0.3, 0.4],\n '[1]': [0.3, 0.3, 0.4],\n '[2]': [0.3, 0.3, 0.4]}\n\n secondOrderProb: {'[0,0]': [0.3, 0.3, 0.4],\n '[0,1]': [0.3, 0.3, 0.4],\n '[0,2]': [0.3, 0.3, 0.4],\n '[1,0]': [0.3, 0.3, 0.4],\n '[1,1]': [0.3, 0.3, 0.4],\n '[1,2]': [0.3, 0.3, 0.4],\n '[2,0]': [0.3, 0.3, 0.4],\n '[2,1]': [0.3, 0.3, 0.4],\n '[2,2]': [0.3, 0.3, 0.4]}", "id": "f17616:m3"} {"signature": "def _generateFile(filename, data):", "body": "print(\"\" % (filename))numRecords, numFields = data.shapefields = [(''%(i+), '', '') for i in range(numFields)]outFile = File(filename, fields)for i in range(numRecords):outFile.write(data[i].tolist())outFile.close()", "docstring": "Parameters:\n----------------------------------------------------------------\nfilename: name of .csv file to generate", "id": "f17617:m4"} {"signature": "def _getTPClass(temporalImp):", "body": "if temporalImp == '':return backtracking_tm.BacktrackingTMelif temporalImp == '':return backtracking_tm_cpp.BacktrackingTMCPPelif temporalImp == '':return backtracking_tm_shim.TMShimelif temporalImp == '':return backtracking_tm_shim.TMCPPShimelif temporalImp == '':return backtracking_tm_shim.MonitoredTMShimelse:raise RuntimeError(\"\"\"\" % (temporalImp))", "docstring": "Return the class corresponding to the given temporalImp string", "id": "f17618:m0"} {"signature": "def _buildArgs(f, self=None, kwargs={}):", "body": "argTuples = getArgumentDescriptions(f)argTuples = argTuples[:] init = TMRegion.__init__ourArgNames = [t[] for t in getArgumentDescriptions(init)]ourArgNames += ['', ]for argTuple in argTuples[:]:if argTuple[] in ourArgNames:argTuples.remove(argTuple)if self:for argTuple in argTuples:argName = argTuple[]if argName in kwargs:argValue = kwargs.pop(argName)else:if len(argTuple) == :raise TypeError(\"\" % argName)argValue = argTuple[]setattr(self, argName, argValue)return argTuples", "docstring": "Get the default arguments from the function and assign as instance vars.\n\nReturn a list of 3-tuples with (name, description, defaultValue) for each\n argument to the function.\n\nAssigns all arguments to the function as instance variables of TMRegion.\nIf the argument was not provided, uses the default value.\n\nPops any values from kwargs that go to the function.", "id": "f17618:m1"} {"signature": "def _getAdditionalSpecs(temporalImp, kwargs={}):", "body": "typeNames = {int: '', float: '', str: '', bool: '', tuple: ''}def getArgType(arg):t = typeNames.get(type(arg), '')count = if t == '' else if t == '':t = typeNames.get(type(arg[]), '')count = len(arg)if t == '':t = ''return (t, count)def getConstraints(arg):t = typeNames.get(type(arg), '')if t == '':return ''elif t == '':return ''else:return ''TemporalClass = _getTPClass(temporalImp)tArgTuples = _buildArgs(TemporalClass.__init__)temporalSpec = {}for argTuple in tArgTuples:d = dict(description=argTuple[],accessMode='',dataType=getArgType(argTuple[])[],count=getArgType(argTuple[])[],constraints=getConstraints(argTuple[]))temporalSpec[argTuple[]] = 
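The _generateModel0/1/2 and _generateFile records above describe a second-order Markov generator: the first element of each sequence is drawn from initProb, the second from the firstOrder table keyed on the first element, and every later element from the secondOrder table keyed on the previous two, using a cumulative-probability lookup. A minimal standalone sketch of that sampling step, with small hypothetical tables in place of the stripped literals above:

```python
import numpy

def sample_next(elements, initProb, firstOrder, secondOrder):
    """Draw the next category index given the elements generated so far."""
    r = numpy.random.rand()
    if len(elements) == 0:
        cumProb = initProb.cumsum()                         # 1st element: initial distribution
    elif len(elements) == 1:
        cumProb = firstOrder[str(elements)].cumsum()        # 2nd element: 1st-order table
    else:
        cumProb = secondOrder[str(elements[-2:])].cumsum()  # later elements: 2nd-order table
    return int(numpy.searchsorted(cumProb, r))

# Hypothetical tables for three categories (illustrative values only).
initProb = numpy.array([0.7, 0.2, 0.1])
firstOrder = {str([i]): numpy.array([0.3, 0.3, 0.4]) for i in range(3)}
secondOrder = {str([i, j]): numpy.array([0.3, 0.3, 0.4])
               for i in range(3) for j in range(3)}

sequence = []
for _ in range(5):
    sequence.append(sample_next(sequence, initProb, firstOrder, secondOrder))
print(sequence)   # e.g. [0, 2, 1, 2, 0]
```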
dtemporalSpec.update(dict(columnCount=dict(description='',accessMode='',dataType='',count=,constraints=''),cellsPerColumn=dict(description='',accessMode='',dataType='',count=,constraints=''),inputWidth=dict(description='',accessMode='',dataType='',count=,constraints=''),predictedSegmentDecrement=dict(description='',accessMode='',dataType='',count=,constraints=''),orColumnOutputs=dict(description=\"\"\"\"\"\",accessMode='',dataType='',count=,constraints=''),cellsSavePath=dict(description=\"\"\"\"\"\",accessMode='',dataType='',count=,constraints=''),temporalImp=dict(description=\"\"\"\"\"\",accessMode='',dataType='',count=,constraints=''),))otherSpec = dict(learningMode=dict(description='',accessMode='',dataType='',count=,defaultValue=True,constraints=''),inferenceMode=dict(description='',accessMode='',dataType='',count=,defaultValue=False,constraints=''),computePredictedActiveCellIndices=dict(description='',accessMode='',dataType='',count=,defaultValue=False,constraints=''),anomalyMode=dict(description='',accessMode='',dataType='',count=,defaultValue=False,constraints=''),topDownMode=dict(description='''',accessMode='',dataType='',count=,defaultValue=False,constraints=''),activeOutputCount=dict(description='',accessMode='',dataType='',count=,constraints=''),storeDenseOutput=dict(description=\"\"\"\"\"\",accessMode='',dataType='',count=,constraints=''),logPathOutput=dict(description='''',accessMode='',dataType='',count=,constraints=''),)return temporalSpec, otherSpec", "docstring": "Build the additional specs in three groups (for the inspector)\n\n Use the type of the default argument to set the Spec type, defaulting\n to 'Byte' for None and complex types\n\n Determines the spatial parameters based on the selected implementation.\n It defaults to TemporalMemory.\n Determines the temporal parameters based on the temporalImp", "id": "f17618:m2"} {"signature": "def _initialize(self):", "body": "for attrName in self._getEphemeralMembersBase():if attrName != \"\":if hasattr(self, attrName):if self._loaded:passelse:print(self.__class__.__name__, \"\" %attrName)if not self._loaded:for attrName in self._getEphemeralMembersBase():if attrName != \"\":assert not hasattr(self, attrName)else:assert hasattr(self, attrName)self._profileObj = Noneself._iterations = self._initEphemerals()self._checkEphemeralMembers()", "docstring": "Initialize all ephemeral data members, and give the derived\nclass the opportunity to do the same by invoking the\nvirtual member _initEphemerals(), which is intended to be\noverridden.", "id": "f17618:c0:m1"} {"signature": "def initialize(self):", "body": "autoArgs = dict((name, getattr(self, name))for name in self._temporalArgNames)if self._tfdr is None:tpClass = _getTPClass(self.temporalImp)if self.temporalImp in ['', '', '','', '','',]:self._tfdr = tpClass(numberOfCols=self.columnCount,cellsPerColumn=self.cellsPerColumn,**autoArgs)else:raise RuntimeError(\"\")", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.initialize`.", "id": "f17618:c0:m2"} {"signature": "def compute(self, inputs, outputs):", "body": "if False and self.learningModeand self._iterations > and self._iterations <= :import hotshotif self._iterations == :print(\"\")stats = hotshot.stats.load(\"\")stats.strip_dirs()stats.sort_stats('', '')stats.print_stats()if self._profileObj is None:print(\"\")if os.path.exists(''):os.remove('')self._profileObj = hotshot.Profile(\"\", , )self._profileObj.runcall(self._compute, *[inputs, outputs])else:self._compute(inputs, outputs)", "docstring": "Run one 
iteration of :class:`~nupic.regions.tm_region.TMRegion` compute, \nprofiling it if requested.\n\n:param inputs: (dict) mapping region input names to numpy.array values\n:param outputs: (dict) mapping region output names to numpy.arrays that \n should be populated with output values by this method", "id": "f17618:c0:m3"} {"signature": "def _compute(self, inputs, outputs):", "body": "if self._tfdr is None:raise RuntimeError(\"\")self._conditionalBreak()self._iterations += buInputVector = inputs['']resetSignal = Falseif '' in inputs:assert len(inputs['']) == if inputs[''][] != :self._tfdr.reset()self._sequencePos = if self.computePredictedActiveCellIndices:prevPredictedState = self._tfdr.getPredictedState().reshape(-).astype('')if self.anomalyMode:prevPredictedColumns = self._tfdr.topDownCompute().copy().nonzero()[]tpOutput = self._tfdr.compute(buInputVector, self.learningMode, self.inferenceMode)self._sequencePos += if self.orColumnOutputs:tpOutput= tpOutput.reshape(self.columnCount,self.cellsPerColumn).max(axis=)if self._fpLogTPOutput:output = tpOutput.reshape(-)outputNZ = tpOutput.nonzero()[]outStr = \"\".join([\"\" % int(token) for token in outputNZ])print(output.size, outStr, file=self._fpLogTPOutput)outputs[''][:] = tpOutput.flatif self.topDownMode:outputs[''][:] = self._tfdr.topDownCompute().copy()if self.anomalyMode:activeLearnCells = self._tfdr.getLearnActiveStateT()size = activeLearnCells.shape[] * activeLearnCells.shape[]outputs[''][:] = activeLearnCells.reshape(size)activeColumns = buInputVector.nonzero()[]outputs[''][:] = anomaly.computeRawAnomalyScore(activeColumns, prevPredictedColumns)if self.computePredictedActiveCellIndices:activeState = self._tfdr._getActiveState().reshape(-).astype('')activeIndices = numpy.where(activeState != )[]predictedIndices= numpy.where(prevPredictedState != )[]predictedActiveIndices = numpy.intersect1d(activeIndices, predictedIndices)outputs[\"\"].fill()outputs[\"\"][activeIndices] = outputs[\"\"].fill()outputs[\"\"][predictedActiveIndices] = ", "docstring": "Run one iteration of TMRegion's compute", "id": "f17618:c0:m4"} {"signature": "@classmethoddef getBaseSpec(cls):", "body": "spec = dict(description=TMRegion.__doc__,singleNodeOnly=True,inputs=dict(bottomUpIn=dict(description=\"\"\"\"\"\",dataType='',count=,required=True,regionLevel=False,isDefaultInput=True,requireSplitterMap=False),resetIn=dict(description=\"\"\"\"\"\",dataType='',count=,required=False,regionLevel=True,isDefaultInput=False,requireSplitterMap=False),sequenceIdIn=dict(description=\"\",dataType='',count=,required=False,regionLevel=True,isDefaultInput=False,requireSplitterMap=False),),outputs=dict(bottomUpOut=dict(description=\"\"\"\"\"\",dataType='',count=,regionLevel=True,isDefaultOutput=True),topDownOut=dict(description=\"\"\"\"\"\",dataType='',count=,regionLevel=True,isDefaultOutput=False),activeCells=dict(description=\"\",dataType='',count=,regionLevel=True,isDefaultOutput=False),predictedActiveCells=dict(description=\"\",dataType='',count=,regionLevel=True,isDefaultOutput=False),anomalyScore = dict(description=\"\"\"\"\"\",dataType='',count=,regionLevel=True,isDefaultOutput=False),lrnActiveStateT = dict(description=\"\"\"\"\"\",dataType='',count=,regionLevel=True,isDefaultOutput=False),),parameters=dict(breakPdb=dict(description='',dataType='',count=,constraints='',defaultValue=,accessMode=''),breakKomodo=dict(description='',dataType='',count=,constraints='',defaultValue=,accessMode=''),),commands = {})return spec", "docstring": "Doesn't include the spatial, temporal and 
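TMRegion._compute above fills its anomalyScore output from the overlap between the columns that are active now and the columns that were predicted on the previous step (it calls anomaly.computeRawAnomalyScore with exactly those two index arrays). The raw score is simply the fraction of active columns that were not predicted; a small standalone numpy illustration of that formula, independent of the region:

```python
import numpy

def raw_anomaly(activeColumns, prevPredictedColumns):
    """Fraction of currently active columns that were not predicted last step."""
    active = numpy.asarray(activeColumns)
    predicted = numpy.asarray(prevPredictedColumns)
    if active.size == 0:
        return 0.0
    predictedActive = numpy.intersect1d(active, predicted).size
    return (active.size - predictedActive) / float(active.size)

# 2 of the 4 active columns (2 and 7) were not predicted -> score 0.5
print(raw_anomaly(activeColumns=[2, 5, 7, 9], prevPredictedColumns=[5, 9, 11]))
```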
other parameters\n\n:returns: (dict) the base Spec for TMRegion.", "id": "f17618:c0:m5"} {"signature": "@classmethoddef getSpec(cls):", "body": "spec = cls.getBaseSpec()t, o = _getAdditionalSpecs(temporalImp=gDefaultTemporalImp)spec[''].update(t)spec[''].update(o)return spec", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSpec`.\n\nThe parameters collection is constructed based on the parameters specified\nby the various components (spatialSpec, temporalSpec and otherSpec)", "id": "f17618:c0:m6"} {"signature": "def getAlgorithmInstance(self):", "body": "return self._tfdr", "docstring": ":returns: instance of the underlying \n :class:`~nupic.algorithms.temporal_memory.TemporalMemory` \n algorithm object.", "id": "f17618:c0:m7"} {"signature": "def getParameter(self, parameterName, index=-):", "body": "if parameterName in self._temporalArgNames:return getattr(self._tfdr, parameterName)else:return PyRegion.getParameter(self, parameterName, index)", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameter`.\n\nGet the value of a parameter. Most parameters are handled automatically by\n:class:`~nupic.bindings.regions.PyRegion.PyRegion`'s parameter get mechanism. The \nones that need special treatment are explicitly handled here.", "id": "f17618:c0:m8"} {"signature": "def setParameter(self, parameterName, index, parameterValue):", "body": "if parameterName in self._temporalArgNames:setattr(self._tfdr, parameterName, parameterValue)elif parameterName == \"\":self.logPathOutput = parameterValueif self._fpLogTPOutput is not None:self._fpLogTPOutput.close()self._fpLogTPOutput = Noneif parameterValue:self._fpLogTPOutput = open(self.logPathOutput, '')elif hasattr(self, parameterName):setattr(self, parameterName, parameterValue)else:raise Exception('' + parameterName)", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.setParameter`.", "id": "f17618:c0:m9"} {"signature": "def resetSequenceStates(self):", "body": "self._tfdr.reset()self._sequencePos = return", "docstring": "Resets the region's sequence states.", "id": "f17618:c0:m10"} {"signature": "def finishLearning(self):", "body": "if self._tfdr is None:raise RuntimeError(\"\")if hasattr(self._tfdr, ''):self.resetSequenceStates()self._tfdr.finishLearning()", "docstring": "Perform an internal optimization step that speeds up inference if we know\nlearning will not be performed anymore. 
This call may, for example, remove\nall potential inputs to each column.", "id": "f17618:c0:m11"} {"signature": "@staticmethoddef getSchema():", "body": "return TMRegionProto", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSchema`.", "id": "f17618:c0:m12"} {"signature": "def writeToProto(self, proto):", "body": "proto.temporalImp = self.temporalImpproto.columnCount = self.columnCountproto.inputWidth = self.inputWidthproto.cellsPerColumn = self.cellsPerColumnproto.learningMode = self.learningModeproto.inferenceMode = self.inferenceModeproto.anomalyMode = self.anomalyModeproto.topDownMode = self.topDownModeproto.computePredictedActiveCellIndices = (self.computePredictedActiveCellIndices)proto.orColumnOutputs = self.orColumnOutputsif self.temporalImp == \"\":tmProto = proto.init(\"\")elif self.temporalImp == \"\":tmProto = proto.init(\"\")elif self.temporalImp == \"\":tmProto = proto.init(\"\")elif self.temporalImp == \"\":tmProto = proto.init(\"\")else:raise TypeError(\"\".format(self.temporalImp))self._tfdr.write(tmProto)", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.writeToProto`.\n\nWrite state to proto object.\n\n:param proto: TMRegionProto capnproto object", "id": "f17618:c0:m13"} {"signature": "@classmethoddef readFromProto(cls, proto):", "body": "instance = cls(proto.columnCount, proto.inputWidth, proto.cellsPerColumn)instance.temporalImp = proto.temporalImpinstance.learningMode = proto.learningModeinstance.inferenceMode = proto.inferenceModeinstance.anomalyMode = proto.anomalyModeinstance.topDownMode = proto.topDownModeinstance.computePredictedActiveCellIndices = (proto.computePredictedActiveCellIndices)instance.orColumnOutputs = proto.orColumnOutputsif instance.temporalImp == \"\":tmProto = proto.backtrackingTMelif instance.temporalImp == \"\":tmProto = proto.backtrackingTMCppelif instance.temporalImp == \"\":tmProto = proto.temporalMemoryelif instance.temporalImp == \"\":tmProto = proto.temporalMemoryelse:raise TypeError(\"\".format(instance.temporalImp))instance._tfdr = _getTPClass(proto.temporalImp).read(tmProto)return instance", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.readFromProto`.\n\nRead state from proto object.\n\n:param proto: TMRegionProto capnproto object", "id": "f17618:c0:m14"} {"signature": "def __getstate__(self):", "body": "state = self.__dict__.copy()for ephemeralMemberName in self._getEphemeralMembersAll():state.pop(ephemeralMemberName, None)return state", "docstring": "Return serializable state. This function will return a version of the\n__dict__ with all \"ephemeral\" members stripped out. 
\"Ephemeral\" members\nare defined as those that do not need to be (nor should be) stored\nin any kind of persistent file (e.g., NuPIC network XML file.)", "id": "f17618:c0:m15"} {"signature": "def serializeExtraData(self, filePath):", "body": "if self._tfdr is not None:self._tfdr.saveToFile(filePath)", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.serializeExtraData`.", "id": "f17618:c0:m16"} {"signature": "def deSerializeExtraData(self, filePath):", "body": "if self._tfdr is not None:self._tfdr.loadFromFile(filePath)", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.deSerializeExtraData`.\n\nThis method is called during network deserialization with an external\nfilename that can be used to bypass pickle for loading large binary states.\n\n:param filePath: (string) absolute file path", "id": "f17618:c0:m17"} {"signature": "def __setstate__(self, state):", "body": "if not hasattr(self, ''):self.storeDenseOutput = Falseif not hasattr(self, ''):self.computePredictedActiveCellIndices = Falseself.__dict__.update(state)self._loaded = Trueself._initialize()", "docstring": "Set the state of ourself from a serialized state.", "id": "f17618:c0:m18"} {"signature": "def _initEphemerals(self):", "body": "self._sequencePos = self._fpLogTPOutput = Noneself.logPathOutput = None", "docstring": "Initialize all ephemerals used by derived classes.", "id": "f17618:c0:m19"} {"signature": "def _getEphemeralMembers(self):", "body": "return ['', '', '',]", "docstring": "Callback that returns a list of all \"ephemeral\" members (i.e., data members\nthat should not and/or cannot be pickled.)", "id": "f17618:c0:m20"} {"signature": "def _getEphemeralMembersBase(self):", "body": "return ['','','',]", "docstring": "Returns list of all ephemeral members.", "id": "f17618:c0:m21"} {"signature": "def _getEphemeralMembersAll(self):", "body": "return self._getEphemeralMembersBase() + self._getEphemeralMembers()", "docstring": "Returns a concatenated list of both the standard base class\nephemeral members, as well as any additional ephemeral members\n(e.g., file handles, etc.).", "id": "f17618:c0:m22"} {"signature": "def getOutputElementCount(self, name):", "body": "if name == '':return self.outputWidthelif name == '':return self.columnCountelif name == '':return self.outputWidthelif name == \"\":return self.outputWidthelif name == \"\":return self.outputWidthelse:raise Exception(\"\")", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.", "id": "f17618:c0:m25"} {"signature": "def getParameterArrayCount(self, name, index):", "body": "p = self.getParameter(name)if (not hasattr(p, '')):raise Exception(\"\" % name)return len(p)", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArrayCount`.", "id": "f17618:c0:m26"} {"signature": "def getParameterArray(self, name, index, a):", "body": "p = self.getParameter(name)if (not hasattr(p, '')):raise Exception(\"\" % name)if len(p) > :a[:] = p[:]", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArray`.", "id": "f17618:c0:m27"} {"signature": "@classmethoddef getSpec(cls):", "body": "ns = 
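The __getstate__/__setstate__, serializeExtraData and _initEphemerals records above all implement one pattern: "ephemeral" attributes such as open log-file handles are stripped from the pickled state and re-created after deserialization. A minimal standalone illustration of the pattern; the class and attribute names here are hypothetical, not the region's:

```python
import pickle

class Region(object):
    _EPHEMERAL = ("_fpLog",)          # attributes that must not be pickled

    def __init__(self):
        self.columnCount = 2048
        self._initEphemerals()

    def _initEphemerals(self):
        self._fpLog = None            # e.g. a log file handle, reopened on demand

    def __getstate__(self):
        state = self.__dict__.copy()
        for name in self._EPHEMERAL:
            state.pop(name, None)     # drop ephemerals from the serialized state
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self._initEphemerals()        # restore ephemerals after unpickling

r = pickle.loads(pickle.dumps(Region()))
print(r.columnCount, r._fpLog)        # 2048 None
```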
dict(description=SDRClassifierRegion.__doc__,singleNodeOnly=True,inputs=dict(actValueIn=dict(description=\"\"\"\",dataType=\"\",count=,required=False,regionLevel=False,isDefaultInput=False,requireSplitterMap=False),bucketIdxIn=dict(description=\"\"\"\"\"\",dataType=\"\",count=,required=False,regionLevel=False,isDefaultInput=False,requireSplitterMap=False),categoryIn=dict(description='',dataType='',count=,required=True,regionLevel=True,isDefaultInput=False,requireSplitterMap=False),bottomUpIn=dict(description='',dataType='',count=,required=True,regionLevel=False,isDefaultInput=True,requireSplitterMap=False),predictedActiveCells=dict(description=\"\",dataType='',count=,required=True,regionLevel=True,isDefaultInput=False,requireSplitterMap=False),sequenceIdIn=dict(description=\"\",dataType='',count=,required=False,regionLevel=True,isDefaultInput=False,requireSplitterMap=False),),outputs=dict(categoriesOut=dict(description='',dataType='',count=,regionLevel=True,isDefaultOutput=False,requireSplitterMap=False),actualValues=dict(description='',dataType='',count=,regionLevel=True,isDefaultOutput=False,requireSplitterMap=False),probabilities=dict(description='',dataType='',count=,regionLevel=True,isDefaultOutput=False,requireSplitterMap=False),),parameters=dict(learningMode=dict(description='''',dataType='',count=,constraints='',defaultValue=,accessMode=''),inferenceMode=dict(description='''',dataType='',count=,constraints='',defaultValue=,accessMode=''),maxCategoryCount=dict(description='''',dataType='',required=True,count=,constraints='',defaultValue=,accessMode=''),steps=dict(description='''',dataType=\"\",count=,constraints='',defaultValue='',accessMode=''),alpha=dict(description='''''',dataType=\"\",count=,constraints='',defaultValue=,accessMode=''),implementation=dict(description='',accessMode='',dataType='',count=,constraints=''),verbosity=dict(description='''''',dataType='',count=,constraints='',defaultValue=,accessMode=''),),commands=dict())return ns", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`.", "id": "f17619:c0:m0"} {"signature": "def initialize(self):", "body": "if self._sdrClassifier is None:self._sdrClassifier = SDRClassifierFactory.create(steps=self.stepsList,alpha=self.alpha,verbosity=self.verbosity,implementation=self.implementation,)", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.initialize`.\n\nIs called once by NuPIC before the first call to compute().\nInitializes self._sdrClassifier if it is not already initialized.", "id": "f17619:c0:m2"} {"signature": "def getAlgorithmInstance(self):", "body": "return self._sdrClassifier", "docstring": ":returns: (:class:`nupic.regions.sdr_classifier_region.SDRClassifierRegion`)", "id": "f17619:c0:m3"} {"signature": "def getParameter(self, name, index=-):", "body": "return PyRegion.getParameter(self, name, index)", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getParameter`.", "id": "f17619:c0:m4"} {"signature": "def setParameter(self, name, index, value):", "body": "if name == \"\":self.learningMode = bool(int(value))elif name == \"\":self.inferenceMode = bool(int(value))else:return PyRegion.setParameter(self, name, index, value)", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.setParameter`.", "id": "f17619:c0:m5"} {"signature": "@staticmethoddef getSchema():", "body": "return SDRClassifierRegionProto", "docstring": ":returns: the pycapnp proto type that the class uses for serialization.", "id": "f17619:c0:m6"} {"signature": 
"def writeToProto(self, proto):", "body": "proto.implementation = self.implementationproto.steps = self.stepsproto.alpha = self.alphaproto.verbosity = self.verbosityproto.maxCategoryCount = self.maxCategoryCountproto.learningMode = self.learningModeproto.inferenceMode = self.inferenceModeproto.recordNum = self.recordNumself._sdrClassifier.write(proto.sdrClassifier)", "docstring": "Write state to proto object.\n\n:param proto: SDRClassifierRegionProto capnproto object", "id": "f17619:c0:m7"} {"signature": "@classmethoddef readFromProto(cls, proto):", "body": "instance = cls()instance.implementation = proto.implementationinstance.steps = proto.stepsinstance.stepsList = [int(i) for i in proto.steps.split(\"\")]instance.alpha = proto.alphainstance.verbosity = proto.verbosityinstance.maxCategoryCount = proto.maxCategoryCountinstance._sdrClassifier = SDRClassifierFactory.read(proto)instance.learningMode = proto.learningModeinstance.inferenceMode = proto.inferenceModeinstance.recordNum = proto.recordNumreturn instance", "docstring": "Read state from proto object.\n\n:param proto: SDRClassifierRegionProto capnproto object", "id": "f17619:c0:m8"} {"signature": "def compute(self, inputs, outputs):", "body": "self._computeFlag = TruepatternNZ = inputs[\"\"].nonzero()[]if self.learningMode:categories = [category for category in inputs[\"\"]if category >= ]if len(categories) > :bucketIdxList = []actValueList = []for category in categories:bucketIdxList.append(int(category))if \"\" not in inputs:actValueList.append(int(category))else:actValueList.append(float(inputs[\"\"]))classificationIn = {\"\": bucketIdxList,\"\": actValueList}else:if \"\" not in inputs:raise KeyError(\"\")if \"\" not in inputs:raise KeyError(\"\")classificationIn = {\"\": int(inputs[\"\"]),\"\": float(inputs[\"\"])}else:classificationIn = {\"\": , \"\": }clResults = self._sdrClassifier.compute(recordNum=self.recordNum,patternNZ=patternNZ,classification=classificationIn,learn=self.learningMode,infer=self.inferenceMode)if clResults is not None and len(clResults) > :outputs[''][:len(clResults[\"\"])] =clResults[\"\"]for step in self.stepsList:stepIndex = self.stepsList.index(step)categoryOut = clResults[\"\"][clResults[step].argmax()]outputs[''][stepIndex] = categoryOutstepProbabilities = clResults[step]for categoryIndex in xrange(self.maxCategoryCount):flatIndex = categoryIndex + stepIndex * self.maxCategoryCountif categoryIndex < len(stepProbabilities):outputs[''][flatIndex] =stepProbabilities[categoryIndex]else:outputs[''][flatIndex] = self.recordNum += ", "docstring": "Process one input sample.\nThis method is called by the runtime engine.\n\n:param inputs: (dict) mapping region input names to numpy.array values\n:param outputs: (dict) mapping region output names to numpy.arrays that \n should be populated with output values by this method", "id": "f17619:c0:m9"} {"signature": "def customCompute(self, recordNum, patternNZ, classification):", "body": "if not hasattr(self, \"\"):self._computeFlag = Falseif self._computeFlag:warnings.simplefilter('', DeprecationWarning)warnings.warn(\"\"\"\"\"\"\"\",DeprecationWarning)return self._sdrClassifier.compute(recordNum,patternNZ,classification,self.learningMode,self.inferenceMode)", "docstring": "Just return the inference value from one input sample. The actual\nlearning happens in compute() -- if, and only if learning is enabled --\nwhich is called when you run the network.\n\n.. warning:: This method is deprecated and exists only to maintain backward \n compatibility. 
This method is deprecated, and will be removed. Use \n :meth:`nupic.engine.Network.run` instead, which will call \n :meth:`~nupic.regions.sdr_classifier_region.compute`.\n\n:param recordNum: (int) Record number of the input sample.\n:param patternNZ: (list) of the active indices from the output below\n:param classification: (dict) of the classification information:\n\n * ``bucketIdx``: index of the encoder bucket\n * ``actValue``: actual value going into the encoder\n\n:returns: (dict) containing inference results, one entry for each step in\n ``self.steps``. The key is the number of steps, the value is an\n array containing the relative likelihood for each ``bucketIdx``\n starting from 0.\n\n For example:\n\n :: \n\n {'actualValues': [0.0, 1.0, 2.0, 3.0]\n 1 : [0.1, 0.3, 0.2, 0.7]\n 4 : [0.2, 0.4, 0.3, 0.5]}", "id": "f17619:c0:m10"} {"signature": "def getOutputElementCount(self, outputName):", "body": "if outputName == \"\":return len(self.stepsList)elif outputName == \"\":return len(self.stepsList) * self.maxCategoryCountelif outputName == \"\":return self.maxCategoryCountelse:raise ValueError(\"\".format(outputName))", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.", "id": "f17619:c0:m11"} {"signature": "@classmethoddef getSpec(cls):", "body": "ns = dict(description=KNNClassifierRegion.__doc__,singleNodeOnly=True,inputs=dict(categoryIn=dict(description='''',dataType='',count=,required=True,regionLevel=True,isDefaultInput=False,requireSplitterMap=False),bottomUpIn=dict(description='',dataType='',count=,required=True,regionLevel=False,isDefaultInput=True,requireSplitterMap=False),partitionIn=dict(description='',dataType='',count=,required=True,regionLevel=True,isDefaultInput=False,requireSplitterMap=False),auxDataIn=dict(description='',dataType='',count=,required=False,regionLevel=True,isDefaultInput=False,requireSplitterMap=False)),outputs=dict(categoriesOut=dict(description='''''''',dataType='',count=,regionLevel=True,isDefaultOutput=True),bestPrototypeIndices=dict(description='''''',dataType='',count=,regionLevel=True,isDefaultOutput=False),categoryProbabilitiesOut=dict(description='''''''',dataType='',count=,regionLevel=True,isDefaultOutput=True),),parameters=dict(learningMode=dict(description='''',dataType='',count=,constraints='',defaultValue=,accessMode=''),inferenceMode=dict(description='''',dataType='',count=,constraints='',defaultValue=,accessMode=''),acceptanceProbability=dict(description='''''''''',dataType='',count=,constraints='',defaultValue=,accessMode=''), 
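SDRClassifierRegion.compute and the deprecated customCompute above both bottom out in SDRClassifier.compute(recordNum, patternNZ, classification, learn, infer), and the docstring documents the return format: an 'actualValues' entry plus one likelihood array per prediction step. A short usage sketch of the algorithm object on its own; the constructor arguments shown (steps, alpha, verbosity) are the common ones and the numeric values are arbitrary:

```python
from nupic.algorithms.sdr_classifier import SDRClassifier

# One-step-ahead classifier; feed it a single active-cell pattern and query it.
clf = SDRClassifier(steps=[1], alpha=0.1, verbosity=0)
result = clf.compute(recordNum=0,
                     patternNZ=[1, 5, 9],   # indices of active cells from the layer below
                     classification={"bucketIdx": 4, "actValue": 34.7},
                     learn=True, infer=True)
print(result["actualValues"])   # actual value associated with each bucket seen so far
print(result[1])                # relative likelihood of each bucket, one step ahead
```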
confusion=dict(description='''''',dataType='',count=,constraints='',defaultValue=None,accessMode=''),activeOutputCount=dict(description='''',dataType='',count=,constraints='',defaultValue=,accessMode=''),categoryCount=dict(description='''',dataType='',count=,constraints='',defaultValue=None,accessMode=''),patternCount=dict(description='',dataType='',count=,constraints='',defaultValue=None,accessMode=''),patternMatrix=dict(description='''',dataType='',count=,constraints='',defaultValue=None,accessMode=''),k=dict(description='''',dataType='',count=,constraints='',defaultValue=,accessMode=''),maxCategoryCount=dict(description='''',dataType='',count=,constraints='',defaultValue=,accessMode=''),distanceNorm=dict(description='''',dataType='',count=,constraints='',defaultValue=,accessMode=''),distanceMethod=dict(description='''''',dataType=\"\",count=,constraints='''',defaultValue='',accessMode=''),outputProbabilitiesByDist=dict(description='''''''',dataType='',count=,constraints='',defaultValue=,accessMode=''),distThreshold=dict(description='''''''''',dataType='',count=,constraints='',defaultValue=,accessMode=''),inputThresh=dict(description='''',dataType='',count=,constraints='',defaultValue=,accessMode=''),doBinarization=dict(description='',dataType='',count=,constraints='',defaultValue=,accessMode=''),useSparseMemory=dict(description='''',dataType='',count=,constraints='',defaultValue=,accessMode=''),minSparsity=dict(description=\"\"\"\"\"\"\"\"\"\",dataType='',count=,constraints='',defaultValue=,accessMode=''),sparseThreshold=dict(description='''''',dataType='',count=,constraints='',defaultValue=,accessMode=''),relativeThreshold=dict(description='''',dataType='',count=,constraints='',defaultValue=,accessMode=''),winnerCount=dict(description='''',dataType='',count=,constraints='',defaultValue=,accessMode=''),doSphering=dict(description='''''''''''''''',dataType='',count=,constraints='',defaultValue=,accessMode=''),SVDSampleCount=dict(description='''',dataType='',count=,constraints='',defaultValue=,accessMode=''),SVDDimCount=dict(description='''''''',dataType='',count=,constraints='',defaultValue=-,accessMode=''),fractionOfMax=dict(description='''''',dataType='',count=,constraints='',defaultValue=,accessMode=''),useAuxiliary=dict(description='''',dataType='',count=,constraints='',defaultValue=,accessMode=''),justUseAuxiliary=dict(description='''',dataType='',count=,constraints='',defaultValue=,accessMode=''),verbosity=dict(description='''''',dataType='',count=,constraints='',defaultValue= ,accessMode=''),keepAllDistances=dict(description='''''''''',dataType='',count=,constraints='',defaultValue=None,accessMode=''),replaceDuplicates=dict(description='''''''',dataType='',count=,constraints='',defaultValue=None,accessMode=''),cellsPerCol=dict(description='''''''''',dataType='',count=,constraints='',defaultValue=,accessMode=''),maxStoredPatterns=dict(description='''''''''',dataType='',count=,constraints='',defaultValue=-,accessMode=''),),commands=dict())return ns", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`.", "id": "f17620:c0:m0"} {"signature": "def _getEphemeralAttributes(self):", "body": "return ['', '', '','']", "docstring": "List of attributes to not save with serialized state.", "id": "f17620:c0:m2"} {"signature": "def _initEphemerals(self):", "body": "self._firstComputeCall = Trueself._accuracy = Noneself._protoScores = Noneself._categoryDistances = Noneself._knn = knn_classifier.KNNClassifier(**self.knnParams)for x in ('', '', '','', ''):if not 
hasattr(self, x):setattr(self, x, None)", "docstring": "Initialize attributes that are not saved with the checkpoint.", "id": "f17620:c0:m3"} {"signature": "def __setstate__(self, state):", "body": "if '' not in state:self.__dict__.update(state)elif state[''] == :if \"\" in state:state.pop(\"\")knnState = state['']del state['']self.__dict__.update(state)self._initEphemerals()self._knn.__setstate__(knnState)else:raise RuntimeError(\"\")self.version = KNNClassifierRegion.__VERSION__", "docstring": "Set state from serialized state.", "id": "f17620:c0:m4"} {"signature": "def __getstate__(self):", "body": "state = self.__dict__.copy()state[''] = self._knn.__getstate__()del state['']for field in self._getEphemeralAttributes():del state[field]return state", "docstring": "Get serializable state.", "id": "f17620:c0:m5"} {"signature": "def getAlgorithmInstance(self):", "body": "return self._knn", "docstring": ":returns: (:class:`~nupic.algorithms.knn_classifier.KNNClassifier`)", "id": "f17620:c0:m12"} {"signature": "def getParameter(self, name, index=-):", "body": "if name == \"\":return self._knn._numPatternselif name == \"\":return self._getPatternMatrix()elif name == \"\":return self._knn.kelif name == \"\":return self._knn.distanceNormelif name == \"\":return self._knn.distanceMethodelif name == \"\":return self._knn.distThresholdelif name == \"\":return self._knn.binarizationThresholdelif name == \"\":return self._knn.doBinarizationelif name == \"\":return self._knn.useSparseMemoryelif name == \"\":return self._knn.sparseThresholdelif name == \"\":return self._knn.numWinnerselif name == \"\":return self._knn.relativeThresholdelif name == \"\":v = self._knn.numSVDSamplesreturn v if v is not None else elif name == \"\":v = self._knn.numSVDDimsreturn v if v is not None else elif name == \"\":v = self._knn.fractionOfMaxreturn v if v is not None else elif name == \"\":return self._useAuxiliaryelif name == \"\":return self._justUseAuxiliaryelif name == \"\":return self._doSpheringelif name == \"\":return self._knn.cellsPerColelif name == \"\":return self.maxStoredPatternselif name == '':return self._knn._categoryRecencyListelse:return PyRegion.getParameter(self, name, index)", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getParameter`.", "id": "f17620:c0:m13"} {"signature": "def setParameter(self, name, index, value):", "body": "if name == \"\":self.learningMode = bool(int(value))self._epoch = elif name == \"\":self._epoch = if int(value) and not self.inferenceMode:self._finishLearning()self.inferenceMode = bool(int(value))elif name == \"\":self._knn.distanceNorm = valueelif name == \"\":self._knn.distanceMethod = valueelif name == \"\":self.keepAllDistances = bool(value)if not self.keepAllDistances:if self._protoScores is not None and self._protoScores.shape[] > :self._protoScores = self._protoScores[-,:]if self._protoScores is not None:self._protoScoreCount = else:self._protoScoreCount = elif name == \"\":self.verbosity = valueself._knn.verbosity = valueelse:return PyRegion.setParameter(self, name, index, value)", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.setParameter`.", "id": "f17620:c0:m14"} {"signature": "def reset(self):", "body": "self.confusion = numpy.zeros((, ))", "docstring": "Resets confusion matrix.", "id": "f17620:c0:m15"} {"signature": "def enableTap(self, tapPath):", "body": "self._tapFileIn = open(tapPath + '', '')self._tapFileOut = open(tapPath + '', '')", "docstring": "Begin writing output tap files.\n\n:param tapPath: 
(string) base name of the output tap files to write.", "id": "f17620:c0:m17"} {"signature": "def disableTap(self):", "body": "if self._tapFileIn is not None:self._tapFileIn.close()self._tapFileIn = Noneif self._tapFileOut is not None:self._tapFileOut.close()self._tapFileOut = None", "docstring": "Disable writing of output tap files.", "id": "f17620:c0:m18"} {"signature": "def handleLogInput(self, inputs):", "body": "if self._tapFileIn is not None:for input in inputs:for k in range(len(input)):print(input[k], end='', file=self._tapFileIn)print(file=self._tapFileIn)", "docstring": "Write inputs to output tap file.\n\n:param inputs: (iter) some inputs.", "id": "f17620:c0:m19"} {"signature": "def handleLogOutput(self, output):", "body": "if self._tapFileOut is not None:for k in range(len(output)):print(output[k], end='', file=self._tapFileOut)print(file=self._tapFileOut)", "docstring": "Write outputs to output tap file.\n\n:param outputs: (iter) some outputs.", "id": "f17620:c0:m20"} {"signature": "def _storeSample(self, inputVector, trueCatIndex, partition=):", "body": "if self._samples is None:self._samples = numpy.zeros((, len(inputVector)), dtype=RealNumpyDType)assert self._labels is Noneself._labels = []self._samples = numpy.concatenate((self._samples, numpy.atleast_2d(inputVector)), axis=)self._labels += [trueCatIndex]if self._partitions is None:self._partitions = []if partition is None:partition = self._partitions += [partition]", "docstring": "Store a training sample and associated category label", "id": "f17620:c0:m21"} {"signature": "def compute(self, inputs, outputs):", "body": "if self._useAuxiliary is None:self._useAuxiliary = Falseif self._firstComputeCall:self._firstComputeCall = Falseif self._useAuxiliary:if self._justUseAuxiliary == True:print(\"\")inputVector = inputs['']if self._useAuxiliary==True:auxVector = inputs['']if auxVector.dtype != numpy.float32:raise RuntimeError(\"\")if self._justUseAuxiliary == True:inputVector = inputs['']else:inputVector = numpy.concatenate([inputVector, inputs['']])self.handleLogInput([inputVector])assert \"\" in inputs, \"\"categories = inputs['']if \"\" in inputs:assert len(inputs[\"\"]) == , \"\"partInput = inputs['']assert len(partInput) == , \"\"partition = int(partInput[])else:partition = Noneif self.inferenceMode:categoriesOut = outputs['']probabilitiesOut = outputs['']if self._doSphering:inputVector = (inputVector + self._normOffset) * self._normScalenPrototypes = if \"\" in outputs:bestPrototypeIndicesOut = outputs[\"\"]nPrototypes = len(bestPrototypeIndicesOut)winner, inference, protoScores, categoryDistances =self._knn.infer(inputVector, partitionId=partition)if not self.keepAllDistances:self._protoScores = protoScoreselse:if self._protoScores is None:self._protoScores = numpy.zeros((, protoScores.shape[]),protoScores.dtype)self._protoScores[,:] = protoScoresself._protoScoreCount = else:if self._protoScoreCount == self._protoScores.shape[]:newProtoScores = numpy.zeros((self._protoScores.shape[] * ,self._protoScores.shape[]),self._protoScores.dtype)newProtoScores[:self._protoScores.shape[],:] = self._protoScoresself._protoScores = newProtoScoresself._protoScores[self._protoScoreCount,:] = protoScoresself._protoScoreCount += self._categoryDistances = categoryDistancesif self.outputProbabilitiesByDist:scores = - self._categoryDistanceselse:scores = inferencetotal = scores.sum()if total == :numScores = len(scores)probabilities = numpy.ones(numScores) / numScoreselse:probabilities = scores / totalnout = min(len(categoriesOut), 
len(inference))categoriesOut.fill()categoriesOut[:nout] = inference[:nout]probabilitiesOut.fill()probabilitiesOut[:nout] = probabilities[:nout]if self.verbosity >= :print(\"\", categoriesOut[:nout])print(\"\", probabilitiesOut[:nout])if self._scanInfo is not None:self._scanResults = [tuple(inference[:nout])]for category in categories:if category >= :dims = max(int(category)+, len(inference))oldDims = len(self.confusion)if oldDims < dims:confusion = numpy.zeros((dims, dims))confusion[:oldDims, :oldDims] = self.confusionself.confusion = confusionself.confusion[inference.argmax(), int(category)] += if nPrototypes > :bestPrototypeIndicesOut.fill()if categoryDistances is not None:indices = categoryDistances.argsort()nout = min(len(indices), nPrototypes)bestPrototypeIndicesOut[:nout] = indices[:nout]elif nPrototypes == :if (categoryDistances is not None) and len(categoryDistances):bestPrototypeIndicesOut[] = categoryDistances.argmin()else:bestPrototypeIndicesOut[] = self.handleLogOutput(inference)if self.learningMode:if (self.acceptanceProbability < ) and(self._rgen.getReal64() > self.acceptanceProbability):passelse:for category in categories:if category >= :if self._doSphering:self._storeSample(inputVector, category, partition)else:self._knn.learn(inputVector, category, partition)self._epoch += ", "docstring": "Process one input sample. This method is called by the runtime engine.\n\n.. note:: the number of input categories may vary, but the array size is \n fixed to the max number of categories allowed (by a lower region), so \n \"unused\" indices of the input category array are filled with -1s.\n\nTODO: confusion matrix does not support multi-label classification\n\n:param inputs: (dict) mapping region input names to numpy.array values\n:param outputs: (dict) mapping region output names to numpy.arrays that \n should be populated with output values by this method", "id": "f17620:c0:m22"} {"signature": "def getCategoryList(self):", "body": "return self._knn._categoryList", "docstring": "Public API for returning the category list. This is a required API of the \nNearestNeighbor inspector.\n\n:returns: (list) which has one entry per stored prototype. The value of the \n entry is the category # of that stored prototype.", "id": "f17620:c0:m23"} {"signature": "def removeCategory(self, categoryToRemove):", "body": "return self._knn.removeCategory(categoryToRemove)", "docstring": "Removes a category.\n\n:param categoryToRemove: (string) label to remove", "id": "f17620:c0:m24"} {"signature": "def getAllDistances(self):", "body": "if self._protoScores is None:return Nonereturn self._protoScores[:self._protoScoreCount, :]", "docstring": "Like :meth:`~nupic.regions.knn_classifier_region.KNNClassifierRegion.getLatestDistances`, \nbut returns all the scores if more than one set is available. \n:meth:`~nupic.regions.knn_classifier_region.KNNClassifierRegion.getLatestDistances` \nwill always just return one set of scores.\n\n:returns: (list) all the prototype distances from all computes available.", "id": "f17620:c0:m26"} {"signature": "def _finishLearning(self):", "body": "if self._doSphering:self._finishSphering()self._knn.finishLearning()self._accuracy = None", "docstring": "Does nothing. 
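KNNClassifierRegion.compute above stores prototypes with self._knn.learn(inputVector, category, partition) and classifies with self._knn.infer(inputVector, ...), unpacking a (winner, inference, protoScores, categoryDistances) tuple. A hedged usage sketch of the underlying KNNClassifier by itself; the constructor argument shown is an assumption about a sensible default:

```python
import numpy
from nupic.algorithms.knn_classifier import KNNClassifier

knn = KNNClassifier(k=1)

# Store two labelled prototypes.
knn.learn(numpy.array([1.0, 0.0, 0.0]), 0)
knn.learn(numpy.array([0.0, 1.0, 0.0]), 1)

# Classify a new vector; the region unpacks the same 4-tuple from infer().
winner, inference, protoScores, categoryDistances = knn.infer(
    numpy.array([0.9, 0.1, 0.0]))
print(winner)               # expected: 0 (the closest prototype's category)
print(categoryDistances)    # per-category distance to the nearest stored prototype
```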
Kept here for API compatibility", "id": "f17620:c0:m28"} {"signature": "def _finishSphering(self):", "body": "self._normOffset = self._samples.mean(axis=) * -self._samples += self._normOffsetvariance = self._samples.var(axis=)variance[numpy.where(variance == )] = self._normScale = / numpy.sqrt(variance)self._samples *= self._normScalefor sampleIndex in range(len(self._labels)):self._knn.learn(self._samples[sampleIndex],self._labels[sampleIndex],self._partitions[sampleIndex])", "docstring": "Compute normalization constants for each feature dimension\nbased on the collected training samples. Then normalize our\ntraining samples using these constants (so that each input\ndimension has mean and variance of zero and one, respectively.)\nThen feed these \"sphered\" training samples into the underlying\nSVM model.", "id": "f17620:c0:m29"} {"signature": "def getOutputElementCount(self, name):", "body": "if name == '':return self.maxCategoryCountelif name == '':return self.maxCategoryCountelif name == '':return self._bestPrototypeIndexCount if self._bestPrototypeIndexCount else else:raise Exception('' + name)", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.", "id": "f17620:c0:m31"} {"signature": "def invariant(self):", "body": "assert isinstance(self.description, str)assert isinstance(self.singleNodeOnly, bool)assert isinstance(self.inputs, dict)assert isinstance(self.outputs, dict)assert isinstance(self.parameters, dict)assert isinstance(self.commands, dict)hasDefaultInput = Falsefor k, v in self.inputs.items():assert isinstance(k, str)assert isinstance(v, InputSpec)v.invariant()if v.isDefaultInput:assert not hasDefaultInputhasDefaultInput = TruehasDefaultOutput = Falsefor k, v in self.outputs.items():assert isinstance(k, str)assert isinstance(v, OutputSpec)v.invariant()if v.isDefaultOutput:assert not hasDefaultOutputhasDefaultOutput = Truefor k, v in self.parameters.items():assert isinstance(k, str)assert isinstance(v, ParameterSpec)v.invariant()for k, v in self.commands.items():assert isinstance(k, str)assert isinstance(v, CommandSpec)v.invariant()", "docstring": "Verify the validity of the node spec object\n\n The type of each sub-object is verified and then\n the validity of each node spec item is verified by calling\n it invariant() method. It also makes sure that there is at most\n one default input and one default output.", "id": "f17621:c4:m1"} {"signature": "def toDict(self):", "body": "def items2dict(items):\"\"\"\"\"\"d = {}for k, v in items.items():d[k] = v.__dict__return dself.invariant()return dict(description=self.description,singleNodeOnly=self.singleNodeOnly,inputs=items2dict(self.inputs),outputs=items2dict(self.outputs),parameters=items2dict(self.parameters),commands=items2dict(self.commands))", "docstring": "Convert the information of the node spec to a plain dict of basic types\n\n The description and singleNodeOnly attributes are placed directly in\n the result dicts. The inputs, outputs, parameters and commands dicts\n contain Spec item objects (InputSpec, OutputSpec, etc). 
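_finishSphering above normalizes every feature dimension of the collected training samples to zero mean and unit variance (guarding against zero-variance dimensions) before feeding them to the classifier, and keeps the offset and scale so inference can apply the same transform. A standalone numpy sketch of that normalization:

```python
import numpy

def sphere(samples):
    """Return (normalized samples, offset, scale) for reuse at inference time."""
    offset = -samples.mean(axis=0)        # shift each dimension to zero mean
    centered = samples + offset
    variance = centered.var(axis=0)
    variance[variance == 0] = 1.0         # avoid dividing by zero on constant dimensions
    scale = 1.0 / numpy.sqrt(variance)
    return centered * scale, offset, scale

data = numpy.array([[1.0, 2.0], [3.0, 2.0], [5.0, 2.0]])
normalized, offset, scale = sphere(data)
print(normalized)   # first column maps to roughly [-1.22, 0, 1.22]; constant column stays 0
```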
Each such object\n is converted also to a plain dict using the internal items2dict() function\n (see bellow).", "id": "f17621:c4:m2"} {"signature": "def getDefaultSPImp():", "body": "return ''", "docstring": "Return the default spatial pooler implementation for this region.", "id": "f17623:m0"} {"signature": "def getSPClass(spatialImp):", "body": "if spatialImp == '':return PYSpatialPoolerelif spatialImp == '':return CPPSpatialPoolerelse:raise RuntimeError(\"\"\"\" % (spatialImp))", "docstring": "Return the class corresponding to the given spatialImp string", "id": "f17623:m1"} {"signature": "def _buildArgs(f, self=None, kwargs={}):", "body": "argTuples = getArgumentDescriptions(f)argTuples = argTuples[:] init = SPRegion.__init__ourArgNames = [t[] for t in getArgumentDescriptions(init)]ourArgNames += ['',]for argTuple in argTuples[:]:if argTuple[] in ourArgNames:argTuples.remove(argTuple)if self:for argTuple in argTuples:argName = argTuple[]if argName in kwargs:argValue = kwargs.pop(argName)else:if len(argTuple) == :raise TypeError(\"\" % argName)argValue = argTuple[]setattr(self, argName, argValue)return argTuples", "docstring": "Get the default arguments from the function and assign as instance vars.\n\nReturn a list of 3-tuples with (name, description, defaultValue) for each\n argument to the function.\n\nAssigns all arguments to the function as instance variables of SPRegion.\nIf the argument was not provided, uses the default value.\n\nPops any values from kwargs that go to the function.", "id": "f17623:m2"} {"signature": "def _getAdditionalSpecs(spatialImp, kwargs={}):", "body": "typeNames = {int: '', float: '', str: '', bool: '', tuple: ''}def getArgType(arg):t = typeNames.get(type(arg), '')count = if t == '' else if t == '':t = typeNames.get(type(arg[]), '')count = len(arg)if t == '':t = ''return (t, count)def getConstraints(arg):t = typeNames.get(type(arg), '')if t == '':return ''elif t == '':return ''else:return ''SpatialClass = getSPClass(spatialImp)sArgTuples = _buildArgs(SpatialClass.__init__)spatialSpec = {}for argTuple in sArgTuples:d = dict(description=argTuple[],accessMode='',dataType=getArgType(argTuple[])[],count=getArgType(argTuple[])[],constraints=getConstraints(argTuple[]))spatialSpec[argTuple[]] = dspatialSpec.update(dict(columnCount=dict(description='',accessMode='',dataType='',count=,constraints=''),inputWidth=dict(description='',accessMode='',dataType='',count=,constraints=''),spInputNonZeros=dict(description='',accessMode='',dataType='',count=,constraints=''),spOutputNonZeros=dict(description='',accessMode='',dataType='',count=,constraints=''),spOverlapDistribution=dict(description=\"\"\"\"\"\",accessMode='',dataType='',count=,constraints=''),sparseCoincidenceMatrix=dict(description='',accessMode='',dataType='',count=,constraints=''),denseOutput=dict(description='',accessMode='',dataType='',count=,constraints=''),spLearningStatsStr=dict(description=\"\"\"\"\"\",accessMode='',dataType='',count=,constraints=''),spatialImp=dict(description=\"\"\"\"\"\",accessMode='',dataType='',count=,constraints=''),))otherSpec = 
dict(learningMode=dict(description='',accessMode='',dataType='',count=,constraints=''),inferenceMode=dict(description='',accessMode='',dataType='',count=,constraints=''),anomalyMode=dict(description='',accessMode='',dataType='',count=,constraints=''),topDownMode=dict(description='''',accessMode='',dataType='',count=,constraints=''),activeOutputCount=dict(description='',accessMode='',dataType='',count=,constraints=''),logPathInput=dict(description='''',accessMode='',dataType='',count=,constraints=''),logPathOutput=dict(description='''',accessMode='',dataType='',count=,constraints=''),logPathOutputDense=dict(description='''',accessMode='',dataType='',count=,constraints=''),)return spatialSpec, otherSpec", "docstring": "Build the additional specs in three groups (for the inspector)\n\n Use the type of the default argument to set the Spec type, defaulting\n to 'Byte' for None and complex types\n\n Determines the spatial parameters based on the selected implementation.\n It defaults to SpatialPooler.", "id": "f17623:m3"} {"signature": "def _initializeEphemeralMembers(self):", "body": "for attrName in self._getEphemeralMembersBase():if attrName != \"\":if hasattr(self, attrName):if self._loaded:passelse:print(self.__class__.__name__, \"\" %attrName)if not self._loaded:for attrName in self._getEphemeralMembersBase():if attrName != \"\":assert not hasattr(self, attrName)else:assert hasattr(self, attrName)self._profileObj = Noneself._iterations = self._initEphemerals()self._checkEphemeralMembers()", "docstring": "Initialize all ephemeral data members, and give the derived class the\nopportunity to do the same by invoking the virtual member _initEphemerals(),\nwhich is intended to be overridden.\n\nNOTE: this is used by both __init__ and __setstate__ code paths.", "id": "f17623:c0:m1"} {"signature": "def initialize(self):", "body": "self._spatialPoolerOutput = numpy.zeros(self.columnCount,dtype=GetNTAReal())self._spatialPoolerInput = numpy.zeros((, self.inputWidth),dtype=GetNTAReal())self._allocateSpatialFDR(None)", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.initialize`.", "id": "f17623:c0:m2"} {"signature": "def _allocateSpatialFDR(self, rfInput):", "body": "if self._sfdr:returnautoArgs = dict((name, getattr(self, name))for name in self._spatialArgNames)if ( (self.SpatialClass == CPPSpatialPooler) or(self.SpatialClass == PYSpatialPooler) ):autoArgs[''] = [self.columnCount]autoArgs[''] = [self.inputWidth]autoArgs[''] = self.inputWidthself._sfdr = self.SpatialClass(**autoArgs)", "docstring": "Allocate the spatial pooler instance.", "id": "f17623:c0:m3"} {"signature": "def compute(self, inputs, outputs):", "body": "if False and self.learningModeand self._iterations > and self._iterations <= :import hotshotif self._iterations == :print(\"\")stats = hotshot.stats.load(\"\")stats.strip_dirs()stats.sort_stats('', '')stats.print_stats()if self._profileObj is None:print(\"\")if os.path.exists(''):os.remove('')self._profileObj = hotshot.Profile(\"\", , )self._profileObj.runcall(self._compute, *[inputs, outputs])else:self._compute(inputs, outputs)", "docstring": "Run one iteration, profiling it if requested.\n\n:param inputs: (dict) mapping region input names to numpy.array values\n:param outputs: (dict) mapping region output names to numpy.arrays that \n should be populated with output values by this method", "id": "f17623:c0:m4"} {"signature": "def _compute(self, inputs, outputs):", "body": "if self._sfdr is None:raise RuntimeError(\"\")if not self.topDownMode:self._iterations 
+= buInputVector = inputs['']resetSignal = Falseif '' in inputs:assert len(inputs['']) == resetSignal = inputs[''][] != rfOutput = self._doBottomUpCompute(rfInput = buInputVector.reshape((,buInputVector.size)),resetSignal = resetSignal)outputs[''][:] = rfOutput.flatelse:topDownIn = inputs.get('',None)spatialTopDownOut, temporalTopDownOut = self._doTopDownInfer(topDownIn)outputs[''][:] = spatialTopDownOutif temporalTopDownOut is not None:outputs[''][:] = temporalTopDownOutoutputs[''][:] = ", "docstring": "Run one iteration of SPRegion's compute", "id": "f17623:c0:m5"} {"signature": "def _doBottomUpCompute(self, rfInput, resetSignal):", "body": "self._conditionalBreak()self._spatialPoolerInput = rfInput.reshape(-)assert(rfInput.shape[] == )inputVector = numpy.array(rfInput[]).astype('')outputVector = numpy.zeros(self._sfdr.getNumColumns()).astype('')self._sfdr.compute(inputVector, self.learningMode, outputVector)self._spatialPoolerOutput[:] = outputVector[:]if self._fpLogSP:output = self._spatialPoolerOutput.reshape(-)outputNZ = output.nonzero()[]outStr = \"\".join([\"\" % int(token) for token in outputNZ])print(output.size, outStr, file=self._fpLogSP)if self._fpLogSPInput:output = rfInput.reshape(-)outputNZ = output.nonzero()[]outStr = \"\".join([\"\" % int(token) for token in outputNZ])print(output.size, outStr, file=self._fpLogSPInput)return self._spatialPoolerOutput", "docstring": "Do one iteration of inference and/or learning and return the result\n\nParameters:\n--------------------------------------------\nrfInput: Input vector. Shape is: (1, inputVectorLen).\nresetSignal: True if reset is asserted", "id": "f17623:c0:m6"} {"signature": "def _doTopDownInfer(self, topDownInput = None):", "body": "return None, None", "docstring": "Do one iteration of top-down inference.\n\nParameters:\n--------------------------------------------\ntdInput: Top-down input\n\nretval: (spatialTopDownOut, temporalTopDownOut)\n spatialTopDownOut is the top down output computed only from the SP,\n using it's current bottom-up output.\n temporalTopDownOut is the top down output computed from the topDown in\n of the level above us.", "id": "f17623:c0:m7"} {"signature": "@classmethoddef getBaseSpec(cls):", "body": "spec = dict(description=SPRegion.__doc__,singleNodeOnly=True,inputs=dict(bottomUpIn=dict(description=\"\"\"\"\"\",dataType='',count=,required=True,regionLevel=False,isDefaultInput=True,requireSplitterMap=False),resetIn=dict(description=\"\"\"\"\"\",dataType='',count=,required=False,regionLevel=True,isDefaultInput=False,requireSplitterMap=False),topDownIn=dict(description=\"\"\"\"\"\",dataType='',count=,required = False,regionLevel=True,isDefaultInput=False,requireSplitterMap=False),sequenceIdIn=dict(description=\"\",dataType='',count=,required=False,regionLevel=True,isDefaultInput=False,requireSplitterMap=False),),outputs=dict(bottomUpOut=dict(description=\"\"\"\"\"\",dataType='',count=,regionLevel=True,isDefaultOutput=True),topDownOut=dict(description=\"\"\"\"\"\",dataType='',count=,regionLevel=True,isDefaultOutput=False),spatialTopDownOut = dict(description=\"\"\"\"\"\",dataType='',count=,regionLevel=True,isDefaultOutput=False),temporalTopDownOut = dict(description=\"\"\"\"\"\",dataType='',count=,regionLevel=True,isDefaultOutput=False),anomalyScore = 
dict(description=\"\"\"\"\"\",dataType='',count=,regionLevel=True,isDefaultOutput=False),),parameters=dict(breakPdb=dict(description='',dataType='',count=,constraints='',defaultValue=,accessMode=''),breakKomodo=dict(description='',dataType='',count=,constraints='',defaultValue=,accessMode=''),),)return spec", "docstring": "Doesn't include the spatial, temporal and other parameters\n\n:returns: (dict) The base Spec for SPRegion.", "id": "f17623:c0:m8"} {"signature": "@classmethoddef getSpec(cls):", "body": "spec = cls.getBaseSpec()s, o = _getAdditionalSpecs(spatialImp=getDefaultSPImp())spec[''].update(s)spec[''].update(o)return spec", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSpec`.\n\nThe parameters collection is constructed based on the parameters specified\nby the various components (spatialSpec, temporalSpec and otherSpec)", "id": "f17623:c0:m9"} {"signature": "def getAlgorithmInstance(self):", "body": "return self._sfdr", "docstring": ":returns: (:class:`~nupic.algorithms.spatial_pooler.SpatialPooler`) instance \n of the underlying algorithm object.", "id": "f17623:c0:m10"} {"signature": "def getParameter(self, parameterName, index=-):", "body": "if parameterName == '':return self.columnCountelif parameterName == '':return list(self._spatialPoolerInput.reshape(-))elif parameterName == '':return list(self._spatialPoolerOutput)elif parameterName == '':return len(self._spatialPoolerOutput.nonzero()[])elif parameterName == '':return [len(self._spatialPoolerOutput)] +list(self._spatialPoolerOutput.nonzero()[])elif parameterName == '':import pdb; pdb.set_trace()return [len(self._spatialPoolerInput)] +list(self._spatialPoolerInput.nonzero()[])elif parameterName == '':try:return str(self._sfdr.getLearningStats())except:return str(dict())else:return PyRegion.getParameter(self, parameterName, index)", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameter`.\n\nMost parameters are handled automatically by PyRegion's parameter get \nmechanism. The ones that need special treatment are explicitly handled here.", "id": "f17623:c0:m11"} {"signature": "def setParameter(self, parameterName, index, parameterValue):", "body": "if parameterName in self._spatialArgNames:setattr(self._sfdr, parameterName, parameterValue)elif parameterName == \"\":self.logPathInput = parameterValueif self._fpLogSPInput:self._fpLogSPInput.close()self._fpLogSPInput = Noneif parameterValue:self._fpLogSPInput = open(self.logPathInput, '')elif parameterName == \"\":self.logPathOutput = parameterValueif self._fpLogSP:self._fpLogSP.close()self._fpLogSP = Noneif parameterValue:self._fpLogSP = open(self.logPathOutput, '')elif parameterName == \"\":self.logPathOutputDense = parameterValueif self._fpLogSPDense:self._fpLogSPDense.close()self._fpLogSPDense = Noneif parameterValue:self._fpLogSPDense = open(self.logPathOutputDense, '')elif hasattr(self, parameterName):setattr(self, parameterName, parameterValue)else:raise Exception('' + parameterName)", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.setParameter`.\n\nSet the value of a Spec parameter. Most parameters are handled\nautomatically by PyRegion's parameter set mechanism. 
The ones that need\nspecial treatment are explicitly handled here.", "id": "f17623:c0:m12"} {"signature": "@staticmethoddef getSchema():", "body": "return SPRegionProto", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSchema`.", "id": "f17623:c0:m13"} {"signature": "def writeToProto(self, proto):", "body": "proto.spatialImp = self.spatialImpproto.columnCount = self.columnCountproto.inputWidth = self.inputWidthproto.learningMode = if self.learningMode else proto.inferenceMode = if self.inferenceMode else proto.anomalyMode = if self.anomalyMode else proto.topDownMode = if self.topDownMode else self._sfdr.write(proto.spatialPooler)", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.writeToProto`.\n\nWrite state to proto object.\n\n:param proto: SPRegionProto capnproto object", "id": "f17623:c0:m14"} {"signature": "@classmethoddef readFromProto(cls, proto):", "body": "instance = cls(proto.columnCount, proto.inputWidth)instance.spatialImp = proto.spatialImpinstance.learningMode = proto.learningModeinstance.inferenceMode = proto.inferenceModeinstance.anomalyMode = proto.anomalyModeinstance.topDownMode = proto.topDownModespatialImp = proto.spatialImpinstance._sfdr = getSPClass(spatialImp).read(proto.spatialPooler)return instance", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.readFromProto`.\n\nRead state from proto object.\n\n:param proto: SPRegionProto capnproto object", "id": "f17623:c0:m15"} {"signature": "def __getstate__(self):", "body": "state = self.__dict__.copy()for ephemeralMemberName in self._getEphemeralMembersAll():state.pop(ephemeralMemberName, None)return state", "docstring": "Return serializable state. This function will return a version of the\n__dict__ with all \"ephemeral\" members stripped out. 
\"Ephemeral\" members\nare defined as those that do not need to be (nor should be) stored\nin any kind of persistent file (e.g., NuPIC network XML file.)", "id": "f17623:c0:m16"} {"signature": "def __setstate__(self, state):", "body": "self.__dict__.update(state)self._loaded = Trueif not hasattr(self, \"\"):self.SpatialClass = self._sfdr.__class__self._initializeEphemeralMembers()self._allocateSpatialFDR(None)", "docstring": "Set the state of ourself from a serialized state.", "id": "f17623:c0:m17"} {"signature": "def _initEphemerals(self):", "body": "if hasattr(self, '') and self._sfdr:self._spatialPoolerOutput = numpy.zeros(self.columnCount,dtype=GetNTAReal())else:self._spatialPoolerOutput = None self._fpLogSPInput = Noneself._fpLogSP = Noneself._fpLogSPDense = Noneself.logPathInput = \"\"self.logPathOutput = \"\"self.logPathOutputDense = \"\"", "docstring": "Initialize all ephemerals used by derived classes.", "id": "f17623:c0:m18"} {"signature": "def _getEphemeralMembers(self):", "body": "return ['', '', '','', '', '']", "docstring": "Callback that returns a list of all \"ephemeral\" members (i.e., data members\nthat should not and/or cannot be pickled.)", "id": "f17623:c0:m19"} {"signature": "def _getEphemeralMembersBase(self):", "body": "return ['','','',]", "docstring": "Returns list of all ephemeral members.", "id": "f17623:c0:m20"} {"signature": "def _getEphemeralMembersAll(self):", "body": "return self._getEphemeralMembersBase() + self._getEphemeralMembers()", "docstring": "Returns a concatenated list of both the standard base class\nephemeral members, as well as any additional ephemeral members\n(e.g., file handles, etc.).", "id": "f17623:c0:m21"} {"signature": "def getParameterArrayCount(self, name, index):", "body": "p = self.getParameter(name)if (not hasattr(p, '')):raise Exception(\"\" % name)return len(p)", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArrayCount`.\n\nTODO: as a temporary hack, getParameterArrayCount checks to see if there's a\nvariable, private or not, with that name. If so, it returns the value of the\nvariable.", "id": "f17623:c0:m25"} {"signature": "def getParameterArray(self, name, index, a):", "body": "p = self.getParameter(name)if (not hasattr(p, '')):raise Exception(\"\" % name)if len(p) > :a[:] = p[:]", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArray`.\n\nTODO: as a temporary hack, getParameterArray checks to see if there's a\nvariable, private or not, with that name. 
If so, it returns the value of the\nvariable.", "id": "f17623:c0:m26"} {"signature": "@classmethoddef getSpec(cls):", "body": "ns = dict(description=KNNAnomalyClassifierRegion.__doc__,singleNodeOnly=True,inputs=dict(spBottomUpOut=dict(description=\"\"\"\"\"\",dataType='',count=,required=True,regionLevel=False,isDefaultInput=True,requireSplitterMap=False),tpTopDownOut=dict(description=\"\"\"\"\"\",dataType='',count=,required=True,regionLevel=False,isDefaultInput=True,requireSplitterMap=False),tpLrnActiveStateT=dict(description=\"\"\"\"\"\",dataType='',count=,required=True,regionLevel=False,isDefaultInput=True,requireSplitterMap=False),sequenceIdIn=dict(description=\"\",dataType='',count=,required=False,regionLevel=True,isDefaultInput=False,requireSplitterMap=False),),outputs=dict(),parameters=dict(trainRecords=dict(description='',dataType='',count=,constraints='',defaultValue=,accessMode=''),anomalyThreshold=dict(description='',dataType='',count=,constraints='',defaultValue=,accessMode=''),cacheSize=dict(description='',dataType='',count=,constraints='',defaultValue=,accessMode=''),classificationVectorType=dict(description=\"\"\"\"\"\",dataType='',count=,constraints='',defaultValue=,accessMode=''),activeColumnCount=dict(description=\"\"\"\"\"\",dataType='',count=,constraints='',defaultValue=,accessMode=''),classificationMaxDist=dict(description=\"\"\"\"\"\",dataType='',count=,constraints='',defaultValue=,accessMode='')),commands=dict(getLabels=dict(description=\"\"\"\"\"\"\"\"\"\"),addLabel=dict(description=\"\"\"\"\"\"),removeLabels=dict(description=\"\"\"\"\"\"\"\"\"\")))ns[''].update(KNNClassifierRegion.getSpec()[''])return ns", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`.", "id": "f17624:c0:m0"} {"signature": "def getParameter(self, name, index=-):", "body": "if name == \"\":return self.trainRecordselif name == \"\":return self.anomalyThresholdelif name == \"\":return self._activeColumnCountelif name == \"\":return self._classificationMaxDistelse:return PyRegion.getParameter(self, name, index)", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getParameter`.", "id": "f17624:c0:m3"} {"signature": "def setParameter(self, name, index, value):", "body": "if name == \"\":if not (isinstance(value, float) or isinstance(value, int)):raise HTMPredictionModelInvalidArgument(\"\"\"\" % (type(value)))if len(self._recordsCache) > and value < self._recordsCache[].ROWID:raise HTMPredictionModelInvalidArgument(\"\"\"\"\"\" % (self._recordsCache[].ROWID))self.trainRecords = valueself._deleteRangeFromKNN(, self._recordsCache[].ROWID)self._classifyStates()elif name == \"\":if not (isinstance(value, float) or isinstance(value, int)):raise HTMPredictionModelInvalidArgument(\"\"\"\" % (type(value)))self.anomalyThreshold = valueself._classifyStates()elif name == \"\":if not (isinstance(value, float) or isinstance(value, int)):raise HTMPredictionModelInvalidArgument(\"\"\"\" % (type(value)))self._classificationMaxDist = valueself._classifyStates()elif name == \"\":self._activeColumnCount = valueelse:return PyRegion.setParameter(self, name, index, value)", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.setParameter`.", "id": "f17624:c0:m4"} {"signature": "def compute(self, inputs, outputs):", "body": "record = self._constructClassificationRecord(inputs)if record.ROWID >= self.getParameter(''):self._classifyState(record)self._recordsCache.append(record)while len(self._recordsCache) > 
self.cacheSize:self._recordsCache.pop()self.labelResults = record.anomalyLabelself._iteration += ", "docstring": "Process one input sample.\nThis method is called by the runtime engine.", "id": "f17624:c0:m5"} {"signature": "def getLabelResults(self):", "body": "return self.labelResults", "docstring": "Get the labels of the previously computed record.\n\n:returns: (list) of strings representing the classification labels", "id": "f17624:c0:m6"} {"signature": "def _classifyStates(self):", "body": "for state in self._recordsCache:self._classifyState(state)", "docstring": "Reclassifies all internal state", "id": "f17624:c0:m7"} {"signature": "def _classifyState(self, state):", "body": "if state.ROWID < self.getParameter(''):if not state.setByUser:state.anomalyLabel = []self._deleteRecordsFromKNN([state])returnlabel = KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABELautoLabel = label + KNNAnomalyClassifierRegion.AUTO_TAGnewCategory = self._recomputeRecordFromKNN(state)labelList = self._categoryToLabelList(newCategory)if state.setByUser:if label in state.anomalyLabel:state.anomalyLabel.remove(label)if autoLabel in state.anomalyLabel:state.anomalyLabel.remove(autoLabel)labelList.extend(state.anomalyLabel)if state.anomalyScore >= self.getParameter(''):labelList.append(label)elif label in labelList:ind = labelList.index(label)labelList[ind] = autoLabellabelList = list(set(labelList))if label in labelList and autoLabel in labelList:labelList.remove(autoLabel)if state.anomalyLabel == labelList:returnstate.anomalyLabel = labelListif state.anomalyLabel == []:self._deleteRecordsFromKNN([state])else:self._addRecordToKNN(state)", "docstring": "Reclassifies given state.", "id": "f17624:c0:m8"} {"signature": "def _constructClassificationRecord(self, inputs):", "body": "allSPColumns = inputs[\"\"]activeSPColumns = allSPColumns.nonzero()[]score = anomaly.computeRawAnomalyScore(activeSPColumns,self._prevPredictedColumns)spSize = len(allSPColumns)allTPCells = inputs['']tpSize = len(inputs[''])classificationVector = numpy.array([])if self.classificationVectorType == :classificationVector = numpy.zeros(tpSize)activeCellMatrix = inputs[\"\"].reshape(tpSize, )activeCellIdx = numpy.where(activeCellMatrix > )[]if activeCellIdx.shape[] > :classificationVector[numpy.array(activeCellIdx, dtype=numpy.uint16)] = elif self.classificationVectorType == :classificationVector = numpy.zeros(spSize+spSize)if activeSPColumns.shape[] > :classificationVector[activeSPColumns] = errorColumns = numpy.setdiff1d(self._prevPredictedColumns,activeSPColumns)if errorColumns.shape[] > :errorColumnIndexes = ( numpy.array(errorColumns, dtype=numpy.uint16) +spSize )classificationVector[errorColumnIndexes] = else:raise TypeError(\"\"\"\" % (self.classificationVectorType))numPredictedCols = len(self._prevPredictedColumns)predictedColumns = allTPCells.nonzero()[]self._prevPredictedColumns = copy.deepcopy(predictedColumns)if self._anomalyVectorLength is None:self._anomalyVectorLength = len(classificationVector)result = _CLAClassificationRecord(ROWID=self._iteration, anomalyScore=score,anomalyVector=classificationVector.nonzero()[].tolist(),anomalyLabel=[])return result", "docstring": "Construct a _HTMClassificationRecord based on the state of the model\npassed in through the inputs.\n\nTypes for self.classificationVectorType:\n 1 - TM active cells in learn state\n 2 - SP columns concatenated with error from TM column predictions and SP", "id": "f17624:c0:m9"} {"signature": "def _addRecordToKNN(self, record):", "body": "knn = 
self._knnclassifier._knnprototype_idx = self._knnclassifier.getParameter('')category = self._labelListToCategoryNumber(record.anomalyLabel)if record.ROWID in prototype_idx:knn.prototypeSetCategory(record.ROWID, category)returnpattern = self._getStateAnomalyVector(record)rowID = record.ROWIDknn.learn(pattern, category, rowID=rowID)", "docstring": "Adds the record to the KNN classifier.", "id": "f17624:c0:m10"} {"signature": "def _deleteRecordsFromKNN(self, recordsToDelete):", "body": "prototype_idx = self._knnclassifier.getParameter('')idsToDelete = ([r.ROWID for r in recordsToDelete ifnot r.setByUser and r.ROWID in prototype_idx])nProtos = self._knnclassifier._knn._numPatternsself._knnclassifier._knn.removeIds(idsToDelete)assert self._knnclassifier._knn._numPatterns == nProtos - len(idsToDelete)", "docstring": "Removes the given records from the classifier.\n\nparameters\n------------\nrecordsToDelete - list of records to delete from the classifier", "id": "f17624:c0:m11"} {"signature": "def _deleteRangeFromKNN(self, start=, end=None):", "body": "prototype_idx = numpy.array(self._knnclassifier.getParameter(''))if end is None:end = prototype_idx.max() + idsIdxToDelete = numpy.logical_and(prototype_idx >= start,prototype_idx < end)idsToDelete = prototype_idx[idsIdxToDelete]nProtos = self._knnclassifier._knn._numPatternsself._knnclassifier._knn.removeIds(idsToDelete.tolist())assert self._knnclassifier._knn._numPatterns == nProtos - len(idsToDelete)", "docstring": "Removes any stored records within the range from start to\nend. Noninclusive of end.\n\nparameters\n------------\nstart - integer representing the ROWID of the start of the deletion range,\nend - integer representing the ROWID of the end of the deletion range,\n if None, it will default to end.", "id": "f17624:c0:m12"} {"signature": "def _recomputeRecordFromKNN(self, record):", "body": "inputs = {\"\": [None],\"\": self._getStateAnomalyVector(record),}outputs = {\"\": numpy.zeros((,)),\"\":numpy.zeros((,)),\"\":numpy.zeros((,))}classifier_indexes = numpy.array(self._knnclassifier.getParameter(''))valid_idx = numpy.where((classifier_indexes >= self.getParameter('')) &(classifier_indexes < record.ROWID))[].tolist()if len(valid_idx) == :return Noneself._knnclassifier.setParameter('', None, True)self._knnclassifier.setParameter('', None, False)self._knnclassifier.compute(inputs, outputs)self._knnclassifier.setParameter('', None, True)classifier_distances = self._knnclassifier.getLatestDistances()valid_distances = classifier_distances[valid_idx]if valid_distances.min() <= self._classificationMaxDist:classifier_indexes_prev = classifier_indexes[valid_idx]rowID = classifier_indexes_prev[valid_distances.argmin()]indexID = numpy.where(classifier_indexes == rowID)[][]category = self._knnclassifier.getCategoryList()[indexID]return categoryreturn None", "docstring": "returns the classified labeling of record", "id": "f17624:c0:m13"} {"signature": "def _labelToCategoryNumber(self, label):", "body": "if label not in self.saved_categories:self.saved_categories.append(label)return pow(, self.saved_categories.index(label))", "docstring": "Since the KNN Classifier stores categories as numbers, we must store each\nlabel as a number. 
This method converts from a label to a unique number.\nEach label is assigned a unique bit so multiple labels may be assigned to\na single record.", "id": "f17624:c0:m14"} {"signature": "def _labelListToCategoryNumber(self, labelList):", "body": "categoryNumber = for label in labelList:categoryNumber += self._labelToCategoryNumber(label)return categoryNumber", "docstring": "This method takes a list of labels and returns a unique category number.\nThis enables this class to store a list of categories for each point since\nthe KNN classifier only stores a single number category for each record.", "id": "f17624:c0:m15"} {"signature": "def _categoryToLabelList(self, category):", "body": "if category is None:return []labelList = []labelNum = while category > :if category % == :labelList.append(self.saved_categories[labelNum])labelNum += category = category >> return labelList", "docstring": "Converts a category number into a list of labels", "id": "f17624:c0:m16"} {"signature": "def _getStateAnomalyVector(self, state):", "body": "vector = numpy.zeros(self._anomalyVectorLength)vector[state.anomalyVector] = return vector", "docstring": "Returns a state's anomaly vector, converting it from sparse to dense", "id": "f17624:c0:m17"} {"signature": "def getLabels(self, start=None, end=None):", "body": "if len(self._recordsCache) == :return {'': False,'': []}try:start = int(start)except Exception:start = try:end = int(end)except Exception:end = self._recordsCache[-].ROWIDif end <= start:raise HTMPredictionModelInvalidRangeError(\"\",debugInfo={'': {'': start,'': end},'': len(self._recordsCache)})results = {'': False,'': []}ROWIDX = numpy.array(self._knnclassifier.getParameter(''))validIdx = numpy.where((ROWIDX >= start) & (ROWIDX < end))[].tolist()categories = self._knnclassifier.getCategoryList()for idx in validIdx:row = dict(ROWID=int(ROWIDX[idx]),labels=self._categoryToLabelList(categories[idx]))results[''].append(row)return results", "docstring": "Get the labels on classified points within range start to end. 
Not inclusive\nof end.\n\n:returns: (dict) with format:\n\n ::\n\n {\n 'isProcessing': boolean,\n 'recordLabels': list of results\n }\n\n ``isProcessing`` - currently always false as recalculation blocks; used if\n reprocessing of records is still being performed;\n\n Each item in ``recordLabels`` is of format:\n\n ::\n\n {\n 'ROWID': id of the row,\n 'labels': list of strings\n }", "id": "f17624:c0:m18"} {"signature": "def addLabel(self, start, end, labelName):", "body": "if len(self._recordsCache) == :raise HTMPredictionModelInvalidRangeError(\"\"\"\")try:start = int(start)except Exception:start = try:end = int(end)except Exception:end = int(self._recordsCache[-].ROWID)startID = self._recordsCache[].ROWIDclippedStart = max(, start - startID)clippedEnd = max(, min( len( self._recordsCache) , end - startID))if clippedEnd <= clippedStart:raise HTMPredictionModelInvalidRangeError(\"\",debugInfo={'': {'': start,'': end},'': {'': clippedStart,'': clippedEnd},'': {'': startID,'': self._recordsCache[len(self._recordsCache)-].ROWID},'': len(self._recordsCache)})for state in self._recordsCache[clippedStart:clippedEnd]:if labelName not in state.anomalyLabel:state.anomalyLabel.append(labelName)state.setByUser = Trueself._addRecordToKNN(state)assert len(self.saved_categories) > for state in self._recordsCache[clippedEnd:]:self._classifyState(state)", "docstring": "Add the label labelName to each record with record ROWID in range from\n``start`` to ``end``, noninclusive of end.\n\nThis will recalculate all points from end to the last record stored in the\ninternal cache of this classifier.\n\n:param start: (int) start index \n:param end: (int) end index (noninclusive)\n:param labelName: (string) label name", "id": "f17624:c0:m19"} {"signature": "def removeLabels(self, start=None, end=None, labelFilter=None):", "body": "if len(self._recordsCache) == :raise HTMPredictionModelInvalidRangeError(\"\"\"\")try:start = int(start)except Exception:start = try:end = int(end)except Exception:end = self._recordsCache[-].ROWIDstartID = self._recordsCache[].ROWIDclippedStart = if start is None else max(, start - startID)clippedEnd = len(self._recordsCache) if end is None elsemax(, min( len( self._recordsCache) , end - startID))if clippedEnd <= clippedStart:raise HTMPredictionModelInvalidRangeError(\"\"\"\", debugInfo={'': {'': start,'': end},'': {'': clippedStart,'': clippedEnd},'': {'': startID,'': self._recordsCache[len(self._recordsCache)-].ROWID},'': len(self._recordsCache)})recordsToDelete = []for state in self._recordsCache[clippedStart:clippedEnd]:if labelFilter is not None:if labelFilter in state.anomalyLabel:state.anomalyLabel.remove(labelFilter)else:state.anomalyLabel = []state.setByUser = FalserecordsToDelete.append(state)self._deleteRecordsFromKNN(recordsToDelete)self._deleteRangeFromKNN(start, end)for state in self._recordsCache[clippedEnd:]:self._classifyState(state)", "docstring": "Remove labels from each record with record ROWID in range from\n``start`` to ``end``, noninclusive of end. 
Removes all records if \n``labelFilter`` is None, otherwise only removes the labels equal to \n``labelFilter``.\n\nThis will recalculate all points from end to the last record stored in the\ninternal cache of this classifier.\n\n:param start: (int) start index \n:param end: (int) end index (noninclusive)\n:param labelFilter: (string) label filter", "id": "f17624:c0:m20"} {"signature": "def __getstate__(self):", "body": "state = self.__dict__.copy()state[''] = state[''].__getstate__()state.pop('')return state", "docstring": "Return serializable state. This function will return a version of the\n__dict__ with all \"ephemeral\" members stripped out. \"Ephemeral\" members\nare defined as those that do not need to be (nor should be) stored\nin any kind of persistent file (e.g., NuPIC network XML file.)", "id": "f17624:c0:m24"} {"signature": "def __setstate__(self, state):", "body": "if '' not in state or state[''] == :knnclassifierProps = state.pop('')self.__dict__.update(state)self._knnclassifier = KNNClassifierRegion(**self._knnclassifierArgs)self._knnclassifier.__setstate__(knnclassifierProps)self._version = KNNAnomalyClassifierRegion.__VERSION__else:raise Exception(\"\"\"\" % (KNNAnomalyClassifierRegion.__VERSION__))", "docstring": "Set the state of ourself from a serialized state.", "id": "f17624:c0:m25"} {"signature": "def getOutputElementCount(self, name):", "body": "if name == '':return self._maxLabelOutputselse:raise Exception(\"\")", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.", "id": "f17624:c0:m27"} {"signature": "def whois_callers_caller():", "body": "import inspectframeObj = inspect.stack()[][]return inspect.getframeinfo(frameObj)", "docstring": "Returns: Traceback namedtuple for our caller's caller", "id": "f17625:m1"} {"signature": "@abstractmethoddef compute(self, inputs, outputs):", "body": "", "docstring": "Perform the main computation\n\n This method is called in each iteration for each phase the node supports.\n\n Called from the scope of the region's PyRegion.compute() method.\n\n inputs: dict of numpy arrays (one per input)\n outputs: dict of numpy arrays (one per output)", "id": "f17625:c0:m1"} {"signature": "@abstractmethoddef getOutputElementCount(self, name):", "body": "", "docstring": "Return the number of elements in the given output of the region\n\n Called from the scope of the region's PyRegion.getOutputElementCount() method.\n\n name: the name of the output", "id": "f17625:c0:m2"} {"signature": "@abstractmethoddef getName(self):", "body": "", "docstring": "Return the name of the region", "id": "f17625:c0:m3"} {"signature": "def __constructEphemeralInstanceVars(self):", "body": "assert not hasattr(self, '')self.ephemeral = DictObj()self.ephemeral.logPathInput = ''self.ephemeral.logPathOutput = ''self.ephemeral.logPathOutputDense = ''self.ephemeral._fpLogInput = Noneself.ephemeral._fpLogOutput = Noneself.ephemeral._fpLogOutputDense = Nonereturn", "docstring": "Initialize ephemeral instance variables (those that aren't serialized)", "id": "f17625:c1:m1"} {"signature": "def initialize(self):", "body": "self.identityPolicy.initialize(self)_debugOut(self.identityPolicy.getName())", "docstring": "Called by network after all links have been set up", "id": "f17625:c1:m2"} {"signature": "def compute(self, inputs, outputs):", "body": "self.identityPolicy.compute(inputs, outputs)_debugOut((\"\") %(self.identityPolicy.getName(),inputs, outputs))return", "docstring": "Run one iteration of the region's compute.\n\nThe guts of 
the compute are contained in the _compute() call so that\nwe can profile it if requested.", "id": "f17625:c1:m3"} {"signature": "@classmethoddef getSpec(cls):", "body": "spec = dict(description=\"\",singleNodeOnly=True,inputs=dict(bottomUpIn=dict(description=\"\"\"\"\"\",dataType='',count=,required=False,regionLevel=True,isDefaultInput=True,requireSplitterMap=False),topDownIn=dict(description=\"\"\"\"\"\",dataType='',count=,required = False,regionLevel=True,isDefaultInput=False,requireSplitterMap=False),),outputs=dict(bottomUpOut=dict(description=\"\"\"\"\"\",dataType='',count=,regionLevel=True,isDefaultOutput=True),topDownOut=dict(description=\"\"\"\"\"\",dataType='',count=,regionLevel=True,isDefaultOutput=False),),parameters=dict(logPathInput=dict(description='''',accessMode='',dataType='',count=,constraints=''),logPathOutput=dict(description='''',accessMode='',dataType='',count=,constraints=''),logPathOutputDense=dict(description='''',accessMode='',dataType='',count=,constraints=''),breakPdb=dict(description='',dataType='',count=,constraints='',defaultValue=,accessMode=''),breakKomodo=dict(description='',dataType='',count=,constraints='',defaultValue=,accessMode=''),),commands=dict(setIdentityPolicyInstance=dict(description=\"\" +\"\" +\"\"),getIdentityPolicyInstance=dict(description=\"\" +\"\" +\"\"),))return spec", "docstring": "Return the base Spec for TestRegion.", "id": "f17625:c1:m7"} {"signature": "def getParameter(self, parameterName, index=-):", "body": "assert not (parameterName in self.__dict__ and parameterName in self.ephemeral)if parameterName in self.ephemeral:assert parameterName not in self.__dict__return self.ephemeral[parameterName]else:return super(PyRegion, self).getParameter(parameterName, index)", "docstring": "Get the value of a NodeSpec parameter. Most parameters are handled\nautomatically by PyRegion's parameter get mechanism. The ones that need\nspecial treatment are explicitly handled here.", "id": "f17625:c1:m8"} {"signature": "def setParameter(self, parameterName, index, parameterValue):", "body": "assert not (parameterName in self.__dict__ and parameterName in self.ephemeral)if parameterName in self.ephemeral:if parameterName == \"\":self.ephemeral.logPathInput = parameterValueif self.ephemeral._fpLogInput:self.ephemeral._fpLogInput.close()self.ephemeral._fpLogInput = Noneif parameterValue:self.ephemeral._fpLogInput = open(self.ephemeral.logPathInput, '')elif parameterName == \"\":self.ephemeral.logPathOutput = parameterValueif self.ephemeral._fpLogOutput:self.ephemeral._fpLogOutput.close()self.ephemeral._fpLogOutput = Noneif parameterValue:self.ephemeral._fpLogOutput = open(self.ephemeral.logPathOutput, '')elif parameterName == \"\":self.ephemeral.logPathOutputDense = parameterValueif self.ephemeral._fpLogOutputDense:self.ephemeral._fpLogOutputDense.close()self.ephemeral._fpLogOutputDense = Noneif parameterValue:self.ephemeral._fpLogOutputDense = open(self.ephemeral.logPathOutputDense, '')else:raise Exception('' + parameterName)return", "docstring": "Set the value of a Spec parameter. Most parameters are handled\nautomatically by PyRegion's parameter set mechanism. The ones that need\nspecial treatment are explicitly handled here.", "id": "f17625:c1:m9"} {"signature": "def setIdentityPolicyInstance(self, identityPolicyObj):", "body": "assert not self.identityPolicyassert isinstance(identityPolicyObj, RegionIdentityPolicyBase)self.identityPolicy = identityPolicyObjreturn", "docstring": "TestRegion command that sets identity policy instance. 
The instance\n MUST be derived from TestRegion's RegionIdentityPolicyBase class.\n\n Users MUST set the identity instance BEFORE running the network\n\n Exception: AssertionError if identity policy instance has already been set\n or if the passed-in instance is not derived from\n RegionIdentityPolicyBase.", "id": "f17625:c1:m10"} {"signature": "def getIdentityPolicyInstance(self):", "body": "assert self.identityPolicyreturn self.identityPolicy", "docstring": "TestRegion command that returns the identity policy instance that was\n associated with this TestRegion instance via setIdentityPolicyInstance().\n\n Returns: a RegionIdentityPolicyBase-based instance that was associated with\n this TestRegion intstance.\n\n Exception: AssertionError if no identity policy instance has been set.", "id": "f17625:c1:m11"} {"signature": "def write(self, proto):", "body": "proto.breakPdb = self.breakPdbproto.breakKomodo = self.breakKomodo", "docstring": "Save the region's state.\n\n The ephemerals and identity policy are excluded from the saved state.\n\n :param proto: an instance of TestRegionProto to serialize", "id": "f17625:c1:m13"} {"signature": "def read(self, proto):", "body": "self.breakPdb = proto.breakPdbself.breakKomodo = proto.breakKomodoself.__constructEphemeralInstanceVars()", "docstring": "Load the state from the given proto instance.\n\n The saved state does not include the identity policy so this must be\n constructed and set after the region is deserialized. This can be done by\n calling 'setIdentityPolicyInstance'.\n\n :param proto: an instance of TestRegionProto to load state from", "id": "f17625:c1:m14"} {"signature": "def __getstate__(self):", "body": "state = self.__dict__.copy()state.pop('')return state", "docstring": "Return serializable state. This function will return a version of the\n__dict__ with all \"ephemeral\" members stripped out. \"Ephemeral\" members\nare defined as those that do not need to be (nor should be) stored\nin any kind of persistent file (e.g., NuPIC network XML file.)", "id": "f17625:c1:m15"} {"signature": "def __setstate__(self, state):", "body": "assert '' not in stateself.__dict__.update(state)self.__constructEphemeralInstanceVars()return", "docstring": "Set the state of ourself from a serialized state.", "id": "f17625:c1:m16"} {"signature": "def rewind(self):", "body": "self._iterNum = if self.dataSource is not None:self.dataSource.rewind()", "docstring": "Reset the sensor to beginning of data.", "id": "f17628:c0:m4"} {"signature": "def getNextRecord(self):", "body": "allFiltersHaveEnoughData = Falsewhile not allFiltersHaveEnoughData:data = self.dataSource.getNextRecordDict()if not data:raise StopIteration(\"\")if \"\" not in data:data[\"\"] = if \"\" not in data:data[\"\"] = if \"\" not in data:data[\"\"] = [None]data, allFiltersHaveEnoughData = self.applyFilters(data)self.lastRecord = datareturn data", "docstring": "Get the next record to encode. Includes getting a record from the \n`dataSource` and applying filters. If the filters request more data from the \n`dataSource` continue to get data from the `dataSource` until all filters \nare satisfied. 
This method is separate from :meth:`.RecordSensor.compute` so that we can \nuse a standalone :class:`.RecordSensor` to get filtered data.", "id": "f17628:c0:m5"} {"signature": "def applyFilters(self, data):", "body": "if self.verbosity > :print(\"\" % data)allFiltersHaveEnoughData = Trueif len(self.preEncodingFilters) > :originalReset = data['']actualReset = originalResetfor f in self.preEncodingFilters:filterHasEnoughData = f.process(data)allFiltersHaveEnoughData = (allFiltersHaveEnoughDataand filterHasEnoughData)actualReset = actualReset or data['']data[''] = originalResetdata[''] = actualResetreturn data, allFiltersHaveEnoughData", "docstring": "Apply pre-encoding filters. These filters may modify or add data. If a \nfilter needs another record (e.g. a delta filter) it will request another \nrecord by returning False and the current record will be skipped (but will \nstill be given to all filters).\n\nWe have to be very careful about resets. A filter may add a reset,\nbut other filters should not see the added reset, each filter sees\nthe original reset value, and we keep track of whether any filter\nadds a reset.\n\n:param data: (dict) The data that will be processed by the filter.\n:returns: (tuple) with the data processed by the filter and a boolean to\n know whether or not the filter needs mode data.", "id": "f17628:c0:m6"} {"signature": "def populateCategoriesOut(self, categories, output):", "body": "if categories[] is None:output[:] = -else:for i, cat in enumerate(categories[:len(output)]):output[i] = catoutput[len(categories):] = -", "docstring": "Populate the output array with the category indices.\n\n.. note:: Non-categories are represented with ``-1``.\n\n:param categories: (list) of category strings\n:param output: (list) category output, will be overwritten", "id": "f17628:c0:m7"} {"signature": "def compute(self, inputs, outputs):", "body": "if not self.topDownMode:data = self.getNextRecord()reset = data[\"\"]sequenceId = data[\"\"]categories = data[\"\"]self.encoder.encodeIntoArray(data, outputs[\"\"])if self.predictedField is not None and self.predictedField != \"\":allEncoders = list(self.encoder.encoders)if self.disabledEncoder is not None:allEncoders.extend(self.disabledEncoder.encoders)encoders = [e for e in allEncodersif e[] == self.predictedField]if len(encoders) == :raise ValueError(\"\"\"\" % self.predictedField)else:encoder = encoders[][]actualValue = data[self.predictedField]outputs[\"\"][:] = encoder.getBucketIndices(actualValue)if isinstance(actualValue, str):outputs[\"\"][:] = encoder.getBucketIndices(actualValue)else:outputs[\"\"][:] = actualValueoutputs[\"\"][:] = self.encoder.getScalars(data)self._outputValues[\"\"] = self.encoder.getEncodedValues(data)encoders = self.encoder.getEncoderList()prevOffset = sourceEncodings = []bitData = outputs[\"\"]for encoder in encoders:nextOffset = prevOffset + encoder.getWidth()sourceEncodings.append(bitData[prevOffset:nextOffset])prevOffset = nextOffsetself._outputValues[''] = sourceEncodingsfor filter in self.postEncodingFilters:filter.process(encoder=self.encoder, data=outputs[''])outputs[''][] = resetoutputs[''][] = sequenceIdself.populateCategoriesOut(categories, outputs[''])if self.verbosity >= :if self._iterNum == :self.encoder.pprintHeader(prefix=\"\")if reset:print(\"\" % sequenceId)if self.verbosity >= :print()if self.verbosity >= :self.encoder.pprint(outputs[\"\"], prefix=\"\" % (self._iterNum))scalarValues = self.encoder.getScalars(data)nz = outputs[\"\"].nonzero()[]print(\"\" % (len(nz)), nz)print(\"\", 
self.encoder.scalarsToStr(scalarValues))if self.verbosity >= :print(\"\", str(data))if self.verbosity >= :decoded = self.encoder.decode(outputs[\"\"])print(\"\", self.encoder.decodedToStr(decoded))self._iterNum += else:spatialTopDownIn = inputs['']spatialTopDownOut = self.encoder.topDownCompute(spatialTopDownIn)values = [elem.value for elem in spatialTopDownOut]scalars = [elem.scalar for elem in spatialTopDownOut]encodings = [elem.encoding for elem in spatialTopDownOut]self._outputValues[''] = valuesoutputs[''][:] = numpy.array(scalars)self._outputValues[''] = encodingstemporalTopDownIn = inputs['']temporalTopDownOut = self.encoder.topDownCompute(temporalTopDownIn)values = [elem.value for elem in temporalTopDownOut]scalars = [elem.scalar for elem in temporalTopDownOut]encodings = [elem.encoding for elem in temporalTopDownOut]self._outputValues[''] = valuesoutputs[''][:] = numpy.array(scalars)self._outputValues[''] = encodingsassert len(spatialTopDownOut) == len(temporalTopDownOut), (\"\"\"\")", "docstring": "Get a record from the dataSource and encode it.\n\nOverrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.compute`.", "id": "f17628:c0:m8"} {"signature": "def _convertNonNumericData(self, spatialOutput, temporalOutput, output):", "body": "encoders = self.encoder.getEncoderList()types = self.encoder.getDecoderOutputFieldTypes()for i, (encoder, type) in enumerate(zip(encoders, types)):spatialData = spatialOutput[i]temporalData = temporalOutput[i]if type != FieldMetaType.integer and type != FieldMetaType.float:spatialData = encoder.getScalars(spatialData)[]temporalData = encoder.getScalars(temporalData)[]assert isinstance(spatialData, (float, int))assert isinstance(temporalData, (float, int))output[''][i] = spatialDataoutput[''][i] = temporalData", "docstring": "Converts all of the non-numeric fields from spatialOutput and temporalOutput\ninto their scalar equivalents and records them in the output dictionary.\n\n:param spatialOutput: The results of topDownCompute() for the spatial input.\n:param temporalOutput: The results of topDownCompute() for the temporal\n input.\n:param output: The main dictionary of outputs passed to compute(). It is\n expected to have keys 'spatialTopDownOut' and 'temporalTopDownOut' that\n are mapped to numpy arrays.", "id": "f17628:c0:m9"} {"signature": "def getOutputValues(self, outputName):", "body": "return self._outputValues[outputName]", "docstring": ".. note:: These are normal Python lists, rather than numpy arrays. 
This is \n to support lists with mixed scalars and strings, as in the case of \n records with categorical variables.\n\n:returns: (dict) output values.", "id": "f17628:c0:m10"} {"signature": "def getOutputElementCount(self, name):", "body": "if name == \"\":print (\"\"\"\")return elif name == \"\":print (\"\"\"\")return elif name == \"\":if self.encoder is None:raise Exception(\"\"\"\"\"\")return self.encoder.getWidth()elif name == \"\":if self.encoder is None:raise Exception(\"\"\"\"\"\")return len(self.encoder.getDescription())elif name == \"\":return elif name == \"\":return elif name == \"\":return self.numCategorieselif name == '' or name == '':if self.encoder is None:raise Exception(\"\"\"\"\"\")return len(self.encoder.getDescription())else:raise Exception(\"\" % name)", "docstring": "Computes the width of dataOut.\n\nOverrides \n:meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.", "id": "f17628:c0:m11"} {"signature": "def setParameter(self, parameterName, index, parameterValue):", "body": "if parameterName == '':self.topDownMode = parameterValueelif parameterName == '':self.predictedField = parameterValueelse:raise Exception('' + parameterName)", "docstring": "Set the value of a Spec parameter. Most parameters are handled\nautomatically by PyRegion's parameter set mechanism. The ones that need\nspecial treatment are explicitly handled here.", "id": "f17628:c0:m12"} {"signature": "@staticmethoddef getSchema():", "body": "return RecordSensorProto", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSchema`.", "id": "f17628:c0:m13"} {"signature": "def writeToProto(self, proto):", "body": "self.encoder.write(proto.encoder)if self.disabledEncoder is not None:self.disabledEncoder.write(proto.disabledEncoder)proto.topDownMode = int(self.topDownMode)proto.verbosity = self.verbosityproto.numCategories = self.numCategories", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.writeToProto`.", "id": "f17628:c0:m14"} {"signature": "@classmethoddef readFromProto(cls, proto):", "body": "instance = cls()instance.encoder = MultiEncoder.read(proto.encoder)if proto.disabledEncoder is not None:instance.disabledEncoder = MultiEncoder.read(proto.disabledEncoder)instance.topDownMode = bool(proto.topDownMode)instance.verbosity = proto.verbosityinstance.numCategories = proto.numCategoriesreturn instance", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.readFromProto`.", "id": "f17628:c0:m15"} {"signature": "def getSensedValue(self):", "body": "return self._sensedValue", "docstring": ":return: sensed value", "id": "f17629:c0:m5"} {"signature": "def setSensedValue(self, value):", "body": "self._sensedValue = value", "docstring": ":param value: will be encoded when this region does a compute.", "id": "f17629:c0:m6"} {"signature": "def retrySQL(timeoutSec=*, logger=None):", "body": "if logger is None:logger = logging.getLogger(__name__)def retryFilter(e, args, kwargs):if isinstance(e, (pymysql.InternalError, pymysql.OperationalError)):if e.args and e.args[] in _ALL_RETRIABLE_ERROR_CODES:return Trueelif isinstance(e, pymysql.Error):if (e.args andinspect.isclass(e.args[]) and issubclass(e.args[], socket_error)):return Truereturn FalseretryExceptions = tuple([pymysql.InternalError,pymysql.OperationalError,pymysql.Error,])return make_retry_decorator(timeoutSec=timeoutSec, initialRetryDelaySec=, maxRetryDelaySec=,retryExceptions=retryExceptions, retryFilter=retryFilter,logger=logger)", "docstring": "Return a closure suitable 
for use as a decorator for\n retrying a pymysql DAO function on certain failures that warrant retries (\n e.g., RDS/MySQL server down temporarily, transaction deadlock, etc.).\n We share this function across multiple scripts (e.g., ClientJobsDAO,\n StreamMgr) for consistent behavior.\n\n .. note:: Please ensure that the operation being retried is idempotent.\n\n .. note:: logging must be initialized *before* any loggers are created, else\n there will be no output; see nupic.support.initLogging()\n\n Usage Example:\n\n .. code-block:: python\n\n @retrySQL()\n def jobInfo(self, jobID):\n ...\n\n :param timeoutSec: How many seconds from time of initial call to stop retrying\n (floating point)\n :param logger: User-supplied logger instance.", "id": "f17630:m0"} {"signature": "def cPrint(self, level, message, *args, **kw):", "body": "if level > self.consolePrinterVerbosity:returnif len(kw) > :raise KeyError(\"\" % str(list(kw.keys())))newline = kw.get(\"\", True)if len(kw) == and '' not in kw:raise KeyError(\"\" % list(kw.keys())[])if len(args) == :if newline:print(message)else:print(message, end='')else:if newline:print(message % args)else:print(message % args, end='')", "docstring": "Print a message to the console.\n\n Prints only if level <= self.consolePrinterVerbosity\n Printing with level 0 is equivalent to using a print statement,\n and should normally be avoided.\n\n :param level: (int) indicating the urgency of the message with\n lower values meaning more urgent (messages at level 0 are the most\n urgent and are always printed)\n\n :param message: (string) possibly with format specifiers\n\n :param args: specifies the values for any format specifiers in message\n\n :param kw: newline is the only keyword argument. True (default) if a newline\n should be printed", "id": "f17631:c0:m1"} {"signature": "def _allow_new_attributes(f):", "body": "def decorated(self, *args, **kw):\"\"\"\"\"\"if not hasattr(self, ''):self.__dict__[''] = else:self._canAddAttributes += assert self._canAddAttributes >= count = self._canAddAttributesf(self, *args, **kw)if hasattr(self, ''):self._canAddAttributes -= else:self._canAddAttributes = count - assert self._canAddAttributes >= if self._canAddAttributes == :del self._canAddAttributesdecorated.__doc__ = f.__doc__decorated.__name__ = f.__name__return decorated", "docstring": "A decorator that maintains the attribute lock state of an object\n\n It cooperates with the LockAttributesMetaclass (see below) that replaces\n the __setattr__ method with a custom one that checks the _canAddAttributes\n counter and allows setting new attributes only if _canAddAttributes > 0.\n\n New attributes can be set only from methods decorated\n with this decorator (should be only __init__ and __setstate__ normally)\n\n The decorator is reentrant (e.g. if from inside a decorated function another\n decorated function is invoked). Before invoking the target function it\n increments the counter (or sets it to 1). After invoking the target function\n it decrements the counter and if it's 0 it removes the counter.", "id": "f17632:m0"} {"signature": "def _simple_init(self, *args, **kw):", "body": "type(self).__base__.__init__(self, *args, **kw)", "docstring": "trivial init method that just calls base class's __init__()\n\n This method is attached to classes that don't define __init__(). 
It is needed\n because LockAttributesMetaclass must decorate the __init__() method of\n its target class.", "id": "f17632:m1"} {"signature": "def Enum(*args, **kwargs):", "body": "def getLabel(cls, val):\"\"\"\"\"\"return cls.__labels[val]def validate(cls, val):\"\"\"\"\"\"return val in cls.__valuesdef getValues(cls):\"\"\"\"\"\"return list(cls.__values)def getLabels(cls):\"\"\"\"\"\"return list(cls.__labels.values())def getValue(cls, label):\"\"\"\"\"\"return cls.__labels[label]for arg in list(args)+kwargs.keys():if type(arg) is not str:raise TypeError(\"\".format(arg))if not __isidentifier(arg):raise ValueError(\"\"\"\".format(arg))kwargs.update(zip(args, args))newType = type(\"\", (object,), kwargs)newType.__labels = dict( (v,k) for k,v in kwargs.iteritems())newType.__values = set(newType.__labels.keys())newType.getLabel = functools.partial(getLabel, newType)newType.validate = functools.partial(validate, newType)newType.getValues = functools.partial(getValues, newType)newType.getLabels = functools.partial(getLabels, newType)newType.getValue = functools.partial(getValue, newType)return newType", "docstring": "Utility function for creating enumerations in python\n\nExample Usage:\n >> Color = Enum(\"Red\", \"Green\", \"Blue\", \"Magenta\")\n >> print Color.Red\n >> 0\n >> print Color.Green\n >> 1\n >> print Color.Blue\n >> 2\n >> print Color.Magenta\n >> 3\n >> Color.Violet\n >> 'violet'\n >> Color.getLabel(Color.Red)\n >> 'Red'\n >> Color.getLabel(2)\n >> 'Blue'", "id": "f17633:m1"} {"signature": "def groupby2(*args):", "body": "generatorList = [] if len(args) % == :raise ValueError(\"\")advanceList = []for i in xrange(, len(args), ):listn = args[i]fn = args[i + ]if listn is not None:generatorList.append(groupby(listn, fn))advanceList.append(True) else:generatorList.append(None)advanceList.append(False)n = len(generatorList)nextList = [None] * nwhile True:for i in xrange(n):if advanceList[i]:try:nextList[i] = generatorList[i].next()except StopIteration:nextList[i] = Noneif all(entry is None for entry in nextList):breakminKeyVal = min(nextVal[] for nextVal in nextListif nextVal is not None)retGroups = [minKeyVal]for i in xrange(n):if nextList[i] is not None and nextList[i][] == minKeyVal:retGroups.append(nextList[i][])advanceList[i] = Trueelse:advanceList[i] = FalseretGroups.append(None)yield tuple(retGroups)", "docstring": "Like itertools.groupby, with the following additions:\n\n - Supports multiple sequences. Instead of returning (k, g), each iteration\n returns (k, g0, g1, ...), with one `g` for each input sequence. The value of\n each `g` is either a non-empty iterator or `None`.\n - It treats the value `None` as an empty sequence. So you can make subsequent\n calls to groupby2 on any `g` value.\n\n .. note:: Read up on groupby here:\n https://docs.python.org/dev/library/itertools.html#itertools.groupby\n\n :param args: (list) Parameters alternating between sorted lists and their\n respective key functions. The lists should be sorted with\n respect to their key function.\n\n :returns: (tuple) A n + 1 dimensional tuple, where the first element is the\n key of the iteration, and the other n entries are groups of\n objects that share this key. Each group corresponds to the an\n input sequence. `groupby2` is a generator that returns a tuple\n for every iteration. 
If an input sequence has no members with\n the current key, None is returned in place of a generator.", "id": "f17634:m0"} {"signature": "def printTestHeader(self):", "body": "print()print(\"\")print(\"\" % (self,))print(\"\" % (datetime.utcnow()))print(\"\")sys.stdout.flush()return", "docstring": "Print out what test we are running", "id": "f17635:c0:m1"} {"signature": "def printBanner(self, msg, *args):", "body": "print()print(\"\")print(msg % args)print(\"\" % (datetime.utcnow(), self,), file=sys.stdout)print(\"\")sys.stdout.flush()return", "docstring": "Print out a banner", "id": "f17635:c0:m2"} {"signature": "def addExtraLogItem(self, item):", "body": "self.__logItems.append(item)return", "docstring": "Add an item to the log items list for the currently running session.\n Our self.myAssertXXXXXX wrappers add the current items to the msg that is\n passed to the unittest's assertXXXXX methods. The extra info will show up\n in test results if the test fails.", "id": "f17635:c0:m3"} {"signature": "def __wrapMsg(self, msg):", "body": "msg = msgif not self.__logItemselse {\"\":msg, \"\":copy.copy(self.__logItems)}msg = str(msg)msg = msg.replace('', '')return msg", "docstring": "Called by our unittest.TestCase.assertXXXXXX overrides to construct a\n message from the given message plus self.__logItems, if any. If\n self.__logItems is non-empty, returns a dictionary containing the given\n message value as the \"msg\" property and self.__logItems as the \"extra\"\n property. If self.__logItems is empy, returns the given msg arg.", "id": "f17635:c0:m5"} {"signature": "def assertEqual(self, first, second, msg=None):", "body": "unittest.TestCase.assertEqual(self, first, second, self.__wrapMsg(msg))return", "docstring": "unittest.TestCase.assertEqual override; adds extra log items to msg", "id": "f17635:c0:m6"} {"signature": "def assertNotEqual(self, first, second, msg=None):", "body": "unittest.TestCase.assertNotEqual(self, first, second, self.__wrapMsg(msg))return", "docstring": "unittest.TestCase.assertNotEqual override; adds extra log items to msg", "id": "f17635:c0:m7"} {"signature": "def assertTrue(self, expr, msg=None):", "body": "unittest.TestCase.assertTrue(self, expr, self.__wrapMsg(msg))return", "docstring": "unittest.TestCase.assertTrue override; adds extra log items to msg", "id": "f17635:c0:m8"} {"signature": "def assertFalse(self, expr, msg=None):", "body": "unittest.TestCase.assertFalse(self, expr, self.__wrapMsg(msg))return", "docstring": "unittest.TestCase.assertFalse override; adds extra log items to msg", "id": "f17635:c0:m9"} {"signature": "def tagTest(tag, comment=None):", "body": "return getattr(pytest.mark, tag)", "docstring": "A decorator for tagging a test class or test method with the given tag\n string\n\n tag: test tag string\n comment: reason for the tag; string; optional\n\n Examples:\n\n @tagTest(\"slowTests\", \"takes a long time to execute\")\n class ClusterTests(TestCase):\n def testSwarmWithAggregation(self):\n pass\n\n def testSwarmWithoutAggregation(self):\n pass\n\n or\n\n class MiscTests(TestCase):\n def testOnePlusOne(self):\n pass\n\n @tagTest(\"slowTests\")\n def testSwarm(self):\n pass", "id": "f17636:m0"} {"signature": "def getNumpyRandomGenerator(seed = None):", "body": "if seed is None:seed = int((time.time()%)*)print(\"\", seed, \"\", end='')callStack = traceback.extract_stack(limit=)print(callStack[][], \"\", callStack[][], \"\", callStack[][])return numpy.random.RandomState(seed)", "docstring": "Return a numpy random number generator with the given 
seed.\nIf seed is None, set it randomly based on time. Regardless we log\nthe actual seed and stack trace so that test failures are replicable.", "id": "f17637:m0"} {"signature": "def convertPermanences(sourceSP, destSP):", "body": "numColumns = sourceSP.getNumColumns()numInputs = sourceSP.getNumInputs()for i in range(numColumns):potential = numpy.zeros(numInputs).astype(uintType)sourceSP.getPotential(i, potential)destSP.setPotential(i, potential)perm = numpy.zeros(numInputs).astype(realType)sourceSP.getPermanence(i, perm)destSP.setPermanence(i, perm)", "docstring": "Transfer the permanences from source to dest SP's. This is used in test\nroutines to counteract some drift between implementations.\nWe assume the two SP's have identical configurations/parameters.", "id": "f17637:m1"} {"signature": "def getSeed():", "body": "seed = int((time.time()%)*)print(\"\", seed, \"\", end='')callStack = traceback.extract_stack(limit=)print(callStack[][], \"\", callStack[][], \"\", callStack[][])return seed", "docstring": "Generate and log a 32-bit compatible seed value.", "id": "f17637:m2"} {"signature": "def convertSP(pySp, newSeed):", "body": "columnDim = pySp._columnDimensionsinputDim = pySp._inputDimensionsnumInputs = pySp.getNumInputs()numColumns = pySp.getNumColumns()cppSp = CPPSpatialPooler(inputDim, columnDim)cppSp.setPotentialRadius(pySp.getPotentialRadius())cppSp.setPotentialPct(pySp.getPotentialPct())cppSp.setGlobalInhibition(pySp.getGlobalInhibition())numActiveColumnsPerInhArea = pySp.getNumActiveColumnsPerInhArea()localAreaDensity = pySp.getLocalAreaDensity()if (numActiveColumnsPerInhArea > ):cppSp.setNumActiveColumnsPerInhArea(numActiveColumnsPerInhArea)else:cppSp.setLocalAreaDensity(localAreaDensity)cppSp.setStimulusThreshold(pySp.getStimulusThreshold())cppSp.setInhibitionRadius(pySp.getInhibitionRadius())cppSp.setDutyCyclePeriod(pySp.getDutyCyclePeriod())cppSp.setBoostStrength(pySp.getBoostStrength())cppSp.setIterationNum(pySp.getIterationNum())cppSp.setIterationLearnNum(pySp.getIterationLearnNum())cppSp.setSpVerbosity(pySp.getSpVerbosity())cppSp.setUpdatePeriod(pySp.getUpdatePeriod())cppSp.setSynPermTrimThreshold(pySp.getSynPermTrimThreshold())cppSp.setSynPermActiveInc(pySp.getSynPermActiveInc())cppSp.setSynPermInactiveDec(pySp.getSynPermInactiveDec())cppSp.setSynPermBelowStimulusInc(pySp.getSynPermBelowStimulusInc())cppSp.setSynPermConnected(pySp.getSynPermConnected())cppSp.setMinPctOverlapDutyCycles(pySp.getMinPctOverlapDutyCycles())boostFactors = numpy.zeros(numColumns).astype(realType)pySp.getBoostFactors(boostFactors)cppSp.setBoostFactors(boostFactors)overlapDuty = numpy.zeros(numColumns).astype(realType)pySp.getOverlapDutyCycles(overlapDuty)cppSp.setOverlapDutyCycles(overlapDuty)activeDuty = numpy.zeros(numColumns).astype(realType)pySp.getActiveDutyCycles(activeDuty)cppSp.setActiveDutyCycles(activeDuty)minOverlapDuty = numpy.zeros(numColumns).astype(realType)pySp.getMinOverlapDutyCycles(minOverlapDuty)cppSp.setMinOverlapDutyCycles(minOverlapDuty)for i in range(numColumns):potential = numpy.zeros(numInputs).astype(uintType)pySp.getPotential(i, potential)cppSp.setPotential(i, potential)perm = numpy.zeros(numInputs).astype(realType)pySp.getPermanence(i, perm)cppSp.setPermanence(i, perm)pySp._random = NupicRandom(newSeed)cppSp.seed_(newSeed)return cppSp", "docstring": "Given an instance of a python spatial_pooler return an instance of the CPP\nspatial_pooler with identical parameters.", "id": "f17637:m3"} {"signature": "def CreateSP(imp, params):", "body": "if (imp == \"\"):spClass = 
PySpatialPoolerelif (imp == \"\"):spClass = CPPSpatialPoolerelse:raise RuntimeError(\"\")print(params)sp = spClass(**params)return sp", "docstring": "Helper class for creating an instance of the appropriate spatial pooler using\ngiven parameters.\n\nParameters:\n----------------------------\nimp: Either 'py' or 'cpp' for creating the appropriate instance.\nparams: A dict for overriding constructor parameters. The keys must\n correspond to contructor parameter names.\n\nReturns the SP object.", "id": "f17637:m4"} {"signature": "@abstractmethoddef getTMClass(self):", "body": "", "docstring": "Implement this method to specify the Temporal Memory class.", "id": "f17638:c0:m0"} {"signature": "@abstractmethoddef getPatternMachine(self):", "body": "", "docstring": "Implement this method to provide the pattern machine.", "id": "f17638:c0:m1"} {"signature": "def getDefaultTMParams(self):", "body": "return {}", "docstring": "Override this method to set the default TM params for `self.tm`.", "id": "f17638:c0:m2"} {"signature": "def init(self, overrides=None):", "body": "params = self._computeTMParams(overrides)class MonitoredTemporalMemory(TemporalMemoryMonitorMixin,self.getTMClass()): passself.tm = MonitoredTemporalMemory(**params)", "docstring": "Initialize Temporal Memory, and other member variables.\n\n:param overrides: overrides for default Temporal Memory parameters", "id": "f17638:c0:m4"} {"signature": "def getCallerInfo(depth=):", "body": "f = sys._getframe(depth)method_name = f.f_code.co_namefilename = f.f_code.co_filenamearg_class = Noneargs = inspect.getargvalues(f)if len(args[]) > :arg_name = args[][] arg_class = args[][arg_name].__class__.__name__return (method_name, filename, arg_class)", "docstring": "Utility function to get information about function callers\n\n The information is the tuple (function/method name, filename, class)\n The class will be None if the caller is just a function and not an object\n method.\n\n :param depth: (int) how far back in the callstack to go to extract the caller\n info", "id": "f17640:m0"} {"signature": "def title(s=None, additional='', stream=sys.stdout):", "body": "if s is None:callable_name, file_name, class_name = getCallerInfo()s = callable_nameif class_name is not None:s = class_name + '' + callable_namelines = (s + additional).split('')length = max(len(line) for line in lines)print >> stream, '' * lengthprint >> stream, s + additionalprint >> stream, '' * length", "docstring": "Utility function to display nice titles\n\n It automatically extracts the name of the function/method it is called from\n and you can add additional text. title() will then print the name\n of the function/method and the additional text surrounded by tow lines\n of dashes. If you don't want the name of the function, you can provide\n alternative text (regardless of the additional text)\n\n :param s: (string) text to display, uses the function name and arguments by\n default\n :param additional: (string) extra text to display (not needed if s is not\n None)\n :param stream: (stream) the stream to print to. Ny default goes to standard\n output\n\n Examples:\n\n .. code-block:: python\n\n def foo():\n title()\n\n will display:\n\n .. code-block:: text\n\n ---\n foo\n ---\n\n .. code-block:: python\n\n def foo():\n title(additional='(), this is cool!!!')\n\n will display:\n\n .. code-block:: text\n\n ----------------------\n foo(), this is cool!!!\n ----------------------\n\n .. code-block:: python\n\n def foo():\n title('No function name here!')\n\n will display:\n\n .. 
code-block:: text\n\n ----------------------\n No function name here!\n ----------------------", "id": "f17640:m1"} {"signature": "def getArgumentDescriptions(f):", "body": "argspec = inspect.getargspec(f)docstring = f.__doc__descriptions = {}if docstring:lines = docstring.split('')i = while i < len(lines):stripped = lines[i].lstrip()if not stripped:i += continueindentLevel = lines[i].index(stripped[])firstWord = stripped.split()[]if firstWord.endswith(''):firstWord = firstWord[:-]if firstWord in argspec.args:argName = firstWordrestOfLine = stripped[len(firstWord)+:].strip()argLines = [restOfLine]i += while i < len(lines):stripped = lines[i].lstrip()if not stripped:breakif lines[i].index(stripped[]) <= indentLevel:breakargLines.append(lines[i].strip())i += descriptions[argName] = ''.join(argLines)else:i += args = []if argspec.defaults:defaultCount = len(argspec.defaults)else:defaultCount = nonDefaultArgCount = len(argspec.args) - defaultCountfor i, argName in enumerate(argspec.args):if i >= nonDefaultArgCount:defaultValue = argspec.defaults[i - nonDefaultArgCount]args.append((argName, descriptions.get(argName, \"\"), defaultValue))else:args.append((argName, descriptions.get(argName, \"\")))return args", "docstring": "Get the arguments, default values, and argument descriptions for a function.\n\nParses the argument descriptions out of the function docstring, using a\nformat something lke this:\n\n::\n\n [junk]\n argument_name: description...\n description...\n description...\n [junk]\n [more arguments]\n\nIt will find an argument as long as the exact argument name starts the line.\nIt will then strip a trailing colon, if present, then strip the rest of the\nline and use it to start the description. It will then strip and append any\nsubsequent lines with a greater indent level than the original argument name.\n\n:param f: (function) to inspect\n:returns: (list of tuples) (``argName``, ``argDescription``, ``defaultValue``)\n If an argument has no default value, the tuple is only two elements long (as\n ``None`` cannot be used, since it could be a default value itself).", "id": "f17640:m2"} {"signature": "def initLogging(verbose=False, console='', consoleLevel=''):", "body": "global gLoggingInitializedif gLoggingInitialized:if verbose:print >> sys.stderr, \"\"returnconsoleStreamMappings = {'' : '','' : '',}consoleLogLevels = ['', '', '', '', '', '','']assert console is None or console in consoleStreamMappings.keys(), ('') % (console,)assert consoleLevel in consoleLogLevels, ('') % (consoleLevel)configFilename = ''configFilePath = resource_filename(\"\", configFilename)configLogDir = os.environ.get('', None)if verbose:print >> sys.stderr, (\"\") % (configFilePath)replacements = dict()def makeKey(name):\"\"\"\"\"\"return \"\" % (name)platform = sys.platform.lower()if platform.startswith(''):import java.langplatform = java.lang.System.getProperty(\"\").lower()if platform.startswith(''):platform = ''if platform.startswith(''):replacements[makeKey('')] = ''elif platform.startswith(''):replacements[makeKey('')] = ''elif platform.startswith(''):replacements[makeKey('')] = ''else:raise RuntimeError(\"\" % (sys.platform,))replacements[makeKey('')] = ''if platform.startswith(''):replacements[makeKey('')] = ''else:replacements[makeKey('')] = ''handlers = list()if configLogDir is not None:logFilePath = _genLoggingFilePath()makeDirectoryFromAbsolutePath(os.path.dirname(logFilePath))replacements[makeKey('')] = repr(logFilePath)handlers.append(replacements[makeKey('')])if console is not 
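getCallerInfo above inspects the interpreter call stack to report which function or method invoked it, and recovers the caller's class from its first argument when one exists. A rough self-contained sketch of the technique (the depth handling and the class detection are simplified assumptions):

.. code-block:: python

    import inspect
    import sys

    def caller_info(depth=1):
        # Fetch the frame `depth` levels above this function.
        frame = sys._getframe(depth)
        method_name = frame.f_code.co_name
        filename = frame.f_code.co_filename
        # If the caller has arguments, treat the first one as a possible
        # `self` and report its class name.
        arg_info = inspect.getargvalues(frame)
        arg_class = None
        if arg_info.args:
            arg_class = type(arg_info.locals[arg_info.args[0]]).__name__
        return method_name, filename, arg_class

    class Demo(object):
        def hello(self):
            return caller_info(depth=1)

    print(Demo().hello())   # ('hello', '/path/to/this_script.py', 'Demo')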
None:handlers.append(consoleStreamMappings[console])replacements[makeKey('')] = \"\".join(handlers)replacements[makeKey('')] = consoleLevelcustomConfig = StringIO()loggingFileContents = resource_string(__name__, configFilename)for lineNum, line in enumerate(loggingFileContents.splitlines()):if \"\" in line:for (key, value) in replacements.items():line = line.replace(key, value)if \"\" in line and \"\" not in line:raise RuntimeError((\"\"\"\"\"\") % (line, lineNum, configFilePath))customConfig.write(\"\" % line)customConfig.seek()if python_version()[:] >= '':logging.config.fileConfig(customConfig, disable_existing_loggers=False)else:logging.config.fileConfig(customConfig)gLoggingInitialized = True", "docstring": "Initilize NuPic logging by reading in from the logging configuration file. The\nlogging configuration file is named ``nupic-logging.conf`` and is expected to\nbe in the format defined by the python logging module.\n\nIf the environment variable ``NTA_CONF_PATH`` is defined, then the logging\nconfiguration file is expected to be in the ``NTA_CONF_PATH`` directory. If\n``NTA_CONF_PATH`` is not defined, then it is found in the 'conf/default'\nsubdirectory of the NuPic installation directory (typically\n~/nupic/current/conf/default)\n\nThe logging configuration file can use the environment variable\n``NTA_LOG_DIR`` to set the locations of log files. If this variable is not\ndefined, logging to files will be disabled.\n\n:param console: Defines console output for the default \"root\" logging\n configuration; this may be one of 'stdout', 'stderr', or None;\n Use None to suppress console logging output\n:param consoleLevel:\n Logging-level filter string for console output corresponding to\n logging levels in the logging module; may be one of:\n 'DEBUG', 'INFO', 'WARNING', 'ERROR', or 'CRITICAL'.\n E.g., a value of'WARNING' suppresses DEBUG and INFO level output\n to console, but allows WARNING, ERROR, and CRITICAL", "id": "f17640:m3"} {"signature": "def _genLoggingFilePath():", "body": "appName = os.path.splitext(os.path.basename(sys.argv[]))[] or ''appLogDir = os.path.abspath(os.path.join(os.environ[''],'' % (os.environ[''],),appName))appLogFileName = '' % (appName, long(time.mktime(time.gmtime())), os.getpid())return os.path.join(appLogDir, appLogFileName)", "docstring": "Generate a filepath for the calling app", "id": "f17640:m4"} {"signature": "def aggregationToMonthsSeconds(interval):", "body": "seconds = interval.get('', ) * seconds += interval.get('', ) * seconds += interval.get('', )seconds += interval.get('', ) * seconds += interval.get('', ) * * seconds += interval.get('', ) * * * seconds += interval.get('', ) * * * * months = interval.get('', )months += * interval.get('', )return {'': months, '': seconds}", "docstring": "Return the number of months and seconds from an aggregation dict that\nrepresents a date and time.\n\nInterval is a dict that contain one or more of the following keys: 'years',\n'months', 'weeks', 'days', 'hours', 'minutes', seconds', 'milliseconds',\n'microseconds'.\n\nFor example:\n\n::\n\n aggregationMicroseconds({'years': 1, 'hours': 4, 'microseconds':42}) ==\n {'months':12, 'seconds':14400.000042}\n\n:param interval: (dict) The aggregation interval representing a date and time\n:returns: (dict) number of months and seconds in the interval:\n ``{months': XX, 'seconds': XX}``. 
The seconds is\n a floating point that can represent resolutions down to a\n microsecond.", "id": "f17640:m5"} {"signature": "def aggregationDivide(dividend, divisor):", "body": "dividendMonthSec = aggregationToMonthsSeconds(dividend)divisorMonthSec = aggregationToMonthsSeconds(divisor)if (dividendMonthSec[''] != and divisorMonthSec[''] != )or (dividendMonthSec[''] != and divisorMonthSec[''] != ):raise RuntimeError(\"\"\"\"\"\")if dividendMonthSec[''] > :return float(dividendMonthSec['']) / divisor['']else:return float(dividendMonthSec['']) / divisorMonthSec['']", "docstring": "Return the result from dividing two dicts that represent date and time.\n\nBoth dividend and divisor are dicts that contain one or more of the following\nkeys: 'years', 'months', 'weeks', 'days', 'hours', 'minutes', seconds',\n'milliseconds', 'microseconds'.\n\nFor example:\n\n::\n\n aggregationDivide({'hours': 4}, {'minutes': 15}) == 16\n\n:param dividend: (dict) The numerator, as a dict representing a date and time\n:param divisor: (dict) the denominator, as a dict representing a date and time\n:returns: (float) number of times divisor goes into dividend", "id": "f17640:m6"} {"signature": "@classmethoddef getString(cls, prop):", "body": "if cls._properties is None:cls._readStdConfigFiles()envValue = os.environ.get(\"\" % (cls.envPropPrefix,prop.replace('', '')), None)if envValue is not None:return envValuereturn cls._properties[prop]", "docstring": "Retrieve the requested property as a string. If property does not exist,\n then KeyError will be raised.\n\n :param prop: (string) name of the property\n :raises: KeyError\n :returns: (string) property value", "id": "f17641:c0:m0"} {"signature": "@classmethoddef getBool(cls, prop):", "body": "value = cls.getInt(prop)if value not in (, ):raise ValueError(\"\" % (value, prop))return bool(value)", "docstring": "Retrieve the requested property and return it as a bool. If property\n does not exist, then KeyError will be raised. If the property value is\n neither 0 nor 1, then ValueError will be raised\n\n :param prop: (string) name of the property\n :raises: KeyError, ValueError\n :returns: (bool) property value", "id": "f17641:c0:m1"} {"signature": "@classmethoddef getInt(cls, prop):", "body": "return int(cls.getString(prop))", "docstring": "Retrieve the requested property and return it as an int. If property\n does not exist, then KeyError will be raised.\n\n :param prop: (string) name of the property\n :returns: (int) property value", "id": "f17641:c0:m2"} {"signature": "@classmethoddef getFloat(cls, prop):", "body": "return float(cls.getString(prop))", "docstring": "Retrieve the requested property and return it as a float. If property\n does not exist, then KeyError will be raised.\n\n :param prop: (string) name of the property\n :returns: (float) property value", "id": "f17641:c0:m3"} {"signature": "@classmethoddef get(cls, prop, default=None):", "body": "try:return cls.getString(prop)except KeyError:return default", "docstring": "Get the value of the given configuration property as string. This\n returns a string which is the property value, or the value of \"default\" arg.\n If the property is not found, use :meth:`getString` instead.\n\n .. note:: it's atypical for our configuration properties to be missing - a\n missing configuration property is usually a very serious error. Because\n of this, it's preferable to use one of the :meth:`getString`,\n :meth:`getInt`, :meth:`getFloat`, etc. 
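aggregationToMonthsSeconds and aggregationDivide above reduce an aggregation dict to a comparable pair of months and seconds; the numeric factors are elided in this dump. A self-contained sketch of the same conversion and division, assuming the usual calendar factors (60 s/min, 24 h/day, 7 days/week, 12 months/year):

.. code-block:: python

    def interval_to_months_seconds(interval):
        # Collapse sub-month units into seconds, year/month units into months.
        seconds = interval.get('microseconds', 0) * 0.000001
        seconds += interval.get('milliseconds', 0) * 0.001
        seconds += interval.get('seconds', 0)
        seconds += interval.get('minutes', 0) * 60
        seconds += interval.get('hours', 0) * 60 * 60
        seconds += interval.get('days', 0) * 24 * 60 * 60
        seconds += interval.get('weeks', 0) * 7 * 24 * 60 * 60
        months = interval.get('months', 0) + 12 * interval.get('years', 0)
        return {'months': months, 'seconds': seconds}

    def interval_divide(dividend, divisor):
        a = interval_to_months_seconds(dividend)
        b = interval_to_months_seconds(divisor)
        # Like the original, refuse to mix month-based and second-based intervals.
        if (a['months'] and b['seconds']) or (a['seconds'] and b['months']):
            raise RuntimeError("Cannot mix month-based and second-based intervals")
        if a['months'] > 0:
            return float(a['months']) / b['months']
        return float(a['seconds']) / b['seconds']

    print(interval_to_months_seconds({'years': 1, 'hours': 4, 'microseconds': 42}))
    # -> {'months': 12, 'seconds': ~14400.000042}
    assert interval_divide({'hours': 4}, {'minutes': 15}) == 16.0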
variants instead of :meth:`get`.\n Those variants will raise KeyError when an expected property is missing.\n\n :param prop: (string) name of the property\n :param default: default value to return if property does not exist\n :returns: (string) property value, or default if the property does not exist", "id": "f17641:c0:m4"} {"signature": "@classmethoddef set(cls, prop, value):", "body": "if cls._properties is None:cls._readStdConfigFiles()cls._properties[prop] = str(value)", "docstring": "Set the value of the given configuration property.\n\n :param prop: (string) name of the property\n :param value: (object) value to set", "id": "f17641:c0:m5"} {"signature": "@classmethoddef dict(cls):", "body": "if cls._properties is None:cls._readStdConfigFiles()result = dict(cls._properties)keys = os.environ.keys()replaceKeys = filter(lambda x: x.startswith(cls.envPropPrefix),keys)for envKey in replaceKeys:key = envKey[len(cls.envPropPrefix):]key = key.replace('', '')result[key] = os.environ[envKey]return result", "docstring": "Return a dict containing all of the configuration properties\n\n :returns: (dict) containing all configuration properties.", "id": "f17641:c0:m6"} {"signature": "@classmethoddef readConfigFile(cls, filename, path=None):", "body": "properties = cls._readConfigFile(filename, path)if cls._properties is None:cls._properties = dict()for name in properties:if '' in properties[name]:cls._properties[name] = properties[name]['']", "docstring": "Parse the given XML file and store all properties it describes.\n\n :param filename: (string) name of XML file to parse (no path)\n :param path: (string) path of the XML file. If None, then use the standard\n configuration search path.", "id": "f17641:c0:m7"} {"signature": "@classmethoddef _readConfigFile(cls, filename, path=None):", "body": "outputProperties = dict()if path is None:filePath = cls.findConfigFile(filename)else:filePath = os.path.join(path, filename)try:if filePath is not None:try:_getLogger().debug(\"\", filePath)with open(filePath, '') as inp:contents = inp.read()except Exception:raise RuntimeError(\"\" % filePath)else:try:contents = resource_string(\"\", filename)except Exception as resourceException:if filename in [USER_CONFIG, CUSTOM_CONFIG]:contents = ''else:raise resourceExceptionelements = ElementTree.XML(contents)if elements.tag != '':raise RuntimeError(\"\"\"\" % (elements.tag))propertyElements = elements.findall('')for propertyItem in propertyElements:propInfo = dict()propertyAttributes = list(propertyItem)for propertyAttribute in propertyAttributes:propInfo[propertyAttribute.tag] = propertyAttribute.textname = propInfo.get('', None)if '' in propInfo and propInfo[''] is None:value = ''else:value = propInfo.get('', None)if value is None:if '' in propInfo:continueelse:raise RuntimeError(\"\"\"\" % (str(propInfo)))restOfValue = valuevalue = ''while True:pos = restOfValue.find('')if pos == -:value += restOfValuebreakvalue += restOfValue[:pos]varTailPos = restOfValue.find('', pos)if varTailPos == -:raise RuntimeError(\"\"\"\" % (restOfValue))varname = restOfValue[pos+:varTailPos]if varname not in os.environ:raise RuntimeError(\"\"\"\" % (varname))envVarValue = os.environ[varname]value += envVarValuerestOfValue = restOfValue[varTailPos+:]if name is None:raise RuntimeError(\"\"\"\" % (str(propInfo)))propInfo[''] = valueoutputProperties[name] = propInforeturn outputPropertiesexcept Exception:_getLogger().exception(\"\",filePath)raise", "docstring": "Parse the given XML file and return a dict describing the file.\n\n :param 
filename: (string) name of XML file to parse (no path)\n :param path: (string) path of the XML file. If None, then use the standard\n configuration search path.\n :returns: (dict) with each property as a key and a dict of all the\n property's attributes as value", "id": "f17641:c0:m8"} {"signature": "@classmethoddef clear(cls):", "body": "cls._properties = Nonecls._configPaths = None", "docstring": "Clear out the entire configuration.", "id": "f17641:c0:m9"} {"signature": "@classmethoddef findConfigFile(cls, filename):", "body": "paths = cls.getConfigPaths()for p in paths:testPath = os.path.join(p, filename)if os.path.isfile(testPath):return os.path.join(p, filename)", "docstring": "Search the configuration path (specified via the NTA_CONF_PATH\n environment variable) for the given filename. If found, return the complete\n path to the file.\n\n :param filename: (string) name of file to locate", "id": "f17641:c0:m10"} {"signature": "@classmethoddef getConfigPaths(cls):", "body": "configPaths = []if cls._configPaths is not None:return cls._configPathselse:if '' in os.environ:configVar = os.environ['']configPaths = configVar.split(os.pathsep)return configPaths", "docstring": "Return the list of paths to search for configuration files.\n\n :returns: (list) of paths", "id": "f17641:c0:m11"} {"signature": "@classmethoddef setConfigPaths(cls, paths):", "body": "cls._configPaths = list(paths)", "docstring": "Modify the paths we use to search for configuration files.\n\n :param paths: (list) of paths to search for config files.", "id": "f17641:c0:m12"} {"signature": "@classmethoddef _readStdConfigFiles(cls):", "body": "cls.readConfigFile(DEFAULT_CONFIG)cls.readConfigFile(USER_CONFIG)", "docstring": "Read in all standard configuration files", "id": "f17641:c0:m13"} {"signature": "@classmethoddef getCustomDict(cls):", "body": "return _CustomConfigurationFileWrapper.getCustomDict()", "docstring": "returns: (dict) containing all custom configuration properties.", "id": "f17642:c0:m0"} {"signature": "@classmethoddef setCustomProperty(cls, propertyName, value):", "body": "cls.setCustomProperties({propertyName : value})", "docstring": "Set a single custom setting and persist it to the custom configuration \nstore.\n\n:param propertyName: (string) containing the name of the property to get\n:param value: (object) value to set the property to", "id": "f17642:c0:m1"} {"signature": "@classmethoddef setCustomProperties(cls, properties):", "body": "_getLogger().info(\"\",properties, traceback.format_stack())_CustomConfigurationFileWrapper.edit(properties)for propertyName, value in properties.items():cls.set(propertyName, value)", "docstring": "Set multiple custom properties and persist them to the custom configuration \nstore.\n\n:param properties: (dict) of property name/value pairs to set", "id": "f17642:c0:m2"} {"signature": "@classmethoddef clear(cls):", "body": "super(Configuration, cls).clear()_CustomConfigurationFileWrapper.clear(persistent=False)", "docstring": "Clear all configuration properties from in-memory cache, but do NOT alter \nthe custom configuration file. 
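The Configuration class above layers typed getters (getInt, getFloat, getBool) and a defaulting get() on top of string-valued properties, and the subclass adds setCustomProperty/setCustomProperties for persisted overrides. A hypothetical usage sketch; the import path and the property names are assumptions for illustration, only the method names come from the records above:

.. code-block:: python

    # Assumed import path; adjust to wherever Configuration lives in your install.
    from nupic.support.configuration import Configuration

    # Typed getters raise KeyError when the property is missing,
    # and getBool() additionally requires the value to be 0 or 1.
    level = Configuration.getInt("nupic.example.verbosity")    # hypothetical name
    enabled = Configuration.getBool("nupic.example.enabled")   # hypothetical name

    # get() returns a default instead of raising for missing properties.
    host = Configuration.get("nupic.example.host", default="localhost")

    # set() changes the in-memory value; setCustomProperty() also persists it
    # to the custom configuration store.
    Configuration.set("nupic.example.host", "example.org")
    Configuration.setCustomProperty("nupic.example.host", "example.org")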
Used in unit-testing.", "id": "f17642:c0:m3"} {"signature": "@classmethoddef resetCustomConfig(cls):", "body": "_getLogger().info(\"\"\"\", traceback.format_stack())super(Configuration, cls).clear()_CustomConfigurationFileWrapper.clear(persistent=True)", "docstring": "Clear all custom configuration settings and delete the persistent custom \nconfiguration store.", "id": "f17642:c0:m4"} {"signature": "@classmethoddef loadCustomConfig(cls):", "body": "cls.readConfigFile(_CustomConfigurationFileWrapper.customFileName)", "docstring": "Loads custom configuration settings from their persistent storage.\n\n.. warning :: DO NOT CALL THIS: It's typically not necessary to call this \n method directly. This method exists *solely* for the benefit of \n ``prepare_conf.py``, which needs to load configuration files selectively.", "id": "f17642:c0:m5"} {"signature": "@classmethoddef _readStdConfigFiles(cls):", "body": "super(Configuration, cls)._readStdConfigFiles()cls.loadCustomConfig()", "docstring": "Intercept the _readStdConfigFiles call from our base config class to\n read in base and custom configuration settings.", "id": "f17642:c0:m6"} {"signature": "@classmethoddef clear(cls, persistent=False):", "body": "if persistent:try:os.unlink(cls.getPath())except OSError as e:if e.errno != errno.ENOENT:_getLogger().exception(\"\"\"\", e.errno, cls.getPath())raisecls._path = None", "docstring": "If persistent is True, delete the temporary file\n\n Parameters:\n ----------------------------------------------------------------\n persistent: if True, custom configuration file is deleted", "id": "f17642:c1:m0"} {"signature": "@classmethoddef getCustomDict(cls):", "body": "if not os.path.exists(cls.getPath()):return dict()properties = Configuration._readConfigFile(os.path.basename(cls.getPath()), os.path.dirname(cls.getPath()))values = dict()for propName in properties:if '' in properties[propName]:values[propName] = properties[propName]['']return values", "docstring": "Returns a dict of all temporary values in custom configuration file", "id": "f17642:c1:m1"} {"signature": "@classmethoddef edit(cls, properties):", "body": "copyOfProperties = copy(properties)configFilePath = cls.getPath()try:with open(configFilePath, '') as fp:contents = fp.read()except IOError as e:if e.errno != errno.ENOENT:_getLogger().exception(\"\"\"\",e.errno, configFilePath, properties)raisecontents = ''try:elements = ElementTree.XML(contents)ElementTree.tostring(elements)except Exception as e:msg = \"\"\"\" %(configFilePath, contents, type(e), e)_getLogger().exception(msg)raise RuntimeError(msg).with_traceback(sys.exc_info()[])if elements.tag != '':e = \"\" %(elements.tag)_getLogger().error(e)raise RuntimeError(e)for propertyItem in elements.findall(''):propInfo = dict((attr.tag, attr.text) for attr in propertyItem)name = propInfo['']if name in copyOfProperties:foundValues = propertyItem.findall('')if len(foundValues) > :foundValues[].text = str(copyOfProperties.pop(name))if not copyOfProperties:breakelse:e = \"\" % (name,)_getLogger().error(e)raise RuntimeError(e)for propertyName, value in copyOfProperties.items():newProp = ElementTree.Element('')nameTag = ElementTree.Element('')nameTag.text = propertyNamenewProp.append(nameTag)valueTag = ElementTree.Element('')valueTag.text = str(value)newProp.append(valueTag)elements.append(newProp)try:makeDirectoryFromAbsolutePath(os.path.dirname(configFilePath))with open(configFilePath,'') as fp:fp.write(ElementTree.tostring(elements))except Exception as e:_getLogger().exception(\"\"\"\", properties, 
configFilePath)raise", "docstring": "Edits the XML configuration file with the parameters specified by\n properties\n\n Parameters:\n ----------------------------------------------------------------\n properties: dict of settings to be applied to the custom configuration store\n (key is property name, value is value)", "id": "f17642:c1:m2"} {"signature": "@classmethoddef _setPath(cls):", "body": "cls._path = os.path.join(os.environ[''],cls.customFileName)", "docstring": "Sets the path of the custom configuration file", "id": "f17642:c1:m3"} {"signature": "@classmethoddef getPath(cls):", "body": "if cls._path is None:cls._setPath()return cls._path", "docstring": "Get the path of the custom configuration file", "id": "f17642:c1:m4"} {"signature": "def logExceptions(logger=None):", "body": "logger = (logger if logger is not None else logging.getLogger(__name__))def exceptionLoggingDecorator(func):@functools.wraps(func)def exceptionLoggingWrap(*args, **kwargs):try:return func(*args, **kwargs)except:logger.exception(\"\",sys.exc_info()[], func, ''.join(traceback.format_stack()), )raisereturn exceptionLoggingWrapreturn exceptionLoggingDecorator", "docstring": "Returns a closure suitable for use as function/method decorator for\n logging exceptions that leave the scope of the decorated function. Exceptions\n are logged at ERROR level.\n\n logger: user-supplied logger instance. Defaults to logging.getLogger.\n\n Usage Example:\n NOTE: logging must be initialized *before* any loggers are created, else\n there will be no output; see nupic.support.initLogging()\n\n @logExceptions()\n def myFunctionFoo():\n ...\n raise RuntimeError(\"something bad happened\")\n ...", "id": "f17643:m0"} {"signature": "def logEntryExit(getLoggerCallback=logging.getLogger,entryExitLogLevel=logging.DEBUG, logArgs=False,logTraceback=False):", "body": "def entryExitLoggingDecorator(func):@functools.wraps(func)def entryExitLoggingWrap(*args, **kwargs):if entryExitLogLevel is None:enabled = Falseelse:logger = getLoggerCallback()enabled = logger.isEnabledFor(entryExitLogLevel)if not enabled:return func(*args, **kwargs)funcName = str(func)if logArgs:argsRepr = ''.join([repr(a) for a in args] +['' % (k,v,) for k,v in kwargs.items()])else:argsRepr = ''logger.log(entryExitLogLevel, \"\", funcName, argsRepr,'' if not logTraceback else '' + repr(traceback.format_stack()))try:return func(*args, **kwargs)finally:logger.log(entryExitLogLevel, \"\", funcName, argsRepr,'' if not logTraceback else '' + repr(traceback.format_stack()))return entryExitLoggingWrapreturn entryExitLoggingDecorator", "docstring": "Returns a closure suitable for use as function/method decorator for\n logging entry/exit of function/method.\n\n getLoggerCallback: user-supplied callback function that takes no args and\n returns the logger instance to use for logging.\n entryExitLogLevel: Log level for logging entry/exit of decorated function;\n e.g., logging.DEBUG; pass None to disable entry/exit\n logging.\n logArgs: If True, also log args\n logTraceback: If True, also log Traceback information\n\n Usage Examples:\n NOTE: logging must be initialized *before* any loggers are created, else\n there will be no output; see nupic.support.initLogging()\n\n @logEntryExit()\n def myFunctionBar():\n ...\n\n\n @logEntryExit(logTraceback=True)\n @logExceptions()\n def myFunctionGamma():\n ...\n raise RuntimeError(\"something bad happened\")\n ...", "id": "f17643:m1"} {"signature": "def retry(timeoutSec, initialRetryDelaySec, 
maxRetryDelaySec,retryExceptions=(Exception,),retryFilter=lambda e, args, kwargs: True,logger=None, clientLabel=\"\"):", "body": "assert initialRetryDelaySec > , str(initialRetryDelaySec)assert timeoutSec >= , str(timeoutSec)assert maxRetryDelaySec >= initialRetryDelaySec,\"\" % (maxRetryDelaySec, initialRetryDelaySec)assert isinstance(retryExceptions, tuple), (\"\") % (type(retryExceptions),)if logger is None:logger = logging.getLogger(__name__)def retryDecorator(func):@functools.wraps(func)def retryWrap(*args, **kwargs):numAttempts = delaySec = initialRetryDelaySecstartTime = time.time()while True:numAttempts += try:result = func(*args, **kwargs)except retryExceptions as e:if not retryFilter(e, args, kwargs):if logger.isEnabledFor(logging.DEBUG):logger.debug('''', clientLabel, func,''.join(traceback.format_stack()), exc_info=True)raisenow = time.time()if now < startTime:startTime = nowif (now - startTime) >= timeoutSec:logger.exception('''', clientLabel, timeoutSec, numAttempts, func,''.join(traceback.format_stack()))raiseif numAttempts == :logger.warning('''', clientLabel, func, delaySec,timeoutSec, ''.join(traceback.format_stack()), exc_info=True)else:logger.debug('''',clientLabel, func, numAttempts, delaySec, timeoutSec,''.join(traceback.format_stack()), exc_info=True)time.sleep(delaySec)delaySec = min(delaySec*, maxRetryDelaySec)else:if numAttempts > :logger.info('',clientLabel, func, numAttempts)return resultreturn retryWrapreturn retryDecorator", "docstring": "Returns a closure suitable for use as function/method decorator for\n retrying a function being decorated.\n\n timeoutSec: How many seconds from time of initial call to stop\n retrying (floating point); 0 = no retries\n initialRetryDelaySec: Number of seconds to wait for first retry.\n Subsequent retries will occur at geometrically\n doubling intervals up to a maximum interval of\n maxRetryDelaySec (floating point)\n maxRetryDelaySec: Maximum amount of seconds to wait between retries\n (floating point)\n retryExceptions: A tuple (must be a tuple) of exception classes that,\n including their subclasses, should trigger retries;\n Default: any Exception-based exception will trigger\n retries\n retryFilter: Optional filter function used to further filter the\n exceptions in the retryExceptions tuple; called if the\n current exception meets the retryExceptions criteria:\n takes the current exception instance, args, and kwargs\n that were passed to the decorated function, and returns\n True to retry, False to allow the exception to be\n re-raised without retrying. 
Default: permits any\n exception that matches retryExceptions to be retried.\n logger: User-supplied logger instance to use for logging.\n None=defaults to logging.getLogger(__name__).\n\n Usage Example:\n NOTE: logging must be initialized *before* any loggers are created, else\n there will be no output; see nupic.support.initLogging()\n\n _retry = retry(timeoutSec=300, initialRetryDelaySec=0.2,\n maxRetryDelaySec=10, retryExceptions=[socket.error])\n @_retry\n def myFunctionFoo():\n ...\n raise RuntimeError(\"something bad happened\")\n ...", "id": "f17643:m2"} {"signature": "def makeDirectoryFromAbsolutePath(absDirPath):", "body": "assert os.path.isabs(absDirPath)try:os.makedirs(absDirPath)except OSError as e:if e.errno != os.errno.EEXIST:raisereturn absDirPath", "docstring": "Makes directory for the given directory path with default permissions.\n If the directory already exists, it is treated as success.\n\n :param absDirPath: (string) absolute path of the directory to create.\n :raises: OSError if directory creation fails\n :returns: (string) absolute path provided", "id": "f17644:m0"} {"signature": "@staticmethoddef _openStream(dataUrl,isBlocking, maxTimeout, bookmark,firstRecordIdx):", "body": "filePath = dataUrl[len(FILE_PREF):]if not os.path.isabs(filePath):filePath = os.path.join(os.getcwd(), filePath)return FileRecordStream(streamID=filePath,write=False,bookmark=bookmark,firstRecord=firstRecordIdx)", "docstring": "Open the underlying file stream\n This only supports 'file://' prefixed paths.\n\n :returns: record stream instance\n :rtype: FileRecordStream", "id": "f17645:c1:m1"} {"signature": "def close(self):", "body": "return self._recordStore.close()", "docstring": "Close the stream", "id": "f17645:c1:m2"} {"signature": "def getNextRecord(self):", "body": "while True:if self._sourceLastRecordIdx is not None andself._recordStore.getNextRecordIdx() >= self._sourceLastRecordIdx:preAggValues = None bookmark = self._recordStore.getBookmark()else:preAggValues = self._recordStore.getNextRecord()bookmark = self._recordStore.getBookmark()if preAggValues == (): if self._eofOnTimeout:preAggValues = None else:return preAggValues self._logger.debug('',self._recordStore.getNextRecordIdx()-, preAggValues)(fieldValues, aggBookmark) = self._aggregator.next(preAggValues, bookmark)if fieldValues is not None:self._aggBookmark = aggBookmarkif preAggValues is None and fieldValues is None:return Noneif fieldValues is not None:breakif self._needFieldsFiltering:values = []srcDict = dict(zip(self._recordStoreFieldNames, fieldValues))for name in self._streamFieldNames:values.append(srcDict[name])fieldValues = valuesif self._writer is not None:self._writer.appendRecord(fieldValues)self._recordCount += self._logger.debug('''',self._recordCount-, fieldValues, self._aggBookmark)return fieldValues", "docstring": "Returns combined data from all sources (values only).\n\n :returns: None on EOF; empty sequence on timeout.", "id": "f17645:c1:m3"} {"signature": "def getDataRowCount(self):", "body": "inputRowCountAfterAggregation = while True:record = self.getNextRecord()if record is None:return inputRowCountAfterAggregationinputRowCountAfterAggregation += if inputRowCountAfterAggregation > :raise RuntimeError('')", "docstring": "Iterates through stream to calculate total records after aggregation.\nThis will alter the bookmark state.", "id": "f17645:c1:m4"} {"signature": "def getNextRecordIdx(self):", "body": "return self._recordCount", "docstring": ":returns: the index of the record that will be read next from\n 
:meth:`getNextRecord`.", "id": "f17645:c1:m5"} {"signature": "def recordsExistAfter(self, bookmark):", "body": "return self._recordStore.recordsExistAfter(bookmark)", "docstring": ":returns: True if there are records left after the bookmark.", "id": "f17645:c1:m6"} {"signature": "def getAggregationMonthsAndSeconds(self):", "body": "return self._aggMonthsAndSeconds", "docstring": "Returns the aggregation period of the record stream as a dict\n containing 'months' and 'seconds'. The months is always an integer and\n seconds is a floating point. Only one is allowed to be non-zero at a\n time.\n\n Will return the aggregation period from this call. This call is\n used by the :meth:`nupic.data.record_stream.RecordStream.getNextRecordDict`\n method to assign a record number to a record given its timestamp and the\n aggregation interval.\n\n :returns: aggregationPeriod (as a dict) where:\n\n - ``months``: number of months in aggregation period\n - ``seconds``: number of seconds in aggregation period\n (as a float)", "id": "f17645:c1:m7"} {"signature": "def getFieldNames(self):", "body": "return [f.name for f in self._streamFields]", "docstring": "Returns all fields in all inputs (list of plain names).\n\n.. note:: currently, only one input is supported", "id": "f17645:c1:m11"} {"signature": "def getFields(self):", "body": "return self._streamFields", "docstring": ":returns: a sequence of :class:`nupic.data.fieldmeta.FieldMetaInfo` for each\n field in the stream.", "id": "f17645:c1:m12"} {"signature": "def getBookmark(self):", "body": "return self._aggBookmark", "docstring": ":returns: a bookmark to the current position", "id": "f17645:c1:m13"} {"signature": "def clearStats(self):", "body": "self._recordStore.clearStats()", "docstring": "Resets stats collected so far.", "id": "f17645:c1:m14"} {"signature": "def getStats(self):", "body": "recordStoreStats = self._recordStore.getStats()streamStats = dict()for (key, values) in recordStoreStats.items():fieldStats = dict(zip(self._recordStoreFieldNames, values))streamValues = []for name in self._streamFieldNames:streamValues.append(fieldStats[name])streamStats[key] = streamValuesreturn streamStats", "docstring": "TODO: This method needs to be enhanced to get the stats on the *aggregated*\nrecords.\n\n:returns: stats (like min and max values of the fields).", "id": "f17645:c1:m15"} {"signature": "def getError(self):", "body": "return self._recordStore.getError()", "docstring": ":returns: errors saved in the stream.", "id": "f17645:c1:m16"} {"signature": "def setError(self, error):", "body": "self._recordStore.setError(error)", "docstring": "Saves specified error in the stream.\n\n :param error: to save", "id": "f17645:c1:m17"} {"signature": "def isCompleted(self):", "body": "return self._recordStore.isCompleted()", "docstring": ":returns: True if all records have been read.", "id": "f17645:c1:m18"} {"signature": "def setCompleted(self, completed=True):", "body": "self._recordStore.setCompleted(completed)", "docstring": "Marks the stream completed (True or False)\n\n:param completed: (bool) is completed or not", "id": "f17645:c1:m19"} {"signature": "def setTimeout(self, timeout):", "body": "self._recordStore.setTimeout(timeout)", "docstring": "Set the read timeout.\n\n :param timeout: (float or int) timeout length", "id": "f17645:c1:m20"} {"signature": "def rUpdate(original, updates):", "body": "dictPairs = [(original, updates)]while len(dictPairs) > :original, updates = dictPairs.pop()for k, v in updates.items():if k in original and 
isinstance(original[k], dict) and isinstance(v, dict):dictPairs.append((original[k], v))else:original[k] = v", "docstring": "Recursively updates the values in original with the values from updates.", "id": "f17646:m0"} {"signature": "def rApply(d, f):", "body": "remainingDicts = [(d, ())]while len(remainingDicts) > :current, prevKeys = remainingDicts.pop()for k, v in current.items():keys = prevKeys + (k,)if isinstance(v, dict):remainingDicts.insert(, (v, keys))else:f(v, keys)", "docstring": "Recursively applies f to the values in dict d.\n\n Args:\n d: The dict to recurse over.\n f: A function to apply to values in d that takes the value and a list of\n keys from the root of the dict to the value.", "id": "f17646:m1"} {"signature": "def dictDiffAndReport(da, db):", "body": "differences = dictDiff(da, db)if not differences:return differencesif differences['']:print(\"\" % differences[''])if differences['']:print(\"\" % differences[''])for key in differences['']:print(\"\" % (key, key))print(\"\" % (key, da[key]))print(\"\" % (key, db[key]))return differences", "docstring": "Compares two python dictionaries at the top level and report differences,\n if any, to stdout\n\n da: first dictionary\n db: second dictionary\n\n Returns: The same value as returned by dictDiff() for the given args", "id": "f17646:m5"} {"signature": "def dictDiff(da, db):", "body": "different = FalseresultDict = dict()resultDict[''] = set(da) - set(db)if resultDict['']:different = TrueresultDict[''] = set(db) - set(da)if resultDict['']:different = TrueresultDict[''] = []for key in (set(da) - resultDict['']):comparisonResult = da[key] == db[key]if isinstance(comparisonResult, bool):isEqual = comparisonResultelse:isEqual = comparisonResult.all()if not isEqual:resultDict[''].append(key)different = Trueassert (((resultDict[''] or resultDict[''] orresultDict['']) and different) or not different)return resultDict if different else None", "docstring": "Compares two python dictionaries at the top level and return differences\n\n da: first dictionary\n db: second dictionary\n\n Returns: None if dictionaries test equal; otherwise returns a\n dictionary as follows:\n {\n 'inAButNotInB':\n \n 'inBButNotInA':\n \n 'differentValues':\n \n }", "id": "f17646:m6"} {"signature": "def sort(filename, key, outputFile, fields=None, watermark= * * ):", "body": "if fields is not None:assert set(key).issubset(set([f[] for f in fields]))with FileRecordStream(filename) as f:if fields:fieldNames = [ff[] for ff in fields]indices = [f.getFieldNames().index(name) for name in fieldNames]assert len(indices) == len(fields)else:fileds = f.getFields()fieldNames = f.getFieldNames()indices = Nonekey = [fieldNames.index(name) for name in key]chunk = records = []for i, r in enumerate(f):if indices:temp = []for i in indices:temp.append(r[i])r = temprecords.append(r)available_memory = psutil.avail_phymem()if available_memory < watermark:_sortChunk(records, key, chunk, fields)records = []chunk += if len(records) > :_sortChunk(records, key, chunk, fields)chunk += _mergeFiles(key, chunk, outputFile, fields)", "docstring": "Sort a potentially big file\n\n filename - the input file (standard File format)\n key - a list of field names to sort by\n outputFile - the name of the output file\n fields - a list of fields that should be included (all fields if None)\n watermark - when available memory goes bellow the watermark create a new chunk\n\n sort() works by reading as records from the file into memory\n and calling _sortChunk() on each chunk. 
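rUpdate above performs an in-place recursive merge of one dict into another, and dictDiff reports top-level differences. A self-contained sketch of the recursive-merge behaviour the docstring describes (the names here are illustrative, not the library's):

.. code-block:: python

    def recursive_update(original, updates):
        # Merge `updates` into `original` in place: nested dicts are merged,
        # every other value is simply overwritten.
        stack = [(original, updates)]
        while stack:
            dst, src = stack.pop()
            for key, value in src.items():
                if key in dst and isinstance(dst[key], dict) and isinstance(value, dict):
                    stack.append((dst[key], value))
                else:
                    dst[key] = value

    config = {'model': {'sp': {'columns': 2048}, 'tm': {'cells': 32}}}
    recursive_update(config, {'model': {'sp': {'columns': 4096}}})
    # Only the nested 'columns' value changes; the sibling 'tm' dict is untouched.
    assert config == {'model': {'sp': {'columns': 4096}, 'tm': {'cells': 32}}}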
In the process it gets\n rid of unneeded fields if any. Once all the chunks have been sorted and\n written to chunk files it calls _merge() to merge all the chunks into a\n single sorted file.\n\n Note, that sort() gets a key that contains field names, which it converts\n into field indices for _sortChunk() becuase _sortChunk() doesn't need to know\n the field name.\n\n sort() figures out by itself how many chunk files to use by reading records\n from the file until the low watermark value of availabel memory is hit and\n then it sorts the current records, generates a chunk file, clears the sorted\n records and starts on a new chunk.\n\n The key field names are turned into indices", "id": "f17647:m0"} {"signature": "def _sortChunk(records, key, chunkIndex, fields):", "body": "title(additional='' % (str(key), chunkIndex))assert len(records) > records.sort(key=itemgetter(*key))if chunkIndex is not None:filename = '' % chunkIndexwith FileRecordStream(filename, write=True, fields=fields) as o:for r in records:o.appendRecord(r)assert os.path.getsize(filename) > return records", "docstring": "Sort in memory chunk of records\n\n records - a list of records read from the original dataset\n key - a list of indices to sort the records by\n chunkIndex - the index of the current chunk\n\n The records contain only the fields requested by the user.\n\n _sortChunk() will write the sorted records to a standard File\n named \"chunk_.csv\" (chunk_0.csv, chunk_1.csv,...).", "id": "f17647:m1"} {"signature": "def _mergeFiles(key, chunkCount, outputFile, fields):", "body": "title()files = [FileRecordStream('' % i) for i in range(chunkCount)]with FileRecordStream(outputFile, write=True, fields=fields) as o:files = [FileRecordStream('' % i) for i in range(chunkCount)]records = [f.getNextRecord() for f in files]while not all(r is None for r in records):indices = [i for i,r in enumerate(records) if r is not None]records = [records[i] for i in indices]files = [files[i] for i in indices]r = min(records, key=itemgetter(*key))o.appendRecord(r)index = records.index(r)records[index] = files[index].getNextRecord()for i, f in enumerate(files):f.close()os.remove('' % i)", "docstring": "Merge sorted chunk files into a sorted output file\n\n chunkCount - the number of available chunk files\n outputFile the name of the sorted output file\n\n _mergeFiles()", "id": "f17647:m2"} {"signature": "def close(self):", "body": "if self._file is not None:self._file.close()self._file = None", "docstring": "Closes the stream.", "id": "f17648:c0:m3"} {"signature": "def rewind(self):", "body": "super(FileRecordStream, self).rewind()self.close()self._file = open(self._filename, self._mode)self._reader = csv.reader(self._file, dialect=\"\")self._reader.next()self._reader.next()self._reader.next()self._recordCount = ", "docstring": "Put us back at the beginning of the file again.", "id": "f17648:c0:m4"} {"signature": "def getNextRecord(self, useCache=True):", "body": "assert self._file is not Noneassert self._mode == self._FILE_READ_MODEtry:line = self._reader.next()except StopIteration:if self.rewindAtEOF:if self._recordCount == :raise Exception(\"\"\"\" % self._filename)self.rewind()line = self._reader.next()else:return Noneself._recordCount += record = []for i, f in enumerate(line):if f in self._missingValues:record.append(SENTINEL_VALUE_FOR_MISSING_DATA)else:record.append(self._adapters[i](f))return record", "docstring": "Returns next available data record from the file.\n\n :returns: a data row (a list or tuple) if available; None, if no 
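sort(), _sortChunk(), and _mergeFiles() above implement an external sort: records are read until available memory drops below a watermark, each chunk is sorted on the key fields and written to its own chunk file, and the chunk files are finally merged into one sorted output. A compact sketch of the same chunk-and-merge idea, using in-memory lists in place of chunk files and heapq.merge in place of the manual min() loop:

.. code-block:: python

    import heapq
    from operator import itemgetter

    def external_sort(records, key_indices, chunk_size=3):
        # Phase 1: split into chunks and sort each one independently
        # (the real implementation sizes chunks by available memory).
        key = itemgetter(*key_indices)
        chunks = [sorted(records[i:i + chunk_size], key=key)
                  for i in range(0, len(records), chunk_size)]
        # Phase 2: k-way merge of the already-sorted chunks.
        return list(heapq.merge(*chunks, key=key))

    rows = [(3, 'c'), (1, 'a'), (5, 'e'), (2, 'b'), (4, 'd')]
    print(external_sort(rows, key_indices=[0]))
    # -> [(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd'), (5, 'e')]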
more\n records in the table (End of Stream - EOS); empty sequence (list\n or tuple) when timing out while waiting for the next record.", "id": "f17648:c0:m5"} {"signature": "def appendRecord(self, record):", "body": "assert self._file is not Noneassert self._mode == self._FILE_WRITE_MODEassert isinstance(record, (list, tuple)),\"\" + repr(type(record))assert len(record) == self._fieldCount,\"\" % (len(record), self._fieldCount)if self._recordCount == :names, types, specials = zip(*self.getFields())for line in names, types, specials:self._writer.writerow(line)self._updateSequenceInfo(record)line = [self._adapters[i](f) for i, f in enumerate(record)]self._writer.writerow(line)self._recordCount += ", "docstring": "Saves the record in the underlying csv file.\n\n:param record: a list of Python objects that will be string-ified", "id": "f17648:c0:m6"} {"signature": "def appendRecords(self, records, progressCB=None):", "body": "for record in records:self.appendRecord(record)if progressCB is not None:progressCB()", "docstring": "Saves multiple records in the underlying storage.\n\n:param records: array of records as in\n :meth:`~.FileRecordStream.appendRecord`\n:param progressCB: (function) callback to report progress", "id": "f17648:c0:m7"} {"signature": "def getBookmark(self):", "body": "if self._write and self._recordCount==:return NonerowDict = dict(filepath=os.path.realpath(self._filename),currentRow=self._recordCount)return json.dumps(rowDict)", "docstring": "Gets a bookmark or anchor to the current position.\n\n:returns: an anchor to the current position in the data. Passing this\n anchor to a constructor makes the current position to be the first\n returned record.", "id": "f17648:c0:m8"} {"signature": "def recordsExistAfter(self, bookmark):", "body": "return (self.getDataRowCount() - self.getNextRecordIdx()) > ", "docstring": "Returns whether there are more records from current position. 
``bookmark``\nis not used in this implementation.\n\n:return: True if there are records left after current position.", "id": "f17648:c0:m9"} {"signature": "def seekFromEnd(self, numRecords):", "body": "self._file.seek(self._getTotalLineCount() - numRecords)return self.getBookmark()", "docstring": "Seeks to ``numRecords`` from the end and returns a bookmark to the new\nposition.\n\n:param numRecords: how far to seek from end of file.\n:return: bookmark to desired location.", "id": "f17648:c0:m10"} {"signature": "def setAutoRewind(self, autoRewind):", "body": "self.rewindAtEOF = autoRewind", "docstring": "Controls whether :meth:`~.FileRecordStream.getNextRecord` should\nautomatically rewind the source when EOF is reached.\n\n:param autoRewind: (bool)\n\n - if True, :meth:`~.FileRecordStream.getNextRecord` will automatically rewind\n the source on EOF.\n - if False, :meth:`~.FileRecordStream.getNextRecord` will not automatically\n rewind the source on EOF.", "id": "f17648:c0:m11"} {"signature": "def getStats(self):", "body": "if self._stats == None:assert self._mode == self._FILE_READ_MODEinFile = open(self._filename, self._FILE_READ_MODE)reader = csv.reader(inFile, dialect=\"\")names = [n.strip() for n in reader.next()]types = [t.strip() for t in reader.next()]reader.next()self._stats = dict()self._stats[''] = []self._stats[''] = []for i in xrange(len(names)):self._stats[''].append(None)self._stats[''].append(None)while True:try:line = reader.next()for i, f in enumerate(line):if (len(types) > i andtypes[i] in [FieldMetaType.integer, FieldMetaType.float] andf not in self._missingValues):value = self._adapters[i](f)if self._stats[''][i] == None orself._stats[''][i] < value:self._stats[''][i] = valueif self._stats[''][i] == None orself._stats[''][i] > value:self._stats[''][i] = valueexcept StopIteration:breakreturn self._stats", "docstring": "Parse the file using dedicated reader and collect fields stats. Never\ncalled if user of :class:`~.FileRecordStream` does not invoke\n:meth:`~.FileRecordStream.getStats` method.\n\n:returns:\n a dictionary of stats. In the current implementation, min and max\n fields are supported. Example of the return dictionary is:\n\n .. code-block:: python\n\n {\n 'min' : [f1_min, f2_min, None, None, fn_min],\n 'max' : [f1_max, f2_max, None, None, fn_max]\n }\n\n (where fx_min/fx_max are set for scalar fields, or None if not)", "id": "f17648:c0:m12"} {"signature": "def clearStats(self):", "body": "self._stats = None", "docstring": "Resets stats collected so far.", "id": "f17648:c0:m13"} {"signature": "def getError(self):", "body": "return None", "docstring": "Not implemented. CSV file version does not provide storage for the error\ninformation", "id": "f17648:c0:m14"} {"signature": "def setError(self, error):", "body": "return", "docstring": "Not implemented. CSV file version does not provide storage for the error\ninformation", "id": "f17648:c0:m15"} {"signature": "def isCompleted(self):", "body": "return True", "docstring": "Not implemented. 
CSV file is always considered completed.", "id": "f17648:c0:m16"} {"signature": "def setCompleted(self, completed=True):", "body": "return", "docstring": "Not implemented: CSV file is always considered completed, nothing to do.", "id": "f17648:c0:m17"} {"signature": "def getFieldNames(self):", "body": "return [f.name for f in self._fields]", "docstring": ":returns: (list) field names associated with the data.", "id": "f17648:c0:m18"} {"signature": "def getFields(self):", "body": "if self._fields is None:return Noneelse:return copy.copy(self._fields)", "docstring": ":returns: a sequence of :class:`~.FieldMetaInfo`\n ``name``/``type``/``special`` tuples for each field in the stream.", "id": "f17648:c0:m19"} {"signature": "def _updateSequenceInfo(self, r):", "body": "newSequence = FalsesequenceId = (r[self._sequenceIdIdx]if self._sequenceIdIdx is not None else None)if sequenceId != self._currSequence:if sequenceId in self._sequences:raise Exception('' %(sequenceId, r))self._sequences.add(self._currSequence)self._currSequence = sequenceIdif self._resetIdx:assert r[self._resetIdx] == newSequence = Trueelse:reset = Falseif self._resetIdx:reset = r[self._resetIdx]if reset == :newSequence = Trueif not newSequence:if self._timeStampIdx and self._currTime is not None:t = r[self._timeStampIdx]if t < self._currTime:raise Exception('' % r)if self._timeStampIdx:self._currTime = r[self._timeStampIdx]", "docstring": "Keep track of sequence and make sure time goes forward\n\n Check if the current record is the beginning of a new sequence\n A new sequence starts in 2 cases:\n\n 1. The sequence id changed (if there is a sequence id field)\n 2. The reset field is 1 (if there is a reset field)\n\n Note that if there is no sequenceId field or resetId field then the entire\n dataset is technically one big sequence. The function will not return True\n for the first record in this case. 
This is Ok because it is important to\n detect new sequences only when there are multiple sequences in the file.", "id": "f17648:c0:m20"} {"signature": "def _getStartRow(self, bookmark):", "body": "bookMarkDict = json.loads(bookmark)realpath = os.path.realpath(self._filename)bookMarkFile = bookMarkDict.get('', None)if bookMarkFile != realpath:print (\"\"\"\") % (realpath, bookMarkDict)return else:return bookMarkDict['']", "docstring": "Extracts start row from the bookmark information", "id": "f17648:c0:m21"} {"signature": "def _getTotalLineCount(self):", "body": "if self._mode == self._FILE_WRITE_MODE:self._file.flush()return sum( for line in open(self._filename, self._FILE_READ_MODE))", "docstring": "Returns: count of ALL lines in dataset, including header lines", "id": "f17648:c0:m22"} {"signature": "def getNextRecordIdx(self):", "body": "return self._recordCount", "docstring": ":returns: (int) the index of the record that will be read next from\n :meth:`~.FileRecordStream.getNextRecord`.", "id": "f17648:c0:m23"} {"signature": "def getDataRowCount(self):", "body": "numLines = self._getTotalLineCount()if numLines == :assert self._mode == self._FILE_WRITE_MODE and self._recordCount == numDataRows = else:numDataRows = numLines - self._NUM_HEADER_ROWSassert numDataRows >= return numDataRows", "docstring": ":returns: (int) count of data rows in dataset (excluding header lines)", "id": "f17648:c0:m24"} {"signature": "def flush(self):", "body": "if self._file is not None:self._file.flush()", "docstring": "Flushes the file.", "id": "f17648:c0:m26"} {"signature": "def __enter__(self):", "body": "return self", "docstring": "Context guard - enter\n\n Just return the object", "id": "f17648:c0:m27"} {"signature": "def __exit__(self, yupe, value, traceback):", "body": "self.close()", "docstring": "Context guard - exit\n\n Ensures that the file is always closed at the end of the 'with' block.\n Lets exceptions propagate.", "id": "f17648:c0:m28"} {"signature": "def __iter__(self):", "body": "return self", "docstring": "Support for the iterator protocol. 
Return itself", "id": "f17648:c0:m29"} {"signature": "def next(self):", "body": "record = self.getNextRecord()if record is None:raise StopIterationreturn record", "docstring": "Implement the iterator protocol", "id": "f17648:c0:m30"} {"signature": "def __init__(self, origField, deltaField):", "body": "self.origField = origFieldself.deltaField = deltaFieldself.previousValue = Noneself.rememberReset = False", "docstring": "Add a delta field to the data.", "id": "f17649:c1:m0"} {"signature": "def initFilter(input, filterInfo = None):", "body": "if filterInfo is None:return NonefilterList = []for i, fieldName in enumerate(input.getFieldNames()):fieldFilter = filterInfo.get(fieldName, None)if fieldFilter == None:continuevar = dict()var[''] = Nonemin = fieldFilter.get('', None)max = fieldFilter.get('', None)var[''] = minvar[''] = maxif fieldFilter[''] == '':var[''] = fieldFilter['']fp = lambda x: (x[''] != SENTINEL_VALUE_FOR_MISSING_DATA andx[''] in x[''])elif fieldFilter[''] == '':if min != None and max != None:fp = lambda x: (x[''] != SENTINEL_VALUE_FOR_MISSING_DATA andx[''] >= x[''] and x[''] <= x[''])elif min != None:fp = lambda x: (x[''] != SENTINEL_VALUE_FOR_MISSING_DATA andx[''] >= x[''])else:fp = lambda x: (x[''] != SENTINEL_VALUE_FOR_MISSING_DATA andx[''] <= x[''])filterList.append((i, fp, var))return (_filterRecord, filterList)", "docstring": "Initializes internal filter variables for further processing.\n Returns a tuple (function to call,parameters for the filter call)\n\n The filterInfo is a dict. Here is an example structure:\n {fieldName: {'min': x,\n 'max': y,\n 'type': 'category', # or 'number'\n 'acceptValues': ['foo', 'bar'],\n }\n }\n\n This returns the following:\n (filterFunc, ((fieldIdx, fieldFilterFunc, filterDict),\n ...)\n\n Where fieldIdx is the index of the field within each record\n fieldFilterFunc returns True if the value is \"OK\" (within min, max or\n part of acceptValues)\n fieldDict is a dict containing 'type', 'min', max', 'acceptValues'", "id": "f17650:m0"} {"signature": "def _filterRecord(filterList, record):", "body": "for (fieldIdx, fp, params) in filterList:x = dict()x[''] = record[fieldIdx]x[''] = params['']x[''] = params['']x[''] = params['']if not fp(x):return Falsereturn True", "docstring": "Takes a record and returns true if record meets filter criteria,\n false otherwise", "id": "f17650:m1"} {"signature": "def _aggr_first(inList):", "body": "for elem in inList:if elem != SENTINEL_VALUE_FOR_MISSING_DATA:return elemreturn None", "docstring": "Returns first non-None element in the list, or None if all are None", "id": "f17650:m2"} {"signature": "def _aggr_last(inList):", "body": "for elem in reversed(inList):if elem != SENTINEL_VALUE_FOR_MISSING_DATA:return elemreturn None", "docstring": "Returns last non-None element in the list, or None if all are None", "id": "f17650:m3"} {"signature": "def _aggr_sum(inList):", "body": "aggrMean = _aggr_mean(inList)if aggrMean == None:return NoneaggrSum = for elem in inList:if elem != SENTINEL_VALUE_FOR_MISSING_DATA:aggrSum += elemelse:aggrSum += aggrMeanreturn aggrSum", "docstring": "Returns sum of the elements in the list. 
Missing items are replaced with\n the mean value", "id": "f17650:m4"} {"signature": "def _aggr_mean(inList):", "body": "aggrSum = nonNone = for elem in inList:if elem != SENTINEL_VALUE_FOR_MISSING_DATA:aggrSum += elemnonNone += if nonNone != :return aggrSum / nonNoneelse:return None", "docstring": "Returns mean of non-None elements of the list", "id": "f17650:m5"} {"signature": "def _aggr_mode(inList):", "body": "valueCounts = dict()nonNone = for elem in inList:if elem == SENTINEL_VALUE_FOR_MISSING_DATA:continuenonNone += if elem in valueCounts:valueCounts[elem] += else:valueCounts[elem] = if nonNone == :return NonesortedCounts = list(valueCounts.items())sortedCounts.sort(cmp=lambda x,y: x[] - y[], reverse=True)return sortedCounts[][]", "docstring": "Returns most common value seen in the non-None elements of the list", "id": "f17650:m6"} {"signature": "def _aggr_weighted_mean(inList, params):", "body": "assert(len(inList) == len(params))weightsSum = sum(params)if weightsSum == :return NoneweightedMean = for i, elem in enumerate(inList):weightedMean += elem * params[i]return weightedMean / weightsSum", "docstring": "Weighted mean uses params (must be the same size as inList) and\n makes weighed mean of inList", "id": "f17650:m7"} {"signature": "def generateDataset(aggregationInfo, inputFilename, outputFilename=None):", "body": "inputFullPath = resource_filename(\"\", inputFilename)inputObj = FileRecordStream(inputFullPath)aggregator = Aggregator(aggregationInfo=aggregationInfo,inputFields=inputObj.getFields())if aggregator.isNullAggregation():return inputFullPathif outputFilename is None:outputFilename = '' %os.path.splitext(os.path.basename(inputFullPath))[]timePeriods = ''''for k in timePeriods.split():if aggregationInfo.get(k, ) > :outputFilename += '' % (k, aggregationInfo[k])outputFilename += ''outputFilename = os.path.join(os.path.dirname(inputFullPath), outputFilename)lockFilePath = outputFilename + ''if os.path.isfile(outputFilename) oros.path.isfile(lockFilePath):while os.path.isfile(lockFilePath):print('' %lockFilePath)time.sleep()return outputFilenamelockFD = open(lockFilePath, '')outputObj = FileRecordStream(streamID=outputFilename, write=True,fields=inputObj.getFields())while True:inRecord = inputObj.getNextRecord()(aggRecord, aggBookmark) = aggregator.next(inRecord, None)if aggRecord is None and inRecord is None:breakif aggRecord is not None:outputObj.appendRecord(aggRecord)return outputFilename", "docstring": "Generate a dataset of aggregated values\n\n Parameters:\n ----------------------------------------------------------------------------\n aggregationInfo: a dictionary that contains the following entries\n - fields: a list of pairs. Each pair is a field name and an\n aggregation function (e.g. sum). The function will be used to aggregate\n multiple values during the aggregation period.\n\n aggregation period: 0 or more of unit=value fields; allowed units are:\n [years months] |\n [weeks days hours minutes seconds milliseconds microseconds]\n NOTE: years and months are mutually-exclusive with the other units.\n See getEndTime() and _aggregate() for more details.\n Example1: years=1, months=6,\n Example2: hours=1, minutes=30,\n If none of the period fields are specified or if all that are specified\n have values of 0, then aggregation will be suppressed, and the given\n inputFile parameter value will be returned.\n\n inputFilename: filename of the input dataset within examples/prediction/data\n\n outputFilename: name for the output file. 
If not given, a name will be\n generated based on the input filename and the aggregation params\n\n retval: Name of the generated output file. This will be the same as the input\n file name if no aggregation needed to be performed\n\n\n\n If the input file contained a time field, sequence id field or reset field\n that were not specified in aggregationInfo fields, those fields will be\n added automatically with the following rules:\n\n 1. The order will be R, S, T, rest of the fields\n 2. The aggregation function for all will be to pick the first: lambda x: x[0]\n\n Returns: the path of the aggregated data file if aggregation was performed\n (in the same directory as the given input file); if aggregation did not\n need to be performed, then the given inputFile argument value is returned.", "id": "f17650:m8"} {"signature": "def getFilename(aggregationInfo, inputFile):", "body": "inputFile = resource_filename(\"\", inputFile)a = defaultdict(lambda: , aggregationInfo)outputDir = os.path.dirname(inputFile)outputFile = '' % os.path.splitext(os.path.basename(inputFile))[]noAggregation = TruetimePeriods = ''''for k in timePeriods.split():if a[k] > :noAggregation = FalseoutputFile += '' % (k, a[k])if noAggregation:return inputFileoutputFile += ''outputFile = os.path.join(outputDir, outputFile)return outputFile", "docstring": "Generate the filename for aggregated dataset\n\n The filename is based on the input filename and the\n aggregation period.\n\n Returns the inputFile if no aggregation required (aggregation\n info has all 0's)", "id": "f17650:m9"} {"signature": "def __init__(self, aggregationInfo, inputFields, timeFieldName=None,sequenceIdFieldName=None, resetFieldName=None, filterInfo=None):", "body": "self._filterInfo = filterInfoself._nullAggregation = Falseself._inputFields = inputFieldsself._nullAggregation = Falseif aggregationInfo is None:self._nullAggregation = Trueelse:aggDef = defaultdict(lambda: , aggregationInfo)if (aggDef[''] == aggDef[''] == aggDef[''] ==aggDef[''] == aggDef[''] == aggDef[''] ==aggDef[''] == aggDef[''] ==aggDef[''] == ):self._nullAggregation = Trueself._filter = initFilter(self._inputFields, self._filterInfo)self._fields = Noneself._resetFieldIdx = Noneself._timeFieldIdx = Noneself._sequenceIdFieldIdx = Noneself._aggTimeDelta = datetime.timedelta()self._aggYears = self._aggMonths = self._aggrInputBookmark = Noneself._startTime = Noneself._endTime = Noneself._sequenceId = Noneself._firstSequenceStartTime = Noneself._inIdx = -self._slice = defaultdict(list)if not self._nullAggregation:fieldNames = [f[] for f in aggregationInfo['']]readerFieldNames = [f[] for f in self._inputFields]for name in fieldNames:if not name in readerFieldNames:raise Exception('' % (name))if timeFieldName is not None:self._timeFieldIdx = readerFieldNames.index(timeFieldName)if resetFieldName is not None:self._resetFieldIdx = readerFieldNames.index(resetFieldName)if sequenceIdFieldName is not None:self._sequenceIdFieldIdx = readerFieldNames.index(sequenceIdFieldName)self._fields = []fieldIdx = -for (name, type, special) in self._inputFields:fieldIdx += found = Falsefor field in aggregationInfo['']:if field[] == name:aggFunctionName = field[]found = Truebreakif not found:aggFunctionName = ''(funcPtr, params) = self._getFuncPtrAndParams(aggFunctionName)self._fields.append((fieldIdx, funcPtr, params))if special == FieldMetaSpecial.reset and self._resetFieldIdx is None:self._resetFieldIdx = fieldIdxif special == FieldMetaSpecial.timestamp and self._timeFieldIdx is None:self._timeFieldIdx = 
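For orientation, a sketch of calling generateDataset() with an aggregationInfo dict of the shape its docstring describes; the field names and dataset path are illustrative, not taken from this corpus:

aggregationInfo = {
    'fields': [('consumption', 'sum'), ('gym', 'first')],  # (fieldName, aggFunction) pairs
    'hours': 1, 'minutes': 30,                              # aggregation period
}
# Returns the aggregated file's path, or the input path when every period field is 0.
outPath = generateDataset(aggregationInfo, 'path/to/dataset.csv')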
fieldIdxif (special == FieldMetaSpecial.sequence andself._sequenceIdFieldIdx is None):self._sequenceIdFieldIdx = fieldIdxassert self._timeFieldIdx is not None, \"\"self._aggTimeDelta = datetime.timedelta(days=aggDef[''],hours=aggDef[''],minutes=aggDef[''],seconds=aggDef[''],milliseconds=aggDef[''],microseconds=aggDef[''],weeks=aggDef[''])self._aggYears = aggDef['']self._aggMonths = aggDef['']if self._aggTimeDelta:assert self._aggYears == assert self._aggMonths == ", "docstring": "Construct an aggregator instance\n\n Params:\n\n - aggregationInfo: a dictionary that contains the following entries\n - fields: a list of pairs. Each pair is a field name and an\n aggregation function (e.g. sum). The function will be used to aggregate\n multiple values during the aggregation period.\n\n - aggregation period: 0 or more of unit=value fields; allowed units are:\n [years months] | [weeks days hours minutes seconds milliseconds\n microseconds]\n NOTE: years and months are mutually-exclusive with the other units. See\n getEndTime() and _aggregate() for more details.\n Example1: years=1, months=6,\n Example2: hours=1, minutes=30,\n If none of the period fields are specified or if all that are specified\n have values of 0, then aggregation will be suppressed, and the given\n inputFile parameter value will be returned.\n\n - inputFields: The fields from the data source. This is a sequence of\n `nupic.data.fieldmeta.FieldMetaInfo` instances.\n\n - timeFieldName: name of the field to use as the time field. If None,\n then the time field will be queried from the reader.\n\n - sequenceIdFieldName: name of the field to use as the sequenecId. If None,\n then the time field will be queried from the reader.\n\n - resetFieldName: name of the field to use as the reset field. If None,\n then the time field will be queried from the reader.\n\n - filterInfo: a structure with rules for filtering records out\n\n\n If the input file contains a time field, sequence id field or reset field\n that were not specified in aggregationInfo fields, those fields will be\n added automatically with the following rules:\n\n 1. The order will be R, S, T, rest of the fields\n 2. The aggregation function for these will be to pick the first:\n lambda x: x[0]", "id": "f17650:c0:m0"} {"signature": "def _getEndTime(self, t):", "body": "assert isinstance(t, datetime.datetime)if self._aggTimeDelta:return t + self._aggTimeDeltaelse:year = t.year + self._aggYears + (t.month - + self._aggMonths) / month = (t.month - + self._aggMonths) % + return t.replace(year=year, month=month)", "docstring": "Add the aggregation period to the input time t and return a datetime object\n\n Years and months are handled as aspecial case due to leap years\n and months with different number of dates. They can't be converted\n to a strict timedelta because a period of 3 months will have different\n durations actually. 
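Because calendar months vary in length, the year/month branch of _getEndTime advances the date fields directly instead of using a timedelta. The arithmetic in isolation (it keeps the day-of-month unchanged, so the usual day-overflow caveat applies):

import datetime

def add_months(t, years, months):
    # Advance t by whole years/months, leaving day and time-of-day untouched.
    year = t.year + years + (t.month - 1 + months) // 12
    month = (t.month - 1 + months) % 12 + 1
    return t.replace(year=year, month=month)

print(add_months(datetime.datetime(2014, 11, 15), 0, 3))  # -> 2015-02-15 00:00:00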
The solution is to just add the years and months\n fields directly to the current time.\n\n Other periods are converted to timedelta and just added to current time.", "id": "f17650:c0:m1"} {"signature": "def _getFuncPtrAndParams(self, funcName):", "body": "params = Noneif isinstance(funcName, str):if funcName == '':fp = _aggr_sumelif funcName == '':fp = _aggr_firstelif funcName == '':fp = _aggr_lastelif funcName == '':fp = _aggr_meanelif funcName == '':fp = maxelif funcName == '':fp = minelif funcName == '':fp = _aggr_modeelif funcName.startswith(''):fp = _aggr_weighted_meanparamsName = funcName[:]params = [f[] for f in self._inputFields].index(paramsName)else:fp = funcNamereturn (fp, params)", "docstring": "Given the name of an aggregation function, returns the function pointer\n and param.\n\n Parameters:\n ------------------------------------------------------------------------\n funcName: a string (name of function) or funcPtr\n retval: (funcPtr, param)", "id": "f17650:c0:m2"} {"signature": "def _createAggregateRecord(self):", "body": "record = []for i, (fieldIdx, aggFP, paramIdx) in enumerate(self._fields):if aggFP is None: continuevalues = self._slice[i]refIndex = Noneif paramIdx is not None:record.append(aggFP(values, self._slice[paramIdx]))else:record.append(aggFP(values))return record", "docstring": "Generate the aggregated output record\n\n Parameters:\n ------------------------------------------------------------------------\n retval: outputRecord", "id": "f17650:c0:m3"} {"signature": "def isNullAggregation(self):", "body": "return self._nullAggregation", "docstring": "Return True if no aggregation will be performed, either because the\n aggregationInfo was None or all aggregation params within it were 0.", "id": "f17650:c0:m4"} {"signature": "def next(self, record, curInputBookmark):", "body": "outRecord = NoneretInputBookmark = Noneif record is not None:self._inIdx += if self._filter != None and not self._filter[](self._filter[], record):return (None, None)if self._nullAggregation:return (record, curInputBookmark)t = record[self._timeFieldIdx]if self._firstSequenceStartTime == None:self._firstSequenceStartTime = tif self._startTime is None:self._startTime = tif self._endTime is None:self._endTime = self._getEndTime(t)assert self._endTime > tif self._resetFieldIdx is not None:resetSignal = record[self._resetFieldIdx]else:resetSignal = Noneif self._sequenceIdFieldIdx is not None:currSequenceId = record[self._sequenceIdFieldIdx]else:currSequenceId = NonenewSequence = (resetSignal == and self._inIdx > )or self._sequenceId != currSequenceIdor self._inIdx == if newSequence:self._sequenceId = currSequenceIdsliceEnded = (t >= self._endTime or t < self._startTime)if (newSequence or sliceEnded) and len(self._slice) > :for j, f in enumerate(self._fields):index = f[]if index == self._timeFieldIdx:self._slice[j][] = self._startTimebreakoutRecord = self._createAggregateRecord()retInputBookmark = self._aggrInputBookmarkself._slice = defaultdict(list)for j, f in enumerate(self._fields):index = f[]self._slice[j].append(record[index])self._aggrInputBookmark = curInputBookmarkif newSequence:self._startTime = tself._endTime = self._getEndTime(t)if sliceEnded:if t < self._startTime:self._endTime = self._firstSequenceStartTimewhile t >= self._endTime:self._startTime = self._endTimeself._endTime = self._getEndTime(self._endTime)if outRecord is not None:return (outRecord, retInputBookmark)elif self._slice:for j, f in enumerate(self._fields):index = f[]if index == self._timeFieldIdx:self._slice[j][] = 
self._startTimebreakoutRecord = self._createAggregateRecord()retInputBookmark = self._aggrInputBookmarkself._slice = defaultdict(list)return (outRecord, retInputBookmark)", "docstring": "Return the next aggregated record, if any\n\n Parameters:\n ------------------------------------------------------------------------\n record: The input record (values only) from the input source, or\n None if the input has reached EOF (this will cause this\n method to force completion of and return any partially\n aggregated time period)\n curInputBookmark: The bookmark to the next input record\n retval:\n (outputRecord, inputBookmark)\n\n outputRecord: the aggregated record\n inputBookmark: a bookmark to the last position from the input that\n contributed to this aggregated record.\n\n If we don't have any aggregated records yet, returns (None, None)\n\n\n The caller should generally do a loop like this:\n while True:\n inRecord = reader.getNextRecord()\n bookmark = reader.getBookmark()\n\n (aggRecord, aggBookmark) = aggregator.next(inRecord, bookmark)\n\n # reached EOF?\n if inRecord is None and aggRecord is None:\n break\n\n if aggRecord is not None:\n proessRecord(aggRecord, aggBookmark)\n\n\n This method makes use of the self._slice member variable to build up\n the values we need to aggregate. This is a dict of lists. The keys are\n the field indices and the elements of each list are the values for that\n field. For example:\n\n self._siice = { 0: [42, 53], 1: [4.0, 5.1] }", "id": "f17650:c0:m5"} {"signature": "def _getFieldIndexBySpecial(fields, special):", "body": "for i, field in enumerate(fields):if field.special == special:return ireturn None", "docstring": "Return index of the field matching the field meta special value.\n :param fields: sequence of nupic.data.fieldmeta.FieldMetaInfo objects\n representing the fields of a stream\n :param special: one of the special field attribute values from\n nupic.data.fieldmeta.FieldMetaSpecial\n :returns: first zero-based index of the field tagged with the target field\n meta special attribute; None if no such field", "id": "f17651:m0"} {"signature": "def __init__(self, fields, aggregationPeriod=None):", "body": "if not fields:raise ValueError('' % (fields,))self._fields = fieldsself._aggregationPeriod = aggregationPeriodself._sequenceId = -self._fieldNames = tuple(f.name for f in fields)self._categoryFieldIndex = _getFieldIndexBySpecial(fields,FieldMetaSpecial.category)self._resetFieldIndex = _getFieldIndexBySpecial(fields,FieldMetaSpecial.reset)self._sequenceFieldIndex = _getFieldIndexBySpecial(fields,FieldMetaSpecial.sequence)self._timestampFieldIndex = _getFieldIndexBySpecial(fields,FieldMetaSpecial.timestamp)self._learningFieldIndex = _getFieldIndexBySpecial(fields,FieldMetaSpecial.learning)", "docstring": ":param fields: non-empty sequence of nupic.data.fieldmeta.FieldMetaInfo\n objects corresponding to fields in input rows.\n:param aggregationPeriod: (dict) aggregation period of the record stream \n containing 'months' and 'seconds'. The months is always an integer\n and seconds is a floating point. Only one is allowed to be non-zero at a\n time. If there is no aggregation associated with the stream, pass None.\n Typically, a raw file or hbase stream will NOT have any aggregation info,\n but subclasses of RecordStreamIface, like StreamReader, will and will\n provide the aggregation period. 
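The next() docstring above prescribes a reader-driven loop; the same loop as a runnable sketch, assuming a FileRecordStream input (file name, aggregation settings, and import paths are assumptions):

from nupic.data.file_record_stream import FileRecordStream  # assumed module path
from nupic.data.aggregator import Aggregator                 # assumed module path

reader = FileRecordStream('input.csv')
aggregator = Aggregator(
    aggregationInfo={'fields': [('consumption', 'mean')], 'hours': 1},
    inputFields=reader.getFields())
while True:
    inRecord = reader.getNextRecord()
    bookmark = reader.getBookmark()
    aggRecord, aggBookmark = aggregator.next(inRecord, bookmark)
    if inRecord is None and aggRecord is None:
        break  # EOF reached and the final partial slice has been flushed
    if aggRecord is not None:
        handleRecord(aggRecord, aggBookmark)  # handleRecord: caller-supplied placeholder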
This is used by the encode method to\n assign a record number to a record given its timestamp and the aggregation\n interval.", "id": "f17651:c0:m0"} {"signature": "def rewind(self):", "body": "self._sequenceId = -", "docstring": "Put us back at the beginning of the file again", "id": "f17651:c0:m1"} {"signature": "def encode(self, inputRow):", "body": "result = dict(zip(self._fieldNames, inputRow))if self._categoryFieldIndex is not None:if isinstance(inputRow[self._categoryFieldIndex], int):result[''] = [inputRow[self._categoryFieldIndex]]else:result[''] = (inputRow[self._categoryFieldIndex]if inputRow[self._categoryFieldIndex]else [None])else:result[''] = [None]if self._resetFieldIndex is not None:result[''] = int(bool(inputRow[self._resetFieldIndex]))else:result[''] = if self._learningFieldIndex is not None:result[''] = int(bool(inputRow[self._learningFieldIndex]))result[''] = Noneif self._timestampFieldIndex is not None:result[''] = inputRow[self._timestampFieldIndex]result[''] = self._computeTimestampRecordIdx(inputRow[self._timestampFieldIndex])else:result[''] = NonehasReset = self._resetFieldIndex is not NonehasSequenceId = self._sequenceFieldIndex is not Noneif hasReset and not hasSequenceId:if result['']:self._sequenceId += sequenceId = self._sequenceIdelif not hasReset and hasSequenceId:sequenceId = inputRow[self._sequenceFieldIndex]result[''] = int(sequenceId != self._sequenceId)self._sequenceId = sequenceIdelif hasReset and hasSequenceId:sequenceId = inputRow[self._sequenceFieldIndex]else:sequenceId = if sequenceId is not None:result[''] = hash(sequenceId)else:result[''] = Nonereturn result", "docstring": "Encodes the given input row as a dict, with the\n keys being the field names. This also adds in some meta fields:\n '_category': The value from the category field (if any)\n '_reset': True if the reset field was True (if any)\n '_sequenceId': the value from the sequenceId field (if any)\n\n :param inputRow: sequence of values corresponding to a single input metric\n data row\n :rtype: dict", "id": "f17651:c0:m2"} {"signature": "def _computeTimestampRecordIdx(self, recordTS):", "body": "if self._aggregationPeriod is None:return Noneif self._aggregationPeriod[''] > :assert self._aggregationPeriod[''] == result = int((recordTS.year * + (recordTS.month-)) /self._aggregationPeriod[''])elif self._aggregationPeriod[''] > :delta = recordTS - datetime.datetime(year=, month=, day=)deltaSecs = delta.days * * * + delta.seconds+ delta.microseconds / result = int(deltaSecs / self._aggregationPeriod[''])else:result = Nonereturn result", "docstring": "Give the timestamp of a record (a datetime object), compute the record's\n timestamp index - this is the timestamp divided by the aggregation period.\n\n\n Parameters:\n ------------------------------------------------------------------------\n recordTS: datetime instance\n retval: record timestamp index, or None if no aggregation period", "id": "f17651:c0:m3"} {"signature": "@abstractmethoddef close(self):", "body": "", "docstring": "Close the stream", "id": "f17651:c1:m1"} {"signature": "def rewind(self):", "body": "if self._modelRecordEncoder is not None:self._modelRecordEncoder.rewind()", "docstring": "Put us back at the beginning of the file again.", "id": "f17651:c1:m2"} {"signature": "@abstractmethoddef getNextRecord(self, useCache=True):", "body": "", "docstring": "Returns next available data record from the storage. 
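A sketch of what ModelRecordEncoder.encode() produces for one row, under assumed field metadata; the import path and the exact meta key names are assumptions, while the category/reset/sequenceId meta fields are guaranteed by the docstring above:

import datetime
from nupic.data.field_meta import FieldMetaInfo, FieldMetaType, FieldMetaSpecial  # assumed path

fields = FieldMetaInfo.createListFromFileFieldList([
    ('timestamp', FieldMetaType.datetime, FieldMetaSpecial.timestamp),
    ('consumption', FieldMetaType.float, FieldMetaSpecial.none),
])
encoder = ModelRecordEncoder(fields=fields)
result = encoder.encode([datetime.datetime(2014, 2, 12, 14, 0), 5.3])
# result is keyed by field name ('timestamp', 'consumption') plus meta entries
# for the reset / sequenceId / category / timestamp information described above.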
If ``useCache`` is\n``False``, then don't read ahead and don't cache any records.\n\n:return: a data row (a list or tuple) if available; None, if no more records\n in the table (End of Stream - EOS); empty sequence (list or tuple)\n when timing out while waiting for the next record.", "id": "f17651:c1:m3"} {"signature": "def getNextRecordDict(self):", "body": "values = self.getNextRecord()if values is None:return Noneif not values:return dict()if self._modelRecordEncoder is None:self._modelRecordEncoder = ModelRecordEncoder(fields=self.getFields(),aggregationPeriod=self.getAggregationMonthsAndSeconds())return self._modelRecordEncoder.encode(values)", "docstring": "Returns next available data record from the storage as a dict, with the\n keys being the field names. This also adds in some meta fields:\n\n - ``_category``: The value from the category field (if any)\n - ``_reset``: True if the reset field was True (if any)\n - ``_sequenceId``: the value from the sequenceId field (if any)", "id": "f17651:c1:m4"} {"signature": "def getAggregationMonthsAndSeconds(self):", "body": "return None", "docstring": "Returns the aggregation period of the record stream as a dict\ncontaining 'months' and 'seconds'. The months is always an integer and\nseconds is a floating point. Only one is allowed to be non-zero.\n\nIf there is no aggregation associated with the stream, returns None.\n\nTypically, a raw file or hbase stream will NOT have any aggregation info,\nbut subclasses of :class:`~nupic.data.record_stream.RecordStreamIface`, like\n:class:`~nupic.data.stream_reader.StreamReader`, will, and will return the\naggregation period from this call. This call is used by\n:meth:`getNextRecordDict` to assign a record number to a record given its\ntimestamp and the aggregation interval.\n\n:returns: ``None``", "id": "f17651:c1:m5"} {"signature": "@abstractmethoddef getNextRecordIdx(self):", "body": "", "docstring": ":returns: (int) index of the record that will be read next from\n :meth:`getNextRecord`", "id": "f17651:c1:m6"} {"signature": "@abstractmethoddef appendRecord(self, record):", "body": "", "docstring": "Saves the record in the underlying storage. Should be implemented in\nsubclasses.\n\n:param record: (object) to store", "id": "f17651:c1:m7"} {"signature": "@abstractmethoddef appendRecords(self, records, progressCB=None):", "body": "", "docstring": "Saves multiple records in the underlying storage. Should be implemented in\nsubclasses.\n\n:param records: (list) of objects to store\n:param progressCB: (func) called after each record is appended", "id": "f17651:c1:m8"} {"signature": "@abstractmethoddef getBookmark(self):", "body": "", "docstring": "Returns an anchor to the current position in the data. Passing this\n anchor to the constructor makes the current position the first\n returned record. 
If record is no longer in the storage, the first available\n after it will be returned.\n\n :returns: anchor to current position in the data.", "id": "f17651:c1:m9"} {"signature": "@abstractmethoddef recordsExistAfter(self, bookmark):", "body": "", "docstring": ":param bookmark: (int) where to start\n:returns: True if there are records left after the bookmark.", "id": "f17651:c1:m10"} {"signature": "@abstractmethoddef seekFromEnd(self, numRecords):", "body": "", "docstring": ":param numRecords: (int) number of records from the end.\n:returns: (int) a bookmark numRecords from the end of the stream.", "id": "f17651:c1:m11"} {"signature": "@abstractmethoddef getStats(self):", "body": "", "docstring": ":returns: storage stats (like min and max values of the fields).", "id": "f17651:c1:m12"} {"signature": "def getFieldMin(self, fieldName):", "body": "stats = self.getStats()if stats == None:return NoneminValues = stats.get('', None)if minValues == None:return Noneindex = self.getFieldNames().index(fieldName)return minValues[index]", "docstring": "If underlying implementation does not support min/max stats collection,\nor if a field type does not support min/max (non scalars), the return\nvalue will be None.\n\n:param fieldName: (string) name of field to get min\n:returns: current minimum value for the field ``fieldName``.", "id": "f17651:c1:m13"} {"signature": "def getFieldMax(self, fieldName):", "body": "stats = self.getStats()if stats == None:return NonemaxValues = stats.get('', None)if maxValues == None:return Noneindex = self.getFieldNames().index(fieldName)return maxValues[index]", "docstring": "If underlying implementation does not support min/max stats collection,\nor if a field type does not support min/max (non scalars), the return\nvalue will be None.\n\n:param fieldName: (string) name of field to get max\n:returns: current maximum value for the field ``fieldName``.", "id": "f17651:c1:m14"} {"signature": "@abstractmethoddef clearStats(self):", "body": "", "docstring": "Resets stats collected so far.", "id": "f17651:c1:m15"} {"signature": "@abstractmethoddef getError(self):", "body": "", "docstring": ":returns: errors saved in the storage.", "id": "f17651:c1:m16"} {"signature": "@abstractmethoddef setError(self, error):", "body": "", "docstring": "Saves specified error in the storage.\n\n:param error: Error to store.", "id": "f17651:c1:m17"} {"signature": "@abstractmethoddef isCompleted(self):", "body": "", "docstring": ":returns: True if all records are already in the storage or False\n if more records is expected.", "id": "f17651:c1:m18"} {"signature": "@abstractmethoddef setCompleted(self, completed):", "body": "", "docstring": "Marks the stream completed.\n\n:param completed: (bool) is completed?", "id": "f17651:c1:m19"} {"signature": "@abstractmethoddef getFieldNames(self):", "body": "", "docstring": ":returns: (list) of field names associated with the data.", "id": "f17651:c1:m20"} {"signature": "@abstractmethoddef getFields(self):", "body": "", "docstring": ":returns: (list) of :class:`nupic.data.fieldmeta.FieldMetaInfo` objects for\n each field in the stream. 
Might be None, if that information is provided\n externally (through the `Stream Definition `_, \n for example).", "id": "f17651:c1:m21"} {"signature": "def getResetFieldIdx(self):", "body": "return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.reset)", "docstring": ":returns: (int) index of the ``reset`` field; ``None`` if no such field.", "id": "f17651:c1:m22"} {"signature": "def getTimestampFieldIdx(self):", "body": "return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.timestamp)", "docstring": ":returns: (int) index of the ``timestamp`` field.", "id": "f17651:c1:m23"} {"signature": "def getSequenceIdFieldIdx(self):", "body": "return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.sequence)", "docstring": ":returns: (int) index of the ``sequenceId`` field.", "id": "f17651:c1:m24"} {"signature": "def getCategoryFieldIdx(self):", "body": "return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.category)", "docstring": ":returns: (int) index of ``category`` field", "id": "f17651:c1:m25"} {"signature": "def getLearningFieldIdx(self):", "body": "return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.learning)", "docstring": ":returns: (int) index of the ``learning`` field.", "id": "f17651:c1:m26"} {"signature": "@abstractmethoddef setTimeout(self, timeout):", "body": "", "docstring": "Set the read timeout in seconds\n\n:param timeout: (int or floating point)", "id": "f17651:c1:m27"} {"signature": "@abstractmethoddef flush(self):", "body": "", "docstring": "Flush the file to disk", "id": "f17651:c1:m28"} {"signature": "def generateStats(filename, statsInfo, maxSamples = None, filters=[], cache=True):", "body": "if not isinstance(statsInfo, dict):raise RuntimeError(\"\"\"\" % type(statsInfo))filename = resource_filename(\"\", filename)if cache:statsFilename = getStatsFilename(filename, statsInfo, filters)if os.path.exists(statsFilename):try:r = pickle.load(open(statsFilename, \"\"))except:print(\"\"\"\" % filename)r = dict()requestedKeys = set([s for s in statsInfo])availableKeys = set(r.keys())unavailableKeys = requestedKeys.difference(availableKeys)if len(unavailableKeys ) == :return relse:print(\"\"\"\" %(filename, str(unavailableKeys)))os.remove(filename)print(\"\" % (filename, filters))sensor = RecordSensor()sensor.dataSource = FileRecordStream(filename)sensor.preEncodingFilters = filtersstats = []for field in statsInfo:if statsInfo[field] == \"\":statsInfo[field] = NumberStatsCollector()elif statsInfo[field] == \"\":statsInfo[field] = CategoryStatsCollector()else:raise RuntimeError(\"\" % (statsInfo[field], field))if maxSamples is None:maxSamples = for i in range(maxSamples):try:record = sensor.getNextRecord()except StopIteration:breakfor (name, collector) in list(statsInfo.items()):collector.add(record[name])del sensorr = dict()for (field, collector) in list(statsInfo.items()):stats = collector.getStats()if field not in r:r[field] = statselse:r[field].update(stats)if cache:f = open(statsFilename, \"\")pickle.dump(r, f)f.close()r[\"\"] = statsFilenamereturn r", "docstring": "Generate requested statistics for a dataset and cache to a file.\n If filename is None, then don't cache to a file", "id": "f17652:m1"} {"signature": "def generateStats(filename, maxSamples = None,):", "body": "statsCollectorMapping = {'': FloatStatsCollector,'': IntStatsCollector,'': StringStatsCollector,'': DateTimeStatsCollector,'': BoolStatsCollector,}filename = resource_filename(\"\", filename)print(\"\"*)print(\"\" % (filename,))dataFile = 
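A usage sketch for the statsInfo-driven generateStats() (f17652:m1) above; the dataset path is illustrative, and the collector keys are an assumption suggested by the NumberStatsCollector/CategoryStatsCollector branches in its body (the literal strings are elided in this corpus):

statsInfo = {'consumption': 'number', 'gym': 'category'}  # field name -> collector kind (assumed keys)
stats = generateStats('path/to/dataset.csv', statsInfo, maxSamples=1000, cache=False)
# stats maps each requested field to its collected statistics
# (min/max style values for number fields, per-category counts for category fields).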
FileRecordStream(filename)statsCollectors = []for fieldName, fieldType, fieldSpecial in dataFile.getFields():statsCollector =statsCollectorMapping[fieldType](fieldName, fieldType, fieldSpecial)statsCollectors.append(statsCollector)if maxSamples is None:maxSamples = for i in range(maxSamples):record = dataFile.getNextRecord()if record is None:breakfor i, value in enumerate(record):statsCollectors[i].addValue(value)stats = {}for statsCollector in statsCollectors:statsCollector.getStats(stats)if dataFile.getResetFieldIdx() is not None:resetFieldName,_,_ = dataFile.getFields()[dataFile.reset]stats.pop(resetFieldName)if VERBOSITY > :pprint.pprint(stats)return stats", "docstring": "Collect statistics for each of the fields in the user input data file and\nreturn a stats dict object.\n\nParameters:\n------------------------------------------------------------------------------\nfilename: The path and name of the data file.\nmaxSamples: Upper bound on the number of rows to be processed\nretval: A dictionary of dictionaries. The top level keys are the\n field names and the corresponding values are the statistics\n collected for the individual file.\n Example:\n {\n 'consumption':{'min':0,'max':90,'mean':50,...},\n 'gym':{'numDistinctCategories':10,...},\n ...\n }", "id": "f17653:m0"} {"signature": "def getStats(self, stats):", "body": "BaseStatsCollector.getStats(self, stats)sortedNumberList = sorted(self.valueList)listLength = len(sortedNumberList)min = sortedNumberList[]max = sortedNumberList[-]mean = numpy.mean(self.valueList)median = sortedNumberList[int(*listLength)]percentile1st = sortedNumberList[int(*listLength)]percentile99th = sortedNumberList[int(*listLength)]differenceList =[(cur - prev) for prev, cur in zip(list(self.valueSet)[:-],list(self.valueSet)[:])]if min > max:print(self.fieldname, min, max, '')meanResolution = numpy.mean(differenceList)stats[self.fieldname][''] = minstats[self.fieldname][''] = maxstats[self.fieldname][''] = meanstats[self.fieldname][''] = medianstats[self.fieldname][''] = percentile1ststats[self.fieldname][''] = percentile99thstats[self.fieldname][''] = meanResolutionpassData = Trueif passData:stats[self.fieldname][''] = self.valueListif VERBOSITY > :print('')print(\"\")print(\"\", min)print(\"\", max)print(\"\", mean)print(\"\", median)print(\"\", percentile1st)print(\"\", percentile99th)print('')print(\"\")print(\"\", meanResolution)if VERBOSITY > :print('')print(\"\")counts, bins = numpy.histogram(self.valueList, new=True)print(\"\", counts.tolist())print(\"\", bins.tolist())", "docstring": "Override of getStats() in BaseStatsCollector\n\n stats: A dictionary where all the stats are\n outputted", "id": "f17653:c2:m0"} {"signature": "def __init__(self, filterDict):", "body": "self.filterDict = filterDict", "docstring": "TODO describe filterDict schema", "id": "f17655:c0:m0"} {"signature": "def match(self, record):", "body": "for field, meta in self.filterDict.iteritems():index = meta['']categories = meta['']for category in categories:if not record:continueif record[index].find(category) != -:''''''return Truereturn False", "docstring": "Returns True if the record matches any of the provided filters", "id": "f17655:c0:m1"} {"signature": "@staticmethoddef createFromFileFieldElement(fieldInfoTuple):", "body": "return FieldMetaInfo._make(fieldInfoTuple)", "docstring": "Creates a :class:`.field_meta.FieldMetaInfo` instance from a tuple containing\n``name``, ``type``, and ``special``.\n\n:param fieldInfoTuple: Must contain ``name``, ``type``, and 
``special``\n:return: (:class:`~.field_meta.FieldMetaInfo`) instance", "id": "f17656:c0:m1"} {"signature": "@classmethoddef createListFromFileFieldList(cls, fields):", "body": "return [cls.createFromFileFieldElement(f) for f in fields]", "docstring": "Creates a FieldMetaInfo list from the a list of tuples. Basically runs\n:meth:`~.field_meta.FieldMetaInfo.createFromFileFieldElement` on each tuple.\n\n*Example:*\n\n.. code-block:: python\n\n # Create a list of FieldMetaInfo instances from a list of File meta-data\n # tuples\n el = [(\"pounds\", FieldMetaType.float, FieldMetaSpecial.none),\n (\"price\", FieldMetaType.float, FieldMetaSpecial.none),\n (\"id\", FieldMetaType.string, FieldMetaSpecial.sequence),\n (\"date\", FieldMetaType.datetime, FieldMetaSpecial.timestamp),\n ]\n ml = FieldMetaInfo.createListFromFileFieldList(el)\n\n:param fields: a sequence of field attribute tuples conforming to the format\n of ``name``, ``type``, and ``special``\n\n:return: A list of :class:`~.field_meta.FieldMetaInfo` elements corresponding\n to the given 'fields' list.", "id": "f17656:c0:m2"} {"signature": "@classmethoddef isValid(cls, fieldDataType):", "body": "return fieldDataType in cls._ALL", "docstring": "Check a candidate value whether it's one of the valid field data types\n\n :param fieldDataType: (string) candidate field data type\n :returns: True if the candidate value is a legitimate field data type value;\n False if not", "id": "f17656:c1:m0"} {"signature": "@classmethoddef isValid(cls, attr):", "body": "return attr in cls._ALL", "docstring": "Check a candidate value whether it's one of the valid attributes\n\n :param attr: (string) candidate value\n :returns: True if the candidate value is a legitimate \"special\" field\n attribute; False if not", "id": "f17656:c2:m0"} {"signature": "def parseTimestamp(s):", "body": "s = s.strip()for pattern in DATETIME_FORMATS:try:return datetime.datetime.strptime(s, pattern)except ValueError:passraise ValueError('''' % (s, ''.join(DATETIME_FORMATS)))", "docstring": "Parses a textual datetime format and return a Python datetime object.\n\nThe supported format is: ``yyyy-mm-dd h:m:s.ms``\n\nThe time component is optional.\n\n- hours are 00..23 (no AM/PM)\n- minutes are 00..59\n- seconds are 00..59\n- micro-seconds are 000000..999999\n\n:param s: (string) input time text\n:return: (datetime.datetime)", "id": "f17657:m0"} {"signature": "def serializeTimestamp(t):", "body": "return t.strftime(DATETIME_FORMATS[])", "docstring": "Turns a datetime object into a string.\n\n:param t: (datetime.datetime)\n:return: (string) in default format (see \n :const:`~nupic.data.utils.DATETIME_FORMATS` [0])", "id": "f17657:m1"} {"signature": "def serializeTimestampNoMS(t):", "body": "return t.strftime(DATETIME_FORMATS[])", "docstring": "Turns a datetime object into a string ignoring milliseconds.\n\n:param t: (datetime.datetime)\n:return: (string) in default format (see \n :const:`~nupic.data.utils.DATETIME_FORMATS` [2])", "id": "f17657:m2"} {"signature": "def parseBool(s):", "body": "l = s.lower()if l in (\"\", \"\", \"\"):return Trueif l in (\"\", \"\", \"\"):return Falseraise Exception(\"\" % s)", "docstring": "String to boolean\n\n:param s: (string)\n:return: (bool)", "id": "f17657:m3"} {"signature": "def floatOrNone(f):", "body": "if f == '':return Nonereturn float(f)", "docstring": "Tries to convert input to a float input or returns ``None``.\n\n:param f: (object) thing to convert to a float\n:return: (float or ``None``)", "id": "f17657:m4"} {"signature": "def 
intOrNone(i):", "body": "if i.strip() == '' or i.strip() == '':return Nonereturn int(i)", "docstring": "Tries to convert input to a int input or returns ``None``.\n\n:param f: (object) thing to convert to a int\n:return: (int or ``None``)", "id": "f17657:m5"} {"signature": "def escape(s):", "body": "if s is None:return ''assert isinstance(s, basestring),\"\" % (basestring, type(s), s)s = s.replace('', '')s = s.replace('', '')s = s.replace('', '')s = s.replace('', '')return s", "docstring": "Escape commas, tabs, newlines and dashes in a string\n\nCommas are encoded as tabs.\n\n:param s: (string) to escape\n:returns: (string) escaped string", "id": "f17657:m6"} {"signature": "def unescape(s):", "body": "assert isinstance(s, basestring)s = s.replace('', '')s = s.replace('', '')s = s.replace('', '')s = s.replace('', '')return s", "docstring": "Unescapes a string that may contain commas, tabs, newlines and dashes\n\nCommas are decoded from tabs.\n\n:param s: (string) to unescape\n:returns: (string) unescaped string", "id": "f17657:m7"} {"signature": "def parseSdr(s):", "body": "assert isinstance(s, basestring)sdr = [int(c) for c in s if c in (\"\", \"\")]if len(sdr) != len(s):raise ValueError(\"\"\"\")return sdr", "docstring": "Parses a string containing only 0's and 1's and return a Python list object.\n\n:param s: (string) string to parse\n:returns: (list) SDR out", "id": "f17657:m8"} {"signature": "def serializeSdr(sdr):", "body": "return \"\".join(str(bit) for bit in sdr)", "docstring": "Serialize Python list object containing only 0's and 1's to string.\n\n:param sdr: (list) binary\n:returns: (string) SDR out", "id": "f17657:m9"} {"signature": "def parseStringList(s):", "body": "assert isinstance(s, basestring)return [int(i) for i in s.split()]", "docstring": "Parse a string of space-separated numbers, returning a Python list.\n\n:param s: (string) to parse\n:returns: (list) binary SDR", "id": "f17657:m10"} {"signature": "def stripList(listObj):", "body": "return \"\".join(str(i) for i in listObj)", "docstring": "Convert a list of numbers to a string of space-separated values.\n\n:param listObj: (list) to convert\n:returns: (string) of space-separated values", "id": "f17657:m11"} {"signature": "def __init__(self,n,w,num=,seed=):", "body": "self._n = nself._w = wself._num = numself._random = Random(seed)self._patterns = dict()self._generate()", "docstring": "@param n (int) Number of available bits in pattern\n@param w (int/list) Number of on bits in pattern\n If list, each pattern will have a `w` randomly\n selected from the list.\n@param num (int) Number of available patterns", "id": "f17658:c0:m0"} {"signature": "def get(self, number):", "body": "if not number in self._patterns:raise IndexError(\"\")return self._patterns[number]", "docstring": "Return a pattern for a number.\n\n@param number (int) Number of pattern\n\n@return (set) Indices of on bits", "id": "f17658:c0:m1"} {"signature": "def addNoise(self, bits, amount):", "body": "newBits = set()for bit in bits:if self._random.getReal64() < amount:newBits.add(self._random.getUInt32(self._n))else:newBits.add(bit)return newBits", "docstring": "Add noise to pattern.\n\n@param bits (set) Indices of on bits\n@param amount (float) Probability of switching an on bit with a random bit\n\n@return (set) Indices of on bits in noisy pattern", "id": "f17658:c0:m2"} {"signature": "def numbersForBit(self, bit):", "body": "if bit >= self._n:raise IndexError(\"\")numbers = set()for index, pattern in self._patterns.items():if bit in 
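The parsing and serialization helpers above are intended to round-trip; a small sketch of their use. The accepted datetime text and boolean spellings are assumptions consistent with the docstrings, since the format constants themselves are elided in this corpus:

t = parseTimestamp('2014-02-12 14:05:00.123000')  # assumed to match one of the default formats
s = serializeTimestamp(t)                          # back to the default text form
assert parseBool('true') is True and parseBool('false') is False  # assumed spellings
sdr = parseSdr('01101')                            # -> [0, 1, 1, 0, 1]
assert serializeSdr(sdr) == '01101'
assert parseStringList('0 1 1 0 1') == [0, 1, 1, 0, 1]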
pattern:numbers.add(index)return numbers", "docstring": "Return the set of pattern numbers that match a bit.\n\n@param bit (int) Index of bit\n\n@return (set) Indices of numbers", "id": "f17658:c0:m3"} {"signature": "def numberMapForBits(self, bits):", "body": "numberMap = dict()for bit in bits:numbers = self.numbersForBit(bit)for number in numbers:if not number in numberMap:numberMap[number] = set()numberMap[number].add(bit)return numberMap", "docstring": "Return a map from number to matching on bits,\nfor all numbers that match a set of bits.\n\n@param bits (set) Indices of bits\n\n@return (dict) Mapping from number => on bits.", "id": "f17658:c0:m4"} {"signature": "def prettyPrintPattern(self, bits, verbosity=):", "body": "numberMap = self.numberMapForBits(bits)text = \"\"numberList = []numberItems = sorted(iter(numberMap.items()),key=lambda number_bits: len(number_bits[]),reverse=True)for number, bits in numberItems:if verbosity > :strBits = [str(n) for n in bits]numberText = \"\".format(number, \"\".join(strBits))elif verbosity > :numberText = \"\".format(number, len(bits))else:numberText = str(number)numberList.append(numberText)text += \"\".format(\"\".join(numberList))return text", "docstring": "Pretty print a pattern.\n\n@param bits (set) Indices of on bits\n@param verbosity (int) Verbosity level\n\n@return (string) Pretty-printed text", "id": "f17658:c0:m5"} {"signature": "def _generate(self):", "body": "candidates = np.array(list(range(self._n)), np.uint32)for i in range(self._num):self._random.shuffle(candidates)pattern = candidates[:self._getW()]self._patterns[i] = set(pattern)", "docstring": "Generates set of random patterns.", "id": "f17658:c0:m6"} {"signature": "def _getW(self):", "body": "w = self._wif type(w) is list:return w[self._random.getUInt32(len(w))]else:return w", "docstring": "Gets a value of `w` for use in generating a pattern.", "id": "f17658:c0:m7"} {"signature": "def _generate(self):", "body": "n = self._nw = self._wassert type(w) is int, \"\"for i in range(n / w):pattern = set(range(i * w, (i+) * w))self._patterns[i] = pattern", "docstring": "Generates set of consecutive patterns.", "id": "f17658:c1:m0"} {"signature": "def __init__(self,patternMachine,seed=):", "body": "self.patternMachine = patternMachineself._random = Random(seed)", "docstring": "@param patternMachine (PatternMachine) Pattern machine instance", "id": "f17659:c0:m0"} {"signature": "def generateFromNumbers(self, numbers):", "body": "sequence = []for number in numbers:if number == None:sequence.append(number)else:pattern = self.patternMachine.get(number)sequence.append(pattern)return sequence", "docstring": "Generate a sequence from a list of numbers.\n\nNote: Any `None` in the list of numbers is considered a reset.\n\n@param numbers (list) List of numbers\n\n@return (list) Generated sequence", "id": "f17659:c0:m1"} {"signature": "def addSpatialNoise(self, sequence, amount):", "body": "newSequence = []for pattern in sequence:if pattern is not None:pattern = self.patternMachine.addNoise(pattern, amount)newSequence.append(pattern)return newSequence", "docstring": "Add spatial noise to each pattern in the sequence.\n\n@param sequence (list) Sequence\n@param amount (float) Amount of spatial noise\n\n@return (list) Sequence with spatial noise", "id": "f17659:c0:m2"} {"signature": "def prettyPrintSequence(self, sequence, verbosity=):", "body": "text = \"\"for i in xrange(len(sequence)):pattern = sequence[i]if pattern == None:text += \"\"if i < len(sequence) - :text += \"\"else:text += 
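A minimal sketch of the PatternMachine API documented above; parameter values are illustrative and the import path is an assumption:

from nupic.data.generators.pattern_machine import PatternMachine  # assumed module path

pm = PatternMachine(n=1024, w=20, num=100, seed=42)
pattern = pm.get(0)                              # set of on-bit indices (about 20 of 1024)
noisy = pm.addNoise(pattern, 0.1)                # each on bit has a 10% chance of being swapped
owners = pm.numbersForBit(next(iter(pattern)))   # pattern numbers that share this bit
print(pm.prettyPrintPattern(pattern, verbosity=1))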
self.patternMachine.prettyPrintPattern(pattern,verbosity=verbosity)return text", "docstring": "Pretty print a sequence.\n\n@param sequence (list) Sequence\n@param verbosity (int) Verbosity level\n\n@return (string) Pretty-printed text", "id": "f17659:c0:m3"} {"signature": "def generateNumbers(self, numSequences, sequenceLength, sharedRange=None):", "body": "numbers = []if sharedRange:sharedStart, sharedEnd = sharedRangesharedLength = sharedEnd - sharedStartsharedNumbers = range(numSequences * sequenceLength,numSequences * sequenceLength + sharedLength)for i in xrange(numSequences):start = i * sequenceLengthnewNumbers = np.array(range(start, start + sequenceLength), np.uint32)self._random.shuffle(newNumbers)newNumbers = list(newNumbers)if sharedRange is not None:newNumbers[sharedStart:sharedEnd] = sharedNumbersnumbers += newNumbersnumbers.append(None)return numbers", "docstring": "@param numSequences (int) Number of sequences to return,\n separated by None\n@param sequenceLength (int) Length of each sequence\n@param sharedRange (tuple) (start index, end index) indicating range of\n shared subsequence in each sequence\n (None if no shared subsequences)\n@return (list) Numbers representing sequences", "id": "f17659:c0:m4"} {"signature": "def getDescription(self):", "body": "description = {'':self.name, '':[f.name for f in self.fields],'':[f.numRecords for f in self.fields]}return description", "docstring": "Returns a description of the dataset", "id": "f17660:c0:m1"} {"signature": "def setSeed(self, seed):", "body": "rand.seed(seed)np.random.seed(seed)", "docstring": "Set the random seed and the numpy seed\n Parameters:\n --------------------------------------------------------------------\n seed: random seed", "id": "f17660:c0:m2"} {"signature": "def addField(self, name, fieldParams, encoderParams):", "body": "assert fieldParams is not None and'' in fieldParamsdataClassName = fieldParams.pop('')try:dataClass=eval(dataClassName)(fieldParams)except TypeError as e:print((\"\"\"\"\"\" % (dataClass, fieldParams)))raiseencoderParams['']=dataClassencoderParams['']=dataClassNamefieldIndex = self.defineField(name, encoderParams)", "docstring": "Add a single field to the dataset.\n Parameters:\n -------------------------------------------------------------------\n name: The user-specified name of the field\n fieldSpec: A list of one or more dictionaries specifying parameters\n to be used for dataClass initialization. 
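And the SequenceMachine built on top of it; None entries act as resets between sequences (values illustrative, import path assumed, pm is the PatternMachine from the previous sketch):

from nupic.data.generators.sequence_machine import SequenceMachine  # assumed module path

sm = SequenceMachine(pm, seed=1)
numbers = sm.generateNumbers(numSequences=2, sequenceLength=5)  # ends each sequence with None
sequence = sm.generateFromNumbers(numbers)       # list of bit-sets; None marks a reset
noisySeq = sm.addSpatialNoise(sequence, 0.05)
print(sm.prettyPrintSequence(sequence, verbosity=1))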
Each dict must\n contain the key 'type' that specifies a distribution for\n the values in this field\n encoderParams: Parameters for the field encoder", "id": "f17660:c0:m3"} {"signature": "def addMultipleFields(self, fieldsInfo):", "body": "assert all(x in field for x in ['', '', ''] for fieldin fieldsInfo)for spec in fieldsInfo:self.addField(spec.pop(''), spec.pop(''), spec.pop(''))", "docstring": "Add multiple fields to the dataset.\n Parameters:\n -------------------------------------------------------------------\n fieldsInfo: A list of dictionaries, containing a field name, specs for\n the data classes and encoder params for the corresponding\n field.", "id": "f17660:c0:m4"} {"signature": "def defineField(self, name, encoderParams=None):", "body": "self.fields.append(_field(name, encoderParams))return len(self.fields)-", "docstring": "Initialize field using relevant encoder parameters.\n Parameters:\n -------------------------------------------------------------------\n name: Field name\n encoderParams: Parameters for the encoder.\n\n Returns the index of the field", "id": "f17660:c0:m5"} {"signature": "def setFlag(self, index, flag):", "body": "assert len(self.fields)>indexself.fields[index].flag=flag", "docstring": "Set flag for field at index. Flags are special characters such as 'S' for\n sequence or 'T' for timestamp.\n Parameters:\n --------------------------------------------------------------------\n index: index of field whose flag is being set\n flag: special character", "id": "f17660:c0:m6"} {"signature": "def generateRecord(self, record):", "body": "assert(len(record)==len(self.fields))if record is not None:for x in range(len(self.fields)):self.fields[x].addValue(record[x])else:for field in self.fields:field.addValue(field.dataClass.getNext())", "docstring": "Generate a record. Each value is stored in its respective field.\n Parameters:\n --------------------------------------------------------------------\n record: A 1-D array containing as many values as the number of fields\n fields: An object of the class field that specifies the characteristics\n of each value in the record\n Assertion:\n --------------------------------------------------------------------\n len(record)==len(fields): A value for each field must be specified.\n Replace missing values of any type by\n SENTINEL_VALUE_FOR_MISSING_DATA\n\n This method supports external classes but not combination of classes.", "id": "f17660:c0:m7"} {"signature": "def generateRecords(self, records):", "body": "if self.verbosity>: print('', len(records), '')for record in records:self.generateRecord(record)", "docstring": "Generate multiple records. 
Refer to definition for generateRecord", "id": "f17660:c0:m8"} {"signature": "def getRecord(self, n=None):", "body": "if n is None:assert len(self.fields)>n = self.fields[].numRecords-assert (all(field.numRecords>n for field in self.fields))record = [field.values[n] for field in self.fields]return record", "docstring": "Returns the nth record", "id": "f17660:c0:m9"} {"signature": "def getAllRecords(self):", "body": "values=[]numRecords = self.fields[].numRecordsassert (all(field.numRecords==numRecords for field in self.fields))for x in range(numRecords):values.append(self.getRecord(x))return values", "docstring": "Returns all the records", "id": "f17660:c0:m10"} {"signature": "def encodeRecord(self, record, toBeAdded=True):", "body": "encoding=[self.fields[i].encodeValue(record[i], toBeAdded) for i inrange(len(self.fields))]return encoding", "docstring": "Encode a record as a sparse distributed representation\n Parameters:\n --------------------------------------------------------------------\n record: Record to be encoded\n toBeAdded: Whether the encodings corresponding to the record are added to\n the corresponding fields", "id": "f17660:c0:m11"} {"signature": "def encodeAllRecords(self, records=None, toBeAdded=True):", "body": "if records is None:records = self.getAllRecords()if self.verbosity>: print('', len(records), '')encodings = [self.encodeRecord(record, toBeAdded) for record in records]return encodings", "docstring": "Encodes a list of records.\n Parameters:\n --------------------------------------------------------------------\n records: One or more records. (i,j)th element of this 2D array\n specifies the value at field j of record i.\n If unspecified, records previously generated and stored are\n used.\n toBeAdded: Whether the encodings corresponding to the record are added to\n the corresponding fields", "id": "f17660:c0:m12"} {"signature": "def addValueToField(self, i, value=None):", "body": "assert(len(self.fields)>i)if value is None:value = self.fields[i].dataClass.getNext()self.fields[i].addValue(value)return valueelse: self.fields[i].addValue(value)", "docstring": "Add 'value' to the field i.\n Parameters:\n --------------------------------------------------------------------\n value: value to be added\n i: value is added to field i", "id": "f17660:c0:m13"} {"signature": "def addValuesToField(self, i, numValues):", "body": "assert(len(self.fields)>i)values = [self.addValueToField(i) for n in range(numValues)]return values", "docstring": "Add values to the field i.", "id": "f17660:c0:m14"} {"signature": "def getSDRforValue(self, i, j):", "body": "assert len(self.fields)>iassert self.fields[i].numRecords>jencoding = self.fields[i].encodings[j]return encoding", "docstring": "Returns the sdr for jth value at column i", "id": "f17660:c0:m15"} {"signature": "def getZeroedOutEncoding(self, n):", "body": "assert all(field.numRecords>n for field in self.fields)encoding = np.concatenate([field.encoder.encode(SENTINEL_VALUE_FOR_MISSING_DATA)if field.isPredictedField else field.encodings[n] for field in self.fields])return encoding", "docstring": "Returns the nth encoding with the predictedField zeroed out", "id": "f17660:c0:m16"} {"signature": "def getTotaln(self):", "body": "n = sum([field.n for field in self.fields])return n", "docstring": "Returns the cumulative n for all the fields in the dataset", "id": "f17660:c0:m17"} {"signature": "def getTotalw(self):", "body": "w = sum([field.w for field in self.fields])return w", "docstring": "Returns the cumulative w for all the fields 
in the dataset", "id": "f17660:c0:m18"} {"signature": "def getEncoding(self, n):", "body": "assert (all(field.numEncodings>n for field in self.fields))encoding = np.concatenate([field.encodings[n] for field in self.fields])return encoding", "docstring": "Returns the nth encoding", "id": "f17660:c0:m19"} {"signature": "def getAllEncodings(self):", "body": "numEncodings=self.fields[].numEncodingsassert (all(field.numEncodings==numEncodings for field in self.fields))encodings = [self.getEncoding(index) for index in range(numEncodings)]return encodings", "docstring": "Returns encodings for all the records", "id": "f17660:c0:m20"} {"signature": "def getAllFieldNames(self):", "body": "names = [field.name for field in self.fields]return names", "docstring": "Returns all field names", "id": "f17660:c0:m21"} {"signature": "def getAllFlags(self):", "body": "flags = [field.flag for field in self.fields]return flags", "docstring": "Returns flags for all fields", "id": "f17660:c0:m22"} {"signature": "def getAllDataTypes(self):", "body": "dataTypes = [field.dataType for field in self.fields]return dataTypes", "docstring": "Returns data types for all fields", "id": "f17660:c0:m23"} {"signature": "def getFieldDescriptions(self):", "body": "descriptions = [field.getDescription() for field in self.fields]return descriptions", "docstring": "Returns descriptions for all fields", "id": "f17660:c0:m24"} {"signature": "def saveRecords(self, path=''):", "body": "numRecords = self.fields[].numRecordsassert (all(field.numRecords==numRecords for field in self.fields))import csvwith open(path+'', '') as f:writer = csv.writer(f)writer.writerow(self.getAllFieldNames())writer.writerow(self.getAllDataTypes())writer.writerow(self.getAllFlags())writer.writerows(self.getAllRecords())if self.verbosity>:print('', numRecords,'',path,'')", "docstring": "Export all the records into a csv file in numenta format.\n\n Example header format:\n fieldName1 fieldName2 fieldName3\n date string float\n T S\n\n Parameters:\n --------------------------------------------------------------------\n path: Relative path of the file to which the records are to be exported", "id": "f17660:c0:m25"} {"signature": "def removeAllRecords(self):", "body": "for field in self.fields:field.encodings, field.values=[], []field.numRecords, field.numEncodings= (, )", "docstring": "Deletes all the values in the dataset", "id": "f17660:c0:m26"} {"signature": "def __init__(self, name, encoderSpec):", "body": "self.name=nameself.n, self.w = (, )self.encoderType,self.dataType,self.dataClassName = (None, None, None)self.flag=''self.isPredictedField=Falseif encoderSpec is not None:if '' in encoderSpec: self.n = encoderSpec.pop('')if '' in encoderSpec: self.w = encoderSpec.pop('')if '' in encoderSpec: self.flag = encoderSpec.pop('')if '' in encoderSpec: self.isPredictedField= encoderSpec.pop('')if '' in encoderSpec: self.dataClass= encoderSpec.pop('')if '' in encoderSpec: self.dataClassName= encoderSpec.pop('')if '' in encoderSpec: self.dataType = encoderSpec.pop('')if '' in encoderSpec: self.encoderType= encoderSpec.pop('')if self.dataType is None and self.encoderType is None:raise RuntimeError('')assert(self.dataType is not None or self.encoderType is not None)if self.dataType is None or self.encoderType is None:self._setTypes(encoderSpec)self._initializeEncoders(encoderSpec)self.encodings=[]self.values=[]self.numRecords=self.numEncodings=", "docstring": "Initialize a field with various parameters such as n, w, flag, dataType,\n encoderType, and tag predicted 
field.", "id": "f17660:c1:m0"} {"signature": "def addValues(self, values):", "body": "for v in values:self.addValue(v)", "docstring": "Add values to the field", "id": "f17660:c1:m2"} {"signature": "def addValue(self, value):", "body": "self.values.append(value)self.numRecords+=", "docstring": "Add value to the field", "id": "f17660:c1:m3"} {"signature": "def encodeValue(self, value, toBeAdded=True):", "body": "encodedValue = np.array(self.encoder.encode(value), dtype=realDType)if toBeAdded:self.encodings.append(encodedValue)self.numEncodings+=return encodedValue", "docstring": "Value is encoded as a sdr using the encoding parameters of the Field", "id": "f17660:c1:m4"} {"signature": "def _setTypes(self, encoderSpec):", "body": "if self.encoderType is None:if self.dataType in ['','']:self.encoderType=''elif self.dataType=='':self.encoderType=''elif self.dataType in ['', '']:self.encoderType=''if self.dataType is None:if self.encoderType in ['','']:self.dataType=''elif self.encoderType in ['', '']:self.dataType=''elif self.encoderType in ['', '']:self.dataType=''", "docstring": "Set up the dataTypes and initialize encoders", "id": "f17660:c1:m5"} {"signature": "def _initializeEncoders(self, encoderSpec):", "body": "if self.encoderType in ['', '']:if '' in encoderSpec:self.minval = encoderSpec.pop('')else: self.minval=Noneif '' in encoderSpec:self.maxval = encoderSpec.pop('')else: self.maxval = Noneself.encoder=adaptive_scalar.AdaptiveScalarEncoder(name='',w=self.w, n=self.n, minval=self.minval, maxval=self.maxval, periodic=False, forced=True)elif self.encoderType=='':self.encoder=sdr_category.SDRCategoryEncoder(name='',w=self.w, n=self.n)elif self.encoderType in ['', '']:self.encoder=date.DateEncoder(name='')else:raise RuntimeError('''')", "docstring": "Initialize the encoders", "id": "f17660:c1:m6"} {"signature": "def add(reader, writer, column, start, stop, value):", "body": "for i, row in enumerate(reader):if i >= start and i <= stop:row[column] = type(value)(row[column]) + valuewriter.appendRecord(row)", "docstring": "Adds a value over a range of rows.\n\n Args:\n reader: A FileRecordStream object with input data.\n writer: A FileRecordStream object to write output data to.\n column: The column of data to modify.\n start: The first row in the range to modify.\n end: The last row in the range to modify.\n value: The value to add.", "id": "f17661:m0"} {"signature": "def scale(reader, writer, column, start, stop, multiple):", "body": "for i, row in enumerate(reader):if i >= start and i <= stop:row[column] = type(multiple)(row[column]) * multiplewriter.appendRecord(row)", "docstring": "Multiplies a value over a range of rows.\n\n Args:\n reader: A FileRecordStream object with input data.\n writer: A FileRecordStream object to write output data to.\n column: The column of data to modify.\n start: The first row in the range to modify.\n end: The last row in the range to modify.\n multiple: The value to scale/multiply by.", "id": "f17661:m1"} {"signature": "def copy(reader, writer, start, stop, insertLocation=None, tsCol=None):", "body": "assert stop >= startstartRows = []copyRows = []ts = Noneinc = Noneif tsCol is None:tsCol = reader.getTimestampFieldIdx()for i, row in enumerate(reader):if ts is None:ts = row[tsCol]elif inc is None:inc = row[tsCol] - tsif i >= start and i <= stop:copyRows.append(row)startRows.append(row)if insertLocation is None:insertLocation = stop + startRows[insertLocation:insertLocation] = copyRowsfor row in startRows:row[tsCol] = tswriter.appendRecord(row)ts += inc", 
"docstring": "Copies a range of values to a new location in the data set.\n\n Args:\n reader: A FileRecordStream object with input data.\n writer: A FileRecordStream object to write output data to.\n start: The first row in the range to copy.\n stop: The last row in the range to copy.\n insertLocation: The location to insert the copied range. If not specified,\n the range is inserted immediately following itself.", "id": "f17661:m2"} {"signature": "def sample(reader, writer, n, start=None, stop=None, tsCol=None,writeSampleOnly=True):", "body": "rows = list(reader)if tsCol is not None:ts = rows[][tsCol]inc = rows[][tsCol] - tsif start is None:start = if stop is None:stop = len(rows) - initialN = stop - start + numDeletes = initialN - nfor i in range(numDeletes):delIndex = random.randint(start, stop - i)del rows[delIndex]if writeSampleOnly:rows = rows[start:start + n]if tsCol is not None:ts = rows[][tsCol]for row in rows:if tsCol is not None:row[tsCol] = tsts += incwriter.appendRecord(row)", "docstring": "Samples n rows.\n\n Args:\n reader: A FileRecordStream object with input data.\n writer: A FileRecordStream object to write output data to.\n n: The number of elements to sample.\n start: The first row in the range to sample from.\n stop: The last row in the range to sample from.\n tsCol: If specified, the timestamp column to update.\n writeSampleOnly: If False, the rows before start are written before the\n sample and the rows after stop are written after the sample.", "id": "f17661:m3"} {"signature": "def __init__(self):", "body": "", "docstring": "A distribution is a set of values with certain statistical properties\n\n Methods/properties that must be implemented by subclasses\n - getNext() -- Returns the next value for the distribution\n - getData(n) -- Returns n values for the distribution\n - getDescription() -- Returns a dict of parameters pertinent to the\n distribution, if any as well as state variables.", "id": "f17663:c0:m0"} {"signature": "def getNext(self):", "body": "raise Exception(\"\")", "docstring": "Returns the next value of the disribution using knowledge about the\n current state of the distribution as stored in numValues.", "id": "f17663:c0:m1"} {"signature": "def getData(self, n):", "body": "records = [self.getNext() for x in range(n)]return records", "docstring": "Returns the next n values for the distribution as a list.", "id": "f17663:c0:m2"} {"signature": "def getDescription(self):", "body": "raise Exception(\"\")", "docstring": "Returns a dict of parameters pertinent to the distribution (if any) as\n well as state variables such as numValues.", "id": "f17663:c0:m3"} {"signature": "def _cacheSequenceInfoType(self):", "body": "hasReset = self.resetFieldName is not NonehasSequenceId = self.sequenceIdFieldName is not Noneif hasReset and not hasSequenceId:self._sequenceInfoType = self.SEQUENCEINFO_RESET_ONLYself._prevSequenceId = elif not hasReset and hasSequenceId:self._sequenceInfoType = self.SEQUENCEINFO_SEQUENCEID_ONLYself._prevSequenceId = Noneelif hasReset and hasSequenceId:self._sequenceInfoType = self.SEQUENCEINFO_BOTHelse:self._sequenceInfoType = self.SEQUENCEINFO_NONE", "docstring": "Figure out whether reset, sequenceId,\n both or neither are present in the data.\n Compute once instead of every time.\n\n Taken from filesource.py", "id": "f17664:c0:m1"} {"signature": "def shift(self, modelResult):", "body": "inferencesToWrite = {}if self._inferenceBuffer is None:maxDelay = InferenceElement.getMaxDelay(modelResult.inferences)self._inferenceBuffer = 
collections.deque(maxlen=maxDelay + )self._inferenceBuffer.appendleft(copy.deepcopy(modelResult.inferences))for inferenceElement, inference in modelResult.inferences.iteritems():if isinstance(inference, dict):inferencesToWrite[inferenceElement] = {}for key, _ in inference.iteritems():delay = InferenceElement.getTemporalDelay(inferenceElement, key)if len(self._inferenceBuffer) > delay:prevInference = self._inferenceBuffer[delay][inferenceElement][key]inferencesToWrite[inferenceElement][key] = prevInferenceelse:inferencesToWrite[inferenceElement][key] = Noneelse:delay = InferenceElement.getTemporalDelay(inferenceElement)if len(self._inferenceBuffer) > delay:inferencesToWrite[inferenceElement] = (self._inferenceBuffer[delay][inferenceElement])else:if type(inference) in (list, tuple):inferencesToWrite[inferenceElement] = [None] * len(inference)else:inferencesToWrite[inferenceElement] = NoneshiftedResult = ModelResult(rawInput=modelResult.rawInput,sensorInput=modelResult.sensorInput,inferences=inferencesToWrite,metrics=modelResult.metrics,predictedFieldIdx=modelResult.predictedFieldIdx,predictedFieldName=modelResult.predictedFieldName)return shiftedResult", "docstring": "Shift the model result and return the new instance.\n\n Queues up the T(i+1) prediction value and emits a T(i)\n input/prediction pair, if possible. E.g., if the previous T(i-1)\n iteration was learn-only, then we would not have a T(i) prediction in our\n FIFO and would not be able to emit a meaningful input/prediction pair.\n\n :param modelResult: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult`\n instance to shift.\n :return: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult` instance that\n has been shifted", "id": "f17665:c0:m1"} {"signature": "def validate(value, **kwds):", "body": "assert len(list(kwds.keys())) >= assert '' in kwds or '' in kwdsschemaDict = Noneif '' in kwds:schemaPath = kwds.pop('')schemaDict = loadJsonValueFromFile(schemaPath)elif '' in kwds:schemaDict = kwds.pop('')try:validictory.validate(value, schemaDict, **kwds)except validictory.ValidationError as e:raise ValidationError(e)", "docstring": "Validate a python value against json schema:\n validate(value, schemaPath)\n validate(value, schemaDict)\n\n value: python object to validate against the schema\n\n The json schema may be specified either as a path of the file containing\n the json schema or as a python dictionary using one of the\n following keywords as arguments:\n schemaPath: Path of file containing the json schema object.\n schemaDict: Python dictionary containing the json schema object\n\n Returns: nothing\n\n Raises:\n ValidationError when value fails json validation", "id": "f17666:m0"} {"signature": "def loadJsonValueFromFile(inputFilePath):", "body": "with open(inputFilePath) as fileObj:value = json.load(fileObj)return value", "docstring": "Loads a json value from a file and converts it to the corresponding python\n object.\n\n inputFilePath:\n Path of the json file;\n\n Returns:\n python value that represents the loaded json value", "id": "f17666:m1"} {"signature": "@classmethod@abstractmethoddef getSchema(cls):", "body": "pass", "docstring": "Get Cap'n Proto schema.\n\n..warning: This is an abstract method. Per abc protocol, attempts to subclass\n without overriding will fail.\n\n@returns Cap'n Proto schema", "id": "f17667:c0:m0"} {"signature": "@classmethod@abstractmethoddef read(cls, proto):", "body": "pass", "docstring": "Create a new object initialized from Cap'n Proto obj.\n\nNote: This is an abstract method. 
Per abc protocol, attempts to subclass\nwithout overriding will fail.\n\n:param proto: Cap'n Proto obj\n:return: Obj initialized from proto", "id": "f17667:c0:m1"} {"signature": "@abstractmethoddef write(self, proto):", "body": "pass", "docstring": "Write obj instance to Cap'n Proto object\n\n.. warning: This is an abstract method. Per abc protocol, attempts to\n subclass without overriding will fail.\n\n:param proto: Cap'n Proto obj", "id": "f17667:c0:m2"} {"signature": "@classmethoddef readFromFile(cls, f, packed=True):", "body": "schema = cls.getSchema()if packed:proto = schema.read_packed(f)else:proto = schema.read(f)return cls.read(proto)", "docstring": "Read serialized object from file.\n\n:param f: input file\n:param packed: If true, will assume content is packed\n:return: first-class instance initialized from proto obj", "id": "f17667:c0:m3"} {"signature": "def writeToFile(self, f, packed=True):", "body": "schema = self.getSchema()proto = schema.new_message()self.write(proto)if packed:proto.write_packed(f)else:proto.write(f)", "docstring": "Write serialized object to file.\n\n:param f: output file\n:param packed: If true, will pack contents.", "id": "f17667:c0:m4"} {"signature": "def export(self):", "body": "graph = nx.MultiDiGraph()regions = self.network.getRegions()for idx in xrange(regions.getCount()):regionPair = regions.getByIndex(idx)regionName = regionPair[]graph.add_node(regionName, label=regionName)for linkName, link in self.network.getLinks():graph.add_edge(link.getSrcRegionName(),link.getDestRegionName(),src=link.getSrcOutputName(),dest=link.getDestInputName())return graph", "docstring": "Exports a network as a networkx MultiDiGraph intermediate representation\nsuitable for visualization.\n\n:return: networkx MultiDiGraph", "id": "f17668:c0:m1"} {"signature": "def render(self, renderer=DEFAULT_RENDERER):", "body": "renderer().render(self.export())", "docstring": "Render network. 
Default is \n:class:`~nupic.frameworks.viz.dot_renderer.DotRenderer`.\n\n:param renderer: Constructor parameter to a \"renderer\" implementation.\n Return value for which must have a \"render\" method that accepts a \n single argument (a networkx graph instance).", "id": "f17668:c0:m2"} {"signature": "def __init__(self, *args, **kwargs):", "body": "self.supported_nodes = tuple(set(self.supported_nodes) -self.blacklisted_nodes)asteval.Interpreter.__init__(self, *args, **kwargs)", "docstring": "Initialize interpreter with blacklisted nodes removed from supported\n nodes.", "id": "f17675:c0:m0"} {"signature": "def getModule(metricSpec):", "body": "metricName = metricSpec.metricif metricName == '':return MetricRMSE(metricSpec)if metricName == '':return MetricNRMSE(metricSpec)elif metricName == '':return MetricAAE(metricSpec)elif metricName == '':return MetricAccuracy(metricSpec)elif metricName == '':return MetricAveError(metricSpec)elif metricName == '':return MetricTrivial(metricSpec)elif metricName == '':return MetricTwoGram(metricSpec)elif metricName == '':return MetricMovingMean(metricSpec)elif metricName == '':return MetricMovingMode(metricSpec)elif metricName == '':return MetricNegAUC(metricSpec)elif metricName == '':return CustomErrorMetric(metricSpec)elif metricName == '':return MetricMultiStep(metricSpec)elif metricName == '':return MetricMultiStepProbability(metricSpec)elif metricName == '':return MetricMultiStepAAE(metricSpec)elif metricName == '':return MetricMultiStepAveError(metricSpec)elif metricName == '':return MetricPassThruPrediction(metricSpec)elif metricName == '':return MetricAltMAPE(metricSpec)elif metricName == '':return MetricMAPE(metricSpec)elif metricName == '':return MetricMulti(metricSpec)elif metricName == '':return MetricNegativeLogLikelihood(metricSpec)else:raise Exception(\"\" % metricName)", "docstring": "Factory method to return an appropriate :class:`MetricsIface` module.\n\n- ``rmse``: :class:`MetricRMSE`\n- ``nrmse``: :class:`MetricNRMSE`\n- ``aae``: :class:`MetricAAE`\n- ``acc``: :class:`MetricAccuracy`\n- ``avg_err``: :class:`MetricAveError`\n- ``trivial``: :class:`MetricTrivial`\n- ``two_gram``: :class:`MetricTwoGram`\n- ``moving_mean``: :class:`MetricMovingMean`\n- ``moving_mode``: :class:`MetricMovingMode`\n- ``neg_auc``: :class:`MetricNegAUC`\n- ``custom_error_metric``: :class:`CustomErrorMetric`\n- ``multiStep``: :class:`MetricMultiStep`\n- ``ms_aae``: :class:`MetricMultiStepAAE`\n- ``ms_avg_err``: :class:`MetricMultiStepAveError`\n- ``passThruPrediction``: :class:`MetricPassThruPrediction`\n- ``altMAPE``: :class:`MetricAltMAPE`\n- ``MAPE``: :class:`MetricMAPE`\n- ``multi``: :class:`MetricMulti`\n- ``negativeLogLikelihood``: :class:`MetricNegativeLogLikelihood`\n\n:param metricSpec: (:class:`MetricSpec`) metric to find module for. 
\n ``metricSpec.metric`` must be in the list above.\n\n:returns: (:class:`AggregateMetric`) an appropriate metric module", "id": "f17676:m0"} {"signature": "def getLabel(self, inferenceType=None):", "body": "result = []if inferenceType is not None:result.append(InferenceType.getLabel(inferenceType))result.append(self.inferenceElement)result.append(self.metric)params = self.paramsif params is not None:sortedParams= list(params.keys())sortedParams.sort()for param in sortedParams:if param in ('', '', ''):continuevalue = params[param]if isinstance(value, str):result.extend([\"\"% (param, value)])else:result.extend([\"\"% (param, value)])if self.field:result.append(\"\"% (self.field) )return self._LABEL_SEPARATOR.join(result)", "docstring": "Helper method that generates a unique label for a :class:`MetricSpec` / \n:class:`~nupic.frameworks.opf.opf_utils.InferenceType` pair. The label is \nformatted as follows:\n\n::\n\n ::(paramName=value)*:field=\n\nFor example:\n\n:: \n\n classification:aae:paramA=10.2:paramB=20:window=100:field=pounds\n\n:returns: (string) label for inference type", "id": "f17676:c0:m2"} {"signature": "@classmethoddef getInferenceTypeFromLabel(cls, label):", "body": "infType, _, _= label.partition(cls._LABEL_SEPARATOR)if not InferenceType.validate(infType):return Nonereturn infType", "docstring": "Extracts the PredictionKind (temporal vs. nontemporal) from the given\nmetric label.\n\n:param label: (string) for a metric spec generated by \n :meth:`getMetricLabel`\n\n:returns: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`)", "id": "f17676:c0:m3"} {"signature": "def __init__(self, windowSize = None):", "body": "self._windowSize = windowSizeself._countDict = dict()self._history = deque([])", "docstring": ":param windowSize: The number of values that are used to compute the\n moving average", "id": "f17676:c1:m0"} {"signature": "@abstractmethoddef addInstance(self, groundTruth, prediction, record = None, result = None):", "body": "", "docstring": "Add one instance consisting of ground truth and a prediction.\n\n:param groundTruth:\n The actual measured value at the current timestep\n\n:param prediction:\n The value predicted by the network at the current timestep\n\n:param record: the raw input record as fed to \n :meth:`~nupic.frameworks.opf.model.Model.run` by the user. The \n typical usage is to feed a record to that method and get a \n :class:`~nupic.frameworks.opf.opf_utils.ModelResult`. Then you pass \n :class:`~nupic.frameworks.opf.opf_utils.ModelResult`.rawInput into \n this function as the record parameter.\n\n:param result: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`) the\n result of running a row of data through an OPF model\n\n:returns:\n The average error as computed over the metric's window size", "id": "f17676:c2:m1"} {"signature": "@abstractmethoddef getMetric(self):", "body": "", "docstring": "``stats`` is expected to contain further information relevant to the given \nmetric, for example the number of timesteps represented in the current \nmeasurement. 
All stats are implementation defined, and ``stats`` can be \n``None``.\n\n:returns: (dict) representing data from the metric\n ::\n\n {value : , \"stats\" : { : ...}}", "id": "f17676:c2:m2"} {"signature": "def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result):", "body": "", "docstring": "Updates the accumulated error given the prediction and the\nground truth.\n\n:param groundTruth: Actual value that is observed for the current timestep\n\n:param prediction: Value predicted by the network for the given timestep\n\n:param accumulatedError: The total accumulated score from the previous\n predictions (possibly over some finite window)\n\n:param historyBuffer: A buffer of the last ground truth values\n that have been observed.\n\n If historyBuffer = None, it means that no history is being kept.\n\n:param result: An ModelResult class (see opf_utils.py), used for advanced\n metric calculation (e.g., MetricNegativeLogLikelihood)\n\n:returns: The new accumulated error. That is:\n\n .. code-block:: python\n\n self.accumulatedError = self.accumulate(\n groundTruth, predictions, accumulatedError\n )\n\n ``historyBuffer`` should also be updated in this method.\n ``self.spec.params[\"window\"]`` indicates the maximum size of the window.", "id": "f17676:c3:m0"} {"signature": "def aggregate(self, accumulatedError, historyBuffer, steps):", "body": "", "docstring": "Updates the final aggregated score error given the prediction and the ground \ntruth.\n\n:param accumulatedError: The total accumulated score from the previous\n predictions (possibly over some finite window)\n\n:param historyBuffer: A buffer of the last ground truth values\n that have been observed. If ``historyBuffer`` = None, it means that \n no history is being kept.\n\n:param steps: (int) The total number of (groundTruth, prediction) pairs that \n have been passed to the metric. This does not include pairs where \n ``groundTruth = SENTINEL_VALUE_FOR_MISSING_DATA``\n\n:returns: The new aggregate (final) error measure.", "id": "f17676:c3:m1"} {"signature": "def __init__(self, metricSpec):", "body": "self.id = Noneself.verbosity = self.window = -self.history = Noneself.accumulatedError = self.aggregateError = Noneself.steps = self.spec = metricSpecself.disabled = Falseself._predictionSteps = []self._groundTruthHistory = deque([])self._subErrorMetrics = Noneself._maxRecords = Noneif metricSpec is not None and metricSpec.params is not None:self.id = metricSpec.params.get('', None)self._predictionSteps = metricSpec.params.get('', [])if not hasattr(self._predictionSteps, ''):self._predictionSteps = [self._predictionSteps]self.verbosity = metricSpec.params.get('', )self._maxRecords = metricSpec.params.get('', None)if '' in metricSpec.params:assert metricSpec.params[''] >= self.history = deque([])self.window = metricSpec.params['']if '' in metricSpec.params:self._subErrorMetrics = []for step in self._predictionSteps:subSpec = copy.deepcopy(metricSpec)subSpec.params.pop('', None)subSpec.params.pop('')subSpec.metric = metricSpec.params['']self._subErrorMetrics.append(getModule(subSpec))", "docstring": "Initialize this metric\n\n If the params contains the key 'errorMetric', then that is the name of\n another metric to which we will pass a modified groundTruth and prediction\n to from our addInstance() method. 
For example, we may compute a moving mean\n on the groundTruth and then pass that to the AbsoluteAveError metric", "id": "f17676:c3:m2"} {"signature": "def _getShiftedGroundTruth(self, groundTruth):", "body": "self._groundTruthHistory.append(groundTruth)assert (len(self._predictionSteps) == )if len(self._groundTruthHistory) > self._predictionSteps[]:return self._groundTruthHistory.popleft()else:if hasattr(groundTruth, ''):return [None] * len(groundTruth)else:return None", "docstring": "Utility function that saves the passed in groundTruth into a local\n history buffer, and returns the groundTruth from self._predictionSteps ago,\n where self._predictionSteps is defined by the 'steps' parameter.\n This can be called from the beginning of a derived class's addInstance()\n before it passes groundTruth and prediction onto accumulate().", "id": "f17676:c3:m3"} {"signature": "def addInstance(self, groundTruth, prediction, record = None, result = None):", "body": "self.value = self.avg(prediction)", "docstring": "Compute and store metric value", "id": "f17676:c10:m1"} {"signature": "def getMetric(self):", "body": "return {\"\": self.value}", "docstring": "Return the metric value", "id": "f17676:c10:m2"} {"signature": "def mostLikely(self, pred):", "body": "if len(pred) == :return list(pred.keys())[]mostLikelyOutcome = NonemaxProbability = for prediction, probability in list(pred.items()):if probability > maxProbability:mostLikelyOutcome = predictionmaxProbability = probabilityreturn mostLikelyOutcome", "docstring": "Helper function to return a scalar value representing the most\n likely outcome given a probability distribution", "id": "f17676:c12:m7"} {"signature": "def expValue(self, pred):", "body": "if len(pred) == :return list(pred.keys())[]return sum([x*p for x,p in list(pred.items())])", "docstring": "Helper function to return a scalar value representing the expected\n value of a probability distribution", "id": "f17676:c12:m8"} {"signature": "def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):", "body": "if self.disabled:return if historyBuffer is not None:historyBuffer.append((groundTruth, prediction[]))if len(historyBuffer) > self.spec.params[\"\"] :historyBuffer.popleft()return ", "docstring": "Accumulate history of groundTruth and \"prediction\" values.\n\nFor this metric, groundTruth is the actual category and \"prediction\" is a\ndict containing one top-level item with a key of 0 (meaning this is the\n0-step classification) and a value which is another dict, which contains the\nprobability for each category as output from the classifier. 
For example,\nthis is what \"prediction\" would be if the classifier said that category 0\nhad a 0.6 probability and category 1 had a 0.4 probability: {0:0.6, 1: 0.4}", "id": "f17676:c18:m0"} {"signature": "def __init__(self, metricSpec):", "body": "raise ValueError(\"\"\"\")", "docstring": "MetricMulti constructor using metricSpec is not allowed.", "id": "f17676:c21:m0"} {"signature": "def __init__(self, weights, metrics, window=None):", "body": "if (weights is None or not isinstance(weights, list) ornot len(weights) > ornot isinstance(weights[], float)):raise ValueError(\"\")self.weights = weightsif (metrics is None or not isinstance(metrics, list) ornot len(metrics) > ornot isinstance(metrics[], MetricsIface)):raise ValueError(\"\")self.metrics = metricsif window is not None:self.movingAvg = MovingAverage(windowSize=window)else:self.movingAvg = None", "docstring": "MetricMulti\n @param weights - [list of floats] used as weights\n @param metrics - [list of submetrics]\n @param window - (opt) window size for moving average, or None when disabled", "id": "f17676:c21:m1"} {"signature": "def htmPredictionModelControlEnableSPLearningCb(htmPredictionModel):", "body": "assert isinstance(htmPredictionModel, HTMPredictionModel)htmPredictionModel._getSPRegion().setParameter('', True)return", "docstring": "Enables learning in the HTMPredictionModel's Spatial Pooler\n\n See also htmPredictionModelControlDisableSPLearningCb.\n\n htmPredictionModel: pointer to a HTMPredictionModel instance\n\n Returns: nothing", "id": "f17677:m0"} {"signature": "def htmPredictionModelControlDisableSPLearningCb(htmPredictionModel):", "body": "assert isinstance(htmPredictionModel, HTMPredictionModel)htmPredictionModel._getSPRegion().setParameter('', False)return", "docstring": "Disables learning in the HTMPredictionModel's Spatial Pooler, while\n retaining the ability to re-enable SP learning in the future.\n\n See also: htmPredictionModelControlEnableSPLearningCb.\n See also: model_callbacks.modelControlFinishLearningCb.\n\n htmPredictionModel: pointer to a HTMPredictionModel instance\n\n Returns: nothing", "id": "f17677:m1"} {"signature": "def htmPredictionModelControlEnableTPLearningCb(htmPredictionModel):", "body": "assert isinstance(htmPredictionModel, HTMPredictionModel)htmPredictionModel._getTPRegion().setParameter('', True)return", "docstring": "Enables learning in the HTMPredictionModel's Temporal Pooler\n\n See also htmPredictionModelControlDisableTPLearningCb.\n\n htmPredictionModel: pointer to a HTMPredictionModel instance\n\n Returns: nothing", "id": "f17677:m2"} {"signature": "def htmPredictionModelControlDisableTPLearningCb(htmPredictionModel):", "body": "assert isinstance(htmPredictionModel, HTMPredictionModel)htmPredictionModel._getTPRegion().setParameter('', False)return", "docstring": "Disables learning in the HTMPredictionModel's Temporal Pooler, while\n retaining the ability to re-enable TM learning in the future.\n\n See also: htmPredictionModelControlEnableTPLearningCb.\n See also: model_callbacks.modelControlFinishLearningCb.\n\n htmPredictionModel: pointer to a HTMPredictionModel instance\n\n Returns: nothing", "id": "f17677:m3"} {"signature": "def __init__(self, filePath):", "body": "self.__filePath = filePathreturn", "docstring": "filePath: path of file where SP __init__ args are to be saved", "id": "f17677:c0:m0"} {"signature": "def __init__(self, filePath):", "body": "self.__filePath = filePathreturn", "docstring": "filePath: path of file where TM __init__ args are to be saved", "id": 
"f17677:c1:m0"} {"signature": "def runExperiment(args, model=None):", "body": "opt = _parseCommandLineOptions(args)model = _runExperimentImpl(opt, model)return model", "docstring": "Run a single OPF experiment.\n\n.. note:: The caller is responsible for initializing python logging before\n calling this function (e.g., import :mod:`nupic.support`;\n :meth:`nupic.support.initLogging`)\n\nSee also: :meth:`.initExperimentPrng`.\n\n:param args: (string) Experiment command-line args list. Too see all options,\n run with ``--help``:\n\n .. code-block:: text\n\n Options:\n -h, --help show this help message and exit\n -c Create a model and save it under the given \n name, but don't run it\n --listCheckpoints List all available checkpoints\n --listTasks List all task labels in description.py\n --load= Load a model from the given and run it.\n Run with --listCheckpoints flag for more details.\n --newSerialization Use new capnproto serialization\n --tasks Run the tasks with the given TASK LABELS in the order\n they are given. Either end of arg-list, or a\n standalone dot ('.') arg or the next short or long\n option name (-a or --blah) terminates the list. NOTE:\n FAILS TO RECOGNIZE task label names with one or more\n leading dashes. [default: run all of the tasks in\n description.py]\n --testMode Reduce iteration count for testing\n --noCheckpoint Don't checkpoint the model after running each task.\n\n:param model: (:class:`~nupic.frameworks.opf.model.Model`) For testing, may\n pass in an existing OPF Model to use instead of creating a new one.\n\n:returns: (:class:`~nupic.frameworks.opf.model.Model`)\n reference to OPF Model instance that was constructed (this\n is provided to aid with debugging) or None, if none was\n created.", "id": "f17678:m0"} {"signature": "def initExperimentPrng():", "body": "seed = random.seed(seed)numpy.random.seed(seed)", "docstring": "Initialize PRNGs that may be used by other modules in the experiment stack.\n\n .. 
note:: User may call this function to initialize PRNGs that are used by the\n experiment stack before calling runExperiment(), unless user has its own\n own logic for initializing these PRNGs.", "id": "f17678:m1"} {"signature": "def _parseCommandLineOptions(args):", "body": "usageStr = (\"\"\"\"\"\")parser = optparse.OptionParser(usage=usageStr)parser.add_option(\"\",help=\"\"\"\",dest=\"\",action=\"\", type=\"\", default=\"\",metavar=\"\")parser.add_option(\"\",help=\"\",dest=\"\",action=\"\", default=False)parser.add_option(\"\",help=\"\",dest=\"\",action=\"\", default=False)parser.add_option(\"\",help=\"\"\"\",dest=\"\",action=\"\", type=\"\", default=\"\",metavar=\"\")parser.add_option(\"\",help=\"\",dest=\"\",action=\"\", default=False)parser.add_option(\"\",help=\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\",dest=\"\", default=[],action=\"\", callback=reapVarArgsCallback,metavar=\"\")parser.add_option(\"\",help=\"\",dest=\"\", action=\"\",default=False)parser.add_option(\"\",help=\"\",dest=\"\", action=\"\",default=True)options, experiments = parser.parse_args(args)mutuallyExclusiveOptionCount = sum([bool(options.createCheckpointName),options.listAvailableCheckpoints,options.listTasks,bool(options.runCheckpointName)])if mutuallyExclusiveOptionCount > :_reportCommandLineUsageErrorAndExit(parser,\"\"\"\")mutuallyExclusiveOptionCount = sum([bool(not options.checkpointModel),bool(options.createCheckpointName)])if mutuallyExclusiveOptionCount > :_reportCommandLineUsageErrorAndExit(parser,\"\"\"\")if len(experiments) != :_reportCommandLineUsageErrorAndExit(parser,\"\" % (len(experiments), experiments))parser.destroy()experimentDir = os.path.abspath(experiments[])privateOptions = dict()privateOptions[''] = options.createCheckpointNameprivateOptions[''] = options.listAvailableCheckpointsprivateOptions[''] = options.listTasksprivateOptions[''] = options.runCheckpointNameprivateOptions[''] = options.newSerializationprivateOptions[''] = options.testModeprivateOptions[''] = options.taskLabelsprivateOptions[''] = options.checkpointModelresult = ParseCommandLineOptionsResult(experimentDir=experimentDir,privateOptions=privateOptions)return result", "docstring": "Parse command line options\n\n Args:\n args: command line arguments (not including sys.argv[0])\n Returns:\n namedtuple ParseCommandLineOptionsResult", "id": "f17678:m2"} {"signature": "def reapVarArgsCallback(option, optStr, value, parser):", "body": "newValues = []gotDot = Falsefor arg in parser.rargs:if arg.startswith(\"\") and len(arg) > :breakif arg.startswith(\"\") and len(arg) > :breakif arg == \"\":gotDot = TruebreaknewValues.append(arg)if not newValues:raise optparse.OptionValueError((\"\"\"\") % (optStr, parser.rargs))del parser.rargs[:len(newValues) + int(gotDot)]value = getattr(parser.values, option.dest, [])if value is None:value = []value.extend(newValues)setattr(parser.values, option.dest, value)", "docstring": "Used as optparse callback for reaping a variable number of option args.\n The option may be specified multiple times, and all the args associated with\n that option name will be accumulated in the order that they are encountered", "id": "f17678:m3"} {"signature": "def _reportCommandLineUsageErrorAndExit(parser, message):", "body": "print(parser.get_usage())print(message)sys.exit()", "docstring": "Report usage error and exit program with error indication.", "id": "f17678:m4"} {"signature": "def _runExperimentImpl(options, model=None):", "body": 
"json_helpers.validate(options.privateOptions,schemaDict=g_parsedPrivateCommandLineOptionsSchema)experimentDir = options.experimentDirdescriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(experimentDir)expIface = helpers.getExperimentDescriptionInterfaceFromModule(descriptionPyModule)if options.privateOptions['']:_printAvailableCheckpoints(experimentDir)return NoneexperimentTasks = expIface.getModelControl().get('', [])if (len(experimentTasks) == andexpIface.getModelControl()[''] == OpfEnvironment.Nupic):expIface.convertNupicEnvToOPF()experimentTasks = expIface.getModelControl().get('', [])expIface.normalizeStreamSources()newSerialization = options.privateOptions['']if options.privateOptions['']:print(\"\")for label in [t[''] for t in experimentTasks]:print(\"\", label)return Noneif options.privateOptions['']:assert model is NonecheckpointName = options.privateOptions['']model = ModelFactory.loadFromCheckpoint(savedModelDir=_getModelCheckpointDir(experimentDir, checkpointName),newSerialization=newSerialization)elif model is not None:print(\"\")else:modelDescription = expIface.getModelDescription()model = ModelFactory.create(modelDescription)if options.privateOptions['']:checkpointName = options.privateOptions['']_saveModel(model=model,experimentDir=experimentDir,checkpointLabel=checkpointName,newSerialization=newSerialization)return modeltaskIndexList = list(range(len(experimentTasks)))customTaskExecutionLabelsList = options.privateOptions['']if customTaskExecutionLabelsList:taskLabelsList = [t[''] for t in experimentTasks]taskLabelsSet = set(taskLabelsList)customTaskExecutionLabelsSet = set(customTaskExecutionLabelsList)assert customTaskExecutionLabelsSet.issubset(taskLabelsSet),(\"\"\"\"\"\") % (customTaskExecutionLabelsSet - taskLabelsSet,customTaskExecutionLabelsList)taskIndexList = [taskLabelsList.index(label) for label incustomTaskExecutionLabelsList]print(\"\" % [taskLabelsList[i] fori in taskIndexList])for taskIndex in taskIndexList:task = experimentTasks[taskIndex]taskRunner = _TaskRunner(model=model,task=task,cmdOptions=options)taskRunner.run()del taskRunnerif options.privateOptions['']:_saveModel(model=model,experimentDir=experimentDir,checkpointLabel=task[''],newSerialization=newSerialization)return model", "docstring": "Creates and runs the experiment\n\n Args:\n options: namedtuple ParseCommandLineOptionsResult\n model: For testing: may pass in an existing OPF Model instance\n to use instead of creating a new one.\n\n Returns: reference to OPFExperiment instance that was constructed (this\n is provided to aid with debugging) or None, if none was\n created.", "id": "f17678:m5"} {"signature": "def _saveModel(model, experimentDir, checkpointLabel, newSerialization=False):", "body": "checkpointDir = _getModelCheckpointDir(experimentDir, checkpointLabel)if newSerialization:model.writeToCheckpoint(checkpointDir)else:model.save(saveModelDir=checkpointDir)", "docstring": "Save model", "id": "f17678:m6"} {"signature": "def _getModelCheckpointDir(experimentDir, checkpointLabel):", "body": "checkpointDir = os.path.join(getCheckpointParentDir(experimentDir),checkpointLabel + g_defaultCheckpointExtension)checkpointDir = os.path.abspath(checkpointDir)return checkpointDir", "docstring": "Creates directory for serialization of the model\n\n checkpointLabel:\n Checkpoint label (string)\n\n Returns:\n absolute path to the serialization directory", "id": "f17678:m7"} {"signature": "def getCheckpointParentDir(experimentDir):", "body": "baseDir = os.path.join(experimentDir, 
\"\")baseDir = os.path.abspath(baseDir)return baseDir", "docstring": "Get checkpoint parent dir.\n\n Returns: absolute path to the base serialization directory within which\n model checkpoints for this experiment are created", "id": "f17678:m8"} {"signature": "def _checkpointLabelFromCheckpointDir(checkpointDir):", "body": "assert checkpointDir.endswith(g_defaultCheckpointExtension)lastSegment = os.path.split(checkpointDir)[]checkpointLabel = lastSegment[:-len(g_defaultCheckpointExtension)]return checkpointLabel", "docstring": "Returns a checkpoint label string for the given model checkpoint directory\n\n checkpointDir: relative or absolute model checkpoint directory path", "id": "f17678:m9"} {"signature": "def _isCheckpointDir(checkpointDir):", "body": "lastSegment = os.path.split(checkpointDir)[]if lastSegment[] == '':return Falseif not checkpointDir.endswith(g_defaultCheckpointExtension):return Falseif not os.path.isdir(checkpointDir):return Falsereturn True", "docstring": "Return true iff checkpointDir appears to be a checkpoint directory.", "id": "f17678:m10"} {"signature": "def _printAvailableCheckpoints(experimentDir):", "body": "checkpointParentDir = getCheckpointParentDir(experimentDir)if not os.path.exists(checkpointParentDir):print(\"\")returncheckpointDirs = [x for x in os.listdir(checkpointParentDir)if _isCheckpointDir(os.path.join(checkpointParentDir, x))]if not checkpointDirs:print(\"\")returnprint(\"\")checkpointList = [_checkpointLabelFromCheckpointDir(x)for x in checkpointDirs]for checkpoint in sorted(checkpointList):print(\"\", checkpoint)print()print(\"\")print(\"\")print(\"\")print(\"\")", "docstring": "List available checkpoints for the specified experiment.", "id": "f17678:m11"} {"signature": "def main():", "body": "initLogging(verbose=True)initExperimentPrng()runExperiment(sys.argv[:])", "docstring": "Module-level entry point. 
Run according to options in sys.argv\n\n Usage: python -m nupic.frameworks.opf.experiment_runner", "id": "f17678:m12"} {"signature": "def __init__(self, model, task, cmdOptions):", "body": "validateOpfJsonValue(task, \"\")self.__logger = logging.getLogger(\"\".join(['', self.__class__.__module__, self.__class__.__name__]))self.__logger.debug((\"\" +\"\" +\"\" +\"\") %(self.__class__.__name__,model,task,cmdOptions))streamDef = task['']datasetReader = opf_basic_environment.BasicDatasetReader(streamDef)self.__model = modelself.__datasetReader = datasetReaderself.__task = taskself.__cmdOptions = cmdOptionsself.__predictionLogger = opf_basic_environment.BasicPredictionLogger(fields=model.getFieldInfo(),experimentDir=cmdOptions.experimentDir,label=task[''],inferenceType=self.__model.getInferenceType())taskControl = task['']self.__taskDriver = OPFTaskDriver(taskControl=taskControl,model=model)loggedMetricPatterns = taskControl.get('', None)loggedMetricLabels = matchPatterns(loggedMetricPatterns,self.__taskDriver.getMetricLabels())self.__predictionLogger.setLoggedMetrics(loggedMetricLabels)self.__metricsLogger = opf_basic_environment.BasicPredictionMetricsLogger(experimentDir=cmdOptions.experimentDir,label=task[''])", "docstring": "Constructor\n\n Args:\n model: The OPF Model instance against which to run the task\n task: A dictionary conforming to opfTaskSchema.json\n cmdOptions: ParseCommandLineOptionsResult namedtuple", "id": "f17678:c0:m0"} {"signature": "def __del__(self):", "body": "", "docstring": "Destructor", "id": "f17678:c0:m1"} {"signature": "def run(self):", "body": "self.__logger.debug(\"\", self.__task[''])if self.__cmdOptions.privateOptions['']:numIters = else:numIters = self.__task['']if numIters >= :iterTracker = iter(range(numIters))else:iterTracker = iter(itertools.count())periodic = PeriodicActivityMgr(requestedActivities=self._createPeriodicActivities())self.__model.resetSequenceStates()self.__taskDriver.setup()while True:try:next(iterTracker)except StopIteration:breaktry:inputRecord = next(self.__datasetReader)except StopIteration:breakresult = self.__taskDriver.handleInputRecord(inputRecord=inputRecord)if InferenceElement.encodings in result.inferences:result.inferences.pop(InferenceElement.encodings)self.__predictionLogger.writeRecord(result)periodic.tick()self._getAndEmitExperimentMetrics(final=True)self.__taskDriver.finalize()self.__model.resetSequenceStates()", "docstring": "Runs a single experiment task", "id": "f17678:c0:m2"} {"signature": "def _createPeriodicActivities(self):", "body": "periodicActivities = []class MetricsReportCb(object):def __init__(self, taskRunner):self.__taskRunner = taskRunnerreturndef __call__(self):self.__taskRunner._getAndEmitExperimentMetrics()reportMetrics = PeriodicActivityRequest(repeating=True,period=,cb=MetricsReportCb(self))periodicActivities.append(reportMetrics)class IterationProgressCb(object):PROGRESS_UPDATE_PERIOD_TICKS = def __init__(self, taskLabel, requestedIterationCount, logger):self.__taskLabel = taskLabelself.__requestedIterationCount = requestedIterationCountself.__logger = loggerself.__numIterationsSoFar = def __call__(self):self.__numIterationsSoFar += self.PROGRESS_UPDATE_PERIOD_TICKSself.__logger.debug(\"\" % (self.__taskLabel,self.__numIterationsSoFar,self.__requestedIterationCount))iterationProgressCb = IterationProgressCb(taskLabel=self.__task[''],requestedIterationCount=self.__task[''],logger=self.__logger)iterationProgressReporter = 
PeriodicActivityRequest(repeating=True,period=IterationProgressCb.PROGRESS_UPDATE_PERIOD_TICKS,cb=iterationProgressCb)periodicActivities.append(iterationProgressReporter)return periodicActivities", "docstring": "Creates and returns a list of activities for this TaskRunner instance\n\n Returns: a list of PeriodicActivityRequest elements", "id": "f17678:c0:m3"} {"signature": "def __init__(self, requestedActivities):", "body": "self.__activities = []for req in requestedActivities:act = self.Activity(repeating=req.repeating,period=req.period,cb=req.cb,iteratorHolder=[iter(range(req.period-))])self.__activities.append(act)", "docstring": "requestedActivities: a sequence of PeriodicActivityRequest elements", "id": "f17678:c1:m0"} {"signature": "def tick(self):", "body": "for act in self.__activities:if not act.iteratorHolder[]:continuetry:next(act.iteratorHolder[])except StopIteration:act.cb()if act.repeating:act.iteratorHolder[] = iter(range(act.period-))else:act.iteratorHolder[] = Nonereturn True", "docstring": "Activity tick handler; services all activities\n\n Returns:\n True if controlling iterator says it's okay to keep going;\n False to stop", "id": "f17678:c1:m1"} {"signature": "def __init__(self, inferenceType=None, proto=None):", "body": "assert inferenceType is not None and proto is None or (inferenceType is None and proto is not None), (\"\")if proto is None:self._numPredictions = self.__inferenceType = inferenceTypeself.__learningEnabled = Trueself.__inferenceEnabled = Trueself.__inferenceArgs = {}else:self._numPredictions = proto.numPredictionsinferenceType = str(proto.inferenceType)inferenceType = inferenceType[:].upper() + inferenceType[:]self.__inferenceType = InferenceType.getValue(inferenceType)self.__learningEnabled = proto.learningEnabledself.__inferenceEnabled = proto.inferenceEnabledself.__inferenceArgs = json.loads(proto.inferenceArgs)", "docstring": ":param opf_utils.InferenceType inferenceType: mutually-exclusive with proto\n arg\n:param proto: capnp ModelProto message reader for deserializing;\n mutually-exclusive with the other constructor args.", "id": "f17679:c0:m0"} {"signature": "def run(self, inputRecord):", "body": "predictionNumber = self._numPredictionsself._numPredictions += result = opf_utils.ModelResult(predictionNumber=predictionNumber,rawInput=inputRecord)return result", "docstring": "Run one iteration of this model.\n\n:param inputRecord: (object)\n A record object formatted according to\n :meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecord` or\n :meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecordDict`\n result format.\n:returns: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`)\n A ModelResult namedtuple. The contents of ModelResult.inferences\n depends on the specific inference type of this model, which\n can be queried by :meth:`.getInferenceType`.", "id": "f17679:c0:m1"} {"signature": "@abstractmethoddef finishLearning(self):", "body": "", "docstring": "Place the model in a permanent \"finished learning\" mode.\n In such a mode the model will not be able to learn from subsequent input\n records.\n\n .. note:: Upon completion of this command, learning may not be resumed on\n the given instance of the model (e.g., the implementation may optimize\n itself by pruning data structures that are necessary for learning).", "id": "f17679:c0:m2"} {"signature": "@abstractmethoddef resetSequenceStates(self):", "body": "", "docstring": "Signal that the input record is the start of a new sequence. 
Normally called\nto force the delineation of a sequence, such as between OPF tasks.", "id": "f17679:c0:m3"} {"signature": "@abstractmethoddef getFieldInfo(self, includeClassifierOnlyField=False):", "body": "", "docstring": "Return the sequence of :class:`~nupic.data.field_meta.FieldMetaInfo` objects\nspecifying the format of Model's output.\n\nThis may be different than the list of\n:class:`~nupic.data.field_meta.FieldMetaInfo` objects supplied at\ninitialization (e.g., due to the transcoding of some input fields into\nmeta-fields, such as datetime -> dayOfWeek, timeOfDay, etc.).\n\n:param includeClassifierOnlyField: (bool)\n If True, any field which is only sent to the classifier (i.e. not\n sent in to the bottom of the network) is also included\n:returns: (list) of :class:`~nupic.data.field_meta.FieldMetaInfo` objects.", "id": "f17679:c0:m4"} {"signature": "@abstractmethoddef setFieldStatistics(self, fieldStats):", "body": "", "docstring": "Propagate field statistics to the model in case some of its machinery\n needs it.\n\n :param fieldStats: (dict)\n A dict of dicts with first key being the fieldname and the second\n key is min,max or other supported statistics.", "id": "f17679:c0:m5"} {"signature": "@abstractmethoddef getRuntimeStats(self):", "body": "", "docstring": "Get runtime statistics specific to this model, i.e.\n ``activeCellOverlapAvg``.\n\n :returns: (dict) A {statistic names: stats} dictionary", "id": "f17679:c0:m6"} {"signature": "@abstractmethoddef _getLogger(self):", "body": "", "docstring": "Get the logger for this object.\n This is a protected method that is used by the ModelBase to access the\n logger created by the subclass.\n\n :returns: (Logger) A Logger object, it should not be None.", "id": "f17679:c0:m7"} {"signature": "def getInferenceType(self):", "body": "return self.__inferenceType", "docstring": "Return the InferenceType of this model.\n This is immutable.\n\n :returns: :class:`~nupic.frameworks.opf.opf_utils.InferenceType`", "id": "f17679:c0:m8"} {"signature": "def enableLearning(self):", "body": "self.__learningEnabled = Truereturn", "docstring": "Turn Learning on for the current model.", "id": "f17679:c0:m9"} {"signature": "def disableLearning(self):", "body": "self.__learningEnabled = Falsereturn", "docstring": "Turn Learning off for the current model.", "id": "f17679:c0:m10"} {"signature": "def isLearningEnabled(self):", "body": "return self.__learningEnabled", "docstring": "Return the Learning state of the current model.\n\n :returns: (bool) The learning state", "id": "f17679:c0:m11"} {"signature": "def enableInference(self, inferenceArgs=None):", "body": "self.__inferenceEnabled = Trueself.__inferenceArgs = inferenceArgs", "docstring": "Enable inference for this model.\n\n :param inferenceArgs: (dict)\n A dictionary of arguments required for inference. 
These depend on\n the InferenceType of the current model", "id": "f17679:c0:m12"} {"signature": "def getInferenceArgs(self):", "body": "return self.__inferenceArgs", "docstring": "Return the dict of arguments for the current inference mode.\n\n :returns: (dict) The arguments of the inference mode", "id": "f17679:c0:m13"} {"signature": "def disableInference(self):", "body": "self.__inferenceEnabled = False", "docstring": "Turn Inference off for the current model.", "id": "f17679:c0:m14"} {"signature": "def isInferenceEnabled(self):", "body": "return self.__inferenceEnabled", "docstring": "Return the inference state of the current model.\n\n :returns: (bool) The inference state", "id": "f17679:c0:m15"} {"signature": "@staticmethoddef getSchema():", "body": "raise NotImplementedError()", "docstring": "Return the pycapnp proto type that the class uses for serialization.\n\n This is used to convert the proto into the proper type before passing it\n into the read or write method of the subclass.", "id": "f17679:c0:m16"} {"signature": "@staticmethoddef _getModelCheckpointFilePath(checkpointDir):", "body": "path = os.path.join(checkpointDir, \"\")path = os.path.abspath(path)return path", "docstring": "Return the absolute path of the model's checkpoint file.\n\n :param checkpointDir: (string)\n Directory of where the experiment is to be or was saved\n :returns: (string) An absolute path.", "id": "f17679:c0:m17"} {"signature": "def writeToCheckpoint(self, checkpointDir):", "body": "proto = self.getSchema().new_message()self.write(proto)checkpointPath = self._getModelCheckpointFilePath(checkpointDir)if os.path.exists(checkpointDir):if not os.path.isdir(checkpointDir):raise Exception((\"\"\"\")% checkpointDir)if not os.path.isfile(checkpointPath):raise Exception((\"\"\"\"\"\") %(checkpointDir, checkpointPath))shutil.rmtree(checkpointDir)self.__makeDirectoryFromAbsolutePath(checkpointDir)with open(checkpointPath, '') as f:proto.write(f)", "docstring": "Serializes model using capnproto and writes data to ``checkpointDir``", "id": "f17679:c0:m18"} {"signature": "@classmethoddef readFromCheckpoint(cls, checkpointDir):", "body": "checkpointPath = cls._getModelCheckpointFilePath(checkpointDir)with open(checkpointPath, '') as f:proto = cls.getSchema().read(f,traversal_limit_in_words=_TRAVERSAL_LIMIT_IN_WORDS)model = cls.read(proto)return model", "docstring": "Deserializes model from checkpointDir using capnproto", "id": "f17679:c0:m19"} {"signature": "def writeBaseToProto(self, proto):", "body": "inferenceType = self.getInferenceType()inferenceType = inferenceType[:].lower() + inferenceType[:]proto.inferenceType = inferenceTypeproto.numPredictions = self._numPredictionsproto.learningEnabled = self.__learningEnabledproto.inferenceEnabled = self.__inferenceEnabledproto.inferenceArgs = json.dumps(self.__inferenceArgs)", "docstring": "Save the state maintained by the Model base class\n\n :param proto: capnp ModelProto message builder", "id": "f17679:c0:m20"} {"signature": "def write(self, proto):", "body": "raise NotImplementedError()", "docstring": "Write state to proto object.\n\n The type of proto is determined by :meth:`getSchema`.", "id": "f17679:c0:m21"} {"signature": "@classmethoddef read(cls, proto):", "body": "raise NotImplementedError()", "docstring": "Read state from proto object.\n\n The type of proto is determined by :meth:`getSchema`.", "id": "f17679:c0:m22"} {"signature": "def save(self, saveModelDir):", "body": "logger = self._getLogger()logger.debug(\"\",self, saveModelDir)modelPickleFilePath = 
self._getModelPickleFilePath(saveModelDir)if os.path.exists(saveModelDir):if not os.path.isdir(saveModelDir):raise Exception((\"\"\"\")% saveModelDir)if not os.path.isfile(modelPickleFilePath):raise Exception((\"\"\"\"\"\") %(saveModelDir, modelPickleFilePath))shutil.rmtree(saveModelDir)self.__makeDirectoryFromAbsolutePath(saveModelDir)with open(modelPickleFilePath, '') as modelPickleFile:logger.debug(\"\", self)pickle.dump(self, modelPickleFile, protocol=pickle.HIGHEST_PROTOCOL)logger.debug(\"\", self)self._serializeExtraData(extraDataDir=self._getModelExtraDataDir(saveModelDir))logger.debug(\"\", self)return", "docstring": "Save the model in the given directory.\n\n :param saveModelDir: (string)\n Absolute directory path for saving the model. This directory should\n only be used to store a saved model. If the directory does not exist,\n it will be created automatically and populated with model data. A\n pre-existing directory will only be accepted if it contains previously\n saved model data. If such a directory is given, the full contents of\n the directory will be deleted and replaced with current model data.", "id": "f17679:c0:m23"} {"signature": "def _serializeExtraData(self, extraDataDir):", "body": "pass", "docstring": "Protected method that is called during serialization with an external\n directory path. It can be overridden by subclasses to bypass pickle for\n saving large binary states.\n This is called by ModelBase only.\n\n :param extraDataDir: (string) Model's extra data directory path", "id": "f17679:c0:m24"} {"signature": "@classmethoddef load(cls, savedModelDir):", "body": "logger = opf_utils.initLogger(cls)logger.debug(\"\", savedModelDir)modelPickleFilePath = Model._getModelPickleFilePath(savedModelDir)with open(modelPickleFilePath, '') as modelPickleFile:logger.debug(\"\")model = pickle.load(modelPickleFile)logger.debug(\"\")model._deSerializeExtraData(extraDataDir=Model._getModelExtraDataDir(savedModelDir))logger.debug(\"\")return model", "docstring": "Load saved model.\n\n :param savedModelDir: (string)\n Directory of where the experiment is to be or was saved\n :returns: (:class:`Model`) The loaded model instance", "id": "f17679:c0:m25"} {"signature": "def _deSerializeExtraData(self, extraDataDir):", "body": "pass", "docstring": "Protected method that is called during deserialization\n (after __setstate__) with an external directory path.\n It can be overridden by subclasses to bypass pickle for loading large\n binary states.\n This is called by ModelBase only.\n\n :param extraDataDir: (string) Model's extra data directory path", "id": "f17679:c0:m26"} {"signature": "@staticmethoddef _getModelPickleFilePath(saveModelDir):", "body": "path = os.path.join(saveModelDir, \"\")path = os.path.abspath(path)return path", "docstring": "Return the absolute path of the model's pickle file.\n\n :param saveModelDir: (string)\n Directory of where the experiment is to be or was saved\n :returns: (string) An absolute path.", "id": "f17679:c0:m27"} {"signature": "@staticmethoddef _getModelExtraDataDir(saveModelDir):", "body": "path = os.path.join(saveModelDir, \"\")path = os.path.abspath(path)return path", "docstring": "Return the absolute path to the directory where the model's own\n \"extra data\" are stored (i.e., data that's too big for pickling).\n\n :param saveModelDir: (string)\n Directory of where the experiment is to be or was saved\n :returns: (string) An absolute path.", "id": "f17679:c0:m28"} {"signature": "@staticmethoddef __makeDirectoryFromAbsolutePath(absDirPath):", 
"body": "assert os.path.isabs(absDirPath)try:os.makedirs(absDirPath)except OSError as e:if e.errno != os.errno.EEXIST:raisereturn", "docstring": "Make directory for the given directory path if it doesn't already\n exist in the filesystem.\n\n :param absDirPath: (string) Absolute path of the directory to create\n @exception (Exception) OSError if directory creation fails", "id": "f17679:c0:m29"} {"signature": "def requireAnomalyModel(func):", "body": "@wraps(func)def _decorator(self, *args, **kwargs):if not self.getInferenceType() == InferenceType.TemporalAnomaly:raise RuntimeError(\"\")if self._getAnomalyClassifier() is None:raise RuntimeError(\"\"\"\")return func(self, *args, **kwargs)return _decorator", "docstring": "Decorator for functions that require anomaly models.", "id": "f17680:m0"} {"signature": "def __init__(self, net, statsCollectors):", "body": "self.net = netself.statsCollectors = statsCollectorsreturn", "docstring": "net: The CLA Network instance\nstatsCollectors:\n Sequence of 0 or more CLAStatistic-based instances", "id": "f17680:c0:m0"} {"signature": "def __init__(self,sensorParams={},inferenceType=InferenceType.TemporalNextStep,spEnable=True,spParams={},trainSPNetOnlyIfRequested=False,tmEnable=True,tmParams={},clEnable=True,clParams={},anomalyParams={},minLikelihoodThreshold=DEFAULT_LIKELIHOOD_THRESHOLD,maxPredictionsPerStep=DEFAULT_MAX_PREDICTIONS_PER_STEP,network=None,baseProto=None):", "body": "if not inferenceType in self.__supportedInferenceKindSet:raise ValueError(\"\".format(self.__class__, inferenceType))if baseProto is None:super(HTMPredictionModel, self).__init__(inferenceType)else:super(HTMPredictionModel, self).__init__(proto=baseProto)self.__restoringFromState = Falseself.__restoringFromV1 = Falseself.__logger = initLogger(self)self.__logger.debug(\"\" % self.__myClassName)self._minLikelihoodThreshold = minLikelihoodThresholdself._maxPredictionsPerStep = maxPredictionsPerStepself.__spLearningEnabled = bool(spEnable)self.__tpLearningEnabled = bool(tmEnable)if not InferenceType.isTemporal(self.getInferenceType())or self.getInferenceType() == InferenceType.NontemporalMultiStep:tmEnable = Falseself._netInfo = Noneself._hasSP = spEnableself._hasTP = tmEnableself._hasCL = clEnableself._classifierInputEncoder = Noneself._predictedFieldIdx = Noneself._predictedFieldName = Noneself._numFields = Noneif network is not None:self._netInfo = NetworkInfo(net=network, statsCollectors=[])else:self._netInfo = self.__createHTMNetwork(sensorParams, spEnable, spParams, tmEnable, tmParams, clEnable,clParams, anomalyParams)if self.getInferenceType() == InferenceType.NontemporalAnomaly:self._getSPRegion().setParameter(\"\", True)if self.getInferenceType() == InferenceType.TemporalAnomaly:self._getTPRegion().setParameter(\"\", True)self.__trainSPNetOnlyIfRequested = trainSPNetOnlyIfRequestedself.__numRunCalls = self.__finishedLearning = Falseself.__logger.debug(\"\" % self.__class__.__name__)self._input = Nonereturn", "docstring": ":param network: if not None, the deserialized nupic.engine.Network instance\n to use instead of creating a new Network\n:param baseProto: if not None, capnp ModelProto message reader for\n deserializing; supersedes inferenceType", "id": "f17680:c1:m0"} {"signature": "def getParameter(self, paramName):", "body": "if paramName == '':return self.__numRunCallselse:raise RuntimeError(\"\" %(paramName))", "docstring": "Currently only supports a parameter named ``__numRunCalls``.\n\n:param paramName: (string) name of parameter to get. 
If not\n ``__numRunCalls`` an exception is thrown.\n:returns: (int) the value of ``self.__numRunCalls``", "id": "f17680:c1:m1"} {"signature": "@requireAnomalyModeldef setAnomalyParameter(self, param, value):", "body": "self._getAnomalyClassifier().setParameter(param, value)", "docstring": "Set a parameter of the anomaly classifier within this model.\n\n:param param: (string) name of parameter to set\n:param value: (object) value to set", "id": "f17680:c1:m9"} {"signature": "@requireAnomalyModeldef getAnomalyParameter(self, param):", "body": "return self._getAnomalyClassifier().getParameter(param)", "docstring": "Get a parameter of the anomaly classifier within this model by key.\n\n:param param: (string) name of parameter to retrieve", "id": "f17680:c1:m10"} {"signature": "@requireAnomalyModeldef anomalyRemoveLabels(self, start, end, labelFilter):", "body": "self._getAnomalyClassifier().getSelf().removeLabels(start, end, labelFilter)", "docstring": "Remove labels from the anomaly classifier within this model. Removes all\nrecords if ``labelFilter==None``, otherwise only removes the labels equal to\n``labelFilter``.\n\n:param start: (int) index to start removing labels\n:param end: (int) index to end removing labels\n:param labelFilter: (string) If specified, only removes records that match", "id": "f17680:c1:m11"} {"signature": "@requireAnomalyModeldef anomalyAddLabel(self, start, end, labelName):", "body": "self._getAnomalyClassifier().getSelf().addLabel(start, end, labelName)", "docstring": "Add labels from the anomaly classifier within this model.\n\n:param start: (int) index to start label\n:param end: (int) index to end label\n:param labelName: (string) name of label", "id": "f17680:c1:m12"} {"signature": "@requireAnomalyModeldef anomalyGetLabels(self, start, end):", "body": "return self._getAnomalyClassifier().getSelf().getLabels(start, end)", "docstring": "Get labels from the anomaly classifier within this model.\n\n:param start: (int) index to start getting labels\n:param end: (int) index to end getting labels", "id": "f17680:c1:m13"} {"signature": "def _getSensorInputRecord(self, inputRecord):", "body": "sensor = self._getSensorRegion()dataRow = copy.deepcopy(sensor.getSelf().getOutputValues(''))dataDict = copy.deepcopy(inputRecord)inputRecordEncodings = sensor.getSelf().getOutputValues('')inputRecordCategory = int(sensor.getOutputData('')[])resetOut = sensor.getOutputData('')[]return SensorInput(dataRow=dataRow,dataDict=dataDict,dataEncodings=inputRecordEncodings,sequenceReset=resetOut,category=inputRecordCategory)", "docstring": "inputRecord - dict containing the input to the sensor\n\nReturn a 'SensorInput' object, which represents the 'parsed'\nrepresentation of the input record", "id": "f17680:c1:m15"} {"signature": "def _getClassifierInputRecord(self, inputRecord):", "body": "absoluteValue = NonebucketIdx = Noneif self._predictedFieldName is not None and self._classifierInputEncoder is not None:absoluteValue = inputRecord[self._predictedFieldName]bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[]return ClassifierInput(dataRow=absoluteValue,bucketIndex=bucketIdx)", "docstring": "inputRecord - dict containing the input to the sensor\n\nReturn a 'ClassifierInput' object, which contains the mapped\nbucket index for input Record", "id": "f17680:c1:m16"} {"signature": "def _anomalyCompute(self):", "body": "inferenceType = self.getInferenceType()inferences = {}sp = self._getSPRegion()score = Noneif inferenceType == InferenceType.NontemporalAnomaly:score = 
sp.getOutputData(\"\")[] elif inferenceType == InferenceType.TemporalAnomaly:tm = self._getTPRegion()if sp is not None:activeColumns = sp.getOutputData(\"\").nonzero()[]else:sensor = self._getSensorRegion()activeColumns = sensor.getOutputData('').nonzero()[]if not self._predictedFieldName in self._input:raise ValueError(\"\"% self._predictedFieldName)score = tm.getOutputData(\"\")[]if sp is not None:self._getAnomalyClassifier().setParameter(\"\", len(activeColumns))self._getAnomalyClassifier().prepareInputs()self._getAnomalyClassifier().compute()labels = self._getAnomalyClassifier().getSelf().getLabelResults()inferences[InferenceElement.anomalyLabel] = \"\" % labelsinferences[InferenceElement.anomalyScore] = scorereturn inferences", "docstring": "Compute Anomaly score, if required", "id": "f17680:c1:m26"} {"signature": "def _handleSDRClassifierMultiStep(self, patternNZ,inputTSRecordIdx,rawInput):", "body": "inferenceArgs = self.getInferenceArgs()predictedFieldName = inferenceArgs.get('', None)if predictedFieldName is None:raise ValueError(\"\")self._predictedFieldName = predictedFieldNameclassifier = self._getClassifierRegion()if not self._hasCL or classifier is None:return {}sensor = self._getSensorRegion()minLikelihoodThreshold = self._minLikelihoodThresholdmaxPredictionsPerStep = self._maxPredictionsPerStepneedLearning = self.isLearningEnabled()inferences = {}if self._classifierInputEncoder is None:if predictedFieldName is None:raise RuntimeError(\"\"\"\"\"\")encoderList = sensor.getSelf().encoder.getEncoderList()self._numFields = len(encoderList)fieldNames = sensor.getSelf().encoder.getScalarNames()if predictedFieldName in fieldNames:self._predictedFieldIdx = fieldNames.index(predictedFieldName)else:self._predictedFieldIdx = Noneif sensor.getSelf().disabledEncoder is not None:encoderList = sensor.getSelf().disabledEncoder.getEncoderList()else:encoderList = []if len(encoderList) >= :fieldNames = sensor.getSelf().disabledEncoder.getScalarNames()self._classifierInputEncoder = encoderList[fieldNames.index(predictedFieldName)]else:encoderList = sensor.getSelf().encoder.getEncoderList()self._classifierInputEncoder = encoderList[self._predictedFieldIdx]if not predictedFieldName in rawInput:raise ValueError(\"\"\"\"% predictedFieldName)absoluteValue = rawInput[predictedFieldName]bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[]if isinstance(self._classifierInputEncoder, DeltaEncoder):if not hasattr(self,\"\"):self._ms_prevVal = absoluteValueprevValue = self._ms_prevValself._ms_prevVal = absoluteValueactualValue = absoluteValue - prevValueelse:actualValue = absoluteValueif isinstance(actualValue, float) and math.isnan(actualValue):actualValue = SENTINEL_VALUE_FOR_MISSING_DATAclassifier.setParameter('', True)classifier.setParameter('', needLearning)classificationIn = {'': bucketIdx,'': actualValue}if inputTSRecordIdx is not None:recordNum = inputTSRecordIdxelse:recordNum = self.__numRunCallsclResults = classifier.getSelf().customCompute(recordNum=recordNum,patternNZ=patternNZ,classification=classificationIn)predictionSteps = classifier.getParameter('')predictionSteps = [int(x) for x in predictionSteps.split('')]inferences[InferenceElement.multiStepPredictions] = dict()inferences[InferenceElement.multiStepBestPredictions] = dict()inferences[InferenceElement.multiStepBucketLikelihoods] = dict()for steps in predictionSteps:likelihoodsVec = clResults[steps]bucketValues = clResults['']likelihoodsDict = dict()bestActValue = NonebestProb = Nonefor (actValue, prob) in 
zip(bucketValues, likelihoodsVec):if actValue in likelihoodsDict:likelihoodsDict[actValue] += probelse:likelihoodsDict[actValue] = probif bestProb is None or likelihoodsDict[actValue] > bestProb:bestProb = likelihoodsDict[actValue]bestActValue = actValuelikelihoodsDict = HTMPredictionModel._removeUnlikelyPredictions(likelihoodsDict, minLikelihoodThreshold, maxPredictionsPerStep)bucketLikelihood = {}for k in likelihoodsDict.keys():bucketLikelihood[self._classifierInputEncoder.getBucketIndices(k)[]] = (likelihoodsDict[k])if isinstance(self._classifierInputEncoder, DeltaEncoder):if not hasattr(self, ''):self._ms_predHistories = dict()predHistories = self._ms_predHistoriesif not steps in predHistories:predHistories[steps] = deque()predHistory = predHistories[steps]sumDelta = sum(predHistory)offsetDict = dict()for (k, v) in likelihoodsDict.iteritems():if k is not None:offsetDict[absoluteValue+float(k)+sumDelta] = vbucketLikelihoodOffset = {}for k in offsetDict.keys():bucketLikelihoodOffset[self._classifierInputEncoder.getBucketIndices(k)[]] = (offsetDict[k])if bestActValue is not None:predHistory.append(bestActValue)if len(predHistory) >= steps:predHistory.popleft()if len(offsetDict)>:inferences[InferenceElement.multiStepPredictions][steps] = offsetDictinferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihoodOffsetelse:inferences[InferenceElement.multiStepPredictions][steps] = likelihoodsDictinferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihoodif bestActValue is None:inferences[InferenceElement.multiStepBestPredictions][steps] = Noneelse:inferences[InferenceElement.multiStepBestPredictions][steps] = (absoluteValue + sumDelta + bestActValue)else:inferences[InferenceElement.multiStepPredictions][steps] = (likelihoodsDict)inferences[InferenceElement.multiStepBestPredictions][steps] = (bestActValue)inferences[InferenceElement.multiStepBucketLikelihoods][steps] = (bucketLikelihood)return inferences", "docstring": "Handle the CLA Classifier compute logic when implementing multi-step\n prediction. This is where the patternNZ is associated with one of the\n other fields from the dataset 0 to N steps in the future. This method is\n used by each type of network (encoder only, SP only, SP +TM) to handle the\n compute logic through the CLA Classifier. It fills in the inference dict with\n the results of the compute.\n\n Parameters:\n -------------------------------------------------------------------\n patternNZ: The input to the CLA Classifier as a list of active input indices\n inputTSRecordIdx: The index of the record as computed from the timestamp\n and aggregation interval. This normally increments by 1\n each time unless there are missing records. 
If there is no\n aggregation interval or timestamp in the data, this will be\n None.\n rawInput: The raw input to the sensor, as a dict.", "id": "f17680:c1:m27"} {"signature": "@classmethoddef _removeUnlikelyPredictions(cls, likelihoodsDict, minLikelihoodThreshold,maxPredictionsPerStep):", "body": "maxVal = (None, None)for (k, v) in likelihoodsDict.items():if len(likelihoodsDict) <= :breakif maxVal[] is None or v >= maxVal[]:if maxVal[] is not None and maxVal[] < minLikelihoodThreshold:del likelihoodsDict[maxVal[]]maxVal = (k, v)elif v < minLikelihoodThreshold:del likelihoodsDict[k]likelihoodsDict = dict(sorted(likelihoodsDict.iteritems(),key=itemgetter(),reverse=True)[:maxPredictionsPerStep])return likelihoodsDict", "docstring": "Remove entries with 0 likelihood or likelihood less than\n minLikelihoodThreshold, but don't leave an empty dict.", "id": "f17680:c1:m28"} {"signature": "def getRuntimeStats(self):", "body": "ret = {\"\" : self.__numRunCalls}temporalStats = dict()if self._hasTP:for stat in self._netInfo.statsCollectors:sdict = stat.getStats()temporalStats.update(sdict)ret[InferenceType.getLabel(InferenceType.TemporalNextStep)] = temporalStatsreturn ret", "docstring": "Only returns data for a stat called ``numRunCalls``.\n:return:", "id": "f17680:c1:m29"} {"signature": "def _getLogger(self):", "body": "return self.__logger", "docstring": "Get the logger for this object. This is a protected method that is used\n by the Model to access the logger created by the subclass\n\n return:\n A logging.Logger object. Should not be None", "id": "f17680:c1:m31"} {"signature": "def _getSPRegion(self):", "body": "return self._netInfo.net.regions.get('', None)", "docstring": "Returns reference to the network's SP region", "id": "f17680:c1:m32"} {"signature": "def _getTPRegion(self):", "body": "return self._netInfo.net.regions.get('', None)", "docstring": "Returns reference to the network's TM region", "id": "f17680:c1:m33"} {"signature": "def _getSensorRegion(self):", "body": "return self._netInfo.net.regions['']", "docstring": "Returns reference to the network's Sensor region", "id": "f17680:c1:m34"} {"signature": "def _getClassifierRegion(self):", "body": "if (self._netInfo.net is not None and\"\" in self._netInfo.net.regions):return self._netInfo.net.regions[\"\"]else:return None", "docstring": "Returns reference to the network's Classifier region", "id": "f17680:c1:m35"} {"signature": "def _getEncoder(self):", "body": "return self._getSensorRegion().getSelf().encoder", "docstring": "Returns: sensor region's encoder for the given network", "id": "f17680:c1:m37"} {"signature": "def _getClassifierOnlyEncoder(self):", "body": "return self._getSensorRegion().getSelf().disabledEncoder", "docstring": "Returns: sensor region's encoder that is sent only to the classifier,\n not to the bottom of the network", "id": "f17680:c1:m38"} {"signature": "def _getDataSource(self):", "body": "return self._getSensorRegion().getSelf().dataSource", "docstring": "Returns: data source that we installed in sensor region", "id": "f17680:c1:m39"} {"signature": "def __createHTMNetwork(self, sensorParams, spEnable, spParams, tmEnable,tmParams, clEnable, clParams, anomalyParams):", "body": "n = Network()n.addRegion(\"\", \"\", json.dumps(dict(verbosity=sensorParams[''])))sensor = n.regions[''].getSelf()enabledEncoders = copy.deepcopy(sensorParams[''])for name, params in enabledEncoders.items():if params is not None:classifierOnly = params.pop('', False)if classifierOnly:enabledEncoders.pop(name)disabledEncoders = 
copy.deepcopy(sensorParams[''])for name, params in disabledEncoders.items():if params is None:disabledEncoders.pop(name)else:classifierOnly = params.pop('', False)if not classifierOnly:disabledEncoders.pop(name)encoder = MultiEncoder(enabledEncoders)sensor.encoder = encodersensor.disabledEncoder = MultiEncoder(disabledEncoders)sensor.dataSource = DataBuffer()prevRegion = \"\"prevRegionWidth = encoder.getWidth()if spEnable:spParams = spParams.copy()spParams[''] = prevRegionWidthself.__logger.debug(\"\" % spParams)n.addRegion(\"\", \"\", json.dumps(spParams))n.link(\"\", \"\", \"\", \"\")n.link(\"\", \"\", \"\", \"\", srcOutput=\"\",destInput=\"\")n.link(\"\", \"\", \"\", \"\", srcOutput=\"\",destInput=\"\")n.link(\"\", \"\", \"\", \"\", srcOutput=\"\",destInput=\"\")prevRegion = \"\"prevRegionWidth = spParams['']if tmEnable:tmParams = tmParams.copy()if prevRegion == '':tmParams[''] = tmParams[''] = prevRegionWidthelse:assert tmParams[''] == prevRegionWidthtmParams[''] = tmParams['']self.__logger.debug(\"\" % tmParams)n.addRegion(\"\", \"\", json.dumps(tmParams))n.link(prevRegion, \"\", \"\", \"\")if prevRegion != \"\":n.link(\"\", prevRegion, \"\", \"\", srcOutput=\"\",destInput=\"\")else:n.link(\"\", prevRegion, \"\", \"\", srcOutput=\"\",destInput=\"\")n.link(\"\", \"\", \"\", \"\", srcOutput=\"\",destInput=\"\")prevRegion = \"\"prevRegionWidth = tmParams['']if clEnable and clParams is not None:clParams = clParams.copy()clRegionName = clParams.pop('')self.__logger.debug(\"\" % (clRegionName,clParams))n.addRegion(\"\", \"\" % str(clRegionName), json.dumps(clParams))if str(clRegionName) == \"\":n.link(\"\", \"\", \"\", \"\", srcOutput=\"\",destInput=\"\")n.link(\"\", \"\", \"\", \"\", srcOutput=\"\",destInput=\"\")n.link(\"\", \"\", \"\", \"\", srcOutput=\"\",destInput=\"\")n.link(prevRegion, \"\", \"\", \"\")if self.getInferenceType() == InferenceType.TemporalAnomaly:anomalyClParams = dict(trainRecords=anomalyParams.get('', None),cacheSize=anomalyParams.get('', None))self._addAnomalyClassifierRegion(n, anomalyClParams, spEnable, tmEnable)n.initialize()return NetworkInfo(net=n, statsCollectors=[])", "docstring": "Create a CLA network and return it.\n\n description: HTMPredictionModel description dictionary (TODO: define schema)\n Returns: NetworkInfo instance;", "id": "f17680:c1:m40"} {"signature": "def __getstate__(self):", "body": "state = self.__dict__.copy()state[\"\"] = NetworkInfo(net=None,statsCollectors=self._netInfo.statsCollectors)for ephemeral in [self.__manglePrivateMemberName(\"\"),self.__manglePrivateMemberName(\"\")]:state.pop(ephemeral)return state", "docstring": "Return serializable state. This function will return a version of the\n__dict__ with data that shouldn't be pickled stripped out. 
In particular,\nthe CLA Network is stripped out because it has it's own serialization\nmechanism)\n\nSee also: _serializeExtraData()", "id": "f17680:c1:m41"} {"signature": "def __setstate__(self, state):", "body": "self.__dict__.update(state)self.__restoringFromState = Trueself.__logger = initLogger(self)if not hasattr(self, \"\"):self.__restoringFromV1 = Trueself._hasSP = Trueif self.__temporalNetInfo is not None:self._Model__inferenceType = InferenceType.TemporalNextStepself._netInfo = self.__temporalNetInfoself._hasTP = Trueelse:raise RuntimeError(\"\")self._Model__inferenceArgs = {}self._Model__learningEnabled = Trueself._Model__inferenceEnabled = Trueself.__dict__.pop(\"\", None)self.__dict__.pop(\"\", None)self.__dict__.pop(\"\", None)if not hasattr(self, \"\"):self._hasSP = Falseself._hasTP = Falseif self.__encoderNetInfo is not None:self._netInfo = self.__encoderNetInfoelif self.__nonTemporalNetInfo is not None:self._netInfo = self.__nonTemporalNetInfoself._hasSP = Trueelse:self._netInfo = self.__temporalNetInfoself._hasSP = Trueself._hasTP = Trueself.__dict__.pop(\"\", None)self.__dict__.pop(\"\", None)self.__dict__.pop(\"\", None)self._classifierInputEncoder = Noneif not hasattr(self, ''):self._minLikelihoodThreshold = DEFAULT_LIKELIHOOD_THRESHOLDif not hasattr(self, ''):self._maxPredictionsPerStep = DEFAULT_MAX_PREDICTIONS_PER_STEPif not hasattr(self, ''):self._hasCL = (self._getClassifierRegion() is not None)self.__logger.debug(\"\" % self.__class__.__name__)", "docstring": "Set the state of ourself from a serialized state.\n\nSee also: _deSerializeExtraData", "id": "f17680:c1:m42"} {"signature": "def write(self, proto):", "body": "super(HTMPredictionModel, self).writeBaseToProto(proto.modelBase)proto.numRunCalls = self.__numRunCallsproto.minLikelihoodThreshold = self._minLikelihoodThresholdproto.maxPredictionsPerStep = self._maxPredictionsPerStepself._netInfo.net.write(proto.network)proto.spLearningEnabled = self.__spLearningEnabledproto.tpLearningEnabled = self.__tpLearningEnabledif self._predictedFieldIdx is None:proto.predictedFieldIdx.none = Noneelse:proto.predictedFieldIdx.value = self._predictedFieldIdxif self._predictedFieldName is None:proto.predictedFieldName.none = Noneelse:proto.predictedFieldName.value = self._predictedFieldNameif self._numFields is None:proto.numFields.none = Noneelse:proto.numFields.value = self._numFieldsproto.trainSPNetOnlyIfRequested = self.__trainSPNetOnlyIfRequestedproto.finishedLearning = self.__finishedLearning", "docstring": ":param proto: capnp HTMPredictionModelProto message builder", "id": "f17680:c1:m44"} {"signature": "@classmethoddef read(cls, proto):", "body": "obj = object.__new__(cls)super(HTMPredictionModel, obj).__init__(proto=proto.modelBase)obj._minLikelihoodThreshold = round(proto.minLikelihoodThreshold,EPSILON_ROUND)obj._maxPredictionsPerStep = proto.maxPredictionsPerStepnetwork = Network.read(proto.network)obj._hasSP = (\"\" in network.regions)obj._hasTP = (\"\" in network.regions)obj._hasCL = (\"\" in network.regions)obj._netInfo = NetworkInfo(net=network, statsCollectors=[])obj.__spLearningEnabled = bool(proto.spLearningEnabled)obj.__tpLearningEnabled = bool(proto.tpLearningEnabled)obj.__numRunCalls = proto.numRunCallsobj._classifierInputEncoder = Noneif proto.predictedFieldIdx.which() == \"\":obj._predictedFieldIdx = Noneelse:obj._predictedFieldIdx = proto.predictedFieldIdx.valueif proto.predictedFieldName.which() == \"\":obj._predictedFieldName = Noneelse:obj._predictedFieldName = 
proto.predictedFieldName.valueobj._numFields = proto.numFieldsif proto.numFields.which() == \"\":obj._numFields = Noneelse:obj._numFields = proto.numFields.valueobj.__trainSPNetOnlyIfRequested = proto.trainSPNetOnlyIfRequestedobj.__finishedLearning = proto.finishedLearningobj._input = Nonesensor = network.regions[''].getSelf()sensor.dataSource = DataBuffer()network.initialize()obj.__logger = initLogger(obj)obj.__logger.debug(\"\" % obj.__myClassName)obj.__restoringFromState = Falseobj.__restoringFromV1 = Falsereturn obj", "docstring": ":param proto: capnp HTMPredictionModelProto message reader", "id": "f17680:c1:m45"} {"signature": "def _serializeExtraData(self, extraDataDir):", "body": "makeDirectoryFromAbsolutePath(extraDataDir)outputDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)self.__logger.debug(\"\")self._netInfo.net.save(outputDir)self.__logger.debug(\"\")return", "docstring": "[virtual method override] This method is called during serialization\n with an external directory path that can be used to bypass pickle for saving\n large binary states.\n\n extraDataDir:\n Model's extra data directory path", "id": "f17680:c1:m46"} {"signature": "def _deSerializeExtraData(self, extraDataDir):", "body": "assert self.__restoringFromStateassert (self._netInfo.net is None), \"\"stateDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)self.__logger.debug(\"\", self)self._netInfo.net = Network(stateDir)self.__logger.debug(\"\", self)self._netInfo.net.initialize()if self.getInferenceType() == InferenceType.TemporalAnomaly:classifierType = self._getAnomalyClassifier().getSelf().__class__.__name__if classifierType is '':anomalyClParams = dict(trainRecords=self._classifier_helper._autoDetectWaitRecords,cacheSize=self._classifier_helper._history_length,)spEnable = (self._getSPRegion() is not None)tmEnable = TrueknnRegion = self._getAnomalyClassifier().getSelf()self._addAnomalyClassifierRegion(self._netInfo.net, anomalyClParams,spEnable, tmEnable)self._getAnomalyClassifier().getSelf()._iteration = self.__numRunCallsself._getAnomalyClassifier().getSelf()._recordsCache = (self._classifier_helper.saved_states)self._getAnomalyClassifier().getSelf().saved_categories = (self._classifier_helper.saved_categories)self._getAnomalyClassifier().getSelf()._knnclassifier = knnRegionself._getTPRegion().setParameter('', True)del self._classifier_helperself._netInfo.net.initialize()self.__restoringFromState = Falseself.__logger.debug(\"\", self)return", "docstring": "[virtual method override] This method is called during deserialization\n (after __setstate__) with an external directory path that can be used to\n bypass pickle for loading large binary states.\n\n extraDataDir:\n Model's extra data directory path", "id": "f17680:c1:m47"} {"signature": "def _addAnomalyClassifierRegion(self, network, params, spEnable, tmEnable):", "body": "allParams = copy.deepcopy(params)knnParams = dict(k=,distanceMethod='',distanceNorm=,doBinarization=,replaceDuplicates=,maxStoredPatterns=)allParams.update(knnParams)if allParams[''] is None:allParams[''] = DEFAULT_ANOMALY_TRAINRECORDSif allParams[''] is None:allParams[''] = DEFAULT_ANOMALY_CACHESIZEif self._netInfo is not None and self._netInfo.net is not Noneand self._getAnomalyClassifier() is not None:self._netInfo.net.removeRegion('')network.addRegion(\"\",\"\",json.dumps(allParams))if spEnable:network.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")else:network.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")if 
tmEnable:network.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")network.link(\"\", \"\", \"\", \"\",srcOutput=\"\", destInput=\"\")else:raise RuntimeError(\"\")", "docstring": "Attaches an 'AnomalyClassifier' region to the network. Will remove current\n'AnomalyClassifier' region if it exists.\n\nParameters\n-----------\nnetwork - network to add the AnomalyClassifier region\nparams - parameters to pass to the region\nspEnable - True if network has an SP region\ntmEnable - True if network has a TM region; Currently requires True", "id": "f17680:c1:m48"} {"signature": "def __getNetworkStateDirectory(self, extraDataDir):", "body": "if self.__restoringFromV1:if self.getInferenceType() == InferenceType.TemporalNextStep:leafName = ''+ \"\"else:leafName = ''+ \"\"else:leafName = InferenceType.getLabel(self.getInferenceType()) + \"\"path = os.path.join(extraDataDir, leafName)path = os.path.abspath(path)return path", "docstring": "extraDataDir:\n Model's extra data directory path\nReturns: Absolute directory path for saving CLA Network", "id": "f17680:c1:m49"} {"signature": "def __manglePrivateMemberName(self, privateMemberName, skipCheck=False):", "body": "assert privateMemberName.startswith(\"\"),\"\" % privateMemberNameassert not privateMemberName.startswith(\"\"),\"\" % privateMemberNameassert not privateMemberName.endswith(\"\"),\"\" % privateMemberNamerealName = \"\" + (self.__myClassName).lstrip(\"\") + privateMemberNameif not skipCheck:getattr(self, realName)return realName", "docstring": "Mangles the given mangled (private) member name; a mangled member name\n is one whose name begins with two or more underscores and ends with one\n or zero underscores.\n\n privateMemberName:\n The private member name (e.g., \"__logger\")\n\n skipCheck: Pass True to skip test for presence of the demangled member\n in our instance.\n\n Returns: The demangled member name (e.g., \"_HTMPredictionModel__logger\")", "id": "f17680:c1:m50"} {"signature": "@abstractmethoddef emitPeriodicMetrics(self, metrics):", "body": "", "docstring": "Emits periodic metrics to stdout in JSON.\n\n :param metrics: A list of metrics as returned by\n :meth:`nupic.frameworks.opf.opf_task_driver.OPFTaskDriver.getMetrics`.", "id": "f17681:c0:m0"} {"signature": "@abstractmethoddef emitFinalMetrics(self, metrics):", "body": "", "docstring": "Emits final metrics.\n\n .. note:: the intention is that the final metrics may go to a different\n place (e.g., csv file) versus :meth:`emitPeriodicMetrics`\n (e.g., stdout)\n\n :param metrics: A list of metrics as returned by\n :meth:`nupic.frameworks.opf.opf_task_driver.OPFTaskDriver.getMetrics`.", "id": "f17681:c0:m1"} {"signature": "@abstractmethoddef getDatasetFieldMetaData(self):", "body": "", "docstring": ":returns: a tuple of dataset field metadata descriptors that are\n arranged in the same order as the columns in the dataset.\n Each field metadata descriptor is of type\n :class:`nupic.data.field_meta.FieldMetaInfo`", "id": "f17681:c1:m0"} {"signature": "@abstractmethoddef next(self):", "body": "", "docstring": ":returns: The next record from the dataset. 
The returned record object\n is of the same structure as returned by\n :meth:`nupic.data.record_stream.RecordStreamIface.getNextRecord`.\n Returns ``None`` if the next record is not available yet.\n\n:raises: (StopIteration) if a hard \"end of file\" has been reached\n and no more records will be forthcoming.", "id": "f17681:c1:m1"} {"signature": "@abstractmethoddef close(self):", "body": "", "docstring": "Closes the writer (e.g., close the underlying file)", "id": "f17681:c2:m0"} {"signature": "@abstractmethoddef append(self, inputRow, predictionRow, sequenceReset, metrics=None):", "body": "", "docstring": "Emits a single prediction as input versus predicted.\n\n inputRow: A tuple or list of fields comprising the input data row.\n predictionRow: A tuple or list of fields comprising the prediction, or None\n if prediction is not available. The None use case is\n intended for temporal inference where there is no matching\n prediction for the same timestep as the given ground truth,\n such as the case with the very first input record.\n sequenceReset: A value that tests True if the input row was\n accompanied by a sequence reset signal; False if not\n accompanied by a sequence reset signal.\n\n metrics: OPTIONAL -A dictionary of metrics that will be written out\n with every prediction. The keys are the automatically\n generated metric labels (see MetricSpec in\n prediction_metrics_manager.py), and the value is the real\n number value of the metric.", "id": "f17681:c2:m1"} {"signature": "@abstractmethoddef checkpoint(self, checkpointSink, maxRows):", "body": "", "docstring": "Save a checkpoint of the prediction output stream. The checkpoint\n comprises up to maxRows of the most recent inference records.\n\n Parameters:\n ----------------------------------------------------------------------\n checkpointSink: A File-like object where predictions checkpoint data, if\n any, will be stored.\n maxRows: Maximum number of most recent inference rows\n to checkpoint.", "id": "f17681:c2:m2"} {"signature": "def _translateMetricsToJSON(self, metrics, label):", "body": "metricsDict = metricsdef _mapNumpyValues(obj):\"\"\"\"\"\"import numpyif isinstance(obj, numpy.float32):return float(obj)elif isinstance(obj, numpy.bool_):return bool(obj)elif isinstance(obj, numpy.ndarray):return obj.tolist()else:raise TypeError(\"\" % (obj, obj.__class__))jsonString = json.dumps(metricsDict, indent=, default=_mapNumpyValues)return jsonString", "docstring": "Translates the given metrics value to JSON string\n\n metrics: A list of dictionaries per OPFTaskDriver.getMetrics():\n\n Returns: JSON string representing the given metrics object.", "id": "f17681:c3:m4"} {"signature": "def __init__(self, experimentDir, label, inferenceType,fields, metricNames=None, checkpointSource=None):", "body": "self.__experimentDir = experimentDirself.__inferenceType = inferenceTypeself.__inputFieldsMeta = tuple(copy.deepcopy(fields))self.__numInputFields = len(self.__inputFieldsMeta)self.__label = labelif metricNames is not None:metricNames.sort()self.__metricNames = metricNamesself.__outputFieldsMeta = []self._rawInputNames = []self.__datasetPath = Noneself.__dataset = Noneself.__checkpointCache = Noneif checkpointSource is not None:checkpointSource.seek()self.__checkpointCache = io.StringIO()shutil.copyfileobj(checkpointSource, self.__checkpointCache)return", "docstring": "Constructor\n\n experimentDir:\n experiment directory path that contains description.py\n\n label: A label string to incorporate into the filename.\n\n\n 
inferenceElements:\n\n\n inferenceType:\n A constant from opf_utils.InferenceType for the\n requested prediction writer\n\n fields: a non-empty sequence of nupic.data.fieldmeta.FieldMetaInfo\n representing fields that will be emitted to this prediction\n writer\n\n metricNames: OPTIONAL - A list of metric names that will be emitted by this\n prediction writer\n\n checkpointSource:\n If not None, a File-like object containing the\n previously-checkpointed predictions for setting the initial\n contents of this PredictionOutputStream. Will be copied\n before returning, if needed.", "id": "f17681:c5:m0"} {"signature": "def __openDatafile(self, modelResult):", "body": "resetFieldMeta = FieldMetaInfo(name=\"\",type=FieldMetaType.integer,special = FieldMetaSpecial.reset)self.__outputFieldsMeta.append(resetFieldMeta)rawInput = modelResult.rawInputrawFields = list(rawInput.keys())rawFields.sort()for field in rawFields:if field.startswith('') or field == '':continuevalue = rawInput[field]meta = FieldMetaInfo(name=field, type=FieldMetaType.string,special=FieldMetaSpecial.none)self.__outputFieldsMeta.append(meta)self._rawInputNames.append(field)for inferenceElement, value in modelResult.inferences.items():inferenceLabel = InferenceElement.getLabel(inferenceElement)if type(value) in (list, tuple):self.__outputFieldsMeta.extend(self.__getListMetaInfo(inferenceElement))elif isinstance(value, dict):self.__outputFieldsMeta.extend(self.__getDictMetaInfo(inferenceElement,value))else:if InferenceElement.getInputElement(inferenceElement):self.__outputFieldsMeta.append(FieldMetaInfo(name=inferenceLabel+\"\",type=FieldMetaType.string, special = ''))self.__outputFieldsMeta.append(FieldMetaInfo(name=inferenceLabel,type=FieldMetaType.string, special = ''))if self.__metricNames:for metricName in self.__metricNames:metricField = FieldMetaInfo(name = metricName,type = FieldMetaType.float,special = FieldMetaSpecial.none)self.__outputFieldsMeta.append(metricField)inferenceDir = _FileUtils.createExperimentInferenceDir(self.__experimentDir)filename = (self.__label + \"\" +opf_utils.InferenceType.getLabel(self.__inferenceType) +\"\")self.__datasetPath = os.path.join(inferenceDir, filename)print(\"\" % self.__datasetPath)print(\"\" % ([tuple(i) for i in self.__outputFieldsMeta],))self.__dataset = FileRecordStream(streamID=self.__datasetPath, write=True,fields=self.__outputFieldsMeta)if self.__checkpointCache is not None:self.__checkpointCache.seek()reader = csv.reader(self.__checkpointCache, dialect='')try:header = next(reader)except StopIteration:print(\"\" % (self.__datasetPath,))else:assert tuple(self.__dataset.getFieldNames()) == tuple(header),\"\" % (tuple(self.__dataset.getFieldNames()), tuple(header))numRowsCopied = while True:try:row = next(reader)except StopIteration:breakself.__dataset.appendRecord(row)numRowsCopied += self.__dataset.flush()print(\"\" % (numRowsCopied, self.__datasetPath))self.__checkpointCache.close()self.__checkpointCache = Nonereturn", "docstring": "Open the data file and write the header row", "id": "f17681:c5:m1"} {"signature": "def setLoggedMetrics(self, metricNames):", "body": "if metricNames is None:self.__metricNames = set([])else:self.__metricNames = set(metricNames)", "docstring": "Tell the writer which metrics should be written\n\n Parameters:\n -----------------------------------------------------------------------\n metricNames: A list of metric labels to be written", "id": "f17681:c5:m2"} {"signature": "def close(self):", "body": "if 
self.__dataset:self.__dataset.close()self.__dataset = Nonereturn", "docstring": "[virtual method override] Closes the writer (e.g., close the underlying\n file)", "id": "f17681:c5:m3"} {"signature": "def __getListMetaInfo(self, inferenceElement):", "body": "fieldMetaInfo = []inferenceLabel = InferenceElement.getLabel(inferenceElement)for inputFieldMeta in self.__inputFieldsMeta:if InferenceElement.getInputElement(inferenceElement):outputFieldMeta = FieldMetaInfo(name=inputFieldMeta.name + \"\",type=inputFieldMeta.type,special=inputFieldMeta.special)predictionField = FieldMetaInfo(name=inputFieldMeta.name + \"\" + inferenceLabel,type=inputFieldMeta.type,special=inputFieldMeta.special)fieldMetaInfo.append(outputFieldMeta)fieldMetaInfo.append(predictionField)return fieldMetaInfo", "docstring": "Get field metadata information for inferences that are of list type\n TODO: Right now we assume list inferences are associated with the input field\n metadata", "id": "f17681:c5:m4"} {"signature": "def __getDictMetaInfo(self, inferenceElement, inferenceDict):", "body": "fieldMetaInfo = []inferenceLabel = InferenceElement.getLabel(inferenceElement)if InferenceElement.getInputElement(inferenceElement):fieldMetaInfo.append(FieldMetaInfo(name=inferenceLabel+\"\",type=FieldMetaType.string,special = ''))keys = sorted(inferenceDict.keys())for key in keys:fieldMetaInfo.append(FieldMetaInfo(name=inferenceLabel+\"\"+str(key),type=FieldMetaType.string,special=''))return fieldMetaInfo", "docstring": "Get field metadate information for inferences that are of dict type", "id": "f17681:c5:m5"} {"signature": "def append(self, modelResult):", "body": "inferences = modelResult.inferenceshasInferences = Falseif inferences is not None:for value in inferences.values():hasInferences = hasInferences or (value is not None)if not hasInferences:returnif self.__dataset is None:self.__openDatafile(modelResult)inputData = modelResult.sensorInputsequenceReset = int(bool(inputData.sequenceReset))outputRow = [sequenceReset]rawInput = modelResult.rawInputfor field in self._rawInputNames:outputRow.append(str(rawInput[field]))for inferenceElement, outputVal in inferences.items():inputElement = InferenceElement.getInputElement(inferenceElement)if inputElement:inputVal = getattr(inputData, inputElement)else:inputVal = Noneif type(outputVal) in (list, tuple):assert type(inputVal) in (list, tuple, None)for iv, ov in zip(inputVal, outputVal):outputRow.append(str(iv))outputRow.append(str(ov))elif isinstance(outputVal, dict):if inputVal is not None:if modelResult.predictedFieldName is not None:outputRow.append(str(inputVal[modelResult.predictedFieldName]))else:outputRow.append(str(inputVal))for key in sorted(outputVal.keys()):outputRow.append(str(outputVal[key]))else:if inputVal is not None:outputRow.append(str(inputVal))outputRow.append(str(outputVal))metrics = modelResult.metricsfor metricName in self.__metricNames:outputRow.append(metrics.get(metricName, ))self.__dataset.appendRecord(outputRow)self.__dataset.flush()return", "docstring": "[virtual method override] Emits a single prediction as input versus\n predicted.\n\n modelResult: An opf_utils.ModelResult object that contains the model input\n and output for the current timestep.", "id": "f17681:c5:m6"} {"signature": "def checkpoint(self, checkpointSink, maxRows):", "body": "checkpointSink.truncate()if self.__dataset is None:if self.__checkpointCache is not None:self.__checkpointCache.seek()shutil.copyfileobj(self.__checkpointCache, 
checkpointSink)checkpointSink.flush()returnelse:returnself.__dataset.flush()totalDataRows = self.__dataset.getDataRowCount()if totalDataRows == :returnreader = FileRecordStream(self.__datasetPath, missingValues=[])writer = csv.writer(checkpointSink)writer.writerow(reader.getFieldNames())numToWrite = min(maxRows, totalDataRows)numRowsToSkip = totalDataRows - numToWritefor i in range(numRowsToSkip):next(reader)numWritten = while True:row = reader.getNextRecord()if row is None:break;row = [str(element) for element in row]writer.writerow(row)numWritten +=assert numWritten == numToWrite,\"\" % (numWritten, numToWrite)checkpointSink.flush()return", "docstring": "[virtual method override] Save a checkpoint of the prediction output\n stream. The checkpoint comprises up to maxRows of the most recent inference\n records.\n\n Parameters:\n ----------------------------------------------------------------------\n checkpointSink: A File-like object where predictions checkpoint data, if\n any, will be stored.\n maxRows: Maximum number of most recent inference rows\n to checkpoint.", "id": "f17681:c5:m7"} {"signature": "def update(self, modelResult):", "body": "self.__writer.append(modelResult)return", "docstring": "Emit a input/prediction pair, if possible.\n\n modelResult: An opf_utils.ModelResult object that contains the model input\n and output for the current timestep.", "id": "f17681:c6:m2"} {"signature": "def __init__(self, writer):", "body": "self.__logger = logging.getLogger(\"\".join(['', self.__class__.__module__, self.__class__.__name__]))self.__writer = writerself.__inferenceShifter = InferenceShifter()return", "docstring": "writer: Non-temporal prediction log writer conforming to\n PredictionWriterIface interface.", "id": "f17681:c7:m0"} {"signature": "def update(self, modelResult):", "body": "self.__writer.append(self.__inferenceShifter.shift(modelResult))", "docstring": "Queue up the T(i+1) prediction value and emit a T(i)\n input/prediction pair, if possible. E.g., if the previous T(i-1)\n iteration was learn-only, then we would not have a T(i) prediction in our\n FIFO and would not be able to emit a meaningful input/prediction\n pair.\n\n modelResult: An opf_utils.ModelResult object that contains the model input\n and output for the current timestep.", "id": "f17681:c7:m2"} {"signature": "@staticmethoddef getExperimentInferenceDirPath(experimentDir):", "body": "return os.path.abspath(os.path.join(experimentDir, \"\"))", "docstring": "experimentDir: experiment directory path that contains description.py\n\nReturns: experiment inference directory path string (the path may not\n yet exist - see createExperimentInferenceDir())", "id": "f17681:c9:m0"} {"signature": "@classmethoddef createExperimentInferenceDir(cls, experimentDir):", "body": "path = cls.getExperimentInferenceDirPath(experimentDir)cls.makeDirectory(path)return path", "docstring": "Creates the inference output directory for the given experiment\n\n experimentDir: experiment directory path that contains description.py\n\n Returns: path of the inference output directory", "id": "f17681:c9:m1"} {"signature": "@staticmethoddef makeDirectory(path):", "body": "try:os.makedirs(path)except OSError as e:if e.errno == os.errno.EEXIST:passelse:raisereturn", "docstring": "Makes directory for the given directory path if it doesn't already exist\n in the filesystem. 
Creates all requested directory segments as needed.\n\n path: path of the directory to create.\n\n Returns: nothing", "id": "f17681:c9:m2"} {"signature": "@abstractmethoddef close(self):", "body": "", "docstring": "Closes connection to output store and cleans up any resources associated\n with writing.", "id": "f17683:c0:m0"} {"signature": "@abstractmethoddef writeRecord(self, modelResult):", "body": "", "docstring": "Emits a set of input data, inferences, and metrics from a model\n resulting from a single record.\n\n :param modelResult: (:class:`nupic.frameworks.opf.opf_utils.ModelResult`)\n contains the model input and output for the current timestep.", "id": "f17683:c0:m1"} {"signature": "@abstractmethoddef writeRecords(self, modelResults, progressCB=None):", "body": "", "docstring": "Same as :meth:`writeRecord`, but emits multiple rows in one shot.\n\n:param modelResults: (list) of\n :class:`nupic.frameworks.opf.opf_utils.ModelResult` objects, each\n representing one record.\n:param progressCB: (func) optional callback method that will be called after\n each batch of records is written.", "id": "f17683:c0:m2"} {"signature": "@abstractmethoddef setLoggedMetrics(self, metricNames):", "body": "", "docstring": "Sets which metrics should be written to the prediction log.\n\n :param metricNames: (list) metric names that match the labels of the\n metrics that should be written to the prediction log", "id": "f17683:c0:m3"} {"signature": "@abstractmethoddef checkpoint(self, checkpointSink, maxRows):", "body": "", "docstring": "Save a checkpoint of the prediction output stream. The checkpoint\n comprises up to maxRows of the most recent inference records.\n\n :param checkpointSink: A File-like object where predictions checkpoint data,\n if any, will be stored.\n :param maxRows: (int) Maximum number of most recent inference rows to\n checkpoint.", "id": "f17683:c0:m4"} {"signature": "@classmethoddef __getLogger(cls):", "body": "if cls.__logger is None:cls.__logger = opf_utils.initLogger(cls)return cls.__logger", "docstring": "Get the logger for this object.\n\n :returns: (Logger) A Logger object.", "id": "f17684:c0:m0"} {"signature": "@staticmethoddef create(modelConfig, logLevel=logging.ERROR):", "body": "logger = ModelFactory.__getLogger()logger.setLevel(logLevel)logger.debug(\"\", modelConfig)modelClass = Noneif modelConfig[''] == \"\":modelClass = HTMPredictionModelelif modelConfig[''] == \"\":modelClass = TwoGramModelelif modelConfig[''] == \"\":modelClass = PreviousValueModelelse:raise Exception(\"\" %modelConfig[''])return modelClass(**modelConfig[''])", "docstring": "Create a new model instance, given a description dictionary.\n\n :param modelConfig: (dict)\n A dictionary describing the current model,\n `described here <../../quick-start/example-model-params.html>`_.\n\n :param logLevel: (int) The level of logging output that should be generated\n\n :raises Exception: Unsupported model type\n\n :returns: :class:`nupic.frameworks.opf.model.Model`", "id": "f17684:c0:m1"} {"signature": "@staticmethoddef loadFromCheckpoint(savedModelDir, newSerialization=False):", "body": "if newSerialization:return HTMPredictionModel.readFromCheckpoint(savedModelDir)else:return Model.load(savedModelDir)", "docstring": "Load saved model.\n\n :param savedModelDir: (string)\n Directory where the experiment is to be or was saved\n :returns: (:class:`nupic.frameworks.opf.model.Model`) The loaded model\n instance.", "id": "f17684:c0:m2"}
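The ModelFactory records above (f17684:c0:m1 and f17684:c0:m2) describe the public entry points for building and restoring OPF models. The following is a minimal usage sketch, not code taken from this corpus: it assumes a separate `model_params` module holding a full OPF parameter dictionary (as in the NuPIC quick-start layout), and the `consumption` field name and checkpoint directory are illustrative placeholders.

```python
# Hedged sketch of the OPF ModelFactory workflow referenced above.
from nupic.frameworks.opf.model_factory import ModelFactory

# Assumption: MODEL_PARAMS is a complete OPF parameter dictionary
# ({'model': 'HTMPrediction', 'modelParams': {...}}) kept in its own module.
from model_params import MODEL_PARAMS


def run_and_checkpoint(records, checkpoint_dir):
    """Create a model, feed it dict records, checkpoint it, and reload it."""
    model = ModelFactory.create(MODEL_PARAMS)          # dispatches on MODEL_PARAMS['model']
    model.enableInference({"predictedField": "consumption"})  # assumed field name

    for record in records:                             # each record: {fieldName: value, ...}
        result = model.run(record)                     # ModelResult with .inferences populated
        # 'multiStepBestPredictions' is present when the model is configured
        # for multi-step inference; other inference elements may appear instead.
        print(result.inferences.get("multiStepBestPredictions"))

    model.save(checkpoint_dir)                         # persists model state and the CLA network
    return ModelFactory.loadFromCheckpoint(checkpoint_dir)
```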